1 // SPDX-License-Identifier: GPL-2.0 2 3 #include <linux/frame.h> 4 #include <linux/percpu.h> 5 6 #include <asm/debugreg.h> 7 #include <asm/mmu_context.h> 8 9 #include "cpuid.h" 10 #include "hyperv.h" 11 #include "mmu.h" 12 #include "nested.h" 13 #include "pmu.h" 14 #include "trace.h" 15 #include "x86.h" 16 17 static bool __read_mostly enable_shadow_vmcs = 1; 18 module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO); 19 20 static bool __read_mostly nested_early_check = 0; 21 module_param(nested_early_check, bool, S_IRUGO); 22 23 #define CC(consistency_check) \ 24 ({ \ 25 bool failed = (consistency_check); \ 26 if (failed) \ 27 trace_kvm_nested_vmenter_failed(#consistency_check, 0); \ 28 failed; \ 29 }) 30 31 /* 32 * Hyper-V requires all of these, so mark them as supported even though 33 * they are just treated the same as all-context. 34 */ 35 #define VMX_VPID_EXTENT_SUPPORTED_MASK \ 36 (VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT | \ 37 VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT | \ 38 VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT | \ 39 VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT) 40 41 #define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5 42 43 enum { 44 VMX_VMREAD_BITMAP, 45 VMX_VMWRITE_BITMAP, 46 VMX_BITMAP_NR 47 }; 48 static unsigned long *vmx_bitmap[VMX_BITMAP_NR]; 49 50 #define vmx_vmread_bitmap (vmx_bitmap[VMX_VMREAD_BITMAP]) 51 #define vmx_vmwrite_bitmap (vmx_bitmap[VMX_VMWRITE_BITMAP]) 52 53 struct shadow_vmcs_field { 54 u16 encoding; 55 u16 offset; 56 }; 57 static struct shadow_vmcs_field shadow_read_only_fields[] = { 58 #define SHADOW_FIELD_RO(x, y) { x, offsetof(struct vmcs12, y) }, 59 #include "vmcs_shadow_fields.h" 60 }; 61 static int max_shadow_read_only_fields = 62 ARRAY_SIZE(shadow_read_only_fields); 63 64 static struct shadow_vmcs_field shadow_read_write_fields[] = { 65 #define SHADOW_FIELD_RW(x, y) { x, offsetof(struct vmcs12, y) }, 66 #include "vmcs_shadow_fields.h" 67 }; 68 static int max_shadow_read_write_fields = 69 ARRAY_SIZE(shadow_read_write_fields); 70 71 static void init_vmcs_shadow_fields(void) 72 { 73 int i, j; 74 75 memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE); 76 memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE); 77 78 for (i = j = 0; i < max_shadow_read_only_fields; i++) { 79 struct shadow_vmcs_field entry = shadow_read_only_fields[i]; 80 u16 field = entry.encoding; 81 82 if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 && 83 (i + 1 == max_shadow_read_only_fields || 84 shadow_read_only_fields[i + 1].encoding != field + 1)) 85 pr_err("Missing field from shadow_read_only_field %x\n", 86 field + 1); 87 88 clear_bit(field, vmx_vmread_bitmap); 89 if (field & 1) 90 #ifdef CONFIG_X86_64 91 continue; 92 #else 93 entry.offset += sizeof(u32); 94 #endif 95 shadow_read_only_fields[j++] = entry; 96 } 97 max_shadow_read_only_fields = j; 98 99 for (i = j = 0; i < max_shadow_read_write_fields; i++) { 100 struct shadow_vmcs_field entry = shadow_read_write_fields[i]; 101 u16 field = entry.encoding; 102 103 if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 && 104 (i + 1 == max_shadow_read_write_fields || 105 shadow_read_write_fields[i + 1].encoding != field + 1)) 106 pr_err("Missing field from shadow_read_write_field %x\n", 107 field + 1); 108 109 WARN_ONCE(field >= GUEST_ES_AR_BYTES && 110 field <= GUEST_TR_AR_BYTES, 111 "Update vmcs12_write_any() to drop reserved bits from AR_BYTES"); 112 113 /* 114 * PML and the preemption timer can be emulated, but the 115 * processor cannot vmwrite to fields that don't exist 116 * on bare metal. 
117 */ 118 switch (field) { 119 case GUEST_PML_INDEX: 120 if (!cpu_has_vmx_pml()) 121 continue; 122 break; 123 case VMX_PREEMPTION_TIMER_VALUE: 124 if (!cpu_has_vmx_preemption_timer()) 125 continue; 126 break; 127 case GUEST_INTR_STATUS: 128 if (!cpu_has_vmx_apicv()) 129 continue; 130 break; 131 default: 132 break; 133 } 134 135 clear_bit(field, vmx_vmwrite_bitmap); 136 clear_bit(field, vmx_vmread_bitmap); 137 if (field & 1) 138 #ifdef CONFIG_X86_64 139 continue; 140 #else 141 entry.offset += sizeof(u32); 142 #endif 143 shadow_read_write_fields[j++] = entry; 144 } 145 max_shadow_read_write_fields = j; 146 } 147 148 /* 149 * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(), 150 * set the success or error code of an emulated VMX instruction (as specified 151 * by Vol 2B, VMX Instruction Reference, "Conventions"), and skip the emulated 152 * instruction. 153 */ 154 static int nested_vmx_succeed(struct kvm_vcpu *vcpu) 155 { 156 vmx_set_rflags(vcpu, vmx_get_rflags(vcpu) 157 & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | 158 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF)); 159 return kvm_skip_emulated_instruction(vcpu); 160 } 161 162 static int nested_vmx_failInvalid(struct kvm_vcpu *vcpu) 163 { 164 vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu) 165 & ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF | 166 X86_EFLAGS_SF | X86_EFLAGS_OF)) 167 | X86_EFLAGS_CF); 168 return kvm_skip_emulated_instruction(vcpu); 169 } 170 171 static int nested_vmx_failValid(struct kvm_vcpu *vcpu, 172 u32 vm_instruction_error) 173 { 174 struct vcpu_vmx *vmx = to_vmx(vcpu); 175 176 /* 177 * failValid writes the error number to the current VMCS, which 178 * can't be done if there isn't a current VMCS. 179 */ 180 if (vmx->nested.current_vmptr == -1ull && !vmx->nested.hv_evmcs) 181 return nested_vmx_failInvalid(vcpu); 182 183 vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu) 184 & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | 185 X86_EFLAGS_SF | X86_EFLAGS_OF)) 186 | X86_EFLAGS_ZF); 187 get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error; 188 /* 189 * We don't need to force a shadow sync because 190 * VM_INSTRUCTION_ERROR is not shadowed 191 */ 192 return kvm_skip_emulated_instruction(vcpu); 193 } 194 195 static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator) 196 { 197 /* TODO: not to reset guest simply here. */ 198 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); 199 pr_debug_ratelimited("kvm: nested vmx abort, indicator %d\n", indicator); 200 } 201 202 static inline bool vmx_control_verify(u32 control, u32 low, u32 high) 203 { 204 return fixed_bits_valid(control, low, high); 205 } 206 207 static inline u64 vmx_control_msr(u32 low, u32 high) 208 { 209 return low | ((u64)high << 32); 210 } 211 212 static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx) 213 { 214 secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_SHADOW_VMCS); 215 vmcs_write64(VMCS_LINK_POINTER, -1ull); 216 vmx->nested.need_vmcs12_to_shadow_sync = false; 217 } 218 219 static inline void nested_release_evmcs(struct kvm_vcpu *vcpu) 220 { 221 struct vcpu_vmx *vmx = to_vmx(vcpu); 222 223 if (!vmx->nested.hv_evmcs) 224 return; 225 226 kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true); 227 vmx->nested.hv_evmcs_vmptr = -1ull; 228 vmx->nested.hv_evmcs = NULL; 229 } 230 231 /* 232 * Free whatever needs to be freed from vmx->nested when L1 goes down, or 233 * just stops using VMX. 
234 */ 235 static void free_nested(struct kvm_vcpu *vcpu) 236 { 237 struct vcpu_vmx *vmx = to_vmx(vcpu); 238 239 if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon) 240 return; 241 242 kvm_clear_request(KVM_REQ_GET_VMCS12_PAGES, vcpu); 243 244 vmx->nested.vmxon = false; 245 vmx->nested.smm.vmxon = false; 246 free_vpid(vmx->nested.vpid02); 247 vmx->nested.posted_intr_nv = -1; 248 vmx->nested.current_vmptr = -1ull; 249 if (enable_shadow_vmcs) { 250 vmx_disable_shadow_vmcs(vmx); 251 vmcs_clear(vmx->vmcs01.shadow_vmcs); 252 free_vmcs(vmx->vmcs01.shadow_vmcs); 253 vmx->vmcs01.shadow_vmcs = NULL; 254 } 255 kfree(vmx->nested.cached_vmcs12); 256 vmx->nested.cached_vmcs12 = NULL; 257 kfree(vmx->nested.cached_shadow_vmcs12); 258 vmx->nested.cached_shadow_vmcs12 = NULL; 259 /* Unpin physical memory we referred to in the vmcs02 */ 260 if (vmx->nested.apic_access_page) { 261 kvm_release_page_clean(vmx->nested.apic_access_page); 262 vmx->nested.apic_access_page = NULL; 263 } 264 kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true); 265 kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true); 266 vmx->nested.pi_desc = NULL; 267 268 kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL); 269 270 nested_release_evmcs(vcpu); 271 272 free_loaded_vmcs(&vmx->nested.vmcs02); 273 } 274 275 static void vmx_sync_vmcs_host_state(struct vcpu_vmx *vmx, 276 struct loaded_vmcs *prev) 277 { 278 struct vmcs_host_state *dest, *src; 279 280 if (unlikely(!vmx->guest_state_loaded)) 281 return; 282 283 src = &prev->host_state; 284 dest = &vmx->loaded_vmcs->host_state; 285 286 vmx_set_host_fs_gs(dest, src->fs_sel, src->gs_sel, src->fs_base, src->gs_base); 287 dest->ldt_sel = src->ldt_sel; 288 #ifdef CONFIG_X86_64 289 dest->ds_sel = src->ds_sel; 290 dest->es_sel = src->es_sel; 291 #endif 292 } 293 294 static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs) 295 { 296 struct vcpu_vmx *vmx = to_vmx(vcpu); 297 struct loaded_vmcs *prev; 298 int cpu; 299 300 if (vmx->loaded_vmcs == vmcs) 301 return; 302 303 cpu = get_cpu(); 304 prev = vmx->loaded_vmcs; 305 vmx->loaded_vmcs = vmcs; 306 vmx_vcpu_load_vmcs(vcpu, cpu); 307 vmx_sync_vmcs_host_state(vmx, prev); 308 put_cpu(); 309 310 vmx_segment_cache_clear(vmx); 311 } 312 313 /* 314 * Ensure that the current vmcs of the logical processor is the 315 * vmcs01 of the vcpu before calling free_nested(). 
316 */ 317 void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu) 318 { 319 vcpu_load(vcpu); 320 vmx_leave_nested(vcpu); 321 vmx_switch_vmcs(vcpu, &to_vmx(vcpu)->vmcs01); 322 free_nested(vcpu); 323 vcpu_put(vcpu); 324 } 325 326 static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu, 327 struct x86_exception *fault) 328 { 329 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 330 struct vcpu_vmx *vmx = to_vmx(vcpu); 331 u32 exit_reason; 332 unsigned long exit_qualification = vcpu->arch.exit_qualification; 333 334 if (vmx->nested.pml_full) { 335 exit_reason = EXIT_REASON_PML_FULL; 336 vmx->nested.pml_full = false; 337 exit_qualification &= INTR_INFO_UNBLOCK_NMI; 338 } else if (fault->error_code & PFERR_RSVD_MASK) 339 exit_reason = EXIT_REASON_EPT_MISCONFIG; 340 else 341 exit_reason = EXIT_REASON_EPT_VIOLATION; 342 343 nested_vmx_vmexit(vcpu, exit_reason, 0, exit_qualification); 344 vmcs12->guest_physical_address = fault->address; 345 } 346 347 static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu) 348 { 349 WARN_ON(mmu_is_nested(vcpu)); 350 351 vcpu->arch.mmu = &vcpu->arch.guest_mmu; 352 kvm_init_shadow_ept_mmu(vcpu, 353 to_vmx(vcpu)->nested.msrs.ept_caps & 354 VMX_EPT_EXECUTE_ONLY_BIT, 355 nested_ept_ad_enabled(vcpu), 356 nested_ept_get_cr3(vcpu)); 357 vcpu->arch.mmu->set_cr3 = vmx_set_cr3; 358 vcpu->arch.mmu->get_cr3 = nested_ept_get_cr3; 359 vcpu->arch.mmu->inject_page_fault = nested_ept_inject_page_fault; 360 vcpu->arch.mmu->get_pdptr = kvm_pdptr_read; 361 362 vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu; 363 } 364 365 static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu) 366 { 367 vcpu->arch.mmu = &vcpu->arch.root_mmu; 368 vcpu->arch.walk_mmu = &vcpu->arch.root_mmu; 369 } 370 371 static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12, 372 u16 error_code) 373 { 374 bool inequality, bit; 375 376 bit = (vmcs12->exception_bitmap & (1u << PF_VECTOR)) != 0; 377 inequality = 378 (error_code & vmcs12->page_fault_error_code_mask) != 379 vmcs12->page_fault_error_code_match; 380 return inequality ^ bit; 381 } 382 383 384 /* 385 * KVM wants to inject page-faults which it got to the guest. This function 386 * checks whether in a nested guest, we need to inject them to L1 or L2. 387 */ 388 static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit_qual) 389 { 390 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 391 unsigned int nr = vcpu->arch.exception.nr; 392 bool has_payload = vcpu->arch.exception.has_payload; 393 unsigned long payload = vcpu->arch.exception.payload; 394 395 if (nr == PF_VECTOR) { 396 if (vcpu->arch.exception.nested_apf) { 397 *exit_qual = vcpu->arch.apf.nested_apf_token; 398 return 1; 399 } 400 if (nested_vmx_is_page_fault_vmexit(vmcs12, 401 vcpu->arch.exception.error_code)) { 402 *exit_qual = has_payload ? 
payload : vcpu->arch.cr2; 403 return 1; 404 } 405 } else if (vmcs12->exception_bitmap & (1u << nr)) { 406 if (nr == DB_VECTOR) { 407 if (!has_payload) { 408 payload = vcpu->arch.dr6; 409 payload &= ~(DR6_FIXED_1 | DR6_BT); 410 payload ^= DR6_RTM; 411 } 412 *exit_qual = payload; 413 } else 414 *exit_qual = 0; 415 return 1; 416 } 417 418 return 0; 419 } 420 421 422 static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu, 423 struct x86_exception *fault) 424 { 425 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 426 427 WARN_ON(!is_guest_mode(vcpu)); 428 429 if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code) && 430 !to_vmx(vcpu)->nested.nested_run_pending) { 431 vmcs12->vm_exit_intr_error_code = fault->error_code; 432 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, 433 PF_VECTOR | INTR_TYPE_HARD_EXCEPTION | 434 INTR_INFO_DELIVER_CODE_MASK | INTR_INFO_VALID_MASK, 435 fault->address); 436 } else { 437 kvm_inject_page_fault(vcpu, fault); 438 } 439 } 440 441 static bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa) 442 { 443 return PAGE_ALIGNED(gpa) && !(gpa >> cpuid_maxphyaddr(vcpu)); 444 } 445 446 static int nested_vmx_check_io_bitmap_controls(struct kvm_vcpu *vcpu, 447 struct vmcs12 *vmcs12) 448 { 449 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS)) 450 return 0; 451 452 if (CC(!page_address_valid(vcpu, vmcs12->io_bitmap_a)) || 453 CC(!page_address_valid(vcpu, vmcs12->io_bitmap_b))) 454 return -EINVAL; 455 456 return 0; 457 } 458 459 static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu, 460 struct vmcs12 *vmcs12) 461 { 462 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS)) 463 return 0; 464 465 if (CC(!page_address_valid(vcpu, vmcs12->msr_bitmap))) 466 return -EINVAL; 467 468 return 0; 469 } 470 471 static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu, 472 struct vmcs12 *vmcs12) 473 { 474 if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) 475 return 0; 476 477 if (CC(!page_address_valid(vcpu, vmcs12->virtual_apic_page_addr))) 478 return -EINVAL; 479 480 return 0; 481 } 482 483 /* 484 * Check if MSR is intercepted for L01 MSR bitmap. 485 */ 486 static bool msr_write_intercepted_l01(struct kvm_vcpu *vcpu, u32 msr) 487 { 488 unsigned long *msr_bitmap; 489 int f = sizeof(unsigned long); 490 491 if (!cpu_has_vmx_msr_bitmap()) 492 return true; 493 494 msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap; 495 496 if (msr <= 0x1fff) { 497 return !!test_bit(msr, msr_bitmap + 0x800 / f); 498 } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { 499 msr &= 0x1fff; 500 return !!test_bit(msr, msr_bitmap + 0xc00 / f); 501 } 502 503 return true; 504 } 505 506 /* 507 * If a msr is allowed by L0, we should check whether it is allowed by L1. 508 * The corresponding bit will be cleared unless both of L0 and L1 allow it. 509 */ 510 static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1, 511 unsigned long *msr_bitmap_nested, 512 u32 msr, int type) 513 { 514 int f = sizeof(unsigned long); 515 516 /* 517 * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals 518 * have the write-low and read-high bitmap offsets the wrong way round. 519 * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff. 
520 */ 521 if (msr <= 0x1fff) { 522 if (type & MSR_TYPE_R && 523 !test_bit(msr, msr_bitmap_l1 + 0x000 / f)) 524 /* read-low */ 525 __clear_bit(msr, msr_bitmap_nested + 0x000 / f); 526 527 if (type & MSR_TYPE_W && 528 !test_bit(msr, msr_bitmap_l1 + 0x800 / f)) 529 /* write-low */ 530 __clear_bit(msr, msr_bitmap_nested + 0x800 / f); 531 532 } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { 533 msr &= 0x1fff; 534 if (type & MSR_TYPE_R && 535 !test_bit(msr, msr_bitmap_l1 + 0x400 / f)) 536 /* read-high */ 537 __clear_bit(msr, msr_bitmap_nested + 0x400 / f); 538 539 if (type & MSR_TYPE_W && 540 !test_bit(msr, msr_bitmap_l1 + 0xc00 / f)) 541 /* write-high */ 542 __clear_bit(msr, msr_bitmap_nested + 0xc00 / f); 543 544 } 545 } 546 547 static inline void enable_x2apic_msr_intercepts(unsigned long *msr_bitmap) 548 { 549 int msr; 550 551 for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) { 552 unsigned word = msr / BITS_PER_LONG; 553 554 msr_bitmap[word] = ~0; 555 msr_bitmap[word + (0x800 / sizeof(long))] = ~0; 556 } 557 } 558 559 /* 560 * Merge L0's and L1's MSR bitmap, return false to indicate that 561 * we do not use the hardware. 562 */ 563 static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu, 564 struct vmcs12 *vmcs12) 565 { 566 int msr; 567 unsigned long *msr_bitmap_l1; 568 unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.vmcs02.msr_bitmap; 569 struct kvm_host_map *map = &to_vmx(vcpu)->nested.msr_bitmap_map; 570 571 /* Nothing to do if the MSR bitmap is not in use. */ 572 if (!cpu_has_vmx_msr_bitmap() || 573 !nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS)) 574 return false; 575 576 if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->msr_bitmap), map)) 577 return false; 578 579 msr_bitmap_l1 = (unsigned long *)map->hva; 580 581 /* 582 * To keep the control flow simple, pay eight 8-byte writes (sixteen 583 * 4-byte writes on 32-bit systems) up front to enable intercepts for 584 * the x2APIC MSR range and selectively disable them below. 585 */ 586 enable_x2apic_msr_intercepts(msr_bitmap_l0); 587 588 if (nested_cpu_has_virt_x2apic_mode(vmcs12)) { 589 if (nested_cpu_has_apic_reg_virt(vmcs12)) { 590 /* 591 * L0 need not intercept reads for MSRs between 0x800 592 * and 0x8ff, it just lets the processor take the value 593 * from the virtual-APIC page; take those 256 bits 594 * directly from the L1 bitmap. 595 */ 596 for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) { 597 unsigned word = msr / BITS_PER_LONG; 598 599 msr_bitmap_l0[word] = msr_bitmap_l1[word]; 600 } 601 } 602 603 nested_vmx_disable_intercept_for_msr( 604 msr_bitmap_l1, msr_bitmap_l0, 605 X2APIC_MSR(APIC_TASKPRI), 606 MSR_TYPE_R | MSR_TYPE_W); 607 608 if (nested_cpu_has_vid(vmcs12)) { 609 nested_vmx_disable_intercept_for_msr( 610 msr_bitmap_l1, msr_bitmap_l0, 611 X2APIC_MSR(APIC_EOI), 612 MSR_TYPE_W); 613 nested_vmx_disable_intercept_for_msr( 614 msr_bitmap_l1, msr_bitmap_l0, 615 X2APIC_MSR(APIC_SELF_IPI), 616 MSR_TYPE_W); 617 } 618 } 619 620 /* KVM unconditionally exposes the FS/GS base MSRs to L1. */ 621 nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0, 622 MSR_FS_BASE, MSR_TYPE_RW); 623 624 nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0, 625 MSR_GS_BASE, MSR_TYPE_RW); 626 627 nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0, 628 MSR_KERNEL_GS_BASE, MSR_TYPE_RW); 629 630 /* 631 * Checking the L0->L1 bitmap is trying to verify two things: 632 * 633 * 1. L0 gave a permission to L1 to actually passthrough the MSR. 
This 634 * ensures that we do not accidentally generate an L02 MSR bitmap 635 * from the L12 MSR bitmap that is too permissive. 636 * 2. That L1 or L2s have actually used the MSR. This avoids 637 * unnecessarily merging of the bitmap if the MSR is unused. This 638 * works properly because we only update the L01 MSR bitmap lazily. 639 * So even if L0 should pass L1 these MSRs, the L01 bitmap is only 640 * updated to reflect this when L1 (or its L2s) actually write to 641 * the MSR. 642 */ 643 if (!msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL)) 644 nested_vmx_disable_intercept_for_msr( 645 msr_bitmap_l1, msr_bitmap_l0, 646 MSR_IA32_SPEC_CTRL, 647 MSR_TYPE_R | MSR_TYPE_W); 648 649 if (!msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD)) 650 nested_vmx_disable_intercept_for_msr( 651 msr_bitmap_l1, msr_bitmap_l0, 652 MSR_IA32_PRED_CMD, 653 MSR_TYPE_W); 654 655 kvm_vcpu_unmap(vcpu, &to_vmx(vcpu)->nested.msr_bitmap_map, false); 656 657 return true; 658 } 659 660 static void nested_cache_shadow_vmcs12(struct kvm_vcpu *vcpu, 661 struct vmcs12 *vmcs12) 662 { 663 struct kvm_host_map map; 664 struct vmcs12 *shadow; 665 666 if (!nested_cpu_has_shadow_vmcs(vmcs12) || 667 vmcs12->vmcs_link_pointer == -1ull) 668 return; 669 670 shadow = get_shadow_vmcs12(vcpu); 671 672 if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->vmcs_link_pointer), &map)) 673 return; 674 675 memcpy(shadow, map.hva, VMCS12_SIZE); 676 kvm_vcpu_unmap(vcpu, &map, false); 677 } 678 679 static void nested_flush_cached_shadow_vmcs12(struct kvm_vcpu *vcpu, 680 struct vmcs12 *vmcs12) 681 { 682 struct vcpu_vmx *vmx = to_vmx(vcpu); 683 684 if (!nested_cpu_has_shadow_vmcs(vmcs12) || 685 vmcs12->vmcs_link_pointer == -1ull) 686 return; 687 688 kvm_write_guest(vmx->vcpu.kvm, vmcs12->vmcs_link_pointer, 689 get_shadow_vmcs12(vcpu), VMCS12_SIZE); 690 } 691 692 /* 693 * In nested virtualization, check if L1 has set 694 * VM_EXIT_ACK_INTR_ON_EXIT 695 */ 696 static bool nested_exit_intr_ack_set(struct kvm_vcpu *vcpu) 697 { 698 return get_vmcs12(vcpu)->vm_exit_controls & 699 VM_EXIT_ACK_INTR_ON_EXIT; 700 } 701 702 static bool nested_exit_on_nmi(struct kvm_vcpu *vcpu) 703 { 704 return nested_cpu_has_nmi_exiting(get_vmcs12(vcpu)); 705 } 706 707 static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu, 708 struct vmcs12 *vmcs12) 709 { 710 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) && 711 CC(!page_address_valid(vcpu, vmcs12->apic_access_addr))) 712 return -EINVAL; 713 else 714 return 0; 715 } 716 717 static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu, 718 struct vmcs12 *vmcs12) 719 { 720 if (!nested_cpu_has_virt_x2apic_mode(vmcs12) && 721 !nested_cpu_has_apic_reg_virt(vmcs12) && 722 !nested_cpu_has_vid(vmcs12) && 723 !nested_cpu_has_posted_intr(vmcs12)) 724 return 0; 725 726 /* 727 * If virtualize x2apic mode is enabled, 728 * virtualize apic access must be disabled. 729 */ 730 if (CC(nested_cpu_has_virt_x2apic_mode(vmcs12) && 731 nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))) 732 return -EINVAL; 733 734 /* 735 * If virtual interrupt delivery is enabled, 736 * we must exit on external interrupts. 737 */ 738 if (CC(nested_cpu_has_vid(vmcs12) && !nested_exit_on_intr(vcpu))) 739 return -EINVAL; 740 741 /* 742 * bits 15:8 should be zero in posted_intr_nv, 743 * the descriptor address has been already checked 744 * in nested_get_vmcs12_pages. 745 * 746 * bits 5:0 of posted_intr_desc_addr should be zero. 
747 */ 748 if (nested_cpu_has_posted_intr(vmcs12) && 749 (CC(!nested_cpu_has_vid(vmcs12)) || 750 CC(!nested_exit_intr_ack_set(vcpu)) || 751 CC((vmcs12->posted_intr_nv & 0xff00)) || 752 CC((vmcs12->posted_intr_desc_addr & 0x3f)) || 753 CC((vmcs12->posted_intr_desc_addr >> cpuid_maxphyaddr(vcpu))))) 754 return -EINVAL; 755 756 /* tpr shadow is needed by all apicv features. */ 757 if (CC(!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))) 758 return -EINVAL; 759 760 return 0; 761 } 762 763 static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu, 764 u32 count, u64 addr) 765 { 766 int maxphyaddr; 767 768 if (count == 0) 769 return 0; 770 maxphyaddr = cpuid_maxphyaddr(vcpu); 771 if (!IS_ALIGNED(addr, 16) || addr >> maxphyaddr || 772 (addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr) 773 return -EINVAL; 774 775 return 0; 776 } 777 778 static int nested_vmx_check_exit_msr_switch_controls(struct kvm_vcpu *vcpu, 779 struct vmcs12 *vmcs12) 780 { 781 if (CC(nested_vmx_check_msr_switch(vcpu, 782 vmcs12->vm_exit_msr_load_count, 783 vmcs12->vm_exit_msr_load_addr)) || 784 CC(nested_vmx_check_msr_switch(vcpu, 785 vmcs12->vm_exit_msr_store_count, 786 vmcs12->vm_exit_msr_store_addr))) 787 return -EINVAL; 788 789 return 0; 790 } 791 792 static int nested_vmx_check_entry_msr_switch_controls(struct kvm_vcpu *vcpu, 793 struct vmcs12 *vmcs12) 794 { 795 if (CC(nested_vmx_check_msr_switch(vcpu, 796 vmcs12->vm_entry_msr_load_count, 797 vmcs12->vm_entry_msr_load_addr))) 798 return -EINVAL; 799 800 return 0; 801 } 802 803 static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu, 804 struct vmcs12 *vmcs12) 805 { 806 if (!nested_cpu_has_pml(vmcs12)) 807 return 0; 808 809 if (CC(!nested_cpu_has_ept(vmcs12)) || 810 CC(!page_address_valid(vcpu, vmcs12->pml_address))) 811 return -EINVAL; 812 813 return 0; 814 } 815 816 static int nested_vmx_check_unrestricted_guest_controls(struct kvm_vcpu *vcpu, 817 struct vmcs12 *vmcs12) 818 { 819 if (CC(nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST) && 820 !nested_cpu_has_ept(vmcs12))) 821 return -EINVAL; 822 return 0; 823 } 824 825 static int nested_vmx_check_mode_based_ept_exec_controls(struct kvm_vcpu *vcpu, 826 struct vmcs12 *vmcs12) 827 { 828 if (CC(nested_cpu_has2(vmcs12, SECONDARY_EXEC_MODE_BASED_EPT_EXEC) && 829 !nested_cpu_has_ept(vmcs12))) 830 return -EINVAL; 831 return 0; 832 } 833 834 static int nested_vmx_check_shadow_vmcs_controls(struct kvm_vcpu *vcpu, 835 struct vmcs12 *vmcs12) 836 { 837 if (!nested_cpu_has_shadow_vmcs(vmcs12)) 838 return 0; 839 840 if (CC(!page_address_valid(vcpu, vmcs12->vmread_bitmap)) || 841 CC(!page_address_valid(vcpu, vmcs12->vmwrite_bitmap))) 842 return -EINVAL; 843 844 return 0; 845 } 846 847 static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu, 848 struct vmx_msr_entry *e) 849 { 850 /* x2APIC MSR accesses are not allowed */ 851 if (CC(vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8)) 852 return -EINVAL; 853 if (CC(e->index == MSR_IA32_UCODE_WRITE) || /* SDM Table 35-2 */ 854 CC(e->index == MSR_IA32_UCODE_REV)) 855 return -EINVAL; 856 if (CC(e->reserved != 0)) 857 return -EINVAL; 858 return 0; 859 } 860 861 static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu, 862 struct vmx_msr_entry *e) 863 { 864 if (CC(e->index == MSR_FS_BASE) || 865 CC(e->index == MSR_GS_BASE) || 866 CC(e->index == MSR_IA32_SMM_MONITOR_CTL) || /* SMM is not supported */ 867 nested_vmx_msr_check_common(vcpu, e)) 868 return -EINVAL; 869 return 0; 870 } 871 872 static int nested_vmx_store_msr_check(struct kvm_vcpu 
*vcpu, 873 struct vmx_msr_entry *e) 874 { 875 if (CC(e->index == MSR_IA32_SMBASE) || /* SMM is not supported */ 876 nested_vmx_msr_check_common(vcpu, e)) 877 return -EINVAL; 878 return 0; 879 } 880 881 static u32 nested_vmx_max_atomic_switch_msrs(struct kvm_vcpu *vcpu) 882 { 883 struct vcpu_vmx *vmx = to_vmx(vcpu); 884 u64 vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low, 885 vmx->nested.msrs.misc_high); 886 887 return (vmx_misc_max_msr(vmx_misc) + 1) * VMX_MISC_MSR_LIST_MULTIPLIER; 888 } 889 890 /* 891 * Load guest's/host's msr at nested entry/exit. 892 * return 0 for success, entry index for failure. 893 * 894 * One of the failure modes for MSR load/store is when a list exceeds the 895 * virtual hardware's capacity. To maintain compatibility with hardware inasmuch 896 * as possible, process all valid entries before failing rather than precheck 897 * for a capacity violation. 898 */ 899 static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count) 900 { 901 u32 i; 902 struct vmx_msr_entry e; 903 u32 max_msr_list_size = nested_vmx_max_atomic_switch_msrs(vcpu); 904 905 for (i = 0; i < count; i++) { 906 if (unlikely(i >= max_msr_list_size)) 907 goto fail; 908 909 if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e), 910 &e, sizeof(e))) { 911 pr_debug_ratelimited( 912 "%s cannot read MSR entry (%u, 0x%08llx)\n", 913 __func__, i, gpa + i * sizeof(e)); 914 goto fail; 915 } 916 if (nested_vmx_load_msr_check(vcpu, &e)) { 917 pr_debug_ratelimited( 918 "%s check failed (%u, 0x%x, 0x%x)\n", 919 __func__, i, e.index, e.reserved); 920 goto fail; 921 } 922 if (kvm_set_msr(vcpu, e.index, e.value)) { 923 pr_debug_ratelimited( 924 "%s cannot write MSR (%u, 0x%x, 0x%llx)\n", 925 __func__, i, e.index, e.value); 926 goto fail; 927 } 928 } 929 return 0; 930 fail: 931 return i + 1; 932 } 933 934 static bool nested_vmx_get_vmexit_msr_value(struct kvm_vcpu *vcpu, 935 u32 msr_index, 936 u64 *data) 937 { 938 struct vcpu_vmx *vmx = to_vmx(vcpu); 939 940 /* 941 * If the L0 hypervisor stored a more accurate value for the TSC that 942 * does not include the time taken for emulation of the L2->L1 943 * VM-exit in L0, use the more accurate value. 
944 */ 945 if (msr_index == MSR_IA32_TSC) { 946 int index = vmx_find_msr_index(&vmx->msr_autostore.guest, 947 MSR_IA32_TSC); 948 949 if (index >= 0) { 950 u64 val = vmx->msr_autostore.guest.val[index].value; 951 952 *data = kvm_read_l1_tsc(vcpu, val); 953 return true; 954 } 955 } 956 957 if (kvm_get_msr(vcpu, msr_index, data)) { 958 pr_debug_ratelimited("%s cannot read MSR (0x%x)\n", __func__, 959 msr_index); 960 return false; 961 } 962 return true; 963 } 964 965 static bool read_and_check_msr_entry(struct kvm_vcpu *vcpu, u64 gpa, int i, 966 struct vmx_msr_entry *e) 967 { 968 if (kvm_vcpu_read_guest(vcpu, 969 gpa + i * sizeof(*e), 970 e, 2 * sizeof(u32))) { 971 pr_debug_ratelimited( 972 "%s cannot read MSR entry (%u, 0x%08llx)\n", 973 __func__, i, gpa + i * sizeof(*e)); 974 return false; 975 } 976 if (nested_vmx_store_msr_check(vcpu, e)) { 977 pr_debug_ratelimited( 978 "%s check failed (%u, 0x%x, 0x%x)\n", 979 __func__, i, e->index, e->reserved); 980 return false; 981 } 982 return true; 983 } 984 985 static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count) 986 { 987 u64 data; 988 u32 i; 989 struct vmx_msr_entry e; 990 u32 max_msr_list_size = nested_vmx_max_atomic_switch_msrs(vcpu); 991 992 for (i = 0; i < count; i++) { 993 if (unlikely(i >= max_msr_list_size)) 994 return -EINVAL; 995 996 if (!read_and_check_msr_entry(vcpu, gpa, i, &e)) 997 return -EINVAL; 998 999 if (!nested_vmx_get_vmexit_msr_value(vcpu, e.index, &data)) 1000 return -EINVAL; 1001 1002 if (kvm_vcpu_write_guest(vcpu, 1003 gpa + i * sizeof(e) + 1004 offsetof(struct vmx_msr_entry, value), 1005 &data, sizeof(data))) { 1006 pr_debug_ratelimited( 1007 "%s cannot write MSR (%u, 0x%x, 0x%llx)\n", 1008 __func__, i, e.index, data); 1009 return -EINVAL; 1010 } 1011 } 1012 return 0; 1013 } 1014 1015 static bool nested_msr_store_list_has_msr(struct kvm_vcpu *vcpu, u32 msr_index) 1016 { 1017 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 1018 u32 count = vmcs12->vm_exit_msr_store_count; 1019 u64 gpa = vmcs12->vm_exit_msr_store_addr; 1020 struct vmx_msr_entry e; 1021 u32 i; 1022 1023 for (i = 0; i < count; i++) { 1024 if (!read_and_check_msr_entry(vcpu, gpa, i, &e)) 1025 return false; 1026 1027 if (e.index == msr_index) 1028 return true; 1029 } 1030 return false; 1031 } 1032 1033 static void prepare_vmx_msr_autostore_list(struct kvm_vcpu *vcpu, 1034 u32 msr_index) 1035 { 1036 struct vcpu_vmx *vmx = to_vmx(vcpu); 1037 struct vmx_msrs *autostore = &vmx->msr_autostore.guest; 1038 bool in_vmcs12_store_list; 1039 int msr_autostore_index; 1040 bool in_autostore_list; 1041 int last; 1042 1043 msr_autostore_index = vmx_find_msr_index(autostore, msr_index); 1044 in_autostore_list = msr_autostore_index >= 0; 1045 in_vmcs12_store_list = nested_msr_store_list_has_msr(vcpu, msr_index); 1046 1047 if (in_vmcs12_store_list && !in_autostore_list) { 1048 if (autostore->nr == NR_LOADSTORE_MSRS) { 1049 /* 1050 * Emulated VMEntry does not fail here. Instead a less 1051 * accurate value will be returned by 1052 * nested_vmx_get_vmexit_msr_value() using kvm_get_msr() 1053 * instead of reading the value from the vmcs02 VMExit 1054 * MSR-store area. 1055 */ 1056 pr_warn_ratelimited( 1057 "Not enough msr entries in msr_autostore. 
Can't add msr %x\n", 1058 msr_index); 1059 return; 1060 } 1061 last = autostore->nr++; 1062 autostore->val[last].index = msr_index; 1063 } else if (!in_vmcs12_store_list && in_autostore_list) { 1064 last = --autostore->nr; 1065 autostore->val[msr_autostore_index] = autostore->val[last]; 1066 } 1067 } 1068 1069 static bool nested_cr3_valid(struct kvm_vcpu *vcpu, unsigned long val) 1070 { 1071 unsigned long invalid_mask; 1072 1073 invalid_mask = (~0ULL) << cpuid_maxphyaddr(vcpu); 1074 return (val & invalid_mask) == 0; 1075 } 1076 1077 /* 1078 * Load guest's/host's cr3 at nested entry/exit. @nested_ept is true if we are 1079 * emulating VM-Entry into a guest with EPT enabled. On failure, the expected 1080 * Exit Qualification (for a VM-Entry consistency check VM-Exit) is assigned to 1081 * @entry_failure_code. 1082 */ 1083 static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_ept, 1084 u32 *entry_failure_code) 1085 { 1086 if (cr3 != kvm_read_cr3(vcpu) || (!nested_ept && pdptrs_changed(vcpu))) { 1087 if (CC(!nested_cr3_valid(vcpu, cr3))) { 1088 *entry_failure_code = ENTRY_FAIL_DEFAULT; 1089 return -EINVAL; 1090 } 1091 1092 /* 1093 * If PAE paging and EPT are both on, CR3 is not used by the CPU and 1094 * must not be dereferenced. 1095 */ 1096 if (is_pae_paging(vcpu) && !nested_ept) { 1097 if (CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))) { 1098 *entry_failure_code = ENTRY_FAIL_PDPTE; 1099 return -EINVAL; 1100 } 1101 } 1102 } 1103 1104 if (!nested_ept) 1105 kvm_mmu_new_cr3(vcpu, cr3, false); 1106 1107 vcpu->arch.cr3 = cr3; 1108 kvm_register_mark_available(vcpu, VCPU_EXREG_CR3); 1109 1110 kvm_init_mmu(vcpu, false); 1111 1112 return 0; 1113 } 1114 1115 /* 1116 * Returns if KVM is able to config CPU to tag TLB entries 1117 * populated by L2 differently than TLB entries populated 1118 * by L1. 1119 * 1120 * If L0 uses EPT, L1 and L2 run with different EPTP because 1121 * guest_mode is part of kvm_mmu_page_role. Thus, TLB entries 1122 * are tagged with different EPTP. 1123 * 1124 * If L1 uses VPID and we allocated a vpid02, TLB entries are tagged 1125 * with different VPID (L1 entries are tagged with vmx->vpid 1126 * while L2 entries are tagged with vmx->nested.vpid02). 1127 */ 1128 static bool nested_has_guest_tlb_tag(struct kvm_vcpu *vcpu) 1129 { 1130 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 1131 1132 return enable_ept || 1133 (nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02); 1134 } 1135 1136 static u16 nested_get_vpid02(struct kvm_vcpu *vcpu) 1137 { 1138 struct vcpu_vmx *vmx = to_vmx(vcpu); 1139 1140 return vmx->nested.vpid02 ? vmx->nested.vpid02 : vmx->vpid; 1141 } 1142 1143 static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask) 1144 { 1145 superset &= mask; 1146 subset &= mask; 1147 1148 return (superset | subset) == superset; 1149 } 1150 1151 static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data) 1152 { 1153 const u64 feature_and_reserved = 1154 /* feature (except bit 48; see below) */ 1155 BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) | 1156 /* reserved */ 1157 BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56); 1158 u64 vmx_basic = vmx->nested.msrs.basic; 1159 1160 if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved)) 1161 return -EINVAL; 1162 1163 /* 1164 * KVM does not emulate a version of VMX that constrains physical 1165 * addresses of VMX structures (e.g. VMCS) to 32-bits. 
1166 */ 1167 if (data & BIT_ULL(48)) 1168 return -EINVAL; 1169 1170 if (vmx_basic_vmcs_revision_id(vmx_basic) != 1171 vmx_basic_vmcs_revision_id(data)) 1172 return -EINVAL; 1173 1174 if (vmx_basic_vmcs_size(vmx_basic) > vmx_basic_vmcs_size(data)) 1175 return -EINVAL; 1176 1177 vmx->nested.msrs.basic = data; 1178 return 0; 1179 } 1180 1181 static int 1182 vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data) 1183 { 1184 u64 supported; 1185 u32 *lowp, *highp; 1186 1187 switch (msr_index) { 1188 case MSR_IA32_VMX_TRUE_PINBASED_CTLS: 1189 lowp = &vmx->nested.msrs.pinbased_ctls_low; 1190 highp = &vmx->nested.msrs.pinbased_ctls_high; 1191 break; 1192 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS: 1193 lowp = &vmx->nested.msrs.procbased_ctls_low; 1194 highp = &vmx->nested.msrs.procbased_ctls_high; 1195 break; 1196 case MSR_IA32_VMX_TRUE_EXIT_CTLS: 1197 lowp = &vmx->nested.msrs.exit_ctls_low; 1198 highp = &vmx->nested.msrs.exit_ctls_high; 1199 break; 1200 case MSR_IA32_VMX_TRUE_ENTRY_CTLS: 1201 lowp = &vmx->nested.msrs.entry_ctls_low; 1202 highp = &vmx->nested.msrs.entry_ctls_high; 1203 break; 1204 case MSR_IA32_VMX_PROCBASED_CTLS2: 1205 lowp = &vmx->nested.msrs.secondary_ctls_low; 1206 highp = &vmx->nested.msrs.secondary_ctls_high; 1207 break; 1208 default: 1209 BUG(); 1210 } 1211 1212 supported = vmx_control_msr(*lowp, *highp); 1213 1214 /* Check must-be-1 bits are still 1. */ 1215 if (!is_bitwise_subset(data, supported, GENMASK_ULL(31, 0))) 1216 return -EINVAL; 1217 1218 /* Check must-be-0 bits are still 0. */ 1219 if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32))) 1220 return -EINVAL; 1221 1222 *lowp = data; 1223 *highp = data >> 32; 1224 return 0; 1225 } 1226 1227 static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data) 1228 { 1229 const u64 feature_and_reserved_bits = 1230 /* feature */ 1231 BIT_ULL(5) | GENMASK_ULL(8, 6) | BIT_ULL(14) | BIT_ULL(15) | 1232 BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) | 1233 /* reserved */ 1234 GENMASK_ULL(13, 9) | BIT_ULL(31); 1235 u64 vmx_misc; 1236 1237 vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low, 1238 vmx->nested.msrs.misc_high); 1239 1240 if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits)) 1241 return -EINVAL; 1242 1243 if ((vmx->nested.msrs.pinbased_ctls_high & 1244 PIN_BASED_VMX_PREEMPTION_TIMER) && 1245 vmx_misc_preemption_timer_rate(data) != 1246 vmx_misc_preemption_timer_rate(vmx_misc)) 1247 return -EINVAL; 1248 1249 if (vmx_misc_cr3_count(data) > vmx_misc_cr3_count(vmx_misc)) 1250 return -EINVAL; 1251 1252 if (vmx_misc_max_msr(data) > vmx_misc_max_msr(vmx_misc)) 1253 return -EINVAL; 1254 1255 if (vmx_misc_mseg_revid(data) != vmx_misc_mseg_revid(vmx_misc)) 1256 return -EINVAL; 1257 1258 vmx->nested.msrs.misc_low = data; 1259 vmx->nested.msrs.misc_high = data >> 32; 1260 1261 return 0; 1262 } 1263 1264 static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data) 1265 { 1266 u64 vmx_ept_vpid_cap; 1267 1268 vmx_ept_vpid_cap = vmx_control_msr(vmx->nested.msrs.ept_caps, 1269 vmx->nested.msrs.vpid_caps); 1270 1271 /* Every bit is either reserved or a feature bit. 
*/ 1272 if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL)) 1273 return -EINVAL; 1274 1275 vmx->nested.msrs.ept_caps = data; 1276 vmx->nested.msrs.vpid_caps = data >> 32; 1277 return 0; 1278 } 1279 1280 static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data) 1281 { 1282 u64 *msr; 1283 1284 switch (msr_index) { 1285 case MSR_IA32_VMX_CR0_FIXED0: 1286 msr = &vmx->nested.msrs.cr0_fixed0; 1287 break; 1288 case MSR_IA32_VMX_CR4_FIXED0: 1289 msr = &vmx->nested.msrs.cr4_fixed0; 1290 break; 1291 default: 1292 BUG(); 1293 } 1294 1295 /* 1296 * 1 bits (which indicates bits which "must-be-1" during VMX operation) 1297 * must be 1 in the restored value. 1298 */ 1299 if (!is_bitwise_subset(data, *msr, -1ULL)) 1300 return -EINVAL; 1301 1302 *msr = data; 1303 return 0; 1304 } 1305 1306 /* 1307 * Called when userspace is restoring VMX MSRs. 1308 * 1309 * Returns 0 on success, non-0 otherwise. 1310 */ 1311 int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) 1312 { 1313 struct vcpu_vmx *vmx = to_vmx(vcpu); 1314 1315 /* 1316 * Don't allow changes to the VMX capability MSRs while the vCPU 1317 * is in VMX operation. 1318 */ 1319 if (vmx->nested.vmxon) 1320 return -EBUSY; 1321 1322 switch (msr_index) { 1323 case MSR_IA32_VMX_BASIC: 1324 return vmx_restore_vmx_basic(vmx, data); 1325 case MSR_IA32_VMX_PINBASED_CTLS: 1326 case MSR_IA32_VMX_PROCBASED_CTLS: 1327 case MSR_IA32_VMX_EXIT_CTLS: 1328 case MSR_IA32_VMX_ENTRY_CTLS: 1329 /* 1330 * The "non-true" VMX capability MSRs are generated from the 1331 * "true" MSRs, so we do not support restoring them directly. 1332 * 1333 * If userspace wants to emulate VMX_BASIC[55]=0, userspace 1334 * should restore the "true" MSRs with the must-be-1 bits 1335 * set according to the SDM Vol 3. A.2 "RESERVED CONTROLS AND 1336 * DEFAULT SETTINGS". 1337 */ 1338 return -EINVAL; 1339 case MSR_IA32_VMX_TRUE_PINBASED_CTLS: 1340 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS: 1341 case MSR_IA32_VMX_TRUE_EXIT_CTLS: 1342 case MSR_IA32_VMX_TRUE_ENTRY_CTLS: 1343 case MSR_IA32_VMX_PROCBASED_CTLS2: 1344 return vmx_restore_control_msr(vmx, msr_index, data); 1345 case MSR_IA32_VMX_MISC: 1346 return vmx_restore_vmx_misc(vmx, data); 1347 case MSR_IA32_VMX_CR0_FIXED0: 1348 case MSR_IA32_VMX_CR4_FIXED0: 1349 return vmx_restore_fixed0_msr(vmx, msr_index, data); 1350 case MSR_IA32_VMX_CR0_FIXED1: 1351 case MSR_IA32_VMX_CR4_FIXED1: 1352 /* 1353 * These MSRs are generated based on the vCPU's CPUID, so we 1354 * do not support restoring them directly. 1355 */ 1356 return -EINVAL; 1357 case MSR_IA32_VMX_EPT_VPID_CAP: 1358 return vmx_restore_vmx_ept_vpid_cap(vmx, data); 1359 case MSR_IA32_VMX_VMCS_ENUM: 1360 vmx->nested.msrs.vmcs_enum = data; 1361 return 0; 1362 case MSR_IA32_VMX_VMFUNC: 1363 if (data & ~vmx->nested.msrs.vmfunc_controls) 1364 return -EINVAL; 1365 vmx->nested.msrs.vmfunc_controls = data; 1366 return 0; 1367 default: 1368 /* 1369 * The rest of the VMX capability MSRs do not support restore. 1370 */ 1371 return -EINVAL; 1372 } 1373 } 1374 1375 /* Returns 0 on success, non-0 otherwise. 
*/ 1376 int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata) 1377 { 1378 switch (msr_index) { 1379 case MSR_IA32_VMX_BASIC: 1380 *pdata = msrs->basic; 1381 break; 1382 case MSR_IA32_VMX_TRUE_PINBASED_CTLS: 1383 case MSR_IA32_VMX_PINBASED_CTLS: 1384 *pdata = vmx_control_msr( 1385 msrs->pinbased_ctls_low, 1386 msrs->pinbased_ctls_high); 1387 if (msr_index == MSR_IA32_VMX_PINBASED_CTLS) 1388 *pdata |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR; 1389 break; 1390 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS: 1391 case MSR_IA32_VMX_PROCBASED_CTLS: 1392 *pdata = vmx_control_msr( 1393 msrs->procbased_ctls_low, 1394 msrs->procbased_ctls_high); 1395 if (msr_index == MSR_IA32_VMX_PROCBASED_CTLS) 1396 *pdata |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR; 1397 break; 1398 case MSR_IA32_VMX_TRUE_EXIT_CTLS: 1399 case MSR_IA32_VMX_EXIT_CTLS: 1400 *pdata = vmx_control_msr( 1401 msrs->exit_ctls_low, 1402 msrs->exit_ctls_high); 1403 if (msr_index == MSR_IA32_VMX_EXIT_CTLS) 1404 *pdata |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR; 1405 break; 1406 case MSR_IA32_VMX_TRUE_ENTRY_CTLS: 1407 case MSR_IA32_VMX_ENTRY_CTLS: 1408 *pdata = vmx_control_msr( 1409 msrs->entry_ctls_low, 1410 msrs->entry_ctls_high); 1411 if (msr_index == MSR_IA32_VMX_ENTRY_CTLS) 1412 *pdata |= VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR; 1413 break; 1414 case MSR_IA32_VMX_MISC: 1415 *pdata = vmx_control_msr( 1416 msrs->misc_low, 1417 msrs->misc_high); 1418 break; 1419 case MSR_IA32_VMX_CR0_FIXED0: 1420 *pdata = msrs->cr0_fixed0; 1421 break; 1422 case MSR_IA32_VMX_CR0_FIXED1: 1423 *pdata = msrs->cr0_fixed1; 1424 break; 1425 case MSR_IA32_VMX_CR4_FIXED0: 1426 *pdata = msrs->cr4_fixed0; 1427 break; 1428 case MSR_IA32_VMX_CR4_FIXED1: 1429 *pdata = msrs->cr4_fixed1; 1430 break; 1431 case MSR_IA32_VMX_VMCS_ENUM: 1432 *pdata = msrs->vmcs_enum; 1433 break; 1434 case MSR_IA32_VMX_PROCBASED_CTLS2: 1435 *pdata = vmx_control_msr( 1436 msrs->secondary_ctls_low, 1437 msrs->secondary_ctls_high); 1438 break; 1439 case MSR_IA32_VMX_EPT_VPID_CAP: 1440 *pdata = msrs->ept_caps | 1441 ((u64)msrs->vpid_caps << 32); 1442 break; 1443 case MSR_IA32_VMX_VMFUNC: 1444 *pdata = msrs->vmfunc_controls; 1445 break; 1446 default: 1447 return 1; 1448 } 1449 1450 return 0; 1451 } 1452 1453 /* 1454 * Copy the writable VMCS shadow fields back to the VMCS12, in case they have 1455 * been modified by the L1 guest. Note, "writable" in this context means 1456 * "writable by the guest", i.e. tagged SHADOW_FIELD_RW; the set of 1457 * fields tagged SHADOW_FIELD_RO may or may not align with the "read-only" 1458 * VM-exit information fields (which are actually writable if the vCPU is 1459 * configured to support "VMWRITE to any supported field in the VMCS"). 
1460 */ 1461 static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx) 1462 { 1463 struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs; 1464 struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu); 1465 struct shadow_vmcs_field field; 1466 unsigned long val; 1467 int i; 1468 1469 if (WARN_ON(!shadow_vmcs)) 1470 return; 1471 1472 preempt_disable(); 1473 1474 vmcs_load(shadow_vmcs); 1475 1476 for (i = 0; i < max_shadow_read_write_fields; i++) { 1477 field = shadow_read_write_fields[i]; 1478 val = __vmcs_readl(field.encoding); 1479 vmcs12_write_any(vmcs12, field.encoding, field.offset, val); 1480 } 1481 1482 vmcs_clear(shadow_vmcs); 1483 vmcs_load(vmx->loaded_vmcs->vmcs); 1484 1485 preempt_enable(); 1486 } 1487 1488 static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx) 1489 { 1490 const struct shadow_vmcs_field *fields[] = { 1491 shadow_read_write_fields, 1492 shadow_read_only_fields 1493 }; 1494 const int max_fields[] = { 1495 max_shadow_read_write_fields, 1496 max_shadow_read_only_fields 1497 }; 1498 struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs; 1499 struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu); 1500 struct shadow_vmcs_field field; 1501 unsigned long val; 1502 int i, q; 1503 1504 if (WARN_ON(!shadow_vmcs)) 1505 return; 1506 1507 vmcs_load(shadow_vmcs); 1508 1509 for (q = 0; q < ARRAY_SIZE(fields); q++) { 1510 for (i = 0; i < max_fields[q]; i++) { 1511 field = fields[q][i]; 1512 val = vmcs12_read_any(vmcs12, field.encoding, 1513 field.offset); 1514 __vmcs_writel(field.encoding, val); 1515 } 1516 } 1517 1518 vmcs_clear(shadow_vmcs); 1519 vmcs_load(vmx->loaded_vmcs->vmcs); 1520 } 1521 1522 static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx) 1523 { 1524 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12; 1525 struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs; 1526 1527 /* HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE */ 1528 vmcs12->tpr_threshold = evmcs->tpr_threshold; 1529 vmcs12->guest_rip = evmcs->guest_rip; 1530 1531 if (unlikely(!(evmcs->hv_clean_fields & 1532 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC))) { 1533 vmcs12->guest_rsp = evmcs->guest_rsp; 1534 vmcs12->guest_rflags = evmcs->guest_rflags; 1535 vmcs12->guest_interruptibility_info = 1536 evmcs->guest_interruptibility_info; 1537 } 1538 1539 if (unlikely(!(evmcs->hv_clean_fields & 1540 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC))) { 1541 vmcs12->cpu_based_vm_exec_control = 1542 evmcs->cpu_based_vm_exec_control; 1543 } 1544 1545 if (unlikely(!(evmcs->hv_clean_fields & 1546 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EXCPN))) { 1547 vmcs12->exception_bitmap = evmcs->exception_bitmap; 1548 } 1549 1550 if (unlikely(!(evmcs->hv_clean_fields & 1551 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_ENTRY))) { 1552 vmcs12->vm_entry_controls = evmcs->vm_entry_controls; 1553 } 1554 1555 if (unlikely(!(evmcs->hv_clean_fields & 1556 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT))) { 1557 vmcs12->vm_entry_intr_info_field = 1558 evmcs->vm_entry_intr_info_field; 1559 vmcs12->vm_entry_exception_error_code = 1560 evmcs->vm_entry_exception_error_code; 1561 vmcs12->vm_entry_instruction_len = 1562 evmcs->vm_entry_instruction_len; 1563 } 1564 1565 if (unlikely(!(evmcs->hv_clean_fields & 1566 HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1))) { 1567 vmcs12->host_ia32_pat = evmcs->host_ia32_pat; 1568 vmcs12->host_ia32_efer = evmcs->host_ia32_efer; 1569 vmcs12->host_cr0 = evmcs->host_cr0; 1570 vmcs12->host_cr3 = evmcs->host_cr3; 1571 vmcs12->host_cr4 = evmcs->host_cr4; 1572 vmcs12->host_ia32_sysenter_esp = evmcs->host_ia32_sysenter_esp; 1573 vmcs12->host_ia32_sysenter_eip = 
evmcs->host_ia32_sysenter_eip; 1574 vmcs12->host_rip = evmcs->host_rip; 1575 vmcs12->host_ia32_sysenter_cs = evmcs->host_ia32_sysenter_cs; 1576 vmcs12->host_es_selector = evmcs->host_es_selector; 1577 vmcs12->host_cs_selector = evmcs->host_cs_selector; 1578 vmcs12->host_ss_selector = evmcs->host_ss_selector; 1579 vmcs12->host_ds_selector = evmcs->host_ds_selector; 1580 vmcs12->host_fs_selector = evmcs->host_fs_selector; 1581 vmcs12->host_gs_selector = evmcs->host_gs_selector; 1582 vmcs12->host_tr_selector = evmcs->host_tr_selector; 1583 } 1584 1585 if (unlikely(!(evmcs->hv_clean_fields & 1586 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP1))) { 1587 vmcs12->pin_based_vm_exec_control = 1588 evmcs->pin_based_vm_exec_control; 1589 vmcs12->vm_exit_controls = evmcs->vm_exit_controls; 1590 vmcs12->secondary_vm_exec_control = 1591 evmcs->secondary_vm_exec_control; 1592 } 1593 1594 if (unlikely(!(evmcs->hv_clean_fields & 1595 HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP))) { 1596 vmcs12->io_bitmap_a = evmcs->io_bitmap_a; 1597 vmcs12->io_bitmap_b = evmcs->io_bitmap_b; 1598 } 1599 1600 if (unlikely(!(evmcs->hv_clean_fields & 1601 HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP))) { 1602 vmcs12->msr_bitmap = evmcs->msr_bitmap; 1603 } 1604 1605 if (unlikely(!(evmcs->hv_clean_fields & 1606 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2))) { 1607 vmcs12->guest_es_base = evmcs->guest_es_base; 1608 vmcs12->guest_cs_base = evmcs->guest_cs_base; 1609 vmcs12->guest_ss_base = evmcs->guest_ss_base; 1610 vmcs12->guest_ds_base = evmcs->guest_ds_base; 1611 vmcs12->guest_fs_base = evmcs->guest_fs_base; 1612 vmcs12->guest_gs_base = evmcs->guest_gs_base; 1613 vmcs12->guest_ldtr_base = evmcs->guest_ldtr_base; 1614 vmcs12->guest_tr_base = evmcs->guest_tr_base; 1615 vmcs12->guest_gdtr_base = evmcs->guest_gdtr_base; 1616 vmcs12->guest_idtr_base = evmcs->guest_idtr_base; 1617 vmcs12->guest_es_limit = evmcs->guest_es_limit; 1618 vmcs12->guest_cs_limit = evmcs->guest_cs_limit; 1619 vmcs12->guest_ss_limit = evmcs->guest_ss_limit; 1620 vmcs12->guest_ds_limit = evmcs->guest_ds_limit; 1621 vmcs12->guest_fs_limit = evmcs->guest_fs_limit; 1622 vmcs12->guest_gs_limit = evmcs->guest_gs_limit; 1623 vmcs12->guest_ldtr_limit = evmcs->guest_ldtr_limit; 1624 vmcs12->guest_tr_limit = evmcs->guest_tr_limit; 1625 vmcs12->guest_gdtr_limit = evmcs->guest_gdtr_limit; 1626 vmcs12->guest_idtr_limit = evmcs->guest_idtr_limit; 1627 vmcs12->guest_es_ar_bytes = evmcs->guest_es_ar_bytes; 1628 vmcs12->guest_cs_ar_bytes = evmcs->guest_cs_ar_bytes; 1629 vmcs12->guest_ss_ar_bytes = evmcs->guest_ss_ar_bytes; 1630 vmcs12->guest_ds_ar_bytes = evmcs->guest_ds_ar_bytes; 1631 vmcs12->guest_fs_ar_bytes = evmcs->guest_fs_ar_bytes; 1632 vmcs12->guest_gs_ar_bytes = evmcs->guest_gs_ar_bytes; 1633 vmcs12->guest_ldtr_ar_bytes = evmcs->guest_ldtr_ar_bytes; 1634 vmcs12->guest_tr_ar_bytes = evmcs->guest_tr_ar_bytes; 1635 vmcs12->guest_es_selector = evmcs->guest_es_selector; 1636 vmcs12->guest_cs_selector = evmcs->guest_cs_selector; 1637 vmcs12->guest_ss_selector = evmcs->guest_ss_selector; 1638 vmcs12->guest_ds_selector = evmcs->guest_ds_selector; 1639 vmcs12->guest_fs_selector = evmcs->guest_fs_selector; 1640 vmcs12->guest_gs_selector = evmcs->guest_gs_selector; 1641 vmcs12->guest_ldtr_selector = evmcs->guest_ldtr_selector; 1642 vmcs12->guest_tr_selector = evmcs->guest_tr_selector; 1643 } 1644 1645 if (unlikely(!(evmcs->hv_clean_fields & 1646 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2))) { 1647 vmcs12->tsc_offset = evmcs->tsc_offset; 1648 vmcs12->virtual_apic_page_addr = 
evmcs->virtual_apic_page_addr; 1649 vmcs12->xss_exit_bitmap = evmcs->xss_exit_bitmap; 1650 } 1651 1652 if (unlikely(!(evmcs->hv_clean_fields & 1653 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR))) { 1654 vmcs12->cr0_guest_host_mask = evmcs->cr0_guest_host_mask; 1655 vmcs12->cr4_guest_host_mask = evmcs->cr4_guest_host_mask; 1656 vmcs12->cr0_read_shadow = evmcs->cr0_read_shadow; 1657 vmcs12->cr4_read_shadow = evmcs->cr4_read_shadow; 1658 vmcs12->guest_cr0 = evmcs->guest_cr0; 1659 vmcs12->guest_cr3 = evmcs->guest_cr3; 1660 vmcs12->guest_cr4 = evmcs->guest_cr4; 1661 vmcs12->guest_dr7 = evmcs->guest_dr7; 1662 } 1663 1664 if (unlikely(!(evmcs->hv_clean_fields & 1665 HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER))) { 1666 vmcs12->host_fs_base = evmcs->host_fs_base; 1667 vmcs12->host_gs_base = evmcs->host_gs_base; 1668 vmcs12->host_tr_base = evmcs->host_tr_base; 1669 vmcs12->host_gdtr_base = evmcs->host_gdtr_base; 1670 vmcs12->host_idtr_base = evmcs->host_idtr_base; 1671 vmcs12->host_rsp = evmcs->host_rsp; 1672 } 1673 1674 if (unlikely(!(evmcs->hv_clean_fields & 1675 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT))) { 1676 vmcs12->ept_pointer = evmcs->ept_pointer; 1677 vmcs12->virtual_processor_id = evmcs->virtual_processor_id; 1678 } 1679 1680 if (unlikely(!(evmcs->hv_clean_fields & 1681 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1))) { 1682 vmcs12->vmcs_link_pointer = evmcs->vmcs_link_pointer; 1683 vmcs12->guest_ia32_debugctl = evmcs->guest_ia32_debugctl; 1684 vmcs12->guest_ia32_pat = evmcs->guest_ia32_pat; 1685 vmcs12->guest_ia32_efer = evmcs->guest_ia32_efer; 1686 vmcs12->guest_pdptr0 = evmcs->guest_pdptr0; 1687 vmcs12->guest_pdptr1 = evmcs->guest_pdptr1; 1688 vmcs12->guest_pdptr2 = evmcs->guest_pdptr2; 1689 vmcs12->guest_pdptr3 = evmcs->guest_pdptr3; 1690 vmcs12->guest_pending_dbg_exceptions = 1691 evmcs->guest_pending_dbg_exceptions; 1692 vmcs12->guest_sysenter_esp = evmcs->guest_sysenter_esp; 1693 vmcs12->guest_sysenter_eip = evmcs->guest_sysenter_eip; 1694 vmcs12->guest_bndcfgs = evmcs->guest_bndcfgs; 1695 vmcs12->guest_activity_state = evmcs->guest_activity_state; 1696 vmcs12->guest_sysenter_cs = evmcs->guest_sysenter_cs; 1697 } 1698 1699 /* 1700 * Not used? 
1701 * vmcs12->vm_exit_msr_store_addr = evmcs->vm_exit_msr_store_addr; 1702 * vmcs12->vm_exit_msr_load_addr = evmcs->vm_exit_msr_load_addr; 1703 * vmcs12->vm_entry_msr_load_addr = evmcs->vm_entry_msr_load_addr; 1704 * vmcs12->cr3_target_value0 = evmcs->cr3_target_value0; 1705 * vmcs12->cr3_target_value1 = evmcs->cr3_target_value1; 1706 * vmcs12->cr3_target_value2 = evmcs->cr3_target_value2; 1707 * vmcs12->cr3_target_value3 = evmcs->cr3_target_value3; 1708 * vmcs12->page_fault_error_code_mask = 1709 * evmcs->page_fault_error_code_mask; 1710 * vmcs12->page_fault_error_code_match = 1711 * evmcs->page_fault_error_code_match; 1712 * vmcs12->cr3_target_count = evmcs->cr3_target_count; 1713 * vmcs12->vm_exit_msr_store_count = evmcs->vm_exit_msr_store_count; 1714 * vmcs12->vm_exit_msr_load_count = evmcs->vm_exit_msr_load_count; 1715 * vmcs12->vm_entry_msr_load_count = evmcs->vm_entry_msr_load_count; 1716 */ 1717 1718 /* 1719 * Read only fields: 1720 * vmcs12->guest_physical_address = evmcs->guest_physical_address; 1721 * vmcs12->vm_instruction_error = evmcs->vm_instruction_error; 1722 * vmcs12->vm_exit_reason = evmcs->vm_exit_reason; 1723 * vmcs12->vm_exit_intr_info = evmcs->vm_exit_intr_info; 1724 * vmcs12->vm_exit_intr_error_code = evmcs->vm_exit_intr_error_code; 1725 * vmcs12->idt_vectoring_info_field = evmcs->idt_vectoring_info_field; 1726 * vmcs12->idt_vectoring_error_code = evmcs->idt_vectoring_error_code; 1727 * vmcs12->vm_exit_instruction_len = evmcs->vm_exit_instruction_len; 1728 * vmcs12->vmx_instruction_info = evmcs->vmx_instruction_info; 1729 * vmcs12->exit_qualification = evmcs->exit_qualification; 1730 * vmcs12->guest_linear_address = evmcs->guest_linear_address; 1731 * 1732 * Not present in struct vmcs12: 1733 * vmcs12->exit_io_instruction_ecx = evmcs->exit_io_instruction_ecx; 1734 * vmcs12->exit_io_instruction_esi = evmcs->exit_io_instruction_esi; 1735 * vmcs12->exit_io_instruction_edi = evmcs->exit_io_instruction_edi; 1736 * vmcs12->exit_io_instruction_eip = evmcs->exit_io_instruction_eip; 1737 */ 1738 1739 return 0; 1740 } 1741 1742 static int copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx) 1743 { 1744 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12; 1745 struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs; 1746 1747 /* 1748 * Should not be changed by KVM: 1749 * 1750 * evmcs->host_es_selector = vmcs12->host_es_selector; 1751 * evmcs->host_cs_selector = vmcs12->host_cs_selector; 1752 * evmcs->host_ss_selector = vmcs12->host_ss_selector; 1753 * evmcs->host_ds_selector = vmcs12->host_ds_selector; 1754 * evmcs->host_fs_selector = vmcs12->host_fs_selector; 1755 * evmcs->host_gs_selector = vmcs12->host_gs_selector; 1756 * evmcs->host_tr_selector = vmcs12->host_tr_selector; 1757 * evmcs->host_ia32_pat = vmcs12->host_ia32_pat; 1758 * evmcs->host_ia32_efer = vmcs12->host_ia32_efer; 1759 * evmcs->host_cr0 = vmcs12->host_cr0; 1760 * evmcs->host_cr3 = vmcs12->host_cr3; 1761 * evmcs->host_cr4 = vmcs12->host_cr4; 1762 * evmcs->host_ia32_sysenter_esp = vmcs12->host_ia32_sysenter_esp; 1763 * evmcs->host_ia32_sysenter_eip = vmcs12->host_ia32_sysenter_eip; 1764 * evmcs->host_rip = vmcs12->host_rip; 1765 * evmcs->host_ia32_sysenter_cs = vmcs12->host_ia32_sysenter_cs; 1766 * evmcs->host_fs_base = vmcs12->host_fs_base; 1767 * evmcs->host_gs_base = vmcs12->host_gs_base; 1768 * evmcs->host_tr_base = vmcs12->host_tr_base; 1769 * evmcs->host_gdtr_base = vmcs12->host_gdtr_base; 1770 * evmcs->host_idtr_base = vmcs12->host_idtr_base; 1771 * evmcs->host_rsp = vmcs12->host_rsp; 1772 * 
sync_vmcs02_to_vmcs12() doesn't read these: 1773 * evmcs->io_bitmap_a = vmcs12->io_bitmap_a; 1774 * evmcs->io_bitmap_b = vmcs12->io_bitmap_b; 1775 * evmcs->msr_bitmap = vmcs12->msr_bitmap; 1776 * evmcs->ept_pointer = vmcs12->ept_pointer; 1777 * evmcs->xss_exit_bitmap = vmcs12->xss_exit_bitmap; 1778 * evmcs->vm_exit_msr_store_addr = vmcs12->vm_exit_msr_store_addr; 1779 * evmcs->vm_exit_msr_load_addr = vmcs12->vm_exit_msr_load_addr; 1780 * evmcs->vm_entry_msr_load_addr = vmcs12->vm_entry_msr_load_addr; 1781 * evmcs->cr3_target_value0 = vmcs12->cr3_target_value0; 1782 * evmcs->cr3_target_value1 = vmcs12->cr3_target_value1; 1783 * evmcs->cr3_target_value2 = vmcs12->cr3_target_value2; 1784 * evmcs->cr3_target_value3 = vmcs12->cr3_target_value3; 1785 * evmcs->tpr_threshold = vmcs12->tpr_threshold; 1786 * evmcs->virtual_processor_id = vmcs12->virtual_processor_id; 1787 * evmcs->exception_bitmap = vmcs12->exception_bitmap; 1788 * evmcs->vmcs_link_pointer = vmcs12->vmcs_link_pointer; 1789 * evmcs->pin_based_vm_exec_control = vmcs12->pin_based_vm_exec_control; 1790 * evmcs->vm_exit_controls = vmcs12->vm_exit_controls; 1791 * evmcs->secondary_vm_exec_control = vmcs12->secondary_vm_exec_control; 1792 * evmcs->page_fault_error_code_mask = 1793 * vmcs12->page_fault_error_code_mask; 1794 * evmcs->page_fault_error_code_match = 1795 * vmcs12->page_fault_error_code_match; 1796 * evmcs->cr3_target_count = vmcs12->cr3_target_count; 1797 * evmcs->virtual_apic_page_addr = vmcs12->virtual_apic_page_addr; 1798 * evmcs->tsc_offset = vmcs12->tsc_offset; 1799 * evmcs->guest_ia32_debugctl = vmcs12->guest_ia32_debugctl; 1800 * evmcs->cr0_guest_host_mask = vmcs12->cr0_guest_host_mask; 1801 * evmcs->cr4_guest_host_mask = vmcs12->cr4_guest_host_mask; 1802 * evmcs->cr0_read_shadow = vmcs12->cr0_read_shadow; 1803 * evmcs->cr4_read_shadow = vmcs12->cr4_read_shadow; 1804 * evmcs->vm_exit_msr_store_count = vmcs12->vm_exit_msr_store_count; 1805 * evmcs->vm_exit_msr_load_count = vmcs12->vm_exit_msr_load_count; 1806 * evmcs->vm_entry_msr_load_count = vmcs12->vm_entry_msr_load_count; 1807 * 1808 * Not present in struct vmcs12: 1809 * evmcs->exit_io_instruction_ecx = vmcs12->exit_io_instruction_ecx; 1810 * evmcs->exit_io_instruction_esi = vmcs12->exit_io_instruction_esi; 1811 * evmcs->exit_io_instruction_edi = vmcs12->exit_io_instruction_edi; 1812 * evmcs->exit_io_instruction_eip = vmcs12->exit_io_instruction_eip; 1813 */ 1814 1815 evmcs->guest_es_selector = vmcs12->guest_es_selector; 1816 evmcs->guest_cs_selector = vmcs12->guest_cs_selector; 1817 evmcs->guest_ss_selector = vmcs12->guest_ss_selector; 1818 evmcs->guest_ds_selector = vmcs12->guest_ds_selector; 1819 evmcs->guest_fs_selector = vmcs12->guest_fs_selector; 1820 evmcs->guest_gs_selector = vmcs12->guest_gs_selector; 1821 evmcs->guest_ldtr_selector = vmcs12->guest_ldtr_selector; 1822 evmcs->guest_tr_selector = vmcs12->guest_tr_selector; 1823 1824 evmcs->guest_es_limit = vmcs12->guest_es_limit; 1825 evmcs->guest_cs_limit = vmcs12->guest_cs_limit; 1826 evmcs->guest_ss_limit = vmcs12->guest_ss_limit; 1827 evmcs->guest_ds_limit = vmcs12->guest_ds_limit; 1828 evmcs->guest_fs_limit = vmcs12->guest_fs_limit; 1829 evmcs->guest_gs_limit = vmcs12->guest_gs_limit; 1830 evmcs->guest_ldtr_limit = vmcs12->guest_ldtr_limit; 1831 evmcs->guest_tr_limit = vmcs12->guest_tr_limit; 1832 evmcs->guest_gdtr_limit = vmcs12->guest_gdtr_limit; 1833 evmcs->guest_idtr_limit = vmcs12->guest_idtr_limit; 1834 1835 evmcs->guest_es_ar_bytes = vmcs12->guest_es_ar_bytes; 1836 evmcs->guest_cs_ar_bytes = 
vmcs12->guest_cs_ar_bytes; 1837 evmcs->guest_ss_ar_bytes = vmcs12->guest_ss_ar_bytes; 1838 evmcs->guest_ds_ar_bytes = vmcs12->guest_ds_ar_bytes; 1839 evmcs->guest_fs_ar_bytes = vmcs12->guest_fs_ar_bytes; 1840 evmcs->guest_gs_ar_bytes = vmcs12->guest_gs_ar_bytes; 1841 evmcs->guest_ldtr_ar_bytes = vmcs12->guest_ldtr_ar_bytes; 1842 evmcs->guest_tr_ar_bytes = vmcs12->guest_tr_ar_bytes; 1843 1844 evmcs->guest_es_base = vmcs12->guest_es_base; 1845 evmcs->guest_cs_base = vmcs12->guest_cs_base; 1846 evmcs->guest_ss_base = vmcs12->guest_ss_base; 1847 evmcs->guest_ds_base = vmcs12->guest_ds_base; 1848 evmcs->guest_fs_base = vmcs12->guest_fs_base; 1849 evmcs->guest_gs_base = vmcs12->guest_gs_base; 1850 evmcs->guest_ldtr_base = vmcs12->guest_ldtr_base; 1851 evmcs->guest_tr_base = vmcs12->guest_tr_base; 1852 evmcs->guest_gdtr_base = vmcs12->guest_gdtr_base; 1853 evmcs->guest_idtr_base = vmcs12->guest_idtr_base; 1854 1855 evmcs->guest_ia32_pat = vmcs12->guest_ia32_pat; 1856 evmcs->guest_ia32_efer = vmcs12->guest_ia32_efer; 1857 1858 evmcs->guest_pdptr0 = vmcs12->guest_pdptr0; 1859 evmcs->guest_pdptr1 = vmcs12->guest_pdptr1; 1860 evmcs->guest_pdptr2 = vmcs12->guest_pdptr2; 1861 evmcs->guest_pdptr3 = vmcs12->guest_pdptr3; 1862 1863 evmcs->guest_pending_dbg_exceptions = 1864 vmcs12->guest_pending_dbg_exceptions; 1865 evmcs->guest_sysenter_esp = vmcs12->guest_sysenter_esp; 1866 evmcs->guest_sysenter_eip = vmcs12->guest_sysenter_eip; 1867 1868 evmcs->guest_activity_state = vmcs12->guest_activity_state; 1869 evmcs->guest_sysenter_cs = vmcs12->guest_sysenter_cs; 1870 1871 evmcs->guest_cr0 = vmcs12->guest_cr0; 1872 evmcs->guest_cr3 = vmcs12->guest_cr3; 1873 evmcs->guest_cr4 = vmcs12->guest_cr4; 1874 evmcs->guest_dr7 = vmcs12->guest_dr7; 1875 1876 evmcs->guest_physical_address = vmcs12->guest_physical_address; 1877 1878 evmcs->vm_instruction_error = vmcs12->vm_instruction_error; 1879 evmcs->vm_exit_reason = vmcs12->vm_exit_reason; 1880 evmcs->vm_exit_intr_info = vmcs12->vm_exit_intr_info; 1881 evmcs->vm_exit_intr_error_code = vmcs12->vm_exit_intr_error_code; 1882 evmcs->idt_vectoring_info_field = vmcs12->idt_vectoring_info_field; 1883 evmcs->idt_vectoring_error_code = vmcs12->idt_vectoring_error_code; 1884 evmcs->vm_exit_instruction_len = vmcs12->vm_exit_instruction_len; 1885 evmcs->vmx_instruction_info = vmcs12->vmx_instruction_info; 1886 1887 evmcs->exit_qualification = vmcs12->exit_qualification; 1888 1889 evmcs->guest_linear_address = vmcs12->guest_linear_address; 1890 evmcs->guest_rsp = vmcs12->guest_rsp; 1891 evmcs->guest_rflags = vmcs12->guest_rflags; 1892 1893 evmcs->guest_interruptibility_info = 1894 vmcs12->guest_interruptibility_info; 1895 evmcs->cpu_based_vm_exec_control = vmcs12->cpu_based_vm_exec_control; 1896 evmcs->vm_entry_controls = vmcs12->vm_entry_controls; 1897 evmcs->vm_entry_intr_info_field = vmcs12->vm_entry_intr_info_field; 1898 evmcs->vm_entry_exception_error_code = 1899 vmcs12->vm_entry_exception_error_code; 1900 evmcs->vm_entry_instruction_len = vmcs12->vm_entry_instruction_len; 1901 1902 evmcs->guest_rip = vmcs12->guest_rip; 1903 1904 evmcs->guest_bndcfgs = vmcs12->guest_bndcfgs; 1905 1906 return 0; 1907 } 1908 1909 /* 1910 * This is an equivalent of the nested hypervisor executing the vmptrld 1911 * instruction. 
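 *
 * Returns 1 on success, or when enlightened VMCS is not in use for this
 * vCPU, and 0 if the eVMCS cannot be mapped or its version field is not
 * recognized.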
1912 */ 1913 static int nested_vmx_handle_enlightened_vmptrld(struct kvm_vcpu *vcpu, 1914 bool from_launch) 1915 { 1916 struct vcpu_vmx *vmx = to_vmx(vcpu); 1917 bool evmcs_gpa_changed = false; 1918 u64 evmcs_gpa; 1919 1920 if (likely(!vmx->nested.enlightened_vmcs_enabled)) 1921 return 1; 1922 1923 if (!nested_enlightened_vmentry(vcpu, &evmcs_gpa)) 1924 return 1; 1925 1926 if (unlikely(evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) { 1927 if (!vmx->nested.hv_evmcs) 1928 vmx->nested.current_vmptr = -1ull; 1929 1930 nested_release_evmcs(vcpu); 1931 1932 if (kvm_vcpu_map(vcpu, gpa_to_gfn(evmcs_gpa), 1933 &vmx->nested.hv_evmcs_map)) 1934 return 0; 1935 1936 vmx->nested.hv_evmcs = vmx->nested.hv_evmcs_map.hva; 1937 1938 /* 1939 * Currently, KVM only supports eVMCS version 1 1940 * (== KVM_EVMCS_VERSION) and thus we expect guest to set this 1941 * value to first u32 field of eVMCS which should specify eVMCS 1942 * VersionNumber. 1943 * 1944 * Guest should be aware of supported eVMCS versions by host by 1945 * examining CPUID.0x4000000A.EAX[0:15]. Host userspace VMM is 1946 * expected to set this CPUID leaf according to the value 1947 * returned in vmcs_version from nested_enable_evmcs(). 1948 * 1949 * However, it turns out that Microsoft Hyper-V fails to comply 1950 * to their own invented interface: When Hyper-V use eVMCS, it 1951 * just sets first u32 field of eVMCS to revision_id specified 1952 * in MSR_IA32_VMX_BASIC. Instead of used eVMCS version number 1953 * which is one of the supported versions specified in 1954 * CPUID.0x4000000A.EAX[0:15]. 1955 * 1956 * To overcome Hyper-V bug, we accept here either a supported 1957 * eVMCS version or VMCS12 revision_id as valid values for first 1958 * u32 field of eVMCS. 1959 */ 1960 if ((vmx->nested.hv_evmcs->revision_id != KVM_EVMCS_VERSION) && 1961 (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION)) { 1962 nested_release_evmcs(vcpu); 1963 return 0; 1964 } 1965 1966 vmx->nested.dirty_vmcs12 = true; 1967 vmx->nested.hv_evmcs_vmptr = evmcs_gpa; 1968 1969 evmcs_gpa_changed = true; 1970 /* 1971 * Unlike normal vmcs12, enlightened vmcs12 is not fully 1972 * reloaded from guest's memory (read only fields, fields not 1973 * present in struct hv_enlightened_vmcs, ...). Make sure there 1974 * are no leftovers. 1975 */ 1976 if (from_launch) { 1977 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 1978 memset(vmcs12, 0, sizeof(*vmcs12)); 1979 vmcs12->hdr.revision_id = VMCS12_REVISION; 1980 } 1981 1982 } 1983 1984 /* 1985 * Clean fields data can't be used on VMLAUNCH and when we switch 1986 * between different L2 guests as KVM keeps a single VMCS12 per L1. 1987 */ 1988 if (from_launch || evmcs_gpa_changed) 1989 vmx->nested.hv_evmcs->hv_clean_fields &= 1990 ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL; 1991 1992 return 1; 1993 } 1994 1995 void nested_sync_vmcs12_to_shadow(struct kvm_vcpu *vcpu) 1996 { 1997 struct vcpu_vmx *vmx = to_vmx(vcpu); 1998 1999 /* 2000 * hv_evmcs may end up being not mapped after migration (when 2001 * L2 was running), map it here to make sure vmcs12 changes are 2002 * properly reflected. 
2003 */ 2004 if (vmx->nested.enlightened_vmcs_enabled && !vmx->nested.hv_evmcs) 2005 nested_vmx_handle_enlightened_vmptrld(vcpu, false); 2006 2007 if (vmx->nested.hv_evmcs) { 2008 copy_vmcs12_to_enlightened(vmx); 2009 /* All fields are clean */ 2010 vmx->nested.hv_evmcs->hv_clean_fields |= 2011 HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL; 2012 } else { 2013 copy_vmcs12_to_shadow(vmx); 2014 } 2015 2016 vmx->nested.need_vmcs12_to_shadow_sync = false; 2017 } 2018 2019 static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer) 2020 { 2021 struct vcpu_vmx *vmx = 2022 container_of(timer, struct vcpu_vmx, nested.preemption_timer); 2023 2024 vmx->nested.preemption_timer_expired = true; 2025 kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu); 2026 kvm_vcpu_kick(&vmx->vcpu); 2027 2028 return HRTIMER_NORESTART; 2029 } 2030 2031 static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu) 2032 { 2033 u64 preemption_timeout = get_vmcs12(vcpu)->vmx_preemption_timer_value; 2034 struct vcpu_vmx *vmx = to_vmx(vcpu); 2035 2036 /* 2037 * A timer value of zero is architecturally guaranteed to cause 2038 * a VMExit prior to executing any instructions in the guest. 2039 */ 2040 if (preemption_timeout == 0) { 2041 vmx_preemption_timer_fn(&vmx->nested.preemption_timer); 2042 return; 2043 } 2044 2045 if (vcpu->arch.virtual_tsc_khz == 0) 2046 return; 2047 2048 preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE; 2049 preemption_timeout *= 1000000; 2050 do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz); 2051 hrtimer_start(&vmx->nested.preemption_timer, 2052 ns_to_ktime(preemption_timeout), HRTIMER_MODE_REL); 2053 } 2054 2055 static u64 nested_vmx_calc_efer(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) 2056 { 2057 if (vmx->nested.nested_run_pending && 2058 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) 2059 return vmcs12->guest_ia32_efer; 2060 else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) 2061 return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME); 2062 else 2063 return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME); 2064 } 2065 2066 static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx) 2067 { 2068 /* 2069 * If vmcs02 hasn't been initialized, set the constant vmcs02 state 2070 * according to L0's settings (vmcs12 is irrelevant here). Host 2071 * fields that come from L0 and are not constant, e.g. HOST_CR3, 2072 * will be set as needed prior to VMLAUNCH/VMRESUME. 2073 */ 2074 if (vmx->nested.vmcs02_initialized) 2075 return; 2076 vmx->nested.vmcs02_initialized = true; 2077 2078 /* 2079 * We don't care what the EPTP value is we just need to guarantee 2080 * it's valid so we don't get a false positive when doing early 2081 * consistency checks. 2082 */ 2083 if (enable_ept && nested_early_check) 2084 vmcs_write64(EPT_POINTER, construct_eptp(&vmx->vcpu, 0)); 2085 2086 /* All VMFUNCs are currently emulated through L0 vmexits. */ 2087 if (cpu_has_vmx_vmfunc()) 2088 vmcs_write64(VM_FUNCTION_CONTROL, 0); 2089 2090 if (cpu_has_vmx_posted_intr()) 2091 vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR); 2092 2093 if (cpu_has_vmx_msr_bitmap()) 2094 vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap)); 2095 2096 /* 2097 * The PML address never changes, so it is constant in vmcs02. 2098 * Conceptually we want to copy the PML index from vmcs01 here, 2099 * and then back to vmcs01 on nested vmexit. But since we flush 2100 * the log and reset GUEST_PML_INDEX on each vmexit, the PML 2101 * index is also effectively constant in vmcs02. 
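 * (GUEST_PML_INDEX below starts out at PML_ENTITY_NUM - 1, i.e. the last
 * of the 512 PML entries, mirroring how vmcs01 initializes it.)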
2102 */ 2103 if (enable_pml) { 2104 vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg)); 2105 vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1); 2106 } 2107 2108 if (cpu_has_vmx_encls_vmexit()) 2109 vmcs_write64(ENCLS_EXITING_BITMAP, -1ull); 2110 2111 /* 2112 * Set the MSR load/store lists to match L0's settings. Only the 2113 * addresses are constant (for vmcs02), the counts can change based 2114 * on L2's behavior, e.g. switching to/from long mode. 2115 */ 2116 vmcs_write64(VM_EXIT_MSR_STORE_ADDR, __pa(vmx->msr_autostore.guest.val)); 2117 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val)); 2118 vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val)); 2119 2120 vmx_set_constant_host_state(vmx); 2121 } 2122 2123 static void prepare_vmcs02_early_rare(struct vcpu_vmx *vmx, 2124 struct vmcs12 *vmcs12) 2125 { 2126 prepare_vmcs02_constant_state(vmx); 2127 2128 vmcs_write64(VMCS_LINK_POINTER, -1ull); 2129 2130 if (enable_vpid) { 2131 if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02) 2132 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02); 2133 else 2134 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); 2135 } 2136 } 2137 2138 static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) 2139 { 2140 u32 exec_control, vmcs12_exec_ctrl; 2141 u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12); 2142 2143 if (vmx->nested.dirty_vmcs12 || vmx->nested.hv_evmcs) 2144 prepare_vmcs02_early_rare(vmx, vmcs12); 2145 2146 /* 2147 * PIN CONTROLS 2148 */ 2149 exec_control = vmx_pin_based_exec_ctrl(vmx); 2150 exec_control |= (vmcs12->pin_based_vm_exec_control & 2151 ~PIN_BASED_VMX_PREEMPTION_TIMER); 2152 2153 /* Posted interrupts setting is only taken from vmcs12. */ 2154 if (nested_cpu_has_posted_intr(vmcs12)) { 2155 vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv; 2156 vmx->nested.pi_pending = false; 2157 } else { 2158 exec_control &= ~PIN_BASED_POSTED_INTR; 2159 } 2160 pin_controls_set(vmx, exec_control); 2161 2162 /* 2163 * EXEC CONTROLS 2164 */ 2165 exec_control = vmx_exec_control(vmx); /* L0's desires */ 2166 exec_control &= ~CPU_BASED_INTR_WINDOW_EXITING; 2167 exec_control &= ~CPU_BASED_NMI_WINDOW_EXITING; 2168 exec_control &= ~CPU_BASED_TPR_SHADOW; 2169 exec_control |= vmcs12->cpu_based_vm_exec_control; 2170 2171 vmx->nested.l1_tpr_threshold = -1; 2172 if (exec_control & CPU_BASED_TPR_SHADOW) 2173 vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold); 2174 #ifdef CONFIG_X86_64 2175 else 2176 exec_control |= CPU_BASED_CR8_LOAD_EXITING | 2177 CPU_BASED_CR8_STORE_EXITING; 2178 #endif 2179 2180 /* 2181 * A vmexit (to either L1 hypervisor or L0 userspace) is always needed 2182 * for I/O port accesses. 2183 */ 2184 exec_control |= CPU_BASED_UNCOND_IO_EXITING; 2185 exec_control &= ~CPU_BASED_USE_IO_BITMAPS; 2186 2187 /* 2188 * This bit will be computed in nested_get_vmcs12_pages, because 2189 * we do not have access to L1's MSR bitmap yet. For now, keep 2190 * the same bit as before, hoping to avoid multiple VMWRITEs that 2191 * only set/clear this bit. 
2192 */ 2193 exec_control &= ~CPU_BASED_USE_MSR_BITMAPS; 2194 exec_control |= exec_controls_get(vmx) & CPU_BASED_USE_MSR_BITMAPS; 2195 2196 exec_controls_set(vmx, exec_control); 2197 2198 /* 2199 * SECONDARY EXEC CONTROLS 2200 */ 2201 if (cpu_has_secondary_exec_ctrls()) { 2202 exec_control = vmx->secondary_exec_control; 2203 2204 /* Take the following fields only from vmcs12 */ 2205 exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2206 SECONDARY_EXEC_ENABLE_INVPCID | 2207 SECONDARY_EXEC_RDTSCP | 2208 SECONDARY_EXEC_XSAVES | 2209 SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE | 2210 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 2211 SECONDARY_EXEC_APIC_REGISTER_VIRT | 2212 SECONDARY_EXEC_ENABLE_VMFUNC); 2213 if (nested_cpu_has(vmcs12, 2214 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) { 2215 vmcs12_exec_ctrl = vmcs12->secondary_vm_exec_control & 2216 ~SECONDARY_EXEC_ENABLE_PML; 2217 exec_control |= vmcs12_exec_ctrl; 2218 } 2219 2220 /* VMCS shadowing for L2 is emulated for now */ 2221 exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS; 2222 2223 /* 2224 * Preset *DT exiting when emulating UMIP, so that vmx_set_cr4() 2225 * will not have to rewrite the controls just for this bit. 2226 */ 2227 if (!boot_cpu_has(X86_FEATURE_UMIP) && vmx_umip_emulated() && 2228 (vmcs12->guest_cr4 & X86_CR4_UMIP)) 2229 exec_control |= SECONDARY_EXEC_DESC; 2230 2231 if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) 2232 vmcs_write16(GUEST_INTR_STATUS, 2233 vmcs12->guest_intr_status); 2234 2235 secondary_exec_controls_set(vmx, exec_control); 2236 } 2237 2238 /* 2239 * ENTRY CONTROLS 2240 * 2241 * vmcs12's VM_{ENTRY,EXIT}_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE 2242 * are emulated by vmx_set_efer() in prepare_vmcs02(), but speculate 2243 * on the related bits (if supported by the CPU) in the hope that 2244 * we can avoid VMWrites during vmx_set_efer(). 2245 */ 2246 exec_control = (vmcs12->vm_entry_controls | vmx_vmentry_ctrl()) & 2247 ~VM_ENTRY_IA32E_MODE & ~VM_ENTRY_LOAD_IA32_EFER; 2248 if (cpu_has_load_ia32_efer()) { 2249 if (guest_efer & EFER_LMA) 2250 exec_control |= VM_ENTRY_IA32E_MODE; 2251 if (guest_efer != host_efer) 2252 exec_control |= VM_ENTRY_LOAD_IA32_EFER; 2253 } 2254 vm_entry_controls_set(vmx, exec_control); 2255 2256 /* 2257 * EXIT CONTROLS 2258 * 2259 * L2->L1 exit controls are emulated - the hardware exit is to L0 so 2260 * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER 2261 * bits may be modified by vmx_set_efer() in prepare_vmcs02(). 
2262 */ 2263 exec_control = vmx_vmexit_ctrl(); 2264 if (cpu_has_load_ia32_efer() && guest_efer != host_efer) 2265 exec_control |= VM_EXIT_LOAD_IA32_EFER; 2266 vm_exit_controls_set(vmx, exec_control); 2267 2268 /* 2269 * Interrupt/Exception Fields 2270 */ 2271 if (vmx->nested.nested_run_pending) { 2272 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 2273 vmcs12->vm_entry_intr_info_field); 2274 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, 2275 vmcs12->vm_entry_exception_error_code); 2276 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 2277 vmcs12->vm_entry_instruction_len); 2278 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 2279 vmcs12->guest_interruptibility_info); 2280 vmx->loaded_vmcs->nmi_known_unmasked = 2281 !(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI); 2282 } else { 2283 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); 2284 } 2285 } 2286 2287 static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) 2288 { 2289 struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs; 2290 2291 if (!hv_evmcs || !(hv_evmcs->hv_clean_fields & 2292 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) { 2293 vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector); 2294 vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector); 2295 vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector); 2296 vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector); 2297 vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector); 2298 vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector); 2299 vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector); 2300 vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector); 2301 vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit); 2302 vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit); 2303 vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit); 2304 vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit); 2305 vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit); 2306 vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit); 2307 vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit); 2308 vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit); 2309 vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit); 2310 vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit); 2311 vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes); 2312 vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes); 2313 vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes); 2314 vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes); 2315 vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes); 2316 vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes); 2317 vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes); 2318 vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes); 2319 vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base); 2320 vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base); 2321 vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base); 2322 vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base); 2323 vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base); 2324 vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base); 2325 vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base); 2326 vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base); 2327 vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base); 2328 vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base); 2329 } 2330 2331 if (!hv_evmcs || !(hv_evmcs->hv_clean_fields & 2332 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1)) { 2333 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs); 2334 vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, 
2335 vmcs12->guest_pending_dbg_exceptions); 2336 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp); 2337 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip); 2338 2339 /* 2340 * L1 may access the L2's PDPTR, so save them to construct 2341 * vmcs12 2342 */ 2343 if (enable_ept) { 2344 vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0); 2345 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1); 2346 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2); 2347 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3); 2348 } 2349 2350 if (kvm_mpx_supported() && vmx->nested.nested_run_pending && 2351 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)) 2352 vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs); 2353 } 2354 2355 if (nested_cpu_has_xsaves(vmcs12)) 2356 vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap); 2357 2358 /* 2359 * Whether page-faults are trapped is determined by a combination of 2360 * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF. 2361 * If enable_ept, L0 doesn't care about page faults and we should 2362 * set all of these to L1's desires. However, if !enable_ept, L0 does 2363 * care about (at least some) page faults, and because it is not easy 2364 * (if at all possible?) to merge L0 and L1's desires, we simply ask 2365 * to exit on each and every L2 page fault. This is done by setting 2366 * MASK=MATCH=0 and (see below) EB.PF=1. 2367 * Note that below we don't need special code to set EB.PF beyond the 2368 * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept, 2369 * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when 2370 * !enable_ept, EB.PF is 1, so the "or" will always be 1. 2371 */ 2372 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 2373 enable_ept ? vmcs12->page_fault_error_code_mask : 0); 2374 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 2375 enable_ept ? vmcs12->page_fault_error_code_match : 0); 2376 2377 if (cpu_has_vmx_apicv()) { 2378 vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0); 2379 vmcs_write64(EOI_EXIT_BITMAP1, vmcs12->eoi_exit_bitmap1); 2380 vmcs_write64(EOI_EXIT_BITMAP2, vmcs12->eoi_exit_bitmap2); 2381 vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3); 2382 } 2383 2384 /* 2385 * Make sure the msr_autostore list is up to date before we set the 2386 * count in the vmcs02. 2387 */ 2388 prepare_vmx_msr_autostore_list(&vmx->vcpu, MSR_IA32_TSC); 2389 2390 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, vmx->msr_autostore.guest.nr); 2391 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); 2392 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); 2393 2394 set_cr4_guest_host_mask(vmx); 2395 } 2396 2397 /* 2398 * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested 2399 * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it 2400 * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2 2401 * guest in a way that will both be appropriate to L1's requests, and our 2402 * needs. In addition to modifying the active vmcs (which is vmcs02), this 2403 * function also has additional necessary side-effects, like setting various 2404 * vcpu->arch fields. 2405 * Returns 0 on success, 1 on failure. Invalid state exit qualification code 2406 * is assigned to entry_failure_code on failure. 
2407 */ 2408 static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, 2409 u32 *entry_failure_code) 2410 { 2411 struct vcpu_vmx *vmx = to_vmx(vcpu); 2412 struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs; 2413 bool load_guest_pdptrs_vmcs12 = false; 2414 2415 if (vmx->nested.dirty_vmcs12 || hv_evmcs) { 2416 prepare_vmcs02_rare(vmx, vmcs12); 2417 vmx->nested.dirty_vmcs12 = false; 2418 2419 load_guest_pdptrs_vmcs12 = !hv_evmcs || 2420 !(hv_evmcs->hv_clean_fields & 2421 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1); 2422 } 2423 2424 if (vmx->nested.nested_run_pending && 2425 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) { 2426 kvm_set_dr(vcpu, 7, vmcs12->guest_dr7); 2427 vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl); 2428 } else { 2429 kvm_set_dr(vcpu, 7, vcpu->arch.dr7); 2430 vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl); 2431 } 2432 if (kvm_mpx_supported() && (!vmx->nested.nested_run_pending || 2433 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))) 2434 vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs); 2435 vmx_set_rflags(vcpu, vmcs12->guest_rflags); 2436 2437 /* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the 2438 * bitwise-or of what L1 wants to trap for L2, and what we want to 2439 * trap. Note that CR0.TS also needs updating - we do this later. 2440 */ 2441 update_exception_bitmap(vcpu); 2442 vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask; 2443 vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); 2444 2445 if (vmx->nested.nested_run_pending && 2446 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)) { 2447 vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat); 2448 vcpu->arch.pat = vmcs12->guest_ia32_pat; 2449 } else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) { 2450 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); 2451 } 2452 2453 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset); 2454 2455 if (kvm_has_tsc_control) 2456 decache_tsc_multiplier(vmx); 2457 2458 if (enable_vpid) { 2459 /* 2460 * There is no direct mapping between vpid02 and vpid12, the 2461 * vpid02 is per-vCPU for L0 and reused while the value of 2462 * vpid12 is changed w/ one invvpid during nested vmentry. 2463 * The vpid12 is allocated by L1 for L2, so it will not 2464 * influence global bitmap(for vpid01 and vpid02 allocation) 2465 * even if spawn a lot of nested vCPUs. 2466 */ 2467 if (nested_cpu_has_vpid(vmcs12) && nested_has_guest_tlb_tag(vcpu)) { 2468 if (vmcs12->virtual_processor_id != vmx->nested.last_vpid) { 2469 vmx->nested.last_vpid = vmcs12->virtual_processor_id; 2470 __vmx_flush_tlb(vcpu, nested_get_vpid02(vcpu), false); 2471 } 2472 } else { 2473 /* 2474 * If L1 use EPT, then L0 needs to execute INVEPT on 2475 * EPTP02 instead of EPTP01. Therefore, delay TLB 2476 * flush until vmcs02->eptp is fully updated by 2477 * KVM_REQ_LOAD_CR3. Note that this assumes 2478 * KVM_REQ_TLB_FLUSH is evaluated after 2479 * KVM_REQ_LOAD_CR3 in vcpu_enter_guest(). 2480 */ 2481 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); 2482 } 2483 } 2484 2485 if (nested_cpu_has_ept(vmcs12)) 2486 nested_ept_init_mmu_context(vcpu); 2487 2488 /* 2489 * This sets GUEST_CR0 to vmcs12->guest_cr0, possibly modifying those 2490 * bits which we consider mandatory enabled. 
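 * (For instance CR0.NE, plus CR0.PE and CR0.PG when unrestricted guest is
 * not available; an illustrative, not exhaustive, list.)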
2491 * The CR0_READ_SHADOW is what L2 should have expected to read given 2492 * the specifications by L1; It's not enough to take 2493 * vmcs12->cr0_read_shadow because on our cr0_guest_host_mask we we 2494 * have more bits than L1 expected. 2495 */ 2496 vmx_set_cr0(vcpu, vmcs12->guest_cr0); 2497 vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12)); 2498 2499 vmx_set_cr4(vcpu, vmcs12->guest_cr4); 2500 vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12)); 2501 2502 vcpu->arch.efer = nested_vmx_calc_efer(vmx, vmcs12); 2503 /* Note: may modify VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */ 2504 vmx_set_efer(vcpu, vcpu->arch.efer); 2505 2506 /* 2507 * Guest state is invalid and unrestricted guest is disabled, 2508 * which means L1 attempted VMEntry to L2 with invalid state. 2509 * Fail the VMEntry. 2510 */ 2511 if (vmx->emulation_required) { 2512 *entry_failure_code = ENTRY_FAIL_DEFAULT; 2513 return -EINVAL; 2514 } 2515 2516 /* Shadow page tables on either EPT or shadow page tables. */ 2517 if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12), 2518 entry_failure_code)) 2519 return -EINVAL; 2520 2521 /* 2522 * Immediately write vmcs02.GUEST_CR3. It will be propagated to vmcs12 2523 * on nested VM-Exit, which can occur without actually running L2 and 2524 * thus without hitting vmx_set_cr3(), e.g. if L1 is entering L2 with 2525 * vmcs12.GUEST_ACTIVITYSTATE=HLT, in which case KVM will intercept the 2526 * transition to HLT instead of running L2. 2527 */ 2528 if (enable_ept) 2529 vmcs_writel(GUEST_CR3, vmcs12->guest_cr3); 2530 2531 /* Late preparation of GUEST_PDPTRs now that EFER and CRs are set. */ 2532 if (load_guest_pdptrs_vmcs12 && nested_cpu_has_ept(vmcs12) && 2533 is_pae_paging(vcpu)) { 2534 vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0); 2535 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1); 2536 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2); 2537 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3); 2538 } 2539 2540 if (!enable_ept) 2541 vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested; 2542 2543 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) && 2544 WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL, 2545 vmcs12->guest_ia32_perf_global_ctrl))) 2546 return -EINVAL; 2547 2548 kvm_rsp_write(vcpu, vmcs12->guest_rsp); 2549 kvm_rip_write(vcpu, vmcs12->guest_rip); 2550 return 0; 2551 } 2552 2553 static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12) 2554 { 2555 if (CC(!nested_cpu_has_nmi_exiting(vmcs12) && 2556 nested_cpu_has_virtual_nmis(vmcs12))) 2557 return -EINVAL; 2558 2559 if (CC(!nested_cpu_has_virtual_nmis(vmcs12) && 2560 nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING))) 2561 return -EINVAL; 2562 2563 return 0; 2564 } 2565 2566 static bool valid_ept_address(struct kvm_vcpu *vcpu, u64 address) 2567 { 2568 struct vcpu_vmx *vmx = to_vmx(vcpu); 2569 int maxphyaddr = cpuid_maxphyaddr(vcpu); 2570 2571 /* Check for memory type validity */ 2572 switch (address & VMX_EPTP_MT_MASK) { 2573 case VMX_EPTP_MT_UC: 2574 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT))) 2575 return false; 2576 break; 2577 case VMX_EPTP_MT_WB: 2578 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_WB_BIT))) 2579 return false; 2580 break; 2581 default: 2582 return false; 2583 } 2584 2585 /* only 4 levels page-walk length are valid */ 2586 if (CC((address & VMX_EPTP_PWL_MASK) != VMX_EPTP_PWL_4)) 2587 return false; 2588 2589 /* Reserved bits should not be set */ 2590 if (CC(address >> maxphyaddr || ((address >> 7) & 0x1f))) 2591 
return false; 2592 2593 /* AD, if set, should be supported */ 2594 if (address & VMX_EPTP_AD_ENABLE_BIT) { 2595 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT))) 2596 return false; 2597 } 2598 2599 return true; 2600 } 2601 2602 /* 2603 * Checks related to VM-Execution Control Fields 2604 */ 2605 static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu, 2606 struct vmcs12 *vmcs12) 2607 { 2608 struct vcpu_vmx *vmx = to_vmx(vcpu); 2609 2610 if (CC(!vmx_control_verify(vmcs12->pin_based_vm_exec_control, 2611 vmx->nested.msrs.pinbased_ctls_low, 2612 vmx->nested.msrs.pinbased_ctls_high)) || 2613 CC(!vmx_control_verify(vmcs12->cpu_based_vm_exec_control, 2614 vmx->nested.msrs.procbased_ctls_low, 2615 vmx->nested.msrs.procbased_ctls_high))) 2616 return -EINVAL; 2617 2618 if (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) && 2619 CC(!vmx_control_verify(vmcs12->secondary_vm_exec_control, 2620 vmx->nested.msrs.secondary_ctls_low, 2621 vmx->nested.msrs.secondary_ctls_high))) 2622 return -EINVAL; 2623 2624 if (CC(vmcs12->cr3_target_count > nested_cpu_vmx_misc_cr3_count(vcpu)) || 2625 nested_vmx_check_io_bitmap_controls(vcpu, vmcs12) || 2626 nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12) || 2627 nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12) || 2628 nested_vmx_check_apic_access_controls(vcpu, vmcs12) || 2629 nested_vmx_check_apicv_controls(vcpu, vmcs12) || 2630 nested_vmx_check_nmi_controls(vmcs12) || 2631 nested_vmx_check_pml_controls(vcpu, vmcs12) || 2632 nested_vmx_check_unrestricted_guest_controls(vcpu, vmcs12) || 2633 nested_vmx_check_mode_based_ept_exec_controls(vcpu, vmcs12) || 2634 nested_vmx_check_shadow_vmcs_controls(vcpu, vmcs12) || 2635 CC(nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id)) 2636 return -EINVAL; 2637 2638 if (!nested_cpu_has_preemption_timer(vmcs12) && 2639 nested_cpu_has_save_preemption_timer(vmcs12)) 2640 return -EINVAL; 2641 2642 if (nested_cpu_has_ept(vmcs12) && 2643 CC(!valid_ept_address(vcpu, vmcs12->ept_pointer))) 2644 return -EINVAL; 2645 2646 if (nested_cpu_has_vmfunc(vmcs12)) { 2647 if (CC(vmcs12->vm_function_control & 2648 ~vmx->nested.msrs.vmfunc_controls)) 2649 return -EINVAL; 2650 2651 if (nested_cpu_has_eptp_switching(vmcs12)) { 2652 if (CC(!nested_cpu_has_ept(vmcs12)) || 2653 CC(!page_address_valid(vcpu, vmcs12->eptp_list_address))) 2654 return -EINVAL; 2655 } 2656 } 2657 2658 return 0; 2659 } 2660 2661 /* 2662 * Checks related to VM-Exit Control Fields 2663 */ 2664 static int nested_check_vm_exit_controls(struct kvm_vcpu *vcpu, 2665 struct vmcs12 *vmcs12) 2666 { 2667 struct vcpu_vmx *vmx = to_vmx(vcpu); 2668 2669 if (CC(!vmx_control_verify(vmcs12->vm_exit_controls, 2670 vmx->nested.msrs.exit_ctls_low, 2671 vmx->nested.msrs.exit_ctls_high)) || 2672 CC(nested_vmx_check_exit_msr_switch_controls(vcpu, vmcs12))) 2673 return -EINVAL; 2674 2675 return 0; 2676 } 2677 2678 /* 2679 * Checks related to VM-Entry Control Fields 2680 */ 2681 static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu, 2682 struct vmcs12 *vmcs12) 2683 { 2684 struct vcpu_vmx *vmx = to_vmx(vcpu); 2685 2686 if (CC(!vmx_control_verify(vmcs12->vm_entry_controls, 2687 vmx->nested.msrs.entry_ctls_low, 2688 vmx->nested.msrs.entry_ctls_high))) 2689 return -EINVAL; 2690 2691 /* 2692 * From the Intel SDM, volume 3: 2693 * Fields relevant to VM-entry event injection must be set properly. 2694 * These fields are the VM-entry interruption-information field, the 2695 * VM-entry exception error code, and the VM-entry instruction length. 
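 * For example, injecting a page fault (#PF) with an error code corresponds
 * to an interruption-information value of 0x80000b0e: valid (bit 31),
 * deliver error code (bit 11), type 3 = hardware exception (bits 10:8) and
 * vector 14. The encoding is given purely as an illustration.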
2696 */ 2697 if (vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) { 2698 u32 intr_info = vmcs12->vm_entry_intr_info_field; 2699 u8 vector = intr_info & INTR_INFO_VECTOR_MASK; 2700 u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK; 2701 bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK; 2702 bool should_have_error_code; 2703 bool urg = nested_cpu_has2(vmcs12, 2704 SECONDARY_EXEC_UNRESTRICTED_GUEST); 2705 bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE; 2706 2707 /* VM-entry interruption-info field: interruption type */ 2708 if (CC(intr_type == INTR_TYPE_RESERVED) || 2709 CC(intr_type == INTR_TYPE_OTHER_EVENT && 2710 !nested_cpu_supports_monitor_trap_flag(vcpu))) 2711 return -EINVAL; 2712 2713 /* VM-entry interruption-info field: vector */ 2714 if (CC(intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) || 2715 CC(intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) || 2716 CC(intr_type == INTR_TYPE_OTHER_EVENT && vector != 0)) 2717 return -EINVAL; 2718 2719 /* VM-entry interruption-info field: deliver error code */ 2720 should_have_error_code = 2721 intr_type == INTR_TYPE_HARD_EXCEPTION && prot_mode && 2722 x86_exception_has_error_code(vector); 2723 if (CC(has_error_code != should_have_error_code)) 2724 return -EINVAL; 2725 2726 /* VM-entry exception error code */ 2727 if (CC(has_error_code && 2728 vmcs12->vm_entry_exception_error_code & GENMASK(31, 16))) 2729 return -EINVAL; 2730 2731 /* VM-entry interruption-info field: reserved bits */ 2732 if (CC(intr_info & INTR_INFO_RESVD_BITS_MASK)) 2733 return -EINVAL; 2734 2735 /* VM-entry instruction length */ 2736 switch (intr_type) { 2737 case INTR_TYPE_SOFT_EXCEPTION: 2738 case INTR_TYPE_SOFT_INTR: 2739 case INTR_TYPE_PRIV_SW_EXCEPTION: 2740 if (CC(vmcs12->vm_entry_instruction_len > 15) || 2741 CC(vmcs12->vm_entry_instruction_len == 0 && 2742 CC(!nested_cpu_has_zero_length_injection(vcpu)))) 2743 return -EINVAL; 2744 } 2745 } 2746 2747 if (nested_vmx_check_entry_msr_switch_controls(vcpu, vmcs12)) 2748 return -EINVAL; 2749 2750 return 0; 2751 } 2752 2753 static int nested_vmx_check_controls(struct kvm_vcpu *vcpu, 2754 struct vmcs12 *vmcs12) 2755 { 2756 if (nested_check_vm_execution_controls(vcpu, vmcs12) || 2757 nested_check_vm_exit_controls(vcpu, vmcs12) || 2758 nested_check_vm_entry_controls(vcpu, vmcs12)) 2759 return -EINVAL; 2760 2761 if (to_vmx(vcpu)->nested.enlightened_vmcs_enabled) 2762 return nested_evmcs_check_controls(vmcs12); 2763 2764 return 0; 2765 } 2766 2767 static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu, 2768 struct vmcs12 *vmcs12) 2769 { 2770 bool ia32e; 2771 2772 if (CC(!nested_host_cr0_valid(vcpu, vmcs12->host_cr0)) || 2773 CC(!nested_host_cr4_valid(vcpu, vmcs12->host_cr4)) || 2774 CC(!nested_cr3_valid(vcpu, vmcs12->host_cr3))) 2775 return -EINVAL; 2776 2777 if (CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_esp, vcpu)) || 2778 CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_eip, vcpu))) 2779 return -EINVAL; 2780 2781 if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) && 2782 CC(!kvm_pat_valid(vmcs12->host_ia32_pat))) 2783 return -EINVAL; 2784 2785 if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) && 2786 CC(!kvm_valid_perf_global_ctrl(vcpu_to_pmu(vcpu), 2787 vmcs12->host_ia32_perf_global_ctrl))) 2788 return -EINVAL; 2789 2790 #ifdef CONFIG_X86_64 2791 ia32e = !!(vcpu->arch.efer & EFER_LMA); 2792 #else 2793 ia32e = false; 2794 #endif 2795 2796 if (ia32e) { 2797 if (CC(!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)) || 2798 
CC(!(vmcs12->host_cr4 & X86_CR4_PAE))) 2799 return -EINVAL; 2800 } else { 2801 if (CC(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) || 2802 CC(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) || 2803 CC(vmcs12->host_cr4 & X86_CR4_PCIDE) || 2804 CC((vmcs12->host_rip) >> 32)) 2805 return -EINVAL; 2806 } 2807 2808 if (CC(vmcs12->host_cs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || 2809 CC(vmcs12->host_ss_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || 2810 CC(vmcs12->host_ds_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || 2811 CC(vmcs12->host_es_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || 2812 CC(vmcs12->host_fs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || 2813 CC(vmcs12->host_gs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || 2814 CC(vmcs12->host_tr_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || 2815 CC(vmcs12->host_cs_selector == 0) || 2816 CC(vmcs12->host_tr_selector == 0) || 2817 CC(vmcs12->host_ss_selector == 0 && !ia32e)) 2818 return -EINVAL; 2819 2820 if (CC(is_noncanonical_address(vmcs12->host_fs_base, vcpu)) || 2821 CC(is_noncanonical_address(vmcs12->host_gs_base, vcpu)) || 2822 CC(is_noncanonical_address(vmcs12->host_gdtr_base, vcpu)) || 2823 CC(is_noncanonical_address(vmcs12->host_idtr_base, vcpu)) || 2824 CC(is_noncanonical_address(vmcs12->host_tr_base, vcpu)) || 2825 CC(is_noncanonical_address(vmcs12->host_rip, vcpu))) 2826 return -EINVAL; 2827 2828 /* 2829 * If the load IA32_EFER VM-exit control is 1, bits reserved in the 2830 * IA32_EFER MSR must be 0 in the field for that register. In addition, 2831 * the values of the LMA and LME bits in the field must each be that of 2832 * the host address-space size VM-exit control. 2833 */ 2834 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) { 2835 if (CC(!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer)) || 2836 CC(ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA)) || 2837 CC(ia32e != !!(vmcs12->host_ia32_efer & EFER_LME))) 2838 return -EINVAL; 2839 } 2840 2841 return 0; 2842 } 2843 2844 static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu, 2845 struct vmcs12 *vmcs12) 2846 { 2847 int r = 0; 2848 struct vmcs12 *shadow; 2849 struct kvm_host_map map; 2850 2851 if (vmcs12->vmcs_link_pointer == -1ull) 2852 return 0; 2853 2854 if (CC(!page_address_valid(vcpu, vmcs12->vmcs_link_pointer))) 2855 return -EINVAL; 2856 2857 if (CC(kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->vmcs_link_pointer), &map))) 2858 return -EINVAL; 2859 2860 shadow = map.hva; 2861 2862 if (CC(shadow->hdr.revision_id != VMCS12_REVISION) || 2863 CC(shadow->hdr.shadow_vmcs != nested_cpu_has_shadow_vmcs(vmcs12))) 2864 r = -EINVAL; 2865 2866 kvm_vcpu_unmap(vcpu, &map, false); 2867 return r; 2868 } 2869 2870 /* 2871 * Checks related to Guest Non-register State 2872 */ 2873 static int nested_check_guest_non_reg_state(struct vmcs12 *vmcs12) 2874 { 2875 if (CC(vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE && 2876 vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT)) 2877 return -EINVAL; 2878 2879 return 0; 2880 } 2881 2882 static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu, 2883 struct vmcs12 *vmcs12, 2884 u32 *exit_qual) 2885 { 2886 bool ia32e; 2887 2888 *exit_qual = ENTRY_FAIL_DEFAULT; 2889 2890 if (CC(!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0)) || 2891 CC(!nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4))) 2892 return -EINVAL; 2893 2894 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) && 2895 CC(!kvm_dr7_valid(vmcs12->guest_dr7))) 2896 return -EINVAL; 2897 2898 if ((vmcs12->vm_entry_controls 
& VM_ENTRY_LOAD_IA32_PAT) && 2899 CC(!kvm_pat_valid(vmcs12->guest_ia32_pat))) 2900 return -EINVAL; 2901 2902 if (nested_vmx_check_vmcs_link_ptr(vcpu, vmcs12)) { 2903 *exit_qual = ENTRY_FAIL_VMCS_LINK_PTR; 2904 return -EINVAL; 2905 } 2906 2907 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) && 2908 CC(!kvm_valid_perf_global_ctrl(vcpu_to_pmu(vcpu), 2909 vmcs12->guest_ia32_perf_global_ctrl))) 2910 return -EINVAL; 2911 2912 /* 2913 * If the load IA32_EFER VM-entry control is 1, the following checks 2914 * are performed on the field for the IA32_EFER MSR: 2915 * - Bits reserved in the IA32_EFER MSR must be 0. 2916 * - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value of 2917 * the IA-32e mode guest VM-exit control. It must also be identical 2918 * to bit 8 (LME) if bit 31 in the CR0 field (corresponding to 2919 * CR0.PG) is 1. 2920 */ 2921 if (to_vmx(vcpu)->nested.nested_run_pending && 2922 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) { 2923 ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0; 2924 if (CC(!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer)) || 2925 CC(ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA)) || 2926 CC(((vmcs12->guest_cr0 & X86_CR0_PG) && 2927 ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME)))) 2928 return -EINVAL; 2929 } 2930 2931 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) && 2932 (CC(is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu)) || 2933 CC((vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD)))) 2934 return -EINVAL; 2935 2936 if (nested_check_guest_non_reg_state(vmcs12)) 2937 return -EINVAL; 2938 2939 return 0; 2940 } 2941 2942 static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu) 2943 { 2944 struct vcpu_vmx *vmx = to_vmx(vcpu); 2945 unsigned long cr3, cr4; 2946 bool vm_fail; 2947 2948 if (!nested_early_check) 2949 return 0; 2950 2951 if (vmx->msr_autoload.host.nr) 2952 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0); 2953 if (vmx->msr_autoload.guest.nr) 2954 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0); 2955 2956 preempt_disable(); 2957 2958 vmx_prepare_switch_to_guest(vcpu); 2959 2960 /* 2961 * Induce a consistency check VMExit by clearing bit 1 in GUEST_RFLAGS, 2962 * which is reserved to '1' by hardware. GUEST_RFLAGS is guaranteed to 2963 * be written (by preparve_vmcs02()) before the "real" VMEnter, i.e. 2964 * there is no need to preserve other bits or save/restore the field. 2965 */ 2966 vmcs_writel(GUEST_RFLAGS, 0); 2967 2968 cr3 = __get_current_cr3_fast(); 2969 if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) { 2970 vmcs_writel(HOST_CR3, cr3); 2971 vmx->loaded_vmcs->host_state.cr3 = cr3; 2972 } 2973 2974 cr4 = cr4_read_shadow(); 2975 if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) { 2976 vmcs_writel(HOST_CR4, cr4); 2977 vmx->loaded_vmcs->host_state.cr4 = cr4; 2978 } 2979 2980 asm( 2981 "sub $%c[wordsize], %%" _ASM_SP "\n\t" /* temporarily adjust RSP for CALL */ 2982 "cmp %%" _ASM_SP ", %c[host_state_rsp](%[loaded_vmcs]) \n\t" 2983 "je 1f \n\t" 2984 __ex("vmwrite %%" _ASM_SP ", %[HOST_RSP]") "\n\t" 2985 "mov %%" _ASM_SP ", %c[host_state_rsp](%[loaded_vmcs]) \n\t" 2986 "1: \n\t" 2987 "add $%c[wordsize], %%" _ASM_SP "\n\t" /* un-adjust RSP */ 2988 2989 /* Check if vmlaunch or vmresume is needed */ 2990 "cmpb $0, %c[launched](%[loaded_vmcs])\n\t" 2991 2992 /* 2993 * VMLAUNCH and VMRESUME clear RFLAGS.{CF,ZF} on VM-Exit, set 2994 * RFLAGS.CF on VM-Fail Invalid and set RFLAGS.ZF on VM-Fail 2995 * Valid. 
vmx_vmenter() directly "returns" RFLAGS, and so the 2996 * results of VM-Enter is captured via CC_{SET,OUT} to vm_fail. 2997 */ 2998 "call vmx_vmenter\n\t" 2999 3000 CC_SET(be) 3001 : ASM_CALL_CONSTRAINT, CC_OUT(be) (vm_fail) 3002 : [HOST_RSP]"r"((unsigned long)HOST_RSP), 3003 [loaded_vmcs]"r"(vmx->loaded_vmcs), 3004 [launched]"i"(offsetof(struct loaded_vmcs, launched)), 3005 [host_state_rsp]"i"(offsetof(struct loaded_vmcs, host_state.rsp)), 3006 [wordsize]"i"(sizeof(ulong)) 3007 : "memory" 3008 ); 3009 3010 if (vmx->msr_autoload.host.nr) 3011 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); 3012 if (vmx->msr_autoload.guest.nr) 3013 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); 3014 3015 if (vm_fail) { 3016 u32 error = vmcs_read32(VM_INSTRUCTION_ERROR); 3017 3018 preempt_enable(); 3019 3020 trace_kvm_nested_vmenter_failed( 3021 "early hardware check VM-instruction error: ", error); 3022 WARN_ON_ONCE(error != VMXERR_ENTRY_INVALID_CONTROL_FIELD); 3023 return 1; 3024 } 3025 3026 /* 3027 * VMExit clears RFLAGS.IF and DR7, even on a consistency check. 3028 */ 3029 local_irq_enable(); 3030 if (hw_breakpoint_active()) 3031 set_debugreg(__this_cpu_read(cpu_dr7), 7); 3032 preempt_enable(); 3033 3034 /* 3035 * A non-failing VMEntry means we somehow entered guest mode with 3036 * an illegal RIP, and that's just the tip of the iceberg. There 3037 * is no telling what memory has been modified or what state has 3038 * been exposed to unknown code. Hitting this all but guarantees 3039 * a (very critical) hardware issue. 3040 */ 3041 WARN_ON(!(vmcs_read32(VM_EXIT_REASON) & 3042 VMX_EXIT_REASONS_FAILED_VMENTRY)); 3043 3044 return 0; 3045 } 3046 3047 static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu) 3048 { 3049 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 3050 struct vcpu_vmx *vmx = to_vmx(vcpu); 3051 struct kvm_host_map *map; 3052 struct page *page; 3053 u64 hpa; 3054 3055 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { 3056 /* 3057 * Translate L1 physical address to host physical 3058 * address for vmcs02. Keep the page pinned, so this 3059 * physical address remains valid. We keep a reference 3060 * to it so we can release it later. 3061 */ 3062 if (vmx->nested.apic_access_page) { /* shouldn't happen */ 3063 kvm_release_page_clean(vmx->nested.apic_access_page); 3064 vmx->nested.apic_access_page = NULL; 3065 } 3066 page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->apic_access_addr); 3067 if (!is_error_page(page)) { 3068 vmx->nested.apic_access_page = page; 3069 hpa = page_to_phys(vmx->nested.apic_access_page); 3070 vmcs_write64(APIC_ACCESS_ADDR, hpa); 3071 } else { 3072 pr_debug_ratelimited("%s: no backing 'struct page' for APIC-access address in vmcs12\n", 3073 __func__); 3074 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 3075 vcpu->run->internal.suberror = 3076 KVM_INTERNAL_ERROR_EMULATION; 3077 vcpu->run->internal.ndata = 0; 3078 return false; 3079 } 3080 } 3081 3082 if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) { 3083 map = &vmx->nested.virtual_apic_map; 3084 3085 if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->virtual_apic_page_addr), map)) { 3086 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, pfn_to_hpa(map->pfn)); 3087 } else if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING) && 3088 nested_cpu_has(vmcs12, CPU_BASED_CR8_STORE_EXITING) && 3089 !nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { 3090 /* 3091 * The processor will never use the TPR shadow, simply 3092 * clear the bit from the execution control. 
Such a 3093 * configuration is useless, but it happens in tests. 3094 * For any other configuration, failing the vm entry is 3095 * _not_ what the processor does but it's basically the 3096 * only possibility we have. 3097 */ 3098 exec_controls_clearbit(vmx, CPU_BASED_TPR_SHADOW); 3099 } else { 3100 /* 3101 * Write an illegal value to VIRTUAL_APIC_PAGE_ADDR to 3102 * force VM-Entry to fail. 3103 */ 3104 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, -1ull); 3105 } 3106 } 3107 3108 if (nested_cpu_has_posted_intr(vmcs12)) { 3109 map = &vmx->nested.pi_desc_map; 3110 3111 if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->posted_intr_desc_addr), map)) { 3112 vmx->nested.pi_desc = 3113 (struct pi_desc *)(((void *)map->hva) + 3114 offset_in_page(vmcs12->posted_intr_desc_addr)); 3115 vmcs_write64(POSTED_INTR_DESC_ADDR, 3116 pfn_to_hpa(map->pfn) + offset_in_page(vmcs12->posted_intr_desc_addr)); 3117 } 3118 } 3119 if (nested_vmx_prepare_msr_bitmap(vcpu, vmcs12)) 3120 exec_controls_setbit(vmx, CPU_BASED_USE_MSR_BITMAPS); 3121 else 3122 exec_controls_clearbit(vmx, CPU_BASED_USE_MSR_BITMAPS); 3123 return true; 3124 } 3125 3126 /* 3127 * Intel's VMX Instruction Reference specifies a common set of prerequisites 3128 * for running VMX instructions (except VMXON, whose prerequisites are 3129 * slightly different). It also specifies what exception to inject otherwise. 3130 * Note that many of these exceptions have priority over VM exits, so they 3131 * don't have to be checked again here. 3132 */ 3133 static int nested_vmx_check_permission(struct kvm_vcpu *vcpu) 3134 { 3135 if (!to_vmx(vcpu)->nested.vmxon) { 3136 kvm_queue_exception(vcpu, UD_VECTOR); 3137 return 0; 3138 } 3139 3140 if (vmx_get_cpl(vcpu)) { 3141 kvm_inject_gp(vcpu, 0); 3142 return 0; 3143 } 3144 3145 return 1; 3146 } 3147 3148 static u8 vmx_has_apicv_interrupt(struct kvm_vcpu *vcpu) 3149 { 3150 u8 rvi = vmx_get_rvi(); 3151 u8 vppr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_PROCPRI); 3152 3153 return ((rvi & 0xf0) > (vppr & 0xf0)); 3154 } 3155 3156 static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, 3157 struct vmcs12 *vmcs12); 3158 3159 /* 3160 * If from_vmentry is false, this is being called from state restore (either RSM 3161 * or KVM_SET_NESTED_STATE). Otherwise it's called from vmlaunch/vmresume. 3162 * 3163 * Returns: 3164 * NVMX_VMENTRY_SUCCESS: Entered VMX non-root mode 3165 * NVMX_VMENTRY_VMFAIL: Consistency check VMFail 3166 * NVMX_VMENTRY_VMEXIT: Consistency check VMExit 3167 * NVMX_VMENTRY_KVM_INTERNAL_ERROR: KVM internal error 3168 */ 3169 enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, 3170 bool from_vmentry) 3171 { 3172 struct vcpu_vmx *vmx = to_vmx(vcpu); 3173 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 3174 bool evaluate_pending_interrupts; 3175 u32 exit_reason = EXIT_REASON_INVALID_STATE; 3176 u32 exit_qual; 3177 3178 evaluate_pending_interrupts = exec_controls_get(vmx) & 3179 (CPU_BASED_INTR_WINDOW_EXITING | CPU_BASED_NMI_WINDOW_EXITING); 3180 if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu)) 3181 evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu); 3182 3183 if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) 3184 vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); 3185 if (kvm_mpx_supported() && 3186 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)) 3187 vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS); 3188 3189 /* 3190 * Overwrite vmcs01.GUEST_CR3 with L1's CR3 if EPT is disabled *and* 3191 * nested early checks are disabled. 
In the event of a "late" VM-Fail, 3192 * i.e. a VM-Fail detected by hardware but not KVM, KVM must unwind its 3193 * software model to the pre-VMEntry host state. When EPT is disabled, 3194 * GUEST_CR3 holds KVM's shadow CR3, not L1's "real" CR3, which causes 3195 * nested_vmx_restore_host_state() to corrupt vcpu->arch.cr3. Stuffing 3196 * vmcs01.GUEST_CR3 results in the unwind naturally setting arch.cr3 to 3197 * the correct value. Smashing vmcs01.GUEST_CR3 is safe because nested 3198 * VM-Exits, and the unwind, reset KVM's MMU, i.e. vmcs01.GUEST_CR3 is 3199 * guaranteed to be overwritten with a shadow CR3 prior to re-entering 3200 * L1. Don't stuff vmcs01.GUEST_CR3 when using nested early checks as 3201 * KVM modifies vcpu->arch.cr3 if and only if the early hardware checks 3202 * pass, and early VM-Fails do not reset KVM's MMU, i.e. the VM-Fail 3203 * path would need to manually save/restore vmcs01.GUEST_CR3. 3204 */ 3205 if (!enable_ept && !nested_early_check) 3206 vmcs_writel(GUEST_CR3, vcpu->arch.cr3); 3207 3208 vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02); 3209 3210 prepare_vmcs02_early(vmx, vmcs12); 3211 3212 if (from_vmentry) { 3213 if (unlikely(!nested_get_vmcs12_pages(vcpu))) 3214 return NVMX_VMENTRY_KVM_INTERNAL_ERROR; 3215 3216 if (nested_vmx_check_vmentry_hw(vcpu)) { 3217 vmx_switch_vmcs(vcpu, &vmx->vmcs01); 3218 return NVMX_VMENTRY_VMFAIL; 3219 } 3220 3221 if (nested_vmx_check_guest_state(vcpu, vmcs12, &exit_qual)) 3222 goto vmentry_fail_vmexit; 3223 } 3224 3225 enter_guest_mode(vcpu); 3226 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING) 3227 vcpu->arch.tsc_offset += vmcs12->tsc_offset; 3228 3229 if (prepare_vmcs02(vcpu, vmcs12, &exit_qual)) 3230 goto vmentry_fail_vmexit_guest_mode; 3231 3232 if (from_vmentry) { 3233 exit_reason = EXIT_REASON_MSR_LOAD_FAIL; 3234 exit_qual = nested_vmx_load_msr(vcpu, 3235 vmcs12->vm_entry_msr_load_addr, 3236 vmcs12->vm_entry_msr_load_count); 3237 if (exit_qual) 3238 goto vmentry_fail_vmexit_guest_mode; 3239 } else { 3240 /* 3241 * The MMU is not initialized to point at the right entities yet and 3242 * "get pages" would need to read data from the guest (i.e. we will 3243 * need to perform gpa to hpa translation). Request a call 3244 * to nested_get_vmcs12_pages before the next VM-entry. The MSRs 3245 * have already been set at vmentry time and should not be reset. 3246 */ 3247 kvm_make_request(KVM_REQ_GET_VMCS12_PAGES, vcpu); 3248 } 3249 3250 /* 3251 * If L1 had a pending IRQ/NMI until it executed 3252 * VMLAUNCH/VMRESUME which wasn't delivered because it was 3253 * disallowed (e.g. interrupts disabled), L0 needs to 3254 * evaluate if this pending event should cause an exit from L2 3255 * to L1 or delivered directly to L2 (e.g. In case L1 don't 3256 * intercept EXTERNAL_INTERRUPT). 3257 * 3258 * Usually this would be handled by the processor noticing an 3259 * IRQ/NMI window request, or checking RVI during evaluation of 3260 * pending virtual interrupts. However, this setting was done 3261 * on VMCS01 and now VMCS02 is active instead. Thus, we force L0 3262 * to perform pending event evaluation by requesting a KVM_REQ_EVENT. 3263 */ 3264 if (unlikely(evaluate_pending_interrupts)) 3265 kvm_make_request(KVM_REQ_EVENT, vcpu); 3266 3267 /* 3268 * Do not start the preemption timer hrtimer until after we know 3269 * we are successful, so that only nested_vmx_vmexit needs to cancel 3270 * the timer. 
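 * (vmx_start_preemption_timer() scales the vmcs12 value by the emulated
 * rate of 2^VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE TSC cycles per tick;
 * as an illustration, with virtual_tsc_khz = 2000000 a value of 1000
 * expires after (1000 << 5) * 1000000 / 2000000 = 16000 ns.)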
3271 */ 3272 vmx->nested.preemption_timer_expired = false; 3273 if (nested_cpu_has_preemption_timer(vmcs12)) 3274 vmx_start_preemption_timer(vcpu); 3275 3276 /* 3277 * Note no nested_vmx_succeed or nested_vmx_fail here. At this point 3278 * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet 3279 * returned as far as L1 is concerned. It will only return (and set 3280 * the success flag) when L2 exits (see nested_vmx_vmexit()). 3281 */ 3282 return NVMX_VMENTRY_SUCCESS; 3283 3284 /* 3285 * A failed consistency check that leads to a VMExit during L1's 3286 * VMEnter to L2 is a variation of a normal VMexit, as explained in 3287 * 26.7 "VM-entry failures during or after loading guest state". 3288 */ 3289 vmentry_fail_vmexit_guest_mode: 3290 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING) 3291 vcpu->arch.tsc_offset -= vmcs12->tsc_offset; 3292 leave_guest_mode(vcpu); 3293 3294 vmentry_fail_vmexit: 3295 vmx_switch_vmcs(vcpu, &vmx->vmcs01); 3296 3297 if (!from_vmentry) 3298 return NVMX_VMENTRY_VMEXIT; 3299 3300 load_vmcs12_host_state(vcpu, vmcs12); 3301 vmcs12->vm_exit_reason = exit_reason | VMX_EXIT_REASONS_FAILED_VMENTRY; 3302 vmcs12->exit_qualification = exit_qual; 3303 if (enable_shadow_vmcs || vmx->nested.hv_evmcs) 3304 vmx->nested.need_vmcs12_to_shadow_sync = true; 3305 return NVMX_VMENTRY_VMEXIT; 3306 } 3307 3308 /* 3309 * nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1 3310 * for running an L2 nested guest. 3311 */ 3312 static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) 3313 { 3314 struct vmcs12 *vmcs12; 3315 enum nvmx_vmentry_status status; 3316 struct vcpu_vmx *vmx = to_vmx(vcpu); 3317 u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu); 3318 3319 if (!nested_vmx_check_permission(vcpu)) 3320 return 1; 3321 3322 if (!nested_vmx_handle_enlightened_vmptrld(vcpu, launch)) 3323 return 1; 3324 3325 if (!vmx->nested.hv_evmcs && vmx->nested.current_vmptr == -1ull) 3326 return nested_vmx_failInvalid(vcpu); 3327 3328 vmcs12 = get_vmcs12(vcpu); 3329 3330 /* 3331 * Can't VMLAUNCH or VMRESUME a shadow VMCS. Despite the fact 3332 * that there *is* a valid VMCS pointer, RFLAGS.CF is set 3333 * rather than RFLAGS.ZF, and no error number is stored to the 3334 * VM-instruction error field. 3335 */ 3336 if (vmcs12->hdr.shadow_vmcs) 3337 return nested_vmx_failInvalid(vcpu); 3338 3339 if (vmx->nested.hv_evmcs) { 3340 copy_enlightened_to_vmcs12(vmx); 3341 /* Enlightened VMCS doesn't have launch state */ 3342 vmcs12->launch_state = !launch; 3343 } else if (enable_shadow_vmcs) { 3344 copy_shadow_to_vmcs12(vmx); 3345 } 3346 3347 /* 3348 * The nested entry process starts with enforcing various prerequisites 3349 * on vmcs12 as required by the Intel SDM, and act appropriately when 3350 * they fail: As the SDM explains, some conditions should cause the 3351 * instruction to fail, while others will cause the instruction to seem 3352 * to succeed, but return an EXIT_REASON_INVALID_STATE. 3353 * To speed up the normal (success) code path, we should avoid checking 3354 * for misconfigurations which will anyway be caught by the processor 3355 * when using the merged vmcs02. 3356 */ 3357 if (interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS) 3358 return nested_vmx_failValid(vcpu, 3359 VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS); 3360 3361 if (vmcs12->launch_state == launch) 3362 return nested_vmx_failValid(vcpu, 3363 launch ? 
VMXERR_VMLAUNCH_NONCLEAR_VMCS 3364 : VMXERR_VMRESUME_NONLAUNCHED_VMCS); 3365 3366 if (nested_vmx_check_controls(vcpu, vmcs12)) 3367 return nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); 3368 3369 if (nested_vmx_check_host_state(vcpu, vmcs12)) 3370 return nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD); 3371 3372 /* 3373 * We're finally done with prerequisite checking, and can start with 3374 * the nested entry. 3375 */ 3376 vmx->nested.nested_run_pending = 1; 3377 status = nested_vmx_enter_non_root_mode(vcpu, true); 3378 if (unlikely(status != NVMX_VMENTRY_SUCCESS)) 3379 goto vmentry_failed; 3380 3381 /* Hide L1D cache contents from the nested guest. */ 3382 vmx->vcpu.arch.l1tf_flush_l1d = true; 3383 3384 /* 3385 * Must happen outside of nested_vmx_enter_non_root_mode() as it will 3386 * also be used as part of restoring nVMX state for 3387 * snapshot restore (migration). 3388 * 3389 * In this flow, it is assumed that vmcs12 cache was 3390 * trasferred as part of captured nVMX state and should 3391 * therefore not be read from guest memory (which may not 3392 * exist on destination host yet). 3393 */ 3394 nested_cache_shadow_vmcs12(vcpu, vmcs12); 3395 3396 /* 3397 * If we're entering a halted L2 vcpu and the L2 vcpu won't be 3398 * awakened by event injection or by an NMI-window VM-exit or 3399 * by an interrupt-window VM-exit, halt the vcpu. 3400 */ 3401 if ((vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) && 3402 !(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) && 3403 !(vmcs12->cpu_based_vm_exec_control & CPU_BASED_NMI_WINDOW_EXITING) && 3404 !((vmcs12->cpu_based_vm_exec_control & CPU_BASED_INTR_WINDOW_EXITING) && 3405 (vmcs12->guest_rflags & X86_EFLAGS_IF))) { 3406 vmx->nested.nested_run_pending = 0; 3407 return kvm_vcpu_halt(vcpu); 3408 } 3409 return 1; 3410 3411 vmentry_failed: 3412 vmx->nested.nested_run_pending = 0; 3413 if (status == NVMX_VMENTRY_KVM_INTERNAL_ERROR) 3414 return 0; 3415 if (status == NVMX_VMENTRY_VMEXIT) 3416 return 1; 3417 WARN_ON_ONCE(status != NVMX_VMENTRY_VMFAIL); 3418 return nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); 3419 } 3420 3421 /* 3422 * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date 3423 * because L2 may have changed some cr0 bits directly (CR0_GUEST_HOST_MASK). 3424 * This function returns the new value we should put in vmcs12.guest_cr0. 3425 * It's not enough to just return the vmcs02 GUEST_CR0. Rather, 3426 * 1. Bits that neither L0 nor L1 trapped, were set directly by L2 and are now 3427 * available in vmcs02 GUEST_CR0. (Note: It's enough to check that L0 3428 * didn't trap the bit, because if L1 did, so would L0). 3429 * 2. Bits that L1 asked to trap (and therefore L0 also did) could not have 3430 * been modified by L2, and L1 knows it. So just leave the old value of 3431 * the bit from vmcs12.guest_cr0. Note that the bit from vmcs02 GUEST_CR0 3432 * isn't relevant, because if L0 traps this bit it can set it to anything. 3433 * 3. Bits that L1 didn't trap, but L0 did. L1 believes the guest could have 3434 * changed these bits, and therefore they need to be updated, but L0 3435 * didn't necessarily allow them to be changed in GUEST_CR0 - and rather 3436 * put them in vmcs02 CR0_READ_SHADOW. So take these bits from there. 
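 *
 * As a purely illustrative single-bit example: take a hypothetical CR0 bit B.
 * If neither L0 nor L1 traps B (case 1), L2 may have flipped it directly and
 * the live value is in vmcs02 GUEST_CR0.  If L1 traps B (case 2), L2 cannot
 * have changed it, so the value already in vmcs12->guest_cr0 is kept.  If
 * only L0 traps B (case 3), L0's emulation recorded the value L2 believes it
 * wrote in vmcs02 CR0_READ_SHADOW, which is what L1 expects to see.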
3437 */ 3438 static inline unsigned long 3439 vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) 3440 { 3441 return 3442 /*1*/ (vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) | 3443 /*2*/ (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) | 3444 /*3*/ (vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask | 3445 vcpu->arch.cr0_guest_owned_bits)); 3446 } 3447 3448 static inline unsigned long 3449 vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) 3450 { 3451 return 3452 /*1*/ (vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) | 3453 /*2*/ (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) | 3454 /*3*/ (vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask | 3455 vcpu->arch.cr4_guest_owned_bits)); 3456 } 3457 3458 static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu, 3459 struct vmcs12 *vmcs12) 3460 { 3461 u32 idt_vectoring; 3462 unsigned int nr; 3463 3464 if (vcpu->arch.exception.injected) { 3465 nr = vcpu->arch.exception.nr; 3466 idt_vectoring = nr | VECTORING_INFO_VALID_MASK; 3467 3468 if (kvm_exception_is_soft(nr)) { 3469 vmcs12->vm_exit_instruction_len = 3470 vcpu->arch.event_exit_inst_len; 3471 idt_vectoring |= INTR_TYPE_SOFT_EXCEPTION; 3472 } else 3473 idt_vectoring |= INTR_TYPE_HARD_EXCEPTION; 3474 3475 if (vcpu->arch.exception.has_error_code) { 3476 idt_vectoring |= VECTORING_INFO_DELIVER_CODE_MASK; 3477 vmcs12->idt_vectoring_error_code = 3478 vcpu->arch.exception.error_code; 3479 } 3480 3481 vmcs12->idt_vectoring_info_field = idt_vectoring; 3482 } else if (vcpu->arch.nmi_injected) { 3483 vmcs12->idt_vectoring_info_field = 3484 INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR; 3485 } else if (vcpu->arch.interrupt.injected) { 3486 nr = vcpu->arch.interrupt.nr; 3487 idt_vectoring = nr | VECTORING_INFO_VALID_MASK; 3488 3489 if (vcpu->arch.interrupt.soft) { 3490 idt_vectoring |= INTR_TYPE_SOFT_INTR; 3491 vmcs12->vm_entry_instruction_len = 3492 vcpu->arch.event_exit_inst_len; 3493 } else 3494 idt_vectoring |= INTR_TYPE_EXT_INTR; 3495 3496 vmcs12->idt_vectoring_info_field = idt_vectoring; 3497 } 3498 } 3499 3500 3501 static void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu) 3502 { 3503 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 3504 gfn_t gfn; 3505 3506 /* 3507 * Don't need to mark the APIC access page dirty; it is never 3508 * written to by the CPU during APIC virtualization. 
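	 * The virtual-APIC page and the posted-interrupt descriptor, by
	 * contrast, are written by the CPU while L2 runs, which is why only
	 * those two pages are marked dirty below.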
3509 */ 3510 3511 if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) { 3512 gfn = vmcs12->virtual_apic_page_addr >> PAGE_SHIFT; 3513 kvm_vcpu_mark_page_dirty(vcpu, gfn); 3514 } 3515 3516 if (nested_cpu_has_posted_intr(vmcs12)) { 3517 gfn = vmcs12->posted_intr_desc_addr >> PAGE_SHIFT; 3518 kvm_vcpu_mark_page_dirty(vcpu, gfn); 3519 } 3520 } 3521 3522 static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu) 3523 { 3524 struct vcpu_vmx *vmx = to_vmx(vcpu); 3525 int max_irr; 3526 void *vapic_page; 3527 u16 status; 3528 3529 if (!vmx->nested.pi_desc || !vmx->nested.pi_pending) 3530 return; 3531 3532 vmx->nested.pi_pending = false; 3533 if (!pi_test_and_clear_on(vmx->nested.pi_desc)) 3534 return; 3535 3536 max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256); 3537 if (max_irr != 256) { 3538 vapic_page = vmx->nested.virtual_apic_map.hva; 3539 if (!vapic_page) 3540 return; 3541 3542 __kvm_apic_update_irr(vmx->nested.pi_desc->pir, 3543 vapic_page, &max_irr); 3544 status = vmcs_read16(GUEST_INTR_STATUS); 3545 if ((u8)max_irr > ((u8)status & 0xff)) { 3546 status &= ~0xff; 3547 status |= (u8)max_irr; 3548 vmcs_write16(GUEST_INTR_STATUS, status); 3549 } 3550 } 3551 3552 nested_mark_vmcs12_pages_dirty(vcpu); 3553 } 3554 3555 static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu, 3556 unsigned long exit_qual) 3557 { 3558 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 3559 unsigned int nr = vcpu->arch.exception.nr; 3560 u32 intr_info = nr | INTR_INFO_VALID_MASK; 3561 3562 if (vcpu->arch.exception.has_error_code) { 3563 vmcs12->vm_exit_intr_error_code = vcpu->arch.exception.error_code; 3564 intr_info |= INTR_INFO_DELIVER_CODE_MASK; 3565 } 3566 3567 if (kvm_exception_is_soft(nr)) 3568 intr_info |= INTR_TYPE_SOFT_EXCEPTION; 3569 else 3570 intr_info |= INTR_TYPE_HARD_EXCEPTION; 3571 3572 if (!(vmcs12->idt_vectoring_info_field & VECTORING_INFO_VALID_MASK) && 3573 vmx_get_nmi_mask(vcpu)) 3574 intr_info |= INTR_INFO_UNBLOCK_NMI; 3575 3576 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual); 3577 } 3578 3579 /* 3580 * Returns true if a debug trap is pending delivery. 3581 * 3582 * In KVM, debug traps bear an exception payload. As such, the class of a #DB 3583 * exception may be inferred from the presence of an exception payload. 3584 */ 3585 static inline bool vmx_pending_dbg_trap(struct kvm_vcpu *vcpu) 3586 { 3587 return vcpu->arch.exception.pending && 3588 vcpu->arch.exception.nr == DB_VECTOR && 3589 vcpu->arch.exception.payload; 3590 } 3591 3592 /* 3593 * Certain VM-exits set the 'pending debug exceptions' field to indicate a 3594 * recognized #DB (data or single-step) that has yet to be delivered. Since KVM 3595 * represents these debug traps with a payload that is said to be compatible 3596 * with the 'pending debug exceptions' field, write the payload to the VMCS 3597 * field if a VM-exit is delivered before the debug trap. 3598 */ 3599 static void nested_vmx_update_pending_dbg(struct kvm_vcpu *vcpu) 3600 { 3601 if (vmx_pending_dbg_trap(vcpu)) 3602 vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, 3603 vcpu->arch.exception.payload); 3604 } 3605 3606 static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr) 3607 { 3608 struct vcpu_vmx *vmx = to_vmx(vcpu); 3609 unsigned long exit_qual; 3610 bool block_nested_events = 3611 vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu); 3612 bool mtf_pending = vmx->nested.mtf_pending; 3613 struct kvm_lapic *apic = vcpu->arch.apic; 3614 3615 /* 3616 * Clear the MTF state. 
If a higher priority VM-exit is delivered first, 3617 * this state is discarded. 3618 */ 3619 vmx->nested.mtf_pending = false; 3620 3621 if (lapic_in_kernel(vcpu) && 3622 test_bit(KVM_APIC_INIT, &apic->pending_events)) { 3623 if (block_nested_events) 3624 return -EBUSY; 3625 nested_vmx_update_pending_dbg(vcpu); 3626 clear_bit(KVM_APIC_INIT, &apic->pending_events); 3627 nested_vmx_vmexit(vcpu, EXIT_REASON_INIT_SIGNAL, 0, 0); 3628 return 0; 3629 } 3630 3631 /* 3632 * Process any exceptions that are not debug traps before MTF. 3633 */ 3634 if (vcpu->arch.exception.pending && 3635 !vmx_pending_dbg_trap(vcpu) && 3636 nested_vmx_check_exception(vcpu, &exit_qual)) { 3637 if (block_nested_events) 3638 return -EBUSY; 3639 nested_vmx_inject_exception_vmexit(vcpu, exit_qual); 3640 return 0; 3641 } 3642 3643 if (mtf_pending) { 3644 if (block_nested_events) 3645 return -EBUSY; 3646 nested_vmx_update_pending_dbg(vcpu); 3647 nested_vmx_vmexit(vcpu, EXIT_REASON_MONITOR_TRAP_FLAG, 0, 0); 3648 return 0; 3649 } 3650 3651 if (vcpu->arch.exception.pending && 3652 nested_vmx_check_exception(vcpu, &exit_qual)) { 3653 if (block_nested_events) 3654 return -EBUSY; 3655 nested_vmx_inject_exception_vmexit(vcpu, exit_qual); 3656 return 0; 3657 } 3658 3659 if (nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) && 3660 vmx->nested.preemption_timer_expired) { 3661 if (block_nested_events) 3662 return -EBUSY; 3663 nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0); 3664 return 0; 3665 } 3666 3667 if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) { 3668 if (block_nested_events) 3669 return -EBUSY; 3670 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, 3671 NMI_VECTOR | INTR_TYPE_NMI_INTR | 3672 INTR_INFO_VALID_MASK, 0); 3673 /* 3674 * The NMI-triggered VM exit counts as injection: 3675 * clear this one and block further NMIs. 
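		 *
		 * As a worked example of the interruption info passed above:
		 * NMI_VECTOR (2) in bits 7:0, the NMI type (2) in bits 10:8
		 * via INTR_TYPE_NMI_INTR, and the valid bit (31) via
		 * INTR_INFO_VALID_MASK combine to 0x80000202.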
3676 */ 3677 vcpu->arch.nmi_pending = 0; 3678 vmx_set_nmi_mask(vcpu, true); 3679 return 0; 3680 } 3681 3682 if ((kvm_cpu_has_interrupt(vcpu) || external_intr) && 3683 nested_exit_on_intr(vcpu)) { 3684 if (block_nested_events) 3685 return -EBUSY; 3686 nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0); 3687 return 0; 3688 } 3689 3690 vmx_complete_nested_posted_interrupt(vcpu); 3691 return 0; 3692 } 3693 3694 static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu) 3695 { 3696 ktime_t remaining = 3697 hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer); 3698 u64 value; 3699 3700 if (ktime_to_ns(remaining) <= 0) 3701 return 0; 3702 3703 value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz; 3704 do_div(value, 1000000); 3705 return value >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE; 3706 } 3707 3708 static bool is_vmcs12_ext_field(unsigned long field) 3709 { 3710 switch (field) { 3711 case GUEST_ES_SELECTOR: 3712 case GUEST_CS_SELECTOR: 3713 case GUEST_SS_SELECTOR: 3714 case GUEST_DS_SELECTOR: 3715 case GUEST_FS_SELECTOR: 3716 case GUEST_GS_SELECTOR: 3717 case GUEST_LDTR_SELECTOR: 3718 case GUEST_TR_SELECTOR: 3719 case GUEST_ES_LIMIT: 3720 case GUEST_CS_LIMIT: 3721 case GUEST_SS_LIMIT: 3722 case GUEST_DS_LIMIT: 3723 case GUEST_FS_LIMIT: 3724 case GUEST_GS_LIMIT: 3725 case GUEST_LDTR_LIMIT: 3726 case GUEST_TR_LIMIT: 3727 case GUEST_GDTR_LIMIT: 3728 case GUEST_IDTR_LIMIT: 3729 case GUEST_ES_AR_BYTES: 3730 case GUEST_DS_AR_BYTES: 3731 case GUEST_FS_AR_BYTES: 3732 case GUEST_GS_AR_BYTES: 3733 case GUEST_LDTR_AR_BYTES: 3734 case GUEST_TR_AR_BYTES: 3735 case GUEST_ES_BASE: 3736 case GUEST_CS_BASE: 3737 case GUEST_SS_BASE: 3738 case GUEST_DS_BASE: 3739 case GUEST_FS_BASE: 3740 case GUEST_GS_BASE: 3741 case GUEST_LDTR_BASE: 3742 case GUEST_TR_BASE: 3743 case GUEST_GDTR_BASE: 3744 case GUEST_IDTR_BASE: 3745 case GUEST_PENDING_DBG_EXCEPTIONS: 3746 case GUEST_BNDCFGS: 3747 return true; 3748 default: 3749 break; 3750 } 3751 3752 return false; 3753 } 3754 3755 static void sync_vmcs02_to_vmcs12_rare(struct kvm_vcpu *vcpu, 3756 struct vmcs12 *vmcs12) 3757 { 3758 struct vcpu_vmx *vmx = to_vmx(vcpu); 3759 3760 vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR); 3761 vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR); 3762 vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR); 3763 vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR); 3764 vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR); 3765 vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR); 3766 vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR); 3767 vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR); 3768 vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT); 3769 vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT); 3770 vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT); 3771 vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT); 3772 vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT); 3773 vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT); 3774 vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT); 3775 vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT); 3776 vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT); 3777 vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT); 3778 vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES); 3779 vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES); 3780 vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES); 3781 vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES); 3782 
vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES); 3783 vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES); 3784 vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE); 3785 vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE); 3786 vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE); 3787 vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE); 3788 vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE); 3789 vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE); 3790 vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE); 3791 vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE); 3792 vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE); 3793 vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE); 3794 vmcs12->guest_pending_dbg_exceptions = 3795 vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS); 3796 if (kvm_mpx_supported()) 3797 vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS); 3798 3799 vmx->nested.need_sync_vmcs02_to_vmcs12_rare = false; 3800 } 3801 3802 static void copy_vmcs02_to_vmcs12_rare(struct kvm_vcpu *vcpu, 3803 struct vmcs12 *vmcs12) 3804 { 3805 struct vcpu_vmx *vmx = to_vmx(vcpu); 3806 int cpu; 3807 3808 if (!vmx->nested.need_sync_vmcs02_to_vmcs12_rare) 3809 return; 3810 3811 3812 WARN_ON_ONCE(vmx->loaded_vmcs != &vmx->vmcs01); 3813 3814 cpu = get_cpu(); 3815 vmx->loaded_vmcs = &vmx->nested.vmcs02; 3816 vmx_vcpu_load(&vmx->vcpu, cpu); 3817 3818 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12); 3819 3820 vmx->loaded_vmcs = &vmx->vmcs01; 3821 vmx_vcpu_load(&vmx->vcpu, cpu); 3822 put_cpu(); 3823 } 3824 3825 /* 3826 * Update the guest state fields of vmcs12 to reflect changes that 3827 * occurred while L2 was running. (The "IA-32e mode guest" bit of the 3828 * VM-entry controls is also updated, since this is really a guest 3829 * state bit.) 3830 */ 3831 static void sync_vmcs02_to_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) 3832 { 3833 struct vcpu_vmx *vmx = to_vmx(vcpu); 3834 3835 if (vmx->nested.hv_evmcs) 3836 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12); 3837 3838 vmx->nested.need_sync_vmcs02_to_vmcs12_rare = !vmx->nested.hv_evmcs; 3839 3840 vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12); 3841 vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12); 3842 3843 vmcs12->guest_rsp = kvm_rsp_read(vcpu); 3844 vmcs12->guest_rip = kvm_rip_read(vcpu); 3845 vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS); 3846 3847 vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES); 3848 vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES); 3849 3850 vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS); 3851 vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP); 3852 vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP); 3853 3854 vmcs12->guest_interruptibility_info = 3855 vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); 3856 3857 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) 3858 vmcs12->guest_activity_state = GUEST_ACTIVITY_HLT; 3859 else 3860 vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE; 3861 3862 if (nested_cpu_has_preemption_timer(vmcs12) && 3863 vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER) 3864 vmcs12->vmx_preemption_timer_value = 3865 vmx_get_preemption_timer_value(vcpu); 3866 3867 /* 3868 * In some cases (usually, nested EPT), L2 is allowed to change its 3869 * own CR3 without exiting. If it has changed it, we must keep it. 3870 * Of course, if L0 is using shadow page tables, GUEST_CR3 was defined 3871 * by L0, not L1 or L2, so we mustn't unconditionally copy it to vmcs12. 3872 * 3873 * Additionally, restore L2's PDPTR to vmcs12. 
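	 * The PDPTRs are only meaningful when L2 runs with PAE paging under
	 * EPT (hence the is_pae_paging() check below); with 64-bit or non-PAE
	 * paging there is nothing to restore.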
 */
	if (enable_ept) {
		vmcs12->guest_cr3 = vmcs_readl(GUEST_CR3);
		if (nested_cpu_has_ept(vmcs12) && is_pae_paging(vcpu)) {
			vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0);
			vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1);
			vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2);
			vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3);
		}
	}

	vmcs12->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS);

	if (nested_cpu_has_vid(vmcs12))
		vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS);

	vmcs12->vm_entry_controls =
		(vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) |
		(vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE);

	if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS)
		kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7);

	if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER)
		vmcs12->guest_ia32_efer = vcpu->arch.efer;
}

/*
 * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits
 * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12),
 * and this function updates it to reflect the changes to the guest state while
 * L2 was running (and perhaps made some exits which were handled directly by L0
 * without going back to L1), and to reflect the exit reason.
 * Note that we do not have to copy here all VMCS fields, just those that
 * could have been changed by the L2 guest or the exit - i.e., the guest-state
 * and exit-information fields only. Other fields are modified by L1 with
 * VMWRITE, which already writes to vmcs12 directly.
 */
static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
			   u32 exit_reason, u32 exit_intr_info,
			   unsigned long exit_qualification)
{
	/* update exit information fields: */
	vmcs12->vm_exit_reason = exit_reason;
	vmcs12->exit_qualification = exit_qualification;
	vmcs12->vm_exit_intr_info = exit_intr_info;

	vmcs12->idt_vectoring_info_field = 0;
	vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
	vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);

	if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) {
		vmcs12->launch_state = 1;

		/* vm_entry_intr_info_field is cleared on exit. Emulate this
		 * instead of reading the real value. */
		vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK;

		/*
		 * Transfer the event that L0 or L1 may have wanted to inject
		 * into L2 to IDT_VECTORING_INFO_FIELD.
		 */
		vmcs12_save_pending_event(vcpu, vmcs12);

		/*
		 * According to the spec, there's no need to store the guest's
		 * MSRs if the exit is due to a VM-entry failure that occurs
		 * during or after loading the guest state. Since this exit
		 * does not fall in that category, we need to save the MSRs.
		 */
		if (nested_vmx_store_msr(vcpu,
					 vmcs12->vm_exit_msr_store_addr,
					 vmcs12->vm_exit_msr_store_count))
			nested_vmx_abort(vcpu,
					 VMX_ABORT_SAVE_GUEST_MSR_FAIL);
	}

	/*
	 * Drop what we picked up for L2 via vmx_complete_interrupts. It is
	 * preserved above and would only end up incorrectly in L1.
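	 * ("Preserved above" refers to vmcs12_save_pending_event(), which has
	 * already stashed any pending event into
	 * vmcs12->idt_vectoring_info_field for L1 to re-inject.)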
3954 */ 3955 vcpu->arch.nmi_injected = false; 3956 kvm_clear_exception_queue(vcpu); 3957 kvm_clear_interrupt_queue(vcpu); 3958 } 3959 3960 /* 3961 * A part of what we need to when the nested L2 guest exits and we want to 3962 * run its L1 parent, is to reset L1's guest state to the host state specified 3963 * in vmcs12. 3964 * This function is to be called not only on normal nested exit, but also on 3965 * a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry 3966 * Failures During or After Loading Guest State"). 3967 * This function should be called when the active VMCS is L1's (vmcs01). 3968 */ 3969 static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, 3970 struct vmcs12 *vmcs12) 3971 { 3972 struct kvm_segment seg; 3973 u32 entry_failure_code; 3974 3975 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) 3976 vcpu->arch.efer = vmcs12->host_ia32_efer; 3977 else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) 3978 vcpu->arch.efer |= (EFER_LMA | EFER_LME); 3979 else 3980 vcpu->arch.efer &= ~(EFER_LMA | EFER_LME); 3981 vmx_set_efer(vcpu, vcpu->arch.efer); 3982 3983 kvm_rsp_write(vcpu, vmcs12->host_rsp); 3984 kvm_rip_write(vcpu, vmcs12->host_rip); 3985 vmx_set_rflags(vcpu, X86_EFLAGS_FIXED); 3986 vmx_set_interrupt_shadow(vcpu, 0); 3987 3988 /* 3989 * Note that calling vmx_set_cr0 is important, even if cr0 hasn't 3990 * actually changed, because vmx_set_cr0 refers to efer set above. 3991 * 3992 * CR0_GUEST_HOST_MASK is already set in the original vmcs01 3993 * (KVM doesn't change it); 3994 */ 3995 vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS; 3996 vmx_set_cr0(vcpu, vmcs12->host_cr0); 3997 3998 /* Same as above - no reason to call set_cr4_guest_host_mask(). */ 3999 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); 4000 vmx_set_cr4(vcpu, vmcs12->host_cr4); 4001 4002 nested_ept_uninit_mmu_context(vcpu); 4003 4004 /* 4005 * Only PDPTE load can fail as the value of cr3 was checked on entry and 4006 * couldn't have changed. 4007 */ 4008 if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code)) 4009 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL); 4010 4011 if (!enable_ept) 4012 vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault; 4013 4014 /* 4015 * If vmcs01 doesn't use VPID, CPU flushes TLB on every 4016 * VMEntry/VMExit. Thus, no need to flush TLB. 4017 * 4018 * If vmcs12 doesn't use VPID, L1 expects TLB to be 4019 * flushed on every VMEntry/VMExit. 4020 * 4021 * Otherwise, we can preserve TLB entries as long as we are 4022 * able to tag L1 TLB entries differently than L2 TLB entries. 4023 * 4024 * If vmcs12 uses EPT, we need to execute this flush on EPTP01 4025 * and therefore we request the TLB flush to happen only after VMCS EPTP 4026 * has been set by KVM_REQ_LOAD_CR3. 4027 */ 4028 if (enable_vpid && 4029 (!nested_cpu_has_vpid(vmcs12) || !nested_has_guest_tlb_tag(vcpu))) { 4030 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); 4031 } 4032 4033 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs); 4034 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp); 4035 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip); 4036 vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base); 4037 vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base); 4038 vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF); 4039 vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF); 4040 4041 /* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1. 
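	 * When that exit control is set, the SDM requires IA32_BNDCFGS to be
	 * cleared to zero as part of loading host state, which is what the
	 * write below emulates.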
*/ 4042 if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS) 4043 vmcs_write64(GUEST_BNDCFGS, 0); 4044 4045 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) { 4046 vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat); 4047 vcpu->arch.pat = vmcs12->host_ia32_pat; 4048 } 4049 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) 4050 WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL, 4051 vmcs12->host_ia32_perf_global_ctrl)); 4052 4053 /* Set L1 segment info according to Intel SDM 4054 27.5.2 Loading Host Segment and Descriptor-Table Registers */ 4055 seg = (struct kvm_segment) { 4056 .base = 0, 4057 .limit = 0xFFFFFFFF, 4058 .selector = vmcs12->host_cs_selector, 4059 .type = 11, 4060 .present = 1, 4061 .s = 1, 4062 .g = 1 4063 }; 4064 if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) 4065 seg.l = 1; 4066 else 4067 seg.db = 1; 4068 vmx_set_segment(vcpu, &seg, VCPU_SREG_CS); 4069 seg = (struct kvm_segment) { 4070 .base = 0, 4071 .limit = 0xFFFFFFFF, 4072 .type = 3, 4073 .present = 1, 4074 .s = 1, 4075 .db = 1, 4076 .g = 1 4077 }; 4078 seg.selector = vmcs12->host_ds_selector; 4079 vmx_set_segment(vcpu, &seg, VCPU_SREG_DS); 4080 seg.selector = vmcs12->host_es_selector; 4081 vmx_set_segment(vcpu, &seg, VCPU_SREG_ES); 4082 seg.selector = vmcs12->host_ss_selector; 4083 vmx_set_segment(vcpu, &seg, VCPU_SREG_SS); 4084 seg.selector = vmcs12->host_fs_selector; 4085 seg.base = vmcs12->host_fs_base; 4086 vmx_set_segment(vcpu, &seg, VCPU_SREG_FS); 4087 seg.selector = vmcs12->host_gs_selector; 4088 seg.base = vmcs12->host_gs_base; 4089 vmx_set_segment(vcpu, &seg, VCPU_SREG_GS); 4090 seg = (struct kvm_segment) { 4091 .base = vmcs12->host_tr_base, 4092 .limit = 0x67, 4093 .selector = vmcs12->host_tr_selector, 4094 .type = 11, 4095 .present = 1 4096 }; 4097 vmx_set_segment(vcpu, &seg, VCPU_SREG_TR); 4098 4099 kvm_set_dr(vcpu, 7, 0x400); 4100 vmcs_write64(GUEST_IA32_DEBUGCTL, 0); 4101 4102 if (cpu_has_vmx_msr_bitmap()) 4103 vmx_update_msr_bitmap(vcpu); 4104 4105 if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr, 4106 vmcs12->vm_exit_msr_load_count)) 4107 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL); 4108 } 4109 4110 static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx) 4111 { 4112 struct shared_msr_entry *efer_msr; 4113 unsigned int i; 4114 4115 if (vm_entry_controls_get(vmx) & VM_ENTRY_LOAD_IA32_EFER) 4116 return vmcs_read64(GUEST_IA32_EFER); 4117 4118 if (cpu_has_load_ia32_efer()) 4119 return host_efer; 4120 4121 for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) { 4122 if (vmx->msr_autoload.guest.val[i].index == MSR_EFER) 4123 return vmx->msr_autoload.guest.val[i].value; 4124 } 4125 4126 efer_msr = find_msr_entry(vmx, MSR_EFER); 4127 if (efer_msr) 4128 return efer_msr->data; 4129 4130 return host_efer; 4131 } 4132 4133 static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu) 4134 { 4135 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 4136 struct vcpu_vmx *vmx = to_vmx(vcpu); 4137 struct vmx_msr_entry g, h; 4138 gpa_t gpa; 4139 u32 i, j; 4140 4141 vcpu->arch.pat = vmcs_read64(GUEST_IA32_PAT); 4142 4143 if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) { 4144 /* 4145 * L1's host DR7 is lost if KVM_GUESTDBG_USE_HW_BP is set 4146 * as vmcs01.GUEST_DR7 contains a userspace defined value 4147 * and vcpu->arch.dr7 is not squirreled away before the 4148 * nested VMENTER (not worth adding a variable in nested_vmx). 
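		 * Falling back to DR7_FIXED_1 (just the reserved must-be-one
		 * bit, i.e. all breakpoints disabled) is a safe default given
		 * that L1's value cannot be recovered here.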
4149 */ 4150 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) 4151 kvm_set_dr(vcpu, 7, DR7_FIXED_1); 4152 else 4153 WARN_ON(kvm_set_dr(vcpu, 7, vmcs_readl(GUEST_DR7))); 4154 } 4155 4156 /* 4157 * Note that calling vmx_set_{efer,cr0,cr4} is important as they 4158 * handle a variety of side effects to KVM's software model. 4159 */ 4160 vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx)); 4161 4162 vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS; 4163 vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW)); 4164 4165 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); 4166 vmx_set_cr4(vcpu, vmcs_readl(CR4_READ_SHADOW)); 4167 4168 nested_ept_uninit_mmu_context(vcpu); 4169 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); 4170 kvm_register_mark_available(vcpu, VCPU_EXREG_CR3); 4171 4172 /* 4173 * Use ept_save_pdptrs(vcpu) to load the MMU's cached PDPTRs 4174 * from vmcs01 (if necessary). The PDPTRs are not loaded on 4175 * VMFail, like everything else we just need to ensure our 4176 * software model is up-to-date. 4177 */ 4178 if (enable_ept) 4179 ept_save_pdptrs(vcpu); 4180 4181 kvm_mmu_reset_context(vcpu); 4182 4183 if (cpu_has_vmx_msr_bitmap()) 4184 vmx_update_msr_bitmap(vcpu); 4185 4186 /* 4187 * This nasty bit of open coding is a compromise between blindly 4188 * loading L1's MSRs using the exit load lists (incorrect emulation 4189 * of VMFail), leaving the nested VM's MSRs in the software model 4190 * (incorrect behavior) and snapshotting the modified MSRs (too 4191 * expensive since the lists are unbound by hardware). For each 4192 * MSR that was (prematurely) loaded from the nested VMEntry load 4193 * list, reload it from the exit load list if it exists and differs 4194 * from the guest value. The intent is to stuff host state as 4195 * silently as possible, not to fully process the exit load list. 4196 */ 4197 for (i = 0; i < vmcs12->vm_entry_msr_load_count; i++) { 4198 gpa = vmcs12->vm_entry_msr_load_addr + (i * sizeof(g)); 4199 if (kvm_vcpu_read_guest(vcpu, gpa, &g, sizeof(g))) { 4200 pr_debug_ratelimited( 4201 "%s read MSR index failed (%u, 0x%08llx)\n", 4202 __func__, i, gpa); 4203 goto vmabort; 4204 } 4205 4206 for (j = 0; j < vmcs12->vm_exit_msr_load_count; j++) { 4207 gpa = vmcs12->vm_exit_msr_load_addr + (j * sizeof(h)); 4208 if (kvm_vcpu_read_guest(vcpu, gpa, &h, sizeof(h))) { 4209 pr_debug_ratelimited( 4210 "%s read MSR failed (%u, 0x%08llx)\n", 4211 __func__, j, gpa); 4212 goto vmabort; 4213 } 4214 if (h.index != g.index) 4215 continue; 4216 if (h.value == g.value) 4217 break; 4218 4219 if (nested_vmx_load_msr_check(vcpu, &h)) { 4220 pr_debug_ratelimited( 4221 "%s check failed (%u, 0x%x, 0x%x)\n", 4222 __func__, j, h.index, h.reserved); 4223 goto vmabort; 4224 } 4225 4226 if (kvm_set_msr(vcpu, h.index, h.value)) { 4227 pr_debug_ratelimited( 4228 "%s WRMSR failed (%u, 0x%x, 0x%llx)\n", 4229 __func__, j, h.index, h.value); 4230 goto vmabort; 4231 } 4232 } 4233 } 4234 4235 return; 4236 4237 vmabort: 4238 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL); 4239 } 4240 4241 /* 4242 * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1 4243 * and modify vmcs12 to make it see what it would expect to see there if 4244 * L2 was its real guest. 
Must only be called when in L2 (is_guest_mode()) 4245 */ 4246 void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, 4247 u32 exit_intr_info, unsigned long exit_qualification) 4248 { 4249 struct vcpu_vmx *vmx = to_vmx(vcpu); 4250 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 4251 4252 /* trying to cancel vmlaunch/vmresume is a bug */ 4253 WARN_ON_ONCE(vmx->nested.nested_run_pending); 4254 4255 leave_guest_mode(vcpu); 4256 4257 if (nested_cpu_has_preemption_timer(vmcs12)) 4258 hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer); 4259 4260 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING) 4261 vcpu->arch.tsc_offset -= vmcs12->tsc_offset; 4262 4263 if (likely(!vmx->fail)) { 4264 sync_vmcs02_to_vmcs12(vcpu, vmcs12); 4265 4266 if (exit_reason != -1) 4267 prepare_vmcs12(vcpu, vmcs12, exit_reason, exit_intr_info, 4268 exit_qualification); 4269 4270 /* 4271 * Must happen outside of sync_vmcs02_to_vmcs12() as it will 4272 * also be used to capture vmcs12 cache as part of 4273 * capturing nVMX state for snapshot (migration). 4274 * 4275 * Otherwise, this flush will dirty guest memory at a 4276 * point it is already assumed by user-space to be 4277 * immutable. 4278 */ 4279 nested_flush_cached_shadow_vmcs12(vcpu, vmcs12); 4280 } else { 4281 /* 4282 * The only expected VM-instruction error is "VM entry with 4283 * invalid control field(s)." Anything else indicates a 4284 * problem with L0. And we should never get here with a 4285 * VMFail of any type if early consistency checks are enabled. 4286 */ 4287 WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) != 4288 VMXERR_ENTRY_INVALID_CONTROL_FIELD); 4289 WARN_ON_ONCE(nested_early_check); 4290 } 4291 4292 vmx_switch_vmcs(vcpu, &vmx->vmcs01); 4293 4294 /* Update any VMCS fields that might have changed while L2 ran */ 4295 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); 4296 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); 4297 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset); 4298 if (vmx->nested.l1_tpr_threshold != -1) 4299 vmcs_write32(TPR_THRESHOLD, vmx->nested.l1_tpr_threshold); 4300 4301 if (kvm_has_tsc_control) 4302 decache_tsc_multiplier(vmx); 4303 4304 if (vmx->nested.change_vmcs01_virtual_apic_mode) { 4305 vmx->nested.change_vmcs01_virtual_apic_mode = false; 4306 vmx_set_virtual_apic_mode(vcpu); 4307 } 4308 4309 /* Unpin physical memory we referred to in vmcs02 */ 4310 if (vmx->nested.apic_access_page) { 4311 kvm_release_page_clean(vmx->nested.apic_access_page); 4312 vmx->nested.apic_access_page = NULL; 4313 } 4314 kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true); 4315 kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true); 4316 vmx->nested.pi_desc = NULL; 4317 4318 /* 4319 * We are now running in L2, mmu_notifier will force to reload the 4320 * page's hpa for L2 vmcs. Need to reload it for L1 before entering L1. 4321 */ 4322 kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu); 4323 4324 if ((exit_reason != -1) && (enable_shadow_vmcs || vmx->nested.hv_evmcs)) 4325 vmx->nested.need_vmcs12_to_shadow_sync = true; 4326 4327 /* in case we halted in L2 */ 4328 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; 4329 4330 if (likely(!vmx->fail)) { 4331 /* 4332 * TODO: SDM says that with acknowledge interrupt on 4333 * exit, bit 31 of the VM-exit interrupt information 4334 * (valid interrupt) is always set to 1 on 4335 * EXIT_REASON_EXTERNAL_INTERRUPT, so we shouldn't 4336 * need kvm_cpu_has_interrupt(). See the commit 4337 * message for details. 
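		 *
		 * (With the "acknowledge interrupt on exit" control set,
		 * hardware acknowledges the interrupt controller and stores
		 * the vector in the VM-exit interruption-information field;
		 * the kvm_cpu_get_interrupt() call below emulates exactly
		 * that.)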
4338 */ 4339 if (nested_exit_intr_ack_set(vcpu) && 4340 exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT && 4341 kvm_cpu_has_interrupt(vcpu)) { 4342 int irq = kvm_cpu_get_interrupt(vcpu); 4343 WARN_ON(irq < 0); 4344 vmcs12->vm_exit_intr_info = irq | 4345 INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR; 4346 } 4347 4348 if (exit_reason != -1) 4349 trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason, 4350 vmcs12->exit_qualification, 4351 vmcs12->idt_vectoring_info_field, 4352 vmcs12->vm_exit_intr_info, 4353 vmcs12->vm_exit_intr_error_code, 4354 KVM_ISA_VMX); 4355 4356 load_vmcs12_host_state(vcpu, vmcs12); 4357 4358 return; 4359 } 4360 4361 /* 4362 * After an early L2 VM-entry failure, we're now back 4363 * in L1 which thinks it just finished a VMLAUNCH or 4364 * VMRESUME instruction, so we need to set the failure 4365 * flag and the VM-instruction error field of the VMCS 4366 * accordingly, and skip the emulated instruction. 4367 */ 4368 (void)nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); 4369 4370 /* 4371 * Restore L1's host state to KVM's software model. We're here 4372 * because a consistency check was caught by hardware, which 4373 * means some amount of guest state has been propagated to KVM's 4374 * model and needs to be unwound to the host's state. 4375 */ 4376 nested_vmx_restore_host_state(vcpu); 4377 4378 vmx->fail = 0; 4379 } 4380 4381 /* 4382 * Decode the memory-address operand of a vmx instruction, as recorded on an 4383 * exit caused by such an instruction (run by a guest hypervisor). 4384 * On success, returns 0. When the operand is invalid, returns 1 and throws 4385 * #UD or #GP. 4386 */ 4387 int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification, 4388 u32 vmx_instruction_info, bool wr, int len, gva_t *ret) 4389 { 4390 gva_t off; 4391 bool exn; 4392 struct kvm_segment s; 4393 4394 /* 4395 * According to Vol. 3B, "Information for VM Exits Due to Instruction 4396 * Execution", on an exit, vmx_instruction_info holds most of the 4397 * addressing components of the operand. Only the displacement part 4398 * is put in exit_qualification (see 3B, "Basic VM-Exit Information"). 4399 * For how an actual address is calculated from all these components, 4400 * refer to Vol. 1, "Operand Addressing". 4401 */ 4402 int scaling = vmx_instruction_info & 3; 4403 int addr_size = (vmx_instruction_info >> 7) & 7; 4404 bool is_reg = vmx_instruction_info & (1u << 10); 4405 int seg_reg = (vmx_instruction_info >> 15) & 7; 4406 int index_reg = (vmx_instruction_info >> 18) & 0xf; 4407 bool index_is_valid = !(vmx_instruction_info & (1u << 22)); 4408 int base_reg = (vmx_instruction_info >> 23) & 0xf; 4409 bool base_is_valid = !(vmx_instruction_info & (1u << 27)); 4410 4411 if (is_reg) { 4412 kvm_queue_exception(vcpu, UD_VECTOR); 4413 return 1; 4414 } 4415 4416 /* Addr = segment_base + offset */ 4417 /* offset = base + [index * scale] + displacement */ 4418 off = exit_qualification; /* holds the displacement */ 4419 if (addr_size == 1) 4420 off = (gva_t)sign_extend64(off, 31); 4421 else if (addr_size == 0) 4422 off = (gva_t)sign_extend64(off, 15); 4423 if (base_is_valid) 4424 off += kvm_register_read(vcpu, base_reg); 4425 if (index_is_valid) 4426 off += kvm_register_read(vcpu, index_reg)<<scaling; 4427 vmx_get_segment(vcpu, &s, seg_reg); 4428 4429 /* 4430 * The effective address, i.e. @off, of a memory operand is truncated 4431 * based on the address size of the instruction. Note that this is 4432 * the *effective address*, i.e. 
the address prior to accounting for 4433 * the segment's base. 4434 */ 4435 if (addr_size == 1) /* 32 bit */ 4436 off &= 0xffffffff; 4437 else if (addr_size == 0) /* 16 bit */ 4438 off &= 0xffff; 4439 4440 /* Checks for #GP/#SS exceptions. */ 4441 exn = false; 4442 if (is_long_mode(vcpu)) { 4443 /* 4444 * The virtual/linear address is never truncated in 64-bit 4445 * mode, e.g. a 32-bit address size can yield a 64-bit virtual 4446 * address when using FS/GS with a non-zero base. 4447 */ 4448 if (seg_reg == VCPU_SREG_FS || seg_reg == VCPU_SREG_GS) 4449 *ret = s.base + off; 4450 else 4451 *ret = off; 4452 4453 /* Long mode: #GP(0)/#SS(0) if the memory address is in a 4454 * non-canonical form. This is the only check on the memory 4455 * destination for long mode! 4456 */ 4457 exn = is_noncanonical_address(*ret, vcpu); 4458 } else { 4459 /* 4460 * When not in long mode, the virtual/linear address is 4461 * unconditionally truncated to 32 bits regardless of the 4462 * address size. 4463 */ 4464 *ret = (s.base + off) & 0xffffffff; 4465 4466 /* Protected mode: apply checks for segment validity in the 4467 * following order: 4468 * - segment type check (#GP(0) may be thrown) 4469 * - usability check (#GP(0)/#SS(0)) 4470 * - limit check (#GP(0)/#SS(0)) 4471 */ 4472 if (wr) 4473 /* #GP(0) if the destination operand is located in a 4474 * read-only data segment or any code segment. 4475 */ 4476 exn = ((s.type & 0xa) == 0 || (s.type & 8)); 4477 else 4478 /* #GP(0) if the source operand is located in an 4479 * execute-only code segment 4480 */ 4481 exn = ((s.type & 0xa) == 8); 4482 if (exn) { 4483 kvm_queue_exception_e(vcpu, GP_VECTOR, 0); 4484 return 1; 4485 } 4486 /* Protected mode: #GP(0)/#SS(0) if the segment is unusable. 4487 */ 4488 exn = (s.unusable != 0); 4489 4490 /* 4491 * Protected mode: #GP(0)/#SS(0) if the memory operand is 4492 * outside the segment limit. All CPUs that support VMX ignore 4493 * limit checks for flat segments, i.e. segments with base==0, 4494 * limit==0xffffffff and of type expand-up data or code. 4495 */ 4496 if (!(s.base == 0 && s.limit == 0xffffffff && 4497 ((s.type & 8) || !(s.type & 4)))) 4498 exn = exn || ((u64)off + len - 1 > s.limit); 4499 } 4500 if (exn) { 4501 kvm_queue_exception_e(vcpu, 4502 seg_reg == VCPU_SREG_SS ? 
4503 SS_VECTOR : GP_VECTOR, 4504 0); 4505 return 1; 4506 } 4507 4508 return 0; 4509 } 4510 4511 void nested_vmx_pmu_entry_exit_ctls_update(struct kvm_vcpu *vcpu) 4512 { 4513 struct vcpu_vmx *vmx; 4514 4515 if (!nested_vmx_allowed(vcpu)) 4516 return; 4517 4518 vmx = to_vmx(vcpu); 4519 if (kvm_x86_ops->pmu_ops->is_valid_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL)) { 4520 vmx->nested.msrs.entry_ctls_high |= 4521 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL; 4522 vmx->nested.msrs.exit_ctls_high |= 4523 VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL; 4524 } else { 4525 vmx->nested.msrs.entry_ctls_high &= 4526 ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL; 4527 vmx->nested.msrs.exit_ctls_high &= 4528 ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL; 4529 } 4530 } 4531 4532 static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer) 4533 { 4534 gva_t gva; 4535 struct x86_exception e; 4536 4537 if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), 4538 vmcs_read32(VMX_INSTRUCTION_INFO), false, 4539 sizeof(*vmpointer), &gva)) 4540 return 1; 4541 4542 if (kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e)) { 4543 kvm_inject_page_fault(vcpu, &e); 4544 return 1; 4545 } 4546 4547 return 0; 4548 } 4549 4550 /* 4551 * Allocate a shadow VMCS and associate it with the currently loaded 4552 * VMCS, unless such a shadow VMCS already exists. The newly allocated 4553 * VMCS is also VMCLEARed, so that it is ready for use. 4554 */ 4555 static struct vmcs *alloc_shadow_vmcs(struct kvm_vcpu *vcpu) 4556 { 4557 struct vcpu_vmx *vmx = to_vmx(vcpu); 4558 struct loaded_vmcs *loaded_vmcs = vmx->loaded_vmcs; 4559 4560 /* 4561 * We should allocate a shadow vmcs for vmcs01 only when L1 4562 * executes VMXON and free it when L1 executes VMXOFF. 4563 * As it is invalid to execute VMXON twice, we shouldn't reach 4564 * here when vmcs01 already have an allocated shadow vmcs. 4565 */ 4566 WARN_ON(loaded_vmcs == &vmx->vmcs01 && loaded_vmcs->shadow_vmcs); 4567 4568 if (!loaded_vmcs->shadow_vmcs) { 4569 loaded_vmcs->shadow_vmcs = alloc_vmcs(true); 4570 if (loaded_vmcs->shadow_vmcs) 4571 vmcs_clear(loaded_vmcs->shadow_vmcs); 4572 } 4573 return loaded_vmcs->shadow_vmcs; 4574 } 4575 4576 static int enter_vmx_operation(struct kvm_vcpu *vcpu) 4577 { 4578 struct vcpu_vmx *vmx = to_vmx(vcpu); 4579 int r; 4580 4581 r = alloc_loaded_vmcs(&vmx->nested.vmcs02); 4582 if (r < 0) 4583 goto out_vmcs02; 4584 4585 vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT); 4586 if (!vmx->nested.cached_vmcs12) 4587 goto out_cached_vmcs12; 4588 4589 vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT); 4590 if (!vmx->nested.cached_shadow_vmcs12) 4591 goto out_cached_shadow_vmcs12; 4592 4593 if (enable_shadow_vmcs && !alloc_shadow_vmcs(vcpu)) 4594 goto out_shadow_vmcs; 4595 4596 hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC, 4597 HRTIMER_MODE_REL_PINNED); 4598 vmx->nested.preemption_timer.function = vmx_preemption_timer_fn; 4599 4600 vmx->nested.vpid02 = allocate_vpid(); 4601 4602 vmx->nested.vmcs02_initialized = false; 4603 vmx->nested.vmxon = true; 4604 4605 if (pt_mode == PT_MODE_HOST_GUEST) { 4606 vmx->pt_desc.guest.ctl = 0; 4607 pt_update_intercept_for_msr(vmx); 4608 } 4609 4610 return 0; 4611 4612 out_shadow_vmcs: 4613 kfree(vmx->nested.cached_shadow_vmcs12); 4614 4615 out_cached_shadow_vmcs12: 4616 kfree(vmx->nested.cached_vmcs12); 4617 4618 out_cached_vmcs12: 4619 free_loaded_vmcs(&vmx->nested.vmcs02); 4620 4621 out_vmcs02: 4622 return -ENOMEM; 4623 } 4624 4625 /* 4626 * Emulate the VMXON instruction. 
4627 * Currently, we just remember that VMX is active, and do not save or even 4628 * inspect the argument to VMXON (the so-called "VMXON pointer") because we 4629 * do not currently need to store anything in that guest-allocated memory 4630 * region. Consequently, VMCLEAR and VMPTRLD also do not verify that the their 4631 * argument is different from the VMXON pointer (which the spec says they do). 4632 */ 4633 static int handle_vmon(struct kvm_vcpu *vcpu) 4634 { 4635 int ret; 4636 gpa_t vmptr; 4637 uint32_t revision; 4638 struct vcpu_vmx *vmx = to_vmx(vcpu); 4639 const u64 VMXON_NEEDED_FEATURES = FEAT_CTL_LOCKED 4640 | FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX; 4641 4642 /* 4643 * The Intel VMX Instruction Reference lists a bunch of bits that are 4644 * prerequisite to running VMXON, most notably cr4.VMXE must be set to 4645 * 1 (see vmx_set_cr4() for when we allow the guest to set this). 4646 * Otherwise, we should fail with #UD. But most faulting conditions 4647 * have already been checked by hardware, prior to the VM-exit for 4648 * VMXON. We do test guest cr4.VMXE because processor CR4 always has 4649 * that bit set to 1 in non-root mode. 4650 */ 4651 if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE)) { 4652 kvm_queue_exception(vcpu, UD_VECTOR); 4653 return 1; 4654 } 4655 4656 /* CPL=0 must be checked manually. */ 4657 if (vmx_get_cpl(vcpu)) { 4658 kvm_inject_gp(vcpu, 0); 4659 return 1; 4660 } 4661 4662 if (vmx->nested.vmxon) 4663 return nested_vmx_failValid(vcpu, 4664 VMXERR_VMXON_IN_VMX_ROOT_OPERATION); 4665 4666 if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES) 4667 != VMXON_NEEDED_FEATURES) { 4668 kvm_inject_gp(vcpu, 0); 4669 return 1; 4670 } 4671 4672 if (nested_vmx_get_vmptr(vcpu, &vmptr)) 4673 return 1; 4674 4675 /* 4676 * SDM 3: 24.11.5 4677 * The first 4 bytes of VMXON region contain the supported 4678 * VMCS revision identifier 4679 * 4680 * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case; 4681 * which replaces physical address width with 32 4682 */ 4683 if (!page_address_valid(vcpu, vmptr)) 4684 return nested_vmx_failInvalid(vcpu); 4685 4686 if (kvm_read_guest(vcpu->kvm, vmptr, &revision, sizeof(revision)) || 4687 revision != VMCS12_REVISION) 4688 return nested_vmx_failInvalid(vcpu); 4689 4690 vmx->nested.vmxon_ptr = vmptr; 4691 ret = enter_vmx_operation(vcpu); 4692 if (ret) 4693 return ret; 4694 4695 return nested_vmx_succeed(vcpu); 4696 } 4697 4698 static inline void nested_release_vmcs12(struct kvm_vcpu *vcpu) 4699 { 4700 struct vcpu_vmx *vmx = to_vmx(vcpu); 4701 4702 if (vmx->nested.current_vmptr == -1ull) 4703 return; 4704 4705 copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu)); 4706 4707 if (enable_shadow_vmcs) { 4708 /* copy to memory all shadowed fields in case 4709 they were modified */ 4710 copy_shadow_to_vmcs12(vmx); 4711 vmx_disable_shadow_vmcs(vmx); 4712 } 4713 vmx->nested.posted_intr_nv = -1; 4714 4715 /* Flush VMCS12 to guest memory */ 4716 kvm_vcpu_write_guest_page(vcpu, 4717 vmx->nested.current_vmptr >> PAGE_SHIFT, 4718 vmx->nested.cached_vmcs12, 0, VMCS12_SIZE); 4719 4720 kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL); 4721 4722 vmx->nested.current_vmptr = -1ull; 4723 } 4724 4725 /* Emulate the VMXOFF instruction */ 4726 static int handle_vmoff(struct kvm_vcpu *vcpu) 4727 { 4728 if (!nested_vmx_check_permission(vcpu)) 4729 return 1; 4730 4731 free_nested(vcpu); 4732 4733 /* Process a latched INIT during time CPU was in VMX operation */ 4734 kvm_make_request(KVM_REQ_EVENT, vcpu); 4735 4736 return nested_vmx_succeed(vcpu); 4737 } 
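
/*
 * For context, a minimal sketch of the state an L1 guest needs before the
 * VMXON emulated above can succeed (illustrative pseudo-code only, not part
 * of KVM; "vmxon_region" is a hypothetical page-aligned 4KiB buffer):
 *
 *	cr4 |= X86_CR4_VMXE;                     // tested via kvm_read_cr4_bits()
 *	wrmsr(MSR_IA32_FEAT_CTL, FEAT_CTL_LOCKED |
 *	      FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX); // the VMXON_NEEDED_FEATURES above
 *	*(u32 *)vmxon_region = VMCS12_REVISION;  // as reported via IA32_VMX_BASIC
 *	vmxon(__pa(vmxon_region));               // at CPL 0, with a pointer that
 *						 // passes page_address_valid()
 */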
4738 4739 /* Emulate the VMCLEAR instruction */ 4740 static int handle_vmclear(struct kvm_vcpu *vcpu) 4741 { 4742 struct vcpu_vmx *vmx = to_vmx(vcpu); 4743 u32 zero = 0; 4744 gpa_t vmptr; 4745 u64 evmcs_gpa; 4746 4747 if (!nested_vmx_check_permission(vcpu)) 4748 return 1; 4749 4750 if (nested_vmx_get_vmptr(vcpu, &vmptr)) 4751 return 1; 4752 4753 if (!page_address_valid(vcpu, vmptr)) 4754 return nested_vmx_failValid(vcpu, 4755 VMXERR_VMCLEAR_INVALID_ADDRESS); 4756 4757 if (vmptr == vmx->nested.vmxon_ptr) 4758 return nested_vmx_failValid(vcpu, 4759 VMXERR_VMCLEAR_VMXON_POINTER); 4760 4761 /* 4762 * When Enlightened VMEntry is enabled on the calling CPU we treat 4763 * memory area pointer by vmptr as Enlightened VMCS (as there's no good 4764 * way to distinguish it from VMCS12) and we must not corrupt it by 4765 * writing to the non-existent 'launch_state' field. The area doesn't 4766 * have to be the currently active EVMCS on the calling CPU and there's 4767 * nothing KVM has to do to transition it from 'active' to 'non-active' 4768 * state. It is possible that the area will stay mapped as 4769 * vmx->nested.hv_evmcs but this shouldn't be a problem. 4770 */ 4771 if (likely(!vmx->nested.enlightened_vmcs_enabled || 4772 !nested_enlightened_vmentry(vcpu, &evmcs_gpa))) { 4773 if (vmptr == vmx->nested.current_vmptr) 4774 nested_release_vmcs12(vcpu); 4775 4776 kvm_vcpu_write_guest(vcpu, 4777 vmptr + offsetof(struct vmcs12, 4778 launch_state), 4779 &zero, sizeof(zero)); 4780 } 4781 4782 return nested_vmx_succeed(vcpu); 4783 } 4784 4785 /* Emulate the VMLAUNCH instruction */ 4786 static int handle_vmlaunch(struct kvm_vcpu *vcpu) 4787 { 4788 return nested_vmx_run(vcpu, true); 4789 } 4790 4791 /* Emulate the VMRESUME instruction */ 4792 static int handle_vmresume(struct kvm_vcpu *vcpu) 4793 { 4794 4795 return nested_vmx_run(vcpu, false); 4796 } 4797 4798 static int handle_vmread(struct kvm_vcpu *vcpu) 4799 { 4800 struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu) 4801 : get_vmcs12(vcpu); 4802 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); 4803 u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO); 4804 struct vcpu_vmx *vmx = to_vmx(vcpu); 4805 struct x86_exception e; 4806 unsigned long field; 4807 u64 value; 4808 gva_t gva = 0; 4809 short offset; 4810 int len; 4811 4812 if (!nested_vmx_check_permission(vcpu)) 4813 return 1; 4814 4815 /* 4816 * In VMX non-root operation, when the VMCS-link pointer is -1ull, 4817 * any VMREAD sets the ALU flags for VMfailInvalid. 4818 */ 4819 if (vmx->nested.current_vmptr == -1ull || 4820 (is_guest_mode(vcpu) && 4821 get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)) 4822 return nested_vmx_failInvalid(vcpu); 4823 4824 /* Decode instruction info and find the field to read */ 4825 field = kvm_register_readl(vcpu, (((instr_info) >> 28) & 0xf)); 4826 4827 offset = vmcs_field_to_offset(field); 4828 if (offset < 0) 4829 return nested_vmx_failValid(vcpu, 4830 VMXERR_UNSUPPORTED_VMCS_COMPONENT); 4831 4832 if (!is_guest_mode(vcpu) && is_vmcs12_ext_field(field)) 4833 copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12); 4834 4835 /* Read the field, zero-extended to a u64 value */ 4836 value = vmcs12_read_any(vmcs12, field, offset); 4837 4838 /* 4839 * Now copy part of this value to register or memory, as requested. 4840 * Note that the number of bits actually copied is 32 or 64 depending 4841 * on the guest's mode (32 or 64 bit), not on the given field's length. 
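	 * For example, a VMREAD of the 64-bit GUEST_IA32_EFER field by a
	 * 32-bit L1 copies only the low 32 bits to the destination register
	 * or to a 4-byte memory operand.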
4842 */ 4843 if (instr_info & BIT(10)) { 4844 kvm_register_writel(vcpu, (((instr_info) >> 3) & 0xf), value); 4845 } else { 4846 len = is_64_bit_mode(vcpu) ? 8 : 4; 4847 if (get_vmx_mem_address(vcpu, exit_qualification, 4848 instr_info, true, len, &gva)) 4849 return 1; 4850 /* _system ok, nested_vmx_check_permission has verified cpl=0 */ 4851 if (kvm_write_guest_virt_system(vcpu, gva, &value, len, &e)) { 4852 kvm_inject_page_fault(vcpu, &e); 4853 return 1; 4854 } 4855 } 4856 4857 return nested_vmx_succeed(vcpu); 4858 } 4859 4860 static bool is_shadow_field_rw(unsigned long field) 4861 { 4862 switch (field) { 4863 #define SHADOW_FIELD_RW(x, y) case x: 4864 #include "vmcs_shadow_fields.h" 4865 return true; 4866 default: 4867 break; 4868 } 4869 return false; 4870 } 4871 4872 static bool is_shadow_field_ro(unsigned long field) 4873 { 4874 switch (field) { 4875 #define SHADOW_FIELD_RO(x, y) case x: 4876 #include "vmcs_shadow_fields.h" 4877 return true; 4878 default: 4879 break; 4880 } 4881 return false; 4882 } 4883 4884 static int handle_vmwrite(struct kvm_vcpu *vcpu) 4885 { 4886 struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu) 4887 : get_vmcs12(vcpu); 4888 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); 4889 u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO); 4890 struct vcpu_vmx *vmx = to_vmx(vcpu); 4891 struct x86_exception e; 4892 unsigned long field; 4893 short offset; 4894 gva_t gva; 4895 int len; 4896 4897 /* 4898 * The value to write might be 32 or 64 bits, depending on L1's long 4899 * mode, and eventually we need to write that into a field of several 4900 * possible lengths. The code below first zero-extends the value to 64 4901 * bit (value), and then copies only the appropriate number of 4902 * bits into the vmcs12 field. 4903 */ 4904 u64 value = 0; 4905 4906 if (!nested_vmx_check_permission(vcpu)) 4907 return 1; 4908 4909 /* 4910 * In VMX non-root operation, when the VMCS-link pointer is -1ull, 4911 * any VMWRITE sets the ALU flags for VMfailInvalid. 4912 */ 4913 if (vmx->nested.current_vmptr == -1ull || 4914 (is_guest_mode(vcpu) && 4915 get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)) 4916 return nested_vmx_failInvalid(vcpu); 4917 4918 if (instr_info & BIT(10)) 4919 value = kvm_register_readl(vcpu, (((instr_info) >> 3) & 0xf)); 4920 else { 4921 len = is_64_bit_mode(vcpu) ? 8 : 4; 4922 if (get_vmx_mem_address(vcpu, exit_qualification, 4923 instr_info, false, len, &gva)) 4924 return 1; 4925 if (kvm_read_guest_virt(vcpu, gva, &value, len, &e)) { 4926 kvm_inject_page_fault(vcpu, &e); 4927 return 1; 4928 } 4929 } 4930 4931 field = kvm_register_readl(vcpu, (((instr_info) >> 28) & 0xf)); 4932 4933 offset = vmcs_field_to_offset(field); 4934 if (offset < 0) 4935 return nested_vmx_failValid(vcpu, 4936 VMXERR_UNSUPPORTED_VMCS_COMPONENT); 4937 4938 /* 4939 * If the vCPU supports "VMWRITE to any supported field in the 4940 * VMCS," then the "read-only" fields are actually read/write. 4941 */ 4942 if (vmcs_field_readonly(field) && 4943 !nested_cpu_has_vmwrite_any_field(vcpu)) 4944 return nested_vmx_failValid(vcpu, 4945 VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT); 4946 4947 /* 4948 * Ensure vmcs12 is up-to-date before any VMWRITE that dirties 4949 * vmcs12, else we may crush a field or consume a stale value. 4950 */ 4951 if (!is_guest_mode(vcpu) && !is_shadow_field_rw(field)) 4952 copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12); 4953 4954 /* 4955 * Some Intel CPUs intentionally drop the reserved bits of the AR byte 4956 * fields on VMWRITE. 
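	 * (Concretely, the 0x1f0ff mask applied below keeps bits 7:0, i.e.
	 * segment type, S, DPL and P, plus bits 16:12, i.e. AVL, L, D/B, G
	 * and the "unusable" bit, while clearing reserved bits 11:8 and
	 * 31:17.)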
Emulate this behavior to ensure consistent KVM 4957 * behavior regardless of the underlying hardware, e.g. if an AR_BYTE 4958 * field is intercepted for VMWRITE but not VMREAD (in L1), then VMREAD 4959 * from L1 will return a different value than VMREAD from L2 (L1 sees 4960 * the stripped down value, L2 sees the full value as stored by KVM). 4961 */ 4962 if (field >= GUEST_ES_AR_BYTES && field <= GUEST_TR_AR_BYTES) 4963 value &= 0x1f0ff; 4964 4965 vmcs12_write_any(vmcs12, field, offset, value); 4966 4967 /* 4968 * Do not track vmcs12 dirty-state if in guest-mode as we actually 4969 * dirty shadow vmcs12 instead of vmcs12. Fields that can be updated 4970 * by L1 without a vmexit are always updated in the vmcs02, i.e. don't 4971 * "dirty" vmcs12, all others go down the prepare_vmcs02() slow path. 4972 */ 4973 if (!is_guest_mode(vcpu) && !is_shadow_field_rw(field)) { 4974 /* 4975 * L1 can read these fields without exiting, ensure the 4976 * shadow VMCS is up-to-date. 4977 */ 4978 if (enable_shadow_vmcs && is_shadow_field_ro(field)) { 4979 preempt_disable(); 4980 vmcs_load(vmx->vmcs01.shadow_vmcs); 4981 4982 __vmcs_writel(field, value); 4983 4984 vmcs_clear(vmx->vmcs01.shadow_vmcs); 4985 vmcs_load(vmx->loaded_vmcs->vmcs); 4986 preempt_enable(); 4987 } 4988 vmx->nested.dirty_vmcs12 = true; 4989 } 4990 4991 return nested_vmx_succeed(vcpu); 4992 } 4993 4994 static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr) 4995 { 4996 vmx->nested.current_vmptr = vmptr; 4997 if (enable_shadow_vmcs) { 4998 secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_SHADOW_VMCS); 4999 vmcs_write64(VMCS_LINK_POINTER, 5000 __pa(vmx->vmcs01.shadow_vmcs)); 5001 vmx->nested.need_vmcs12_to_shadow_sync = true; 5002 } 5003 vmx->nested.dirty_vmcs12 = true; 5004 } 5005 5006 /* Emulate the VMPTRLD instruction */ 5007 static int handle_vmptrld(struct kvm_vcpu *vcpu) 5008 { 5009 struct vcpu_vmx *vmx = to_vmx(vcpu); 5010 gpa_t vmptr; 5011 5012 if (!nested_vmx_check_permission(vcpu)) 5013 return 1; 5014 5015 if (nested_vmx_get_vmptr(vcpu, &vmptr)) 5016 return 1; 5017 5018 if (!page_address_valid(vcpu, vmptr)) 5019 return nested_vmx_failValid(vcpu, 5020 VMXERR_VMPTRLD_INVALID_ADDRESS); 5021 5022 if (vmptr == vmx->nested.vmxon_ptr) 5023 return nested_vmx_failValid(vcpu, 5024 VMXERR_VMPTRLD_VMXON_POINTER); 5025 5026 /* Forbid normal VMPTRLD if Enlightened version was used */ 5027 if (vmx->nested.hv_evmcs) 5028 return 1; 5029 5030 if (vmx->nested.current_vmptr != vmptr) { 5031 struct kvm_host_map map; 5032 struct vmcs12 *new_vmcs12; 5033 5034 if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmptr), &map)) { 5035 /* 5036 * Reads from an unbacked page return all 1s, 5037 * which means that the 32 bits located at the 5038 * given physical address won't match the required 5039 * VMCS12_REVISION identifier. 5040 */ 5041 return nested_vmx_failValid(vcpu, 5042 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID); 5043 } 5044 5045 new_vmcs12 = map.hva; 5046 5047 if (new_vmcs12->hdr.revision_id != VMCS12_REVISION || 5048 (new_vmcs12->hdr.shadow_vmcs && 5049 !nested_cpu_has_vmx_shadow_vmcs(vcpu))) { 5050 kvm_vcpu_unmap(vcpu, &map, false); 5051 return nested_vmx_failValid(vcpu, 5052 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID); 5053 } 5054 5055 nested_release_vmcs12(vcpu); 5056 5057 /* 5058 * Load VMCS12 from guest memory since it is not already 5059 * cached. 
	 */
		memcpy(vmx->nested.cached_vmcs12, new_vmcs12, VMCS12_SIZE);
		kvm_vcpu_unmap(vcpu, &map, false);

		set_current_vmptr(vmx, vmptr);
	}

	return nested_vmx_succeed(vcpu);
}

/* Emulate the VMPTRST instruction */
static int handle_vmptrst(struct kvm_vcpu *vcpu)
{
	unsigned long exit_qual = vmcs_readl(EXIT_QUALIFICATION);
	u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
	gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr;
	struct x86_exception e;
	gva_t gva;

	if (!nested_vmx_check_permission(vcpu))
		return 1;

	if (unlikely(to_vmx(vcpu)->nested.hv_evmcs))
		return 1;

	if (get_vmx_mem_address(vcpu, exit_qual, instr_info,
				true, sizeof(gpa_t), &gva))
		return 1;
	/* *_system ok, nested_vmx_check_permission has verified cpl=0 */
	if (kvm_write_guest_virt_system(vcpu, gva, (void *)&current_vmptr,
					sizeof(gpa_t), &e)) {
		kvm_inject_page_fault(vcpu, &e);
		return 1;
	}
	return nested_vmx_succeed(vcpu);
}

/* Emulate the INVEPT instruction */
static int handle_invept(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 vmx_instruction_info, types;
	unsigned long type;
	gva_t gva;
	struct x86_exception e;
	struct {
		u64 eptp, gpa;
	} operand;

	if (!(vmx->nested.msrs.secondary_ctls_high &
	      SECONDARY_EXEC_ENABLE_EPT) ||
	    !(vmx->nested.msrs.ept_caps & VMX_EPT_INVEPT_BIT)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	if (!nested_vmx_check_permission(vcpu))
		return 1;

	vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
	type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);

	types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;

	if (type >= 32 || !(types & (1 << type)))
		return nested_vmx_failValid(vcpu,
				VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);

	/* According to the Intel VMX instruction reference, the memory
	 * operand is read even if it isn't needed (e.g., for type==global)
	 */
	if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
			vmx_instruction_info, false, sizeof(operand), &gva))
		return 1;
	if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
		kvm_inject_page_fault(vcpu, &e);
		return 1;
	}

	switch (type) {
	case VMX_EPT_EXTENT_GLOBAL:
	case VMX_EPT_EXTENT_CONTEXT:
	/*
	 * TODO: Sync the necessary shadow EPT roots here, rather than
	 * at the next emulated VM-entry.
5145 */ 5146 break; 5147 default: 5148 BUG_ON(1); 5149 break; 5150 } 5151 5152 return nested_vmx_succeed(vcpu); 5153 } 5154 5155 static int handle_invvpid(struct kvm_vcpu *vcpu) 5156 { 5157 struct vcpu_vmx *vmx = to_vmx(vcpu); 5158 u32 vmx_instruction_info; 5159 unsigned long type, types; 5160 gva_t gva; 5161 struct x86_exception e; 5162 struct { 5163 u64 vpid; 5164 u64 gla; 5165 } operand; 5166 u16 vpid02; 5167 5168 if (!(vmx->nested.msrs.secondary_ctls_high & 5169 SECONDARY_EXEC_ENABLE_VPID) || 5170 !(vmx->nested.msrs.vpid_caps & VMX_VPID_INVVPID_BIT)) { 5171 kvm_queue_exception(vcpu, UD_VECTOR); 5172 return 1; 5173 } 5174 5175 if (!nested_vmx_check_permission(vcpu)) 5176 return 1; 5177 5178 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); 5179 type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf); 5180 5181 types = (vmx->nested.msrs.vpid_caps & 5182 VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8; 5183 5184 if (type >= 32 || !(types & (1 << type))) 5185 return nested_vmx_failValid(vcpu, 5186 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 5187 5188 /* according to the intel vmx instruction reference, the memory 5189 * operand is read even if it isn't needed (e.g., for type==global) 5190 */ 5191 if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), 5192 vmx_instruction_info, false, sizeof(operand), &gva)) 5193 return 1; 5194 if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) { 5195 kvm_inject_page_fault(vcpu, &e); 5196 return 1; 5197 } 5198 if (operand.vpid >> 16) 5199 return nested_vmx_failValid(vcpu, 5200 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 5201 5202 vpid02 = nested_get_vpid02(vcpu); 5203 switch (type) { 5204 case VMX_VPID_EXTENT_INDIVIDUAL_ADDR: 5205 if (!operand.vpid || 5206 is_noncanonical_address(operand.gla, vcpu)) 5207 return nested_vmx_failValid(vcpu, 5208 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 5209 if (cpu_has_vmx_invvpid_individual_addr()) { 5210 __invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR, 5211 vpid02, operand.gla); 5212 } else 5213 __vmx_flush_tlb(vcpu, vpid02, false); 5214 break; 5215 case VMX_VPID_EXTENT_SINGLE_CONTEXT: 5216 case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL: 5217 if (!operand.vpid) 5218 return nested_vmx_failValid(vcpu, 5219 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); 5220 __vmx_flush_tlb(vcpu, vpid02, false); 5221 break; 5222 case VMX_VPID_EXTENT_ALL_CONTEXT: 5223 __vmx_flush_tlb(vcpu, vpid02, false); 5224 break; 5225 default: 5226 WARN_ON_ONCE(1); 5227 return kvm_skip_emulated_instruction(vcpu); 5228 } 5229 5230 return nested_vmx_succeed(vcpu); 5231 } 5232 5233 static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu, 5234 struct vmcs12 *vmcs12) 5235 { 5236 u32 index = kvm_rcx_read(vcpu); 5237 u64 address; 5238 bool accessed_dirty; 5239 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; 5240 5241 if (!nested_cpu_has_eptp_switching(vmcs12) || 5242 !nested_cpu_has_ept(vmcs12)) 5243 return 1; 5244 5245 if (index >= VMFUNC_EPTP_ENTRIES) 5246 return 1; 5247 5248 5249 if (kvm_vcpu_read_guest_page(vcpu, vmcs12->eptp_list_address >> PAGE_SHIFT, 5250 &address, index * 8, 8)) 5251 return 1; 5252 5253 accessed_dirty = !!(address & VMX_EPTP_AD_ENABLE_BIT); 5254 5255 /* 5256 * If the (L2) guest does a vmfunc to the currently 5257 * active ept pointer, we don't have to do anything else 5258 */ 5259 if (vmcs12->ept_pointer != address) { 5260 if (!valid_ept_address(vcpu, address)) 5261 return 1; 5262 5263 kvm_mmu_unload(vcpu); 5264 mmu->ept_ad = accessed_dirty; 5265 mmu->mmu_role.base.ad_disabled = !accessed_dirty; 5266 vmcs12->ept_pointer = 
address; 5267 /* 5268 * TODO: Check what's the correct approach in case 5269 * mmu reload fails. Currently, we just let the next 5270 * reload potentially fail 5271 */ 5272 kvm_mmu_reload(vcpu); 5273 } 5274 5275 return 0; 5276 } 5277 5278 static int handle_vmfunc(struct kvm_vcpu *vcpu) 5279 { 5280 struct vcpu_vmx *vmx = to_vmx(vcpu); 5281 struct vmcs12 *vmcs12; 5282 u32 function = kvm_rax_read(vcpu); 5283 5284 /* 5285 * VMFUNC is only supported for nested guests, but we always enable the 5286 * secondary control for simplicity; for non-nested mode, fake that we 5287 * didn't by injecting #UD. 5288 */ 5289 if (!is_guest_mode(vcpu)) { 5290 kvm_queue_exception(vcpu, UD_VECTOR); 5291 return 1; 5292 } 5293 5294 vmcs12 = get_vmcs12(vcpu); 5295 if ((vmcs12->vm_function_control & (1 << function)) == 0) 5296 goto fail; 5297 5298 switch (function) { 5299 case 0: 5300 if (nested_vmx_eptp_switching(vcpu, vmcs12)) 5301 goto fail; 5302 break; 5303 default: 5304 goto fail; 5305 } 5306 return kvm_skip_emulated_instruction(vcpu); 5307 5308 fail: 5309 nested_vmx_vmexit(vcpu, vmx->exit_reason, 5310 vmcs_read32(VM_EXIT_INTR_INFO), 5311 vmcs_readl(EXIT_QUALIFICATION)); 5312 return 1; 5313 } 5314 5315 /* 5316 * Return true if an IO instruction with the specified port and size should cause 5317 * a VM-exit into L1. 5318 */ 5319 bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port, 5320 int size) 5321 { 5322 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 5323 gpa_t bitmap, last_bitmap; 5324 u8 b; 5325 5326 last_bitmap = (gpa_t)-1; 5327 b = -1; 5328 5329 while (size > 0) { 5330 if (port < 0x8000) 5331 bitmap = vmcs12->io_bitmap_a; 5332 else if (port < 0x10000) 5333 bitmap = vmcs12->io_bitmap_b; 5334 else 5335 return true; 5336 bitmap += (port & 0x7fff) / 8; 5337 5338 if (last_bitmap != bitmap) 5339 if (kvm_vcpu_read_guest(vcpu, bitmap, &b, 1)) 5340 return true; 5341 if (b & (1 << (port & 7))) 5342 return true; 5343 5344 port++; 5345 size--; 5346 last_bitmap = bitmap; 5347 } 5348 5349 return false; 5350 } 5351 5352 static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu, 5353 struct vmcs12 *vmcs12) 5354 { 5355 unsigned long exit_qualification; 5356 unsigned short port; 5357 int size; 5358 5359 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS)) 5360 return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING); 5361 5362 exit_qualification = vmcs_readl(EXIT_QUALIFICATION); 5363 5364 port = exit_qualification >> 16; 5365 size = (exit_qualification & 7) + 1; 5366 5367 return nested_vmx_check_io_bitmaps(vcpu, port, size); 5368 } 5369 5370 /* 5371 * Return 1 if we should exit from L2 to L1 to handle an MSR access, 5372 * rather than handle it ourselves in L0. I.e., check whether L1 expressed 5373 * disinterest in the current event (read or write a specific MSR) by using an 5374 * MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps. 5375 */ 5376 static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu, 5377 struct vmcs12 *vmcs12, u32 exit_reason) 5378 { 5379 u32 msr_index = kvm_rcx_read(vcpu); 5380 gpa_t bitmap; 5381 5382 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS)) 5383 return true; 5384 5385 /* 5386 * The MSR_BITMAP page is divided into four 1024-byte bitmaps, 5387 * for the four combinations of read/write and low/high MSR numbers. 
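 * With the adjustments below, the layout within the page is:
 *   0x000 - 0x3ff: read bitmap for low MSRs  (0x00000000 - 0x00001fff)
 *   0x400 - 0x7ff: read bitmap for high MSRs (0xc0000000 - 0xc0001fff)
 *   0x800 - 0xbff: write bitmap for low MSRs
 *   0xc00 - 0xfff: write bitmap for high MSRs
 * e.g. a WRMSR to 0xc0000080 (EFER) tests bit 0x80 of the write-high bitmap,
 * i.e. bit 0 of the byte at page offset 0xc10.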
5388 * First we need to figure out which of the four to use: 5389 */ 5390 bitmap = vmcs12->msr_bitmap; 5391 if (exit_reason == EXIT_REASON_MSR_WRITE) 5392 bitmap += 2048; 5393 if (msr_index >= 0xc0000000) { 5394 msr_index -= 0xc0000000; 5395 bitmap += 1024; 5396 } 5397 5398 /* Then read the msr_index'th bit from this bitmap: */ 5399 if (msr_index < 1024*8) { 5400 unsigned char b; 5401 if (kvm_vcpu_read_guest(vcpu, bitmap + msr_index/8, &b, 1)) 5402 return true; 5403 return 1 & (b >> (msr_index & 7)); 5404 } else 5405 return true; /* let L1 handle the wrong parameter */ 5406 } 5407 5408 /* 5409 * Return 1 if we should exit from L2 to L1 to handle a CR access exit, 5410 * rather than handle it ourselves in L0. I.e., check if L1 wanted to 5411 * intercept (via guest_host_mask etc.) the current event. 5412 */ 5413 static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu, 5414 struct vmcs12 *vmcs12) 5415 { 5416 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); 5417 int cr = exit_qualification & 15; 5418 int reg; 5419 unsigned long val; 5420 5421 switch ((exit_qualification >> 4) & 3) { 5422 case 0: /* mov to cr */ 5423 reg = (exit_qualification >> 8) & 15; 5424 val = kvm_register_readl(vcpu, reg); 5425 switch (cr) { 5426 case 0: 5427 if (vmcs12->cr0_guest_host_mask & 5428 (val ^ vmcs12->cr0_read_shadow)) 5429 return true; 5430 break; 5431 case 3: 5432 if ((vmcs12->cr3_target_count >= 1 && 5433 vmcs12->cr3_target_value0 == val) || 5434 (vmcs12->cr3_target_count >= 2 && 5435 vmcs12->cr3_target_value1 == val) || 5436 (vmcs12->cr3_target_count >= 3 && 5437 vmcs12->cr3_target_value2 == val) || 5438 (vmcs12->cr3_target_count >= 4 && 5439 vmcs12->cr3_target_value3 == val)) 5440 return false; 5441 if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING)) 5442 return true; 5443 break; 5444 case 4: 5445 if (vmcs12->cr4_guest_host_mask & 5446 (vmcs12->cr4_read_shadow ^ val)) 5447 return true; 5448 break; 5449 case 8: 5450 if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING)) 5451 return true; 5452 break; 5453 } 5454 break; 5455 case 2: /* clts */ 5456 if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) && 5457 (vmcs12->cr0_read_shadow & X86_CR0_TS)) 5458 return true; 5459 break; 5460 case 1: /* mov from cr */ 5461 switch (cr) { 5462 case 3: 5463 if (vmcs12->cpu_based_vm_exec_control & 5464 CPU_BASED_CR3_STORE_EXITING) 5465 return true; 5466 break; 5467 case 8: 5468 if (vmcs12->cpu_based_vm_exec_control & 5469 CPU_BASED_CR8_STORE_EXITING) 5470 return true; 5471 break; 5472 } 5473 break; 5474 case 3: /* lmsw */ 5475 /* 5476 * lmsw can change bits 1..3 of cr0, and only set bit 0 of 5477 * cr0. Other attempted changes are ignored, with no exit. 
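 * Hence the two checks below: bits 3:1 cause an exit if the lmsw source
 * differs from cr0_read_shadow in a bit L1 owns (guest_host_mask & 0xe),
 * while bit 0 (PE) causes an exit only when L1 owns it, the read shadow has
 * it clear, and the lmsw source would set it.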
5478 */ 5479 val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f; 5480 if (vmcs12->cr0_guest_host_mask & 0xe & 5481 (val ^ vmcs12->cr0_read_shadow)) 5482 return true; 5483 if ((vmcs12->cr0_guest_host_mask & 0x1) && 5484 !(vmcs12->cr0_read_shadow & 0x1) && 5485 (val & 0x1)) 5486 return true; 5487 break; 5488 } 5489 return false; 5490 } 5491 5492 static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu, 5493 struct vmcs12 *vmcs12, gpa_t bitmap) 5494 { 5495 u32 vmx_instruction_info; 5496 unsigned long field; 5497 u8 b; 5498 5499 if (!nested_cpu_has_shadow_vmcs(vmcs12)) 5500 return true; 5501 5502 /* Decode instruction info and find the field to access */ 5503 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); 5504 field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); 5505 5506 /* Out-of-range fields always cause a VM exit from L2 to L1 */ 5507 if (field >> 15) 5508 return true; 5509 5510 if (kvm_vcpu_read_guest(vcpu, bitmap + field/8, &b, 1)) 5511 return true; 5512 5513 return 1 & (b >> (field & 7)); 5514 } 5515 5516 /* 5517 * Return 1 if we should exit from L2 to L1 to handle an exit, or 0 if we 5518 * should handle it ourselves in L0 (and then continue L2). Only call this 5519 * when in is_guest_mode (L2). 5520 */ 5521 bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason) 5522 { 5523 u32 intr_info = vmcs_read32(VM_EXIT_INTR_INFO); 5524 struct vcpu_vmx *vmx = to_vmx(vcpu); 5525 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 5526 5527 if (vmx->nested.nested_run_pending) 5528 return false; 5529 5530 if (unlikely(vmx->fail)) { 5531 trace_kvm_nested_vmenter_failed( 5532 "hardware VM-instruction error: ", 5533 vmcs_read32(VM_INSTRUCTION_ERROR)); 5534 return true; 5535 } 5536 5537 /* 5538 * The host physical addresses of some pages of guest memory 5539 * are loaded into the vmcs02 (e.g. vmcs12's Virtual APIC 5540 * Page). The CPU may write to these pages via their host 5541 * physical address while L2 is running, bypassing any 5542 * address-translation-based dirty tracking (e.g. EPT write 5543 * protection). 5544 * 5545 * Mark them dirty on every exit from L2 to prevent them from 5546 * getting out of sync with dirty tracking. 
5547 */ 5548 nested_mark_vmcs12_pages_dirty(vcpu); 5549 5550 trace_kvm_nested_vmexit(kvm_rip_read(vcpu), exit_reason, 5551 vmcs_readl(EXIT_QUALIFICATION), 5552 vmx->idt_vectoring_info, 5553 intr_info, 5554 vmcs_read32(VM_EXIT_INTR_ERROR_CODE), 5555 KVM_ISA_VMX); 5556 5557 switch (exit_reason) { 5558 case EXIT_REASON_EXCEPTION_NMI: 5559 if (is_nmi(intr_info)) 5560 return false; 5561 else if (is_page_fault(intr_info)) 5562 return !vmx->vcpu.arch.apf.host_apf_reason && enable_ept; 5563 else if (is_debug(intr_info) && 5564 vcpu->guest_debug & 5565 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) 5566 return false; 5567 else if (is_breakpoint(intr_info) && 5568 vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) 5569 return false; 5570 return vmcs12->exception_bitmap & 5571 (1u << (intr_info & INTR_INFO_VECTOR_MASK)); 5572 case EXIT_REASON_EXTERNAL_INTERRUPT: 5573 return false; 5574 case EXIT_REASON_TRIPLE_FAULT: 5575 return true; 5576 case EXIT_REASON_INTERRUPT_WINDOW: 5577 return nested_cpu_has(vmcs12, CPU_BASED_INTR_WINDOW_EXITING); 5578 case EXIT_REASON_NMI_WINDOW: 5579 return nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING); 5580 case EXIT_REASON_TASK_SWITCH: 5581 return true; 5582 case EXIT_REASON_CPUID: 5583 return true; 5584 case EXIT_REASON_HLT: 5585 return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING); 5586 case EXIT_REASON_INVD: 5587 return true; 5588 case EXIT_REASON_INVLPG: 5589 return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING); 5590 case EXIT_REASON_RDPMC: 5591 return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING); 5592 case EXIT_REASON_RDRAND: 5593 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDRAND_EXITING); 5594 case EXIT_REASON_RDSEED: 5595 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDSEED_EXITING); 5596 case EXIT_REASON_RDTSC: case EXIT_REASON_RDTSCP: 5597 return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING); 5598 case EXIT_REASON_VMREAD: 5599 return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12, 5600 vmcs12->vmread_bitmap); 5601 case EXIT_REASON_VMWRITE: 5602 return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12, 5603 vmcs12->vmwrite_bitmap); 5604 case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR: 5605 case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD: 5606 case EXIT_REASON_VMPTRST: case EXIT_REASON_VMRESUME: 5607 case EXIT_REASON_VMOFF: case EXIT_REASON_VMON: 5608 case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID: 5609 /* 5610 * VMX instructions trap unconditionally. This allows L1 to 5611 * emulate them for its L2 guest, i.e., allows 3-level nesting! 
5612 */ 5613 return true; 5614 case EXIT_REASON_CR_ACCESS: 5615 return nested_vmx_exit_handled_cr(vcpu, vmcs12); 5616 case EXIT_REASON_DR_ACCESS: 5617 return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING); 5618 case EXIT_REASON_IO_INSTRUCTION: 5619 return nested_vmx_exit_handled_io(vcpu, vmcs12); 5620 case EXIT_REASON_GDTR_IDTR: case EXIT_REASON_LDTR_TR: 5621 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC); 5622 case EXIT_REASON_MSR_READ: 5623 case EXIT_REASON_MSR_WRITE: 5624 return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason); 5625 case EXIT_REASON_INVALID_STATE: 5626 return true; 5627 case EXIT_REASON_MWAIT_INSTRUCTION: 5628 return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING); 5629 case EXIT_REASON_MONITOR_TRAP_FLAG: 5630 return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_TRAP_FLAG); 5631 case EXIT_REASON_MONITOR_INSTRUCTION: 5632 return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING); 5633 case EXIT_REASON_PAUSE_INSTRUCTION: 5634 return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) || 5635 nested_cpu_has2(vmcs12, 5636 SECONDARY_EXEC_PAUSE_LOOP_EXITING); 5637 case EXIT_REASON_MCE_DURING_VMENTRY: 5638 return false; 5639 case EXIT_REASON_TPR_BELOW_THRESHOLD: 5640 return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW); 5641 case EXIT_REASON_APIC_ACCESS: 5642 case EXIT_REASON_APIC_WRITE: 5643 case EXIT_REASON_EOI_INDUCED: 5644 /* 5645 * The controls for "virtualize APIC accesses," "APIC- 5646 * register virtualization," and "virtual-interrupt 5647 * delivery" only come from vmcs12. 5648 */ 5649 return true; 5650 case EXIT_REASON_EPT_VIOLATION: 5651 /* 5652 * L0 always deals with the EPT violation. If nested EPT is 5653 * used, and the nested mmu code discovers that the address is 5654 * missing in the guest EPT table (EPT12), the EPT violation 5655 * will be injected with nested_ept_inject_page_fault() 5656 */ 5657 return false; 5658 case EXIT_REASON_EPT_MISCONFIG: 5659 /* 5660 * L2 never uses directly L1's EPT, but rather L0's own EPT 5661 * table (shadow on EPT) or a merged EPT table that L0 built 5662 * (EPT on EPT). So any problems with the structure of the 5663 * table is L0's fault. 5664 */ 5665 return false; 5666 case EXIT_REASON_INVPCID: 5667 return 5668 nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_INVPCID) && 5669 nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING); 5670 case EXIT_REASON_WBINVD: 5671 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING); 5672 case EXIT_REASON_XSETBV: 5673 return true; 5674 case EXIT_REASON_XSAVES: case EXIT_REASON_XRSTORS: 5675 /* 5676 * This should never happen, since it is not possible to 5677 * set XSS to a non-zero value---neither in L1 nor in L2. 5678 * If if it were, XSS would have to be checked against 5679 * the XSS exit bitmap in vmcs12. 5680 */ 5681 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES); 5682 case EXIT_REASON_PREEMPTION_TIMER: 5683 return false; 5684 case EXIT_REASON_PML_FULL: 5685 /* We emulate PML support to L1. */ 5686 return false; 5687 case EXIT_REASON_VMFUNC: 5688 /* VM functions are emulated through L2->L0 vmexits. 
*/ 5689 return false; 5690 case EXIT_REASON_ENCLS: 5691 /* SGX is never exposed to L1 */ 5692 return false; 5693 case EXIT_REASON_UMWAIT: 5694 case EXIT_REASON_TPAUSE: 5695 return nested_cpu_has2(vmcs12, 5696 SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE); 5697 default: 5698 return true; 5699 } 5700 } 5701 5702 5703 static int vmx_get_nested_state(struct kvm_vcpu *vcpu, 5704 struct kvm_nested_state __user *user_kvm_nested_state, 5705 u32 user_data_size) 5706 { 5707 struct vcpu_vmx *vmx; 5708 struct vmcs12 *vmcs12; 5709 struct kvm_nested_state kvm_state = { 5710 .flags = 0, 5711 .format = KVM_STATE_NESTED_FORMAT_VMX, 5712 .size = sizeof(kvm_state), 5713 .hdr.vmx.vmxon_pa = -1ull, 5714 .hdr.vmx.vmcs12_pa = -1ull, 5715 }; 5716 struct kvm_vmx_nested_state_data __user *user_vmx_nested_state = 5717 &user_kvm_nested_state->data.vmx[0]; 5718 5719 if (!vcpu) 5720 return kvm_state.size + sizeof(*user_vmx_nested_state); 5721 5722 vmx = to_vmx(vcpu); 5723 vmcs12 = get_vmcs12(vcpu); 5724 5725 if (nested_vmx_allowed(vcpu) && 5726 (vmx->nested.vmxon || vmx->nested.smm.vmxon)) { 5727 kvm_state.hdr.vmx.vmxon_pa = vmx->nested.vmxon_ptr; 5728 kvm_state.hdr.vmx.vmcs12_pa = vmx->nested.current_vmptr; 5729 5730 if (vmx_has_valid_vmcs12(vcpu)) { 5731 kvm_state.size += sizeof(user_vmx_nested_state->vmcs12); 5732 5733 if (vmx->nested.hv_evmcs) 5734 kvm_state.flags |= KVM_STATE_NESTED_EVMCS; 5735 5736 if (is_guest_mode(vcpu) && 5737 nested_cpu_has_shadow_vmcs(vmcs12) && 5738 vmcs12->vmcs_link_pointer != -1ull) 5739 kvm_state.size += sizeof(user_vmx_nested_state->shadow_vmcs12); 5740 } 5741 5742 if (vmx->nested.smm.vmxon) 5743 kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON; 5744 5745 if (vmx->nested.smm.guest_mode) 5746 kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_GUEST_MODE; 5747 5748 if (is_guest_mode(vcpu)) { 5749 kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE; 5750 5751 if (vmx->nested.nested_run_pending) 5752 kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING; 5753 5754 if (vmx->nested.mtf_pending) 5755 kvm_state.flags |= KVM_STATE_NESTED_MTF_PENDING; 5756 } 5757 } 5758 5759 if (user_data_size < kvm_state.size) 5760 goto out; 5761 5762 if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state))) 5763 return -EFAULT; 5764 5765 if (!vmx_has_valid_vmcs12(vcpu)) 5766 goto out; 5767 5768 /* 5769 * When running L2, the authoritative vmcs12 state is in the 5770 * vmcs02. When running L1, the authoritative vmcs12 state is 5771 * in the shadow or enlightened vmcs linked to vmcs01, unless 5772 * need_vmcs12_to_shadow_sync is set, in which case, the authoritative 5773 * vmcs12 state is in the vmcs12 already. 5774 */ 5775 if (is_guest_mode(vcpu)) { 5776 sync_vmcs02_to_vmcs12(vcpu, vmcs12); 5777 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12); 5778 } else if (!vmx->nested.need_vmcs12_to_shadow_sync) { 5779 if (vmx->nested.hv_evmcs) 5780 copy_enlightened_to_vmcs12(vmx); 5781 else if (enable_shadow_vmcs) 5782 copy_shadow_to_vmcs12(vmx); 5783 } 5784 5785 BUILD_BUG_ON(sizeof(user_vmx_nested_state->vmcs12) < VMCS12_SIZE); 5786 BUILD_BUG_ON(sizeof(user_vmx_nested_state->shadow_vmcs12) < VMCS12_SIZE); 5787 5788 /* 5789 * Copy over the full allocated size of vmcs12 rather than just the size 5790 * of the struct. 
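 * (Both sides are sized for this: the BUILD_BUG_ON()s above guarantee the
 * userspace buffers hold at least VMCS12_SIZE bytes, and the cached vmcs12
 * and shadow vmcs12 are themselves VMCS12_SIZE allocations.)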
5791 */ 5792 if (copy_to_user(user_vmx_nested_state->vmcs12, vmcs12, VMCS12_SIZE)) 5793 return -EFAULT; 5794 5795 if (nested_cpu_has_shadow_vmcs(vmcs12) && 5796 vmcs12->vmcs_link_pointer != -1ull) { 5797 if (copy_to_user(user_vmx_nested_state->shadow_vmcs12, 5798 get_shadow_vmcs12(vcpu), VMCS12_SIZE)) 5799 return -EFAULT; 5800 } 5801 5802 out: 5803 return kvm_state.size; 5804 } 5805 5806 /* 5807 * Forcibly leave nested mode in order to be able to reset the VCPU later on. 5808 */ 5809 void vmx_leave_nested(struct kvm_vcpu *vcpu) 5810 { 5811 if (is_guest_mode(vcpu)) { 5812 to_vmx(vcpu)->nested.nested_run_pending = 0; 5813 nested_vmx_vmexit(vcpu, -1, 0, 0); 5814 } 5815 free_nested(vcpu); 5816 } 5817 5818 static int vmx_set_nested_state(struct kvm_vcpu *vcpu, 5819 struct kvm_nested_state __user *user_kvm_nested_state, 5820 struct kvm_nested_state *kvm_state) 5821 { 5822 struct vcpu_vmx *vmx = to_vmx(vcpu); 5823 struct vmcs12 *vmcs12; 5824 u32 exit_qual; 5825 struct kvm_vmx_nested_state_data __user *user_vmx_nested_state = 5826 &user_kvm_nested_state->data.vmx[0]; 5827 int ret; 5828 5829 if (kvm_state->format != KVM_STATE_NESTED_FORMAT_VMX) 5830 return -EINVAL; 5831 5832 if (kvm_state->hdr.vmx.vmxon_pa == -1ull) { 5833 if (kvm_state->hdr.vmx.smm.flags) 5834 return -EINVAL; 5835 5836 if (kvm_state->hdr.vmx.vmcs12_pa != -1ull) 5837 return -EINVAL; 5838 5839 /* 5840 * KVM_STATE_NESTED_EVMCS used to signal that KVM should 5841 * enable eVMCS capability on vCPU. However, since then 5842 * code was changed such that flag signals vmcs12 should 5843 * be copied into eVMCS in guest memory. 5844 * 5845 * To preserve backwards compatability, allow user 5846 * to set this flag even when there is no VMXON region. 5847 */ 5848 if (kvm_state->flags & ~KVM_STATE_NESTED_EVMCS) 5849 return -EINVAL; 5850 } else { 5851 if (!nested_vmx_allowed(vcpu)) 5852 return -EINVAL; 5853 5854 if (!page_address_valid(vcpu, kvm_state->hdr.vmx.vmxon_pa)) 5855 return -EINVAL; 5856 } 5857 5858 if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) && 5859 (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) 5860 return -EINVAL; 5861 5862 if (kvm_state->hdr.vmx.smm.flags & 5863 ~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON)) 5864 return -EINVAL; 5865 5866 /* 5867 * SMM temporarily disables VMX, so we cannot be in guest mode, 5868 * nor can VMLAUNCH/VMRESUME be pending. Outside SMM, SMM flags 5869 * must be zero. 5870 */ 5871 if (is_smm(vcpu) ? 
5872 (kvm_state->flags & 5873 (KVM_STATE_NESTED_GUEST_MODE | KVM_STATE_NESTED_RUN_PENDING)) 5874 : kvm_state->hdr.vmx.smm.flags) 5875 return -EINVAL; 5876 5877 if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) && 5878 !(kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON)) 5879 return -EINVAL; 5880 5881 if ((kvm_state->flags & KVM_STATE_NESTED_EVMCS) && 5882 (!nested_vmx_allowed(vcpu) || !vmx->nested.enlightened_vmcs_enabled)) 5883 return -EINVAL; 5884 5885 vmx_leave_nested(vcpu); 5886 5887 if (kvm_state->hdr.vmx.vmxon_pa == -1ull) 5888 return 0; 5889 5890 vmx->nested.vmxon_ptr = kvm_state->hdr.vmx.vmxon_pa; 5891 ret = enter_vmx_operation(vcpu); 5892 if (ret) 5893 return ret; 5894 5895 /* Empty 'VMXON' state is permitted */ 5896 if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12)) 5897 return 0; 5898 5899 if (kvm_state->hdr.vmx.vmcs12_pa != -1ull) { 5900 if (kvm_state->hdr.vmx.vmcs12_pa == kvm_state->hdr.vmx.vmxon_pa || 5901 !page_address_valid(vcpu, kvm_state->hdr.vmx.vmcs12_pa)) 5902 return -EINVAL; 5903 5904 set_current_vmptr(vmx, kvm_state->hdr.vmx.vmcs12_pa); 5905 } else if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) { 5906 /* 5907 * Sync eVMCS upon entry as we may not have 5908 * HV_X64_MSR_VP_ASSIST_PAGE set up yet. 5909 */ 5910 vmx->nested.need_vmcs12_to_shadow_sync = true; 5911 } else { 5912 return -EINVAL; 5913 } 5914 5915 if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) { 5916 vmx->nested.smm.vmxon = true; 5917 vmx->nested.vmxon = false; 5918 5919 if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) 5920 vmx->nested.smm.guest_mode = true; 5921 } 5922 5923 vmcs12 = get_vmcs12(vcpu); 5924 if (copy_from_user(vmcs12, user_vmx_nested_state->vmcs12, sizeof(*vmcs12))) 5925 return -EFAULT; 5926 5927 if (vmcs12->hdr.revision_id != VMCS12_REVISION) 5928 return -EINVAL; 5929 5930 if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) 5931 return 0; 5932 5933 vmx->nested.nested_run_pending = 5934 !!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING); 5935 5936 vmx->nested.mtf_pending = 5937 !!(kvm_state->flags & KVM_STATE_NESTED_MTF_PENDING); 5938 5939 ret = -EINVAL; 5940 if (nested_cpu_has_shadow_vmcs(vmcs12) && 5941 vmcs12->vmcs_link_pointer != -1ull) { 5942 struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu); 5943 5944 if (kvm_state->size < 5945 sizeof(*kvm_state) + 5946 sizeof(user_vmx_nested_state->vmcs12) + sizeof(*shadow_vmcs12)) 5947 goto error_guest_mode; 5948 5949 if (copy_from_user(shadow_vmcs12, 5950 user_vmx_nested_state->shadow_vmcs12, 5951 sizeof(*shadow_vmcs12))) { 5952 ret = -EFAULT; 5953 goto error_guest_mode; 5954 } 5955 5956 if (shadow_vmcs12->hdr.revision_id != VMCS12_REVISION || 5957 !shadow_vmcs12->hdr.shadow_vmcs) 5958 goto error_guest_mode; 5959 } 5960 5961 if (nested_vmx_check_controls(vcpu, vmcs12) || 5962 nested_vmx_check_host_state(vcpu, vmcs12) || 5963 nested_vmx_check_guest_state(vcpu, vmcs12, &exit_qual)) 5964 goto error_guest_mode; 5965 5966 vmx->nested.dirty_vmcs12 = true; 5967 ret = nested_vmx_enter_non_root_mode(vcpu, false); 5968 if (ret) 5969 goto error_guest_mode; 5970 5971 return 0; 5972 5973 error_guest_mode: 5974 vmx->nested.nested_run_pending = 0; 5975 return ret; 5976 } 5977 5978 void nested_vmx_set_vmcs_shadowing_bitmap(void) 5979 { 5980 if (enable_shadow_vmcs) { 5981 vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap)); 5982 vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap)); 5983 } 5984 } 5985 5986 /* 5987 * nested_vmx_setup_ctls_msrs() sets up variables containing the values to 
be 5988 * returned for the various VMX controls MSRs when nested VMX is enabled. 5989 * The same values should also be used to verify that vmcs12 control fields are 5990 * valid during nested entry from L1 to L2. 5991 * Each of these control msrs has a low and high 32-bit half: A low bit is on 5992 * if the corresponding bit in the (32-bit) control field *must* be on, and a 5993 * bit in the high half is on if the corresponding bit in the control field 5994 * may be on. See also vmx_control_verify(). 5995 */ 5996 void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps) 5997 { 5998 /* 5999 * Note that as a general rule, the high half of the MSRs (bits in 6000 * the control fields which may be 1) should be initialized by the 6001 * intersection of the underlying hardware's MSR (i.e., features which 6002 * can be supported) and the list of features we want to expose - 6003 * because they are known to be properly supported in our code. 6004 * Also, usually, the low half of the MSRs (bits which must be 1) can 6005 * be set to 0, meaning that L1 may turn off any of these bits. The 6006 * reason is that if one of these bits is necessary, it will appear 6007 * in vmcs01 and prepare_vmcs02, when it bitwise-or's the control 6008 * fields of vmcs01 and vmcs02, will turn these bits off - and 6009 * nested_vmx_exit_reflected() will not pass related exits to L1. 6010 * These rules have exceptions below. 6011 */ 6012 6013 /* pin-based controls */ 6014 rdmsr(MSR_IA32_VMX_PINBASED_CTLS, 6015 msrs->pinbased_ctls_low, 6016 msrs->pinbased_ctls_high); 6017 msrs->pinbased_ctls_low |= 6018 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR; 6019 msrs->pinbased_ctls_high &= 6020 PIN_BASED_EXT_INTR_MASK | 6021 PIN_BASED_NMI_EXITING | 6022 PIN_BASED_VIRTUAL_NMIS | 6023 (enable_apicv ? PIN_BASED_POSTED_INTR : 0); 6024 msrs->pinbased_ctls_high |= 6025 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR | 6026 PIN_BASED_VMX_PREEMPTION_TIMER; 6027 6028 /* exit controls */ 6029 rdmsr(MSR_IA32_VMX_EXIT_CTLS, 6030 msrs->exit_ctls_low, 6031 msrs->exit_ctls_high); 6032 msrs->exit_ctls_low = 6033 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR; 6034 6035 msrs->exit_ctls_high &= 6036 #ifdef CONFIG_X86_64 6037 VM_EXIT_HOST_ADDR_SPACE_SIZE | 6038 #endif 6039 VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT; 6040 msrs->exit_ctls_high |= 6041 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR | 6042 VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER | 6043 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT; 6044 6045 /* We support free control of debug control saving. */ 6046 msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS; 6047 6048 /* entry controls */ 6049 rdmsr(MSR_IA32_VMX_ENTRY_CTLS, 6050 msrs->entry_ctls_low, 6051 msrs->entry_ctls_high); 6052 msrs->entry_ctls_low = 6053 VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR; 6054 msrs->entry_ctls_high &= 6055 #ifdef CONFIG_X86_64 6056 VM_ENTRY_IA32E_MODE | 6057 #endif 6058 VM_ENTRY_LOAD_IA32_PAT; 6059 msrs->entry_ctls_high |= 6060 (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER); 6061 6062 /* We support free control of debug control loading. 
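Clearing the bit in the low (must-be-1) half advertises to L1 that it may leave VM_ENTRY_LOAD_DEBUG_CONTROLS off, mirroring VM_EXIT_SAVE_DEBUG_CONTROLS above.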
*/ 6063 msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS; 6064 6065 /* cpu-based controls */ 6066 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, 6067 msrs->procbased_ctls_low, 6068 msrs->procbased_ctls_high); 6069 msrs->procbased_ctls_low = 6070 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR; 6071 msrs->procbased_ctls_high &= 6072 CPU_BASED_INTR_WINDOW_EXITING | 6073 CPU_BASED_NMI_WINDOW_EXITING | CPU_BASED_USE_TSC_OFFSETTING | 6074 CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING | 6075 CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING | 6076 CPU_BASED_CR3_STORE_EXITING | 6077 #ifdef CONFIG_X86_64 6078 CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING | 6079 #endif 6080 CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING | 6081 CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG | 6082 CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING | 6083 CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING | 6084 CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS; 6085 /* 6086 * We can allow some features even when not supported by the 6087 * hardware. For example, L1 can specify an MSR bitmap - and we 6088 * can use it to avoid exits to L1 - even when L0 runs L2 6089 * without MSR bitmaps. 6090 */ 6091 msrs->procbased_ctls_high |= 6092 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR | 6093 CPU_BASED_USE_MSR_BITMAPS; 6094 6095 /* We support free control of CR3 access interception. */ 6096 msrs->procbased_ctls_low &= 6097 ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING); 6098 6099 /* 6100 * secondary cpu-based controls. Do not include those that 6101 * depend on CPUID bits, they are added later by vmx_cpuid_update. 6102 */ 6103 if (msrs->procbased_ctls_high & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) 6104 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2, 6105 msrs->secondary_ctls_low, 6106 msrs->secondary_ctls_high); 6107 6108 msrs->secondary_ctls_low = 0; 6109 msrs->secondary_ctls_high &= 6110 SECONDARY_EXEC_DESC | 6111 SECONDARY_EXEC_RDTSCP | 6112 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 6113 SECONDARY_EXEC_WBINVD_EXITING | 6114 SECONDARY_EXEC_APIC_REGISTER_VIRT | 6115 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 6116 SECONDARY_EXEC_RDRAND_EXITING | 6117 SECONDARY_EXEC_ENABLE_INVPCID | 6118 SECONDARY_EXEC_RDSEED_EXITING | 6119 SECONDARY_EXEC_XSAVES; 6120 6121 /* 6122 * We can emulate "VMCS shadowing," even if the hardware 6123 * doesn't support it. 
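 * L2's VMREAD/VMWRITE accesses that L1 chose not to intercept simply exit to
 * L0 and are emulated against the shadow vmcs12 (see handle_vmread() and
 * handle_vmwrite(), which operate on get_shadow_vmcs12() while in guest mode).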
6124 */ 6125 msrs->secondary_ctls_high |= 6126 SECONDARY_EXEC_SHADOW_VMCS; 6127 6128 if (enable_ept) { 6129 /* nested EPT: emulate EPT also to L1 */ 6130 msrs->secondary_ctls_high |= 6131 SECONDARY_EXEC_ENABLE_EPT; 6132 msrs->ept_caps = VMX_EPT_PAGE_WALK_4_BIT | 6133 VMX_EPTP_WB_BIT | VMX_EPT_INVEPT_BIT; 6134 if (cpu_has_vmx_ept_execute_only()) 6135 msrs->ept_caps |= 6136 VMX_EPT_EXECUTE_ONLY_BIT; 6137 msrs->ept_caps &= ept_caps; 6138 msrs->ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT | 6139 VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_2MB_PAGE_BIT | 6140 VMX_EPT_1GB_PAGE_BIT; 6141 if (enable_ept_ad_bits) { 6142 msrs->secondary_ctls_high |= 6143 SECONDARY_EXEC_ENABLE_PML; 6144 msrs->ept_caps |= VMX_EPT_AD_BIT; 6145 } 6146 } 6147 6148 if (cpu_has_vmx_vmfunc()) { 6149 msrs->secondary_ctls_high |= 6150 SECONDARY_EXEC_ENABLE_VMFUNC; 6151 /* 6152 * Advertise EPTP switching unconditionally 6153 * since we emulate it 6154 */ 6155 if (enable_ept) 6156 msrs->vmfunc_controls = 6157 VMX_VMFUNC_EPTP_SWITCHING; 6158 } 6159 6160 /* 6161 * Old versions of KVM use the single-context version without 6162 * checking for support, so declare that it is supported even 6163 * though it is treated as global context. The alternative is 6164 * not failing the single-context invvpid, and it is worse. 6165 */ 6166 if (enable_vpid) { 6167 msrs->secondary_ctls_high |= 6168 SECONDARY_EXEC_ENABLE_VPID; 6169 msrs->vpid_caps = VMX_VPID_INVVPID_BIT | 6170 VMX_VPID_EXTENT_SUPPORTED_MASK; 6171 } 6172 6173 if (enable_unrestricted_guest) 6174 msrs->secondary_ctls_high |= 6175 SECONDARY_EXEC_UNRESTRICTED_GUEST; 6176 6177 if (flexpriority_enabled) 6178 msrs->secondary_ctls_high |= 6179 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; 6180 6181 /* miscellaneous data */ 6182 rdmsr(MSR_IA32_VMX_MISC, 6183 msrs->misc_low, 6184 msrs->misc_high); 6185 msrs->misc_low &= VMX_MISC_SAVE_EFER_LMA; 6186 msrs->misc_low |= 6187 MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS | 6188 VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE | 6189 VMX_MISC_ACTIVITY_HLT; 6190 msrs->misc_high = 0; 6191 6192 /* 6193 * This MSR reports some information about VMX support. We 6194 * should return information about the VMX we emulate for the 6195 * guest, and the VMCS structure we give it - not about the 6196 * VMX support of the underlying hardware. 6197 */ 6198 msrs->basic = 6199 VMCS12_REVISION | 6200 VMX_BASIC_TRUE_CTLS | 6201 ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) | 6202 (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT); 6203 6204 if (cpu_has_vmx_basic_inout()) 6205 msrs->basic |= VMX_BASIC_INOUT; 6206 6207 /* 6208 * These MSRs specify bits which the guest must keep fixed on 6209 * while L1 is in VMXON mode (in L1's root mode, or running an L2). 6210 * We picked the standard core2 setting. 6211 */ 6212 #define VMXON_CR0_ALWAYSON (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE) 6213 #define VMXON_CR4_ALWAYSON X86_CR4_VMXE 6214 msrs->cr0_fixed0 = VMXON_CR0_ALWAYSON; 6215 msrs->cr4_fixed0 = VMXON_CR4_ALWAYSON; 6216 6217 /* These MSRs specify bits which the guest must keep fixed off. 
*/ 6218 rdmsrl(MSR_IA32_VMX_CR0_FIXED1, msrs->cr0_fixed1); 6219 rdmsrl(MSR_IA32_VMX_CR4_FIXED1, msrs->cr4_fixed1); 6220 6221 /* highest index: VMX_PREEMPTION_TIMER_VALUE */ 6222 msrs->vmcs_enum = VMCS12_MAX_FIELD_INDEX << 1; 6223 } 6224 6225 void nested_vmx_hardware_unsetup(void) 6226 { 6227 int i; 6228 6229 if (enable_shadow_vmcs) { 6230 for (i = 0; i < VMX_BITMAP_NR; i++) 6231 free_page((unsigned long)vmx_bitmap[i]); 6232 } 6233 } 6234 6235 __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *)) 6236 { 6237 int i; 6238 6239 if (!cpu_has_vmx_shadow_vmcs()) 6240 enable_shadow_vmcs = 0; 6241 if (enable_shadow_vmcs) { 6242 for (i = 0; i < VMX_BITMAP_NR; i++) { 6243 /* 6244 * The vmx_bitmap is not tied to a VM and so should 6245 * not be charged to a memcg. 6246 */ 6247 vmx_bitmap[i] = (unsigned long *) 6248 __get_free_page(GFP_KERNEL); 6249 if (!vmx_bitmap[i]) { 6250 nested_vmx_hardware_unsetup(); 6251 return -ENOMEM; 6252 } 6253 } 6254 6255 init_vmcs_shadow_fields(); 6256 } 6257 6258 exit_handlers[EXIT_REASON_VMCLEAR] = handle_vmclear; 6259 exit_handlers[EXIT_REASON_VMLAUNCH] = handle_vmlaunch; 6260 exit_handlers[EXIT_REASON_VMPTRLD] = handle_vmptrld; 6261 exit_handlers[EXIT_REASON_VMPTRST] = handle_vmptrst; 6262 exit_handlers[EXIT_REASON_VMREAD] = handle_vmread; 6263 exit_handlers[EXIT_REASON_VMRESUME] = handle_vmresume; 6264 exit_handlers[EXIT_REASON_VMWRITE] = handle_vmwrite; 6265 exit_handlers[EXIT_REASON_VMOFF] = handle_vmoff; 6266 exit_handlers[EXIT_REASON_VMON] = handle_vmon; 6267 exit_handlers[EXIT_REASON_INVEPT] = handle_invept; 6268 exit_handlers[EXIT_REASON_INVVPID] = handle_invvpid; 6269 exit_handlers[EXIT_REASON_VMFUNC] = handle_vmfunc; 6270 6271 kvm_x86_ops->check_nested_events = vmx_check_nested_events; 6272 kvm_x86_ops->get_nested_state = vmx_get_nested_state; 6273 kvm_x86_ops->set_nested_state = vmx_set_nested_state; 6274 kvm_x86_ops->get_vmcs12_pages = nested_get_vmcs12_pages; 6275 kvm_x86_ops->nested_enable_evmcs = nested_enable_evmcs; 6276 kvm_x86_ops->nested_get_evmcs_version = nested_get_evmcs_version; 6277 6278 return 0; 6279 } 6280