// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#define pr_fmt(fmt) "SVM: " fmt

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>

#include <asm/msr-index.h>
#include <asm/debugreg.h>

#include "kvm_emulate.h"
#include "trace.h"
#include "mmu.h"
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "svm.h"
#include "hyperv.h"

#define CC KVM_NESTED_VMENTER_CONSISTENCY_CHECK

static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
				       struct x86_exception *fault)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb = svm->vmcb;

	if (vmcb->control.exit_code != SVM_EXIT_NPF) {
		/*
		 * TODO: track the cause of the nested page fault, and
		 * correctly fill in the high bits of exit_info_1.
		 */
		vmcb->control.exit_code = SVM_EXIT_NPF;
		vmcb->control.exit_code_hi = 0;
		vmcb->control.exit_info_1 = (1ULL << 32);
		vmcb->control.exit_info_2 = fault->address;
	}

	vmcb->control.exit_info_1 &= ~0xffffffffULL;
	vmcb->control.exit_info_1 |= fault->error_code;

	nested_svm_vmexit(svm);
}

static bool nested_svm_handle_page_fault_workaround(struct kvm_vcpu *vcpu,
						    struct x86_exception *fault)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb = svm->vmcb;

	WARN_ON(!is_guest_mode(vcpu));

	if (vmcb12_is_intercept(&svm->nested.ctl,
				INTERCEPT_EXCEPTION_OFFSET + PF_VECTOR) &&
	    !WARN_ON_ONCE(svm->nested.nested_run_pending)) {
		vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + PF_VECTOR;
		vmcb->control.exit_code_hi = 0;
		vmcb->control.exit_info_1 = fault->error_code;
		vmcb->control.exit_info_2 = fault->address;
		nested_svm_vmexit(svm);
		return true;
	}

	return false;
}

static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 cr3 = svm->nested.ctl.nested_cr3;
	u64 pdpte;
	int ret;

	ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(cr3), &pdpte,
				       offset_in_page(cr3) + index * 8, 8);
	if (ret)
		return 0;
	return pdpte;
}

static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return svm->nested.ctl.nested_cr3;
}

static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	WARN_ON(mmu_is_nested(vcpu));

	vcpu->arch.mmu = &vcpu->arch.guest_mmu;

	/*
	 * The NPT format depends on L1's CR4 and EFER, which is in vmcb01.  Note,
	 * when called via KVM_SET_NESTED_STATE, that state may _not_ match current
	 * vCPU state.  CR0.WP is explicitly ignored, while CR0.PG is required.
	 */
	kvm_init_shadow_npt_mmu(vcpu, X86_CR0_PG, svm->vmcb01.ptr->save.cr4,
				svm->vmcb01.ptr->save.efer,
				svm->nested.ctl.nested_cr3);
	vcpu->arch.mmu->get_guest_pgd = nested_svm_get_tdp_cr3;
	vcpu->arch.mmu->get_pdptr = nested_svm_get_tdp_pdptr;
	vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
	vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
}

static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
	vcpu->arch.mmu = &vcpu->arch.root_mmu;
	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}

static bool nested_vmcb_needs_vls_intercept(struct vcpu_svm *svm)
{
	if (!svm->v_vmload_vmsave_enabled)
		return true;

	if (!nested_npt_enabled(svm))
		return true;

	if (!(svm->nested.ctl.virt_ext & VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK))
		return true;

	return false;
}

void recalc_intercepts(struct vcpu_svm *svm)
{
	struct vmcb_control_area *c, *h;
	struct vmcb_ctrl_area_cached *g;
	unsigned int i;

	vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

	if (!is_guest_mode(&svm->vcpu))
		return;

	c = &svm->vmcb->control;
	h = &svm->vmcb01.ptr->control;
	g = &svm->nested.ctl;

	for (i = 0; i < MAX_INTERCEPT; i++)
		c->intercepts[i] = h->intercepts[i];

	if (g->int_ctl & V_INTR_MASKING_MASK) {
		/* We only want the cr8 intercept bits of L1 */
		vmcb_clr_intercept(c, INTERCEPT_CR8_READ);
		vmcb_clr_intercept(c, INTERCEPT_CR8_WRITE);

		/*
		 * Once running L2 with HF_VINTR_MASK, EFLAGS.IF does not
		 * affect any interrupt we may want to inject; therefore,
		 * interrupt window vmexits are irrelevant to L0.
		 */
		vmcb_clr_intercept(c, INTERCEPT_VINTR);
	}

	/* We don't want to see VMMCALLs from a nested guest */
	vmcb_clr_intercept(c, INTERCEPT_VMMCALL);

	for (i = 0; i < MAX_INTERCEPT; i++)
		c->intercepts[i] |= g->intercepts[i];

	/* If SMI is not intercepted, ignore guest SMI intercept as well */
	if (!intercept_smi)
		vmcb_clr_intercept(c, INTERCEPT_SMI);

	if (nested_vmcb_needs_vls_intercept(svm)) {
		/*
		 * If the virtual VMLOAD/VMSAVE is not enabled for the L2,
		 * we must intercept these instructions to correctly
		 * emulate them in case L1 doesn't intercept them.
		 */
		vmcb_set_intercept(c, INTERCEPT_VMLOAD);
		vmcb_set_intercept(c, INTERCEPT_VMSAVE);
	} else {
		WARN_ON(!(c->virt_ext & VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK));
	}
}

/*
 * Merge L0's (KVM) and L1's (Nested VMCB) MSR permission bitmaps. The function
 * is optimized in that it only merges the parts where the KVM MSR permission
 * bitmap may contain zero bits.
 */
static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
{
	struct hv_enlightenments *hve =
		(struct hv_enlightenments *)svm->nested.ctl.reserved_sw;
	int i;

	/*
	 * MSR bitmap update can be skipped when:
	 * - MSR bitmap for L1 hasn't changed.
	 * - Nested hypervisor (L1) is attempting to launch the same L2 as
	 *   before.
	 * - Nested hypervisor (L1) is using Hyper-V emulation interface and
	 *   tells KVM (L0) there were no changes in MSR bitmap for L2.
	 */
	if (!svm->nested.force_msr_bitmap_recalc &&
	    kvm_hv_hypercall_enabled(&svm->vcpu) &&
	    hve->hv_enlightenments_control.msr_bitmap &&
	    (svm->nested.ctl.clean & BIT(VMCB_HV_NESTED_ENLIGHTENMENTS)))
		goto set_msrpm_base_pa;

	if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
		return true;

	for (i = 0; i < MSRPM_OFFSETS; i++) {
		u32 value, p;
		u64 offset;

		if (msrpm_offsets[i] == 0xffffffff)
			break;

		p = msrpm_offsets[i];
		offset = svm->nested.ctl.msrpm_base_pa + (p * 4);

		if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
			return false;

		svm->nested.msrpm[p] = svm->msrpm[p] | value;
	}

	svm->nested.force_msr_bitmap_recalc = false;

set_msrpm_base_pa:
	svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));

	return true;
}

/*
 * Bits 11:0 of bitmap address are ignored by hardware
 */
static bool nested_svm_check_bitmap_pa(struct kvm_vcpu *vcpu, u64 pa, u32 size)
{
	u64 addr = PAGE_ALIGN(pa);

	return kvm_vcpu_is_legal_gpa(vcpu, addr) &&
	       kvm_vcpu_is_legal_gpa(vcpu, addr + size - 1);
}

static bool nested_svm_check_tlb_ctl(struct kvm_vcpu *vcpu, u8 tlb_ctl)
{
	/* Nested FLUSHBYASID is not supported yet. */
	switch(tlb_ctl) {
	case TLB_CONTROL_DO_NOTHING:
	case TLB_CONTROL_FLUSH_ALL_ASID:
		return true;
	default:
		return false;
	}
}

static bool __nested_vmcb_check_controls(struct kvm_vcpu *vcpu,
					 struct vmcb_ctrl_area_cached *control)
{
	if (CC(!vmcb12_is_intercept(control, INTERCEPT_VMRUN)))
		return false;

	if (CC(control->asid == 0))
		return false;

	if (CC((control->nested_ctl & SVM_NESTED_CTL_NP_ENABLE) && !npt_enabled))
		return false;

	if (CC(!nested_svm_check_bitmap_pa(vcpu, control->msrpm_base_pa,
					   MSRPM_SIZE)))
		return false;
	if (CC(!nested_svm_check_bitmap_pa(vcpu, control->iopm_base_pa,
					   IOPM_SIZE)))
		return false;

	if (CC(!nested_svm_check_tlb_ctl(vcpu, control->tlb_ctl)))
		return false;

	return true;
}

/* Common checks that apply to both L1 and L2 state.  */
static bool __nested_vmcb_check_save(struct kvm_vcpu *vcpu,
				     struct vmcb_save_area_cached *save)
{
	if (CC(!(save->efer & EFER_SVME)))
		return false;

	if (CC((save->cr0 & X86_CR0_CD) == 0 && (save->cr0 & X86_CR0_NW)) ||
	    CC(save->cr0 & ~0xffffffffULL))
		return false;

	if (CC(!kvm_dr6_valid(save->dr6)) || CC(!kvm_dr7_valid(save->dr7)))
		return false;

	/*
	 * These checks are also performed by KVM_SET_SREGS,
	 * except that EFER.LMA is not checked by SVM against
	 * CR0.PG && EFER.LME.
	 */
	if ((save->efer & EFER_LME) && (save->cr0 & X86_CR0_PG)) {
		if (CC(!(save->cr4 & X86_CR4_PAE)) ||
		    CC(!(save->cr0 & X86_CR0_PE)) ||
		    CC(kvm_vcpu_is_illegal_gpa(vcpu, save->cr3)))
			return false;
	}

	if (CC(!kvm_is_valid_cr4(vcpu, save->cr4)))
		return false;

	if (CC(!kvm_valid_efer(vcpu, save->efer)))
		return false;

	return true;
}

static bool nested_vmcb_check_save(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_save_area_cached *save = &svm->nested.save;

	return __nested_vmcb_check_save(vcpu, save);
}

static bool nested_vmcb_check_controls(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_ctrl_area_cached *ctl = &svm->nested.ctl;

	return __nested_vmcb_check_controls(vcpu, ctl);
}

static
void __nested_copy_vmcb_control_to_cache(struct kvm_vcpu *vcpu,
					 struct vmcb_ctrl_area_cached *to,
					 struct vmcb_control_area *from)
{
	unsigned int i;

	for (i = 0; i < MAX_INTERCEPT; i++)
		to->intercepts[i] = from->intercepts[i];

	to->iopm_base_pa = from->iopm_base_pa;
	to->msrpm_base_pa = from->msrpm_base_pa;
	to->tsc_offset = from->tsc_offset;
	to->tlb_ctl = from->tlb_ctl;
	to->int_ctl = from->int_ctl;
	to->int_vector = from->int_vector;
	to->int_state = from->int_state;
	to->exit_code = from->exit_code;
	to->exit_code_hi = from->exit_code_hi;
	to->exit_info_1 = from->exit_info_1;
	to->exit_info_2 = from->exit_info_2;
	to->exit_int_info = from->exit_int_info;
	to->exit_int_info_err = from->exit_int_info_err;
	to->nested_ctl = from->nested_ctl;
	to->event_inj = from->event_inj;
	to->event_inj_err = from->event_inj_err;
	to->nested_cr3 = from->nested_cr3;
	to->virt_ext = from->virt_ext;
	to->pause_filter_count = from->pause_filter_count;
	to->pause_filter_thresh = from->pause_filter_thresh;

	/* Copy asid here because nested_vmcb_check_controls will check it.  */
	to->asid = from->asid;
	to->msrpm_base_pa &= ~0x0fffULL;
	to->iopm_base_pa &= ~0x0fffULL;

	/* Hyper-V extensions (Enlightened VMCB) */
	if (kvm_hv_hypercall_enabled(vcpu)) {
		to->clean = from->clean;
		memcpy(to->reserved_sw, from->reserved_sw,
		       sizeof(struct hv_enlightenments));
	}
}

void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
				       struct vmcb_control_area *control)
{
	__nested_copy_vmcb_control_to_cache(&svm->vcpu, &svm->nested.ctl, control);
}

static void __nested_copy_vmcb_save_to_cache(struct vmcb_save_area_cached *to,
					     struct vmcb_save_area *from)
{
	/*
	 * Copy only fields that are validated, as we need them
	 * to avoid TOC/TOU races.
	 */
	to->efer = from->efer;
	to->cr0 = from->cr0;
	to->cr3 = from->cr3;
	to->cr4 = from->cr4;

	to->dr6 = from->dr6;
	to->dr7 = from->dr7;
}

void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
				    struct vmcb_save_area *save)
{
	__nested_copy_vmcb_save_to_cache(&svm->nested.save, save);
}

/*
 * Synchronize fields that are written by the processor, so that
 * they can be copied back into the vmcb12.
 */
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm)
{
	u32 mask;
	svm->nested.ctl.event_inj = svm->vmcb->control.event_inj;
	svm->nested.ctl.event_inj_err = svm->vmcb->control.event_inj_err;

	/* Only a few fields of int_ctl are written by the processor.  */
	mask = V_IRQ_MASK | V_TPR_MASK;
	if (!(svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK) &&
	    svm_is_intercept(svm, INTERCEPT_VINTR)) {
		/*
		 * In order to request an interrupt window, L0 is usurping
		 * svm->vmcb->control.int_ctl and possibly setting V_IRQ
		 * even if it was clear in L1's VMCB. Restoring it would be
		 * wrong.  However, in this case V_IRQ will remain true until
		 * interrupt_window_interception calls svm_clear_vintr and
		 * restores int_ctl.  We can just leave it aside.
		 */
		mask &= ~V_IRQ_MASK;
	}

	if (nested_vgif_enabled(svm))
		mask |= V_GIF_MASK;

	svm->nested.ctl.int_ctl &= ~mask;
	svm->nested.ctl.int_ctl |= svm->vmcb->control.int_ctl & mask;
}

/*
 * Transfer any event that L0 or L1 wanted to inject into L2 to
 * EXIT_INT_INFO.
 */
static void nested_save_pending_event_to_vmcb12(struct vcpu_svm *svm,
						struct vmcb *vmcb12)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	u32 exit_int_info = 0;
	unsigned int nr;

	if (vcpu->arch.exception.injected) {
		nr = vcpu->arch.exception.nr;
		exit_int_info = nr | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT;

		if (vcpu->arch.exception.has_error_code) {
			exit_int_info |= SVM_EVTINJ_VALID_ERR;
			vmcb12->control.exit_int_info_err =
				vcpu->arch.exception.error_code;
		}

	} else if (vcpu->arch.nmi_injected) {
		exit_int_info = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;

	} else if (vcpu->arch.interrupt.injected) {
		nr = vcpu->arch.interrupt.nr;
		exit_int_info = nr | SVM_EVTINJ_VALID;

		if (vcpu->arch.interrupt.soft)
			exit_int_info |= SVM_EVTINJ_TYPE_SOFT;
		else
			exit_int_info |= SVM_EVTINJ_TYPE_INTR;
	}

	vmcb12->control.exit_int_info = exit_int_info;
}

static void nested_svm_transition_tlb_flush(struct kvm_vcpu *vcpu)
{
	/*
	 * TODO: optimize unconditional TLB flush/MMU sync.  A partial list of
	 * things to fix before this can be conditional:
	 *
	 *  - Flush TLBs for both L1 and L2 remote TLB flush
	 *  - Honor L1's request to flush an ASID on nested VMRUN
	 *  - Sync nested NPT MMU on VMRUN that flushes L2's ASID[*]
	 *  - Don't crush a pending TLB flush in vmcb02 on nested VMRUN
	 *  - Flush L1's ASID on KVM_REQ_TLB_FLUSH_GUEST
	 *
	 * [*] Unlike nested EPT, SVM's ASID management can invalidate nested
	 *     NPT guest-physical mappings on VMRUN.
	 */
	kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
	kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
}

/*
 * Load guest's/host's cr3 on nested vmentry or vmexit.  @nested_npt is true
 * if we are emulating VM-Entry into a guest with NPT enabled.
 */
static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
			       bool nested_npt, bool reload_pdptrs)
{
	if (CC(kvm_vcpu_is_illegal_gpa(vcpu, cr3)))
		return -EINVAL;

	if (reload_pdptrs && !nested_npt && is_pae_paging(vcpu) &&
	    CC(!load_pdptrs(vcpu, cr3)))
		return -EINVAL;

	vcpu->arch.cr3 = cr3;

	/* Re-initialize the MMU, e.g. to pick up CR4 MMU role changes. */
	kvm_init_mmu(vcpu);

	if (!nested_npt)
		kvm_mmu_new_pgd(vcpu, cr3);

	return 0;
}

void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm)
{
	if (!svm->nested.vmcb02.ptr)
		return;

	/* FIXME: merge g_pat from vmcb01 and vmcb12.  */
	svm->nested.vmcb02.ptr->save.g_pat = svm->vmcb01.ptr->save.g_pat;
}

static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
{
	bool new_vmcb12 = false;
	struct vmcb *vmcb01 = svm->vmcb01.ptr;
	struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;

	nested_vmcb02_compute_g_pat(svm);

	/* Load the nested guest state */
	if (svm->nested.vmcb12_gpa != svm->nested.last_vmcb12_gpa) {
		new_vmcb12 = true;
		svm->nested.last_vmcb12_gpa = svm->nested.vmcb12_gpa;
		svm->nested.force_msr_bitmap_recalc = true;
	}

	if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_SEG))) {
		vmcb02->save.es = vmcb12->save.es;
		vmcb02->save.cs = vmcb12->save.cs;
		vmcb02->save.ss = vmcb12->save.ss;
		vmcb02->save.ds = vmcb12->save.ds;
		vmcb02->save.cpl = vmcb12->save.cpl;
		vmcb_mark_dirty(vmcb02, VMCB_SEG);
	}

	if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_DT))) {
		vmcb02->save.gdtr = vmcb12->save.gdtr;
		vmcb02->save.idtr = vmcb12->save.idtr;
		vmcb_mark_dirty(vmcb02, VMCB_DT);
	}

	kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED);

	svm_set_efer(&svm->vcpu, svm->nested.save.efer);

	svm_set_cr0(&svm->vcpu, svm->nested.save.cr0);
	svm_set_cr4(&svm->vcpu, svm->nested.save.cr4);

	svm->vcpu.arch.cr2 = vmcb12->save.cr2;

	kvm_rax_write(&svm->vcpu, vmcb12->save.rax);
	kvm_rsp_write(&svm->vcpu, vmcb12->save.rsp);
	kvm_rip_write(&svm->vcpu, vmcb12->save.rip);

	/* In case we don't even reach vcpu_run, the fields are not updated */
	vmcb02->save.rax = vmcb12->save.rax;
	vmcb02->save.rsp = vmcb12->save.rsp;
	vmcb02->save.rip = vmcb12->save.rip;

	/* These bits will be set properly on the first execution when new_vmcb12 is true */
	if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_DR))) {
		vmcb02->save.dr7 = svm->nested.save.dr7 | DR7_FIXED_1;
		svm->vcpu.arch.dr6 = svm->nested.save.dr6 | DR6_ACTIVE_LOW;
		vmcb_mark_dirty(vmcb02, VMCB_DR);
	}

	if (unlikely(svm->lbrv_enabled && (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
		/*
		 * Reserved bits of DEBUGCTL are ignored.  Be consistent with
		 * svm_set_msr's definition of reserved bits.
		 */
		svm_copy_lbrs(vmcb02, vmcb12);
		vmcb02->save.dbgctl &= ~DEBUGCTL_RESERVED_BITS;
		svm_update_lbrv(&svm->vcpu);

	} else if (unlikely(vmcb01->control.virt_ext & LBR_CTL_ENABLE_MASK)) {
		svm_copy_lbrs(vmcb02, vmcb01);
	}
}

static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
{
	u32 int_ctl_vmcb01_bits = V_INTR_MASKING_MASK;
	u32 int_ctl_vmcb12_bits = V_TPR_MASK | V_IRQ_INJECTION_BITS_MASK;

	struct kvm_vcpu *vcpu = &svm->vcpu;
	struct vmcb *vmcb01 = svm->vmcb01.ptr;
	struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;

	/*
	 * Filled at exit: exit_code, exit_code_hi, exit_info_1, exit_info_2,
	 * exit_int_info, exit_int_info_err, next_rip, insn_len, insn_bytes.
	 */

	if (svm->vgif_enabled && (svm->nested.ctl.int_ctl & V_GIF_ENABLE_MASK))
		int_ctl_vmcb12_bits |= (V_GIF_MASK | V_GIF_ENABLE_MASK);
	else
		int_ctl_vmcb01_bits |= (V_GIF_MASK | V_GIF_ENABLE_MASK);

	/* Copied from vmcb01.  msrpm_base can be overwritten later.  */
	vmcb02->control.nested_ctl = vmcb01->control.nested_ctl;
	vmcb02->control.iopm_base_pa = vmcb01->control.iopm_base_pa;
	vmcb02->control.msrpm_base_pa = vmcb01->control.msrpm_base_pa;

	/* Done at vmrun: asid.  */

	/* Also overwritten later if necessary.  */
	vmcb02->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;

	/* nested_cr3. */
	if (nested_npt_enabled(svm))
		nested_svm_init_mmu_context(vcpu);

	vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset(
			vcpu->arch.l1_tsc_offset,
			svm->nested.ctl.tsc_offset,
			svm->tsc_ratio_msr);

	vmcb02->control.tsc_offset = vcpu->arch.tsc_offset;

	if (svm->tsc_ratio_msr != kvm_default_tsc_scaling_ratio) {
		WARN_ON(!svm->tsc_scaling_enabled);
		nested_svm_update_tsc_ratio_msr(vcpu);
	}

	vmcb02->control.int_ctl =
		(svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) |
		(vmcb01->control.int_ctl & int_ctl_vmcb01_bits);

	vmcb02->control.int_vector = svm->nested.ctl.int_vector;
	vmcb02->control.int_state = svm->nested.ctl.int_state;
	vmcb02->control.event_inj = svm->nested.ctl.event_inj;
	vmcb02->control.event_inj_err = svm->nested.ctl.event_inj_err;

	vmcb02->control.virt_ext = vmcb01->control.virt_ext &
				   LBR_CTL_ENABLE_MASK;
	if (svm->lbrv_enabled)
		vmcb02->control.virt_ext |=
			(svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK);

	if (!nested_vmcb_needs_vls_intercept(svm))
		vmcb02->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;

	if (kvm_pause_in_guest(svm->vcpu.kvm)) {
		/* use guest values since host doesn't use them */
		vmcb02->control.pause_filter_count =
				svm->pause_filter_enabled ?
				svm->nested.ctl.pause_filter_count : 0;

		vmcb02->control.pause_filter_thresh =
				svm->pause_threshold_enabled ?
				svm->nested.ctl.pause_filter_thresh : 0;

	} else if (!vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_PAUSE)) {
		/* use host values when guest doesn't use them */
		vmcb02->control.pause_filter_count = vmcb01->control.pause_filter_count;
		vmcb02->control.pause_filter_thresh = vmcb01->control.pause_filter_thresh;
	} else {
		/*
		 * Intercept every PAUSE otherwise and
		 * ignore both host and guest values
		 */
		vmcb02->control.pause_filter_count = 0;
		vmcb02->control.pause_filter_thresh = 0;
	}

	nested_svm_transition_tlb_flush(vcpu);

	/* Enter Guest-Mode */
	enter_guest_mode(vcpu);

	/*
	 * Merge guest and host intercepts - must be called with vcpu in
	 * guest-mode to take effect.
	 */
	recalc_intercepts(svm);
}

static void nested_svm_copy_common_state(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
{
	/*
	 * Some VMCB state is shared between L1 and L2 and thus has to be
	 * moved at the time of nested vmrun and vmexit.
	 *
	 * VMLOAD/VMSAVE state would also belong in this category, but KVM
	 * always performs VMLOAD and VMSAVE from the VMCB01.
	 */
	to_vmcb->save.spec_ctrl = from_vmcb->save.spec_ctrl;
}

int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
			 struct vmcb *vmcb12, bool from_vmrun)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int ret;

	trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb12_gpa,
			       vmcb12->save.rip,
			       vmcb12->control.int_ctl,
			       vmcb12->control.event_inj,
			       vmcb12->control.nested_ctl);

	trace_kvm_nested_intercepts(vmcb12->control.intercepts[INTERCEPT_CR] & 0xffff,
				    vmcb12->control.intercepts[INTERCEPT_CR] >> 16,
				    vmcb12->control.intercepts[INTERCEPT_EXCEPTION],
				    vmcb12->control.intercepts[INTERCEPT_WORD3],
				    vmcb12->control.intercepts[INTERCEPT_WORD4],
				    vmcb12->control.intercepts[INTERCEPT_WORD5]);


	svm->nested.vmcb12_gpa = vmcb12_gpa;

	WARN_ON(svm->vmcb == svm->nested.vmcb02.ptr);

	nested_svm_copy_common_state(svm->vmcb01.ptr, svm->nested.vmcb02.ptr);

	svm_switch_vmcb(svm, &svm->nested.vmcb02);
	nested_vmcb02_prepare_control(svm);
	nested_vmcb02_prepare_save(svm, vmcb12);

	ret = nested_svm_load_cr3(&svm->vcpu, svm->nested.save.cr3,
				  nested_npt_enabled(svm), from_vmrun);
	if (ret)
		return ret;

	if (!from_vmrun)
		kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);

	svm_set_gif(svm, true);

	if (kvm_vcpu_apicv_active(vcpu))
		kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);

	return 0;
}

int nested_svm_vmrun(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int ret;
	struct vmcb *vmcb12;
	struct kvm_host_map map;
	u64 vmcb12_gpa;
	struct vmcb *vmcb01 = svm->vmcb01.ptr;

	if (!svm->nested.hsave_msr) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	if (is_smm(vcpu)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	vmcb12_gpa = svm->vmcb->save.rax;
	ret = kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map);
	if (ret == -EINVAL) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	} else if (ret) {
		return kvm_skip_emulated_instruction(vcpu);
	}

	ret = kvm_skip_emulated_instruction(vcpu);

	vmcb12 = map.hva;

	if (WARN_ON_ONCE(!svm->nested.initialized))
		return -EINVAL;

	nested_copy_vmcb_control_to_cache(svm, &vmcb12->control);
	nested_copy_vmcb_save_to_cache(svm, &vmcb12->save);

	if (!nested_vmcb_check_save(vcpu) ||
	    !nested_vmcb_check_controls(vcpu)) {
		vmcb12->control.exit_code = SVM_EXIT_ERR;
		vmcb12->control.exit_code_hi = 0;
		vmcb12->control.exit_info_1 = 0;
		vmcb12->control.exit_info_2 = 0;
		goto out;
	}

	/*
	 * Since vmcb01 is not in use, we can use it to store some of the L1
	 * state.
	 */
	vmcb01->save.efer = vcpu->arch.efer;
	vmcb01->save.cr0 = kvm_read_cr0(vcpu);
	vmcb01->save.cr4 = vcpu->arch.cr4;
	vmcb01->save.rflags = kvm_get_rflags(vcpu);
	vmcb01->save.rip = kvm_rip_read(vcpu);

	if (!npt_enabled)
		vmcb01->save.cr3 = kvm_read_cr3(vcpu);

	svm->nested.nested_run_pending = 1;

	if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, true))
		goto out_exit_err;

	if (nested_svm_vmrun_msrpm(svm))
		goto out;

out_exit_err:
	svm->nested.nested_run_pending = 0;

	svm->vmcb->control.exit_code = SVM_EXIT_ERR;
	svm->vmcb->control.exit_code_hi = 0;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);

out:
	kvm_vcpu_unmap(vcpu, &map, true);

	return ret;
}

/* Copy state save area fields which are handled by VMRUN */
void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
			  struct vmcb_save_area *from_save)
{
	to_save->es = from_save->es;
	to_save->cs = from_save->cs;
	to_save->ss = from_save->ss;
	to_save->ds = from_save->ds;
	to_save->gdtr = from_save->gdtr;
	to_save->idtr = from_save->idtr;
	to_save->rflags = from_save->rflags | X86_EFLAGS_FIXED;
	to_save->efer = from_save->efer;
	to_save->cr0 = from_save->cr0;
	to_save->cr3 = from_save->cr3;
	to_save->cr4 = from_save->cr4;
	to_save->rax = from_save->rax;
	to_save->rsp = from_save->rsp;
	to_save->rip = from_save->rip;
	to_save->cpl = 0;
}

void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
{
	to_vmcb->save.fs = from_vmcb->save.fs;
	to_vmcb->save.gs = from_vmcb->save.gs;
	to_vmcb->save.tr = from_vmcb->save.tr;
	to_vmcb->save.ldtr = from_vmcb->save.ldtr;
	to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
	to_vmcb->save.star = from_vmcb->save.star;
	to_vmcb->save.lstar = from_vmcb->save.lstar;
	to_vmcb->save.cstar = from_vmcb->save.cstar;
	to_vmcb->save.sfmask = from_vmcb->save.sfmask;
	to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
	to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
	to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
}

int nested_svm_vmexit(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	struct vmcb *vmcb01 = svm->vmcb01.ptr;
	struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;
	struct vmcb *vmcb12;
	struct kvm_host_map map;
	int rc;

	rc = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map);
	if (rc) {
		if (rc == -EINVAL)
			kvm_inject_gp(vcpu, 0);
		return 1;
	}

	vmcb12 = map.hva;

	/* Exit Guest-Mode */
	leave_guest_mode(vcpu);
	svm->nested.vmcb12_gpa = 0;
	WARN_ON_ONCE(svm->nested.nested_run_pending);

	kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);

	/* in case we halted in L2 */
	svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;

	/* Give the current vmcb to the guest */

	vmcb12->save.es = vmcb02->save.es;
	vmcb12->save.cs = vmcb02->save.cs;
	vmcb12->save.ss = vmcb02->save.ss;
	vmcb12->save.ds = vmcb02->save.ds;
	vmcb12->save.gdtr = vmcb02->save.gdtr;
	vmcb12->save.idtr = vmcb02->save.idtr;
	vmcb12->save.efer = svm->vcpu.arch.efer;
	vmcb12->save.cr0 = kvm_read_cr0(vcpu);
	vmcb12->save.cr3 = kvm_read_cr3(vcpu);
	vmcb12->save.cr2 = vmcb02->save.cr2;
	vmcb12->save.cr4 = svm->vcpu.arch.cr4;
	vmcb12->save.rflags = kvm_get_rflags(vcpu);
	vmcb12->save.rip = kvm_rip_read(vcpu);
	vmcb12->save.rsp = kvm_rsp_read(vcpu);
	vmcb12->save.rax = kvm_rax_read(vcpu);
	vmcb12->save.dr7 = vmcb02->save.dr7;
	vmcb12->save.dr6 = svm->vcpu.arch.dr6;
	vmcb12->save.cpl = vmcb02->save.cpl;

	vmcb12->control.int_state = vmcb02->control.int_state;
	vmcb12->control.exit_code = vmcb02->control.exit_code;
	vmcb12->control.exit_code_hi = vmcb02->control.exit_code_hi;
	vmcb12->control.exit_info_1 = vmcb02->control.exit_info_1;
	vmcb12->control.exit_info_2 = vmcb02->control.exit_info_2;

	if (vmcb12->control.exit_code != SVM_EXIT_ERR)
		nested_save_pending_event_to_vmcb12(svm, vmcb12);

	if (svm->nrips_enabled)
		vmcb12->control.next_rip = vmcb02->control.next_rip;

	vmcb12->control.int_ctl = svm->nested.ctl.int_ctl;
	vmcb12->control.tlb_ctl = svm->nested.ctl.tlb_ctl;
	vmcb12->control.event_inj = svm->nested.ctl.event_inj;
	vmcb12->control.event_inj_err = svm->nested.ctl.event_inj_err;

	if (!kvm_pause_in_guest(vcpu->kvm) && vmcb02->control.pause_filter_count)
		vmcb01->control.pause_filter_count = vmcb02->control.pause_filter_count;

	nested_svm_copy_common_state(svm->nested.vmcb02.ptr, svm->vmcb01.ptr);

	svm_switch_vmcb(svm, &svm->vmcb01);

	if (unlikely(svm->lbrv_enabled && (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
		svm_copy_lbrs(vmcb12, vmcb02);
		svm_update_lbrv(vcpu);
	} else if (unlikely(vmcb01->control.virt_ext & LBR_CTL_ENABLE_MASK)) {
		svm_copy_lbrs(vmcb01, vmcb02);
		svm_update_lbrv(vcpu);
	}

	/*
	 * On vmexit the GIF is set to false and
	 * no event can be injected in L1.
	 */
	svm_set_gif(svm, false);
	vmcb01->control.exit_int_info = 0;

	svm->vcpu.arch.tsc_offset = svm->vcpu.arch.l1_tsc_offset;
	if (vmcb01->control.tsc_offset != svm->vcpu.arch.tsc_offset) {
		vmcb01->control.tsc_offset = svm->vcpu.arch.tsc_offset;
		vmcb_mark_dirty(vmcb01, VMCB_INTERCEPTS);
	}

	if (svm->tsc_ratio_msr != kvm_default_tsc_scaling_ratio) {
		WARN_ON(!svm->tsc_scaling_enabled);
		vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio;
		svm_write_tsc_multiplier(vcpu, vcpu->arch.tsc_scaling_ratio);
	}

	svm->nested.ctl.nested_cr3 = 0;

	/*
	 * Restore processor state that had been saved in vmcb01
	 */
	kvm_set_rflags(vcpu, vmcb01->save.rflags);
	svm_set_efer(vcpu, vmcb01->save.efer);
	svm_set_cr0(vcpu, vmcb01->save.cr0 | X86_CR0_PE);
	svm_set_cr4(vcpu, vmcb01->save.cr4);
	kvm_rax_write(vcpu, vmcb01->save.rax);
	kvm_rsp_write(vcpu, vmcb01->save.rsp);
	kvm_rip_write(vcpu, vmcb01->save.rip);

	svm->vcpu.arch.dr7 = DR7_FIXED_1;
	kvm_update_dr7(&svm->vcpu);

	trace_kvm_nested_vmexit_inject(vmcb12->control.exit_code,
				       vmcb12->control.exit_info_1,
				       vmcb12->control.exit_info_2,
				       vmcb12->control.exit_int_info,
				       vmcb12->control.exit_int_info_err,
				       KVM_ISA_SVM);

	kvm_vcpu_unmap(vcpu, &map, true);

	nested_svm_transition_tlb_flush(vcpu);

	nested_svm_uninit_mmu_context(vcpu);

	rc = nested_svm_load_cr3(vcpu, vmcb01->save.cr3, false, true);
	if (rc)
		return 1;

	/*
	 * Drop what we picked up for L2 via svm_complete_interrupts() so it
	 * doesn't end up in L1.
	 */
	svm->vcpu.arch.nmi_injected = false;
	kvm_clear_exception_queue(vcpu);
	kvm_clear_interrupt_queue(vcpu);

	/*
	 * If we are here following the completion of a VMRUN that
	 * is being single-stepped, queue the pending #DB intercept
	 * right now so that it can be accounted for before we execute
	 * L1's next instruction.
	 */
	if (unlikely(vmcb01->save.rflags & X86_EFLAGS_TF))
		kvm_queue_exception(&(svm->vcpu), DB_VECTOR);

	/*
	 * Un-inhibit the AVIC right away, so that other vCPUs can start
	 * to benefit from it right away.
	 */
	if (kvm_apicv_activated(vcpu->kvm))
		kvm_vcpu_update_apicv(vcpu);

	return 0;
}

static void nested_svm_triple_fault(struct kvm_vcpu *vcpu)
{
	nested_svm_simple_vmexit(to_svm(vcpu), SVM_EXIT_SHUTDOWN);
}

int svm_allocate_nested(struct vcpu_svm *svm)
{
	struct page *vmcb02_page;

	if (svm->nested.initialized)
		return 0;

	vmcb02_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	if (!vmcb02_page)
		return -ENOMEM;
	svm->nested.vmcb02.ptr = page_address(vmcb02_page);
	svm->nested.vmcb02.pa = __sme_set(page_to_pfn(vmcb02_page) << PAGE_SHIFT);

	svm->nested.msrpm = svm_vcpu_alloc_msrpm();
	if (!svm->nested.msrpm)
		goto err_free_vmcb02;
	svm_vcpu_init_msrpm(&svm->vcpu, svm->nested.msrpm);

	svm->nested.initialized = true;
	return 0;

err_free_vmcb02:
	__free_page(vmcb02_page);
	return -ENOMEM;
}

void svm_free_nested(struct vcpu_svm *svm)
{
	if (!svm->nested.initialized)
		return;

	svm_vcpu_free_msrpm(svm->nested.msrpm);
	svm->nested.msrpm = NULL;

	__free_page(virt_to_page(svm->nested.vmcb02.ptr));
	svm->nested.vmcb02.ptr = NULL;

	/*
	 * When last_vmcb12_gpa matches the current vmcb12 gpa,
	 * some vmcb12 fields are not loaded if they are marked clean
	 * in the vmcb12, since in this case they are up to date already.
	 *
	 * When the vmcb02 is freed, this optimization becomes invalid.
	 */
	svm->nested.last_vmcb12_gpa = INVALID_GPA;

	svm->nested.initialized = false;
}

/*
 * Forcibly leave nested mode in order to be able to reset the VCPU later on.
 */
void svm_leave_nested(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (is_guest_mode(vcpu)) {
		svm->nested.nested_run_pending = 0;
		svm->nested.vmcb12_gpa = INVALID_GPA;

		leave_guest_mode(vcpu);

		svm_switch_vmcb(svm, &svm->vmcb01);

		nested_svm_uninit_mmu_context(vcpu);
		vmcb_mark_all_dirty(svm->vmcb);
	}

	kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
}

static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
{
	u32 offset, msr, value;
	int write, mask;

	if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
		return NESTED_EXIT_HOST;

	msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	offset = svm_msrpm_offset(msr);
	write = svm->vmcb->control.exit_info_1 & 1;
	mask = 1 << ((2 * (msr & 0xf)) + write);

	if (offset == MSR_INVALID)
		return NESTED_EXIT_DONE;

	/* The offset is in 32-bit units; convert it to a byte offset. */
	offset *= 4;

	if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.ctl.msrpm_base_pa + offset, &value, 4))
		return NESTED_EXIT_DONE;

	return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

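/*
 * Consult L1's I/O permission map to decide whether the I/O instruction
 * that exited must be reflected to L1 as a #VMEXIT.
 */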
static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
{
	unsigned port, size, iopm_len;
	u16 val, mask;
	u8 start_bit;
	u64 gpa;

	if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_IOIO_PROT)))
		return NESTED_EXIT_HOST;

	port = svm->vmcb->control.exit_info_1 >> 16;
	size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
		SVM_IOIO_SIZE_SHIFT;
	gpa = svm->nested.ctl.iopm_base_pa + (port / 8);
	start_bit = port % 8;
	iopm_len = (start_bit + size > 8) ? 2 : 1;
	mask = (0xf >> (4 - size)) << start_bit;
	val = 0;

	if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
		return NESTED_EXIT_DONE;

	return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

static int nested_svm_intercept(struct vcpu_svm *svm)
{
	u32 exit_code = svm->vmcb->control.exit_code;
	int vmexit = NESTED_EXIT_HOST;

	switch (exit_code) {
	case SVM_EXIT_MSR:
		vmexit = nested_svm_exit_handled_msr(svm);
		break;
	case SVM_EXIT_IOIO:
		vmexit = nested_svm_intercept_ioio(svm);
		break;
	case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
		if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
		if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
		/*
		 * Host-intercepted exceptions have been checked already in
		 * nested_svm_exit_special.  There is nothing to do here,
		 * the vmexit is injected by svm_check_nested_events.
		 */
		vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_ERR: {
		vmexit = NESTED_EXIT_DONE;
		break;
	}
	default: {
		if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
			vmexit = NESTED_EXIT_DONE;
	}
	}

	return vmexit;
}

int nested_svm_exit_handled(struct vcpu_svm *svm)
{
	int vmexit;

	vmexit = nested_svm_intercept(svm);

	if (vmexit == NESTED_EXIT_DONE)
		nested_svm_vmexit(svm);

	return vmexit;
}

int nested_svm_check_permissions(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.efer & EFER_SVME) || !is_paging(vcpu)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	if (to_svm(vcpu)->vmcb->save.cpl) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	return 0;
}

static bool nested_exit_on_exception(struct vcpu_svm *svm)
{
	unsigned int nr = svm->vcpu.arch.exception.nr;

	return (svm->nested.ctl.intercepts[INTERCEPT_EXCEPTION] & BIT(nr));
}

static void nested_svm_inject_exception_vmexit(struct vcpu_svm *svm)
{
	unsigned int nr = svm->vcpu.arch.exception.nr;
	struct vmcb *vmcb = svm->vmcb;

	vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
	vmcb->control.exit_code_hi = 0;

	if (svm->vcpu.arch.exception.has_error_code)
		vmcb->control.exit_info_1 = svm->vcpu.arch.exception.error_code;

	/*
	 * EXITINFO2 is undefined for all exception intercepts other
	 * than #PF.
	 */
	if (nr == PF_VECTOR) {
		if (svm->vcpu.arch.exception.nested_apf)
			vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token;
		else if (svm->vcpu.arch.exception.has_payload)
			vmcb->control.exit_info_2 = svm->vcpu.arch.exception.payload;
		else
			vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
	} else if (nr == DB_VECTOR) {
		/* See inject_pending_event.  */
		kvm_deliver_exception_payload(&svm->vcpu);
		if (svm->vcpu.arch.dr7 & DR7_GD) {
			svm->vcpu.arch.dr7 &= ~DR7_GD;
			kvm_update_dr7(&svm->vcpu);
		}
	} else
		WARN_ON(svm->vcpu.arch.exception.has_payload);

	nested_svm_vmexit(svm);
}

static inline bool nested_exit_on_init(struct vcpu_svm *svm)
{
	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_INIT);
}

static int svm_check_nested_events(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	bool block_nested_events =
		kvm_event_needs_reinjection(vcpu) || svm->nested.nested_run_pending;
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (lapic_in_kernel(vcpu) &&
	    test_bit(KVM_APIC_INIT, &apic->pending_events)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_init(svm))
			return 0;
		nested_svm_simple_vmexit(svm, SVM_EXIT_INIT);
		return 0;
	}

	if (vcpu->arch.exception.pending) {
		/*
		 * Only a pending nested run can block a pending exception.
		 * Otherwise an injected NMI/interrupt should either be
		 * lost or delivered to the nested hypervisor in the EXITINTINFO
		 * vmcb field, while delivering the pending exception.
		 */
		if (svm->nested.nested_run_pending)
			return -EBUSY;
		if (!nested_exit_on_exception(svm))
			return 0;
		nested_svm_inject_exception_vmexit(svm);
		return 0;
	}

	if (vcpu->arch.smi_pending && !svm_smi_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_smi(svm))
			return 0;
		nested_svm_simple_vmexit(svm, SVM_EXIT_SMI);
		return 0;
	}

	if (vcpu->arch.nmi_pending && !svm_nmi_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_nmi(svm))
			return 0;
		nested_svm_simple_vmexit(svm, SVM_EXIT_NMI);
		return 0;
	}

	if (kvm_cpu_has_interrupt(vcpu) && !svm_interrupt_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_intr(svm))
			return 0;
		trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
		nested_svm_simple_vmexit(svm, SVM_EXIT_INTR);
		return 0;
	}

	return 0;
}

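/*
 * Identify exits that L0 must handle itself and therefore never reflects to
 * L1, regardless of L1's intercept settings.
 */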
int nested_svm_exit_special(struct vcpu_svm *svm)
{
	u32 exit_code = svm->vmcb->control.exit_code;

	switch (exit_code) {
	case SVM_EXIT_INTR:
	case SVM_EXIT_NMI:
	case SVM_EXIT_NPF:
		return NESTED_EXIT_HOST;
	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
		u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);

		if (svm->vmcb01.ptr->control.intercepts[INTERCEPT_EXCEPTION] &
		    excp_bits)
			return NESTED_EXIT_HOST;
		else if (exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR &&
			 svm->vcpu.arch.apf.host_apf_flags)
			/* Trap async PF even if not shadowing */
			return NESTED_EXIT_HOST;
		break;
	}
	default:
		break;
	}

	return NESTED_EXIT_CONTINUE;
}

void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	vcpu->arch.tsc_scaling_ratio =
		kvm_calc_nested_tsc_multiplier(vcpu->arch.l1_tsc_scaling_ratio,
					       svm->tsc_ratio_msr);
	svm_write_tsc_multiplier(vcpu, vcpu->arch.tsc_scaling_ratio);
}

/* Inverse operation of nested_copy_vmcb_control_to_cache(). asid is copied too. */
static void nested_copy_vmcb_cache_to_control(struct vmcb_control_area *dst,
					      struct vmcb_ctrl_area_cached *from)
{
	unsigned int i;

	memset(dst, 0, sizeof(struct vmcb_control_area));

	for (i = 0; i < MAX_INTERCEPT; i++)
		dst->intercepts[i] = from->intercepts[i];

	dst->iopm_base_pa = from->iopm_base_pa;
	dst->msrpm_base_pa = from->msrpm_base_pa;
	dst->tsc_offset = from->tsc_offset;
	dst->asid = from->asid;
	dst->tlb_ctl = from->tlb_ctl;
	dst->int_ctl = from->int_ctl;
	dst->int_vector = from->int_vector;
	dst->int_state = from->int_state;
	dst->exit_code = from->exit_code;
	dst->exit_code_hi = from->exit_code_hi;
	dst->exit_info_1 = from->exit_info_1;
	dst->exit_info_2 = from->exit_info_2;
	dst->exit_int_info = from->exit_int_info;
	dst->exit_int_info_err = from->exit_int_info_err;
	dst->nested_ctl = from->nested_ctl;
	dst->event_inj = from->event_inj;
	dst->event_inj_err = from->event_inj_err;
	dst->nested_cr3 = from->nested_cr3;
	dst->virt_ext = from->virt_ext;
	dst->pause_filter_count = from->pause_filter_count;
	dst->pause_filter_thresh = from->pause_filter_thresh;
	/* 'clean' and 'reserved_sw' are not changed by KVM */
}

static int svm_get_nested_state(struct kvm_vcpu *vcpu,
				struct kvm_nested_state __user *user_kvm_nested_state,
				u32 user_data_size)
{
	struct vcpu_svm *svm;
	struct vmcb_control_area *ctl;
	unsigned long r;
	struct kvm_nested_state kvm_state = {
		.flags = 0,
		.format = KVM_STATE_NESTED_FORMAT_SVM,
		.size = sizeof(kvm_state),
	};
	struct vmcb __user *user_vmcb = (struct vmcb __user *)
		&user_kvm_nested_state->data.svm[0];

	if (!vcpu)
		return kvm_state.size + KVM_STATE_NESTED_SVM_VMCB_SIZE;

	svm = to_svm(vcpu);

	if (user_data_size < kvm_state.size)
		goto out;

	/* First fill in the header and copy it out.  */
	if (is_guest_mode(vcpu)) {
		kvm_state.hdr.svm.vmcb_pa = svm->nested.vmcb12_gpa;
		kvm_state.size += KVM_STATE_NESTED_SVM_VMCB_SIZE;
		kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;

		if (svm->nested.nested_run_pending)
			kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
	}

	if (gif_set(svm))
		kvm_state.flags |= KVM_STATE_NESTED_GIF_SET;

	if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
		return -EFAULT;

	if (!is_guest_mode(vcpu))
		goto out;

	/*
	 * Copy over the full size of the VMCB rather than just the size
	 * of the structs.
	 */
	if (clear_user(user_vmcb, KVM_STATE_NESTED_SVM_VMCB_SIZE))
		return -EFAULT;

	ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
	if (!ctl)
		return -ENOMEM;

	nested_copy_vmcb_cache_to_control(ctl, &svm->nested.ctl);
	r = copy_to_user(&user_vmcb->control, ctl,
			 sizeof(user_vmcb->control));
	kfree(ctl);
	if (r)
		return -EFAULT;

	if (copy_to_user(&user_vmcb->save, &svm->vmcb01.ptr->save,
			 sizeof(user_vmcb->save)))
		return -EFAULT;
out:
	return kvm_state.size;
}

static int svm_set_nested_state(struct kvm_vcpu *vcpu,
				struct kvm_nested_state __user *user_kvm_nested_state,
				struct kvm_nested_state *kvm_state)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb __user *user_vmcb = (struct vmcb __user *)
		&user_kvm_nested_state->data.svm[0];
	struct vmcb_control_area *ctl;
	struct vmcb_save_area *save;
	struct vmcb_save_area_cached save_cached;
	struct vmcb_ctrl_area_cached ctl_cached;
	unsigned long cr0;
	int ret;

	BUILD_BUG_ON(sizeof(struct vmcb_control_area) + sizeof(struct vmcb_save_area) >
		     KVM_STATE_NESTED_SVM_VMCB_SIZE);

	if (kvm_state->format != KVM_STATE_NESTED_FORMAT_SVM)
		return -EINVAL;

	if (kvm_state->flags & ~(KVM_STATE_NESTED_GUEST_MODE |
				 KVM_STATE_NESTED_RUN_PENDING |
				 KVM_STATE_NESTED_GIF_SET))
		return -EINVAL;

	/*
	 * If in guest mode, vcpu->arch.efer actually refers to the L2 guest's
	 * EFER.SVME, but EFER.SVME still has to be 1 for VMRUN to succeed.
	 */
	if (!(vcpu->arch.efer & EFER_SVME)) {
		/* GIF=1 and no guest mode are required if SVME=0.  */
		if (kvm_state->flags != KVM_STATE_NESTED_GIF_SET)
			return -EINVAL;
	}

	/* SMM temporarily disables SVM, so we cannot be in guest mode.  */
	if (is_smm(vcpu) && (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
		return -EINVAL;

	if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) {
		svm_leave_nested(vcpu);
		svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
		return 0;
	}

	if (!page_address_valid(vcpu, kvm_state->hdr.svm.vmcb_pa))
		return -EINVAL;
	if (kvm_state->size < sizeof(*kvm_state) + KVM_STATE_NESTED_SVM_VMCB_SIZE)
		return -EINVAL;

	ret = -ENOMEM;
	ctl = kzalloc(sizeof(*ctl), GFP_KERNEL_ACCOUNT);
	save = kzalloc(sizeof(*save), GFP_KERNEL_ACCOUNT);
	if (!ctl || !save)
		goto out_free;

	ret = -EFAULT;
	if (copy_from_user(ctl, &user_vmcb->control, sizeof(*ctl)))
		goto out_free;
	if (copy_from_user(save, &user_vmcb->save, sizeof(*save)))
		goto out_free;

	ret = -EINVAL;
	__nested_copy_vmcb_control_to_cache(vcpu, &ctl_cached, ctl);
	if (!__nested_vmcb_check_controls(vcpu, &ctl_cached))
		goto out_free;

	/*
	 * Processor state contains L2 state.  Check that it is
	 * valid for guest mode (see nested_vmcb_check_save).
	 */
	cr0 = kvm_read_cr0(vcpu);
	if (((cr0 & X86_CR0_CD) == 0) && (cr0 & X86_CR0_NW))
		goto out_free;

	/*
	 * Validate host state saved from before VMRUN (see
	 * nested_svm_check_permissions).
	 */
	__nested_copy_vmcb_save_to_cache(&save_cached, save);
	if (!(save->cr0 & X86_CR0_PG) ||
	    !(save->cr0 & X86_CR0_PE) ||
	    (save->rflags & X86_EFLAGS_VM) ||
	    !__nested_vmcb_check_save(vcpu, &save_cached))
		goto out_free;


	/*
	 * All checks done, we can enter guest mode.  Userspace provides
	 * vmcb12.control, which will be combined with L1 and stored into
	 * vmcb02, and the L1 save state which we store in vmcb01.
	 * L2 registers if needed are moved from the current VMCB to VMCB02.
	 */

	if (is_guest_mode(vcpu))
		svm_leave_nested(vcpu);
	else
		svm->nested.vmcb02.ptr->save = svm->vmcb01.ptr->save;

	svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));

	svm->nested.nested_run_pending =
		!!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);

	svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa;

	svm_copy_vmrun_state(&svm->vmcb01.ptr->save, save);
	nested_copy_vmcb_control_to_cache(svm, ctl);

	svm_switch_vmcb(svm, &svm->nested.vmcb02);
	nested_vmcb02_prepare_control(svm);

	/*
	 * While the nested guest CR3 is already checked and set by
	 * KVM_SET_SREGS, it was set before the nested state was loaded, so
	 * the MMU might not be initialized correctly.
	 * Set it again to fix this.
	 */

	ret = nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3,
				  nested_npt_enabled(svm), false);
	if (WARN_ON_ONCE(ret))
		goto out_free;

	svm->nested.force_msr_bitmap_recalc = true;

	kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
	ret = 0;
out_free:
	kfree(save);
	kfree(ctl);

	return ret;
}

static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (WARN_ON(!is_guest_mode(vcpu)))
		return true;

	if (!vcpu->arch.pdptrs_from_userspace &&
	    !nested_npt_enabled(svm) && is_pae_paging(vcpu))
		/*
		 * Reload the guest's PDPTRs since after a migration
		 * the guest CR3 might be restored prior to setting the nested
		 * state which can lead to a load of wrong PDPTRs.
		 */
		if (CC(!load_pdptrs(vcpu, vcpu->arch.cr3)))
			return false;

	if (!nested_svm_vmrun_msrpm(svm)) {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		vcpu->run->internal.suberror =
			KVM_INTERNAL_ERROR_EMULATION;
		vcpu->run->internal.ndata = 0;
		return false;
	}

	return true;
}

struct kvm_x86_nested_ops svm_nested_ops = {
	.leave_nested = svm_leave_nested,
	.check_events = svm_check_nested_events,
	.handle_page_fault_workaround = nested_svm_handle_page_fault_workaround,
	.triple_fault = nested_svm_triple_fault,
	.get_nested_state_pages = svm_get_nested_state_pages,
	.get_state = svm_get_nested_state,
	.set_state = svm_set_nested_state,
};