// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#define pr_fmt(fmt) "SVM: " fmt

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>

#include <asm/msr-index.h>
#include <asm/debugreg.h>

#include "kvm_emulate.h"
#include "trace.h"
#include "mmu.h"
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "svm.h"

#define CC KVM_NESTED_VMENTER_CONSISTENCY_CHECK

static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
                                       struct x86_exception *fault)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        if (svm->vmcb->control.exit_code != SVM_EXIT_NPF) {
                /*
                 * TODO: track the cause of the nested page fault, and
                 * correctly fill in the high bits of exit_info_1.
                 */
                svm->vmcb->control.exit_code = SVM_EXIT_NPF;
                svm->vmcb->control.exit_code_hi = 0;
                svm->vmcb->control.exit_info_1 = (1ULL << 32);
                svm->vmcb->control.exit_info_2 = fault->address;
        }

        svm->vmcb->control.exit_info_1 &= ~0xffffffffULL;
        svm->vmcb->control.exit_info_1 |= fault->error_code;

        nested_svm_vmexit(svm);
}

static void svm_inject_page_fault_nested(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        WARN_ON(!is_guest_mode(vcpu));

        if (vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_EXCEPTION_OFFSET + PF_VECTOR) &&
            !svm->nested.nested_run_pending) {
                svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + PF_VECTOR;
                svm->vmcb->control.exit_code_hi = 0;
                svm->vmcb->control.exit_info_1 = fault->error_code;
                svm->vmcb->control.exit_info_2 = fault->address;
                nested_svm_vmexit(svm);
        } else {
                kvm_inject_page_fault(vcpu, fault);
        }
}

static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        u64 cr3 = svm->nested.ctl.nested_cr3;
        u64 pdpte;
        int ret;

        ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(cr3), &pdpte,
                                       offset_in_page(cr3) + index * 8, 8);
        if (ret)
                return 0;
        return pdpte;
}

static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        return svm->nested.ctl.nested_cr3;
}

static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        WARN_ON(mmu_is_nested(vcpu));

        vcpu->arch.mmu = &vcpu->arch.guest_mmu;

        /*
         * The NPT format depends on L1's CR4 and EFER, which are in vmcb01.
         * Note, when called via KVM_SET_NESTED_STATE, that state may _not_
         * match the current vCPU state.  CR0.WP is explicitly ignored, while
         * CR0.PG is required.
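         * This is also why the call below passes X86_CR0_PG as the CR0 value
         * rather than vmcb01's full CR0.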
         */
        kvm_init_shadow_npt_mmu(vcpu, X86_CR0_PG, svm->vmcb01.ptr->save.cr4,
                                svm->vmcb01.ptr->save.efer,
                                svm->nested.ctl.nested_cr3);
        vcpu->arch.mmu->get_guest_pgd = nested_svm_get_tdp_cr3;
        vcpu->arch.mmu->get_pdptr = nested_svm_get_tdp_pdptr;
        vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
        vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
}

static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
        vcpu->arch.mmu = &vcpu->arch.root_mmu;
        vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}

void recalc_intercepts(struct vcpu_svm *svm)
{
        struct vmcb_control_area *c, *h, *g;
        unsigned int i;

        vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

        if (!is_guest_mode(&svm->vcpu))
                return;

        c = &svm->vmcb->control;
        h = &svm->vmcb01.ptr->control;
        g = &svm->nested.ctl;

        for (i = 0; i < MAX_INTERCEPT; i++)
                c->intercepts[i] = h->intercepts[i];

        if (g->int_ctl & V_INTR_MASKING_MASK) {
                /* We only want the cr8 intercept bits of L1 */
                vmcb_clr_intercept(c, INTERCEPT_CR8_READ);
                vmcb_clr_intercept(c, INTERCEPT_CR8_WRITE);

                /*
                 * Once running L2 with HF_VINTR_MASK, EFLAGS.IF does not
                 * affect any interrupt we may want to inject; therefore,
                 * interrupt window vmexits are irrelevant to L0.
                 */
                vmcb_clr_intercept(c, INTERCEPT_VINTR);
        }

        /* We don't want to see VMMCALLs from a nested guest */
        vmcb_clr_intercept(c, INTERCEPT_VMMCALL);

        for (i = 0; i < MAX_INTERCEPT; i++)
                c->intercepts[i] |= g->intercepts[i];
}

static void copy_vmcb_control_area(struct vmcb_control_area *dst,
                                   struct vmcb_control_area *from)
{
        unsigned int i;

        for (i = 0; i < MAX_INTERCEPT; i++)
                dst->intercepts[i] = from->intercepts[i];

        dst->iopm_base_pa = from->iopm_base_pa;
        dst->msrpm_base_pa = from->msrpm_base_pa;
        dst->tsc_offset = from->tsc_offset;
        /* asid not copied, it is handled manually for svm->vmcb.  */
        dst->tlb_ctl = from->tlb_ctl;
        dst->int_ctl = from->int_ctl;
        dst->int_vector = from->int_vector;
        dst->int_state = from->int_state;
        dst->exit_code = from->exit_code;
        dst->exit_code_hi = from->exit_code_hi;
        dst->exit_info_1 = from->exit_info_1;
        dst->exit_info_2 = from->exit_info_2;
        dst->exit_int_info = from->exit_int_info;
        dst->exit_int_info_err = from->exit_int_info_err;
        dst->nested_ctl = from->nested_ctl;
        dst->event_inj = from->event_inj;
        dst->event_inj_err = from->event_inj_err;
        dst->nested_cr3 = from->nested_cr3;
        dst->virt_ext = from->virt_ext;
        dst->pause_filter_count = from->pause_filter_count;
        dst->pause_filter_thresh = from->pause_filter_thresh;
}

static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
{
        /*
         * This function merges the msr permission bitmaps of kvm and the
         * nested vmcb.
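         * A bit that is set in either bitmap intercepts the MSR access, so
         * the merged bitmap is simply the bitwise OR of KVM's bitmap and the
         * one read from L1's msrpm_base_pa.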
         * It is optimized in that it only merges the parts where the kvm msr
         * permission bitmap may contain zero bits.
         */
        int i;

        if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
                return true;

        for (i = 0; i < MSRPM_OFFSETS; i++) {
                u32 value, p;
                u64 offset;

                if (msrpm_offsets[i] == 0xffffffff)
                        break;

                p = msrpm_offsets[i];
                offset = svm->nested.ctl.msrpm_base_pa + (p * 4);

                if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
                        return false;

                svm->nested.msrpm[p] = svm->msrpm[p] | value;
        }

        svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));

        return true;
}

/*
 * Bits 11:0 of bitmap address are ignored by hardware
 */
static bool nested_svm_check_bitmap_pa(struct kvm_vcpu *vcpu, u64 pa, u32 size)
{
        u64 addr = PAGE_ALIGN(pa);

        return kvm_vcpu_is_legal_gpa(vcpu, addr) &&
            kvm_vcpu_is_legal_gpa(vcpu, addr + size - 1);
}

static bool nested_vmcb_check_controls(struct kvm_vcpu *vcpu,
                                       struct vmcb_control_area *control)
{
        if (CC(!vmcb_is_intercept(control, INTERCEPT_VMRUN)))
                return false;

        if (CC(control->asid == 0))
                return false;

        if (CC((control->nested_ctl & SVM_NESTED_CTL_NP_ENABLE) && !npt_enabled))
                return false;

        if (CC(!nested_svm_check_bitmap_pa(vcpu, control->msrpm_base_pa,
                                           MSRPM_SIZE)))
                return false;
        if (CC(!nested_svm_check_bitmap_pa(vcpu, control->iopm_base_pa,
                                           IOPM_SIZE)))
                return false;

        return true;
}

static bool nested_vmcb_check_cr3_cr4(struct kvm_vcpu *vcpu,
                                      struct vmcb_save_area *save)
{
        /*
         * These checks are also performed by KVM_SET_SREGS,
         * except that EFER.LMA is not checked by SVM against
         * CR0.PG && EFER.LME.
         */
        if ((save->efer & EFER_LME) && (save->cr0 & X86_CR0_PG)) {
                if (CC(!(save->cr4 & X86_CR4_PAE)) ||
                    CC(!(save->cr0 & X86_CR0_PE)) ||
                    CC(kvm_vcpu_is_illegal_gpa(vcpu, save->cr3)))
                        return false;
        }

        if (CC(!kvm_is_valid_cr4(vcpu, save->cr4)))
                return false;

        return true;
}

/* Common checks that apply to both L1 and L2 state.  */
static bool nested_vmcb_valid_sregs(struct kvm_vcpu *vcpu,
                                    struct vmcb_save_area *save)
{
        /*
         * FIXME: these should be done after copying the fields,
         * to avoid TOC/TOU races.  For these save area checks
         * the possible damage is limited, since kvm_set_cr0 and
         * kvm_set_cr4 handle failure; EFER_SVME is an exception,
         * so it is force-set later in nested_prepare_vmcb_save.
         */
        if (CC(!(save->efer & EFER_SVME)))
                return false;

        if (CC((save->cr0 & X86_CR0_CD) == 0 && (save->cr0 & X86_CR0_NW)) ||
            CC(save->cr0 & ~0xffffffffULL))
                return false;

        if (CC(!kvm_dr6_valid(save->dr6)) || CC(!kvm_dr7_valid(save->dr7)))
                return false;

        if (!nested_vmcb_check_cr3_cr4(vcpu, save))
                return false;

        if (CC(!kvm_valid_efer(vcpu, save->efer)))
                return false;

        return true;
}

static void nested_load_control_from_vmcb12(struct vcpu_svm *svm,
                                            struct vmcb_control_area *control)
{
        copy_vmcb_control_area(&svm->nested.ctl, control);

        /*
         * Copy it here because nested_svm_check_controls will check it.
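         * The bitmap base addresses are also masked here because only bits
         * 63:12 of msrpm_base_pa and iopm_base_pa are used by hardware.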
         */
        svm->nested.ctl.asid = control->asid;
        svm->nested.ctl.msrpm_base_pa &= ~0x0fffULL;
        svm->nested.ctl.iopm_base_pa &= ~0x0fffULL;
}

/*
 * Synchronize fields that are written by the processor, so that
 * they can be copied back into the vmcb12.
 */
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm)
{
        u32 mask;

        svm->nested.ctl.event_inj = svm->vmcb->control.event_inj;
        svm->nested.ctl.event_inj_err = svm->vmcb->control.event_inj_err;

        /* Only a few fields of int_ctl are written by the processor.  */
        mask = V_IRQ_MASK | V_TPR_MASK;
        if (!(svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK) &&
            svm_is_intercept(svm, INTERCEPT_VINTR)) {
                /*
                 * In order to request an interrupt window, L0 is usurping
                 * svm->vmcb->control.int_ctl and possibly setting V_IRQ
                 * even if it was clear in L1's VMCB.  Restoring it would be
                 * wrong.  However, in this case V_IRQ will remain true until
                 * interrupt_window_interception calls svm_clear_vintr and
                 * restores int_ctl.  We can just leave it aside.
                 */
                mask &= ~V_IRQ_MASK;
        }
        svm->nested.ctl.int_ctl &= ~mask;
        svm->nested.ctl.int_ctl |= svm->vmcb->control.int_ctl & mask;
}

/*
 * Transfer any event that L0 or L1 wanted to inject into L2 to
 * EXIT_INT_INFO.
 */
static void nested_save_pending_event_to_vmcb12(struct vcpu_svm *svm,
                                                struct vmcb *vmcb12)
{
        struct kvm_vcpu *vcpu = &svm->vcpu;
        u32 exit_int_info = 0;
        unsigned int nr;

        if (vcpu->arch.exception.injected) {
                nr = vcpu->arch.exception.nr;
                exit_int_info = nr | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT;

                if (vcpu->arch.exception.has_error_code) {
                        exit_int_info |= SVM_EVTINJ_VALID_ERR;
                        vmcb12->control.exit_int_info_err =
                                vcpu->arch.exception.error_code;
                }

        } else if (vcpu->arch.nmi_injected) {
                exit_int_info = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;

        } else if (vcpu->arch.interrupt.injected) {
                nr = vcpu->arch.interrupt.nr;
                exit_int_info = nr | SVM_EVTINJ_VALID;

                if (vcpu->arch.interrupt.soft)
                        exit_int_info |= SVM_EVTINJ_TYPE_SOFT;
                else
                        exit_int_info |= SVM_EVTINJ_TYPE_INTR;
        }

        vmcb12->control.exit_int_info = exit_int_info;
}

static inline bool nested_npt_enabled(struct vcpu_svm *svm)
{
        return svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE;
}

static void nested_svm_transition_tlb_flush(struct kvm_vcpu *vcpu)
{
        /*
         * TODO: optimize unconditional TLB flush/MMU sync.  A partial list of
         * things to fix before this can be conditional:
         *
         *  - Flush TLBs for both L1 and L2 remote TLB flush
         *  - Honor L1's request to flush an ASID on nested VMRUN
         *  - Sync nested NPT MMU on VMRUN that flushes L2's ASID[*]
         *  - Don't crush a pending TLB flush in vmcb02 on nested VMRUN
         *  - Flush L1's ASID on KVM_REQ_TLB_FLUSH_GUEST
         *
         * [*] Unlike nested EPT, SVM's ASID management can invalidate nested
         *     NPT guest-physical mappings on VMRUN.
         */
        kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
        kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
}

/*
 * Load guest's/host's cr3 on nested vmentry or vmexit.  @nested_npt is true
 * if we are emulating VM-Entry into a guest with NPT enabled.
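 * With nested NPT the new CR3 is a purely guest-managed table, so the PDPTRs
 * are not reloaded and no new shadow root is loaded for it; without NPT the
 * value becomes the root of the shadow MMU via kvm_mmu_new_pgd().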
 */
static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
                               bool nested_npt, bool reload_pdptrs)
{
        if (CC(kvm_vcpu_is_illegal_gpa(vcpu, cr3)))
                return -EINVAL;

        if (reload_pdptrs && !nested_npt && is_pae_paging(vcpu) &&
            CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)))
                return -EINVAL;

        if (!nested_npt)
                kvm_mmu_new_pgd(vcpu, cr3);

        vcpu->arch.cr3 = cr3;
        kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);

        /* Re-initialize the MMU, e.g. to pick up CR4 MMU role changes.  */
        kvm_init_mmu(vcpu);

        return 0;
}

void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm)
{
        if (!svm->nested.vmcb02.ptr)
                return;

        /* FIXME: merge g_pat from vmcb01 and vmcb12.  */
        svm->nested.vmcb02.ptr->save.g_pat = svm->vmcb01.ptr->save.g_pat;
}

static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
{
        bool new_vmcb12 = false;

        nested_vmcb02_compute_g_pat(svm);

        /* Load the nested guest state */
        if (svm->nested.vmcb12_gpa != svm->nested.last_vmcb12_gpa) {
                new_vmcb12 = true;
                svm->nested.last_vmcb12_gpa = svm->nested.vmcb12_gpa;
        }

        if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_SEG))) {
                svm->vmcb->save.es = vmcb12->save.es;
                svm->vmcb->save.cs = vmcb12->save.cs;
                svm->vmcb->save.ss = vmcb12->save.ss;
                svm->vmcb->save.ds = vmcb12->save.ds;
                svm->vmcb->save.cpl = vmcb12->save.cpl;
                vmcb_mark_dirty(svm->vmcb, VMCB_SEG);
        }

        if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_DT))) {
                svm->vmcb->save.gdtr = vmcb12->save.gdtr;
                svm->vmcb->save.idtr = vmcb12->save.idtr;
                vmcb_mark_dirty(svm->vmcb, VMCB_DT);
        }

        kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED);

        /*
         * Force-set EFER_SVME even though it is checked earlier on the
         * VMCB12, because the guest can flip the bit between the check
         * and now.  Clearing EFER_SVME would call svm_free_nested.
         */
        svm_set_efer(&svm->vcpu, vmcb12->save.efer | EFER_SVME);

        svm_set_cr0(&svm->vcpu, vmcb12->save.cr0);
        svm_set_cr4(&svm->vcpu, vmcb12->save.cr4);

        svm->vcpu.arch.cr2 = vmcb12->save.cr2;

        kvm_rax_write(&svm->vcpu, vmcb12->save.rax);
        kvm_rsp_write(&svm->vcpu, vmcb12->save.rsp);
        kvm_rip_write(&svm->vcpu, vmcb12->save.rip);

        /* In case we don't even reach vcpu_run, the fields are not updated */
        svm->vmcb->save.rax = vmcb12->save.rax;
        svm->vmcb->save.rsp = vmcb12->save.rsp;
        svm->vmcb->save.rip = vmcb12->save.rip;

        /* These bits will be set properly on the first execution when new_vmcb12 is true */
        if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_DR))) {
                svm->vmcb->save.dr7 = vmcb12->save.dr7 | DR7_FIXED_1;
                svm->vcpu.arch.dr6 = vmcb12->save.dr6 | DR6_ACTIVE_LOW;
                vmcb_mark_dirty(svm->vmcb, VMCB_DR);
        }
}

static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
{
        const u32 mask = V_INTR_MASKING_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK;
        struct kvm_vcpu *vcpu = &svm->vcpu;

        /*
         * Filled at exit: exit_code, exit_code_hi, exit_info_1, exit_info_2,
         * exit_int_info, exit_int_info_err, next_rip, insn_len, insn_bytes.
         */

        /*
         * Also covers avic_vapic_bar, avic_backing_page, avic_logical_id,
         * avic_physical_id.
         */
        WARN_ON(svm->vmcb01.ptr->control.int_ctl & AVIC_ENABLE_MASK);

        /*
         * Copied from vmcb01.  msrpm_base can be overwritten later.
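         * (nested_svm_vmrun_msrpm() repoints msrpm_base_pa at the merged
         * L0/L1 bitmap when vmcb12 enables MSR intercepts.)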
         */
        svm->vmcb->control.nested_ctl = svm->vmcb01.ptr->control.nested_ctl;
        svm->vmcb->control.iopm_base_pa = svm->vmcb01.ptr->control.iopm_base_pa;
        svm->vmcb->control.msrpm_base_pa = svm->vmcb01.ptr->control.msrpm_base_pa;

        /* Done at vmrun: asid.  */

        /* Also overwritten later if necessary.  */
        svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;

        /* nested_cr3.  */
        if (nested_npt_enabled(svm))
                nested_svm_init_mmu_context(vcpu);

        svm->vmcb->control.tsc_offset = vcpu->arch.tsc_offset =
                vcpu->arch.l1_tsc_offset + svm->nested.ctl.tsc_offset;

        svm->vmcb->control.int_ctl =
                (svm->nested.ctl.int_ctl & ~mask) |
                (svm->vmcb01.ptr->control.int_ctl & mask);

        svm->vmcb->control.virt_ext = svm->nested.ctl.virt_ext;
        svm->vmcb->control.int_vector = svm->nested.ctl.int_vector;
        svm->vmcb->control.int_state = svm->nested.ctl.int_state;
        svm->vmcb->control.event_inj = svm->nested.ctl.event_inj;
        svm->vmcb->control.event_inj_err = svm->nested.ctl.event_inj_err;

        svm->vmcb->control.pause_filter_count = svm->nested.ctl.pause_filter_count;
        svm->vmcb->control.pause_filter_thresh = svm->nested.ctl.pause_filter_thresh;

        nested_svm_transition_tlb_flush(vcpu);

        /* Enter Guest-Mode */
        enter_guest_mode(vcpu);

        /*
         * Merge guest and host intercepts - must be called with vcpu in
         * guest-mode to take effect.
         */
        recalc_intercepts(svm);
}

static void nested_svm_copy_common_state(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
{
        /*
         * Some VMCB state is shared between L1 and L2 and thus has to be
         * moved at the time of nested vmrun and vmexit.
         *
         * VMLOAD/VMSAVE state would also belong in this category, but KVM
         * always performs VMLOAD and VMSAVE from the VMCB01.
         */
        to_vmcb->save.spec_ctrl = from_vmcb->save.spec_ctrl;
}

int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
                         struct vmcb *vmcb12)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        int ret;

        trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb12_gpa,
                               vmcb12->save.rip,
                               vmcb12->control.int_ctl,
                               vmcb12->control.event_inj,
                               vmcb12->control.nested_ctl);

        trace_kvm_nested_intercepts(vmcb12->control.intercepts[INTERCEPT_CR] & 0xffff,
                                    vmcb12->control.intercepts[INTERCEPT_CR] >> 16,
                                    vmcb12->control.intercepts[INTERCEPT_EXCEPTION],
                                    vmcb12->control.intercepts[INTERCEPT_WORD3],
                                    vmcb12->control.intercepts[INTERCEPT_WORD4],
                                    vmcb12->control.intercepts[INTERCEPT_WORD5]);

        svm->nested.vmcb12_gpa = vmcb12_gpa;

        WARN_ON(svm->vmcb == svm->nested.vmcb02.ptr);

        nested_svm_copy_common_state(svm->vmcb01.ptr, svm->nested.vmcb02.ptr);

        svm_switch_vmcb(svm, &svm->nested.vmcb02);
        nested_vmcb02_prepare_control(svm);
        nested_vmcb02_prepare_save(svm, vmcb12);

        ret = nested_svm_load_cr3(&svm->vcpu, vmcb12->save.cr3,
                                  nested_npt_enabled(svm), true);
        if (ret)
                return ret;

        if (!npt_enabled)
                vcpu->arch.mmu->inject_page_fault = svm_inject_page_fault_nested;

        svm_set_gif(svm, true);

        return 0;
}

int nested_svm_vmrun(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        int ret;
        struct vmcb *vmcb12;
        struct kvm_host_map map;
        u64 vmcb12_gpa;

        if (is_smm(vcpu)) {
                kvm_queue_exception(vcpu, UD_VECTOR);
                return 1;
        }

        vmcb12_gpa = svm->vmcb->save.rax;
        ret = kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map);
        if (ret == -EINVAL) {
                kvm_inject_gp(vcpu, 0);
                return 1;
        } else if (ret) {
                return kvm_skip_emulated_instruction(vcpu);
        }

        ret = kvm_skip_emulated_instruction(vcpu);

        vmcb12 = map.hva;

        if (WARN_ON_ONCE(!svm->nested.initialized))
                return -EINVAL;

        nested_load_control_from_vmcb12(svm, &vmcb12->control);

        if (!nested_vmcb_valid_sregs(vcpu, &vmcb12->save) ||
            !nested_vmcb_check_controls(vcpu, &svm->nested.ctl)) {
                vmcb12->control.exit_code = SVM_EXIT_ERR;
                vmcb12->control.exit_code_hi = 0;
                vmcb12->control.exit_info_1 = 0;
                vmcb12->control.exit_info_2 = 0;
                goto out;
        }

        /* Clear internal status */
        kvm_clear_exception_queue(vcpu);
        kvm_clear_interrupt_queue(vcpu);

        /*
         * Since vmcb01 is not in use, we can use it to store some of the L1
         * state.
         */
        svm->vmcb01.ptr->save.efer = vcpu->arch.efer;
        svm->vmcb01.ptr->save.cr0 = kvm_read_cr0(vcpu);
        svm->vmcb01.ptr->save.cr4 = vcpu->arch.cr4;
        svm->vmcb01.ptr->save.rflags = kvm_get_rflags(vcpu);
        svm->vmcb01.ptr->save.rip = kvm_rip_read(vcpu);

        if (!npt_enabled)
                svm->vmcb01.ptr->save.cr3 = kvm_read_cr3(vcpu);

        svm->nested.nested_run_pending = 1;

        if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12))
                goto out_exit_err;

        if (nested_svm_vmrun_msrpm(svm))
                goto out;

out_exit_err:
        svm->nested.nested_run_pending = 0;

        svm->vmcb->control.exit_code = SVM_EXIT_ERR;
        svm->vmcb->control.exit_code_hi = 0;
        svm->vmcb->control.exit_info_1 = 0;
        svm->vmcb->control.exit_info_2 = 0;

        nested_svm_vmexit(svm);

out:
        kvm_vcpu_unmap(vcpu, &map, true);

        return ret;
}

void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
{
        to_vmcb->save.fs = from_vmcb->save.fs;
        to_vmcb->save.gs = from_vmcb->save.gs;
        to_vmcb->save.tr = from_vmcb->save.tr;
        to_vmcb->save.ldtr = from_vmcb->save.ldtr;
        to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
        to_vmcb->save.star = from_vmcb->save.star;
        to_vmcb->save.lstar = from_vmcb->save.lstar;
        to_vmcb->save.cstar = from_vmcb->save.cstar;
        to_vmcb->save.sfmask = from_vmcb->save.sfmask;
        to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
        to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
        to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
}

int nested_svm_vmexit(struct vcpu_svm *svm)
{
        struct kvm_vcpu *vcpu = &svm->vcpu;
        struct vmcb *vmcb12;
        struct vmcb *vmcb = svm->vmcb;
        struct kvm_host_map map;
        int rc;

        /*
         * Triple faults in L2 should never escape.
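         * nested_svm_triple_fault() reflects them to L1 as an
         * SVM_EXIT_SHUTDOWN vmexit before this point can be reached.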
         */
        WARN_ON_ONCE(kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu));

        rc = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map);
        if (rc) {
                if (rc == -EINVAL)
                        kvm_inject_gp(vcpu, 0);
                return 1;
        }

        vmcb12 = map.hva;

        /* Exit Guest-Mode */
        leave_guest_mode(vcpu);
        svm->nested.vmcb12_gpa = 0;
        WARN_ON_ONCE(svm->nested.nested_run_pending);

        kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);

        /* in case we halted in L2 */
        svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;

        /* Give the current vmcb to the guest */

        vmcb12->save.es = vmcb->save.es;
        vmcb12->save.cs = vmcb->save.cs;
        vmcb12->save.ss = vmcb->save.ss;
        vmcb12->save.ds = vmcb->save.ds;
        vmcb12->save.gdtr = vmcb->save.gdtr;
        vmcb12->save.idtr = vmcb->save.idtr;
        vmcb12->save.efer = svm->vcpu.arch.efer;
        vmcb12->save.cr0 = kvm_read_cr0(vcpu);
        vmcb12->save.cr3 = kvm_read_cr3(vcpu);
        vmcb12->save.cr2 = vmcb->save.cr2;
        vmcb12->save.cr4 = svm->vcpu.arch.cr4;
        vmcb12->save.rflags = kvm_get_rflags(vcpu);
        vmcb12->save.rip = kvm_rip_read(vcpu);
        vmcb12->save.rsp = kvm_rsp_read(vcpu);
        vmcb12->save.rax = kvm_rax_read(vcpu);
        vmcb12->save.dr7 = vmcb->save.dr7;
        vmcb12->save.dr6 = svm->vcpu.arch.dr6;
        vmcb12->save.cpl = vmcb->save.cpl;

        vmcb12->control.int_state = vmcb->control.int_state;
        vmcb12->control.exit_code = vmcb->control.exit_code;
        vmcb12->control.exit_code_hi = vmcb->control.exit_code_hi;
        vmcb12->control.exit_info_1 = vmcb->control.exit_info_1;
        vmcb12->control.exit_info_2 = vmcb->control.exit_info_2;

        if (vmcb12->control.exit_code != SVM_EXIT_ERR)
                nested_save_pending_event_to_vmcb12(svm, vmcb12);

        if (svm->nrips_enabled)
                vmcb12->control.next_rip = vmcb->control.next_rip;

        vmcb12->control.int_ctl = svm->nested.ctl.int_ctl;
        vmcb12->control.tlb_ctl = svm->nested.ctl.tlb_ctl;
        vmcb12->control.event_inj = svm->nested.ctl.event_inj;
        vmcb12->control.event_inj_err = svm->nested.ctl.event_inj_err;

        vmcb12->control.pause_filter_count =
                svm->vmcb->control.pause_filter_count;
        vmcb12->control.pause_filter_thresh =
                svm->vmcb->control.pause_filter_thresh;

        nested_svm_copy_common_state(svm->nested.vmcb02.ptr, svm->vmcb01.ptr);

        svm_switch_vmcb(svm, &svm->vmcb01);

        /*
         * On vmexit the GIF is set to false and
         * no event can be injected in L1.
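         * Any event that was pending for L2 was already transferred to
         * vmcb12's EXITINTINFO above (see nested_save_pending_event_to_vmcb12()),
         * hence exit_int_info is cleared here.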
         */
        svm_set_gif(svm, false);
        svm->vmcb->control.exit_int_info = 0;

        svm->vcpu.arch.tsc_offset = svm->vcpu.arch.l1_tsc_offset;
        if (svm->vmcb->control.tsc_offset != svm->vcpu.arch.tsc_offset) {
                svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset;
                vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
        }

        svm->nested.ctl.nested_cr3 = 0;

        /*
         * Restore processor state that had been saved in vmcb01
         */
        kvm_set_rflags(vcpu, svm->vmcb->save.rflags);
        svm_set_efer(vcpu, svm->vmcb->save.efer);
        svm_set_cr0(vcpu, svm->vmcb->save.cr0 | X86_CR0_PE);
        svm_set_cr4(vcpu, svm->vmcb->save.cr4);
        kvm_rax_write(vcpu, svm->vmcb->save.rax);
        kvm_rsp_write(vcpu, svm->vmcb->save.rsp);
        kvm_rip_write(vcpu, svm->vmcb->save.rip);

        svm->vcpu.arch.dr7 = DR7_FIXED_1;
        kvm_update_dr7(&svm->vcpu);

        trace_kvm_nested_vmexit_inject(vmcb12->control.exit_code,
                                       vmcb12->control.exit_info_1,
                                       vmcb12->control.exit_info_2,
                                       vmcb12->control.exit_int_info,
                                       vmcb12->control.exit_int_info_err,
                                       KVM_ISA_SVM);

        kvm_vcpu_unmap(vcpu, &map, true);

        nested_svm_transition_tlb_flush(vcpu);

        nested_svm_uninit_mmu_context(vcpu);

        rc = nested_svm_load_cr3(vcpu, svm->vmcb->save.cr3, false, true);
        if (rc)
                return 1;

        /*
         * Drop what we picked up for L2 via svm_complete_interrupts() so it
         * doesn't end up in L1.
         */
        svm->vcpu.arch.nmi_injected = false;
        kvm_clear_exception_queue(vcpu);
        kvm_clear_interrupt_queue(vcpu);

        /*
         * If we are here following the completion of a VMRUN that
         * is being single-stepped, queue the pending #DB intercept
         * right now so that it can be accounted for before we execute
         * L1's next instruction.
         */
        if (unlikely(svm->vmcb->save.rflags & X86_EFLAGS_TF))
                kvm_queue_exception(&(svm->vcpu), DB_VECTOR);

        return 0;
}

static void nested_svm_triple_fault(struct kvm_vcpu *vcpu)
{
        nested_svm_simple_vmexit(to_svm(vcpu), SVM_EXIT_SHUTDOWN);
}

int svm_allocate_nested(struct vcpu_svm *svm)
{
        struct page *vmcb02_page;

        if (svm->nested.initialized)
                return 0;

        vmcb02_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
        if (!vmcb02_page)
                return -ENOMEM;
        svm->nested.vmcb02.ptr = page_address(vmcb02_page);
        svm->nested.vmcb02.pa = __sme_set(page_to_pfn(vmcb02_page) << PAGE_SHIFT);

        svm->nested.msrpm = svm_vcpu_alloc_msrpm();
        if (!svm->nested.msrpm)
                goto err_free_vmcb02;
        svm_vcpu_init_msrpm(&svm->vcpu, svm->nested.msrpm);

        svm->nested.initialized = true;
        return 0;

err_free_vmcb02:
        __free_page(vmcb02_page);
        return -ENOMEM;
}

void svm_free_nested(struct vcpu_svm *svm)
{
        if (!svm->nested.initialized)
                return;

        svm_vcpu_free_msrpm(svm->nested.msrpm);
        svm->nested.msrpm = NULL;

        __free_page(virt_to_page(svm->nested.vmcb02.ptr));
        svm->nested.vmcb02.ptr = NULL;

        /*
         * When last_vmcb12_gpa matches the current vmcb12 gpa,
         * some vmcb12 fields are not loaded if they are marked clean
         * in the vmcb12, since in this case they are up to date already.
         *
         * When the vmcb02 is freed, this optimization becomes invalid.
         */
        svm->nested.last_vmcb12_gpa = INVALID_GPA;

        svm->nested.initialized = false;
}

/*
 * Forcibly leave nested mode in order to be able to reset the VCPU later on.
 */
void svm_leave_nested(struct vcpu_svm *svm)
{
        struct kvm_vcpu *vcpu = &svm->vcpu;

        if (is_guest_mode(vcpu)) {
                svm->nested.nested_run_pending = 0;
                svm->nested.vmcb12_gpa = INVALID_GPA;

                leave_guest_mode(vcpu);

                svm_switch_vmcb(svm, &svm->vmcb01);

                nested_svm_uninit_mmu_context(vcpu);
                vmcb_mark_all_dirty(svm->vmcb);
        }

        kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
}

static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
{
        u32 offset, msr, value;
        int write, mask;

        if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
                return NESTED_EXIT_HOST;

        msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
        offset = svm_msrpm_offset(msr);
        write = svm->vmcb->control.exit_info_1 & 1;
        mask = 1 << ((2 * (msr & 0xf)) + write);

        if (offset == MSR_INVALID)
                return NESTED_EXIT_DONE;

        /* Offset is in 32 bit units but we need it in 8 bit units */
        offset *= 4;

        if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.ctl.msrpm_base_pa + offset, &value, 4))
                return NESTED_EXIT_DONE;

        return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
{
        unsigned port, size, iopm_len;
        u16 val, mask;
        u8 start_bit;
        u64 gpa;

        if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_IOIO_PROT)))
                return NESTED_EXIT_HOST;

        port = svm->vmcb->control.exit_info_1 >> 16;
        size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
                SVM_IOIO_SIZE_SHIFT;
        gpa = svm->nested.ctl.iopm_base_pa + (port / 8);
        start_bit = port % 8;
        iopm_len = (start_bit + size > 8) ? 2 : 1;
        mask = (0xf >> (4 - size)) << start_bit;
        val = 0;

        if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
                return NESTED_EXIT_DONE;

        return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

static int nested_svm_intercept(struct vcpu_svm *svm)
{
        u32 exit_code = svm->vmcb->control.exit_code;
        int vmexit = NESTED_EXIT_HOST;

        switch (exit_code) {
        case SVM_EXIT_MSR:
                vmexit = nested_svm_exit_handled_msr(svm);
                break;
        case SVM_EXIT_IOIO:
                vmexit = nested_svm_intercept_ioio(svm);
                break;
        case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
                if (vmcb_is_intercept(&svm->nested.ctl, exit_code))
                        vmexit = NESTED_EXIT_DONE;
                break;
        }
        case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
                if (vmcb_is_intercept(&svm->nested.ctl, exit_code))
                        vmexit = NESTED_EXIT_DONE;
                break;
        }
        case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
                /*
                 * Host-intercepted exceptions have been checked already in
                 * nested_svm_exit_special.  There is nothing to do here,
                 * the vmexit is injected by svm_check_nested_events.
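                 * (svm_check_nested_events() performs that reflection into L1
                 * via nested_svm_inject_exception_vmexit().)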
                 */
                vmexit = NESTED_EXIT_DONE;
                break;
        }
        case SVM_EXIT_ERR: {
                vmexit = NESTED_EXIT_DONE;
                break;
        }
        default: {
                if (vmcb_is_intercept(&svm->nested.ctl, exit_code))
                        vmexit = NESTED_EXIT_DONE;
        }
        }

        return vmexit;
}

int nested_svm_exit_handled(struct vcpu_svm *svm)
{
        int vmexit;

        vmexit = nested_svm_intercept(svm);

        if (vmexit == NESTED_EXIT_DONE)
                nested_svm_vmexit(svm);

        return vmexit;
}

int nested_svm_check_permissions(struct kvm_vcpu *vcpu)
{
        if (!(vcpu->arch.efer & EFER_SVME) || !is_paging(vcpu)) {
                kvm_queue_exception(vcpu, UD_VECTOR);
                return 1;
        }

        if (to_svm(vcpu)->vmcb->save.cpl) {
                kvm_inject_gp(vcpu, 0);
                return 1;
        }

        return 0;
}

static bool nested_exit_on_exception(struct vcpu_svm *svm)
{
        unsigned int nr = svm->vcpu.arch.exception.nr;

        return (svm->nested.ctl.intercepts[INTERCEPT_EXCEPTION] & BIT(nr));
}

static void nested_svm_inject_exception_vmexit(struct vcpu_svm *svm)
{
        unsigned int nr = svm->vcpu.arch.exception.nr;

        svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
        svm->vmcb->control.exit_code_hi = 0;

        if (svm->vcpu.arch.exception.has_error_code)
                svm->vmcb->control.exit_info_1 = svm->vcpu.arch.exception.error_code;

        /*
         * EXITINFO2 is undefined for all exception intercepts other
         * than #PF.
         */
        if (nr == PF_VECTOR) {
                if (svm->vcpu.arch.exception.nested_apf)
                        svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token;
                else if (svm->vcpu.arch.exception.has_payload)
                        svm->vmcb->control.exit_info_2 = svm->vcpu.arch.exception.payload;
                else
                        svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
        } else if (nr == DB_VECTOR) {
                /* See inject_pending_event.  */
                kvm_deliver_exception_payload(&svm->vcpu);
                if (svm->vcpu.arch.dr7 & DR7_GD) {
                        svm->vcpu.arch.dr7 &= ~DR7_GD;
                        kvm_update_dr7(&svm->vcpu);
                }
        } else
                WARN_ON(svm->vcpu.arch.exception.has_payload);

        nested_svm_vmexit(svm);
}

static inline bool nested_exit_on_init(struct vcpu_svm *svm)
{
        return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_INIT);
}

static int svm_check_nested_events(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        bool block_nested_events =
                kvm_event_needs_reinjection(vcpu) || svm->nested.nested_run_pending;
        struct kvm_lapic *apic = vcpu->arch.apic;

        if (lapic_in_kernel(vcpu) &&
            test_bit(KVM_APIC_INIT, &apic->pending_events)) {
                if (block_nested_events)
                        return -EBUSY;
                if (!nested_exit_on_init(svm))
                        return 0;
                nested_svm_simple_vmexit(svm, SVM_EXIT_INIT);
                return 0;
        }

        if (vcpu->arch.exception.pending) {
                /*
                 * Only a pending nested run can block a pending exception.
                 * Otherwise an injected NMI/interrupt should either be
                 * lost or delivered to the nested hypervisor in the EXITINTINFO
                 * vmcb field, while delivering the pending exception.
                 */
                if (svm->nested.nested_run_pending)
                        return -EBUSY;
                if (!nested_exit_on_exception(svm))
                        return 0;
                nested_svm_inject_exception_vmexit(svm);
                return 0;
        }

        if (vcpu->arch.smi_pending && !svm_smi_blocked(vcpu)) {
                if (block_nested_events)
                        return -EBUSY;
                if (!nested_exit_on_smi(svm))
                        return 0;
                nested_svm_simple_vmexit(svm, SVM_EXIT_SMI);
                return 0;
        }

        if (vcpu->arch.nmi_pending && !svm_nmi_blocked(vcpu)) {
                if (block_nested_events)
                        return -EBUSY;
                if (!nested_exit_on_nmi(svm))
                        return 0;
                nested_svm_simple_vmexit(svm, SVM_EXIT_NMI);
                return 0;
        }

        if (kvm_cpu_has_interrupt(vcpu) && !svm_interrupt_blocked(vcpu)) {
                if (block_nested_events)
                        return -EBUSY;
                if (!nested_exit_on_intr(svm))
                        return 0;
                trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
                nested_svm_simple_vmexit(svm, SVM_EXIT_INTR);
                return 0;
        }

        return 0;
}

int nested_svm_exit_special(struct vcpu_svm *svm)
{
        u32 exit_code = svm->vmcb->control.exit_code;

        switch (exit_code) {
        case SVM_EXIT_INTR:
        case SVM_EXIT_NMI:
        case SVM_EXIT_NPF:
                return NESTED_EXIT_HOST;
        case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
                u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);

                if (svm->vmcb01.ptr->control.intercepts[INTERCEPT_EXCEPTION] &
                    excp_bits)
                        return NESTED_EXIT_HOST;
                else if (exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR &&
                         svm->vcpu.arch.apf.host_apf_flags)
                        /* Trap async PF even if not shadowing */
                        return NESTED_EXIT_HOST;
                break;
        }
        default:
                break;
        }

        return NESTED_EXIT_CONTINUE;
}

static int svm_get_nested_state(struct kvm_vcpu *vcpu,
                                struct kvm_nested_state __user *user_kvm_nested_state,
                                u32 user_data_size)
{
        struct vcpu_svm *svm;
        struct kvm_nested_state kvm_state = {
                .flags = 0,
                .format = KVM_STATE_NESTED_FORMAT_SVM,
                .size = sizeof(kvm_state),
        };
        struct vmcb __user *user_vmcb = (struct vmcb __user *)
                &user_kvm_nested_state->data.svm[0];

        if (!vcpu)
                return kvm_state.size + KVM_STATE_NESTED_SVM_VMCB_SIZE;

        svm = to_svm(vcpu);

        if (user_data_size < kvm_state.size)
                goto out;

        /* First fill in the header and copy it out.  */
        if (is_guest_mode(vcpu)) {
                kvm_state.hdr.svm.vmcb_pa = svm->nested.vmcb12_gpa;
                kvm_state.size += KVM_STATE_NESTED_SVM_VMCB_SIZE;
                kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;

                if (svm->nested.nested_run_pending)
                        kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
        }

        if (gif_set(svm))
                kvm_state.flags |= KVM_STATE_NESTED_GIF_SET;

        if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
                return -EFAULT;

        if (!is_guest_mode(vcpu))
                goto out;

        /*
         * Copy over the full size of the VMCB rather than just the size
         * of the structs.
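         * The whole KVM_STATE_NESTED_SVM_VMCB_SIZE area is zeroed with
         * clear_user() first so that the bytes past the two structs are not
         * left stale.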
         */
        if (clear_user(user_vmcb, KVM_STATE_NESTED_SVM_VMCB_SIZE))
                return -EFAULT;
        if (copy_to_user(&user_vmcb->control, &svm->nested.ctl,
                         sizeof(user_vmcb->control)))
                return -EFAULT;
        if (copy_to_user(&user_vmcb->save, &svm->vmcb01.ptr->save,
                         sizeof(user_vmcb->save)))
                return -EFAULT;
out:
        return kvm_state.size;
}

static int svm_set_nested_state(struct kvm_vcpu *vcpu,
                                struct kvm_nested_state __user *user_kvm_nested_state,
                                struct kvm_nested_state *kvm_state)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb __user *user_vmcb = (struct vmcb __user *)
                &user_kvm_nested_state->data.svm[0];
        struct vmcb_control_area *ctl;
        struct vmcb_save_area *save;
        unsigned long cr0;
        int ret;

        BUILD_BUG_ON(sizeof(struct vmcb_control_area) + sizeof(struct vmcb_save_area) >
                     KVM_STATE_NESTED_SVM_VMCB_SIZE);

        if (kvm_state->format != KVM_STATE_NESTED_FORMAT_SVM)
                return -EINVAL;

        if (kvm_state->flags & ~(KVM_STATE_NESTED_GUEST_MODE |
                                 KVM_STATE_NESTED_RUN_PENDING |
                                 KVM_STATE_NESTED_GIF_SET))
                return -EINVAL;

        /*
         * If in guest mode, vcpu->arch.efer actually refers to the L2 guest's
         * EFER.SVME, but EFER.SVME still has to be 1 for VMRUN to succeed.
         */
        if (!(vcpu->arch.efer & EFER_SVME)) {
                /* GIF=1 and no guest mode are required if SVME=0.  */
                if (kvm_state->flags != KVM_STATE_NESTED_GIF_SET)
                        return -EINVAL;
        }

        /* SMM temporarily disables SVM, so we cannot be in guest mode.  */
        if (is_smm(vcpu) && (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
                return -EINVAL;

        if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) {
                svm_leave_nested(svm);
                svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
                return 0;
        }

        if (!page_address_valid(vcpu, kvm_state->hdr.svm.vmcb_pa))
                return -EINVAL;
        if (kvm_state->size < sizeof(*kvm_state) + KVM_STATE_NESTED_SVM_VMCB_SIZE)
                return -EINVAL;

        ret = -ENOMEM;
        ctl = kzalloc(sizeof(*ctl), GFP_KERNEL_ACCOUNT);
        save = kzalloc(sizeof(*save), GFP_KERNEL_ACCOUNT);
        if (!ctl || !save)
                goto out_free;

        ret = -EFAULT;
        if (copy_from_user(ctl, &user_vmcb->control, sizeof(*ctl)))
                goto out_free;
        if (copy_from_user(save, &user_vmcb->save, sizeof(*save)))
                goto out_free;

        ret = -EINVAL;
        if (!nested_vmcb_check_controls(vcpu, ctl))
                goto out_free;

        /*
         * Processor state contains L2 state.  Check that it is
         * valid for guest mode (see nested_vmcb_check_save).
         */
        cr0 = kvm_read_cr0(vcpu);
        if (((cr0 & X86_CR0_CD) == 0) && (cr0 & X86_CR0_NW))
                goto out_free;

        /*
         * Validate host state saved from before VMRUN (see
         * nested_svm_check_permissions).
         */
        if (!(save->cr0 & X86_CR0_PG) ||
            !(save->cr0 & X86_CR0_PE) ||
            (save->rflags & X86_EFLAGS_VM) ||
            !nested_vmcb_valid_sregs(vcpu, save))
                goto out_free;

        /*
         * While the nested guest CR3 is already checked and set by
         * KVM_SET_SREGS, it was set before nested state was loaded, so the
         * MMU might not be initialized correctly.  Set it again to fix this.
         */
        ret = nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3,
                                  nested_npt_enabled(svm), false);
        if (WARN_ON_ONCE(ret))
                goto out_free;

        /*
         * All checks done, we can enter guest mode.
         * Userspace provides vmcb12.control, which will be combined with L1
         * and stored into vmcb02, and the L1 save state which we store in
         * vmcb01.  L2 registers, if needed, are moved from the current VMCB
         * to vmcb02.
         */

        if (is_guest_mode(vcpu))
                svm_leave_nested(svm);
        else
                svm->nested.vmcb02.ptr->save = svm->vmcb01.ptr->save;

        svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));

        svm->nested.nested_run_pending =
                !!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);

        svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa;

        svm->vmcb01.ptr->save.es = save->es;
        svm->vmcb01.ptr->save.cs = save->cs;
        svm->vmcb01.ptr->save.ss = save->ss;
        svm->vmcb01.ptr->save.ds = save->ds;
        svm->vmcb01.ptr->save.gdtr = save->gdtr;
        svm->vmcb01.ptr->save.idtr = save->idtr;
        svm->vmcb01.ptr->save.rflags = save->rflags | X86_EFLAGS_FIXED;
        svm->vmcb01.ptr->save.efer = save->efer;
        svm->vmcb01.ptr->save.cr0 = save->cr0;
        svm->vmcb01.ptr->save.cr3 = save->cr3;
        svm->vmcb01.ptr->save.cr4 = save->cr4;
        svm->vmcb01.ptr->save.rax = save->rax;
        svm->vmcb01.ptr->save.rsp = save->rsp;
        svm->vmcb01.ptr->save.rip = save->rip;
        svm->vmcb01.ptr->save.cpl = 0;

        nested_load_control_from_vmcb12(svm, ctl);

        svm_switch_vmcb(svm, &svm->nested.vmcb02);

        nested_vmcb02_prepare_control(svm);

        kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
        ret = 0;
out_free:
        kfree(save);
        kfree(ctl);

        return ret;
}

static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        if (WARN_ON(!is_guest_mode(vcpu)))
                return true;

        if (!vcpu->arch.pdptrs_from_userspace &&
            !nested_npt_enabled(svm) && is_pae_paging(vcpu)) {
                /*
                 * Reload the guest's PDPTRs since after a migration
                 * the guest CR3 might be restored prior to setting the nested
                 * state which can lead to a load of wrong PDPTRs.
                 */
                if (CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3)))
                        return false;
        }

        if (!nested_svm_vmrun_msrpm(svm)) {
                vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                vcpu->run->internal.suberror =
                        KVM_INTERNAL_ERROR_EMULATION;
                vcpu->run->internal.ndata = 0;
                return false;
        }

        return true;
}

struct kvm_x86_nested_ops svm_nested_ops = {
        .check_events = svm_check_nested_events,
        .triple_fault = nested_svm_triple_fault,
        .get_nested_state_pages = svm_get_nested_state_pages,
        .get_state = svm_get_nested_state,
        .set_state = svm_set_nested_state,
};