// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#define pr_fmt(fmt) "SVM: " fmt

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>

#include <asm/msr-index.h>
#include <asm/debugreg.h>

#include "kvm_emulate.h"
#include "trace.h"
#include "mmu.h"
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "svm.h"

static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
				       struct x86_exception *fault)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm->vmcb->control.exit_code != SVM_EXIT_NPF) {
		/*
		 * TODO: track the cause of the nested page fault, and
		 * correctly fill in the high bits of exit_info_1.
		 */
		svm->vmcb->control.exit_code = SVM_EXIT_NPF;
		svm->vmcb->control.exit_code_hi = 0;
		svm->vmcb->control.exit_info_1 = (1ULL << 32);
		svm->vmcb->control.exit_info_2 = fault->address;
	}

	svm->vmcb->control.exit_info_1 &= ~0xffffffffULL;
	svm->vmcb->control.exit_info_1 |= fault->error_code;

	nested_svm_vmexit(svm);
}

static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 cr3 = svm->nested.ctl.nested_cr3;
	u64 pdpte;
	int ret;

	ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(__sme_clr(cr3)), &pdpte,
				       offset_in_page(cr3) + index * 8, 8);
	if (ret)
		return 0;
	return pdpte;
}

static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return svm->nested.ctl.nested_cr3;
}

static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *hsave = svm->nested.hsave;

	WARN_ON(mmu_is_nested(vcpu));

	vcpu->arch.mmu = &vcpu->arch.guest_mmu;
	kvm_init_shadow_npt_mmu(vcpu, X86_CR0_PG, hsave->save.cr4, hsave->save.efer,
				svm->nested.ctl.nested_cr3);
	vcpu->arch.mmu->get_guest_pgd     = nested_svm_get_tdp_cr3;
	vcpu->arch.mmu->get_pdptr         = nested_svm_get_tdp_pdptr;
	vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
	reset_shadow_zero_bits_mask(vcpu, vcpu->arch.mmu);
	vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
}

static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
	vcpu->arch.mmu = &vcpu->arch.root_mmu;
	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}

void recalc_intercepts(struct vcpu_svm *svm)
{
	struct vmcb_control_area *c, *h, *g;

	vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

	if (!is_guest_mode(&svm->vcpu))
		return;

	c = &svm->vmcb->control;
	h = &svm->nested.hsave->control;
	g = &svm->nested.ctl;

	svm->nested.host_intercept_exceptions = h->intercept_exceptions;

	c->intercept_cr = h->intercept_cr;
	c->intercept_dr = h->intercept_dr;
	c->intercept_exceptions = h->intercept_exceptions;
	c->intercept = h->intercept;

	if (g->int_ctl & V_INTR_MASKING_MASK) {
		/* We only want the cr8 intercept bits of L1 */
		c->intercept_cr &= ~(1U << INTERCEPT_CR8_READ);
		c->intercept_cr &= ~(1U << INTERCEPT_CR8_WRITE);

		/*
		 * Once running L2 with HF_VINTR_MASK, EFLAGS.IF does not
		 * affect any interrupt we may want to inject; therefore,
		 * interrupt window vmexits are irrelevant to L0.
		 */
		c->intercept &= ~(1ULL << INTERCEPT_VINTR);
	}

	/* We don't want to see VMMCALLs from a nested guest */
	c->intercept &= ~(1ULL << INTERCEPT_VMMCALL);

	c->intercept_cr |= g->intercept_cr;
	c->intercept_dr |= g->intercept_dr;
	c->intercept_exceptions |= g->intercept_exceptions;
	c->intercept |= g->intercept;
}

static void copy_vmcb_control_area(struct vmcb_control_area *dst,
				   struct vmcb_control_area *from)
{
	dst->intercept_cr = from->intercept_cr;
	dst->intercept_dr = from->intercept_dr;
	dst->intercept_exceptions = from->intercept_exceptions;
	dst->intercept = from->intercept;
	dst->iopm_base_pa = from->iopm_base_pa;
	dst->msrpm_base_pa = from->msrpm_base_pa;
	dst->tsc_offset = from->tsc_offset;
	/* asid not copied, it is handled manually for svm->vmcb. */
	dst->tlb_ctl = from->tlb_ctl;
	dst->int_ctl = from->int_ctl;
	dst->int_vector = from->int_vector;
	dst->int_state = from->int_state;
	dst->exit_code = from->exit_code;
	dst->exit_code_hi = from->exit_code_hi;
	dst->exit_info_1 = from->exit_info_1;
	dst->exit_info_2 = from->exit_info_2;
	dst->exit_int_info = from->exit_int_info;
	dst->exit_int_info_err = from->exit_int_info_err;
	dst->nested_ctl = from->nested_ctl;
	dst->event_inj = from->event_inj;
	dst->event_inj_err = from->event_inj_err;
	dst->nested_cr3 = from->nested_cr3;
	dst->virt_ext = from->virt_ext;
	dst->pause_filter_count = from->pause_filter_count;
	dst->pause_filter_thresh = from->pause_filter_thresh;
}

static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
{
	/*
	 * This function merges the msr permission bitmaps of kvm and the
	 * nested vmcb.
	 * It is optimized in that it only merges the parts where
	 * the kvm msr permission bitmap may contain zero bits.
	 */
	int i;

	if (!(svm->nested.ctl.intercept & (1ULL << INTERCEPT_MSR_PROT)))
		return true;

	for (i = 0; i < MSRPM_OFFSETS; i++) {
		u32 value, p;
		u64 offset;

		if (msrpm_offsets[i] == 0xffffffff)
			break;

		p = msrpm_offsets[i];
		offset = svm->nested.ctl.msrpm_base_pa + (p * 4);

		if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
			return false;

		svm->nested.msrpm[p] = svm->msrpm[p] | value;
	}

	svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));

	return true;
}

static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
{
	if ((control->intercept & (1ULL << INTERCEPT_VMRUN)) == 0)
		return false;

	if (control->asid == 0)
		return false;

	if ((control->nested_ctl & SVM_NESTED_CTL_NP_ENABLE) &&
	    !npt_enabled)
		return false;

	return true;
}

static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb)
{
	bool nested_vmcb_lma;

	if ((vmcb->save.efer & EFER_SVME) == 0)
		return false;

	if (((vmcb->save.cr0 & X86_CR0_CD) == 0) &&
	    (vmcb->save.cr0 & X86_CR0_NW))
		return false;

	if (!kvm_dr6_valid(vmcb->save.dr6) || !kvm_dr7_valid(vmcb->save.dr7))
		return false;

	nested_vmcb_lma =
		(vmcb->save.efer & EFER_LME) &&
		(vmcb->save.cr0 & X86_CR0_PG);

	if (!nested_vmcb_lma) {
		if (vmcb->save.cr4 & X86_CR4_PAE) {
			if (vmcb->save.cr3 & MSR_CR3_LEGACY_PAE_RESERVED_MASK)
				return false;
		} else {
			if (vmcb->save.cr3 & MSR_CR3_LEGACY_RESERVED_MASK)
				return false;
		}
	} else {
		if (!(vmcb->save.cr4 & X86_CR4_PAE) ||
		    !(vmcb->save.cr0 & X86_CR0_PE) ||
		    (vmcb->save.cr3 & MSR_CR3_LONG_RESERVED_MASK))
			return false;
	}
	if (kvm_valid_cr4(&svm->vcpu, vmcb->save.cr4))
		return false;

	return nested_vmcb_check_controls(&vmcb->control);
}

static void load_nested_vmcb_control(struct vcpu_svm *svm,
				     struct vmcb_control_area *control)
{
	copy_vmcb_control_area(&svm->nested.ctl, control);

	/* Copy it here because nested_vmcb_check_controls will check it. */
	svm->nested.ctl.asid = control->asid;
	svm->nested.ctl.msrpm_base_pa &= ~0x0fffULL;
	svm->nested.ctl.iopm_base_pa &= ~0x0fffULL;
}

/*
 * Synchronize fields that are written by the processor, so that
 * they can be copied back into the nested_vmcb.
 */
void sync_nested_vmcb_control(struct vcpu_svm *svm)
{
	u32 mask;

	svm->nested.ctl.event_inj = svm->vmcb->control.event_inj;
	svm->nested.ctl.event_inj_err = svm->vmcb->control.event_inj_err;

	/* Only a few fields of int_ctl are written by the processor. */
	mask = V_IRQ_MASK | V_TPR_MASK;
	if (!(svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK) &&
	    svm_is_intercept(svm, INTERCEPT_VINTR)) {
		/*
		 * In order to request an interrupt window, L0 is usurping
		 * svm->vmcb->control.int_ctl and possibly setting V_IRQ
		 * even if it was clear in L1's VMCB.  Restoring it would be
		 * wrong.  However, in this case V_IRQ will remain true until
		 * interrupt_window_interception calls svm_clear_vintr and
		 * restores int_ctl.  We can just leave it aside.
		 */
		mask &= ~V_IRQ_MASK;
	}
	svm->nested.ctl.int_ctl &= ~mask;
	svm->nested.ctl.int_ctl |= svm->vmcb->control.int_ctl & mask;
}

/*
 * Transfer any event that L0 or L1 wanted to inject into L2 to
 * EXIT_INT_INFO.
 */
static void nested_vmcb_save_pending_event(struct vcpu_svm *svm,
					   struct vmcb *nested_vmcb)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	u32 exit_int_info = 0;
	unsigned int nr;

	if (vcpu->arch.exception.injected) {
		nr = vcpu->arch.exception.nr;
		exit_int_info = nr | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT;

		if (vcpu->arch.exception.has_error_code) {
			exit_int_info |= SVM_EVTINJ_VALID_ERR;
			nested_vmcb->control.exit_int_info_err =
				vcpu->arch.exception.error_code;
		}

	} else if (vcpu->arch.nmi_injected) {
		exit_int_info = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;

	} else if (vcpu->arch.interrupt.injected) {
		nr = vcpu->arch.interrupt.nr;
		exit_int_info = nr | SVM_EVTINJ_VALID;

		if (vcpu->arch.interrupt.soft)
			exit_int_info |= SVM_EVTINJ_TYPE_SOFT;
		else
			exit_int_info |= SVM_EVTINJ_TYPE_INTR;
	}

	nested_vmcb->control.exit_int_info = exit_int_info;
}

static inline bool nested_npt_enabled(struct vcpu_svm *svm)
{
	return svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE;
}

/*
 * Load guest's/host's cr3 on nested vmentry or vmexit. @nested_npt is true
 * if we are emulating VM-Entry into a guest with NPT enabled.
 */
static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
			       bool nested_npt)
{
	if (cr3 & rsvd_bits(cpuid_maxphyaddr(vcpu), 63))
		return -EINVAL;

	if (!nested_npt && is_pae_paging(vcpu) &&
	    (cr3 != kvm_read_cr3(vcpu) || pdptrs_changed(vcpu))) {
		if (!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
			return -EINVAL;
	}

	/*
	 * TODO: optimize unconditional TLB flush/MMU sync here and in
	 * kvm_init_shadow_npt_mmu().
	 */
	if (!nested_npt)
		kvm_mmu_new_pgd(vcpu, cr3, false, false);

	vcpu->arch.cr3 = cr3;
	kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);

	kvm_init_mmu(vcpu, false);

	return 0;
}

static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *nested_vmcb)
{
	/* Load the nested guest state */
	svm->vmcb->save.es = nested_vmcb->save.es;
	svm->vmcb->save.cs = nested_vmcb->save.cs;
	svm->vmcb->save.ss = nested_vmcb->save.ss;
	svm->vmcb->save.ds = nested_vmcb->save.ds;
	svm->vmcb->save.gdtr = nested_vmcb->save.gdtr;
	svm->vmcb->save.idtr = nested_vmcb->save.idtr;
	kvm_set_rflags(&svm->vcpu, nested_vmcb->save.rflags);
	svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
	svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
	svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
	svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
	kvm_rax_write(&svm->vcpu, nested_vmcb->save.rax);
	kvm_rsp_write(&svm->vcpu, nested_vmcb->save.rsp);
	kvm_rip_write(&svm->vcpu, nested_vmcb->save.rip);

	/* In case we don't even reach vcpu_run, the fields are not updated */
	svm->vmcb->save.rax = nested_vmcb->save.rax;
	svm->vmcb->save.rsp = nested_vmcb->save.rsp;
	svm->vmcb->save.rip = nested_vmcb->save.rip;
	svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
	svm->vcpu.arch.dr6 = nested_vmcb->save.dr6;
	svm->vmcb->save.cpl = nested_vmcb->save.cpl;
}

static void nested_prepare_vmcb_control(struct vcpu_svm *svm)
{
	const u32 mask = V_INTR_MASKING_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK;

	if (nested_npt_enabled(svm))
		nested_svm_init_mmu_context(&svm->vcpu);

	svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset =
		svm->vcpu.arch.l1_tsc_offset + svm->nested.ctl.tsc_offset;

	svm->vmcb->control.int_ctl =
		(svm->nested.ctl.int_ctl & ~mask) |
		(svm->nested.hsave->control.int_ctl & mask);

	svm->vmcb->control.virt_ext = svm->nested.ctl.virt_ext;
	svm->vmcb->control.int_vector = svm->nested.ctl.int_vector;
	svm->vmcb->control.int_state = svm->nested.ctl.int_state;
	svm->vmcb->control.event_inj = svm->nested.ctl.event_inj;
	svm->vmcb->control.event_inj_err = svm->nested.ctl.event_inj_err;

	svm->vmcb->control.pause_filter_count = svm->nested.ctl.pause_filter_count;
	svm->vmcb->control.pause_filter_thresh = svm->nested.ctl.pause_filter_thresh;

	/* Enter Guest-Mode */
	enter_guest_mode(&svm->vcpu);

	/*
	 * Merge guest and host intercepts - must be called with vcpu in
	 * guest-mode to take effect here.
	 */
	recalc_intercepts(svm);

	vmcb_mark_all_dirty(svm->vmcb);
}

int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
			 struct vmcb *nested_vmcb)
{
	int ret;

	svm->nested.vmcb = vmcb_gpa;
	load_nested_vmcb_control(svm, &nested_vmcb->control);
	nested_prepare_vmcb_save(svm, nested_vmcb);
	nested_prepare_vmcb_control(svm);

	ret = nested_svm_load_cr3(&svm->vcpu, nested_vmcb->save.cr3,
				  nested_npt_enabled(svm));
	if (ret)
		return ret;

	svm_set_gif(svm, true);

	return 0;
}

int nested_svm_vmrun(struct vcpu_svm *svm)
{
	int ret;
	struct vmcb *nested_vmcb;
	struct vmcb *hsave = svm->nested.hsave;
	struct vmcb *vmcb = svm->vmcb;
	struct kvm_host_map map;
	u64 vmcb_gpa;

	if (is_smm(&svm->vcpu)) {
		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
		return 1;
	}

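	/*
	 * Per the SVM architecture, VMRUN takes the guest-physical address
	 * of the VMCB to be run in rAX.
	 */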
	vmcb_gpa = svm->vmcb->save.rax;
	ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb_gpa), &map);
	if (ret == -EINVAL) {
		kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	} else if (ret) {
		return kvm_skip_emulated_instruction(&svm->vcpu);
	}

	ret = kvm_skip_emulated_instruction(&svm->vcpu);

	nested_vmcb = map.hva;

	if (!nested_vmcb_checks(svm, nested_vmcb)) {
		nested_vmcb->control.exit_code = SVM_EXIT_ERR;
		nested_vmcb->control.exit_code_hi = 0;
		nested_vmcb->control.exit_info_1 = 0;
		nested_vmcb->control.exit_info_2 = 0;
		goto out;
	}

	trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb_gpa,
			       nested_vmcb->save.rip,
			       nested_vmcb->control.int_ctl,
			       nested_vmcb->control.event_inj,
			       nested_vmcb->control.nested_ctl);

	trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr & 0xffff,
				    nested_vmcb->control.intercept_cr >> 16,
				    nested_vmcb->control.intercept_exceptions,
				    nested_vmcb->control.intercept);

	/* Clear internal status */
	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	/*
	 * Save the old vmcb, so we don't need to pick what we save, but can
	 * restore everything when a VMEXIT occurs
	 */
	hsave->save.es = vmcb->save.es;
	hsave->save.cs = vmcb->save.cs;
	hsave->save.ss = vmcb->save.ss;
	hsave->save.ds = vmcb->save.ds;
	hsave->save.gdtr = vmcb->save.gdtr;
	hsave->save.idtr = vmcb->save.idtr;
	hsave->save.efer = svm->vcpu.arch.efer;
	hsave->save.cr0 = kvm_read_cr0(&svm->vcpu);
	hsave->save.cr4 = svm->vcpu.arch.cr4;
	hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
	hsave->save.rip = kvm_rip_read(&svm->vcpu);
	hsave->save.rsp = vmcb->save.rsp;
	hsave->save.rax = vmcb->save.rax;
	if (npt_enabled)
		hsave->save.cr3 = vmcb->save.cr3;
	else
		hsave->save.cr3 = kvm_read_cr3(&svm->vcpu);

	copy_vmcb_control_area(&hsave->control, &vmcb->control);

	svm->nested.nested_run_pending = 1;

	if (enter_svm_guest_mode(svm, vmcb_gpa, nested_vmcb))
		goto out_exit_err;

	if (nested_svm_vmrun_msrpm(svm))
		goto out;

out_exit_err:
	svm->nested.nested_run_pending = 0;

	svm->vmcb->control.exit_code = SVM_EXIT_ERR;
	svm->vmcb->control.exit_code_hi = 0;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);

out:
	kvm_vcpu_unmap(&svm->vcpu, &map, true);

	return ret;
}

void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
{
	to_vmcb->save.fs = from_vmcb->save.fs;
	to_vmcb->save.gs = from_vmcb->save.gs;
	to_vmcb->save.tr = from_vmcb->save.tr;
	to_vmcb->save.ldtr = from_vmcb->save.ldtr;
	to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
	to_vmcb->save.star = from_vmcb->save.star;
	to_vmcb->save.lstar = from_vmcb->save.lstar;
	to_vmcb->save.cstar = from_vmcb->save.cstar;
	to_vmcb->save.sfmask = from_vmcb->save.sfmask;
	to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
	to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
	to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
}

int nested_svm_vmexit(struct vcpu_svm *svm)
{
	int rc;
	struct vmcb *nested_vmcb;
	struct vmcb *hsave = svm->nested.hsave;
	struct vmcb *vmcb = svm->vmcb;
	struct kvm_host_map map;

	rc = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->nested.vmcb), &map);
	if (rc) {
		if (rc == -EINVAL)
			kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	}

	nested_vmcb = map.hva;

	/* Exit Guest-Mode */
	leave_guest_mode(&svm->vcpu);
	svm->nested.vmcb = 0;
	WARN_ON_ONCE(svm->nested.nested_run_pending);

	/* in case we halted in L2 */
	svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;

	/* Give the current vmcb to the guest */

	nested_vmcb->save.es = vmcb->save.es;
	nested_vmcb->save.cs = vmcb->save.cs;
	nested_vmcb->save.ss = vmcb->save.ss;
	nested_vmcb->save.ds = vmcb->save.ds;
	nested_vmcb->save.gdtr = vmcb->save.gdtr;
	nested_vmcb->save.idtr = vmcb->save.idtr;
	nested_vmcb->save.efer = svm->vcpu.arch.efer;
	nested_vmcb->save.cr0 = kvm_read_cr0(&svm->vcpu);
	nested_vmcb->save.cr3 = kvm_read_cr3(&svm->vcpu);
	nested_vmcb->save.cr2 = vmcb->save.cr2;
	nested_vmcb->save.cr4 = svm->vcpu.arch.cr4;
	nested_vmcb->save.rflags = kvm_get_rflags(&svm->vcpu);
	nested_vmcb->save.rip = kvm_rip_read(&svm->vcpu);
	nested_vmcb->save.rsp = kvm_rsp_read(&svm->vcpu);
	nested_vmcb->save.rax = kvm_rax_read(&svm->vcpu);
	nested_vmcb->save.dr7 = vmcb->save.dr7;
	nested_vmcb->save.dr6 = svm->vcpu.arch.dr6;
	nested_vmcb->save.cpl = vmcb->save.cpl;

	nested_vmcb->control.int_state = vmcb->control.int_state;
	nested_vmcb->control.exit_code = vmcb->control.exit_code;
	nested_vmcb->control.exit_code_hi = vmcb->control.exit_code_hi;
	nested_vmcb->control.exit_info_1 = vmcb->control.exit_info_1;
	nested_vmcb->control.exit_info_2 = vmcb->control.exit_info_2;

	if (nested_vmcb->control.exit_code != SVM_EXIT_ERR)
		nested_vmcb_save_pending_event(svm, nested_vmcb);

	if (svm->nrips_enabled)
		nested_vmcb->control.next_rip = vmcb->control.next_rip;

	nested_vmcb->control.int_ctl = svm->nested.ctl.int_ctl;
	nested_vmcb->control.tlb_ctl = svm->nested.ctl.tlb_ctl;
	nested_vmcb->control.event_inj = svm->nested.ctl.event_inj;
	nested_vmcb->control.event_inj_err = svm->nested.ctl.event_inj_err;

	nested_vmcb->control.pause_filter_count =
		svm->vmcb->control.pause_filter_count;
	nested_vmcb->control.pause_filter_thresh =
		svm->vmcb->control.pause_filter_thresh;

	/* Restore the original control entries */
	copy_vmcb_control_area(&vmcb->control, &hsave->control);

	/* On vmexit the GIF is set to false */
	svm_set_gif(svm, false);

	svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset =
		svm->vcpu.arch.l1_tsc_offset;

	svm->nested.ctl.nested_cr3 = 0;

	/* Restore selected save entries */
	svm->vmcb->save.es = hsave->save.es;
	svm->vmcb->save.cs = hsave->save.cs;
	svm->vmcb->save.ss = hsave->save.ss;
	svm->vmcb->save.ds = hsave->save.ds;
	svm->vmcb->save.gdtr = hsave->save.gdtr;
	svm->vmcb->save.idtr = hsave->save.idtr;
	kvm_set_rflags(&svm->vcpu, hsave->save.rflags);
	svm_set_efer(&svm->vcpu, hsave->save.efer);
	svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
	svm_set_cr4(&svm->vcpu, hsave->save.cr4);
	kvm_rax_write(&svm->vcpu, hsave->save.rax);
	kvm_rsp_write(&svm->vcpu, hsave->save.rsp);
	kvm_rip_write(&svm->vcpu, hsave->save.rip);
	svm->vmcb->save.dr7 = 0;
	svm->vmcb->save.cpl = 0;
	svm->vmcb->control.exit_int_info = 0;

	vmcb_mark_all_dirty(svm->vmcb);

	trace_kvm_nested_vmexit_inject(nested_vmcb->control.exit_code,
				       nested_vmcb->control.exit_info_1,
				       nested_vmcb->control.exit_info_2,
				       nested_vmcb->control.exit_int_info,
				       nested_vmcb->control.exit_int_info_err,
				       KVM_ISA_SVM);

	kvm_vcpu_unmap(&svm->vcpu, &map, true);

	nested_svm_uninit_mmu_context(&svm->vcpu);

	rc = nested_svm_load_cr3(&svm->vcpu, hsave->save.cr3, false);
	if (rc)
		return 1;

	if (npt_enabled)
		svm->vmcb->save.cr3 = hsave->save.cr3;

	/*
	 * Drop what we picked up for L2 via svm_complete_interrupts() so it
	 * doesn't end up in L1.
	 */
	svm->vcpu.arch.nmi_injected = false;
	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	return 0;
}

/*
 * Forcibly leave nested mode in order to be able to reset the VCPU later on.
 */
void svm_leave_nested(struct vcpu_svm *svm)
{
	if (is_guest_mode(&svm->vcpu)) {
		struct vmcb *hsave = svm->nested.hsave;
		struct vmcb *vmcb = svm->vmcb;

		svm->nested.nested_run_pending = 0;
		leave_guest_mode(&svm->vcpu);
		copy_vmcb_control_area(&vmcb->control, &hsave->control);
		nested_svm_uninit_mmu_context(&svm->vcpu);
	}
}

static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
{
	u32 offset, msr, value;
	int write, mask;

	if (!(svm->nested.ctl.intercept & (1ULL << INTERCEPT_MSR_PROT)))
		return NESTED_EXIT_HOST;

	msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	offset = svm_msrpm_offset(msr);
	write = svm->vmcb->control.exit_info_1 & 1;
	mask = 1 << ((2 * (msr & 0xf)) + write);

	if (offset == MSR_INVALID)
		return NESTED_EXIT_DONE;

	/* Offset is in 32 bit units but we need it in 8 bit units */
	offset *= 4;

	if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.ctl.msrpm_base_pa + offset, &value, 4))
		return NESTED_EXIT_DONE;

	return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
{
	unsigned port, size, iopm_len;
	u16 val, mask;
	u8 start_bit;
	u64 gpa;

	if (!(svm->nested.ctl.intercept & (1ULL << INTERCEPT_IOIO_PROT)))
		return NESTED_EXIT_HOST;

	port = svm->vmcb->control.exit_info_1 >> 16;
	size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
		SVM_IOIO_SIZE_SHIFT;
	gpa = svm->nested.ctl.iopm_base_pa + (port / 8);
	start_bit = port % 8;
	iopm_len = (start_bit + size > 8) ? 2 : 1;
	mask = (0xf >> (4 - size)) << start_bit;
	val = 0;

	if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
		return NESTED_EXIT_DONE;

	return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

static int nested_svm_intercept(struct vcpu_svm *svm)
{
	u32 exit_code = svm->vmcb->control.exit_code;
	int vmexit = NESTED_EXIT_HOST;

	switch (exit_code) {
	case SVM_EXIT_MSR:
		vmexit = nested_svm_exit_handled_msr(svm);
		break;
	case SVM_EXIT_IOIO:
		vmexit = nested_svm_intercept_ioio(svm);
		break;
	case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
		u32 bit = 1U << (exit_code - SVM_EXIT_READ_CR0);
		if (svm->nested.ctl.intercept_cr & bit)
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
		u32 bit = 1U << (exit_code - SVM_EXIT_READ_DR0);
		if (svm->nested.ctl.intercept_dr & bit)
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
		/*
		 * Host-intercepted exceptions have been checked already in
		 * nested_svm_exit_special.  There is nothing to do here,
		 * the vmexit is injected by svm_check_nested_events.
		 */
		vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_ERR: {
		vmexit = NESTED_EXIT_DONE;
		break;
	}
	default: {
		u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
		if (svm->nested.ctl.intercept & exit_bits)
			vmexit = NESTED_EXIT_DONE;
	}
	}

	return vmexit;
}

int nested_svm_exit_handled(struct vcpu_svm *svm)
{
	int vmexit;

	vmexit = nested_svm_intercept(svm);

	if (vmexit == NESTED_EXIT_DONE)
		nested_svm_vmexit(svm);

	return vmexit;
}

int nested_svm_check_permissions(struct vcpu_svm *svm)
{
	if (!(svm->vcpu.arch.efer & EFER_SVME) ||
	    !is_paging(&svm->vcpu)) {
		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
		return 1;
	}

	if (svm->vmcb->save.cpl) {
		kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	}

	return 0;
}

static bool nested_exit_on_exception(struct vcpu_svm *svm)
{
	unsigned int nr = svm->vcpu.arch.exception.nr;

	return (svm->nested.ctl.intercept_exceptions & (1 << nr));
}

static void nested_svm_inject_exception_vmexit(struct vcpu_svm *svm)
{
	unsigned int nr = svm->vcpu.arch.exception.nr;

	svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
	svm->vmcb->control.exit_code_hi = 0;

	if (svm->vcpu.arch.exception.has_error_code)
		svm->vmcb->control.exit_info_1 = svm->vcpu.arch.exception.error_code;

	/*
	 * EXITINFO2 is undefined for all exception intercepts other
	 * than #PF.
	 */
	if (nr == PF_VECTOR) {
		if (svm->vcpu.arch.exception.nested_apf)
			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token;
		else if (svm->vcpu.arch.exception.has_payload)
			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.exception.payload;
		else
			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
	} else if (nr == DB_VECTOR) {
		/* See inject_pending_event. */
		kvm_deliver_exception_payload(&svm->vcpu);
		if (svm->vcpu.arch.dr7 & DR7_GD) {
			svm->vcpu.arch.dr7 &= ~DR7_GD;
			kvm_update_dr7(&svm->vcpu);
		}
	} else
		WARN_ON(svm->vcpu.arch.exception.has_payload);

	nested_svm_vmexit(svm);
}

static void nested_svm_smi(struct vcpu_svm *svm)
{
	svm->vmcb->control.exit_code = SVM_EXIT_SMI;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);
}

static void nested_svm_nmi(struct vcpu_svm *svm)
{
	svm->vmcb->control.exit_code = SVM_EXIT_NMI;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);
}

static void nested_svm_intr(struct vcpu_svm *svm)
{
	trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);

	svm->vmcb->control.exit_code = SVM_EXIT_INTR;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);
}

static inline bool nested_exit_on_init(struct vcpu_svm *svm)
{
	return (svm->nested.ctl.intercept & (1ULL << INTERCEPT_INIT));
}

static void nested_svm_init(struct vcpu_svm *svm)
{
	svm->vmcb->control.exit_code = SVM_EXIT_INIT;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);
}

static int svm_check_nested_events(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	bool block_nested_events =
		kvm_event_needs_reinjection(vcpu) || svm->nested.nested_run_pending;
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (lapic_in_kernel(vcpu) &&
	    test_bit(KVM_APIC_INIT, &apic->pending_events)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_init(svm))
			return 0;
		nested_svm_init(svm);
		return 0;
	}

	if (vcpu->arch.exception.pending) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_exception(svm))
			return 0;
		nested_svm_inject_exception_vmexit(svm);
		return 0;
	}

	if (vcpu->arch.smi_pending && !svm_smi_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_smi(svm))
			return 0;
		nested_svm_smi(svm);
		return 0;
	}

	if (vcpu->arch.nmi_pending && !svm_nmi_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_nmi(svm))
			return 0;
		nested_svm_nmi(svm);
		return 0;
	}

	if (kvm_cpu_has_interrupt(vcpu) && !svm_interrupt_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_intr(svm))
			return 0;
		nested_svm_intr(svm);
		return 0;
	}

	return 0;
}

int nested_svm_exit_special(struct vcpu_svm *svm)
{
	u32 exit_code = svm->vmcb->control.exit_code;

	switch (exit_code) {
	case SVM_EXIT_INTR:
	case SVM_EXIT_NMI:
	case SVM_EXIT_NPF:
		return NESTED_EXIT_HOST;
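	/*
	 * Exceptions that L0 itself intercepts (including its own #PF for
	 * async page faults) are handled in L0 and are not reflected to L1;
	 * everything else falls through to the regular nested handling.
	 */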
	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
		u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);

		if (get_host_vmcb(svm)->control.intercept_exceptions & excp_bits)
			return NESTED_EXIT_HOST;
		else if (exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR &&
			 svm->vcpu.arch.apf.host_apf_flags)
			/* Trap async PF even if not shadowing */
			return NESTED_EXIT_HOST;
		break;
	}
	default:
		break;
	}

	return NESTED_EXIT_CONTINUE;
}

static int svm_get_nested_state(struct kvm_vcpu *vcpu,
				struct kvm_nested_state __user *user_kvm_nested_state,
				u32 user_data_size)
{
	struct vcpu_svm *svm;
	struct kvm_nested_state kvm_state = {
		.flags = 0,
		.format = KVM_STATE_NESTED_FORMAT_SVM,
		.size = sizeof(kvm_state),
	};
	struct vmcb __user *user_vmcb = (struct vmcb __user *)
		&user_kvm_nested_state->data.svm[0];

	if (!vcpu)
		return kvm_state.size + KVM_STATE_NESTED_SVM_VMCB_SIZE;

	svm = to_svm(vcpu);

	if (user_data_size < kvm_state.size)
		goto out;

	/* First fill in the header and copy it out. */
	if (is_guest_mode(vcpu)) {
		kvm_state.hdr.svm.vmcb_pa = svm->nested.vmcb;
		kvm_state.size += KVM_STATE_NESTED_SVM_VMCB_SIZE;
		kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;

		if (svm->nested.nested_run_pending)
			kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
	}

	if (gif_set(svm))
		kvm_state.flags |= KVM_STATE_NESTED_GIF_SET;

	if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
		return -EFAULT;

	if (!is_guest_mode(vcpu))
		goto out;

	/*
	 * Copy over the full size of the VMCB rather than just the size
	 * of the structs.
	 */
	if (clear_user(user_vmcb, KVM_STATE_NESTED_SVM_VMCB_SIZE))
		return -EFAULT;
	if (copy_to_user(&user_vmcb->control, &svm->nested.ctl,
			 sizeof(user_vmcb->control)))
		return -EFAULT;
	if (copy_to_user(&user_vmcb->save, &svm->nested.hsave->save,
			 sizeof(user_vmcb->save)))
		return -EFAULT;

out:
	return kvm_state.size;
}

static int svm_set_nested_state(struct kvm_vcpu *vcpu,
				struct kvm_nested_state __user *user_kvm_nested_state,
				struct kvm_nested_state *kvm_state)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *hsave = svm->nested.hsave;
	struct vmcb __user *user_vmcb = (struct vmcb __user *)
		&user_kvm_nested_state->data.svm[0];
	struct vmcb_control_area ctl;
	struct vmcb_save_area save;
	u32 cr0;

	if (kvm_state->format != KVM_STATE_NESTED_FORMAT_SVM)
		return -EINVAL;

	if (kvm_state->flags & ~(KVM_STATE_NESTED_GUEST_MODE |
				 KVM_STATE_NESTED_RUN_PENDING |
				 KVM_STATE_NESTED_GIF_SET))
		return -EINVAL;

	/*
	 * If in guest mode, vcpu->arch.efer actually refers to the L2 guest's
	 * EFER.SVME, but EFER.SVME still has to be 1 for VMRUN to succeed.
	 */
	if (!(vcpu->arch.efer & EFER_SVME)) {
		/* GIF=1 and no guest mode are required if SVME=0. */
		if (kvm_state->flags != KVM_STATE_NESTED_GIF_SET)
			return -EINVAL;
	}

	/* SMM temporarily disables SVM, so we cannot be in guest mode. */
	if (is_smm(vcpu) && (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
		return -EINVAL;

	if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) {
		svm_leave_nested(svm);
		goto out_set_gif;
	}

	if (!page_address_valid(vcpu, kvm_state->hdr.svm.vmcb_pa))
		return -EINVAL;
	if (kvm_state->size < sizeof(*kvm_state) + KVM_STATE_NESTED_SVM_VMCB_SIZE)
		return -EINVAL;
	if (copy_from_user(&ctl, &user_vmcb->control, sizeof(ctl)))
		return -EFAULT;
	if (copy_from_user(&save, &user_vmcb->save, sizeof(save)))
		return -EFAULT;

	if (!nested_vmcb_check_controls(&ctl))
		return -EINVAL;

	/*
	 * Processor state contains L2 state.  Check that it is
	 * valid for guest mode (see nested_vmcb_checks).
	 */
	cr0 = kvm_read_cr0(vcpu);
	if (((cr0 & X86_CR0_CD) == 0) && (cr0 & X86_CR0_NW))
		return -EINVAL;

	/*
	 * Validate host state saved from before VMRUN (see
	 * nested_svm_check_permissions).
	 * TODO: validate reserved bits for all saved state.
	 */
	if (!(save.cr0 & X86_CR0_PG))
		return -EINVAL;

	/*
	 * All checks done, we can enter guest mode.  L1 control fields
	 * come from the nested save state.  Guest state is already
	 * in the registers, the save area of the nested state instead
	 * contains saved L1 state.
	 */
	copy_vmcb_control_area(&hsave->control, &svm->vmcb->control);
	hsave->save = save;

	svm->nested.vmcb = kvm_state->hdr.svm.vmcb_pa;
	load_nested_vmcb_control(svm, &ctl);
	nested_prepare_vmcb_control(svm);

	if (!nested_svm_vmrun_msrpm(svm))
		return -EINVAL;

out_set_gif:
	svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
	return 0;
}

struct kvm_x86_nested_ops svm_nested_ops = {
	.check_events = svm_check_nested_events,
	.get_state = svm_get_nested_state,
	.set_state = svm_set_nested_state,
};