// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>

#include <asm/msr-index.h>
#include <asm/debugreg.h>

#include "kvm_emulate.h"
#include "trace.h"
#include "mmu.h"
#include "x86.h"
#include "smm.h"
#include "cpuid.h"
#include "lapic.h"
#include "svm.h"
#include "hyperv.h"

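/*
 * CC is shorthand for a nested VM-Enter consistency check; a failed check
 * is reported via tracepoint rather than silently dropped.
 */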
#define CC KVM_NESTED_VMENTER_CONSISTENCY_CHECK

static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
				       struct x86_exception *fault)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb = svm->vmcb;

	if (vmcb->control.exit_code != SVM_EXIT_NPF) {
		/*
		 * TODO: track the cause of the nested page fault, and
		 * correctly fill in the high bits of exit_info_1.
		 */
		vmcb->control.exit_code = SVM_EXIT_NPF;
		vmcb->control.exit_code_hi = 0;
		vmcb->control.exit_info_1 = (1ULL << 32);
		vmcb->control.exit_info_2 = fault->address;
	}

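	/*
	 * Bits 31:0 of exit_info_1 hold the #PF-style error code for the
	 * nested fault; replace them while preserving the NPF-specific
	 * high bits set above (or by hardware on a real NPF exit).
	 */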
	vmcb->control.exit_info_1 &= ~0xffffffffULL;
	vmcb->control.exit_info_1 |= fault->error_code;

	nested_svm_vmexit(svm);
}

static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 cr3 = svm->nested.ctl.nested_cr3;
	u64 pdpte;
	int ret;

	/*
	 * Note, nCR3 is "assumed" to be 32-byte aligned, i.e. the CPU ignores
	 * nCR3[4:0] when loading PDPTEs from memory.
	 */
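	/*
	 * E.g. (illustrative values) with nCR3 = 0x12345e73 and index = 2,
	 * the PDPTE is read from GPA 0x12345000 + (0xe73 & 0xfe0) + 16,
	 * i.e. 0x12345e70.
	 */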
	ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(cr3), &pdpte,
				       (cr3 & GENMASK(11, 5)) + index * 8, 8);
	if (ret)
		return 0;
	return pdpte;
}

static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return svm->nested.ctl.nested_cr3;
}

static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	WARN_ON(mmu_is_nested(vcpu));

	vcpu->arch.mmu = &vcpu->arch.guest_mmu;

	/*
	 * The NPT format depends on L1's CR4 and EFER, which are in vmcb01.
	 * Note, when called via KVM_SET_NESTED_STATE, that state may _not_
	 * match the current vCPU state.  CR0.WP is explicitly ignored, while
	 * CR0.PG is required.
	 */
	kvm_init_shadow_npt_mmu(vcpu, X86_CR0_PG, svm->vmcb01.ptr->save.cr4,
				svm->vmcb01.ptr->save.efer,
				svm->nested.ctl.nested_cr3);
	vcpu->arch.mmu->get_guest_pgd = nested_svm_get_tdp_cr3;
	vcpu->arch.mmu->get_pdptr = nested_svm_get_tdp_pdptr;
	vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
	vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
}

static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
	vcpu->arch.mmu = &vcpu->arch.root_mmu;
	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}

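/*
 * Virtual VMLOAD/VMSAVE can only be passed through to L2 when the feature
 * is exposed to L1, NPT is enabled for L2, and L1 enabled it in virt_ext;
 * in all other cases KVM must intercept VMLOAD/VMSAVE and emulate them.
 */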
static bool nested_vmcb_needs_vls_intercept(struct vcpu_svm *svm)
{
	if (!guest_can_use(&svm->vcpu, X86_FEATURE_V_VMSAVE_VMLOAD))
		return true;

	if (!nested_npt_enabled(svm))
		return true;

	if (!(svm->nested.ctl.virt_ext & VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK))
		return true;

	return false;
}

void recalc_intercepts(struct vcpu_svm *svm)
{
	struct vmcb_control_area *c, *h;
	struct vmcb_ctrl_area_cached *g;
	unsigned int i;

	vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

	if (!is_guest_mode(&svm->vcpu))
		return;

	c = &svm->vmcb->control;
	h = &svm->vmcb01.ptr->control;
	g = &svm->nested.ctl;

	for (i = 0; i < MAX_INTERCEPT; i++)
		c->intercepts[i] = h->intercepts[i];

	if (g->int_ctl & V_INTR_MASKING_MASK) {
		/*
		 * If L2 is active and V_INTR_MASKING is enabled in vmcb12,
		 * disable intercept of CR8 writes as L2's CR8 does not affect
		 * any interrupt KVM may want to inject.
		 *
		 * Similarly, disable intercept of virtual interrupts (used to
		 * detect interrupt windows) if the saved RFLAGS.IF is '0', as
		 * the effective RFLAGS.IF for L1 interrupts will never be set
		 * while L2 is running (L2's RFLAGS.IF doesn't affect L1 IRQs).
		 */
		vmcb_clr_intercept(c, INTERCEPT_CR8_WRITE);
		if (!(svm->vmcb01.ptr->save.rflags & X86_EFLAGS_IF))
			vmcb_clr_intercept(c, INTERCEPT_VINTR);
	}

	/*
	 * We want to see VMMCALLs from a nested guest only when the Hyper-V
	 * L2 TLB flush feature is enabled.
	 */
	if (!nested_svm_l2_tlb_flush_enabled(&svm->vcpu))
		vmcb_clr_intercept(c, INTERCEPT_VMMCALL);

	for (i = 0; i < MAX_INTERCEPT; i++)
		c->intercepts[i] |= g->intercepts[i];

	/* If SMI is not intercepted, ignore guest SMI intercept as well */
	if (!intercept_smi)
		vmcb_clr_intercept(c, INTERCEPT_SMI);

	if (nested_vmcb_needs_vls_intercept(svm)) {
		/*
		 * If virtual VMLOAD/VMSAVE is not enabled for L2, KVM must
		 * intercept these instructions to correctly emulate them in
		 * case L1 doesn't intercept them.
		 */
		vmcb_set_intercept(c, INTERCEPT_VMLOAD);
		vmcb_set_intercept(c, INTERCEPT_VMSAVE);
	} else {
		WARN_ON(!(c->virt_ext & VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK));
	}
}

/*
 * Merge L0's (KVM) and L1's (Nested VMCB) MSR permission bitmaps. The
 * function is optimized in that it only merges the parts where the KVM
 * MSR permission bitmap may contain zero bits.
 */
static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
{
	struct hv_vmcb_enlightenments *hve = &svm->nested.ctl.hv_enlightenments;
	int i;

	/*
	 * MSR bitmap update can be skipped when:
	 * - MSR bitmap for L1 hasn't changed.
	 * - Nested hypervisor (L1) is attempting to launch the same L2 as
	 *   before.
	 * - Nested hypervisor (L1) is using Hyper-V emulation interface and
	 *   tells KVM (L0) there were no changes in MSR bitmap for L2.
	 */
	if (!svm->nested.force_msr_bitmap_recalc &&
	    kvm_hv_hypercall_enabled(&svm->vcpu) &&
	    hve->hv_enlightenments_control.msr_bitmap &&
	    (svm->nested.ctl.clean & BIT(HV_VMCB_NESTED_ENLIGHTENMENTS)))
		goto set_msrpm_base_pa;

	if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
		return true;

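	/*
	 * Each 32-bit chunk of the MSRPM covers 16 MSRs, as the bitmap
	 * holds two intercept bits (read and write) per MSR; merging one
	 * u32 at a time thus merges permissions for 16 consecutive MSRs.
	 */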
	for (i = 0; i < MSRPM_OFFSETS; i++) {
		u32 value, p;
		u64 offset;

		if (msrpm_offsets[i] == 0xffffffff)
			break;

		p = msrpm_offsets[i];

		/* x2APIC MSRs are always intercepted for the nested guest */
		if (is_x2apic_msrpm_offset(p))
			continue;

		offset = svm->nested.ctl.msrpm_base_pa + (p * 4);

		if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
			return false;

		svm->nested.msrpm[p] = svm->msrpm[p] | value;
	}

	svm->nested.force_msr_bitmap_recalc = false;

set_msrpm_base_pa:
	svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));

	return true;
}

/*
 * Bits 11:0 of bitmap address are ignored by hardware
 */
static bool nested_svm_check_bitmap_pa(struct kvm_vcpu *vcpu, u64 pa, u32 size)
{
	u64 addr = PAGE_ALIGN(pa);

	return kvm_vcpu_is_legal_gpa(vcpu, addr) &&
	       kvm_vcpu_is_legal_gpa(vcpu, addr + size - 1);
}

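/*
 * Consistency checks on the vmcb12 control area.  A violation causes VMRUN
 * to fail with exit_code SVM_EXIT_ERR instead of entering L2.
 */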
static bool __nested_vmcb_check_controls(struct kvm_vcpu *vcpu,
					 struct vmcb_ctrl_area_cached *control)
{
	if (CC(!vmcb12_is_intercept(control, INTERCEPT_VMRUN)))
		return false;

	if (CC(control->asid == 0))
		return false;

	if (CC((control->nested_ctl & SVM_NESTED_CTL_NP_ENABLE) && !npt_enabled))
		return false;

	if (CC(!nested_svm_check_bitmap_pa(vcpu, control->msrpm_base_pa,
					   MSRPM_SIZE)))
		return false;
	if (CC(!nested_svm_check_bitmap_pa(vcpu, control->iopm_base_pa,
					   IOPM_SIZE)))
		return false;

	if (CC((control->int_ctl & V_NMI_ENABLE_MASK) &&
	       !vmcb12_is_intercept(control, INTERCEPT_NMI))) {
		return false;
	}

	return true;
}

/* Common checks that apply to both L1 and L2 state.  */
static bool __nested_vmcb_check_save(struct kvm_vcpu *vcpu,
				     struct vmcb_save_area_cached *save)
{
	if (CC(!(save->efer & EFER_SVME)))
		return false;

	if (CC((save->cr0 & X86_CR0_CD) == 0 && (save->cr0 & X86_CR0_NW)) ||
	    CC(save->cr0 & ~0xffffffffULL))
		return false;

	if (CC(!kvm_dr6_valid(save->dr6)) || CC(!kvm_dr7_valid(save->dr7)))
		return false;

	/*
	 * These checks are also performed by KVM_SET_SREGS,
	 * except that EFER.LMA is not checked by SVM against
	 * CR0.PG && EFER.LME.
	 */
	if ((save->efer & EFER_LME) && (save->cr0 & X86_CR0_PG)) {
		if (CC(!(save->cr4 & X86_CR4_PAE)) ||
		    CC(!(save->cr0 & X86_CR0_PE)) ||
		    CC(kvm_vcpu_is_illegal_gpa(vcpu, save->cr3)))
			return false;
	}

	/* Note, SVM doesn't have any additional restrictions on CR4. */
	if (CC(!__kvm_is_valid_cr4(vcpu, save->cr4)))
		return false;

	if (CC(!kvm_valid_efer(vcpu, save->efer)))
		return false;

	return true;
}

static bool nested_vmcb_check_save(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_save_area_cached *save = &svm->nested.save;

	return __nested_vmcb_check_save(vcpu, save);
}

static bool nested_vmcb_check_controls(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_ctrl_area_cached *ctl = &svm->nested.ctl;

	return __nested_vmcb_check_controls(vcpu, ctl);
}

static
void __nested_copy_vmcb_control_to_cache(struct kvm_vcpu *vcpu,
					 struct vmcb_ctrl_area_cached *to,
					 struct vmcb_control_area *from)
{
	unsigned int i;

	for (i = 0; i < MAX_INTERCEPT; i++)
		to->intercepts[i] = from->intercepts[i];

	to->iopm_base_pa = from->iopm_base_pa;
	to->msrpm_base_pa = from->msrpm_base_pa;
	to->tsc_offset = from->tsc_offset;
	to->tlb_ctl = from->tlb_ctl;
	to->int_ctl = from->int_ctl;
	to->int_vector = from->int_vector;
	to->int_state = from->int_state;
	to->exit_code = from->exit_code;
	to->exit_code_hi = from->exit_code_hi;
	to->exit_info_1 = from->exit_info_1;
	to->exit_info_2 = from->exit_info_2;
	to->exit_int_info = from->exit_int_info;
	to->exit_int_info_err = from->exit_int_info_err;
	to->nested_ctl = from->nested_ctl;
	to->event_inj = from->event_inj;
	to->event_inj_err = from->event_inj_err;
	to->next_rip = from->next_rip;
	to->nested_cr3 = from->nested_cr3;
	to->virt_ext = from->virt_ext;
	to->pause_filter_count = from->pause_filter_count;
	to->pause_filter_thresh = from->pause_filter_thresh;

	/* Copy asid here because nested_vmcb_check_controls will check it. */
	to->asid = from->asid;
	to->msrpm_base_pa &= ~0x0fffULL;
	to->iopm_base_pa &= ~0x0fffULL;

	/* Hyper-V extensions (Enlightened VMCB) */
	if (kvm_hv_hypercall_enabled(vcpu)) {
		to->clean = from->clean;
		memcpy(&to->hv_enlightenments, &from->hv_enlightenments,
		       sizeof(to->hv_enlightenments));
	}
}

void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
				       struct vmcb_control_area *control)
{
	__nested_copy_vmcb_control_to_cache(&svm->vcpu, &svm->nested.ctl, control);
}

static void __nested_copy_vmcb_save_to_cache(struct vmcb_save_area_cached *to,
					     struct vmcb_save_area *from)
{
	/*
	 * Copy only fields that are validated, as we need them
	 * to avoid TOCTOU races.
	 */
	to->efer = from->efer;
	to->cr0 = from->cr0;
	to->cr3 = from->cr3;
	to->cr4 = from->cr4;

	to->dr6 = from->dr6;
	to->dr7 = from->dr7;
}

void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
				    struct vmcb_save_area *save)
{
	__nested_copy_vmcb_save_to_cache(&svm->nested.save, save);
}

/*
 * Synchronize fields that are written by the processor, so that
 * they can be copied back into the vmcb12.
 */
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm)
{
	u32 mask;
	svm->nested.ctl.event_inj = svm->vmcb->control.event_inj;
	svm->nested.ctl.event_inj_err = svm->vmcb->control.event_inj_err;

	/* Only a few fields of int_ctl are written by the processor. */
	mask = V_IRQ_MASK | V_TPR_MASK;
	/*
	 * Don't sync vmcb02 V_IRQ back to vmcb12 if KVM (L0) is intercepting
	 * virtual interrupts in order to request an interrupt window, as KVM
	 * has usurped vmcb02's int_ctl.  If an interrupt window opens before
	 * the next VM-Exit, svm_clear_vintr() will restore vmcb12's int_ctl.
	 * If no window opens, V_IRQ will be correctly preserved in vmcb12's
	 * int_ctl (because it was never recognized while L2 was running).
	 */
	if (svm_is_intercept(svm, INTERCEPT_VINTR) &&
	    !test_bit(INTERCEPT_VINTR, (unsigned long *)svm->nested.ctl.intercepts))
		mask &= ~V_IRQ_MASK;

	if (nested_vgif_enabled(svm))
		mask |= V_GIF_MASK;

	if (nested_vnmi_enabled(svm))
		mask |= V_NMI_BLOCKING_MASK | V_NMI_PENDING_MASK;

	svm->nested.ctl.int_ctl &= ~mask;
	svm->nested.ctl.int_ctl |= svm->vmcb->control.int_ctl & mask;
}

/*
 * Transfer any event that L0 or L1 wanted to inject into L2 to
 * EXIT_INT_INFO.
 */
static void nested_save_pending_event_to_vmcb12(struct vcpu_svm *svm,
						struct vmcb *vmcb12)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	u32 exit_int_info = 0;
	unsigned int nr;

	if (vcpu->arch.exception.injected) {
		nr = vcpu->arch.exception.vector;
		exit_int_info = nr | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT;

		if (vcpu->arch.exception.has_error_code) {
			exit_int_info |= SVM_EVTINJ_VALID_ERR;
			vmcb12->control.exit_int_info_err =
				vcpu->arch.exception.error_code;
		}

	} else if (vcpu->arch.nmi_injected) {
		exit_int_info = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;

	} else if (vcpu->arch.interrupt.injected) {
		nr = vcpu->arch.interrupt.nr;
		exit_int_info = nr | SVM_EVTINJ_VALID;

		if (vcpu->arch.interrupt.soft)
			exit_int_info |= SVM_EVTINJ_TYPE_SOFT;
		else
			exit_int_info |= SVM_EVTINJ_TYPE_INTR;
	}

	vmcb12->control.exit_int_info = exit_int_info;
}

static void nested_svm_transition_tlb_flush(struct kvm_vcpu *vcpu)
{
	/*
	 * KVM_REQ_HV_TLB_FLUSH flushes entries from either L1's VP_ID or
	 * L2's VP_ID upon request from the guest. Make sure we check for
	 * pending entries in the right FIFO upon L1/L2 transition, as these
	 * requests are posted by other vCPUs asynchronously.
	 */
	if (to_hv_vcpu(vcpu) && npt_enabled)
		kvm_make_request(KVM_REQ_HV_TLB_FLUSH, vcpu);

	/*
	 * TODO: optimize unconditional TLB flush/MMU sync.  A partial list of
	 * things to fix before this can be conditional:
	 *
	 *  - Flush TLBs for both L1 and L2 remote TLB flush
	 *  - Honor L1's request to flush an ASID on nested VMRUN
	 *  - Sync nested NPT MMU on VMRUN that flushes L2's ASID[*]
	 *  - Don't crush a pending TLB flush in vmcb02 on nested VMRUN
	 *  - Flush L1's ASID on KVM_REQ_TLB_FLUSH_GUEST
	 *
	 * [*] Unlike nested EPT, SVM's ASID management can invalidate nested
	 *     NPT guest-physical mappings on VMRUN.
	 */
	kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
	kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
}

/*
 * Load guest's/host's cr3 on nested vmentry or vmexit. @nested_npt is true
 * if we are emulating VM-Entry into a guest with NPT enabled.
 */
static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
			       bool nested_npt, bool reload_pdptrs)
{
	if (CC(kvm_vcpu_is_illegal_gpa(vcpu, cr3)))
		return -EINVAL;

	if (reload_pdptrs && !nested_npt && is_pae_paging(vcpu) &&
	    CC(!load_pdptrs(vcpu, cr3)))
		return -EINVAL;

	vcpu->arch.cr3 = cr3;

	/* Re-initialize the MMU, e.g. to pick up CR4 MMU role changes. */
	kvm_init_mmu(vcpu);

	if (!nested_npt)
		kvm_mmu_new_pgd(vcpu, cr3);

	return 0;
}

void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm)
{
	if (!svm->nested.vmcb02.ptr)
		return;

	/* FIXME: merge g_pat from vmcb01 and vmcb12. */
	svm->nested.vmcb02.ptr->save.g_pat = svm->vmcb01.ptr->save.g_pat;
}

static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
{
	bool new_vmcb12 = false;
	struct vmcb *vmcb01 = svm->vmcb01.ptr;
	struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;
	struct kvm_vcpu *vcpu = &svm->vcpu;

	nested_vmcb02_compute_g_pat(svm);

	/* Load the nested guest state */
	if (svm->nested.vmcb12_gpa != svm->nested.last_vmcb12_gpa) {
		new_vmcb12 = true;
		svm->nested.last_vmcb12_gpa = svm->nested.vmcb12_gpa;
		svm->nested.force_msr_bitmap_recalc = true;
	}

	if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_SEG))) {
		vmcb02->save.es = vmcb12->save.es;
		vmcb02->save.cs = vmcb12->save.cs;
		vmcb02->save.ss = vmcb12->save.ss;
		vmcb02->save.ds = vmcb12->save.ds;
		vmcb02->save.cpl = vmcb12->save.cpl;
		vmcb_mark_dirty(vmcb02, VMCB_SEG);
	}

	if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_DT))) {
		vmcb02->save.gdtr = vmcb12->save.gdtr;
		vmcb02->save.idtr = vmcb12->save.idtr;
		vmcb_mark_dirty(vmcb02, VMCB_DT);
	}

	kvm_set_rflags(vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED);

	svm_set_efer(vcpu, svm->nested.save.efer);

	svm_set_cr0(vcpu, svm->nested.save.cr0);
	svm_set_cr4(vcpu, svm->nested.save.cr4);

	svm->vcpu.arch.cr2 = vmcb12->save.cr2;

	kvm_rax_write(vcpu, vmcb12->save.rax);
	kvm_rsp_write(vcpu, vmcb12->save.rsp);
	kvm_rip_write(vcpu, vmcb12->save.rip);

	/* In case we don't even reach vcpu_run, the fields are not updated */
	vmcb02->save.rax = vmcb12->save.rax;
	vmcb02->save.rsp = vmcb12->save.rsp;
	vmcb02->save.rip = vmcb12->save.rip;

	/* These bits will be set properly on the first execution when new_vmcb12 is true */
	if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_DR))) {
		vmcb02->save.dr7 = svm->nested.save.dr7 | DR7_FIXED_1;
		svm->vcpu.arch.dr6 = svm->nested.save.dr6 | DR6_ACTIVE_LOW;
		vmcb_mark_dirty(vmcb02, VMCB_DR);
	}

	if (unlikely(guest_can_use(vcpu, X86_FEATURE_LBRV) &&
		     (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
		/*
		 * Reserved bits of DEBUGCTL are ignored.  Be consistent with
		 * svm_set_msr's definition of reserved bits.
		 */
		svm_copy_lbrs(vmcb02, vmcb12);
		vmcb02->save.dbgctl &= ~DEBUGCTL_RESERVED_BITS;
		svm_update_lbrv(&svm->vcpu);

	} else if (unlikely(vmcb01->control.virt_ext & LBR_CTL_ENABLE_MASK)) {
		svm_copy_lbrs(vmcb02, vmcb01);
	}
}

static inline bool is_evtinj_soft(u32 evtinj)
{
	u32 type = evtinj & SVM_EVTINJ_TYPE_MASK;
	u8 vector = evtinj & SVM_EVTINJ_VEC_MASK;

	if (!(evtinj & SVM_EVTINJ_VALID))
		return false;

	if (type == SVM_EVTINJ_TYPE_SOFT)
		return true;

	return type == SVM_EVTINJ_TYPE_EXEPT && kvm_exception_is_soft(vector);
}

static bool is_evtinj_nmi(u32 evtinj)
{
	u32 type = evtinj & SVM_EVTINJ_TYPE_MASK;

	if (!(evtinj & SVM_EVTINJ_VALID))
		return false;

	return type == SVM_EVTINJ_TYPE_NMI;
}

static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
					  unsigned long vmcb12_rip,
					  unsigned long vmcb12_csbase)
{
	u32 int_ctl_vmcb01_bits = V_INTR_MASKING_MASK;
	u32 int_ctl_vmcb12_bits = V_TPR_MASK | V_IRQ_INJECTION_BITS_MASK;

	struct kvm_vcpu *vcpu = &svm->vcpu;
	struct vmcb *vmcb01 = svm->vmcb01.ptr;
	struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;
	u32 pause_count12;
	u32 pause_thresh12;

	nested_svm_transition_tlb_flush(vcpu);

	/* Enter Guest-Mode */
	enter_guest_mode(vcpu);

	/*
	 * Filled at exit: exit_code, exit_code_hi, exit_info_1, exit_info_2,
	 * exit_int_info, exit_int_info_err, next_rip, insn_len, insn_bytes.
	 */

	if (guest_can_use(vcpu, X86_FEATURE_VGIF) &&
	    (svm->nested.ctl.int_ctl & V_GIF_ENABLE_MASK))
		int_ctl_vmcb12_bits |= (V_GIF_MASK | V_GIF_ENABLE_MASK);
	else
		int_ctl_vmcb01_bits |= (V_GIF_MASK | V_GIF_ENABLE_MASK);

	if (vnmi) {
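		/*
		 * A virtual NMI left pending for L1 in vmcb01 won't be
		 * processed while vmcb02 is active; track it as a pending
		 * NMI in software so it isn't lost.
		 */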
		if (vmcb01->control.int_ctl & V_NMI_PENDING_MASK) {
			svm->vcpu.arch.nmi_pending++;
			kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
		}
		if (nested_vnmi_enabled(svm))
			int_ctl_vmcb12_bits |= (V_NMI_PENDING_MASK |
						V_NMI_ENABLE_MASK |
						V_NMI_BLOCKING_MASK);
	}

	/* Copied from vmcb01.  msrpm_base can be overwritten later. */
	vmcb02->control.nested_ctl = vmcb01->control.nested_ctl;
	vmcb02->control.iopm_base_pa = vmcb01->control.iopm_base_pa;
	vmcb02->control.msrpm_base_pa = vmcb01->control.msrpm_base_pa;

	/* Done at vmrun: asid. */

	/* Also overwritten later if necessary. */
	vmcb02->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;

	/* nested_cr3. */
	if (nested_npt_enabled(svm))
		nested_svm_init_mmu_context(vcpu);

	vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset(
			vcpu->arch.l1_tsc_offset,
			svm->nested.ctl.tsc_offset,
			svm->tsc_ratio_msr);

	vmcb02->control.tsc_offset = vcpu->arch.tsc_offset;

	if (guest_can_use(vcpu, X86_FEATURE_TSCRATEMSR) &&
	    svm->tsc_ratio_msr != kvm_caps.default_tsc_scaling_ratio)
		nested_svm_update_tsc_ratio_msr(vcpu);

	vmcb02->control.int_ctl =
		(svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) |
		(vmcb01->control.int_ctl & int_ctl_vmcb01_bits);

	vmcb02->control.int_vector = svm->nested.ctl.int_vector;
	vmcb02->control.int_state = svm->nested.ctl.int_state;
	vmcb02->control.event_inj = svm->nested.ctl.event_inj;
	vmcb02->control.event_inj_err = svm->nested.ctl.event_inj_err;

	/*
	 * next_rip is consumed on VMRUN as the return address pushed on the
	 * stack for injected soft exceptions/interrupts.  If nrips is exposed
	 * to L1, take it verbatim from vmcb12.  If nrips is supported in
	 * hardware but not exposed to L1, stuff the actual L2 RIP to emulate
	 * what a nrips=0 CPU would do (L1 is responsible for advancing RIP
	 * prior to injecting the event).
	 */
	if (guest_can_use(vcpu, X86_FEATURE_NRIPS))
		vmcb02->control.next_rip = svm->nested.ctl.next_rip;
	else if (boot_cpu_has(X86_FEATURE_NRIPS))
		vmcb02->control.next_rip = vmcb12_rip;

	svm->nmi_l1_to_l2 = is_evtinj_nmi(vmcb02->control.event_inj);
	if (is_evtinj_soft(vmcb02->control.event_inj)) {
		svm->soft_int_injected = true;
		svm->soft_int_csbase = vmcb12_csbase;
		svm->soft_int_old_rip = vmcb12_rip;
		if (guest_can_use(vcpu, X86_FEATURE_NRIPS))
			svm->soft_int_next_rip = svm->nested.ctl.next_rip;
		else
			svm->soft_int_next_rip = vmcb12_rip;
	}

	vmcb02->control.virt_ext = vmcb01->control.virt_ext &
				   LBR_CTL_ENABLE_MASK;
	if (guest_can_use(vcpu, X86_FEATURE_LBRV))
		vmcb02->control.virt_ext |=
			(svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK);

	if (!nested_vmcb_needs_vls_intercept(svm))
		vmcb02->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;

	if (guest_can_use(vcpu, X86_FEATURE_PAUSEFILTER))
		pause_count12 = svm->nested.ctl.pause_filter_count;
	else
		pause_count12 = 0;
	if (guest_can_use(vcpu, X86_FEATURE_PFTHRESHOLD))
		pause_thresh12 = svm->nested.ctl.pause_filter_thresh;
	else
		pause_thresh12 = 0;
	if (kvm_pause_in_guest(svm->vcpu.kvm)) {
		/* use guest values since host doesn't intercept PAUSE */
		vmcb02->control.pause_filter_count = pause_count12;
		vmcb02->control.pause_filter_thresh = pause_thresh12;

	} else {
		/* start from host values otherwise */
		vmcb02->control.pause_filter_count = vmcb01->control.pause_filter_count;
		vmcb02->control.pause_filter_thresh = vmcb01->control.pause_filter_thresh;

		/* ... but ensure filtering is disabled if so requested. */
		if (vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_PAUSE)) {
			if (!pause_count12)
				vmcb02->control.pause_filter_count = 0;
			if (!pause_thresh12)
				vmcb02->control.pause_filter_thresh = 0;
		}
	}

	/*
	 * Merge guest and host intercepts - must be called with vcpu in
	 * guest-mode to take effect.
	 */
	recalc_intercepts(svm);
}

static void nested_svm_copy_common_state(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
{
	/*
	 * Some VMCB state is shared between L1 and L2 and thus has to be
	 * moved at the time of nested vmrun and vmexit.
	 *
	 * VMLOAD/VMSAVE state would also belong in this category, but KVM
	 * always performs VMLOAD and VMSAVE from the VMCB01.
	 */
	to_vmcb->save.spec_ctrl = from_vmcb->save.spec_ctrl;
}

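/*
 * Emulate entry into L2: switch the vCPU to vmcb02, stuff vmcb02's control
 * and save areas from the cached vmcb12 state, and load L2's CR3.  Reached
 * from an actual VMRUN (from_vmrun == true) and from paths that resume an
 * already-running L2, e.g. restoring nested state (from_vmrun == false).
 */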
int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
			 struct vmcb *vmcb12, bool from_vmrun)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int ret;

	trace_kvm_nested_vmenter(svm->vmcb->save.rip,
				 vmcb12_gpa,
				 vmcb12->save.rip,
				 vmcb12->control.int_ctl,
				 vmcb12->control.event_inj,
				 vmcb12->control.nested_ctl,
				 vmcb12->control.nested_cr3,
				 vmcb12->save.cr3,
				 KVM_ISA_SVM);

	trace_kvm_nested_intercepts(vmcb12->control.intercepts[INTERCEPT_CR] & 0xffff,
				    vmcb12->control.intercepts[INTERCEPT_CR] >> 16,
				    vmcb12->control.intercepts[INTERCEPT_EXCEPTION],
				    vmcb12->control.intercepts[INTERCEPT_WORD3],
				    vmcb12->control.intercepts[INTERCEPT_WORD4],
				    vmcb12->control.intercepts[INTERCEPT_WORD5]);


	svm->nested.vmcb12_gpa = vmcb12_gpa;

	WARN_ON(svm->vmcb == svm->nested.vmcb02.ptr);

	nested_svm_copy_common_state(svm->vmcb01.ptr, svm->nested.vmcb02.ptr);

	svm_switch_vmcb(svm, &svm->nested.vmcb02);
	nested_vmcb02_prepare_control(svm, vmcb12->save.rip, vmcb12->save.cs.base);
	nested_vmcb02_prepare_save(svm, vmcb12);

	ret = nested_svm_load_cr3(&svm->vcpu, svm->nested.save.cr3,
				  nested_npt_enabled(svm), from_vmrun);
	if (ret)
		return ret;

	if (!from_vmrun)
		kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);

	svm_set_gif(svm, true);

	if (kvm_vcpu_apicv_active(vcpu))
		kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);

	nested_svm_hv_update_vm_vp_ids(vcpu);

	return 0;
}

int nested_svm_vmrun(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int ret;
	struct vmcb *vmcb12;
	struct kvm_host_map map;
	u64 vmcb12_gpa;
	struct vmcb *vmcb01 = svm->vmcb01.ptr;

	if (!svm->nested.hsave_msr) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	if (is_smm(vcpu)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	/* This fails when the VP assist page is enabled but the supplied GPA is bogus */
	ret = kvm_hv_verify_vp_assist(vcpu);
	if (ret) {
		kvm_inject_gp(vcpu, 0);
		return ret;
	}

	vmcb12_gpa = svm->vmcb->save.rax;
	ret = kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map);
	if (ret == -EINVAL) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	} else if (ret) {
		return kvm_skip_emulated_instruction(vcpu);
	}

	ret = kvm_skip_emulated_instruction(vcpu);

	vmcb12 = map.hva;

	if (WARN_ON_ONCE(!svm->nested.initialized))
		return -EINVAL;

	nested_copy_vmcb_control_to_cache(svm, &vmcb12->control);
	nested_copy_vmcb_save_to_cache(svm, &vmcb12->save);

	if (!nested_vmcb_check_save(vcpu) ||
	    !nested_vmcb_check_controls(vcpu)) {
		vmcb12->control.exit_code = SVM_EXIT_ERR;
		vmcb12->control.exit_code_hi = 0;
		vmcb12->control.exit_info_1 = 0;
		vmcb12->control.exit_info_2 = 0;
		goto out;
	}

	/*
	 * Since vmcb01 is not in use, we can use it to store some of the L1
	 * state.
	 */
	vmcb01->save.efer = vcpu->arch.efer;
	vmcb01->save.cr0 = kvm_read_cr0(vcpu);
	vmcb01->save.cr4 = vcpu->arch.cr4;
	vmcb01->save.rflags = kvm_get_rflags(vcpu);
	vmcb01->save.rip = kvm_rip_read(vcpu);

	if (!npt_enabled)
		vmcb01->save.cr3 = kvm_read_cr3(vcpu);

	svm->nested.nested_run_pending = 1;

	if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, true))
		goto out_exit_err;

	if (nested_svm_vmrun_msrpm(svm))
		goto out;

out_exit_err:
	svm->nested.nested_run_pending = 0;
	svm->nmi_l1_to_l2 = false;
	svm->soft_int_injected = false;

	svm->vmcb->control.exit_code = SVM_EXIT_ERR;
	svm->vmcb->control.exit_code_hi = 0;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);

out:
	kvm_vcpu_unmap(vcpu, &map, true);

	return ret;
}

/* Copy state save area fields which are handled by VMRUN */
void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
			  struct vmcb_save_area *from_save)
{
	to_save->es = from_save->es;
	to_save->cs = from_save->cs;
	to_save->ss = from_save->ss;
	to_save->ds = from_save->ds;
	to_save->gdtr = from_save->gdtr;
	to_save->idtr = from_save->idtr;
	to_save->rflags = from_save->rflags | X86_EFLAGS_FIXED;
	to_save->efer = from_save->efer;
	to_save->cr0 = from_save->cr0;
	to_save->cr3 = from_save->cr3;
	to_save->cr4 = from_save->cr4;
	to_save->rax = from_save->rax;
	to_save->rsp = from_save->rsp;
	to_save->rip = from_save->rip;
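	/*
	 * CPL is forced to 0 to match hardware: VMRUN is legal only at
	 * CPL 0, and #VMEXIT resumes the host at CPL 0.
	 */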
	to_save->cpl = 0;
}

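/* Copy the state save area fields that VMLOAD and VMSAVE transfer. */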
svm_copy_vmloadsave_state(struct vmcb * to_vmcb,struct vmcb * from_vmcb)9532bb16beaSVitaly Kuznetsov void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
954883b0a91SJoerg Roedel {
955883b0a91SJoerg Roedel to_vmcb->save.fs = from_vmcb->save.fs;
956883b0a91SJoerg Roedel to_vmcb->save.gs = from_vmcb->save.gs;
957883b0a91SJoerg Roedel to_vmcb->save.tr = from_vmcb->save.tr;
958883b0a91SJoerg Roedel to_vmcb->save.ldtr = from_vmcb->save.ldtr;
959883b0a91SJoerg Roedel to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
960883b0a91SJoerg Roedel to_vmcb->save.star = from_vmcb->save.star;
961883b0a91SJoerg Roedel to_vmcb->save.lstar = from_vmcb->save.lstar;
962883b0a91SJoerg Roedel to_vmcb->save.cstar = from_vmcb->save.cstar;
963883b0a91SJoerg Roedel to_vmcb->save.sfmask = from_vmcb->save.sfmask;
964883b0a91SJoerg Roedel to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
965883b0a91SJoerg Roedel to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
966883b0a91SJoerg Roedel to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
967883b0a91SJoerg Roedel }
968883b0a91SJoerg Roedel
nested_svm_vmexit(struct vcpu_svm * svm)969883b0a91SJoerg Roedel int nested_svm_vmexit(struct vcpu_svm *svm)
970883b0a91SJoerg Roedel {
97163129754SPaolo Bonzini struct kvm_vcpu *vcpu = &svm->vcpu;
972db663af4SMaxim Levitsky struct vmcb *vmcb01 = svm->vmcb01.ptr;
973db663af4SMaxim Levitsky struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;
9740dd16b5bSMaxim Levitsky struct vmcb *vmcb12;
975883b0a91SJoerg Roedel struct kvm_host_map map;
97663129754SPaolo Bonzini int rc;
977883b0a91SJoerg Roedel
97863129754SPaolo Bonzini rc = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map);
979883b0a91SJoerg Roedel if (rc) {
980883b0a91SJoerg Roedel if (rc == -EINVAL)
98163129754SPaolo Bonzini kvm_inject_gp(vcpu, 0);
982883b0a91SJoerg Roedel return 1;
983883b0a91SJoerg Roedel }
984883b0a91SJoerg Roedel
9850dd16b5bSMaxim Levitsky vmcb12 = map.hva;
986883b0a91SJoerg Roedel
987883b0a91SJoerg Roedel /* Exit Guest-Mode */
98863129754SPaolo Bonzini leave_guest_mode(vcpu);
9890dd16b5bSMaxim Levitsky svm->nested.vmcb12_gpa = 0;
9902d8a42beSPaolo Bonzini WARN_ON_ONCE(svm->nested.nested_run_pending);
991883b0a91SJoerg Roedel
99263129754SPaolo Bonzini kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
993f2c7ef3bSMaxim Levitsky
99438c0b192SPaolo Bonzini /* in case we halted in L2 */
99538c0b192SPaolo Bonzini svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;
99638c0b192SPaolo Bonzini
997883b0a91SJoerg Roedel /* Give the current vmcb to the guest */
998883b0a91SJoerg Roedel
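	/*
	 * Propagate the architectural VMEXIT state to vmcb12: the vmcb02
	 * save area and the exit-related control fields below are what L1
	 * observes when its VMRUN "returns".
	 */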
999db663af4SMaxim Levitsky vmcb12->save.es = vmcb02->save.es;
1000db663af4SMaxim Levitsky vmcb12->save.cs = vmcb02->save.cs;
1001db663af4SMaxim Levitsky vmcb12->save.ss = vmcb02->save.ss;
1002db663af4SMaxim Levitsky vmcb12->save.ds = vmcb02->save.ds;
1003db663af4SMaxim Levitsky vmcb12->save.gdtr = vmcb02->save.gdtr;
1004db663af4SMaxim Levitsky vmcb12->save.idtr = vmcb02->save.idtr;
10050dd16b5bSMaxim Levitsky vmcb12->save.efer = svm->vcpu.arch.efer;
100663129754SPaolo Bonzini vmcb12->save.cr0 = kvm_read_cr0(vcpu);
100763129754SPaolo Bonzini vmcb12->save.cr3 = kvm_read_cr3(vcpu);
1008db663af4SMaxim Levitsky vmcb12->save.cr2 = vmcb02->save.cr2;
10090dd16b5bSMaxim Levitsky vmcb12->save.cr4 = svm->vcpu.arch.cr4;
101063129754SPaolo Bonzini vmcb12->save.rflags = kvm_get_rflags(vcpu);
101163129754SPaolo Bonzini vmcb12->save.rip = kvm_rip_read(vcpu);
101263129754SPaolo Bonzini vmcb12->save.rsp = kvm_rsp_read(vcpu);
101363129754SPaolo Bonzini vmcb12->save.rax = kvm_rax_read(vcpu);
1014db663af4SMaxim Levitsky vmcb12->save.dr7 = vmcb02->save.dr7;
10150dd16b5bSMaxim Levitsky vmcb12->save.dr6 = svm->vcpu.arch.dr6;
1016db663af4SMaxim Levitsky vmcb12->save.cpl = vmcb02->save.cpl;
1017883b0a91SJoerg Roedel
1018db663af4SMaxim Levitsky vmcb12->control.int_state = vmcb02->control.int_state;
1019db663af4SMaxim Levitsky vmcb12->control.exit_code = vmcb02->control.exit_code;
1020db663af4SMaxim Levitsky vmcb12->control.exit_code_hi = vmcb02->control.exit_code_hi;
1021db663af4SMaxim Levitsky vmcb12->control.exit_info_1 = vmcb02->control.exit_info_1;
1022db663af4SMaxim Levitsky vmcb12->control.exit_info_2 = vmcb02->control.exit_info_2;
102336e2e983SPaolo Bonzini
10240dd16b5bSMaxim Levitsky if (vmcb12->control.exit_code != SVM_EXIT_ERR)
10259e8f0fbfSPaolo Bonzini nested_save_pending_event_to_vmcb12(svm, vmcb12);
1026883b0a91SJoerg Roedel
10277a6a6a3bSSean Christopherson if (guest_can_use(vcpu, X86_FEATURE_NRIPS))
1028db663af4SMaxim Levitsky vmcb12->control.next_rip = vmcb02->control.next_rip;
1029883b0a91SJoerg Roedel
10300dd16b5bSMaxim Levitsky vmcb12->control.int_ctl = svm->nested.ctl.int_ctl;
10310dd16b5bSMaxim Levitsky vmcb12->control.event_inj = svm->nested.ctl.event_inj;
10320dd16b5bSMaxim Levitsky vmcb12->control.event_inj_err = svm->nested.ctl.event_inj_err;
1033883b0a91SJoerg Roedel
1034e3cdaab5SPaolo Bonzini if (!kvm_pause_in_guest(vcpu->kvm)) {
103574fd41edSMaxim Levitsky vmcb01->control.pause_filter_count = vmcb02->control.pause_filter_count;
1036e3cdaab5SPaolo Bonzini vmcb_mark_dirty(vmcb01, VMCB_INTERCEPTS);
1038e3cdaab5SPaolo Bonzini 	}
103974fd41edSMaxim Levitsky
1040d00b99c5SBabu Moger nested_svm_copy_common_state(svm->nested.vmcb02.ptr, svm->vmcb01.ptr);
1041d00b99c5SBabu Moger
10424995a368SCathy Avery svm_switch_vmcb(svm, &svm->vmcb01);
10434995a368SCathy Avery
10445d1ec456SMaxim Levitsky /*
10455d1ec456SMaxim Levitsky * Rules for synchronizing int_ctl bits from vmcb02 to vmcb01:
10465d1ec456SMaxim Levitsky *
10475d1ec456SMaxim Levitsky * V_IRQ, V_IRQ_VECTOR, V_INTR_PRIO_MASK, V_IGN_TPR: If L1 doesn't
10485d1ec456SMaxim Levitsky * intercept interrupts, then KVM will use vmcb02's V_IRQ (and related
10495d1ec456SMaxim Levitsky * flags) to detect interrupt windows for L1 IRQs (even if L1 uses
10505d1ec456SMaxim Levitsky * virtual interrupt masking). Raise KVM_REQ_EVENT to ensure that
10515d1ec456SMaxim Levitsky * KVM re-requests an interrupt window if necessary, which implicitly
10525d1ec456SMaxim Levitsky * copies these bits from vmcb02 to vmcb01.
10535d1ec456SMaxim Levitsky *
10545d1ec456SMaxim Levitsky * V_TPR: If L1 doesn't use virtual interrupt masking, then L1's vTPR
10555d1ec456SMaxim Levitsky * is stored in vmcb02, but its value doesn't need to be copied from/to
10565d1ec456SMaxim Levitsky * vmcb01 because it is copied from/to the virtual APIC's TPR register
10575d1ec456SMaxim Levitsky * on each VM entry/exit.
10585d1ec456SMaxim Levitsky *
10595d1ec456SMaxim Levitsky * V_GIF: If nested vGIF is not used, KVM uses vmcb02's V_GIF for L1's
10605d1ec456SMaxim Levitsky * V_GIF. However, GIF is architecturally clear on each VM exit, thus
10615d1ec456SMaxim Levitsky * there is no need to copy V_GIF from vmcb02 to vmcb01.
10625d1ec456SMaxim Levitsky */
10635d1ec456SMaxim Levitsky if (!nested_exit_on_intr(svm))
10645d1ec456SMaxim Levitsky kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
10655d1ec456SMaxim Levitsky
1066e183d17aSSean Christopherson if (unlikely(guest_can_use(vcpu, X86_FEATURE_LBRV) &&
1067e183d17aSSean Christopherson (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
1068d20c796cSMaxim Levitsky svm_copy_lbrs(vmcb12, vmcb02);
1069d20c796cSMaxim Levitsky svm_update_lbrv(vcpu);
1070d20c796cSMaxim Levitsky } else if (unlikely(vmcb01->control.virt_ext & LBR_CTL_ENABLE_MASK)) {
10711d5a1b58SMaxim Levitsky svm_copy_lbrs(vmcb01, vmcb02);
10721d5a1b58SMaxim Levitsky svm_update_lbrv(vcpu);
10731d5a1b58SMaxim Levitsky }
10741d5a1b58SMaxim Levitsky
10750977cfacSSantosh Shukla if (vnmi) {
10760977cfacSSantosh Shukla if (vmcb02->control.int_ctl & V_NMI_BLOCKING_MASK)
10770977cfacSSantosh Shukla vmcb01->control.int_ctl |= V_NMI_BLOCKING_MASK;
10780977cfacSSantosh Shukla else
10790977cfacSSantosh Shukla vmcb01->control.int_ctl &= ~V_NMI_BLOCKING_MASK;
10800977cfacSSantosh Shukla
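		/*
		 * Hand at most one pending NMI over to L1 via vmcb01's
		 * V_NMI_PENDING bit; any further NMIs remain queued in
		 * nmi_pending until they can be delivered.
		 */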
10810977cfacSSantosh Shukla if (vcpu->arch.nmi_pending) {
10820977cfacSSantosh Shukla vcpu->arch.nmi_pending--;
10830977cfacSSantosh Shukla vmcb01->control.int_ctl |= V_NMI_PENDING_MASK;
10840977cfacSSantosh Shukla } else {
10850977cfacSSantosh Shukla vmcb01->control.int_ctl &= ~V_NMI_PENDING_MASK;
10860977cfacSSantosh Shukla }
10870977cfacSSantosh Shukla }
10880977cfacSSantosh Shukla
10894995a368SCathy Avery /*
10904995a368SCathy Avery * On vmexit, GIF is set to false so that no event can be
10914995a368SCathy Avery * injected in L1.
10924995a368SCathy Avery */
10939883764aSMaxim Levitsky svm_set_gif(svm, false);
1094db663af4SMaxim Levitsky vmcb01->control.exit_int_info = 0;
10959883764aSMaxim Levitsky
10967ca62d13SPaolo Bonzini svm->vcpu.arch.tsc_offset = svm->vcpu.arch.l1_tsc_offset;
1097db663af4SMaxim Levitsky if (vmcb01->control.tsc_offset != svm->vcpu.arch.tsc_offset) {
1098db663af4SMaxim Levitsky vmcb01->control.tsc_offset = svm->vcpu.arch.tsc_offset;
1099db663af4SMaxim Levitsky vmcb_mark_dirty(vmcb01, VMCB_INTERCEPTS);
11007ca62d13SPaolo Bonzini }
110118fc6c55SPaolo Bonzini
11020c94e246SSean Christopherson if (kvm_caps.has_tsc_control &&
11030c94e246SSean Christopherson vcpu->arch.tsc_scaling_ratio != vcpu->arch.l1_tsc_scaling_ratio) {
11045228eb96SMaxim Levitsky vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio;
11052d636990SSean Christopherson svm_write_tsc_multiplier(vcpu);
11065228eb96SMaxim Levitsky }
11075228eb96SMaxim Levitsky
1108e670bf68SPaolo Bonzini svm->nested.ctl.nested_cr3 = 0;
1109883b0a91SJoerg Roedel
11104995a368SCathy Avery /*
11114995a368SCathy Avery * Restore processor state that had been saved in vmcb01
11124995a368SCathy Avery */
1113db663af4SMaxim Levitsky kvm_set_rflags(vcpu, vmcb01->save.rflags);
1114db663af4SMaxim Levitsky svm_set_efer(vcpu, vmcb01->save.efer);
1115db663af4SMaxim Levitsky svm_set_cr0(vcpu, vmcb01->save.cr0 | X86_CR0_PE);
1116db663af4SMaxim Levitsky svm_set_cr4(vcpu, vmcb01->save.cr4);
1117db663af4SMaxim Levitsky kvm_rax_write(vcpu, vmcb01->save.rax);
1118db663af4SMaxim Levitsky kvm_rsp_write(vcpu, vmcb01->save.rsp);
1119db663af4SMaxim Levitsky kvm_rip_write(vcpu, vmcb01->save.rip);
11204995a368SCathy Avery
11214995a368SCathy Avery svm->vcpu.arch.dr7 = DR7_FIXED_1;
11224995a368SCathy Avery kvm_update_dr7(&svm->vcpu);
1123883b0a91SJoerg Roedel
11240dd16b5bSMaxim Levitsky trace_kvm_nested_vmexit_inject(vmcb12->control.exit_code,
11250dd16b5bSMaxim Levitsky vmcb12->control.exit_info_1,
11260dd16b5bSMaxim Levitsky vmcb12->control.exit_info_2,
11270dd16b5bSMaxim Levitsky vmcb12->control.exit_int_info,
11280dd16b5bSMaxim Levitsky vmcb12->control.exit_int_info_err,
112936e2e983SPaolo Bonzini KVM_ISA_SVM);
113036e2e983SPaolo Bonzini
113163129754SPaolo Bonzini kvm_vcpu_unmap(vcpu, &map, true);
1132883b0a91SJoerg Roedel
1133d2e56019SSean Christopherson nested_svm_transition_tlb_flush(vcpu);
1134d2e56019SSean Christopherson
113563129754SPaolo Bonzini nested_svm_uninit_mmu_context(vcpu);
1136bf7dea42SVitaly Kuznetsov
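	/*
	 * Switch the MMU back to L1's CR3; nested_npt is false here since
	 * NPT translation of L2 no longer applies on the way back to L1.
	 */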
1137db663af4SMaxim Levitsky rc = nested_svm_load_cr3(vcpu, vmcb01->save.cr3, false, true);
1138d82aaef9SVitaly Kuznetsov if (rc)
1139d82aaef9SVitaly Kuznetsov return 1;
1140bf7dea42SVitaly Kuznetsov
1141883b0a91SJoerg Roedel /*
1142883b0a91SJoerg Roedel * Drop what we picked up for L2 via svm_complete_interrupts() so it
1143883b0a91SJoerg Roedel * doesn't end up in L1.
1144883b0a91SJoerg Roedel */
1145883b0a91SJoerg Roedel svm->vcpu.arch.nmi_injected = false;
114663129754SPaolo Bonzini kvm_clear_exception_queue(vcpu);
114763129754SPaolo Bonzini kvm_clear_interrupt_queue(vcpu);
1148883b0a91SJoerg Roedel
11499a7de6ecSKrish Sadhukhan /*
11509a7de6ecSKrish Sadhukhan * If we are here following the completion of a VMRUN that
11519a7de6ecSKrish Sadhukhan * is being single-stepped, queue the pending #DB intercept
11529a7de6ecSKrish Sadhukhan * right now so that it can be accounted for before we execute
11539a7de6ecSKrish Sadhukhan * L1's next instruction.
11549a7de6ecSKrish Sadhukhan */
1155db663af4SMaxim Levitsky if (unlikely(vmcb01->save.rflags & X86_EFLAGS_TF))
11569a7de6ecSKrish Sadhukhan kvm_queue_exception(&(svm->vcpu), DB_VECTOR);
11579a7de6ecSKrish Sadhukhan
1158f44509f8SMaxim Levitsky /*
1159f44509f8SMaxim Levitsky * Un-inhibit the AVIC right away, so that other vCPUs can
1160f44509f8SMaxim Levitsky * start to benefit from it as soon as possible.
1161f44509f8SMaxim Levitsky */
1162f44509f8SMaxim Levitsky if (kvm_apicv_activated(vcpu->kvm))
11632008fab3SSean Christopherson __kvm_vcpu_update_apicv(vcpu);
1164f44509f8SMaxim Levitsky
1165883b0a91SJoerg Roedel return 0;
1166883b0a91SJoerg Roedel }
1167883b0a91SJoerg Roedel
1168cb6a32c2SSean Christopherson static void nested_svm_triple_fault(struct kvm_vcpu *vcpu)
1169cb6a32c2SSean Christopherson {
117092e7d5c8SMaxim Levitsky struct vcpu_svm *svm = to_svm(vcpu);
117192e7d5c8SMaxim Levitsky
117292e7d5c8SMaxim Levitsky if (!vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SHUTDOWN))
117392e7d5c8SMaxim Levitsky return;
117492e7d5c8SMaxim Levitsky
117592e7d5c8SMaxim Levitsky kvm_clear_request(KVM_REQ_TRIPLE_FAULT, vcpu);
11763a87c7e0SSean Christopherson nested_svm_simple_vmexit(to_svm(vcpu), SVM_EXIT_SHUTDOWN);
1177cb6a32c2SSean Christopherson }
1178cb6a32c2SSean Christopherson
11792fcf4876SMaxim Levitsky int svm_allocate_nested(struct vcpu_svm *svm)
11802fcf4876SMaxim Levitsky {
11814995a368SCathy Avery struct page *vmcb02_page;
11822fcf4876SMaxim Levitsky
11832fcf4876SMaxim Levitsky if (svm->nested.initialized)
11842fcf4876SMaxim Levitsky return 0;
11852fcf4876SMaxim Levitsky
11864995a368SCathy Avery vmcb02_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
11874995a368SCathy Avery if (!vmcb02_page)
11882fcf4876SMaxim Levitsky return -ENOMEM;
11894995a368SCathy Avery svm->nested.vmcb02.ptr = page_address(vmcb02_page);
11904995a368SCathy Avery svm->nested.vmcb02.pa = __sme_set(page_to_pfn(vmcb02_page) << PAGE_SHIFT);
11912fcf4876SMaxim Levitsky
11922fcf4876SMaxim Levitsky svm->nested.msrpm = svm_vcpu_alloc_msrpm();
11932fcf4876SMaxim Levitsky if (!svm->nested.msrpm)
11944995a368SCathy Avery goto err_free_vmcb02;
11952fcf4876SMaxim Levitsky svm_vcpu_init_msrpm(&svm->vcpu, svm->nested.msrpm);
11962fcf4876SMaxim Levitsky
11972fcf4876SMaxim Levitsky svm->nested.initialized = true;
11982fcf4876SMaxim Levitsky return 0;
11992fcf4876SMaxim Levitsky
12004995a368SCathy Avery err_free_vmcb02:
12014995a368SCathy Avery __free_page(vmcb02_page);
12022fcf4876SMaxim Levitsky return -ENOMEM;
12032fcf4876SMaxim Levitsky }
12042fcf4876SMaxim Levitsky
12052fcf4876SMaxim Levitsky void svm_free_nested(struct vcpu_svm *svm)
12062fcf4876SMaxim Levitsky {
12072fcf4876SMaxim Levitsky if (!svm->nested.initialized)
12082fcf4876SMaxim Levitsky return;
12092fcf4876SMaxim Levitsky
121016ae56d7SMaxim Levitsky if (WARN_ON_ONCE(svm->vmcb != svm->vmcb01.ptr))
121116ae56d7SMaxim Levitsky svm_switch_vmcb(svm, &svm->vmcb01);
121216ae56d7SMaxim Levitsky
12132fcf4876SMaxim Levitsky svm_vcpu_free_msrpm(svm->nested.msrpm);
12142fcf4876SMaxim Levitsky svm->nested.msrpm = NULL;
12152fcf4876SMaxim Levitsky
12164995a368SCathy Avery __free_page(virt_to_page(svm->nested.vmcb02.ptr));
12174995a368SCathy Avery svm->nested.vmcb02.ptr = NULL;
12182fcf4876SMaxim Levitsky
1219c74ad08fSMaxim Levitsky /*
1220c74ad08fSMaxim Levitsky * When last_vmcb12_gpa matches the current vmcb12 gpa,
1221c74ad08fSMaxim Levitsky * some vmcb12 fields are not loaded if they are marked clean
1222c74ad08fSMaxim Levitsky * in the vmcb12, since in this case they are up to date already.
1223c74ad08fSMaxim Levitsky *
1224c74ad08fSMaxim Levitsky * When the vmcb02 is freed, this optimization becomes invalid.
1225c74ad08fSMaxim Levitsky */
1226c74ad08fSMaxim Levitsky svm->nested.last_vmcb12_gpa = INVALID_GPA;
1227c74ad08fSMaxim Levitsky
12282fcf4876SMaxim Levitsky svm->nested.initialized = false;
12292fcf4876SMaxim Levitsky }
12302fcf4876SMaxim Levitsky
1231f7e57078SSean Christopherson void svm_leave_nested(struct kvm_vcpu *vcpu)
1232c513f484SPaolo Bonzini {
1233f7e57078SSean Christopherson struct vcpu_svm *svm = to_svm(vcpu);
123463129754SPaolo Bonzini
123563129754SPaolo Bonzini if (is_guest_mode(vcpu)) {
1236c513f484SPaolo Bonzini svm->nested.nested_run_pending = 0;
1237c74ad08fSMaxim Levitsky svm->nested.vmcb12_gpa = INVALID_GPA;
1238c74ad08fSMaxim Levitsky
123963129754SPaolo Bonzini leave_guest_mode(vcpu);
12404995a368SCathy Avery
1241deee59baSMaxim Levitsky svm_switch_vmcb(svm, &svm->vmcb01);
12424995a368SCathy Avery
124363129754SPaolo Bonzini nested_svm_uninit_mmu_context(vcpu);
124456fe28deSMaxim Levitsky vmcb_mark_all_dirty(svm->vmcb);
12453fdc6087SMaxim Levitsky
12463fdc6087SMaxim Levitsky if (kvm_apicv_activated(vcpu->kvm))
12473fdc6087SMaxim Levitsky kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
1248c513f484SPaolo Bonzini }
1249a7d5c7ceSPaolo Bonzini
125063129754SPaolo Bonzini kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
1251c513f484SPaolo Bonzini }
1252c513f484SPaolo Bonzini
1253883b0a91SJoerg Roedel static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
1254883b0a91SJoerg Roedel {
1255883b0a91SJoerg Roedel u32 offset, msr, value;
1256883b0a91SJoerg Roedel int write, mask;
1257883b0a91SJoerg Roedel
12588fc78909SEmanuele Giuseppe Esposito if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
1259883b0a91SJoerg Roedel return NESTED_EXIT_HOST;
1260883b0a91SJoerg Roedel
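	/*
	 * The MSR permission bitmap uses two bits per MSR, the even bit
	 * intercepting reads and the odd bit intercepting writes;
	 * svm_msrpm_offset() returns the 32-bit word that covers the 16
	 * MSRs containing this one, and the mask below selects the MSR's
	 * read or write bit within that word.
	 */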
1261883b0a91SJoerg Roedel msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
1262883b0a91SJoerg Roedel offset = svm_msrpm_offset(msr);
1263883b0a91SJoerg Roedel write = svm->vmcb->control.exit_info_1 & 1;
1264883b0a91SJoerg Roedel mask = 1 << ((2 * (msr & 0xf)) + write);
1265883b0a91SJoerg Roedel
1266883b0a91SJoerg Roedel if (offset == MSR_INVALID)
1267883b0a91SJoerg Roedel return NESTED_EXIT_DONE;
1268883b0a91SJoerg Roedel
1269883b0a91SJoerg Roedel /* The offset is in 32-bit units, but we need it in 8-bit units */
1270883b0a91SJoerg Roedel offset *= 4;
1271883b0a91SJoerg Roedel
1272e670bf68SPaolo Bonzini if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.ctl.msrpm_base_pa + offset, &value, 4))
1273883b0a91SJoerg Roedel return NESTED_EXIT_DONE;
1274883b0a91SJoerg Roedel
1275883b0a91SJoerg Roedel return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
1276883b0a91SJoerg Roedel }
1277883b0a91SJoerg Roedel
1278883b0a91SJoerg Roedel static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
1279883b0a91SJoerg Roedel {
1280883b0a91SJoerg Roedel unsigned port, size, iopm_len;
1281883b0a91SJoerg Roedel u16 val, mask;
1282883b0a91SJoerg Roedel u8 start_bit;
1283883b0a91SJoerg Roedel u64 gpa;
1284883b0a91SJoerg Roedel
12858fc78909SEmanuele Giuseppe Esposito if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_IOIO_PROT)))
1286883b0a91SJoerg Roedel return NESTED_EXIT_HOST;
1287883b0a91SJoerg Roedel
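	/*
	 * The I/O permission bitmap has one bit per port.  An access of
	 * 'size' bytes occupies 'size' consecutive bits starting at
	 * 'port' and may straddle a byte boundary, hence up to two bytes
	 * are read below.
	 */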
1288883b0a91SJoerg Roedel port = svm->vmcb->control.exit_info_1 >> 16;
1289883b0a91SJoerg Roedel size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
1290883b0a91SJoerg Roedel SVM_IOIO_SIZE_SHIFT;
1291e670bf68SPaolo Bonzini gpa = svm->nested.ctl.iopm_base_pa + (port / 8);
1292883b0a91SJoerg Roedel start_bit = port % 8;
1293883b0a91SJoerg Roedel iopm_len = (start_bit + size > 8) ? 2 : 1;
1294883b0a91SJoerg Roedel mask = (0xf >> (4 - size)) << start_bit;
1295883b0a91SJoerg Roedel val = 0;
1296883b0a91SJoerg Roedel
1297883b0a91SJoerg Roedel if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
1298883b0a91SJoerg Roedel return NESTED_EXIT_DONE;
1299883b0a91SJoerg Roedel
1300883b0a91SJoerg Roedel return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
1301883b0a91SJoerg Roedel }
1302883b0a91SJoerg Roedel
1303883b0a91SJoerg Roedel static int nested_svm_intercept(struct vcpu_svm *svm)
1304883b0a91SJoerg Roedel {
1305883b0a91SJoerg Roedel u32 exit_code = svm->vmcb->control.exit_code;
1306883b0a91SJoerg Roedel int vmexit = NESTED_EXIT_HOST;
1307883b0a91SJoerg Roedel
1308883b0a91SJoerg Roedel switch (exit_code) {
1309883b0a91SJoerg Roedel case SVM_EXIT_MSR:
1310883b0a91SJoerg Roedel vmexit = nested_svm_exit_handled_msr(svm);
1311883b0a91SJoerg Roedel break;
1312883b0a91SJoerg Roedel case SVM_EXIT_IOIO:
1313883b0a91SJoerg Roedel vmexit = nested_svm_intercept_ioio(svm);
1314883b0a91SJoerg Roedel break;
1315883b0a91SJoerg Roedel case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
13168fc78909SEmanuele Giuseppe Esposito if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
1317883b0a91SJoerg Roedel vmexit = NESTED_EXIT_DONE;
1318883b0a91SJoerg Roedel break;
1319883b0a91SJoerg Roedel }
1320883b0a91SJoerg Roedel case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
13218fc78909SEmanuele Giuseppe Esposito if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
1322883b0a91SJoerg Roedel vmexit = NESTED_EXIT_DONE;
1323883b0a91SJoerg Roedel break;
1324883b0a91SJoerg Roedel }
1325883b0a91SJoerg Roedel case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
13267c86663bSPaolo Bonzini /*
13277c86663bSPaolo Bonzini * Host-intercepted exceptions have been checked already in
13287c86663bSPaolo Bonzini * nested_svm_exit_special. There is nothing to do here,
13297c86663bSPaolo Bonzini * the vmexit is injected by svm_check_nested_events.
13307c86663bSPaolo Bonzini */
1331883b0a91SJoerg Roedel vmexit = NESTED_EXIT_DONE;
1332883b0a91SJoerg Roedel break;
1333883b0a91SJoerg Roedel }
1334883b0a91SJoerg Roedel case SVM_EXIT_ERR: {
1335883b0a91SJoerg Roedel vmexit = NESTED_EXIT_DONE;
1336883b0a91SJoerg Roedel break;
1337883b0a91SJoerg Roedel }
1338883b0a91SJoerg Roedel default: {
13398fc78909SEmanuele Giuseppe Esposito if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
1340883b0a91SJoerg Roedel vmexit = NESTED_EXIT_DONE;
1341883b0a91SJoerg Roedel }
1342883b0a91SJoerg Roedel }
1343883b0a91SJoerg Roedel
1344883b0a91SJoerg Roedel return vmexit;
1345883b0a91SJoerg Roedel }
1346883b0a91SJoerg Roedel
1347883b0a91SJoerg Roedel int nested_svm_exit_handled(struct vcpu_svm *svm)
1348883b0a91SJoerg Roedel {
1349883b0a91SJoerg Roedel int vmexit;
1350883b0a91SJoerg Roedel
1351883b0a91SJoerg Roedel vmexit = nested_svm_intercept(svm);
1352883b0a91SJoerg Roedel
1353883b0a91SJoerg Roedel if (vmexit == NESTED_EXIT_DONE)
1354883b0a91SJoerg Roedel nested_svm_vmexit(svm);
1355883b0a91SJoerg Roedel
1356883b0a91SJoerg Roedel return vmexit;
1357883b0a91SJoerg Roedel }
1358883b0a91SJoerg Roedel
135963129754SPaolo Bonzini int nested_svm_check_permissions(struct kvm_vcpu *vcpu)
1360883b0a91SJoerg Roedel {
136163129754SPaolo Bonzini if (!(vcpu->arch.efer & EFER_SVME) || !is_paging(vcpu)) {
136263129754SPaolo Bonzini kvm_queue_exception(vcpu, UD_VECTOR);
1363883b0a91SJoerg Roedel return 1;
1364883b0a91SJoerg Roedel }
1365883b0a91SJoerg Roedel
136663129754SPaolo Bonzini if (to_svm(vcpu)->vmcb->save.cpl) {
136763129754SPaolo Bonzini kvm_inject_gp(vcpu, 0);
1368883b0a91SJoerg Roedel return 1;
1369883b0a91SJoerg Roedel }
1370883b0a91SJoerg Roedel
1371883b0a91SJoerg Roedel return 0;
1372883b0a91SJoerg Roedel }
1373883b0a91SJoerg Roedel
13747709aba8SSean Christopherson static bool nested_svm_is_exception_vmexit(struct kvm_vcpu *vcpu, u8 vector,
13757709aba8SSean Christopherson u32 error_code)
1376883b0a91SJoerg Roedel {
13777709aba8SSean Christopherson struct vcpu_svm *svm = to_svm(vcpu);
1378883b0a91SJoerg Roedel
1379d4963e31SSean Christopherson return (svm->nested.ctl.intercepts[INTERCEPT_EXCEPTION] & BIT(vector));
13807c86663bSPaolo Bonzini }
1381883b0a91SJoerg Roedel
1382d4963e31SSean Christopherson static void nested_svm_inject_exception_vmexit(struct kvm_vcpu *vcpu)
13837c86663bSPaolo Bonzini {
13847709aba8SSean Christopherson struct kvm_queued_exception *ex = &vcpu->arch.exception_vmexit;
1385d4963e31SSean Christopherson struct vcpu_svm *svm = to_svm(vcpu);
1386db663af4SMaxim Levitsky struct vmcb *vmcb = svm->vmcb;
1387883b0a91SJoerg Roedel
1388d4963e31SSean Christopherson vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + ex->vector;
1389db663af4SMaxim Levitsky vmcb->control.exit_code_hi = 0;
13907c86663bSPaolo Bonzini
1391d4963e31SSean Christopherson if (ex->has_error_code)
1392d4963e31SSean Christopherson vmcb->control.exit_info_1 = ex->error_code;
1393883b0a91SJoerg Roedel
1394883b0a91SJoerg Roedel /*
1395883b0a91SJoerg Roedel * EXITINFO2 is undefined for all exception intercepts other
1396883b0a91SJoerg Roedel * than #PF.
1397883b0a91SJoerg Roedel */
1398d4963e31SSean Christopherson if (ex->vector == PF_VECTOR) {
13997709aba8SSean Christopherson if (ex->has_payload)
1400d4963e31SSean Christopherson vmcb->control.exit_info_2 = ex->payload;
1401883b0a91SJoerg Roedel else
1402d4963e31SSean Christopherson vmcb->control.exit_info_2 = vcpu->arch.cr2;
1403d4963e31SSean Christopherson } else if (ex->vector == DB_VECTOR) {
1404e746c1f1SSean Christopherson /* See kvm_check_and_inject_events(). */
1405d4963e31SSean Christopherson kvm_deliver_exception_payload(vcpu, ex);
1406d4963e31SSean Christopherson
1407d4963e31SSean Christopherson if (vcpu->arch.dr7 & DR7_GD) {
1408d4963e31SSean Christopherson vcpu->arch.dr7 &= ~DR7_GD;
1409d4963e31SSean Christopherson kvm_update_dr7(vcpu);
14107c86663bSPaolo Bonzini }
1411d4963e31SSean Christopherson } else {
1412d4963e31SSean Christopherson WARN_ON(ex->has_payload);
1413d4963e31SSean Christopherson }
1414883b0a91SJoerg Roedel
14157c86663bSPaolo Bonzini nested_svm_vmexit(svm);
1416883b0a91SJoerg Roedel }
1417883b0a91SJoerg Roedel
14185b672408SPaolo Bonzini static inline bool nested_exit_on_init(struct vcpu_svm *svm)
14195b672408SPaolo Bonzini {
14208fc78909SEmanuele Giuseppe Esposito return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_INIT);
14215b672408SPaolo Bonzini }
14225b672408SPaolo Bonzini
142333b22172SPaolo Bonzini static int svm_check_nested_events(struct kvm_vcpu *vcpu)
1424883b0a91SJoerg Roedel {
14255b672408SPaolo Bonzini struct kvm_lapic *apic = vcpu->arch.apic;
142672c14e00SSean Christopherson struct vcpu_svm *svm = to_svm(vcpu);
142772c14e00SSean Christopherson /*
142872c14e00SSean Christopherson * Only a pending nested run blocks a pending exception. If there is a
142972c14e00SSean Christopherson * previously injected event, the pending exception occurred while said
143072c14e00SSean Christopherson * event was being delivered and thus needs to be handled.
143172c14e00SSean Christopherson */
143272c14e00SSean Christopherson bool block_nested_exceptions = svm->nested.nested_run_pending;
143372c14e00SSean Christopherson /*
143472c14e00SSean Christopherson * New events (not exceptions) are only recognized at instruction
143572c14e00SSean Christopherson * boundaries. If an event needs reinjection, then KVM is handling a
143672c14e00SSean Christopherson * VM-Exit that occurred _during_ instruction execution; new events are
143772c14e00SSean Christopherson * blocked until the instruction completes.
143872c14e00SSean Christopherson */
143972c14e00SSean Christopherson bool block_nested_events = block_nested_exceptions ||
144072c14e00SSean Christopherson kvm_event_needs_reinjection(vcpu);
14415b672408SPaolo Bonzini
14425b672408SPaolo Bonzini if (lapic_in_kernel(vcpu) &&
14435b672408SPaolo Bonzini test_bit(KVM_APIC_INIT, &apic->pending_events)) {
14445b672408SPaolo Bonzini if (block_nested_events)
14455b672408SPaolo Bonzini return -EBUSY;
14465b672408SPaolo Bonzini if (!nested_exit_on_init(svm))
14475b672408SPaolo Bonzini return 0;
14483a87c7e0SSean Christopherson nested_svm_simple_vmexit(svm, SVM_EXIT_INIT);
14495b672408SPaolo Bonzini return 0;
14505b672408SPaolo Bonzini }
1451883b0a91SJoerg Roedel
14527709aba8SSean Christopherson if (vcpu->arch.exception_vmexit.pending) {
14537709aba8SSean Christopherson if (block_nested_exceptions)
14547709aba8SSean Christopherson return -EBUSY;
14557709aba8SSean Christopherson nested_svm_inject_exception_vmexit(vcpu);
14567709aba8SSean Christopherson return 0;
14577709aba8SSean Christopherson }
14587709aba8SSean Christopherson
14597c86663bSPaolo Bonzini if (vcpu->arch.exception.pending) {
146072c14e00SSean Christopherson if (block_nested_exceptions)
14617c86663bSPaolo Bonzini return -EBUSY;
14627c86663bSPaolo Bonzini return 0;
14637c86663bSPaolo Bonzini }
14647c86663bSPaolo Bonzini
146531e83e21SPaolo Bonzini #ifdef CONFIG_KVM_SMM
1466221e7610SPaolo Bonzini if (vcpu->arch.smi_pending && !svm_smi_blocked(vcpu)) {
146755714cddSPaolo Bonzini if (block_nested_events)
146855714cddSPaolo Bonzini return -EBUSY;
1469221e7610SPaolo Bonzini if (!nested_exit_on_smi(svm))
1470221e7610SPaolo Bonzini return 0;
14713a87c7e0SSean Christopherson nested_svm_simple_vmexit(svm, SVM_EXIT_SMI);
147255714cddSPaolo Bonzini return 0;
147355714cddSPaolo Bonzini }
147431e83e21SPaolo Bonzini #endif
147555714cddSPaolo Bonzini
1476221e7610SPaolo Bonzini if (vcpu->arch.nmi_pending && !svm_nmi_blocked(vcpu)) {
14779c3d370aSCathy Avery if (block_nested_events)
14789c3d370aSCathy Avery return -EBUSY;
1479221e7610SPaolo Bonzini if (!nested_exit_on_nmi(svm))
1480221e7610SPaolo Bonzini return 0;
14813a87c7e0SSean Christopherson nested_svm_simple_vmexit(svm, SVM_EXIT_NMI);
14829c3d370aSCathy Avery return 0;
14839c3d370aSCathy Avery }
14849c3d370aSCathy Avery
1485221e7610SPaolo Bonzini if (kvm_cpu_has_interrupt(vcpu) && !svm_interrupt_blocked(vcpu)) {
1486883b0a91SJoerg Roedel if (block_nested_events)
1487883b0a91SJoerg Roedel return -EBUSY;
1488221e7610SPaolo Bonzini if (!nested_exit_on_intr(svm))
1489221e7610SPaolo Bonzini return 0;
14903a87c7e0SSean Christopherson trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
14913a87c7e0SSean Christopherson nested_svm_simple_vmexit(svm, SVM_EXIT_INTR);
1492883b0a91SJoerg Roedel return 0;
1493883b0a91SJoerg Roedel }
1494883b0a91SJoerg Roedel
1495883b0a91SJoerg Roedel return 0;
1496883b0a91SJoerg Roedel }
1497883b0a91SJoerg Roedel
1498883b0a91SJoerg Roedel int nested_svm_exit_special(struct vcpu_svm *svm)
1499883b0a91SJoerg Roedel {
1500883b0a91SJoerg Roedel u32 exit_code = svm->vmcb->control.exit_code;
15013f4a812eSVitaly Kuznetsov struct kvm_vcpu *vcpu = &svm->vcpu;
1502883b0a91SJoerg Roedel
1503883b0a91SJoerg Roedel switch (exit_code) {
1504883b0a91SJoerg Roedel case SVM_EXIT_INTR:
1505883b0a91SJoerg Roedel case SVM_EXIT_NMI:
1506883b0a91SJoerg Roedel case SVM_EXIT_NPF:
1507883b0a91SJoerg Roedel return NESTED_EXIT_HOST;
15087c86663bSPaolo Bonzini case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
15097c86663bSPaolo Bonzini u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
15107c86663bSPaolo Bonzini
15114995a368SCathy Avery if (svm->vmcb01.ptr->control.intercepts[INTERCEPT_EXCEPTION] &
15129780d51dSBabu Moger excp_bits)
15137c86663bSPaolo Bonzini return NESTED_EXIT_HOST;
15147c86663bSPaolo Bonzini else if (exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR &&
151568fd66f1SVitaly Kuznetsov svm->vcpu.arch.apf.host_apf_flags)
1516a3535be7SPaolo Bonzini /* Trap async PF even if not shadowing */
1517883b0a91SJoerg Roedel return NESTED_EXIT_HOST;
1518883b0a91SJoerg Roedel break;
15197c86663bSPaolo Bonzini }
15203f4a812eSVitaly Kuznetsov case SVM_EXIT_VMMCALL:
15213f4a812eSVitaly Kuznetsov /* Hyper-V L2 TLB flush hypercall is handled by L0 */
15223f4a812eSVitaly Kuznetsov if (guest_hv_cpuid_has_l2_tlb_flush(vcpu) &&
15233f4a812eSVitaly Kuznetsov nested_svm_l2_tlb_flush_enabled(vcpu) &&
15243f4a812eSVitaly Kuznetsov kvm_hv_is_tlb_flush_hcall(vcpu))
15253f4a812eSVitaly Kuznetsov return NESTED_EXIT_HOST;
15263f4a812eSVitaly Kuznetsov break;
1527883b0a91SJoerg Roedel default:
1528883b0a91SJoerg Roedel break;
1529883b0a91SJoerg Roedel }
1530883b0a91SJoerg Roedel
1531883b0a91SJoerg Roedel return NESTED_EXIT_CONTINUE;
1532883b0a91SJoerg Roedel }
153333b22172SPaolo Bonzini
15345228eb96SMaxim Levitsky void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu)
15355228eb96SMaxim Levitsky {
15365228eb96SMaxim Levitsky struct vcpu_svm *svm = to_svm(vcpu);
15375228eb96SMaxim Levitsky
15385228eb96SMaxim Levitsky vcpu->arch.tsc_scaling_ratio =
15395228eb96SMaxim Levitsky kvm_calc_nested_tsc_multiplier(vcpu->arch.l1_tsc_scaling_ratio,
15405228eb96SMaxim Levitsky svm->tsc_ratio_msr);
15412d636990SSean Christopherson svm_write_tsc_multiplier(vcpu);
15425228eb96SMaxim Levitsky }
15435228eb96SMaxim Levitsky
15448fc78909SEmanuele Giuseppe Esposito /* Inverse operation of nested_copy_vmcb_control_to_cache(). asid is copied too. */
15458fc78909SEmanuele Giuseppe Esposito static void nested_copy_vmcb_cache_to_control(struct vmcb_control_area *dst,
15468fc78909SEmanuele Giuseppe Esposito struct vmcb_ctrl_area_cached *from)
15478fc78909SEmanuele Giuseppe Esposito {
15488fc78909SEmanuele Giuseppe Esposito unsigned int i;
15498fc78909SEmanuele Giuseppe Esposito
15508fc78909SEmanuele Giuseppe Esposito memset(dst, 0, sizeof(struct vmcb_control_area));
15518fc78909SEmanuele Giuseppe Esposito
15528fc78909SEmanuele Giuseppe Esposito for (i = 0; i < MAX_INTERCEPT; i++)
15538fc78909SEmanuele Giuseppe Esposito dst->intercepts[i] = from->intercepts[i];
15548fc78909SEmanuele Giuseppe Esposito
15558fc78909SEmanuele Giuseppe Esposito dst->iopm_base_pa = from->iopm_base_pa;
15568fc78909SEmanuele Giuseppe Esposito dst->msrpm_base_pa = from->msrpm_base_pa;
15578fc78909SEmanuele Giuseppe Esposito dst->tsc_offset = from->tsc_offset;
15588fc78909SEmanuele Giuseppe Esposito dst->asid = from->asid;
15598fc78909SEmanuele Giuseppe Esposito dst->tlb_ctl = from->tlb_ctl;
15608fc78909SEmanuele Giuseppe Esposito dst->int_ctl = from->int_ctl;
15618fc78909SEmanuele Giuseppe Esposito dst->int_vector = from->int_vector;
15628fc78909SEmanuele Giuseppe Esposito dst->int_state = from->int_state;
15638fc78909SEmanuele Giuseppe Esposito dst->exit_code = from->exit_code;
15648fc78909SEmanuele Giuseppe Esposito dst->exit_code_hi = from->exit_code_hi;
15658fc78909SEmanuele Giuseppe Esposito dst->exit_info_1 = from->exit_info_1;
15668fc78909SEmanuele Giuseppe Esposito dst->exit_info_2 = from->exit_info_2;
15678fc78909SEmanuele Giuseppe Esposito dst->exit_int_info = from->exit_int_info;
15688fc78909SEmanuele Giuseppe Esposito dst->exit_int_info_err = from->exit_int_info_err;
15698fc78909SEmanuele Giuseppe Esposito dst->nested_ctl = from->nested_ctl;
15708fc78909SEmanuele Giuseppe Esposito dst->event_inj = from->event_inj;
15718fc78909SEmanuele Giuseppe Esposito dst->event_inj_err = from->event_inj_err;
157200f08d99SMaciej S. Szmigiero dst->next_rip = from->next_rip;
15738fc78909SEmanuele Giuseppe Esposito dst->nested_cr3 = from->nested_cr3;
15748fc78909SEmanuele Giuseppe Esposito dst->virt_ext = from->virt_ext;
15758fc78909SEmanuele Giuseppe Esposito dst->pause_filter_count = from->pause_filter_count;
15768fc78909SEmanuele Giuseppe Esposito dst->pause_filter_thresh = from->pause_filter_thresh;
157768ae7c7bSSean Christopherson /* 'clean' and 'hv_enlightenments' are not changed by KVM */
15788fc78909SEmanuele Giuseppe Esposito }
15798fc78909SEmanuele Giuseppe Esposito
1580cc440cdaSPaolo Bonzini static int svm_get_nested_state(struct kvm_vcpu *vcpu,
1581cc440cdaSPaolo Bonzini struct kvm_nested_state __user *user_kvm_nested_state,
1582cc440cdaSPaolo Bonzini u32 user_data_size)
1583cc440cdaSPaolo Bonzini {
1584cc440cdaSPaolo Bonzini struct vcpu_svm *svm;
15858fc78909SEmanuele Giuseppe Esposito struct vmcb_control_area *ctl;
15868fc78909SEmanuele Giuseppe Esposito unsigned long r;
1587cc440cdaSPaolo Bonzini struct kvm_nested_state kvm_state = {
1588cc440cdaSPaolo Bonzini .flags = 0,
1589cc440cdaSPaolo Bonzini .format = KVM_STATE_NESTED_FORMAT_SVM,
1590cc440cdaSPaolo Bonzini .size = sizeof(kvm_state),
1591cc440cdaSPaolo Bonzini };
1592cc440cdaSPaolo Bonzini struct vmcb __user *user_vmcb = (struct vmcb __user *)
1593cc440cdaSPaolo Bonzini &user_kvm_nested_state->data.svm[0];
1594cc440cdaSPaolo Bonzini
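	/*
	 * A NULL vcpu queries only the maximum amount of nested state,
	 * e.g. for the KVM_CAP_NESTED_STATE capability check, so that the
	 * caller can size its buffer.
	 */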
1595cc440cdaSPaolo Bonzini if (!vcpu)
1596cc440cdaSPaolo Bonzini return kvm_state.size + KVM_STATE_NESTED_SVM_VMCB_SIZE;
1597cc440cdaSPaolo Bonzini
1598cc440cdaSPaolo Bonzini svm = to_svm(vcpu);
1599cc440cdaSPaolo Bonzini
1600cc440cdaSPaolo Bonzini if (user_data_size < kvm_state.size)
1601cc440cdaSPaolo Bonzini goto out;
1602cc440cdaSPaolo Bonzini
1603cc440cdaSPaolo Bonzini /* First fill in the header and copy it out. */
1604cc440cdaSPaolo Bonzini if (is_guest_mode(vcpu)) {
16050dd16b5bSMaxim Levitsky kvm_state.hdr.svm.vmcb_pa = svm->nested.vmcb12_gpa;
1606cc440cdaSPaolo Bonzini kvm_state.size += KVM_STATE_NESTED_SVM_VMCB_SIZE;
1607cc440cdaSPaolo Bonzini kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;
1608cc440cdaSPaolo Bonzini
1609cc440cdaSPaolo Bonzini if (svm->nested.nested_run_pending)
1610cc440cdaSPaolo Bonzini kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
1611cc440cdaSPaolo Bonzini }
1612cc440cdaSPaolo Bonzini
1613cc440cdaSPaolo Bonzini if (gif_set(svm))
1614cc440cdaSPaolo Bonzini kvm_state.flags |= KVM_STATE_NESTED_GIF_SET;
1615cc440cdaSPaolo Bonzini
1616cc440cdaSPaolo Bonzini if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
1617cc440cdaSPaolo Bonzini return -EFAULT;
1618cc440cdaSPaolo Bonzini
1619cc440cdaSPaolo Bonzini if (!is_guest_mode(vcpu))
1620cc440cdaSPaolo Bonzini goto out;
1621cc440cdaSPaolo Bonzini
1622cc440cdaSPaolo Bonzini /*
1623cc440cdaSPaolo Bonzini * Copy over the full size of the VMCB rather than just the size
1624cc440cdaSPaolo Bonzini * of the structs.
1625cc440cdaSPaolo Bonzini */
1626cc440cdaSPaolo Bonzini if (clear_user(user_vmcb, KVM_STATE_NESTED_SVM_VMCB_SIZE))
1627cc440cdaSPaolo Bonzini return -EFAULT;
16288fc78909SEmanuele Giuseppe Esposito
16298fc78909SEmanuele Giuseppe Esposito ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
16308fc78909SEmanuele Giuseppe Esposito if (!ctl)
16318fc78909SEmanuele Giuseppe Esposito return -ENOMEM;
16328fc78909SEmanuele Giuseppe Esposito
16338fc78909SEmanuele Giuseppe Esposito nested_copy_vmcb_cache_to_control(ctl, &svm->nested.ctl);
16348fc78909SEmanuele Giuseppe Esposito r = copy_to_user(&user_vmcb->control, ctl,
16358fc78909SEmanuele Giuseppe Esposito sizeof(user_vmcb->control));
16368fc78909SEmanuele Giuseppe Esposito kfree(ctl);
16378fc78909SEmanuele Giuseppe Esposito if (r)
1638cc440cdaSPaolo Bonzini return -EFAULT;
16398fc78909SEmanuele Giuseppe Esposito
16404995a368SCathy Avery if (copy_to_user(&user_vmcb->save, &svm->vmcb01.ptr->save,
1641cc440cdaSPaolo Bonzini sizeof(user_vmcb->save)))
1642cc440cdaSPaolo Bonzini return -EFAULT;
1643cc440cdaSPaolo Bonzini out:
1644cc440cdaSPaolo Bonzini return kvm_state.size;
1645cc440cdaSPaolo Bonzini }
1646cc440cdaSPaolo Bonzini
1647cc440cdaSPaolo Bonzini static int svm_set_nested_state(struct kvm_vcpu *vcpu,
1648cc440cdaSPaolo Bonzini struct kvm_nested_state __user *user_kvm_nested_state,
1649cc440cdaSPaolo Bonzini struct kvm_nested_state *kvm_state)
1650cc440cdaSPaolo Bonzini {
1651cc440cdaSPaolo Bonzini struct vcpu_svm *svm = to_svm(vcpu);
1652cc440cdaSPaolo Bonzini struct vmcb __user *user_vmcb = (struct vmcb __user *)
1653cc440cdaSPaolo Bonzini &user_kvm_nested_state->data.svm[0];
16546ccbd29aSJoerg Roedel struct vmcb_control_area *ctl;
16556ccbd29aSJoerg Roedel struct vmcb_save_area *save;
1656b7a3d8b6SEmanuele Giuseppe Esposito struct vmcb_save_area_cached save_cached;
16578fc78909SEmanuele Giuseppe Esposito struct vmcb_ctrl_area_cached ctl_cached;
1658dbc4739bSSean Christopherson unsigned long cr0;
16596ccbd29aSJoerg Roedel int ret;
1660cc440cdaSPaolo Bonzini
16616ccbd29aSJoerg Roedel BUILD_BUG_ON(sizeof(struct vmcb_control_area) + sizeof(struct vmcb_save_area) >
16626ccbd29aSJoerg Roedel KVM_STATE_NESTED_SVM_VMCB_SIZE);
16636ccbd29aSJoerg Roedel
1664cc440cdaSPaolo Bonzini if (kvm_state->format != KVM_STATE_NESTED_FORMAT_SVM)
1665cc440cdaSPaolo Bonzini return -EINVAL;
1666cc440cdaSPaolo Bonzini
1667cc440cdaSPaolo Bonzini if (kvm_state->flags & ~(KVM_STATE_NESTED_GUEST_MODE |
1668cc440cdaSPaolo Bonzini KVM_STATE_NESTED_RUN_PENDING |
1669cc440cdaSPaolo Bonzini KVM_STATE_NESTED_GIF_SET))
1670cc440cdaSPaolo Bonzini return -EINVAL;
1671cc440cdaSPaolo Bonzini
1672cc440cdaSPaolo Bonzini /*
1673cc440cdaSPaolo Bonzini * If in guest mode, vcpu->arch.efer actually refers to the L2 guest's
1674cc440cdaSPaolo Bonzini * EFER.SVME, but EFER.SVME still has to be 1 for VMRUN to succeed.
1675cc440cdaSPaolo Bonzini */
1676cc440cdaSPaolo Bonzini if (!(vcpu->arch.efer & EFER_SVME)) {
1677cc440cdaSPaolo Bonzini /* GIF=1 and no guest mode are required if SVME=0. */
1678cc440cdaSPaolo Bonzini if (kvm_state->flags != KVM_STATE_NESTED_GIF_SET)
1679cc440cdaSPaolo Bonzini return -EINVAL;
1680cc440cdaSPaolo Bonzini }
1681cc440cdaSPaolo Bonzini
1682cc440cdaSPaolo Bonzini /* SMM temporarily disables SVM, so we cannot be in guest mode. */
1683cc440cdaSPaolo Bonzini if (is_smm(vcpu) && (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
1684cc440cdaSPaolo Bonzini return -EINVAL;
1685cc440cdaSPaolo Bonzini
1686cc440cdaSPaolo Bonzini if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) {
1687f7e57078SSean Christopherson svm_leave_nested(vcpu);
1688d5cd6f34SVitaly Kuznetsov svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
1689d5cd6f34SVitaly Kuznetsov return 0;
1690cc440cdaSPaolo Bonzini }
1691cc440cdaSPaolo Bonzini
1692cc440cdaSPaolo Bonzini if (!page_address_valid(vcpu, kvm_state->hdr.svm.vmcb_pa))
1693cc440cdaSPaolo Bonzini return -EINVAL;
1694cc440cdaSPaolo Bonzini if (kvm_state->size < sizeof(*kvm_state) + KVM_STATE_NESTED_SVM_VMCB_SIZE)
1695cc440cdaSPaolo Bonzini return -EINVAL;
1696cc440cdaSPaolo Bonzini
16976ccbd29aSJoerg Roedel ret = -ENOMEM;
1698eba04b20SSean Christopherson ctl = kzalloc(sizeof(*ctl), GFP_KERNEL_ACCOUNT);
1699eba04b20SSean Christopherson save = kzalloc(sizeof(*save), GFP_KERNEL_ACCOUNT);
17006ccbd29aSJoerg Roedel if (!ctl || !save)
17016ccbd29aSJoerg Roedel goto out_free;
17026ccbd29aSJoerg Roedel
17036ccbd29aSJoerg Roedel ret = -EFAULT;
17046ccbd29aSJoerg Roedel if (copy_from_user(ctl, &user_vmcb->control, sizeof(*ctl)))
17056ccbd29aSJoerg Roedel goto out_free;
17066ccbd29aSJoerg Roedel if (copy_from_user(save, &user_vmcb->save, sizeof(*save)))
17076ccbd29aSJoerg Roedel goto out_free;
17086ccbd29aSJoerg Roedel
17096ccbd29aSJoerg Roedel ret = -EINVAL;
171066c03a92SVitaly Kuznetsov __nested_copy_vmcb_control_to_cache(vcpu, &ctl_cached, ctl);
17118fc78909SEmanuele Giuseppe Esposito if (!__nested_vmcb_check_controls(vcpu, &ctl_cached))
17126ccbd29aSJoerg Roedel goto out_free;
1713cc440cdaSPaolo Bonzini
1714cc440cdaSPaolo Bonzini /*
1715cc440cdaSPaolo Bonzini * Processor state contains L2 state. Check that it is
1716cb9b6a1bSPaolo Bonzini * valid for guest mode (see nested_vmcb_check_save).
1717cc440cdaSPaolo Bonzini */
1718cc440cdaSPaolo Bonzini cr0 = kvm_read_cr0(vcpu);
1719cc440cdaSPaolo Bonzini if (((cr0 & X86_CR0_CD) == 0) && (cr0 & X86_CR0_NW))
17206ccbd29aSJoerg Roedel goto out_free;
1721cc440cdaSPaolo Bonzini
1722cc440cdaSPaolo Bonzini /*
1723cc440cdaSPaolo Bonzini * Validate host state saved from before VMRUN (see
1724cc440cdaSPaolo Bonzini * nested_svm_check_permissions).
1725cc440cdaSPaolo Bonzini */
1726b7a3d8b6SEmanuele Giuseppe Esposito __nested_copy_vmcb_save_to_cache(&save_cached, save);
17276906e06dSKrish Sadhukhan if (!(save->cr0 & X86_CR0_PG) ||
17286906e06dSKrish Sadhukhan !(save->cr0 & X86_CR0_PE) ||
17296906e06dSKrish Sadhukhan (save->rflags & X86_EFLAGS_VM) ||
1730b7a3d8b6SEmanuele Giuseppe Esposito !__nested_vmcb_check_save(vcpu, &save_cached))
17316ccbd29aSJoerg Roedel goto out_free;
1732cc440cdaSPaolo Bonzini 
1734b222b0b8SMaxim Levitsky /*
17354995a368SCathy Avery * All checks done, we can enter guest mode. Userspace provides
17364995a368SCathy Avery * vmcb12.control, which is combined with L1's control state and
17374995a368SCathy Avery * stored into vmcb02, and the L1 save state, which we store in vmcb01.
17384995a368SCathy Avery * If needed, L2 registers are moved from the current VMCB to vmcb02.
1739cc440cdaSPaolo Bonzini */
174081f76adaSMaxim Levitsky
17419d290e16SMaxim Levitsky if (is_guest_mode(vcpu))
1742f7e57078SSean Christopherson svm_leave_nested(vcpu);
17439d290e16SMaxim Levitsky else
17449d290e16SMaxim Levitsky svm->nested.vmcb02.ptr->save = svm->vmcb01.ptr->save;
17459d290e16SMaxim Levitsky
1746063ab16cSMaxim Levitsky svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
1747063ab16cSMaxim Levitsky
174881f76adaSMaxim Levitsky svm->nested.nested_run_pending =
174981f76adaSMaxim Levitsky !!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);
175081f76adaSMaxim Levitsky
17510dd16b5bSMaxim Levitsky svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa;
1752c08f390aSPaolo Bonzini
17532bb16beaSVitaly Kuznetsov svm_copy_vmrun_state(&svm->vmcb01.ptr->save, save);
17547907160dSEmanuele Giuseppe Esposito nested_copy_vmcb_control_to_cache(svm, ctl);
17554995a368SCathy Avery
17564995a368SCathy Avery svm_switch_vmcb(svm, &svm->nested.vmcb02);
1757da0b93d6SMaciej S. Szmigiero nested_vmcb02_prepare_control(svm, svm->vmcb->save.rip, svm->vmcb->save.cs.base);
1758e1779c27SMaxim Levitsky
1759e1779c27SMaxim Levitsky /*
1760e1779c27SMaxim Levitsky * While the nested guest CR3 is already checked and set by
1761e1779c27SMaxim Levitsky * KVM_SET_SREGS, it was set before the nested state was loaded,
1762e1779c27SMaxim Levitsky * thus the MMU might not be initialized correctly.
1763e1779c27SMaxim Levitsky * Set it again to fix this.
1764e1779c27SMaxim Levitsky */
1765e1779c27SMaxim Levitsky
1766e1779c27SMaxim Levitsky ret = nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3,
1767e1779c27SMaxim Levitsky nested_npt_enabled(svm), false);
1768e1779c27SMaxim Levitsky if (WARN_ON_ONCE(ret))
1769e1779c27SMaxim Levitsky goto out_free;
1770e1779c27SMaxim Levitsky
177173c25546SVitaly Kuznetsov svm->nested.force_msr_bitmap_recalc = true;
1772e1779c27SMaxim Levitsky
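	/*
	 * Mapping the remaining nested-state pages (e.g. the nested MSR
	 * permission bitmap) is deferred to KVM_REQ_GET_NESTED_STATE_PAGES,
	 * serviced on the next vcpu entry when guest memory is accessible.
	 */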
1773a7d5c7ceSPaolo Bonzini kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
17746ccbd29aSJoerg Roedel ret = 0;
17756ccbd29aSJoerg Roedel out_free:
17766ccbd29aSJoerg Roedel kfree(save);
17776ccbd29aSJoerg Roedel kfree(ctl);
17786ccbd29aSJoerg Roedel
17796ccbd29aSJoerg Roedel return ret;
1780cc440cdaSPaolo Bonzini }
1781cc440cdaSPaolo Bonzini
1782232f75d3SMaxim Levitsky static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
1783232f75d3SMaxim Levitsky {
1784232f75d3SMaxim Levitsky struct vcpu_svm *svm = to_svm(vcpu);
1785232f75d3SMaxim Levitsky
1786232f75d3SMaxim Levitsky if (WARN_ON(!is_guest_mode(vcpu)))
1787232f75d3SMaxim Levitsky return true;
1788232f75d3SMaxim Levitsky
1789158a48ecSMaxim Levitsky if (!vcpu->arch.pdptrs_from_userspace &&
1790158a48ecSMaxim Levitsky !nested_npt_enabled(svm) && is_pae_paging(vcpu))
1791b222b0b8SMaxim Levitsky /*
1792b222b0b8SMaxim Levitsky * Reload the guest's PDPTRs since after a migration
1793b222b0b8SMaxim Levitsky * the guest CR3 might be restored prior to setting the nested
1794b222b0b8SMaxim Levitsky * state, which can lead to loading the wrong PDPTRs.
1795b222b0b8SMaxim Levitsky */
17962df4a5ebSLai Jiangshan if (CC(!load_pdptrs(vcpu, vcpu->arch.cr3)))
1797232f75d3SMaxim Levitsky return false;
1798232f75d3SMaxim Levitsky
1799232f75d3SMaxim Levitsky if (!nested_svm_vmrun_msrpm(svm)) {
1800232f75d3SMaxim Levitsky vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1801232f75d3SMaxim Levitsky vcpu->run->internal.suberror =
1802232f75d3SMaxim Levitsky KVM_INTERNAL_ERROR_EMULATION;
1803232f75d3SMaxim Levitsky vcpu->run->internal.ndata = 0;
1804232f75d3SMaxim Levitsky return false;
1805232f75d3SMaxim Levitsky }
1806232f75d3SMaxim Levitsky
18073f4a812eSVitaly Kuznetsov if (kvm_hv_verify_vp_assist(vcpu))
18083f4a812eSVitaly Kuznetsov return false;
18093f4a812eSVitaly Kuznetsov
1810232f75d3SMaxim Levitsky return true;
1811232f75d3SMaxim Levitsky }
1812232f75d3SMaxim Levitsky
181333b22172SPaolo Bonzini struct kvm_x86_nested_ops svm_nested_ops = {
1814f7e57078SSean Christopherson .leave_nested = svm_leave_nested,
18157709aba8SSean Christopherson .is_exception_vmexit = nested_svm_is_exception_vmexit,
181633b22172SPaolo Bonzini .check_events = svm_check_nested_events,
1817cb6a32c2SSean Christopherson .triple_fault = nested_svm_triple_fault,
1818a7d5c7ceSPaolo Bonzini .get_nested_state_pages = svm_get_nested_state_pages,
1819cc440cdaSPaolo Bonzini .get_state = svm_get_nested_state,
1820cc440cdaSPaolo Bonzini .set_state = svm_set_nested_state,
1821b0c9c25eSVitaly Kuznetsov .hv_inject_synthetic_vmexit_post_tlb_flush = svm_hv_inject_synthetic_vmexit_post_tlb_flush,
182233b22172SPaolo Bonzini };
1823