// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <hyp/switch.h>
#include <hyp/sysreg-sr.h>

#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>

#include <kvm/arm_psci.h>

#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>

#include <nvhe/fixed_config.h>
#include <nvhe/mem_protect.h>

/* Non-VHE specific context */
DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data);
DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);

extern void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc);

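/*
 * Program the guest's CPTR-format traps: trace accesses always trap, SME
 * always traps (KVM does not support it), and FP/SIMD and SVE only stay
 * accessible while the guest owns the FP registers. With hVHE the same
 * policy is expressed in the CPACR_EL1 layout, otherwise in CPTR_EL2.
 */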
static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
{
	u64 val = CPTR_EL2_TAM;	/* Same bit irrespective of E2H */

	if (!guest_owns_fp_regs(vcpu))
		__activate_traps_fpsimd32(vcpu);

	if (has_hvhe()) {
		val |= CPACR_ELx_TTA;

		if (guest_owns_fp_regs(vcpu)) {
			val |= CPACR_ELx_FPEN;
			if (vcpu_has_sve(vcpu))
				val |= CPACR_ELx_ZEN;
		}

		write_sysreg(val, cpacr_el1);
	} else {
		val |= CPTR_EL2_TTA | CPTR_NVHE_EL2_RES1;

		/*
		 * Always trap SME since it's not supported in KVM.
		 * TSM is RES1 if SME isn't implemented.
		 */
		val |= CPTR_EL2_TSM;

		if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs(vcpu))
			val |= CPTR_EL2_TZ;

		if (!guest_owns_fp_regs(vcpu))
			val |= CPTR_EL2_TFP;

		write_sysreg(val, cptr_el2);
	}
}

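/*
 * Undo the guest trap configuration on the way back to the host: FP/SIMD,
 * SVE and SME become accessible again wherever the CPU implements them,
 * leaving only the RES1 bits and the traps for unimplemented features.
 */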
static void __deactivate_cptr_traps(struct kvm_vcpu *vcpu)
{
	if (has_hvhe()) {
		u64 val = CPACR_ELx_FPEN;

		if (cpus_have_final_cap(ARM64_SVE))
			val |= CPACR_ELx_ZEN;
		if (cpus_have_final_cap(ARM64_SME))
			val |= CPACR_ELx_SMEN;

		write_sysreg(val, cpacr_el1);
	} else {
		u64 val = CPTR_NVHE_EL2_RES1;

		if (!cpus_have_final_cap(ARM64_SVE))
			val |= CPTR_EL2_TZ;
		if (!cpus_have_final_cap(ARM64_SME))
			val |= CPTR_EL2_TSM;

		write_sysreg(val, cptr_el2);
	}
}

static void __activate_traps(struct kvm_vcpu *vcpu)
{
	___activate_traps(vcpu);
	__activate_traps_common(vcpu);
	__activate_cptr_traps(vcpu);

	write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2);

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;

		isb();
		/*
		 * At this stage, and thanks to the above isb(), S2 is
		 * configured and enabled. We can now restore the guest's S1
		 * configuration: SCTLR, and only then TCR.
		 */
		write_sysreg_el1(ctxt_sys_reg(ctxt, SCTLR_EL1), SYS_SCTLR);
		isb();
		write_sysreg_el1(ctxt_sys_reg(ctxt, TCR_EL1), SYS_TCR);
	}
}

static void __deactivate_traps(struct kvm_vcpu *vcpu)
{
	extern char __kvm_hyp_host_vector[];

	___deactivate_traps(vcpu);

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		u64 val;

		/*
		 * Set the TCR and SCTLR registers in the exact opposite
		 * sequence as __activate_traps (first prevent walks,
		 * then force the MMU on). A generous sprinkling of isb()
		 * ensures that things happen in this exact order.
		 */
		val = read_sysreg_el1(SYS_TCR);
		write_sysreg_el1(val | TCR_EPD1_MASK | TCR_EPD0_MASK, SYS_TCR);
		isb();
		val = read_sysreg_el1(SYS_SCTLR);
		write_sysreg_el1(val | SCTLR_ELx_M, SYS_SCTLR);
		isb();
	}

	__deactivate_traps_common(vcpu);

	write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2);

	__deactivate_cptr_traps(vcpu);
	write_sysreg(__kvm_hyp_host_vector, vbar_el2);
}

/* Save VGICv3 state on non-VHE systems */
static void __hyp_vgic_save_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
		__vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3);
		__vgic_v3_deactivate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
	}
}

/* Restore VGICv3 state on non-VHE systems */
static void __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
		__vgic_v3_activate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
		__vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3);
	}
}

/*
 * Disable host events, enable guest events
 */
#ifdef CONFIG_HW_PERF_EVENTS
static bool __pmu_switch_to_guest(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu_events *pmu = &vcpu->arch.pmu.events;

	if (pmu->events_host)
		write_sysreg(pmu->events_host, pmcntenclr_el0);

	if (pmu->events_guest)
		write_sysreg(pmu->events_guest, pmcntenset_el0);

	return (pmu->events_host || pmu->events_guest);
}

/*
 * Disable guest events, enable host events
 */
static void __pmu_switch_to_host(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu_events *pmu = &vcpu->arch.pmu.events;

	if (pmu->events_guest)
		write_sysreg(pmu->events_guest, pmcntenclr_el0);

	if (pmu->events_host)
		write_sysreg(pmu->events_host, pmcntenset_el0);
}
#else
#define __pmu_switch_to_guest(v)	({ false; })
#define __pmu_switch_to_host(v)		do {} while (0)
#endif
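/*
 * Note: __pmu_switch_to_guest() returns whether any event counters were
 * actually reprogrammed, so that __kvm_vcpu_run() only bothers switching
 * back to the host events when there is something to undo.
 */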

/*
 * Handler for protected VM MSR, MRS or System instruction execution in AArch64.
 *
 * Returns true if the hypervisor has handled the exit, and control should go
 * back to the guest, or false if it hasn't.
 */
static bool kvm_handle_pvm_sys64(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	/*
	 * Make sure we handle the exit for workarounds and ptrauth
	 * before the pKVM handling, as the latter could decide to
	 * UNDEF.
	 */
	return (kvm_hyp_handle_sysreg(vcpu, exit_code) ||
		kvm_handle_pvm_sysreg(vcpu, exit_code));
}

static const exit_handler_fn hyp_exit_handlers[] = {
	[0 ... ESR_ELx_EC_MAX]		= NULL,
	[ESR_ELx_EC_CP15_32]		= kvm_hyp_handle_cp15_32,
	[ESR_ELx_EC_SYS64]		= kvm_hyp_handle_sysreg,
	[ESR_ELx_EC_SVE]		= kvm_hyp_handle_fpsimd,
	[ESR_ELx_EC_FP_ASIMD]		= kvm_hyp_handle_fpsimd,
	[ESR_ELx_EC_IABT_LOW]		= kvm_hyp_handle_iabt_low,
	[ESR_ELx_EC_DABT_LOW]		= kvm_hyp_handle_dabt_low,
	[ESR_ELx_EC_WATCHPT_LOW]	= kvm_hyp_handle_watchpt_low,
	[ESR_ELx_EC_PAC]		= kvm_hyp_handle_ptrauth,
};

static const exit_handler_fn pvm_exit_handlers[] = {
	[0 ... ESR_ELx_EC_MAX]		= NULL,
	[ESR_ELx_EC_SYS64]		= kvm_handle_pvm_sys64,
	[ESR_ELx_EC_SVE]		= kvm_handle_pvm_restricted,
	[ESR_ELx_EC_FP_ASIMD]		= kvm_hyp_handle_fpsimd,
	[ESR_ELx_EC_IABT_LOW]		= kvm_hyp_handle_iabt_low,
	[ESR_ELx_EC_DABT_LOW]		= kvm_hyp_handle_dabt_low,
	[ESR_ELx_EC_WATCHPT_LOW]	= kvm_hyp_handle_watchpt_low,
	[ESR_ELx_EC_PAC]		= kvm_hyp_handle_ptrauth,
};
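/*
 * Both tables above are indexed by the ESR_ELx exception class (EC) of the
 * guest exit; a NULL entry means there is nothing to do at EL2 for that
 * class and the exit is forwarded to the host.
 */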

static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
{
	if (unlikely(kvm_vm_is_protected(kern_hyp_va(vcpu->kvm))))
		return pvm_exit_handlers;

	return hyp_exit_handlers;
}

static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	const exit_handler_fn *handlers = kvm_get_exit_handler_array(vcpu);
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);

	synchronize_vcpu_pstate(vcpu, exit_code);

	/*
	 * Some guests (e.g., protected VMs) are not allowed to run in
	 * AArch32. The ARMv8 architecture does not give the hypervisor a
	 * mechanism to prevent a guest from dropping to AArch32 EL0 if
	 * implemented by the CPU. If the hypervisor spots a guest in such a
	 * state ensure it is handled, and don't trust the host to spot or fix
	 * it. The check below is based on the one in
	 * kvm_arch_vcpu_ioctl_run().
	 */
	if (kvm_vm_is_protected(kvm) && vcpu_mode_is_32bit(vcpu)) {
		/*
		 * As we have caught the guest red-handed, decide that it isn't
		 * fit for purpose anymore by making the vcpu invalid. The VMM
		 * can try and fix it by re-initializing the vcpu with
		 * KVM_ARM_VCPU_INIT, however, this is likely not possible for
		 * protected VMs.
		 */
		vcpu_clear_flag(vcpu, VCPU_INITIALIZED);
		*exit_code &= BIT(ARM_EXIT_WITH_SERROR_BIT);
		*exit_code |= ARM_EXCEPTION_IL;
	}

	return __fixup_guest_exit(vcpu, exit_code, handlers);
}

/* Switch to the guest for legacy non-VHE systems */
int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	struct kvm_s2_mmu *mmu;
	bool pmu_switch_needed;
	u64 exit_code;

	/*
	 * Having IRQs masked via PMR when entering the guest means the GIC
	 * will not signal the CPU of interrupts of lower priority, and the
	 * only way to get out will be via guest exceptions.
	 * Naturally, we want to avoid this.
	 */
	if (system_uses_irq_prio_masking()) {
		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
		pmr_sync();
	}

	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
	host_ctxt->__hyp_running_vcpu = vcpu;
	guest_ctxt = &vcpu->arch.ctxt;

	pmu_switch_needed = __pmu_switch_to_guest(vcpu);

	__sysreg_save_state_nvhe(host_ctxt);
	/*
	 * We must flush and disable the SPE buffer for nVHE, as
	 * the translation regime (EL1&0) is going to be loaded with
	 * that of the guest. And we must do this before we change the
	 * translation regime to EL2 (via MDCR_EL2_E2PB == 0) and
	 * before we load guest Stage1.
	 */
	__debug_save_host_buffers_nvhe(vcpu);

	/*
	 * We're about to restore some new MMU state. Make sure
	 * ongoing page-table walks that have started before we
	 * trapped to EL2 have completed. This also synchronises the
	 * above disabling of SPE and TRBE.
	 *
	 * See DDI0487I.a D8.1.5 "Out-of-context translation regimes",
	 * rule R_LFHQG and subsequent information statements.
	 */
	dsb(nsh);

	__kvm_adjust_pc(vcpu);

	/*
	 * We must restore the 32-bit state before the sysregs, thanks
	 * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
	 *
	 * Also, and in order to be able to deal with erratum #1319537 (A57)
	 * and #1319367 (A72), we must ensure that all VM-related sysregs are
	 * restored before we enable S2 translation.
	 */
	__sysreg32_restore_state(vcpu);
	__sysreg_restore_state_nvhe(guest_ctxt);

	mmu = kern_hyp_va(vcpu->arch.hw_mmu);
	__load_stage2(mmu, kern_hyp_va(mmu->arch));
	__activate_traps(vcpu);

	__hyp_vgic_restore_state(vcpu);
	__timer_enable_traps(vcpu);

	__debug_switch_to_guest(vcpu);

	do {
		/* Jump in the fire! */
		exit_code = __guest_enter(vcpu);

		/* And we're baaack! */
	} while (fixup_guest_exit(vcpu, &exit_code));

	__sysreg_save_state_nvhe(guest_ctxt);
	__sysreg32_save_state(vcpu);
	__timer_disable_traps(vcpu);
	__hyp_vgic_save_state(vcpu);

	/*
	 * Same thing as before the guest run: we're about to switch
	 * the MMU context, so let's make sure we don't have any
	 * ongoing EL1&0 translations.
	 */
	dsb(nsh);

	__deactivate_traps(vcpu);
	__load_host_stage2();

	__sysreg_restore_state_nvhe(host_ctxt);

	if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED)
		__fpsimd_save_fpexc32(vcpu);

	__debug_switch_to_host(vcpu);
	/*
	 * This must come after restoring the host sysregs, since a non-VHE
	 * system may enable SPE here and make use of the TTBRs.
	 */
	__debug_restore_host_buffers_nvhe(vcpu);

	if (pmu_switch_needed)
		__pmu_switch_to_host(vcpu);

	/* Returning to host will clear PSR.I, remask PMR if needed */
	if (system_uses_irq_prio_masking())
		gic_write_pmr(GIC_PRIO_IRQOFF);

	host_ctxt->__hyp_running_vcpu = NULL;

	return exit_code;
}

asmlinkage void __noreturn hyp_panic(void)
{
	u64 spsr = read_sysreg_el2(SYS_SPSR);
	u64 elr = read_sysreg_el2(SYS_ELR);
	u64 par = read_sysreg_par();
	struct kvm_cpu_context *host_ctxt;
	struct kvm_vcpu *vcpu;

	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
	vcpu = host_ctxt->__hyp_running_vcpu;

	if (vcpu) {
		__timer_disable_traps(vcpu);
		__deactivate_traps(vcpu);
		__load_host_stage2();
		__sysreg_restore_state_nvhe(host_ctxt);
	}

	/* Prepare to dump kvm nvhe hyp stacktrace */
	kvm_nvhe_prepare_backtrace((unsigned long)__builtin_frame_address(0),
				   _THIS_IP_);

	__hyp_do_panic(host_ctxt, spsr, elr, par);
	unreachable();
}

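/*
 * Reached from the hyp stack overflow vector once we are running on the
 * per-CPU overflow stack; there is nothing left to do but panic.
 */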
asmlinkage void __noreturn hyp_panic_bad_stack(void)
{
	hyp_panic();
}

asmlinkage void kvm_unexpected_el2_exception(void)
{
	__kvm_unexpected_el2_exception();
}