// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <hyp/switch.h>
#include <hyp/sysreg-sr.h>

#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>

#include <kvm/arm_psci.h>

#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>
#include <asm/thread_info.h>

#include <nvhe/fixed_config.h>
#include <nvhe/mem_protect.h>

/* Non-VHE specific context */
DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data);
DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);

static void __activate_traps(struct kvm_vcpu *vcpu)
{
	u64 val;

	___activate_traps(vcpu);
	__activate_traps_common(vcpu);

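	/*
	 * Trap trace register accesses (CPTR_EL2_TTA) and Activity Monitor
	 * accesses (CPTR_EL2_TAM). If the guest doesn't currently own the
	 * FP regs, also trap FP/SIMD (TFP) and SVE (TZ) so the state can
	 * be switched lazily on first use.
	 */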
	val = vcpu->arch.cptr_el2;
	val |= CPTR_EL2_TTA | CPTR_EL2_TAM;
	if (!update_fp_enabled(vcpu)) {
		val |= CPTR_EL2_TFP | CPTR_EL2_TZ;
		__activate_traps_fpsimd32(vcpu);
	}

	write_sysreg(val, cptr_el2);
	write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2);

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;

		isb();
		/*
		 * At this stage, and thanks to the above isb(), S2 is
		 * configured and enabled. We can now restore the guest's S1
		 * configuration: SCTLR, and only then TCR.
		 */
		write_sysreg_el1(ctxt_sys_reg(ctxt, SCTLR_EL1),	SYS_SCTLR);
		isb();
		write_sysreg_el1(ctxt_sys_reg(ctxt, TCR_EL1),	SYS_TCR);
	}
}

static void __deactivate_traps(struct kvm_vcpu *vcpu)
{
	extern char __kvm_hyp_host_vector[];
	u64 cptr;

	___deactivate_traps(vcpu);

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		u64 val;

		/*
		 * Set the TCR and SCTLR registers in the exact opposite
		 * sequence as __activate_traps (first prevent walks,
		 * then force the MMU on). A generous sprinkling of isb()
		 * ensures that things happen in this exact order.
		 */
		val = read_sysreg_el1(SYS_TCR);
		write_sysreg_el1(val | TCR_EPD1_MASK | TCR_EPD0_MASK, SYS_TCR);
		isb();
		val = read_sysreg_el1(SYS_SCTLR);
		write_sysreg_el1(val | SCTLR_ELx_M, SYS_SCTLR);
		isb();
	}

	__deactivate_traps_common(vcpu);

	write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2);

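	/*
	 * Keep SVE trapped (CPTR_EL2_TZ) on return to the host if the
	 * guest's FP/SVE state is still live, so that the state can be
	 * saved before the host first touches SVE.
	 */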
	cptr = CPTR_EL2_DEFAULT;
	if (vcpu_has_sve(vcpu) && (vcpu->arch.flags & KVM_ARM64_FP_ENABLED))
		cptr |= CPTR_EL2_TZ;

	write_sysreg(cptr, cptr_el2);
	write_sysreg(__kvm_hyp_host_vector, vbar_el2);
}

/* Save VGICv3 state on non-VHE systems */
static void __hyp_vgic_save_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
		__vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3);
		__vgic_v3_deactivate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
	}
}

/* Restore VGICv3 state on non-VHE systems */
static void __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
		__vgic_v3_activate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
		__vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3);
	}
}

/*
 * Disable host PMU events, enable guest PMU events.
 */
static bool __pmu_switch_to_guest(struct kvm_cpu_context *host_ctxt)
{
	struct kvm_host_data *host;
	struct kvm_pmu_events *pmu;

	host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
	pmu = &host->pmu_events;

	if (pmu->events_host)
		write_sysreg(pmu->events_host, pmcntenclr_el0);

	if (pmu->events_guest)
		write_sysreg(pmu->events_guest, pmcntenset_el0);

	return (pmu->events_host || pmu->events_guest);
}

/*
 * Disable guest PMU events, enable host PMU events.
 */
static void __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt)
{
	struct kvm_host_data *host;
	struct kvm_pmu_events *pmu;

	host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
	pmu = &host->pmu_events;

	if (pmu->events_guest)
		write_sysreg(pmu->events_guest, pmcntenclr_el0);

	if (pmu->events_host)
		write_sysreg(pmu->events_host, pmcntenset_el0);
}

/*
 * Handler for protected VM MSR, MRS or System instruction execution in AArch64.
 *
 * Returns true if the hypervisor has handled the exit, and control should go
 * back to the guest, or false if it hasn't.
 */
static bool kvm_handle_pvm_sys64(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	/*
	 * Make sure we handle the exit for workarounds and ptrauth
	 * before the pKVM handling, as the latter could decide to
	 * UNDEF.
	 */
	return (kvm_hyp_handle_sysreg(vcpu, exit_code) ||
		kvm_handle_pvm_sysreg(vcpu, exit_code));
}

/*
 * Handler for protected floating-point and Advanced SIMD accesses.
 *
 * Returns true if the hypervisor has handled the exit, and control should go
 * back to the guest, or false if it hasn't.
 */
static bool kvm_handle_pvm_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	/* Linux guests assume support for floating-point and Advanced SIMD. */
	BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_FP),
				PVM_ID_AA64PFR0_ALLOW));
	BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_ASIMD),
				PVM_ID_AA64PFR0_ALLOW));

	return kvm_hyp_handle_fpsimd(vcpu, exit_code);
}

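/*
 * Exit handlers, indexed by exception class (ESR_ELx.EC). A NULL entry
 * means the exit cannot be handled here and is left for the host.
 */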
static const exit_handler_fn hyp_exit_handlers[] = {
	[0 ... ESR_ELx_EC_MAX]		= NULL,
	[ESR_ELx_EC_CP15_32]		= kvm_hyp_handle_cp15_32,
	[ESR_ELx_EC_SYS64]		= kvm_hyp_handle_sysreg,
	[ESR_ELx_EC_SVE]		= kvm_hyp_handle_fpsimd,
	[ESR_ELx_EC_FP_ASIMD]		= kvm_hyp_handle_fpsimd,
	[ESR_ELx_EC_IABT_LOW]		= kvm_hyp_handle_iabt_low,
	[ESR_ELx_EC_DABT_LOW]		= kvm_hyp_handle_dabt_low,
	[ESR_ELx_EC_PAC]		= kvm_hyp_handle_ptrauth,
};

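/*
 * Protected VMs use a restricted set of handlers: sysreg and FP/SIMD
 * accesses are filtered against the pKVM fixed feature set first, and
 * SVE exits are treated as a restricted feature.
 */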
static const exit_handler_fn pvm_exit_handlers[] = {
	[0 ... ESR_ELx_EC_MAX]		= NULL,
	[ESR_ELx_EC_SYS64]		= kvm_handle_pvm_sys64,
	[ESR_ELx_EC_SVE]		= kvm_handle_pvm_restricted,
	[ESR_ELx_EC_FP_ASIMD]		= kvm_handle_pvm_fpsimd,
	[ESR_ELx_EC_IABT_LOW]		= kvm_hyp_handle_iabt_low,
	[ESR_ELx_EC_DABT_LOW]		= kvm_hyp_handle_dabt_low,
	[ESR_ELx_EC_PAC]		= kvm_hyp_handle_ptrauth,
};

static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
{
	if (unlikely(kvm_vm_is_protected(kern_hyp_va(vcpu->kvm))))
		return pvm_exit_handlers;

	return hyp_exit_handlers;
}

/*
 * Some guests (e.g., protected VMs) are not allowed to run in AArch32.
 * The ARMv8 architecture does not give the hypervisor a mechanism to prevent a
 * guest from dropping to AArch32 EL0 if implemented by the CPU. If the
 * hypervisor spots a guest in such a state, ensure it is handled, and don't
 * trust the host to spot or fix it. The check below is based on the one in
 * kvm_arch_vcpu_ioctl_run().
 *
 * If the guest ran in AArch32 when it shouldn't have, the vcpu is invalidated
 * and the exit code is rewritten so that the exit is reported to the host.
 */
static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);

	if (kvm_vm_is_protected(kvm) && vcpu_mode_is_32bit(vcpu)) {
		/*
		 * As we have caught the guest red-handed, decide that it isn't
		 * fit for purpose anymore by making the vcpu invalid. The VMM
		 * can try and fix it by re-initializing the vcpu with
		 * KVM_ARM_VCPU_INIT, however, this is likely not possible for
		 * protected VMs.
		 */
		vcpu->arch.target = -1;
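		/* Keep only a pending SError, and report an illegal exit. */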
		*exit_code &= BIT(ARM_EXIT_WITH_SERROR_BIT);
		*exit_code |= ARM_EXCEPTION_IL;
	}
}

/* Switch to the guest for legacy non-VHE systems */
int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	struct kvm_s2_mmu *mmu;
	bool pmu_switch_needed;
	u64 exit_code;

	/*
	 * Having IRQs masked via PMR when entering the guest means the GIC
	 * will not signal the CPU of interrupts of lower priority, and the
	 * only way to get out will be via guest exceptions.
	 * Naturally, we want to avoid this.
	 */
	if (system_uses_irq_prio_masking()) {
		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
		pmr_sync();
	}

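	/* Record the running vcpu so that hyp_panic() can clean up after us. */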
	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
	host_ctxt->__hyp_running_vcpu = vcpu;
	guest_ctxt = &vcpu->arch.ctxt;

	pmu_switch_needed = __pmu_switch_to_guest(host_ctxt);

	__sysreg_save_state_nvhe(host_ctxt);
	/*
	 * We must flush and disable the SPE buffer for nVHE, as
	 * the translation regime (EL1&0) is going to be loaded with
	 * that of the guest. And we must do this before we change the
	 * translation regime to EL2 (via MDCR_EL2_E2PB == 0) and
	 * before we load guest Stage1.
	 */
	__debug_save_host_buffers_nvhe(vcpu);

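	/*
	 * Apply any pending PC increment or exception injection requested
	 * by the host before committing to the guest entry.
	 */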
	__kvm_adjust_pc(vcpu);

	/*
	 * We must restore the 32-bit state before the sysregs, thanks
	 * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
	 *
	 * Also, and in order to be able to deal with erratum #1319537 (A57)
	 * and #1319367 (A72), we must ensure that all VM-related sysregs are
	 * restored before we enable S2 translation.
	 */
	__sysreg32_restore_state(vcpu);
	__sysreg_restore_state_nvhe(guest_ctxt);

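	/*
	 * Load the guest's stage-2 context first: the SPECULATIVE_AT
	 * workaround in __activate_traps() relies on S2 being configured
	 * and enabled by this point.
	 */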
	mmu = kern_hyp_va(vcpu->arch.hw_mmu);
	__load_stage2(mmu, kern_hyp_va(mmu->arch));
	__activate_traps(vcpu);

	__hyp_vgic_restore_state(vcpu);
	__timer_enable_traps(vcpu);

	__debug_switch_to_guest(vcpu);

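	/*
	 * Run the guest until fixup_guest_exit() reports an exit that must
	 * be handled by the host; exits fully handled at EL2 loop straight
	 * back into the guest.
	 */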
	do {
		/* Jump in the fire! */
		exit_code = __guest_enter(vcpu);

		/* And we're baaack! */
	} while (fixup_guest_exit(vcpu, &exit_code));

	__sysreg_save_state_nvhe(guest_ctxt);
	__sysreg32_save_state(vcpu);
	__timer_disable_traps(vcpu);
	__hyp_vgic_save_state(vcpu);

	__deactivate_traps(vcpu);
	__load_host_stage2();

	__sysreg_restore_state_nvhe(host_ctxt);

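	/*
	 * FPEXC32_EL2 only holds the guest's value if the guest owned the
	 * FP regs on exit; otherwise there is nothing to save.
	 */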
	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)
		__fpsimd_save_fpexc32(vcpu);

	__debug_switch_to_host(vcpu);
	/*
	 * This must come after restoring the host sysregs, since a non-VHE
	 * system may enable SPE here and make use of the TTBRs.
	 */
	__debug_restore_host_buffers_nvhe(vcpu);

	if (pmu_switch_needed)
		__pmu_switch_to_host(host_ctxt);

	/* Returning to host will clear PSR.I, remask PMR if needed */
	if (system_uses_irq_prio_masking())
		gic_write_pmr(GIC_PRIO_IRQOFF);

	host_ctxt->__hyp_running_vcpu = NULL;

	return exit_code;
}

void __noreturn hyp_panic(void)
{
	u64 spsr = read_sysreg_el2(SYS_SPSR);
	u64 elr = read_sysreg_el2(SYS_ELR);
	u64 par = read_sysreg_par();
	struct kvm_cpu_context *host_ctxt;
	struct kvm_vcpu *vcpu;

	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
	vcpu = host_ctxt->__hyp_running_vcpu;

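	/*
	 * If we panicked with a guest loaded, restore enough host context
	 * (stage-2, sysregs) for the panic path to run safely.
	 */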
	if (vcpu) {
		__timer_disable_traps(vcpu);
		__deactivate_traps(vcpu);
		__load_host_stage2();
		__sysreg_restore_state_nvhe(host_ctxt);
	}

	__hyp_do_panic(host_ctxt, spsr, elr, par);
	unreachable();
}

asmlinkage void kvm_unexpected_el2_exception(void)
{
	return __kvm_unexpected_el2_exception();
}