// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/handle_exit.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>

#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_nested.h>
#include <asm/debug-monitors.h>
#include <asm/stacktrace/nvhe.h>
#include <asm/traps.h>

#include <kvm/arm_hypercalls.h>

#define CREATE_TRACE_POINTS
#include "trace_handle_exit.h"

typedef int (*exit_handle_fn)(struct kvm_vcpu *);
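
/*
 * Each handler returns > 0 to resume the guest, 0 to exit to userspace
 * (after setting vcpu->run->exit_reason), or < 0 on error; see the
 * contract documented above handle_exit() below.
 */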
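/*
 * An SError that is either not a RAS error with a known syndrome, or one
 * that the RAS code considers fatal, is turned into a virtual SError for
 * the guest.
 */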
static void kvm_handle_guest_serror(struct kvm_vcpu *vcpu, u64 esr)
{
	if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(NULL, esr))
		kvm_inject_vabt(vcpu);
}

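/*
 * An HVC from the guest is the entry point for SMCCC-based hypercalls
 * (PSCI and friends). When the guest has a virtual EL2, the HVC is
 * instead forwarded there, or made UNDEF if the guest's HCR_EL2.HCD
 * disables HVC.
 */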
static int handle_hvc(struct kvm_vcpu *vcpu)
{
	trace_kvm_hvc_arm64(*vcpu_pc(vcpu), vcpu_get_reg(vcpu, 0),
			    kvm_vcpu_hvc_get_imm(vcpu));
	vcpu->stat.hvc_exit_stat++;

	/* Forward hvc instructions to the virtual EL2 if the guest has EL2. */
	if (vcpu_has_nv(vcpu)) {
		if (vcpu_read_sys_reg(vcpu, HCR_EL2) & HCR_HCD)
			kvm_inject_undefined(vcpu);
		else
			kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));

		return 1;
	}

	return kvm_smccc_call_handler(vcpu);
}

static int handle_smc(struct kvm_vcpu *vcpu)
{
	/*
	 * "If an SMC instruction executed at Non-secure EL1 is
	 * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a
	 * Trap exception, not a Secure Monitor Call exception [...]"
	 *
	 * We need to advance the PC after the trap, as it would
	 * otherwise return to the same address. Furthermore, pre-incrementing
	 * the PC before potentially exiting to userspace maintains the same
	 * abstraction for both SMCs and HVCs.
	 */
	kvm_incr_pc(vcpu);

	/*
	 * SMCs with a nonzero immediate are reserved according to DEN0028E 2.9
	 * "SMC and HVC immediate value".
	 */
	if (kvm_vcpu_hvc_get_imm(vcpu)) {
		vcpu_set_reg(vcpu, 0, ~0UL);
		return 1;
	}

	/*
	 * If imm is zero then it is likely an SMCCC call.
	 *
	 * Note that on ARMv8.3, even if EL3 is not implemented, SMC executed
	 * at Non-secure EL1 is trapped to EL2 if HCR_EL2.TSC==1, rather than
	 * being treated as UNDEFINED.
	 */
	return kvm_smccc_call_handler(vcpu);
}

/*
 * Guest accesses to FP/ASIMD registers are routed to this handler only
 * when the system doesn't support FP/ASIMD.
 */
static int handle_no_fpsimd(struct kvm_vcpu *vcpu)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

/**
 * kvm_handle_wfx - handle a wait-for-interrupts or wait-for-event
 *		    instruction executed by a guest
 *
 * @vcpu:	the vcpu pointer
 *
 * WFE[T]: Yield the CPU and come back to this vcpu when the scheduler
 * decides to.
 * WFI: Simply call kvm_vcpu_halt(), which will halt execution of
 * world-switches and schedule other host processes until there is an
 * incoming IRQ or FIQ to the VM.
 * WFIT: Same as WFI, with a timed wakeup implemented as a background timer.
 *
 * WF{I,E}T can immediately return if the deadline has already expired.
 */
static int kvm_handle_wfx(struct kvm_vcpu *vcpu)
{
	u64 esr = kvm_vcpu_get_esr(vcpu);

	if (esr & ESR_ELx_WFx_ISS_WFE) {
		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true);
		vcpu->stat.wfe_exit_stat++;
	} else {
		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), false);
		vcpu->stat.wfi_exit_stat++;
	}

	if (esr & ESR_ELx_WFx_ISS_WFxT) {
		if (esr & ESR_ELx_WFx_ISS_RV) {
			u64 val, now;

			now = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_TIMER_CNT);
			val = vcpu_get_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu));

			if (now >= val)
				goto out;
		} else {
			/* Treat WFxT as WFx if RN is invalid */
			esr &= ~ESR_ELx_WFx_ISS_WFxT;
		}
	}

	if (esr & ESR_ELx_WFx_ISS_WFE) {
		kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu));
	} else {
		if (esr & ESR_ELx_WFx_ISS_WFxT)
			vcpu_set_flag(vcpu, IN_WFIT);

		kvm_vcpu_wfi(vcpu);
	}
out:
	kvm_incr_pc(vcpu);

	return 1;
}

/**
 * kvm_handle_guest_debug - handle a debug exception instruction
 *
 * @vcpu:	the vcpu pointer
 *
 * We route all debug exceptions through the same handler. If both the
 * guest and host are using the same debug facilities it will be up to
 * userspace to re-inject the correct exception for guest delivery.
 *
 * @return: 0 (while setting vcpu->run->exit_reason)
 */
static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u64 esr = kvm_vcpu_get_esr(vcpu);

	run->exit_reason = KVM_EXIT_DEBUG;
	run->debug.arch.hsr = lower_32_bits(esr);
	run->debug.arch.hsr_high = upper_32_bits(esr);
	run->flags = KVM_DEBUG_ARCH_HSR_HIGH_VALID;

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_WATCHPT_LOW:
		run->debug.arch.far = vcpu->arch.fault.far_el2;
		break;
	case ESR_ELx_EC_SOFTSTP_LOW:
		vcpu_clear_flag(vcpu, DBG_SS_ACTIVE_PENDING);
		break;
	}

	return 0;
}

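/*
 * Catch-all for exception classes without a dedicated handler: log the
 * unexpected ESR and inject an UNDEF into the guest.
 */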
static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu)
{
	u64 esr = kvm_vcpu_get_esr(vcpu);

	kvm_pr_unimpl("Unknown exception class: esr: %#016llx -- %s\n",
		      esr, esr_get_class_string(esr));

	kvm_inject_undefined(vcpu);
	return 1;
}

/*
 * Guest access to SVE registers should be routed to this handler only
 * when the system doesn't support SVE.
 */
static int handle_sve(struct kvm_vcpu *vcpu)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

/*
 * Guest usage of a ptrauth instruction (which the guest EL1 did not turn
 * into a NOP). If we get here, it means we didn't fix up ptrauth on exit,
 * and all we can do is give the guest an UNDEF.
 */
static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

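/*
 * ERET is trapped for nested virt. If the ESR flags the instruction as
 * ERETAA/ERETAB (ESR_ELx_ERET_ISS_ERET), treat it like any other ptrauth
 * instruction we failed to fix up; otherwise either emulate the exception
 * return for a vEL2 guest or reinject into the L1 hypervisor (see below).
 */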
static int kvm_handle_eret(struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_get_esr(vcpu) & ESR_ELx_ERET_ISS_ERET)
		return kvm_handle_ptrauth(vcpu);

	/*
	 * If we got here, two possibilities:
	 *
	 * - the guest is in EL2, and we need to fully emulate ERET
	 *
	 * - the guest is in EL1, and we need to reinject the
	 *   exception into the L1 hypervisor.
	 *
	 * If KVM ever traps ERET for its own use, we'll have to
	 * revisit this.
	 */
	if (is_hyp_ctxt(vcpu))
		kvm_emulate_nested_eret(vcpu);
	else
		kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));

	return 1;
}

static int handle_svc(struct kvm_vcpu *vcpu)
{
	/*
	 * So far, SVC traps only for NV via HFGITR_EL2. An SVC from a
	 * 32bit guest would be caught by vcpu_mode_is_bad_32bit(), so
	 * we should only have to deal with a 64 bit exception.
	 */
	kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
	return 1;
}

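/*
 * Dispatch table indexed by ESR_ELx exception class (EC). The range
 * initializer makes kvm_handle_unknown_ec the default; the specific
 * classes listed afterwards override their slots.
 */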
static exit_handle_fn arm_exit_handlers[] = {
	[0 ... ESR_ELx_EC_MAX]	= kvm_handle_unknown_ec,
	[ESR_ELx_EC_WFx]	= kvm_handle_wfx,
	[ESR_ELx_EC_CP15_32]	= kvm_handle_cp15_32,
	[ESR_ELx_EC_CP15_64]	= kvm_handle_cp15_64,
	[ESR_ELx_EC_CP14_MR]	= kvm_handle_cp14_32,
	[ESR_ELx_EC_CP14_LS]	= kvm_handle_cp14_load_store,
	[ESR_ELx_EC_CP10_ID]	= kvm_handle_cp10_id,
	[ESR_ELx_EC_CP14_64]	= kvm_handle_cp14_64,
	[ESR_ELx_EC_HVC32]	= handle_hvc,
	[ESR_ELx_EC_SMC32]	= handle_smc,
	[ESR_ELx_EC_HVC64]	= handle_hvc,
	[ESR_ELx_EC_SMC64]	= handle_smc,
	[ESR_ELx_EC_SVC64]	= handle_svc,
	[ESR_ELx_EC_SYS64]	= kvm_handle_sys_reg,
	[ESR_ELx_EC_SVE]	= handle_sve,
	[ESR_ELx_EC_ERET]	= kvm_handle_eret,
	[ESR_ELx_EC_IABT_LOW]	= kvm_handle_guest_abort,
	[ESR_ELx_EC_DABT_LOW]	= kvm_handle_guest_abort,
	[ESR_ELx_EC_SOFTSTP_LOW]= kvm_handle_guest_debug,
	[ESR_ELx_EC_WATCHPT_LOW]= kvm_handle_guest_debug,
	[ESR_ELx_EC_BREAKPT_LOW]= kvm_handle_guest_debug,
	[ESR_ELx_EC_BKPT32]	= kvm_handle_guest_debug,
	[ESR_ELx_EC_BRK64]	= kvm_handle_guest_debug,
	[ESR_ELx_EC_FP_ASIMD]	= handle_no_fpsimd,
	[ESR_ELx_EC_PAC]	= kvm_handle_ptrauth,
};

static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
{
	u64 esr = kvm_vcpu_get_esr(vcpu);
	u8 esr_ec = ESR_ELx_EC(esr);

	return arm_exit_handlers[esr_ec];
}

/*
 * We may be single-stepping an emulated instruction. If the emulation
 * has been completed in the kernel, we can return to userspace with a
 * KVM_EXIT_DEBUG, otherwise userspace needs to complete its
 * emulation first.
 */
static int handle_trap_exceptions(struct kvm_vcpu *vcpu)
{
	int handled;

	/*
	 * See ARM ARM B1.14.1: "Hyp traps on instructions
	 * that fail their condition code check"
	 */
	if (!kvm_condition_valid(vcpu)) {
		kvm_incr_pc(vcpu);
		handled = 1;
	} else {
		exit_handle_fn exit_handler;

		exit_handler = kvm_get_exit_handler(vcpu);
		handled = exit_handler(vcpu);
	}

	return handled;
}

/*
 * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
 * proper exit to userspace.
 */
int handle_exit(struct kvm_vcpu *vcpu, int exception_index)
{
	struct kvm_run *run = vcpu->run;

	if (ARM_SERROR_PENDING(exception_index)) {
		/*
		 * The SError is handled by handle_exit_early(). If the guest
		 * survives it will re-execute the original instruction.
		 */
		return 1;
	}

	exception_index = ARM_EXCEPTION_CODE(exception_index);

	switch (exception_index) {
	case ARM_EXCEPTION_IRQ:
		return 1;
	case ARM_EXCEPTION_EL1_SERROR:
		return 1;
	case ARM_EXCEPTION_TRAP:
		return handle_trap_exceptions(vcpu);
	case ARM_EXCEPTION_HYP_GONE:
		/*
		 * EL2 has been reset to the hyp-stub. This happens when a guest
		 * is pre-empted by kvm_reboot()'s shutdown call.
		 */
		run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		return 0;
	case ARM_EXCEPTION_IL:
		/*
		 * We attempted an illegal exception return.  Guest state must
		 * have been corrupted somehow.  Give up.
		 */
		run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		return -EINVAL;
	default:
		kvm_pr_unimpl("Unsupported exception type: %d",
			      exception_index);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return 0;
	}
}

/* For exit types that need handling before we can be preempted */
void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index)
{
	if (ARM_SERROR_PENDING(exception_index)) {
		if (this_cpu_has_cap(ARM64_HAS_RAS_EXTN)) {
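			/*
			 * With the RAS extension, the SError has been
			 * deferred: its syndrome was captured in DISR_EL1
			 * at exit time and is converted to an ESR here.
			 */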
			u64 disr = kvm_vcpu_get_disr(vcpu);

			kvm_handle_guest_serror(vcpu, disr_to_esr(disr));
		} else {
			kvm_inject_vabt(vcpu);
		}

		return;
	}

	exception_index = ARM_EXCEPTION_CODE(exception_index);

	if (exception_index == ARM_EXCEPTION_EL1_SERROR)
		kvm_handle_guest_serror(vcpu, kvm_vcpu_get_esr(vcpu));
}

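/*
 * Called when the nVHE hypervisor panics: the register state and fault
 * addresses captured at EL2 are passed in so that the panic location can
 * be reported (translated back to a kimage address) before panicking the
 * host kernel itself.
 */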
void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr,
					      u64 elr_virt, u64 elr_phys,
					      u64 par, uintptr_t vcpu,
					      u64 far, u64 hpfar)
{
	u64 elr_in_kimg = __phys_to_kimg(elr_phys);
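	/*
	 * hyp_offset is the displacement between a symbol's hyp VA
	 * (elr_virt) and its kimage virtual address; it is used below to
	 * translate hyp VAs for symbolization and backtracing.
	 */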
	u64 hyp_offset = elr_in_kimg - kaslr_offset() - elr_virt;
	u64 mode = spsr & PSR_MODE_MASK;
	u64 panic_addr = elr_virt + hyp_offset;

	if (mode != PSR_MODE_EL2t && mode != PSR_MODE_EL2h) {
		kvm_err("Invalid host exception to nVHE hyp!\n");
	} else if (ESR_ELx_EC(esr) == ESR_ELx_EC_BRK64 &&
		   (esr & ESR_ELx_BRK64_ISS_COMMENT_MASK) == BUG_BRK_IMM) {
		const char *file = NULL;
		unsigned int line = 0;

		/* All hyp bugs, including warnings, are treated as fatal. */
		if (!is_protected_kvm_enabled() ||
		    IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) {
			struct bug_entry *bug = find_bug(elr_in_kimg);

			if (bug)
				bug_get_file_line(bug, &file, &line);
		}

		if (file)
			kvm_err("nVHE hyp BUG at: %s:%u!\n", file, line);
		else
			kvm_err("nVHE hyp BUG at: [<%016llx>] %pB!\n", panic_addr,
					(void *)(panic_addr + kaslr_offset()));
	} else {
		kvm_err("nVHE hyp panic at: [<%016llx>] %pB!\n", panic_addr,
				(void *)(panic_addr + kaslr_offset()));
	}

	/* Dump the nVHE hypervisor backtrace */
	kvm_nvhe_dump_backtrace(hyp_offset);

	/*
	 * Hyp has panicked and we're going to handle that by panicking the
	 * kernel. The kernel offset will be revealed in the panic so we're
	 * also safe to reveal the hyp offset as a debugging aid for translating
	 * hyp VAs to vmlinux addresses.
	 */
	kvm_err("Hyp Offset: 0x%llx\n", hyp_offset);

	panic("HYP panic:\nPS:%08llx PC:%016llx ESR:%016llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%016lx\n",
	      spsr, elr_virt, esr, far, hpfar, par, vcpu);
}
435