// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/handle_exit.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>

#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include <asm/debug-monitors.h>
#include <asm/traps.h>

#include <kvm/arm_hypercalls.h>

#define CREATE_TRACE_POINTS
#include "trace_handle_exit.h"

typedef int (*exit_handle_fn)(struct kvm_vcpu *);

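/*
 * Only an SError that the RAS extension reports with a valid, non-fatal
 * syndrome can safely be ignored here (the host has already recorded
 * it); anything else is forwarded to the guest as a virtual SError.
 */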
static void kvm_handle_guest_serror(struct kvm_vcpu *vcpu, u64 esr)
{
	if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(NULL, esr))
		kvm_inject_vabt(vcpu);
}

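/*
 * Guest hypercalls (PSCI, SMCCC services, ...) arrive here via HVC.
 * kvm_hvc_call_handler() leaves the SMCCC result in x0 and returns 1
 * to re-enter the guest, or 0 to propagate an exit to userspace (e.g.
 * for a PSCI system event). A negative return means the call could
 * not be handled at all, so x0 is loaded with ~0UL
 * (SMCCC_RET_NOT_SUPPORTED) before resuming the guest.
 */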
static int handle_hvc(struct kvm_vcpu *vcpu)
{
	int ret;

	trace_kvm_hvc_arm64(*vcpu_pc(vcpu), vcpu_get_reg(vcpu, 0),
			    kvm_vcpu_hvc_get_imm(vcpu));
	vcpu->stat.hvc_exit_stat++;

	ret = kvm_hvc_call_handler(vcpu);
	if (ret < 0) {
		vcpu_set_reg(vcpu, 0, ~0UL);
		return 1;
	}

	return ret;
}

static int handle_smc(struct kvm_vcpu *vcpu)
{
	/*
	 * "If an SMC instruction executed at Non-secure EL1 is
	 * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a
	 * Trap exception, not a Secure Monitor Call exception [...]"
	 *
	 * We need to advance the PC after the trap, as it would
	 * otherwise return to the same address...
	 */
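	/*
	 * KVM does not forward the SMC to EL3 on the guest's behalf;
	 * the call is failed cleanly by reporting
	 * SMCCC_RET_NOT_SUPPORTED (-1) in x0 instead.
	 */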
	vcpu_set_reg(vcpu, 0, ~0UL);
	kvm_incr_pc(vcpu);
	return 1;
}

/*
 * Guest access to FP/ASIMD registers is routed to this handler only
 * when the system doesn't support FP/ASIMD.
 */
static int handle_no_fpsimd(struct kvm_vcpu *vcpu)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

/**
 * kvm_handle_wfx - handle a wait-for-interrupts or wait-for-event
 *		    instruction executed by a guest
 *
 * @vcpu:	the vcpu pointer
 *
 * WFE: Yield the CPU and come back to this vcpu when the scheduler
 * decides to.
 * WFI: Simply call kvm_vcpu_halt(), which stops world-switching to
 * this vcpu and schedules other host processes until there is an
 * incoming IRQ or FIQ for the VM.
 */
static int kvm_handle_wfx(struct kvm_vcpu *vcpu)
{
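	/*
	 * Bit 0 of the WFx ISS (the "TI" field) encodes which
	 * instruction trapped: 1 for WFE, 0 for WFI.
	 */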
	if (kvm_vcpu_get_esr(vcpu) & ESR_ELx_WFx_ISS_WFE) {
		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true);
		vcpu->stat.wfe_exit_stat++;
		kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu));
	} else {
		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), false);
		vcpu->stat.wfi_exit_stat++;
		kvm_vcpu_wfi(vcpu);
	}

	kvm_incr_pc(vcpu);

	return 1;
}

/**
 * kvm_handle_guest_debug - handle a debug exception
 *
 * @vcpu:	the vcpu pointer
 *
 * We route all debug exceptions through the same handler. If both the
 * guest and host are using the same debug facilities it will be up to
 * userspace to re-inject the correct exception for guest delivery.
 *
 * Return: 0 (while setting vcpu->run->exit_reason)
 */
static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u64 esr = kvm_vcpu_get_esr(vcpu);

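	/*
	 * ESR_ELx is a 64-bit register and can carry valid syndrome
	 * bits above bit 31 (e.g. the ISS2 field), but the legacy
	 * 'hsr' field is only 32 bits wide. The upper half therefore
	 * travels in 'hsr_high', with a flag telling userspace that
	 * it is valid.
	 */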
	run->exit_reason = KVM_EXIT_DEBUG;
	run->debug.arch.hsr = lower_32_bits(esr);
	run->debug.arch.hsr_high = upper_32_bits(esr);
	run->flags = KVM_DEBUG_ARCH_HSR_HIGH_VALID;

	if (ESR_ELx_EC(esr) == ESR_ELx_EC_WATCHPT_LOW)
		run->debug.arch.far = vcpu->arch.fault.far_el2;

	return 0;
}

static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu)
{
	u64 esr = kvm_vcpu_get_esr(vcpu);

	kvm_pr_unimpl("Unknown exception class: esr: %#016llx -- %s\n",
		      esr, esr_get_class_string(esr));

	kvm_inject_undefined(vcpu);
	return 1;
}

/*
 * Guest access to SVE registers should be routed to this handler only
 * when the system doesn't support SVE.
 */
static int handle_sve(struct kvm_vcpu *vcpu)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

/*
 * A guest has used a ptrauth instruction that guest EL1 did not turn
 * into a NOP. If we get here, it means we didn't fix up ptrauth on
 * exit, and all we can do is give the guest an UNDEF.
 */
static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

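/*
 * Exit handler dispatch table, indexed by the ESR_ELx exception class
 * (EC). The GNU range initializer seeds every entry with the "unknown
 * EC" handler first; the explicit entries below then override the
 * classes KVM knows how to handle.
 */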
static exit_handle_fn arm_exit_handlers[] = {
	[0 ... ESR_ELx_EC_MAX]	= kvm_handle_unknown_ec,
	[ESR_ELx_EC_WFx]	= kvm_handle_wfx,
	[ESR_ELx_EC_CP15_32]	= kvm_handle_cp15_32,
	[ESR_ELx_EC_CP15_64]	= kvm_handle_cp15_64,
	[ESR_ELx_EC_CP14_MR]	= kvm_handle_cp14_32,
	[ESR_ELx_EC_CP14_LS]	= kvm_handle_cp14_load_store,
	[ESR_ELx_EC_CP14_64]	= kvm_handle_cp14_64,
	[ESR_ELx_EC_HVC32]	= handle_hvc,
	[ESR_ELx_EC_SMC32]	= handle_smc,
	[ESR_ELx_EC_HVC64]	= handle_hvc,
	[ESR_ELx_EC_SMC64]	= handle_smc,
	[ESR_ELx_EC_SYS64]	= kvm_handle_sys_reg,
	[ESR_ELx_EC_SVE]	= handle_sve,
	[ESR_ELx_EC_IABT_LOW]	= kvm_handle_guest_abort,
	[ESR_ELx_EC_DABT_LOW]	= kvm_handle_guest_abort,
	[ESR_ELx_EC_SOFTSTP_LOW]= kvm_handle_guest_debug,
	[ESR_ELx_EC_WATCHPT_LOW]= kvm_handle_guest_debug,
	[ESR_ELx_EC_BREAKPT_LOW]= kvm_handle_guest_debug,
	[ESR_ELx_EC_BKPT32]	= kvm_handle_guest_debug,
	[ESR_ELx_EC_BRK64]	= kvm_handle_guest_debug,
	[ESR_ELx_EC_FP_ASIMD]	= handle_no_fpsimd,
	[ESR_ELx_EC_PAC]	= kvm_handle_ptrauth,
};

static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
{
	u64 esr = kvm_vcpu_get_esr(vcpu);
	u8 esr_ec = ESR_ELx_EC(esr);

	return arm_exit_handlers[esr_ec];
}

/*
 * We may be single-stepping an emulated instruction. If the emulation
 * has been completed in the kernel, we can return to userspace with a
 * KVM_EXIT_DEBUG, otherwise userspace needs to complete its
 * emulation first.
 */
static int handle_trap_exceptions(struct kvm_vcpu *vcpu)
{
	int handled;

	/*
	 * See ARM ARM B1.14.1: "Hyp traps on instructions
	 * that fail their condition code check"
	 */
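	/*
	 * For example, a conditional AArch32 coprocessor access whose
	 * condition check fails still traps to EL2; there is nothing
	 * to emulate, so simply skip the instruction. (For AArch64
	 * guests kvm_condition_valid() is always true.)
	 */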
	if (!kvm_condition_valid(vcpu)) {
		kvm_incr_pc(vcpu);
		handled = 1;
	} else {
		exit_handle_fn exit_handler;

		exit_handler = kvm_get_exit_handler(vcpu);
		handled = exit_handler(vcpu);
	}

	return handled;
}

/*
 * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
 * proper exit to userspace.
 */
int handle_exit(struct kvm_vcpu *vcpu, int exception_index)
{
	struct kvm_run *run = vcpu->run;

	if (ARM_SERROR_PENDING(exception_index)) {
		/*
		 * The SError is handled by handle_exit_early(). If the guest
		 * survives it will re-execute the original instruction.
		 */
		return 1;
	}

	exception_index = ARM_EXCEPTION_CODE(exception_index);

	switch (exception_index) {
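	/*
	 * IRQs and SErrors taken while the guest was running have
	 * already been dealt with (by the host's interrupt handling
	 * and by handle_exit_early() respectively); just re-enter
	 * the guest.
	 */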
	case ARM_EXCEPTION_IRQ:
		return 1;
	case ARM_EXCEPTION_EL1_SERROR:
		return 1;
	case ARM_EXCEPTION_TRAP:
		return handle_trap_exceptions(vcpu);
	case ARM_EXCEPTION_HYP_GONE:
		/*
		 * EL2 has been reset to the hyp-stub. This happens when a guest
		 * is pre-empted by kvm_reboot()'s shutdown call.
		 */
		run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		return 0;
	case ARM_EXCEPTION_IL:
		/*
		 * We attempted an illegal exception return.  Guest state must
		 * have been corrupted somehow.  Give up.
		 */
		run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		return -EINVAL;
	default:
		kvm_pr_unimpl("Unsupported exception type: %d",
			      exception_index);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return 0;
	}
}

/* For exit types that need handling before we can be preempted */
void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index)
{
	if (ARM_SERROR_PENDING(exception_index)) {
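		/*
		 * On RAS systems the world-switch code will have used
		 * an ESB to consume the pending SError, leaving its
		 * syndrome in DISR_EL1; convert that back to an ESR so
		 * it can be classified. Without RAS there is no
		 * syndrome to inspect, so unconditionally inject a
		 * virtual SError.
		 */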
		if (this_cpu_has_cap(ARM64_HAS_RAS_EXTN)) {
			u64 disr = kvm_vcpu_get_disr(vcpu);

			kvm_handle_guest_serror(vcpu, disr_to_esr(disr));
		} else {
			kvm_inject_vabt(vcpu);
		}

		return;
	}

	exception_index = ARM_EXCEPTION_CODE(exception_index);

	if (exception_index == ARM_EXCEPTION_EL1_SERROR)
		kvm_handle_guest_serror(vcpu, kvm_vcpu_get_esr(vcpu));
}

void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr,
					      u64 elr_virt, u64 elr_phys,
					      u64 par, uintptr_t vcpu,
					      u64 far, u64 hpfar)
{
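	/*
	 * Translate the physical ELR back into a kernel-image address;
	 * subtracting the KASLR offset and the hyp VA then yields the
	 * constant offset between hyp VAs and link-time vmlinux
	 * addresses, so "elr_virt + hyp_offset" printed below is a
	 * plain vmlinux address.
	 */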
	u64 elr_in_kimg = __phys_to_kimg(elr_phys);
	u64 hyp_offset = elr_in_kimg - kaslr_offset() - elr_virt;
	u64 mode = spsr & PSR_MODE_MASK;

	/*
	 * The nVHE hyp symbols are not included by kallsyms to avoid issues
	 * with aliasing. That means that the symbols cannot be printed with the
	 * "%pS" format specifier, so fall back to the vmlinux address if
	 * there's no better option.
	 */
	if (mode != PSR_MODE_EL2t && mode != PSR_MODE_EL2h) {
		kvm_err("Invalid host exception to nVHE hyp!\n");
	} else if (ESR_ELx_EC(esr) == ESR_ELx_EC_BRK64 &&
		   (esr & ESR_ELx_BRK64_ISS_COMMENT_MASK) == BUG_BRK_IMM) {
		const char *file = NULL;
		unsigned int line = 0;

		/* All hyp bugs, including warnings, are treated as fatal. */
		if (!is_protected_kvm_enabled() ||
		    IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) {
			struct bug_entry *bug = find_bug(elr_in_kimg);

			if (bug)
				bug_get_file_line(bug, &file, &line);
		}

		if (file)
			kvm_err("nVHE hyp BUG at: %s:%u!\n", file, line);
		else
			kvm_err("nVHE hyp BUG at: %016llx!\n", elr_virt + hyp_offset);
	} else {
		kvm_err("nVHE hyp panic at: %016llx!\n", elr_virt + hyp_offset);
	}

	/*
	 * Hyp has panicked and we're going to handle that by panicking the
	 * kernel. The kernel offset will be revealed in the panic so we're
	 * also safe to reveal the hyp offset as a debugging aid for translating
	 * hyp VAs to vmlinux addresses.
	 */
	kvm_err("Hyp Offset: 0x%llx\n", hyp_offset);

	panic("HYP panic:\nPS:%08llx PC:%016llx ESR:%016llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%016lx\n",
	      spsr, elr_virt, esr, far, hpfar, par, vcpu);
}