xref: /openbmc/linux/arch/arm64/kvm/hyp/hyp-entry.S (revision 852a53a0)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015-2018 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/arm-smccc.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/mmu.h>

	.text

.macro do_el2_call
	/*
	 * Shuffle the parameters before calling the function
	 * pointed to in x0. Assumes parameters in x[1,2,3].
	 */
	str	lr, [sp, #-16]!
	mov	lr, x0
	mov	x0, x1
	mov	x1, x2
	mov	x2, x3
	blr	lr
	ldr	lr, [sp], #16
.endm
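
/*
 * Note: on the nVHE host-call path below, x0 arrives from the host as
 * the kernel VA of the hyp function and is converted with kern_hyp_va
 * before this macro runs; x1-x3 carry its arguments, and the function's
 * return value is left in x0 for the eret back to the host.
 */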

el1_sync:				// Guest trapped into EL2

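	/*
	 * Classify the exception: fall through only for an HVC from
	 * AArch64 or AArch32 (EC == HVC64 || EC == HVC32, checked with
	 * the cmp/ccmp pair); everything else is a guest trap.
	 */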
	mrs	x0, esr_el2
	lsr	x0, x0, #ESR_ELx_EC_SHIFT
	cmp	x0, #ESR_ELx_EC_HVC64
	ccmp	x0, #ESR_ELx_EC_HVC32, #4, ne
	b.ne	el1_trap

#ifdef __KVM_NVHE_HYPERVISOR__
	mrs	x1, vttbr_el2		// If vttbr is valid, the guest
	cbnz	x1, el1_hvc_guest	// called HVC

	/* Here, we're pretty sure the host called HVC. */
	ldp	x0, x1, [sp], #16

	/* Check for a stub HVC call */
	cmp	x0, #HVC_STUB_HCALL_NR
	b.hs	1f
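	/* Fall through: x0 is below HVC_STUB_HCALL_NR, i.e. a stub hypercall. */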

	/*
	 * Compute the idmap address of __kvm_handle_stub_hvc and
	 * jump there. Since we use kimage_voffset, do not use the
	 * HYP VA for __kvm_handle_stub_hvc, but the kernel VA instead
	 * (by loading it from the constant pool).
	 *
	 * Preserve x0-x4, which may contain stub parameters.
	 */
	ldr	x5, =__kvm_handle_stub_hvc
	ldr_l	x6, kimage_voffset

	/* x5 = __pa(x5) */
	sub	x5, x5, x6
	br	x5

1:
	/*
	 * Perform the EL2 call
	 */
	kern_hyp_va	x0
	do_el2_call

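	/*
	 * Back to the host: x0 holds the hyp function's return value.
	 * The sb below stops speculative execution past the eret.
	 */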
	eret
	sb
#endif /* __KVM_NVHE_HYPERVISOR__ */

el1_hvc_guest:
	/*
	 * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1.
	 * The workaround has already been applied on the host,
	 * so let's quickly get back to the guest. We don't bother
	 * restoring x1, as it can be clobbered anyway.
	 */
	ldr	x1, [sp]				// Guest's x0
	eor	w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
	cbz	w1, wa_epilogue

	/* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
	eor	w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \
			  ARM_SMCCC_ARCH_WORKAROUND_2)
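	// Zero here means the guest's x0 was ARM_SMCCC_ARCH_WORKAROUND_2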
	cbnz	w1, el1_trap

#ifdef CONFIG_ARM64_SSBD
alternative_cb	arm64_enable_wa2_handling
	b	wa2_end
alternative_cb_end
	get_vcpu_ptr	x2, x0
	ldr	x0, [x2, #VCPU_WORKAROUND_FLAGS]

	// Sanitize the argument and update the guest flags
	ldr	x1, [sp, #8]			// Guest's x1
	clz	w1, w1				// Murphy's device:
	lsr	w1, w1, #5			// w1 = !!w1 without using
	eor	w1, w1, #1			// the flags...
	bfi	x0, x1, #VCPU_WORKAROUND_2_FLAG_SHIFT, #1
	str	x0, [x2, #VCPU_WORKAROUND_FLAGS]
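	/*
	 * VCPU_WORKAROUND_2_FLAG now mirrors the guest's request (a
	 * non-zero x1 asks for the mitigation to be enabled); the
	 * world-switch code is presumably what consumes it on the
	 * next guest entry.
	 */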

	/* Check that we actually need to perform the call */
	hyp_ldr_this_cpu x0, arm64_ssbd_callback_required, x2
	cbz	x0, wa2_end

	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
	smc	#0

	/* Don't leak data from the SMC call */
	mov	x3, xzr
wa2_end:
	mov	x2, xzr
	mov	x1, xzr
#endif

wa_epilogue:
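	/*
	 * Return SMCCC success (0) to the guest, dropping the x0/x1
	 * saved by the vector preamble.
	 */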
	mov	x0, xzr
	add	sp, sp, #16
	eret
	sb

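/*
 * The handlers below all funnel into __guest_exit with the vcpu
 * pointer in x1 and an ARM_EXCEPTION_* code in x0.
 */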
el1_trap:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_TRAP
	b	__guest_exit

el1_irq:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_IRQ
	b	__guest_exit

el1_error:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_EL1_SERROR
	b	__guest_exit

el2_sync:
	/* Check for illegal exception return, otherwise panic */
	mrs	x0, spsr_el2

	/* if this was something else, then panic! */
	tst	x0, #PSR_IL_BIT
	b.eq	__hyp_panic

	/* Let's attempt a recovery from the illegal exception return */
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_IL
	b	__guest_exit


el2_error:
	ldp	x0, x1, [sp], #16

	/*
	 * Only two possibilities:
	 * 1) Either we come from the exit path, having just unmasked
	 *    PSTATE.A: change the return code to an EL2 fault, and
	 *    carry on, as we're already in a sane state to handle it.
	 * 2) Or we come from anywhere else, and that's a bug: we panic.
	 *
	 * For (1), x0 contains the original return code and x1 doesn't
	 * contain anything meaningful at that stage. We can reuse them
	 * as temp registers.
	 * For (2), who cares?
	 */
	mrs	x0, elr_el2
	adr	x1, abort_guest_exit_start
	cmp	x0, x1
	adr	x1, abort_guest_exit_end
	ccmp	x0, x1, #4, ne
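	/* Only proceed if ELR_EL2 matches one of the abort_guest_exit_* labels. */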
	b.ne	__hyp_panic
	mov	x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
	eret
	sb

#ifdef __KVM_NVHE_HYPERVISOR__
SYM_FUNC_START(__hyp_do_panic)
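	/* ERET into the kernel's panic() at EL1h with all DAIF exceptions masked. */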
	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr
	ldr	lr, =panic
	msr	elr_el2, lr
	eret
	sb
SYM_FUNC_END(__hyp_do_panic)
#endif

SYM_CODE_START(__hyp_panic)
	get_host_ctxt x0, x1
	b	hyp_panic
SYM_CODE_END(__hyp_panic)

.macro invalid_vector	label, target = __hyp_panic
	.align	2
SYM_CODE_START(\label)
	b \target
SYM_CODE_END(\label)
.endm

	/* None of these should ever happen */
	invalid_vector	el2t_sync_invalid
	invalid_vector	el2t_irq_invalid
	invalid_vector	el2t_fiq_invalid
	invalid_vector	el2t_error_invalid
	invalid_vector	el2h_sync_invalid
	invalid_vector	el2h_irq_invalid
	invalid_vector	el2h_fiq_invalid
	invalid_vector	el1_fiq_invalid

	.ltorg

	.align 11

.macro check_preamble_length start, end
/* kvm_patch_vector_branch() generates code that jumps over the preamble. */
.if ((\end-\start) != KVM_VECTOR_PREAMBLE)
	.error "KVM vector preamble length mismatch"
.endif
.endm
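
/*
 * Every vector slot below starts with a fixed-size preamble
 * (KVM_VECTOR_PREAMBLE bytes: esb/stp in valid_vect, b/nop in
 * invalid_vect) that patched branches can safely land just past.
 */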

.macro valid_vect target
	.align 7
661:
	esb
	stp	x0, x1, [sp, #-16]!
662:
	b	\target

check_preamble_length 661b, 662b
.endm

.macro invalid_vect target
	.align 7
661:
	b	\target
	nop
662:
	ldp	x0, x1, [sp], #16
	b	\target

check_preamble_length 661b, 662b
.endm
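
/*
 * invalid_vect has two entry points: direct entry at the vector base
 * simply branches to \target, while entry just past the preamble (as
 * used by the patched indirect vectors below) first pops the x0/x1
 * that hyp_ventry pushed before branching to \target.
 */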

SYM_CODE_START(__kvm_hyp_vector)
	invalid_vect	el2t_sync_invalid	// Synchronous EL2t
	invalid_vect	el2t_irq_invalid	// IRQ EL2t
	invalid_vect	el2t_fiq_invalid	// FIQ EL2t
	invalid_vect	el2t_error_invalid	// Error EL2t

	valid_vect	el2_sync		// Synchronous EL2h
	invalid_vect	el2h_irq_invalid	// IRQ EL2h
	invalid_vect	el2h_fiq_invalid	// FIQ EL2h
	valid_vect	el2_error		// Error EL2h

	valid_vect	el1_sync		// Synchronous 64-bit EL1
	valid_vect	el1_irq			// IRQ 64-bit EL1
	invalid_vect	el1_fiq_invalid		// FIQ 64-bit EL1
	valid_vect	el1_error		// Error 64-bit EL1

	valid_vect	el1_sync		// Synchronous 32-bit EL1
	valid_vect	el1_irq			// IRQ 32-bit EL1
	invalid_vect	el1_fiq_invalid		// FIQ 32-bit EL1
	valid_vect	el1_error		// Error 32-bit EL1
SYM_CODE_END(__kvm_hyp_vector)

#ifdef CONFIG_KVM_INDIRECT_VECTORS
.macro hyp_ventry
	.align 7
1:	esb
	.rept 26
	nop
	.endr
/*
 * The default sequence is to directly branch to the KVM vectors,
 * using the computed offset. This applies for VHE as well as
 * !ARM64_HARDEN_EL2_VECTORS. The first vector must always run the preamble.
 *
 * For ARM64_HARDEN_EL2_VECTORS configurations, this gets replaced
 * with:
 *
 * stp	x0, x1, [sp, #-16]!
 * movz	x0, #(addr & 0xffff)
 * movk	x0, #((addr >> 16) & 0xffff), lsl #16
 * movk	x0, #((addr >> 32) & 0xffff), lsl #32
 * br	x0
 *
 * Where:
 * addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + KVM_VECTOR_PREAMBLE.
 * See kvm_patch_vector_branch for details.
 */
alternative_cb	kvm_patch_vector_branch
	stp	x0, x1, [sp, #-16]!
	b	__kvm_hyp_vector + (1b - 0b + KVM_VECTOR_PREAMBLE)
	nop
	nop
	nop
alternative_cb_end
.endm

.macro generate_vectors
0:
	.rept 16
	hyp_ventry
	.endr
	.org 0b + SZ_2K		// Safety measure
.endm
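
/*
 * generate_vectors emits one 2K slot: 16 vectors of 128 bytes each
 * (.align 7), with the .org directive catching any overflow at
 * build time.
 */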

	.align	11
SYM_CODE_START(__bp_harden_hyp_vecs)
	.rept BP_HARDEN_EL2_SLOTS
	generate_vectors
	.endr
1:	.org __bp_harden_hyp_vecs + __BP_HARDEN_HYP_VECS_SZ
	.org 1b
SYM_CODE_END(__bp_harden_hyp_vecs)
#endif
