xref: /openbmc/linux/arch/arm64/kvm/hyp/hyp-entry.S (revision e56dc9e2)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015-2018 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/arm-smccc.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/mmu.h>

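/*
 * Helpers to spill/reload the AAPCS caller-saved registers around a call
 * into C code from an EL2 vector. x0/x1 are pushed by the vector preamble
 * (see valid_vect below), which is why the restore macro pops one more
 * register pair than the save macro pushes.
 */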
.macro save_caller_saved_regs_vect
	/* x0 and x1 were saved in the vector entry */
	stp	x2, x3,   [sp, #-16]!
	stp	x4, x5,   [sp, #-16]!
	stp	x6, x7,   [sp, #-16]!
	stp	x8, x9,   [sp, #-16]!
	stp	x10, x11, [sp, #-16]!
	stp	x12, x13, [sp, #-16]!
	stp	x14, x15, [sp, #-16]!
	stp	x16, x17, [sp, #-16]!
.endm

.macro restore_caller_saved_regs_vect
	ldp	x16, x17, [sp], #16
	ldp	x14, x15, [sp], #16
	ldp	x12, x13, [sp], #16
	ldp	x10, x11, [sp], #16
	ldp	x8, x9,   [sp], #16
	ldp	x6, x7,   [sp], #16
	ldp	x4, x5,   [sp], #16
	ldp	x2, x3,   [sp], #16
	ldp	x0, x1,   [sp], #16
.endm

	.text

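/*
 * do_el2_call is, roughly, the assembly counterpart of:
 *
 *	x0 = ((unsigned long (*)(unsigned long, unsigned long,
 *				 unsigned long))x0)(x1, x2, x3);
 *
 * lr is saved and restored around the call because blr clobbers it.
 */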
.macro do_el2_call
	/*
	 * Shuffle the parameters before calling the function
	 * pointed to in x0. Assumes parameters in x[1,2,3].
	 */
	str	lr, [sp, #-16]!
	mov	lr, x0
	mov	x0, x1
	mov	x1, x2
	mov	x2, x3
	blr	lr
	ldr	lr, [sp], #16
.endm

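/*
 * Synchronous exception from EL1: ESR_EL2.EC (bits [31:26]) tells us why
 * we trapped. The cmp/ccmp pair below leaves Z set only if the exception
 * class is an HVC from AArch64 or AArch32; anything else falls through
 * to el1_trap.
 */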
el1_sync:				// Guest trapped into EL2

	mrs	x0, esr_el2
	lsr	x0, x0, #ESR_ELx_EC_SHIFT
	cmp	x0, #ESR_ELx_EC_HVC64
	ccmp	x0, #ESR_ELx_EC_HVC32, #4, ne
	b.ne	el1_trap

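	/*
	 * In the nVHE case an HVC may come from the guest or from the
	 * host kernel. A non-zero VTTBR_EL2 means a guest stage-2 is
	 * installed, so the HVC was issued by the guest; otherwise it
	 * is a host hypercall, either a stub HVC or a hyp function
	 * call dispatched through do_el2_call.
	 */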
#ifdef __KVM_NVHE_HYPERVISOR__
	mrs	x1, vttbr_el2		// If vttbr is valid, the guest
	cbnz	x1, el1_hvc_guest	// called HVC

	/* Here, we're pretty sure the host called HVC. */
	ldp	x0, x1, [sp], #16

	/* Check for a stub HVC call */
	cmp	x0, #HVC_STUB_HCALL_NR
	b.hs	1f

	/*
	 * Compute the idmap address of __kvm_handle_stub_hvc and
	 * jump there. Since we use kimage_voffset, do not use the
	 * HYP VA for __kvm_handle_stub_hvc, but the kernel VA instead
	 * (by loading it from the constant pool).
	 *
	 * Preserve x0-x4, which may contain stub parameters.
	 */
	ldr	x5, =__kvm_handle_stub_hvc
	ldr_l	x6, kimage_voffset

	/* x5 = __pa(x5) */
	sub	x5, x5, x6
	br	x5

1:
	/*
	 * Perform the EL2 call
	 */
	kern_hyp_va	x0
	do_el2_call

	eret
	sb
#endif /* __KVM_NVHE_HYPERVISOR__ */

el1_hvc_guest:
	/*
	 * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1.
	 * The workaround has already been applied on the host,
	 * so let's quickly get back to the guest. We don't bother
	 * restoring x1, as it can be clobbered anyway.
	 */
	ldr	x1, [sp]				// Guest's x0
	eor	w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
	cbz	w1, wa_epilogue

	/* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
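	/*
	 * w1 currently holds guest_x0 ^ ARM_SMCCC_ARCH_WORKAROUND_1, so
	 * xoring it with (WORKAROUND_1 ^ WORKAROUND_2) leaves
	 * guest_x0 ^ WORKAROUND_2: a zero result means the guest asked
	 * for WORKAROUND_2, without having to reload its x0.
	 */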
	eor	w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \
			  ARM_SMCCC_ARCH_WORKAROUND_2)
	cbnz	w1, el1_trap

#ifdef CONFIG_ARM64_SSBD
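	/*
	 * arm64_enable_wa2_handling decides at patch time what happens
	 * here: the branch is expected to be left in place (skipping the
	 * WA2 handling entirely) or turned into a NOP when the SSBD
	 * mitigation state can be flipped dynamically.
	 */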
alternative_cb	arm64_enable_wa2_handling
	b	wa2_end
alternative_cb_end
	get_vcpu_ptr	x2, x0
	ldr	x0, [x2, #VCPU_WORKAROUND_FLAGS]

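	/*
	 * The clz/lsr/eor sequence below computes w1 = (guest_x1 != 0)
	 * without touching the condition flags. The net effect on the
	 * vcpu flags is roughly:
	 *
	 *	flags &= ~(1UL << VCPU_WORKAROUND_2_FLAG_SHIFT);
	 *	flags |= (u64)(guest_x1 != 0) << VCPU_WORKAROUND_2_FLAG_SHIFT;
	 */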
	// Sanitize the argument and update the guest flags
	ldr	x1, [sp, #8]			// Guest's x1
	clz	w1, w1				// Murphy's device:
	lsr	w1, w1, #5			// w1 = !!w1 without using
	eor	w1, w1, #1			// the flags...
	bfi	x0, x1, #VCPU_WORKAROUND_2_FLAG_SHIFT, #1
	str	x0, [x2, #VCPU_WORKAROUND_FLAGS]

	/* Check that we actually need to perform the call */
	hyp_ldr_this_cpu x0, arm64_ssbd_callback_required, x2
	cbz	x0, wa2_end

	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
	smc	#0

	/* Don't leak data from the SMC call */
	mov	x3, xzr
wa2_end:
	mov	x2, xzr
	mov	x1, xzr
#endif

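/*
 * Common fast-path exit: return SMCCC_RET_SUCCESS (0) in the guest's x0,
 * drop the x0/x1 pair stacked by the vector preamble and go straight back
 * to the guest.
 */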
wa_epilogue:
	mov	x0, xzr
	add	sp, sp, #16
	eret
	sb

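/*
 * The trampolines below set up the arguments __guest_exit expects:
 * x1 = vcpu pointer, x0 = exit code. get_vcpu_ptr uses x0 as scratch,
 * hence the ordering.
 */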
el1_trap:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_TRAP
	b	__guest_exit

el1_irq:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_IRQ
	b	__guest_exit

el1_error:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_EL1_SERROR
	b	__guest_exit

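/*
 * Synchronous exception taken from EL2 itself. Bit 20 of SPSR_EL2 is the
 * IL bit, set when the exception was caused by an illegal exception
 * return; in that case ARM_EXCEPTION_IL is reported through __guest_exit.
 * Anything else is unexpected and is handled by
 * kvm_unexpected_el2_exception().
 */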
el2_sync:
	/* Check for illegal exception return */
	mrs	x0, spsr_el2
	tbnz	x0, #20, 1f

	save_caller_saved_regs_vect
	stp     x29, x30, [sp, #-16]!
	bl	kvm_unexpected_el2_exception
	ldp     x29, x30, [sp], #16
	restore_caller_saved_regs_vect

	eret

1:
	/* Let's attempt a recovery from the illegal exception return */
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_IL
	b	__guest_exit


el2_error:
	save_caller_saved_regs_vect
	stp     x29, x30, [sp, #-16]!

	bl	kvm_unexpected_el2_exception

	ldp     x29, x30, [sp], #16
	restore_caller_saved_regs_vect

	eret
	sb

#ifdef __KVM_NVHE_HYPERVISOR__
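/*
 * __hyp_do_panic never returns to hyp: it builds an EL1h PSTATE with all
 * of DAIF masked, points ELR_EL2 at the kernel's panic() (its address is
 * loaded from the literal pool emitted by the .ltorg further down) and
 * erets, so execution resumes directly in panic() at EL1.
 */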
SYM_FUNC_START(__hyp_do_panic)
	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr
	ldr	lr, =panic
	msr	elr_el2, lr
	eret
	sb
SYM_FUNC_END(__hyp_do_panic)
#endif

SYM_CODE_START(__hyp_panic)
	get_host_ctxt x0, x1
	b	hyp_panic
SYM_CODE_END(__hyp_panic)

.macro invalid_vector	label, target = __hyp_panic
	.align	2
SYM_CODE_START(\label)
	b \target
SYM_CODE_END(\label)
.endm

	/* None of these should ever happen */
	invalid_vector	el2t_sync_invalid
	invalid_vector	el2t_irq_invalid
	invalid_vector	el2t_fiq_invalid
	invalid_vector	el2t_error_invalid
	invalid_vector	el2h_sync_invalid
	invalid_vector	el2h_irq_invalid
	invalid_vector	el2h_fiq_invalid
	invalid_vector	el1_fiq_invalid

	.ltorg

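/* Vector tables installed in VBAR_EL2 must be 2KB (2^11) aligned. */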
	.align 11

.macro check_preamble_length start, end
/* kvm_patch_vector_branch() generates code that jumps over the preamble. */
.if ((\end-\start) != KVM_VECTOR_PREAMBLE)
	.error "KVM vector preamble length mismatch"
.endif
.endm

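/*
 * Each valid vector entry starts with the fixed two-instruction preamble
 * measured by KVM_VECTOR_PREAMBLE: an ESB (a NOP unless the RAS extension
 * is implemented, in which case a pending SError is deferred so it can be
 * sampled from DISR_EL1 later) followed by stacking the guest's x0/x1.
 * check_preamble_length stops the preamble from growing silently.
 */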
.macro valid_vect target
	.align 7
661:
	esb
	stp	x0, x1, [sp, #-16]!
662:
	b	\target

check_preamble_length 661b, 662b
.endm

.macro invalid_vect target
	.align 7
661:
	b	\target
	nop
662:
	ldp	x0, x1, [sp], #16
	b	\target

check_preamble_length 661b, 662b
.endm

SYM_CODE_START(__kvm_hyp_vector)
	invalid_vect	el2t_sync_invalid	// Synchronous EL2t
	invalid_vect	el2t_irq_invalid	// IRQ EL2t
	invalid_vect	el2t_fiq_invalid	// FIQ EL2t
	invalid_vect	el2t_error_invalid	// Error EL2t

	valid_vect	el2_sync		// Synchronous EL2h
	invalid_vect	el2h_irq_invalid	// IRQ EL2h
	invalid_vect	el2h_fiq_invalid	// FIQ EL2h
	valid_vect	el2_error		// Error EL2h

	valid_vect	el1_sync		// Synchronous 64-bit EL1
	valid_vect	el1_irq			// IRQ 64-bit EL1
	invalid_vect	el1_fiq_invalid		// FIQ 64-bit EL1
	valid_vect	el1_error		// Error 64-bit EL1

	valid_vect	el1_sync		// Synchronous 32-bit EL1
	valid_vect	el1_irq			// IRQ 32-bit EL1
	invalid_vect	el1_fiq_invalid		// FIQ 32-bit EL1
	valid_vect	el1_error		// Error 32-bit EL1
SYM_CODE_END(__kvm_hyp_vector)

#ifdef CONFIG_KVM_INDIRECT_VECTORS
.macro hyp_ventry
	.align 7
1:	esb
	.rept 26
	nop
	.endr
/*
 * The default sequence is to directly branch to the KVM vectors,
 * using the computed offset. This applies for VHE as well as
 * !ARM64_HARDEN_EL2_VECTORS. The first vector must always run the preamble.
 *
 * For ARM64_HARDEN_EL2_VECTORS configurations, this gets replaced
 * with:
 *
 * stp	x0, x1, [sp, #-16]!
 * movz	x0, #(addr & 0xffff)
 * movk	x0, #((addr >> 16) & 0xffff), lsl #16
 * movk	x0, #((addr >> 32) & 0xffff), lsl #32
 * br	x0
 *
 * Where:
 * addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + KVM_VECTOR_PREAMBLE.
 * See kvm_patch_vector_branch for details.
 */
alternative_cb	kvm_patch_vector_branch
	stp	x0, x1, [sp, #-16]!
	b	__kvm_hyp_vector + (1b - 0b + KVM_VECTOR_PREAMBLE)
	nop
	nop
	nop
alternative_cb_end
.endm

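/*
 * One hardened vector slot: 16 entries of 128 bytes (.align 7) each,
 * i.e. exactly SZ_2K. The .org both pads the slot and makes the build
 * fail if a hyp_ventry ever outgrows its 32-instruction budget.
 */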
.macro generate_vectors
0:
	.rept 16
	hyp_ventry
	.endr
	.org 0b + SZ_2K		// Safety measure
.endm

	.align	11
SYM_CODE_START(__bp_harden_hyp_vecs)
	.rept BP_HARDEN_EL2_SLOTS
	generate_vectors
	.endr
1:	.org __bp_harden_hyp_vecs + __BP_HARDEN_HYP_VECS_SZ
	.org 1b
SYM_CODE_END(__bp_harden_hyp_vecs)
#endif