xref: /openbmc/linux/arch/arm64/kvm/hyp/hyp-entry.S (revision 22d55f02)
1/* SPDX-License-Identifier: GPL-2.0-only */
2/*
3 * Copyright (C) 2015-2018 - ARM Ltd
4 * Author: Marc Zyngier <marc.zyngier@arm.com>
5 */
6
7#include <linux/arm-smccc.h>
8#include <linux/linkage.h>
9
10#include <asm/alternative.h>
11#include <asm/assembler.h>
12#include <asm/cpufeature.h>
13#include <asm/kvm_arm.h>
14#include <asm/kvm_asm.h>
15#include <asm/kvm_mmu.h>
16#include <asm/mmu.h>
17
18	.text
19	.pushsection	.hyp.text, "ax"
20
21.macro do_el2_call
22	/*
23	 * Shuffle the parameters before calling the function
24	 * pointed to in x0. Assumes parameters in x[1,2,3].
25	 */
	// Push lr in a 16-byte slot so sp stays 16-byte aligned,
	// as AArch64 requires whenever sp is used for memory access.
26	str	lr, [sp, #-16]!
27	mov	lr, x0
28	mov	x0, x1
29	mov	x1, x2
30	mov	x2, x3
	// Call the target function; its return value is left in x0.
31	blr	lr
32	ldr	lr, [sp], #16
33.endm
34
35el1_sync:				// Guest trapped into EL2
36
	// x0 = exception class (ESR_EL2.EC)
37	mrs	x0, esr_el2
38	lsr	x0, x0, #ESR_ELx_EC_SHIFT
	// Branch away unless EC is HVC64 or HVC32: the ccmp only
	// compares against HVC32 when the HVC64 test failed, and
	// its #4 (Z) immediate forces "eq" when it already matched.
39	cmp	x0, #ESR_ELx_EC_HVC64
40	ccmp	x0, #ESR_ELx_EC_HVC32, #4, ne
41	b.ne	el1_trap
42
43	mrs	x1, vttbr_el2		// If vttbr is valid, the guest
44	cbnz	x1, el1_hvc_guest	// called HVC
45
46	/* Here, we're pretty sure the host called HVC. */
	// Pop the x0/x1 pair stashed by the valid_vect entry stub.
47	ldp	x0, x1, [sp], #16
48
49	/* Check for a stub HVC call */
50	cmp	x0, #HVC_STUB_HCALL_NR
51	b.hs	1f
52
53	/*
54	 * Compute the idmap address of __kvm_handle_stub_hvc and
55	 * jump there. Since we use kimage_voffset, do not use the
56	 * HYP VA for __kvm_handle_stub_hvc, but the kernel VA instead
57	 * (by loading it from the constant pool).
58	 *
59	 * Preserve x0-x4, which may contain stub parameters.
60	 */
61	ldr	x5, =__kvm_handle_stub_hvc
62	ldr_l	x6, kimage_voffset
63
64	/* x5 = __pa(x5) */
65	sub	x5, x5, x6
66	br	x5
67
681:
69	/*
70	 * Perform the EL2 call
71	 */
	// x0 holds the kernel VA of the function to call; convert it
	// to a HYP VA and dispatch (arguments are in x1-x3).
72	kern_hyp_va	x0
73	do_el2_call
74
75	eret
76	sb				// speculation barrier after eret
77
78el1_hvc_guest:
79	/*
80	 * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1.
81	 * The workaround has already been applied on the host,
82	 * so let's quickly get back to the guest. We don't bother
83	 * restoring x1, as it can be clobbered anyway.
84	 */
85	ldr	x1, [sp]				// Guest's x0
	// w1 becomes 0 iff the guest's SMCCC function ID is
	// ARM_SMCCC_ARCH_WORKAROUND_1.
86	eor	w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
87	cbz	w1, wa_epilogue
88
89	/* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
	// Undo the first eor and test for WORKAROUND_2 in one step:
	// w1 is now 0 iff the guest's function ID is WORKAROUND_2.
90	eor	w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \
91			  ARM_SMCCC_ARCH_WORKAROUND_2)
92	cbnz	w1, el1_trap
93
94#ifdef CONFIG_ARM64_SSBD
	// Runtime-patched alternative: the arm64_enable_wa2_handling
	// callback may remove this branch so the WA2 code below runs.
95alternative_cb	arm64_enable_wa2_handling
96	b	wa2_end
97alternative_cb_end
98	get_vcpu_ptr	x2, x0
99	ldr	x0, [x2, #VCPU_WORKAROUND_FLAGS]
100
101	// Sanitize the argument and update the guest flags
102	ldr	x1, [sp, #8]			// Guest's x1
103	clz	w1, w1				// Murphy's device:
104	lsr	w1, w1, #5			// w1 = !!w1 without using
105	eor	w1, w1, #1			// the flags...
	// Record the guest's requested WA2 state in the vcpu flags.
106	bfi	x0, x1, #VCPU_WORKAROUND_2_FLAG_SHIFT, #1
107	str	x0, [x2, #VCPU_WORKAROUND_FLAGS]
108
109	/* Check that we actually need to perform the call */
110	hyp_ldr_this_cpu x0, arm64_ssbd_callback_required, x2
111	cbz	x0, wa2_end
112
113	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
114	smc	#0
115
116	/* Don't leak data from the SMC call */
117	mov	x3, xzr
118wa2_end:
	// ... and don't leak x1/x2 either.
119	mov	x2, xzr
120	mov	x1, xzr
121#endif
122
123wa_epilogue:
	// Hand a zero return value back to the guest, drop the x0/x1
	// frame stashed at vector entry, and return.
124	mov	x0, xzr
125	add	sp, sp, #16
126	eret
127	sb				// speculation barrier after eret
128
129el1_trap:
	// Guest-exit trampolines: x1 = vcpu pointer,
	// x0 = exit code consumed by __guest_exit.
130	get_vcpu_ptr	x1, x0
131	mov	x0, #ARM_EXCEPTION_TRAP
132	b	__guest_exit
133
134el1_irq:
135	get_vcpu_ptr	x1, x0
136	mov	x0, #ARM_EXCEPTION_IRQ
137	b	__guest_exit
138
139el1_error:
140	get_vcpu_ptr	x1, x0
141	mov	x0, #ARM_EXCEPTION_EL1_SERROR
142	b	__guest_exit
143
144el2_sync:
145	/* Check for illegal exception return, otherwise panic */
146	mrs	x0, spsr_el2
147
148	/* if this was something else, then panic! */
	// Z set here means SPSR_EL2.IL is clear, i.e. this was NOT an
	// illegal exception return: any other synchronous EL2
	// exception at this point is fatal.
149	tst	x0, #PSR_IL_BIT
150	b.eq	__hyp_panic
151
152	/* Let's attempt a recovery from the illegal exception return */
153	get_vcpu_ptr	x1, x0
154	mov	x0, #ARM_EXCEPTION_IL
155	b	__guest_exit
156
157
158el2_error:
	// Pop the x0/x1 pair stashed by the valid_vect entry stub.
159	ldp	x0, x1, [sp], #16
160
161	/*
162	 * Only two possibilities:
163	 * 1) Either we come from the exit path, having just unmasked
164	 *    PSTATE.A: change the return code to an EL2 fault, and
165	 *    carry on, as we're already in a sane state to handle it.
166	 * 2) Or we come from anywhere else, and that's a bug: we panic.
167	 *
168	 * For (1), x0 contains the original return code and x1 doesn't
169	 * contain anything meaningful at that stage. We can reuse them
170	 * as temp registers.
171	 * For (2), who cares?
172	 */
173	mrs	x0, elr_el2
	// Panic unless ELR_EL2 equals abort_guest_exit_start or
	// abort_guest_exit_end (the ccmp's #4/Z immediate forces "eq"
	// when the first comparison already matched).
174	adr	x1, abort_guest_exit_start
175	cmp	x0, x1
176	adr	x1, abort_guest_exit_end
177	ccmp	x0, x1, #4, ne
178	b.ne	__hyp_panic
	// Flag the SError in the return code and resume the exit path.
179	mov	x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
180	eret
181	sb				// speculation barrier after eret
182
183ENTRY(__hyp_do_panic)
	// Fabricate an exception return to EL1h with all DAIF bits
	// masked, landing in the kernel's panic().
184	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
185		      PSR_MODE_EL1h)
186	msr	spsr_el2, lr
187	ldr	lr, =panic
188	msr	elr_el2, lr
189	eret
190	sb				// speculation barrier after eret
191ENDPROC(__hyp_do_panic)
192
193ENTRY(__hyp_panic)
	// x0 = host CPU context (x1 used as a temporary by the
	// macro); tail-call the C-level hyp_panic handler.
194	get_host_ctxt x0, x1
195	b	hyp_panic
196ENDPROC(__hyp_panic)
197
198.macro invalid_vector	label, target = __hyp_panic
	// Emit a named stub that simply branches to \target
	// (default: __hyp_panic).
199	.align	2
200\label:
201	b \target
202ENDPROC(\label)
203.endm
204
205	/* None of these should ever happen */
206	invalid_vector	el2t_sync_invalid
207	invalid_vector	el2t_irq_invalid
208	invalid_vector	el2t_fiq_invalid
209	invalid_vector	el2t_error_invalid
210	invalid_vector	el2h_sync_invalid
211	invalid_vector	el2h_irq_invalid
212	invalid_vector	el2h_fiq_invalid
213	invalid_vector	el1_fiq_invalid
214
	// Flush the pending literal pool (e.g. for the
	// "ldr x5, =__kvm_handle_stub_hvc" above) before the vectors.
215	.ltorg
216
	// 2^11 = 2KB alignment for the vector table that follows,
	// matching the architectural VBAR alignment requirement.
217	.align 11
218
219.macro valid_vect target
	// One 128-byte vector slot: stash x0/x1 so the handler has
	// two scratch registers (popped with "ldp x0, x1, [sp], #16"
	// on every exit path), then branch to the handler.
220	.align 7
221	stp	x0, x1, [sp, #-16]!
222	b	\target
223.endm
224
225.macro invalid_vect target
	// Normal case: the first branch goes straight to the
	// invalid-vector stub. With hardened vectors, the patched
	// indirect branch lands at slot + 4 (see
	// kvm_patch_vector_branch for details), so the ldp first
	// undoes the hardening preamble's stp of x0/x1 before
	// reaching \target.
226	.align 7
227	b	\target
228	ldp	x0, x1, [sp], #16
229	b	\target
230.endm
231
232ENTRY(__kvm_hyp_vector)
	// EL2 vector table: four groups of four 128-byte entries
	// (EL2 with SP_EL0, EL2 with SP_ELx, lower-EL AArch64,
	// lower-EL AArch32), each entry emitted by
	// valid_vect/invalid_vect above.
233	invalid_vect	el2t_sync_invalid	// Synchronous EL2t
234	invalid_vect	el2t_irq_invalid	// IRQ EL2t
235	invalid_vect	el2t_fiq_invalid	// FIQ EL2t
236	invalid_vect	el2t_error_invalid	// Error EL2t
237
238	valid_vect	el2_sync		// Synchronous EL2h
239	invalid_vect	el2h_irq_invalid	// IRQ EL2h
240	invalid_vect	el2h_fiq_invalid	// FIQ EL2h
241	valid_vect	el2_error		// Error EL2h
242
243	valid_vect	el1_sync		// Synchronous 64-bit EL1
244	valid_vect	el1_irq			// IRQ 64-bit EL1
245	invalid_vect	el1_fiq_invalid		// FIQ 64-bit EL1
246	valid_vect	el1_error		// Error 64-bit EL1
247
248	valid_vect	el1_sync		// Synchronous 32-bit EL1
249	valid_vect	el1_irq			// IRQ 32-bit EL1
250	invalid_vect	el1_fiq_invalid		// FIQ 32-bit EL1
251	valid_vect	el1_error		// Error 32-bit EL1
252ENDPROC(__kvm_hyp_vector)
253
254#ifdef CONFIG_KVM_INDIRECT_VECTORS
255.macro hyp_ventry
	// 27 NOPs + the 5 instructions in the alternative below
	// = 32 instructions = 128 bytes: exactly one vector slot.
256	.align 7
2571:	.rept 27
258	nop
259	.endr
260/*
261 * The default sequence is to directly branch to the KVM vectors,
262 * using the computed offset. This applies for VHE as well as
263 * !ARM64_HARDEN_EL2_VECTORS.
264 *
265 * For ARM64_HARDEN_EL2_VECTORS configurations, this gets replaced
266 * with:
267 *
268 * stp	x0, x1, [sp, #-16]!
269 * movz	x0, #(addr & 0xffff)
270 * movk	x0, #((addr >> 16) & 0xffff), lsl #16
271 * movk	x0, #((addr >> 32) & 0xffff), lsl #32
272 * br	x0
273 *
274 * Where addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + 4.
275 * See kvm_patch_vector_branch for details.
276 */
277alternative_cb	kvm_patch_vector_branch
	// "1b - 0b" is this slot's offset from the start of the 2K
	// set (label 0 is defined by generate_vectors).
278	b	__kvm_hyp_vector + (1b - 0b)
279	nop
280	nop
281	nop
282	nop
283alternative_cb_end
284.endm
285
286.macro generate_vectors
2870:
	// 16 slots x 128 bytes = SZ_2K per vector set; the .org
	// makes the assembly fail if the emitted size ever drifts.
288	.rept 16
289	hyp_ventry
290	.endr
291	.org 0b + SZ_2K		// Safety measure
292.endm
293
294	.align	11
295ENTRY(__bp_harden_hyp_vecs_start)
	// One complete 2K vector set (generate_vectors) per
	// branch-predictor-hardening slot.
296	.rept BP_HARDEN_EL2_SLOTS
297	generate_vectors
298	.endr
299ENTRY(__bp_harden_hyp_vecs_end)
300
301	.popsection
302
303ENTRY(__smccc_workaround_1_smc_start)
	// Preserve x0-x3 around the ARM_SMCCC_ARCH_WORKAROUND_1
	// firmware call; the 32-byte frame keeps sp 16-byte aligned.
	// The start/end labels delimit the sequence — presumably so
	// it can be measured/copied by the BP-hardening setup code;
	// NOTE(review): confirm against the caller.
304	sub	sp, sp, #(8 * 4)
305	stp	x2, x3, [sp, #(8 * 0)]
306	stp	x0, x1, [sp, #(8 * 2)]
307	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_1
308	smc	#0
309	ldp	x2, x3, [sp, #(8 * 0)]
310	ldp	x0, x1, [sp, #(8 * 2)]
311	add	sp, sp, #(8 * 4)
312ENTRY(__smccc_workaround_1_smc_end)
313#endif
314