/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015-2018 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/arm-smccc.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/mmu.h>

.macro save_caller_saved_regs_vect
	/* x0 and x1 were saved in the vector entry */
	stp	x2, x3,   [sp, #-16]!
	stp	x4, x5,   [sp, #-16]!
	stp	x6, x7,   [sp, #-16]!
	stp	x8, x9,   [sp, #-16]!
	stp	x10, x11, [sp, #-16]!
	stp	x12, x13, [sp, #-16]!
	stp	x14, x15, [sp, #-16]!
	stp	x16, x17, [sp, #-16]!
.endm

.macro restore_caller_saved_regs_vect
	ldp	x16, x17, [sp], #16
	ldp	x14, x15, [sp], #16
	ldp	x12, x13, [sp], #16
	ldp	x10, x11, [sp], #16
	ldp	x8, x9,   [sp], #16
	ldp	x6, x7,   [sp], #16
	ldp	x4, x5,   [sp], #16
	ldp	x2, x3,   [sp], #16
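	/* x0/x1 were pushed by the vector entry preamble, not by the save macro */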
	ldp	x0, x1,   [sp], #16
.endm

	.text

.macro do_el2_call
	/*
	 * Shuffle the parameters before calling the function
	 * pointed to in x0. Assumes parameters in x[1,2,3].
	 */
	str	lr, [sp, #-16]!
	mov	lr, x0
	mov	x0, x1
	mov	x1, x2
	mov	x2, x3
	blr	lr
	ldr	lr, [sp], #16
.endm

el1_sync:				// Guest trapped into EL2

	mrs	x0, esr_el2
	lsr	x0, x0, #ESR_ELx_EC_SHIFT
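	/*
	 * EC == HVC64 or EC == HVC32? If the first compare matches,
	 * ccmp forces NZCV to #4 (Z set) so the b.ne below is not
	 * taken; otherwise it compares the EC against HVC32.
	 */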
	cmp	x0, #ESR_ELx_EC_HVC64
	ccmp	x0, #ESR_ELx_EC_HVC32, #4, ne
	b.ne	el1_trap

#ifdef __KVM_NVHE_HYPERVISOR__
	mrs	x1, vttbr_el2		// If vttbr is valid, the guest
	cbnz	x1, el1_hvc_guest	// called HVC

	/* Here, we're pretty sure the host called HVC. */
	ldp	x0, x1, [sp], #16

	/* Check for a stub HVC call */
	cmp	x0, #HVC_STUB_HCALL_NR
	b.hs	1f

	/*
	 * Compute the idmap address of __kvm_handle_stub_hvc and
	 * jump there. Since we use kimage_voffset, do not use the
	 * HYP VA for __kvm_handle_stub_hvc, but the kernel VA instead
	 * (by loading it from the constant pool).
	 *
	 * Preserve x0-x4, which may contain stub parameters.
	 */
	ldr	x5, =__kvm_handle_stub_hvc
	ldr_l	x6, kimage_voffset

	/* x5 = __pa(x5) */
	sub	x5, x5, x6
	br	x5

1:
	/*
	 * Perform the EL2 call: x0 holds the kernel VA of the function
	 * to run at EL2, so convert it to a HYP VA first.
	 */
	kern_hyp_va	x0
	do_el2_call

	eret
	sb
#endif /* __KVM_NVHE_HYPERVISOR__ */

el1_hvc_guest:
	/*
	 * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1.
	 * The workaround has already been applied on the host,
	 * so let's quickly get back to the guest. We don't bother
	 * restoring x1, as it can be clobbered anyway.
	 */
	ldr	x1, [sp]				// Guest's x0
	eor	w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
	cbz	w1, wa_epilogue

	/* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
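	/*
	 * w1 already holds x0 ^ WORKAROUND_1, so XORing it with
	 * (WORKAROUND_1 ^ WORKAROUND_2) leaves x0 ^ WORKAROUND_2,
	 * which is zero iff the guest asked for WORKAROUND_2.
	 */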
	eor	w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \
			  ARM_SMCCC_ARCH_WORKAROUND_2)
	cbnz	w1, el1_trap

wa_epilogue:
	mov	x0, xzr
	add	sp, sp, #16
	eret
	sb

el1_trap:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_TRAP
	b	__guest_exit

el1_irq:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_IRQ
	b	__guest_exit

el1_error:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_EL1_SERROR
	b	__guest_exit

el2_sync:
	/* Check for illegal exception return */
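	/* SPSR_EL2.IL is bit 20 */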
	mrs	x0, spsr_el2
	tbnz	x0, #20, 1f

	save_caller_saved_regs_vect
	stp     x29, x30, [sp, #-16]!
	bl	kvm_unexpected_el2_exception
	ldp     x29, x30, [sp], #16
	restore_caller_saved_regs_vect

	eret

1:
	/* Let's attempt a recovery from the illegal exception return */
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_IL
	b	__guest_exit


el2_error:
	save_caller_saved_regs_vect
	stp     x29, x30, [sp, #-16]!

	bl	kvm_unexpected_el2_exception

	ldp     x29, x30, [sp], #16
	restore_caller_saved_regs_vect

	eret
	sb

#ifdef __KVM_NVHE_HYPERVISOR__
SYM_FUNC_START(__hyp_do_panic)
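	/* ERET into the host's panic() at EL1h, with all DAIF exceptions masked */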
	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr
	ldr	lr, =panic
	msr	elr_el2, lr
	eret
	sb
SYM_FUNC_END(__hyp_do_panic)
#endif

SYM_CODE_START(__hyp_panic)
	get_host_ctxt x0, x1
	b	hyp_panic
SYM_CODE_END(__hyp_panic)

.macro invalid_vector	label, target = __hyp_panic
	.align	2
SYM_CODE_START(\label)
	b \target
SYM_CODE_END(\label)
.endm

	/* None of these should ever happen */
	invalid_vector	el2t_sync_invalid
	invalid_vector	el2t_irq_invalid
	invalid_vector	el2t_fiq_invalid
	invalid_vector	el2t_error_invalid
	invalid_vector	el2h_sync_invalid
	invalid_vector	el2h_irq_invalid
	invalid_vector	el2h_fiq_invalid
	invalid_vector	el1_fiq_invalid

	.ltorg

	.align 11

.macro check_preamble_length start, end
/* kvm_patch_vector_branch() generates code that jumps over the preamble. */
.if ((\end-\start) != KVM_VECTOR_PREAMBLE)
	.error "KVM vector preamble length mismatch"
.endif
.endm
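/*
 * The "preamble" is the first two instructions of each vector below:
 * esb + stp for valid_vect, b + nop for invalid_vect.
 */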

.macro valid_vect target
	.align 7
661:
	esb
	stp	x0, x1, [sp, #-16]!
662:
	b	\target

check_preamble_length 661b, 662b
.endm

.macro invalid_vect target
	.align 7
661:
	b	\target
	nop
662:
	ldp	x0, x1, [sp], #16
	b	\target

check_preamble_length 661b, 662b
.endm

SYM_CODE_START(__kvm_hyp_vector)
	invalid_vect	el2t_sync_invalid	// Synchronous EL2t
	invalid_vect	el2t_irq_invalid	// IRQ EL2t
	invalid_vect	el2t_fiq_invalid	// FIQ EL2t
	invalid_vect	el2t_error_invalid	// Error EL2t

	valid_vect	el2_sync		// Synchronous EL2h
	invalid_vect	el2h_irq_invalid	// IRQ EL2h
	invalid_vect	el2h_fiq_invalid	// FIQ EL2h
	valid_vect	el2_error		// Error EL2h

	valid_vect	el1_sync		// Synchronous 64-bit EL1
	valid_vect	el1_irq			// IRQ 64-bit EL1
	invalid_vect	el1_fiq_invalid		// FIQ 64-bit EL1
	valid_vect	el1_error		// Error 64-bit EL1

	valid_vect	el1_sync		// Synchronous 32-bit EL1
	valid_vect	el1_irq			// IRQ 32-bit EL1
	invalid_vect	el1_fiq_invalid		// FIQ 32-bit EL1
	valid_vect	el1_error		// Error 32-bit EL1
SYM_CODE_END(__kvm_hyp_vector)

.macro hyp_ventry
	.align 7
1:	esb
	.rept 26
	nop
	.endr
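/*
 * esb + 26 nops + the 5-instruction alternative sequence below add up
 * to 32 instructions, i.e. exactly one 128-byte (.align 7) vector slot.
 */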
/*
 * The default sequence is to directly branch to the KVM vectors,
 * using the computed offset. This applies for VHE as well as
 * !ARM64_HARDEN_EL2_VECTORS. The first vector must always run the preamble.
 *
 * For ARM64_HARDEN_EL2_VECTORS configurations, this gets replaced
 * with:
 *
 * stp	x0, x1, [sp, #-16]!
 * movz	x0, #(addr & 0xffff)
 * movk	x0, #((addr >> 16) & 0xffff), lsl #16
 * movk	x0, #((addr >> 32) & 0xffff), lsl #32
 * br	x0
 *
 * Where:
 * addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + KVM_VECTOR_PREAMBLE.
 * See kvm_patch_vector_branch for details.
 */
alternative_cb	kvm_patch_vector_branch
	stp	x0, x1, [sp, #-16]!
	b	__kvm_hyp_vector + (1b - 0b + KVM_VECTOR_PREAMBLE)
	nop
	nop
	nop
alternative_cb_end
.endm

.macro generate_vectors
0:
	.rept 16
	hyp_ventry
	.endr
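	/* 16 vectors x 128 bytes each = SZ_2K */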
	.org 0b + SZ_2K		// Safety measure
.endm

	.align	11
SYM_CODE_START(__bp_harden_hyp_vecs)
	.rept BP_HARDEN_EL2_SLOTS
	generate_vectors
	.endr
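	/*
	 * The .org pair below fails the build unless the vectors add up
	 * to exactly __BP_HARDEN_HYP_VECS_SZ (.org cannot move backwards).
	 */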
1:	.org __bp_harden_hyp_vecs + __BP_HARDEN_HYP_VECS_SZ
	.org 1b
SYM_CODE_END(__bp_harden_hyp_vecs)