xref: /openbmc/linux/arch/arm64/kvm/hyp/hyp-entry.S (revision 5b4cb650)
/*
 * Copyright (C) 2015-2018 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/arm-smccc.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/mmu.h>

	.text
	.pushsection	.hyp.text, "ax"

.macro do_el2_call
	/*
	 * Shuffle the parameters before calling the function
	 * pointed to in x0. Assumes parameters in x[1,2,3].
	 */
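	/*
	 * In C terms this is roughly x0 = fn(x1, x2, x3), where fn is the
	 * function pointer that arrived in x0; lr is saved and restored
	 * around the indirect call so the caller's return address survives.
	 */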
	str	lr, [sp, #-16]!
	mov	lr, x0
	mov	x0, x1
	mov	x1, x2
	mov	x2, x3
	blr	lr
	ldr	lr, [sp], #16
.endm

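	/*
	 * VHE path of a hyp call: the host already runs at EL2, so instead
	 * of issuing an HVC the caller lands here and the "hypercall" is a
	 * plain function call.
	 */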
ENTRY(__vhe_hyp_call)
	do_el2_call
	/*
	 * We used to rely on having an exception return to get
	 * an implicit isb. In the E2H case, we don't have it anymore.
	 * Rather than changing all the leaf functions, just do it here
	 * before returning to the rest of the kernel.
	 */
	isb
	ret
ENDPROC(__vhe_hyp_call)

el1_sync:				// Guest trapped into EL2

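	/*
	 * Dispatch on the exception class. The cmp/ccmp pair leaves the
	 * flags reading "equal" iff the EC is HVC64 or HVC32: when the
	 * first compare already matched, ccmp skips its own compare and
	 * sets NZCV to #4 (Z set); otherwise it compares against HVC32.
	 * Anything else is handled as an ordinary trap.
	 */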
	mrs	x0, esr_el2
	lsr	x0, x0, #ESR_ELx_EC_SHIFT
	cmp	x0, #ESR_ELx_EC_HVC64
	ccmp	x0, #ESR_ELx_EC_HVC32, #4, ne
	b.ne	el1_trap

	mrs	x1, vttbr_el2		// If vttbr is valid, the guest
	cbnz	x1, el1_hvc_guest	// called HVC

	/* Here, we're pretty sure the host called HVC. */
	ldp	x0, x1, [sp], #16

	/* Check for a stub HVC call */
	cmp	x0, #HVC_STUB_HCALL_NR
	b.hs	1f

	/*
	 * Compute the idmap address of __kvm_handle_stub_hvc and
	 * jump there. Since we use kimage_voffset, do not use the
	 * HYP VA for __kvm_handle_stub_hvc, but the kernel VA instead
	 * (by loading it from the constant pool).
	 *
	 * Preserve x0-x4, which may contain stub parameters.
	 */
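	/*
	 * The idmap maps VA == PA, so the physical address computed below
	 * (kernel VA minus kimage_voffset) is also the address the stub
	 * handler runs from.
	 */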
	ldr	x5, =__kvm_handle_stub_hvc
	ldr_l	x6, kimage_voffset

	/* x5 = __pa(x5) */
	sub	x5, x5, x6
	br	x5

1:
	/*
	 * Perform the EL2 call
	 */
	kern_hyp_va	x0
	do_el2_call

	eret
	sb

el1_hvc_guest:
	/*
	 * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1.
	 * The workaround has already been applied on the host,
	 * so let's quickly get back to the guest. We don't bother
	 * restoring x1, as it can be clobbered anyway.
	 */
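	/*
	 * The eor/cbz pair below is a compare-and-branch: w1 ends up zero
	 * iff the guest's x0 was exactly ARM_SMCCC_ARCH_WORKAROUND_1.
	 */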
	ldr	x1, [sp]				// Guest's x0
	eor	w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
	cbz	w1, wa_epilogue

	/* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
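	/*
	 * At this point w1 == guest_x0 ^ WORKAROUND_1, so xoring in
	 * (WORKAROUND_1 ^ WORKAROUND_2) yields guest_x0 ^ WORKAROUND_2,
	 * which is zero iff the guest asked for WORKAROUND_2.
	 */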
	eor	w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \
			  ARM_SMCCC_ARCH_WORKAROUND_2)
	cbnz	w1, el1_trap

#ifdef CONFIG_ARM64_SSBD
alternative_cb	arm64_enable_wa2_handling
	b	wa2_end
alternative_cb_end
	get_vcpu_ptr	x2, x0
	ldr	x0, [x2, #VCPU_WORKAROUND_FLAGS]

	// Sanitize the argument and update the guest flags
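	/*
	 * The clz/lsr/eor sequence normalises the argument to 0/1 without
	 * touching the flags: clz of a non-zero 32-bit value is 0..31
	 * (lsr #5 -> 0) while clz of zero is 32 (lsr #5 -> 1), and the
	 * final eor #1 inverts that, i.e. w1 = !!w1.
	 */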
	ldr	x1, [sp, #8]			// Guest's x1
	clz	w1, w1				// Murphy's device:
	lsr	w1, w1, #5			// w1 = !!w1 without using
	eor	w1, w1, #1			// the flags...
	bfi	x0, x1, #VCPU_WORKAROUND_2_FLAG_SHIFT, #1
	str	x0, [x2, #VCPU_WORKAROUND_FLAGS]

	/* Check that we actually need to perform the call */
	hyp_ldr_this_cpu x0, arm64_ssbd_callback_required, x2
	cbz	x0, wa2_end

	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
	smc	#0

	/* Don't leak data from the SMC call */
	mov	x3, xzr
wa2_end:
	mov	x2, xzr
	mov	x1, xzr
#endif

wa_epilogue:
	mov	x0, xzr
	add	sp, sp, #16
	eret
	sb

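	/*
	 * All guest exit paths below funnel into __guest_exit with the exit
	 * reason in x0 and the vcpu pointer in x1; the guest's own x0/x1
	 * are still on the stack where the vector entry pushed them.
	 */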
el1_trap:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_TRAP
	b	__guest_exit

el1_irq:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_IRQ
	b	__guest_exit

el1_error:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_EL1_SERROR
	b	__guest_exit

el2_sync:
	/* Check for illegal exception return, otherwise panic */
	mrs	x0, spsr_el2

	/* if this was something else, then panic! */
	tst	x0, #PSR_IL_BIT
	b.eq	__hyp_panic

	/* Let's attempt a recovery from the illegal exception return */
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_IL
	b	__guest_exit


el2_error:
	ldp	x0, x1, [sp], #16

	/*
	 * Only two possibilities:
	 * 1) Either we come from the exit path, having just unmasked
	 *    PSTATE.A: change the return code to an EL2 fault, and
	 *    carry on, as we're already in a sane state to handle it.
	 * 2) Or we come from anywhere else, and that's a bug: we panic.
	 *
	 * For (1), x0 contains the original return code and x1 doesn't
	 * contain anything meaningful at that stage. We can reuse them
	 * as temp registers.
	 * For (2), who cares?
	 */
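	/*
	 * The check: panic unless ELR_EL2 is exactly abort_guest_exit_start
	 * or abort_guest_exit_end, i.e. unless the SError was taken in the
	 * exit path's deliberate single-instruction exception window.
	 */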
	mrs	x0, elr_el2
	adr	x1, abort_guest_exit_start
	cmp	x0, x1
	adr	x1, abort_guest_exit_end
	ccmp	x0, x1, #4, ne
	b.ne	__hyp_panic
	mov	x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
	eret
	sb

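	/*
	 * Fake an exception return into the kernel's panic(): build an SPSR
	 * for EL1h with all of DAIF masked, point ELR_EL2 at panic, and eret.
	 */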
ENTRY(__hyp_do_panic)
	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr
	ldr	lr, =panic
	msr	elr_el2, lr
	eret
	sb
ENDPROC(__hyp_do_panic)

ENTRY(__hyp_panic)
	get_host_ctxt x0, x1
	b	hyp_panic
ENDPROC(__hyp_panic)

.macro invalid_vector	label, target = __hyp_panic
	.align	2
\label:
	b \target
ENDPROC(\label)
.endm

	/* None of these should ever happen */
	invalid_vector	el2t_sync_invalid
	invalid_vector	el2t_irq_invalid
	invalid_vector	el2t_fiq_invalid
	invalid_vector	el2t_error_invalid
	invalid_vector	el2h_sync_invalid
	invalid_vector	el2h_irq_invalid
	invalid_vector	el2h_fiq_invalid
	invalid_vector	el1_fiq_invalid

	.ltorg

	.align 11

.macro valid_vect target
	.align 7
	stp	x0, x1, [sp, #-16]!
	b	\target
.endm

.macro invalid_vect target
	.align 7
	b	\target
	ldp	x0, x1, [sp], #16
	b	\target
.endm
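
	/*
	 * Each vector slot is 128 bytes (.align 7). Entered at offset 0,
	 * valid_vect stashes the guest's x0/x1 and branches to the handler,
	 * while invalid_vect branches straight to its (panicking) target.
	 * The extra ldp/b pair in invalid_vect only runs when the hardened
	 * indirect vectors below (ARM64_HARDEN_EL2_VECTORS) enter the slot
	 * at offset 4, having already pushed x0/x1 in the hyp_ventry stub.
	 */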

ENTRY(__kvm_hyp_vector)
	invalid_vect	el2t_sync_invalid	// Synchronous EL2t
	invalid_vect	el2t_irq_invalid	// IRQ EL2t
	invalid_vect	el2t_fiq_invalid	// FIQ EL2t
	invalid_vect	el2t_error_invalid	// Error EL2t

	valid_vect	el2_sync		// Synchronous EL2h
	invalid_vect	el2h_irq_invalid	// IRQ EL2h
	invalid_vect	el2h_fiq_invalid	// FIQ EL2h
	valid_vect	el2_error		// Error EL2h

	valid_vect	el1_sync		// Synchronous 64-bit EL1
	valid_vect	el1_irq			// IRQ 64-bit EL1
	invalid_vect	el1_fiq_invalid		// FIQ 64-bit EL1
	valid_vect	el1_error		// Error 64-bit EL1

	valid_vect	el1_sync		// Synchronous 32-bit EL1
	valid_vect	el1_irq			// IRQ 32-bit EL1
	invalid_vect	el1_fiq_invalid		// FIQ 32-bit EL1
	valid_vect	el1_error		// Error 32-bit EL1
ENDPROC(__kvm_hyp_vector)

#ifdef CONFIG_KVM_INDIRECT_VECTORS
.macro hyp_ventry
	.align 7
1:	.rept 27
	nop
	.endr
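	/*
	 * Layout check: the 27 nops plus the 5-instruction alternative
	 * below fill exactly one 128-byte (.align 7) vector slot; the
	 * patched stp/movz/movk/movk/br sequence is also 5 instructions.
	 */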
/*
 * The default sequence is to directly branch to the KVM vectors,
 * using the computed offset. This applies for VHE as well as
 * !ARM64_HARDEN_EL2_VECTORS.
 *
 * For ARM64_HARDEN_EL2_VECTORS configurations, this gets replaced
 * with:
 *
 * stp	x0, x1, [sp, #-16]!
 * movz	x0, #(addr & 0xffff)
 * movk	x0, #((addr >> 16) & 0xffff), lsl #16
 * movk	x0, #((addr >> 32) & 0xffff), lsl #32
 * br	x0
 *
 * Where addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + 4.
 * See kvm_patch_vector_branch for details.
 */
alternative_cb	kvm_patch_vector_branch
	b	__kvm_hyp_vector + (1b - 0b)
	nop
	nop
	nop
	nop
alternative_cb_end
.endm

.macro generate_vectors
0:
	.rept 16
	hyp_ventry
	.endr
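	/*
	 * 16 slots x 128 bytes each = SZ_2K; the .org below turns any
	 * overflow of that budget into a build error.
	 */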
	.org 0b + SZ_2K		// Safety measure
.endm

	.align	11
ENTRY(__bp_harden_hyp_vecs_start)
	.rept BP_HARDEN_EL2_SLOTS
	generate_vectors
	.endr
ENTRY(__bp_harden_hyp_vecs_end)

	.popsection

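	/*
	 * SMCCC WORKAROUND_1 template: the code between the _start/_end
	 * labels is copied into the branch-predictor hardening slots rather
	 * than called directly. x0-x3 are saved and restored because the
	 * SMCCC call may clobber them.
	 */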
ENTRY(__smccc_workaround_1_smc_start)
	sub	sp, sp, #(8 * 4)
	stp	x2, x3, [sp, #(8 * 0)]
	stp	x0, x1, [sp, #(8 * 2)]
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_1
	smc	#0
	ldp	x2, x3, [sp, #(8 * 0)]
	ldp	x0, x1, [sp, #(8 * 2)]
	add	sp, sp, #(8 * 4)
ENTRY(__smccc_workaround_1_smc_end)
#endif