xref: /openbmc/linux/arch/arm64/kvm/hyp/hyp-entry.S (revision 89e33ea7)
/*
 * Copyright (C) 2015-2018 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/arm-smccc.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/mmu.h>

	.text
	.pushsection	.hyp.text, "ax"

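/*
 * do_el2_call is used when the host issues a hypercall: x0 holds the
 * HYP VA of the function to run (the host, e.g. via __kvm_call_hyp,
 * passes a kernel VA that is converted with kern_hyp_va first) and
 * x1-x3 hold up to three arguments.
 */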
.macro do_el2_call
	/*
	 * Shuffle the parameters before calling the function
	 * pointed to in x0. Assumes parameters in x[1,2,3].
	 */
	str	lr, [sp, #-16]!
	mov	lr, x0
	mov	x0, x1
	mov	x1, x2
	mov	x2, x3
	blr	lr
	ldr	lr, [sp], #16
.endm

el1_sync:				// Guest trapped into EL2

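	/*
	 * Decode the exception class. The cmp/ccmp pair below only
	 * falls through for an HVC from AArch64 or AArch32: the ccmp
	 * performs the second compare only if the first one did not
	 * match, and otherwise forces the flags to "equal" (#4), so
	 * b.ne is taken only when the EC is neither HVC64 nor HVC32.
	 */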
	mrs	x0, esr_el2
	lsr	x0, x0, #ESR_ELx_EC_SHIFT
	cmp	x0, #ESR_ELx_EC_HVC64
	ccmp	x0, #ESR_ELx_EC_HVC32, #4, ne
	b.ne	el1_trap

	mrs	x1, vttbr_el2		// If vttbr is valid, the guest
	cbnz	x1, el1_hvc_guest	// called HVC

	/* Here, we're pretty sure the host called HVC. */
	ldp	x0, x1, [sp], #16
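	/*
	 * x0/x1 are now the host's original values: x0 is either a
	 * stub hcall number or the kernel VA of a HYP function, with
	 * its arguments in x1-x3.
	 */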

	/* Check for a stub HVC call */
	cmp	x0, #HVC_STUB_HCALL_NR
	b.hs	1f

	/*
	 * Compute the idmap address of __kvm_handle_stub_hvc and
	 * jump there. Since we use kimage_voffset, do not use the
	 * HYP VA for __kvm_handle_stub_hvc, but the kernel VA instead
	 * (by loading it from the constant pool).
	 *
	 * Preserve x0-x4, which may contain stub parameters.
	 */
	ldr	x5, =__kvm_handle_stub_hvc
	ldr_l	x6, kimage_voffset

	/* x5 = __pa(x5) */
	sub	x5, x5, x6
	br	x5

1:
	/*
	 * Perform the EL2 call
	 */
	kern_hyp_va	x0
	do_el2_call

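	/*
	 * Return to the host. The sb following eret is a speculation
	 * barrier, keeping the CPU from speculatively executing what
	 * follows the exception return.
	 */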
	eret
	sb

el1_hvc_guest:
	/*
	 * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1.
	 * The workaround has already been applied on the host,
	 * so let's quickly get back to the guest. We don't bother
	 * restoring x1, as it can be clobbered anyway.
	 */
	ldr	x1, [sp]				// Guest's x0
	eor	w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
	cbz	w1, wa_epilogue

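	/*
	 * Not WORKAROUND_1. The eor below undoes the previous eor and
	 * tests for WORKAROUND_2 in a single step: w1 ends up zero
	 * iff the guest's x0 was ARM_SMCCC_ARCH_WORKAROUND_2.
	 */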
	/* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
	eor	w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \
			  ARM_SMCCC_ARCH_WORKAROUND_2)
	cbnz	w1, el1_trap

#ifdef CONFIG_ARM64_SSBD
alternative_cb	arm64_enable_wa2_handling
	b	wa2_end
alternative_cb_end
	get_vcpu_ptr	x2, x0
	ldr	x0, [x2, #VCPU_WORKAROUND_FLAGS]

	// Sanitize the argument and update the guest flags
	ldr	x1, [sp, #8]			// Guest's x1
	clz	w1, w1				// Murphy's device:
	lsr	w1, w1, #5			// w1 = !!w1 without using
	eor	w1, w1, #1			// the flags...
	bfi	x0, x1, #VCPU_WORKAROUND_2_FLAG_SHIFT, #1
	str	x0, [x2, #VCPU_WORKAROUND_FLAGS]

	/* Check that we actually need to perform the call */
	hyp_ldr_this_cpu x0, arm64_ssbd_callback_required, x2
	cbz	x0, wa2_end

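	/*
	 * Forward the request to firmware: x1 still holds the
	 * sanitized 0/1 state computed above and is passed through
	 * as the SMC argument.
	 */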
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
	smc	#0

	/* Don't leak data from the SMC call */
	mov	x3, xzr
wa2_end:
	mov	x2, xzr
	mov	x1, xzr
#endif

wa_epilogue:
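	/*
	 * Return 0 (SMCCC success) in x0, drop the x0/x1 pair stacked
	 * by the vector stub, and go straight back to the guest.
	 */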
	mov	x0, xzr
	add	sp, sp, #16
	eret
	sb

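/*
 * The trap/IRQ/SError paths below all funnel into __guest_exit,
 * which expects the exit reason in x0, the vcpu pointer in x1, and
 * the guest's x0/x1 still stacked by the vector stub.
 */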
el1_trap:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_TRAP
	b	__guest_exit

el1_irq:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_IRQ
	b	__guest_exit

el1_error:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_EL1_SERROR
	b	__guest_exit

el2_sync:
	/* Check for illegal exception return, otherwise panic */
	mrs	x0, spsr_el2

	/* if this was something else, then panic! */
	tst	x0, #PSR_IL_BIT
	b.eq	__hyp_panic

	/* Let's attempt a recovery from the illegal exception return */
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_IL
	b	__guest_exit


el2_error:
	ldp	x0, x1, [sp], #16

	/*
	 * Only two possibilities:
	 * 1) Either we come from the exit path, having just unmasked
	 *    PSTATE.A: change the return code to an EL2 fault, and
	 *    carry on, as we're already in a sane state to handle it.
	 * 2) Or we come from anywhere else, and that's a bug: we panic.
	 *
	 * For (1), x0 contains the original return code and x1 doesn't
	 * contain anything meaningful at that stage. We can reuse them
	 * as temp registers.
	 * For (2), who cares?
	 */
	mrs	x0, elr_el2
	adr	x1, abort_guest_exit_start
	cmp	x0, x1
	adr	x1, abort_guest_exit_end
	ccmp	x0, x1, #4, ne
	b.ne	__hyp_panic
	mov	x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
	eret
	sb

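/*
 * Called by hyp_panic to get back to EL1 and call panic(): build an
 * EL1h SPSR with all interrupts masked, point ELR at panic and eret.
 * The caller is expected to have set up panic()'s arguments.
 */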
ENTRY(__hyp_do_panic)
	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr
	ldr	lr, =panic
	msr	elr_el2, lr
	eret
	sb
ENDPROC(__hyp_do_panic)

ENTRY(__hyp_panic)
	get_host_ctxt x0, x1
	b	hyp_panic
ENDPROC(__hyp_panic)

.macro invalid_vector	label, target = __hyp_panic
	.align	2
\label:
	b \target
ENDPROC(\label)
.endm

	/* None of these should ever happen */
	invalid_vector	el2t_sync_invalid
	invalid_vector	el2t_irq_invalid
	invalid_vector	el2t_fiq_invalid
	invalid_vector	el2t_error_invalid
	invalid_vector	el2h_sync_invalid
	invalid_vector	el2h_irq_invalid
	invalid_vector	el2h_fiq_invalid
	invalid_vector	el1_fiq_invalid

	.ltorg

	.align 11

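/*
 * Per-vector entry stubs. Valid vectors stack the guest's x0/x1
 * (the handlers find them at [sp] and [sp, #8]) before branching to
 * the handler. The .align 11 above gives __kvm_hyp_vector the 2kB
 * alignment that VBAR_EL2 requires.
 */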
.macro valid_vect target
	.align 7
	stp	x0, x1, [sp, #-16]!
	b	\target
.endm

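/*
 * Invalid vectors get two entry points. Direct entry takes the first
 * branch. With ARM64_HARDEN_EL2_VECTORS, hyp_ventry (below) stacks
 * x0/x1 and branches 4 bytes into the vector, so the ldp undoes the
 * stacking before branching to the target.
 */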
.macro invalid_vect target
	.align 7
	b	\target
	ldp	x0, x1, [sp], #16
	b	\target
.endm

ENTRY(__kvm_hyp_vector)
	invalid_vect	el2t_sync_invalid	// Synchronous EL2t
	invalid_vect	el2t_irq_invalid	// IRQ EL2t
	invalid_vect	el2t_fiq_invalid	// FIQ EL2t
	invalid_vect	el2t_error_invalid	// Error EL2t

	valid_vect	el2_sync		// Synchronous EL2h
	invalid_vect	el2h_irq_invalid	// IRQ EL2h
	invalid_vect	el2h_fiq_invalid	// FIQ EL2h
	valid_vect	el2_error		// Error EL2h

	valid_vect	el1_sync		// Synchronous 64-bit EL1
	valid_vect	el1_irq			// IRQ 64-bit EL1
	invalid_vect	el1_fiq_invalid		// FIQ 64-bit EL1
	valid_vect	el1_error		// Error 64-bit EL1

	valid_vect	el1_sync		// Synchronous 32-bit EL1
	valid_vect	el1_irq			// IRQ 32-bit EL1
	invalid_vect	el1_fiq_invalid		// FIQ 32-bit EL1
	valid_vect	el1_error		// Error 32-bit EL1
ENDPROC(__kvm_hyp_vector)

#ifdef CONFIG_KVM_INDIRECT_VECTORS
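/*
 * Each hyp_ventry below is a single 128-byte vector entry: 27 nops
 * (space into which the host can copy a branch-predictor hardening
 * sequence such as __smccc_workaround_1_smc, see
 * install_bp_hardening_cb() in cpu_errata.c) followed by the
 * 5-instruction branch sequence patched by kvm_patch_vector_branch.
 */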
.macro hyp_ventry
	.align 7
1:	.rept 27
	nop
	.endr
/*
 * The default sequence is to directly branch to the KVM vectors,
 * using the computed offset. This applies for VHE as well as
 * !ARM64_HARDEN_EL2_VECTORS.
 *
 * For ARM64_HARDEN_EL2_VECTORS configurations, this gets replaced
 * with:
 *
 * stp	x0, x1, [sp, #-16]!
 * movz	x0, #(addr & 0xffff)
 * movk	x0, #((addr >> 16) & 0xffff), lsl #16
 * movk	x0, #((addr >> 32) & 0xffff), lsl #32
 * br	x0
 *
 * Where addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + 4.
 * See kvm_patch_vector_branch for details.
 */
alternative_cb	kvm_patch_vector_branch
	b	__kvm_hyp_vector + (1b - 0b)
	nop
	nop
	nop
	nop
alternative_cb_end
.endm

.macro generate_vectors
0:
	.rept 16
	hyp_ventry
	.endr
	.org 0b + SZ_2K		// Safety measure
.endm

	.align	11
ENTRY(__bp_harden_hyp_vecs_start)
	.rept BP_HARDEN_EL2_SLOTS
	generate_vectors
	.endr
ENTRY(__bp_harden_hyp_vecs_end)

	.popsection

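/*
 * ARM_SMCCC_ARCH_WORKAROUND_1 sequence. The code between the
 * start/end labels is copied at runtime into the 27-nop space at the
 * head of each hardened vector above (see install_bp_hardening_cb()
 * in cpu_errata.c); it preserves x0-x3 around the firmware call that
 * invalidates the branch predictor.
 */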
ENTRY(__smccc_workaround_1_smc_start)
	sub	sp, sp, #(8 * 4)
	stp	x2, x3, [sp, #(8 * 0)]
	stp	x0, x1, [sp, #(8 * 2)]
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_1
	smc	#0
	ldp	x2, x3, [sp, #(8 * 0)]
	ldp	x0, x1, [sp, #(8 * 2)]
	add	sp, sp, #(8 * 4)
ENTRY(__smccc_workaround_1_smc_end)
#endif