xref: /openbmc/linux/arch/arm64/kvm/hyp/hyp-entry.S (revision 9a29ad52)
/*
 * Copyright (C) 2015-2018 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/arm-smccc.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/mmu.h>

	.text
	.pushsection	.hyp.text, "ax"

.macro do_el2_call
	/*
	 * Shuffle the parameters before calling the function
	 * pointed to in x0. Assumes parameters in x[1,2,3].
	 */
	str	lr, [sp, #-16]!
	mov	lr, x0
	mov	x0, x1
	mov	x1, x2
	mov	x2, x3
	blr	lr
	ldr	lr, [sp], #16
.endm

ENTRY(__vhe_hyp_call)
	do_el2_call
	/*
	 * We used to rely on having an exception return to get
	 * an implicit isb. In the E2H case, we don't have it anymore.
	 * Rather than changing all the leaf functions, just do it here
	 * before returning to the rest of the kernel.
	 */
	isb
	ret
ENDPROC(__vhe_hyp_call)

el1_sync:				// Guest trapped into EL2

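	/*
	 * Extract the exception class from ESR_EL2; the cmp/ccmp pair
	 * folds the "is this an HVC from AArch64 or AArch32?" test into
	 * a single conditional branch.
	 */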
	mrs	x0, esr_el2
	lsr	x0, x0, #ESR_ELx_EC_SHIFT
	cmp	x0, #ESR_ELx_EC_HVC64
	ccmp	x0, #ESR_ELx_EC_HVC32, #4, ne
	b.ne	el1_trap

	mrs	x1, vttbr_el2		// If vttbr is valid, the guest
	cbnz	x1, el1_hvc_guest	// called HVC

	/* Here, we're pretty sure the host called HVC. */
	ldp	x0, x1, [sp], #16

	/* Check for a stub HVC call */
	cmp	x0, #HVC_STUB_HCALL_NR
	b.hs	1f

	/*
	 * Compute the idmap address of __kvm_handle_stub_hvc and
	 * jump there. Since we use kimage_voffset, do not use the
	 * HYP VA for __kvm_handle_stub_hvc, but the kernel VA instead
	 * (by loading it from the constant pool).
	 *
	 * Preserve x0-x4, which may contain stub parameters.
	 */
	ldr	x5, =__kvm_handle_stub_hvc
	ldr_l	x6, kimage_voffset

	/* x5 = __pa(x5) */
	sub	x5, x5, x6
	br	x5

1:
	/*
	 * Perform the EL2 call
	 */
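	/* x0 holds the kernel VA of the function to run; switch to its HYP VA */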
	kern_hyp_va	x0
	do_el2_call

	eret

el1_hvc_guest:
	/*
	 * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1.
	 * The workaround has already been applied on the host,
	 * so let's quickly get back to the guest. We don't bother
	 * restoring x1, as it can be clobbered anyway.
	 */
	ldr	x1, [sp]				// Guest's x0
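	// The eor below leaves w1 == 0 iff the guest asked for WORKAROUND_1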
	eor	w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
	cbz	w1, wa_epilogue

	/* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
	eor	w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \
			  ARM_SMCCC_ARCH_WORKAROUND_2)
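	// w1 is now (guest's x0 ^ WORKAROUND_2): zero iff WORKAROUND_2 was requested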
	cbnz	w1, el1_trap

#ifdef CONFIG_ARM64_SSBD
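	/*
	 * The callback rewrites the branch below into a NOP when dynamic
	 * SSBD mitigation is in use, enabling the WORKAROUND_2 handling.
	 */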
alternative_cb	arm64_enable_wa2_handling
	b	wa2_end
alternative_cb_end
	get_vcpu_ptr	x2, x0
	ldr	x0, [x2, #VCPU_WORKAROUND_FLAGS]

	// Sanitize the argument and update the guest flags
	ldr	x1, [sp, #8]			// Guest's x1
	clz	w1, w1				// Murphy's device:
	lsr	w1, w1, #5			// w1 = !!w1 without using
	eor	w1, w1, #1			// the flags...
	bfi	x0, x1, #VCPU_WORKAROUND_2_FLAG_SHIFT, #1
	str	x0, [x2, #VCPU_WORKAROUND_FLAGS]

	/* Check that we actually need to perform the call */
	hyp_ldr_this_cpu x0, arm64_ssbd_callback_required, x2
	cbz	x0, wa2_end

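	/* Forward the request to EL3 firmware; w1 holds the sanitized argument */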
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
	smc	#0

	/* Don't leak data from the SMC call */
	mov	x3, xzr
wa2_end:
	mov	x2, xzr
	mov	x1, xzr
#endif

wa_epilogue:
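	/* Return 0 to the guest and drop the x0/x1 the vector stashed on the stack */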
	mov	x0, xzr
	add	sp, sp, #16
	eret

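/*
 * Remaining guest exits (traps, IRQs, SErrors): load the vcpu pointer
 * into x1 and join the common exit path with the exception code in x0.
 */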
el1_trap:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_TRAP
	b	__guest_exit

el1_irq:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_IRQ
	b	__guest_exit

el1_error:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_EL1_SERROR
	b	__guest_exit

el2_error:
	ldp	x0, x1, [sp], #16

	/*
	 * Only two possibilities:
	 * 1) Either we come from the exit path, having just unmasked
	 *    PSTATE.A: change the return code to an EL2 fault, and
	 *    carry on, as we're already in a sane state to handle it.
	 * 2) Or we come from anywhere else, and that's a bug: we panic.
	 *
	 * For (1), x0 contains the original return code and x1 doesn't
	 * contain anything meaningful at that stage. We can reuse them
	 * as temp registers.
	 * For (2), who cares?
	 */
	mrs	x0, elr_el2
	adr	x1, abort_guest_exit_start
	cmp	x0, x1
	adr	x1, abort_guest_exit_end
	ccmp	x0, x1, #4, ne
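	// Panic unless ELR_EL2 is exactly abort_guest_exit_start or
	// abort_guest_exit_end, i.e. the SError hit the expected window
	// in the exit path.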
	b.ne	__hyp_panic
	mov	x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
	eret

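/*
 * Exception-return into the kernel's panic() at EL1h with D, A, I and F
 * all masked.
 */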
ENTRY(__hyp_do_panic)
	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr
	ldr	lr, =panic
	msr	elr_el2, lr
	eret
ENDPROC(__hyp_do_panic)

ENTRY(__hyp_panic)
	get_host_ctxt x0, x1
	b	hyp_panic
ENDPROC(__hyp_panic)

.macro invalid_vector	label, target = __hyp_panic
	.align	2
\label:
	b \target
ENDPROC(\label)
.endm

	/* None of these should ever happen */
	invalid_vector	el2t_sync_invalid
	invalid_vector	el2t_irq_invalid
	invalid_vector	el2t_fiq_invalid
	invalid_vector	el2t_error_invalid
	invalid_vector	el2h_sync_invalid
	invalid_vector	el2h_irq_invalid
	invalid_vector	el2h_fiq_invalid
	invalid_vector	el1_fiq_invalid

	.ltorg

	.align 11

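/*
 * Each vector slot below is 128 bytes (.align 7). A valid vector stashes
 * the guest's x0/x1 on the stack before branching to its handler; the
 * handler (or __guest_exit) is responsible for popping them.
 */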
.macro valid_vect target
	.align 7
	stp	x0, x1, [sp, #-16]!
	b	\target
.endm

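/*
 * Invalid vectors branch straight to their panic stub. The trailing
 * ldp/b pair is only reached when the hardened (indirect) vectors enter
 * the slot 4 bytes in, having already stashed x0/x1: undo that and then
 * branch to the stub.
 */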
.macro invalid_vect target
	.align 7
	b	\target
	ldp	x0, x1, [sp], #16
	b	\target
.endm

ENTRY(__kvm_hyp_vector)
	invalid_vect	el2t_sync_invalid	// Synchronous EL2t
	invalid_vect	el2t_irq_invalid	// IRQ EL2t
	invalid_vect	el2t_fiq_invalid	// FIQ EL2t
	invalid_vect	el2t_error_invalid	// Error EL2t

	invalid_vect	el2h_sync_invalid	// Synchronous EL2h
	invalid_vect	el2h_irq_invalid	// IRQ EL2h
	invalid_vect	el2h_fiq_invalid	// FIQ EL2h
	valid_vect	el2_error		// Error EL2h

	valid_vect	el1_sync		// Synchronous 64-bit EL1
	valid_vect	el1_irq			// IRQ 64-bit EL1
	invalid_vect	el1_fiq_invalid		// FIQ 64-bit EL1
	valid_vect	el1_error		// Error 64-bit EL1

	valid_vect	el1_sync		// Synchronous 32-bit EL1
	valid_vect	el1_irq			// IRQ 32-bit EL1
	invalid_vect	el1_fiq_invalid		// FIQ 32-bit EL1
	valid_vect	el1_error		// Error 32-bit EL1
ENDPROC(__kvm_hyp_vector)

#ifdef CONFIG_KVM_INDIRECT_VECTORS
.macro hyp_ventry
	.align 7
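	/*
	 * 27 NOPs plus the 5 instructions in the alternative below fill
	 * the 128-byte (.align 7) slot exactly.
	 */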
1:	.rept 27
	nop
	.endr
/*
 * The default sequence is to directly branch to the KVM vectors,
 * using the computed offset. This applies for VHE as well as
 * !ARM64_HARDEN_EL2_VECTORS.
 *
 * For ARM64_HARDEN_EL2_VECTORS configurations, this gets replaced
 * with:
 *
 * stp	x0, x1, [sp, #-16]!
 * movz	x0, #(addr & 0xffff)
 * movk	x0, #((addr >> 16) & 0xffff), lsl #16
 * movk	x0, #((addr >> 32) & 0xffff), lsl #32
 * br	x0
 *
 * Where addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + 4.
 * See kvm_patch_vector_branch for details.
 */
alternative_cb	kvm_patch_vector_branch
	b	__kvm_hyp_vector + (1b - 0b)
	nop
	nop
	nop
	nop
alternative_cb_end
.endm

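/* 16 vectors x 128 bytes each = 2KB (SZ_2K) per set */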
.macro generate_vectors
0:
	.rept 16
	hyp_ventry
	.endr
	.org 0b + SZ_2K		// Safety measure
.endm

	.align	11
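/*
 * BP_HARDEN_EL2_SLOTS independent copies of the 2KB vector set, one per
 * branch-predictor hardening slot.
 */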
ENTRY(__bp_harden_hyp_vecs_start)
	.rept BP_HARDEN_EL2_SLOTS
	generate_vectors
	.endr
ENTRY(__bp_harden_hyp_vecs_end)

	.popsection

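/*
 * ARM_SMCCC_ARCH_WORKAROUND_1 sequence: preserve x0-x3 around the SMC to
 * firmware. The start/end labels delimit a template that gets copied into
 * the hardening slots rather than being called directly, hence no ret.
 */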
ENTRY(__smccc_workaround_1_smc_start)
	sub	sp, sp, #(8 * 4)
	stp	x2, x3, [sp, #(8 * 0)]
	stp	x0, x1, [sp, #(8 * 2)]
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_1
	smc	#0
	ldp	x2, x3, [sp, #(8 * 0)]
	ldp	x0, x1, [sp, #(8 * 2)]
	add	sp, sp, #(8 * 4)
ENTRY(__smccc_workaround_1_smc_end)
#endif