/*
 * Copyright (C) 2015-2018 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/arm-smccc.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/mmu.h>

	.text
	.pushsection	.hyp.text, "ax"

.macro do_el2_call
	/*
	 * Shuffle the parameters before calling the function
	 * pointed to in x0. Assumes parameters in x[1,2,3].
	 */
	str	lr, [sp, #-16]!
	mov	lr, x0
	mov	x0, x1
	mov	x1, x2
	mov	x2, x3
	blr	lr
	ldr	lr, [sp], #16
.endm
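
/*
 * Calling convention implied by the macro above: x0 carries the EL2
 * function pointer, x1-x3 carry up to three arguments, the callee is a
 * normal AAPCS function that leaves its return value in x0, and lr is
 * saved/restored around the call. x0-x3 must be treated as clobbered.
 */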

ENTRY(__vhe_hyp_call)
	do_el2_call
	/*
	 * We used to rely on having an exception return to get
	 * an implicit isb. In the E2H case, we don't have it anymore.
	 * Rather than changing all the leaf functions, just do it here
	 * before returning to the rest of the kernel.
	 */
	isb
	ret
ENDPROC(__vhe_hyp_call)
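
/*
 * Call-path sketch (the callers live outside this file, so treat this as
 * an assumption about the surrounding code): without VHE the host issues
 * "hvc #0" with the function pointer in x0 and lands in el1_sync below;
 * with VHE no EL2 exception is taken, and __kvm_call_hyp is expected to
 * branch directly to __vhe_hyp_call above.
 */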

el1_sync:				// Guest trapped into EL2

	mrs	x0, esr_el2
	lsr	x0, x0, #ESR_ELx_EC_SHIFT
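	/*
	 * Only the HVC64/HVC32 exception classes fall through the check
	 * below: the ccmp forces an "equal" result (NZCV #4 sets Z) when
	 * the first comparison has already matched ESR_ELx_EC_HVC64.
	 */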
	cmp	x0, #ESR_ELx_EC_HVC64
	ccmp	x0, #ESR_ELx_EC_HVC32, #4, ne
	b.ne	el1_trap

	mrs	x1, vttbr_el2		// If vttbr is valid, the guest
	cbnz	x1, el1_hvc_guest	// called HVC

	/* Here, we're pretty sure the host called HVC. */
	ldp	x0, x1, [sp], #16

	/* Check for a stub HVC call */
	cmp	x0, #HVC_STUB_HCALL_NR
	b.hs	1f

	/*
	 * Compute the idmap address of __kvm_handle_stub_hvc and
	 * jump there. Since we use kimage_voffset, do not use the
	 * HYP VA for __kvm_handle_stub_hvc, but the kernel VA instead
	 * (by loading it from the constant pool).
	 *
	 * Preserve x0-x4, which may contain stub parameters.
	 */
	ldr	x5, =__kvm_handle_stub_hvc
	ldr_l	x6, kimage_voffset

	/* x5 = __pa(x5) */
	sub	x5, x5, x6
	br	x5

1:
	/*
	 * Perform the EL2 call
	 */
	kern_hyp_va	x0
	do_el2_call

	eret

el1_hvc_guest:
	/*
	 * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1.
	 * The workaround has already been applied on the host,
	 * so let's quickly get back to the guest. We don't bother
	 * restoring x1, as it can be clobbered anyway.
	 */
	ldr	x1, [sp]				// Guest's x0
	eor	w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
	cbnz	w1, el1_trap
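	/* w1 (and hence x1) is now zero: return it in x0 as the success code */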
	mov	x0, x1
	add	sp, sp, #16
	eret

el1_trap:
	get_vcpu_ptr	x1, x0

	mrs		x0, esr_el2
	lsr		x0, x0, #ESR_ELx_EC_SHIFT
	/*
	 * x0: ESR_EC
	 * x1: vcpu pointer
	 */

	/*
	 * We trap the first access to the FP/SIMD to save the host context
	 * and restore the guest context lazily.
	 * If FP/SIMD is not implemented, handle the trap and inject an
	 * undefined instruction exception to the guest.
	 */
alternative_if_not ARM64_HAS_NO_FPSIMD
	cmp	x0, #ESR_ELx_EC_FP_ASIMD
	b.eq	__fpsimd_guest_restore
alternative_else_nop_endif
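	/*
	 * On CPUs without FP/SIMD (ARM64_HAS_NO_FPSIMD) the two instructions
	 * above are patched to NOPs, so the access falls through to
	 * __guest_exit and the UNDEF is injected on the slow path.
	 */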

	mov	x0, #ARM_EXCEPTION_TRAP
	b	__guest_exit

el1_irq:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_IRQ
	b	__guest_exit

el1_error:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_EL1_SERROR
	b	__guest_exit

el2_error:
	ldp	x0, x1, [sp], #16

	/*
	 * Only two possibilities:
	 * 1) Either we come from the exit path, having just unmasked
	 *    PSTATE.A: change the return code to an EL2 fault, and
	 *    carry on, as we're already in a sane state to handle it.
	 * 2) Or we come from anywhere else, and that's a bug: we panic.
	 *
	 * For (1), x0 contains the original return code and x1 doesn't
	 * contain anything meaningful at that stage. We can reuse them
	 * as temp registers.
	 * For (2), who cares?
	 */
	mrs	x0, elr_el2
	adr	x1, abort_guest_exit_start
	cmp	x0, x1
	adr	x1, abort_guest_exit_end
	ccmp	x0, x1, #4, ne
	b.ne	__hyp_panic
	mov	x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
	eret

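/*
 * Set up a fake exception return: EL1h with D, A, I and F masked, ELR
 * pointing at the kernel's panic(). The caller's argument registers are
 * left untouched, so panic() sees them as its own arguments.
 */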
ENTRY(__hyp_do_panic)
	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr
	ldr	lr, =panic
	msr	elr_el2, lr
	eret
ENDPROC(__hyp_do_panic)

ENTRY(__hyp_panic)
	get_host_ctxt x0, x1
	b	hyp_panic
ENDPROC(__hyp_panic)

.macro invalid_vector	label, target = __hyp_panic
	.align	2
\label:
	b \target
ENDPROC(\label)
.endm

	/* None of these should ever happen */
	invalid_vector	el2t_sync_invalid
	invalid_vector	el2t_irq_invalid
	invalid_vector	el2t_fiq_invalid
	invalid_vector	el2t_error_invalid
	invalid_vector	el2h_sync_invalid
	invalid_vector	el2h_irq_invalid
	invalid_vector	el2h_fiq_invalid
	invalid_vector	el1_fiq_invalid

	.ltorg

	.align 11

.macro valid_vect target
	.align 7
	stp	x0, x1, [sp, #-16]!
	b	\target
.endm

.macro invalid_vect target
	.align 7
	b	\target
	ldp	x0, x1, [sp], #16
	b	\target
.endm

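/*
 * Note on the trailing "ldp; b" in invalid_vect above: it is only reached
 * when a hardened stub (see the hyp_ventry comment further down) enters
 * the slot at offset +4, in which case the ldp undoes the stp the stub
 * has already performed before branching to the target.
 *
 * Layout reminder: the table below has 16 slots of 128 bytes (.align 7),
 * grouped as current-EL-with-SP0 (EL2t), current-EL-with-SPx (EL2h),
 * lower-EL-AArch64 and lower-EL-AArch32, 2KB in total (hence the
 * .align 11 above).
 */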
ENTRY(__kvm_hyp_vector)
	invalid_vect	el2t_sync_invalid	// Synchronous EL2t
	invalid_vect	el2t_irq_invalid	// IRQ EL2t
	invalid_vect	el2t_fiq_invalid	// FIQ EL2t
	invalid_vect	el2t_error_invalid	// Error EL2t

	invalid_vect	el2h_sync_invalid	// Synchronous EL2h
	invalid_vect	el2h_irq_invalid	// IRQ EL2h
	invalid_vect	el2h_fiq_invalid	// FIQ EL2h
	valid_vect	el2_error		// Error EL2h

	valid_vect	el1_sync		// Synchronous 64-bit EL1
	valid_vect	el1_irq			// IRQ 64-bit EL1
	invalid_vect	el1_fiq_invalid		// FIQ 64-bit EL1
	valid_vect	el1_error		// Error 64-bit EL1

	valid_vect	el1_sync		// Synchronous 32-bit EL1
	valid_vect	el1_irq			// IRQ 32-bit EL1
	invalid_vect	el1_fiq_invalid		// FIQ 32-bit EL1
	valid_vect	el1_error		// Error 32-bit EL1
ENDPROC(__kvm_hyp_vector)

#ifdef CONFIG_KVM_INDIRECT_VECTORS
.macro hyp_ventry
	.align 7
1:	.rept 27
	nop
	.endr
/*
 * The default sequence is to directly branch to the KVM vectors,
 * using the computed offset. This applies for VHE as well as
 * !ARM64_HARDEN_EL2_VECTORS.
 *
 * For ARM64_HARDEN_EL2_VECTORS configurations, this gets replaced
 * with:
 *
 * stp	x0, x1, [sp, #-16]!
 * movz	x0, #(addr & 0xffff)
 * movk	x0, #((addr >> 16) & 0xffff), lsl #16
 * movk	x0, #((addr >> 32) & 0xffff), lsl #32
 * br	x0
 *
 * Where addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + 4.
 * See kvm_patch_vector_branch for details.
 */
alternative_cb	kvm_patch_vector_branch
	b	__kvm_hyp_vector + (1b - 0b)
	nop
	nop
	nop
	nop
alternative_cb_end
.endm
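
/*
 * Size check: the 27 NOPs plus the 5-instruction alternative above make
 * each hyp_ventry exactly 32 instructions (128 bytes), i.e. one .align 7
 * vector slot; 16 of them fill the 2K page that generate_vectors asserts
 * below with its .org directive.
 */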

.macro generate_vectors
0:
	.rept 16
	hyp_ventry
	.endr
	.org 0b + SZ_2K		// Safety measure
.endm

	.align	11
ENTRY(__bp_harden_hyp_vecs_start)
	.rept BP_HARDEN_EL2_SLOTS
	generate_vectors
	.endr
ENTRY(__bp_harden_hyp_vecs_end)

	.popsection

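/*
 * The sequence between the two labels below is a template rather than
 * code that runs in place (note it sits outside .hyp.text): the CPU
 * errata handling code is expected to copy it into the leading NOP area
 * of each hardened vector slot above, so that a guest trap issues the
 * ARM_SMCCC_ARCH_WORKAROUND_1 SMC before branching on to the real KVM
 * vectors.
 */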
ENTRY(__smccc_workaround_1_smc_start)
	sub	sp, sp, #(8 * 4)
	stp	x2, x3, [sp, #(8 * 0)]
	stp	x0, x1, [sp, #(8 * 2)]
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_1
	smc	#0
	ldp	x2, x3, [sp, #(8 * 0)]
	ldp	x0, x1, [sp, #(8 * 2)]
	add	sp, sp, #(8 * 4)
ENTRY(__smccc_workaround_1_smc_end)
#endif