/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/arm-smccc.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>

	.text
	.pushsection	.hyp.text, "ax"

.macro do_el2_call
	/*
	 * Shuffle the parameters before calling the function
	 * pointed to in x0. Assumes parameters in x[1,2,3].
	 */
	str	lr, [sp, #-16]!
	mov	lr, x0
	mov	x0, x1
	mov	x1, x2
	mov	x2, x3
	blr	lr
	ldr	lr, [sp], #16
.endm
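
/*
 * Illustrative sketch (not from this file): the host reaches this code
 * through __kvm_call_hyp (see hyp.S), e.g.
 *
 *	kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
 *
 * On VHE this branches straight to __vhe_hyp_call below; on non-VHE it
 * issues "hvc #0", which lands in el1_sync with the function pointer
 * in x0 and the arguments in x1-x3, ready for do_el2_call to shift
 * them down into x0-x2 before branching to the target.
 */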

ENTRY(__vhe_hyp_call)
	do_el2_call
	/*
	 * We used to rely on having an exception return to get
	 * an implicit isb. In the E2H case, we don't have it anymore.
	 * Rather than changing all the leaf functions, just do it here
	 * before returning to the rest of the kernel.
	 */
	isb
	ret
ENDPROC(__vhe_hyp_call)

el1_sync:				// Guest trapped into EL2
	stp	x0, x1, [sp, #-16]!

alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	mrs	x1, esr_el2
alternative_else
	mrs	x1, esr_el1
alternative_endif
	lsr	x0, x1, #ESR_ELx_EC_SHIFT

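	/*
	 * Note (added for clarity): the cmp/ccmp pair below tests
	 * "EC == HVC64 || EC == HVC32" with a single conditional branch:
	 * if the first comparison fails, ccmp performs the second one;
	 * if it succeeds, ccmp sets NZCV to #4 (Z set) directly, so
	 * b.ne only fires when the EC is neither HVC flavour.
	 */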
	cmp	x0, #ESR_ELx_EC_HVC64
	ccmp	x0, #ESR_ELx_EC_HVC32, #4, ne
	b.ne	el1_trap

	mrs	x1, vttbr_el2		// If vttbr is valid, the guest
	cbnz	x1, el1_hvc_guest	// called HVC

	/* Here, we're pretty sure the host called HVC. */
	ldp	x0, x1, [sp], #16

	/* Check for a stub HVC call */
	cmp	x0, #HVC_STUB_HCALL_NR
	b.hs	1f

	/*
	 * Compute the idmap address of __kvm_handle_stub_hvc and
	 * jump there. Since we use kimage_voffset, do not use the
	 * HYP VA for __kvm_handle_stub_hvc, but the kernel VA instead
	 * (by loading it from the constant pool).
	 *
	 * Preserve x0-x4, which may contain stub parameters.
	 */
	ldr	x5, =__kvm_handle_stub_hvc
	ldr_l	x6, kimage_voffset

	/* x5 = __pa(x5) */
	sub	x5, x5, x6
	br	x5
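
	/*
	 * Worked example (hypothetical addresses, added for clarity):
	 * if the kernel image maps VA 0xffff000008080000 at PA
	 * 0x40080000, kimage_voffset is 0xfffeffffc8000000, and the
	 * subtraction above recovers the PA, which is also the
	 * idmap VA of __kvm_handle_stub_hvc.
	 */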

1:
	/*
	 * Perform the EL2 call
	 */
	kern_hyp_va	x0
	do_el2_call

	eret

el1_hvc_guest:
	/*
	 * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1.
	 * The workaround has already been applied on the host,
	 * so let's quickly get back to the guest. We don't bother
	 * restoring x1, as it can be clobbered anyway.
	 */
	ldr	x1, [sp]				// Guest's x0
	eor	w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
	cbnz	w1, el1_trap			// Not the workaround call
	mov	x0, x1				// x1 is necessarily 0 here
	add	sp, sp, #16
	eret
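
	/*
	 * Note (added for clarity): a guest takes this fast path by
	 * issuing HVC with w0 == ARM_SMCCC_ARCH_WORKAROUND_1
	 * (0x80008000); the zero returned in x0 is SMCCC_RET_SUCCESS.
	 */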

el1_trap:
	/*
	 * x0: ESR_EC
	 */
	ldr	x1, [sp, #16 + 8]	// vcpu stored by __guest_enter

	/*
	 * We trap the first access to the FP/SIMD to save the host context
	 * and restore the guest context lazily.
	 * If FP/SIMD is not implemented, handle the trap and inject an
	 * undefined instruction exception to the guest.
	 */
alternative_if_not ARM64_HAS_NO_FPSIMD
	cmp	x0, #ESR_ELx_EC_FP_ASIMD
	b.eq	__fpsimd_guest_restore
alternative_else_nop_endif

	mov	x0, #ARM_EXCEPTION_TRAP
	b	__guest_exit
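
	/*
	 * Note (added for clarity): __guest_exit (in entry.S) takes the
	 * exit reason in x0 and the vcpu pointer in x1; el1_irq and
	 * el1_error below follow the same convention.
	 */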

el1_irq:
	stp	x0, x1, [sp, #-16]!
	ldr	x1, [sp, #16 + 8]
	mov	x0, #ARM_EXCEPTION_IRQ
	b	__guest_exit

el1_error:
	stp	x0, x1, [sp, #-16]!
	ldr	x1, [sp, #16 + 8]
	mov	x0, #ARM_EXCEPTION_EL1_SERROR
	b	__guest_exit

el2_error:
	/*
	 * Only two possibilities:
	 * 1) Either we come from the exit path, having just unmasked
	 *    PSTATE.A: change the return code to an EL2 fault, and
	 *    carry on, as we're already in a sane state to handle it.
	 * 2) Or we come from anywhere else, and that's a bug: we panic.
	 *
	 * For (1), x0 contains the original return code and x1 doesn't
	 * contain anything meaningful at that stage. We can reuse them
	 * as temp registers.
	 * For (2), who cares?
	 */
	mrs	x0, elr_el2
	adr	x1, abort_guest_exit_start
	cmp	x0, x1
	adr	x1, abort_guest_exit_end
	ccmp	x0, x1, #4, ne		// Z set iff ELR matches either label
	b.ne	__hyp_panic
	mov	x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
	eret

ENTRY(__hyp_do_panic)
	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr		// Return to EL1h, all DAIF masked
	ldr	lr, =panic
	msr	elr_el2, lr		// "Return" straight into panic()
	eret
ENDPROC(__hyp_do_panic)

ENTRY(__hyp_panic)
	/*
	 * '=kvm_host_cpu_state' is a host VA from the constant pool; it may
	 * not be accessible by this address from EL2, so hyp_panic()
	 * converts it with kern_hyp_va() before use.
	 */
	ldr	x0, =kvm_host_cpu_state
	mrs	x1, tpidr_el2		// This CPU's per-CPU offset
	add	x0, x0, x1
	b	hyp_panic
ENDPROC(__hyp_panic)

.macro invalid_vector	label, target = __hyp_panic
	.align	2
\label:
	b \target
ENDPROC(\label)
.endm
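
/*
 * Example expansion (added for clarity):
 *
 *	invalid_vector el2t_sync_invalid
 *
 * emits a 4-byte-aligned label named el2t_sync_invalid containing a
 * single branch to __hyp_panic.
 */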

	/* None of these should ever happen */
	invalid_vector	el2t_sync_invalid
	invalid_vector	el2t_irq_invalid
	invalid_vector	el2t_fiq_invalid
	invalid_vector	el2t_error_invalid
	invalid_vector	el2h_sync_invalid
	invalid_vector	el2h_irq_invalid
	invalid_vector	el2h_fiq_invalid
	invalid_vector	el1_sync_invalid
	invalid_vector	el1_irq_invalid
	invalid_vector	el1_fiq_invalid

	.ltorg

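	/*
	 * Note (added for clarity): .align 11 places the table on a
	 * 2^11 = 2048-byte boundary, matching the architectural vector
	 * table size (16 entries of 0x80 bytes) and the alignment
	 * required by VBAR_EL2.
	 */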
	.align 11

ENTRY(__kvm_hyp_vector)
	ventry	el2t_sync_invalid		// Synchronous EL2t
	ventry	el2t_irq_invalid		// IRQ EL2t
	ventry	el2t_fiq_invalid		// FIQ EL2t
	ventry	el2t_error_invalid		// Error EL2t

	ventry	el2h_sync_invalid		// Synchronous EL2h
	ventry	el2h_irq_invalid		// IRQ EL2h
	ventry	el2h_fiq_invalid		// FIQ EL2h
	ventry	el2_error			// Error EL2h

	ventry	el1_sync			// Synchronous 64-bit EL1
	ventry	el1_irq				// IRQ 64-bit EL1
	ventry	el1_fiq_invalid			// FIQ 64-bit EL1
	ventry	el1_error			// Error 64-bit EL1

	ventry	el1_sync			// Synchronous 32-bit EL1
	ventry	el1_irq				// IRQ 32-bit EL1
	ventry	el1_fiq_invalid			// FIQ 32-bit EL1
	ventry	el1_error			// Error 32-bit EL1
ENDPROC(__kvm_hyp_vector)