xref: /openbmc/linux/arch/arm64/kvm/hyp/hyp-entry.S (revision bbecb07f)
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>

	.text
	.pushsection	.hyp.text, "ax"
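
/*
 * All of the code in this file lives in the .hyp.text section, which is
 * mapped into the EL2 (hyp) address space.
 */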

.macro do_el2_call
	/*
	 * Shuffle the parameters before calling the function
	 * pointed to in x0. Assumes parameters in x[1,2,3].
	 */
	str	lr, [sp, #-16]!
	mov	lr, x0
	mov	x0, x1
	mov	x1, x2
	mov	x2, x3
	blr	lr
	ldr	lr, [sp], #16
.endm
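
/*
 * For illustration: on the host side, a hyp call is typically issued via
 * the kvm_call_hyp() helper declared in asm/kvm_asm.h, e.g.
 *
 *	ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
 *
 * which reaches do_el2_call (via HVC on non-VHE, or via __vhe_hyp_call
 * below on VHE) with the function pointer in x0 and up to three
 * arguments in x1-x3. A sketch of the usual call path, not an
 * exhaustive list of callers.
 */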

ENTRY(__vhe_hyp_call)
	do_el2_call
	/*
	 * We used to rely on the exception return to provide an implicit
	 * isb. In the E2H case we no longer have one, so rather than
	 * changing all the leaf functions, just do it here before
	 * returning to the rest of the kernel.
	 */
	isb
	ret
ENDPROC(__vhe_hyp_call)

el1_sync:				// Guest trapped into EL2
	stp	x0, x1, [sp, #-16]!

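	/*
	 * With VHE (HCR_EL2.E2H set), the _el1 accessor names used at EL2
	 * are redirected to the corresponding EL2 registers, so the
	 * esr_el1 read below also returns ESR_EL2.
	 */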
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	mrs	x1, esr_el2
alternative_else
	mrs	x1, esr_el1
alternative_endif
	lsr	x0, x1, #ESR_ELx_EC_SHIFT

	cmp	x0, #ESR_ELx_EC_HVC64
	b.ne	el1_trap

	mrs	x1, vttbr_el2		// If vttbr is valid, the 64-bit guest
	cbnz	x1, el1_trap		// called HVC

	/* Here, we're pretty sure the host called HVC. */
	ldp	x0, x1, [sp], #16

	/* Check for a stub HVC call */
	cmp	x0, #HVC_STUB_HCALL_NR
	b.hs	1f

	/*
	 * Compute the idmap address of __kvm_handle_stub_hvc and
	 * jump there. Since we rely on kimage_voffset, we must use the
	 * kernel VA of __kvm_handle_stub_hvc (loaded from the constant
	 * pool below) rather than its HYP VA.
	 *
	 * Preserve x0-x4, which may contain stub parameters.
	 */
	ldr	x5, =__kvm_handle_stub_hvc
	ldr_l	x6, kimage_voffset
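
	/* kimage_voffset is (kernel VA - PA), so VA - kimage_voffset == PA */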
	/* x5 = __pa(x5) */
	sub	x5, x5, x6
	br	x5

1:
	/*
	 * Perform the EL2 call
	 */
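	/* x0 is a kernel VA; convert it to its HYP alias before calling */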
	kern_hyp_va	x0
	do_el2_call

	eret

el1_trap:
	/*
	 * x0: ESR_EC
	 */

	/*
	 * We trap the first access to the FP/SIMD to save the host context
	 * and restore the guest context lazily.
	 * If FP/SIMD is not implemented, handle the trap and inject an
	 * undefined instruction exception to the guest.
	 */
alternative_if_not ARM64_HAS_NO_FPSIMD
	cmp	x0, #ESR_ELx_EC_FP_ASIMD
	b.eq	__fpsimd_guest_restore
alternative_else_nop_endif

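	/*
	 * In this kernel revision, tpidr_el2 holds the current vcpu
	 * pointer while a guest is running; __guest_exit expects it in
	 * x1, with the exit code in x0.
	 */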
	mrs	x1, tpidr_el2
	mov	x0, #ARM_EXCEPTION_TRAP
	b	__guest_exit

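	/*
	 * IRQ or SError taken from a guest: same exit sequence as
	 * el1_trap, only the exit code differs.
	 */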
el1_irq:
	stp	x0, x1, [sp, #-16]!
	mrs	x1, tpidr_el2
	mov	x0, #ARM_EXCEPTION_IRQ
	b	__guest_exit

el1_error:
	stp	x0, x1, [sp, #-16]!
	mrs	x1, tpidr_el2
	mov	x0, #ARM_EXCEPTION_EL1_SERROR
	b	__guest_exit

el2_error:
	/*
	 * Only two possibilities:
	 * 1) Either we come from the exit path, having just unmasked
	 *    PSTATE.A: change the return code to an EL2 fault, and
	 *    carry on, as we're already in a sane state to handle it.
	 * 2) Or we come from anywhere else, and that's a bug: we panic.
	 *
	 * For (1), x0 contains the original return code and x1 doesn't
	 * contain anything meaningful at that stage. We can reuse them
	 * as temp registers.
	 * For (2), who cares?
	 */
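	/*
	 * Continue only if ELR_EL2 points at one of the two locations
	 * marked abort_guest_exit_start/end. The ccmp immediate #4 is
	 * the NZCV value (Z set, i.e. "eq") used when the first
	 * comparison already matched.
	 */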
	mrs	x0, elr_el2
	adr	x1, abort_guest_exit_start
	cmp	x0, x1
	adr	x1, abort_guest_exit_end
	ccmp	x0, x1, #4, ne
	b.ne	__hyp_panic
	mov	x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
	eret

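	/*
	 * Forge an exception return into the kernel's panic(), running
	 * in EL1h mode with D, A, I and F all masked.
	 */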
ENTRY(__hyp_do_panic)
	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr
	ldr	lr, =panic
	msr	elr_el2, lr
	eret
ENDPROC(__hyp_do_panic)

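/* Emit a vector stub that just branches to \target (default: __hyp_panic) */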
.macro invalid_vector	label, target = __hyp_panic
	.align	2
\label:
	b \target
ENDPROC(\label)
.endm

	/* None of these should ever happen */
	invalid_vector	el2t_sync_invalid
	invalid_vector	el2t_irq_invalid
	invalid_vector	el2t_fiq_invalid
	invalid_vector	el2t_error_invalid
	invalid_vector	el2h_sync_invalid
	invalid_vector	el2h_irq_invalid
	invalid_vector	el2h_fiq_invalid
	invalid_vector	el1_sync_invalid
	invalid_vector	el1_irq_invalid
	invalid_vector	el1_fiq_invalid

	.ltorg

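	/* The vector table must be 2kB-aligned (VBAR_EL2 bits [10:0] are RES0) */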
	.align 11

ENTRY(__kvm_hyp_vector)
	ventry	el2t_sync_invalid		// Synchronous EL2t
	ventry	el2t_irq_invalid		// IRQ EL2t
	ventry	el2t_fiq_invalid		// FIQ EL2t
	ventry	el2t_error_invalid		// Error EL2t

	ventry	el2h_sync_invalid		// Synchronous EL2h
	ventry	el2h_irq_invalid		// IRQ EL2h
	ventry	el2h_fiq_invalid		// FIQ EL2h
	ventry	el2_error			// Error EL2h

	ventry	el1_sync			// Synchronous 64-bit EL1
	ventry	el1_irq				// IRQ 64-bit EL1
	ventry	el1_fiq_invalid			// FIQ 64-bit EL1
	ventry	el1_error			// Error 64-bit EL1

	ventry	el1_sync			// Synchronous 32-bit EL1
	ventry	el1_irq				// IRQ 32-bit EL1
	ventry	el1_fiq_invalid			// FIQ 32-bit EL1
	ventry	el1_error			// Error 32-bit EL1
ENDPROC(__kvm_hyp_vector)
210