xref: /openbmc/linux/arch/arm64/kvm/hyp/hyp-entry.S (revision de2bdb3d)
1/*
2 * Copyright (C) 2015 - ARM Ltd
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include <linux/linkage.h>
19
20#include <asm/alternative.h>
21#include <asm/assembler.h>
22#include <asm/cpufeature.h>
23#include <asm/kvm_arm.h>
24#include <asm/kvm_asm.h>
25#include <asm/kvm_mmu.h>
26
27	.text
28	.pushsection	.hyp.text, "ax"
29
/*
 * Dispatch a hypercall: call the function whose address is in x0,
 * passing up to three arguments taken from x1-x3.  The callee's
 * return value is left in x0 for the caller.  Clobbers lr (x30).
 */
.macro do_el2_call
	/*
	 * Shuffle the parameters before calling the function
	 * pointed to in x0. Assumes parameters in x[1,2,3].
	 */
	mov	lr, x0
	mov	x0, x1
	mov	x1, x2
	mov	x2, x3
	blr	lr
.endm
41
/*
 * VHE hypercall entry point: with E2H the host runs at EL2, so a
 * "hypercall" is a plain function call rather than an HVC trap.
 * x0 = function pointer, x1-x3 = arguments; result returned in x0.
 * lr is preserved across the dispatch (16-byte slot keeps sp aligned).
 */
ENTRY(__vhe_hyp_call)
	str	lr, [sp, #-16]!
	do_el2_call
	ldr	lr, [sp], #16
	/*
	 * We used to rely on having an exception return to get
	 * an implicit isb. In the E2H case, we don't have it anymore.
	 * Rather than changing all the leaf functions, just do it here
	 * before returning to the rest of the kernel.
	 */
	isb
	ret
ENDPROC(__vhe_hyp_call)
55
/*
 * Compute the idmap address of __kvm_hyp_reset based on the idmap
 * start passed as a parameter, and jump there.
 *
 * x0: HYP phys_idmap_start
 *
 * Does not return: control continues in __kvm_hyp_reset at its
 * identity-mapped address.
 */
ENTRY(__kvm_hyp_teardown)
	mov	x4, x0			// x4 = idmap page base
	adr_l	x3, __kvm_hyp_reset

	/* insert __kvm_hyp_reset()'s page offset into phys_idmap_start */
	bfi	x4, x3, #0, #PAGE_SHIFT	// x4 = idmap base | (reset & PAGE_MASK offset)
	br	x4
ENDPROC(__kvm_hyp_teardown)
70
/*
 * Synchronous exception taken to EL2: either a guest trap, or an HVC
 * issued by the host kernel itself (distinguished below by vttbr_el2).
 */
el1_sync:				// Guest trapped into EL2
	stp	x0, x1, [sp, #-16]!

	/* With VHE (E2H=1), the EL2 syndrome is accessed via the _el1 name */
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	mrs	x1, esr_el2
alternative_else
	mrs	x1, esr_el1
alternative_endif
	lsr	x0, x1, #ESR_ELx_EC_SHIFT	// x0 = exception class (EC)

	cmp	x0, #ESR_ELx_EC_HVC64
	b.ne	el1_trap

	mrs	x1, vttbr_el2		// If vttbr is valid, the 64bit guest
	cbnz	x1, el1_trap		// called HVC

	/* Here, we're pretty sure the host called HVC. */
	ldp	x0, x1, [sp], #16	// restore: x0 = hypercall function/ID

	cmp	x0, #HVC_GET_VECTORS
	b.ne	1f
	mrs	x0, vbar_el2		// special case: return current vectors
	b	2f

1:
	/*
	 * Perform the EL2 call
	 */
	kern_hyp_va	x0		// convert kernel VA of function to HYP VA
	do_el2_call

2:	eret				// back to the host, result in x0
103
el1_trap:
	/*
	 * Generic guest trap path.
	 * x0: ESR_EC (exception class, extracted by el1_sync)
	 * Original x0/x1 are still saved on the stack by el1_sync.
	 */

	/* Guest accessed VFP/SIMD registers, save host, restore Guest */
	cmp	x0, #ESR_ELx_EC_FP_ASIMD
	b.eq	__fpsimd_guest_restore

	mrs	x1, tpidr_el2		// NOTE(review): presumably the vcpu pointer
					// stashed at guest entry for __guest_exit
	mov	x0, #ARM_EXCEPTION_TRAP
	b	__guest_exit
116
/* IRQ taken from the guest: exit to the host with an IRQ exit code. */
el1_irq:
	stp     x0, x1, [sp, #-16]!	// save x0/x1, as __guest_exit expects
	mrs	x1, tpidr_el2		// NOTE(review): presumably the vcpu pointer
	mov	x0, #ARM_EXCEPTION_IRQ
	b	__guest_exit
122
/* SError taken from the guest: exit to the host with a SError exit code. */
el1_error:
	stp     x0, x1, [sp, #-16]!	// save x0/x1, as __guest_exit expects
	mrs	x1, tpidr_el2		// NOTE(review): presumably the vcpu pointer
	mov	x0, #ARM_EXCEPTION_EL1_SERROR
	b	__guest_exit
128
el2_error:
	/*
	 * Only two possibilities:
	 * 1) Either we come from the exit path, having just unmasked
	 *    PSTATE.A: change the return code to an EL2 fault, and
	 *    carry on, as we're already in a sane state to handle it.
	 * 2) Or we come from anywhere else, and that's a bug: we panic.
	 *
	 * For (1), x0 contains the original return code and x1 doesn't
	 * contain anything meaningful at that stage. We can reuse them
	 * as temp registers.
	 * For (2), who cares?
	 */
	mrs	x0, elr_el2
	adr	x1, abort_guest_exit_start
	cmp	x0, x1
	adr	x1, abort_guest_exit_end
	/*
	 * If the first cmp was "ne", compare against the end label too;
	 * otherwise force NZCV = #4 (Z set, i.e. "eq").  Net effect:
	 * condition is eq iff ELR matches either abort label.
	 */
	ccmp	x0, x1, #4, ne
	b.ne	__hyp_panic
	mov	x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)	// tag exit code with SError
	eret
150
/*
 * Leave HYP by faking an exception return into the kernel's panic():
 * build an EL1h SPSR with all interrupt sources (D, A, I, F) masked,
 * point ELR at panic, and eret.  Never returns.
 */
ENTRY(__hyp_do_panic)
	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr
	ldr	lr, =panic		// literal-pool load (flushed by .ltorg below)
	msr	elr_el2, lr
	eret
ENDPROC(__hyp_do_panic)
159
/*
 * Emit a stub vector entry named \label that simply branches to
 * \target (default: __hyp_panic).  .align 2 keeps the entry at a
 * valid 4-byte instruction boundary.
 */
.macro invalid_vector	label, target = __hyp_panic
	.align	2
\label:
	b \target
ENDPROC(\label)
.endm
166
	/* None of these should ever happen */
	invalid_vector	el2t_sync_invalid
	invalid_vector	el2t_irq_invalid
	invalid_vector	el2t_fiq_invalid
	invalid_vector	el2t_error_invalid
	invalid_vector	el2h_sync_invalid
	invalid_vector	el2h_irq_invalid
	invalid_vector	el2h_fiq_invalid
	invalid_vector	el1_sync_invalid
	invalid_vector	el1_irq_invalid
	invalid_vector	el1_fiq_invalid

	/* Flush pending literals (the =panic entry) into .hyp.text here */
	.ltorg
180
	.align 11			// VBAR_EL2 requires 2kB-aligned vectors

/*
 * EL2 exception vector table: 16 entries of 128 bytes each, in the
 * architectural order (EL2t, EL2h, lower-EL AArch64, lower-EL AArch32),
 * each group covering Sync/IRQ/FIQ/SError.
 */
ENTRY(__kvm_hyp_vector)
	ventry	el2t_sync_invalid		// Synchronous EL2t
	ventry	el2t_irq_invalid		// IRQ EL2t
	ventry	el2t_fiq_invalid		// FIQ EL2t
	ventry	el2t_error_invalid		// Error EL2t

	ventry	el2h_sync_invalid		// Synchronous EL2h
	ventry	el2h_irq_invalid		// IRQ EL2h
	ventry	el2h_fiq_invalid		// FIQ EL2h
	ventry	el2_error			// Error EL2h

	ventry	el1_sync			// Synchronous 64-bit EL1
	ventry	el1_irq				// IRQ 64-bit EL1
	ventry	el1_fiq_invalid			// FIQ 64-bit EL1
	ventry	el1_error			// Error 64-bit EL1

	ventry	el1_sync			// Synchronous 32-bit EL1
	ventry	el1_irq				// IRQ 32-bit EL1
	ventry	el1_fiq_invalid			// FIQ 32-bit EL1
	ventry	el1_error			// Error 32-bit EL1
ENDPROC(__kvm_hyp_vector)
204