xref: /openbmc/linux/arch/arm64/kvm/hyp/hyp-entry.S (revision f7c35abe)
1/*
2 * Copyright (C) 2015 - ARM Ltd
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include <linux/linkage.h>
19
20#include <asm/alternative.h>
21#include <asm/assembler.h>
22#include <asm/cpufeature.h>
23#include <asm/kvm_arm.h>
24#include <asm/kvm_asm.h>
25#include <asm/kvm_mmu.h>
26
27	.text
28	.pushsection	.hyp.text, "ax"	// all hyp (EL2) code lives in .hyp.text
29
30.macro do_el2_call
31	/*
32	 * Shuffle the parameters before calling the function
33	 * pointed to in x0. Assumes parameters in x[1,2,3].
	 * Clobbers lr (x30): callers that need lr afterwards must save
	 * it first (see __vhe_hyp_call).
34	 */
35	mov	lr, x0			// stash the function pointer
36	mov	x0, x1			// arg0
37	mov	x1, x2			// arg1
38	mov	x2, x3			// arg2
39	blr	lr			// call the function originally in x0
40.endm
41
	/*
	 * VHE: the host kernel already runs at EL2, so a "hyp call" is a
	 * plain function call. x0 = function pointer, x1-x3 = arguments.
	 */
42ENTRY(__vhe_hyp_call)
43	str	lr, [sp, #-16]!		// save lr (do_el2_call clobbers it); keeps sp 16-byte aligned
44	do_el2_call
45	ldr	lr, [sp], #16
46	/*
47	 * We used to rely on having an exception return to get
48	 * an implicit isb. In the E2H case, we don't have it anymore.
49	 * Rather than changing all the leaf functions, just do it here
50	 * before returning to the rest of the kernel.
51	 */
52	isb
53	ret
54ENDPROC(__vhe_hyp_call)
55
56/*
57 * Compute the idmap address of __kvm_hyp_reset based on the idmap
58 * start passed as a parameter, and jump there.
59 *
60 * x0: HYP phys_idmap_start
61 */
62ENTRY(__kvm_hyp_teardown)
63	mov	x4, x0
64	adr_l	x3, __kvm_hyp_reset
65
66	/*
	 * Insert __kvm_hyp_reset()'s page offset into phys_idmap_start.
	 * This relies on phys_idmap_start being page-aligned and on
	 * __kvm_hyp_reset living inside the idmap page.
	 */
67	bfi	x4, x3, #0, #PAGE_SHIFT
68	br	x4
69ENDPROC(__kvm_hyp_teardown)
70
71el1_sync:				// Guest trapped into EL2
72	stp	x0, x1, [sp, #-16]!	// free up x0/x1 as scratch
73
	/*
	 * With VHE (HCR_EL2.E2H == 1), accesses to the *_EL1 syndrome
	 * register at EL2 are redirected to the EL2 register by hardware,
	 * so both alternatives read the syndrome of this exception.
	 */
74alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
75	mrs	x1, esr_el2
76alternative_else
77	mrs	x1, esr_el1
78alternative_endif
79	lsr	x0, x1, #ESR_ELx_EC_SHIFT	// x0 = exception class
80
81	cmp	x0, #ESR_ELx_EC_HVC64
82	b.ne	el1_trap
83
84	mrs	x1, vttbr_el2		// If vttbr is valid, the 64bit guest
85	cbnz	x1, el1_trap		// called HVC
86
87	/* Here, we're pretty sure the host called HVC. */
88	ldp	x0, x1, [sp], #16	// restore the host's original x0/x1
89
	/* HVC_GET_VECTORS is answered inline, without an EL2 call */
90	cmp	x0, #HVC_GET_VECTORS
91	b.ne	1f
92	mrs	x0, vbar_el2
93	b	2f
94
951:
96	/*
97	 * Perform the EL2 call: x0 holds a kernel-VA function pointer,
	 * x1-x3 its arguments (see do_el2_call).
98	 */
99	kern_hyp_va	x0		// translate the pointer to a HYP VA
100	do_el2_call
101
1022:	eret
103
104el1_trap:
105	/*
106	 * x0: ESR_EC
	 * The guest's x0/x1 are still on the hyp stack (pushed on entry);
	 * the exit path is expected to pop them.
107	 */
108
109	/*
110	 * We trap the first access to the FP/SIMD to save the host context
111	 * and restore the guest context lazily.
112	 * If FP/SIMD is not implemented, handle the trap and inject an
113	 * undefined instruction exception to the guest.
114	 */
115alternative_if_not ARM64_HAS_NO_FPSIMD
116	cmp	x0, #ESR_ELx_EC_FP_ASIMD
117	b.eq	__fpsimd_guest_restore
118alternative_else_nop_endif
119
120	mrs	x1, tpidr_el2		// NOTE(review): presumably the vcpu pointer — confirm against __guest_exit
121	mov	x0, #ARM_EXCEPTION_TRAP
122	b	__guest_exit
123
124el1_irq:				// IRQ taken while running the guest
125	stp     x0, x1, [sp, #-16]!	// save guest x0/x1 for the exit path
126	mrs	x1, tpidr_el2		// NOTE(review): presumably the vcpu pointer — confirm against __guest_exit
127	mov	x0, #ARM_EXCEPTION_IRQ
128	b	__guest_exit
129
130el1_error:				// SError taken while running the guest
131	stp     x0, x1, [sp, #-16]!	// save guest x0/x1 for the exit path
132	mrs	x1, tpidr_el2		// NOTE(review): presumably the vcpu pointer — confirm against __guest_exit
133	mov	x0, #ARM_EXCEPTION_EL1_SERROR
134	b	__guest_exit
135
136el2_error:
137	/*
138	 * Only two possibilities:
139	 * 1) Either we come from the exit path, having just unmasked
140	 *    PSTATE.A: change the return code to an EL2 fault, and
141	 *    carry on, as we're already in a sane state to handle it.
142	 * 2) Or we come from anywhere else, and that's a bug: we panic.
143	 *
144	 * For (1), x0 contains the original return code and x1 doesn't
145	 * contain anything meaningful at that stage. We can reuse them
146	 * as temp registers.
147	 * For (2), who cares?
148	 */
149	mrs	x0, elr_el2
150	adr	x1, abort_guest_exit_start
151	cmp	x0, x1
152	adr	x1, abort_guest_exit_end
	/*
	 * If ELR != abort_guest_exit_start, compare it with the end
	 * label; otherwise force NZCV to #4 (Z set, i.e. "equal").
	 * Net effect: panic unless ELR is exactly one of the two labels.
	 */
153	ccmp	x0, x1, #4, ne
154	b.ne	__hyp_panic
155	mov	x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)	// flag the exit code as an SError
156	eret
157
	/*
	 * Forge an exception return into the kernel's panic(): EL1h mode
	 * with D, A, I and F all masked. NOTE(review): panic()'s arguments
	 * are presumably already set up in x0... by the caller — confirm.
	 */
158ENTRY(__hyp_do_panic)
159	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
160		      PSR_MODE_EL1h)
161	msr	spsr_el2, lr
162	ldr	lr, =panic		// load from the literal pool (emitted by .ltorg below)
163	msr	elr_el2, lr
164	eret
165ENDPROC(__hyp_do_panic)
166
	/* Define a vector stub that just branches to \target (default: panic). */
167.macro invalid_vector	label, target = __hyp_panic
168	.align	2
169\label:
170	b \target
171ENDPROC(\label)
172.endm
173
174	/* None of these should ever happen */
175	invalid_vector	el2t_sync_invalid
176	invalid_vector	el2t_irq_invalid
177	invalid_vector	el2t_fiq_invalid
178	invalid_vector	el2t_error_invalid
179	invalid_vector	el2h_sync_invalid
180	invalid_vector	el2h_irq_invalid
181	invalid_vector	el2h_fiq_invalid
182	invalid_vector	el1_sync_invalid
183	invalid_vector	el1_irq_invalid
184	invalid_vector	el1_fiq_invalid
185
186	.ltorg				// flush literals (e.g. =panic) before the vector table
187
188	.align 11			// VBAR_EL2 requires 2kB (2^11) alignment
189
	/*
	 * EL2 exception vector table, installed in VBAR_EL2. The layout is
	 * architectural: four groups (current EL with SP_EL0, current EL
	 * with SP_ELx, lower EL AArch64, lower EL AArch32) of four entries
	 * (sync, IRQ, FIQ, SError), each entry 0x80 bytes apart.
	 */
190ENTRY(__kvm_hyp_vector)
191	ventry	el2t_sync_invalid		// Synchronous EL2t
192	ventry	el2t_irq_invalid		// IRQ EL2t
193	ventry	el2t_fiq_invalid		// FIQ EL2t
194	ventry	el2t_error_invalid		// Error EL2t
195
196	ventry	el2h_sync_invalid		// Synchronous EL2h
197	ventry	el2h_irq_invalid		// IRQ EL2h
198	ventry	el2h_fiq_invalid		// FIQ EL2h
199	ventry	el2_error			// Error EL2h
200
201	ventry	el1_sync			// Synchronous 64-bit EL1
202	ventry	el1_irq				// IRQ 64-bit EL1
203	ventry	el1_fiq_invalid			// FIQ 64-bit EL1
204	ventry	el1_error			// Error 64-bit EL1
205
206	ventry	el1_sync			// Synchronous 32-bit EL1
207	ventry	el1_irq				// IRQ 32-bit EL1
208	ventry	el1_fiq_invalid			// FIQ 32-bit EL1
209	ventry	el1_error			// Error 32-bit EL1
210ENDPROC(__kvm_hyp_vector)
211