/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015-2018 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/arm-smccc.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/mmu.h>

.macro save_caller_saved_regs_vect
	/* x0 and x1 were saved in the vector entry */
	stp	x2, x3,   [sp, #-16]!
	stp	x4, x5,   [sp, #-16]!
	stp	x6, x7,   [sp, #-16]!
	stp	x8, x9,   [sp, #-16]!
	stp	x10, x11, [sp, #-16]!
	stp	x12, x13, [sp, #-16]!
	stp	x14, x15, [sp, #-16]!
	stp	x16, x17, [sp, #-16]!
.endm

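/*
 * Note the asymmetry with the save macro above: x0/x1 are not saved there
 * because the vector preamble already pushed them, so the final ldp below
 * pops that pair as well.
 */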
.macro restore_caller_saved_regs_vect
	ldp	x16, x17, [sp], #16
	ldp	x14, x15, [sp], #16
	ldp	x12, x13, [sp], #16
	ldp	x10, x11, [sp], #16
	ldp	x8, x9,   [sp], #16
	ldp	x6, x7,   [sp], #16
	ldp	x4, x5,   [sp], #16
	ldp	x2, x3,   [sp], #16
	ldp	x0, x1,   [sp], #16
.endm

	.text

el1_sync:				// Guest trapped into EL2

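	/*
	 * Extract the exception class from ESR_EL2 and check for an HVC
	 * issued from either AArch64 or AArch32; anything else is handled
	 * as an ordinary guest trap.
	 */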
	mrs	x0, esr_el2
	lsr	x0, x0, #ESR_ELx_EC_SHIFT
	cmp	x0, #ESR_ELx_EC_HVC64
	ccmp	x0, #ESR_ELx_EC_HVC32, #4, ne
	b.ne	el1_trap

	/*
	 * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1.
	 * The workaround has already been applied on the host,
	 * so let's quickly get back to the guest. We don't bother
	 * restoring x1, as it can be clobbered anyway.
	 */
	ldr	x1, [sp]				// Guest's x0
	eor	w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
	cbz	w1, wa_epilogue

	/* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
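	/*
	 * w1 still holds the guest's x0 XORed with WORKAROUND_1, so one more
	 * XOR with (WORKAROUND_1 ^ WORKAROUND_2) cancels the first term and
	 * leaves zero exactly when the guest asked for WORKAROUND_2.
	 */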
	eor	w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \
			  ARM_SMCCC_ARCH_WORKAROUND_2)
	cbnz	w1, el1_trap

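/*
 * Common return path for the workaround fast calls: report success (0),
 * drop the x0/x1 pair saved by the vector preamble and go back to the
 * guest. The trailing sb guards against straight-line speculation past
 * the eret.
 */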
wa_epilogue:
	mov	x0, xzr
	add	sp, sp, #16
	eret
	sb

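/*
 * el1_trap, el1_irq and el1_error all hand over to __guest_exit, which
 * expects the exit code in x0 and the vcpu pointer (loaded via
 * get_vcpu_ptr) in x1.
 */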
el1_trap:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_TRAP
	b	__guest_exit

el1_irq:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_IRQ
	b	__guest_exit

el1_error:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_EL1_SERROR
	b	__guest_exit

el2_sync:
	/*
	 * Check for an illegal exception return: SPSR_EL2.IL (bit 20) is set
	 * if this exception was taken because of an illegal execution state.
	 */
	mrs	x0, spsr_el2
	tbnz	x0, #20, 1f

	save_caller_saved_regs_vect
	stp     x29, x30, [sp, #-16]!
	bl	kvm_unexpected_el2_exception
	ldp     x29, x30, [sp], #16
	restore_caller_saved_regs_vect

	eret

1:
	/* Let's attempt a recovery from the illegal exception return */
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_IL
	b	__guest_exit


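/*
 * Unexpected SError taken at EL2: save the caller-saved registers, hand
 * the event to kvm_unexpected_el2_exception() in C, and resume whatever
 * we were doing at EL2 if it returns.
 */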
el2_error:
	save_caller_saved_regs_vect
	stp     x29, x30, [sp, #-16]!

	bl	kvm_unexpected_el2_exception

	ldp     x29, x30, [sp], #16
	restore_caller_saved_regs_vect

	eret
	sb

.macro invalid_vector	label, target = __guest_exit_panic
	.align	2
SYM_CODE_START(\label)
	b \target
SYM_CODE_END(\label)
.endm

	/* None of these should ever happen */
	invalid_vector	el2t_sync_invalid
	invalid_vector	el2t_irq_invalid
	invalid_vector	el2t_fiq_invalid
	invalid_vector	el2t_error_invalid
	invalid_vector	el2h_irq_invalid
	invalid_vector	el2h_fiq_invalid
	invalid_vector	el1_fiq_invalid

	.ltorg

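	/* 2K-align the __kvm_hyp_vector table below, as required by VBAR_EL2 */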
	.align 11

.macro check_preamble_length start, end
/* kvm_patch_vector_branch() generates code that jumps over the preamble. */
.if ((\end-\start) != KVM_VECTOR_PREAMBLE)
	.error "KVM vector preamble length mismatch"
.endif
.endm

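/*
 * Every vector slot starts with a fixed two-instruction preamble (an ESB
 * or NOP followed by the push of x0/x1); KVM_VECTOR_PREAMBLE must match
 * its size so that the patched branches can skip over it.
 */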
.macro valid_vect target
	.align 7
661:
	esb
	stp	x0, x1, [sp, #-16]!
662:
	b	\target

check_preamble_length 661b, 662b
.endm

.macro invalid_vect target
	.align 7
661:
	nop
	stp	x0, x1, [sp, #-16]!
662:
	b	\target

check_preamble_length 661b, 662b
.endm

SYM_CODE_START(__kvm_hyp_vector)
	invalid_vect	el2t_sync_invalid	// Synchronous EL2t
	invalid_vect	el2t_irq_invalid	// IRQ EL2t
	invalid_vect	el2t_fiq_invalid	// FIQ EL2t
	invalid_vect	el2t_error_invalid	// Error EL2t

	valid_vect	el2_sync		// Synchronous EL2h
	invalid_vect	el2h_irq_invalid	// IRQ EL2h
	invalid_vect	el2h_fiq_invalid	// FIQ EL2h
	valid_vect	el2_error		// Error EL2h

	valid_vect	el1_sync		// Synchronous 64-bit EL1
	valid_vect	el1_irq			// IRQ 64-bit EL1
	invalid_vect	el1_fiq_invalid		// FIQ 64-bit EL1
	valid_vect	el1_error		// Error 64-bit EL1

	valid_vect	el1_sync		// Synchronous 32-bit EL1
	valid_vect	el1_irq			// IRQ 32-bit EL1
	invalid_vect	el1_fiq_invalid		// FIQ 32-bit EL1
	valid_vect	el1_error		// Error 32-bit EL1
SYM_CODE_END(__kvm_hyp_vector)

.macro hyp_ventry
	.align 7
1:	esb
	.rept 26
	nop
	.endr
/*
 * The default sequence is to directly branch to the KVM vectors,
 * using the computed offset. This applies for VHE as well as
 * !ARM64_HARDEN_EL2_VECTORS. The first vector must always run the preamble.
 *
 * For ARM64_HARDEN_EL2_VECTORS configurations, this gets replaced
 * with:
 *
 * stp	x0, x1, [sp, #-16]!
 * movz	x0, #(addr & 0xffff)
 * movk	x0, #((addr >> 16) & 0xffff), lsl #16
 * movk	x0, #((addr >> 32) & 0xffff), lsl #32
 * br	x0
 *
 * Where:
 * addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + KVM_VECTOR_PREAMBLE.
 * See kvm_patch_vector_branch for details.
 */
alternative_cb	kvm_patch_vector_branch
	stp	x0, x1, [sp, #-16]!
	b	__kvm_hyp_vector + (1b - 0b + KVM_VECTOR_PREAMBLE)
	nop
	nop
	nop
alternative_cb_end
.endm

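/*
 * Each hyp_ventry expands to exactly 128 bytes (the esb, 26 nops and the
 * 5-instruction alternative above), i.e. one .align 7 vector slot, so 16
 * of them fill the 2K guarded by the .org below.
 */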
.macro generate_vectors
0:
	.rept 16
	hyp_ventry
	.endr
	.org 0b + SZ_2K		// Safety measure
.endm

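/*
 * BP_HARDEN_EL2_SLOTS copies of the vectors, so that CPUs requiring
 * different branch predictor hardening sequences can each be given their
 * own slot, with the branches patched at runtime.
 */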
	.align	11
SYM_CODE_START(__bp_harden_hyp_vecs)
	.rept BP_HARDEN_EL2_SLOTS
	generate_vectors
	.endr
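	/*
	 * Build-time size check: the first .org errors out if the vectors
	 * overflow __BP_HARDEN_HYP_VECS_SZ, and the second errors out if
	 * they fall short, since .org cannot move backwards.
	 */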
1:	.org __bp_harden_hyp_vecs + __BP_HARDEN_HYP_VECS_SZ
	.org 1b
SYM_CODE_END(__bp_harden_hyp_vecs)