/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015-2018 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/arm-smccc.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/mmu.h>
#include <asm/spectre.h>

.macro save_caller_saved_regs_vect
	/* x0 and x1 were saved in the vector entry */
	stp	x2, x3,   [sp, #-16]!
	stp	x4, x5,   [sp, #-16]!
	stp	x6, x7,   [sp, #-16]!
	stp	x8, x9,   [sp, #-16]!
	stp	x10, x11, [sp, #-16]!
	stp	x12, x13, [sp, #-16]!
	stp	x14, x15, [sp, #-16]!
	stp	x16, x17, [sp, #-16]!
.endm

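/*
 * Note the asymmetry: the vector preamble pushed x0/x1 before
 * save_caller_saved_regs_vect ran, so the restore sequence below pops
 * one more register pair than the save macro pushed, leaving the stack
 * balanced at eret.
 */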
.macro restore_caller_saved_regs_vect
	ldp	x16, x17, [sp], #16
	ldp	x14, x15, [sp], #16
	ldp	x12, x13, [sp], #16
	ldp	x10, x11, [sp], #16
	ldp	x8, x9,   [sp], #16
	ldp	x6, x7,   [sp], #16
	ldp	x4, x5,   [sp], #16
	ldp	x2, x3,   [sp], #16
	ldp	x0, x1,   [sp], #16
.endm

	.text

el1_sync:				// Guest trapped into EL2

	mrs	x0, esr_el2
	ubfx	x0, x0, #ESR_ELx_EC_SHIFT, #ESR_ELx_EC_WIDTH
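	/*
	 * Fall through only for HVC from AArch64 or AArch32. If the
	 * first compare already matched, the ccmp condition (ne) is
	 * false and the flags are set to its immediate #4 (nzcv with
	 * only Z set), so b.ne is skipped in either HVC case.
	 */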
	cmp	x0, #ESR_ELx_EC_HVC64
	ccmp	x0, #ESR_ELx_EC_HVC32, #4, ne
	b.ne	el1_trap

	/*
	 * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1.
	 * The workaround has already been applied on the host,
	 * so let's quickly get back to the guest. We don't bother
	 * restoring x1, as it can be clobbered anyway.
	 */
	ldr	x1, [sp]				// Guest's x0
	eor	w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
	cbz	w1, wa_epilogue
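
	/*
	 * Chained eor trick: w1 holds (guest x0 ^ WORKAROUND_1) here.
	 * Each 'eor' with (WA_n ^ WA_n+1) below rewrites that into
	 * (guest x0 ^ WA_n+1), so a single register is matched against
	 * several SMCCC function IDs without reloading the guest's x0.
	 */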

	/* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
	eor	w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \
			  ARM_SMCCC_ARCH_WORKAROUND_2)
	cbz	w1, wa_epilogue

	eor	w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_2 ^ \
			  ARM_SMCCC_ARCH_WORKAROUND_3)
	cbnz	w1, el1_trap

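/*
 * Return SMCCC_RET_SUCCESS (0) and drop the x0/x1 pair that the vector
 * preamble saved; the sb keeps the CPU from speculating past the eret.
 */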
wa_epilogue:
	mov	x0, xzr
	add	sp, sp, #16
	eret
	sb

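/*
 * All guest-exit paths below use the same convention: get_vcpu_ptr
 * (asm/kvm_asm.h) loads the current vcpu pointer into x1 using x0 as
 * scratch, and x0 carries the exception code for __guest_exit.
 */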
el1_trap:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_TRAP
	b	__guest_exit

el1_irq:
el1_fiq:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_IRQ
	b	__guest_exit

el1_error:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_EL1_SERROR
	b	__guest_exit

el2_sync:
	/* Check for illegal exception return */
	mrs	x0, spsr_el2
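	/* SPSR_EL2.IL is bit 20 (PSR_IL_BIT) */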
	tbnz	x0, #20, 1f

	save_caller_saved_regs_vect
	stp     x29, x30, [sp, #-16]!
	bl	kvm_unexpected_el2_exception
	ldp     x29, x30, [sp], #16
	restore_caller_saved_regs_vect

	eret
	sb

1:
	/* Let's attempt a recovery from the illegal exception return */
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_IL
	b	__guest_exit


el2_error:
	save_caller_saved_regs_vect
	stp     x29, x30, [sp, #-16]!

	bl	kvm_unexpected_el2_exception

	ldp     x29, x30, [sp], #16
	restore_caller_saved_regs_vect

	eret
	sb

.macro invalid_vector	label, target = __guest_exit_panic
	.align	2
SYM_CODE_START_LOCAL(\label)
	b \target
SYM_CODE_END(\label)
.endm

	/* None of these should ever happen */
	invalid_vector	el2t_sync_invalid
	invalid_vector	el2t_irq_invalid
	invalid_vector	el2t_fiq_invalid
	invalid_vector	el2t_error_invalid
	invalid_vector	el2h_irq_invalid
	invalid_vector	el2h_fiq_invalid

	.ltorg

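/* The vector base must be 2KB-aligned, as VBAR_EL2[10:0] are RES0. */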
	.align 11

.macro check_preamble_length start, end
/* kvm_patch_vector_branch() generates code that jumps over the preamble. */
.if ((\end-\start) != KVM_VECTOR_PREAMBLE)
	.error "KVM vector preamble length mismatch"
.endif
.endm

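/*
 * Every vector entry below opens with a fixed-size preamble bracketed
 * by the 661/662 labels; check_preamble_length verifies at build time
 * that it is exactly KVM_VECTOR_PREAMBLE (two instructions) long.
 */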
.macro valid_vect target
	.align 7
661:
	esb
	stp	x0, x1, [sp, #-16]!
662:
	/*
	 * The spectre vectors in __bp_harden_hyp_vecs generate br
	 * instructions at runtime that jump to offset 8 (the preamble
	 * length) within the __kvm_hyp_vector entries. As hyp .text is
	 * mapped as a guarded section, those branch targets need a
	 * bti j landing pad.
	 */
	bti j
	b	\target

check_preamble_length 661b, 662b
.endm

.macro invalid_vect target
	.align 7
661:
	nop
	stp	x0, x1, [sp, #-16]!
662:
	/* Same bti j landing pad requirement as in valid_vect above */
	bti j
	b	\target

check_preamble_length 661b, 662b
.endm

SYM_CODE_START(__kvm_hyp_vector)
	invalid_vect	el2t_sync_invalid	// Synchronous EL2t
	invalid_vect	el2t_irq_invalid	// IRQ EL2t
	invalid_vect	el2t_fiq_invalid	// FIQ EL2t
	invalid_vect	el2t_error_invalid	// Error EL2t

	valid_vect	el2_sync		// Synchronous EL2h
	invalid_vect	el2h_irq_invalid	// IRQ EL2h
	invalid_vect	el2h_fiq_invalid	// FIQ EL2h
	valid_vect	el2_error		// Error EL2h

	valid_vect	el1_sync		// Synchronous 64-bit EL1
	valid_vect	el1_irq			// IRQ 64-bit EL1
	valid_vect	el1_fiq			// FIQ 64-bit EL1
	valid_vect	el1_error		// Error 64-bit EL1

	valid_vect	el1_sync		// Synchronous 32-bit EL1
	valid_vect	el1_irq			// IRQ 32-bit EL1
	valid_vect	el1_fiq			// FIQ 32-bit EL1
	valid_vect	el1_error		// Error 32-bit EL1
SYM_CODE_END(__kvm_hyp_vector)

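/*
 * Issue the mitigation SMC while preserving x0-x3. Only 16 of the 32
 * stack bytes are popped at the end: x0/x1 intentionally remain saved,
 * matching the 'stp x0, x1, [sp, #-16]!' performed on the non-SMC
 * vector path.
 */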
.macro spectrev2_smccc_wa1_smc
	sub	sp, sp, #(8 * 4)
	stp	x2, x3, [sp, #(8 * 0)]
	stp	x0, x1, [sp, #(8 * 2)]
	alternative_cb ARM64_ALWAYS_SYSTEM, spectre_bhb_patch_wa3
	/* Patched to mov WA3 when supported */
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_1
	alternative_cb_end
	smc	#0
	ldp	x2, x3, [sp, #(8 * 0)]
	add	sp, sp, #(8 * 2)
.endm

.macro hyp_ventry	indirect, spectrev2
	.align	7
1:	esb
	.if \spectrev2 != 0
	spectrev2_smccc_wa1_smc
	.else
	stp	x0, x1, [sp, #-16]!
	mitigate_spectre_bhb_loop	x0
	mitigate_spectre_bhb_clear_insn
	.endif
	.if \indirect != 0
	alternative_cb ARM64_ALWAYS_SYSTEM, kvm_patch_vector_branch
	/*
	 * For ARM64_SPECTRE_V3A configurations, these NOPs get replaced with:
	 *
	 * movz	x0, #(addr & 0xffff)
	 * movk	x0, #((addr >> 16) & 0xffff), lsl #16
	 * movk	x0, #((addr >> 32) & 0xffff), lsl #32
	 * br	x0
	 *
	 * Where:
	 * addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + KVM_VECTOR_PREAMBLE.
	 * See kvm_patch_vector_branch for details.
	 */
	nop
	nop
	nop
	nop
	alternative_cb_end
	.endif
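	/*
	 * (1b - 0b) is this entry's offset within the generated vector
	 * page (label 0 is placed by generate_vectors), so this lands on
	 * the matching __kvm_hyp_vector entry just past its preamble,
	 * which the code above has already taken care of.
	 */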
	b	__kvm_hyp_vector + (1b - 0b + KVM_VECTOR_PREAMBLE)
.endm

.macro generate_vectors	indirect, spectrev2
0:
	.rept 16
	hyp_ventry	\indirect, \spectrev2
	.endr
	.org 0b + SZ_2K		// Safety measure
.endm

	.align	11
SYM_CODE_START(__bp_harden_hyp_vecs)
	generate_vectors indirect = 0, spectrev2 = 1 // HYP_VECTOR_SPECTRE_DIRECT
	generate_vectors indirect = 1, spectrev2 = 0 // HYP_VECTOR_INDIRECT
	generate_vectors indirect = 1, spectrev2 = 1 // HYP_VECTOR_SPECTRE_INDIRECT
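	/*
	 * Size assertion: the first .org below fails if the vectors
	 * overran __BP_HARDEN_HYP_VECS_SZ, and '.org 1b' fails (the
	 * location counter cannot move backwards) if they fell short.
	 */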
1:	.org __bp_harden_hyp_vecs + __BP_HARDEN_HYP_VECS_SZ
	.org 1b
SYM_CODE_END(__bp_harden_hyp_vecs)