xref: /openbmc/linux/arch/arm64/kvm/hyp/hyp-entry.S (revision b4f18c063a13dfb33e3a63fe1844823e19c2265e)
/*
 * Copyright (C) 2015-2018 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/arm-smccc.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/mmu.h>

	.text
	.pushsection	.hyp.text, "ax"

.macro do_el2_call
	/*
	 * Shuffle the parameters before calling the function
	 * pointed to in x0. Assumes parameters in x[1,2,3].
	 */
	str	lr, [sp, #-16]!
	mov	lr, x0
	mov	x0, x1
	mov	x1, x2
	mov	x2, x3
	blr	lr
	ldr	lr, [sp], #16
.endm

ENTRY(__vhe_hyp_call)
	do_el2_call
	/*
	 * We used to rely on having an exception return to get
	 * an implicit isb. In the E2H case, we don't have it anymore.
	 * Rather than changing all the leaf functions, just do it here
	 * before returning to the rest of the kernel.
	 */
	isb
	ret
ENDPROC(__vhe_hyp_call)

el1_sync:				// Guest trapped into EL2

	mrs	x0, esr_el2
	lsr	x0, x0, #ESR_ELx_EC_SHIFT
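	/*
	 * The cmp/ccmp pair makes the b.ne branch to el1_trap unless
	 * the exception class is HVC64 or HVC32: if the first compare
	 * fails, compare against HVC32; otherwise force Z (#4 == nZcv)
	 * so that we fall through.
	 */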
	cmp	x0, #ESR_ELx_EC_HVC64
	ccmp	x0, #ESR_ELx_EC_HVC32, #4, ne
	b.ne	el1_trap

	mrs	x1, vttbr_el2		// If vttbr is valid, the guest
	cbnz	x1, el1_hvc_guest	// called HVC

	/* Here, we're pretty sure the host called HVC. */
	ldp	x0, x1, [sp], #16

	/* Check for a stub HVC call */
	cmp	x0, #HVC_STUB_HCALL_NR
	b.hs	1f

	/*
	 * Compute the idmap address of __kvm_handle_stub_hvc and
	 * jump there. Since we use kimage_voffset, do not use the
	 * HYP VA for __kvm_handle_stub_hvc, but the kernel VA instead
	 * (by loading it from the constant pool).
	 *
	 * Preserve x0-x4, which may contain stub parameters.
	 */
	ldr	x5, =__kvm_handle_stub_hvc
	ldr_l	x6, kimage_voffset

	/* x5 = __pa(x5) */
	sub	x5, x5, x6
	br	x5

1:
	/*
	 * Perform the EL2 call
	 */
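	/* x0 holds a kernel VA function pointer; convert it to its HYP VA */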
	kern_hyp_va	x0
	do_el2_call

	eret

el1_hvc_guest:
	/*
	 * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1.
	 * The workaround has already been applied on the host,
	 * so let's quickly get back to the guest. We don't bother
	 * restoring x1, as it can be clobbered anyway.
	 */
	ldr	x1, [sp]				// Guest's x0
	eor	w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
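	// w1 is now zero iff the guest's x0 was ARM_SMCCC_ARCH_WORKAROUND_1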
	cbz	w1, wa_epilogue

	/* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
	eor	w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \
			  ARM_SMCCC_ARCH_WORKAROUND_2)
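	// w1 was x0 ^ WORKAROUND_1; xoring with (WORKAROUND_1 ^ WORKAROUND_2)
	// leaves x0 ^ WORKAROUND_2, so w1 is zero iff WORKAROUND_2 was requested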
	cbnz	w1, el1_trap

#ifdef CONFIG_ARM64_SSBD
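	/*
	 * arm64_enable_wa2_handling is expected to NOP out the branch
	 * below when the SSBD mitigation state can be toggled at
	 * runtime; otherwise the whole WORKAROUND_2 handling is skipped.
	 */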
alternative_cb	arm64_enable_wa2_handling
	b	wa2_end
alternative_cb_end
	get_vcpu_ptr	x2, x0
	ldr	x0, [x2, #VCPU_WORKAROUND_FLAGS]

	// Sanitize the argument and update the guest flags
	ldr	x1, [sp, #8]			// Guest's x1
	clz	w1, w1				// Murphy's device:
	lsr	w1, w1, #5			// w1 = !!w1 without using
	eor	w1, w1, #1			// the flags...
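	// (clz yields 32 only when w1 was zero; shifting right by 5 turns
	//  that into 1, and the final eor inverts it, giving w1 = !!w1)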
	bfi	x0, x1, #VCPU_WORKAROUND_2_FLAG_SHIFT, #1
	str	x0, [x2, #VCPU_WORKAROUND_FLAGS]

	/* Check that we actually need to perform the call */
	hyp_ldr_this_cpu x0, arm64_ssbd_callback_required, x2
	cbz	x0, wa2_end

	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
	smc	#0

	/* Don't leak data from the SMC call */
	mov	x3, xzr
wa2_end:
	mov	x2, xzr
	mov	x1, xzr
#endif

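/* Return SMCCC_RET_SUCCESS (0) in x0 and resume the guest */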
wa_epilogue:
	mov	x0, xzr
	add	sp, sp, #16
	eret

el1_trap:
	get_vcpu_ptr	x1, x0

	mrs		x0, esr_el2
	lsr		x0, x0, #ESR_ELx_EC_SHIFT
	/*
	 * x0: ESR_EC
	 * x1: vcpu pointer
	 */

	/*
	 * We trap the first access to the FP/SIMD to save the host context
	 * and restore the guest context lazily.
	 * If FP/SIMD is not implemented, handle the trap and inject an
	 * undefined instruction exception to the guest.
	 */
alternative_if_not ARM64_HAS_NO_FPSIMD
	cmp	x0, #ESR_ELx_EC_FP_ASIMD
	b.eq	__fpsimd_guest_restore
alternative_else_nop_endif

	mov	x0, #ARM_EXCEPTION_TRAP
	b	__guest_exit

el1_irq:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_IRQ
	b	__guest_exit

el1_error:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_EL1_SERROR
	b	__guest_exit

el2_error:
	ldp	x0, x1, [sp], #16

	/*
	 * Only two possibilities:
	 * 1) Either we come from the exit path, having just unmasked
	 *    PSTATE.A: change the return code to an EL2 fault, and
	 *    carry on, as we're already in a sane state to handle it.
	 * 2) Or we come from anywhere else, and that's a bug: we panic.
	 *
	 * For (1), x0 contains the original return code and x1 doesn't
	 * contain anything meaningful at that stage. We can reuse them
	 * as temp registers.
	 * For (2), who cares?
	 */
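	/*
	 * The cmp/ccmp pair below sets Z iff elr_el2 matches either
	 * abort_guest_exit_start or abort_guest_exit_end exactly.
	 */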
	mrs	x0, elr_el2
	adr	x1, abort_guest_exit_start
	cmp	x0, x1
	adr	x1, abort_guest_exit_end
	ccmp	x0, x1, #4, ne
	b.ne	__hyp_panic
	mov	x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
	eret

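/*
 * Fake an exception return to EL1h with DAIF masked and ELR pointing
 * at the kernel's panic(), so the host carries on panicking at EL1.
 */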
ENTRY(__hyp_do_panic)
	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr
	ldr	lr, =panic
	msr	elr_el2, lr
	eret
ENDPROC(__hyp_do_panic)

ENTRY(__hyp_panic)
	get_host_ctxt x0, x1
	b	hyp_panic
ENDPROC(__hyp_panic)

.macro invalid_vector	label, target = __hyp_panic
	.align	2
\label:
	b \target
ENDPROC(\label)
.endm

	/* None of these should ever happen */
	invalid_vector	el2t_sync_invalid
	invalid_vector	el2t_irq_invalid
	invalid_vector	el2t_fiq_invalid
	invalid_vector	el2t_error_invalid
	invalid_vector	el2h_sync_invalid
	invalid_vector	el2h_irq_invalid
	invalid_vector	el2h_fiq_invalid
	invalid_vector	el1_fiq_invalid

	.ltorg

	.align 11

.macro valid_vect target
	.align 7
	stp	x0, x1, [sp, #-16]!
	b	\target
.endm

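/*
 * Slots using invalid_vect may also be reached via the indirect stubs
 * below, which land at slot + 4 after having pushed x0/x1. In that
 * case the first branch is skipped, so undo the push before branching.
 */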
.macro invalid_vect target
	.align 7
	b	\target
	ldp	x0, x1, [sp], #16
	b	\target
.endm

ENTRY(__kvm_hyp_vector)
	invalid_vect	el2t_sync_invalid	// Synchronous EL2t
	invalid_vect	el2t_irq_invalid	// IRQ EL2t
	invalid_vect	el2t_fiq_invalid	// FIQ EL2t
	invalid_vect	el2t_error_invalid	// Error EL2t

	invalid_vect	el2h_sync_invalid	// Synchronous EL2h
	invalid_vect	el2h_irq_invalid	// IRQ EL2h
	invalid_vect	el2h_fiq_invalid	// FIQ EL2h
	valid_vect	el2_error		// Error EL2h

	valid_vect	el1_sync		// Synchronous 64-bit EL1
	valid_vect	el1_irq			// IRQ 64-bit EL1
	invalid_vect	el1_fiq_invalid		// FIQ 64-bit EL1
	valid_vect	el1_error		// Error 64-bit EL1

	valid_vect	el1_sync		// Synchronous 32-bit EL1
	valid_vect	el1_irq			// IRQ 32-bit EL1
	invalid_vect	el1_fiq_invalid		// FIQ 32-bit EL1
	valid_vect	el1_error		// Error 32-bit EL1
ENDPROC(__kvm_hyp_vector)

#ifdef CONFIG_KVM_INDIRECT_VECTORS
.macro hyp_ventry
	.align 7
1:	.rept 27
	nop
	.endr
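/*
 * The 27 NOPs above plus the 5 instructions in the alternative below
 * fill the 32-instruction (128-byte) vector slot.
 */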
/*
 * The default sequence is to directly branch to the KVM vectors,
 * using the computed offset. This applies for VHE as well as
 * !ARM64_HARDEN_EL2_VECTORS.
 *
 * For ARM64_HARDEN_EL2_VECTORS configurations, this gets replaced
 * with:
 *
 * stp	x0, x1, [sp, #-16]!
 * movz	x0, #(addr & 0xffff)
 * movk	x0, #((addr >> 16) & 0xffff), lsl #16
 * movk	x0, #((addr >> 32) & 0xffff), lsl #32
 * br	x0
 *
 * Where addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + 4.
 * See kvm_patch_vector_branch for details.
 */
alternative_cb	kvm_patch_vector_branch
	b	__kvm_hyp_vector + (1b - 0b)
	nop
	nop
	nop
	nop
alternative_cb_end
.endm

.macro generate_vectors
0:
	.rept 16
	hyp_ventry
	.endr
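	// 16 slots x 128 bytes = SZ_2K; the .org below makes the build
	// fail if the generated vectors ever outgrow a single 2K copy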
	.org 0b + SZ_2K		// Safety measure
.endm

	.align	11
ENTRY(__bp_harden_hyp_vecs_start)
	.rept BP_HARDEN_EL2_SLOTS
	generate_vectors
	.endr
ENTRY(__bp_harden_hyp_vecs_end)

	.popsection

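/*
 * SMC-based ARCH_WORKAROUND_1 sequence: preserve x0-x3 around the
 * firmware call. The code between the start/end labels is expected to
 * be copied into the hardened vector slots at boot, so it must stay
 * self-contained and position-independent.
 */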
ENTRY(__smccc_workaround_1_smc_start)
	sub	sp, sp, #(8 * 4)
	stp	x2, x3, [sp, #(8 * 0)]
	stp	x0, x1, [sp, #(8 * 2)]
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_1
	smc	#0
	ldp	x2, x3, [sp, #(8 * 0)]
	ldp	x0, x1, [sp, #(8 * 2)]
	add	sp, sp, #(8 * 4)
ENTRY(__smccc_workaround_1_smc_end)
#endif