xref: /openbmc/linux/arch/arm64/kvm/hyp/nvhe/host.S (revision c900529f3d9161bfde5cca0754f83b4d3c3e0220)
16e3bfbb2SAndrew Scull/* SPDX-License-Identifier: GPL-2.0-only */
26e3bfbb2SAndrew Scull/*
36e3bfbb2SAndrew Scull * Copyright (C) 2020 - Google Inc
46e3bfbb2SAndrew Scull * Author: Andrew Scull <ascull@google.com>
56e3bfbb2SAndrew Scull */
66e3bfbb2SAndrew Scull
76e3bfbb2SAndrew Scull#include <linux/linkage.h>
86e3bfbb2SAndrew Scull
96e3bfbb2SAndrew Scull#include <asm/assembler.h>
10ccac9697SWill Deacon#include <asm/kvm_arm.h>
116e3bfbb2SAndrew Scull#include <asm/kvm_asm.h>
126e3bfbb2SAndrew Scull#include <asm/kvm_mmu.h>
138c15c2a0SMostafa Saleh#include <asm/kvm_ptrauth.h>
146e3bfbb2SAndrew Scull
156e3bfbb2SAndrew Scull	.text
166e3bfbb2SAndrew Scull
/*
 * __host_exit - Save the complete host context into the per-CPU host
 * kvm_cpu_context, run the C trap handler, then restore the context and
 * eret back to the host.
 *
 * On entry, the vector stub has already pushed the host's x0/x1 onto the
 * stack (see host_el1_sync_vect); all other host registers are still live.
 */
174e3393a9SAndrew ScullSYM_FUNC_START(__host_exit)
184e3393a9SAndrew Scull	get_host_ctxt	x0, x1
194e3393a9SAndrew Scull
204e3393a9SAndrew Scull	/* Store the host regs x2 and x3 */
214e3393a9SAndrew Scull	stp	x2, x3,   [x0, #CPU_XREG_OFFSET(2)]
224e3393a9SAndrew Scull
234e3393a9SAndrew Scull	/* Retrieve the host regs x0-x1 from the stack */
244e3393a9SAndrew Scull	ldp	x2, x3, [sp], #16	// x0, x1
254e3393a9SAndrew Scull
264e3393a9SAndrew Scull	/* Store the host regs x0-x1 and x4-x17 */
274e3393a9SAndrew Scull	stp	x2, x3,   [x0, #CPU_XREG_OFFSET(0)]
284e3393a9SAndrew Scull	stp	x4, x5,   [x0, #CPU_XREG_OFFSET(4)]
294e3393a9SAndrew Scull	stp	x6, x7,   [x0, #CPU_XREG_OFFSET(6)]
304e3393a9SAndrew Scull	stp	x8, x9,   [x0, #CPU_XREG_OFFSET(8)]
314e3393a9SAndrew Scull	stp	x10, x11, [x0, #CPU_XREG_OFFSET(10)]
324e3393a9SAndrew Scull	stp	x12, x13, [x0, #CPU_XREG_OFFSET(12)]
334e3393a9SAndrew Scull	stp	x14, x15, [x0, #CPU_XREG_OFFSET(14)]
344e3393a9SAndrew Scull	stp	x16, x17, [x0, #CPU_XREG_OFFSET(16)]
354e3393a9SAndrew Scull
364e3393a9SAndrew Scull	/* Store the host regs x18-x29, lr */
374e3393a9SAndrew Scull	save_callee_saved_regs x0
384e3393a9SAndrew Scull
394e3393a9SAndrew Scull	/* Save the host context pointer in x29 across the function call */
404e3393a9SAndrew Scull	mov	x29, x0
418c15c2a0SMostafa Saleh
428c15c2a0SMostafa Saleh#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
	/* Skip the key swap entirely on CPUs without address authentication. */
438c15c2a0SMostafa Salehalternative_if_not ARM64_HAS_ADDRESS_AUTH
448c15c2a0SMostafa Salehb __skip_pauth_save
458c15c2a0SMostafa Salehalternative_else_nop_endif
468c15c2a0SMostafa Saleh
	/* The hyp-private keys are only installed in protected mode. */
478c15c2a0SMostafa Salehalternative_if ARM64_KVM_PROTECTED_MODE
488c15c2a0SMostafa Saleh	/* Save kernel ptrauth keys. */
498c15c2a0SMostafa Saleh	add x18, x29, #CPU_APIAKEYLO_EL1
508c15c2a0SMostafa Saleh	ptrauth_save_state x18, x19, x20
518c15c2a0SMostafa Saleh
528c15c2a0SMostafa Saleh	/* Use hyp keys. */
538c15c2a0SMostafa Saleh	adr_this_cpu x18, kvm_hyp_ctxt, x19
548c15c2a0SMostafa Saleh	add x18, x18, #CPU_APIAKEYLO_EL1
558c15c2a0SMostafa Saleh	ptrauth_restore_state x18, x19, x20
568c15c2a0SMostafa Saleh	isb
578c15c2a0SMostafa Salehalternative_else_nop_endif
588c15c2a0SMostafa Saleh__skip_pauth_save:
598c15c2a0SMostafa Saleh#endif /* CONFIG_ARM64_PTR_AUTH_KERNEL */
608c15c2a0SMostafa Saleh
	/* Call the C handler; x0 still holds the host context pointer. */
614e3393a9SAndrew Scull	bl	handle_trap
624e3393a9SAndrew Scull
	/* Also the entry point of __host_enter; expects host ctxt in x29. */
6304e05f05SDavid Brazdil__host_enter_restore_full:
648c15c2a0SMostafa Saleh	/* Restore kernel keys. */
658c15c2a0SMostafa Saleh#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
668c15c2a0SMostafa Salehalternative_if_not ARM64_HAS_ADDRESS_AUTH
678c15c2a0SMostafa Salehb __skip_pauth_restore
688c15c2a0SMostafa Salehalternative_else_nop_endif
698c15c2a0SMostafa Saleh
708c15c2a0SMostafa Salehalternative_if ARM64_KVM_PROTECTED_MODE
718c15c2a0SMostafa Saleh	add x18, x29, #CPU_APIAKEYLO_EL1
728c15c2a0SMostafa Saleh	ptrauth_restore_state x18, x19, x20
738c15c2a0SMostafa Salehalternative_else_nop_endif
748c15c2a0SMostafa Saleh__skip_pauth_restore:
758c15c2a0SMostafa Saleh#endif /* CONFIG_ARM64_PTR_AUTH_KERNEL */
768c15c2a0SMostafa Saleh
778c15c2a0SMostafa Saleh	/* Restore host regs x0-x17 */
784e3393a9SAndrew Scull	ldp	x0, x1,   [x29, #CPU_XREG_OFFSET(0)]
794e3393a9SAndrew Scull	ldp	x2, x3,   [x29, #CPU_XREG_OFFSET(2)]
804e3393a9SAndrew Scull	ldp	x4, x5,   [x29, #CPU_XREG_OFFSET(4)]
814e3393a9SAndrew Scull	ldp	x6, x7,   [x29, #CPU_XREG_OFFSET(6)]
82a2e102e2SAndrew Scull
83a2e102e2SAndrew Scull	/* x0-7 are used for panic arguments */
84a2e102e2SAndrew Scull__host_enter_for_panic:
854e3393a9SAndrew Scull	ldp	x8, x9,   [x29, #CPU_XREG_OFFSET(8)]
864e3393a9SAndrew Scull	ldp	x10, x11, [x29, #CPU_XREG_OFFSET(10)]
874e3393a9SAndrew Scull	ldp	x12, x13, [x29, #CPU_XREG_OFFSET(12)]
884e3393a9SAndrew Scull	ldp	x14, x15, [x29, #CPU_XREG_OFFSET(14)]
894e3393a9SAndrew Scull	ldp	x16, x17, [x29, #CPU_XREG_OFFSET(16)]
904e3393a9SAndrew Scull
914e3393a9SAndrew Scull	/* Restore host regs x18-x29, lr */
924e3393a9SAndrew Scull	restore_callee_saved_regs x29
934e3393a9SAndrew Scull
944e3393a9SAndrew Scull	/* Do not touch any register after this! */
95a2e102e2SAndrew Scull__host_enter_without_restoring:
964e3393a9SAndrew Scull	eret
974e3393a9SAndrew Scull	sb	// speculation barrier: no speculative execution past the eret
984e3393a9SAndrew ScullSYM_FUNC_END(__host_exit)
994e3393a9SAndrew Scull
100a2e102e2SAndrew Scull/*
10104e05f05SDavid Brazdil * void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
 *
 * Restore the full host register state from @host_ctxt and eret to the
 * host, by tail-calling into the restore path of __host_exit.
10204e05f05SDavid Brazdil */
10304e05f05SDavid BrazdilSYM_FUNC_START(__host_enter)
10404e05f05SDavid Brazdil	mov	x29, x0		// restore path expects the ctxt pointer in x29
10504e05f05SDavid Brazdil	b	__host_enter_restore_full
10604e05f05SDavid BrazdilSYM_FUNC_END(__host_enter)
10704e05f05SDavid Brazdil
10804e05f05SDavid Brazdil/*
109c4b000c3SAndrew Scull * void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
110c4b000c3SAndrew Scull * 				  u64 elr, u64 par);
 *
 * Exit to the host's panic handler. A NULL @host_ctxt means the host
 * context is not restored (see invalid_host_el1_vect).
111a2e102e2SAndrew Scull */
1126e3bfbb2SAndrew ScullSYM_FUNC_START(__hyp_do_panic)
113a2e102e2SAndrew Scull	/* Prepare and exit to the host's panic function. */
	/* Exception return state: EL1h with all DAIF bits masked. */
1146e3bfbb2SAndrew Scull	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
1156e3bfbb2SAndrew Scull		      PSR_MODE_EL1h)
1166e3bfbb2SAndrew Scull	msr	spsr_el2, lr
	/* ELR = nvhe_hyp_panic_handler, translated to a kernel image VA. */
1177ee74cc7SArd Biesheuvel	adr_l	lr, nvhe_hyp_panic_handler
11897cbd2fcSDavid Brazdil	hyp_kimg_va lr, x6
1196e3bfbb2SAndrew Scull	msr	elr_el2, lr
120a2e102e2SAndrew Scull
121c4b000c3SAndrew Scull	mov	x29, x0		// x29 = host_ctxt; NULL means "don't restore"
122c4b000c3SAndrew Scull
123ccac9697SWill Deacon#ifdef CONFIG_NVHE_EL2_DEBUG
124ccac9697SWill Deacon	/* Ensure host stage-2 is disabled */
125ccac9697SWill Deacon	mrs	x0, hcr_el2
126ccac9697SWill Deacon	bic	x0, x0, #HCR_VM
127ccac9697SWill Deacon	msr	hcr_el2, x0
128ccac9697SWill Deacon	isb
129ccac9697SWill Deacon	tlbi	vmalls12e1
130ccac9697SWill Deacon	dsb	nsh
131ccac9697SWill Deacon#endif
132ccac9697SWill Deacon
133aec0fae6SAndrew Scull	/* Load the panic arguments into x0-7 */
134aec0fae6SAndrew Scull	mrs	x0, esr_el2
135ccac9697SWill Deacon	mov	x4, x3		// x4 = par (incoming 4th argument)
136ccac9697SWill Deacon	mov	x3, x2		// x3 = elr (incoming 3rd argument) ...
137ccac9697SWill Deacon	hyp_pa	x3, x6		// ... converted to a physical address
138ccac9697SWill Deacon	get_vcpu_ptr x5, x6
139ccac9697SWill Deacon	mrs	x6, far_el2
140ccac9697SWill Deacon	mrs	x7, hpfar_el2
14197cbd2fcSDavid Brazdil
14297cbd2fcSDavid Brazdil	/* Enter the host, conditionally restoring the host context. */
143c4b000c3SAndrew Scull	cbz	x29, __host_enter_without_restoring
144a2e102e2SAndrew Scull	b	__host_enter_for_panic
1456e3bfbb2SAndrew ScullSYM_FUNC_END(__hyp_do_panic)
1466e3bfbb2SAndrew Scull
/*
 * Handle an HVC from the host: stub HVC calls are forwarded to the idmap
 * stub handler, everything else is treated as a regular host exit.
 * Entered from host_el1_sync_vect with the host's x0/x1 still on the stack.
 */
1478579a185SWill DeaconSYM_FUNC_START(__host_hvc)
14883fa381fSMarc Zyngier	ldp	x0, x1, [sp]		// Don't fixup the stack yet
14983fa381fSMarc Zyngier
1508579a185SWill Deacon	/* No stub for you, sonny Jim */
1518579a185SWill Deaconalternative_if ARM64_KVM_PROTECTED_MODE
1528579a185SWill Deacon	b	__host_exit
1538579a185SWill Deaconalternative_else_nop_endif
1548579a185SWill Deacon
1556e3bfbb2SAndrew Scull	/* Check for a stub HVC call */
1566e3bfbb2SAndrew Scull	cmp	x0, #HVC_STUB_HCALL_NR
1574e3393a9SAndrew Scull	b.hs	__host_exit		// not a stub call: regular host exit
1586e3bfbb2SAndrew Scull
15983fa381fSMarc Zyngier	add	sp, sp, #16		// drop the saved x0/x1
1606e3bfbb2SAndrew Scull	/*
1616e3bfbb2SAndrew Scull	 * Compute the idmap address of __kvm_handle_stub_hvc and
1627ee74cc7SArd Biesheuvel	 * jump there.
1636e3bfbb2SAndrew Scull	 *
1646e3bfbb2SAndrew Scull	 * Preserve x0-x4, which may contain stub parameters.
1656e3bfbb2SAndrew Scull	 */
1667ee74cc7SArd Biesheuvel	adr_l	x5, __kvm_handle_stub_hvc
16797cbd2fcSDavid Brazdil	hyp_pa	x5, x6
1686e3bfbb2SAndrew Scull	br	x5
1698579a185SWill DeaconSYM_FUNC_END(__host_hvc)
1708579a185SWill Deacon
/*
 * Vector entry for synchronous exceptions taken from EL1/EL0: route
 * HVC64 exceptions to __host_hvc and everything else to __host_exit.
 * x0/x1 are stashed on the stack so x0 can be used for the ESR check.
 * The .error below guarantees the macro fits in one 128-byte vector slot.
 */
1718579a185SWill Deacon.macro host_el1_sync_vect
1728579a185SWill Deacon	.align 7
1738579a185SWill Deacon.L__vect_start\@:
1748579a185SWill Deacon	stp	x0, x1, [sp, #-16]!
1758579a185SWill Deacon	mrs	x0, esr_el2
1768bb08411SMark Rutland	ubfx	x0, x0, #ESR_ELx_EC_SHIFT, #ESR_ELx_EC_WIDTH	// x0 = exception class
1778579a185SWill Deacon	cmp	x0, #ESR_ELx_EC_HVC64
1788579a185SWill Deacon	b.eq	__host_hvc
1798579a185SWill Deacon	b	__host_exit
1806e3bfbb2SAndrew Scull.L__vect_end\@:
1816e3bfbb2SAndrew Scull.if ((.L__vect_end\@ - .L__vect_start\@) > 0x80)
1826e3bfbb2SAndrew Scull	.error "host_el1_sync_vect larger than vector entry"
1836e3bfbb2SAndrew Scull.endif
1846e3bfbb2SAndrew Scull.endm
1856e3bfbb2SAndrew Scull
/*
 * Vector entry for exceptions taken from EL2 itself. These are never
 * expected, so this path always panics; it first detects a hyp stack
 * overflow (without clobbering any GPR) and switches to the overflow
 * stack in that case.
 */
186a2e102e2SAndrew Scull.macro invalid_host_el2_vect
1876e3bfbb2SAndrew Scull	.align 7
18866de19faSKalesh Singh
18966de19faSKalesh Singh	/*
19066de19faSKalesh Singh	 * Test whether the SP has overflowed, without corrupting a GPR.
19166de19faSKalesh Singh	 * nVHE hypervisor stacks are aligned so that the PAGE_SHIFT bit
19266de19faSKalesh Singh	 * of SP should always be 1.
19366de19faSKalesh Singh	 */
19466de19faSKalesh Singh	add	sp, sp, x0			// sp' = sp + x0
19566de19faSKalesh Singh	sub	x0, sp, x0			// x0' = sp' - x0 = (sp + x0) - x0 = sp
19666de19faSKalesh Singh	tbz	x0, #PAGE_SHIFT, .L__hyp_sp_overflow\@
19766de19faSKalesh Singh	sub	x0, sp, x0			// x0'' = sp' - x0' = (sp + x0) - sp = x0
19866de19faSKalesh Singh	sub	sp, sp, x0			// sp'' = sp' - x0 = (sp + x0) - x0 = sp
19966de19faSKalesh Singh
2007db21530SAndrew Scull	/* If a guest is loaded, panic out of it. */
2017db21530SAndrew Scull	stp	x0, x1, [sp, #-16]!
2027db21530SAndrew Scull	get_loaded_vcpu x0, x1
2037db21530SAndrew Scull	cbnz	x0, __guest_exit_panic
2047db21530SAndrew Scull	add	sp, sp, #16
205a2e102e2SAndrew Scull
206a2e102e2SAndrew Scull	/*
207a2e102e2SAndrew Scull	 * The panic may not be clean if the exception is taken before the host
208a2e102e2SAndrew Scull	 * context has been saved by __host_exit or after the hyp context has
209a2e102e2SAndrew Scull	 * been partially clobbered by __host_enter.
210a2e102e2SAndrew Scull	 */
2116e3bfbb2SAndrew Scull	b	hyp_panic
21266de19faSKalesh Singh
21366de19faSKalesh Singh.L__hyp_sp_overflow\@:
214548ec333SKalesh Singh	/* Switch to the overflow stack */
215548ec333SKalesh Singh	adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0
21666de19faSKalesh Singh
21766de19faSKalesh Singh	b	hyp_panic_bad_stack
21866de19faSKalesh Singh	ASM_BUG()
2196e3bfbb2SAndrew Scull.endm
2206e3bfbb2SAndrew Scull
/*
 * Vector entry for unexpected (non-synchronous) exceptions taken from
 * EL1/EL0: panic via __hyp_do_panic without restoring the host context
 * (host_ctxt argument is NULL).
 */
221a2e102e2SAndrew Scull.macro invalid_host_el1_vect
222a2e102e2SAndrew Scull	.align 7
223a2e102e2SAndrew Scull	mov	x0, xzr		/* restore_host = false */
224a2e102e2SAndrew Scull	mrs	x1, spsr_el2
225a2e102e2SAndrew Scull	mrs	x2, elr_el2
226a2e102e2SAndrew Scull	mrs	x3, par_el1
227a2e102e2SAndrew Scull	b	__hyp_do_panic
228a2e102e2SAndrew Scull.endm
229a2e102e2SAndrew Scull
2306e3bfbb2SAndrew Scull/*
231472fc011SAndrew Scull * The host vector does not use an ESB instruction in order to avoid consuming
232472fc011SAndrew Scull * SErrors that should only be consumed by the host. Guest entry is deferred by
233472fc011SAndrew Scull * __guest_enter if there are any pending asynchronous exceptions so hyp will
234472fc011SAndrew Scull * always return to the host without having consumed host SErrors.
235472fc011SAndrew Scull *
2366e3bfbb2SAndrew Scull * CONFIG_KVM_INDIRECT_VECTORS is not applied to the host vectors because the
2376e3bfbb2SAndrew Scull * host knows about the EL2 vectors already, and there is no point in hiding
2386e3bfbb2SAndrew Scull * them.
2396e3bfbb2SAndrew Scull */
/* 16 vector slots of 128 bytes each (.align 7), table aligned to 2K (.align 11). */
2406e3bfbb2SAndrew Scull	.align 11
2416e3bfbb2SAndrew ScullSYM_CODE_START(__kvm_hyp_host_vector)
242a2e102e2SAndrew Scull	invalid_host_el2_vect			// Synchronous EL2t
243a2e102e2SAndrew Scull	invalid_host_el2_vect			// IRQ EL2t
244a2e102e2SAndrew Scull	invalid_host_el2_vect			// FIQ EL2t
245a2e102e2SAndrew Scull	invalid_host_el2_vect			// Error EL2t
2466e3bfbb2SAndrew Scull
247a2e102e2SAndrew Scull	invalid_host_el2_vect			// Synchronous EL2h
248a2e102e2SAndrew Scull	invalid_host_el2_vect			// IRQ EL2h
249a2e102e2SAndrew Scull	invalid_host_el2_vect			// FIQ EL2h
250a2e102e2SAndrew Scull	invalid_host_el2_vect			// Error EL2h
2516e3bfbb2SAndrew Scull
2522a50fc5fSWill Deacon	host_el1_sync_vect			// Synchronous 64-bit EL1/EL0
2532a50fc5fSWill Deacon	invalid_host_el1_vect			// IRQ 64-bit EL1/EL0
2542a50fc5fSWill Deacon	invalid_host_el1_vect			// FIQ 64-bit EL1/EL0
2552a50fc5fSWill Deacon	invalid_host_el1_vect			// Error 64-bit EL1/EL0
2566e3bfbb2SAndrew Scull
2572a50fc5fSWill Deacon	host_el1_sync_vect			// Synchronous 32-bit EL1/EL0
2582a50fc5fSWill Deacon	invalid_host_el1_vect			// IRQ 32-bit EL1/EL0
2592a50fc5fSWill Deacon	invalid_host_el1_vect			// FIQ 32-bit EL1/EL0
2602a50fc5fSWill Deacon	invalid_host_el1_vect			// Error 32-bit EL1/EL0
2616e3bfbb2SAndrew ScullSYM_CODE_END(__kvm_hyp_host_vector)
262a805e1fbSDavid Brazdil
263a805e1fbSDavid Brazdil/*
264a805e1fbSDavid Brazdil * Forward SMC with arguments in struct kvm_cpu_context, and
265a805e1fbSDavid Brazdil * store the result into the same struct. Assumes SMCCC 1.2 or older.
266a805e1fbSDavid Brazdil *
267a805e1fbSDavid Brazdil * x0: struct kvm_cpu_context*
268a805e1fbSDavid Brazdil */
269a805e1fbSDavid BrazdilSYM_CODE_START(__kvm_hyp_host_forward_smc)
270a805e1fbSDavid Brazdil	/*
271a805e1fbSDavid Brazdil	 * Use x18 to keep the pointer to the host context because
272a805e1fbSDavid Brazdil	 * x18 is callee-saved in SMCCC but not in AAPCS64.
273a805e1fbSDavid Brazdil	 */
274a805e1fbSDavid Brazdil	mov	x18, x0
275a805e1fbSDavid Brazdil
	/* Load the SMC arguments x0-x17 from the context. */
276a805e1fbSDavid Brazdil	ldp	x0, x1,   [x18, #CPU_XREG_OFFSET(0)]
277a805e1fbSDavid Brazdil	ldp	x2, x3,   [x18, #CPU_XREG_OFFSET(2)]
278a805e1fbSDavid Brazdil	ldp	x4, x5,   [x18, #CPU_XREG_OFFSET(4)]
279a805e1fbSDavid Brazdil	ldp	x6, x7,   [x18, #CPU_XREG_OFFSET(6)]
280a805e1fbSDavid Brazdil	ldp	x8, x9,   [x18, #CPU_XREG_OFFSET(8)]
281a805e1fbSDavid Brazdil	ldp	x10, x11, [x18, #CPU_XREG_OFFSET(10)]
282a805e1fbSDavid Brazdil	ldp	x12, x13, [x18, #CPU_XREG_OFFSET(12)]
283a805e1fbSDavid Brazdil	ldp	x14, x15, [x18, #CPU_XREG_OFFSET(14)]
284a805e1fbSDavid Brazdil	ldp	x16, x17, [x18, #CPU_XREG_OFFSET(16)]
285a805e1fbSDavid Brazdil
286a805e1fbSDavid Brazdil	smc	#0
287a805e1fbSDavid Brazdil
	/* Store the results (x0-x17) back into the same context. */
288a805e1fbSDavid Brazdil	stp	x0, x1,   [x18, #CPU_XREG_OFFSET(0)]
289a805e1fbSDavid Brazdil	stp	x2, x3,   [x18, #CPU_XREG_OFFSET(2)]
290a805e1fbSDavid Brazdil	stp	x4, x5,   [x18, #CPU_XREG_OFFSET(4)]
291a805e1fbSDavid Brazdil	stp	x6, x7,   [x18, #CPU_XREG_OFFSET(6)]
292a805e1fbSDavid Brazdil	stp	x8, x9,   [x18, #CPU_XREG_OFFSET(8)]
293a805e1fbSDavid Brazdil	stp	x10, x11, [x18, #CPU_XREG_OFFSET(10)]
294a805e1fbSDavid Brazdil	stp	x12, x13, [x18, #CPU_XREG_OFFSET(12)]
295a805e1fbSDavid Brazdil	stp	x14, x15, [x18, #CPU_XREG_OFFSET(14)]
296a805e1fbSDavid Brazdil	stp	x16, x17, [x18, #CPU_XREG_OFFSET(16)]
297a805e1fbSDavid Brazdil
298a805e1fbSDavid Brazdil	ret
299a805e1fbSDavid BrazdilSYM_CODE_END(__kvm_hyp_host_forward_smc)
300*dcf89d11SMostafa Saleh
301*dcf89d11SMostafa Saleh/*
302*dcf89d11SMostafa Saleh * kvm_host_psci_cpu_entry is called through a br instruction, which requires
303*dcf89d11SMostafa Saleh * a bti j landing pad, as compilers (gcc and llvm) do not insert bti j for
304*dcf89d11SMostafa Saleh * external functions, only bti c.
305*dcf89d11SMostafa Saleh */
306*dcf89d11SMostafa SalehSYM_CODE_START(kvm_host_psci_cpu_entry)
307*dcf89d11SMostafa Saleh       bti j		// landing pad for the indirect (br) caller
308*dcf89d11SMostafa Saleh       b __kvm_host_psci_cpu_entry	// tail-call the real entry point
309*dcf89d11SMostafa SalehSYM_CODE_END(kvm_host_psci_cpu_entry)
310