/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/fpsimdmacros.h>
#include <asm/kvm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_mte.h>
#include <asm/kvm_ptrauth.h>

	.text

/*
 * u64 __guest_enter(struct kvm_vcpu *vcpu);
 */
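/*
 * Rough sketch of the call site, for orientation only (the exact loop
 * lives in the VHE/nVHE switch code and may differ in detail):
 *
 *	do {
 *		exit_code = __guest_enter(vcpu);
 *	} while (fixup_guest_exit(vcpu, &exit_code));
 */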
SYM_FUNC_START(__guest_enter)
	// x0: vcpu
	// x1-x17: clobbered by macros
	// x29: guest context

	adr_this_cpu x1, kvm_hyp_ctxt, x2

	// Store the hyp regs
	save_callee_saved_regs x1

	// Save hyp's sp_el0
	save_sp_el0	x1, x2
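	// (sp_el0 is not part of the callee-saved batch; the kernel normally
	// keeps 'current' there, so it is saved and restored explicitly.)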

	// Now the hyp state is stored; if we have a pending RAS SError it must
	// affect the host or hyp. If any asynchronous exception is pending we
	// defer the guest entry. The DSB isn't necessary before v8.2 as any
	// SError would be fatal.
alternative_if ARM64_HAS_RAS_EXTN
	dsb	nshst
	isb
alternative_else_nop_endif
	mrs	x1, isr_el1
	cbz	x1, 1f
	mov	x0, #ARM_EXCEPTION_IRQ
	ret

1:
	set_loaded_vcpu x0, x1, x2
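	// Record the vcpu we are about to run: if an exception is taken from
	// now on, __guest_exit_panic can tell that guest state (rather than
	// the hyp context) is loaded.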

	add	x29, x0, #VCPU_CONTEXT

	// mte_switch_to_guest(g_ctxt, h_ctxt, tmp1)
	mte_switch_to_guest x29, x1, x2

	// Macro ptrauth_switch_to_guest format:
	// 	ptrauth_switch_to_guest(guest cxt, tmp1, tmp2, tmp3)
	// Restoring the guest keys is done in assembly rather than C: when
	// Pointer Authentication is enabled for kernel code, a C
	// implementation would itself be PAC-signed, and switching the keys
	// under it may cause key signing mismatch errors.
	ptrauth_switch_to_guest x29, x0, x1, x2

	// Restore the guest's sp_el0
	restore_sp_el0 x29, x0

	// Restore guest regs x0-x17
	ldp	x0, x1,   [x29, #CPU_XREG_OFFSET(0)]
	ldp	x2, x3,   [x29, #CPU_XREG_OFFSET(2)]
	ldp	x4, x5,   [x29, #CPU_XREG_OFFSET(4)]
	ldp	x6, x7,   [x29, #CPU_XREG_OFFSET(6)]
	ldp	x8, x9,   [x29, #CPU_XREG_OFFSET(8)]
	ldp	x10, x11, [x29, #CPU_XREG_OFFSET(10)]
	ldp	x12, x13, [x29, #CPU_XREG_OFFSET(12)]
	ldp	x14, x15, [x29, #CPU_XREG_OFFSET(14)]
	ldp	x16, x17, [x29, #CPU_XREG_OFFSET(16)]

	// Restore guest regs x18-x29, lr
	restore_callee_saved_regs x29

	// Do not touch any register after this!
	eret
	sb
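	// The sb above is a speculation barrier: it stops straight-line
	// speculation from running past the eret with guest state loaded.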

SYM_INNER_LABEL(__guest_exit_panic, SYM_L_GLOBAL)
	// x2-x29,lr: vcpu regs
	// vcpu x0-x1 on the stack

	// If the hyp context is loaded, go straight to hyp_panic
	get_loaded_vcpu x0, x1
	cbnz	x0, 1f
	b	hyp_panic

1:
	// The hyp context is saved so make sure it is restored to allow
	// hyp_panic to run at hyp and, subsequently, panic to run in the host.
	// This makes use of __guest_exit to avoid duplication but sets the
	// return address to tail call into hyp_panic. As a side effect, the
	// current state is saved to the guest context but it will only be
	// accurate if the guest had been completely restored.
	adr_this_cpu x0, kvm_hyp_ctxt, x1
	adr_l	x1, hyp_panic
	str	x1, [x0, #CPU_XREG_OFFSET(30)]
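	// With hyp_panic planted in the lr slot of the hyp context,
	// restore_callee_saved_regs in __guest_exit loads it into lr, so the
	// final ret below becomes a tail call into hyp_panic.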

	get_vcpu_ptr	x1, x0

SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
	// x0: return code
	// x1: vcpu
	// x2-x29,lr: vcpu regs
	// vcpu x0-x1 on the stack

	add	x1, x1, #VCPU_CONTEXT

	ALTERNATIVE(nop, SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
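	// PSTATE.PAN may still hold the guest's value here (taking the
	// exception does not reset it in all configurations), so set it
	// again explicitly before hyp touches any memory.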

	// Store the guest regs x2 and x3
	stp	x2, x3,   [x1, #CPU_XREG_OFFSET(2)]

	// Retrieve the guest regs x0-x1 from the stack
	ldp	x2, x3, [sp], #16	// x0, x1

	// Store the guest regs x0-x1 and x4-x17
	stp	x2, x3,   [x1, #CPU_XREG_OFFSET(0)]
	stp	x4, x5,   [x1, #CPU_XREG_OFFSET(4)]
	stp	x6, x7,   [x1, #CPU_XREG_OFFSET(6)]
	stp	x8, x9,   [x1, #CPU_XREG_OFFSET(8)]
	stp	x10, x11, [x1, #CPU_XREG_OFFSET(10)]
	stp	x12, x13, [x1, #CPU_XREG_OFFSET(12)]
	stp	x14, x15, [x1, #CPU_XREG_OFFSET(14)]
	stp	x16, x17, [x1, #CPU_XREG_OFFSET(16)]

	// Store the guest regs x18-x29, lr
	save_callee_saved_regs x1

	// Store the guest's sp_el0
	save_sp_el0	x1, x2

	adr_this_cpu x2, kvm_hyp_ctxt, x3

	// Macro ptrauth_switch_to_hyp format:
	// 	ptrauth_switch_to_hyp(guest cxt, host cxt, tmp1, tmp2, tmp3)
	// As on entry, saving/restoring the keys is done in assembly rather
	// than C, since a C implementation may cause Pointer Authentication
	// key signing mismatch errors when the feature is enabled for
	// kernel code.
	ptrauth_switch_to_hyp x1, x2, x3, x4, x5

	// mte_switch_to_hyp(g_ctxt, h_ctxt, reg1)
	mte_switch_to_hyp x1, x2, x3

	// Restore hyp's sp_el0
	restore_sp_el0 x2, x3

	// Now restore the hyp regs
	restore_callee_saved_regs x2

	set_loaded_vcpu xzr, x2, x3
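	// The hyp context is live again; clear the loaded-vcpu tracking so
	// that a later __guest_exit_panic branches straight to hyp_panic.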

alternative_if ARM64_HAS_RAS_EXTN
	// If we have the RAS extensions we can consume a pending error
	// without an unmask-SError and isb. The ESB instruction consumed any
	// pending guest error when we took the exception from the guest.
	mrs_s	x2, SYS_DISR_EL1
	str	x2, [x1, #(VCPU_FAULT_DISR - VCPU_CONTEXT)]
	cbz	x2, 1f
	msr_s	SYS_DISR_EL1, xzr
	orr	x0, x0, #(1<<ARM_EXIT_WITH_SERROR_BIT)
1:	ret
alternative_else
	dsb	sy		// Synchronize against in-flight ld/st
	isb			// Prevent an early read of side-effect free ISR
	mrs	x2, isr_el1
	tbnz	x2, #ISR_EL1_A_SHIFT, 2f
	ret
	nop
2:
alternative_endif
	// We know we have a pending asynchronous abort, now is the
	// time to flush it out. From your VAXorcist book, page 666:
	// "Threaten me not, oh Evil one!  For I speak with
	// the power of DEC, and I command thee to show thyself!"
	mrs	x2, elr_el2
	mrs	x3, esr_el2
	mrs	x4, spsr_el2
	mov	x5, x0
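	// Stash the guest-exit context and the exit code first: the SError
	// we are about to take at EL2 will clobber elr_el2, esr_el2 and
	// spsr_el2.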

	msr	daifclr, #4	// Unmask aborts

	// This is our single instruction exception window. A pending
	// SError is guaranteed to occur at the earliest when we unmask
	// it, and at the latest just after the ISB.
abort_guest_exit_start:

	isb

abort_guest_exit_end:

	msr	daifset, #4	// Mask aborts
	ret

	_kvm_extable	abort_guest_exit_start, 9997f
	_kvm_extable	abort_guest_exit_end, 9997f
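	// If the SError is taken inside the window above, the hyp vectors
	// use these extable entries to resume execution at 9997 below.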
9997:
	msr	daifset, #4	// Mask aborts
	mov	x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)

	// Restore the exception context taken from the guest (clobbered by
	// the SError we just consumed) so that we can report some
	// information. Merge the exception code with the SError pending bit.
	msr	elr_el2, x2
	msr	esr_el2, x3
	msr	spsr_el2, x4
	orr	x0, x0, x5
1:	ret
SYM_FUNC_END(__guest_enter)