xref: /openbmc/linux/arch/arm64/kvm/hyp/entry.S (revision 14ef9d04)
1caab277bSThomas Gleixner/* SPDX-License-Identifier: GPL-2.0-only */
2b97b66c1SMarc Zyngier/*
3b97b66c1SMarc Zyngier * Copyright (C) 2015 - ARM Ltd
4b97b66c1SMarc Zyngier * Author: Marc Zyngier <marc.zyngier@arm.com>
5b97b66c1SMarc Zyngier */
6b97b66c1SMarc Zyngier
7b97b66c1SMarc Zyngier#include <linux/linkage.h>
8b97b66c1SMarc Zyngier
95dcd0fdbSJames Morse#include <asm/alternative.h>
10b97b66c1SMarc Zyngier#include <asm/assembler.h>
11b97b66c1SMarc Zyngier#include <asm/fpsimdmacros.h>
12b97b66c1SMarc Zyngier#include <asm/kvm.h>
13b97b66c1SMarc Zyngier#include <asm/kvm_arm.h>
14b97b66c1SMarc Zyngier#include <asm/kvm_asm.h>
15b97b66c1SMarc Zyngier#include <asm/kvm_mmu.h>
16384b40caSMark Rutland#include <asm/kvm_ptrauth.h>
17b97b66c1SMarc Zyngier
18b97b66c1SMarc Zyngier	.text
19b97b66c1SMarc Zyngier
20af123768SArd Biesheuvel/*
21b619d9aaSAndrew Scull * u64 __guest_enter(struct kvm_vcpu *vcpu);
22b97b66c1SMarc Zyngier */
236645d854SMark BrownSYM_FUNC_START(__guest_enter)
24b97b66c1SMarc Zyngier	// x0: vcpu
25b619d9aaSAndrew Scull	// x1-x17: clobbered by macros
26af123768SArd Biesheuvel	// x29: guest context
27b97b66c1SMarc Zyngier
	// x1 = this CPU's hyp context (per-CPU kvm_hyp_ctxt); x2 is scratch.
2814ef9d04SMarc Zyngier	adr_this_cpu x1, kvm_hyp_ctxt, x2
29b619d9aaSAndrew Scull
307c2e76d8SAndrew Scull	// Store the hyp regs
31b97b66c1SMarc Zyngier	save_callee_saved_regs x1
32b97b66c1SMarc Zyngier
337c2e76d8SAndrew Scull	// Save hyp's sp_el0
346e977984SMarc Zyngier	save_sp_el0	x1, x2
356e977984SMarc Zyngier
367c2e76d8SAndrew Scull	// Now the hyp state is stored if we have a pending RAS SError it must
377c2e76d8SAndrew Scull	// affect the host or hyp. If any asynchronous exception is pending we
387c2e76d8SAndrew Scull	// defer the guest entry. The DSB isn't necessary before v8.2 as any
397c2e76d8SAndrew Scull	// SError would be fatal.
405dcd0fdbSJames Morsealternative_if ARM64_HAS_RAS_EXTN
415dcd0fdbSJames Morse	dsb	nshst
425dcd0fdbSJames Morse	isb
435dcd0fdbSJames Morsealternative_else_nop_endif
	// If ISR_EL1 shows any pending asynchronous exception, abort the
	// entry and report an IRQ exit code to the caller instead.
445dcd0fdbSJames Morse	mrs	x1, isr_el1
455dcd0fdbSJames Morse	cbz	x1,  1f
465dcd0fdbSJames Morse	mov	x0, #ARM_EXCEPTION_IRQ
475dcd0fdbSJames Morse	ret
485dcd0fdbSJames Morse
495dcd0fdbSJames Morse1:
	// Record x0 (the vcpu) as this CPU's loaded vCPU; x1/x2 are scratch.
507db21530SAndrew Scull	set_loaded_vcpu x0, x1, x2
517db21530SAndrew Scull
	// x29 = &vcpu->arch.ctxt, used as the guest-context base from here on.
52af123768SArd Biesheuvel	add	x29, x0, #VCPU_CONTEXT
53b97b66c1SMarc Zyngier
54384b40caSMark Rutland	// Macro ptrauth_switch_to_guest format:
55384b40caSMark Rutland	// 	ptrauth_switch_to_guest(guest cxt, tmp1, tmp2, tmp3)
56384b40caSMark Rutland	// The below macro to restore guest keys is not implemented in C code
57384b40caSMark Rutland	// as it may cause Pointer Authentication key signing mismatch errors
58384b40caSMark Rutland	// when this feature is enabled for kernel code.
59af123768SArd Biesheuvel	ptrauth_switch_to_guest x29, x0, x1, x2
60384b40caSMark Rutland
616e977984SMarc Zyngier	// Restore the guest's sp_el0
626e977984SMarc Zyngier	restore_sp_el0 x29, x0
636e977984SMarc Zyngier
6468381b2bSShanker Donthineni	// Restore guest regs x0-x17
65af123768SArd Biesheuvel	ldp	x0, x1,   [x29, #CPU_XREG_OFFSET(0)]
66af123768SArd Biesheuvel	ldp	x2, x3,   [x29, #CPU_XREG_OFFSET(2)]
67af123768SArd Biesheuvel	ldp	x4, x5,   [x29, #CPU_XREG_OFFSET(4)]
68af123768SArd Biesheuvel	ldp	x6, x7,   [x29, #CPU_XREG_OFFSET(6)]
69af123768SArd Biesheuvel	ldp	x8, x9,   [x29, #CPU_XREG_OFFSET(8)]
70af123768SArd Biesheuvel	ldp	x10, x11, [x29, #CPU_XREG_OFFSET(10)]
71af123768SArd Biesheuvel	ldp	x12, x13, [x29, #CPU_XREG_OFFSET(12)]
72af123768SArd Biesheuvel	ldp	x14, x15, [x29, #CPU_XREG_OFFSET(14)]
73af123768SArd Biesheuvel	ldp	x16, x17, [x29, #CPU_XREG_OFFSET(16)]
74b97b66c1SMarc Zyngier
75af123768SArd Biesheuvel	// Restore guest regs x18-x29, lr
76af123768SArd Biesheuvel	restore_callee_saved_regs x29
77b97b66c1SMarc Zyngier
78b97b66c1SMarc Zyngier	// Do not touch any register after this!
79b97b66c1SMarc Zyngier	eret
	// Speculation barrier: stop straight-line speculation past the eret.
80679db708SWill Deacon	sb
81b97b66c1SMarc Zyngier
827db21530SAndrew ScullSYM_INNER_LABEL(__guest_exit_panic, SYM_L_GLOBAL)
837db21530SAndrew Scull	// x2-x29,lr: vcpu regs
847db21530SAndrew Scull	// vcpu x0-x1 on the stack
857db21530SAndrew Scull
867db21530SAndrew Scull	// If the hyp context is loaded, go straight to hyp_panic
877db21530SAndrew Scull	get_loaded_vcpu x0, x1
887db21530SAndrew Scull	cbz	x0, hyp_panic
897db21530SAndrew Scull
907db21530SAndrew Scull	// The hyp context is saved so make sure it is restored to allow
917db21530SAndrew Scull	// hyp_panic to run at hyp and, subsequently, panic to run in the host.
927db21530SAndrew Scull	// This makes use of __guest_exit to avoid duplication but sets the
937db21530SAndrew Scull	// return address to tail call into hyp_panic. As a side effect, the
947db21530SAndrew Scull	// current state is saved to the guest context but it will only be
957db21530SAndrew Scull	// accurate if the guest had been completely restored.
9614ef9d04SMarc Zyngier	adr_this_cpu x0, kvm_hyp_ctxt, x1
	// Overwrite the saved lr slot (x30) in the hyp context so that the
	// "ret" at the end of __guest_exit lands in hyp_panic.
977db21530SAndrew Scull	adr	x1, hyp_panic
987db21530SAndrew Scull	str	x1, [x0, #CPU_XREG_OFFSET(30)]
997db21530SAndrew Scull
1007db21530SAndrew Scull	get_vcpu_ptr	x1, x0
1017db21530SAndrew Scull
1026645d854SMark BrownSYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
10368381b2bSShanker Donthineni	// x0: return code
10468381b2bSShanker Donthineni	// x1: vcpu
10568381b2bSShanker Donthineni	// x2-x29,lr: vcpu regs
10668381b2bSShanker Donthineni	// vcpu x0-x1 on the stack
107b97b66c1SMarc Zyngier
	// x1 = &vcpu->arch.ctxt (guest context base for the stores below).
10868381b2bSShanker Donthineni	add	x1, x1, #VCPU_CONTEXT
109b97b66c1SMarc Zyngier
	// Set PSTATE.PAN=1 again now we are back in hyp (nop without PAN).
110cb96408dSVladimir Murzin	ALTERNATIVE(nop, SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
111cb96408dSVladimir Murzin
11268381b2bSShanker Donthineni	// Store the guest regs x2 and x3
11368381b2bSShanker Donthineni	stp	x2, x3,   [x1, #CPU_XREG_OFFSET(2)]
114b97b66c1SMarc Zyngier
11568381b2bSShanker Donthineni	// Retrieve the guest regs x0-x1 from the stack
11668381b2bSShanker Donthineni	ldp	x2, x3, [sp], #16	// x0, x1
117b97b66c1SMarc Zyngier
118af123768SArd Biesheuvel	// Store the guest regs x0-x1 and x4-x17
11968381b2bSShanker Donthineni	stp	x2, x3,   [x1, #CPU_XREG_OFFSET(0)]
12068381b2bSShanker Donthineni	stp	x4, x5,   [x1, #CPU_XREG_OFFSET(4)]
12168381b2bSShanker Donthineni	stp	x6, x7,   [x1, #CPU_XREG_OFFSET(6)]
12268381b2bSShanker Donthineni	stp	x8, x9,   [x1, #CPU_XREG_OFFSET(8)]
12368381b2bSShanker Donthineni	stp	x10, x11, [x1, #CPU_XREG_OFFSET(10)]
12468381b2bSShanker Donthineni	stp	x12, x13, [x1, #CPU_XREG_OFFSET(12)]
12568381b2bSShanker Donthineni	stp	x14, x15, [x1, #CPU_XREG_OFFSET(14)]
12668381b2bSShanker Donthineni	stp	x16, x17, [x1, #CPU_XREG_OFFSET(16)]
127b97b66c1SMarc Zyngier
128af123768SArd Biesheuvel	// Store the guest regs x18-x29, lr
12968381b2bSShanker Donthineni	save_callee_saved_regs x1
130b97b66c1SMarc Zyngier
1316e977984SMarc Zyngier	// Store the guest's sp_el0
1326e977984SMarc Zyngier	save_sp_el0	x1, x2
1336e977984SMarc Zyngier
	// x2 = this CPU's hyp context, saved on entry; x3 is scratch.
13414ef9d04SMarc Zyngier	adr_this_cpu x2, kvm_hyp_ctxt, x3
13568381b2bSShanker Donthineni
1367c2e76d8SAndrew Scull	// Macro ptrauth_switch_to_hyp format:
1377c2e76d8SAndrew Scull	// 	ptrauth_switch_to_hyp(guest cxt, host cxt, tmp1, tmp2, tmp3)
138384b40caSMark Rutland	// The below macro to save/restore keys is not implemented in C code
139384b40caSMark Rutland	// as it may cause Pointer Authentication key signing mismatch errors
140384b40caSMark Rutland	// when this feature is enabled for kernel code.
1417c2e76d8SAndrew Scull	ptrauth_switch_to_hyp x1, x2, x3, x4, x5
142384b40caSMark Rutland
1437c2e76d8SAndrew Scull	// Restore hyp's sp_el0
1446e977984SMarc Zyngier	restore_sp_el0 x2, x3
1456e977984SMarc Zyngier
1467c2e76d8SAndrew Scull	// Now restore the hyp regs
147b97b66c1SMarc Zyngier	restore_callee_saved_regs x2
148b97b66c1SMarc Zyngier
	// Clear the loaded-vCPU marker for this CPU (xzr = no vcpu loaded).
1497db21530SAndrew Scull	set_loaded_vcpu xzr, x1, x2
1507db21530SAndrew Scull
1510067df41SJames Morsealternative_if ARM64_HAS_RAS_EXTN
1520067df41SJames Morse	// If we have the RAS extensions we can consume a pending error
1530e5b9c08SJames Morse	// without an unmask-SError and isb. The ESB-instruction consumed any
1540e5b9c08SJames Morse	// pending guest error when we took the exception from the guest.
	// DISR_EL1 holds the deferred error record; save it in the vcpu,
	// clear it, and fold the SError bit into the exit code if non-zero.
1550067df41SJames Morse	mrs_s	x2, SYS_DISR_EL1
1560067df41SJames Morse	str	x2, [x1, #(VCPU_FAULT_DISR - VCPU_CONTEXT)]
1570067df41SJames Morse	cbz	x2, 1f
1580067df41SJames Morse	msr_s	SYS_DISR_EL1, xzr
1590067df41SJames Morse	orr	x0, x0, #(1<<ARM_EXIT_WITH_SERROR_BIT)
1600067df41SJames Morse1:	ret
1610067df41SJames Morsealternative_else
16211b41626SJames Morse	dsb	sy		// Synchronize against in-flight ld/st
16311b41626SJames Morse	isb			// Prevent an early read of side-effect free ISR
16411b41626SJames Morse	mrs	x2, isr_el1
16511b41626SJames Morse	tbnz	x2, #8, 2f	// ISR_EL1.A
16611b41626SJames Morse	ret
16711b41626SJames Morse	nop
16811b41626SJames Morse2:
16911b41626SJames Morsealternative_endif
17011b41626SJames Morse	// We know we have a pending asynchronous abort, now is the
17111b41626SJames Morse	// time to flush it out. From your VAXorcist book, page 666:
172395ea79eSMarc Zyngier	// "Threaten me not, oh Evil one!  For I speak with
173395ea79eSMarc Zyngier	// the power of DEC, and I command thee to show thyself!"
	// Stash the EL2 exception context (and the exit code in x5) so it
	// can be rebuilt after the SError we are about to take.
174395ea79eSMarc Zyngier	mrs	x2, elr_el2
175395ea79eSMarc Zyngier	mrs	x3, esr_el2
176395ea79eSMarc Zyngier	mrs	x4, spsr_el2
177395ea79eSMarc Zyngier	mov	x5, x0
178395ea79eSMarc Zyngier
179395ea79eSMarc Zyngier	msr	daifclr, #4	// Unmask aborts
180395ea79eSMarc Zyngier
181395ea79eSMarc Zyngier	// This is our single instruction exception window. A pending
182395ea79eSMarc Zyngier	// SError is guaranteed to occur at the earliest when we unmask
183395ea79eSMarc Zyngier	// it, and at the latest just after the ISB.
184395ea79eSMarc Zyngierabort_guest_exit_start:
185395ea79eSMarc Zyngier
186395ea79eSMarc Zyngier	isb
187395ea79eSMarc Zyngier
188395ea79eSMarc Zyngierabort_guest_exit_end:
189395ea79eSMarc Zyngier
190dad6321fSJames Morse	msr	daifset, #4	// Mask aborts
191e9ee186bSJames Morse	ret
192dad6321fSJames Morse
	// Exception-table fixups: an SError taken inside the window above
	// is redirected to the 9997 handler below.
193e9ee186bSJames Morse	_kvm_extable	abort_guest_exit_start, 9997f
194e9ee186bSJames Morse	_kvm_extable	abort_guest_exit_end, 9997f
195e9ee186bSJames Morse9997:
196e9ee186bSJames Morse	msr	daifset, #4	// Mask aborts
197e9ee186bSJames Morse	mov	x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
198e9ee186bSJames Morse
199e9ee186bSJames Morse	// restore the EL1 exception context so that we can report some
200e9ee186bSJames Morse	// information. Merge the exception code with the SError pending bit.
201395ea79eSMarc Zyngier	msr	elr_el2, x2
202395ea79eSMarc Zyngier	msr	esr_el2, x3
203395ea79eSMarc Zyngier	msr	spsr_el2, x4
204395ea79eSMarc Zyngier	orr	x0, x0, x5
205395ea79eSMarc Zyngier1:	ret
2066645d854SMark BrownSYM_FUNC_END(__guest_enter)
207