xref: /openbmc/linux/arch/arm64/kernel/entry.S (revision c3357fc5415d6e0c45c7c8987e06cf8f8b3f3a54)
1caab277bSThomas Gleixner/* SPDX-License-Identifier: GPL-2.0-only */
260ffc30dSCatalin Marinas/*
360ffc30dSCatalin Marinas * Low-level exception handling code
460ffc30dSCatalin Marinas *
560ffc30dSCatalin Marinas * Copyright (C) 2012 ARM Ltd.
660ffc30dSCatalin Marinas * Authors:	Catalin Marinas <catalin.marinas@arm.com>
760ffc30dSCatalin Marinas *		Will Deacon <will.deacon@arm.com>
860ffc30dSCatalin Marinas */
960ffc30dSCatalin Marinas
108e290624SMarc Zyngier#include <linux/arm-smccc.h>
1160ffc30dSCatalin Marinas#include <linux/init.h>
1260ffc30dSCatalin Marinas#include <linux/linkage.h>
1360ffc30dSCatalin Marinas
148d883b23SMarc Zyngier#include <asm/alternative.h>
1560ffc30dSCatalin Marinas#include <asm/assembler.h>
1660ffc30dSCatalin Marinas#include <asm/asm-offsets.h>
17905e8c5dSWill Deacon#include <asm/cpufeature.h>
1860ffc30dSCatalin Marinas#include <asm/errno.h>
195c1ce6f7SMarc Zyngier#include <asm/esr.h>
208e23dacdSJames Morse#include <asm/irq.h>
21c7b9adafSWill Deacon#include <asm/memory.h>
22c7b9adafSWill Deacon#include <asm/mmu.h>
23eef94a3dSYury Norov#include <asm/processor.h>
2439bc88e5SCatalin Marinas#include <asm/ptrace.h>
2560ffc30dSCatalin Marinas#include <asm/thread_info.h>
26b4b8664dSAl Viro#include <asm/asm-uaccess.h>
2760ffc30dSCatalin Marinas#include <asm/unistd.h>
2860ffc30dSCatalin Marinas
2960ffc30dSCatalin Marinas/*
306c81fe79SLarry Bassel * Context tracking subsystem.  Used to instrument transitions
316c81fe79SLarry Bassel * between user and kernel mode.
326c81fe79SLarry Bassel */
/*
 * Inform the context tracking subsystem that we have entered the kernel
 * from user mode. Must be called with interrupts masked (hence the
 * _irqoff suffix). No-op when CONFIG_CONTEXT_TRACKING is disabled.
 * Clobbers: x30 (lr), plus whatever enter_from_user_mode() clobbers.
 */
332671828cSJames Morse	.macro ct_user_exit_irqoff
346c81fe79SLarry Bassel#ifdef CONFIG_CONTEXT_TRACKING
352671828cSJames Morse	bl	enter_from_user_mode
366c81fe79SLarry Bassel#endif
376c81fe79SLarry Bassel	.endm
386c81fe79SLarry Bassel
/*
 * Inform the context tracking subsystem that we are returning to user
 * mode. Counterpart of ct_user_exit_irqoff; no-op when
 * CONFIG_CONTEXT_TRACKING is disabled.
 * Clobbers: x30 (lr), plus whatever context_tracking_user_enter() clobbers.
 */
396c81fe79SLarry Bassel	.macro ct_user_enter
406c81fe79SLarry Bassel#ifdef CONFIG_CONTEXT_TRACKING
416c81fe79SLarry Bassel	bl	context_tracking_user_enter
426c81fe79SLarry Bassel#endif
436c81fe79SLarry Bassel	.endm
446c81fe79SLarry Bassel
/*
 * Zero the general purpose registers x0-x29 (x30/lr is left alone).
 * Used on entry from EL0 (see kernel_entry) so that no user-supplied
 * register values remain live while running in the kernel.
 */
45baaa7237SMark Rutland	.macro	clear_gp_regs
46baaa7237SMark Rutland	.irp	n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29
47baaa7237SMark Rutland	mov	x\n, xzr
48baaa7237SMark Rutland	.endr
49baaa7237SMark Rutland	.endm
50baaa7237SMark Rutland
516c81fe79SLarry Bassel/*
5260ffc30dSCatalin Marinas * Bad Abort numbers
5360ffc30dSCatalin Marinas *-----------------
5460ffc30dSCatalin Marinas */
5560ffc30dSCatalin Marinas#define BAD_SYNC	0
5660ffc30dSCatalin Marinas#define BAD_IRQ		1
5760ffc30dSCatalin Marinas#define BAD_FIQ		2
5860ffc30dSCatalin Marinas#define BAD_ERROR	3
5960ffc30dSCatalin Marinas
/*
 * Vector table entry stub: reserve the pt_regs frame on the stack and
 * branch to the el\el_\label handler. Each entry is 128 bytes (.align 7).
 *
 * With CONFIG_UNMAP_KERNEL_AT_EL0 (KPTI), EL0 entries recover x30 from
 * tpidrro_el0 (where the entry trampoline stashed it) and clear the
 * stash; compat (32-bit) tasks just get x30 zeroed.
 *
 * With CONFIG_VMAP_STACK, additionally detect a kernel stack overflow
 * without corrupting any GPR, switching to the per-cpu overflow stack
 * when SP is found to be bad.
 */
605b1f7fe4SWill Deacon	.macro kernel_ventry, el, label, regsize = 64
61b11e5759SMark Rutland	.align 7
624bf3286dSWill Deacon#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
634bf3286dSWill Deacon	.if	\el == 0
64108eae2dSJulien Thierryalternative_if ARM64_UNMAP_KERNEL_AT_EL0
654bf3286dSWill Deacon	.if	\regsize == 64
664bf3286dSWill Deacon	mrs	x30, tpidrro_el0
674bf3286dSWill Deacon	msr	tpidrro_el0, xzr
684bf3286dSWill Deacon	.else
694bf3286dSWill Deacon	mov	x30, xzr
704bf3286dSWill Deacon	.endif
71ea1e3de8SWill Deaconalternative_else_nop_endif
72108eae2dSJulien Thierry	.endif
734bf3286dSWill Deacon#endif
744bf3286dSWill Deacon
7563648dd2SWill Deacon	sub	sp, sp, #S_FRAME_SIZE
76872d8327SMark Rutland#ifdef CONFIG_VMAP_STACK
77872d8327SMark Rutland	/*
78872d8327SMark Rutland	 * Test whether the SP has overflowed, without corrupting a GPR.
79de858040SHeyi Guo	 * Task and IRQ stacks are aligned so that SP & (1 << THREAD_SHIFT)
80de858040SHeyi Guo	 * should always be zero.
81872d8327SMark Rutland	 */
82872d8327SMark Rutland	add	sp, sp, x0			// sp' = sp + x0
83872d8327SMark Rutland	sub	x0, sp, x0			// x0' = sp' - x0 = (sp + x0) - x0 = sp
84872d8327SMark Rutland	tbnz	x0, #THREAD_SHIFT, 0f
85872d8327SMark Rutland	sub	x0, sp, x0			// x0'' = sp' - x0' = (sp + x0) - sp = x0
86872d8327SMark Rutland	sub	sp, sp, x0			// sp'' = sp' - x0 = (sp + x0) - x0 = sp
875b1f7fe4SWill Deacon	b	el\()\el\()_\label
88872d8327SMark Rutland
89872d8327SMark Rutland0:
90872d8327SMark Rutland	/*
91872d8327SMark Rutland	 * Either we've just detected an overflow, or we've taken an exception
92872d8327SMark Rutland	 * while on the overflow stack. Either way, we won't return to
93872d8327SMark Rutland	 * userspace, and can clobber EL0 registers to free up GPRs.
94872d8327SMark Rutland	 */
95872d8327SMark Rutland
96872d8327SMark Rutland	/* Stash the original SP (minus S_FRAME_SIZE) in tpidr_el0. */
97872d8327SMark Rutland	msr	tpidr_el0, x0
98872d8327SMark Rutland
99872d8327SMark Rutland	/* Recover the original x0 value and stash it in tpidrro_el0 */
100872d8327SMark Rutland	sub	x0, sp, x0
101872d8327SMark Rutland	msr	tpidrro_el0, x0
102872d8327SMark Rutland
103872d8327SMark Rutland	/* Switch to the overflow stack */
104872d8327SMark Rutland	adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0
105872d8327SMark Rutland
106872d8327SMark Rutland	/*
107872d8327SMark Rutland	 * Check whether we were already on the overflow stack. This may happen
108872d8327SMark Rutland	 * after panic() re-enables interrupts.
109872d8327SMark Rutland	 */
110872d8327SMark Rutland	mrs	x0, tpidr_el0			// sp of interrupted context
111872d8327SMark Rutland	sub	x0, sp, x0			// delta with top of overflow stack
112872d8327SMark Rutland	tst	x0, #~(OVERFLOW_STACK_SIZE - 1)	// within range?
113872d8327SMark Rutland	b.ne	__bad_stack			// no? -> bad stack pointer
114872d8327SMark Rutland
115872d8327SMark Rutland	/* We were already on the overflow stack. Restore sp/x0 and carry on. */
116872d8327SMark Rutland	sub	sp, sp, x0
117872d8327SMark Rutland	mrs	x0, tpidrro_el0
118872d8327SMark Rutland#endif
1195b1f7fe4SWill Deacon	b	el\()\el\()_\label
120b11e5759SMark Rutland	.endm
121b11e5759SMark Rutland
/*
 * Compute in \dst the address of \sym within the trampoline alias
 * mapping (TRAMP_VALIAS) of the .entry.tramp.text section, i.e. the
 * address at which the symbol is reachable while the kernel is unmapped
 * at EL0 (KPTI).
 */
1224bf3286dSWill Deacon	.macro tramp_alias, dst, sym
1234bf3286dSWill Deacon	mov_q	\dst, TRAMP_VALIAS
1244bf3286dSWill Deacon	add	\dst, \dst, #(\sym - .entry.tramp.text)
125b11e5759SMark Rutland	.endm
126b11e5759SMark Rutland
1278e290624SMarc Zyngier	// This macro corrupts x0-x3. It is the caller's duty
1288e290624SMarc Zyngier	// to save/restore them if required.
	//
	// Toggle the Speculative Store Bypass Disable mitigation through the
	// ARM_SMCCC_ARCH_WORKAROUND_2 firmware call: \state is 1 to enable the
	// mitigation (entry from EL0) and 0 to disable it (return to EL0).
	// Skipped entirely when the CPU does not require the callback
	// (arm64_ssbd_callback_required == 0) or when the current task has
	// TIF_SSBD set. The conduit (SMC/HVC) is patched in at boot via the
	// arm64_update_smccc_conduit alternative callback.
12999ed3ed0SMark Rutland	.macro	apply_ssbd, state, tmp1, tmp2
1308e290624SMarc Zyngier#ifdef CONFIG_ARM64_SSBD
131986372c4SMarc Zyngieralternative_cb	arm64_enable_wa2_handling
13299ed3ed0SMark Rutland	b	.L__asm_ssbd_skip\@
133986372c4SMarc Zyngieralternative_cb_end
1345cf9ce6eSMarc Zyngier	ldr_this_cpu	\tmp2, arm64_ssbd_callback_required, \tmp1
13599ed3ed0SMark Rutland	cbz	\tmp2,	.L__asm_ssbd_skip\@
1369dd9614fSMarc Zyngier	ldr	\tmp2, [tsk, #TSK_TI_FLAGS]
13799ed3ed0SMark Rutland	tbnz	\tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@
1388e290624SMarc Zyngier	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
1398e290624SMarc Zyngier	mov	w1, #\state
1408e290624SMarc Zyngieralternative_cb	arm64_update_smccc_conduit
1418e290624SMarc Zyngier	nop					// Patched to SMC/HVC #0
1428e290624SMarc Zyngieralternative_cb_end
14399ed3ed0SMark Rutland.L__asm_ssbd_skip\@:
1448e290624SMarc Zyngier#endif
1458e290624SMarc Zyngier	.endm
1468e290624SMarc Zyngier
/*
 * Save the interrupted context into a struct pt_regs frame on the
 * kernel stack. SP must already point S_FRAME_SIZE below the frame top,
 * as arranged by kernel_ventry.
 *
 * \el      - 0 if the exception came from EL0 (user), 1 if from EL1
 * \regsize - 64 for native tasks, 32 for compat (AArch32) tasks
 *
 * On exit (see also the summary comment at the bottom):
 *   tsk (x28) = current task
 *   x20 = ICC_PMR_EL1 (only if IRQ priority masking is patched in)
 *   x21 = aborted SP, x22 = aborted PC, x23 = aborted PSTATE
 */
147b11e5759SMark Rutland	.macro	kernel_entry, el, regsize = 64
14860ffc30dSCatalin Marinas	.if	\regsize == 32
14960ffc30dSCatalin Marinas	mov	w0, w0				// zero upper 32 bits of x0
15060ffc30dSCatalin Marinas	.endif
15163648dd2SWill Deacon	stp	x0, x1, [sp, #16 * 0]
15263648dd2SWill Deacon	stp	x2, x3, [sp, #16 * 1]
15363648dd2SWill Deacon	stp	x4, x5, [sp, #16 * 2]
15463648dd2SWill Deacon	stp	x6, x7, [sp, #16 * 3]
15563648dd2SWill Deacon	stp	x8, x9, [sp, #16 * 4]
15663648dd2SWill Deacon	stp	x10, x11, [sp, #16 * 5]
15763648dd2SWill Deacon	stp	x12, x13, [sp, #16 * 6]
15863648dd2SWill Deacon	stp	x14, x15, [sp, #16 * 7]
15963648dd2SWill Deacon	stp	x16, x17, [sp, #16 * 8]
16063648dd2SWill Deacon	stp	x18, x19, [sp, #16 * 9]
16163648dd2SWill Deacon	stp	x20, x21, [sp, #16 * 10]
16263648dd2SWill Deacon	stp	x22, x23, [sp, #16 * 11]
16363648dd2SWill Deacon	stp	x24, x25, [sp, #16 * 12]
16463648dd2SWill Deacon	stp	x26, x27, [sp, #16 * 13]
16563648dd2SWill Deacon	stp	x28, x29, [sp, #16 * 14]
16663648dd2SWill Deacon
16760ffc30dSCatalin Marinas	.if	\el == 0
168baaa7237SMark Rutland	clear_gp_regs
16960ffc30dSCatalin Marinas	mrs	x21, sp_el0
1703e393417SMark Rutland	ldr_this_cpu	tsk, __entry_task, x20
1713e393417SMark Rutland	msr	sp_el0, tsk
1723e393417SMark Rutland
1733e393417SMark Rutland	// Ensure MDSCR_EL1.SS is clear, since we can unmask debug exceptions
1743e393417SMark Rutland	// when scheduling.
1753e393417SMark Rutland	ldr	x19, [tsk, #TSK_TI_FLAGS]
1763e393417SMark Rutland	disable_step_tsk x19, x20
17749003a8dSJames Morse
	// Enable the SSB mitigation while running in the kernel.
17899ed3ed0SMark Rutland	apply_ssbd 1, x22, x23
1798e290624SMarc Zyngier
18060ffc30dSCatalin Marinas	.else
18160ffc30dSCatalin Marinas	add	x21, sp, #S_FRAME_SIZE
1824caf8758SJulien Thierry	get_current_task tsk
18351369e39SRobin Murphy	/* Save the task's original addr_limit and set USER_DS */
184c02433ddSMark Rutland	ldr	x20, [tsk, #TSK_TI_ADDR_LIMIT]
185e19a6ee2SJames Morse	str	x20, [sp, #S_ORIG_ADDR_LIMIT]
18651369e39SRobin Murphy	mov	x20, #USER_DS
187c02433ddSMark Rutland	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
188563cada0SVladimir Murzin	/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
189e19a6ee2SJames Morse	.endif /* \el == 0 */
19060ffc30dSCatalin Marinas	mrs	x22, elr_el1
19160ffc30dSCatalin Marinas	mrs	x23, spsr_el1
19260ffc30dSCatalin Marinas	stp	lr, x21, [sp, #S_LR]
19339bc88e5SCatalin Marinas
19473267498SArd Biesheuvel	/*
19573267498SArd Biesheuvel	 * In order to be able to dump the contents of struct pt_regs at the
19673267498SArd Biesheuvel	 * time the exception was taken (in case we attempt to walk the call
19773267498SArd Biesheuvel	 * stack later), chain it together with the stack frames.
19873267498SArd Biesheuvel	 */
19973267498SArd Biesheuvel	.if \el == 0
20073267498SArd Biesheuvel	stp	xzr, xzr, [sp, #S_STACKFRAME]
20173267498SArd Biesheuvel	.else
20273267498SArd Biesheuvel	stp	x29, x22, [sp, #S_STACKFRAME]
20373267498SArd Biesheuvel	.endif
20473267498SArd Biesheuvel	add	x29, sp, #S_STACKFRAME
20573267498SArd Biesheuvel
20639bc88e5SCatalin Marinas#ifdef CONFIG_ARM64_SW_TTBR0_PAN
20739bc88e5SCatalin Marinas	/*
20839bc88e5SCatalin Marinas	 * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
20939bc88e5SCatalin Marinas	 * EL0, there is no need to check the state of TTBR0_EL1 since
21039bc88e5SCatalin Marinas	 * accesses are always enabled.
21139bc88e5SCatalin Marinas	 * Note that the meaning of this bit differs from the ARMv8.1 PAN
21239bc88e5SCatalin Marinas	 * feature as all TTBR0_EL1 accesses are disabled, not just those to
21339bc88e5SCatalin Marinas	 * user mappings.
21439bc88e5SCatalin Marinas	 */
21539bc88e5SCatalin Marinasalternative_if ARM64_HAS_PAN
21639bc88e5SCatalin Marinas	b	1f				// skip TTBR0 PAN
21739bc88e5SCatalin Marinasalternative_else_nop_endif
21839bc88e5SCatalin Marinas
21939bc88e5SCatalin Marinas	.if	\el != 0
22039bc88e5SCatalin Marinas	mrs	x21, ttbr0_el1
221b519538dSWill Deacon	tst	x21, #TTBR_ASID_MASK		// Check for the reserved ASID
22239bc88e5SCatalin Marinas	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
22339bc88e5SCatalin Marinas	b.eq	1f				// TTBR0 access already disabled
22439bc88e5SCatalin Marinas	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
22539bc88e5SCatalin Marinas	.endif
22639bc88e5SCatalin Marinas
22739bc88e5SCatalin Marinas	__uaccess_ttbr0_disable x21
22839bc88e5SCatalin Marinas1:
22939bc88e5SCatalin Marinas#endif
23039bc88e5SCatalin Marinas
23160ffc30dSCatalin Marinas	stp	x22, x23, [sp, #S_PC]
23260ffc30dSCatalin Marinas
23317c28958SDave Martin	/* Not in a syscall by default (el0_svc overwrites for real syscall) */
23460ffc30dSCatalin Marinas	.if	\el == 0
23517c28958SDave Martin	mov	w21, #NO_SYSCALL
23635d0e6fbSDave Martin	str	w21, [sp, #S_SYSCALLNO]
23760ffc30dSCatalin Marinas	.endif
23860ffc30dSCatalin Marinas
239133d0518SJulien Thierry	/* Save pmr */
240133d0518SJulien Thierryalternative_if ARM64_HAS_IRQ_PRIO_MASKING
241133d0518SJulien Thierry	mrs_s	x20, SYS_ICC_PMR_EL1
242133d0518SJulien Thierry	str	x20, [sp, #S_PMR_SAVE]
243133d0518SJulien Thierryalternative_else_nop_endif
244133d0518SJulien Thierry
2456cdf9c7cSJungseok Lee	/*
24660ffc30dSCatalin Marinas	 * Registers that may be useful after this macro is invoked:
24760ffc30dSCatalin Marinas	 *
248bd82d4bdSJulien Thierry	 * x20 - ICC_PMR_EL1
24960ffc30dSCatalin Marinas	 * x21 - aborted SP
25060ffc30dSCatalin Marinas	 * x22 - aborted PC
25160ffc30dSCatalin Marinas	 * x23 - aborted PSTATE
25260ffc30dSCatalin Marinas	*/
25360ffc30dSCatalin Marinas	.endm
25460ffc30dSCatalin Marinas
/*
 * Restore the pt_regs context saved by kernel_entry and return from the
 * exception with eret — or, for EL0 returns when KPTI is enabled, via
 * the exit trampoline (tramp_exit_native/tramp_exit_compat).
 *
 * \el - exception level we are returning to (0 = user, 1 = kernel)
 */
255412fcb6cSWill Deacon	.macro	kernel_exit, el
256e19a6ee2SJames Morse	.if	\el != 0
2578d66772eSJames Morse	disable_daif
2588d66772eSJames Morse
259e19a6ee2SJames Morse	/* Restore the task's original addr_limit. */
260e19a6ee2SJames Morse	ldr	x20, [sp, #S_ORIG_ADDR_LIMIT]
261c02433ddSMark Rutland	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
262e19a6ee2SJames Morse
263e19a6ee2SJames Morse	/* No need to restore UAO, it will be restored from SPSR_EL1 */
264e19a6ee2SJames Morse	.endif
265e19a6ee2SJames Morse
266133d0518SJulien Thierry	/* Restore pmr */
267133d0518SJulien Thierryalternative_if ARM64_HAS_IRQ_PRIO_MASKING
268133d0518SJulien Thierry	ldr	x20, [sp, #S_PMR_SAVE]
269133d0518SJulien Thierry	msr_s	SYS_ICC_PMR_EL1, x20
270f2266504SMarc Zyngier	mrs_s	x21, SYS_ICC_CTLR_EL1
271f2266504SMarc Zyngier	tbz	x21, #6, .L__skip_pmr_sync\@	// Check for ICC_CTLR_EL1.PMHE
272f2266504SMarc Zyngier	dsb	sy				// Ensure priority change is seen by redistributor
273f2266504SMarc Zyngier.L__skip_pmr_sync\@:
274133d0518SJulien Thierryalternative_else_nop_endif
275133d0518SJulien Thierry
27660ffc30dSCatalin Marinas	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
27760ffc30dSCatalin Marinas	.if	\el == 0
2786c81fe79SLarry Bassel	ct_user_enter
27939bc88e5SCatalin Marinas	.endif
28039bc88e5SCatalin Marinas
28139bc88e5SCatalin Marinas#ifdef CONFIG_ARM64_SW_TTBR0_PAN
28239bc88e5SCatalin Marinas	/*
28339bc88e5SCatalin Marinas	 * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
28439bc88e5SCatalin Marinas	 * PAN bit checking.
28539bc88e5SCatalin Marinas	 */
28639bc88e5SCatalin Marinasalternative_if ARM64_HAS_PAN
28739bc88e5SCatalin Marinas	b	2f				// skip TTBR0 PAN
28839bc88e5SCatalin Marinasalternative_else_nop_endif
28939bc88e5SCatalin Marinas
29039bc88e5SCatalin Marinas	.if	\el != 0
29139bc88e5SCatalin Marinas	tbnz	x22, #22, 1f			// Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
29239bc88e5SCatalin Marinas	.endif
29339bc88e5SCatalin Marinas
29427a921e7SWill Deacon	__uaccess_ttbr0_enable x0, x1
29539bc88e5SCatalin Marinas
29639bc88e5SCatalin Marinas	.if	\el == 0
29739bc88e5SCatalin Marinas	/*
29839bc88e5SCatalin Marinas	 * Enable errata workarounds only if returning to user. The only
29939bc88e5SCatalin Marinas	 * workaround currently required for TTBR0_EL1 changes is for the
30039bc88e5SCatalin Marinas	 * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
30139bc88e5SCatalin Marinas	 * corruption).
30239bc88e5SCatalin Marinas	 */
30395e3de35SMarc Zyngier	bl	post_ttbr_update_workaround
30439bc88e5SCatalin Marinas	.endif
30539bc88e5SCatalin Marinas1:
30639bc88e5SCatalin Marinas	.if	\el != 0
30739bc88e5SCatalin Marinas	and	x22, x22, #~PSR_PAN_BIT		// ARMv8.0 CPUs do not understand this bit
30839bc88e5SCatalin Marinas	.endif
30939bc88e5SCatalin Marinas2:
31039bc88e5SCatalin Marinas#endif
31139bc88e5SCatalin Marinas
31239bc88e5SCatalin Marinas	.if	\el == 0
31360ffc30dSCatalin Marinas	ldr	x23, [sp, #S_SP]		// load return stack pointer
31460ffc30dSCatalin Marinas	msr	sp_el0, x23
3154bf3286dSWill Deacon	tst	x22, #PSR_MODE32_BIT		// native task?
3164bf3286dSWill Deacon	b.eq	3f
3174bf3286dSWill Deacon
318905e8c5dSWill Deacon#ifdef CONFIG_ARM64_ERRATUM_845719
3196ba3b554SMark Rutlandalternative_if ARM64_WORKAROUND_845719
320e28cabf1SDaniel Thompson#ifdef CONFIG_PID_IN_CONTEXTIDR
321e28cabf1SDaniel Thompson	mrs	x29, contextidr_el1
322e28cabf1SDaniel Thompson	msr	contextidr_el1, x29
323e28cabf1SDaniel Thompson#else
324e28cabf1SDaniel Thompson	msr contextidr_el1, xzr
325e28cabf1SDaniel Thompson#endif
3266ba3b554SMark Rutlandalternative_else_nop_endif
327905e8c5dSWill Deacon#endif
3284bf3286dSWill Deacon3:
329a5325089SMarc Zyngier#ifdef CONFIG_ARM64_ERRATUM_1418040
330a5325089SMarc Zyngieralternative_if_not ARM64_WORKAROUND_1418040
3310f80cad3SMarc Zyngier	b	4f
3320f80cad3SMarc Zyngieralternative_else_nop_endif
3330f80cad3SMarc Zyngier	/*
3340f80cad3SMarc Zyngier	 * if (x22.mode32 == cntkctl_el1.el0vcten)
3350f80cad3SMarc Zyngier	 *     cntkctl_el1.el0vcten = ~cntkctl_el1.el0vcten
3360f80cad3SMarc Zyngier	 */
3370f80cad3SMarc Zyngier	mrs	x1, cntkctl_el1
3380f80cad3SMarc Zyngier	eon	x0, x1, x22, lsr #3
3390f80cad3SMarc Zyngier	tbz	x0, #1, 4f
3400f80cad3SMarc Zyngier	eor	x1, x1, #2	// ARCH_TIMER_USR_VCT_ACCESS_EN
3410f80cad3SMarc Zyngier	msr	cntkctl_el1, x1
3420f80cad3SMarc Zyngier4:
3430f80cad3SMarc Zyngier#endif
	// Drop the SSB mitigation before returning to user.
34499ed3ed0SMark Rutland	apply_ssbd 0, x0, x1
34560ffc30dSCatalin Marinas	.endif
34639bc88e5SCatalin Marinas
34763648dd2SWill Deacon	msr	elr_el1, x21			// set up the return data
34863648dd2SWill Deacon	msr	spsr_el1, x22
34963648dd2SWill Deacon	ldp	x0, x1, [sp, #16 * 0]
35063648dd2SWill Deacon	ldp	x2, x3, [sp, #16 * 1]
35163648dd2SWill Deacon	ldp	x4, x5, [sp, #16 * 2]
35263648dd2SWill Deacon	ldp	x6, x7, [sp, #16 * 3]
35363648dd2SWill Deacon	ldp	x8, x9, [sp, #16 * 4]
35463648dd2SWill Deacon	ldp	x10, x11, [sp, #16 * 5]
35563648dd2SWill Deacon	ldp	x12, x13, [sp, #16 * 6]
35663648dd2SWill Deacon	ldp	x14, x15, [sp, #16 * 7]
35763648dd2SWill Deacon	ldp	x16, x17, [sp, #16 * 8]
35863648dd2SWill Deacon	ldp	x18, x19, [sp, #16 * 9]
35963648dd2SWill Deacon	ldp	x20, x21, [sp, #16 * 10]
36063648dd2SWill Deacon	ldp	x22, x23, [sp, #16 * 11]
36163648dd2SWill Deacon	ldp	x24, x25, [sp, #16 * 12]
36263648dd2SWill Deacon	ldp	x26, x27, [sp, #16 * 13]
36363648dd2SWill Deacon	ldp	x28, x29, [sp, #16 * 14]
36463648dd2SWill Deacon	ldr	lr, [sp, #S_LR]
36563648dd2SWill Deacon	add	sp, sp, #S_FRAME_SIZE		// restore sp
3664bf3286dSWill Deacon
3674bf3286dSWill Deacon	.if	\el == 0
368ea1e3de8SWill Deaconalternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
369ea1e3de8SWill Deacon#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	// NE here comes from the "tst x22, #PSR_MODE32_BIT" above: the
	// interrupted context was a compat (32-bit) task, so take the
	// compat trampoline exit instead of the native one.
3700f80cad3SMarc Zyngier	bne	5f
3714bf3286dSWill Deacon	msr	far_el1, x30
3724bf3286dSWill Deacon	tramp_alias	x30, tramp_exit_native
3734bf3286dSWill Deacon	br	x30
3740f80cad3SMarc Zyngier5:
3754bf3286dSWill Deacon	tramp_alias	x30, tramp_exit_compat
3764bf3286dSWill Deacon	br	x30
377ea1e3de8SWill Deacon#endif
3784bf3286dSWill Deacon	.else
3794bf3286dSWill Deacon	eret
3804bf3286dSWill Deacon	.endif
	// Speculation barrier: nothing after the eret may execute
	// speculatively.
381679db708SWill Deacon	sb
38260ffc30dSCatalin Marinas	.endm
38360ffc30dSCatalin Marinas
/*
 * Switch SP to the per-cpu IRQ stack if we are currently on the task
 * stack; if we are already off the task stack (e.g. handling a nested
 * interrupt on the IRQ stack — TODO confirm), SP is left unchanged.
 * The original SP is preserved in x19 for irq_stack_exit.
 * Clobbers: x25, x26.
 */
384971c67ceSJames Morse	.macro	irq_stack_entry
3858e23dacdSJames Morse	mov	x19, sp			// preserve the original sp
3868e23dacdSJames Morse
3878e23dacdSJames Morse	/*
388c02433ddSMark Rutland	 * Compare sp with the base of the task stack.
389c02433ddSMark Rutland	 * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
390c02433ddSMark Rutland	 * and should switch to the irq stack.
391c02433ddSMark Rutland	 */
392c02433ddSMark Rutland	ldr	x25, [tsk, TSK_STACK]
393c02433ddSMark Rutland	eor	x25, x25, x19
394c02433ddSMark Rutland	and	x25, x25, #~(THREAD_SIZE - 1)
395c02433ddSMark Rutland	cbnz	x25, 9998f
3968e23dacdSJames Morse
397f60fe78fSMark Rutland	ldr_this_cpu x25, irq_stack_ptr, x26
39834be98f4SArd Biesheuvel	mov	x26, #IRQ_STACK_SIZE
3998e23dacdSJames Morse	add	x26, x25, x26
400d224a69eSJames Morse
401d224a69eSJames Morse	/* switch to the irq stack */
4028e23dacdSJames Morse	mov	sp, x26
4038e23dacdSJames Morse9998:
4048e23dacdSJames Morse	.endm
4058e23dacdSJames Morse
4068e23dacdSJames Morse	/*
4078e23dacdSJames Morse	 * x19 should be preserved between irq_stack_entry and
4088e23dacdSJames Morse	 * irq_stack_exit.
4098e23dacdSJames Morse	 */
	/* Restore the stack pointer saved by irq_stack_entry. */
4108e23dacdSJames Morse	.macro	irq_stack_exit
4118e23dacdSJames Morse	mov	sp, x19
4128e23dacdSJames Morse	.endm
4138e23dacdSJames Morse
4148c2c596fSMark Rutland/* GPRs used by entry code */
/*
 * NOTE(review): tsk is loaded via __entry_task/get_current_task and is
 * dereferenced with TSK_* / TSK_TI_* offsets, so it appears to hold the
 * current task_struct (with thread_info embedded) rather than a bare
 * thread_info as the trailing comment suggests — confirm against
 * asm-offsets. x28 is callee-saved, so tsk survives the C calls made by
 * the entry code.
 */
41560ffc30dSCatalin Marinastsk	.req	x28		// current thread_info
41660ffc30dSCatalin Marinas
41760ffc30dSCatalin Marinas/*
41860ffc30dSCatalin Marinas * Interrupt handling.
41960ffc30dSCatalin Marinas */
/*
 * Call the registered interrupt-controller handler (handle_arch_irq)
 * with x0 = pt_regs, running on the IRQ stack where possible.
 * Clobbers: x0, x1, x19, x25, x26 (via irq_stack_entry) and whatever
 * the C handler clobbers.
 */
42060ffc30dSCatalin Marinas	.macro	irq_handler
4218e23dacdSJames Morse	ldr_l	x1, handle_arch_irq
42260ffc30dSCatalin Marinas	mov	x0, sp
423971c67ceSJames Morse	irq_stack_entry
42460ffc30dSCatalin Marinas	blr	x1
4258e23dacdSJames Morse	irq_stack_exit
42660ffc30dSCatalin Marinas	.endm
42760ffc30dSCatalin Marinas
42817ce302fSJulien Thierry#ifdef CONFIG_ARM64_PSEUDO_NMI
42917ce302fSJulien Thierry	/*
43017ce302fSJulien Thierry	 * Set res to 0 if irqs were unmasked in interrupted context.
43117ce302fSJulien Thierry	 * Otherwise set res to non-0 value.
43217ce302fSJulien Thierry	 */
	/* \pmr must hold the PMR value saved at exception entry. */
43317ce302fSJulien Thierry	.macro	test_irqs_unmasked res:req, pmr:req
43417ce302fSJulien Thierryalternative_if ARM64_HAS_IRQ_PRIO_MASKING
43517ce302fSJulien Thierry	sub	\res, \pmr, #GIC_PRIO_IRQON
43617ce302fSJulien Thierryalternative_else
	// Without priority masking there is no PMR state to inspect:
	// always report "unmasked".
43717ce302fSJulien Thierry	mov	\res, xzr
43817ce302fSJulien Thierryalternative_endif
43917ce302fSJulien Thierry	.endm
44017ce302fSJulien Thierry#endif
44117ce302fSJulien Thierry
/*
 * Kernel-entry PMR setup: when pseudo-NMI IRQ priority masking is
 * patched in, set ICC_PMR_EL1 to GIC_PRIO_PSR_I_SET | GIC_PRIO_IRQON.
 * No-op otherwise. Clobbers \tmp.
 */
442bd82d4bdSJulien Thierry	.macro	gic_prio_kentry_setup, tmp:req
443bd82d4bdSJulien Thierry#ifdef CONFIG_ARM64_PSEUDO_NMI
444bd82d4bdSJulien Thierry	alternative_if ARM64_HAS_IRQ_PRIO_MASKING
445bd82d4bdSJulien Thierry	mov	\tmp, #(GIC_PRIO_PSR_I_SET | GIC_PRIO_IRQON)
446bd82d4bdSJulien Thierry	msr_s	SYS_ICC_PMR_EL1, \tmp
447bd82d4bdSJulien Thierry	alternative_else_nop_endif
448bd82d4bdSJulien Thierry#endif
449bd82d4bdSJulien Thierry	.endm
450bd82d4bdSJulien Thierry
/*
 * IRQ-entry PMR setup: \pmr holds the PMR value saved at exception
 * entry; when pseudo-NMI IRQ priority masking is patched in, write it
 * back with GIC_PRIO_PSR_I_SET or'ed in. No-op otherwise.
 * Clobbers \tmp.
 */
451bd82d4bdSJulien Thierry	.macro	gic_prio_irq_setup, pmr:req, tmp:req
452bd82d4bdSJulien Thierry#ifdef CONFIG_ARM64_PSEUDO_NMI
453bd82d4bdSJulien Thierry	alternative_if ARM64_HAS_IRQ_PRIO_MASKING
454bd82d4bdSJulien Thierry	orr	\tmp, \pmr, #GIC_PRIO_PSR_I_SET
455bd82d4bdSJulien Thierry	msr_s	SYS_ICC_PMR_EL1, \tmp
456bd82d4bdSJulien Thierry	alternative_else_nop_endif
457bd82d4bdSJulien Thierry#endif
458bd82d4bdSJulien Thierry	.endm
459bd82d4bdSJulien Thierry
46060ffc30dSCatalin Marinas	.text
46160ffc30dSCatalin Marinas
46260ffc30dSCatalin Marinas/*
46360ffc30dSCatalin Marinas * Exception vectors.
46460ffc30dSCatalin Marinas */
465888b3c87SPratyush Anand	.pushsection ".entry.text", "ax"
46660ffc30dSCatalin Marinas
/*
 * The exception vector table: four groups of four entries, one group
 * per source (current EL with SP_EL0 "EL1t", current EL with SP_EL1
 * "EL1h", lower EL using AArch64, lower EL using AArch32), each entry
 * 128 bytes (.align 7 inside kernel_ventry). The table base must be
 * 2KB-aligned (.align 11) for VBAR_EL1.
 */
46760ffc30dSCatalin Marinas	.align	11
4680ccbd98aSMark BrownSYM_CODE_START(vectors)
4695b1f7fe4SWill Deacon	kernel_ventry	1, sync_invalid			// Synchronous EL1t
4705b1f7fe4SWill Deacon	kernel_ventry	1, irq_invalid			// IRQ EL1t
4715b1f7fe4SWill Deacon	kernel_ventry	1, fiq_invalid			// FIQ EL1t
4725b1f7fe4SWill Deacon	kernel_ventry	1, error_invalid		// Error EL1t
47360ffc30dSCatalin Marinas
4745b1f7fe4SWill Deacon	kernel_ventry	1, sync				// Synchronous EL1h
4755b1f7fe4SWill Deacon	kernel_ventry	1, irq				// IRQ EL1h
4765b1f7fe4SWill Deacon	kernel_ventry	1, fiq_invalid			// FIQ EL1h
4775b1f7fe4SWill Deacon	kernel_ventry	1, error			// Error EL1h
47860ffc30dSCatalin Marinas
4795b1f7fe4SWill Deacon	kernel_ventry	0, sync				// Synchronous 64-bit EL0
4805b1f7fe4SWill Deacon	kernel_ventry	0, irq				// IRQ 64-bit EL0
4815b1f7fe4SWill Deacon	kernel_ventry	0, fiq_invalid			// FIQ 64-bit EL0
4825b1f7fe4SWill Deacon	kernel_ventry	0, error			// Error 64-bit EL0
48360ffc30dSCatalin Marinas
48460ffc30dSCatalin Marinas#ifdef CONFIG_COMPAT
4855b1f7fe4SWill Deacon	kernel_ventry	0, sync_compat, 32		// Synchronous 32-bit EL0
4865b1f7fe4SWill Deacon	kernel_ventry	0, irq_compat, 32		// IRQ 32-bit EL0
4875b1f7fe4SWill Deacon	kernel_ventry	0, fiq_invalid_compat, 32	// FIQ 32-bit EL0
4885b1f7fe4SWill Deacon	kernel_ventry	0, error_compat, 32		// Error 32-bit EL0
48960ffc30dSCatalin Marinas#else
4905b1f7fe4SWill Deacon	kernel_ventry	0, sync_invalid, 32		// Synchronous 32-bit EL0
4915b1f7fe4SWill Deacon	kernel_ventry	0, irq_invalid, 32		// IRQ 32-bit EL0
4925b1f7fe4SWill Deacon	kernel_ventry	0, fiq_invalid, 32		// FIQ 32-bit EL0
4935b1f7fe4SWill Deacon	kernel_ventry	0, error_invalid, 32		// Error 32-bit EL0
49460ffc30dSCatalin Marinas#endif
4950ccbd98aSMark BrownSYM_CODE_END(vectors)
49660ffc30dSCatalin Marinas
497872d8327SMark Rutland#ifdef CONFIG_VMAP_STACK
498872d8327SMark Rutland	/*
499872d8327SMark Rutland	 * We detected an overflow in kernel_ventry, which switched to the
500872d8327SMark Rutland	 * overflow stack. Stash the exception regs, and head to our overflow
501872d8327SMark Rutland	 * handler.
502872d8327SMark Rutland	 */
503872d8327SMark Rutland__bad_stack:
504872d8327SMark Rutland	/* Restore the original x0 value */
505872d8327SMark Rutland	mrs	x0, tpidrro_el0
506872d8327SMark Rutland
507872d8327SMark Rutland	/*
508872d8327SMark Rutland	 * Store the original GPRs to the new stack. The original SP (minus
509872d8327SMark Rutland	 * S_FRAME_SIZE) was stashed in tpidr_el0 by kernel_ventry.
510872d8327SMark Rutland	 */
511872d8327SMark Rutland	sub	sp, sp, #S_FRAME_SIZE
512872d8327SMark Rutland	kernel_entry 1
513872d8327SMark Rutland	mrs	x0, tpidr_el0
514872d8327SMark Rutland	add	x0, x0, #S_FRAME_SIZE
515872d8327SMark Rutland	str	x0, [sp, #S_SP]
516872d8327SMark Rutland
517872d8327SMark Rutland	/* Stash the regs for handle_bad_stack */
518872d8327SMark Rutland	mov	x0, sp
519872d8327SMark Rutland
520872d8327SMark Rutland	/* Time to die */
521872d8327SMark Rutland	bl	handle_bad_stack
	/* handle_bad_stack() should not return; trap if it does. */
522872d8327SMark Rutland	ASM_BUG()
523872d8327SMark Rutland#endif /* CONFIG_VMAP_STACK */
524872d8327SMark Rutland
52560ffc30dSCatalin Marinas/*
52660ffc30dSCatalin Marinas * Invalid mode handlers
52760ffc30dSCatalin Marinas */
/*
 * Entry stub for exceptions that have no legitimate handler: build a
 * pt_regs frame and call bad_mode() with x0 = regs, x1 = \reason (one
 * of the BAD_* constants), x2 = ESR_EL1. bad_mode() is not expected to
 * return; ASM_BUG() traps if it does.
 */
52860ffc30dSCatalin Marinas	.macro	inv_entry, el, reason, regsize = 64
529b660950cSArd Biesheuvel	kernel_entry \el, \regsize
53060ffc30dSCatalin Marinas	mov	x0, sp
53160ffc30dSCatalin Marinas	mov	x1, #\reason
53260ffc30dSCatalin Marinas	mrs	x2, esr_el1
5332d0e751aSMark Rutland	bl	bad_mode
5342d0e751aSMark Rutland	ASM_BUG()
53560ffc30dSCatalin Marinas	.endm
53660ffc30dSCatalin Marinas
/* Handlers for EL0 vectors we do not support: report via bad_mode(). */
5370ccbd98aSMark BrownSYM_CODE_START_LOCAL(el0_sync_invalid)
53860ffc30dSCatalin Marinas	inv_entry 0, BAD_SYNC
5390ccbd98aSMark BrownSYM_CODE_END(el0_sync_invalid)
54060ffc30dSCatalin Marinas
5410ccbd98aSMark BrownSYM_CODE_START_LOCAL(el0_irq_invalid)
54260ffc30dSCatalin Marinas	inv_entry 0, BAD_IRQ
5430ccbd98aSMark BrownSYM_CODE_END(el0_irq_invalid)
54460ffc30dSCatalin Marinas
5450ccbd98aSMark BrownSYM_CODE_START_LOCAL(el0_fiq_invalid)
54660ffc30dSCatalin Marinas	inv_entry 0, BAD_FIQ
5470ccbd98aSMark BrownSYM_CODE_END(el0_fiq_invalid)
54860ffc30dSCatalin Marinas
5490ccbd98aSMark BrownSYM_CODE_START_LOCAL(el0_error_invalid)
55060ffc30dSCatalin Marinas	inv_entry 0, BAD_ERROR
5510ccbd98aSMark BrownSYM_CODE_END(el0_error_invalid)
55260ffc30dSCatalin Marinas
55360ffc30dSCatalin Marinas#ifdef CONFIG_COMPAT
/* FIQ from 32-bit EL0 is not supported: report via bad_mode(). */
5540ccbd98aSMark BrownSYM_CODE_START_LOCAL(el0_fiq_invalid_compat)
55560ffc30dSCatalin Marinas	inv_entry 0, BAD_FIQ, 32
5560ccbd98aSMark BrownSYM_CODE_END(el0_fiq_invalid_compat)
55760ffc30dSCatalin Marinas#endif
55860ffc30dSCatalin Marinas
/* Handlers for EL1 vectors we do not support: report via bad_mode(). */
5590ccbd98aSMark BrownSYM_CODE_START_LOCAL(el1_sync_invalid)
56060ffc30dSCatalin Marinas	inv_entry 1, BAD_SYNC
5610ccbd98aSMark BrownSYM_CODE_END(el1_sync_invalid)
56260ffc30dSCatalin Marinas
5630ccbd98aSMark BrownSYM_CODE_START_LOCAL(el1_irq_invalid)
56460ffc30dSCatalin Marinas	inv_entry 1, BAD_IRQ
5650ccbd98aSMark BrownSYM_CODE_END(el1_irq_invalid)
56660ffc30dSCatalin Marinas
5670ccbd98aSMark BrownSYM_CODE_START_LOCAL(el1_fiq_invalid)
56860ffc30dSCatalin Marinas	inv_entry 1, BAD_FIQ
5690ccbd98aSMark BrownSYM_CODE_END(el1_fiq_invalid)
57060ffc30dSCatalin Marinas
5710ccbd98aSMark BrownSYM_CODE_START_LOCAL(el1_error_invalid)
57260ffc30dSCatalin Marinas	inv_entry 1, BAD_ERROR
5730ccbd98aSMark BrownSYM_CODE_END(el1_error_invalid)
57460ffc30dSCatalin Marinas
57560ffc30dSCatalin Marinas/*
57660ffc30dSCatalin Marinas * EL1 mode handlers.
57760ffc30dSCatalin Marinas */
/*
 * Synchronous exception taken from EL1h: save state, hand off to the C
 * handler (el1_sync_handler) with x0 = pt_regs, then restore and eret.
 */
57860ffc30dSCatalin Marinas	.align	6
5790ccbd98aSMark BrownSYM_CODE_START_LOCAL_NOALIGN(el1_sync)
58060ffc30dSCatalin Marinas	kernel_entry 1
58160ffc30dSCatalin Marinas	mov	x0, sp
582ed3768dbSMark Rutland	bl	el1_sync_handler
5830bf0f444SWill Deacon	kernel_exit 1
5840ccbd98aSMark BrownSYM_CODE_END(el1_sync)
58560ffc30dSCatalin Marinas
/*
 * IRQ taken from EL1: save state, raise the PMR mask (pseudo-NMI),
 * unmask debug/SError/FIQ (enable_da_f), record NMI entry/exit when the
 * interrupted context had IRQs priority-masked, run the handler on the
 * IRQ stack, optionally preempt (CONFIG_PREEMPTION), and return.
 */
58660ffc30dSCatalin Marinas	.align	6
5870ccbd98aSMark BrownSYM_CODE_START_LOCAL_NOALIGN(el1_irq)
58860ffc30dSCatalin Marinas	kernel_entry 1
589bd82d4bdSJulien Thierry	gic_prio_irq_setup pmr=x20, tmp=x1
590b282e1ceSJames Morse	enable_da_f
59117ce302fSJulien Thierry
592c25349fdSJulien Thierry#ifdef CONFIG_ARM64_PSEUDO_NMI
	// IRQs were masked in the interrupted context => this must be a
	// pseudo-NMI; tell the NMI accounting code.
59317ce302fSJulien Thierry	test_irqs_unmasked	res=x0, pmr=x20
59417ce302fSJulien Thierry	cbz	x0, 1f
59517ce302fSJulien Thierry	bl	asm_nmi_enter
596c25349fdSJulien Thierry1:
59760ffc30dSCatalin Marinas#endif
59864681787SMarc Zyngier
59917ce302fSJulien Thierry#ifdef CONFIG_TRACE_IRQFLAGS
60017ce302fSJulien Thierry	bl	trace_hardirqs_off
60117ce302fSJulien Thierry#endif
60217ce302fSJulien Thierry
60364681787SMarc Zyngier	irq_handler
60464681787SMarc Zyngier
6057ef858daSThomas Gleixner#ifdef CONFIG_PREEMPTION
6067faa313fSWill Deacon	ldr	x24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
6071234ad68SJulien Thierryalternative_if ARM64_HAS_IRQ_PRIO_MASKING
6081234ad68SJulien Thierry	/*
6091234ad68SJulien Thierry	 * DA_F were cleared at start of handling. If anything is set in DAIF,
6101234ad68SJulien Thierry	 * we come back from an NMI, so skip preemption
6111234ad68SJulien Thierry	 */
6121234ad68SJulien Thierry	mrs	x0, daif
6131234ad68SJulien Thierry	orr	x24, x24, x0
6141234ad68SJulien Thierryalternative_else_nop_endif
6151234ad68SJulien Thierry	cbnz	x24, 1f				// preempt count != 0 || NMI return path
61619c95f26SJulien Thierry	bl	arm64_preempt_schedule_irq	// irq en/disable is done inside
61760ffc30dSCatalin Marinas1:
61860ffc30dSCatalin Marinas#endif
61917ce302fSJulien Thierry
620c25349fdSJulien Thierry#ifdef CONFIG_ARM64_PSEUDO_NMI
621c25349fdSJulien Thierry	/*
622bd82d4bdSJulien Thierry	 * When using IRQ priority masking, we can get spurious interrupts while
623bd82d4bdSJulien Thierry	 * PMR is set to GIC_PRIO_IRQOFF. An NMI might also have occurred in a
624bd82d4bdSJulien Thierry	 * section with interrupts disabled. Skip tracing in those cases.
625c25349fdSJulien Thierry	 */
62617ce302fSJulien Thierry	test_irqs_unmasked	res=x0, pmr=x20
62717ce302fSJulien Thierry	cbz	x0, 1f
62817ce302fSJulien Thierry	bl	asm_nmi_exit
62917ce302fSJulien Thierry1:
63017ce302fSJulien Thierry#endif
63117ce302fSJulien Thierry
63217ce302fSJulien Thierry#ifdef CONFIG_TRACE_IRQFLAGS
63317ce302fSJulien Thierry#ifdef CONFIG_ARM64_PSEUDO_NMI
63417ce302fSJulien Thierry	test_irqs_unmasked	res=x0, pmr=x20
63517ce302fSJulien Thierry	cbnz	x0, 1f
63660ffc30dSCatalin Marinas#endif
637c25349fdSJulien Thierry	bl	trace_hardirqs_on
638c25349fdSJulien Thierry1:
639c25349fdSJulien Thierry#endif
640c25349fdSJulien Thierry
64160ffc30dSCatalin Marinas	kernel_exit 1
6420ccbd98aSMark BrownSYM_CODE_END(el1_irq)
64360ffc30dSCatalin Marinas
64460ffc30dSCatalin Marinas/*
64560ffc30dSCatalin Marinas * EL0 mode handlers.
64660ffc30dSCatalin Marinas */
/*
 * Synchronous exception taken from 64-bit EL0: save state, let the C
 * code (el0_sync_handler) decode ESR and dispatch, then leave via
 * ret_to_user.
 */
64760ffc30dSCatalin Marinas	.align	6
6480ccbd98aSMark BrownSYM_CODE_START_LOCAL_NOALIGN(el0_sync)
64960ffc30dSCatalin Marinas	kernel_entry 0
650582f9583SMark Rutland	mov	x0, sp
651582f9583SMark Rutland	bl	el0_sync_handler
652582f9583SMark Rutland	b	ret_to_user
6530ccbd98aSMark BrownSYM_CODE_END(el0_sync)
65460ffc30dSCatalin Marinas
65560ffc30dSCatalin Marinas#ifdef CONFIG_COMPAT
65660ffc30dSCatalin Marinas	.align	6
6570ccbd98aSMark BrownSYM_CODE_START_LOCAL_NOALIGN(el0_sync_compat)
65860ffc30dSCatalin Marinas	kernel_entry 0, 32
6593b714275SMark Rutland	mov	x0, sp
660582f9583SMark Rutland	bl	el0_sync_compat_handler
6613b714275SMark Rutland	b	ret_to_user
6620ccbd98aSMark BrownSYM_CODE_END(el0_sync_compat)
66360ffc30dSCatalin Marinas
66460ffc30dSCatalin Marinas	.align	6
6650ccbd98aSMark BrownSYM_CODE_START_LOCAL_NOALIGN(el0_irq_compat)
66660ffc30dSCatalin Marinas	kernel_entry 0, 32
66760ffc30dSCatalin Marinas	b	el0_irq_naked
6680ccbd98aSMark BrownSYM_CODE_END(el0_irq_compat)
669a92d4d14SXie XiuQi
6700ccbd98aSMark BrownSYM_CODE_START_LOCAL_NOALIGN(el0_error_compat)
671a92d4d14SXie XiuQi	kernel_entry 0, 32
672a92d4d14SXie XiuQi	b	el0_error_naked
6730ccbd98aSMark BrownSYM_CODE_END(el0_error_compat)
67460ffc30dSCatalin Marinas#endif
67560ffc30dSCatalin Marinas
67660ffc30dSCatalin Marinas	.align	6
6770ccbd98aSMark BrownSYM_CODE_START_LOCAL_NOALIGN(el0_irq)
67860ffc30dSCatalin Marinas	kernel_entry 0
67960ffc30dSCatalin Marinasel0_irq_naked:
680bd82d4bdSJulien Thierry	gic_prio_irq_setup pmr=x20, tmp=x0
6812671828cSJames Morse	ct_user_exit_irqoff
682b282e1ceSJames Morse	enable_da_f
683bd82d4bdSJulien Thierry
68460ffc30dSCatalin Marinas#ifdef CONFIG_TRACE_IRQFLAGS
68560ffc30dSCatalin Marinas	bl	trace_hardirqs_off
68660ffc30dSCatalin Marinas#endif
68764681787SMarc Zyngier
68830d88c0eSWill Deacon#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
68930d88c0eSWill Deacon	tbz	x22, #55, 1f
69030d88c0eSWill Deacon	bl	do_el0_irq_bp_hardening
69130d88c0eSWill Deacon1:
69230d88c0eSWill Deacon#endif
69360ffc30dSCatalin Marinas	irq_handler
69464681787SMarc Zyngier
69560ffc30dSCatalin Marinas#ifdef CONFIG_TRACE_IRQFLAGS
69660ffc30dSCatalin Marinas	bl	trace_hardirqs_on
69760ffc30dSCatalin Marinas#endif
69860ffc30dSCatalin Marinas	b	ret_to_user
6990ccbd98aSMark BrownSYM_CODE_END(el0_irq)
70060ffc30dSCatalin Marinas
7010ccbd98aSMark BrownSYM_CODE_START_LOCAL(el1_error)
702a92d4d14SXie XiuQi	kernel_entry 1
703a92d4d14SXie XiuQi	mrs	x1, esr_el1
704bd82d4bdSJulien Thierry	gic_prio_kentry_setup tmp=x2
705a92d4d14SXie XiuQi	enable_dbg
706a92d4d14SXie XiuQi	mov	x0, sp
707a92d4d14SXie XiuQi	bl	do_serror
708a92d4d14SXie XiuQi	kernel_exit 1
7090ccbd98aSMark BrownSYM_CODE_END(el1_error)
710a92d4d14SXie XiuQi
7110ccbd98aSMark BrownSYM_CODE_START_LOCAL(el0_error)
712a92d4d14SXie XiuQi	kernel_entry 0
713a92d4d14SXie XiuQiel0_error_naked:
7142671828cSJames Morse	mrs	x25, esr_el1
715bd82d4bdSJulien Thierry	gic_prio_kentry_setup tmp=x2
7162671828cSJames Morse	ct_user_exit_irqoff
717a92d4d14SXie XiuQi	enable_dbg
718a92d4d14SXie XiuQi	mov	x0, sp
7192671828cSJames Morse	mov	x1, x25
720a92d4d14SXie XiuQi	bl	do_serror
7219034f625SJulien Thierry	enable_da_f
722a92d4d14SXie XiuQi	b	ret_to_user
7230ccbd98aSMark BrownSYM_CODE_END(el0_error)
724a92d4d14SXie XiuQi
72560ffc30dSCatalin Marinas/*
72660ffc30dSCatalin Marinas * Ok, we need to do extra processing, enter the slow path.
72760ffc30dSCatalin Marinas */
72860ffc30dSCatalin Marinaswork_pending:
72960ffc30dSCatalin Marinas	mov	x0, sp				// 'regs'
73060ffc30dSCatalin Marinas	bl	do_notify_resume
731db3899a6SCatalin Marinas#ifdef CONFIG_TRACE_IRQFLAGS
732421dd6faSChris Metcalf	bl	trace_hardirqs_on		// enabled while in userspace
733db3899a6SCatalin Marinas#endif
734c02433ddSMark Rutland	ldr	x1, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
735421dd6faSChris Metcalf	b	finish_ret_to_user
73660ffc30dSCatalin Marinas/*
73760ffc30dSCatalin Marinas * "slow" syscall return path.
73860ffc30dSCatalin Marinas */
73959dc67b0SCatalin Marinasret_to_user:
7408d66772eSJames Morse	disable_daif
741bd82d4bdSJulien Thierry	gic_prio_kentry_setup tmp=x3
742c02433ddSMark Rutland	ldr	x1, [tsk, #TSK_TI_FLAGS]
74360ffc30dSCatalin Marinas	and	x2, x1, #_TIF_WORK_MASK
74460ffc30dSCatalin Marinas	cbnz	x2, work_pending
745421dd6faSChris Metcalffinish_ret_to_user:
7462a283070SWill Deacon	enable_step_tsk x1, x2
7470b3e3366SLaura Abbott#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
7480b3e3366SLaura Abbott	bl	stackleak_erase
7490b3e3366SLaura Abbott#endif
750412fcb6cSWill Deacon	kernel_exit 0
75160ffc30dSCatalin MarinasENDPROC(ret_to_user)
75260ffc30dSCatalin Marinas
753888b3c87SPratyush Anand	.popsection				// .entry.text
754888b3c87SPratyush Anand
755c7b9adafSWill Deacon#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
756c7b9adafSWill Deacon/*
757c7b9adafSWill Deacon * Exception vectors trampoline.
758c7b9adafSWill Deacon */
759c7b9adafSWill Deacon	.pushsection ".entry.tramp.text", "ax"
760c7b9adafSWill Deacon
761c7b9adafSWill Deacon	.macro tramp_map_kernel, tmp
762c7b9adafSWill Deacon	mrs	\tmp, ttbr1_el1
7631e1b8c04SSteve Capper	add	\tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE)
764c7b9adafSWill Deacon	bic	\tmp, \tmp, #USER_ASID_FLAG
765c7b9adafSWill Deacon	msr	ttbr1_el1, \tmp
766d1777e68SWill Deacon#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
767d1777e68SWill Deaconalternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
768d1777e68SWill Deacon	/* ASID already in \tmp[63:48] */
769d1777e68SWill Deacon	movk	\tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)
770d1777e68SWill Deacon	movk	\tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)
771d1777e68SWill Deacon	/* 2MB boundary containing the vectors, so we nobble the walk cache */
772d1777e68SWill Deacon	movk	\tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12)
773d1777e68SWill Deacon	isb
774d1777e68SWill Deacon	tlbi	vae1, \tmp
775d1777e68SWill Deacon	dsb	nsh
776d1777e68SWill Deaconalternative_else_nop_endif
777d1777e68SWill Deacon#endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */
778c7b9adafSWill Deacon	.endm
779c7b9adafSWill Deacon
780c7b9adafSWill Deacon	.macro tramp_unmap_kernel, tmp
781c7b9adafSWill Deacon	mrs	\tmp, ttbr1_el1
7821e1b8c04SSteve Capper	sub	\tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE)
783c7b9adafSWill Deacon	orr	\tmp, \tmp, #USER_ASID_FLAG
784c7b9adafSWill Deacon	msr	ttbr1_el1, \tmp
785c7b9adafSWill Deacon	/*
786f167211aSWill Deacon	 * We avoid running the post_ttbr_update_workaround here because
787f167211aSWill Deacon	 * it's only needed by Cavium ThunderX, which requires KPTI to be
788f167211aSWill Deacon	 * disabled.
789c7b9adafSWill Deacon	 */
790c7b9adafSWill Deacon	.endm
791c7b9adafSWill Deacon
792c7b9adafSWill Deacon	.macro tramp_ventry, regsize = 64
793c7b9adafSWill Deacon	.align	7
794c7b9adafSWill Deacon1:
795c7b9adafSWill Deacon	.if	\regsize == 64
796c7b9adafSWill Deacon	msr	tpidrro_el0, x30	// Restored in kernel_ventry
797c7b9adafSWill Deacon	.endif
798be04a6d1SWill Deacon	/*
799be04a6d1SWill Deacon	 * Defend against branch aliasing attacks by pushing a dummy
800be04a6d1SWill Deacon	 * entry onto the return stack and using a RET instruction to
801be04a6d1SWill Deacon	 * enter the full-fat kernel vectors.
802be04a6d1SWill Deacon	 */
803be04a6d1SWill Deacon	bl	2f
804be04a6d1SWill Deacon	b	.
805be04a6d1SWill Deacon2:
806c7b9adafSWill Deacon	tramp_map_kernel	x30
8076c27c408SWill Deacon#ifdef CONFIG_RANDOMIZE_BASE
8086c27c408SWill Deacon	adr	x30, tramp_vectors + PAGE_SIZE
8096c27c408SWill Deaconalternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
8106c27c408SWill Deacon	ldr	x30, [x30]
8116c27c408SWill Deacon#else
812c7b9adafSWill Deacon	ldr	x30, =vectors
8136c27c408SWill Deacon#endif
8149405447eSMarc Zyngieralternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM
815c7b9adafSWill Deacon	prfm	plil1strm, [x30, #(1b - tramp_vectors)]
8169405447eSMarc Zyngieralternative_else_nop_endif
817c7b9adafSWill Deacon	msr	vbar_el1, x30
818c7b9adafSWill Deacon	add	x30, x30, #(1b - tramp_vectors)
819c7b9adafSWill Deacon	isb
820be04a6d1SWill Deacon	ret
821c7b9adafSWill Deacon	.endm
822c7b9adafSWill Deacon
823c7b9adafSWill Deacon	.macro tramp_exit, regsize = 64
824c7b9adafSWill Deacon	adr	x30, tramp_vectors
825c7b9adafSWill Deacon	msr	vbar_el1, x30
826c7b9adafSWill Deacon	tramp_unmap_kernel	x30
827c7b9adafSWill Deacon	.if	\regsize == 64
828c7b9adafSWill Deacon	mrs	x30, far_el1
829c7b9adafSWill Deacon	.endif
830c7b9adafSWill Deacon	eret
831679db708SWill Deacon	sb
832c7b9adafSWill Deacon	.endm
833c7b9adafSWill Deacon
834c7b9adafSWill Deacon	.align	11
835c7b9adafSWill DeaconENTRY(tramp_vectors)
836c7b9adafSWill Deacon	.space	0x400
837c7b9adafSWill Deacon
838c7b9adafSWill Deacon	tramp_ventry
839c7b9adafSWill Deacon	tramp_ventry
840c7b9adafSWill Deacon	tramp_ventry
841c7b9adafSWill Deacon	tramp_ventry
842c7b9adafSWill Deacon
843c7b9adafSWill Deacon	tramp_ventry	32
844c7b9adafSWill Deacon	tramp_ventry	32
845c7b9adafSWill Deacon	tramp_ventry	32
846c7b9adafSWill Deacon	tramp_ventry	32
847c7b9adafSWill DeaconEND(tramp_vectors)
848c7b9adafSWill Deacon
849c7b9adafSWill DeaconENTRY(tramp_exit_native)
850c7b9adafSWill Deacon	tramp_exit
851c7b9adafSWill DeaconEND(tramp_exit_native)
852c7b9adafSWill Deacon
853c7b9adafSWill DeaconENTRY(tramp_exit_compat)
854c7b9adafSWill Deacon	tramp_exit	32
855c7b9adafSWill DeaconEND(tramp_exit_compat)
856c7b9adafSWill Deacon
857c7b9adafSWill Deacon	.ltorg
858c7b9adafSWill Deacon	.popsection				// .entry.tramp.text
8596c27c408SWill Deacon#ifdef CONFIG_RANDOMIZE_BASE
8606c27c408SWill Deacon	.pushsection ".rodata", "a"
8616c27c408SWill Deacon	.align PAGE_SHIFT
8626c27c408SWill Deacon	.globl	__entry_tramp_data_start
8636c27c408SWill Deacon__entry_tramp_data_start:
8646c27c408SWill Deacon	.quad	vectors
8656c27c408SWill Deacon	.popsection				// .rodata
8666c27c408SWill Deacon#endif /* CONFIG_RANDOMIZE_BASE */
867c7b9adafSWill Deacon#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
868c7b9adafSWill Deacon
86960ffc30dSCatalin Marinas/*
870ed84b4e9SMark Rutland * Register switch for AArch64. The callee-saved registers need to be saved
871ed84b4e9SMark Rutland * and restored. On entry:
872ed84b4e9SMark Rutland *   x0 = previous task_struct (must be preserved across the switch)
873ed84b4e9SMark Rutland *   x1 = next task_struct
874ed84b4e9SMark Rutland * Previous and next are guaranteed not to be the same.
875ed84b4e9SMark Rutland *
876ed84b4e9SMark Rutland */
877ed84b4e9SMark RutlandENTRY(cpu_switch_to)
878ed84b4e9SMark Rutland	mov	x10, #THREAD_CPU_CONTEXT
879ed84b4e9SMark Rutland	add	x8, x0, x10
880ed84b4e9SMark Rutland	mov	x9, sp
881ed84b4e9SMark Rutland	stp	x19, x20, [x8], #16		// store callee-saved registers
882ed84b4e9SMark Rutland	stp	x21, x22, [x8], #16
883ed84b4e9SMark Rutland	stp	x23, x24, [x8], #16
884ed84b4e9SMark Rutland	stp	x25, x26, [x8], #16
885ed84b4e9SMark Rutland	stp	x27, x28, [x8], #16
886ed84b4e9SMark Rutland	stp	x29, x9, [x8], #16
887ed84b4e9SMark Rutland	str	lr, [x8]
888ed84b4e9SMark Rutland	add	x8, x1, x10
889ed84b4e9SMark Rutland	ldp	x19, x20, [x8], #16		// restore callee-saved registers
890ed84b4e9SMark Rutland	ldp	x21, x22, [x8], #16
891ed84b4e9SMark Rutland	ldp	x23, x24, [x8], #16
892ed84b4e9SMark Rutland	ldp	x25, x26, [x8], #16
893ed84b4e9SMark Rutland	ldp	x27, x28, [x8], #16
894ed84b4e9SMark Rutland	ldp	x29, x9, [x8], #16
895ed84b4e9SMark Rutland	ldr	lr, [x8]
896ed84b4e9SMark Rutland	mov	sp, x9
897ed84b4e9SMark Rutland	msr	sp_el0, x1
898ed84b4e9SMark Rutland	ret
899ed84b4e9SMark RutlandENDPROC(cpu_switch_to)
900ed84b4e9SMark RutlandNOKPROBE(cpu_switch_to)
901ed84b4e9SMark Rutland
902ed84b4e9SMark Rutland/*
903ed84b4e9SMark Rutland * This is how we return from a fork.
904ed84b4e9SMark Rutland */
905*c3357fc5SMark BrownSYM_CODE_START(ret_from_fork)
906ed84b4e9SMark Rutland	bl	schedule_tail
907ed84b4e9SMark Rutland	cbz	x19, 1f				// not a kernel thread
908ed84b4e9SMark Rutland	mov	x0, x20
909ed84b4e9SMark Rutland	blr	x19
9104caf8758SJulien Thierry1:	get_current_task tsk
911ed84b4e9SMark Rutland	b	ret_to_user
912*c3357fc5SMark BrownSYM_CODE_END(ret_from_fork)
913ed84b4e9SMark RutlandNOKPROBE(ret_from_fork)
914f5df2696SJames Morse
915f5df2696SJames Morse#ifdef CONFIG_ARM_SDE_INTERFACE
916f5df2696SJames Morse
917f5df2696SJames Morse#include <asm/sdei.h>
918f5df2696SJames Morse#include <uapi/linux/arm_sdei.h>
919f5df2696SJames Morse
92079e9aa59SJames Morse.macro sdei_handler_exit exit_mode
92179e9aa59SJames Morse	/* On success, this call never returns... */
92279e9aa59SJames Morse	cmp	\exit_mode, #SDEI_EXIT_SMC
92379e9aa59SJames Morse	b.ne	99f
92479e9aa59SJames Morse	smc	#0
92579e9aa59SJames Morse	b	.
92679e9aa59SJames Morse99:	hvc	#0
92779e9aa59SJames Morse	b	.
92879e9aa59SJames Morse.endm
92979e9aa59SJames Morse
93079e9aa59SJames Morse#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
93179e9aa59SJames Morse/*
93279e9aa59SJames Morse * The regular SDEI entry point may have been unmapped along with the rest of
93379e9aa59SJames Morse * the kernel. This trampoline restores the kernel mapping to make the x1 memory
93479e9aa59SJames Morse * argument accessible.
93579e9aa59SJames Morse *
93679e9aa59SJames Morse * This clobbers x4, __sdei_handler() will restore this from firmware's
93779e9aa59SJames Morse * copy.
93879e9aa59SJames Morse */
93979e9aa59SJames Morse.ltorg
94079e9aa59SJames Morse.pushsection ".entry.tramp.text", "ax"
94179e9aa59SJames MorseENTRY(__sdei_asm_entry_trampoline)
94279e9aa59SJames Morse	mrs	x4, ttbr1_el1
94379e9aa59SJames Morse	tbz	x4, #USER_ASID_BIT, 1f
94479e9aa59SJames Morse
94579e9aa59SJames Morse	tramp_map_kernel tmp=x4
94679e9aa59SJames Morse	isb
94779e9aa59SJames Morse	mov	x4, xzr
94879e9aa59SJames Morse
94979e9aa59SJames Morse	/*
95079e9aa59SJames Morse	 * Use reg->interrupted_regs.addr_limit to remember whether to unmap
95179e9aa59SJames Morse	 * the kernel on exit.
95279e9aa59SJames Morse	 */
95379e9aa59SJames Morse1:	str	x4, [x1, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
95479e9aa59SJames Morse
95579e9aa59SJames Morse#ifdef CONFIG_RANDOMIZE_BASE
95679e9aa59SJames Morse	adr	x4, tramp_vectors + PAGE_SIZE
95779e9aa59SJames Morse	add	x4, x4, #:lo12:__sdei_asm_trampoline_next_handler
95879e9aa59SJames Morse	ldr	x4, [x4]
95979e9aa59SJames Morse#else
96079e9aa59SJames Morse	ldr	x4, =__sdei_asm_handler
96179e9aa59SJames Morse#endif
96279e9aa59SJames Morse	br	x4
96379e9aa59SJames MorseENDPROC(__sdei_asm_entry_trampoline)
96479e9aa59SJames MorseNOKPROBE(__sdei_asm_entry_trampoline)
96579e9aa59SJames Morse
96679e9aa59SJames Morse/*
96779e9aa59SJames Morse * Make the exit call and restore the original ttbr1_el1
96879e9aa59SJames Morse *
96979e9aa59SJames Morse * x0 & x1: setup for the exit API call
97079e9aa59SJames Morse * x2: exit_mode
97179e9aa59SJames Morse * x4: struct sdei_registered_event argument from registration time.
97279e9aa59SJames Morse */
97379e9aa59SJames MorseENTRY(__sdei_asm_exit_trampoline)
97479e9aa59SJames Morse	ldr	x4, [x4, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
97579e9aa59SJames Morse	cbnz	x4, 1f
97679e9aa59SJames Morse
97779e9aa59SJames Morse	tramp_unmap_kernel	tmp=x4
97879e9aa59SJames Morse
97979e9aa59SJames Morse1:	sdei_handler_exit exit_mode=x2
98079e9aa59SJames MorseENDPROC(__sdei_asm_exit_trampoline)
98179e9aa59SJames MorseNOKPROBE(__sdei_asm_exit_trampoline)
98279e9aa59SJames Morse	.ltorg
98379e9aa59SJames Morse.popsection		// .entry.tramp.text
98479e9aa59SJames Morse#ifdef CONFIG_RANDOMIZE_BASE
98579e9aa59SJames Morse.pushsection ".rodata", "a"
98679e9aa59SJames Morse__sdei_asm_trampoline_next_handler:
98779e9aa59SJames Morse	.quad	__sdei_asm_handler
98879e9aa59SJames Morse.popsection		// .rodata
98979e9aa59SJames Morse#endif /* CONFIG_RANDOMIZE_BASE */
99079e9aa59SJames Morse#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
99179e9aa59SJames Morse
992f5df2696SJames Morse/*
993f5df2696SJames Morse * Software Delegated Exception entry point.
994f5df2696SJames Morse *
995f5df2696SJames Morse * x0: Event number
996f5df2696SJames Morse * x1: struct sdei_registered_event argument from registration time.
997f5df2696SJames Morse * x2: interrupted PC
998f5df2696SJames Morse * x3: interrupted PSTATE
99979e9aa59SJames Morse * x4: maybe clobbered by the trampoline
1000f5df2696SJames Morse *
1001f5df2696SJames Morse * Firmware has preserved x0->x17 for us, we must save/restore the rest to
1002f5df2696SJames Morse * follow SMC-CC. We save (or retrieve) all the registers as the handler may
1003f5df2696SJames Morse * want them.
1004f5df2696SJames Morse */
1005f5df2696SJames MorseENTRY(__sdei_asm_handler)
1006f5df2696SJames Morse	stp     x2, x3, [x1, #SDEI_EVENT_INTREGS + S_PC]
1007f5df2696SJames Morse	stp     x4, x5, [x1, #SDEI_EVENT_INTREGS + 16 * 2]
1008f5df2696SJames Morse	stp     x6, x7, [x1, #SDEI_EVENT_INTREGS + 16 * 3]
1009f5df2696SJames Morse	stp     x8, x9, [x1, #SDEI_EVENT_INTREGS + 16 * 4]
1010f5df2696SJames Morse	stp     x10, x11, [x1, #SDEI_EVENT_INTREGS + 16 * 5]
1011f5df2696SJames Morse	stp     x12, x13, [x1, #SDEI_EVENT_INTREGS + 16 * 6]
1012f5df2696SJames Morse	stp     x14, x15, [x1, #SDEI_EVENT_INTREGS + 16 * 7]
1013f5df2696SJames Morse	stp     x16, x17, [x1, #SDEI_EVENT_INTREGS + 16 * 8]
1014f5df2696SJames Morse	stp     x18, x19, [x1, #SDEI_EVENT_INTREGS + 16 * 9]
1015f5df2696SJames Morse	stp     x20, x21, [x1, #SDEI_EVENT_INTREGS + 16 * 10]
1016f5df2696SJames Morse	stp     x22, x23, [x1, #SDEI_EVENT_INTREGS + 16 * 11]
1017f5df2696SJames Morse	stp     x24, x25, [x1, #SDEI_EVENT_INTREGS + 16 * 12]
1018f5df2696SJames Morse	stp     x26, x27, [x1, #SDEI_EVENT_INTREGS + 16 * 13]
1019f5df2696SJames Morse	stp     x28, x29, [x1, #SDEI_EVENT_INTREGS + 16 * 14]
1020f5df2696SJames Morse	mov	x4, sp
1021f5df2696SJames Morse	stp     lr, x4, [x1, #SDEI_EVENT_INTREGS + S_LR]
1022f5df2696SJames Morse
1023f5df2696SJames Morse	mov	x19, x1
1024f5df2696SJames Morse
1025f5df2696SJames Morse#ifdef CONFIG_VMAP_STACK
1026f5df2696SJames Morse	/*
1027f5df2696SJames Morse	 * entry.S may have been using sp as a scratch register, find whether
1028f5df2696SJames Morse	 * this is a normal or critical event and switch to the appropriate
1029f5df2696SJames Morse	 * stack for this CPU.
1030f5df2696SJames Morse	 */
1031f5df2696SJames Morse	ldrb	w4, [x19, #SDEI_EVENT_PRIORITY]
1032f5df2696SJames Morse	cbnz	w4, 1f
1033f5df2696SJames Morse	ldr_this_cpu dst=x5, sym=sdei_stack_normal_ptr, tmp=x6
1034f5df2696SJames Morse	b	2f
1035f5df2696SJames Morse1:	ldr_this_cpu dst=x5, sym=sdei_stack_critical_ptr, tmp=x6
1036f5df2696SJames Morse2:	mov	x6, #SDEI_STACK_SIZE
1037f5df2696SJames Morse	add	x5, x5, x6
1038f5df2696SJames Morse	mov	sp, x5
1039f5df2696SJames Morse#endif
1040f5df2696SJames Morse
1041f5df2696SJames Morse	/*
1042f5df2696SJames Morse	 * We may have interrupted userspace, or a guest, or exit-from or
1043f5df2696SJames Morse	 * return-to either of these. We can't trust sp_el0, restore it.
1044f5df2696SJames Morse	 */
1045f5df2696SJames Morse	mrs	x28, sp_el0
1046f5df2696SJames Morse	ldr_this_cpu	dst=x0, sym=__entry_task, tmp=x1
1047f5df2696SJames Morse	msr	sp_el0, x0
1048f5df2696SJames Morse
1049f5df2696SJames Morse	/* If we interrupted the kernel point to the previous stack/frame. */
1050f5df2696SJames Morse	and     x0, x3, #0xc
1051f5df2696SJames Morse	mrs     x1, CurrentEL
1052f5df2696SJames Morse	cmp     x0, x1
1053f5df2696SJames Morse	csel	x29, x29, xzr, eq	// fp, or zero
1054f5df2696SJames Morse	csel	x4, x2, xzr, eq		// elr, or zero
1055f5df2696SJames Morse
1056f5df2696SJames Morse	stp	x29, x4, [sp, #-16]!
1057f5df2696SJames Morse	mov	x29, sp
1058f5df2696SJames Morse
1059f5df2696SJames Morse	add	x0, x19, #SDEI_EVENT_INTREGS
1060f5df2696SJames Morse	mov	x1, x19
1061f5df2696SJames Morse	bl	__sdei_handler
1062f5df2696SJames Morse
1063f5df2696SJames Morse	msr	sp_el0, x28
1064f5df2696SJames Morse	/* restore regs >x17 that we clobbered */
106579e9aa59SJames Morse	mov	x4, x19         // keep x4 for __sdei_asm_exit_trampoline
106679e9aa59SJames Morse	ldp	x28, x29, [x4, #SDEI_EVENT_INTREGS + 16 * 14]
106779e9aa59SJames Morse	ldp	x18, x19, [x4, #SDEI_EVENT_INTREGS + 16 * 9]
106879e9aa59SJames Morse	ldp	lr, x1, [x4, #SDEI_EVENT_INTREGS + S_LR]
106979e9aa59SJames Morse	mov	sp, x1
1070f5df2696SJames Morse
1071f5df2696SJames Morse	mov	x1, x0			// address to complete_and_resume
1072f5df2696SJames Morse	/* x0 = (x0 <= 1) ? EVENT_COMPLETE:EVENT_COMPLETE_AND_RESUME */
1073f5df2696SJames Morse	cmp	x0, #1
1074f5df2696SJames Morse	mov_q	x2, SDEI_1_0_FN_SDEI_EVENT_COMPLETE
1075f5df2696SJames Morse	mov_q	x3, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
1076f5df2696SJames Morse	csel	x0, x2, x3, ls
1077f5df2696SJames Morse
1078f5df2696SJames Morse	ldr_l	x2, sdei_exit_mode
107979e9aa59SJames Morse
108079e9aa59SJames Morsealternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
108179e9aa59SJames Morse	sdei_handler_exit exit_mode=x2
108279e9aa59SJames Morsealternative_else_nop_endif
108379e9aa59SJames Morse
108479e9aa59SJames Morse#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
108579e9aa59SJames Morse	tramp_alias	dst=x5, sym=__sdei_asm_exit_trampoline
108679e9aa59SJames Morse	br	x5
108779e9aa59SJames Morse#endif
1088f5df2696SJames MorseENDPROC(__sdei_asm_handler)
1089f5df2696SJames MorseNOKPROBE(__sdei_asm_handler)
1090f5df2696SJames Morse#endif /* CONFIG_ARM_SDE_INTERFACE */
1091