xref: /openbmc/linux/arch/arm64/kernel/entry.S (revision 7a846d3c)
1/*
2 * Low-level exception handling code
3 *
4 * Copyright (C) 2012 ARM Ltd.
5 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
6 *		Will Deacon <will.deacon@arm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
19 */
20
21#include <linux/arm-smccc.h>
22#include <linux/init.h>
23#include <linux/linkage.h>
24
25#include <asm/alternative.h>
26#include <asm/assembler.h>
27#include <asm/asm-offsets.h>
28#include <asm/cpufeature.h>
29#include <asm/errno.h>
30#include <asm/esr.h>
31#include <asm/irq.h>
32#include <asm/memory.h>
33#include <asm/mmu.h>
34#include <asm/processor.h>
35#include <asm/ptrace.h>
36#include <asm/thread_info.h>
37#include <asm/asm-uaccess.h>
38#include <asm/unistd.h>
39
40/*
41 * Context tracking subsystem.  Used to instrument transitions
42 * between user and kernel mode.
43 */
44	.macro ct_user_exit, syscall = 0
45#ifdef CONFIG_CONTEXT_TRACKING
46	bl	context_tracking_user_exit
47	.if \syscall == 1
48	/*
49	 * Save/restore needed during syscalls.  Restore syscall arguments from
50	 * the values already saved on stack during kernel_entry.
51	 */
52	ldp	x0, x1, [sp]
53	ldp	x2, x3, [sp, #S_X2]
54	ldp	x4, x5, [sp, #S_X4]
55	ldp	x6, x7, [sp, #S_X6]
56	.endif
57#endif
58	.endm
59
60	.macro ct_user_enter
61#ifdef CONFIG_CONTEXT_TRACKING
62	bl	context_tracking_user_enter
63#endif
64	.endm
65
66/*
67 * Bad Abort numbers
68 *-----------------
69 */
70#define BAD_SYNC	0
71#define BAD_IRQ		1
72#define BAD_FIQ		2
73#define BAD_ERROR	3
74
75	.macro kernel_ventry, el, label, regsize = 64
76	.align 7
77#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
78alternative_if ARM64_UNMAP_KERNEL_AT_EL0
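	/*
	 * With KPTI, exceptions from 64-bit EL0 enter via the trampoline
	 * vectors, which stash the real x30 in tpidrro_el0 (see tramp_ventry);
	 * recover it here and scrub the register. For 32-bit EL0 there is no
	 * x30 to preserve, so it is simply zeroed.
	 */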
79	.if	\el == 0
80	.if	\regsize == 64
81	mrs	x30, tpidrro_el0
82	msr	tpidrro_el0, xzr
83	.else
84	mov	x30, xzr
85	.endif
86	.endif
87alternative_else_nop_endif
88#endif
89
90	sub	sp, sp, #S_FRAME_SIZE
91#ifdef CONFIG_VMAP_STACK
92	/*
93	 * Test whether the SP has overflowed, without corrupting a GPR.
94	 * Task and IRQ stacks are aligned to (1 << THREAD_SHIFT).
95	 */
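	/*
	 * For example, with 16KiB stacks (THREAD_SHIFT == 14): VMAP'd stacks
	 * are aligned to 2 * THREAD_SIZE, so any SP still inside the stack has
	 * bit 14 clear, while an SP that has run off the bottom (by less than
	 * THREAD_SIZE, which is all the sub above can do) has bit 14 set. The
	 * add/sub pair below recreates that candidate SP in x0 so tbnz can
	 * test the bit, and the final sub/sub undoes the dance, leaving sp and
	 * x0 with their original values.
	 */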
96	add	sp, sp, x0			// sp' = sp + x0
97	sub	x0, sp, x0			// x0' = sp' - x0 = (sp + x0) - x0 = sp
98	tbnz	x0, #THREAD_SHIFT, 0f
99	sub	x0, sp, x0			// x0'' = sp' - x0' = (sp + x0) - sp = x0
100	sub	sp, sp, x0			// sp'' = sp' - x0 = (sp + x0) - x0 = sp
101	b	el\()\el\()_\label
102
1030:
104	/*
105	 * Either we've just detected an overflow, or we've taken an exception
106	 * while on the overflow stack. Either way, we won't return to
107	 * userspace, and can clobber EL0 registers to free up GPRs.
108	 */
109
110	/* Stash the original SP (minus S_FRAME_SIZE) in tpidr_el0. */
111	msr	tpidr_el0, x0
112
113	/* Recover the original x0 value and stash it in tpidrro_el0 */
114	sub	x0, sp, x0
115	msr	tpidrro_el0, x0
116
117	/* Switch to the overflow stack */
118	adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0
119
120	/*
121	 * Check whether we were already on the overflow stack. This may happen
122	 * after panic() re-enables interrupts.
123	 */
124	mrs	x0, tpidr_el0			// sp of interrupted context
125	sub	x0, sp, x0			// delta with top of overflow stack
126	tst	x0, #~(OVERFLOW_STACK_SIZE - 1)	// within range?
127	b.ne	__bad_stack			// no? -> bad stack pointer
128
129	/* We were already on the overflow stack. Restore sp/x0 and carry on. */
130	sub	sp, sp, x0
131	mrs	x0, tpidrro_el0
132#endif
133	b	el\()\el\()_\label
134	.endm
135
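	// Compute the .entry.tramp.text alias of \sym: the trampoline text is
	// also mapped at the fixed virtual address TRAMP_VALIAS (via the
	// fixmap), so \dst = TRAMP_VALIAS + (\sym - start of section). E.g.
	// "tramp_alias x30, tramp_exit_native" yields the address of
	// tramp_exit_native within that mapping.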
136	.macro tramp_alias, dst, sym
137	mov_q	\dst, TRAMP_VALIAS
138	add	\dst, \dst, #(\sym - .entry.tramp.text)
139	.endm
140
141	// This macro corrupts x0-x3. It is the caller's duty
142	// to save/restore them if required.
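	// \state is the ARCH_WORKAROUND_2 argument passed on to firmware:
	// 1 turns the mitigation on for kernel execution, 0 turns it back off
	// when returning to userspace. \targ is branched to when no firmware
	// call is needed; \tmp1/\tmp2 are scratch registers chosen by the
	// caller.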
143	.macro	apply_ssbd, state, targ, tmp1, tmp2
144#ifdef CONFIG_ARM64_SSBD
145alternative_cb	arm64_enable_wa2_handling
146	b	\targ
147alternative_cb_end
148	ldr_this_cpu	\tmp2, arm64_ssbd_callback_required, \tmp1
149	cbz	\tmp2, \targ
150	ldr	\tmp2, [tsk, #TSK_TI_FLAGS]
151	tbnz	\tmp2, #TIF_SSBD, \targ
152	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
153	mov	w1, #\state
154alternative_cb	arm64_update_smccc_conduit
155	nop					// Patched to SMC/HVC #0
156alternative_cb_end
157#endif
158	.endm
159
160	.macro	kernel_entry, el, regsize = 64
161	.if	\regsize == 32
162	mov	w0, w0				// zero upper 32 bits of x0
163	.endif
164	stp	x0, x1, [sp, #16 * 0]
165	stp	x2, x3, [sp, #16 * 1]
166	stp	x4, x5, [sp, #16 * 2]
167	stp	x6, x7, [sp, #16 * 3]
168	stp	x8, x9, [sp, #16 * 4]
169	stp	x10, x11, [sp, #16 * 5]
170	stp	x12, x13, [sp, #16 * 6]
171	stp	x14, x15, [sp, #16 * 7]
172	stp	x16, x17, [sp, #16 * 8]
173	stp	x18, x19, [sp, #16 * 9]
174	stp	x20, x21, [sp, #16 * 10]
175	stp	x22, x23, [sp, #16 * 11]
176	stp	x24, x25, [sp, #16 * 12]
177	stp	x26, x27, [sp, #16 * 13]
178	stp	x28, x29, [sp, #16 * 14]
179
180	.if	\el == 0
181	mrs	x21, sp_el0
182	ldr_this_cpu	tsk, __entry_task, x20	// Ensure MDSCR_EL1.SS is clear,
183	ldr	x19, [tsk, #TSK_TI_FLAGS]	// since we can unmask debug
184	disable_step_tsk x19, x20		// exceptions when scheduling.
185
186	apply_ssbd 1, 1f, x22, x23
187
188#ifdef CONFIG_ARM64_SSBD
189	ldp	x0, x1, [sp, #16 * 0]
190	ldp	x2, x3, [sp, #16 * 1]
191#endif
1921:
193
194	mov	x29, xzr			// fp pointed to user-space
195	.else
196	add	x21, sp, #S_FRAME_SIZE
197	get_thread_info tsk
198	/* Save the task's original addr_limit and set USER_DS */
199	ldr	x20, [tsk, #TSK_TI_ADDR_LIMIT]
200	str	x20, [sp, #S_ORIG_ADDR_LIMIT]
201	mov	x20, #USER_DS
202	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
203	/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
204	.endif /* \el == 0 */
205	mrs	x22, elr_el1
206	mrs	x23, spsr_el1
207	stp	lr, x21, [sp, #S_LR]
208
209	/*
210	 * In order to be able to dump the contents of struct pt_regs at the
211	 * time the exception was taken (in case we attempt to walk the call
212	 * stack later), chain it together with the stack frames.
213	 */
214	.if \el == 0
215	stp	xzr, xzr, [sp, #S_STACKFRAME]
216	.else
217	stp	x29, x22, [sp, #S_STACKFRAME]
218	.endif
219	add	x29, sp, #S_STACKFRAME
220
221#ifdef CONFIG_ARM64_SW_TTBR0_PAN
222	/*
223	 * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
224	 * EL0, there is no need to check the state of TTBR0_EL1 since
225	 * accesses are always enabled.
226	 * Note that the meaning of this bit differs from the ARMv8.1 PAN
227	 * feature as all TTBR0_EL1 accesses are disabled, not just those to
228	 * user mappings.
229	 */
230alternative_if ARM64_HAS_PAN
231	b	1f				// skip TTBR0 PAN
232alternative_else_nop_endif
233
234	.if	\el != 0
235	mrs	x21, ttbr0_el1
236	tst	x21, #TTBR_ASID_MASK		// Check for the reserved ASID
237	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
238	b.eq	1f				// TTBR0 access already disabled
239	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
240	.endif
241
242	__uaccess_ttbr0_disable x21
2431:
244#endif
245
246	stp	x22, x23, [sp, #S_PC]
247
248	/* Not in a syscall by default (el0_svc overwrites for real syscall) */
249	.if	\el == 0
250	mov	w21, #NO_SYSCALL
251	str	w21, [sp, #S_SYSCALLNO]
252	.endif
253
254	/*
255	 * Set sp_el0 to current thread_info.
256	 */
257	.if	\el == 0
258	msr	sp_el0, tsk
259	.endif
260
261	/*
262	 * Registers that may be useful after this macro is invoked:
263	 *
264	 * x21 - aborted SP
265	 * x22 - aborted PC
266	 * x23 - aborted PSTATE
267	 */
268	.endm
269
270	.macro	kernel_exit, el
271	.if	\el != 0
272	disable_daif
273
274	/* Restore the task's original addr_limit. */
275	ldr	x20, [sp, #S_ORIG_ADDR_LIMIT]
276	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
277
278	/* No need to restore UAO, it will be restored from SPSR_EL1 */
279	.endif
280
281	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
282	.if	\el == 0
283	ct_user_enter
284	.endif
285
286#ifdef CONFIG_ARM64_SW_TTBR0_PAN
287	/*
288	 * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
289	 * PAN bit checking.
290	 */
291alternative_if ARM64_HAS_PAN
292	b	2f				// skip TTBR0 PAN
293alternative_else_nop_endif
294
295	.if	\el != 0
296	tbnz	x22, #22, 1f			// Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
297	.endif
298
299	__uaccess_ttbr0_enable x0, x1
300
301	.if	\el == 0
302	/*
303	 * Enable errata workarounds only if returning to user. The only
304	 * workaround currently required for TTBR0_EL1 changes is for the
305	 * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
306	 * corruption).
307	 */
308	bl	post_ttbr_update_workaround
309	.endif
3101:
311	.if	\el != 0
312	and	x22, x22, #~PSR_PAN_BIT		// ARMv8.0 CPUs do not understand this bit
313	.endif
3142:
315#endif
316
317	.if	\el == 0
318	ldr	x23, [sp, #S_SP]		// load return stack pointer
319	msr	sp_el0, x23
320	tst	x22, #PSR_MODE32_BIT		// native task?
321	b.eq	3f
322
323#ifdef CONFIG_ARM64_ERRATUM_845719
324alternative_if ARM64_WORKAROUND_845719
325#ifdef CONFIG_PID_IN_CONTEXTIDR
326	mrs	x29, contextidr_el1
327	msr	contextidr_el1, x29
328#else
329	msr contextidr_el1, xzr
330#endif
331alternative_else_nop_endif
332#endif
3333:
334	apply_ssbd 0, 5f, x0, x1
3355:
336	.endif
337
338	msr	elr_el1, x21			// set up the return data
339	msr	spsr_el1, x22
340	ldp	x0, x1, [sp, #16 * 0]
341	ldp	x2, x3, [sp, #16 * 1]
342	ldp	x4, x5, [sp, #16 * 2]
343	ldp	x6, x7, [sp, #16 * 3]
344	ldp	x8, x9, [sp, #16 * 4]
345	ldp	x10, x11, [sp, #16 * 5]
346	ldp	x12, x13, [sp, #16 * 6]
347	ldp	x14, x15, [sp, #16 * 7]
348	ldp	x16, x17, [sp, #16 * 8]
349	ldp	x18, x19, [sp, #16 * 9]
350	ldp	x20, x21, [sp, #16 * 10]
351	ldp	x22, x23, [sp, #16 * 11]
352	ldp	x24, x25, [sp, #16 * 12]
353	ldp	x26, x27, [sp, #16 * 13]
354	ldp	x28, x29, [sp, #16 * 14]
355	ldr	lr, [sp, #S_LR]
356	add	sp, sp, #S_FRAME_SIZE		// restore sp
357	/*
358	 * ARCH_HAS_MEMBARRIER_SYNC_CORE relies on eret context synchronization
359	 * when returning from an IPI handler, and when returning to user-space.
360	 */
361
362	.if	\el == 0
363alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
364#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
365	bne	4f
366	msr	far_el1, x30
367	tramp_alias	x30, tramp_exit_native
368	br	x30
3694:
370	tramp_alias	x30, tramp_exit_compat
371	br	x30
372#endif
373	.else
374	eret
375	.endif
376	.endm
377
378	.macro	irq_stack_entry
379	mov	x19, sp			// preserve the original sp
380
381	/*
382	 * Compare sp with the base of the task stack.
383	 * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
384	 * and should switch to the irq stack.
385	 */
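	/*
	 * E.g. with 16KiB stacks: the eor yields the bit difference between sp
	 * and the stack base, and masking with ~(THREAD_SIZE - 1) keeps only
	 * bits 14 and up, so x25 is zero exactly when both addresses lie in
	 * the same THREAD_SIZE-aligned block, i.e. on the task stack.
	 */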
386	ldr	x25, [tsk, TSK_STACK]
387	eor	x25, x25, x19
388	and	x25, x25, #~(THREAD_SIZE - 1)
389	cbnz	x25, 9998f
390
391	ldr_this_cpu x25, irq_stack_ptr, x26
392	mov	x26, #IRQ_STACK_SIZE
393	add	x26, x25, x26
394
395	/* switch to the irq stack */
396	mov	sp, x26
3979998:
398	.endm
399
400	/*
401	 * x19 should be preserved between irq_stack_entry and
402	 * irq_stack_exit.
403	 */
404	.macro	irq_stack_exit
405	mov	sp, x19
406	.endm
407
408/*
409 * These are the registers used in the syscall handler, and they allow us,
410 * in theory, to pass up to 7 arguments to a function - x0 to x6.
411 *
412 * x7 is reserved for the system call number in 32-bit mode.
413 */
414wsc_nr	.req	w25		// number of system calls
415xsc_nr	.req	x25		// number of system calls (zero-extended)
416wscno	.req	w26		// syscall number
417xscno	.req	x26		// syscall number (zero-extended)
418stbl	.req	x27		// syscall table pointer
419tsk	.req	x28		// current thread_info
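/*
 * tsk is established by kernel_entry and mirrors sp_el0 (which
 * get_thread_info reads); the syscall aliases above are only meaningful on
 * the el0_svc / el0_svc_compat paths.
 */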
420
421/*
422 * Interrupt handling.
423 */
424	.macro	irq_handler
425	ldr_l	x1, handle_arch_irq
426	mov	x0, sp
427	irq_stack_entry
428	blr	x1
429	irq_stack_exit
430	.endm
431
432	.text
433
434/*
435 * Exception vectors.
436 */
437	.pushsection ".entry.text", "ax"
438
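/*
 * The architecture gives each vector 0x80 bytes (hence the .align 7 in
 * kernel_ventry) and lays out the 16 entries in four groups of four:
 * current EL with SP_EL0, current EL with SP_ELx, lower EL using AArch64,
 * lower EL using AArch32. VBAR_EL1 requires the table to be 2KiB aligned,
 * hence the .align 11.
 */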
439	.align	11
440ENTRY(vectors)
441	kernel_ventry	1, sync_invalid			// Synchronous EL1t
442	kernel_ventry	1, irq_invalid			// IRQ EL1t
443	kernel_ventry	1, fiq_invalid			// FIQ EL1t
444	kernel_ventry	1, error_invalid		// Error EL1t
445
446	kernel_ventry	1, sync				// Synchronous EL1h
447	kernel_ventry	1, irq				// IRQ EL1h
448	kernel_ventry	1, fiq_invalid			// FIQ EL1h
449	kernel_ventry	1, error			// Error EL1h
450
451	kernel_ventry	0, sync				// Synchronous 64-bit EL0
452	kernel_ventry	0, irq				// IRQ 64-bit EL0
453	kernel_ventry	0, fiq_invalid			// FIQ 64-bit EL0
454	kernel_ventry	0, error			// Error 64-bit EL0
455
456#ifdef CONFIG_COMPAT
457	kernel_ventry	0, sync_compat, 32		// Synchronous 32-bit EL0
458	kernel_ventry	0, irq_compat, 32		// IRQ 32-bit EL0
459	kernel_ventry	0, fiq_invalid_compat, 32	// FIQ 32-bit EL0
460	kernel_ventry	0, error_compat, 32		// Error 32-bit EL0
461#else
462	kernel_ventry	0, sync_invalid, 32		// Synchronous 32-bit EL0
463	kernel_ventry	0, irq_invalid, 32		// IRQ 32-bit EL0
464	kernel_ventry	0, fiq_invalid, 32		// FIQ 32-bit EL0
465	kernel_ventry	0, error_invalid, 32		// Error 32-bit EL0
466#endif
467END(vectors)
468
469#ifdef CONFIG_VMAP_STACK
470	/*
471	 * We detected an overflow in kernel_ventry, which switched to the
472	 * overflow stack. Stash the exception regs, and head to our overflow
473	 * handler.
474	 */
475__bad_stack:
476	/* Restore the original x0 value */
477	mrs	x0, tpidrro_el0
478
479	/*
480	 * Store the original GPRs to the new stack. The original SP (minus
481	 * S_FRAME_SIZE) was stashed in tpidr_el0 by kernel_ventry.
482	 */
483	sub	sp, sp, #S_FRAME_SIZE
484	kernel_entry 1
485	mrs	x0, tpidr_el0
486	add	x0, x0, #S_FRAME_SIZE
487	str	x0, [sp, #S_SP]
488
489	/* Stash the regs for handle_bad_stack */
490	mov	x0, sp
491
492	/* Time to die */
493	bl	handle_bad_stack
494	ASM_BUG()
495#endif /* CONFIG_VMAP_STACK */
496
497/*
498 * Invalid mode handlers
499 */
500	.macro	inv_entry, el, reason, regsize = 64
501	kernel_entry \el, \regsize
502	mov	x0, sp
503	mov	x1, #\reason
504	mrs	x2, esr_el1
505	bl	bad_mode
506	ASM_BUG()
507	.endm
508
509el0_sync_invalid:
510	inv_entry 0, BAD_SYNC
511ENDPROC(el0_sync_invalid)
512
513el0_irq_invalid:
514	inv_entry 0, BAD_IRQ
515ENDPROC(el0_irq_invalid)
516
517el0_fiq_invalid:
518	inv_entry 0, BAD_FIQ
519ENDPROC(el0_fiq_invalid)
520
521el0_error_invalid:
522	inv_entry 0, BAD_ERROR
523ENDPROC(el0_error_invalid)
524
525#ifdef CONFIG_COMPAT
526el0_fiq_invalid_compat:
527	inv_entry 0, BAD_FIQ, 32
528ENDPROC(el0_fiq_invalid_compat)
529#endif
530
531el1_sync_invalid:
532	inv_entry 1, BAD_SYNC
533ENDPROC(el1_sync_invalid)
534
535el1_irq_invalid:
536	inv_entry 1, BAD_IRQ
537ENDPROC(el1_irq_invalid)
538
539el1_fiq_invalid:
540	inv_entry 1, BAD_FIQ
541ENDPROC(el1_fiq_invalid)
542
543el1_error_invalid:
544	inv_entry 1, BAD_ERROR
545ENDPROC(el1_error_invalid)
546
547/*
548 * EL1 mode handlers.
549 */
550	.align	6
551el1_sync:
552	kernel_entry 1
553	mrs	x1, esr_el1			// read the syndrome register
554	lsr	x24, x1, #ESR_ELx_EC_SHIFT	// exception class
555	cmp	x24, #ESR_ELx_EC_DABT_CUR	// data abort in EL1
556	b.eq	el1_da
557	cmp	x24, #ESR_ELx_EC_IABT_CUR	// instruction abort in EL1
558	b.eq	el1_ia
559	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
560	b.eq	el1_undef
561	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
562	b.eq	el1_sp_pc
563	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
564	b.eq	el1_sp_pc
565	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL1
566	b.eq	el1_undef
567	cmp	x24, #ESR_ELx_EC_BREAKPT_CUR	// debug exception in EL1
568	b.ge	el1_dbg
569	b	el1_inv
570
571el1_ia:
572	/*
573	 * Fall through to the Data abort case
574	 */
575el1_da:
576	/*
577	 * Data abort handling
578	 */
579	mrs	x3, far_el1
580	inherit_daif	pstate=x23, tmp=x2
581	clear_address_tag x0, x3
582	mov	x2, sp				// struct pt_regs
583	bl	do_mem_abort
584
585	kernel_exit 1
586el1_sp_pc:
587	/*
588	 * Stack or PC alignment exception handling
589	 */
590	mrs	x0, far_el1
591	inherit_daif	pstate=x23, tmp=x2
592	mov	x2, sp
593	bl	do_sp_pc_abort
594	ASM_BUG()
595el1_undef:
596	/*
597	 * Undefined instruction
598	 */
599	inherit_daif	pstate=x23, tmp=x2
600	mov	x0, sp
601	bl	do_undefinstr
602	ASM_BUG()
603el1_dbg:
604	/*
605	 * Debug exception handling
606	 */
607	cmp	x24, #ESR_ELx_EC_BRK64		// if BRK64
608	cinc	x24, x24, eq			// set bit '0'
609	tbz	x24, #0, el1_inv		// EL1 only
610	mrs	x0, far_el1
611	mov	x2, sp				// struct pt_regs
612	bl	do_debug_exception
613	kernel_exit 1
614el1_inv:
615	// TODO: add support for undefined instructions in kernel mode
616	inherit_daif	pstate=x23, tmp=x2
617	mov	x0, sp
618	mov	x2, x1
619	mov	x1, #BAD_SYNC
620	bl	bad_mode
621	ASM_BUG()
622ENDPROC(el1_sync)
623
624	.align	6
625el1_irq:
626	kernel_entry 1
627	enable_da_f
628#ifdef CONFIG_TRACE_IRQFLAGS
629	bl	trace_hardirqs_off
630#endif
631
632	irq_handler
633
634#ifdef CONFIG_PREEMPT
635	ldr	w24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
636	cbnz	w24, 1f				// preempt count != 0
637	ldr	x0, [tsk, #TSK_TI_FLAGS]	// get flags
638	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
639	bl	el1_preempt
6401:
641#endif
642#ifdef CONFIG_TRACE_IRQFLAGS
643	bl	trace_hardirqs_on
644#endif
645	kernel_exit 1
646ENDPROC(el1_irq)
647
648#ifdef CONFIG_PREEMPT
649el1_preempt:
650	mov	x24, lr
6511:	bl	preempt_schedule_irq		// irq en/disable is done inside
652	ldr	x0, [tsk, #TSK_TI_FLAGS]	// get new task's TI_FLAGS
653	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
654	ret	x24
655#endif
656
657/*
658 * EL0 mode handlers.
659 */
660	.align	6
661el0_sync:
662	kernel_entry 0
663	mrs	x25, esr_el1			// read the syndrome register
664	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
665	cmp	x24, #ESR_ELx_EC_SVC64		// SVC in 64-bit state
666	b.eq	el0_svc
667	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
668	b.eq	el0_da
669	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
670	b.eq	el0_ia
671	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
672	b.eq	el0_fpsimd_acc
673	cmp	x24, #ESR_ELx_EC_SVE		// SVE access
674	b.eq	el0_sve_acc
675	cmp	x24, #ESR_ELx_EC_FP_EXC64	// FP/ASIMD exception
676	b.eq	el0_fpsimd_exc
677	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
678	b.eq	el0_sys
679	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
680	b.eq	el0_sp_pc
681	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
682	b.eq	el0_sp_pc
683	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
684	b.eq	el0_undef
685	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
686	b.ge	el0_dbg
687	b	el0_inv
688
689#ifdef CONFIG_COMPAT
690	.align	6
691el0_sync_compat:
692	kernel_entry 0, 32
693	mrs	x25, esr_el1			// read the syndrome register
694	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
695	cmp	x24, #ESR_ELx_EC_SVC32		// SVC in 32-bit state
696	b.eq	el0_svc_compat
697	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
698	b.eq	el0_da
699	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
700	b.eq	el0_ia
701	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
702	b.eq	el0_fpsimd_acc
703	cmp	x24, #ESR_ELx_EC_FP_EXC32	// FP/ASIMD exception
704	b.eq	el0_fpsimd_exc
705	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
706	b.eq	el0_sp_pc
707	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
708	b.eq	el0_undef
709	cmp	x24, #ESR_ELx_EC_CP15_32	// CP15 MRC/MCR trap
710	b.eq	el0_undef
711	cmp	x24, #ESR_ELx_EC_CP15_64	// CP15 MRRC/MCRR trap
712	b.eq	el0_undef
713	cmp	x24, #ESR_ELx_EC_CP14_MR	// CP14 MRC/MCR trap
714	b.eq	el0_undef
715	cmp	x24, #ESR_ELx_EC_CP14_LS	// CP14 LDC/STC trap
716	b.eq	el0_undef
717	cmp	x24, #ESR_ELx_EC_CP14_64	// CP14 MRRC/MCRR trap
718	b.eq	el0_undef
719	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
720	b.ge	el0_dbg
721	b	el0_inv
722el0_svc_compat:
723	/*
724	 * AArch32 syscall handling
725	 */
726	ldr	x16, [tsk, #TSK_TI_FLAGS]	// load thread flags
727	adrp	stbl, compat_sys_call_table	// load compat syscall table pointer
728	mov	wscno, w7			// syscall number in w7 (r7)
729	mov     wsc_nr, #__NR_compat_syscalls
730	b	el0_svc_naked
731
732	.align	6
733el0_irq_compat:
734	kernel_entry 0, 32
735	b	el0_irq_naked
736
737el0_error_compat:
738	kernel_entry 0, 32
739	b	el0_error_naked
740#endif
741
742el0_da:
743	/*
744	 * Data abort handling
745	 */
746	mrs	x26, far_el1
747	enable_daif
748	ct_user_exit
749	clear_address_tag x0, x26
750	mov	x1, x25
751	mov	x2, sp
752	bl	do_mem_abort
753	b	ret_to_user
754el0_ia:
755	/*
756	 * Instruction abort handling
757	 */
758	mrs	x26, far_el1
759	enable_da_f
760#ifdef CONFIG_TRACE_IRQFLAGS
761	bl	trace_hardirqs_off
762#endif
763	ct_user_exit
764	mov	x0, x26
765	mov	x1, x25
766	mov	x2, sp
767	bl	do_el0_ia_bp_hardening
768	b	ret_to_user
769el0_fpsimd_acc:
770	/*
771	 * Floating Point or Advanced SIMD access
772	 */
773	enable_daif
774	ct_user_exit
775	mov	x0, x25
776	mov	x1, sp
777	bl	do_fpsimd_acc
778	b	ret_to_user
779el0_sve_acc:
780	/*
781	 * Scalable Vector Extension access
782	 */
783	enable_daif
784	ct_user_exit
785	mov	x0, x25
786	mov	x1, sp
787	bl	do_sve_acc
788	b	ret_to_user
789el0_fpsimd_exc:
790	/*
791	 * Floating Point, Advanced SIMD or SVE exception
792	 */
793	enable_daif
794	ct_user_exit
795	mov	x0, x25
796	mov	x1, sp
797	bl	do_fpsimd_exc
798	b	ret_to_user
799el0_sp_pc:
800	/*
801	 * Stack or PC alignment exception handling
802	 */
803	mrs	x26, far_el1
804	enable_da_f
805#ifdef CONFIG_TRACE_IRQFLAGS
806	bl	trace_hardirqs_off
807#endif
808	ct_user_exit
809	mov	x0, x26
810	mov	x1, x25
811	mov	x2, sp
812	bl	do_sp_pc_abort
813	b	ret_to_user
814el0_undef:
815	/*
816	 * Undefined instruction
817	 */
818	enable_daif
819	ct_user_exit
820	mov	x0, sp
821	bl	do_undefinstr
822	b	ret_to_user
823el0_sys:
824	/*
825	 * System instructions, for trapped cache maintenance instructions
826	 */
827	enable_daif
828	ct_user_exit
829	mov	x0, x25
830	mov	x1, sp
831	bl	do_sysinstr
832	b	ret_to_user
833el0_dbg:
834	/*
835	 * Debug exception handling
836	 */
837	tbnz	x24, #0, el0_inv		// EL0 only
838	mrs	x0, far_el1
839	mov	x1, x25
840	mov	x2, sp
841	bl	do_debug_exception
842	enable_daif
843	ct_user_exit
844	b	ret_to_user
845el0_inv:
846	enable_daif
847	ct_user_exit
848	mov	x0, sp
849	mov	x1, #BAD_SYNC
850	mov	x2, x25
851	bl	bad_el0_sync
852	b	ret_to_user
853ENDPROC(el0_sync)
854
855	.align	6
856el0_irq:
857	kernel_entry 0
858el0_irq_naked:
859	enable_da_f
860#ifdef CONFIG_TRACE_IRQFLAGS
861	bl	trace_hardirqs_off
862#endif
863
864	ct_user_exit
865#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
866	tbz	x22, #55, 1f
867	bl	do_el0_irq_bp_hardening
8681:
869#endif
870	irq_handler
871
872#ifdef CONFIG_TRACE_IRQFLAGS
873	bl	trace_hardirqs_on
874#endif
875	b	ret_to_user
876ENDPROC(el0_irq)
877
878el1_error:
879	kernel_entry 1
880	mrs	x1, esr_el1
881	enable_dbg
882	mov	x0, sp
883	bl	do_serror
884	kernel_exit 1
885ENDPROC(el1_error)
886
887el0_error:
888	kernel_entry 0
889el0_error_naked:
890	mrs	x1, esr_el1
891	enable_dbg
892	mov	x0, sp
893	bl	do_serror
894	enable_daif
895	ct_user_exit
896	b	ret_to_user
897ENDPROC(el0_error)
898
899
900/*
901 * This is the fast syscall return path.  We do as little as possible here,
902 * and this includes saving x0 back into the kernel stack.
903 */
904ret_fast_syscall:
905	disable_daif
906	str	x0, [sp, #S_X0]			// returned x0
907	ldr	x1, [tsk, #TSK_TI_FLAGS]	// re-check for syscall tracing
908	and	x2, x1, #_TIF_SYSCALL_WORK
909	cbnz	x2, ret_fast_syscall_trace
910	and	x2, x1, #_TIF_WORK_MASK
911	cbnz	x2, work_pending
912	enable_step_tsk x1, x2
913	kernel_exit 0
914ret_fast_syscall_trace:
915	enable_daif
916	b	__sys_trace_return_skipped	// we already saved x0
917
918/*
919 * Ok, we need to do extra processing, enter the slow path.
920 */
921work_pending:
922	mov	x0, sp				// 'regs'
923	bl	do_notify_resume
924#ifdef CONFIG_TRACE_IRQFLAGS
925	bl	trace_hardirqs_on		// enabled while in userspace
926#endif
927	ldr	x1, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
928	b	finish_ret_to_user
929/*
930 * "slow" syscall return path.
931 */
932ret_to_user:
933	disable_daif
934	ldr	x1, [tsk, #TSK_TI_FLAGS]
935	and	x2, x1, #_TIF_WORK_MASK
936	cbnz	x2, work_pending
937finish_ret_to_user:
938	enable_step_tsk x1, x2
939	kernel_exit 0
940ENDPROC(ret_to_user)
941
942/*
943 * SVC handler.
944 */
945	.align	6
946el0_svc:
947	ldr	x16, [tsk, #TSK_TI_FLAGS]	// load thread flags
948	adrp	stbl, sys_call_table		// load syscall table pointer
949	mov	wscno, w8			// syscall number in w8
950	mov	wsc_nr, #__NR_syscalls
951
952#ifdef CONFIG_ARM64_SVE
953alternative_if_not ARM64_SVE
954	b	el0_svc_naked
955alternative_else_nop_endif
956	tbz	x16, #TIF_SVE, el0_svc_naked	// Skip unless TIF_SVE set:
957	bic	x16, x16, #_TIF_SVE		// discard SVE state
958	str	x16, [tsk, #TSK_TI_FLAGS]
959
960	/*
961	 * task_fpsimd_load() won't be called to update CPACR_EL1 in
962	 * ret_to_user unless TIF_FOREIGN_FPSTATE is still set, which only
963	 * happens if a context switch or kernel_neon_begin() or context
964	 * modification (sigreturn, ptrace) intervenes.
965	 * So, ensure that CPACR_EL1 is already correct for the fast-path case:
966	 */
967	mrs	x9, cpacr_el1
968	bic	x9, x9, #CPACR_EL1_ZEN_EL0EN	// disable SVE for el0
969	msr	cpacr_el1, x9			// synchronised by eret to el0
970#endif
971
972el0_svc_naked:					// compat entry point
973	stp	x0, xscno, [sp, #S_ORIG_X0]	// save the original x0 and syscall number
974	enable_daif
975	ct_user_exit 1
976
977	tst	x16, #_TIF_SYSCALL_WORK		// check for syscall hooks
978	b.ne	__sys_trace
979	cmp     wscno, wsc_nr			// check upper syscall limit
980	b.hs	ni_sys
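	/*
	 * The b.hs above may still be unresolved under speculation, so
	 * mask_nospec64 also clamps an out-of-range syscall number to zero
	 * before it is used to index the table, closing the Spectre-v1 window
	 * on the load below.
	 */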
981	mask_nospec64 xscno, xsc_nr, x19	// enforce bounds for syscall number
982	ldr	x16, [stbl, xscno, lsl #3]	// address in the syscall table
983	blr	x16				// call sys_* routine
984	b	ret_fast_syscall
985ni_sys:
986	mov	x0, sp
987	bl	do_ni_syscall
988	b	ret_fast_syscall
989ENDPROC(el0_svc)
990
991	/*
992	 * This is the really slow path.  We're going to be doing context
993	 * switches, and waiting for our parent to respond.
994	 */
995__sys_trace:
996	cmp     wscno, #NO_SYSCALL		// user-issued syscall(-1)?
997	b.ne	1f
998	mov	x0, #-ENOSYS			// set default errno if so
999	str	x0, [sp, #S_X0]
10001:	mov	x0, sp
1001	bl	syscall_trace_enter
1002	cmp	w0, #NO_SYSCALL			// skip the syscall?
1003	b.eq	__sys_trace_return_skipped
1004	mov	wscno, w0			// syscall number (possibly new)
1005	mov	x1, sp				// pointer to regs
1006	cmp	wscno, wsc_nr			// check upper syscall limit
1007	b.hs	__ni_sys_trace
1008	ldp	x0, x1, [sp]			// restore the syscall args
1009	ldp	x2, x3, [sp, #S_X2]
1010	ldp	x4, x5, [sp, #S_X4]
1011	ldp	x6, x7, [sp, #S_X6]
1012	ldr	x16, [stbl, xscno, lsl #3]	// address in the syscall table
1013	blr	x16				// call sys_* routine
1014
1015__sys_trace_return:
1016	str	x0, [sp, #S_X0]			// save returned x0
1017__sys_trace_return_skipped:
1018	mov	x0, sp
1019	bl	syscall_trace_exit
1020	b	ret_to_user
1021
1022__ni_sys_trace:
1023	mov	x0, sp
1024	bl	do_ni_syscall
1025	b	__sys_trace_return
1026
1027	.popsection				// .entry.text
1028
1029#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
1030/*
1031 * Exception vectors trampoline.
1032 */
1033	.pushsection ".entry.tramp.text", "ax"
1034
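	/*
	 * tramp_map_kernel/tramp_unmap_kernel switch TTBR1_EL1 between the
	 * minimal trampoline page tables and swapper_pg_dir: the fixed offset
	 * works because the linker script places tramp_pg_dir (one page) and,
	 * when present, reserved_ttbr0 immediately before swapper_pg_dir.
	 * USER_ASID_FLAG flips between the kernel and user halves of the ASID
	 * pair at the same time.
	 */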
1035	.macro tramp_map_kernel, tmp
1036	mrs	\tmp, ttbr1_el1
1037	add	\tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE)
1038	bic	\tmp, \tmp, #USER_ASID_FLAG
1039	msr	ttbr1_el1, \tmp
1040#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
1041alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
1042	/* ASID already in \tmp[63:48] */
1043	movk	\tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)
1044	movk	\tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)
1045	/* 2MB boundary containing the vectors, so we nobble the walk cache */
1046	movk	\tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12)
1047	isb
1048	tlbi	vae1, \tmp
1049	dsb	nsh
1050alternative_else_nop_endif
1051#endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */
1052	.endm
1053
1054	.macro tramp_unmap_kernel, tmp
1055	mrs	\tmp, ttbr1_el1
1056	sub	\tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE)
1057	orr	\tmp, \tmp, #USER_ASID_FLAG
1058	msr	ttbr1_el1, \tmp
1059	/*
1060	 * We avoid running the post_ttbr_update_workaround here because
1061	 * it's only needed by Cavium ThunderX, which requires KPTI to be
1062	 * disabled.
1063	 */
1064	.endm
1065
1066	.macro tramp_ventry, regsize = 64
1067	.align	7
10681:
1069	.if	\regsize == 64
1070	msr	tpidrro_el0, x30	// Restored in kernel_ventry
1071	.endif
1072	/*
1073	 * Defend against branch aliasing attacks by pushing a dummy
1074	 * entry onto the return stack and using a RET instruction to
1075	 * enter the full-fat kernel vectors.
1076	 */
1077	bl	2f
1078	b	.
10792:
1080	tramp_map_kernel	x30
1081#ifdef CONFIG_RANDOMIZE_BASE
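	/*
	 * With KASLR, embedding the address of 'vectors' as a literal here
	 * would leak the kernel's load address through this user-visible page,
	 * so it is loaded instead from a data page (__entry_tramp_data_start)
	 * mapped at the page following the trampoline text.
	 */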
1082	adr	x30, tramp_vectors + PAGE_SIZE
1083alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
1084	ldr	x30, [x30]
1085#else
1086	ldr	x30, =vectors
1087#endif
1088	prfm	plil1strm, [x30, #(1b - tramp_vectors)]
1089	msr	vbar_el1, x30
1090	add	x30, x30, #(1b - tramp_vectors)
1091	isb
1092	ret
1093	.endm
1094
1095	.macro tramp_exit, regsize = 64
1096	adr	x30, tramp_vectors
1097	msr	vbar_el1, x30
1098	tramp_unmap_kernel	x30
1099	.if	\regsize == 64
1100	mrs	x30, far_el1
1101	.endif
1102	eret
1103	.endm
1104
1105	.align	11
1106ENTRY(tramp_vectors)
1107	.space	0x400
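	/*
	 * Only the lower-EL (EL0) entries of this table are populated; the
	 * 0x400 bytes skipped above correspond to the current-EL vectors,
	 * which the trampoline does not need to service.
	 */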
1108
1109	tramp_ventry
1110	tramp_ventry
1111	tramp_ventry
1112	tramp_ventry
1113
1114	tramp_ventry	32
1115	tramp_ventry	32
1116	tramp_ventry	32
1117	tramp_ventry	32
1118END(tramp_vectors)
1119
1120ENTRY(tramp_exit_native)
1121	tramp_exit
1122END(tramp_exit_native)
1123
1124ENTRY(tramp_exit_compat)
1125	tramp_exit	32
1126END(tramp_exit_compat)
1127
1128	.ltorg
1129	.popsection				// .entry.tramp.text
1130#ifdef CONFIG_RANDOMIZE_BASE
1131	.pushsection ".rodata", "a"
1132	.align PAGE_SHIFT
1133	.globl	__entry_tramp_data_start
1134__entry_tramp_data_start:
1135	.quad	vectors
1136	.popsection				// .rodata
1137#endif /* CONFIG_RANDOMIZE_BASE */
1138#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
1139
1140/*
1141 * Special system call wrappers.
1142 */
1143ENTRY(sys_rt_sigreturn_wrapper)
1144	mov	x0, sp
1145	b	sys_rt_sigreturn
1146ENDPROC(sys_rt_sigreturn_wrapper)
1147
1148/*
1149 * Register switch for AArch64. The callee-saved registers need to be saved
1150 * and restored. On entry:
1151 *   x0 = previous task_struct (must be preserved across the switch)
1152 *   x1 = next task_struct
1153 * Previous and next are guaranteed not to be the same.
1154 *
1155 */
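/*
 * Roughly, in C terms: the stp sequence saves {x19..x28, fp, sp, lr} into
 * prev->thread.cpu_context, the ldp sequence reloads the same set from
 * next->thread.cpu_context, and the final "msr sp_el0, x1" repoints sp_el0
 * at the new task_struct so that get_thread_info/tsk pick up the right
 * 'current'.
 */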
1156ENTRY(cpu_switch_to)
1157	mov	x10, #THREAD_CPU_CONTEXT
1158	add	x8, x0, x10
1159	mov	x9, sp
1160	stp	x19, x20, [x8], #16		// store callee-saved registers
1161	stp	x21, x22, [x8], #16
1162	stp	x23, x24, [x8], #16
1163	stp	x25, x26, [x8], #16
1164	stp	x27, x28, [x8], #16
1165	stp	x29, x9, [x8], #16
1166	str	lr, [x8]
1167	add	x8, x1, x10
1168	ldp	x19, x20, [x8], #16		// restore callee-saved registers
1169	ldp	x21, x22, [x8], #16
1170	ldp	x23, x24, [x8], #16
1171	ldp	x25, x26, [x8], #16
1172	ldp	x27, x28, [x8], #16
1173	ldp	x29, x9, [x8], #16
1174	ldr	lr, [x8]
1175	mov	sp, x9
1176	msr	sp_el0, x1
1177	ret
1178ENDPROC(cpu_switch_to)
1179NOKPROBE(cpu_switch_to)
1180
1181/*
1182 * This is how we return from a fork.
1183 */
1184ENTRY(ret_from_fork)
1185	bl	schedule_tail
1186	cbz	x19, 1f				// not a kernel thread
1187	mov	x0, x20
1188	blr	x19
11891:	get_thread_info tsk
1190	b	ret_to_user
1191ENDPROC(ret_from_fork)
1192NOKPROBE(ret_from_fork)
1193
1194#ifdef CONFIG_ARM_SDE_INTERFACE
1195
1196#include <asm/sdei.h>
1197#include <uapi/linux/arm_sdei.h>
1198
1199.macro sdei_handler_exit exit_mode
1200	/* On success, this call never returns... */
1201	cmp	\exit_mode, #SDEI_EXIT_SMC
1202	b.ne	99f
1203	smc	#0
1204	b	.
120599:	hvc	#0
1206	b	.
1207.endm
1208
1209#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
1210/*
1211 * The regular SDEI entry point may have been unmapped along with the rest of
1212 * the kernel. This trampoline restores the kernel mapping to make the x1 memory
1213 * argument accessible.
1214 *
1215 * This clobbers x4; __sdei_handler() will restore it from firmware's
1216 * copy.
1217 */
1218.ltorg
1219.pushsection ".entry.tramp.text", "ax"
1220ENTRY(__sdei_asm_entry_trampoline)
1221	mrs	x4, ttbr1_el1
1222	tbz	x4, #USER_ASID_BIT, 1f
1223
1224	tramp_map_kernel tmp=x4
1225	isb
1226	mov	x4, xzr
1227
1228	/*
1229	 * Use reg->interrupted_regs.addr_limit to remember whether to unmap
1230	 * the kernel on exit.
1231	 */
12321:	str	x4, [x1, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
1233
1234#ifdef CONFIG_RANDOMIZE_BASE
1235	adr	x4, tramp_vectors + PAGE_SIZE
1236	add	x4, x4, #:lo12:__sdei_asm_trampoline_next_handler
1237	ldr	x4, [x4]
1238#else
1239	ldr	x4, =__sdei_asm_handler
1240#endif
1241	br	x4
1242ENDPROC(__sdei_asm_entry_trampoline)
1243NOKPROBE(__sdei_asm_entry_trampoline)
1244
1245/*
1246 * Make the exit call and restore the original ttbr1_el1
1247 *
1248 * x0 & x1: setup for the exit API call
1249 * x2: exit_mode
1250 * x4: struct sdei_registered_event argument from registration time.
1251 */
1252ENTRY(__sdei_asm_exit_trampoline)
1253	ldr	x4, [x4, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
1254	cbnz	x4, 1f
1255
1256	tramp_unmap_kernel	tmp=x4
1257
12581:	sdei_handler_exit exit_mode=x2
1259ENDPROC(__sdei_asm_exit_trampoline)
1260NOKPROBE(__sdei_asm_exit_trampoline)
1261	.ltorg
1262.popsection		// .entry.tramp.text
1263#ifdef CONFIG_RANDOMIZE_BASE
1264.pushsection ".rodata", "a"
1265__sdei_asm_trampoline_next_handler:
1266	.quad	__sdei_asm_handler
1267.popsection		// .rodata
1268#endif /* CONFIG_RANDOMIZE_BASE */
1269#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
1270
1271/*
1272 * Software Delegated Exception entry point.
1273 *
1274 * x0: Event number
1275 * x1: struct sdei_registered_event argument from registration time.
1276 * x2: interrupted PC
1277 * x3: interrupted PSTATE
1278 * x4: maybe clobbered by the trampoline
1279 *
1280 * Firmware has preserved x0->x17 for us, we must save/restore the rest to
1281 * follow SMCCC. We save (or retrieve) all the registers as the handler may
1282 * want them.
1283 */
1284ENTRY(__sdei_asm_handler)
1285	stp     x2, x3, [x1, #SDEI_EVENT_INTREGS + S_PC]
1286	stp     x4, x5, [x1, #SDEI_EVENT_INTREGS + 16 * 2]
1287	stp     x6, x7, [x1, #SDEI_EVENT_INTREGS + 16 * 3]
1288	stp     x8, x9, [x1, #SDEI_EVENT_INTREGS + 16 * 4]
1289	stp     x10, x11, [x1, #SDEI_EVENT_INTREGS + 16 * 5]
1290	stp     x12, x13, [x1, #SDEI_EVENT_INTREGS + 16 * 6]
1291	stp     x14, x15, [x1, #SDEI_EVENT_INTREGS + 16 * 7]
1292	stp     x16, x17, [x1, #SDEI_EVENT_INTREGS + 16 * 8]
1293	stp     x18, x19, [x1, #SDEI_EVENT_INTREGS + 16 * 9]
1294	stp     x20, x21, [x1, #SDEI_EVENT_INTREGS + 16 * 10]
1295	stp     x22, x23, [x1, #SDEI_EVENT_INTREGS + 16 * 11]
1296	stp     x24, x25, [x1, #SDEI_EVENT_INTREGS + 16 * 12]
1297	stp     x26, x27, [x1, #SDEI_EVENT_INTREGS + 16 * 13]
1298	stp     x28, x29, [x1, #SDEI_EVENT_INTREGS + 16 * 14]
1299	mov	x4, sp
1300	stp     lr, x4, [x1, #SDEI_EVENT_INTREGS + S_LR]
1301
1302	mov	x19, x1
1303
1304#ifdef CONFIG_VMAP_STACK
1305	/*
1306	 * entry.S may have been using sp as a scratch register; find whether
1307	 * this is a normal or critical event and switch to the appropriate
1308	 * stack for this CPU.
1309	 */
1310	ldrb	w4, [x19, #SDEI_EVENT_PRIORITY]
1311	cbnz	w4, 1f
1312	ldr_this_cpu dst=x5, sym=sdei_stack_normal_ptr, tmp=x6
1313	b	2f
13141:	ldr_this_cpu dst=x5, sym=sdei_stack_critical_ptr, tmp=x6
13152:	mov	x6, #SDEI_STACK_SIZE
1316	add	x5, x5, x6
1317	mov	sp, x5
1318#endif
1319
1320	/*
1321	 * We may have interrupted userspace, or a guest, or exit-from or
1322	 * return-to either of these. We can't trust sp_el0, so restore it.
1323	 */
1324	mrs	x28, sp_el0
1325	ldr_this_cpu	dst=x0, sym=__entry_task, tmp=x1
1326	msr	sp_el0, x0
1327
1328	/* If we interrupted the kernel, point to the previous stack/frame. */
1329	and     x0, x3, #0xc
1330	mrs     x1, CurrentEL
1331	cmp     x0, x1
1332	csel	x29, x29, xzr, eq	// fp, or zero
1333	csel	x4, x2, xzr, eq		// elr, or zero
1334
1335	stp	x29, x4, [sp, #-16]!
1336	mov	x29, sp
1337
1338	add	x0, x19, #SDEI_EVENT_INTREGS
1339	mov	x1, x19
1340	bl	__sdei_handler
1341
1342	msr	sp_el0, x28
1343	/* restore regs >x17 that we clobbered */
1344	mov	x4, x19         // keep x4 for __sdei_asm_exit_trampoline
1345	ldp	x28, x29, [x4, #SDEI_EVENT_INTREGS + 16 * 14]
1346	ldp	x18, x19, [x4, #SDEI_EVENT_INTREGS + 16 * 9]
1347	ldp	lr, x1, [x4, #SDEI_EVENT_INTREGS + S_LR]
1348	mov	sp, x1
1349
1350	mov	x1, x0			// address to complete_and_resume
1351	/* x0 = (x0 <= 1) ? EVENT_COMPLETE : EVENT_COMPLETE_AND_RESUME */
1352	cmp	x0, #1
1353	mov_q	x2, SDEI_1_0_FN_SDEI_EVENT_COMPLETE
1354	mov_q	x3, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
1355	csel	x0, x2, x3, ls
1356
1357	ldr_l	x2, sdei_exit_mode
1358
1359alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
1360	sdei_handler_exit exit_mode=x2
1361alternative_else_nop_endif
1362
1363#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
1364	tramp_alias	dst=x5, sym=__sdei_asm_exit_trampoline
1365	br	x5
1366#endif
1367ENDPROC(__sdei_asm_handler)
1368NOKPROBE(__sdei_asm_handler)
1369#endif /* CONFIG_ARM_SDE_INTERFACE */
1370