xref: /openbmc/linux/arch/arm64/kernel/entry.S (revision 8dde5715)
1/*
2 * Low-level exception handling code
3 *
4 * Copyright (C) 2012 ARM Ltd.
5 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
6 *		Will Deacon <will.deacon@arm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
19 */
20
21#include <linux/arm-smccc.h>
22#include <linux/init.h>
23#include <linux/linkage.h>
24
25#include <asm/alternative.h>
26#include <asm/assembler.h>
27#include <asm/asm-offsets.h>
28#include <asm/cpufeature.h>
29#include <asm/errno.h>
30#include <asm/esr.h>
31#include <asm/irq.h>
32#include <asm/memory.h>
33#include <asm/mmu.h>
34#include <asm/processor.h>
35#include <asm/ptrace.h>
36#include <asm/thread_info.h>
37#include <asm/asm-uaccess.h>
38#include <asm/unistd.h>
39
40/*
41 * Context tracking subsystem.  Used to instrument transitions
42 * between user and kernel mode.
43 */
44	.macro ct_user_exit
45#ifdef CONFIG_CONTEXT_TRACKING
46	bl	context_tracking_user_exit
47#endif
48	.endm
49
50	.macro ct_user_enter
51#ifdef CONFIG_CONTEXT_TRACKING
52	bl	context_tracking_user_enter
53#endif
54	.endm
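/*
 * These hooks let the context tracking code (e.g. for NO_HZ_FULL) account
 * user/kernel time and keep RCU informed of the user<->kernel transition;
 * with CONFIG_CONTEXT_TRACKING disabled they expand to nothing.
 */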
55
56	.macro	clear_gp_regs
57	.irp	n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29
58	mov	x\n, xzr
59	.endr
60	.endm
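/*
 * clear_gp_regs expands to thirty "mov xN, xzr" instructions. It is used in
 * kernel_entry on exceptions from EL0, after the registers have been saved,
 * so that no stale, user-controlled register values remain live while the
 * entry code runs.
 */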
61
62/*
63 * Bad Abort numbers
64 *-----------------
65 */
66#define BAD_SYNC	0
67#define BAD_IRQ		1
68#define BAD_FIQ		2
69#define BAD_ERROR	3
70
71	.macro kernel_ventry, el, label, regsize = 64
72	.align 7
73#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
74alternative_if ARM64_UNMAP_KERNEL_AT_EL0
75	.if	\el == 0
76	.if	\regsize == 64
77	mrs	x30, tpidrro_el0
78	msr	tpidrro_el0, xzr
79	.else
80	mov	x30, xzr
81	.endif
82	.endif
83alternative_else_nop_endif
84#endif
85
86	sub	sp, sp, #S_FRAME_SIZE
87#ifdef CONFIG_VMAP_STACK
88	/*
89	 * Test whether the SP has overflowed, without corrupting a GPR.
90	 * Task and IRQ stacks are aligned to (1 << THREAD_SHIFT).
91	 */
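	/*
	 * That is: fold x0 into sp so the original x0 can be recovered
	 * without needing a spare register, then test bit THREAD_SHIFT of
	 * the candidate sp. With THREAD_SIZE-sized, suitably aligned stacks
	 * that bit is clear while sp is still inside the stack and reads as
	 * set once the new frame has dropped below its base.
	 */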
92	add	sp, sp, x0			// sp' = sp + x0
93	sub	x0, sp, x0			// x0' = sp' - x0 = (sp + x0) - x0 = sp
94	tbnz	x0, #THREAD_SHIFT, 0f
95	sub	x0, sp, x0			// x0'' = sp' - x0' = (sp + x0) - sp = x0
96	sub	sp, sp, x0			// sp'' = sp' - x0 = (sp + x0) - x0 = sp
97	b	el\()\el\()_\label
98
990:
100	/*
101	 * Either we've just detected an overflow, or we've taken an exception
102	 * while on the overflow stack. Either way, we won't return to
103	 * userspace, and can clobber EL0 registers to free up GPRs.
104	 */
105
106	/* Stash the original SP (minus S_FRAME_SIZE) in tpidr_el0. */
107	msr	tpidr_el0, x0
108
109	/* Recover the original x0 value and stash it in tpidrro_el0 */
110	sub	x0, sp, x0
111	msr	tpidrro_el0, x0
112
113	/* Switch to the overflow stack */
114	adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0
115
116	/*
117	 * Check whether we were already on the overflow stack. This may happen
118	 * after panic() re-enables interrupts.
119	 */
120	mrs	x0, tpidr_el0			// sp of interrupted context
121	sub	x0, sp, x0			// delta with top of overflow stack
122	tst	x0, #~(OVERFLOW_STACK_SIZE - 1)	// within range?
123	b.ne	__bad_stack			// no? -> bad stack pointer
124
125	/* We were already on the overflow stack. Restore sp/x0 and carry on. */
126	sub	sp, sp, x0
127	mrs	x0, tpidrro_el0
128#endif
129	b	el\()\el\()_\label
130	.endm
131
132	.macro tramp_alias, dst, sym
133	mov_q	\dst, TRAMP_VALIAS
134	add	\dst, \dst, #(\sym - .entry.tramp.text)
135	.endm
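	/*
	 * tramp_alias yields the address of \sym as seen through the fixed
	 * TRAMP_VALIAS mapping of .entry.tramp.text, i.e. an address that is
	 * valid in both the user and the kernel page tables.
	 */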
136
137	// This macro corrupts x0-x3. It is the caller's duty
138	// to save/restore them if required.
139	.macro	apply_ssbd, state, tmp1, tmp2
140#ifdef CONFIG_ARM64_SSBD
141alternative_cb	arm64_enable_wa2_handling
142	b	.L__asm_ssbd_skip\@
143alternative_cb_end
144	ldr_this_cpu	\tmp2, arm64_ssbd_callback_required, \tmp1
145	cbz	\tmp2,	.L__asm_ssbd_skip\@
146	ldr	\tmp2, [tsk, #TSK_TI_FLAGS]
147	tbnz	\tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@
148	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
149	mov	w1, #\state
150alternative_cb	arm64_update_smccc_conduit
151	nop					// Patched to SMC/HVC #0
152alternative_cb_end
153.L__asm_ssbd_skip\@:
154#endif
155	.endm
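	/*
	 * Roughly: if the firmware mitigation is needed on this CPU (per-cpu
	 * arm64_ssbd_callback_required) and the task is not already running
	 * with the mitigation pinned on (TIF_SSBD), issue
	 * ARM_SMCCC_ARCH_WORKAROUND_2 with \state through the SMC/HVC
	 * conduit patched in by arm64_update_smccc_conduit. kernel_entry
	 * passes state=1 on entry from EL0, kernel_exit passes state=0 on
	 * the way back out.
	 */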
156
157	.macro	kernel_entry, el, regsize = 64
158	.if	\regsize == 32
159	mov	w0, w0				// zero upper 32 bits of x0
160	.endif
161	stp	x0, x1, [sp, #16 * 0]
162	stp	x2, x3, [sp, #16 * 1]
163	stp	x4, x5, [sp, #16 * 2]
164	stp	x6, x7, [sp, #16 * 3]
165	stp	x8, x9, [sp, #16 * 4]
166	stp	x10, x11, [sp, #16 * 5]
167	stp	x12, x13, [sp, #16 * 6]
168	stp	x14, x15, [sp, #16 * 7]
169	stp	x16, x17, [sp, #16 * 8]
170	stp	x18, x19, [sp, #16 * 9]
171	stp	x20, x21, [sp, #16 * 10]
172	stp	x22, x23, [sp, #16 * 11]
173	stp	x24, x25, [sp, #16 * 12]
174	stp	x26, x27, [sp, #16 * 13]
175	stp	x28, x29, [sp, #16 * 14]
176
177	.if	\el == 0
178	clear_gp_regs
179	mrs	x21, sp_el0
180	ldr_this_cpu	tsk, __entry_task, x20	// Ensure MDSCR_EL1.SS is clear,
181	ldr	x19, [tsk, #TSK_TI_FLAGS]	// since we can unmask debug
182	disable_step_tsk x19, x20		// exceptions when scheduling.
183
184	apply_ssbd 1, x22, x23
185
186	.else
187	add	x21, sp, #S_FRAME_SIZE
188	get_current_task tsk
189	/* Save the task's original addr_limit and set USER_DS */
190	ldr	x20, [tsk, #TSK_TI_ADDR_LIMIT]
191	str	x20, [sp, #S_ORIG_ADDR_LIMIT]
192	mov	x20, #USER_DS
193	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
194	/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
195	.endif /* \el == 0 */
196	mrs	x22, elr_el1
197	mrs	x23, spsr_el1
198	stp	lr, x21, [sp, #S_LR]
199
200	/*
201	 * In order to be able to dump the contents of struct pt_regs at the
202	 * time the exception was taken (in case we attempt to walk the call
203	 * stack later), chain it together with the stack frames.
204	 */
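	/*
	 * Concretely, S_STACKFRAME holds a {fp, pc} record: zeroed when
	 * coming from EL0 (nothing to link to), or {old x29, ELR} when
	 * coming from EL1, and x29 is pointed at it so the frame-pointer
	 * unwinder can walk across the exception boundary.
	 */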
205	.if \el == 0
206	stp	xzr, xzr, [sp, #S_STACKFRAME]
207	.else
208	stp	x29, x22, [sp, #S_STACKFRAME]
209	.endif
210	add	x29, sp, #S_STACKFRAME
211
212#ifdef CONFIG_ARM64_SW_TTBR0_PAN
213	/*
214	 * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
215	 * EL0, there is no need to check the state of TTBR0_EL1 since
216	 * accesses are always enabled.
217	 * Note that the meaning of this bit differs from the ARMv8.1 PAN
218	 * feature as all TTBR0_EL1 accesses are disabled, not just those to
219	 * user mappings.
220	 */
221alternative_if ARM64_HAS_PAN
222	b	1f				// skip TTBR0 PAN
223alternative_else_nop_endif
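	/*
	 * For exceptions from EL1 the reserved ASID in TTBR0_EL1 tells us
	 * whether uaccess was already disabled: if so, keep the emulated PAN
	 * bit set in the saved SPSR and skip the disable below; otherwise
	 * clear it and disable TTBR0 accesses now, so that kernel_exit knows
	 * it has to re-enable them.
	 */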
224
225	.if	\el != 0
226	mrs	x21, ttbr0_el1
227	tst	x21, #TTBR_ASID_MASK		// Check for the reserved ASID
228	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
229	b.eq	1f				// TTBR0 access already disabled
230	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
231	.endif
232
233	__uaccess_ttbr0_disable x21
2341:
235#endif
236
237	stp	x22, x23, [sp, #S_PC]
238
239	/* Not in a syscall by default (el0_svc overwrites for real syscall) */
240	.if	\el == 0
241	mov	w21, #NO_SYSCALL
242	str	w21, [sp, #S_SYSCALLNO]
243	.endif
244
245	/*
246	 * Set sp_el0 to current thread_info.
247	 */
248	.if	\el == 0
249	msr	sp_el0, tsk
250	.endif
251
252	/* Save pmr */
253alternative_if ARM64_HAS_IRQ_PRIO_MASKING
254	mrs_s	x20, SYS_ICC_PMR_EL1
255	str	x20, [sp, #S_PMR_SAVE]
256alternative_else_nop_endif
257
258	/*
259	 * Registers that may be useful after this macro is invoked:
260	 *
261	 * x21 - aborted SP
262	 * x22 - aborted PC
263	 * x23 - aborted PSTATE
264	 */
265	.endm
266
267	.macro	kernel_exit, el
268	.if	\el != 0
269	disable_daif
270
271	/* Restore the task's original addr_limit. */
272	ldr	x20, [sp, #S_ORIG_ADDR_LIMIT]
273	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
274
275	/* No need to restore UAO, it will be restored from SPSR_EL1 */
276	.endif
277
278	/* Restore pmr */
279alternative_if ARM64_HAS_IRQ_PRIO_MASKING
280	ldr	x20, [sp, #S_PMR_SAVE]
281	msr_s	SYS_ICC_PMR_EL1, x20
282	/* Ensure priority change is seen by redistributor */
283	dsb	sy
284alternative_else_nop_endif
285
286	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
287	.if	\el == 0
288	ct_user_enter
289	.endif
290
291#ifdef CONFIG_ARM64_SW_TTBR0_PAN
292	/*
293	 * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
294	 * PAN bit checking.
295	 */
296alternative_if ARM64_HAS_PAN
297	b	2f				// skip TTBR0 PAN
298alternative_else_nop_endif
299
300	.if	\el != 0
301	tbnz	x22, #22, 1f			// Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
302	.endif
303
304	__uaccess_ttbr0_enable x0, x1
305
306	.if	\el == 0
307	/*
308	 * Enable errata workarounds only if returning to user. The only
309	 * workaround currently required for TTBR0_EL1 changes is for the
310	 * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
311	 * corruption).
312	 */
313	bl	post_ttbr_update_workaround
314	.endif
3151:
316	.if	\el != 0
317	and	x22, x22, #~PSR_PAN_BIT		// ARMv8.0 CPUs do not understand this bit
318	.endif
3192:
320#endif
321
322	.if	\el == 0
323	ldr	x23, [sp, #S_SP]		// load return stack pointer
324	msr	sp_el0, x23
325	tst	x22, #PSR_MODE32_BIT		// native task?
326	b.eq	3f
327
328#ifdef CONFIG_ARM64_ERRATUM_845719
329alternative_if ARM64_WORKAROUND_845719
330#ifdef CONFIG_PID_IN_CONTEXTIDR
331	mrs	x29, contextidr_el1
332	msr	contextidr_el1, x29
333#else
334	msr contextidr_el1, xzr
335#endif
336alternative_else_nop_endif
337#endif
3383:
339#ifdef CONFIG_ARM64_ERRATUM_1418040
340alternative_if_not ARM64_WORKAROUND_1418040
341	b	4f
342alternative_else_nop_endif
343	/*
344	 * if (x22.mode32 == cntkctl_el1.el0vcten)
345	 *     cntkctl_el1.el0vcten = ~cntkctl_el1.el0vcten
346	 */
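	/*
	 * x22 still holds the saved SPSR: PSR_MODE32_BIT is bit 4, so the
	 * "lsr #3" lines it up with CNTKCTL_EL1.EL0VCTEN (bit 1). EON leaves
	 * bit 1 of x0 set only when the two bits are equal, in which case
	 * EL0VCTEN is toggled so that EL0 virtual counter access ends up
	 * disabled (and hence trapped) for 32-bit tasks and enabled for
	 * 64-bit ones.
	 */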
347	mrs	x1, cntkctl_el1
348	eon	x0, x1, x22, lsr #3
349	tbz	x0, #1, 4f
350	eor	x1, x1, #2	// ARCH_TIMER_USR_VCT_ACCESS_EN
351	msr	cntkctl_el1, x1
3524:
353#endif
354	apply_ssbd 0, x0, x1
355	.endif
356
357	msr	elr_el1, x21			// set up the return data
358	msr	spsr_el1, x22
359	ldp	x0, x1, [sp, #16 * 0]
360	ldp	x2, x3, [sp, #16 * 1]
361	ldp	x4, x5, [sp, #16 * 2]
362	ldp	x6, x7, [sp, #16 * 3]
363	ldp	x8, x9, [sp, #16 * 4]
364	ldp	x10, x11, [sp, #16 * 5]
365	ldp	x12, x13, [sp, #16 * 6]
366	ldp	x14, x15, [sp, #16 * 7]
367	ldp	x16, x17, [sp, #16 * 8]
368	ldp	x18, x19, [sp, #16 * 9]
369	ldp	x20, x21, [sp, #16 * 10]
370	ldp	x22, x23, [sp, #16 * 11]
371	ldp	x24, x25, [sp, #16 * 12]
372	ldp	x26, x27, [sp, #16 * 13]
373	ldp	x28, x29, [sp, #16 * 14]
374	ldr	lr, [sp, #S_LR]
375	add	sp, sp, #S_FRAME_SIZE		// restore sp
376
377	.if	\el == 0
378alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
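	/*
	 * With KPTI in use the eret above is patched to a NOP and we return
	 * through the trampoline instead: the conditional branch below
	 * reuses the flags from the PSR_MODE32_BIT test further up, a native
	 * task's x30 is parked in FAR_EL1 so tramp_exit can restore it after
	 * switching page tables, and the final eret is issued from
	 * tramp_exit_native/tramp_exit_compat.
	 */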
379#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
380	bne	5f
381	msr	far_el1, x30
382	tramp_alias	x30, tramp_exit_native
383	br	x30
3845:
385	tramp_alias	x30, tramp_exit_compat
386	br	x30
387#endif
388	.else
389	eret
390	.endif
391	sb
392	.endm
393
394	.macro	irq_stack_entry
395	mov	x19, sp			// preserve the original sp
396
397	/*
398	 * Compare sp with the base of the task stack.
399	 * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
400	 * and should switch to the irq stack.
401	 */
402	ldr	x25, [tsk, TSK_STACK]
403	eor	x25, x25, x19
404	and	x25, x25, #~(THREAD_SIZE - 1)
405	cbnz	x25, 9998f
406
407	ldr_this_cpu x25, irq_stack_ptr, x26
408	mov	x26, #IRQ_STACK_SIZE
409	add	x26, x25, x26
410
411	/* switch to the irq stack */
412	mov	sp, x26
4139998:
414	.endm
415
416	/*
417	 * x19 should be preserved between irq_stack_entry and
418	 * irq_stack_exit.
419	 */
420	.macro	irq_stack_exit
421	mov	sp, x19
422	.endm
423
424/* GPRs used by entry code */
425tsk	.req	x28		// current thread_info
426
427/*
428 * Interrupt handling.
429 */
430	.macro	irq_handler
431	ldr_l	x1, handle_arch_irq
432	mov	x0, sp
433	irq_stack_entry
434	blr	x1
435	irq_stack_exit
436	.endm
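	/*
	 * handle_arch_irq is the root handler installed by the interrupt
	 * controller driver via set_handle_irq(); irq_stack_entry moves us
	 * onto the per-cpu IRQ stack first (unless we are already on it),
	 * and x0 points at the saved pt_regs.
	 */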
437
438	.text
439
440/*
441 * Exception vectors.
442 */
443	.pushsection ".entry.text", "ax"
444
445	.align	11
446ENTRY(vectors)
447	kernel_ventry	1, sync_invalid			// Synchronous EL1t
448	kernel_ventry	1, irq_invalid			// IRQ EL1t
449	kernel_ventry	1, fiq_invalid			// FIQ EL1t
450	kernel_ventry	1, error_invalid		// Error EL1t
451
452	kernel_ventry	1, sync				// Synchronous EL1h
453	kernel_ventry	1, irq				// IRQ EL1h
454	kernel_ventry	1, fiq_invalid			// FIQ EL1h
455	kernel_ventry	1, error			// Error EL1h
456
457	kernel_ventry	0, sync				// Synchronous 64-bit EL0
458	kernel_ventry	0, irq				// IRQ 64-bit EL0
459	kernel_ventry	0, fiq_invalid			// FIQ 64-bit EL0
460	kernel_ventry	0, error			// Error 64-bit EL0
461
462#ifdef CONFIG_COMPAT
463	kernel_ventry	0, sync_compat, 32		// Synchronous 32-bit EL0
464	kernel_ventry	0, irq_compat, 32		// IRQ 32-bit EL0
465	kernel_ventry	0, fiq_invalid_compat, 32	// FIQ 32-bit EL0
466	kernel_ventry	0, error_compat, 32		// Error 32-bit EL0
467#else
468	kernel_ventry	0, sync_invalid, 32		// Synchronous 32-bit EL0
469	kernel_ventry	0, irq_invalid, 32		// IRQ 32-bit EL0
470	kernel_ventry	0, fiq_invalid, 32		// FIQ 32-bit EL0
471	kernel_ventry	0, error_invalid, 32		// Error 32-bit EL0
472#endif
473END(vectors)
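/*
 * Each kernel_ventry above is padded to 128 bytes (.align 7) and the table
 * itself is 2K aligned (.align 11), matching the architectural VBAR_EL1
 * layout: four groups of four vectors, one group each for "current EL with
 * SP0", "current EL with SPx", "lower EL using AArch64" and "lower EL using
 * AArch32", each group ordered Synchronous/IRQ/FIQ/SError.
 */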
474
475#ifdef CONFIG_VMAP_STACK
476	/*
477	 * We detected an overflow in kernel_ventry, which switched to the
478	 * overflow stack. Stash the exception regs, and head to our overflow
479	 * handler.
480	 */
481__bad_stack:
482	/* Restore the original x0 value */
483	mrs	x0, tpidrro_el0
484
485	/*
486	 * Store the original GPRs to the new stack. The original SP (minus
487	 * S_FRAME_SIZE) was stashed in tpidr_el0 by kernel_ventry.
488	 */
489	sub	sp, sp, #S_FRAME_SIZE
490	kernel_entry 1
491	mrs	x0, tpidr_el0
492	add	x0, x0, #S_FRAME_SIZE
493	str	x0, [sp, #S_SP]
494
495	/* Stash the regs for handle_bad_stack */
496	mov	x0, sp
497
498	/* Time to die */
499	bl	handle_bad_stack
500	ASM_BUG()
501#endif /* CONFIG_VMAP_STACK */
502
503/*
504 * Invalid mode handlers
505 */
506	.macro	inv_entry, el, reason, regsize = 64
507	kernel_entry \el, \regsize
508	mov	x0, sp
509	mov	x1, #\reason
510	mrs	x2, esr_el1
511	bl	bad_mode
512	ASM_BUG()
513	.endm
514
515el0_sync_invalid:
516	inv_entry 0, BAD_SYNC
517ENDPROC(el0_sync_invalid)
518
519el0_irq_invalid:
520	inv_entry 0, BAD_IRQ
521ENDPROC(el0_irq_invalid)
522
523el0_fiq_invalid:
524	inv_entry 0, BAD_FIQ
525ENDPROC(el0_fiq_invalid)
526
527el0_error_invalid:
528	inv_entry 0, BAD_ERROR
529ENDPROC(el0_error_invalid)
530
531#ifdef CONFIG_COMPAT
532el0_fiq_invalid_compat:
533	inv_entry 0, BAD_FIQ, 32
534ENDPROC(el0_fiq_invalid_compat)
535#endif
536
537el1_sync_invalid:
538	inv_entry 1, BAD_SYNC
539ENDPROC(el1_sync_invalid)
540
541el1_irq_invalid:
542	inv_entry 1, BAD_IRQ
543ENDPROC(el1_irq_invalid)
544
545el1_fiq_invalid:
546	inv_entry 1, BAD_FIQ
547ENDPROC(el1_fiq_invalid)
548
549el1_error_invalid:
550	inv_entry 1, BAD_ERROR
551ENDPROC(el1_error_invalid)
552
553/*
554 * EL1 mode handlers.
555 */
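/*
 * el1_sync dispatches on ESR_EL1.EC (bits [31:26], hence the shift by
 * ESR_ELx_EC_SHIFT). The final "b.ge el1_dbg" works because the debug
 * exception classes sit at the top of the EC encoding space, so any class
 * at or above ESR_ELx_EC_BREAKPT_CUR is handed to the debug path.
 */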
556	.align	6
557el1_sync:
558	kernel_entry 1
559	mrs	x1, esr_el1			// read the syndrome register
560	lsr	x24, x1, #ESR_ELx_EC_SHIFT	// exception class
561	cmp	x24, #ESR_ELx_EC_DABT_CUR	// data abort in EL1
562	b.eq	el1_da
563	cmp	x24, #ESR_ELx_EC_IABT_CUR	// instruction abort in EL1
564	b.eq	el1_ia
565	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
566	b.eq	el1_undef
567	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
568	b.eq	el1_sp_pc
569	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
570	b.eq	el1_sp_pc
571	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL1
572	b.eq	el1_undef
573	cmp	x24, #ESR_ELx_EC_BREAKPT_CUR	// debug exception in EL1
574	b.ge	el1_dbg
575	b	el1_inv
576
577el1_ia:
578	/*
579	 * Fall through to the Data abort case
580	 */
581el1_da:
582	/*
583	 * Data abort handling
584	 */
585	mrs	x3, far_el1
586	inherit_daif	pstate=x23, tmp=x2
587	clear_address_tag x0, x3
588	mov	x2, sp				// struct pt_regs
589	bl	do_mem_abort
590
591	kernel_exit 1
592el1_sp_pc:
593	/*
594	 * Stack or PC alignment exception handling
595	 */
596	mrs	x0, far_el1
597	inherit_daif	pstate=x23, tmp=x2
598	mov	x2, sp
599	bl	do_sp_pc_abort
600	ASM_BUG()
601el1_undef:
602	/*
603	 * Undefined instruction
604	 */
605	inherit_daif	pstate=x23, tmp=x2
606	mov	x0, sp
607	bl	do_undefinstr
608	kernel_exit 1
609el1_dbg:
610	/*
611	 * Debug exception handling
612	 */
613	cmp	x24, #ESR_ELx_EC_BRK64		// if BRK64
614	cinc	x24, x24, eq			// set bit '0'
615	tbz	x24, #0, el1_inv		// EL1 only
616	mrs	x0, far_el1
617	mov	x2, sp				// struct pt_regs
618	bl	do_debug_exception
619	kernel_exit 1
620el1_inv:
621	// TODO: add support for undefined instructions in kernel mode
622	inherit_daif	pstate=x23, tmp=x2
623	mov	x0, sp
624	mov	x2, x1
625	mov	x1, #BAD_SYNC
626	bl	bad_mode
627	ASM_BUG()
628ENDPROC(el1_sync)
629
630	.align	6
631el1_irq:
632	kernel_entry 1
633	enable_da_f
634#ifdef CONFIG_TRACE_IRQFLAGS
635#ifdef CONFIG_ARM64_PSEUDO_NMI
636alternative_if ARM64_HAS_IRQ_PRIO_MASKING
637	ldr	x20, [sp, #S_PMR_SAVE]
638alternative_else
639	mov	x20, #GIC_PRIO_IRQON
640alternative_endif
641	cmp	x20, #GIC_PRIO_IRQOFF
642	/* Irqs were disabled, don't trace */
643	b.ls	1f
644#endif
645	bl	trace_hardirqs_off
6461:
647#endif
648
649	irq_handler
650
651#ifdef CONFIG_PREEMPT
652	ldr	x24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
653alternative_if ARM64_HAS_IRQ_PRIO_MASKING
654	/*
655	 * DA_F were cleared at the start of handling. If anything is set in DAIF,
656	 * we came back from an NMI, so skip preemption
657	 */
658	mrs	x0, daif
659	orr	x24, x24, x0
660alternative_else_nop_endif
661	cbnz	x24, 1f				// preempt count != 0 || NMI return path
662	bl	preempt_schedule_irq		// irq en/disable is done inside
6631:
664#endif
665#ifdef CONFIG_TRACE_IRQFLAGS
666#ifdef CONFIG_ARM64_PSEUDO_NMI
667	/*
668	 * If IRQs were disabled when we received the interrupt, we have an NMI
669	 * and we are not re-enabling interrupts upon eret. Skip tracing.
670	 */
671	cmp	x20, #GIC_PRIO_IRQOFF
672	b.ls	1f
673#endif
674	bl	trace_hardirqs_on
6751:
676#endif
677
678	kernel_exit 1
679ENDPROC(el1_irq)
680
681/*
682 * EL0 mode handlers.
683 */
684	.align	6
685el0_sync:
686	kernel_entry 0
687	mrs	x25, esr_el1			// read the syndrome register
688	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
689	cmp	x24, #ESR_ELx_EC_SVC64		// SVC in 64-bit state
690	b.eq	el0_svc
691	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
692	b.eq	el0_da
693	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
694	b.eq	el0_ia
695	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
696	b.eq	el0_fpsimd_acc
697	cmp	x24, #ESR_ELx_EC_SVE		// SVE access
698	b.eq	el0_sve_acc
699	cmp	x24, #ESR_ELx_EC_FP_EXC64	// FP/ASIMD exception
700	b.eq	el0_fpsimd_exc
701	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
702	ccmp	x24, #ESR_ELx_EC_WFx, #4, ne
703	b.eq	el0_sys
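	/*
	 * The cmp/ccmp pair above lets one branch cover two exception
	 * classes: if x24 != ESR_ELx_EC_SYS64 the ccmp compares it against
	 * ESR_ELx_EC_WFx, otherwise it forces the Z flag (NZCV = #4), so
	 * el0_sys is reached for both SYS64 and WFx traps.
	 */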
704	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
705	b.eq	el0_sp_pc
706	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
707	b.eq	el0_sp_pc
708	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
709	b.eq	el0_undef
710	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
711	b.ge	el0_dbg
712	b	el0_inv
713
714#ifdef CONFIG_COMPAT
715	.align	6
716el0_sync_compat:
717	kernel_entry 0, 32
718	mrs	x25, esr_el1			// read the syndrome register
719	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
720	cmp	x24, #ESR_ELx_EC_SVC32		// SVC in 32-bit state
721	b.eq	el0_svc_compat
722	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
723	b.eq	el0_da
724	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
725	b.eq	el0_ia
726	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
727	b.eq	el0_fpsimd_acc
728	cmp	x24, #ESR_ELx_EC_FP_EXC32	// FP/ASIMD exception
729	b.eq	el0_fpsimd_exc
730	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
731	b.eq	el0_sp_pc
732	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
733	b.eq	el0_undef
734	cmp	x24, #ESR_ELx_EC_CP15_32	// CP15 MRC/MCR trap
735	b.eq	el0_cp15
736	cmp	x24, #ESR_ELx_EC_CP15_64	// CP15 MRRC/MCRR trap
737	b.eq	el0_cp15
738	cmp	x24, #ESR_ELx_EC_CP14_MR	// CP14 MRC/MCR trap
739	b.eq	el0_undef
740	cmp	x24, #ESR_ELx_EC_CP14_LS	// CP14 LDC/STC trap
741	b.eq	el0_undef
742	cmp	x24, #ESR_ELx_EC_CP14_64	// CP14 MRRC/MCRR trap
743	b.eq	el0_undef
744	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
745	b.ge	el0_dbg
746	b	el0_inv
747el0_svc_compat:
748	mov	x0, sp
749	bl	el0_svc_compat_handler
750	b	ret_to_user
751
752	.align	6
753el0_irq_compat:
754	kernel_entry 0, 32
755	b	el0_irq_naked
756
757el0_error_compat:
758	kernel_entry 0, 32
759	b	el0_error_naked
760
761el0_cp15:
762	/*
763	 * Trapped CP15 (MRC, MCR, MRRC, MCRR) instructions
764	 */
765	enable_daif
766	ct_user_exit
767	mov	x0, x25
768	mov	x1, sp
769	bl	do_cp15instr
770	b	ret_to_user
771#endif
772
773el0_da:
774	/*
775	 * Data abort handling
776	 */
777	mrs	x26, far_el1
778	enable_daif
779	ct_user_exit
780	clear_address_tag x0, x26
781	mov	x1, x25
782	mov	x2, sp
783	bl	do_mem_abort
784	b	ret_to_user
785el0_ia:
786	/*
787	 * Instruction abort handling
788	 */
789	mrs	x26, far_el1
790	enable_da_f
791#ifdef CONFIG_TRACE_IRQFLAGS
792	bl	trace_hardirqs_off
793#endif
794	ct_user_exit
795	mov	x0, x26
796	mov	x1, x25
797	mov	x2, sp
798	bl	do_el0_ia_bp_hardening
799	b	ret_to_user
800el0_fpsimd_acc:
801	/*
802	 * Floating Point or Advanced SIMD access
803	 */
804	enable_daif
805	ct_user_exit
806	mov	x0, x25
807	mov	x1, sp
808	bl	do_fpsimd_acc
809	b	ret_to_user
810el0_sve_acc:
811	/*
812	 * Scalable Vector Extension access
813	 */
814	enable_daif
815	ct_user_exit
816	mov	x0, x25
817	mov	x1, sp
818	bl	do_sve_acc
819	b	ret_to_user
820el0_fpsimd_exc:
821	/*
822	 * Floating Point, Advanced SIMD or SVE exception
823	 */
824	enable_daif
825	ct_user_exit
826	mov	x0, x25
827	mov	x1, sp
828	bl	do_fpsimd_exc
829	b	ret_to_user
830el0_sp_pc:
831	/*
832	 * Stack or PC alignment exception handling
833	 */
834	mrs	x26, far_el1
835	enable_da_f
836#ifdef CONFIG_TRACE_IRQFLAGS
837	bl	trace_hardirqs_off
838#endif
839	ct_user_exit
840	mov	x0, x26
841	mov	x1, x25
842	mov	x2, sp
843	bl	do_sp_pc_abort
844	b	ret_to_user
845el0_undef:
846	/*
847	 * Undefined instruction
848	 */
849	enable_daif
850	ct_user_exit
851	mov	x0, sp
852	bl	do_undefinstr
853	b	ret_to_user
854el0_sys:
855	/*
856	 * System instructions, for trapped cache maintenance instructions
857	 */
858	enable_daif
859	ct_user_exit
860	mov	x0, x25
861	mov	x1, sp
862	bl	do_sysinstr
863	b	ret_to_user
864el0_dbg:
865	/*
866	 * Debug exception handling
867	 */
868	tbnz	x24, #0, el0_inv		// EL0 only
869	mrs	x0, far_el1
870	mov	x1, x25
871	mov	x2, sp
872	bl	do_debug_exception
873	enable_daif
874	ct_user_exit
875	b	ret_to_user
876el0_inv:
877	enable_daif
878	ct_user_exit
879	mov	x0, sp
880	mov	x1, #BAD_SYNC
881	mov	x2, x25
882	bl	bad_el0_sync
883	b	ret_to_user
884ENDPROC(el0_sync)
885
886	.align	6
887el0_irq:
888	kernel_entry 0
889el0_irq_naked:
890	enable_da_f
891#ifdef CONFIG_TRACE_IRQFLAGS
892	bl	trace_hardirqs_off
893#endif
894
895	ct_user_exit
896#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
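	/*
	 * x22 holds the interrupted EL0 PC. A PC with bit 55 set lies in the
	 * TTBR1 (kernel) half of the address space, which userspace cannot
	 * legitimately be executing from, so treat it as a branch predictor
	 * training attempt and apply the BP hardening callback before
	 * handling the IRQ.
	 */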
897	tbz	x22, #55, 1f
898	bl	do_el0_irq_bp_hardening
8991:
900#endif
901	irq_handler
902
903#ifdef CONFIG_TRACE_IRQFLAGS
904	bl	trace_hardirqs_on
905#endif
906	b	ret_to_user
907ENDPROC(el0_irq)
908
909el1_error:
910	kernel_entry 1
911	mrs	x1, esr_el1
912	enable_dbg
913	mov	x0, sp
914	bl	do_serror
915	kernel_exit 1
916ENDPROC(el1_error)
917
918el0_error:
919	kernel_entry 0
920el0_error_naked:
921	mrs	x1, esr_el1
922	enable_dbg
923	mov	x0, sp
924	bl	do_serror
925	enable_daif
926	ct_user_exit
927	b	ret_to_user
928ENDPROC(el0_error)
929
930/*
931 * Ok, we need to do extra processing, enter the slow path.
932 */
933work_pending:
934	mov	x0, sp				// 'regs'
935	bl	do_notify_resume
936#ifdef CONFIG_TRACE_IRQFLAGS
937	bl	trace_hardirqs_on		// enabled while in userspace
938#endif
939	ldr	x1, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
940	b	finish_ret_to_user
941/*
942 * "slow" syscall return path.
943 */
944ret_to_user:
945	disable_daif
946	ldr	x1, [tsk, #TSK_TI_FLAGS]
947	and	x2, x1, #_TIF_WORK_MASK
948	cbnz	x2, work_pending
949finish_ret_to_user:
950	enable_step_tsk x1, x2
951#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
952	bl	stackleak_erase
953#endif
954	kernel_exit 0
955ENDPROC(ret_to_user)
956
957/*
958 * SVC handler.
959 */
960	.align	6
961el0_svc:
962	mov	x0, sp
963	bl	el0_svc_handler
964	b	ret_to_user
965ENDPROC(el0_svc)
966
967	.popsection				// .entry.text
968
969#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
970/*
971 * Exception vectors trampoline.
972 */
973	.pushsection ".entry.tramp.text", "ax"
974
975	.macro tramp_map_kernel, tmp
976	mrs	\tmp, ttbr1_el1
977	add	\tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE)
978	bic	\tmp, \tmp, #USER_ASID_FLAG
979	msr	ttbr1_el1, \tmp
980#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
981alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
982	/* ASID already in \tmp[63:48] */
983	movk	\tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)
984	movk	\tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)
985	/* 2MB boundary containing the vectors, so we nobble the walk cache */
986	movk	\tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12)
987	isb
988	tlbi	vae1, \tmp
989	dsb	nsh
990alternative_else_nop_endif
991#endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */
992	.endm
993
994	.macro tramp_unmap_kernel, tmp
995	mrs	\tmp, ttbr1_el1
996	sub	\tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE)
997	orr	\tmp, \tmp, #USER_ASID_FLAG
998	msr	ttbr1_el1, \tmp
999	/*
1000	 * We avoid running the post_ttbr_update_workaround here because
1001	 * it's only needed by Cavium ThunderX, which requires KPTI to be
1002	 * disabled.
1003	 */
1004	.endm
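	/*
	 * The two macros above switch TTBR1_EL1 between the trampoline and
	 * swapper page tables, which sit a fixed distance apart (one page
	 * plus the reserved TTBR0 page), and flip USER_ASID_FLAG so the user
	 * and kernel halves of the ASID pair stay distinct and no TLB
	 * invalidation is needed on the switch (Falkor excepted, as handled
	 * above).
	 */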
1005
1006	.macro tramp_ventry, regsize = 64
1007	.align	7
10081:
1009	.if	\regsize == 64
1010	msr	tpidrro_el0, x30	// Restored in kernel_ventry
1011	.endif
1012	/*
1013	 * Defend against branch aliasing attacks by pushing a dummy
1014	 * entry onto the return stack and using a RET instruction to
1015	 * enter the full-fat kernel vectors.
1016	 */
1017	bl	2f
1018	b	.
10192:
1020	tramp_map_kernel	x30
1021#ifdef CONFIG_RANDOMIZE_BASE
1022	adr	x30, tramp_vectors + PAGE_SIZE
1023alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
1024	ldr	x30, [x30]
1025#else
1026	ldr	x30, =vectors
1027#endif
1028	prfm	plil1strm, [x30, #(1b - tramp_vectors)]
1029	msr	vbar_el1, x30
1030	add	x30, x30, #(1b - tramp_vectors)
1031	isb
1032	ret
1033	.endm
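	/*
	 * In short, tramp_ventry: stash the 64-bit task's x30, map the
	 * kernel, locate the real vector table (from a literal, or via the
	 * data page that follows the trampoline text when KASLR is enabled),
	 * point VBAR_EL1 at it, then prefetch and branch to the slot in the
	 * real table that corresponds to this trampoline vector.
	 */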
1034
1035	.macro tramp_exit, regsize = 64
1036	adr	x30, tramp_vectors
1037	msr	vbar_el1, x30
1038	tramp_unmap_kernel	x30
1039	.if	\regsize == 64
1040	mrs	x30, far_el1
1041	.endif
1042	eret
1043	sb
1044	.endm
1045
1046	.align	11
1047ENTRY(tramp_vectors)
1048	.space	0x400
1049
1050	tramp_ventry
1051	tramp_ventry
1052	tramp_ventry
1053	tramp_ventry
1054
1055	tramp_ventry	32
1056	tramp_ventry	32
1057	tramp_ventry	32
1058	tramp_ventry	32
1059END(tramp_vectors)
1060
1061ENTRY(tramp_exit_native)
1062	tramp_exit
1063END(tramp_exit_native)
1064
1065ENTRY(tramp_exit_compat)
1066	tramp_exit	32
1067END(tramp_exit_compat)
1068
1069	.ltorg
1070	.popsection				// .entry.tramp.text
1071#ifdef CONFIG_RANDOMIZE_BASE
1072	.pushsection ".rodata", "a"
1073	.align PAGE_SHIFT
1074	.globl	__entry_tramp_data_start
1075__entry_tramp_data_start:
1076	.quad	vectors
1077	.popsection				// .rodata
1078#endif /* CONFIG_RANDOMIZE_BASE */
1079#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
1080
1081/*
1082 * Register switch for AArch64. The callee-saved registers need to be saved
1083 * and restored. On entry:
1084 *   x0 = previous task_struct (must be preserved across the switch)
1085 *   x1 = next task_struct
1086 * Previous and next are guaranteed not to be the same.
1087 *
1088 */
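/*
 * The callee-saved registers, fp, sp and lr are saved to and restored from
 * cpu_context inside thread_struct (THREAD_CPU_CONTEXT is its offset within
 * task_struct); sp_el0 is updated as well because it carries the 'current'
 * task pointer on arm64 (see get_current_task).
 */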
1089ENTRY(cpu_switch_to)
1090	mov	x10, #THREAD_CPU_CONTEXT
1091	add	x8, x0, x10
1092	mov	x9, sp
1093	stp	x19, x20, [x8], #16		// store callee-saved registers
1094	stp	x21, x22, [x8], #16
1095	stp	x23, x24, [x8], #16
1096	stp	x25, x26, [x8], #16
1097	stp	x27, x28, [x8], #16
1098	stp	x29, x9, [x8], #16
1099	str	lr, [x8]
1100	add	x8, x1, x10
1101	ldp	x19, x20, [x8], #16		// restore callee-saved registers
1102	ldp	x21, x22, [x8], #16
1103	ldp	x23, x24, [x8], #16
1104	ldp	x25, x26, [x8], #16
1105	ldp	x27, x28, [x8], #16
1106	ldp	x29, x9, [x8], #16
1107	ldr	lr, [x8]
1108	mov	sp, x9
1109	msr	sp_el0, x1
1110	ret
1111ENDPROC(cpu_switch_to)
1112NOKPROBE(cpu_switch_to)
1113
1114/*
1115 * This is how we return from a fork.
1116 */
1117ENTRY(ret_from_fork)
1118	bl	schedule_tail
1119	cbz	x19, 1f				// not a kernel thread
1120	mov	x0, x20
1121	blr	x19
11221:	get_current_task tsk
1123	b	ret_to_user
1124ENDPROC(ret_from_fork)
1125NOKPROBE(ret_from_fork)
1126
1127#ifdef CONFIG_ARM_SDE_INTERFACE
1128
1129#include <asm/sdei.h>
1130#include <uapi/linux/arm_sdei.h>
1131
1132.macro sdei_handler_exit exit_mode
1133	/* On success, this call never returns... */
1134	cmp	\exit_mode, #SDEI_EXIT_SMC
1135	b.ne	99f
1136	smc	#0
1137	b	.
113899:	hvc	#0
1139	b	.
1140.endm
1141
1142#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
1143/*
1144 * The regular SDEI entry point may have been unmapped along with the rest of
1145 * the kernel. This trampoline restores the kernel mapping to make the x1 memory
1146 * argument accessible.
1147 *
1148 * This clobbers x4; __sdei_handler() will restore it from firmware's
1149 * copy.
1150 */
1151.ltorg
1152.pushsection ".entry.tramp.text", "ax"
1153ENTRY(__sdei_asm_entry_trampoline)
1154	mrs	x4, ttbr1_el1
1155	tbz	x4, #USER_ASID_BIT, 1f
1156
1157	tramp_map_kernel tmp=x4
1158	isb
1159	mov	x4, xzr
1160
1161	/*
1162	 * Use reg->interrupted_regs.addr_limit to remember whether to unmap
1163	 * the kernel on exit.
1164	 */
11651:	str	x4, [x1, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
1166
1167#ifdef CONFIG_RANDOMIZE_BASE
1168	adr	x4, tramp_vectors + PAGE_SIZE
1169	add	x4, x4, #:lo12:__sdei_asm_trampoline_next_handler
1170	ldr	x4, [x4]
1171#else
1172	ldr	x4, =__sdei_asm_handler
1173#endif
1174	br	x4
1175ENDPROC(__sdei_asm_entry_trampoline)
1176NOKPROBE(__sdei_asm_entry_trampoline)
1177
1178/*
1179 * Make the exit call and restore the original ttbr1_el1
1180 *
1181 * x0 & x1: setup for the exit API call
1182 * x2: exit_mode
1183 * x4: struct sdei_registered_event argument from registration time.
1184 */
1185ENTRY(__sdei_asm_exit_trampoline)
1186	ldr	x4, [x4, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
1187	cbnz	x4, 1f
1188
1189	tramp_unmap_kernel	tmp=x4
1190
11911:	sdei_handler_exit exit_mode=x2
1192ENDPROC(__sdei_asm_exit_trampoline)
1193NOKPROBE(__sdei_asm_exit_trampoline)
1194	.ltorg
1195.popsection		// .entry.tramp.text
1196#ifdef CONFIG_RANDOMIZE_BASE
1197.pushsection ".rodata", "a"
1198__sdei_asm_trampoline_next_handler:
1199	.quad	__sdei_asm_handler
1200.popsection		// .rodata
1201#endif /* CONFIG_RANDOMIZE_BASE */
1202#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
1203
1204/*
1205 * Software Delegated Exception entry point.
1206 *
1207 * x0: Event number
1208 * x1: struct sdei_registered_event argument from registration time.
1209 * x2: interrupted PC
1210 * x3: interrupted PSTATE
1211 * x4: maybe clobbered by the trampoline
1212 *
1213 * Firmware has preserved x0->x17 for us; we must save/restore the rest to
1214 * follow SMC-CC. We save (or retrieve) all the registers as the handler may
1215 * want them.
1216 */
1217ENTRY(__sdei_asm_handler)
1218	stp     x2, x3, [x1, #SDEI_EVENT_INTREGS + S_PC]
1219	stp     x4, x5, [x1, #SDEI_EVENT_INTREGS + 16 * 2]
1220	stp     x6, x7, [x1, #SDEI_EVENT_INTREGS + 16 * 3]
1221	stp     x8, x9, [x1, #SDEI_EVENT_INTREGS + 16 * 4]
1222	stp     x10, x11, [x1, #SDEI_EVENT_INTREGS + 16 * 5]
1223	stp     x12, x13, [x1, #SDEI_EVENT_INTREGS + 16 * 6]
1224	stp     x14, x15, [x1, #SDEI_EVENT_INTREGS + 16 * 7]
1225	stp     x16, x17, [x1, #SDEI_EVENT_INTREGS + 16 * 8]
1226	stp     x18, x19, [x1, #SDEI_EVENT_INTREGS + 16 * 9]
1227	stp     x20, x21, [x1, #SDEI_EVENT_INTREGS + 16 * 10]
1228	stp     x22, x23, [x1, #SDEI_EVENT_INTREGS + 16 * 11]
1229	stp     x24, x25, [x1, #SDEI_EVENT_INTREGS + 16 * 12]
1230	stp     x26, x27, [x1, #SDEI_EVENT_INTREGS + 16 * 13]
1231	stp     x28, x29, [x1, #SDEI_EVENT_INTREGS + 16 * 14]
1232	mov	x4, sp
1233	stp     lr, x4, [x1, #SDEI_EVENT_INTREGS + S_LR]
1234
1235	mov	x19, x1
1236
1237#ifdef CONFIG_VMAP_STACK
1238	/*
1239	 * entry.S may have been using sp as a scratch register; find whether
1240	 * this is a normal or critical event and switch to the appropriate
1241	 * stack for this CPU.
1242	 */
1243	ldrb	w4, [x19, #SDEI_EVENT_PRIORITY]
1244	cbnz	w4, 1f
1245	ldr_this_cpu dst=x5, sym=sdei_stack_normal_ptr, tmp=x6
1246	b	2f
12471:	ldr_this_cpu dst=x5, sym=sdei_stack_critical_ptr, tmp=x6
12482:	mov	x6, #SDEI_STACK_SIZE
1249	add	x5, x5, x6
1250	mov	sp, x5
1251#endif
1252
1253	/*
1254	 * We may have interrupted userspace, or a guest, or exit-from or
1255	 * return-to either of these. We can't trust sp_el0, restore it.
1256	 */
1257	mrs	x28, sp_el0
1258	ldr_this_cpu	dst=x0, sym=__entry_task, tmp=x1
1259	msr	sp_el0, x0
1260
1261	/* If we interrupted the kernel, point to the previous stack/frame. */
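	/*
	 * PSTATE.M and CurrentEL both encode the exception level in bits
	 * [3:2], hence the #0xc mask: only if the event interrupted the
	 * kernel do we trust x29/ELR enough to chain a synthetic frame
	 * record onto the interrupted context.
	 */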
1262	and     x0, x3, #0xc
1263	mrs     x1, CurrentEL
1264	cmp     x0, x1
1265	csel	x29, x29, xzr, eq	// fp, or zero
1266	csel	x4, x2, xzr, eq		// elr, or zero
1267
1268	stp	x29, x4, [sp, #-16]!
1269	mov	x29, sp
1270
1271	add	x0, x19, #SDEI_EVENT_INTREGS
1272	mov	x1, x19
1273	bl	__sdei_handler
1274
1275	msr	sp_el0, x28
1276	/* restore regs >x17 that we clobbered */
1277	mov	x4, x19         // keep x4 for __sdei_asm_exit_trampoline
1278	ldp	x28, x29, [x4, #SDEI_EVENT_INTREGS + 16 * 14]
1279	ldp	x18, x19, [x4, #SDEI_EVENT_INTREGS + 16 * 9]
1280	ldp	lr, x1, [x4, #SDEI_EVENT_INTREGS + S_LR]
1281	mov	sp, x1
1282
1283	mov	x1, x0			// address to complete_and_resume
1284	/* x0 = (x0 <= 1) ? EVENT_COMPLETE:EVENT_COMPLETE_AND_RESUME */
1285	cmp	x0, #1
1286	mov_q	x2, SDEI_1_0_FN_SDEI_EVENT_COMPLETE
1287	mov_q	x3, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
1288	csel	x0, x2, x3, ls
1289
1290	ldr_l	x2, sdei_exit_mode
1291
1292alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
1293	sdei_handler_exit exit_mode=x2
1294alternative_else_nop_endif
1295
1296#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
1297	tramp_alias	dst=x5, sym=__sdei_asm_exit_trampoline
1298	br	x5
1299#endif
1300ENDPROC(__sdei_asm_handler)
1301NOKPROBE(__sdei_asm_handler)
1302#endif /* CONFIG_ARM_SDE_INTERFACE */
1303