xref: /openbmc/linux/arch/arm64/kernel/entry.S (revision 62e7ca52)
/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>

/*
 * Context tracking subsystem.  Used to instrument transitions
 * between user and kernel mode.
 */
	.macro ct_user_exit, syscall = 0
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_exit
	.if \syscall == 1
	/*
	 * Save/restore needed during syscalls.  Restore syscall arguments from
	 * the values already saved on stack during kernel_entry.
	 */
	ldp	x0, x1, [sp]
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	.endif
#endif
	.endm

	.macro ct_user_enter
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_enter
#endif
	.endm
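/*
 * Note: context_tracking_user_exit()/_enter() are ordinary C calls, so
 * they may clobber the caller-saved registers x0-x18.  That is why the
 * syscall variant above reloads the syscall arguments (x0-x7) from the
 * pt_regs frame that kernel_entry saved on the stack.
 */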

/*
 * Bad Abort numbers
 *-----------------
 */
#define BAD_SYNC	0
#define BAD_IRQ		1
#define BAD_FIQ		2
#define BAD_ERROR	3

	.macro	kernel_entry, el, regsize = 64
	sub	sp, sp, #S_FRAME_SIZE - S_LR	// room for LR, SP, SPSR, ELR
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif
	push	x28, x29
	push	x26, x27
	push	x24, x25
	push	x22, x23
	push	x20, x21
	push	x18, x19
	push	x16, x17
	push	x14, x15
	push	x12, x13
	push	x10, x11
	push	x8, x9
	push	x6, x7
	push	x4, x5
	push	x2, x3
	push	x0, x1
	.if	\el == 0
	mrs	x21, sp_el0
	get_thread_info tsk			// Ensure MDSCR_EL1.SS is clear,
	ldr	x19, [tsk, #TI_FLAGS]		// since we can unmask debug
	disable_step_tsk x19, x20		// exceptions when scheduling.
	.else
	add	x21, sp, #S_FRAME_SIZE
	.endif
	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	lr, x21, [sp, #S_LR]
	stp	x22, x23, [sp, #S_PC]

	/*
	 * Set syscallno to -1 by default (overridden later if real syscall).
	 */
	.if	\el == 0
	mvn	x21, xzr
	str	x21, [sp, #S_SYSCALLNO]
	.endif

	/*
	 * Registers that may be useful after this macro is invoked:
	 *
	 * x21 - aborted SP
	 * x22 - aborted PC
	 * x23 - aborted PSTATE
	 */
	.endm
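/*
 * For reference, a rough sketch of the pt_regs frame that kernel_entry
 * leaves on the kernel stack (offsets are the S_* constants generated
 * by asm-offsets.c; treat this as an approximation of struct pt_regs,
 * not a substitute for its definition):
 *
 *	sp + S_X0 ...	 x0-x29		general purpose registers
 *	sp + S_LR	 x30 (lr)
 *	sp + S_SP	 aborted SP	(sp_el0, or the EL1 stack)
 *	sp + S_PC	 aborted PC	(from elr_el1)
 *	sp + S_PSTATE	 aborted PSTATE	(from spsr_el1)
 *	sp + S_SYSCALLNO syscall number (-1 unless this is a syscall)
 */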

	.macro	kernel_exit, el, ret = 0
	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
	.if	\el == 0
	ct_user_enter
	ldr	x23, [sp, #S_SP]		// load return stack pointer
	.endif
	.if	\ret
	ldr	x1, [sp, #S_X1]			// preserve x0 (syscall return)
	add	sp, sp, S_X2
	.else
	pop	x0, x1
	.endif
	pop	x2, x3				// load the rest of the registers
	pop	x4, x5
	pop	x6, x7
	pop	x8, x9
	msr	elr_el1, x21			// set up the return data
	msr	spsr_el1, x22
	.if	\el == 0
	msr	sp_el0, x23
	.endif
	pop	x10, x11
	pop	x12, x13
	pop	x14, x15
	pop	x16, x17
	pop	x18, x19
	pop	x20, x21
	pop	x22, x23
	pop	x24, x25
	pop	x26, x27
	pop	x28, x29
	ldr	lr, [sp], #S_FRAME_SIZE - S_LR	// load LR and restore SP
	eret					// return to the interrupted context
	.endm
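/*
 * With \ret == 1 (the fast syscall return) x0 still holds the syscall
 * return value, so only x1 is reloaded and the x0/x1 slots are skipped;
 * the final eret uses the ELR_EL1/SPSR_EL1 values restored above.
 */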

	.macro	get_thread_info, rd
	mov	\rd, sp
	and	\rd, \rd, #~(THREAD_SIZE - 1)	// top of stack
	.endm
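/*
 * This relies on the kernel stack being THREAD_SIZE aligned with
 * struct thread_info placed at its lowest address, so masking the
 * current SP is enough to find it.
 */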

/*
 * These are the registers used in the syscall handler, and they allow us,
 * in theory, up to seven arguments to a function - x0 to x6.
 *
 * x7 is reserved for the system call number in 32-bit mode.
 */
sc_nr	.req	x25		// number of system calls
scno	.req	x26		// syscall number
stbl	.req	x27		// syscall table pointer
tsk	.req	x28		// current thread_info
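/*
 * x25-x28 are callee-saved in the AAPCS64 procedure call standard, so
 * these values survive the C functions called while handling a syscall.
 */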

/*
 * Interrupt handling.
 */
	.macro	irq_handler
	ldr	x1, handle_arch_irq
	mov	x0, sp
	blr	x1
	.endm
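/*
 * handle_arch_irq is the function pointer stored at the end of this
 * file; the interrupt controller driver installs its handler there
 * during boot (typically via set_handle_irq()), and it is called here
 * with a pointer to the pt_regs frame in x0.
 */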

	.text

/*
 * Exception vectors.
 */

	.align	11
ENTRY(vectors)
	ventry	el1_sync_invalid		// Synchronous EL1t
	ventry	el1_irq_invalid			// IRQ EL1t
	ventry	el1_fiq_invalid			// FIQ EL1t
	ventry	el1_error_invalid		// Error EL1t

	ventry	el1_sync			// Synchronous EL1h
	ventry	el1_irq				// IRQ EL1h
	ventry	el1_fiq_invalid			// FIQ EL1h
	ventry	el1_error_invalid		// Error EL1h

	ventry	el0_sync			// Synchronous 64-bit EL0
	ventry	el0_irq				// IRQ 64-bit EL0
	ventry	el0_fiq_invalid			// FIQ 64-bit EL0
	ventry	el0_error_invalid		// Error 64-bit EL0

#ifdef CONFIG_COMPAT
	ventry	el0_sync_compat			// Synchronous 32-bit EL0
	ventry	el0_irq_compat			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid_compat		// FIQ 32-bit EL0
	ventry	el0_error_invalid_compat	// Error 32-bit EL0
#else
	ventry	el0_sync_invalid		// Synchronous 32-bit EL0
	ventry	el0_irq_invalid			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid			// FIQ 32-bit EL0
	ventry	el0_error_invalid		// Error 32-bit EL0
#endif
END(vectors)
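/*
 * VBAR_EL1 points at this table, which the architecture requires to be
 * 2KB aligned (hence .align 11).  It holds four groups of four entries:
 * exceptions taken from the current EL with SP_EL0, from the current EL
 * with SP_ELx, from a lower EL running AArch64, and from a lower EL
 * running AArch32; each ventry slot is 0x80 bytes wide.
 */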

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, el, reason, regsize = 64
	kernel_entry el, \regsize
	mov	x0, sp
	mov	x1, #\reason
	mrs	x2, esr_el1
	b	bad_mode
	.endm

el0_sync_invalid:
	inv_entry 0, BAD_SYNC
ENDPROC(el0_sync_invalid)

el0_irq_invalid:
	inv_entry 0, BAD_IRQ
ENDPROC(el0_irq_invalid)

el0_fiq_invalid:
	inv_entry 0, BAD_FIQ
ENDPROC(el0_fiq_invalid)

el0_error_invalid:
	inv_entry 0, BAD_ERROR
ENDPROC(el0_error_invalid)

#ifdef CONFIG_COMPAT
el0_fiq_invalid_compat:
	inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat)

el0_error_invalid_compat:
	inv_entry 0, BAD_ERROR, 32
ENDPROC(el0_error_invalid_compat)
#endif

el1_sync_invalid:
	inv_entry 1, BAD_SYNC
ENDPROC(el1_sync_invalid)

el1_irq_invalid:
	inv_entry 1, BAD_IRQ
ENDPROC(el1_irq_invalid)

el1_fiq_invalid:
	inv_entry 1, BAD_FIQ
ENDPROC(el1_fiq_invalid)

el1_error_invalid:
	inv_entry 1, BAD_ERROR
ENDPROC(el1_error_invalid)

/*
 * EL1 mode handlers.
 */
	.align	6
el1_sync:
	kernel_entry 1
	mrs	x1, esr_el1			// read the syndrome register
	lsr	x24, x1, #ESR_EL1_EC_SHIFT	// exception class
	cmp	x24, #ESR_EL1_EC_DABT_EL1	// data abort in EL1
	b.eq	el1_da
	cmp	x24, #ESR_EL1_EC_SYS64		// configurable trap
	b.eq	el1_undef
	cmp	x24, #ESR_EL1_EC_SP_ALIGN	// stack alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_EL1_EC_PC_ALIGN	// pc alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_EL1_EC_UNKNOWN	// unknown exception in EL1
	b.eq	el1_undef
	cmp	x24, #ESR_EL1_EC_BREAKPT_EL1	// debug exception in EL1
	b.ge	el1_dbg
	b	el1_inv
el1_da:
	/*
	 * Data abort handling
	 */
	mrs	x0, far_el1
	enable_dbg
	// re-enable interrupts if they were enabled in the aborted context
	tbnz	x23, #7, 1f			// PSR_I_BIT
	enable_irq
1:
	mov	x2, sp				// struct pt_regs
	bl	do_mem_abort

	// disable interrupts before pulling preserved data off the stack
	disable_irq
	kernel_exit 1
el1_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x0, far_el1
	enable_dbg
	mov	x2, sp
	b	do_sp_pc_abort
el1_undef:
	/*
	 * Undefined instruction
	 */
	enable_dbg
	mov	x0, sp
	b	do_undefinstr
el1_dbg:
	/*
	 * Debug exception handling
	 */
	cmp	x24, #ESR_EL1_EC_BRK64		// if BRK64
	cinc	x24, x24, eq			// set bit '0'
	tbz	x24, #0, el1_inv		// EL1 only
	mrs	x0, far_el1
	mov	x2, sp				// struct pt_regs
	bl	do_debug_exception
	enable_dbg
	kernel_exit 1
el1_inv:
	// TODO: add support for undefined instructions in kernel mode
	enable_dbg
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mrs	x2, esr_el1
	b	bad_mode
ENDPROC(el1_sync)
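/*
 * The C handlers invoked above live under arch/arm64/{mm,kernel}; as a
 * reading aid only, their prototypes are roughly (check the tree, this
 * is a sketch rather than a definitive reference):
 *
 *	void do_mem_abort(unsigned long addr, unsigned int esr,
 *			  struct pt_regs *regs);
 *	void do_sp_pc_abort(unsigned long addr, unsigned int esr,
 *			    struct pt_regs *regs);
 *	void do_undefinstr(struct pt_regs *regs);
 *	int  do_debug_exception(unsigned long addr, unsigned int esr,
 *				struct pt_regs *regs);
 *	void bad_mode(struct pt_regs *regs, int reason, unsigned int esr);
 */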

	.align	6
el1_irq:
	kernel_entry 1
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	irq_handler

#ifdef CONFIG_PREEMPT
	get_thread_info tsk
	ldr	w24, [tsk, #TI_PREEMPT]		// get preempt count
	cbnz	w24, 1f				// preempt count != 0
	ldr	x0, [tsk, #TI_FLAGS]		// get flags
	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
	bl	el1_preempt
1:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	kernel_exit 1
ENDPROC(el1_irq)

#ifdef CONFIG_PREEMPT
el1_preempt:
	mov	x24, lr
1:	bl	preempt_schedule_irq		// irq en/disable is done inside
	ldr	x0, [tsk, #TI_FLAGS]		// get the new task's TI_FLAGS
	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
	ret	x24
#endif

/*
 * EL0 mode handlers.
 */
	.align	6
el0_sync:
	kernel_entry 0
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_EL1_EC_SHIFT	// exception class
	cmp	x24, #ESR_EL1_EC_SVC64		// SVC in 64-bit state
	b.eq	el0_svc
	cmp	x24, #ESR_EL1_EC_DABT_EL0	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_EL1_EC_IABT_EL0	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_EL1_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_EL1_EC_FP_EXC64	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_EL1_EC_SYS64		// configurable trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_SP_ALIGN	// stack alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_EL1_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_EL1_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_BREAKPT_EL0	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv

#ifdef CONFIG_COMPAT
	.align	6
el0_sync_compat:
	kernel_entry 0, 32
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_EL1_EC_SHIFT	// exception class
	cmp	x24, #ESR_EL1_EC_SVC32		// SVC in 32-bit state
	b.eq	el0_svc_compat
	cmp	x24, #ESR_EL1_EC_DABT_EL0	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_EL1_EC_IABT_EL0	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_EL1_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_EL1_EC_FP_EXC32	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_EL1_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP15_32	// CP15 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP15_64	// CP15 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP14_MR	// CP14 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP14_LS	// CP14 LDC/STC trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP14_64	// CP14 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_BREAKPT_EL0	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv
el0_svc_compat:
	/*
	 * AArch32 syscall handling
	 */
	adr	stbl, compat_sys_call_table	// load compat syscall table pointer
	uxtw	scno, w7			// syscall number in w7 (r7)
	mov     sc_nr, #__NR_compat_syscalls
	b	el0_svc_naked

	.align	6
el0_irq_compat:
	kernel_entry 0, 32
	b	el0_irq_naked
#endif

el0_da:
	/*
	 * Data abort handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	bic	x0, x26, #(0xff << 56)
	mov	x1, x25
	mov	x2, sp
	adr	lr, ret_to_user
	b	do_mem_abort
el0_ia:
	/*
	 * Instruction abort handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, x26
	orr	x1, x25, #1 << 24		// use reserved ISS bit for instruction aborts
	mov	x2, sp
	adr	lr, ret_to_user
	b	do_mem_abort
el0_fpsimd_acc:
	/*
	 * Floating Point or Advanced SIMD access
	 */
	enable_dbg
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	adr	lr, ret_to_user
	b	do_fpsimd_acc
el0_fpsimd_exc:
	/*
	 * Floating Point or Advanced SIMD exception
	 */
	enable_dbg
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	adr	lr, ret_to_user
	b	do_fpsimd_exc
el0_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	mov	x0, x26
	mov	x1, x25
	mov	x2, sp
	adr	lr, ret_to_user
	b	do_sp_pc_abort
el0_undef:
	/*
	 * Undefined instruction
	 */
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, sp
	adr	lr, ret_to_user
	b	do_undefinstr
el0_dbg:
	/*
	 * Debug exception handling
	 */
	tbnz	x24, #0, el0_inv		// EL0 only
	mrs	x0, far_el1
	mov	x1, x25
	mov	x2, sp
	bl	do_debug_exception
	enable_dbg
	ct_user_exit
	b	ret_to_user
el0_inv:
	enable_dbg
	ct_user_exit
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mrs	x2, esr_el1
	adr	lr, ret_to_user
	b	bad_mode
ENDPROC(el0_sync)
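/*
 * Note the pattern used by the EL0 handlers above: lr is pointed at
 * ret_to_user before branching (not calling) into the C handler, so
 * when the handler returns it falls straight into the work-pending
 * checks and the kernel_exit path below.
 */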

	.align	6
el0_irq:
	kernel_entry 0
el0_irq_naked:
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	ct_user_exit
	irq_handler

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	b	ret_to_user
ENDPROC(el0_irq)

/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 *   x0 = previous task_struct (must be preserved across the switch)
 *   x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 *
 */
ENTRY(cpu_switch_to)
	add	x8, x0, #THREAD_CPU_CONTEXT
	mov	x9, sp
	stp	x19, x20, [x8], #16		// store callee-saved registers
	stp	x21, x22, [x8], #16
	stp	x23, x24, [x8], #16
	stp	x25, x26, [x8], #16
	stp	x27, x28, [x8], #16
	stp	x29, x9, [x8], #16
	str	lr, [x8]
	add	x8, x1, #THREAD_CPU_CONTEXT
	ldp	x19, x20, [x8], #16		// restore callee-saved registers
	ldp	x21, x22, [x8], #16
	ldp	x23, x24, [x8], #16
	ldp	x25, x26, [x8], #16
	ldp	x27, x28, [x8], #16
	ldp	x29, x9, [x8], #16
	ldr	lr, [x8]
	mov	sp, x9
	ret
ENDPROC(cpu_switch_to)
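/*
 * THREAD_CPU_CONTEXT is the offset of thread.cpu_context within
 * task_struct; the stores/loads above match its layout, roughly
 * struct cpu_context { x19-x28, fp (x29), sp, pc (lr) }.  Returning
 * with the new lr and sp is what actually resumes the next task.
 */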

/*
 * This is the fast syscall return path.  We do as little as possible
 * here; x0 (the syscall return value) is only written back to the
 * kernel stack if we have to take the slow path via fast_work_pending.
 */
ret_fast_syscall:
	disable_irq				// disable interrupts
	ldr	x1, [tsk, #TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, fast_work_pending
	enable_step_tsk x1, x2
	kernel_exit 0, ret = 1
/*
 * Ok, we need to do extra processing; enter the slow path.
 */
fast_work_pending:
	str	x0, [sp, #S_X0]			// returned x0
work_pending:
	tbnz	x1, #TIF_NEED_RESCHED, work_resched
	/* TIF_SIGPENDING, TIF_NOTIFY_RESUME or TIF_FOREIGN_FPSTATE case */
	ldr	x2, [sp, #S_PSTATE]
	mov	x0, sp				// 'regs'
	tst	x2, #PSR_MODE_MASK		// user mode regs?
	b.ne	no_work_pending			// returning to kernel
	enable_irq				// enable interrupts for do_notify_resume()
	bl	do_notify_resume
	b	ret_to_user
work_resched:
	bl	schedule

/*
 * "Slow" syscall return path.  This is also the common return-to-user
 * path for interrupts and other exceptions taken from EL0.
 */
ret_to_user:
	disable_irq				// disable interrupts
	ldr	x1, [tsk, #TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
	enable_step_tsk x1, x2
no_work_pending:
	kernel_exit 0, ret = 0
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	cbz	x19, 1f				// not a kernel thread
	mov	x0, x20
	blr	x19
1:	get_thread_info tsk
	b	ret_to_user
ENDPROC(ret_from_fork)
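/*
 * For kernel threads, copy_thread() leaves the thread function in x19
 * and its argument in x20 (x19 is zero for user tasks), which is what
 * the cbz/blr sequence above relies on.
 */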

/*
 * SVC handler.
 */
	.align	6
el0_svc:
	adrp	stbl, sys_call_table		// load syscall table pointer
	uxtw	scno, w8			// syscall number in w8
	mov	sc_nr, #__NR_syscalls
el0_svc_naked:					// compat entry point
	stp	x0, scno, [sp, #S_ORIG_X0]	// save the original x0 and syscall number
	enable_dbg_and_irq
	ct_user_exit 1

	ldr	x16, [tsk, #TI_FLAGS]		// check for syscall hooks
	tst	x16, #_TIF_SYSCALL_WORK
	b.ne	__sys_trace
	adr	lr, ret_fast_syscall		// return address
	cmp     scno, sc_nr                     // check upper syscall limit
	b.hs	ni_sys
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	br	x16				// call sys_* routine
ni_sys:
	mov	x0, sp
	b	do_ni_syscall
ENDPROC(el0_svc)
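/*
 * Syscall ABI recap: 64-bit tasks pass the number in w8, compat
 * (AArch32) tasks in w7 (r7), and the arguments in the low argument
 * registers.  The handler is found as an 8-byte pointer at
 * sys_call_table[scno], i.e. the "ldr x16, [stbl, scno, lsl #3]" above.
 */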

	/*
	 * This is the really slow path.  We're going to be doing context
	 * switches, and waiting for the tracer (usually our parent) to respond.
	 */
__sys_trace:
	mov	x0, sp
	bl	syscall_trace_enter
	adr	lr, __sys_trace_return		// return address
	uxtw	scno, w0			// syscall number (possibly new)
	mov	x1, sp				// pointer to regs
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	ni_sys
	ldp	x0, x1, [sp]			// restore the syscall args
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	br	x16				// call sys_* routine

__sys_trace_return:
	str	x0, [sp]			// save returned x0
	mov	x0, sp
	bl	syscall_trace_exit
	b	ret_to_user
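/*
 * syscall_trace_enter() returns the (possibly rewritten) syscall
 * number, and the arguments are reloaded from pt_regs afterwards, so
 * any changes a ptrace tracer made to the registers take effect before
 * the sys_* routine is invoked.
 */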

/*
 * Special system call wrappers.
 */
ENTRY(sys_rt_sigreturn_wrapper)
	mov	x0, sp
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

ENTRY(handle_arch_irq)
	.quad	0
