xref: /openbmc/linux/arch/arm64/kernel/entry.S (revision 8228a048)
/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>

/*
 * Context tracking subsystem.  Used to instrument transitions
 * between user and kernel mode.
 */
	.macro ct_user_exit, syscall = 0
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_exit
	.if \syscall == 1
	/*
	 * Save/restore needed during syscalls.  Restore syscall arguments from
	 * the values already saved on stack during kernel_entry.
	 */
	ldp	x0, x1, [sp]
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	.endif
#endif
	.endm

	.macro ct_user_enter
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_enter
#endif
	.endm

/*
 * Bad Abort numbers
 *-----------------
 */
#define BAD_SYNC	0
#define BAD_IRQ		1
#define BAD_FIQ		2
#define BAD_ERROR	3

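/*
 * kernel_entry builds a struct pt_regs frame on the kernel stack: x0-x29,
 * lr, the pre-exception sp, elr_el1 and spsr_el1 are saved at the S_*
 * offsets generated by asm-offsets, so C handlers and kernel_exit below
 * can find them later.
 */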
	.macro	kernel_entry, el, regsize = 64
	sub	sp, sp, #S_FRAME_SIZE
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif
	stp	x0, x1, [sp, #16 * 0]
	stp	x2, x3, [sp, #16 * 1]
	stp	x4, x5, [sp, #16 * 2]
	stp	x6, x7, [sp, #16 * 3]
	stp	x8, x9, [sp, #16 * 4]
	stp	x10, x11, [sp, #16 * 5]
	stp	x12, x13, [sp, #16 * 6]
	stp	x14, x15, [sp, #16 * 7]
	stp	x16, x17, [sp, #16 * 8]
	stp	x18, x19, [sp, #16 * 9]
	stp	x20, x21, [sp, #16 * 10]
	stp	x22, x23, [sp, #16 * 11]
	stp	x24, x25, [sp, #16 * 12]
	stp	x26, x27, [sp, #16 * 13]
	stp	x28, x29, [sp, #16 * 14]

	.if	\el == 0
	mrs	x21, sp_el0
	get_thread_info tsk			// Ensure MDSCR_EL1.SS is clear,
	ldr	x19, [tsk, #TI_FLAGS]		// since we can unmask debug
	disable_step_tsk x19, x20		// exceptions when scheduling.
	.else
	add	x21, sp, #S_FRAME_SIZE
	.endif
	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	lr, x21, [sp, #S_LR]
	stp	x22, x23, [sp, #S_PC]

	/*
	 * Set syscallno to -1 by default (overridden later if real syscall).
	 */
	.if	\el == 0
	mvn	x21, xzr
	str	x21, [sp, #S_SYSCALLNO]
	.endif

	/*
	 * Registers that may be useful after this macro is invoked:
	 *
	 * x21 - aborted SP
	 * x22 - aborted PC
	 * x23 - aborted PSTATE
	 */
	.endm

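/*
 * kernel_exit undoes kernel_entry: ELR/SPSR and the general purpose
 * registers are reloaded from the pt_regs frame before the eret.  With
 * ret = 1 the current value of x0 (the syscall return value) is kept
 * instead of being reloaded from the frame.
 */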
	.macro	kernel_exit, el, ret = 0
	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
	.if	\el == 0
	ct_user_enter
	ldr	x23, [sp, #S_SP]		// load return stack pointer
	msr	sp_el0, x23

#ifdef CONFIG_ARM64_ERRATUM_845719

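/*
 * Cortex-A53 erratum 845719 workaround: when returning to a 32-bit EL0
 * task (bit 4 of the saved SPSR in x22 flags AArch32), write to
 * CONTEXTIDR_EL1 before the eret.  The nops below are live by default and
 * are patched to the workaround sequence by the alternatives framework on
 * affected CPUs (ARM64_WORKAROUND_845719).
 */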
#undef SEQUENCE_ORG
#undef SEQUENCE_ALT

#ifdef CONFIG_PID_IN_CONTEXTIDR

#define SEQUENCE_ORG	"nop ; nop ; nop"
#define SEQUENCE_ALT	"tbz x22, #4, 1f ; mrs x29, contextidr_el1; msr contextidr_el1, x29; 1:"

#else

#define SEQUENCE_ORG	"nop ; nop"
#define SEQUENCE_ALT	"tbz x22, #4, 1f ; msr contextidr_el1, xzr; 1:"

#endif

	alternative_insn SEQUENCE_ORG, SEQUENCE_ALT, ARM64_WORKAROUND_845719

#endif
	.endif
	msr	elr_el1, x21			// set up the return data
	msr	spsr_el1, x22
	.if	\ret
	ldr	x1, [sp, #S_X1]			// preserve x0 (syscall return)
	.else
	ldp	x0, x1, [sp, #16 * 0]
	.endif
	ldp	x2, x3, [sp, #16 * 1]
	ldp	x4, x5, [sp, #16 * 2]
	ldp	x6, x7, [sp, #16 * 3]
	ldp	x8, x9, [sp, #16 * 4]
	ldp	x10, x11, [sp, #16 * 5]
	ldp	x12, x13, [sp, #16 * 6]
	ldp	x14, x15, [sp, #16 * 7]
	ldp	x16, x17, [sp, #16 * 8]
	ldp	x18, x19, [sp, #16 * 9]
	ldp	x20, x21, [sp, #16 * 10]
	ldp	x22, x23, [sp, #16 * 11]
	ldp	x24, x25, [sp, #16 * 12]
	ldp	x26, x27, [sp, #16 * 13]
	ldp	x28, x29, [sp, #16 * 14]
	ldr	lr, [sp, #S_LR]
	add	sp, sp, #S_FRAME_SIZE		// restore sp
	eret					// return to kernel or user space
	.endm

	.macro	get_thread_info, rd
	mov	\rd, sp
	and	\rd, \rd, #~(THREAD_SIZE - 1)	// top of stack
	.endm
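/*
 * The kernel stack is THREAD_SIZE aligned and struct thread_info sits at
 * its lowest address, so masking sp recovers the thread_info pointer.
 */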

/*
 * These are the registers used in the syscall handler, and allow us to
 * have, in theory, up to 7 arguments to a function - x0 to x6.
 *
 * x7 is reserved for the system call number in 32-bit mode.
 */
sc_nr	.req	x25		// number of system calls
scno	.req	x26		// syscall number
stbl	.req	x27		// syscall table pointer
tsk	.req	x28		// current thread_info
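/*
 * All four are callee-saved registers, so their values survive the C
 * functions called while a syscall or exception is being handled.
 */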

/*
 * Interrupt handling.
 */
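/*
 * handle_arch_irq is a function pointer installed by the interrupt
 * controller (irqchip) driver during boot; it is called with the pt_regs
 * pointer in x0.
 */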
	.macro	irq_handler
	adrp	x1, handle_arch_irq
	ldr	x1, [x1, #:lo12:handle_arch_irq]
	mov	x0, sp
	blr	x1
	.endm

	.text

/*
 * Exception vectors.
 */

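/*
 * The table is installed in VBAR_EL1 and must be 2KB aligned.  It contains
 * four groups of four 128-byte slots: current EL with SP_EL0, current EL
 * with SP_ELx, lower EL using AArch64 and lower EL using AArch32.
 */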
	.align	11
ENTRY(vectors)
	ventry	el1_sync_invalid		// Synchronous EL1t
	ventry	el1_irq_invalid			// IRQ EL1t
	ventry	el1_fiq_invalid			// FIQ EL1t
	ventry	el1_error_invalid		// Error EL1t

	ventry	el1_sync			// Synchronous EL1h
	ventry	el1_irq				// IRQ EL1h
	ventry	el1_fiq_invalid			// FIQ EL1h
	ventry	el1_error_invalid		// Error EL1h

	ventry	el0_sync			// Synchronous 64-bit EL0
	ventry	el0_irq				// IRQ 64-bit EL0
	ventry	el0_fiq_invalid			// FIQ 64-bit EL0
	ventry	el0_error_invalid		// Error 64-bit EL0

#ifdef CONFIG_COMPAT
	ventry	el0_sync_compat			// Synchronous 32-bit EL0
	ventry	el0_irq_compat			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid_compat		// FIQ 32-bit EL0
	ventry	el0_error_invalid_compat	// Error 32-bit EL0
#else
	ventry	el0_sync_invalid		// Synchronous 32-bit EL0
	ventry	el0_irq_invalid			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid			// FIQ 32-bit EL0
	ventry	el0_error_invalid		// Error 32-bit EL0
#endif
END(vectors)

/*
 * Invalid mode handlers
 */
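/*
 * inv_entry saves the register state and hands one of the BAD_* reason
 * codes defined above, together with ESR_EL1, to bad_mode().
 */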
	.macro	inv_entry, el, reason, regsize = 64
	kernel_entry el, \regsize
	mov	x0, sp
	mov	x1, #\reason
	mrs	x2, esr_el1
	b	bad_mode
	.endm

el0_sync_invalid:
	inv_entry 0, BAD_SYNC
ENDPROC(el0_sync_invalid)

el0_irq_invalid:
	inv_entry 0, BAD_IRQ
ENDPROC(el0_irq_invalid)

el0_fiq_invalid:
	inv_entry 0, BAD_FIQ
ENDPROC(el0_fiq_invalid)

el0_error_invalid:
	inv_entry 0, BAD_ERROR
ENDPROC(el0_error_invalid)

#ifdef CONFIG_COMPAT
el0_fiq_invalid_compat:
	inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat)

el0_error_invalid_compat:
	inv_entry 0, BAD_ERROR, 32
ENDPROC(el0_error_invalid_compat)
#endif

el1_sync_invalid:
	inv_entry 1, BAD_SYNC
ENDPROC(el1_sync_invalid)

el1_irq_invalid:
	inv_entry 1, BAD_IRQ
ENDPROC(el1_irq_invalid)

el1_fiq_invalid:
	inv_entry 1, BAD_FIQ
ENDPROC(el1_fiq_invalid)

el1_error_invalid:
	inv_entry 1, BAD_ERROR
ENDPROC(el1_error_invalid)

/*
 * EL1 mode handlers.
 */
	.align	6
el1_sync:
	kernel_entry 1
	mrs	x1, esr_el1			// read the syndrome register
	lsr	x24, x1, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_DABT_CUR	// data abort in EL1
	b.eq	el1_da
	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
	b.eq	el1_undef
	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL1
	b.eq	el1_undef
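	// Debug exception classes occupy the top of the EC space, so a single
	// b.ge catches breakpoint, single-step, watchpoint and BRK.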
	cmp	x24, #ESR_ELx_EC_BREAKPT_CUR	// debug exception in EL1
	b.ge	el1_dbg
	b	el1_inv
el1_da:
	/*
	 * Data abort handling
	 */
	mrs	x0, far_el1
	enable_dbg
	// re-enable interrupts if they were enabled in the aborted context
	tbnz	x23, #7, 1f			// PSR_I_BIT
	enable_irq
1:
	mov	x2, sp				// struct pt_regs
	bl	do_mem_abort

	// disable interrupts before pulling preserved data off the stack
	disable_irq
	kernel_exit 1
el1_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x0, far_el1
	enable_dbg
	mov	x2, sp
	b	do_sp_pc_abort
el1_undef:
	/*
	 * Undefined instruction
	 */
	enable_dbg
	mov	x0, sp
	b	do_undefinstr
el1_dbg:
	/*
	 * Debug exception handling
	 */
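	// EC values for debug exceptions taken from the current EL have bit 0
	// set; BRK64 does not, so bump it with cinc first.  Anything still
	// failing the bit 0 test is not an EL1 debug exception.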
	cmp	x24, #ESR_ELx_EC_BRK64		// if BRK64
	cinc	x24, x24, eq			// set bit '0'
	tbz	x24, #0, el1_inv		// EL1 only
	mrs	x0, far_el1
	mov	x2, sp				// struct pt_regs
	bl	do_debug_exception
	kernel_exit 1
el1_inv:
	// TODO: add support for undefined instructions in kernel mode
	enable_dbg
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mrs	x2, esr_el1
	b	bad_mode
ENDPROC(el1_sync)

	.align	6
el1_irq:
	kernel_entry 1
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	irq_handler

#ifdef CONFIG_PREEMPT
	get_thread_info tsk
	ldr	w24, [tsk, #TI_PREEMPT]		// get preempt count
	cbnz	w24, 1f				// preempt count != 0
	ldr	x0, [tsk, #TI_FLAGS]		// get flags
	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
	bl	el1_preempt
1:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	kernel_exit 1
ENDPROC(el1_irq)

#ifdef CONFIG_PREEMPT
el1_preempt:
	mov	x24, lr
1:	bl	preempt_schedule_irq		// irq en/disable is done inside
	ldr	x0, [tsk, #TI_FLAGS]		// get new task's TI_FLAGS
	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
	ret	x24
#endif

/*
 * EL0 mode handlers.
 */
	.align	6
el0_sync:
	kernel_entry 0
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_SVC64		// SVC in 64-bit state
	b.eq	el0_svc
	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_ELx_EC_FP_EXC64	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv

#ifdef CONFIG_COMPAT
	.align	6
el0_sync_compat:
	kernel_entry 0, 32
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_SVC32		// SVC in 32-bit state
	b.eq	el0_svc_compat
	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_ELx_EC_FP_EXC32	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP15_32	// CP15 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP15_64	// CP15 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_MR	// CP14 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_LS	// CP14 LDC/STC trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_64	// CP14 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv
el0_svc_compat:
	/*
	 * AArch32 syscall handling
	 */
	adrp	stbl, compat_sys_call_table	// load compat syscall table pointer
	uxtw	scno, w7			// syscall number in w7 (r7)
	mov	sc_nr, #__NR_compat_syscalls
	b	el0_svc_naked

	.align	6
el0_irq_compat:
	kernel_entry 0, 32
	b	el0_irq_naked
#endif

el0_da:
	/*
	 * Data abort handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
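	// Clear the address tag (top byte) from the fault address: EL0 runs
	// with TBI enabled, so user pointers may carry a tag in bits 63:56.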
	bic	x0, x26, #(0xff << 56)
	mov	x1, x25
	mov	x2, sp
	bl	do_mem_abort
	b	ret_to_user
el0_ia:
	/*
	 * Instruction abort handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, x26
	orr	x1, x25, #1 << 24		// use reserved ISS bit for instruction aborts
	mov	x2, sp
	bl	do_mem_abort
	b	ret_to_user
el0_fpsimd_acc:
	/*
	 * Floating Point or Advanced SIMD access
	 */
	enable_dbg
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_fpsimd_acc
	b	ret_to_user
el0_fpsimd_exc:
	/*
	 * Floating Point or Advanced SIMD exception
	 */
	enable_dbg
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_fpsimd_exc
	b	ret_to_user
el0_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_sp_pc_abort
	b	ret_to_user
el0_undef:
	/*
	 * Undefined instruction
	 */
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, sp
	bl	do_undefinstr
	b	ret_to_user
el0_dbg:
	/*
	 * Debug exception handling
	 */
	tbnz	x24, #0, el0_inv		// EL0 only
	mrs	x0, far_el1
	mov	x1, x25
	mov	x2, sp
	bl	do_debug_exception
	enable_dbg
	ct_user_exit
	b	ret_to_user
el0_inv:
	enable_dbg
	ct_user_exit
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mrs	x2, esr_el1
	bl	bad_mode
	b	ret_to_user
ENDPROC(el0_sync)

	.align	6
el0_irq:
	kernel_entry 0
el0_irq_naked:
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	ct_user_exit
	irq_handler

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	b	ret_to_user
ENDPROC(el0_irq)

/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 *   x0 = previous task_struct (must be preserved across the switch)
 *   x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 *
 */
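/*
 * Only the callee-saved registers, sp and lr are switched here: the AAPCS
 * requires the caller to assume every other register may be clobbered
 * across the call, so they do not need to live in cpu_context.
 */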
ENTRY(cpu_switch_to)
	add	x8, x0, #THREAD_CPU_CONTEXT
	mov	x9, sp
	stp	x19, x20, [x8], #16		// store callee-saved registers
	stp	x21, x22, [x8], #16
	stp	x23, x24, [x8], #16
	stp	x25, x26, [x8], #16
	stp	x27, x28, [x8], #16
	stp	x29, x9, [x8], #16
	str	lr, [x8]
	add	x8, x1, #THREAD_CPU_CONTEXT
	ldp	x19, x20, [x8], #16		// restore callee-saved registers
	ldp	x21, x22, [x8], #16
	ldp	x23, x24, [x8], #16
	ldp	x25, x26, [x8], #16
	ldp	x27, x28, [x8], #16
	ldp	x29, x9, [x8], #16
	ldr	lr, [x8]
	mov	sp, x9
	ret
ENDPROC(cpu_switch_to)

/*
 * This is the fast syscall return path.  We do as little as possible here,
 * and this includes saving x0 back into the kernel stack.
 */
ret_fast_syscall:
	disable_irq				// disable interrupts
	ldr	x1, [tsk, #TI_FLAGS]		// re-check for syscall tracing
	and	x2, x1, #_TIF_SYSCALL_WORK
	cbnz	x2, ret_fast_syscall_trace
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, fast_work_pending
	enable_step_tsk x1, x2
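	// ret = 1: kernel_exit leaves x0 untouched, preserving the syscall
	// return value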
	kernel_exit 0, ret = 1
ret_fast_syscall_trace:
	enable_irq				// enable interrupts
	b	__sys_trace_return

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
fast_work_pending:
	str	x0, [sp, #S_X0]			// returned x0
work_pending:
	tbnz	x1, #TIF_NEED_RESCHED, work_resched
	/* TIF_SIGPENDING, TIF_NOTIFY_RESUME or TIF_FOREIGN_FPSTATE case */
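	// A non-zero mode field in the saved PSTATE means the exception was
	// taken from the kernel, so signal/notification work is skipped.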
	ldr	x2, [sp, #S_PSTATE]
	mov	x0, sp				// 'regs'
	tst	x2, #PSR_MODE_MASK		// user mode regs?
	b.ne	no_work_pending			// returning to kernel
	enable_irq				// enable interrupts for do_notify_resume()
	bl	do_notify_resume
	b	ret_to_user
work_resched:
	bl	schedule

/*
 * "slow" syscall return path.
 */
ret_to_user:
	disable_irq				// disable interrupts
	ldr	x1, [tsk, #TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
	enable_step_tsk x1, x2
no_work_pending:
	kernel_exit 0, ret = 0
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
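/*
 * copy_thread() stashes a kernel thread's entry function in x19 and its
 * argument in x20; user tasks arrive here with x19 == 0 and fall straight
 * through to ret_to_user.
 */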
ENTRY(ret_from_fork)
	bl	schedule_tail
	cbz	x19, 1f				// not a kernel thread
	mov	x0, x20
	blr	x19
1:	get_thread_info tsk
	b	ret_to_user
ENDPROC(ret_from_fork)

/*
 * SVC handler.
 */
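/*
 * The 64-bit syscall ABI passes the syscall number in w8; the compat
 * (AArch32) path above uses r7.  The table is indexed by scno * 8 to
 * fetch the handler address.
 */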
	.align	6
el0_svc:
	adrp	stbl, sys_call_table		// load syscall table pointer
	uxtw	scno, w8			// syscall number in w8
	mov	sc_nr, #__NR_syscalls
el0_svc_naked:					// compat entry point
	stp	x0, scno, [sp, #S_ORIG_X0]	// save the original x0 and syscall number
	enable_dbg_and_irq
	ct_user_exit 1

	ldr	x16, [tsk, #TI_FLAGS]		// check for syscall hooks
	tst	x16, #_TIF_SYSCALL_WORK
	b.ne	__sys_trace
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	ni_sys
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	blr	x16				// call sys_* routine
	b	ret_fast_syscall
ni_sys:
	mov	x0, sp
	bl	do_ni_syscall
	b	ret_fast_syscall
ENDPROC(el0_svc)

	/*
	 * This is the really slow path.  We're going to be doing context
	 * switches, and waiting for our parent to respond.
	 */
__sys_trace:
	mov	w0, #-1				// set default errno for
	cmp	scno, x0			// user-issued syscall(-1)
	b.ne	1f
	mov	x0, #-ENOSYS
	str	x0, [sp, #S_X0]
1:	mov	x0, sp
	bl	syscall_trace_enter
	cmp	w0, #-1				// skip the syscall?
	b.eq	__sys_trace_return_skipped
	uxtw	scno, w0			// syscall number (possibly new)
	mov	x1, sp				// pointer to regs
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	__ni_sys_trace
	ldp	x0, x1, [sp]			// restore the syscall args
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	blr	x16				// call sys_* routine

__sys_trace_return:
	str	x0, [sp, #S_X0]			// save returned x0
__sys_trace_return_skipped:
	mov	x0, sp
	bl	syscall_trace_exit
	b	ret_to_user

__ni_sys_trace:
	mov	x0, sp
	bl	do_ni_syscall
	b	__sys_trace_return

/*
 * Special system call wrappers.
 */
ENTRY(sys_rt_sigreturn_wrapper)
	mov	x0, sp
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)
740