xref: /openbmc/linux/arch/arm64/kernel/entry.S (revision 92a76f6d)
/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/irq.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>

/*
 * Context tracking subsystem.  Used to instrument transitions
 * between user and kernel mode.
 */
	.macro ct_user_exit, syscall = 0
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_exit
	.if \syscall == 1
	/*
	 * Save/restore needed during syscalls.  context_tracking_user_exit()
	 * is an ordinary C call and may clobber the argument registers, so
	 * restore the syscall arguments from the values already saved on the
	 * stack during kernel_entry.
	 */
	ldp	x0, x1, [sp]
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	.endif
#endif
	.endm

	.macro ct_user_enter
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_enter
#endif
	.endm

/*
 * Bad Abort numbers
 *-----------------
 */
#define BAD_SYNC	0
#define BAD_IRQ		1
#define BAD_FIQ		2
#define BAD_ERROR	3
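/*
 * These codes are passed as the 'reason' argument to bad_mode() by the
 * inv_entry macro below when an unexpected vector is taken.
 */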

	.macro	kernel_entry, el, regsize = 64
	sub	sp, sp, #S_FRAME_SIZE
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif
	stp	x0, x1, [sp, #16 * 0]
	stp	x2, x3, [sp, #16 * 1]
	stp	x4, x5, [sp, #16 * 2]
	stp	x6, x7, [sp, #16 * 3]
	stp	x8, x9, [sp, #16 * 4]
	stp	x10, x11, [sp, #16 * 5]
	stp	x12, x13, [sp, #16 * 6]
	stp	x14, x15, [sp, #16 * 7]
	stp	x16, x17, [sp, #16 * 8]
	stp	x18, x19, [sp, #16 * 9]
	stp	x20, x21, [sp, #16 * 10]
	stp	x22, x23, [sp, #16 * 11]
	stp	x24, x25, [sp, #16 * 12]
	stp	x26, x27, [sp, #16 * 13]
	stp	x28, x29, [sp, #16 * 14]

	.if	\el == 0
	mrs	x21, sp_el0
	mov	tsk, sp
	and	tsk, tsk, #~(THREAD_SIZE - 1)	// Ensure MDSCR_EL1.SS is clear,
	ldr	x19, [tsk, #TI_FLAGS]		// since we can unmask debug
	disable_step_tsk x19, x20		// exceptions when scheduling.

	mov	x29, xzr			// fp pointed to user-space
	.else
	add	x21, sp, #S_FRAME_SIZE
	.endif
	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	lr, x21, [sp, #S_LR]
	stp	x22, x23, [sp, #S_PC]

	/*
	 * Set syscallno to -1 by default (overridden later if real syscall),
	 * so that exceptions which are not syscalls are never mistaken for
	 * one by the syscall restart and tracing code.
	 */
	.if	\el == 0
	mvn	x21, xzr
	str	x21, [sp, #S_SYSCALLNO]
	.endif

	/*
	 * Set sp_el0 to current thread_info.
	 */
	.if	\el == 0
	msr	sp_el0, tsk
	.endif

	/*
	 * Registers that may be useful after this macro is invoked:
	 *
	 * x21 - aborted SP
	 * x22 - aborted PC
	 * x23 - aborted PSTATE
	 */
	.endm

	.macro	kernel_exit, el
	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
	.if	\el == 0
	ct_user_enter
	ldr	x23, [sp, #S_SP]		// load return stack pointer
	msr	sp_el0, x23
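	/*
	 * ARM erratum 845719 requires a dummy write to CONTEXTIDR_EL1 before
	 * returning to a 32-bit EL0 task.  SPSR_EL1 bit 4 (M[4]) is set for
	 * AArch32 modes, hence the tbz test below.  The nops keep the
	 * not-affected alternative the same size as the workaround so the
	 * sequence can be patched in place at boot.
	 */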
#ifdef CONFIG_ARM64_ERRATUM_845719
alternative_if_not ARM64_WORKAROUND_845719
	nop
	nop
#ifdef CONFIG_PID_IN_CONTEXTIDR
	nop
#endif
alternative_else
	tbz	x22, #4, 1f
#ifdef CONFIG_PID_IN_CONTEXTIDR
	mrs	x29, contextidr_el1
	msr	contextidr_el1, x29
#else
	msr contextidr_el1, xzr
#endif
1:
alternative_endif
#endif
	.endif
	msr	elr_el1, x21			// set up the return data
	msr	spsr_el1, x22
	ldp	x0, x1, [sp, #16 * 0]
	ldp	x2, x3, [sp, #16 * 1]
	ldp	x4, x5, [sp, #16 * 2]
	ldp	x6, x7, [sp, #16 * 3]
	ldp	x8, x9, [sp, #16 * 4]
	ldp	x10, x11, [sp, #16 * 5]
	ldp	x12, x13, [sp, #16 * 6]
	ldp	x14, x15, [sp, #16 * 7]
	ldp	x16, x17, [sp, #16 * 8]
	ldp	x18, x19, [sp, #16 * 9]
	ldp	x20, x21, [sp, #16 * 10]
	ldp	x22, x23, [sp, #16 * 11]
	ldp	x24, x25, [sp, #16 * 12]
	ldp	x26, x27, [sp, #16 * 13]
	ldp	x28, x29, [sp, #16 * 14]
	ldr	lr, [sp, #S_LR]
	add	sp, sp, #S_FRAME_SIZE		// restore sp
	eret					// return to the interrupted context
	.endm

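/*
 * While executing in the kernel, sp_el0 holds the current thread_info
 * pointer (set up in kernel_entry and cpu_switch_to), so fetching it is a
 * single system-register read.
 */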
	.macro	get_thread_info, rd
	mrs	\rd, sp_el0
	.endm

	.macro	irq_stack_entry
	mov	x19, sp			// preserve the original sp

	/*
	 * Compare sp with the current thread_info.  If the top
	 * ~(THREAD_SIZE - 1) bits match, we are on a task stack and should
	 * switch to the irq stack; otherwise leave sp alone, as we are
	 * already off the task stack (e.g. handling a nested interrupt on
	 * the irq stack).
	 */
	and	x25, x19, #~(THREAD_SIZE - 1)
	cmp	x25, tsk
	b.ne	9998f

	this_cpu_ptr irq_stack, x25, x26
	mov	x26, #IRQ_STACK_START_SP
	add	x26, x25, x26

	/* switch to the irq stack */
	mov	sp, x26

	/*
	 * Add a dummy stack frame; this non-standard format is fixed up
	 * by unwind_frame().
	 */
	stp     x29, x19, [sp, #-16]!
	mov	x29, sp

9998:
	.endm

	/*
	 * x19 should be preserved between irq_stack_entry and
	 * irq_stack_exit.
	 */
	.macro	irq_stack_exit
	mov	sp, x19
	.endm

/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - x0 to x6.
 *
 * x7 is reserved for the system call number in 32-bit mode.
 */
sc_nr	.req	x25		// number of system calls
scno	.req	x26		// syscall number
stbl	.req	x27		// syscall table pointer
tsk	.req	x28		// current thread_info
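/*
 * These .req directives are assembler register aliases used throughout this
 * file.  x25-x28 are callee-saved, so the values survive calls into C code.
 */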

/*
 * Interrupt handling.  handle_arch_irq is installed by the interrupt
 * controller driver via set_handle_irq() and is called here on the per-CPU
 * IRQ stack, with a pointer to the saved registers in x0.
 */
	.macro	irq_handler
	ldr_l	x1, handle_arch_irq
	mov	x0, sp
	irq_stack_entry
	blr	x1
	irq_stack_exit
	.endm

	.text

/*
 * Exception vectors.
 */

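/*
 * The table below has four groups of four entries: exceptions from the
 * current EL using SP_EL0, from the current EL using SP_ELx, from a lower
 * EL in AArch64, and from a lower EL in AArch32.  Each ventry slot is 128
 * bytes and the whole table must be 2KB-aligned (hence .align 11) so that
 * its base can be programmed into VBAR_EL1.
 */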
	.align	11
ENTRY(vectors)
	ventry	el1_sync_invalid		// Synchronous EL1t
	ventry	el1_irq_invalid			// IRQ EL1t
	ventry	el1_fiq_invalid			// FIQ EL1t
	ventry	el1_error_invalid		// Error EL1t

	ventry	el1_sync			// Synchronous EL1h
	ventry	el1_irq				// IRQ EL1h
	ventry	el1_fiq_invalid			// FIQ EL1h
	ventry	el1_error_invalid		// Error EL1h

	ventry	el0_sync			// Synchronous 64-bit EL0
	ventry	el0_irq				// IRQ 64-bit EL0
	ventry	el0_fiq_invalid			// FIQ 64-bit EL0
	ventry	el0_error_invalid		// Error 64-bit EL0

#ifdef CONFIG_COMPAT
	ventry	el0_sync_compat			// Synchronous 32-bit EL0
	ventry	el0_irq_compat			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid_compat		// FIQ 32-bit EL0
	ventry	el0_error_invalid_compat	// Error 32-bit EL0
#else
	ventry	el0_sync_invalid		// Synchronous 32-bit EL0
	ventry	el0_irq_invalid			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid			// FIQ 32-bit EL0
	ventry	el0_error_invalid		// Error 32-bit EL0
#endif
END(vectors)

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, el, reason, regsize = 64
	kernel_entry \el, \regsize
	mov	x0, sp
	mov	x1, #\reason
	mrs	x2, esr_el1
	b	bad_mode
	.endm

el0_sync_invalid:
	inv_entry 0, BAD_SYNC
ENDPROC(el0_sync_invalid)

el0_irq_invalid:
	inv_entry 0, BAD_IRQ
ENDPROC(el0_irq_invalid)

el0_fiq_invalid:
	inv_entry 0, BAD_FIQ
ENDPROC(el0_fiq_invalid)

el0_error_invalid:
	inv_entry 0, BAD_ERROR
ENDPROC(el0_error_invalid)

#ifdef CONFIG_COMPAT
el0_fiq_invalid_compat:
	inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat)

el0_error_invalid_compat:
	inv_entry 0, BAD_ERROR, 32
ENDPROC(el0_error_invalid_compat)
#endif

el1_sync_invalid:
	inv_entry 1, BAD_SYNC
ENDPROC(el1_sync_invalid)

el1_irq_invalid:
	inv_entry 1, BAD_IRQ
ENDPROC(el1_irq_invalid)

el1_fiq_invalid:
	inv_entry 1, BAD_FIQ
ENDPROC(el1_fiq_invalid)

el1_error_invalid:
	inv_entry 1, BAD_ERROR
ENDPROC(el1_error_invalid)

/*
 * EL1 mode handlers.
 */
	.align	6
el1_sync:
	kernel_entry 1
	mrs	x1, esr_el1			// read the syndrome register
	lsr	x24, x1, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_DABT_CUR	// data abort in EL1
	b.eq	el1_da
	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
	b.eq	el1_undef
	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL1
	b.eq	el1_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_CUR	// debug exception in EL1
	b.ge	el1_dbg
	b	el1_inv
el1_da:
	/*
	 * Data abort handling
	 */
	mrs	x0, far_el1
	enable_dbg
	// re-enable interrupts if they were enabled in the aborted context
	tbnz	x23, #7, 1f			// PSR_I_BIT
	enable_irq
1:
	mov	x2, sp				// struct pt_regs
	bl	do_mem_abort

	// disable interrupts before pulling preserved data off the stack
	disable_irq
	kernel_exit 1
el1_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x0, far_el1
	enable_dbg
	mov	x2, sp
	b	do_sp_pc_abort
el1_undef:
	/*
	 * Undefined instruction
	 */
	enable_dbg
	mov	x0, sp
	b	do_undefinstr
el1_dbg:
	/*
	 * Debug exception handling
	 */
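	/*
	 * el1_sync routes every exception class >= ESR_ELx_EC_BREAKPT_CUR
	 * here.  The debug ECs taken from the current EL all have bit 0 set,
	 * except BRK64, so the cinc below sets bit 0 for BRK64 as well and a
	 * single tbz can then reject anything that is not an EL1 debug
	 * exception.
	 */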
	cmp	x24, #ESR_ELx_EC_BRK64		// if BRK64
	cinc	x24, x24, eq			// set bit '0'
	tbz	x24, #0, el1_inv		// EL1 only
	mrs	x0, far_el1
	mov	x2, sp				// struct pt_regs
	bl	do_debug_exception
	kernel_exit 1
el1_inv:
	// TODO: add support for undefined instructions in kernel mode
	enable_dbg
	mov	x0, sp
	mov	x2, x1
	mov	x1, #BAD_SYNC
	b	bad_mode
ENDPROC(el1_sync)

	.align	6
el1_irq:
	kernel_entry 1
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	get_thread_info tsk
	irq_handler

#ifdef CONFIG_PREEMPT
	ldr	w24, [tsk, #TI_PREEMPT]		// get preempt count
	cbnz	w24, 1f				// preempt count != 0
	ldr	x0, [tsk, #TI_FLAGS]		// get flags
	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
	bl	el1_preempt
1:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	kernel_exit 1
ENDPROC(el1_irq)

#ifdef CONFIG_PREEMPT
el1_preempt:
	mov	x24, lr
1:	bl	preempt_schedule_irq		// irq en/disable is done inside
	ldr	x0, [tsk, #TI_FLAGS]		// get new task's TI_FLAGS
	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
	ret	x24
#endif

/*
 * EL0 mode handlers.
 */
	.align	6
el0_sync:
	kernel_entry 0
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_SVC64		// SVC in 64-bit state
	b.eq	el0_svc
	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_ELx_EC_FP_EXC64	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv

#ifdef CONFIG_COMPAT
	.align	6
el0_sync_compat:
	kernel_entry 0, 32
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_SVC32		// SVC in 32-bit state
	b.eq	el0_svc_compat
	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_ELx_EC_FP_EXC32	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP15_32	// CP15 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP15_64	// CP15 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_MR	// CP14 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_LS	// CP14 LDC/STC trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_64	// CP14 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv
el0_svc_compat:
	/*
	 * AArch32 syscall handling
	 */
	adrp	stbl, compat_sys_call_table	// load compat syscall table pointer
	uxtw	scno, w7			// syscall number in w7 (r7)
	mov     sc_nr, #__NR_compat_syscalls
	b	el0_svc_naked

	.align	6
el0_irq_compat:
	kernel_entry 0, 32
	b	el0_irq_naked
#endif

el0_da:
	/*
	 * Data abort handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
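	// clear the address tag (top byte) before passing the fault address
	// to C code; EL0 may be using tagged pointers (TBI)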
	bic	x0, x26, #(0xff << 56)
	mov	x1, x25
	mov	x2, sp
	bl	do_mem_abort
	b	ret_to_user
el0_ia:
	/*
	 * Instruction abort handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, x26
	orr	x1, x25, #1 << 24		// use reserved ISS bit for instruction aborts
	mov	x2, sp
	bl	do_mem_abort
	b	ret_to_user
el0_fpsimd_acc:
	/*
	 * Floating Point or Advanced SIMD access
	 */
	enable_dbg
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_fpsimd_acc
	b	ret_to_user
el0_fpsimd_exc:
	/*
	 * Floating Point or Advanced SIMD exception
	 */
	enable_dbg
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_fpsimd_exc
	b	ret_to_user
el0_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_sp_pc_abort
	b	ret_to_user
el0_undef:
	/*
	 * Undefined instruction
	 */
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, sp
	bl	do_undefinstr
	b	ret_to_user
el0_dbg:
	/*
	 * Debug exception handling
	 */
	tbnz	x24, #0, el0_inv		// EL0 only
	mrs	x0, far_el1
	mov	x1, x25
	mov	x2, sp
	bl	do_debug_exception
	enable_dbg
	ct_user_exit
	b	ret_to_user
el0_inv:
	enable_dbg
	ct_user_exit
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mov	x2, x25
	bl	bad_mode
	b	ret_to_user
ENDPROC(el0_sync)

	.align	6
el0_irq:
	kernel_entry 0
el0_irq_naked:
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	ct_user_exit
	irq_handler

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	b	ret_to_user
ENDPROC(el0_irq)

/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 *   x0 = previous task_struct (must be preserved across the switch)
 *   x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 *
 */
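/*
 * The new task's thread_info is derived from its stack pointer and stashed
 * in sp_el0 at the end, so that get_thread_info remains a single mrs.
 */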
ENTRY(cpu_switch_to)
	mov	x10, #THREAD_CPU_CONTEXT
	add	x8, x0, x10
	mov	x9, sp
	stp	x19, x20, [x8], #16		// store callee-saved registers
	stp	x21, x22, [x8], #16
	stp	x23, x24, [x8], #16
	stp	x25, x26, [x8], #16
	stp	x27, x28, [x8], #16
	stp	x29, x9, [x8], #16
	str	lr, [x8]
	add	x8, x1, x10
	ldp	x19, x20, [x8], #16		// restore callee-saved registers
	ldp	x21, x22, [x8], #16
	ldp	x23, x24, [x8], #16
	ldp	x25, x26, [x8], #16
	ldp	x27, x28, [x8], #16
	ldp	x29, x9, [x8], #16
	ldr	lr, [x8]
	mov	sp, x9
	and	x9, x9, #~(THREAD_SIZE - 1)
	msr	sp_el0, x9
	ret
ENDPROC(cpu_switch_to)

/*
 * This is the fast syscall return path.  We do as little as possible here,
 * and this includes saving x0 back into the kernel stack.
 */
ret_fast_syscall:
	disable_irq				// disable interrupts
	str	x0, [sp, #S_X0]			// returned x0
	ldr	x1, [tsk, #TI_FLAGS]		// re-check for syscall tracing
	and	x2, x1, #_TIF_SYSCALL_WORK
	cbnz	x2, ret_fast_syscall_trace
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
	enable_step_tsk x1, x2
	kernel_exit 0
ret_fast_syscall_trace:
	enable_irq				// enable interrupts
	b	__sys_trace_return_skipped	// we already saved x0

/*
 * Ok, we need to do extra processing; enter the slow path.
 */
work_pending:
	tbnz	x1, #TIF_NEED_RESCHED, work_resched
	/* TIF_SIGPENDING, TIF_NOTIFY_RESUME or TIF_FOREIGN_FPSTATE case */
	mov	x0, sp				// 'regs'
	enable_irq				// enable interrupts for do_notify_resume()
	bl	do_notify_resume
	b	ret_to_user
work_resched:
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off		// the IRQs are off here, inform the tracing code
#endif
	bl	schedule
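	// fall through to ret_to_user, which disables interrupts and
	// re-checks the work flags before returning to user space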

/*
 * "slow" syscall return path.
 */
ret_to_user:
	disable_irq				// disable interrupts
	ldr	x1, [tsk, #TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
	enable_step_tsk x1, x2
	kernel_exit 0
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
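/*
 * x19 and x20 were loaded from the new task's cpu_context by cpu_switch_to:
 * copy_thread() sets x19 to the thread function and x20 to its argument for
 * kernel threads, and leaves x19 zero for user tasks, which therefore skip
 * straight to ret_to_user.
 */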
ENTRY(ret_from_fork)
	bl	schedule_tail
	cbz	x19, 1f				// not a kernel thread
	mov	x0, x20
	blr	x19
1:	get_thread_info tsk
	b	ret_to_user
ENDPROC(ret_from_fork)

/*
 * SVC handler.
 */
	.align	6
el0_svc:
	adrp	stbl, sys_call_table		// load syscall table pointer
	uxtw	scno, w8			// syscall number in w8
	mov	sc_nr, #__NR_syscalls
el0_svc_naked:					// compat entry point
	stp	x0, scno, [sp, #S_ORIG_X0]	// save the original x0 and syscall number
	enable_dbg_and_irq
	ct_user_exit 1

	ldr	x16, [tsk, #TI_FLAGS]		// check for syscall hooks
	tst	x16, #_TIF_SYSCALL_WORK
	b.ne	__sys_trace
	cmp     scno, sc_nr                     // check upper syscall limit
	b.hs	ni_sys
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	blr	x16				// call sys_* routine
	b	ret_fast_syscall
ni_sys:
	mov	x0, sp
	bl	do_ni_syscall
	b	ret_fast_syscall
ENDPROC(el0_svc)

	/*
	 * This is the really slow path.  We're going to be doing context
	 * switches, and waiting for our parent to respond.
	 */
__sys_trace:
	mov	w0, #-1				// set default errno for
	cmp     scno, x0			// user-issued syscall(-1)
	b.ne	1f
	mov	x0, #-ENOSYS
	str	x0, [sp, #S_X0]
1:	mov	x0, sp
	bl	syscall_trace_enter
	cmp	w0, #-1				// skip the syscall?
	b.eq	__sys_trace_return_skipped
	uxtw	scno, w0			// syscall number (possibly new)
	mov	x1, sp				// pointer to regs
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	__ni_sys_trace
	ldp	x0, x1, [sp]			// restore the syscall args
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	blr	x16				// call sys_* routine

__sys_trace_return:
	str	x0, [sp, #S_X0]			// save returned x0
__sys_trace_return_skipped:
	mov	x0, sp
	bl	syscall_trace_exit
	b	ret_to_user

__ni_sys_trace:
	mov	x0, sp
	bl	do_ni_syscall
	b	__sys_trace_return

/*
 * Special system call wrappers.
 */
ENTRY(sys_rt_sigreturn_wrapper)
	mov	x0, sp
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)
