xref: /openbmc/linux/arch/arm64/kernel/entry.S (revision 82003e04)
/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/irq.h>
#include <asm/memory.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>

/*
 * Context tracking subsystem.  Used to instrument transitions
 * between user and kernel mode.
 */
	.macro ct_user_exit, syscall = 0
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_exit
	.if \syscall == 1
	/*
	 * Save/restore needed during syscalls.  Restore syscall arguments from
	 * the values already saved on stack during kernel_entry.
	 */
	ldp	x0, x1, [sp]
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	.endif
#endif
	.endm
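	/*
	 * Note: context_tracking_user_exit is an external C function, so the
	 * bl above may clobber the caller-saved registers x0-x7 (AAPCS64);
	 * that is why the syscall variant reloads the syscall arguments from
	 * the pt_regs frame saved by kernel_entry.
	 */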

	.macro ct_user_enter
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_enter
#endif
	.endm

/*
 * Bad Abort numbers
 *-----------------
 */
#define BAD_SYNC	0
#define BAD_IRQ		1
#define BAD_FIQ		2
#define BAD_ERROR	3
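/*
 * These values are passed to bad_mode() as its "reason" argument (see
 * inv_entry, el1_inv and el0_inv below) so that it can report which kind
 * of unexpected exception was taken.
 */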

	.macro	kernel_entry, el, regsize = 64
	sub	sp, sp, #S_FRAME_SIZE
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif
	stp	x0, x1, [sp, #16 * 0]
	stp	x2, x3, [sp, #16 * 1]
	stp	x4, x5, [sp, #16 * 2]
	stp	x6, x7, [sp, #16 * 3]
	stp	x8, x9, [sp, #16 * 4]
	stp	x10, x11, [sp, #16 * 5]
	stp	x12, x13, [sp, #16 * 6]
	stp	x14, x15, [sp, #16 * 7]
	stp	x16, x17, [sp, #16 * 8]
	stp	x18, x19, [sp, #16 * 9]
	stp	x20, x21, [sp, #16 * 10]
	stp	x22, x23, [sp, #16 * 11]
	stp	x24, x25, [sp, #16 * 12]
	stp	x26, x27, [sp, #16 * 13]
	stp	x28, x29, [sp, #16 * 14]

	.if	\el == 0
	mrs	x21, sp_el0
	mov	tsk, sp
	and	tsk, tsk, #~(THREAD_SIZE - 1)	// Ensure MDSCR_EL1.SS is clear,
	ldr	x19, [tsk, #TI_FLAGS]		// since we can unmask debug
	disable_step_tsk x19, x20		// exceptions when scheduling.

	mov	x29, xzr			// fp pointed to user-space
	.else
	add	x21, sp, #S_FRAME_SIZE
	get_thread_info tsk
	/* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
	ldr	x20, [tsk, #TI_ADDR_LIMIT]
	str	x20, [sp, #S_ORIG_ADDR_LIMIT]
	mov	x20, #TASK_SIZE_64
	str	x20, [tsk, #TI_ADDR_LIMIT]
	/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
	.endif /* \el == 0 */
	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	lr, x21, [sp, #S_LR]
	stp	x22, x23, [sp, #S_PC]

	/*
	 * Set syscallno to -1 by default (overridden later if real syscall).
	 */
	.if	\el == 0
	mvn	x21, xzr
	str	x21, [sp, #S_SYSCALLNO]
	.endif

	/*
	 * Set sp_el0 to current thread_info.
	 */
	.if	\el == 0
	msr	sp_el0, tsk
	.endif

	/*
	 * Registers that may be useful after this macro is invoked:
	 *
	 * x21 - aborted SP
	 * x22 - aborted PC
	 * x23 - aborted PSTATE
	*/
	.endm
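
	/*
	 * The frame built by kernel_entry is a struct pt_regs; the S_* and
	 * TI_* offsets used above are generated by asm-offsets.c.  Roughly
	 * (a sketch of the layout in this kernel's <asm/ptrace.h>):
	 *
	 *	regs[0..30], sp, pc, pstate	- saved GPRs and return state
	 *	orig_x0, syscallno		- syscall bookkeeping
	 *	orig_addr_limit, unused		- addr_limit saved on EL1 entry
	 */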

	.macro	kernel_exit, el
	.if	\el != 0
	/* Restore the task's original addr_limit. */
	ldr	x20, [sp, #S_ORIG_ADDR_LIMIT]
	str	x20, [tsk, #TI_ADDR_LIMIT]

	/* No need to restore UAO, it will be restored from SPSR_EL1 */
	.endif

	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
	.if	\el == 0
	ct_user_enter
	ldr	x23, [sp, #S_SP]		// load return stack pointer
	msr	sp_el0, x23
#ifdef CONFIG_ARM64_ERRATUM_845719
alternative_if ARM64_WORKAROUND_845719
	tbz	x22, #4, 1f
#ifdef CONFIG_PID_IN_CONTEXTIDR
	mrs	x29, contextidr_el1
	msr	contextidr_el1, x29
#else
	msr contextidr_el1, xzr
#endif
1:
alternative_else_nop_endif
#endif
	.endif
	msr	elr_el1, x21			// set up the return data
	msr	spsr_el1, x22
	ldp	x0, x1, [sp, #16 * 0]
	ldp	x2, x3, [sp, #16 * 1]
	ldp	x4, x5, [sp, #16 * 2]
	ldp	x6, x7, [sp, #16 * 3]
	ldp	x8, x9, [sp, #16 * 4]
	ldp	x10, x11, [sp, #16 * 5]
	ldp	x12, x13, [sp, #16 * 6]
	ldp	x14, x15, [sp, #16 * 7]
	ldp	x16, x17, [sp, #16 * 8]
	ldp	x18, x19, [sp, #16 * 9]
	ldp	x20, x21, [sp, #16 * 10]
	ldp	x22, x23, [sp, #16 * 11]
	ldp	x24, x25, [sp, #16 * 12]
	ldp	x26, x27, [sp, #16 * 13]
	ldp	x28, x29, [sp, #16 * 14]
	ldr	lr, [sp, #S_LR]
	add	sp, sp, #S_FRAME_SIZE		// restore sp
	eret					// return to kernel
	.endm
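
	/*
	 * The eret above returns to the ELR_EL1/SPSR_EL1 values written a few
	 * instructions earlier; sp is restored last because the loads above
	 * still read from the pt_regs frame.
	 */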

	.macro	get_thread_info, rd
	mrs	\rd, sp_el0
	.endm
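
	/*
	 * While in the kernel, sp_el0 holds the current thread_info pointer:
	 * kernel_entry sets it on entry from EL0 and cpu_switch_to updates it
	 * on a context switch, so reading it back here is cheap.
	 */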

	.macro	irq_stack_entry
	mov	x19, sp			// preserve the original sp

	/*
	 * Compare sp with the current thread_info, if the top
	 * ~(THREAD_SIZE - 1) bits match, we are on a task stack, and
	 * should switch to the irq stack.
	 */
	and	x25, x19, #~(THREAD_SIZE - 1)
	cmp	x25, tsk
	b.ne	9998f

	this_cpu_ptr irq_stack, x25, x26
	mov	x26, #IRQ_STACK_START_SP
	add	x26, x25, x26

	/* switch to the irq stack */
	mov	sp, x26

	/*
	 * Add a dummy stack frame, this non-standard format is fixed up
	 * by unwind_frame()
	 */
	stp     x29, x19, [sp, #-16]!
	mov	x29, sp

9998:
	.endm
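
	/*
	 * The masked-sp check above also handles nesting: if this interrupt
	 * was taken while already running on the irq stack, the comparison
	 * with tsk fails and we simply keep using the current stack.
	 */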

	/*
	 * x19 should be preserved between irq_stack_entry and
	 * irq_stack_exit.
	 */
	.macro	irq_stack_exit
	mov	sp, x19
	.endm

/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - x0 to x6.
 *
 * x7 is reserved for the system call number in 32-bit mode.
 */
sc_nr	.req	x25		// number of system calls
scno	.req	x26		// syscall number
stbl	.req	x27		// syscall table pointer
tsk	.req	x28		// current thread_info
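
/*
 * All four aliases are callee-saved registers (x19-x28 under AAPCS64), so
 * they survive the C calls made while handling a syscall.  For 64-bit tasks
 * the syscall number arrives in w8 (see el0_svc); for compat tasks it
 * arrives in w7/r7 (see el0_svc_compat).
 */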

/*
 * Interrupt handling.
 */
	.macro	irq_handler
	ldr_l	x1, handle_arch_irq
	mov	x0, sp
	irq_stack_entry
	blr	x1
	irq_stack_exit
	.endm
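
/*
 * handle_arch_irq is a function pointer installed at boot by the root
 * interrupt controller driver (via set_handle_irq()).  x0 is loaded with
 * the pt_regs pointer before switching stacks, so the handler still sees
 * the frame saved on the task stack.
 */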

	.text

/*
 * Exception vectors.
 */
	.pushsection ".entry.text", "ax"

	.align	11
ENTRY(vectors)
	ventry	el1_sync_invalid		// Synchronous EL1t
	ventry	el1_irq_invalid			// IRQ EL1t
	ventry	el1_fiq_invalid			// FIQ EL1t
	ventry	el1_error_invalid		// Error EL1t

	ventry	el1_sync			// Synchronous EL1h
	ventry	el1_irq				// IRQ EL1h
	ventry	el1_fiq_invalid			// FIQ EL1h
	ventry	el1_error_invalid		// Error EL1h

	ventry	el0_sync			// Synchronous 64-bit EL0
	ventry	el0_irq				// IRQ 64-bit EL0
	ventry	el0_fiq_invalid			// FIQ 64-bit EL0
	ventry	el0_error_invalid		// Error 64-bit EL0

#ifdef CONFIG_COMPAT
	ventry	el0_sync_compat			// Synchronous 32-bit EL0
	ventry	el0_irq_compat			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid_compat		// FIQ 32-bit EL0
	ventry	el0_error_invalid_compat	// Error 32-bit EL0
#else
	ventry	el0_sync_invalid		// Synchronous 32-bit EL0
	ventry	el0_irq_invalid			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid			// FIQ 32-bit EL0
	ventry	el0_error_invalid		// Error 32-bit EL0
#endif
END(vectors)
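
/*
 * The architecture requires the vector base (VBAR_EL1) to be 2KB aligned,
 * hence the .align 11 above.  Each of the 16 entries is 0x80 bytes, so a
 * ventry is essentially a branch padded out to the architectural spacing.
 */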

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, el, reason, regsize = 64
	kernel_entry \el, \regsize
	mov	x0, sp
	mov	x1, #\reason
	mrs	x2, esr_el1
	b	bad_mode
	.endm

el0_sync_invalid:
	inv_entry 0, BAD_SYNC
ENDPROC(el0_sync_invalid)

el0_irq_invalid:
	inv_entry 0, BAD_IRQ
ENDPROC(el0_irq_invalid)

el0_fiq_invalid:
	inv_entry 0, BAD_FIQ
ENDPROC(el0_fiq_invalid)

el0_error_invalid:
	inv_entry 0, BAD_ERROR
ENDPROC(el0_error_invalid)

#ifdef CONFIG_COMPAT
el0_fiq_invalid_compat:
	inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat)

el0_error_invalid_compat:
	inv_entry 0, BAD_ERROR, 32
ENDPROC(el0_error_invalid_compat)
#endif

el1_sync_invalid:
	inv_entry 1, BAD_SYNC
ENDPROC(el1_sync_invalid)

el1_irq_invalid:
	inv_entry 1, BAD_IRQ
ENDPROC(el1_irq_invalid)

el1_fiq_invalid:
	inv_entry 1, BAD_FIQ
ENDPROC(el1_fiq_invalid)

el1_error_invalid:
	inv_entry 1, BAD_ERROR
ENDPROC(el1_error_invalid)

/*
 * EL1 mode handlers.
 */
	.align	6
el1_sync:
	kernel_entry 1
	mrs	x1, esr_el1			// read the syndrome register
	lsr	x24, x1, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_DABT_CUR	// data abort in EL1
	b.eq	el1_da
	cmp	x24, #ESR_ELx_EC_IABT_CUR	// instruction abort in EL1
	b.eq	el1_ia
	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
	b.eq	el1_undef
	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL1
	b.eq	el1_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_CUR	// debug exception in EL1
	b.ge	el1_dbg
	b	el1_inv
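
	/*
	 * The debug exception classes (breakpoint, software step, watchpoint,
	 * BRK) have the numerically largest EC encodings, which is why the
	 * single b.ge against ESR_ELx_EC_BREAKPT_CUR routes all of them to
	 * el1_dbg; el0_sync below uses the same trick with
	 * ESR_ELx_EC_BREAKPT_LOW.
	 */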

el1_ia:
	/*
	 * Fall through to the Data abort case
	 */
el1_da:
	/*
	 * Data abort handling
	 */
	mrs	x0, far_el1
	enable_dbg
	// re-enable interrupts if they were enabled in the aborted context
	tbnz	x23, #7, 1f			// PSR_I_BIT
	enable_irq
1:
	mov	x2, sp				// struct pt_regs
	bl	do_mem_abort

	// disable interrupts before pulling preserved data off the stack
	disable_irq
	kernel_exit 1
el1_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x0, far_el1
	enable_dbg
	mov	x2, sp
	b	do_sp_pc_abort
el1_undef:
	/*
	 * Undefined instruction
	 */
	enable_dbg
	mov	x0, sp
	b	do_undefinstr
el1_dbg:
	/*
	 * Debug exception handling
	 */
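	/*
	 * Debug ECs taken from the current EL have bit 0 set, while the
	 * lower-EL variants are even; BRK64 is the one current-EL debug EC
	 * with bit 0 clear, so it is special-cased with the cinc before the
	 * tbz rejects anything that did not come from EL1.
	 */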
	cmp	x24, #ESR_ELx_EC_BRK64		// if BRK64
	cinc	x24, x24, eq			// set bit '0'
	tbz	x24, #0, el1_inv		// EL1 only
	mrs	x0, far_el1
	mov	x2, sp				// struct pt_regs
	bl	do_debug_exception
	kernel_exit 1
el1_inv:
	// TODO: add support for undefined instructions in kernel mode
	enable_dbg
	mov	x0, sp
	mov	x2, x1
	mov	x1, #BAD_SYNC
	b	bad_mode
ENDPROC(el1_sync)

	.align	6
el1_irq:
	kernel_entry 1
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	irq_handler

#ifdef CONFIG_PREEMPT
	ldr	w24, [tsk, #TI_PREEMPT]		// get preempt count
	cbnz	w24, 1f				// preempt count != 0
	ldr	x0, [tsk, #TI_FLAGS]		// get flags
	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
	bl	el1_preempt
1:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	kernel_exit 1
ENDPROC(el1_irq)

#ifdef CONFIG_PREEMPT
el1_preempt:
	mov	x24, lr
1:	bl	preempt_schedule_irq		// irq en/disable is done inside
	ldr	x0, [tsk, #TI_FLAGS]		// get the new task's TI_FLAGS
	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
	ret	x24
#endif

/*
 * EL0 mode handlers.
 */
	.align	6
el0_sync:
	kernel_entry 0
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_SVC64		// SVC in 64-bit state
	b.eq	el0_svc
	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_ELx_EC_FP_EXC64	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
	b.eq	el0_sys
	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv

#ifdef CONFIG_COMPAT
	.align	6
el0_sync_compat:
	kernel_entry 0, 32
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_SVC32		// SVC in 32-bit state
	b.eq	el0_svc_compat
	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_ELx_EC_FP_EXC32	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP15_32	// CP15 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP15_64	// CP15 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_MR	// CP14 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_LS	// CP14 LDC/STC trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_64	// CP14 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv
el0_svc_compat:
	/*
	 * AArch32 syscall handling
	 */
	adrp	stbl, compat_sys_call_table	// load compat syscall table pointer
	uxtw	scno, w7			// syscall number in w7 (r7)
	mov     sc_nr, #__NR_compat_syscalls
	b	el0_svc_naked

	.align	6
el0_irq_compat:
	kernel_entry 0, 32
	b	el0_irq_naked
#endif

el0_da:
	/*
	 * Data abort handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
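	// clear the address tag: with Top Byte Ignore enabled for userspace,
	// bits [63:56] of a user pointer may hold a tag that is not part of
	// the faulting address passed to do_mem_abort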
	bic	x0, x26, #(0xff << 56)
	mov	x1, x25
	mov	x2, sp
	bl	do_mem_abort
	b	ret_to_user
el0_ia:
	/*
	 * Instruction abort handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_mem_abort
	b	ret_to_user
el0_fpsimd_acc:
	/*
	 * Floating Point or Advanced SIMD access
	 */
	enable_dbg
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_fpsimd_acc
	b	ret_to_user
el0_fpsimd_exc:
	/*
	 * Floating Point or Advanced SIMD exception
	 */
	enable_dbg
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_fpsimd_exc
	b	ret_to_user
el0_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_sp_pc_abort
	b	ret_to_user
el0_undef:
	/*
	 * Undefined instruction
	 */
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, sp
	bl	do_undefinstr
	b	ret_to_user
el0_sys:
	/*
	 * System instructions, for trapped cache maintenance instructions
	 */
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_sysinstr
	b	ret_to_user
el0_dbg:
	/*
	 * Debug exception handling
	 */
	tbnz	x24, #0, el0_inv		// EL0 only
	mrs	x0, far_el1
	mov	x1, x25
	mov	x2, sp
	bl	do_debug_exception
	enable_dbg
	ct_user_exit
	b	ret_to_user
el0_inv:
	enable_dbg
	ct_user_exit
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mov	x2, x25
	bl	bad_mode
	b	ret_to_user
ENDPROC(el0_sync)

	.align	6
el0_irq:
	kernel_entry 0
el0_irq_naked:
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	ct_user_exit
	irq_handler

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	b	ret_to_user
ENDPROC(el0_irq)

/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 *   x0 = previous task_struct (must be preserved across the switch)
 *   x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 *
 */
ENTRY(cpu_switch_to)
	mov	x10, #THREAD_CPU_CONTEXT
	add	x8, x0, x10
	mov	x9, sp
	stp	x19, x20, [x8], #16		// store callee-saved registers
	stp	x21, x22, [x8], #16
	stp	x23, x24, [x8], #16
	stp	x25, x26, [x8], #16
	stp	x27, x28, [x8], #16
	stp	x29, x9, [x8], #16
	str	lr, [x8]
	add	x8, x1, x10
	ldp	x19, x20, [x8], #16		// restore callee-saved registers
	ldp	x21, x22, [x8], #16
	ldp	x23, x24, [x8], #16
	ldp	x25, x26, [x8], #16
	ldp	x27, x28, [x8], #16
	ldp	x29, x9, [x8], #16
	ldr	lr, [x8]
	mov	sp, x9
	and	x9, x9, #~(THREAD_SIZE - 1)
	msr	sp_el0, x9
	ret
ENDPROC(cpu_switch_to)
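
/*
 * THREAD_CPU_CONTEXT is the offset of thread.cpu_context inside task_struct;
 * the stores/loads above match its layout (x19-x28, fp, sp, pc), and the
 * final msr keeps sp_el0 pointing at the new task's thread_info (see
 * get_thread_info above).
 */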

/*
 * This is the fast syscall return path.  We do as little as possible here,
 * and this includes saving x0 back into the kernel stack.
 */
ret_fast_syscall:
	disable_irq				// disable interrupts
	str	x0, [sp, #S_X0]			// returned x0
	ldr	x1, [tsk, #TI_FLAGS]		// re-check for syscall tracing
	and	x2, x1, #_TIF_SYSCALL_WORK
	cbnz	x2, ret_fast_syscall_trace
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
	enable_step_tsk x1, x2
	kernel_exit 0
ret_fast_syscall_trace:
	enable_irq				// enable interrupts
	b	__sys_trace_return_skipped	// we already saved x0

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
work_pending:
	mov	x0, sp				// 'regs'
	bl	do_notify_resume
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on		// enabled while in userspace
#endif
	ldr	x1, [tsk, #TI_FLAGS]		// re-check for single-step
	b	finish_ret_to_user
/*
 * "slow" syscall return path.
 */
ret_to_user:
	disable_irq				// disable interrupts
	ldr	x1, [tsk, #TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
finish_ret_to_user:
	enable_step_tsk x1, x2
	kernel_exit 0
ENDPROC(ret_to_user)
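
/*
 * In both return paths the _TIF_WORK_MASK test decides whether extra work
 * (e.g. signal delivery or rescheduling) must be handled by
 * do_notify_resume() before returning; otherwise enable_step_tsk re-arms
 * single-step (undone by disable_step_tsk in kernel_entry) and kernel_exit
 * drops back to EL0.
 */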

/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	cbz	x19, 1f				// not a kernel thread
	mov	x0, x20
	blr	x19
1:	get_thread_info tsk
	b	ret_to_user
ENDPROC(ret_from_fork)
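
/*
 * x19/x20 are set up by copy_thread(): for a kernel thread x19 is the
 * thread function and x20 its argument; for a user task x19 is zero, so we
 * fall straight through to ret_to_user.
 */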

/*
 * SVC handler.
 */
	.align	6
el0_svc:
	adrp	stbl, sys_call_table		// load syscall table pointer
	uxtw	scno, w8			// syscall number in w8
	mov	sc_nr, #__NR_syscalls
el0_svc_naked:					// compat entry point
	stp	x0, scno, [sp, #S_ORIG_X0]	// save the original x0 and syscall number
	enable_dbg_and_irq
	ct_user_exit 1

	ldr	x16, [tsk, #TI_FLAGS]		// check for syscall hooks
	tst	x16, #_TIF_SYSCALL_WORK
	b.ne	__sys_trace
	cmp     scno, sc_nr                     // check upper syscall limit
	b.hs	ni_sys
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	blr	x16				// call sys_* routine
	b	ret_fast_syscall
ni_sys:
	mov	x0, sp
	bl	do_ni_syscall
	b	ret_fast_syscall
ENDPROC(el0_svc)
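
/*
 * The fast path above is roughly equivalent to (a sketch, not literal C):
 *
 *	if (current_thread_info()->flags & _TIF_SYSCALL_WORK)
 *		goto __sys_trace;
 *	if (scno < __NR_syscalls)
 *		x0 = sys_call_table[scno](x0, x1, x2, x3, x4, x5);
 *	else
 *		x0 = do_ni_syscall(regs);
 *	// x0 is written back to pt_regs in ret_fast_syscall
 */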

	/*
	 * This is the really slow path.  We're going to be doing context
	 * switches, and waiting for our parent to respond.
	 */
__sys_trace:
	mov	w0, #-1				// set default errno for
	cmp     scno, x0			// user-issued syscall(-1)
	b.ne	1f
	mov	x0, #-ENOSYS
	str	x0, [sp, #S_X0]
1:	mov	x0, sp
	bl	syscall_trace_enter
	cmp	w0, #-1				// skip the syscall?
	b.eq	__sys_trace_return_skipped
	uxtw	scno, w0			// syscall number (possibly new)
	mov	x1, sp				// pointer to regs
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	__ni_sys_trace
	ldp	x0, x1, [sp]			// restore the syscall args
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	blr	x16				// call sys_* routine

__sys_trace_return:
	str	x0, [sp, #S_X0]			// save returned x0
__sys_trace_return_skipped:
	mov	x0, sp
	bl	syscall_trace_exit
	b	ret_to_user

__ni_sys_trace:
	mov	x0, sp
	bl	do_ni_syscall
	b	__sys_trace_return

	.popsection				// .entry.text

/*
 * Special system call wrappers.
 */
ENTRY(sys_rt_sigreturn_wrapper)
	mov	x0, sp
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)
809