xref: /openbmc/linux/arch/arm64/kernel/entry.S (revision 8ee90c5c)
/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/irq.h>
#include <asm/memory.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-uaccess.h>
#include <asm/unistd.h>

/*
 * Context tracking subsystem.  Used to instrument transitions
 * between user and kernel mode.
 */
	.macro ct_user_exit, syscall = 0
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_exit
	.if \syscall == 1
	/*
	 * Save/restore needed during syscalls.  Restore syscall arguments from
	 * the values already saved on stack during kernel_entry.
	 */
	ldp	x0, x1, [sp]
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	.endif
#endif
	.endm

	.macro ct_user_enter
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_enter
#endif
	.endm

/*
 * Bad Abort numbers
 *-----------------
 */
#define BAD_SYNC	0
#define BAD_IRQ		1
#define BAD_FIQ		2
#define BAD_ERROR	3

	.macro kernel_ventry	label
	.align 7
	sub	sp, sp, #S_FRAME_SIZE
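	// sp now points at the pt_regs frame that kernel_entry will fill in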
#ifdef CONFIG_VMAP_STACK
	/*
	 * Test whether the SP has overflowed, without corrupting a GPR.
	 * Task and IRQ stacks are aligned to (1 << THREAD_SHIFT).
	 */
	add	sp, sp, x0			// sp' = sp + x0
	sub	x0, sp, x0			// x0' = sp' - x0 = (sp + x0) - x0 = sp
	tbnz	x0, #THREAD_SHIFT, 0f
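	/*
	 * Note: with CONFIG_VMAP_STACK the stacks are expected to be aligned
	 * to twice their size (THREAD_ALIGN), so bit THREAD_SHIFT of any SP
	 * still inside a stack is clear; the tbnz above fires exactly when
	 * the frame allocation has pushed SP below the stack base.
	 */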
	sub	x0, sp, x0			// x0'' = sp' - x0' = (sp + x0) - sp = x0
	sub	sp, sp, x0			// sp'' = sp' - x0 = (sp + x0) - x0 = sp
	b	\label

0:
	/*
	 * Either we've just detected an overflow, or we've taken an exception
	 * while on the overflow stack. Either way, we won't return to
	 * userspace, and can clobber EL0 registers to free up GPRs.
	 */

	/* Stash the original SP (minus S_FRAME_SIZE) in tpidr_el0. */
	msr	tpidr_el0, x0

	/* Recover the original x0 value and stash it in tpidrro_el0 */
	sub	x0, sp, x0
	msr	tpidrro_el0, x0

	/* Switch to the overflow stack */
	adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0
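	// sp now points at the top (highest address) of this CPU's overflow stack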

	/*
	 * Check whether we were already on the overflow stack. This may happen
	 * after panic() re-enables interrupts.
	 */
	mrs	x0, tpidr_el0			// sp of interrupted context
	sub	x0, sp, x0			// delta with top of overflow stack
	tst	x0, #~(OVERFLOW_STACK_SIZE - 1)	// within range?
	b.ne	__bad_stack			// no? -> bad stack pointer

	/* We were already on the overflow stack. Restore sp/x0 and carry on. */
	sub	sp, sp, x0
	mrs	x0, tpidrro_el0
#endif
	b	\label
	.endm

	.macro	kernel_entry, el, regsize = 64
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif
	stp	x0, x1, [sp, #16 * 0]
	stp	x2, x3, [sp, #16 * 1]
	stp	x4, x5, [sp, #16 * 2]
	stp	x6, x7, [sp, #16 * 3]
	stp	x8, x9, [sp, #16 * 4]
	stp	x10, x11, [sp, #16 * 5]
	stp	x12, x13, [sp, #16 * 6]
	stp	x14, x15, [sp, #16 * 7]
	stp	x16, x17, [sp, #16 * 8]
	stp	x18, x19, [sp, #16 * 9]
	stp	x20, x21, [sp, #16 * 10]
	stp	x22, x23, [sp, #16 * 11]
	stp	x24, x25, [sp, #16 * 12]
	stp	x26, x27, [sp, #16 * 13]
	stp	x28, x29, [sp, #16 * 14]

	.if	\el == 0
	mrs	x21, sp_el0
	ldr_this_cpu	tsk, __entry_task, x20	// Ensure MDSCR_EL1.SS is clear,
	ldr	x19, [tsk, #TSK_TI_FLAGS]	// since we can unmask debug
	disable_step_tsk x19, x20		// exceptions when scheduling.

	mov	x29, xzr			// fp pointed to user-space
	.else
	add	x21, sp, #S_FRAME_SIZE
	get_thread_info tsk
	/* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
	ldr	x20, [tsk, #TSK_TI_ADDR_LIMIT]
	str	x20, [sp, #S_ORIG_ADDR_LIMIT]
	mov	x20, #TASK_SIZE_64
	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
	/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
	.endif /* \el == 0 */
	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	lr, x21, [sp, #S_LR]

	/*
	 * In order to be able to dump the contents of struct pt_regs at the
	 * time the exception was taken (in case we attempt to walk the call
	 * stack later), chain it together with the stack frames.
	 */
	.if \el == 0
	stp	xzr, xzr, [sp, #S_STACKFRAME]
	.else
	stp	x29, x22, [sp, #S_STACKFRAME]
	.endif
	add	x29, sp, #S_STACKFRAME
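	// x29 now points at the frame record embedded in pt_regs, chaining
	// this exception frame into the frame-pointer walk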

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
	 * EL0, there is no need to check the state of TTBR0_EL1 since
	 * accesses are always enabled.
	 * Note that the meaning of this bit differs from the ARMv8.1 PAN
	 * feature as all TTBR0_EL1 accesses are disabled, not just those to
	 * user mappings.
	 */
alternative_if ARM64_HAS_PAN
	b	1f				// skip TTBR0 PAN
alternative_else_nop_endif

	.if	\el != 0
	mrs	x21, ttbr0_el1
	tst	x21, #0xffff << 48		// Check for the reserved ASID
	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
	b.eq	1f				// TTBR0 access already disabled
	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
	.endif

	__uaccess_ttbr0_disable x21
1:
#endif

	stp	x22, x23, [sp, #S_PC]

	/* Not in a syscall by default (el0_svc overwrites for real syscall) */
	.if	\el == 0
	mov	w21, #NO_SYSCALL
	str	w21, [sp, #S_SYSCALLNO]
	.endif

	/*
	 * Set sp_el0 to current thread_info.
	 */
	.if	\el == 0
	msr	sp_el0, tsk
	.endif

	/*
	 * Registers that may be useful after this macro is invoked:
	 *
	 * x21 - aborted SP
	 * x22 - aborted PC
	 * x23 - aborted PSTATE
	 */
	.endm

	.macro	kernel_exit, el
	.if	\el != 0
	/* Restore the task's original addr_limit. */
	ldr	x20, [sp, #S_ORIG_ADDR_LIMIT]
	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]

	/* No need to restore UAO, it will be restored from SPSR_EL1 */
	.endif

	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
	.if	\el == 0
	ct_user_enter
	.endif

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
	 * PAN bit checking.
	 */
alternative_if ARM64_HAS_PAN
	b	2f				// skip TTBR0 PAN
alternative_else_nop_endif

	.if	\el != 0
	tbnz	x22, #22, 1f			// Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
	.endif

	__uaccess_ttbr0_enable x0

	.if	\el == 0
	/*
	 * Enable errata workarounds only if returning to user. The only
	 * workaround currently required for TTBR0_EL1 changes is for the
	 * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
	 * corruption).
	 */
	post_ttbr0_update_workaround
	.endif
1:
	.if	\el != 0
	and	x22, x22, #~PSR_PAN_BIT		// ARMv8.0 CPUs do not understand this bit
	.endif
2:
#endif

	.if	\el == 0
	ldr	x23, [sp, #S_SP]		// load return stack pointer
	msr	sp_el0, x23
#ifdef CONFIG_ARM64_ERRATUM_845719
alternative_if ARM64_WORKAROUND_845719
	tbz	x22, #4, 1f
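	// SPSR_EL1.M[4] (bit 4) is set when returning to AArch32, so the
	// workaround below is only applied for 32-bit tasks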
#ifdef CONFIG_PID_IN_CONTEXTIDR
	mrs	x29, contextidr_el1
	msr	contextidr_el1, x29
#else
	msr contextidr_el1, xzr
#endif
1:
alternative_else_nop_endif
#endif
	.endif

	msr	elr_el1, x21			// set up the return data
	msr	spsr_el1, x22
	ldp	x0, x1, [sp, #16 * 0]
	ldp	x2, x3, [sp, #16 * 1]
	ldp	x4, x5, [sp, #16 * 2]
	ldp	x6, x7, [sp, #16 * 3]
	ldp	x8, x9, [sp, #16 * 4]
	ldp	x10, x11, [sp, #16 * 5]
	ldp	x12, x13, [sp, #16 * 6]
	ldp	x14, x15, [sp, #16 * 7]
	ldp	x16, x17, [sp, #16 * 8]
	ldp	x18, x19, [sp, #16 * 9]
	ldp	x20, x21, [sp, #16 * 10]
	ldp	x22, x23, [sp, #16 * 11]
	ldp	x24, x25, [sp, #16 * 12]
	ldp	x26, x27, [sp, #16 * 13]
	ldp	x28, x29, [sp, #16 * 14]
	ldr	lr, [sp, #S_LR]
	add	sp, sp, #S_FRAME_SIZE		// restore sp
	eret					// return to kernel
	.endm

	.macro	irq_stack_entry
	mov	x19, sp			// preserve the original sp

	/*
	 * Compare sp with the base of the task stack.
	 * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
	 * and should switch to the irq stack.
	 */
	ldr	x25, [tsk, TSK_STACK]
	eor	x25, x25, x19
	and	x25, x25, #~(THREAD_SIZE - 1)
	cbnz	x25, 9998f
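	// sp is not on the task stack (e.g. we are already on the IRQ or
	// overflow stack), so keep using it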

	ldr_this_cpu x25, irq_stack_ptr, x26
	mov	x26, #IRQ_STACK_SIZE
	add	x26, x25, x26
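	// x26 = top of this CPU's IRQ stack (stacks grow downwards)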

	/* switch to the irq stack */
	mov	sp, x26
9998:
	.endm

	/*
	 * x19 should be preserved between irq_stack_entry and
	 * irq_stack_exit.
	 */
	.macro	irq_stack_exit
	mov	sp, x19
	.endm

/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - x0 to x6.
 *
 * x7 is reserved for the system call number in 32-bit mode.
 */
wsc_nr	.req	w25		// number of system calls
wscno	.req	w26		// syscall number
xscno	.req	x26		// syscall number (zero-extended)
stbl	.req	x27		// syscall table pointer
tsk	.req	x28		// current thread_info

/*
 * Interrupt handling.
 */
	.macro	irq_handler
	ldr_l	x1, handle_arch_irq
	mov	x0, sp
	irq_stack_entry
	blr	x1
	irq_stack_exit
	.endm

	.text

/*
 * Exception vectors.
 */
	.pushsection ".entry.text", "ax"

	.align	11
ENTRY(vectors)
	kernel_ventry	el1_sync_invalid		// Synchronous EL1t
	kernel_ventry	el1_irq_invalid			// IRQ EL1t
	kernel_ventry	el1_fiq_invalid			// FIQ EL1t
	kernel_ventry	el1_error_invalid		// Error EL1t

	kernel_ventry	el1_sync			// Synchronous EL1h
	kernel_ventry	el1_irq				// IRQ EL1h
	kernel_ventry	el1_fiq_invalid			// FIQ EL1h
	kernel_ventry	el1_error_invalid		// Error EL1h

	kernel_ventry	el0_sync			// Synchronous 64-bit EL0
	kernel_ventry	el0_irq				// IRQ 64-bit EL0
	kernel_ventry	el0_fiq_invalid			// FIQ 64-bit EL0
	kernel_ventry	el0_error_invalid		// Error 64-bit EL0

#ifdef CONFIG_COMPAT
	kernel_ventry	el0_sync_compat			// Synchronous 32-bit EL0
	kernel_ventry	el0_irq_compat			// IRQ 32-bit EL0
	kernel_ventry	el0_fiq_invalid_compat		// FIQ 32-bit EL0
	kernel_ventry	el0_error_invalid_compat	// Error 32-bit EL0
#else
	kernel_ventry	el0_sync_invalid		// Synchronous 32-bit EL0
	kernel_ventry	el0_irq_invalid			// IRQ 32-bit EL0
	kernel_ventry	el0_fiq_invalid			// FIQ 32-bit EL0
	kernel_ventry	el0_error_invalid		// Error 32-bit EL0
#endif
END(vectors)

#ifdef CONFIG_VMAP_STACK
	/*
	 * We detected an overflow in kernel_ventry, which switched to the
	 * overflow stack. Stash the exception regs, and head to our overflow
	 * handler.
	 */
__bad_stack:
	/* Restore the original x0 value */
	mrs	x0, tpidrro_el0

	/*
	 * Store the original GPRs to the new stack. The original SP (minus
	 * S_FRAME_SIZE) was stashed in tpidr_el0 by kernel_ventry.
	 */
	sub	sp, sp, #S_FRAME_SIZE
	kernel_entry 1
	mrs	x0, tpidr_el0
	add	x0, x0, #S_FRAME_SIZE
	str	x0, [sp, #S_SP]
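	// Replace the saved SP (which points at the overflow stack) with the
	// SP of the overflowing context, so that it shows up in the register
	// dump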

	/* Stash the regs for handle_bad_stack */
	mov	x0, sp

	/* Time to die */
	bl	handle_bad_stack
	ASM_BUG()
#endif /* CONFIG_VMAP_STACK */

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, el, reason, regsize = 64
	kernel_entry \el, \regsize
	mov	x0, sp
	mov	x1, #\reason
	mrs	x2, esr_el1
	bl	bad_mode
	ASM_BUG()
	.endm

el0_sync_invalid:
	inv_entry 0, BAD_SYNC
ENDPROC(el0_sync_invalid)

el0_irq_invalid:
	inv_entry 0, BAD_IRQ
ENDPROC(el0_irq_invalid)

el0_fiq_invalid:
	inv_entry 0, BAD_FIQ
ENDPROC(el0_fiq_invalid)

el0_error_invalid:
	inv_entry 0, BAD_ERROR
ENDPROC(el0_error_invalid)

#ifdef CONFIG_COMPAT
el0_fiq_invalid_compat:
	inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat)

el0_error_invalid_compat:
	inv_entry 0, BAD_ERROR, 32
ENDPROC(el0_error_invalid_compat)
#endif

el1_sync_invalid:
	inv_entry 1, BAD_SYNC
ENDPROC(el1_sync_invalid)

el1_irq_invalid:
	inv_entry 1, BAD_IRQ
ENDPROC(el1_irq_invalid)

el1_fiq_invalid:
	inv_entry 1, BAD_FIQ
ENDPROC(el1_fiq_invalid)

el1_error_invalid:
	inv_entry 1, BAD_ERROR
ENDPROC(el1_error_invalid)

/*
 * EL1 mode handlers.
 */
	.align	6
el1_sync:
	kernel_entry 1
	mrs	x1, esr_el1			// read the syndrome register
	lsr	x24, x1, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_DABT_CUR	// data abort in EL1
	b.eq	el1_da
	cmp	x24, #ESR_ELx_EC_IABT_CUR	// instruction abort in EL1
	b.eq	el1_ia
	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
	b.eq	el1_undef
	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL1
	b.eq	el1_undef
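	// The debug exception classes occupy the top of the EC space, so a
	// single b.ge below sends everything from BREAKPT_CUR upwards to
	// el1_dbg, which filters further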
	cmp	x24, #ESR_ELx_EC_BREAKPT_CUR	// debug exception in EL1
	b.ge	el1_dbg
	b	el1_inv

el1_ia:
	/*
	 * Fall through to the Data abort case
	 */
el1_da:
	/*
	 * Data abort handling
	 */
	mrs	x3, far_el1
	enable_dbg
	// re-enable interrupts if they were enabled in the aborted context
	tbnz	x23, #7, 1f			// PSR_I_BIT
	enable_irq
1:
	clear_address_tag x0, x3
	mov	x2, sp				// struct pt_regs
	bl	do_mem_abort

	// disable interrupts before pulling preserved data off the stack
	disable_irq
	kernel_exit 1
el1_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x0, far_el1
	enable_dbg
	mov	x2, sp
	bl	do_sp_pc_abort
	ASM_BUG()
el1_undef:
	/*
	 * Undefined instruction
	 */
	enable_dbg
	mov	x0, sp
	bl	do_undefinstr
	ASM_BUG()
el1_dbg:
	/*
	 * Debug exception handling
	 */
	cmp	x24, #ESR_ELx_EC_BRK64		// if BRK64
	cinc	x24, x24, eq			// set bit '0'
	tbz	x24, #0, el1_inv		// EL1 only
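	// The *_CUR debug exception classes all have bit 0 of the EC set,
	// while the *_LOW ones do not; BRK64 is the odd one out, hence the
	// cinc above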
	mrs	x0, far_el1
	mov	x2, sp				// struct pt_regs
	bl	do_debug_exception
	kernel_exit 1
el1_inv:
	// TODO: add support for undefined instructions in kernel mode
	enable_dbg
	mov	x0, sp
	mov	x2, x1
	mov	x1, #BAD_SYNC
	bl	bad_mode
	ASM_BUG()
ENDPROC(el1_sync)

	.align	6
el1_irq:
	kernel_entry 1
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	irq_handler

#ifdef CONFIG_PREEMPT
	ldr	w24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
	cbnz	w24, 1f				// preempt count != 0
	ldr	x0, [tsk, #TSK_TI_FLAGS]	// get flags
	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
	bl	el1_preempt
1:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	kernel_exit 1
ENDPROC(el1_irq)

#ifdef CONFIG_PREEMPT
el1_preempt:
	mov	x24, lr
1:	bl	preempt_schedule_irq		// irq en/disable is done inside
	ldr	x0, [tsk, #TSK_TI_FLAGS]	// get the new task's TI_FLAGS
	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
	ret	x24
#endif

/*
 * EL0 mode handlers.
 */
	.align	6
el0_sync:
	kernel_entry 0
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_SVC64		// SVC in 64-bit state
	b.eq	el0_svc
	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_ELx_EC_FP_EXC64	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
	b.eq	el0_sys
	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv

#ifdef CONFIG_COMPAT
	.align	6
el0_sync_compat:
	kernel_entry 0, 32
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_SVC32		// SVC in 32-bit state
	b.eq	el0_svc_compat
	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_ELx_EC_FP_EXC32	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP15_32	// CP15 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP15_64	// CP15 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_MR	// CP14 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_LS	// CP14 LDC/STC trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_64	// CP14 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv
el0_svc_compat:
	/*
	 * AArch32 syscall handling
	 */
	adrp	stbl, compat_sys_call_table	// load compat syscall table pointer
	mov	wscno, w7			// syscall number in w7 (r7)
	mov     wsc_nr, #__NR_compat_syscalls
	b	el0_svc_naked

	.align	6
el0_irq_compat:
	kernel_entry 0, 32
	b	el0_irq_naked
#endif

el0_da:
	/*
	 * Data abort handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	clear_address_tag x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_mem_abort
	b	ret_to_user
el0_ia:
	/*
	 * Instruction abort handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_mem_abort
	b	ret_to_user
el0_fpsimd_acc:
	/*
	 * Floating Point or Advanced SIMD access
	 */
	enable_dbg
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_fpsimd_acc
	b	ret_to_user
el0_fpsimd_exc:
	/*
	 * Floating Point or Advanced SIMD exception
	 */
	enable_dbg
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_fpsimd_exc
	b	ret_to_user
el0_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_sp_pc_abort
	b	ret_to_user
el0_undef:
	/*
	 * Undefined instruction
	 */
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, sp
	bl	do_undefinstr
	b	ret_to_user
el0_sys:
	/*
	 * System instructions, for trapped cache maintenance instructions
	 */
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_sysinstr
	b	ret_to_user
el0_dbg:
	/*
	 * Debug exception handling
	 */
	tbnz	x24, #0, el0_inv		// EL0 only
	mrs	x0, far_el1
	mov	x1, x25
	mov	x2, sp
	bl	do_debug_exception
	enable_dbg
	ct_user_exit
	b	ret_to_user
el0_inv:
	enable_dbg
	ct_user_exit
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mov	x2, x25
	bl	bad_el0_sync
	b	ret_to_user
ENDPROC(el0_sync)

	.align	6
el0_irq:
	kernel_entry 0
el0_irq_naked:
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	ct_user_exit
	irq_handler

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	b	ret_to_user
ENDPROC(el0_irq)

/*
 * This is the fast syscall return path.  We do as little as possible here,
 * and this includes saving x0 back into the kernel stack.
 */
ret_fast_syscall:
	disable_irq				// disable interrupts
	str	x0, [sp, #S_X0]			// returned x0
	ldr	x1, [tsk, #TSK_TI_FLAGS]	// re-check for syscall tracing
	and	x2, x1, #_TIF_SYSCALL_WORK
	cbnz	x2, ret_fast_syscall_trace
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
	enable_step_tsk x1, x2
	kernel_exit 0
ret_fast_syscall_trace:
	enable_irq				// enable interrupts
	b	__sys_trace_return_skipped	// we already saved x0

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
work_pending:
	mov	x0, sp				// 'regs'
	bl	do_notify_resume
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on		// enabled while in userspace
#endif
	ldr	x1, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
	b	finish_ret_to_user
/*
 * "slow" syscall return path.
 */
ret_to_user:
	disable_irq				// disable interrupts
	ldr	x1, [tsk, #TSK_TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
finish_ret_to_user:
	enable_step_tsk x1, x2
	kernel_exit 0
ENDPROC(ret_to_user)

/*
 * SVC handler.
 */
	.align	6
el0_svc:
	adrp	stbl, sys_call_table		// load syscall table pointer
	mov	wscno, w8			// syscall number in w8
	mov	wsc_nr, #__NR_syscalls
el0_svc_naked:					// compat entry point
	stp	x0, xscno, [sp, #S_ORIG_X0]	// save the original x0 and syscall number
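	// syscallno is stored right next to orig_x0 in struct pt_regs, so the
	// single stp above saves both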
	enable_dbg_and_irq
	ct_user_exit 1

	ldr	x16, [tsk, #TSK_TI_FLAGS]	// check for syscall hooks
	tst	x16, #_TIF_SYSCALL_WORK
	b.ne	__sys_trace
	cmp     wscno, wsc_nr			// check upper syscall limit
	b.hs	ni_sys
	ldr	x16, [stbl, xscno, lsl #3]	// address in the syscall table
	blr	x16				// call sys_* routine
	b	ret_fast_syscall
ni_sys:
	mov	x0, sp
	bl	do_ni_syscall
	b	ret_fast_syscall
ENDPROC(el0_svc)

	/*
	 * This is the really slow path.  We're going to be doing context
	 * switches, and waiting for our parent to respond.
	 */
__sys_trace:
	cmp     wscno, #NO_SYSCALL		// user-issued syscall(-1)?
	b.ne	1f
	mov	x0, #-ENOSYS			// set default errno if so
	str	x0, [sp, #S_X0]
1:	mov	x0, sp
	bl	syscall_trace_enter
	cmp	w0, #NO_SYSCALL			// skip the syscall?
	b.eq	__sys_trace_return_skipped
	mov	wscno, w0			// syscall number (possibly new)
	mov	x1, sp				// pointer to regs
	cmp	wscno, wsc_nr			// check upper syscall limit
	b.hs	__ni_sys_trace
	ldp	x0, x1, [sp]			// restore the syscall args
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	ldr	x16, [stbl, xscno, lsl #3]	// address in the syscall table
	blr	x16				// call sys_* routine

__sys_trace_return:
	str	x0, [sp, #S_X0]			// save returned x0
__sys_trace_return_skipped:
	mov	x0, sp
	bl	syscall_trace_exit
	b	ret_to_user

__ni_sys_trace:
	mov	x0, sp
	bl	do_ni_syscall
	b	__sys_trace_return

	.popsection				// .entry.text

/*
 * Special system call wrappers.
 */
ENTRY(sys_rt_sigreturn_wrapper)
	mov	x0, sp
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 *   x0 = previous task_struct (must be preserved across the switch)
 *   x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 *
 */
ENTRY(cpu_switch_to)
	mov	x10, #THREAD_CPU_CONTEXT
	add	x8, x0, x10
	mov	x9, sp
	stp	x19, x20, [x8], #16		// store callee-saved registers
	stp	x21, x22, [x8], #16
	stp	x23, x24, [x8], #16
	stp	x25, x26, [x8], #16
	stp	x27, x28, [x8], #16
	stp	x29, x9, [x8], #16
	str	lr, [x8]
	add	x8, x1, x10
	ldp	x19, x20, [x8], #16		// restore callee-saved registers
	ldp	x21, x22, [x8], #16
	ldp	x23, x24, [x8], #16
	ldp	x25, x26, [x8], #16
	ldp	x27, x28, [x8], #16
	ldp	x29, x9, [x8], #16
	ldr	lr, [x8]
	mov	sp, x9
	msr	sp_el0, x1
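	// sp_el0 now holds the 'next' task pointer; the entry code reads it
	// back via get_thread_info to locate current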
	ret
ENDPROC(cpu_switch_to)
NOKPROBE(cpu_switch_to)

/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
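	// For a kernel thread, copy_thread() left the thread function in x19
	// and its argument in x20; x19 is zero for a user task.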
	cbz	x19, 1f				// not a kernel thread
	mov	x0, x20
	blr	x19
1:	get_thread_info tsk
	b	ret_to_user
ENDPROC(ret_from_fork)
NOKPROBE(ret_from_fork)