xref: /openbmc/linux/arch/arm64/kernel/entry.S (revision 21278aea)
/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/unistd32.h>

/*
 * Bad Abort numbers
 *-----------------
 */
#define BAD_SYNC	0
#define BAD_IRQ		1
#define BAD_FIQ		2
#define BAD_ERROR	3

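/*
 * kernel_entry builds a struct pt_regs frame on the kernel stack and
 * kernel_exit (below) unwinds it. A rough sketch of the layout assumed
 * here (the authoritative offsets are the S_* constants generated into
 * asm-offsets.h):
 *
 *	struct pt_regs {
 *		u64 regs[31];		// x0..x30: S_X0, S_X1, ... S_LR
 *		u64 sp;			// S_SP
 *		u64 pc;			// S_PC
 *		u64 pstate;		// S_PSTATE
 *		u64 orig_x0;		// S_ORIG_X0
 *		u64 syscallno;		// S_SYSCALLNO
 *	};
 */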
	.macro	kernel_entry, el, regsize = 64
	sub	sp, sp, #S_FRAME_SIZE - S_LR	// room for LR, SP, SPSR, ELR
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif
	push	x28, x29
	push	x26, x27
	push	x24, x25
	push	x22, x23
	push	x20, x21
	push	x18, x19
	push	x16, x17
	push	x14, x15
	push	x12, x13
	push	x10, x11
	push	x8, x9
	push	x6, x7
	push	x4, x5
	push	x2, x3
	push	x0, x1
	.if	\el == 0
	mrs	x21, sp_el0
	get_thread_info tsk			// Ensure MDSCR_EL1.SS is clear,
	ldr	x19, [tsk, #TI_FLAGS]		// since we can unmask debug
	disable_step_tsk x19, x20		// exceptions when scheduling.
	.else
	add	x21, sp, #S_FRAME_SIZE
	.endif
	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	lr, x21, [sp, #S_LR]
	stp	x22, x23, [sp, #S_PC]

	/*
	 * Set syscallno to -1 by default (overridden later if real syscall).
	 */
	.if	\el == 0
	mvn	x21, xzr
	str	x21, [sp, #S_SYSCALLNO]
	.endif

	/*
	 * Registers that may be useful after this macro is invoked:
	 *
	 * x21 - aborted SP
	 * x22 - aborted PC
	 * x23 - aborted PSTATE
	 */
	.endm
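/*
 * kernel_exit undoes kernel_entry: it reloads ELR_EL1/SPSR_EL1 (and SP_EL0
 * when returning to EL0) before restoring the general-purpose registers and
 * issuing eret. With \ret = 1, x0 is left untouched so that a syscall return
 * value placed there survives; x1 is reloaded from the frame instead.
 */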
	.macro	kernel_exit, el, ret = 0
	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
	.if	\el == 0
	ldr	x23, [sp, #S_SP]		// load return stack pointer
	.endif
	.if	\ret
	ldr	x1, [sp, #S_X1]			// preserve x0 (syscall return)
	add	sp, sp, S_X2
	.else
	pop	x0, x1
	.endif
	pop	x2, x3				// load the rest of the registers
	pop	x4, x5
	pop	x6, x7
	pop	x8, x9
	msr	elr_el1, x21			// set up the return data
	msr	spsr_el1, x22
	.if	\el == 0
	msr	sp_el0, x23
	.endif
	pop	x10, x11
	pop	x12, x13
	pop	x14, x15
	pop	x16, x17
	pop	x18, x19
	pop	x20, x21
	pop	x22, x23
	pop	x24, x25
	pop	x26, x27
	pop	x28, x29
	ldr	lr, [sp], #S_FRAME_SIZE - S_LR	// load LR and restore SP
	eret					// return to the aborted context
	.endm
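/*
 * thread_info lives at the base of the THREAD_SIZE-aligned kernel stack,
 * so it can be located by masking the low bits off the stack pointer.
 */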
	.macro	get_thread_info, rd
	mov	\rd, sp
	and	\rd, \rd, #~(THREAD_SIZE - 1)	// top of stack
	.endm

/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - x0 to x6.
 *
 * x7 is reserved for the system call number in 32-bit mode.
 */
sc_nr	.req	x25		// number of system calls
scno	.req	x26		// syscall number
stbl	.req	x27		// syscall table pointer
tsk	.req	x28		// current thread_info

/*
 * Interrupt handling.
 */
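/*
 * irq_handler makes an indirect call through the handle_arch_irq pointer
 * (defined at the end of this file), passing the pt_regs frame in x0.
 */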
	.macro	irq_handler
	ldr	x1, handle_arch_irq
	mov	x0, sp
	blr	x1
	.endm

	.text

/*
 * Exception vectors.
 */

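/*
 * The table must be 2KB-aligned (.align 11) because VBAR_EL1 ignores its
 * low 11 bits; each ventry below expands to a 128-byte-aligned branch,
 * giving the 16 slots the architecture expects.
 */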
	.align	11
ENTRY(vectors)
	ventry	el1_sync_invalid		// Synchronous EL1t
	ventry	el1_irq_invalid			// IRQ EL1t
	ventry	el1_fiq_invalid			// FIQ EL1t
	ventry	el1_error_invalid		// Error EL1t

	ventry	el1_sync			// Synchronous EL1h
	ventry	el1_irq				// IRQ EL1h
	ventry	el1_fiq_invalid			// FIQ EL1h
	ventry	el1_error_invalid		// Error EL1h

	ventry	el0_sync			// Synchronous 64-bit EL0
	ventry	el0_irq				// IRQ 64-bit EL0
	ventry	el0_fiq_invalid			// FIQ 64-bit EL0
	ventry	el0_error_invalid		// Error 64-bit EL0

#ifdef CONFIG_COMPAT
	ventry	el0_sync_compat			// Synchronous 32-bit EL0
	ventry	el0_irq_compat			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid_compat		// FIQ 32-bit EL0
	ventry	el0_error_invalid_compat	// Error 32-bit EL0
#else
	ventry	el0_sync_invalid		// Synchronous 32-bit EL0
	ventry	el0_irq_invalid			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid			// FIQ 32-bit EL0
	ventry	el0_error_invalid		// Error 32-bit EL0
#endif
END(vectors)

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, el, reason, regsize = 64
	kernel_entry el, \regsize
	mov	x0, sp
	mov	x1, #\reason
	mrs	x2, esr_el1
	b	bad_mode
	.endm

el0_sync_invalid:
	inv_entry 0, BAD_SYNC
ENDPROC(el0_sync_invalid)

el0_irq_invalid:
	inv_entry 0, BAD_IRQ
ENDPROC(el0_irq_invalid)

el0_fiq_invalid:
	inv_entry 0, BAD_FIQ
ENDPROC(el0_fiq_invalid)

el0_error_invalid:
	inv_entry 0, BAD_ERROR
ENDPROC(el0_error_invalid)

#ifdef CONFIG_COMPAT
el0_fiq_invalid_compat:
	inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat)

el0_error_invalid_compat:
	inv_entry 0, BAD_ERROR, 32
ENDPROC(el0_error_invalid_compat)
#endif

el1_sync_invalid:
	inv_entry 1, BAD_SYNC
ENDPROC(el1_sync_invalid)

el1_irq_invalid:
	inv_entry 1, BAD_IRQ
ENDPROC(el1_irq_invalid)

el1_fiq_invalid:
	inv_entry 1, BAD_FIQ
ENDPROC(el1_fiq_invalid)

el1_error_invalid:
	inv_entry 1, BAD_ERROR
ENDPROC(el1_error_invalid)

/*
 * EL1 mode handlers.
 */
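/*
 * ESR_EL1.EC (bits [31:26]) identifies the type of synchronous exception;
 * the lsr/cmp sequences below dispatch on that field.
 */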
	.align	6
el1_sync:
	kernel_entry 1
	mrs	x1, esr_el1			// read the syndrome register
	lsr	x24, x1, #ESR_EL1_EC_SHIFT	// exception class
	cmp	x24, #ESR_EL1_EC_DABT_EL1	// data abort in EL1
	b.eq	el1_da
	cmp	x24, #ESR_EL1_EC_SYS64		// configurable trap
	b.eq	el1_undef
	cmp	x24, #ESR_EL1_EC_SP_ALIGN	// stack alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_EL1_EC_PC_ALIGN	// pc alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_EL1_EC_UNKNOWN	// unknown exception in EL1
	b.eq	el1_undef
	cmp	x24, #ESR_EL1_EC_BREAKPT_EL1	// debug exception in EL1
	b.ge	el1_dbg
	b	el1_inv
el1_da:
	/*
	 * Data abort handling
	 */
	mrs	x0, far_el1
	enable_dbg
	// re-enable interrupts if they were enabled in the aborted context
	tbnz	x23, #7, 1f			// PSR_I_BIT
	enable_irq
1:
	mov	x2, sp				// struct pt_regs
	bl	do_mem_abort

	// disable interrupts before pulling preserved data off the stack
	disable_irq
	kernel_exit 1
el1_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x0, far_el1
	enable_dbg
	mov	x2, sp				// x1 still holds the ESR value
	b	do_sp_pc_abort
el1_undef:
	/*
	 * Undefined instruction
	 */
	enable_dbg
	mov	x0, sp
	b	do_undefinstr
el1_dbg:
	/*
	 * Debug exception handling
	 */
	cmp	x24, #ESR_EL1_EC_BRK64		// if BRK64
	cinc	x24, x24, eq			// set bit '0'
	tbz	x24, #0, el1_inv		// EL1 only
	mrs	x0, far_el1
	mov	x2, sp				// struct pt_regs
	bl	do_debug_exception
	enable_dbg
	kernel_exit 1
el1_inv:
	// TODO: add support for undefined instructions in kernel mode
	enable_dbg
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mrs	x2, esr_el1
	b	bad_mode
ENDPROC(el1_sync)

	.align	6
el1_irq:
	kernel_entry 1
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	irq_handler

#ifdef CONFIG_PREEMPT
	get_thread_info tsk
	ldr	w24, [tsk, #TI_PREEMPT]		// get preempt count
	cbnz	w24, 1f				// preempt count != 0
	ldr	x0, [tsk, #TI_FLAGS]		// get flags
	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
	bl	el1_preempt
1:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	kernel_exit 1
ENDPROC(el1_irq)

#ifdef CONFIG_PREEMPT
el1_preempt:
	mov	x24, lr
1:	bl	preempt_schedule_irq		// irq en/disable is done inside
	ldr	x0, [tsk, #TI_FLAGS]		// get new task's TI_FLAGS
	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
	ret	x24
#endif

/*
 * EL0 mode handlers.
 */
	.align	6
el0_sync:
	kernel_entry 0
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_EL1_EC_SHIFT	// exception class
	cmp	x24, #ESR_EL1_EC_SVC64		// SVC in 64-bit state
	b.eq	el0_svc
	adr	lr, ret_to_user
	cmp	x24, #ESR_EL1_EC_DABT_EL0	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_EL1_EC_IABT_EL0	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_EL1_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_EL1_EC_FP_EXC64	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_EL1_EC_SYS64		// configurable trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_SP_ALIGN	// stack alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_EL1_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_EL1_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_BREAKPT_EL0	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv

#ifdef CONFIG_COMPAT
	.align	6
el0_sync_compat:
	kernel_entry 0, 32
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_EL1_EC_SHIFT	// exception class
	cmp	x24, #ESR_EL1_EC_SVC32		// SVC in 32-bit state
	b.eq	el0_svc_compat
	adr	lr, ret_to_user
	cmp	x24, #ESR_EL1_EC_DABT_EL0	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_EL1_EC_IABT_EL0	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_EL1_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_EL1_EC_FP_EXC32	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_EL1_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP15_32	// CP15 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP15_64	// CP15 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP14_MR	// CP14 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP14_LS	// CP14 LDC/STC trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP14_64	// CP14 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_BREAKPT_EL0	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv
el0_svc_compat:
	/*
	 * AArch32 syscall handling
	 */
	adr	stbl, compat_sys_call_table	// load compat syscall table pointer
	uxtw	scno, w7			// syscall number in w7 (r7)
	mov	sc_nr, #__NR_compat_syscalls
	b	el0_svc_naked

	.align	6
el0_irq_compat:
	kernel_entry 0, 32
	b	el0_irq_naked
#endif

el0_da:
	/*
	 * Data abort handling
	 */
	mrs	x0, far_el1
	bic	x0, x0, #(0xff << 56)		// clear the address tag (top byte)
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	mov	x1, x25
	mov	x2, sp
	b	do_mem_abort
el0_ia:
	/*
	 * Instruction abort handling
	 */
	mrs	x0, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	orr	x1, x25, #1 << 24		// use reserved ISS bit for instruction aborts
	mov	x2, sp
	b	do_mem_abort
el0_fpsimd_acc:
	/*
	 * Floating Point or Advanced SIMD access
	 */
	enable_dbg
	mov	x0, x25
	mov	x1, sp
	b	do_fpsimd_acc
el0_fpsimd_exc:
	/*
	 * Floating Point or Advanced SIMD exception
	 */
	enable_dbg
	mov	x0, x25
	mov	x1, sp
	b	do_fpsimd_exc
el0_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x0, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	mov	x1, x25
	mov	x2, sp
	b	do_sp_pc_abort
el0_undef:
	/*
	 * Undefined instruction
	 */
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	mov	x0, sp
	b	do_undefinstr
el0_dbg:
	/*
	 * Debug exception handling
	 */
	tbnz	x24, #0, el0_inv		// EL0 only
	mrs	x0, far_el1
	mov	x1, x25
	mov	x2, sp
	bl	do_debug_exception
	enable_dbg
	b	ret_to_user
el0_inv:
	enable_dbg
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mrs	x2, esr_el1
	b	bad_mode
ENDPROC(el0_sync)

	.align	6
el0_irq:
	kernel_entry 0
el0_irq_naked:
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	irq_handler

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	b	ret_to_user
ENDPROC(el0_irq)

/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 *   x0 = previous task_struct (must be preserved across the switch)
 *   x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 */
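/*
 * The context area saved and restored below corresponds to struct
 * cpu_context in <asm/processor.h> (sketched here for reference):
 *
 *	struct cpu_context {
 *		unsigned long x19, x20, x21, x22, x23;
 *		unsigned long x24, x25, x26, x27, x28;
 *		unsigned long fp, sp, pc;
 *	};
 *
 * THREAD_CPU_CONTEXT is its offset within struct task_struct.
 */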
ENTRY(cpu_switch_to)
	add	x8, x0, #THREAD_CPU_CONTEXT
	mov	x9, sp
	stp	x19, x20, [x8], #16		// store callee-saved registers
	stp	x21, x22, [x8], #16
	stp	x23, x24, [x8], #16
	stp	x25, x26, [x8], #16
	stp	x27, x28, [x8], #16
	stp	x29, x9, [x8], #16
	str	lr, [x8]
	add	x8, x1, #THREAD_CPU_CONTEXT
	ldp	x19, x20, [x8], #16		// restore callee-saved registers
	ldp	x21, x22, [x8], #16
	ldp	x23, x24, [x8], #16
	ldp	x25, x26, [x8], #16
	ldp	x27, x28, [x8], #16
	ldp	x29, x9, [x8], #16
	ldr	lr, [x8]
	mov	sp, x9
	ret
ENDPROC(cpu_switch_to)

/*
 * This is the fast syscall return path. We do as little as possible here:
 * in particular, x0 is not written back into the pt_regs frame, since
 * kernel_exit with ret = 1 leaves the return value live in the register.
 */
ret_fast_syscall:
	disable_irq				// disable interrupts
	ldr	x1, [tsk, #TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, fast_work_pending
	enable_step_tsk x1, x2
	kernel_exit 0, ret = 1

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
fast_work_pending:
	str	x0, [sp, #S_X0]			// returned x0
work_pending:
	tbnz	x1, #TIF_NEED_RESCHED, work_resched
	/* TIF_SIGPENDING, TIF_NOTIFY_RESUME or TIF_FOREIGN_FPSTATE case */
	ldr	x2, [sp, #S_PSTATE]
	mov	x0, sp				// 'regs'
	tst	x2, #PSR_MODE_MASK		// user mode regs?
	b.ne	no_work_pending			// returning to kernel
	enable_irq				// enable interrupts for do_notify_resume()
	bl	do_notify_resume
	b	ret_to_user
work_resched:
	bl	schedule			// on return, fall through to ret_to_user

/*
 * "slow" syscall return path.
 */
ret_to_user:
	disable_irq				// disable interrupts
	ldr	x1, [tsk, #TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
	enable_step_tsk x1, x2
no_work_pending:
	kernel_exit 0, ret = 0
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
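/*
 * copy_thread() seeds cpu_context.x19 with the kernel thread entry point
 * (zero for a user task) and x20 with its argument; cpu_switch_to above
 * restores them, so the cbz below tells the two cases apart.
 */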
ENTRY(ret_from_fork)
	bl	schedule_tail
	cbz	x19, 1f				// not a kernel thread
	mov	x0, x20
	blr	x19
1:	get_thread_info tsk
	b	ret_to_user
ENDPROC(ret_from_fork)

/*
 * SVC handler.
 */
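/*
 * AArch64 Linux passes the syscall number in w8 and arguments in x0-x5,
 * with the result returned in x0; a compat (AArch32) task passes the
 * number in r7 instead (see el0_svc_compat above).
 */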
	.align	6
el0_svc:
	adrp	stbl, sys_call_table		// load syscall table pointer
	uxtw	scno, w8			// syscall number in w8
	mov	sc_nr, #__NR_syscalls
el0_svc_naked:					// compat entry point
	stp	x0, scno, [sp, #S_ORIG_X0]	// save the original x0 and syscall number
	enable_dbg_and_irq

	ldr	x16, [tsk, #TI_FLAGS]		// check for syscall hooks
	tst	x16, #_TIF_SYSCALL_WORK
	b.ne	__sys_trace
	adr	lr, ret_fast_syscall		// return address
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	ni_sys
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	br	x16				// call sys_* routine
ni_sys:
	mov	x0, sp
	b	do_ni_syscall
ENDPROC(el0_svc)

	/*
	 * This is the really slow path.  We're going to be doing context
	 * switches, and waiting for our parent to respond.
	 */
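	/*
	 * syscall_trace_enter() returns the (possibly rewritten) syscall
	 * number in x0; the arguments are reloaded from the pt_regs frame
	 * below because a tracer may have changed them too.
	 */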
__sys_trace:
	mov	x0, sp
	bl	syscall_trace_enter
	adr	lr, __sys_trace_return		// return address
	uxtw	scno, w0			// syscall number (possibly new)
	mov	x1, sp				// pointer to regs
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	ni_sys
	ldp	x0, x1, [sp]			// restore the syscall args
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	br	x16				// call sys_* routine

__sys_trace_return:
	str	x0, [sp]			// save returned x0
	mov	x0, sp
	bl	syscall_trace_exit
	b	ret_to_user

/*
 * Special system call wrappers.
 */
ENTRY(sys_rt_sigreturn_wrapper)
	mov	x0, sp
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

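/*
 * Storage for the root interrupt handler pointer read by the irq_handler
 * macro; the interrupt controller driver installs it at boot via
 * set_handle_irq().
 */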
ENTRY(handle_arch_irq)
	.quad	0
