/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/unistd32.h>

/*
 * Bad Abort numbers
 *-----------------
 */
#define BAD_SYNC	0
#define BAD_IRQ		1
#define BAD_FIQ		2
#define BAD_ERROR	3

	.macro	kernel_entry, el, regsize = 64
	sub	sp, sp, #S_FRAME_SIZE - S_LR	// room for LR, SP, SPSR, ELR
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif
	push	x28, x29
	push	x26, x27
	push	x24, x25
	push	x22, x23
	push	x20, x21
	push	x18, x19
	push	x16, x17
	push	x14, x15
	push	x12, x13
	push	x10, x11
	push	x8, x9
	push	x6, x7
	push	x4, x5
	push	x2, x3
	push	x0, x1
	.if	\el == 0
	mrs	x21, sp_el0
	get_thread_info tsk			// Ensure MDSCR_EL1.SS is clear,
	ldr	x19, [tsk, #TI_FLAGS]		// since we can unmask debug
	disable_step_tsk x19, x20		// exceptions when scheduling.
	.else
	add	x21, sp, #S_FRAME_SIZE
	.endif
	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	lr, x21, [sp, #S_LR]
	stp	x22, x23, [sp, #S_PC]

	/*
	 * Set syscallno to -1 by default (overridden later if real syscall).
	 */
	.if	\el == 0
	mvn	x21, xzr			// NOT of xzr, i.e. x21 = -1
	str	x21, [sp, #S_SYSCALLNO]
	.endif

	/*
	 * Registers that may be useful after this macro is invoked:
	 *
	 * x21 - aborted SP
	 * x22 - aborted PC
	 * x23 - aborted PSTATE
	 */
	.endm
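
/*
 * The frame built by kernel_entry mirrors struct pt_regs, whose S_*
 * offsets (S_LR, S_PC, S_SYSCALLNO, ...) are generated by asm-offsets.c.
 * For orientation only, a rough C view of the layout (see asm/ptrace.h
 * for the authoritative definition):
 *
 *	struct pt_regs {
 *		u64 regs[31];	// x0..x30; x30 (lr) saved at S_LR
 *		u64 sp;		// aborted SP (x21 above)
 *		u64 pc;		// aborted PC, from ELR_EL1 (x22 above)
 *		u64 pstate;	// aborted PSTATE, from SPSR_EL1 (x23 above)
 *		u64 orig_x0;	// x0 at syscall entry, for restart
 *		u64 syscallno;	// -1 unless entered via SVC
 *	};
 */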

	.macro	kernel_exit, el, ret = 0
	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
	.if	\el == 0
	ldr	x23, [sp, #S_SP]		// load return stack pointer
	.endif
	.if	\ret
	ldr	x1, [sp, #S_X1]			// preserve x0 (syscall return)
	add	sp, sp, S_X2
	.else
	pop	x0, x1
	.endif
	pop	x2, x3				// load the rest of the registers
	pop	x4, x5
	pop	x6, x7
	pop	x8, x9
	msr	elr_el1, x21			// set up the return data
	msr	spsr_el1, x22
	.if	\el == 0
	msr	sp_el0, x23
	.endif
	pop	x10, x11
	pop	x12, x13
	pop	x14, x15
	pop	x16, x17
	pop	x18, x19
	pop	x20, x21
	pop	x22, x23
	pop	x24, x25
	pop	x26, x27
	pop	x28, x29
	ldr	lr, [sp], #S_FRAME_SIZE - S_LR	// load LR and restore SP
	eret					// return to kernel
	.endm
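
/*
 * Note that elr_el1/spsr_el1 must be programmed before the later pops
 * clobber x21/x22: eret then restores the PC from ELR_EL1 and PSTATE
 * from SPSR_EL1 (and, for EL0 returns, execution resumes on the SP_EL0
 * written above).
 */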

	.macro	get_thread_info, rd
	mov	\rd, sp
	and	\rd, \rd, #~(THREAD_SIZE - 1)	// top of stack
	.endm
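
/*
 * Worked example, assuming THREAD_SIZE = 16K (0x4000) and a hypothetical
 * SP of 0xffff800000743e50: masking with ~0x3fff yields 0xffff800000740000,
 * the base of the current kernel stack, where struct thread_info lives.
 */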

/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - x0 to x6.
 *
 * x7 is reserved for the system call number in 32-bit mode.
 */
sc_nr	.req	x25		// number of system calls
scno	.req	x26		// syscall number
stbl	.req	x27		// syscall table pointer
tsk	.req	x28		// current thread_info
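
/*
 * x25-x28 are callee-saved in the AArch64 procedure call standard, so
 * these aliases survive the C calls made on the syscall path (e.g.
 * syscall_trace_enter) without needing to be reloaded.
 */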

/*
 * Interrupt handling.
 */
	.macro	irq_handler
	ldr	x1, handle_arch_irq		// load the dispatch function pointer
	mov	x0, sp
	blr	x1
	.endm

	.text

/*
 * Exception vectors.
 */

	.align	11
ENTRY(vectors)
	ventry	el1_sync_invalid		// Synchronous EL1t
	ventry	el1_irq_invalid			// IRQ EL1t
	ventry	el1_fiq_invalid			// FIQ EL1t
	ventry	el1_error_invalid		// Error EL1t

	ventry	el1_sync			// Synchronous EL1h
	ventry	el1_irq				// IRQ EL1h
	ventry	el1_fiq_invalid			// FIQ EL1h
	ventry	el1_error_invalid		// Error EL1h

	ventry	el0_sync			// Synchronous 64-bit EL0
	ventry	el0_irq				// IRQ 64-bit EL0
	ventry	el0_fiq_invalid			// FIQ 64-bit EL0
	ventry	el0_error_invalid		// Error 64-bit EL0

#ifdef CONFIG_COMPAT
	ventry	el0_sync_compat			// Synchronous 32-bit EL0
	ventry	el0_irq_compat			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid_compat		// FIQ 32-bit EL0
	ventry	el0_error_invalid_compat	// Error 32-bit EL0
#else
	ventry	el0_sync_invalid		// Synchronous 32-bit EL0
	ventry	el0_irq_invalid			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid			// FIQ 32-bit EL0
	ventry	el0_error_invalid		// Error 32-bit EL0
#endif
END(vectors)
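
/*
 * Layout notes: the architecture requires VBAR_EL1 to be 2KB-aligned,
 * hence the .align 11 above. The table has four groups of four 128-byte
 * slots (Synchronous, IRQ, FIQ, SError), taken in turn from: the current
 * EL using SP_EL0 (EL1t), the current EL using SP_ELx (EL1h), a lower EL
 * in AArch64, and a lower EL in AArch32.
 */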

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, el, reason, regsize = 64
	kernel_entry el, \regsize
	mov	x0, sp
	mov	x1, #\reason
	mrs	x2, esr_el1
	b	bad_mode
	.endm

el0_sync_invalid:
	inv_entry 0, BAD_SYNC
ENDPROC(el0_sync_invalid)

el0_irq_invalid:
	inv_entry 0, BAD_IRQ
ENDPROC(el0_irq_invalid)

el0_fiq_invalid:
	inv_entry 0, BAD_FIQ
ENDPROC(el0_fiq_invalid)

el0_error_invalid:
	inv_entry 0, BAD_ERROR
ENDPROC(el0_error_invalid)

#ifdef CONFIG_COMPAT
el0_fiq_invalid_compat:
	inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat)

el0_error_invalid_compat:
	inv_entry 0, BAD_ERROR, 32
ENDPROC(el0_error_invalid_compat)
#endif

el1_sync_invalid:
	inv_entry 1, BAD_SYNC
ENDPROC(el1_sync_invalid)

el1_irq_invalid:
	inv_entry 1, BAD_IRQ
ENDPROC(el1_irq_invalid)

el1_fiq_invalid:
	inv_entry 1, BAD_FIQ
ENDPROC(el1_fiq_invalid)

el1_error_invalid:
	inv_entry 1, BAD_ERROR
ENDPROC(el1_error_invalid)

/*
 * EL1 mode handlers.
 */
	.align	6
el1_sync:
	kernel_entry 1
	mrs	x1, esr_el1			// read the syndrome register
	lsr	x24, x1, #ESR_EL1_EC_SHIFT	// exception class
	cmp	x24, #ESR_EL1_EC_DABT_EL1	// data abort in EL1
	b.eq	el1_da
	cmp	x24, #ESR_EL1_EC_SYS64		// configurable trap
	b.eq	el1_undef
	cmp	x24, #ESR_EL1_EC_SP_ALIGN	// stack alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_EL1_EC_PC_ALIGN	// pc alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_EL1_EC_UNKNOWN	// unknown exception in EL1
	b.eq	el1_undef
	cmp	x24, #ESR_EL1_EC_BREAKPT_EL1	// debug exception in EL1
	b.ge	el1_dbg
	b	el1_inv
el1_da:
	/*
	 * Data abort handling
	 */
	mrs	x0, far_el1
	enable_dbg
	// re-enable interrupts if they were enabled in the aborted context
	tbnz	x23, #7, 1f			// PSR_I_BIT
	enable_irq
1:
	mov	x2, sp				// struct pt_regs
	bl	do_mem_abort

	// disable interrupts before pulling preserved data off the stack
	disable_irq
	kernel_exit 1
el1_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x0, far_el1
	enable_dbg
	mov	x2, sp
	b	do_sp_pc_abort
el1_undef:
	/*
	 * Undefined instruction
	 */
	enable_dbg
	mov	x0, sp
	b	do_undefinstr
el1_dbg:
	/*
	 * Debug exception handling
	 */
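	/*
	 * The debug exception classes taken from EL1 (hardware breakpoint,
	 * software step, watchpoint) have bit 0 of the EC set, while their
	 * EL0 counterparts have it clear. BRK64, which is also handled
	 * here, has bit 0 clear, so the conditional increment below makes
	 * it pass the "EL1 only" tbz check.
	 */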
	cmp	x24, #ESR_EL1_EC_BRK64		// if BRK64
	cinc	x24, x24, eq			// set bit '0'
	tbz	x24, #0, el1_inv		// EL1 only
	mrs	x0, far_el1
	mov	x2, sp				// struct pt_regs
	bl	do_debug_exception
	enable_dbg
	kernel_exit 1
el1_inv:
	// TODO: add support for undefined instructions in kernel mode
	enable_dbg
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mrs	x2, esr_el1
	b	bad_mode
ENDPROC(el1_sync)

	.align	6
el1_irq:
	kernel_entry 1
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	irq_handler

#ifdef CONFIG_PREEMPT
	get_thread_info tsk
	ldr	w24, [tsk, #TI_PREEMPT]		// get preempt count
	cbnz	w24, 1f				// preempt count != 0
	ldr	x0, [tsk, #TI_FLAGS]		// get flags
	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
	bl	el1_preempt
1:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	kernel_exit 1
ENDPROC(el1_irq)

#ifdef CONFIG_PREEMPT
el1_preempt:
	mov	x24, lr
1:	bl	preempt_schedule_irq		// irq en/disable is done inside
	ldr	x0, [tsk, #TI_FLAGS]		// get new task's TI_FLAGS
	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
	ret	x24
#endif
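
/*
 * TIF_NEED_RESCHED is re-checked after each preempt_schedule_irq() call
 * because interrupts are briefly enabled inside it, so a fresh resched
 * request may have arrived before we return to the interrupted context.
 */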

/*
 * EL0 mode handlers.
 */
	.align	6
el0_sync:
	kernel_entry 0
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_EL1_EC_SHIFT	// exception class
	cmp	x24, #ESR_EL1_EC_SVC64		// SVC in 64-bit state
	b.eq	el0_svc
	adr	lr, ret_to_user
	cmp	x24, #ESR_EL1_EC_DABT_EL0	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_EL1_EC_IABT_EL0	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_EL1_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_EL1_EC_FP_EXC64	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_EL1_EC_SYS64		// configurable trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_SP_ALIGN	// stack alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_EL1_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_EL1_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_BREAKPT_EL0	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv
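
/*
 * The "adr lr, ret_to_user" above preloads the return address, so each
 * of the el0_* handlers below can simply tail-branch ("b") into its C
 * handler, whose return then lands directly in ret_to_user.
 */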

#ifdef CONFIG_COMPAT
	.align	6
el0_sync_compat:
	kernel_entry 0, 32
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_EL1_EC_SHIFT	// exception class
	cmp	x24, #ESR_EL1_EC_SVC32		// SVC in 32-bit state
	b.eq	el0_svc_compat
	adr	lr, ret_to_user
	cmp	x24, #ESR_EL1_EC_DABT_EL0	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_EL1_EC_IABT_EL0	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_EL1_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_EL1_EC_FP_EXC32	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_EL1_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP15_32	// CP15 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP15_64	// CP15 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP14_MR	// CP14 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP14_LS	// CP14 LDC/STC trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP14_64	// CP14 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_BREAKPT_EL0	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv
el0_svc_compat:
	/*
	 * AArch32 syscall handling
	 */
	adr	stbl, compat_sys_call_table	// load compat syscall table pointer
	uxtw	scno, w7			// syscall number in w7 (r7)
	mov	sc_nr, #__NR_compat_syscalls
	b	el0_svc_naked

	.align	6
el0_irq_compat:
	kernel_entry 0, 32
	b	el0_irq_naked
#endif

el0_da:
	/*
	 * Data abort handling
	 */
	mrs	x0, far_el1
	bic	x0, x0, #(0xff << 56)		// strip the tag byte (TBI) from the fault address
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	mov	x1, x25
	mov	x2, sp
	b	do_mem_abort
el0_ia:
	/*
	 * Instruction abort handling
	 */
	mrs	x0, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	orr	x1, x25, #1 << 24		// use reserved ISS bit for instruction aborts
	mov	x2, sp
	b	do_mem_abort
el0_fpsimd_acc:
	/*
	 * Floating Point or Advanced SIMD access
	 */
	enable_dbg
	mov	x0, x25
	mov	x1, sp
	b	do_fpsimd_acc
el0_fpsimd_exc:
	/*
	 * Floating Point or Advanced SIMD exception
	 */
	enable_dbg
	mov	x0, x25
	mov	x1, sp
	b	do_fpsimd_exc
el0_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x0, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	mov	x1, x25
	mov	x2, sp
	b	do_sp_pc_abort
el0_undef:
	/*
	 * Undefined instruction
	 */
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	mov	x0, sp
	b	do_undefinstr
el0_dbg:
	/*
	 * Debug exception handling
	 */
	tbnz	x24, #0, el0_inv		// EL0 only
	mrs	x0, far_el1
	mov	x1, x25
	mov	x2, sp
	bl	do_debug_exception
	enable_dbg
	b	ret_to_user
el0_inv:
	enable_dbg
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mrs	x2, esr_el1
	b	bad_mode
ENDPROC(el0_sync)

	.align	6
el0_irq:
	kernel_entry 0
el0_irq_naked:
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	irq_handler

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	b	ret_to_user
ENDPROC(el0_irq)

/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 *   x0 = previous task_struct (must be preserved across the switch)
 *   x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 */
ENTRY(cpu_switch_to)
	add	x8, x0, #THREAD_CPU_CONTEXT
	mov	x9, sp
	stp	x19, x20, [x8], #16		// store callee-saved registers
	stp	x21, x22, [x8], #16
	stp	x23, x24, [x8], #16
	stp	x25, x26, [x8], #16
	stp	x27, x28, [x8], #16
	stp	x29, x9, [x8], #16
	str	lr, [x8]
	add	x8, x1, #THREAD_CPU_CONTEXT
	ldp	x19, x20, [x8], #16		// restore callee-saved registers
	ldp	x21, x22, [x8], #16
	ldp	x23, x24, [x8], #16
	ldp	x25, x26, [x8], #16
	ldp	x27, x28, [x8], #16
	ldp	x29, x9, [x8], #16
	ldr	lr, [x8]
	mov	sp, x9
	ret
ENDPROC(cpu_switch_to)
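
/*
 * The store/load pattern above follows struct cpu_context (see
 * asm/processor.h): x19-x28, fp, sp, pc, located at offset
 * THREAD_CPU_CONTEXT within task_struct. Loading lr with next's saved
 * pc means the final ret resumes next exactly where it last called
 * cpu_switch_to.
 */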

/*
 * This is the fast syscall return path.  We do as little as possible here,
 * and this includes saving x0 back into the kernel stack.
 */
ret_fast_syscall:
	disable_irq				// disable interrupts
	ldr	x1, [tsk, #TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, fast_work_pending
	enable_step_tsk x1, x2
	kernel_exit 0, ret = 1
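
/*
 * With ret = 1, kernel_exit leaves x0 untouched (it still holds the
 * syscall return value) and reloads only x1 from the frame, popping the
 * remaining registers from S_X2 upwards.
 */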

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
fast_work_pending:
	str	x0, [sp, #S_X0]			// returned x0
work_pending:
	tbnz	x1, #TIF_NEED_RESCHED, work_resched
	/* TIF_SIGPENDING, TIF_NOTIFY_RESUME or TIF_FOREIGN_FPSTATE case */
	ldr	x2, [sp, #S_PSTATE]
	mov	x0, sp				// 'regs'
	tst	x2, #PSR_MODE_MASK		// user mode regs?
	b.ne	no_work_pending			// returning to kernel
	enable_irq				// enable interrupts for do_notify_resume()
	bl	do_notify_resume
	b	ret_to_user
work_resched:
	bl	schedule

/*
 * "slow" syscall return path.
 */
ret_to_user:
	disable_irq				// disable interrupts
	ldr	x1, [tsk, #TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
	enable_step_tsk x1, x2
no_work_pending:
	kernel_exit 0, ret = 0
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	cbz	x19, 1f				// not a kernel thread
	mov	x0, x20
	blr	x19
1:	get_thread_info tsk
	b	ret_to_user
ENDPROC(ret_from_fork)
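
/*
 * x19/x20 are set up by copy_thread(): for a kernel thread, x19 holds
 * the thread function and x20 its argument; for a user task x19 is zero,
 * so we skip straight to ret_to_user after schedule_tail().
 */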

/*
 * SVC handler.
 */
	.align	6
el0_svc:
	adrp	stbl, sys_call_table		// load syscall table pointer
	uxtw	scno, w8			// syscall number in w8
	mov	sc_nr, #__NR_syscalls
el0_svc_naked:					// compat entry point
	stp	x0, scno, [sp, #S_ORIG_X0]	// save the original x0 and syscall number
	enable_dbg_and_irq

	ldr	x16, [tsk, #TI_FLAGS]		// check for syscall hooks
	tst	x16, #_TIF_SYSCALL_WORK
	b.ne	__sys_trace
	adr	lr, ret_fast_syscall		// return address
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	ni_sys
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	br	x16				// call sys_* routine
ni_sys:
	mov	x0, sp
	b	do_ni_syscall
ENDPROC(el0_svc)
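
/*
 * Dispatch note: the table entries are 8-byte function pointers, hence
 * the "lsl #3" scaling, and lr is preloaded with ret_fast_syscall so the
 * br into the sys_* routine returns straight to the fast exit path.
 */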

	/*
	 * This is the really slow path.  We're going to be doing context
	 * switches, and waiting for our parent to respond.
	 */
__sys_trace:
	mov	x0, sp
	bl	syscall_trace_enter
	adr	lr, __sys_trace_return		// return address
	uxtw	scno, w0			// syscall number (possibly new)
	mov	x1, sp				// pointer to regs
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	ni_sys
	ldp	x0, x1, [sp]			// restore the syscall args
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	br	x16				// call sys_* routine

__sys_trace_return:
	str	x0, [sp]			// save returned x0
	mov	x0, sp
	bl	syscall_trace_exit
	b	ret_to_user

/*
 * Special system call wrappers.
 */
ENTRY(sys_rt_sigreturn_wrapper)
	mov	x0, sp
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)
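
/*
 * Writable function-pointer slot for the top-level IRQ dispatcher: the
 * interrupt controller driver installs its handler here at boot (through
 * the arch's set_handle_irq()), and the irq_handler macro above loads
 * and calls it on every interrupt.
 */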

ENTRY(handle_arch_irq)
	.quad	0