#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/errno.h>
#include <asm/thread_info.h>

@ Bad Abort numbers
@ -----------------
@
#define BAD_PREFETCH	0
#define BAD_DATA	1
#define BAD_ADDREXCPTN	2
#define BAD_IRQ		3
#define BAD_UNDEFINSTR	4

@
@ Most of the stack format comes from struct pt_regs, but with
@ the addition of 8 bytes for storing syscall args 5 and 6.
@ This _must_ remain a multiple of 8 for EABI.
@
#define S_OFF		8

/*
 * The SWI code relies on the fact that R0 is at the bottom of the stack
 * (due to slow/fast restore user regs).
 */
#if S_R0 != 0
#error "Please fix"
#endif

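	@
	@ Clear the frame pointer on kernel entry so that frame-pointer
	@ based backtraces terminate at the exception boundary instead of
	@ walking into stale user state.
	@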
	.macro	zero_fp
#ifdef CONFIG_FRAME_POINTER
	mov	fp, #0
#endif
	.endm

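	@
	@ Reload the cached control register value (the cr_alignment word
	@ referenced via .LCcralign) into the CP15 control register so the
	@ current alignment-trap setting takes effect on the return path.
	@ Clobbers \rtemp.
	@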
	.macro	alignment_trap, rtemp
#ifdef CONFIG_ALIGNMENT_TRAP
	ldr	\rtemp, .LCcralign
	ldr	\rtemp, [\rtemp]
	mcr	p15, 0, \rtemp, c1, c0
#endif
	.endm

	@
	@ Store/load the USER SP and LR registers by switching to the SYS
	@ mode. Useful in Thumb-2 mode where "stm/ldm rd, {sp, lr}^" is not
	@ available. Should only be called from SVC mode
	@
	.macro	store_user_sp_lr, rd, rtemp, offset = 0
	mrs	\rtemp, cpsr
	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
	msr	cpsr_c, \rtemp			@ switch to the SYS mode

	str	sp, [\rd, #\offset]		@ save sp_usr
	str	lr, [\rd, #\offset + 4]		@ save lr_usr

	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
	msr	cpsr_c, \rtemp			@ switch back to the SVC mode
	.endm

	.macro	load_user_sp_lr, rd, rtemp, offset = 0
	mrs	\rtemp, cpsr
	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
	msr	cpsr_c, \rtemp			@ switch to the SYS mode

	ldr	sp, [\rd, #\offset]		@ load sp_usr
	ldr	lr, [\rd, #\offset + 4]		@ load lr_usr

	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
	msr	cpsr_c, \rtemp			@ switch back to the SVC mode
	.endm

#ifndef CONFIG_THUMB2_KERNEL
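	@
	@ svc_exit: return from an exception taken in SVC mode.  \rpsr names
	@ the register holding the PSR value to restore; irq != 0 means the
	@ caller guarantees IRQs are already disabled on entry.
	@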
	.macro	svc_exit, rpsr, irq = 0
	.if	\irq != 0
	@ IRQs already off
#ifdef CONFIG_TRACE_IRQFLAGS
	@ The parent context IRQs must have been enabled to get here in
	@ the first place, so there's no point checking the PSR I bit.
	bl	trace_hardirqs_on
#endif
	.else
	@ IRQs off again before pulling preserved data off the stack
	disable_irq_notrace
#ifdef CONFIG_TRACE_IRQFLAGS
	tst	\rpsr, #PSR_I_BIT
	bleq	trace_hardirqs_on
	tst	\rpsr, #PSR_I_BIT
	blne	trace_hardirqs_off
#endif
	.endif
	msr	spsr_cxsf, \rpsr
#if defined(CONFIG_CPU_V6)
	ldr	r0, [sp]
	strex	r1, r2, [sp]			@ clear the exclusive monitor
	ldmib	sp, {r1 - pc}^			@ load r1 - pc, cpsr
#elif defined(CONFIG_CPU_32v6K)
	clrex					@ clear the exclusive monitor
	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
#else
	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
#endif
	.endm

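	@
	@ restore_user_regs: return to user space from the pt_regs frame at
	@ sp + offset.  With fast = 1, r0 is not reloaded since it already
	@ holds the syscall return value.  Note the writeback on the S_PC
	@ load: it leaves sp pointing at the saved pc so the ldmdb below can
	@ walk back down the frame.
	@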
	.macro	restore_user_regs, fast = 0, offset = 0
	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
	ldr	lr, [sp, #\offset + S_PC]!	@ get pc
	msr	spsr_cxsf, r1			@ save in spsr_svc
#if defined(CONFIG_CPU_V6)
	strex	r1, r2, [sp]			@ clear the exclusive monitor
#elif defined(CONFIG_CPU_32v6K)
	clrex					@ clear the exclusive monitor
#endif
	.if	\fast
	ldmdb	sp, {r1 - lr}^			@ get calling r1 - lr
	.else
	ldmdb	sp, {r0 - lr}^			@ get calling r0 - lr
	.endif
	mov	r0, r0				@ ARMv5T and earlier require a nop
						@ after ldm {}^
	add	sp, sp, #S_FRAME_SIZE - S_PC
	movs	pc, lr				@ return & move spsr_svc into cpsr
	.endm

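	@
	@ The kernel stack is 8K and 8K-aligned, with struct thread_info at
	@ its base, so masking off the low 13 bits of sp yields the current
	@ thread_info pointer.
	@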
	.macro	get_thread_info, rd
	mov	\rd, sp, lsr #13
	mov	\rd, \rd, lsl #13
	.endm

	@
	@ 32-bit wide "mov pc, reg"
	@
	.macro	movw_pc, reg
	mov	pc, \reg
	.endm
#else	/* CONFIG_THUMB2_KERNEL */
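	@
	@ svc_exit (Thumb-2): "ldm ...^" with the PC in the register list is
	@ not available, so an {lr, pc, psr} frame is built just below the
	@ saved SP, r0-r12 are restored from pt_regs, lr is popped, and
	@ "rfeia sp!" then restores pc and the PSR atomically, leaving sp at
	@ its pre-exception value.
	@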
	.macro	svc_exit, rpsr, irq = 0
	.if	\irq != 0
	@ IRQs already off
#ifdef CONFIG_TRACE_IRQFLAGS
	@ The parent context IRQs must have been enabled to get here in
	@ the first place, so there's no point checking the PSR I bit.
	bl	trace_hardirqs_on
#endif
	.else
	@ IRQs off again before pulling preserved data off the stack
	disable_irq_notrace
#ifdef CONFIG_TRACE_IRQFLAGS
	tst	\rpsr, #PSR_I_BIT
	bleq	trace_hardirqs_on
	tst	\rpsr, #PSR_I_BIT
	blne	trace_hardirqs_off
#endif
	.endif
	ldr	lr, [sp, #S_SP]			@ top of the stack
	ldrd	r0, r1, [sp, #S_LR]		@ calling lr and pc
	clrex					@ clear the exclusive monitor
	stmdb	lr!, {r0, r1, \rpsr}		@ calling lr and rfe context
	ldmia	sp, {r0 - r12}
	mov	sp, lr
	ldr	lr, [sp], #4
	rfeia	sp!
	.endm

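	@
	@ restore_user_regs (Thumb-2): the user sp/lr are restored via SYS
	@ mode (load_user_sp_lr), r0-r12 come from the pt_regs frame, and
	@ the final "movs pc, lr" copies spsr_svc back into the user CPSR.
	@ With fast = 1, r0 (the syscall return value) is left untouched.
	@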
	.macro	restore_user_regs, fast = 0, offset = 0
	clrex					@ clear the exclusive monitor
	mov	r2, sp
	load_user_sp_lr r2, r3, \offset + S_SP	@ calling sp, lr
	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
	ldr	lr, [sp, #\offset + S_PC]	@ get pc
	add	sp, sp, #\offset + S_SP
	msr	spsr_cxsf, r1			@ save in spsr_svc
	.if	\fast
	ldmdb	sp, {r1 - r12}			@ get calling r1 - r12
	.else
	ldmdb	sp, {r0 - r12}			@ get calling r0 - r12
	.endif
	add	sp, sp, #S_FRAME_SIZE - S_SP
	movs	pc, lr				@ return & move spsr_svc into cpsr
	.endm

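	@
	@ Same trick as the ARM version: mask sp down to the 8K-aligned
	@ stack base, where thread_info lives.
	@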
	.macro	get_thread_info, rd
	mov	\rd, sp
	lsr	\rd, \rd, #13
	mov	\rd, \rd, lsl #13
	.endm

	@
	@ 32-bit wide "mov pc, reg"
	@
	.macro	movw_pc, reg
	mov	pc, \reg
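	@ "mov pc, reg" assembles to a 16-bit Thumb instruction; the nop
	@ below pads the macro out to 4 bytes, matching the ARM version.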
	nop
	.endm
#endif	/* !CONFIG_THUMB2_KERNEL */

/*
 * Context tracking subsystem.  Used to instrument transitions
 * between user and kernel mode.
 */
	.macro ct_user_exit, save = 1
#ifdef CONFIG_CONTEXT_TRACKING
	.if	\save
	stmdb   sp!, {r0-r3, ip, lr}
	bl	user_exit
	ldmia	sp!, {r0-r3, ip, lr}
	.else
	bl	user_exit
	.endif
#endif
	.endm

	.macro ct_user_enter, save = 1
#ifdef CONFIG_CONTEXT_TRACKING
	.if	\save
	stmdb   sp!, {r0-r3, ip, lr}
	bl	user_enter
	ldmia	sp!, {r0-r3, ip, lr}
	.else
	bl	user_enter
	.endif
#endif
	.endm

/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - r0 to r6.
 *
 * r7 is reserved for the system call number for thumb mode.
 *
 * Note that tbl == why is intentional.
 *
 * We must set at least "tsk" and "why" when calling ret_with_reschedule.
 */
scno	.req	r7		@ syscall number
tbl	.req	r8		@ syscall table pointer
why	.req	r8		@ Linux syscall (!= 0)
tsk	.req	r9		@ current thread_info