/*
 *  linux/arch/arm/kernel/entry-common.S
 *
 *  Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/config.h>

#include <asm/unistd.h>

#include "entry-header.S"


	.align	5
/*
 * This is the fast syscall return path.  We do as little as
 * possible here, and this includes saving r0 back into the SVC
 * stack.
 */
ret_fast_syscall:
	disable_irq				@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK
	bne	fast_work_pending

	@ fast_restore_user_regs
	ldr	r1, [sp, #S_OFF + S_PSR]	@ get calling cpsr
	ldr	lr, [sp, #S_OFF + S_PC]!	@ get pc
	msr	spsr_cxsf, r1			@ save in spsr_svc
	ldmdb	sp, {r1 - lr}^			@ get calling r1 - lr
	mov	r0, r0
	add	sp, sp, #S_FRAME_SIZE - S_PC
	movs	pc, lr				@ return & move spsr_svc into cpsr
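/*
 * Note on the restore sequence above: the "^" form of ldmdb loads the
 * user-mode (banked) copies of the registers, the "mov r0, r0" is a NOP
 * inserted because no banked register may be accessed in the instruction
 * immediately following a user-register ldm, and "movs pc, lr" copies
 * spsr_svc back into cpsr as it returns to user space.
 */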

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
fast_work_pending:
	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0
work_pending:
	tst	r1, #_TIF_NEED_RESCHED
	bne	work_resched
	tst	r1, #_TIF_NOTIFY_RESUME | _TIF_SIGPENDING
	beq	no_work_pending
	mov	r0, sp				@ 'regs'
	mov	r2, why				@ 'syscall'
	bl	do_notify_resume
	disable_irq				@ disable interrupts
	b	no_work_pending

work_resched:
	bl	schedule
/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 */
ENTRY(ret_to_user)
ret_slow_syscall:
	disable_irq				@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK
	bne	work_pending
no_work_pending:
	@ slow_restore_user_regs
	ldr	r1, [sp, #S_PSR]		@ get calling cpsr
	ldr	lr, [sp, #S_PC]!		@ get pc
	msr	spsr_cxsf, r1			@ save in spsr_svc
	ldmdb	sp, {r0 - lr}^			@ get calling r0 - lr
	mov	r0, r0
	add	sp, sp, #S_FRAME_SIZE - S_PC
	movs	pc, lr				@ return & move spsr_svc into cpsr

/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	get_thread_info tsk
	ldr	r1, [tsk, #TI_FLAGS]		@ check for syscall tracing
	mov	why, #1
	tst	r1, #_TIF_SYSCALL_TRACE		@ are we tracing syscalls?
	beq	ret_slow_syscall
	mov	r1, sp
	mov	r0, #1				@ trace exit [IP = 1]
	bl	syscall_trace
	b	ret_slow_syscall
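/*
 * A newly forked task starts running here: schedule_tail() finishes the
 * bookkeeping for the task we switched away from, and if the new task is
 * being traced (_TIF_SYSCALL_TRACE) a syscall-exit event is reported
 * before taking the normal slow return path to user space.
 */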


#include "calls.S"

/*=============================================================================
 * SWI handler
 *-----------------------------------------------------------------------------
 */

	/* If we're optimising for StrongARM the resulting code won't
	   run on an ARM7 and we can save a couple of instructions.
								--pb */
#ifdef CONFIG_CPU_ARM710
	.macro	arm710_bug_check, instr, temp
	and	\temp, \instr, #0x0f000000	@ check for SWI
	teq	\temp, #0x0f000000
	bne	.Larm700bug
	.endm

.Larm700bug:
	ldr	r0, [sp, #S_PSR]		@ Get calling cpsr
	sub	lr, lr, #4
	str	lr, [r8]
	msr	spsr_cxsf, r0
	ldmia	sp, {r0 - lr}^			@ Get calling r0 - lr
	mov	r0, r0
	ldr	lr, [sp, #S_PC]			@ Get PC
	add	sp, sp, #S_FRAME_SIZE
	movs	pc, lr
#else
	.macro	arm710_bug_check, instr, temp
	.endm
#endif
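/*
 * arm710_bug_check works around an ARM710 erratum whereby the SWI vector
 * can be entered with something other than a SWI instruction at the return
 * address.  The macro checks that the word fetched from the return address
 * really encodes a SWI (bits 27:24 == 0xf) and, if not, .Larm700bug unwinds
 * the saved context and returns to retry the instruction.
 */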

	.align	5
ENTRY(vector_swi)
	sub	sp, sp, #S_FRAME_SIZE
	stmia	sp, {r0 - r12}			@ Calling r0 - r12
	add	r8, sp, #S_PC
	stmdb	r8, {sp, lr}^			@ Calling sp, lr
	mrs	r8, spsr			@ called from non-FIQ mode, so ok.
	str	lr, [sp, #S_PC]			@ Save calling PC
	str	r8, [sp, #S_PSR]		@ Save CPSR
	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
	zero_fp

	/*
	 * Get the system call number.
	 */
#ifdef CONFIG_ARM_THUMB
	tst	r8, #PSR_T_BIT			@ this is SPSR from save_user_regs
	addne	scno, r7, #__NR_SYSCALL_BASE	@ put OS number in
	ldreq	scno, [lr, #-4]
#else
	ldr	scno, [lr, #-4]			@ get SWI instruction
#endif
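	/*
	 * In ARM state the syscall number is encoded in the low 24 bits of
	 * the SWI instruction itself, so it is read back from [lr, #-4] and
	 * the opcode bits are masked off further down.  A Thumb SWI cannot
	 * encode the full number, so Thumb callers pass it in r7 and
	 * __NR_SYSCALL_BASE is added to match the ARM encoding.
	 */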
	arm710_bug_check scno, ip

#ifdef CONFIG_ALIGNMENT_TRAP
	ldr	ip, __cr_alignment
	ldr	ip, [ip]
	mcr	p15, 0, ip, c1, c0		@ update control register
#endif
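	/*
	 * The sequence above reloads the CP15 control register from the
	 * cached cr_alignment word, so the syscall appears to run with the
	 * kernel's own alignment-trap setting regardless of what was in
	 * effect in user mode.
	 */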
	enable_irq

	str	r4, [sp, #-S_OFF]!		@ push fifth arg

	get_thread_info tsk
	ldr	ip, [tsk, #TI_FLAGS]		@ check for syscall tracing
	bic	scno, scno, #0xff000000		@ mask off SWI op-code
	eor	scno, scno, #__NR_SYSCALL_BASE	@ check OS number
	adr	tbl, sys_call_table		@ load syscall table pointer
	tst	ip, #_TIF_SYSCALL_TRACE		@ are we tracing syscalls?
	bne	__sys_trace

	adr	lr, ret_fast_syscall		@ return address
	cmp	scno, #NR_syscalls		@ check upper syscall limit
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
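	/*
	 * lr was preloaded with ret_fast_syscall, so a valid syscall number
	 * (scno < NR_syscalls) "calls" the handler straight out of the
	 * table and returns through the fast path; out-of-range numbers
	 * fall through to the code below.
	 */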

	add	r1, sp, #S_OFF
2:	mov	why, #0				@ no longer a real syscall
	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
	eor	r0, scno, #__NR_SYSCALL_BASE	@ put OS number back
	bcs	arm_syscall
	b	sys_ni_syscall			@ not private func
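	/*
	 * Only out-of-table numbers reach this point: numbers at or above
	 * __ARM_NR_BASE are ARM-private calls handled by arm_syscall(),
	 * anything else gets -ENOSYS from sys_ni_syscall().
	 */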

	/*
	 * This is the really slow path.  We're going to be doing
	 * context switches, and waiting for our parent to respond.
	 */
__sys_trace:
	add	r1, sp, #S_OFF
	mov	r0, #0				@ trace entry [IP = 0]
	bl	syscall_trace

	adr	lr, __sys_trace_return		@ return address
	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
	cmp	scno, #NR_syscalls		@ check upper syscall limit
	ldmccia	r1, {r0 - r3}			@ have to reload r0 - r3
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
	b	2b

__sys_trace_return:
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
	mov	r1, sp
	mov	r0, #1				@ trace exit [IP = 1]
	bl	syscall_trace
	b	ret_slow_syscall

	.align	5
#ifdef CONFIG_ALIGNMENT_TRAP
	.type	__cr_alignment, #object
__cr_alignment:
	.word	cr_alignment
#endif

	.type	sys_call_table, #object
ENTRY(sys_call_table)
#include "calls.S"

/*============================================================================
 * Special system call wrappers
 */
@ r0 = syscall number
@ r5 = syscall table
		.type	sys_syscall, #function
sys_syscall:
		eor	scno, r0, #__NR_SYSCALL_BASE
		cmp	scno, #__NR_syscall - __NR_SYSCALL_BASE
		cmpne	scno, #NR_syscalls	@ check range
		stmloia	sp, {r5, r6}		@ shuffle args
		movlo	r0, r1
		movlo	r1, r2
		movlo	r2, r3
		movlo	r3, r4
		ldrlo	pc, [tbl, scno, lsl #2]
		b	sys_ni_syscall
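/*
 * sys_syscall implements the indirect syscall(2) interface: the real call
 * number arrives in r0, so the remaining arguments are each shifted down
 * one register and the displaced fifth and sixth arguments (r5, r6) are
 * written to the stack argument slots.  Calling sys_syscall through itself
 * is rejected by the first cmp above, as are out-of-range numbers.
 */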

sys_fork_wrapper:
		add	r0, sp, #S_OFF
		b	sys_fork

sys_vfork_wrapper:
		add	r0, sp, #S_OFF
		b	sys_vfork

sys_execve_wrapper:
		add	r3, sp, #S_OFF
		b	sys_execve

sys_clone_wrapper:
		add	ip, sp, #S_OFF
		str	ip, [sp, #4]
		b	sys_clone
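/*
 * These wrappers pass a pointer to the saved user registers (pt_regs at
 * sp + S_OFF) to the C implementation: fork/vfork/execve receive it in a
 * register argument, while clone has it stored into a stack argument slot.
 */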

sys_sigsuspend_wrapper:
		add	r3, sp, #S_OFF
		b	sys_sigsuspend

sys_rt_sigsuspend_wrapper:
		add	r2, sp, #S_OFF
		b	sys_rt_sigsuspend

sys_sigreturn_wrapper:
		add	r0, sp, #S_OFF
		b	sys_sigreturn

sys_rt_sigreturn_wrapper:
		add	r0, sp, #S_OFF
		b	sys_rt_sigreturn

sys_sigaltstack_wrapper:
		ldr	r2, [sp, #S_OFF + S_SP]
		b	do_sigaltstack

sys_futex_wrapper:
		str	r5, [sp, #4]		@ push sixth arg
		b	sys_futex

sys_arm_fadvise64_64_wrapper:
		str	r5, [sp, #4]		@ push r5 to stack
		b	sys_arm_fadvise64_64

/*
 * Note: off_4k (r5) is always units of 4K.  If we can't do the requested
 * offset, we return EINVAL.
 */
sys_mmap2:
#if PAGE_SHIFT > 12
		tst	r5, #PGOFF_MASK
		moveq	r5, r5, lsr #PAGE_SHIFT - 12
		streq	r5, [sp, #4]
		beq	do_mmap2
		mov	r0, #-EINVAL
		RETINSTR(mov,pc, lr)
#else
		str	r5, [sp, #4]
		b	do_mmap2
#endif
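/*
 * Example: with 16K pages (PAGE_SHIFT = 14) the 4K-unit offset must be a
 * multiple of 4, so an off_4k of 8 (32KB) becomes page offset 2, while an
 * off_4k of 3 (12KB) has bits set under PGOFF_MASK and is rejected with
 * -EINVAL.  With 4K pages the offset is already in page units and is
 * simply stored as the sixth argument for do_mmap2.
 */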