xref: /openbmc/linux/arch/arm/kernel/entry-common.S (revision e2f1cf25)
/*
 *  linux/arch/arm/kernel/entry-common.S
 *
 *  Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm/assembler.h>
#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/unwind.h>

#ifdef CONFIG_NEED_RET_TO_USER
#include <mach/entry-macro.S>
#else
	.macro  arch_ret_to_user, tmp1, tmp2
	.endm
#endif

#include "entry-header.S"
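/*
 * Reminder of the register aliases used throughout this file; they are
 * defined in entry-header.S, which is the authoritative list:
 *
 *	scno	.req	r7		@ syscall number
 *	tbl	.req	r8		@ syscall table pointer
 *	why	.req	r8		@ Linux syscall (!= 0)
 *	tsk	.req	r9		@ current thread_info
 */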


	.align	5
/*
 * This is the fast syscall return path.  We do as little as
 * possible here, and this includes saving r0 back into the SVC
 * stack.
 */
ret_fast_syscall:
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	disable_irq				@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
	tst	r1, #_TIF_SYSCALL_WORK
	bne	__sys_trace_return
	tst	r1, #_TIF_WORK_MASK
	bne	fast_work_pending
	asm_trace_hardirqs_on

	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr
	ct_user_enter

	restore_user_regs fast = 1, offset = S_OFF
 UNWIND(.fnend		)

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
fast_work_pending:
	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0
work_pending:
	mov	r0, sp				@ 'regs'
	mov	r2, why				@ 'syscall'
	bl	do_work_pending
	cmp	r0, #0
	beq	no_work_pending
	movlt	scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
	ldmia	sp, {r0 - r6}			@ have to reload r0 - r6
	b	local_restart			@ ... and off we go
ENDPROC(ret_fast_syscall)

/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 */
ENTRY(ret_to_user)
ret_slow_syscall:
	disable_irq				@ disable interrupts
ENTRY(ret_to_user_from_irq)
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK
	bne	work_pending
no_work_pending:
	asm_trace_hardirqs_on

	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr
	ct_user_enter save = 0

	restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user_from_irq)
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
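/*
 * A note on the registers tested below (based on how copy_thread() in
 * arch/arm/kernel/process.c sets up the child's saved context): for a
 * kernel thread, r5 holds the thread function and r4 its argument, so
 * a non-zero r5 means "call r5 with r0 = r4 first"; for a user task
 * r5 is zero and we fall straight through to the return-to-user path.
 */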
ENTRY(ret_from_fork)
	bl	schedule_tail
	cmp	r5, #0
	movne	r0, r4
	badrne	lr, 1f
	retne	r5
1:	get_thread_info tsk
	b	ret_slow_syscall
ENDPROC(ret_from_fork)

	.equ NR_syscalls,0
#define CALL(x) .equ NR_syscalls,NR_syscalls+1
#include "calls.S"

/*
 * Ensure that the size of the system call table matches __NR_syscalls,
 * which is the value the rest of the system sees.
 */
.ifne NR_syscalls - __NR_syscalls
.error "__NR_syscalls is not equal to the size of the syscall table"
.endif

#undef CALL
#define CALL(x) .long x
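/*
 * calls.S is included twice: the first include above defines CALL() so
 * that each entry merely bumps NR_syscalls, letting the .ifne check
 * compare the table size against __NR_syscalls; now that CALL(x) has
 * been redefined to emit a .long pointer, the includes under
 * sys_call_table (and sys_oabi_call_table) below expand into the
 * actual table entries.
 */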

/*=============================================================================
 * SWI handler
 *-----------------------------------------------------------------------------
 */

	.align	5
ENTRY(vector_swi)
#ifdef CONFIG_CPU_V7M
	v7m_exception_entry
#else
	sub	sp, sp, #S_FRAME_SIZE
	stmia	sp, {r0 - r12}			@ Calling r0 - r12
 ARM(	add	r8, sp, #S_PC		)
 ARM(	stmdb	r8, {sp, lr}^		)	@ Calling sp, lr
 THUMB(	mov	r8, sp			)
 THUMB(	store_user_sp_lr r8, r10, S_SP	)	@ calling sp, lr
	mrs	r8, spsr			@ called from non-FIQ mode, so ok.
	str	lr, [sp, #S_PC]			@ Save calling PC
	str	r8, [sp, #S_PSR]		@ Save CPSR
	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
#endif
	zero_fp
	alignment_trap r10, ip, __cr_alignment
	enable_irq
	ct_user_exit
	get_thread_info tsk

	/*
	 * Get the system call number.
	 */
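	/*
	 * For reference (not part of this file): a typical EABI user
	 * space stub passes the number in r7 and issues "swi 0", e.g.
	 *
	 *	mov	r7, #__NR_getpid
	 *	swi	#0
	 *
	 * while an old-ABI stub encodes the number in the swi
	 * immediate instead:
	 *
	 *	swi	#(__NR_SYSCALL_BASE + __NR_getpid)
	 *
	 * which is why the OABI and legacy paths below have to read
	 * the swi instruction back from user space.
	 */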

#if defined(CONFIG_OABI_COMPAT)

	/*
	 * If we have CONFIG_OABI_COMPAT then we need to look at the swi
	 * value to determine if it is an EABI or an old ABI call.
	 */
#ifdef CONFIG_ARM_THUMB
	tst	r8, #PSR_T_BIT
	movne	r10, #0				@ no thumb OABI emulation
 USER(	ldreq	r10, [lr, #-4]		)	@ get SWI instruction
#else
 USER(	ldr	r10, [lr, #-4]		)	@ get SWI instruction
#endif
 ARM_BE8(rev	r10, r10)			@ little endian instruction

#elif defined(CONFIG_AEABI)

	/*
	 * Pure EABI user space always puts the syscall number into scno (r7).
	 */
#elif defined(CONFIG_ARM_THUMB)
	/* Legacy ABI only, possibly thumb mode. */
	tst	r8, #PSR_T_BIT			@ this is SPSR from save_user_regs
	addne	scno, r7, #__NR_SYSCALL_BASE	@ put OS number in
 USER(	ldreq	scno, [lr, #-4]		)

#else
	/* Legacy ABI only. */
 USER(	ldr	scno, [lr, #-4]		)	@ get SWI instruction
#endif

	adr	tbl, sys_call_table		@ load syscall table pointer

#if defined(CONFIG_OABI_COMPAT)
	/*
	 * If the swi argument is zero, this is an EABI call and we do nothing.
	 *
	 * If this is an old ABI call, get the syscall number into scno and
	 * get the old ABI syscall table address.
	 */
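	/*
	 * Worked example (illustrative only): an OABI swi for syscall 4
	 * reads back as 0xef900004.  bics clears the 0xef000000 opcode
	 * bits, leaving 0x00900004 (non-zero, so NE), and the eor with
	 * __NR_OABI_SYSCALL_BASE (0x900000) leaves scno = 4.  An EABI
	 * "swi 0" leaves zero after bics, the conditional instructions
	 * are skipped, and scno keeps the value user space put in r7.
	 */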
	bics	r10, r10, #0xff000000
	eorne	scno, r10, #__NR_OABI_SYSCALL_BASE
	ldrne	tbl, =sys_oabi_call_table
#elif !defined(CONFIG_AEABI)
	bic	scno, scno, #0xff000000		@ mask off SWI op-code
	eor	scno, scno, #__NR_SYSCALL_BASE	@ check OS number
#endif

local_restart:
	ldr	r10, [tsk, #TI_FLAGS]		@ check for syscall tracing
	stmdb	sp!, {r4, r5}			@ push fifth and sixth args

	tst	r10, #_TIF_SYSCALL_WORK		@ are we tracing syscalls?
	bne	__sys_trace

	cmp	scno, #NR_syscalls		@ check upper syscall limit
	badr	lr, ret_fast_syscall		@ return address
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine

	add	r1, sp, #S_OFF
2:	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
	eor	r0, scno, #__NR_SYSCALL_BASE	@ put OS number back
	bcs	arm_syscall
	mov	why, #0				@ no longer a real syscall
	b	sys_ni_syscall			@ not private func

#if defined(CONFIG_OABI_COMPAT) || !defined(CONFIG_AEABI)
	/*
	 * We failed to handle a fault trying to access the page
	 * containing the swi instruction, but we're not really in a
	 * position to return -EFAULT. Instead, return to the
	 * instruction and re-enter the user fault handling path trying
	 * to page it in. This will likely result in sending SEGV to the
	 * current task.
	 */
9001:
	sub	lr, lr, #4
	str	lr, [sp, #S_PC]
	b	ret_fast_syscall
#endif
ENDPROC(vector_swi)

	/*
	 * This is the really slow path.  We're going to be doing
	 * context switches, and waiting for our parent to respond.
	 */
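	/*
	 * Note: syscall_trace_enter() returns the syscall number to
	 * use, which the tracer may have rewritten; a return value of
	 * -1 means the tracer wants the syscall skipped altogether,
	 * which is what the "cmp scno, #-1" below checks for.
	 */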
__sys_trace:
	mov	r1, scno
	add	r0, sp, #S_OFF
	bl	syscall_trace_enter

	badr	lr, __sys_trace_return		@ return address
	mov	scno, r0			@ syscall number (possibly new)
	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
	cmp	scno, #NR_syscalls		@ check upper syscall limit
	ldmccia	r1, {r0 - r6}			@ have to reload r0 - r6
	stmccia	sp, {r4, r5}			@ and update the stack args
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
	cmp	scno, #-1			@ skip the syscall?
	bne	2b
	add	sp, sp, #S_OFF			@ restore stack
	b	ret_slow_syscall

__sys_trace_return:
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
	mov	r0, sp
	bl	syscall_trace_exit
	b	ret_slow_syscall

	.align	5
#ifdef CONFIG_ALIGNMENT_TRAP
	.type	__cr_alignment, #object
__cr_alignment:
	.word	cr_alignment
#endif
	.ltorg

/*
 * This is the syscall table declaration for native ABI syscalls.
 * With EABI a couple of syscalls are obsolete and defined as sys_ni_syscall.
 */
#define ABI(native, compat) native
#ifdef CONFIG_AEABI
#define OBSOLETE(syscall) sys_ni_syscall
#else
#define OBSOLETE(syscall) syscall
#endif

	.type	sys_call_table, #object
ENTRY(sys_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE

/*============================================================================
 * Special system call wrappers
 */
@ r0 = syscall number
@ r8 = syscall table
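@ sys_syscall handles the indirect syscall(2) entry point: the real
@ syscall number arrives in r0 and the real arguments in r1-r6, so for
@ a valid number the code below shifts each argument down one register
@ and stores r5/r6 into the on-stack slots used for arguments five and
@ six before indexing the table as usual.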
sys_syscall:
		bic	scno, r0, #__NR_OABI_SYSCALL_BASE
		cmp	scno, #__NR_syscall - __NR_SYSCALL_BASE
		cmpne	scno, #NR_syscalls	@ check range
		stmloia	sp, {r5, r6}		@ shuffle args
		movlo	r0, r1
		movlo	r1, r2
		movlo	r2, r3
		movlo	r3, r4
		ldrlo	pc, [tbl, scno, lsl #2]
		b	sys_ni_syscall
ENDPROC(sys_syscall)

sys_sigreturn_wrapper:
		add	r0, sp, #S_OFF
		mov	why, #0		@ prevent syscall restart handling
		b	sys_sigreturn
ENDPROC(sys_sigreturn_wrapper)

sys_rt_sigreturn_wrapper:
		add	r0, sp, #S_OFF
		mov	why, #0		@ prevent syscall restart handling
		b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

sys_statfs64_wrapper:
		teq	r1, #88
		moveq	r1, #84
		b	sys_statfs64
ENDPROC(sys_statfs64_wrapper)

sys_fstatfs64_wrapper:
		teq	r1, #88
		moveq	r1, #84
		b	sys_fstatfs64
ENDPROC(sys_fstatfs64_wrapper)

/*
 * Note: off_4k (r5) is always in units of 4K.  If we can't do the
 * requested offset because it is not page aligned, we return -EINVAL.
 */
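/*
 * Illustrative example (assuming 16K pages, i.e. PAGE_SHIFT == 14):
 * user space passes the offset in 4K units in r5, so an offset of 32K
 * arrives as off_4k == 8.  The low PAGE_SHIFT - 12 bits must be zero,
 * and the shift converts it to the page-sized units sys_mmap_pgoff()
 * expects: 8 >> 2 == 2.  With 4K pages the units already match and r5
 * is passed through unchanged.
 */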
sys_mmap2:
#if PAGE_SHIFT > 12
		tst	r5, #PGOFF_MASK
		moveq	r5, r5, lsr #PAGE_SHIFT - 12
		streq	r5, [sp, #4]
		beq	sys_mmap_pgoff
		mov	r0, #-EINVAL
		ret	lr
#else
		str	r5, [sp, #4]
		b	sys_mmap_pgoff
#endif
ENDPROC(sys_mmap2)

#ifdef CONFIG_OABI_COMPAT

/*
 * These are syscalls with argument register differences
 */
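/*
 * Background on why these exist: EABI aligns 64-bit syscall arguments
 * to an even register pair, whereas the old ABI packs them into the
 * next available registers.  For example pread64(fd, buf, count, pos)
 * carries the 64-bit pos in r4/r5 under EABI (r3 is left unused) but
 * in r3/r4 under OABI, so the wrappers below rewrite the stacked
 * argument slots before tail-calling the regular EABI-built handlers.
 */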

sys_oabi_pread64:
		stmia	sp, {r3, r4}
		b	sys_pread64
ENDPROC(sys_oabi_pread64)

sys_oabi_pwrite64:
		stmia	sp, {r3, r4}
		b	sys_pwrite64
ENDPROC(sys_oabi_pwrite64)

sys_oabi_truncate64:
		mov	r3, r2
		mov	r2, r1
		b	sys_truncate64
ENDPROC(sys_oabi_truncate64)

sys_oabi_ftruncate64:
		mov	r3, r2
		mov	r2, r1
		b	sys_ftruncate64
ENDPROC(sys_oabi_ftruncate64)

sys_oabi_readahead:
		str	r3, [sp]
		mov	r3, r2
		mov	r2, r1
		b	sys_readahead
ENDPROC(sys_oabi_readahead)

/*
 * Let's declare a second syscall table for old ABI binaries
 * using the compatibility syscall entries.
 */
#define ABI(native, compat) compat
#define OBSOLETE(syscall) syscall

	.type	sys_oabi_call_table, #object
ENTRY(sys_oabi_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE

#endif
