xref: /openbmc/linux/arch/arm/kernel/entry-common.S (revision b04b4f78)
/*
 *  linux/arch/arm/kernel/entry-common.S
 *
 *  Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <mach/entry-macro.S>
#include <asm/unwind.h>

#include "entry-header.S"


	.align	5
/*
 * This is the fast syscall return path.  We do as little as
 * possible here, and this includes saving r0 back into the SVC
 * stack.
 */
ret_fast_syscall:
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	disable_irq				@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK
	bne	fast_work_pending

	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr

	@ fast_restore_user_regs
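	@ lr is loaded with writeback so that sp ends up pointing at the
	@ saved PC; r1-lr then come from the user register bank via the
	@ ldm "^" form.  The mov r0, r0 below is a deliberate nop: older
	@ cores must not touch banked registers in the instruction
	@ immediately following an ldm {..}^.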
	ldr	r1, [sp, #S_OFF + S_PSR]	@ get calling cpsr
	ldr	lr, [sp, #S_OFF + S_PC]!	@ get pc
	msr	spsr_cxsf, r1			@ save in spsr_svc
	ldmdb	sp, {r1 - lr}^			@ get calling r1 - lr
	mov	r0, r0
	add	sp, sp, #S_FRAME_SIZE - S_PC
	movs	pc, lr				@ return & move spsr_svc into cpsr
 UNWIND(.fnend		)

/*
 * Ok, we need to do extra processing; enter the slow path.
 */
fast_work_pending:
	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0
work_pending:
	tst	r1, #_TIF_NEED_RESCHED
	bne	work_resched
	tst	r1, #_TIF_SIGPENDING
	beq	no_work_pending
	mov	r0, sp				@ 'regs'
	mov	r2, why				@ 'syscall'
	bl	do_notify_resume
	b	ret_slow_syscall		@ Check work again

work_resched:
	bl	schedule
/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 */
ENTRY(ret_to_user)
ret_slow_syscall:
	disable_irq				@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK
	bne	work_pending
no_work_pending:
	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr

	@ slow_restore_user_regs
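	@ (same register-restore idiom as ret_fast_syscall above)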
	ldr	r1, [sp, #S_PSR]		@ get calling cpsr
	ldr	lr, [sp, #S_PC]!		@ get pc
	msr	spsr_cxsf, r1			@ save in spsr_svc
	ldmdb	sp, {r0 - lr}^			@ get calling r0 - lr
	mov	r0, r0
	add	sp, sp, #S_FRAME_SIZE - S_PC
	movs	pc, lr				@ return & move spsr_svc into cpsr
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	get_thread_info tsk
	ldr	r1, [tsk, #TI_FLAGS]		@ check for syscall tracing
	mov	why, #1
	tst	r1, #_TIF_SYSCALL_TRACE		@ are we tracing syscalls?
	beq	ret_slow_syscall
	mov	r1, sp
	mov	r0, #1				@ trace exit [IP = 1]
	bl	syscall_trace
	b	ret_slow_syscall
ENDPROC(ret_from_fork)

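/*
 * calls.S is included twice.  Here CALL() only counts entries, so
 * NR_syscalls ends up equal to the number of table slots, e.g.:
 *	CALL(sys_foo) -> .equ NR_syscalls, NR_syscalls+1
 * At sys_call_table below it is included again, with CALL() emitting
 * the entries themselves:
 *	CALL(sys_foo) -> .long sys_foo
 */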
	.equ NR_syscalls,0
#define CALL(x) .equ NR_syscalls,NR_syscalls+1
#include "calls.S"
#undef CALL
#define CALL(x) .long x

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
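/*
 * gcc's -pg makes every traceable function call mcount on entry.
 * With dynamic ftrace, mcount_call and ftrace_call below are global
 * labels precisely so the ftrace core can patch the "bl ftrace_stub"
 * instructions at run time.
 */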
ENTRY(mcount)
	stmdb sp!, {r0-r3, lr}
	mov r0, lr
	sub r0, r0, #MCOUNT_INSN_SIZE

	.globl mcount_call
mcount_call:
	bl ftrace_stub
	ldr lr, [fp, #-4]			@ restore lr
	ldmia sp!, {r0-r3, pc}

ENTRY(ftrace_caller)
	stmdb sp!, {r0-r3, lr}
	ldr r1, [fp, #-4]
	mov r0, lr
	sub r0, r0, #MCOUNT_INSN_SIZE

	.globl ftrace_call
ftrace_call:
	bl ftrace_stub
	ldr lr, [fp, #-4]			@ restore lr
	ldmia sp!, {r0-r3, pc}

#else

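/*
 * Static version: every mcount call site is live, so first check
 * whether a tracer is actually registered (ftrace_trace_function
 * still pointing at ftrace_stub means "no") before taking the slow
 * path, which calls the tracer with the instrumented function's
 * address in r0 and its caller's lr in r1.
 */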
ENTRY(mcount)
	stmdb sp!, {r0-r3, lr}
	ldr r0, =ftrace_trace_function
	ldr r2, [r0]
	adr r0, ftrace_stub
	cmp r0, r2
	bne trace
	ldr lr, [fp, #-4]			@ restore lr
	ldmia sp!, {r0-r3, pc}

trace:
	ldr r1, [fp, #-4]			@ lr of instrumented routine
	mov r0, lr
	sub r0, r0, #MCOUNT_INSN_SIZE
	mov lr, pc
	mov pc, r2
	mov lr, r1				@ restore lr
	ldmia sp!, {r0-r3, pc}

#endif /* CONFIG_DYNAMIC_FTRACE */

	.globl ftrace_stub
ftrace_stub:
	mov pc, lr

#endif /* CONFIG_FUNCTION_TRACER */

/*=============================================================================
 * SWI handler
 *-----------------------------------------------------------------------------
 */

	/* If we're optimising for StrongARM the resulting code won't
	   run on an ARM7 and we can save a couple of instructions.
								--pb */
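	/*
	 * The A710() fragments below guard against the ARM710 taking
	 * a SWI trap when the instruction at lr - 4 is not actually a
	 * SWI (a known ARM710 problem); in that case .Larm710bug
	 * restores the registers and retries the instruction.
	 */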
#ifdef CONFIG_CPU_ARM710
#define A710(code...) code
.Larm710bug:
	ldmia	sp, {r0 - lr}^			@ Get calling r0 - lr
	mov	r0, r0
	add	sp, sp, #S_FRAME_SIZE
	subs	pc, lr, #4
#else
#define A710(code...)
#endif

	.align	5
ENTRY(vector_swi)
	sub	sp, sp, #S_FRAME_SIZE
	stmia	sp, {r0 - r12}			@ Calling r0 - r12
	add	r8, sp, #S_PC
	stmdb	r8, {sp, lr}^			@ Calling sp, lr
	mrs	r8, spsr			@ called from non-FIQ mode, so ok.
	str	lr, [sp, #S_PC]			@ Save calling PC
	str	r8, [sp, #S_PSR]		@ Save CPSR
	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
	zero_fp

	/*
	 * Get the system call number.
	 */

#if defined(CONFIG_OABI_COMPAT)

	/*
	 * If we have CONFIG_OABI_COMPAT then we need to look at the swi
	 * value to determine if it is an EABI or an old ABI call.
	 */
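	/*
	 * EABI tasks issue "swi 0" with the number in r7; old ABI tasks
	 * encode it in the SWI instruction itself (comment field =
	 * 0x900000 + NR), which is why it is fetched from lr - 4.
	 * Thumb cannot encode a 24-bit immediate, so Thumb callers
	 * always use r7.  E.g. for sys_write (NR 4):
	 *
	 *	EABI:	mov r7, #4		OABI:	swi #0x900004
	 *		swi #0
	 */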
#ifdef CONFIG_ARM_THUMB
	tst	r8, #PSR_T_BIT
	movne	r10, #0				@ no thumb OABI emulation
	ldreq	r10, [lr, #-4]			@ get SWI instruction
#else
	ldr	r10, [lr, #-4]			@ get SWI instruction
  A710(	and	ip, r10, #0x0f000000		@ check for SWI		)
  A710(	teq	ip, #0x0f000000						)
  A710(	bne	.Larm710bug						)
#endif

#elif defined(CONFIG_AEABI)

	/*
	 * Pure EABI user space always puts the syscall number into scno (r7).
	 */
  A710(	ldr	ip, [lr, #-4]			@ get SWI instruction	)
  A710(	and	ip, ip, #0x0f000000		@ check for SWI		)
  A710(	teq	ip, #0x0f000000						)
  A710(	bne	.Larm710bug						)

#elif defined(CONFIG_ARM_THUMB)

	/* Legacy ABI only, possibly thumb mode. */
	tst	r8, #PSR_T_BIT			@ this is SPSR from save_user_regs
	addne	scno, r7, #__NR_SYSCALL_BASE	@ put OS number in
	ldreq	scno, [lr, #-4]

#else

	/* Legacy ABI only. */
	ldr	scno, [lr, #-4]			@ get SWI instruction
  A710(	and	ip, scno, #0x0f000000		@ check for SWI		)
  A710(	teq	ip, #0x0f000000						)
  A710(	bne	.Larm710bug						)

#endif

#ifdef CONFIG_ALIGNMENT_TRAP
	ldr	ip, __cr_alignment
	ldr	ip, [ip]
	mcr	p15, 0, ip, c1, c0		@ update control register
#endif
	enable_irq

	get_thread_info tsk
	adr	tbl, sys_call_table		@ load syscall table pointer
	ldr	ip, [tsk, #TI_FLAGS]		@ check for syscall tracing

#if defined(CONFIG_OABI_COMPAT)
	/*
	 * If the swi argument is zero, this is an EABI call and we do nothing.
	 *
	 * If this is an old ABI call, get the syscall number into scno and
	 * get the old ABI syscall table address.
	 */
	bics	r10, r10, #0xff000000
	eorne	scno, r10, #__NR_OABI_SYSCALL_BASE
	ldrne	tbl, =sys_oabi_call_table
#elif !defined(CONFIG_AEABI)
	bic	scno, scno, #0xff000000		@ mask off SWI op-code
	eor	scno, scno, #__NR_SYSCALL_BASE	@ check OS number
#endif

	stmdb	sp!, {r4, r5}			@ push fifth and sixth args
	tst	ip, #_TIF_SYSCALL_TRACE		@ are we tracing syscalls?
	bne	__sys_trace

	cmp	scno, #NR_syscalls		@ check upper syscall limit
	adr	lr, ret_fast_syscall		@ return address
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine

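	/*
	 * Out-of-range numbers fall through to here: ARM private calls
	 * (__ARM_NR_BASE and up) are handed to arm_syscall(), anything
	 * else gets -ENOSYS from sys_ni_syscall().
	 */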
	add	r1, sp, #S_OFF
2:	mov	why, #0				@ no longer a real syscall
	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
	eor	r0, scno, #__NR_SYSCALL_BASE	@ put OS number back
	bcs	arm_syscall
	b	sys_ni_syscall			@ not private func
ENDPROC(vector_swi)

	/*
	 * This is the really slow path.  We're going to be doing
	 * context switches, and waiting for our parent to respond.
	 */
__sys_trace:
	mov	r2, scno
	add	r1, sp, #S_OFF
	mov	r0, #0				@ trace entry [IP = 0]
	bl	syscall_trace

	adr	lr, __sys_trace_return		@ return address
	mov	scno, r0			@ syscall number (possibly new)
	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
	cmp	scno, #NR_syscalls		@ check upper syscall limit
	ldmccia	r1, {r0 - r3}			@ have to reload r0 - r3
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
	b	2b

__sys_trace_return:
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
	mov	r2, scno
	mov	r1, sp
	mov	r0, #1				@ trace exit [IP = 1]
	bl	syscall_trace
	b	ret_slow_syscall

	.align	5
#ifdef CONFIG_ALIGNMENT_TRAP
	.type	__cr_alignment, #object
__cr_alignment:
	.word	cr_alignment
#endif
	.ltorg

/*
 * This is the syscall table declaration for native ABI syscalls.
 * With EABI a couple of syscalls are obsolete and defined as sys_ni_syscall.
 */
#define ABI(native, compat) native
#ifdef CONFIG_AEABI
#define OBSOLETE(syscall) sys_ni_syscall
#else
#define OBSOLETE(syscall) syscall
#endif

	.type	sys_call_table, #object
ENTRY(sys_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE

/*============================================================================
 * Special system call wrappers
 */
@ r0 = syscall number
@ r8 = syscall table
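@ This implements the old "syscall" indirect system call: r0 carries
@ the real syscall number, so the remaining arguments shift down one
@ register and the stacked fifth/sixth args are refreshed from r5/r6.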
sys_syscall:
		bic	scno, r0, #__NR_OABI_SYSCALL_BASE
		cmp	scno, #__NR_syscall - __NR_SYSCALL_BASE
		cmpne	scno, #NR_syscalls	@ check range
		stmloia	sp, {r5, r6}		@ shuffle args
		movlo	r0, r1
		movlo	r1, r2
		movlo	r2, r3
		movlo	r3, r4
		ldrlo	pc, [tbl, scno, lsl #2]
		b	sys_ni_syscall
ENDPROC(sys_syscall)

sys_fork_wrapper:
		add	r0, sp, #S_OFF
		b	sys_fork
ENDPROC(sys_fork_wrapper)

sys_vfork_wrapper:
		add	r0, sp, #S_OFF
		b	sys_vfork
ENDPROC(sys_vfork_wrapper)

sys_execve_wrapper:
		add	r3, sp, #S_OFF
		b	sys_execve
ENDPROC(sys_execve_wrapper)

sys_clone_wrapper:
		add	ip, sp, #S_OFF
		str	ip, [sp, #4]
		b	sys_clone
ENDPROC(sys_clone_wrapper)

sys_sigsuspend_wrapper:
		add	r3, sp, #S_OFF
		b	sys_sigsuspend
ENDPROC(sys_sigsuspend_wrapper)

sys_rt_sigsuspend_wrapper:
		add	r2, sp, #S_OFF
		b	sys_rt_sigsuspend
ENDPROC(sys_rt_sigsuspend_wrapper)

sys_sigreturn_wrapper:
		add	r0, sp, #S_OFF
		b	sys_sigreturn
ENDPROC(sys_sigreturn_wrapper)

sys_rt_sigreturn_wrapper:
		add	r0, sp, #S_OFF
		b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

sys_sigaltstack_wrapper:
		ldr	r2, [sp, #S_OFF + S_SP]
		b	do_sigaltstack
ENDPROC(sys_sigaltstack_wrapper)

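/*
 * The old ABI struct statfs64 is padded out to 88 bytes, while the
 * EABI version is 84; fix up the size argument so the C code sees
 * the value it expects.
 */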
sys_statfs64_wrapper:
		teq	r1, #88
		moveq	r1, #84
		b	sys_statfs64
ENDPROC(sys_statfs64_wrapper)

sys_fstatfs64_wrapper:
		teq	r1, #88
		moveq	r1, #84
		b	sys_fstatfs64
ENDPROC(sys_fstatfs64_wrapper)

/*
 * Note: off_4k (r5) is always in units of 4K.  If we can't do the
 * requested offset because it is not page-aligned, we return -EINVAL.
 */
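	@ With pages larger than 4K, the 4K-unit offset must convert
	@ exactly to whole pages: PGOFF_MASK catches the low bits that
	@ the shift from 4K units down to page units would discard.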
sys_mmap2:
#if PAGE_SHIFT > 12
		tst	r5, #PGOFF_MASK
		moveq	r5, r5, lsr #PAGE_SHIFT - 12
		streq	r5, [sp, #4]
		beq	do_mmap2
		mov	r0, #-EINVAL
		mov	pc, lr
#else
		str	r5, [sp, #4]
		b	do_mmap2
#endif
ENDPROC(sys_mmap2)

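/*
 * Prefetch abort helpers: pabort_ifar reads the fault address from
 * the IFAR (CP15 c6, c0, 2) on CPUs that have one, then falls
 * through; pabort_noifar simply returns with r0 untouched.
 */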
ENTRY(pabort_ifar)
		mrc	p15, 0, r0, cr6, cr0, 2
ENTRY(pabort_noifar)
		mov	pc, lr
ENDPROC(pabort_ifar)
ENDPROC(pabort_noifar)

#ifdef CONFIG_OABI_COMPAT

/*
 * These syscalls take their 64-bit arguments in different registers
 * under the two ABIs: EABI requires a 64-bit value to sit in an
 * even/odd register pair, while the old ABI packed arguments with no
 * padding, so each wrapper below shuffles registers to match.
 */

sys_oabi_pread64:
		stmia	sp, {r3, r4}
		b	sys_pread64
ENDPROC(sys_oabi_pread64)

sys_oabi_pwrite64:
		stmia	sp, {r3, r4}
		b	sys_pwrite64
ENDPROC(sys_oabi_pwrite64)

sys_oabi_truncate64:
		mov	r3, r2
		mov	r2, r1
		b	sys_truncate64
ENDPROC(sys_oabi_truncate64)

sys_oabi_ftruncate64:
		mov	r3, r2
		mov	r2, r1
		b	sys_ftruncate64
ENDPROC(sys_oabi_ftruncate64)

sys_oabi_readahead:
		str	r3, [sp]
		mov	r3, r2
		mov	r2, r1
		b	sys_readahead
ENDPROC(sys_oabi_readahead)

/*
 * Let's declare a second syscall table for old ABI binaries
 * using the compatibility syscall entries.
 */
#define ABI(native, compat) compat
#define OBSOLETE(syscall) syscall

	.type	sys_oabi_call_table, #object
ENTRY(sys_oabi_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE

#endif
