/*
 *  Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 *  Copyright (C) 2003 - 2008  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 */

! NOTE:
! GNU as (as of 2.9.1) changes bf/s into bt/s and bra when the branch
! target is too far away, but this causes an illegal slot exception.

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 * NOTE: This code uses a convention that instructions in the delay slot
 * of a transfer-control instruction are indented by an extra space, thus:
 *
 *    jmp	@k0	    ! control-transfer instruction
 *     ldc	k1, ssr     ! delay slot
 *
 * Stack layout in 'ret_from_syscall':
 * 	ptrace needs to have all regs on the stack.
 *	If the order here is changed, it needs to be
 *	updated in ptrace.c and ptrace.h.
 *
 *	r0
 *      ...
 *	r15 = stack pointer
 *	spc
 *	pr
 *	ssr
 *	gbr
 *	mach
 *	macl
 *	syscall #
 *
 */
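
/*
 * For reference: a minimal C sketch of the layout listed above, assuming
 * it mirrors struct pt_regs in arch/sh/include/asm/ptrace_32.h.
 * regs[0..15] hold r0..r15 (r15 being the stack pointer), pc and sr are
 * saved from spc and ssr, and tra holds the trapa value used as the
 * syscall number slot:
 *
 *	struct pt_regs {
 *		unsigned long regs[16];
 *		unsigned long pc;
 *		unsigned long pr;
 *		unsigned long sr;
 *		unsigned long gbr;
 *		unsigned long mach;
 *		unsigned long macl;
 *		long tra;
 *	};
 *
 * The OFF_R0..OFF_TRA constants used below are byte offsets into this
 * layout on the kernel stack.
 */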
#include <asm/dwarf.h>

#if defined(CONFIG_PREEMPT)
#  define preempt_stop()	cli ; TRACE_IRQS_OFF
#else
#  define preempt_stop()
#  define resume_kernel		__restore_all
#endif
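
/*
 * Without CONFIG_PREEMPT there is nothing extra to do on a kernel-mode
 * return, so preempt_stop() is a no-op and resume_kernel simply aliases
 * __restore_all.
 */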


	.align	2
ENTRY(exception_error)
	!
	TRACE_IRQS_ON
	sti
	mov.l	1f, r0
	jmp	@r0
	 nop

	.align	2
1:	.long	do_exception_error

	.align	2
ret_from_exception:
	CFI_STARTPROC simple
	CFI_DEF_CFA r14, 0
	CFI_REL_OFFSET 17, 64
	CFI_REL_OFFSET 15, 60
	CFI_REL_OFFSET 14, 56
	CFI_REL_OFFSET 13, 52
	CFI_REL_OFFSET 12, 48
	CFI_REL_OFFSET 11, 44
	CFI_REL_OFFSET 10, 40
	CFI_REL_OFFSET 9, 36
	CFI_REL_OFFSET 8, 32
	preempt_stop()
ENTRY(ret_from_irq)
	!
	mov	#OFF_SR, r0
	mov.l	@(r0,r15), r0	! get status register
	shll	r0
	shll	r0		! kernel space?
	get_current_thread_info r8, r0
	bt	resume_kernel	! Yes, it's from kernel, go back soon

#ifdef CONFIG_PREEMPT
	bra	resume_userspace
	 nop
ENTRY(resume_kernel)
	cli
	TRACE_IRQS_OFF
	mov.l	@(TI_PRE_COUNT,r8), r0	! current_thread_info->preempt_count
	tst	r0, r0
	bf	noresched
need_resched:
	mov.l	@(TI_FLAGS,r8), r0	! current_thread_info->flags
	tst	#_TIF_NEED_RESCHED, r0	! need_resched set?
	bt	noresched

	mov	#OFF_SR, r0
	mov.l	@(r0,r15), r0		! get status register
	shlr	r0
	and	#(0xf0>>1), r0		! interrupts off (exception path)?
	cmp/eq	#(0xf0>>1), r0
	bt	noresched
	mov.l	1f, r0
	jsr	@r0			! call preempt_schedule_irq
	 nop
	bra	need_resched
	 nop

noresched:
	bra	__restore_all
	 nop

	.align 2
1:	.long	preempt_schedule_irq
#endif

ENTRY(resume_userspace)
	! r8: current_thread_info
	cli
	TRACE_IRQS_OFF
	mov.l	@(TI_FLAGS,r8), r0		! current_thread_info->flags
	tst	#(_TIF_WORK_MASK & 0xff), r0
	bt/s	__restore_all
	 tst	#_TIF_NEED_RESCHED, r0

	.align	2
work_pending:
	! r0: current_thread_info->flags
	! r8: current_thread_info
	! t:  result of "tst	#_TIF_NEED_RESCHED, r0"
	bf/s	work_resched
	 tst	#(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME), r0
work_notifysig:
	bt/s	__restore_all
	 mov	r15, r4
	mov	r12, r5		! set arg1(save_r0)
	mov	r0, r6
	sti
	mov.l	2f, r1
	mov.l	3f, r0
	jmp	@r1
	 lds	r0, pr
work_resched:
	mov.l	1f, r1
	jsr	@r1				! schedule
	 nop
	cli
	TRACE_IRQS_OFF
	!
	mov.l	@(TI_FLAGS,r8), r0		! current_thread_info->flags
	tst	#(_TIF_WORK_MASK & 0xff), r0
	bt	__restore_all
	bra	work_pending
	 tst	#_TIF_NEED_RESCHED, r0

	.align	2
1:	.long	schedule
2:	.long	do_notify_resume
3:	.long	resume_userspace

	.align	2
syscall_exit_work:
	! r0: current_thread_info->flags
	! r8: current_thread_info
	tst	#(_TIF_WORK_SYSCALL_MASK & 0xff), r0
	bt/s	work_pending
	 tst	#_TIF_NEED_RESCHED, r0
	TRACE_IRQS_ON
	sti
	mov	r15, r4
	mov.l	8f, r0			! do_syscall_trace_leave
	jsr	@r0
	 nop
	bra	resume_userspace
	 nop

	.align	2
syscall_trace_entry:
	!			Yes, it is traced.
	mov	r15, r4
	mov.l	7f, r11		! Call do_syscall_trace_enter, which notifies
	jsr	@r11		! the tracer (and may clobber R0-R7).
	 nop
	mov.l	r0, @(OFF_R0,r15)	! Save return value
	!			Reload the syscall number and arguments
	!			(R3-R7) from the kernel stack, where the
	!			tracing parent may have modified them using
	!			ptrace(POKEUSR).  (R0-R2 are reloaded from
	!			the kernel stack by syscall_call below, so
	!			they don't need to be reloaded here.)  This
	!			allows the parent to rewrite system calls
	!			and their arguments on the fly.
	mov.l	@(OFF_R4,r15), r4   ! arg0
	mov.l	@(OFF_R5,r15), r5
	mov.l	@(OFF_R6,r15), r6
	mov.l	@(OFF_R7,r15), r7   ! arg3
	mov.l	@(OFF_R3,r15), r3   ! syscall_nr
	!
	mov.l	2f, r10			! Number of syscalls
	cmp/hs	r10, r3
	bf	syscall_call
	mov	#-ENOSYS, r0
	bra	syscall_exit
	 mov.l	r0, @(OFF_R0,r15)	! Return value

__restore_all:
	mov	#OFF_SR, r0
	mov.l	@(r0,r15), r0	! get status register

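	! SR.IMASK (bits 7:4) being 0xf means the context we are returning
	! to runs with interrupts blocked; pick the matching IRQ trace hook.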
	shlr2	r0
	and	#0x3c, r0
	cmp/eq	#0x3c, r0
	bt	1f
	TRACE_IRQS_ON
	bra	2f
	 nop
1:
	TRACE_IRQS_OFF
2:
	mov.l	3f, r0
	jmp	@r0
	 nop

	.align	2
3:	.long	restore_all

	.align	2
syscall_badsys:			! Bad syscall number
	get_current_thread_info r8, r0
	mov	#-ENOSYS, r0
	bra	resume_userspace
	 mov.l	r0, @(OFF_R0,r15)	! Return value

/*
 * The main debug trap handler.
 *
 * r8 = TRA (not the trap number!)
 *
 * Note: This assumes that the trapa value is left in its original
 * form (without the shlr2 shift) so that the calculation of the jump
 * table offset remains a simple in-place mask.
 */
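/*
 * Roughly, in C (a sketch, not generated code):
 *
 *	void (*handler)(void);
 *
 *	handler = *(void (**)(void))((char *)debug_trap_table
 *				     + (tra & (0xf << 2)));
 *	handler();
 *
 * i.e. debug_trap_table[(tra >> 2) & 0xf]: the unshifted TRA value is
 * used directly as a byte offset into the table of 4-byte pointers.
 */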
debug_trap:
	mov	r8, r0
	and	#(0xf << 2), r0
	mov.l	1f, r8
	add	r0, r8
	mov.l	@r8, r8
	jsr	@r8
	 nop
	bra	__restore_all
	 nop
	CFI_ENDPROC

	.align	2
1:	.long	debug_trap_table

/*
 * Syscall interface:
 *
 *	Syscall #: R3
 *	Arguments #0 to #3: R4--R7
 *	Arguments #4 to #6: R0, R1, R2
 *	TRA: (number of arguments + ABI revision) x 4
 *
 * This code also handles delegating other traps to the BIOS/gdb stub
 * according to:
 *
 * Trap number
 * (TRA>>2)	Purpose
 * --------	-------
 * 0x00-0x0f	original SH-3/4 syscall ABI (not in general use).
 * 0x10-0x1f	general SH-3/4 syscall ABI.
 * 0x20-0x2f	syscall ABI for SH-2 parts.
 * 0x30-0x3f	debug traps used by the kernel.
 * 0x40-0xff	Not supported by all parts, so left unhandled.
 *
 * Note: When we're first called, the TRA value must be shifted
 * right 2 bits in order to get the value that was used as the "trapa"
 * argument.
 */
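
/*
 * Illustration: how userspace might issue a three-argument system call
 * under the general SH-3/4 ABI described above.  This is a hedged sketch
 * in the style of the old _syscall3() wrappers, assuming the trapa value
 * is "0x10 base + number of arguments"; the name sh_syscall3 is made up
 * and errno handling is left out:
 *
 *	static inline long sh_syscall3(long nr, long a, long b, long c)
 *	{
 *		register long r3 __asm__("r3") = nr;
 *		register long r4 __asm__("r4") = a;
 *		register long r5 __asm__("r5") = b;
 *		register long r6 __asm__("r6") = c;
 *		register long r0 __asm__("r0");
 *
 *		__asm__ __volatile__("trapa #0x13"
 *				     : "=r" (r0)
 *				     : "r" (r3), "r" (r4), "r" (r5), "r" (r6)
 *				     : "memory");
 *		return r0;
 *	}
 *
 * With trapa #0x13 (3 arguments + the 0x10 ABI base), the TRA register
 * read in system_call below holds 0x13 << 2.
 */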

	.align	2
	.globl	ret_from_fork
ret_from_fork:
	mov.l	1f, r8
	jsr	@r8
	 mov	r0, r4
	bra	syscall_exit
	 nop

	.align	2
	.globl	ret_from_kernel_thread
ret_from_kernel_thread:
	mov.l	1f, r8
	jsr	@r8
	 mov	r0, r4
	mov.l	@(OFF_R5,r15), r5   ! fn
	jsr	@r5
	 mov.l	@(OFF_R4,r15), r4   ! arg
	bra	syscall_exit
	 nop

	.align	2
1:	.long	schedule_tail

/*
 * The poorly named main trapa decode and dispatch routine, which routes
 * system calls and debug traps through their respective jump tables.
 */
ENTRY(system_call)
	setup_frame_reg
#if !defined(CONFIG_CPU_SH2)
	mov.l	1f, r9
	mov.l	@r9, r8		! Read from TRA (Trap Address) Register
#endif

	mov	#OFF_TRA, r10
	add	r15, r10
	mov.l	r8, @r10		! set TRA value to tra

	/*
	 * Check the trap type
	 */
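	! TRA values above ((0x20 << 2) - 1) are not handled as system
	! calls here; hand them to debug_trap.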
	mov	#((0x20 << 2) - 1), r9
	cmp/hi	r9, r8
	bt/s	debug_trap		! it's a debug trap..
	 nop

	TRACE_IRQS_ON
	sti

	!
	get_current_thread_info r8, r10
	mov.l	@(TI_FLAGS,r8), r8
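	! The work mask is wider than the 8-bit immediate a mov can carry,
	! so test it in low and high halves.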
	mov	#(_TIF_WORK_SYSCALL_MASK & 0xff), r10
	mov	#(_TIF_WORK_SYSCALL_MASK >> 8), r9
	tst	r10, r8
	shll8	r9
	bf	syscall_trace_entry
	tst	r9, r8
	bf	syscall_trace_entry
	!
	mov.l	2f, r8			! Number of syscalls
	cmp/hs	r8, r3
	bt	syscall_badsys
	!
syscall_call:
	shll2	r3		! x4
	mov.l	3f, r8		! Load the address of sys_call_table
	add	r8, r3
	mov.l	@r3, r8
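	! Arguments #4 to #6 arrive in the saved r0..r2.  The SH C calling
	! convention passes only the first four arguments in registers
	! (r4..r7), so pass the remaining ones on the stack.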
	mov.l	@(OFF_R2,r15), r2
	mov.l	@(OFF_R1,r15), r1
	mov.l	@(OFF_R0,r15), r0
	mov.l	r2, @-r15
	mov.l	r1, @-r15
	mov.l	r0, @-r15
	jsr	@r8	    	! jump to specific syscall handler
	 nop
	add	#12, r15
	mov.l	@(OFF_R0,r15), r12		! save r0
	mov.l	r0, @(OFF_R0,r15)		! save the return value
	!
syscall_exit:
	cli
	TRACE_IRQS_OFF
	!
	get_current_thread_info r8, r0
	mov.l	@(TI_FLAGS,r8), r0		! current_thread_info->flags
	tst	#(_TIF_ALLWORK_MASK & 0xff), r0
	mov	#(_TIF_ALLWORK_MASK >> 8), r1
	bf	syscall_exit_work
	shlr8	r0
	tst	r0, r1
	bf	syscall_exit_work
	bra	__restore_all
	 nop
	.align	2
#if !defined(CONFIG_CPU_SH2)
1:	.long	TRA
#endif
2:	.long	NR_syscalls
3:	.long	sys_call_table
7:	.long	do_syscall_trace_enter
8:	.long	do_syscall_trace_leave
