/* -*- mode: asm -*-
 *
 *  linux/arch/m68k/kernel/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file README.legal in the main directory of this archive
 * for more details.
 *
 * Linux/m68k support by Hamish Macdonald
 *
 * 68060 fixes by Jesper Skov
 *
 */

/*
 * entry.S  contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 */

/*
 * 12/03/96 Jes: Currently we only support m68k single-cpu systems, so
 *               all pointers that used to be 'current' are now entry
 *               number 0 in the 'current_set' list.
 *
 *  6/05/00 RZ:	 added writeback completion after return from sighandler
 *		 for 68040
 */

#include <linux/linkage.h>
#include <asm/errno.h>
#include <asm/setup.h>
#include <asm/traps.h>
#include <asm/unistd.h>
#include <asm/asm-offsets.h>
#include <asm/entry.h>

.globl system_call, buserr, trap, resume
.globl sys_call_table
.globl __sys_fork, __sys_clone, __sys_vfork
.globl bad_interrupt
.globl auto_irqhandler_fixup
.globl user_irqvec_fixup

.text
ENTRY(__sys_fork)
	SAVE_SWITCH_STACK
	jbsr	sys_fork
	lea     %sp@(24),%sp
	rts

ENTRY(__sys_clone)
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)
	jbsr	m68k_clone
	lea     %sp@(28),%sp
	rts

ENTRY(__sys_vfork)
	SAVE_SWITCH_STACK
	jbsr	sys_vfork
	lea     %sp@(24),%sp
	rts

ENTRY(__sys_clone3)
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)
	jbsr	m68k_clone3
	lea	%sp@(28),%sp
	rts
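	| Note on the wrappers above: SAVE_SWITCH_STACK pushes the six
	| callee-saved registers (24 bytes), and the lea pops them again
	| without a RESTORE_SWITCH_STACK because the C call leaves them
	| untouched; the saved copy is presumably only there for
	| copy_thread() to hand to the child.  clone/clone3 push one extra
	| argument (the regs pointer), hence their 28-byte adjustment.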

ENTRY(sys_sigreturn)
	SAVE_SWITCH_STACK
	movel	%sp,%a1			  	| switch_stack pointer
	lea	%sp@(SWITCH_STACK_SIZE),%a0	| pt_regs pointer
	lea     %sp@(-84),%sp			| leave a gap
	movel	%a1,%sp@-
	movel	%a0,%sp@-
	jbsr	do_sigreturn
	jra	1f				| shared with rt_sigreturn()

ENTRY(sys_rt_sigreturn)
	SAVE_SWITCH_STACK
	movel	%sp,%a1			  	| switch_stack pointer
	lea	%sp@(SWITCH_STACK_SIZE),%a0	| pt_regs pointer
	lea     %sp@(-84),%sp			| leave a gap
	movel	%a1,%sp@-
	movel	%a0,%sp@-
	| stack contents:
	|   [original pt_regs address] [original switch_stack address]
	|   [gap] [switch_stack] [pt_regs] [exception frame]
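	| the 84-byte gap presumably gives do_{rt_,}sigreturn() room to
	| rebuild the stack with a (possibly larger) exception frame taken
	| from the signal context, as laid out below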
	jbsr	do_rt_sigreturn

1:
	| stack contents now:
	|   [original pt_regs address] [original switch_stack address]
	|   [unused part of the gap] [moved switch_stack] [moved pt_regs]
	|   [replacement exception frame]
	| return value of do_{rt_,}sigreturn() points to moved switch_stack.

	movel	%d0,%sp				| discard the leftover junk
	RESTORE_SWITCH_STACK
	| stack contents now is just [syscall return address] [pt_regs] [frame]
	| return pt_regs.d0
	movel	%sp@(PT_OFF_D0+4),%d0
	rts

ENTRY(buserr)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%sp,%sp@-		| stack frame pointer argument
	jbsr	buserr_c
	addql	#4,%sp
	jra	ret_from_exception

ENTRY(trap)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%sp,%sp@-		| stack frame pointer argument
	jbsr	trap_c
	addql	#4,%sp
	jra	ret_from_exception

	| After a fork we jump here directly from resume,
	| so that %d1 contains the previous task
	| schedule_tail now used regardless of CONFIG_SMP
ENTRY(ret_from_fork)
	movel	%d1,%sp@-
	jsr	schedule_tail
	addql	#4,%sp
	jra	ret_from_exception

ENTRY(ret_from_kernel_thread)
	| a3 contains the kernel thread payload, d7 - its argument
	movel	%d1,%sp@-
	jsr	schedule_tail
	movel	%d7,(%sp)
	jsr	%a3@
	addql	#4,%sp
	jra	ret_from_exception

#if defined(CONFIG_COLDFIRE) || !defined(CONFIG_MMU)

#ifdef TRAP_DBG_INTERRUPT

.globl dbginterrupt
ENTRY(dbginterrupt)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%sp,%sp@- 		/* stack frame pointer argument */
	jsr	dbginterrupt_c
	addql	#4,%sp
	jra	ret_from_exception
#endif

ENTRY(reschedule)
	/* save top of frame */
	pea	%sp@
	jbsr	set_esp0
	addql	#4,%sp
	pea	ret_from_exception
	jmp	schedule

ENTRY(ret_from_user_signal)
	moveq #__NR_sigreturn,%d0
	trap #0

ENTRY(ret_from_user_rt_signal)
	movel #__NR_rt_sigreturn,%d0
	trap #0

#else

do_trace_entry:
	movel	#-ENOSYS,%sp@(PT_OFF_D0)| needed for strace
	subql	#4,%sp
	SAVE_SWITCH_STACK
	jbsr	syscall_trace_enter
	RESTORE_SWITCH_STACK
	addql	#4,%sp
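	| syscall_trace_enter() returns -1 in %d0 when the syscall is to be
	| skipped (e.g. when the tracer or seccomp denies it); adding 1 lets
	| the jeq below catch that without a cmpil.  Otherwise the syscall
	| number is reloaded from the saved orig_d0, since %d0 was clobbered
	| by the C call (and a tracer may have rewritten the number), and is
	| range-checked again.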
	addql	#1,%d0			| optimization for cmpil #-1,%d0
	jeq	ret_from_syscall
	movel	%sp@(PT_OFF_ORIG_D0),%d0
	cmpl	#NR_syscalls,%d0
	jcs	syscall
	jra	ret_from_syscall
badsys:
	movel	#-ENOSYS,%sp@(PT_OFF_D0)
	jra	ret_from_syscall

do_trace_exit:
	subql	#4,%sp
	SAVE_SWITCH_STACK
	jbsr	syscall_trace_leave
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	jra	.Lret_from_exception

ENTRY(system_call)
	SAVE_ALL_SYS

	GET_CURRENT(%d1)
	movel	%d1,%a1

	| save top of frame
	movel	%sp,%curptr@(TASK_THREAD+THREAD_ESP0)

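	| The thread flags are tested bytewise: m68k is big-endian, so the
	| byte at TINFO_FLAGS+2 holds flag bits 15..8.  The tstb/jmi pair
	| therefore branches on flag bit 15 and btst #5 on flag bit 13,
	| matching the comments below.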
	| syscall trace?
	tstb	%a1@(TINFO_FLAGS+2)
	jmi	do_trace_entry
	| seccomp filter active?
	btst	#5,%a1@(TINFO_FLAGS+2)
	bnes	do_trace_entry
	cmpl	#NR_syscalls,%d0
	jcc	badsys
syscall:
	jbsr	@(sys_call_table,%d0:l:4)@(0)
	movel	%d0,%sp@(PT_OFF_D0)	| save the return value
ret_from_syscall:
	|oriw	#0x0700,%sr
	movel	%curptr@(TASK_STACK),%a1
	movew	%a1@(TINFO_FLAGS+2),%d0
	jne	syscall_exit_work
1:	RESTORE_ALL

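	| The work dispatch below uses shifts rather than individual bit
	| tests: the first lslw moves the syscall-trace flag into carry and
	| the delayed-trace flag into the sign bit, and the second lslw
	| leaves a non-zero result only if one of the low, signal-related
	| flags is set; a zero result means the remaining work is a
	| reschedule.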
syscall_exit_work:
	btst	#5,%sp@(PT_OFF_SR)	| check if returning to kernel
	bnes	1b			| if so, skip resched, signals
	lslw	#1,%d0
	jcs	do_trace_exit
	jmi	do_delayed_trace
	lslw	#8,%d0
	jne	do_signal_return
	pea	resume_userspace
	jra	schedule


ENTRY(ret_from_exception)
.Lret_from_exception:
	btst	#5,%sp@(PT_OFF_SR)	| check if returning to kernel
	bnes	1f			| if so, skip resched, signals
	| only allow interrupts when we are really the last one on the
	| kernel stack, otherwise stack overflow can occur during
	| heavy interrupt load
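	| (ALLOWINT clears the interrupt priority mask bits in %sr, so the
	| andw below drops the IPL back to 0 and lets interrupts in again)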
	andw	#ALLOWINT,%sr

resume_userspace:
	movel	%curptr@(TASK_STACK),%a1
	moveb	%a1@(TINFO_FLAGS+3),%d0
	jne	exit_work
1:	RESTORE_ALL

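	| %d0 holds the low byte of the thread flags (bits 7..0); the lslb
	| below shifts the need_resched bit out, so a non-zero result means
	| pending signal/notification work and zero means just a reschedule.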
exit_work:
	| save top of frame
	movel	%sp,%curptr@(TASK_THREAD+THREAD_ESP0)
	lslb	#1,%d0
	jne	do_signal_return
	pea	resume_userspace
	jra	schedule


do_signal_return:
	|andw	#ALLOWINT,%sr
	subql	#4,%sp			| dummy return address
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)
	bsrl	do_notify_resume
	addql	#4,%sp
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	jbra	resume_userspace

do_delayed_trace:
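	| clear the trace bit saved in the exception frame and deliver
	| SIGTRAP; the arguments are pushed right to left for
	| send_sig(LSIGTRAP, current, 1)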
	bclr	#7,%sp@(PT_OFF_SR)	| clear trace bit in SR
	pea	1			| send SIGTRAP
	movel	%curptr,%sp@-
	pea	LSIGTRAP
	jbsr	send_sig
	addql	#8,%sp
	addql	#4,%sp
	jbra	resume_userspace


/* This is the main interrupt handler for autovector interrupts */

ENTRY(auto_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
					|  put exception # in d0
	bfextu	%sp@(PT_OFF_FORMATVEC){#4,#10},%d0
	subw	#VEC_SPUR,%d0

	movel	%sp,%sp@-
	movel	%d0,%sp@-		|  put vector # on stack
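	| ". + 2" skips the 2-byte jsr opcode, so the label addresses the
	| 32-bit call target, which platform setup code can patch at boot
	| (see m68k_setup_auto_interrupt())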
auto_irqhandler_fixup = . + 2
	jsr	do_IRQ			|  process the IRQ
	addql	#8,%sp			|  pop parameters off stack
	jra	ret_from_exception

/* Handler for user defined interrupt vectors */

ENTRY(user_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
					|  put exception # in d0
	bfextu	%sp@(PT_OFF_FORMATVEC){#4,#10},%d0
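	| as above, ". + 2" addresses the 16-bit immediate of the subw below,
	| which platform setup code patches when its user vectors do not
	| start at VEC_USER (see m68k_setup_user_interrupt())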
user_irqvec_fixup = . + 2
	subw	#VEC_USER,%d0

	movel	%sp,%sp@-
	movel	%d0,%sp@-		|  put vector # on stack
	jsr	do_IRQ			|  process the IRQ
	addql	#8,%sp			|  pop parameters off stack
	jra	ret_from_exception

/* Handler for uninitialized and spurious interrupts */

ENTRY(bad_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)

	movel	%sp,%sp@-
	jsr	handle_badint
	addql	#4,%sp
	jra	ret_from_exception

resume:
	/*
	 * Beware - when entering resume, prev (the current task) is
	 * in a0, next (the new task) is in a1, so don't change these
	 * registers until their contents are no longer needed.
	 */

	/* save sr */
	movew	%sr,%a0@(TASK_THREAD+THREAD_SR)

	/* save fs (sfc,%dfc) (may be pointing to kernel memory) */
	movec	%sfc,%d0
	movew	%d0,%a0@(TASK_THREAD+THREAD_FC)

	/* save usp */
	/* it is better to use a movel here instead of a movew 8*) */
	movec	%usp,%d0
	movel	%d0,%a0@(TASK_THREAD+THREAD_USP)

	/* save non-scratch registers on stack */
	SAVE_SWITCH_STACK

	/* save current kernel stack pointer */
	movel	%sp,%a0@(TASK_THREAD+THREAD_KSP)

	/* save floating point context */
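	/*
	 * A NULL fsave frame (zero format byte, or zero status byte on the
	 * '060) means the FPU holds no live state for this task, so the
	 * data and control registers are only dumped for a non-null frame.
	 */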
#ifndef CONFIG_M68KFPU_EMU_ONLY
#ifdef CONFIG_M68KFPU_EMU
	tstl	m68k_fputype
	jeq	3f
#endif
	fsave	%a0@(TASK_THREAD+THREAD_FPSTATE)

#if defined(CONFIG_M68060)
#if !defined(CPU_M68060_ONLY)
	btst	#3,m68k_cputype+3
	beqs	1f
#endif
	/* The 060 FPU keeps status in bits 15-8 of the first longword */
	tstb	%a0@(TASK_THREAD+THREAD_FPSTATE+2)
	jeq	3f
#if !defined(CPU_M68060_ONLY)
	jra	2f
#endif
#endif /* CONFIG_M68060 */
#if !defined(CPU_M68060_ONLY)
1:	tstb	%a0@(TASK_THREAD+THREAD_FPSTATE)
	jeq	3f
#endif
2:	fmovemx	%fp0-%fp7,%a0@(TASK_THREAD+THREAD_FPREG)
	fmoveml	%fpcr/%fpsr/%fpiar,%a0@(TASK_THREAD+THREAD_FPCNTL)
3:
#endif	/* CONFIG_M68KFPU_EMU_ONLY */
	/* Return previous task in %d1 */
	movel	%curptr,%d1

	/* switch to new task (a1 contains new task) */
	movel	%a1,%curptr

	/* restore floating point context */
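	/*
	 * Mirror of the save path above: the register file is reloaded only
	 * for a non-null frame, and the final frestore puts the FPU back
	 * into the saved (possibly null) state.
	 */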
#ifndef CONFIG_M68KFPU_EMU_ONLY
#ifdef CONFIG_M68KFPU_EMU
	tstl	m68k_fputype
	jeq	4f
#endif
#if defined(CONFIG_M68060)
#if !defined(CPU_M68060_ONLY)
	btst	#3,m68k_cputype+3
	beqs	1f
#endif
	/* The 060 FPU keeps status in bits 15-8 of the first longword */
	tstb	%a1@(TASK_THREAD+THREAD_FPSTATE+2)
	jeq	3f
#if !defined(CPU_M68060_ONLY)
	jra	2f
#endif
#endif /* CONFIG_M68060 */
#if !defined(CPU_M68060_ONLY)
1:	tstb	%a1@(TASK_THREAD+THREAD_FPSTATE)
	jeq	3f
#endif
2:	fmovemx	%a1@(TASK_THREAD+THREAD_FPREG),%fp0-%fp7
	fmoveml	%a1@(TASK_THREAD+THREAD_FPCNTL),%fpcr/%fpsr/%fpiar
3:	frestore %a1@(TASK_THREAD+THREAD_FPSTATE)
4:
#endif	/* CONFIG_M68KFPU_EMU_ONLY */

	/* restore the kernel stack pointer */
	movel	%a1@(TASK_THREAD+THREAD_KSP),%sp

	/* restore non-scratch registers */
	RESTORE_SWITCH_STACK

	/* restore user stack pointer */
	movel	%a1@(TASK_THREAD+THREAD_USP),%a0
	movel	%a0,%usp

	/* restore fs (sfc,%dfc) */
	movew	%a1@(TASK_THREAD+THREAD_FC),%a0
	movec	%a0,%sfc
	movec	%a0,%dfc

	/* restore status register */
	movew	%a1@(TASK_THREAD+THREAD_SR),%sr

	rts
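	/*
	 * The rts continues on the new task's kernel stack, at the return
	 * address recorded when that task last entered resume (or at
	 * ret_from_fork / ret_from_kernel_thread for a freshly created
	 * task, as noted above).
	 */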

#endif /* CONFIG_MMU && !CONFIG_COLDFIRE */