/* -*- mode: asm -*-
 *
 *  linux/arch/m68k/kernel/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file README.legal in the main directory of this archive
 * for more details.
 *
 * Linux/m68k support by Hamish Macdonald
 *
 * 68060 fixes by Jesper Skov
 *
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 */

/*
 * 12/03/96 Jes: Currently we only support m68k single-cpu systems, so
 *               all pointers that used to be 'current' are now entry
 *               number 0 in the 'current_set' list.
 *
 *  6/05/00 RZ:	 added writeback completion after return from sighandler
 *		 for 68040
 */

#include <linux/linkage.h>
#include <asm/errno.h>
#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/traps.h>
#include <asm/unistd.h>
#include <asm/asm-offsets.h>
#include <asm/entry.h>

.globl system_call, buserr, trap, resume
.globl sys_call_table
.globl sys_fork, sys_clone, sys_vfork
.globl ret_from_interrupt, bad_interrupt
.globl auto_irqhandler_fixup
.globl user_irqvec_fixup

.text
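/*
 * fork/clone/vfork wrappers: save the callee-saved registers with
 * SAVE_SWITCH_STACK and pass a pointer to the saved pt_regs (just above
 * the switch stack) to the C implementations m68k_fork/m68k_clone/
 * m68k_vfork.
 */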
ENTRY(sys_fork)
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)
	jbsr	m68k_fork
	addql	#4,%sp
	RESTORE_SWITCH_STACK
	rts

ENTRY(sys_clone)
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)
	jbsr	m68k_clone
	addql	#4,%sp
	RESTORE_SWITCH_STACK
	rts

ENTRY(sys_vfork)
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)
	jbsr	m68k_vfork
	addql	#4,%sp
	RESTORE_SWITCH_STACK
	rts

ENTRY(sys_sigreturn)
	SAVE_SWITCH_STACK
	jbsr	do_sigreturn
	RESTORE_SWITCH_STACK
	rts

ENTRY(sys_rt_sigreturn)
	SAVE_SWITCH_STACK
	jbsr	do_rt_sigreturn
	RESTORE_SWITCH_STACK
	rts

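/*
 * Bus error and general exception entry: save the full interrupt frame
 * and hand a pointer to it to the C handlers buserr_c()/trap_c().
 */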
ENTRY(buserr)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%sp,%sp@-		| stack frame pointer argument
	jbsr	buserr_c
	addql	#4,%sp
	jra	ret_from_exception

ENTRY(trap)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%sp,%sp@-		| stack frame pointer argument
	jbsr	trap_c
	addql	#4,%sp
	jra	ret_from_exception

	| After a fork we jump here directly from resume,
	| so that %d1 contains the previous task
	| schedule_tail is now used regardless of CONFIG_SMP
ENTRY(ret_from_fork)
	movel	%d1,%sp@-
	jsr	schedule_tail
	addql	#4,%sp
	jra	ret_from_exception

ENTRY(ret_from_kernel_thread)
	| %a3 contains the kernel thread payload, %d7 its argument
	movel	%d1,%sp@-
	jsr	schedule_tail
	movel	%d7,(%sp)
	jsr	%a3@
	addql	#4,%sp
	jra	ret_from_exception

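/*
 * ColdFire and nommu configurations only get the few helpers below from
 * this file; the full classic-MMU syscall, exception-return and context
 * switch paths follow after the #else.
 */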
#if defined(CONFIG_COLDFIRE) || !defined(CONFIG_MMU)

#ifdef TRAP_DBG_INTERRUPT

.globl dbginterrupt
ENTRY(dbginterrupt)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%sp,%sp@-		/* stack frame pointer argument */
	jsr	dbginterrupt_c
	addql	#4,%sp
	jra	ret_from_exception
#endif

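/*
 * Jumped to when a reschedule is needed on the way back to user space:
 * record the current exception frame via set_esp0(), then tail-call
 * schedule() with ret_from_exception as the return address.
 */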
ENTRY(reschedule)
	/* save top of frame */
	pea	%sp@
	jbsr	set_esp0
	addql	#4,%sp
	pea	ret_from_exception
	jmp	schedule

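/*
 * User-mode return stubs for signal handlers: they simply issue the
 * sigreturn/rt_sigreturn system calls via trap #0.
 */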
ENTRY(ret_from_user_signal)
	moveq	#__NR_sigreturn,%d0
	trap	#0

ENTRY(ret_from_user_rt_signal)
	movel	#__NR_rt_sigreturn,%d0
	trap	#0

#else

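/*
 * Traced system call entry: preload -ENOSYS as the return value (so a
 * suppressed syscall reports a failure), let the tracer run via
 * syscall_trace(), then re-read the possibly modified syscall number
 * from orig_d0 and re-validate it before dispatching.
 */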
do_trace_entry:
	movel	#-ENOSYS,%sp@(PT_OFF_D0)| needed for strace
	subql	#4,%sp
	SAVE_SWITCH_STACK
	jbsr	syscall_trace
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	movel	%sp@(PT_OFF_ORIG_D0),%d0
	cmpl	#NR_syscalls,%d0
	jcs	syscall
badsys:
	movel	#-ENOSYS,%sp@(PT_OFF_D0)
	jra	ret_from_syscall

do_trace_exit:
	subql	#4,%sp
	SAVE_SWITCH_STACK
	jbsr	syscall_trace
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	jra	.Lret_from_exception

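/*
 * Return path after signal delivery: report the syscall exit to the
 * tracer if syscall tracing is active, drop the dummy return address,
 * and on 68040 complete any writebacks pending in a bus-error frame
 * before taking the normal exception return.
 */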
ENTRY(ret_from_signal)
	movel	%curptr@(TASK_STACK),%a1
	tstb	%a1@(TINFO_FLAGS+2)
	jge	1f
	jbsr	syscall_trace
1:	RESTORE_SWITCH_STACK
	addql	#4,%sp
/* on 68040 complete pending writebacks if any */
#ifdef CONFIG_M68040
	bfextu	%sp@(PT_OFF_FORMATVEC){#0,#4},%d0
	subql	#7,%d0				| bus error frame ?
	jbne	1f
	movel	%sp,%sp@-
	jbsr	berr_040cleanup
	addql	#4,%sp
1:
#endif
	jra	.Lret_from_exception

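/*
 * System call entry: %d0 holds the syscall number (the arguments are
 * already part of the saved register frame).  Record the frame pointer
 * in thread.esp0, divert to do_trace_entry if syscall tracing is on,
 * validate the number and dispatch through sys_call_table.
 */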
ENTRY(system_call)
	SAVE_ALL_SYS

	GET_CURRENT(%d1)
	movel	%d1,%a1

	| save top of frame
	movel	%sp,%curptr@(TASK_THREAD+THREAD_ESP0)

	| syscall trace?
	tstb	%a1@(TINFO_FLAGS+2)
	jmi	do_trace_entry
	cmpl	#NR_syscalls,%d0
	jcc	badsys
syscall:
	jbsr	@(sys_call_table,%d0:l:4)@(0)
	movel	%d0,%sp@(PT_OFF_D0)	| save the return value
ret_from_syscall:
	|oriw	#0x0700,%sr
	movel	%curptr@(TASK_STACK),%a1
	movew	%a1@(TINFO_FLAGS+2),%d0
	jne	syscall_exit_work
1:	RESTORE_ALL

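/*
 * Work flags were set on syscall exit.  If we are returning to the
 * kernel just restore the frame.  Otherwise the flag word is decoded by
 * shifting: the top bit selects syscall-exit tracing, the next one
 * delayed (single-step) trace, the low bits pending signal/notify work,
 * and if none of those is set the remaining work is a reschedule.
 */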
syscall_exit_work:
	btst	#5,%sp@(PT_OFF_SR)	| check if returning to kernel
	bnes	1b			| if so, skip resched, signals
	lslw	#1,%d0
	jcs	do_trace_exit
	jmi	do_delayed_trace
	lslw	#8,%d0
	jne	do_signal_return
	pea	resume_userspace
	jra	schedule


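/*
 * Common exception return: when going back to the kernel just restore
 * the saved frame; when going back to user space, re-enable interrupts
 * and handle any pending work (signals, reschedule) before RESTORE_ALL.
 */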
ENTRY(ret_from_exception)
.Lret_from_exception:
	btst	#5,%sp@(PT_OFF_SR)	| check if returning to kernel
	bnes	1f			| if so, skip resched, signals
	| only allow interrupts when we are really the last one on the
	| kernel stack, otherwise stack overflow can occur during
	| heavy interrupt load
	andw	#ALLOWINT,%sr

resume_userspace:
	movel	%curptr@(TASK_STACK),%a1
	moveb	%a1@(TINFO_FLAGS+3),%d0
	jne	exit_work
1:	RESTORE_ALL

exit_work:
	| save top of frame
	movel	%sp,%curptr@(TASK_THREAD+THREAD_ESP0)
	lslb	#1,%d0
	jne	do_signal_return
	pea	resume_userspace
	jra	schedule


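/*
 * Pending signal or notify-resume work: build a switch-stack frame
 * (plus a dummy return address so the layout matches the syscall case),
 * call do_notify_resume() with a pointer to the saved registers, then
 * retry resume_userspace.
 */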
do_signal_return:
	|andw	#ALLOWINT,%sr
	subql	#4,%sp			| dummy return address
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)
	bsrl	do_notify_resume
	addql	#4,%sp
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	jbra	resume_userspace

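/*
 * A single-step trace was deferred across the system call: clear the
 * trace bit in the saved SR and deliver SIGTRAP to the current task.
 */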
do_delayed_trace:
	bclr	#7,%sp@(PT_OFF_SR)	| clear trace bit in SR
	pea	1			| send SIGTRAP
	movel	%curptr,%sp@-
	pea	LSIGTRAP
	jbsr	send_sig
	addql	#8,%sp
	addql	#4,%sp
	jbra	resume_userspace


/* This is the main interrupt handler for autovector interrupts */

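/*
 * The handler bumps the interrupt count kept in the thread_info preempt
 * counter, extracts the vector number from the frame's format/vector
 * word and passes it to do_IRQ() together with a pointer to the saved
 * registers.  The *_fixup labels mark instruction operands that the
 * interrupt setup code may patch at run time.
 */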
ENTRY(auto_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%d0,%a1
	addqb	#1,%a1@(TINFO_PREEMPT+1)
					|  put exception # in d0
	bfextu	%sp@(PT_OFF_FORMATVEC){#4,#10},%d0
	subw	#VEC_SPUR,%d0

	movel	%sp,%sp@-
	movel	%d0,%sp@-		|  put vector # on stack
auto_irqhandler_fixup = . + 2
	jsr	do_IRQ			|  process the IRQ
	addql	#8,%sp			|  pop parameters off stack

ret_from_interrupt:
	movel	%curptr@(TASK_STACK),%a1
	subqb	#1,%a1@(TINFO_PREEMPT+1)
	jeq	ret_from_last_interrupt
2:	RESTORE_ALL

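/*
 * Reached when the interrupt count drops back to zero.  If the
 * interrupted context was running with interrupts masked, just restore
 * it; otherwise run pending softirqs before the normal exception
 * return.
 */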
	ALIGN
ret_from_last_interrupt:
	moveq	#(~ALLOWINT>>8)&0xff,%d0
	andb	%sp@(PT_OFF_SR),%d0
	jne	2b

	/* check if we need to do software interrupts */
	tstl	irq_stat+CPUSTAT_SOFTIRQ_PENDING
	jeq	.Lret_from_exception
	pea	ret_from_exception
	jra	do_softirq

/* Handler for user defined interrupt vectors */

ENTRY(user_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%d0,%a1
	addqb	#1,%a1@(TINFO_PREEMPT+1)
					|  put exception # in d0
	bfextu	%sp@(PT_OFF_FORMATVEC){#4,#10},%d0
user_irqvec_fixup = . + 2
	subw	#VEC_USER,%d0

	movel	%sp,%sp@-
	movel	%d0,%sp@-		|  put vector # on stack
	jsr	do_IRQ			|  process the IRQ
	addql	#8,%sp			|  pop parameters off stack

	movel	%curptr@(TASK_STACK),%a1
	subqb	#1,%a1@(TINFO_PREEMPT+1)
	jeq	ret_from_last_interrupt
	RESTORE_ALL

/* Handler for uninitialized and spurious interrupts */

ENTRY(bad_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%d0,%a1
	addqb	#1,%a1@(TINFO_PREEMPT+1)

	movel	%sp,%sp@-
	jsr	handle_badint
	addql	#4,%sp

	movel	%curptr@(TASK_STACK),%a1
	subqb	#1,%a1@(TINFO_PREEMPT+1)
	jeq	ret_from_last_interrupt
	RESTORE_ALL


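/*
 * resume() is the low-level part of switch_to(): save the outgoing
 * task's SR, fs, usp, callee-saved registers and FPU state into its
 * thread_struct, switch %curptr and the kernel stack, then restore the
 * same state for the incoming task.  The previous task is returned in
 * %d1 (see ret_from_fork above).
 */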
resume:
	/*
	 * Beware - when entering resume, prev (the current task) is
	 * in a0, next (the new task) is in a1, so don't change these
	 * registers until their contents are no longer needed.
	 */

	/* save sr */
	movew	%sr,%a0@(TASK_THREAD+THREAD_SR)

	/* save fs (sfc,%dfc) (may be pointing to kernel memory) */
	movec	%sfc,%d0
	movew	%d0,%a0@(TASK_THREAD+THREAD_FS)

	/* save usp */
	/* it is better to use a movel here instead of a movew 8*) */
	movec	%usp,%d0
	movel	%d0,%a0@(TASK_THREAD+THREAD_USP)

	/* save non-scratch registers on stack */
	SAVE_SWITCH_STACK

	/* save current kernel stack pointer */
	movel	%sp,%a0@(TASK_THREAD+THREAD_KSP)

	/* save floating point context */
#ifndef CONFIG_M68KFPU_EMU_ONLY
#ifdef CONFIG_M68KFPU_EMU
	tstl	m68k_fputype
	jeq	3f
#endif
	fsave	%a0@(TASK_THREAD+THREAD_FPSTATE)

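	/*
	 * fsave stores a NULL frame when the task has no live FPU state;
	 * in that case the programmer-visible FP registers need not be
	 * saved (the 060 check below looks at a different byte of the
	 * frame).
	 */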
#if defined(CONFIG_M68060)
#if !defined(CPU_M68060_ONLY)
	btst	#3,m68k_cputype+3
	beqs	1f
#endif
	/* The 060 FPU keeps status in bits 15-8 of the first longword */
	tstb	%a0@(TASK_THREAD+THREAD_FPSTATE+2)
	jeq	3f
#if !defined(CPU_M68060_ONLY)
	jra	2f
#endif
#endif /* CONFIG_M68060 */
#if !defined(CPU_M68060_ONLY)
1:	tstb	%a0@(TASK_THREAD+THREAD_FPSTATE)
	jeq	3f
#endif
2:	fmovemx	%fp0-%fp7,%a0@(TASK_THREAD+THREAD_FPREG)
	fmoveml	%fpcr/%fpsr/%fpiar,%a0@(TASK_THREAD+THREAD_FPCNTL)
3:
#endif	/* CONFIG_M68KFPU_EMU_ONLY */
	/* Return previous task in %d1 */
	movel	%curptr,%d1

	/* switch to new task (a1 contains new task) */
	movel	%a1,%curptr

	/* restore floating point context */
#ifndef CONFIG_M68KFPU_EMU_ONLY
#ifdef CONFIG_M68KFPU_EMU
	tstl	m68k_fputype
	jeq	4f
#endif
#if defined(CONFIG_M68060)
#if !defined(CPU_M68060_ONLY)
	btst	#3,m68k_cputype+3
	beqs	1f
#endif
	/* The 060 FPU keeps status in bits 15-8 of the first longword */
	tstb	%a1@(TASK_THREAD+THREAD_FPSTATE+2)
	jeq	3f
#if !defined(CPU_M68060_ONLY)
	jra	2f
#endif
#endif /* CONFIG_M68060 */
#if !defined(CPU_M68060_ONLY)
1:	tstb	%a1@(TASK_THREAD+THREAD_FPSTATE)
	jeq	3f
#endif
2:	fmovemx	%a1@(TASK_THREAD+THREAD_FPREG),%fp0-%fp7
	fmoveml	%a1@(TASK_THREAD+THREAD_FPCNTL),%fpcr/%fpsr/%fpiar
3:	frestore %a1@(TASK_THREAD+THREAD_FPSTATE)
4:
#endif	/* CONFIG_M68KFPU_EMU_ONLY */

	/* restore the kernel stack pointer */
	movel	%a1@(TASK_THREAD+THREAD_KSP),%sp

	/* restore non-scratch registers */
	RESTORE_SWITCH_STACK

	/* restore user stack pointer */
	movel	%a1@(TASK_THREAD+THREAD_USP),%a0
	movel	%a0,%usp

	/* restore fs (sfc,%dfc) */
	movew	%a1@(TASK_THREAD+THREAD_FS),%a0
	movec	%a0,%sfc
	movec	%a0,%dfc

	/* restore status register */
	movew	%a1@(TASK_THREAD+THREAD_SR),%sr

	rts

#endif /* CONFIG_MMU && !CONFIG_COLDFIRE */