/* -*- mode: asm -*-
 *
 *  linux/arch/m68k/kernel/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file README.legal in the main directory of this archive
 * for more details.
 *
 * Linux/m68k support by Hamish Macdonald
 *
 * 68060 fixes by Jesper Skov
 *
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 */

/*
 * 12/03/96 Jes: Currently we only support m68k single-cpu systems, so
 *               all pointers that used to be 'current' are now entry
 *               number 0 in the 'current_set' list.
 *
 *  6/05/00 RZ:  added writeback completion after return from sighandler
 *               for 68040
 */

#include <linux/linkage.h>
#include <asm/errno.h>
#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/traps.h>
#include <asm/unistd.h>
#include <asm/asm-offsets.h>
#include <asm/entry.h>

.globl system_call, buserr, trap, resume
.globl sys_call_table
.globl __sys_fork, __sys_clone, __sys_vfork
.globl bad_interrupt
.globl auto_irqhandler_fixup
.globl user_irqvec_fixup

.text

| fork/clone/vfork wrappers: save the extra (callee-saved) registers on the
| kernel stack so the child's copied thread state is complete, call the C
| implementation, then discard the saved registers again.
| NOTE(review): the "lea %sp@(24),%sp" pops what SAVE_SWITCH_STACK pushed;
| 24/28 must match SWITCH_STACK_SIZE from <asm/entry.h> -- defined elsewhere.
ENTRY(__sys_fork)
	SAVE_SWITCH_STACK
	jbsr	sys_fork
	lea	%sp@(24),%sp		| drop the switch-stack frame
	rts

ENTRY(__sys_clone)
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)	| pass pt_regs pointer to m68k_clone
	jbsr	m68k_clone
	lea	%sp@(28),%sp		| drop argument + switch-stack frame
	rts

ENTRY(__sys_vfork)
	SAVE_SWITCH_STACK
	jbsr	sys_vfork
	lea	%sp@(24),%sp		| drop the switch-stack frame
	rts

| Signal-return syscalls: hand both the switch_stack and the pt_regs
| pointers to the C code so the full user register set can be restored.
ENTRY(sys_sigreturn)
	SAVE_SWITCH_STACK
	movel	%sp,%sp@-		  | switch_stack pointer
	pea	%sp@(SWITCH_STACK_SIZE+4) | pt_regs pointer
	jbsr	do_sigreturn
	addql	#8,%sp			  | pop the two arguments
	RESTORE_SWITCH_STACK
	rts

ENTRY(sys_rt_sigreturn)
	SAVE_SWITCH_STACK
	movel	%sp,%sp@-		  | switch_stack pointer
	pea	%sp@(SWITCH_STACK_SIZE+4) | pt_regs pointer
	jbsr	do_rt_sigreturn
	addql	#8,%sp			  | pop the two arguments
	RESTORE_SWITCH_STACK
	rts

| Bus-error exception: build a full pt_regs frame and let buserr_c sort it out.
ENTRY(buserr)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%sp,%sp@-		| stack frame pointer argument
	jbsr	buserr_c
	addql	#4,%sp
	jra	ret_from_exception

| Generic trap/exception entry: dispatch to the C handler trap_c.
ENTRY(trap)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%sp,%sp@-		| stack frame pointer argument
	jbsr	trap_c
	addql	#4,%sp
	jra	ret_from_exception

	| After a fork we jump here directly from resume,
	| so that %d1 contains the previous task
	| schedule_tail now used regardless of CONFIG_SMP
ENTRY(ret_from_fork)
	movel	%d1,%sp@-		| previous task -> argument
	jsr	schedule_tail
	addql	#4,%sp
	jra	ret_from_exception

ENTRY(ret_from_kernel_thread)
	| a3 contains the kernel thread payload, d7 - its argument
	movel	%d1,%sp@-		| previous task -> schedule_tail arg
	jsr	schedule_tail
	movel	%d7,(%sp)		| reuse the slot for the payload arg
	jsr	%a3@			| run the thread function
	addql	#4,%sp
	jra	ret_from_exception

#if defined(CONFIG_COLDFIRE) || !defined(CONFIG_MMU)

#ifdef TRAP_DBG_INTERRUPT

| Debug interrupt entry (ColdFire/no-MMU only).
.globl dbginterrupt
ENTRY(dbginterrupt)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%sp,%sp@-		/* stack frame pointer argument */
	jsr	dbginterrupt_c
	addql	#4,%sp
	jra	ret_from_exception
#endif

| Reschedule helper: record the frame top, then tail-call schedule() with
| ret_from_exception pushed as its return address.
ENTRY(reschedule)
	/* save top of frame */
	pea	%sp@
	jbsr	set_esp0
	addql	#4,%sp
	pea	ret_from_exception	| schedule() will "return" here
	jmp	schedule

| Trampolines placed on the user stack to re-enter the kernel after a
| signal handler returns: they simply issue the sigreturn syscalls.
ENTRY(ret_from_user_signal)
	moveq	#__NR_sigreturn,%d0
	trap	#0

ENTRY(ret_from_user_rt_signal)
	movel	#__NR_rt_sigreturn,%d0
	trap	#0

#else

| Syscall entry path when the tracer flag is set: notify the tracer,
| then re-validate and re-dispatch the (possibly modified) syscall number.
do_trace_entry:
	movel	#-ENOSYS,%sp@(PT_OFF_D0)| needed for strace
	subql	#4,%sp			| dummy return address
	SAVE_SWITCH_STACK
	jbsr	syscall_trace
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	movel	%sp@(PT_OFF_ORIG_D0),%d0 | reload (possibly changed) nr
	cmpl	#NR_syscalls,%d0
	jcs	syscall			| in range: dispatch it
badsys:
	movel	#-ENOSYS,%sp@(PT_OFF_D0)
	jra	ret_from_syscall

| Tracer notification on the syscall exit path.
do_trace_exit:
	subql	#4,%sp			| dummy return address
	SAVE_SWITCH_STACK
	jbsr	syscall_trace
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	jra	.Lret_from_exception

| Return path after signal delivery; on entry a switch-stack frame plus a
| dummy return address are still on the stack.
ENTRY(ret_from_signal)
	movel	%curptr@(TASK_STACK),%a1
	tstb	%a1@(TINFO_FLAGS+2)	| tracing? (flag byte -- see asm-offsets)
	jge	1f
	jbsr	syscall_trace
1:	RESTORE_SWITCH_STACK
	addql	#4,%sp			| drop dummy return address
/* on 68040 complete pending writebacks if any */
#ifdef CONFIG_M68040
	bfextu	%sp@(PT_OFF_FORMATVEC){#0,#4},%d0 | extract frame format
	subql	#7,%d0			| bus error frame ?
	jbne	1f
	movel	%sp,%sp@-
	jbsr	berr_040cleanup
	addql	#4,%sp
1:
#endif
	jra	.Lret_from_exception

| Main system-call entry (trap #0).  %d0 holds the syscall number.
ENTRY(system_call)
	SAVE_ALL_SYS

	GET_CURRENT(%d1)
	movel	%d1,%a1

	| save top of frame
	movel	%sp,%curptr@(TASK_THREAD+THREAD_ESP0)

	| syscall trace?
	tstb	%a1@(TINFO_FLAGS+2)
	jmi	do_trace_entry
	cmpl	#NR_syscalls,%d0
	jcc	badsys			| out of range -> -ENOSYS
syscall:
	jbsr	@(sys_call_table,%d0:l:4)@(0)	| indirect via table[d0]
	movel	%d0,%sp@(PT_OFF_D0)	| save the return value
ret_from_syscall:
	|oriw	#0x0700,%sr
	movel	%curptr@(TASK_STACK),%a1
	movew	%a1@(TINFO_FLAGS+2),%d0	| any work-pending flags set?
	jne	syscall_exit_work
1:	RESTORE_ALL

| Slow syscall-exit path: tracing, delayed single-step trace, signals,
| or a reschedule.  The lsl/condition-code tricks test individual flag
| bits of %d0 (loaded from TINFO_FLAGS above).
syscall_exit_work:
	btst	#5,%sp@(PT_OFF_SR)	| check if returning to kernel
	bnes	1b			| if so, skip resched, signals
	lslw	#1,%d0			| shift bit 15 into C, bit 14 into N
	jcs	do_trace_exit
	jmi	do_delayed_trace
	lslw	#8,%d0			| test the low flag byte
	jne	do_signal_return
	pea	resume_userspace	| schedule() returns here
	jra	schedule


ENTRY(ret_from_exception)
.Lret_from_exception:
	btst	#5,%sp@(PT_OFF_SR)	| check if returning to kernel
	bnes	1f			| if so, skip resched, signals
	| only allow interrupts when we are really the last one on the
	| kernel stack, otherwise stack overflow can occur during
	| heavy interrupt load
	andw	#ALLOWINT,%sr

resume_userspace:
	movel	%curptr@(TASK_STACK),%a1
	moveb	%a1@(TINFO_FLAGS+3),%d0	| low flag byte: signal/resched work
	jne	exit_work
1:	RESTORE_ALL

| Work pending before returning to user space: either deliver a signal
| or reschedule, then loop back through resume_userspace.
exit_work:
	| save top of frame
	movel	%sp,%curptr@(TASK_THREAD+THREAD_ESP0)
	lslb	#1,%d0			| shift the signal-pending bit out
	jne	do_signal_return
	pea	resume_userspace	| schedule() returns here
	jra	schedule


do_signal_return:
	|andw	#ALLOWINT,%sr
	subql	#4,%sp			| dummy return address
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)	| pt_regs pointer argument
	bsrl	do_notify_resume
	addql	#4,%sp
	RESTORE_SWITCH_STACK
	addql	#4,%sp			| drop dummy return address
	jbra	resume_userspace

| Deliver the SIGTRAP that was postponed because the traced insn was a
| syscall trap: clear the trace bit and send_sig(LSIGTRAP, current, 1).
do_delayed_trace:
	bclr	#7,%sp@(PT_OFF_SR)	| clear trace bit in SR
	pea	1			| send SIGTRAP
	movel	%curptr,%sp@-
	pea	LSIGTRAP
	jbsr	send_sig
	addql	#8,%sp
	addql	#4,%sp
	jbra	resume_userspace


/* This is the main interrupt handler for autovector interrupts */

ENTRY(auto_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
					|  put exception # in d0
	bfextu	%sp@(PT_OFF_FORMATVEC){#4,#10},%d0
	subw	#VEC_SPUR,%d0		|  rebase to IRQ number

	movel	%sp,%sp@-
	movel	%d0,%sp@-		|  put vector # on stack
auto_irqhandler_fixup = . + 2		|  patch point: jsr target operand
	jsr	do_IRQ			|  process the IRQ
	addql	#8,%sp			|  pop parameters off stack
	jra	ret_from_exception

/* Handler for user defined interrupt vectors */

ENTRY(user_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
					|  put exception # in d0
	bfextu	%sp@(PT_OFF_FORMATVEC){#4,#10},%d0
user_irqvec_fixup = . + 2		|  patch point: subw immediate operand
	subw	#VEC_USER,%d0

	movel	%sp,%sp@-
	movel	%d0,%sp@-		|  put vector # on stack
	jsr	do_IRQ			|  process the IRQ
	addql	#8,%sp			|  pop parameters off stack
	jra	ret_from_exception

/* Handler for uninitialized and spurious interrupts */

ENTRY(bad_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)

	movel	%sp,%sp@-		| pt_regs pointer argument
	jsr	handle_badint
	addql	#4,%sp
	jra	ret_from_exception

| Context switch: save the outgoing task's state into its thread_struct,
| swap %curptr, and restore the incoming task's state.
resume:
	/*
	 * Beware - when entering resume, prev (the current task) is
	 * in a0, next (the new task) is in a1, so don't change these
	 * registers until their contents are no longer needed.
	 */

	/* save sr */
	movew	%sr,%a0@(TASK_THREAD+THREAD_SR)

	/* save fs (sfc,%dfc) (may be pointing to kernel memory) */
	movec	%sfc,%d0
	movew	%d0,%a0@(TASK_THREAD+THREAD_FS)

	/* save usp */
	/* it is better to use a movel here instead of a movew 8*) */
	movec	%usp,%d0
	movel	%d0,%a0@(TASK_THREAD+THREAD_USP)

	/* save non-scratch registers on stack */
	SAVE_SWITCH_STACK

	/* save current kernel stack pointer */
	movel	%sp,%a0@(TASK_THREAD+THREAD_KSP)

	/* save floating point context */
#ifndef CONFIG_M68KFPU_EMU_ONLY
#ifdef CONFIG_M68KFPU_EMU
	tstl	m68k_fputype		| emulated FPU? then skip hardware save
	jeq	3f
#endif
	fsave	%a0@(TASK_THREAD+THREAD_FPSTATE)

	/* Skip the fmovem block below when the FPU state frame is NULL
	   (FPU idle); the NULL test differs between the 060 and earlier
	   CPUs, hence the #ifdef maze. */
#if defined(CONFIG_M68060)
#if !defined(CPU_M68060_ONLY)
	btst	#3,m68k_cputype+3	| running on a 68060?
	beqs	1f
#endif
	/* The 060 FPU keeps status in bits 15-8 of the first longword */
	tstb	%a0@(TASK_THREAD+THREAD_FPSTATE+2)
	jeq	3f
#if !defined(CPU_M68060_ONLY)
	jra	2f
#endif
#endif /* CONFIG_M68060 */
#if !defined(CPU_M68060_ONLY)
1:	tstb	%a0@(TASK_THREAD+THREAD_FPSTATE)
	jeq	3f
#endif
2:	fmovemx	%fp0-%fp7,%a0@(TASK_THREAD+THREAD_FPREG)
	fmoveml	%fpcr/%fpsr/%fpiar,%a0@(TASK_THREAD+THREAD_FPCNTL)
3:
#endif	/* CONFIG_M68KFPU_EMU_ONLY */
	/* Return previous task in %d1 */
	movel	%curptr,%d1

	/* switch to new task (a1 contains new task) */
	movel	%a1,%curptr

	/* restore floating point context */
#ifndef CONFIG_M68KFPU_EMU_ONLY
#ifdef CONFIG_M68KFPU_EMU
	tstl	m68k_fputype		| emulated FPU? then skip hardware restore
	jeq	4f
#endif
	/* Same NULL-frame test as on the save side, now against the
	   incoming task's state in a1. */
#if defined(CONFIG_M68060)
#if !defined(CPU_M68060_ONLY)
	btst	#3,m68k_cputype+3	| running on a 68060?
	beqs	1f
#endif
	/* The 060 FPU keeps status in bits 15-8 of the first longword */
	tstb	%a1@(TASK_THREAD+THREAD_FPSTATE+2)
	jeq	3f
#if !defined(CPU_M68060_ONLY)
	jra	2f
#endif
#endif /* CONFIG_M68060 */
#if !defined(CPU_M68060_ONLY)
1:	tstb	%a1@(TASK_THREAD+THREAD_FPSTATE)
	jeq	3f
#endif
2:	fmovemx	%a1@(TASK_THREAD+THREAD_FPREG),%fp0-%fp7
	fmoveml	%a1@(TASK_THREAD+THREAD_FPCNTL),%fpcr/%fpsr/%fpiar
3:	frestore %a1@(TASK_THREAD+THREAD_FPSTATE)
4:
#endif	/* CONFIG_M68KFPU_EMU_ONLY */

	/* restore the kernel stack pointer */
	movel	%a1@(TASK_THREAD+THREAD_KSP),%sp

	/* restore non-scratch registers */
	RESTORE_SWITCH_STACK

	/* restore user stack pointer */
	movel	%a1@(TASK_THREAD+THREAD_USP),%a0
	movel	%a0,%usp

	/* restore fs (sfc,%dfc) */
	movew	%a1@(TASK_THREAD+THREAD_FS),%a0
	movec	%a0,%sfc
	movec	%a0,%dfc

	/* restore status register */
	movew	%a1@(TASK_THREAD+THREAD_SR),%sr

	rts

#endif /* CONFIG_MMU && !CONFIG_COLDFIRE */