1 /* 2 * linux/arch/m68k/kernel/traps.c 3 * 4 * Copyright (C) 1993, 1994 by Hamish Macdonald 5 * 6 * 68040 fixes by Michael Rausch 7 * 68040 fixes by Martin Apel 8 * 68040 fixes and writeback by Richard Zidlicky 9 * 68060 fixes by Roman Hodek 10 * 68060 fixes by Jesper Skov 11 * 12 * This file is subject to the terms and conditions of the GNU General Public 13 * License. See the file COPYING in the main directory of this archive 14 * for more details. 15 */ 16 17 /* 18 * Sets up all exception vectors 19 */ 20 21 #include <linux/sched.h> 22 #include <linux/sched/debug.h> 23 #include <linux/signal.h> 24 #include <linux/kernel.h> 25 #include <linux/mm.h> 26 #include <linux/module.h> 27 #include <linux/user.h> 28 #include <linux/string.h> 29 #include <linux/linkage.h> 30 #include <linux/init.h> 31 #include <linux/ptrace.h> 32 #include <linux/kallsyms.h> 33 34 #include <asm/setup.h> 35 #include <asm/fpu.h> 36 #include <linux/uaccess.h> 37 #include <asm/traps.h> 38 #include <asm/machdep.h> 39 #include <asm/siginfo.h> 40 #include <asm/tlbflush.h> 41 42 static const char *vec_names[] = { 43 [VEC_RESETSP] = "RESET SP", 44 [VEC_RESETPC] = "RESET PC", 45 [VEC_BUSERR] = "BUS ERROR", 46 [VEC_ADDRERR] = "ADDRESS ERROR", 47 [VEC_ILLEGAL] = "ILLEGAL INSTRUCTION", 48 [VEC_ZERODIV] = "ZERO DIVIDE", 49 [VEC_CHK] = "CHK", 50 [VEC_TRAP] = "TRAPcc", 51 [VEC_PRIV] = "PRIVILEGE VIOLATION", 52 [VEC_TRACE] = "TRACE", 53 [VEC_LINE10] = "LINE 1010", 54 [VEC_LINE11] = "LINE 1111", 55 [VEC_RESV12] = "UNASSIGNED RESERVED 12", 56 [VEC_COPROC] = "COPROCESSOR PROTOCOL VIOLATION", 57 [VEC_FORMAT] = "FORMAT ERROR", 58 [VEC_UNINT] = "UNINITIALIZED INTERRUPT", 59 [VEC_RESV16] = "UNASSIGNED RESERVED 16", 60 [VEC_RESV17] = "UNASSIGNED RESERVED 17", 61 [VEC_RESV18] = "UNASSIGNED RESERVED 18", 62 [VEC_RESV19] = "UNASSIGNED RESERVED 19", 63 [VEC_RESV20] = "UNASSIGNED RESERVED 20", 64 [VEC_RESV21] = "UNASSIGNED RESERVED 21", 65 [VEC_RESV22] = "UNASSIGNED RESERVED 22", 66 [VEC_RESV23] = "UNASSIGNED RESERVED 23", 67 [VEC_SPUR] = "SPURIOUS INTERRUPT", 68 [VEC_INT1] = "LEVEL 1 INT", 69 [VEC_INT2] = "LEVEL 2 INT", 70 [VEC_INT3] = "LEVEL 3 INT", 71 [VEC_INT4] = "LEVEL 4 INT", 72 [VEC_INT5] = "LEVEL 5 INT", 73 [VEC_INT6] = "LEVEL 6 INT", 74 [VEC_INT7] = "LEVEL 7 INT", 75 [VEC_SYS] = "SYSCALL", 76 [VEC_TRAP1] = "TRAP #1", 77 [VEC_TRAP2] = "TRAP #2", 78 [VEC_TRAP3] = "TRAP #3", 79 [VEC_TRAP4] = "TRAP #4", 80 [VEC_TRAP5] = "TRAP #5", 81 [VEC_TRAP6] = "TRAP #6", 82 [VEC_TRAP7] = "TRAP #7", 83 [VEC_TRAP8] = "TRAP #8", 84 [VEC_TRAP9] = "TRAP #9", 85 [VEC_TRAP10] = "TRAP #10", 86 [VEC_TRAP11] = "TRAP #11", 87 [VEC_TRAP12] = "TRAP #12", 88 [VEC_TRAP13] = "TRAP #13", 89 [VEC_TRAP14] = "TRAP #14", 90 [VEC_TRAP15] = "TRAP #15", 91 [VEC_FPBRUC] = "FPCP BSUN", 92 [VEC_FPIR] = "FPCP INEXACT", 93 [VEC_FPDIVZ] = "FPCP DIV BY 0", 94 [VEC_FPUNDER] = "FPCP UNDERFLOW", 95 [VEC_FPOE] = "FPCP OPERAND ERROR", 96 [VEC_FPOVER] = "FPCP OVERFLOW", 97 [VEC_FPNAN] = "FPCP SNAN", 98 [VEC_FPUNSUP] = "FPCP UNSUPPORTED OPERATION", 99 [VEC_MMUCFG] = "MMU CONFIGURATION ERROR", 100 [VEC_MMUILL] = "MMU ILLEGAL OPERATION ERROR", 101 [VEC_MMUACC] = "MMU ACCESS LEVEL VIOLATION ERROR", 102 [VEC_RESV59] = "UNASSIGNED RESERVED 59", 103 [VEC_UNIMPEA] = "UNASSIGNED RESERVED 60", 104 [VEC_UNIMPII] = "UNASSIGNED RESERVED 61", 105 [VEC_RESV62] = "UNASSIGNED RESERVED 62", 106 [VEC_RESV63] = "UNASSIGNED RESERVED 63", 107 }; 108 109 static const char *space_names[] = { 110 [0] = "Space 0", 111 [USER_DATA] = "User Data", 112 [USER_PROGRAM] = "User Program", 113 #ifndef CONFIG_SUN3 114 [3] = "Space 3", 115 #else 116 [FC_CONTROL] = "Control", 117 #endif 118 [4] = "Space 4", 119 [SUPER_DATA] = "Super Data", 120 [SUPER_PROGRAM] = "Super Program", 121 [CPU_SPACE] = "CPU" 122 }; 123 124 void die_if_kernel(char *,struct pt_regs *,int); 125 asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address, 126 unsigned long error_code); 127 int send_fault_sig(struct pt_regs *regs); 128 129 asmlinkage void trap_c(struct frame *fp); 130 131 #if defined (CONFIG_M68060) 132 static inline void access_error060 (struct frame *fp) 133 { 134 unsigned long fslw = fp->un.fmt4.pc; /* is really FSLW for access error */ 135 136 pr_debug("fslw=%#lx, fa=%#lx\n", fslw, fp->un.fmt4.effaddr); 137 138 if (fslw & MMU060_BPE) { 139 /* branch prediction error -> clear branch cache */ 140 __asm__ __volatile__ ("movec %/cacr,%/d0\n\t" 141 "orl #0x00400000,%/d0\n\t" 142 "movec %/d0,%/cacr" 143 : : : "d0" ); 144 /* return if there's no other error */ 145 if (!(fslw & MMU060_ERR_BITS) && !(fslw & MMU060_SEE)) 146 return; 147 } 148 149 if (fslw & (MMU060_DESC_ERR | MMU060_WP | MMU060_SP)) { 150 unsigned long errorcode; 151 unsigned long addr = fp->un.fmt4.effaddr; 152 153 if (fslw & MMU060_MA) 154 addr = (addr + PAGE_SIZE - 1) & PAGE_MASK; 155 156 errorcode = 1; 157 if (fslw & MMU060_DESC_ERR) { 158 __flush_tlb040_one(addr); 159 errorcode = 0; 160 } 161 if (fslw & MMU060_W) 162 errorcode |= 2; 163 pr_debug("errorcode = %ld\n", errorcode); 164 do_page_fault(&fp->ptregs, addr, errorcode); 165 } else if (fslw & (MMU060_SEE)){ 166 /* Software Emulation Error. 167 * fault during mem_read/mem_write in ifpsp060/os.S 168 */ 169 send_fault_sig(&fp->ptregs); 170 } else if (!(fslw & (MMU060_RE|MMU060_WE)) || 171 send_fault_sig(&fp->ptregs) > 0) { 172 pr_err("pc=%#lx, fa=%#lx\n", fp->ptregs.pc, 173 fp->un.fmt4.effaddr); 174 pr_err("68060 access error, fslw=%lx\n", fslw); 175 trap_c( fp ); 176 } 177 } 178 #endif /* CONFIG_M68060 */ 179 180 #if defined (CONFIG_M68040) 181 static inline unsigned long probe040(int iswrite, unsigned long addr, int wbs) 182 { 183 unsigned long mmusr; 184 185 set_fc(wbs); 186 187 if (iswrite) 188 asm volatile (".chip 68040; ptestw (%0); .chip 68k" : : "a" (addr)); 189 else 190 asm volatile (".chip 68040; ptestr (%0); .chip 68k" : : "a" (addr)); 191 192 asm volatile (".chip 68040; movec %%mmusr,%0; .chip 68k" : "=r" (mmusr)); 193 194 set_fc(USER_DATA); 195 196 return mmusr; 197 } 198 199 static inline int do_040writeback1(unsigned short wbs, unsigned long wba, 200 unsigned long wbd) 201 { 202 int res = 0; 203 204 set_fc(wbs); 205 206 switch (wbs & WBSIZ_040) { 207 case BA_SIZE_BYTE: 208 res = put_user(wbd & 0xff, (char __user *)wba); 209 break; 210 case BA_SIZE_WORD: 211 res = put_user(wbd & 0xffff, (short __user *)wba); 212 break; 213 case BA_SIZE_LONG: 214 res = put_user(wbd, (int __user *)wba); 215 break; 216 } 217 218 set_fc(USER_DATA); 219 220 pr_debug("do_040writeback1, res=%d\n", res); 221 222 return res; 223 } 224 225 /* after an exception in a writeback the stack frame corresponding 226 * to that exception is discarded, set a few bits in the old frame 227 * to simulate what it should look like 228 */ 229 static inline void fix_xframe040(struct frame *fp, unsigned long wba, unsigned short wbs) 230 { 231 fp->un.fmt7.faddr = wba; 232 fp->un.fmt7.ssw = wbs & 0xff; 233 if (wba != current->thread.faddr) 234 fp->un.fmt7.ssw |= MA_040; 235 } 236 237 static inline void do_040writebacks(struct frame *fp) 238 { 239 int res = 0; 240 #if 0 241 if (fp->un.fmt7.wb1s & WBV_040) 242 pr_err("access_error040: cannot handle 1st writeback. oops.\n"); 243 #endif 244 245 if ((fp->un.fmt7.wb2s & WBV_040) && 246 !(fp->un.fmt7.wb2s & WBTT_040)) { 247 res = do_040writeback1(fp->un.fmt7.wb2s, fp->un.fmt7.wb2a, 248 fp->un.fmt7.wb2d); 249 if (res) 250 fix_xframe040(fp, fp->un.fmt7.wb2a, fp->un.fmt7.wb2s); 251 else 252 fp->un.fmt7.wb2s = 0; 253 } 254 255 /* do the 2nd wb only if the first one was successful (except for a kernel wb) */ 256 if (fp->un.fmt7.wb3s & WBV_040 && (!res || fp->un.fmt7.wb3s & 4)) { 257 res = do_040writeback1(fp->un.fmt7.wb3s, fp->un.fmt7.wb3a, 258 fp->un.fmt7.wb3d); 259 if (res) 260 { 261 fix_xframe040(fp, fp->un.fmt7.wb3a, fp->un.fmt7.wb3s); 262 263 fp->un.fmt7.wb2s = fp->un.fmt7.wb3s; 264 fp->un.fmt7.wb3s &= (~WBV_040); 265 fp->un.fmt7.wb2a = fp->un.fmt7.wb3a; 266 fp->un.fmt7.wb2d = fp->un.fmt7.wb3d; 267 } 268 else 269 fp->un.fmt7.wb3s = 0; 270 } 271 272 if (res) 273 send_fault_sig(&fp->ptregs); 274 } 275 276 /* 277 * called from sigreturn(), must ensure userspace code didn't 278 * manipulate exception frame to circumvent protection, then complete 279 * pending writebacks 280 * we just clear TM2 to turn it into a userspace access 281 */ 282 asmlinkage void berr_040cleanup(struct frame *fp) 283 { 284 fp->un.fmt7.wb2s &= ~4; 285 fp->un.fmt7.wb3s &= ~4; 286 287 do_040writebacks(fp); 288 } 289 290 static inline void access_error040(struct frame *fp) 291 { 292 unsigned short ssw = fp->un.fmt7.ssw; 293 unsigned long mmusr; 294 295 pr_debug("ssw=%#x, fa=%#lx\n", ssw, fp->un.fmt7.faddr); 296 pr_debug("wb1s=%#x, wb2s=%#x, wb3s=%#x\n", fp->un.fmt7.wb1s, 297 fp->un.fmt7.wb2s, fp->un.fmt7.wb3s); 298 pr_debug("wb2a=%lx, wb3a=%lx, wb2d=%lx, wb3d=%lx\n", 299 fp->un.fmt7.wb2a, fp->un.fmt7.wb3a, 300 fp->un.fmt7.wb2d, fp->un.fmt7.wb3d); 301 302 if (ssw & ATC_040) { 303 unsigned long addr = fp->un.fmt7.faddr; 304 unsigned long errorcode; 305 306 /* 307 * The MMU status has to be determined AFTER the address 308 * has been corrected if there was a misaligned access (MA). 309 */ 310 if (ssw & MA_040) 311 addr = (addr + 7) & -8; 312 313 /* MMU error, get the MMUSR info for this access */ 314 mmusr = probe040(!(ssw & RW_040), addr, ssw); 315 pr_debug("mmusr = %lx\n", mmusr); 316 errorcode = 1; 317 if (!(mmusr & MMU_R_040)) { 318 /* clear the invalid atc entry */ 319 __flush_tlb040_one(addr); 320 errorcode = 0; 321 } 322 323 /* despite what documentation seems to say, RMW 324 * accesses have always both the LK and RW bits set */ 325 if (!(ssw & RW_040) || (ssw & LK_040)) 326 errorcode |= 2; 327 328 if (do_page_fault(&fp->ptregs, addr, errorcode)) { 329 pr_debug("do_page_fault() !=0\n"); 330 if (user_mode(&fp->ptregs)){ 331 /* delay writebacks after signal delivery */ 332 pr_debug(".. was usermode - return\n"); 333 return; 334 } 335 /* disable writeback into user space from kernel 336 * (if do_page_fault didn't fix the mapping, 337 * the writeback won't do good) 338 */ 339 disable_wb: 340 pr_debug(".. disabling wb2\n"); 341 if (fp->un.fmt7.wb2a == fp->un.fmt7.faddr) 342 fp->un.fmt7.wb2s &= ~WBV_040; 343 if (fp->un.fmt7.wb3a == fp->un.fmt7.faddr) 344 fp->un.fmt7.wb3s &= ~WBV_040; 345 } 346 } else { 347 /* In case of a bus error we either kill the process or expect 348 * the kernel to catch the fault, which then is also responsible 349 * for cleaning up the mess. 350 */ 351 current->thread.signo = SIGBUS; 352 current->thread.faddr = fp->un.fmt7.faddr; 353 if (send_fault_sig(&fp->ptregs) >= 0) 354 pr_err("68040 bus error (ssw=%x, faddr=%lx)\n", ssw, 355 fp->un.fmt7.faddr); 356 goto disable_wb; 357 } 358 359 do_040writebacks(fp); 360 } 361 #endif /* CONFIG_M68040 */ 362 363 #if defined(CONFIG_SUN3) 364 #include <asm/sun3mmu.h> 365 366 extern int mmu_emu_handle_fault (unsigned long, int, int); 367 368 /* sun3 version of bus_error030 */ 369 370 static inline void bus_error030 (struct frame *fp) 371 { 372 unsigned char buserr_type = sun3_get_buserr (); 373 unsigned long addr, errorcode; 374 unsigned short ssw = fp->un.fmtb.ssw; 375 extern unsigned long _sun3_map_test_start, _sun3_map_test_end; 376 377 if (ssw & (FC | FB)) 378 pr_debug("Instruction fault at %#010lx\n", 379 ssw & FC ? 380 fp->ptregs.format == 0xa ? fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2 381 : 382 fp->ptregs.format == 0xa ? fp->ptregs.pc + 4 : fp->un.fmtb.baddr); 383 if (ssw & DF) 384 pr_debug("Data %s fault at %#010lx in %s (pc=%#lx)\n", 385 ssw & RW ? "read" : "write", 386 fp->un.fmtb.daddr, 387 space_names[ssw & DFC], fp->ptregs.pc); 388 389 /* 390 * Check if this page should be demand-mapped. This needs to go before 391 * the testing for a bad kernel-space access (demand-mapping applies 392 * to kernel accesses too). 393 */ 394 395 if ((ssw & DF) 396 && (buserr_type & (SUN3_BUSERR_PROTERR | SUN3_BUSERR_INVALID))) { 397 if (mmu_emu_handle_fault (fp->un.fmtb.daddr, ssw & RW, 0)) 398 return; 399 } 400 401 /* Check for kernel-space pagefault (BAD). */ 402 if (fp->ptregs.sr & PS_S) { 403 /* kernel fault must be a data fault to user space */ 404 if (! ((ssw & DF) && ((ssw & DFC) == USER_DATA))) { 405 // try checking the kernel mappings before surrender 406 if (mmu_emu_handle_fault (fp->un.fmtb.daddr, ssw & RW, 1)) 407 return; 408 /* instruction fault or kernel data fault! */ 409 if (ssw & (FC | FB)) 410 pr_err("Instruction fault at %#010lx\n", 411 fp->ptregs.pc); 412 if (ssw & DF) { 413 /* was this fault incurred testing bus mappings? */ 414 if((fp->ptregs.pc >= (unsigned long)&_sun3_map_test_start) && 415 (fp->ptregs.pc <= (unsigned long)&_sun3_map_test_end)) { 416 send_fault_sig(&fp->ptregs); 417 return; 418 } 419 420 pr_err("Data %s fault at %#010lx in %s (pc=%#lx)\n", 421 ssw & RW ? "read" : "write", 422 fp->un.fmtb.daddr, 423 space_names[ssw & DFC], fp->ptregs.pc); 424 } 425 pr_err("BAD KERNEL BUSERR\n"); 426 427 die_if_kernel("Oops", &fp->ptregs,0); 428 force_sig(SIGKILL); 429 return; 430 } 431 } else { 432 /* user fault */ 433 if (!(ssw & (FC | FB)) && !(ssw & DF)) 434 /* not an instruction fault or data fault! BAD */ 435 panic ("USER BUSERR w/o instruction or data fault"); 436 } 437 438 439 /* First handle the data fault, if any. */ 440 if (ssw & DF) { 441 addr = fp->un.fmtb.daddr; 442 443 // errorcode bit 0: 0 -> no page 1 -> protection fault 444 // errorcode bit 1: 0 -> read fault 1 -> write fault 445 446 // (buserr_type & SUN3_BUSERR_PROTERR) -> protection fault 447 // (buserr_type & SUN3_BUSERR_INVALID) -> invalid page fault 448 449 if (buserr_type & SUN3_BUSERR_PROTERR) 450 errorcode = 0x01; 451 else if (buserr_type & SUN3_BUSERR_INVALID) 452 errorcode = 0x00; 453 else { 454 pr_debug("*** unexpected busfault type=%#04x\n", 455 buserr_type); 456 pr_debug("invalid %s access at %#lx from pc %#lx\n", 457 !(ssw & RW) ? "write" : "read", addr, 458 fp->ptregs.pc); 459 die_if_kernel ("Oops", &fp->ptregs, buserr_type); 460 force_sig (SIGBUS); 461 return; 462 } 463 464 //todo: wtf is RM bit? --m 465 if (!(ssw & RW) || ssw & RM) 466 errorcode |= 0x02; 467 468 /* Handle page fault. */ 469 do_page_fault (&fp->ptregs, addr, errorcode); 470 471 /* Retry the data fault now. */ 472 return; 473 } 474 475 /* Now handle the instruction fault. */ 476 477 /* Get the fault address. */ 478 if (fp->ptregs.format == 0xA) 479 addr = fp->ptregs.pc + 4; 480 else 481 addr = fp->un.fmtb.baddr; 482 if (ssw & FC) 483 addr -= 2; 484 485 if (buserr_type & SUN3_BUSERR_INVALID) { 486 if (!mmu_emu_handle_fault(addr, 1, 0)) 487 do_page_fault (&fp->ptregs, addr, 0); 488 } else { 489 pr_debug("protection fault on insn access (segv).\n"); 490 force_sig (SIGSEGV); 491 } 492 } 493 #else 494 #if defined(CPU_M68020_OR_M68030) 495 static inline void bus_error030 (struct frame *fp) 496 { 497 volatile unsigned short temp; 498 unsigned short mmusr; 499 unsigned long addr, errorcode; 500 unsigned short ssw = fp->un.fmtb.ssw; 501 #ifdef DEBUG 502 unsigned long desc; 503 #endif 504 505 pr_debug("pid = %x ", current->pid); 506 pr_debug("SSW=%#06x ", ssw); 507 508 if (ssw & (FC | FB)) 509 pr_debug("Instruction fault at %#010lx\n", 510 ssw & FC ? 511 fp->ptregs.format == 0xa ? fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2 512 : 513 fp->ptregs.format == 0xa ? fp->ptregs.pc + 4 : fp->un.fmtb.baddr); 514 if (ssw & DF) 515 pr_debug("Data %s fault at %#010lx in %s (pc=%#lx)\n", 516 ssw & RW ? "read" : "write", 517 fp->un.fmtb.daddr, 518 space_names[ssw & DFC], fp->ptregs.pc); 519 520 /* ++andreas: If a data fault and an instruction fault happen 521 at the same time map in both pages. */ 522 523 /* First handle the data fault, if any. */ 524 if (ssw & DF) { 525 addr = fp->un.fmtb.daddr; 526 527 #ifdef DEBUG 528 asm volatile ("ptestr %3,%2@,#7,%0\n\t" 529 "pmove %%psr,%1" 530 : "=a&" (desc), "=m" (temp) 531 : "a" (addr), "d" (ssw)); 532 pr_debug("mmusr is %#x for addr %#lx in task %p\n", 533 temp, addr, current); 534 pr_debug("descriptor address is 0x%p, contents %#lx\n", 535 __va(desc), *(unsigned long *)__va(desc)); 536 #else 537 asm volatile ("ptestr %2,%1@,#7\n\t" 538 "pmove %%psr,%0" 539 : "=m" (temp) : "a" (addr), "d" (ssw)); 540 #endif 541 mmusr = temp; 542 errorcode = (mmusr & MMU_I) ? 0 : 1; 543 if (!(ssw & RW) || (ssw & RM)) 544 errorcode |= 2; 545 546 if (mmusr & (MMU_I | MMU_WP)) { 547 if (ssw & 4) { 548 pr_err("Data %s fault at %#010lx in %s (pc=%#lx)\n", 549 ssw & RW ? "read" : "write", 550 fp->un.fmtb.daddr, 551 space_names[ssw & DFC], fp->ptregs.pc); 552 goto buserr; 553 } 554 /* Don't try to do anything further if an exception was 555 handled. */ 556 if (do_page_fault (&fp->ptregs, addr, errorcode) < 0) 557 return; 558 } else if (!(mmusr & MMU_I)) { 559 /* probably a 020 cas fault */ 560 if (!(ssw & RM) && send_fault_sig(&fp->ptregs) > 0) 561 pr_err("unexpected bus error (%#x,%#x)\n", ssw, 562 mmusr); 563 } else if (mmusr & (MMU_B|MMU_L|MMU_S)) { 564 pr_err("invalid %s access at %#lx from pc %#lx\n", 565 !(ssw & RW) ? "write" : "read", addr, 566 fp->ptregs.pc); 567 die_if_kernel("Oops",&fp->ptregs,mmusr); 568 force_sig(SIGSEGV); 569 return; 570 } else { 571 #if 0 572 static volatile long tlong; 573 #endif 574 575 pr_err("weird %s access at %#lx from pc %#lx (ssw is %#x)\n", 576 !(ssw & RW) ? "write" : "read", addr, 577 fp->ptregs.pc, ssw); 578 asm volatile ("ptestr #1,%1@,#0\n\t" 579 "pmove %%psr,%0" 580 : "=m" (temp) 581 : "a" (addr)); 582 mmusr = temp; 583 584 pr_err("level 0 mmusr is %#x\n", mmusr); 585 #if 0 586 asm volatile ("pmove %%tt0,%0" 587 : "=m" (tlong)); 588 pr_debug("tt0 is %#lx, ", tlong); 589 asm volatile ("pmove %%tt1,%0" 590 : "=m" (tlong)); 591 pr_debug("tt1 is %#lx\n", tlong); 592 #endif 593 pr_debug("Unknown SIGSEGV - 1\n"); 594 die_if_kernel("Oops",&fp->ptregs,mmusr); 595 force_sig(SIGSEGV); 596 return; 597 } 598 599 /* setup an ATC entry for the access about to be retried */ 600 if (!(ssw & RW) || (ssw & RM)) 601 asm volatile ("ploadw %1,%0@" : /* no outputs */ 602 : "a" (addr), "d" (ssw)); 603 else 604 asm volatile ("ploadr %1,%0@" : /* no outputs */ 605 : "a" (addr), "d" (ssw)); 606 } 607 608 /* Now handle the instruction fault. */ 609 610 if (!(ssw & (FC|FB))) 611 return; 612 613 if (fp->ptregs.sr & PS_S) { 614 pr_err("Instruction fault at %#010lx\n", fp->ptregs.pc); 615 buserr: 616 pr_err("BAD KERNEL BUSERR\n"); 617 die_if_kernel("Oops",&fp->ptregs,0); 618 force_sig(SIGKILL); 619 return; 620 } 621 622 /* get the fault address */ 623 if (fp->ptregs.format == 10) 624 addr = fp->ptregs.pc + 4; 625 else 626 addr = fp->un.fmtb.baddr; 627 if (ssw & FC) 628 addr -= 2; 629 630 if ((ssw & DF) && ((addr ^ fp->un.fmtb.daddr) & PAGE_MASK) == 0) 631 /* Insn fault on same page as data fault. But we 632 should still create the ATC entry. */ 633 goto create_atc_entry; 634 635 #ifdef DEBUG 636 asm volatile ("ptestr #1,%2@,#7,%0\n\t" 637 "pmove %%psr,%1" 638 : "=a&" (desc), "=m" (temp) 639 : "a" (addr)); 640 pr_debug("mmusr is %#x for addr %#lx in task %p\n", 641 temp, addr, current); 642 pr_debug("descriptor address is 0x%p, contents %#lx\n", 643 __va(desc), *(unsigned long *)__va(desc)); 644 #else 645 asm volatile ("ptestr #1,%1@,#7\n\t" 646 "pmove %%psr,%0" 647 : "=m" (temp) : "a" (addr)); 648 #endif 649 mmusr = temp; 650 if (mmusr & MMU_I) 651 do_page_fault (&fp->ptregs, addr, 0); 652 else if (mmusr & (MMU_B|MMU_L|MMU_S)) { 653 pr_err("invalid insn access at %#lx from pc %#lx\n", 654 addr, fp->ptregs.pc); 655 pr_debug("Unknown SIGSEGV - 2\n"); 656 die_if_kernel("Oops",&fp->ptregs,mmusr); 657 force_sig(SIGSEGV); 658 return; 659 } 660 661 create_atc_entry: 662 /* setup an ATC entry for the access about to be retried */ 663 asm volatile ("ploadr #2,%0@" : /* no outputs */ 664 : "a" (addr)); 665 } 666 #endif /* CPU_M68020_OR_M68030 */ 667 #endif /* !CONFIG_SUN3 */ 668 669 #if defined(CONFIG_COLDFIRE) && defined(CONFIG_MMU) 670 #include <asm/mcfmmu.h> 671 672 /* 673 * The following table converts the FS encoding of a ColdFire 674 * exception stack frame into the error_code value needed by 675 * do_fault. 676 */ 677 static const unsigned char fs_err_code[] = { 678 0, /* 0000 */ 679 0, /* 0001 */ 680 0, /* 0010 */ 681 0, /* 0011 */ 682 1, /* 0100 */ 683 0, /* 0101 */ 684 0, /* 0110 */ 685 0, /* 0111 */ 686 2, /* 1000 */ 687 3, /* 1001 */ 688 2, /* 1010 */ 689 0, /* 1011 */ 690 1, /* 1100 */ 691 1, /* 1101 */ 692 0, /* 1110 */ 693 0 /* 1111 */ 694 }; 695 696 static inline void access_errorcf(unsigned int fs, struct frame *fp) 697 { 698 unsigned long mmusr, addr; 699 unsigned int err_code; 700 int need_page_fault; 701 702 mmusr = mmu_read(MMUSR); 703 addr = mmu_read(MMUAR); 704 705 /* 706 * error_code: 707 * bit 0 == 0 means no page found, 1 means protection fault 708 * bit 1 == 0 means read, 1 means write 709 */ 710 switch (fs) { 711 case 5: /* 0101 TLB opword X miss */ 712 need_page_fault = cf_tlb_miss(&fp->ptregs, 0, 0, 0); 713 addr = fp->ptregs.pc; 714 break; 715 case 6: /* 0110 TLB extension word X miss */ 716 need_page_fault = cf_tlb_miss(&fp->ptregs, 0, 0, 1); 717 addr = fp->ptregs.pc + sizeof(long); 718 break; 719 case 10: /* 1010 TLB W miss */ 720 need_page_fault = cf_tlb_miss(&fp->ptregs, 1, 1, 0); 721 break; 722 case 14: /* 1110 TLB R miss */ 723 need_page_fault = cf_tlb_miss(&fp->ptregs, 0, 1, 0); 724 break; 725 default: 726 /* 0000 Normal */ 727 /* 0001 Reserved */ 728 /* 0010 Interrupt during debug service routine */ 729 /* 0011 Reserved */ 730 /* 0100 X Protection */ 731 /* 0111 IFP in emulator mode */ 732 /* 1000 W Protection*/ 733 /* 1001 Write error*/ 734 /* 1011 Reserved*/ 735 /* 1100 R Protection*/ 736 /* 1101 R Protection*/ 737 /* 1111 OEP in emulator mode*/ 738 need_page_fault = 1; 739 break; 740 } 741 742 if (need_page_fault) { 743 err_code = fs_err_code[fs]; 744 if ((fs == 13) && (mmusr & MMUSR_WF)) /* rd-mod-wr access */ 745 err_code |= 2; /* bit1 - write, bit0 - protection */ 746 do_page_fault(&fp->ptregs, addr, err_code); 747 } 748 } 749 #endif /* CONFIG_COLDFIRE CONFIG_MMU */ 750 751 asmlinkage void buserr_c(struct frame *fp) 752 { 753 /* Only set esp0 if coming from user mode */ 754 if (user_mode(&fp->ptregs)) 755 current->thread.esp0 = (unsigned long) fp; 756 757 pr_debug("*** Bus Error *** Format is %x\n", fp->ptregs.format); 758 759 #if defined(CONFIG_COLDFIRE) && defined(CONFIG_MMU) 760 if (CPU_IS_COLDFIRE) { 761 unsigned int fs; 762 fs = (fp->ptregs.vector & 0x3) | 763 ((fp->ptregs.vector & 0xc00) >> 8); 764 switch (fs) { 765 case 0x5: 766 case 0x6: 767 case 0x7: 768 case 0x9: 769 case 0xa: 770 case 0xd: 771 case 0xe: 772 case 0xf: 773 access_errorcf(fs, fp); 774 return; 775 default: 776 break; 777 } 778 } 779 #endif /* CONFIG_COLDFIRE && CONFIG_MMU */ 780 781 switch (fp->ptregs.format) { 782 #if defined (CONFIG_M68060) 783 case 4: /* 68060 access error */ 784 access_error060 (fp); 785 break; 786 #endif 787 #if defined (CONFIG_M68040) 788 case 0x7: /* 68040 access error */ 789 access_error040 (fp); 790 break; 791 #endif 792 #if defined (CPU_M68020_OR_M68030) 793 case 0xa: 794 case 0xb: 795 bus_error030 (fp); 796 break; 797 #endif 798 default: 799 die_if_kernel("bad frame format",&fp->ptregs,0); 800 pr_debug("Unknown SIGSEGV - 4\n"); 801 force_sig(SIGSEGV); 802 } 803 } 804 805 806 static int kstack_depth_to_print = 48; 807 808 static void show_trace(unsigned long *stack, const char *loglvl) 809 { 810 unsigned long *endstack; 811 unsigned long addr; 812 int i; 813 814 printk("%sCall Trace:", loglvl); 815 addr = (unsigned long)stack + THREAD_SIZE - 1; 816 endstack = (unsigned long *)(addr & -THREAD_SIZE); 817 i = 0; 818 while (stack + 1 <= endstack) { 819 addr = *stack++; 820 /* 821 * If the address is either in the text segment of the 822 * kernel, or in the region which contains vmalloc'ed 823 * memory, it *may* be the address of a calling 824 * routine; if so, print it so that someone tracing 825 * down the cause of the crash will be able to figure 826 * out the call path that was taken. 827 */ 828 if (__kernel_text_address(addr)) { 829 #ifndef CONFIG_KALLSYMS 830 if (i % 5 == 0) 831 pr_cont("\n "); 832 #endif 833 pr_cont(" [<%08lx>] %pS\n", addr, (void *)addr); 834 i++; 835 } 836 } 837 pr_cont("\n"); 838 } 839 840 void show_registers(struct pt_regs *regs) 841 { 842 struct frame *fp = (struct frame *)regs; 843 u16 c, *cp; 844 unsigned long addr; 845 int i; 846 847 print_modules(); 848 pr_info("PC: [<%08lx>] %pS\n", regs->pc, (void *)regs->pc); 849 pr_info("SR: %04x SP: %p a2: %08lx\n", regs->sr, regs, regs->a2); 850 pr_info("d0: %08lx d1: %08lx d2: %08lx d3: %08lx\n", 851 regs->d0, regs->d1, regs->d2, regs->d3); 852 pr_info("d4: %08lx d5: %08lx a0: %08lx a1: %08lx\n", 853 regs->d4, regs->d5, regs->a0, regs->a1); 854 855 pr_info("Process %s (pid: %d, task=%p)\n", 856 current->comm, task_pid_nr(current), current); 857 addr = (unsigned long)&fp->un; 858 pr_info("Frame format=%X ", regs->format); 859 switch (regs->format) { 860 case 0x2: 861 pr_cont("instr addr=%08lx\n", fp->un.fmt2.iaddr); 862 addr += sizeof(fp->un.fmt2); 863 break; 864 case 0x3: 865 pr_cont("eff addr=%08lx\n", fp->un.fmt3.effaddr); 866 addr += sizeof(fp->un.fmt3); 867 break; 868 case 0x4: 869 if (CPU_IS_060) 870 pr_cont("fault addr=%08lx fslw=%08lx\n", 871 fp->un.fmt4.effaddr, fp->un.fmt4.pc); 872 else 873 pr_cont("eff addr=%08lx pc=%08lx\n", 874 fp->un.fmt4.effaddr, fp->un.fmt4.pc); 875 addr += sizeof(fp->un.fmt4); 876 break; 877 case 0x7: 878 pr_cont("eff addr=%08lx ssw=%04x faddr=%08lx\n", 879 fp->un.fmt7.effaddr, fp->un.fmt7.ssw, fp->un.fmt7.faddr); 880 pr_info("wb 1 stat/addr/data: %04x %08lx %08lx\n", 881 fp->un.fmt7.wb1s, fp->un.fmt7.wb1a, fp->un.fmt7.wb1dpd0); 882 pr_info("wb 2 stat/addr/data: %04x %08lx %08lx\n", 883 fp->un.fmt7.wb2s, fp->un.fmt7.wb2a, fp->un.fmt7.wb2d); 884 pr_info("wb 3 stat/addr/data: %04x %08lx %08lx\n", 885 fp->un.fmt7.wb3s, fp->un.fmt7.wb3a, fp->un.fmt7.wb3d); 886 pr_info("push data: %08lx %08lx %08lx %08lx\n", 887 fp->un.fmt7.wb1dpd0, fp->un.fmt7.pd1, fp->un.fmt7.pd2, 888 fp->un.fmt7.pd3); 889 addr += sizeof(fp->un.fmt7); 890 break; 891 case 0x9: 892 pr_cont("instr addr=%08lx\n", fp->un.fmt9.iaddr); 893 addr += sizeof(fp->un.fmt9); 894 break; 895 case 0xa: 896 pr_cont("ssw=%04x isc=%04x isb=%04x daddr=%08lx dobuf=%08lx\n", 897 fp->un.fmta.ssw, fp->un.fmta.isc, fp->un.fmta.isb, 898 fp->un.fmta.daddr, fp->un.fmta.dobuf); 899 addr += sizeof(fp->un.fmta); 900 break; 901 case 0xb: 902 pr_cont("ssw=%04x isc=%04x isb=%04x daddr=%08lx dobuf=%08lx\n", 903 fp->un.fmtb.ssw, fp->un.fmtb.isc, fp->un.fmtb.isb, 904 fp->un.fmtb.daddr, fp->un.fmtb.dobuf); 905 pr_info("baddr=%08lx dibuf=%08lx ver=%x\n", 906 fp->un.fmtb.baddr, fp->un.fmtb.dibuf, fp->un.fmtb.ver); 907 addr += sizeof(fp->un.fmtb); 908 break; 909 default: 910 pr_cont("\n"); 911 } 912 show_stack(NULL, (unsigned long *)addr, KERN_INFO); 913 914 pr_info("Code:"); 915 cp = (u16 *)regs->pc; 916 for (i = -8; i < 16; i++) { 917 if (get_kernel_nofault(c, cp + i) && i >= 0) { 918 pr_cont(" Bad PC value."); 919 break; 920 } 921 if (i) 922 pr_cont(" %04x", c); 923 else 924 pr_cont(" <%04x>", c); 925 } 926 pr_cont("\n"); 927 } 928 929 void show_stack(struct task_struct *task, unsigned long *stack, 930 const char *loglvl) 931 { 932 unsigned long *p; 933 unsigned long *endstack; 934 int i; 935 936 if (!stack) { 937 if (task) 938 stack = (unsigned long *)task->thread.esp0; 939 else 940 stack = (unsigned long *)&stack; 941 } 942 endstack = (unsigned long *)(((unsigned long)stack + THREAD_SIZE - 1) & -THREAD_SIZE); 943 944 printk("%sStack from %08lx:", loglvl, (unsigned long)stack); 945 p = stack; 946 for (i = 0; i < kstack_depth_to_print; i++) { 947 if (p + 1 > endstack) 948 break; 949 if (i % 8 == 0) 950 pr_cont("\n "); 951 pr_cont(" %08lx", *p++); 952 } 953 pr_cont("\n"); 954 show_trace(stack, loglvl); 955 } 956 957 /* 958 * The vector number returned in the frame pointer may also contain 959 * the "fs" (Fault Status) bits on ColdFire. These are in the bottom 960 * 2 bits, and upper 2 bits. So we need to mask out the real vector 961 * number before using it in comparisons. You don't need to do this on 962 * real 68k parts, but it won't hurt either. 963 */ 964 965 void bad_super_trap (struct frame *fp) 966 { 967 int vector = (fp->ptregs.vector >> 2) & 0xff; 968 969 console_verbose(); 970 if (vector < ARRAY_SIZE(vec_names)) 971 pr_err("*** %s *** FORMAT=%X\n", 972 vec_names[vector], 973 fp->ptregs.format); 974 else 975 pr_err("*** Exception %d *** FORMAT=%X\n", 976 vector, fp->ptregs.format); 977 if (vector == VEC_ADDRERR && CPU_IS_020_OR_030) { 978 unsigned short ssw = fp->un.fmtb.ssw; 979 980 pr_err("SSW=%#06x ", ssw); 981 982 if (ssw & RC) 983 pr_err("Pipe stage C instruction fault at %#010lx\n", 984 (fp->ptregs.format) == 0xA ? 985 fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2); 986 if (ssw & RB) 987 pr_err("Pipe stage B instruction fault at %#010lx\n", 988 (fp->ptregs.format) == 0xA ? 989 fp->ptregs.pc + 4 : fp->un.fmtb.baddr); 990 if (ssw & DF) 991 pr_err("Data %s fault at %#010lx in %s (pc=%#lx)\n", 992 ssw & RW ? "read" : "write", 993 fp->un.fmtb.daddr, space_names[ssw & DFC], 994 fp->ptregs.pc); 995 } 996 pr_err("Current process id is %d\n", task_pid_nr(current)); 997 die_if_kernel("BAD KERNEL TRAP", &fp->ptregs, 0); 998 } 999 1000 asmlinkage void trap_c(struct frame *fp) 1001 { 1002 int sig, si_code; 1003 void __user *addr; 1004 int vector = (fp->ptregs.vector >> 2) & 0xff; 1005 1006 if (fp->ptregs.sr & PS_S) { 1007 if (vector == VEC_TRACE) { 1008 /* traced a trapping instruction on a 68020/30, 1009 * real exception will be executed afterwards. 1010 */ 1011 return; 1012 } 1013 #ifdef CONFIG_MMU 1014 if (fixup_exception(&fp->ptregs)) 1015 return; 1016 #endif 1017 bad_super_trap(fp); 1018 return; 1019 } 1020 1021 /* send the appropriate signal to the user program */ 1022 switch (vector) { 1023 case VEC_ADDRERR: 1024 si_code = BUS_ADRALN; 1025 sig = SIGBUS; 1026 break; 1027 case VEC_ILLEGAL: 1028 case VEC_LINE10: 1029 case VEC_LINE11: 1030 si_code = ILL_ILLOPC; 1031 sig = SIGILL; 1032 break; 1033 case VEC_PRIV: 1034 si_code = ILL_PRVOPC; 1035 sig = SIGILL; 1036 break; 1037 case VEC_COPROC: 1038 si_code = ILL_COPROC; 1039 sig = SIGILL; 1040 break; 1041 case VEC_TRAP1: 1042 case VEC_TRAP2: 1043 case VEC_TRAP3: 1044 case VEC_TRAP4: 1045 case VEC_TRAP5: 1046 case VEC_TRAP6: 1047 case VEC_TRAP7: 1048 case VEC_TRAP8: 1049 case VEC_TRAP9: 1050 case VEC_TRAP10: 1051 case VEC_TRAP11: 1052 case VEC_TRAP12: 1053 case VEC_TRAP13: 1054 case VEC_TRAP14: 1055 si_code = ILL_ILLTRP; 1056 sig = SIGILL; 1057 break; 1058 case VEC_FPBRUC: 1059 case VEC_FPOE: 1060 case VEC_FPNAN: 1061 si_code = FPE_FLTINV; 1062 sig = SIGFPE; 1063 break; 1064 case VEC_FPIR: 1065 si_code = FPE_FLTRES; 1066 sig = SIGFPE; 1067 break; 1068 case VEC_FPDIVZ: 1069 si_code = FPE_FLTDIV; 1070 sig = SIGFPE; 1071 break; 1072 case VEC_FPUNDER: 1073 si_code = FPE_FLTUND; 1074 sig = SIGFPE; 1075 break; 1076 case VEC_FPOVER: 1077 si_code = FPE_FLTOVF; 1078 sig = SIGFPE; 1079 break; 1080 case VEC_ZERODIV: 1081 si_code = FPE_INTDIV; 1082 sig = SIGFPE; 1083 break; 1084 case VEC_CHK: 1085 case VEC_TRAP: 1086 si_code = FPE_INTOVF; 1087 sig = SIGFPE; 1088 break; 1089 case VEC_TRACE: /* ptrace single step */ 1090 si_code = TRAP_TRACE; 1091 sig = SIGTRAP; 1092 break; 1093 case VEC_TRAP15: /* breakpoint */ 1094 si_code = TRAP_BRKPT; 1095 sig = SIGTRAP; 1096 break; 1097 default: 1098 si_code = ILL_ILLOPC; 1099 sig = SIGILL; 1100 break; 1101 } 1102 switch (fp->ptregs.format) { 1103 default: 1104 addr = (void __user *) fp->ptregs.pc; 1105 break; 1106 case 2: 1107 addr = (void __user *) fp->un.fmt2.iaddr; 1108 break; 1109 case 7: 1110 addr = (void __user *) fp->un.fmt7.effaddr; 1111 break; 1112 case 9: 1113 addr = (void __user *) fp->un.fmt9.iaddr; 1114 break; 1115 case 10: 1116 addr = (void __user *) fp->un.fmta.daddr; 1117 break; 1118 case 11: 1119 addr = (void __user*) fp->un.fmtb.daddr; 1120 break; 1121 } 1122 force_sig_fault(sig, si_code, addr); 1123 } 1124 1125 void die_if_kernel (char *str, struct pt_regs *fp, int nr) 1126 { 1127 if (!(fp->sr & PS_S)) 1128 return; 1129 1130 console_verbose(); 1131 pr_crit("%s: %08x\n", str, nr); 1132 show_registers(fp); 1133 add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); 1134 do_exit(SIGSEGV); 1135 } 1136 1137 asmlinkage void set_esp0(unsigned long ssp) 1138 { 1139 current->thread.esp0 = ssp; 1140 } 1141 1142 /* 1143 * This function is called if an error occur while accessing 1144 * user-space from the fpsp040 code. 1145 */ 1146 asmlinkage void fpsp040_die(void) 1147 { 1148 force_fatal_sig(SIGSEGV); 1149 } 1150 1151 #ifdef CONFIG_M68KFPU_EMU 1152 asmlinkage void fpemu_signal(int signal, int code, void *addr) 1153 { 1154 force_sig_fault(signal, code, addr); 1155 } 1156 #endif 1157