/*
 * linux/arch/parisc/traps.c
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 1999, 2000  Philipp Rumpf <prumpf@tux.org>
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/console.h>
#include <linux/kallsyms.h>
#include <linux/bug.h>

#include <asm/assembly.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/unaligned.h>
#include <asm/atomic.h>
#include <asm/smp.h>
#include <asm/pdc.h>
#include <asm/pdc_chassis.h>
#include <asm/unwind.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

#include "../math-emu/math-emu.h"	/* for handle_fpe() */

#define PRINT_USER_FAULTS /* (turn this on if you want user faults to be */
			  /*  dumped to the console via printk)          */

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
DEFINE_SPINLOCK(pa_dbit_lock);
#endif

static int printbinary(char *buf, unsigned long x, int nbits)
{
	unsigned long mask = 1UL << (nbits - 1);
	while (mask != 0) {
		*buf++ = (mask & x ? '1' : '0');
		mask >>= 1;
	}
	*buf = '\0';

	return nbits;
}

#ifdef CONFIG_64BIT
#define RFMT "%016lx"
#else
#define RFMT "%08lx"
#endif
#define FFMT "%016llx"	/* fpregs are 64-bit always */

#define PRINTREGS(lvl,r,f,fmt,x)	\
	printk("%s%s%02d-%02d  " fmt " " fmt " " fmt " " fmt "\n",	\
		lvl, f, (x), (x+3), (r)[(x)+0], (r)[(x)+1],		\
		(r)[(x)+2], (r)[(x)+3])

static void print_gr(char *level, struct pt_regs *regs)
{
	int i;
	char buf[64];

	printk("%s\n", level);
	printk("%s     YZrvWESTHLNXBCVMcbcbcbcbOGFRQPDI\n", level);
	printbinary(buf, regs->gr[0], 32);
	printk("%sPSW: %s %s\n", level, buf, print_tainted());

	for (i = 0; i < 32; i += 4)
		PRINTREGS(level, regs->gr, "r", RFMT, i);
}

static void print_fr(char *level, struct pt_regs *regs)
{
	int i;
	char buf[64];
	struct { u32 sw[2]; } s;

	/* FR are 64bit everywhere. Need to use asm to get the content
	 * of fpsr/fper1, and we assume that we won't have a FP Identify
	 * in our way, otherwise we're screwed.
	 * The fldd is used to restore the T-bit if there was one, as the
	 * store clears it anyway.
	 * PA2.0 book says "thou shall not use fstw on FPSR/FPERs" - T-Bone */
	asm volatile ("fstd %%fr0,0(%1)	\n\t"
		      "fldd 0(%1),%%fr0	\n\t"
		      : "=m" (s) : "r" (&s) : "r0");

	printk("%s\n", level);
	printk("%s      VZOUICununcqcqcqcqcqcrmunTDVZOUI\n", level);
	printbinary(buf, s.sw[0], 32);
	printk("%sFPSR: %s\n", level, buf);
	printk("%sFPER1: %08x\n", level, s.sw[1]);

	/* here we'll print fr0 again, tho it'll be meaningless */
	for (i = 0; i < 32; i += 4)
		PRINTREGS(level, regs->fr, "fr", FFMT, i);
}

void show_regs(struct pt_regs *regs)
{
	int i;
	char *level;
	unsigned long cr30, cr31;

	level = user_mode(regs) ? KERN_DEBUG : KERN_CRIT;

	print_gr(level, regs);

	for (i = 0; i < 8; i += 4)
		PRINTREGS(level, regs->sr, "sr", RFMT, i);

	if (user_mode(regs))
		print_fr(level, regs);

	cr30 = mfctl(30);
	cr31 = mfctl(31);
	printk("%s\n", level);
	printk("%sIASQ: " RFMT " " RFMT " IAOQ: " RFMT " " RFMT "\n",
	       level, regs->iasq[0], regs->iasq[1], regs->iaoq[0], regs->iaoq[1]);
	printk("%s IIR: %08lx    ISR: " RFMT "  IOR: " RFMT "\n",
	       level, regs->iir, regs->isr, regs->ior);
	printk("%s CPU: %8d   CR30: " RFMT " CR31: " RFMT "\n",
	       level, current_thread_info()->cpu, cr30, cr31);
	printk("%s ORIG_R28: " RFMT "\n", level, regs->orig_r28);
	printk(level);
	print_symbol(" IAOQ[0]: %s\n", regs->iaoq[0]);
	printk(level);
	print_symbol(" IAOQ[1]: %s\n", regs->iaoq[1]);
	printk(level);
	print_symbol(" RP(r2): %s\n", regs->gr[2]);
}


void dump_stack(void)
{
	show_stack(NULL, NULL);
}

EXPORT_SYMBOL(dump_stack);

static void do_show_stack(struct unwind_frame_info *info)
{
	int i = 1;

	printk(KERN_CRIT "Backtrace:\n");
	while (i <= 16) {
		if (unwind_once(info) < 0 || info->ip == 0)
			break;

		if (__kernel_text_address(info->ip)) {
			printk("%s [<" RFMT ">] ", (i&0x3)==1 ? KERN_CRIT : "", info->ip);
#ifdef CONFIG_KALLSYMS
			print_symbol("%s\n", info->ip);
#else
			if ((i & 0x03) == 0)
				printk("\n");
#endif
			i++;
		}
	}
	printk("\n");
}

void show_stack(struct task_struct *task, unsigned long *s)
{
	struct unwind_frame_info info;

	if (!task) {
		unsigned long sp;

HERE:
		asm volatile ("copy %%r30, %0" : "=r"(sp));
		{
			struct pt_regs r;

			memset(&r, 0, sizeof(struct pt_regs));
			r.iaoq[0] = (unsigned long)&&HERE;
			r.gr[2] = (unsigned long)__builtin_return_address(0);
			r.gr[30] = sp;

			unwind_frame_init(&info, current, &r);
		}
	} else {
		unwind_frame_init_from_blocked_task(&info, task);
	}

	do_show_stack(&info);
}

int is_valid_bugaddr(unsigned long iaoq)
{
	return 1;
}

void die_if_kernel(char *str, struct pt_regs *regs, long err)
{
	if (user_mode(regs)) {
		if (err == 0)
			return; /* STFU */

		printk(KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
			current->comm, current->pid, str, err, regs->iaoq[0]);
#ifdef PRINT_USER_FAULTS
		/* XXX for debugging only */
		show_regs(regs);
#endif
		return;
	}

	oops_in_progress = 1;

	/* Amuse the user in a SPARC fashion */
	if (err) printk(
KERN_CRIT "      _______________________________ \n"
KERN_CRIT "     < Your System ate a SPARC! Gah! >\n"
KERN_CRIT "      ------------------------------- \n"
KERN_CRIT "             \\   ^__^\n"
KERN_CRIT "              \\  (xx)\\_______\n"
KERN_CRIT "                 (__)\\       )\\/\\\n"
KERN_CRIT "                  U  ||----w |\n"
KERN_CRIT "                     ||     ||\n");

	/* unlock the pdc lock if necessary */
	pdc_emergency_unlock();

	/* maybe the kernel hasn't booted very far yet and hasn't been able
	 * to initialize the serial or STI console. In that case we should
	 * re-enable the pdc console, so that the user will be able to
	 * identify the problem. */
	if (!console_drivers)
		pdc_console_restart();

	if (err)
		printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n",
			current->comm, current->pid, str, err);

	/* Wot's wrong wif bein' racy? */
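	/* thread.flags acts as a one-way latch here: if die_if_kernel()
	 * is re-entered while dumping state below, the check catches the
	 * recursion instead of oopsing over and over. */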
	if (current->thread.flags & PARISC_KERNEL_DEATH) {
		printk(KERN_CRIT "%s() recursion detected.\n", __FUNCTION__);
		local_irq_enable();
		while (1);
	}
	current->thread.flags |= PARISC_KERNEL_DEATH;

	show_regs(regs);
	dump_stack();
	add_taint(TAINT_DIE);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops) {
		printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
		ssleep(5);
		panic("Fatal exception");
	}

	do_exit(SIGSEGV);
}

int syscall_ipi(int (*syscall) (struct pt_regs *), struct pt_regs *regs)
{
	return syscall(regs);
}

/* gdb uses break 4,8 */
#define GDB_BREAK_INSN 0x10004
static void handle_gdb_break(struct pt_regs *regs, int wot)
{
	struct siginfo si;

	si.si_signo = SIGTRAP;
	si.si_errno = 0;
	si.si_code = wot;
	si.si_addr = (void __user *) (regs->iaoq[0] & ~3);
	force_sig_info(SIGTRAP, &si, current);
}

static void handle_break(struct pt_regs *regs)
{
	unsigned iir = regs->iir;

	if (unlikely(iir == PARISC_BUG_BREAK_INSN && !user_mode(regs))) {
		/* check if a BUG() or WARN() trapped here. */
		enum bug_trap_type tt;
		tt = report_bug(regs->iaoq[0] & ~3, regs);
		if (tt == BUG_TRAP_TYPE_WARN) {
			regs->iaoq[0] += 4;
			regs->iaoq[1] += 4;
			return; /* return to next instruction when WARN_ON(). */
		}
		die_if_kernel("Unknown kernel breakpoint", regs,
			(tt == BUG_TRAP_TYPE_NONE) ? 9 : 0);
	}

#ifdef PRINT_USER_FAULTS
	if (unlikely(iir != GDB_BREAK_INSN)) {
		printk(KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
			iir & 31, (iir>>13) & ((1<<13)-1),
			current->pid, current->comm);
		show_regs(regs);
	}
#endif

	/* send standard GDB signal */
	handle_gdb_break(regs, TRAP_BRKPT);
}

static void default_trap(int code, struct pt_regs *regs)
{
	printk(KERN_ERR "Trap %d on CPU %d\n", code, smp_processor_id());
	show_regs(regs);
}

void (*cpu_lpmc) (int code, struct pt_regs *regs) __read_mostly = default_trap;


void transfer_pim_to_trap_frame(struct pt_regs *regs)
{
	register int i;
	extern unsigned int hpmc_pim_data[];
	struct pdc_hpmc_pim_11 *pim_narrow;
	struct pdc_hpmc_pim_20 *pim_wide;

	if (boot_cpu_data.cpu_type >= pcxu) {

		pim_wide = (struct pdc_hpmc_pim_20 *)hpmc_pim_data;

		/*
		 * Note: The following code will probably generate a
		 * bunch of truncation error warnings from the compiler.
		 * Could be handled with an ifdef, but perhaps there
		 * is a better way.
		 */

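		/* pt_regs keeps the PSW in the gr[0] slot (r0 is hardwired
		 * to zero anyway); PIM saved it as the IPSW in cr[22]. */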
354 */ 355 356 regs->gr[0] = pim_wide->cr[22]; 357 358 for (i = 1; i < 32; i++) 359 regs->gr[i] = pim_wide->gr[i]; 360 361 for (i = 0; i < 32; i++) 362 regs->fr[i] = pim_wide->fr[i]; 363 364 for (i = 0; i < 8; i++) 365 regs->sr[i] = pim_wide->sr[i]; 366 367 regs->iasq[0] = pim_wide->cr[17]; 368 regs->iasq[1] = pim_wide->iasq_back; 369 regs->iaoq[0] = pim_wide->cr[18]; 370 regs->iaoq[1] = pim_wide->iaoq_back; 371 372 regs->sar = pim_wide->cr[11]; 373 regs->iir = pim_wide->cr[19]; 374 regs->isr = pim_wide->cr[20]; 375 regs->ior = pim_wide->cr[21]; 376 } 377 else { 378 pim_narrow = (struct pdc_hpmc_pim_11 *)hpmc_pim_data; 379 380 regs->gr[0] = pim_narrow->cr[22]; 381 382 for (i = 1; i < 32; i++) 383 regs->gr[i] = pim_narrow->gr[i]; 384 385 for (i = 0; i < 32; i++) 386 regs->fr[i] = pim_narrow->fr[i]; 387 388 for (i = 0; i < 8; i++) 389 regs->sr[i] = pim_narrow->sr[i]; 390 391 regs->iasq[0] = pim_narrow->cr[17]; 392 regs->iasq[1] = pim_narrow->iasq_back; 393 regs->iaoq[0] = pim_narrow->cr[18]; 394 regs->iaoq[1] = pim_narrow->iaoq_back; 395 396 regs->sar = pim_narrow->cr[11]; 397 regs->iir = pim_narrow->cr[19]; 398 regs->isr = pim_narrow->cr[20]; 399 regs->ior = pim_narrow->cr[21]; 400 } 401 402 /* 403 * The following fields only have meaning if we came through 404 * another path. So just zero them here. 405 */ 406 407 regs->ksp = 0; 408 regs->kpc = 0; 409 regs->orig_r28 = 0; 410 } 411 412 413 /* 414 * This routine is called as a last resort when everything else 415 * has gone clearly wrong. We get called for faults in kernel space, 416 * and HPMC's. 417 */ 418 void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long offset) 419 { 420 static DEFINE_SPINLOCK(terminate_lock); 421 422 oops_in_progress = 1; 423 424 set_eiem(0); 425 local_irq_disable(); 426 spin_lock(&terminate_lock); 427 428 /* unlock the pdc lock if necessary */ 429 pdc_emergency_unlock(); 430 431 /* restart pdc console if necessary */ 432 if (!console_drivers) 433 pdc_console_restart(); 434 435 /* Not all paths will gutter the processor... */ 436 switch(code){ 437 438 case 1: 439 transfer_pim_to_trap_frame(regs); 440 break; 441 442 default: 443 /* Fall through */ 444 break; 445 446 } 447 448 { 449 /* show_stack(NULL, (unsigned long *)regs->gr[30]); */ 450 struct unwind_frame_info info; 451 unwind_frame_init(&info, current, regs); 452 do_show_stack(&info); 453 } 454 455 printk("\n"); 456 printk(KERN_CRIT "%s: Code=%d regs=%p (Addr=" RFMT ")\n", 457 msg, code, regs, offset); 458 show_regs(regs); 459 460 spin_unlock(&terminate_lock); 461 462 /* put soft power button back under hardware control; 463 * if the user had pressed it once at any time, the 464 * system will shut down immediately right here. */ 465 pdc_soft_power_button(0); 466 467 /* Call kernel panic() so reboot timeouts work properly 468 * FIXME: This function should be on the list of 469 * panic notifiers, and we should call panic 470 * directly from the location that we wish. 471 * e.g. We should not call panic from 472 * parisc_terminate, but rather the oter way around. 473 * This hack works, prints the panic message twice, 474 * and it enables reboot timers! 
475 */ 476 panic(msg); 477 } 478 479 void handle_interruption(int code, struct pt_regs *regs) 480 { 481 unsigned long fault_address = 0; 482 unsigned long fault_space = 0; 483 struct siginfo si; 484 485 if (code == 1) 486 pdc_console_restart(); /* switch back to pdc if HPMC */ 487 else 488 local_irq_enable(); 489 490 /* Security check: 491 * If the priority level is still user, and the 492 * faulting space is not equal to the active space 493 * then the user is attempting something in a space 494 * that does not belong to them. Kill the process. 495 * 496 * This is normally the situation when the user 497 * attempts to jump into the kernel space at the 498 * wrong offset, be it at the gateway page or a 499 * random location. 500 * 501 * We cannot normally signal the process because it 502 * could *be* on the gateway page, and processes 503 * executing on the gateway page can't have signals 504 * delivered. 505 * 506 * We merely readjust the address into the users 507 * space, at a destination address of zero, and 508 * allow processing to continue. 509 */ 510 if (((unsigned long)regs->iaoq[0] & 3) && 511 ((unsigned long)regs->iasq[0] != (unsigned long)regs->sr[7])) { 512 /* Kill the user process later */ 513 regs->iaoq[0] = 0 | 3; 514 regs->iaoq[1] = regs->iaoq[0] + 4; 515 regs->iasq[0] = regs->iasq[0] = regs->sr[7]; 516 regs->gr[0] &= ~PSW_B; 517 return; 518 } 519 520 #if 0 521 printk(KERN_CRIT "Interruption # %d\n", code); 522 #endif 523 524 switch(code) { 525 526 case 1: 527 /* High-priority machine check (HPMC) */ 528 529 /* set up a new led state on systems shipped with a LED State panel */ 530 pdc_chassis_send_status(PDC_CHASSIS_DIRECT_HPMC); 531 532 parisc_terminate("High Priority Machine Check (HPMC)", 533 regs, code, 0); 534 /* NOT REACHED */ 535 536 case 2: 537 /* Power failure interrupt */ 538 printk(KERN_CRIT "Power failure interrupt !\n"); 539 return; 540 541 case 3: 542 /* Recovery counter trap */ 543 regs->gr[0] &= ~PSW_R; 544 if (user_space(regs)) 545 handle_gdb_break(regs, TRAP_TRACE); 546 /* else this must be the start of a syscall - just let it run */ 547 return; 548 549 case 5: 550 /* Low-priority machine check */ 551 pdc_chassis_send_status(PDC_CHASSIS_DIRECT_LPMC); 552 553 flush_cache_all(); 554 flush_tlb_all(); 555 cpu_lpmc(5, regs); 556 return; 557 558 case 6: 559 /* Instruction TLB miss fault/Instruction page fault */ 560 fault_address = regs->iaoq[0]; 561 fault_space = regs->iasq[0]; 562 break; 563 564 case 8: 565 /* Illegal instruction trap */ 566 die_if_kernel("Illegal instruction", regs, code); 567 si.si_code = ILL_ILLOPC; 568 goto give_sigill; 569 570 case 9: 571 /* Break instruction trap */ 572 handle_break(regs); 573 return; 574 575 case 10: 576 /* Privileged operation trap */ 577 die_if_kernel("Privileged operation", regs, code); 578 si.si_code = ILL_PRVOPC; 579 goto give_sigill; 580 581 case 11: 582 /* Privileged register trap */ 583 if ((regs->iir & 0xffdfffe0) == 0x034008a0) { 584 585 /* This is a MFCTL cr26/cr27 to gr instruction. 586 * PCXS traps on this, so we need to emulate it. 
587 */ 588 589 if (regs->iir & 0x00200000) 590 regs->gr[regs->iir & 0x1f] = mfctl(27); 591 else 592 regs->gr[regs->iir & 0x1f] = mfctl(26); 593 594 regs->iaoq[0] = regs->iaoq[1]; 595 regs->iaoq[1] += 4; 596 regs->iasq[0] = regs->iasq[1]; 597 return; 598 } 599 600 die_if_kernel("Privileged register usage", regs, code); 601 si.si_code = ILL_PRVREG; 602 give_sigill: 603 si.si_signo = SIGILL; 604 si.si_errno = 0; 605 si.si_addr = (void __user *) regs->iaoq[0]; 606 force_sig_info(SIGILL, &si, current); 607 return; 608 609 case 12: 610 /* Overflow Trap, let the userland signal handler do the cleanup */ 611 si.si_signo = SIGFPE; 612 si.si_code = FPE_INTOVF; 613 si.si_addr = (void __user *) regs->iaoq[0]; 614 force_sig_info(SIGFPE, &si, current); 615 return; 616 617 case 13: 618 /* Conditional Trap 619 The condition succeeds in an instruction which traps 620 on condition */ 621 if(user_mode(regs)){ 622 si.si_signo = SIGFPE; 623 /* Set to zero, and let the userspace app figure it out from 624 the insn pointed to by si_addr */ 625 si.si_code = 0; 626 si.si_addr = (void __user *) regs->iaoq[0]; 627 force_sig_info(SIGFPE, &si, current); 628 return; 629 } 630 /* The kernel doesn't want to handle condition codes */ 631 break; 632 633 case 14: 634 /* Assist Exception Trap, i.e. floating point exception. */ 635 die_if_kernel("Floating point exception", regs, 0); /* quiet */ 636 handle_fpe(regs); 637 return; 638 639 case 15: 640 /* Data TLB miss fault/Data page fault */ 641 /* Fall through */ 642 case 16: 643 /* Non-access instruction TLB miss fault */ 644 /* The instruction TLB entry needed for the target address of the FIC 645 is absent, and hardware can't find it, so we get to cleanup */ 646 /* Fall through */ 647 case 17: 648 /* Non-access data TLB miss fault/Non-access data page fault */ 649 /* FIXME: 650 Still need to add slow path emulation code here! 651 If the insn used a non-shadow register, then the tlb 652 handlers could not have their side-effect (e.g. probe 653 writing to a target register) emulated since rfir would 654 erase the changes to said register. Instead we have to 655 setup everything, call this function we are in, and emulate 656 by hand. Technically we need to emulate: 657 fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw 658 */ 659 fault_address = regs->ior; 660 fault_space = regs->isr; 661 break; 662 663 case 18: 664 /* PCXS only -- later cpu's split this into types 26,27 & 28 */ 665 /* Check for unaligned access */ 666 if (check_unaligned(regs)) { 667 handle_unaligned(regs); 668 return; 669 } 670 /* Fall Through */ 671 case 26: 672 /* PCXL: Data memory access rights trap */ 673 fault_address = regs->ior; 674 fault_space = regs->isr; 675 break; 676 677 case 19: 678 /* Data memory break trap */ 679 regs->gr[0] |= PSW_X; /* So we can single-step over the trap */ 680 /* fall thru */ 681 case 21: 682 /* Page reference trap */ 683 handle_gdb_break(regs, TRAP_HWBKPT); 684 return; 685 686 case 25: 687 /* Taken branch trap */ 688 regs->gr[0] &= ~PSW_T; 689 if (user_space(regs)) 690 handle_gdb_break(regs, TRAP_BRANCH); 691 /* else this must be the start of a syscall - just let it 692 * run. 693 */ 694 return; 695 696 case 7: 697 /* Instruction access rights */ 698 /* PCXL: Instruction memory protection trap */ 699 700 /* 701 * This could be caused by either: 1) a process attempting 702 * to execute within a vma that does not have execute 703 * permission, or 2) an access rights violation caused by a 704 * flush only translation set up by ptep_get_and_clear(). 
705 * So we check the vma permissions to differentiate the two. 706 * If the vma indicates we have execute permission, then 707 * the cause is the latter one. In this case, we need to 708 * call do_page_fault() to fix the problem. 709 */ 710 711 if (user_mode(regs)) { 712 struct vm_area_struct *vma; 713 714 down_read(¤t->mm->mmap_sem); 715 vma = find_vma(current->mm,regs->iaoq[0]); 716 if (vma && (regs->iaoq[0] >= vma->vm_start) 717 && (vma->vm_flags & VM_EXEC)) { 718 719 fault_address = regs->iaoq[0]; 720 fault_space = regs->iasq[0]; 721 722 up_read(¤t->mm->mmap_sem); 723 break; /* call do_page_fault() */ 724 } 725 up_read(¤t->mm->mmap_sem); 726 } 727 /* Fall Through */ 728 case 27: 729 /* Data memory protection ID trap */ 730 die_if_kernel("Protection id trap", regs, code); 731 si.si_code = SEGV_MAPERR; 732 si.si_signo = SIGSEGV; 733 si.si_errno = 0; 734 if (code == 7) 735 si.si_addr = (void __user *) regs->iaoq[0]; 736 else 737 si.si_addr = (void __user *) regs->ior; 738 force_sig_info(SIGSEGV, &si, current); 739 return; 740 741 case 28: 742 /* Unaligned data reference trap */ 743 handle_unaligned(regs); 744 return; 745 746 default: 747 if (user_mode(regs)) { 748 #ifdef PRINT_USER_FAULTS 749 printk(KERN_DEBUG "\nhandle_interruption() pid=%d command='%s'\n", 750 current->pid, current->comm); 751 show_regs(regs); 752 #endif 753 /* SIGBUS, for lack of a better one. */ 754 si.si_signo = SIGBUS; 755 si.si_code = BUS_OBJERR; 756 si.si_errno = 0; 757 si.si_addr = (void __user *) regs->ior; 758 force_sig_info(SIGBUS, &si, current); 759 return; 760 } 761 pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC); 762 763 parisc_terminate("Unexpected interruption", regs, code, 0); 764 /* NOT REACHED */ 765 } 766 767 if (user_mode(regs)) { 768 if ((fault_space >> SPACEID_SHIFT) != (regs->sr[7] >> SPACEID_SHIFT)) { 769 #ifdef PRINT_USER_FAULTS 770 if (fault_space == 0) 771 printk(KERN_DEBUG "User Fault on Kernel Space "); 772 else 773 printk(KERN_DEBUG "User Fault (long pointer) (fault %d) ", 774 code); 775 printk("pid=%d command='%s'\n", current->pid, current->comm); 776 show_regs(regs); 777 #endif 778 si.si_signo = SIGSEGV; 779 si.si_errno = 0; 780 si.si_code = SEGV_MAPERR; 781 si.si_addr = (void __user *) regs->ior; 782 force_sig_info(SIGSEGV, &si, current); 783 return; 784 } 785 } 786 else { 787 788 /* 789 * The kernel should never fault on its own address space. 
790 */ 791 792 if (fault_space == 0) 793 { 794 pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC); 795 parisc_terminate("Kernel Fault", regs, code, fault_address); 796 797 } 798 } 799 800 do_page_fault(regs, code, fault_address); 801 } 802 803 804 int __init check_ivt(void *iva) 805 { 806 extern const u32 os_hpmc[]; 807 extern const u32 os_hpmc_end[]; 808 809 int i; 810 u32 check = 0; 811 u32 *ivap; 812 u32 *hpmcp; 813 u32 length; 814 815 if (strcmp((char *)iva, "cows can fly")) 816 return -1; 817 818 ivap = (u32 *)iva; 819 820 for (i = 0; i < 8; i++) 821 *ivap++ = 0; 822 823 /* Compute Checksum for HPMC handler */ 824 825 length = os_hpmc_end - os_hpmc; 826 ivap[7] = length; 827 828 hpmcp = (u32 *)os_hpmc; 829 830 for (i=0; i<length/4; i++) 831 check += *hpmcp++; 832 833 for (i=0; i<8; i++) 834 check += ivap[i]; 835 836 ivap[5] = -check; 837 838 return 0; 839 } 840 841 #ifndef CONFIG_64BIT 842 extern const void fault_vector_11; 843 #endif 844 extern const void fault_vector_20; 845 846 void __init trap_init(void) 847 { 848 void *iva; 849 850 if (boot_cpu_data.cpu_type >= pcxu) 851 iva = (void *) &fault_vector_20; 852 else 853 #ifdef CONFIG_64BIT 854 panic("Can't boot 64-bit OS on PA1.1 processor!"); 855 #else 856 iva = (void *) &fault_vector_11; 857 #endif 858 859 if (check_ivt(iva)) 860 panic("IVT invalid"); 861 } 862