// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/parisc/traps.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 1999, 2000  Philipp Rumpf <prumpf@tux.org>
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */

#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/console.h>
#include <linux/bug.h>
#include <linux/ratelimit.h>
#include <linux/uaccess.h>

#include <asm/assembly.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/unaligned.h>
#include <linux/atomic.h>
#include <asm/smp.h>
#include <asm/pdc.h>
#include <asm/pdc_chassis.h>
#include <asm/unwind.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

#include "../math-emu/math-emu.h"	/* for handle_fpe() */

static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
	struct pt_regs *regs);

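/* Render the low @nbits bits of @x as a '0'/'1' string in @buf (most
 * significant bit first) and NUL-terminate it; @buf must hold at least
 * nbits + 1 bytes.  Returns @nbits. */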
static int printbinary(char *buf, unsigned long x, int nbits)
{
	unsigned long mask = 1UL << (nbits - 1);
	while (mask != 0) {
		*buf++ = (mask & x ? '1' : '0');
		mask >>= 1;
	}
	*buf = '\0';

	return nbits;
}

#ifdef CONFIG_64BIT
#define RFMT "%016lx"
#else
#define RFMT "%08lx"
#endif
#define FFMT "%016llx"	/* fpregs are 64-bit always */

#define PRINTREGS(lvl,r,f,fmt,x)	\
	printk("%s%s%02d-%02d " fmt " " fmt " " fmt " " fmt "\n",	\
		lvl, f, (x), (x+3), (r)[(x)+0], (r)[(x)+1],		\
		(r)[(x)+2], (r)[(x)+3])

static void print_gr(char *level, struct pt_regs *regs)
{
	int i;
	char buf[64];

	printk("%s\n", level);
	printk("%s YZrvWESTHLNXBCVMcbcbcbcbOGFRQPDI\n", level);
	printbinary(buf, regs->gr[0], 32);
	printk("%sPSW: %s %s\n", level, buf, print_tainted());

	for (i = 0; i < 32; i += 4)
		PRINTREGS(level, regs->gr, "r", RFMT, i);
}

static void print_fr(char *level, struct pt_regs *regs)
{
	int i;
	char buf[64];
	struct { u32 sw[2]; } s;

	/* FR are 64bit everywhere. Need to use asm to get the content
	 * of fpsr/fper1, and we assume that we won't have a FP Identify
	 * in our way, otherwise we're screwed.
	 * The fldd is used to restore the T-bit if there was one, as the
	 * store clears it anyway.
	 * PA2.0 book says "thou shall not use fstw on FPSR/FPERs" - T-Bone */
	asm volatile ("fstd %%fr0,0(%1)	\n\t"
		      "fldd 0(%1),%%fr0	\n\t"
		      : "=m" (s) : "r" (&s) : "r0");

	printk("%s\n", level);
	printk("%s VZOUICununcqcqcqcqcqcrmunTDVZOUI\n", level);
	printbinary(buf, s.sw[0], 32);
	printk("%sFPSR: %s\n", level, buf);
	printk("%sFPER1: %08x\n", level, s.sw[1]);

	/* here we'll print fr0 again, tho it'll be meaningless */
	for (i = 0; i < 32; i += 4)
		PRINTREGS(level, regs->fr, "fr", FFMT, i);
}

void show_regs(struct pt_regs *regs)
{
	int i, user;
	char *level;
	unsigned long cr30, cr31;

	user = user_mode(regs);
	level = user ? KERN_DEBUG : KERN_CRIT;

	show_regs_print_info(level);

	print_gr(level, regs);

	for (i = 0; i < 8; i += 4)
		PRINTREGS(level, regs->sr, "sr", RFMT, i);

	if (user)
		print_fr(level, regs);

	cr30 = mfctl(30);
	cr31 = mfctl(31);
	printk("%s\n", level);
	printk("%sIASQ: " RFMT " " RFMT " IAOQ: " RFMT " " RFMT "\n",
	       level, regs->iasq[0], regs->iasq[1], regs->iaoq[0], regs->iaoq[1]);
	printk("%s IIR: %08lx ISR: " RFMT " IOR: " RFMT "\n",
	       level, regs->iir, regs->isr, regs->ior);
	printk("%s CPU: %8d CR30: " RFMT " CR31: " RFMT "\n",
	       level, current_thread_info()->cpu, cr30, cr31);
	printk("%s ORIG_R28: " RFMT "\n", level, regs->orig_r28);

	if (user) {
		printk("%s IAOQ[0]: " RFMT "\n", level, regs->iaoq[0]);
		printk("%s IAOQ[1]: " RFMT "\n", level, regs->iaoq[1]);
		printk("%s RP(r2): " RFMT "\n", level, regs->gr[2]);
	} else {
		printk("%s IAOQ[0]: %pS\n", level, (void *) regs->iaoq[0]);
		printk("%s IAOQ[1]: %pS\n", level, (void *) regs->iaoq[1]);
		printk("%s RP(r2): %pS\n", level, (void *) regs->gr[2]);

		parisc_show_stack(current, NULL, regs);
	}
}

static DEFINE_RATELIMIT_STATE(_hppa_rs,
	DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);

#define parisc_printk_ratelimited(critical, regs, fmt, ...)	{		\
	if ((critical || show_unhandled_signals) && __ratelimit(&_hppa_rs)) {	\
		printk(fmt, ##__VA_ARGS__);					\
		show_regs(regs);						\
	}									\
}


static void do_show_stack(struct unwind_frame_info *info)
{
	int i = 1;

	printk(KERN_CRIT "Backtrace:\n");
	while (i <= 16) {
		if (unwind_once(info) < 0 || info->ip == 0)
			break;

		if (__kernel_text_address(info->ip)) {
			printk(KERN_CRIT " [<" RFMT ">] %pS\n",
				info->ip, (void *) info->ip);
			i++;
		}
	}
	printk(KERN_CRIT "\n");
}

static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
	struct pt_regs *regs)
{
	struct unwind_frame_info info;
	struct task_struct *t;

	t = task ? task : current;
	if (regs) {
		unwind_frame_init(&info, t, regs);
		goto show_stack;
	}

	if (t == current) {
		unsigned long sp;

HERE:
		asm volatile ("copy %%r30, %0" : "=r"(sp));
		{
			struct pt_regs r;

			memset(&r, 0, sizeof(struct pt_regs));
			r.iaoq[0] = (unsigned long)&&HERE;
			r.gr[2] = (unsigned long)__builtin_return_address(0);
			r.gr[30] = sp;

			unwind_frame_init(&info, current, &r);
		}
	} else {
		unwind_frame_init_from_blocked_task(&info, t);
	}

show_stack:
	do_show_stack(&info);
}

void show_stack(struct task_struct *t, unsigned long *sp)
{
	return parisc_show_stack(t, sp, NULL);
}

int is_valid_bugaddr(unsigned long iaoq)
{
	return 1;
}

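/* Report a fatal condition: for user-mode faults just print a rate-limited
 * message (nothing at all when err == 0); for kernel-mode faults dump the
 * registers and stack, taint the kernel and oops. */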
void die_if_kernel(char *str, struct pt_regs *regs, long err)
{
	if (user_mode(regs)) {
		if (err == 0)
			return; /* STFU */

		parisc_printk_ratelimited(1, regs,
			KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
			current->comm, task_pid_nr(current), str, err, regs->iaoq[0]);

		return;
	}

	oops_in_progress = 1;

	oops_enter();

	/* Amuse the user in a SPARC fashion */
	if (err) printk(KERN_CRIT
			" _______________________________ \n"
			" < Your System ate a SPARC! Gah! >\n"
			" ------------------------------- \n"
			"        \\   ^__^\n"
			"            (__)\\       )\\/\\\n"
			"             U  ||----w |\n"
			"                ||     ||\n");

	/* unlock the pdc lock if necessary */
	pdc_emergency_unlock();

	/* maybe the kernel hasn't booted very far yet and hasn't been able
	 * to initialize the serial or STI console. In that case we should
	 * re-enable the pdc console, so that the user will be able to
	 * identify the problem. */
	if (!console_drivers)
		pdc_console_restart();

	if (err)
		printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n",
			current->comm, task_pid_nr(current), str, err);

	/* Wot's wrong wif bein' racy? */
	if (current->thread.flags & PARISC_KERNEL_DEATH) {
		printk(KERN_CRIT "%s() recursion detected.\n", __func__);
		local_irq_enable();
		while (1);
	}
	current->thread.flags |= PARISC_KERNEL_DEATH;

	show_regs(regs);
	dump_stack();
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	oops_exit();
	do_exit(SIGSEGV);
}

/* gdb uses break 4,8 */
#define GDB_BREAK_INSN 0x10004
static void handle_gdb_break(struct pt_regs *regs, int wot)
{
	struct siginfo si;

	si.si_signo = SIGTRAP;
	si.si_errno = 0;
	si.si_code = wot;
	si.si_addr = (void __user *) (regs->iaoq[0] & ~3);
	force_sig_info(SIGTRAP, &si, current);
}

static void handle_break(struct pt_regs *regs)
{
	unsigned iir = regs->iir;

	if (unlikely(iir == PARISC_BUG_BREAK_INSN && !user_mode(regs))) {
		/* check if a BUG() or WARN() trapped here. */
		enum bug_trap_type tt;
		tt = report_bug(regs->iaoq[0] & ~3, regs);
		if (tt == BUG_TRAP_TYPE_WARN) {
			regs->iaoq[0] += 4;
			regs->iaoq[1] += 4;
			return; /* return to next instruction when WARN_ON(). */
		}
		die_if_kernel("Unknown kernel breakpoint", regs,
			(tt == BUG_TRAP_TYPE_NONE) ? 9 : 0);
	}

	if (unlikely(iir != GDB_BREAK_INSN))
		parisc_printk_ratelimited(0, regs,
			KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
			iir & 31, (iir>>13) & ((1<<13)-1),
			task_pid_nr(current), current->comm);

	/* send standard GDB signal */
	handle_gdb_break(regs, TRAP_BRKPT);
}

static void default_trap(int code, struct pt_regs *regs)
{
	printk(KERN_ERR "Trap %d on CPU %d\n", code, smp_processor_id());
	show_regs(regs);
}

void (*cpu_lpmc) (int code, struct pt_regs *regs) __read_mostly = default_trap;


void transfer_pim_to_trap_frame(struct pt_regs *regs)
{
	register int i;
	extern unsigned int hpmc_pim_data[];
	struct pdc_hpmc_pim_11 *pim_narrow;
	struct pdc_hpmc_pim_20 *pim_wide;

	if (boot_cpu_data.cpu_type >= pcxu) {

		pim_wide = (struct pdc_hpmc_pim_20 *)hpmc_pim_data;

		/*
		 * Note: The following code will probably generate a
		 * bunch of truncation error warnings from the compiler.
		 * Could be handled with an ifdef, but perhaps there
		 * is a better way.
		 */

		regs->gr[0] = pim_wide->cr[22];

		for (i = 1; i < 32; i++)
			regs->gr[i] = pim_wide->gr[i];

		for (i = 0; i < 32; i++)
			regs->fr[i] = pim_wide->fr[i];

		for (i = 0; i < 8; i++)
			regs->sr[i] = pim_wide->sr[i];

		regs->iasq[0] = pim_wide->cr[17];
		regs->iasq[1] = pim_wide->iasq_back;
		regs->iaoq[0] = pim_wide->cr[18];
		regs->iaoq[1] = pim_wide->iaoq_back;

		regs->sar = pim_wide->cr[11];
		regs->iir = pim_wide->cr[19];
		regs->isr = pim_wide->cr[20];
		regs->ior = pim_wide->cr[21];
	}
	else {
		pim_narrow = (struct pdc_hpmc_pim_11 *)hpmc_pim_data;

		regs->gr[0] = pim_narrow->cr[22];

		for (i = 1; i < 32; i++)
			regs->gr[i] = pim_narrow->gr[i];

		for (i = 0; i < 32; i++)
			regs->fr[i] = pim_narrow->fr[i];

		for (i = 0; i < 8; i++)
			regs->sr[i] = pim_narrow->sr[i];

		regs->iasq[0] = pim_narrow->cr[17];
		regs->iasq[1] = pim_narrow->iasq_back;
		regs->iaoq[0] = pim_narrow->cr[18];
		regs->iaoq[1] = pim_narrow->iaoq_back;

		regs->sar = pim_narrow->cr[11];
		regs->iir = pim_narrow->cr[19];
		regs->isr = pim_narrow->cr[20];
		regs->ior = pim_narrow->cr[21];
	}

	/*
	 * The following fields only have meaning if we came through
	 * another path. So just zero them here.
	 */

	regs->ksp = 0;
	regs->kpc = 0;
	regs->orig_r28 = 0;
}


/*
 * This routine is called as a last resort when everything else
 * has gone clearly wrong. We get called for faults in kernel space,
 * and HPMCs.
 */
void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long offset)
{
	static DEFINE_SPINLOCK(terminate_lock);

	oops_in_progress = 1;

	set_eiem(0);
	local_irq_disable();
	spin_lock(&terminate_lock);

	/* unlock the pdc lock if necessary */
	pdc_emergency_unlock();

	/* restart pdc console if necessary */
	if (!console_drivers)
		pdc_console_restart();

	/* Not all paths will gutter the processor... */
	switch (code) {

	case 1:
		transfer_pim_to_trap_frame(regs);
		break;

	default:
		/* Fall through */
		break;

	}

	{
		/* show_stack(NULL, (unsigned long *)regs->gr[30]); */
		struct unwind_frame_info info;
		unwind_frame_init(&info, current, regs);
		do_show_stack(&info);
	}

	printk("\n");
	pr_crit("%s: Code=%d (%s) regs=%p (Addr=" RFMT ")\n",
		msg, code, trap_name(code), regs, offset);
	show_regs(regs);

	spin_unlock(&terminate_lock);

	/* put soft power button back under hardware control;
	 * if the user had pressed it once at any time, the
	 * system will shut down immediately right here. */
	pdc_soft_power_button(0);

	/* Call kernel panic() so reboot timeouts work properly
	 * FIXME: This function should be on the list of
	 * panic notifiers, and we should call panic
	 * directly from the location that we wish.
	 * e.g. We should not call panic from
	 * parisc_terminate, but rather the other way around.
	 * This hack works, prints the panic message twice,
	 * and it enables reboot timers!
	 */
	panic(msg);
}

void notrace handle_interruption(int code, struct pt_regs *regs)
{
	unsigned long fault_address = 0;
	unsigned long fault_space = 0;
	struct siginfo si;

	if (code == 1)
		pdc_console_restart();  /* switch back to pdc if HPMC */
	else
		local_irq_enable();

	/* Security check:
	 * If the priority level is still user, and the
	 * faulting space is not equal to the active space
	 * then the user is attempting something in a space
	 * that does not belong to them. Kill the process.
	 *
	 * This is normally the situation when the user
	 * attempts to jump into the kernel space at the
	 * wrong offset, be it at the gateway page or a
	 * random location.
	 *
	 * We cannot normally signal the process because it
	 * could *be* on the gateway page, and processes
	 * executing on the gateway page can't have signals
	 * delivered.
	 *
	 * We merely readjust the address into the users
	 * space, at a destination address of zero, and
	 * allow processing to continue.
	 */
	if (((unsigned long)regs->iaoq[0] & 3) &&
	    ((unsigned long)regs->iasq[0] != (unsigned long)regs->sr[7])) {
		/* Kill the user process later */
		regs->iaoq[0] = 0 | 3;
		regs->iaoq[1] = regs->iaoq[0] + 4;
		regs->iasq[0] = regs->iasq[1] = regs->sr[7];
		regs->gr[0] &= ~PSW_B;
		return;
	}

#if 0
	printk(KERN_CRIT "Interruption # %d\n", code);
#endif

	switch (code) {

	case 1:
		/* High-priority machine check (HPMC) */

		/* set up a new led state on systems shipped with a LED State panel */
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_HPMC);

		parisc_terminate("High Priority Machine Check (HPMC)",
				regs, code, 0);
		/* NOT REACHED */

	case 2:
		/* Power failure interrupt */
		printk(KERN_CRIT "Power failure interrupt !\n");
		return;

	case 3:
		/* Recovery counter trap */
		regs->gr[0] &= ~PSW_R;
		if (user_space(regs))
			handle_gdb_break(regs, TRAP_TRACE);
		/* else this must be the start of a syscall - just let it run */
		return;

	case 5:
		/* Low-priority machine check */
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_LPMC);

		flush_cache_all();
		flush_tlb_all();
		cpu_lpmc(5, regs);
		return;

	case 6:
		/* Instruction TLB miss fault/Instruction page fault */
		fault_address = regs->iaoq[0];
		fault_space = regs->iasq[0];
		break;

	case 8:
		/* Illegal instruction trap */
		die_if_kernel("Illegal instruction", regs, code);
		si.si_code = ILL_ILLOPC;
		goto give_sigill;

	case 9:
		/* Break instruction trap */
		handle_break(regs);
		return;

	case 10:
		/* Privileged operation trap */
		die_if_kernel("Privileged operation", regs, code);
		si.si_code = ILL_PRVOPC;
		goto give_sigill;

	case 11:
		/* Privileged register trap */
		if ((regs->iir & 0xffdfffe0) == 0x034008a0) {

			/* This is a MFCTL cr26/cr27 to gr instruction.
			 * PCXS traps on this, so we need to emulate it.
			 */
			if (regs->iir & 0x00200000)
				regs->gr[regs->iir & 0x1f] = mfctl(27);
			else
				regs->gr[regs->iir & 0x1f] = mfctl(26);

			regs->iaoq[0] = regs->iaoq[1];
			regs->iaoq[1] += 4;
			regs->iasq[0] = regs->iasq[1];
			return;
		}

		die_if_kernel("Privileged register usage", regs, code);
		si.si_code = ILL_PRVREG;
	give_sigill:
		si.si_signo = SIGILL;
		si.si_errno = 0;
		si.si_addr = (void __user *) regs->iaoq[0];
		force_sig_info(SIGILL, &si, current);
		return;

	case 12:
		/* Overflow Trap, let the userland signal handler do the cleanup */
		si.si_signo = SIGFPE;
		si.si_code = FPE_INTOVF;
		si.si_addr = (void __user *) regs->iaoq[0];
		force_sig_info(SIGFPE, &si, current);
		return;

	case 13:
		/* Conditional Trap
		   The condition succeeds in an instruction which traps
		   on condition */
		if (user_mode(regs)) {
			si.si_signo = SIGFPE;
			/* Set to zero, and let the userspace app figure it out from
			   the insn pointed to by si_addr */
			si.si_code = FPE_FIXME;
			si.si_addr = (void __user *) regs->iaoq[0];
			force_sig_info(SIGFPE, &si, current);
			return;
		}
		/* The kernel doesn't want to handle condition codes */
		break;

	case 14:
		/* Assist Exception Trap, i.e. floating point exception. */
		die_if_kernel("Floating point exception", regs, 0); /* quiet */
		__inc_irq_stat(irq_fpassist_count);
		handle_fpe(regs);
		return;

	case 15:
		/* Data TLB miss fault/Data page fault */
		/* Fall through */
	case 16:
		/* Non-access instruction TLB miss fault */
		/* The instruction TLB entry needed for the target address of the FIC
		   is absent, and hardware can't find it, so we get to cleanup */
		/* Fall through */
	case 17:
		/* Non-access data TLB miss fault/Non-access data page fault */
		/* FIXME:
			 Still need to add slow path emulation code here!
			 If the insn used a non-shadow register, then the tlb
			 handlers could not have their side-effect (e.g. probe
			 writing to a target register) emulated since rfir would
			 erase the changes to said register. Instead we have to
			 setup everything, call this function we are in, and emulate
			 by hand. Technically we need to emulate:
			 fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw
		*/
		fault_address = regs->ior;
		fault_space = regs->isr;
		break;

	case 18:
		/* PCXS only -- later CPUs split this into types 26, 27 & 28 */
		/* Check for unaligned access */
		if (check_unaligned(regs)) {
			handle_unaligned(regs);
			return;
		}
		/* Fall Through */
	case 26:
		/* PCXL: Data memory access rights trap */
		fault_address = regs->ior;
		fault_space = regs->isr;
		break;

	case 19:
		/* Data memory break trap */
		regs->gr[0] |= PSW_X; /* So we can single-step over the trap */
		/* fall thru */
	case 21:
		/* Page reference trap */
		handle_gdb_break(regs, TRAP_HWBKPT);
		return;

	case 25:
		/* Taken branch trap */
		regs->gr[0] &= ~PSW_T;
		if (user_space(regs))
			handle_gdb_break(regs, TRAP_BRANCH);
		/* else this must be the start of a syscall - just let it
		 * run.
		 */
		return;

	case 7:
		/* Instruction access rights */
		/* PCXL: Instruction memory protection trap */

		/*
		 * This could be caused by either: 1) a process attempting
		 * to execute within a vma that does not have execute
		 * permission, or 2) an access rights violation caused by a
		 * flush only translation set up by ptep_get_and_clear().
		 * So we check the vma permissions to differentiate the two.
		 * If the vma indicates we have execute permission, then
		 * the cause is the latter one. In this case, we need to
		 * call do_page_fault() to fix the problem.
		 */

		if (user_mode(regs)) {
			struct vm_area_struct *vma;

			down_read(&current->mm->mmap_sem);
			vma = find_vma(current->mm, regs->iaoq[0]);
			if (vma && (regs->iaoq[0] >= vma->vm_start)
				&& (vma->vm_flags & VM_EXEC)) {

				fault_address = regs->iaoq[0];
				fault_space = regs->iasq[0];

				up_read(&current->mm->mmap_sem);
				break; /* call do_page_fault() */
			}
			up_read(&current->mm->mmap_sem);
		}
		/* Fall Through */
	case 27:
		/* Data memory protection ID trap */
		if (code == 27 && !user_mode(regs) &&
			fixup_exception(regs))
			return;

		die_if_kernel("Protection id trap", regs, code);
		si.si_code = SEGV_MAPERR;
		si.si_signo = SIGSEGV;
		si.si_errno = 0;
		if (code == 7)
			si.si_addr = (void __user *) regs->iaoq[0];
		else
			si.si_addr = (void __user *) regs->ior;
		force_sig_info(SIGSEGV, &si, current);
		return;

	case 28:
		/* Unaligned data reference trap */
		handle_unaligned(regs);
		return;

	default:
		if (user_mode(regs)) {
			parisc_printk_ratelimited(0, regs, KERN_DEBUG
				"handle_interruption() pid=%d command='%s'\n",
				task_pid_nr(current), current->comm);
			/* SIGBUS, for lack of a better one. */
			si.si_signo = SIGBUS;
			si.si_code = BUS_OBJERR;
			si.si_errno = 0;
			si.si_addr = (void __user *) regs->ior;
			force_sig_info(SIGBUS, &si, current);
			return;
		}
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);

		parisc_terminate("Unexpected interruption", regs, code, 0);
		/* NOT REACHED */
	}

	if (user_mode(regs)) {
		if ((fault_space >> SPACEID_SHIFT) != (regs->sr[7] >> SPACEID_SHIFT)) {
			parisc_printk_ratelimited(0, regs, KERN_DEBUG
				"User fault %d on space 0x%08lx, pid=%d command='%s'\n",
				code, fault_space,
				task_pid_nr(current), current->comm);
			si.si_signo = SIGSEGV;
			si.si_errno = 0;
			si.si_code = SEGV_MAPERR;
			si.si_addr = (void __user *) regs->ior;
			force_sig_info(SIGSEGV, &si, current);
			return;
		}
	}
	else {

		/*
		 * The kernel should never fault on its own address space,
		 * unless pagefault_disable() was called before.
		 */

		if (fault_space == 0 && !faulthandler_disabled())
		{
			/* Clean up and return if in exception table. */
			if (fixup_exception(regs))
				return;
			pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
			parisc_terminate("Kernel Fault", regs, code, fault_address);
		}
	}

	do_page_fault(regs, code, fault_address);
}


void __init initialize_ivt(const void *iva)
{
	extern u32 os_hpmc_size;
	extern const u32 os_hpmc[];

	int i;
	u32 check = 0;
	u32 *ivap;
	u32 *hpmcp;
	u32 length, instr;

	if (strcmp((const char *)iva, "cows can fly"))
		panic("IVT invalid");

	ivap = (u32 *)iva;

	for (i = 0; i < 8; i++)
		*ivap++ = 0;

	/*
	 * Use PDC_INSTR firmware function to get instruction that invokes
	 * PDCE_CHECK in HPMC handler. See programming note at page 1-31 of
	 * the PA 1.1 Firmware Architecture document.
	 */
	if (pdc_instr(&instr) == PDC_OK)
		ivap[0] = instr;

	/* Compute Checksum for HPMC handler */
	length = os_hpmc_size;
	ivap[7] = length;

	hpmcp = (u32 *)os_hpmc;

	for (i = 0; i < length/4; i++)
		check += *hpmcp++;

	for (i = 0; i < 8; i++)
		check += ivap[i];

	ivap[5] = -check;
}


/* early_trap_init() is called before we set up kernel mappings and
 * write-protect the kernel */
void __init early_trap_init(void)
{
	extern const void fault_vector_20;

#ifndef CONFIG_64BIT
	extern const void fault_vector_11;
	initialize_ivt(&fault_vector_11);
#endif

	initialize_ivt(&fault_vector_20);
}

void __init trap_init(void)
{
}