// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/parisc/traps.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 1999, 2000  Philipp Rumpf <prumpf@tux.org>
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */

#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/console.h>
#include <linux/bug.h>
#include <linux/ratelimit.h>
#include <linux/uaccess.h>

#include <asm/assembly.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/unaligned.h>
#include <linux/atomic.h>
#include <asm/smp.h>
#include <asm/pdc.h>
#include <asm/pdc_chassis.h>
#include <asm/unwind.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

#include "../math-emu/math-emu.h"	/* for handle_fpe() */

static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
	struct pt_regs *regs);

static int printbinary(char *buf, unsigned long x, int nbits)
{
	unsigned long mask = 1UL << (nbits - 1);
	while (mask != 0) {
		*buf++ = (mask & x ? '1' : '0');
		mask >>= 1;
	}
	*buf = '\0';

	return nbits;
}

#ifdef CONFIG_64BIT
#define RFMT "%016lx"
#else
#define RFMT "%08lx"
#endif
#define FFMT "%016llx"	/* fpregs are 64-bit always */

#define PRINTREGS(lvl,r,f,fmt,x)	\
	printk("%s%s%02d-%02d  " fmt " " fmt " " fmt " " fmt "\n",	\
		lvl, f, (x), (x+3), (r)[(x)+0], (r)[(x)+1],		\
		(r)[(x)+2], (r)[(x)+3])

static void print_gr(char *level, struct pt_regs *regs)
{
	int i;
	char buf[64];

	printk("%s\n", level);
	printk("%s     YZrvWESTHLNXBCVMcbcbcbcbOGFRQPDI\n", level);
	printbinary(buf, regs->gr[0], 32);
	printk("%sPSW: %s %s\n", level, buf, print_tainted());

	for (i = 0; i < 32; i += 4)
		PRINTREGS(level, regs->gr, "r", RFMT, i);
}

static void print_fr(char *level, struct pt_regs *regs)
{
	int i;
	char buf[64];
	struct { u32 sw[2]; } s;

	/* FR are 64bit everywhere. Need to use asm to get the content
	 * of fpsr/fper1, and we assume that we won't have a FP Identify
	 * in our way, otherwise we're screwed.
	 * The fldd is used to restore the T-bit if there was one, as the
	 * store clears it anyway.
	 * PA2.0 book says "thou shall not use fstw on FPSR/FPERs" - T-Bone */
	asm volatile ("fstd %%fr0,0(%1)	\n\t"
		      "fldd 0(%1),%%fr0	\n\t"
		      : "=m" (s) : "r" (&s) : "r0");

	printk("%s\n", level);
	printk("%s      VZOUICununcqcqcqcqcqcrmunTDVZOUI\n", level);
	printbinary(buf, s.sw[0], 32);
	printk("%sFPSR: %s\n", level, buf);
	printk("%sFPER1: %08x\n", level, s.sw[1]);

	/* here we'll print fr0 again, tho it'll be meaningless */
	for (i = 0; i < 32; i += 4)
		PRINTREGS(level, regs->fr, "fr", FFMT, i);
}
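
/*
 * Register dump helpers: show_regs() below prints the PSW as a bit string
 * (via printbinary()) underneath the flag-name header, then the general,
 * space and, for user-mode faults, floating-point registers four per line
 * through PRINTREGS. User-mode dumps are logged at KERN_DEBUG, kernel-mode
 * dumps at KERN_CRIT.
 */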
void show_regs(struct pt_regs *regs)
{
	int i, user;
	char *level;
	unsigned long cr30, cr31;

	user = user_mode(regs);
	level = user ? KERN_DEBUG : KERN_CRIT;

	show_regs_print_info(level);

	print_gr(level, regs);

	for (i = 0; i < 8; i += 4)
		PRINTREGS(level, regs->sr, "sr", RFMT, i);

	if (user)
		print_fr(level, regs);

	cr30 = mfctl(30);
	cr31 = mfctl(31);
	printk("%s\n", level);
	printk("%sIASQ: " RFMT " " RFMT " IAOQ: " RFMT " " RFMT "\n",
	       level, regs->iasq[0], regs->iasq[1], regs->iaoq[0], regs->iaoq[1]);
	printk("%s IIR: %08lx    ISR: " RFMT "  IOR: " RFMT "\n",
	       level, regs->iir, regs->isr, regs->ior);
	printk("%s CPU: %8d   CR30: " RFMT " CR31: " RFMT "\n",
	       level, current_thread_info()->cpu, cr30, cr31);
	printk("%s ORIG_R28: " RFMT "\n", level, regs->orig_r28);

	if (user) {
		printk("%s IAOQ[0]: " RFMT "\n", level, regs->iaoq[0]);
		printk("%s IAOQ[1]: " RFMT "\n", level, regs->iaoq[1]);
		printk("%s RP(r2): " RFMT "\n", level, regs->gr[2]);
	} else {
		printk("%s IAOQ[0]: %pS\n", level, (void *) regs->iaoq[0]);
		printk("%s IAOQ[1]: %pS\n", level, (void *) regs->iaoq[1]);
		printk("%s RP(r2): %pS\n", level, (void *) regs->gr[2]);

		parisc_show_stack(current, NULL, regs);
	}
}

static DEFINE_RATELIMIT_STATE(_hppa_rs,
	DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);

#define parisc_printk_ratelimited(critical, regs, fmt, ...)	{	      \
	if ((critical || show_unhandled_signals) && __ratelimit(&_hppa_rs)) {\
		printk(fmt, ##__VA_ARGS__);				      \
		show_regs(regs);					      \
	}								      \
}


static void do_show_stack(struct unwind_frame_info *info)
{
	int i = 1;

	printk(KERN_CRIT "Backtrace:\n");
	while (i <= 16) {
		if (unwind_once(info) < 0 || info->ip == 0)
			break;

		if (__kernel_text_address(info->ip)) {
			printk(KERN_CRIT " [<" RFMT ">] %pS\n",
				info->ip, (void *) info->ip);
			i++;
		}
	}
	printk(KERN_CRIT "\n");
}

static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
	struct pt_regs *regs)
{
	struct unwind_frame_info info;
	struct task_struct *t;

	t = task ? task : current;
	if (regs) {
		unwind_frame_init(&info, t, regs);
		goto show_stack;
	}

	if (t == current) {
		unsigned long sp;

HERE:
		asm volatile ("copy %%r30, %0" : "=r"(sp));
		{
			struct pt_regs r;

			memset(&r, 0, sizeof(struct pt_regs));
			r.iaoq[0] = (unsigned long)&&HERE;
			r.gr[2] = (unsigned long)__builtin_return_address(0);
			r.gr[30] = sp;

			unwind_frame_init(&info, current, &r);
		}
	} else {
		unwind_frame_init_from_blocked_task(&info, t);
	}

show_stack:
	do_show_stack(&info);
}

void show_stack(struct task_struct *t, unsigned long *sp)
{
	return parisc_show_stack(t, sp, NULL);
}

int is_valid_bugaddr(unsigned long iaoq)
{
	return 1;
}
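
/*
 * die_if_kernel(): for a fault taken in user mode this only emits a
 * rate-limited log message (and nothing at all when err == 0); for a
 * fault taken in kernel mode it prints the oops, taints the kernel and
 * kills the current task.
 */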
void die_if_kernel(char *str, struct pt_regs *regs, long err)
{
	if (user_mode(regs)) {
		if (err == 0)
			return; /* STFU */

		parisc_printk_ratelimited(1, regs,
			KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
			current->comm, task_pid_nr(current), str, err, regs->iaoq[0]);

		return;
	}

	oops_in_progress = 1;

	oops_enter();

	/* Amuse the user in a SPARC fashion */
	if (err) printk(KERN_CRIT
			" _______________________________ \n"
			" < Your System ate a SPARC! Gah! >\n"
			" ------------------------------- \n"
			"        \\   ^__^\n"
			"            (__)\\       )\\/\\\n"
			"             U  ||----w |\n"
			"                ||     ||\n");

	/* unlock the pdc lock if necessary */
	pdc_emergency_unlock();

	/* maybe the kernel hasn't booted very far yet and hasn't been able
	 * to initialize the serial or STI console. In that case we should
	 * re-enable the pdc console, so that the user will be able to
	 * identify the problem. */
	if (!console_drivers)
		pdc_console_restart();

	if (err)
		printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n",
			current->comm, task_pid_nr(current), str, err);

	/* Wot's wrong wif bein' racy? */
	if (current->thread.flags & PARISC_KERNEL_DEATH) {
		printk(KERN_CRIT "%s() recursion detected.\n", __func__);
		local_irq_enable();
		while (1);
	}
	current->thread.flags |= PARISC_KERNEL_DEATH;

	show_regs(regs);
	dump_stack();
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	oops_exit();
	do_exit(SIGSEGV);
}

/* gdb uses break 4,8 */
#define GDB_BREAK_INSN 0x10004
static void handle_gdb_break(struct pt_regs *regs, int wot)
{
	struct siginfo si;

	si.si_signo = SIGTRAP;
	si.si_errno = 0;
	si.si_code = wot;
	si.si_addr = (void __user *) (regs->iaoq[0] & ~3);
	force_sig_info(SIGTRAP, &si, current);
}

static void handle_break(struct pt_regs *regs)
{
	unsigned iir = regs->iir;

	if (unlikely(iir == PARISC_BUG_BREAK_INSN && !user_mode(regs))) {
		/* check if a BUG() or WARN() trapped here. */
		enum bug_trap_type tt;
		tt = report_bug(regs->iaoq[0] & ~3, regs);
		if (tt == BUG_TRAP_TYPE_WARN) {
			regs->iaoq[0] += 4;
			regs->iaoq[1] += 4;
			return; /* return to next instruction when WARN_ON(). */
		}
		die_if_kernel("Unknown kernel breakpoint", regs,
			(tt == BUG_TRAP_TYPE_NONE) ? 9 : 0);
	}

	if (unlikely(iir != GDB_BREAK_INSN))
		parisc_printk_ratelimited(0, regs,
			KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
			iir & 31, (iir>>13) & ((1<<13)-1),
			task_pid_nr(current), current->comm);

	/* send standard GDB signal */
	handle_gdb_break(regs, TRAP_BRKPT);
}

static void default_trap(int code, struct pt_regs *regs)
{
	printk(KERN_ERR "Trap %d on CPU %d\n", code, smp_processor_id());
	show_regs(regs);
}

void (*cpu_lpmc) (int code, struct pt_regs *regs) __read_mostly = default_trap;
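
/*
 * Copy the PIM (Processor Internal Memory) image saved by firmware at
 * HPMC time into a struct pt_regs, so that the normal show_regs() and
 * backtrace code can be reused from parisc_terminate(). The wide (PA2.0,
 * pcxu and later) or narrow (PA1.1) PIM layout is selected from the boot
 * CPU type.
 */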
void transfer_pim_to_trap_frame(struct pt_regs *regs)
{
	register int i;
	extern unsigned int hpmc_pim_data[];
	struct pdc_hpmc_pim_11 *pim_narrow;
	struct pdc_hpmc_pim_20 *pim_wide;

	if (boot_cpu_data.cpu_type >= pcxu) {

		pim_wide = (struct pdc_hpmc_pim_20 *)hpmc_pim_data;

		/*
		 * Note: The following code will probably generate a
		 * bunch of truncation error warnings from the compiler.
		 * Could be handled with an ifdef, but perhaps there
		 * is a better way.
		 */

		regs->gr[0] = pim_wide->cr[22];

		for (i = 1; i < 32; i++)
			regs->gr[i] = pim_wide->gr[i];

		for (i = 0; i < 32; i++)
			regs->fr[i] = pim_wide->fr[i];

		for (i = 0; i < 8; i++)
			regs->sr[i] = pim_wide->sr[i];

		regs->iasq[0] = pim_wide->cr[17];
		regs->iasq[1] = pim_wide->iasq_back;
		regs->iaoq[0] = pim_wide->cr[18];
		regs->iaoq[1] = pim_wide->iaoq_back;

		regs->sar = pim_wide->cr[11];
		regs->iir = pim_wide->cr[19];
		regs->isr = pim_wide->cr[20];
		regs->ior = pim_wide->cr[21];
	}
	else {
		pim_narrow = (struct pdc_hpmc_pim_11 *)hpmc_pim_data;

		regs->gr[0] = pim_narrow->cr[22];

		for (i = 1; i < 32; i++)
			regs->gr[i] = pim_narrow->gr[i];

		for (i = 0; i < 32; i++)
			regs->fr[i] = pim_narrow->fr[i];

		for (i = 0; i < 8; i++)
			regs->sr[i] = pim_narrow->sr[i];

		regs->iasq[0] = pim_narrow->cr[17];
		regs->iasq[1] = pim_narrow->iasq_back;
		regs->iaoq[0] = pim_narrow->cr[18];
		regs->iaoq[1] = pim_narrow->iaoq_back;

		regs->sar = pim_narrow->cr[11];
		regs->iir = pim_narrow->cr[19];
		regs->isr = pim_narrow->cr[20];
		regs->ior = pim_narrow->cr[21];
	}

	/*
	 * The following fields only have meaning if we came through
	 * another path. So just zero them here.
	 */

	regs->ksp = 0;
	regs->kpc = 0;
	regs->orig_r28 = 0;
}


/*
 * This routine is called as a last resort when everything else
 * has gone clearly wrong. We get called for faults in kernel space,
 * and HPMC's.
 */
void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long offset)
{
	static DEFINE_SPINLOCK(terminate_lock);

	oops_in_progress = 1;

	set_eiem(0);
	local_irq_disable();
	spin_lock(&terminate_lock);

	/* unlock the pdc lock if necessary */
	pdc_emergency_unlock();

	/* restart pdc console if necessary */
	if (!console_drivers)
		pdc_console_restart();

	/* Not all paths will gutter the processor... */
	switch(code){

	case 1:
		transfer_pim_to_trap_frame(regs);
		break;

	default:
		/* Fall through */
		break;

	}

	{
		/* show_stack(NULL, (unsigned long *)regs->gr[30]); */
		struct unwind_frame_info info;
		unwind_frame_init(&info, current, regs);
		do_show_stack(&info);
	}

	printk("\n");
	pr_crit("%s: Code=%d (%s) regs=%p (Addr=" RFMT ")\n",
		msg, code, trap_name(code), regs, offset);
	show_regs(regs);

	spin_unlock(&terminate_lock);

	/* put soft power button back under hardware control;
	 * if the user had pressed it once at any time, the
	 * system will shut down immediately right here. */
	pdc_soft_power_button(0);

	/* Call kernel panic() so reboot timeouts work properly
	 * FIXME: This function should be on the list of
	 * panic notifiers, and we should call panic
	 * directly from the location that we wish.
	 * e.g. We should not call panic from
	 * parisc_terminate, but rather the other way around.
	 * This hack works, prints the panic message twice,
	 * and it enables reboot timers!
	 */
	panic(msg);
}
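
/*
 * handle_interruption() is the common C entry point for PA-RISC traps;
 * 'code' is the hardware interruption number (e.g. 1 = HPMC, 6 =
 * instruction TLB miss, 15 = data TLB miss). Cases that fall out of the
 * switch below with fault_address/fault_space set end up in
 * do_page_fault() at the bottom of the function.
 */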
void notrace handle_interruption(int code, struct pt_regs *regs)
{
	unsigned long fault_address = 0;
	unsigned long fault_space = 0;
	struct siginfo si;

	if (code == 1)
		pdc_console_restart();  /* switch back to pdc if HPMC */
	else
		local_irq_enable();

	/* Security check:
	 * If the priority level is still user, and the
	 * faulting space is not equal to the active space
	 * then the user is attempting something in a space
	 * that does not belong to them. Kill the process.
	 *
	 * This is normally the situation when the user
	 * attempts to jump into the kernel space at the
	 * wrong offset, be it at the gateway page or a
	 * random location.
	 *
	 * We cannot normally signal the process because it
	 * could *be* on the gateway page, and processes
	 * executing on the gateway page can't have signals
	 * delivered.
	 *
	 * We merely readjust the address into the users
	 * space, at a destination address of zero, and
	 * allow processing to continue.
	 */
	if (((unsigned long)regs->iaoq[0] & 3) &&
	    ((unsigned long)regs->iasq[0] != (unsigned long)regs->sr[7])) {
		/* Kill the user process later */
		regs->iaoq[0] = 0 | 3;
		regs->iaoq[1] = regs->iaoq[0] + 4;
		regs->iasq[0] = regs->iasq[1] = regs->sr[7];
		regs->gr[0] &= ~PSW_B;
		return;
	}

#if 0
	printk(KERN_CRIT "Interruption # %d\n", code);
#endif

	switch(code) {

	case 1:
		/* High-priority machine check (HPMC) */

		/* set up a new led state on systems shipped with a LED State panel */
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_HPMC);

		parisc_terminate("High Priority Machine Check (HPMC)",
				regs, code, 0);
		/* NOT REACHED */

	case 2:
		/* Power failure interrupt */
		printk(KERN_CRIT "Power failure interrupt !\n");
		return;

	case 3:
		/* Recovery counter trap */
		regs->gr[0] &= ~PSW_R;
		if (user_space(regs))
			handle_gdb_break(regs, TRAP_TRACE);
		/* else this must be the start of a syscall - just let it run */
		return;

	case 5:
		/* Low-priority machine check */
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_LPMC);

		flush_cache_all();
		flush_tlb_all();
		cpu_lpmc(5, regs);
		return;

	case 6:
		/* Instruction TLB miss fault/Instruction page fault */
		fault_address = regs->iaoq[0];
		fault_space = regs->iasq[0];
		break;

	case 8:
		/* Illegal instruction trap */
		die_if_kernel("Illegal instruction", regs, code);
		si.si_code = ILL_ILLOPC;
		goto give_sigill;

	case 9:
		/* Break instruction trap */
		handle_break(regs);
		return;

	case 10:
		/* Privileged operation trap */
		die_if_kernel("Privileged operation", regs, code);
		si.si_code = ILL_PRVOPC;
		goto give_sigill;
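
	/*
	 * Note on the MFCTL check in case 11: the compare mask 0xffdfffe0
	 * deliberately ignores bit 0x00200000, which the emulation below
	 * uses to pick cr27 instead of cr26, and the low five bits, which
	 * it uses as the number of the destination general register.
	 */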
	case 11:
		/* Privileged register trap */
		if ((regs->iir & 0xffdfffe0) == 0x034008a0) {

			/* This is a MFCTL cr26/cr27 to gr instruction.
			 * PCXS traps on this, so we need to emulate it.
			 */

			if (regs->iir & 0x00200000)
				regs->gr[regs->iir & 0x1f] = mfctl(27);
			else
				regs->gr[regs->iir & 0x1f] = mfctl(26);

			regs->iaoq[0] = regs->iaoq[1];
			regs->iaoq[1] += 4;
			regs->iasq[0] = regs->iasq[1];
			return;
		}

		die_if_kernel("Privileged register usage", regs, code);
		si.si_code = ILL_PRVREG;
	give_sigill:
		si.si_signo = SIGILL;
		si.si_errno = 0;
		si.si_addr = (void __user *) regs->iaoq[0];
		force_sig_info(SIGILL, &si, current);
		return;

	case 12:
		/* Overflow Trap, let the userland signal handler do the cleanup */
		si.si_signo = SIGFPE;
		si.si_code = FPE_INTOVF;
		si.si_addr = (void __user *) regs->iaoq[0];
		force_sig_info(SIGFPE, &si, current);
		return;

	case 13:
		/* Conditional Trap
		   The condition succeeds in an instruction which traps
		   on condition */
		if(user_mode(regs)){
			si.si_signo = SIGFPE;
			/* Let userspace app figure it out from the insn pointed
			 * to by si_addr.
			 */
			si.si_code = FPE_CONDTRAP;
			si.si_addr = (void __user *) regs->iaoq[0];
			force_sig_info(SIGFPE, &si, current);
			return;
		}
		/* The kernel doesn't want to handle condition codes */
		break;

	case 14:
		/* Assist Exception Trap, i.e. floating point exception. */
		die_if_kernel("Floating point exception", regs, 0); /* quiet */
		__inc_irq_stat(irq_fpassist_count);
		handle_fpe(regs);
		return;

	case 15:
		/* Data TLB miss fault/Data page fault */
		/* Fall through */
	case 16:
		/* Non-access instruction TLB miss fault */
		/* The instruction TLB entry needed for the target address of the FIC
		   is absent, and hardware can't find it, so we get to cleanup */
		/* Fall through */
	case 17:
		/* Non-access data TLB miss fault/Non-access data page fault */
		/* FIXME:
			 Still need to add slow path emulation code here!
			 If the insn used a non-shadow register, then the tlb
			 handlers could not have their side-effect (e.g. probe
			 writing to a target register) emulated since rfir would
			 erase the changes to said register. Instead we have to
			 setup everything, call this function we are in, and emulate
			 by hand. Technically we need to emulate:
			 fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw
		*/
		fault_address = regs->ior;
		fault_space = regs->isr;
		break;

	case 18:
		/* PCXS only -- later cpu's split this into types 26,27 & 28 */
		/* Check for unaligned access */
		if (check_unaligned(regs)) {
			handle_unaligned(regs);
			return;
		}
		/* Fall Through */
	case 26:
		/* PCXL: Data memory access rights trap */
		fault_address = regs->ior;
		fault_space = regs->isr;
		break;

	case 19:
		/* Data memory break trap */
		regs->gr[0] |= PSW_X; /* So we can single-step over the trap */
		/* fall thru */
	case 21:
		/* Page reference trap */
		handle_gdb_break(regs, TRAP_HWBKPT);
		return;

	case 25:
		/* Taken branch trap */
		regs->gr[0] &= ~PSW_T;
		if (user_space(regs))
			handle_gdb_break(regs, TRAP_BRANCH);
		/* else this must be the start of a syscall - just let it
		 * run.
		 */
		return;

	case 7:
		/* Instruction access rights */
		/* PCXL: Instruction memory protection trap */

		/*
		 * This could be caused by either: 1) a process attempting
		 * to execute within a vma that does not have execute
		 * permission, or 2) an access rights violation caused by a
		 * flush only translation set up by ptep_get_and_clear().
		 * So we check the vma permissions to differentiate the two.
		 * If the vma indicates we have execute permission, then
		 * the cause is the latter one. In this case, we need to
		 * call do_page_fault() to fix the problem.
		 */

		if (user_mode(regs)) {
			struct vm_area_struct *vma;

			down_read(&current->mm->mmap_sem);
			vma = find_vma(current->mm,regs->iaoq[0]);
			if (vma && (regs->iaoq[0] >= vma->vm_start)
				&& (vma->vm_flags & VM_EXEC)) {

				fault_address = regs->iaoq[0];
				fault_space = regs->iasq[0];

				up_read(&current->mm->mmap_sem);
				break; /* call do_page_fault() */
			}
			up_read(&current->mm->mmap_sem);
		}
		/* Fall Through */
	case 27:
		/* Data memory protection ID trap */
		if (code == 27 && !user_mode(regs) &&
			fixup_exception(regs))
			return;

		die_if_kernel("Protection id trap", regs, code);
		si.si_code = SEGV_MAPERR;
		si.si_signo = SIGSEGV;
		si.si_errno = 0;
		if (code == 7)
			si.si_addr = (void __user *) regs->iaoq[0];
		else
			si.si_addr = (void __user *) regs->ior;
		force_sig_info(SIGSEGV, &si, current);
		return;

	case 28:
		/* Unaligned data reference trap */
		handle_unaligned(regs);
		return;

	default:
		if (user_mode(regs)) {
			parisc_printk_ratelimited(0, regs, KERN_DEBUG
				"handle_interruption() pid=%d command='%s'\n",
				task_pid_nr(current), current->comm);
			/* SIGBUS, for lack of a better one. */
			si.si_signo = SIGBUS;
			si.si_code = BUS_OBJERR;
			si.si_errno = 0;
			si.si_addr = (void __user *) regs->ior;
			force_sig_info(SIGBUS, &si, current);
			return;
		}
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);

		parisc_terminate("Unexpected interruption", regs, code, 0);
		/* NOT REACHED */
	}

	if (user_mode(regs)) {
		if ((fault_space >> SPACEID_SHIFT) != (regs->sr[7] >> SPACEID_SHIFT)) {
			parisc_printk_ratelimited(0, regs, KERN_DEBUG
				"User fault %d on space 0x%08lx, pid=%d command='%s'\n",
				code, fault_space,
				task_pid_nr(current), current->comm);
			si.si_signo = SIGSEGV;
			si.si_errno = 0;
			si.si_code = SEGV_MAPERR;
			si.si_addr = (void __user *) regs->ior;
			force_sig_info(SIGSEGV, &si, current);
			return;
		}
	}
	else {

		/*
		 * The kernel should never fault on its own address space,
		 * unless pagefault_disable() was called before.
		 */

		if (fault_space == 0 && !faulthandler_disabled())
		{
			/* Clean up and return if in exception table. */
			if (fixup_exception(regs))
				return;
			pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
			parisc_terminate("Kernel Fault", regs, code, fault_address);
		}
	}

	do_page_fault(regs, code, fault_address);
}
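
/*
 * initialize_ivt() sets up the in-memory interruption vector table: it
 * clears the first eight words, optionally stores the PDCE_CHECK
 * invocation instruction obtained from firmware in word 0, records the
 * HPMC handler length in word 7, and writes the negated checksum into
 * word 5 so that the sum described in rule 4 below comes out to zero.
 */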
void __init initialize_ivt(const void *iva)
{
	extern u32 os_hpmc_size;
	extern const u32 os_hpmc[];

	int i;
	u32 check = 0;
	u32 *ivap;
	u32 *hpmcp;
	u32 length, instr;

	if (strcmp((const char *)iva, "cows can fly"))
		panic("IVT invalid");

	ivap = (u32 *)iva;

	for (i = 0; i < 8; i++)
		*ivap++ = 0;

	/*
	 * Use PDC_INSTR firmware function to get instruction that invokes
	 * PDCE_CHECK in HPMC handler. See programming note at page 1-31 of
	 * the PA 1.1 Firmware Architecture document.
	 */
	if (pdc_instr(&instr) == PDC_OK)
		ivap[0] = instr;

	/*
	 * Rules for the checksum of the HPMC handler:
	 * 1. The IVA does not point to PDC/PDH space (ie: the OS has installed
	 *    its own IVA).
	 * 2. The word at IVA + 32 is nonzero.
	 * 3. If Length (IVA + 60) is not zero, then Length (IVA + 60) and
	 *    Address (IVA + 56) are word-aligned.
	 * 4. The checksum of the 8 words starting at IVA + 32 plus the sum of
	 *    the Length/4 words starting at Address is zero.
	 */

	/* Compute Checksum for HPMC handler */
	length = os_hpmc_size;
	ivap[7] = length;

	hpmcp = (u32 *)os_hpmc;

	for (i=0; i<length/4; i++)
		check += *hpmcp++;

	for (i=0; i<8; i++)
		check += ivap[i];

	ivap[5] = -check;
}


/* early_trap_init() is called before we set up kernel mappings and
 * write-protect the kernel */
void __init early_trap_init(void)
{
	extern const void fault_vector_20;

#ifndef CONFIG_64BIT
	extern const void fault_vector_11;
	initialize_ivt(&fault_vector_11);
#endif

	initialize_ivt(&fault_vector_20);
}

void __init trap_init(void)
{
}