1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * linux/arch/parisc/traps.c 4 * 5 * Copyright (C) 1991, 1992 Linus Torvalds 6 * Copyright (C) 1999, 2000 Philipp Rumpf <prumpf@tux.org> 7 */ 8 9 /* 10 * 'Traps.c' handles hardware traps and faults after we have saved some 11 * state in 'asm.s'. 12 */ 13 14 #include <linux/sched.h> 15 #include <linux/sched/debug.h> 16 #include <linux/kernel.h> 17 #include <linux/string.h> 18 #include <linux/errno.h> 19 #include <linux/ptrace.h> 20 #include <linux/timer.h> 21 #include <linux/delay.h> 22 #include <linux/mm.h> 23 #include <linux/module.h> 24 #include <linux/smp.h> 25 #include <linux/spinlock.h> 26 #include <linux/init.h> 27 #include <linux/interrupt.h> 28 #include <linux/console.h> 29 #include <linux/bug.h> 30 #include <linux/ratelimit.h> 31 #include <linux/uaccess.h> 32 #include <linux/kdebug.h> 33 #include <linux/kfence.h> 34 35 #include <asm/assembly.h> 36 #include <asm/io.h> 37 #include <asm/irq.h> 38 #include <asm/traps.h> 39 #include <asm/unaligned.h> 40 #include <linux/atomic.h> 41 #include <asm/smp.h> 42 #include <asm/pdc.h> 43 #include <asm/pdc_chassis.h> 44 #include <asm/unwind.h> 45 #include <asm/tlbflush.h> 46 #include <asm/cacheflush.h> 47 #include <linux/kgdb.h> 48 #include <linux/kprobes.h> 49 50 #include "../math-emu/math-emu.h" /* for handle_fpe() */ 51 52 static void parisc_show_stack(struct task_struct *task, 53 struct pt_regs *regs, const char *loglvl); 54 55 static int printbinary(char *buf, unsigned long x, int nbits) 56 { 57 unsigned long mask = 1UL << (nbits - 1); 58 while (mask != 0) { 59 *buf++ = (mask & x ? '1' : '0'); 60 mask >>= 1; 61 } 62 *buf = '\0'; 63 64 return nbits; 65 } 66 67 #ifdef CONFIG_64BIT 68 #define RFMT "%016lx" 69 #else 70 #define RFMT "%08lx" 71 #endif 72 #define FFMT "%016llx" /* fpregs are 64-bit always */ 73 74 #define PRINTREGS(lvl,r,f,fmt,x) \ 75 printk("%s%s%02d-%02d " fmt " " fmt " " fmt " " fmt "\n", \ 76 lvl, f, (x), (x+3), (r)[(x)+0], (r)[(x)+1], \ 77 (r)[(x)+2], (r)[(x)+3]) 78 79 static void print_gr(const char *level, struct pt_regs *regs) 80 { 81 int i; 82 char buf[64]; 83 84 printk("%s\n", level); 85 printk("%s YZrvWESTHLNXBCVMcbcbcbcbOGFRQPDI\n", level); 86 printbinary(buf, regs->gr[0], 32); 87 printk("%sPSW: %s %s\n", level, buf, print_tainted()); 88 89 for (i = 0; i < 32; i += 4) 90 PRINTREGS(level, regs->gr, "r", RFMT, i); 91 } 92 93 static void print_fr(const char *level, struct pt_regs *regs) 94 { 95 int i; 96 char buf[64]; 97 struct { u32 sw[2]; } s; 98 99 /* FR are 64bit everywhere. Need to use asm to get the content 100 * of fpsr/fper1, and we assume that we won't have a FP Identify 101 * in our way, otherwise we're screwed. 102 * The fldd is used to restore the T-bit if there was one, as the 103 * store clears it anyway. 104 * PA2.0 book says "thou shall not use fstw on FPSR/FPERs" - T-Bone */ 105 asm volatile ("fstd %%fr0,0(%1) \n\t" 106 "fldd 0(%1),%%fr0 \n\t" 107 : "=m" (s) : "r" (&s) : "r0"); 108 109 printk("%s\n", level); 110 printk("%s VZOUICununcqcqcqcqcqcrmunTDVZOUI\n", level); 111 printbinary(buf, s.sw[0], 32); 112 printk("%sFPSR: %s\n", level, buf); 113 printk("%sFPER1: %08x\n", level, s.sw[1]); 114 115 /* here we'll print fr0 again, tho it'll be meaningless */ 116 for (i = 0; i < 32; i += 4) 117 PRINTREGS(level, regs->fr, "fr", FFMT, i); 118 } 119 120 void show_regs(struct pt_regs *regs) 121 { 122 int i, user; 123 const char *level; 124 unsigned long cr30, cr31; 125 126 user = user_mode(regs); 127 level = user ? KERN_DEBUG : KERN_CRIT; 128 129 show_regs_print_info(level); 130 131 print_gr(level, regs); 132 133 for (i = 0; i < 8; i += 4) 134 PRINTREGS(level, regs->sr, "sr", RFMT, i); 135 136 if (user) 137 print_fr(level, regs); 138 139 cr30 = mfctl(30); 140 cr31 = mfctl(31); 141 printk("%s\n", level); 142 printk("%sIASQ: " RFMT " " RFMT " IAOQ: " RFMT " " RFMT "\n", 143 level, regs->iasq[0], regs->iasq[1], regs->iaoq[0], regs->iaoq[1]); 144 printk("%s IIR: %08lx ISR: " RFMT " IOR: " RFMT "\n", 145 level, regs->iir, regs->isr, regs->ior); 146 printk("%s CPU: %8d CR30: " RFMT " CR31: " RFMT "\n", 147 level, task_cpu(current), cr30, cr31); 148 printk("%s ORIG_R28: " RFMT "\n", level, regs->orig_r28); 149 150 if (user) { 151 printk("%s IAOQ[0]: " RFMT "\n", level, regs->iaoq[0]); 152 printk("%s IAOQ[1]: " RFMT "\n", level, regs->iaoq[1]); 153 printk("%s RP(r2): " RFMT "\n", level, regs->gr[2]); 154 } else { 155 printk("%s IAOQ[0]: %pS\n", level, (void *) regs->iaoq[0]); 156 printk("%s IAOQ[1]: %pS\n", level, (void *) regs->iaoq[1]); 157 printk("%s RP(r2): %pS\n", level, (void *) regs->gr[2]); 158 159 parisc_show_stack(current, regs, KERN_DEFAULT); 160 } 161 } 162 163 static DEFINE_RATELIMIT_STATE(_hppa_rs, 164 DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST); 165 166 #define parisc_printk_ratelimited(critical, regs, fmt, ...) { \ 167 if ((critical || show_unhandled_signals) && __ratelimit(&_hppa_rs)) { \ 168 printk(fmt, ##__VA_ARGS__); \ 169 show_regs(regs); \ 170 } \ 171 } 172 173 174 static void do_show_stack(struct unwind_frame_info *info, const char *loglvl) 175 { 176 int i = 1; 177 178 printk("%sBacktrace:\n", loglvl); 179 while (i <= MAX_UNWIND_ENTRIES) { 180 if (unwind_once(info) < 0 || info->ip == 0) 181 break; 182 183 if (__kernel_text_address(info->ip)) { 184 printk("%s [<" RFMT ">] %pS\n", 185 loglvl, info->ip, (void *) info->ip); 186 i++; 187 } 188 } 189 printk("%s\n", loglvl); 190 } 191 192 static void parisc_show_stack(struct task_struct *task, 193 struct pt_regs *regs, const char *loglvl) 194 { 195 struct unwind_frame_info info; 196 197 unwind_frame_init_task(&info, task, regs); 198 199 do_show_stack(&info, loglvl); 200 } 201 202 void show_stack(struct task_struct *t, unsigned long *sp, const char *loglvl) 203 { 204 parisc_show_stack(t, NULL, loglvl); 205 } 206 207 int is_valid_bugaddr(unsigned long iaoq) 208 { 209 return 1; 210 } 211 212 void die_if_kernel(char *str, struct pt_regs *regs, long err) 213 { 214 if (user_mode(regs)) { 215 if (err == 0) 216 return; /* STFU */ 217 218 parisc_printk_ratelimited(1, regs, 219 KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n", 220 current->comm, task_pid_nr(current), str, err, regs->iaoq[0]); 221 222 return; 223 } 224 225 bust_spinlocks(1); 226 227 oops_enter(); 228 229 /* Amuse the user in a SPARC fashion */ 230 if (err) printk(KERN_CRIT 231 " _______________________________ \n" 232 " < Your System ate a SPARC! Gah! >\n" 233 " ------------------------------- \n" 234 " \\ ^__^\n" 235 " (__)\\ )\\/\\\n" 236 " U ||----w |\n" 237 " || ||\n"); 238 239 /* unlock the pdc lock if necessary */ 240 pdc_emergency_unlock(); 241 242 /* maybe the kernel hasn't booted very far yet and hasn't been able 243 * to initialize the serial or STI console. In that case we should 244 * re-enable the pdc console, so that the user will be able to 245 * identify the problem. */ 246 if (!console_drivers) 247 pdc_console_restart(); 248 249 if (err) 250 printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n", 251 current->comm, task_pid_nr(current), str, err); 252 253 /* Wot's wrong wif bein' racy? */ 254 if (current->thread.flags & PARISC_KERNEL_DEATH) { 255 printk(KERN_CRIT "%s() recursion detected.\n", __func__); 256 local_irq_enable(); 257 while (1); 258 } 259 current->thread.flags |= PARISC_KERNEL_DEATH; 260 261 show_regs(regs); 262 dump_stack(); 263 add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); 264 265 if (in_interrupt()) 266 panic("Fatal exception in interrupt"); 267 268 if (panic_on_oops) 269 panic("Fatal exception"); 270 271 oops_exit(); 272 do_exit(SIGSEGV); 273 } 274 275 /* gdb uses break 4,8 */ 276 #define GDB_BREAK_INSN 0x10004 277 static void handle_gdb_break(struct pt_regs *regs, int wot) 278 { 279 force_sig_fault(SIGTRAP, wot, 280 (void __user *) (regs->iaoq[0] & ~3)); 281 } 282 283 static void handle_break(struct pt_regs *regs) 284 { 285 unsigned iir = regs->iir; 286 287 if (unlikely(iir == PARISC_BUG_BREAK_INSN && !user_mode(regs))) { 288 /* check if a BUG() or WARN() trapped here. */ 289 enum bug_trap_type tt; 290 tt = report_bug(regs->iaoq[0] & ~3, regs); 291 if (tt == BUG_TRAP_TYPE_WARN) { 292 regs->iaoq[0] += 4; 293 regs->iaoq[1] += 4; 294 return; /* return to next instruction when WARN_ON(). */ 295 } 296 die_if_kernel("Unknown kernel breakpoint", regs, 297 (tt == BUG_TRAP_TYPE_NONE) ? 9 : 0); 298 } 299 300 #ifdef CONFIG_KPROBES 301 if (unlikely(iir == PARISC_KPROBES_BREAK_INSN)) { 302 parisc_kprobe_break_handler(regs); 303 return; 304 } 305 306 #endif 307 308 #ifdef CONFIG_KGDB 309 if (unlikely(iir == PARISC_KGDB_COMPILED_BREAK_INSN || 310 iir == PARISC_KGDB_BREAK_INSN)) { 311 kgdb_handle_exception(9, SIGTRAP, 0, regs); 312 return; 313 } 314 #endif 315 316 if (unlikely(iir != GDB_BREAK_INSN)) 317 parisc_printk_ratelimited(0, regs, 318 KERN_DEBUG "break %d,%d: pid=%d command='%s'\n", 319 iir & 31, (iir>>13) & ((1<<13)-1), 320 task_pid_nr(current), current->comm); 321 322 /* send standard GDB signal */ 323 handle_gdb_break(regs, TRAP_BRKPT); 324 } 325 326 static void default_trap(int code, struct pt_regs *regs) 327 { 328 printk(KERN_ERR "Trap %d on CPU %d\n", code, smp_processor_id()); 329 show_regs(regs); 330 } 331 332 void (*cpu_lpmc) (int code, struct pt_regs *regs) __read_mostly = default_trap; 333 334 335 void transfer_pim_to_trap_frame(struct pt_regs *regs) 336 { 337 register int i; 338 extern unsigned int hpmc_pim_data[]; 339 struct pdc_hpmc_pim_11 *pim_narrow; 340 struct pdc_hpmc_pim_20 *pim_wide; 341 342 if (boot_cpu_data.cpu_type >= pcxu) { 343 344 pim_wide = (struct pdc_hpmc_pim_20 *)hpmc_pim_data; 345 346 /* 347 * Note: The following code will probably generate a 348 * bunch of truncation error warnings from the compiler. 349 * Could be handled with an ifdef, but perhaps there 350 * is a better way. 351 */ 352 353 regs->gr[0] = pim_wide->cr[22]; 354 355 for (i = 1; i < 32; i++) 356 regs->gr[i] = pim_wide->gr[i]; 357 358 for (i = 0; i < 32; i++) 359 regs->fr[i] = pim_wide->fr[i]; 360 361 for (i = 0; i < 8; i++) 362 regs->sr[i] = pim_wide->sr[i]; 363 364 regs->iasq[0] = pim_wide->cr[17]; 365 regs->iasq[1] = pim_wide->iasq_back; 366 regs->iaoq[0] = pim_wide->cr[18]; 367 regs->iaoq[1] = pim_wide->iaoq_back; 368 369 regs->sar = pim_wide->cr[11]; 370 regs->iir = pim_wide->cr[19]; 371 regs->isr = pim_wide->cr[20]; 372 regs->ior = pim_wide->cr[21]; 373 } 374 else { 375 pim_narrow = (struct pdc_hpmc_pim_11 *)hpmc_pim_data; 376 377 regs->gr[0] = pim_narrow->cr[22]; 378 379 for (i = 1; i < 32; i++) 380 regs->gr[i] = pim_narrow->gr[i]; 381 382 for (i = 0; i < 32; i++) 383 regs->fr[i] = pim_narrow->fr[i]; 384 385 for (i = 0; i < 8; i++) 386 regs->sr[i] = pim_narrow->sr[i]; 387 388 regs->iasq[0] = pim_narrow->cr[17]; 389 regs->iasq[1] = pim_narrow->iasq_back; 390 regs->iaoq[0] = pim_narrow->cr[18]; 391 regs->iaoq[1] = pim_narrow->iaoq_back; 392 393 regs->sar = pim_narrow->cr[11]; 394 regs->iir = pim_narrow->cr[19]; 395 regs->isr = pim_narrow->cr[20]; 396 regs->ior = pim_narrow->cr[21]; 397 } 398 399 /* 400 * The following fields only have meaning if we came through 401 * another path. So just zero them here. 402 */ 403 404 regs->ksp = 0; 405 regs->kpc = 0; 406 regs->orig_r28 = 0; 407 } 408 409 410 /* 411 * This routine is called as a last resort when everything else 412 * has gone clearly wrong. We get called for faults in kernel space, 413 * and HPMC's. 414 */ 415 void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long offset) 416 { 417 static DEFINE_SPINLOCK(terminate_lock); 418 419 (void)notify_die(DIE_OOPS, msg, regs, 0, code, SIGTRAP); 420 bust_spinlocks(1); 421 422 set_eiem(0); 423 local_irq_disable(); 424 spin_lock(&terminate_lock); 425 426 /* unlock the pdc lock if necessary */ 427 pdc_emergency_unlock(); 428 429 /* restart pdc console if necessary */ 430 if (!console_drivers) 431 pdc_console_restart(); 432 433 /* Not all paths will gutter the processor... */ 434 switch(code){ 435 436 case 1: 437 transfer_pim_to_trap_frame(regs); 438 break; 439 440 default: 441 break; 442 443 } 444 445 { 446 /* show_stack(NULL, (unsigned long *)regs->gr[30]); */ 447 struct unwind_frame_info info; 448 unwind_frame_init(&info, current, regs); 449 do_show_stack(&info, KERN_CRIT); 450 } 451 452 printk("\n"); 453 pr_crit("%s: Code=%d (%s) at addr " RFMT "\n", 454 msg, code, trap_name(code), offset); 455 show_regs(regs); 456 457 spin_unlock(&terminate_lock); 458 459 /* put soft power button back under hardware control; 460 * if the user had pressed it once at any time, the 461 * system will shut down immediately right here. */ 462 pdc_soft_power_button(0); 463 464 /* Call kernel panic() so reboot timeouts work properly 465 * FIXME: This function should be on the list of 466 * panic notifiers, and we should call panic 467 * directly from the location that we wish. 468 * e.g. We should not call panic from 469 * parisc_terminate, but rather the oter way around. 470 * This hack works, prints the panic message twice, 471 * and it enables reboot timers! 472 */ 473 panic(msg); 474 } 475 476 void notrace handle_interruption(int code, struct pt_regs *regs) 477 { 478 unsigned long fault_address = 0; 479 unsigned long fault_space = 0; 480 int si_code; 481 482 if (code == 1) 483 pdc_console_restart(); /* switch back to pdc if HPMC */ 484 else if (!irqs_disabled_flags(regs->gr[0])) 485 local_irq_enable(); 486 487 /* Security check: 488 * If the priority level is still user, and the 489 * faulting space is not equal to the active space 490 * then the user is attempting something in a space 491 * that does not belong to them. Kill the process. 492 * 493 * This is normally the situation when the user 494 * attempts to jump into the kernel space at the 495 * wrong offset, be it at the gateway page or a 496 * random location. 497 * 498 * We cannot normally signal the process because it 499 * could *be* on the gateway page, and processes 500 * executing on the gateway page can't have signals 501 * delivered. 502 * 503 * We merely readjust the address into the users 504 * space, at a destination address of zero, and 505 * allow processing to continue. 506 */ 507 if (((unsigned long)regs->iaoq[0] & 3) && 508 ((unsigned long)regs->iasq[0] != (unsigned long)regs->sr[7])) { 509 /* Kill the user process later */ 510 regs->iaoq[0] = 0 | 3; 511 regs->iaoq[1] = regs->iaoq[0] + 4; 512 regs->iasq[0] = regs->iasq[1] = regs->sr[7]; 513 regs->gr[0] &= ~PSW_B; 514 return; 515 } 516 517 #if 0 518 printk(KERN_CRIT "Interruption # %d\n", code); 519 #endif 520 521 switch(code) { 522 523 case 1: 524 /* High-priority machine check (HPMC) */ 525 526 /* set up a new led state on systems shipped with a LED State panel */ 527 pdc_chassis_send_status(PDC_CHASSIS_DIRECT_HPMC); 528 529 parisc_terminate("High Priority Machine Check (HPMC)", 530 regs, code, 0); 531 /* NOT REACHED */ 532 533 case 2: 534 /* Power failure interrupt */ 535 printk(KERN_CRIT "Power failure interrupt !\n"); 536 return; 537 538 case 3: 539 /* Recovery counter trap */ 540 regs->gr[0] &= ~PSW_R; 541 542 #ifdef CONFIG_KPROBES 543 if (parisc_kprobe_ss_handler(regs)) 544 return; 545 #endif 546 547 #ifdef CONFIG_KGDB 548 if (kgdb_single_step) { 549 kgdb_handle_exception(0, SIGTRAP, 0, regs); 550 return; 551 } 552 #endif 553 554 if (user_space(regs)) 555 handle_gdb_break(regs, TRAP_TRACE); 556 /* else this must be the start of a syscall - just let it run */ 557 return; 558 559 case 5: 560 /* Low-priority machine check */ 561 pdc_chassis_send_status(PDC_CHASSIS_DIRECT_LPMC); 562 563 flush_cache_all(); 564 flush_tlb_all(); 565 cpu_lpmc(5, regs); 566 return; 567 568 case PARISC_ITLB_TRAP: 569 /* Instruction TLB miss fault/Instruction page fault */ 570 fault_address = regs->iaoq[0]; 571 fault_space = regs->iasq[0]; 572 break; 573 574 case 8: 575 /* Illegal instruction trap */ 576 die_if_kernel("Illegal instruction", regs, code); 577 si_code = ILL_ILLOPC; 578 goto give_sigill; 579 580 case 9: 581 /* Break instruction trap */ 582 handle_break(regs); 583 return; 584 585 case 10: 586 /* Privileged operation trap */ 587 die_if_kernel("Privileged operation", regs, code); 588 si_code = ILL_PRVOPC; 589 goto give_sigill; 590 591 case 11: 592 /* Privileged register trap */ 593 if ((regs->iir & 0xffdfffe0) == 0x034008a0) { 594 595 /* This is a MFCTL cr26/cr27 to gr instruction. 596 * PCXS traps on this, so we need to emulate it. 597 */ 598 599 if (regs->iir & 0x00200000) 600 regs->gr[regs->iir & 0x1f] = mfctl(27); 601 else 602 regs->gr[regs->iir & 0x1f] = mfctl(26); 603 604 regs->iaoq[0] = regs->iaoq[1]; 605 regs->iaoq[1] += 4; 606 regs->iasq[0] = regs->iasq[1]; 607 return; 608 } 609 610 die_if_kernel("Privileged register usage", regs, code); 611 si_code = ILL_PRVREG; 612 give_sigill: 613 force_sig_fault(SIGILL, si_code, 614 (void __user *) regs->iaoq[0]); 615 return; 616 617 case 12: 618 /* Overflow Trap, let the userland signal handler do the cleanup */ 619 force_sig_fault(SIGFPE, FPE_INTOVF, 620 (void __user *) regs->iaoq[0]); 621 return; 622 623 case 13: 624 /* Conditional Trap 625 The condition succeeds in an instruction which traps 626 on condition */ 627 if(user_mode(regs)){ 628 /* Let userspace app figure it out from the insn pointed 629 * to by si_addr. 630 */ 631 force_sig_fault(SIGFPE, FPE_CONDTRAP, 632 (void __user *) regs->iaoq[0]); 633 return; 634 } 635 /* The kernel doesn't want to handle condition codes */ 636 break; 637 638 case 14: 639 /* Assist Exception Trap, i.e. floating point exception. */ 640 die_if_kernel("Floating point exception", regs, 0); /* quiet */ 641 __inc_irq_stat(irq_fpassist_count); 642 handle_fpe(regs); 643 return; 644 645 case 15: 646 /* Data TLB miss fault/Data page fault */ 647 fallthrough; 648 case 16: 649 /* Non-access instruction TLB miss fault */ 650 /* The instruction TLB entry needed for the target address of the FIC 651 is absent, and hardware can't find it, so we get to cleanup */ 652 fallthrough; 653 case 17: 654 /* Non-access data TLB miss fault/Non-access data page fault */ 655 /* FIXME: 656 Still need to add slow path emulation code here! 657 If the insn used a non-shadow register, then the tlb 658 handlers could not have their side-effect (e.g. probe 659 writing to a target register) emulated since rfir would 660 erase the changes to said register. Instead we have to 661 setup everything, call this function we are in, and emulate 662 by hand. Technically we need to emulate: 663 fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw 664 */ 665 fault_address = regs->ior; 666 fault_space = regs->isr; 667 break; 668 669 case 18: 670 /* PCXS only -- later cpu's split this into types 26,27 & 28 */ 671 /* Check for unaligned access */ 672 if (check_unaligned(regs)) { 673 handle_unaligned(regs); 674 return; 675 } 676 fallthrough; 677 case 26: 678 /* PCXL: Data memory access rights trap */ 679 fault_address = regs->ior; 680 fault_space = regs->isr; 681 break; 682 683 case 19: 684 /* Data memory break trap */ 685 regs->gr[0] |= PSW_X; /* So we can single-step over the trap */ 686 fallthrough; 687 case 21: 688 /* Page reference trap */ 689 handle_gdb_break(regs, TRAP_HWBKPT); 690 return; 691 692 case 25: 693 /* Taken branch trap */ 694 regs->gr[0] &= ~PSW_T; 695 if (user_space(regs)) 696 handle_gdb_break(regs, TRAP_BRANCH); 697 /* else this must be the start of a syscall - just let it 698 * run. 699 */ 700 return; 701 702 case 7: 703 /* Instruction access rights */ 704 /* PCXL: Instruction memory protection trap */ 705 706 /* 707 * This could be caused by either: 1) a process attempting 708 * to execute within a vma that does not have execute 709 * permission, or 2) an access rights violation caused by a 710 * flush only translation set up by ptep_get_and_clear(). 711 * So we check the vma permissions to differentiate the two. 712 * If the vma indicates we have execute permission, then 713 * the cause is the latter one. In this case, we need to 714 * call do_page_fault() to fix the problem. 715 */ 716 717 if (user_mode(regs)) { 718 struct vm_area_struct *vma; 719 720 mmap_read_lock(current->mm); 721 vma = find_vma(current->mm,regs->iaoq[0]); 722 if (vma && (regs->iaoq[0] >= vma->vm_start) 723 && (vma->vm_flags & VM_EXEC)) { 724 725 fault_address = regs->iaoq[0]; 726 fault_space = regs->iasq[0]; 727 728 mmap_read_unlock(current->mm); 729 break; /* call do_page_fault() */ 730 } 731 mmap_read_unlock(current->mm); 732 } 733 /* CPU could not fetch instruction, so clear stale IIR value. */ 734 regs->iir = 0xbaadf00d; 735 fallthrough; 736 case 27: 737 /* Data memory protection ID trap */ 738 if (code == 27 && !user_mode(regs) && 739 fixup_exception(regs)) 740 return; 741 742 die_if_kernel("Protection id trap", regs, code); 743 force_sig_fault(SIGSEGV, SEGV_MAPERR, 744 (code == 7)? 745 ((void __user *) regs->iaoq[0]) : 746 ((void __user *) regs->ior)); 747 return; 748 749 case 28: 750 /* Unaligned data reference trap */ 751 handle_unaligned(regs); 752 return; 753 754 default: 755 if (user_mode(regs)) { 756 parisc_printk_ratelimited(0, regs, KERN_DEBUG 757 "handle_interruption() pid=%d command='%s'\n", 758 task_pid_nr(current), current->comm); 759 /* SIGBUS, for lack of a better one. */ 760 force_sig_fault(SIGBUS, BUS_OBJERR, 761 (void __user *)regs->ior); 762 return; 763 } 764 pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC); 765 766 parisc_terminate("Unexpected interruption", regs, code, 0); 767 /* NOT REACHED */ 768 } 769 770 if (user_mode(regs)) { 771 if ((fault_space >> SPACEID_SHIFT) != (regs->sr[7] >> SPACEID_SHIFT)) { 772 parisc_printk_ratelimited(0, regs, KERN_DEBUG 773 "User fault %d on space 0x%08lx, pid=%d command='%s'\n", 774 code, fault_space, 775 task_pid_nr(current), current->comm); 776 force_sig_fault(SIGSEGV, SEGV_MAPERR, 777 (void __user *)regs->ior); 778 return; 779 } 780 } 781 else { 782 783 /* 784 * The kernel should never fault on its own address space, 785 * unless pagefault_disable() was called before. 786 */ 787 788 if (fault_space == 0 && !faulthandler_disabled()) 789 { 790 /* Clean up and return if in exception table. */ 791 if (fixup_exception(regs)) 792 return; 793 /* Clean up and return if handled by kfence. */ 794 if (kfence_handle_page_fault(fault_address, 795 parisc_acctyp(code, regs->iir) == VM_WRITE, regs)) 796 return; 797 pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC); 798 parisc_terminate("Kernel Fault", regs, code, fault_address); 799 } 800 } 801 802 do_page_fault(regs, code, fault_address); 803 } 804 805 806 void __init initialize_ivt(const void *iva) 807 { 808 extern const u32 os_hpmc[]; 809 810 int i; 811 u32 check = 0; 812 u32 *ivap; 813 u32 *hpmcp; 814 u32 instr; 815 816 if (strcmp((const char *)iva, "cows can fly")) 817 panic("IVT invalid"); 818 819 ivap = (u32 *)iva; 820 821 for (i = 0; i < 8; i++) 822 *ivap++ = 0; 823 824 /* 825 * Use PDC_INSTR firmware function to get instruction that invokes 826 * PDCE_CHECK in HPMC handler. See programming note at page 1-31 of 827 * the PA 1.1 Firmware Architecture document. 828 */ 829 if (pdc_instr(&instr) == PDC_OK) 830 ivap[0] = instr; 831 832 /* 833 * Rules for the checksum of the HPMC handler: 834 * 1. The IVA does not point to PDC/PDH space (ie: the OS has installed 835 * its own IVA). 836 * 2. The word at IVA + 32 is nonzero. 837 * 3. If Length (IVA + 60) is not zero, then Length (IVA + 60) and 838 * Address (IVA + 56) are word-aligned. 839 * 4. The checksum of the 8 words starting at IVA + 32 plus the sum of 840 * the Length/4 words starting at Address is zero. 841 */ 842 843 /* Setup IVA and compute checksum for HPMC handler */ 844 ivap[6] = (u32)__pa(os_hpmc); 845 846 hpmcp = (u32 *)os_hpmc; 847 848 for (i=0; i<8; i++) 849 check += ivap[i]; 850 851 ivap[5] = -check; 852 pr_debug("initialize_ivt: IVA[6] = 0x%08x\n", ivap[6]); 853 } 854 855 856 /* early_trap_init() is called before we set up kernel mappings and 857 * write-protect the kernel */ 858 void __init early_trap_init(void) 859 { 860 extern const void fault_vector_20; 861 862 #ifndef CONFIG_64BIT 863 extern const void fault_vector_11; 864 initialize_ivt(&fault_vector_11); 865 #endif 866 867 initialize_ivt(&fault_vector_20); 868 } 869