1 /* 2 * arch/s390/kernel/traps.c 3 * 4 * S390 version 5 * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation 6 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), 7 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), 8 * 9 * Derived from "arch/i386/kernel/traps.c" 10 * Copyright (C) 1991, 1992 Linus Torvalds 11 */ 12 13 /* 14 * 'Traps.c' handles hardware traps and faults after we have saved some 15 * state in 'asm.s'. 16 */ 17 #include <linux/sched.h> 18 #include <linux/kernel.h> 19 #include <linux/string.h> 20 #include <linux/errno.h> 21 #include <linux/ptrace.h> 22 #include <linux/timer.h> 23 #include <linux/mm.h> 24 #include <linux/smp.h> 25 #include <linux/init.h> 26 #include <linux/interrupt.h> 27 #include <linux/seq_file.h> 28 #include <linux/delay.h> 29 #include <linux/module.h> 30 #include <linux/kdebug.h> 31 #include <linux/kallsyms.h> 32 #include <linux/reboot.h> 33 #include <linux/kprobes.h> 34 #include <linux/bug.h> 35 #include <linux/utsname.h> 36 #include <asm/system.h> 37 #include <asm/uaccess.h> 38 #include <asm/io.h> 39 #include <linux/atomic.h> 40 #include <asm/mathemu.h> 41 #include <asm/cpcmd.h> 42 #include <asm/lowcore.h> 43 #include <asm/debug.h> 44 #include "entry.h" 45 46 void (*pgm_check_table[128])(struct pt_regs *, long, unsigned long); 47 48 int show_unhandled_signals; 49 50 #define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; }) 51 52 #ifndef CONFIG_64BIT 53 #define LONG "%08lx " 54 #define FOURLONG "%08lx %08lx %08lx %08lx\n" 55 static int kstack_depth_to_print = 12; 56 #else /* CONFIG_64BIT */ 57 #define LONG "%016lx " 58 #define FOURLONG "%016lx %016lx %016lx %016lx\n" 59 static int kstack_depth_to_print = 20; 60 #endif /* CONFIG_64BIT */ 61 62 /* 63 * For show_trace we have tree different stack to consider: 64 * - the panic stack which is used if the kernel stack has overflown 65 * - the asynchronous interrupt stack (cpu related) 66 * - the synchronous kernel stack (process related) 67 * The stack trace can start at any of the three stack and can potentially 68 * touch all of them. The order is: panic stack, async stack, sync stack. 69 */ 70 static unsigned long 71 __show_trace(unsigned long sp, unsigned long low, unsigned long high) 72 { 73 struct stack_frame *sf; 74 struct pt_regs *regs; 75 76 while (1) { 77 sp = sp & PSW_ADDR_INSN; 78 if (sp < low || sp > high - sizeof(*sf)) 79 return sp; 80 sf = (struct stack_frame *) sp; 81 printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN); 82 print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN); 83 /* Follow the backchain. */ 84 while (1) { 85 low = sp; 86 sp = sf->back_chain & PSW_ADDR_INSN; 87 if (!sp) 88 break; 89 if (sp <= low || sp > high - sizeof(*sf)) 90 return sp; 91 sf = (struct stack_frame *) sp; 92 printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN); 93 print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN); 94 } 95 /* Zero backchain detected, check for interrupt frame. */ 96 sp = (unsigned long) (sf + 1); 97 if (sp <= low || sp > high - sizeof(*regs)) 98 return sp; 99 regs = (struct pt_regs *) sp; 100 printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN); 101 print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN); 102 low = sp; 103 sp = regs->gprs[15]; 104 } 105 } 106 107 static void show_trace(struct task_struct *task, unsigned long *stack) 108 { 109 register unsigned long __r15 asm ("15"); 110 unsigned long sp; 111 112 sp = (unsigned long) stack; 113 if (!sp) 114 sp = task ? task->thread.ksp : __r15; 115 printk("Call Trace:\n"); 116 #ifdef CONFIG_CHECK_STACK 117 sp = __show_trace(sp, S390_lowcore.panic_stack - 4096, 118 S390_lowcore.panic_stack); 119 #endif 120 sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE, 121 S390_lowcore.async_stack); 122 if (task) 123 __show_trace(sp, (unsigned long) task_stack_page(task), 124 (unsigned long) task_stack_page(task) + THREAD_SIZE); 125 else 126 __show_trace(sp, S390_lowcore.thread_info, 127 S390_lowcore.thread_info + THREAD_SIZE); 128 if (!task) 129 task = current; 130 debug_show_held_locks(task); 131 } 132 133 void show_stack(struct task_struct *task, unsigned long *sp) 134 { 135 register unsigned long * __r15 asm ("15"); 136 unsigned long *stack; 137 int i; 138 139 if (!sp) 140 stack = task ? (unsigned long *) task->thread.ksp : __r15; 141 else 142 stack = sp; 143 144 for (i = 0; i < kstack_depth_to_print; i++) { 145 if (((addr_t) stack & (THREAD_SIZE-1)) == 0) 146 break; 147 if (i && ((i * sizeof (long) % 32) == 0)) 148 printk("\n "); 149 printk(LONG, *stack++); 150 } 151 printk("\n"); 152 show_trace(task, sp); 153 } 154 155 static void show_last_breaking_event(struct pt_regs *regs) 156 { 157 #ifdef CONFIG_64BIT 158 printk("Last Breaking-Event-Address:\n"); 159 printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN); 160 print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN); 161 #endif 162 } 163 164 /* 165 * The architecture-independent dump_stack generator 166 */ 167 void dump_stack(void) 168 { 169 printk("CPU: %d %s %s %.*s\n", 170 task_thread_info(current)->cpu, print_tainted(), 171 init_utsname()->release, 172 (int)strcspn(init_utsname()->version, " "), 173 init_utsname()->version); 174 printk("Process %s (pid: %d, task: %p, ksp: %p)\n", 175 current->comm, current->pid, current, 176 (void *) current->thread.ksp); 177 show_stack(NULL, NULL); 178 } 179 EXPORT_SYMBOL(dump_stack); 180 181 static inline int mask_bits(struct pt_regs *regs, unsigned long bits) 182 { 183 return (regs->psw.mask & bits) / ((~bits + 1) & bits); 184 } 185 186 void show_registers(struct pt_regs *regs) 187 { 188 char *mode; 189 190 mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl"; 191 printk("%s PSW : %p %p", 192 mode, (void *) regs->psw.mask, 193 (void *) regs->psw.addr); 194 print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN); 195 printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x " 196 "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER), 197 mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO), 198 mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY), 199 mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT), 200 mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC), 201 mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM)); 202 #ifdef CONFIG_64BIT 203 printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA)); 204 #endif 205 printk("\n%s GPRS: " FOURLONG, mode, 206 regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]); 207 printk(" " FOURLONG, 208 regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]); 209 printk(" " FOURLONG, 210 regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]); 211 printk(" " FOURLONG, 212 regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]); 213 214 show_code(regs); 215 } 216 217 void show_regs(struct pt_regs *regs) 218 { 219 print_modules(); 220 printk("CPU: %d %s %s %.*s\n", 221 task_thread_info(current)->cpu, print_tainted(), 222 init_utsname()->release, 223 (int)strcspn(init_utsname()->version, " "), 224 init_utsname()->version); 225 printk("Process %s (pid: %d, task: %p, ksp: %p)\n", 226 current->comm, current->pid, current, 227 (void *) current->thread.ksp); 228 show_registers(regs); 229 /* Show stack backtrace if pt_regs is from kernel mode */ 230 if (!(regs->psw.mask & PSW_MASK_PSTATE)) 231 show_trace(NULL, (unsigned long *) regs->gprs[15]); 232 show_last_breaking_event(regs); 233 } 234 235 static DEFINE_SPINLOCK(die_lock); 236 237 void die(const char * str, struct pt_regs * regs, long err) 238 { 239 static int die_counter; 240 241 oops_enter(); 242 debug_stop_all(); 243 console_verbose(); 244 spin_lock_irq(&die_lock); 245 bust_spinlocks(1); 246 printk("%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter); 247 #ifdef CONFIG_PREEMPT 248 printk("PREEMPT "); 249 #endif 250 #ifdef CONFIG_SMP 251 printk("SMP "); 252 #endif 253 #ifdef CONFIG_DEBUG_PAGEALLOC 254 printk("DEBUG_PAGEALLOC"); 255 #endif 256 printk("\n"); 257 notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV); 258 show_regs(regs); 259 bust_spinlocks(0); 260 add_taint(TAINT_DIE); 261 spin_unlock_irq(&die_lock); 262 if (in_interrupt()) 263 panic("Fatal exception in interrupt"); 264 if (panic_on_oops) 265 panic("Fatal exception: panic_on_oops"); 266 oops_exit(); 267 do_exit(SIGSEGV); 268 } 269 270 static void inline report_user_fault(struct pt_regs *regs, long int_code, 271 int signr) 272 { 273 if ((task_pid_nr(current) > 1) && !show_unhandled_signals) 274 return; 275 if (!unhandled_signal(current, signr)) 276 return; 277 if (!printk_ratelimit()) 278 return; 279 printk("User process fault: interruption code 0x%lX ", int_code); 280 print_vma_addr("in ", regs->psw.addr & PSW_ADDR_INSN); 281 printk("\n"); 282 show_regs(regs); 283 } 284 285 int is_valid_bugaddr(unsigned long addr) 286 { 287 return 1; 288 } 289 290 static inline void __kprobes do_trap(long pgm_int_code, int signr, char *str, 291 struct pt_regs *regs, siginfo_t *info) 292 { 293 if (notify_die(DIE_TRAP, str, regs, pgm_int_code, 294 pgm_int_code, signr) == NOTIFY_STOP) 295 return; 296 297 if (regs->psw.mask & PSW_MASK_PSTATE) { 298 struct task_struct *tsk = current; 299 300 tsk->thread.trap_no = pgm_int_code & 0xffff; 301 force_sig_info(signr, info, tsk); 302 report_user_fault(regs, pgm_int_code, signr); 303 } else { 304 const struct exception_table_entry *fixup; 305 fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN); 306 if (fixup) 307 regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE; 308 else { 309 enum bug_trap_type btt; 310 311 btt = report_bug(regs->psw.addr & PSW_ADDR_INSN, regs); 312 if (btt == BUG_TRAP_TYPE_WARN) 313 return; 314 die(str, regs, pgm_int_code); 315 } 316 } 317 } 318 319 static inline void __user *get_psw_address(struct pt_regs *regs, 320 long pgm_int_code) 321 { 322 return (void __user *) 323 ((regs->psw.addr - (pgm_int_code >> 16)) & PSW_ADDR_INSN); 324 } 325 326 void __kprobes do_per_trap(struct pt_regs *regs) 327 { 328 siginfo_t info; 329 330 if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, SIGTRAP) == NOTIFY_STOP) 331 return; 332 if (!current->ptrace) 333 return; 334 info.si_signo = SIGTRAP; 335 info.si_errno = 0; 336 info.si_code = TRAP_HWBKPT; 337 info.si_addr = 338 (void __force __user *) current->thread.per_event.address; 339 force_sig_info(SIGTRAP, &info, current); 340 } 341 342 static void default_trap_handler(struct pt_regs *regs, long pgm_int_code, 343 unsigned long trans_exc_code) 344 { 345 if (regs->psw.mask & PSW_MASK_PSTATE) { 346 report_user_fault(regs, pgm_int_code, SIGSEGV); 347 do_exit(SIGSEGV); 348 } else 349 die("Unknown program exception", regs, pgm_int_code); 350 } 351 352 #define DO_ERROR_INFO(name, signr, sicode, str) \ 353 static void name(struct pt_regs *regs, long pgm_int_code, \ 354 unsigned long trans_exc_code) \ 355 { \ 356 siginfo_t info; \ 357 info.si_signo = signr; \ 358 info.si_errno = 0; \ 359 info.si_code = sicode; \ 360 info.si_addr = get_psw_address(regs, pgm_int_code); \ 361 do_trap(pgm_int_code, signr, str, regs, &info); \ 362 } 363 364 DO_ERROR_INFO(addressing_exception, SIGILL, ILL_ILLADR, 365 "addressing exception") 366 DO_ERROR_INFO(execute_exception, SIGILL, ILL_ILLOPN, 367 "execute exception") 368 DO_ERROR_INFO(divide_exception, SIGFPE, FPE_INTDIV, 369 "fixpoint divide exception") 370 DO_ERROR_INFO(overflow_exception, SIGFPE, FPE_INTOVF, 371 "fixpoint overflow exception") 372 DO_ERROR_INFO(hfp_overflow_exception, SIGFPE, FPE_FLTOVF, 373 "HFP overflow exception") 374 DO_ERROR_INFO(hfp_underflow_exception, SIGFPE, FPE_FLTUND, 375 "HFP underflow exception") 376 DO_ERROR_INFO(hfp_significance_exception, SIGFPE, FPE_FLTRES, 377 "HFP significance exception") 378 DO_ERROR_INFO(hfp_divide_exception, SIGFPE, FPE_FLTDIV, 379 "HFP divide exception") 380 DO_ERROR_INFO(hfp_sqrt_exception, SIGFPE, FPE_FLTINV, 381 "HFP square root exception") 382 DO_ERROR_INFO(operand_exception, SIGILL, ILL_ILLOPN, 383 "operand exception") 384 DO_ERROR_INFO(privileged_op, SIGILL, ILL_PRVOPC, 385 "privileged operation") 386 DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN, 387 "special operation exception") 388 DO_ERROR_INFO(translation_exception, SIGILL, ILL_ILLOPN, 389 "translation exception") 390 391 static inline void do_fp_trap(struct pt_regs *regs, void __user *location, 392 int fpc, long pgm_int_code) 393 { 394 siginfo_t si; 395 396 si.si_signo = SIGFPE; 397 si.si_errno = 0; 398 si.si_addr = location; 399 si.si_code = 0; 400 /* FPC[2] is Data Exception Code */ 401 if ((fpc & 0x00000300) == 0) { 402 /* bits 6 and 7 of DXC are 0 iff IEEE exception */ 403 if (fpc & 0x8000) /* invalid fp operation */ 404 si.si_code = FPE_FLTINV; 405 else if (fpc & 0x4000) /* div by 0 */ 406 si.si_code = FPE_FLTDIV; 407 else if (fpc & 0x2000) /* overflow */ 408 si.si_code = FPE_FLTOVF; 409 else if (fpc & 0x1000) /* underflow */ 410 si.si_code = FPE_FLTUND; 411 else if (fpc & 0x0800) /* inexact */ 412 si.si_code = FPE_FLTRES; 413 } 414 do_trap(pgm_int_code, SIGFPE, 415 "floating point exception", regs, &si); 416 } 417 418 static void __kprobes illegal_op(struct pt_regs *regs, long pgm_int_code, 419 unsigned long trans_exc_code) 420 { 421 siginfo_t info; 422 __u8 opcode[6]; 423 __u16 __user *location; 424 int signal = 0; 425 426 location = get_psw_address(regs, pgm_int_code); 427 428 if (regs->psw.mask & PSW_MASK_PSTATE) { 429 if (get_user(*((__u16 *) opcode), (__u16 __user *) location)) 430 return; 431 if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) { 432 if (current->ptrace) { 433 info.si_signo = SIGTRAP; 434 info.si_errno = 0; 435 info.si_code = TRAP_BRKPT; 436 info.si_addr = location; 437 force_sig_info(SIGTRAP, &info, current); 438 } else 439 signal = SIGILL; 440 #ifdef CONFIG_MATHEMU 441 } else if (opcode[0] == 0xb3) { 442 if (get_user(*((__u16 *) (opcode+2)), location+1)) 443 return; 444 signal = math_emu_b3(opcode, regs); 445 } else if (opcode[0] == 0xed) { 446 if (get_user(*((__u32 *) (opcode+2)), 447 (__u32 __user *)(location+1))) 448 return; 449 signal = math_emu_ed(opcode, regs); 450 } else if (*((__u16 *) opcode) == 0xb299) { 451 if (get_user(*((__u16 *) (opcode+2)), location+1)) 452 return; 453 signal = math_emu_srnm(opcode, regs); 454 } else if (*((__u16 *) opcode) == 0xb29c) { 455 if (get_user(*((__u16 *) (opcode+2)), location+1)) 456 return; 457 signal = math_emu_stfpc(opcode, regs); 458 } else if (*((__u16 *) opcode) == 0xb29d) { 459 if (get_user(*((__u16 *) (opcode+2)), location+1)) 460 return; 461 signal = math_emu_lfpc(opcode, regs); 462 #endif 463 } else 464 signal = SIGILL; 465 } else { 466 /* 467 * If we get an illegal op in kernel mode, send it through the 468 * kprobes notifier. If kprobes doesn't pick it up, SIGILL 469 */ 470 if (notify_die(DIE_BPT, "bpt", regs, pgm_int_code, 471 3, SIGTRAP) != NOTIFY_STOP) 472 signal = SIGILL; 473 } 474 475 #ifdef CONFIG_MATHEMU 476 if (signal == SIGFPE) 477 do_fp_trap(regs, location, 478 current->thread.fp_regs.fpc, pgm_int_code); 479 else if (signal == SIGSEGV) { 480 info.si_signo = signal; 481 info.si_errno = 0; 482 info.si_code = SEGV_MAPERR; 483 info.si_addr = (void __user *) location; 484 do_trap(pgm_int_code, signal, 485 "user address fault", regs, &info); 486 } else 487 #endif 488 if (signal) { 489 info.si_signo = signal; 490 info.si_errno = 0; 491 info.si_code = ILL_ILLOPC; 492 info.si_addr = (void __user *) location; 493 do_trap(pgm_int_code, signal, 494 "illegal operation", regs, &info); 495 } 496 } 497 498 499 #ifdef CONFIG_MATHEMU 500 void specification_exception(struct pt_regs *regs, long pgm_int_code, 501 unsigned long trans_exc_code) 502 { 503 __u8 opcode[6]; 504 __u16 __user *location = NULL; 505 int signal = 0; 506 507 location = (__u16 __user *) get_psw_address(regs, pgm_int_code); 508 509 if (regs->psw.mask & PSW_MASK_PSTATE) { 510 get_user(*((__u16 *) opcode), location); 511 switch (opcode[0]) { 512 case 0x28: /* LDR Rx,Ry */ 513 signal = math_emu_ldr(opcode); 514 break; 515 case 0x38: /* LER Rx,Ry */ 516 signal = math_emu_ler(opcode); 517 break; 518 case 0x60: /* STD R,D(X,B) */ 519 get_user(*((__u16 *) (opcode+2)), location+1); 520 signal = math_emu_std(opcode, regs); 521 break; 522 case 0x68: /* LD R,D(X,B) */ 523 get_user(*((__u16 *) (opcode+2)), location+1); 524 signal = math_emu_ld(opcode, regs); 525 break; 526 case 0x70: /* STE R,D(X,B) */ 527 get_user(*((__u16 *) (opcode+2)), location+1); 528 signal = math_emu_ste(opcode, regs); 529 break; 530 case 0x78: /* LE R,D(X,B) */ 531 get_user(*((__u16 *) (opcode+2)), location+1); 532 signal = math_emu_le(opcode, regs); 533 break; 534 default: 535 signal = SIGILL; 536 break; 537 } 538 } else 539 signal = SIGILL; 540 541 if (signal == SIGFPE) 542 do_fp_trap(regs, location, 543 current->thread.fp_regs.fpc, pgm_int_code); 544 else if (signal) { 545 siginfo_t info; 546 info.si_signo = signal; 547 info.si_errno = 0; 548 info.si_code = ILL_ILLOPN; 549 info.si_addr = location; 550 do_trap(pgm_int_code, signal, 551 "specification exception", regs, &info); 552 } 553 } 554 #else 555 DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN, 556 "specification exception"); 557 #endif 558 559 static void data_exception(struct pt_regs *regs, long pgm_int_code, 560 unsigned long trans_exc_code) 561 { 562 __u16 __user *location; 563 int signal = 0; 564 565 location = get_psw_address(regs, pgm_int_code); 566 567 if (MACHINE_HAS_IEEE) 568 asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc)); 569 570 #ifdef CONFIG_MATHEMU 571 else if (regs->psw.mask & PSW_MASK_PSTATE) { 572 __u8 opcode[6]; 573 get_user(*((__u16 *) opcode), location); 574 switch (opcode[0]) { 575 case 0x28: /* LDR Rx,Ry */ 576 signal = math_emu_ldr(opcode); 577 break; 578 case 0x38: /* LER Rx,Ry */ 579 signal = math_emu_ler(opcode); 580 break; 581 case 0x60: /* STD R,D(X,B) */ 582 get_user(*((__u16 *) (opcode+2)), location+1); 583 signal = math_emu_std(opcode, regs); 584 break; 585 case 0x68: /* LD R,D(X,B) */ 586 get_user(*((__u16 *) (opcode+2)), location+1); 587 signal = math_emu_ld(opcode, regs); 588 break; 589 case 0x70: /* STE R,D(X,B) */ 590 get_user(*((__u16 *) (opcode+2)), location+1); 591 signal = math_emu_ste(opcode, regs); 592 break; 593 case 0x78: /* LE R,D(X,B) */ 594 get_user(*((__u16 *) (opcode+2)), location+1); 595 signal = math_emu_le(opcode, regs); 596 break; 597 case 0xb3: 598 get_user(*((__u16 *) (opcode+2)), location+1); 599 signal = math_emu_b3(opcode, regs); 600 break; 601 case 0xed: 602 get_user(*((__u32 *) (opcode+2)), 603 (__u32 __user *)(location+1)); 604 signal = math_emu_ed(opcode, regs); 605 break; 606 case 0xb2: 607 if (opcode[1] == 0x99) { 608 get_user(*((__u16 *) (opcode+2)), location+1); 609 signal = math_emu_srnm(opcode, regs); 610 } else if (opcode[1] == 0x9c) { 611 get_user(*((__u16 *) (opcode+2)), location+1); 612 signal = math_emu_stfpc(opcode, regs); 613 } else if (opcode[1] == 0x9d) { 614 get_user(*((__u16 *) (opcode+2)), location+1); 615 signal = math_emu_lfpc(opcode, regs); 616 } else 617 signal = SIGILL; 618 break; 619 default: 620 signal = SIGILL; 621 break; 622 } 623 } 624 #endif 625 if (current->thread.fp_regs.fpc & FPC_DXC_MASK) 626 signal = SIGFPE; 627 else 628 signal = SIGILL; 629 if (signal == SIGFPE) 630 do_fp_trap(regs, location, 631 current->thread.fp_regs.fpc, pgm_int_code); 632 else if (signal) { 633 siginfo_t info; 634 info.si_signo = signal; 635 info.si_errno = 0; 636 info.si_code = ILL_ILLOPN; 637 info.si_addr = location; 638 do_trap(pgm_int_code, signal, "data exception", regs, &info); 639 } 640 } 641 642 static void space_switch_exception(struct pt_regs *regs, long pgm_int_code, 643 unsigned long trans_exc_code) 644 { 645 siginfo_t info; 646 647 /* Set user psw back to home space mode. */ 648 if (regs->psw.mask & PSW_MASK_PSTATE) 649 regs->psw.mask |= PSW_ASC_HOME; 650 /* Send SIGILL. */ 651 info.si_signo = SIGILL; 652 info.si_errno = 0; 653 info.si_code = ILL_PRVOPC; 654 info.si_addr = get_psw_address(regs, pgm_int_code); 655 do_trap(pgm_int_code, SIGILL, "space switch event", regs, &info); 656 } 657 658 void __kprobes kernel_stack_overflow(struct pt_regs * regs) 659 { 660 bust_spinlocks(1); 661 printk("Kernel stack overflow.\n"); 662 show_regs(regs); 663 bust_spinlocks(0); 664 panic("Corrupt kernel stack, can't continue."); 665 } 666 667 /* init is done in lowcore.S and head.S */ 668 669 void __init trap_init(void) 670 { 671 int i; 672 673 for (i = 0; i < 128; i++) 674 pgm_check_table[i] = &default_trap_handler; 675 pgm_check_table[1] = &illegal_op; 676 pgm_check_table[2] = &privileged_op; 677 pgm_check_table[3] = &execute_exception; 678 pgm_check_table[4] = &do_protection_exception; 679 pgm_check_table[5] = &addressing_exception; 680 pgm_check_table[6] = &specification_exception; 681 pgm_check_table[7] = &data_exception; 682 pgm_check_table[8] = &overflow_exception; 683 pgm_check_table[9] = ÷_exception; 684 pgm_check_table[0x0A] = &overflow_exception; 685 pgm_check_table[0x0B] = ÷_exception; 686 pgm_check_table[0x0C] = &hfp_overflow_exception; 687 pgm_check_table[0x0D] = &hfp_underflow_exception; 688 pgm_check_table[0x0E] = &hfp_significance_exception; 689 pgm_check_table[0x0F] = &hfp_divide_exception; 690 pgm_check_table[0x10] = &do_dat_exception; 691 pgm_check_table[0x11] = &do_dat_exception; 692 pgm_check_table[0x12] = &translation_exception; 693 pgm_check_table[0x13] = &special_op_exception; 694 #ifdef CONFIG_64BIT 695 pgm_check_table[0x38] = &do_asce_exception; 696 pgm_check_table[0x39] = &do_dat_exception; 697 pgm_check_table[0x3A] = &do_dat_exception; 698 pgm_check_table[0x3B] = &do_dat_exception; 699 #endif /* CONFIG_64BIT */ 700 pgm_check_table[0x15] = &operand_exception; 701 pgm_check_table[0x1C] = &space_switch_exception; 702 pgm_check_table[0x1D] = &hfp_sqrt_exception; 703 /* Enable machine checks early. */ 704 local_mcck_enable(); 705 } 706