1 /* 2 * arch/s390/kernel/traps.c 3 * 4 * S390 version 5 * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation 6 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), 7 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), 8 * 9 * Derived from "arch/i386/kernel/traps.c" 10 * Copyright (C) 1991, 1992 Linus Torvalds 11 */ 12 13 /* 14 * 'Traps.c' handles hardware traps and faults after we have saved some 15 * state in 'asm.s'. 16 */ 17 #include <linux/sched.h> 18 #include <linux/kernel.h> 19 #include <linux/string.h> 20 #include <linux/errno.h> 21 #include <linux/ptrace.h> 22 #include <linux/timer.h> 23 #include <linux/mm.h> 24 #include <linux/smp.h> 25 #include <linux/init.h> 26 #include <linux/interrupt.h> 27 #include <linux/delay.h> 28 #include <linux/module.h> 29 #include <linux/kdebug.h> 30 #include <linux/kallsyms.h> 31 #include <linux/reboot.h> 32 #include <linux/kprobes.h> 33 #include <linux/bug.h> 34 #include <asm/system.h> 35 #include <asm/uaccess.h> 36 #include <asm/io.h> 37 #include <asm/atomic.h> 38 #include <asm/mathemu.h> 39 #include <asm/cpcmd.h> 40 #include <asm/s390_ext.h> 41 #include <asm/lowcore.h> 42 #include <asm/debug.h> 43 44 /* Called from entry.S only */ 45 extern void handle_per_exception(struct pt_regs *regs); 46 47 typedef void pgm_check_handler_t(struct pt_regs *, long); 48 pgm_check_handler_t *pgm_check_table[128]; 49 50 #ifdef CONFIG_SYSCTL 51 #ifdef CONFIG_PROCESS_DEBUG 52 int sysctl_userprocess_debug = 1; 53 #else 54 int sysctl_userprocess_debug = 0; 55 #endif 56 #endif 57 58 extern pgm_check_handler_t do_protection_exception; 59 extern pgm_check_handler_t do_dat_exception; 60 extern pgm_check_handler_t do_monitor_call; 61 62 #define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; }) 63 64 #ifndef CONFIG_64BIT 65 #define FOURLONG "%08lx %08lx %08lx %08lx\n" 66 static int kstack_depth_to_print = 12; 67 #else /* CONFIG_64BIT */ 68 #define FOURLONG "%016lx %016lx %016lx %016lx\n" 69 static int kstack_depth_to_print = 20; 70 #endif /* CONFIG_64BIT */ 71 72 /* 73 * For show_trace we have tree different stack to consider: 74 * - the panic stack which is used if the kernel stack has overflown 75 * - the asynchronous interrupt stack (cpu related) 76 * - the synchronous kernel stack (process related) 77 * The stack trace can start at any of the three stack and can potentially 78 * touch all of them. The order is: panic stack, async stack, sync stack. 79 */ 80 static unsigned long 81 __show_trace(unsigned long sp, unsigned long low, unsigned long high) 82 { 83 struct stack_frame *sf; 84 struct pt_regs *regs; 85 86 while (1) { 87 sp = sp & PSW_ADDR_INSN; 88 if (sp < low || sp > high - sizeof(*sf)) 89 return sp; 90 sf = (struct stack_frame *) sp; 91 printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN); 92 print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN); 93 /* Follow the backchain. */ 94 while (1) { 95 low = sp; 96 sp = sf->back_chain & PSW_ADDR_INSN; 97 if (!sp) 98 break; 99 if (sp <= low || sp > high - sizeof(*sf)) 100 return sp; 101 sf = (struct stack_frame *) sp; 102 printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN); 103 print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN); 104 } 105 /* Zero backchain detected, check for interrupt frame. */ 106 sp = (unsigned long) (sf + 1); 107 if (sp <= low || sp > high - sizeof(*regs)) 108 return sp; 109 regs = (struct pt_regs *) sp; 110 printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN); 111 print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN); 112 low = sp; 113 sp = regs->gprs[15]; 114 } 115 } 116 117 void show_trace(struct task_struct *task, unsigned long *stack) 118 { 119 register unsigned long __r15 asm ("15"); 120 unsigned long sp; 121 122 sp = (unsigned long) stack; 123 if (!sp) 124 sp = task ? task->thread.ksp : __r15; 125 printk("Call Trace:\n"); 126 #ifdef CONFIG_CHECK_STACK 127 sp = __show_trace(sp, S390_lowcore.panic_stack - 4096, 128 S390_lowcore.panic_stack); 129 #endif 130 sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE, 131 S390_lowcore.async_stack); 132 if (task) 133 __show_trace(sp, (unsigned long) task_stack_page(task), 134 (unsigned long) task_stack_page(task) + THREAD_SIZE); 135 else 136 __show_trace(sp, S390_lowcore.thread_info, 137 S390_lowcore.thread_info + THREAD_SIZE); 138 printk("\n"); 139 if (!task) 140 task = current; 141 debug_show_held_locks(task); 142 } 143 144 void show_stack(struct task_struct *task, unsigned long *sp) 145 { 146 register unsigned long * __r15 asm ("15"); 147 unsigned long *stack; 148 int i; 149 150 if (!sp) 151 stack = task ? (unsigned long *) task->thread.ksp : __r15; 152 else 153 stack = sp; 154 155 for (i = 0; i < kstack_depth_to_print; i++) { 156 if (((addr_t) stack & (THREAD_SIZE-1)) == 0) 157 break; 158 if (i && ((i * sizeof (long) % 32) == 0)) 159 printk("\n "); 160 printk("%p ", (void *)*stack++); 161 } 162 printk("\n"); 163 show_trace(task, sp); 164 } 165 166 /* 167 * The architecture-independent dump_stack generator 168 */ 169 void dump_stack(void) 170 { 171 show_stack(NULL, NULL); 172 } 173 174 EXPORT_SYMBOL(dump_stack); 175 176 static inline int mask_bits(struct pt_regs *regs, unsigned long bits) 177 { 178 return (regs->psw.mask & bits) / ((~bits + 1) & bits); 179 } 180 181 void show_registers(struct pt_regs *regs) 182 { 183 char *mode; 184 185 mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl"; 186 printk("%s PSW : %p %p", 187 mode, (void *) regs->psw.mask, 188 (void *) regs->psw.addr); 189 print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN); 190 printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x " 191 "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER), 192 mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO), 193 mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY), 194 mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT), 195 mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC), 196 mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM)); 197 #ifdef CONFIG_64BIT 198 printk(" EA:%x", mask_bits(regs, PSW_BASE_BITS)); 199 #endif 200 printk("\n%s GPRS: " FOURLONG, mode, 201 regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]); 202 printk(" " FOURLONG, 203 regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]); 204 printk(" " FOURLONG, 205 regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]); 206 printk(" " FOURLONG, 207 regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]); 208 209 show_code(regs); 210 } 211 212 /* This is called from fs/proc/array.c */ 213 char *task_show_regs(struct task_struct *task, char *buffer) 214 { 215 struct pt_regs *regs; 216 217 regs = task_pt_regs(task); 218 buffer += sprintf(buffer, "task: %p, ksp: %p\n", 219 task, (void *)task->thread.ksp); 220 buffer += sprintf(buffer, "User PSW : %p %p\n", 221 (void *) regs->psw.mask, (void *)regs->psw.addr); 222 223 buffer += sprintf(buffer, "User GPRS: " FOURLONG, 224 regs->gprs[0], regs->gprs[1], 225 regs->gprs[2], regs->gprs[3]); 226 buffer += sprintf(buffer, " " FOURLONG, 227 regs->gprs[4], regs->gprs[5], 228 regs->gprs[6], regs->gprs[7]); 229 buffer += sprintf(buffer, " " FOURLONG, 230 regs->gprs[8], regs->gprs[9], 231 regs->gprs[10], regs->gprs[11]); 232 buffer += sprintf(buffer, " " FOURLONG, 233 regs->gprs[12], regs->gprs[13], 234 regs->gprs[14], regs->gprs[15]); 235 buffer += sprintf(buffer, "User ACRS: %08x %08x %08x %08x\n", 236 task->thread.acrs[0], task->thread.acrs[1], 237 task->thread.acrs[2], task->thread.acrs[3]); 238 buffer += sprintf(buffer, " %08x %08x %08x %08x\n", 239 task->thread.acrs[4], task->thread.acrs[5], 240 task->thread.acrs[6], task->thread.acrs[7]); 241 buffer += sprintf(buffer, " %08x %08x %08x %08x\n", 242 task->thread.acrs[8], task->thread.acrs[9], 243 task->thread.acrs[10], task->thread.acrs[11]); 244 buffer += sprintf(buffer, " %08x %08x %08x %08x\n", 245 task->thread.acrs[12], task->thread.acrs[13], 246 task->thread.acrs[14], task->thread.acrs[15]); 247 return buffer; 248 } 249 250 static DEFINE_SPINLOCK(die_lock); 251 252 void die(const char * str, struct pt_regs * regs, long err) 253 { 254 static int die_counter; 255 256 oops_enter(); 257 debug_stop_all(); 258 console_verbose(); 259 spin_lock_irq(&die_lock); 260 bust_spinlocks(1); 261 printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter); 262 print_modules(); 263 show_regs(regs); 264 bust_spinlocks(0); 265 add_taint(TAINT_DIE); 266 spin_unlock_irq(&die_lock); 267 if (in_interrupt()) 268 panic("Fatal exception in interrupt"); 269 if (panic_on_oops) 270 panic("Fatal exception: panic_on_oops"); 271 oops_exit(); 272 do_exit(SIGSEGV); 273 } 274 275 static void inline 276 report_user_fault(long interruption_code, struct pt_regs *regs) 277 { 278 #if defined(CONFIG_SYSCTL) 279 if (!sysctl_userprocess_debug) 280 return; 281 #endif 282 #if defined(CONFIG_SYSCTL) || defined(CONFIG_PROCESS_DEBUG) 283 printk("User process fault: interruption code 0x%lX\n", 284 interruption_code); 285 show_regs(regs); 286 #endif 287 } 288 289 int is_valid_bugaddr(unsigned long addr) 290 { 291 return 1; 292 } 293 294 static void __kprobes inline do_trap(long interruption_code, int signr, 295 char *str, struct pt_regs *regs, 296 siginfo_t *info) 297 { 298 /* 299 * We got all needed information from the lowcore and can 300 * now safely switch on interrupts. 301 */ 302 if (regs->psw.mask & PSW_MASK_PSTATE) 303 local_irq_enable(); 304 305 if (notify_die(DIE_TRAP, str, regs, interruption_code, 306 interruption_code, signr) == NOTIFY_STOP) 307 return; 308 309 if (regs->psw.mask & PSW_MASK_PSTATE) { 310 struct task_struct *tsk = current; 311 312 tsk->thread.trap_no = interruption_code & 0xffff; 313 force_sig_info(signr, info, tsk); 314 report_user_fault(interruption_code, regs); 315 } else { 316 const struct exception_table_entry *fixup; 317 fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN); 318 if (fixup) 319 regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE; 320 else { 321 enum bug_trap_type btt; 322 323 btt = report_bug(regs->psw.addr & PSW_ADDR_INSN, regs); 324 if (btt == BUG_TRAP_TYPE_WARN) 325 return; 326 die(str, regs, interruption_code); 327 } 328 } 329 } 330 331 static inline void __user *get_check_address(struct pt_regs *regs) 332 { 333 return (void __user *)((regs->psw.addr-S390_lowcore.pgm_ilc) & PSW_ADDR_INSN); 334 } 335 336 void __kprobes do_single_step(struct pt_regs *regs) 337 { 338 if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, 339 SIGTRAP) == NOTIFY_STOP){ 340 return; 341 } 342 if ((current->ptrace & PT_PTRACED) != 0) 343 force_sig(SIGTRAP, current); 344 } 345 346 static void default_trap_handler(struct pt_regs * regs, long interruption_code) 347 { 348 if (regs->psw.mask & PSW_MASK_PSTATE) { 349 local_irq_enable(); 350 do_exit(SIGSEGV); 351 report_user_fault(interruption_code, regs); 352 } else 353 die("Unknown program exception", regs, interruption_code); 354 } 355 356 #define DO_ERROR_INFO(signr, str, name, sicode, siaddr) \ 357 static void name(struct pt_regs * regs, long interruption_code) \ 358 { \ 359 siginfo_t info; \ 360 info.si_signo = signr; \ 361 info.si_errno = 0; \ 362 info.si_code = sicode; \ 363 info.si_addr = siaddr; \ 364 do_trap(interruption_code, signr, str, regs, &info); \ 365 } 366 367 DO_ERROR_INFO(SIGILL, "addressing exception", addressing_exception, 368 ILL_ILLADR, get_check_address(regs)) 369 DO_ERROR_INFO(SIGILL, "execute exception", execute_exception, 370 ILL_ILLOPN, get_check_address(regs)) 371 DO_ERROR_INFO(SIGFPE, "fixpoint divide exception", divide_exception, 372 FPE_INTDIV, get_check_address(regs)) 373 DO_ERROR_INFO(SIGFPE, "fixpoint overflow exception", overflow_exception, 374 FPE_INTOVF, get_check_address(regs)) 375 DO_ERROR_INFO(SIGFPE, "HFP overflow exception", hfp_overflow_exception, 376 FPE_FLTOVF, get_check_address(regs)) 377 DO_ERROR_INFO(SIGFPE, "HFP underflow exception", hfp_underflow_exception, 378 FPE_FLTUND, get_check_address(regs)) 379 DO_ERROR_INFO(SIGFPE, "HFP significance exception", hfp_significance_exception, 380 FPE_FLTRES, get_check_address(regs)) 381 DO_ERROR_INFO(SIGFPE, "HFP divide exception", hfp_divide_exception, 382 FPE_FLTDIV, get_check_address(regs)) 383 DO_ERROR_INFO(SIGFPE, "HFP square root exception", hfp_sqrt_exception, 384 FPE_FLTINV, get_check_address(regs)) 385 DO_ERROR_INFO(SIGILL, "operand exception", operand_exception, 386 ILL_ILLOPN, get_check_address(regs)) 387 DO_ERROR_INFO(SIGILL, "privileged operation", privileged_op, 388 ILL_PRVOPC, get_check_address(regs)) 389 DO_ERROR_INFO(SIGILL, "special operation exception", special_op_exception, 390 ILL_ILLOPN, get_check_address(regs)) 391 DO_ERROR_INFO(SIGILL, "translation exception", translation_exception, 392 ILL_ILLOPN, get_check_address(regs)) 393 394 static inline void 395 do_fp_trap(struct pt_regs *regs, void __user *location, 396 int fpc, long interruption_code) 397 { 398 siginfo_t si; 399 400 si.si_signo = SIGFPE; 401 si.si_errno = 0; 402 si.si_addr = location; 403 si.si_code = 0; 404 /* FPC[2] is Data Exception Code */ 405 if ((fpc & 0x00000300) == 0) { 406 /* bits 6 and 7 of DXC are 0 iff IEEE exception */ 407 if (fpc & 0x8000) /* invalid fp operation */ 408 si.si_code = FPE_FLTINV; 409 else if (fpc & 0x4000) /* div by 0 */ 410 si.si_code = FPE_FLTDIV; 411 else if (fpc & 0x2000) /* overflow */ 412 si.si_code = FPE_FLTOVF; 413 else if (fpc & 0x1000) /* underflow */ 414 si.si_code = FPE_FLTUND; 415 else if (fpc & 0x0800) /* inexact */ 416 si.si_code = FPE_FLTRES; 417 } 418 current->thread.ieee_instruction_pointer = (addr_t) location; 419 do_trap(interruption_code, SIGFPE, 420 "floating point exception", regs, &si); 421 } 422 423 static void illegal_op(struct pt_regs * regs, long interruption_code) 424 { 425 siginfo_t info; 426 __u8 opcode[6]; 427 __u16 __user *location; 428 int signal = 0; 429 430 location = get_check_address(regs); 431 432 /* 433 * We got all needed information from the lowcore and can 434 * now safely switch on interrupts. 435 */ 436 if (regs->psw.mask & PSW_MASK_PSTATE) 437 local_irq_enable(); 438 439 if (regs->psw.mask & PSW_MASK_PSTATE) { 440 if (get_user(*((__u16 *) opcode), (__u16 __user *) location)) 441 return; 442 if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) { 443 if (current->ptrace & PT_PTRACED) 444 force_sig(SIGTRAP, current); 445 else 446 signal = SIGILL; 447 #ifdef CONFIG_MATHEMU 448 } else if (opcode[0] == 0xb3) { 449 if (get_user(*((__u16 *) (opcode+2)), location+1)) 450 return; 451 signal = math_emu_b3(opcode, regs); 452 } else if (opcode[0] == 0xed) { 453 if (get_user(*((__u32 *) (opcode+2)), 454 (__u32 __user *)(location+1))) 455 return; 456 signal = math_emu_ed(opcode, regs); 457 } else if (*((__u16 *) opcode) == 0xb299) { 458 if (get_user(*((__u16 *) (opcode+2)), location+1)) 459 return; 460 signal = math_emu_srnm(opcode, regs); 461 } else if (*((__u16 *) opcode) == 0xb29c) { 462 if (get_user(*((__u16 *) (opcode+2)), location+1)) 463 return; 464 signal = math_emu_stfpc(opcode, regs); 465 } else if (*((__u16 *) opcode) == 0xb29d) { 466 if (get_user(*((__u16 *) (opcode+2)), location+1)) 467 return; 468 signal = math_emu_lfpc(opcode, regs); 469 #endif 470 } else 471 signal = SIGILL; 472 } else { 473 /* 474 * If we get an illegal op in kernel mode, send it through the 475 * kprobes notifier. If kprobes doesn't pick it up, SIGILL 476 */ 477 if (notify_die(DIE_BPT, "bpt", regs, interruption_code, 478 3, SIGTRAP) != NOTIFY_STOP) 479 signal = SIGILL; 480 } 481 482 #ifdef CONFIG_MATHEMU 483 if (signal == SIGFPE) 484 do_fp_trap(regs, location, 485 current->thread.fp_regs.fpc, interruption_code); 486 else if (signal == SIGSEGV) { 487 info.si_signo = signal; 488 info.si_errno = 0; 489 info.si_code = SEGV_MAPERR; 490 info.si_addr = (void __user *) location; 491 do_trap(interruption_code, signal, 492 "user address fault", regs, &info); 493 } else 494 #endif 495 if (signal) { 496 info.si_signo = signal; 497 info.si_errno = 0; 498 info.si_code = ILL_ILLOPC; 499 info.si_addr = (void __user *) location; 500 do_trap(interruption_code, signal, 501 "illegal operation", regs, &info); 502 } 503 } 504 505 506 #ifdef CONFIG_MATHEMU 507 asmlinkage void 508 specification_exception(struct pt_regs * regs, long interruption_code) 509 { 510 __u8 opcode[6]; 511 __u16 __user *location = NULL; 512 int signal = 0; 513 514 location = (__u16 __user *) get_check_address(regs); 515 516 /* 517 * We got all needed information from the lowcore and can 518 * now safely switch on interrupts. 519 */ 520 if (regs->psw.mask & PSW_MASK_PSTATE) 521 local_irq_enable(); 522 523 if (regs->psw.mask & PSW_MASK_PSTATE) { 524 get_user(*((__u16 *) opcode), location); 525 switch (opcode[0]) { 526 case 0x28: /* LDR Rx,Ry */ 527 signal = math_emu_ldr(opcode); 528 break; 529 case 0x38: /* LER Rx,Ry */ 530 signal = math_emu_ler(opcode); 531 break; 532 case 0x60: /* STD R,D(X,B) */ 533 get_user(*((__u16 *) (opcode+2)), location+1); 534 signal = math_emu_std(opcode, regs); 535 break; 536 case 0x68: /* LD R,D(X,B) */ 537 get_user(*((__u16 *) (opcode+2)), location+1); 538 signal = math_emu_ld(opcode, regs); 539 break; 540 case 0x70: /* STE R,D(X,B) */ 541 get_user(*((__u16 *) (opcode+2)), location+1); 542 signal = math_emu_ste(opcode, regs); 543 break; 544 case 0x78: /* LE R,D(X,B) */ 545 get_user(*((__u16 *) (opcode+2)), location+1); 546 signal = math_emu_le(opcode, regs); 547 break; 548 default: 549 signal = SIGILL; 550 break; 551 } 552 } else 553 signal = SIGILL; 554 555 if (signal == SIGFPE) 556 do_fp_trap(regs, location, 557 current->thread.fp_regs.fpc, interruption_code); 558 else if (signal) { 559 siginfo_t info; 560 info.si_signo = signal; 561 info.si_errno = 0; 562 info.si_code = ILL_ILLOPN; 563 info.si_addr = location; 564 do_trap(interruption_code, signal, 565 "specification exception", regs, &info); 566 } 567 } 568 #else 569 DO_ERROR_INFO(SIGILL, "specification exception", specification_exception, 570 ILL_ILLOPN, get_check_address(regs)); 571 #endif 572 573 static void data_exception(struct pt_regs * regs, long interruption_code) 574 { 575 __u16 __user *location; 576 int signal = 0; 577 578 location = get_check_address(regs); 579 580 /* 581 * We got all needed information from the lowcore and can 582 * now safely switch on interrupts. 583 */ 584 if (regs->psw.mask & PSW_MASK_PSTATE) 585 local_irq_enable(); 586 587 if (MACHINE_HAS_IEEE) 588 asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc)); 589 590 #ifdef CONFIG_MATHEMU 591 else if (regs->psw.mask & PSW_MASK_PSTATE) { 592 __u8 opcode[6]; 593 get_user(*((__u16 *) opcode), location); 594 switch (opcode[0]) { 595 case 0x28: /* LDR Rx,Ry */ 596 signal = math_emu_ldr(opcode); 597 break; 598 case 0x38: /* LER Rx,Ry */ 599 signal = math_emu_ler(opcode); 600 break; 601 case 0x60: /* STD R,D(X,B) */ 602 get_user(*((__u16 *) (opcode+2)), location+1); 603 signal = math_emu_std(opcode, regs); 604 break; 605 case 0x68: /* LD R,D(X,B) */ 606 get_user(*((__u16 *) (opcode+2)), location+1); 607 signal = math_emu_ld(opcode, regs); 608 break; 609 case 0x70: /* STE R,D(X,B) */ 610 get_user(*((__u16 *) (opcode+2)), location+1); 611 signal = math_emu_ste(opcode, regs); 612 break; 613 case 0x78: /* LE R,D(X,B) */ 614 get_user(*((__u16 *) (opcode+2)), location+1); 615 signal = math_emu_le(opcode, regs); 616 break; 617 case 0xb3: 618 get_user(*((__u16 *) (opcode+2)), location+1); 619 signal = math_emu_b3(opcode, regs); 620 break; 621 case 0xed: 622 get_user(*((__u32 *) (opcode+2)), 623 (__u32 __user *)(location+1)); 624 signal = math_emu_ed(opcode, regs); 625 break; 626 case 0xb2: 627 if (opcode[1] == 0x99) { 628 get_user(*((__u16 *) (opcode+2)), location+1); 629 signal = math_emu_srnm(opcode, regs); 630 } else if (opcode[1] == 0x9c) { 631 get_user(*((__u16 *) (opcode+2)), location+1); 632 signal = math_emu_stfpc(opcode, regs); 633 } else if (opcode[1] == 0x9d) { 634 get_user(*((__u16 *) (opcode+2)), location+1); 635 signal = math_emu_lfpc(opcode, regs); 636 } else 637 signal = SIGILL; 638 break; 639 default: 640 signal = SIGILL; 641 break; 642 } 643 } 644 #endif 645 if (current->thread.fp_regs.fpc & FPC_DXC_MASK) 646 signal = SIGFPE; 647 else 648 signal = SIGILL; 649 if (signal == SIGFPE) 650 do_fp_trap(regs, location, 651 current->thread.fp_regs.fpc, interruption_code); 652 else if (signal) { 653 siginfo_t info; 654 info.si_signo = signal; 655 info.si_errno = 0; 656 info.si_code = ILL_ILLOPN; 657 info.si_addr = location; 658 do_trap(interruption_code, signal, 659 "data exception", regs, &info); 660 } 661 } 662 663 static void space_switch_exception(struct pt_regs * regs, long int_code) 664 { 665 siginfo_t info; 666 667 /* Set user psw back to home space mode. */ 668 if (regs->psw.mask & PSW_MASK_PSTATE) 669 regs->psw.mask |= PSW_ASC_HOME; 670 /* Send SIGILL. */ 671 info.si_signo = SIGILL; 672 info.si_errno = 0; 673 info.si_code = ILL_PRVOPC; 674 info.si_addr = get_check_address(regs); 675 do_trap(int_code, SIGILL, "space switch event", regs, &info); 676 } 677 678 asmlinkage void kernel_stack_overflow(struct pt_regs * regs) 679 { 680 bust_spinlocks(1); 681 printk("Kernel stack overflow.\n"); 682 show_regs(regs); 683 bust_spinlocks(0); 684 panic("Corrupt kernel stack, can't continue."); 685 } 686 687 /* init is done in lowcore.S and head.S */ 688 689 void __init trap_init(void) 690 { 691 int i; 692 693 for (i = 0; i < 128; i++) 694 pgm_check_table[i] = &default_trap_handler; 695 pgm_check_table[1] = &illegal_op; 696 pgm_check_table[2] = &privileged_op; 697 pgm_check_table[3] = &execute_exception; 698 pgm_check_table[4] = &do_protection_exception; 699 pgm_check_table[5] = &addressing_exception; 700 pgm_check_table[6] = &specification_exception; 701 pgm_check_table[7] = &data_exception; 702 pgm_check_table[8] = &overflow_exception; 703 pgm_check_table[9] = ÷_exception; 704 pgm_check_table[0x0A] = &overflow_exception; 705 pgm_check_table[0x0B] = ÷_exception; 706 pgm_check_table[0x0C] = &hfp_overflow_exception; 707 pgm_check_table[0x0D] = &hfp_underflow_exception; 708 pgm_check_table[0x0E] = &hfp_significance_exception; 709 pgm_check_table[0x0F] = &hfp_divide_exception; 710 pgm_check_table[0x10] = &do_dat_exception; 711 pgm_check_table[0x11] = &do_dat_exception; 712 pgm_check_table[0x12] = &translation_exception; 713 pgm_check_table[0x13] = &special_op_exception; 714 #ifdef CONFIG_64BIT 715 pgm_check_table[0x38] = &do_dat_exception; 716 pgm_check_table[0x39] = &do_dat_exception; 717 pgm_check_table[0x3A] = &do_dat_exception; 718 pgm_check_table[0x3B] = &do_dat_exception; 719 #endif /* CONFIG_64BIT */ 720 pgm_check_table[0x15] = &operand_exception; 721 pgm_check_table[0x1C] = &space_switch_exception; 722 pgm_check_table[0x1D] = &hfp_sqrt_exception; 723 pgm_check_table[0x40] = &do_monitor_call; 724 pfault_irq_init(); 725 } 726