1 /* 2 * arch/s390/kernel/traps.c 3 * 4 * S390 version 5 * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation 6 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), 7 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), 8 * 9 * Derived from "arch/i386/kernel/traps.c" 10 * Copyright (C) 1991, 1992 Linus Torvalds 11 */ 12 13 /* 14 * 'Traps.c' handles hardware traps and faults after we have saved some 15 * state in 'asm.s'. 16 */ 17 #include <linux/sched.h> 18 #include <linux/kernel.h> 19 #include <linux/string.h> 20 #include <linux/errno.h> 21 #include <linux/ptrace.h> 22 #include <linux/timer.h> 23 #include <linux/mm.h> 24 #include <linux/smp.h> 25 #include <linux/init.h> 26 #include <linux/interrupt.h> 27 #include <linux/seq_file.h> 28 #include <linux/delay.h> 29 #include <linux/module.h> 30 #include <linux/kdebug.h> 31 #include <linux/kallsyms.h> 32 #include <linux/reboot.h> 33 #include <linux/kprobes.h> 34 #include <linux/bug.h> 35 #include <linux/utsname.h> 36 #include <asm/system.h> 37 #include <asm/uaccess.h> 38 #include <asm/io.h> 39 #include <asm/atomic.h> 40 #include <asm/mathemu.h> 41 #include <asm/cpcmd.h> 42 #include <asm/s390_ext.h> 43 #include <asm/lowcore.h> 44 #include <asm/debug.h> 45 46 /* Called from entry.S only */ 47 extern void handle_per_exception(struct pt_regs *regs); 48 49 typedef void pgm_check_handler_t(struct pt_regs *, long); 50 pgm_check_handler_t *pgm_check_table[128]; 51 52 #ifdef CONFIG_SYSCTL 53 #ifdef CONFIG_PROCESS_DEBUG 54 int sysctl_userprocess_debug = 1; 55 #else 56 int sysctl_userprocess_debug = 0; 57 #endif 58 #endif 59 60 extern pgm_check_handler_t do_protection_exception; 61 extern pgm_check_handler_t do_dat_exception; 62 extern pgm_check_handler_t do_monitor_call; 63 extern pgm_check_handler_t do_asce_exception; 64 65 #define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; }) 66 67 #ifndef CONFIG_64BIT 68 #define FOURLONG "%08lx %08lx %08lx %08lx\n" 69 static int kstack_depth_to_print = 12; 70 #else /* CONFIG_64BIT */ 71 #define FOURLONG "%016lx %016lx %016lx %016lx\n" 72 static int kstack_depth_to_print = 20; 73 #endif /* CONFIG_64BIT */ 74 75 /* 76 * For show_trace we have tree different stack to consider: 77 * - the panic stack which is used if the kernel stack has overflown 78 * - the asynchronous interrupt stack (cpu related) 79 * - the synchronous kernel stack (process related) 80 * The stack trace can start at any of the three stack and can potentially 81 * touch all of them. The order is: panic stack, async stack, sync stack. 82 */ 83 static unsigned long 84 __show_trace(unsigned long sp, unsigned long low, unsigned long high) 85 { 86 struct stack_frame *sf; 87 struct pt_regs *regs; 88 89 while (1) { 90 sp = sp & PSW_ADDR_INSN; 91 if (sp < low || sp > high - sizeof(*sf)) 92 return sp; 93 sf = (struct stack_frame *) sp; 94 printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN); 95 print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN); 96 /* Follow the backchain. */ 97 while (1) { 98 low = sp; 99 sp = sf->back_chain & PSW_ADDR_INSN; 100 if (!sp) 101 break; 102 if (sp <= low || sp > high - sizeof(*sf)) 103 return sp; 104 sf = (struct stack_frame *) sp; 105 printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN); 106 print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN); 107 } 108 /* Zero backchain detected, check for interrupt frame. */ 109 sp = (unsigned long) (sf + 1); 110 if (sp <= low || sp > high - sizeof(*regs)) 111 return sp; 112 regs = (struct pt_regs *) sp; 113 printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN); 114 print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN); 115 low = sp; 116 sp = regs->gprs[15]; 117 } 118 } 119 120 void show_trace(struct task_struct *task, unsigned long *stack) 121 { 122 register unsigned long __r15 asm ("15"); 123 unsigned long sp; 124 125 sp = (unsigned long) stack; 126 if (!sp) 127 sp = task ? task->thread.ksp : __r15; 128 printk("Call Trace:\n"); 129 #ifdef CONFIG_CHECK_STACK 130 sp = __show_trace(sp, S390_lowcore.panic_stack - 4096, 131 S390_lowcore.panic_stack); 132 #endif 133 sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE, 134 S390_lowcore.async_stack); 135 if (task) 136 __show_trace(sp, (unsigned long) task_stack_page(task), 137 (unsigned long) task_stack_page(task) + THREAD_SIZE); 138 else 139 __show_trace(sp, S390_lowcore.thread_info, 140 S390_lowcore.thread_info + THREAD_SIZE); 141 printk("\n"); 142 if (!task) 143 task = current; 144 debug_show_held_locks(task); 145 } 146 147 void show_stack(struct task_struct *task, unsigned long *sp) 148 { 149 register unsigned long * __r15 asm ("15"); 150 unsigned long *stack; 151 int i; 152 153 if (!sp) 154 stack = task ? (unsigned long *) task->thread.ksp : __r15; 155 else 156 stack = sp; 157 158 for (i = 0; i < kstack_depth_to_print; i++) { 159 if (((addr_t) stack & (THREAD_SIZE-1)) == 0) 160 break; 161 if (i && ((i * sizeof (long) % 32) == 0)) 162 printk("\n "); 163 printk("%p ", (void *)*stack++); 164 } 165 printk("\n"); 166 show_trace(task, sp); 167 } 168 169 /* 170 * The architecture-independent dump_stack generator 171 */ 172 void dump_stack(void) 173 { 174 printk("CPU: %d %s %s %.*s\n", 175 task_thread_info(current)->cpu, print_tainted(), 176 init_utsname()->release, 177 (int)strcspn(init_utsname()->version, " "), 178 init_utsname()->version); 179 printk("Process %s (pid: %d, task: %p, ksp: %p)\n", 180 current->comm, current->pid, current, 181 (void *) current->thread.ksp); 182 show_stack(NULL, NULL); 183 } 184 EXPORT_SYMBOL(dump_stack); 185 186 static inline int mask_bits(struct pt_regs *regs, unsigned long bits) 187 { 188 return (regs->psw.mask & bits) / ((~bits + 1) & bits); 189 } 190 191 void show_registers(struct pt_regs *regs) 192 { 193 char *mode; 194 195 mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl"; 196 printk("%s PSW : %p %p", 197 mode, (void *) regs->psw.mask, 198 (void *) regs->psw.addr); 199 print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN); 200 printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x " 201 "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER), 202 mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO), 203 mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY), 204 mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT), 205 mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC), 206 mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM)); 207 #ifdef CONFIG_64BIT 208 printk(" EA:%x", mask_bits(regs, PSW_BASE_BITS)); 209 #endif 210 printk("\n%s GPRS: " FOURLONG, mode, 211 regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]); 212 printk(" " FOURLONG, 213 regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]); 214 printk(" " FOURLONG, 215 regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]); 216 printk(" " FOURLONG, 217 regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]); 218 219 show_code(regs); 220 } 221 222 /* This is called from fs/proc/array.c */ 223 void task_show_regs(struct seq_file *m, struct task_struct *task) 224 { 225 struct pt_regs *regs; 226 227 regs = task_pt_regs(task); 228 seq_printf(m, "task: %p, ksp: %p\n", 229 task, (void *)task->thread.ksp); 230 seq_printf(m, "User PSW : %p %p\n", 231 (void *) regs->psw.mask, (void *)regs->psw.addr); 232 233 seq_printf(m, "User GPRS: " FOURLONG, 234 regs->gprs[0], regs->gprs[1], 235 regs->gprs[2], regs->gprs[3]); 236 seq_printf(m, " " FOURLONG, 237 regs->gprs[4], regs->gprs[5], 238 regs->gprs[6], regs->gprs[7]); 239 seq_printf(m, " " FOURLONG, 240 regs->gprs[8], regs->gprs[9], 241 regs->gprs[10], regs->gprs[11]); 242 seq_printf(m, " " FOURLONG, 243 regs->gprs[12], regs->gprs[13], 244 regs->gprs[14], regs->gprs[15]); 245 seq_printf(m, "User ACRS: %08x %08x %08x %08x\n", 246 task->thread.acrs[0], task->thread.acrs[1], 247 task->thread.acrs[2], task->thread.acrs[3]); 248 seq_printf(m, " %08x %08x %08x %08x\n", 249 task->thread.acrs[4], task->thread.acrs[5], 250 task->thread.acrs[6], task->thread.acrs[7]); 251 seq_printf(m, " %08x %08x %08x %08x\n", 252 task->thread.acrs[8], task->thread.acrs[9], 253 task->thread.acrs[10], task->thread.acrs[11]); 254 seq_printf(m, " %08x %08x %08x %08x\n", 255 task->thread.acrs[12], task->thread.acrs[13], 256 task->thread.acrs[14], task->thread.acrs[15]); 257 } 258 259 static DEFINE_SPINLOCK(die_lock); 260 261 void die(const char * str, struct pt_regs * regs, long err) 262 { 263 static int die_counter; 264 265 oops_enter(); 266 debug_stop_all(); 267 console_verbose(); 268 spin_lock_irq(&die_lock); 269 bust_spinlocks(1); 270 printk("%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter); 271 #ifdef CONFIG_PREEMPT 272 printk("PREEMPT "); 273 #endif 274 #ifdef CONFIG_SMP 275 printk("SMP "); 276 #endif 277 #ifdef CONFIG_DEBUG_PAGEALLOC 278 printk("DEBUG_PAGEALLOC"); 279 #endif 280 printk("\n"); 281 notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV); 282 show_regs(regs); 283 bust_spinlocks(0); 284 add_taint(TAINT_DIE); 285 spin_unlock_irq(&die_lock); 286 if (in_interrupt()) 287 panic("Fatal exception in interrupt"); 288 if (panic_on_oops) 289 panic("Fatal exception: panic_on_oops"); 290 oops_exit(); 291 do_exit(SIGSEGV); 292 } 293 294 static void inline 295 report_user_fault(long interruption_code, struct pt_regs *regs) 296 { 297 #if defined(CONFIG_SYSCTL) 298 if (!sysctl_userprocess_debug) 299 return; 300 #endif 301 #if defined(CONFIG_SYSCTL) || defined(CONFIG_PROCESS_DEBUG) 302 printk("User process fault: interruption code 0x%lX\n", 303 interruption_code); 304 show_regs(regs); 305 #endif 306 } 307 308 int is_valid_bugaddr(unsigned long addr) 309 { 310 return 1; 311 } 312 313 static void __kprobes inline do_trap(long interruption_code, int signr, 314 char *str, struct pt_regs *regs, 315 siginfo_t *info) 316 { 317 /* 318 * We got all needed information from the lowcore and can 319 * now safely switch on interrupts. 320 */ 321 if (regs->psw.mask & PSW_MASK_PSTATE) 322 local_irq_enable(); 323 324 if (notify_die(DIE_TRAP, str, regs, interruption_code, 325 interruption_code, signr) == NOTIFY_STOP) 326 return; 327 328 if (regs->psw.mask & PSW_MASK_PSTATE) { 329 struct task_struct *tsk = current; 330 331 tsk->thread.trap_no = interruption_code & 0xffff; 332 force_sig_info(signr, info, tsk); 333 report_user_fault(interruption_code, regs); 334 } else { 335 const struct exception_table_entry *fixup; 336 fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN); 337 if (fixup) 338 regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE; 339 else { 340 enum bug_trap_type btt; 341 342 btt = report_bug(regs->psw.addr & PSW_ADDR_INSN, regs); 343 if (btt == BUG_TRAP_TYPE_WARN) 344 return; 345 die(str, regs, interruption_code); 346 } 347 } 348 } 349 350 static inline void __user *get_check_address(struct pt_regs *regs) 351 { 352 return (void __user *)((regs->psw.addr-S390_lowcore.pgm_ilc) & PSW_ADDR_INSN); 353 } 354 355 void __kprobes do_single_step(struct pt_regs *regs) 356 { 357 if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, 358 SIGTRAP) == NOTIFY_STOP){ 359 return; 360 } 361 if ((current->ptrace & PT_PTRACED) != 0) 362 force_sig(SIGTRAP, current); 363 } 364 365 static void default_trap_handler(struct pt_regs * regs, long interruption_code) 366 { 367 if (regs->psw.mask & PSW_MASK_PSTATE) { 368 local_irq_enable(); 369 do_exit(SIGSEGV); 370 report_user_fault(interruption_code, regs); 371 } else 372 die("Unknown program exception", regs, interruption_code); 373 } 374 375 #define DO_ERROR_INFO(signr, str, name, sicode, siaddr) \ 376 static void name(struct pt_regs * regs, long interruption_code) \ 377 { \ 378 siginfo_t info; \ 379 info.si_signo = signr; \ 380 info.si_errno = 0; \ 381 info.si_code = sicode; \ 382 info.si_addr = siaddr; \ 383 do_trap(interruption_code, signr, str, regs, &info); \ 384 } 385 386 DO_ERROR_INFO(SIGILL, "addressing exception", addressing_exception, 387 ILL_ILLADR, get_check_address(regs)) 388 DO_ERROR_INFO(SIGILL, "execute exception", execute_exception, 389 ILL_ILLOPN, get_check_address(regs)) 390 DO_ERROR_INFO(SIGFPE, "fixpoint divide exception", divide_exception, 391 FPE_INTDIV, get_check_address(regs)) 392 DO_ERROR_INFO(SIGFPE, "fixpoint overflow exception", overflow_exception, 393 FPE_INTOVF, get_check_address(regs)) 394 DO_ERROR_INFO(SIGFPE, "HFP overflow exception", hfp_overflow_exception, 395 FPE_FLTOVF, get_check_address(regs)) 396 DO_ERROR_INFO(SIGFPE, "HFP underflow exception", hfp_underflow_exception, 397 FPE_FLTUND, get_check_address(regs)) 398 DO_ERROR_INFO(SIGFPE, "HFP significance exception", hfp_significance_exception, 399 FPE_FLTRES, get_check_address(regs)) 400 DO_ERROR_INFO(SIGFPE, "HFP divide exception", hfp_divide_exception, 401 FPE_FLTDIV, get_check_address(regs)) 402 DO_ERROR_INFO(SIGFPE, "HFP square root exception", hfp_sqrt_exception, 403 FPE_FLTINV, get_check_address(regs)) 404 DO_ERROR_INFO(SIGILL, "operand exception", operand_exception, 405 ILL_ILLOPN, get_check_address(regs)) 406 DO_ERROR_INFO(SIGILL, "privileged operation", privileged_op, 407 ILL_PRVOPC, get_check_address(regs)) 408 DO_ERROR_INFO(SIGILL, "special operation exception", special_op_exception, 409 ILL_ILLOPN, get_check_address(regs)) 410 DO_ERROR_INFO(SIGILL, "translation exception", translation_exception, 411 ILL_ILLOPN, get_check_address(regs)) 412 413 static inline void 414 do_fp_trap(struct pt_regs *regs, void __user *location, 415 int fpc, long interruption_code) 416 { 417 siginfo_t si; 418 419 si.si_signo = SIGFPE; 420 si.si_errno = 0; 421 si.si_addr = location; 422 si.si_code = 0; 423 /* FPC[2] is Data Exception Code */ 424 if ((fpc & 0x00000300) == 0) { 425 /* bits 6 and 7 of DXC are 0 iff IEEE exception */ 426 if (fpc & 0x8000) /* invalid fp operation */ 427 si.si_code = FPE_FLTINV; 428 else if (fpc & 0x4000) /* div by 0 */ 429 si.si_code = FPE_FLTDIV; 430 else if (fpc & 0x2000) /* overflow */ 431 si.si_code = FPE_FLTOVF; 432 else if (fpc & 0x1000) /* underflow */ 433 si.si_code = FPE_FLTUND; 434 else if (fpc & 0x0800) /* inexact */ 435 si.si_code = FPE_FLTRES; 436 } 437 current->thread.ieee_instruction_pointer = (addr_t) location; 438 do_trap(interruption_code, SIGFPE, 439 "floating point exception", regs, &si); 440 } 441 442 static void illegal_op(struct pt_regs * regs, long interruption_code) 443 { 444 siginfo_t info; 445 __u8 opcode[6]; 446 __u16 __user *location; 447 int signal = 0; 448 449 location = get_check_address(regs); 450 451 /* 452 * We got all needed information from the lowcore and can 453 * now safely switch on interrupts. 454 */ 455 if (regs->psw.mask & PSW_MASK_PSTATE) 456 local_irq_enable(); 457 458 if (regs->psw.mask & PSW_MASK_PSTATE) { 459 if (get_user(*((__u16 *) opcode), (__u16 __user *) location)) 460 return; 461 if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) { 462 if (current->ptrace & PT_PTRACED) 463 force_sig(SIGTRAP, current); 464 else 465 signal = SIGILL; 466 #ifdef CONFIG_MATHEMU 467 } else if (opcode[0] == 0xb3) { 468 if (get_user(*((__u16 *) (opcode+2)), location+1)) 469 return; 470 signal = math_emu_b3(opcode, regs); 471 } else if (opcode[0] == 0xed) { 472 if (get_user(*((__u32 *) (opcode+2)), 473 (__u32 __user *)(location+1))) 474 return; 475 signal = math_emu_ed(opcode, regs); 476 } else if (*((__u16 *) opcode) == 0xb299) { 477 if (get_user(*((__u16 *) (opcode+2)), location+1)) 478 return; 479 signal = math_emu_srnm(opcode, regs); 480 } else if (*((__u16 *) opcode) == 0xb29c) { 481 if (get_user(*((__u16 *) (opcode+2)), location+1)) 482 return; 483 signal = math_emu_stfpc(opcode, regs); 484 } else if (*((__u16 *) opcode) == 0xb29d) { 485 if (get_user(*((__u16 *) (opcode+2)), location+1)) 486 return; 487 signal = math_emu_lfpc(opcode, regs); 488 #endif 489 } else 490 signal = SIGILL; 491 } else { 492 /* 493 * If we get an illegal op in kernel mode, send it through the 494 * kprobes notifier. If kprobes doesn't pick it up, SIGILL 495 */ 496 if (notify_die(DIE_BPT, "bpt", regs, interruption_code, 497 3, SIGTRAP) != NOTIFY_STOP) 498 signal = SIGILL; 499 } 500 501 #ifdef CONFIG_MATHEMU 502 if (signal == SIGFPE) 503 do_fp_trap(regs, location, 504 current->thread.fp_regs.fpc, interruption_code); 505 else if (signal == SIGSEGV) { 506 info.si_signo = signal; 507 info.si_errno = 0; 508 info.si_code = SEGV_MAPERR; 509 info.si_addr = (void __user *) location; 510 do_trap(interruption_code, signal, 511 "user address fault", regs, &info); 512 } else 513 #endif 514 if (signal) { 515 info.si_signo = signal; 516 info.si_errno = 0; 517 info.si_code = ILL_ILLOPC; 518 info.si_addr = (void __user *) location; 519 do_trap(interruption_code, signal, 520 "illegal operation", regs, &info); 521 } 522 } 523 524 525 #ifdef CONFIG_MATHEMU 526 asmlinkage void 527 specification_exception(struct pt_regs * regs, long interruption_code) 528 { 529 __u8 opcode[6]; 530 __u16 __user *location = NULL; 531 int signal = 0; 532 533 location = (__u16 __user *) get_check_address(regs); 534 535 /* 536 * We got all needed information from the lowcore and can 537 * now safely switch on interrupts. 538 */ 539 if (regs->psw.mask & PSW_MASK_PSTATE) 540 local_irq_enable(); 541 542 if (regs->psw.mask & PSW_MASK_PSTATE) { 543 get_user(*((__u16 *) opcode), location); 544 switch (opcode[0]) { 545 case 0x28: /* LDR Rx,Ry */ 546 signal = math_emu_ldr(opcode); 547 break; 548 case 0x38: /* LER Rx,Ry */ 549 signal = math_emu_ler(opcode); 550 break; 551 case 0x60: /* STD R,D(X,B) */ 552 get_user(*((__u16 *) (opcode+2)), location+1); 553 signal = math_emu_std(opcode, regs); 554 break; 555 case 0x68: /* LD R,D(X,B) */ 556 get_user(*((__u16 *) (opcode+2)), location+1); 557 signal = math_emu_ld(opcode, regs); 558 break; 559 case 0x70: /* STE R,D(X,B) */ 560 get_user(*((__u16 *) (opcode+2)), location+1); 561 signal = math_emu_ste(opcode, regs); 562 break; 563 case 0x78: /* LE R,D(X,B) */ 564 get_user(*((__u16 *) (opcode+2)), location+1); 565 signal = math_emu_le(opcode, regs); 566 break; 567 default: 568 signal = SIGILL; 569 break; 570 } 571 } else 572 signal = SIGILL; 573 574 if (signal == SIGFPE) 575 do_fp_trap(regs, location, 576 current->thread.fp_regs.fpc, interruption_code); 577 else if (signal) { 578 siginfo_t info; 579 info.si_signo = signal; 580 info.si_errno = 0; 581 info.si_code = ILL_ILLOPN; 582 info.si_addr = location; 583 do_trap(interruption_code, signal, 584 "specification exception", regs, &info); 585 } 586 } 587 #else 588 DO_ERROR_INFO(SIGILL, "specification exception", specification_exception, 589 ILL_ILLOPN, get_check_address(regs)); 590 #endif 591 592 static void data_exception(struct pt_regs * regs, long interruption_code) 593 { 594 __u16 __user *location; 595 int signal = 0; 596 597 location = get_check_address(regs); 598 599 /* 600 * We got all needed information from the lowcore and can 601 * now safely switch on interrupts. 602 */ 603 if (regs->psw.mask & PSW_MASK_PSTATE) 604 local_irq_enable(); 605 606 if (MACHINE_HAS_IEEE) 607 asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc)); 608 609 #ifdef CONFIG_MATHEMU 610 else if (regs->psw.mask & PSW_MASK_PSTATE) { 611 __u8 opcode[6]; 612 get_user(*((__u16 *) opcode), location); 613 switch (opcode[0]) { 614 case 0x28: /* LDR Rx,Ry */ 615 signal = math_emu_ldr(opcode); 616 break; 617 case 0x38: /* LER Rx,Ry */ 618 signal = math_emu_ler(opcode); 619 break; 620 case 0x60: /* STD R,D(X,B) */ 621 get_user(*((__u16 *) (opcode+2)), location+1); 622 signal = math_emu_std(opcode, regs); 623 break; 624 case 0x68: /* LD R,D(X,B) */ 625 get_user(*((__u16 *) (opcode+2)), location+1); 626 signal = math_emu_ld(opcode, regs); 627 break; 628 case 0x70: /* STE R,D(X,B) */ 629 get_user(*((__u16 *) (opcode+2)), location+1); 630 signal = math_emu_ste(opcode, regs); 631 break; 632 case 0x78: /* LE R,D(X,B) */ 633 get_user(*((__u16 *) (opcode+2)), location+1); 634 signal = math_emu_le(opcode, regs); 635 break; 636 case 0xb3: 637 get_user(*((__u16 *) (opcode+2)), location+1); 638 signal = math_emu_b3(opcode, regs); 639 break; 640 case 0xed: 641 get_user(*((__u32 *) (opcode+2)), 642 (__u32 __user *)(location+1)); 643 signal = math_emu_ed(opcode, regs); 644 break; 645 case 0xb2: 646 if (opcode[1] == 0x99) { 647 get_user(*((__u16 *) (opcode+2)), location+1); 648 signal = math_emu_srnm(opcode, regs); 649 } else if (opcode[1] == 0x9c) { 650 get_user(*((__u16 *) (opcode+2)), location+1); 651 signal = math_emu_stfpc(opcode, regs); 652 } else if (opcode[1] == 0x9d) { 653 get_user(*((__u16 *) (opcode+2)), location+1); 654 signal = math_emu_lfpc(opcode, regs); 655 } else 656 signal = SIGILL; 657 break; 658 default: 659 signal = SIGILL; 660 break; 661 } 662 } 663 #endif 664 if (current->thread.fp_regs.fpc & FPC_DXC_MASK) 665 signal = SIGFPE; 666 else 667 signal = SIGILL; 668 if (signal == SIGFPE) 669 do_fp_trap(regs, location, 670 current->thread.fp_regs.fpc, interruption_code); 671 else if (signal) { 672 siginfo_t info; 673 info.si_signo = signal; 674 info.si_errno = 0; 675 info.si_code = ILL_ILLOPN; 676 info.si_addr = location; 677 do_trap(interruption_code, signal, 678 "data exception", regs, &info); 679 } 680 } 681 682 static void space_switch_exception(struct pt_regs * regs, long int_code) 683 { 684 siginfo_t info; 685 686 /* Set user psw back to home space mode. */ 687 if (regs->psw.mask & PSW_MASK_PSTATE) 688 regs->psw.mask |= PSW_ASC_HOME; 689 /* Send SIGILL. */ 690 info.si_signo = SIGILL; 691 info.si_errno = 0; 692 info.si_code = ILL_PRVOPC; 693 info.si_addr = get_check_address(regs); 694 do_trap(int_code, SIGILL, "space switch event", regs, &info); 695 } 696 697 asmlinkage void kernel_stack_overflow(struct pt_regs * regs) 698 { 699 bust_spinlocks(1); 700 printk("Kernel stack overflow.\n"); 701 show_regs(regs); 702 bust_spinlocks(0); 703 panic("Corrupt kernel stack, can't continue."); 704 } 705 706 /* init is done in lowcore.S and head.S */ 707 708 void __init trap_init(void) 709 { 710 int i; 711 712 for (i = 0; i < 128; i++) 713 pgm_check_table[i] = &default_trap_handler; 714 pgm_check_table[1] = &illegal_op; 715 pgm_check_table[2] = &privileged_op; 716 pgm_check_table[3] = &execute_exception; 717 pgm_check_table[4] = &do_protection_exception; 718 pgm_check_table[5] = &addressing_exception; 719 pgm_check_table[6] = &specification_exception; 720 pgm_check_table[7] = &data_exception; 721 pgm_check_table[8] = &overflow_exception; 722 pgm_check_table[9] = ÷_exception; 723 pgm_check_table[0x0A] = &overflow_exception; 724 pgm_check_table[0x0B] = ÷_exception; 725 pgm_check_table[0x0C] = &hfp_overflow_exception; 726 pgm_check_table[0x0D] = &hfp_underflow_exception; 727 pgm_check_table[0x0E] = &hfp_significance_exception; 728 pgm_check_table[0x0F] = &hfp_divide_exception; 729 pgm_check_table[0x10] = &do_dat_exception; 730 pgm_check_table[0x11] = &do_dat_exception; 731 pgm_check_table[0x12] = &translation_exception; 732 pgm_check_table[0x13] = &special_op_exception; 733 #ifdef CONFIG_64BIT 734 pgm_check_table[0x38] = &do_asce_exception; 735 pgm_check_table[0x39] = &do_dat_exception; 736 pgm_check_table[0x3A] = &do_dat_exception; 737 pgm_check_table[0x3B] = &do_dat_exception; 738 #endif /* CONFIG_64BIT */ 739 pgm_check_table[0x15] = &operand_exception; 740 pgm_check_table[0x1C] = &space_switch_exception; 741 pgm_check_table[0x1D] = &hfp_sqrt_exception; 742 pgm_check_table[0x40] = &do_monitor_call; 743 pfault_irq_init(); 744 } 745