/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *               Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *
 *  Derived from "arch/i386/kernel/traps.c"
 *    Copyright (C) 1991, 1992 Linus Torvalds
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/reboot.h>
#include <linux/kprobes.h>
#include <linux/bug.h>
#include <linux/utsname.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <linux/atomic.h>
#include <asm/mathemu.h>
#include <asm/cpcmd.h>
#include <asm/lowcore.h>
#include <asm/debug.h>
#include <asm/ipl.h>
#include "entry.h"

void (*pgm_check_table[128])(struct pt_regs *regs);

int show_unhandled_signals = 1;

#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })

#ifndef CONFIG_64BIT
#define LONG "%08lx "
#define FOURLONG "%08lx %08lx %08lx %08lx\n"
static int kstack_depth_to_print = 12;
#else /* CONFIG_64BIT */
#define LONG "%016lx "
#define FOURLONG "%016lx %016lx %016lx %016lx\n"
static int kstack_depth_to_print = 20;
#endif /* CONFIG_64BIT */

static inline void __user *get_trap_ip(struct pt_regs *regs)
{
#ifdef CONFIG_64BIT
	unsigned long address;

	if (regs->int_code & 0x200)
		address = *(unsigned long *)(current->thread.trap_tdb + 24);
	else
		address = regs->psw.addr;
	return (void __user *)
		((address - (regs->int_code >> 16)) & PSW_ADDR_INSN);
#else
	return (void __user *)
		((regs->psw.addr - (regs->int_code >> 16)) & PSW_ADDR_INSN);
#endif
}

/*
 * For show_trace we have three different stacks to consider:
 *   - the panic stack which is used if the kernel stack has overflown
 *   - the asynchronous interrupt stack (cpu related)
 *   - the synchronous kernel stack (process related)
 * The stack trace can start at any of the three stacks and can potentially
 * touch all of them. The order is: panic stack, async stack, sync stack.
 */
static unsigned long
__show_trace(unsigned long sp, unsigned long low, unsigned long high)
{
	struct stack_frame *sf;
	struct pt_regs *regs;

	while (1) {
		sp = sp & PSW_ADDR_INSN;
		if (sp < low || sp > high - sizeof(*sf))
			return sp;
		sf = (struct stack_frame *) sp;
		printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
		print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN);
		/* Follow the backchain. */
		while (1) {
			low = sp;
			sp = sf->back_chain & PSW_ADDR_INSN;
			if (!sp)
				break;
			if (sp <= low || sp > high - sizeof(*sf))
				return sp;
			sf = (struct stack_frame *) sp;
			printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
			print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN);
		}
		/* Zero backchain detected, check for interrupt frame. */
		sp = (unsigned long) (sf + 1);
		if (sp <= low || sp > high - sizeof(*regs))
			return sp;
		regs = (struct pt_regs *) sp;
		printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN);
		print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN);
		low = sp;
		sp = regs->gprs[15];
	}
}

static void show_trace(struct task_struct *task, unsigned long *stack)
{
	register unsigned long __r15 asm ("15");
	unsigned long sp;

	sp = (unsigned long) stack;
	if (!sp)
		sp = task ? task->thread.ksp : __r15;
	printk("Call Trace:\n");
#ifdef CONFIG_CHECK_STACK
	sp = __show_trace(sp, S390_lowcore.panic_stack - 4096,
			  S390_lowcore.panic_stack);
#endif
	sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE,
			  S390_lowcore.async_stack);
	if (task)
		__show_trace(sp, (unsigned long) task_stack_page(task),
			     (unsigned long) task_stack_page(task) + THREAD_SIZE);
	else
		__show_trace(sp, S390_lowcore.thread_info,
			     S390_lowcore.thread_info + THREAD_SIZE);
	if (!task)
		task = current;
	debug_show_held_locks(task);
}

void show_stack(struct task_struct *task, unsigned long *sp)
{
	register unsigned long * __r15 asm ("15");
	unsigned long *stack;
	int i;

	if (!sp)
		stack = task ? (unsigned long *) task->thread.ksp : __r15;
	else
		stack = sp;

	for (i = 0; i < kstack_depth_to_print; i++) {
		if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
			break;
		if ((i * sizeof(long) % 32) == 0)
			printk("%s ", i == 0 ? "" : "\n");
		printk(LONG, *stack++);
	}
	printk("\n");
	show_trace(task, sp);
}

static void show_last_breaking_event(struct pt_regs *regs)
{
#ifdef CONFIG_64BIT
	printk("Last Breaking-Event-Address:\n");
	printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN);
	print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN);
#endif
}

/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
	printk("CPU: %d %s %s %.*s\n",
	       task_thread_info(current)->cpu, print_tainted(),
	       init_utsname()->release,
	       (int)strcspn(init_utsname()->version, " "),
	       init_utsname()->version);
	printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
	       current->comm, current->pid, current,
	       (void *) current->thread.ksp);
	show_stack(NULL, NULL);
}
EXPORT_SYMBOL(dump_stack);

static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
{
	return (regs->psw.mask & bits) / ((~bits + 1) & bits);
}

void show_registers(struct pt_regs *regs)
{
	char *mode;

	mode = user_mode(regs) ? "User" : "Krnl";
	printk("%s PSW : %p %p",
	       mode, (void *) regs->psw.mask,
	       (void *) regs->psw.addr);
	print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN);
	printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
	       "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER),
	       mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO),
	       mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY),
	       mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT),
	       mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC),
	       mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM));
#ifdef CONFIG_64BIT
	printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA));
#endif
	printk("\n%s GPRS: " FOURLONG, mode,
	       regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
	printk(" " FOURLONG,
	       regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
	printk(" " FOURLONG,
	       regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
	printk(" " FOURLONG,
	       regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);

	show_code(regs);
}

void show_regs(struct pt_regs *regs)
{
	printk("CPU: %d %s %s %.*s\n",
	       task_thread_info(current)->cpu, print_tainted(),
	       init_utsname()->release,
	       (int)strcspn(init_utsname()->version, " "),
	       init_utsname()->version);
	printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
	       current->comm, current->pid, current,
	       (void *) current->thread.ksp);
	show_registers(regs);
	/* Show stack backtrace if pt_regs is from kernel mode */
	if (!user_mode(regs))
		show_trace(NULL, (unsigned long *) regs->gprs[15]);
	show_last_breaking_event(regs);
}

static DEFINE_SPINLOCK(die_lock);

void die(struct pt_regs *regs, const char *str)
{
	static int die_counter;

	oops_enter();
	lgr_info_log();
	debug_stop_all();
	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);
	printk("%s: %04x [#%d] ", str, regs->int_code & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("DEBUG_PAGEALLOC");
#endif
	printk("\n");
	notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV);
	print_modules();
	show_regs(regs);
	bust_spinlocks(0);
	add_taint(TAINT_DIE);
	spin_unlock_irq(&die_lock);
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception: panic_on_oops");
	oops_exit();
	do_exit(SIGSEGV);
}

static inline void report_user_fault(struct pt_regs *regs, int signr)
{
	if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
		return;
	if (!unhandled_signal(current, signr))
		return;
	if (!printk_ratelimit())
		return;
	printk("User process fault: interruption code 0x%X ", regs->int_code);
	print_vma_addr("in ", regs->psw.addr & PSW_ADDR_INSN);
	printk("\n");
	show_regs(regs);
}

int is_valid_bugaddr(unsigned long addr)
{
	return 1;
}

static void __kprobes do_trap(struct pt_regs *regs,
			      int si_signo, int si_code, char *str)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, str, regs, 0,
		       regs->int_code, si_signo) == NOTIFY_STOP)
		return;

	if (user_mode(regs)) {
		info.si_signo = si_signo;
		info.si_errno = 0;
		info.si_code = si_code;
		info.si_addr = get_trap_ip(regs);
		force_sig_info(si_signo, &info, current);
		report_user_fault(regs, si_signo);
	} else {
		const struct exception_table_entry *fixup;
		fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
		if (fixup)
			regs->psw.addr = extable_fixup(fixup) | PSW_ADDR_AMODE;
		else {
			enum bug_trap_type btt;

			btt = report_bug(regs->psw.addr & PSW_ADDR_INSN, regs);
			if (btt == BUG_TRAP_TYPE_WARN)
				return;
			die(regs, str);
		}
	}
}

void __kprobes do_per_trap(struct pt_regs *regs)
{
	siginfo_t info;

	if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, SIGTRAP) == NOTIFY_STOP)
		return;
	if (!current->ptrace)
		return;
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_HWBKPT;
	info.si_addr =
		(void __force __user *) current->thread.per_event.address;
	force_sig_info(SIGTRAP, &info, current);
}

static void default_trap_handler(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		report_user_fault(regs, SIGSEGV);
		do_exit(SIGSEGV);
	} else
		die(regs, "Unknown program exception");
}

#define DO_ERROR_INFO(name, signr, sicode, str) \
static void name(struct pt_regs *regs) \
{ \
	do_trap(regs, signr, sicode, str); \
}

DO_ERROR_INFO(addressing_exception, SIGILL, ILL_ILLADR,
	      "addressing exception")
DO_ERROR_INFO(execute_exception, SIGILL, ILL_ILLOPN,
	      "execute exception")
DO_ERROR_INFO(divide_exception, SIGFPE, FPE_INTDIV,
	      "fixpoint divide exception")
DO_ERROR_INFO(overflow_exception, SIGFPE, FPE_INTOVF,
	      "fixpoint overflow exception")
DO_ERROR_INFO(hfp_overflow_exception, SIGFPE, FPE_FLTOVF,
	      "HFP overflow exception")
DO_ERROR_INFO(hfp_underflow_exception, SIGFPE, FPE_FLTUND,
	      "HFP underflow exception")
DO_ERROR_INFO(hfp_significance_exception, SIGFPE, FPE_FLTRES,
	      "HFP significance exception")
DO_ERROR_INFO(hfp_divide_exception, SIGFPE, FPE_FLTDIV,
	      "HFP divide exception")
DO_ERROR_INFO(hfp_sqrt_exception, SIGFPE, FPE_FLTINV,
	      "HFP square root exception")
DO_ERROR_INFO(operand_exception, SIGILL, ILL_ILLOPN,
	      "operand exception")
DO_ERROR_INFO(privileged_op, SIGILL, ILL_PRVOPC,
	      "privileged operation")
DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN,
	      "special operation exception")
DO_ERROR_INFO(translation_exception, SIGILL, ILL_ILLOPN,
	      "translation exception")

#ifdef CONFIG_64BIT
DO_ERROR_INFO(transaction_exception, SIGILL, ILL_ILLOPN,
	      "transaction constraint exception")
#endif

static inline void do_fp_trap(struct pt_regs *regs, int fpc)
{
	int si_code = 0;
	/* FPC[2] is Data Exception Code */
	if ((fpc & 0x00000300) == 0) {
		/* bits 6 and 7 of DXC are 0 iff IEEE exception */
		if (fpc & 0x8000) /* invalid fp operation */
			si_code = FPE_FLTINV;
		else if (fpc & 0x4000) /* div by 0 */
			si_code = FPE_FLTDIV;
		else if (fpc & 0x2000) /* overflow */
			si_code = FPE_FLTOVF;
		else if (fpc & 0x1000) /* underflow */
			si_code = FPE_FLTUND;
		else if (fpc & 0x0800) /* inexact */
			si_code = FPE_FLTRES;
	}
	do_trap(regs, SIGFPE, si_code, "floating point exception");
}

static void __kprobes illegal_op(struct pt_regs *regs)
{
	siginfo_t info;
	__u8 opcode[6];
	__u16 __user *location;
	int signal = 0;

	location = get_trap_ip(regs);

	if (user_mode(regs)) {
		if (get_user(*((__u16 *) opcode), (__u16 __user *) location))
			return;
		if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) {
			if (current->ptrace) {
				info.si_signo = SIGTRAP;
				info.si_errno = 0;
				info.si_code = TRAP_BRKPT;
				info.si_addr = location;
				force_sig_info(SIGTRAP, &info, current);
			} else
				signal = SIGILL;
#ifdef CONFIG_MATHEMU
		} else if (opcode[0] == 0xb3) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_b3(opcode, regs);
		} else if (opcode[0] == 0xed) {
			if (get_user(*((__u32 *) (opcode+2)),
				     (__u32 __user *)(location+1)))
				return;
			signal = math_emu_ed(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb299) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_srnm(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb29c) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_stfpc(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb29d) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_lfpc(opcode, regs);
#endif
		} else
			signal = SIGILL;
	} else {
		/*
		 * If we get an illegal op in kernel mode, send it through the
		 * kprobes notifier. If kprobes doesn't pick it up, SIGILL
		 */
		if (notify_die(DIE_BPT, "bpt", regs, 0,
			       3, SIGTRAP) != NOTIFY_STOP)
			signal = SIGILL;
	}

#ifdef CONFIG_MATHEMU
	if (signal == SIGFPE)
		do_fp_trap(regs, current->thread.fp_regs.fpc);
	else if (signal == SIGSEGV)
		do_trap(regs, signal, SEGV_MAPERR, "user address fault");
	else
#endif
	if (signal)
		do_trap(regs, signal, ILL_ILLOPC, "illegal operation");
}


#ifdef CONFIG_MATHEMU
void specification_exception(struct pt_regs *regs)
{
	__u8 opcode[6];
	__u16 __user *location = NULL;
	int signal = 0;

	location = (__u16 __user *) get_trap_ip(regs);

	if (user_mode(regs)) {
		get_user(*((__u16 *) opcode), location);
		switch (opcode[0]) {
		case 0x28: /* LDR Rx,Ry */
			signal = math_emu_ldr(opcode);
			break;
		case 0x38: /* LER Rx,Ry */
			signal = math_emu_ler(opcode);
			break;
		case 0x60: /* STD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_std(opcode, regs);
			break;
		case 0x68: /* LD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ld(opcode, regs);
			break;
		case 0x70: /* STE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ste(opcode, regs);
			break;
		case 0x78: /* LE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_le(opcode, regs);
			break;
		default:
			signal = SIGILL;
			break;
		}
	} else
		signal = SIGILL;

	if (signal == SIGFPE)
		do_fp_trap(regs, current->thread.fp_regs.fpc);
	else if (signal)
		do_trap(regs, signal, ILL_ILLOPN, "specification exception");
}
#else
DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN,
	      "specification exception");
#endif

static void data_exception(struct pt_regs *regs)
{
	__u16 __user *location;
	int signal = 0;

	location = get_trap_ip(regs);

	if (MACHINE_HAS_IEEE)
		asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));

#ifdef CONFIG_MATHEMU
	else if (user_mode(regs)) {
		__u8 opcode[6];
		get_user(*((__u16 *) opcode), location);
		switch (opcode[0]) {
		case 0x28: /* LDR Rx,Ry */
			signal = math_emu_ldr(opcode);
			break;
		case 0x38: /* LER Rx,Ry */
			signal = math_emu_ler(opcode);
			break;
		case 0x60: /* STD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_std(opcode, regs);
			break;
		case 0x68: /* LD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ld(opcode, regs);
			break;
		case 0x70: /* STE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ste(opcode, regs);
			break;
		case 0x78: /* LE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_le(opcode, regs);
			break;
		case 0xb3:
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_b3(opcode, regs);
			break;
		case 0xed:
			get_user(*((__u32 *) (opcode+2)),
				 (__u32 __user *)(location+1));
			signal = math_emu_ed(opcode, regs);
			break;
		case 0xb2:
			if (opcode[1] == 0x99) {
				get_user(*((__u16 *) (opcode+2)), location+1);
				signal = math_emu_srnm(opcode, regs);
			} else if (opcode[1] == 0x9c) {
				get_user(*((__u16 *) (opcode+2)), location+1);
				signal = math_emu_stfpc(opcode, regs);
			} else if (opcode[1] == 0x9d) {
				get_user(*((__u16 *) (opcode+2)), location+1);
				signal = math_emu_lfpc(opcode, regs);
			} else
				signal = SIGILL;
			break;
		default:
			signal = SIGILL;
			break;
		}
	}
#endif
	if (current->thread.fp_regs.fpc & FPC_DXC_MASK)
		signal = SIGFPE;
	else
		signal = SIGILL;
	if (signal == SIGFPE)
		do_fp_trap(regs, current->thread.fp_regs.fpc);
	else if (signal)
		do_trap(regs, signal, ILL_ILLOPN, "data exception");
}

static void space_switch_exception(struct pt_regs *regs)
{
	/* Set user psw back to home space mode. */
	if (user_mode(regs))
		regs->psw.mask |= PSW_ASC_HOME;
	/* Send SIGILL. */
	do_trap(regs, SIGILL, ILL_PRVOPC, "space switch event");
}

void __kprobes kernel_stack_overflow(struct pt_regs * regs)
{
	bust_spinlocks(1);
	printk("Kernel stack overflow.\n");
	show_regs(regs);
	bust_spinlocks(0);
	panic("Corrupt kernel stack, can't continue.");
}

/* init is done in lowcore.S and head.S */

void __init trap_init(void)
{
	int i;

	for (i = 0; i < 128; i++)
		pgm_check_table[i] = &default_trap_handler;
	pgm_check_table[1] = &illegal_op;
	pgm_check_table[2] = &privileged_op;
	pgm_check_table[3] = &execute_exception;
	pgm_check_table[4] = &do_protection_exception;
	pgm_check_table[5] = &addressing_exception;
	pgm_check_table[6] = &specification_exception;
	pgm_check_table[7] = &data_exception;
	pgm_check_table[8] = &overflow_exception;
	pgm_check_table[9] = &divide_exception;
	pgm_check_table[0x0A] = &overflow_exception;
	pgm_check_table[0x0B] = &divide_exception;
	pgm_check_table[0x0C] = &hfp_overflow_exception;
	pgm_check_table[0x0D] = &hfp_underflow_exception;
	pgm_check_table[0x0E] = &hfp_significance_exception;
	pgm_check_table[0x0F] = &hfp_divide_exception;
	pgm_check_table[0x10] = &do_dat_exception;
	pgm_check_table[0x11] = &do_dat_exception;
	pgm_check_table[0x12] = &translation_exception;
	pgm_check_table[0x13] = &special_op_exception;
#ifdef CONFIG_64BIT
	pgm_check_table[0x18] = &transaction_exception;
	pgm_check_table[0x38] = &do_asce_exception;
	pgm_check_table[0x39] = &do_dat_exception;
	pgm_check_table[0x3A] = &do_dat_exception;
	pgm_check_table[0x3B] = &do_dat_exception;
#endif /* CONFIG_64BIT */
	pgm_check_table[0x15] = &operand_exception;
	pgm_check_table[0x1C] = &space_switch_exception;
	pgm_check_table[0x1D] = &hfp_sqrt_exception;
	/* Enable machine checks early. */
	local_mcck_enable();
}