/*
 *  arch/s390/kernel/traps.c
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *               Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *
 *  Derived from "arch/i386/kernel/traps.c"
 *    Copyright (C) 1991, 1992 Linus Torvalds
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'entry.S' ('entry64.S' on 64 bit).
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/reboot.h>
#include <linux/kprobes.h>
#include <linux/bug.h>
#include <linux/utsname.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <linux/atomic.h>
#include <asm/mathemu.h>
#include <asm/cpcmd.h>
#include <asm/lowcore.h>
#include <asm/debug.h>
#include "entry.h"

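/*
 * Program check dispatch table. The low-level program check handler
 * (entry.S / entry64.S) uses the program interruption code (0..127) as
 * an index into this table and calls the registered handler. The table
 * is populated by trap_init() below.
 */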
void (*pgm_check_table[128])(struct pt_regs *, long, unsigned long);

int show_unhandled_signals;

#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })

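/*
 * Width of one stack slot in the raw stack dump and the number of slots
 * printed by show_stack(): 32-bit words / 12 entries on 31 bit,
 * 64-bit words / 20 entries on 64 bit.
 */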
#ifndef CONFIG_64BIT
#define LONG "%08lx "
#define FOURLONG "%08lx %08lx %08lx %08lx\n"
static int kstack_depth_to_print = 12;
#else /* CONFIG_64BIT */
#define LONG "%016lx "
#define FOURLONG "%016lx %016lx %016lx %016lx\n"
static int kstack_depth_to_print = 20;
#endif /* CONFIG_64BIT */

/*
 * For show_trace we have three different stacks to consider:
 *   - the panic stack which is used if the kernel stack has overflowed
 *   - the asynchronous interrupt stack (cpu related)
 *   - the synchronous kernel stack (process related)
 * The stack trace can start at any of the three stacks and can potentially
 * touch all of them. The order is: panic stack, async stack, sync stack.
 */
static unsigned long
__show_trace(unsigned long sp, unsigned long low, unsigned long high)
{
	struct stack_frame *sf;
	struct pt_regs *regs;

	while (1) {
		sp = sp & PSW_ADDR_INSN;
		if (sp < low || sp > high - sizeof(*sf))
			return sp;
		sf = (struct stack_frame *) sp;
		printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
		print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN);
		/* Follow the backchain. */
		while (1) {
			low = sp;
			sp = sf->back_chain & PSW_ADDR_INSN;
			if (!sp)
				break;
			if (sp <= low || sp > high - sizeof(*sf))
				return sp;
			sf = (struct stack_frame *) sp;
			printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
			print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN);
		}
		/* Zero backchain detected, check for interrupt frame. */
		sp = (unsigned long) (sf + 1);
		if (sp <= low || sp > high - sizeof(*regs))
			return sp;
		regs = (struct pt_regs *) sp;
		printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN);
		print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN);
		low = sp;
		sp = regs->gprs[15];
	}
}

static void show_trace(struct task_struct *task, unsigned long *stack)
{
	register unsigned long __r15 asm ("15");
	unsigned long sp;

	sp = (unsigned long) stack;
	if (!sp)
		sp = task ? task->thread.ksp : __r15;
	printk("Call Trace:\n");
#ifdef CONFIG_CHECK_STACK
	sp = __show_trace(sp, S390_lowcore.panic_stack - 4096,
			  S390_lowcore.panic_stack);
#endif
	sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE,
			  S390_lowcore.async_stack);
	if (task)
		__show_trace(sp, (unsigned long) task_stack_page(task),
			     (unsigned long) task_stack_page(task) + THREAD_SIZE);
	else
		__show_trace(sp, S390_lowcore.thread_info,
			     S390_lowcore.thread_info + THREAD_SIZE);
	if (!task)
		task = current;
	debug_show_held_locks(task);
}

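/*
 * Dump at most kstack_depth_to_print words of raw stack contents, starting
 * at @sp (or at the task's / current stack pointer if @sp is NULL), and
 * then print the call trace.
 */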
void show_stack(struct task_struct *task, unsigned long *sp)
{
	register unsigned long * __r15 asm ("15");
	unsigned long *stack;
	int i;

	if (!sp)
		stack = task ? (unsigned long *) task->thread.ksp : __r15;
	else
		stack = sp;

	for (i = 0; i < kstack_depth_to_print; i++) {
		if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
			break;
		if (i && ((i * sizeof (long) % 32) == 0))
			printk("\n       ");
		printk(LONG, *stack++);
	}
	printk("\n");
	show_trace(task, sp);
}

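/*
 * On 64 bit the CPU records the address of the last instruction that broke
 * the sequential instruction flow (the breaking-event address); the program
 * check handler saves it in regs->args[0] and it is printed here as a
 * debugging aid.
 */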
static void show_last_breaking_event(struct pt_regs *regs)
{
#ifdef CONFIG_64BIT
	printk("Last Breaking-Event-Address:\n");
	printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN);
	print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN);
#endif
}

/*
 * The s390 implementation of the architecture-independent dump_stack()
 * interface.
 */
void dump_stack(void)
{
	printk("CPU: %d %s %s %.*s\n",
	       task_thread_info(current)->cpu, print_tainted(),
	       init_utsname()->release,
	       (int)strcspn(init_utsname()->version, " "),
	       init_utsname()->version);
	printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
	       current->comm, current->pid, current,
	       (void *) current->thread.ksp);
	show_stack(NULL, NULL);
}
EXPORT_SYMBOL(dump_stack);

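/*
 * Extract the bit field selected by @bits from the PSW mask, shifted down
 * to bit 0 (the division by the lowest set bit of @bits does the shift).
 */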
static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
{
	return (regs->psw.mask & bits) / ((~bits + 1) & bits);
}

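/*
 * Print the PSW, the decoded PSW flag bits, the general purpose registers
 * and a dump of the code around the PSW address.
 */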
void show_registers(struct pt_regs *regs)
{
	char *mode;

	mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl";
	printk("%s PSW : %p %p",
	       mode, (void *) regs->psw.mask,
	       (void *) regs->psw.addr);
	print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN);
	printk("           R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
	       "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER),
	       mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO),
	       mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY),
	       mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT),
	       mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC),
	       mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM));
#ifdef CONFIG_64BIT
	printk(" EA:%x", mask_bits(regs, PSW_BASE_BITS));
#endif
	printk("\n%s GPRS: " FOURLONG, mode,
	       regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
	printk("           " FOURLONG,
	       regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
	printk("           " FOURLONG,
	       regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
	printk("           " FOURLONG,
	       regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);

	show_code(regs);
}

void show_regs(struct pt_regs *regs)
{
	print_modules();
	printk("CPU: %d %s %s %.*s\n",
	       task_thread_info(current)->cpu, print_tainted(),
	       init_utsname()->release,
	       (int)strcspn(init_utsname()->version, " "),
	       init_utsname()->version);
	printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
	       current->comm, current->pid, current,
	       (void *) current->thread.ksp);
	show_registers(regs);
	/* Show stack backtrace if pt_regs is from kernel mode */
	if (!(regs->psw.mask & PSW_MASK_PSTATE))
		show_trace(NULL, (unsigned long *) regs->gprs[15]);
	show_last_breaking_event(regs);
}

static DEFINE_SPINLOCK(die_lock);

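/*
 * Terminate the current task after a fatal kernel fault: serialize oopses
 * on die_lock, print the register state, taint the kernel and either panic
 * (in interrupt context or with panic_on_oops) or exit with SIGSEGV.
 */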
void die(const char *str, struct pt_regs *regs, long err)
{
	static int die_counter;

	oops_enter();
	debug_stop_all();
	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);
	printk("%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("DEBUG_PAGEALLOC");
#endif
	printk("\n");
	notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV);
	show_regs(regs);
	bust_spinlocks(0);
	add_taint(TAINT_DIE);
	spin_unlock_irq(&die_lock);
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception: panic_on_oops");
	oops_exit();
	do_exit(SIGSEGV);
}

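/*
 * Rate-limited diagnostic for faults that terminate a user process.
 * Nothing is printed unless show_unhandled_signals is set (init is always
 * reported) and the task has no handler installed for the signal.
 */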
static inline void report_user_fault(struct pt_regs *regs, long int_code,
				     int signr)
{
	if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
		return;
	if (!unhandled_signal(current, signr))
		return;
	if (!printk_ratelimit())
		return;
	printk("User process fault: interruption code 0x%lX ", int_code);
	print_vma_addr("in ", regs->psw.addr & PSW_ADDR_INSN);
	printk("\n");
	show_regs(regs);
}

int is_valid_bugaddr(unsigned long addr)
{
	return 1;
}

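/*
 * Common trap delivery. User mode faults result in a signal (plus the
 * optional diagnostic from report_user_fault()); kernel mode faults are
 * first run through the exception table fixups, then report_bug(), and
 * finally die() if nothing handled them.
 */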
static inline void __kprobes do_trap(long pgm_int_code, int signr, char *str,
				     struct pt_regs *regs, siginfo_t *info)
{
	if (notify_die(DIE_TRAP, str, regs, pgm_int_code,
		       pgm_int_code, signr) == NOTIFY_STOP)
		return;

	if (regs->psw.mask & PSW_MASK_PSTATE) {
		struct task_struct *tsk = current;

		tsk->thread.trap_no = pgm_int_code & 0xffff;
		force_sig_info(signr, info, tsk);
		report_user_fault(regs, pgm_int_code, signr);
	} else {
		const struct exception_table_entry *fixup;
		fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
		if (fixup)
			regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
		else {
			enum bug_trap_type btt;

			btt = report_bug(regs->psw.addr & PSW_ADDR_INSN, regs);
			if (btt == BUG_TRAP_TYPE_WARN)
				return;
			die(str, regs, pgm_int_code);
		}
	}
}

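/*
 * The PSW stored for a program check points behind the faulting instruction;
 * the instruction length in bytes is passed in the upper half of
 * pgm_int_code, so subtracting it yields the address of the instruction that
 * caused the trap.
 */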
static inline void __user *get_psw_address(struct pt_regs *regs,
					   long pgm_int_code)
{
	return (void __user *)
		((regs->psw.addr - (pgm_int_code >> 16)) & PSW_ADDR_INSN);
}

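/*
 * PER (program event recording) trap: give the debug notifier chain a
 * chance to consume the event; otherwise, if the task is ptraced, send it
 * SIGTRAP with si_code TRAP_HWBKPT.
 */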
void __kprobes do_per_trap(struct pt_regs *regs)
{
	siginfo_t info;

	if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, SIGTRAP) == NOTIFY_STOP)
		return;
	if (!current->ptrace)
		return;
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = (void *) current->thread.per_event.address;
	force_sig_info(SIGTRAP, &info, current);
}

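/*
 * Fallback for program interruption codes without a dedicated handler:
 * kill user space processes with SIGSEGV, oops in kernel mode.
 */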
static void default_trap_handler(struct pt_regs *regs, long pgm_int_code,
				 unsigned long trans_exc_code)
{
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		report_user_fault(regs, pgm_int_code, SIGSEGV);
		do_exit(SIGSEGV);
	} else
		die("Unknown program exception", regs, pgm_int_code);
}

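/*
 * Generate a trivial trap handler that fills in a siginfo with the given
 * signal and si_code, points si_addr at the faulting instruction and hands
 * everything to do_trap().
 */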
#define DO_ERROR_INFO(name, signr, sicode, str) \
static void name(struct pt_regs *regs, long pgm_int_code, \
		 unsigned long trans_exc_code) \
{ \
	siginfo_t info; \
	info.si_signo = signr; \
	info.si_errno = 0; \
	info.si_code = sicode; \
	info.si_addr = get_psw_address(regs, pgm_int_code); \
	do_trap(pgm_int_code, signr, str, regs, &info); \
}

DO_ERROR_INFO(addressing_exception, SIGILL, ILL_ILLADR,
	      "addressing exception")
DO_ERROR_INFO(execute_exception, SIGILL, ILL_ILLOPN,
	      "execute exception")
DO_ERROR_INFO(divide_exception, SIGFPE, FPE_INTDIV,
	      "fixpoint divide exception")
DO_ERROR_INFO(overflow_exception, SIGFPE, FPE_INTOVF,
	      "fixpoint overflow exception")
DO_ERROR_INFO(hfp_overflow_exception, SIGFPE, FPE_FLTOVF,
	      "HFP overflow exception")
DO_ERROR_INFO(hfp_underflow_exception, SIGFPE, FPE_FLTUND,
	      "HFP underflow exception")
DO_ERROR_INFO(hfp_significance_exception, SIGFPE, FPE_FLTRES,
	      "HFP significance exception")
DO_ERROR_INFO(hfp_divide_exception, SIGFPE, FPE_FLTDIV,
	      "HFP divide exception")
DO_ERROR_INFO(hfp_sqrt_exception, SIGFPE, FPE_FLTINV,
	      "HFP square root exception")
DO_ERROR_INFO(operand_exception, SIGILL, ILL_ILLOPN,
	      "operand exception")
DO_ERROR_INFO(privileged_op, SIGILL, ILL_PRVOPC,
	      "privileged operation")
DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN,
	      "special operation exception")
DO_ERROR_INFO(translation_exception, SIGILL, ILL_ILLOPN,
	      "translation exception")

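/*
 * Deliver SIGFPE for an IEEE floating point exception. The data exception
 * code in FPC byte 2 selects the si_code; non-IEEE DXC values leave si_code
 * at zero.
 */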
static inline void do_fp_trap(struct pt_regs *regs, void __user *location,
			      int fpc, long pgm_int_code)
{
	siginfo_t si;

	si.si_signo = SIGFPE;
	si.si_errno = 0;
	si.si_addr = location;
	si.si_code = 0;
	/* FPC[2] is Data Exception Code */
	if ((fpc & 0x00000300) == 0) {
		/* bits 6 and 7 of DXC are 0 iff IEEE exception */
		if (fpc & 0x8000) /* invalid fp operation */
			si.si_code = FPE_FLTINV;
		else if (fpc & 0x4000) /* div by 0 */
			si.si_code = FPE_FLTDIV;
		else if (fpc & 0x2000) /* overflow */
			si.si_code = FPE_FLTOVF;
		else if (fpc & 0x1000) /* underflow */
			si.si_code = FPE_FLTUND;
		else if (fpc & 0x0800) /* inexact */
			si.si_code = FPE_FLTRES;
	}
	do_trap(pgm_int_code, SIGFPE,
		"floating point exception", regs, &si);
}

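/*
 * Operation exception handler. In user mode this recognizes the ptrace
 * breakpoint opcode and, with CONFIG_MATHEMU, emulates the floating point
 * instructions of machines without an FPU; everything else gets SIGILL.
 * In kernel mode the exception is offered to the kprobes breakpoint
 * notifier first.
 */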
static void __kprobes illegal_op(struct pt_regs *regs, long pgm_int_code,
				 unsigned long trans_exc_code)
{
	siginfo_t info;
	__u8 opcode[6];
	__u16 __user *location;
	int signal = 0;

	location = get_psw_address(regs, pgm_int_code);

	if (regs->psw.mask & PSW_MASK_PSTATE) {
		if (get_user(*((__u16 *) opcode), (__u16 __user *) location))
			return;
		if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) {
			if (current->ptrace) {
				info.si_signo = SIGTRAP;
				info.si_errno = 0;
				info.si_code = TRAP_BRKPT;
				info.si_addr = location;
				force_sig_info(SIGTRAP, &info, current);
			} else
				signal = SIGILL;
#ifdef CONFIG_MATHEMU
		} else if (opcode[0] == 0xb3) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_b3(opcode, regs);
		} else if (opcode[0] == 0xed) {
			if (get_user(*((__u32 *) (opcode+2)),
				     (__u32 __user *)(location+1)))
				return;
			signal = math_emu_ed(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb299) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_srnm(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb29c) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_stfpc(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb29d) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_lfpc(opcode, regs);
#endif
		} else
			signal = SIGILL;
	} else {
		/*
		 * If we get an illegal op in kernel mode, send it through the
		 * kprobes notifier. If kprobes doesn't pick it up, send SIGILL.
		 */
		if (notify_die(DIE_BPT, "bpt", regs, pgm_int_code,
			       3, SIGTRAP) != NOTIFY_STOP)
			signal = SIGILL;
	}

#ifdef CONFIG_MATHEMU
	if (signal == SIGFPE)
		do_fp_trap(regs, location,
			   current->thread.fp_regs.fpc, pgm_int_code);
	else if (signal == SIGSEGV) {
		info.si_signo = signal;
		info.si_errno = 0;
		info.si_code = SEGV_MAPERR;
		info.si_addr = (void __user *) location;
		do_trap(pgm_int_code, signal,
			"user address fault", regs, &info);
	} else
#endif
	if (signal) {
		info.si_signo = signal;
		info.si_errno = 0;
		info.si_code = ILL_ILLOPC;
		info.si_addr = (void __user *) location;
		do_trap(pgm_int_code, signal,
			"illegal operation", regs, &info);
	}
}

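/*
 * Specification exception handler. With CONFIG_MATHEMU the basic floating
 * point load/store instructions are emulated for user space on machines
 * without an FPU; without it the exception simply raises SIGILL/ILL_ILLOPN.
 */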
#ifdef CONFIG_MATHEMU
void specification_exception(struct pt_regs *regs, long pgm_int_code,
			     unsigned long trans_exc_code)
{
	__u8 opcode[6];
	__u16 __user *location = NULL;
	int signal = 0;

	location = (__u16 __user *) get_psw_address(regs, pgm_int_code);

	if (regs->psw.mask & PSW_MASK_PSTATE) {
		get_user(*((__u16 *) opcode), location);
		switch (opcode[0]) {
		case 0x28: /* LDR Rx,Ry   */
			signal = math_emu_ldr(opcode);
			break;
		case 0x38: /* LER Rx,Ry   */
			signal = math_emu_ler(opcode);
			break;
		case 0x60: /* STD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_std(opcode, regs);
			break;
		case 0x68: /* LD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ld(opcode, regs);
			break;
		case 0x70: /* STE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ste(opcode, regs);
			break;
		case 0x78: /* LE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_le(opcode, regs);
			break;
		default:
			signal = SIGILL;
			break;
		}
	} else
		signal = SIGILL;

	if (signal == SIGFPE)
		do_fp_trap(regs, location,
			   current->thread.fp_regs.fpc, pgm_int_code);
	else if (signal) {
		siginfo_t info;
		info.si_signo = signal;
		info.si_errno = 0;
		info.si_code = ILL_ILLOPN;
		info.si_addr = location;
		do_trap(pgm_int_code, signal,
			"specification exception", regs, &info);
	}
}
#else
DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN,
	      "specification exception");
#endif

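/*
 * Data exception handler. On machines with IEEE floating point the current
 * FPC is saved from hardware; with CONFIG_MATHEMU the faulting user space
 * instruction is emulated instead. A nonzero data exception code in the FPC
 * is then reported as SIGFPE, everything else as SIGILL.
 */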
static void data_exception(struct pt_regs *regs, long pgm_int_code,
			   unsigned long trans_exc_code)
{
	__u16 __user *location;
	int signal = 0;

	location = get_psw_address(regs, pgm_int_code);

	if (MACHINE_HAS_IEEE)
		asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));

#ifdef CONFIG_MATHEMU
	else if (regs->psw.mask & PSW_MASK_PSTATE) {
		__u8 opcode[6];
		get_user(*((__u16 *) opcode), location);
		switch (opcode[0]) {
		case 0x28: /* LDR Rx,Ry   */
			signal = math_emu_ldr(opcode);
			break;
		case 0x38: /* LER Rx,Ry   */
			signal = math_emu_ler(opcode);
			break;
		case 0x60: /* STD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_std(opcode, regs);
			break;
		case 0x68: /* LD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ld(opcode, regs);
			break;
		case 0x70: /* STE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ste(opcode, regs);
			break;
		case 0x78: /* LE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_le(opcode, regs);
			break;
		case 0xb3:
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_b3(opcode, regs);
			break;
		case 0xed:
			get_user(*((__u32 *) (opcode+2)),
				 (__u32 __user *)(location+1));
			signal = math_emu_ed(opcode, regs);
			break;
		case 0xb2:
			if (opcode[1] == 0x99) {
				get_user(*((__u16 *) (opcode+2)), location+1);
				signal = math_emu_srnm(opcode, regs);
			} else if (opcode[1] == 0x9c) {
				get_user(*((__u16 *) (opcode+2)), location+1);
				signal = math_emu_stfpc(opcode, regs);
			} else if (opcode[1] == 0x9d) {
				get_user(*((__u16 *) (opcode+2)), location+1);
				signal = math_emu_lfpc(opcode, regs);
			} else
				signal = SIGILL;
			break;
		default:
			signal = SIGILL;
			break;
		}
	}
#endif
	if (current->thread.fp_regs.fpc & FPC_DXC_MASK)
		signal = SIGFPE;
	else
		signal = SIGILL;
	if (signal == SIGFPE)
		do_fp_trap(regs, location,
			   current->thread.fp_regs.fpc, pgm_int_code);
	else if (signal) {
		siginfo_t info;
		info.si_signo = signal;
		info.si_errno = 0;
		info.si_code = ILL_ILLOPN;
		info.si_addr = location;
		do_trap(pgm_int_code, signal, "data exception", regs, &info);
	}
}

static void space_switch_exception(struct pt_regs *regs, long pgm_int_code,
				   unsigned long trans_exc_code)
{
	siginfo_t info;

	/* Set user psw back to home space mode. */
	if (regs->psw.mask & PSW_MASK_PSTATE)
		regs->psw.mask |= PSW_ASC_HOME;
	/* Send SIGILL. */
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_PRVOPC;
	info.si_addr = get_psw_address(regs, pgm_int_code);
	do_trap(pgm_int_code, SIGILL, "space switch event", regs, &info);
}

void __kprobes kernel_stack_overflow(struct pt_regs *regs)
{
	bust_spinlocks(1);
	printk("Kernel stack overflow.\n");
	show_regs(regs);
	bust_spinlocks(0);
	panic("Corrupt kernel stack, can't continue.");
}

/* init is done in lowcore.S and head.S */

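/*
 * trap_init() fills the program check dispatch table with the handlers
 * defined above, keyed by program interruption code, and enables machine
 * check interruptions as early as possible.
 */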
void __init trap_init(void)
{
	int i;

	for (i = 0; i < 128; i++)
		pgm_check_table[i] = &default_trap_handler;
	pgm_check_table[1] = &illegal_op;
	pgm_check_table[2] = &privileged_op;
	pgm_check_table[3] = &execute_exception;
	pgm_check_table[4] = &do_protection_exception;
	pgm_check_table[5] = &addressing_exception;
	pgm_check_table[6] = &specification_exception;
	pgm_check_table[7] = &data_exception;
	pgm_check_table[8] = &overflow_exception;
	pgm_check_table[9] = &divide_exception;
	pgm_check_table[0x0A] = &overflow_exception;
	pgm_check_table[0x0B] = &divide_exception;
	pgm_check_table[0x0C] = &hfp_overflow_exception;
	pgm_check_table[0x0D] = &hfp_underflow_exception;
	pgm_check_table[0x0E] = &hfp_significance_exception;
	pgm_check_table[0x0F] = &hfp_divide_exception;
	pgm_check_table[0x10] = &do_dat_exception;
	pgm_check_table[0x11] = &do_dat_exception;
	pgm_check_table[0x12] = &translation_exception;
	pgm_check_table[0x13] = &special_op_exception;
#ifdef CONFIG_64BIT
	pgm_check_table[0x38] = &do_asce_exception;
	pgm_check_table[0x39] = &do_dat_exception;
	pgm_check_table[0x3A] = &do_dat_exception;
	pgm_check_table[0x3B] = &do_dat_exception;
#endif /* CONFIG_64BIT */
	pgm_check_table[0x15] = &operand_exception;
	pgm_check_table[0x1C] = &space_switch_exception;
	pgm_check_table[0x1D] = &hfp_sqrt_exception;
	/* Enable machine checks early. */
	local_mcck_enable();
}