/*
 *  arch/s390/kernel/traps.c
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *               Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *
 *  Derived from "arch/i386/kernel/traps.c"
 *    Copyright (C) 1991, 1992 Linus Torvalds
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/reboot.h>
#include <linux/kprobes.h>
#include <linux/bug.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/mathemu.h>
#include <asm/cpcmd.h>
#include <asm/s390_ext.h>
#include <asm/lowcore.h>
#include <asm/debug.h>

/* Called from entry.S only */
extern void handle_per_exception(struct pt_regs *regs);

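/*
 * Table of program check handlers, indexed by the program interruption
 * code reported by the hardware; the entries are installed in trap_init().
 */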
typedef void pgm_check_handler_t(struct pt_regs *, long);
pgm_check_handler_t *pgm_check_table[128];

#ifdef CONFIG_SYSCTL
#ifdef CONFIG_PROCESS_DEBUG
int sysctl_userprocess_debug = 1;
#else
int sysctl_userprocess_debug = 0;
#endif
#endif

extern pgm_check_handler_t do_protection_exception;
extern pgm_check_handler_t do_dat_exception;
extern pgm_check_handler_t do_monitor_call;

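/* On s390 general register 15 holds the stack pointer. */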
#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })

#ifndef CONFIG_64BIT
#define FOURLONG "%08lx %08lx %08lx %08lx\n"
static int kstack_depth_to_print = 12;
#else /* CONFIG_64BIT */
#define FOURLONG "%016lx %016lx %016lx %016lx\n"
static int kstack_depth_to_print = 20;
#endif /* CONFIG_64BIT */

/*
 * For show_trace we have three different stacks to consider:
 *   - the panic stack which is used if the kernel stack has overflowed
 *   - the asynchronous interrupt stack (cpu related)
 *   - the synchronous kernel stack (process related)
 * The stack trace can start at any of the three stacks and can potentially
 * touch all of them. The order is: panic stack, async stack, sync stack.
 */
static unsigned long
__show_trace(unsigned long sp, unsigned long low, unsigned long high)
{
	struct stack_frame *sf;
	struct pt_regs *regs;

	while (1) {
		sp = sp & PSW_ADDR_INSN;
		if (sp < low || sp > high - sizeof(*sf))
			return sp;
		sf = (struct stack_frame *) sp;
		printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
		print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN);
		/* Follow the backchain. */
		while (1) {
			low = sp;
			sp = sf->back_chain & PSW_ADDR_INSN;
			if (!sp)
				break;
			if (sp <= low || sp > high - sizeof(*sf))
				return sp;
			sf = (struct stack_frame *) sp;
			printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
			print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN);
		}
		/* Zero backchain detected, check for interrupt frame. */
		sp = (unsigned long) (sf + 1);
		if (sp <= low || sp > high - sizeof(*regs))
			return sp;
		regs = (struct pt_regs *) sp;
		printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN);
		print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN);
		low = sp;
		sp = regs->gprs[15];
	}
}

void show_trace(struct task_struct *task, unsigned long *stack)
{
	register unsigned long __r15 asm ("15");
	unsigned long sp;

	sp = (unsigned long) stack;
	if (!sp)
		sp = task ? task->thread.ksp : __r15;
	printk("Call Trace:\n");
#ifdef CONFIG_CHECK_STACK
	sp = __show_trace(sp, S390_lowcore.panic_stack - 4096,
			  S390_lowcore.panic_stack);
#endif
	sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE,
			  S390_lowcore.async_stack);
	if (task)
		__show_trace(sp, (unsigned long) task_stack_page(task),
			     (unsigned long) task_stack_page(task) + THREAD_SIZE);
	else
		__show_trace(sp, S390_lowcore.thread_info,
			     S390_lowcore.thread_info + THREAD_SIZE);
	printk("\n");
	if (!task)
		task = current;
	debug_show_held_locks(task);
}

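/*
 * Dump the raw stack contents starting at the given stack pointer (or the
 * task's / current stack pointer), then print the call trace.
 */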
void show_stack(struct task_struct *task, unsigned long *sp)
{
	register unsigned long * __r15 asm ("15");
	unsigned long *stack;
	int i;

	if (!sp)
		stack = task ? (unsigned long *) task->thread.ksp : __r15;
	else
		stack = sp;

	for (i = 0; i < kstack_depth_to_print; i++) {
		if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
			break;
		if (i && ((i * sizeof (long) % 32) == 0))
			printk("\n       ");
		printk("%p ", (void *)*stack++);
	}
	printk("\n");
	show_trace(task, sp);
}

/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
	show_stack(NULL, NULL);
}

EXPORT_SYMBOL(dump_stack);

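/*
 * Extract a bit field from the PSW mask and shift it down to bit 0 so it
 * can be printed as a small number.
 */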
static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
{
	return (regs->psw.mask & bits) / ((~bits + 1) & bits);
}

void show_registers(struct pt_regs *regs)
{
	char *mode;

	mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl";
	printk("%s PSW : %p %p",
	       mode, (void *) regs->psw.mask,
	       (void *) regs->psw.addr);
	print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN);
	printk("           R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
	       "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER),
	       mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO),
	       mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY),
	       mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT),
	       mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC),
	       mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM));
#ifdef CONFIG_64BIT
	printk(" EA:%x", mask_bits(regs, PSW_BASE_BITS));
#endif
	printk("\n%s GPRS: " FOURLONG, mode,
	       regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
	printk("           " FOURLONG,
	       regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
	printk("           " FOURLONG,
	       regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
	printk("           " FOURLONG,
	       regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);

	show_code(regs);
}

/* This is called from fs/proc/array.c */
char *task_show_regs(struct task_struct *task, char *buffer)
{
	struct pt_regs *regs;

	regs = task_pt_regs(task);
	buffer += sprintf(buffer, "task: %p, ksp: %p\n",
			  task, (void *)task->thread.ksp);
	buffer += sprintf(buffer, "User PSW : %p %p\n",
			  (void *) regs->psw.mask, (void *)regs->psw.addr);

	buffer += sprintf(buffer, "User GPRS: " FOURLONG,
			  regs->gprs[0], regs->gprs[1],
			  regs->gprs[2], regs->gprs[3]);
	buffer += sprintf(buffer, "           " FOURLONG,
			  regs->gprs[4], regs->gprs[5],
			  regs->gprs[6], regs->gprs[7]);
	buffer += sprintf(buffer, "           " FOURLONG,
			  regs->gprs[8], regs->gprs[9],
			  regs->gprs[10], regs->gprs[11]);
	buffer += sprintf(buffer, "           " FOURLONG,
			  regs->gprs[12], regs->gprs[13],
			  regs->gprs[14], regs->gprs[15]);
	buffer += sprintf(buffer, "User ACRS: %08x %08x %08x %08x\n",
			  task->thread.acrs[0], task->thread.acrs[1],
			  task->thread.acrs[2], task->thread.acrs[3]);
	buffer += sprintf(buffer, "           %08x %08x %08x %08x\n",
			  task->thread.acrs[4], task->thread.acrs[5],
			  task->thread.acrs[6], task->thread.acrs[7]);
	buffer += sprintf(buffer, "           %08x %08x %08x %08x\n",
			  task->thread.acrs[8], task->thread.acrs[9],
			  task->thread.acrs[10], task->thread.acrs[11]);
	buffer += sprintf(buffer, "           %08x %08x %08x %08x\n",
			  task->thread.acrs[12], task->thread.acrs[13],
			  task->thread.acrs[14], task->thread.acrs[15]);
	return buffer;
}

static DEFINE_SPINLOCK(die_lock);

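/*
 * Print an oops message and the registers for a fatal exception, then
 * terminate the current task (or panic if the fault cannot be survived).
 */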
void die(const char * str, struct pt_regs * regs, long err)
{
	static int die_counter;

	oops_enter();
	debug_stop_all();
	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);
	printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
	print_modules();
	show_regs(regs);
	bust_spinlocks(0);
	spin_unlock_irq(&die_lock);
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception: panic_on_oops");
	oops_exit();
	do_exit(SIGSEGV);
}

static inline void
report_user_fault(long interruption_code, struct pt_regs *regs)
{
#if defined(CONFIG_SYSCTL)
	if (!sysctl_userprocess_debug)
		return;
#endif
#if defined(CONFIG_SYSCTL) || defined(CONFIG_PROCESS_DEBUG)
	printk("User process fault: interruption code 0x%lX\n",
	       interruption_code);
	show_regs(regs);
#endif
}

int is_valid_bugaddr(unsigned long addr)
{
	return 1;
}

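/*
 * Common trap handling: deliver a signal to user space offenders, try an
 * exception table fixup or report_bug() for kernel faults, and die() if
 * nothing handles the fault.
 */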
static inline void __kprobes do_trap(long interruption_code, int signr,
				     char *str, struct pt_regs *regs,
				     siginfo_t *info)
{
	/*
	 * We got all needed information from the lowcore and can
	 * now safely switch on interrupts.
	 */
	if (regs->psw.mask & PSW_MASK_PSTATE)
		local_irq_enable();

	if (notify_die(DIE_TRAP, str, regs, interruption_code,
		       interruption_code, signr) == NOTIFY_STOP)
		return;

	if (regs->psw.mask & PSW_MASK_PSTATE) {
		struct task_struct *tsk = current;

		tsk->thread.trap_no = interruption_code & 0xffff;
		force_sig_info(signr, info, tsk);
		report_user_fault(interruption_code, regs);
	} else {
		const struct exception_table_entry *fixup;
		fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
		if (fixup)
			regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
		else {
			enum bug_trap_type btt;

			btt = report_bug(regs->psw.addr & PSW_ADDR_INSN);
			if (btt == BUG_TRAP_TYPE_WARN)
				return;
			die(str, regs, interruption_code);
		}
	}
}

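/*
 * The PSW points past the instruction that caused the program check;
 * subtract the instruction length code from lowcore to get the address
 * of the instruction itself.
 */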
static inline void __user *get_check_address(struct pt_regs *regs)
{
	return (void __user *)((regs->psw.addr-S390_lowcore.pgm_ilc) & PSW_ADDR_INSN);
}

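/* PER single-step event: notify kprobes, then signal a ptraced task. */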
void __kprobes do_single_step(struct pt_regs *regs)
{
	if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0,
		       SIGTRAP) == NOTIFY_STOP){
		return;
	}
	if ((current->ptrace & PT_PTRACED) != 0)
		force_sig(SIGTRAP, current);
}

static void default_trap_handler(struct pt_regs * regs, long interruption_code)
{
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		local_irq_enable();
		report_user_fault(interruption_code, regs);
		/* do_exit() does not return, so report the fault first. */
		do_exit(SIGSEGV);
	} else
		die("Unknown program exception", regs, interruption_code);
}

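/*
 * Generate a minimal handler that fills in a siginfo structure and
 * forwards the program check to do_trap().
 */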
#define DO_ERROR_INFO(signr, str, name, sicode, siaddr) \
static void name(struct pt_regs * regs, long interruption_code) \
{ \
	siginfo_t info; \
	info.si_signo = signr; \
	info.si_errno = 0; \
	info.si_code = sicode; \
	info.si_addr = siaddr; \
	do_trap(interruption_code, signr, str, regs, &info); \
}

DO_ERROR_INFO(SIGILL, "addressing exception", addressing_exception,
	      ILL_ILLADR, get_check_address(regs))
DO_ERROR_INFO(SIGILL,  "execute exception", execute_exception,
	      ILL_ILLOPN, get_check_address(regs))
DO_ERROR_INFO(SIGFPE,  "fixpoint divide exception", divide_exception,
	      FPE_INTDIV, get_check_address(regs))
DO_ERROR_INFO(SIGFPE,  "fixpoint overflow exception", overflow_exception,
	      FPE_INTOVF, get_check_address(regs))
DO_ERROR_INFO(SIGFPE,  "HFP overflow exception", hfp_overflow_exception,
	      FPE_FLTOVF, get_check_address(regs))
DO_ERROR_INFO(SIGFPE,  "HFP underflow exception", hfp_underflow_exception,
	      FPE_FLTUND, get_check_address(regs))
DO_ERROR_INFO(SIGFPE,  "HFP significance exception", hfp_significance_exception,
	      FPE_FLTRES, get_check_address(regs))
DO_ERROR_INFO(SIGFPE,  "HFP divide exception", hfp_divide_exception,
	      FPE_FLTDIV, get_check_address(regs))
DO_ERROR_INFO(SIGFPE,  "HFP square root exception", hfp_sqrt_exception,
	      FPE_FLTINV, get_check_address(regs))
DO_ERROR_INFO(SIGILL,  "operand exception", operand_exception,
	      ILL_ILLOPN, get_check_address(regs))
DO_ERROR_INFO(SIGILL,  "privileged operation", privileged_op,
	      ILL_PRVOPC, get_check_address(regs))
DO_ERROR_INFO(SIGILL,  "special operation exception", special_op_exception,
	      ILL_ILLOPN, get_check_address(regs))
DO_ERROR_INFO(SIGILL,  "translation exception", translation_exception,
	      ILL_ILLOPN, get_check_address(regs))

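/*
 * Translate the data exception code (DXC) in the floating point control
 * register into an appropriate si_code and deliver SIGFPE.
 */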
static inline void
do_fp_trap(struct pt_regs *regs, void __user *location,
	   int fpc, long interruption_code)
{
	siginfo_t si;

	si.si_signo = SIGFPE;
	si.si_errno = 0;
	si.si_addr = location;
	si.si_code = 0;
	/* FPC[2] is Data Exception Code */
	if ((fpc & 0x00000300) == 0) {
		/* bits 6 and 7 of DXC are 0 iff IEEE exception */
		if (fpc & 0x8000) /* invalid fp operation */
			si.si_code = FPE_FLTINV;
		else if (fpc & 0x4000) /* div by 0 */
			si.si_code = FPE_FLTDIV;
		else if (fpc & 0x2000) /* overflow */
			si.si_code = FPE_FLTOVF;
		else if (fpc & 0x1000) /* underflow */
			si.si_code = FPE_FLTUND;
		else if (fpc & 0x0800) /* inexact */
			si.si_code = FPE_FLTRES;
	}
	current->thread.ieee_instruction_pointer = (addr_t) location;
	do_trap(interruption_code, SIGFPE,
		"floating point exception", regs, &si);
}

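/*
 * Operation exception (illegal opcode): recognize the ptrace breakpoint
 * opcode, emulate floating point instructions if CONFIG_MATHEMU is set,
 * hand kernel mode breakpoints to kprobes and send SIGILL otherwise.
 */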
static void illegal_op(struct pt_regs * regs, long interruption_code)
{
	siginfo_t info;
	__u8 opcode[6];
	__u16 __user *location;
	int signal = 0;

	location = get_check_address(regs);

	/*
	 * We got all needed information from the lowcore and can
	 * now safely switch on interrupts.
	 */
	if (regs->psw.mask & PSW_MASK_PSTATE)
		local_irq_enable();

	if (regs->psw.mask & PSW_MASK_PSTATE) {
		if (get_user(*((__u16 *) opcode), (__u16 __user *) location))
			return;
		if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) {
			if (current->ptrace & PT_PTRACED)
				force_sig(SIGTRAP, current);
			else
				signal = SIGILL;
#ifdef CONFIG_MATHEMU
		} else if (opcode[0] == 0xb3) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_b3(opcode, regs);
		} else if (opcode[0] == 0xed) {
			if (get_user(*((__u32 *) (opcode+2)),
				     (__u32 __user *)(location+1)))
				return;
			signal = math_emu_ed(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb299) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_srnm(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb29c) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_stfpc(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb29d) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_lfpc(opcode, regs);
#endif
		} else
			signal = SIGILL;
	} else {
		/*
		 * If we get an illegal op in kernel mode, send it through the
		 * kprobes notifier. If kprobes doesn't pick it up, SIGILL
		 */
		if (notify_die(DIE_BPT, "bpt", regs, interruption_code,
			       3, SIGTRAP) != NOTIFY_STOP)
			signal = SIGILL;
	}

#ifdef CONFIG_MATHEMU
	if (signal == SIGFPE)
		do_fp_trap(regs, location,
			   current->thread.fp_regs.fpc, interruption_code);
	else if (signal == SIGSEGV) {
		info.si_signo = signal;
		info.si_errno = 0;
		info.si_code = SEGV_MAPERR;
		info.si_addr = (void __user *) location;
		do_trap(interruption_code, signal,
			"user address fault", regs, &info);
	} else
#endif
	if (signal) {
		info.si_signo = signal;
		info.si_errno = 0;
		info.si_code = ILL_ILLOPC;
		info.si_addr = (void __user *) location;
		do_trap(interruption_code, signal,
			"illegal operation", regs, &info);
	}
}

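/*
 * Specification exception: with CONFIG_MATHEMU the affected floating
 * point load/store instructions are emulated for user space, otherwise
 * the exception is simply converted into SIGILL.
 */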
#ifdef CONFIG_MATHEMU
asmlinkage void
specification_exception(struct pt_regs * regs, long interruption_code)
{
	__u8 opcode[6];
	__u16 __user *location = NULL;
	int signal = 0;

	location = (__u16 __user *) get_check_address(regs);

	/*
	 * We got all needed information from the lowcore and can
	 * now safely switch on interrupts.
	 */
	if (regs->psw.mask & PSW_MASK_PSTATE)
		local_irq_enable();

	if (regs->psw.mask & PSW_MASK_PSTATE) {
		get_user(*((__u16 *) opcode), location);
		switch (opcode[0]) {
		case 0x28: /* LDR Rx,Ry   */
			signal = math_emu_ldr(opcode);
			break;
		case 0x38: /* LER Rx,Ry   */
			signal = math_emu_ler(opcode);
			break;
		case 0x60: /* STD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_std(opcode, regs);
			break;
		case 0x68: /* LD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ld(opcode, regs);
			break;
		case 0x70: /* STE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ste(opcode, regs);
			break;
		case 0x78: /* LE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_le(opcode, regs);
			break;
		default:
			signal = SIGILL;
			break;
		}
	} else
		signal = SIGILL;

	if (signal == SIGFPE)
		do_fp_trap(regs, location,
			   current->thread.fp_regs.fpc, interruption_code);
	else if (signal) {
		siginfo_t info;
		info.si_signo = signal;
		info.si_errno = 0;
		info.si_code = ILL_ILLOPN;
		info.si_addr = location;
		do_trap(interruption_code, signal,
			"specification exception", regs, &info);
	}
}
#else
DO_ERROR_INFO(SIGILL, "specification exception", specification_exception,
	      ILL_ILLOPN, get_check_address(regs));
#endif

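/*
 * Data exception: store the current FPC if the machine has IEEE floating
 * point, emulate the instruction with CONFIG_MATHEMU where possible, and
 * convert the data exception code into SIGFPE or SIGILL.
 */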
static void data_exception(struct pt_regs * regs, long interruption_code)
{
	__u16 __user *location;
	int signal = 0;

	location = get_check_address(regs);

	/*
	 * We got all needed information from the lowcore and can
	 * now safely switch on interrupts.
	 */
	if (regs->psw.mask & PSW_MASK_PSTATE)
		local_irq_enable();

	if (MACHINE_HAS_IEEE)
		asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));

#ifdef CONFIG_MATHEMU
	else if (regs->psw.mask & PSW_MASK_PSTATE) {
		__u8 opcode[6];
		get_user(*((__u16 *) opcode), location);
		switch (opcode[0]) {
		case 0x28: /* LDR Rx,Ry   */
			signal = math_emu_ldr(opcode);
			break;
		case 0x38: /* LER Rx,Ry   */
			signal = math_emu_ler(opcode);
			break;
		case 0x60: /* STD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_std(opcode, regs);
			break;
		case 0x68: /* LD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ld(opcode, regs);
			break;
		case 0x70: /* STE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ste(opcode, regs);
			break;
		case 0x78: /* LE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_le(opcode, regs);
			break;
		case 0xb3:
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_b3(opcode, regs);
			break;
		case 0xed:
			get_user(*((__u32 *) (opcode+2)),
				 (__u32 __user *)(location+1));
			signal = math_emu_ed(opcode, regs);
			break;
		case 0xb2:
			if (opcode[1] == 0x99) {
				get_user(*((__u16 *) (opcode+2)), location+1);
				signal = math_emu_srnm(opcode, regs);
			} else if (opcode[1] == 0x9c) {
				get_user(*((__u16 *) (opcode+2)), location+1);
				signal = math_emu_stfpc(opcode, regs);
			} else if (opcode[1] == 0x9d) {
				get_user(*((__u16 *) (opcode+2)), location+1);
				signal = math_emu_lfpc(opcode, regs);
			} else
				signal = SIGILL;
			break;
		default:
			signal = SIGILL;
			break;
		}
	}
#endif
	if (current->thread.fp_regs.fpc & FPC_DXC_MASK)
		signal = SIGFPE;
	else
		signal = SIGILL;
	if (signal == SIGFPE)
		do_fp_trap(regs, location,
			   current->thread.fp_regs.fpc, interruption_code);
	else if (signal) {
		siginfo_t info;
		info.si_signo = signal;
		info.si_errno = 0;
		info.si_code = ILL_ILLOPN;
		info.si_addr = location;
		do_trap(interruption_code, signal,
			"data exception", regs, &info);
	}
}

static void space_switch_exception(struct pt_regs * regs, long int_code)
{
	siginfo_t info;

	/* Set user psw back to home space mode. */
	if (regs->psw.mask & PSW_MASK_PSTATE)
		regs->psw.mask |= PSW_ASC_HOME;
	/* Send SIGILL. */
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_PRVOPC;
	info.si_addr = get_check_address(regs);
	do_trap(int_code, SIGILL, "space switch event", regs, &info);
}

asmlinkage void kernel_stack_overflow(struct pt_regs * regs)
{
	bust_spinlocks(1);
	printk("Kernel stack overflow.\n");
	show_regs(regs);
	bust_spinlocks(0);
	panic("Corrupt kernel stack, can't continue.");
}

/* init is done in lowcore.S and head.S */

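/*
 * Fill the program check table: every interruption code defaults to
 * default_trap_handler(), specific codes get dedicated handlers.
 */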
void __init trap_init(void)
{
	int i;

	for (i = 0; i < 128; i++)
		pgm_check_table[i] = &default_trap_handler;
	pgm_check_table[1] = &illegal_op;
	pgm_check_table[2] = &privileged_op;
	pgm_check_table[3] = &execute_exception;
	pgm_check_table[4] = &do_protection_exception;
	pgm_check_table[5] = &addressing_exception;
	pgm_check_table[6] = &specification_exception;
	pgm_check_table[7] = &data_exception;
	pgm_check_table[8] = &overflow_exception;
	pgm_check_table[9] = &divide_exception;
	pgm_check_table[0x0A] = &overflow_exception;
	pgm_check_table[0x0B] = &divide_exception;
	pgm_check_table[0x0C] = &hfp_overflow_exception;
	pgm_check_table[0x0D] = &hfp_underflow_exception;
	pgm_check_table[0x0E] = &hfp_significance_exception;
	pgm_check_table[0x0F] = &hfp_divide_exception;
	pgm_check_table[0x10] = &do_dat_exception;
	pgm_check_table[0x11] = &do_dat_exception;
	pgm_check_table[0x12] = &translation_exception;
	pgm_check_table[0x13] = &special_op_exception;
#ifdef CONFIG_64BIT
	pgm_check_table[0x38] = &do_dat_exception;
	pgm_check_table[0x39] = &do_dat_exception;
	pgm_check_table[0x3A] = &do_dat_exception;
	pgm_check_table[0x3B] = &do_dat_exception;
#endif /* CONFIG_64BIT */
	pgm_check_table[0x15] = &operand_exception;
	pgm_check_table[0x1C] = &space_switch_exception;
	pgm_check_table[0x1D] = &hfp_sqrt_exception;
	pgm_check_table[0x40] = &do_monitor_call;
	pfault_irq_init();
}