xref: /openbmc/linux/arch/parisc/kernel/traps.c (revision 0edbfea5)
/*
 *  linux/arch/parisc/traps.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 1999, 2000  Philipp Rumpf <prumpf@tux.org>
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/console.h>
#include <linux/bug.h>
#include <linux/ratelimit.h>
#include <linux/uaccess.h>

#include <asm/assembly.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/unaligned.h>
#include <linux/atomic.h>
#include <asm/smp.h>
#include <asm/pdc.h>
#include <asm/pdc_chassis.h>
#include <asm/unwind.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

#include "../math-emu/math-emu.h"	/* for handle_fpe() */

static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
	struct pt_regs *regs);

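/*
 * Render the low @nbits bits of @x into @buf as an ASCII string of '0'/'1'
 * characters, most significant bit first, NUL-terminated; used below to
 * pretty-print the PSW and FPSR bit fields. Returns @nbits.
 */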
static int printbinary(char *buf, unsigned long x, int nbits)
{
	unsigned long mask = 1UL << (nbits - 1);
	while (mask != 0) {
		*buf++ = (mask & x ? '1' : '0');
		mask >>= 1;
	}
	*buf = '\0';

	return nbits;
}

#ifdef CONFIG_64BIT
#define RFMT "%016lx"
#else
#define RFMT "%08lx"
#endif
#define FFMT "%016llx"	/* fpregs are 64-bit always */

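/*
 * PRINTREGS() prints four consecutive registers of array @r on one line,
 * prefixed with the name @f and the index range; e.g. print_gr() below
 * uses PRINTREGS(level, regs->gr, "r", RFMT, 0) for gr0-gr3.
 */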
#define PRINTREGS(lvl,r,f,fmt,x)	\
	printk("%s%s%02d-%02d  " fmt " " fmt " " fmt " " fmt "\n",	\
		lvl, f, (x), (x+3), (r)[(x)+0], (r)[(x)+1],		\
		(r)[(x)+2], (r)[(x)+3])

static void print_gr(char *level, struct pt_regs *regs)
{
	int i;
	char buf[64];

	printk("%s\n", level);
	printk("%s     YZrvWESTHLNXBCVMcbcbcbcbOGFRQPDI\n", level);
	printbinary(buf, regs->gr[0], 32);
	printk("%sPSW: %s %s\n", level, buf, print_tainted());

	for (i = 0; i < 32; i += 4)
		PRINTREGS(level, regs->gr, "r", RFMT, i);
}

static void print_fr(char *level, struct pt_regs *regs)
{
	int i;
	char buf[64];
	struct { u32 sw[2]; } s;

	/* FR are 64-bit everywhere. Need to use asm to get the content
	 * of fpsr/fper1, and we assume that we won't have an FP Identify
	 * in our way, otherwise we're screwed.
	 * The fldd is used to restore the T-bit if there was one, as the
	 * store clears it anyway.
	 * PA2.0 book says "thou shall not use fstw on FPSR/FPERs" - T-Bone */
	asm volatile ("fstd %%fr0,0(%1)	\n\t"
		      "fldd 0(%1),%%fr0	\n\t"
		      : "=m" (s) : "r" (&s) : "r0");

	printk("%s\n", level);
	printk("%s      VZOUICununcqcqcqcqcqcrmunTDVZOUI\n", level);
	printbinary(buf, s.sw[0], 32);
	printk("%sFPSR: %s\n", level, buf);
	printk("%sFPER1: %08x\n", level, s.sw[1]);

	/* here we'll print fr0 again, though it'll be meaningless */
	for (i = 0; i < 32; i += 4)
		PRINTREGS(level, regs->fr, "fr", FFMT, i);
}

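/*
 * Dump the trap state: PSW and general registers, space registers, the FP
 * state for user-mode traps, and the interruption registers (IASQ/IAOQ,
 * IIR, ISR/IOR). For kernel-mode traps a backtrace is appended via
 * parisc_show_stack().
 */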
void show_regs(struct pt_regs *regs)
{
	int i, user;
	char *level;
	unsigned long cr30, cr31;

	user = user_mode(regs);
	level = user ? KERN_DEBUG : KERN_CRIT;

	show_regs_print_info(level);

	print_gr(level, regs);

	for (i = 0; i < 8; i += 4)
		PRINTREGS(level, regs->sr, "sr", RFMT, i);

	if (user)
		print_fr(level, regs);

	cr30 = mfctl(30);
	cr31 = mfctl(31);
	printk("%s\n", level);
	printk("%sIASQ: " RFMT " " RFMT " IAOQ: " RFMT " " RFMT "\n",
	       level, regs->iasq[0], regs->iasq[1], regs->iaoq[0], regs->iaoq[1]);
	printk("%s IIR: %08lx    ISR: " RFMT "  IOR: " RFMT "\n",
	       level, regs->iir, regs->isr, regs->ior);
	printk("%s CPU: %8d   CR30: " RFMT " CR31: " RFMT "\n",
	       level, current_thread_info()->cpu, cr30, cr31);
	printk("%s ORIG_R28: " RFMT "\n", level, regs->orig_r28);

	if (user) {
		printk("%s IAOQ[0]: " RFMT "\n", level, regs->iaoq[0]);
		printk("%s IAOQ[1]: " RFMT "\n", level, regs->iaoq[1]);
		printk("%s RP(r2): " RFMT "\n", level, regs->gr[2]);
	} else {
		printk("%s IAOQ[0]: %pS\n", level, (void *) regs->iaoq[0]);
		printk("%s IAOQ[1]: %pS\n", level, (void *) regs->iaoq[1]);
		printk("%s RP(r2): %pS\n", level, (void *) regs->gr[2]);

		parisc_show_stack(current, NULL, regs);
	}
}

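/*
 * Rate-limited diagnostics: parisc_printk_ratelimited() prints the message
 * and a register dump only if the event is critical or show_unhandled_signals
 * is set, throttled through the shared _hppa_rs state.
 */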
static DEFINE_RATELIMIT_STATE(_hppa_rs,
	DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);

#define parisc_printk_ratelimited(critical, regs, fmt, ...)	{	      \
	if ((critical || show_unhandled_signals) && __ratelimit(&_hppa_rs)) { \
		printk(fmt, ##__VA_ARGS__);				      \
		show_regs(regs);					      \
	}								      \
}


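/*
 * Walk the unwind state in @info and print a backtrace of up to 16 frames,
 * showing only addresses that fall within kernel text.
 */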
static void do_show_stack(struct unwind_frame_info *info)
{
	int i = 1;

	printk(KERN_CRIT "Backtrace:\n");
	while (i <= 16) {
		if (unwind_once(info) < 0 || info->ip == 0)
			break;

		if (__kernel_text_address(info->ip)) {
			printk(KERN_CRIT " [<" RFMT ">] %pS\n",
				info->ip, (void *) info->ip);
			i++;
		}
	}
	printk(KERN_CRIT "\n");
}

static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
	struct pt_regs *regs)
{
	struct unwind_frame_info info;
	struct task_struct *t;

	t = task ? task : current;
	if (regs) {
		unwind_frame_init(&info, t, regs);
		goto show_stack;
	}

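	/*
	 * No pt_regs were supplied. For the current task, fabricate a
	 * minimal pt_regs from the live stack pointer (r30), the label
	 * below and __builtin_return_address() so the unwinder has a
	 * starting frame; for a blocked task, start from its saved
	 * context instead.
	 */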
	if (t == current) {
		unsigned long sp;

HERE:
		asm volatile ("copy %%r30, %0" : "=r"(sp));
		{
			struct pt_regs r;

			memset(&r, 0, sizeof(struct pt_regs));
			r.iaoq[0] = (unsigned long)&&HERE;
			r.gr[2] = (unsigned long)__builtin_return_address(0);
			r.gr[30] = sp;

			unwind_frame_init(&info, current, &r);
		}
	} else {
		unwind_frame_init_from_blocked_task(&info, t);
	}

show_stack:
	do_show_stack(&info);
}

void show_stack(struct task_struct *t, unsigned long *sp)
{
	return parisc_show_stack(t, sp, NULL);
}

int is_valid_bugaddr(unsigned long iaoq)
{
	return 1;
}

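/*
 * For user-mode faults just log the event (rate-limited) and return so the
 * caller can deliver a signal; for kernel-mode faults print an oops, taint
 * the kernel and kill the task, or panic when in interrupt context or when
 * panic_on_oops is set.
 */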
void die_if_kernel(char *str, struct pt_regs *regs, long err)
{
	if (user_mode(regs)) {
		if (err == 0)
			return; /* STFU */

		parisc_printk_ratelimited(1, regs,
			KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
			current->comm, task_pid_nr(current), str, err, regs->iaoq[0]);

		return;
	}

	oops_in_progress = 1;

	oops_enter();

	/* Amuse the user in a SPARC fashion */
	if (err) printk(KERN_CRIT
			"      _______________________________ \n"
			"     < Your System ate a SPARC! Gah! >\n"
			"      ------------------------------- \n"
			"             \\   ^__^\n"
			"                 (__)\\       )\\/\\\n"
			"                  U  ||----w |\n"
			"                     ||     ||\n");

	/* unlock the pdc lock if necessary */
	pdc_emergency_unlock();

	/* maybe the kernel hasn't booted very far yet and hasn't been able
	 * to initialize the serial or STI console. In that case we should
	 * re-enable the pdc console, so that the user will be able to
	 * identify the problem. */
	if (!console_drivers)
		pdc_console_restart();

	if (err)
		printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n",
			current->comm, task_pid_nr(current), str, err);

	/* Wot's wrong wif bein' racy? */
	if (current->thread.flags & PARISC_KERNEL_DEATH) {
		printk(KERN_CRIT "%s() recursion detected.\n", __func__);
		local_irq_enable();
		while (1);
	}
	current->thread.flags |= PARISC_KERNEL_DEATH;

	show_regs(regs);
	dump_stack();
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	oops_exit();
	do_exit(SIGSEGV);
}

/* gdb uses break 4,8 */
#define GDB_BREAK_INSN 0x10004
static void handle_gdb_break(struct pt_regs *regs, int wot)
{
	struct siginfo si;

	si.si_signo = SIGTRAP;
	si.si_errno = 0;
	si.si_code = wot;
	si.si_addr = (void __user *) (regs->iaoq[0] & ~3);
	force_sig_info(SIGTRAP, &si, current);
}

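/*
 * A break instruction carries two immediates, which handle_break() below
 * reads back from the IIR as (iir & 31) and ((iir >> 13) & 0x1fff). That is
 * how the kernel's own BUG/WARN break (PARISC_BUG_BREAK_INSN) and gdb's
 * "break 4,8" (0x10004: 4 in the low field, 8 in the upper field) are told
 * apart before falling back to a plain SIGTRAP.
 */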
static void handle_break(struct pt_regs *regs)
{
	unsigned iir = regs->iir;

	if (unlikely(iir == PARISC_BUG_BREAK_INSN && !user_mode(regs))) {
		/* check if a BUG() or WARN() trapped here.  */
		enum bug_trap_type tt;
		tt = report_bug(regs->iaoq[0] & ~3, regs);
		if (tt == BUG_TRAP_TYPE_WARN) {
			regs->iaoq[0] += 4;
			regs->iaoq[1] += 4;
			return; /* return to next instruction when WARN_ON().  */
		}
		die_if_kernel("Unknown kernel breakpoint", regs,
			(tt == BUG_TRAP_TYPE_NONE) ? 9 : 0);
	}

	if (unlikely(iir != GDB_BREAK_INSN))
		parisc_printk_ratelimited(0, regs,
			KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
			iir & 31, (iir>>13) & ((1<<13)-1),
			task_pid_nr(current), current->comm);

	/* send standard GDB signal */
	handle_gdb_break(regs, TRAP_BRKPT);
}

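/*
 * default_trap() just logs the trap code and register state. cpu_lpmc is
 * kept as an overridable hook (defaulting to default_trap); the LPMC case
 * in handle_interruption() calls through it.
 */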
static void default_trap(int code, struct pt_regs *regs)
{
	printk(KERN_ERR "Trap %d on CPU %d\n", code, smp_processor_id());
	show_regs(regs);
}

void (*cpu_lpmc) (int code, struct pt_regs *regs) __read_mostly = default_trap;


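/*
 * Copy the state that firmware saved to PIM (Processor Internal Memory)
 * during a machine check into a struct pt_regs, using the wide (PA2.0)
 * layout on pcxu and later CPUs and the narrow (PA1.1) layout otherwise.
 * parisc_terminate() uses this for HPMCs so the dump reflects the
 * interrupted context.
 */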
void transfer_pim_to_trap_frame(struct pt_regs *regs)
{
    register int i;
    extern unsigned int hpmc_pim_data[];
    struct pdc_hpmc_pim_11 *pim_narrow;
    struct pdc_hpmc_pim_20 *pim_wide;

    if (boot_cpu_data.cpu_type >= pcxu) {

	pim_wide = (struct pdc_hpmc_pim_20 *)hpmc_pim_data;

	/*
	 * Note: The following code will probably generate a
	 * bunch of truncation error warnings from the compiler.
	 * Could be handled with an ifdef, but perhaps there
	 * is a better way.
	 */

	regs->gr[0] = pim_wide->cr[22];

	for (i = 1; i < 32; i++)
	    regs->gr[i] = pim_wide->gr[i];

	for (i = 0; i < 32; i++)
	    regs->fr[i] = pim_wide->fr[i];

	for (i = 0; i < 8; i++)
	    regs->sr[i] = pim_wide->sr[i];

	regs->iasq[0] = pim_wide->cr[17];
	regs->iasq[1] = pim_wide->iasq_back;
	regs->iaoq[0] = pim_wide->cr[18];
	regs->iaoq[1] = pim_wide->iaoq_back;

	regs->sar  = pim_wide->cr[11];
	regs->iir  = pim_wide->cr[19];
	regs->isr  = pim_wide->cr[20];
	regs->ior  = pim_wide->cr[21];
    }
    else {
	pim_narrow = (struct pdc_hpmc_pim_11 *)hpmc_pim_data;

	regs->gr[0] = pim_narrow->cr[22];

	for (i = 1; i < 32; i++)
	    regs->gr[i] = pim_narrow->gr[i];

	for (i = 0; i < 32; i++)
	    regs->fr[i] = pim_narrow->fr[i];

	for (i = 0; i < 8; i++)
	    regs->sr[i] = pim_narrow->sr[i];

	regs->iasq[0] = pim_narrow->cr[17];
	regs->iasq[1] = pim_narrow->iasq_back;
	regs->iaoq[0] = pim_narrow->cr[18];
	regs->iaoq[1] = pim_narrow->iaoq_back;

	regs->sar  = pim_narrow->cr[11];
	regs->iir  = pim_narrow->cr[19];
	regs->isr  = pim_narrow->cr[20];
	regs->ior  = pim_narrow->cr[21];
    }

    /*
     * The following fields only have meaning if we came through
     * another path. So just zero them here.
     */

    regs->ksp = 0;
    regs->kpc = 0;
    regs->orig_r28 = 0;
}


/*
 * This routine is called as a last resort when everything else
 * has gone clearly wrong. We get called for faults in kernel space,
 * and HPMCs.
 */
void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long offset)
{
	static DEFINE_SPINLOCK(terminate_lock);

	oops_in_progress = 1;

	set_eiem(0);
	local_irq_disable();
	spin_lock(&terminate_lock);

	/* unlock the pdc lock if necessary */
	pdc_emergency_unlock();

	/* restart pdc console if necessary */
	if (!console_drivers)
		pdc_console_restart();

	/* Not all paths will gutter the processor... */
	switch(code){

	case 1:
		transfer_pim_to_trap_frame(regs);
		break;

	default:
		/* Fall through */
		break;

	}

	{
		/* show_stack(NULL, (unsigned long *)regs->gr[30]); */
		struct unwind_frame_info info;
		unwind_frame_init(&info, current, regs);
		do_show_stack(&info);
	}

	printk("\n");
	printk(KERN_CRIT "%s: Code=%d regs=%p (Addr=" RFMT ")\n",
			msg, code, regs, offset);
	show_regs(regs);

	spin_unlock(&terminate_lock);

	/* put soft power button back under hardware control;
	 * if the user had pressed it once at any time, the
	 * system will shut down immediately right here. */
	pdc_soft_power_button(0);

	/* Call kernel panic() so reboot timeouts work properly
	 * FIXME: This function should be on the list of
	 * panic notifiers, and we should call panic
	 * directly from the location that we wish.
	 * e.g. We should not call panic from
	 * parisc_terminate, but rather the other way around.
	 * This hack works, prints the panic message twice,
	 * and it enables reboot timers!
	 */
	panic(msg);
}

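/*
 * Common C entry point for traps: @code is the hardware interruption number
 * and @regs the state saved by the low-level handlers. Cases that cannot be
 * handled here end in a signal, in do_page_fault(), or in parisc_terminate().
 */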
void notrace handle_interruption(int code, struct pt_regs *regs)
{
	unsigned long fault_address = 0;
	unsigned long fault_space = 0;
	struct siginfo si;

	if (code == 1)
	    pdc_console_restart();  /* switch back to pdc if HPMC */
	else
	    local_irq_enable();

	/* Security check:
	 * If the priority level is still user, and the
	 * faulting space is not equal to the active space
	 * then the user is attempting something in a space
	 * that does not belong to them. Kill the process.
	 *
	 * This is normally the situation when the user
	 * attempts to jump into the kernel space at the
	 * wrong offset, be it at the gateway page or a
	 * random location.
	 *
	 * We cannot normally signal the process because it
	 * could *be* on the gateway page, and processes
	 * executing on the gateway page can't have signals
	 * delivered.
	 *
	 * We merely readjust the address into the user's
	 * space, at a destination address of zero, and
	 * allow processing to continue.
	 */
	if (((unsigned long)regs->iaoq[0] & 3) &&
	    ((unsigned long)regs->iasq[0] != (unsigned long)regs->sr[7])) {
		/* Kill the user process later */
		regs->iaoq[0] = 0 | 3;
		regs->iaoq[1] = regs->iaoq[0] + 4;
		regs->iasq[0] = regs->iasq[1] = regs->sr[7];
		regs->gr[0] &= ~PSW_B;
		return;
	}

#if 0
	printk(KERN_CRIT "Interruption # %d\n", code);
#endif

	switch(code) {

	case  1:
		/* High-priority machine check (HPMC) */

		/* set up a new LED state on systems shipped with an LED state panel */
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_HPMC);

		parisc_terminate("High Priority Machine Check (HPMC)",
				regs, code, 0);
		/* NOT REACHED */

	case  2:
		/* Power failure interrupt */
		printk(KERN_CRIT "Power failure interrupt !\n");
		return;

	case  3:
		/* Recovery counter trap */
		regs->gr[0] &= ~PSW_R;
		if (user_space(regs))
			handle_gdb_break(regs, TRAP_TRACE);
		/* else this must be the start of a syscall - just let it run */
		return;

	case  5:
		/* Low-priority machine check */
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_LPMC);

		flush_cache_all();
		flush_tlb_all();
		cpu_lpmc(5, regs);
		return;

	case  6:
		/* Instruction TLB miss fault/Instruction page fault */
		fault_address = regs->iaoq[0];
		fault_space   = regs->iasq[0];
		break;

	case  8:
		/* Illegal instruction trap */
		die_if_kernel("Illegal instruction", regs, code);
		si.si_code = ILL_ILLOPC;
		goto give_sigill;

	case  9:
		/* Break instruction trap */
		handle_break(regs);
		return;

	case 10:
		/* Privileged operation trap */
		die_if_kernel("Privileged operation", regs, code);
		si.si_code = ILL_PRVOPC;
		goto give_sigill;

	case 11:
		/* Privileged register trap */
		if ((regs->iir & 0xffdfffe0) == 0x034008a0) {

			/* This is a MFCTL cr26/cr27 to gr instruction.
			 * PCXS traps on this, so we need to emulate it.
			 */

			if (regs->iir & 0x00200000)
				regs->gr[regs->iir & 0x1f] = mfctl(27);
			else
				regs->gr[regs->iir & 0x1f] = mfctl(26);

			regs->iaoq[0] = regs->iaoq[1];
			regs->iaoq[1] += 4;
			regs->iasq[0] = regs->iasq[1];
			return;
		}

		die_if_kernel("Privileged register usage", regs, code);
		si.si_code = ILL_PRVREG;
	give_sigill:
		si.si_signo = SIGILL;
		si.si_errno = 0;
		si.si_addr = (void __user *) regs->iaoq[0];
		force_sig_info(SIGILL, &si, current);
		return;

	case 12:
		/* Overflow Trap, let the userland signal handler do the cleanup */
		si.si_signo = SIGFPE;
		si.si_code = FPE_INTOVF;
		si.si_addr = (void __user *) regs->iaoq[0];
		force_sig_info(SIGFPE, &si, current);
		return;

	case 13:
		/* Conditional Trap
		   The condition succeeds in an instruction which traps
		   on condition */
		if(user_mode(regs)){
			si.si_signo = SIGFPE;
			/* Set to zero, and let the userspace app figure it out from
			   the insn pointed to by si_addr */
			si.si_code = 0;
			si.si_addr = (void __user *) regs->iaoq[0];
			force_sig_info(SIGFPE, &si, current);
			return;
		}
		/* The kernel doesn't want to handle condition codes */
		break;

	case 14:
		/* Assist Exception Trap, i.e. floating point exception. */
		die_if_kernel("Floating point exception", regs, 0); /* quiet */
		__inc_irq_stat(irq_fpassist_count);
		handle_fpe(regs);
		return;

	case 15:
		/* Data TLB miss fault/Data page fault */
		/* Fall through */
	case 16:
		/* Non-access instruction TLB miss fault */
		/* The instruction TLB entry needed for the target address of the FIC
		   is absent, and hardware can't find it, so we get to clean up */
		/* Fall through */
	case 17:
		/* Non-access data TLB miss fault/Non-access data page fault */
		/* FIXME:
			 Still need to add slow path emulation code here!
			 If the insn used a non-shadow register, then the tlb
			 handlers could not have their side-effect (e.g. probe
			 writing to a target register) emulated since rfir would
			 erase the changes to said register. Instead we have to
			 set up everything, call this function we are in, and emulate
			 by hand. Technically we need to emulate:
			 fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw
		*/
		fault_address = regs->ior;
		fault_space = regs->isr;
		break;

	case 18:
		/* PCXS only -- later CPUs split this into types 26, 27 & 28 */
		/* Check for unaligned access */
		if (check_unaligned(regs)) {
			handle_unaligned(regs);
			return;
		}
		/* Fall Through */
	case 26:
		/* PCXL: Data memory access rights trap */
		fault_address = regs->ior;
		fault_space   = regs->isr;
		break;

	case 19:
		/* Data memory break trap */
		regs->gr[0] |= PSW_X; /* So we can single-step over the trap */
		/* fall through */
	case 21:
		/* Page reference trap */
		handle_gdb_break(regs, TRAP_HWBKPT);
		return;

	case 25:
		/* Taken branch trap */
		regs->gr[0] &= ~PSW_T;
		if (user_space(regs))
			handle_gdb_break(regs, TRAP_BRANCH);
		/* else this must be the start of a syscall - just let it
		 * run.
		 */
		return;

	case  7:
		/* Instruction access rights */
		/* PCXL: Instruction memory protection trap */

		/*
		 * This could be caused by either: 1) a process attempting
		 * to execute within a vma that does not have execute
		 * permission, or 2) an access rights violation caused by a
		 * flush only translation set up by ptep_get_and_clear().
		 * So we check the vma permissions to differentiate the two.
		 * If the vma indicates we have execute permission, then
		 * the cause is the latter one. In this case, we need to
		 * call do_page_fault() to fix the problem.
		 */

		if (user_mode(regs)) {
			struct vm_area_struct *vma;

			down_read(&current->mm->mmap_sem);
			vma = find_vma(current->mm,regs->iaoq[0]);
			if (vma && (regs->iaoq[0] >= vma->vm_start)
				&& (vma->vm_flags & VM_EXEC)) {

				fault_address = regs->iaoq[0];
				fault_space = regs->iasq[0];

				up_read(&current->mm->mmap_sem);
				break; /* call do_page_fault() */
			}
			up_read(&current->mm->mmap_sem);
		}
		/* Fall Through */
	case 27:
		/* Data memory protection ID trap */
		if (code == 27 && !user_mode(regs) &&
			fixup_exception(regs))
			return;

		die_if_kernel("Protection id trap", regs, code);
		si.si_code = SEGV_MAPERR;
		si.si_signo = SIGSEGV;
		si.si_errno = 0;
		if (code == 7)
		    si.si_addr = (void __user *) regs->iaoq[0];
		else
		    si.si_addr = (void __user *) regs->ior;
		force_sig_info(SIGSEGV, &si, current);
		return;

	case 28:
		/* Unaligned data reference trap */
		handle_unaligned(regs);
		return;

	default:
		if (user_mode(regs)) {
			parisc_printk_ratelimited(0, regs, KERN_DEBUG
				"handle_interruption() pid=%d command='%s'\n",
				task_pid_nr(current), current->comm);
			/* SIGBUS, for lack of a better one. */
			si.si_signo = SIGBUS;
			si.si_code = BUS_OBJERR;
			si.si_errno = 0;
			si.si_addr = (void __user *) regs->ior;
			force_sig_info(SIGBUS, &si, current);
			return;
		}
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);

		parisc_terminate("Unexpected interruption", regs, code, 0);
		/* NOT REACHED */
	}

	if (user_mode(regs)) {
	    if ((fault_space >> SPACEID_SHIFT) != (regs->sr[7] >> SPACEID_SHIFT)) {
		parisc_printk_ratelimited(0, regs, KERN_DEBUG
				"User fault %d on space 0x%08lx, pid=%d command='%s'\n",
				code, fault_space,
				task_pid_nr(current), current->comm);
		si.si_signo = SIGSEGV;
		si.si_errno = 0;
		si.si_code = SEGV_MAPERR;
		si.si_addr = (void __user *) regs->ior;
		force_sig_info(SIGSEGV, &si, current);
		return;
	    }
	}
	else {

	    /*
	     * The kernel should never fault on its own address space,
	     * unless pagefault_disable() was called before.
	     */

	    if (fault_space == 0 && !faulthandler_disabled())
	    {
		/* Clean up and return if in exception table. */
		if (fixup_exception(regs))
			return;
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
		parisc_terminate("Kernel Fault", regs, code, fault_address);
	    }
	}

	do_page_fault(regs, code, fault_address);
}


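/*
 * Set up the interruption vector table: verify the magic string planted at
 * the IVA by the assembly fault vector, clear the leading vector words, then
 * record the HPMC handler length and the negated checksum of the handler
 * and the neighbouring vector words in the slots that firmware is expected
 * to check before it will trust the HPMC handler.
 */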
void __init initialize_ivt(const void *iva)
{
	extern u32 os_hpmc_size;
	extern const u32 os_hpmc[];

	int i;
	u32 check = 0;
	u32 *ivap;
	u32 *hpmcp;
	u32 length;

	if (strcmp((const char *)iva, "cows can fly"))
		panic("IVT invalid");

	ivap = (u32 *)iva;

	for (i = 0; i < 8; i++)
	    *ivap++ = 0;

	/* Compute Checksum for HPMC handler */
	length = os_hpmc_size;
	ivap[7] = length;

	hpmcp = (u32 *)os_hpmc;

	for (i=0; i<length/4; i++)
	    check += *hpmcp++;

	for (i=0; i<8; i++)
	    check += ivap[i];

	ivap[5] = -check;
}


/* early_trap_init() is called before we set up kernel mappings and
 * write-protect the kernel */
void  __init early_trap_init(void)
{
	extern const void fault_vector_20;

#ifndef CONFIG_64BIT
	extern const void fault_vector_11;
	initialize_ivt(&fault_vector_11);
#endif

	initialize_ivt(&fault_vector_20);
}

void __init trap_init(void)
{
}