xref: /openbmc/linux/arch/parisc/kernel/traps.c (revision 82ced6fd)
1 /*
2  *  linux/arch/parisc/kernel/traps.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  *  Copyright (C) 1999, 2000  Philipp Rumpf <prumpf@tux.org>
6  */
7 
8 /*
9  * 'Traps.c' handles hardware traps and faults after we have saved some
10  * state in the low-level assembly entry code (entry.S).
11  */
12 
13 #include <linux/sched.h>
14 #include <linux/kernel.h>
15 #include <linux/string.h>
16 #include <linux/errno.h>
17 #include <linux/ptrace.h>
18 #include <linux/timer.h>
19 #include <linux/delay.h>
20 #include <linux/mm.h>
21 #include <linux/module.h>
22 #include <linux/smp.h>
23 #include <linux/spinlock.h>
24 #include <linux/init.h>
25 #include <linux/interrupt.h>
26 #include <linux/console.h>
27 #include <linux/bug.h>
28 
29 #include <asm/assembly.h>
30 #include <asm/system.h>
31 #include <asm/uaccess.h>
32 #include <asm/io.h>
33 #include <asm/irq.h>
34 #include <asm/traps.h>
35 #include <asm/unaligned.h>
36 #include <asm/atomic.h>
37 #include <asm/smp.h>
38 #include <asm/pdc.h>
39 #include <asm/pdc_chassis.h>
40 #include <asm/unwind.h>
41 #include <asm/tlbflush.h>
42 #include <asm/cacheflush.h>
43 
44 #include "../math-emu/math-emu.h"	/* for handle_fpe() */
45 
46 #define PRINT_USER_FAULTS /* (turn this on if you want user faults to be */
47 			  /*  dumped to the console via printk)          */
48 
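/* Taken by the TLB dirty-bit trap handlers in entry.S to serialise the
 * read-modify-write of the PTE's dirty bit; only built when those handlers
 * actually use it (SMP or spinlock debugging). */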
49 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
50 DEFINE_SPINLOCK(pa_dbit_lock);
51 #endif
52 
53 static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
54 	struct pt_regs *regs);
55 
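/* Render the low 'nbits' bits of 'x' into 'buf' as an ASCII '0'/'1' string,
 * most significant bit first (buf must hold nbits + 1 bytes).  Used below to
 * print the PSW and FPSR under their one-letter-per-bit legend lines. */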
56 static int printbinary(char *buf, unsigned long x, int nbits)
57 {
58 	unsigned long mask = 1UL << (nbits - 1);
59 	while (mask != 0) {
60 		*buf++ = (mask & x ? '1' : '0');
61 		mask >>= 1;
62 	}
63 	*buf = '\0';
64 
65 	return nbits;
66 }
67 
68 #ifdef CONFIG_64BIT
69 #define RFMT "%016lx"
70 #else
71 #define RFMT "%08lx"
72 #endif
73 #define FFMT "%016llx"	/* fpregs are 64-bit always */
74 
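/* Print four consecutive elements of register array 'r' per line,
 * labelled "<f><x>-<x+3>", e.g. "r00-03" or "fr28-31". */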
75 #define PRINTREGS(lvl,r,f,fmt,x)	\
76 	printk("%s%s%02d-%02d  " fmt " " fmt " " fmt " " fmt "\n",	\
77 		lvl, f, (x), (x+3), (r)[(x)+0], (r)[(x)+1],		\
78 		(r)[(x)+2], (r)[(x)+3])
79 
80 static void print_gr(char *level, struct pt_regs *regs)
81 {
82 	int i;
83 	char buf[64];
84 
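	/* The legend below has one letter per PSW bit, most significant bit
	 * first, so it lines up with the binary string printed under it. */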
85 	printk("%s\n", level);
86 	printk("%s     YZrvWESTHLNXBCVMcbcbcbcbOGFRQPDI\n", level);
87 	printbinary(buf, regs->gr[0], 32);
88 	printk("%sPSW: %s %s\n", level, buf, print_tainted());
89 
90 	for (i = 0; i < 32; i += 4)
91 		PRINTREGS(level, regs->gr, "r", RFMT, i);
92 }
93 
94 static void print_fr(char *level, struct pt_regs *regs)
95 {
96 	int i;
97 	char buf[64];
98 	struct { u32 sw[2]; } s;
99 
100 	/* FR are 64bit everywhere. Need to use asm to get the content
101 	 * of fpsr/fper1, and we assume that we won't have a FP Identify
102 	 * in our way, otherwise we're screwed.
103 	 * The fldd is used to restore the T-bit if there was one, as the
104 	 * store clears it anyway.
105 	 * PA2.0 book says "thou shall not use fstw on FPSR/FPERs" - T-Bone */
106 	asm volatile ("fstd %%fr0,0(%1)	\n\t"
107 		      "fldd 0(%1),%%fr0	\n\t"
108 		      : "=m" (s) : "r" (&s) : "r0");
109 
110 	printk("%s\n", level);
111 	printk("%s      VZOUICununcqcqcqcqcqcrmunTDVZOUI\n", level);
112 	printbinary(buf, s.sw[0], 32);
113 	printk("%sFPSR: %s\n", level, buf);
114 	printk("%sFPER1: %08x\n", level, s.sw[1]);
115 
116 	/* here we'll print fr0 again, though it'll be meaningless */
117 	for (i = 0; i < 32; i += 4)
118 		PRINTREGS(level, regs->fr, "fr", FFMT, i);
119 }
120 
121 void show_regs(struct pt_regs *regs)
122 {
123 	int i, user;
124 	char *level;
125 	unsigned long cr30, cr31;
126 
127 	user = user_mode(regs);
128 	level = user ? KERN_DEBUG : KERN_CRIT;
129 
130 	print_gr(level, regs);
131 
132 	for (i = 0; i < 8; i += 4)
133 		PRINTREGS(level, regs->sr, "sr", RFMT, i);
134 
135 	if (user)
136 		print_fr(level, regs);
137 
138 	cr30 = mfctl(30);
139 	cr31 = mfctl(31);
140 	printk("%s\n", level);
141 	printk("%sIASQ: " RFMT " " RFMT " IAOQ: " RFMT " " RFMT "\n",
142 	       level, regs->iasq[0], regs->iasq[1], regs->iaoq[0], regs->iaoq[1]);
143 	printk("%s IIR: %08lx    ISR: " RFMT "  IOR: " RFMT "\n",
144 	       level, regs->iir, regs->isr, regs->ior);
145 	printk("%s CPU: %8d   CR30: " RFMT " CR31: " RFMT "\n",
146 	       level, current_thread_info()->cpu, cr30, cr31);
147 	printk("%s ORIG_R28: " RFMT "\n", level, regs->orig_r28);
148 
149 	if (user) {
150 		printk("%s IAOQ[0]: " RFMT "\n", level, regs->iaoq[0]);
151 		printk("%s IAOQ[1]: " RFMT "\n", level, regs->iaoq[1]);
152 		printk("%s RP(r2): " RFMT "\n", level, regs->gr[2]);
153 	} else {
154 		printk("%s IAOQ[0]: %pS\n", level, (void *) regs->iaoq[0]);
155 		printk("%s IAOQ[1]: %pS\n", level, (void *) regs->iaoq[1]);
156 		printk("%s RP(r2): %pS\n", level, (void *) regs->gr[2]);
157 
158 		parisc_show_stack(current, NULL, regs);
159 	}
160 }
161 
162 
163 void dump_stack(void)
164 {
165 	show_stack(NULL, NULL);
166 }
167 
168 EXPORT_SYMBOL(dump_stack);
169 
170 static void do_show_stack(struct unwind_frame_info *info)
171 {
172 	int i = 1;
173 
174 	printk(KERN_CRIT "Backtrace:\n");
175 	while (i <= 16) {
176 		if (unwind_once(info) < 0 || info->ip == 0)
177 			break;
178 
179 		if (__kernel_text_address(info->ip)) {
180 			printk(KERN_CRIT " [<" RFMT ">] %pS\n",
181 				info->ip, (void *) info->ip);
182 			i++;
183 		}
184 	}
185 	printk(KERN_CRIT "\n");
186 }
187 
188 static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
189 	struct pt_regs *regs)
190 {
191 	struct unwind_frame_info info;
192 	struct task_struct *t;
193 
194 	t = task ? task : current;
195 	if (regs) {
196 		unwind_frame_init(&info, t, regs);
197 		goto show_stack;
198 	}
199 
200 	if (t == current) {
201 		unsigned long sp;
202 
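		/* To unwind the task we are currently running on, build a
		 * minimal fake pt_regs describing this very spot: the IAOQ
		 * points at the label below, gr[2] holds our return pointer
		 * and gr[30] the current stack pointer, which is all the
		 * unwinder needs for a starting frame. */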
203 HERE:
204 		asm volatile ("copy %%r30, %0" : "=r"(sp));
205 		{
206 			struct pt_regs r;
207 
208 			memset(&r, 0, sizeof(struct pt_regs));
209 			r.iaoq[0] = (unsigned long)&&HERE;
210 			r.gr[2] = (unsigned long)__builtin_return_address(0);
211 			r.gr[30] = sp;
212 
213 			unwind_frame_init(&info, current, &r);
214 		}
215 	} else {
216 		unwind_frame_init_from_blocked_task(&info, t);
217 	}
218 
219 show_stack:
220 	do_show_stack(&info);
221 }
222 
223 void show_stack(struct task_struct *t, unsigned long *sp)
224 {
225 	return parisc_show_stack(t, sp, NULL);
226 }
227 
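/* Returning 1 unconditionally lets report_bug() decide, by looking the
 * address up in the __bug_table, whether the trapping break really came
 * from a BUG() or WARN() site. */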
228 int is_valid_bugaddr(unsigned long iaoq)
229 {
230 	return 1;
231 }
232 
233 void die_if_kernel(char *str, struct pt_regs *regs, long err)
234 {
235 	if (user_mode(regs)) {
236 		if (err == 0)
237 			return; /* STFU */
238 
239 		printk(KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
240 			current->comm, task_pid_nr(current), str, err, regs->iaoq[0]);
241 #ifdef PRINT_USER_FAULTS
242 		/* XXX for debugging only */
243 		show_regs(regs);
244 #endif
245 		return;
246 	}
247 
248 	oops_in_progress = 1;
249 
250 	oops_enter();
251 
252 	/* Amuse the user in a SPARC fashion */
253 	if (err) printk(
254 KERN_CRIT "      _______________________________ \n"
255 KERN_CRIT "     < Your System ate a SPARC! Gah! >\n"
256 KERN_CRIT "      ------------------------------- \n"
257 KERN_CRIT "             \\   ^__^\n"
258 KERN_CRIT "              \\  (xx)\\_______\n"
259 KERN_CRIT "                 (__)\\       )\\/\\\n"
260 KERN_CRIT "                  U  ||----w |\n"
261 KERN_CRIT "                     ||     ||\n");
262 
263 	/* unlock the pdc lock if necessary */
264 	pdc_emergency_unlock();
265 
266 	/* maybe the kernel hasn't booted very far yet and hasn't been able
267 	 * to initialize the serial or STI console. In that case we should
268 	 * re-enable the pdc console, so that the user will be able to
269 	 * identify the problem. */
270 	if (!console_drivers)
271 		pdc_console_restart();
272 
273 	if (err)
274 		printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n",
275 			current->comm, task_pid_nr(current), str, err);
276 
277 	/* Wot's wrong wif bein' racy? */
278 	if (current->thread.flags & PARISC_KERNEL_DEATH) {
279 		printk(KERN_CRIT "%s() recursion detected.\n", __func__);
280 		local_irq_enable();
281 		while (1);
282 	}
283 	current->thread.flags |= PARISC_KERNEL_DEATH;
284 
285 	show_regs(regs);
286 	dump_stack();
287 	add_taint(TAINT_DIE);
288 
289 	if (in_interrupt())
290 		panic("Fatal exception in interrupt");
291 
292 	if (panic_on_oops) {
293 		printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
294 		ssleep(5);
295 		panic("Fatal exception");
296 	}
297 
298 	oops_exit();
299 	do_exit(SIGSEGV);
300 }
301 
302 int syscall_ipi(int (*syscall) (struct pt_regs *), struct pt_regs *regs)
303 {
304 	return syscall(regs);
305 }
306 
307 /* gdb uses break 4,8 */
308 #define GDB_BREAK_INSN 0x10004
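/* "break im5,im13" encodes im5 in the low five bits of the instruction and
 * im13 in bits 13-25, so gdb's "break 4,8" assembles to
 * (8 << 13) | 4 == 0x10004.  handle_break() below decodes it the same way
 * when printing user breakpoints. */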
309 static void handle_gdb_break(struct pt_regs *regs, int wot)
310 {
311 	struct siginfo si;
312 
313 	si.si_signo = SIGTRAP;
314 	si.si_errno = 0;
315 	si.si_code = wot;
316 	si.si_addr = (void __user *) (regs->iaoq[0] & ~3);
317 	force_sig_info(SIGTRAP, &si, current);
318 }
319 
320 static void handle_break(struct pt_regs *regs)
321 {
322 	unsigned iir = regs->iir;
323 
324 	if (unlikely(iir == PARISC_BUG_BREAK_INSN && !user_mode(regs))) {
325 		/* check if a BUG() or WARN() trapped here.  */
326 		enum bug_trap_type tt;
327 		tt = report_bug(regs->iaoq[0] & ~3, regs);
328 		if (tt == BUG_TRAP_TYPE_WARN) {
329 			regs->iaoq[0] += 4;
330 			regs->iaoq[1] += 4;
331 			return; /* return to next instruction when WARN_ON().  */
332 		}
333 		die_if_kernel("Unknown kernel breakpoint", regs,
334 			(tt == BUG_TRAP_TYPE_NONE) ? 9 : 0);
335 	}
336 
337 #ifdef PRINT_USER_FAULTS
338 	if (unlikely(iir != GDB_BREAK_INSN)) {
339 		printk(KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
340 			iir & 31, (iir>>13) & ((1<<13)-1),
341 			task_pid_nr(current), current->comm);
342 		show_regs(regs);
343 	}
344 #endif
345 
346 	/* send standard GDB signal */
347 	handle_gdb_break(regs, TRAP_BRKPT);
348 }
349 
350 static void default_trap(int code, struct pt_regs *regs)
351 {
352 	printk(KERN_ERR "Trap %d on CPU %d\n", code, smp_processor_id());
353 	show_regs(regs);
354 }
355 
356 void (*cpu_lpmc) (int code, struct pt_regs *regs) __read_mostly = default_trap;
357 
358 
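/* Rebuild a pt_regs trap frame from the Processor Internal Memory (PIM)
 * image that firmware saved when the HPMC was taken.  The control register
 * slots follow the architected numbering: cr11 = SAR, cr17/cr18 = the IIA
 * space/offset queues, cr19 = IIR, cr20 = ISR, cr21 = IOR and cr22 = IPSW. */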
359 void transfer_pim_to_trap_frame(struct pt_regs *regs)
360 {
361     register int i;
362     extern unsigned int hpmc_pim_data[];
363     struct pdc_hpmc_pim_11 *pim_narrow;
364     struct pdc_hpmc_pim_20 *pim_wide;
365 
366     if (boot_cpu_data.cpu_type >= pcxu) {
367 
368 	pim_wide = (struct pdc_hpmc_pim_20 *)hpmc_pim_data;
369 
370 	/*
371 	 * Note: The following code will probably generate a
372 	 * bunch of truncation error warnings from the compiler.
373 	 * Could be handled with an ifdef, but perhaps there
374 	 * is a better way.
375 	 */
376 
377 	regs->gr[0] = pim_wide->cr[22];
378 
379 	for (i = 1; i < 32; i++)
380 	    regs->gr[i] = pim_wide->gr[i];
381 
382 	for (i = 0; i < 32; i++)
383 	    regs->fr[i] = pim_wide->fr[i];
384 
385 	for (i = 0; i < 8; i++)
386 	    regs->sr[i] = pim_wide->sr[i];
387 
388 	regs->iasq[0] = pim_wide->cr[17];
389 	regs->iasq[1] = pim_wide->iasq_back;
390 	regs->iaoq[0] = pim_wide->cr[18];
391 	regs->iaoq[1] = pim_wide->iaoq_back;
392 
393 	regs->sar  = pim_wide->cr[11];
394 	regs->iir  = pim_wide->cr[19];
395 	regs->isr  = pim_wide->cr[20];
396 	regs->ior  = pim_wide->cr[21];
397     }
398     else {
399 	pim_narrow = (struct pdc_hpmc_pim_11 *)hpmc_pim_data;
400 
401 	regs->gr[0] = pim_narrow->cr[22];
402 
403 	for (i = 1; i < 32; i++)
404 	    regs->gr[i] = pim_narrow->gr[i];
405 
406 	for (i = 0; i < 32; i++)
407 	    regs->fr[i] = pim_narrow->fr[i];
408 
409 	for (i = 0; i < 8; i++)
410 	    regs->sr[i] = pim_narrow->sr[i];
411 
412 	regs->iasq[0] = pim_narrow->cr[17];
413 	regs->iasq[1] = pim_narrow->iasq_back;
414 	regs->iaoq[0] = pim_narrow->cr[18];
415 	regs->iaoq[1] = pim_narrow->iaoq_back;
416 
417 	regs->sar  = pim_narrow->cr[11];
418 	regs->iir  = pim_narrow->cr[19];
419 	regs->isr  = pim_narrow->cr[20];
420 	regs->ior  = pim_narrow->cr[21];
421     }
422 
423     /*
424      * The following fields only have meaning if we came through
425      * another path. So just zero them here.
426      */
427 
428     regs->ksp = 0;
429     regs->kpc = 0;
430     regs->orig_r28 = 0;
431 }
432 
433 
434 /*
435  * This routine is called as a last resort when everything else
436  * has gone clearly wrong. We get called for faults in kernel space,
437  * and HPMC's.
438  */
439 void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long offset)
440 {
441 	static DEFINE_SPINLOCK(terminate_lock);
442 
443 	oops_in_progress = 1;
444 
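	/* Mask all external interrupts (EIEM = 0) and grab the lock so that
	 * only one CPU at a time prints its death report. */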
445 	set_eiem(0);
446 	local_irq_disable();
447 	spin_lock(&terminate_lock);
448 
449 	/* unlock the pdc lock if necessary */
450 	pdc_emergency_unlock();
451 
452 	/* restart pdc console if necessary */
453 	if (!console_drivers)
454 		pdc_console_restart();
455 
456 	/* Not all paths will gutter the processor... */
457 	switch(code){
458 
459 	case 1:
460 		transfer_pim_to_trap_frame(regs);
461 		break;
462 
463 	default:
464 		/* nothing special to do */
465 		break;
466 
467 	}
468 
469 	{
470 		/* show_stack(NULL, (unsigned long *)regs->gr[30]); */
471 		struct unwind_frame_info info;
472 		unwind_frame_init(&info, current, regs);
473 		do_show_stack(&info);
474 	}
475 
476 	printk("\n");
477 	printk(KERN_CRIT "%s: Code=%d regs=%p (Addr=" RFMT ")\n",
478 			msg, code, regs, offset);
479 	show_regs(regs);
480 
481 	spin_unlock(&terminate_lock);
482 
483 	/* put soft power button back under hardware control;
484 	 * if the user had pressed it once at any time, the
485 	 * system will shut down immediately right here. */
486 	pdc_soft_power_button(0);
487 
488 	/* Call kernel panic() so reboot timeouts work properly
489 	 * FIXME: This function should be on the list of
490 	 * panic notifiers, and we should call panic
491 	 * directly from the location that we wish.
492 	 * e.g. We should not call panic from
493 	 * parisc_terminate, but rather the other way around.
494 	 * This hack works, prints the panic message twice,
495 	 * and it enables reboot timers!
496 	 */
497 	panic(msg);
498 }
499 
500 void notrace handle_interruption(int code, struct pt_regs *regs)
501 {
502 	unsigned long fault_address = 0;
503 	unsigned long fault_space = 0;
504 	struct siginfo si;
505 
506 	if (code == 1)
507 	    pdc_console_restart();  /* switch back to pdc if HPMC */
508 	else
509 	    local_irq_enable();
510 
511 	/* Security check:
512 	 * If the priority level is still user, and the
513 	 * faulting space is not equal to the active space
514 	 * then the user is attempting something in a space
515 	 * that does not belong to them. Kill the process.
516 	 *
517 	 * This is normally the situation when the user
518 	 * attempts to jump into the kernel space at the
519 	 * wrong offset, be it at the gateway page or a
520 	 * random location.
521 	 *
522 	 * We cannot normally signal the process because it
523 	 * could *be* on the gateway page, and processes
524 	 * executing on the gateway page can't have signals
525 	 * delivered.
526 	 *
527 	 * We merely readjust the address into the user's
528 	 * space, at a destination address of zero, and
529 	 * allow processing to continue.
530 	 */
531 	if (((unsigned long)regs->iaoq[0] & 3) &&
532 	    ((unsigned long)regs->iasq[0] != (unsigned long)regs->sr[7])) {
533 		/* Kill the user process later */
534 		regs->iaoq[0] = 0 | 3;
535 		regs->iaoq[1] = regs->iaoq[0] + 4;
536 		regs->iasq[0] = regs->iasq[1] = regs->sr[7];
537 		regs->gr[0] &= ~PSW_B;
538 		return;
539 	}
540 
541 #if 0
542 	printk(KERN_CRIT "Interruption # %d\n", code);
543 #endif
544 
545 	switch(code) {
546 
547 	case  1:
548 		/* High-priority machine check (HPMC) */
549 
550 		/* set up a new led state on systems shipped with a LED State panel */
551 		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_HPMC);
552 
553 	    	parisc_terminate("High Priority Machine Check (HPMC)",
554 				regs, code, 0);
555 		/* NOT REACHED */
556 
557 	case  2:
558 		/* Power failure interrupt */
559 		printk(KERN_CRIT "Power failure interrupt!\n");
560 		return;
561 
562 	case  3:
563 		/* Recovery counter trap */
564 		regs->gr[0] &= ~PSW_R;
565 		if (user_space(regs))
566 			handle_gdb_break(regs, TRAP_TRACE);
567 		/* else this must be the start of a syscall - just let it run */
568 		return;
569 
570 	case  5:
571 		/* Low-priority machine check */
572 		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_LPMC);
573 
574 		flush_cache_all();
575 		flush_tlb_all();
576 		cpu_lpmc(5, regs);
577 		return;
578 
579 	case  6:
580 		/* Instruction TLB miss fault/Instruction page fault */
581 		fault_address = regs->iaoq[0];
582 		fault_space   = regs->iasq[0];
583 		break;
584 
585 	case  8:
586 		/* Illegal instruction trap */
587 		die_if_kernel("Illegal instruction", regs, code);
588 		si.si_code = ILL_ILLOPC;
589 		goto give_sigill;
590 
591 	case  9:
592 		/* Break instruction trap */
593 		handle_break(regs);
594 		return;
595 
596 	case 10:
597 		/* Privileged operation trap */
598 		die_if_kernel("Privileged operation", regs, code);
599 		si.si_code = ILL_PRVOPC;
600 		goto give_sigill;
601 
602 	case 11:
603 		/* Privileged register trap */
604 		if ((regs->iir & 0xffdfffe0) == 0x034008a0) {
605 
606 			/* This is a MFCTL cr26/cr27 to gr instruction.
607 			 * PCXS traps on this, so we need to emulate it.
608 			 */
609 
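			/* Bit 0x00200000 of the IIR selects cr27 (set) vs.
			 * cr26 (clear) and the low five bits name the target
			 * general register; afterwards the instruction queue
			 * is advanced past the emulated mfctl. */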
610 			if (regs->iir & 0x00200000)
611 				regs->gr[regs->iir & 0x1f] = mfctl(27);
612 			else
613 				regs->gr[regs->iir & 0x1f] = mfctl(26);
614 
615 			regs->iaoq[0] = regs->iaoq[1];
616 			regs->iaoq[1] += 4;
617 			regs->iasq[0] = regs->iasq[1];
618 			return;
619 		}
620 
621 		die_if_kernel("Privileged register usage", regs, code);
622 		si.si_code = ILL_PRVREG;
623 	give_sigill:
624 		si.si_signo = SIGILL;
625 		si.si_errno = 0;
626 		si.si_addr = (void __user *) regs->iaoq[0];
627 		force_sig_info(SIGILL, &si, current);
628 		return;
629 
630 	case 12:
631 		/* Overflow Trap, let the userland signal handler do the cleanup */
632 		si.si_signo = SIGFPE;
633 		si.si_code = FPE_INTOVF;
634 		si.si_addr = (void __user *) regs->iaoq[0];
635 		force_sig_info(SIGFPE, &si, current);
636 		return;
637 
638 	case 13:
639 		/* Conditional Trap:
640 		   the condition succeeded in an instruction which traps
641 		   on condition */
642 		if (user_mode(regs)) {
643 			si.si_signo = SIGFPE;
644 			/* Set to zero, and let the userspace app figure it out from
645 		   	   the insn pointed to by si_addr */
646 			si.si_code = 0;
647 			si.si_addr = (void __user *) regs->iaoq[0];
648 			force_sig_info(SIGFPE, &si, current);
649 			return;
650 		}
651 		/* The kernel doesn't want to handle condition codes */
652 		break;
653 
654 	case 14:
655 		/* Assist Exception Trap, i.e. floating point exception. */
656 		die_if_kernel("Floating point exception", regs, 0); /* quiet */
657 		handle_fpe(regs);
658 		return;
659 
660 	case 15:
661 		/* Data TLB miss fault/Data page fault */
662 		/* Fall through */
663 	case 16:
664 		/* Non-access instruction TLB miss fault */
665 		/* The instruction TLB entry needed for the target address of the FIC
666 		   is absent, and hardware can't find it, so we get to clean up */
667 		/* Fall through */
668 	case 17:
669 		/* Non-access data TLB miss fault/Non-access data page fault */
670 		/* FIXME:
671 		 * Still need to add slow path emulation code here!
672 		 * If the insn used a non-shadow register, then the TLB
673 		 * handlers could not have their side effect (e.g. probe
674 		 * writing to a target register) emulated, since rfir would
675 		 * erase the changes to said register. Instead we have to
676 		 * set up everything, call this function we are in, and
677 		 * emulate by hand. Technically we need to emulate:
678 		 * fdc, fdce, pdc, "fic,4f", prober, probeir, probew, probeiw
679 		 */
680 		fault_address = regs->ior;
681 		fault_space = regs->isr;
682 		break;
683 
684 	case 18:
685 		/* PCXS only -- later CPUs split this into types 26, 27 & 28 */
686 		/* Check for unaligned access */
687 		if (check_unaligned(regs)) {
688 			handle_unaligned(regs);
689 			return;
690 		}
691 		/* Fall Through */
692 	case 26:
693 		/* PCXL: Data memory access rights trap */
694 		fault_address = regs->ior;
695 		fault_space   = regs->isr;
696 		break;
697 
698 	case 19:
699 		/* Data memory break trap */
700 		regs->gr[0] |= PSW_X; /* So we can single-step over the trap */
701 		/* fall thru */
702 	case 21:
703 		/* Page reference trap */
704 		handle_gdb_break(regs, TRAP_HWBKPT);
705 		return;
706 
707 	case 25:
708 		/* Taken branch trap */
709 		regs->gr[0] &= ~PSW_T;
710 		if (user_space(regs))
711 			handle_gdb_break(regs, TRAP_BRANCH);
712 		/* else this must be the start of a syscall - just let it
713 		 * run.
714 		 */
715 		return;
716 
717 	case  7:
718 		/* Instruction access rights */
719 		/* PCXL: Instruction memory protection trap */
720 
721 		/*
722 		 * This could be caused by either: 1) a process attempting
723 		 * to execute within a vma that does not have execute
724 		 * permission, or 2) an access rights violation caused by a
725 		 * flush only translation set up by ptep_get_and_clear().
726 		 * So we check the vma permissions to differentiate the two.
727 		 * If the vma indicates we have execute permission, then
728 		 * the cause is the latter one. In this case, we need to
729 		 * call do_page_fault() to fix the problem.
730 		 */
731 
732 		if (user_mode(regs)) {
733 			struct vm_area_struct *vma;
734 
735 			down_read(&current->mm->mmap_sem);
736 			vma = find_vma(current->mm,regs->iaoq[0]);
737 			if (vma && (regs->iaoq[0] >= vma->vm_start)
738 				&& (vma->vm_flags & VM_EXEC)) {
739 
740 				fault_address = regs->iaoq[0];
741 				fault_space = regs->iasq[0];
742 
743 				up_read(&current->mm->mmap_sem);
744 				break; /* call do_page_fault() */
745 			}
746 			up_read(&current->mm->mmap_sem);
747 		}
748 		/* Fall Through */
749 	case 27:
750 		/* Data memory protection ID trap */
751 		if (code == 27 && !user_mode(regs) &&
752 			fixup_exception(regs))
753 			return;
754 
755 		die_if_kernel("Protection id trap", regs, code);
756 		si.si_code = SEGV_MAPERR;
757 		si.si_signo = SIGSEGV;
758 		si.si_errno = 0;
759 		if (code == 7)
760 		    si.si_addr = (void __user *) regs->iaoq[0];
761 		else
762 		    si.si_addr = (void __user *) regs->ior;
763 		force_sig_info(SIGSEGV, &si, current);
764 		return;
765 
766 	case 28:
767 		/* Unaligned data reference trap */
768 		handle_unaligned(regs);
769 		return;
770 
771 	default:
772 		if (user_mode(regs)) {
773 #ifdef PRINT_USER_FAULTS
774 			printk(KERN_DEBUG "\nhandle_interruption() pid=%d command='%s'\n",
775 			    task_pid_nr(current), current->comm);
776 			show_regs(regs);
777 #endif
778 			/* SIGBUS, for lack of a better one. */
779 			si.si_signo = SIGBUS;
780 			si.si_code = BUS_OBJERR;
781 			si.si_errno = 0;
782 			si.si_addr = (void __user *) regs->ior;
783 			force_sig_info(SIGBUS, &si, current);
784 			return;
785 		}
786 		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
787 
788 		parisc_terminate("Unexpected interruption", regs, code, 0);
789 		/* NOT REACHED */
790 	}
791 
792 	if (user_mode(regs)) {
793 	    if ((fault_space >> SPACEID_SHIFT) != (regs->sr[7] >> SPACEID_SHIFT)) {
794 #ifdef PRINT_USER_FAULTS
795 		if (fault_space == 0)
796 			printk(KERN_DEBUG "User Fault on Kernel Space ");
797 		else
798 			printk(KERN_DEBUG "User Fault (long pointer) (fault %d) ",
799 			       code);
800 		printk("pid=%d command='%s'\n", task_pid_nr(current), current->comm);
801 		show_regs(regs);
802 #endif
803 		si.si_signo = SIGSEGV;
804 		si.si_errno = 0;
805 		si.si_code = SEGV_MAPERR;
806 		si.si_addr = (void __user *) regs->ior;
807 		force_sig_info(SIGSEGV, &si, current);
808 		return;
809 	    }
810 	}
811 	else {
812 
813 	    /*
814 	     * The kernel should never fault on its own address space.
815 	     */
816 
817 	    if (fault_space == 0)
818 	    {
819 		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
820 		parisc_terminate("Kernel Fault", regs, code, fault_address);
821 
822 	    }
823 	}
824 
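	/* Everything that broke out of the switch above is treated as an
	 * ordinary page fault on fault_address/fault_space. */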
825 	do_page_fault(regs, code, fault_address);
826 }
827 
828 
829 int __init check_ivt(void *iva)
830 {
831 	extern u32 os_hpmc_size;
832 	extern const u32 os_hpmc[];
833 
834 	int i;
835 	u32 check = 0;
836 	u32 *ivap;
837 	u32 *hpmcp;
838 	u32 length;
839 
840 	if (strcmp((char *)iva, "cows can fly"))
841 		return -1;
842 
843 	ivap = (u32 *)iva;
844 
845 	for (i = 0; i < 8; i++)
846 	    *ivap++ = 0;
847 
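	/* ivap now points at the HPMC vector (IVA + 32 bytes).  Word 7 gets
	 * the handler length and word 5 the negated checksum so that,
	 * assuming the checksum slot was left zero by the assembly fault
	 * vector, the eight vector words plus every word of the handler sum
	 * to zero, which is the arithmetic check firmware uses to validate
	 * the OS HPMC handler. */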
848 	/* Compute Checksum for HPMC handler */
849 	length = os_hpmc_size;
850 	ivap[7] = length;
851 
852 	hpmcp = (u32 *)os_hpmc;
853 
854 	for (i = 0; i < length/4; i++)
855 	    check += *hpmcp++;
856 
857 	for (i = 0; i < 8; i++)
858 	    check += ivap[i];
859 
860 	ivap[5] = -check;
861 
862 	return 0;
863 }
864 
865 #ifndef CONFIG_64BIT
866 extern const void fault_vector_11;
867 #endif
868 extern const void fault_vector_20;
869 
870 void __init trap_init(void)
871 {
872 	void *iva;
873 
874 	if (boot_cpu_data.cpu_type >= pcxu)
875 		iva = (void *) &fault_vector_20;
876 	else
877 #ifdef CONFIG_64BIT
878 		panic("Can't boot 64-bit OS on PA1.1 processor!");
879 #else
880 		iva = (void *) &fault_vector_11;
881 #endif
882 
883 	if (check_ivt(iva))
884 		panic("IVT invalid");
885 }
886