xref: /openbmc/linux/arch/parisc/kernel/traps.c (revision a1e58bbd)
1 /*
2  *  linux/arch/parisc/traps.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  *  Copyright (C) 1999, 2000  Philipp Rumpf <prumpf@tux.org>
6  */
7 
8 /*
9  * 'Traps.c' handles hardware traps and faults after we have saved some
10  * state in 'asm.s'.
11  */
12 
13 #include <linux/sched.h>
14 #include <linux/kernel.h>
15 #include <linux/string.h>
16 #include <linux/errno.h>
17 #include <linux/ptrace.h>
18 #include <linux/timer.h>
19 #include <linux/delay.h>
20 #include <linux/mm.h>
21 #include <linux/module.h>
22 #include <linux/smp.h>
23 #include <linux/spinlock.h>
24 #include <linux/init.h>
25 #include <linux/interrupt.h>
26 #include <linux/console.h>
27 #include <linux/kallsyms.h>
28 #include <linux/bug.h>
29 
30 #include <asm/assembly.h>
31 #include <asm/system.h>
32 #include <asm/uaccess.h>
33 #include <asm/io.h>
34 #include <asm/irq.h>
35 #include <asm/traps.h>
36 #include <asm/unaligned.h>
37 #include <asm/atomic.h>
38 #include <asm/smp.h>
39 #include <asm/pdc.h>
40 #include <asm/pdc_chassis.h>
41 #include <asm/unwind.h>
42 #include <asm/tlbflush.h>
43 #include <asm/cacheflush.h>
44 
45 #include "../math-emu/math-emu.h"	/* for handle_fpe() */
46 
47 #define PRINT_USER_FAULTS /* (turn this on if you want user faults to be */
48 			  /*  dumped to the console via printk)          */
49 
50 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
51 DEFINE_SPINLOCK(pa_dbit_lock);
52 #endif
53 
54 void parisc_show_stack(struct task_struct *t, unsigned long *sp,
55 	struct pt_regs *regs);
56 
57 static int printbinary(char *buf, unsigned long x, int nbits)
58 {
59 	unsigned long mask = 1UL << (nbits - 1);
60 	while (mask != 0) {
61 		*buf++ = (mask & x ? '1' : '0');
62 		mask >>= 1;
63 	}
64 	*buf = '\0';
65 
66 	return nbits;
67 }
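/*
 * Illustrative note (not part of the original file): printbinary() writes
 * the low 'nbits' bits of 'x' into 'buf' as ASCII, most-significant bit
 * first, and NUL-terminates the string.  For example,
 * printbinary(buf, 0x5, 4) leaves "0101" in buf and returns 4; print_gr()
 * and print_fr() below use it with nbits == 32 to dump the PSW and FPSR.
 */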
68 
69 #ifdef CONFIG_64BIT
70 #define RFMT "%016lx"
71 #else
72 #define RFMT "%08lx"
73 #endif
74 #define FFMT "%016llx"	/* fpregs are 64-bit always */
75 
76 #define PRINTREGS(lvl,r,f,fmt,x)	\
77 	printk("%s%s%02d-%02d  " fmt " " fmt " " fmt " " fmt "\n",	\
78 		lvl, f, (x), (x+3), (r)[(x)+0], (r)[(x)+1],		\
79 		(r)[(x)+2], (r)[(x)+3])
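/*
 * Illustrative note (not part of the original file): each PRINTREGS()
 * invocation emits one line covering four consecutive registers, e.g. for
 * x == 0 and f == "r" something like
 *     r00-03  <gr[0]> <gr[1]> <gr[2]> <gr[3]>
 * with each value formatted by RFMT/FFMT above.
 */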
80 
81 static void print_gr(char *level, struct pt_regs *regs)
82 {
83 	int i;
84 	char buf[64];
85 
86 	printk("%s\n", level);
87 	printk("%s     YZrvWESTHLNXBCVMcbcbcbcbOGFRQPDI\n", level);
88 	printbinary(buf, regs->gr[0], 32);
89 	printk("%sPSW: %s %s\n", level, buf, print_tainted());
90 
91 	for (i = 0; i < 32; i += 4)
92 		PRINTREGS(level, regs->gr, "r", RFMT, i);
93 }
94 
95 static void print_fr(char *level, struct pt_regs *regs)
96 {
97 	int i;
98 	char buf[64];
99 	struct { u32 sw[2]; } s;
100 
101 	/* FR are 64bit everywhere. Need to use asm to get the content
102 	 * of fpsr/fper1, and we assume that we won't have a FP Identify
103 	 * in our way, otherwise we're screwed.
104 	 * The fldd is used to restore the T-bit if there was one, as the
105 	 * store clears it anyway.
106 	 * PA2.0 book says "thou shall not use fstw on FPSR/FPERs" - T-Bone */
107 	asm volatile ("fstd %%fr0,0(%1)	\n\t"
108 		      "fldd 0(%1),%%fr0	\n\t"
109 		      : "=m" (s) : "r" (&s) : "r0");
110 
111 	printk("%s\n", level);
112 	printk("%s      VZOUICununcqcqcqcqcqcrmunTDVZOUI\n", level);
113 	printbinary(buf, s.sw[0], 32);
114 	printk("%sFPSR: %s\n", level, buf);
115 	printk("%sFPER1: %08x\n", level, s.sw[1]);
116 
117 	/* here we'll print fr0 again, though it'll be meaningless */
118 	for (i = 0; i < 32; i += 4)
119 		PRINTREGS(level, regs->fr, "fr", FFMT, i);
120 }
121 
122 void show_regs(struct pt_regs *regs)
123 {
124 	int i;
125 	char *level;
126 	unsigned long cr30, cr31;
127 
128 	level = user_mode(regs) ? KERN_DEBUG : KERN_CRIT;
129 
130 	print_gr(level, regs);
131 
132 	for (i = 0; i < 8; i += 4)
133 		PRINTREGS(level, regs->sr, "sr", RFMT, i);
134 
135 	if (user_mode(regs))
136 		print_fr(level, regs);
137 
138 	cr30 = mfctl(30);
139 	cr31 = mfctl(31);
140 	printk("%s\n", level);
141 	printk("%sIASQ: " RFMT " " RFMT " IAOQ: " RFMT " " RFMT "\n",
142 	       level, regs->iasq[0], regs->iasq[1], regs->iaoq[0], regs->iaoq[1]);
143 	printk("%s IIR: %08lx    ISR: " RFMT "  IOR: " RFMT "\n",
144 	       level, regs->iir, regs->isr, regs->ior);
145 	printk("%s CPU: %8d   CR30: " RFMT " CR31: " RFMT "\n",
146 	       level, current_thread_info()->cpu, cr30, cr31);
147 	printk("%s ORIG_R28: " RFMT "\n", level, regs->orig_r28);
148 	printk(level);
149 	print_symbol(" IAOQ[0]: %s\n", regs->iaoq[0]);
150 	printk(level);
151 	print_symbol(" IAOQ[1]: %s\n", regs->iaoq[1]);
152 	printk(level);
153 	print_symbol(" RP(r2): %s\n", regs->gr[2]);
154 
155 	parisc_show_stack(current, NULL, regs);
156 }
157 
158 
159 void dump_stack(void)
160 {
161 	show_stack(NULL, NULL);
162 }
163 
164 EXPORT_SYMBOL(dump_stack);
165 
166 static void do_show_stack(struct unwind_frame_info *info)
167 {
168 	int i = 1;
169 
170 	printk(KERN_CRIT "Backtrace:\n");
171 	while (i <= 16) {
172 		if (unwind_once(info) < 0 || info->ip == 0)
173 			break;
174 
175 		if (__kernel_text_address(info->ip)) {
176 			printk("%s [<" RFMT ">] ", (i&0x3)==1 ? KERN_CRIT : "", info->ip);
177 #ifdef CONFIG_KALLSYMS
178 			print_symbol("%s\n", info->ip);
179 #else
180 			if ((i & 0x03) == 0)
181 				printk("\n");
182 #endif
183 			i++;
184 		}
185 	}
186 	printk("\n");
187 }
188 
189 void parisc_show_stack(struct task_struct *task, unsigned long *sp,
190 	struct pt_regs *regs)
191 {
192 	struct unwind_frame_info info;
193 	struct task_struct *t;
194 
195 	t = task ? task : current;
196 	if (regs) {
197 		unwind_frame_init(&info, t, regs);
198 		goto show_stack;
199 	}
200 
201 	if (t == current) {
202 		unsigned long sp;
203 
204 HERE:
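		/* Explanatory note (not in the original): on PA-RISC %r30 is
		 * the stack pointer, so the copy below captures the current
		 * sp, and the HERE/__builtin_return_address() values give the
		 * unwinder a starting iaoq/rp for the running task. */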
205 		asm volatile ("copy %%r30, %0" : "=r"(sp));
206 		{
207 			struct pt_regs r;
208 
209 			memset(&r, 0, sizeof(struct pt_regs));
210 			r.iaoq[0] = (unsigned long)&&HERE;
211 			r.gr[2] = (unsigned long)__builtin_return_address(0);
212 			r.gr[30] = sp;
213 
214 			unwind_frame_init(&info, current, &r);
215 		}
216 	} else {
217 		unwind_frame_init_from_blocked_task(&info, t);
218 	}
219 
220 show_stack:
221 	do_show_stack(&info);
222 }
223 
224 void show_stack(struct task_struct *t, unsigned long *sp)
225 {
226 	return parisc_show_stack(t, sp, NULL);
227 }
228 
229 int is_valid_bugaddr(unsigned long iaoq)
230 {
231 	return 1;
232 }
233 
234 void die_if_kernel(char *str, struct pt_regs *regs, long err)
235 {
236 	if (user_mode(regs)) {
237 		if (err == 0)
238 			return; /* STFU */
239 
240 		printk(KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
241 			current->comm, task_pid_nr(current), str, err, regs->iaoq[0]);
242 #ifdef PRINT_USER_FAULTS
243 		/* XXX for debugging only */
244 		show_regs(regs);
245 #endif
246 		return;
247 	}
248 
249 	oops_in_progress = 1;
250 
251 	/* Amuse the user in a SPARC fashion */
252 	if (err) printk(
253 KERN_CRIT "      _______________________________ \n"
254 KERN_CRIT "     < Your System ate a SPARC! Gah! >\n"
255 KERN_CRIT "      ------------------------------- \n"
256 KERN_CRIT "             \\   ^__^\n"
257 KERN_CRIT "              \\  (xx)\\_______\n"
258 KERN_CRIT "                 (__)\\       )\\/\\\n"
259 KERN_CRIT "                  U  ||----w |\n"
260 KERN_CRIT "                     ||     ||\n");
261 
262 	/* unlock the pdc lock if necessary */
263 	pdc_emergency_unlock();
264 
265 	/* maybe the kernel hasn't booted very far yet and hasn't been able
266 	 * to initialize the serial or STI console. In that case we should
267 	 * re-enable the pdc console, so that the user will be able to
268 	 * identify the problem. */
269 	if (!console_drivers)
270 		pdc_console_restart();
271 
272 	if (err)
273 		printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n",
274 			current->comm, task_pid_nr(current), str, err);
275 
276 	/* Wot's wrong wif bein' racy? */
277 	if (current->thread.flags & PARISC_KERNEL_DEATH) {
278 		printk(KERN_CRIT "%s() recursion detected.\n", __FUNCTION__);
279 		local_irq_enable();
280 		while (1);
281 	}
282 	current->thread.flags |= PARISC_KERNEL_DEATH;
283 
284 	show_regs(regs);
285 	dump_stack();
286 	add_taint(TAINT_DIE);
287 
288 	if (in_interrupt())
289 		panic("Fatal exception in interrupt");
290 
291 	if (panic_on_oops) {
292 		printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
293 		ssleep(5);
294 		panic("Fatal exception");
295 	}
296 
297 	do_exit(SIGSEGV);
298 }
299 
300 int syscall_ipi(int (*syscall) (struct pt_regs *), struct pt_regs *regs)
301 {
302 	return syscall(regs);
303 }
304 
305 /* gdb uses break 4,8 */
306 #define GDB_BREAK_INSN 0x10004
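/* Explanatory note (not in the original): decoded the way handle_break()
 * does below -- the low five bits (0x10004 & 31) give 4 and (0x10004 >> 13)
 * gives 8 -- so this constant corresponds to the "break 4,8" instruction
 * that gdb plants for breakpoints, matching the comment above. */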
307 static void handle_gdb_break(struct pt_regs *regs, int wot)
308 {
309 	struct siginfo si;
310 
311 	si.si_signo = SIGTRAP;
312 	si.si_errno = 0;
313 	si.si_code = wot;
314 	si.si_addr = (void __user *) (regs->iaoq[0] & ~3);
315 	force_sig_info(SIGTRAP, &si, current);
316 }
317 
318 static void handle_break(struct pt_regs *regs)
319 {
320 	unsigned iir = regs->iir;
321 
322 	if (unlikely(iir == PARISC_BUG_BREAK_INSN && !user_mode(regs))) {
323 		/* check if a BUG() or WARN() trapped here.  */
324 		enum bug_trap_type tt;
325 		tt = report_bug(regs->iaoq[0] & ~3, regs);
326 		if (tt == BUG_TRAP_TYPE_WARN) {
327 			regs->iaoq[0] += 4;
328 			regs->iaoq[1] += 4;
329 			return; /* return to next instruction when WARN_ON().  */
330 		}
331 		die_if_kernel("Unknown kernel breakpoint", regs,
332 			(tt == BUG_TRAP_TYPE_NONE) ? 9 : 0);
333 	}
334 
335 #ifdef PRINT_USER_FAULTS
336 	if (unlikely(iir != GDB_BREAK_INSN)) {
337 		printk(KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
338 			iir & 31, (iir>>13) & ((1<<13)-1),
339 			task_pid_nr(current), current->comm);
340 		show_regs(regs);
341 	}
342 #endif
343 
344 	/* send standard GDB signal */
345 	handle_gdb_break(regs, TRAP_BRKPT);
346 }
347 
348 static void default_trap(int code, struct pt_regs *regs)
349 {
350 	printk(KERN_ERR "Trap %d on CPU %d\n", code, smp_processor_id());
351 	show_regs(regs);
352 }
353 
354 void (*cpu_lpmc) (int code, struct pt_regs *regs) __read_mostly = default_trap;
355 
356 
357 void transfer_pim_to_trap_frame(struct pt_regs *regs)
358 {
359     register int i;
360     extern unsigned int hpmc_pim_data[];
361     struct pdc_hpmc_pim_11 *pim_narrow;
362     struct pdc_hpmc_pim_20 *pim_wide;
363 
364     if (boot_cpu_data.cpu_type >= pcxu) {
365 
366 	pim_wide = (struct pdc_hpmc_pim_20 *)hpmc_pim_data;
367 
368 	/*
369 	 * Note: The following code will probably generate a
370 	 * bunch of truncation error warnings from the compiler.
371 	 * Could be handled with an ifdef, but perhaps there
372 	 * is a better way.
373 	 */
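	/* Explanatory assumption (not in the original): the wide (PIM 2.0)
	 * dump stores 64-bit register images, so on a 32-bit kernel the
	 * assignments below truncate them to the width of the pt_regs
	 * fields -- the truncation warnings the note above refers to. */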
374 
375 	regs->gr[0] = pim_wide->cr[22];
376 
377 	for (i = 1; i < 32; i++)
378 	    regs->gr[i] = pim_wide->gr[i];
379 
380 	for (i = 0; i < 32; i++)
381 	    regs->fr[i] = pim_wide->fr[i];
382 
383 	for (i = 0; i < 8; i++)
384 	    regs->sr[i] = pim_wide->sr[i];
385 
386 	regs->iasq[0] = pim_wide->cr[17];
387 	regs->iasq[1] = pim_wide->iasq_back;
388 	regs->iaoq[0] = pim_wide->cr[18];
389 	regs->iaoq[1] = pim_wide->iaoq_back;
390 
391 	regs->sar  = pim_wide->cr[11];
392 	regs->iir  = pim_wide->cr[19];
393 	regs->isr  = pim_wide->cr[20];
394 	regs->ior  = pim_wide->cr[21];
395     }
396     else {
397 	pim_narrow = (struct pdc_hpmc_pim_11 *)hpmc_pim_data;
398 
399 	regs->gr[0] = pim_narrow->cr[22];
400 
401 	for (i = 1; i < 32; i++)
402 	    regs->gr[i] = pim_narrow->gr[i];
403 
404 	for (i = 0; i < 32; i++)
405 	    regs->fr[i] = pim_narrow->fr[i];
406 
407 	for (i = 0; i < 8; i++)
408 	    regs->sr[i] = pim_narrow->sr[i];
409 
410 	regs->iasq[0] = pim_narrow->cr[17];
411 	regs->iasq[1] = pim_narrow->iasq_back;
412 	regs->iaoq[0] = pim_narrow->cr[18];
413 	regs->iaoq[1] = pim_narrow->iaoq_back;
414 
415 	regs->sar  = pim_narrow->cr[11];
416 	regs->iir  = pim_narrow->cr[19];
417 	regs->isr  = pim_narrow->cr[20];
418 	regs->ior  = pim_narrow->cr[21];
419     }
420 
421     /*
422      * The following fields only have meaning if we came through
423      * another path. So just zero them here.
424      */
425 
426     regs->ksp = 0;
427     regs->kpc = 0;
428     regs->orig_r28 = 0;
429 }
430 
431 
432 /*
433  * This routine is called as a last resort when everything else
434  * has gone clearly wrong. We get called for faults in kernel space,
435  * and HPMC's.
436  */
437 void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long offset)
438 {
439 	static DEFINE_SPINLOCK(terminate_lock);
440 
441 	oops_in_progress = 1;
442 
443 	set_eiem(0);
444 	local_irq_disable();
445 	spin_lock(&terminate_lock);
446 
447 	/* unlock the pdc lock if necessary */
448 	pdc_emergency_unlock();
449 
450 	/* restart pdc console if necessary */
451 	if (!console_drivers)
452 		pdc_console_restart();
453 
454 	/* Not all paths will gutter the processor... */
455 	switch(code){
456 
457 	case 1:
458 		transfer_pim_to_trap_frame(regs);
459 		break;
460 
461 	default:
462 		/* Fall through */
463 		break;
464 
465 	}
466 
467 	{
468 		/* show_stack(NULL, (unsigned long *)regs->gr[30]); */
469 		struct unwind_frame_info info;
470 		unwind_frame_init(&info, current, regs);
471 		do_show_stack(&info);
472 	}
473 
474 	printk("\n");
475 	printk(KERN_CRIT "%s: Code=%d regs=%p (Addr=" RFMT ")\n",
476 			msg, code, regs, offset);
477 	show_regs(regs);
478 
479 	spin_unlock(&terminate_lock);
480 
481 	/* put soft power button back under hardware control;
482 	 * if the user had pressed it once at any time, the
483 	 * system will shut down immediately right here. */
484 	pdc_soft_power_button(0);
485 
486 	/* Call kernel panic() so reboot timeouts work properly
487 	 * FIXME: This function should be on the list of
488 	 * panic notifiers, and we should call panic
489 	 * directly from the location that we wish.
490 	 * e.g. We should not call panic from
491 	 * parisc_terminate, but rather the other way around.
492 	 * This hack works, prints the panic message twice,
493 	 * and it enables reboot timers!
494 	 */
495 	panic(msg);
496 }
497 
498 void handle_interruption(int code, struct pt_regs *regs)
499 {
500 	unsigned long fault_address = 0;
501 	unsigned long fault_space = 0;
502 	struct siginfo si;
503 
504 	if (code == 1)
505 	    pdc_console_restart();  /* switch back to pdc if HPMC */
506 	else
507 	    local_irq_enable();
508 
509 	/* Security check:
510 	 * If the priority level is still user, and the
511 	 * faulting space is not equal to the active space
512 	 * then the user is attempting something in a space
513 	 * that does not belong to them. Kill the process.
514 	 *
515 	 * This is normally the situation when the user
516 	 * attempts to jump into the kernel space at the
517 	 * wrong offset, be it at the gateway page or a
518 	 * random location.
519 	 *
520 	 * We cannot normally signal the process because it
521 	 * could *be* on the gateway page, and processes
522 	 * executing on the gateway page can't have signals
523 	 * delivered.
524 	 *
525 	 * We merely readjust the address into the user's
526 	 * space, at a destination address of zero, and
527 	 * allow processing to continue.
528 	 */
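	/* Note added for clarity: the low two bits of each IAOQ word hold the
	 * privilege level (0 for the kernel, non-zero -- 3 -- for user space),
	 * which is why the '& 3' test below detects a user-privilege front of
	 * the queue and '0 | 3' builds a user-space address of zero. */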
529 	if (((unsigned long)regs->iaoq[0] & 3) &&
530 	    ((unsigned long)regs->iasq[0] != (unsigned long)regs->sr[7])) {
531 	  	/* Kill the user process later */
532 	  	regs->iaoq[0] = 0 | 3;
533 		regs->iaoq[1] = regs->iaoq[0] + 4;
534 	 	regs->iasq[0] = regs->iasq[1] = regs->sr[7];
535 		regs->gr[0] &= ~PSW_B;
536 		return;
537 	}
538 
539 #if 0
540 	printk(KERN_CRIT "Interruption # %d\n", code);
541 #endif
542 
543 	switch(code) {
544 
545 	case  1:
546 		/* High-priority machine check (HPMC) */
547 
548 		/* set up a new led state on systems shipped with a LED State panel */
549 		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_HPMC);
550 
551 	    	parisc_terminate("High Priority Machine Check (HPMC)",
552 				regs, code, 0);
553 		/* NOT REACHED */
554 
555 	case  2:
556 		/* Power failure interrupt */
557 		printk(KERN_CRIT "Power failure interrupt !\n");
558 		return;
559 
560 	case  3:
561 		/* Recovery counter trap */
562 		regs->gr[0] &= ~PSW_R;
563 		if (user_space(regs))
564 			handle_gdb_break(regs, TRAP_TRACE);
565 		/* else this must be the start of a syscall - just let it run */
566 		return;
567 
568 	case  5:
569 		/* Low-priority machine check */
570 		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_LPMC);
571 
572 		flush_cache_all();
573 		flush_tlb_all();
574 		cpu_lpmc(5, regs);
575 		return;
576 
577 	case  6:
578 		/* Instruction TLB miss fault/Instruction page fault */
579 		fault_address = regs->iaoq[0];
580 		fault_space   = regs->iasq[0];
581 		break;
582 
583 	case  8:
584 		/* Illegal instruction trap */
585 		die_if_kernel("Illegal instruction", regs, code);
586 		si.si_code = ILL_ILLOPC;
587 		goto give_sigill;
588 
589 	case  9:
590 		/* Break instruction trap */
591 		handle_break(regs);
592 		return;
593 
594 	case 10:
595 		/* Privileged operation trap */
596 		die_if_kernel("Privileged operation", regs, code);
597 		si.si_code = ILL_PRVOPC;
598 		goto give_sigill;
599 
600 	case 11:
601 		/* Privileged register trap */
602 		if ((regs->iir & 0xffdfffe0) == 0x034008a0) {
603 
604 			/* This is a MFCTL cr26/cr27 to gr instruction.
605 			 * PCXS traps on this, so we need to emulate it.
606 			 */
607 
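			/* Explanatory note (not in the original): in the
			 * encoding matched above, the bit tested below
			 * (0x00200000) selects cr27 versus cr26 and the low
			 * five bits of the IIR name the destination general
			 * register; after emulating the move we step the
			 * instruction queue past the trapping instruction. */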
608 			if (regs->iir & 0x00200000)
609 				regs->gr[regs->iir & 0x1f] = mfctl(27);
610 			else
611 				regs->gr[regs->iir & 0x1f] = mfctl(26);
612 
613 			regs->iaoq[0] = regs->iaoq[1];
614 			regs->iaoq[1] += 4;
615 			regs->iasq[0] = regs->iasq[1];
616 			return;
617 		}
618 
619 		die_if_kernel("Privileged register usage", regs, code);
620 		si.si_code = ILL_PRVREG;
621 	give_sigill:
622 		si.si_signo = SIGILL;
623 		si.si_errno = 0;
624 		si.si_addr = (void __user *) regs->iaoq[0];
625 		force_sig_info(SIGILL, &si, current);
626 		return;
627 
628 	case 12:
629 		/* Overflow Trap, let the userland signal handler do the cleanup */
630 		si.si_signo = SIGFPE;
631 		si.si_code = FPE_INTOVF;
632 		si.si_addr = (void __user *) regs->iaoq[0];
633 		force_sig_info(SIGFPE, &si, current);
634 		return;
635 
636 	case 13:
637 		/* Conditional Trap
638 		   The condition succeeds in an instruction which traps
639 		   on condition  */
640 		if (user_mode(regs)) {
641 			si.si_signo = SIGFPE;
642 			/* Set to zero, and let the userspace app figure it out from
643 		   	   the insn pointed to by si_addr */
644 			si.si_code = 0;
645 			si.si_addr = (void __user *) regs->iaoq[0];
646 			force_sig_info(SIGFPE, &si, current);
647 			return;
648 		}
649 		/* The kernel doesn't want to handle condition codes */
650 		break;
651 
652 	case 14:
653 		/* Assist Exception Trap, i.e. floating point exception. */
654 		die_if_kernel("Floating point exception", regs, 0); /* quiet */
655 		handle_fpe(regs);
656 		return;
657 
658 	case 15:
659 		/* Data TLB miss fault/Data page fault */
660 		/* Fall through */
661 	case 16:
662 		/* Non-access instruction TLB miss fault */
663 		/* The instruction TLB entry needed for the target address of the FIC
664 		   is absent, and hardware can't find it, so we get to cleanup */
665 		/* Fall through */
666 	case 17:
667 		/* Non-access data TLB miss fault/Non-access data page fault */
668 		/* FIXME:
669 		 	 Still need to add slow path emulation code here!
670 		         If the insn used a non-shadow register, then the tlb
671 			 handlers could not have their side-effect (e.g. probe
672 			 writing to a target register) emulated since rfir would
673 			 erase the changes to said register. Instead we have to
674 			 setup everything, call this function we are in, and emulate
675 			 by hand. Technically we need to emulate:
676 			 fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw
677 		*/
678 		fault_address = regs->ior;
679 		fault_space = regs->isr;
680 		break;
681 
682 	case 18:
683 		/* PCXS only -- later CPUs split this into types 26, 27 & 28 */
684 		/* Check for unaligned access */
685 		if (check_unaligned(regs)) {
686 			handle_unaligned(regs);
687 			return;
688 		}
689 		/* Fall Through */
690 	case 26:
691 		/* PCXL: Data memory access rights trap */
692 		fault_address = regs->ior;
693 		fault_space   = regs->isr;
694 		break;
695 
696 	case 19:
697 		/* Data memory break trap */
698 		regs->gr[0] |= PSW_X; /* So we can single-step over the trap */
699 		/* fall thru */
700 	case 21:
701 		/* Page reference trap */
702 		handle_gdb_break(regs, TRAP_HWBKPT);
703 		return;
704 
705 	case 25:
706 		/* Taken branch trap */
707 		regs->gr[0] &= ~PSW_T;
708 		if (user_space(regs))
709 			handle_gdb_break(regs, TRAP_BRANCH);
710 		/* else this must be the start of a syscall - just let it
711 		 * run.
712 		 */
713 		return;
714 
715 	case  7:
716 		/* Instruction access rights */
717 		/* PCXL: Instruction memory protection trap */
718 
719 		/*
720 		 * This could be caused by either: 1) a process attempting
721 		 * to execute within a vma that does not have execute
722 		 * permission, or 2) an access rights violation caused by a
723 		 * flush only translation set up by ptep_get_and_clear().
724 		 * So we check the vma permissions to differentiate the two.
725 		 * If the vma indicates we have execute permission, then
726 		 * the cause is the latter one. In this case, we need to
727 		 * call do_page_fault() to fix the problem.
728 		 */
729 
730 		if (user_mode(regs)) {
731 			struct vm_area_struct *vma;
732 
733 			down_read(&current->mm->mmap_sem);
734 			vma = find_vma(current->mm,regs->iaoq[0]);
735 			if (vma && (regs->iaoq[0] >= vma->vm_start)
736 				&& (vma->vm_flags & VM_EXEC)) {
737 
738 				fault_address = regs->iaoq[0];
739 				fault_space = regs->iasq[0];
740 
741 				up_read(&current->mm->mmap_sem);
742 				break; /* call do_page_fault() */
743 			}
744 			up_read(&current->mm->mmap_sem);
745 		}
746 		/* Fall Through */
747 	case 27:
748 		/* Data memory protection ID trap */
749 		die_if_kernel("Protection id trap", regs, code);
750 		si.si_code = SEGV_MAPERR;
751 		si.si_signo = SIGSEGV;
752 		si.si_errno = 0;
753 		if (code == 7)
754 		    si.si_addr = (void __user *) regs->iaoq[0];
755 		else
756 		    si.si_addr = (void __user *) regs->ior;
757 		force_sig_info(SIGSEGV, &si, current);
758 		return;
759 
760 	case 28:
761 		/* Unaligned data reference trap */
762 		handle_unaligned(regs);
763 		return;
764 
765 	default:
766 		if (user_mode(regs)) {
767 #ifdef PRINT_USER_FAULTS
768 			printk(KERN_DEBUG "\nhandle_interruption() pid=%d command='%s'\n",
769 			    task_pid_nr(current), current->comm);
770 			show_regs(regs);
771 #endif
772 			/* SIGBUS, for lack of a better one. */
773 			si.si_signo = SIGBUS;
774 			si.si_code = BUS_OBJERR;
775 			si.si_errno = 0;
776 			si.si_addr = (void __user *) regs->ior;
777 			force_sig_info(SIGBUS, &si, current);
778 			return;
779 		}
780 		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
781 
782 		parisc_terminate("Unexpected interruption", regs, code, 0);
783 		/* NOT REACHED */
784 	}
785 
786 	if (user_mode(regs)) {
787 	    if ((fault_space >> SPACEID_SHIFT) != (regs->sr[7] >> SPACEID_SHIFT)) {
788 #ifdef PRINT_USER_FAULTS
789 		if (fault_space == 0)
790 			printk(KERN_DEBUG "User Fault on Kernel Space ");
791 		else
792 			printk(KERN_DEBUG "User Fault (long pointer) (fault %d) ",
793 			       code);
794 		printk("pid=%d command='%s'\n", task_pid_nr(current), current->comm);
795 		show_regs(regs);
796 #endif
797 		si.si_signo = SIGSEGV;
798 		si.si_errno = 0;
799 		si.si_code = SEGV_MAPERR;
800 		si.si_addr = (void __user *) regs->ior;
801 		force_sig_info(SIGSEGV, &si, current);
802 		return;
803 	    }
804 	}
805 	else {
806 
807 	    /*
808 	     * The kernel should never fault on its own address space.
809 	     */
810 
811 	    if (fault_space == 0)
812 	    {
813 		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
814 		parisc_terminate("Kernel Fault", regs, code, fault_address);
815 
816 	    }
817 	}
818 
819 	do_page_fault(regs, code, fault_address);
820 }
821 
822 
823 int __init check_ivt(void *iva)
824 {
825 	extern const u32 os_hpmc[];
826 	extern const u32 os_hpmc_end[];
827 
828 	int i;
829 	u32 check = 0;
830 	u32 *ivap;
831 	u32 *hpmcp;
832 	u32 length;
833 
834 	if (strcmp((char *)iva, "cows can fly"))
835 		return -1;
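	/* Explanatory assumption (not in the original): the fault vector
	 * blobs defined in entry.S begin with this marker string, so failing
	 * the comparison above means the pointer passed in is not the
	 * kernel's interruption vector table. */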
836 
837 	ivap = (u32 *)iva;
838 
839 	for (i = 0; i < 8; i++)
840 	    *ivap++ = 0;
841 
842 	/* Compute Checksum for HPMC handler */
843 
844 	length = os_hpmc_end - os_hpmc;
845 	ivap[7] = length;
846 
847 	hpmcp = (u32 *)os_hpmc;
848 
849 	for (i=0; i<length/4; i++)
850 	    check += *hpmcp++;
851 
852 	for (i=0; i<8; i++)
853 	    check += ivap[i];
854 
855 	ivap[5] = -check;
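	/* Explanatory note (not in the original): 'check' sums the HPMC
	 * handler words plus the eight words at ivap[0..7] (the vector slot
	 * used for HPMC, including the length patched in above).  Storing
	 * -check here makes that 32-bit sum wrap to zero -- assuming the
	 * checksum slot was zero when it was summed, as the vector code is
	 * expected to leave it -- which is presumably what firmware checks
	 * before trusting the OS HPMC handler. */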
856 
857 	return 0;
858 }
859 
860 #ifndef CONFIG_64BIT
861 extern const void fault_vector_11;
862 #endif
863 extern const void fault_vector_20;
864 
865 void __init trap_init(void)
866 {
867 	void *iva;
868 
869 	if (boot_cpu_data.cpu_type >= pcxu)
870 		iva = (void *) &fault_vector_20;
871 	else
872 #ifdef CONFIG_64BIT
873 		panic("Can't boot 64-bit OS on PA1.1 processor!");
874 #else
875 		iva = (void *) &fault_vector_11;
876 #endif
877 
878 	if (check_ivt(iva))
879 		panic("IVT invalid");
880 }
881