xref: /openbmc/linux/arch/alpha/kernel/traps.c (revision ee8a99bd)
1 /*
2  * arch/alpha/kernel/traps.c
3  *
4  * (C) Copyright 1994 Linus Torvalds
5  */
6 
7 /*
8  * This file initializes the trap entry points
9  */
10 
11 #include <linux/jiffies.h>
12 #include <linux/mm.h>
13 #include <linux/sched.h>
14 #include <linux/tty.h>
15 #include <linux/delay.h>
16 #include <linux/module.h>
17 #include <linux/init.h>
18 #include <linux/kallsyms.h>
19 #include <linux/ratelimit.h>
20 
21 #include <asm/gentrap.h>
22 #include <asm/uaccess.h>
23 #include <asm/unaligned.h>
24 #include <asm/sysinfo.h>
25 #include <asm/hwrpb.h>
26 #include <asm/mmu_context.h>
27 #include <asm/special_insns.h>
28 
29 #include "proto.h"
30 
31 /* Work-around for some SRMs which mishandle opDEC faults.  */
32 
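/* opDEC_check() below leaves this 0 when the SRM console reports the
   architecturally required PC (the instruction after the faulting opDEC
   insn), and 4 when it reports the faulting insn itself; do_entIF()
   adds it to regs->pc to compensate.  */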
33 static int opDEC_fix;
34 
35 static void
36 opDEC_check(void)
37 {
38 	__asm__ __volatile__ (
39 	/* Load the address of... */
40 	"	br	$16, 1f\n"
41 	/* A stub instruction fault handler.  Just add 4 to the
42 	   pc and continue.  */
43 	"	ldq	$16, 8($sp)\n"
44 	"	addq	$16, 4, $16\n"
45 	"	stq	$16, 8($sp)\n"
46 	"	call_pal %[rti]\n"
47 	/* Install the instruction fault handler.  */
48 	"1:	lda	$17, 3\n"
49 	"	call_pal %[wrent]\n"
50 	/* With that in place, the fault from the round-to-minf fp
51 	   insn will arrive either at the "lda 4" insn (bad) or one
52 	   past that (good).  This places the correct fixup in %[fix].  */
53 	"	lda %[fix], 0\n"
54 	"	cvttq/svm $f31,$f31\n"
55 	"	lda %[fix], 4"
56 	: [fix] "=r" (opDEC_fix)
57 	: [rti] "n" (PAL_rti), [wrent] "n" (PAL_wrent)
58 	: "$0", "$1", "$16", "$17", "$22", "$23", "$24", "$25");
59 
60 	if (opDEC_fix)
61 		printk("opDEC fixup enabled.\n");
62 }
63 
64 void
65 dik_show_regs(struct pt_regs *regs, unsigned long *r9_15)
66 {
67 	printk("pc = [<%016lx>]  ra = [<%016lx>]  ps = %04lx    %s\n",
68 	       regs->pc, regs->r26, regs->ps, print_tainted());
69 	printk("pc is at %pSR\n", (void *)regs->pc);
70 	printk("ra is at %pSR\n", (void *)regs->r26);
71 	printk("v0 = %016lx  t0 = %016lx  t1 = %016lx\n",
72 	       regs->r0, regs->r1, regs->r2);
73 	printk("t2 = %016lx  t3 = %016lx  t4 = %016lx\n",
74  	       regs->r3, regs->r4, regs->r5);
75 	printk("t5 = %016lx  t6 = %016lx  t7 = %016lx\n",
76 	       regs->r6, regs->r7, regs->r8);
77 
78 	if (r9_15) {
79 		printk("s0 = %016lx  s1 = %016lx  s2 = %016lx\n",
80 		       r9_15[9], r9_15[10], r9_15[11]);
81 		printk("s3 = %016lx  s4 = %016lx  s5 = %016lx\n",
82 		       r9_15[12], r9_15[13], r9_15[14]);
83 		printk("s6 = %016lx\n", r9_15[15]);
84 	}
85 
86 	printk("a0 = %016lx  a1 = %016lx  a2 = %016lx\n",
87 	       regs->r16, regs->r17, regs->r18);
88 	printk("a3 = %016lx  a4 = %016lx  a5 = %016lx\n",
89  	       regs->r19, regs->r20, regs->r21);
90  	printk("t8 = %016lx  t9 = %016lx  t10= %016lx\n",
91 	       regs->r22, regs->r23, regs->r24);
92 	printk("t11= %016lx  pv = %016lx  at = %016lx\n",
93 	       regs->r25, regs->r27, regs->r28);
94 	printk("gp = %016lx  sp = %p\n", regs->gp, regs+1);
95 #if 0
96 __halt();
97 #endif
98 }
99 
100 #if 0
101 static char * ireg_name[] = {"v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
102 			   "t7", "s0", "s1", "s2", "s3", "s4", "s5", "s6",
103 			   "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
104 			   "t10", "t11", "ra", "pv", "at", "gp", "sp", "zero"};
105 #endif
106 
107 static void
108 dik_show_code(unsigned int *pc)
109 {
110 	long i;
111 
112 	printk("Code:");
113 	for (i = -6; i < 2; i++) {
114 		unsigned int insn;
115 		if (__get_user(insn, (unsigned int __user *)pc + i))
116 			break;
117 		printk("%c%08x%c", i ? ' ' : '<', insn, i ? ' ' : '>');
118 	}
119 	printk("\n");
120 }
121 
122 static void
123 dik_show_trace(unsigned long *sp)
124 {
125 	long i = 0;
126 	printk("Trace:\n");
127 	while (0x1ff8 & (unsigned long) sp) {
128 		extern char _stext[], _etext[];
129 		unsigned long tmp = *sp;
130 		sp++;
131 		if (tmp < (unsigned long) &_stext)
132 			continue;
133 		if (tmp >= (unsigned long) &_etext)
134 			continue;
135 		printk("[<%lx>] %pSR\n", tmp, (void *)tmp);
136 		if (i++ > 40) {
137 			printk(" ...");
138 			break;
139 		}
140 	}
141 	printk("\n");
142 }
143 
144 static int kstack_depth_to_print = 24;
145 
146 void show_stack(struct task_struct *task, unsigned long *sp)
147 {
148 	unsigned long *stack;
149 	int i;
150 
151 	/*
152 	 * debugging aid: "show_stack(NULL);" prints the
153 	 * back trace for this cpu.
154 	 */
155 	if (sp == NULL)
156 		sp = (unsigned long *)&sp;
157 
158 	stack = sp;
159 	for (i = 0; i < kstack_depth_to_print; i++) {
160 		if (((long) stack & (THREAD_SIZE-1)) == 0)
161 			break;
162 		if (i && ((i % 4) == 0))
163 			printk("\n       ");
164 		printk("%016lx ", *stack++);
165 	}
166 	printk("\n");
167 	dik_show_trace(sp);
168 }
169 
170 void
171 die_if_kernel(char * str, struct pt_regs *regs, long err, unsigned long *r9_15)
172 {
173 	if (regs->ps & 8)
174 		return;
175 #ifdef CONFIG_SMP
176 	printk("CPU %d ", hard_smp_processor_id());
177 #endif
178 	printk("%s(%d): %s %ld\n", current->comm, task_pid_nr(current), str, err);
179 	dik_show_regs(regs, r9_15);
180 	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
181 	dik_show_trace((unsigned long *)(regs+1));
182 	dik_show_code((unsigned int *)regs->pc);
183 
184 	if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL)) {
185 		printk("die_if_kernel recursion detected.\n");
186 		local_irq_enable();
187 		while (1);
188 	}
189 	do_exit(SIGSEGV);
190 }
191 
192 #ifndef CONFIG_MATHEMU
193 static long dummy_emul(void) { return 0; }
194 long (*alpha_fp_emul_imprecise)(struct pt_regs *regs, unsigned long writemask)
195   = (void *)dummy_emul;
196 long (*alpha_fp_emul) (unsigned long pc)
197   = (void *)dummy_emul;
198 #else
199 long alpha_fp_emul_imprecise(struct pt_regs *regs, unsigned long writemask);
200 long alpha_fp_emul (unsigned long pc);
201 #endif
202 
203 asmlinkage void
204 do_entArith(unsigned long summary, unsigned long write_mask,
205 	    struct pt_regs *regs)
206 {
207 	long si_code = FPE_FLTINV;
208 	siginfo_t info;
209 
210 	if (summary & 1) {
211 		/* Software-completion summary bit is set, so try to
212 		   emulate the instruction.  If the processor supports
213 		   precise exceptions, we don't have to search.  */
214 		if (!amask(AMASK_PRECISE_TRAP))
215 			si_code = alpha_fp_emul(regs->pc - 4);
216 		else
217 			si_code = alpha_fp_emul_imprecise(regs, write_mask);
218 		if (si_code == 0)
219 			return;
220 	}
221 	die_if_kernel("Arithmetic fault", regs, 0, NULL);
222 
223 	info.si_signo = SIGFPE;
224 	info.si_errno = 0;
225 	info.si_code = si_code;
226 	info.si_addr = (void __user *) regs->pc;
227 	send_sig_info(SIGFPE, &info, current);
228 }
229 
230 asmlinkage void
231 do_entIF(unsigned long type, struct pt_regs *regs)
232 {
233 	siginfo_t info;
234 	int signo, code;
235 
236 	if ((regs->ps & ~IPL_MAX) == 0) {
237 		if (type == 1) {
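			/* A bugcheck raised by BUG(): this kernel's Alpha
			   BUG() is assumed to follow the bugchk call_pal
			   with the source line as a longword and the file
			   name pointer as two further longwords, which is
			   what the decode below expects.  */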
238 			const unsigned int *data
239 			  = (const unsigned int *) regs->pc;
240 			printk("Kernel bug at %s:%d\n",
241 			       (const char *)(data[1] | (long)data[2] << 32),
242 			       data[0]);
243 		}
244 		die_if_kernel((type == 1 ? "Kernel Bug" : "Instruction fault"),
245 			      regs, type, NULL);
246 	}
247 
248 	switch (type) {
249 	      case 0: /* breakpoint */
250 		info.si_signo = SIGTRAP;
251 		info.si_errno = 0;
252 		info.si_code = TRAP_BRKPT;
253 		info.si_trapno = 0;
254 		info.si_addr = (void __user *) regs->pc;
255 
256 		if (ptrace_cancel_bpt(current)) {
257 			regs->pc -= 4;	/* make pc point to former bpt */
258 		}
259 
260 		send_sig_info(SIGTRAP, &info, current);
261 		return;
262 
263 	      case 1: /* bugcheck */
264 		info.si_signo = SIGTRAP;
265 		info.si_errno = 0;
266 		info.si_code = __SI_FAULT;
267 		info.si_addr = (void __user *) regs->pc;
268 		info.si_trapno = 0;
269 		send_sig_info(SIGTRAP, &info, current);
270 		return;
271 
272 	      case 2: /* gentrap */
273 		info.si_addr = (void __user *) regs->pc;
274 		info.si_trapno = regs->r16;
275 		switch ((long) regs->r16) {
276 		case GEN_INTOVF:
277 			signo = SIGFPE;
278 			code = FPE_INTOVF;
279 			break;
280 		case GEN_INTDIV:
281 			signo = SIGFPE;
282 			code = FPE_INTDIV;
283 			break;
284 		case GEN_FLTOVF:
285 			signo = SIGFPE;
286 			code = FPE_FLTOVF;
287 			break;
288 		case GEN_FLTDIV:
289 			signo = SIGFPE;
290 			code = FPE_FLTDIV;
291 			break;
292 		case GEN_FLTUND:
293 			signo = SIGFPE;
294 			code = FPE_FLTUND;
295 			break;
296 		case GEN_FLTINV:
297 			signo = SIGFPE;
298 			code = FPE_FLTINV;
299 			break;
300 		case GEN_FLTINE:
301 			signo = SIGFPE;
302 			code = FPE_FLTRES;
303 			break;
304 		case GEN_ROPRAND:
305 			signo = SIGFPE;
306 			code = __SI_FAULT;
307 			break;
308 
309 		case GEN_DECOVF:
310 		case GEN_DECDIV:
311 		case GEN_DECINV:
312 		case GEN_ASSERTERR:
313 		case GEN_NULPTRERR:
314 		case GEN_STKOVF:
315 		case GEN_STRLENERR:
316 		case GEN_SUBSTRERR:
317 		case GEN_RANGERR:
318 		case GEN_SUBRNG:
319 		case GEN_SUBRNG1:
320 		case GEN_SUBRNG2:
321 		case GEN_SUBRNG3:
322 		case GEN_SUBRNG4:
323 		case GEN_SUBRNG5:
324 		case GEN_SUBRNG6:
325 		case GEN_SUBRNG7:
326 		default:
327 			signo = SIGTRAP;
328 			code = __SI_FAULT;
329 			break;
330 		}
331 
332 		info.si_signo = signo;
333 		info.si_errno = 0;
334 		info.si_code = code;
335 		info.si_addr = (void __user *) regs->pc;
336 		send_sig_info(signo, &info, current);
337 		return;
338 
339 	      case 4: /* opDEC */
340 		if (implver() == IMPLVER_EV4) {
341 			long si_code;
342 
343 			/* Some versions of SRM do not handle
344 			   the opDEC properly - they return the PC of the
345 			   opDEC fault, not the instruction after as the
346 			   Alpha architecture requires.  Here we fix it up.
347 			   We do this by intentionally causing an opDEC
348 			   fault during the boot sequence and testing if
349 			   we get the correct PC.  If not, we set a flag
350 			   to correct it every time through.  */
351 			regs->pc += opDEC_fix;
352 
353 			/* EV4 does not implement anything except normal
354 			   rounding.  Everything else will come here as
355 			   an illegal instruction.  Emulate them.  */
356 			si_code = alpha_fp_emul(regs->pc - 4);
357 			if (si_code == 0)
358 				return;
359 			if (si_code > 0) {
360 				info.si_signo = SIGFPE;
361 				info.si_errno = 0;
362 				info.si_code = si_code;
363 				info.si_addr = (void __user *) regs->pc;
364 				send_sig_info(SIGFPE, &info, current);
365 				return;
366 			}
367 		}
368 		break;
369 
370 	      case 3: /* FEN fault */
371 		/* Irritating users can call PAL_clrfen to disable the
372 		   FPU for the process.  The kernel will then trap in
373 		   do_switch_stack and undo_switch_stack when we try
374 		   to save and restore the FP registers.
375 
376 		   Given that GCC by default generates code that uses the
377 		   FP registers, PAL_clrfen is not useful except for DoS
378 		   attacks.  So turn the bleeding FPU back on and be done
379 		   with it.  */
380 		current_thread_info()->pcb.flags |= 1;
381 		__reload_thread(&current_thread_info()->pcb);
382 		return;
383 
384 	      case 5: /* illoc */
385 	      default: /* unexpected instruction-fault type */
386 		      ;
387 	}
388 
389 	info.si_signo = SIGILL;
390 	info.si_errno = 0;
391 	info.si_code = ILL_ILLOPC;
392 	info.si_addr = (void __user *) regs->pc;
393 	send_sig_info(SIGILL, &info, current);
394 }
395 
396 /* There is an ifdef in the PALcode in MILO that enables a
397    "kernel debugging entry point" as an unprivileged call_pal.
398 
399    We don't want to have anything to do with it, but unfortunately
400    several versions of MILO included in distributions have it enabled,
401    and if we don't put something on the entry point we'll oops.  */
402 
403 asmlinkage void
404 do_entDbg(struct pt_regs *regs)
405 {
406 	siginfo_t info;
407 
408 	die_if_kernel("Instruction fault", regs, 0, NULL);
409 
410 	info.si_signo = SIGILL;
411 	info.si_errno = 0;
412 	info.si_code = ILL_ILLOPC;
413 	info.si_addr = (void __user *) regs->pc;
414 	force_sig_info(SIGILL, &info, current);
415 }
416 
417 
418 /*
419  * entUna uses a different register layout to keep the handling simple.  It
420  * needs access to all the integer registers (the kernel doesn't use
421  * fp-regs), and it needs to have them in order for simpler access.
422  *
423  * Due to the non-standard register layout (and because we don't want
424  * to handle floating-point regs), user-mode unaligned accesses are
425  * handled separately by do_entUnaUser below.
426  *
427  * Oh, btw, we don't handle the "gp" register correctly, but if we fault
428  * on a gp-register unaligned load/store, something is _very_ wrong
429  * in the kernel anyway..
430  */
431 struct allregs {
432 	unsigned long regs[32];
433 	unsigned long ps, pc, gp, a0, a1, a2;
434 };
435 
436 struct unaligned_stat {
437 	unsigned long count, va, pc;
438 } unaligned[2];
439 
440 
441 /* Macro for exception fixup code to access integer registers.  */
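/* a0-a2 (r16-r18) are not in the regs[] array of struct allregs but in
   the separate a0/a1/a2 slots after ps, pc and gp; viewed as an array of
   longs those land at indices 35-37, hence the (r)+19 remapping.  All
   other registers sit at their natural index.  */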
442 #define una_reg(r)  (_regs[(r) >= 16 && (r) <= 18 ? (r)+19 : (r)])
443 
444 
445 asmlinkage void
446 do_entUna(void * va, unsigned long opcode, unsigned long reg,
447 	  struct allregs *regs)
448 {
449 	long error, tmp1, tmp2, tmp3, tmp4;
450 	unsigned long pc = regs->pc - 4;
451 	unsigned long *_regs = regs->regs;
452 	const struct exception_table_entry *fixup;
453 
454 	unaligned[0].count++;
455 	unaligned[0].va = (unsigned long) va;
456 	unaligned[0].pc = pc;
457 
458 	/* We don't want to use the generic get/put unaligned macros as
459 	   we want to trap exceptions.  Only if we actually get an
460 	   exception will we decide whether we should have caught it.  */
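	/* Each load below fetches the two aligned quadwords straddling the
	   target with ldq_u, extracts the low and high pieces with the
	   ext*l/ext*h pair, and ORs them together; only the ldq_u's can
	   fault, so only they get exception table entries.  */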
461 
462 	switch (opcode) {
463 	case 0x0c: /* ldwu */
464 		__asm__ __volatile__(
465 		"1:	ldq_u %1,0(%3)\n"
466 		"2:	ldq_u %2,1(%3)\n"
467 		"	extwl %1,%3,%1\n"
468 		"	extwh %2,%3,%2\n"
469 		"3:\n"
470 		".section __ex_table,\"a\"\n"
471 		"	.long 1b - .\n"
472 		"	lda %1,3b-1b(%0)\n"
473 		"	.long 2b - .\n"
474 		"	lda %2,3b-2b(%0)\n"
475 		".previous"
476 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
477 			: "r"(va), "0"(0));
478 		if (error)
479 			goto got_exception;
480 		una_reg(reg) = tmp1|tmp2;
481 		return;
482 
483 	case 0x28: /* ldl */
484 		__asm__ __volatile__(
485 		"1:	ldq_u %1,0(%3)\n"
486 		"2:	ldq_u %2,3(%3)\n"
487 		"	extll %1,%3,%1\n"
488 		"	extlh %2,%3,%2\n"
489 		"3:\n"
490 		".section __ex_table,\"a\"\n"
491 		"	.long 1b - .\n"
492 		"	lda %1,3b-1b(%0)\n"
493 		"	.long 2b - .\n"
494 		"	lda %2,3b-2b(%0)\n"
495 		".previous"
496 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
497 			: "r"(va), "0"(0));
498 		if (error)
499 			goto got_exception;
500 		una_reg(reg) = (int)(tmp1|tmp2);
501 		return;
502 
503 	case 0x29: /* ldq */
504 		__asm__ __volatile__(
505 		"1:	ldq_u %1,0(%3)\n"
506 		"2:	ldq_u %2,7(%3)\n"
507 		"	extql %1,%3,%1\n"
508 		"	extqh %2,%3,%2\n"
509 		"3:\n"
510 		".section __ex_table,\"a\"\n"
511 		"	.long 1b - .\n"
512 		"	lda %1,3b-1b(%0)\n"
513 		"	.long 2b - .\n"
514 		"	lda %2,3b-2b(%0)\n"
515 		".previous"
516 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
517 			: "r"(va), "0"(0));
518 		if (error)
519 			goto got_exception;
520 		una_reg(reg) = tmp1|tmp2;
521 		return;
522 
523 	/* Note that the store sequences do not indicate that they change
524 	   memory because it _should_ be affecting nothing in this context.
525 	   (Otherwise we have other, much larger, problems.)  */
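	/* Each store is a read-modify-write of the two aligned quadwords
	   straddling the target: ins*l/ins*h position the new bytes,
	   msk*l/msk*h clear the old ones, and the merged quadwords are
	   written back with stq_u, high half first.  */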
526 	case 0x0d: /* stw */
527 		__asm__ __volatile__(
528 		"1:	ldq_u %2,1(%5)\n"
529 		"2:	ldq_u %1,0(%5)\n"
530 		"	inswh %6,%5,%4\n"
531 		"	inswl %6,%5,%3\n"
532 		"	mskwh %2,%5,%2\n"
533 		"	mskwl %1,%5,%1\n"
534 		"	or %2,%4,%2\n"
535 		"	or %1,%3,%1\n"
536 		"3:	stq_u %2,1(%5)\n"
537 		"4:	stq_u %1,0(%5)\n"
538 		"5:\n"
539 		".section __ex_table,\"a\"\n"
540 		"	.long 1b - .\n"
541 		"	lda %2,5b-1b(%0)\n"
542 		"	.long 2b - .\n"
543 		"	lda %1,5b-2b(%0)\n"
544 		"	.long 3b - .\n"
545 		"	lda $31,5b-3b(%0)\n"
546 		"	.long 4b - .\n"
547 		"	lda $31,5b-4b(%0)\n"
548 		".previous"
549 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
550 			  "=&r"(tmp3), "=&r"(tmp4)
551 			: "r"(va), "r"(una_reg(reg)), "0"(0));
552 		if (error)
553 			goto got_exception;
554 		return;
555 
556 	case 0x2c: /* stl */
557 		__asm__ __volatile__(
558 		"1:	ldq_u %2,3(%5)\n"
559 		"2:	ldq_u %1,0(%5)\n"
560 		"	inslh %6,%5,%4\n"
561 		"	insll %6,%5,%3\n"
562 		"	msklh %2,%5,%2\n"
563 		"	mskll %1,%5,%1\n"
564 		"	or %2,%4,%2\n"
565 		"	or %1,%3,%1\n"
566 		"3:	stq_u %2,3(%5)\n"
567 		"4:	stq_u %1,0(%5)\n"
568 		"5:\n"
569 		".section __ex_table,\"a\"\n"
570 		"	.long 1b - .\n"
571 		"	lda %2,5b-1b(%0)\n"
572 		"	.long 2b - .\n"
573 		"	lda %1,5b-2b(%0)\n"
574 		"	.long 3b - .\n"
575 		"	lda $31,5b-3b(%0)\n"
576 		"	.long 4b - .\n"
577 		"	lda $31,5b-4b(%0)\n"
578 		".previous"
579 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
580 			  "=&r"(tmp3), "=&r"(tmp4)
581 			: "r"(va), "r"(una_reg(reg)), "0"(0));
582 		if (error)
583 			goto got_exception;
584 		return;
585 
586 	case 0x2d: /* stq */
587 		__asm__ __volatile__(
588 		"1:	ldq_u %2,7(%5)\n"
589 		"2:	ldq_u %1,0(%5)\n"
590 		"	insqh %6,%5,%4\n"
591 		"	insql %6,%5,%3\n"
592 		"	mskqh %2,%5,%2\n"
593 		"	mskql %1,%5,%1\n"
594 		"	or %2,%4,%2\n"
595 		"	or %1,%3,%1\n"
596 		"3:	stq_u %2,7(%5)\n"
597 		"4:	stq_u %1,0(%5)\n"
598 		"5:\n"
599 		".section __ex_table,\"a\"\n\t"
600 		"	.long 1b - .\n"
601 		"	lda %2,5b-1b(%0)\n"
602 		"	.long 2b - .\n"
603 		"	lda %1,5b-2b(%0)\n"
604 		"	.long 3b - .\n"
605 		"	lda $31,5b-3b(%0)\n"
606 		"	.long 4b - .\n"
607 		"	lda $31,5b-4b(%0)\n"
608 		".previous"
609 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
610 			  "=&r"(tmp3), "=&r"(tmp4)
611 			: "r"(va), "r"(una_reg(reg)), "0"(0));
612 		if (error)
613 			goto got_exception;
614 		return;
615 	}
616 
617 	printk("Bad unaligned kernel access at %016lx: %p %lx %lu\n",
618 		pc, va, opcode, reg);
619 	do_exit(SIGSEGV);
620 
621 got_exception:
622 	/* Ok, we caught the exception, but we don't want it.  Is there
623 	   someone to pass it along to?  */
624 	if ((fixup = search_exception_tables(pc)) != 0) {
625 		unsigned long newpc;
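		/* On Alpha, fixup_exception() is a macro taking the
		   register-accessor macro (una_reg here) as its first
		   argument, so the fixup can poke the destination and
		   error registers of this frame directly.  */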
626 		newpc = fixup_exception(una_reg, fixup, pc);
627 
628 		printk("Forwarding unaligned exception at %lx (%lx)\n",
629 		       pc, newpc);
630 
631 		regs->pc = newpc;
632 		return;
633 	}
634 
635 	/*
636 	 * Yikes!  No one to forward the exception to.
637 	 * Since the registers are in a weird format, dump them ourselves.
638  	 */
639 
640 	printk("%s(%d): unhandled unaligned exception\n",
641 	       current->comm, task_pid_nr(current));
642 
643 	printk("pc = [<%016lx>]  ra = [<%016lx>]  ps = %04lx\n",
644 	       pc, una_reg(26), regs->ps);
645 	printk("r0 = %016lx  r1 = %016lx  r2 = %016lx\n",
646 	       una_reg(0), una_reg(1), una_reg(2));
647 	printk("r3 = %016lx  r4 = %016lx  r5 = %016lx\n",
648  	       una_reg(3), una_reg(4), una_reg(5));
649 	printk("r6 = %016lx  r7 = %016lx  r8 = %016lx\n",
650 	       una_reg(6), una_reg(7), una_reg(8));
651 	printk("r9 = %016lx  r10= %016lx  r11= %016lx\n",
652 	       una_reg(9), una_reg(10), una_reg(11));
653 	printk("r12= %016lx  r13= %016lx  r14= %016lx\n",
654 	       una_reg(12), una_reg(13), una_reg(14));
655 	printk("r15= %016lx\n", una_reg(15));
656 	printk("r16= %016lx  r17= %016lx  r18= %016lx\n",
657 	       una_reg(16), una_reg(17), una_reg(18));
658 	printk("r19= %016lx  r20= %016lx  r21= %016lx\n",
659  	       una_reg(19), una_reg(20), una_reg(21));
660  	printk("r22= %016lx  r23= %016lx  r24= %016lx\n",
661 	       una_reg(22), una_reg(23), una_reg(24));
662 	printk("r25= %016lx  r27= %016lx  r28= %016lx\n",
663 	       una_reg(25), una_reg(27), una_reg(28));
664 	printk("gp = %016lx  sp = %p\n", regs->gp, regs+1);
665 
666 	dik_show_code((unsigned int *)pc);
667 	dik_show_trace((unsigned long *)(regs+1));
668 
669 	if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL)) {
670 		printk("die_if_kernel recursion detected.\n");
671 		local_irq_enable();
672 		while (1);
673 	}
674 	do_exit(SIGSEGV);
675 }
676 
677 /*
678  * Convert an s-floating point value in memory format to the
679  * corresponding value in register format.  The exponent
680  * needs to be remapped to preserve non-finite values
681  * (infinities, not-a-numbers, denormals).
682  */
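/*
 * A worked example of the remapping below: 1.0f is 0x3f800000 in S memory
 * format -- sign 0, exponent MSB 0, low exponent bits 0x7f, fraction 0.
 * The exponent becomes 0x7f | (0x7 << 7) = 0x3ff, so the register image
 * is 0x3ff << 52 = 0x3ff0000000000000, i.e. 1.0 in T (IEEE double) format.
 */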
683 static inline unsigned long
684 s_mem_to_reg (unsigned long s_mem)
685 {
686 	unsigned long frac    = (s_mem >>  0) & 0x7fffff;
687 	unsigned long sign    = (s_mem >> 31) & 0x1;
688 	unsigned long exp_msb = (s_mem >> 30) & 0x1;
689 	unsigned long exp_low = (s_mem >> 23) & 0x7f;
690 	unsigned long exp;
691 
692 	exp = (exp_msb << 10) | exp_low;	/* common case */
693 	if (exp_msb) {
694 		if (exp_low == 0x7f) {
695 			exp = 0x7ff;
696 		}
697 	} else {
698 		if (exp_low == 0x00) {
699 			exp = 0x000;
700 		} else {
701 			exp |= (0x7 << 7);
702 		}
703 	}
704 	return (sign << 63) | (exp << 52) | (frac << 29);
705 }
706 
707 /*
708  * Convert an s-floating point value in register format to the
709  * corresponding value in memory format.
710  */
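/*
 * The top two register bits (sign and exponent MSB) move to memory bits
 * 31:30; the low seven exponent bits plus the 23-bit fraction (register
 * bits 58..29) move down to memory bits 29..0.
 */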
711 static inline unsigned long
712 s_reg_to_mem (unsigned long s_reg)
713 {
714 	return ((s_reg >> 62) << 30) | ((s_reg << 5) >> 34);
715 }
716 
717 /*
718  * Handle user-level unaligned fault.  Handling user-level unaligned
719  * faults is *extremely* slow and produces nasty messages.  A user
720  * program *should* fix unaligned faults ASAP.
721  *
722  * Notice that we have (almost) the regular kernel stack layout here,
723  * so finding the appropriate registers is a little more difficult
724  * than in the kernel case.
725  *
726  * Finally, we handle regular integer load/stores only.  In
727  * particular, load-linked/store-conditionally and floating point
728  * particular, load-locked/store-conditional and floating point
729  * unaligned faults (they are guaranteed to fail) and I don't think
730  * the latter will occur in any decent program.
731  *
732  * Sigh. We *do* have to handle some FP operations, because GCC will
733  * use them as temporary storage for integer memory-to-memory copies.
734  * However, we need to deal with stt/ldt and sts/lds only.
735  */
736 
737 #define OP_INT_MASK	( 1L << 0x28 | 1L << 0x2c   /* ldl stl */	\
738 			| 1L << 0x29 | 1L << 0x2d   /* ldq stq */	\
739 			| 1L << 0x0c | 1L << 0x0d   /* ldwu stw */	\
740 			| 1L << 0x0a | 1L << 0x0e ) /* ldbu stb */
741 
742 #define OP_WRITE_MASK	( 1L << 0x26 | 1L << 0x27   /* sts stt */	\
743 			| 1L << 0x2c | 1L << 0x2d   /* stl stq */	\
744 			| 1L << 0x0d | 1L << 0x0e ) /* stw stb */
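
/* "(1L << opcode) & OP_INT_MASK" below distinguishes the integer
   load/stores, whose target is a GPR save slot, from the FP load/stores
   that go through alpha_read_fp_reg()/alpha_write_fp_reg().  */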
745 
746 #define R(x)	((size_t) &((struct pt_regs *)0)->x)
747 
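/*
 * Byte offsets, relative to the struct pt_regs pointer, of each integer
 * register's save slot for a user-mode unaligned trap.  r9-r15 are saved
 * immediately below pt_regs, hence the negative offsets; r30 (usp) and
 * r31 (zero) get dummy entries and are special-cased in do_entUnaUser().
 */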
748 static int unauser_reg_offsets[32] = {
749 	R(r0), R(r1), R(r2), R(r3), R(r4), R(r5), R(r6), R(r7), R(r8),
750 	/* r9 ... r15 are stored in front of regs.  */
751 	-56, -48, -40, -32, -24, -16, -8,
752 	R(r16), R(r17), R(r18),
753 	R(r19), R(r20), R(r21), R(r22), R(r23), R(r24), R(r25), R(r26),
754 	R(r27), R(r28), R(gp),
755 	0, 0
756 };
757 
758 #undef R
759 
760 asmlinkage void
761 do_entUnaUser(void __user * va, unsigned long opcode,
762 	      unsigned long reg, struct pt_regs *regs)
763 {
764 	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
765 
766 	unsigned long tmp1, tmp2, tmp3, tmp4;
767 	unsigned long fake_reg, *reg_addr = &fake_reg;
768 	siginfo_t info;
769 	long error;
770 
771 	/* Check the UAC bits to decide what the user wants us to do
772 	   with the unaligned access.  */
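	/* The per-thread TS_UAC_* bits are normally configured from user
	   space through the OSF setsysinfo() call; the kernel only reads
	   them here.  */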
773 
774 	if (!(current_thread_info()->status & TS_UAC_NOPRINT)) {
775 		if (__ratelimit(&ratelimit)) {
776 			printk("%s(%d): unaligned trap at %016lx: %p %lx %ld\n",
777 			       current->comm, task_pid_nr(current),
778 			       regs->pc - 4, va, opcode, reg);
779 		}
780 	}
781 	if ((current_thread_info()->status & TS_UAC_SIGBUS))
782 		goto give_sigbus;
783 	/* Not sure why you'd want to use this, but... */
784 	if ((current_thread_info()->status & TS_UAC_NOFIX))
785 		return;
786 
787 	/* Don't bother reading ds in the access check since we already
788 	   know that this came from the user.  Also rely on the fact that
789 	   the page at TASK_SIZE is unmapped and so can't be touched anyway. */
790 	if (!__access_ok((unsigned long)va, 0, USER_DS))
791 		goto give_sigsegv;
792 
793 	++unaligned[1].count;
794 	unaligned[1].va = (unsigned long)va;
795 	unaligned[1].pc = regs->pc - 4;
796 
797 	if ((1L << opcode) & OP_INT_MASK) {
798 		/* it's an integer load/store */
799 		if (reg < 30) {
800 			reg_addr = (unsigned long *)
801 			  ((char *)regs + unauser_reg_offsets[reg]);
802 		} else if (reg == 30) {
803 			/* usp in PAL regs */
804 			fake_reg = rdusp();
805 		} else {
806 			/* zero "register" */
807 			fake_reg = 0;
808 		}
809 	}
810 
811 	/* We don't want to use the generic get/put unaligned macros as
812 	   we want to trap exceptions.  Only if we actually get an
813 	   exception will we decide whether we should have caught it.  */
814 
815 	switch (opcode) {
816 	case 0x0c: /* ldwu */
817 		__asm__ __volatile__(
818 		"1:	ldq_u %1,0(%3)\n"
819 		"2:	ldq_u %2,1(%3)\n"
820 		"	extwl %1,%3,%1\n"
821 		"	extwh %2,%3,%2\n"
822 		"3:\n"
823 		".section __ex_table,\"a\"\n"
824 		"	.long 1b - .\n"
825 		"	lda %1,3b-1b(%0)\n"
826 		"	.long 2b - .\n"
827 		"	lda %2,3b-2b(%0)\n"
828 		".previous"
829 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
830 			: "r"(va), "0"(0));
831 		if (error)
832 			goto give_sigsegv;
833 		*reg_addr = tmp1|tmp2;
834 		break;
835 
836 	case 0x22: /* lds */
837 		__asm__ __volatile__(
838 		"1:	ldq_u %1,0(%3)\n"
839 		"2:	ldq_u %2,3(%3)\n"
840 		"	extll %1,%3,%1\n"
841 		"	extlh %2,%3,%2\n"
842 		"3:\n"
843 		".section __ex_table,\"a\"\n"
844 		"	.long 1b - .\n"
845 		"	lda %1,3b-1b(%0)\n"
846 		"	.long 2b - .\n"
847 		"	lda %2,3b-2b(%0)\n"
848 		".previous"
849 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
850 			: "r"(va), "0"(0));
851 		if (error)
852 			goto give_sigsegv;
853 		alpha_write_fp_reg(reg, s_mem_to_reg((int)(tmp1|tmp2)));
854 		return;
855 
856 	case 0x23: /* ldt */
857 		__asm__ __volatile__(
858 		"1:	ldq_u %1,0(%3)\n"
859 		"2:	ldq_u %2,7(%3)\n"
860 		"	extql %1,%3,%1\n"
861 		"	extqh %2,%3,%2\n"
862 		"3:\n"
863 		".section __ex_table,\"a\"\n"
864 		"	.long 1b - .\n"
865 		"	lda %1,3b-1b(%0)\n"
866 		"	.long 2b - .\n"
867 		"	lda %2,3b-2b(%0)\n"
868 		".previous"
869 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
870 			: "r"(va), "0"(0));
871 		if (error)
872 			goto give_sigsegv;
873 		alpha_write_fp_reg(reg, tmp1|tmp2);
874 		return;
875 
876 	case 0x28: /* ldl */
877 		__asm__ __volatile__(
878 		"1:	ldq_u %1,0(%3)\n"
879 		"2:	ldq_u %2,3(%3)\n"
880 		"	extll %1,%3,%1\n"
881 		"	extlh %2,%3,%2\n"
882 		"3:\n"
883 		".section __ex_table,\"a\"\n"
884 		"	.long 1b - .\n"
885 		"	lda %1,3b-1b(%0)\n"
886 		"	.long 2b - .\n"
887 		"	lda %2,3b-2b(%0)\n"
888 		".previous"
889 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
890 			: "r"(va), "0"(0));
891 		if (error)
892 			goto give_sigsegv;
893 		*reg_addr = (int)(tmp1|tmp2);
894 		break;
895 
896 	case 0x29: /* ldq */
897 		__asm__ __volatile__(
898 		"1:	ldq_u %1,0(%3)\n"
899 		"2:	ldq_u %2,7(%3)\n"
900 		"	extql %1,%3,%1\n"
901 		"	extqh %2,%3,%2\n"
902 		"3:\n"
903 		".section __ex_table,\"a\"\n"
904 		"	.long 1b - .\n"
905 		"	lda %1,3b-1b(%0)\n"
906 		"	.long 2b - .\n"
907 		"	lda %2,3b-2b(%0)\n"
908 		".previous"
909 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
910 			: "r"(va), "0"(0));
911 		if (error)
912 			goto give_sigsegv;
913 		*reg_addr = tmp1|tmp2;
914 		break;
915 
916 	/* Note that the store sequences do not indicate that they change
917 	   memory because it _should_ be affecting nothing in this context.
918 	   (Otherwise we have other, much larger, problems.)  */
919 	case 0x0d: /* stw */
920 		__asm__ __volatile__(
921 		"1:	ldq_u %2,1(%5)\n"
922 		"2:	ldq_u %1,0(%5)\n"
923 		"	inswh %6,%5,%4\n"
924 		"	inswl %6,%5,%3\n"
925 		"	mskwh %2,%5,%2\n"
926 		"	mskwl %1,%5,%1\n"
927 		"	or %2,%4,%2\n"
928 		"	or %1,%3,%1\n"
929 		"3:	stq_u %2,1(%5)\n"
930 		"4:	stq_u %1,0(%5)\n"
931 		"5:\n"
932 		".section __ex_table,\"a\"\n"
933 		"	.long 1b - .\n"
934 		"	lda %2,5b-1b(%0)\n"
935 		"	.long 2b - .\n"
936 		"	lda %1,5b-2b(%0)\n"
937 		"	.long 3b - .\n"
938 		"	lda $31,5b-3b(%0)\n"
939 		"	.long 4b - .\n"
940 		"	lda $31,5b-4b(%0)\n"
941 		".previous"
942 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
943 			  "=&r"(tmp3), "=&r"(tmp4)
944 			: "r"(va), "r"(*reg_addr), "0"(0));
945 		if (error)
946 			goto give_sigsegv;
947 		return;
948 
949 	case 0x26: /* sts */
950 		fake_reg = s_reg_to_mem(alpha_read_fp_reg(reg));
951 		/* FALLTHRU */
952 
953 	case 0x2c: /* stl */
954 		__asm__ __volatile__(
955 		"1:	ldq_u %2,3(%5)\n"
956 		"2:	ldq_u %1,0(%5)\n"
957 		"	inslh %6,%5,%4\n"
958 		"	insll %6,%5,%3\n"
959 		"	msklh %2,%5,%2\n"
960 		"	mskll %1,%5,%1\n"
961 		"	or %2,%4,%2\n"
962 		"	or %1,%3,%1\n"
963 		"3:	stq_u %2,3(%5)\n"
964 		"4:	stq_u %1,0(%5)\n"
965 		"5:\n"
966 		".section __ex_table,\"a\"\n"
967 		"	.long 1b - .\n"
968 		"	lda %2,5b-1b(%0)\n"
969 		"	.long 2b - .\n"
970 		"	lda %1,5b-2b(%0)\n"
971 		"	.long 3b - .\n"
972 		"	lda $31,5b-3b(%0)\n"
973 		"	.long 4b - .\n"
974 		"	lda $31,5b-4b(%0)\n"
975 		".previous"
976 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
977 			  "=&r"(tmp3), "=&r"(tmp4)
978 			: "r"(va), "r"(*reg_addr), "0"(0));
979 		if (error)
980 			goto give_sigsegv;
981 		return;
982 
983 	case 0x27: /* stt */
984 		fake_reg = alpha_read_fp_reg(reg);
985 		/* FALLTHRU */
986 
987 	case 0x2d: /* stq */
988 		__asm__ __volatile__(
989 		"1:	ldq_u %2,7(%5)\n"
990 		"2:	ldq_u %1,0(%5)\n"
991 		"	insqh %6,%5,%4\n"
992 		"	insql %6,%5,%3\n"
993 		"	mskqh %2,%5,%2\n"
994 		"	mskql %1,%5,%1\n"
995 		"	or %2,%4,%2\n"
996 		"	or %1,%3,%1\n"
997 		"3:	stq_u %2,7(%5)\n"
998 		"4:	stq_u %1,0(%5)\n"
999 		"5:\n"
1000 		".section __ex_table,\"a\"\n\t"
1001 		"	.long 1b - .\n"
1002 		"	lda %2,5b-1b(%0)\n"
1003 		"	.long 2b - .\n"
1004 		"	lda %1,5b-2b(%0)\n"
1005 		"	.long 3b - .\n"
1006 		"	lda $31,5b-3b(%0)\n"
1007 		"	.long 4b - .\n"
1008 		"	lda $31,5b-4b(%0)\n"
1009 		".previous"
1010 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
1011 			  "=&r"(tmp3), "=&r"(tmp4)
1012 			: "r"(va), "r"(*reg_addr), "0"(0));
1013 		if (error)
1014 			goto give_sigsegv;
1015 		return;
1016 
1017 	default:
1018 		/* What instruction were you trying to use, exactly?  */
1019 		goto give_sigbus;
1020 	}
1021 
1022 	/* Only integer loads should get here; everyone else returns early. */
1023 	if (reg == 30)
1024 		wrusp(fake_reg);
1025 	return;
1026 
1027 give_sigsegv:
1028 	regs->pc -= 4;  /* make pc point to faulting insn */
1029 	info.si_signo = SIGSEGV;
1030 	info.si_errno = 0;
1031 
1032 	/* We need to replicate some of the logic in mm/fault.c,
1033 	   since we don't have access to the fault code in the
1034 	   exception handling return path.  */
1035 	if (!__access_ok((unsigned long)va, 0, USER_DS))
1036 		info.si_code = SEGV_ACCERR;
1037 	else {
1038 		struct mm_struct *mm = current->mm;
1039 		down_read(&mm->mmap_sem);
1040 		if (find_vma(mm, (unsigned long)va))
1041 			info.si_code = SEGV_ACCERR;
1042 		else
1043 			info.si_code = SEGV_MAPERR;
1044 		up_read(&mm->mmap_sem);
1045 	}
1046 	info.si_addr = va;
1047 	send_sig_info(SIGSEGV, &info, current);
1048 	return;
1049 
1050 give_sigbus:
1051 	regs->pc -= 4;
1052 	info.si_signo = SIGBUS;
1053 	info.si_errno = 0;
1054 	info.si_code = BUS_ADRALN;
1055 	info.si_addr = va;
1056 	send_sig_info(SIGBUS, &info, current);
1057 	return;
1058 }
1059 
1060 void
1061 trap_init(void)
1062 {
1063 	/* Tell PAL-code what global pointer we want in the kernel.  */
1064 	register unsigned long gptr __asm__("$29");
1065 	wrkgp(gptr);
1066 
1067 	/* Hack for Multia (UDB) and JENSEN: some of their SRMs have
1068 	   a bug in the handling of the opDEC fault.  Fix it up if so.  */
1069 	if (implver() == IMPLVER_EV4)
1070 		opDEC_check();
1071 
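	/* The wrent numbers select the PALcode entry class: 1 arithmetic,
	   2 memory management, 3 instruction fault, 4 unaligned access,
	   5 system call, 6 debug.  */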
1072 	wrent(entArith, 1);
1073 	wrent(entMM, 2);
1074 	wrent(entIF, 3);
1075 	wrent(entUna, 4);
1076 	wrent(entSys, 5);
1077 	wrent(entDbg, 6);
1078 }
1079