xref: /openbmc/linux/arch/alpha/kernel/traps.c (revision a1e58bbd)
1 /*
2  * arch/alpha/kernel/traps.c
3  *
4  * (C) Copyright 1994 Linus Torvalds
5  */
6 
7 /*
8  * This file initializes the trap entry points
9  */
10 
11 #include <linux/mm.h>
12 #include <linux/sched.h>
13 #include <linux/tty.h>
14 #include <linux/delay.h>
15 #include <linux/smp_lock.h>
16 #include <linux/module.h>
17 #include <linux/init.h>
18 #include <linux/kallsyms.h>
19 
20 #include <asm/gentrap.h>
21 #include <asm/uaccess.h>
22 #include <asm/unaligned.h>
23 #include <asm/sysinfo.h>
24 #include <asm/hwrpb.h>
25 #include <asm/mmu_context.h>
26 
27 #include "proto.h"
28 
29 /* Work-around for some SRMs which mishandle opDEC faults.  */
30 
31 static int opDEC_fix;
32 
33 static void __init
34 opDEC_check(void)
35 {
36 	__asm__ __volatile__ (
37 	/* Load the address of... */
38 	"	br	$16, 1f\n"
39 	/* A stub instruction fault handler.  Just add 4 to the
40 	   pc and continue.  */
41 	"	ldq	$16, 8($sp)\n"
42 	"	addq	$16, 4, $16\n"
43 	"	stq	$16, 8($sp)\n"
44 	"	call_pal %[rti]\n"
45 	/* Install the instruction fault handler.  */
46 	"1:	lda	$17, 3\n"
47 	"	call_pal %[wrent]\n"
48 	/* With that in place, the fault from the round-to-minf fp
49 	   insn will arrive either at the "lda 4" insn (bad) or one
50 	   past that (good).  This places the correct fixup in %0.  */
51 	"	lda %[fix], 0\n"
52 	"	cvttq/svm $f31,$f31\n"
53 	"	lda %[fix], 4"
54 	: [fix] "=r" (opDEC_fix)
55 	: [rti] "n" (PAL_rti), [wrent] "n" (PAL_wrent)
56 	: "$0", "$1", "$16", "$17", "$22", "$23", "$24", "$25");
57 
58 	if (opDEC_fix)
59 		printk("opDEC fixup enabled.\n");
60 }
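
/*
 * How the probe above plays out (summarizing the asm comments): the stub
 * handler bumps the saved PC by 4 and returns.  A conforming SRM reports
 * the PC of the insn *after* the faulting cvttq/svm, so execution resumes
 * past the final "lda %[fix], 4" and opDEC_fix stays 0.  A buggy SRM
 * reports the faulting PC itself, so execution resumes at that lda and
 * opDEC_fix becomes 4; do_entIF() compensates with "regs->pc += opDEC_fix"
 * in its opDEC case below.
 */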
61 
62 void
63 dik_show_regs(struct pt_regs *regs, unsigned long *r9_15)
64 {
65 	printk("pc = [<%016lx>]  ra = [<%016lx>]  ps = %04lx    %s\n",
66 	       regs->pc, regs->r26, regs->ps, print_tainted());
67 	print_symbol("pc is at %s\n", regs->pc);
68 	print_symbol("ra is at %s\n", regs->r26 );
69 	printk("v0 = %016lx  t0 = %016lx  t1 = %016lx\n",
70 	       regs->r0, regs->r1, regs->r2);
71 	printk("t2 = %016lx  t3 = %016lx  t4 = %016lx\n",
72  	       regs->r3, regs->r4, regs->r5);
73 	printk("t5 = %016lx  t6 = %016lx  t7 = %016lx\n",
74 	       regs->r6, regs->r7, regs->r8);
75 
76 	if (r9_15) {
77 		printk("s0 = %016lx  s1 = %016lx  s2 = %016lx\n",
78 		       r9_15[9], r9_15[10], r9_15[11]);
79 		printk("s3 = %016lx  s4 = %016lx  s5 = %016lx\n",
80 		       r9_15[12], r9_15[13], r9_15[14]);
81 		printk("s6 = %016lx\n", r9_15[15]);
82 	}
83 
84 	printk("a0 = %016lx  a1 = %016lx  a2 = %016lx\n",
85 	       regs->r16, regs->r17, regs->r18);
86 	printk("a3 = %016lx  a4 = %016lx  a5 = %016lx\n",
87  	       regs->r19, regs->r20, regs->r21);
88  	printk("t8 = %016lx  t9 = %016lx  t10= %016lx\n",
89 	       regs->r22, regs->r23, regs->r24);
90 	printk("t11= %016lx  pv = %016lx  at = %016lx\n",
91 	       regs->r25, regs->r27, regs->r28);
92 	printk("gp = %016lx  sp = %p\n", regs->gp, regs+1);
93 #if 0
94 __halt();
95 #endif
96 }
97 
98 #if 0
99 static char * ireg_name[] = {"v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
100 			   "t7", "s0", "s1", "s2", "s3", "s4", "s5", "s6",
101 			   "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
102 			   "t10", "t11", "ra", "pv", "at", "gp", "sp", "zero"};
103 #endif
104 
105 static void
106 dik_show_code(unsigned int *pc)
107 {
108 	long i;
109 
110 	printk("Code:");
111 	for (i = -6; i < 2; i++) {
112 		unsigned int insn;
113 		if (__get_user(insn, (unsigned int __user *)pc + i))
114 			break;
115 		printk("%c%08x%c", i ? ' ' : '<', insn, i ? ' ' : '>');
116 	}
117 	printk("\n");
118 }
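
/*
 * The output looks roughly like (illustrative):
 *
 *   Code: xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx <xxxxxxxx> xxxxxxxx
 *
 * i.e. the six instruction words before the faulting PC, the word at the PC
 * itself bracketed in <>, and one word after it.
 */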
119 
120 static void
121 dik_show_trace(unsigned long *sp)
122 {
123 	long i = 0;
124 	printk("Trace:\n");
125 	while (0x1ff8 & (unsigned long) sp) {
126 		extern char _stext[], _etext[];
127 		unsigned long tmp = *sp;
128 		sp++;
129 		if (tmp < (unsigned long) &_stext)
130 			continue;
131 		if (tmp >= (unsigned long) &_etext)
132 			continue;
133 		printk("[<%lx>]", tmp);
134 		print_symbol(" %s", tmp);
135 		printk("\n");
136 		if (++i > 40) {
137 			printk(" ...");
138 			break;
139 		}
140 	}
141 	printk("\n");
142 }
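
/*
 * Note that this is a heuristic scan, not a real unwind: it walks up the
 * kernel stack until it hits an 8KB boundary (the 0x1ff8 mask) and prints
 * every saved quadword that lands inside [_stext, _etext), i.e. anything
 * that looks like a kernel text address.
 */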
143 
144 static int kstack_depth_to_print = 24;
145 
146 void show_stack(struct task_struct *task, unsigned long *sp)
147 {
148 	unsigned long *stack;
149 	int i;
150 
151 	/*
152 	 * debugging aid: "show_stack(NULL);" prints the
153 	 * back trace for this cpu.
154 	 */
155 	if (sp == NULL)
156 		sp = (unsigned long *)&sp;
157 
158 	stack = sp;
159 	for (i = 0; i < kstack_depth_to_print; i++) {
160 		if (((long) stack & (THREAD_SIZE-1)) == 0)
161 			break;
162 		if (i && ((i % 4) == 0))
163 			printk("\n       ");
164 		printk("%016lx ", *stack++);
165 	}
166 	printk("\n");
167 	dik_show_trace(sp);
168 }
169 
170 void dump_stack(void)
171 {
172 	show_stack(NULL, NULL);
173 }
174 
175 EXPORT_SYMBOL(dump_stack);
176 
177 void
178 die_if_kernel(char * str, struct pt_regs *regs, long err, unsigned long *r9_15)
179 {
180 	if (regs->ps & 8)
181 		return;
182 #ifdef CONFIG_SMP
183 	printk("CPU %d ", hard_smp_processor_id());
184 #endif
185 	printk("%s(%d): %s %ld\n", current->comm, task_pid_nr(current), str, err);
186 	dik_show_regs(regs, r9_15);
187 	add_taint(TAINT_DIE);
188 	dik_show_trace((unsigned long *)(regs+1));
189 	dik_show_code((unsigned int *)regs->pc);
190 
191 	if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL)) {
192 		printk("die_if_kernel recursion detected.\n");
193 		local_irq_enable();
194 		while (1);
195 	}
196 	do_exit(SIGSEGV);
197 }
198 
199 #ifndef CONFIG_MATHEMU
200 static long dummy_emul(void) { return 0; }
201 long (*alpha_fp_emul_imprecise)(struct pt_regs *regs, unsigned long writemask)
202   = (void *)dummy_emul;
203 long (*alpha_fp_emul) (unsigned long pc)
204   = (void *)dummy_emul;
205 #else
206 long alpha_fp_emul_imprecise(struct pt_regs *regs, unsigned long writemask);
207 long alpha_fp_emul (unsigned long pc);
208 #endif
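
/*
 * Return convention assumed by the callers below: 0 means the emulator
 * handled the instruction completely (no signal needed), a positive value
 * is the si_code to deliver with SIGFPE, and a negative value means the
 * insn could not be emulated (do_entIF then falls back to SIGILL).
 */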
209 
210 asmlinkage void
211 do_entArith(unsigned long summary, unsigned long write_mask,
212 	    struct pt_regs *regs)
213 {
214 	long si_code = FPE_FLTINV;
215 	siginfo_t info;
216 
217 	if (summary & 1) {
218 		/* Software-completion summary bit is set, so try to
219 		   emulate the instruction.  If the processor supports
220 		   precise exceptions, we don't have to search.  */
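		/* (amask() clears the bits for features the CPU implements, so
		   !amask(AMASK_PRECISE_TRAP) means precise arithmetic traps are
		   available and regs->pc - 4 is exactly the faulting insn;
		   otherwise the imprecise emulator must search the trap shadow.)  */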
221 		if (!amask(AMASK_PRECISE_TRAP))
222 			si_code = alpha_fp_emul(regs->pc - 4);
223 		else
224 			si_code = alpha_fp_emul_imprecise(regs, write_mask);
225 		if (si_code == 0)
226 			return;
227 	}
228 	die_if_kernel("Arithmetic fault", regs, 0, NULL);
229 
230 	info.si_signo = SIGFPE;
231 	info.si_errno = 0;
232 	info.si_code = si_code;
233 	info.si_addr = (void __user *) regs->pc;
234 	send_sig_info(SIGFPE, &info, current);
235 }
236 
237 asmlinkage void
238 do_entIF(unsigned long type, struct pt_regs *regs)
239 {
240 	siginfo_t info;
241 	int signo, code;
242 
243 	if ((regs->ps & ~IPL_MAX) == 0) {
244 		if (type == 1) {
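			/* Alpha's BUG() plants a .long line number and an
			   8-byte file-name pointer right after the bugcheck
			   insn; decode them from the faulting PC.  */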
245 			const unsigned int *data
246 			  = (const unsigned int *) regs->pc;
247 			printk("Kernel bug at %s:%d\n",
248 			       (const char *)(data[1] | (long)data[2] << 32),
249 			       data[0]);
250 		}
251 		die_if_kernel((type == 1 ? "Kernel Bug" : "Instruction fault"),
252 			      regs, type, NULL);
253 	}
254 
255 	switch (type) {
256 	      case 0: /* breakpoint */
257 		info.si_signo = SIGTRAP;
258 		info.si_errno = 0;
259 		info.si_code = TRAP_BRKPT;
260 		info.si_trapno = 0;
261 		info.si_addr = (void __user *) regs->pc;
262 
263 		if (ptrace_cancel_bpt(current)) {
264 			regs->pc -= 4;	/* make pc point to former bpt */
265 		}
266 
267 		send_sig_info(SIGTRAP, &info, current);
268 		return;
269 
270 	      case 1: /* bugcheck */
271 		info.si_signo = SIGTRAP;
272 		info.si_errno = 0;
273 		info.si_code = __SI_FAULT;
274 		info.si_addr = (void __user *) regs->pc;
275 		info.si_trapno = 0;
276 		send_sig_info(SIGTRAP, &info, current);
277 		return;
278 
279 	      case 2: /* gentrap */
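		/* The gentrap PALcall takes a code in a0 ($16), which PALcode
		   leaves in regs->r16.  Run-time helpers use it for conditions
		   the hardware cannot trap on directly -- e.g. the integer
		   division helpers raise GEN_INTDIV on division by zero.  */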
280 		info.si_addr = (void __user *) regs->pc;
281 		info.si_trapno = regs->r16;
282 		switch ((long) regs->r16) {
283 		case GEN_INTOVF:
284 			signo = SIGFPE;
285 			code = FPE_INTOVF;
286 			break;
287 		case GEN_INTDIV:
288 			signo = SIGFPE;
289 			code = FPE_INTDIV;
290 			break;
291 		case GEN_FLTOVF:
292 			signo = SIGFPE;
293 			code = FPE_FLTOVF;
294 			break;
295 		case GEN_FLTDIV:
296 			signo = SIGFPE;
297 			code = FPE_FLTDIV;
298 			break;
299 		case GEN_FLTUND:
300 			signo = SIGFPE;
301 			code = FPE_FLTUND;
302 			break;
303 		case GEN_FLTINV:
304 			signo = SIGFPE;
305 			code = FPE_FLTINV;
306 			break;
307 		case GEN_FLTINE:
308 			signo = SIGFPE;
309 			code = FPE_FLTRES;
310 			break;
311 		case GEN_ROPRAND:
312 			signo = SIGFPE;
313 			code = __SI_FAULT;
314 			break;
315 
316 		case GEN_DECOVF:
317 		case GEN_DECDIV:
318 		case GEN_DECINV:
319 		case GEN_ASSERTERR:
320 		case GEN_NULPTRERR:
321 		case GEN_STKOVF:
322 		case GEN_STRLENERR:
323 		case GEN_SUBSTRERR:
324 		case GEN_RANGERR:
325 		case GEN_SUBRNG:
326 		case GEN_SUBRNG1:
327 		case GEN_SUBRNG2:
328 		case GEN_SUBRNG3:
329 		case GEN_SUBRNG4:
330 		case GEN_SUBRNG5:
331 		case GEN_SUBRNG6:
332 		case GEN_SUBRNG7:
333 		default:
334 			signo = SIGTRAP;
335 			code = __SI_FAULT;
336 			break;
337 		}
338 
339 		info.si_signo = signo;
340 		info.si_errno = 0;
341 		info.si_code = code;
342 		info.si_addr = (void __user *) regs->pc;
343 		send_sig_info(signo, &info, current);
344 		return;
345 
346 	      case 4: /* opDEC */
347 		if (implver() == IMPLVER_EV4) {
348 			long si_code;
349 
350 			/* Some versions of SRM do not handle the opDEC
351 			   fault properly - they report the PC of the
352 			   faulting insn itself, not the one after it as
353 			   the Alpha architecture requires.  Fix that up
354 			   here.  We detect the bug by intentionally
355 			   causing an opDEC fault during boot and checking
356 			   whether the reported PC is correct; if not, we
357 			   set a fixup applied every time through.  */
358 			regs->pc += opDEC_fix;
359 
360 			/* EV4 does not implement anything except normal
361 			   rounding.  Everything else will come here as
362 			   an illegal instruction.  Emulate them.  */
363 			si_code = alpha_fp_emul(regs->pc - 4);
364 			if (si_code == 0)
365 				return;
366 			if (si_code > 0) {
367 				info.si_signo = SIGFPE;
368 				info.si_errno = 0;
369 				info.si_code = si_code;
370 				info.si_addr = (void __user *) regs->pc;
371 				send_sig_info(SIGFPE, &info, current);
372 				return;
373 			}
374 		}
375 		break;
376 
377 	      case 3: /* FEN fault */
378 		/* Irritating users can call PAL_clrfen to disable the
379 		   FPU for the process.  The kernel will then trap in
380 		   do_switch_stack and undo_switch_stack when we try
381 		   to save and restore the FP registers.
382 
383 		   Given that GCC by default generates code that uses the
384 		   FP registers, PAL_clrfen is not useful except for DoS
385 		   attacks.  So turn the bleeding FPU back on and be done
386 		   with it.  */
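		/* Bit 0 of the HWPCB flags quadword is FEN (floating-point
		   enable); set it and reload the PCB to re-enable the FPU.  */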
387 		current_thread_info()->pcb.flags |= 1;
388 		__reload_thread(&current_thread_info()->pcb);
389 		return;
390 
391 	      case 5: /* illoc */
392 	      default: /* unexpected instruction-fault type */
393 		      ;
394 	}
395 
396 	info.si_signo = SIGILL;
397 	info.si_errno = 0;
398 	info.si_code = ILL_ILLOPC;
399 	info.si_addr = (void __user *) regs->pc;
400 	send_sig_info(SIGILL, &info, current);
401 }
402 
403 /* There is an ifdef in the PALcode in MILO that enables a
404    "kernel debugging entry point" as an unprivileged call_pal.
405 
406    We don't want to have anything to do with it, but unfortunately
407    several versions of MILO included in distributions have it enabled,
408    and if we don't put something on the entry point we'll oops.  */
409 
410 asmlinkage void
411 do_entDbg(struct pt_regs *regs)
412 {
413 	siginfo_t info;
414 
415 	die_if_kernel("Instruction fault", regs, 0, NULL);
416 
417 	info.si_signo = SIGILL;
418 	info.si_errno = 0;
419 	info.si_code = ILL_ILLOPC;
420 	info.si_addr = (void __user *) regs->pc;
421 	force_sig_info(SIGILL, &info, current);
422 }
423 
424 
425 /*
426  * entUna uses a different register layout so that access stays simple:
427  * it needs all the integer registers (the kernel doesn't use fp-regs),
428  * and it needs them stored in order so they can be indexed directly.
429  *
430  * Due to the non-standard register layout (and because we don't want
431  * to handle floating-point regs), user-mode unaligned accesses are
432  * handled separately by do_entUnaUser below.
433  *
434  * Oh, btw, we don't handle the "gp" register correctly, but if we fault
435  * on a gp-register unaligned load/store, something is _very_ wrong
436  * in the kernel anyway..
437  */
438 struct allregs {
439 	unsigned long regs[32];
440 	unsigned long ps, pc, gp, a0, a1, a2;
441 };
442 
443 struct unaligned_stat {
444 	unsigned long count, va, pc;
445 } unaligned[2];
446 
447 
448 /* Macro for exception fixup code to access integer registers.  */
449 #define una_reg(r)  (regs->regs[(r) >= 16 && (r) <= 18 ? (r)+19 : (r)])
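/* (r16..r18, i.e. a0..a2, are not kept in regs[] itself -- entUna saves them
   in the trailing a0/a1/a2 members of struct allregs, which sit at indices
   35..37 when the structure is viewed as one flat array; hence the "+19".)  */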
450 
451 
452 asmlinkage void
453 do_entUna(void * va, unsigned long opcode, unsigned long reg,
454 	  struct allregs *regs)
455 {
456 	long error, tmp1, tmp2, tmp3, tmp4;
457 	unsigned long pc = regs->pc - 4;
458 	const struct exception_table_entry *fixup;
459 
460 	unaligned[0].count++;
461 	unaligned[0].va = (unsigned long) va;
462 	unaligned[0].pc = pc;
463 
464 	/* We don't want to use the generic get/put unaligned macros as
465 	   we want to trap exceptions.  Only if we actually get an
466 	   exception will we decide whether we should have caught it.  */
467 
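	/* Pattern used by every case below: two aligned ldq_u loads cover the
	   misaligned datum, the extXl/extXh insns pick out the bytes, and the
	   two halves are OR-ed together (the stores use the ins and msk insns
	   plus two stq_u).  Each ".long nb - ." in __ex_table records a
	   load/store that may fault; the "lda reg,cont-fault(err)" after it is
	   never executed -- its fields encode the fixup: "reg" is zeroed,
	   "err" gets a non-zero error value, and execution resumes at the
	   continuation label, which is why the C code only tests "if (error)".  */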
468 	switch (opcode) {
469 	case 0x0c: /* ldwu */
470 		__asm__ __volatile__(
471 		"1:	ldq_u %1,0(%3)\n"
472 		"2:	ldq_u %2,1(%3)\n"
473 		"	extwl %1,%3,%1\n"
474 		"	extwh %2,%3,%2\n"
475 		"3:\n"
476 		".section __ex_table,\"a\"\n"
477 		"	.long 1b - .\n"
478 		"	lda %1,3b-1b(%0)\n"
479 		"	.long 2b - .\n"
480 		"	lda %2,3b-2b(%0)\n"
481 		".previous"
482 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
483 			: "r"(va), "0"(0));
484 		if (error)
485 			goto got_exception;
486 		una_reg(reg) = tmp1|tmp2;
487 		return;
488 
489 	case 0x28: /* ldl */
490 		__asm__ __volatile__(
491 		"1:	ldq_u %1,0(%3)\n"
492 		"2:	ldq_u %2,3(%3)\n"
493 		"	extll %1,%3,%1\n"
494 		"	extlh %2,%3,%2\n"
495 		"3:\n"
496 		".section __ex_table,\"a\"\n"
497 		"	.long 1b - .\n"
498 		"	lda %1,3b-1b(%0)\n"
499 		"	.long 2b - .\n"
500 		"	lda %2,3b-2b(%0)\n"
501 		".previous"
502 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
503 			: "r"(va), "0"(0));
504 		if (error)
505 			goto got_exception;
506 		una_reg(reg) = (int)(tmp1|tmp2);
507 		return;
508 
509 	case 0x29: /* ldq */
510 		__asm__ __volatile__(
511 		"1:	ldq_u %1,0(%3)\n"
512 		"2:	ldq_u %2,7(%3)\n"
513 		"	extql %1,%3,%1\n"
514 		"	extqh %2,%3,%2\n"
515 		"3:\n"
516 		".section __ex_table,\"a\"\n"
517 		"	.long 1b - .\n"
518 		"	lda %1,3b-1b(%0)\n"
519 		"	.long 2b - .\n"
520 		"	lda %2,3b-2b(%0)\n"
521 		".previous"
522 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
523 			: "r"(va), "0"(0));
524 		if (error)
525 			goto got_exception;
526 		una_reg(reg) = tmp1|tmp2;
527 		return;
528 
529 	/* Note that the store sequences do not indicate that they change
530 	   memory because it _should_ be affecting nothing in this context.
531 	   (Otherwise we have other, much larger, problems.)  */
532 	case 0x0d: /* stw */
533 		__asm__ __volatile__(
534 		"1:	ldq_u %2,1(%5)\n"
535 		"2:	ldq_u %1,0(%5)\n"
536 		"	inswh %6,%5,%4\n"
537 		"	inswl %6,%5,%3\n"
538 		"	mskwh %2,%5,%2\n"
539 		"	mskwl %1,%5,%1\n"
540 		"	or %2,%4,%2\n"
541 		"	or %1,%3,%1\n"
542 		"3:	stq_u %2,1(%5)\n"
543 		"4:	stq_u %1,0(%5)\n"
544 		"5:\n"
545 		".section __ex_table,\"a\"\n"
546 		"	.long 1b - .\n"
547 		"	lda %2,5b-1b(%0)\n"
548 		"	.long 2b - .\n"
549 		"	lda %1,5b-2b(%0)\n"
550 		"	.long 3b - .\n"
551 		"	lda $31,5b-3b(%0)\n"
552 		"	.long 4b - .\n"
553 		"	lda $31,5b-4b(%0)\n"
554 		".previous"
555 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
556 			  "=&r"(tmp3), "=&r"(tmp4)
557 			: "r"(va), "r"(una_reg(reg)), "0"(0));
558 		if (error)
559 			goto got_exception;
560 		return;
561 
562 	case 0x2c: /* stl */
563 		__asm__ __volatile__(
564 		"1:	ldq_u %2,3(%5)\n"
565 		"2:	ldq_u %1,0(%5)\n"
566 		"	inslh %6,%5,%4\n"
567 		"	insll %6,%5,%3\n"
568 		"	msklh %2,%5,%2\n"
569 		"	mskll %1,%5,%1\n"
570 		"	or %2,%4,%2\n"
571 		"	or %1,%3,%1\n"
572 		"3:	stq_u %2,3(%5)\n"
573 		"4:	stq_u %1,0(%5)\n"
574 		"5:\n"
575 		".section __ex_table,\"a\"\n"
576 		"	.long 1b - .\n"
577 		"	lda %2,5b-1b(%0)\n"
578 		"	.long 2b - .\n"
579 		"	lda %1,5b-2b(%0)\n"
580 		"	.long 3b - .\n"
581 		"	lda $31,5b-3b(%0)\n"
582 		"	.long 4b - .\n"
583 		"	lda $31,5b-4b(%0)\n"
584 		".previous"
585 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
586 			  "=&r"(tmp3), "=&r"(tmp4)
587 			: "r"(va), "r"(una_reg(reg)), "0"(0));
588 		if (error)
589 			goto got_exception;
590 		return;
591 
592 	case 0x2d: /* stq */
593 		__asm__ __volatile__(
594 		"1:	ldq_u %2,7(%5)\n"
595 		"2:	ldq_u %1,0(%5)\n"
596 		"	insqh %6,%5,%4\n"
597 		"	insql %6,%5,%3\n"
598 		"	mskqh %2,%5,%2\n"
599 		"	mskql %1,%5,%1\n"
600 		"	or %2,%4,%2\n"
601 		"	or %1,%3,%1\n"
602 		"3:	stq_u %2,7(%5)\n"
603 		"4:	stq_u %1,0(%5)\n"
604 		"5:\n"
605 		".section __ex_table,\"a\"\n\t"
606 		"	.long 1b - .\n"
607 		"	lda %2,5b-1b(%0)\n"
608 		"	.long 2b - .\n"
609 		"	lda %1,5b-2b(%0)\n"
610 		"	.long 3b - .\n"
611 		"	lda $31,5b-3b(%0)\n"
612 		"	.long 4b - .\n"
613 		"	lda $31,5b-4b(%0)\n"
614 		".previous"
615 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
616 			  "=&r"(tmp3), "=&r"(tmp4)
617 			: "r"(va), "r"(una_reg(reg)), "0"(0));
618 		if (error)
619 			goto got_exception;
620 		return;
621 	}
622 
623 	lock_kernel();
624 	printk("Bad unaligned kernel access at %016lx: %p %lx %ld\n",
625 		pc, va, opcode, reg);
626 	do_exit(SIGSEGV);
627 
628 got_exception:
629 	/* Ok, we caught the exception, but we don't want it.  Is there
630 	   someone to pass it along to?  */
631 	if ((fixup = search_exception_tables(pc)) != 0) {
632 		unsigned long newpc;
633 		newpc = fixup_exception(una_reg, fixup, pc);
634 
635 		printk("Forwarding unaligned exception at %lx (%lx)\n",
636 		       pc, newpc);
637 
638 		regs->pc = newpc;
639 		return;
640 	}
641 
642 	/*
643 	 * Yikes!  No one to forward the exception to.
644 	 * Since the registers are in a weird format, dump them ourselves.
645  	 */
646 	lock_kernel();
647 
648 	printk("%s(%d): unhandled unaligned exception\n",
649 	       current->comm, task_pid_nr(current));
650 
651 	printk("pc = [<%016lx>]  ra = [<%016lx>]  ps = %04lx\n",
652 	       pc, una_reg(26), regs->ps);
653 	printk("r0 = %016lx  r1 = %016lx  r2 = %016lx\n",
654 	       una_reg(0), una_reg(1), una_reg(2));
655 	printk("r3 = %016lx  r4 = %016lx  r5 = %016lx\n",
656  	       una_reg(3), una_reg(4), una_reg(5));
657 	printk("r6 = %016lx  r7 = %016lx  r8 = %016lx\n",
658 	       una_reg(6), una_reg(7), una_reg(8));
659 	printk("r9 = %016lx  r10= %016lx  r11= %016lx\n",
660 	       una_reg(9), una_reg(10), una_reg(11));
661 	printk("r12= %016lx  r13= %016lx  r14= %016lx\n",
662 	       una_reg(12), una_reg(13), una_reg(14));
663 	printk("r15= %016lx\n", una_reg(15));
664 	printk("r16= %016lx  r17= %016lx  r18= %016lx\n",
665 	       una_reg(16), una_reg(17), una_reg(18));
666 	printk("r19= %016lx  r20= %016lx  r21= %016lx\n",
667  	       una_reg(19), una_reg(20), una_reg(21));
668  	printk("r22= %016lx  r23= %016lx  r24= %016lx\n",
669 	       una_reg(22), una_reg(23), una_reg(24));
670 	printk("r25= %016lx  r27= %016lx  r28= %016lx\n",
671 	       una_reg(25), una_reg(27), una_reg(28));
672 	printk("gp = %016lx  sp = %p\n", regs->gp, regs+1);
673 
674 	dik_show_code((unsigned int *)pc);
675 	dik_show_trace((unsigned long *)(regs+1));
676 
677 	if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL)) {
678 		printk("die_if_kernel recursion detected.\n");
679 		local_irq_enable();
680 		while (1);
681 	}
682 	do_exit(SIGSEGV);
683 }
684 
685 /*
686  * Convert an s-floating point value in memory format to the
687  * corresponding value in register format.  The exponent
688  * needs to be remapped to preserve non-finite values
689  * (infinities, not-a-numbers, denormals).
690  */
691 static inline unsigned long
692 s_mem_to_reg (unsigned long s_mem)
693 {
694 	unsigned long frac    = (s_mem >>  0) & 0x7fffff;
695 	unsigned long sign    = (s_mem >> 31) & 0x1;
696 	unsigned long exp_msb = (s_mem >> 30) & 0x1;
697 	unsigned long exp_low = (s_mem >> 23) & 0x7f;
698 	unsigned long exp;
699 
700 	exp = (exp_msb << 10) | exp_low;	/* common case */
701 	if (exp_msb) {
702 		if (exp_low == 0x7f) {
703 			exp = 0x7ff;
704 		}
705 	} else {
706 		if (exp_low == 0x00) {
707 			exp = 0x000;
708 		} else {
709 			exp |= (0x7 << 7);
710 		}
711 	}
712 	return (sign << 63) | (exp << 52) | (frac << 29);
713 }
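
/*
 * In table form, the 8-bit S-format memory exponent maps onto the 11-bit
 * register (T-format) exponent like this:
 *
 *	memory exponent		register exponent
 *	0xff  (Inf/NaN)		0x7ff
 *	0x80..0xfe		0x400 | (low 7 bits)
 *	0x01..0x7f		0x380 | (low 7 bits)
 *	0x00  (zero/denormal)	0x000
 */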
714 
715 /*
716  * Convert an s-floating point value in register format to the
717  * corresponding value in memory format.
718  */
719 static inline unsigned long
720 s_reg_to_mem (unsigned long s_reg)
721 {
722 	return ((s_reg >> 62) << 30) | ((s_reg << 5) >> 34);
723 }
724 
725 /*
726  * Handle user-level unaligned fault.  Handling user-level unaligned
727  * faults is *extremely* slow and produces nasty messages.  A user
728  * program *should* fix unaligned faults ASAP.
729  *
730  * Notice that we have (almost) the regular kernel stack layout here,
731  * so finding the appropriate registers is a little more difficult
732  * than in the kernel case.
733  *
734  * Finally, we handle regular integer load/stores only.  In
735  * particular, load-linked/store-conditionally and floating point
736  * load/stores are not supported.  The former make no sense with
737  * unaligned faults (they are guaranteed to fail) and I don't think
738  * the latter will occur in any decent program.
739  *
740  * Sigh. We *do* have to handle some FP operations, because GCC will
741  * use them as temporary storage for integer memory-to-memory copies.
742  * However, we need to deal with stt/ldt and sts/lds only.
743  */
744 
745 #define OP_INT_MASK	( 1L << 0x28 | 1L << 0x2c   /* ldl stl */	\
746 			| 1L << 0x29 | 1L << 0x2d   /* ldq stq */	\
747 			| 1L << 0x0c | 1L << 0x0d   /* ldwu stw */	\
748 			| 1L << 0x0a | 1L << 0x0e ) /* ldbu stb */
749 
750 #define OP_WRITE_MASK	( 1L << 0x26 | 1L << 0x27   /* sts stt */	\
751 			| 1L << 0x2c | 1L << 0x2d   /* stl stq */	\
752 			| 1L << 0x0d | 1L << 0x0e ) /* stw stb */
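
/* Both masks are meant for "(1L << opcode) & MASK" membership tests (see the
   use of OP_INT_MASK below): the Alpha opcode field is 6 bits wide, so every
   opcode fits in one bit of a 64-bit constant.  */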
753 
754 #define R(x)	((size_t) &((struct pt_regs *)0)->x)
755 
756 static int unauser_reg_offsets[32] = {
757 	R(r0), R(r1), R(r2), R(r3), R(r4), R(r5), R(r6), R(r7), R(r8),
758 	/* r9 ... r15 are stored in front of regs.  */
759 	-56, -48, -40, -32, -24, -16, -8,
760 	R(r16), R(r17), R(r18),
761 	R(r19), R(r20), R(r21), R(r22), R(r23), R(r24), R(r25), R(r26),
762 	R(r27), R(r28), R(gp),
763 	0, 0
764 };
765 
766 #undef R
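
/* The negative entries are byte offsets below the pt_regs block: the
   unaligned-trap entry path saves r9..r15 just under pt_regs, as noted in
   the table above.  Slots 30 and 31 are placeholders -- the usp and the
   zero register are special-cased via fake_reg in do_entUnaUser().  */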
767 
768 asmlinkage void
769 do_entUnaUser(void __user * va, unsigned long opcode,
770 	      unsigned long reg, struct pt_regs *regs)
771 {
772 	static int cnt = 0;
773 	static long last_time = 0;
774 
775 	unsigned long tmp1, tmp2, tmp3, tmp4;
776 	unsigned long fake_reg, *reg_addr = &fake_reg;
777 	siginfo_t info;
778 	long error;
779 
780 	/* Check the UAC bits to decide what the user wants us to do
781 	   with the unaligned access.  */
782 
783 	if (!test_thread_flag (TIF_UAC_NOPRINT)) {
784 		if (cnt >= 5 && jiffies - last_time > 5*HZ) {
785 			cnt = 0;
786 		}
787 		if (++cnt < 5) {
788 			printk("%s(%d): unaligned trap at %016lx: %p %lx %ld\n",
789 			       current->comm, task_pid_nr(current),
790 			       regs->pc - 4, va, opcode, reg);
791 		}
792 		last_time = jiffies;
793 	}
794 	if (test_thread_flag (TIF_UAC_SIGBUS))
795 		goto give_sigbus;
796 	/* Not sure why you'd want to use this, but... */
797 	if (test_thread_flag (TIF_UAC_NOFIX))
798 		return;
799 
800 	/* Don't bother reading ds in the access check since we already
801 	   know that this came from the user.  Also rely on the fact that
802 	   the page at TASK_SIZE is unmapped and so can't be touched anyway. */
803 	if (!__access_ok((unsigned long)va, 0, USER_DS))
804 		goto give_sigsegv;
805 
806 	++unaligned[1].count;
807 	unaligned[1].va = (unsigned long)va;
808 	unaligned[1].pc = regs->pc - 4;
809 
810 	if ((1L << opcode) & OP_INT_MASK) {
811 		/* it's an integer load/store */
812 		if (reg < 30) {
813 			reg_addr = (unsigned long *)
814 			  ((char *)regs + unauser_reg_offsets[reg]);
815 		} else if (reg == 30) {
816 			/* usp in PAL regs */
817 			fake_reg = rdusp();
818 		} else {
819 			/* zero "register" */
820 			fake_reg = 0;
821 		}
822 	}
823 
824 	/* We don't want to use the generic get/put unaligned macros as
825 	   we want to trap exceptions.  Only if we actually get an
826 	   exception will we decide whether we should have caught it.  */
827 
828 	switch (opcode) {
829 	case 0x0c: /* ldwu */
830 		__asm__ __volatile__(
831 		"1:	ldq_u %1,0(%3)\n"
832 		"2:	ldq_u %2,1(%3)\n"
833 		"	extwl %1,%3,%1\n"
834 		"	extwh %2,%3,%2\n"
835 		"3:\n"
836 		".section __ex_table,\"a\"\n"
837 		"	.long 1b - .\n"
838 		"	lda %1,3b-1b(%0)\n"
839 		"	.long 2b - .\n"
840 		"	lda %2,3b-2b(%0)\n"
841 		".previous"
842 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
843 			: "r"(va), "0"(0));
844 		if (error)
845 			goto give_sigsegv;
846 		*reg_addr = tmp1|tmp2;
847 		break;
848 
849 	case 0x22: /* lds */
850 		__asm__ __volatile__(
851 		"1:	ldq_u %1,0(%3)\n"
852 		"2:	ldq_u %2,3(%3)\n"
853 		"	extll %1,%3,%1\n"
854 		"	extlh %2,%3,%2\n"
855 		"3:\n"
856 		".section __ex_table,\"a\"\n"
857 		"	.long 1b - .\n"
858 		"	lda %1,3b-1b(%0)\n"
859 		"	.long 2b - .\n"
860 		"	lda %2,3b-2b(%0)\n"
861 		".previous"
862 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
863 			: "r"(va), "0"(0));
864 		if (error)
865 			goto give_sigsegv;
866 		alpha_write_fp_reg(reg, s_mem_to_reg((int)(tmp1|tmp2)));
867 		return;
868 
869 	case 0x23: /* ldt */
870 		__asm__ __volatile__(
871 		"1:	ldq_u %1,0(%3)\n"
872 		"2:	ldq_u %2,7(%3)\n"
873 		"	extql %1,%3,%1\n"
874 		"	extqh %2,%3,%2\n"
875 		"3:\n"
876 		".section __ex_table,\"a\"\n"
877 		"	.long 1b - .\n"
878 		"	lda %1,3b-1b(%0)\n"
879 		"	.long 2b - .\n"
880 		"	lda %2,3b-2b(%0)\n"
881 		".previous"
882 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
883 			: "r"(va), "0"(0));
884 		if (error)
885 			goto give_sigsegv;
886 		alpha_write_fp_reg(reg, tmp1|tmp2);
887 		return;
888 
889 	case 0x28: /* ldl */
890 		__asm__ __volatile__(
891 		"1:	ldq_u %1,0(%3)\n"
892 		"2:	ldq_u %2,3(%3)\n"
893 		"	extll %1,%3,%1\n"
894 		"	extlh %2,%3,%2\n"
895 		"3:\n"
896 		".section __ex_table,\"a\"\n"
897 		"	.long 1b - .\n"
898 		"	lda %1,3b-1b(%0)\n"
899 		"	.long 2b - .\n"
900 		"	lda %2,3b-2b(%0)\n"
901 		".previous"
902 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
903 			: "r"(va), "0"(0));
904 		if (error)
905 			goto give_sigsegv;
906 		*reg_addr = (int)(tmp1|tmp2);
907 		break;
908 
909 	case 0x29: /* ldq */
910 		__asm__ __volatile__(
911 		"1:	ldq_u %1,0(%3)\n"
912 		"2:	ldq_u %2,7(%3)\n"
913 		"	extql %1,%3,%1\n"
914 		"	extqh %2,%3,%2\n"
915 		"3:\n"
916 		".section __ex_table,\"a\"\n"
917 		"	.long 1b - .\n"
918 		"	lda %1,3b-1b(%0)\n"
919 		"	.long 2b - .\n"
920 		"	lda %2,3b-2b(%0)\n"
921 		".previous"
922 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
923 			: "r"(va), "0"(0));
924 		if (error)
925 			goto give_sigsegv;
926 		*reg_addr = tmp1|tmp2;
927 		break;
928 
929 	/* Note that the store sequences do not indicate that they change
930 	   memory because it _should_ be affecting nothing in this context.
931 	   (Otherwise we have other, much larger, problems.)  */
932 	case 0x0d: /* stw */
933 		__asm__ __volatile__(
934 		"1:	ldq_u %2,1(%5)\n"
935 		"2:	ldq_u %1,0(%5)\n"
936 		"	inswh %6,%5,%4\n"
937 		"	inswl %6,%5,%3\n"
938 		"	mskwh %2,%5,%2\n"
939 		"	mskwl %1,%5,%1\n"
940 		"	or %2,%4,%2\n"
941 		"	or %1,%3,%1\n"
942 		"3:	stq_u %2,1(%5)\n"
943 		"4:	stq_u %1,0(%5)\n"
944 		"5:\n"
945 		".section __ex_table,\"a\"\n"
946 		"	.long 1b - .\n"
947 		"	lda %2,5b-1b(%0)\n"
948 		"	.long 2b - .\n"
949 		"	lda %1,5b-2b(%0)\n"
950 		"	.long 3b - .\n"
951 		"	lda $31,5b-3b(%0)\n"
952 		"	.long 4b - .\n"
953 		"	lda $31,5b-4b(%0)\n"
954 		".previous"
955 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
956 			  "=&r"(tmp3), "=&r"(tmp4)
957 			: "r"(va), "r"(*reg_addr), "0"(0));
958 		if (error)
959 			goto give_sigsegv;
960 		return;
961 
962 	case 0x26: /* sts */
963 		fake_reg = s_reg_to_mem(alpha_read_fp_reg(reg));
964 		/* FALLTHRU */
965 
966 	case 0x2c: /* stl */
967 		__asm__ __volatile__(
968 		"1:	ldq_u %2,3(%5)\n"
969 		"2:	ldq_u %1,0(%5)\n"
970 		"	inslh %6,%5,%4\n"
971 		"	insll %6,%5,%3\n"
972 		"	msklh %2,%5,%2\n"
973 		"	mskll %1,%5,%1\n"
974 		"	or %2,%4,%2\n"
975 		"	or %1,%3,%1\n"
976 		"3:	stq_u %2,3(%5)\n"
977 		"4:	stq_u %1,0(%5)\n"
978 		"5:\n"
979 		".section __ex_table,\"a\"\n"
980 		"	.long 1b - .\n"
981 		"	lda %2,5b-1b(%0)\n"
982 		"	.long 2b - .\n"
983 		"	lda %1,5b-2b(%0)\n"
984 		"	.long 3b - .\n"
985 		"	lda $31,5b-3b(%0)\n"
986 		"	.long 4b - .\n"
987 		"	lda $31,5b-4b(%0)\n"
988 		".previous"
989 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
990 			  "=&r"(tmp3), "=&r"(tmp4)
991 			: "r"(va), "r"(*reg_addr), "0"(0));
992 		if (error)
993 			goto give_sigsegv;
994 		return;
995 
996 	case 0x27: /* stt */
997 		fake_reg = alpha_read_fp_reg(reg);
998 		/* FALLTHRU */
999 
1000 	case 0x2d: /* stq */
1001 		__asm__ __volatile__(
1002 		"1:	ldq_u %2,7(%5)\n"
1003 		"2:	ldq_u %1,0(%5)\n"
1004 		"	insqh %6,%5,%4\n"
1005 		"	insql %6,%5,%3\n"
1006 		"	mskqh %2,%5,%2\n"
1007 		"	mskql %1,%5,%1\n"
1008 		"	or %2,%4,%2\n"
1009 		"	or %1,%3,%1\n"
1010 		"3:	stq_u %2,7(%5)\n"
1011 		"4:	stq_u %1,0(%5)\n"
1012 		"5:\n"
1013 		".section __ex_table,\"a\"\n\t"
1014 		"	.long 1b - .\n"
1015 		"	lda %2,5b-1b(%0)\n"
1016 		"	.long 2b - .\n"
1017 		"	lda %1,5b-2b(%0)\n"
1018 		"	.long 3b - .\n"
1019 		"	lda $31,5b-3b(%0)\n"
1020 		"	.long 4b - .\n"
1021 		"	lda $31,5b-4b(%0)\n"
1022 		".previous"
1023 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
1024 			  "=&r"(tmp3), "=&r"(tmp4)
1025 			: "r"(va), "r"(*reg_addr), "0"(0));
1026 		if (error)
1027 			goto give_sigsegv;
1028 		return;
1029 
1030 	default:
1031 		/* What instruction were you trying to use, exactly?  */
1032 		goto give_sigbus;
1033 	}
1034 
1035 	/* Only integer loads should get here; everyone else returns early. */
1036 	if (reg == 30)
1037 		wrusp(fake_reg);
1038 	return;
1039 
1040 give_sigsegv:
1041 	regs->pc -= 4;  /* make pc point to faulting insn */
1042 	info.si_signo = SIGSEGV;
1043 	info.si_errno = 0;
1044 
1045 	/* We need to replicate some of the logic in mm/fault.c,
1046 	   since we don't have access to the fault code in the
1047 	   exception handling return path.  */
1048 	if (!__access_ok((unsigned long)va, 0, USER_DS))
1049 		info.si_code = SEGV_ACCERR;
1050 	else {
1051 		struct mm_struct *mm = current->mm;
1052 		down_read(&mm->mmap_sem);
1053 		if (find_vma(mm, (unsigned long)va))
1054 			info.si_code = SEGV_ACCERR;
1055 		else
1056 			info.si_code = SEGV_MAPERR;
1057 		up_read(&mm->mmap_sem);
1058 	}
1059 	info.si_addr = va;
1060 	send_sig_info(SIGSEGV, &info, current);
1061 	return;
1062 
1063 give_sigbus:
1064 	regs->pc -= 4;
1065 	info.si_signo = SIGBUS;
1066 	info.si_errno = 0;
1067 	info.si_code = BUS_ADRALN;
1068 	info.si_addr = va;
1069 	send_sig_info(SIGBUS, &info, current);
1070 	return;
1071 }
1072 
1073 void __init
1074 trap_init(void)
1075 {
1076 	/* Tell PAL-code what global pointer we want in the kernel.  */
1077 	register unsigned long gptr __asm__("$29");
1078 	wrkgp(gptr);
1079 
1080 	/* Hack for Multia (UDB) and JENSEN: some of their SRMs have
1081 	   a bug in the handling of the opDEC fault.  Fix it up if so.  */
1082 	if (implver() == IMPLVER_EV4)
1083 		opDEC_check();
1084 
1085 	wrent(entArith, 1);
1086 	wrent(entMM, 2);
1087 	wrent(entIF, 3);
1088 	wrent(entUna, 4);
1089 	wrent(entSys, 5);
1090 	wrent(entDbg, 6);
1091 }
1092