1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * arch/alpha/kernel/traps.c
4 *
5 * (C) Copyright 1994 Linus Torvalds
6 */
7
8 /*
9 * This file initializes the trap entry points
10 */
11
12 #include <linux/jiffies.h>
13 #include <linux/mm.h>
14 #include <linux/sched/signal.h>
15 #include <linux/sched/debug.h>
16 #include <linux/tty.h>
17 #include <linux/delay.h>
18 #include <linux/extable.h>
19 #include <linux/kallsyms.h>
20 #include <linux/ratelimit.h>
21
22 #include <asm/gentrap.h>
23 #include <linux/uaccess.h>
24 #include <asm/unaligned.h>
25 #include <asm/sysinfo.h>
26 #include <asm/hwrpb.h>
27 #include <asm/mmu_context.h>
28 #include <asm/special_insns.h>
29
30 #include "proto.h"
31
/* Work-around for some SRMs which mishandle opDEC faults. */

/* Fixup (0 or 4) added to the saved PC on an EV4 opDEC fault;
   computed once at boot by opDEC_check() below.  */
static int opDEC_fix;

/*
 * Probe whether this SRM's PALcode reports the correct PC for an
 * opDEC (illegal instruction) fault.  The Alpha architecture requires
 * the reported PC to be the instruction *after* the faulting one;
 * buggy SRMs report the faulting instruction itself.  We deliberately
 * trigger an opDEC fault and see where the fault handler's PC lands.
 */
static void
opDEC_check(void)
{
	__asm__ __volatile__ (
	/* Load the address of... */
	"	br	$16, 1f\n"
	/* A stub instruction fault handler.  Just add 4 to the
	   pc and continue. */
	"	ldq	$16, 8($sp)\n"
	"	addq	$16, 4, $16\n"
	"	stq	$16, 8($sp)\n"
	"	call_pal %[rti]\n"
	/* Install the instruction fault handler. */
	"1:	lda	$17, 3\n"
	"	call_pal %[wrent]\n"
	/* With that in place, the fault from the round-to-minf fp
	   insn will arrive either at the "lda 4" insn (bad) or one
	   past that (good). This places the correct fixup in %0. */
	"	lda	%[fix], 0\n"
	"	cvttq/svm $f31,$f31\n"
	"	lda	%[fix], 4"
	: [fix] "=r" (opDEC_fix)
	: [rti] "n" (PAL_rti), [wrent] "n" (PAL_wrent)
	: "$0", "$1", "$16", "$17", "$22", "$23", "$24", "$25");

	if (opDEC_fix)
		printk("opDEC fixup enabled.\n");
}
64
/*
 * Dump the saved register state for a dying kernel context.
 * @regs:  trap-time pt_regs frame.
 * @r9_15: optional pointer such that indices 9..15 yield the callee-saved
 *         registers s0..s6 (may be NULL when they were not saved).
 */
void
dik_show_regs(struct pt_regs *regs, unsigned long *r9_15)
{
	printk("pc = [<%016lx>]  ra = [<%016lx>]  ps = %04lx    %s\n",
	       regs->pc, regs->r26, regs->ps, print_tainted());
	printk("pc is at %pSR\n", (void *)regs->pc);
	printk("ra is at %pSR\n", (void *)regs->r26);
	printk("v0 = %016lx  t0 = %016lx  t1 = %016lx\n",
	       regs->r0, regs->r1, regs->r2);
	printk("t2 = %016lx  t3 = %016lx  t4 = %016lx\n",
	       regs->r3, regs->r4, regs->r5);
	printk("t5 = %016lx  t6 = %016lx  t7 = %016lx\n",
	       regs->r6, regs->r7, regs->r8);

	if (r9_15) {
		printk("s0 = %016lx  s1 = %016lx  s2 = %016lx\n",
		       r9_15[9], r9_15[10], r9_15[11]);
		printk("s3 = %016lx  s4 = %016lx  s5 = %016lx\n",
		       r9_15[12], r9_15[13], r9_15[14]);
		printk("s6 = %016lx\n", r9_15[15]);
	}

	printk("a0 = %016lx  a1 = %016lx  a2 = %016lx\n",
	       regs->r16, regs->r17, regs->r18);
	printk("a3 = %016lx  a4 = %016lx  a5 = %016lx\n",
	       regs->r19, regs->r20, regs->r21);
	printk("t8 = %016lx  t9 = %016lx  t10= %016lx\n",
	       regs->r22, regs->r23, regs->r24);
	printk("t11= %016lx  pv = %016lx  at = %016lx\n",
	       regs->r25, regs->r27, regs->r28);
	/* regs+1 is the first address past the frame, i.e. the trap-time sp. */
	printk("gp = %016lx  sp = %p\n", regs->gp, regs+1);
#if 0
	__halt();
#endif
}
100
101 #if 0
102 static char * ireg_name[] = {"v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
103 "t7", "s0", "s1", "s2", "s3", "s4", "s5", "s6",
104 "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
105 "t10", "t11", "ra", "pv", "at", "gp", "sp", "zero"};
106 #endif
107
/*
 * Dump the instruction words around a faulting PC: six words before,
 * the faulting word itself bracketed in <...>, and one word after.
 * Stops early at the first word that cannot be read.
 */
static void
dik_show_code(unsigned int *pc)
{
	long i;

	printk("Code:");
	for (i = -6; i < 2; i++) {
		unsigned int insn;
		/* NOTE(review): __get_user on what is usually a kernel PC --
		   assumes the accessor tolerates kernel addresses here;
		   confirm (newer code would use get_kernel_nofault). */
		if (__get_user(insn, (unsigned int __user *)pc + i))
			break;
		printk("%c%08x%c", i ? ' ' : '<', insn, i ? ' ' : '>');
	}
	printk("\n");
}
122
/*
 * Print a best-effort backtrace by scanning the kernel stack upward
 * from @sp and reporting every word that looks like a kernel text
 * address.  @loglvl is the printk log-level prefix for every line.
 */
static void
dik_show_trace(unsigned long *sp, const char *loglvl)
{
	long i = 0;

	printk("%sTrace:\n", loglvl);
	/* The mask goes to zero at the 8K-aligned top of the stack,
	   which bounds the scan. */
	while (0x1ff8 & (unsigned long) sp) {
		unsigned long tmp = *sp;
		sp++;
		if (!is_kernel_text(tmp))
			continue;
		printk("%s[<%lx>] %pSR\n", loglvl, tmp, (void *)tmp);
		/* Cap the output; previously `i' was never incremented,
		   so this limit was dead code. */
		if (i++ > 40) {
			printk("%s ...", loglvl);
			break;
		}
	}
	printk("%s\n", loglvl);
}
142
143 static int kstack_depth_to_print = 24;
144
show_stack(struct task_struct * task,unsigned long * sp,const char * loglvl)145 void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
146 {
147 unsigned long *stack;
148 int i;
149
150 /*
151 * debugging aid: "show_stack(NULL, NULL, KERN_EMERG);" prints the
152 * back trace for this cpu.
153 */
154 if(sp==NULL)
155 sp=(unsigned long*)&sp;
156
157 stack = sp;
158 for(i=0; i < kstack_depth_to_print; i++) {
159 if (((long) stack & (THREAD_SIZE-1)) == 0)
160 break;
161 if ((i % 4) == 0) {
162 if (i)
163 pr_cont("\n");
164 printk("%s ", loglvl);
165 } else {
166 pr_cont(" ");
167 }
168 pr_cont("%016lx", *stack++);
169 }
170 pr_cont("\n");
171 dik_show_trace(sp, loglvl);
172 }
173
/*
 * Terminate the current task if a trap occurred in kernel mode.
 * Dumps registers, trace, and code around the PC, then kills the
 * task with SIGSEGV.  Returns without doing anything when the saved
 * PS indicates the trap came from user context.
 */
void
die_if_kernel(char * str, struct pt_regs *regs, long err, unsigned long *r9_15)
{
	/* PS bit 3 set: presumably trap came from user mode -- not our
	   problem here. */
	if (regs->ps & 8)
		return;
#ifdef CONFIG_SMP
	printk("CPU %d ", hard_smp_processor_id());
#endif
	printk("%s(%d): %s %ld\n", current->comm, task_pid_nr(current), str, err);
	dik_show_regs(regs, r9_15);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	/* regs+1 is the trap-time stack pointer. */
	dik_show_trace((unsigned long *)(regs+1), KERN_DEFAULT);
	dik_show_code((unsigned int *)regs->pc);

	/* If we die again while dying, spin instead of recursing forever. */
	if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL)) {
		printk("die_if_kernel recursion detected.\n");
		local_irq_enable();
		while (1);
	}
	make_task_dead(SIGSEGV);
}
195
#ifndef CONFIG_MATHEMU
/* No FP emulator configured: install harmless stubs so the handlers
   below can call through these pointers unconditionally.  Presumably
   the math-emu module replaces them when loaded -- verify. */
static long dummy_emul(void) { return 0; }
long (*alpha_fp_emul_imprecise)(struct pt_regs *regs, unsigned long writemask)
  = (void *)dummy_emul;
EXPORT_SYMBOL_GPL(alpha_fp_emul_imprecise);
long (*alpha_fp_emul) (unsigned long pc)
  = (void *)dummy_emul;
EXPORT_SYMBOL_GPL(alpha_fp_emul);
#else
/* Emulator built in: direct declarations. */
long alpha_fp_emul_imprecise(struct pt_regs *regs, unsigned long writemask);
long alpha_fp_emul (unsigned long pc);
#endif
208
/*
 * Arithmetic trap entry point.  Bit 0 of @summary is the
 * software-completion bit; when set we try FP emulation first.
 * A zero si_code from the emulator means the trap was fully handled.
 * Otherwise: die in kernel mode, or deliver SIGFPE to the user task.
 */
asmlinkage void
do_entArith(unsigned long summary, unsigned long write_mask,
	    struct pt_regs *regs)
{
	long si_code = FPE_FLTINV;

	if (summary & 1) {
		/* Software-completion summary bit is set, so try to
		   emulate the instruction.  If the processor supports
		   precise exceptions, we don't have to search. */
		if (!amask(AMASK_PRECISE_TRAP))
			si_code = alpha_fp_emul(regs->pc - 4);
		else
			si_code = alpha_fp_emul_imprecise(regs, write_mask);
		if (si_code == 0)
			return;
	}
	die_if_kernel("Arithmetic fault", regs, 0, NULL);

	send_sig_fault_trapno(SIGFPE, si_code, (void __user *) regs->pc, 0, current);
}
230
231 asmlinkage void
do_entIF(unsigned long type,struct pt_regs * regs)232 do_entIF(unsigned long type, struct pt_regs *regs)
233 {
234 int signo, code;
235
236 if (type == 3) { /* FEN fault */
237 /* Irritating users can call PAL_clrfen to disable the
238 FPU for the process. The kernel will then trap in
239 do_switch_stack and undo_switch_stack when we try
240 to save and restore the FP registers.
241
242 Given that GCC by default generates code that uses the
243 FP registers, PAL_clrfen is not useful except for DoS
244 attacks. So turn the bleeding FPU back on and be done
245 with it. */
246 current_thread_info()->pcb.flags |= 1;
247 __reload_thread(¤t_thread_info()->pcb);
248 return;
249 }
250 if (!user_mode(regs)) {
251 if (type == 1) {
252 const unsigned int *data
253 = (const unsigned int *) regs->pc;
254 printk("Kernel bug at %s:%d\n",
255 (const char *)(data[1] | (long)data[2] << 32),
256 data[0]);
257 }
258 #ifdef CONFIG_ALPHA_WTINT
259 if (type == 4) {
260 /* If CALL_PAL WTINT is totally unsupported by the
261 PALcode, e.g. MILO, "emulate" it by overwriting
262 the insn. */
263 unsigned int *pinsn
264 = (unsigned int *) regs->pc - 1;
265 if (*pinsn == PAL_wtint) {
266 *pinsn = 0x47e01400; /* mov 0,$0 */
267 imb();
268 regs->r0 = 0;
269 return;
270 }
271 }
272 #endif /* ALPHA_WTINT */
273 die_if_kernel((type == 1 ? "Kernel Bug" : "Instruction fault"),
274 regs, type, NULL);
275 }
276
277 switch (type) {
278 case 0: /* breakpoint */
279 if (ptrace_cancel_bpt(current)) {
280 regs->pc -= 4; /* make pc point to former bpt */
281 }
282
283 send_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->pc,
284 current);
285 return;
286
287 case 1: /* bugcheck */
288 send_sig_fault_trapno(SIGTRAP, TRAP_UNK,
289 (void __user *) regs->pc, 0, current);
290 return;
291
292 case 2: /* gentrap */
293 switch ((long) regs->r16) {
294 case GEN_INTOVF:
295 signo = SIGFPE;
296 code = FPE_INTOVF;
297 break;
298 case GEN_INTDIV:
299 signo = SIGFPE;
300 code = FPE_INTDIV;
301 break;
302 case GEN_FLTOVF:
303 signo = SIGFPE;
304 code = FPE_FLTOVF;
305 break;
306 case GEN_FLTDIV:
307 signo = SIGFPE;
308 code = FPE_FLTDIV;
309 break;
310 case GEN_FLTUND:
311 signo = SIGFPE;
312 code = FPE_FLTUND;
313 break;
314 case GEN_FLTINV:
315 signo = SIGFPE;
316 code = FPE_FLTINV;
317 break;
318 case GEN_FLTINE:
319 signo = SIGFPE;
320 code = FPE_FLTRES;
321 break;
322 case GEN_ROPRAND:
323 signo = SIGFPE;
324 code = FPE_FLTUNK;
325 break;
326
327 case GEN_DECOVF:
328 case GEN_DECDIV:
329 case GEN_DECINV:
330 case GEN_ASSERTERR:
331 case GEN_NULPTRERR:
332 case GEN_STKOVF:
333 case GEN_STRLENERR:
334 case GEN_SUBSTRERR:
335 case GEN_RANGERR:
336 case GEN_SUBRNG:
337 case GEN_SUBRNG1:
338 case GEN_SUBRNG2:
339 case GEN_SUBRNG3:
340 case GEN_SUBRNG4:
341 case GEN_SUBRNG5:
342 case GEN_SUBRNG6:
343 case GEN_SUBRNG7:
344 default:
345 signo = SIGTRAP;
346 code = TRAP_UNK;
347 break;
348 }
349
350 send_sig_fault_trapno(signo, code, (void __user *) regs->pc,
351 regs->r16, current);
352 return;
353
354 case 4: /* opDEC */
355 if (implver() == IMPLVER_EV4) {
356 long si_code;
357
358 /* The some versions of SRM do not handle
359 the opDEC properly - they return the PC of the
360 opDEC fault, not the instruction after as the
361 Alpha architecture requires. Here we fix it up.
362 We do this by intentionally causing an opDEC
363 fault during the boot sequence and testing if
364 we get the correct PC. If not, we set a flag
365 to correct it every time through. */
366 regs->pc += opDEC_fix;
367
368 /* EV4 does not implement anything except normal
369 rounding. Everything else will come here as
370 an illegal instruction. Emulate them. */
371 si_code = alpha_fp_emul(regs->pc - 4);
372 if (si_code == 0)
373 return;
374 if (si_code > 0) {
375 send_sig_fault_trapno(SIGFPE, si_code,
376 (void __user *) regs->pc,
377 0, current);
378 return;
379 }
380 }
381 break;
382
383 case 5: /* illoc */
384 default: /* unexpected instruction-fault type */
385 ;
386 }
387
388 send_sig_fault(SIGILL, ILL_ILLOPC, (void __user *)regs->pc, current);
389 }
390
391 /* There is an ifdef in the PALcode in MILO that enables a
392 "kernel debugging entry point" as an unprivileged call_pal.
393
394 We don't want to have anything to do with it, but unfortunately
395 several versions of MILO included in distributions have it enabled,
396 and if we don't put something on the entry point we'll oops. */
397
/*
 * Entry point for MILO's unprivileged "kernel debugging" call_pal.
 * We never want it: die in kernel mode, SIGILL the user otherwise.
 */
asmlinkage void
do_entDbg(struct pt_regs *regs)
{
	die_if_kernel("Instruction fault", regs, 0, NULL);

	force_sig_fault(SIGILL, ILL_ILLOPC, (void __user *)regs->pc);
}
405
406
407 /*
408 * entUna has a different register layout to be reasonably simple. It
409 * needs access to all the integer registers (the kernel doesn't use
410 * fp-regs), and it needs to have them in order for simpler access.
411 *
412 * Due to the non-standard register layout (and because we don't want
413 * to handle floating-point regs), user-mode unaligned accesses are
414 * handled separately by do_entUnaUser below.
415 *
416 * Oh, btw, we don't handle the "gp" register correctly, but if we fault
417 * on a gp-register unaligned load/store, something is _very_ wrong
418 * in the kernel anyway..
419 */
/* Register layout used by entUna: all 32 integer registers in order,
   followed by the trap frame tail. */
struct allregs {
	unsigned long regs[32];
	unsigned long ps, pc, gp, a0, a1, a2;
};

/* Unaligned-access statistics: [0] kernel faults, [1] user faults. */
struct unaligned_stat {
	unsigned long count, va, pc;
} unaligned[2];


/* Macro for exception fixup code to access integer registers.
   a0-a2 (r16-r18) live after the regs[] array in struct allregs,
   hence the +19 remap for those three. */
#define una_reg(r)	(_regs[(r) >= 16 && (r) <= 18 ? (r)+19 : (r)])
432
433
/*
 * Kernel-mode unaligned access handler.  Emulates the faulting
 * load/store at @va (opcode @opcode, target register @reg) using
 * ldq_u/stq_u byte-extraction sequences.  On a nested fault, forwards
 * to the exception table fixup if one exists, otherwise dumps state
 * and kills the task.
 */
asmlinkage void
do_entUna(void * va, unsigned long opcode, unsigned long reg,
	  struct allregs *regs)
{
	long error, tmp1, tmp2, tmp3, tmp4;
	unsigned long pc = regs->pc - 4;	/* PC of the faulting insn */
	unsigned long *_regs = regs->regs;	/* needed by una_reg() */
	const struct exception_table_entry *fixup;

	unaligned[0].count++;
	unaligned[0].va = (unsigned long) va;
	unaligned[0].pc = pc;

	/* We don't want to use the generic get/put unaligned macros as
	   we want to trap exceptions.  Only if we actually get an
	   exception will we decide whether we should have caught it. */

	switch (opcode) {
	case 0x0c: /* ldwu */
		__asm__ __volatile__(
		"1:	ldq_u %1,0(%3)\n"
		"2:	ldq_u %2,1(%3)\n"
		"	extwl %1,%3,%1\n"
		"	extwh %2,%3,%2\n"
		"3:\n"
		EXC(1b,3b,%1,%0)
		EXC(2b,3b,%2,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
			: "r"(va), "0"(0));
		if (error)
			goto got_exception;
		una_reg(reg) = tmp1|tmp2;
		return;

	case 0x28: /* ldl */
		__asm__ __volatile__(
		"1:	ldq_u %1,0(%3)\n"
		"2:	ldq_u %2,3(%3)\n"
		"	extll %1,%3,%1\n"
		"	extlh %2,%3,%2\n"
		"3:\n"
		EXC(1b,3b,%1,%0)
		EXC(2b,3b,%2,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
			: "r"(va), "0"(0));
		if (error)
			goto got_exception;
		/* (int) cast sign-extends the 32-bit result, as ldl does. */
		una_reg(reg) = (int)(tmp1|tmp2);
		return;

	case 0x29: /* ldq */
		__asm__ __volatile__(
		"1:	ldq_u %1,0(%3)\n"
		"2:	ldq_u %2,7(%3)\n"
		"	extql %1,%3,%1\n"
		"	extqh %2,%3,%2\n"
		"3:\n"
		EXC(1b,3b,%1,%0)
		EXC(2b,3b,%2,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
			: "r"(va), "0"(0));
		if (error)
			goto got_exception;
		una_reg(reg) = tmp1|tmp2;
		return;

	/* Note that the store sequences do not indicate that they change
	   memory because it _should_ be affecting nothing in this context.
	   (Otherwise we have other, much larger, problems.) */
	case 0x0d: /* stw */
		__asm__ __volatile__(
		"1:	ldq_u %2,1(%5)\n"
		"2:	ldq_u %1,0(%5)\n"
		"	inswh %6,%5,%4\n"
		"	inswl %6,%5,%3\n"
		"	mskwh %2,%5,%2\n"
		"	mskwl %1,%5,%1\n"
		"	or %2,%4,%2\n"
		"	or %1,%3,%1\n"
		"3:	stq_u %2,1(%5)\n"
		"4:	stq_u %1,0(%5)\n"
		"5:\n"
		EXC(1b,5b,%2,%0)
		EXC(2b,5b,%1,%0)
		EXC(3b,5b,$31,%0)
		EXC(4b,5b,$31,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
			  "=&r"(tmp3), "=&r"(tmp4)
			: "r"(va), "r"(una_reg(reg)), "0"(0));
		if (error)
			goto got_exception;
		return;

	case 0x2c: /* stl */
		__asm__ __volatile__(
		"1:	ldq_u %2,3(%5)\n"
		"2:	ldq_u %1,0(%5)\n"
		"	inslh %6,%5,%4\n"
		"	insll %6,%5,%3\n"
		"	msklh %2,%5,%2\n"
		"	mskll %1,%5,%1\n"
		"	or %2,%4,%2\n"
		"	or %1,%3,%1\n"
		"3:	stq_u %2,3(%5)\n"
		"4:	stq_u %1,0(%5)\n"
		"5:\n"
		EXC(1b,5b,%2,%0)
		EXC(2b,5b,%1,%0)
		EXC(3b,5b,$31,%0)
		EXC(4b,5b,$31,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
			  "=&r"(tmp3), "=&r"(tmp4)
			: "r"(va), "r"(una_reg(reg)), "0"(0));
		if (error)
			goto got_exception;
		return;

	case 0x2d: /* stq */
		__asm__ __volatile__(
		"1:	ldq_u %2,7(%5)\n"
		"2:	ldq_u %1,0(%5)\n"
		"	insqh %6,%5,%4\n"
		"	insql %6,%5,%3\n"
		"	mskqh %2,%5,%2\n"
		"	mskql %1,%5,%1\n"
		"	or %2,%4,%2\n"
		"	or %1,%3,%1\n"
		"3:	stq_u %2,7(%5)\n"
		"4:	stq_u %1,0(%5)\n"
		"5:\n"
		EXC(1b,5b,%2,%0)
		EXC(2b,5b,%1,%0)
		EXC(3b,5b,$31,%0)
		EXC(4b,5b,$31,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
			  "=&r"(tmp3), "=&r"(tmp4)
			: "r"(va), "r"(una_reg(reg)), "0"(0));
		if (error)
			goto got_exception;
		return;
	}

	/* Any other opcode should never reach the kernel unaligned path. */
	printk("Bad unaligned kernel access at %016lx: %p %lx %lu\n",
		pc, va, opcode, reg);
	make_task_dead(SIGSEGV);

got_exception:
	/* Ok, we caught the exception, but we don't want it.  Is there
	   someone to pass it along to? */
	if ((fixup = search_exception_tables(pc)) != 0) {
		unsigned long newpc;
		newpc = fixup_exception(una_reg, fixup, pc);

		printk("Forwarding unaligned exception at %lx (%lx)\n",
		       pc, newpc);

		regs->pc = newpc;
		return;
	}

	/*
	 * Yikes!  No one to forward the exception to.
	 * Since the registers are in a weird format, dump them ourselves.
	 */

	printk("%s(%d): unhandled unaligned exception\n",
	       current->comm, task_pid_nr(current));

	printk("pc = [<%016lx>]  ra = [<%016lx>]  ps = %04lx\n",
	       pc, una_reg(26), regs->ps);
	printk("r0 = %016lx  r1 = %016lx  r2 = %016lx\n",
	       una_reg(0), una_reg(1), una_reg(2));
	printk("r3 = %016lx  r4 = %016lx  r5 = %016lx\n",
	       una_reg(3), una_reg(4), una_reg(5));
	printk("r6 = %016lx  r7 = %016lx  r8 = %016lx\n",
	       una_reg(6), una_reg(7), una_reg(8));
	printk("r9 = %016lx  r10= %016lx  r11= %016lx\n",
	       una_reg(9), una_reg(10), una_reg(11));
	printk("r12= %016lx  r13= %016lx  r14= %016lx\n",
	       una_reg(12), una_reg(13), una_reg(14));
	printk("r15= %016lx\n", una_reg(15));
	printk("r16= %016lx  r17= %016lx  r18= %016lx\n",
	       una_reg(16), una_reg(17), una_reg(18));
	printk("r19= %016lx  r20= %016lx  r21= %016lx\n",
	       una_reg(19), una_reg(20), una_reg(21));
	printk("r22= %016lx  r23= %016lx  r24= %016lx\n",
	       una_reg(22), una_reg(23), una_reg(24));
	printk("r25= %016lx  r27= %016lx  r28= %016lx\n",
	       una_reg(25), una_reg(27), una_reg(28));
	printk("gp = %016lx  sp = %p\n", regs->gp, regs+1);

	dik_show_code((unsigned int *)pc);
	dik_show_trace((unsigned long *)(regs+1), KERN_DEFAULT);

	/* Guard against recursive death, as in die_if_kernel(). */
	if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL)) {
		printk("die_if_kernel recursion detected.\n");
		local_irq_enable();
		while (1);
	}
	make_task_dead(SIGSEGV);
}
635
636 /*
637 * Convert an s-floating point value in memory format to the
638 * corresponding value in register format. The exponent
639 * needs to be remapped to preserve non-finite values
640 * (infinities, not-a-numbers, denormals).
641 */
/*
 * Convert an s-floating (IEEE single) value from its 32-bit memory
 * format to the 64-bit Alpha register format.  The 8-bit exponent is
 * widened to 11 bits in a way that preserves zeros/denormals (exponent
 * stays 0) and infinities/NaNs (exponent becomes all-ones), and the
 * fraction is shifted into the double-precision fraction position.
 */
static inline unsigned long
s_mem_to_reg (unsigned long s_mem)
{
	unsigned long fraction = s_mem & 0x7fffff;
	unsigned long sign_bit = (s_mem >> 31) & 0x1;
	unsigned long e_msb = (s_mem >> 30) & 0x1;
	unsigned long e_low = (s_mem >> 23) & 0x7f;
	/* Common case: splice the msb into bit 10 of the wide exponent. */
	unsigned long e_out = (e_msb << 10) | e_low;

	if (e_msb) {
		if (e_low == 0x7f)	/* Inf/NaN: saturate the exponent. */
			e_out = 0x7ff;
	} else if (e_low == 0x00) {	/* zero/denormal: keep exponent 0. */
		e_out = 0x000;
	} else {			/* normal: re-bias the exponent.  */
		e_out |= (0x7 << 7);
	}
	return (sign_bit << 63) | (e_out << 52) | (fraction << 29);
}
665
666 /*
667 * Convert an s-floating point value in register format to the
668 * corresponding value in memory format.
669 */
/*
 * Convert an s-floating value from 64-bit register format back to its
 * 32-bit memory format: the sign and exponent msb come from the top
 * two register bits, the low exponent bits and fraction from the
 * middle of the register.  Inverse of s_mem_to_reg for valid inputs.
 */
static inline unsigned long
s_reg_to_mem (unsigned long s_reg)
{
	unsigned long top = (s_reg >> 62) << 30;   /* sign + exponent msb */
	unsigned long low = (s_reg << 5) >> 34;    /* low exponent + fraction */

	return top | low;
}
675
676 /*
677 * Handle user-level unaligned fault. Handling user-level unaligned
678 * faults is *extremely* slow and produces nasty messages. A user
679 * program *should* fix unaligned faults ASAP.
680 *
681 * Notice that we have (almost) the regular kernel stack layout here,
682 * so finding the appropriate registers is a little more difficult
683 * than in the kernel case.
684 *
685 * Finally, we handle regular integer load/stores only. In
686 * particular, load-linked/store-conditionally and floating point
687 * load/stores are not supported. The former make no sense with
688 * unaligned faults (they are guaranteed to fail) and I don't think
689 * the latter will occur in any decent program.
690 *
691 * Sigh. We *do* have to handle some FP operations, because GCC will
692 * uses them as temporary storage for integer memory to memory copies.
693 * However, we need to deal with stt/ldt and sts/lds only.
694 */
695
/* Bitmask, indexed by opcode, of the integer load/store opcodes that
   do_entUnaUser fixes up by writing an integer register. */
#define OP_INT_MASK	( 1L << 0x28 | 1L << 0x2c   /* ldl stl */	\
			| 1L << 0x29 | 1L << 0x2d   /* ldq stq */	\
			| 1L << 0x0c | 1L << 0x0d   /* ldwu stw */	\
			| 1L << 0x0a | 1L << 0x0e ) /* ldbu stb */

/* Bitmask of the store opcodes (integer and floating). */
#define OP_WRITE_MASK	( 1L << 0x26 | 1L << 0x27   /* sts stt */	\
			| 1L << 0x2c | 1L << 0x2d   /* stl stq */	\
			| 1L << 0x0d | 1L << 0x0e ) /* stw stb */

/* Byte offset of member x within struct pt_regs. */
#define R(x)	((size_t) &((struct pt_regs *)0)->x)

/* Offset (relative to the pt_regs base) of each integer register,
   indexed by register number.  r30 (usp) and r31 (zero) are handled
   specially by the caller and get 0 here. */
static int unauser_reg_offsets[32] = {
	R(r0), R(r1), R(r2), R(r3), R(r4), R(r5), R(r6), R(r7), R(r8),
	/* r9 ... r15 are stored in front of regs. */
	-56, -48, -40, -32, -24, -16, -8,
	R(r16), R(r17), R(r18),
	R(r19), R(r20), R(r21), R(r22), R(r23), R(r24), R(r25), R(r26),
	R(r27), R(r28), R(gp),
	0, 0
};

#undef R
718
/*
 * User-mode unaligned access handler.  Honors the per-task UAC bits
 * (no-print / sigbus / no-fix), then emulates the faulting load/store
 * at @va.  Integer registers are patched via unauser_reg_offsets[];
 * usp (r30) and the zero register (r31) go through fake_reg.  Sends
 * SIGSEGV/SIGBUS on failure, with the PC rewound to the faulting insn.
 */
asmlinkage void
do_entUnaUser(void __user * va, unsigned long opcode,
	      unsigned long reg, struct pt_regs *regs)
{
	/* At most 5 messages per 5 seconds to avoid log flooding. */
	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);

	unsigned long tmp1, tmp2, tmp3, tmp4;
	unsigned long fake_reg, *reg_addr = &fake_reg;
	int si_code;
	long error;

	/* Check the UAC bits to decide what the user wants us to do
	   with the unaligned access. */

	if (!(current_thread_info()->status & TS_UAC_NOPRINT)) {
		if (__ratelimit(&ratelimit)) {
			printk("%s(%d): unaligned trap at %016lx: %p %lx %ld\n",
			       current->comm, task_pid_nr(current),
			       regs->pc - 4, va, opcode, reg);
		}
	}
	if ((current_thread_info()->status & TS_UAC_SIGBUS))
		goto give_sigbus;
	/* Not sure why you'd want to use this, but... */
	if ((current_thread_info()->status & TS_UAC_NOFIX))
		return;

	/* Don't bother reading ds in the access check since we already
	   know that this came from the user.  Also rely on the fact that
	   the page at TASK_SIZE is unmapped and so can't be touched anyway. */
	if ((unsigned long)va >= TASK_SIZE)
		goto give_sigsegv;

	++unaligned[1].count;
	unaligned[1].va = (unsigned long)va;
	unaligned[1].pc = regs->pc - 4;

	if ((1L << opcode) & OP_INT_MASK) {
		/* it's an integer load/store */
		if (reg < 30) {
			reg_addr = (unsigned long *)
			  ((char *)regs + unauser_reg_offsets[reg]);
		} else if (reg == 30) {
			/* usp in PAL regs */
			fake_reg = rdusp();
		} else {
			/* zero "register" */
			fake_reg = 0;
		}
	}

	/* We don't want to use the generic get/put unaligned macros as
	   we want to trap exceptions.  Only if we actually get an
	   exception will we decide whether we should have caught it. */

	switch (opcode) {
	case 0x0c: /* ldwu */
		__asm__ __volatile__(
		"1:	ldq_u %1,0(%3)\n"
		"2:	ldq_u %2,1(%3)\n"
		"	extwl %1,%3,%1\n"
		"	extwh %2,%3,%2\n"
		"3:\n"
		EXC(1b,3b,%1,%0)
		EXC(2b,3b,%2,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
			: "r"(va), "0"(0));
		if (error)
			goto give_sigsegv;
		*reg_addr = tmp1|tmp2;
		break;

	case 0x22: /* lds */
		__asm__ __volatile__(
		"1:	ldq_u %1,0(%3)\n"
		"2:	ldq_u %2,3(%3)\n"
		"	extll %1,%3,%1\n"
		"	extlh %2,%3,%2\n"
		"3:\n"
		EXC(1b,3b,%1,%0)
		EXC(2b,3b,%2,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
			: "r"(va), "0"(0));
		if (error)
			goto give_sigsegv;
		alpha_write_fp_reg(reg, s_mem_to_reg((int)(tmp1|tmp2)));
		return;

	case 0x23: /* ldt */
		__asm__ __volatile__(
		"1:	ldq_u %1,0(%3)\n"
		"2:	ldq_u %2,7(%3)\n"
		"	extql %1,%3,%1\n"
		"	extqh %2,%3,%2\n"
		"3:\n"
		EXC(1b,3b,%1,%0)
		EXC(2b,3b,%2,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
			: "r"(va), "0"(0));
		if (error)
			goto give_sigsegv;
		alpha_write_fp_reg(reg, tmp1|tmp2);
		return;

	case 0x28: /* ldl */
		__asm__ __volatile__(
		"1:	ldq_u %1,0(%3)\n"
		"2:	ldq_u %2,3(%3)\n"
		"	extll %1,%3,%1\n"
		"	extlh %2,%3,%2\n"
		"3:\n"
		EXC(1b,3b,%1,%0)
		EXC(2b,3b,%2,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
			: "r"(va), "0"(0));
		if (error)
			goto give_sigsegv;
		/* (int) cast sign-extends, matching ldl semantics. */
		*reg_addr = (int)(tmp1|tmp2);
		break;

	case 0x29: /* ldq */
		__asm__ __volatile__(
		"1:	ldq_u %1,0(%3)\n"
		"2:	ldq_u %2,7(%3)\n"
		"	extql %1,%3,%1\n"
		"	extqh %2,%3,%2\n"
		"3:\n"
		EXC(1b,3b,%1,%0)
		EXC(2b,3b,%2,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
			: "r"(va), "0"(0));
		if (error)
			goto give_sigsegv;
		*reg_addr = tmp1|tmp2;
		break;

	/* Note that the store sequences do not indicate that they change
	   memory because it _should_ be affecting nothing in this context.
	   (Otherwise we have other, much larger, problems.) */
	case 0x0d: /* stw */
		__asm__ __volatile__(
		"1:	ldq_u %2,1(%5)\n"
		"2:	ldq_u %1,0(%5)\n"
		"	inswh %6,%5,%4\n"
		"	inswl %6,%5,%3\n"
		"	mskwh %2,%5,%2\n"
		"	mskwl %1,%5,%1\n"
		"	or %2,%4,%2\n"
		"	or %1,%3,%1\n"
		"3:	stq_u %2,1(%5)\n"
		"4:	stq_u %1,0(%5)\n"
		"5:\n"
		EXC(1b,5b,%2,%0)
		EXC(2b,5b,%1,%0)
		EXC(3b,5b,$31,%0)
		EXC(4b,5b,$31,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
			  "=&r"(tmp3), "=&r"(tmp4)
			: "r"(va), "r"(*reg_addr), "0"(0));
		if (error)
			goto give_sigsegv;
		return;

	case 0x26: /* sts */
		/* Convert the FP register to memory format, then store
		   it through the integer stl path below. */
		fake_reg = s_reg_to_mem(alpha_read_fp_reg(reg));
		fallthrough;

	case 0x2c: /* stl */
		__asm__ __volatile__(
		"1:	ldq_u %2,3(%5)\n"
		"2:	ldq_u %1,0(%5)\n"
		"	inslh %6,%5,%4\n"
		"	insll %6,%5,%3\n"
		"	msklh %2,%5,%2\n"
		"	mskll %1,%5,%1\n"
		"	or %2,%4,%2\n"
		"	or %1,%3,%1\n"
		"3:	stq_u %2,3(%5)\n"
		"4:	stq_u %1,0(%5)\n"
		"5:\n"
		EXC(1b,5b,%2,%0)
		EXC(2b,5b,%1,%0)
		EXC(3b,5b,$31,%0)
		EXC(4b,5b,$31,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
			  "=&r"(tmp3), "=&r"(tmp4)
			: "r"(va), "r"(*reg_addr), "0"(0));
		if (error)
			goto give_sigsegv;
		return;

	case 0x27: /* stt */
		fake_reg = alpha_read_fp_reg(reg);
		fallthrough;

	case 0x2d: /* stq */
		__asm__ __volatile__(
		"1:	ldq_u %2,7(%5)\n"
		"2:	ldq_u %1,0(%5)\n"
		"	insqh %6,%5,%4\n"
		"	insql %6,%5,%3\n"
		"	mskqh %2,%5,%2\n"
		"	mskql %1,%5,%1\n"
		"	or %2,%4,%2\n"
		"	or %1,%3,%1\n"
		"3:	stq_u %2,7(%5)\n"
		"4:	stq_u %1,0(%5)\n"
		"5:\n"
		EXC(1b,5b,%2,%0)
		EXC(2b,5b,%1,%0)
		EXC(3b,5b,$31,%0)
		EXC(4b,5b,$31,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
			  "=&r"(tmp3), "=&r"(tmp4)
			: "r"(va), "r"(*reg_addr), "0"(0));
		if (error)
			goto give_sigsegv;
		return;

	default:
		/* What instruction were you trying to use, exactly? */
		goto give_sigbus;
	}

	/* Only integer loads should get here; everyone else returns early. */
	if (reg == 30)
		wrusp(fake_reg);
	return;

give_sigsegv:
	regs->pc -= 4;  /* make pc point to faulting insn */

	/* We need to replicate some of the logic in mm/fault.c,
	   since we don't have access to the fault code in the
	   exception handling return path. */
	if ((unsigned long)va >= TASK_SIZE)
		si_code = SEGV_ACCERR;
	else {
		struct mm_struct *mm = current->mm;
		mmap_read_lock(mm);
		if (find_vma(mm, (unsigned long)va))
			si_code = SEGV_ACCERR;
		else
			si_code = SEGV_MAPERR;
		mmap_read_unlock(mm);
	}
	send_sig_fault(SIGSEGV, si_code, va, current);
	return;

give_sigbus:
	regs->pc -= 4;
	send_sig_fault(SIGBUS, BUS_ADRALN, va, current);
	return;
}
973
/*
 * Install the kernel's trap entry points via PALcode.
 * NOTE(review): entry 0 (interrupts, entInt) is not installed here --
 * presumably done elsewhere during boot; confirm.
 */
void
trap_init(void)
{
	/* Tell PAL-code what global pointer we want in the kernel. */
	register unsigned long gptr __asm__("$29");
	wrkgp(gptr);

	/* Hack for Multia (UDB) and JENSEN: some of their SRMs have
	   a bug in the handling of the opDEC fault.  Fix it up if so. */
	if (implver() == IMPLVER_EV4)
		opDEC_check();

	wrent(entArith, 1);	/* arithmetic traps */
	wrent(entMM, 2);	/* memory-management faults */
	wrent(entIF, 3);	/* instruction faults */
	wrent(entUna, 4);	/* unaligned accesses */
	wrent(entSys, 5);	/* system calls */
	wrent(entDbg, 6);	/* debug / MILO entry */
}
993