xref: /openbmc/linux/arch/parisc/kernel/traps.c (revision 75c6d083)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  linux/arch/parisc/traps.c
4  *
5  *  Copyright (C) 1991, 1992  Linus Torvalds
6  *  Copyright (C) 1999, 2000  Philipp Rumpf <prumpf@tux.org>
7  */
8 
9 /*
10  * 'Traps.c' handles hardware traps and faults after we have saved some
11  * state in 'asm.s'.
12  */
13 
14 #include <linux/sched.h>
15 #include <linux/sched/debug.h>
16 #include <linux/kernel.h>
17 #include <linux/string.h>
18 #include <linux/errno.h>
19 #include <linux/ptrace.h>
20 #include <linux/timer.h>
21 #include <linux/delay.h>
22 #include <linux/mm.h>
23 #include <linux/module.h>
24 #include <linux/smp.h>
25 #include <linux/spinlock.h>
26 #include <linux/init.h>
27 #include <linux/interrupt.h>
28 #include <linux/console.h>
29 #include <linux/bug.h>
30 #include <linux/ratelimit.h>
31 #include <linux/uaccess.h>
32 #include <linux/kdebug.h>
33 #include <linux/kfence.h>
34 
35 #include <asm/assembly.h>
36 #include <asm/io.h>
37 #include <asm/irq.h>
38 #include <asm/traps.h>
39 #include <asm/unaligned.h>
40 #include <linux/atomic.h>
41 #include <asm/smp.h>
42 #include <asm/pdc.h>
43 #include <asm/pdc_chassis.h>
44 #include <asm/unwind.h>
45 #include <asm/tlbflush.h>
46 #include <asm/cacheflush.h>
47 #include <linux/kgdb.h>
48 #include <linux/kprobes.h>
49 
50 #if defined(CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK)
51 #include <asm/spinlock.h>
52 #endif
53 
54 #include "../math-emu/math-emu.h"	/* for handle_fpe() */
55 
56 static void parisc_show_stack(struct task_struct *task,
57 	struct pt_regs *regs, const char *loglvl);
58 
59 static int printbinary(char *buf, unsigned long x, int nbits)
60 {
61 	unsigned long mask = 1UL << (nbits - 1);
62 	while (mask != 0) {
63 		*buf++ = (mask & x ? '1' : '0');
64 		mask >>= 1;
65 	}
66 	*buf = '\0';
67 
68 	return nbits;
69 }
70 
71 #ifdef CONFIG_64BIT
72 #define RFMT "%016lx"
73 #else
74 #define RFMT "%08lx"
75 #endif
76 #define FFMT "%016llx"	/* fpregs are 64-bit always */
77 
78 #define PRINTREGS(lvl,r,f,fmt,x)	\
79 	printk("%s%s%02d-%02d  " fmt " " fmt " " fmt " " fmt "\n",	\
80 		lvl, f, (x), (x+3), (r)[(x)+0], (r)[(x)+1],		\
81 		(r)[(x)+2], (r)[(x)+3])
82 
83 static void print_gr(const char *level, struct pt_regs *regs)
84 {
85 	int i;
86 	char buf[64];
87 
88 	printk("%s\n", level);
89 	printk("%s     YZrvWESTHLNXBCVMcbcbcbcbOGFRQPDI\n", level);
90 	printbinary(buf, regs->gr[0], 32);
91 	printk("%sPSW: %s %s\n", level, buf, print_tainted());
92 
93 	for (i = 0; i < 32; i += 4)
94 		PRINTREGS(level, regs->gr, "r", RFMT, i);
95 }
96 
97 static void print_fr(const char *level, struct pt_regs *regs)
98 {
99 	int i;
100 	char buf[64];
101 	struct { u32 sw[2]; } s;
102 
103 	/* FR are 64bit everywhere. Need to use asm to get the content
104 	 * of fpsr/fper1, and we assume that we won't have a FP Identify
105 	 * in our way, otherwise we're screwed.
106 	 * The fldd is used to restore the T-bit if there was one, as the
107 	 * store clears it anyway.
108 	 * PA2.0 book says "thou shall not use fstw on FPSR/FPERs" - T-Bone */
109 	asm volatile ("fstd %%fr0,0(%1)	\n\t"
110 		      "fldd 0(%1),%%fr0	\n\t"
111 		      : "=m" (s) : "r" (&s) : "r0");
112 
113 	printk("%s\n", level);
114 	printk("%s      VZOUICununcqcqcqcqcqcrmunTDVZOUI\n", level);
115 	printbinary(buf, s.sw[0], 32);
116 	printk("%sFPSR: %s\n", level, buf);
117 	printk("%sFPER1: %08x\n", level, s.sw[1]);
118 
119 	/* here we'll print fr0 again, though it'll be meaningless */
120 	for (i = 0; i < 32; i += 4)
121 		PRINTREGS(level, regs->fr, "fr", FFMT, i);
122 }
123 
124 void show_regs(struct pt_regs *regs)
125 {
126 	int i, user;
127 	const char *level;
128 	unsigned long cr30, cr31;
129 
130 	user = user_mode(regs);
131 	level = user ? KERN_DEBUG : KERN_CRIT;
132 
133 	show_regs_print_info(level);
134 
135 	print_gr(level, regs);
136 
137 	for (i = 0; i < 8; i += 4)
138 		PRINTREGS(level, regs->sr, "sr", RFMT, i);
139 
140 	if (user)
141 		print_fr(level, regs);
142 
143 	cr30 = mfctl(30);
144 	cr31 = mfctl(31);
145 	printk("%s\n", level);
146 	printk("%sIASQ: " RFMT " " RFMT " IAOQ: " RFMT " " RFMT "\n",
147 	       level, regs->iasq[0], regs->iasq[1], regs->iaoq[0], regs->iaoq[1]);
148 	printk("%s IIR: %08lx    ISR: " RFMT "  IOR: " RFMT "\n",
149 	       level, regs->iir, regs->isr, regs->ior);
150 	printk("%s CPU: %8d   CR30: " RFMT " CR31: " RFMT "\n",
151 	       level, task_cpu(current), cr30, cr31);
152 	printk("%s ORIG_R28: " RFMT "\n", level, regs->orig_r28);
153 
154 	if (user) {
155 		printk("%s IAOQ[0]: " RFMT "\n", level, regs->iaoq[0]);
156 		printk("%s IAOQ[1]: " RFMT "\n", level, regs->iaoq[1]);
157 		printk("%s RP(r2): " RFMT "\n", level, regs->gr[2]);
158 	} else {
159 		printk("%s IAOQ[0]: %pS\n", level, (void *) regs->iaoq[0]);
160 		printk("%s IAOQ[1]: %pS\n", level, (void *) regs->iaoq[1]);
161 		printk("%s RP(r2): %pS\n", level, (void *) regs->gr[2]);
162 
163 		parisc_show_stack(current, regs, KERN_DEFAULT);
164 	}
165 }
166 
167 static DEFINE_RATELIMIT_STATE(_hppa_rs,
168 	DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);
169 
170 #define parisc_printk_ratelimited(critical, regs, fmt, ...)	{	      \
171 	if ((critical || show_unhandled_signals) && __ratelimit(&_hppa_rs)) { \
172 		printk(fmt, ##__VA_ARGS__);				      \
173 		show_regs(regs);					      \
174 	}								      \
175 }
176 
177 
178 static void do_show_stack(struct unwind_frame_info *info, const char *loglvl)
179 {
180 	int i = 1;
181 
182 	printk("%sBacktrace:\n", loglvl);
183 	while (i <= MAX_UNWIND_ENTRIES) {
184 		if (unwind_once(info) < 0 || info->ip == 0)
185 			break;
186 
187 		if (__kernel_text_address(info->ip)) {
188 			printk("%s [<" RFMT ">] %pS\n",
189 				loglvl, info->ip, (void *) info->ip);
190 			i++;
191 		}
192 	}
193 	printk("%s\n", loglvl);
194 }
195 
196 static void parisc_show_stack(struct task_struct *task,
197 	struct pt_regs *regs, const char *loglvl)
198 {
199 	struct unwind_frame_info info;
200 
201 	unwind_frame_init_task(&info, task, regs);
202 
203 	do_show_stack(&info, loglvl);
204 }
205 
206 void show_stack(struct task_struct *t, unsigned long *sp, const char *loglvl)
207 {
208 	parisc_show_stack(t, NULL, loglvl);
209 }
210 
211 int is_valid_bugaddr(unsigned long iaoq)
212 {
213 	return 1;
214 }
215 
216 void die_if_kernel(char *str, struct pt_regs *regs, long err)
217 {
218 	if (user_mode(regs)) {
219 		if (err == 0)
220 			return; /* STFU */
221 
222 		parisc_printk_ratelimited(1, regs,
223 			KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
224 			current->comm, task_pid_nr(current), str, err, regs->iaoq[0]);
225 
226 		return;
227 	}
228 
229 	bust_spinlocks(1);
230 
231 	oops_enter();
232 
233 	/* Amuse the user in a SPARC fashion */
234 	if (err) printk(KERN_CRIT
235 			"      _______________________________ \n"
236 			"     < Your System ate a SPARC! Gah! >\n"
237 			"      ------------------------------- \n"
238 			"             \\   ^__^\n"
239 			"                 (__)\\       )\\/\\\n"
240 			"                  U  ||----w |\n"
241 			"                     ||     ||\n");
242 
243 	/* unlock the pdc lock if necessary */
244 	pdc_emergency_unlock();
245 
246 	if (err)
247 		printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n",
248 			current->comm, task_pid_nr(current), str, err);
249 
250 	/* Wot's wrong wif bein' racy? */
251 	if (current->thread.flags & PARISC_KERNEL_DEATH) {
252 		printk(KERN_CRIT "%s() recursion detected.\n", __func__);
253 		local_irq_enable();
254 		while (1);
255 	}
256 	current->thread.flags |= PARISC_KERNEL_DEATH;
257 
258 	show_regs(regs);
259 	dump_stack();
260 	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
261 
262 	if (in_interrupt())
263 		panic("Fatal exception in interrupt");
264 
265 	if (panic_on_oops)
266 		panic("Fatal exception");
267 
268 	oops_exit();
269 	make_task_dead(SIGSEGV);
270 }
271 
272 /* gdb uses break 4,8 */
273 #define GDB_BREAK_INSN 0x10004
274 static void handle_gdb_break(struct pt_regs *regs, int wot)
275 {
276 	force_sig_fault(SIGTRAP, wot,
277 			(void __user *) (regs->iaoq[0] & ~3));
278 }
279 
280 static void handle_break(struct pt_regs *regs)
281 {
282 	unsigned iir = regs->iir;
283 
284 	if (unlikely(iir == PARISC_BUG_BREAK_INSN && !user_mode(regs))) {
285 		/* check if a BUG() or WARN() trapped here.  */
286 		enum bug_trap_type tt;
287 		tt = report_bug(regs->iaoq[0] & ~3, regs);
288 		if (tt == BUG_TRAP_TYPE_WARN) {
289 			regs->iaoq[0] += 4;
290 			regs->iaoq[1] += 4;
291 			return; /* return to next instruction when WARN_ON().  */
292 		}
293 		die_if_kernel("Unknown kernel breakpoint", regs,
294 			(tt == BUG_TRAP_TYPE_NONE) ? 9 : 0);
295 	}
296 
297 #ifdef CONFIG_KPROBES
298 	if (unlikely(iir == PARISC_KPROBES_BREAK_INSN && !user_mode(regs))) {
299 		parisc_kprobe_break_handler(regs);
300 		return;
301 	}
302 	if (unlikely(iir == PARISC_KPROBES_BREAK_INSN2 && !user_mode(regs))) {
303 		parisc_kprobe_ss_handler(regs);
304 		return;
305 	}
306 #endif
307 
308 #ifdef CONFIG_KGDB
309 	if (unlikely((iir == PARISC_KGDB_COMPILED_BREAK_INSN ||
310 		iir == PARISC_KGDB_BREAK_INSN)) && !user_mode(regs)) {
311 		kgdb_handle_exception(9, SIGTRAP, 0, regs);
312 		return;
313 	}
314 #endif
315 
316 #ifdef CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK
317 	if ((iir == SPINLOCK_BREAK_INSN) && !user_mode(regs)) {
318 		die_if_kernel("Spinlock was trashed", regs, 1);
319 	}
320 #endif
321 
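	/* Unexpected break: log the break im5,im13 operands as decoded from the IIR below. */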
322 	if (unlikely(iir != GDB_BREAK_INSN))
323 		parisc_printk_ratelimited(0, regs,
324 			KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
325 			iir & 31, (iir>>13) & ((1<<13)-1),
326 			task_pid_nr(current), current->comm);
327 
328 	/* send standard GDB signal */
329 	handle_gdb_break(regs, TRAP_BRKPT);
330 }
331 
332 static void default_trap(int code, struct pt_regs *regs)
333 {
334 	printk(KERN_ERR "Trap %d on CPU %d\n", code, smp_processor_id());
335 	show_regs(regs);
336 }
337 
338 static void transfer_pim_to_trap_frame(struct pt_regs *regs)
339 {
340     register int i;
341     extern unsigned int hpmc_pim_data[];
342     struct pdc_hpmc_pim_11 *pim_narrow;
343     struct pdc_hpmc_pim_20 *pim_wide;
344 
345     if (boot_cpu_data.cpu_type >= pcxu) {
346 
347 	pim_wide = (struct pdc_hpmc_pim_20 *)hpmc_pim_data;
348 
349 	/*
350 	 * Note: The following code will probably generate a
351 	 * bunch of truncation error warnings from the compiler.
352 	 * Could be handled with an ifdef, but perhaps there
353 	 * is a better way.
354 	 */
355 
356 	regs->gr[0] = pim_wide->cr[22];
357 
358 	for (i = 1; i < 32; i++)
359 	    regs->gr[i] = pim_wide->gr[i];
360 
361 	for (i = 0; i < 32; i++)
362 	    regs->fr[i] = pim_wide->fr[i];
363 
364 	for (i = 0; i < 8; i++)
365 	    regs->sr[i] = pim_wide->sr[i];
366 
367 	regs->iasq[0] = pim_wide->cr[17];
368 	regs->iasq[1] = pim_wide->iasq_back;
369 	regs->iaoq[0] = pim_wide->cr[18];
370 	regs->iaoq[1] = pim_wide->iaoq_back;
371 
372 	regs->sar  = pim_wide->cr[11];
373 	regs->iir  = pim_wide->cr[19];
374 	regs->isr  = pim_wide->cr[20];
375 	regs->ior  = pim_wide->cr[21];
376     }
377     else {
378 	pim_narrow = (struct pdc_hpmc_pim_11 *)hpmc_pim_data;
379 
380 	regs->gr[0] = pim_narrow->cr[22];
381 
382 	for (i = 1; i < 32; i++)
383 	    regs->gr[i] = pim_narrow->gr[i];
384 
385 	for (i = 0; i < 32; i++)
386 	    regs->fr[i] = pim_narrow->fr[i];
387 
388 	for (i = 0; i < 8; i++)
389 	    regs->sr[i] = pim_narrow->sr[i];
390 
391 	regs->iasq[0] = pim_narrow->cr[17];
392 	regs->iasq[1] = pim_narrow->iasq_back;
393 	regs->iaoq[0] = pim_narrow->cr[18];
394 	regs->iaoq[1] = pim_narrow->iaoq_back;
395 
396 	regs->sar  = pim_narrow->cr[11];
397 	regs->iir  = pim_narrow->cr[19];
398 	regs->isr  = pim_narrow->cr[20];
399 	regs->ior  = pim_narrow->cr[21];
400     }
401 
402     /*
403      * The following fields only have meaning if we came through
404      * another path. So just zero them here.
405      */
406 
407     regs->ksp = 0;
408     regs->kpc = 0;
409     regs->orig_r28 = 0;
410 }
411 
412 
413 /*
414  * This routine is called as a last resort when everything else
415  * has gone clearly wrong. We get called for faults in kernel space,
416  * and HPMC's.
417  */
418 void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long offset)
419 {
420 	static DEFINE_SPINLOCK(terminate_lock);
421 
422 	(void)notify_die(DIE_OOPS, msg, regs, 0, code, SIGTRAP);
423 	bust_spinlocks(1);
424 
425 	set_eiem(0);
426 	local_irq_disable();
427 	spin_lock(&terminate_lock);
428 
429 	/* unlock the pdc lock if necessary */
430 	pdc_emergency_unlock();
431 
432 	/* Not all paths will gutter the processor... */
433 	switch(code){
434 
435 	case 1:
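		/* Code 1 is an HPMC: the register state was captured in the PIM
		 * area, so rebuild the trap frame from it before printing. */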
436 		transfer_pim_to_trap_frame(regs);
437 		break;
438 
439 	default:
440 		break;
441 
442 	}
443 
444 	{
445 		/* show_stack(NULL, (unsigned long *)regs->gr[30]); */
446 		struct unwind_frame_info info;
447 		unwind_frame_init(&info, current, regs);
448 		do_show_stack(&info, KERN_CRIT);
449 	}
450 
451 	printk("\n");
452 	pr_crit("%s: Code=%d (%s) at addr " RFMT "\n",
453 		msg, code, trap_name(code), offset);
454 	show_regs(regs);
455 
456 	spin_unlock(&terminate_lock);
457 
458 	/* put soft power button back under hardware control;
459 	 * if the user had pressed it once at any time, the
460 	 * system will shut down immediately right here. */
461 	pdc_soft_power_button(0);
462 
463 	/* Call kernel panic() so reboot timeouts work properly
464 	 * FIXME: This function should be on the list of
465 	 * panic notifiers, and we should call panic
466 	 * directly from the location that we wish.
467 	 * e.g. We should not call panic from
468 	 * parisc_terminate, but rather the other way around.
469 	 * This hack works, prints the panic message twice,
470 	 * and it enables reboot timers!
471 	 */
472 	panic(msg);
473 }
474 
475 void notrace handle_interruption(int code, struct pt_regs *regs)
476 {
477 	unsigned long fault_address = 0;
478 	unsigned long fault_space = 0;
479 	int si_code;
480 
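	/* regs->gr[0] holds the interrupted context's PSW; re-enable
	 * interrupts here if they were enabled in that context. */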
481 	if (!irqs_disabled_flags(regs->gr[0]))
482 	    local_irq_enable();
483 
484 	/* Security check:
485 	 * If the priority level is still user, and the
486 	 * faulting space is not equal to the active space
487 	 * then the user is attempting something in a space
488 	 * that does not belong to them. Kill the process.
489 	 *
490 	 * This is normally the situation when the user
491 	 * attempts to jump into the kernel space at the
492 	 * wrong offset, be it at the gateway page or a
493 	 * random location.
494 	 *
495 	 * We cannot normally signal the process because it
496 	 * could *be* on the gateway page, and processes
497 	 * executing on the gateway page can't have signals
498 	 * delivered.
499 	 *
500 	 * We merely readjust the address into the user's
501 	 * space, at a destination address of zero, and
502 	 * allow processing to continue.
503 	 */
504 	if (((unsigned long)regs->iaoq[0] & 3) &&
505 	    ((unsigned long)regs->iasq[0] != (unsigned long)regs->sr[7])) {
506 		/* Kill the user process later */
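		/* The low two bits of an IAOQ value encode the privilege
		 * level; 3 is the lowest (user) privilege, offset 0. */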
507 		regs->iaoq[0] = 0 | 3;
508 		regs->iaoq[1] = regs->iaoq[0] + 4;
509 		regs->iasq[0] = regs->iasq[1] = regs->sr[7];
510 		regs->gr[0] &= ~PSW_B;
511 		return;
512 	}
513 
514 #if 0
515 	printk(KERN_CRIT "Interruption # %d\n", code);
516 #endif
517 
518 	switch(code) {
519 
520 	case  1:
521 		/* High-priority machine check (HPMC) */
522 
523 		/* set up a new led state on systems shipped with a LED State panel */
524 		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_HPMC);
525 
526 		parisc_terminate("High Priority Machine Check (HPMC)",
527 				regs, code, 0);
528 		/* NOT REACHED */
529 
530 	case  2:
531 		/* Power failure interrupt */
532 		printk(KERN_CRIT "Power failure interrupt !\n");
533 		return;
534 
535 	case  3:
536 		/* Recovery counter trap */
537 		regs->gr[0] &= ~PSW_R;
538 
539 #ifdef CONFIG_KGDB
540 		if (kgdb_single_step) {
541 			kgdb_handle_exception(0, SIGTRAP, 0, regs);
542 			return;
543 		}
544 #endif
545 
546 		if (user_space(regs))
547 			handle_gdb_break(regs, TRAP_TRACE);
548 		/* else this must be the start of a syscall - just let it run */
549 		return;
550 
551 	case  5:
552 		/* Low-priority machine check */
553 		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_LPMC);
554 
555 		flush_cache_all();
556 		flush_tlb_all();
557 		default_trap(code, regs);
558 		return;
559 
560 	case  PARISC_ITLB_TRAP:
561 		/* Instruction TLB miss fault/Instruction page fault */
562 		fault_address = regs->iaoq[0];
563 		fault_space   = regs->iasq[0];
564 		break;
565 
566 	case  8:
567 		/* Illegal instruction trap */
568 		die_if_kernel("Illegal instruction", regs, code);
569 		si_code = ILL_ILLOPC;
570 		goto give_sigill;
571 
572 	case  9:
573 		/* Break instruction trap */
574 		handle_break(regs);
575 		return;
576 
577 	case 10:
578 		/* Privileged operation trap */
579 		die_if_kernel("Privileged operation", regs, code);
580 		si_code = ILL_PRVOPC;
581 		goto give_sigill;
582 
583 	case 11:
584 		/* Privileged register trap */
585 		if ((regs->iir & 0xffdfffe0) == 0x034008a0) {
586 
587 			/* This is a MFCTL cr26/cr27 to gr instruction.
588 			 * PCXS traps on this, so we need to emulate it.
589 			 */
590 
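			/* One opcode bit selects the source (cr27 if set,
			 * cr26 otherwise); the low 5 bits give the target gr. */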
591 			if (regs->iir & 0x00200000)
592 				regs->gr[regs->iir & 0x1f] = mfctl(27);
593 			else
594 				regs->gr[regs->iir & 0x1f] = mfctl(26);
595 
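			/* Advance the instruction queue past the emulated mfctl. */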
596 			regs->iaoq[0] = regs->iaoq[1];
597 			regs->iaoq[1] += 4;
598 			regs->iasq[0] = regs->iasq[1];
599 			return;
600 		}
601 
602 		die_if_kernel("Privileged register usage", regs, code);
603 		si_code = ILL_PRVREG;
604 	give_sigill:
605 		force_sig_fault(SIGILL, si_code,
606 				(void __user *) regs->iaoq[0]);
607 		return;
608 
609 	case 12:
610 		/* Overflow Trap, let the userland signal handler do the cleanup */
611 		force_sig_fault(SIGFPE, FPE_INTOVF,
612 				(void __user *) regs->iaoq[0]);
613 		return;
614 
615 	case 13:
616 		/* Conditional Trap
617 		   The condition succeeds in an instruction which traps
618 		   on condition  */
619 		if(user_mode(regs)){
620 			/* Let userspace app figure it out from the insn pointed
621 			 * to by si_addr.
622 			 */
623 			force_sig_fault(SIGFPE, FPE_CONDTRAP,
624 					(void __user *) regs->iaoq[0]);
625 			return;
626 		}
627 		/* The kernel doesn't want to handle condition codes */
628 		break;
629 
630 	case 14:
631 		/* Assist Exception Trap, i.e. floating point exception. */
632 		die_if_kernel("Floating point exception", regs, 0); /* quiet */
633 		__inc_irq_stat(irq_fpassist_count);
634 		handle_fpe(regs);
635 		return;
636 
637 	case 15:
638 		/* Data TLB miss fault/Data page fault */
639 		fallthrough;
640 	case 16:
641 		/* Non-access instruction TLB miss fault */
642 		/* The instruction TLB entry needed for the target address of the FIC
643 		   is absent, and hardware can't find it, so we get to cleanup */
644 		fallthrough;
645 	case 17:
646 		/* Non-access data TLB miss fault/Non-access data page fault */
647 		/* FIXME:
648 			 Still need to add slow path emulation code here!
649 			 If the insn used a non-shadow register, then the tlb
650 			 handlers could not have their side-effect (e.g. probe
651 			 writing to a target register) emulated since rfir would
652 			 erase the changes to said register. Instead we have to
653 			 setup everything, call this function we are in, and emulate
654 			 by hand. Technically we need to emulate:
655 			 fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw
656 		*/
657 		if (code == 17 && handle_nadtlb_fault(regs))
658 			return;
659 		fault_address = regs->ior;
660 		fault_space = regs->isr;
661 		break;
662 
663 	case 18:
664 		/* PCXS only -- later cpu's split this into types 26,27 & 28 */
665 		/* Check for unaligned access */
666 		if (check_unaligned(regs)) {
667 			handle_unaligned(regs);
668 			return;
669 		}
670 		fallthrough;
671 	case 26:
672 		/* PCXL: Data memory access rights trap */
673 		fault_address = regs->ior;
674 		fault_space   = regs->isr;
675 		break;
676 
677 	case 19:
678 		/* Data memory break trap */
679 		regs->gr[0] |= PSW_X; /* So we can single-step over the trap */
680 		fallthrough;
681 	case 21:
682 		/* Page reference trap */
683 		handle_gdb_break(regs, TRAP_HWBKPT);
684 		return;
685 
686 	case 25:
687 		/* Taken branch trap */
688 		regs->gr[0] &= ~PSW_T;
689 		if (user_space(regs))
690 			handle_gdb_break(regs, TRAP_BRANCH);
691 		/* else this must be the start of a syscall - just let it
692 		 * run.
693 		 */
694 		return;
695 
696 	case  7:
697 		/* Instruction access rights */
698 		/* PCXL: Instruction memory protection trap */
699 
700 		/*
701 		 * This could be caused by either: 1) a process attempting
702 		 * to execute within a vma that does not have execute
703 		 * permission, or 2) an access rights violation caused by a
704 		 * flush only translation set up by ptep_get_and_clear().
705 		 * So we check the vma permissions to differentiate the two.
706 		 * If the vma indicates we have execute permission, then
707 		 * the cause is the latter one. In this case, we need to
708 		 * call do_page_fault() to fix the problem.
709 		 */
710 
711 		if (user_mode(regs)) {
712 			struct vm_area_struct *vma;
713 
714 			mmap_read_lock(current->mm);
715 			vma = find_vma(current->mm,regs->iaoq[0]);
716 			if (vma && (regs->iaoq[0] >= vma->vm_start)
717 				&& (vma->vm_flags & VM_EXEC)) {
718 
719 				fault_address = regs->iaoq[0];
720 				fault_space = regs->iasq[0];
721 
722 				mmap_read_unlock(current->mm);
723 				break; /* call do_page_fault() */
724 			}
725 			mmap_read_unlock(current->mm);
726 		}
727 		/* CPU could not fetch instruction, so clear stale IIR value. */
728 		regs->iir = 0xbaadf00d;
729 		fallthrough;
730 	case 27:
731 		/* Data memory protection ID trap */
732 		if (code == 27 && !user_mode(regs) &&
733 			fixup_exception(regs))
734 			return;
735 
736 		die_if_kernel("Protection id trap", regs, code);
737 		force_sig_fault(SIGSEGV, SEGV_MAPERR,
738 				(code == 7)?
739 				((void __user *) regs->iaoq[0]) :
740 				((void __user *) regs->ior));
741 		return;
742 
743 	case 28:
744 		/* Unaligned data reference trap */
745 		handle_unaligned(regs);
746 		return;
747 
748 	default:
749 		if (user_mode(regs)) {
750 			parisc_printk_ratelimited(0, regs, KERN_DEBUG
751 				"handle_interruption() pid=%d command='%s'\n",
752 				task_pid_nr(current), current->comm);
753 			/* SIGBUS, for lack of a better one. */
754 			force_sig_fault(SIGBUS, BUS_OBJERR,
755 					(void __user *)regs->ior);
756 			return;
757 		}
758 		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
759 
760 		parisc_terminate("Unexpected interruption", regs, code, 0);
761 		/* NOT REACHED */
762 	}
763 
764 	if (user_mode(regs)) {
765 	    if ((fault_space >> SPACEID_SHIFT) != (regs->sr[7] >> SPACEID_SHIFT)) {
766 		parisc_printk_ratelimited(0, regs, KERN_DEBUG
767 				"User fault %d on space 0x%08lx, pid=%d command='%s'\n",
768 				code, fault_space,
769 				task_pid_nr(current), current->comm);
770 		force_sig_fault(SIGSEGV, SEGV_MAPERR,
771 				(void __user *)regs->ior);
772 		return;
773 	    }
774 	}
775 	else {
776 
777 	    /*
778 	     * The kernel should never fault on its own address space,
779 	     * unless pagefault_disable() was called before.
780 	     */
781 
782 	    if (faulthandler_disabled() || fault_space == 0)
783 	    {
784 		/* Clean up and return if in exception table. */
785 		if (fixup_exception(regs))
786 			return;
787 		/* Clean up and return if handled by kfence. */
788 		if (kfence_handle_page_fault(fault_address,
789 			parisc_acctyp(code, regs->iir) == VM_WRITE, regs))
790 			return;
791 		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
792 		parisc_terminate("Kernel Fault", regs, code, fault_address);
793 	    }
794 	}
795 
796 	do_page_fault(regs, code, fault_address);
797 }
798 
799 
800 static void __init initialize_ivt(const void *iva)
801 {
802 	extern const u32 os_hpmc[];
803 
804 	int i;
805 	u32 check = 0;
806 	u32 *ivap;
807 	u32 instr;
808 
809 	if (strcmp((const char *)iva, "cows can fly"))
810 		panic("IVT invalid");
811 
812 	ivap = (u32 *)iva;
813 
814 	for (i = 0; i < 8; i++)
815 	    *ivap++ = 0;
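	/* The loop above leaves ivap pointing at IVA + 32, the HPMC vector slot. */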
816 
817 	/*
818 	 * Use PDC_INSTR firmware function to get instruction that invokes
819 	 * PDCE_CHECK in HPMC handler.  See programming note at page 1-31 of
820 	 * the PA 1.1 Firmware Architecture document.
821 	 */
822 	if (pdc_instr(&instr) == PDC_OK)
823 		ivap[0] = instr;
824 
825 	/*
826 	 * Rules for the checksum of the HPMC handler:
827 	 * 1. The IVA does not point to PDC/PDH space (ie: the OS has installed
828 	 *    its own IVA).
829 	 * 2. The word at IVA + 32 is nonzero.
830 	 * 3. If Length (IVA + 60) is not zero, then Length (IVA + 60) and
831 	 *    Address (IVA + 56) are word-aligned.
832 	 * 4. The checksum of the 8 words starting at IVA + 32 plus the sum of
833 	 *    the Length/4 words starting at Address is zero.
834 	 */
835 
836 	/* Setup IVA and compute checksum for HPMC handler */
837 	ivap[6] = (u32)__pa(os_hpmc);
838 
839 	for (i=0; i<8; i++)
840 	    check += ivap[i];
841 
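	/* Store the negated sum so the eight words starting at IVA + 32
	 * checksum to zero, satisfying rule 4 above. */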
842 	ivap[5] = -check;
843 	pr_debug("initialize_ivt: IVA[6] = 0x%08x\n", ivap[6]);
844 }
845 
846 
847 /* early_trap_init() is called before we set up kernel mappings and
848  * write-protect the kernel */
849 void  __init early_trap_init(void)
850 {
851 	extern const void fault_vector_20;
852 
853 #ifndef CONFIG_64BIT
854 	extern const void fault_vector_11;
855 	initialize_ivt(&fault_vector_11);
856 #endif
857 
858 	initialize_ivt(&fault_vector_20);
859 }
860