xref: /openbmc/linux/arch/arm/kernel/traps.c (revision c94b731d)
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/kernel/traps.c
 *
 *  Copyright (C) 1995-2009 Russell King
 *  Fragments that appear the same as linux/arch/i386/kernel/traps.c (C) Linus Torvalds
 *
 *  'traps.c' handles hardware exceptions after we have saved some state in
 *  'linux/arch/arm/lib/traps.S'.  Mostly a debugging aid, but will probably
 *  kill the offending process.
 */
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/irq.h>

#include <linux/atomic.h>
#include <asm/cacheflush.h>
#include <asm/exception.h>
#include <asm/spectre.h>
#include <asm/unistd.h>
#include <asm/traps.h>
#include <asm/ptrace.h>
#include <asm/unwind.h>
#include <asm/tls.h>
#include <asm/system_misc.h>
#include <asm/opcodes.h>


static const char *handler[] = {
	"prefetch abort",
	"data abort",
	"address exception",
	"interrupt",
	"undefined instruction",
};

void *vectors_page;

#ifdef CONFIG_DEBUG_USER
unsigned int user_debug;

static int __init user_debug_setup(char *str)
{
	get_option(&str, &user_debug);
	return 1;
}
__setup("user_debug=", user_debug_setup);
#endif

static void dump_mem(const char *, const char *, unsigned long, unsigned long);

void dump_backtrace_entry(unsigned long where, unsigned long from,
			  unsigned long frame, const char *loglvl)
{
	unsigned long end = frame + 4 + sizeof(struct pt_regs);

#ifndef CONFIG_KALLSYMS
	printk("%sFunction entered at [<%08lx>] from [<%08lx>]\n",
		loglvl, where, from);
#elif defined CONFIG_BACKTRACE_VERBOSE
	printk("%s[<%08lx>] (%ps) from [<%08lx>] (%pS)\n",
		loglvl, where, (void *)where, from, (void *)from);
#else
	printk("%s %ps from %pS\n", loglvl, (void *)where, (void *)from);
#endif

	if (in_entry_text(from) && end <= ALIGN(frame, THREAD_SIZE))
		dump_mem(loglvl, "Exception stack", frame + 4, end);
}

void dump_backtrace_stm(u32 *stack, u32 instruction, const char *loglvl)
{
	char str[80], *p;
	unsigned int x;
	int reg;

	for (reg = 10, x = 0, p = str; reg >= 0; reg--) {
		if (instruction & BIT(reg)) {
			p += sprintf(p, " r%d:%08x", reg, *stack--);
			if (++x == 6) {
				x = 0;
				p = str;
				printk("%s%s\n", loglvl, str);
			}
		}
	}
	if (p != str)
		printk("%s%s\n", loglvl, str);
}

#ifndef CONFIG_ARM_UNWIND
/*
 * Stack pointers should always be within the kernel's view of
 * physical memory.  If not, then we can't dump out any
 * information relating to the stack.
 */
static int verify_stack(unsigned long sp)
{
	if (sp < PAGE_OFFSET ||
	    (sp > (unsigned long)high_memory && high_memory != NULL))
		return -EFAULT;

	return 0;
}
#endif

/*
 * Dump out the contents of some memory nicely...
 */
static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
		     unsigned long top)
{
	unsigned long first;
	int i;

	printk("%s%s(0x%08lx to 0x%08lx)\n", lvl, str, bottom, top);

	for (first = bottom & ~31; first < top; first += 32) {
		unsigned long p;
		char str[sizeof(" 12345678") * 8 + 1];

		memset(str, ' ', sizeof(str));
		str[sizeof(str) - 1] = '\0';

		for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {
			if (p >= bottom && p < top) {
				unsigned long val;
				if (!get_kernel_nofault(val, (unsigned long *)p))
					sprintf(str + i * 9, " %08lx", val);
				else
					sprintf(str + i * 9, " ????????");
			}
		}
		printk("%s%04lx:%s\n", lvl, first & 0xffff, str);
	}
}
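
/*
 * For reference, dump_mem() output looks like this (addresses and values
 * below are made up, illustrative only):
 *
 *	Exception stack(0xc158bf60 to 0xc158bfa8)
 *	bf60: 00000000 c0a2bc44 00000019 c0407ad8 00000000 ???????? c158bfa8 c0123456
 *
 * i.e. the low 16 bits of each line's start address, then up to eight
 * 32-bit words, with "????????" for words that could not be read.
 */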

static void dump_instr(const char *lvl, struct pt_regs *regs)
{
	unsigned long addr = instruction_pointer(regs);
	const int thumb = thumb_mode(regs);
	const int width = thumb ? 4 : 8;
	char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
	int i;

	/*
	 * Note that we now dump the code first, just in case the backtrace
	 * kills us.
	 */

	for (i = -4; i < 1 + !!thumb; i++) {
		unsigned int val, bad;

		if (!user_mode(regs)) {
			if (thumb) {
				u16 val16;
				bad = get_kernel_nofault(val16, &((u16 *)addr)[i]);
				val = val16;
			} else {
				bad = get_kernel_nofault(val, &((u32 *)addr)[i]);
			}
		} else {
			if (thumb)
				bad = get_user(val, &((u16 *)addr)[i]);
			else
				bad = get_user(val, &((u32 *)addr)[i]);
		}

		if (!bad)
			p += sprintf(p, i == 0 ? "(%0*x) " : "%0*x ",
					width, val);
		else {
			p += sprintf(p, "bad PC value");
			break;
		}
	}
	printk("%sCode: %s\n", lvl, str);
}

#ifdef CONFIG_ARM_UNWIND
static inline void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
				  const char *loglvl)
{
	unwind_backtrace(regs, tsk, loglvl);
}
#else
static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
			   const char *loglvl)
{
	unsigned int fp, mode;
	int ok = 1;

	printk("%sBacktrace: ", loglvl);

	if (!tsk)
		tsk = current;

	if (regs) {
		fp = frame_pointer(regs);
		mode = processor_mode(regs);
	} else if (tsk != current) {
		fp = thread_saved_fp(tsk);
		mode = 0x10;
	} else {
		asm("mov %0, fp" : "=r" (fp) : : "cc");
		mode = 0x10;
	}

	if (!fp) {
		pr_cont("no frame pointer");
		ok = 0;
	} else if (verify_stack(fp)) {
		pr_cont("invalid frame pointer 0x%08x", fp);
		ok = 0;
	} else if (fp < (unsigned long)end_of_stack(tsk))
		pr_cont("frame pointer underflow");
	pr_cont("\n");

	if (ok)
		c_backtrace(fp, mode, loglvl);
}
#endif

void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
	dump_backtrace(NULL, tsk, loglvl);
	barrier();
}

#ifdef CONFIG_PREEMPT
#define S_PREEMPT " PREEMPT"
#elif defined(CONFIG_PREEMPT_RT)
#define S_PREEMPT " PREEMPT_RT"
#else
#define S_PREEMPT ""
#endif
#ifdef CONFIG_SMP
#define S_SMP " SMP"
#else
#define S_SMP ""
#endif
#ifdef CONFIG_THUMB2_KERNEL
#define S_ISA " THUMB2"
#else
#define S_ISA " ARM"
#endif

static int __die(const char *str, int err, struct pt_regs *regs)
{
	struct task_struct *tsk = current;
	static int die_counter;
	int ret;

	pr_emerg("Internal error: %s: %x [#%d]" S_PREEMPT S_SMP S_ISA "\n",
	         str, err, ++die_counter);

	/* trap and error numbers are mostly meaningless on ARM */
	ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, SIGSEGV);
	if (ret == NOTIFY_STOP)
		return 1;

	print_modules();
	__show_regs(regs);
	__show_regs_alloc_free(regs);
	pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
		 TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), end_of_stack(tsk));

	if (!user_mode(regs) || in_interrupt()) {
		dump_mem(KERN_EMERG, "Stack: ", regs->ARM_sp,
			 THREAD_SIZE + (unsigned long)task_stack_page(tsk));
		dump_backtrace(regs, tsk, KERN_EMERG);
		dump_instr(KERN_EMERG, regs);
	}

	return 0;
}
static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;

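/*
 * die_lock is taken with a trylock first: if this CPU already holds it we
 * are oopsing again from within __die() (a nested oops), so we press on
 * without deadlocking on ourselves, while other CPUs spin waiting.
 * die_nest_count tracks the nesting depth so that only the outermost
 * oops_end() releases the lock.
 */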
static unsigned long oops_begin(void)
{
	int cpu;
	unsigned long flags;

	oops_enter();

	/* racy, but better than risking deadlock. */
	raw_local_irq_save(flags);
	cpu = smp_processor_id();
	if (!arch_spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			arch_spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);
	return flags;
}

static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	bust_spinlocks(0);
	die_owner = -1;
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	die_nest_count--;
	if (!die_nest_count)
		/* Nest count reaches zero, release the lock. */
		arch_spin_unlock(&die_lock);
	raw_local_irq_restore(flags);
	oops_exit();

	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");
	if (signr)
		make_task_dead(signr);
}

/*
 * This function is protected against re-entrancy.
 */
void die(const char *str, struct pt_regs *regs, int err)
{
	enum bug_trap_type bug_type = BUG_TRAP_TYPE_NONE;
	unsigned long flags = oops_begin();
	int sig = SIGSEGV;

	if (!user_mode(regs))
		bug_type = report_bug(regs->ARM_pc, regs);
	if (bug_type != BUG_TRAP_TYPE_NONE)
		str = "Oops - BUG";

	if (__die(str, err, regs))
		sig = 0;

	oops_end(flags, regs, sig);
}

void arm_notify_die(const char *str, struct pt_regs *regs,
		int signo, int si_code, void __user *addr,
		unsigned long err, unsigned long trap)
{
	if (user_mode(regs)) {
		current->thread.error_code = err;
		current->thread.trap_no = trap;

		force_sig_fault(signo, si_code, addr);
	} else {
		die(str, regs, err);
	}
}

#ifdef CONFIG_GENERIC_BUG

int is_valid_bugaddr(unsigned long pc)
{
#ifdef CONFIG_THUMB2_KERNEL
	u16 bkpt;
	u16 insn = __opcode_to_mem_thumb16(BUG_INSTR_VALUE);
#else
	u32 bkpt;
	u32 insn = __opcode_to_mem_arm(BUG_INSTR_VALUE);
#endif

	if (get_kernel_nofault(bkpt, (void *)pc))
		return 0;

	return bkpt == insn;
}

#endif

static LIST_HEAD(undef_hook);
static DEFINE_RAW_SPINLOCK(undef_lock);

void register_undef_hook(struct undef_hook *hook)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&undef_lock, flags);
	list_add(&hook->node, &undef_hook);
	raw_spin_unlock_irqrestore(&undef_lock, flags);
}

void unregister_undef_hook(struct undef_hook *hook)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&undef_lock, flags);
	list_del(&hook->node);
	raw_spin_unlock_irqrestore(&undef_lock, flags);
}

static nokprobe_inline
int call_undef_hook(struct pt_regs *regs, unsigned int instr)
{
	struct undef_hook *hook;
	unsigned long flags;
	int (*fn)(struct pt_regs *regs, unsigned int instr) = NULL;

	raw_spin_lock_irqsave(&undef_lock, flags);
	list_for_each_entry(hook, &undef_hook, node)
		if ((instr & hook->instr_mask) == hook->instr_val &&
		    (regs->ARM_cpsr & hook->cpsr_mask) == hook->cpsr_val)
			fn = hook->fn;
	raw_spin_unlock_irqrestore(&undef_lock, flags);

	return fn ? fn(regs, instr) : 1;
}
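/*
 * Illustrative sketch only (names and encoding are hypothetical): a
 * subsystem that wants to emulate a particular undefined instruction
 * pattern registers a hook along these lines; arm_mrc_hook further down
 * is a real in-file user.
 *
 *	static int my_emulate(struct pt_regs *regs, unsigned int instr)
 *	{
 *		// ...emulate the instruction, then skip past it...
 *		regs->ARM_pc += 4;
 *		return 0;	// 0 = handled, non-zero = not handled
 *	}
 *
 *	static struct undef_hook my_hook = {
 *		.instr_mask	= 0xffffffff,	// match exactly...
 *		.instr_val	= 0xe7f001f9,	// ...this (made-up) encoding
 *		.cpsr_mask	= PSR_T_BIT,	// ARM state only
 *		.cpsr_val	= 0,
 *		.fn		= my_emulate,
 *	};
 *
 *	register_undef_hook(&my_hook);
 */
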
asmlinkage void do_undefinstr(struct pt_regs *regs)
{
	unsigned int instr;
	void __user *pc;

	pc = (void __user *)instruction_pointer(regs);

	if (processor_mode(regs) == SVC_MODE) {
#ifdef CONFIG_THUMB2_KERNEL
		if (thumb_mode(regs)) {
			instr = __mem_to_opcode_thumb16(((u16 *)pc)[0]);
			if (is_wide_instruction(instr)) {
				u16 inst2;
				inst2 = __mem_to_opcode_thumb16(((u16 *)pc)[1]);
				instr = __opcode_thumb32_compose(instr, inst2);
			}
		} else
#endif
			instr = __mem_to_opcode_arm(*(u32 *) pc);
	} else if (thumb_mode(regs)) {
		if (get_user(instr, (u16 __user *)pc))
			goto die_sig;
		instr = __mem_to_opcode_thumb16(instr);
		if (is_wide_instruction(instr)) {
			unsigned int instr2;
			if (get_user(instr2, (u16 __user *)pc+1))
				goto die_sig;
			instr2 = __mem_to_opcode_thumb16(instr2);
			instr = __opcode_thumb32_compose(instr, instr2);
		}
	} else {
		if (get_user(instr, (u32 __user *)pc))
			goto die_sig;
		instr = __mem_to_opcode_arm(instr);
	}

	if (call_undef_hook(regs, instr) == 0)
		return;

die_sig:
#ifdef CONFIG_DEBUG_USER
	if (user_debug & UDBG_UNDEFINED) {
		pr_info("%s (%d): undefined instruction: pc=%p\n",
			current->comm, task_pid_nr(current), pc);
		__show_regs(regs);
		dump_instr(KERN_INFO, regs);
	}
#endif
	arm_notify_die("Oops - undefined instruction", regs,
		       SIGILL, ILL_ILLOPC, pc, 0, 6);
}
NOKPROBE_SYMBOL(do_undefinstr)

/*
 * Handle FIQ similarly to NMI on x86 systems.
 *
 * The runtime environment for NMIs is extremely restrictive
 * (NMIs can pre-empt critical sections meaning almost all locking is
 * forbidden) meaning this default FIQ handling must only be used in
 * circumstances where non-maskability improves robustness, such as
 * watchdog or debug logic.
 *
 * This handler is not appropriate for general purpose use in drivers
 * or platform code and can be overridden using set_fiq_handler.
 */
asmlinkage void __exception_irq_entry handle_fiq_as_nmi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	nmi_enter();

	/* nop. FIQ handlers for special arch/arm features can be added here. */

	nmi_exit();

	set_irq_regs(old_regs);
}

/*
 * bad_mode handles the impossible case in the vectors.  If you see one of
 * these, then it's extremely serious, and could mean you have buggy hardware.
 * It never returns, and never tries to sync.  We hope that we can at least
 * dump out some state information...
 */
asmlinkage void bad_mode(struct pt_regs *regs, int reason)
{
	console_verbose();

	pr_crit("Bad mode in %s handler detected\n", handler[reason]);

	die("Oops - bad mode", regs, 0);
	local_irq_disable();
	panic("bad mode");
}

static int bad_syscall(int n, struct pt_regs *regs)
{
	if ((current->personality & PER_MASK) != PER_LINUX) {
		send_sig(SIGSEGV, current, 1);
		return regs->ARM_r0;
	}

#ifdef CONFIG_DEBUG_USER
	if (user_debug & UDBG_SYSCALL) {
		pr_err("[%d] %s: obsolete system call %08x.\n",
			task_pid_nr(current), current->comm, n);
		dump_instr(KERN_ERR, regs);
	}
#endif

	arm_notify_die("Oops - bad syscall", regs, SIGILL, ILL_ILLTRP,
		       (void __user *)instruction_pointer(regs) -
			 (thumb_mode(regs) ? 2 : 4),
		       n, 0);

	return regs->ARM_r0;
}

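/*
 * Note: the flush below is performed in at most page-sized chunks so that
 * a fatal signal can abort a large request early, and so the task can be
 * rescheduled between chunks rather than monopolising the CPU for the
 * whole range (see the fatal_signal_pending()/cond_resched() calls).
 */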
static inline int
__do_cache_op(unsigned long start, unsigned long end)
{
	int ret;

	do {
		unsigned long chunk = min(PAGE_SIZE, end - start);

		if (fatal_signal_pending(current))
			return 0;

		ret = flush_icache_user_range(start, start + chunk);
		if (ret)
			return ret;

		cond_resched();
		start += chunk;
	} while (start < end);

	return 0;
}

static inline int
do_cache_op(unsigned long start, unsigned long end, int flags)
{
	if (end < start || flags)
		return -EINVAL;

	if (!access_ok(start, end - start))
		return -EFAULT;

	return __do_cache_op(start, end);
}
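
/*
 * Illustrative only: userspace reaches do_cache_op() through the private
 * __ARM_NR_cacheflush ARM syscall handled by arm_syscall() below, roughly:
 *
 *	register unsigned long r0 asm("r0") = start;	// inclusive
 *	register unsigned long r1 asm("r1") = end;	// exclusive
 *	register unsigned long r2 asm("r2") = 0;	// flags, must be 0
 *	register unsigned long r7 asm("r7") = __ARM_NR_cacheflush;
 *	asm volatile("swi #0"
 *		     : "+r" (r0)
 *		     : "r" (r1), "r" (r2), "r" (r7)
 *		     : "memory");
 *
 * (EABI shown; with OABI the number is encoded in the swi immediate
 * instead of being passed in r7.)
 */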

/*
 * Handle all unrecognised system calls.
 *  0x9f0000 - 0x9fffff are some more esoteric system calls
 */
#define NR(x) ((__ARM_NR_##x) - __ARM_NR_BASE)
asmlinkage int arm_syscall(int no, struct pt_regs *regs)
{
	if ((no >> 16) != (__ARM_NR_BASE >> 16))
		return bad_syscall(no, regs);

	switch (no & 0xffff) {
	case 0: /* branch through 0 */
		arm_notify_die("branch through zero", regs,
			       SIGSEGV, SEGV_MAPERR, NULL, 0, 0);
		return 0;

	case NR(breakpoint): /* SWI BREAK_POINT */
		regs->ARM_pc -= thumb_mode(regs) ? 2 : 4;
		ptrace_break(regs);
		return regs->ARM_r0;

	/*
	 * Flush a region from virtual address 'r0' to virtual address 'r1'
	 * _exclusive_.  There is no alignment requirement on either address;
	 * user space does not need to know the hardware cache layout.
	 *
	 * r2 contains flags.  It should ALWAYS be passed as ZERO until it
	 * is defined to be something else.  For now we ignore it, but may
	 * the fires of hell burn in your belly if you break this rule. ;)
	 *
	 * (at a later date, we may want to allow this call to not flush
	 * various aspects of the cache.  Passing '0' will guarantee that
	 * everything necessary gets flushed to maintain consistency in
	 * the specified region).
	 */
	case NR(cacheflush):
		return do_cache_op(regs->ARM_r0, regs->ARM_r1, regs->ARM_r2);

	case NR(usr26):
		if (!(elf_hwcap & HWCAP_26BIT))
			break;
		regs->ARM_cpsr &= ~MODE32_BIT;
		return regs->ARM_r0;

	case NR(usr32):
		if (!(elf_hwcap & HWCAP_26BIT))
			break;
		regs->ARM_cpsr |= MODE32_BIT;
		return regs->ARM_r0;

	case NR(set_tls):
		set_tls(regs->ARM_r0);
		return 0;

	case NR(get_tls):
		return current_thread_info()->tp_value[0];

	default:
		/* Calls 9f00xx..9f07ff are defined to return -ENOSYS
		   if not implemented, rather than raising SIGILL.  This
		   way the calling program can gracefully determine whether
		   a feature is supported.  */
		if ((no & 0xffff) <= 0x7ff)
			return -ENOSYS;
		break;
	}
#ifdef CONFIG_DEBUG_USER
	/*
	 * experience shows that these seem to indicate that
	 * something catastrophic has happened
	 */
	if (user_debug & UDBG_SYSCALL) {
		pr_err("[%d] %s: arm syscall %d\n",
		       task_pid_nr(current), current->comm, no);
		dump_instr(KERN_ERR, regs);
		if (user_mode(regs)) {
			__show_regs(regs);
			c_backtrace(frame_pointer(regs), processor_mode(regs), KERN_ERR);
		}
	}
#endif
	arm_notify_die("Oops - bad syscall(2)", regs, SIGILL, ILL_ILLTRP,
		       (void __user *)instruction_pointer(regs) -
			 (thumb_mode(regs) ? 2 : 4),
		       no, 0);
	return 0;
}

#ifdef CONFIG_TLS_REG_EMUL

/*
 * We might be running on an ARMv6+ processor which should have the TLS
 * register but for some reason we can't use it, or maybe an SMP system
 * using a pre-ARMv6 processor (there are apparently a few prototypes like
 * that in existence) and therefore access to that register must be
 * emulated.
 */

static int get_tp_trap(struct pt_regs *regs, unsigned int instr)
{
	int reg = (instr >> 12) & 15;
	if (reg == 15)
		return 1;
	regs->uregs[reg] = current_thread_info()->tp_value[0];
	regs->ARM_pc += 4;
	return 0;
}

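/*
 * With instr_mask 0x0fff0fff and instr_val 0x0e1d0f70 this matches
 * "mrc p15, 0, <Rt>, c13, c0, 3" (a read of TPIDRURO, the user read-only
 * thread ID register) for any condition code and any destination register;
 * get_tp_trap() extracts Rt from bits [15:12] of the trapped instruction.
 */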
static struct undef_hook arm_mrc_hook = {
	.instr_mask	= 0x0fff0fff,
	.instr_val	= 0x0e1d0f70,
	.cpsr_mask	= PSR_T_BIT,
	.cpsr_val	= 0,
	.fn		= get_tp_trap,
};

static int __init arm_mrc_hook_init(void)
{
	register_undef_hook(&arm_mrc_hook);
	return 0;
}

late_initcall(arm_mrc_hook_init);

#endif

/*
 * A data abort trap was taken, but we did not handle the instruction.
 * Try to abort the user program, or panic if it was the kernel.
 */
asmlinkage void
baddataabort(int code, unsigned long instr, struct pt_regs *regs)
{
	unsigned long addr = instruction_pointer(regs);

#ifdef CONFIG_DEBUG_USER
	if (user_debug & UDBG_BADABORT) {
		pr_err("8<--- cut here ---\n");
		pr_err("[%d] %s: bad data abort: code %d instr 0x%08lx\n",
		       task_pid_nr(current), current->comm, code, instr);
		dump_instr(KERN_ERR, regs);
		show_pte(KERN_ERR, current->mm, addr);
	}
#endif

	arm_notify_die("unknown data abort code", regs,
		       SIGILL, ILL_ILLOPC, (void __user *)addr, instr, 0);
}

void __readwrite_bug(const char *fn)
{
	pr_err("%s called, but not implemented\n", fn);
	BUG();
}
EXPORT_SYMBOL(__readwrite_bug);

void __pte_error(const char *file, int line, pte_t pte)
{
	pr_err("%s:%d: bad pte %08llx.\n", file, line, (long long)pte_val(pte));
}

void __pmd_error(const char *file, int line, pmd_t pmd)
{
	pr_err("%s:%d: bad pmd %08llx.\n", file, line, (long long)pmd_val(pmd));
}

void __pgd_error(const char *file, int line, pgd_t pgd)
{
	pr_err("%s:%d: bad pgd %08llx.\n", file, line, (long long)pgd_val(pgd));
}

asmlinkage void __div0(void)
{
	pr_err("Division by zero in kernel.\n");
	dump_stack();
}
EXPORT_SYMBOL(__div0);

void abort(void)
{
	BUG();

	/* if that doesn't kill us, halt */
	panic("Oops failed to kill thread");
}

#ifdef CONFIG_KUSER_HELPERS
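/*
 * Illustrative only: userspace calls the kuser helpers by absolute address
 * in the vectors page, e.g. reading TLS via the helper placed at 0xffff0fe0
 * below (see Documentation/arm/kernel_user_helpers.rst):
 *
 *	typedef unsigned int (*kuser_get_tls_t)(void);
 *	#define __kuser_get_tls ((kuser_get_tls_t)0xffff0fe0)
 *
 *	unsigned int tls = __kuser_get_tls();
 */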
static void __init kuser_init(void *vectors)
{
	extern char __kuser_helper_start[], __kuser_helper_end[];
	int kuser_sz = __kuser_helper_end - __kuser_helper_start;

	memcpy(vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);

	/*
	 * vectors + 0xfe0 = __kuser_get_tls
	 * vectors + 0xfe8 = hardware TLS instruction at 0xffff0fe8
	 */
	if (tls_emu || has_tls_reg)
		memcpy(vectors + 0xfe0, vectors + 0xfe8, 4);
}
#else
static inline void __init kuser_init(void *vectors)
{
}
#endif

#ifndef CONFIG_CPU_V7M
static void copy_from_lma(void *vma, void *lma_start, void *lma_end)
{
	memcpy(vma, lma_start, lma_end - lma_start);
}

static void flush_vectors(void *vma, size_t offset, size_t size)
{
	unsigned long start = (unsigned long)vma + offset;
	unsigned long end = start + size;

	flush_icache_range(start, end);
}

#ifdef CONFIG_HARDEN_BRANCH_HISTORY
int spectre_bhb_update_vectors(unsigned int method)
{
	extern char __vectors_bhb_bpiall_start[], __vectors_bhb_bpiall_end[];
	extern char __vectors_bhb_loop8_start[], __vectors_bhb_loop8_end[];
	void *vec_start, *vec_end;

	if (system_state >= SYSTEM_FREEING_INITMEM) {
		pr_err("CPU%u: Spectre BHB workaround too late - system vulnerable\n",
		       smp_processor_id());
		return SPECTRE_VULNERABLE;
	}

	switch (method) {
	case SPECTRE_V2_METHOD_LOOP8:
		vec_start = __vectors_bhb_loop8_start;
		vec_end = __vectors_bhb_loop8_end;
		break;

	case SPECTRE_V2_METHOD_BPIALL:
		vec_start = __vectors_bhb_bpiall_start;
		vec_end = __vectors_bhb_bpiall_end;
		break;

	default:
		pr_err("CPU%u: unknown Spectre BHB state %d\n",
		       smp_processor_id(), method);
		return SPECTRE_VULNERABLE;
	}

	copy_from_lma(vectors_page, vec_start, vec_end);
	flush_vectors(vectors_page, 0, vec_end - vec_start);

	return SPECTRE_MITIGATED;
}
#endif

void __init early_trap_init(void *vectors_base)
{
	extern char __stubs_start[], __stubs_end[];
	extern char __vectors_start[], __vectors_end[];
	unsigned i;

	vectors_page = vectors_base;

	/*
	 * Poison the vectors page with an undefined instruction.  This
	 * instruction is chosen to be undefined for both ARM and Thumb
	 * ISAs.  The Thumb version is an undefined instruction with a
	 * branch back to the undefined instruction.
	 */
	for (i = 0; i < PAGE_SIZE / sizeof(u32); i++)
		((u32 *)vectors_base)[i] = 0xe7fddef1;

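	/*
	 * (As a pair of little-endian Thumb halfwords, 0xe7fddef1 is an
	 * undefined instruction (0xdef1) followed by a branch back to it
	 * (0xe7fd); as a single ARM word it likewise falls in the
	 * architecturally undefined space.)
	 */
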
	/*
	 * Copy the vectors, stubs and kuser helpers (in entry-armv.S)
	 * into the vector page, mapped at 0xffff0000, and ensure these
	 * are visible to the instruction stream.
	 */
	copy_from_lma(vectors_base, __vectors_start, __vectors_end);
	copy_from_lma(vectors_base + 0x1000, __stubs_start, __stubs_end);

	kuser_init(vectors_base);

	flush_vectors(vectors_base, 0, PAGE_SIZE * 2);
}
#else /* ifndef CONFIG_CPU_V7M */
void __init early_trap_init(void *vectors_base)
{
	/*
	 * On V7-M there is no need to copy the vector table to a dedicated
	 * memory area. The address is configurable and so a table in the
	 * kernel image can be used.
	 */
}
#endif
883