xref: /openbmc/linux/arch/riscv/kernel/traps.c (revision eff53aea)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/signal.h>
#include <linux/signal.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/uprobes.h>
#include <asm/uprobes.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/irq.h>
#include <linux/kexec.h>
#include <linux/entry-common.h>

#include <asm/asm-prototypes.h>
#include <asm/bug.h>
#include <asm/cfi.h>
#include <asm/csr.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/syscall.h>
#include <asm/thread_info.h>
#include <asm/vector.h>
#include <asm/irq_stack.h>

int show_unhandled_signals = 1;

static DEFINE_SPINLOCK(die_lock);

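/*
 * Dump the 16-bit instruction parcels around the faulting program counter
 * for the oops report; the parcel at epc itself is shown in parentheses.
 */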
static void dump_kernel_instr(const char *loglvl, struct pt_regs *regs)
{
	char str[sizeof("0000 ") * 12 + 2 + 1], *p = str;
	const u16 *insns = (u16 *)instruction_pointer(regs);
	long bad;
	u16 val;
	int i;

	for (i = -10; i < 2; i++) {
		bad = get_kernel_nofault(val, &insns[i]);
		if (!bad) {
			p += sprintf(p, i == 0 ? "(%04hx) " : "%04hx ", val);
		} else {
			printk("%sCode: Unable to access instruction at 0x%px.\n",
			       loglvl, &insns[i]);
			return;
		}
	}
	printk("%sCode: %s\n", loglvl, str);
}

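/*
 * Fatal oops path: serialise on die_lock, print the banner, registers and
 * code dump, give die notifiers and kdump a chance to run, then either
 * panic or kill the offending context.
 */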
void die(struct pt_regs *regs, const char *str)
{
	static int die_counter;
	int ret;
	long cause;
	unsigned long flags;

	oops_enter();

	spin_lock_irqsave(&die_lock, flags);
	console_verbose();
	bust_spinlocks(1);

	pr_emerg("%s [#%d]\n", str, ++die_counter);
	print_modules();
	if (regs) {
		show_regs(regs);
		dump_kernel_instr(KERN_EMERG, regs);
	}

	cause = regs ? regs->cause : -1;
	ret = notify_die(DIE_OOPS, str, regs, 0, cause, SIGSEGV);

	if (kexec_should_crash(current))
		crash_kexec(regs);

	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	spin_unlock_irqrestore(&die_lock, flags);
	oops_exit();

	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");
	if (ret != NOTIFY_STOP)
		make_task_dead(SIGSEGV);
}

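/*
 * Deliver a synchronous fault signal to the current task, with an optional
 * rate-limited log line for signals the task does not handle.
 */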
void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr)
{
	struct task_struct *tsk = current;

	if (show_unhandled_signals && unhandled_signal(tsk, signo)
	    && printk_ratelimit()) {
		pr_info("%s[%d]: unhandled signal %d code 0x%x at 0x" REG_FMT,
			tsk->comm, task_pid_nr(tsk), signo, code, addr);
		print_vma_addr(KERN_CONT " in ", instruction_pointer(regs));
		pr_cont("\n");
		__show_regs(regs);
	}

	force_sig_fault(signo, code, (void __user *)addr);
}

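/*
 * Common trap reporting: user-mode faults turn into signals, kernel-mode
 * faults get one chance at an exception table fixup before we die().
 */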
static void do_trap_error(struct pt_regs *regs, int signo, int code,
	unsigned long addr, const char *str)
{
	current->thread.bad_cause = regs->cause;

	if (user_mode(regs)) {
		do_trap(regs, signo, code, addr);
	} else {
		if (!fixup_exception(regs))
			die(regs, str);
	}
}

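/*
 * DO_ERROR_INFO() generates a trap handler that only reports the fault,
 * entering through the generic irqentry code for user traps and the NMI
 * variant for traps taken from kernel mode.  On XIP kernels with
 * alternatives enabled the handlers are placed in the .xip.traps section
 * so they remain patchable.
 */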
#if defined(CONFIG_XIP_KERNEL) && defined(CONFIG_RISCV_ALTERNATIVE)
#define __trap_section __noinstr_section(".xip.traps")
#else
#define __trap_section noinstr
#endif
#define DO_ERROR_INFO(name, signo, code, str)					\
asmlinkage __visible __trap_section void name(struct pt_regs *regs)		\
{										\
	if (user_mode(regs)) {							\
		irqentry_enter_from_user_mode(regs);				\
		do_trap_error(regs, signo, code, regs->epc, "Oops - " str);	\
		irqentry_exit_to_user_mode(regs);				\
	} else {								\
		irqentry_state_t state = irqentry_nmi_enter(regs);		\
		do_trap_error(regs, signo, code, regs->epc, "Oops - " str);	\
		irqentry_nmi_exit(regs, state);					\
	}									\
}

DO_ERROR_INFO(do_trap_unknown,
	SIGILL, ILL_ILLTRP, "unknown exception");
DO_ERROR_INFO(do_trap_insn_misaligned,
	SIGBUS, BUS_ADRALN, "instruction address misaligned");
DO_ERROR_INFO(do_trap_insn_fault,
	SIGSEGV, SEGV_ACCERR, "instruction access fault");

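/*
 * An illegal-instruction trap from user space may simply be the first use
 * of the vector unit, which is enabled lazily; only if
 * riscv_v_first_use_handler() does not claim the trap is it reported as a
 * real SIGILL.
 */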
asmlinkage __visible __trap_section void do_trap_insn_illegal(struct pt_regs *regs)
{
	bool handled;

	if (user_mode(regs)) {
		irqentry_enter_from_user_mode(regs);

		local_irq_enable();

		handled = riscv_v_first_use_handler(regs);

		local_irq_disable();

		if (!handled)
			do_trap_error(regs, SIGILL, ILL_ILLOPC, regs->epc,
				      "Oops - illegal instruction");

		irqentry_exit_to_user_mode(regs);
	} else {
		irqentry_state_t state = irqentry_nmi_enter(regs);

		do_trap_error(regs, SIGILL, ILL_ILLOPC, regs->epc,
			      "Oops - illegal instruction");

		irqentry_nmi_exit(regs, state);
	}
}

DO_ERROR_INFO(do_trap_load_fault,
	SIGSEGV, SEGV_ACCERR, "load access fault");
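/*
 * Misaligned load/store traps: ordinary S-mode kernels just report them,
 * while M-mode (NOMMU) kernels let handle_misaligned_load()/store() try to
 * fix up the access first and only raise SIGBUS if that fails.
 */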
#ifndef CONFIG_RISCV_M_MODE
DO_ERROR_INFO(do_trap_load_misaligned,
	SIGBUS, BUS_ADRALN, "Oops - load address misaligned");
DO_ERROR_INFO(do_trap_store_misaligned,
	SIGBUS, BUS_ADRALN, "Oops - store (or AMO) address misaligned");
#else
int handle_misaligned_load(struct pt_regs *regs);
int handle_misaligned_store(struct pt_regs *regs);

asmlinkage __visible __trap_section void do_trap_load_misaligned(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		irqentry_enter_from_user_mode(regs);

		if (handle_misaligned_load(regs))
			do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
			      "Oops - load address misaligned");

		irqentry_exit_to_user_mode(regs);
	} else {
		irqentry_state_t state = irqentry_nmi_enter(regs);

		if (handle_misaligned_load(regs))
			do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
			      "Oops - load address misaligned");

		irqentry_nmi_exit(regs, state);
	}
}

asmlinkage __visible __trap_section void do_trap_store_misaligned(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		irqentry_enter_from_user_mode(regs);

		if (handle_misaligned_store(regs))
			do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
				"Oops - store (or AMO) address misaligned");

		irqentry_exit_to_user_mode(regs);
	} else {
		irqentry_state_t state = irqentry_nmi_enter(regs);

		if (handle_misaligned_store(regs))
			do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
				"Oops - store (or AMO) address misaligned");

		irqentry_nmi_exit(regs, state);
	}
}
#endif
DO_ERROR_INFO(do_trap_store_fault,
	SIGSEGV, SEGV_ACCERR, "store (or AMO) access fault");
DO_ERROR_INFO(do_trap_ecall_s,
	SIGILL, ILL_ILLTRP, "environment call from S-mode");
DO_ERROR_INFO(do_trap_ecall_m,
	SIGILL, ILL_ILLTRP, "environment call from M-mode");

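/*
 * An ebreak may be encoded as a 32-bit instruction or as the 16-bit
 * c.ebreak; its length is needed so WARN()-style traps can step over it.
 */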
static inline unsigned long get_break_insn_length(unsigned long pc)
{
	bug_insn_t insn;

	if (get_kernel_nofault(insn, (bug_insn_t *)pc))
		return 0;

	return GET_INSN_LENGTH(insn);
}

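/* Route single-step and breakpoint traps to uprobes or kprobes. */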
static bool probe_single_step_handler(struct pt_regs *regs)
{
	bool user = user_mode(regs);

	return user ? uprobe_single_step_handler(regs) : kprobe_single_step_handler(regs);
}

static bool probe_breakpoint_handler(struct pt_regs *regs)
{
	bool user = user_mode(regs);

	return user ? uprobe_breakpoint_handler(regs) : kprobe_breakpoint_handler(regs);
}

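/*
 * ebreak dispatch: probe handlers first, then user SIGTRAP, KGDB, and
 * finally BUG()/WARN() and kCFI failure reporting.  Only warnings resume
 * past the ebreak; anything else is a fatal kernel bug.
 */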
void handle_break(struct pt_regs *regs)
{
	if (probe_single_step_handler(regs))
		return;

	if (probe_breakpoint_handler(regs))
		return;

	current->thread.bad_cause = regs->cause;

	if (user_mode(regs))
		force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->epc);
#ifdef CONFIG_KGDB
	else if (notify_die(DIE_TRAP, "EBREAK", regs, 0, regs->cause, SIGTRAP)
								== NOTIFY_STOP)
		return;
#endif
	else if (report_bug(regs->epc, regs) == BUG_TRAP_TYPE_WARN ||
		 handle_cfi_failure(regs) == BUG_TRAP_TYPE_WARN)
		regs->epc += get_break_insn_length(regs->epc);
	else
		die(regs, "Kernel BUG");
}

asmlinkage __visible __trap_section void do_trap_break(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		irqentry_enter_from_user_mode(regs);

		handle_break(regs);

		irqentry_exit_to_user_mode(regs);
	} else {
		irqentry_state_t state = irqentry_nmi_enter(regs);

		handle_break(regs);

		irqentry_nmi_exit(regs, state);
	}
}

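/*
 * System call entry: an ecall from U-mode steps epc past the 4-byte ecall,
 * preserves the original a0 for restart handling, discards live vector
 * state and dispatches through the generic syscall entry/exit code.
 */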
asmlinkage __visible __trap_section void do_trap_ecall_u(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		long syscall = regs->a7;

		regs->epc += 4;
		regs->orig_a0 = regs->a0;

		riscv_v_vstate_discard(regs);

		syscall = syscall_enter_from_user_mode(regs, syscall);

		if (syscall >= 0 && syscall < NR_syscalls)
			syscall_handler(regs, syscall);
		else if (syscall != -1)
			regs->a0 = -ENOSYS;

		syscall_exit_to_user_mode(regs);
	} else {
		irqentry_state_t state = irqentry_nmi_enter(regs);

		do_trap_error(regs, SIGILL, ILL_ILLTRP, regs->epc,
			"Oops - environment call from U-mode");

		irqentry_nmi_exit(regs, state);
	}

}

#ifdef CONFIG_MMU
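/*
 * Page-fault entry point; the real work is done in handle_page_fault(),
 * with interrupts disabled again before returning through irqentry_exit().
 */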
asmlinkage __visible noinstr void do_page_fault(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	handle_page_fault(regs);

	local_irq_disable();

	irqentry_exit(regs, state);
}
#endif

static void noinstr handle_riscv_irq(struct pt_regs *regs)
{
	struct pt_regs *old_regs;

	irq_enter_rcu();
	old_regs = set_irq_regs(regs);
	handle_arch_irq(regs);
	set_irq_regs(old_regs);
	irq_exit_rcu();
}

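/*
 * Interrupt entry.  With CONFIG_IRQ_STACKS, interrupts arriving on the task
 * stack are moved to the per-CPU IRQ stack: the inline assembly saves ra/s0,
 * points sp at the top of the IRQ stack, calls handle_riscv_irq() and then
 * restores the original stack pointer.
 */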
asmlinkage void noinstr do_irq(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);
#ifdef CONFIG_IRQ_STACKS
	if (on_thread_stack()) {
		ulong *sp = per_cpu(irq_stack_ptr, smp_processor_id())
					+ IRQ_STACK_SIZE/sizeof(ulong);
		__asm__ __volatile(
		"addi	sp, sp, -"RISCV_SZPTR  "\n"
		REG_S"  ra, (sp)		\n"
		"addi	sp, sp, -"RISCV_SZPTR  "\n"
		REG_S"  s0, (sp)		\n"
		"addi	s0, sp, 2*"RISCV_SZPTR "\n"
		"move	sp, %[sp]		\n"
		"move	a0, %[regs]		\n"
		"call	handle_riscv_irq	\n"
		"addi	sp, s0, -2*"RISCV_SZPTR"\n"
		REG_L"  s0, (sp)		\n"
		"addi	sp, sp, "RISCV_SZPTR   "\n"
		REG_L"  ra, (sp)		\n"
		"addi	sp, sp, "RISCV_SZPTR   "\n"
		:
		: [sp] "r" (sp), [regs] "r" (regs)
		: "a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7",
		  "t0", "t1", "t2", "t3", "t4", "t5", "t6",
#ifndef CONFIG_FRAME_POINTER
		  "s0",
#endif
		  "memory");
	} else
#endif
		handle_riscv_irq(regs);

	irqentry_exit(regs, state);
}

#ifdef CONFIG_GENERIC_BUG
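/*
 * report_bug() helper: a valid BUG address must lie above VMALLOC_START and
 * contain either the 32-bit ebreak or the compressed c.ebreak encoding.
 */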
int is_valid_bugaddr(unsigned long pc)
{
	bug_insn_t insn;

	if (pc < VMALLOC_START)
		return 0;
	if (get_kernel_nofault(insn, (bug_insn_t *)pc))
		return 0;
	if ((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32)
		return (insn == __BUG_INSN_32);
	else
		return ((insn & __COMPRESSED_INSN_MASK) == __BUG_INSN_16);
}
#endif /* CONFIG_GENERIC_BUG */

#ifdef CONFIG_VMAP_STACK
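/* Per-CPU stack used once the regular kernel stack has overflowed. */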
DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)],
		overflow_stack)__aligned(16);

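/*
 * Called from the entry code when a kernel stack overflow is detected:
 * report the task and overflow stack ranges, dump the registers and panic.
 */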
asmlinkage void handle_bad_stack(struct pt_regs *regs)
{
	unsigned long tsk_stk = (unsigned long)current->stack;
	unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);

	console_verbose();

	pr_emerg("Insufficient stack space to handle exception!\n");
	pr_emerg("Task stack:     [0x%016lx..0x%016lx]\n",
			tsk_stk, tsk_stk + THREAD_SIZE);
	pr_emerg("Overflow stack: [0x%016lx..0x%016lx]\n",
			ovf_stk, ovf_stk + OVERFLOW_STACK_SIZE);

	__show_regs(regs);
	panic("Kernel stack overflow");

	for (;;)
		wait_for_interrupt();
}
#endif