xref: /openbmc/linux/arch/riscv/kernel/traps.c (revision ecfb9f40)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/signal.h>
#include <linux/signal.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/irq.h>
#include <linux/kexec.h>

#include <asm/asm-prototypes.h>
#include <asm/bug.h>
#include <asm/csr.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>

int show_unhandled_signals = 1;

static DEFINE_SPINLOCK(die_lock);

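/*
 * Report a fatal kernel-mode trap: print the oops banner, module list and
 * register state, notify the die notifier chain, optionally hand off to a
 * crash kexec kernel, and finally take the task down (or panic outright if
 * we are in interrupt context or panic_on_oops is set).
 */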
void die(struct pt_regs *regs, const char *str)
{
	static int die_counter;
	int ret;
	long cause;

	oops_enter();

	spin_lock_irq(&die_lock);
	console_verbose();
	bust_spinlocks(1);

	pr_emerg("%s [#%d]\n", str, ++die_counter);
	print_modules();
	if (regs)
		show_regs(regs);

	cause = regs ? regs->cause : -1;
	ret = notify_die(DIE_OOPS, str, regs, 0, cause, SIGSEGV);

	if (kexec_should_crash(current))
		crash_kexec(regs);

	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	spin_unlock_irq(&die_lock);
	oops_exit();

	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");
	if (ret != NOTIFY_STOP)
		make_task_dead(SIGSEGV);
}

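/*
 * Deliver a fault signal to the current (user) task, optionally printing a
 * rate-limited "unhandled signal" message along with the faulting register
 * state.
 */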
void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr)
{
	struct task_struct *tsk = current;

	if (show_unhandled_signals && unhandled_signal(tsk, signo)
	    && printk_ratelimit()) {
		pr_info("%s[%d]: unhandled signal %d code 0x%x at 0x" REG_FMT,
			tsk->comm, task_pid_nr(tsk), signo, code, addr);
		print_vma_addr(KERN_CONT " in ", instruction_pointer(regs));
		pr_cont("\n");
		__show_regs(regs);
	}

	force_sig_fault(signo, code, (void __user *)addr);
}

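/*
 * Common handling for trap-style errors: faults taken from user mode become
 * a signal via do_trap(), while kernel-mode faults first get a chance at an
 * exception table fixup and otherwise die().
 */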
static void do_trap_error(struct pt_regs *regs, int signo, int code,
	unsigned long addr, const char *str)
{
	current->thread.bad_cause = regs->cause;

	if (user_mode(regs)) {
		do_trap(regs, signo, code, addr);
	} else {
		if (!fixup_exception(regs))
			die(regs, str);
	}
}

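/*
 * When the kernel executes in place from flash (XIP) and the alternatives
 * framework is enabled, the trap handlers are grouped into the dedicated
 * ".xip.traps" section so that boot-time alternative patching can still
 * reach them.  DO_ERROR_INFO stamps out a trivial handler that funnels the
 * exception into do_trap_error() with the given signal, si_code and message.
 */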
#if defined(CONFIG_XIP_KERNEL) && defined(CONFIG_RISCV_ALTERNATIVE)
#define __trap_section		__section(".xip.traps")
#else
#define __trap_section
#endif
#define DO_ERROR_INFO(name, signo, code, str)				\
asmlinkage __visible __trap_section void name(struct pt_regs *regs)	\
{									\
	do_trap_error(regs, signo, code, regs->epc, "Oops - " str);	\
}

DO_ERROR_INFO(do_trap_unknown,
	SIGILL, ILL_ILLTRP, "unknown exception");
DO_ERROR_INFO(do_trap_insn_misaligned,
	SIGBUS, BUS_ADRALN, "instruction address misaligned");
DO_ERROR_INFO(do_trap_insn_fault,
	SIGSEGV, SEGV_ACCERR, "instruction access fault");
DO_ERROR_INFO(do_trap_insn_illegal,
	SIGILL, ILL_ILLOPC, "illegal instruction");
DO_ERROR_INFO(do_trap_load_fault,
	SIGSEGV, SEGV_ACCERR, "load access fault");
#ifndef CONFIG_RISCV_M_MODE
DO_ERROR_INFO(do_trap_load_misaligned,
	SIGBUS, BUS_ADRALN, "load address misaligned");
DO_ERROR_INFO(do_trap_store_misaligned,
	SIGBUS, BUS_ADRALN, "store (or AMO) address misaligned");
#else
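/*
 * M-mode kernels have no firmware underneath them to emulate misaligned
 * accesses, so the kernel handles them itself via handle_misaligned_load()
 * and handle_misaligned_store(), and only raises a fault when that
 * emulation fails.
 */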
int handle_misaligned_load(struct pt_regs *regs);
int handle_misaligned_store(struct pt_regs *regs);

asmlinkage void __trap_section do_trap_load_misaligned(struct pt_regs *regs)
{
	if (!handle_misaligned_load(regs))
		return;
	do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
		      "Oops - load address misaligned");
}

asmlinkage void __trap_section do_trap_store_misaligned(struct pt_regs *regs)
{
	if (!handle_misaligned_store(regs))
		return;
	do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
		      "Oops - store (or AMO) address misaligned");
}
#endif
DO_ERROR_INFO(do_trap_store_fault,
	SIGSEGV, SEGV_ACCERR, "store (or AMO) access fault");
DO_ERROR_INFO(do_trap_ecall_u,
	SIGILL, ILL_ILLTRP, "environment call from U-mode");
DO_ERROR_INFO(do_trap_ecall_s,
	SIGILL, ILL_ILLTRP, "environment call from S-mode");
DO_ERROR_INFO(do_trap_ecall_m,
	SIGILL, ILL_ILLTRP, "environment call from M-mode");

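/*
 * Return the length in bytes (2 or 4) of the breakpoint instruction at @pc,
 * or 0 if it cannot be read, so that WARN()-style traps can be skipped over.
 */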
static inline unsigned long get_break_insn_length(unsigned long pc)
{
	bug_insn_t insn;

	if (get_kernel_nofault(insn, (bug_insn_t *)pc))
		return 0;

	return GET_INSN_LENGTH(insn);
}

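/*
 * ebreak handler: give kprobes/uprobes first crack at the breakpoint, send
 * SIGTRAP for user-mode breakpoints, let KGDB claim kernel breakpoints, and
 * treat the rest as BUG()/WARN() traps: warnings are stepped over, real
 * BUGs call die().
 */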
asmlinkage __visible __trap_section void do_trap_break(struct pt_regs *regs)
{
#ifdef CONFIG_KPROBES
	if (kprobe_single_step_handler(regs))
		return;

	if (kprobe_breakpoint_handler(regs))
		return;
#endif
#ifdef CONFIG_UPROBES
	if (uprobe_single_step_handler(regs))
		return;

	if (uprobe_breakpoint_handler(regs))
		return;
#endif
	current->thread.bad_cause = regs->cause;

	if (user_mode(regs))
		force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->epc);
#ifdef CONFIG_KGDB
	else if (notify_die(DIE_TRAP, "EBREAK", regs, 0, regs->cause, SIGTRAP)
								== NOTIFY_STOP)
		return;
#endif
	else if (report_bug(regs->epc, regs) == BUG_TRAP_TYPE_WARN)
		regs->epc += get_break_insn_length(regs->epc);
	else
		die(regs, "Kernel BUG");
}
NOKPROBE_SYMBOL(do_trap_break);

#ifdef CONFIG_GENERIC_BUG
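/*
 * Used by report_bug() to check that a trapping kernel address really holds
 * a BUG/WARN trap: either the 32-bit or the 16-bit (compressed) encoding of
 * the bug ebreak instruction.
 */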
int is_valid_bugaddr(unsigned long pc)
{
	bug_insn_t insn;

	if (pc < VMALLOC_START)
		return 0;
	if (get_kernel_nofault(insn, (bug_insn_t *)pc))
		return 0;
	if ((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32)
		return (insn == __BUG_INSN_32);
	else
		return ((insn & __COMPRESSED_INSN_MASK) == __BUG_INSN_16);
}
#endif /* CONFIG_GENERIC_BUG */

#ifdef CONFIG_VMAP_STACK
/*
 * Extra stack space that allows us to provide panic messages when the kernel
 * has overflowed its stack.
 */
static DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)],
		overflow_stack)__aligned(16);
/*
 * A temporary stack for use by handle_kernel_stack_overflow.  This is used so
 * we can call into C code to get the per-hart overflow stack.  Usage of this
 * stack must be protected by spin_shadow_stack.
 */
long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE/sizeof(long)] __aligned(16);

/*
 * A pseudo spinlock to protect the shadow stack from being used by multiple
 * harts concurrently.  This isn't a real spinlock because the lock side must
 * be taken without a valid stack and with only a single register available;
 * it's only taken while in the process of panicking anyway, so the
 * performance and error checking a proper spinlock would give us don't
 * matter.
 */
unsigned long spin_shadow_stack;

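/*
 * Called from the assembly stack-overflow path to look up this hart's
 * overflow stack; returns its top (highest address), ready to be loaded
 * into sp.
 */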
asmlinkage unsigned long get_overflow_stack(void)
{
	return (unsigned long)this_cpu_ptr(overflow_stack) +
		OVERFLOW_STACK_SIZE;
}

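/*
 * Entered (already on the overflow stack) once the assembly code has
 * detected a kernel stack overflow: release the shadow-stack pseudo-lock,
 * report the task and overflow stack ranges plus the register state, then
 * panic.
 */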
asmlinkage void handle_bad_stack(struct pt_regs *regs)
{
	unsigned long tsk_stk = (unsigned long)current->stack;
	unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);

	/*
	 * We're done with the shadow stack by this point, as we're on the
	 * overflow stack.  Tell any other concurrent overflowing harts that
	 * they can proceed with panicking by releasing the pseudo-spinlock.
	 *
	 * This pairs with an amoswap.aq in handle_kernel_stack_overflow.
	 */
	smp_store_release(&spin_shadow_stack, 0);

	console_verbose();

	pr_emerg("Insufficient stack space to handle exception!\n");
	pr_emerg("Task stack:     [0x%016lx..0x%016lx]\n",
			tsk_stk, tsk_stk + THREAD_SIZE);
	pr_emerg("Overflow stack: [0x%016lx..0x%016lx]\n",
			ovf_stk, ovf_stk + OVERFLOW_STACK_SIZE);

	__show_regs(regs);
	panic("Kernel stack overflow");

	for (;;)
		wait_for_interrupt();
}
#endif