xref: /openbmc/linux/arch/hexagon/kernel/traps.c (revision 23966841)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel traps/events for Hexagon processor
 *
 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/init.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/kdebug.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/tracehook.h>
#include <asm/traps.h>
#include <asm/vm_fault.h>
#include <asm/syscall.h>
#include <asm/registers.h>
#include <asm/unistd.h>
#include <asm/sections.h>
#ifdef CONFIG_KGDB
# include <linux/kgdb.h>
#endif

#define TRAP_SYSCALL	1
#define TRAP_DEBUG	0xdb

void __init trap_init(void)
{
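	/*
	 * Nothing to set up here; this stub only satisfies the generic
	 * trap_init() hook called from start_kernel().
	 */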
}

#ifdef CONFIG_GENERIC_BUG
/* Maybe should resemble arch/sh/kernel/traps.c ?? */
int is_valid_bugaddr(unsigned long addr)
{
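	/*
	 * Treat every address as a valid BUG() site; report_bug() does the
	 * real bug-table lookup.
	 */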
	return 1;
}
#endif /* CONFIG_GENERIC_BUG */

static const char *ex_name(int ex)
{
	switch (ex) {
	case HVM_GE_C_XPROT:
	case HVM_GE_C_XUSER:
		return "Execute protection fault";
	case HVM_GE_C_RPROT:
	case HVM_GE_C_RUSER:
		return "Read protection fault";
	case HVM_GE_C_WPROT:
	case HVM_GE_C_WUSER:
		return "Write protection fault";
	case HVM_GE_C_XMAL:
		return "Misaligned instruction";
	case HVM_GE_C_WREG:
		return "Multiple writes to same register in packet";
	case HVM_GE_C_PCAL:
		return "Program counter values that are not properly aligned";
	case HVM_GE_C_RMAL:
		return "Misaligned data load";
	case HVM_GE_C_WMAL:
		return "Misaligned data store";
	case HVM_GE_C_INVI:
	case HVM_GE_C_PRIVI:
		return "Illegal instruction";
	case HVM_GE_C_BUS:
		return "Precise bus error";
	case HVM_GE_C_CACHE:
		return "Cache error";

	case 0xdb:
		return "Debugger trap";

	default:
		return "Unrecognized exception";
	}
}

static void do_show_stack(struct task_struct *task, unsigned long *fp,
			  unsigned long ip)
{
	int kstack_depth_to_print = 24;
	unsigned long offset, size;
	const char *name = NULL;
	unsigned long *newfp;
	unsigned long low, high;
	char tmpstr[128];
	char *modname;
	int i;

	if (task == NULL)
		task = current;

	printk(KERN_INFO "CPU#%d, %s/%d, Call Trace:\n",
	       raw_smp_processor_id(), task->comm,
	       task_pid_nr(task));

	if (fp == NULL) {
		if (task == current) {
			asm("%0 = r30" : "=r" (fp));
		} else {
			fp = (unsigned long *)
			     ((struct hexagon_switch_stack *)
			     task->thread.switch_sp)->fp;
		}
	}

	if ((((unsigned long) fp) & 0x3) || ((unsigned long) fp < 0x1000)) {
		printk(KERN_INFO "-- Corrupt frame pointer %p\n", fp);
		return;
	}

	/* Saved link reg is one word above FP */
	if (!ip)
		ip = *(fp+1);

	/* Expect kernel stack to be in-bounds */
	low = (unsigned long)task_stack_page(task);
	high = low + THREAD_SIZE - 8;
	low += sizeof(struct thread_info);

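	/*
	 * Walk the frame chain: each frame record holds the caller's FP at
	 * fp[0] and the return address at fp[1]; a zero saved FP marks an
	 * exception frame (handled below).
	 */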
	for (i = 0; i < kstack_depth_to_print; i++) {

		name = kallsyms_lookup(ip, &size, &offset, &modname, tmpstr);

		printk(KERN_INFO "[%p] 0x%lx: %s + 0x%lx", fp, ip, name,
			offset);
		if (((unsigned long) fp < low) || (high < (unsigned long) fp))
			printk(KERN_CONT " (FP out of bounds!)");
		if (modname)
			printk(KERN_CONT " [%s] ", modname);
		printk(KERN_CONT "\n");

		newfp = (unsigned long *) *fp;

		if (((unsigned long) newfp) & 0x3) {
			printk(KERN_INFO "-- Corrupt frame pointer %p\n",
				newfp);
			break;
		}

		/* Attempt to continue past exception. */
		if (0 == newfp) {
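			/*
			 * The exception entry is assumed to save a zero FP,
			 * with the pt_regs block sitting just above the
			 * two-word FP/LR pair, hence the +8 below.
			 */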
			struct pt_regs *regs = (struct pt_regs *) (((void *)fp)
						+ 8);

			if (regs->syscall_nr != -1) {
				printk(KERN_INFO "-- trap0 -- syscall_nr: %ld",
					regs->syscall_nr);
				printk(KERN_CONT "  psp: %lx  elr: %lx\n",
					 pt_psp(regs), pt_elr(regs));
				break;
			} else {
				/* really want to see more ... */
				kstack_depth_to_print += 6;
				printk(KERN_INFO "-- %s (0x%lx)  badva: %lx\n",
					ex_name(pt_cause(regs)), pt_cause(regs),
					pt_badva(regs));
			}

			newfp = (unsigned long *) regs->r30;
			ip = pt_elr(regs);
		} else {
			ip = *(newfp + 1);
		}

		/* If link reg is null, we are done. */
		if (ip == 0x0)
			break;

		/* If newfp isn't larger, we're tracing garbage. */
		if (newfp > fp)
			fp = newfp;
		else
			break;
	}
}

void show_stack(struct task_struct *task, unsigned long *fp)
{
	/* Saved link reg is one word above FP */
	do_show_stack(task, fp, 0);
}

int die(const char *str, struct pt_regs *regs, long err)
{
	static struct {
		spinlock_t lock;
		int counter;
	} die = {
		.lock = __SPIN_LOCK_UNLOCKED(die.lock),
		.counter = 0
	};

	console_verbose();
	oops_enter();

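	/*
	 * The static lock serializes oops output across CPUs; the counter
	 * numbers successive reports.
	 */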
	spin_lock_irq(&die.lock);
	bust_spinlocks(1);
	printk(KERN_EMERG "Oops: %s[#%d]:\n", str, ++die.counter);

	if (notify_die(DIE_OOPS, str, regs, err, pt_cause(regs), SIGSEGV) ==
	    NOTIFY_STOP)
		return 1;

	print_modules();
	show_regs(regs);
	do_show_stack(current, &regs->r30, pt_elr(regs));

	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);

	spin_unlock_irq(&die.lock);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	oops_exit();
	do_exit(err);
	return 0;
}

int die_if_kernel(char *str, struct pt_regs *regs, long err)
{
	if (!user_mode(regs))
		return die(str, regs, err);
	else
		return 0;
}

/*
 * It's not clear that misaligned fetches are ever recoverable.
 */
static void misaligned_instruction(struct pt_regs *regs)
{
	die_if_kernel("Misaligned Instruction", regs, 0);
	force_sig(SIGBUS);
}

/*
 * Misaligned loads and stores, on the other hand, can be
 * emulated, and probably should be, some day.  But for now
 * they will be considered fatal.
 */
static void misaligned_data_load(struct pt_regs *regs)
{
	die_if_kernel("Misaligned Data Load", regs, 0);
	force_sig(SIGBUS);
}

static void misaligned_data_store(struct pt_regs *regs)
{
	die_if_kernel("Misaligned Data Store", regs, 0);
	force_sig(SIGBUS);
}

static void illegal_instruction(struct pt_regs *regs)
{
	die_if_kernel("Illegal Instruction", regs, 0);
	force_sig(SIGILL);
}

/*
 * Precise bus errors may be recoverable with a retry,
 * but for now, treat them as irrecoverable.
 */
static void precise_bus_error(struct pt_regs *regs)
{
	die_if_kernel("Precise Bus Error", regs, 0);
	force_sig(SIGBUS);
}

/*
 * If anything is to be done here other than panic,
 * it will probably be complex and migrate to another
 * source module.  For now, just die.
 */
static void cache_error(struct pt_regs *regs)
{
	die("Cache Error", regs, 0);
}

/*
 * General exception handler
 */
void do_genex(struct pt_regs *regs)
{
	/*
	 * Decode Cause and Dispatch
	 */
	switch (pt_cause(regs)) {
	case HVM_GE_C_XPROT:
	case HVM_GE_C_XUSER:
		execute_protection_fault(regs);
		break;
	case HVM_GE_C_RPROT:
	case HVM_GE_C_RUSER:
		read_protection_fault(regs);
		break;
	case HVM_GE_C_WPROT:
	case HVM_GE_C_WUSER:
		write_protection_fault(regs);
		break;
	case HVM_GE_C_XMAL:
		misaligned_instruction(regs);
		break;
	case HVM_GE_C_WREG:
		illegal_instruction(regs);
		break;
	case HVM_GE_C_PCAL:
		misaligned_instruction(regs);
		break;
	case HVM_GE_C_RMAL:
		misaligned_data_load(regs);
		break;
	case HVM_GE_C_WMAL:
		misaligned_data_store(regs);
		break;
	case HVM_GE_C_INVI:
	case HVM_GE_C_PRIVI:
		illegal_instruction(regs);
		break;
	case HVM_GE_C_BUS:
		precise_bus_error(regs);
		break;
	case HVM_GE_C_CACHE:
		cache_error(regs);
		break;
	default:
		/* Halt and catch fire */
		panic("Unrecognized exception 0x%lx\n", pt_cause(regs));
		break;
	}
}

/* Indirect system call dispatch */
long sys_syscall(void)
{
	printk(KERN_ERR "sys_syscall invoked!\n");
	return -ENOSYS;
}

void do_trap0(struct pt_regs *regs)
{
	syscall_fn syscall;

	switch (pt_cause(regs)) {
	case TRAP_SYSCALL:
		/* System call is trap0 #1 */

		/* allow strace to catch syscall args  */
		if (unlikely(test_thread_flag(TIF_SYSCALL_TRACE) &&
			tracehook_report_syscall_entry(regs)))
			return;  /*  return -ENOSYS somewhere?  */

		/* Interrupts should be re-enabled for syscall processing */
		__vmsetie(VM_INT_ENABLE);

		/*
		 * The system call number is in r6, arguments in r0..r5.
		 * Fortunately, no Linux syscall has more than 6 arguments,
		 * and the Hexagon ABI passes the first 6 arguments in
		 * registers.  64-bit arguments are passed in odd/even
		 * register pairs.  Fortunately, we have no system calls
		 * that take more than three arguments with more than one
		 * 64-bit value.  Should that change, we'd need to redesign
		 * to copy between user and kernel stacks.
		 */
		regs->syscall_nr = regs->r06;

		/*
		 * GPR R0 carries the first parameter, and is also used
		 * to report the return value.  We need a backup of
		 * the user's value in case we need to do a late restart
		 * of the system call.
		 */
		regs->restart_r0 = regs->r00;

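		/*
		 * Out-of-range syscall numbers simply fail; valid ones are
		 * dispatched through sys_call_table, with the result
		 * returned to user space in r0.
		 */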
		if ((unsigned long) regs->syscall_nr >= __NR_syscalls) {
			regs->r00 = -1;
		} else {
			syscall = (syscall_fn)
				  (sys_call_table[regs->syscall_nr]);
			regs->r00 = syscall(regs->r00, regs->r01,
				   regs->r02, regs->r03,
				   regs->r04, regs->r05);
		}

		/* allow strace to get the syscall return state  */
		if (unlikely(test_thread_flag(TIF_SYSCALL_TRACE)))
			tracehook_report_syscall_exit(regs, 0);

		break;
	case TRAP_DEBUG:
		/* Trap0 0xdb is debug breakpoint */
		if (user_mode(regs)) {
			/*
			 * Some architectures add per-thread state to
			 * distinguish between breakpoint traps and trace
			 * traps.  We may want to do that, and set the
			 * si_code value appropriately, or we may want to
			 * use a different trap0 flavor.
			 */
			force_sig_fault(SIGTRAP, TRAP_BRKPT,
					(void __user *) pt_elr(regs));
		} else {
#ifdef CONFIG_KGDB
			kgdb_handle_exception(pt_cause(regs), SIGTRAP,
					      TRAP_BRKPT, regs);
#endif
		}
		break;
	}
	/* Ignore other trap0 codes for now, especially 0 (Angel calls) */
}

/*
 * Machine check exception handler
 */
void do_machcheck(struct pt_regs *regs)
{
	/* Halt and catch fire */
	__vmstop();
}

/*
 * Treat this like the old 0xdb trap.
 */

void do_debug_exception(struct pt_regs *regs)
{
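	/*
	 * Rewrite the cause field in the saved event record to TRAP_DEBUG
	 * so the common trap0 path delivers the breakpoint.
	 */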
	regs->hvmer.vmest &= ~HVM_VMEST_CAUSE_MSK;
	regs->hvmer.vmest |= (TRAP_DEBUG << HVM_VMEST_CAUSE_SFT);
	do_trap0(regs);
}
441