xref: /openbmc/linux/arch/parisc/kernel/unwind.c (revision 6aa7de05)
/*
 * Kernel unwinding support
 *
 * (c) 2002-2004 Randolph Chung <tausq@debian.org>
 *
 * Derived partially from the IA64 implementation. The PA-RISC
 * Runtime Architecture Document is also a useful reference to
 * understand what is happening here
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kallsyms.h>
#include <linux/sort.h>

#include <linux/uaccess.h>
#include <asm/assembly.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>

#include <asm/unwind.h>

/* #define DEBUG 1 */
#ifdef DEBUG
#define dbg(x...) printk(x)
#else
#define dbg(x...)
#endif

#define KERNEL_START (KERNEL_BINARY_TEXT_START)

extern struct unwind_table_entry __start___unwind[];
extern struct unwind_table_entry __stop___unwind[];

static DEFINE_SPINLOCK(unwind_lock);
/*
 * the kernel unwind block is not dynamically allocated so that
 * we can call unwind_init as early in the bootup process as
 * possible (before the slab allocator is initialized)
 */
static struct unwind_table kernel_unwind_table __read_mostly;
static LIST_HEAD(unwind_tables);

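/*
 * Binary-search a single unwind table for the entry whose
 * [region_start, region_end] range covers addr.  The entries must be
 * sorted by region_start.  Returns NULL if addr is not covered.
 */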
static inline const struct unwind_table_entry *
find_unwind_entry_in_table(const struct unwind_table *table, unsigned long addr)
{
	const struct unwind_table_entry *e = NULL;
	unsigned long lo, hi, mid;

	lo = 0;
	hi = table->length - 1;

	while (lo <= hi) {
		mid = (hi - lo) / 2 + lo;
		e = &table->table[mid];
		if (addr < e->region_start)
			hi = mid - 1;
		else if (addr > e->region_end)
			lo = mid + 1;
		else
			return e;
	}

	return NULL;
}

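/*
 * Find the unwind entry covering addr: first check the statically set
 * up kernel table, then any tables registered at run time (modules).
 * A table that produces a hit is moved to the front of the list, so
 * repeated lookups in the same module stay cheap.
 */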
static const struct unwind_table_entry *
find_unwind_entry(unsigned long addr)
{
	struct unwind_table *table;
	const struct unwind_table_entry *e = NULL;

	if (addr >= kernel_unwind_table.start &&
	    addr <= kernel_unwind_table.end)
		e = find_unwind_entry_in_table(&kernel_unwind_table, addr);
	else {
		unsigned long flags;

		spin_lock_irqsave(&unwind_lock, flags);
		list_for_each_entry(table, &unwind_tables, list) {
			if (addr >= table->start &&
			    addr <= table->end)
				e = find_unwind_entry_in_table(table, addr);
			if (e) {
				/* Move-to-front to exploit common traces */
				list_move(&table->list, &unwind_tables);
				break;
			}
		}
		spin_unlock_irqrestore(&unwind_lock, flags);
	}

	return e;
}

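/*
 * Set up a table descriptor for the entries in [table_start, table_end)
 * and bias every entry's region_start/region_end by base_addr so the
 * table can be searched with absolute addresses.  Entries are expected
 * to arrive sorted; an out-of-order pair is only warned about.
 */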
static void
unwind_table_init(struct unwind_table *table, const char *name,
		  unsigned long base_addr, unsigned long gp,
		  void *table_start, void *table_end)
{
	struct unwind_table_entry *start = table_start;
	struct unwind_table_entry *end =
		(struct unwind_table_entry *)table_end - 1;

	table->name = name;
	table->base_addr = base_addr;
	table->gp = gp;
	table->start = base_addr + start->region_start;
	table->end = base_addr + end->region_end;
	table->table = (struct unwind_table_entry *)table_start;
	table->length = end - start + 1;
	INIT_LIST_HEAD(&table->list);

	for (; start <= end; start++) {
		if (start < end &&
		    start->region_end > (start+1)->region_start) {
			printk(KERN_WARNING "WARNING: Out of order unwind entry! %p and %p\n", start, start+1);
		}

		start->region_start += base_addr;
		start->region_end += base_addr;
	}
}

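/*
 * sort() comparison callback: order unwind entries by ascending
 * region_start so the tables can be binary searched.
 */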
static int cmp_unwind_table_entry(const void *a, const void *b)
{
	return ((const struct unwind_table_entry *)a)->region_start
	     - ((const struct unwind_table_entry *)b)->region_start;
}

static void
unwind_table_sort(struct unwind_table_entry *start,
		  struct unwind_table_entry *finish)
{
	sort(start, finish - start, sizeof(struct unwind_table_entry),
	     cmp_unwind_table_entry, NULL);
}

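/*
 * Register unwind information for a dynamically loaded text range
 * (used by the module loader for a module's unwind section).  The raw
 * entries are sorted in place, wrapped in a newly allocated
 * unwind_table and appended to the global list.  Returns NULL if the
 * descriptor cannot be allocated.
 */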
struct unwind_table *
unwind_table_add(const char *name, unsigned long base_addr,
		 unsigned long gp,
		 void *start, void *end)
{
	struct unwind_table *table;
	unsigned long flags;
	struct unwind_table_entry *s = (struct unwind_table_entry *)start;
	struct unwind_table_entry *e = (struct unwind_table_entry *)end;

	unwind_table_sort(s, e);

	table = kmalloc(sizeof(struct unwind_table), GFP_USER);
	if (table == NULL)
		return NULL;
	unwind_table_init(table, name, base_addr, gp, start, end);
	spin_lock_irqsave(&unwind_lock, flags);
	list_add_tail(&table->list, &unwind_tables);
	spin_unlock_irqrestore(&unwind_lock, flags);

	return table;
}

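/* Unlink a table added with unwind_table_add() and free it. */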
void unwind_table_remove(struct unwind_table *table)
{
	unsigned long flags;

	spin_lock_irqsave(&unwind_lock, flags);
	list_del(&table->list);
	spin_unlock_irqrestore(&unwind_lock, flags);

	kfree(table);
}

/* Called from setup_arch to import the kernel unwind info */
int __init unwind_init(void)
{
	long start, stop;
	register unsigned long gp __asm__ ("r27");

	start = (long)&__start___unwind[0];
	stop = (long)&__stop___unwind[0];

	printk("unwind_init: start = 0x%lx, end = 0x%lx, entries = %lu\n",
	    start, stop,
	    (stop - start) / sizeof(struct unwind_table_entry));

	unwind_table_init(&kernel_unwind_table, "kernel", KERNEL_START,
			  gp,
			  &__start___unwind[0], &__stop___unwind[0]);
#if 0
	{
		int i;
		for (i = 0; i < 10; i++)
		{
			printk("region 0x%x-0x%x\n",
				__start___unwind[i].region_start,
				__start___unwind[i].region_end);
		}
	}
#endif
	return 0;
}

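/*
 * Extract the code address from a function pointer.  On 64-bit parisc a
 * function symbol refers to a function descriptor and the entry point
 * is its third word; on 32-bit it is the first word.
 */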
#ifdef CONFIG_64BIT
#define get_func_addr(fptr) fptr[2]
#else
#define get_func_addr(fptr) fptr[0]
#endif

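/*
 * Special-case unwinding through handle_interruption(): the interrupted
 * state was saved as a struct pt_regs just below this frame, so the
 * previous sp/ip are taken directly from those saved registers rather
 * than reconstructed from unwind data.  Returns 1 if handled here.
 */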
static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int frame_size)
{
	extern void handle_interruption(int, struct pt_regs *);
	static unsigned long *hi = (unsigned long *)&handle_interruption;

	if (pc == get_func_addr(hi)) {
		struct pt_regs *regs = (struct pt_regs *)(info->sp - frame_size - PT_SZ_ALGN);
		dbg("Unwinding through handle_interruption()\n");
		info->prev_sp = regs->gr[30];
		info->prev_ip = regs->iaoq[0];

		return 1;
	}

	return 0;
}

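/*
 * Core of the unwinder: compute prev_sp/prev_ip for the frame described
 * by info.  If unwind data exists for info->ip, the function prologue is
 * scanned to recover the frame size and the stack slot holding rp;
 * otherwise the stack is searched heuristically for a plausible kernel
 * return address, apart from a few assembly entry points that are
 * special-cased via kallsyms.
 */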
static void unwind_frame_regs(struct unwind_frame_info *info)
{
	const struct unwind_table_entry *e;
	unsigned long npc;
	unsigned int insn;
	long frame_size = 0;
	int looking_for_rp, rpoffset = 0;

	e = find_unwind_entry(info->ip);
	if (e == NULL) {
		unsigned long sp;

		dbg("Cannot find unwind entry for 0x%lx; forced unwinding\n", info->ip);

#ifdef CONFIG_KALLSYMS
		/* Handle some frequent special cases.... */
		{
			char symname[KSYM_NAME_LEN];
			char *modname;

			kallsyms_lookup(info->ip, NULL, NULL, &modname,
				symname);

			dbg("info->ip = 0x%lx, name = %s\n", info->ip, symname);

			if (strcmp(symname, "_switch_to_ret") == 0) {
				info->prev_sp = info->sp - CALLEE_SAVE_FRAME_SIZE;
				info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET);
				dbg("_switch_to_ret @ %lx - setting "
				    "prev_sp=%lx prev_ip=%lx\n",
				    info->ip, info->prev_sp,
				    info->prev_ip);
				return;
			} else if (strcmp(symname, "ret_from_kernel_thread") == 0 ||
				   strcmp(symname, "syscall_exit") == 0) {
				info->prev_ip = info->prev_sp = 0;
				return;
			}
		}
#endif

		/* Since we are unwinding blind, we don't know whether we are
		   adjusting the stack correctly or extracting the rp
		   correctly.  The rp is checked to see if it belongs to the
		   kernel text section; if not, we assume we don't have a
		   correct stack frame and continue to unwind the stack.
		   This is not quite correct, and will fail for loadable
		   modules. */
		sp = info->sp & ~63;
		do {
			unsigned long tmp;

			info->prev_sp = sp - 64;
			info->prev_ip = 0;

			/* The stack is at the end inside the thread_union
			 * struct. If we reach data, we have reached the
			 * beginning of the stack and should stop unwinding. */
			if (info->prev_sp >= (unsigned long) task_thread_info(info->t) &&
			    info->prev_sp < ((unsigned long) task_thread_info(info->t)
						+ THREAD_SZ_ALGN)) {
				info->prev_sp = 0;
				break;
			}

			if (get_user(tmp, (unsigned long *)(info->prev_sp - RP_OFFSET)))
				break;
			info->prev_ip = tmp;
			sp = info->prev_sp;
		} while (!kernel_text_address(info->prev_ip));

		info->rp = 0;

		dbg("analyzing func @ %lx with no unwind info, setting "
		    "prev_sp=%lx prev_ip=%lx\n", info->ip,
		    info->prev_sp, info->prev_ip);
	} else {
		dbg("e->start = 0x%x, e->end = 0x%x, Save_SP = %d, "
		    "Save_RP = %d, Millicode = %d size = %u\n",
		    e->region_start, e->region_end, e->Save_SP, e->Save_RP,
		    e->Millicode, e->Total_frame_size);

		looking_for_rp = e->Save_RP;

		for (npc = e->region_start;
		     (frame_size < (e->Total_frame_size << 3) ||
		      looking_for_rp) &&
		     npc < info->ip;
		     npc += 4) {

			insn = *(unsigned int *)npc;

			if ((insn & 0xffffc001) == 0x37de0000 ||
			    (insn & 0xffe00001) == 0x6fc00000) {
				/* ldo X(sp), sp, or stwm X,D(sp) */
				frame_size += (insn & 0x3fff) >> 1;
				dbg("analyzing func @ %lx, insn=%08x @ "
				    "%lx, frame_size = %ld\n", info->ip,
				    insn, npc, frame_size);
			} else if ((insn & 0xffe00009) == 0x73c00008) {
				/* std,ma X,D(sp) */
				frame_size += ((insn >> 4) & 0x3ff) << 3;
				dbg("analyzing func @ %lx, insn=%08x @ "
				    "%lx, frame_size = %ld\n", info->ip,
				    insn, npc, frame_size);
			} else if (insn == 0x6bc23fd9) {
				/* stw rp,-20(sp) */
				rpoffset = 20;
				looking_for_rp = 0;
				dbg("analyzing func @ %lx, insn=stw rp,"
				    "-20(sp) @ %lx\n", info->ip, npc);
			} else if (insn == 0x0fc212c1) {
				/* std rp,-16(sr0,sp) */
				rpoffset = 16;
				looking_for_rp = 0;
				dbg("analyzing func @ %lx, insn=std rp,"
				    "-16(sp) @ %lx\n", info->ip, npc);
			}
		}

		if (frame_size > e->Total_frame_size << 3)
			frame_size = e->Total_frame_size << 3;

		if (!unwind_special(info, e->region_start, frame_size)) {
			info->prev_sp = info->sp - frame_size;
			if (e->Millicode)
				info->rp = info->r31;
			else if (rpoffset)
				info->rp = *(unsigned long *)(info->prev_sp - rpoffset);
			info->prev_ip = info->rp;
			info->rp = 0;
		}

		dbg("analyzing func @ %lx, setting prev_sp=%lx "
		    "prev_ip=%lx npc=%lx\n", info->ip, info->prev_sp,
		    info->prev_ip, npc);
	}
}

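/* Begin an unwind for task t at the state captured in regs. */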
void unwind_frame_init(struct unwind_frame_info *info, struct task_struct *t,
		       struct pt_regs *regs)
{
	memset(info, 0, sizeof(struct unwind_frame_info));
	info->t = t;
	info->sp = regs->gr[30];
	info->ip = regs->iaoq[0];
	info->rp = regs->gr[2];
	info->r31 = regs->gr[31];

	dbg("(%d) Start unwind from sp=%08lx ip=%08lx\n",
	    t ? (int)t->pid : -1, info->sp, info->ip);
}

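/*
 * Begin an unwind of a sleeping (switched-out) task from its saved
 * kernel sp/pc.  A temporary pt_regs copy is used so the task's saved
 * state is not modified; if that allocation fails, *info is left
 * untouched.
 */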
void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info, struct task_struct *t)
{
	struct pt_regs *r = &t->thread.regs;
	struct pt_regs *r2;

	r2 = kmalloc(sizeof(struct pt_regs), GFP_ATOMIC);
	if (!r2)
		return;
	*r2 = *r;
	r2->gr[30] = r->ksp;
	r2->iaoq[0] = r->kpc;
	unwind_frame_init(info, t, r2);
	kfree(r2);
}

void unwind_frame_init_running(struct unwind_frame_info *info, struct pt_regs *regs)
{
	unwind_frame_init(info, current, regs);
}

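/*
 * Step back one frame.  Returns 0 and advances info->sp/info->ip to the
 * caller's frame on success, -1 when no previous frame can be found.
 *
 * A caller that wants a full backtrace would do roughly the following
 * (illustrative sketch only; the real users live elsewhere in
 * arch/parisc):
 *
 *	struct unwind_frame_info info;
 *
 *	unwind_frame_init_from_blocked_task(&info, task);
 *	while (unwind_once(&info) >= 0 && info.ip)
 *		printk(" [<%08lx>] %pS\n", info.ip, (void *)info.ip);
 */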
int unwind_once(struct unwind_frame_info *next_frame)
{
	unwind_frame_regs(next_frame);

	if (next_frame->prev_sp == 0 ||
	    next_frame->prev_ip == 0)
		return -1;

	next_frame->sp = next_frame->prev_sp;
	next_frame->ip = next_frame->prev_ip;
	next_frame->prev_sp = 0;
	next_frame->prev_ip = 0;

	dbg("(%d) Continue unwind to sp=%08lx ip=%08lx\n",
	    next_frame->t ? (int)next_frame->t->pid : -1,
	    next_frame->sp, next_frame->ip);

	return 0;
}

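/*
 * Unwind until the frame's ip no longer looks like a kernel address,
 * i.e. until its two low bits are set (kernel text is word aligned,
 * while a userspace iaoq carries the privilege level in bits 0-1).
 */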
int unwind_to_user(struct unwind_frame_info *info)
{
	int ret;

	do {
		ret = unwind_once(info);
	} while (!ret && !(info->ip & 3));

	return ret;
}

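/*
 * Walk our own call chain and return the kernel text address found
 * level + 2 unwind steps above this function's frame (the first two
 * steps skip return_address() itself and its immediate caller).
 * Returns 0 if the unwind fails or leaves kernel text.
 */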
unsigned long return_address(unsigned int level)
{
	struct unwind_frame_info info;
	struct pt_regs r;
	unsigned long sp;

	/* initialize unwind info */
	asm volatile ("copy %%r30, %0" : "=r"(sp));
	memset(&r, 0, sizeof(struct pt_regs));
	r.iaoq[0] = (unsigned long) current_text_addr();
	r.gr[2] = (unsigned long) __builtin_return_address(0);
	r.gr[30] = sp;
	unwind_frame_init(&info, current, &r);

	/* unwind stack */
	++level;
	do {
		if (unwind_once(&info) < 0 || info.ip == 0)
			return 0;
		if (!kernel_text_address(info.ip))
			return 0;
	} while (info.ip && level--);

	return info.ip;
}