xref: /openbmc/linux/arch/parisc/kernel/unwind.c (revision 0edbfea5)
/*
 * Kernel unwinding support
 *
 * (c) 2002-2004 Randolph Chung <tausq@debian.org>
 *
 * Derived partially from the IA64 implementation. The PA-RISC
 * Runtime Architecture Document is also a useful reference to
 * understand what is happening here
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kallsyms.h>
#include <linux/sort.h>

#include <asm/uaccess.h>
#include <asm/assembly.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>

#include <asm/unwind.h>

/* #define DEBUG 1 */
#ifdef DEBUG
#define dbg(x...) printk(x)
#else
#define dbg(x...)
#endif

#define KERNEL_START (KERNEL_BINARY_TEXT_START)

extern struct unwind_table_entry __start___unwind[];
extern struct unwind_table_entry __stop___unwind[];

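/*
 * unwind_lock serializes additions to, removals from and walks of the
 * list of module unwind tables below.  The statically allocated
 * kernel_unwind_table is only written during unwind_init() and is
 * looked up without taking the lock.
 */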
static spinlock_t unwind_lock;
/*
 * the kernel unwind block is not dynamically allocated so that
 * we can call unwind_init as early in the bootup process as
 * possible (before the slab allocator is initialized)
 */
static struct unwind_table kernel_unwind_table __read_mostly;
static LIST_HEAD(unwind_tables);

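/*
 * Binary search for the entry whose [region_start, region_end] range
 * covers addr.  This relies on the table being sorted by region_start
 * and on the regions not overlapping; returns NULL if nothing matches.
 */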
static inline const struct unwind_table_entry *
find_unwind_entry_in_table(const struct unwind_table *table, unsigned long addr)
{
	const struct unwind_table_entry *e = NULL;
	unsigned long lo, hi, mid;

	lo = 0;
	hi = table->length - 1;

	while (lo <= hi) {
		mid = (hi - lo) / 2 + lo;
		e = &table->table[mid];
		if (addr < e->region_start)
			hi = mid - 1;
		else if (addr > e->region_end)
			lo = mid + 1;
		else
			return e;
	}

	return NULL;
}

static const struct unwind_table_entry *
find_unwind_entry(unsigned long addr)
{
	struct unwind_table *table;
	const struct unwind_table_entry *e = NULL;

	if (addr >= kernel_unwind_table.start &&
	    addr <= kernel_unwind_table.end)
		e = find_unwind_entry_in_table(&kernel_unwind_table, addr);
	else {
		unsigned long flags;

		spin_lock_irqsave(&unwind_lock, flags);
		list_for_each_entry(table, &unwind_tables, list) {
			if (addr >= table->start &&
			    addr <= table->end)
				e = find_unwind_entry_in_table(table, addr);
			if (e) {
				/* Move-to-front to exploit common traces */
				list_move(&table->list, &unwind_tables);
				break;
			}
		}
		spin_unlock_irqrestore(&unwind_lock, flags);
	}

	return e;
}

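/*
 * Fill in an unwind_table descriptor.  The raw section entries hold
 * offsets relative to the load address, so each region_start/region_end
 * is rebased to an absolute address here; table->start/end record the
 * overall code range covered and serve as the quick range check in
 * find_unwind_entry().
 */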
static void
unwind_table_init(struct unwind_table *table, const char *name,
		  unsigned long base_addr, unsigned long gp,
		  void *table_start, void *table_end)
{
	struct unwind_table_entry *start = table_start;
	struct unwind_table_entry *end =
		(struct unwind_table_entry *)table_end - 1;

	table->name = name;
	table->base_addr = base_addr;
	table->gp = gp;
	table->start = base_addr + start->region_start;
	table->end = base_addr + end->region_end;
	table->table = (struct unwind_table_entry *)table_start;
	table->length = end - start + 1;
	INIT_LIST_HEAD(&table->list);

	for (; start <= end; start++) {
		if (start < end &&
		    start->region_end > (start+1)->region_start) {
			printk("WARNING: Out of order unwind entry! %p and %p\n", start, start+1);
		}

		start->region_start += base_addr;
		start->region_end += base_addr;
	}
}

static int cmp_unwind_table_entry(const void *a, const void *b)
{
	return ((const struct unwind_table_entry *)a)->region_start
	     - ((const struct unwind_table_entry *)b)->region_start;
}

static void
unwind_table_sort(struct unwind_table_entry *start,
		  struct unwind_table_entry *finish)
{
	sort(start, finish - start, sizeof(struct unwind_table_entry),
	     cmp_unwind_table_entry, NULL);
}

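/*
 * Register an additional unwind table, e.g. for a loadable module.  The
 * entries are sorted in place before the descriptor is allocated and
 * added to the list.  A caller might use it roughly like this
 * (illustrative sketch only; the section variables are assumptions, not
 * code from this file):
 *
 *	tbl = unwind_table_add(mod->name, (unsigned long)mod_base, gp,
 *			       unwind_sec_start, unwind_sec_end);
 *	...
 *	unwind_table_remove(tbl);	// on module unload
 */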
struct unwind_table *
unwind_table_add(const char *name, unsigned long base_addr,
		 unsigned long gp,
		 void *start, void *end)
{
	struct unwind_table *table;
	unsigned long flags;
	struct unwind_table_entry *s = (struct unwind_table_entry *)start;
	struct unwind_table_entry *e = (struct unwind_table_entry *)end;

	unwind_table_sort(s, e);

	table = kmalloc(sizeof(struct unwind_table), GFP_USER);
	if (table == NULL)
		return NULL;
	unwind_table_init(table, name, base_addr, gp, start, end);
	spin_lock_irqsave(&unwind_lock, flags);
	list_add_tail(&table->list, &unwind_tables);
	spin_unlock_irqrestore(&unwind_lock, flags);

	return table;
}

void unwind_table_remove(struct unwind_table *table)
{
	unsigned long flags;

	spin_lock_irqsave(&unwind_lock, flags);
	list_del(&table->list);
	spin_unlock_irqrestore(&unwind_lock, flags);

	kfree(table);
}

/* Called from setup_arch to import the kernel unwind info */
int __init unwind_init(void)
{
	long start, stop;
	register unsigned long gp __asm__ ("r27");

	start = (long)&__start___unwind[0];
	stop = (long)&__stop___unwind[0];

	spin_lock_init(&unwind_lock);

	printk("unwind_init: start = 0x%lx, end = 0x%lx, entries = %lu\n",
	    start, stop,
	    (stop - start) / sizeof(struct unwind_table_entry));

	unwind_table_init(&kernel_unwind_table, "kernel", KERNEL_START,
			  gp,
			  &__start___unwind[0], &__stop___unwind[0]);
#if 0
	{
		int i;
		for (i = 0; i < 10; i++)
		{
			printk("region 0x%x-0x%x\n",
				__start___unwind[i].region_start,
				__start___unwind[i].region_end);
		}
	}
#endif
	return 0;
}

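/*
 * On 64-bit PA-RISC a C function pointer refers to a function
 * descriptor rather than to the code itself, so the real entry address
 * has to be fetched from the descriptor (fptr[2]); on 32-bit the
 * pointer already is the entry address (fptr[0]).  This lets
 * handle_interruption's address be compared against unwound pc values.
 */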
#ifdef CONFIG_64BIT
#define get_func_addr(fptr) fptr[2]
#else
#define get_func_addr(fptr) fptr[0]
#endif

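/*
 * Interruptions are a special case: handle_interruption() has the
 * interrupted context's pt_regs sitting on the stack just below its own
 * frame, so instead of decoding a prologue the previous sp and ip are
 * recovered directly from those saved registers.
 */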
static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int frame_size)
{
	extern void handle_interruption(int, struct pt_regs *);
	static unsigned long *hi = (unsigned long *)&handle_interruption;

	if (pc == get_func_addr(hi)) {
		struct pt_regs *regs = (struct pt_regs *)(info->sp - frame_size - PT_SZ_ALGN);
		dbg("Unwinding through handle_interruption()\n");
		info->prev_sp = regs->gr[30];
		info->prev_ip = regs->iaoq[0];

		return 1;
	}

	return 0;
}

static void unwind_frame_regs(struct unwind_frame_info *info)
{
	const struct unwind_table_entry *e;
	unsigned long npc;
	unsigned int insn;
	long frame_size = 0;
	int looking_for_rp, rpoffset = 0;

	e = find_unwind_entry(info->ip);
	if (e == NULL) {
		unsigned long sp;

		dbg("Cannot find unwind entry for 0x%lx; forced unwinding\n", info->ip);

#ifdef CONFIG_KALLSYMS
		/* Handle some frequent special cases.... */
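		/*
		 * These are assembly entry points with no unwind
		 * descriptors: _switch_to_ret sits on top of a bare
		 * callee-save frame, while ret_from_kernel_thread and
		 * syscall_exit simply terminate the trace.
		 */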
		{
			char symname[KSYM_NAME_LEN];
			char *modname;

			kallsyms_lookup(info->ip, NULL, NULL, &modname,
				symname);

			dbg("info->ip = 0x%lx, name = %s\n", info->ip, symname);

			if (strcmp(symname, "_switch_to_ret") == 0) {
				info->prev_sp = info->sp - CALLEE_SAVE_FRAME_SIZE;
				info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET);
				dbg("_switch_to_ret @ %lx - setting "
				    "prev_sp=%lx prev_ip=%lx\n",
				    info->ip, info->prev_sp,
				    info->prev_ip);
				return;
			} else if (strcmp(symname, "ret_from_kernel_thread") == 0 ||
				   strcmp(symname, "syscall_exit") == 0) {
				info->prev_ip = info->prev_sp = 0;
				return;
			}
		}
#endif

		/* Since we are doing the unwinding blind, we don't know if
		   we are adjusting the stack correctly or extracting the rp
		   correctly.  The rp is checked to see if it belongs to the
		   kernel text section; if not, we assume we don't have a
		   correct stack frame and continue to unwind the stack.
		   This is not quite correct, and will fail for loadable
		   modules. */
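		/* Stack frames are 64-byte aligned on PA-RISC and the
		   stack grows upward, so step downward through candidate
		   frames in 64-byte chunks until the value in the rp save
		   slot looks like a kernel text address. */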
		sp = info->sp & ~63;
		do {
			unsigned long tmp;

			info->prev_sp = sp - 64;
			info->prev_ip = 0;
			if (get_user(tmp, (unsigned long *)(info->prev_sp - RP_OFFSET)))
				break;
			info->prev_ip = tmp;
			sp = info->prev_sp;
		} while (!kernel_text_address(info->prev_ip));

		info->rp = 0;

		dbg("analyzing func @ %lx with no unwind info, setting "
		    "prev_sp=%lx prev_ip=%lx\n", info->ip,
		    info->prev_sp, info->prev_ip);
	} else {
		dbg("e->start = 0x%x, e->end = 0x%x, Save_SP = %d, "
		    "Save_RP = %d, Millicode = %d size = %u\n",
		    e->region_start, e->region_end, e->Save_SP, e->Save_RP,
		    e->Millicode, e->Total_frame_size);

		looking_for_rp = e->Save_RP;

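		/*
		 * Scan the prologue from region_start up to (but not past)
		 * the current ip.  Instructions that grow the frame
		 * (ldo/stwm/std,ma on sp) accumulate frame_size, and the
		 * stores of rp record where the return pointer was saved.
		 * Total_frame_size is encoded in 8-byte units in the unwind
		 * descriptor, hence the << 3 in the comparison.
		 */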
		for (npc = e->region_start;
		     (frame_size < (e->Total_frame_size << 3) ||
		      looking_for_rp) &&
		     npc < info->ip;
		     npc += 4) {

			insn = *(unsigned int *)npc;

			if ((insn & 0xffffc001) == 0x37de0000 ||
			    (insn & 0xffe00001) == 0x6fc00000) {
				/* ldo X(sp), sp, or stwm X,D(sp) */
				frame_size += (insn & 0x3fff) >> 1;
				dbg("analyzing func @ %lx, insn=%08x @ "
				    "%lx, frame_size = %ld\n", info->ip,
				    insn, npc, frame_size);
			} else if ((insn & 0xffe00009) == 0x73c00008) {
				/* std,ma X,D(sp) */
				frame_size += ((insn >> 4) & 0x3ff) << 3;
				dbg("analyzing func @ %lx, insn=%08x @ "
				    "%lx, frame_size = %ld\n", info->ip,
				    insn, npc, frame_size);
			} else if (insn == 0x6bc23fd9) {
				/* stw rp,-20(sp) */
				rpoffset = 20;
				looking_for_rp = 0;
				dbg("analyzing func @ %lx, insn=stw rp,"
				    "-20(sp) @ %lx\n", info->ip, npc);
			} else if (insn == 0x0fc212c1) {
				/* std rp,-16(sr0,sp) */
				rpoffset = 16;
				looking_for_rp = 0;
				dbg("analyzing func @ %lx, insn=std rp,"
				    "-16(sp) @ %lx\n", info->ip, npc);
			}
		}

		if (frame_size > e->Total_frame_size << 3)
			frame_size = e->Total_frame_size << 3;

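		/*
		 * handle_interruption() frames are handled by
		 * unwind_special(); for everything else the previous sp is
		 * simply sp minus the computed frame size, and the previous
		 * ip comes from r31 for millicode routines or from the rp
		 * save slot located by the prologue scan.
		 */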
		if (!unwind_special(info, e->region_start, frame_size)) {
			info->prev_sp = info->sp - frame_size;
			if (e->Millicode)
				info->rp = info->r31;
			else if (rpoffset)
				info->rp = *(unsigned long *)(info->prev_sp - rpoffset);
			info->prev_ip = info->rp;
			info->rp = 0;
		}

		dbg("analyzing func @ %lx, setting prev_sp=%lx "
		    "prev_ip=%lx npc=%lx\n", info->ip, info->prev_sp,
		    info->prev_ip, npc);
	}
}

void unwind_frame_init(struct unwind_frame_info *info, struct task_struct *t,
		       struct pt_regs *regs)
{
	memset(info, 0, sizeof(struct unwind_frame_info));
	info->t = t;
	info->sp = regs->gr[30];
	info->ip = regs->iaoq[0];
	info->rp = regs->gr[2];
	info->r31 = regs->gr[31];

	dbg("(%d) Start unwind from sp=%08lx ip=%08lx\n",
	    t ? (int)t->pid : -1, info->sp, info->ip);
}

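/*
 * Start an unwind of a sleeping task from the kernel sp/pc that were
 * saved at context-switch time (thread.regs.ksp/kpc).  A temporary copy
 * of pt_regs is used so the task's saved register state is left intact.
 */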
void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info, struct task_struct *t)
{
	struct pt_regs *r = &t->thread.regs;
	struct pt_regs *r2;

	r2 = kmalloc(sizeof(struct pt_regs), GFP_ATOMIC);
	if (!r2)
		return;
	*r2 = *r;
	r2->gr[30] = r->ksp;
	r2->iaoq[0] = r->kpc;
	unwind_frame_init(info, t, r2);
	kfree(r2);
}

void unwind_frame_init_running(struct unwind_frame_info *info, struct pt_regs *regs)
{
	unwind_frame_init(info, current, regs);
}

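/*
 * unwind_once() advances the frame info to the previous (calling) frame.
 * A typical backtrace loop looks roughly like this (illustrative sketch
 * only, not code from this file):
 *
 *	struct unwind_frame_info info;
 *
 *	unwind_frame_init_running(&info, regs);
 *	while (unwind_once(&info) == 0 && info.ip)
 *		printk(" [<%08lx>] %pS\n", info.ip, (void *)info.ip);
 *
 * It returns 0 after a successful step and -1 once no previous frame
 * could be determined.
 */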
int unwind_once(struct unwind_frame_info *next_frame)
{
	unwind_frame_regs(next_frame);

	if (next_frame->prev_sp == 0 ||
	    next_frame->prev_ip == 0)
		return -1;

	next_frame->sp = next_frame->prev_sp;
	next_frame->ip = next_frame->prev_ip;
	next_frame->prev_sp = 0;
	next_frame->prev_ip = 0;

	dbg("(%d) Continue unwind to sp=%08lx ip=%08lx\n",
	    next_frame->t ? (int)next_frame->t->pid : -1,
	    next_frame->sp, next_frame->ip);

	return 0;
}

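/*
 * Keep unwinding until the instruction address carries a non-zero
 * privilege level in its low two bits, i.e. until the frame belongs to
 * user space (kernel code runs at privilege level 0), or until the
 * unwind fails.
 */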
int unwind_to_user(struct unwind_frame_info *info)
{
	int ret;

	do {
		ret = unwind_once(info);
	} while (!ret && !(info->ip & 3));

	return ret;
}

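/*
 * return_address() hand-builds a pt_regs snapshot of the current
 * context, unwinds out of itself and its immediate caller, and then
 * walks 'level' further frames; 0 is returned if the unwind fails or
 * leaves kernel text before that depth is reached.
 */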
unsigned long return_address(unsigned int level)
{
	struct unwind_frame_info info;
	struct pt_regs r;
	unsigned long sp;

	/* initialize unwind info */
	asm volatile ("copy %%r30, %0" : "=r"(sp));
	memset(&r, 0, sizeof(struct pt_regs));
	r.iaoq[0] = (unsigned long) current_text_addr();
	r.gr[2] = (unsigned long) __builtin_return_address(0);
	r.gr[30] = sp;
	unwind_frame_init(&info, current, &r);

	/* unwind stack */
	++level;
	do {
		if (unwind_once(&info) < 0 || info.ip == 0)
			return 0;
		if (!kernel_text_address(info.ip))
			return 0;
	} while (info.ip && level--);

	return info.ip;
}
448