/*
 * Kernel unwinding support
 *
 * (c) 2002-2004 Randolph Chung <tausq@debian.org>
 *
 * Derived partially from the IA64 implementation. The PA-RISC
 * Runtime Architecture Document is also a useful reference to
 * understand what is happening here
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kallsyms.h>
#include <linux/sort.h>

#include <linux/uaccess.h>
#include <asm/assembly.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>

#include <asm/unwind.h>

/* #define DEBUG 1 */
#ifdef DEBUG
#define dbg(x...) printk(x)
#else
#define dbg(x...)
#endif

#define KERNEL_START (KERNEL_BINARY_TEXT_START)

extern struct unwind_table_entry __start___unwind[];
extern struct unwind_table_entry __stop___unwind[];

static DEFINE_SPINLOCK(unwind_lock);
/*
 * the kernel unwind block is not dynamically allocated so that
 * we can call unwind_init as early in the bootup process as
 * possible (before the slab allocator is initialized)
 */
static struct unwind_table kernel_unwind_table __read_mostly;
static LIST_HEAD(unwind_tables);

static inline const struct unwind_table_entry *
find_unwind_entry_in_table(const struct unwind_table *table, unsigned long addr)
{
	const struct unwind_table_entry *e = NULL;
	unsigned long lo, hi, mid;

	lo = 0;
	hi = table->length - 1;

	while (lo <= hi) {
		mid = (hi - lo) / 2 + lo;
		e = &table->table[mid];
		if (addr < e->region_start)
			hi = mid - 1;
		else if (addr > e->region_end)
			lo = mid + 1;
		else
			return e;
	}

	return NULL;
}

static const struct unwind_table_entry *
find_unwind_entry(unsigned long addr)
{
	struct unwind_table *table;
	const struct unwind_table_entry *e = NULL;

	if (addr >= kernel_unwind_table.start &&
	    addr <= kernel_unwind_table.end)
		e = find_unwind_entry_in_table(&kernel_unwind_table, addr);
	else {
		unsigned long flags;

		spin_lock_irqsave(&unwind_lock, flags);
		list_for_each_entry(table, &unwind_tables, list) {
			if (addr >= table->start &&
			    addr <= table->end)
				e = find_unwind_entry_in_table(table, addr);
			if (e) {
				/* Move-to-front to exploit common traces */
				list_move(&table->list, &unwind_tables);
				break;
			}
		}
		spin_unlock_irqrestore(&unwind_lock, flags);
	}

	return e;
}
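
/*
 * Illustrative note (not part of the original file): once the tables have
 * been initialized, find_unwind_entry() maps any text address to the unwind
 * entry whose region contains it, or returns NULL.  A caller interested only
 * in the function bounds could do, roughly:
 *
 *	const struct unwind_table_entry *e = find_unwind_entry(addr);
 *
 *	if (e)
 *		dbg("0x%lx lies in [0x%x, 0x%x]\n",
 *		    addr, e->region_start, e->region_end);
 *
 * The region_start/region_end fields are absolute addresses here because
 * unwind_table_init() below adds base_addr to each entry.
 */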

static void
unwind_table_init(struct unwind_table *table, const char *name,
		  unsigned long base_addr, unsigned long gp,
		  void *table_start, void *table_end)
{
	struct unwind_table_entry *start = table_start;
	struct unwind_table_entry *end =
		(struct unwind_table_entry *)table_end - 1;

	table->name = name;
	table->base_addr = base_addr;
	table->gp = gp;
	table->start = base_addr + start->region_start;
	table->end = base_addr + end->region_end;
	table->table = (struct unwind_table_entry *)table_start;
	table->length = end - start + 1;
	INIT_LIST_HEAD(&table->list);

	for (; start <= end; start++) {
		if (start < end &&
		    start->region_end > (start+1)->region_start) {
			printk("WARNING: Out of order unwind entry! %p and %p\n",
			       start, start+1);
		}

		start->region_start += base_addr;
		start->region_end += base_addr;
	}
}

static int cmp_unwind_table_entry(const void *a, const void *b)
{
	return ((const struct unwind_table_entry *)a)->region_start
		- ((const struct unwind_table_entry *)b)->region_start;
}

static void
unwind_table_sort(struct unwind_table_entry *start,
		  struct unwind_table_entry *finish)
{
	sort(start, finish - start, sizeof(struct unwind_table_entry),
	     cmp_unwind_table_entry, NULL);
}

struct unwind_table *
unwind_table_add(const char *name, unsigned long base_addr,
		 unsigned long gp,
		 void *start, void *end)
{
	struct unwind_table *table;
	unsigned long flags;
	struct unwind_table_entry *s = (struct unwind_table_entry *)start;
	struct unwind_table_entry *e = (struct unwind_table_entry *)end;

	unwind_table_sort(s, e);

	table = kmalloc(sizeof(struct unwind_table), GFP_USER);
	if (table == NULL)
		return NULL;
	unwind_table_init(table, name, base_addr, gp, start, end);
	spin_lock_irqsave(&unwind_lock, flags);
	list_add_tail(&table->list, &unwind_tables);
	spin_unlock_irqrestore(&unwind_lock, flags);

	return table;
}

void unwind_table_remove(struct unwind_table *table)
{
	unsigned long flags;

	spin_lock_irqsave(&unwind_lock, flags);
	list_del(&table->list);
	spin_unlock_irqrestore(&unwind_lock, flags);

	kfree(table);
}

/* Called from setup_arch to import the kernel unwind info */
int __init unwind_init(void)
{
	long start, stop;
	register unsigned long gp __asm__ ("r27");

	start = (long)&__start___unwind[0];
	stop = (long)&__stop___unwind[0];

	printk("unwind_init: start = 0x%lx, end = 0x%lx, entries = %lu\n",
	       start, stop,
	       (stop - start) / sizeof(struct unwind_table_entry));

	unwind_table_init(&kernel_unwind_table, "kernel", KERNEL_START,
			  gp,
			  &__start___unwind[0], &__stop___unwind[0]);
#if 0
	{
		int i;
		for (i = 0; i < 10; i++)
		{
			printk("region 0x%x-0x%x\n",
				__start___unwind[i].region_start,
				__start___unwind[i].region_end);
		}
	}
#endif
	return 0;
}

#ifdef CONFIG_64BIT
#define get_func_addr(fptr) fptr[2]
#else
#define get_func_addr(fptr) fptr[0]
#endif

static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int frame_size)
{
	extern void handle_interruption(int, struct pt_regs *);
	static unsigned long *hi = (unsigned long *)&handle_interruption;

	if (pc == get_func_addr(hi)) {
		struct pt_regs *regs = (struct pt_regs *)(info->sp - frame_size - PT_SZ_ALGN);
		dbg("Unwinding through handle_interruption()\n");
		info->prev_sp = regs->gr[30];
		info->prev_ip = regs->iaoq[0];

		return 1;
	}

	return 0;
}
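
/*
 * Illustrative sketch (not from the original source): the prologue scanner
 * in unwind_frame_regs() below decodes frames created by typical PA-RISC
 * prologues, e.g. on 32-bit kernels:
 *
 *	stw	rp,-20(sp)	; save return pointer      -> rpoffset = 20
 *	stwm	r4,64(sp)	; allocate a 64-byte frame -> frame_size += 64
 *
 * The 64-bit forms "std rp,-16(sp)" and "std,ma X,D(sp)" are handled as
 * well, as is frame growth via "ldo X(sp),sp".  The scanner walks the
 * instructions from region_start up to the current ip, accumulating the
 * frame size and remembering where rp was saved.
 */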

static void unwind_frame_regs(struct unwind_frame_info *info)
{
	const struct unwind_table_entry *e;
	unsigned long npc;
	unsigned int insn;
	long frame_size = 0;
	int looking_for_rp, rpoffset = 0;

	e = find_unwind_entry(info->ip);
	if (e == NULL) {
		unsigned long sp;

		dbg("Cannot find unwind entry for 0x%lx; forced unwinding\n",
		    info->ip);

#ifdef CONFIG_KALLSYMS
		/* Handle some frequent special cases.... */
		{
			char symname[KSYM_NAME_LEN];
			char *modname;

			kallsyms_lookup(info->ip, NULL, NULL, &modname,
				symname);

			dbg("info->ip = 0x%lx, name = %s\n", info->ip, symname);

			if (strcmp(symname, "_switch_to_ret") == 0) {
				info->prev_sp = info->sp - CALLEE_SAVE_FRAME_SIZE;
				info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET);
				dbg("_switch_to_ret @ %lx - setting "
				    "prev_sp=%lx prev_ip=%lx\n",
				    info->ip, info->prev_sp,
				    info->prev_ip);
				return;
			} else if (strcmp(symname, "ret_from_kernel_thread") == 0 ||
				   strcmp(symname, "syscall_exit") == 0) {
				info->prev_ip = info->prev_sp = 0;
				return;
			}
		}
#endif

		/* Since we are doing the unwinding blind, we don't know if
		   we are adjusting the stack correctly or extracting the rp
		   correctly. The rp is checked to see if it belongs to the
		   kernel text section, if not we assume we don't have a
		   correct stack frame and we continue to unwind the stack.
		   This is not quite correct, and will fail for loadable
		   modules. */
		sp = info->sp & ~63;
		do {
			unsigned long tmp;

			info->prev_sp = sp - 64;
			info->prev_ip = 0;
			if (get_user(tmp, (unsigned long *)(info->prev_sp - RP_OFFSET)))
				break;
			info->prev_ip = tmp;
			sp = info->prev_sp;
		} while (!kernel_text_address(info->prev_ip));

		info->rp = 0;

		dbg("analyzing func @ %lx with no unwind info, setting "
		    "prev_sp=%lx prev_ip=%lx\n", info->ip,
		    info->prev_sp, info->prev_ip);
	} else {
		dbg("e->start = 0x%x, e->end = 0x%x, Save_SP = %d, "
		    "Save_RP = %d, Millicode = %d size = %u\n",
		    e->region_start, e->region_end, e->Save_SP, e->Save_RP,
		    e->Millicode, e->Total_frame_size);

		looking_for_rp = e->Save_RP;

		for (npc = e->region_start;
		     (frame_size < (e->Total_frame_size << 3) ||
		      looking_for_rp) &&
		     npc < info->ip;
		     npc += 4) {

			insn = *(unsigned int *)npc;

			if ((insn & 0xffffc001) == 0x37de0000 ||
			    (insn & 0xffe00001) == 0x6fc00000) {
				/* ldo X(sp), sp, or stwm X,D(sp) */
				frame_size += (insn & 0x3fff) >> 1;
				dbg("analyzing func @ %lx, insn=%08x @ "
				    "%lx, frame_size = %ld\n", info->ip,
				    insn, npc, frame_size);
			} else if ((insn & 0xffe00009) == 0x73c00008) {
				/* std,ma X,D(sp) */
				frame_size += ((insn >> 4) & 0x3ff) << 3;
				dbg("analyzing func @ %lx, insn=%08x @ "
				    "%lx, frame_size = %ld\n", info->ip,
				    insn, npc, frame_size);
			} else if (insn == 0x6bc23fd9) {
				/* stw rp,-20(sp) */
				rpoffset = 20;
				looking_for_rp = 0;
				dbg("analyzing func @ %lx, insn=stw rp,"
				    "-20(sp) @ %lx\n", info->ip, npc);
			} else if (insn == 0x0fc212c1) {
				/* std rp,-16(sr0,sp) */
				rpoffset = 16;
				looking_for_rp = 0;
				dbg("analyzing func @ %lx, insn=std rp,"
				    "-16(sp) @ %lx\n", info->ip, npc);
			}
		}

		if (frame_size > e->Total_frame_size << 3)
			frame_size = e->Total_frame_size << 3;

		if (!unwind_special(info, e->region_start, frame_size)) {
			info->prev_sp = info->sp - frame_size;
			if (e->Millicode)
				info->rp = info->r31;
			else if (rpoffset)
				info->rp = *(unsigned long *)(info->prev_sp - rpoffset);
			info->prev_ip = info->rp;
			info->rp = 0;
		}

		dbg("analyzing func @ %lx, setting prev_sp=%lx "
		    "prev_ip=%lx npc=%lx\n", info->ip, info->prev_sp,
		    info->prev_ip, npc);
	}
}
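
/*
 * The initializers below seed a struct unwind_frame_info from different
 * sources: an explicit pt_regs snapshot, the saved kernel context (ksp/kpc)
 * of a blocked task, or a pt_regs of the currently running task.
 * unwind_once() then steps the frame back one caller at a time using
 * unwind_frame_regs() above.
 */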

void unwind_frame_init(struct unwind_frame_info *info, struct task_struct *t,
		       struct pt_regs *regs)
{
	memset(info, 0, sizeof(struct unwind_frame_info));
	info->t = t;
	info->sp = regs->gr[30];
	info->ip = regs->iaoq[0];
	info->rp = regs->gr[2];
	info->r31 = regs->gr[31];

	dbg("(%d) Start unwind from sp=%08lx ip=%08lx\n",
	    t ? (int)t->pid : -1, info->sp, info->ip);
}

void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info, struct task_struct *t)
{
	struct pt_regs *r = &t->thread.regs;
	struct pt_regs *r2;

	r2 = kmalloc(sizeof(struct pt_regs), GFP_ATOMIC);
	if (!r2)
		return;
	*r2 = *r;
	r2->gr[30] = r->ksp;
	r2->iaoq[0] = r->kpc;
	unwind_frame_init(info, t, r2);
	kfree(r2);
}

void unwind_frame_init_running(struct unwind_frame_info *info, struct pt_regs *regs)
{
	unwind_frame_init(info, current, regs);
}

int unwind_once(struct unwind_frame_info *next_frame)
{
	unwind_frame_regs(next_frame);

	if (next_frame->prev_sp == 0 ||
	    next_frame->prev_ip == 0)
		return -1;

	next_frame->sp = next_frame->prev_sp;
	next_frame->ip = next_frame->prev_ip;
	next_frame->prev_sp = 0;
	next_frame->prev_ip = 0;

	dbg("(%d) Continue unwind to sp=%08lx ip=%08lx\n",
	    next_frame->t ? (int)next_frame->t->pid : -1,
	    next_frame->sp, next_frame->ip);

	return 0;
}

int unwind_to_user(struct unwind_frame_info *info)
{
	int ret;

	do {
		ret = unwind_once(info);
	} while (!ret && !(info->ip & 3));

	return ret;
}

unsigned long return_address(unsigned int level)
{
	struct unwind_frame_info info;
	struct pt_regs r;
	unsigned long sp;

	/* initialize unwind info */
	asm volatile ("copy %%r30, %0" : "=r"(sp));
	memset(&r, 0, sizeof(struct pt_regs));
	r.iaoq[0] = (unsigned long) current_text_addr();
	r.gr[2] = (unsigned long) __builtin_return_address(0);
	r.gr[30] = sp;
	unwind_frame_init(&info, current, &r);

	/* unwind stack */
	++level;
	do {
		if (unwind_once(&info) < 0 || info.ip == 0)
			return 0;
		if (!kernel_text_address(info.ip))
			return 0;
	} while (info.ip && level--);

	return info.ip;
}
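
/*
 * Illustrative usage sketch (not part of the original file): a backtrace
 * walker built on this interface initializes a frame and then repeatedly
 * calls unwind_once() until it fails or leaves the kernel text, roughly:
 *
 *	struct unwind_frame_info info;
 *
 *	unwind_frame_init_from_blocked_task(&info, task);
 *	while (unwind_once(&info) >= 0 && info.ip != 0) {
 *		if (kernel_text_address(info.ip))
 *			printk(" [<%08lx>] %pS\n", info.ip, (void *)info.ip);
 *	}
 *
 * This is roughly how the parisc stack dumping code consumes the unwinder;
 * the actual callers live elsewhere in the architecture code.
 */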