// SPDX-License-Identifier: GPL-2.0
/*
 * Kernel unwinding support
 *
 * (c) 2002-2004 Randolph Chung <tausq@debian.org>
 *
 * Derived partially from the IA64 implementation. The PA-RISC
 * Runtime Architecture Document is also a useful reference to
 * understand what is happening here
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kallsyms.h>
#include <linux/sort.h>

#include <linux/uaccess.h>
#include <asm/assembly.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>

#include <asm/unwind.h>

/* #define DEBUG 1 */
#ifdef DEBUG
#define dbg(x...) printk(x)
#else
#define dbg(x...)
#endif

#define KERNEL_START (KERNEL_BINARY_TEXT_START)

extern struct unwind_table_entry __start___unwind[];
extern struct unwind_table_entry __stop___unwind[];

static DEFINE_SPINLOCK(unwind_lock);
/*
 * the kernel unwind block is not dynamically allocated so that
 * we can call unwind_init as early in the bootup process as
 * possible (before the slab allocator is initialized)
 */
static struct unwind_table kernel_unwind_table __read_mostly;
static LIST_HEAD(unwind_tables);

/* Binary-search one sorted table for the entry whose region covers addr. */
static inline const struct unwind_table_entry *
find_unwind_entry_in_table(const struct unwind_table *table, unsigned long addr)
{
	const struct unwind_table_entry *e = NULL;
	unsigned long lo, hi, mid;

	lo = 0;
	hi = table->length - 1;

	while (lo <= hi) {
		mid = (hi - lo) / 2 + lo;
		e = &table->table[mid];
		if (addr < e->region_start)
			hi = mid - 1;
		else if (addr > e->region_end)
			lo = mid + 1;
		else
			return e;
	}

	return NULL;
}

/* Try the kernel unwind table first, then any registered (module) tables. */
static const struct unwind_table_entry *
find_unwind_entry(unsigned long addr)
{
	struct unwind_table *table;
	const struct unwind_table_entry *e = NULL;

	if (addr >= kernel_unwind_table.start &&
	    addr <= kernel_unwind_table.end)
		e = find_unwind_entry_in_table(&kernel_unwind_table, addr);
	else {
		unsigned long flags;

		spin_lock_irqsave(&unwind_lock, flags);
		list_for_each_entry(table, &unwind_tables, list) {
			if (addr >= table->start &&
			    addr <= table->end)
				e = find_unwind_entry_in_table(table, addr);
			if (e) {
				/* Move-to-front to exploit common traces */
				list_move(&table->list, &unwind_tables);
				break;
			}
		}
		spin_unlock_irqrestore(&unwind_lock, flags);
	}

	return e;
}

/* Relocate the entries to absolute addresses and fill in the table header. */
static void
unwind_table_init(struct unwind_table *table, const char *name,
		  unsigned long base_addr, unsigned long gp,
		  void *table_start, void *table_end)
{
	struct unwind_table_entry *start = table_start;
	struct unwind_table_entry *end =
		(struct unwind_table_entry *)table_end - 1;

	table->name = name;
	table->base_addr = base_addr;
	table->gp = gp;
	table->start = base_addr + start->region_start;
	table->end = base_addr + end->region_end;
	table->table = (struct unwind_table_entry *)table_start;
	table->length = end - start + 1;
	INIT_LIST_HEAD(&table->list);

	for (; start <= end; start++) {
		if (start < end &&
		    start->region_end > (start+1)->region_start) {
			printk("WARNING: Out of order unwind entry! %p and %p\n",
			       start, start+1);
		}

		start->region_start += base_addr;
		start->region_end += base_addr;
	}
}

static int cmp_unwind_table_entry(const void *a, const void *b)
{
	return ((const struct unwind_table_entry *)a)->region_start
	     - ((const struct unwind_table_entry *)b)->region_start;
}

static void
unwind_table_sort(struct unwind_table_entry *start,
		  struct unwind_table_entry *finish)
{
	sort(start, finish - start, sizeof(struct unwind_table_entry),
	     cmp_unwind_table_entry, NULL);
}

/* Register an additional unwind table (e.g. for a module); the entries
 * are sorted and relocated in place. */
struct unwind_table *
unwind_table_add(const char *name, unsigned long base_addr,
		 unsigned long gp,
		 void *start, void *end)
{
	struct unwind_table *table;
	unsigned long flags;
	struct unwind_table_entry *s = (struct unwind_table_entry *)start;
	struct unwind_table_entry *e = (struct unwind_table_entry *)end;

	unwind_table_sort(s, e);

	table = kmalloc(sizeof(struct unwind_table), GFP_USER);
	if (table == NULL)
		return NULL;
	unwind_table_init(table, name, base_addr, gp, start, end);
	spin_lock_irqsave(&unwind_lock, flags);
	list_add_tail(&table->list, &unwind_tables);
	spin_unlock_irqrestore(&unwind_lock, flags);

	return table;
}

void unwind_table_remove(struct unwind_table *table)
{
	unsigned long flags;

	spin_lock_irqsave(&unwind_lock, flags);
	list_del(&table->list);
	spin_unlock_irqrestore(&unwind_lock, flags);

	kfree(table);
}
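
/*
 * Illustrative sketch only, kept out of the build: how a caller (for
 * instance the module loader) might register a module's unwind section
 * with unwind_table_add() and drop it again on unload.  The function and
 * variable names here are hypothetical, not the real module-loader code.
 */
#if 0
static struct unwind_table *example_register_unwind(const char *name,
		unsigned long gp, void *uw_start, void *uw_end)
{
	/* base_addr of 0: the section already holds absolute addresses */
	return unwind_table_add(name, 0, gp, uw_start, uw_end);
}

static void example_unregister_unwind(struct unwind_table *table)
{
	if (table)
		unwind_table_remove(table);
}
#endif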

/* Called from setup_arch to import the kernel unwind info */
int __init unwind_init(void)
{
	long start, stop;
	register unsigned long gp __asm__ ("r27");

	start = (long)&__start___unwind[0];
	stop = (long)&__stop___unwind[0];

	printk("unwind_init: start = 0x%lx, end = 0x%lx, entries = %lu\n",
	    start, stop,
	    (stop - start) / sizeof(struct unwind_table_entry));

	unwind_table_init(&kernel_unwind_table, "kernel", KERNEL_START,
			  gp,
			  &__start___unwind[0], &__stop___unwind[0]);
#if 0
	{
		int i;
		for (i = 0; i < 10; i++)
		{
			printk("region 0x%x-0x%x\n",
				__start___unwind[i].region_start,
				__start___unwind[i].region_end);
		}
	}
#endif
	return 0;
}

#ifdef CONFIG_64BIT
#define get_func_addr(fptr) fptr[2]
#else
#define get_func_addr(fptr) fptr[0]
#endif

/* Frames for handle_interruption() carry a struct pt_regs with the
 * interrupted context; unwind through it directly. */
static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int frame_size)
{
	extern void handle_interruption(int, struct pt_regs *);
	static unsigned long *hi = (unsigned long *)&handle_interruption;

	if (pc == get_func_addr(hi)) {
		struct pt_regs *regs = (struct pt_regs *)(info->sp - frame_size - PT_SZ_ALGN);
		dbg("Unwinding through handle_interruption()\n");
		info->prev_sp = regs->gr[30];
		info->prev_ip = regs->iaoq[0];

		return 1;
	}

	return 0;
}

/* Compute prev_sp/prev_ip for the frame described by info, either from
 * the unwind table entry covering info->ip or, failing that, by blindly
 * scanning the stack. */
static void unwind_frame_regs(struct unwind_frame_info *info)
{
	const struct unwind_table_entry *e;
	unsigned long npc;
	unsigned int insn;
	long frame_size = 0;
	int looking_for_rp, rpoffset = 0;

	e = find_unwind_entry(info->ip);
	if (e == NULL) {
		unsigned long sp;

		dbg("Cannot find unwind entry for 0x%lx; forced unwinding\n", info->ip);

#ifdef CONFIG_KALLSYMS
		/* Handle some frequent special cases.... */
		{
			char symname[KSYM_NAME_LEN];
			char *modname;

			kallsyms_lookup(info->ip, NULL, NULL, &modname,
				symname);

			dbg("info->ip = 0x%lx, name = %s\n", info->ip, symname);

			if (strcmp(symname, "_switch_to_ret") == 0) {
				info->prev_sp = info->sp - CALLEE_SAVE_FRAME_SIZE;
				info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET);
				dbg("_switch_to_ret @ %lx - setting "
				    "prev_sp=%lx prev_ip=%lx\n",
				    info->ip, info->prev_sp,
				    info->prev_ip);
				return;
			} else if (strcmp(symname, "ret_from_kernel_thread") == 0 ||
				   strcmp(symname, "syscall_exit") == 0) {
				info->prev_ip = info->prev_sp = 0;
				return;
			}
		}
#endif

		/* Since we are doing the unwinding blind, we don't know if
		   we are adjusting the stack correctly or extracting the rp
		   correctly. The rp is checked to see if it belongs to the
		   kernel text section, if not we assume we don't have a
		   correct stack frame and we continue to unwind the stack.
		   This is not quite correct, and will fail for loadable
		   modules. */
		sp = info->sp & ~63;
		do {
			unsigned long tmp;

			info->prev_sp = sp - 64;
			info->prev_ip = 0;

			/* The stack is at the end inside the thread_union
			 * struct. If we reach data, we have reached the
			 * beginning of the stack and should stop unwinding. */
			if (info->prev_sp >= (unsigned long) task_thread_info(info->t) &&
			    info->prev_sp < ((unsigned long) task_thread_info(info->t)
						+ THREAD_SZ_ALGN)) {
				info->prev_sp = 0;
				break;
			}

			if (get_user(tmp, (unsigned long *)(info->prev_sp - RP_OFFSET)))
				break;
			info->prev_ip = tmp;
			sp = info->prev_sp;
		} while (!kernel_text_address(info->prev_ip));

		info->rp = 0;

		dbg("analyzing func @ %lx with no unwind info, setting "
		    "prev_sp=%lx prev_ip=%lx\n", info->ip,
		    info->prev_sp, info->prev_ip);
	} else {
		dbg("e->start = 0x%x, e->end = 0x%x, Save_SP = %d, "
		    "Save_RP = %d, Millicode = %d size = %u\n",
		    e->region_start, e->region_end, e->Save_SP, e->Save_RP,
		    e->Millicode, e->Total_frame_size);

		looking_for_rp = e->Save_RP;

		for (npc = e->region_start;
		     (frame_size < (e->Total_frame_size << 3) ||
		      looking_for_rp) &&
		     npc < info->ip;
		     npc += 4) {

			insn = *(unsigned int *)npc;

			if ((insn & 0xffffc001) == 0x37de0000 ||
			    (insn & 0xffe00001) == 0x6fc00000) {
				/* ldo X(sp), sp, or stwm X,D(sp) */
				frame_size += (insn & 0x3fff) >> 1;
				dbg("analyzing func @ %lx, insn=%08x @ "
				    "%lx, frame_size = %ld\n", info->ip,
				    insn, npc, frame_size);
			} else if ((insn & 0xffe00009) == 0x73c00008) {
				/* std,ma X,D(sp) */
				frame_size += ((insn >> 4) & 0x3ff) << 3;
				dbg("analyzing func @ %lx, insn=%08x @ "
				    "%lx, frame_size = %ld\n", info->ip,
				    insn, npc, frame_size);
			} else if (insn == 0x6bc23fd9) {
				/* stw rp,-20(sp) */
				rpoffset = 20;
				looking_for_rp = 0;
				dbg("analyzing func @ %lx, insn=stw rp,"
				    "-20(sp) @ %lx\n", info->ip, npc);
			} else if (insn == 0x0fc212c1) {
				/* std rp,-16(sr0,sp) */
				rpoffset = 16;
				looking_for_rp = 0;
				dbg("analyzing func @ %lx, insn=std rp,"
				    "-16(sp) @ %lx\n", info->ip, npc);
			}
		}

		if (frame_size > e->Total_frame_size << 3)
			frame_size = e->Total_frame_size << 3;

		if (!unwind_special(info, e->region_start, frame_size)) {
			info->prev_sp = info->sp - frame_size;
			if (e->Millicode)
				info->rp = info->r31;
			else if (rpoffset)
				info->rp = *(unsigned long *)(info->prev_sp -
							      rpoffset);
			info->prev_ip = info->rp;
			info->rp = 0;
		}

		dbg("analyzing func @ %lx, setting prev_sp=%lx "
		    "prev_ip=%lx npc=%lx\n", info->ip, info->prev_sp,
		    info->prev_ip, npc);
	}
}
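
/*
 * Illustrative sketch only, kept out of the build: the arithmetic the
 * prologue scan in unwind_frame_regs() applies to "ldo X(sp),sp" and
 * "stwm X,D(sp)" words.  The sample opcode below is an assumption based
 * on the PA-RISC low-sign-extended im14 format; check the architecture
 * manual before relying on it.
 */
#if 0
static long example_im14_frame_bytes(unsigned int insn)
{
	/* positive im14: sign bit (bit 0) clear, magnitude in bits 13..1 */
	return (insn & 0x3fff) >> 1;
}
/* example_im14_frame_bytes(0x37de0080) == 64, i.e. "ldo 64(sp),sp" */
#endif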

void unwind_frame_init(struct unwind_frame_info *info, struct task_struct *t,
		       struct pt_regs *regs)
{
	memset(info, 0, sizeof(struct unwind_frame_info));
	info->t = t;
	info->sp = regs->gr[30];
	info->ip = regs->iaoq[0];
	info->rp = regs->gr[2];
	info->r31 = regs->gr[31];

	dbg("(%d) Start unwind from sp=%08lx ip=%08lx\n",
	    t ? (int)t->pid : -1, info->sp, info->ip);
}

void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info, struct task_struct *t)
{
	struct pt_regs *r = &t->thread.regs;
	struct pt_regs *r2;

	r2 = kmalloc(sizeof(struct pt_regs), GFP_ATOMIC);
	if (!r2)
		return;
	*r2 = *r;
	r2->gr[30] = r->ksp;
	r2->iaoq[0] = r->kpc;
	unwind_frame_init(info, t, r2);
	kfree(r2);
}

void unwind_frame_init_running(struct unwind_frame_info *info, struct pt_regs *regs)
{
	unwind_frame_init(info, current, regs);
}

int unwind_once(struct unwind_frame_info *next_frame)
{
	unwind_frame_regs(next_frame);

	if (next_frame->prev_sp == 0 ||
	    next_frame->prev_ip == 0)
		return -1;

	next_frame->sp = next_frame->prev_sp;
	next_frame->ip = next_frame->prev_ip;
	next_frame->prev_sp = 0;
	next_frame->prev_ip = 0;

	dbg("(%d) Continue unwind to sp=%08lx ip=%08lx\n",
	    next_frame->t ? (int)next_frame->t->pid : -1,
	    next_frame->sp, next_frame->ip);

	return 0;
}

int unwind_to_user(struct unwind_frame_info *info)
{
	int ret;

	do {
		ret = unwind_once(info);
	} while (!ret && !(info->ip & 3));

	return ret;
}
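
/*
 * Illustrative sketch only, kept out of the build: a typical consumer of
 * the API above, walking a blocked task's kernel stack one frame at a
 * time.  The function name and the 32-frame cap are made up for this
 * example; see the arch backtrace code for the real users.
 */
#if 0
static void example_backtrace(struct task_struct *task)
{
	struct unwind_frame_info info;
	int i;

	unwind_frame_init_from_blocked_task(&info, task);
	for (i = 0; i < 32; i++) {
		if (unwind_once(&info) < 0 || info.ip == 0)
			break;
		if (kernel_text_address(info.ip))
			printk(" [<%08lx>] %pS\n", info.ip, (void *)info.ip);
	}
}
#endif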

unsigned long return_address(unsigned int level)
{
	struct unwind_frame_info info;
	struct pt_regs r;
	unsigned long sp;

	/* initialize unwind info */
	asm volatile ("copy %%r30, %0" : "=r"(sp));
	memset(&r, 0, sizeof(struct pt_regs));
	r.iaoq[0] = (unsigned long) current_text_addr();
	r.gr[2] = (unsigned long) __builtin_return_address(0);
	r.gr[30] = sp;
	unwind_frame_init(&info, current, &r);

	/* unwind stack */
	++level;
	do {
		if (unwind_once(&info) < 0 || info.ip == 0)
			return 0;
		if (!kernel_text_address(info.ip))
			return 0;
	} while (info.ip && level--);

	return info.ip;
}
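
/*
 * Illustrative sketch only, kept out of the build: return_address(level)
 * is intended to mirror __builtin_return_address() as seen from the
 * calling function, so level 0 yields that function's own return address
 * and higher levels walk further up the stack (0 is returned once the
 * unwinder leaves kernel text).  The wrapper name below is hypothetical.
 */
#if 0
static unsigned long example_callsite(void)
{
	/* the address our caller will return to */
	return return_address(0);
}
#endif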