// SPDX-License-Identifier: GPL-2.0
/*
 * Kernel unwinding support
 *
 * (c) 2002-2004 Randolph Chung <tausq@debian.org>
 *
 * Derived partially from the IA64 implementation. The PA-RISC
 * Runtime Architecture Document is also a useful reference to
 * understand what is happening here
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/sched/task_stack.h>

#include <linux/uaccess.h>
#include <asm/assembly.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>

#include <asm/unwind.h>
#include <asm/switch_to.h>
#include <asm/sections.h>
#include <asm/ftrace.h>

/* #define DEBUG 1 */
#ifdef DEBUG
#define dbg(x...)	pr_debug(x)
#else
#define dbg(x...)	do { } while (0)
#endif

#define KERNEL_START (KERNEL_BINARY_TEXT_START)

/* Bounds of the kernel's own unwind-entry section, laid down by the linker. */
extern struct unwind_table_entry __start___unwind[];
extern struct unwind_table_entry __stop___unwind[];

/* Protects the list of dynamically registered unwind tables below. */
static DEFINE_SPINLOCK(unwind_lock);
/*
 * the kernel unwind block is not dynamically allocated so that
 * we can call unwind_init as early in the bootup process as
 * possible (before the slab allocator is initialized)
 */
static struct unwind_table kernel_unwind_table __ro_after_init;
static LIST_HEAD(unwind_tables);	/* tables added later via unwind_table_add() */

/*
 * Binary-search one unwind table for the entry whose
 * [region_start, region_end] range contains @addr.
 *
 * The table must be sorted by region_start (unwind_table_add() sorts it
 * before registration).  Returns NULL when no entry covers @addr.
 *
 * Note: @hi is unsigned, so "hi = mid - 1" would wrap if @addr preceded
 * the very first entry; callers avoid that by checking
 * addr >= table->start first (table->start is the first entry's
 * rebased region_start, see unwind_table_init()).
 */
static inline const struct unwind_table_entry *
find_unwind_entry_in_table(const struct unwind_table *table, unsigned long addr)
{
	const struct unwind_table_entry *e = NULL;
	unsigned long lo, hi, mid;

	lo = 0;
	hi = table->length - 1;

	while (lo <= hi) {
		/* overflow-safe midpoint */
		mid = (hi - lo) / 2 + lo;
		e = &table->table[mid];
		if (addr < e->region_start)
			hi = mid - 1;
		else if (addr > e->region_end)
			lo = mid + 1;
		else
			return e;
	}

	return NULL;
}

/*
 * Find the unwind entry covering @addr.
 *
 * The statically built kernel table is tried first without locking (it is
 * populated once at boot and marked __ro_after_init); otherwise the
 * dynamically registered tables are scanned under unwind_lock.
 * Returns NULL when no table covers @addr.
 */
static const struct unwind_table_entry *
find_unwind_entry(unsigned long addr)
{
	struct unwind_table *table;
	const struct unwind_table_entry *e = NULL;

	if (addr >= kernel_unwind_table.start &&
	    addr <= kernel_unwind_table.end)
		e = find_unwind_entry_in_table(&kernel_unwind_table, addr);
	else {
		unsigned long flags;

		spin_lock_irqsave(&unwind_lock, flags);
		list_for_each_entry(table, &unwind_tables, list) {
			if (addr >= table->start &&
			    addr <= table->end)
				e = find_unwind_entry_in_table(table, addr);
			if (e) {
				/* Move-to-front to exploit common traces */
				list_move(&table->list, &unwind_tables);
				break;
			}
		}
		spin_unlock_irqrestore(&unwind_lock, flags);
	}

	return e;
}

/*
 * Initialize @table to describe the entries in [table_start, table_end)
 * and rebase each entry's region offsets into absolute addresses by
 * adding @base_addr.
 *
 * Entries are expected to already be sorted by region_start; a pair of
 * neighbours whose regions overlap is reported with pr_warn() but kept.
 * The table is NOT added to the global list here — callers do that
 * themselves (or, for the kernel table, never do).
 */
static void
unwind_table_init(struct unwind_table *table, const char *name,
		  unsigned long base_addr, unsigned long gp,
		  void *table_start, void *table_end)
{
	struct unwind_table_entry *start = table_start;
	/* last valid entry: table_end is one-past-the-end */
	struct unwind_table_entry *end =
		(struct unwind_table_entry *)table_end - 1;

	table->name = name;
	table->base_addr = base_addr;
	table->gp = gp;
	table->start = base_addr + start->region_start;
	table->end = base_addr + end->region_end;
	table->table = (struct unwind_table_entry *)table_start;
	table->length = end - start + 1;
	INIT_LIST_HEAD(&table->list);

	for (; start <= end; start++) {
		if (start < end &&
		    start->region_end > (start+1)->region_start) {
			pr_warn("Out of order unwind entry! %px and %px\n",
				start, start+1);
		}

		start->region_start += base_addr;
		start->region_end += base_addr;
	}
}

/*
 * sort() comparator: order unwind entries by region_start.
 * NOTE(review): plain subtraction assumes the region_start offsets are
 * small enough that their difference fits in an int — confirm against
 * the declaration of struct unwind_table_entry.
 */
static int cmp_unwind_table_entry(const void *a, const void *b)
{
	return ((const struct unwind_table_entry *)a)->region_start
	    - ((const struct unwind_table_entry *)b)->region_start;
}

/* Sort the entries in [start, finish) by region_start for binary search. */
static void
unwind_table_sort(struct unwind_table_entry *start,
		  struct unwind_table_entry *finish)
{
	sort(start, finish - start, sizeof(struct unwind_table_entry),
	     cmp_unwind_table_entry, NULL);
}

/*
 * Register a new unwind table (e.g. for a just-loaded module).
 *
 * Sorts the raw entries in place, allocates and initializes a table
 * descriptor, and links it onto the global list under unwind_lock.
 * Returns the new table, or NULL on allocation failure (in which case
 * the entries have still been sorted and rebasing has NOT occurred).
 * The returned pointer is what unwind_table_remove() takes back.
 */
struct unwind_table *
unwind_table_add(const char *name, unsigned long base_addr,
		 unsigned long gp,
		 void *start, void *end)
{
	struct unwind_table *table;
	unsigned long flags;
	struct unwind_table_entry *s = (struct unwind_table_entry *)start;
	struct unwind_table_entry *e = (struct unwind_table_entry *)end;

	unwind_table_sort(s, e);

	table = kmalloc(sizeof(struct unwind_table), GFP_USER);
	if (table == NULL)
		return NULL;
	unwind_table_init(table, name, base_addr, gp, start, end);
	spin_lock_irqsave(&unwind_lock, flags);
	list_add_tail(&table->list, &unwind_tables);
	spin_unlock_irqrestore(&unwind_lock, flags);

	return table;
}

/*
 * Unregister and free a table previously returned by unwind_table_add().
 * The entry storage itself (start/end passed to unwind_table_add) is
 * owned by the caller and is not freed here.
 */
void unwind_table_remove(struct unwind_table *table)
{
	unsigned long flags;

	spin_lock_irqsave(&unwind_lock, flags);
	list_del(&table->list);
	spin_unlock_irqrestore(&unwind_lock, flags);

	kfree(table);
}

/* Called from setup_arch to import the kernel unwind info */
int __init unwind_init(void)
{
	/* only consumed by dbg(), hence __maybe_unused when DEBUG is off */
	long start __maybe_unused, stop __maybe_unused;
	/* snapshot the current global data pointer from %r27 */
	register unsigned long gp __asm__ ("r27");

	start = (long)&__start___unwind[0];
	stop = (long)&__stop___unwind[0];

	dbg("unwind_init: start = 0x%lx, end = 0x%lx, entries = %lu\n",
	    start, stop,
	    (stop - start) / sizeof(struct unwind_table_entry));

	unwind_table_init(&kernel_unwind_table, "kernel", KERNEL_START,
			  gp,
			  &__start___unwind[0], &__stop___unwind[0]);
#if 0
	{
		int i;
		for (i = 0; i < 10; i++)
		{
			printk("region 0x%x-0x%x\n",
				__start___unwind[i].region_start,
				__start___unwind[i].region_end);
		}
	}
#endif
	return 0;
}

/*
 * Does @pc point at the kernel function whose descriptor-or-address is
 * @fn?  dereference_kernel_function_descriptor() resolves the 64-bit
 * function-descriptor indirection so the comparison works on both
 * 32- and 64-bit parisc.
 */
static bool pc_is_kernel_fn(unsigned long pc, void *fn)
{
	return (unsigned long)dereference_kernel_function_descriptor(fn) == pc;
}

/*
 * Handle the hand-written assembly entry points that do not follow the
 * normal calling convention.  If @pc matches one of them, fill in
 * info->prev_sp/prev_ip (and possibly info->rp) from the saved state
 * and return 1; return 0 to let the generic prologue scan take over.
 */
static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int frame_size)
{
	/*
	 * We have to use void * instead of a function pointer, because
	 * function pointers aren't a pointer to the function on 64-bit.
	 * Make them const so the compiler knows they live in .text
	 * Note: We could use dereference_kernel_function_descriptor()
	 * instead but we want to keep it simple here.
	 */
	extern void * const handle_interruption;
	extern void * const ret_from_kernel_thread;
	extern void * const syscall_exit;
	extern void * const intr_return;
	extern void * const _switch_to_ret;
#ifdef CONFIG_IRQSTACKS
	extern void * const _call_on_stack;
#endif /* CONFIG_IRQSTACKS */

	if (pc_is_kernel_fn(pc, handle_interruption)) {
		/* interrupted state: a struct pt_regs sits below this frame */
		struct pt_regs *regs = (struct pt_regs *)(info->sp - frame_size - PT_SZ_ALGN);
		dbg("Unwinding through handle_interruption()\n");
		info->prev_sp = regs->gr[30];
		info->prev_ip = regs->iaoq[0];
		return 1;
	}

	if (pc_is_kernel_fn(pc, ret_from_kernel_thread) ||
	    pc_is_kernel_fn(pc, syscall_exit)) {
		/* bottom of a kernel stack: nothing further to unwind */
		info->prev_sp = info->prev_ip = 0;
		return 1;
	}

	if (pc_is_kernel_fn(pc, intr_return)) {
		struct pt_regs *regs;

		dbg("Found intr_return()\n");
		regs = (struct pt_regs *)(info->sp - PT_SZ_ALGN);
		info->prev_sp = regs->gr[30];
		info->prev_ip = regs->iaoq[0];
		info->rp = regs->gr[2];
		return 1;
	}

	if (pc_is_kernel_fn(pc, _switch_to) ||
	    pc_is_kernel_fn(pc, _switch_to_ret)) {
		/* context switch: callee-save frame holds the return point */
		info->prev_sp = info->sp - CALLEE_SAVE_FRAME_SIZE;
		info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET);
		return 1;
	}

#ifdef CONFIG_IRQSTACKS
	if (pc_is_kernel_fn(pc, _call_on_stack)) {
		/* hop back from the IRQ stack to the interrupted stack */
		info->prev_sp = *(unsigned long *)(info->sp - FRAME_SIZE - REG_SZ);
		info->prev_ip = *(unsigned long *)(info->sp - FRAME_SIZE - RP_OFFSET);
		return 1;
	}
#endif
	return 0;
}

/*
 * Compute prev_sp/prev_ip for the frame described by info->ip/info->sp.
 *
 * With an unwind entry, the function's prologue is scanned instruction
 * by instruction (from region_start up to, at most, info->ip) to
 * discover the actual frame size and where rp was spilled.  Without
 * one, the stack is walked blind in 64-byte steps until a plausible
 * kernel-text return address is found (see the inline comment).
 */
static void unwind_frame_regs(struct unwind_frame_info *info)
{
	const struct unwind_table_entry *e;
	unsigned long npc;
	unsigned int insn;
	long frame_size = 0;
	int looking_for_rp, rpoffset = 0;

	e = find_unwind_entry(info->ip);
	if (e == NULL) {
		unsigned long sp;

		dbg("Cannot find unwind entry for %pS; forced unwinding\n",
			(void *) info->ip);

		/* Since we are doing the unwinding blind, we don't know if
		   we are adjusting the stack correctly or extracting the rp
		   correctly. The rp is checked to see if it belongs to the
		   kernel text section, if not we assume we don't have a
		   correct stack frame and we continue to unwind the stack.
		   This is not quite correct, and will fail for loadable
		   modules. */
		sp = info->sp & ~63;	/* align down to the 64-byte step */
		do {
			unsigned long tmp;

			info->prev_sp = sp - 64;
			info->prev_ip = 0;

			/* Check if stack is inside kernel stack area */
			if ((info->prev_sp - (unsigned long) task_stack_page(info->t))
					>= THREAD_SIZE) {
				info->prev_sp = 0;
				break;
			}

			/* non-faulting read: the candidate slot may be bogus */
			if (copy_from_kernel_nofault(&tmp,
			    (void *)info->prev_sp - RP_OFFSET, sizeof(tmp)))
				break;
			info->prev_ip = tmp;
			sp = info->prev_sp;
		} while (!kernel_text_address(info->prev_ip));

		info->rp = 0;

		dbg("analyzing func @ %lx with no unwind info, setting "
		    "prev_sp=%lx prev_ip=%lx\n", info->ip,
		    info->prev_sp, info->prev_ip);
	} else {
		dbg("e->start = 0x%x, e->end = 0x%x, Save_SP = %d, "
		    "Save_RP = %d, Millicode = %d size = %u\n",
		    e->region_start, e->region_end, e->Save_SP, e->Save_RP,
		    e->Millicode, e->Total_frame_size);

		looking_for_rp = e->Save_RP;

		/*
		 * Walk the prologue (4 bytes per instruction).  Stop once
		 * both the advertised frame size has been accounted for
		 * (Total_frame_size appears to be in 8-byte units, hence
		 * the << 3) and the rp spill (if any) has been seen, or
		 * when we reach the current ip.
		 */
		for (npc = e->region_start;
		     (frame_size < (e->Total_frame_size << 3) ||
		      looking_for_rp) &&
		     npc < info->ip;
		     npc += 4) {

			insn = *(unsigned int *)npc;

			if ((insn & 0xffffc001) == 0x37de0000 ||
			    (insn & 0xffe00001) == 0x6fc00000) {
				/* ldo X(sp), sp, or stwm X,D(sp) */
				frame_size += (insn & 0x3fff) >> 1;
				dbg("analyzing func @ %lx, insn=%08x @ "
				    "%lx, frame_size = %ld\n", info->ip,
				    insn, npc, frame_size);
			} else if ((insn & 0xffe00009) == 0x73c00008) {
				/* std,ma X,D(sp) */
				frame_size += ((insn >> 4) & 0x3ff) << 3;
				dbg("analyzing func @ %lx, insn=%08x @ "
				    "%lx, frame_size = %ld\n", info->ip,
				    insn, npc, frame_size);
			} else if (insn == 0x6bc23fd9) {
				/* stw rp,-20(sp) */
				rpoffset = 20;
				looking_for_rp = 0;
				dbg("analyzing func @ %lx, insn=stw rp,"
				    "-20(sp) @ %lx\n", info->ip, npc);
			} else if (insn == 0x0fc212c1) {
				/* std rp,-16(sr0,sp) */
				rpoffset = 16;
				looking_for_rp = 0;
				dbg("analyzing func @ %lx, insn=std rp,"
				    "-16(sp) @ %lx\n", info->ip, npc);
			}
		}

		/* clamp to the advertised total in case the scan overshot */
		if (frame_size > e->Total_frame_size << 3)
			frame_size = e->Total_frame_size << 3;

		if (!unwind_special(info, e->region_start, frame_size)) {
			/* ordinary frame: pop it and fetch the saved rp */
			info->prev_sp = info->sp - frame_size;
			if (e->Millicode)
				info->rp = info->r31;
			else if (rpoffset)
				info->rp = *(unsigned long *)(info->prev_sp - rpoffset);
			info->prev_ip = info->rp;
			info->rp = 0;
		}

		dbg("analyzing func @ %lx, setting prev_sp=%lx "
		    "prev_ip=%lx npc=%lx\n", info->ip, info->prev_sp,
		    info->prev_ip, npc);
	}
}

/*
 * Seed an unwind from an explicit register snapshot: sp from gr[30],
 * ip from iaoq[0], rp from gr[2], and r31 (used for millicode frames).
 */
void unwind_frame_init(struct unwind_frame_info *info, struct task_struct *t,
		       struct pt_regs *regs)
{
	memset(info, 0, sizeof(struct unwind_frame_info));
	info->t = t;
	info->sp = regs->gr[30];
	info->ip = regs->iaoq[0];
	info->rp = regs->gr[2];
	info->r31 = regs->gr[31];

	dbg("(%d) Start unwind from sp=%08lx ip=%08lx\n",
	    t ? (int)t->pid : -1, info->sp, info->ip);
}

/*
 * Seed an unwind for a task that is not running: start from its saved
 * kernel sp/pc (thread.regs.ksp/kpc) instead of the live gr[30]/iaoq.
 * A scratch pt_regs copy is used so the task's own regs stay untouched.
 * NOTE(review): on kmalloc failure this returns with *info left
 * completely untouched — callers see an uninitialized frame; confirm
 * all callers pre-initialize or check.
 */
void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info, struct task_struct *t)
{
	struct pt_regs *r = &t->thread.regs;
	struct pt_regs *r2;

	r2 = kmalloc(sizeof(struct pt_regs), GFP_ATOMIC);
	if (!r2)
		return;
	*r2 = *r;
	r2->gr[30] = r->ksp;
	r2->iaoq[0] = r->kpc;
	unwind_frame_init(info, t, r2);
	kfree(r2);
}

/* Read the live stack pointer (%r30) of the calling context. */
#define get_parisc_stackpointer() ({ \
	unsigned long sp; \
	__asm__("copy %%r30, %0" : "=r"(sp)); \
	(sp); \
})

/*
 * General-purpose unwind seeding.  @task may be NULL (meaning current);
 * @regs may be NULL for current, in which case a minimal snapshot of
 * this call site (ip, caller's rp, live sp) is synthesized.  Blocked
 * tasks are seeded from their saved context instead.
 */
void unwind_frame_init_task(struct unwind_frame_info *info,
	struct task_struct *task, struct pt_regs *regs)
{
	task = task ? task : current;

	if (task == current) {
		struct pt_regs r;

		if (!regs) {
			memset(&r, 0, sizeof(r));
			r.iaoq[0] = _THIS_IP_;
			r.gr[2] = _RET_IP_;
			r.gr[30] = get_parisc_stackpointer();
			regs = &r;
		}
		unwind_frame_init(info, task, regs);
	} else {
		unwind_frame_init_from_blocked_task(info, task);
	}
}

/*
 * Step one frame up the stack.  Returns 0 and updates sp/ip on
 * success, -1 when unwind_frame_regs() could not produce a previous
 * frame (prev_sp or prev_ip of zero terminates the walk).
 */
int unwind_once(struct unwind_frame_info *next_frame)
{
	unwind_frame_regs(next_frame);

	if (next_frame->prev_sp == 0 ||
	    next_frame->prev_ip == 0)
		return -1;

	/* promote the computed previous frame to the current one */
	next_frame->sp = next_frame->prev_sp;
	next_frame->ip = next_frame->prev_ip;
	next_frame->prev_sp = 0;
	next_frame->prev_ip = 0;

	dbg("(%d) Continue unwind to sp=%08lx ip=%08lx\n",
	    next_frame->t ? (int)next_frame->t->pid : -1,
	    next_frame->sp, next_frame->ip);

	return 0;
}

/*
 * Unwind until we leave kernel space or fail.  The loop stops once the
 * low two bits of ip become non-zero — presumably the IAOQ privilege
 * bits marking a user-space address; verify against the PA-RISC
 * architecture manual.  Returns the last unwind_once() result.
 */
int unwind_to_user(struct unwind_frame_info *info)
{
	int ret;

	do {
		ret = unwind_once(info);
	} while (!ret && !(info->ip & 3));

	return ret;
}

/*
 * Return the kernel-text return address @level frames above the caller,
 * or 0 when the walk fails or leaves kernel text.
 */
unsigned long return_address(unsigned int level)
{
	struct unwind_frame_info info;

	/* initialize unwind info */
	unwind_frame_init_task(&info, current, NULL);

	/* unwind stack */
	/* NOTE(review): +2 skips the frames introduced by this helper
	   itself and its seeding — confirm against actual call depth */
	level += 2;
	do {
		if (unwind_once(&info) < 0 || info.ip == 0)
			return 0;
		if (!kernel_text_address(info.ip))
			return 0;
	} while (info.ip && level--);

	return info.ip;
}