#include <linux/module.h>
#include <linux/sort.h>
#include <asm/ptrace.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>
#include <asm/orc_types.h>
#include <asm/orc_lookup.h>

#define orc_warn(fmt, ...) \
	printk_deferred_once(KERN_WARNING pr_fmt("WARNING: " fmt), ##__VA_ARGS__)

extern int __start_orc_unwind_ip[];
extern int __stop_orc_unwind_ip[];
extern struct orc_entry __start_orc_unwind[];
extern struct orc_entry __stop_orc_unwind[];

static DEFINE_MUTEX(sort_mutex);
int *cur_orc_ip_table = __start_orc_unwind_ip;
struct orc_entry *cur_orc_table = __start_orc_unwind;

unsigned int lookup_num_blocks;
bool orc_init;

/* Decode an IP-relative .orc_unwind_ip entry into an absolute address: */
static inline unsigned long orc_ip(const int *ip)
{
	return (unsigned long)ip + *ip;
}

static struct orc_entry *__orc_find(int *ip_table, struct orc_entry *u_table,
				    unsigned int num_entries, unsigned long ip)
{
	int *first = ip_table;
	int *last = ip_table + num_entries - 1;
	int *mid = first, *found = first;

	if (!num_entries)
		return NULL;

	/*
	 * Do a binary range search to find the rightmost duplicate of a given
	 * starting address.  Some entries are section terminators which are
	 * "weak" entries for ensuring there are no gaps.  They should be
	 * ignored when they conflict with a real entry.
	 */
	while (first <= last) {
		mid = first + ((last - first) / 2);

		if (orc_ip(mid) <= ip) {
			found = mid;
			first = mid + 1;
		} else
			last = mid - 1;
	}

	return u_table + (found - ip_table);
}

#ifdef CONFIG_MODULES
static struct orc_entry *orc_module_find(unsigned long ip)
{
	struct module *mod;

	mod = __module_address(ip);
	if (!mod || !mod->arch.orc_unwind || !mod->arch.orc_unwind_ip)
		return NULL;
	return __orc_find(mod->arch.orc_unwind_ip, mod->arch.orc_unwind,
			  mod->arch.num_orcs, ip);
}
#else
static struct orc_entry *orc_module_find(unsigned long ip)
{
	return NULL;
}
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
static struct orc_entry *orc_find(unsigned long ip);

/*
 * Ftrace dynamic trampolines do not have orc entries of their own.
 * But they are copies of the ftrace entries that are static and
 * defined in ftrace_*.S, which do have orc entries.
 *
 * If the unwinder comes across an ftrace trampoline, then find the
 * ftrace function that was used to create it, and use that ftrace
 * function's orc entry, as the placement of the return code in
 * the stack will be identical.
 */
static struct orc_entry *orc_ftrace_find(unsigned long ip)
{
	struct ftrace_ops *ops;
	unsigned long caller;

	ops = ftrace_ops_trampoline(ip);
	if (!ops)
		return NULL;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
		caller = (unsigned long)ftrace_regs_call;
	else
		caller = (unsigned long)ftrace_call;

	/* Prevent unlikely recursion */
	if (ip == caller)
		return NULL;

	return orc_find(caller);
}
#else
static struct orc_entry *orc_ftrace_find(unsigned long ip)
{
	return NULL;
}
#endif
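/*
 * Illustration of the table encoding and the range search above (the
 * addresses are made up): each .orc_unwind_ip slot holds a 32-bit offset
 * relative to the slot's own address, so orc_ip() recovers the absolute
 * starting address as "(unsigned long)ip + *ip".  If the decoded table is
 * { 0x100, 0x100, 0x140, ... }, a query for ip == 0x138 lands on the
 * rightmost 0x100 entry: the ORC state that took effect at 0x100 and stays
 * in effect until the next starting address.
 */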
/*
 * If we crash with IP==0, the last successfully executed instruction
 * was probably an indirect function call with a NULL function pointer,
 * and we don't have unwind information for NULL.
 * This hardcoded ORC entry for IP==0 allows us to unwind from a NULL function
 * pointer into its parent and then continue normally from there.
 */
static struct orc_entry null_orc_entry = {
	.sp_offset = sizeof(long),
	.sp_reg = ORC_REG_SP,
	.bp_reg = ORC_REG_UNDEFINED,
	.type = ORC_TYPE_CALL
};

static struct orc_entry *orc_find(unsigned long ip)
{
	struct orc_entry *orc;

	if (!orc_init)
		return NULL;

	if (ip == 0)
		return &null_orc_entry;

	/* For non-init vmlinux addresses, use the fast lookup table: */
	if (ip >= LOOKUP_START_IP && ip < LOOKUP_STOP_IP) {
		unsigned int idx, start, stop;

		idx = (ip - LOOKUP_START_IP) / LOOKUP_BLOCK_SIZE;

		if (unlikely(idx >= lookup_num_blocks-1)) {
			orc_warn("bad lookup idx: idx=%u num=%u ip=%pB\n",
				 idx, lookup_num_blocks, (void *)ip);
			return NULL;
		}

		start = orc_lookup[idx];
		stop = orc_lookup[idx + 1] + 1;

		if (unlikely((__start_orc_unwind + start >= __stop_orc_unwind) ||
			     (__start_orc_unwind + stop > __stop_orc_unwind))) {
			orc_warn("bad lookup value: idx=%u num=%u start=%u stop=%u ip=%pB\n",
				 idx, lookup_num_blocks, start, stop, (void *)ip);
			return NULL;
		}

		return __orc_find(__start_orc_unwind_ip + start,
				  __start_orc_unwind + start, stop - start, ip);
	}

	/* vmlinux .init slow lookup: */
	if (init_kernel_text(ip))
		return __orc_find(__start_orc_unwind_ip, __start_orc_unwind,
				  __stop_orc_unwind_ip - __start_orc_unwind_ip, ip);

	/* Module lookup: */
	orc = orc_module_find(ip);
	if (orc)
		return orc;

	return orc_ftrace_find(ip);
}

static void orc_sort_swap(void *_a, void *_b, int size)
{
	struct orc_entry *orc_a, *orc_b;
	struct orc_entry orc_tmp;
	int *a = _a, *b = _b, tmp;
	int delta = _b - _a;

	/*
	 * Swap the .orc_unwind_ip entries.  They are IP-relative offsets, so
	 * each value must be adjusted by the byte distance between the two
	 * slots to keep referring to the same absolute address:
	 */
	tmp = *a;
	*a = *b + delta;
	*b = tmp - delta;

	/* Swap the corresponding .orc_unwind entries: */
	orc_a = cur_orc_table + (a - cur_orc_ip_table);
	orc_b = cur_orc_table + (b - cur_orc_ip_table);
	orc_tmp = *orc_a;
	*orc_a = *orc_b;
	*orc_b = orc_tmp;
}

static int orc_sort_cmp(const void *_a, const void *_b)
{
	struct orc_entry *orc_a;
	const int *a = _a, *b = _b;
	unsigned long a_val = orc_ip(a);
	unsigned long b_val = orc_ip(b);

	if (a_val > b_val)
		return 1;
	if (a_val < b_val)
		return -1;

	/*
	 * The "weak" section terminator entries need to always be on the left
	 * to ensure the lookup code skips them in favor of real entries.
	 * These terminator entries exist to handle any gaps created by
	 * whitelisted .o files which didn't get objtool generation.
	 */
	orc_a = cur_orc_table + (a - cur_orc_ip_table);
	return orc_a->sp_reg == ORC_REG_UNDEFINED && !orc_a->end ? -1 : 1;
}
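/*
 * Worked example for orc_sort_swap() (made-up numbers): if slot a at
 * address A holds offset x (target A+x) and slot b at address B holds
 * offset y (target B+y), then after the swap slot a must hold
 * (B+y)-A = y+delta and slot b must hold (A+x)-B = x-delta, where
 * delta = B-A.  That is exactly the +/- delta adjustment above.
 */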
#ifdef CONFIG_MODULES
void unwind_module_init(struct module *mod, void *_orc_ip, size_t orc_ip_size,
			void *_orc, size_t orc_size)
{
	int *orc_ip = _orc_ip;
	struct orc_entry *orc = _orc;
	unsigned int num_entries = orc_ip_size / sizeof(int);

	WARN_ON_ONCE(orc_ip_size % sizeof(int) != 0 ||
		     orc_size % sizeof(*orc) != 0 ||
		     num_entries != orc_size / sizeof(*orc));

	/*
	 * The 'cur_orc_*' globals allow the orc_sort_swap() callback to
	 * associate an .orc_unwind_ip table entry with its corresponding
	 * .orc_unwind entry so they can both be swapped.
	 */
	mutex_lock(&sort_mutex);
	cur_orc_ip_table = orc_ip;
	cur_orc_table = orc;
	sort(orc_ip, num_entries, sizeof(int), orc_sort_cmp, orc_sort_swap);
	mutex_unlock(&sort_mutex);

	mod->arch.orc_unwind_ip = orc_ip;
	mod->arch.orc_unwind = orc;
	mod->arch.num_orcs = num_entries;
}
#endif

void __init unwind_init(void)
{
	size_t orc_ip_size = (void *)__stop_orc_unwind_ip - (void *)__start_orc_unwind_ip;
	size_t orc_size = (void *)__stop_orc_unwind - (void *)__start_orc_unwind;
	size_t num_entries = orc_ip_size / sizeof(int);
	struct orc_entry *orc;
	int i;

	if (!num_entries || orc_ip_size % sizeof(int) != 0 ||
	    orc_size % sizeof(struct orc_entry) != 0 ||
	    num_entries != orc_size / sizeof(struct orc_entry)) {
		orc_warn("Bad or missing .orc_unwind table.  Disabling unwinder.\n");
		return;
	}

	/* Sort the .orc_unwind and .orc_unwind_ip tables: */
	sort(__start_orc_unwind_ip, num_entries, sizeof(int), orc_sort_cmp,
	     orc_sort_swap);

	/* Initialize the fast lookup table: */
	lookup_num_blocks = orc_lookup_end - orc_lookup;
	for (i = 0; i < lookup_num_blocks-1; i++) {
		orc = __orc_find(__start_orc_unwind_ip, __start_orc_unwind,
				 num_entries,
				 LOOKUP_START_IP + (LOOKUP_BLOCK_SIZE * i));
		if (!orc) {
			orc_warn("Corrupt .orc_unwind table.  Disabling unwinder.\n");
			return;
		}

		orc_lookup[i] = orc - __start_orc_unwind;
	}

	/* Initialize the ending block: */
	orc = __orc_find(__start_orc_unwind_ip, __start_orc_unwind, num_entries,
			 LOOKUP_STOP_IP);
	if (!orc) {
		orc_warn("Corrupt .orc_unwind table.  Disabling unwinder.\n");
		return;
	}
	orc_lookup[lookup_num_blocks-1] = orc - __start_orc_unwind;

	orc_init = true;
}
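/*
 * A sketch of what unwind_init() just built (sizes are illustrative):
 * orc_lookup[i] holds the .orc_unwind index of the entry in effect at
 * LOOKUP_START_IP + i * LOOKUP_BLOCK_SIZE.  A later orc_find(ip) computes
 * i from ip and only binary-searches the slice
 * [orc_lookup[i], orc_lookup[i+1]] instead of the whole table.
 */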
unsigned long unwind_get_return_address(struct unwind_state *state)
{
	if (unwind_done(state))
		return 0;

	return __kernel_text_address(state->ip) ? state->ip : 0;
}
EXPORT_SYMBOL_GPL(unwind_get_return_address);

unsigned long *unwind_get_return_address_ptr(struct unwind_state *state)
{
	if (unwind_done(state))
		return NULL;

	if (state->regs)
		return &state->regs->ip;

	if (state->sp)
		return (unsigned long *)state->sp - 1;

	return NULL;
}

static bool stack_access_ok(struct unwind_state *state, unsigned long _addr,
			    size_t len)
{
	struct stack_info *info = &state->stack_info;
	void *addr = (void *)_addr;

	if (!on_stack(info, addr, len) &&
	    (get_stack_info(addr, state->task, info, &state->stack_mask)))
		return false;

	return true;
}

static bool deref_stack_reg(struct unwind_state *state, unsigned long addr,
			    unsigned long *val)
{
	if (!stack_access_ok(state, addr, sizeof(long)))
		return false;

	*val = READ_ONCE_NOCHECK(*(unsigned long *)addr);
	return true;
}

static bool deref_stack_regs(struct unwind_state *state, unsigned long addr,
			     unsigned long *ip, unsigned long *sp)
{
	struct pt_regs *regs = (struct pt_regs *)addr;

	/* x86-32 support will be more complicated due to the &regs->sp hack */
	BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_32));

	if (!stack_access_ok(state, addr, sizeof(struct pt_regs)))
		return false;

	*ip = regs->ip;
	*sp = regs->sp;
	return true;
}

static bool deref_stack_iret_regs(struct unwind_state *state, unsigned long addr,
				  unsigned long *ip, unsigned long *sp)
{
	struct pt_regs *regs = (void *)addr - IRET_FRAME_OFFSET;

	if (!stack_access_ok(state, addr, IRET_FRAME_SIZE))
		return false;

	*ip = regs->ip;
	*sp = regs->sp;
	return true;
}
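/*
 * Note on deref_stack_iret_regs(): an iret frame contains only the tail of
 * a pt_regs (the hardware-pushed ip/cs/flags/sp/ss, IRET_FRAME_SIZE bytes).
 * Backing the pt_regs pointer up by IRET_FRAME_OFFSET lets the regs->ip and
 * regs->sp field offsets be reused even though the leading part of the
 * struct doesn't exist on the stack and must never be dereferenced.
 */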
bool unwind_next_frame(struct unwind_state *state)
{
	unsigned long ip_p, sp, orig_ip = state->ip, prev_sp = state->sp;
	enum stack_type prev_type = state->stack_info.type;
	struct orc_entry *orc;
	bool indirect = false;

	if (unwind_done(state))
		return false;

	/* Don't let modules unload while we're reading their ORC data. */
	preempt_disable();

	/* End-of-stack check for user tasks: */
	if (state->regs && user_mode(state->regs))
		goto the_end;

	/*
	 * Find the orc_entry associated with the text address.
	 *
	 * Decrement call return addresses by one so they work for sibling
	 * calls and calls to noreturn functions.
	 */
	orc = orc_find(state->signal ? state->ip : state->ip - 1);
	if (!orc)
		goto err;

	/* End-of-stack check for kernel threads: */
	if (orc->sp_reg == ORC_REG_UNDEFINED) {
		if (!orc->end)
			goto err;

		goto the_end;
	}

	/* Find the previous frame's stack: */
	switch (orc->sp_reg) {
	case ORC_REG_SP:
		sp = state->sp + orc->sp_offset;
		break;

	case ORC_REG_BP:
		sp = state->bp + orc->sp_offset;
		break;

	case ORC_REG_SP_INDIRECT:
		sp = state->sp + orc->sp_offset;
		indirect = true;
		break;

	case ORC_REG_BP_INDIRECT:
		sp = state->bp + orc->sp_offset;
		indirect = true;
		break;

	case ORC_REG_R10:
		if (!state->regs || !state->full_regs) {
			orc_warn("missing regs for base reg R10 at ip %pB\n",
				 (void *)state->ip);
			goto err;
		}
		sp = state->regs->r10;
		break;

	case ORC_REG_R13:
		if (!state->regs || !state->full_regs) {
			orc_warn("missing regs for base reg R13 at ip %pB\n",
				 (void *)state->ip);
			goto err;
		}
		sp = state->regs->r13;
		break;

	case ORC_REG_DI:
		if (!state->regs || !state->full_regs) {
			orc_warn("missing regs for base reg DI at ip %pB\n",
				 (void *)state->ip);
			goto err;
		}
		sp = state->regs->di;
		break;

	case ORC_REG_DX:
		if (!state->regs || !state->full_regs) {
			orc_warn("missing regs for base reg DX at ip %pB\n",
				 (void *)state->ip);
			goto err;
		}
		sp = state->regs->dx;
		break;

	default:
		orc_warn("unknown SP base reg %d for ip %pB\n",
			 orc->sp_reg, (void *)state->ip);
		goto err;
	}

	if (indirect) {
		if (!deref_stack_reg(state, sp, &sp))
			goto err;
	}

	/* Find IP, SP and possibly regs: */
	switch (orc->type) {
	case ORC_TYPE_CALL:
		ip_p = sp - sizeof(long);

		if (!deref_stack_reg(state, ip_p, &state->ip))
			goto err;

		state->ip = ftrace_graph_ret_addr(state->task, &state->graph_idx,
						  state->ip, (void *)ip_p);

		state->sp = sp;
		state->regs = NULL;
		state->signal = false;
		break;

	case ORC_TYPE_REGS:
		if (!deref_stack_regs(state, sp, &state->ip, &state->sp)) {
			orc_warn("can't dereference registers at %p for ip %pB\n",
				 (void *)sp, (void *)orig_ip);
			goto err;
		}

		state->regs = (struct pt_regs *)sp;
		state->full_regs = true;
		state->signal = true;
		break;

	case ORC_TYPE_REGS_IRET:
		if (!deref_stack_iret_regs(state, sp, &state->ip, &state->sp)) {
			orc_warn("can't dereference iret registers at %p for ip %pB\n",
				 (void *)sp, (void *)orig_ip);
			goto err;
		}

		state->regs = (void *)sp - IRET_FRAME_OFFSET;
		state->full_regs = false;
		state->signal = true;
		break;

	default:
		orc_warn("unknown .orc_unwind entry type %d for ip %pB\n",
			 orc->type, (void *)orig_ip);
		break;
	}

	/* Find BP: */
	switch (orc->bp_reg) {
	case ORC_REG_UNDEFINED:
		if (state->regs && state->full_regs)
			state->bp = state->regs->bp;
		break;

	case ORC_REG_PREV_SP:
		if (!deref_stack_reg(state, sp + orc->bp_offset, &state->bp))
			goto err;
		break;

	case ORC_REG_BP:
		if (!deref_stack_reg(state, state->bp + orc->bp_offset, &state->bp))
			goto err;
		break;

	default:
		orc_warn("unknown BP base reg %d for ip %pB\n",
			 orc->bp_reg, (void *)orig_ip);
		goto err;
	}
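	/*
	 * The wrong-direction check below is deliberately limited to frames
	 * on the same stack type: when the unwinder crosses a stack switch
	 * (e.g. from an exception or irq stack back to the task stack), the
	 * new SP lives on a different stack and comparing it against prev_sp
	 * would be meaningless.
	 */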
	/* Prevent a recursive loop due to bad ORC data: */
	if (state->stack_info.type == prev_type &&
	    on_stack(&state->stack_info, (void *)state->sp, sizeof(long)) &&
	    state->sp <= prev_sp) {
		orc_warn("stack going in the wrong direction? ip=%pB\n",
			 (void *)orig_ip);
		goto err;
	}

	preempt_enable();
	return true;

err:
	state->error = true;

the_end:
	preempt_enable();
	state->stack_info.type = STACK_TYPE_UNKNOWN;
	return false;
}
EXPORT_SYMBOL_GPL(unwind_next_frame);

void __unwind_start(struct unwind_state *state, struct task_struct *task,
		    struct pt_regs *regs, unsigned long *first_frame)
{
	memset(state, 0, sizeof(*state));
	state->task = task;

	/*
	 * Refuse to unwind the stack of a task while it's executing on another
	 * CPU.  This check is racy, but that's ok: the unwinder has other
	 * checks to prevent it from going off the rails.
	 */
	if (task_on_another_cpu(task))
		goto done;

	if (regs) {
		if (user_mode(regs))
			goto done;

		state->ip = regs->ip;
		state->sp = kernel_stack_pointer(regs);
		state->bp = regs->bp;
		state->regs = regs;
		state->full_regs = true;
		state->signal = true;

	} else if (task == current) {
		/* Snapshot the current IP, SP and BP for a live unwind: */
		asm volatile("lea (%%rip), %0\n\t"
			     "mov %%rsp, %1\n\t"
			     "mov %%rbp, %2\n\t"
			     : "=r" (state->ip), "=r" (state->sp),
			       "=r" (state->bp));

	} else {
		struct inactive_task_frame *frame = (void *)task->thread.sp;

		state->sp = task->thread.sp;
		state->bp = READ_ONCE_NOCHECK(frame->bp);
		state->ip = READ_ONCE_NOCHECK(frame->ret_addr);
	}

	if (get_stack_info((unsigned long *)state->sp, state->task,
			   &state->stack_info, &state->stack_mask)) {
		/*
		 * We weren't on a valid stack.  It's possible that
		 * we overflowed a valid stack into a guard page.
		 * See if the next page up is valid so that we can
		 * generate some kind of backtrace if this happens.
		 */
		void *next_page = (void *)PAGE_ALIGN((unsigned long)state->sp);
		if (get_stack_info(next_page, state->task, &state->stack_info,
				   &state->stack_mask))
			return;
	}

	/*
	 * The caller can provide the address of the first frame directly
	 * (first_frame) or indirectly (regs->sp) to indicate which stack frame
	 * to start unwinding at.  Skip ahead until we reach it.
	 */

	/* When starting from regs, skip the regs frame: */
	if (regs) {
		unwind_next_frame(state);
		return;
	}

	/* Otherwise, skip ahead to the user-specified starting frame: */
	while (!unwind_done(state) &&
	       (!on_stack(&state->stack_info, first_frame, sizeof(long)) ||
		state->sp <= (unsigned long)first_frame))
		unwind_next_frame(state);

	return;

done:
	state->stack_info.type = STACK_TYPE_UNKNOWN;
	return;
}
EXPORT_SYMBOL_GPL(__unwind_start);
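/*
 * A minimal sketch of how callers typically drive this API (illustrative
 * only; the real callers, e.g. the stack trace code, live elsewhere):
 *
 *	struct unwind_state state;
 *	unsigned long addr;
 *
 *	for (unwind_start(&state, task, regs, first_frame);
 *	     !unwind_done(&state);
 *	     unwind_next_frame(&state)) {
 *		addr = unwind_get_return_address(&state);
 *		if (!addr)
 *			break;
 *		printk("%pB\n", (void *)addr);
 *	}
 */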