// SPDX-License-Identifier: GPL-2.0-only
#include <linux/module.h>
#include <linux/sort.h>
#include <asm/ptrace.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>
#include <asm/orc_types.h>
#include <asm/orc_lookup.h>

#define orc_warn(fmt, ...) \
        printk_deferred_once(KERN_WARNING "WARNING: " fmt, ##__VA_ARGS__)

#define orc_warn_current(args...)                               \
({                                                              \
        if (state->task == current)                             \
                orc_warn(args);                                 \
})

extern int __start_orc_unwind_ip[];
extern int __stop_orc_unwind_ip[];
extern struct orc_entry __start_orc_unwind[];
extern struct orc_entry __stop_orc_unwind[];

static bool orc_init __ro_after_init;
static unsigned int lookup_num_blocks __ro_after_init;

static inline unsigned long orc_ip(const int *ip)
{
        return (unsigned long)ip + *ip;
}

static struct orc_entry *__orc_find(int *ip_table, struct orc_entry *u_table,
                                    unsigned int num_entries, unsigned long ip)
{
        int *first = ip_table;
        int *last = ip_table + num_entries - 1;
        int *mid = first, *found = first;

        if (!num_entries)
                return NULL;

        /*
         * Do a binary range search to find the rightmost duplicate of a given
         * starting address. Some entries are section terminators which are
         * "weak" entries for ensuring there are no gaps. They should be
         * ignored when they conflict with a real entry.
         */
        while (first <= last) {
                mid = first + ((last - first) / 2);

                if (orc_ip(mid) <= ip) {
                        found = mid;
                        first = mid + 1;
                } else
                        last = mid - 1;
        }

        return u_table + (found - ip_table);
}
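
/*
 * Illustrative example (made-up addresses, not from a real table): given
 * sorted entry start addresses t = { 0x1000, 0x1000, 0x2000 }, where the
 * first 0x1000 entry is a "weak" terminator, a lookup of ip == 0x1fff goes:
 *
 *      first=&t[0] last=&t[2]: mid=&t[1], 0x1000 <= 0x1fff, found=&t[1]
 *      first=&t[2] last=&t[2]: mid=&t[2], 0x2000 >  0x1fff, last=&t[1]
 *
 * The search returns the entry for the *rightmost* 0x1000, i.e. the real
 * entry: orc_sort_cmp() below guarantees that terminators sort to the left
 * of real entries with the same start address.
 */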

#ifdef CONFIG_MODULES
static struct orc_entry *orc_module_find(unsigned long ip)
{
        struct module *mod;

        mod = __module_address(ip);
        if (!mod || !mod->arch.orc_unwind || !mod->arch.orc_unwind_ip)
                return NULL;
        return __orc_find(mod->arch.orc_unwind_ip, mod->arch.orc_unwind,
                          mod->arch.num_orcs, ip);
}
#else
static struct orc_entry *orc_module_find(unsigned long ip)
{
        return NULL;
}
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
static struct orc_entry *orc_find(unsigned long ip);

/*
 * Ftrace dynamic trampolines do not have orc entries of their own.
 * But they are copies of the ftrace entries that are static and
 * defined in ftrace_*.S, which do have orc entries.
 *
 * If the unwinder comes across a ftrace trampoline, then find the
 * ftrace function that was used to create it, and use that ftrace
 * function's orc entry, as the placement of the return code in
 * the stack will be identical.
 */
static struct orc_entry *orc_ftrace_find(unsigned long ip)
{
        struct ftrace_ops *ops;
        unsigned long caller;

        ops = ftrace_ops_trampoline(ip);
        if (!ops)
                return NULL;

        if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
                caller = (unsigned long)ftrace_regs_call;
        else
                caller = (unsigned long)ftrace_call;

        /* Prevent unlikely recursion */
        if (ip == caller)
                return NULL;

        return orc_find(caller);
}
#else
static struct orc_entry *orc_ftrace_find(unsigned long ip)
{
        return NULL;
}
#endif

/*
 * If we crash with IP==0, the last successfully executed instruction
 * was probably an indirect function call with a NULL function pointer,
 * and we don't have unwind information for NULL.
 * This hardcoded ORC entry for IP==0 allows us to unwind from a NULL function
 * pointer into its parent and then continue normally from there.
 */
static struct orc_entry null_orc_entry = {
        .sp_offset = sizeof(long),
        .sp_reg = ORC_REG_SP,
        .bp_reg = ORC_REG_UNDEFINED,
        .type = ORC_TYPE_CALL
};

/* Fake frame pointer entry -- used as a fallback for generated code */
static struct orc_entry orc_fp_entry = {
        .type           = ORC_TYPE_CALL,
        .sp_reg         = ORC_REG_BP,
        .sp_offset      = 16,
        .bp_reg         = ORC_REG_PREV_SP,
        .bp_offset      = -16,
        .end            = 0,
};

static struct orc_entry *orc_find(unsigned long ip)
{
        static struct orc_entry *orc;

        if (ip == 0)
                return &null_orc_entry;

        /* For non-init vmlinux addresses, use the fast lookup table: */
        if (ip >= LOOKUP_START_IP && ip < LOOKUP_STOP_IP) {
                unsigned int idx, start, stop;

                idx = (ip - LOOKUP_START_IP) / LOOKUP_BLOCK_SIZE;

                if (unlikely((idx >= lookup_num_blocks-1))) {
                        orc_warn("WARNING: bad lookup idx: idx=%u num=%u ip=%pB\n",
                                 idx, lookup_num_blocks, (void *)ip);
                        return NULL;
                }

                start = orc_lookup[idx];
                stop = orc_lookup[idx + 1] + 1;

                if (unlikely((__start_orc_unwind + start >= __stop_orc_unwind) ||
                             (__start_orc_unwind + stop > __stop_orc_unwind))) {
                        orc_warn("WARNING: bad lookup value: idx=%u num=%u start=%u stop=%u ip=%pB\n",
                                 idx, lookup_num_blocks, start, stop, (void *)ip);
                        return NULL;
                }

                return __orc_find(__start_orc_unwind_ip + start,
                                  __start_orc_unwind + start, stop - start, ip);
        }

        /* vmlinux .init slow lookup: */
        if (init_kernel_text(ip))
                return __orc_find(__start_orc_unwind_ip, __start_orc_unwind,
                                  __stop_orc_unwind_ip - __start_orc_unwind_ip, ip);

        /* Module lookup: */
        orc = orc_module_find(ip);
        if (orc)
                return orc;

        return orc_ftrace_find(ip);
}
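
/*
 * Fast lookup sketch with illustrative numbers (assuming the usual
 * LOOKUP_BLOCK_SIZE of 256 bytes from asm/orc_lookup.h): an ip that is
 * 0x630 bytes past LOOKUP_START_IP falls into block idx = 0x630 / 256 = 6,
 * and orc_lookup[6]..orc_lookup[7] bound the slice of .orc_unwind_ip that
 * can contain its entry. The binary search in __orc_find() then only has
 * to cover 'stop - start' entries instead of the entire unwind table.
 */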

#ifdef CONFIG_MODULES

static DEFINE_MUTEX(sort_mutex);
static int *cur_orc_ip_table = __start_orc_unwind_ip;
static struct orc_entry *cur_orc_table = __start_orc_unwind;

static void orc_sort_swap(void *_a, void *_b, int size)
{
        struct orc_entry *orc_a, *orc_b;
        struct orc_entry orc_tmp;
        int *a = _a, *b = _b, tmp;
        int delta = _b - _a;

        /* Swap the .orc_unwind_ip entries: */
        tmp = *a;
        *a = *b + delta;
        *b = tmp - delta;

        /* Swap the corresponding .orc_unwind entries: */
        orc_a = cur_orc_table + (a - cur_orc_ip_table);
        orc_b = cur_orc_table + (b - cur_orc_ip_table);
        orc_tmp = *orc_a;
        *orc_a = *orc_b;
        *orc_b = orc_tmp;
}
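
/*
 * Why the +/- delta above: .orc_unwind_ip entries are self-relative, so an
 * entry at address A storing offset O encodes ip == A + O. Moving that
 * value to address B = A + delta must keep the encoded ip stable. Worked
 * through with made-up names: the value landing at A becomes *b + delta,
 * and A + (*b + (B - A)) == B + *b, i.e. still the ip that was encoded at
 * B; symmetrically, the value landing at B becomes *a - delta, which still
 * encodes A + *a.
 */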

static int orc_sort_cmp(const void *_a, const void *_b)
{
        struct orc_entry *orc_a;
        const int *a = _a, *b = _b;
        unsigned long a_val = orc_ip(a);
        unsigned long b_val = orc_ip(b);

        if (a_val > b_val)
                return 1;
        if (a_val < b_val)
                return -1;

        /*
         * The "weak" section terminator entries need to always be on the left
         * to ensure the lookup code skips them in favor of real entries.
         * These terminator entries exist to handle any gaps created by
         * whitelisted .o files which didn't get objtool generation.
         */
        orc_a = cur_orc_table + (a - cur_orc_ip_table);
        return orc_a->sp_reg == ORC_REG_UNDEFINED && !orc_a->end ? -1 : 1;
}

void unwind_module_init(struct module *mod, void *_orc_ip, size_t orc_ip_size,
                        void *_orc, size_t orc_size)
{
        int *orc_ip = _orc_ip;
        struct orc_entry *orc = _orc;
        unsigned int num_entries = orc_ip_size / sizeof(int);

        WARN_ON_ONCE(orc_ip_size % sizeof(int) != 0 ||
                     orc_size % sizeof(*orc) != 0 ||
                     num_entries != orc_size / sizeof(*orc));

        /*
         * The 'cur_orc_*' globals allow the orc_sort_swap() callback to
         * associate an .orc_unwind_ip table entry with its corresponding
         * .orc_unwind entry so they can both be swapped.
         */
        mutex_lock(&sort_mutex);
        cur_orc_ip_table = orc_ip;
        cur_orc_table = orc;
        sort(orc_ip, num_entries, sizeof(int), orc_sort_cmp, orc_sort_swap);
        mutex_unlock(&sort_mutex);

        mod->arch.orc_unwind_ip = orc_ip;
        mod->arch.orc_unwind = orc;
        mod->arch.num_orcs = num_entries;
}
#endif

void __init unwind_init(void)
{
        size_t orc_ip_size = (void *)__stop_orc_unwind_ip - (void *)__start_orc_unwind_ip;
        size_t orc_size = (void *)__stop_orc_unwind - (void *)__start_orc_unwind;
        size_t num_entries = orc_ip_size / sizeof(int);
        struct orc_entry *orc;
        int i;

        if (!num_entries || orc_ip_size % sizeof(int) != 0 ||
            orc_size % sizeof(struct orc_entry) != 0 ||
            num_entries != orc_size / sizeof(struct orc_entry)) {
                orc_warn("WARNING: Bad or missing .orc_unwind table. Disabling unwinder.\n");
                return;
        }

        /*
         * Note, the orc_unwind and orc_unwind_ip tables were already sorted
         * at build time by the 'sorttable' tool, so they're ready for binary
         * search straight away and don't need to be sorted here.
         */

        /* Initialize the fast lookup table: */
        lookup_num_blocks = orc_lookup_end - orc_lookup;
        for (i = 0; i < lookup_num_blocks-1; i++) {
                orc = __orc_find(__start_orc_unwind_ip, __start_orc_unwind,
                                 num_entries,
                                 LOOKUP_START_IP + (LOOKUP_BLOCK_SIZE * i));
                if (!orc) {
                        orc_warn("WARNING: Corrupt .orc_unwind table. Disabling unwinder.\n");
                        return;
                }

                orc_lookup[i] = orc - __start_orc_unwind;
        }

        /* Initialize the ending block: */
        orc = __orc_find(__start_orc_unwind_ip, __start_orc_unwind, num_entries,
                         LOOKUP_STOP_IP);
        if (!orc) {
                orc_warn("WARNING: Corrupt .orc_unwind table. Disabling unwinder.\n");
                return;
        }
        orc_lookup[lookup_num_blocks-1] = orc - __start_orc_unwind;

        orc_init = true;
}

unsigned long unwind_get_return_address(struct unwind_state *state)
{
        if (unwind_done(state))
                return 0;

        return __kernel_text_address(state->ip) ? state->ip : 0;
}
EXPORT_SYMBOL_GPL(unwind_get_return_address);

unsigned long *unwind_get_return_address_ptr(struct unwind_state *state)
{
        struct task_struct *task = state->task;

        if (unwind_done(state))
                return NULL;

        if (state->regs)
                return &state->regs->ip;

        if (task != current && state->sp == task->thread.sp) {
                struct inactive_task_frame *frame = (void *)task->thread.sp;

                return &frame->ret_addr;
        }

        if (state->sp)
                return (unsigned long *)state->sp - 1;

        return NULL;
}

static bool stack_access_ok(struct unwind_state *state, unsigned long _addr,
                            size_t len)
{
        struct stack_info *info = &state->stack_info;
        void *addr = (void *)_addr;

        if (!on_stack(info, addr, len) &&
            (get_stack_info(addr, state->task, info, &state->stack_mask)))
                return false;

        return true;
}

static bool deref_stack_reg(struct unwind_state *state, unsigned long addr,
                            unsigned long *val)
{
        if (!stack_access_ok(state, addr, sizeof(long)))
                return false;

        *val = READ_ONCE_NOCHECK(*(unsigned long *)addr);
        return true;
}

static bool deref_stack_regs(struct unwind_state *state, unsigned long addr,
                             unsigned long *ip, unsigned long *sp)
{
        struct pt_regs *regs = (struct pt_regs *)addr;

        /* x86-32 support will be more complicated due to the &regs->sp hack */
        BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_32));

        if (!stack_access_ok(state, addr, sizeof(struct pt_regs)))
                return false;

        *ip = regs->ip;
        *sp = regs->sp;
        return true;
}

static bool deref_stack_iret_regs(struct unwind_state *state, unsigned long addr,
                                  unsigned long *ip, unsigned long *sp)
{
        struct pt_regs *regs = (void *)addr - IRET_FRAME_OFFSET;

        if (!stack_access_ok(state, addr, IRET_FRAME_SIZE))
                return false;

        *ip = regs->ip;
        *sp = regs->sp;
        return true;
}

/*
 * If state->regs is non-NULL, and points to a full pt_regs, just get the reg
 * value from state->regs.
 *
 * Otherwise, if state->regs just points to IRET regs, and the previous frame
 * had full regs, it's safe to get the value from the previous regs. This can
 * happen when early/late IRQ entry code gets interrupted by an NMI.
 */
static bool get_reg(struct unwind_state *state, unsigned int reg_off,
                    unsigned long *val)
{
        unsigned int reg = reg_off/8;

        if (!state->regs)
                return false;

        if (state->full_regs) {
                *val = ((unsigned long *)state->regs)[reg];
                return true;
        }

        if (state->prev_regs) {
                *val = ((unsigned long *)state->prev_regs)[reg];
                return true;
        }

        return false;
}
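
/*
 * The reg_off/8 trick above works because pt_regs on x86-64 begins with the
 * saved GPRs laid out as consecutive longs, so a byte offset from offsetof()
 * divides down to an array index. Illustrative example assuming the current
 * asm/ptrace.h layout (r15, r14, r13, r12, bp, ...):
 * offsetof(struct pt_regs, bp) == 32, so get_reg() reads index 32/8 == 4.
 */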

bool unwind_next_frame(struct unwind_state *state)
{
        unsigned long ip_p, sp, tmp, orig_ip = state->ip, prev_sp = state->sp;
        enum stack_type prev_type = state->stack_info.type;
        struct orc_entry *orc;
        bool indirect = false;

        if (unwind_done(state))
                return false;

        /* Don't let modules unload while we're reading their ORC data. */
        preempt_disable();

        /* End-of-stack check for user tasks: */
        if (state->regs && user_mode(state->regs))
                goto the_end;

        /*
         * Find the orc_entry associated with the text address.
         *
         * Decrement call return addresses by one so they work for sibling
         * calls and calls to noreturn functions.
         */
        orc = orc_find(state->signal ? state->ip : state->ip - 1);
        if (!orc) {
                /*
                 * As a fallback, try to assume this code uses a frame pointer.
                 * This is useful for generated code, like BPF, which ORC
                 * doesn't know about. This is just a guess, so the rest of
                 * the unwind is no longer considered reliable.
                 */
                orc = &orc_fp_entry;
                state->error = true;
        }

        /* End-of-stack check for kernel threads: */
        if (orc->sp_reg == ORC_REG_UNDEFINED) {
                if (!orc->end)
                        goto err;

                goto the_end;
        }

        /* Find the previous frame's stack: */
        switch (orc->sp_reg) {
        case ORC_REG_SP:
                sp = state->sp + orc->sp_offset;
                break;

        case ORC_REG_BP:
                sp = state->bp + orc->sp_offset;
                break;

        case ORC_REG_SP_INDIRECT:
                sp = state->sp + orc->sp_offset;
                indirect = true;
                break;

        case ORC_REG_BP_INDIRECT:
                sp = state->bp + orc->sp_offset;
                indirect = true;
                break;

        case ORC_REG_R10:
                if (!get_reg(state, offsetof(struct pt_regs, r10), &sp)) {
                        orc_warn_current("missing R10 value at %pB\n",
                                         (void *)state->ip);
                        goto err;
                }
                break;

        case ORC_REG_R13:
                if (!get_reg(state, offsetof(struct pt_regs, r13), &sp)) {
                        orc_warn_current("missing R13 value at %pB\n",
                                         (void *)state->ip);
                        goto err;
                }
                break;

        case ORC_REG_DI:
                if (!get_reg(state, offsetof(struct pt_regs, di), &sp)) {
                        orc_warn_current("missing RDI value at %pB\n",
                                         (void *)state->ip);
                        goto err;
                }
                break;

        case ORC_REG_DX:
                if (!get_reg(state, offsetof(struct pt_regs, dx), &sp)) {
                        orc_warn_current("missing DX value at %pB\n",
                                         (void *)state->ip);
                        goto err;
                }
                break;

        default:
                orc_warn("unknown SP base reg %d at %pB\n",
                         orc->sp_reg, (void *)state->ip);
                goto err;
        }

        if (indirect) {
                if (!deref_stack_reg(state, sp, &sp))
                        goto err;
        }

        /* Find IP, SP and possibly regs: */
        switch (orc->type) {
        case ORC_TYPE_CALL:
                ip_p = sp - sizeof(long);

                if (!deref_stack_reg(state, ip_p, &state->ip))
                        goto err;

                state->ip = ftrace_graph_ret_addr(state->task, &state->graph_idx,
                                                  state->ip, (void *)ip_p);

                state->sp = sp;
                state->regs = NULL;
                state->prev_regs = NULL;
                state->signal = false;
                break;

        case ORC_TYPE_REGS:
                if (!deref_stack_regs(state, sp, &state->ip, &state->sp)) {
                        orc_warn_current("can't access registers at %pB\n",
                                         (void *)orig_ip);
                        goto err;
                }

                state->regs = (struct pt_regs *)sp;
                state->prev_regs = NULL;
                state->full_regs = true;
                state->signal = true;
                break;

        case ORC_TYPE_REGS_IRET:
                if (!deref_stack_iret_regs(state, sp, &state->ip, &state->sp)) {
                        orc_warn_current("can't access iret registers at %pB\n",
                                         (void *)orig_ip);
                        goto err;
                }

                if (state->full_regs)
                        state->prev_regs = state->regs;
                state->regs = (void *)sp - IRET_FRAME_OFFSET;
                state->full_regs = false;
                state->signal = true;
                break;

        default:
                orc_warn("unknown .orc_unwind entry type %d at %pB\n",
                         orc->type, (void *)orig_ip);
                goto err;
        }
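
        /*
         * Stack layout sketch for the ORC_TYPE_CALL case in the switch above
         * (illustrative): once the ORC entry has produced the caller's sp,
         * the frame looks like
         *
         *      sp             ->  caller's stack data ...
         *      sp - 8 (ip_p)  ->  return address pushed by CALL
         *
         * so the return address is read from sp - sizeof(long), and sp itself
         * becomes the unwound frame's stack pointer.
         */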

        /* Find BP: */
        switch (orc->bp_reg) {
        case ORC_REG_UNDEFINED:
                if (get_reg(state, offsetof(struct pt_regs, bp), &tmp))
                        state->bp = tmp;
                break;

        case ORC_REG_PREV_SP:
                if (!deref_stack_reg(state, sp + orc->bp_offset, &state->bp))
                        goto err;
                break;

        case ORC_REG_BP:
                if (!deref_stack_reg(state, state->bp + orc->bp_offset,
                                     &state->bp))
                        goto err;
                break;

        default:
                orc_warn("unknown BP base reg %d for ip %pB\n",
                         orc->bp_reg, (void *)orig_ip);
                goto err;
        }

        /* Prevent a recursive loop due to bad ORC data: */
        if (state->stack_info.type == prev_type &&
            on_stack(&state->stack_info, (void *)state->sp, sizeof(long)) &&
            state->sp <= prev_sp) {
                orc_warn_current("stack going in the wrong direction? at %pB\n",
                                 (void *)orig_ip);
                goto err;
        }

        preempt_enable();
        return true;

err:
        state->error = true;

the_end:
        preempt_enable();
        state->stack_info.type = STACK_TYPE_UNKNOWN;
        return false;
}
EXPORT_SYMBOL_GPL(unwind_next_frame);

void __unwind_start(struct unwind_state *state, struct task_struct *task,
                    struct pt_regs *regs, unsigned long *first_frame)
{
        memset(state, 0, sizeof(*state));
        state->task = task;

        if (!orc_init)
                goto err;

        /*
         * Refuse to unwind the stack of a task while it's executing on another
         * CPU. This check is racy, but that's ok: the unwinder has other
         * checks to prevent it from going off the rails.
         */
        if (task_on_another_cpu(task))
                goto err;

        if (regs) {
                if (user_mode(regs))
                        goto the_end;

                state->ip = regs->ip;
                state->sp = regs->sp;
                state->bp = regs->bp;
                state->regs = regs;
                state->full_regs = true;
                state->signal = true;

        } else if (task == current) {
                asm volatile("lea (%%rip), %0\n\t"
                             "mov %%rsp, %1\n\t"
                             "mov %%rbp, %2\n\t"
                             : "=r" (state->ip), "=r" (state->sp),
                               "=r" (state->bp));

        } else {
                struct inactive_task_frame *frame = (void *)task->thread.sp;

                state->sp = task->thread.sp;
                state->bp = READ_ONCE_NOCHECK(frame->bp);
                state->ip = READ_ONCE_NOCHECK(frame->ret_addr);
        }

        if (get_stack_info((unsigned long *)state->sp, state->task,
                           &state->stack_info, &state->stack_mask)) {
                /*
                 * We weren't on a valid stack. It's possible that
                 * we overflowed a valid stack into a guard page.
                 * See if the next page up is valid so that we can
                 * generate some kind of backtrace if this happens.
                 */
                void *next_page = (void *)PAGE_ALIGN((unsigned long)state->sp);
                state->error = true;
                if (get_stack_info(next_page, state->task, &state->stack_info,
                                   &state->stack_mask))
                        return;
        }

        /*
         * The caller can provide the address of the first frame directly
         * (first_frame) or indirectly (regs->sp) to indicate which stack frame
         * to start unwinding at. Skip ahead until we reach it.
         */

        /* When starting from regs, skip the regs frame: */
        if (regs) {
                unwind_next_frame(state);
                return;
        }

        /* Otherwise, skip ahead to the user-specified starting frame: */
        while (!unwind_done(state) &&
               (!on_stack(&state->stack_info, first_frame, sizeof(long)) ||
                state->sp < (unsigned long)first_frame))
                unwind_next_frame(state);

        return;

err:
        state->error = true;
the_end:
        state->stack_info.type = STACK_TYPE_UNKNOWN;
}
EXPORT_SYMBOL_GPL(__unwind_start);
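
/*
 * Usage sketch (not part of this file): the dumpstack code drives this
 * unwinder roughly as below, via the unwind_start() wrapper from
 * asm/unwind.h. Illustrative only; see arch/x86/kernel/dumpstack.c for the
 * real consumers.
 *
 *      struct unwind_state state;
 *      unsigned long addr;
 *
 *      for (unwind_start(&state, task, regs, first_frame);
 *           !unwind_done(&state);
 *           unwind_next_frame(&state)) {
 *              addr = unwind_get_return_address(&state);
 *              if (!addr)
 *                      break;
 *              printk("  %pB\n", (void *)addr);
 *      }
 */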