1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com> 4 */ 5 6 #include <string.h> 7 #include <stdlib.h> 8 #include <inttypes.h> 9 #include <sys/mman.h> 10 11 #include <arch/elf.h> 12 #include <objtool/builtin.h> 13 #include <objtool/cfi.h> 14 #include <objtool/arch.h> 15 #include <objtool/check.h> 16 #include <objtool/special.h> 17 #include <objtool/warn.h> 18 #include <objtool/endianness.h> 19 20 #include <linux/objtool.h> 21 #include <linux/hashtable.h> 22 #include <linux/kernel.h> 23 #include <linux/static_call_types.h> 24 25 struct alternative { 26 struct list_head list; 27 struct instruction *insn; 28 bool skip_orig; 29 }; 30 31 static unsigned long nr_cfi, nr_cfi_reused, nr_cfi_cache; 32 33 static struct cfi_init_state initial_func_cfi; 34 static struct cfi_state init_cfi; 35 static struct cfi_state func_cfi; 36 37 struct instruction *find_insn(struct objtool_file *file, 38 struct section *sec, unsigned long offset) 39 { 40 struct instruction *insn; 41 42 hash_for_each_possible(file->insn_hash, insn, hash, sec_offset_hash(sec, offset)) { 43 if (insn->sec == sec && insn->offset == offset) 44 return insn; 45 } 46 47 return NULL; 48 } 49 50 static struct instruction *next_insn_same_sec(struct objtool_file *file, 51 struct instruction *insn) 52 { 53 struct instruction *next = list_next_entry(insn, list); 54 55 if (!next || &next->list == &file->insn_list || next->sec != insn->sec) 56 return NULL; 57 58 return next; 59 } 60 61 static struct instruction *next_insn_same_func(struct objtool_file *file, 62 struct instruction *insn) 63 { 64 struct instruction *next = list_next_entry(insn, list); 65 struct symbol *func = insn_func(insn); 66 67 if (!func) 68 return NULL; 69 70 if (&next->list != &file->insn_list && insn_func(next) == func) 71 return next; 72 73 /* Check if we're already in the subfunction: */ 74 if (func == func->cfunc) 75 return NULL; 76 77 /* Move to the subfunction: */ 78 return find_insn(file, func->cfunc->sec, func->cfunc->offset); 79 } 80 81 static struct instruction *prev_insn_same_sym(struct objtool_file *file, 82 struct instruction *insn) 83 { 84 struct instruction *prev = list_prev_entry(insn, list); 85 86 if (&prev->list != &file->insn_list && insn_func(prev) == insn_func(insn)) 87 return prev; 88 89 return NULL; 90 } 91 92 #define func_for_each_insn(file, func, insn) \ 93 for (insn = find_insn(file, func->sec, func->offset); \ 94 insn; \ 95 insn = next_insn_same_func(file, insn)) 96 97 #define sym_for_each_insn(file, sym, insn) \ 98 for (insn = find_insn(file, sym->sec, sym->offset); \ 99 insn && &insn->list != &file->insn_list && \ 100 insn->sec == sym->sec && \ 101 insn->offset < sym->offset + sym->len; \ 102 insn = list_next_entry(insn, list)) 103 104 #define sym_for_each_insn_continue_reverse(file, sym, insn) \ 105 for (insn = list_prev_entry(insn, list); \ 106 &insn->list != &file->insn_list && \ 107 insn->sec == sym->sec && insn->offset >= sym->offset; \ 108 insn = list_prev_entry(insn, list)) 109 110 #define sec_for_each_insn_from(file, insn) \ 111 for (; insn; insn = next_insn_same_sec(file, insn)) 112 113 #define sec_for_each_insn_continue(file, insn) \ 114 for (insn = next_insn_same_sec(file, insn); insn; \ 115 insn = next_insn_same_sec(file, insn)) 116 117 static bool is_jump_table_jump(struct instruction *insn) 118 { 119 struct alt_group *alt_group = insn->alt_group; 120 121 if (insn->jump_table) 122 return true; 123 124 /* Retpoline alternative for a jump table? */ 125 return alt_group && alt_group->orig_group && 126 alt_group->orig_group->first_insn->jump_table; 127 } 128 129 static bool is_sibling_call(struct instruction *insn) 130 { 131 /* 132 * Assume only STT_FUNC calls have jump-tables. 133 */ 134 if (insn_func(insn)) { 135 /* An indirect jump is either a sibling call or a jump to a table. */ 136 if (insn->type == INSN_JUMP_DYNAMIC) 137 return !is_jump_table_jump(insn); 138 } 139 140 /* add_jump_destinations() sets insn->call_dest for sibling calls. */ 141 return (is_static_jump(insn) && insn->call_dest); 142 } 143 144 /* 145 * This checks to see if the given function is a "noreturn" function. 146 * 147 * For global functions which are outside the scope of this object file, we 148 * have to keep a manual list of them. 149 * 150 * For local functions, we have to detect them manually by simply looking for 151 * the lack of a return instruction. 152 */ 153 static bool __dead_end_function(struct objtool_file *file, struct symbol *func, 154 int recursion) 155 { 156 int i; 157 struct instruction *insn; 158 bool empty = true; 159 160 /* 161 * Unfortunately these have to be hard coded because the noreturn 162 * attribute isn't provided in ELF data. Keep 'em sorted. 163 */ 164 static const char * const global_noreturns[] = { 165 "__invalid_creds", 166 "__module_put_and_kthread_exit", 167 "__reiserfs_panic", 168 "__stack_chk_fail", 169 "__ubsan_handle_builtin_unreachable", 170 "cpu_bringup_and_idle", 171 "cpu_startup_entry", 172 "do_exit", 173 "do_group_exit", 174 "do_task_dead", 175 "ex_handler_msr_mce", 176 "fortify_panic", 177 "kthread_complete_and_exit", 178 "kthread_exit", 179 "kunit_try_catch_throw", 180 "lbug_with_loc", 181 "machine_real_restart", 182 "make_task_dead", 183 "panic", 184 "rewind_stack_and_make_dead", 185 "sev_es_terminate", 186 "snp_abort", 187 "stop_this_cpu", 188 "usercopy_abort", 189 "xen_start_kernel", 190 }; 191 192 if (!func) 193 return false; 194 195 if (func->bind == STB_WEAK) 196 return false; 197 198 if (func->bind == STB_GLOBAL) 199 for (i = 0; i < ARRAY_SIZE(global_noreturns); i++) 200 if (!strcmp(func->name, global_noreturns[i])) 201 return true; 202 203 if (!func->len) 204 return false; 205 206 insn = find_insn(file, func->sec, func->offset); 207 if (!insn || !insn_func(insn)) 208 return false; 209 210 func_for_each_insn(file, func, insn) { 211 empty = false; 212 213 if (insn->type == INSN_RETURN) 214 return false; 215 } 216 217 if (empty) 218 return false; 219 220 /* 221 * A function can have a sibling call instead of a return. In that 222 * case, the function's dead-end status depends on whether the target 223 * of the sibling call returns. 224 */ 225 func_for_each_insn(file, func, insn) { 226 if (is_sibling_call(insn)) { 227 struct instruction *dest = insn->jump_dest; 228 229 if (!dest) 230 /* sibling call to another file */ 231 return false; 232 233 /* local sibling call */ 234 if (recursion == 5) { 235 /* 236 * Infinite recursion: two functions have 237 * sibling calls to each other. This is a very 238 * rare case. It means they aren't dead ends. 239 */ 240 return false; 241 } 242 243 return __dead_end_function(file, insn_func(dest), recursion+1); 244 } 245 } 246 247 return true; 248 } 249 250 static bool dead_end_function(struct objtool_file *file, struct symbol *func) 251 { 252 return __dead_end_function(file, func, 0); 253 } 254 255 static void init_cfi_state(struct cfi_state *cfi) 256 { 257 int i; 258 259 for (i = 0; i < CFI_NUM_REGS; i++) { 260 cfi->regs[i].base = CFI_UNDEFINED; 261 cfi->vals[i].base = CFI_UNDEFINED; 262 } 263 cfi->cfa.base = CFI_UNDEFINED; 264 cfi->drap_reg = CFI_UNDEFINED; 265 cfi->drap_offset = -1; 266 } 267 268 static void init_insn_state(struct objtool_file *file, struct insn_state *state, 269 struct section *sec) 270 { 271 memset(state, 0, sizeof(*state)); 272 init_cfi_state(&state->cfi); 273 274 /* 275 * We need the full vmlinux for noinstr validation, otherwise we can 276 * not correctly determine insn->call_dest->sec (external symbols do 277 * not have a section). 278 */ 279 if (opts.link && opts.noinstr && sec) 280 state->noinstr = sec->noinstr; 281 } 282 283 static struct cfi_state *cfi_alloc(void) 284 { 285 struct cfi_state *cfi = calloc(sizeof(struct cfi_state), 1); 286 if (!cfi) { 287 WARN("calloc failed"); 288 exit(1); 289 } 290 nr_cfi++; 291 return cfi; 292 } 293 294 static int cfi_bits; 295 static struct hlist_head *cfi_hash; 296 297 static inline bool cficmp(struct cfi_state *cfi1, struct cfi_state *cfi2) 298 { 299 return memcmp((void *)cfi1 + sizeof(cfi1->hash), 300 (void *)cfi2 + sizeof(cfi2->hash), 301 sizeof(struct cfi_state) - sizeof(struct hlist_node)); 302 } 303 304 static inline u32 cfi_key(struct cfi_state *cfi) 305 { 306 return jhash((void *)cfi + sizeof(cfi->hash), 307 sizeof(*cfi) - sizeof(cfi->hash), 0); 308 } 309 310 static struct cfi_state *cfi_hash_find_or_add(struct cfi_state *cfi) 311 { 312 struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)]; 313 struct cfi_state *obj; 314 315 hlist_for_each_entry(obj, head, hash) { 316 if (!cficmp(cfi, obj)) { 317 nr_cfi_cache++; 318 return obj; 319 } 320 } 321 322 obj = cfi_alloc(); 323 *obj = *cfi; 324 hlist_add_head(&obj->hash, head); 325 326 return obj; 327 } 328 329 static void cfi_hash_add(struct cfi_state *cfi) 330 { 331 struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)]; 332 333 hlist_add_head(&cfi->hash, head); 334 } 335 336 static void *cfi_hash_alloc(unsigned long size) 337 { 338 cfi_bits = max(10, ilog2(size)); 339 cfi_hash = mmap(NULL, sizeof(struct hlist_head) << cfi_bits, 340 PROT_READ|PROT_WRITE, 341 MAP_PRIVATE|MAP_ANON, -1, 0); 342 if (cfi_hash == (void *)-1L) { 343 WARN("mmap fail cfi_hash"); 344 cfi_hash = NULL; 345 } else if (opts.stats) { 346 printf("cfi_bits: %d\n", cfi_bits); 347 } 348 349 return cfi_hash; 350 } 351 352 static unsigned long nr_insns; 353 static unsigned long nr_insns_visited; 354 355 /* 356 * Call the arch-specific instruction decoder for all the instructions and add 357 * them to the global instruction list. 358 */ 359 static int decode_instructions(struct objtool_file *file) 360 { 361 struct section *sec; 362 struct symbol *func; 363 unsigned long offset; 364 struct instruction *insn; 365 int ret; 366 367 for_each_sec(file, sec) { 368 369 if (!(sec->sh.sh_flags & SHF_EXECINSTR)) 370 continue; 371 372 if (strcmp(sec->name, ".altinstr_replacement") && 373 strcmp(sec->name, ".altinstr_aux") && 374 strncmp(sec->name, ".discard.", 9)) 375 sec->text = true; 376 377 if (!strcmp(sec->name, ".noinstr.text") || 378 !strcmp(sec->name, ".entry.text") || 379 !strncmp(sec->name, ".text.__x86.", 12)) 380 sec->noinstr = true; 381 382 /* 383 * .init.text code is ran before userspace and thus doesn't 384 * strictly need retpolines, except for modules which are 385 * loaded late, they very much do need retpoline in their 386 * .init.text 387 */ 388 if (!strcmp(sec->name, ".init.text") && !opts.module) 389 sec->init = true; 390 391 for (offset = 0; offset < sec->sh.sh_size; offset += insn->len) { 392 insn = malloc(sizeof(*insn)); 393 if (!insn) { 394 WARN("malloc failed"); 395 return -1; 396 } 397 memset(insn, 0, sizeof(*insn)); 398 INIT_LIST_HEAD(&insn->alts); 399 INIT_LIST_HEAD(&insn->stack_ops); 400 INIT_LIST_HEAD(&insn->call_node); 401 402 insn->sec = sec; 403 insn->offset = offset; 404 405 ret = arch_decode_instruction(file, sec, offset, 406 sec->sh.sh_size - offset, 407 &insn->len, &insn->type, 408 &insn->immediate, 409 &insn->stack_ops); 410 if (ret) 411 goto err; 412 413 /* 414 * By default, "ud2" is a dead end unless otherwise 415 * annotated, because GCC 7 inserts it for certain 416 * divide-by-zero cases. 417 */ 418 if (insn->type == INSN_BUG) 419 insn->dead_end = true; 420 421 hash_add(file->insn_hash, &insn->hash, sec_offset_hash(sec, insn->offset)); 422 list_add_tail(&insn->list, &file->insn_list); 423 nr_insns++; 424 } 425 426 list_for_each_entry(func, &sec->symbol_list, list) { 427 if (func->type != STT_NOTYPE && func->type != STT_FUNC) 428 continue; 429 430 if (func->return_thunk || func->alias != func) 431 continue; 432 433 if (!find_insn(file, sec, func->offset)) { 434 WARN("%s(): can't find starting instruction", 435 func->name); 436 return -1; 437 } 438 439 sym_for_each_insn(file, func, insn) { 440 insn->sym = func; 441 if (func->type == STT_FUNC && 442 insn->type == INSN_ENDBR && 443 list_empty(&insn->call_node)) { 444 if (insn->offset == func->offset) { 445 list_add_tail(&insn->call_node, &file->endbr_list); 446 file->nr_endbr++; 447 } else { 448 file->nr_endbr_int++; 449 } 450 } 451 } 452 } 453 } 454 455 if (opts.stats) 456 printf("nr_insns: %lu\n", nr_insns); 457 458 return 0; 459 460 err: 461 free(insn); 462 return ret; 463 } 464 465 /* 466 * Read the pv_ops[] .data table to find the static initialized values. 467 */ 468 static int add_pv_ops(struct objtool_file *file, const char *symname) 469 { 470 struct symbol *sym, *func; 471 unsigned long off, end; 472 struct reloc *rel; 473 int idx; 474 475 sym = find_symbol_by_name(file->elf, symname); 476 if (!sym) 477 return 0; 478 479 off = sym->offset; 480 end = off + sym->len; 481 for (;;) { 482 rel = find_reloc_by_dest_range(file->elf, sym->sec, off, end - off); 483 if (!rel) 484 break; 485 486 func = rel->sym; 487 if (func->type == STT_SECTION) 488 func = find_symbol_by_offset(rel->sym->sec, rel->addend); 489 490 idx = (rel->offset - sym->offset) / sizeof(unsigned long); 491 492 objtool_pv_add(file, idx, func); 493 494 off = rel->offset + 1; 495 if (off > end) 496 break; 497 } 498 499 return 0; 500 } 501 502 /* 503 * Allocate and initialize file->pv_ops[]. 504 */ 505 static int init_pv_ops(struct objtool_file *file) 506 { 507 static const char *pv_ops_tables[] = { 508 "pv_ops", 509 "xen_cpu_ops", 510 "xen_irq_ops", 511 "xen_mmu_ops", 512 NULL, 513 }; 514 const char *pv_ops; 515 struct symbol *sym; 516 int idx, nr; 517 518 if (!opts.noinstr) 519 return 0; 520 521 file->pv_ops = NULL; 522 523 sym = find_symbol_by_name(file->elf, "pv_ops"); 524 if (!sym) 525 return 0; 526 527 nr = sym->len / sizeof(unsigned long); 528 file->pv_ops = calloc(sizeof(struct pv_state), nr); 529 if (!file->pv_ops) 530 return -1; 531 532 for (idx = 0; idx < nr; idx++) 533 INIT_LIST_HEAD(&file->pv_ops[idx].targets); 534 535 for (idx = 0; (pv_ops = pv_ops_tables[idx]); idx++) 536 add_pv_ops(file, pv_ops); 537 538 return 0; 539 } 540 541 static struct instruction *find_last_insn(struct objtool_file *file, 542 struct section *sec) 543 { 544 struct instruction *insn = NULL; 545 unsigned int offset; 546 unsigned int end = (sec->sh.sh_size > 10) ? sec->sh.sh_size - 10 : 0; 547 548 for (offset = sec->sh.sh_size - 1; offset >= end && !insn; offset--) 549 insn = find_insn(file, sec, offset); 550 551 return insn; 552 } 553 554 /* 555 * Mark "ud2" instructions and manually annotated dead ends. 556 */ 557 static int add_dead_ends(struct objtool_file *file) 558 { 559 struct section *sec; 560 struct reloc *reloc; 561 struct instruction *insn; 562 563 /* 564 * Check for manually annotated dead ends. 565 */ 566 sec = find_section_by_name(file->elf, ".rela.discard.unreachable"); 567 if (!sec) 568 goto reachable; 569 570 list_for_each_entry(reloc, &sec->reloc_list, list) { 571 if (reloc->sym->type != STT_SECTION) { 572 WARN("unexpected relocation symbol type in %s", sec->name); 573 return -1; 574 } 575 insn = find_insn(file, reloc->sym->sec, reloc->addend); 576 if (insn) 577 insn = list_prev_entry(insn, list); 578 else if (reloc->addend == reloc->sym->sec->sh.sh_size) { 579 insn = find_last_insn(file, reloc->sym->sec); 580 if (!insn) { 581 WARN("can't find unreachable insn at %s+0x%" PRIx64, 582 reloc->sym->sec->name, reloc->addend); 583 return -1; 584 } 585 } else { 586 WARN("can't find unreachable insn at %s+0x%" PRIx64, 587 reloc->sym->sec->name, reloc->addend); 588 return -1; 589 } 590 591 insn->dead_end = true; 592 } 593 594 reachable: 595 /* 596 * These manually annotated reachable checks are needed for GCC 4.4, 597 * where the Linux unreachable() macro isn't supported. In that case 598 * GCC doesn't know the "ud2" is fatal, so it generates code as if it's 599 * not a dead end. 600 */ 601 sec = find_section_by_name(file->elf, ".rela.discard.reachable"); 602 if (!sec) 603 return 0; 604 605 list_for_each_entry(reloc, &sec->reloc_list, list) { 606 if (reloc->sym->type != STT_SECTION) { 607 WARN("unexpected relocation symbol type in %s", sec->name); 608 return -1; 609 } 610 insn = find_insn(file, reloc->sym->sec, reloc->addend); 611 if (insn) 612 insn = list_prev_entry(insn, list); 613 else if (reloc->addend == reloc->sym->sec->sh.sh_size) { 614 insn = find_last_insn(file, reloc->sym->sec); 615 if (!insn) { 616 WARN("can't find reachable insn at %s+0x%" PRIx64, 617 reloc->sym->sec->name, reloc->addend); 618 return -1; 619 } 620 } else { 621 WARN("can't find reachable insn at %s+0x%" PRIx64, 622 reloc->sym->sec->name, reloc->addend); 623 return -1; 624 } 625 626 insn->dead_end = false; 627 } 628 629 return 0; 630 } 631 632 static int create_static_call_sections(struct objtool_file *file) 633 { 634 struct section *sec; 635 struct static_call_site *site; 636 struct instruction *insn; 637 struct symbol *key_sym; 638 char *key_name, *tmp; 639 int idx; 640 641 sec = find_section_by_name(file->elf, ".static_call_sites"); 642 if (sec) { 643 INIT_LIST_HEAD(&file->static_call_list); 644 WARN("file already has .static_call_sites section, skipping"); 645 return 0; 646 } 647 648 if (list_empty(&file->static_call_list)) 649 return 0; 650 651 idx = 0; 652 list_for_each_entry(insn, &file->static_call_list, call_node) 653 idx++; 654 655 sec = elf_create_section(file->elf, ".static_call_sites", SHF_WRITE, 656 sizeof(struct static_call_site), idx); 657 if (!sec) 658 return -1; 659 660 idx = 0; 661 list_for_each_entry(insn, &file->static_call_list, call_node) { 662 663 site = (struct static_call_site *)sec->data->d_buf + idx; 664 memset(site, 0, sizeof(struct static_call_site)); 665 666 /* populate reloc for 'addr' */ 667 if (elf_add_reloc_to_insn(file->elf, sec, 668 idx * sizeof(struct static_call_site), 669 R_X86_64_PC32, 670 insn->sec, insn->offset)) 671 return -1; 672 673 /* find key symbol */ 674 key_name = strdup(insn->call_dest->name); 675 if (!key_name) { 676 perror("strdup"); 677 return -1; 678 } 679 if (strncmp(key_name, STATIC_CALL_TRAMP_PREFIX_STR, 680 STATIC_CALL_TRAMP_PREFIX_LEN)) { 681 WARN("static_call: trampoline name malformed: %s", key_name); 682 return -1; 683 } 684 tmp = key_name + STATIC_CALL_TRAMP_PREFIX_LEN - STATIC_CALL_KEY_PREFIX_LEN; 685 memcpy(tmp, STATIC_CALL_KEY_PREFIX_STR, STATIC_CALL_KEY_PREFIX_LEN); 686 687 key_sym = find_symbol_by_name(file->elf, tmp); 688 if (!key_sym) { 689 if (!opts.module) { 690 WARN("static_call: can't find static_call_key symbol: %s", tmp); 691 return -1; 692 } 693 694 /* 695 * For modules(), the key might not be exported, which 696 * means the module can make static calls but isn't 697 * allowed to change them. 698 * 699 * In that case we temporarily set the key to be the 700 * trampoline address. This is fixed up in 701 * static_call_add_module(). 702 */ 703 key_sym = insn->call_dest; 704 } 705 free(key_name); 706 707 /* populate reloc for 'key' */ 708 if (elf_add_reloc(file->elf, sec, 709 idx * sizeof(struct static_call_site) + 4, 710 R_X86_64_PC32, key_sym, 711 is_sibling_call(insn) * STATIC_CALL_SITE_TAIL)) 712 return -1; 713 714 idx++; 715 } 716 717 return 0; 718 } 719 720 static int create_retpoline_sites_sections(struct objtool_file *file) 721 { 722 struct instruction *insn; 723 struct section *sec; 724 int idx; 725 726 sec = find_section_by_name(file->elf, ".retpoline_sites"); 727 if (sec) { 728 WARN("file already has .retpoline_sites, skipping"); 729 return 0; 730 } 731 732 idx = 0; 733 list_for_each_entry(insn, &file->retpoline_call_list, call_node) 734 idx++; 735 736 if (!idx) 737 return 0; 738 739 sec = elf_create_section(file->elf, ".retpoline_sites", 0, 740 sizeof(int), idx); 741 if (!sec) { 742 WARN("elf_create_section: .retpoline_sites"); 743 return -1; 744 } 745 746 idx = 0; 747 list_for_each_entry(insn, &file->retpoline_call_list, call_node) { 748 749 int *site = (int *)sec->data->d_buf + idx; 750 *site = 0; 751 752 if (elf_add_reloc_to_insn(file->elf, sec, 753 idx * sizeof(int), 754 R_X86_64_PC32, 755 insn->sec, insn->offset)) { 756 WARN("elf_add_reloc_to_insn: .retpoline_sites"); 757 return -1; 758 } 759 760 idx++; 761 } 762 763 return 0; 764 } 765 766 static int create_return_sites_sections(struct objtool_file *file) 767 { 768 struct instruction *insn; 769 struct section *sec; 770 int idx; 771 772 sec = find_section_by_name(file->elf, ".return_sites"); 773 if (sec) { 774 WARN("file already has .return_sites, skipping"); 775 return 0; 776 } 777 778 idx = 0; 779 list_for_each_entry(insn, &file->return_thunk_list, call_node) 780 idx++; 781 782 if (!idx) 783 return 0; 784 785 sec = elf_create_section(file->elf, ".return_sites", 0, 786 sizeof(int), idx); 787 if (!sec) { 788 WARN("elf_create_section: .return_sites"); 789 return -1; 790 } 791 792 idx = 0; 793 list_for_each_entry(insn, &file->return_thunk_list, call_node) { 794 795 int *site = (int *)sec->data->d_buf + idx; 796 *site = 0; 797 798 if (elf_add_reloc_to_insn(file->elf, sec, 799 idx * sizeof(int), 800 R_X86_64_PC32, 801 insn->sec, insn->offset)) { 802 WARN("elf_add_reloc_to_insn: .return_sites"); 803 return -1; 804 } 805 806 idx++; 807 } 808 809 return 0; 810 } 811 812 static int create_ibt_endbr_seal_sections(struct objtool_file *file) 813 { 814 struct instruction *insn; 815 struct section *sec; 816 int idx; 817 818 sec = find_section_by_name(file->elf, ".ibt_endbr_seal"); 819 if (sec) { 820 WARN("file already has .ibt_endbr_seal, skipping"); 821 return 0; 822 } 823 824 idx = 0; 825 list_for_each_entry(insn, &file->endbr_list, call_node) 826 idx++; 827 828 if (opts.stats) { 829 printf("ibt: ENDBR at function start: %d\n", file->nr_endbr); 830 printf("ibt: ENDBR inside functions: %d\n", file->nr_endbr_int); 831 printf("ibt: superfluous ENDBR: %d\n", idx); 832 } 833 834 if (!idx) 835 return 0; 836 837 sec = elf_create_section(file->elf, ".ibt_endbr_seal", 0, 838 sizeof(int), idx); 839 if (!sec) { 840 WARN("elf_create_section: .ibt_endbr_seal"); 841 return -1; 842 } 843 844 idx = 0; 845 list_for_each_entry(insn, &file->endbr_list, call_node) { 846 847 int *site = (int *)sec->data->d_buf + idx; 848 *site = 0; 849 850 if (elf_add_reloc_to_insn(file->elf, sec, 851 idx * sizeof(int), 852 R_X86_64_PC32, 853 insn->sec, insn->offset)) { 854 WARN("elf_add_reloc_to_insn: .ibt_endbr_seal"); 855 return -1; 856 } 857 858 idx++; 859 } 860 861 return 0; 862 } 863 864 static int create_cfi_sections(struct objtool_file *file) 865 { 866 struct section *sec, *s; 867 struct symbol *sym; 868 unsigned int *loc; 869 int idx; 870 871 sec = find_section_by_name(file->elf, ".cfi_sites"); 872 if (sec) { 873 INIT_LIST_HEAD(&file->call_list); 874 WARN("file already has .cfi_sites section, skipping"); 875 return 0; 876 } 877 878 idx = 0; 879 for_each_sec(file, s) { 880 if (!s->text) 881 continue; 882 883 list_for_each_entry(sym, &s->symbol_list, list) { 884 if (sym->type != STT_FUNC) 885 continue; 886 887 if (strncmp(sym->name, "__cfi_", 6)) 888 continue; 889 890 idx++; 891 } 892 } 893 894 sec = elf_create_section(file->elf, ".cfi_sites", 0, sizeof(unsigned int), idx); 895 if (!sec) 896 return -1; 897 898 idx = 0; 899 for_each_sec(file, s) { 900 if (!s->text) 901 continue; 902 903 list_for_each_entry(sym, &s->symbol_list, list) { 904 if (sym->type != STT_FUNC) 905 continue; 906 907 if (strncmp(sym->name, "__cfi_", 6)) 908 continue; 909 910 loc = (unsigned int *)sec->data->d_buf + idx; 911 memset(loc, 0, sizeof(unsigned int)); 912 913 if (elf_add_reloc_to_insn(file->elf, sec, 914 idx * sizeof(unsigned int), 915 R_X86_64_PC32, 916 s, sym->offset)) 917 return -1; 918 919 idx++; 920 } 921 } 922 923 return 0; 924 } 925 926 static int create_mcount_loc_sections(struct objtool_file *file) 927 { 928 int addrsize = elf_class_addrsize(file->elf); 929 struct instruction *insn; 930 struct section *sec; 931 int idx; 932 933 sec = find_section_by_name(file->elf, "__mcount_loc"); 934 if (sec) { 935 INIT_LIST_HEAD(&file->mcount_loc_list); 936 WARN("file already has __mcount_loc section, skipping"); 937 return 0; 938 } 939 940 if (list_empty(&file->mcount_loc_list)) 941 return 0; 942 943 idx = 0; 944 list_for_each_entry(insn, &file->mcount_loc_list, call_node) 945 idx++; 946 947 sec = elf_create_section(file->elf, "__mcount_loc", 0, addrsize, idx); 948 if (!sec) 949 return -1; 950 951 sec->sh.sh_addralign = addrsize; 952 953 idx = 0; 954 list_for_each_entry(insn, &file->mcount_loc_list, call_node) { 955 void *loc; 956 957 loc = sec->data->d_buf + idx; 958 memset(loc, 0, addrsize); 959 960 if (elf_add_reloc_to_insn(file->elf, sec, idx, 961 addrsize == sizeof(u64) ? R_ABS64 : R_ABS32, 962 insn->sec, insn->offset)) 963 return -1; 964 965 idx += addrsize; 966 } 967 968 return 0; 969 } 970 971 static int create_direct_call_sections(struct objtool_file *file) 972 { 973 struct instruction *insn; 974 struct section *sec; 975 unsigned int *loc; 976 int idx; 977 978 sec = find_section_by_name(file->elf, ".call_sites"); 979 if (sec) { 980 INIT_LIST_HEAD(&file->call_list); 981 WARN("file already has .call_sites section, skipping"); 982 return 0; 983 } 984 985 if (list_empty(&file->call_list)) 986 return 0; 987 988 idx = 0; 989 list_for_each_entry(insn, &file->call_list, call_node) 990 idx++; 991 992 sec = elf_create_section(file->elf, ".call_sites", 0, sizeof(unsigned int), idx); 993 if (!sec) 994 return -1; 995 996 idx = 0; 997 list_for_each_entry(insn, &file->call_list, call_node) { 998 999 loc = (unsigned int *)sec->data->d_buf + idx; 1000 memset(loc, 0, sizeof(unsigned int)); 1001 1002 if (elf_add_reloc_to_insn(file->elf, sec, 1003 idx * sizeof(unsigned int), 1004 R_X86_64_PC32, 1005 insn->sec, insn->offset)) 1006 return -1; 1007 1008 idx++; 1009 } 1010 1011 return 0; 1012 } 1013 1014 /* 1015 * Warnings shouldn't be reported for ignored functions. 1016 */ 1017 static void add_ignores(struct objtool_file *file) 1018 { 1019 struct instruction *insn; 1020 struct section *sec; 1021 struct symbol *func; 1022 struct reloc *reloc; 1023 1024 sec = find_section_by_name(file->elf, ".rela.discard.func_stack_frame_non_standard"); 1025 if (!sec) 1026 return; 1027 1028 list_for_each_entry(reloc, &sec->reloc_list, list) { 1029 switch (reloc->sym->type) { 1030 case STT_FUNC: 1031 func = reloc->sym; 1032 break; 1033 1034 case STT_SECTION: 1035 func = find_func_by_offset(reloc->sym->sec, reloc->addend); 1036 if (!func) 1037 continue; 1038 break; 1039 1040 default: 1041 WARN("unexpected relocation symbol type in %s: %d", sec->name, reloc->sym->type); 1042 continue; 1043 } 1044 1045 func_for_each_insn(file, func, insn) 1046 insn->ignore = true; 1047 } 1048 } 1049 1050 /* 1051 * This is a whitelist of functions that is allowed to be called with AC set. 1052 * The list is meant to be minimal and only contains compiler instrumentation 1053 * ABI and a few functions used to implement *_{to,from}_user() functions. 1054 * 1055 * These functions must not directly change AC, but may PUSHF/POPF. 1056 */ 1057 static const char *uaccess_safe_builtin[] = { 1058 /* KASAN */ 1059 "kasan_report", 1060 "kasan_check_range", 1061 /* KASAN out-of-line */ 1062 "__asan_loadN_noabort", 1063 "__asan_load1_noabort", 1064 "__asan_load2_noabort", 1065 "__asan_load4_noabort", 1066 "__asan_load8_noabort", 1067 "__asan_load16_noabort", 1068 "__asan_storeN_noabort", 1069 "__asan_store1_noabort", 1070 "__asan_store2_noabort", 1071 "__asan_store4_noabort", 1072 "__asan_store8_noabort", 1073 "__asan_store16_noabort", 1074 "__kasan_check_read", 1075 "__kasan_check_write", 1076 /* KASAN in-line */ 1077 "__asan_report_load_n_noabort", 1078 "__asan_report_load1_noabort", 1079 "__asan_report_load2_noabort", 1080 "__asan_report_load4_noabort", 1081 "__asan_report_load8_noabort", 1082 "__asan_report_load16_noabort", 1083 "__asan_report_store_n_noabort", 1084 "__asan_report_store1_noabort", 1085 "__asan_report_store2_noabort", 1086 "__asan_report_store4_noabort", 1087 "__asan_report_store8_noabort", 1088 "__asan_report_store16_noabort", 1089 /* KCSAN */ 1090 "__kcsan_check_access", 1091 "__kcsan_mb", 1092 "__kcsan_wmb", 1093 "__kcsan_rmb", 1094 "__kcsan_release", 1095 "kcsan_found_watchpoint", 1096 "kcsan_setup_watchpoint", 1097 "kcsan_check_scoped_accesses", 1098 "kcsan_disable_current", 1099 "kcsan_enable_current_nowarn", 1100 /* KCSAN/TSAN */ 1101 "__tsan_func_entry", 1102 "__tsan_func_exit", 1103 "__tsan_read_range", 1104 "__tsan_write_range", 1105 "__tsan_read1", 1106 "__tsan_read2", 1107 "__tsan_read4", 1108 "__tsan_read8", 1109 "__tsan_read16", 1110 "__tsan_write1", 1111 "__tsan_write2", 1112 "__tsan_write4", 1113 "__tsan_write8", 1114 "__tsan_write16", 1115 "__tsan_read_write1", 1116 "__tsan_read_write2", 1117 "__tsan_read_write4", 1118 "__tsan_read_write8", 1119 "__tsan_read_write16", 1120 "__tsan_volatile_read1", 1121 "__tsan_volatile_read2", 1122 "__tsan_volatile_read4", 1123 "__tsan_volatile_read8", 1124 "__tsan_volatile_read16", 1125 "__tsan_volatile_write1", 1126 "__tsan_volatile_write2", 1127 "__tsan_volatile_write4", 1128 "__tsan_volatile_write8", 1129 "__tsan_volatile_write16", 1130 "__tsan_atomic8_load", 1131 "__tsan_atomic16_load", 1132 "__tsan_atomic32_load", 1133 "__tsan_atomic64_load", 1134 "__tsan_atomic8_store", 1135 "__tsan_atomic16_store", 1136 "__tsan_atomic32_store", 1137 "__tsan_atomic64_store", 1138 "__tsan_atomic8_exchange", 1139 "__tsan_atomic16_exchange", 1140 "__tsan_atomic32_exchange", 1141 "__tsan_atomic64_exchange", 1142 "__tsan_atomic8_fetch_add", 1143 "__tsan_atomic16_fetch_add", 1144 "__tsan_atomic32_fetch_add", 1145 "__tsan_atomic64_fetch_add", 1146 "__tsan_atomic8_fetch_sub", 1147 "__tsan_atomic16_fetch_sub", 1148 "__tsan_atomic32_fetch_sub", 1149 "__tsan_atomic64_fetch_sub", 1150 "__tsan_atomic8_fetch_and", 1151 "__tsan_atomic16_fetch_and", 1152 "__tsan_atomic32_fetch_and", 1153 "__tsan_atomic64_fetch_and", 1154 "__tsan_atomic8_fetch_or", 1155 "__tsan_atomic16_fetch_or", 1156 "__tsan_atomic32_fetch_or", 1157 "__tsan_atomic64_fetch_or", 1158 "__tsan_atomic8_fetch_xor", 1159 "__tsan_atomic16_fetch_xor", 1160 "__tsan_atomic32_fetch_xor", 1161 "__tsan_atomic64_fetch_xor", 1162 "__tsan_atomic8_fetch_nand", 1163 "__tsan_atomic16_fetch_nand", 1164 "__tsan_atomic32_fetch_nand", 1165 "__tsan_atomic64_fetch_nand", 1166 "__tsan_atomic8_compare_exchange_strong", 1167 "__tsan_atomic16_compare_exchange_strong", 1168 "__tsan_atomic32_compare_exchange_strong", 1169 "__tsan_atomic64_compare_exchange_strong", 1170 "__tsan_atomic8_compare_exchange_weak", 1171 "__tsan_atomic16_compare_exchange_weak", 1172 "__tsan_atomic32_compare_exchange_weak", 1173 "__tsan_atomic64_compare_exchange_weak", 1174 "__tsan_atomic8_compare_exchange_val", 1175 "__tsan_atomic16_compare_exchange_val", 1176 "__tsan_atomic32_compare_exchange_val", 1177 "__tsan_atomic64_compare_exchange_val", 1178 "__tsan_atomic_thread_fence", 1179 "__tsan_atomic_signal_fence", 1180 /* KCOV */ 1181 "write_comp_data", 1182 "check_kcov_mode", 1183 "__sanitizer_cov_trace_pc", 1184 "__sanitizer_cov_trace_const_cmp1", 1185 "__sanitizer_cov_trace_const_cmp2", 1186 "__sanitizer_cov_trace_const_cmp4", 1187 "__sanitizer_cov_trace_const_cmp8", 1188 "__sanitizer_cov_trace_cmp1", 1189 "__sanitizer_cov_trace_cmp2", 1190 "__sanitizer_cov_trace_cmp4", 1191 "__sanitizer_cov_trace_cmp8", 1192 "__sanitizer_cov_trace_switch", 1193 /* KMSAN */ 1194 "kmsan_copy_to_user", 1195 "kmsan_report", 1196 "kmsan_unpoison_entry_regs", 1197 "kmsan_unpoison_memory", 1198 "__msan_chain_origin", 1199 "__msan_get_context_state", 1200 "__msan_instrument_asm_store", 1201 "__msan_metadata_ptr_for_load_1", 1202 "__msan_metadata_ptr_for_load_2", 1203 "__msan_metadata_ptr_for_load_4", 1204 "__msan_metadata_ptr_for_load_8", 1205 "__msan_metadata_ptr_for_load_n", 1206 "__msan_metadata_ptr_for_store_1", 1207 "__msan_metadata_ptr_for_store_2", 1208 "__msan_metadata_ptr_for_store_4", 1209 "__msan_metadata_ptr_for_store_8", 1210 "__msan_metadata_ptr_for_store_n", 1211 "__msan_poison_alloca", 1212 "__msan_warning", 1213 /* UBSAN */ 1214 "ubsan_type_mismatch_common", 1215 "__ubsan_handle_type_mismatch", 1216 "__ubsan_handle_type_mismatch_v1", 1217 "__ubsan_handle_shift_out_of_bounds", 1218 /* misc */ 1219 "csum_partial_copy_generic", 1220 "copy_mc_fragile", 1221 "copy_mc_fragile_handle_tail", 1222 "copy_mc_enhanced_fast_string", 1223 "ftrace_likely_update", /* CONFIG_TRACE_BRANCH_PROFILING */ 1224 "clear_user_erms", 1225 "clear_user_rep_good", 1226 "clear_user_original", 1227 NULL 1228 }; 1229 1230 static void add_uaccess_safe(struct objtool_file *file) 1231 { 1232 struct symbol *func; 1233 const char **name; 1234 1235 if (!opts.uaccess) 1236 return; 1237 1238 for (name = uaccess_safe_builtin; *name; name++) { 1239 func = find_symbol_by_name(file->elf, *name); 1240 if (!func) 1241 continue; 1242 1243 func->uaccess_safe = true; 1244 } 1245 } 1246 1247 /* 1248 * FIXME: For now, just ignore any alternatives which add retpolines. This is 1249 * a temporary hack, as it doesn't allow ORC to unwind from inside a retpoline. 1250 * But it at least allows objtool to understand the control flow *around* the 1251 * retpoline. 1252 */ 1253 static int add_ignore_alternatives(struct objtool_file *file) 1254 { 1255 struct section *sec; 1256 struct reloc *reloc; 1257 struct instruction *insn; 1258 1259 sec = find_section_by_name(file->elf, ".rela.discard.ignore_alts"); 1260 if (!sec) 1261 return 0; 1262 1263 list_for_each_entry(reloc, &sec->reloc_list, list) { 1264 if (reloc->sym->type != STT_SECTION) { 1265 WARN("unexpected relocation symbol type in %s", sec->name); 1266 return -1; 1267 } 1268 1269 insn = find_insn(file, reloc->sym->sec, reloc->addend); 1270 if (!insn) { 1271 WARN("bad .discard.ignore_alts entry"); 1272 return -1; 1273 } 1274 1275 insn->ignore_alts = true; 1276 } 1277 1278 return 0; 1279 } 1280 1281 __weak bool arch_is_retpoline(struct symbol *sym) 1282 { 1283 return false; 1284 } 1285 1286 __weak bool arch_is_rethunk(struct symbol *sym) 1287 { 1288 return false; 1289 } 1290 1291 #define NEGATIVE_RELOC ((void *)-1L) 1292 1293 static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn) 1294 { 1295 if (insn->reloc == NEGATIVE_RELOC) 1296 return NULL; 1297 1298 if (!insn->reloc) { 1299 if (!file) 1300 return NULL; 1301 1302 insn->reloc = find_reloc_by_dest_range(file->elf, insn->sec, 1303 insn->offset, insn->len); 1304 if (!insn->reloc) { 1305 insn->reloc = NEGATIVE_RELOC; 1306 return NULL; 1307 } 1308 } 1309 1310 return insn->reloc; 1311 } 1312 1313 static void remove_insn_ops(struct instruction *insn) 1314 { 1315 struct stack_op *op, *tmp; 1316 1317 list_for_each_entry_safe(op, tmp, &insn->stack_ops, list) { 1318 list_del(&op->list); 1319 free(op); 1320 } 1321 } 1322 1323 static void annotate_call_site(struct objtool_file *file, 1324 struct instruction *insn, bool sibling) 1325 { 1326 struct reloc *reloc = insn_reloc(file, insn); 1327 struct symbol *sym = insn->call_dest; 1328 1329 if (!sym) 1330 sym = reloc->sym; 1331 1332 /* 1333 * Alternative replacement code is just template code which is 1334 * sometimes copied to the original instruction. For now, don't 1335 * annotate it. (In the future we might consider annotating the 1336 * original instruction if/when it ever makes sense to do so.) 1337 */ 1338 if (!strcmp(insn->sec->name, ".altinstr_replacement")) 1339 return; 1340 1341 if (sym->static_call_tramp) { 1342 list_add_tail(&insn->call_node, &file->static_call_list); 1343 return; 1344 } 1345 1346 if (sym->retpoline_thunk) { 1347 list_add_tail(&insn->call_node, &file->retpoline_call_list); 1348 return; 1349 } 1350 1351 /* 1352 * Many compilers cannot disable KCOV or sanitizer calls with a function 1353 * attribute so they need a little help, NOP out any such calls from 1354 * noinstr text. 1355 */ 1356 if (opts.hack_noinstr && insn->sec->noinstr && sym->profiling_func) { 1357 if (reloc) { 1358 reloc->type = R_NONE; 1359 elf_write_reloc(file->elf, reloc); 1360 } 1361 1362 elf_write_insn(file->elf, insn->sec, 1363 insn->offset, insn->len, 1364 sibling ? arch_ret_insn(insn->len) 1365 : arch_nop_insn(insn->len)); 1366 1367 insn->type = sibling ? INSN_RETURN : INSN_NOP; 1368 1369 if (sibling) { 1370 /* 1371 * We've replaced the tail-call JMP insn by two new 1372 * insn: RET; INT3, except we only have a single struct 1373 * insn here. Mark it retpoline_safe to avoid the SLS 1374 * warning, instead of adding another insn. 1375 */ 1376 insn->retpoline_safe = true; 1377 } 1378 1379 return; 1380 } 1381 1382 if (opts.mcount && sym->fentry) { 1383 if (sibling) 1384 WARN_FUNC("Tail call to __fentry__ !?!?", insn->sec, insn->offset); 1385 if (opts.mnop) { 1386 if (reloc) { 1387 reloc->type = R_NONE; 1388 elf_write_reloc(file->elf, reloc); 1389 } 1390 1391 elf_write_insn(file->elf, insn->sec, 1392 insn->offset, insn->len, 1393 arch_nop_insn(insn->len)); 1394 1395 insn->type = INSN_NOP; 1396 } 1397 1398 list_add_tail(&insn->call_node, &file->mcount_loc_list); 1399 return; 1400 } 1401 1402 if (insn->type == INSN_CALL && !insn->sec->init) 1403 list_add_tail(&insn->call_node, &file->call_list); 1404 1405 if (!sibling && dead_end_function(file, sym)) 1406 insn->dead_end = true; 1407 } 1408 1409 static void add_call_dest(struct objtool_file *file, struct instruction *insn, 1410 struct symbol *dest, bool sibling) 1411 { 1412 insn->call_dest = dest; 1413 if (!dest) 1414 return; 1415 1416 /* 1417 * Whatever stack impact regular CALLs have, should be undone 1418 * by the RETURN of the called function. 1419 * 1420 * Annotated intra-function calls retain the stack_ops but 1421 * are converted to JUMP, see read_intra_function_calls(). 1422 */ 1423 remove_insn_ops(insn); 1424 1425 annotate_call_site(file, insn, sibling); 1426 } 1427 1428 static void add_retpoline_call(struct objtool_file *file, struct instruction *insn) 1429 { 1430 /* 1431 * Retpoline calls/jumps are really dynamic calls/jumps in disguise, 1432 * so convert them accordingly. 1433 */ 1434 switch (insn->type) { 1435 case INSN_CALL: 1436 insn->type = INSN_CALL_DYNAMIC; 1437 break; 1438 case INSN_JUMP_UNCONDITIONAL: 1439 insn->type = INSN_JUMP_DYNAMIC; 1440 break; 1441 case INSN_JUMP_CONDITIONAL: 1442 insn->type = INSN_JUMP_DYNAMIC_CONDITIONAL; 1443 break; 1444 default: 1445 return; 1446 } 1447 1448 insn->retpoline_safe = true; 1449 1450 /* 1451 * Whatever stack impact regular CALLs have, should be undone 1452 * by the RETURN of the called function. 1453 * 1454 * Annotated intra-function calls retain the stack_ops but 1455 * are converted to JUMP, see read_intra_function_calls(). 1456 */ 1457 remove_insn_ops(insn); 1458 1459 annotate_call_site(file, insn, false); 1460 } 1461 1462 static void add_return_call(struct objtool_file *file, struct instruction *insn, bool add) 1463 { 1464 /* 1465 * Return thunk tail calls are really just returns in disguise, 1466 * so convert them accordingly. 1467 */ 1468 insn->type = INSN_RETURN; 1469 insn->retpoline_safe = true; 1470 1471 if (add) 1472 list_add_tail(&insn->call_node, &file->return_thunk_list); 1473 } 1474 1475 static bool is_first_func_insn(struct objtool_file *file, 1476 struct instruction *insn, struct symbol *sym) 1477 { 1478 if (insn->offset == sym->offset) 1479 return true; 1480 1481 /* Allow direct CALL/JMP past ENDBR */ 1482 if (opts.ibt) { 1483 struct instruction *prev = prev_insn_same_sym(file, insn); 1484 1485 if (prev && prev->type == INSN_ENDBR && 1486 insn->offset == sym->offset + prev->len) 1487 return true; 1488 } 1489 1490 return false; 1491 } 1492 1493 /* 1494 * A sibling call is a tail-call to another symbol -- to differentiate from a 1495 * recursive tail-call which is to the same symbol. 1496 */ 1497 static bool jump_is_sibling_call(struct objtool_file *file, 1498 struct instruction *from, struct instruction *to) 1499 { 1500 struct symbol *fs = from->sym; 1501 struct symbol *ts = to->sym; 1502 1503 /* Not a sibling call if from/to a symbol hole */ 1504 if (!fs || !ts) 1505 return false; 1506 1507 /* Not a sibling call if not targeting the start of a symbol. */ 1508 if (!is_first_func_insn(file, to, ts)) 1509 return false; 1510 1511 /* Disallow sibling calls into STT_NOTYPE */ 1512 if (ts->type == STT_NOTYPE) 1513 return false; 1514 1515 /* Must not be self to be a sibling */ 1516 return fs->pfunc != ts->pfunc; 1517 } 1518 1519 /* 1520 * Find the destination instructions for all jumps. 1521 */ 1522 static int add_jump_destinations(struct objtool_file *file) 1523 { 1524 struct instruction *insn, *jump_dest; 1525 struct reloc *reloc; 1526 struct section *dest_sec; 1527 unsigned long dest_off; 1528 1529 for_each_insn(file, insn) { 1530 if (insn->jump_dest) { 1531 /* 1532 * handle_group_alt() may have previously set 1533 * 'jump_dest' for some alternatives. 1534 */ 1535 continue; 1536 } 1537 if (!is_static_jump(insn)) 1538 continue; 1539 1540 reloc = insn_reloc(file, insn); 1541 if (!reloc) { 1542 dest_sec = insn->sec; 1543 dest_off = arch_jump_destination(insn); 1544 } else if (reloc->sym->type == STT_SECTION) { 1545 dest_sec = reloc->sym->sec; 1546 dest_off = arch_dest_reloc_offset(reloc->addend); 1547 } else if (reloc->sym->retpoline_thunk) { 1548 add_retpoline_call(file, insn); 1549 continue; 1550 } else if (reloc->sym->return_thunk) { 1551 add_return_call(file, insn, true); 1552 continue; 1553 } else if (insn_func(insn)) { 1554 /* 1555 * External sibling call or internal sibling call with 1556 * STT_FUNC reloc. 1557 */ 1558 add_call_dest(file, insn, reloc->sym, true); 1559 continue; 1560 } else if (reloc->sym->sec->idx) { 1561 dest_sec = reloc->sym->sec; 1562 dest_off = reloc->sym->sym.st_value + 1563 arch_dest_reloc_offset(reloc->addend); 1564 } else { 1565 /* non-func asm code jumping to another file */ 1566 continue; 1567 } 1568 1569 jump_dest = find_insn(file, dest_sec, dest_off); 1570 if (!jump_dest) { 1571 struct symbol *sym = find_symbol_by_offset(dest_sec, dest_off); 1572 1573 /* 1574 * This is a special case for zen_untrain_ret(). 1575 * It jumps to __x86_return_thunk(), but objtool 1576 * can't find the thunk's starting RET 1577 * instruction, because the RET is also in the 1578 * middle of another instruction. Objtool only 1579 * knows about the outer instruction. 1580 */ 1581 if (sym && sym->return_thunk) { 1582 add_return_call(file, insn, false); 1583 continue; 1584 } 1585 1586 WARN_FUNC("can't find jump dest instruction at %s+0x%lx", 1587 insn->sec, insn->offset, dest_sec->name, 1588 dest_off); 1589 return -1; 1590 } 1591 1592 /* 1593 * Cross-function jump. 1594 */ 1595 if (insn_func(insn) && insn_func(jump_dest) && 1596 insn_func(insn) != insn_func(jump_dest)) { 1597 1598 /* 1599 * For GCC 8+, create parent/child links for any cold 1600 * subfunctions. This is _mostly_ redundant with a 1601 * similar initialization in read_symbols(). 1602 * 1603 * If a function has aliases, we want the *first* such 1604 * function in the symbol table to be the subfunction's 1605 * parent. In that case we overwrite the 1606 * initialization done in read_symbols(). 1607 * 1608 * However this code can't completely replace the 1609 * read_symbols() code because this doesn't detect the 1610 * case where the parent function's only reference to a 1611 * subfunction is through a jump table. 1612 */ 1613 if (!strstr(insn_func(insn)->name, ".cold") && 1614 strstr(insn_func(jump_dest)->name, ".cold")) { 1615 insn_func(insn)->cfunc = insn_func(jump_dest); 1616 insn_func(jump_dest)->pfunc = insn_func(insn); 1617 } 1618 } 1619 1620 if (jump_is_sibling_call(file, insn, jump_dest)) { 1621 /* 1622 * Internal sibling call without reloc or with 1623 * STT_SECTION reloc. 1624 */ 1625 add_call_dest(file, insn, insn_func(jump_dest), true); 1626 continue; 1627 } 1628 1629 insn->jump_dest = jump_dest; 1630 } 1631 1632 return 0; 1633 } 1634 1635 static struct symbol *find_call_destination(struct section *sec, unsigned long offset) 1636 { 1637 struct symbol *call_dest; 1638 1639 call_dest = find_func_by_offset(sec, offset); 1640 if (!call_dest) 1641 call_dest = find_symbol_by_offset(sec, offset); 1642 1643 return call_dest; 1644 } 1645 1646 /* 1647 * Find the destination instructions for all calls. 1648 */ 1649 static int add_call_destinations(struct objtool_file *file) 1650 { 1651 struct instruction *insn; 1652 unsigned long dest_off; 1653 struct symbol *dest; 1654 struct reloc *reloc; 1655 1656 for_each_insn(file, insn) { 1657 if (insn->type != INSN_CALL) 1658 continue; 1659 1660 reloc = insn_reloc(file, insn); 1661 if (!reloc) { 1662 dest_off = arch_jump_destination(insn); 1663 dest = find_call_destination(insn->sec, dest_off); 1664 1665 add_call_dest(file, insn, dest, false); 1666 1667 if (insn->ignore) 1668 continue; 1669 1670 if (!insn->call_dest) { 1671 WARN_FUNC("unannotated intra-function call", insn->sec, insn->offset); 1672 return -1; 1673 } 1674 1675 if (insn_func(insn) && insn->call_dest->type != STT_FUNC) { 1676 WARN_FUNC("unsupported call to non-function", 1677 insn->sec, insn->offset); 1678 return -1; 1679 } 1680 1681 } else if (reloc->sym->type == STT_SECTION) { 1682 dest_off = arch_dest_reloc_offset(reloc->addend); 1683 dest = find_call_destination(reloc->sym->sec, dest_off); 1684 if (!dest) { 1685 WARN_FUNC("can't find call dest symbol at %s+0x%lx", 1686 insn->sec, insn->offset, 1687 reloc->sym->sec->name, 1688 dest_off); 1689 return -1; 1690 } 1691 1692 add_call_dest(file, insn, dest, false); 1693 1694 } else if (reloc->sym->retpoline_thunk) { 1695 add_retpoline_call(file, insn); 1696 1697 } else 1698 add_call_dest(file, insn, reloc->sym, false); 1699 } 1700 1701 return 0; 1702 } 1703 1704 /* 1705 * The .alternatives section requires some extra special care over and above 1706 * other special sections because alternatives are patched in place. 1707 */ 1708 static int handle_group_alt(struct objtool_file *file, 1709 struct special_alt *special_alt, 1710 struct instruction *orig_insn, 1711 struct instruction **new_insn) 1712 { 1713 struct instruction *last_orig_insn, *last_new_insn = NULL, *insn, *nop = NULL; 1714 struct alt_group *orig_alt_group, *new_alt_group; 1715 unsigned long dest_off; 1716 1717 1718 orig_alt_group = malloc(sizeof(*orig_alt_group)); 1719 if (!orig_alt_group) { 1720 WARN("malloc failed"); 1721 return -1; 1722 } 1723 orig_alt_group->cfi = calloc(special_alt->orig_len, 1724 sizeof(struct cfi_state *)); 1725 if (!orig_alt_group->cfi) { 1726 WARN("calloc failed"); 1727 return -1; 1728 } 1729 1730 last_orig_insn = NULL; 1731 insn = orig_insn; 1732 sec_for_each_insn_from(file, insn) { 1733 if (insn->offset >= special_alt->orig_off + special_alt->orig_len) 1734 break; 1735 1736 insn->alt_group = orig_alt_group; 1737 last_orig_insn = insn; 1738 } 1739 orig_alt_group->orig_group = NULL; 1740 orig_alt_group->first_insn = orig_insn; 1741 orig_alt_group->last_insn = last_orig_insn; 1742 1743 1744 new_alt_group = malloc(sizeof(*new_alt_group)); 1745 if (!new_alt_group) { 1746 WARN("malloc failed"); 1747 return -1; 1748 } 1749 1750 if (special_alt->new_len < special_alt->orig_len) { 1751 /* 1752 * Insert a fake nop at the end to make the replacement 1753 * alt_group the same size as the original. This is needed to 1754 * allow propagate_alt_cfi() to do its magic. When the last 1755 * instruction affects the stack, the instruction after it (the 1756 * nop) will propagate the new state to the shared CFI array. 1757 */ 1758 nop = malloc(sizeof(*nop)); 1759 if (!nop) { 1760 WARN("malloc failed"); 1761 return -1; 1762 } 1763 memset(nop, 0, sizeof(*nop)); 1764 INIT_LIST_HEAD(&nop->alts); 1765 INIT_LIST_HEAD(&nop->stack_ops); 1766 1767 nop->sec = special_alt->new_sec; 1768 nop->offset = special_alt->new_off + special_alt->new_len; 1769 nop->len = special_alt->orig_len - special_alt->new_len; 1770 nop->type = INSN_NOP; 1771 nop->sym = orig_insn->sym; 1772 nop->alt_group = new_alt_group; 1773 nop->ignore = orig_insn->ignore_alts; 1774 } 1775 1776 if (!special_alt->new_len) { 1777 *new_insn = nop; 1778 goto end; 1779 } 1780 1781 insn = *new_insn; 1782 sec_for_each_insn_from(file, insn) { 1783 struct reloc *alt_reloc; 1784 1785 if (insn->offset >= special_alt->new_off + special_alt->new_len) 1786 break; 1787 1788 last_new_insn = insn; 1789 1790 insn->ignore = orig_insn->ignore_alts; 1791 insn->sym = orig_insn->sym; 1792 insn->alt_group = new_alt_group; 1793 1794 /* 1795 * Since alternative replacement code is copy/pasted by the 1796 * kernel after applying relocations, generally such code can't 1797 * have relative-address relocation references to outside the 1798 * .altinstr_replacement section, unless the arch's 1799 * alternatives code can adjust the relative offsets 1800 * accordingly. 1801 */ 1802 alt_reloc = insn_reloc(file, insn); 1803 if (alt_reloc && arch_pc_relative_reloc(alt_reloc) && 1804 !arch_support_alt_relocation(special_alt, insn, alt_reloc)) { 1805 1806 WARN_FUNC("unsupported relocation in alternatives section", 1807 insn->sec, insn->offset); 1808 return -1; 1809 } 1810 1811 if (!is_static_jump(insn)) 1812 continue; 1813 1814 if (!insn->immediate) 1815 continue; 1816 1817 dest_off = arch_jump_destination(insn); 1818 if (dest_off == special_alt->new_off + special_alt->new_len) { 1819 insn->jump_dest = next_insn_same_sec(file, last_orig_insn); 1820 if (!insn->jump_dest) { 1821 WARN_FUNC("can't find alternative jump destination", 1822 insn->sec, insn->offset); 1823 return -1; 1824 } 1825 } 1826 } 1827 1828 if (!last_new_insn) { 1829 WARN_FUNC("can't find last new alternative instruction", 1830 special_alt->new_sec, special_alt->new_off); 1831 return -1; 1832 } 1833 1834 if (nop) 1835 list_add(&nop->list, &last_new_insn->list); 1836 end: 1837 new_alt_group->orig_group = orig_alt_group; 1838 new_alt_group->first_insn = *new_insn; 1839 new_alt_group->last_insn = nop ? : last_new_insn; 1840 new_alt_group->cfi = orig_alt_group->cfi; 1841 return 0; 1842 } 1843 1844 /* 1845 * A jump table entry can either convert a nop to a jump or a jump to a nop. 1846 * If the original instruction is a jump, make the alt entry an effective nop 1847 * by just skipping the original instruction. 1848 */ 1849 static int handle_jump_alt(struct objtool_file *file, 1850 struct special_alt *special_alt, 1851 struct instruction *orig_insn, 1852 struct instruction **new_insn) 1853 { 1854 if (orig_insn->type != INSN_JUMP_UNCONDITIONAL && 1855 orig_insn->type != INSN_NOP) { 1856 1857 WARN_FUNC("unsupported instruction at jump label", 1858 orig_insn->sec, orig_insn->offset); 1859 return -1; 1860 } 1861 1862 if (opts.hack_jump_label && special_alt->key_addend & 2) { 1863 struct reloc *reloc = insn_reloc(file, orig_insn); 1864 1865 if (reloc) { 1866 reloc->type = R_NONE; 1867 elf_write_reloc(file->elf, reloc); 1868 } 1869 elf_write_insn(file->elf, orig_insn->sec, 1870 orig_insn->offset, orig_insn->len, 1871 arch_nop_insn(orig_insn->len)); 1872 orig_insn->type = INSN_NOP; 1873 } 1874 1875 if (orig_insn->type == INSN_NOP) { 1876 if (orig_insn->len == 2) 1877 file->jl_nop_short++; 1878 else 1879 file->jl_nop_long++; 1880 1881 return 0; 1882 } 1883 1884 if (orig_insn->len == 2) 1885 file->jl_short++; 1886 else 1887 file->jl_long++; 1888 1889 *new_insn = list_next_entry(orig_insn, list); 1890 return 0; 1891 } 1892 1893 /* 1894 * Read all the special sections which have alternate instructions which can be 1895 * patched in or redirected to at runtime. Each instruction having alternate 1896 * instruction(s) has them added to its insn->alts list, which will be 1897 * traversed in validate_branch(). 1898 */ 1899 static int add_special_section_alts(struct objtool_file *file) 1900 { 1901 struct list_head special_alts; 1902 struct instruction *orig_insn, *new_insn; 1903 struct special_alt *special_alt, *tmp; 1904 struct alternative *alt; 1905 int ret; 1906 1907 ret = special_get_alts(file->elf, &special_alts); 1908 if (ret) 1909 return ret; 1910 1911 list_for_each_entry_safe(special_alt, tmp, &special_alts, list) { 1912 1913 orig_insn = find_insn(file, special_alt->orig_sec, 1914 special_alt->orig_off); 1915 if (!orig_insn) { 1916 WARN_FUNC("special: can't find orig instruction", 1917 special_alt->orig_sec, special_alt->orig_off); 1918 ret = -1; 1919 goto out; 1920 } 1921 1922 new_insn = NULL; 1923 if (!special_alt->group || special_alt->new_len) { 1924 new_insn = find_insn(file, special_alt->new_sec, 1925 special_alt->new_off); 1926 if (!new_insn) { 1927 WARN_FUNC("special: can't find new instruction", 1928 special_alt->new_sec, 1929 special_alt->new_off); 1930 ret = -1; 1931 goto out; 1932 } 1933 } 1934 1935 if (special_alt->group) { 1936 if (!special_alt->orig_len) { 1937 WARN_FUNC("empty alternative entry", 1938 orig_insn->sec, orig_insn->offset); 1939 continue; 1940 } 1941 1942 ret = handle_group_alt(file, special_alt, orig_insn, 1943 &new_insn); 1944 if (ret) 1945 goto out; 1946 } else if (special_alt->jump_or_nop) { 1947 ret = handle_jump_alt(file, special_alt, orig_insn, 1948 &new_insn); 1949 if (ret) 1950 goto out; 1951 } 1952 1953 alt = malloc(sizeof(*alt)); 1954 if (!alt) { 1955 WARN("malloc failed"); 1956 ret = -1; 1957 goto out; 1958 } 1959 1960 alt->insn = new_insn; 1961 alt->skip_orig = special_alt->skip_orig; 1962 orig_insn->ignore_alts |= special_alt->skip_alt; 1963 list_add_tail(&alt->list, &orig_insn->alts); 1964 1965 list_del(&special_alt->list); 1966 free(special_alt); 1967 } 1968 1969 if (opts.stats) { 1970 printf("jl\\\tNOP\tJMP\n"); 1971 printf("short:\t%ld\t%ld\n", file->jl_nop_short, file->jl_short); 1972 printf("long:\t%ld\t%ld\n", file->jl_nop_long, file->jl_long); 1973 } 1974 1975 out: 1976 return ret; 1977 } 1978 1979 static int add_jump_table(struct objtool_file *file, struct instruction *insn, 1980 struct reloc *table) 1981 { 1982 struct reloc *reloc = table; 1983 struct instruction *dest_insn; 1984 struct alternative *alt; 1985 struct symbol *pfunc = insn_func(insn)->pfunc; 1986 unsigned int prev_offset = 0; 1987 1988 /* 1989 * Each @reloc is a switch table relocation which points to the target 1990 * instruction. 1991 */ 1992 list_for_each_entry_from(reloc, &table->sec->reloc_list, list) { 1993 1994 /* Check for the end of the table: */ 1995 if (reloc != table && reloc->jump_table_start) 1996 break; 1997 1998 /* Make sure the table entries are consecutive: */ 1999 if (prev_offset && reloc->offset != prev_offset + 8) 2000 break; 2001 2002 /* Detect function pointers from contiguous objects: */ 2003 if (reloc->sym->sec == pfunc->sec && 2004 reloc->addend == pfunc->offset) 2005 break; 2006 2007 dest_insn = find_insn(file, reloc->sym->sec, reloc->addend); 2008 if (!dest_insn) 2009 break; 2010 2011 /* Make sure the destination is in the same function: */ 2012 if (!insn_func(dest_insn) || insn_func(dest_insn)->pfunc != pfunc) 2013 break; 2014 2015 alt = malloc(sizeof(*alt)); 2016 if (!alt) { 2017 WARN("malloc failed"); 2018 return -1; 2019 } 2020 2021 alt->insn = dest_insn; 2022 list_add_tail(&alt->list, &insn->alts); 2023 prev_offset = reloc->offset; 2024 } 2025 2026 if (!prev_offset) { 2027 WARN_FUNC("can't find switch jump table", 2028 insn->sec, insn->offset); 2029 return -1; 2030 } 2031 2032 return 0; 2033 } 2034 2035 /* 2036 * find_jump_table() - Given a dynamic jump, find the switch jump table 2037 * associated with it. 2038 */ 2039 static struct reloc *find_jump_table(struct objtool_file *file, 2040 struct symbol *func, 2041 struct instruction *insn) 2042 { 2043 struct reloc *table_reloc; 2044 struct instruction *dest_insn, *orig_insn = insn; 2045 2046 /* 2047 * Backward search using the @first_jump_src links, these help avoid 2048 * much of the 'in between' code. Which avoids us getting confused by 2049 * it. 2050 */ 2051 for (; 2052 insn && insn_func(insn) && insn_func(insn)->pfunc == func; 2053 insn = insn->first_jump_src ?: prev_insn_same_sym(file, insn)) { 2054 2055 if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC) 2056 break; 2057 2058 /* allow small jumps within the range */ 2059 if (insn->type == INSN_JUMP_UNCONDITIONAL && 2060 insn->jump_dest && 2061 (insn->jump_dest->offset <= insn->offset || 2062 insn->jump_dest->offset > orig_insn->offset)) 2063 break; 2064 2065 table_reloc = arch_find_switch_table(file, insn); 2066 if (!table_reloc) 2067 continue; 2068 dest_insn = find_insn(file, table_reloc->sym->sec, table_reloc->addend); 2069 if (!dest_insn || !insn_func(dest_insn) || insn_func(dest_insn)->pfunc != func) 2070 continue; 2071 2072 return table_reloc; 2073 } 2074 2075 return NULL; 2076 } 2077 2078 /* 2079 * First pass: Mark the head of each jump table so that in the next pass, 2080 * we know when a given jump table ends and the next one starts. 2081 */ 2082 static void mark_func_jump_tables(struct objtool_file *file, 2083 struct symbol *func) 2084 { 2085 struct instruction *insn, *last = NULL; 2086 struct reloc *reloc; 2087 2088 func_for_each_insn(file, func, insn) { 2089 if (!last) 2090 last = insn; 2091 2092 /* 2093 * Store back-pointers for unconditional forward jumps such 2094 * that find_jump_table() can back-track using those and 2095 * avoid some potentially confusing code. 2096 */ 2097 if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest && 2098 insn->offset > last->offset && 2099 insn->jump_dest->offset > insn->offset && 2100 !insn->jump_dest->first_jump_src) { 2101 2102 insn->jump_dest->first_jump_src = insn; 2103 last = insn->jump_dest; 2104 } 2105 2106 if (insn->type != INSN_JUMP_DYNAMIC) 2107 continue; 2108 2109 reloc = find_jump_table(file, func, insn); 2110 if (reloc) { 2111 reloc->jump_table_start = true; 2112 insn->jump_table = reloc; 2113 } 2114 } 2115 } 2116 2117 static int add_func_jump_tables(struct objtool_file *file, 2118 struct symbol *func) 2119 { 2120 struct instruction *insn; 2121 int ret; 2122 2123 func_for_each_insn(file, func, insn) { 2124 if (!insn->jump_table) 2125 continue; 2126 2127 ret = add_jump_table(file, insn, insn->jump_table); 2128 if (ret) 2129 return ret; 2130 } 2131 2132 return 0; 2133 } 2134 2135 /* 2136 * For some switch statements, gcc generates a jump table in the .rodata 2137 * section which contains a list of addresses within the function to jump to. 2138 * This finds these jump tables and adds them to the insn->alts lists. 2139 */ 2140 static int add_jump_table_alts(struct objtool_file *file) 2141 { 2142 struct section *sec; 2143 struct symbol *func; 2144 int ret; 2145 2146 if (!file->rodata) 2147 return 0; 2148 2149 for_each_sec(file, sec) { 2150 list_for_each_entry(func, &sec->symbol_list, list) { 2151 if (func->type != STT_FUNC) 2152 continue; 2153 2154 mark_func_jump_tables(file, func); 2155 ret = add_func_jump_tables(file, func); 2156 if (ret) 2157 return ret; 2158 } 2159 } 2160 2161 return 0; 2162 } 2163 2164 static void set_func_state(struct cfi_state *state) 2165 { 2166 state->cfa = initial_func_cfi.cfa; 2167 memcpy(&state->regs, &initial_func_cfi.regs, 2168 CFI_NUM_REGS * sizeof(struct cfi_reg)); 2169 state->stack_size = initial_func_cfi.cfa.offset; 2170 } 2171 2172 static int read_unwind_hints(struct objtool_file *file) 2173 { 2174 struct cfi_state cfi = init_cfi; 2175 struct section *sec, *relocsec; 2176 struct unwind_hint *hint; 2177 struct instruction *insn; 2178 struct reloc *reloc; 2179 int i; 2180 2181 sec = find_section_by_name(file->elf, ".discard.unwind_hints"); 2182 if (!sec) 2183 return 0; 2184 2185 relocsec = sec->reloc; 2186 if (!relocsec) { 2187 WARN("missing .rela.discard.unwind_hints section"); 2188 return -1; 2189 } 2190 2191 if (sec->sh.sh_size % sizeof(struct unwind_hint)) { 2192 WARN("struct unwind_hint size mismatch"); 2193 return -1; 2194 } 2195 2196 file->hints = true; 2197 2198 for (i = 0; i < sec->sh.sh_size / sizeof(struct unwind_hint); i++) { 2199 hint = (struct unwind_hint *)sec->data->d_buf + i; 2200 2201 reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint)); 2202 if (!reloc) { 2203 WARN("can't find reloc for unwind_hints[%d]", i); 2204 return -1; 2205 } 2206 2207 insn = find_insn(file, reloc->sym->sec, reloc->addend); 2208 if (!insn) { 2209 WARN("can't find insn for unwind_hints[%d]", i); 2210 return -1; 2211 } 2212 2213 insn->hint = true; 2214 2215 if (hint->type == UNWIND_HINT_TYPE_SAVE) { 2216 insn->hint = false; 2217 insn->save = true; 2218 continue; 2219 } 2220 2221 if (hint->type == UNWIND_HINT_TYPE_RESTORE) { 2222 insn->restore = true; 2223 continue; 2224 } 2225 2226 if (hint->type == UNWIND_HINT_TYPE_REGS_PARTIAL) { 2227 struct symbol *sym = find_symbol_by_offset(insn->sec, insn->offset); 2228 2229 if (sym && sym->bind == STB_GLOBAL) { 2230 if (opts.ibt && insn->type != INSN_ENDBR && !insn->noendbr) { 2231 WARN_FUNC("UNWIND_HINT_IRET_REGS without ENDBR", 2232 insn->sec, insn->offset); 2233 } 2234 2235 insn->entry = 1; 2236 } 2237 } 2238 2239 if (hint->type == UNWIND_HINT_TYPE_ENTRY) { 2240 hint->type = UNWIND_HINT_TYPE_CALL; 2241 insn->entry = 1; 2242 } 2243 2244 if (hint->type == UNWIND_HINT_TYPE_FUNC) { 2245 insn->cfi = &func_cfi; 2246 continue; 2247 } 2248 2249 if (insn->cfi) 2250 cfi = *(insn->cfi); 2251 2252 if (arch_decode_hint_reg(hint->sp_reg, &cfi.cfa.base)) { 2253 WARN_FUNC("unsupported unwind_hint sp base reg %d", 2254 insn->sec, insn->offset, hint->sp_reg); 2255 return -1; 2256 } 2257 2258 cfi.cfa.offset = bswap_if_needed(file->elf, hint->sp_offset); 2259 cfi.type = hint->type; 2260 cfi.end = hint->end; 2261 2262 insn->cfi = cfi_hash_find_or_add(&cfi); 2263 } 2264 2265 return 0; 2266 } 2267 2268 static int read_noendbr_hints(struct objtool_file *file) 2269 { 2270 struct section *sec; 2271 struct instruction *insn; 2272 struct reloc *reloc; 2273 2274 sec = find_section_by_name(file->elf, ".rela.discard.noendbr"); 2275 if (!sec) 2276 return 0; 2277 2278 list_for_each_entry(reloc, &sec->reloc_list, list) { 2279 insn = find_insn(file, reloc->sym->sec, reloc->sym->offset + reloc->addend); 2280 if (!insn) { 2281 WARN("bad .discard.noendbr entry"); 2282 return -1; 2283 } 2284 2285 insn->noendbr = 1; 2286 } 2287 2288 return 0; 2289 } 2290 2291 static int read_retpoline_hints(struct objtool_file *file) 2292 { 2293 struct section *sec; 2294 struct instruction *insn; 2295 struct reloc *reloc; 2296 2297 sec = find_section_by_name(file->elf, ".rela.discard.retpoline_safe"); 2298 if (!sec) 2299 return 0; 2300 2301 list_for_each_entry(reloc, &sec->reloc_list, list) { 2302 if (reloc->sym->type != STT_SECTION) { 2303 WARN("unexpected relocation symbol type in %s", sec->name); 2304 return -1; 2305 } 2306 2307 insn = find_insn(file, reloc->sym->sec, reloc->addend); 2308 if (!insn) { 2309 WARN("bad .discard.retpoline_safe entry"); 2310 return -1; 2311 } 2312 2313 if (insn->type != INSN_JUMP_DYNAMIC && 2314 insn->type != INSN_CALL_DYNAMIC && 2315 insn->type != INSN_RETURN && 2316 insn->type != INSN_NOP) { 2317 WARN_FUNC("retpoline_safe hint not an indirect jump/call/ret/nop", 2318 insn->sec, insn->offset); 2319 return -1; 2320 } 2321 2322 insn->retpoline_safe = true; 2323 } 2324 2325 return 0; 2326 } 2327 2328 static int read_instr_hints(struct objtool_file *file) 2329 { 2330 struct section *sec; 2331 struct instruction *insn; 2332 struct reloc *reloc; 2333 2334 sec = find_section_by_name(file->elf, ".rela.discard.instr_end"); 2335 if (!sec) 2336 return 0; 2337 2338 list_for_each_entry(reloc, &sec->reloc_list, list) { 2339 if (reloc->sym->type != STT_SECTION) { 2340 WARN("unexpected relocation symbol type in %s", sec->name); 2341 return -1; 2342 } 2343 2344 insn = find_insn(file, reloc->sym->sec, reloc->addend); 2345 if (!insn) { 2346 WARN("bad .discard.instr_end entry"); 2347 return -1; 2348 } 2349 2350 insn->instr--; 2351 } 2352 2353 sec = find_section_by_name(file->elf, ".rela.discard.instr_begin"); 2354 if (!sec) 2355 return 0; 2356 2357 list_for_each_entry(reloc, &sec->reloc_list, list) { 2358 if (reloc->sym->type != STT_SECTION) { 2359 WARN("unexpected relocation symbol type in %s", sec->name); 2360 return -1; 2361 } 2362 2363 insn = find_insn(file, reloc->sym->sec, reloc->addend); 2364 if (!insn) { 2365 WARN("bad .discard.instr_begin entry"); 2366 return -1; 2367 } 2368 2369 insn->instr++; 2370 } 2371 2372 return 0; 2373 } 2374 2375 static int read_intra_function_calls(struct objtool_file *file) 2376 { 2377 struct instruction *insn; 2378 struct section *sec; 2379 struct reloc *reloc; 2380 2381 sec = find_section_by_name(file->elf, ".rela.discard.intra_function_calls"); 2382 if (!sec) 2383 return 0; 2384 2385 list_for_each_entry(reloc, &sec->reloc_list, list) { 2386 unsigned long dest_off; 2387 2388 if (reloc->sym->type != STT_SECTION) { 2389 WARN("unexpected relocation symbol type in %s", 2390 sec->name); 2391 return -1; 2392 } 2393 2394 insn = find_insn(file, reloc->sym->sec, reloc->addend); 2395 if (!insn) { 2396 WARN("bad .discard.intra_function_call entry"); 2397 return -1; 2398 } 2399 2400 if (insn->type != INSN_CALL) { 2401 WARN_FUNC("intra_function_call not a direct call", 2402 insn->sec, insn->offset); 2403 return -1; 2404 } 2405 2406 /* 2407 * Treat intra-function CALLs as JMPs, but with a stack_op. 2408 * See add_call_destinations(), which strips stack_ops from 2409 * normal CALLs. 2410 */ 2411 insn->type = INSN_JUMP_UNCONDITIONAL; 2412 2413 dest_off = arch_jump_destination(insn); 2414 insn->jump_dest = find_insn(file, insn->sec, dest_off); 2415 if (!insn->jump_dest) { 2416 WARN_FUNC("can't find call dest at %s+0x%lx", 2417 insn->sec, insn->offset, 2418 insn->sec->name, dest_off); 2419 return -1; 2420 } 2421 } 2422 2423 return 0; 2424 } 2425 2426 /* 2427 * Return true if name matches an instrumentation function, where calls to that 2428 * function from noinstr code can safely be removed, but compilers won't do so. 2429 */ 2430 static bool is_profiling_func(const char *name) 2431 { 2432 /* 2433 * Many compilers cannot disable KCOV with a function attribute. 2434 */ 2435 if (!strncmp(name, "__sanitizer_cov_", 16)) 2436 return true; 2437 2438 /* 2439 * Some compilers currently do not remove __tsan_func_entry/exit nor 2440 * __tsan_atomic_signal_fence (used for barrier instrumentation) with 2441 * the __no_sanitize_thread attribute, remove them. Once the kernel's 2442 * minimum Clang version is 14.0, this can be removed. 2443 */ 2444 if (!strncmp(name, "__tsan_func_", 12) || 2445 !strcmp(name, "__tsan_atomic_signal_fence")) 2446 return true; 2447 2448 return false; 2449 } 2450 2451 static int classify_symbols(struct objtool_file *file) 2452 { 2453 struct section *sec; 2454 struct symbol *func; 2455 2456 for_each_sec(file, sec) { 2457 list_for_each_entry(func, &sec->symbol_list, list) { 2458 if (func->bind != STB_GLOBAL) 2459 continue; 2460 2461 if (!strncmp(func->name, STATIC_CALL_TRAMP_PREFIX_STR, 2462 strlen(STATIC_CALL_TRAMP_PREFIX_STR))) 2463 func->static_call_tramp = true; 2464 2465 if (arch_is_retpoline(func)) 2466 func->retpoline_thunk = true; 2467 2468 if (arch_is_rethunk(func)) 2469 func->return_thunk = true; 2470 2471 if (arch_ftrace_match(func->name)) 2472 func->fentry = true; 2473 2474 if (is_profiling_func(func->name)) 2475 func->profiling_func = true; 2476 } 2477 } 2478 2479 return 0; 2480 } 2481 2482 static void mark_rodata(struct objtool_file *file) 2483 { 2484 struct section *sec; 2485 bool found = false; 2486 2487 /* 2488 * Search for the following rodata sections, each of which can 2489 * potentially contain jump tables: 2490 * 2491 * - .rodata: can contain GCC switch tables 2492 * - .rodata.<func>: same, if -fdata-sections is being used 2493 * - .rodata..c_jump_table: contains C annotated jump tables 2494 * 2495 * .rodata.str1.* sections are ignored; they don't contain jump tables. 2496 */ 2497 for_each_sec(file, sec) { 2498 if (!strncmp(sec->name, ".rodata", 7) && 2499 !strstr(sec->name, ".str1.")) { 2500 sec->rodata = true; 2501 found = true; 2502 } 2503 } 2504 2505 file->rodata = found; 2506 } 2507 2508 static int decode_sections(struct objtool_file *file) 2509 { 2510 int ret; 2511 2512 mark_rodata(file); 2513 2514 ret = init_pv_ops(file); 2515 if (ret) 2516 return ret; 2517 2518 /* 2519 * Must be before add_{jump_call}_destination. 2520 */ 2521 ret = classify_symbols(file); 2522 if (ret) 2523 return ret; 2524 2525 ret = decode_instructions(file); 2526 if (ret) 2527 return ret; 2528 2529 add_ignores(file); 2530 add_uaccess_safe(file); 2531 2532 ret = add_ignore_alternatives(file); 2533 if (ret) 2534 return ret; 2535 2536 /* 2537 * Must be before read_unwind_hints() since that needs insn->noendbr. 2538 */ 2539 ret = read_noendbr_hints(file); 2540 if (ret) 2541 return ret; 2542 2543 /* 2544 * Must be before add_jump_destinations(), which depends on 'func' 2545 * being set for alternatives, to enable proper sibling call detection. 2546 */ 2547 if (opts.stackval || opts.orc || opts.uaccess || opts.noinstr) { 2548 ret = add_special_section_alts(file); 2549 if (ret) 2550 return ret; 2551 } 2552 2553 ret = add_jump_destinations(file); 2554 if (ret) 2555 return ret; 2556 2557 /* 2558 * Must be before add_call_destination(); it changes INSN_CALL to 2559 * INSN_JUMP. 2560 */ 2561 ret = read_intra_function_calls(file); 2562 if (ret) 2563 return ret; 2564 2565 ret = add_call_destinations(file); 2566 if (ret) 2567 return ret; 2568 2569 /* 2570 * Must be after add_call_destinations() such that it can override 2571 * dead_end_function() marks. 2572 */ 2573 ret = add_dead_ends(file); 2574 if (ret) 2575 return ret; 2576 2577 ret = add_jump_table_alts(file); 2578 if (ret) 2579 return ret; 2580 2581 ret = read_unwind_hints(file); 2582 if (ret) 2583 return ret; 2584 2585 ret = read_retpoline_hints(file); 2586 if (ret) 2587 return ret; 2588 2589 ret = read_instr_hints(file); 2590 if (ret) 2591 return ret; 2592 2593 return 0; 2594 } 2595 2596 static bool is_fentry_call(struct instruction *insn) 2597 { 2598 if (insn->type == INSN_CALL && 2599 insn->call_dest && 2600 insn->call_dest->fentry) 2601 return true; 2602 2603 return false; 2604 } 2605 2606 static bool has_modified_stack_frame(struct instruction *insn, struct insn_state *state) 2607 { 2608 struct cfi_state *cfi = &state->cfi; 2609 int i; 2610 2611 if (cfi->cfa.base != initial_func_cfi.cfa.base || cfi->drap) 2612 return true; 2613 2614 if (cfi->cfa.offset != initial_func_cfi.cfa.offset) 2615 return true; 2616 2617 if (cfi->stack_size != initial_func_cfi.cfa.offset) 2618 return true; 2619 2620 for (i = 0; i < CFI_NUM_REGS; i++) { 2621 if (cfi->regs[i].base != initial_func_cfi.regs[i].base || 2622 cfi->regs[i].offset != initial_func_cfi.regs[i].offset) 2623 return true; 2624 } 2625 2626 return false; 2627 } 2628 2629 static bool check_reg_frame_pos(const struct cfi_reg *reg, 2630 int expected_offset) 2631 { 2632 return reg->base == CFI_CFA && 2633 reg->offset == expected_offset; 2634 } 2635 2636 static bool has_valid_stack_frame(struct insn_state *state) 2637 { 2638 struct cfi_state *cfi = &state->cfi; 2639 2640 if (cfi->cfa.base == CFI_BP && 2641 check_reg_frame_pos(&cfi->regs[CFI_BP], -cfi->cfa.offset) && 2642 check_reg_frame_pos(&cfi->regs[CFI_RA], -cfi->cfa.offset + 8)) 2643 return true; 2644 2645 if (cfi->drap && cfi->regs[CFI_BP].base == CFI_BP) 2646 return true; 2647 2648 return false; 2649 } 2650 2651 static int update_cfi_state_regs(struct instruction *insn, 2652 struct cfi_state *cfi, 2653 struct stack_op *op) 2654 { 2655 struct cfi_reg *cfa = &cfi->cfa; 2656 2657 if (cfa->base != CFI_SP && cfa->base != CFI_SP_INDIRECT) 2658 return 0; 2659 2660 /* push */ 2661 if (op->dest.type == OP_DEST_PUSH || op->dest.type == OP_DEST_PUSHF) 2662 cfa->offset += 8; 2663 2664 /* pop */ 2665 if (op->src.type == OP_SRC_POP || op->src.type == OP_SRC_POPF) 2666 cfa->offset -= 8; 2667 2668 /* add immediate to sp */ 2669 if (op->dest.type == OP_DEST_REG && op->src.type == OP_SRC_ADD && 2670 op->dest.reg == CFI_SP && op->src.reg == CFI_SP) 2671 cfa->offset -= op->src.offset; 2672 2673 return 0; 2674 } 2675 2676 static void save_reg(struct cfi_state *cfi, unsigned char reg, int base, int offset) 2677 { 2678 if (arch_callee_saved_reg(reg) && 2679 cfi->regs[reg].base == CFI_UNDEFINED) { 2680 cfi->regs[reg].base = base; 2681 cfi->regs[reg].offset = offset; 2682 } 2683 } 2684 2685 static void restore_reg(struct cfi_state *cfi, unsigned char reg) 2686 { 2687 cfi->regs[reg].base = initial_func_cfi.regs[reg].base; 2688 cfi->regs[reg].offset = initial_func_cfi.regs[reg].offset; 2689 } 2690 2691 /* 2692 * A note about DRAP stack alignment: 2693 * 2694 * GCC has the concept of a DRAP register, which is used to help keep track of 2695 * the stack pointer when aligning the stack. r10 or r13 is used as the DRAP 2696 * register. The typical DRAP pattern is: 2697 * 2698 * 4c 8d 54 24 08 lea 0x8(%rsp),%r10 2699 * 48 83 e4 c0 and $0xffffffffffffffc0,%rsp 2700 * 41 ff 72 f8 pushq -0x8(%r10) 2701 * 55 push %rbp 2702 * 48 89 e5 mov %rsp,%rbp 2703 * (more pushes) 2704 * 41 52 push %r10 2705 * ... 2706 * 41 5a pop %r10 2707 * (more pops) 2708 * 5d pop %rbp 2709 * 49 8d 62 f8 lea -0x8(%r10),%rsp 2710 * c3 retq 2711 * 2712 * There are some variations in the epilogues, like: 2713 * 2714 * 5b pop %rbx 2715 * 41 5a pop %r10 2716 * 41 5c pop %r12 2717 * 41 5d pop %r13 2718 * 41 5e pop %r14 2719 * c9 leaveq 2720 * 49 8d 62 f8 lea -0x8(%r10),%rsp 2721 * c3 retq 2722 * 2723 * and: 2724 * 2725 * 4c 8b 55 e8 mov -0x18(%rbp),%r10 2726 * 48 8b 5d e0 mov -0x20(%rbp),%rbx 2727 * 4c 8b 65 f0 mov -0x10(%rbp),%r12 2728 * 4c 8b 6d f8 mov -0x8(%rbp),%r13 2729 * c9 leaveq 2730 * 49 8d 62 f8 lea -0x8(%r10),%rsp 2731 * c3 retq 2732 * 2733 * Sometimes r13 is used as the DRAP register, in which case it's saved and 2734 * restored beforehand: 2735 * 2736 * 41 55 push %r13 2737 * 4c 8d 6c 24 10 lea 0x10(%rsp),%r13 2738 * 48 83 e4 f0 and $0xfffffffffffffff0,%rsp 2739 * ... 2740 * 49 8d 65 f0 lea -0x10(%r13),%rsp 2741 * 41 5d pop %r13 2742 * c3 retq 2743 */ 2744 static int update_cfi_state(struct instruction *insn, 2745 struct instruction *next_insn, 2746 struct cfi_state *cfi, struct stack_op *op) 2747 { 2748 struct cfi_reg *cfa = &cfi->cfa; 2749 struct cfi_reg *regs = cfi->regs; 2750 2751 /* stack operations don't make sense with an undefined CFA */ 2752 if (cfa->base == CFI_UNDEFINED) { 2753 if (insn_func(insn)) { 2754 WARN_FUNC("undefined stack state", insn->sec, insn->offset); 2755 return -1; 2756 } 2757 return 0; 2758 } 2759 2760 if (cfi->type == UNWIND_HINT_TYPE_REGS || 2761 cfi->type == UNWIND_HINT_TYPE_REGS_PARTIAL) 2762 return update_cfi_state_regs(insn, cfi, op); 2763 2764 switch (op->dest.type) { 2765 2766 case OP_DEST_REG: 2767 switch (op->src.type) { 2768 2769 case OP_SRC_REG: 2770 if (op->src.reg == CFI_SP && op->dest.reg == CFI_BP && 2771 cfa->base == CFI_SP && 2772 check_reg_frame_pos(®s[CFI_BP], -cfa->offset)) { 2773 2774 /* mov %rsp, %rbp */ 2775 cfa->base = op->dest.reg; 2776 cfi->bp_scratch = false; 2777 } 2778 2779 else if (op->src.reg == CFI_SP && 2780 op->dest.reg == CFI_BP && cfi->drap) { 2781 2782 /* drap: mov %rsp, %rbp */ 2783 regs[CFI_BP].base = CFI_BP; 2784 regs[CFI_BP].offset = -cfi->stack_size; 2785 cfi->bp_scratch = false; 2786 } 2787 2788 else if (op->src.reg == CFI_SP && cfa->base == CFI_SP) { 2789 2790 /* 2791 * mov %rsp, %reg 2792 * 2793 * This is needed for the rare case where GCC 2794 * does: 2795 * 2796 * mov %rsp, %rax 2797 * ... 2798 * mov %rax, %rsp 2799 */ 2800 cfi->vals[op->dest.reg].base = CFI_CFA; 2801 cfi->vals[op->dest.reg].offset = -cfi->stack_size; 2802 } 2803 2804 else if (op->src.reg == CFI_BP && op->dest.reg == CFI_SP && 2805 (cfa->base == CFI_BP || cfa->base == cfi->drap_reg)) { 2806 2807 /* 2808 * mov %rbp, %rsp 2809 * 2810 * Restore the original stack pointer (Clang). 2811 */ 2812 cfi->stack_size = -cfi->regs[CFI_BP].offset; 2813 } 2814 2815 else if (op->dest.reg == cfa->base) { 2816 2817 /* mov %reg, %rsp */ 2818 if (cfa->base == CFI_SP && 2819 cfi->vals[op->src.reg].base == CFI_CFA) { 2820 2821 /* 2822 * This is needed for the rare case 2823 * where GCC does something dumb like: 2824 * 2825 * lea 0x8(%rsp), %rcx 2826 * ... 2827 * mov %rcx, %rsp 2828 */ 2829 cfa->offset = -cfi->vals[op->src.reg].offset; 2830 cfi->stack_size = cfa->offset; 2831 2832 } else if (cfa->base == CFI_SP && 2833 cfi->vals[op->src.reg].base == CFI_SP_INDIRECT && 2834 cfi->vals[op->src.reg].offset == cfa->offset) { 2835 2836 /* 2837 * Stack swizzle: 2838 * 2839 * 1: mov %rsp, (%[tos]) 2840 * 2: mov %[tos], %rsp 2841 * ... 2842 * 3: pop %rsp 2843 * 2844 * Where: 2845 * 2846 * 1 - places a pointer to the previous 2847 * stack at the Top-of-Stack of the 2848 * new stack. 2849 * 2850 * 2 - switches to the new stack. 2851 * 2852 * 3 - pops the Top-of-Stack to restore 2853 * the original stack. 2854 * 2855 * Note: we set base to SP_INDIRECT 2856 * here and preserve offset. Therefore 2857 * when the unwinder reaches ToS it 2858 * will dereference SP and then add the 2859 * offset to find the next frame, IOW: 2860 * (%rsp) + offset. 2861 */ 2862 cfa->base = CFI_SP_INDIRECT; 2863 2864 } else { 2865 cfa->base = CFI_UNDEFINED; 2866 cfa->offset = 0; 2867 } 2868 } 2869 2870 else if (op->dest.reg == CFI_SP && 2871 cfi->vals[op->src.reg].base == CFI_SP_INDIRECT && 2872 cfi->vals[op->src.reg].offset == cfa->offset) { 2873 2874 /* 2875 * The same stack swizzle case 2) as above. But 2876 * because we can't change cfa->base, case 3) 2877 * will become a regular POP. Pretend we're a 2878 * PUSH so things don't go unbalanced. 2879 */ 2880 cfi->stack_size += 8; 2881 } 2882 2883 2884 break; 2885 2886 case OP_SRC_ADD: 2887 if (op->dest.reg == CFI_SP && op->src.reg == CFI_SP) { 2888 2889 /* add imm, %rsp */ 2890 cfi->stack_size -= op->src.offset; 2891 if (cfa->base == CFI_SP) 2892 cfa->offset -= op->src.offset; 2893 break; 2894 } 2895 2896 if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) { 2897 2898 /* lea disp(%rbp), %rsp */ 2899 cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset); 2900 break; 2901 } 2902 2903 if (!cfi->drap && op->src.reg == CFI_SP && 2904 op->dest.reg == CFI_BP && cfa->base == CFI_SP && 2905 check_reg_frame_pos(®s[CFI_BP], -cfa->offset + op->src.offset)) { 2906 2907 /* lea disp(%rsp), %rbp */ 2908 cfa->base = CFI_BP; 2909 cfa->offset -= op->src.offset; 2910 cfi->bp_scratch = false; 2911 break; 2912 } 2913 2914 if (op->src.reg == CFI_SP && cfa->base == CFI_SP) { 2915 2916 /* drap: lea disp(%rsp), %drap */ 2917 cfi->drap_reg = op->dest.reg; 2918 2919 /* 2920 * lea disp(%rsp), %reg 2921 * 2922 * This is needed for the rare case where GCC 2923 * does something dumb like: 2924 * 2925 * lea 0x8(%rsp), %rcx 2926 * ... 2927 * mov %rcx, %rsp 2928 */ 2929 cfi->vals[op->dest.reg].base = CFI_CFA; 2930 cfi->vals[op->dest.reg].offset = \ 2931 -cfi->stack_size + op->src.offset; 2932 2933 break; 2934 } 2935 2936 if (cfi->drap && op->dest.reg == CFI_SP && 2937 op->src.reg == cfi->drap_reg) { 2938 2939 /* drap: lea disp(%drap), %rsp */ 2940 cfa->base = CFI_SP; 2941 cfa->offset = cfi->stack_size = -op->src.offset; 2942 cfi->drap_reg = CFI_UNDEFINED; 2943 cfi->drap = false; 2944 break; 2945 } 2946 2947 if (op->dest.reg == cfi->cfa.base && !(next_insn && next_insn->hint)) { 2948 WARN_FUNC("unsupported stack register modification", 2949 insn->sec, insn->offset); 2950 return -1; 2951 } 2952 2953 break; 2954 2955 case OP_SRC_AND: 2956 if (op->dest.reg != CFI_SP || 2957 (cfi->drap_reg != CFI_UNDEFINED && cfa->base != CFI_SP) || 2958 (cfi->drap_reg == CFI_UNDEFINED && cfa->base != CFI_BP)) { 2959 WARN_FUNC("unsupported stack pointer realignment", 2960 insn->sec, insn->offset); 2961 return -1; 2962 } 2963 2964 if (cfi->drap_reg != CFI_UNDEFINED) { 2965 /* drap: and imm, %rsp */ 2966 cfa->base = cfi->drap_reg; 2967 cfa->offset = cfi->stack_size = 0; 2968 cfi->drap = true; 2969 } 2970 2971 /* 2972 * Older versions of GCC (4.8ish) realign the stack 2973 * without DRAP, with a frame pointer. 2974 */ 2975 2976 break; 2977 2978 case OP_SRC_POP: 2979 case OP_SRC_POPF: 2980 if (op->dest.reg == CFI_SP && cfa->base == CFI_SP_INDIRECT) { 2981 2982 /* pop %rsp; # restore from a stack swizzle */ 2983 cfa->base = CFI_SP; 2984 break; 2985 } 2986 2987 if (!cfi->drap && op->dest.reg == cfa->base) { 2988 2989 /* pop %rbp */ 2990 cfa->base = CFI_SP; 2991 } 2992 2993 if (cfi->drap && cfa->base == CFI_BP_INDIRECT && 2994 op->dest.reg == cfi->drap_reg && 2995 cfi->drap_offset == -cfi->stack_size) { 2996 2997 /* drap: pop %drap */ 2998 cfa->base = cfi->drap_reg; 2999 cfa->offset = 0; 3000 cfi->drap_offset = -1; 3001 3002 } else if (cfi->stack_size == -regs[op->dest.reg].offset) { 3003 3004 /* pop %reg */ 3005 restore_reg(cfi, op->dest.reg); 3006 } 3007 3008 cfi->stack_size -= 8; 3009 if (cfa->base == CFI_SP) 3010 cfa->offset -= 8; 3011 3012 break; 3013 3014 case OP_SRC_REG_INDIRECT: 3015 if (!cfi->drap && op->dest.reg == cfa->base && 3016 op->dest.reg == CFI_BP) { 3017 3018 /* mov disp(%rsp), %rbp */ 3019 cfa->base = CFI_SP; 3020 cfa->offset = cfi->stack_size; 3021 } 3022 3023 if (cfi->drap && op->src.reg == CFI_BP && 3024 op->src.offset == cfi->drap_offset) { 3025 3026 /* drap: mov disp(%rbp), %drap */ 3027 cfa->base = cfi->drap_reg; 3028 cfa->offset = 0; 3029 cfi->drap_offset = -1; 3030 } 3031 3032 if (cfi->drap && op->src.reg == CFI_BP && 3033 op->src.offset == regs[op->dest.reg].offset) { 3034 3035 /* drap: mov disp(%rbp), %reg */ 3036 restore_reg(cfi, op->dest.reg); 3037 3038 } else if (op->src.reg == cfa->base && 3039 op->src.offset == regs[op->dest.reg].offset + cfa->offset) { 3040 3041 /* mov disp(%rbp), %reg */ 3042 /* mov disp(%rsp), %reg */ 3043 restore_reg(cfi, op->dest.reg); 3044 3045 } else if (op->src.reg == CFI_SP && 3046 op->src.offset == regs[op->dest.reg].offset + cfi->stack_size) { 3047 3048 /* mov disp(%rsp), %reg */ 3049 restore_reg(cfi, op->dest.reg); 3050 } 3051 3052 break; 3053 3054 default: 3055 WARN_FUNC("unknown stack-related instruction", 3056 insn->sec, insn->offset); 3057 return -1; 3058 } 3059 3060 break; 3061 3062 case OP_DEST_PUSH: 3063 case OP_DEST_PUSHF: 3064 cfi->stack_size += 8; 3065 if (cfa->base == CFI_SP) 3066 cfa->offset += 8; 3067 3068 if (op->src.type != OP_SRC_REG) 3069 break; 3070 3071 if (cfi->drap) { 3072 if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) { 3073 3074 /* drap: push %drap */ 3075 cfa->base = CFI_BP_INDIRECT; 3076 cfa->offset = -cfi->stack_size; 3077 3078 /* save drap so we know when to restore it */ 3079 cfi->drap_offset = -cfi->stack_size; 3080 3081 } else if (op->src.reg == CFI_BP && cfa->base == cfi->drap_reg) { 3082 3083 /* drap: push %rbp */ 3084 cfi->stack_size = 0; 3085 3086 } else { 3087 3088 /* drap: push %reg */ 3089 save_reg(cfi, op->src.reg, CFI_BP, -cfi->stack_size); 3090 } 3091 3092 } else { 3093 3094 /* push %reg */ 3095 save_reg(cfi, op->src.reg, CFI_CFA, -cfi->stack_size); 3096 } 3097 3098 /* detect when asm code uses rbp as a scratch register */ 3099 if (opts.stackval && insn_func(insn) && op->src.reg == CFI_BP && 3100 cfa->base != CFI_BP) 3101 cfi->bp_scratch = true; 3102 break; 3103 3104 case OP_DEST_REG_INDIRECT: 3105 3106 if (cfi->drap) { 3107 if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) { 3108 3109 /* drap: mov %drap, disp(%rbp) */ 3110 cfa->base = CFI_BP_INDIRECT; 3111 cfa->offset = op->dest.offset; 3112 3113 /* save drap offset so we know when to restore it */ 3114 cfi->drap_offset = op->dest.offset; 3115 } else { 3116 3117 /* drap: mov reg, disp(%rbp) */ 3118 save_reg(cfi, op->src.reg, CFI_BP, op->dest.offset); 3119 } 3120 3121 } else if (op->dest.reg == cfa->base) { 3122 3123 /* mov reg, disp(%rbp) */ 3124 /* mov reg, disp(%rsp) */ 3125 save_reg(cfi, op->src.reg, CFI_CFA, 3126 op->dest.offset - cfi->cfa.offset); 3127 3128 } else if (op->dest.reg == CFI_SP) { 3129 3130 /* mov reg, disp(%rsp) */ 3131 save_reg(cfi, op->src.reg, CFI_CFA, 3132 op->dest.offset - cfi->stack_size); 3133 3134 } else if (op->src.reg == CFI_SP && op->dest.offset == 0) { 3135 3136 /* mov %rsp, (%reg); # setup a stack swizzle. */ 3137 cfi->vals[op->dest.reg].base = CFI_SP_INDIRECT; 3138 cfi->vals[op->dest.reg].offset = cfa->offset; 3139 } 3140 3141 break; 3142 3143 case OP_DEST_MEM: 3144 if (op->src.type != OP_SRC_POP && op->src.type != OP_SRC_POPF) { 3145 WARN_FUNC("unknown stack-related memory operation", 3146 insn->sec, insn->offset); 3147 return -1; 3148 } 3149 3150 /* pop mem */ 3151 cfi->stack_size -= 8; 3152 if (cfa->base == CFI_SP) 3153 cfa->offset -= 8; 3154 3155 break; 3156 3157 default: 3158 WARN_FUNC("unknown stack-related instruction", 3159 insn->sec, insn->offset); 3160 return -1; 3161 } 3162 3163 return 0; 3164 } 3165 3166 /* 3167 * The stack layouts of alternatives instructions can sometimes diverge when 3168 * they have stack modifications. That's fine as long as the potential stack 3169 * layouts don't conflict at any given potential instruction boundary. 3170 * 3171 * Flatten the CFIs of the different alternative code streams (both original 3172 * and replacement) into a single shared CFI array which can be used to detect 3173 * conflicts and nicely feed a linear array of ORC entries to the unwinder. 3174 */ 3175 static int propagate_alt_cfi(struct objtool_file *file, struct instruction *insn) 3176 { 3177 struct cfi_state **alt_cfi; 3178 int group_off; 3179 3180 if (!insn->alt_group) 3181 return 0; 3182 3183 if (!insn->cfi) { 3184 WARN("CFI missing"); 3185 return -1; 3186 } 3187 3188 alt_cfi = insn->alt_group->cfi; 3189 group_off = insn->offset - insn->alt_group->first_insn->offset; 3190 3191 if (!alt_cfi[group_off]) { 3192 alt_cfi[group_off] = insn->cfi; 3193 } else { 3194 if (cficmp(alt_cfi[group_off], insn->cfi)) { 3195 WARN_FUNC("stack layout conflict in alternatives", 3196 insn->sec, insn->offset); 3197 return -1; 3198 } 3199 } 3200 3201 return 0; 3202 } 3203 3204 static int handle_insn_ops(struct instruction *insn, 3205 struct instruction *next_insn, 3206 struct insn_state *state) 3207 { 3208 struct stack_op *op; 3209 3210 list_for_each_entry(op, &insn->stack_ops, list) { 3211 3212 if (update_cfi_state(insn, next_insn, &state->cfi, op)) 3213 return 1; 3214 3215 if (!insn->alt_group) 3216 continue; 3217 3218 if (op->dest.type == OP_DEST_PUSHF) { 3219 if (!state->uaccess_stack) { 3220 state->uaccess_stack = 1; 3221 } else if (state->uaccess_stack >> 31) { 3222 WARN_FUNC("PUSHF stack exhausted", 3223 insn->sec, insn->offset); 3224 return 1; 3225 } 3226 state->uaccess_stack <<= 1; 3227 state->uaccess_stack |= state->uaccess; 3228 } 3229 3230 if (op->src.type == OP_SRC_POPF) { 3231 if (state->uaccess_stack) { 3232 state->uaccess = state->uaccess_stack & 1; 3233 state->uaccess_stack >>= 1; 3234 if (state->uaccess_stack == 1) 3235 state->uaccess_stack = 0; 3236 } 3237 } 3238 } 3239 3240 return 0; 3241 } 3242 3243 static bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2) 3244 { 3245 struct cfi_state *cfi1 = insn->cfi; 3246 int i; 3247 3248 if (!cfi1) { 3249 WARN("CFI missing"); 3250 return false; 3251 } 3252 3253 if (memcmp(&cfi1->cfa, &cfi2->cfa, sizeof(cfi1->cfa))) { 3254 3255 WARN_FUNC("stack state mismatch: cfa1=%d%+d cfa2=%d%+d", 3256 insn->sec, insn->offset, 3257 cfi1->cfa.base, cfi1->cfa.offset, 3258 cfi2->cfa.base, cfi2->cfa.offset); 3259 3260 } else if (memcmp(&cfi1->regs, &cfi2->regs, sizeof(cfi1->regs))) { 3261 for (i = 0; i < CFI_NUM_REGS; i++) { 3262 if (!memcmp(&cfi1->regs[i], &cfi2->regs[i], 3263 sizeof(struct cfi_reg))) 3264 continue; 3265 3266 WARN_FUNC("stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d", 3267 insn->sec, insn->offset, 3268 i, cfi1->regs[i].base, cfi1->regs[i].offset, 3269 i, cfi2->regs[i].base, cfi2->regs[i].offset); 3270 break; 3271 } 3272 3273 } else if (cfi1->type != cfi2->type) { 3274 3275 WARN_FUNC("stack state mismatch: type1=%d type2=%d", 3276 insn->sec, insn->offset, cfi1->type, cfi2->type); 3277 3278 } else if (cfi1->drap != cfi2->drap || 3279 (cfi1->drap && cfi1->drap_reg != cfi2->drap_reg) || 3280 (cfi1->drap && cfi1->drap_offset != cfi2->drap_offset)) { 3281 3282 WARN_FUNC("stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)", 3283 insn->sec, insn->offset, 3284 cfi1->drap, cfi1->drap_reg, cfi1->drap_offset, 3285 cfi2->drap, cfi2->drap_reg, cfi2->drap_offset); 3286 3287 } else 3288 return true; 3289 3290 return false; 3291 } 3292 3293 static inline bool func_uaccess_safe(struct symbol *func) 3294 { 3295 if (func) 3296 return func->uaccess_safe; 3297 3298 return false; 3299 } 3300 3301 static inline const char *call_dest_name(struct instruction *insn) 3302 { 3303 static char pvname[19]; 3304 struct reloc *rel; 3305 int idx; 3306 3307 if (insn->call_dest) 3308 return insn->call_dest->name; 3309 3310 rel = insn_reloc(NULL, insn); 3311 if (rel && !strcmp(rel->sym->name, "pv_ops")) { 3312 idx = (rel->addend / sizeof(void *)); 3313 snprintf(pvname, sizeof(pvname), "pv_ops[%d]", idx); 3314 return pvname; 3315 } 3316 3317 return "{dynamic}"; 3318 } 3319 3320 static bool pv_call_dest(struct objtool_file *file, struct instruction *insn) 3321 { 3322 struct symbol *target; 3323 struct reloc *rel; 3324 int idx; 3325 3326 rel = insn_reloc(file, insn); 3327 if (!rel || strcmp(rel->sym->name, "pv_ops")) 3328 return false; 3329 3330 idx = (arch_dest_reloc_offset(rel->addend) / sizeof(void *)); 3331 3332 if (file->pv_ops[idx].clean) 3333 return true; 3334 3335 file->pv_ops[idx].clean = true; 3336 3337 list_for_each_entry(target, &file->pv_ops[idx].targets, pv_target) { 3338 if (!target->sec->noinstr) { 3339 WARN("pv_ops[%d]: %s", idx, target->name); 3340 file->pv_ops[idx].clean = false; 3341 } 3342 } 3343 3344 return file->pv_ops[idx].clean; 3345 } 3346 3347 static inline bool noinstr_call_dest(struct objtool_file *file, 3348 struct instruction *insn, 3349 struct symbol *func) 3350 { 3351 /* 3352 * We can't deal with indirect function calls at present; 3353 * assume they're instrumented. 3354 */ 3355 if (!func) { 3356 if (file->pv_ops) 3357 return pv_call_dest(file, insn); 3358 3359 return false; 3360 } 3361 3362 /* 3363 * If the symbol is from a noinstr section; we good. 3364 */ 3365 if (func->sec->noinstr) 3366 return true; 3367 3368 /* 3369 * The __ubsan_handle_*() calls are like WARN(), they only happen when 3370 * something 'BAD' happened. At the risk of taking the machine down, 3371 * let them proceed to get the message out. 3372 */ 3373 if (!strncmp(func->name, "__ubsan_handle_", 15)) 3374 return true; 3375 3376 return false; 3377 } 3378 3379 static int validate_call(struct objtool_file *file, 3380 struct instruction *insn, 3381 struct insn_state *state) 3382 { 3383 if (state->noinstr && state->instr <= 0 && 3384 !noinstr_call_dest(file, insn, insn->call_dest)) { 3385 WARN_FUNC("call to %s() leaves .noinstr.text section", 3386 insn->sec, insn->offset, call_dest_name(insn)); 3387 return 1; 3388 } 3389 3390 if (state->uaccess && !func_uaccess_safe(insn->call_dest)) { 3391 WARN_FUNC("call to %s() with UACCESS enabled", 3392 insn->sec, insn->offset, call_dest_name(insn)); 3393 return 1; 3394 } 3395 3396 if (state->df) { 3397 WARN_FUNC("call to %s() with DF set", 3398 insn->sec, insn->offset, call_dest_name(insn)); 3399 return 1; 3400 } 3401 3402 return 0; 3403 } 3404 3405 static int validate_sibling_call(struct objtool_file *file, 3406 struct instruction *insn, 3407 struct insn_state *state) 3408 { 3409 if (insn_func(insn) && has_modified_stack_frame(insn, state)) { 3410 WARN_FUNC("sibling call from callable instruction with modified stack frame", 3411 insn->sec, insn->offset); 3412 return 1; 3413 } 3414 3415 return validate_call(file, insn, state); 3416 } 3417 3418 static int validate_return(struct symbol *func, struct instruction *insn, struct insn_state *state) 3419 { 3420 if (state->noinstr && state->instr > 0) { 3421 WARN_FUNC("return with instrumentation enabled", 3422 insn->sec, insn->offset); 3423 return 1; 3424 } 3425 3426 if (state->uaccess && !func_uaccess_safe(func)) { 3427 WARN_FUNC("return with UACCESS enabled", 3428 insn->sec, insn->offset); 3429 return 1; 3430 } 3431 3432 if (!state->uaccess && func_uaccess_safe(func)) { 3433 WARN_FUNC("return with UACCESS disabled from a UACCESS-safe function", 3434 insn->sec, insn->offset); 3435 return 1; 3436 } 3437 3438 if (state->df) { 3439 WARN_FUNC("return with DF set", 3440 insn->sec, insn->offset); 3441 return 1; 3442 } 3443 3444 if (func && has_modified_stack_frame(insn, state)) { 3445 WARN_FUNC("return with modified stack frame", 3446 insn->sec, insn->offset); 3447 return 1; 3448 } 3449 3450 if (state->cfi.bp_scratch) { 3451 WARN_FUNC("BP used as a scratch register", 3452 insn->sec, insn->offset); 3453 return 1; 3454 } 3455 3456 return 0; 3457 } 3458 3459 static struct instruction *next_insn_to_validate(struct objtool_file *file, 3460 struct instruction *insn) 3461 { 3462 struct alt_group *alt_group = insn->alt_group; 3463 3464 /* 3465 * Simulate the fact that alternatives are patched in-place. When the 3466 * end of a replacement alt_group is reached, redirect objtool flow to 3467 * the end of the original alt_group. 3468 */ 3469 if (alt_group && insn == alt_group->last_insn && alt_group->orig_group) 3470 return next_insn_same_sec(file, alt_group->orig_group->last_insn); 3471 3472 return next_insn_same_sec(file, insn); 3473 } 3474 3475 /* 3476 * Follow the branch starting at the given instruction, and recursively follow 3477 * any other branches (jumps). Meanwhile, track the frame pointer state at 3478 * each instruction and validate all the rules described in 3479 * tools/objtool/Documentation/objtool.txt. 3480 */ 3481 static int validate_branch(struct objtool_file *file, struct symbol *func, 3482 struct instruction *insn, struct insn_state state) 3483 { 3484 struct alternative *alt; 3485 struct instruction *next_insn, *prev_insn = NULL; 3486 struct section *sec; 3487 u8 visited; 3488 int ret; 3489 3490 sec = insn->sec; 3491 3492 while (1) { 3493 next_insn = next_insn_to_validate(file, insn); 3494 3495 if (func && insn_func(insn) && func != insn_func(insn)->pfunc) { 3496 /* Ignore KCFI type preambles, which always fall through */ 3497 if (!strncmp(func->name, "__cfi_", 6) || 3498 !strncmp(func->name, "__pfx_", 6)) 3499 return 0; 3500 3501 WARN("%s() falls through to next function %s()", 3502 func->name, insn_func(insn)->name); 3503 return 1; 3504 } 3505 3506 if (func && insn->ignore) { 3507 WARN_FUNC("BUG: why am I validating an ignored function?", 3508 sec, insn->offset); 3509 return 1; 3510 } 3511 3512 visited = VISITED_BRANCH << state.uaccess; 3513 if (insn->visited & VISITED_BRANCH_MASK) { 3514 if (!insn->hint && !insn_cfi_match(insn, &state.cfi)) 3515 return 1; 3516 3517 if (insn->visited & visited) 3518 return 0; 3519 } else { 3520 nr_insns_visited++; 3521 } 3522 3523 if (state.noinstr) 3524 state.instr += insn->instr; 3525 3526 if (insn->hint) { 3527 if (insn->restore) { 3528 struct instruction *save_insn, *i; 3529 3530 i = insn; 3531 save_insn = NULL; 3532 3533 sym_for_each_insn_continue_reverse(file, func, i) { 3534 if (i->save) { 3535 save_insn = i; 3536 break; 3537 } 3538 } 3539 3540 if (!save_insn) { 3541 WARN_FUNC("no corresponding CFI save for CFI restore", 3542 sec, insn->offset); 3543 return 1; 3544 } 3545 3546 if (!save_insn->visited) { 3547 WARN_FUNC("objtool isn't smart enough to handle this CFI save/restore combo", 3548 sec, insn->offset); 3549 return 1; 3550 } 3551 3552 insn->cfi = save_insn->cfi; 3553 nr_cfi_reused++; 3554 } 3555 3556 state.cfi = *insn->cfi; 3557 } else { 3558 /* XXX track if we actually changed state.cfi */ 3559 3560 if (prev_insn && !cficmp(prev_insn->cfi, &state.cfi)) { 3561 insn->cfi = prev_insn->cfi; 3562 nr_cfi_reused++; 3563 } else { 3564 insn->cfi = cfi_hash_find_or_add(&state.cfi); 3565 } 3566 } 3567 3568 insn->visited |= visited; 3569 3570 if (propagate_alt_cfi(file, insn)) 3571 return 1; 3572 3573 if (!insn->ignore_alts && !list_empty(&insn->alts)) { 3574 bool skip_orig = false; 3575 3576 list_for_each_entry(alt, &insn->alts, list) { 3577 if (alt->skip_orig) 3578 skip_orig = true; 3579 3580 ret = validate_branch(file, func, alt->insn, state); 3581 if (ret) { 3582 if (opts.backtrace) 3583 BT_FUNC("(alt)", insn); 3584 return ret; 3585 } 3586 } 3587 3588 if (skip_orig) 3589 return 0; 3590 } 3591 3592 if (handle_insn_ops(insn, next_insn, &state)) 3593 return 1; 3594 3595 switch (insn->type) { 3596 3597 case INSN_RETURN: 3598 return validate_return(func, insn, &state); 3599 3600 case INSN_CALL: 3601 case INSN_CALL_DYNAMIC: 3602 ret = validate_call(file, insn, &state); 3603 if (ret) 3604 return ret; 3605 3606 if (opts.stackval && func && !is_fentry_call(insn) && 3607 !has_valid_stack_frame(&state)) { 3608 WARN_FUNC("call without frame pointer save/setup", 3609 sec, insn->offset); 3610 return 1; 3611 } 3612 3613 if (insn->dead_end) 3614 return 0; 3615 3616 break; 3617 3618 case INSN_JUMP_CONDITIONAL: 3619 case INSN_JUMP_UNCONDITIONAL: 3620 if (is_sibling_call(insn)) { 3621 ret = validate_sibling_call(file, insn, &state); 3622 if (ret) 3623 return ret; 3624 3625 } else if (insn->jump_dest) { 3626 ret = validate_branch(file, func, 3627 insn->jump_dest, state); 3628 if (ret) { 3629 if (opts.backtrace) 3630 BT_FUNC("(branch)", insn); 3631 return ret; 3632 } 3633 } 3634 3635 if (insn->type == INSN_JUMP_UNCONDITIONAL) 3636 return 0; 3637 3638 break; 3639 3640 case INSN_JUMP_DYNAMIC: 3641 case INSN_JUMP_DYNAMIC_CONDITIONAL: 3642 if (is_sibling_call(insn)) { 3643 ret = validate_sibling_call(file, insn, &state); 3644 if (ret) 3645 return ret; 3646 } 3647 3648 if (insn->type == INSN_JUMP_DYNAMIC) 3649 return 0; 3650 3651 break; 3652 3653 case INSN_CONTEXT_SWITCH: 3654 if (func && (!next_insn || !next_insn->hint)) { 3655 WARN_FUNC("unsupported instruction in callable function", 3656 sec, insn->offset); 3657 return 1; 3658 } 3659 return 0; 3660 3661 case INSN_STAC: 3662 if (state.uaccess) { 3663 WARN_FUNC("recursive UACCESS enable", sec, insn->offset); 3664 return 1; 3665 } 3666 3667 state.uaccess = true; 3668 break; 3669 3670 case INSN_CLAC: 3671 if (!state.uaccess && func) { 3672 WARN_FUNC("redundant UACCESS disable", sec, insn->offset); 3673 return 1; 3674 } 3675 3676 if (func_uaccess_safe(func) && !state.uaccess_stack) { 3677 WARN_FUNC("UACCESS-safe disables UACCESS", sec, insn->offset); 3678 return 1; 3679 } 3680 3681 state.uaccess = false; 3682 break; 3683 3684 case INSN_STD: 3685 if (state.df) { 3686 WARN_FUNC("recursive STD", sec, insn->offset); 3687 return 1; 3688 } 3689 3690 state.df = true; 3691 break; 3692 3693 case INSN_CLD: 3694 if (!state.df && func) { 3695 WARN_FUNC("redundant CLD", sec, insn->offset); 3696 return 1; 3697 } 3698 3699 state.df = false; 3700 break; 3701 3702 default: 3703 break; 3704 } 3705 3706 if (insn->dead_end) 3707 return 0; 3708 3709 if (!next_insn) { 3710 if (state.cfi.cfa.base == CFI_UNDEFINED) 3711 return 0; 3712 WARN("%s: unexpected end of section", sec->name); 3713 return 1; 3714 } 3715 3716 prev_insn = insn; 3717 insn = next_insn; 3718 } 3719 3720 return 0; 3721 } 3722 3723 static int validate_unwind_hints(struct objtool_file *file, struct section *sec) 3724 { 3725 struct instruction *insn; 3726 struct insn_state state; 3727 int ret, warnings = 0; 3728 3729 if (!file->hints) 3730 return 0; 3731 3732 init_insn_state(file, &state, sec); 3733 3734 if (sec) { 3735 insn = find_insn(file, sec, 0); 3736 if (!insn) 3737 return 0; 3738 } else { 3739 insn = list_first_entry(&file->insn_list, typeof(*insn), list); 3740 } 3741 3742 while (&insn->list != &file->insn_list && (!sec || insn->sec == sec)) { 3743 if (insn->hint && !insn->visited && !insn->ignore) { 3744 ret = validate_branch(file, insn_func(insn), insn, state); 3745 if (ret && opts.backtrace) 3746 BT_FUNC("<=== (hint)", insn); 3747 warnings += ret; 3748 } 3749 3750 insn = list_next_entry(insn, list); 3751 } 3752 3753 return warnings; 3754 } 3755 3756 /* 3757 * Validate rethunk entry constraint: must untrain RET before the first RET. 3758 * 3759 * Follow every branch (intra-function) and ensure ANNOTATE_UNRET_END comes 3760 * before an actual RET instruction. 3761 */ 3762 static int validate_entry(struct objtool_file *file, struct instruction *insn) 3763 { 3764 struct instruction *next, *dest; 3765 int ret, warnings = 0; 3766 3767 for (;;) { 3768 next = next_insn_to_validate(file, insn); 3769 3770 if (insn->visited & VISITED_ENTRY) 3771 return 0; 3772 3773 insn->visited |= VISITED_ENTRY; 3774 3775 if (!insn->ignore_alts && !list_empty(&insn->alts)) { 3776 struct alternative *alt; 3777 bool skip_orig = false; 3778 3779 list_for_each_entry(alt, &insn->alts, list) { 3780 if (alt->skip_orig) 3781 skip_orig = true; 3782 3783 ret = validate_entry(file, alt->insn); 3784 if (ret) { 3785 if (opts.backtrace) 3786 BT_FUNC("(alt)", insn); 3787 return ret; 3788 } 3789 } 3790 3791 if (skip_orig) 3792 return 0; 3793 } 3794 3795 switch (insn->type) { 3796 3797 case INSN_CALL_DYNAMIC: 3798 case INSN_JUMP_DYNAMIC: 3799 case INSN_JUMP_DYNAMIC_CONDITIONAL: 3800 WARN_FUNC("early indirect call", insn->sec, insn->offset); 3801 return 1; 3802 3803 case INSN_JUMP_UNCONDITIONAL: 3804 case INSN_JUMP_CONDITIONAL: 3805 if (!is_sibling_call(insn)) { 3806 if (!insn->jump_dest) { 3807 WARN_FUNC("unresolved jump target after linking?!?", 3808 insn->sec, insn->offset); 3809 return -1; 3810 } 3811 ret = validate_entry(file, insn->jump_dest); 3812 if (ret) { 3813 if (opts.backtrace) { 3814 BT_FUNC("(branch%s)", insn, 3815 insn->type == INSN_JUMP_CONDITIONAL ? "-cond" : ""); 3816 } 3817 return ret; 3818 } 3819 3820 if (insn->type == INSN_JUMP_UNCONDITIONAL) 3821 return 0; 3822 3823 break; 3824 } 3825 3826 /* fallthrough */ 3827 case INSN_CALL: 3828 dest = find_insn(file, insn->call_dest->sec, 3829 insn->call_dest->offset); 3830 if (!dest) { 3831 WARN("Unresolved function after linking!?: %s", 3832 insn->call_dest->name); 3833 return -1; 3834 } 3835 3836 ret = validate_entry(file, dest); 3837 if (ret) { 3838 if (opts.backtrace) 3839 BT_FUNC("(call)", insn); 3840 return ret; 3841 } 3842 /* 3843 * If a call returns without error, it must have seen UNTRAIN_RET. 3844 * Therefore any non-error return is a success. 3845 */ 3846 return 0; 3847 3848 case INSN_RETURN: 3849 WARN_FUNC("RET before UNTRAIN", insn->sec, insn->offset); 3850 return 1; 3851 3852 case INSN_NOP: 3853 if (insn->retpoline_safe) 3854 return 0; 3855 break; 3856 3857 default: 3858 break; 3859 } 3860 3861 if (!next) { 3862 WARN_FUNC("teh end!", insn->sec, insn->offset); 3863 return -1; 3864 } 3865 insn = next; 3866 } 3867 3868 return warnings; 3869 } 3870 3871 /* 3872 * Validate that all branches starting at 'insn->entry' encounter UNRET_END 3873 * before RET. 3874 */ 3875 static int validate_unret(struct objtool_file *file) 3876 { 3877 struct instruction *insn; 3878 int ret, warnings = 0; 3879 3880 for_each_insn(file, insn) { 3881 if (!insn->entry) 3882 continue; 3883 3884 ret = validate_entry(file, insn); 3885 if (ret < 0) { 3886 WARN_FUNC("Failed UNRET validation", insn->sec, insn->offset); 3887 return ret; 3888 } 3889 warnings += ret; 3890 } 3891 3892 return warnings; 3893 } 3894 3895 static int validate_retpoline(struct objtool_file *file) 3896 { 3897 struct instruction *insn; 3898 int warnings = 0; 3899 3900 for_each_insn(file, insn) { 3901 if (insn->type != INSN_JUMP_DYNAMIC && 3902 insn->type != INSN_CALL_DYNAMIC && 3903 insn->type != INSN_RETURN) 3904 continue; 3905 3906 if (insn->retpoline_safe) 3907 continue; 3908 3909 if (insn->sec->init) 3910 continue; 3911 3912 if (insn->type == INSN_RETURN) { 3913 if (opts.rethunk) { 3914 WARN_FUNC("'naked' return found in RETHUNK build", 3915 insn->sec, insn->offset); 3916 } else 3917 continue; 3918 } else { 3919 WARN_FUNC("indirect %s found in RETPOLINE build", 3920 insn->sec, insn->offset, 3921 insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call"); 3922 } 3923 3924 warnings++; 3925 } 3926 3927 return warnings; 3928 } 3929 3930 static bool is_kasan_insn(struct instruction *insn) 3931 { 3932 return (insn->type == INSN_CALL && 3933 !strcmp(insn->call_dest->name, "__asan_handle_no_return")); 3934 } 3935 3936 static bool is_ubsan_insn(struct instruction *insn) 3937 { 3938 return (insn->type == INSN_CALL && 3939 !strcmp(insn->call_dest->name, 3940 "__ubsan_handle_builtin_unreachable")); 3941 } 3942 3943 static bool ignore_unreachable_insn(struct objtool_file *file, struct instruction *insn) 3944 { 3945 int i; 3946 struct instruction *prev_insn; 3947 3948 if (insn->ignore || insn->type == INSN_NOP || insn->type == INSN_TRAP) 3949 return true; 3950 3951 /* 3952 * Ignore alternative replacement instructions. This can happen 3953 * when a whitelisted function uses one of the ALTERNATIVE macros. 3954 */ 3955 if (!strcmp(insn->sec->name, ".altinstr_replacement") || 3956 !strcmp(insn->sec->name, ".altinstr_aux")) 3957 return true; 3958 3959 /* 3960 * Whole archive runs might encounter dead code from weak symbols. 3961 * This is where the linker will have dropped the weak symbol in 3962 * favour of a regular symbol, but leaves the code in place. 3963 * 3964 * In this case we'll find a piece of code (whole function) that is not 3965 * covered by a !section symbol. Ignore them. 3966 */ 3967 if (opts.link && !insn_func(insn)) { 3968 int size = find_symbol_hole_containing(insn->sec, insn->offset); 3969 unsigned long end = insn->offset + size; 3970 3971 if (!size) /* not a hole */ 3972 return false; 3973 3974 if (size < 0) /* hole until the end */ 3975 return true; 3976 3977 sec_for_each_insn_continue(file, insn) { 3978 /* 3979 * If we reach a visited instruction at or before the 3980 * end of the hole, ignore the unreachable. 3981 */ 3982 if (insn->visited) 3983 return true; 3984 3985 if (insn->offset >= end) 3986 break; 3987 3988 /* 3989 * If this hole jumps to a .cold function, mark it ignore too. 3990 */ 3991 if (insn->jump_dest && insn_func(insn->jump_dest) && 3992 strstr(insn_func(insn->jump_dest)->name, ".cold")) { 3993 struct instruction *dest = insn->jump_dest; 3994 func_for_each_insn(file, insn_func(dest), dest) 3995 dest->ignore = true; 3996 } 3997 } 3998 3999 return false; 4000 } 4001 4002 if (!insn_func(insn)) 4003 return false; 4004 4005 if (insn_func(insn)->static_call_tramp) 4006 return true; 4007 4008 /* 4009 * CONFIG_UBSAN_TRAP inserts a UD2 when it sees 4010 * __builtin_unreachable(). The BUG() macro has an unreachable() after 4011 * the UD2, which causes GCC's undefined trap logic to emit another UD2 4012 * (or occasionally a JMP to UD2). 4013 * 4014 * It may also insert a UD2 after calling a __noreturn function. 4015 */ 4016 prev_insn = list_prev_entry(insn, list); 4017 if ((prev_insn->dead_end || dead_end_function(file, prev_insn->call_dest)) && 4018 (insn->type == INSN_BUG || 4019 (insn->type == INSN_JUMP_UNCONDITIONAL && 4020 insn->jump_dest && insn->jump_dest->type == INSN_BUG))) 4021 return true; 4022 4023 /* 4024 * Check if this (or a subsequent) instruction is related to 4025 * CONFIG_UBSAN or CONFIG_KASAN. 4026 * 4027 * End the search at 5 instructions to avoid going into the weeds. 4028 */ 4029 for (i = 0; i < 5; i++) { 4030 4031 if (is_kasan_insn(insn) || is_ubsan_insn(insn)) 4032 return true; 4033 4034 if (insn->type == INSN_JUMP_UNCONDITIONAL) { 4035 if (insn->jump_dest && 4036 insn_func(insn->jump_dest) == insn_func(insn)) { 4037 insn = insn->jump_dest; 4038 continue; 4039 } 4040 4041 break; 4042 } 4043 4044 if (insn->offset + insn->len >= insn_func(insn)->offset + insn_func(insn)->len) 4045 break; 4046 4047 insn = list_next_entry(insn, list); 4048 } 4049 4050 return false; 4051 } 4052 4053 static int add_prefix_symbol(struct objtool_file *file, struct symbol *func, 4054 struct instruction *insn) 4055 { 4056 if (!opts.prefix) 4057 return 0; 4058 4059 for (;;) { 4060 struct instruction *prev = list_prev_entry(insn, list); 4061 u64 offset; 4062 4063 if (&prev->list == &file->insn_list) 4064 break; 4065 4066 if (prev->type != INSN_NOP) 4067 break; 4068 4069 offset = func->offset - prev->offset; 4070 if (offset >= opts.prefix) { 4071 if (offset == opts.prefix) { 4072 /* 4073 * Since the sec->symbol_list is ordered by 4074 * offset (see elf_add_symbol()) the added 4075 * symbol will not be seen by the iteration in 4076 * validate_section(). 4077 * 4078 * Hence the lack of list_for_each_entry_safe() 4079 * there. 4080 * 4081 * The direct concequence is that prefix symbols 4082 * don't get visited (because pointless), except 4083 * for the logic in ignore_unreachable_insn() 4084 * that needs the terminating insn to be visited 4085 * otherwise it will report the hole. 4086 * 4087 * Hence mark the first instruction of the 4088 * prefix symbol as visisted. 4089 */ 4090 prev->visited |= VISITED_BRANCH; 4091 elf_create_prefix_symbol(file->elf, func, opts.prefix); 4092 } 4093 break; 4094 } 4095 insn = prev; 4096 } 4097 4098 return 0; 4099 } 4100 4101 static int validate_symbol(struct objtool_file *file, struct section *sec, 4102 struct symbol *sym, struct insn_state *state) 4103 { 4104 struct instruction *insn; 4105 int ret; 4106 4107 if (!sym->len) { 4108 WARN("%s() is missing an ELF size annotation", sym->name); 4109 return 1; 4110 } 4111 4112 if (sym->pfunc != sym || sym->alias != sym) 4113 return 0; 4114 4115 insn = find_insn(file, sec, sym->offset); 4116 if (!insn || insn->ignore || insn->visited) 4117 return 0; 4118 4119 add_prefix_symbol(file, sym, insn); 4120 4121 state->uaccess = sym->uaccess_safe; 4122 4123 ret = validate_branch(file, insn_func(insn), insn, *state); 4124 if (ret && opts.backtrace) 4125 BT_FUNC("<=== (sym)", insn); 4126 return ret; 4127 } 4128 4129 static int validate_section(struct objtool_file *file, struct section *sec) 4130 { 4131 struct insn_state state; 4132 struct symbol *func; 4133 int warnings = 0; 4134 4135 list_for_each_entry(func, &sec->symbol_list, list) { 4136 if (func->type != STT_FUNC) 4137 continue; 4138 4139 init_insn_state(file, &state, sec); 4140 set_func_state(&state.cfi); 4141 4142 warnings += validate_symbol(file, sec, func, &state); 4143 } 4144 4145 return warnings; 4146 } 4147 4148 static int validate_noinstr_sections(struct objtool_file *file) 4149 { 4150 struct section *sec; 4151 int warnings = 0; 4152 4153 sec = find_section_by_name(file->elf, ".noinstr.text"); 4154 if (sec) { 4155 warnings += validate_section(file, sec); 4156 warnings += validate_unwind_hints(file, sec); 4157 } 4158 4159 sec = find_section_by_name(file->elf, ".entry.text"); 4160 if (sec) { 4161 warnings += validate_section(file, sec); 4162 warnings += validate_unwind_hints(file, sec); 4163 } 4164 4165 return warnings; 4166 } 4167 4168 static int validate_functions(struct objtool_file *file) 4169 { 4170 struct section *sec; 4171 int warnings = 0; 4172 4173 for_each_sec(file, sec) { 4174 if (!(sec->sh.sh_flags & SHF_EXECINSTR)) 4175 continue; 4176 4177 warnings += validate_section(file, sec); 4178 } 4179 4180 return warnings; 4181 } 4182 4183 static void mark_endbr_used(struct instruction *insn) 4184 { 4185 if (!list_empty(&insn->call_node)) 4186 list_del_init(&insn->call_node); 4187 } 4188 4189 static bool noendbr_range(struct objtool_file *file, struct instruction *insn) 4190 { 4191 struct symbol *sym = find_symbol_containing(insn->sec, insn->offset-1); 4192 struct instruction *first; 4193 4194 if (!sym) 4195 return false; 4196 4197 first = find_insn(file, sym->sec, sym->offset); 4198 if (!first) 4199 return false; 4200 4201 if (first->type != INSN_ENDBR && !first->noendbr) 4202 return false; 4203 4204 return insn->offset == sym->offset + sym->len; 4205 } 4206 4207 static int validate_ibt_insn(struct objtool_file *file, struct instruction *insn) 4208 { 4209 struct instruction *dest; 4210 struct reloc *reloc; 4211 unsigned long off; 4212 int warnings = 0; 4213 4214 /* 4215 * Looking for function pointer load relocations. Ignore 4216 * direct/indirect branches: 4217 */ 4218 switch (insn->type) { 4219 case INSN_CALL: 4220 case INSN_CALL_DYNAMIC: 4221 case INSN_JUMP_CONDITIONAL: 4222 case INSN_JUMP_UNCONDITIONAL: 4223 case INSN_JUMP_DYNAMIC: 4224 case INSN_JUMP_DYNAMIC_CONDITIONAL: 4225 case INSN_RETURN: 4226 case INSN_NOP: 4227 return 0; 4228 default: 4229 break; 4230 } 4231 4232 for (reloc = insn_reloc(file, insn); 4233 reloc; 4234 reloc = find_reloc_by_dest_range(file->elf, insn->sec, 4235 reloc->offset + 1, 4236 (insn->offset + insn->len) - (reloc->offset + 1))) { 4237 4238 /* 4239 * static_call_update() references the trampoline, which 4240 * doesn't have (or need) ENDBR. Skip warning in that case. 4241 */ 4242 if (reloc->sym->static_call_tramp) 4243 continue; 4244 4245 off = reloc->sym->offset; 4246 if (reloc->type == R_X86_64_PC32 || reloc->type == R_X86_64_PLT32) 4247 off += arch_dest_reloc_offset(reloc->addend); 4248 else 4249 off += reloc->addend; 4250 4251 dest = find_insn(file, reloc->sym->sec, off); 4252 if (!dest) 4253 continue; 4254 4255 if (dest->type == INSN_ENDBR) { 4256 mark_endbr_used(dest); 4257 continue; 4258 } 4259 4260 if (insn_func(dest) && insn_func(dest) == insn_func(insn)) { 4261 /* 4262 * Anything from->to self is either _THIS_IP_ or 4263 * IRET-to-self. 4264 * 4265 * There is no sane way to annotate _THIS_IP_ since the 4266 * compiler treats the relocation as a constant and is 4267 * happy to fold in offsets, skewing any annotation we 4268 * do, leading to vast amounts of false-positives. 4269 * 4270 * There's also compiler generated _THIS_IP_ through 4271 * KCOV and such which we have no hope of annotating. 4272 * 4273 * As such, blanket accept self-references without 4274 * issue. 4275 */ 4276 continue; 4277 } 4278 4279 /* 4280 * Accept anything ANNOTATE_NOENDBR. 4281 */ 4282 if (dest->noendbr) 4283 continue; 4284 4285 /* 4286 * Accept if this is the instruction after a symbol 4287 * that is (no)endbr -- typical code-range usage. 4288 */ 4289 if (noendbr_range(file, dest)) 4290 continue; 4291 4292 WARN_FUNC("relocation to !ENDBR: %s", 4293 insn->sec, insn->offset, 4294 offstr(dest->sec, dest->offset)); 4295 4296 warnings++; 4297 } 4298 4299 return warnings; 4300 } 4301 4302 static int validate_ibt_data_reloc(struct objtool_file *file, 4303 struct reloc *reloc) 4304 { 4305 struct instruction *dest; 4306 4307 dest = find_insn(file, reloc->sym->sec, 4308 reloc->sym->offset + reloc->addend); 4309 if (!dest) 4310 return 0; 4311 4312 if (dest->type == INSN_ENDBR) { 4313 mark_endbr_used(dest); 4314 return 0; 4315 } 4316 4317 if (dest->noendbr) 4318 return 0; 4319 4320 WARN_FUNC("data relocation to !ENDBR: %s", 4321 reloc->sec->base, reloc->offset, 4322 offstr(dest->sec, dest->offset)); 4323 4324 return 1; 4325 } 4326 4327 /* 4328 * Validate IBT rules and remove used ENDBR instructions from the seal list. 4329 * Unused ENDBR instructions will be annotated for sealing (i.e., replaced with 4330 * NOPs) later, in create_ibt_endbr_seal_sections(). 4331 */ 4332 static int validate_ibt(struct objtool_file *file) 4333 { 4334 struct section *sec; 4335 struct reloc *reloc; 4336 struct instruction *insn; 4337 int warnings = 0; 4338 4339 for_each_insn(file, insn) 4340 warnings += validate_ibt_insn(file, insn); 4341 4342 for_each_sec(file, sec) { 4343 4344 /* Already done by validate_ibt_insn() */ 4345 if (sec->sh.sh_flags & SHF_EXECINSTR) 4346 continue; 4347 4348 if (!sec->reloc) 4349 continue; 4350 4351 /* 4352 * These sections can reference text addresses, but not with 4353 * the intent to indirect branch to them. 4354 */ 4355 if ((!strncmp(sec->name, ".discard", 8) && 4356 strcmp(sec->name, ".discard.ibt_endbr_noseal")) || 4357 !strncmp(sec->name, ".debug", 6) || 4358 !strcmp(sec->name, ".altinstructions") || 4359 !strcmp(sec->name, ".ibt_endbr_seal") || 4360 !strcmp(sec->name, ".orc_unwind_ip") || 4361 !strcmp(sec->name, ".parainstructions") || 4362 !strcmp(sec->name, ".retpoline_sites") || 4363 !strcmp(sec->name, ".smp_locks") || 4364 !strcmp(sec->name, ".static_call_sites") || 4365 !strcmp(sec->name, "_error_injection_whitelist") || 4366 !strcmp(sec->name, "_kprobe_blacklist") || 4367 !strcmp(sec->name, "__bug_table") || 4368 !strcmp(sec->name, "__ex_table") || 4369 !strcmp(sec->name, "__jump_table") || 4370 !strcmp(sec->name, "__mcount_loc") || 4371 !strcmp(sec->name, ".kcfi_traps") || 4372 strstr(sec->name, "__patchable_function_entries")) 4373 continue; 4374 4375 list_for_each_entry(reloc, &sec->reloc->reloc_list, list) 4376 warnings += validate_ibt_data_reloc(file, reloc); 4377 } 4378 4379 return warnings; 4380 } 4381 4382 static int validate_sls(struct objtool_file *file) 4383 { 4384 struct instruction *insn, *next_insn; 4385 int warnings = 0; 4386 4387 for_each_insn(file, insn) { 4388 next_insn = next_insn_same_sec(file, insn); 4389 4390 if (insn->retpoline_safe) 4391 continue; 4392 4393 switch (insn->type) { 4394 case INSN_RETURN: 4395 if (!next_insn || next_insn->type != INSN_TRAP) { 4396 WARN_FUNC("missing int3 after ret", 4397 insn->sec, insn->offset); 4398 warnings++; 4399 } 4400 4401 break; 4402 case INSN_JUMP_DYNAMIC: 4403 if (!next_insn || next_insn->type != INSN_TRAP) { 4404 WARN_FUNC("missing int3 after indirect jump", 4405 insn->sec, insn->offset); 4406 warnings++; 4407 } 4408 break; 4409 default: 4410 break; 4411 } 4412 } 4413 4414 return warnings; 4415 } 4416 4417 static int validate_reachable_instructions(struct objtool_file *file) 4418 { 4419 struct instruction *insn; 4420 4421 if (file->ignore_unreachables) 4422 return 0; 4423 4424 for_each_insn(file, insn) { 4425 if (insn->visited || ignore_unreachable_insn(file, insn)) 4426 continue; 4427 4428 WARN_FUNC("unreachable instruction", insn->sec, insn->offset); 4429 return 1; 4430 } 4431 4432 return 0; 4433 } 4434 4435 int check(struct objtool_file *file) 4436 { 4437 int ret, warnings = 0; 4438 4439 arch_initial_func_cfi_state(&initial_func_cfi); 4440 init_cfi_state(&init_cfi); 4441 init_cfi_state(&func_cfi); 4442 set_func_state(&func_cfi); 4443 4444 if (!cfi_hash_alloc(1UL << (file->elf->symbol_bits - 3))) 4445 goto out; 4446 4447 cfi_hash_add(&init_cfi); 4448 cfi_hash_add(&func_cfi); 4449 4450 ret = decode_sections(file); 4451 if (ret < 0) 4452 goto out; 4453 4454 warnings += ret; 4455 4456 if (list_empty(&file->insn_list)) 4457 goto out; 4458 4459 if (opts.retpoline) { 4460 ret = validate_retpoline(file); 4461 if (ret < 0) 4462 return ret; 4463 warnings += ret; 4464 } 4465 4466 if (opts.stackval || opts.orc || opts.uaccess) { 4467 ret = validate_functions(file); 4468 if (ret < 0) 4469 goto out; 4470 warnings += ret; 4471 4472 ret = validate_unwind_hints(file, NULL); 4473 if (ret < 0) 4474 goto out; 4475 warnings += ret; 4476 4477 if (!warnings) { 4478 ret = validate_reachable_instructions(file); 4479 if (ret < 0) 4480 goto out; 4481 warnings += ret; 4482 } 4483 4484 } else if (opts.noinstr) { 4485 ret = validate_noinstr_sections(file); 4486 if (ret < 0) 4487 goto out; 4488 warnings += ret; 4489 } 4490 4491 if (opts.unret) { 4492 /* 4493 * Must be after validate_branch() and friends, it plays 4494 * further games with insn->visited. 4495 */ 4496 ret = validate_unret(file); 4497 if (ret < 0) 4498 return ret; 4499 warnings += ret; 4500 } 4501 4502 if (opts.ibt) { 4503 ret = validate_ibt(file); 4504 if (ret < 0) 4505 goto out; 4506 warnings += ret; 4507 } 4508 4509 if (opts.sls) { 4510 ret = validate_sls(file); 4511 if (ret < 0) 4512 goto out; 4513 warnings += ret; 4514 } 4515 4516 if (opts.static_call) { 4517 ret = create_static_call_sections(file); 4518 if (ret < 0) 4519 goto out; 4520 warnings += ret; 4521 } 4522 4523 if (opts.retpoline) { 4524 ret = create_retpoline_sites_sections(file); 4525 if (ret < 0) 4526 goto out; 4527 warnings += ret; 4528 } 4529 4530 if (opts.cfi) { 4531 ret = create_cfi_sections(file); 4532 if (ret < 0) 4533 goto out; 4534 warnings += ret; 4535 } 4536 4537 if (opts.rethunk) { 4538 ret = create_return_sites_sections(file); 4539 if (ret < 0) 4540 goto out; 4541 warnings += ret; 4542 4543 if (opts.hack_skylake) { 4544 ret = create_direct_call_sections(file); 4545 if (ret < 0) 4546 goto out; 4547 warnings += ret; 4548 } 4549 } 4550 4551 if (opts.mcount) { 4552 ret = create_mcount_loc_sections(file); 4553 if (ret < 0) 4554 goto out; 4555 warnings += ret; 4556 } 4557 4558 if (opts.ibt) { 4559 ret = create_ibt_endbr_seal_sections(file); 4560 if (ret < 0) 4561 goto out; 4562 warnings += ret; 4563 } 4564 4565 if (opts.orc && !list_empty(&file->insn_list)) { 4566 ret = orc_create(file); 4567 if (ret < 0) 4568 goto out; 4569 warnings += ret; 4570 } 4571 4572 4573 if (opts.stats) { 4574 printf("nr_insns_visited: %ld\n", nr_insns_visited); 4575 printf("nr_cfi: %ld\n", nr_cfi); 4576 printf("nr_cfi_reused: %ld\n", nr_cfi_reused); 4577 printf("nr_cfi_cache: %ld\n", nr_cfi_cache); 4578 } 4579 4580 out: 4581 /* 4582 * For now, don't fail the kernel build on fatal warnings. These 4583 * errors are still fairly common due to the growing matrix of 4584 * supported toolchains and their recent pace of change. 4585 */ 4586 return 0; 4587 } 4588