// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com>
 */

#include <string.h>
#include <stdlib.h>

#include <arch/elf.h>
#include <objtool/builtin.h>
#include <objtool/cfi.h>
#include <objtool/arch.h>
#include <objtool/check.h>
#include <objtool/special.h>
#include <objtool/warn.h>
#include <objtool/endianness.h>

#include <linux/objtool.h>
#include <linux/hashtable.h>
#include <linux/kernel.h>
#include <linux/static_call_types.h>

struct alternative {
	struct list_head list;
	struct instruction *insn;
	bool skip_orig;
};

struct cfi_init_state initial_func_cfi;

struct instruction *find_insn(struct objtool_file *file,
			      struct section *sec, unsigned long offset)
{
	struct instruction *insn;

	hash_for_each_possible(file->insn_hash, insn, hash, sec_offset_hash(sec, offset)) {
		if (insn->sec == sec && insn->offset == offset)
			return insn;
	}

	return NULL;
}

static struct instruction *next_insn_same_sec(struct objtool_file *file,
					      struct instruction *insn)
{
	struct instruction *next = list_next_entry(insn, list);

	if (!next || &next->list == &file->insn_list || next->sec != insn->sec)
		return NULL;

	return next;
}

static struct instruction *next_insn_same_func(struct objtool_file *file,
					       struct instruction *insn)
{
	struct instruction *next = list_next_entry(insn, list);
	struct symbol *func = insn->func;

	if (!func)
		return NULL;

	if (&next->list != &file->insn_list && next->func == func)
		return next;

	/* Check if we're already in the subfunction: */
	if (func == func->cfunc)
		return NULL;

	/* Move to the subfunction: */
	return find_insn(file, func->cfunc->sec, func->cfunc->offset);
}

static struct instruction *prev_insn_same_sym(struct objtool_file *file,
					      struct instruction *insn)
{
	struct instruction *prev = list_prev_entry(insn, list);

	if (&prev->list != &file->insn_list && prev->func == insn->func)
		return prev;

	return NULL;
}
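/*
 * Note: func_for_each_insn() below follows a function into its GCC-generated
 * .cold subfunction via next_insn_same_func(), while sym_for_each_insn()
 * stays strictly within the symbol's [offset, offset + len) byte range.
 */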
#define func_for_each_insn(file, func, insn)				\
	for (insn = find_insn(file, func->sec, func->offset);		\
	     insn;							\
	     insn = next_insn_same_func(file, insn))

#define sym_for_each_insn(file, sym, insn)				\
	for (insn = find_insn(file, sym->sec, sym->offset);		\
	     insn && &insn->list != &file->insn_list &&			\
	     insn->sec == sym->sec &&					\
	     insn->offset < sym->offset + sym->len;			\
	     insn = list_next_entry(insn, list))

#define sym_for_each_insn_continue_reverse(file, sym, insn)		\
	for (insn = list_prev_entry(insn, list);			\
	     &insn->list != &file->insn_list &&				\
	     insn->sec == sym->sec && insn->offset >= sym->offset;	\
	     insn = list_prev_entry(insn, list))

#define sec_for_each_insn_from(file, insn)				\
	for (; insn; insn = next_insn_same_sec(file, insn))

#define sec_for_each_insn_continue(file, insn)				\
	for (insn = next_insn_same_sec(file, insn); insn;		\
	     insn = next_insn_same_sec(file, insn))

static bool is_sibling_call(struct instruction *insn)
{
	/*
	 * Assume only ELF functions can make sibling calls.  This ensures
	 * sibling call detection consistency between vmlinux.o and individual
	 * objects.
	 */
	if (!insn->func)
		return false;

	/* An indirect jump is either a sibling call or a jump to a table. */
	if (insn->type == INSN_JUMP_DYNAMIC)
		return list_empty(&insn->alts);

	/* add_jump_destinations() sets insn->call_dest for sibling calls. */
	return (is_static_jump(insn) && insn->call_dest);
}

/*
 * This checks to see if the given function is a "noreturn" function.
 *
 * For global functions which are outside the scope of this object file, we
 * have to keep a manual list of them.
 *
 * For local functions, we have to detect them manually by simply looking for
 * the lack of a return instruction.
 */
static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
				int recursion)
{
	int i;
	struct instruction *insn;
	bool empty = true;

	/*
	 * Unfortunately these have to be hard coded because the noreturn
	 * attribute isn't provided in ELF data.
	 */
	static const char * const global_noreturns[] = {
		"__stack_chk_fail",
		"panic",
		"do_exit",
		"do_task_dead",
		"__module_put_and_exit",
		"complete_and_exit",
		"__reiserfs_panic",
		"lbug_with_loc",
		"fortify_panic",
		"usercopy_abort",
		"machine_real_restart",
		"rewind_stack_do_exit",
		"kunit_try_catch_throw",
		"xen_start_kernel",
	};

	if (!func)
		return false;

	if (func->bind == STB_WEAK)
		return false;

	if (func->bind == STB_GLOBAL)
		for (i = 0; i < ARRAY_SIZE(global_noreturns); i++)
			if (!strcmp(func->name, global_noreturns[i]))
				return true;

	if (!func->len)
		return false;

	insn = find_insn(file, func->sec, func->offset);
	if (!insn->func)
		return false;

	func_for_each_insn(file, func, insn) {
		empty = false;

		if (insn->type == INSN_RETURN)
			return false;
	}

	if (empty)
		return false;

	/*
	 * A function can have a sibling call instead of a return.  In that
	 * case, the function's dead-end status depends on whether the target
	 * of the sibling call returns.
	 */
	func_for_each_insn(file, func, insn) {
		if (is_sibling_call(insn)) {
			struct instruction *dest = insn->jump_dest;

			if (!dest)
				/* sibling call to another file */
				return false;

			/* local sibling call */
			if (recursion == 5) {
				/*
				 * Infinite recursion: two functions have
				 * sibling calls to each other.  This is a very
				 * rare case.  It means they aren't dead ends.
				 */
				return false;
			}

			return __dead_end_function(file, dest->func, recursion+1);
		}
	}

	return true;
}

static bool dead_end_function(struct objtool_file *file, struct symbol *func)
{
	return __dead_end_function(file, func, 0);
}

static void init_cfi_state(struct cfi_state *cfi)
{
	int i;

	for (i = 0; i < CFI_NUM_REGS; i++) {
		cfi->regs[i].base = CFI_UNDEFINED;
		cfi->vals[i].base = CFI_UNDEFINED;
	}
	cfi->cfa.base = CFI_UNDEFINED;
	cfi->drap_reg = CFI_UNDEFINED;
	cfi->drap_offset = -1;
}

static void init_insn_state(struct insn_state *state, struct section *sec)
{
	memset(state, 0, sizeof(*state));
	init_cfi_state(&state->cfi);

	/*
	 * We need the full vmlinux for noinstr validation, otherwise we can
	 * not correctly determine insn->call_dest->sec (external symbols do
	 * not have a section).
	 */
	if (vmlinux && noinstr && sec)
		state->noinstr = sec->noinstr;
}

/*
 * Call the arch-specific instruction decoder for all the instructions and add
 * them to the global instruction list.
 */
static int decode_instructions(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *func;
	unsigned long offset;
	struct instruction *insn;
	unsigned long nr_insns = 0;
	int ret;

	for_each_sec(file, sec) {

		if (!(sec->sh.sh_flags & SHF_EXECINSTR))
			continue;

		if (strcmp(sec->name, ".altinstr_replacement") &&
		    strcmp(sec->name, ".altinstr_aux") &&
		    strncmp(sec->name, ".discard.", 9))
			sec->text = true;

		if (!strcmp(sec->name, ".noinstr.text") ||
		    !strcmp(sec->name, ".entry.text"))
			sec->noinstr = true;

		for (offset = 0; offset < sec->len; offset += insn->len) {
			insn = malloc(sizeof(*insn));
			if (!insn) {
				WARN("malloc failed");
				return -1;
			}
			memset(insn, 0, sizeof(*insn));
			INIT_LIST_HEAD(&insn->alts);
			INIT_LIST_HEAD(&insn->stack_ops);
			init_cfi_state(&insn->cfi);

			insn->sec = sec;
			insn->offset = offset;

			ret = arch_decode_instruction(file->elf, sec, offset,
						      sec->len - offset,
						      &insn->len, &insn->type,
						      &insn->immediate,
						      &insn->stack_ops);
			if (ret)
				goto err;

			hash_add(file->insn_hash, &insn->hash, sec_offset_hash(sec, insn->offset));
			list_add_tail(&insn->list, &file->insn_list);
			nr_insns++;
		}

		list_for_each_entry(func, &sec->symbol_list, list) {
			if (func->type != STT_FUNC || func->alias != func)
				continue;

			if (!find_insn(file, sec, func->offset)) {
				WARN("%s(): can't find starting instruction",
				     func->name);
				return -1;
			}

			sym_for_each_insn(file, func, insn)
				insn->func = func;
		}
	}

	if (stats)
		printf("nr_insns: %lu\n", nr_insns);

	return 0;

err:
	free(insn);
	return ret;
}

static struct instruction *find_last_insn(struct objtool_file *file,
					  struct section *sec)
{
	struct instruction *insn = NULL;
	unsigned int offset;
	unsigned int end = (sec->len > 10) ? sec->len - 10 : 0;

	for (offset = sec->len - 1; offset >= end && !insn; offset--)
		insn = find_insn(file, sec, offset);

	return insn;
}

/*
 * Mark "ud2" instructions and manually annotated dead ends.
 */
static int add_dead_ends(struct objtool_file *file)
{
	struct section *sec;
	struct reloc *reloc;
	struct instruction *insn;

	/*
	 * By default, "ud2" is a dead end unless otherwise annotated, because
	 * GCC 7 inserts it for certain divide-by-zero cases.
	 */
	for_each_insn(file, insn)
		if (insn->type == INSN_BUG)
			insn->dead_end = true;

	/*
	 * Check for manually annotated dead ends.
367 */ 368 sec = find_section_by_name(file->elf, ".rela.discard.unreachable"); 369 if (!sec) 370 goto reachable; 371 372 list_for_each_entry(reloc, &sec->reloc_list, list) { 373 if (reloc->sym->type != STT_SECTION) { 374 WARN("unexpected relocation symbol type in %s", sec->name); 375 return -1; 376 } 377 insn = find_insn(file, reloc->sym->sec, reloc->addend); 378 if (insn) 379 insn = list_prev_entry(insn, list); 380 else if (reloc->addend == reloc->sym->sec->len) { 381 insn = find_last_insn(file, reloc->sym->sec); 382 if (!insn) { 383 WARN("can't find unreachable insn at %s+0x%x", 384 reloc->sym->sec->name, reloc->addend); 385 return -1; 386 } 387 } else { 388 WARN("can't find unreachable insn at %s+0x%x", 389 reloc->sym->sec->name, reloc->addend); 390 return -1; 391 } 392 393 insn->dead_end = true; 394 } 395 396 reachable: 397 /* 398 * These manually annotated reachable checks are needed for GCC 4.4, 399 * where the Linux unreachable() macro isn't supported. In that case 400 * GCC doesn't know the "ud2" is fatal, so it generates code as if it's 401 * not a dead end. 402 */ 403 sec = find_section_by_name(file->elf, ".rela.discard.reachable"); 404 if (!sec) 405 return 0; 406 407 list_for_each_entry(reloc, &sec->reloc_list, list) { 408 if (reloc->sym->type != STT_SECTION) { 409 WARN("unexpected relocation symbol type in %s", sec->name); 410 return -1; 411 } 412 insn = find_insn(file, reloc->sym->sec, reloc->addend); 413 if (insn) 414 insn = list_prev_entry(insn, list); 415 else if (reloc->addend == reloc->sym->sec->len) { 416 insn = find_last_insn(file, reloc->sym->sec); 417 if (!insn) { 418 WARN("can't find reachable insn at %s+0x%x", 419 reloc->sym->sec->name, reloc->addend); 420 return -1; 421 } 422 } else { 423 WARN("can't find reachable insn at %s+0x%x", 424 reloc->sym->sec->name, reloc->addend); 425 return -1; 426 } 427 428 insn->dead_end = false; 429 } 430 431 return 0; 432 } 433 434 static int create_static_call_sections(struct objtool_file *file) 435 { 436 struct section *sec; 437 struct static_call_site *site; 438 struct instruction *insn; 439 struct symbol *key_sym; 440 char *key_name, *tmp; 441 int idx; 442 443 sec = find_section_by_name(file->elf, ".static_call_sites"); 444 if (sec) { 445 INIT_LIST_HEAD(&file->static_call_list); 446 WARN("file already has .static_call_sites section, skipping"); 447 return 0; 448 } 449 450 if (list_empty(&file->static_call_list)) 451 return 0; 452 453 idx = 0; 454 list_for_each_entry(insn, &file->static_call_list, call_node) 455 idx++; 456 457 sec = elf_create_section(file->elf, ".static_call_sites", SHF_WRITE, 458 sizeof(struct static_call_site), idx); 459 if (!sec) 460 return -1; 461 462 idx = 0; 463 list_for_each_entry(insn, &file->static_call_list, call_node) { 464 465 site = (struct static_call_site *)sec->data->d_buf + idx; 466 memset(site, 0, sizeof(struct static_call_site)); 467 468 /* populate reloc for 'addr' */ 469 if (elf_add_reloc_to_insn(file->elf, sec, 470 idx * sizeof(struct static_call_site), 471 R_X86_64_PC32, 472 insn->sec, insn->offset)) 473 return -1; 474 475 /* find key symbol */ 476 key_name = strdup(insn->call_dest->name); 477 if (!key_name) { 478 perror("strdup"); 479 return -1; 480 } 481 if (strncmp(key_name, STATIC_CALL_TRAMP_PREFIX_STR, 482 STATIC_CALL_TRAMP_PREFIX_LEN)) { 483 WARN("static_call: trampoline name malformed: %s", key_name); 484 return -1; 485 } 486 tmp = key_name + STATIC_CALL_TRAMP_PREFIX_LEN - STATIC_CALL_KEY_PREFIX_LEN; 487 memcpy(tmp, STATIC_CALL_KEY_PREFIX_STR, STATIC_CALL_KEY_PREFIX_LEN); 488 489 
		key_sym = find_symbol_by_name(file->elf, tmp);
		if (!key_sym) {
			if (!module) {
				WARN("static_call: can't find static_call_key symbol: %s", tmp);
				return -1;
			}

			/*
			 * For modules, the key might not be exported, which
			 * means the module can make static calls but isn't
			 * allowed to change them.
			 *
			 * In that case we temporarily set the key to be the
			 * trampoline address.  This is fixed up in
			 * static_call_add_module().
			 */
			key_sym = insn->call_dest;
		}
		free(key_name);

		/* populate reloc for 'key' */
		if (elf_add_reloc(file->elf, sec,
				  idx * sizeof(struct static_call_site) + 4,
				  R_X86_64_PC32, key_sym,
				  is_sibling_call(insn) * STATIC_CALL_SITE_TAIL))
			return -1;

		idx++;
	}

	return 0;
}

static int create_mcount_loc_sections(struct objtool_file *file)
{
	struct section *sec;
	unsigned long *loc;
	struct instruction *insn;
	int idx;

	sec = find_section_by_name(file->elf, "__mcount_loc");
	if (sec) {
		INIT_LIST_HEAD(&file->mcount_loc_list);
		WARN("file already has __mcount_loc section, skipping");
		return 0;
	}

	if (list_empty(&file->mcount_loc_list))
		return 0;

	idx = 0;
	list_for_each_entry(insn, &file->mcount_loc_list, mcount_loc_node)
		idx++;

	sec = elf_create_section(file->elf, "__mcount_loc", 0, sizeof(unsigned long), idx);
	if (!sec)
		return -1;

	idx = 0;
	list_for_each_entry(insn, &file->mcount_loc_list, mcount_loc_node) {

		loc = (unsigned long *)sec->data->d_buf + idx;
		memset(loc, 0, sizeof(unsigned long));

		if (elf_add_reloc_to_insn(file->elf, sec,
					  idx * sizeof(unsigned long),
					  R_X86_64_64,
					  insn->sec, insn->offset))
			return -1;

		idx++;
	}

	return 0;
}

/*
 * Warnings shouldn't be reported for ignored functions.
 */
static void add_ignores(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	struct symbol *func;
	struct reloc *reloc;

	sec = find_section_by_name(file->elf, ".rela.discard.func_stack_frame_non_standard");
	if (!sec)
		return;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		switch (reloc->sym->type) {
		case STT_FUNC:
			func = reloc->sym;
			break;

		case STT_SECTION:
			func = find_func_by_offset(reloc->sym->sec, reloc->addend);
			if (!func)
				continue;
			break;

		default:
			WARN("unexpected relocation symbol type in %s: %d", sec->name, reloc->sym->type);
			continue;
		}

		func_for_each_insn(file, func, insn)
			insn->ignore = true;
	}
}

/*
 * This is a whitelist of functions that are allowed to be called with AC set.
 * The list is meant to be minimal and only contains compiler instrumentation
 * ABI and a few functions used to implement *_{to,from}_user() functions.
 *
 * These functions must not directly change AC, but may PUSHF/POPF.
607 */ 608 static const char *uaccess_safe_builtin[] = { 609 /* KASAN */ 610 "kasan_report", 611 "kasan_check_range", 612 /* KASAN out-of-line */ 613 "__asan_loadN_noabort", 614 "__asan_load1_noabort", 615 "__asan_load2_noabort", 616 "__asan_load4_noabort", 617 "__asan_load8_noabort", 618 "__asan_load16_noabort", 619 "__asan_storeN_noabort", 620 "__asan_store1_noabort", 621 "__asan_store2_noabort", 622 "__asan_store4_noabort", 623 "__asan_store8_noabort", 624 "__asan_store16_noabort", 625 "__kasan_check_read", 626 "__kasan_check_write", 627 /* KASAN in-line */ 628 "__asan_report_load_n_noabort", 629 "__asan_report_load1_noabort", 630 "__asan_report_load2_noabort", 631 "__asan_report_load4_noabort", 632 "__asan_report_load8_noabort", 633 "__asan_report_load16_noabort", 634 "__asan_report_store_n_noabort", 635 "__asan_report_store1_noabort", 636 "__asan_report_store2_noabort", 637 "__asan_report_store4_noabort", 638 "__asan_report_store8_noabort", 639 "__asan_report_store16_noabort", 640 /* KCSAN */ 641 "__kcsan_check_access", 642 "kcsan_found_watchpoint", 643 "kcsan_setup_watchpoint", 644 "kcsan_check_scoped_accesses", 645 "kcsan_disable_current", 646 "kcsan_enable_current_nowarn", 647 /* KCSAN/TSAN */ 648 "__tsan_func_entry", 649 "__tsan_func_exit", 650 "__tsan_read_range", 651 "__tsan_write_range", 652 "__tsan_read1", 653 "__tsan_read2", 654 "__tsan_read4", 655 "__tsan_read8", 656 "__tsan_read16", 657 "__tsan_write1", 658 "__tsan_write2", 659 "__tsan_write4", 660 "__tsan_write8", 661 "__tsan_write16", 662 "__tsan_read_write1", 663 "__tsan_read_write2", 664 "__tsan_read_write4", 665 "__tsan_read_write8", 666 "__tsan_read_write16", 667 "__tsan_atomic8_load", 668 "__tsan_atomic16_load", 669 "__tsan_atomic32_load", 670 "__tsan_atomic64_load", 671 "__tsan_atomic8_store", 672 "__tsan_atomic16_store", 673 "__tsan_atomic32_store", 674 "__tsan_atomic64_store", 675 "__tsan_atomic8_exchange", 676 "__tsan_atomic16_exchange", 677 "__tsan_atomic32_exchange", 678 "__tsan_atomic64_exchange", 679 "__tsan_atomic8_fetch_add", 680 "__tsan_atomic16_fetch_add", 681 "__tsan_atomic32_fetch_add", 682 "__tsan_atomic64_fetch_add", 683 "__tsan_atomic8_fetch_sub", 684 "__tsan_atomic16_fetch_sub", 685 "__tsan_atomic32_fetch_sub", 686 "__tsan_atomic64_fetch_sub", 687 "__tsan_atomic8_fetch_and", 688 "__tsan_atomic16_fetch_and", 689 "__tsan_atomic32_fetch_and", 690 "__tsan_atomic64_fetch_and", 691 "__tsan_atomic8_fetch_or", 692 "__tsan_atomic16_fetch_or", 693 "__tsan_atomic32_fetch_or", 694 "__tsan_atomic64_fetch_or", 695 "__tsan_atomic8_fetch_xor", 696 "__tsan_atomic16_fetch_xor", 697 "__tsan_atomic32_fetch_xor", 698 "__tsan_atomic64_fetch_xor", 699 "__tsan_atomic8_fetch_nand", 700 "__tsan_atomic16_fetch_nand", 701 "__tsan_atomic32_fetch_nand", 702 "__tsan_atomic64_fetch_nand", 703 "__tsan_atomic8_compare_exchange_strong", 704 "__tsan_atomic16_compare_exchange_strong", 705 "__tsan_atomic32_compare_exchange_strong", 706 "__tsan_atomic64_compare_exchange_strong", 707 "__tsan_atomic8_compare_exchange_weak", 708 "__tsan_atomic16_compare_exchange_weak", 709 "__tsan_atomic32_compare_exchange_weak", 710 "__tsan_atomic64_compare_exchange_weak", 711 "__tsan_atomic8_compare_exchange_val", 712 "__tsan_atomic16_compare_exchange_val", 713 "__tsan_atomic32_compare_exchange_val", 714 "__tsan_atomic64_compare_exchange_val", 715 "__tsan_atomic_thread_fence", 716 "__tsan_atomic_signal_fence", 717 /* KCOV */ 718 "write_comp_data", 719 "check_kcov_mode", 720 "__sanitizer_cov_trace_pc", 721 "__sanitizer_cov_trace_const_cmp1", 722 
"__sanitizer_cov_trace_const_cmp2", 723 "__sanitizer_cov_trace_const_cmp4", 724 "__sanitizer_cov_trace_const_cmp8", 725 "__sanitizer_cov_trace_cmp1", 726 "__sanitizer_cov_trace_cmp2", 727 "__sanitizer_cov_trace_cmp4", 728 "__sanitizer_cov_trace_cmp8", 729 "__sanitizer_cov_trace_switch", 730 /* UBSAN */ 731 "ubsan_type_mismatch_common", 732 "__ubsan_handle_type_mismatch", 733 "__ubsan_handle_type_mismatch_v1", 734 "__ubsan_handle_shift_out_of_bounds", 735 /* misc */ 736 "csum_partial_copy_generic", 737 "copy_mc_fragile", 738 "copy_mc_fragile_handle_tail", 739 "copy_mc_enhanced_fast_string", 740 "ftrace_likely_update", /* CONFIG_TRACE_BRANCH_PROFILING */ 741 NULL 742 }; 743 744 static void add_uaccess_safe(struct objtool_file *file) 745 { 746 struct symbol *func; 747 const char **name; 748 749 if (!uaccess) 750 return; 751 752 for (name = uaccess_safe_builtin; *name; name++) { 753 func = find_symbol_by_name(file->elf, *name); 754 if (!func) 755 continue; 756 757 func->uaccess_safe = true; 758 } 759 } 760 761 /* 762 * FIXME: For now, just ignore any alternatives which add retpolines. This is 763 * a temporary hack, as it doesn't allow ORC to unwind from inside a retpoline. 764 * But it at least allows objtool to understand the control flow *around* the 765 * retpoline. 766 */ 767 static int add_ignore_alternatives(struct objtool_file *file) 768 { 769 struct section *sec; 770 struct reloc *reloc; 771 struct instruction *insn; 772 773 sec = find_section_by_name(file->elf, ".rela.discard.ignore_alts"); 774 if (!sec) 775 return 0; 776 777 list_for_each_entry(reloc, &sec->reloc_list, list) { 778 if (reloc->sym->type != STT_SECTION) { 779 WARN("unexpected relocation symbol type in %s", sec->name); 780 return -1; 781 } 782 783 insn = find_insn(file, reloc->sym->sec, reloc->addend); 784 if (!insn) { 785 WARN("bad .discard.ignore_alts entry"); 786 return -1; 787 } 788 789 insn->ignore_alts = true; 790 } 791 792 return 0; 793 } 794 795 __weak bool arch_is_retpoline(struct symbol *sym) 796 { 797 return false; 798 } 799 800 #define NEGATIVE_RELOC ((void *)-1L) 801 802 static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn) 803 { 804 if (insn->reloc == NEGATIVE_RELOC) 805 return NULL; 806 807 if (!insn->reloc) { 808 insn->reloc = find_reloc_by_dest_range(file->elf, insn->sec, 809 insn->offset, insn->len); 810 if (!insn->reloc) { 811 insn->reloc = NEGATIVE_RELOC; 812 return NULL; 813 } 814 } 815 816 return insn->reloc; 817 } 818 819 /* 820 * Find the destination instructions for all jumps. 821 */ 822 static int add_jump_destinations(struct objtool_file *file) 823 { 824 struct instruction *insn; 825 struct reloc *reloc; 826 struct section *dest_sec; 827 unsigned long dest_off; 828 829 for_each_insn(file, insn) { 830 if (!is_static_jump(insn)) 831 continue; 832 833 reloc = insn_reloc(file, insn); 834 if (!reloc) { 835 dest_sec = insn->sec; 836 dest_off = arch_jump_destination(insn); 837 } else if (reloc->sym->type == STT_SECTION) { 838 dest_sec = reloc->sym->sec; 839 dest_off = arch_dest_reloc_offset(reloc->addend); 840 } else if (arch_is_retpoline(reloc->sym)) { 841 /* 842 * Retpoline jumps are really dynamic jumps in 843 * disguise, so convert them accordingly. 
844 */ 845 if (insn->type == INSN_JUMP_UNCONDITIONAL) 846 insn->type = INSN_JUMP_DYNAMIC; 847 else 848 insn->type = INSN_JUMP_DYNAMIC_CONDITIONAL; 849 850 list_add_tail(&insn->call_node, 851 &file->retpoline_call_list); 852 853 insn->retpoline_safe = true; 854 continue; 855 } else if (insn->func) { 856 /* internal or external sibling call (with reloc) */ 857 insn->call_dest = reloc->sym; 858 if (insn->call_dest->static_call_tramp) { 859 list_add_tail(&insn->call_node, 860 &file->static_call_list); 861 } 862 continue; 863 } else if (reloc->sym->sec->idx) { 864 dest_sec = reloc->sym->sec; 865 dest_off = reloc->sym->sym.st_value + 866 arch_dest_reloc_offset(reloc->addend); 867 } else { 868 /* non-func asm code jumping to another file */ 869 continue; 870 } 871 872 insn->jump_dest = find_insn(file, dest_sec, dest_off); 873 if (!insn->jump_dest) { 874 875 /* 876 * This is a special case where an alt instruction 877 * jumps past the end of the section. These are 878 * handled later in handle_group_alt(). 879 */ 880 if (!strcmp(insn->sec->name, ".altinstr_replacement")) 881 continue; 882 883 WARN_FUNC("can't find jump dest instruction at %s+0x%lx", 884 insn->sec, insn->offset, dest_sec->name, 885 dest_off); 886 return -1; 887 } 888 889 /* 890 * Cross-function jump. 891 */ 892 if (insn->func && insn->jump_dest->func && 893 insn->func != insn->jump_dest->func) { 894 895 /* 896 * For GCC 8+, create parent/child links for any cold 897 * subfunctions. This is _mostly_ redundant with a 898 * similar initialization in read_symbols(). 899 * 900 * If a function has aliases, we want the *first* such 901 * function in the symbol table to be the subfunction's 902 * parent. In that case we overwrite the 903 * initialization done in read_symbols(). 904 * 905 * However this code can't completely replace the 906 * read_symbols() code because this doesn't detect the 907 * case where the parent function's only reference to a 908 * subfunction is through a jump table. 909 */ 910 if (!strstr(insn->func->name, ".cold") && 911 strstr(insn->jump_dest->func->name, ".cold")) { 912 insn->func->cfunc = insn->jump_dest->func; 913 insn->jump_dest->func->pfunc = insn->func; 914 915 } else if (insn->jump_dest->func->pfunc != insn->func->pfunc && 916 insn->jump_dest->offset == insn->jump_dest->func->offset) { 917 918 /* internal sibling call (without reloc) */ 919 insn->call_dest = insn->jump_dest->func; 920 if (insn->call_dest->static_call_tramp) { 921 list_add_tail(&insn->call_node, 922 &file->static_call_list); 923 } 924 } 925 } 926 } 927 928 return 0; 929 } 930 931 static void remove_insn_ops(struct instruction *insn) 932 { 933 struct stack_op *op, *tmp; 934 935 list_for_each_entry_safe(op, tmp, &insn->stack_ops, list) { 936 list_del(&op->list); 937 free(op); 938 } 939 } 940 941 static struct symbol *find_call_destination(struct section *sec, unsigned long offset) 942 { 943 struct symbol *call_dest; 944 945 call_dest = find_func_by_offset(sec, offset); 946 if (!call_dest) 947 call_dest = find_symbol_by_offset(sec, offset); 948 949 return call_dest; 950 } 951 952 /* 953 * Find the destination instructions for all calls. 
954 */ 955 static int add_call_destinations(struct objtool_file *file) 956 { 957 struct instruction *insn; 958 unsigned long dest_off; 959 struct reloc *reloc; 960 961 for_each_insn(file, insn) { 962 if (insn->type != INSN_CALL) 963 continue; 964 965 reloc = insn_reloc(file, insn); 966 if (!reloc) { 967 dest_off = arch_jump_destination(insn); 968 insn->call_dest = find_call_destination(insn->sec, dest_off); 969 970 if (insn->ignore) 971 continue; 972 973 if (!insn->call_dest) { 974 WARN_FUNC("unannotated intra-function call", insn->sec, insn->offset); 975 return -1; 976 } 977 978 if (insn->func && insn->call_dest->type != STT_FUNC) { 979 WARN_FUNC("unsupported call to non-function", 980 insn->sec, insn->offset); 981 return -1; 982 } 983 984 } else if (reloc->sym->type == STT_SECTION) { 985 dest_off = arch_dest_reloc_offset(reloc->addend); 986 insn->call_dest = find_call_destination(reloc->sym->sec, 987 dest_off); 988 if (!insn->call_dest) { 989 WARN_FUNC("can't find call dest symbol at %s+0x%lx", 990 insn->sec, insn->offset, 991 reloc->sym->sec->name, 992 dest_off); 993 return -1; 994 } 995 996 } else if (arch_is_retpoline(reloc->sym)) { 997 /* 998 * Retpoline calls are really dynamic calls in 999 * disguise, so convert them accordingly. 1000 */ 1001 insn->type = INSN_CALL_DYNAMIC; 1002 insn->retpoline_safe = true; 1003 1004 list_add_tail(&insn->call_node, 1005 &file->retpoline_call_list); 1006 1007 remove_insn_ops(insn); 1008 continue; 1009 1010 } else 1011 insn->call_dest = reloc->sym; 1012 1013 if (insn->call_dest && insn->call_dest->static_call_tramp) { 1014 list_add_tail(&insn->call_node, 1015 &file->static_call_list); 1016 } 1017 1018 /* 1019 * Many compilers cannot disable KCOV with a function attribute 1020 * so they need a little help, NOP out any KCOV calls from noinstr 1021 * text. 1022 */ 1023 if (insn->sec->noinstr && 1024 !strncmp(insn->call_dest->name, "__sanitizer_cov_", 16)) { 1025 if (reloc) { 1026 reloc->type = R_NONE; 1027 elf_write_reloc(file->elf, reloc); 1028 } 1029 1030 elf_write_insn(file->elf, insn->sec, 1031 insn->offset, insn->len, 1032 arch_nop_insn(insn->len)); 1033 insn->type = INSN_NOP; 1034 } 1035 1036 if (mcount && !strcmp(insn->call_dest->name, "__fentry__")) { 1037 if (reloc) { 1038 reloc->type = R_NONE; 1039 elf_write_reloc(file->elf, reloc); 1040 } 1041 1042 elf_write_insn(file->elf, insn->sec, 1043 insn->offset, insn->len, 1044 arch_nop_insn(insn->len)); 1045 1046 insn->type = INSN_NOP; 1047 1048 list_add_tail(&insn->mcount_loc_node, 1049 &file->mcount_loc_list); 1050 } 1051 1052 /* 1053 * Whatever stack impact regular CALLs have, should be undone 1054 * by the RETURN of the called function. 1055 * 1056 * Annotated intra-function calls retain the stack_ops but 1057 * are converted to JUMP, see read_intra_function_calls(). 1058 */ 1059 remove_insn_ops(insn); 1060 } 1061 1062 return 0; 1063 } 1064 1065 /* 1066 * The .alternatives section requires some extra special care over and above 1067 * other special sections because alternatives are patched in place. 
1068 */ 1069 static int handle_group_alt(struct objtool_file *file, 1070 struct special_alt *special_alt, 1071 struct instruction *orig_insn, 1072 struct instruction **new_insn) 1073 { 1074 struct instruction *last_orig_insn, *last_new_insn = NULL, *insn, *nop = NULL; 1075 struct alt_group *orig_alt_group, *new_alt_group; 1076 unsigned long dest_off; 1077 1078 1079 orig_alt_group = malloc(sizeof(*orig_alt_group)); 1080 if (!orig_alt_group) { 1081 WARN("malloc failed"); 1082 return -1; 1083 } 1084 orig_alt_group->cfi = calloc(special_alt->orig_len, 1085 sizeof(struct cfi_state *)); 1086 if (!orig_alt_group->cfi) { 1087 WARN("calloc failed"); 1088 return -1; 1089 } 1090 1091 last_orig_insn = NULL; 1092 insn = orig_insn; 1093 sec_for_each_insn_from(file, insn) { 1094 if (insn->offset >= special_alt->orig_off + special_alt->orig_len) 1095 break; 1096 1097 insn->alt_group = orig_alt_group; 1098 last_orig_insn = insn; 1099 } 1100 orig_alt_group->orig_group = NULL; 1101 orig_alt_group->first_insn = orig_insn; 1102 orig_alt_group->last_insn = last_orig_insn; 1103 1104 1105 new_alt_group = malloc(sizeof(*new_alt_group)); 1106 if (!new_alt_group) { 1107 WARN("malloc failed"); 1108 return -1; 1109 } 1110 1111 if (special_alt->new_len < special_alt->orig_len) { 1112 /* 1113 * Insert a fake nop at the end to make the replacement 1114 * alt_group the same size as the original. This is needed to 1115 * allow propagate_alt_cfi() to do its magic. When the last 1116 * instruction affects the stack, the instruction after it (the 1117 * nop) will propagate the new state to the shared CFI array. 1118 */ 1119 nop = malloc(sizeof(*nop)); 1120 if (!nop) { 1121 WARN("malloc failed"); 1122 return -1; 1123 } 1124 memset(nop, 0, sizeof(*nop)); 1125 INIT_LIST_HEAD(&nop->alts); 1126 INIT_LIST_HEAD(&nop->stack_ops); 1127 init_cfi_state(&nop->cfi); 1128 1129 nop->sec = special_alt->new_sec; 1130 nop->offset = special_alt->new_off + special_alt->new_len; 1131 nop->len = special_alt->orig_len - special_alt->new_len; 1132 nop->type = INSN_NOP; 1133 nop->func = orig_insn->func; 1134 nop->alt_group = new_alt_group; 1135 nop->ignore = orig_insn->ignore_alts; 1136 } 1137 1138 if (!special_alt->new_len) { 1139 *new_insn = nop; 1140 goto end; 1141 } 1142 1143 insn = *new_insn; 1144 sec_for_each_insn_from(file, insn) { 1145 struct reloc *alt_reloc; 1146 1147 if (insn->offset >= special_alt->new_off + special_alt->new_len) 1148 break; 1149 1150 last_new_insn = insn; 1151 1152 insn->ignore = orig_insn->ignore_alts; 1153 insn->func = orig_insn->func; 1154 insn->alt_group = new_alt_group; 1155 1156 /* 1157 * Since alternative replacement code is copy/pasted by the 1158 * kernel after applying relocations, generally such code can't 1159 * have relative-address relocation references to outside the 1160 * .altinstr_replacement section, unless the arch's 1161 * alternatives code can adjust the relative offsets 1162 * accordingly. 
1163 */ 1164 alt_reloc = insn_reloc(file, insn); 1165 if (alt_reloc && 1166 !arch_support_alt_relocation(special_alt, insn, alt_reloc)) { 1167 1168 WARN_FUNC("unsupported relocation in alternatives section", 1169 insn->sec, insn->offset); 1170 return -1; 1171 } 1172 1173 if (!is_static_jump(insn)) 1174 continue; 1175 1176 if (!insn->immediate) 1177 continue; 1178 1179 dest_off = arch_jump_destination(insn); 1180 if (dest_off == special_alt->new_off + special_alt->new_len) 1181 insn->jump_dest = next_insn_same_sec(file, last_orig_insn); 1182 1183 if (!insn->jump_dest) { 1184 WARN_FUNC("can't find alternative jump destination", 1185 insn->sec, insn->offset); 1186 return -1; 1187 } 1188 } 1189 1190 if (!last_new_insn) { 1191 WARN_FUNC("can't find last new alternative instruction", 1192 special_alt->new_sec, special_alt->new_off); 1193 return -1; 1194 } 1195 1196 if (nop) 1197 list_add(&nop->list, &last_new_insn->list); 1198 end: 1199 new_alt_group->orig_group = orig_alt_group; 1200 new_alt_group->first_insn = *new_insn; 1201 new_alt_group->last_insn = nop ? : last_new_insn; 1202 new_alt_group->cfi = orig_alt_group->cfi; 1203 return 0; 1204 } 1205 1206 /* 1207 * A jump table entry can either convert a nop to a jump or a jump to a nop. 1208 * If the original instruction is a jump, make the alt entry an effective nop 1209 * by just skipping the original instruction. 1210 */ 1211 static int handle_jump_alt(struct objtool_file *file, 1212 struct special_alt *special_alt, 1213 struct instruction *orig_insn, 1214 struct instruction **new_insn) 1215 { 1216 if (orig_insn->type == INSN_NOP) 1217 return 0; 1218 1219 if (orig_insn->type != INSN_JUMP_UNCONDITIONAL) { 1220 WARN_FUNC("unsupported instruction at jump label", 1221 orig_insn->sec, orig_insn->offset); 1222 return -1; 1223 } 1224 1225 *new_insn = list_next_entry(orig_insn, list); 1226 return 0; 1227 } 1228 1229 /* 1230 * Read all the special sections which have alternate instructions which can be 1231 * patched in or redirected to at runtime. Each instruction having alternate 1232 * instruction(s) has them added to its insn->alts list, which will be 1233 * traversed in validate_branch(). 
1234 */ 1235 static int add_special_section_alts(struct objtool_file *file) 1236 { 1237 struct list_head special_alts; 1238 struct instruction *orig_insn, *new_insn; 1239 struct special_alt *special_alt, *tmp; 1240 struct alternative *alt; 1241 int ret; 1242 1243 ret = special_get_alts(file->elf, &special_alts); 1244 if (ret) 1245 return ret; 1246 1247 list_for_each_entry_safe(special_alt, tmp, &special_alts, list) { 1248 1249 orig_insn = find_insn(file, special_alt->orig_sec, 1250 special_alt->orig_off); 1251 if (!orig_insn) { 1252 WARN_FUNC("special: can't find orig instruction", 1253 special_alt->orig_sec, special_alt->orig_off); 1254 ret = -1; 1255 goto out; 1256 } 1257 1258 new_insn = NULL; 1259 if (!special_alt->group || special_alt->new_len) { 1260 new_insn = find_insn(file, special_alt->new_sec, 1261 special_alt->new_off); 1262 if (!new_insn) { 1263 WARN_FUNC("special: can't find new instruction", 1264 special_alt->new_sec, 1265 special_alt->new_off); 1266 ret = -1; 1267 goto out; 1268 } 1269 } 1270 1271 if (special_alt->group) { 1272 if (!special_alt->orig_len) { 1273 WARN_FUNC("empty alternative entry", 1274 orig_insn->sec, orig_insn->offset); 1275 continue; 1276 } 1277 1278 ret = handle_group_alt(file, special_alt, orig_insn, 1279 &new_insn); 1280 if (ret) 1281 goto out; 1282 } else if (special_alt->jump_or_nop) { 1283 ret = handle_jump_alt(file, special_alt, orig_insn, 1284 &new_insn); 1285 if (ret) 1286 goto out; 1287 } 1288 1289 alt = malloc(sizeof(*alt)); 1290 if (!alt) { 1291 WARN("malloc failed"); 1292 ret = -1; 1293 goto out; 1294 } 1295 1296 alt->insn = new_insn; 1297 alt->skip_orig = special_alt->skip_orig; 1298 orig_insn->ignore_alts |= special_alt->skip_alt; 1299 list_add_tail(&alt->list, &orig_insn->alts); 1300 1301 list_del(&special_alt->list); 1302 free(special_alt); 1303 } 1304 1305 out: 1306 return ret; 1307 } 1308 1309 static int add_jump_table(struct objtool_file *file, struct instruction *insn, 1310 struct reloc *table) 1311 { 1312 struct reloc *reloc = table; 1313 struct instruction *dest_insn; 1314 struct alternative *alt; 1315 struct symbol *pfunc = insn->func->pfunc; 1316 unsigned int prev_offset = 0; 1317 1318 /* 1319 * Each @reloc is a switch table relocation which points to the target 1320 * instruction. 1321 */ 1322 list_for_each_entry_from(reloc, &table->sec->reloc_list, list) { 1323 1324 /* Check for the end of the table: */ 1325 if (reloc != table && reloc->jump_table_start) 1326 break; 1327 1328 /* Make sure the table entries are consecutive: */ 1329 if (prev_offset && reloc->offset != prev_offset + 8) 1330 break; 1331 1332 /* Detect function pointers from contiguous objects: */ 1333 if (reloc->sym->sec == pfunc->sec && 1334 reloc->addend == pfunc->offset) 1335 break; 1336 1337 dest_insn = find_insn(file, reloc->sym->sec, reloc->addend); 1338 if (!dest_insn) 1339 break; 1340 1341 /* Make sure the destination is in the same function: */ 1342 if (!dest_insn->func || dest_insn->func->pfunc != pfunc) 1343 break; 1344 1345 alt = malloc(sizeof(*alt)); 1346 if (!alt) { 1347 WARN("malloc failed"); 1348 return -1; 1349 } 1350 1351 alt->insn = dest_insn; 1352 list_add_tail(&alt->list, &insn->alts); 1353 prev_offset = reloc->offset; 1354 } 1355 1356 if (!prev_offset) { 1357 WARN_FUNC("can't find switch jump table", 1358 insn->sec, insn->offset); 1359 return -1; 1360 } 1361 1362 return 0; 1363 } 1364 1365 /* 1366 * find_jump_table() - Given a dynamic jump, find the switch jump table 1367 * associated with it. 
1368 */ 1369 static struct reloc *find_jump_table(struct objtool_file *file, 1370 struct symbol *func, 1371 struct instruction *insn) 1372 { 1373 struct reloc *table_reloc; 1374 struct instruction *dest_insn, *orig_insn = insn; 1375 1376 /* 1377 * Backward search using the @first_jump_src links, these help avoid 1378 * much of the 'in between' code. Which avoids us getting confused by 1379 * it. 1380 */ 1381 for (; 1382 insn && insn->func && insn->func->pfunc == func; 1383 insn = insn->first_jump_src ?: prev_insn_same_sym(file, insn)) { 1384 1385 if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC) 1386 break; 1387 1388 /* allow small jumps within the range */ 1389 if (insn->type == INSN_JUMP_UNCONDITIONAL && 1390 insn->jump_dest && 1391 (insn->jump_dest->offset <= insn->offset || 1392 insn->jump_dest->offset > orig_insn->offset)) 1393 break; 1394 1395 table_reloc = arch_find_switch_table(file, insn); 1396 if (!table_reloc) 1397 continue; 1398 dest_insn = find_insn(file, table_reloc->sym->sec, table_reloc->addend); 1399 if (!dest_insn || !dest_insn->func || dest_insn->func->pfunc != func) 1400 continue; 1401 1402 return table_reloc; 1403 } 1404 1405 return NULL; 1406 } 1407 1408 /* 1409 * First pass: Mark the head of each jump table so that in the next pass, 1410 * we know when a given jump table ends and the next one starts. 1411 */ 1412 static void mark_func_jump_tables(struct objtool_file *file, 1413 struct symbol *func) 1414 { 1415 struct instruction *insn, *last = NULL; 1416 struct reloc *reloc; 1417 1418 func_for_each_insn(file, func, insn) { 1419 if (!last) 1420 last = insn; 1421 1422 /* 1423 * Store back-pointers for unconditional forward jumps such 1424 * that find_jump_table() can back-track using those and 1425 * avoid some potentially confusing code. 1426 */ 1427 if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest && 1428 insn->offset > last->offset && 1429 insn->jump_dest->offset > insn->offset && 1430 !insn->jump_dest->first_jump_src) { 1431 1432 insn->jump_dest->first_jump_src = insn; 1433 last = insn->jump_dest; 1434 } 1435 1436 if (insn->type != INSN_JUMP_DYNAMIC) 1437 continue; 1438 1439 reloc = find_jump_table(file, func, insn); 1440 if (reloc) { 1441 reloc->jump_table_start = true; 1442 insn->jump_table = reloc; 1443 } 1444 } 1445 } 1446 1447 static int add_func_jump_tables(struct objtool_file *file, 1448 struct symbol *func) 1449 { 1450 struct instruction *insn; 1451 int ret; 1452 1453 func_for_each_insn(file, func, insn) { 1454 if (!insn->jump_table) 1455 continue; 1456 1457 ret = add_jump_table(file, insn, insn->jump_table); 1458 if (ret) 1459 return ret; 1460 } 1461 1462 return 0; 1463 } 1464 1465 /* 1466 * For some switch statements, gcc generates a jump table in the .rodata 1467 * section which contains a list of addresses within the function to jump to. 1468 * This finds these jump tables and adds them to the insn->alts lists. 
1469 */ 1470 static int add_jump_table_alts(struct objtool_file *file) 1471 { 1472 struct section *sec; 1473 struct symbol *func; 1474 int ret; 1475 1476 if (!file->rodata) 1477 return 0; 1478 1479 for_each_sec(file, sec) { 1480 list_for_each_entry(func, &sec->symbol_list, list) { 1481 if (func->type != STT_FUNC) 1482 continue; 1483 1484 mark_func_jump_tables(file, func); 1485 ret = add_func_jump_tables(file, func); 1486 if (ret) 1487 return ret; 1488 } 1489 } 1490 1491 return 0; 1492 } 1493 1494 static void set_func_state(struct cfi_state *state) 1495 { 1496 state->cfa = initial_func_cfi.cfa; 1497 memcpy(&state->regs, &initial_func_cfi.regs, 1498 CFI_NUM_REGS * sizeof(struct cfi_reg)); 1499 state->stack_size = initial_func_cfi.cfa.offset; 1500 } 1501 1502 static int read_unwind_hints(struct objtool_file *file) 1503 { 1504 struct section *sec, *relocsec; 1505 struct reloc *reloc; 1506 struct unwind_hint *hint; 1507 struct instruction *insn; 1508 int i; 1509 1510 sec = find_section_by_name(file->elf, ".discard.unwind_hints"); 1511 if (!sec) 1512 return 0; 1513 1514 relocsec = sec->reloc; 1515 if (!relocsec) { 1516 WARN("missing .rela.discard.unwind_hints section"); 1517 return -1; 1518 } 1519 1520 if (sec->len % sizeof(struct unwind_hint)) { 1521 WARN("struct unwind_hint size mismatch"); 1522 return -1; 1523 } 1524 1525 file->hints = true; 1526 1527 for (i = 0; i < sec->len / sizeof(struct unwind_hint); i++) { 1528 hint = (struct unwind_hint *)sec->data->d_buf + i; 1529 1530 reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint)); 1531 if (!reloc) { 1532 WARN("can't find reloc for unwind_hints[%d]", i); 1533 return -1; 1534 } 1535 1536 insn = find_insn(file, reloc->sym->sec, reloc->addend); 1537 if (!insn) { 1538 WARN("can't find insn for unwind_hints[%d]", i); 1539 return -1; 1540 } 1541 1542 insn->hint = true; 1543 1544 if (hint->type == UNWIND_HINT_TYPE_FUNC) { 1545 set_func_state(&insn->cfi); 1546 continue; 1547 } 1548 1549 if (arch_decode_hint_reg(insn, hint->sp_reg)) { 1550 WARN_FUNC("unsupported unwind_hint sp base reg %d", 1551 insn->sec, insn->offset, hint->sp_reg); 1552 return -1; 1553 } 1554 1555 insn->cfi.cfa.offset = bswap_if_needed(hint->sp_offset); 1556 insn->cfi.type = hint->type; 1557 insn->cfi.end = hint->end; 1558 } 1559 1560 return 0; 1561 } 1562 1563 static int read_retpoline_hints(struct objtool_file *file) 1564 { 1565 struct section *sec; 1566 struct instruction *insn; 1567 struct reloc *reloc; 1568 1569 sec = find_section_by_name(file->elf, ".rela.discard.retpoline_safe"); 1570 if (!sec) 1571 return 0; 1572 1573 list_for_each_entry(reloc, &sec->reloc_list, list) { 1574 if (reloc->sym->type != STT_SECTION) { 1575 WARN("unexpected relocation symbol type in %s", sec->name); 1576 return -1; 1577 } 1578 1579 insn = find_insn(file, reloc->sym->sec, reloc->addend); 1580 if (!insn) { 1581 WARN("bad .discard.retpoline_safe entry"); 1582 return -1; 1583 } 1584 1585 if (insn->type != INSN_JUMP_DYNAMIC && 1586 insn->type != INSN_CALL_DYNAMIC) { 1587 WARN_FUNC("retpoline_safe hint not an indirect jump/call", 1588 insn->sec, insn->offset); 1589 return -1; 1590 } 1591 1592 insn->retpoline_safe = true; 1593 } 1594 1595 return 0; 1596 } 1597 1598 static int read_instr_hints(struct objtool_file *file) 1599 { 1600 struct section *sec; 1601 struct instruction *insn; 1602 struct reloc *reloc; 1603 1604 sec = find_section_by_name(file->elf, ".rela.discard.instr_end"); 1605 if (!sec) 1606 return 0; 1607 1608 list_for_each_entry(reloc, &sec->reloc_list, list) { 1609 if 
static int read_instr_hints(struct objtool_file *file)
{
	struct section *sec;
	struct instruction *insn;
	struct reloc *reloc;

	sec = find_section_by_name(file->elf, ".rela.discard.instr_end");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("bad .discard.instr_end entry");
			return -1;
		}

		insn->instr--;
	}

	sec = find_section_by_name(file->elf, ".rela.discard.instr_begin");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("bad .discard.instr_begin entry");
			return -1;
		}

		insn->instr++;
	}

	return 0;
}

static int read_intra_function_calls(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	struct reloc *reloc;

	sec = find_section_by_name(file->elf, ".rela.discard.intra_function_calls");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		unsigned long dest_off;

		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s",
			     sec->name);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("bad .discard.intra_function_call entry");
			return -1;
		}

		if (insn->type != INSN_CALL) {
			WARN_FUNC("intra_function_call not a direct call",
				  insn->sec, insn->offset);
			return -1;
		}

		/*
		 * Treat intra-function CALLs as JMPs, but with a stack_op.
		 * See add_call_destinations(), which strips stack_ops from
		 * normal CALLs.
		 */
		insn->type = INSN_JUMP_UNCONDITIONAL;

		dest_off = insn->offset + insn->len + insn->immediate;
		insn->jump_dest = find_insn(file, insn->sec, dest_off);
		if (!insn->jump_dest) {
			WARN_FUNC("can't find call dest at %s+0x%lx",
				  insn->sec, insn->offset,
				  insn->sec->name, dest_off);
			return -1;
		}
	}

	return 0;
}

static int read_static_call_tramps(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *func;

	for_each_sec(file, sec) {
		list_for_each_entry(func, &sec->symbol_list, list) {
			if (func->bind == STB_GLOBAL &&
			    !strncmp(func->name, STATIC_CALL_TRAMP_PREFIX_STR,
				     strlen(STATIC_CALL_TRAMP_PREFIX_STR)))
				func->static_call_tramp = true;
		}
	}

	return 0;
}

static void mark_rodata(struct objtool_file *file)
{
	struct section *sec;
	bool found = false;

	/*
	 * Search for the following rodata sections, each of which can
	 * potentially contain jump tables:
	 *
	 * - .rodata: can contain GCC switch tables
	 * - .rodata.<func>: same, if -fdata-sections is being used
	 * - .rodata..c_jump_table: contains C annotated jump tables
	 *
	 * .rodata.str1.* sections are ignored; they don't contain jump tables.
1727 */ 1728 for_each_sec(file, sec) { 1729 if (!strncmp(sec->name, ".rodata", 7) && 1730 !strstr(sec->name, ".str1.")) { 1731 sec->rodata = true; 1732 found = true; 1733 } 1734 } 1735 1736 file->rodata = found; 1737 } 1738 1739 __weak int arch_rewrite_retpolines(struct objtool_file *file) 1740 { 1741 return 0; 1742 } 1743 1744 static int decode_sections(struct objtool_file *file) 1745 { 1746 int ret; 1747 1748 mark_rodata(file); 1749 1750 ret = decode_instructions(file); 1751 if (ret) 1752 return ret; 1753 1754 ret = add_dead_ends(file); 1755 if (ret) 1756 return ret; 1757 1758 add_ignores(file); 1759 add_uaccess_safe(file); 1760 1761 ret = add_ignore_alternatives(file); 1762 if (ret) 1763 return ret; 1764 1765 /* 1766 * Must be before add_{jump_call}_destination. 1767 */ 1768 ret = read_static_call_tramps(file); 1769 if (ret) 1770 return ret; 1771 1772 /* 1773 * Must be before add_special_section_alts() as that depends on 1774 * jump_dest being set. 1775 */ 1776 ret = add_jump_destinations(file); 1777 if (ret) 1778 return ret; 1779 1780 ret = add_special_section_alts(file); 1781 if (ret) 1782 return ret; 1783 1784 /* 1785 * Must be before add_call_destination(); it changes INSN_CALL to 1786 * INSN_JUMP. 1787 */ 1788 ret = read_intra_function_calls(file); 1789 if (ret) 1790 return ret; 1791 1792 ret = add_call_destinations(file); 1793 if (ret) 1794 return ret; 1795 1796 ret = add_jump_table_alts(file); 1797 if (ret) 1798 return ret; 1799 1800 ret = read_unwind_hints(file); 1801 if (ret) 1802 return ret; 1803 1804 ret = read_retpoline_hints(file); 1805 if (ret) 1806 return ret; 1807 1808 ret = read_instr_hints(file); 1809 if (ret) 1810 return ret; 1811 1812 /* 1813 * Must be after add_special_section_alts(), since this will emit 1814 * alternatives. Must be after add_{jump,call}_destination(), since 1815 * those create the call insn lists. 
1816 */ 1817 ret = arch_rewrite_retpolines(file); 1818 if (ret) 1819 return ret; 1820 1821 return 0; 1822 } 1823 1824 static bool is_fentry_call(struct instruction *insn) 1825 { 1826 if (insn->type == INSN_CALL && insn->call_dest && 1827 insn->call_dest->type == STT_NOTYPE && 1828 !strcmp(insn->call_dest->name, "__fentry__")) 1829 return true; 1830 1831 return false; 1832 } 1833 1834 static bool has_modified_stack_frame(struct instruction *insn, struct insn_state *state) 1835 { 1836 struct cfi_state *cfi = &state->cfi; 1837 int i; 1838 1839 if (cfi->cfa.base != initial_func_cfi.cfa.base || cfi->drap) 1840 return true; 1841 1842 if (cfi->cfa.offset != initial_func_cfi.cfa.offset) 1843 return true; 1844 1845 if (cfi->stack_size != initial_func_cfi.cfa.offset) 1846 return true; 1847 1848 for (i = 0; i < CFI_NUM_REGS; i++) { 1849 if (cfi->regs[i].base != initial_func_cfi.regs[i].base || 1850 cfi->regs[i].offset != initial_func_cfi.regs[i].offset) 1851 return true; 1852 } 1853 1854 return false; 1855 } 1856 1857 static bool check_reg_frame_pos(const struct cfi_reg *reg, 1858 int expected_offset) 1859 { 1860 return reg->base == CFI_CFA && 1861 reg->offset == expected_offset; 1862 } 1863 1864 static bool has_valid_stack_frame(struct insn_state *state) 1865 { 1866 struct cfi_state *cfi = &state->cfi; 1867 1868 if (cfi->cfa.base == CFI_BP && 1869 check_reg_frame_pos(&cfi->regs[CFI_BP], -cfi->cfa.offset) && 1870 check_reg_frame_pos(&cfi->regs[CFI_RA], -cfi->cfa.offset + 8)) 1871 return true; 1872 1873 if (cfi->drap && cfi->regs[CFI_BP].base == CFI_BP) 1874 return true; 1875 1876 return false; 1877 } 1878 1879 static int update_cfi_state_regs(struct instruction *insn, 1880 struct cfi_state *cfi, 1881 struct stack_op *op) 1882 { 1883 struct cfi_reg *cfa = &cfi->cfa; 1884 1885 if (cfa->base != CFI_SP && cfa->base != CFI_SP_INDIRECT) 1886 return 0; 1887 1888 /* push */ 1889 if (op->dest.type == OP_DEST_PUSH || op->dest.type == OP_DEST_PUSHF) 1890 cfa->offset += 8; 1891 1892 /* pop */ 1893 if (op->src.type == OP_SRC_POP || op->src.type == OP_SRC_POPF) 1894 cfa->offset -= 8; 1895 1896 /* add immediate to sp */ 1897 if (op->dest.type == OP_DEST_REG && op->src.type == OP_SRC_ADD && 1898 op->dest.reg == CFI_SP && op->src.reg == CFI_SP) 1899 cfa->offset -= op->src.offset; 1900 1901 return 0; 1902 } 1903 1904 static void save_reg(struct cfi_state *cfi, unsigned char reg, int base, int offset) 1905 { 1906 if (arch_callee_saved_reg(reg) && 1907 cfi->regs[reg].base == CFI_UNDEFINED) { 1908 cfi->regs[reg].base = base; 1909 cfi->regs[reg].offset = offset; 1910 } 1911 } 1912 1913 static void restore_reg(struct cfi_state *cfi, unsigned char reg) 1914 { 1915 cfi->regs[reg].base = initial_func_cfi.regs[reg].base; 1916 cfi->regs[reg].offset = initial_func_cfi.regs[reg].offset; 1917 } 1918 1919 /* 1920 * A note about DRAP stack alignment: 1921 * 1922 * GCC has the concept of a DRAP register, which is used to help keep track of 1923 * the stack pointer when aligning the stack. r10 or r13 is used as the DRAP 1924 * register. The typical DRAP pattern is: 1925 * 1926 * 4c 8d 54 24 08 lea 0x8(%rsp),%r10 1927 * 48 83 e4 c0 and $0xffffffffffffffc0,%rsp 1928 * 41 ff 72 f8 pushq -0x8(%r10) 1929 * 55 push %rbp 1930 * 48 89 e5 mov %rsp,%rbp 1931 * (more pushes) 1932 * 41 52 push %r10 1933 * ... 
1934 * 41 5a pop %r10 1935 * (more pops) 1936 * 5d pop %rbp 1937 * 49 8d 62 f8 lea -0x8(%r10),%rsp 1938 * c3 retq 1939 * 1940 * There are some variations in the epilogues, like: 1941 * 1942 * 5b pop %rbx 1943 * 41 5a pop %r10 1944 * 41 5c pop %r12 1945 * 41 5d pop %r13 1946 * 41 5e pop %r14 1947 * c9 leaveq 1948 * 49 8d 62 f8 lea -0x8(%r10),%rsp 1949 * c3 retq 1950 * 1951 * and: 1952 * 1953 * 4c 8b 55 e8 mov -0x18(%rbp),%r10 1954 * 48 8b 5d e0 mov -0x20(%rbp),%rbx 1955 * 4c 8b 65 f0 mov -0x10(%rbp),%r12 1956 * 4c 8b 6d f8 mov -0x8(%rbp),%r13 1957 * c9 leaveq 1958 * 49 8d 62 f8 lea -0x8(%r10),%rsp 1959 * c3 retq 1960 * 1961 * Sometimes r13 is used as the DRAP register, in which case it's saved and 1962 * restored beforehand: 1963 * 1964 * 41 55 push %r13 1965 * 4c 8d 6c 24 10 lea 0x10(%rsp),%r13 1966 * 48 83 e4 f0 and $0xfffffffffffffff0,%rsp 1967 * ... 1968 * 49 8d 65 f0 lea -0x10(%r13),%rsp 1969 * 41 5d pop %r13 1970 * c3 retq 1971 */ 1972 static int update_cfi_state(struct instruction *insn, struct cfi_state *cfi, 1973 struct stack_op *op) 1974 { 1975 struct cfi_reg *cfa = &cfi->cfa; 1976 struct cfi_reg *regs = cfi->regs; 1977 1978 /* stack operations don't make sense with an undefined CFA */ 1979 if (cfa->base == CFI_UNDEFINED) { 1980 if (insn->func) { 1981 WARN_FUNC("undefined stack state", insn->sec, insn->offset); 1982 return -1; 1983 } 1984 return 0; 1985 } 1986 1987 if (cfi->type == UNWIND_HINT_TYPE_REGS || 1988 cfi->type == UNWIND_HINT_TYPE_REGS_PARTIAL) 1989 return update_cfi_state_regs(insn, cfi, op); 1990 1991 switch (op->dest.type) { 1992 1993 case OP_DEST_REG: 1994 switch (op->src.type) { 1995 1996 case OP_SRC_REG: 1997 if (op->src.reg == CFI_SP && op->dest.reg == CFI_BP && 1998 cfa->base == CFI_SP && 1999 check_reg_frame_pos(®s[CFI_BP], -cfa->offset)) { 2000 2001 /* mov %rsp, %rbp */ 2002 cfa->base = op->dest.reg; 2003 cfi->bp_scratch = false; 2004 } 2005 2006 else if (op->src.reg == CFI_SP && 2007 op->dest.reg == CFI_BP && cfi->drap) { 2008 2009 /* drap: mov %rsp, %rbp */ 2010 regs[CFI_BP].base = CFI_BP; 2011 regs[CFI_BP].offset = -cfi->stack_size; 2012 cfi->bp_scratch = false; 2013 } 2014 2015 else if (op->src.reg == CFI_SP && cfa->base == CFI_SP) { 2016 2017 /* 2018 * mov %rsp, %reg 2019 * 2020 * This is needed for the rare case where GCC 2021 * does: 2022 * 2023 * mov %rsp, %rax 2024 * ... 2025 * mov %rax, %rsp 2026 */ 2027 cfi->vals[op->dest.reg].base = CFI_CFA; 2028 cfi->vals[op->dest.reg].offset = -cfi->stack_size; 2029 } 2030 2031 else if (op->src.reg == CFI_BP && op->dest.reg == CFI_SP && 2032 cfa->base == CFI_BP) { 2033 2034 /* 2035 * mov %rbp, %rsp 2036 * 2037 * Restore the original stack pointer (Clang). 2038 */ 2039 cfi->stack_size = -cfi->regs[CFI_BP].offset; 2040 } 2041 2042 else if (op->dest.reg == cfa->base) { 2043 2044 /* mov %reg, %rsp */ 2045 if (cfa->base == CFI_SP && 2046 cfi->vals[op->src.reg].base == CFI_CFA) { 2047 2048 /* 2049 * This is needed for the rare case 2050 * where GCC does something dumb like: 2051 * 2052 * lea 0x8(%rsp), %rcx 2053 * ... 2054 * mov %rcx, %rsp 2055 */ 2056 cfa->offset = -cfi->vals[op->src.reg].offset; 2057 cfi->stack_size = cfa->offset; 2058 2059 } else if (cfa->base == CFI_SP && 2060 cfi->vals[op->src.reg].base == CFI_SP_INDIRECT && 2061 cfi->vals[op->src.reg].offset == cfa->offset) { 2062 2063 /* 2064 * Stack swizzle: 2065 * 2066 * 1: mov %rsp, (%[tos]) 2067 * 2: mov %[tos], %rsp 2068 * ... 
2069 * 3: pop %rsp 2070 * 2071 * Where: 2072 * 2073 * 1 - places a pointer to the previous 2074 * stack at the Top-of-Stack of the 2075 * new stack. 2076 * 2077 * 2 - switches to the new stack. 2078 * 2079 * 3 - pops the Top-of-Stack to restore 2080 * the original stack. 2081 * 2082 * Note: we set base to SP_INDIRECT 2083 * here and preserve offset. Therefore 2084 * when the unwinder reaches ToS it 2085 * will dereference SP and then add the 2086 * offset to find the next frame, IOW: 2087 * (%rsp) + offset. 2088 */ 2089 cfa->base = CFI_SP_INDIRECT; 2090 2091 } else { 2092 cfa->base = CFI_UNDEFINED; 2093 cfa->offset = 0; 2094 } 2095 } 2096 2097 else if (op->dest.reg == CFI_SP && 2098 cfi->vals[op->src.reg].base == CFI_SP_INDIRECT && 2099 cfi->vals[op->src.reg].offset == cfa->offset) { 2100 2101 /* 2102 * The same stack swizzle case 2) as above. But 2103 * because we can't change cfa->base, case 3) 2104 * will become a regular POP. Pretend we're a 2105 * PUSH so things don't go unbalanced. 2106 */ 2107 cfi->stack_size += 8; 2108 } 2109 2110 2111 break; 2112 2113 case OP_SRC_ADD: 2114 if (op->dest.reg == CFI_SP && op->src.reg == CFI_SP) { 2115 2116 /* add imm, %rsp */ 2117 cfi->stack_size -= op->src.offset; 2118 if (cfa->base == CFI_SP) 2119 cfa->offset -= op->src.offset; 2120 break; 2121 } 2122 2123 if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) { 2124 2125 /* lea disp(%rbp), %rsp */ 2126 cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset); 2127 break; 2128 } 2129 2130 if (!cfi->drap && op->src.reg == CFI_SP && 2131 op->dest.reg == CFI_BP && cfa->base == CFI_SP && 2132 check_reg_frame_pos(®s[CFI_BP], -cfa->offset + op->src.offset)) { 2133 2134 /* lea disp(%rsp), %rbp */ 2135 cfa->base = CFI_BP; 2136 cfa->offset -= op->src.offset; 2137 cfi->bp_scratch = false; 2138 break; 2139 } 2140 2141 if (op->src.reg == CFI_SP && cfa->base == CFI_SP) { 2142 2143 /* drap: lea disp(%rsp), %drap */ 2144 cfi->drap_reg = op->dest.reg; 2145 2146 /* 2147 * lea disp(%rsp), %reg 2148 * 2149 * This is needed for the rare case where GCC 2150 * does something dumb like: 2151 * 2152 * lea 0x8(%rsp), %rcx 2153 * ... 2154 * mov %rcx, %rsp 2155 */ 2156 cfi->vals[op->dest.reg].base = CFI_CFA; 2157 cfi->vals[op->dest.reg].offset = \ 2158 -cfi->stack_size + op->src.offset; 2159 2160 break; 2161 } 2162 2163 if (cfi->drap && op->dest.reg == CFI_SP && 2164 op->src.reg == cfi->drap_reg) { 2165 2166 /* drap: lea disp(%drap), %rsp */ 2167 cfa->base = CFI_SP; 2168 cfa->offset = cfi->stack_size = -op->src.offset; 2169 cfi->drap_reg = CFI_UNDEFINED; 2170 cfi->drap = false; 2171 break; 2172 } 2173 2174 if (op->dest.reg == cfi->cfa.base) { 2175 WARN_FUNC("unsupported stack register modification", 2176 insn->sec, insn->offset); 2177 return -1; 2178 } 2179 2180 break; 2181 2182 case OP_SRC_AND: 2183 if (op->dest.reg != CFI_SP || 2184 (cfi->drap_reg != CFI_UNDEFINED && cfa->base != CFI_SP) || 2185 (cfi->drap_reg == CFI_UNDEFINED && cfa->base != CFI_BP)) { 2186 WARN_FUNC("unsupported stack pointer realignment", 2187 insn->sec, insn->offset); 2188 return -1; 2189 } 2190 2191 if (cfi->drap_reg != CFI_UNDEFINED) { 2192 /* drap: and imm, %rsp */ 2193 cfa->base = cfi->drap_reg; 2194 cfa->offset = cfi->stack_size = 0; 2195 cfi->drap = true; 2196 } 2197 2198 /* 2199 * Older versions of GCC (4.8ish) realign the stack 2200 * without DRAP, with a frame pointer. 
		case OP_SRC_POP:
		case OP_SRC_POPF:
			if (op->dest.reg == CFI_SP && cfa->base == CFI_SP_INDIRECT) {

				/* pop %rsp; # restore from a stack swizzle */
				cfa->base = CFI_SP;
				break;
			}

			if (!cfi->drap && op->dest.reg == cfa->base) {

				/* pop %rbp */
				cfa->base = CFI_SP;
			}

			if (cfi->drap && cfa->base == CFI_BP_INDIRECT &&
			    op->dest.reg == cfi->drap_reg &&
			    cfi->drap_offset == -cfi->stack_size) {

				/* drap: pop %drap */
				cfa->base = cfi->drap_reg;
				cfa->offset = 0;
				cfi->drap_offset = -1;

			} else if (regs[op->dest.reg].offset == -cfi->stack_size) {

				/* pop %reg */
				restore_reg(cfi, op->dest.reg);
			}

			cfi->stack_size -= 8;
			if (cfa->base == CFI_SP)
				cfa->offset -= 8;

			break;

		case OP_SRC_REG_INDIRECT:
			if (!cfi->drap && op->dest.reg == cfa->base &&
			    op->dest.reg == CFI_BP) {

				/* mov disp(%rsp), %rbp */
				cfa->base = CFI_SP;
				cfa->offset = cfi->stack_size;
			}

			if (cfi->drap && op->src.reg == CFI_BP &&
			    op->src.offset == cfi->drap_offset) {

				/* drap: mov disp(%rbp), %drap */
				cfa->base = cfi->drap_reg;
				cfa->offset = 0;
				cfi->drap_offset = -1;
			}

			if (cfi->drap && op->src.reg == CFI_BP &&
			    op->src.offset == regs[op->dest.reg].offset) {

				/* drap: mov disp(%rbp), %reg */
				restore_reg(cfi, op->dest.reg);

			} else if (op->src.reg == cfa->base &&
				   op->src.offset == regs[op->dest.reg].offset + cfa->offset) {

				/* mov disp(%rbp), %reg */
				/* mov disp(%rsp), %reg */
				restore_reg(cfi, op->dest.reg);

			} else if (op->src.reg == CFI_SP &&
				   op->src.offset == regs[op->dest.reg].offset + cfi->stack_size) {

				/* mov disp(%rsp), %reg */
				restore_reg(cfi, op->dest.reg);
			}

			break;

		default:
			WARN_FUNC("unknown stack-related instruction",
				  insn->sec, insn->offset);
			return -1;
		}

		break;
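
	/*
	 * Pushes grow the tracked stack by 8 bytes and, when the source is
	 * a register, record which slot it was saved in so that a later pop
	 * or load can be recognized as its restore. Pushing the DRAP
	 * register itself moves the CFA to that save slot (CFI_BP_INDIRECT)
	 * until it is popped again.
	 */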
	case OP_DEST_PUSH:
	case OP_DEST_PUSHF:
		cfi->stack_size += 8;
		if (cfa->base == CFI_SP)
			cfa->offset += 8;

		if (op->src.type != OP_SRC_REG)
			break;

		if (cfi->drap) {
			if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {

				/* drap: push %drap */
				cfa->base = CFI_BP_INDIRECT;
				cfa->offset = -cfi->stack_size;

				/* save drap so we know when to restore it */
				cfi->drap_offset = -cfi->stack_size;

			} else if (op->src.reg == CFI_BP && cfa->base == cfi->drap_reg) {

				/* drap: push %rbp */
				cfi->stack_size = 0;

			} else {

				/* drap: push %reg */
				save_reg(cfi, op->src.reg, CFI_BP, -cfi->stack_size);
			}

		} else {

			/* push %reg */
			save_reg(cfi, op->src.reg, CFI_CFA, -cfi->stack_size);
		}

		/* detect when asm code uses rbp as a scratch register */
		if (!no_fp && insn->func && op->src.reg == CFI_BP &&
		    cfa->base != CFI_BP)
			cfi->bp_scratch = true;
		break;

	case OP_DEST_REG_INDIRECT:

		if (cfi->drap) {
			if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {

				/* drap: mov %drap, disp(%rbp) */
				cfa->base = CFI_BP_INDIRECT;
				cfa->offset = op->dest.offset;

				/* save drap offset so we know when to restore it */
				cfi->drap_offset = op->dest.offset;
			} else {

				/* drap: mov reg, disp(%rbp) */
				save_reg(cfi, op->src.reg, CFI_BP, op->dest.offset);
			}

		} else if (op->dest.reg == cfa->base) {

			/* mov reg, disp(%rbp) */
			/* mov reg, disp(%rsp) */
			save_reg(cfi, op->src.reg, CFI_CFA,
				 op->dest.offset - cfi->cfa.offset);

		} else if (op->dest.reg == CFI_SP) {

			/* mov reg, disp(%rsp) */
			save_reg(cfi, op->src.reg, CFI_CFA,
				 op->dest.offset - cfi->stack_size);

		} else if (op->src.reg == CFI_SP && op->dest.offset == 0) {

			/* mov %rsp, (%reg); # setup a stack swizzle. */
			cfi->vals[op->dest.reg].base = CFI_SP_INDIRECT;
			cfi->vals[op->dest.reg].offset = cfa->offset;
		}

		break;

	case OP_DEST_LEAVE:
		if ((!cfi->drap && cfa->base != CFI_BP) ||
		    (cfi->drap && cfa->base != cfi->drap_reg)) {
			WARN_FUNC("leave instruction with modified stack frame",
				  insn->sec, insn->offset);
			return -1;
		}

		/* leave (mov %rbp, %rsp; pop %rbp) */

		cfi->stack_size = -cfi->regs[CFI_BP].offset - 8;
		restore_reg(cfi, CFI_BP);

		if (!cfi->drap) {
			cfa->base = CFI_SP;
			cfa->offset -= 8;
		}

		break;

	case OP_DEST_MEM:
		if (op->src.type != OP_SRC_POP && op->src.type != OP_SRC_POPF) {
			WARN_FUNC("unknown stack-related memory operation",
				  insn->sec, insn->offset);
			return -1;
		}

		/* pop mem */
		cfi->stack_size -= 8;
		if (cfa->base == CFI_SP)
			cfa->offset -= 8;

		break;

	default:
		WARN_FUNC("unknown stack-related instruction",
			  insn->sec, insn->offset);
		return -1;
	}

	return 0;
}
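
/*
 * A note on the mechanism used below: alt_group->cfi is effectively an
 * array with one slot per byte offset within the alternative group. The
 * first code stream to visit a given offset records its CFI there; any
 * other stream whose instruction boundary lands on the same offset must
 * match it exactly. Offsets only one stream ever decodes are not compared
 * at all.
 */
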
/*
 * The stack layouts of alternatives instructions can sometimes diverge when
 * they have stack modifications. That's fine as long as the potential stack
 * layouts don't conflict at any given potential instruction boundary.
 *
 * Flatten the CFIs of the different alternative code streams (both original
 * and replacement) into a single shared CFI array which can be used to detect
 * conflicts and nicely feed a linear array of ORC entries to the unwinder.
 */
static int propagate_alt_cfi(struct objtool_file *file, struct instruction *insn)
{
	struct cfi_state **alt_cfi;
	int group_off;

	if (!insn->alt_group)
		return 0;

	alt_cfi = insn->alt_group->cfi;
	group_off = insn->offset - insn->alt_group->first_insn->offset;

	if (!alt_cfi[group_off]) {
		alt_cfi[group_off] = &insn->cfi;
	} else {
		if (memcmp(alt_cfi[group_off], &insn->cfi, sizeof(struct cfi_state))) {
			WARN_FUNC("stack layout conflict in alternatives",
				  insn->sec, insn->offset);
			return -1;
		}
	}

	return 0;
}

static int handle_insn_ops(struct instruction *insn, struct insn_state *state)
{
	struct stack_op *op;

	list_for_each_entry(op, &insn->stack_ops, list) {

		if (update_cfi_state(insn, &state->cfi, op))
			return 1;

		if (!insn->alt_group)
			continue;

		/*
		 * The uaccess state is tracked as a bit-stack in a u32:
		 * PUSHF pushes the current uaccess flag, POPF pops it; the
		 * initial 1 is a sentinel marking the bottom of the stack.
		 */
		if (op->dest.type == OP_DEST_PUSHF) {
			if (!state->uaccess_stack) {
				state->uaccess_stack = 1;
			} else if (state->uaccess_stack >> 31) {
				WARN_FUNC("PUSHF stack exhausted",
					  insn->sec, insn->offset);
				return 1;
			}
			state->uaccess_stack <<= 1;
			state->uaccess_stack |= state->uaccess;
		}

		if (op->src.type == OP_SRC_POPF) {
			if (state->uaccess_stack) {
				state->uaccess = state->uaccess_stack & 1;
				state->uaccess_stack >>= 1;
				if (state->uaccess_stack == 1)
					state->uaccess_stack = 0;
			}
		}
	}

	return 0;
}
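
/*
 * Compare two stack states for the same instruction, reporting the first
 * field that differs. validate_branch() calls this when control flow
 * re-converges on an already-visited instruction: unless that instruction
 * carries an unwind hint, every path reaching it must agree on the CFA,
 * the saved registers and the DRAP state.
 */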
static bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2)
{
	struct cfi_state *cfi1 = &insn->cfi;
	int i;

	if (memcmp(&cfi1->cfa, &cfi2->cfa, sizeof(cfi1->cfa))) {

		WARN_FUNC("stack state mismatch: cfa1=%d%+d cfa2=%d%+d",
			  insn->sec, insn->offset,
			  cfi1->cfa.base, cfi1->cfa.offset,
			  cfi2->cfa.base, cfi2->cfa.offset);

	} else if (memcmp(&cfi1->regs, &cfi2->regs, sizeof(cfi1->regs))) {
		for (i = 0; i < CFI_NUM_REGS; i++) {
			if (!memcmp(&cfi1->regs[i], &cfi2->regs[i],
				    sizeof(struct cfi_reg)))
				continue;

			WARN_FUNC("stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d",
				  insn->sec, insn->offset,
				  i, cfi1->regs[i].base, cfi1->regs[i].offset,
				  i, cfi2->regs[i].base, cfi2->regs[i].offset);
			break;
		}

	} else if (cfi1->type != cfi2->type) {

		WARN_FUNC("stack state mismatch: type1=%d type2=%d",
			  insn->sec, insn->offset, cfi1->type, cfi2->type);

	} else if (cfi1->drap != cfi2->drap ||
		   (cfi1->drap && cfi1->drap_reg != cfi2->drap_reg) ||
		   (cfi1->drap && cfi1->drap_offset != cfi2->drap_offset)) {

		WARN_FUNC("stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)",
			  insn->sec, insn->offset,
			  cfi1->drap, cfi1->drap_reg, cfi1->drap_offset,
			  cfi2->drap, cfi2->drap_reg, cfi2->drap_offset);

	} else
		return true;

	return false;
}

static inline bool func_uaccess_safe(struct symbol *func)
{
	if (func)
		return func->uaccess_safe;

	return false;
}

static inline const char *call_dest_name(struct instruction *insn)
{
	if (insn->call_dest)
		return insn->call_dest->name;

	return "{dynamic}";
}

static inline bool noinstr_call_dest(struct symbol *func)
{
	/*
	 * We can't deal with indirect function calls at present;
	 * assume they're instrumented.
	 */
	if (!func)
		return false;

	/*
	 * If the symbol is from a noinstr section, we're good.
	 */
	if (func->sec->noinstr)
		return true;

	/*
	 * The __ubsan_handle_*() calls are like WARN(), they only happen when
	 * something 'BAD' happened. At the risk of taking the machine down,
	 * let them proceed to get the message out.
	 */
	if (!strncmp(func->name, "__ubsan_handle_", 15))
		return true;

	return false;
}

static int validate_call(struct instruction *insn, struct insn_state *state)
{
	if (state->noinstr && state->instr <= 0 &&
	    !noinstr_call_dest(insn->call_dest)) {
		WARN_FUNC("call to %s() leaves .noinstr.text section",
			  insn->sec, insn->offset, call_dest_name(insn));
		return 1;
	}

	if (state->uaccess && !func_uaccess_safe(insn->call_dest)) {
		WARN_FUNC("call to %s() with UACCESS enabled",
			  insn->sec, insn->offset, call_dest_name(insn));
		return 1;
	}

	if (state->df) {
		WARN_FUNC("call to %s() with DF set",
			  insn->sec, insn->offset, call_dest_name(insn));
		return 1;
	}

	return 0;
}

static int validate_sibling_call(struct instruction *insn, struct insn_state *state)
{
	if (has_modified_stack_frame(insn, state)) {
		WARN_FUNC("sibling call from callable instruction with modified stack frame",
			  insn->sec, insn->offset);
		return 1;
	}

	return validate_call(insn, state);
}

static int validate_return(struct symbol *func, struct instruction *insn, struct insn_state *state)
{
	if (state->noinstr && state->instr > 0) {
		WARN_FUNC("return with instrumentation enabled",
			  insn->sec, insn->offset);
		return 1;
	}

	if (state->uaccess && !func_uaccess_safe(func)) {
		WARN_FUNC("return with UACCESS enabled",
			  insn->sec, insn->offset);
		return 1;
	}

	if (!state->uaccess && func_uaccess_safe(func)) {
		WARN_FUNC("return with UACCESS disabled from a UACCESS-safe function",
			  insn->sec, insn->offset);
		return 1;
	}

	if (state->df) {
		WARN_FUNC("return with DF set",
			  insn->sec, insn->offset);
		return 1;
	}

	if (func && has_modified_stack_frame(insn, state)) {
		WARN_FUNC("return with modified stack frame",
			  insn->sec, insn->offset);
		return 1;
	}

	if (state->cfi.bp_scratch) {
		WARN_FUNC("BP used as a scratch register",
			  insn->sec, insn->offset);
		return 1;
	}

	return 0;
}

static struct instruction *next_insn_to_validate(struct objtool_file *file,
						 struct instruction *insn)
{
	struct alt_group *alt_group = insn->alt_group;

	/*
	 * Simulate the fact that alternatives are patched in-place. When the
	 * end of a replacement alt_group is reached, redirect objtool flow to
	 * the end of the original alt_group.
	 */
	if (alt_group && insn == alt_group->last_insn && alt_group->orig_group)
		return next_insn_same_sec(file, alt_group->orig_group->last_insn);

	return next_insn_same_sec(file, insn);
}
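
/*
 * A note on state tracking in validate_branch() below: the insn_state is
 * passed by value, so each recursive call (for an alternative or a branch
 * target) forks its own copy. The per-instruction 'visited' bitmask is
 * keyed on the uaccess flag (1 << state.uaccess), so every instruction may
 * be validated up to twice: once with uaccess off and once with it on.
 */
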
/*
 * Follow the branch starting at the given instruction, and recursively follow
 * any other branches (jumps). Meanwhile, track the frame pointer state at
 * each instruction and validate all the rules described in
 * tools/objtool/Documentation/stack-validation.txt.
 */
static int validate_branch(struct objtool_file *file, struct symbol *func,
			   struct instruction *insn, struct insn_state state)
{
	struct alternative *alt;
	struct instruction *next_insn;
	struct section *sec;
	u8 visited;
	int ret;

	sec = insn->sec;

	while (1) {
		next_insn = next_insn_to_validate(file, insn);

		if (file->c_file && func && insn->func && func != insn->func->pfunc) {
			WARN("%s() falls through to next function %s()",
			     func->name, insn->func->name);
			return 1;
		}

		if (func && insn->ignore) {
			WARN_FUNC("BUG: why am I validating an ignored function?",
				  sec, insn->offset);
			return 1;
		}

		visited = 1 << state.uaccess;
		if (insn->visited) {
			if (!insn->hint && !insn_cfi_match(insn, &state.cfi))
				return 1;

			if (insn->visited & visited)
				return 0;
		}

		if (state.noinstr)
			state.instr += insn->instr;

		if (insn->hint)
			state.cfi = insn->cfi;
		else
			insn->cfi = state.cfi;

		insn->visited |= visited;

		if (propagate_alt_cfi(file, insn))
			return 1;

		if (!insn->ignore_alts && !list_empty(&insn->alts)) {
			bool skip_orig = false;

			list_for_each_entry(alt, &insn->alts, list) {
				if (alt->skip_orig)
					skip_orig = true;

				ret = validate_branch(file, func, alt->insn, state);
				if (ret) {
					if (backtrace)
						BT_FUNC("(alt)", insn);
					return ret;
				}
			}

			if (skip_orig)
				return 0;
		}

		if (handle_insn_ops(insn, &state))
			return 1;

		switch (insn->type) {

		case INSN_RETURN:
			return validate_return(func, insn, &state);

		case INSN_CALL:
		case INSN_CALL_DYNAMIC:
			ret = validate_call(insn, &state);
			if (ret)
				return ret;

			if (!no_fp && func && !is_fentry_call(insn) &&
			    !has_valid_stack_frame(&state)) {
				WARN_FUNC("call without frame pointer save/setup",
					  sec, insn->offset);
				return 1;
			}

			if (dead_end_function(file, insn->call_dest))
				return 0;

			break;

		case INSN_JUMP_CONDITIONAL:
		case INSN_JUMP_UNCONDITIONAL:
			if (is_sibling_call(insn)) {
				ret = validate_sibling_call(insn, &state);
				if (ret)
					return ret;

			} else if (insn->jump_dest) {
				ret = validate_branch(file, func,
						      insn->jump_dest, state);
				if (ret) {
					if (backtrace)
						BT_FUNC("(branch)", insn);
					return ret;
				}
			}

			if (insn->type == INSN_JUMP_UNCONDITIONAL)
				return 0;

			break;

		case INSN_JUMP_DYNAMIC:
		case INSN_JUMP_DYNAMIC_CONDITIONAL:
			if (is_sibling_call(insn)) {
				ret = validate_sibling_call(insn, &state);
				if (ret)
					return ret;
			}

			if (insn->type == INSN_JUMP_DYNAMIC)
				return 0;

			break;

		case INSN_CONTEXT_SWITCH:
			if (func && (!next_insn || !next_insn->hint)) {
				WARN_FUNC("unsupported instruction in callable function",
					  sec, insn->offset);
				return 1;
			}
			return 0;

		case INSN_STAC:
			if (state.uaccess) {
				WARN_FUNC("recursive UACCESS enable", sec, insn->offset);
				return 1;
			}

			state.uaccess = true;
			break;

		case INSN_CLAC:
			if (!state.uaccess && func) {
				WARN_FUNC("redundant UACCESS disable", sec, insn->offset);
				return 1;
			}

			if (func_uaccess_safe(func) && !state.uaccess_stack) {
				WARN_FUNC("UACCESS-safe disables UACCESS", sec, insn->offset);
				return 1;
			}

			state.uaccess = false;
			break;

		case INSN_STD:
			if (state.df) {
				WARN_FUNC("recursive STD", sec, insn->offset);
				return 1;
			}

			state.df = true;
			break;

		case INSN_CLD:
			if (!state.df && func) {
				WARN_FUNC("redundant CLD", sec, insn->offset);
				return 1;
			}

			state.df = false;
			break;

		default:
			break;
		}

		if (insn->dead_end)
			return 0;

		if (!next_insn) {
			if (state.cfi.cfa.base == CFI_UNDEFINED)
				return 0;
			WARN("%s: unexpected end of section", sec->name);
			return 1;
		}

		insn = next_insn;
	}

	return 0;
}
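
/*
 * Validate code which is only reachable via unwind hints: entry code,
 * exception paths and the like. Any instruction annotated with an
 * UNWIND_HINT that plain function-entry validation never reached is used
 * as a fresh starting point, with its stack state seeded from the hint.
 */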
UACCESS", sec, insn->offset); 2821 return 1; 2822 } 2823 2824 state.uaccess = false; 2825 break; 2826 2827 case INSN_STD: 2828 if (state.df) { 2829 WARN_FUNC("recursive STD", sec, insn->offset); 2830 return 1; 2831 } 2832 2833 state.df = true; 2834 break; 2835 2836 case INSN_CLD: 2837 if (!state.df && func) { 2838 WARN_FUNC("redundant CLD", sec, insn->offset); 2839 return 1; 2840 } 2841 2842 state.df = false; 2843 break; 2844 2845 default: 2846 break; 2847 } 2848 2849 if (insn->dead_end) 2850 return 0; 2851 2852 if (!next_insn) { 2853 if (state.cfi.cfa.base == CFI_UNDEFINED) 2854 return 0; 2855 WARN("%s: unexpected end of section", sec->name); 2856 return 1; 2857 } 2858 2859 insn = next_insn; 2860 } 2861 2862 return 0; 2863 } 2864 2865 static int validate_unwind_hints(struct objtool_file *file, struct section *sec) 2866 { 2867 struct instruction *insn; 2868 struct insn_state state; 2869 int ret, warnings = 0; 2870 2871 if (!file->hints) 2872 return 0; 2873 2874 init_insn_state(&state, sec); 2875 2876 if (sec) { 2877 insn = find_insn(file, sec, 0); 2878 if (!insn) 2879 return 0; 2880 } else { 2881 insn = list_first_entry(&file->insn_list, typeof(*insn), list); 2882 } 2883 2884 while (&insn->list != &file->insn_list && (!sec || insn->sec == sec)) { 2885 if (insn->hint && !insn->visited) { 2886 ret = validate_branch(file, insn->func, insn, state); 2887 if (ret && backtrace) 2888 BT_FUNC("<=== (hint)", insn); 2889 warnings += ret; 2890 } 2891 2892 insn = list_next_entry(insn, list); 2893 } 2894 2895 return warnings; 2896 } 2897 2898 static int validate_retpoline(struct objtool_file *file) 2899 { 2900 struct instruction *insn; 2901 int warnings = 0; 2902 2903 for_each_insn(file, insn) { 2904 if (insn->type != INSN_JUMP_DYNAMIC && 2905 insn->type != INSN_CALL_DYNAMIC) 2906 continue; 2907 2908 if (insn->retpoline_safe) 2909 continue; 2910 2911 /* 2912 * .init.text code is ran before userspace and thus doesn't 2913 * strictly need retpolines, except for modules which are 2914 * loaded late, they very much do need retpoline in their 2915 * .init.text 2916 */ 2917 if (!strcmp(insn->sec->name, ".init.text") && !module) 2918 continue; 2919 2920 WARN_FUNC("indirect %s found in RETPOLINE build", 2921 insn->sec, insn->offset, 2922 insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call"); 2923 2924 warnings++; 2925 } 2926 2927 return warnings; 2928 } 2929 2930 static bool is_kasan_insn(struct instruction *insn) 2931 { 2932 return (insn->type == INSN_CALL && 2933 !strcmp(insn->call_dest->name, "__asan_handle_no_return")); 2934 } 2935 2936 static bool is_ubsan_insn(struct instruction *insn) 2937 { 2938 return (insn->type == INSN_CALL && 2939 !strcmp(insn->call_dest->name, 2940 "__ubsan_handle_builtin_unreachable")); 2941 } 2942 2943 static bool ignore_unreachable_insn(struct objtool_file *file, struct instruction *insn) 2944 { 2945 int i; 2946 struct instruction *prev_insn; 2947 2948 if (insn->ignore || insn->type == INSN_NOP) 2949 return true; 2950 2951 /* 2952 * Ignore any unused exceptions. This can happen when a whitelisted 2953 * function has an exception table entry. 2954 * 2955 * Also ignore alternative replacement instructions. This can happen 2956 * when a whitelisted function uses one of the ALTERNATIVE macros. 
static bool ignore_unreachable_insn(struct objtool_file *file, struct instruction *insn)
{
	int i;
	struct instruction *prev_insn;

	if (insn->ignore || insn->type == INSN_NOP)
		return true;

	/*
	 * Ignore any unused exceptions. This can happen when a whitelisted
	 * function has an exception table entry.
	 *
	 * Also ignore alternative replacement instructions. This can happen
	 * when a whitelisted function uses one of the ALTERNATIVE macros.
	 */
	if (!strcmp(insn->sec->name, ".fixup") ||
	    !strcmp(insn->sec->name, ".altinstr_replacement") ||
	    !strcmp(insn->sec->name, ".altinstr_aux"))
		return true;

	if (!insn->func)
		return false;

	/*
	 * CONFIG_UBSAN_TRAP inserts a UD2 when it sees
	 * __builtin_unreachable(). The BUG() macro has an unreachable() after
	 * the UD2, which causes GCC's undefined trap logic to emit another UD2
	 * (or occasionally a JMP to UD2).
	 *
	 * It may also insert a UD2 after calling a __noreturn function.
	 */
	prev_insn = list_prev_entry(insn, list);
	if ((prev_insn->dead_end || dead_end_function(file, prev_insn->call_dest)) &&
	    (insn->type == INSN_BUG ||
	     (insn->type == INSN_JUMP_UNCONDITIONAL &&
	      insn->jump_dest && insn->jump_dest->type == INSN_BUG)))
		return true;

	/*
	 * Check if this (or a subsequent) instruction is related to
	 * CONFIG_UBSAN or CONFIG_KASAN.
	 *
	 * End the search at 5 instructions to avoid going into the weeds.
	 */
	for (i = 0; i < 5; i++) {

		if (is_kasan_insn(insn) || is_ubsan_insn(insn))
			return true;

		if (insn->type == INSN_JUMP_UNCONDITIONAL) {
			if (insn->jump_dest &&
			    insn->jump_dest->func == insn->func) {
				insn = insn->jump_dest;
				continue;
			}

			break;
		}

		if (insn->offset + insn->len >= insn->func->offset + insn->func->len)
			break;

		insn = list_next_entry(insn, list);
	}

	return false;
}

static int validate_symbol(struct objtool_file *file, struct section *sec,
			   struct symbol *sym, struct insn_state *state)
{
	struct instruction *insn;
	int ret;

	if (!sym->len) {
		WARN("%s() is missing an ELF size annotation", sym->name);
		return 1;
	}

	if (sym->pfunc != sym || sym->alias != sym)
		return 0;

	insn = find_insn(file, sec, sym->offset);
	if (!insn || insn->ignore || insn->visited)
		return 0;

	state->uaccess = sym->uaccess_safe;

	ret = validate_branch(file, insn->func, insn, *state);
	if (ret && backtrace)
		BT_FUNC("<=== (sym)", insn);
	return ret;
}

static int validate_section(struct objtool_file *file, struct section *sec)
{
	struct insn_state state;
	struct symbol *func;
	int warnings = 0;

	list_for_each_entry(func, &sec->symbol_list, list) {
		if (func->type != STT_FUNC)
			continue;

		init_insn_state(&state, sec);
		set_func_state(&state.cfi);

		warnings += validate_symbol(file, sec, func, &state);
	}

	return warnings;
}

static int validate_vmlinux_functions(struct objtool_file *file)
{
	struct section *sec;
	int warnings = 0;

	sec = find_section_by_name(file->elf, ".noinstr.text");
	if (sec) {
		warnings += validate_section(file, sec);
		warnings += validate_unwind_hints(file, sec);
	}

	sec = find_section_by_name(file->elf, ".entry.text");
	if (sec) {
		warnings += validate_section(file, sec);
		warnings += validate_unwind_hints(file, sec);
	}

	return warnings;
}

static int validate_functions(struct objtool_file *file)
{
	struct section *sec;
	int warnings = 0;

	for_each_sec(file, sec) {
		if (!(sec->sh.sh_flags & SHF_EXECINSTR))
			continue;

		warnings += validate_section(file, sec);
	}

	return warnings;
}
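
/*
 * The reachability pass is deliberately run last and only when every other
 * pass came back clean (see check() below): an earlier warning usually
 * means validate_branch() bailed out early, which would make everything
 * past the bail-out point look unreachable and drown the real problem in
 * noise.
 */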
static int validate_reachable_instructions(struct objtool_file *file)
{
	struct instruction *insn;

	if (file->ignore_unreachables)
		return 0;

	for_each_insn(file, insn) {
		if (insn->visited || ignore_unreachable_insn(file, insn))
			continue;

		WARN_FUNC("unreachable instruction", insn->sec, insn->offset);
		return 1;
	}

	return 0;
}

int check(struct objtool_file *file)
{
	int ret, warnings = 0;

	arch_initial_func_cfi_state(&initial_func_cfi);

	ret = decode_sections(file);
	if (ret < 0)
		goto out;
	warnings += ret;

	if (list_empty(&file->insn_list))
		goto out;

	if (vmlinux && !validate_dup) {
		ret = validate_vmlinux_functions(file);
		if (ret < 0)
			goto out;

		warnings += ret;
		goto out;
	}

	if (retpoline) {
		ret = validate_retpoline(file);
		if (ret < 0)
			return ret;
		warnings += ret;
	}

	ret = validate_functions(file);
	if (ret < 0)
		goto out;
	warnings += ret;

	ret = validate_unwind_hints(file, NULL);
	if (ret < 0)
		goto out;
	warnings += ret;

	if (!warnings) {
		ret = validate_reachable_instructions(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

	ret = create_static_call_sections(file);
	if (ret < 0)
		goto out;
	warnings += ret;

	if (mcount) {
		ret = create_mcount_loc_sections(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

out:
	/*
	 * For now, don't fail the kernel build on fatal warnings. These
	 * errors are still fairly common due to the growing matrix of
	 * supported toolchains and their recent pace of change.
	 */
	return 0;
}