// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com>
 */

#include <string.h>
#include <stdlib.h>
#include <sys/mman.h>

#include <arch/elf.h>
#include <objtool/builtin.h>
#include <objtool/cfi.h>
#include <objtool/arch.h>
#include <objtool/check.h>
#include <objtool/special.h>
#include <objtool/warn.h>
#include <objtool/endianness.h>

#include <linux/objtool.h>
#include <linux/hashtable.h>
#include <linux/kernel.h>
#include <linux/static_call_types.h>

/* One entry in an instruction's list of alternative code sequences. */
struct alternative {
	struct list_head list;
	struct instruction *insn;
	bool skip_orig;		/* true: don't validate the original insn */
};

/* CFI dedup bookkeeping, reported with --stats. */
static unsigned long nr_cfi, nr_cfi_reused, nr_cfi_cache;

static struct cfi_init_state initial_func_cfi;
static struct cfi_state init_cfi;
static struct cfi_state func_cfi;

/*
 * Look up the decoded instruction starting at exactly @offset within @sec,
 * via the file-wide instruction hash.  Returns NULL when no instruction
 * starts at that offset.
 */
struct instruction *find_insn(struct objtool_file *file,
			      struct section *sec, unsigned long offset)
{
	struct instruction *insn;

	hash_for_each_possible(file->insn_hash, insn, hash, sec_offset_hash(sec, offset)) {
		if (insn->sec == sec && insn->offset == offset)
			return insn;
	}

	return NULL;
}

/*
 * Next instruction in list order, but only if it stays in the same
 * section as @insn; NULL at the list end or at a section boundary.
 */
static struct instruction *next_insn_same_sec(struct objtool_file *file,
					      struct instruction *insn)
{
	struct instruction *next = list_next_entry(insn, list);

	if (!next || &next->list == &file->insn_list || next->sec != insn->sec)
		return NULL;

	return next;
}

/*
 * Next instruction belonging to the same function as @insn.  When the
 * function's own instructions run out, iteration continues into its
 * cold subfunction (func->cfunc), if it has one.
 */
static struct instruction *next_insn_same_func(struct objtool_file *file,
					       struct instruction *insn)
{
	struct instruction *next = list_next_entry(insn, list);
	struct symbol *func = insn->func;

	if (!func)
		return NULL;

	if (&next->list != &file->insn_list && next->func == func)
		return next;

	/* Check if we're already in the subfunction: */
	if (func == func->cfunc)
		return NULL;

	/* Move to the subfunction: */
	return find_insn(file, func->cfunc->sec, func->cfunc->offset);
}

/*
 * Previous instruction in list order, but only while it still belongs to
 * the same symbol as @insn.
 */
static struct instruction *prev_insn_same_sym(struct objtool_file *file,
					      struct instruction *insn)
{
	struct instruction *prev = list_prev_entry(insn, list);

	if (&prev->list != &file->insn_list && prev->func == insn->func)
		return prev;

	return NULL;
}

/* Iterate all instructions of @func, including its cold subfunction. */
#define func_for_each_insn(file, func, insn)				\
	for (insn = find_insn(file, func->sec, func->offset);		\
	     insn;							\
	     insn = next_insn_same_func(file, insn))

/* Iterate the instructions inside symbol @sym's [offset, offset+len) range. */
#define sym_for_each_insn(file, sym, insn)				\
	for (insn = find_insn(file, sym->sec, sym->offset);		\
	     insn && &insn->list != &file->insn_list &&			\
	     insn->sec == sym->sec &&					\
	     insn->offset < sym->offset + sym->len;			\
	     insn = list_next_entry(insn, list))

/* Walk backwards from (not including) @insn while still inside @sym. */
#define sym_for_each_insn_continue_reverse(file, sym, insn)		\
	for (insn = list_prev_entry(insn, list);			\
	     &insn->list != &file->insn_list &&				\
	     insn->sec == sym->sec && insn->offset >= sym->offset;	\
	     insn = list_prev_entry(insn, list))

/* Iterate from @insn (inclusive) to the end of its section. */
#define sec_for_each_insn_from(file, insn)				\
	for (; insn; insn = next_insn_same_sec(file, insn))

/* Iterate from the instruction after @insn to the end of its section. */
#define sec_for_each_insn_continue(file, insn)				\
	for (insn = next_insn_same_sec(file, insn); insn;		\
	     insn = next_insn_same_sec(file, insn))

/*
 * Is this dynamic jump really a switch/jump-table dispatch, either
 * directly or via the original instruction of a retpoline alt group?
 */
static bool is_jump_table_jump(struct instruction *insn)
{
	struct alt_group *alt_group = insn->alt_group;

	if (insn->jump_table)
		return true;

	/* Retpoline alternative for a jump table? */
	return alt_group && alt_group->orig_group &&
	       alt_group->orig_group->first_insn->jump_table;
}

/* Does @insn look like a sibling (tail) call? */
static bool is_sibling_call(struct instruction *insn)
{
	/*
	 * Assume only ELF functions can make sibling calls.  This ensures
	 * sibling call detection consistency between vmlinux.o and individual
	 * objects.
	 */
	if (!insn->func)
		return false;

	/* An indirect jump is either a sibling call or a jump to a table.
*/ 139 if (insn->type == INSN_JUMP_DYNAMIC) 140 return !is_jump_table_jump(insn); 141 142 /* add_jump_destinations() sets insn->call_dest for sibling calls. */ 143 return (is_static_jump(insn) && insn->call_dest); 144 } 145 146 /* 147 * This checks to see if the given function is a "noreturn" function. 148 * 149 * For global functions which are outside the scope of this object file, we 150 * have to keep a manual list of them. 151 * 152 * For local functions, we have to detect them manually by simply looking for 153 * the lack of a return instruction. 154 */ 155 static bool __dead_end_function(struct objtool_file *file, struct symbol *func, 156 int recursion) 157 { 158 int i; 159 struct instruction *insn; 160 bool empty = true; 161 162 /* 163 * Unfortunately these have to be hard coded because the noreturn 164 * attribute isn't provided in ELF data. 165 */ 166 static const char * const global_noreturns[] = { 167 "__stack_chk_fail", 168 "panic", 169 "do_exit", 170 "do_task_dead", 171 "make_task_dead", 172 "__module_put_and_exit", 173 "complete_and_exit", 174 "__reiserfs_panic", 175 "lbug_with_loc", 176 "fortify_panic", 177 "usercopy_abort", 178 "machine_real_restart", 179 "rewind_stack_and_make_dead" 180 "kunit_try_catch_throw", 181 "xen_start_kernel", 182 "cpu_bringup_and_idle", 183 }; 184 185 if (!func) 186 return false; 187 188 if (func->bind == STB_WEAK) 189 return false; 190 191 if (func->bind == STB_GLOBAL) 192 for (i = 0; i < ARRAY_SIZE(global_noreturns); i++) 193 if (!strcmp(func->name, global_noreturns[i])) 194 return true; 195 196 if (!func->len) 197 return false; 198 199 insn = find_insn(file, func->sec, func->offset); 200 if (!insn->func) 201 return false; 202 203 func_for_each_insn(file, func, insn) { 204 empty = false; 205 206 if (insn->type == INSN_RETURN) 207 return false; 208 } 209 210 if (empty) 211 return false; 212 213 /* 214 * A function can have a sibling call instead of a return. 
In that 215 * case, the function's dead-end status depends on whether the target 216 * of the sibling call returns. 217 */ 218 func_for_each_insn(file, func, insn) { 219 if (is_sibling_call(insn)) { 220 struct instruction *dest = insn->jump_dest; 221 222 if (!dest) 223 /* sibling call to another file */ 224 return false; 225 226 /* local sibling call */ 227 if (recursion == 5) { 228 /* 229 * Infinite recursion: two functions have 230 * sibling calls to each other. This is a very 231 * rare case. It means they aren't dead ends. 232 */ 233 return false; 234 } 235 236 return __dead_end_function(file, dest->func, recursion+1); 237 } 238 } 239 240 return true; 241 } 242 243 static bool dead_end_function(struct objtool_file *file, struct symbol *func) 244 { 245 return __dead_end_function(file, func, 0); 246 } 247 248 static void init_cfi_state(struct cfi_state *cfi) 249 { 250 int i; 251 252 for (i = 0; i < CFI_NUM_REGS; i++) { 253 cfi->regs[i].base = CFI_UNDEFINED; 254 cfi->vals[i].base = CFI_UNDEFINED; 255 } 256 cfi->cfa.base = CFI_UNDEFINED; 257 cfi->drap_reg = CFI_UNDEFINED; 258 cfi->drap_offset = -1; 259 } 260 261 static void init_insn_state(struct insn_state *state, struct section *sec) 262 { 263 memset(state, 0, sizeof(*state)); 264 init_cfi_state(&state->cfi); 265 266 /* 267 * We need the full vmlinux for noinstr validation, otherwise we can 268 * not correctly determine insn->call_dest->sec (external symbols do 269 * not have a section). 
270 */ 271 if (vmlinux && noinstr && sec) 272 state->noinstr = sec->noinstr; 273 } 274 275 static struct cfi_state *cfi_alloc(void) 276 { 277 struct cfi_state *cfi = calloc(sizeof(struct cfi_state), 1); 278 if (!cfi) { 279 WARN("calloc failed"); 280 exit(1); 281 } 282 nr_cfi++; 283 return cfi; 284 } 285 286 static int cfi_bits; 287 static struct hlist_head *cfi_hash; 288 289 static inline bool cficmp(struct cfi_state *cfi1, struct cfi_state *cfi2) 290 { 291 return memcmp((void *)cfi1 + sizeof(cfi1->hash), 292 (void *)cfi2 + sizeof(cfi2->hash), 293 sizeof(struct cfi_state) - sizeof(struct hlist_node)); 294 } 295 296 static inline u32 cfi_key(struct cfi_state *cfi) 297 { 298 return jhash((void *)cfi + sizeof(cfi->hash), 299 sizeof(*cfi) - sizeof(cfi->hash), 0); 300 } 301 302 static struct cfi_state *cfi_hash_find_or_add(struct cfi_state *cfi) 303 { 304 struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)]; 305 struct cfi_state *obj; 306 307 hlist_for_each_entry(obj, head, hash) { 308 if (!cficmp(cfi, obj)) { 309 nr_cfi_cache++; 310 return obj; 311 } 312 } 313 314 obj = cfi_alloc(); 315 *obj = *cfi; 316 hlist_add_head(&obj->hash, head); 317 318 return obj; 319 } 320 321 static void cfi_hash_add(struct cfi_state *cfi) 322 { 323 struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)]; 324 325 hlist_add_head(&cfi->hash, head); 326 } 327 328 static void *cfi_hash_alloc(unsigned long size) 329 { 330 cfi_bits = max(10, ilog2(size)); 331 cfi_hash = mmap(NULL, sizeof(struct hlist_head) << cfi_bits, 332 PROT_READ|PROT_WRITE, 333 MAP_PRIVATE|MAP_ANON, -1, 0); 334 if (cfi_hash == (void *)-1L) { 335 WARN("mmap fail cfi_hash"); 336 cfi_hash = NULL; 337 } else if (stats) { 338 printf("cfi_bits: %d\n", cfi_bits); 339 } 340 341 return cfi_hash; 342 } 343 344 static unsigned long nr_insns; 345 static unsigned long nr_insns_visited; 346 347 /* 348 * Call the arch-specific instruction decoder for all the instructions and add 349 * them to the global 
instruction list. 350 */ 351 static int decode_instructions(struct objtool_file *file) 352 { 353 struct section *sec; 354 struct symbol *func; 355 unsigned long offset; 356 struct instruction *insn; 357 int ret; 358 359 for_each_sec(file, sec) { 360 361 if (!(sec->sh.sh_flags & SHF_EXECINSTR)) 362 continue; 363 364 if (strcmp(sec->name, ".altinstr_replacement") && 365 strcmp(sec->name, ".altinstr_aux") && 366 strncmp(sec->name, ".discard.", 9)) 367 sec->text = true; 368 369 if (!strcmp(sec->name, ".noinstr.text") || 370 !strcmp(sec->name, ".entry.text")) 371 sec->noinstr = true; 372 373 for (offset = 0; offset < sec->sh.sh_size; offset += insn->len) { 374 insn = malloc(sizeof(*insn)); 375 if (!insn) { 376 WARN("malloc failed"); 377 return -1; 378 } 379 memset(insn, 0, sizeof(*insn)); 380 INIT_LIST_HEAD(&insn->alts); 381 INIT_LIST_HEAD(&insn->stack_ops); 382 383 insn->sec = sec; 384 insn->offset = offset; 385 386 ret = arch_decode_instruction(file, sec, offset, 387 sec->sh.sh_size - offset, 388 &insn->len, &insn->type, 389 &insn->immediate, 390 &insn->stack_ops); 391 if (ret) 392 goto err; 393 394 hash_add(file->insn_hash, &insn->hash, sec_offset_hash(sec, insn->offset)); 395 list_add_tail(&insn->list, &file->insn_list); 396 nr_insns++; 397 } 398 399 list_for_each_entry(func, &sec->symbol_list, list) { 400 if (func->type != STT_FUNC || func->alias != func) 401 continue; 402 403 if (!find_insn(file, sec, func->offset)) { 404 WARN("%s(): can't find starting instruction", 405 func->name); 406 return -1; 407 } 408 409 sym_for_each_insn(file, func, insn) 410 insn->func = func; 411 } 412 } 413 414 if (stats) 415 printf("nr_insns: %lu\n", nr_insns); 416 417 return 0; 418 419 err: 420 free(insn); 421 return ret; 422 } 423 424 /* 425 * Read the pv_ops[] .data table to find the static initialized values. 
426 */ 427 static int add_pv_ops(struct objtool_file *file, const char *symname) 428 { 429 struct symbol *sym, *func; 430 unsigned long off, end; 431 struct reloc *rel; 432 int idx; 433 434 sym = find_symbol_by_name(file->elf, symname); 435 if (!sym) 436 return 0; 437 438 off = sym->offset; 439 end = off + sym->len; 440 for (;;) { 441 rel = find_reloc_by_dest_range(file->elf, sym->sec, off, end - off); 442 if (!rel) 443 break; 444 445 func = rel->sym; 446 if (func->type == STT_SECTION) 447 func = find_symbol_by_offset(rel->sym->sec, rel->addend); 448 449 idx = (rel->offset - sym->offset) / sizeof(unsigned long); 450 451 objtool_pv_add(file, idx, func); 452 453 off = rel->offset + 1; 454 if (off > end) 455 break; 456 } 457 458 return 0; 459 } 460 461 /* 462 * Allocate and initialize file->pv_ops[]. 463 */ 464 static int init_pv_ops(struct objtool_file *file) 465 { 466 static const char *pv_ops_tables[] = { 467 "pv_ops", 468 "xen_cpu_ops", 469 "xen_irq_ops", 470 "xen_mmu_ops", 471 NULL, 472 }; 473 const char *pv_ops; 474 struct symbol *sym; 475 int idx, nr; 476 477 if (!noinstr) 478 return 0; 479 480 file->pv_ops = NULL; 481 482 sym = find_symbol_by_name(file->elf, "pv_ops"); 483 if (!sym) 484 return 0; 485 486 nr = sym->len / sizeof(unsigned long); 487 file->pv_ops = calloc(sizeof(struct pv_state), nr); 488 if (!file->pv_ops) 489 return -1; 490 491 for (idx = 0; idx < nr; idx++) 492 INIT_LIST_HEAD(&file->pv_ops[idx].targets); 493 494 for (idx = 0; (pv_ops = pv_ops_tables[idx]); idx++) 495 add_pv_ops(file, pv_ops); 496 497 return 0; 498 } 499 500 static struct instruction *find_last_insn(struct objtool_file *file, 501 struct section *sec) 502 { 503 struct instruction *insn = NULL; 504 unsigned int offset; 505 unsigned int end = (sec->sh.sh_size > 10) ? 
sec->sh.sh_size - 10 : 0;

	for (offset = sec->sh.sh_size - 1; offset >= end && !insn; offset--)
		insn = find_insn(file, sec, offset);

	return insn;
}

/*
 * Mark "ud2" instructions and manually annotated dead ends.
 */
static int add_dead_ends(struct objtool_file *file)
{
	struct section *sec;
	struct reloc *reloc;
	struct instruction *insn;

	/*
	 * By default, "ud2" is a dead end unless otherwise annotated, because
	 * GCC 7 inserts it for certain divide-by-zero cases.
	 */
	for_each_insn(file, insn)
		if (insn->type == INSN_BUG)
			insn->dead_end = true;

	/*
	 * Check for manually annotated dead ends.
	 */
	sec = find_section_by_name(file->elf, ".rela.discard.unreachable");
	if (!sec)
		goto reachable;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}
		/*
		 * The annotation points at the address *after* the dead-end
		 * instruction, so step back one instruction; an annotation at
		 * the very end of the section maps to the last instruction.
		 */
		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (insn)
			insn = list_prev_entry(insn, list);
		else if (reloc->addend == reloc->sym->sec->sh.sh_size) {
			insn = find_last_insn(file, reloc->sym->sec);
			if (!insn) {
				WARN("can't find unreachable insn at %s+0x%x",
				     reloc->sym->sec->name, reloc->addend);
				return -1;
			}
		} else {
			WARN("can't find unreachable insn at %s+0x%x",
			     reloc->sym->sec->name, reloc->addend);
			return -1;
		}

		insn->dead_end = true;
	}

reachable:
	/*
	 * These manually annotated reachable checks are needed for GCC 4.4,
	 * where the Linux unreachable() macro isn't supported.  In that case
	 * GCC doesn't know the "ud2" is fatal, so it generates code as if it's
	 * not a dead end.
	 */
	sec = find_section_by_name(file->elf, ".rela.discard.reachable");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}
		/* Same addressing convention as the unreachable pass above. */
		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (insn)
			insn = list_prev_entry(insn, list);
		else if (reloc->addend == reloc->sym->sec->sh.sh_size) {
			insn = find_last_insn(file, reloc->sym->sec);
			if (!insn) {
				WARN("can't find reachable insn at %s+0x%x",
				     reloc->sym->sec->name, reloc->addend);
				return -1;
			}
		} else {
			WARN("can't find reachable insn at %s+0x%x",
			     reloc->sym->sec->name, reloc->addend);
			return -1;
		}

		insn->dead_end = false;
	}

	return 0;
}

/*
 * Emit a .static_call_sites section describing every static call site,
 * consumed by the kernel's static call patching machinery.
 */
static int create_static_call_sections(struct objtool_file *file)
{
	struct section *sec;
	struct static_call_site *site;
	struct instruction *insn;
	struct symbol *key_sym;
	char *key_name, *tmp;
	int idx;

	sec = find_section_by_name(file->elf, ".static_call_sites");
	if (sec) {
		INIT_LIST_HEAD(&file->static_call_list);
		WARN("file already has .static_call_sites section, skipping");
		return 0;
	}

	if (list_empty(&file->static_call_list))
		return 0;

	/* Count the sites to size the new section. */
	idx = 0;
	list_for_each_entry(insn, &file->static_call_list, call_node)
		idx++;

	sec = elf_create_section(file->elf, ".static_call_sites", SHF_WRITE,
				 sizeof(struct static_call_site), idx);
	if (!sec)
		return -1;

	idx = 0;
	list_for_each_entry(insn, &file->static_call_list, call_node) {

		site = (struct static_call_site *)sec->data->d_buf + idx;
		memset(site, 0, sizeof(struct static_call_site));

		/* populate reloc for 'addr' */
		if (elf_add_reloc_to_insn(file->elf, sec,
					  idx * sizeof(struct static_call_site),
					  R_X86_64_PC32,
					  insn->sec, insn->offset))
			return -1;

640 /* find key symbol */ 641 key_name = strdup(insn->call_dest->name); 642 if (!key_name) { 643 perror("strdup"); 644 return -1; 645 } 646 if (strncmp(key_name, STATIC_CALL_TRAMP_PREFIX_STR, 647 STATIC_CALL_TRAMP_PREFIX_LEN)) { 648 WARN("static_call: trampoline name malformed: %s", key_name); 649 return -1; 650 } 651 tmp = key_name + STATIC_CALL_TRAMP_PREFIX_LEN - STATIC_CALL_KEY_PREFIX_LEN; 652 memcpy(tmp, STATIC_CALL_KEY_PREFIX_STR, STATIC_CALL_KEY_PREFIX_LEN); 653 654 key_sym = find_symbol_by_name(file->elf, tmp); 655 if (!key_sym) { 656 if (!module) { 657 WARN("static_call: can't find static_call_key symbol: %s", tmp); 658 return -1; 659 } 660 661 /* 662 * For modules(), the key might not be exported, which 663 * means the module can make static calls but isn't 664 * allowed to change them. 665 * 666 * In that case we temporarily set the key to be the 667 * trampoline address. This is fixed up in 668 * static_call_add_module(). 669 */ 670 key_sym = insn->call_dest; 671 } 672 free(key_name); 673 674 /* populate reloc for 'key' */ 675 if (elf_add_reloc(file->elf, sec, 676 idx * sizeof(struct static_call_site) + 4, 677 R_X86_64_PC32, key_sym, 678 is_sibling_call(insn) * STATIC_CALL_SITE_TAIL)) 679 return -1; 680 681 idx++; 682 } 683 684 return 0; 685 } 686 687 static int create_retpoline_sites_sections(struct objtool_file *file) 688 { 689 struct instruction *insn; 690 struct section *sec; 691 int idx; 692 693 sec = find_section_by_name(file->elf, ".retpoline_sites"); 694 if (sec) { 695 WARN("file already has .retpoline_sites, skipping"); 696 return 0; 697 } 698 699 idx = 0; 700 list_for_each_entry(insn, &file->retpoline_call_list, call_node) 701 idx++; 702 703 if (!idx) 704 return 0; 705 706 sec = elf_create_section(file->elf, ".retpoline_sites", 0, 707 sizeof(int), idx); 708 if (!sec) { 709 WARN("elf_create_section: .retpoline_sites"); 710 return -1; 711 } 712 713 idx = 0; 714 list_for_each_entry(insn, &file->retpoline_call_list, call_node) { 715 716 int *site = 
(int *)sec->data->d_buf + idx; 717 *site = 0; 718 719 if (elf_add_reloc_to_insn(file->elf, sec, 720 idx * sizeof(int), 721 R_X86_64_PC32, 722 insn->sec, insn->offset)) { 723 WARN("elf_add_reloc_to_insn: .retpoline_sites"); 724 return -1; 725 } 726 727 idx++; 728 } 729 730 return 0; 731 } 732 733 static int create_mcount_loc_sections(struct objtool_file *file) 734 { 735 struct section *sec; 736 unsigned long *loc; 737 struct instruction *insn; 738 int idx; 739 740 sec = find_section_by_name(file->elf, "__mcount_loc"); 741 if (sec) { 742 INIT_LIST_HEAD(&file->mcount_loc_list); 743 WARN("file already has __mcount_loc section, skipping"); 744 return 0; 745 } 746 747 if (list_empty(&file->mcount_loc_list)) 748 return 0; 749 750 idx = 0; 751 list_for_each_entry(insn, &file->mcount_loc_list, call_node) 752 idx++; 753 754 sec = elf_create_section(file->elf, "__mcount_loc", 0, sizeof(unsigned long), idx); 755 if (!sec) 756 return -1; 757 758 idx = 0; 759 list_for_each_entry(insn, &file->mcount_loc_list, call_node) { 760 761 loc = (unsigned long *)sec->data->d_buf + idx; 762 memset(loc, 0, sizeof(unsigned long)); 763 764 if (elf_add_reloc_to_insn(file->elf, sec, 765 idx * sizeof(unsigned long), 766 R_X86_64_64, 767 insn->sec, insn->offset)) 768 return -1; 769 770 idx++; 771 } 772 773 return 0; 774 } 775 776 /* 777 * Warnings shouldn't be reported for ignored functions. 
778 */ 779 static void add_ignores(struct objtool_file *file) 780 { 781 struct instruction *insn; 782 struct section *sec; 783 struct symbol *func; 784 struct reloc *reloc; 785 786 sec = find_section_by_name(file->elf, ".rela.discard.func_stack_frame_non_standard"); 787 if (!sec) 788 return; 789 790 list_for_each_entry(reloc, &sec->reloc_list, list) { 791 switch (reloc->sym->type) { 792 case STT_FUNC: 793 func = reloc->sym; 794 break; 795 796 case STT_SECTION: 797 func = find_func_by_offset(reloc->sym->sec, reloc->addend); 798 if (!func) 799 continue; 800 break; 801 802 default: 803 WARN("unexpected relocation symbol type in %s: %d", sec->name, reloc->sym->type); 804 continue; 805 } 806 807 func_for_each_insn(file, func, insn) 808 insn->ignore = true; 809 } 810 } 811 812 /* 813 * This is a whitelist of functions that is allowed to be called with AC set. 814 * The list is meant to be minimal and only contains compiler instrumentation 815 * ABI and a few functions used to implement *_{to,from}_user() functions. 816 * 817 * These functions must not directly change AC, but may PUSHF/POPF. 
818 */ 819 static const char *uaccess_safe_builtin[] = { 820 /* KASAN */ 821 "kasan_report", 822 "kasan_check_range", 823 /* KASAN out-of-line */ 824 "__asan_loadN_noabort", 825 "__asan_load1_noabort", 826 "__asan_load2_noabort", 827 "__asan_load4_noabort", 828 "__asan_load8_noabort", 829 "__asan_load16_noabort", 830 "__asan_storeN_noabort", 831 "__asan_store1_noabort", 832 "__asan_store2_noabort", 833 "__asan_store4_noabort", 834 "__asan_store8_noabort", 835 "__asan_store16_noabort", 836 "__kasan_check_read", 837 "__kasan_check_write", 838 /* KASAN in-line */ 839 "__asan_report_load_n_noabort", 840 "__asan_report_load1_noabort", 841 "__asan_report_load2_noabort", 842 "__asan_report_load4_noabort", 843 "__asan_report_load8_noabort", 844 "__asan_report_load16_noabort", 845 "__asan_report_store_n_noabort", 846 "__asan_report_store1_noabort", 847 "__asan_report_store2_noabort", 848 "__asan_report_store4_noabort", 849 "__asan_report_store8_noabort", 850 "__asan_report_store16_noabort", 851 /* KCSAN */ 852 "__kcsan_check_access", 853 "kcsan_found_watchpoint", 854 "kcsan_setup_watchpoint", 855 "kcsan_check_scoped_accesses", 856 "kcsan_disable_current", 857 "kcsan_enable_current_nowarn", 858 /* KCSAN/TSAN */ 859 "__tsan_func_entry", 860 "__tsan_func_exit", 861 "__tsan_read_range", 862 "__tsan_write_range", 863 "__tsan_read1", 864 "__tsan_read2", 865 "__tsan_read4", 866 "__tsan_read8", 867 "__tsan_read16", 868 "__tsan_write1", 869 "__tsan_write2", 870 "__tsan_write4", 871 "__tsan_write8", 872 "__tsan_write16", 873 "__tsan_read_write1", 874 "__tsan_read_write2", 875 "__tsan_read_write4", 876 "__tsan_read_write8", 877 "__tsan_read_write16", 878 "__tsan_atomic8_load", 879 "__tsan_atomic16_load", 880 "__tsan_atomic32_load", 881 "__tsan_atomic64_load", 882 "__tsan_atomic8_store", 883 "__tsan_atomic16_store", 884 "__tsan_atomic32_store", 885 "__tsan_atomic64_store", 886 "__tsan_atomic8_exchange", 887 "__tsan_atomic16_exchange", 888 "__tsan_atomic32_exchange", 889 
"__tsan_atomic64_exchange",
	"__tsan_atomic8_fetch_add",
	"__tsan_atomic16_fetch_add",
	"__tsan_atomic32_fetch_add",
	"__tsan_atomic64_fetch_add",
	"__tsan_atomic8_fetch_sub",
	"__tsan_atomic16_fetch_sub",
	"__tsan_atomic32_fetch_sub",
	"__tsan_atomic64_fetch_sub",
	"__tsan_atomic8_fetch_and",
	"__tsan_atomic16_fetch_and",
	"__tsan_atomic32_fetch_and",
	"__tsan_atomic64_fetch_and",
	"__tsan_atomic8_fetch_or",
	"__tsan_atomic16_fetch_or",
	"__tsan_atomic32_fetch_or",
	"__tsan_atomic64_fetch_or",
	"__tsan_atomic8_fetch_xor",
	"__tsan_atomic16_fetch_xor",
	"__tsan_atomic32_fetch_xor",
	"__tsan_atomic64_fetch_xor",
	"__tsan_atomic8_fetch_nand",
	"__tsan_atomic16_fetch_nand",
	"__tsan_atomic32_fetch_nand",
	"__tsan_atomic64_fetch_nand",
	"__tsan_atomic8_compare_exchange_strong",
	"__tsan_atomic16_compare_exchange_strong",
	"__tsan_atomic32_compare_exchange_strong",
	"__tsan_atomic64_compare_exchange_strong",
	"__tsan_atomic8_compare_exchange_weak",
	"__tsan_atomic16_compare_exchange_weak",
	"__tsan_atomic32_compare_exchange_weak",
	"__tsan_atomic64_compare_exchange_weak",
	"__tsan_atomic8_compare_exchange_val",
	"__tsan_atomic16_compare_exchange_val",
	"__tsan_atomic32_compare_exchange_val",
	"__tsan_atomic64_compare_exchange_val",
	"__tsan_atomic_thread_fence",
	"__tsan_atomic_signal_fence",
	/* KCOV */
	"write_comp_data",
	"check_kcov_mode",
	"__sanitizer_cov_trace_pc",
	"__sanitizer_cov_trace_const_cmp1",
	"__sanitizer_cov_trace_const_cmp2",
	"__sanitizer_cov_trace_const_cmp4",
	"__sanitizer_cov_trace_const_cmp8",
	"__sanitizer_cov_trace_cmp1",
	"__sanitizer_cov_trace_cmp2",
	"__sanitizer_cov_trace_cmp4",
	"__sanitizer_cov_trace_cmp8",
	"__sanitizer_cov_trace_switch",
	/* UBSAN */
	"ubsan_type_mismatch_common",
	"__ubsan_handle_type_mismatch",
	"__ubsan_handle_type_mismatch_v1",
	"__ubsan_handle_shift_out_of_bounds",
	/* misc */
	"csum_partial_copy_generic",
	"copy_mc_fragile",
	"copy_mc_fragile_handle_tail",
	"copy_mc_enhanced_fast_string",
	"ftrace_likely_update", /* CONFIG_TRACE_BRANCH_PROFILING */
	NULL
};

/* Mark every symbol named in uaccess_safe_builtin[] as uaccess-safe. */
static void add_uaccess_safe(struct objtool_file *file)
{
	struct symbol *func;
	const char **name;

	if (!uaccess)
		return;

	for (name = uaccess_safe_builtin; *name; name++) {
		func = find_symbol_by_name(file->elf, *name);
		if (!func)
			continue;

		func->uaccess_safe = true;
	}
}

/*
 * FIXME: For now, just ignore any alternatives which add retpolines.  This is
 * a temporary hack, as it doesn't allow ORC to unwind from inside a retpoline.
 * But it at least allows objtool to understand the control flow *around* the
 * retpoline.
 */
static int add_ignore_alternatives(struct objtool_file *file)
{
	struct section *sec;
	struct reloc *reloc;
	struct instruction *insn;

	sec = find_section_by_name(file->elf, ".rela.discard.ignore_alts");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("bad .discard.ignore_alts entry");
			return -1;
		}

		insn->ignore_alts = true;
	}

	return 0;
}

/* Weak stub; an arch may override this to recognize its retpoline thunks. */
__weak bool arch_is_retpoline(struct symbol *sym)
{
	return false;
}

/* Sentinel: "we already looked and there is no reloc for this insn". */
#define NEGATIVE_RELOC	((void *)-1L)

/*
 * Cached lookup of the relocation covering @insn's bytes.  The result,
 * including a negative result, is memoized in insn->reloc.
 */
static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn)
{
	if (insn->reloc == NEGATIVE_RELOC)
		return NULL;

	if (!insn->reloc) {
		if (!file)
			return NULL;

		insn->reloc = find_reloc_by_dest_range(file->elf, insn->sec,
						       insn->offset,
insn->len);
		if (!insn->reloc) {
			insn->reloc = NEGATIVE_RELOC;
			return NULL;
		}
	}

	return insn->reloc;
}

/* Free an instruction's list of decoded stack operations. */
static void remove_insn_ops(struct instruction *insn)
{
	struct stack_op *op, *tmp;

	list_for_each_entry_safe(op, tmp, &insn->stack_ops, list) {
		list_del(&op->list);
		free(op);
	}
}

/*
 * Classify a call site and queue it on the relevant list (static call,
 * retpoline, mcount), NOP-ing out instrumentation calls where needed.
 */
static void annotate_call_site(struct objtool_file *file,
			       struct instruction *insn, bool sibling)
{
	struct reloc *reloc = insn_reloc(file, insn);
	struct symbol *sym = insn->call_dest;

	/*
	 * NOTE(review): this assumes a reloc exists whenever call_dest is
	 * unset — reloc would be NULL-dereferenced otherwise; confirm with
	 * the callers that this invariant holds.
	 */
	if (!sym)
		sym = reloc->sym;

	/*
	 * Alternative replacement code is just template code which is
	 * sometimes copied to the original instruction.  For now, don't
	 * annotate it.  (In the future we might consider annotating the
	 * original instruction if/when it ever makes sense to do so.)
	 */
	if (!strcmp(insn->sec->name, ".altinstr_replacement"))
		return;

	if (sym->static_call_tramp) {
		list_add_tail(&insn->call_node, &file->static_call_list);
		return;
	}

	if (sym->retpoline_thunk) {
		list_add_tail(&insn->call_node, &file->retpoline_call_list);
		return;
	}

	/*
	 * Many compilers cannot disable KCOV with a function attribute
	 * so they need a little help, NOP out any KCOV calls from noinstr
	 * text.
	 */
	if (insn->sec->noinstr && sym->kcov) {
		/* Kill the reloc so the patched bytes stay patched. */
		if (reloc) {
			reloc->type = R_NONE;
			elf_write_reloc(file->elf, reloc);
		}

		/* A NOP-ed sibling call must still return to the caller. */
		elf_write_insn(file->elf, insn->sec,
			       insn->offset, insn->len,
			       sibling ? arch_ret_insn(insn->len)
			               : arch_nop_insn(insn->len));

		insn->type = sibling ? INSN_RETURN : INSN_NOP;
		return;
	}

	if (mcount && sym->fentry) {
		if (sibling)
			WARN_FUNC("Tail call to __fentry__ !?!?", insn->sec, insn->offset);

		if (reloc) {
			reloc->type = R_NONE;
			elf_write_reloc(file->elf, reloc);
		}

		elf_write_insn(file->elf, insn->sec,
			       insn->offset, insn->len,
			       arch_nop_insn(insn->len));

		insn->type = INSN_NOP;

		/* Record the site so __mcount_loc can be generated later. */
		list_add_tail(&insn->call_node, &file->mcount_loc_list);
		return;
	}
}

/* Record @dest as @insn's call destination and annotate the site. */
static void add_call_dest(struct objtool_file *file, struct instruction *insn,
			  struct symbol *dest, bool sibling)
{
	insn->call_dest = dest;
	if (!dest)
		return;

	/*
	 * Whatever stack impact regular CALLs have, should be undone
	 * by the RETURN of the called function.
	 *
	 * Annotated intra-function calls retain the stack_ops but
	 * are converted to JUMP, see read_intra_function_calls().
	 */
	remove_insn_ops(insn);

	annotate_call_site(file, insn, sibling);
}

/* Convert a call/jump to a retpoline thunk into its dynamic equivalent. */
static void add_retpoline_call(struct objtool_file *file, struct instruction *insn)
{
	/*
	 * Retpoline calls/jumps are really dynamic calls/jumps in disguise,
	 * so convert them accordingly.
	 */
	switch (insn->type) {
	case INSN_CALL:
		insn->type = INSN_CALL_DYNAMIC;
		break;
	case INSN_JUMP_UNCONDITIONAL:
		insn->type = INSN_JUMP_DYNAMIC;
		break;
	case INSN_JUMP_CONDITIONAL:
		insn->type = INSN_JUMP_DYNAMIC_CONDITIONAL;
		break;
	default:
		return;
	}

	insn->retpoline_safe = true;

	/*
	 * Whatever stack impact regular CALLs have, should be undone
	 * by the RETURN of the called function.
	 *
	 * Annotated intra-function calls retain the stack_ops but
	 * are converted to JUMP, see read_intra_function_calls().
	 */
	remove_insn_ops(insn);

	annotate_call_site(file, insn, false);
}
/*
 * Find the destination instructions for all jumps.
 */
static int add_jump_destinations(struct objtool_file *file)
{
	struct instruction *insn;
	struct reloc *reloc;
	struct section *dest_sec;
	unsigned long dest_off;

	for_each_insn(file, insn) {
		if (!is_static_jump(insn))
			continue;

		reloc = insn_reloc(file, insn);
		if (!reloc) {
			dest_sec = insn->sec;
			dest_off = arch_jump_destination(insn);
		} else if (reloc->sym->type == STT_SECTION) {
			dest_sec = reloc->sym->sec;
			dest_off = arch_dest_reloc_offset(reloc->addend);
		} else if (reloc->sym->retpoline_thunk) {
			add_retpoline_call(file, insn);
			continue;
		} else if (insn->func) {
			/* internal or external sibling call (with reloc) */
			add_call_dest(file, insn, reloc->sym, true);
			continue;
		} else if (reloc->sym->sec->idx) {
			dest_sec = reloc->sym->sec;
			dest_off = reloc->sym->sym.st_value +
				   arch_dest_reloc_offset(reloc->addend);
		} else {
			/* non-func asm code jumping to another file */
			continue;
		}

		insn->jump_dest = find_insn(file, dest_sec, dest_off);
		if (!insn->jump_dest) {

			/*
			 * This is a special case where an alt instruction
			 * jumps past the end of the section.  These are
			 * handled later in handle_group_alt().
			 */
			if (!strcmp(insn->sec->name, ".altinstr_replacement"))
				continue;

			WARN_FUNC("can't find jump dest instruction at %s+0x%lx",
				  insn->sec, insn->offset, dest_sec->name,
				  dest_off);
			return -1;
		}

		/*
		 * Cross-function jump.
		 */
		if (insn->func && insn->jump_dest->func &&
		    insn->func != insn->jump_dest->func) {

			/*
			 * For GCC 8+, create parent/child links for any cold
			 * subfunctions.
This is _mostly_ redundant with a 1226 * similar initialization in read_symbols(). 1227 * 1228 * If a function has aliases, we want the *first* such 1229 * function in the symbol table to be the subfunction's 1230 * parent. In that case we overwrite the 1231 * initialization done in read_symbols(). 1232 * 1233 * However this code can't completely replace the 1234 * read_symbols() code because this doesn't detect the 1235 * case where the parent function's only reference to a 1236 * subfunction is through a jump table. 1237 */ 1238 if (!strstr(insn->func->name, ".cold") && 1239 strstr(insn->jump_dest->func->name, ".cold")) { 1240 insn->func->cfunc = insn->jump_dest->func; 1241 insn->jump_dest->func->pfunc = insn->func; 1242 1243 } else if (insn->jump_dest->func->pfunc != insn->func->pfunc && 1244 insn->jump_dest->offset == insn->jump_dest->func->offset) { 1245 /* internal sibling call (without reloc) */ 1246 add_call_dest(file, insn, insn->jump_dest->func, true); 1247 } 1248 } 1249 } 1250 1251 return 0; 1252 } 1253 1254 static struct symbol *find_call_destination(struct section *sec, unsigned long offset) 1255 { 1256 struct symbol *call_dest; 1257 1258 call_dest = find_func_by_offset(sec, offset); 1259 if (!call_dest) 1260 call_dest = find_symbol_by_offset(sec, offset); 1261 1262 return call_dest; 1263 } 1264 1265 /* 1266 * Find the destination instructions for all calls. 
1267 */ 1268 static int add_call_destinations(struct objtool_file *file) 1269 { 1270 struct instruction *insn; 1271 unsigned long dest_off; 1272 struct symbol *dest; 1273 struct reloc *reloc; 1274 1275 for_each_insn(file, insn) { 1276 if (insn->type != INSN_CALL) 1277 continue; 1278 1279 reloc = insn_reloc(file, insn); 1280 if (!reloc) { 1281 dest_off = arch_jump_destination(insn); 1282 dest = find_call_destination(insn->sec, dest_off); 1283 1284 add_call_dest(file, insn, dest, false); 1285 1286 if (insn->ignore) 1287 continue; 1288 1289 if (!insn->call_dest) { 1290 WARN_FUNC("unannotated intra-function call", insn->sec, insn->offset); 1291 return -1; 1292 } 1293 1294 if (insn->func && insn->call_dest->type != STT_FUNC) { 1295 WARN_FUNC("unsupported call to non-function", 1296 insn->sec, insn->offset); 1297 return -1; 1298 } 1299 1300 } else if (reloc->sym->type == STT_SECTION) { 1301 dest_off = arch_dest_reloc_offset(reloc->addend); 1302 dest = find_call_destination(reloc->sym->sec, dest_off); 1303 if (!dest) { 1304 WARN_FUNC("can't find call dest symbol at %s+0x%lx", 1305 insn->sec, insn->offset, 1306 reloc->sym->sec->name, 1307 dest_off); 1308 return -1; 1309 } 1310 1311 add_call_dest(file, insn, dest, false); 1312 1313 } else if (reloc->sym->retpoline_thunk) { 1314 add_retpoline_call(file, insn); 1315 1316 } else 1317 add_call_dest(file, insn, reloc->sym, false); 1318 } 1319 1320 return 0; 1321 } 1322 1323 /* 1324 * The .alternatives section requires some extra special care over and above 1325 * other special sections because alternatives are patched in place. 
1326 */ 1327 static int handle_group_alt(struct objtool_file *file, 1328 struct special_alt *special_alt, 1329 struct instruction *orig_insn, 1330 struct instruction **new_insn) 1331 { 1332 struct instruction *last_orig_insn, *last_new_insn = NULL, *insn, *nop = NULL; 1333 struct alt_group *orig_alt_group, *new_alt_group; 1334 unsigned long dest_off; 1335 1336 1337 orig_alt_group = malloc(sizeof(*orig_alt_group)); 1338 if (!orig_alt_group) { 1339 WARN("malloc failed"); 1340 return -1; 1341 } 1342 orig_alt_group->cfi = calloc(special_alt->orig_len, 1343 sizeof(struct cfi_state *)); 1344 if (!orig_alt_group->cfi) { 1345 WARN("calloc failed"); 1346 return -1; 1347 } 1348 1349 last_orig_insn = NULL; 1350 insn = orig_insn; 1351 sec_for_each_insn_from(file, insn) { 1352 if (insn->offset >= special_alt->orig_off + special_alt->orig_len) 1353 break; 1354 1355 insn->alt_group = orig_alt_group; 1356 last_orig_insn = insn; 1357 } 1358 orig_alt_group->orig_group = NULL; 1359 orig_alt_group->first_insn = orig_insn; 1360 orig_alt_group->last_insn = last_orig_insn; 1361 1362 1363 new_alt_group = malloc(sizeof(*new_alt_group)); 1364 if (!new_alt_group) { 1365 WARN("malloc failed"); 1366 return -1; 1367 } 1368 1369 if (special_alt->new_len < special_alt->orig_len) { 1370 /* 1371 * Insert a fake nop at the end to make the replacement 1372 * alt_group the same size as the original. This is needed to 1373 * allow propagate_alt_cfi() to do its magic. When the last 1374 * instruction affects the stack, the instruction after it (the 1375 * nop) will propagate the new state to the shared CFI array. 
1376 */ 1377 nop = malloc(sizeof(*nop)); 1378 if (!nop) { 1379 WARN("malloc failed"); 1380 return -1; 1381 } 1382 memset(nop, 0, sizeof(*nop)); 1383 INIT_LIST_HEAD(&nop->alts); 1384 INIT_LIST_HEAD(&nop->stack_ops); 1385 1386 nop->sec = special_alt->new_sec; 1387 nop->offset = special_alt->new_off + special_alt->new_len; 1388 nop->len = special_alt->orig_len - special_alt->new_len; 1389 nop->type = INSN_NOP; 1390 nop->func = orig_insn->func; 1391 nop->alt_group = new_alt_group; 1392 nop->ignore = orig_insn->ignore_alts; 1393 } 1394 1395 if (!special_alt->new_len) { 1396 *new_insn = nop; 1397 goto end; 1398 } 1399 1400 insn = *new_insn; 1401 sec_for_each_insn_from(file, insn) { 1402 struct reloc *alt_reloc; 1403 1404 if (insn->offset >= special_alt->new_off + special_alt->new_len) 1405 break; 1406 1407 last_new_insn = insn; 1408 1409 insn->ignore = orig_insn->ignore_alts; 1410 insn->func = orig_insn->func; 1411 insn->alt_group = new_alt_group; 1412 1413 /* 1414 * Since alternative replacement code is copy/pasted by the 1415 * kernel after applying relocations, generally such code can't 1416 * have relative-address relocation references to outside the 1417 * .altinstr_replacement section, unless the arch's 1418 * alternatives code can adjust the relative offsets 1419 * accordingly. 
1420 */ 1421 alt_reloc = insn_reloc(file, insn); 1422 if (alt_reloc && 1423 !arch_support_alt_relocation(special_alt, insn, alt_reloc)) { 1424 1425 WARN_FUNC("unsupported relocation in alternatives section", 1426 insn->sec, insn->offset); 1427 return -1; 1428 } 1429 1430 if (!is_static_jump(insn)) 1431 continue; 1432 1433 if (!insn->immediate) 1434 continue; 1435 1436 dest_off = arch_jump_destination(insn); 1437 if (dest_off == special_alt->new_off + special_alt->new_len) 1438 insn->jump_dest = next_insn_same_sec(file, last_orig_insn); 1439 1440 if (!insn->jump_dest) { 1441 WARN_FUNC("can't find alternative jump destination", 1442 insn->sec, insn->offset); 1443 return -1; 1444 } 1445 } 1446 1447 if (!last_new_insn) { 1448 WARN_FUNC("can't find last new alternative instruction", 1449 special_alt->new_sec, special_alt->new_off); 1450 return -1; 1451 } 1452 1453 if (nop) 1454 list_add(&nop->list, &last_new_insn->list); 1455 end: 1456 new_alt_group->orig_group = orig_alt_group; 1457 new_alt_group->first_insn = *new_insn; 1458 new_alt_group->last_insn = nop ? : last_new_insn; 1459 new_alt_group->cfi = orig_alt_group->cfi; 1460 return 0; 1461 } 1462 1463 /* 1464 * A jump table entry can either convert a nop to a jump or a jump to a nop. 1465 * If the original instruction is a jump, make the alt entry an effective nop 1466 * by just skipping the original instruction. 
1467 */ 1468 static int handle_jump_alt(struct objtool_file *file, 1469 struct special_alt *special_alt, 1470 struct instruction *orig_insn, 1471 struct instruction **new_insn) 1472 { 1473 if (orig_insn->type != INSN_JUMP_UNCONDITIONAL && 1474 orig_insn->type != INSN_NOP) { 1475 1476 WARN_FUNC("unsupported instruction at jump label", 1477 orig_insn->sec, orig_insn->offset); 1478 return -1; 1479 } 1480 1481 if (special_alt->key_addend & 2) { 1482 struct reloc *reloc = insn_reloc(file, orig_insn); 1483 1484 if (reloc) { 1485 reloc->type = R_NONE; 1486 elf_write_reloc(file->elf, reloc); 1487 } 1488 elf_write_insn(file->elf, orig_insn->sec, 1489 orig_insn->offset, orig_insn->len, 1490 arch_nop_insn(orig_insn->len)); 1491 orig_insn->type = INSN_NOP; 1492 } 1493 1494 if (orig_insn->type == INSN_NOP) { 1495 if (orig_insn->len == 2) 1496 file->jl_nop_short++; 1497 else 1498 file->jl_nop_long++; 1499 1500 return 0; 1501 } 1502 1503 if (orig_insn->len == 2) 1504 file->jl_short++; 1505 else 1506 file->jl_long++; 1507 1508 *new_insn = list_next_entry(orig_insn, list); 1509 return 0; 1510 } 1511 1512 /* 1513 * Read all the special sections which have alternate instructions which can be 1514 * patched in or redirected to at runtime. Each instruction having alternate 1515 * instruction(s) has them added to its insn->alts list, which will be 1516 * traversed in validate_branch(). 
1517 */ 1518 static int add_special_section_alts(struct objtool_file *file) 1519 { 1520 struct list_head special_alts; 1521 struct instruction *orig_insn, *new_insn; 1522 struct special_alt *special_alt, *tmp; 1523 struct alternative *alt; 1524 int ret; 1525 1526 ret = special_get_alts(file->elf, &special_alts); 1527 if (ret) 1528 return ret; 1529 1530 list_for_each_entry_safe(special_alt, tmp, &special_alts, list) { 1531 1532 orig_insn = find_insn(file, special_alt->orig_sec, 1533 special_alt->orig_off); 1534 if (!orig_insn) { 1535 WARN_FUNC("special: can't find orig instruction", 1536 special_alt->orig_sec, special_alt->orig_off); 1537 ret = -1; 1538 goto out; 1539 } 1540 1541 new_insn = NULL; 1542 if (!special_alt->group || special_alt->new_len) { 1543 new_insn = find_insn(file, special_alt->new_sec, 1544 special_alt->new_off); 1545 if (!new_insn) { 1546 WARN_FUNC("special: can't find new instruction", 1547 special_alt->new_sec, 1548 special_alt->new_off); 1549 ret = -1; 1550 goto out; 1551 } 1552 } 1553 1554 if (special_alt->group) { 1555 if (!special_alt->orig_len) { 1556 WARN_FUNC("empty alternative entry", 1557 orig_insn->sec, orig_insn->offset); 1558 continue; 1559 } 1560 1561 ret = handle_group_alt(file, special_alt, orig_insn, 1562 &new_insn); 1563 if (ret) 1564 goto out; 1565 } else if (special_alt->jump_or_nop) { 1566 ret = handle_jump_alt(file, special_alt, orig_insn, 1567 &new_insn); 1568 if (ret) 1569 goto out; 1570 } 1571 1572 alt = malloc(sizeof(*alt)); 1573 if (!alt) { 1574 WARN("malloc failed"); 1575 ret = -1; 1576 goto out; 1577 } 1578 1579 alt->insn = new_insn; 1580 alt->skip_orig = special_alt->skip_orig; 1581 orig_insn->ignore_alts |= special_alt->skip_alt; 1582 list_add_tail(&alt->list, &orig_insn->alts); 1583 1584 list_del(&special_alt->list); 1585 free(special_alt); 1586 } 1587 1588 if (stats) { 1589 printf("jl\\\tNOP\tJMP\n"); 1590 printf("short:\t%ld\t%ld\n", file->jl_nop_short, file->jl_short); 1591 printf("long:\t%ld\t%ld\n", 
file->jl_nop_long, file->jl_long); 1592 } 1593 1594 out: 1595 return ret; 1596 } 1597 1598 static int add_jump_table(struct objtool_file *file, struct instruction *insn, 1599 struct reloc *table) 1600 { 1601 struct reloc *reloc = table; 1602 struct instruction *dest_insn; 1603 struct alternative *alt; 1604 struct symbol *pfunc = insn->func->pfunc; 1605 unsigned int prev_offset = 0; 1606 1607 /* 1608 * Each @reloc is a switch table relocation which points to the target 1609 * instruction. 1610 */ 1611 list_for_each_entry_from(reloc, &table->sec->reloc_list, list) { 1612 1613 /* Check for the end of the table: */ 1614 if (reloc != table && reloc->jump_table_start) 1615 break; 1616 1617 /* Make sure the table entries are consecutive: */ 1618 if (prev_offset && reloc->offset != prev_offset + 8) 1619 break; 1620 1621 /* Detect function pointers from contiguous objects: */ 1622 if (reloc->sym->sec == pfunc->sec && 1623 reloc->addend == pfunc->offset) 1624 break; 1625 1626 dest_insn = find_insn(file, reloc->sym->sec, reloc->addend); 1627 if (!dest_insn) 1628 break; 1629 1630 /* Make sure the destination is in the same function: */ 1631 if (!dest_insn->func || dest_insn->func->pfunc != pfunc) 1632 break; 1633 1634 alt = malloc(sizeof(*alt)); 1635 if (!alt) { 1636 WARN("malloc failed"); 1637 return -1; 1638 } 1639 1640 alt->insn = dest_insn; 1641 list_add_tail(&alt->list, &insn->alts); 1642 prev_offset = reloc->offset; 1643 } 1644 1645 if (!prev_offset) { 1646 WARN_FUNC("can't find switch jump table", 1647 insn->sec, insn->offset); 1648 return -1; 1649 } 1650 1651 return 0; 1652 } 1653 1654 /* 1655 * find_jump_table() - Given a dynamic jump, find the switch jump table 1656 * associated with it. 
1657 */ 1658 static struct reloc *find_jump_table(struct objtool_file *file, 1659 struct symbol *func, 1660 struct instruction *insn) 1661 { 1662 struct reloc *table_reloc; 1663 struct instruction *dest_insn, *orig_insn = insn; 1664 1665 /* 1666 * Backward search using the @first_jump_src links, these help avoid 1667 * much of the 'in between' code. Which avoids us getting confused by 1668 * it. 1669 */ 1670 for (; 1671 insn && insn->func && insn->func->pfunc == func; 1672 insn = insn->first_jump_src ?: prev_insn_same_sym(file, insn)) { 1673 1674 if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC) 1675 break; 1676 1677 /* allow small jumps within the range */ 1678 if (insn->type == INSN_JUMP_UNCONDITIONAL && 1679 insn->jump_dest && 1680 (insn->jump_dest->offset <= insn->offset || 1681 insn->jump_dest->offset > orig_insn->offset)) 1682 break; 1683 1684 table_reloc = arch_find_switch_table(file, insn); 1685 if (!table_reloc) 1686 continue; 1687 dest_insn = find_insn(file, table_reloc->sym->sec, table_reloc->addend); 1688 if (!dest_insn || !dest_insn->func || dest_insn->func->pfunc != func) 1689 continue; 1690 1691 return table_reloc; 1692 } 1693 1694 return NULL; 1695 } 1696 1697 /* 1698 * First pass: Mark the head of each jump table so that in the next pass, 1699 * we know when a given jump table ends and the next one starts. 1700 */ 1701 static void mark_func_jump_tables(struct objtool_file *file, 1702 struct symbol *func) 1703 { 1704 struct instruction *insn, *last = NULL; 1705 struct reloc *reloc; 1706 1707 func_for_each_insn(file, func, insn) { 1708 if (!last) 1709 last = insn; 1710 1711 /* 1712 * Store back-pointers for unconditional forward jumps such 1713 * that find_jump_table() can back-track using those and 1714 * avoid some potentially confusing code. 
1715 */ 1716 if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest && 1717 insn->offset > last->offset && 1718 insn->jump_dest->offset > insn->offset && 1719 !insn->jump_dest->first_jump_src) { 1720 1721 insn->jump_dest->first_jump_src = insn; 1722 last = insn->jump_dest; 1723 } 1724 1725 if (insn->type != INSN_JUMP_DYNAMIC) 1726 continue; 1727 1728 reloc = find_jump_table(file, func, insn); 1729 if (reloc) { 1730 reloc->jump_table_start = true; 1731 insn->jump_table = reloc; 1732 } 1733 } 1734 } 1735 1736 static int add_func_jump_tables(struct objtool_file *file, 1737 struct symbol *func) 1738 { 1739 struct instruction *insn; 1740 int ret; 1741 1742 func_for_each_insn(file, func, insn) { 1743 if (!insn->jump_table) 1744 continue; 1745 1746 ret = add_jump_table(file, insn, insn->jump_table); 1747 if (ret) 1748 return ret; 1749 } 1750 1751 return 0; 1752 } 1753 1754 /* 1755 * For some switch statements, gcc generates a jump table in the .rodata 1756 * section which contains a list of addresses within the function to jump to. 1757 * This finds these jump tables and adds them to the insn->alts lists. 
1758 */ 1759 static int add_jump_table_alts(struct objtool_file *file) 1760 { 1761 struct section *sec; 1762 struct symbol *func; 1763 int ret; 1764 1765 if (!file->rodata) 1766 return 0; 1767 1768 for_each_sec(file, sec) { 1769 list_for_each_entry(func, &sec->symbol_list, list) { 1770 if (func->type != STT_FUNC) 1771 continue; 1772 1773 mark_func_jump_tables(file, func); 1774 ret = add_func_jump_tables(file, func); 1775 if (ret) 1776 return ret; 1777 } 1778 } 1779 1780 return 0; 1781 } 1782 1783 static void set_func_state(struct cfi_state *state) 1784 { 1785 state->cfa = initial_func_cfi.cfa; 1786 memcpy(&state->regs, &initial_func_cfi.regs, 1787 CFI_NUM_REGS * sizeof(struct cfi_reg)); 1788 state->stack_size = initial_func_cfi.cfa.offset; 1789 } 1790 1791 static int read_unwind_hints(struct objtool_file *file) 1792 { 1793 struct cfi_state cfi = init_cfi; 1794 struct section *sec, *relocsec; 1795 struct unwind_hint *hint; 1796 struct instruction *insn; 1797 struct reloc *reloc; 1798 int i; 1799 1800 sec = find_section_by_name(file->elf, ".discard.unwind_hints"); 1801 if (!sec) 1802 return 0; 1803 1804 relocsec = sec->reloc; 1805 if (!relocsec) { 1806 WARN("missing .rela.discard.unwind_hints section"); 1807 return -1; 1808 } 1809 1810 if (sec->sh.sh_size % sizeof(struct unwind_hint)) { 1811 WARN("struct unwind_hint size mismatch"); 1812 return -1; 1813 } 1814 1815 file->hints = true; 1816 1817 for (i = 0; i < sec->sh.sh_size / sizeof(struct unwind_hint); i++) { 1818 hint = (struct unwind_hint *)sec->data->d_buf + i; 1819 1820 reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint)); 1821 if (!reloc) { 1822 WARN("can't find reloc for unwind_hints[%d]", i); 1823 return -1; 1824 } 1825 1826 insn = find_insn(file, reloc->sym->sec, reloc->addend); 1827 if (!insn) { 1828 WARN("can't find insn for unwind_hints[%d]", i); 1829 return -1; 1830 } 1831 1832 insn->hint = true; 1833 1834 if (hint->type == UNWIND_HINT_TYPE_FUNC) { 1835 insn->cfi = &func_cfi; 1836 continue; 
1837 } 1838 1839 if (insn->cfi) 1840 cfi = *(insn->cfi); 1841 1842 if (arch_decode_hint_reg(hint->sp_reg, &cfi.cfa.base)) { 1843 WARN_FUNC("unsupported unwind_hint sp base reg %d", 1844 insn->sec, insn->offset, hint->sp_reg); 1845 return -1; 1846 } 1847 1848 cfi.cfa.offset = bswap_if_needed(hint->sp_offset); 1849 cfi.type = hint->type; 1850 cfi.end = hint->end; 1851 1852 insn->cfi = cfi_hash_find_or_add(&cfi); 1853 } 1854 1855 return 0; 1856 } 1857 1858 static int read_retpoline_hints(struct objtool_file *file) 1859 { 1860 struct section *sec; 1861 struct instruction *insn; 1862 struct reloc *reloc; 1863 1864 sec = find_section_by_name(file->elf, ".rela.discard.retpoline_safe"); 1865 if (!sec) 1866 return 0; 1867 1868 list_for_each_entry(reloc, &sec->reloc_list, list) { 1869 if (reloc->sym->type != STT_SECTION) { 1870 WARN("unexpected relocation symbol type in %s", sec->name); 1871 return -1; 1872 } 1873 1874 insn = find_insn(file, reloc->sym->sec, reloc->addend); 1875 if (!insn) { 1876 WARN("bad .discard.retpoline_safe entry"); 1877 return -1; 1878 } 1879 1880 if (insn->type != INSN_JUMP_DYNAMIC && 1881 insn->type != INSN_CALL_DYNAMIC) { 1882 WARN_FUNC("retpoline_safe hint not an indirect jump/call", 1883 insn->sec, insn->offset); 1884 return -1; 1885 } 1886 1887 insn->retpoline_safe = true; 1888 } 1889 1890 return 0; 1891 } 1892 1893 static int read_instr_hints(struct objtool_file *file) 1894 { 1895 struct section *sec; 1896 struct instruction *insn; 1897 struct reloc *reloc; 1898 1899 sec = find_section_by_name(file->elf, ".rela.discard.instr_end"); 1900 if (!sec) 1901 return 0; 1902 1903 list_for_each_entry(reloc, &sec->reloc_list, list) { 1904 if (reloc->sym->type != STT_SECTION) { 1905 WARN("unexpected relocation symbol type in %s", sec->name); 1906 return -1; 1907 } 1908 1909 insn = find_insn(file, reloc->sym->sec, reloc->addend); 1910 if (!insn) { 1911 WARN("bad .discard.instr_end entry"); 1912 return -1; 1913 } 1914 1915 insn->instr--; 1916 } 1917 1918 sec 
= find_section_by_name(file->elf, ".rela.discard.instr_begin"); 1919 if (!sec) 1920 return 0; 1921 1922 list_for_each_entry(reloc, &sec->reloc_list, list) { 1923 if (reloc->sym->type != STT_SECTION) { 1924 WARN("unexpected relocation symbol type in %s", sec->name); 1925 return -1; 1926 } 1927 1928 insn = find_insn(file, reloc->sym->sec, reloc->addend); 1929 if (!insn) { 1930 WARN("bad .discard.instr_begin entry"); 1931 return -1; 1932 } 1933 1934 insn->instr++; 1935 } 1936 1937 return 0; 1938 } 1939 1940 static int read_intra_function_calls(struct objtool_file *file) 1941 { 1942 struct instruction *insn; 1943 struct section *sec; 1944 struct reloc *reloc; 1945 1946 sec = find_section_by_name(file->elf, ".rela.discard.intra_function_calls"); 1947 if (!sec) 1948 return 0; 1949 1950 list_for_each_entry(reloc, &sec->reloc_list, list) { 1951 unsigned long dest_off; 1952 1953 if (reloc->sym->type != STT_SECTION) { 1954 WARN("unexpected relocation symbol type in %s", 1955 sec->name); 1956 return -1; 1957 } 1958 1959 insn = find_insn(file, reloc->sym->sec, reloc->addend); 1960 if (!insn) { 1961 WARN("bad .discard.intra_function_call entry"); 1962 return -1; 1963 } 1964 1965 if (insn->type != INSN_CALL) { 1966 WARN_FUNC("intra_function_call not a direct call", 1967 insn->sec, insn->offset); 1968 return -1; 1969 } 1970 1971 /* 1972 * Treat intra-function CALLs as JMPs, but with a stack_op. 1973 * See add_call_destinations(), which strips stack_ops from 1974 * normal CALLs. 
1975 */ 1976 insn->type = INSN_JUMP_UNCONDITIONAL; 1977 1978 dest_off = insn->offset + insn->len + insn->immediate; 1979 insn->jump_dest = find_insn(file, insn->sec, dest_off); 1980 if (!insn->jump_dest) { 1981 WARN_FUNC("can't find call dest at %s+0x%lx", 1982 insn->sec, insn->offset, 1983 insn->sec->name, dest_off); 1984 return -1; 1985 } 1986 } 1987 1988 return 0; 1989 } 1990 1991 static int classify_symbols(struct objtool_file *file) 1992 { 1993 struct section *sec; 1994 struct symbol *func; 1995 1996 for_each_sec(file, sec) { 1997 list_for_each_entry(func, &sec->symbol_list, list) { 1998 if (func->bind != STB_GLOBAL) 1999 continue; 2000 2001 if (!strncmp(func->name, STATIC_CALL_TRAMP_PREFIX_STR, 2002 strlen(STATIC_CALL_TRAMP_PREFIX_STR))) 2003 func->static_call_tramp = true; 2004 2005 if (arch_is_retpoline(func)) 2006 func->retpoline_thunk = true; 2007 2008 if (!strcmp(func->name, "__fentry__")) 2009 func->fentry = true; 2010 2011 if (!strncmp(func->name, "__sanitizer_cov_", 16)) 2012 func->kcov = true; 2013 } 2014 } 2015 2016 return 0; 2017 } 2018 2019 static void mark_rodata(struct objtool_file *file) 2020 { 2021 struct section *sec; 2022 bool found = false; 2023 2024 /* 2025 * Search for the following rodata sections, each of which can 2026 * potentially contain jump tables: 2027 * 2028 * - .rodata: can contain GCC switch tables 2029 * - .rodata.<func>: same, if -fdata-sections is being used 2030 * - .rodata..c_jump_table: contains C annotated jump tables 2031 * 2032 * .rodata.str1.* sections are ignored; they don't contain jump tables. 
2033 */ 2034 for_each_sec(file, sec) { 2035 if (!strncmp(sec->name, ".rodata", 7) && 2036 !strstr(sec->name, ".str1.")) { 2037 sec->rodata = true; 2038 found = true; 2039 } 2040 } 2041 2042 file->rodata = found; 2043 } 2044 2045 static int decode_sections(struct objtool_file *file) 2046 { 2047 int ret; 2048 2049 mark_rodata(file); 2050 2051 ret = init_pv_ops(file); 2052 if (ret) 2053 return ret; 2054 2055 ret = decode_instructions(file); 2056 if (ret) 2057 return ret; 2058 2059 ret = add_dead_ends(file); 2060 if (ret) 2061 return ret; 2062 2063 add_ignores(file); 2064 add_uaccess_safe(file); 2065 2066 ret = add_ignore_alternatives(file); 2067 if (ret) 2068 return ret; 2069 2070 /* 2071 * Must be before add_{jump_call}_destination. 2072 */ 2073 ret = classify_symbols(file); 2074 if (ret) 2075 return ret; 2076 2077 /* 2078 * Must be before add_special_section_alts() as that depends on 2079 * jump_dest being set. 2080 */ 2081 ret = add_jump_destinations(file); 2082 if (ret) 2083 return ret; 2084 2085 ret = add_special_section_alts(file); 2086 if (ret) 2087 return ret; 2088 2089 /* 2090 * Must be before add_call_destination(); it changes INSN_CALL to 2091 * INSN_JUMP. 
2092 */ 2093 ret = read_intra_function_calls(file); 2094 if (ret) 2095 return ret; 2096 2097 ret = add_call_destinations(file); 2098 if (ret) 2099 return ret; 2100 2101 ret = add_jump_table_alts(file); 2102 if (ret) 2103 return ret; 2104 2105 ret = read_unwind_hints(file); 2106 if (ret) 2107 return ret; 2108 2109 ret = read_retpoline_hints(file); 2110 if (ret) 2111 return ret; 2112 2113 ret = read_instr_hints(file); 2114 if (ret) 2115 return ret; 2116 2117 return 0; 2118 } 2119 2120 static bool is_fentry_call(struct instruction *insn) 2121 { 2122 if (insn->type == INSN_CALL && 2123 insn->call_dest && 2124 insn->call_dest->fentry) 2125 return true; 2126 2127 return false; 2128 } 2129 2130 static bool has_modified_stack_frame(struct instruction *insn, struct insn_state *state) 2131 { 2132 struct cfi_state *cfi = &state->cfi; 2133 int i; 2134 2135 if (cfi->cfa.base != initial_func_cfi.cfa.base || cfi->drap) 2136 return true; 2137 2138 if (cfi->cfa.offset != initial_func_cfi.cfa.offset) 2139 return true; 2140 2141 if (cfi->stack_size != initial_func_cfi.cfa.offset) 2142 return true; 2143 2144 for (i = 0; i < CFI_NUM_REGS; i++) { 2145 if (cfi->regs[i].base != initial_func_cfi.regs[i].base || 2146 cfi->regs[i].offset != initial_func_cfi.regs[i].offset) 2147 return true; 2148 } 2149 2150 return false; 2151 } 2152 2153 static bool check_reg_frame_pos(const struct cfi_reg *reg, 2154 int expected_offset) 2155 { 2156 return reg->base == CFI_CFA && 2157 reg->offset == expected_offset; 2158 } 2159 2160 static bool has_valid_stack_frame(struct insn_state *state) 2161 { 2162 struct cfi_state *cfi = &state->cfi; 2163 2164 if (cfi->cfa.base == CFI_BP && 2165 check_reg_frame_pos(&cfi->regs[CFI_BP], -cfi->cfa.offset) && 2166 check_reg_frame_pos(&cfi->regs[CFI_RA], -cfi->cfa.offset + 8)) 2167 return true; 2168 2169 if (cfi->drap && cfi->regs[CFI_BP].base == CFI_BP) 2170 return true; 2171 2172 return false; 2173 } 2174 2175 static int update_cfi_state_regs(struct instruction *insn, 
2176 struct cfi_state *cfi, 2177 struct stack_op *op) 2178 { 2179 struct cfi_reg *cfa = &cfi->cfa; 2180 2181 if (cfa->base != CFI_SP && cfa->base != CFI_SP_INDIRECT) 2182 return 0; 2183 2184 /* push */ 2185 if (op->dest.type == OP_DEST_PUSH || op->dest.type == OP_DEST_PUSHF) 2186 cfa->offset += 8; 2187 2188 /* pop */ 2189 if (op->src.type == OP_SRC_POP || op->src.type == OP_SRC_POPF) 2190 cfa->offset -= 8; 2191 2192 /* add immediate to sp */ 2193 if (op->dest.type == OP_DEST_REG && op->src.type == OP_SRC_ADD && 2194 op->dest.reg == CFI_SP && op->src.reg == CFI_SP) 2195 cfa->offset -= op->src.offset; 2196 2197 return 0; 2198 } 2199 2200 static void save_reg(struct cfi_state *cfi, unsigned char reg, int base, int offset) 2201 { 2202 if (arch_callee_saved_reg(reg) && 2203 cfi->regs[reg].base == CFI_UNDEFINED) { 2204 cfi->regs[reg].base = base; 2205 cfi->regs[reg].offset = offset; 2206 } 2207 } 2208 2209 static void restore_reg(struct cfi_state *cfi, unsigned char reg) 2210 { 2211 cfi->regs[reg].base = initial_func_cfi.regs[reg].base; 2212 cfi->regs[reg].offset = initial_func_cfi.regs[reg].offset; 2213 } 2214 2215 /* 2216 * A note about DRAP stack alignment: 2217 * 2218 * GCC has the concept of a DRAP register, which is used to help keep track of 2219 * the stack pointer when aligning the stack. r10 or r13 is used as the DRAP 2220 * register. The typical DRAP pattern is: 2221 * 2222 * 4c 8d 54 24 08 lea 0x8(%rsp),%r10 2223 * 48 83 e4 c0 and $0xffffffffffffffc0,%rsp 2224 * 41 ff 72 f8 pushq -0x8(%r10) 2225 * 55 push %rbp 2226 * 48 89 e5 mov %rsp,%rbp 2227 * (more pushes) 2228 * 41 52 push %r10 2229 * ... 
2230 * 41 5a pop %r10 2231 * (more pops) 2232 * 5d pop %rbp 2233 * 49 8d 62 f8 lea -0x8(%r10),%rsp 2234 * c3 retq 2235 * 2236 * There are some variations in the epilogues, like: 2237 * 2238 * 5b pop %rbx 2239 * 41 5a pop %r10 2240 * 41 5c pop %r12 2241 * 41 5d pop %r13 2242 * 41 5e pop %r14 2243 * c9 leaveq 2244 * 49 8d 62 f8 lea -0x8(%r10),%rsp 2245 * c3 retq 2246 * 2247 * and: 2248 * 2249 * 4c 8b 55 e8 mov -0x18(%rbp),%r10 2250 * 48 8b 5d e0 mov -0x20(%rbp),%rbx 2251 * 4c 8b 65 f0 mov -0x10(%rbp),%r12 2252 * 4c 8b 6d f8 mov -0x8(%rbp),%r13 2253 * c9 leaveq 2254 * 49 8d 62 f8 lea -0x8(%r10),%rsp 2255 * c3 retq 2256 * 2257 * Sometimes r13 is used as the DRAP register, in which case it's saved and 2258 * restored beforehand: 2259 * 2260 * 41 55 push %r13 2261 * 4c 8d 6c 24 10 lea 0x10(%rsp),%r13 2262 * 48 83 e4 f0 and $0xfffffffffffffff0,%rsp 2263 * ... 2264 * 49 8d 65 f0 lea -0x10(%r13),%rsp 2265 * 41 5d pop %r13 2266 * c3 retq 2267 */ 2268 static int update_cfi_state(struct instruction *insn, 2269 struct instruction *next_insn, 2270 struct cfi_state *cfi, struct stack_op *op) 2271 { 2272 struct cfi_reg *cfa = &cfi->cfa; 2273 struct cfi_reg *regs = cfi->regs; 2274 2275 /* stack operations don't make sense with an undefined CFA */ 2276 if (cfa->base == CFI_UNDEFINED) { 2277 if (insn->func) { 2278 WARN_FUNC("undefined stack state", insn->sec, insn->offset); 2279 return -1; 2280 } 2281 return 0; 2282 } 2283 2284 if (cfi->type == UNWIND_HINT_TYPE_REGS || 2285 cfi->type == UNWIND_HINT_TYPE_REGS_PARTIAL) 2286 return update_cfi_state_regs(insn, cfi, op); 2287 2288 switch (op->dest.type) { 2289 2290 case OP_DEST_REG: 2291 switch (op->src.type) { 2292 2293 case OP_SRC_REG: 2294 if (op->src.reg == CFI_SP && op->dest.reg == CFI_BP && 2295 cfa->base == CFI_SP && 2296 check_reg_frame_pos(®s[CFI_BP], -cfa->offset)) { 2297 2298 /* mov %rsp, %rbp */ 2299 cfa->base = op->dest.reg; 2300 cfi->bp_scratch = false; 2301 } 2302 2303 else if (op->src.reg == CFI_SP && 2304 op->dest.reg == 
CFI_BP && cfi->drap) { 2305 2306 /* drap: mov %rsp, %rbp */ 2307 regs[CFI_BP].base = CFI_BP; 2308 regs[CFI_BP].offset = -cfi->stack_size; 2309 cfi->bp_scratch = false; 2310 } 2311 2312 else if (op->src.reg == CFI_SP && cfa->base == CFI_SP) { 2313 2314 /* 2315 * mov %rsp, %reg 2316 * 2317 * This is needed for the rare case where GCC 2318 * does: 2319 * 2320 * mov %rsp, %rax 2321 * ... 2322 * mov %rax, %rsp 2323 */ 2324 cfi->vals[op->dest.reg].base = CFI_CFA; 2325 cfi->vals[op->dest.reg].offset = -cfi->stack_size; 2326 } 2327 2328 else if (op->src.reg == CFI_BP && op->dest.reg == CFI_SP && 2329 (cfa->base == CFI_BP || cfa->base == cfi->drap_reg)) { 2330 2331 /* 2332 * mov %rbp, %rsp 2333 * 2334 * Restore the original stack pointer (Clang). 2335 */ 2336 cfi->stack_size = -cfi->regs[CFI_BP].offset; 2337 } 2338 2339 else if (op->dest.reg == cfa->base) { 2340 2341 /* mov %reg, %rsp */ 2342 if (cfa->base == CFI_SP && 2343 cfi->vals[op->src.reg].base == CFI_CFA) { 2344 2345 /* 2346 * This is needed for the rare case 2347 * where GCC does something dumb like: 2348 * 2349 * lea 0x8(%rsp), %rcx 2350 * ... 2351 * mov %rcx, %rsp 2352 */ 2353 cfa->offset = -cfi->vals[op->src.reg].offset; 2354 cfi->stack_size = cfa->offset; 2355 2356 } else if (cfa->base == CFI_SP && 2357 cfi->vals[op->src.reg].base == CFI_SP_INDIRECT && 2358 cfi->vals[op->src.reg].offset == cfa->offset) { 2359 2360 /* 2361 * Stack swizzle: 2362 * 2363 * 1: mov %rsp, (%[tos]) 2364 * 2: mov %[tos], %rsp 2365 * ... 2366 * 3: pop %rsp 2367 * 2368 * Where: 2369 * 2370 * 1 - places a pointer to the previous 2371 * stack at the Top-of-Stack of the 2372 * new stack. 2373 * 2374 * 2 - switches to the new stack. 2375 * 2376 * 3 - pops the Top-of-Stack to restore 2377 * the original stack. 2378 * 2379 * Note: we set base to SP_INDIRECT 2380 * here and preserve offset. 
Therefore 2381 * when the unwinder reaches ToS it 2382 * will dereference SP and then add the 2383 * offset to find the next frame, IOW: 2384 * (%rsp) + offset. 2385 */ 2386 cfa->base = CFI_SP_INDIRECT; 2387 2388 } else { 2389 cfa->base = CFI_UNDEFINED; 2390 cfa->offset = 0; 2391 } 2392 } 2393 2394 else if (op->dest.reg == CFI_SP && 2395 cfi->vals[op->src.reg].base == CFI_SP_INDIRECT && 2396 cfi->vals[op->src.reg].offset == cfa->offset) { 2397 2398 /* 2399 * The same stack swizzle case 2) as above. But 2400 * because we can't change cfa->base, case 3) 2401 * will become a regular POP. Pretend we're a 2402 * PUSH so things don't go unbalanced. 2403 */ 2404 cfi->stack_size += 8; 2405 } 2406 2407 2408 break; 2409 2410 case OP_SRC_ADD: 2411 if (op->dest.reg == CFI_SP && op->src.reg == CFI_SP) { 2412 2413 /* add imm, %rsp */ 2414 cfi->stack_size -= op->src.offset; 2415 if (cfa->base == CFI_SP) 2416 cfa->offset -= op->src.offset; 2417 break; 2418 } 2419 2420 if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) { 2421 2422 /* lea disp(%rbp), %rsp */ 2423 cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset); 2424 break; 2425 } 2426 2427 if (!cfi->drap && op->src.reg == CFI_SP && 2428 op->dest.reg == CFI_BP && cfa->base == CFI_SP && 2429 check_reg_frame_pos(®s[CFI_BP], -cfa->offset + op->src.offset)) { 2430 2431 /* lea disp(%rsp), %rbp */ 2432 cfa->base = CFI_BP; 2433 cfa->offset -= op->src.offset; 2434 cfi->bp_scratch = false; 2435 break; 2436 } 2437 2438 if (op->src.reg == CFI_SP && cfa->base == CFI_SP) { 2439 2440 /* drap: lea disp(%rsp), %drap */ 2441 cfi->drap_reg = op->dest.reg; 2442 2443 /* 2444 * lea disp(%rsp), %reg 2445 * 2446 * This is needed for the rare case where GCC 2447 * does something dumb like: 2448 * 2449 * lea 0x8(%rsp), %rcx 2450 * ... 
2451 * mov %rcx, %rsp 2452 */ 2453 cfi->vals[op->dest.reg].base = CFI_CFA; 2454 cfi->vals[op->dest.reg].offset = \ 2455 -cfi->stack_size + op->src.offset; 2456 2457 break; 2458 } 2459 2460 if (cfi->drap && op->dest.reg == CFI_SP && 2461 op->src.reg == cfi->drap_reg) { 2462 2463 /* drap: lea disp(%drap), %rsp */ 2464 cfa->base = CFI_SP; 2465 cfa->offset = cfi->stack_size = -op->src.offset; 2466 cfi->drap_reg = CFI_UNDEFINED; 2467 cfi->drap = false; 2468 break; 2469 } 2470 2471 if (op->dest.reg == cfi->cfa.base && !(next_insn && next_insn->hint)) { 2472 WARN_FUNC("unsupported stack register modification", 2473 insn->sec, insn->offset); 2474 return -1; 2475 } 2476 2477 break; 2478 2479 case OP_SRC_AND: 2480 if (op->dest.reg != CFI_SP || 2481 (cfi->drap_reg != CFI_UNDEFINED && cfa->base != CFI_SP) || 2482 (cfi->drap_reg == CFI_UNDEFINED && cfa->base != CFI_BP)) { 2483 WARN_FUNC("unsupported stack pointer realignment", 2484 insn->sec, insn->offset); 2485 return -1; 2486 } 2487 2488 if (cfi->drap_reg != CFI_UNDEFINED) { 2489 /* drap: and imm, %rsp */ 2490 cfa->base = cfi->drap_reg; 2491 cfa->offset = cfi->stack_size = 0; 2492 cfi->drap = true; 2493 } 2494 2495 /* 2496 * Older versions of GCC (4.8ish) realign the stack 2497 * without DRAP, with a frame pointer. 
2498 */ 2499 2500 break; 2501 2502 case OP_SRC_POP: 2503 case OP_SRC_POPF: 2504 if (op->dest.reg == CFI_SP && cfa->base == CFI_SP_INDIRECT) { 2505 2506 /* pop %rsp; # restore from a stack swizzle */ 2507 cfa->base = CFI_SP; 2508 break; 2509 } 2510 2511 if (!cfi->drap && op->dest.reg == cfa->base) { 2512 2513 /* pop %rbp */ 2514 cfa->base = CFI_SP; 2515 } 2516 2517 if (cfi->drap && cfa->base == CFI_BP_INDIRECT && 2518 op->dest.reg == cfi->drap_reg && 2519 cfi->drap_offset == -cfi->stack_size) { 2520 2521 /* drap: pop %drap */ 2522 cfa->base = cfi->drap_reg; 2523 cfa->offset = 0; 2524 cfi->drap_offset = -1; 2525 2526 } else if (cfi->stack_size == -regs[op->dest.reg].offset) { 2527 2528 /* pop %reg */ 2529 restore_reg(cfi, op->dest.reg); 2530 } 2531 2532 cfi->stack_size -= 8; 2533 if (cfa->base == CFI_SP) 2534 cfa->offset -= 8; 2535 2536 break; 2537 2538 case OP_SRC_REG_INDIRECT: 2539 if (!cfi->drap && op->dest.reg == cfa->base && 2540 op->dest.reg == CFI_BP) { 2541 2542 /* mov disp(%rsp), %rbp */ 2543 cfa->base = CFI_SP; 2544 cfa->offset = cfi->stack_size; 2545 } 2546 2547 if (cfi->drap && op->src.reg == CFI_BP && 2548 op->src.offset == cfi->drap_offset) { 2549 2550 /* drap: mov disp(%rbp), %drap */ 2551 cfa->base = cfi->drap_reg; 2552 cfa->offset = 0; 2553 cfi->drap_offset = -1; 2554 } 2555 2556 if (cfi->drap && op->src.reg == CFI_BP && 2557 op->src.offset == regs[op->dest.reg].offset) { 2558 2559 /* drap: mov disp(%rbp), %reg */ 2560 restore_reg(cfi, op->dest.reg); 2561 2562 } else if (op->src.reg == cfa->base && 2563 op->src.offset == regs[op->dest.reg].offset + cfa->offset) { 2564 2565 /* mov disp(%rbp), %reg */ 2566 /* mov disp(%rsp), %reg */ 2567 restore_reg(cfi, op->dest.reg); 2568 2569 } else if (op->src.reg == CFI_SP && 2570 op->src.offset == regs[op->dest.reg].offset + cfi->stack_size) { 2571 2572 /* mov disp(%rsp), %reg */ 2573 restore_reg(cfi, op->dest.reg); 2574 } 2575 2576 break; 2577 2578 default: 2579 WARN_FUNC("unknown stack-related instruction", 
2580 insn->sec, insn->offset); 2581 return -1; 2582 } 2583 2584 break; 2585 2586 case OP_DEST_PUSH: 2587 case OP_DEST_PUSHF: 2588 cfi->stack_size += 8; 2589 if (cfa->base == CFI_SP) 2590 cfa->offset += 8; 2591 2592 if (op->src.type != OP_SRC_REG) 2593 break; 2594 2595 if (cfi->drap) { 2596 if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) { 2597 2598 /* drap: push %drap */ 2599 cfa->base = CFI_BP_INDIRECT; 2600 cfa->offset = -cfi->stack_size; 2601 2602 /* save drap so we know when to restore it */ 2603 cfi->drap_offset = -cfi->stack_size; 2604 2605 } else if (op->src.reg == CFI_BP && cfa->base == cfi->drap_reg) { 2606 2607 /* drap: push %rbp */ 2608 cfi->stack_size = 0; 2609 2610 } else { 2611 2612 /* drap: push %reg */ 2613 save_reg(cfi, op->src.reg, CFI_BP, -cfi->stack_size); 2614 } 2615 2616 } else { 2617 2618 /* push %reg */ 2619 save_reg(cfi, op->src.reg, CFI_CFA, -cfi->stack_size); 2620 } 2621 2622 /* detect when asm code uses rbp as a scratch register */ 2623 if (!no_fp && insn->func && op->src.reg == CFI_BP && 2624 cfa->base != CFI_BP) 2625 cfi->bp_scratch = true; 2626 break; 2627 2628 case OP_DEST_REG_INDIRECT: 2629 2630 if (cfi->drap) { 2631 if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) { 2632 2633 /* drap: mov %drap, disp(%rbp) */ 2634 cfa->base = CFI_BP_INDIRECT; 2635 cfa->offset = op->dest.offset; 2636 2637 /* save drap offset so we know when to restore it */ 2638 cfi->drap_offset = op->dest.offset; 2639 } else { 2640 2641 /* drap: mov reg, disp(%rbp) */ 2642 save_reg(cfi, op->src.reg, CFI_BP, op->dest.offset); 2643 } 2644 2645 } else if (op->dest.reg == cfa->base) { 2646 2647 /* mov reg, disp(%rbp) */ 2648 /* mov reg, disp(%rsp) */ 2649 save_reg(cfi, op->src.reg, CFI_CFA, 2650 op->dest.offset - cfi->cfa.offset); 2651 2652 } else if (op->dest.reg == CFI_SP) { 2653 2654 /* mov reg, disp(%rsp) */ 2655 save_reg(cfi, op->src.reg, CFI_CFA, 2656 op->dest.offset - cfi->stack_size); 2657 2658 } else if (op->src.reg == CFI_SP && 
op->dest.offset == 0) { 2659 2660 /* mov %rsp, (%reg); # setup a stack swizzle. */ 2661 cfi->vals[op->dest.reg].base = CFI_SP_INDIRECT; 2662 cfi->vals[op->dest.reg].offset = cfa->offset; 2663 } 2664 2665 break; 2666 2667 case OP_DEST_MEM: 2668 if (op->src.type != OP_SRC_POP && op->src.type != OP_SRC_POPF) { 2669 WARN_FUNC("unknown stack-related memory operation", 2670 insn->sec, insn->offset); 2671 return -1; 2672 } 2673 2674 /* pop mem */ 2675 cfi->stack_size -= 8; 2676 if (cfa->base == CFI_SP) 2677 cfa->offset -= 8; 2678 2679 break; 2680 2681 default: 2682 WARN_FUNC("unknown stack-related instruction", 2683 insn->sec, insn->offset); 2684 return -1; 2685 } 2686 2687 return 0; 2688 } 2689 2690 /* 2691 * The stack layouts of alternatives instructions can sometimes diverge when 2692 * they have stack modifications. That's fine as long as the potential stack 2693 * layouts don't conflict at any given potential instruction boundary. 2694 * 2695 * Flatten the CFIs of the different alternative code streams (both original 2696 * and replacement) into a single shared CFI array which can be used to detect 2697 * conflicts and nicely feed a linear array of ORC entries to the unwinder. 
 */
static int propagate_alt_cfi(struct objtool_file *file, struct instruction *insn)
{
	struct cfi_state **alt_cfi;
	int group_off;

	if (!insn->alt_group)
		return 0;

	if (!insn->cfi) {
		WARN("CFI missing");
		return -1;
	}

	alt_cfi = insn->alt_group->cfi;
	/* Index into the group's shared CFI array: byte offset from the group start. */
	group_off = insn->offset - insn->alt_group->first_insn->offset;

	if (!alt_cfi[group_off]) {
		/* First code stream to reach this offset records its CFI. */
		alt_cfi[group_off] = insn->cfi;
	} else {
		/* Later streams must agree with what was recorded. */
		if (cficmp(alt_cfi[group_off], insn->cfi)) {
			WARN_FUNC("stack layout conflict in alternatives",
				  insn->sec, insn->offset);
			return -1;
		}
	}

	return 0;
}

/*
 * Apply all of @insn's stack operations to the CFI in @state.  For
 * instructions inside an alternative group, additionally maintain the
 * uaccess shift-register stack so the uaccess flag is saved by PUSHF and
 * restored by POPF.  Returns non-zero on error.
 */
static int handle_insn_ops(struct instruction *insn,
			   struct instruction *next_insn,
			   struct insn_state *state)
{
	struct stack_op *op;

	list_for_each_entry(op, &insn->stack_ops, list) {

		if (update_cfi_state(insn, next_insn, &state->cfi, op))
			return 1;

		/* uaccess push/pop tracking below only applies inside alternatives. */
		if (!insn->alt_group)
			continue;

		if (op->dest.type == OP_DEST_PUSHF) {
			if (!state->uaccess_stack) {
				/* Seed with a sentinel bit so emptiness is detectable. */
				state->uaccess_stack = 1;
			} else if (state->uaccess_stack >> 31) {
				/* 32-bit shift register is full; one more push would overflow. */
				WARN_FUNC("PUSHF stack exhausted",
					  insn->sec, insn->offset);
				return 1;
			}
			state->uaccess_stack <<= 1;
			state->uaccess_stack |= state->uaccess;
		}

		if (op->src.type == OP_SRC_POPF) {
			if (state->uaccess_stack) {
				state->uaccess = state->uaccess_stack & 1;
				state->uaccess_stack >>= 1;
				/* Only the sentinel left: the stack is now empty. */
				if (state->uaccess_stack == 1)
					state->uaccess_stack = 0;
			}
		}
	}

	return 0;
}

/*
 * Compare @insn's recorded CFI against @cfi2.  On the first difference
 * (CFA, callee-saved regs, type, or DRAP state) print a detailed warning
 * and return false; return true when the states match.
 */
static bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2)
{
	struct cfi_state *cfi1 = insn->cfi;
	int i;

	if (!cfi1) {
		WARN("CFI missing");
		return false;
	}

	if (memcmp(&cfi1->cfa, &cfi2->cfa, sizeof(cfi1->cfa))) {

		WARN_FUNC("stack state mismatch: cfa1=%d%+d cfa2=%d%+d",
			  insn->sec, insn->offset,
			  cfi1->cfa.base, cfi1->cfa.offset,
			  cfi2->cfa.base, cfi2->cfa.offset);

	} else if (memcmp(&cfi1->regs, &cfi2->regs, sizeof(cfi1->regs))) {
		/* Report only the first mismatching register. */
		for (i = 0; i < CFI_NUM_REGS; i++) {
			if (!memcmp(&cfi1->regs[i], &cfi2->regs[i],
				    sizeof(struct cfi_reg)))
				continue;

			WARN_FUNC("stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d",
				  insn->sec, insn->offset,
				  i, cfi1->regs[i].base, cfi1->regs[i].offset,
				  i, cfi2->regs[i].base, cfi2->regs[i].offset);
			break;
		}

	} else if (cfi1->type != cfi2->type) {

		WARN_FUNC("stack state mismatch: type1=%d type2=%d",
			  insn->sec, insn->offset, cfi1->type, cfi2->type);

	} else if (cfi1->drap != cfi2->drap ||
		   (cfi1->drap && cfi1->drap_reg != cfi2->drap_reg) ||
		   (cfi1->drap && cfi1->drap_offset != cfi2->drap_offset)) {

		WARN_FUNC("stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)",
			  insn->sec, insn->offset,
			  cfi1->drap, cfi1->drap_reg, cfi1->drap_offset,
			  cfi2->drap, cfi2->drap_reg, cfi2->drap_offset);

	} else
		return true;

	return false;
}

/* Whether @func is whitelisted as uaccess-safe; a NULL @func is not. */
static inline bool func_uaccess_safe(struct symbol *func)
{
	if (func)
		return func->uaccess_safe;

	return false;
}

/*
 * Human-readable name of the call destination, for warning messages.
 * For pv_ops[] targets this returns a pointer into a static buffer which
 * is overwritten on the next call.
 */
static inline const char *call_dest_name(struct instruction *insn)
{
	static char pvname[16];
	struct reloc *rel;
	int idx;

	if (insn->call_dest)
		return insn->call_dest->name;

	rel = insn_reloc(NULL, insn);
	if (rel && !strcmp(rel->sym->name, "pv_ops")) {
		idx = (rel->addend / sizeof(void *));
		snprintf(pvname, sizeof(pvname), "pv_ops[%d]", idx);
		return pvname;
	}

	return "{dynamic}";
}

/*
 * Check whether every potential target of this pv_ops[] indirect call
 * lives in a noinstr section; the (sticky) result is cached in
 * file->pv_ops[idx].clean.
 */
static bool pv_call_dest(struct objtool_file *file, struct instruction *insn)
{
	struct symbol *target;
	struct reloc *rel;
	int idx;

	rel = insn_reloc(file, insn);
	if (!rel
	    || strcmp(rel->sym->name, "pv_ops"))
		return false;

	idx = (arch_dest_reloc_offset(rel->addend) / sizeof(void *));

	if (file->pv_ops[idx].clean)
		return true;

	/* Assume clean, then veto if any registered target is instrumented. */
	file->pv_ops[idx].clean = true;

	list_for_each_entry(target, &file->pv_ops[idx].targets, pv_target) {
		if (!target->sec->noinstr) {
			WARN("pv_ops[%d]: %s", idx, target->name);
			file->pv_ops[idx].clean = false;
		}
	}

	return file->pv_ops[idx].clean;
}

/*
 * Whether a call to @func (the destination of @insn) is acceptable from
 * noinstr code: true for noinstr-section symbols, clean pv_ops[] slots,
 * and the __ubsan_handle_*() reporting helpers.
 */
static inline bool noinstr_call_dest(struct objtool_file *file,
				     struct instruction *insn,
				     struct symbol *func)
{
	/*
	 * We can't deal with indirect function calls at present;
	 * assume they're instrumented.
	 */
	if (!func) {
		if (file->pv_ops)
			return pv_call_dest(file, insn);

		return false;
	}

	/*
	 * If the symbol is from a noinstr section; we good.
	 */
	if (func->sec->noinstr)
		return true;

	/*
	 * The __ubsan_handle_*() calls are like WARN(), they only happen when
	 * something 'BAD' happened. At the risk of taking the machine down,
	 * let them proceed to get the message out.
	 */
	if (!strncmp(func->name, "__ubsan_handle_", 15))
		return true;

	return false;
}

/*
 * Validate a call site: noinstr code may only call noinstr-clean
 * destinations, and calls with UACCESS enabled or DF set are rejected.
 * Returns non-zero (a warning) on violation.
 */
static int validate_call(struct objtool_file *file,
			 struct instruction *insn,
			 struct insn_state *state)
{
	if (state->noinstr && state->instr <= 0 &&
	    !noinstr_call_dest(file, insn, insn->call_dest)) {
		WARN_FUNC("call to %s() leaves .noinstr.text section",
				insn->sec, insn->offset, call_dest_name(insn));
		return 1;
	}

	if (state->uaccess && !func_uaccess_safe(insn->call_dest)) {
		WARN_FUNC("call to %s() with UACCESS enabled",
				insn->sec, insn->offset, call_dest_name(insn));
		return 1;
	}

	if (state->df) {
		WARN_FUNC("call to %s() with DF set",
				insn->sec, insn->offset, call_dest_name(insn));
		return 1;
	}

	return 0;
}

/*
 * A sibling (tail) call additionally requires an unmodified stack frame;
 * otherwise the same checks as a normal call apply.
 */
static int validate_sibling_call(struct objtool_file *file,
				 struct instruction *insn,
				 struct insn_state *state)
{
	if (has_modified_stack_frame(insn, state)) {
		WARN_FUNC("sibling call from callable instruction with modified stack frame",
				insn->sec, insn->offset);
		return 1;
	}

	return validate_call(file, insn, state);
}

/*
 * Validate a return: no instrumentation leakage from noinstr code, UACCESS
 * state consistent with the function's uaccess-safe marking, DF clear, a
 * restored stack frame, and BP not left as a scratch register.
 */
static int validate_return(struct symbol *func, struct instruction *insn, struct insn_state *state)
{
	if (state->noinstr && state->instr > 0) {
		WARN_FUNC("return with instrumentation enabled",
			  insn->sec, insn->offset);
		return 1;
	}

	if (state->uaccess && !func_uaccess_safe(func)) {
		WARN_FUNC("return with UACCESS enabled",
			  insn->sec, insn->offset);
		return 1;
	}

	if (!state->uaccess && func_uaccess_safe(func)) {
		WARN_FUNC("return with UACCESS disabled from a UACCESS-safe function",
			  insn->sec, insn->offset);
		return 1;
	}

	if (state->df) {
		WARN_FUNC("return with DF set",
			  insn->sec, insn->offset);
		return 1;
	}

	if (func && has_modified_stack_frame(insn, state)) {
		WARN_FUNC("return with modified stack frame",
			  insn->sec, insn->offset);
		return 1;
	}

	if (state->cfi.bp_scratch) {
		WARN_FUNC("BP used as a scratch register",
			  insn->sec, insn->offset);
		return 1;
	}

	return 0;
}

static struct instruction *next_insn_to_validate(struct objtool_file *file,
						 struct instruction *insn)
{
	struct alt_group *alt_group = insn->alt_group;

	/*
	 * Simulate the fact that alternatives are patched in-place.  When the
	 * end of a replacement alt_group is reached, redirect objtool flow to
	 * the end of the original alt_group.
	 */
	if (alt_group && insn == alt_group->last_insn && alt_group->orig_group)
		return next_insn_same_sec(file, alt_group->orig_group->last_insn);

	return next_insn_same_sec(file, insn);
}

/*
 * Follow the branch starting at the given instruction, and recursively follow
 * any other branches (jumps).  Meanwhile, track the frame pointer state at
 * each instruction and validate all the rules described in
 * tools/objtool/Documentation/stack-validation.txt.
 */
static int validate_branch(struct objtool_file *file, struct symbol *func,
			   struct instruction *insn, struct insn_state state)
{
	struct alternative *alt;
	struct instruction *next_insn, *prev_insn = NULL;
	struct section *sec;
	u8 visited;
	int ret;

	sec = insn->sec;

	while (1) {
		next_insn = next_insn_to_validate(file, insn);

		if (file->c_file && func && insn->func && func != insn->func->pfunc) {
			WARN("%s() falls through to next function %s()",
			     func->name, insn->func->name);
			return 1;
		}

		if (func && insn->ignore) {
			WARN_FUNC("BUG: why am I validating an ignored function?",
				  sec, insn->offset);
			return 1;
		}

		/* One visited bit per uaccess state (0/1), so each path is walked at most twice. */
		visited = 1 << state.uaccess;
		if (insn->visited) {
			/* Re-converging paths must agree on the CFI state. */
			if (!insn->hint && !insn_cfi_match(insn, &state.cfi))
				return 1;

			if (insn->visited & visited)
				return 0;
		} else {
			nr_insns_visited++;
		}

		if (state.noinstr)
			state.instr += insn->instr;

		if (insn->hint) {
			/* An unwind hint overrides the computed state. */
			state.cfi = *insn->cfi;
		} else {
			/* XXX track if we actually changed state.cfi */

			/* Share the previous insn's CFI object when nothing changed. */
			if (prev_insn && !cficmp(prev_insn->cfi, &state.cfi)) {
				insn->cfi = prev_insn->cfi;
				nr_cfi_reused++;
			} else {
				insn->cfi = cfi_hash_find_or_add(&state.cfi);
			}
		}

		insn->visited |= visited;

		if (propagate_alt_cfi(file, insn))
			return 1;

		/* Recurse into each alternative stream before the original. */
		if (!insn->ignore_alts && !list_empty(&insn->alts)) {
			bool skip_orig = false;

			list_for_each_entry(alt, &insn->alts, list) {
				if (alt->skip_orig)
					skip_orig = true;

				ret = validate_branch(file, func, alt->insn, state);
				if (ret) {
					if (backtrace)
						BT_FUNC("(alt)", insn);
					return ret;
				}
			}

			if (skip_orig)
				return 0;
		}

		if (handle_insn_ops(insn, next_insn, &state))
			return 1;

		switch (insn->type) {

		case INSN_RETURN:
			return validate_return(func, insn, &state);

		case INSN_CALL:
		case INSN_CALL_DYNAMIC:
			ret = validate_call(file, insn, &state);
			if (ret)
				return ret;

			if (!no_fp && func && !is_fentry_call(insn) &&
			    !has_valid_stack_frame(&state)) {
				WARN_FUNC("call without frame pointer save/setup",
					  sec, insn->offset);
				return 1;
			}

			/* A call to a __noreturn function ends this path. */
			if (dead_end_function(file, insn->call_dest))
				return 0;

			break;

		case INSN_JUMP_CONDITIONAL:
		case INSN_JUMP_UNCONDITIONAL:
			if (is_sibling_call(insn)) {
				ret = validate_sibling_call(file, insn, &state);
				if (ret)
					return ret;

			} else if (insn->jump_dest) {
				ret = validate_branch(file, func,
						      insn->jump_dest, state);
				if (ret) {
					if (backtrace)
						BT_FUNC("(branch)", insn);
					return ret;
				}
			}

			if (insn->type == INSN_JUMP_UNCONDITIONAL)
				return 0;

			break;

		case INSN_JUMP_DYNAMIC:
		case INSN_JUMP_DYNAMIC_CONDITIONAL:
			if (is_sibling_call(insn)) {
				ret = validate_sibling_call(file, insn, &state);
				if (ret)
					return ret;
			}

			if (insn->type == INSN_JUMP_DYNAMIC)
				return 0;

			break;

		case INSN_CONTEXT_SWITCH:
			if (func && (!next_insn || !next_insn->hint)) {
				WARN_FUNC("unsupported instruction in callable function",
					  sec, insn->offset);
				return 1;
			}
			return 0;

		case INSN_STAC:
			if (state.uaccess) {
				WARN_FUNC("recursive UACCESS enable", sec, insn->offset);
				return 1;
			}

			state.uaccess = true;
			break;

		case INSN_CLAC:
			if (!state.uaccess && func) {
				WARN_FUNC("redundant UACCESS disable", sec, insn->offset);
				return 1;
			}

			if (func_uaccess_safe(func) && !state.uaccess_stack) {
				WARN_FUNC("UACCESS-safe disables UACCESS", sec, insn->offset);
				return 1;
			}

			state.uaccess = false;
			break;

		case INSN_STD:
			if (state.df) {
				WARN_FUNC("recursive STD", sec, insn->offset);
				return 1;
			}

			state.df = true;
			break;

		case INSN_CLD:
			if (!state.df && func) {
				WARN_FUNC("redundant CLD", sec, insn->offset);
				return 1;
			}

			state.df = false;
			break;

		default:
			break;
		}

		if (insn->dead_end)
			return 0;

		if (!next_insn) {
			if (state.cfi.cfa.base == CFI_UNDEFINED)
				return 0;
			WARN("%s: unexpected end of section", sec->name);
			return 1;
		}

		prev_insn = insn;
		insn = next_insn;
	}

	return 0;
}

/*
 * Start validation from every unwind-hinted instruction that hasn't been
 * visited yet.  When @sec is NULL, the whole file is scanned.  Returns the
 * number of warnings found.
 */
static int validate_unwind_hints(struct objtool_file *file, struct section *sec)
{
	struct instruction *insn;
	struct insn_state state;
	int ret, warnings = 0;

	if (!file->hints)
		return 0;

	init_insn_state(&state, sec);

	if (sec) {
		insn = find_insn(file, sec, 0);
		if (!insn)
			return 0;
	} else {
		insn = list_first_entry(&file->insn_list, typeof(*insn), list);
	}

	while (&insn->list != &file->insn_list && (!sec || insn->sec == sec)) {
		if (insn->hint && !insn->visited && !insn->ignore) {
			ret = validate_branch(file, insn->func, insn, state);
			if (ret && backtrace)
				BT_FUNC("<=== (hint)", insn);
			warnings += ret;
		}

		insn = list_next_entry(insn, list);
	}

	return warnings;
}

/*
 * In a retpoline build, warn about any indirect jump or call that isn't
 * explicitly marked retpoline-safe.
 */
static int validate_retpoline(struct objtool_file *file)
{
	struct instruction *insn;
	int warnings = 0;

	for_each_insn(file, insn) {
		if (insn->type != INSN_JUMP_DYNAMIC &&
		    insn->type != INSN_CALL_DYNAMIC)
			continue;

		if (insn->retpoline_safe)
			continue;

		/*
		 * .init.text code is ran before userspace and thus doesn't
		 * strictly need retpolines, except for modules which are
		 * loaded late, they very much do need retpoline in their
		 * .init.text
		 */
		if (!strcmp(insn->sec->name, ".init.text") && !module)
			continue;

		WARN_FUNC("indirect %s found in RETPOLINE build",
			  insn->sec, insn->offset,
			  insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call");

		warnings++;
	}

	return warnings;
}

/* True if @insn is a call to KASAN's no-return handler. */
static bool is_kasan_insn(struct instruction *insn)
{
	return (insn->type == INSN_CALL &&
		!strcmp(insn->call_dest->name, "__asan_handle_no_return"));
}

/* True if @insn is a call to UBSAN's builtin-unreachable handler. */
static bool is_ubsan_insn(struct instruction *insn)
{
	return (insn->type == INSN_CALL &&
		!strcmp(insn->call_dest->name,
			"__ubsan_handle_builtin_unreachable"));
}

/*
 * Decide whether an unvisited (unreachable) instruction is a known-benign
 * pattern that shouldn't be warned about.
 */
static bool ignore_unreachable_insn(struct objtool_file *file, struct instruction *insn)
{
	int i;
	struct instruction *prev_insn;

	if (insn->ignore || insn->type == INSN_NOP)
		return true;

	/*
	 * Ignore any unused exceptions. This can happen when a whitelisted
	 * function has an exception table entry.
	 *
	 * Also ignore alternative replacement instructions. This can happen
	 * when a whitelisted function uses one of the ALTERNATIVE macros.
	 */
	if (!strcmp(insn->sec->name, ".fixup") ||
	    !strcmp(insn->sec->name, ".altinstr_replacement") ||
	    !strcmp(insn->sec->name, ".altinstr_aux"))
		return true;

	if (!insn->func)
		return false;

	if (insn->func->static_call_tramp)
		return true;

	/*
	 * CONFIG_UBSAN_TRAP inserts a UD2 when it sees
	 * __builtin_unreachable().  The BUG() macro has an unreachable() after
	 * the UD2, which causes GCC's undefined trap logic to emit another UD2
	 * (or occasionally a JMP to UD2).
	 *
	 * It may also insert a UD2 after calling a __noreturn function.
	 */
	prev_insn = list_prev_entry(insn, list);
	if ((prev_insn->dead_end || dead_end_function(file, prev_insn->call_dest)) &&
	    (insn->type == INSN_BUG ||
	     (insn->type == INSN_JUMP_UNCONDITIONAL &&
	      insn->jump_dest && insn->jump_dest->type == INSN_BUG)))
		return true;

	/*
	 * Check if this (or a subsequent) instruction is related to
	 * CONFIG_UBSAN or CONFIG_KASAN.
	 *
	 * End the search at 5 instructions to avoid going into the weeds.
	 */
	for (i = 0; i < 5; i++) {

		if (is_kasan_insn(insn) || is_ubsan_insn(insn))
			return true;

		if (insn->type == INSN_JUMP_UNCONDITIONAL) {
			/* Only follow jumps that stay within the function. */
			if (insn->jump_dest &&
			    insn->jump_dest->func == insn->func) {
				insn = insn->jump_dest;
				continue;
			}

			break;
		}

		/* Don't walk past the end of the function. */
		if (insn->offset + insn->len >= insn->func->offset + insn->func->len)
			break;

		insn = list_next_entry(insn, list);
	}

	return false;
}

/*
 * Validate a single STT_FUNC symbol by following all code paths from its
 * entry point.  Aliases and sub-functions are skipped; they're covered via
 * their canonical/parent symbol.
 */
static int validate_symbol(struct objtool_file *file, struct section *sec,
			   struct symbol *sym, struct insn_state *state)
{
	struct instruction *insn;
	int ret;

	if (!sym->len) {
		WARN("%s() is missing an ELF size annotation", sym->name);
		return 1;
	}

	if (sym->pfunc != sym || sym->alias != sym)
		return 0;

	insn = find_insn(file, sec, sym->offset);
	if (!insn || insn->ignore || insn->visited)
		return 0;

	state->uaccess = sym->uaccess_safe;

	ret = validate_branch(file, insn->func, insn, *state);
	if (ret && backtrace)
		BT_FUNC("<=== (sym)", insn);
	return ret;
}

/* Validate every function symbol in @sec; returns the warning count. */
static int validate_section(struct objtool_file *file, struct section *sec)
{
	struct insn_state state;
	struct symbol *func;
	int warnings = 0;

	list_for_each_entry(func, &sec->symbol_list, list) {
		if (func->type != STT_FUNC)
			continue;

		init_insn_state(&state, sec);
set_func_state(&state.cfi); 3400 3401 warnings += validate_symbol(file, sec, func, &state); 3402 } 3403 3404 return warnings; 3405 } 3406 3407 static int validate_vmlinux_functions(struct objtool_file *file) 3408 { 3409 struct section *sec; 3410 int warnings = 0; 3411 3412 sec = find_section_by_name(file->elf, ".noinstr.text"); 3413 if (sec) { 3414 warnings += validate_section(file, sec); 3415 warnings += validate_unwind_hints(file, sec); 3416 } 3417 3418 sec = find_section_by_name(file->elf, ".entry.text"); 3419 if (sec) { 3420 warnings += validate_section(file, sec); 3421 warnings += validate_unwind_hints(file, sec); 3422 } 3423 3424 return warnings; 3425 } 3426 3427 static int validate_functions(struct objtool_file *file) 3428 { 3429 struct section *sec; 3430 int warnings = 0; 3431 3432 for_each_sec(file, sec) { 3433 if (!(sec->sh.sh_flags & SHF_EXECINSTR)) 3434 continue; 3435 3436 warnings += validate_section(file, sec); 3437 } 3438 3439 return warnings; 3440 } 3441 3442 static int validate_reachable_instructions(struct objtool_file *file) 3443 { 3444 struct instruction *insn; 3445 3446 if (file->ignore_unreachables) 3447 return 0; 3448 3449 for_each_insn(file, insn) { 3450 if (insn->visited || ignore_unreachable_insn(file, insn)) 3451 continue; 3452 3453 WARN_FUNC("unreachable instruction", insn->sec, insn->offset); 3454 return 1; 3455 } 3456 3457 return 0; 3458 } 3459 3460 int check(struct objtool_file *file) 3461 { 3462 int ret, warnings = 0; 3463 3464 arch_initial_func_cfi_state(&initial_func_cfi); 3465 init_cfi_state(&init_cfi); 3466 init_cfi_state(&func_cfi); 3467 set_func_state(&func_cfi); 3468 3469 if (!cfi_hash_alloc(1UL << (file->elf->symbol_bits - 3))) 3470 goto out; 3471 3472 cfi_hash_add(&init_cfi); 3473 cfi_hash_add(&func_cfi); 3474 3475 ret = decode_sections(file); 3476 if (ret < 0) 3477 goto out; 3478 3479 warnings += ret; 3480 3481 if (list_empty(&file->insn_list)) 3482 goto out; 3483 3484 if (vmlinux && !validate_dup) { 3485 ret = 
validate_vmlinux_functions(file); 3486 if (ret < 0) 3487 goto out; 3488 3489 warnings += ret; 3490 goto out; 3491 } 3492 3493 if (retpoline) { 3494 ret = validate_retpoline(file); 3495 if (ret < 0) 3496 return ret; 3497 warnings += ret; 3498 } 3499 3500 ret = validate_functions(file); 3501 if (ret < 0) 3502 goto out; 3503 warnings += ret; 3504 3505 ret = validate_unwind_hints(file, NULL); 3506 if (ret < 0) 3507 goto out; 3508 warnings += ret; 3509 3510 if (!warnings) { 3511 ret = validate_reachable_instructions(file); 3512 if (ret < 0) 3513 goto out; 3514 warnings += ret; 3515 } 3516 3517 ret = create_static_call_sections(file); 3518 if (ret < 0) 3519 goto out; 3520 warnings += ret; 3521 3522 if (retpoline) { 3523 ret = create_retpoline_sites_sections(file); 3524 if (ret < 0) 3525 goto out; 3526 warnings += ret; 3527 } 3528 3529 if (mcount) { 3530 ret = create_mcount_loc_sections(file); 3531 if (ret < 0) 3532 goto out; 3533 warnings += ret; 3534 } 3535 3536 if (stats) { 3537 printf("nr_insns_visited: %ld\n", nr_insns_visited); 3538 printf("nr_cfi: %ld\n", nr_cfi); 3539 printf("nr_cfi_reused: %ld\n", nr_cfi_reused); 3540 printf("nr_cfi_cache: %ld\n", nr_cfi_cache); 3541 } 3542 3543 out: 3544 /* 3545 * For now, don't fail the kernel build on fatal warnings. These 3546 * errors are still fairly common due to the growing matrix of 3547 * supported toolchains and their recent pace of change. 3548 */ 3549 return 0; 3550 } 3551