1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com> 4 */ 5 6 #include <string.h> 7 #include <stdlib.h> 8 #include <sys/mman.h> 9 10 #include <arch/elf.h> 11 #include <objtool/builtin.h> 12 #include <objtool/cfi.h> 13 #include <objtool/arch.h> 14 #include <objtool/check.h> 15 #include <objtool/special.h> 16 #include <objtool/warn.h> 17 #include <objtool/endianness.h> 18 19 #include <linux/objtool.h> 20 #include <linux/hashtable.h> 21 #include <linux/kernel.h> 22 #include <linux/static_call_types.h> 23 24 struct alternative { 25 struct list_head list; 26 struct instruction *insn; 27 bool skip_orig; 28 }; 29 30 static unsigned long nr_cfi, nr_cfi_reused, nr_cfi_cache; 31 32 static struct cfi_init_state initial_func_cfi; 33 static struct cfi_state init_cfi; 34 static struct cfi_state func_cfi; 35 36 struct instruction *find_insn(struct objtool_file *file, 37 struct section *sec, unsigned long offset) 38 { 39 struct instruction *insn; 40 41 hash_for_each_possible(file->insn_hash, insn, hash, sec_offset_hash(sec, offset)) { 42 if (insn->sec == sec && insn->offset == offset) 43 return insn; 44 } 45 46 return NULL; 47 } 48 49 static struct instruction *next_insn_same_sec(struct objtool_file *file, 50 struct instruction *insn) 51 { 52 struct instruction *next = list_next_entry(insn, list); 53 54 if (!next || &next->list == &file->insn_list || next->sec != insn->sec) 55 return NULL; 56 57 return next; 58 } 59 60 static struct instruction *next_insn_same_func(struct objtool_file *file, 61 struct instruction *insn) 62 { 63 struct instruction *next = list_next_entry(insn, list); 64 struct symbol *func = insn->func; 65 66 if (!func) 67 return NULL; 68 69 if (&next->list != &file->insn_list && next->func == func) 70 return next; 71 72 /* Check if we're already in the subfunction: */ 73 if (func == func->cfunc) 74 return NULL; 75 76 /* Move to the subfunction: */ 77 return find_insn(file, func->cfunc->sec, 
func->cfunc->offset); 78 } 79 80 static struct instruction *prev_insn_same_sym(struct objtool_file *file, 81 struct instruction *insn) 82 { 83 struct instruction *prev = list_prev_entry(insn, list); 84 85 if (&prev->list != &file->insn_list && prev->func == insn->func) 86 return prev; 87 88 return NULL; 89 } 90 91 #define func_for_each_insn(file, func, insn) \ 92 for (insn = find_insn(file, func->sec, func->offset); \ 93 insn; \ 94 insn = next_insn_same_func(file, insn)) 95 96 #define sym_for_each_insn(file, sym, insn) \ 97 for (insn = find_insn(file, sym->sec, sym->offset); \ 98 insn && &insn->list != &file->insn_list && \ 99 insn->sec == sym->sec && \ 100 insn->offset < sym->offset + sym->len; \ 101 insn = list_next_entry(insn, list)) 102 103 #define sym_for_each_insn_continue_reverse(file, sym, insn) \ 104 for (insn = list_prev_entry(insn, list); \ 105 &insn->list != &file->insn_list && \ 106 insn->sec == sym->sec && insn->offset >= sym->offset; \ 107 insn = list_prev_entry(insn, list)) 108 109 #define sec_for_each_insn_from(file, insn) \ 110 for (; insn; insn = next_insn_same_sec(file, insn)) 111 112 #define sec_for_each_insn_continue(file, insn) \ 113 for (insn = next_insn_same_sec(file, insn); insn; \ 114 insn = next_insn_same_sec(file, insn)) 115 116 static bool is_jump_table_jump(struct instruction *insn) 117 { 118 struct alt_group *alt_group = insn->alt_group; 119 120 if (insn->jump_table) 121 return true; 122 123 /* Retpoline alternative for a jump table? */ 124 return alt_group && alt_group->orig_group && 125 alt_group->orig_group->first_insn->jump_table; 126 } 127 128 static bool is_sibling_call(struct instruction *insn) 129 { 130 /* 131 * Assume only ELF functions can make sibling calls. This ensures 132 * sibling call detection consistency between vmlinux.o and individual 133 * objects. 134 */ 135 if (!insn->func) 136 return false; 137 138 /* An indirect jump is either a sibling call or a jump to a table. 
*/ 139 if (insn->type == INSN_JUMP_DYNAMIC) 140 return !is_jump_table_jump(insn); 141 142 /* add_jump_destinations() sets insn->call_dest for sibling calls. */ 143 return (is_static_jump(insn) && insn->call_dest); 144 } 145 146 /* 147 * This checks to see if the given function is a "noreturn" function. 148 * 149 * For global functions which are outside the scope of this object file, we 150 * have to keep a manual list of them. 151 * 152 * For local functions, we have to detect them manually by simply looking for 153 * the lack of a return instruction. 154 */ 155 static bool __dead_end_function(struct objtool_file *file, struct symbol *func, 156 int recursion) 157 { 158 int i; 159 struct instruction *insn; 160 bool empty = true; 161 162 /* 163 * Unfortunately these have to be hard coded because the noreturn 164 * attribute isn't provided in ELF data. 165 */ 166 static const char * const global_noreturns[] = { 167 "__stack_chk_fail", 168 "panic", 169 "do_exit", 170 "do_task_dead", 171 "kthread_exit", 172 "make_task_dead", 173 "__module_put_and_kthread_exit", 174 "kthread_complete_and_exit", 175 "__reiserfs_panic", 176 "lbug_with_loc", 177 "fortify_panic", 178 "usercopy_abort", 179 "machine_real_restart", 180 "rewind_stack_and_make_dead", 181 "kunit_try_catch_throw", 182 "xen_start_kernel", 183 "cpu_bringup_and_idle", 184 }; 185 186 if (!func) 187 return false; 188 189 if (func->bind == STB_WEAK) 190 return false; 191 192 if (func->bind == STB_GLOBAL) 193 for (i = 0; i < ARRAY_SIZE(global_noreturns); i++) 194 if (!strcmp(func->name, global_noreturns[i])) 195 return true; 196 197 if (!func->len) 198 return false; 199 200 insn = find_insn(file, func->sec, func->offset); 201 if (!insn->func) 202 return false; 203 204 func_for_each_insn(file, func, insn) { 205 empty = false; 206 207 if (insn->type == INSN_RETURN) 208 return false; 209 } 210 211 if (empty) 212 return false; 213 214 /* 215 * A function can have a sibling call instead of a return. 
In that 216 * case, the function's dead-end status depends on whether the target 217 * of the sibling call returns. 218 */ 219 func_for_each_insn(file, func, insn) { 220 if (is_sibling_call(insn)) { 221 struct instruction *dest = insn->jump_dest; 222 223 if (!dest) 224 /* sibling call to another file */ 225 return false; 226 227 /* local sibling call */ 228 if (recursion == 5) { 229 /* 230 * Infinite recursion: two functions have 231 * sibling calls to each other. This is a very 232 * rare case. It means they aren't dead ends. 233 */ 234 return false; 235 } 236 237 return __dead_end_function(file, dest->func, recursion+1); 238 } 239 } 240 241 return true; 242 } 243 244 static bool dead_end_function(struct objtool_file *file, struct symbol *func) 245 { 246 return __dead_end_function(file, func, 0); 247 } 248 249 static void init_cfi_state(struct cfi_state *cfi) 250 { 251 int i; 252 253 for (i = 0; i < CFI_NUM_REGS; i++) { 254 cfi->regs[i].base = CFI_UNDEFINED; 255 cfi->vals[i].base = CFI_UNDEFINED; 256 } 257 cfi->cfa.base = CFI_UNDEFINED; 258 cfi->drap_reg = CFI_UNDEFINED; 259 cfi->drap_offset = -1; 260 } 261 262 static void init_insn_state(struct insn_state *state, struct section *sec) 263 { 264 memset(state, 0, sizeof(*state)); 265 init_cfi_state(&state->cfi); 266 267 /* 268 * We need the full vmlinux for noinstr validation, otherwise we can 269 * not correctly determine insn->call_dest->sec (external symbols do 270 * not have a section). 
271 */ 272 if (vmlinux && noinstr && sec) 273 state->noinstr = sec->noinstr; 274 } 275 276 static struct cfi_state *cfi_alloc(void) 277 { 278 struct cfi_state *cfi = calloc(sizeof(struct cfi_state), 1); 279 if (!cfi) { 280 WARN("calloc failed"); 281 exit(1); 282 } 283 nr_cfi++; 284 return cfi; 285 } 286 287 static int cfi_bits; 288 static struct hlist_head *cfi_hash; 289 290 static inline bool cficmp(struct cfi_state *cfi1, struct cfi_state *cfi2) 291 { 292 return memcmp((void *)cfi1 + sizeof(cfi1->hash), 293 (void *)cfi2 + sizeof(cfi2->hash), 294 sizeof(struct cfi_state) - sizeof(struct hlist_node)); 295 } 296 297 static inline u32 cfi_key(struct cfi_state *cfi) 298 { 299 return jhash((void *)cfi + sizeof(cfi->hash), 300 sizeof(*cfi) - sizeof(cfi->hash), 0); 301 } 302 303 static struct cfi_state *cfi_hash_find_or_add(struct cfi_state *cfi) 304 { 305 struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)]; 306 struct cfi_state *obj; 307 308 hlist_for_each_entry(obj, head, hash) { 309 if (!cficmp(cfi, obj)) { 310 nr_cfi_cache++; 311 return obj; 312 } 313 } 314 315 obj = cfi_alloc(); 316 *obj = *cfi; 317 hlist_add_head(&obj->hash, head); 318 319 return obj; 320 } 321 322 static void cfi_hash_add(struct cfi_state *cfi) 323 { 324 struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)]; 325 326 hlist_add_head(&cfi->hash, head); 327 } 328 329 static void *cfi_hash_alloc(unsigned long size) 330 { 331 cfi_bits = max(10, ilog2(size)); 332 cfi_hash = mmap(NULL, sizeof(struct hlist_head) << cfi_bits, 333 PROT_READ|PROT_WRITE, 334 MAP_PRIVATE|MAP_ANON, -1, 0); 335 if (cfi_hash == (void *)-1L) { 336 WARN("mmap fail cfi_hash"); 337 cfi_hash = NULL; 338 } else if (stats) { 339 printf("cfi_bits: %d\n", cfi_bits); 340 } 341 342 return cfi_hash; 343 } 344 345 static unsigned long nr_insns; 346 static unsigned long nr_insns_visited; 347 348 /* 349 * Call the arch-specific instruction decoder for all the instructions and add 350 * them to the global 
instruction list. 351 */ 352 static int decode_instructions(struct objtool_file *file) 353 { 354 struct section *sec; 355 struct symbol *func; 356 unsigned long offset; 357 struct instruction *insn; 358 int ret; 359 360 for_each_sec(file, sec) { 361 362 if (!(sec->sh.sh_flags & SHF_EXECINSTR)) 363 continue; 364 365 if (strcmp(sec->name, ".altinstr_replacement") && 366 strcmp(sec->name, ".altinstr_aux") && 367 strncmp(sec->name, ".discard.", 9)) 368 sec->text = true; 369 370 if (!strcmp(sec->name, ".noinstr.text") || 371 !strcmp(sec->name, ".entry.text")) 372 sec->noinstr = true; 373 374 for (offset = 0; offset < sec->sh.sh_size; offset += insn->len) { 375 insn = malloc(sizeof(*insn)); 376 if (!insn) { 377 WARN("malloc failed"); 378 return -1; 379 } 380 memset(insn, 0, sizeof(*insn)); 381 INIT_LIST_HEAD(&insn->alts); 382 INIT_LIST_HEAD(&insn->stack_ops); 383 384 insn->sec = sec; 385 insn->offset = offset; 386 387 ret = arch_decode_instruction(file, sec, offset, 388 sec->sh.sh_size - offset, 389 &insn->len, &insn->type, 390 &insn->immediate, 391 &insn->stack_ops); 392 if (ret) 393 goto err; 394 395 hash_add(file->insn_hash, &insn->hash, sec_offset_hash(sec, insn->offset)); 396 list_add_tail(&insn->list, &file->insn_list); 397 nr_insns++; 398 } 399 400 list_for_each_entry(func, &sec->symbol_list, list) { 401 if (func->type != STT_FUNC || func->alias != func) 402 continue; 403 404 if (!find_insn(file, sec, func->offset)) { 405 WARN("%s(): can't find starting instruction", 406 func->name); 407 return -1; 408 } 409 410 sym_for_each_insn(file, func, insn) 411 insn->func = func; 412 } 413 } 414 415 if (stats) 416 printf("nr_insns: %lu\n", nr_insns); 417 418 return 0; 419 420 err: 421 free(insn); 422 return ret; 423 } 424 425 /* 426 * Read the pv_ops[] .data table to find the static initialized values. 
427 */ 428 static int add_pv_ops(struct objtool_file *file, const char *symname) 429 { 430 struct symbol *sym, *func; 431 unsigned long off, end; 432 struct reloc *rel; 433 int idx; 434 435 sym = find_symbol_by_name(file->elf, symname); 436 if (!sym) 437 return 0; 438 439 off = sym->offset; 440 end = off + sym->len; 441 for (;;) { 442 rel = find_reloc_by_dest_range(file->elf, sym->sec, off, end - off); 443 if (!rel) 444 break; 445 446 func = rel->sym; 447 if (func->type == STT_SECTION) 448 func = find_symbol_by_offset(rel->sym->sec, rel->addend); 449 450 idx = (rel->offset - sym->offset) / sizeof(unsigned long); 451 452 objtool_pv_add(file, idx, func); 453 454 off = rel->offset + 1; 455 if (off > end) 456 break; 457 } 458 459 return 0; 460 } 461 462 /* 463 * Allocate and initialize file->pv_ops[]. 464 */ 465 static int init_pv_ops(struct objtool_file *file) 466 { 467 static const char *pv_ops_tables[] = { 468 "pv_ops", 469 "xen_cpu_ops", 470 "xen_irq_ops", 471 "xen_mmu_ops", 472 NULL, 473 }; 474 const char *pv_ops; 475 struct symbol *sym; 476 int idx, nr; 477 478 if (!noinstr) 479 return 0; 480 481 file->pv_ops = NULL; 482 483 sym = find_symbol_by_name(file->elf, "pv_ops"); 484 if (!sym) 485 return 0; 486 487 nr = sym->len / sizeof(unsigned long); 488 file->pv_ops = calloc(sizeof(struct pv_state), nr); 489 if (!file->pv_ops) 490 return -1; 491 492 for (idx = 0; idx < nr; idx++) 493 INIT_LIST_HEAD(&file->pv_ops[idx].targets); 494 495 for (idx = 0; (pv_ops = pv_ops_tables[idx]); idx++) 496 add_pv_ops(file, pv_ops); 497 498 return 0; 499 } 500 501 static struct instruction *find_last_insn(struct objtool_file *file, 502 struct section *sec) 503 { 504 struct instruction *insn = NULL; 505 unsigned int offset; 506 unsigned int end = (sec->sh.sh_size > 10) ? 
sec->sh.sh_size - 10 : 0; 507 508 for (offset = sec->sh.sh_size - 1; offset >= end && !insn; offset--) 509 insn = find_insn(file, sec, offset); 510 511 return insn; 512 } 513 514 /* 515 * Mark "ud2" instructions and manually annotated dead ends. 516 */ 517 static int add_dead_ends(struct objtool_file *file) 518 { 519 struct section *sec; 520 struct reloc *reloc; 521 struct instruction *insn; 522 523 /* 524 * By default, "ud2" is a dead end unless otherwise annotated, because 525 * GCC 7 inserts it for certain divide-by-zero cases. 526 */ 527 for_each_insn(file, insn) 528 if (insn->type == INSN_BUG) 529 insn->dead_end = true; 530 531 /* 532 * Check for manually annotated dead ends. 533 */ 534 sec = find_section_by_name(file->elf, ".rela.discard.unreachable"); 535 if (!sec) 536 goto reachable; 537 538 list_for_each_entry(reloc, &sec->reloc_list, list) { 539 if (reloc->sym->type != STT_SECTION) { 540 WARN("unexpected relocation symbol type in %s", sec->name); 541 return -1; 542 } 543 insn = find_insn(file, reloc->sym->sec, reloc->addend); 544 if (insn) 545 insn = list_prev_entry(insn, list); 546 else if (reloc->addend == reloc->sym->sec->sh.sh_size) { 547 insn = find_last_insn(file, reloc->sym->sec); 548 if (!insn) { 549 WARN("can't find unreachable insn at %s+0x%x", 550 reloc->sym->sec->name, reloc->addend); 551 return -1; 552 } 553 } else { 554 WARN("can't find unreachable insn at %s+0x%x", 555 reloc->sym->sec->name, reloc->addend); 556 return -1; 557 } 558 559 insn->dead_end = true; 560 } 561 562 reachable: 563 /* 564 * These manually annotated reachable checks are needed for GCC 4.4, 565 * where the Linux unreachable() macro isn't supported. In that case 566 * GCC doesn't know the "ud2" is fatal, so it generates code as if it's 567 * not a dead end. 
568 */ 569 sec = find_section_by_name(file->elf, ".rela.discard.reachable"); 570 if (!sec) 571 return 0; 572 573 list_for_each_entry(reloc, &sec->reloc_list, list) { 574 if (reloc->sym->type != STT_SECTION) { 575 WARN("unexpected relocation symbol type in %s", sec->name); 576 return -1; 577 } 578 insn = find_insn(file, reloc->sym->sec, reloc->addend); 579 if (insn) 580 insn = list_prev_entry(insn, list); 581 else if (reloc->addend == reloc->sym->sec->sh.sh_size) { 582 insn = find_last_insn(file, reloc->sym->sec); 583 if (!insn) { 584 WARN("can't find reachable insn at %s+0x%x", 585 reloc->sym->sec->name, reloc->addend); 586 return -1; 587 } 588 } else { 589 WARN("can't find reachable insn at %s+0x%x", 590 reloc->sym->sec->name, reloc->addend); 591 return -1; 592 } 593 594 insn->dead_end = false; 595 } 596 597 return 0; 598 } 599 600 static int create_static_call_sections(struct objtool_file *file) 601 { 602 struct section *sec; 603 struct static_call_site *site; 604 struct instruction *insn; 605 struct symbol *key_sym; 606 char *key_name, *tmp; 607 int idx; 608 609 sec = find_section_by_name(file->elf, ".static_call_sites"); 610 if (sec) { 611 INIT_LIST_HEAD(&file->static_call_list); 612 WARN("file already has .static_call_sites section, skipping"); 613 return 0; 614 } 615 616 if (list_empty(&file->static_call_list)) 617 return 0; 618 619 idx = 0; 620 list_for_each_entry(insn, &file->static_call_list, call_node) 621 idx++; 622 623 sec = elf_create_section(file->elf, ".static_call_sites", SHF_WRITE, 624 sizeof(struct static_call_site), idx); 625 if (!sec) 626 return -1; 627 628 idx = 0; 629 list_for_each_entry(insn, &file->static_call_list, call_node) { 630 631 site = (struct static_call_site *)sec->data->d_buf + idx; 632 memset(site, 0, sizeof(struct static_call_site)); 633 634 /* populate reloc for 'addr' */ 635 if (elf_add_reloc_to_insn(file->elf, sec, 636 idx * sizeof(struct static_call_site), 637 R_X86_64_PC32, 638 insn->sec, insn->offset)) 639 return -1; 640 
641 /* find key symbol */ 642 key_name = strdup(insn->call_dest->name); 643 if (!key_name) { 644 perror("strdup"); 645 return -1; 646 } 647 if (strncmp(key_name, STATIC_CALL_TRAMP_PREFIX_STR, 648 STATIC_CALL_TRAMP_PREFIX_LEN)) { 649 WARN("static_call: trampoline name malformed: %s", key_name); 650 return -1; 651 } 652 tmp = key_name + STATIC_CALL_TRAMP_PREFIX_LEN - STATIC_CALL_KEY_PREFIX_LEN; 653 memcpy(tmp, STATIC_CALL_KEY_PREFIX_STR, STATIC_CALL_KEY_PREFIX_LEN); 654 655 key_sym = find_symbol_by_name(file->elf, tmp); 656 if (!key_sym) { 657 if (!module) { 658 WARN("static_call: can't find static_call_key symbol: %s", tmp); 659 return -1; 660 } 661 662 /* 663 * For modules(), the key might not be exported, which 664 * means the module can make static calls but isn't 665 * allowed to change them. 666 * 667 * In that case we temporarily set the key to be the 668 * trampoline address. This is fixed up in 669 * static_call_add_module(). 670 */ 671 key_sym = insn->call_dest; 672 } 673 free(key_name); 674 675 /* populate reloc for 'key' */ 676 if (elf_add_reloc(file->elf, sec, 677 idx * sizeof(struct static_call_site) + 4, 678 R_X86_64_PC32, key_sym, 679 is_sibling_call(insn) * STATIC_CALL_SITE_TAIL)) 680 return -1; 681 682 idx++; 683 } 684 685 return 0; 686 } 687 688 static int create_retpoline_sites_sections(struct objtool_file *file) 689 { 690 struct instruction *insn; 691 struct section *sec; 692 int idx; 693 694 sec = find_section_by_name(file->elf, ".retpoline_sites"); 695 if (sec) { 696 WARN("file already has .retpoline_sites, skipping"); 697 return 0; 698 } 699 700 idx = 0; 701 list_for_each_entry(insn, &file->retpoline_call_list, call_node) 702 idx++; 703 704 if (!idx) 705 return 0; 706 707 sec = elf_create_section(file->elf, ".retpoline_sites", 0, 708 sizeof(int), idx); 709 if (!sec) { 710 WARN("elf_create_section: .retpoline_sites"); 711 return -1; 712 } 713 714 idx = 0; 715 list_for_each_entry(insn, &file->retpoline_call_list, call_node) { 716 717 int *site = 
(int *)sec->data->d_buf + idx; 718 *site = 0; 719 720 if (elf_add_reloc_to_insn(file->elf, sec, 721 idx * sizeof(int), 722 R_X86_64_PC32, 723 insn->sec, insn->offset)) { 724 WARN("elf_add_reloc_to_insn: .retpoline_sites"); 725 return -1; 726 } 727 728 idx++; 729 } 730 731 return 0; 732 } 733 734 static int create_mcount_loc_sections(struct objtool_file *file) 735 { 736 struct section *sec; 737 unsigned long *loc; 738 struct instruction *insn; 739 int idx; 740 741 sec = find_section_by_name(file->elf, "__mcount_loc"); 742 if (sec) { 743 INIT_LIST_HEAD(&file->mcount_loc_list); 744 WARN("file already has __mcount_loc section, skipping"); 745 return 0; 746 } 747 748 if (list_empty(&file->mcount_loc_list)) 749 return 0; 750 751 idx = 0; 752 list_for_each_entry(insn, &file->mcount_loc_list, call_node) 753 idx++; 754 755 sec = elf_create_section(file->elf, "__mcount_loc", 0, sizeof(unsigned long), idx); 756 if (!sec) 757 return -1; 758 759 idx = 0; 760 list_for_each_entry(insn, &file->mcount_loc_list, call_node) { 761 762 loc = (unsigned long *)sec->data->d_buf + idx; 763 memset(loc, 0, sizeof(unsigned long)); 764 765 if (elf_add_reloc_to_insn(file->elf, sec, 766 idx * sizeof(unsigned long), 767 R_X86_64_64, 768 insn->sec, insn->offset)) 769 return -1; 770 771 idx++; 772 } 773 774 return 0; 775 } 776 777 /* 778 * Warnings shouldn't be reported for ignored functions. 
779 */ 780 static void add_ignores(struct objtool_file *file) 781 { 782 struct instruction *insn; 783 struct section *sec; 784 struct symbol *func; 785 struct reloc *reloc; 786 787 sec = find_section_by_name(file->elf, ".rela.discard.func_stack_frame_non_standard"); 788 if (!sec) 789 return; 790 791 list_for_each_entry(reloc, &sec->reloc_list, list) { 792 switch (reloc->sym->type) { 793 case STT_FUNC: 794 func = reloc->sym; 795 break; 796 797 case STT_SECTION: 798 func = find_func_by_offset(reloc->sym->sec, reloc->addend); 799 if (!func) 800 continue; 801 break; 802 803 default: 804 WARN("unexpected relocation symbol type in %s: %d", sec->name, reloc->sym->type); 805 continue; 806 } 807 808 func_for_each_insn(file, func, insn) 809 insn->ignore = true; 810 } 811 } 812 813 /* 814 * This is a whitelist of functions that is allowed to be called with AC set. 815 * The list is meant to be minimal and only contains compiler instrumentation 816 * ABI and a few functions used to implement *_{to,from}_user() functions. 817 * 818 * These functions must not directly change AC, but may PUSHF/POPF. 
819 */ 820 static const char *uaccess_safe_builtin[] = { 821 /* KASAN */ 822 "kasan_report", 823 "kasan_check_range", 824 /* KASAN out-of-line */ 825 "__asan_loadN_noabort", 826 "__asan_load1_noabort", 827 "__asan_load2_noabort", 828 "__asan_load4_noabort", 829 "__asan_load8_noabort", 830 "__asan_load16_noabort", 831 "__asan_storeN_noabort", 832 "__asan_store1_noabort", 833 "__asan_store2_noabort", 834 "__asan_store4_noabort", 835 "__asan_store8_noabort", 836 "__asan_store16_noabort", 837 "__kasan_check_read", 838 "__kasan_check_write", 839 /* KASAN in-line */ 840 "__asan_report_load_n_noabort", 841 "__asan_report_load1_noabort", 842 "__asan_report_load2_noabort", 843 "__asan_report_load4_noabort", 844 "__asan_report_load8_noabort", 845 "__asan_report_load16_noabort", 846 "__asan_report_store_n_noabort", 847 "__asan_report_store1_noabort", 848 "__asan_report_store2_noabort", 849 "__asan_report_store4_noabort", 850 "__asan_report_store8_noabort", 851 "__asan_report_store16_noabort", 852 /* KCSAN */ 853 "__kcsan_check_access", 854 "__kcsan_mb", 855 "__kcsan_wmb", 856 "__kcsan_rmb", 857 "__kcsan_release", 858 "kcsan_found_watchpoint", 859 "kcsan_setup_watchpoint", 860 "kcsan_check_scoped_accesses", 861 "kcsan_disable_current", 862 "kcsan_enable_current_nowarn", 863 /* KCSAN/TSAN */ 864 "__tsan_func_entry", 865 "__tsan_func_exit", 866 "__tsan_read_range", 867 "__tsan_write_range", 868 "__tsan_read1", 869 "__tsan_read2", 870 "__tsan_read4", 871 "__tsan_read8", 872 "__tsan_read16", 873 "__tsan_write1", 874 "__tsan_write2", 875 "__tsan_write4", 876 "__tsan_write8", 877 "__tsan_write16", 878 "__tsan_read_write1", 879 "__tsan_read_write2", 880 "__tsan_read_write4", 881 "__tsan_read_write8", 882 "__tsan_read_write16", 883 "__tsan_atomic8_load", 884 "__tsan_atomic16_load", 885 "__tsan_atomic32_load", 886 "__tsan_atomic64_load", 887 "__tsan_atomic8_store", 888 "__tsan_atomic16_store", 889 "__tsan_atomic32_store", 890 "__tsan_atomic64_store", 891 "__tsan_atomic8_exchange", 892 
"__tsan_atomic16_exchange", 893 "__tsan_atomic32_exchange", 894 "__tsan_atomic64_exchange", 895 "__tsan_atomic8_fetch_add", 896 "__tsan_atomic16_fetch_add", 897 "__tsan_atomic32_fetch_add", 898 "__tsan_atomic64_fetch_add", 899 "__tsan_atomic8_fetch_sub", 900 "__tsan_atomic16_fetch_sub", 901 "__tsan_atomic32_fetch_sub", 902 "__tsan_atomic64_fetch_sub", 903 "__tsan_atomic8_fetch_and", 904 "__tsan_atomic16_fetch_and", 905 "__tsan_atomic32_fetch_and", 906 "__tsan_atomic64_fetch_and", 907 "__tsan_atomic8_fetch_or", 908 "__tsan_atomic16_fetch_or", 909 "__tsan_atomic32_fetch_or", 910 "__tsan_atomic64_fetch_or", 911 "__tsan_atomic8_fetch_xor", 912 "__tsan_atomic16_fetch_xor", 913 "__tsan_atomic32_fetch_xor", 914 "__tsan_atomic64_fetch_xor", 915 "__tsan_atomic8_fetch_nand", 916 "__tsan_atomic16_fetch_nand", 917 "__tsan_atomic32_fetch_nand", 918 "__tsan_atomic64_fetch_nand", 919 "__tsan_atomic8_compare_exchange_strong", 920 "__tsan_atomic16_compare_exchange_strong", 921 "__tsan_atomic32_compare_exchange_strong", 922 "__tsan_atomic64_compare_exchange_strong", 923 "__tsan_atomic8_compare_exchange_weak", 924 "__tsan_atomic16_compare_exchange_weak", 925 "__tsan_atomic32_compare_exchange_weak", 926 "__tsan_atomic64_compare_exchange_weak", 927 "__tsan_atomic8_compare_exchange_val", 928 "__tsan_atomic16_compare_exchange_val", 929 "__tsan_atomic32_compare_exchange_val", 930 "__tsan_atomic64_compare_exchange_val", 931 "__tsan_atomic_thread_fence", 932 "__tsan_atomic_signal_fence", 933 /* KCOV */ 934 "write_comp_data", 935 "check_kcov_mode", 936 "__sanitizer_cov_trace_pc", 937 "__sanitizer_cov_trace_const_cmp1", 938 "__sanitizer_cov_trace_const_cmp2", 939 "__sanitizer_cov_trace_const_cmp4", 940 "__sanitizer_cov_trace_const_cmp8", 941 "__sanitizer_cov_trace_cmp1", 942 "__sanitizer_cov_trace_cmp2", 943 "__sanitizer_cov_trace_cmp4", 944 "__sanitizer_cov_trace_cmp8", 945 "__sanitizer_cov_trace_switch", 946 /* UBSAN */ 947 "ubsan_type_mismatch_common", 948 "__ubsan_handle_type_mismatch", 
949 "__ubsan_handle_type_mismatch_v1", 950 "__ubsan_handle_shift_out_of_bounds", 951 /* misc */ 952 "csum_partial_copy_generic", 953 "copy_mc_fragile", 954 "copy_mc_fragile_handle_tail", 955 "copy_mc_enhanced_fast_string", 956 "ftrace_likely_update", /* CONFIG_TRACE_BRANCH_PROFILING */ 957 NULL 958 }; 959 960 static void add_uaccess_safe(struct objtool_file *file) 961 { 962 struct symbol *func; 963 const char **name; 964 965 if (!uaccess) 966 return; 967 968 for (name = uaccess_safe_builtin; *name; name++) { 969 func = find_symbol_by_name(file->elf, *name); 970 if (!func) 971 continue; 972 973 func->uaccess_safe = true; 974 } 975 } 976 977 /* 978 * FIXME: For now, just ignore any alternatives which add retpolines. This is 979 * a temporary hack, as it doesn't allow ORC to unwind from inside a retpoline. 980 * But it at least allows objtool to understand the control flow *around* the 981 * retpoline. 982 */ 983 static int add_ignore_alternatives(struct objtool_file *file) 984 { 985 struct section *sec; 986 struct reloc *reloc; 987 struct instruction *insn; 988 989 sec = find_section_by_name(file->elf, ".rela.discard.ignore_alts"); 990 if (!sec) 991 return 0; 992 993 list_for_each_entry(reloc, &sec->reloc_list, list) { 994 if (reloc->sym->type != STT_SECTION) { 995 WARN("unexpected relocation symbol type in %s", sec->name); 996 return -1; 997 } 998 999 insn = find_insn(file, reloc->sym->sec, reloc->addend); 1000 if (!insn) { 1001 WARN("bad .discard.ignore_alts entry"); 1002 return -1; 1003 } 1004 1005 insn->ignore_alts = true; 1006 } 1007 1008 return 0; 1009 } 1010 1011 __weak bool arch_is_retpoline(struct symbol *sym) 1012 { 1013 return false; 1014 } 1015 1016 #define NEGATIVE_RELOC ((void *)-1L) 1017 1018 static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn) 1019 { 1020 if (insn->reloc == NEGATIVE_RELOC) 1021 return NULL; 1022 1023 if (!insn->reloc) { 1024 if (!file) 1025 return NULL; 1026 1027 insn->reloc = 
find_reloc_by_dest_range(file->elf, insn->sec, 1028 insn->offset, insn->len); 1029 if (!insn->reloc) { 1030 insn->reloc = NEGATIVE_RELOC; 1031 return NULL; 1032 } 1033 } 1034 1035 return insn->reloc; 1036 } 1037 1038 static void remove_insn_ops(struct instruction *insn) 1039 { 1040 struct stack_op *op, *tmp; 1041 1042 list_for_each_entry_safe(op, tmp, &insn->stack_ops, list) { 1043 list_del(&op->list); 1044 free(op); 1045 } 1046 } 1047 1048 static void annotate_call_site(struct objtool_file *file, 1049 struct instruction *insn, bool sibling) 1050 { 1051 struct reloc *reloc = insn_reloc(file, insn); 1052 struct symbol *sym = insn->call_dest; 1053 1054 if (!sym) 1055 sym = reloc->sym; 1056 1057 /* 1058 * Alternative replacement code is just template code which is 1059 * sometimes copied to the original instruction. For now, don't 1060 * annotate it. (In the future we might consider annotating the 1061 * original instruction if/when it ever makes sense to do so.) 1062 */ 1063 if (!strcmp(insn->sec->name, ".altinstr_replacement")) 1064 return; 1065 1066 if (sym->static_call_tramp) { 1067 list_add_tail(&insn->call_node, &file->static_call_list); 1068 return; 1069 } 1070 1071 if (sym->retpoline_thunk) { 1072 list_add_tail(&insn->call_node, &file->retpoline_call_list); 1073 return; 1074 } 1075 1076 /* 1077 * Many compilers cannot disable KCOV or sanitizer calls with a function 1078 * attribute so they need a little help, NOP out any such calls from 1079 * noinstr text. 1080 */ 1081 if (insn->sec->noinstr && sym->profiling_func) { 1082 if (reloc) { 1083 reloc->type = R_NONE; 1084 elf_write_reloc(file->elf, reloc); 1085 } 1086 1087 elf_write_insn(file->elf, insn->sec, 1088 insn->offset, insn->len, 1089 sibling ? arch_ret_insn(insn->len) 1090 : arch_nop_insn(insn->len)); 1091 1092 insn->type = sibling ? 
			      INSN_RETURN : INSN_NOP;
		return;
	}

	/*
	 * __fentry__ call sites (mcount mode): kill the relocation, patch the
	 * call out to a NOP in the object, and record the site on
	 * mcount_loc_list so it can be emitted later.  A *tail* call to
	 * __fentry__ should never happen, hence the warning.
	 */
	if (mcount && sym->fentry) {
		if (sibling)
			WARN_FUNC("Tail call to __fentry__ !?!?", insn->sec, insn->offset);

		if (reloc) {
			reloc->type = R_NONE;
			elf_write_reloc(file->elf, reloc);
		}

		elf_write_insn(file->elf, insn->sec,
			       insn->offset, insn->len,
			       arch_nop_insn(insn->len));

		insn->type = INSN_NOP;

		list_add_tail(&insn->call_node, &file->mcount_loc_list);
		return;
	}
}

/*
 * Record @dest as the call destination of @insn and annotate the call site.
 * A NULL @dest just clears the destination.  @sibling says whether this is a
 * sibling (tail) call rather than a normal CALL.
 */
static void add_call_dest(struct objtool_file *file, struct instruction *insn,
			  struct symbol *dest, bool sibling)
{
	insn->call_dest = dest;
	if (!dest)
		return;

	/*
	 * Whatever stack impact regular CALLs have, should be undone
	 * by the RETURN of the called function.
	 *
	 * Annotated intra-function calls retain the stack_ops but
	 * are converted to JUMP, see read_intra_function_calls().
	 */
	remove_insn_ops(insn);

	annotate_call_site(file, insn, sibling);
}

/*
 * Convert a direct call/jump to a retpoline thunk into the corresponding
 * dynamic (indirect) instruction type and mark it retpoline-safe.
 */
static void add_retpoline_call(struct objtool_file *file, struct instruction *insn)
{
	/*
	 * Retpoline calls/jumps are really dynamic calls/jumps in disguise,
	 * so convert them accordingly.
	 */
	switch (insn->type) {
	case INSN_CALL:
		insn->type = INSN_CALL_DYNAMIC;
		break;
	case INSN_JUMP_UNCONDITIONAL:
		insn->type = INSN_JUMP_DYNAMIC;
		break;
	case INSN_JUMP_CONDITIONAL:
		insn->type = INSN_JUMP_DYNAMIC_CONDITIONAL;
		break;
	default:
		return;
	}

	insn->retpoline_safe = true;

	/*
	 * Whatever stack impact regular CALLs have, should be undone
	 * by the RETURN of the called function.
	 *
	 * Annotated intra-function calls retain the stack_ops but
	 * are converted to JUMP, see read_intra_function_calls().
	 */
	remove_insn_ops(insn);

	annotate_call_site(file, insn, false);
}

/*
 * Find the destination instructions for all jumps.
 */
static int add_jump_destinations(struct objtool_file *file)
{
	struct instruction *insn;
	struct reloc *reloc;
	struct section *dest_sec;
	unsigned long dest_off;

	for_each_insn(file, insn) {
		if (!is_static_jump(insn))
			continue;

		/*
		 * Classify the jump by its relocation (or lack thereof) to
		 * find the destination section+offset, or hand it off to the
		 * retpoline/sibling-call handlers.
		 */
		reloc = insn_reloc(file, insn);
		if (!reloc) {
			/* No reloc: destination is encoded in the insn. */
			dest_sec = insn->sec;
			dest_off = arch_jump_destination(insn);
		} else if (reloc->sym->type == STT_SECTION) {
			dest_sec = reloc->sym->sec;
			dest_off = arch_dest_reloc_offset(reloc->addend);
		} else if (reloc->sym->retpoline_thunk) {
			add_retpoline_call(file, insn);
			continue;
		} else if (insn->func) {
			/* internal or external sibling call (with reloc) */
			add_call_dest(file, insn, reloc->sym, true);
			continue;
		} else if (reloc->sym->sec->idx) {
			dest_sec = reloc->sym->sec;
			dest_off = reloc->sym->sym.st_value +
				   arch_dest_reloc_offset(reloc->addend);
		} else {
			/* non-func asm code jumping to another file */
			continue;
		}

		insn->jump_dest = find_insn(file, dest_sec, dest_off);
		if (!insn->jump_dest) {

			/*
			 * This is a special case where an alt instruction
			 * jumps past the end of the section.  These are
			 * handled later in handle_group_alt().
			 */
			if (!strcmp(insn->sec->name, ".altinstr_replacement"))
				continue;

			WARN_FUNC("can't find jump dest instruction at %s+0x%lx",
				  insn->sec, insn->offset, dest_sec->name,
				  dest_off);
			return -1;
		}

		/*
		 * Cross-function jump.
		 */
		if (insn->func && insn->jump_dest->func &&
		    insn->func != insn->jump_dest->func) {

			/*
			 * For GCC 8+, create parent/child links for any cold
			 * subfunctions.  This is _mostly_ redundant with a
			 * similar initialization in read_symbols().
			 *
			 * If a function has aliases, we want the *first* such
			 * function in the symbol table to be the subfunction's
			 * parent.  In that case we overwrite the
			 * initialization done in read_symbols().
			 *
			 * However this code can't completely replace the
			 * read_symbols() code because this doesn't detect the
			 * case where the parent function's only reference to a
			 * subfunction is through a jump table.
			 */
			if (!strstr(insn->func->name, ".cold") &&
			    strstr(insn->jump_dest->func->name, ".cold")) {
				insn->func->cfunc = insn->jump_dest->func;
				insn->jump_dest->func->pfunc = insn->func;

			} else if (insn->jump_dest->func->pfunc != insn->func->pfunc &&
				   insn->jump_dest->offset == insn->jump_dest->func->offset) {
				/* internal sibling call (without reloc) */
				add_call_dest(file, insn, insn->jump_dest->func, true);
			}
		}
	}

	return 0;
}

/*
 * Prefer an STT_FUNC symbol at @offset; fall back to any symbol there.
 */
static struct symbol *find_call_destination(struct section *sec, unsigned long offset)
{
	struct symbol *call_dest;

	call_dest = find_func_by_offset(sec, offset);
	if (!call_dest)
		call_dest = find_symbol_by_offset(sec, offset);

	return call_dest;
}

/*
 * Find the destination instructions for all calls.
 */
static int add_call_destinations(struct objtool_file *file)
{
	struct instruction *insn;
	unsigned long dest_off;
	struct symbol *dest;
	struct reloc *reloc;

	for_each_insn(file, insn) {
		if (insn->type != INSN_CALL)
			continue;

		reloc = insn_reloc(file, insn);
		if (!reloc) {
			/* Direct call, destination encoded in the insn. */
			dest_off = arch_jump_destination(insn);
			dest = find_call_destination(insn->sec, dest_off);

			add_call_dest(file, insn, dest, false);

			if (insn->ignore)
				continue;

			if (!insn->call_dest) {
				WARN_FUNC("unannotated intra-function call", insn->sec, insn->offset);
				return -1;
			}

			if (insn->func && insn->call_dest->type != STT_FUNC) {
				WARN_FUNC("unsupported call to non-function",
					  insn->sec, insn->offset);
				return -1;
			}

		} else if (reloc->sym->type == STT_SECTION) {
			/* Section-relative reloc: resolve via the addend. */
			dest_off = arch_dest_reloc_offset(reloc->addend);
			dest = find_call_destination(reloc->sym->sec, dest_off);
			if (!dest) {
				WARN_FUNC("can't find call dest symbol at %s+0x%lx",
					  insn->sec, insn->offset,
					  reloc->sym->sec->name,
					  dest_off);
				return -1;
			}

			add_call_dest(file, insn, dest, false);

		} else if (reloc->sym->retpoline_thunk) {
			add_retpoline_call(file, insn);

		} else
			add_call_dest(file, insn, reloc->sym, false);
	}

	return 0;
}

/*
 * The .alternatives section requires some extra special care over and above
 * other special sections because alternatives are patched in place.
 */
static int handle_group_alt(struct objtool_file *file,
			    struct special_alt *special_alt,
			    struct instruction *orig_insn,
			    struct instruction **new_insn)
{
	struct instruction *last_orig_insn, *last_new_insn = NULL, *insn, *nop = NULL;
	struct alt_group *orig_alt_group, *new_alt_group;
	unsigned long dest_off;

	/*
	 * Create the original alt_group.  Its CFI array is indexed by offset
	 * into the group and shared with the replacement group below.
	 */
	orig_alt_group = malloc(sizeof(*orig_alt_group));
	if (!orig_alt_group) {
		WARN("malloc failed");
		return -1;
	}
	orig_alt_group->cfi = calloc(special_alt->orig_len,
				     sizeof(struct cfi_state *));
	if (!orig_alt_group->cfi) {
		WARN("calloc failed");
		return -1;
	}

	/* Claim every original instruction for the orig alt_group: */
	last_orig_insn = NULL;
	insn = orig_insn;
	sec_for_each_insn_from(file, insn) {
		if (insn->offset >= special_alt->orig_off + special_alt->orig_len)
			break;

		insn->alt_group = orig_alt_group;
		last_orig_insn = insn;
	}
	orig_alt_group->orig_group = NULL;
	orig_alt_group->first_insn = orig_insn;
	orig_alt_group->last_insn = last_orig_insn;

	new_alt_group = malloc(sizeof(*new_alt_group));
	if (!new_alt_group) {
		WARN("malloc failed");
		return -1;
	}

	if (special_alt->new_len < special_alt->orig_len) {
		/*
		 * Insert a fake nop at the end to make the replacement
		 * alt_group the same size as the original.  This is needed to
		 * allow propagate_alt_cfi() to do its magic.  When the last
		 * instruction affects the stack, the instruction after it (the
		 * nop) will propagate the new state to the shared CFI array.
		 */
		nop = malloc(sizeof(*nop));
		if (!nop) {
			WARN("malloc failed");
			return -1;
		}
		memset(nop, 0, sizeof(*nop));
		INIT_LIST_HEAD(&nop->alts);
		INIT_LIST_HEAD(&nop->stack_ops);

		nop->sec = special_alt->new_sec;
		nop->offset = special_alt->new_off + special_alt->new_len;
		nop->len = special_alt->orig_len - special_alt->new_len;
		nop->type = INSN_NOP;
		nop->func = orig_insn->func;
		nop->alt_group = new_alt_group;
		nop->ignore = orig_insn->ignore_alts;
	}

	/* Empty replacement: the fake nop (if any) is the whole group. */
	if (!special_alt->new_len) {
		*new_insn = nop;
		goto end;
	}

	insn = *new_insn;
	sec_for_each_insn_from(file, insn) {
		struct reloc *alt_reloc;

		if (insn->offset >= special_alt->new_off + special_alt->new_len)
			break;

		last_new_insn = insn;

		insn->ignore = orig_insn->ignore_alts;
		insn->func = orig_insn->func;
		insn->alt_group = new_alt_group;

		/*
		 * Since alternative replacement code is copy/pasted by the
		 * kernel after applying relocations, generally such code can't
		 * have relative-address relocation references to outside the
		 * .altinstr_replacement section, unless the arch's
		 * alternatives code can adjust the relative offsets
		 * accordingly.
		 */
		alt_reloc = insn_reloc(file, insn);
		if (alt_reloc &&
		    !arch_support_alt_relocation(special_alt, insn, alt_reloc)) {

			WARN_FUNC("unsupported relocation in alternatives section",
				  insn->sec, insn->offset);
			return -1;
		}

		if (!is_static_jump(insn))
			continue;

		if (!insn->immediate)
			continue;

		/*
		 * A jump to one-past-the-end of the replacement targets the
		 * instruction following the original group instead.
		 */
		dest_off = arch_jump_destination(insn);
		if (dest_off == special_alt->new_off + special_alt->new_len)
			insn->jump_dest = next_insn_same_sec(file, last_orig_insn);

		if (!insn->jump_dest) {
			WARN_FUNC("can't find alternative jump destination",
				  insn->sec, insn->offset);
			return -1;
		}
	}

	if (!last_new_insn) {
		WARN_FUNC("can't find last new alternative instruction",
			  special_alt->new_sec, special_alt->new_off);
		return -1;
	}

	if (nop)
		list_add(&nop->list, &last_new_insn->list);
end:
	new_alt_group->orig_group = orig_alt_group;
	new_alt_group->first_insn = *new_insn;
	new_alt_group->last_insn = nop ? : last_new_insn;
	new_alt_group->cfi = orig_alt_group->cfi;
	return 0;
}

/*
 * A jump table entry can either convert a nop to a jump or a jump to a nop.
 * If the original instruction is a jump, make the alt entry an effective nop
 * by just skipping the original instruction.
 */
static int handle_jump_alt(struct objtool_file *file,
			   struct special_alt *special_alt,
			   struct instruction *orig_insn,
			   struct instruction **new_insn)
{
	if (orig_insn->type != INSN_JUMP_UNCONDITIONAL &&
	    orig_insn->type != INSN_NOP) {

		WARN_FUNC("unsupported instruction at jump label",
			  orig_insn->sec, orig_insn->offset);
		return -1;
	}

	/*
	 * key_addend bit 2 set: the site starts out as a NOP — kill the
	 * reloc and rewrite the instruction bytes in the object accordingly.
	 */
	if (special_alt->key_addend & 2) {
		struct reloc *reloc = insn_reloc(file, orig_insn);

		if (reloc) {
			reloc->type = R_NONE;
			elf_write_reloc(file->elf, reloc);
		}
		elf_write_insn(file->elf, orig_insn->sec,
			       orig_insn->offset, orig_insn->len,
			       arch_nop_insn(orig_insn->len));
		orig_insn->type = INSN_NOP;
	}

	if (orig_insn->type == INSN_NOP) {
		if (orig_insn->len == 2)
			file->jl_nop_short++;
		else
			file->jl_nop_long++;

		return 0;
	}

	if (orig_insn->len == 2)
		file->jl_short++;
	else
		file->jl_long++;

	/* Original is a jump: the "alternative" just skips it. */
	*new_insn = list_next_entry(orig_insn, list);
	return 0;
}

/*
 * Read all the special sections which have alternate instructions which can be
 * patched in or redirected to at runtime.  Each instruction having alternate
 * instruction(s) has them added to its insn->alts list, which will be
 * traversed in validate_branch().
 */
static int add_special_section_alts(struct objtool_file *file)
{
	struct list_head special_alts;
	struct instruction *orig_insn, *new_insn;
	struct special_alt *special_alt, *tmp;
	struct alternative *alt;
	int ret;

	ret = special_get_alts(file->elf, &special_alts);
	if (ret)
		return ret;

	list_for_each_entry_safe(special_alt, tmp, &special_alts, list) {

		orig_insn = find_insn(file, special_alt->orig_sec,
				      special_alt->orig_off);
		if (!orig_insn) {
			WARN_FUNC("special: can't find orig instruction",
				  special_alt->orig_sec, special_alt->orig_off);
			ret = -1;
			goto out;
		}

		new_insn = NULL;
		if (!special_alt->group || special_alt->new_len) {
			new_insn = find_insn(file, special_alt->new_sec,
					     special_alt->new_off);
			if (!new_insn) {
				WARN_FUNC("special: can't find new instruction",
					  special_alt->new_sec,
					  special_alt->new_off);
				ret = -1;
				goto out;
			}
		}

		if (special_alt->group) {
			if (!special_alt->orig_len) {
				WARN_FUNC("empty alternative entry",
					  orig_insn->sec, orig_insn->offset);
				continue;
			}

			ret = handle_group_alt(file, special_alt, orig_insn,
					       &new_insn);
			if (ret)
				goto out;
		} else if (special_alt->jump_or_nop) {
			ret = handle_jump_alt(file, special_alt, orig_insn,
					      &new_insn);
			if (ret)
				goto out;
		}

		/* Attach the alternative to the original instruction: */
		alt = malloc(sizeof(*alt));
		if (!alt) {
			WARN("malloc failed");
			ret = -1;
			goto out;
		}

		alt->insn = new_insn;
		alt->skip_orig = special_alt->skip_orig;
		orig_insn->ignore_alts |= special_alt->skip_alt;
		list_add_tail(&alt->list, &orig_insn->alts);

		list_del(&special_alt->list);
		free(special_alt);
	}

	if (stats) {
		printf("jl\\\tNOP\tJMP\n");
		printf("short:\t%ld\t%ld\n", file->jl_nop_short, file->jl_short);
		printf("long:\t%ld\t%ld\n", file->jl_nop_long, file->jl_long);
	}

out:
	return ret;
}

/*
 * Add each entry of the jump table starting at @table as an alternative
 * destination of dynamic-jump @insn.  The table ends at the start of the
 * next table, a non-consecutive entry, or an entry that leaves the function.
 */
static int add_jump_table(struct objtool_file *file, struct instruction *insn,
			  struct reloc *table)
{
	struct reloc *reloc = table;
	struct instruction *dest_insn;
	struct alternative *alt;
	struct symbol *pfunc = insn->func->pfunc;
	unsigned int prev_offset = 0;

	/*
	 * Each @reloc is a switch table relocation which points to the target
	 * instruction.
	 */
	list_for_each_entry_from(reloc, &table->sec->reloc_list, list) {

		/* Check for the end of the table: */
		if (reloc != table && reloc->jump_table_start)
			break;

		/* Make sure the table entries are consecutive: */
		if (prev_offset && reloc->offset != prev_offset + 8)
			break;

		/* Detect function pointers from contiguous objects: */
		if (reloc->sym->sec == pfunc->sec &&
		    reloc->addend == pfunc->offset)
			break;

		dest_insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!dest_insn)
			break;

		/* Make sure the destination is in the same function: */
		if (!dest_insn->func || dest_insn->func->pfunc != pfunc)
			break;

		alt = malloc(sizeof(*alt));
		if (!alt) {
			WARN("malloc failed");
			return -1;
		}

		alt->insn = dest_insn;
		list_add_tail(&alt->list, &insn->alts);
		prev_offset = reloc->offset;
	}

	/* prev_offset == 0 means not a single entry was accepted. */
	if (!prev_offset) {
		WARN_FUNC("can't find switch jump table",
			  insn->sec, insn->offset);
		return -1;
	}

	return 0;
}

/*
 * find_jump_table() - Given a dynamic jump, find the switch jump table
 * associated with it.
 */
static struct reloc *find_jump_table(struct objtool_file *file,
				     struct symbol *func,
				     struct instruction *insn)
{
	struct reloc *table_reloc;
	struct instruction *dest_insn, *orig_insn = insn;

	/*
	 * Backward search using the @first_jump_src links, these help avoid
	 * much of the 'in between' code. Which avoids us getting confused by
	 * it.
	 */
	for (;
	     insn && insn->func && insn->func->pfunc == func;
	     insn = insn->first_jump_src ?: prev_insn_same_sym(file, insn)) {

		/* Stop at an earlier dynamic jump: its table isn't ours. */
		if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC)
			break;

		/* allow small jumps within the range */
		if (insn->type == INSN_JUMP_UNCONDITIONAL &&
		    insn->jump_dest &&
		    (insn->jump_dest->offset <= insn->offset ||
		     insn->jump_dest->offset > orig_insn->offset))
			break;

		table_reloc = arch_find_switch_table(file, insn);
		if (!table_reloc)
			continue;

		/* The first table entry must land back inside @func: */
		dest_insn = find_insn(file, table_reloc->sym->sec, table_reloc->addend);
		if (!dest_insn || !dest_insn->func || dest_insn->func->pfunc != func)
			continue;

		return table_reloc;
	}

	return NULL;
}

/*
 * First pass: Mark the head of each jump table so that in the next pass,
 * we know when a given jump table ends and the next one starts.
 */
static void mark_func_jump_tables(struct objtool_file *file,
				  struct symbol *func)
{
	struct instruction *insn, *last = NULL;
	struct reloc *reloc;

	func_for_each_insn(file, func, insn) {
		if (!last)
			last = insn;

		/*
		 * Store back-pointers for unconditional forward jumps such
		 * that find_jump_table() can back-track using those and
		 * avoid some potentially confusing code.
		 */
		if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest &&
		    insn->offset > last->offset &&
		    insn->jump_dest->offset > insn->offset &&
		    !insn->jump_dest->first_jump_src) {

			insn->jump_dest->first_jump_src = insn;
			last = insn->jump_dest;
		}

		if (insn->type != INSN_JUMP_DYNAMIC)
			continue;

		reloc = find_jump_table(file, func, insn);
		if (reloc) {
			reloc->jump_table_start = true;
			insn->jump_table = reloc;
		}
	}
}

/*
 * Second pass: for each dynamic jump marked above, add the jump table's
 * destinations as alternatives of the jump instruction.
 */
static int add_func_jump_tables(struct objtool_file *file,
				struct symbol *func)
{
	struct instruction *insn;
	int ret;

	func_for_each_insn(file, func, insn) {
		if (!insn->jump_table)
			continue;

		ret = add_jump_table(file, insn, insn->jump_table);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * For some switch statements, gcc generates a jump table in the .rodata
 * section which contains a list of addresses within the function to jump to.
 * This finds these jump tables and adds them to the insn->alts lists.
 */
static int add_jump_table_alts(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *func;
	int ret;

	/* No rodata sections were found: there can be no jump tables. */
	if (!file->rodata)
		return 0;

	for_each_sec(file, sec) {
		list_for_each_entry(func, &sec->symbol_list, list) {
			if (func->type != STT_FUNC)
				continue;

			mark_func_jump_tables(file, func);
			ret = add_func_jump_tables(file, func);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/* Reset @state to the default CFI at function entry. */
static void set_func_state(struct cfi_state *state)
{
	state->cfa = initial_func_cfi.cfa;
	memcpy(&state->regs, &initial_func_cfi.regs,
	       CFI_NUM_REGS * sizeof(struct cfi_reg));
	state->stack_size = initial_func_cfi.cfa.offset;
}

/*
 * Apply the UNWIND_HINT() annotations collected in .discard.unwind_hints:
 * each entry sets the CFI state of the instruction its reloc points at.
 */
static int read_unwind_hints(struct objtool_file *file)
{
	struct cfi_state cfi = init_cfi;
	struct section *sec, *relocsec;
	struct unwind_hint *hint;
	struct instruction *insn;
	struct reloc *reloc;
	int i;

	sec = find_section_by_name(file->elf, ".discard.unwind_hints");
	if (!sec)
		return 0;

	relocsec = sec->reloc;
	if (!relocsec) {
		WARN("missing .rela.discard.unwind_hints section");
		return -1;
	}

	if (sec->sh.sh_size % sizeof(struct unwind_hint)) {
		WARN("struct unwind_hint size mismatch");
		return -1;
	}

	file->hints = true;

	for (i = 0; i < sec->sh.sh_size / sizeof(struct unwind_hint); i++) {
		hint = (struct unwind_hint *)sec->data->d_buf + i;

		reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint));
		if (!reloc) {
			WARN("can't find reloc for unwind_hints[%d]", i);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("can't find insn for unwind_hints[%d]", i);
			return -1;
		}

		insn->hint = true;

		/* FUNC hints share the one static default-function CFI. */
		if (hint->type == UNWIND_HINT_TYPE_FUNC) {
			insn->cfi = &func_cfi;
			continue;
		}

		if (insn->cfi)
			cfi = *(insn->cfi);

		if (arch_decode_hint_reg(hint->sp_reg, &cfi.cfa.base)) {
			WARN_FUNC("unsupported unwind_hint sp base reg %d",
				  insn->sec, insn->offset, hint->sp_reg);
			return -1;
		}

		cfi.cfa.offset = bswap_if_needed(hint->sp_offset);
		cfi.type = hint->type;
		cfi.end = hint->end;

		/* Intern the CFI state so identical states share storage. */
		insn->cfi = cfi_hash_find_or_add(&cfi);
	}

	return 0;
}

/*
 * Mark the indirect jumps/calls listed in .discard.retpoline_safe as safe
 * from the retpoline check.
 */
static int read_retpoline_hints(struct objtool_file *file)
{
	struct section *sec;
	struct instruction *insn;
	struct reloc *reloc;

	sec = find_section_by_name(file->elf, ".rela.discard.retpoline_safe");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("bad .discard.retpoline_safe entry");
			return -1;
		}

		if (insn->type != INSN_JUMP_DYNAMIC &&
		    insn->type != INSN_CALL_DYNAMIC) {
			WARN_FUNC("retpoline_safe hint not an indirect jump/call",
				  insn->sec, insn->offset);
			return -1;
		}

		insn->retpoline_safe = true;
	}

	return 0;
}

/*
 * Track instrumentation-allowed regions: instr_begin increments and
 * instr_end decrements insn->instr at the annotated instructions.
 */
static int read_instr_hints(struct objtool_file *file)
{
	struct section *sec;
	struct instruction *insn;
	struct reloc *reloc;

	sec = find_section_by_name(file->elf, ".rela.discard.instr_end");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("bad .discard.instr_end entry");
			return -1;
		}

		insn->instr--;
	}

	sec = find_section_by_name(file->elf, ".rela.discard.instr_begin");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("bad .discard.instr_begin entry");
			return -1;
		}

		insn->instr++;
	}

	return 0;
}

/*
 * Convert ANNOTATE_INTRA_FUNCTION_CALL sites from CALL to unconditional
 * JUMP while keeping their stack_ops, and resolve their destinations.
 */
static int read_intra_function_calls(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	struct reloc *reloc;

	sec = find_section_by_name(file->elf, ".rela.discard.intra_function_calls");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		unsigned long dest_off;

		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s",
			     sec->name);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("bad .discard.intra_function_call entry");
			return -1;
		}

		if (insn->type != INSN_CALL) {
			WARN_FUNC("intra_function_call not a direct call",
				  insn->sec, insn->offset);
			return -1;
		}

		/*
		 * Treat intra-function CALLs as JMPs, but with a stack_op.
		 * See add_call_destinations(), which strips stack_ops from
		 * normal CALLs.
		 */
		insn->type = INSN_JUMP_UNCONDITIONAL;

		dest_off = insn->offset + insn->len + insn->immediate;
		insn->jump_dest = find_insn(file, insn->sec, dest_off);
		if (!insn->jump_dest) {
			WARN_FUNC("can't find call dest at %s+0x%lx",
				  insn->sec, insn->offset,
				  insn->sec->name, dest_off);
			return -1;
		}
	}

	return 0;
}

/*
 * Return true if name matches an instrumentation function, where calls to that
 * function from noinstr code can safely be removed, but compilers won't do so.
 */
static bool is_profiling_func(const char *name)
{
	/*
	 * Many compilers cannot disable KCOV with a function attribute.
	 */
	if (!strncmp(name, "__sanitizer_cov_", 16))
		return true;

	/*
	 * Some compilers currently do not remove __tsan_func_entry/exit nor
	 * __tsan_atomic_signal_fence (used for barrier instrumentation) with
	 * the __no_sanitize_thread attribute, remove them. Once the kernel's
	 * minimum Clang version is 14.0, this can be removed.
	 */
	if (!strncmp(name, "__tsan_func_", 12) ||
	    !strcmp(name, "__tsan_atomic_signal_fence"))
		return true;

	return false;
}

/*
 * Flag global symbols that get special treatment later: static-call
 * trampolines, retpoline thunks, __fentry__, and profiling functions.
 */
static int classify_symbols(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *func;

	for_each_sec(file, sec) {
		list_for_each_entry(func, &sec->symbol_list, list) {
			if (func->bind != STB_GLOBAL)
				continue;

			if (!strncmp(func->name, STATIC_CALL_TRAMP_PREFIX_STR,
				     strlen(STATIC_CALL_TRAMP_PREFIX_STR)))
				func->static_call_tramp = true;

			if (arch_is_retpoline(func))
				func->retpoline_thunk = true;

			if (!strcmp(func->name, "__fentry__"))
				func->fentry = true;

			if (is_profiling_func(func->name))
				func->profiling_func = true;
		}
	}

	return 0;
}

static void mark_rodata(struct objtool_file *file)
{
	struct section *sec;
	bool found = false;

	/*
	 * Search for the following rodata sections, each of which can
	 * potentially contain jump tables:
	 *
	 * - .rodata: can contain GCC switch tables
	 * - .rodata.<func>: same, if -fdata-sections is being used
	 * - .rodata..c_jump_table: contains C annotated jump tables
	 *
	 * .rodata.str1.* sections are ignored; they don't contain jump tables.
	 */
	for_each_sec(file, sec) {
		if (!strncmp(sec->name, ".rodata", 7) &&
		    !strstr(sec->name, ".str1.")) {
			sec->rodata = true;
			found = true;
		}
	}

	file->rodata = found;
}

/*
 * Run all decode/annotation passes, in dependency order (see the comments
 * at each ordering constraint below).
 */
static int decode_sections(struct objtool_file *file)
{
	int ret;

	mark_rodata(file);

	ret = init_pv_ops(file);
	if (ret)
		return ret;

	ret = decode_instructions(file);
	if (ret)
		return ret;

	ret = add_dead_ends(file);
	if (ret)
		return ret;

	add_ignores(file);
	add_uaccess_safe(file);

	ret = add_ignore_alternatives(file);
	if (ret)
		return ret;

	/*
	 * Must be before add_{jump_call}_destination.
	 */
	ret = classify_symbols(file);
	if (ret)
		return ret;

	/*
	 * Must be before add_special_section_alts() as that depends on
	 * jump_dest being set.
	 */
	ret = add_jump_destinations(file);
	if (ret)
		return ret;

	ret = add_special_section_alts(file);
	if (ret)
		return ret;

	/*
	 * Must be before add_call_destination(); it changes INSN_CALL to
	 * INSN_JUMP.
	 */
	ret = read_intra_function_calls(file);
	if (ret)
		return ret;

	ret = add_call_destinations(file);
	if (ret)
		return ret;

	ret = add_jump_table_alts(file);
	if (ret)
		return ret;

	ret = read_unwind_hints(file);
	if (ret)
		return ret;

	ret = read_retpoline_hints(file);
	if (ret)
		return ret;

	ret = read_instr_hints(file);
	if (ret)
		return ret;

	return 0;
}

/* Is @insn a direct call whose destination is __fentry__? */
static bool is_fentry_call(struct instruction *insn)
{
	if (insn->type == INSN_CALL &&
	    insn->call_dest &&
	    insn->call_dest->fentry)
		return true;

	return false;
}

/* Has the CFI state diverged from the function-entry defaults? */
static bool has_modified_stack_frame(struct instruction *insn, struct insn_state *state)
{
	struct cfi_state *cfi = &state->cfi;
	int i;

	if (cfi->cfa.base != initial_func_cfi.cfa.base || cfi->drap)
		return true;

	if (cfi->cfa.offset != initial_func_cfi.cfa.offset)
		return true;

	if (cfi->stack_size != initial_func_cfi.cfa.offset)
		return true;

	for (i = 0; i < CFI_NUM_REGS; i++) {
		if (cfi->regs[i].base != initial_func_cfi.regs[i].base ||
		    cfi->regs[i].offset != initial_func_cfi.regs[i].offset)
			return true;
	}

	return false;
}

/* Is @reg saved at CFA + @expected_offset? */
static bool check_reg_frame_pos(const struct cfi_reg *reg,
				int expected_offset)
{
	return reg->base == CFI_CFA &&
	       reg->offset == expected_offset;
}

static bool has_valid_stack_frame(struct insn_state *state)
{
	struct cfi_state *cfi = &state->cfi;

	/* Normal frame: BP-based CFA with saved BP and return address. */
	if (cfi->cfa.base == CFI_BP &&
	    check_reg_frame_pos(&cfi->regs[CFI_BP], -cfi->cfa.offset) &&
	    check_reg_frame_pos(&cfi->regs[CFI_RA], -cfi->cfa.offset + 8))
		return true;

	if (cfi->drap && cfi->regs[CFI_BP].base == CFI_BP)
		return true;

	return false;
}

/*
 * Simplified stack tracking used under REGS/REGS_PARTIAL unwind hints:
 * only the CFA offset is adjusted for pushes, pops and SP arithmetic.
 */
static int update_cfi_state_regs(struct instruction *insn,
				 struct cfi_state *cfi,
				 struct stack_op *op)
{
	struct cfi_reg *cfa = &cfi->cfa;

	if (cfa->base != CFI_SP && cfa->base != CFI_SP_INDIRECT)
		return 0;

	/* push */
	if (op->dest.type == OP_DEST_PUSH || op->dest.type == OP_DEST_PUSHF)
		cfa->offset += 8;

	/* pop */
	if (op->src.type == OP_SRC_POP || op->src.type == OP_SRC_POPF)
		cfa->offset -= 8;

	/* add immediate to sp */
	if (op->dest.type == OP_DEST_REG && op->src.type == OP_SRC_ADD &&
	    op->dest.reg == CFI_SP && op->src.reg == CFI_SP)
		cfa->offset -= op->src.offset;

	return 0;
}

/* Record where a callee-saved reg was stored; the first save wins. */
static void save_reg(struct cfi_state *cfi, unsigned char reg, int base, int offset)
{
	if (arch_callee_saved_reg(reg) &&
	    cfi->regs[reg].base == CFI_UNDEFINED) {
		cfi->regs[reg].base = base;
		cfi->regs[reg].offset = offset;
	}
}

/* Reset @reg's tracked location back to the function-entry default. */
static void restore_reg(struct cfi_state *cfi, unsigned char reg)
{
	cfi->regs[reg].base = initial_func_cfi.regs[reg].base;
	cfi->regs[reg].offset = initial_func_cfi.regs[reg].offset;
}

/*
 * A note about DRAP stack alignment:
 *
 * GCC has the concept of a DRAP register, which is used to help keep track of
 * the stack pointer when aligning the stack.  r10 or r13 is used as the DRAP
 * register.  The typical DRAP pattern is:
 *
 *   4c 8d 54 24 08		lea    0x8(%rsp),%r10
 *   48 83 e4 c0		and    $0xffffffffffffffc0,%rsp
 *   41 ff 72 f8		pushq  -0x8(%r10)
 *   55				push   %rbp
 *   48 89 e5			mov    %rsp,%rbp
 *				(more pushes)
 *   41 52			push   %r10
 *				...
 *   41 5a			pop    %r10
 *				(more pops)
 *   5d				pop    %rbp
 *   49 8d 62 f8		lea    -0x8(%r10),%rsp
 *   c3				retq
 *
 * There are some variations in the epilogues, like:
 *
 *   5b				pop    %rbx
 *   41 5a			pop    %r10
 *   41 5c			pop    %r12
 *   41 5d			pop    %r13
 *   41 5e			pop    %r14
 *   c9				leaveq
 *   49 8d 62 f8		lea    -0x8(%r10),%rsp
 *   c3				retq
 *
 * and:
 *
 *   4c 8b 55 e8		mov    -0x18(%rbp),%r10
 *   48 8b 5d e0		mov    -0x20(%rbp),%rbx
 *   4c 8b 65 f0		mov    -0x10(%rbp),%r12
 *   4c 8b 6d f8		mov    -0x8(%rbp),%r13
 *   c9				leaveq
 *   49 8d 62 f8		lea    -0x8(%r10),%rsp
 *   c3				retq
 *
 * Sometimes r13 is used as the DRAP register, in which case it's saved and
 * restored beforehand:
 *
 *   41 55			push   %r13
 *   4c 8d 6c 24 10		lea    0x10(%rsp),%r13
 *   48 83 e4 f0		and    $0xfffffffffffffff0,%rsp
 *				...
 *   49 8d 65 f0		lea    -0x10(%r13),%rsp
 *   41 5d			pop    %r13
 *   c3				retq
 */
static int update_cfi_state(struct instruction *insn,
			    struct instruction *next_insn,
			    struct cfi_state *cfi, struct stack_op *op)
{
	struct cfi_reg *cfa = &cfi->cfa;
	struct cfi_reg *regs = cfi->regs;

	/* stack operations don't make sense with an undefined CFA */
	if (cfa->base == CFI_UNDEFINED) {
		if (insn->func) {
			WARN_FUNC("undefined stack state", insn->sec, insn->offset);
			return -1;
		}
		return 0;
	}

	if (cfi->type == UNWIND_HINT_TYPE_REGS ||
	    cfi->type == UNWIND_HINT_TYPE_REGS_PARTIAL)
		return update_cfi_state_regs(insn, cfi, op);

	switch (op->dest.type) {

	case OP_DEST_REG:
		switch (op->src.type) {

		case OP_SRC_REG:
			if (op->src.reg == CFI_SP && op->dest.reg == CFI_BP &&
			    cfa->base == CFI_SP &&
			    check_reg_frame_pos(&regs[CFI_BP], -cfa->offset)) {

				/* mov %rsp, %rbp */
				cfa->base = op->dest.reg;
				cfi->bp_scratch = false;
			}

			else if (op->src.reg == CFI_SP &&
				 op->dest.reg ==
2260 * 41 5a pop %r10 2261 * (more pops) 2262 * 5d pop %rbp 2263 * 49 8d 62 f8 lea -0x8(%r10),%rsp 2264 * c3 retq 2265 * 2266 * There are some variations in the epilogues, like: 2267 * 2268 * 5b pop %rbx 2269 * 41 5a pop %r10 2270 * 41 5c pop %r12 2271 * 41 5d pop %r13 2272 * 41 5e pop %r14 2273 * c9 leaveq 2274 * 49 8d 62 f8 lea -0x8(%r10),%rsp 2275 * c3 retq 2276 * 2277 * and: 2278 * 2279 * 4c 8b 55 e8 mov -0x18(%rbp),%r10 2280 * 48 8b 5d e0 mov -0x20(%rbp),%rbx 2281 * 4c 8b 65 f0 mov -0x10(%rbp),%r12 2282 * 4c 8b 6d f8 mov -0x8(%rbp),%r13 2283 * c9 leaveq 2284 * 49 8d 62 f8 lea -0x8(%r10),%rsp 2285 * c3 retq 2286 * 2287 * Sometimes r13 is used as the DRAP register, in which case it's saved and 2288 * restored beforehand: 2289 * 2290 * 41 55 push %r13 2291 * 4c 8d 6c 24 10 lea 0x10(%rsp),%r13 2292 * 48 83 e4 f0 and $0xfffffffffffffff0,%rsp 2293 * ... 2294 * 49 8d 65 f0 lea -0x10(%r13),%rsp 2295 * 41 5d pop %r13 2296 * c3 retq 2297 */ 2298 static int update_cfi_state(struct instruction *insn, 2299 struct instruction *next_insn, 2300 struct cfi_state *cfi, struct stack_op *op) 2301 { 2302 struct cfi_reg *cfa = &cfi->cfa; 2303 struct cfi_reg *regs = cfi->regs; 2304 2305 /* stack operations don't make sense with an undefined CFA */ 2306 if (cfa->base == CFI_UNDEFINED) { 2307 if (insn->func) { 2308 WARN_FUNC("undefined stack state", insn->sec, insn->offset); 2309 return -1; 2310 } 2311 return 0; 2312 } 2313 2314 if (cfi->type == UNWIND_HINT_TYPE_REGS || 2315 cfi->type == UNWIND_HINT_TYPE_REGS_PARTIAL) 2316 return update_cfi_state_regs(insn, cfi, op); 2317 2318 switch (op->dest.type) { 2319 2320 case OP_DEST_REG: 2321 switch (op->src.type) { 2322 2323 case OP_SRC_REG: 2324 if (op->src.reg == CFI_SP && op->dest.reg == CFI_BP && 2325 cfa->base == CFI_SP && 2326 check_reg_frame_pos(®s[CFI_BP], -cfa->offset)) { 2327 2328 /* mov %rsp, %rbp */ 2329 cfa->base = op->dest.reg; 2330 cfi->bp_scratch = false; 2331 } 2332 2333 else if (op->src.reg == CFI_SP && 2334 op->dest.reg == 
CFI_BP && cfi->drap) { 2335 2336 /* drap: mov %rsp, %rbp */ 2337 regs[CFI_BP].base = CFI_BP; 2338 regs[CFI_BP].offset = -cfi->stack_size; 2339 cfi->bp_scratch = false; 2340 } 2341 2342 else if (op->src.reg == CFI_SP && cfa->base == CFI_SP) { 2343 2344 /* 2345 * mov %rsp, %reg 2346 * 2347 * This is needed for the rare case where GCC 2348 * does: 2349 * 2350 * mov %rsp, %rax 2351 * ... 2352 * mov %rax, %rsp 2353 */ 2354 cfi->vals[op->dest.reg].base = CFI_CFA; 2355 cfi->vals[op->dest.reg].offset = -cfi->stack_size; 2356 } 2357 2358 else if (op->src.reg == CFI_BP && op->dest.reg == CFI_SP && 2359 (cfa->base == CFI_BP || cfa->base == cfi->drap_reg)) { 2360 2361 /* 2362 * mov %rbp, %rsp 2363 * 2364 * Restore the original stack pointer (Clang). 2365 */ 2366 cfi->stack_size = -cfi->regs[CFI_BP].offset; 2367 } 2368 2369 else if (op->dest.reg == cfa->base) { 2370 2371 /* mov %reg, %rsp */ 2372 if (cfa->base == CFI_SP && 2373 cfi->vals[op->src.reg].base == CFI_CFA) { 2374 2375 /* 2376 * This is needed for the rare case 2377 * where GCC does something dumb like: 2378 * 2379 * lea 0x8(%rsp), %rcx 2380 * ... 2381 * mov %rcx, %rsp 2382 */ 2383 cfa->offset = -cfi->vals[op->src.reg].offset; 2384 cfi->stack_size = cfa->offset; 2385 2386 } else if (cfa->base == CFI_SP && 2387 cfi->vals[op->src.reg].base == CFI_SP_INDIRECT && 2388 cfi->vals[op->src.reg].offset == cfa->offset) { 2389 2390 /* 2391 * Stack swizzle: 2392 * 2393 * 1: mov %rsp, (%[tos]) 2394 * 2: mov %[tos], %rsp 2395 * ... 2396 * 3: pop %rsp 2397 * 2398 * Where: 2399 * 2400 * 1 - places a pointer to the previous 2401 * stack at the Top-of-Stack of the 2402 * new stack. 2403 * 2404 * 2 - switches to the new stack. 2405 * 2406 * 3 - pops the Top-of-Stack to restore 2407 * the original stack. 2408 * 2409 * Note: we set base to SP_INDIRECT 2410 * here and preserve offset. 
Therefore 2411 * when the unwinder reaches ToS it 2412 * will dereference SP and then add the 2413 * offset to find the next frame, IOW: 2414 * (%rsp) + offset. 2415 */ 2416 cfa->base = CFI_SP_INDIRECT; 2417 2418 } else { 2419 cfa->base = CFI_UNDEFINED; 2420 cfa->offset = 0; 2421 } 2422 } 2423 2424 else if (op->dest.reg == CFI_SP && 2425 cfi->vals[op->src.reg].base == CFI_SP_INDIRECT && 2426 cfi->vals[op->src.reg].offset == cfa->offset) { 2427 2428 /* 2429 * The same stack swizzle case 2) as above. But 2430 * because we can't change cfa->base, case 3) 2431 * will become a regular POP. Pretend we're a 2432 * PUSH so things don't go unbalanced. 2433 */ 2434 cfi->stack_size += 8; 2435 } 2436 2437 2438 break; 2439 2440 case OP_SRC_ADD: 2441 if (op->dest.reg == CFI_SP && op->src.reg == CFI_SP) { 2442 2443 /* add imm, %rsp */ 2444 cfi->stack_size -= op->src.offset; 2445 if (cfa->base == CFI_SP) 2446 cfa->offset -= op->src.offset; 2447 break; 2448 } 2449 2450 if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) { 2451 2452 /* lea disp(%rbp), %rsp */ 2453 cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset); 2454 break; 2455 } 2456 2457 if (!cfi->drap && op->src.reg == CFI_SP && 2458 op->dest.reg == CFI_BP && cfa->base == CFI_SP && 2459 check_reg_frame_pos(®s[CFI_BP], -cfa->offset + op->src.offset)) { 2460 2461 /* lea disp(%rsp), %rbp */ 2462 cfa->base = CFI_BP; 2463 cfa->offset -= op->src.offset; 2464 cfi->bp_scratch = false; 2465 break; 2466 } 2467 2468 if (op->src.reg == CFI_SP && cfa->base == CFI_SP) { 2469 2470 /* drap: lea disp(%rsp), %drap */ 2471 cfi->drap_reg = op->dest.reg; 2472 2473 /* 2474 * lea disp(%rsp), %reg 2475 * 2476 * This is needed for the rare case where GCC 2477 * does something dumb like: 2478 * 2479 * lea 0x8(%rsp), %rcx 2480 * ... 
2481 * mov %rcx, %rsp 2482 */ 2483 cfi->vals[op->dest.reg].base = CFI_CFA; 2484 cfi->vals[op->dest.reg].offset = \ 2485 -cfi->stack_size + op->src.offset; 2486 2487 break; 2488 } 2489 2490 if (cfi->drap && op->dest.reg == CFI_SP && 2491 op->src.reg == cfi->drap_reg) { 2492 2493 /* drap: lea disp(%drap), %rsp */ 2494 cfa->base = CFI_SP; 2495 cfa->offset = cfi->stack_size = -op->src.offset; 2496 cfi->drap_reg = CFI_UNDEFINED; 2497 cfi->drap = false; 2498 break; 2499 } 2500 2501 if (op->dest.reg == cfi->cfa.base && !(next_insn && next_insn->hint)) { 2502 WARN_FUNC("unsupported stack register modification", 2503 insn->sec, insn->offset); 2504 return -1; 2505 } 2506 2507 break; 2508 2509 case OP_SRC_AND: 2510 if (op->dest.reg != CFI_SP || 2511 (cfi->drap_reg != CFI_UNDEFINED && cfa->base != CFI_SP) || 2512 (cfi->drap_reg == CFI_UNDEFINED && cfa->base != CFI_BP)) { 2513 WARN_FUNC("unsupported stack pointer realignment", 2514 insn->sec, insn->offset); 2515 return -1; 2516 } 2517 2518 if (cfi->drap_reg != CFI_UNDEFINED) { 2519 /* drap: and imm, %rsp */ 2520 cfa->base = cfi->drap_reg; 2521 cfa->offset = cfi->stack_size = 0; 2522 cfi->drap = true; 2523 } 2524 2525 /* 2526 * Older versions of GCC (4.8ish) realign the stack 2527 * without DRAP, with a frame pointer. 
2528 */ 2529 2530 break; 2531 2532 case OP_SRC_POP: 2533 case OP_SRC_POPF: 2534 if (op->dest.reg == CFI_SP && cfa->base == CFI_SP_INDIRECT) { 2535 2536 /* pop %rsp; # restore from a stack swizzle */ 2537 cfa->base = CFI_SP; 2538 break; 2539 } 2540 2541 if (!cfi->drap && op->dest.reg == cfa->base) { 2542 2543 /* pop %rbp */ 2544 cfa->base = CFI_SP; 2545 } 2546 2547 if (cfi->drap && cfa->base == CFI_BP_INDIRECT && 2548 op->dest.reg == cfi->drap_reg && 2549 cfi->drap_offset == -cfi->stack_size) { 2550 2551 /* drap: pop %drap */ 2552 cfa->base = cfi->drap_reg; 2553 cfa->offset = 0; 2554 cfi->drap_offset = -1; 2555 2556 } else if (cfi->stack_size == -regs[op->dest.reg].offset) { 2557 2558 /* pop %reg */ 2559 restore_reg(cfi, op->dest.reg); 2560 } 2561 2562 cfi->stack_size -= 8; 2563 if (cfa->base == CFI_SP) 2564 cfa->offset -= 8; 2565 2566 break; 2567 2568 case OP_SRC_REG_INDIRECT: 2569 if (!cfi->drap && op->dest.reg == cfa->base && 2570 op->dest.reg == CFI_BP) { 2571 2572 /* mov disp(%rsp), %rbp */ 2573 cfa->base = CFI_SP; 2574 cfa->offset = cfi->stack_size; 2575 } 2576 2577 if (cfi->drap && op->src.reg == CFI_BP && 2578 op->src.offset == cfi->drap_offset) { 2579 2580 /* drap: mov disp(%rbp), %drap */ 2581 cfa->base = cfi->drap_reg; 2582 cfa->offset = 0; 2583 cfi->drap_offset = -1; 2584 } 2585 2586 if (cfi->drap && op->src.reg == CFI_BP && 2587 op->src.offset == regs[op->dest.reg].offset) { 2588 2589 /* drap: mov disp(%rbp), %reg */ 2590 restore_reg(cfi, op->dest.reg); 2591 2592 } else if (op->src.reg == cfa->base && 2593 op->src.offset == regs[op->dest.reg].offset + cfa->offset) { 2594 2595 /* mov disp(%rbp), %reg */ 2596 /* mov disp(%rsp), %reg */ 2597 restore_reg(cfi, op->dest.reg); 2598 2599 } else if (op->src.reg == CFI_SP && 2600 op->src.offset == regs[op->dest.reg].offset + cfi->stack_size) { 2601 2602 /* mov disp(%rsp), %reg */ 2603 restore_reg(cfi, op->dest.reg); 2604 } 2605 2606 break; 2607 2608 default: 2609 WARN_FUNC("unknown stack-related instruction", 
2610 insn->sec, insn->offset); 2611 return -1; 2612 } 2613 2614 break; 2615 2616 case OP_DEST_PUSH: 2617 case OP_DEST_PUSHF: 2618 cfi->stack_size += 8; 2619 if (cfa->base == CFI_SP) 2620 cfa->offset += 8; 2621 2622 if (op->src.type != OP_SRC_REG) 2623 break; 2624 2625 if (cfi->drap) { 2626 if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) { 2627 2628 /* drap: push %drap */ 2629 cfa->base = CFI_BP_INDIRECT; 2630 cfa->offset = -cfi->stack_size; 2631 2632 /* save drap so we know when to restore it */ 2633 cfi->drap_offset = -cfi->stack_size; 2634 2635 } else if (op->src.reg == CFI_BP && cfa->base == cfi->drap_reg) { 2636 2637 /* drap: push %rbp */ 2638 cfi->stack_size = 0; 2639 2640 } else { 2641 2642 /* drap: push %reg */ 2643 save_reg(cfi, op->src.reg, CFI_BP, -cfi->stack_size); 2644 } 2645 2646 } else { 2647 2648 /* push %reg */ 2649 save_reg(cfi, op->src.reg, CFI_CFA, -cfi->stack_size); 2650 } 2651 2652 /* detect when asm code uses rbp as a scratch register */ 2653 if (!no_fp && insn->func && op->src.reg == CFI_BP && 2654 cfa->base != CFI_BP) 2655 cfi->bp_scratch = true; 2656 break; 2657 2658 case OP_DEST_REG_INDIRECT: 2659 2660 if (cfi->drap) { 2661 if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) { 2662 2663 /* drap: mov %drap, disp(%rbp) */ 2664 cfa->base = CFI_BP_INDIRECT; 2665 cfa->offset = op->dest.offset; 2666 2667 /* save drap offset so we know when to restore it */ 2668 cfi->drap_offset = op->dest.offset; 2669 } else { 2670 2671 /* drap: mov reg, disp(%rbp) */ 2672 save_reg(cfi, op->src.reg, CFI_BP, op->dest.offset); 2673 } 2674 2675 } else if (op->dest.reg == cfa->base) { 2676 2677 /* mov reg, disp(%rbp) */ 2678 /* mov reg, disp(%rsp) */ 2679 save_reg(cfi, op->src.reg, CFI_CFA, 2680 op->dest.offset - cfi->cfa.offset); 2681 2682 } else if (op->dest.reg == CFI_SP) { 2683 2684 /* mov reg, disp(%rsp) */ 2685 save_reg(cfi, op->src.reg, CFI_CFA, 2686 op->dest.offset - cfi->stack_size); 2687 2688 } else if (op->src.reg == CFI_SP && 
op->dest.offset == 0) { 2689 2690 /* mov %rsp, (%reg); # setup a stack swizzle. */ 2691 cfi->vals[op->dest.reg].base = CFI_SP_INDIRECT; 2692 cfi->vals[op->dest.reg].offset = cfa->offset; 2693 } 2694 2695 break; 2696 2697 case OP_DEST_MEM: 2698 if (op->src.type != OP_SRC_POP && op->src.type != OP_SRC_POPF) { 2699 WARN_FUNC("unknown stack-related memory operation", 2700 insn->sec, insn->offset); 2701 return -1; 2702 } 2703 2704 /* pop mem */ 2705 cfi->stack_size -= 8; 2706 if (cfa->base == CFI_SP) 2707 cfa->offset -= 8; 2708 2709 break; 2710 2711 default: 2712 WARN_FUNC("unknown stack-related instruction", 2713 insn->sec, insn->offset); 2714 return -1; 2715 } 2716 2717 return 0; 2718 } 2719 2720 /* 2721 * The stack layouts of alternatives instructions can sometimes diverge when 2722 * they have stack modifications. That's fine as long as the potential stack 2723 * layouts don't conflict at any given potential instruction boundary. 2724 * 2725 * Flatten the CFIs of the different alternative code streams (both original 2726 * and replacement) into a single shared CFI array which can be used to detect 2727 * conflicts and nicely feed a linear array of ORC entries to the unwinder. 
 */
static int propagate_alt_cfi(struct objtool_file *file, struct instruction *insn)
{
	struct cfi_state **alt_cfi;
	int group_off;

	/* Only instructions inside an alternatives group participate. */
	if (!insn->alt_group)
		return 0;

	if (!insn->cfi) {
		WARN("CFI missing");
		return -1;
	}

	alt_cfi = insn->alt_group->cfi;
	/* Index into the shared CFI array by byte offset within the group. */
	group_off = insn->offset - insn->alt_group->first_insn->offset;

	if (!alt_cfi[group_off]) {
		/* First code stream to reach this offset records its CFI. */
		alt_cfi[group_off] = insn->cfi;
	} else {
		/* Later streams must agree with the recorded CFI. */
		if (cficmp(alt_cfi[group_off], insn->cfi)) {
			WARN_FUNC("stack layout conflict in alternatives",
				  insn->sec, insn->offset);
			return -1;
		}
	}

	return 0;
}

/*
 * Apply all of an instruction's stack operations to the branch state, and
 * maintain the PUSHF/POPF uaccess shadow stack for alternatives code.
 */
static int handle_insn_ops(struct instruction *insn,
			   struct instruction *next_insn,
			   struct insn_state *state)
{
	struct stack_op *op;

	list_for_each_entry(op, &insn->stack_ops, list) {

		if (update_cfi_state(insn, next_insn, &state->cfi, op))
			return 1;

		/* The uaccess shadow stack is only tracked for alternatives. */
		if (!insn->alt_group)
			continue;

		if (op->dest.type == OP_DEST_PUSHF) {
			if (!state->uaccess_stack) {
				/* Sentinel bit marking the bottom of the stack. */
				state->uaccess_stack = 1;
			} else if (state->uaccess_stack >> 31) {
				/* 31 pushes max: the sentinel reached the top bit. */
				WARN_FUNC("PUSHF stack exhausted",
					  insn->sec, insn->offset);
				return 1;
			}
			/* Push the current uaccess flag into the low bit. */
			state->uaccess_stack <<= 1;
			state->uaccess_stack |= state->uaccess;
		}

		if (op->src.type == OP_SRC_POPF) {
			if (state->uaccess_stack) {
				/* Pop the saved uaccess flag from the low bit. */
				state->uaccess = state->uaccess_stack & 1;
				state->uaccess_stack >>= 1;
				/* Only the sentinel left: the stack is empty. */
				if (state->uaccess_stack == 1)
					state->uaccess_stack = 0;
			}
		}
	}

	return 0;
}

/*
 * Compare an instruction's recorded CFI with the incoming branch state and
 * warn about the first mismatching component (CFA, regs, type or DRAP).
 * Returns true iff the states match.
 */
static bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2)
{
	struct cfi_state *cfi1 = insn->cfi;
	int i;

	if (!cfi1) {
		WARN("CFI missing");
		return false;
	}

	if (memcmp(&cfi1->cfa, &cfi2->cfa, sizeof(cfi1->cfa))) {

		WARN_FUNC("stack state mismatch: cfa1=%d%+d cfa2=%d%+d",
			  insn->sec, insn->offset,
			  cfi1->cfa.base, cfi1->cfa.offset,
			  cfi2->cfa.base, cfi2->cfa.offset);

	} else if (memcmp(&cfi1->regs, &cfi2->regs, sizeof(cfi1->regs))) {
		for (i = 0; i < CFI_NUM_REGS; i++) {
			if (!memcmp(&cfi1->regs[i], &cfi2->regs[i],
				    sizeof(struct cfi_reg)))
				continue;

			WARN_FUNC("stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d",
				  insn->sec, insn->offset,
				  i, cfi1->regs[i].base, cfi1->regs[i].offset,
				  i, cfi2->regs[i].base, cfi2->regs[i].offset);
			break;
		}

	} else if (cfi1->type != cfi2->type) {

		WARN_FUNC("stack state mismatch: type1=%d type2=%d",
			  insn->sec, insn->offset, cfi1->type, cfi2->type);

	} else if (cfi1->drap != cfi2->drap ||
		   (cfi1->drap && cfi1->drap_reg != cfi2->drap_reg) ||
		   (cfi1->drap && cfi1->drap_offset != cfi2->drap_offset)) {

		WARN_FUNC("stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)",
			  insn->sec, insn->offset,
			  cfi1->drap, cfi1->drap_reg, cfi1->drap_offset,
			  cfi2->drap, cfi2->drap_reg, cfi2->drap_offset);

	} else
		return true;

	return false;
}

/* A NULL func (e.g. an indirect call target) is never uaccess-safe. */
static inline bool func_uaccess_safe(struct symbol *func)
{
	if (func)
		return func->uaccess_safe;

	return false;
}

/*
 * Human-readable name of a call's destination, for warning messages.
 * Falls back to "pv_ops[idx]" for paravirt slots and "{dynamic}" for
 * unresolvable indirect calls.  NOTE: returns a static buffer for the
 * pv_ops case, so the result is only valid until the next call.
 */
static inline const char *call_dest_name(struct instruction *insn)
{
	/* NOTE(review): 19 bytes may truncate very large indices — confirm. */
	static char pvname[19];
	struct reloc *rel;
	int idx;

	if (insn->call_dest)
		return insn->call_dest->name;

	rel = insn_reloc(NULL, insn);
	if (rel && !strcmp(rel->sym->name, "pv_ops")) {
		idx = (rel->addend / sizeof(void *));
		snprintf(pvname, sizeof(pvname), "pv_ops[%d]", idx);
		return pvname;
	}

	return "{dynamic}";
}

/*
 * Check whether an indirect call through a pv_ops slot only has noinstr
 * targets.  The per-slot result is cached in file->pv_ops[idx].clean;
 * any non-noinstr target is warned about and marks the slot dirty.
 */
static bool pv_call_dest(struct objtool_file *file, struct instruction *insn)
{
	struct symbol *target;
	struct reloc *rel;
	int idx;

	rel = insn_reloc(file, insn);
	if (!rel || strcmp(rel->sym->name, "pv_ops"))
		return false;

	idx = (arch_dest_reloc_offset(rel->addend) / sizeof(void *));

	if (file->pv_ops[idx].clean)
		return true;

	/* Assume clean, then let any bad target below invalidate it. */
	file->pv_ops[idx].clean = true;

	list_for_each_entry(target, &file->pv_ops[idx].targets, pv_target) {
		if (!target->sec->noinstr) {
			WARN("pv_ops[%d]: %s", idx, target->name);
			file->pv_ops[idx].clean = false;
		}
	}

	return file->pv_ops[idx].clean;
}

/*
 * Decide whether a call destination is acceptable from noinstr code,
 * i.e. it will not trigger instrumentation.
 */
static inline bool noinstr_call_dest(struct objtool_file *file,
				     struct instruction *insn,
				     struct symbol *func)
{
	/*
	 * We can't deal with indirect function calls at present;
	 * assume they're instrumented.
	 */
	if (!func) {
		if (file->pv_ops)
			return pv_call_dest(file, insn);

		return false;
	}

	/*
	 * If the symbol is from a noinstr section; we good.
	 */
	if (func->sec->noinstr)
		return true;

	/*
	 * The __ubsan_handle_*() calls are like WARN(), they only happen when
	 * something 'BAD' happened. At the risk of taking the machine down,
	 * let them proceed to get the message out.
	 */
	if (!strncmp(func->name, "__ubsan_handle_", 15))
		return true;

	return false;
}

/*
 * Validate a call instruction against the current branch state:
 * noinstr rules, UACCESS (SMAP) rules, and the direction flag.
 * Returns non-zero (a warning) on a violation.
 */
static int validate_call(struct objtool_file *file,
			 struct instruction *insn,
			 struct insn_state *state)
{
	if (state->noinstr && state->instr <= 0 &&
	    !noinstr_call_dest(file, insn, insn->call_dest)) {
		WARN_FUNC("call to %s() leaves .noinstr.text section",
			  insn->sec, insn->offset, call_dest_name(insn));
		return 1;
	}

	if (state->uaccess && !func_uaccess_safe(insn->call_dest)) {
		WARN_FUNC("call to %s() with UACCESS enabled",
			  insn->sec, insn->offset, call_dest_name(insn));
		return 1;
	}

	if (state->df) {
		WARN_FUNC("call to %s() with DF set",
			  insn->sec, insn->offset, call_dest_name(insn));
		return 1;
	}

	return 0;
}

/*
 * A sibling call (tail call) must additionally leave the stack frame
 * in its original state; then the usual call rules apply.
 */
static int validate_sibling_call(struct objtool_file *file,
				 struct instruction *insn,
				 struct insn_state *state)
{
	if (has_modified_stack_frame(insn, state)) {
		WARN_FUNC("sibling call from callable instruction with modified stack frame",
			  insn->sec, insn->offset);
		return 1;
	}

	return validate_call(file, insn, state);
}

/*
 * Validate a return instruction: instrumentation must be balanced,
 * UACCESS/DF must be off (unless the function is uaccess-safe), and the
 * stack frame must have been restored.
 */
static int validate_return(struct symbol *func, struct instruction *insn, struct insn_state *state)
{
	if (state->noinstr && state->instr > 0) {
		WARN_FUNC("return with instrumentation enabled",
			  insn->sec, insn->offset);
		return 1;
	}

	if (state->uaccess && !func_uaccess_safe(func)) {
		WARN_FUNC("return with UACCESS enabled",
			  insn->sec, insn->offset);
		return 1;
	}

	if (!state->uaccess && func_uaccess_safe(func)) {
		WARN_FUNC("return with UACCESS disabled from a UACCESS-safe function",
			  insn->sec, insn->offset);
		return 1;
	}

	if (state->df) {
		WARN_FUNC("return with DF set",
			  insn->sec, insn->offset);
		return 1;
	}

	if (func && has_modified_stack_frame(insn, state)) {
		WARN_FUNC("return with modified stack frame",
			  insn->sec, insn->offset);
		return 1;
	}

	if (state->cfi.bp_scratch) {
		WARN_FUNC("BP used as a scratch register",
			  insn->sec, insn->offset);
		return 1;
	}

	return 0;
}

static struct instruction *next_insn_to_validate(struct objtool_file *file,
						 struct instruction *insn)
{
	struct alt_group *alt_group = insn->alt_group;

	/*
	 * Simulate the fact that alternatives are patched in-place.  When the
	 * end of a replacement alt_group is reached, redirect objtool flow to
	 * the end of the original alt_group.
	 */
	if (alt_group && insn == alt_group->last_insn && alt_group->orig_group)
		return next_insn_same_sec(file, alt_group->orig_group->last_insn);

	return next_insn_same_sec(file, insn);
}

/*
 * Follow the branch starting at the given instruction, and recursively follow
 * any other branches (jumps).  Meanwhile, track the frame pointer state at
 * each instruction and validate all the rules described in
 * tools/objtool/Documentation/stack-validation.txt.
 */
static int validate_branch(struct objtool_file *file, struct symbol *func,
			   struct instruction *insn, struct insn_state state)
{
	struct alternative *alt;
	struct instruction *next_insn, *prev_insn = NULL;
	struct section *sec;
	u8 visited;
	int ret;

	sec = insn->sec;

	while (1) {
		next_insn = next_insn_to_validate(file, insn);

		if (file->c_file && func && insn->func && func != insn->func->pfunc) {
			WARN("%s() falls through to next function %s()",
			     func->name, insn->func->name);
			return 1;
		}

		if (func && insn->ignore) {
			WARN_FUNC("BUG: why am I validating an ignored function?",
				  sec, insn->offset);
			return 1;
		}

		/* One visited bit per uaccess state (off/on). */
		visited = 1 << state.uaccess;
		if (insn->visited) {
			/* Revisits must agree with the previously recorded CFI. */
			if (!insn->hint && !insn_cfi_match(insn, &state.cfi))
				return 1;

			/* Already validated with this uaccess state: done. */
			if (insn->visited & visited)
				return 0;
		} else {
			nr_insns_visited++;
		}

		if (state.noinstr)
			state.instr += insn->instr;

		if (insn->hint) {
			/* An unwind hint overrides the computed state. */
			state.cfi = *insn->cfi;
		} else {
			/* XXX track if we actually changed state.cfi */

			/* Share the previous insn's CFI object when unchanged. */
			if (prev_insn && !cficmp(prev_insn->cfi, &state.cfi)) {
				insn->cfi = prev_insn->cfi;
				nr_cfi_reused++;
			} else {
				insn->cfi = cfi_hash_find_or_add(&state.cfi);
			}
		}

		insn->visited |= visited;

		if (propagate_alt_cfi(file, insn))
			return 1;

		if (!insn->ignore_alts && !list_empty(&insn->alts)) {
			bool skip_orig = false;

			/* Validate every alternative code stream. */
			list_for_each_entry(alt, &insn->alts, list) {
				if (alt->skip_orig)
					skip_orig = true;

				ret = validate_branch(file, func, alt->insn, state);
				if (ret) {
					if (backtrace)
						BT_FUNC("(alt)", insn);
					return ret;
				}
			}

			if (skip_orig)
				return 0;
		}

		if (handle_insn_ops(insn, next_insn, &state))
			return 1;

		switch (insn->type) {

		case INSN_RETURN:
			/* SLS mitigation: a trap must follow the return. */
			if (next_insn && next_insn->type == INSN_TRAP) {
				next_insn->ignore = true;
			} else if (sls && !insn->retpoline_safe) {
				WARN_FUNC("missing int3 after ret",
					  insn->sec, insn->offset);
			}
			return validate_return(func, insn, &state);

		case INSN_CALL:
		case INSN_CALL_DYNAMIC:
			ret = validate_call(file, insn, &state);
			if (ret)
				return ret;

			if (!no_fp && func && !is_fentry_call(insn) &&
			    !has_valid_stack_frame(&state)) {
				WARN_FUNC("call without frame pointer save/setup",
					  sec, insn->offset);
				return 1;
			}

			/* Calls to __noreturn functions don't fall through. */
			if (dead_end_function(file, insn->call_dest))
				return 0;

			break;

		case INSN_JUMP_CONDITIONAL:
		case INSN_JUMP_UNCONDITIONAL:
			if (is_sibling_call(insn)) {
				ret = validate_sibling_call(file, insn, &state);
				if (ret)
					return ret;

			} else if (insn->jump_dest) {
				/* Recurse into the jump target with a state copy. */
				ret = validate_branch(file, func,
						      insn->jump_dest, state);
				if (ret) {
					if (backtrace)
						BT_FUNC("(branch)", insn);
					return ret;
				}
			}

			if (insn->type == INSN_JUMP_UNCONDITIONAL)
				return 0;

			break;

		case INSN_JUMP_DYNAMIC:
			/* SLS mitigation: a trap must follow the indirect jump. */
			if (next_insn && next_insn->type == INSN_TRAP) {
				next_insn->ignore = true;
			} else if (sls && !insn->retpoline_safe) {
				WARN_FUNC("missing int3 after indirect jump",
					  insn->sec, insn->offset);
			}

			/* fallthrough */
		case INSN_JUMP_DYNAMIC_CONDITIONAL:
			if (is_sibling_call(insn)) {
				ret = validate_sibling_call(file, insn, &state);
				if (ret)
					return ret;
			}

			if (insn->type == INSN_JUMP_DYNAMIC)
				return 0;

			break;

		case INSN_CONTEXT_SWITCH:
			if (func && (!next_insn || !next_insn->hint)) {
				WARN_FUNC("unsupported instruction in callable function",
					  sec, insn->offset);
				return 1;
			}
			return 0;

		case INSN_STAC:
			if (state.uaccess) {
				WARN_FUNC("recursive UACCESS enable", sec, insn->offset);
				return 1;
			}

			state.uaccess = true;
			break;

		case INSN_CLAC:
			if (!state.uaccess && func) {
				WARN_FUNC("redundant UACCESS disable", sec, insn->offset);
				return 1;
			}

			if (func_uaccess_safe(func) && !state.uaccess_stack) {
				WARN_FUNC("UACCESS-safe disables UACCESS", sec, insn->offset);
				return 1;
			}

			state.uaccess = false;
			break;

		case INSN_STD:
			if (state.df) {
				WARN_FUNC("recursive STD", sec, insn->offset);
				return 1;
			}

			state.df = true;
			break;

		case INSN_CLD:
			if (!state.df && func) {
				WARN_FUNC("redundant CLD", sec, insn->offset);
				return 1;
			}

			state.df = false;
			break;

		default:
			break;
		}

		if (insn->dead_end)
			return 0;

		if (!next_insn) {
			if (state.cfi.cfa.base == CFI_UNDEFINED)
				return 0;
			WARN("%s: unexpected end of section", sec->name);
			return 1;
		}

		prev_insn = insn;
		insn = next_insn;
	}

	return 0;
}

/*
 * Validate every unvisited instruction carrying an unwind hint, either in
 * the given section or (sec == NULL) in the whole file.  Returns the number
 * of warnings.
 */
static int validate_unwind_hints(struct objtool_file *file, struct section *sec)
{
	struct instruction *insn;
	struct insn_state state;
	int ret, warnings = 0;

	if (!file->hints)
		return 0;

	init_insn_state(&state, sec);

	if (sec) {
		insn = find_insn(file, sec, 0);
		if (!insn)
			return 0;
	} else {
		insn = list_first_entry(&file->insn_list, typeof(*insn), list);
	}

	while (&insn->list != &file->insn_list && (!sec || insn->sec == sec)) {
		if (insn->hint && !insn->visited && !insn->ignore) {
			ret = validate_branch(file, insn->func, insn, state);
			if (ret && backtrace)
				BT_FUNC("<=== (hint)", insn);
			warnings += ret;
		}

		insn = list_next_entry(insn, list);
	}

	return warnings;
}

/*
 * In a retpoline build, warn about every indirect jump/call that isn't
 * annotated retpoline-safe.  Returns the number of warnings.
 */
static int validate_retpoline(struct objtool_file *file)
{
	struct instruction *insn;
	int warnings = 0;

	for_each_insn(file, insn) {
		if (insn->type != INSN_JUMP_DYNAMIC &&
		    insn->type != INSN_CALL_DYNAMIC)
			continue;

		if (insn->retpoline_safe)
			continue;

		/*
		 * .init.text code is ran before userspace and thus doesn't
		 * strictly need retpolines, except for modules which are
		 * loaded late, they very much do need retpoline in their
		 * .init.text
		 */
		if (!strcmp(insn->sec->name, ".init.text") && !module)
			continue;

		WARN_FUNC("indirect %s found in RETPOLINE build",
			  insn->sec, insn->offset,
			  insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call");

		warnings++;
	}

	return warnings;
}

/* Call to KASAN's no-return handler? */
static bool is_kasan_insn(struct instruction *insn)
{
	return (insn->type == INSN_CALL &&
		!strcmp(insn->call_dest->name, "__asan_handle_no_return"));
}

/* Call to UBSAN's unreachable handler? */
static bool is_ubsan_insn(struct instruction *insn)
{
	return (insn->type == INSN_CALL &&
		!strcmp(insn->call_dest->name,
			"__ubsan_handle_builtin_unreachable"));
}

/*
 * Decide whether an unvisited instruction may legitimately be unreachable
 * (NOPs, alternatives padding, compiler-emitted traps after __noreturn
 * calls, KASAN/UBSAN artifacts, ...), in which case no warning is issued.
 */
static bool ignore_unreachable_insn(struct objtool_file *file, struct instruction *insn)
{
	int i;
	struct instruction *prev_insn;

	if (insn->ignore || insn->type == INSN_NOP)
		return true;

	/*
	 * Ignore alternative replacement instructions.  This can happen
	 * when a whitelisted function uses one of the ALTERNATIVE macros.
	 */
	if (!strcmp(insn->sec->name, ".altinstr_replacement") ||
	    !strcmp(insn->sec->name, ".altinstr_aux"))
		return true;

	if (!insn->func)
		return false;

	if (insn->func->static_call_tramp)
		return true;

	/*
	 * CONFIG_UBSAN_TRAP inserts a UD2 when it sees
	 * __builtin_unreachable().  The BUG() macro has an unreachable() after
	 * the UD2, which causes GCC's undefined trap logic to emit another UD2
	 * (or occasionally a JMP to UD2).
	 *
	 * It may also insert a UD2 after calling a __noreturn function.
	 */
	prev_insn = list_prev_entry(insn, list);
	if ((prev_insn->dead_end || dead_end_function(file, prev_insn->call_dest)) &&
	    (insn->type == INSN_BUG ||
	     (insn->type == INSN_JUMP_UNCONDITIONAL &&
	      insn->jump_dest && insn->jump_dest->type == INSN_BUG)))
		return true;

	/*
	 * Check if this (or a subsequent) instruction is related to
	 * CONFIG_UBSAN or CONFIG_KASAN.
	 *
	 * End the search at 5 instructions to avoid going into the weeds.
	 */
	for (i = 0; i < 5; i++) {

		if (is_kasan_insn(insn) || is_ubsan_insn(insn))
			return true;

		if (insn->type == INSN_JUMP_UNCONDITIONAL) {
			if (insn->jump_dest &&
			    insn->jump_dest->func == insn->func) {
				insn = insn->jump_dest;
				continue;
			}

			break;
		}

		if (insn->offset + insn->len >= insn->func->offset + insn->func->len)
			break;

		insn = list_next_entry(insn, list);
	}

	return false;
}

/*
 * Validate one ELF function symbol by following the branch starting at its
 * first instruction.  Aliases and sub-functions are validated via their
 * parent; already-visited entry points are skipped.
 */
static int validate_symbol(struct objtool_file *file, struct section *sec,
			   struct symbol *sym, struct insn_state *state)
{
	struct instruction *insn;
	int ret;

	if (!sym->len) {
		WARN("%s() is missing an ELF size annotation", sym->name);
		return 1;
	}

	/* Only validate the canonical (parent, non-alias) symbol. */
	if (sym->pfunc != sym || sym->alias != sym)
		return 0;

	insn = find_insn(file, sec, sym->offset);
	if (!insn || insn->ignore || insn->visited)
		return 0;

	state->uaccess = sym->uaccess_safe;

	ret = validate_branch(file, insn->func, insn, *state);
	if (ret && backtrace)
		BT_FUNC("<=== (sym)", insn);
	return ret;
}

/* Validate every STT_FUNC symbol in a section; returns warning count. */
static int validate_section(struct objtool_file *file, struct section *sec)
{
struct insn_state state; 3431 struct symbol *func; 3432 int warnings = 0; 3433 3434 list_for_each_entry(func, &sec->symbol_list, list) { 3435 if (func->type != STT_FUNC) 3436 continue; 3437 3438 init_insn_state(&state, sec); 3439 set_func_state(&state.cfi); 3440 3441 warnings += validate_symbol(file, sec, func, &state); 3442 } 3443 3444 return warnings; 3445 } 3446 3447 static int validate_vmlinux_functions(struct objtool_file *file) 3448 { 3449 struct section *sec; 3450 int warnings = 0; 3451 3452 sec = find_section_by_name(file->elf, ".noinstr.text"); 3453 if (sec) { 3454 warnings += validate_section(file, sec); 3455 warnings += validate_unwind_hints(file, sec); 3456 } 3457 3458 sec = find_section_by_name(file->elf, ".entry.text"); 3459 if (sec) { 3460 warnings += validate_section(file, sec); 3461 warnings += validate_unwind_hints(file, sec); 3462 } 3463 3464 return warnings; 3465 } 3466 3467 static int validate_functions(struct objtool_file *file) 3468 { 3469 struct section *sec; 3470 int warnings = 0; 3471 3472 for_each_sec(file, sec) { 3473 if (!(sec->sh.sh_flags & SHF_EXECINSTR)) 3474 continue; 3475 3476 warnings += validate_section(file, sec); 3477 } 3478 3479 return warnings; 3480 } 3481 3482 static int validate_reachable_instructions(struct objtool_file *file) 3483 { 3484 struct instruction *insn; 3485 3486 if (file->ignore_unreachables) 3487 return 0; 3488 3489 for_each_insn(file, insn) { 3490 if (insn->visited || ignore_unreachable_insn(file, insn)) 3491 continue; 3492 3493 WARN_FUNC("unreachable instruction", insn->sec, insn->offset); 3494 return 1; 3495 } 3496 3497 return 0; 3498 } 3499 3500 int check(struct objtool_file *file) 3501 { 3502 int ret, warnings = 0; 3503 3504 arch_initial_func_cfi_state(&initial_func_cfi); 3505 init_cfi_state(&init_cfi); 3506 init_cfi_state(&func_cfi); 3507 set_func_state(&func_cfi); 3508 3509 if (!cfi_hash_alloc(1UL << (file->elf->symbol_bits - 3))) 3510 goto out; 3511 3512 cfi_hash_add(&init_cfi); 3513 
cfi_hash_add(&func_cfi); 3514 3515 ret = decode_sections(file); 3516 if (ret < 0) 3517 goto out; 3518 3519 warnings += ret; 3520 3521 if (list_empty(&file->insn_list)) 3522 goto out; 3523 3524 if (vmlinux && !validate_dup) { 3525 ret = validate_vmlinux_functions(file); 3526 if (ret < 0) 3527 goto out; 3528 3529 warnings += ret; 3530 goto out; 3531 } 3532 3533 if (retpoline) { 3534 ret = validate_retpoline(file); 3535 if (ret < 0) 3536 return ret; 3537 warnings += ret; 3538 } 3539 3540 ret = validate_functions(file); 3541 if (ret < 0) 3542 goto out; 3543 warnings += ret; 3544 3545 ret = validate_unwind_hints(file, NULL); 3546 if (ret < 0) 3547 goto out; 3548 warnings += ret; 3549 3550 if (!warnings) { 3551 ret = validate_reachable_instructions(file); 3552 if (ret < 0) 3553 goto out; 3554 warnings += ret; 3555 } 3556 3557 ret = create_static_call_sections(file); 3558 if (ret < 0) 3559 goto out; 3560 warnings += ret; 3561 3562 if (retpoline) { 3563 ret = create_retpoline_sites_sections(file); 3564 if (ret < 0) 3565 goto out; 3566 warnings += ret; 3567 } 3568 3569 if (mcount) { 3570 ret = create_mcount_loc_sections(file); 3571 if (ret < 0) 3572 goto out; 3573 warnings += ret; 3574 } 3575 3576 if (stats) { 3577 printf("nr_insns_visited: %ld\n", nr_insns_visited); 3578 printf("nr_cfi: %ld\n", nr_cfi); 3579 printf("nr_cfi_reused: %ld\n", nr_cfi_reused); 3580 printf("nr_cfi_cache: %ld\n", nr_cfi_cache); 3581 } 3582 3583 out: 3584 /* 3585 * For now, don't fail the kernel build on fatal warnings. These 3586 * errors are still fairly common due to the growing matrix of 3587 * supported toolchains and their recent pace of change. 3588 */ 3589 return 0; 3590 } 3591