#include <dirent.h>
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/param.h>
#include <fcntl.h>
#include <unistd.h>
#include <inttypes.h>
#include "build-id.h"
#include "util.h"
#include "debug.h"
#include "machine.h"
#include "symbol.h"
#include "strlist.h"
#include "header.h"

#include <elf.h>
#include <limits.h>
#include <symbol/kallsyms.h>
#include <sys/utsname.h>

static int dso__load_kernel_sym(struct dso *dso, struct map *map,
				symbol_filter_t filter);
static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map,
				      symbol_filter_t filter);
int vmlinux_path__nr_entries;
char **vmlinux_path;

struct symbol_conf symbol_conf = {
	.use_modules		= true,
	.try_vmlinux_path	= true,
	.annotate_src		= true,
	.demangle		= true,
	.demangle_kernel	= false,
	.cumulate_callchain	= true,
	.show_hist_headers	= true,
	.symfs			= "",
};

static enum dso_binary_type binary_type_symtab[] = {
	DSO_BINARY_TYPE__KALLSYMS,
	DSO_BINARY_TYPE__GUEST_KALLSYMS,
	DSO_BINARY_TYPE__JAVA_JIT,
	DSO_BINARY_TYPE__DEBUGLINK,
	DSO_BINARY_TYPE__BUILD_ID_CACHE,
	DSO_BINARY_TYPE__FEDORA_DEBUGINFO,
	DSO_BINARY_TYPE__UBUNTU_DEBUGINFO,
	DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
	DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
	DSO_BINARY_TYPE__GUEST_KMODULE,
	DSO_BINARY_TYPE__GUEST_KMODULE_COMP,
	DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE,
	DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP,
	DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
	DSO_BINARY_TYPE__NOT_FOUND,
};

#define DSO_BINARY_TYPE__SYMTAB_CNT ARRAY_SIZE(binary_type_symtab)

bool symbol_type__is_a(char symbol_type, enum map_type map_type)
{
	symbol_type = toupper(symbol_type);

	switch (map_type) {
	case MAP__FUNCTION:
		return symbol_type == 'T' || symbol_type == 'W';
	case MAP__VARIABLE:
		return symbol_type == 'D';
	default:
		return false;
	}
}

static int prefix_underscores_count(const char *str)
{
	const char *tail = str;

	while (*tail == '_')
		tail++;

	return tail - str;
}

#define SYMBOL_A 0
#define SYMBOL_B 1

static int choose_best_symbol(struct symbol *syma, struct symbol *symb)
{
	s64 a;
	s64 b;
	size_t na, nb;

	/* Prefer a symbol with non zero length */
	a = syma->end - syma->start;
	b = symb->end - symb->start;
	if ((b == 0) && (a > 0))
		return SYMBOL_A;
	else if ((a == 0) && (b > 0))
		return SYMBOL_B;

	/* Prefer a non weak symbol over a weak one */
	a = syma->binding == STB_WEAK;
	b = symb->binding == STB_WEAK;
	if (b && !a)
		return SYMBOL_A;
	if (a && !b)
		return SYMBOL_B;

	/* Prefer a global symbol over a non global one */
	a = syma->binding == STB_GLOBAL;
	b = symb->binding == STB_GLOBAL;
	if (a && !b)
		return SYMBOL_A;
	if (b && !a)
		return SYMBOL_B;

	/* Prefer a symbol with fewer leading underscores */
	a = prefix_underscores_count(syma->name);
	b = prefix_underscores_count(symb->name);
	if (b > a)
		return SYMBOL_A;
	else if (a > b)
		return SYMBOL_B;

	/* Choose the symbol with the longest name */
	na = strlen(syma->name);
	nb = strlen(symb->name);
	if (na > nb)
		return SYMBOL_A;
	else if (na < nb)
		return SYMBOL_B;

	/* Avoid "SyS" kernel syscall aliases */
	if (na >= 3 && !strncmp(syma->name, "SyS", 3))
		return SYMBOL_B;
	if (na >= 10 && !strncmp(syma->name, "compat_SyS", 10))
		return SYMBOL_B;

	return SYMBOL_A;
}

void symbols__fixup_duplicate(struct rb_root *symbols)
{
	struct rb_node *nd;
	struct symbol *curr, *next;

	nd = rb_first(symbols);

	while (nd) {
		curr = rb_entry(nd, struct symbol, rb_node);
again:
		nd = rb_next(&curr->rb_node);
		next = rb_entry(nd, struct symbol, rb_node);

		if (!nd)
			break;

		if (curr->start != next->start)
			continue;

		if (choose_best_symbol(curr, next) == SYMBOL_A) {
			rb_erase(&next->rb_node, symbols);
			symbol__delete(next);
			goto again;
		} else {
			nd = rb_next(&curr->rb_node);
			rb_erase(&curr->rb_node, symbols);
			symbol__delete(curr);
		}
	}
}

void symbols__fixup_end(struct rb_root *symbols)
{
	struct rb_node *nd, *prevnd = rb_first(symbols);
	struct symbol *curr, *prev;

	if (prevnd == NULL)
		return;

	curr = rb_entry(prevnd, struct symbol, rb_node);

	for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {
		prev = curr;
		curr = rb_entry(nd, struct symbol, rb_node);

		if (prev->end == prev->start && prev->end != curr->start)
			prev->end = curr->start;
	}

	/* Last entry */
	if (curr->end == curr->start)
		curr->end = roundup(curr->start, 4096);
}

void __map_groups__fixup_end(struct map_groups *mg, enum map_type type)
{
	struct map *prev, *curr;
	struct rb_node *nd, *prevnd = rb_first(&mg->maps[type]);

	if (prevnd == NULL)
		return;

	curr = rb_entry(prevnd, struct map, rb_node);

	for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {
		prev = curr;
		curr = rb_entry(nd, struct map, rb_node);
		prev->end = curr->start;
	}

	/*
	 * We still don't have the actual symbols, so guess the
	 * last map's final address.
	 */
	curr->end = ~0ULL;
}

struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name)
{
	size_t namelen = strlen(name) + 1;
	struct symbol *sym = calloc(1, (symbol_conf.priv_size +
					sizeof(*sym) + namelen));
	if (sym == NULL)
		return NULL;

	if (symbol_conf.priv_size)
		sym = ((void *)sym) + symbol_conf.priv_size;

	sym->start   = start;
	sym->end     = len ? start + len : start;
	sym->binding = binding;
	sym->namelen = namelen - 1;

	pr_debug4("%s: %s %#" PRIx64 "-%#" PRIx64 "\n",
		  __func__, name, start, sym->end);
	memcpy(sym->name, name, namelen);

	return sym;
}

void symbol__delete(struct symbol *sym)
{
	free(((void *)sym) - symbol_conf.priv_size);
}

size_t symbol__fprintf(struct symbol *sym, FILE *fp)
{
	return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %c %s\n",
		       sym->start, sym->end,
		       sym->binding == STB_GLOBAL ? 'g' :
		       sym->binding == STB_LOCAL ? 'l' : 'w',
		       sym->name);
}

size_t symbol__fprintf_symname_offs(const struct symbol *sym,
				    const struct addr_location *al, FILE *fp)
{
	unsigned long offset;
	size_t length;

	if (sym && sym->name) {
		length = fprintf(fp, "%s", sym->name);
		if (al) {
			if (al->addr < sym->end)
				offset = al->addr - sym->start;
			else
				offset = al->addr - al->map->start - sym->start;
			length += fprintf(fp, "+0x%lx", offset);
		}
		return length;
	} else
		return fprintf(fp, "[unknown]");
}

size_t symbol__fprintf_symname(const struct symbol *sym, FILE *fp)
{
	return symbol__fprintf_symname_offs(sym, NULL, fp);
}

void symbols__delete(struct rb_root *symbols)
{
	struct symbol *pos;
	struct rb_node *next = rb_first(symbols);

	while (next) {
		pos = rb_entry(next, struct symbol, rb_node);
		next = rb_next(&pos->rb_node);
		rb_erase(&pos->rb_node, symbols);
		symbol__delete(pos);
	}
}

void symbols__insert(struct rb_root *symbols, struct symbol *sym)
{
	struct rb_node **p = &symbols->rb_node;
	struct rb_node *parent = NULL;
	const u64 ip = sym->start;
	struct symbol *s;

	while (*p != NULL) {
		parent = *p;
		s = rb_entry(parent, struct symbol, rb_node);
		if (ip < s->start)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&sym->rb_node, parent, p);
	rb_insert_color(&sym->rb_node, symbols);
}

static struct symbol *symbols__find(struct rb_root *symbols, u64 ip)
{
	struct rb_node *n;

	if (symbols == NULL)
		return NULL;

	n = symbols->rb_node;

	while (n) {
		struct symbol *s = rb_entry(n, struct symbol, rb_node);

		if (ip < s->start)
			n = n->rb_left;
		else if (ip >= s->end)
			n = n->rb_right;
		else
			return s;
	}

	return NULL;
}

static struct symbol *symbols__first(struct rb_root *symbols)
{
	struct rb_node *n = rb_first(symbols);

	if (n)
		return rb_entry(n, struct symbol, rb_node);

	return NULL;
}

static struct symbol *symbols__next(struct symbol *sym)
{
	struct rb_node *n = rb_next(&sym->rb_node);

	if (n)
		return rb_entry(n, struct symbol, rb_node);

	return NULL;
}

struct symbol_name_rb_node {
	struct rb_node	rb_node;
	struct symbol	sym;
};

static void symbols__insert_by_name(struct rb_root *symbols, struct symbol *sym)
{
	struct rb_node **p = &symbols->rb_node;
	struct rb_node *parent = NULL;
	struct symbol_name_rb_node *symn, *s;

	symn = container_of(sym, struct symbol_name_rb_node, sym);

	while (*p != NULL) {
		parent = *p;
		s = rb_entry(parent, struct symbol_name_rb_node, rb_node);
		if (strcmp(sym->name, s->sym.name) < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&symn->rb_node, parent, p);
	rb_insert_color(&symn->rb_node, symbols);
}

static void symbols__sort_by_name(struct rb_root *symbols,
				  struct rb_root *source)
{
	struct rb_node *nd;

	for (nd = rb_first(source); nd; nd = rb_next(nd)) {
		struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
		symbols__insert_by_name(symbols, pos);
	}
}

static struct symbol *symbols__find_by_name(struct rb_root *symbols,
					    const char *name)
{
	struct rb_node *n;
	struct symbol_name_rb_node *s;

	if (symbols == NULL)
		return NULL;

	n = symbols->rb_node;

	while (n) {
		int cmp;

		s = rb_entry(n, struct symbol_name_rb_node, rb_node);
		cmp = strcmp(name, s->sym.name);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			break;
	}

	if (n == NULL)
		return NULL;

	/* return first symbol that has same name (if any) */
	for (n = rb_prev(n); n; n = rb_prev(n)) {
		struct symbol_name_rb_node *tmp;

		tmp = rb_entry(n, struct symbol_name_rb_node, rb_node);
		if (strcmp(tmp->sym.name, s->sym.name))
			break;

		s = tmp;
	}

	return &s->sym;
}

struct symbol *dso__find_symbol(struct dso *dso,
				enum map_type type, u64 addr)
{
	return symbols__find(&dso->symbols[type], addr);
}

struct symbol *dso__first_symbol(struct dso *dso, enum map_type type)
{
	return symbols__first(&dso->symbols[type]);
}

struct symbol *dso__next_symbol(struct symbol *sym)
{
	return symbols__next(sym);
}

struct symbol *symbol__next_by_name(struct symbol *sym)
{
	struct symbol_name_rb_node *s = container_of(sym, struct symbol_name_rb_node, sym);
	struct rb_node *n = rb_next(&s->rb_node);

	return n ? &rb_entry(n, struct symbol_name_rb_node, rb_node)->sym : NULL;
}

/*
 * Returns the first symbol that matches @name.
 */
struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type,
					const char *name)
{
	return symbols__find_by_name(&dso->symbol_names[type], name);
}

void dso__sort_by_name(struct dso *dso, enum map_type type)
{
	dso__set_sorted_by_name(dso, type);
	return symbols__sort_by_name(&dso->symbol_names[type],
				     &dso->symbols[type]);
}

size_t dso__fprintf_symbols_by_name(struct dso *dso,
				    enum map_type type, FILE *fp)
{
	size_t ret = 0;
	struct rb_node *nd;
	struct symbol_name_rb_node *pos;

	for (nd = rb_first(&dso->symbol_names[type]); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct symbol_name_rb_node, rb_node);
		ret += fprintf(fp, "%s\n", pos->sym.name);
	}

	return ret;
}

int modules__parse(const char *filename, void *arg,
		   int (*process_module)(void *arg, const char *name,
					 u64 start))
{
	char *line = NULL;
	size_t n;
	FILE *file;
	int err = 0;

	file = fopen(filename, "r");
	if (file == NULL)
		return -1;

	while (1) {
		char name[PATH_MAX];
		u64 start;
		char *sep;
		ssize_t line_len;

		line_len = getline(&line, &n, file);
		if (line_len < 0) {
			if (feof(file))
				break;
			err = -1;
			goto out;
		}

		if (!line) {
			err = -1;
			goto out;
		}

		line[--line_len] = '\0'; /* \n */

		sep = strrchr(line, 'x');
		if (sep == NULL)
			continue;

		hex2u64(sep + 1, &start);

		sep = strchr(line, ' ');
		if (sep == NULL)
			continue;

		*sep = '\0';

		scnprintf(name, sizeof(name), "[%s]", line);

		err = process_module(arg, name, start);
		if (err)
			break;
	}
out:
	free(line);
	fclose(file);
	return err;
}

struct process_kallsyms_args {
	struct map *map;
	struct dso *dso;
};

/*
 * These are symbols in the kernel image, so make sure that
 * sym is from a kernel DSO.
 */
bool symbol__is_idle(struct symbol *sym)
{
	const char * const idle_symbols[] = {
		"cpu_idle",
		"cpu_startup_entry",
		"intel_idle",
		"default_idle",
		"native_safe_halt",
		"enter_idle",
		"exit_idle",
		"mwait_idle",
		"mwait_idle_with_hints",
		"poll_idle",
		"ppc64_runlatch_off",
		"pseries_dedicated_idle_sleep",
		NULL
	};

	int i;

	if (!sym)
		return false;

	for (i = 0; idle_symbols[i]; i++) {
		if (!strcmp(idle_symbols[i], sym->name))
			return true;
	}

	return false;
}

static int map__process_kallsym_symbol(void *arg, const char *name,
				       char type, u64 start)
{
	struct symbol *sym;
	struct process_kallsyms_args *a = arg;
	struct rb_root *root = &a->dso->symbols[a->map->type];

	if (!symbol_type__is_a(type, a->map->type))
		return 0;

	/*
	 * module symbols are not sorted so we add all
	 * symbols, setting length to 0, and rely on
	 * symbols__fixup_end() to fix it up.
	 */
	sym = symbol__new(start, 0, kallsyms2elf_type(type), name);
	if (sym == NULL)
		return -ENOMEM;
	/*
	 * We will pass the symbols to the filter later, in
	 * map__split_kallsyms, when we have split the maps per module
	 */
	symbols__insert(root, sym);

	return 0;
}

/*
 * Loads the function entries in /proc/kallsyms into kernel_map->dso,
 * so that we can in the next step set the symbol ->end address and then
 * call kernel_maps__split_kallsyms.
 */
static int dso__load_all_kallsyms(struct dso *dso, const char *filename,
				  struct map *map)
{
	struct process_kallsyms_args args = { .map = map, .dso = dso, };
	return kallsyms__parse(filename, &args, map__process_kallsym_symbol);
}

static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map,
					 symbol_filter_t filter)
{
	struct map_groups *kmaps = map__kmap(map)->kmaps;
	struct map *curr_map;
	struct symbol *pos;
	int count = 0, moved = 0;
	struct rb_root *root = &dso->symbols[map->type];
	struct rb_node *next = rb_first(root);

	while (next) {
		char *module;

		pos = rb_entry(next, struct symbol, rb_node);
		next = rb_next(&pos->rb_node);

		module = strchr(pos->name, '\t');
		if (module)
			*module = '\0';

		curr_map = map_groups__find(kmaps, map->type, pos->start);

		if (!curr_map || (filter && filter(curr_map, pos))) {
			rb_erase(&pos->rb_node, root);
			symbol__delete(pos);
		} else {
			pos->start -= curr_map->start - curr_map->pgoff;
			if (pos->end)
				pos->end -= curr_map->start - curr_map->pgoff;
			if (curr_map != map) {
				rb_erase(&pos->rb_node, root);
				symbols__insert(
					&curr_map->dso->symbols[curr_map->type],
					pos);
				++moved;
			} else {
				++count;
			}
		}
	}

	/* Symbols have been adjusted */
	dso->adjust_symbols = 1;

	return count + moved;
}

/*
 * Split the symbols into maps, making sure there are no overlaps, i.e. the
 * kernel range is broken in several maps, named [kernel].N, as we don't have
 * the original ELF section names vmlinux have.
 */
static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta,
			       symbol_filter_t filter)
{
	struct map_groups *kmaps = map__kmap(map)->kmaps;
	struct machine *machine = kmaps->machine;
	struct map *curr_map = map;
	struct symbol *pos;
	int count = 0, moved = 0;
	struct rb_root *root = &dso->symbols[map->type];
	struct rb_node *next = rb_first(root);
	int kernel_range = 0;

	while (next) {
		char *module;

		pos = rb_entry(next, struct symbol, rb_node);
		next = rb_next(&pos->rb_node);

		module = strchr(pos->name, '\t');
		if (module) {
			if (!symbol_conf.use_modules)
				goto discard_symbol;

			*module++ = '\0';

			if (strcmp(curr_map->dso->short_name, module)) {
				if (curr_map != map &&
				    dso->kernel == DSO_TYPE_GUEST_KERNEL &&
				    machine__is_default_guest(machine)) {
					/*
					 * We assume all symbols of a module are
					 * contiguous in kallsyms, so curr_map
					 * points to a module and all its
					 * symbols are in its kmap. Mark it as
					 * loaded.
					 */
					dso__set_loaded(curr_map->dso,
							curr_map->type);
				}

				curr_map = map_groups__find_by_name(kmaps,
							map->type, module);
				if (curr_map == NULL) {
					pr_debug("%s/proc/{kallsyms,modules} "
						 "inconsistency while looking "
						 "for \"%s\" module!\n",
						 machine->root_dir, module);
					curr_map = map;
					goto discard_symbol;
				}

				if (curr_map->dso->loaded &&
				    !machine__is_default_guest(machine))
					goto discard_symbol;
			}
			/*
			 * So that we look just like we get from .ko files,
			 * i.e. not prelinked, relative to map->start.
			 */
			pos->start = curr_map->map_ip(curr_map, pos->start);
			pos->end   = curr_map->map_ip(curr_map, pos->end);
		} else if (curr_map != map) {
			char dso_name[PATH_MAX];
			struct dso *ndso;

			if (delta) {
				/* Kernel was relocated at boot time */
				pos->start -= delta;
				pos->end -= delta;
			}

			if (count == 0) {
				curr_map = map;
				goto filter_symbol;
			}

			if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
				snprintf(dso_name, sizeof(dso_name),
					 "[guest.kernel].%d",
					 kernel_range++);
			else
				snprintf(dso_name, sizeof(dso_name),
					 "[kernel].%d",
					 kernel_range++);

			ndso = dso__new(dso_name);
			if (ndso == NULL)
				return -1;

			ndso->kernel = dso->kernel;

			curr_map = map__new2(pos->start, ndso, map->type);
			if (curr_map == NULL) {
				dso__delete(ndso);
				return -1;
			}

			curr_map->map_ip = curr_map->unmap_ip = identity__map_ip;
			map_groups__insert(kmaps, curr_map);
			++kernel_range;
		} else if (delta) {
			/* Kernel was relocated at boot time */
			pos->start -= delta;
			pos->end -= delta;
		}
filter_symbol:
		if (filter && filter(curr_map, pos)) {
discard_symbol:		rb_erase(&pos->rb_node, root);
			symbol__delete(pos);
		} else {
			if (curr_map != map) {
				rb_erase(&pos->rb_node, root);
				symbols__insert(&curr_map->dso->symbols[curr_map->type], pos);
				++moved;
			} else
				++count;
		}
	}

	if (curr_map != map &&
	    dso->kernel == DSO_TYPE_GUEST_KERNEL &&
	    machine__is_default_guest(kmaps->machine)) {
		dso__set_loaded(curr_map->dso, curr_map->type);
	}

	return count + moved;
}

bool symbol__restricted_filename(const char *filename,
				 const char *restricted_filename)
{
	bool restricted = false;

	if (symbol_conf.kptr_restrict) {
		char *r = realpath(filename, NULL);

		if (r != NULL) {
			restricted = strcmp(r, restricted_filename) == 0;
			free(r);
			return restricted;
		}
	}

	return restricted;
}

struct module_info {
	struct rb_node rb_node;
	char *name;
	u64 start;
};

static void add_module(struct module_info *mi, struct rb_root *modules)
{
	struct rb_node **p = &modules->rb_node;
	struct rb_node *parent = NULL;
	struct module_info *m;

	while (*p != NULL) {
		parent = *p;
		m = rb_entry(parent, struct module_info, rb_node);
		if (strcmp(mi->name, m->name) < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&mi->rb_node, parent, p);
	rb_insert_color(&mi->rb_node, modules);
}

static void delete_modules(struct rb_root *modules)
{
	struct module_info *mi;
	struct rb_node *next = rb_first(modules);

	while (next) {
		mi = rb_entry(next, struct module_info, rb_node);
		next = rb_next(&mi->rb_node);
		rb_erase(&mi->rb_node, modules);
		zfree(&mi->name);
		free(mi);
	}
}

static struct module_info *find_module(const char *name,
				       struct rb_root *modules)
{
	struct rb_node *n = modules->rb_node;

	while (n) {
		struct module_info *m;
		int cmp;

		m = rb_entry(n, struct module_info, rb_node);
		cmp = strcmp(name, m->name);
		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return m;
	}

	return NULL;
}

static int __read_proc_modules(void *arg, const char *name, u64 start)
{
	struct rb_root *modules = arg;
	struct module_info *mi;

	mi = zalloc(sizeof(struct module_info));
	if (!mi)
		return -ENOMEM;

	mi->name = strdup(name);
	mi->start = start;

	if (!mi->name) {
		free(mi);
		return -ENOMEM;
	}

	add_module(mi, modules);

	return 0;
}

static int read_proc_modules(const char *filename, struct rb_root *modules)
{
	if (symbol__restricted_filename(filename, "/proc/modules"))
		return -1;

	if (modules__parse(filename, modules, __read_proc_modules)) {
		delete_modules(modules);
		return -1;
	}

	return 0;
}

int compare_proc_modules(const char *from, const char *to)
{
	struct rb_root from_modules = RB_ROOT;
	struct rb_root to_modules = RB_ROOT;
	struct rb_node *from_node, *to_node;
	struct module_info *from_m, *to_m;
	int ret = -1;

	if (read_proc_modules(from, &from_modules))
		return -1;

	if (read_proc_modules(to, &to_modules))
		goto out_delete_from;

	from_node = rb_first(&from_modules);
	to_node = rb_first(&to_modules);
	while (from_node) {
		if (!to_node)
			break;

		from_m = rb_entry(from_node, struct module_info, rb_node);
		to_m = rb_entry(to_node, struct module_info, rb_node);

		if (from_m->start != to_m->start ||
		    strcmp(from_m->name, to_m->name))
			break;

		from_node = rb_next(from_node);
		to_node = rb_next(to_node);
	}

	if (!from_node && !to_node)
		ret = 0;

	delete_modules(&to_modules);
out_delete_from:
	delete_modules(&from_modules);

	return ret;
}

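/*
 * Check that every module map known to 'kmaps' is still loaded at the start
 * address recorded in 'filename' (a modules list in /proc/modules format);
 * the kernel map itself is skipped.
 */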
static int do_validate_kcore_modules(const char *filename, struct map *map,
				     struct map_groups *kmaps)
{
	struct rb_root modules = RB_ROOT;
	struct map *old_map;
	int err;

	err = read_proc_modules(filename, &modules);
	if (err)
		return err;

	old_map = map_groups__first(kmaps, map->type);
	while (old_map) {
		struct map *next = map_groups__next(old_map);
		struct module_info *mi;

		if (old_map == map || old_map->start == map->start) {
			/* The kernel map */
			old_map = next;
			continue;
		}

		/* Module must be in memory at the same address */
		mi = find_module(old_map->dso->short_name, &modules);
		if (!mi || mi->start != old_map->start) {
			err = -EINVAL;
			goto out;
		}

		old_map = next;
	}
out:
	delete_modules(&modules);
	return err;
}

/*
 * If kallsyms is referenced by name then we look for filename in the same
 * directory.
 */
static bool filename_from_kallsyms_filename(char *filename,
					    const char *base_name,
					    const char *kallsyms_filename)
{
	char *name;

	strcpy(filename, kallsyms_filename);
	name = strrchr(filename, '/');
	if (!name)
		return false;

	name += 1;

	if (!strcmp(name, "kallsyms")) {
		strcpy(name, base_name);
		return true;
	}

	return false;
}

static int validate_kcore_modules(const char *kallsyms_filename,
				  struct map *map)
{
	struct map_groups *kmaps = map__kmap(map)->kmaps;
	char modules_filename[PATH_MAX];

	if (!filename_from_kallsyms_filename(modules_filename, "modules",
					     kallsyms_filename))
		return -EINVAL;

	if (do_validate_kcore_modules(modules_filename, map, kmaps))
		return -EINVAL;

	return 0;
}

static int validate_kcore_addresses(const char *kallsyms_filename,
				    struct map *map)
{
	struct kmap *kmap = map__kmap(map);

	if (kmap->ref_reloc_sym && kmap->ref_reloc_sym->name) {
		u64 start;

		start = kallsyms__get_function_start(kallsyms_filename,
						     kmap->ref_reloc_sym->name);
		if (start != kmap->ref_reloc_sym->addr)
			return -EINVAL;
	}

	return validate_kcore_modules(kallsyms_filename, map);
}

struct kcore_mapfn_data {
	struct dso *dso;
	enum map_type type;
	struct list_head maps;
};

static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data)
{
	struct kcore_mapfn_data *md = data;
	struct map *map;

	map = map__new2(start, md->dso, md->type);
	if (map == NULL)
		return -ENOMEM;

	map->end = map->start + len;
	map->pgoff = pgoff;

	list_add(&map->node, &md->maps);

	return 0;
}

static int dso__load_kcore(struct dso *dso, struct map *map,
			   const char *kallsyms_filename)
{
	struct map_groups *kmaps = map__kmap(map)->kmaps;
	struct machine *machine = kmaps->machine;
	struct kcore_mapfn_data md;
	struct map *old_map, *new_map, *replacement_map = NULL;
	bool is_64_bit;
	int err, fd;
	char kcore_filename[PATH_MAX];
	struct symbol *sym;

	/* This function requires that the map is the kernel map */
	if (map != machine->vmlinux_maps[map->type])
		return -EINVAL;

	if (!filename_from_kallsyms_filename(kcore_filename, "kcore",
					     kallsyms_filename))
		return -EINVAL;

	/* Modules and kernel must be present at their original addresses */
	if (validate_kcore_addresses(kallsyms_filename, map))
		return -EINVAL;

	md.dso = dso;
	md.type = map->type;
	INIT_LIST_HEAD(&md.maps);

	fd = open(kcore_filename, O_RDONLY);
	if (fd < 0)
		return -EINVAL;

	/* Read new maps into temporary lists */
	err = file__read_maps(fd, md.type == MAP__FUNCTION, kcore_mapfn, &md,
			      &is_64_bit);
	if (err)
		goto out_err;
	dso->is_64_bit = is_64_bit;

	if (list_empty(&md.maps)) {
		err = -EINVAL;
		goto out_err;
	}

	/* Remove old maps */
	old_map = map_groups__first(kmaps, map->type);
	while (old_map) {
		struct map *next = map_groups__next(old_map);

		if (old_map != map)
			map_groups__remove(kmaps, old_map);
		old_map = next;
	}

	/* Find the kernel map using the first symbol */
	sym = dso__first_symbol(dso, map->type);
	list_for_each_entry(new_map, &md.maps, node) {
		if (sym && sym->start >= new_map->start &&
		    sym->start < new_map->end) {
			replacement_map = new_map;
			break;
		}
	}

	if (!replacement_map)
		replacement_map = list_entry(md.maps.next, struct map, node);

	/* Add new maps */
	while (!list_empty(&md.maps)) {
		new_map = list_entry(md.maps.next, struct map, node);
		list_del(&new_map->node);
		if (new_map == replacement_map) {
			map->start	= new_map->start;
			map->end	= new_map->end;
			map->pgoff	= new_map->pgoff;
			map->map_ip	= new_map->map_ip;
			map->unmap_ip	= new_map->unmap_ip;
			map__delete(new_map);
			/* Ensure maps are correctly ordered */
			map_groups__remove(kmaps, map);
			map_groups__insert(kmaps, map);
		} else {
			map_groups__insert(kmaps, new_map);
		}
	}

	/*
	 * Set the data type and long name so that kcore can be read via
	 * dso__data_read_addr().
	 */
	if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
		dso->binary_type = DSO_BINARY_TYPE__GUEST_KCORE;
	else
		dso->binary_type = DSO_BINARY_TYPE__KCORE;
	dso__set_long_name(dso, strdup(kcore_filename), true);

	close(fd);

	if (map->type == MAP__FUNCTION)
		pr_debug("Using %s for kernel object code\n", kcore_filename);
	else
		pr_debug("Using %s for kernel data\n", kcore_filename);

	return 0;

out_err:
	while (!list_empty(&md.maps)) {
		map = list_entry(md.maps.next, struct map, node);
		list_del(&map->node);
		map__delete(map);
	}
	close(fd);
	return -EINVAL;
}

/*
 * If the kernel is relocated at boot time, kallsyms won't match. Compute the
 * delta based on the relocation reference symbol.
 */
static int kallsyms__delta(struct map *map, const char *filename, u64 *delta)
{
	struct kmap *kmap = map__kmap(map);
	u64 addr;

	if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->name)
		return 0;

	addr = kallsyms__get_function_start(filename,
					    kmap->ref_reloc_sym->name);
	if (!addr)
		return -1;

	*delta = addr - kmap->ref_reloc_sym->addr;
	return 0;
}

int dso__load_kallsyms(struct dso *dso, const char *filename,
		       struct map *map, symbol_filter_t filter)
{
	u64 delta = 0;

	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
		return -1;

	if (dso__load_all_kallsyms(dso, filename, map) < 0)
		return -1;

	if (kallsyms__delta(map, filename, &delta))
		return -1;

	symbols__fixup_duplicate(&dso->symbols[map->type]);
	symbols__fixup_end(&dso->symbols[map->type]);

	if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
		dso->symtab_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
	else
		dso->symtab_type = DSO_BINARY_TYPE__KALLSYMS;

	if (!dso__load_kcore(dso, map, filename))
		return dso__split_kallsyms_for_kcore(dso, map, filter);
	else
		return dso__split_kallsyms(dso, map, delta, filter);
}

static int dso__load_perf_map(struct dso *dso, struct map *map,
			      symbol_filter_t filter)
{
	char *line = NULL;
	size_t n;
	FILE *file;
	int nr_syms = 0;

	file = fopen(dso->long_name, "r");
	if (file == NULL)
		goto out_failure;

	while (!feof(file)) {
		u64 start, size;
		struct symbol *sym;
		int line_len, len;

		line_len = getline(&line, &n, file);
		if (line_len < 0)
			break;

		if (!line)
			goto out_failure;

		line[--line_len] = '\0'; /* \n */

		len = hex2u64(line, &start);

		len++;
		if (len + 2 >= line_len)
			continue;

		len += hex2u64(line + len, &size);

		len++;
		if (len + 2 >= line_len)
			continue;

		sym = symbol__new(start, size, STB_GLOBAL, line + len);

		if (sym == NULL)
			goto out_delete_line;

		if (filter && filter(map, sym))
			symbol__delete(sym);
		else {
			symbols__insert(&dso->symbols[map->type], sym);
			nr_syms++;
		}
	}

	free(line);
	fclose(file);

	return nr_syms;

out_delete_line:
	free(line);
out_failure:
	return -1;
}

static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod,
					   enum dso_binary_type type)
{
	switch (type) {
	case DSO_BINARY_TYPE__JAVA_JIT:
	case DSO_BINARY_TYPE__DEBUGLINK:
	case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
	case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
	case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
	case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
	case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
		return !kmod && dso->kernel == DSO_TYPE_USER;

	case DSO_BINARY_TYPE__KALLSYMS:
	case DSO_BINARY_TYPE__VMLINUX:
	case DSO_BINARY_TYPE__KCORE:
		return dso->kernel == DSO_TYPE_KERNEL;

	case DSO_BINARY_TYPE__GUEST_KALLSYMS:
	case DSO_BINARY_TYPE__GUEST_VMLINUX:
	case DSO_BINARY_TYPE__GUEST_KCORE:
		return dso->kernel == DSO_TYPE_GUEST_KERNEL;

	case DSO_BINARY_TYPE__GUEST_KMODULE:
	case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
		/*
		 * kernel modules know their symtab type - it's set when
		 * creating a module dso in machine__new_module().
		 */
		return kmod && dso->symtab_type == type;

	case DSO_BINARY_TYPE__BUILD_ID_CACHE:
		return true;

	case DSO_BINARY_TYPE__NOT_FOUND:
	default:
		return false;
	}
}

int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
{
	char *name;
	int ret = -1;
	u_int i;
	struct machine *machine;
	char *root_dir = (char *) "";
	int ss_pos = 0;
	struct symsrc ss_[2];
	struct symsrc *syms_ss = NULL, *runtime_ss = NULL;
	bool kmod;

	dso__set_loaded(dso, map->type);

	if (dso->kernel == DSO_TYPE_KERNEL)
		return dso__load_kernel_sym(dso, map, filter);
	else if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
		return dso__load_guest_kernel_sym(dso, map, filter);

	if (map->groups && map->groups->machine)
		machine = map->groups->machine;
	else
		machine = NULL;

	dso->adjust_symbols = 0;

	if (strncmp(dso->name, "/tmp/perf-", 10) == 0) {
		struct stat st;

		if (lstat(dso->name, &st) < 0)
			return -1;

		if (st.st_uid && (st.st_uid != geteuid())) {
			pr_warning("File %s not owned by current user or root, "
				   "ignoring it.\n", dso->name);
			return -1;
		}

		ret = dso__load_perf_map(dso, map, filter);
		dso->symtab_type = ret > 0 ? DSO_BINARY_TYPE__JAVA_JIT :
					     DSO_BINARY_TYPE__NOT_FOUND;
		return ret;
	}

	if (machine)
		root_dir = machine->root_dir;

	name = malloc(PATH_MAX);
	if (!name)
		return -1;

	kmod = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
	       dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
	       dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE ||
	       dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;

	/*
	 * Iterate over candidate debug images.
	 * Keep track of "interesting" ones (those which have a symtab, dynsym,
	 * and/or opd section) for processing.
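	 *
	 * Up to two symsrc slots (ss_[]) are kept: "syms_ss" for the image the
	 * symbol table will be read from and "runtime_ss" for the image that
	 * matches what was actually mapped at runtime; they may end up
	 * referring to the same file.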
	 */
	for (i = 0; i < DSO_BINARY_TYPE__SYMTAB_CNT; i++) {
		struct symsrc *ss = &ss_[ss_pos];
		bool next_slot = false;

		enum dso_binary_type symtab_type = binary_type_symtab[i];

		if (!dso__is_compatible_symtab_type(dso, kmod, symtab_type))
			continue;

		if (dso__read_binary_type_filename(dso, symtab_type,
						   root_dir, name, PATH_MAX))
			continue;

		/* Name is now the name of the next image to try */
		if (symsrc__init(ss, dso, name, symtab_type) < 0)
			continue;

		if (!syms_ss && symsrc__has_symtab(ss)) {
			syms_ss = ss;
			next_slot = true;
			if (!dso->symsrc_filename)
				dso->symsrc_filename = strdup(name);
		}

		if (!runtime_ss && symsrc__possibly_runtime(ss)) {
			runtime_ss = ss;
			next_slot = true;
		}

		if (next_slot) {
			ss_pos++;

			if (syms_ss && runtime_ss)
				break;
		} else {
			symsrc__destroy(ss);
		}

	}

	if (!runtime_ss && !syms_ss)
		goto out_free;

	if (runtime_ss && !syms_ss) {
		syms_ss = runtime_ss;
	}

	/* We'll have to hope for the best */
	if (!runtime_ss && syms_ss)
		runtime_ss = syms_ss;

	if (syms_ss)
		ret = dso__load_sym(dso, map, syms_ss, runtime_ss, filter, kmod);
	else
		ret = -1;

	if (ret > 0) {
		int nr_plt;

		nr_plt = dso__synthesize_plt_symbols(dso, runtime_ss, map, filter);
		if (nr_plt > 0)
			ret += nr_plt;
	}

	for (; ss_pos > 0; ss_pos--)
		symsrc__destroy(&ss_[ss_pos - 1]);
out_free:
	free(name);
	if (ret < 0 && strstr(dso->name, " (deleted)") != NULL)
		return 0;
	return ret;
}

struct map *map_groups__find_by_name(struct map_groups *mg,
				     enum map_type type, const char *name)
{
	struct rb_node *nd;

	for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) {
		struct map *map = rb_entry(nd, struct map, rb_node);

		if (map->dso && strcmp(map->dso->short_name, name) == 0)
			return map;
	}

	return NULL;
}

int dso__load_vmlinux(struct dso *dso, struct map *map,
		      const char *vmlinux, bool vmlinux_allocated,
		      symbol_filter_t filter)
{
	int err = -1;
	struct symsrc ss;
	char symfs_vmlinux[PATH_MAX];
	enum dso_binary_type symtab_type;

	if (vmlinux[0] == '/')
		snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s", vmlinux);
	else
		symbol__join_symfs(symfs_vmlinux, vmlinux);

	if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
		symtab_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
	else
		symtab_type = DSO_BINARY_TYPE__VMLINUX;

	if (symsrc__init(&ss, dso, symfs_vmlinux, symtab_type))
		return -1;

	err = dso__load_sym(dso, map, &ss, &ss, filter, 0);
	symsrc__destroy(&ss);

	if (err > 0) {
		if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
			dso->binary_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
		else
			dso->binary_type = DSO_BINARY_TYPE__VMLINUX;
		dso__set_long_name(dso, vmlinux, vmlinux_allocated);
		dso__set_loaded(dso, map->type);
		pr_debug("Using %s for symbols\n", symfs_vmlinux);
	}

	return err;
}

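/*
 * Try the vmlinux candidates in order: first a build-id based filename (if
 * build-ids are not being ignored), then each entry of vmlinux_path[] as set
 * up by vmlinux_path__init().
 */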
int dso__load_vmlinux_path(struct dso *dso, struct map *map,
			   symbol_filter_t filter)
{
	int i, err = 0;
	char *filename = NULL;

	if (!symbol_conf.ignore_vmlinux_buildid)
		filename = dso__build_id_filename(dso, NULL, 0);
	if (filename != NULL) {
		err = dso__load_vmlinux(dso, map, filename, true, filter);
		if (err > 0)
			goto out;
		free(filename);
	}

	pr_debug("Looking at the vmlinux_path (%d entries long)\n",
		 vmlinux_path__nr_entries + 1);

	for (i = 0; i < vmlinux_path__nr_entries; ++i) {
		err = dso__load_vmlinux(dso, map, vmlinux_path[i], false, filter);
		if (err > 0)
			break;
	}
out:
	return err;
}

static int find_matching_kcore(struct map *map, char *dir, size_t dir_sz)
{
	char kallsyms_filename[PATH_MAX];
	struct dirent *dent;
	int ret = -1;
	DIR *d;

	d = opendir(dir);
	if (!d)
		return -1;

	while (1) {
		dent = readdir(d);
		if (!dent)
			break;
		if (dent->d_type != DT_DIR)
			continue;
		scnprintf(kallsyms_filename, sizeof(kallsyms_filename),
			  "%s/%s/kallsyms", dir, dent->d_name);
		if (!validate_kcore_addresses(kallsyms_filename, map)) {
			strlcpy(dir, kallsyms_filename, dir_sz);
			ret = 0;
			break;
		}
	}

	closedir(d);

	return ret;
}

static char *dso__find_kallsyms(struct dso *dso, struct map *map)
{
	u8 host_build_id[BUILD_ID_SIZE];
	char sbuild_id[BUILD_ID_SIZE * 2 + 1];
	bool is_host = false;
	char path[PATH_MAX];

	if (!dso->has_build_id) {
		/*
		 * Last resort, if we don't have a build-id and couldn't find
		 * any vmlinux file, try the running kernel kallsyms table.
		 */
		goto proc_kallsyms;
	}

	if (sysfs__read_build_id("/sys/kernel/notes", host_build_id,
				 sizeof(host_build_id)) == 0)
		is_host = dso__build_id_equal(dso, host_build_id);

	build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);

	scnprintf(path, sizeof(path), "%s/[kernel.kcore]/%s", buildid_dir,
		  sbuild_id);

	/* Use /proc/kallsyms if possible */
	if (is_host) {
		DIR *d;
		int fd;

		/* If no cached kcore go with /proc/kallsyms */
		d = opendir(path);
		if (!d)
			goto proc_kallsyms;
		closedir(d);

		/*
		 * Do not check the build-id cache, until we know we cannot use
		 * /proc/kcore.
		 */
		fd = open("/proc/kcore", O_RDONLY);
		if (fd != -1) {
			close(fd);
			/* If module maps match go with /proc/kallsyms */
			if (!validate_kcore_addresses("/proc/kallsyms", map))
				goto proc_kallsyms;
		}

		/* Find kallsyms in build-id cache with kcore */
		if (!find_matching_kcore(map, path, sizeof(path)))
			return strdup(path);

		goto proc_kallsyms;
	}

	/* Find kallsyms in build-id cache with kcore */
	if (!find_matching_kcore(map, path, sizeof(path)))
		return strdup(path);

	scnprintf(path, sizeof(path), "%s/[kernel.kallsyms]/%s",
		  buildid_dir, sbuild_id);

	if (access(path, F_OK)) {
		pr_err("No kallsyms or vmlinux with build-id %s was found\n",
		       sbuild_id);
		return NULL;
	}

	return strdup(path);

proc_kallsyms:
	return strdup("/proc/kallsyms");
}

static int dso__load_kernel_sym(struct dso *dso, struct map *map,
				symbol_filter_t filter)
{
	int err;
	const char *kallsyms_filename = NULL;
	char *kallsyms_allocated_filename = NULL;
	/*
	 * Step 1: if the user specified a kallsyms or vmlinux filename, use
	 * it and only it, reporting errors to the user if it cannot be used.
	 *
	 * For instance, try to analyse an ARM perf.data file _without_ a
	 * build-id, or if the user specifies the wrong path to the right
	 * vmlinux file, obviously we can't fall back to another vmlinux (an
	 * x86_64 one, on the machine where analysis is being performed, say),
	 * or worse, /proc/kallsyms.
	 *
	 * If the specified file _has_ a build-id and there is a build-id
	 * section in the perf.data file, we will still do the expected
	 * validation in dso__load_vmlinux and will bail out if they don't
	 * match.
	 */
	if (symbol_conf.kallsyms_name != NULL) {
		kallsyms_filename = symbol_conf.kallsyms_name;
		goto do_kallsyms;
	}

	if (!symbol_conf.ignore_vmlinux && symbol_conf.vmlinux_name != NULL) {
		return dso__load_vmlinux(dso, map, symbol_conf.vmlinux_name,
					 false, filter);
	}

	if (!symbol_conf.ignore_vmlinux && vmlinux_path != NULL) {
		err = dso__load_vmlinux_path(dso, map, filter);
		if (err > 0)
			return err;
	}

	/* do not try local files if a symfs was given */
	if (symbol_conf.symfs[0] != 0)
		return -1;

	kallsyms_allocated_filename = dso__find_kallsyms(dso, map);
	if (!kallsyms_allocated_filename)
		return -1;

	kallsyms_filename = kallsyms_allocated_filename;

do_kallsyms:
	err = dso__load_kallsyms(dso, kallsyms_filename, map, filter);
	if (err > 0)
		pr_debug("Using %s for symbols\n", kallsyms_filename);
	free(kallsyms_allocated_filename);

	if (err > 0 && !dso__is_kcore(dso)) {
		dso->binary_type = DSO_BINARY_TYPE__KALLSYMS;
		dso__set_long_name(dso, "[kernel.kallsyms]", false);
		map__fixup_start(map);
		map__fixup_end(map);
	}

	return err;
}

static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map,
				      symbol_filter_t filter)
{
	int err;
	const char *kallsyms_filename = NULL;
	struct machine *machine;
	char path[PATH_MAX];

	if (!map->groups) {
		pr_debug("Guest kernel map has no map groups\n");
		return -1;
	}
	machine = map->groups->machine;

	if (machine__is_default_guest(machine)) {
		/*
		 * if the user specified a vmlinux filename, use it and only
		 * it, reporting errors to the user if it cannot be used.
		 * Or use the guest kallsyms file specified by the user on the
		 * command line.
		 */
		if (symbol_conf.default_guest_vmlinux_name != NULL) {
			err = dso__load_vmlinux(dso, map,
						symbol_conf.default_guest_vmlinux_name,
						false, filter);
			return err;
		}

		kallsyms_filename = symbol_conf.default_guest_kallsyms;
		if (!kallsyms_filename)
			return -1;
	} else {
		sprintf(path, "%s/proc/kallsyms", machine->root_dir);
		kallsyms_filename = path;
	}

	err = dso__load_kallsyms(dso, kallsyms_filename, map, filter);
	if (err > 0)
		pr_debug("Using %s for symbols\n", kallsyms_filename);
	if (err > 0 && !dso__is_kcore(dso)) {
		dso->binary_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
		machine__mmap_name(machine, path, sizeof(path));
		dso__set_long_name(dso, strdup(path), true);
		map__fixup_start(map);
		map__fixup_end(map);
	}

	return err;
}

static void vmlinux_path__exit(void)
{
	while (--vmlinux_path__nr_entries >= 0)
		zfree(&vmlinux_path[vmlinux_path__nr_entries]);

	zfree(&vmlinux_path);
}

static int vmlinux_path__init(struct perf_session_env *env)
{
	struct utsname uts;
	char bf[PATH_MAX];
	char *kernel_version;

	vmlinux_path = malloc(sizeof(char *) * 6);
	if (vmlinux_path == NULL)
		return -1;

	vmlinux_path[vmlinux_path__nr_entries] = strdup("vmlinux");
	if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
		goto out_fail;
	++vmlinux_path__nr_entries;
	vmlinux_path[vmlinux_path__nr_entries] = strdup("/boot/vmlinux");
	if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
		goto out_fail;
	++vmlinux_path__nr_entries;

	/* only try kernel version if no symfs was given */
	if (symbol_conf.symfs[0] != 0)
		return 0;

	if (env) {
		kernel_version = env->os_release;
	} else {
		if (uname(&uts) < 0)
			goto out_fail;

		kernel_version = uts.release;
	}

	snprintf(bf, sizeof(bf), "/boot/vmlinux-%s", kernel_version);
	vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
	if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
		goto out_fail;
	++vmlinux_path__nr_entries;
	snprintf(bf, sizeof(bf), "/usr/lib/debug/boot/vmlinux-%s",
		 kernel_version);
	vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
	if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
		goto out_fail;
	++vmlinux_path__nr_entries;
	snprintf(bf, sizeof(bf), "/lib/modules/%s/build/vmlinux", kernel_version);
	vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
	if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
		goto out_fail;
	++vmlinux_path__nr_entries;
	snprintf(bf, sizeof(bf), "/usr/lib/debug/lib/modules/%s/vmlinux",
		 kernel_version);
	vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
	if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
		goto out_fail;
	++vmlinux_path__nr_entries;

	return 0;

out_fail:
	vmlinux_path__exit();
	return -1;
}

int setup_list(struct strlist **list, const char *list_str,
	       const char *list_name)
{
	if (list_str == NULL)
		return 0;

	*list = strlist__new(true, list_str);
	if (!*list) {
		pr_err("problems parsing %s list\n", list_name);
		return -1;
	}
	return 0;
}

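/*
 * Non-root users are subject to /proc/sys/kernel/kptr_restrict: if it is set
 * to a non-zero value, kernel addresses in /proc/kallsyms are unusable, so
 * remember that in symbol_conf.kptr_restrict.
 */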
static bool symbol__read_kptr_restrict(void)
{
	bool value = false;

	if (geteuid() != 0) {
		FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r");
		if (fp != NULL) {
			char line[8];

			if (fgets(line, sizeof(line), fp) != NULL)
				value = atoi(line) != 0;

			fclose(fp);
		}
	}

	return value;
}

int symbol__init(struct perf_session_env *env)
{
	const char *symfs;

	if (symbol_conf.initialized)
		return 0;

	symbol_conf.priv_size = PERF_ALIGN(symbol_conf.priv_size, sizeof(u64));

	symbol__elf_init();

	if (symbol_conf.sort_by_name)
		symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) -
					  sizeof(struct symbol));

	if (symbol_conf.try_vmlinux_path && vmlinux_path__init(env) < 0)
		return -1;

	if (symbol_conf.field_sep && *symbol_conf.field_sep == '.') {
		pr_err("'.' is the only invalid --field-separator argument\n");
		return -1;
	}

	if (setup_list(&symbol_conf.dso_list,
		       symbol_conf.dso_list_str, "dso") < 0)
		return -1;

	if (setup_list(&symbol_conf.comm_list,
		       symbol_conf.comm_list_str, "comm") < 0)
		goto out_free_dso_list;

	if (setup_list(&symbol_conf.sym_list,
		       symbol_conf.sym_list_str, "symbol") < 0)
		goto out_free_comm_list;

	/*
	 * A path to symbols of "/" is identical to "";
	 * reset it here for simplicity.
	 */
	symfs = realpath(symbol_conf.symfs, NULL);
	if (symfs == NULL)
		symfs = symbol_conf.symfs;
	if (strcmp(symfs, "/") == 0)
		symbol_conf.symfs = "";
	if (symfs != symbol_conf.symfs)
		free((void *)symfs);

	symbol_conf.kptr_restrict = symbol__read_kptr_restrict();

	symbol_conf.initialized = true;
	return 0;

out_free_comm_list:
	strlist__delete(symbol_conf.comm_list);
out_free_dso_list:
	strlist__delete(symbol_conf.dso_list);
	return -1;
}

void symbol__exit(void)
{
	if (!symbol_conf.initialized)
		return;
	strlist__delete(symbol_conf.sym_list);
	strlist__delete(symbol_conf.dso_list);
	strlist__delete(symbol_conf.comm_list);
	vmlinux_path__exit();
	symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL;
	symbol_conf.initialized = false;
}
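
/*
 * Illustrative sketch, not part of perf itself: one way a caller could drive
 * the API in this file to resolve an address in an already-constructed kernel
 * map.  The function name is hypothetical and the map/machine setup is assumed
 * to have been done elsewhere (e.g. by the session/machine code); the block is
 * compiled out so it does not affect the build.
 */
#if 0
static const char *example__resolve_kernel_addr(struct map *kernel_map, u64 addr)
{
	struct symbol *sym;

	if (symbol__init(NULL) < 0)
		return NULL;

	/* Load kallsyms/vmlinux on demand for the map's DSO, no filter */
	if (dso__load(kernel_map->dso, kernel_map, NULL) < 0)
		return NULL;

	/* Symbol lookups are done in DSO (map-relative) addresses */
	sym = dso__find_symbol(kernel_map->dso, kernel_map->type,
			       kernel_map->map_ip(kernel_map, addr));

	return sym ? sym->name : NULL;
}
#endif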