1 #include <dirent.h> 2 #include <errno.h> 3 #include <stdlib.h> 4 #include <stdio.h> 5 #include <string.h> 6 #include <sys/types.h> 7 #include <sys/stat.h> 8 #include <sys/param.h> 9 #include <fcntl.h> 10 #include <unistd.h> 11 #include <inttypes.h> 12 #include "build-id.h" 13 #include "util.h" 14 #include "debug.h" 15 #include "machine.h" 16 #include "symbol.h" 17 #include "strlist.h" 18 19 #include <elf.h> 20 #include <limits.h> 21 #include <symbol/kallsyms.h> 22 #include <sys/utsname.h> 23 24 static int dso__load_kernel_sym(struct dso *dso, struct map *map, 25 symbol_filter_t filter); 26 static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map, 27 symbol_filter_t filter); 28 int vmlinux_path__nr_entries; 29 char **vmlinux_path; 30 31 struct symbol_conf symbol_conf = { 32 .use_modules = true, 33 .try_vmlinux_path = true, 34 .annotate_src = true, 35 .demangle = true, 36 .cumulate_callchain = true, 37 .show_hist_headers = true, 38 .symfs = "", 39 }; 40 41 static enum dso_binary_type binary_type_symtab[] = { 42 DSO_BINARY_TYPE__KALLSYMS, 43 DSO_BINARY_TYPE__GUEST_KALLSYMS, 44 DSO_BINARY_TYPE__JAVA_JIT, 45 DSO_BINARY_TYPE__DEBUGLINK, 46 DSO_BINARY_TYPE__BUILD_ID_CACHE, 47 DSO_BINARY_TYPE__FEDORA_DEBUGINFO, 48 DSO_BINARY_TYPE__UBUNTU_DEBUGINFO, 49 DSO_BINARY_TYPE__BUILDID_DEBUGINFO, 50 DSO_BINARY_TYPE__SYSTEM_PATH_DSO, 51 DSO_BINARY_TYPE__GUEST_KMODULE, 52 DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE, 53 DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO, 54 DSO_BINARY_TYPE__NOT_FOUND, 55 }; 56 57 #define DSO_BINARY_TYPE__SYMTAB_CNT ARRAY_SIZE(binary_type_symtab) 58 59 bool symbol_type__is_a(char symbol_type, enum map_type map_type) 60 { 61 symbol_type = toupper(symbol_type); 62 63 switch (map_type) { 64 case MAP__FUNCTION: 65 return symbol_type == 'T' || symbol_type == 'W'; 66 case MAP__VARIABLE: 67 return symbol_type == 'D'; 68 default: 69 return false; 70 } 71 } 72 73 static int prefix_underscores_count(const char *str) 74 { 75 const char *tail = str; 76 77 while (*tail == '_') 78 tail++; 79 80 return tail - str; 81 } 82 83 #define SYMBOL_A 0 84 #define SYMBOL_B 1 85 86 static int choose_best_symbol(struct symbol *syma, struct symbol *symb) 87 { 88 s64 a; 89 s64 b; 90 size_t na, nb; 91 92 /* Prefer a symbol with non zero length */ 93 a = syma->end - syma->start; 94 b = symb->end - symb->start; 95 if ((b == 0) && (a > 0)) 96 return SYMBOL_A; 97 else if ((a == 0) && (b > 0)) 98 return SYMBOL_B; 99 100 /* Prefer a non weak symbol over a weak one */ 101 a = syma->binding == STB_WEAK; 102 b = symb->binding == STB_WEAK; 103 if (b && !a) 104 return SYMBOL_A; 105 if (a && !b) 106 return SYMBOL_B; 107 108 /* Prefer a global symbol over a non global one */ 109 a = syma->binding == STB_GLOBAL; 110 b = symb->binding == STB_GLOBAL; 111 if (a && !b) 112 return SYMBOL_A; 113 if (b && !a) 114 return SYMBOL_B; 115 116 /* Prefer a symbol with less underscores */ 117 a = prefix_underscores_count(syma->name); 118 b = prefix_underscores_count(symb->name); 119 if (b > a) 120 return SYMBOL_A; 121 else if (a > b) 122 return SYMBOL_B; 123 124 /* Choose the symbol with the longest name */ 125 na = strlen(syma->name); 126 nb = strlen(symb->name); 127 if (na > nb) 128 return SYMBOL_A; 129 else if (na < nb) 130 return SYMBOL_B; 131 132 /* Avoid "SyS" kernel syscall aliases */ 133 if (na >= 3 && !strncmp(syma->name, "SyS", 3)) 134 return SYMBOL_B; 135 if (na >= 10 && !strncmp(syma->name, "compat_SyS", 10)) 136 return SYMBOL_B; 137 138 return SYMBOL_A; 139 } 140 141 void symbols__fixup_duplicate(struct rb_root *symbols) 142 { 143 struct rb_node *nd; 144 struct symbol *curr, *next; 145 146 nd = rb_first(symbols); 147 148 while (nd) { 149 curr = rb_entry(nd, struct symbol, rb_node); 150 again: 151 nd = rb_next(&curr->rb_node); 152 next = rb_entry(nd, struct symbol, rb_node); 153 154 if (!nd) 155 break; 156 157 if (curr->start != next->start) 158 continue; 159 160 if (choose_best_symbol(curr, next) == SYMBOL_A) { 161 rb_erase(&next->rb_node, symbols); 162 symbol__delete(next); 163 goto again; 164 } else { 165 nd = rb_next(&curr->rb_node); 166 rb_erase(&curr->rb_node, symbols); 167 symbol__delete(curr); 168 } 169 } 170 } 171 172 void symbols__fixup_end(struct rb_root *symbols) 173 { 174 struct rb_node *nd, *prevnd = rb_first(symbols); 175 struct symbol *curr, *prev; 176 177 if (prevnd == NULL) 178 return; 179 180 curr = rb_entry(prevnd, struct symbol, rb_node); 181 182 for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) { 183 prev = curr; 184 curr = rb_entry(nd, struct symbol, rb_node); 185 186 if (prev->end == prev->start && prev->end != curr->start) 187 prev->end = curr->start - 1; 188 } 189 190 /* Last entry */ 191 if (curr->end == curr->start) 192 curr->end = roundup(curr->start, 4096); 193 } 194 195 void __map_groups__fixup_end(struct map_groups *mg, enum map_type type) 196 { 197 struct map *prev, *curr; 198 struct rb_node *nd, *prevnd = rb_first(&mg->maps[type]); 199 200 if (prevnd == NULL) 201 return; 202 203 curr = rb_entry(prevnd, struct map, rb_node); 204 205 for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) { 206 prev = curr; 207 curr = rb_entry(nd, struct map, rb_node); 208 prev->end = curr->start - 1; 209 } 210 211 /* 212 * We still haven't the actual symbols, so guess the 213 * last map final address. 214 */ 215 curr->end = ~0ULL; 216 } 217 218 struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name) 219 { 220 size_t namelen = strlen(name) + 1; 221 struct symbol *sym = calloc(1, (symbol_conf.priv_size + 222 sizeof(*sym) + namelen)); 223 if (sym == NULL) 224 return NULL; 225 226 if (symbol_conf.priv_size) 227 sym = ((void *)sym) + symbol_conf.priv_size; 228 229 sym->start = start; 230 sym->end = len ? start + len - 1 : start; 231 sym->binding = binding; 232 sym->namelen = namelen - 1; 233 234 pr_debug4("%s: %s %#" PRIx64 "-%#" PRIx64 "\n", 235 __func__, name, start, sym->end); 236 memcpy(sym->name, name, namelen); 237 238 return sym; 239 } 240 241 void symbol__delete(struct symbol *sym) 242 { 243 free(((void *)sym) - symbol_conf.priv_size); 244 } 245 246 size_t symbol__fprintf(struct symbol *sym, FILE *fp) 247 { 248 return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %c %s\n", 249 sym->start, sym->end, 250 sym->binding == STB_GLOBAL ? 'g' : 251 sym->binding == STB_LOCAL ? 'l' : 'w', 252 sym->name); 253 } 254 255 size_t symbol__fprintf_symname_offs(const struct symbol *sym, 256 const struct addr_location *al, FILE *fp) 257 { 258 unsigned long offset; 259 size_t length; 260 261 if (sym && sym->name) { 262 length = fprintf(fp, "%s", sym->name); 263 if (al) { 264 if (al->addr < sym->end) 265 offset = al->addr - sym->start; 266 else 267 offset = al->addr - al->map->start - sym->start; 268 length += fprintf(fp, "+0x%lx", offset); 269 } 270 return length; 271 } else 272 return fprintf(fp, "[unknown]"); 273 } 274 275 size_t symbol__fprintf_symname(const struct symbol *sym, FILE *fp) 276 { 277 return symbol__fprintf_symname_offs(sym, NULL, fp); 278 } 279 280 void symbols__delete(struct rb_root *symbols) 281 { 282 struct symbol *pos; 283 struct rb_node *next = rb_first(symbols); 284 285 while (next) { 286 pos = rb_entry(next, struct symbol, rb_node); 287 next = rb_next(&pos->rb_node); 288 rb_erase(&pos->rb_node, symbols); 289 symbol__delete(pos); 290 } 291 } 292 293 void symbols__insert(struct rb_root *symbols, struct symbol *sym) 294 { 295 struct rb_node **p = &symbols->rb_node; 296 struct rb_node *parent = NULL; 297 const u64 ip = sym->start; 298 struct symbol *s; 299 300 while (*p != NULL) { 301 parent = *p; 302 s = rb_entry(parent, struct symbol, rb_node); 303 if (ip < s->start) 304 p = &(*p)->rb_left; 305 else 306 p = &(*p)->rb_right; 307 } 308 rb_link_node(&sym->rb_node, parent, p); 309 rb_insert_color(&sym->rb_node, symbols); 310 } 311 312 static struct symbol *symbols__find(struct rb_root *symbols, u64 ip) 313 { 314 struct rb_node *n; 315 316 if (symbols == NULL) 317 return NULL; 318 319 n = symbols->rb_node; 320 321 while (n) { 322 struct symbol *s = rb_entry(n, struct symbol, rb_node); 323 324 if (ip < s->start) 325 n = n->rb_left; 326 else if (ip > s->end) 327 n = n->rb_right; 328 else 329 return s; 330 } 331 332 return NULL; 333 } 334 335 static struct symbol *symbols__first(struct rb_root *symbols) 336 { 337 struct rb_node *n = rb_first(symbols); 338 339 if (n) 340 return rb_entry(n, struct symbol, rb_node); 341 342 return NULL; 343 } 344 345 static struct symbol *symbols__next(struct symbol *sym) 346 { 347 struct rb_node *n = rb_next(&sym->rb_node); 348 349 if (n) 350 return rb_entry(n, struct symbol, rb_node); 351 352 return NULL; 353 } 354 355 struct symbol_name_rb_node { 356 struct rb_node rb_node; 357 struct symbol sym; 358 }; 359 360 static void symbols__insert_by_name(struct rb_root *symbols, struct symbol *sym) 361 { 362 struct rb_node **p = &symbols->rb_node; 363 struct rb_node *parent = NULL; 364 struct symbol_name_rb_node *symn, *s; 365 366 symn = container_of(sym, struct symbol_name_rb_node, sym); 367 368 while (*p != NULL) { 369 parent = *p; 370 s = rb_entry(parent, struct symbol_name_rb_node, rb_node); 371 if (strcmp(sym->name, s->sym.name) < 0) 372 p = &(*p)->rb_left; 373 else 374 p = &(*p)->rb_right; 375 } 376 rb_link_node(&symn->rb_node, parent, p); 377 rb_insert_color(&symn->rb_node, symbols); 378 } 379 380 static void symbols__sort_by_name(struct rb_root *symbols, 381 struct rb_root *source) 382 { 383 struct rb_node *nd; 384 385 for (nd = rb_first(source); nd; nd = rb_next(nd)) { 386 struct symbol *pos = rb_entry(nd, struct symbol, rb_node); 387 symbols__insert_by_name(symbols, pos); 388 } 389 } 390 391 static struct symbol *symbols__find_by_name(struct rb_root *symbols, 392 const char *name) 393 { 394 struct rb_node *n; 395 396 if (symbols == NULL) 397 return NULL; 398 399 n = symbols->rb_node; 400 401 while (n) { 402 struct symbol_name_rb_node *s; 403 int cmp; 404 405 s = rb_entry(n, struct symbol_name_rb_node, rb_node); 406 cmp = strcmp(name, s->sym.name); 407 408 if (cmp < 0) 409 n = n->rb_left; 410 else if (cmp > 0) 411 n = n->rb_right; 412 else 413 return &s->sym; 414 } 415 416 return NULL; 417 } 418 419 struct symbol *dso__find_symbol(struct dso *dso, 420 enum map_type type, u64 addr) 421 { 422 return symbols__find(&dso->symbols[type], addr); 423 } 424 425 struct symbol *dso__first_symbol(struct dso *dso, enum map_type type) 426 { 427 return symbols__first(&dso->symbols[type]); 428 } 429 430 struct symbol *dso__next_symbol(struct symbol *sym) 431 { 432 return symbols__next(sym); 433 } 434 435 struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type, 436 const char *name) 437 { 438 return symbols__find_by_name(&dso->symbol_names[type], name); 439 } 440 441 void dso__sort_by_name(struct dso *dso, enum map_type type) 442 { 443 dso__set_sorted_by_name(dso, type); 444 return symbols__sort_by_name(&dso->symbol_names[type], 445 &dso->symbols[type]); 446 } 447 448 size_t dso__fprintf_symbols_by_name(struct dso *dso, 449 enum map_type type, FILE *fp) 450 { 451 size_t ret = 0; 452 struct rb_node *nd; 453 struct symbol_name_rb_node *pos; 454 455 for (nd = rb_first(&dso->symbol_names[type]); nd; nd = rb_next(nd)) { 456 pos = rb_entry(nd, struct symbol_name_rb_node, rb_node); 457 fprintf(fp, "%s\n", pos->sym.name); 458 } 459 460 return ret; 461 } 462 463 int modules__parse(const char *filename, void *arg, 464 int (*process_module)(void *arg, const char *name, 465 u64 start)) 466 { 467 char *line = NULL; 468 size_t n; 469 FILE *file; 470 int err = 0; 471 472 file = fopen(filename, "r"); 473 if (file == NULL) 474 return -1; 475 476 while (1) { 477 char name[PATH_MAX]; 478 u64 start; 479 char *sep; 480 ssize_t line_len; 481 482 line_len = getline(&line, &n, file); 483 if (line_len < 0) { 484 if (feof(file)) 485 break; 486 err = -1; 487 goto out; 488 } 489 490 if (!line) { 491 err = -1; 492 goto out; 493 } 494 495 line[--line_len] = '\0'; /* \n */ 496 497 sep = strrchr(line, 'x'); 498 if (sep == NULL) 499 continue; 500 501 hex2u64(sep + 1, &start); 502 503 sep = strchr(line, ' '); 504 if (sep == NULL) 505 continue; 506 507 *sep = '\0'; 508 509 scnprintf(name, sizeof(name), "[%s]", line); 510 511 err = process_module(arg, name, start); 512 if (err) 513 break; 514 } 515 out: 516 free(line); 517 fclose(file); 518 return err; 519 } 520 521 struct process_kallsyms_args { 522 struct map *map; 523 struct dso *dso; 524 }; 525 526 bool symbol__is_idle(struct symbol *sym) 527 { 528 const char * const idle_symbols[] = { 529 "cpu_idle", 530 "intel_idle", 531 "default_idle", 532 "native_safe_halt", 533 "enter_idle", 534 "exit_idle", 535 "mwait_idle", 536 "mwait_idle_with_hints", 537 "poll_idle", 538 "ppc64_runlatch_off", 539 "pseries_dedicated_idle_sleep", 540 NULL 541 }; 542 543 int i; 544 545 if (!sym) 546 return false; 547 548 for (i = 0; idle_symbols[i]; i++) { 549 if (!strcmp(idle_symbols[i], sym->name)) 550 return true; 551 } 552 553 return false; 554 } 555 556 static int map__process_kallsym_symbol(void *arg, const char *name, 557 char type, u64 start) 558 { 559 struct symbol *sym; 560 struct process_kallsyms_args *a = arg; 561 struct rb_root *root = &a->dso->symbols[a->map->type]; 562 563 if (!symbol_type__is_a(type, a->map->type)) 564 return 0; 565 566 /* 567 * module symbols are not sorted so we add all 568 * symbols, setting length to 0, and rely on 569 * symbols__fixup_end() to fix it up. 570 */ 571 sym = symbol__new(start, 0, kallsyms2elf_type(type), name); 572 if (sym == NULL) 573 return -ENOMEM; 574 /* 575 * We will pass the symbols to the filter later, in 576 * map__split_kallsyms, when we have split the maps per module 577 */ 578 symbols__insert(root, sym); 579 580 return 0; 581 } 582 583 /* 584 * Loads the function entries in /proc/kallsyms into kernel_map->dso, 585 * so that we can in the next step set the symbol ->end address and then 586 * call kernel_maps__split_kallsyms. 587 */ 588 static int dso__load_all_kallsyms(struct dso *dso, const char *filename, 589 struct map *map) 590 { 591 struct process_kallsyms_args args = { .map = map, .dso = dso, }; 592 return kallsyms__parse(filename, &args, map__process_kallsym_symbol); 593 } 594 595 static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map, 596 symbol_filter_t filter) 597 { 598 struct map_groups *kmaps = map__kmap(map)->kmaps; 599 struct map *curr_map; 600 struct symbol *pos; 601 int count = 0, moved = 0; 602 struct rb_root *root = &dso->symbols[map->type]; 603 struct rb_node *next = rb_first(root); 604 605 while (next) { 606 char *module; 607 608 pos = rb_entry(next, struct symbol, rb_node); 609 next = rb_next(&pos->rb_node); 610 611 module = strchr(pos->name, '\t'); 612 if (module) 613 *module = '\0'; 614 615 curr_map = map_groups__find(kmaps, map->type, pos->start); 616 617 if (!curr_map || (filter && filter(curr_map, pos))) { 618 rb_erase(&pos->rb_node, root); 619 symbol__delete(pos); 620 } else { 621 pos->start -= curr_map->start - curr_map->pgoff; 622 if (pos->end) 623 pos->end -= curr_map->start - curr_map->pgoff; 624 if (curr_map != map) { 625 rb_erase(&pos->rb_node, root); 626 symbols__insert( 627 &curr_map->dso->symbols[curr_map->type], 628 pos); 629 ++moved; 630 } else { 631 ++count; 632 } 633 } 634 } 635 636 /* Symbols have been adjusted */ 637 dso->adjust_symbols = 1; 638 639 return count + moved; 640 } 641 642 /* 643 * Split the symbols into maps, making sure there are no overlaps, i.e. the 644 * kernel range is broken in several maps, named [kernel].N, as we don't have 645 * the original ELF section names vmlinux have. 646 */ 647 static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta, 648 symbol_filter_t filter) 649 { 650 struct map_groups *kmaps = map__kmap(map)->kmaps; 651 struct machine *machine = kmaps->machine; 652 struct map *curr_map = map; 653 struct symbol *pos; 654 int count = 0, moved = 0; 655 struct rb_root *root = &dso->symbols[map->type]; 656 struct rb_node *next = rb_first(root); 657 int kernel_range = 0; 658 659 while (next) { 660 char *module; 661 662 pos = rb_entry(next, struct symbol, rb_node); 663 next = rb_next(&pos->rb_node); 664 665 module = strchr(pos->name, '\t'); 666 if (module) { 667 if (!symbol_conf.use_modules) 668 goto discard_symbol; 669 670 *module++ = '\0'; 671 672 if (strcmp(curr_map->dso->short_name, module)) { 673 if (curr_map != map && 674 dso->kernel == DSO_TYPE_GUEST_KERNEL && 675 machine__is_default_guest(machine)) { 676 /* 677 * We assume all symbols of a module are 678 * continuous in * kallsyms, so curr_map 679 * points to a module and all its 680 * symbols are in its kmap. Mark it as 681 * loaded. 682 */ 683 dso__set_loaded(curr_map->dso, 684 curr_map->type); 685 } 686 687 curr_map = map_groups__find_by_name(kmaps, 688 map->type, module); 689 if (curr_map == NULL) { 690 pr_debug("%s/proc/{kallsyms,modules} " 691 "inconsistency while looking " 692 "for \"%s\" module!\n", 693 machine->root_dir, module); 694 curr_map = map; 695 goto discard_symbol; 696 } 697 698 if (curr_map->dso->loaded && 699 !machine__is_default_guest(machine)) 700 goto discard_symbol; 701 } 702 /* 703 * So that we look just like we get from .ko files, 704 * i.e. not prelinked, relative to map->start. 705 */ 706 pos->start = curr_map->map_ip(curr_map, pos->start); 707 pos->end = curr_map->map_ip(curr_map, pos->end); 708 } else if (curr_map != map) { 709 char dso_name[PATH_MAX]; 710 struct dso *ndso; 711 712 if (delta) { 713 /* Kernel was relocated at boot time */ 714 pos->start -= delta; 715 pos->end -= delta; 716 } 717 718 if (count == 0) { 719 curr_map = map; 720 goto filter_symbol; 721 } 722 723 if (dso->kernel == DSO_TYPE_GUEST_KERNEL) 724 snprintf(dso_name, sizeof(dso_name), 725 "[guest.kernel].%d", 726 kernel_range++); 727 else 728 snprintf(dso_name, sizeof(dso_name), 729 "[kernel].%d", 730 kernel_range++); 731 732 ndso = dso__new(dso_name); 733 if (ndso == NULL) 734 return -1; 735 736 ndso->kernel = dso->kernel; 737 738 curr_map = map__new2(pos->start, ndso, map->type); 739 if (curr_map == NULL) { 740 dso__delete(ndso); 741 return -1; 742 } 743 744 curr_map->map_ip = curr_map->unmap_ip = identity__map_ip; 745 map_groups__insert(kmaps, curr_map); 746 ++kernel_range; 747 } else if (delta) { 748 /* Kernel was relocated at boot time */ 749 pos->start -= delta; 750 pos->end -= delta; 751 } 752 filter_symbol: 753 if (filter && filter(curr_map, pos)) { 754 discard_symbol: rb_erase(&pos->rb_node, root); 755 symbol__delete(pos); 756 } else { 757 if (curr_map != map) { 758 rb_erase(&pos->rb_node, root); 759 symbols__insert(&curr_map->dso->symbols[curr_map->type], pos); 760 ++moved; 761 } else 762 ++count; 763 } 764 } 765 766 if (curr_map != map && 767 dso->kernel == DSO_TYPE_GUEST_KERNEL && 768 machine__is_default_guest(kmaps->machine)) { 769 dso__set_loaded(curr_map->dso, curr_map->type); 770 } 771 772 return count + moved; 773 } 774 775 bool symbol__restricted_filename(const char *filename, 776 const char *restricted_filename) 777 { 778 bool restricted = false; 779 780 if (symbol_conf.kptr_restrict) { 781 char *r = realpath(filename, NULL); 782 783 if (r != NULL) { 784 restricted = strcmp(r, restricted_filename) == 0; 785 free(r); 786 return restricted; 787 } 788 } 789 790 return restricted; 791 } 792 793 struct module_info { 794 struct rb_node rb_node; 795 char *name; 796 u64 start; 797 }; 798 799 static void add_module(struct module_info *mi, struct rb_root *modules) 800 { 801 struct rb_node **p = &modules->rb_node; 802 struct rb_node *parent = NULL; 803 struct module_info *m; 804 805 while (*p != NULL) { 806 parent = *p; 807 m = rb_entry(parent, struct module_info, rb_node); 808 if (strcmp(mi->name, m->name) < 0) 809 p = &(*p)->rb_left; 810 else 811 p = &(*p)->rb_right; 812 } 813 rb_link_node(&mi->rb_node, parent, p); 814 rb_insert_color(&mi->rb_node, modules); 815 } 816 817 static void delete_modules(struct rb_root *modules) 818 { 819 struct module_info *mi; 820 struct rb_node *next = rb_first(modules); 821 822 while (next) { 823 mi = rb_entry(next, struct module_info, rb_node); 824 next = rb_next(&mi->rb_node); 825 rb_erase(&mi->rb_node, modules); 826 zfree(&mi->name); 827 free(mi); 828 } 829 } 830 831 static struct module_info *find_module(const char *name, 832 struct rb_root *modules) 833 { 834 struct rb_node *n = modules->rb_node; 835 836 while (n) { 837 struct module_info *m; 838 int cmp; 839 840 m = rb_entry(n, struct module_info, rb_node); 841 cmp = strcmp(name, m->name); 842 if (cmp < 0) 843 n = n->rb_left; 844 else if (cmp > 0) 845 n = n->rb_right; 846 else 847 return m; 848 } 849 850 return NULL; 851 } 852 853 static int __read_proc_modules(void *arg, const char *name, u64 start) 854 { 855 struct rb_root *modules = arg; 856 struct module_info *mi; 857 858 mi = zalloc(sizeof(struct module_info)); 859 if (!mi) 860 return -ENOMEM; 861 862 mi->name = strdup(name); 863 mi->start = start; 864 865 if (!mi->name) { 866 free(mi); 867 return -ENOMEM; 868 } 869 870 add_module(mi, modules); 871 872 return 0; 873 } 874 875 static int read_proc_modules(const char *filename, struct rb_root *modules) 876 { 877 if (symbol__restricted_filename(filename, "/proc/modules")) 878 return -1; 879 880 if (modules__parse(filename, modules, __read_proc_modules)) { 881 delete_modules(modules); 882 return -1; 883 } 884 885 return 0; 886 } 887 888 int compare_proc_modules(const char *from, const char *to) 889 { 890 struct rb_root from_modules = RB_ROOT; 891 struct rb_root to_modules = RB_ROOT; 892 struct rb_node *from_node, *to_node; 893 struct module_info *from_m, *to_m; 894 int ret = -1; 895 896 if (read_proc_modules(from, &from_modules)) 897 return -1; 898 899 if (read_proc_modules(to, &to_modules)) 900 goto out_delete_from; 901 902 from_node = rb_first(&from_modules); 903 to_node = rb_first(&to_modules); 904 while (from_node) { 905 if (!to_node) 906 break; 907 908 from_m = rb_entry(from_node, struct module_info, rb_node); 909 to_m = rb_entry(to_node, struct module_info, rb_node); 910 911 if (from_m->start != to_m->start || 912 strcmp(from_m->name, to_m->name)) 913 break; 914 915 from_node = rb_next(from_node); 916 to_node = rb_next(to_node); 917 } 918 919 if (!from_node && !to_node) 920 ret = 0; 921 922 delete_modules(&to_modules); 923 out_delete_from: 924 delete_modules(&from_modules); 925 926 return ret; 927 } 928 929 static int do_validate_kcore_modules(const char *filename, struct map *map, 930 struct map_groups *kmaps) 931 { 932 struct rb_root modules = RB_ROOT; 933 struct map *old_map; 934 int err; 935 936 err = read_proc_modules(filename, &modules); 937 if (err) 938 return err; 939 940 old_map = map_groups__first(kmaps, map->type); 941 while (old_map) { 942 struct map *next = map_groups__next(old_map); 943 struct module_info *mi; 944 945 if (old_map == map || old_map->start == map->start) { 946 /* The kernel map */ 947 old_map = next; 948 continue; 949 } 950 951 /* Module must be in memory at the same address */ 952 mi = find_module(old_map->dso->short_name, &modules); 953 if (!mi || mi->start != old_map->start) { 954 err = -EINVAL; 955 goto out; 956 } 957 958 old_map = next; 959 } 960 out: 961 delete_modules(&modules); 962 return err; 963 } 964 965 /* 966 * If kallsyms is referenced by name then we look for filename in the same 967 * directory. 968 */ 969 static bool filename_from_kallsyms_filename(char *filename, 970 const char *base_name, 971 const char *kallsyms_filename) 972 { 973 char *name; 974 975 strcpy(filename, kallsyms_filename); 976 name = strrchr(filename, '/'); 977 if (!name) 978 return false; 979 980 name += 1; 981 982 if (!strcmp(name, "kallsyms")) { 983 strcpy(name, base_name); 984 return true; 985 } 986 987 return false; 988 } 989 990 static int validate_kcore_modules(const char *kallsyms_filename, 991 struct map *map) 992 { 993 struct map_groups *kmaps = map__kmap(map)->kmaps; 994 char modules_filename[PATH_MAX]; 995 996 if (!filename_from_kallsyms_filename(modules_filename, "modules", 997 kallsyms_filename)) 998 return -EINVAL; 999 1000 if (do_validate_kcore_modules(modules_filename, map, kmaps)) 1001 return -EINVAL; 1002 1003 return 0; 1004 } 1005 1006 static int validate_kcore_addresses(const char *kallsyms_filename, 1007 struct map *map) 1008 { 1009 struct kmap *kmap = map__kmap(map); 1010 1011 if (kmap->ref_reloc_sym && kmap->ref_reloc_sym->name) { 1012 u64 start; 1013 1014 start = kallsyms__get_function_start(kallsyms_filename, 1015 kmap->ref_reloc_sym->name); 1016 if (start != kmap->ref_reloc_sym->addr) 1017 return -EINVAL; 1018 } 1019 1020 return validate_kcore_modules(kallsyms_filename, map); 1021 } 1022 1023 struct kcore_mapfn_data { 1024 struct dso *dso; 1025 enum map_type type; 1026 struct list_head maps; 1027 }; 1028 1029 static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data) 1030 { 1031 struct kcore_mapfn_data *md = data; 1032 struct map *map; 1033 1034 map = map__new2(start, md->dso, md->type); 1035 if (map == NULL) 1036 return -ENOMEM; 1037 1038 map->end = map->start + len; 1039 map->pgoff = pgoff; 1040 1041 list_add(&map->node, &md->maps); 1042 1043 return 0; 1044 } 1045 1046 static int dso__load_kcore(struct dso *dso, struct map *map, 1047 const char *kallsyms_filename) 1048 { 1049 struct map_groups *kmaps = map__kmap(map)->kmaps; 1050 struct machine *machine = kmaps->machine; 1051 struct kcore_mapfn_data md; 1052 struct map *old_map, *new_map, *replacement_map = NULL; 1053 bool is_64_bit; 1054 int err, fd; 1055 char kcore_filename[PATH_MAX]; 1056 struct symbol *sym; 1057 1058 /* This function requires that the map is the kernel map */ 1059 if (map != machine->vmlinux_maps[map->type]) 1060 return -EINVAL; 1061 1062 if (!filename_from_kallsyms_filename(kcore_filename, "kcore", 1063 kallsyms_filename)) 1064 return -EINVAL; 1065 1066 /* Modules and kernel must be present at their original addresses */ 1067 if (validate_kcore_addresses(kallsyms_filename, map)) 1068 return -EINVAL; 1069 1070 md.dso = dso; 1071 md.type = map->type; 1072 INIT_LIST_HEAD(&md.maps); 1073 1074 fd = open(kcore_filename, O_RDONLY); 1075 if (fd < 0) 1076 return -EINVAL; 1077 1078 /* Read new maps into temporary lists */ 1079 err = file__read_maps(fd, md.type == MAP__FUNCTION, kcore_mapfn, &md, 1080 &is_64_bit); 1081 if (err) 1082 goto out_err; 1083 dso->is_64_bit = is_64_bit; 1084 1085 if (list_empty(&md.maps)) { 1086 err = -EINVAL; 1087 goto out_err; 1088 } 1089 1090 /* Remove old maps */ 1091 old_map = map_groups__first(kmaps, map->type); 1092 while (old_map) { 1093 struct map *next = map_groups__next(old_map); 1094 1095 if (old_map != map) 1096 map_groups__remove(kmaps, old_map); 1097 old_map = next; 1098 } 1099 1100 /* Find the kernel map using the first symbol */ 1101 sym = dso__first_symbol(dso, map->type); 1102 list_for_each_entry(new_map, &md.maps, node) { 1103 if (sym && sym->start >= new_map->start && 1104 sym->start < new_map->end) { 1105 replacement_map = new_map; 1106 break; 1107 } 1108 } 1109 1110 if (!replacement_map) 1111 replacement_map = list_entry(md.maps.next, struct map, node); 1112 1113 /* Add new maps */ 1114 while (!list_empty(&md.maps)) { 1115 new_map = list_entry(md.maps.next, struct map, node); 1116 list_del(&new_map->node); 1117 if (new_map == replacement_map) { 1118 map->start = new_map->start; 1119 map->end = new_map->end; 1120 map->pgoff = new_map->pgoff; 1121 map->map_ip = new_map->map_ip; 1122 map->unmap_ip = new_map->unmap_ip; 1123 map__delete(new_map); 1124 /* Ensure maps are correctly ordered */ 1125 map_groups__remove(kmaps, map); 1126 map_groups__insert(kmaps, map); 1127 } else { 1128 map_groups__insert(kmaps, new_map); 1129 } 1130 } 1131 1132 /* 1133 * Set the data type and long name so that kcore can be read via 1134 * dso__data_read_addr(). 1135 */ 1136 if (dso->kernel == DSO_TYPE_GUEST_KERNEL) 1137 dso->binary_type = DSO_BINARY_TYPE__GUEST_KCORE; 1138 else 1139 dso->binary_type = DSO_BINARY_TYPE__KCORE; 1140 dso__set_long_name(dso, strdup(kcore_filename), true); 1141 1142 close(fd); 1143 1144 if (map->type == MAP__FUNCTION) 1145 pr_debug("Using %s for kernel object code\n", kcore_filename); 1146 else 1147 pr_debug("Using %s for kernel data\n", kcore_filename); 1148 1149 return 0; 1150 1151 out_err: 1152 while (!list_empty(&md.maps)) { 1153 map = list_entry(md.maps.next, struct map, node); 1154 list_del(&map->node); 1155 map__delete(map); 1156 } 1157 close(fd); 1158 return -EINVAL; 1159 } 1160 1161 /* 1162 * If the kernel is relocated at boot time, kallsyms won't match. Compute the 1163 * delta based on the relocation reference symbol. 1164 */ 1165 static int kallsyms__delta(struct map *map, const char *filename, u64 *delta) 1166 { 1167 struct kmap *kmap = map__kmap(map); 1168 u64 addr; 1169 1170 if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->name) 1171 return 0; 1172 1173 addr = kallsyms__get_function_start(filename, 1174 kmap->ref_reloc_sym->name); 1175 if (!addr) 1176 return -1; 1177 1178 *delta = addr - kmap->ref_reloc_sym->addr; 1179 return 0; 1180 } 1181 1182 int dso__load_kallsyms(struct dso *dso, const char *filename, 1183 struct map *map, symbol_filter_t filter) 1184 { 1185 u64 delta = 0; 1186 1187 if (symbol__restricted_filename(filename, "/proc/kallsyms")) 1188 return -1; 1189 1190 if (dso__load_all_kallsyms(dso, filename, map) < 0) 1191 return -1; 1192 1193 if (kallsyms__delta(map, filename, &delta)) 1194 return -1; 1195 1196 symbols__fixup_duplicate(&dso->symbols[map->type]); 1197 symbols__fixup_end(&dso->symbols[map->type]); 1198 1199 if (dso->kernel == DSO_TYPE_GUEST_KERNEL) 1200 dso->symtab_type = DSO_BINARY_TYPE__GUEST_KALLSYMS; 1201 else 1202 dso->symtab_type = DSO_BINARY_TYPE__KALLSYMS; 1203 1204 if (!dso__load_kcore(dso, map, filename)) 1205 return dso__split_kallsyms_for_kcore(dso, map, filter); 1206 else 1207 return dso__split_kallsyms(dso, map, delta, filter); 1208 } 1209 1210 static int dso__load_perf_map(struct dso *dso, struct map *map, 1211 symbol_filter_t filter) 1212 { 1213 char *line = NULL; 1214 size_t n; 1215 FILE *file; 1216 int nr_syms = 0; 1217 1218 file = fopen(dso->long_name, "r"); 1219 if (file == NULL) 1220 goto out_failure; 1221 1222 while (!feof(file)) { 1223 u64 start, size; 1224 struct symbol *sym; 1225 int line_len, len; 1226 1227 line_len = getline(&line, &n, file); 1228 if (line_len < 0) 1229 break; 1230 1231 if (!line) 1232 goto out_failure; 1233 1234 line[--line_len] = '\0'; /* \n */ 1235 1236 len = hex2u64(line, &start); 1237 1238 len++; 1239 if (len + 2 >= line_len) 1240 continue; 1241 1242 len += hex2u64(line + len, &size); 1243 1244 len++; 1245 if (len + 2 >= line_len) 1246 continue; 1247 1248 sym = symbol__new(start, size, STB_GLOBAL, line + len); 1249 1250 if (sym == NULL) 1251 goto out_delete_line; 1252 1253 if (filter && filter(map, sym)) 1254 symbol__delete(sym); 1255 else { 1256 symbols__insert(&dso->symbols[map->type], sym); 1257 nr_syms++; 1258 } 1259 } 1260 1261 free(line); 1262 fclose(file); 1263 1264 return nr_syms; 1265 1266 out_delete_line: 1267 free(line); 1268 out_failure: 1269 return -1; 1270 } 1271 1272 static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod, 1273 enum dso_binary_type type) 1274 { 1275 switch (type) { 1276 case DSO_BINARY_TYPE__JAVA_JIT: 1277 case DSO_BINARY_TYPE__DEBUGLINK: 1278 case DSO_BINARY_TYPE__SYSTEM_PATH_DSO: 1279 case DSO_BINARY_TYPE__FEDORA_DEBUGINFO: 1280 case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO: 1281 case DSO_BINARY_TYPE__BUILDID_DEBUGINFO: 1282 case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO: 1283 return !kmod && dso->kernel == DSO_TYPE_USER; 1284 1285 case DSO_BINARY_TYPE__KALLSYMS: 1286 case DSO_BINARY_TYPE__VMLINUX: 1287 case DSO_BINARY_TYPE__KCORE: 1288 return dso->kernel == DSO_TYPE_KERNEL; 1289 1290 case DSO_BINARY_TYPE__GUEST_KALLSYMS: 1291 case DSO_BINARY_TYPE__GUEST_VMLINUX: 1292 case DSO_BINARY_TYPE__GUEST_KCORE: 1293 return dso->kernel == DSO_TYPE_GUEST_KERNEL; 1294 1295 case DSO_BINARY_TYPE__GUEST_KMODULE: 1296 case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE: 1297 /* 1298 * kernel modules know their symtab type - it's set when 1299 * creating a module dso in machine__new_module(). 1300 */ 1301 return kmod && dso->symtab_type == type; 1302 1303 case DSO_BINARY_TYPE__BUILD_ID_CACHE: 1304 return true; 1305 1306 case DSO_BINARY_TYPE__NOT_FOUND: 1307 default: 1308 return false; 1309 } 1310 } 1311 1312 int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter) 1313 { 1314 char *name; 1315 int ret = -1; 1316 u_int i; 1317 struct machine *machine; 1318 char *root_dir = (char *) ""; 1319 int ss_pos = 0; 1320 struct symsrc ss_[2]; 1321 struct symsrc *syms_ss = NULL, *runtime_ss = NULL; 1322 bool kmod; 1323 1324 dso__set_loaded(dso, map->type); 1325 1326 if (dso->kernel == DSO_TYPE_KERNEL) 1327 return dso__load_kernel_sym(dso, map, filter); 1328 else if (dso->kernel == DSO_TYPE_GUEST_KERNEL) 1329 return dso__load_guest_kernel_sym(dso, map, filter); 1330 1331 if (map->groups && map->groups->machine) 1332 machine = map->groups->machine; 1333 else 1334 machine = NULL; 1335 1336 dso->adjust_symbols = 0; 1337 1338 if (strncmp(dso->name, "/tmp/perf-", 10) == 0) { 1339 struct stat st; 1340 1341 if (lstat(dso->name, &st) < 0) 1342 return -1; 1343 1344 if (st.st_uid && (st.st_uid != geteuid())) { 1345 pr_warning("File %s not owned by current user or root, " 1346 "ignoring it.\n", dso->name); 1347 return -1; 1348 } 1349 1350 ret = dso__load_perf_map(dso, map, filter); 1351 dso->symtab_type = ret > 0 ? DSO_BINARY_TYPE__JAVA_JIT : 1352 DSO_BINARY_TYPE__NOT_FOUND; 1353 return ret; 1354 } 1355 1356 if (machine) 1357 root_dir = machine->root_dir; 1358 1359 name = malloc(PATH_MAX); 1360 if (!name) 1361 return -1; 1362 1363 kmod = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE || 1364 dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE; 1365 1366 /* 1367 * Iterate over candidate debug images. 1368 * Keep track of "interesting" ones (those which have a symtab, dynsym, 1369 * and/or opd section) for processing. 1370 */ 1371 for (i = 0; i < DSO_BINARY_TYPE__SYMTAB_CNT; i++) { 1372 struct symsrc *ss = &ss_[ss_pos]; 1373 bool next_slot = false; 1374 1375 enum dso_binary_type symtab_type = binary_type_symtab[i]; 1376 1377 if (!dso__is_compatible_symtab_type(dso, kmod, symtab_type)) 1378 continue; 1379 1380 if (dso__read_binary_type_filename(dso, symtab_type, 1381 root_dir, name, PATH_MAX)) 1382 continue; 1383 1384 /* Name is now the name of the next image to try */ 1385 if (symsrc__init(ss, dso, name, symtab_type) < 0) 1386 continue; 1387 1388 if (!syms_ss && symsrc__has_symtab(ss)) { 1389 syms_ss = ss; 1390 next_slot = true; 1391 if (!dso->symsrc_filename) 1392 dso->symsrc_filename = strdup(name); 1393 } 1394 1395 if (!runtime_ss && symsrc__possibly_runtime(ss)) { 1396 runtime_ss = ss; 1397 next_slot = true; 1398 } 1399 1400 if (next_slot) { 1401 ss_pos++; 1402 1403 if (syms_ss && runtime_ss) 1404 break; 1405 } else { 1406 symsrc__destroy(ss); 1407 } 1408 1409 } 1410 1411 if (!runtime_ss && !syms_ss) 1412 goto out_free; 1413 1414 if (runtime_ss && !syms_ss) { 1415 syms_ss = runtime_ss; 1416 } 1417 1418 /* We'll have to hope for the best */ 1419 if (!runtime_ss && syms_ss) 1420 runtime_ss = syms_ss; 1421 1422 if (syms_ss) 1423 ret = dso__load_sym(dso, map, syms_ss, runtime_ss, filter, kmod); 1424 else 1425 ret = -1; 1426 1427 if (ret > 0) { 1428 int nr_plt; 1429 1430 nr_plt = dso__synthesize_plt_symbols(dso, runtime_ss, map, filter); 1431 if (nr_plt > 0) 1432 ret += nr_plt; 1433 } 1434 1435 for (; ss_pos > 0; ss_pos--) 1436 symsrc__destroy(&ss_[ss_pos - 1]); 1437 out_free: 1438 free(name); 1439 if (ret < 0 && strstr(dso->name, " (deleted)") != NULL) 1440 return 0; 1441 return ret; 1442 } 1443 1444 struct map *map_groups__find_by_name(struct map_groups *mg, 1445 enum map_type type, const char *name) 1446 { 1447 struct rb_node *nd; 1448 1449 for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) { 1450 struct map *map = rb_entry(nd, struct map, rb_node); 1451 1452 if (map->dso && strcmp(map->dso->short_name, name) == 0) 1453 return map; 1454 } 1455 1456 return NULL; 1457 } 1458 1459 int dso__load_vmlinux(struct dso *dso, struct map *map, 1460 const char *vmlinux, bool vmlinux_allocated, 1461 symbol_filter_t filter) 1462 { 1463 int err = -1; 1464 struct symsrc ss; 1465 char symfs_vmlinux[PATH_MAX]; 1466 enum dso_binary_type symtab_type; 1467 1468 if (vmlinux[0] == '/') 1469 snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s", vmlinux); 1470 else 1471 snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s%s", 1472 symbol_conf.symfs, vmlinux); 1473 1474 if (dso->kernel == DSO_TYPE_GUEST_KERNEL) 1475 symtab_type = DSO_BINARY_TYPE__GUEST_VMLINUX; 1476 else 1477 symtab_type = DSO_BINARY_TYPE__VMLINUX; 1478 1479 if (symsrc__init(&ss, dso, symfs_vmlinux, symtab_type)) 1480 return -1; 1481 1482 err = dso__load_sym(dso, map, &ss, &ss, filter, 0); 1483 symsrc__destroy(&ss); 1484 1485 if (err > 0) { 1486 if (dso->kernel == DSO_TYPE_GUEST_KERNEL) 1487 dso->binary_type = DSO_BINARY_TYPE__GUEST_VMLINUX; 1488 else 1489 dso->binary_type = DSO_BINARY_TYPE__VMLINUX; 1490 dso__set_long_name(dso, vmlinux, vmlinux_allocated); 1491 dso__set_loaded(dso, map->type); 1492 pr_debug("Using %s for symbols\n", symfs_vmlinux); 1493 } 1494 1495 return err; 1496 } 1497 1498 int dso__load_vmlinux_path(struct dso *dso, struct map *map, 1499 symbol_filter_t filter) 1500 { 1501 int i, err = 0; 1502 char *filename; 1503 1504 pr_debug("Looking at the vmlinux_path (%d entries long)\n", 1505 vmlinux_path__nr_entries + 1); 1506 1507 filename = dso__build_id_filename(dso, NULL, 0); 1508 if (filename != NULL) { 1509 err = dso__load_vmlinux(dso, map, filename, true, filter); 1510 if (err > 0) 1511 goto out; 1512 free(filename); 1513 } 1514 1515 for (i = 0; i < vmlinux_path__nr_entries; ++i) { 1516 err = dso__load_vmlinux(dso, map, vmlinux_path[i], false, filter); 1517 if (err > 0) 1518 break; 1519 } 1520 out: 1521 return err; 1522 } 1523 1524 static int find_matching_kcore(struct map *map, char *dir, size_t dir_sz) 1525 { 1526 char kallsyms_filename[PATH_MAX]; 1527 struct dirent *dent; 1528 int ret = -1; 1529 DIR *d; 1530 1531 d = opendir(dir); 1532 if (!d) 1533 return -1; 1534 1535 while (1) { 1536 dent = readdir(d); 1537 if (!dent) 1538 break; 1539 if (dent->d_type != DT_DIR) 1540 continue; 1541 scnprintf(kallsyms_filename, sizeof(kallsyms_filename), 1542 "%s/%s/kallsyms", dir, dent->d_name); 1543 if (!validate_kcore_addresses(kallsyms_filename, map)) { 1544 strlcpy(dir, kallsyms_filename, dir_sz); 1545 ret = 0; 1546 break; 1547 } 1548 } 1549 1550 closedir(d); 1551 1552 return ret; 1553 } 1554 1555 static char *dso__find_kallsyms(struct dso *dso, struct map *map) 1556 { 1557 u8 host_build_id[BUILD_ID_SIZE]; 1558 char sbuild_id[BUILD_ID_SIZE * 2 + 1]; 1559 bool is_host = false; 1560 char path[PATH_MAX]; 1561 1562 if (!dso->has_build_id) { 1563 /* 1564 * Last resort, if we don't have a build-id and couldn't find 1565 * any vmlinux file, try the running kernel kallsyms table. 1566 */ 1567 goto proc_kallsyms; 1568 } 1569 1570 if (sysfs__read_build_id("/sys/kernel/notes", host_build_id, 1571 sizeof(host_build_id)) == 0) 1572 is_host = dso__build_id_equal(dso, host_build_id); 1573 1574 build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id); 1575 1576 scnprintf(path, sizeof(path), "%s/[kernel.kcore]/%s", buildid_dir, 1577 sbuild_id); 1578 1579 /* Use /proc/kallsyms if possible */ 1580 if (is_host) { 1581 DIR *d; 1582 int fd; 1583 1584 /* If no cached kcore go with /proc/kallsyms */ 1585 d = opendir(path); 1586 if (!d) 1587 goto proc_kallsyms; 1588 closedir(d); 1589 1590 /* 1591 * Do not check the build-id cache, until we know we cannot use 1592 * /proc/kcore. 1593 */ 1594 fd = open("/proc/kcore", O_RDONLY); 1595 if (fd != -1) { 1596 close(fd); 1597 /* If module maps match go with /proc/kallsyms */ 1598 if (!validate_kcore_addresses("/proc/kallsyms", map)) 1599 goto proc_kallsyms; 1600 } 1601 1602 /* Find kallsyms in build-id cache with kcore */ 1603 if (!find_matching_kcore(map, path, sizeof(path))) 1604 return strdup(path); 1605 1606 goto proc_kallsyms; 1607 } 1608 1609 /* Find kallsyms in build-id cache with kcore */ 1610 if (!find_matching_kcore(map, path, sizeof(path))) 1611 return strdup(path); 1612 1613 scnprintf(path, sizeof(path), "%s/[kernel.kallsyms]/%s", 1614 buildid_dir, sbuild_id); 1615 1616 if (access(path, F_OK)) { 1617 pr_err("No kallsyms or vmlinux with build-id %s was found\n", 1618 sbuild_id); 1619 return NULL; 1620 } 1621 1622 return strdup(path); 1623 1624 proc_kallsyms: 1625 return strdup("/proc/kallsyms"); 1626 } 1627 1628 static int dso__load_kernel_sym(struct dso *dso, struct map *map, 1629 symbol_filter_t filter) 1630 { 1631 int err; 1632 const char *kallsyms_filename = NULL; 1633 char *kallsyms_allocated_filename = NULL; 1634 /* 1635 * Step 1: if the user specified a kallsyms or vmlinux filename, use 1636 * it and only it, reporting errors to the user if it cannot be used. 1637 * 1638 * For instance, try to analyse an ARM perf.data file _without_ a 1639 * build-id, or if the user specifies the wrong path to the right 1640 * vmlinux file, obviously we can't fallback to another vmlinux (a 1641 * x86_86 one, on the machine where analysis is being performed, say), 1642 * or worse, /proc/kallsyms. 1643 * 1644 * If the specified file _has_ a build-id and there is a build-id 1645 * section in the perf.data file, we will still do the expected 1646 * validation in dso__load_vmlinux and will bail out if they don't 1647 * match. 1648 */ 1649 if (symbol_conf.kallsyms_name != NULL) { 1650 kallsyms_filename = symbol_conf.kallsyms_name; 1651 goto do_kallsyms; 1652 } 1653 1654 if (!symbol_conf.ignore_vmlinux && symbol_conf.vmlinux_name != NULL) { 1655 return dso__load_vmlinux(dso, map, symbol_conf.vmlinux_name, 1656 false, filter); 1657 } 1658 1659 if (!symbol_conf.ignore_vmlinux && vmlinux_path != NULL) { 1660 err = dso__load_vmlinux_path(dso, map, filter); 1661 if (err > 0) 1662 return err; 1663 } 1664 1665 /* do not try local files if a symfs was given */ 1666 if (symbol_conf.symfs[0] != 0) 1667 return -1; 1668 1669 kallsyms_allocated_filename = dso__find_kallsyms(dso, map); 1670 if (!kallsyms_allocated_filename) 1671 return -1; 1672 1673 kallsyms_filename = kallsyms_allocated_filename; 1674 1675 do_kallsyms: 1676 err = dso__load_kallsyms(dso, kallsyms_filename, map, filter); 1677 if (err > 0) 1678 pr_debug("Using %s for symbols\n", kallsyms_filename); 1679 free(kallsyms_allocated_filename); 1680 1681 if (err > 0 && !dso__is_kcore(dso)) { 1682 dso->binary_type = DSO_BINARY_TYPE__KALLSYMS; 1683 dso__set_long_name(dso, "[kernel.kallsyms]", false); 1684 map__fixup_start(map); 1685 map__fixup_end(map); 1686 } 1687 1688 return err; 1689 } 1690 1691 static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map, 1692 symbol_filter_t filter) 1693 { 1694 int err; 1695 const char *kallsyms_filename = NULL; 1696 struct machine *machine; 1697 char path[PATH_MAX]; 1698 1699 if (!map->groups) { 1700 pr_debug("Guest kernel map hasn't the point to groups\n"); 1701 return -1; 1702 } 1703 machine = map->groups->machine; 1704 1705 if (machine__is_default_guest(machine)) { 1706 /* 1707 * if the user specified a vmlinux filename, use it and only 1708 * it, reporting errors to the user if it cannot be used. 1709 * Or use file guest_kallsyms inputted by user on commandline 1710 */ 1711 if (symbol_conf.default_guest_vmlinux_name != NULL) { 1712 err = dso__load_vmlinux(dso, map, 1713 symbol_conf.default_guest_vmlinux_name, 1714 false, filter); 1715 return err; 1716 } 1717 1718 kallsyms_filename = symbol_conf.default_guest_kallsyms; 1719 if (!kallsyms_filename) 1720 return -1; 1721 } else { 1722 sprintf(path, "%s/proc/kallsyms", machine->root_dir); 1723 kallsyms_filename = path; 1724 } 1725 1726 err = dso__load_kallsyms(dso, kallsyms_filename, map, filter); 1727 if (err > 0) 1728 pr_debug("Using %s for symbols\n", kallsyms_filename); 1729 if (err > 0 && !dso__is_kcore(dso)) { 1730 dso->binary_type = DSO_BINARY_TYPE__GUEST_KALLSYMS; 1731 machine__mmap_name(machine, path, sizeof(path)); 1732 dso__set_long_name(dso, strdup(path), true); 1733 map__fixup_start(map); 1734 map__fixup_end(map); 1735 } 1736 1737 return err; 1738 } 1739 1740 static void vmlinux_path__exit(void) 1741 { 1742 while (--vmlinux_path__nr_entries >= 0) 1743 zfree(&vmlinux_path[vmlinux_path__nr_entries]); 1744 1745 zfree(&vmlinux_path); 1746 } 1747 1748 static int vmlinux_path__init(void) 1749 { 1750 struct utsname uts; 1751 char bf[PATH_MAX]; 1752 1753 vmlinux_path = malloc(sizeof(char *) * 5); 1754 if (vmlinux_path == NULL) 1755 return -1; 1756 1757 vmlinux_path[vmlinux_path__nr_entries] = strdup("vmlinux"); 1758 if (vmlinux_path[vmlinux_path__nr_entries] == NULL) 1759 goto out_fail; 1760 ++vmlinux_path__nr_entries; 1761 vmlinux_path[vmlinux_path__nr_entries] = strdup("/boot/vmlinux"); 1762 if (vmlinux_path[vmlinux_path__nr_entries] == NULL) 1763 goto out_fail; 1764 ++vmlinux_path__nr_entries; 1765 1766 /* only try running kernel version if no symfs was given */ 1767 if (symbol_conf.symfs[0] != 0) 1768 return 0; 1769 1770 if (uname(&uts) < 0) 1771 return -1; 1772 1773 snprintf(bf, sizeof(bf), "/boot/vmlinux-%s", uts.release); 1774 vmlinux_path[vmlinux_path__nr_entries] = strdup(bf); 1775 if (vmlinux_path[vmlinux_path__nr_entries] == NULL) 1776 goto out_fail; 1777 ++vmlinux_path__nr_entries; 1778 snprintf(bf, sizeof(bf), "/lib/modules/%s/build/vmlinux", uts.release); 1779 vmlinux_path[vmlinux_path__nr_entries] = strdup(bf); 1780 if (vmlinux_path[vmlinux_path__nr_entries] == NULL) 1781 goto out_fail; 1782 ++vmlinux_path__nr_entries; 1783 snprintf(bf, sizeof(bf), "/usr/lib/debug/lib/modules/%s/vmlinux", 1784 uts.release); 1785 vmlinux_path[vmlinux_path__nr_entries] = strdup(bf); 1786 if (vmlinux_path[vmlinux_path__nr_entries] == NULL) 1787 goto out_fail; 1788 ++vmlinux_path__nr_entries; 1789 1790 return 0; 1791 1792 out_fail: 1793 vmlinux_path__exit(); 1794 return -1; 1795 } 1796 1797 int setup_list(struct strlist **list, const char *list_str, 1798 const char *list_name) 1799 { 1800 if (list_str == NULL) 1801 return 0; 1802 1803 *list = strlist__new(true, list_str); 1804 if (!*list) { 1805 pr_err("problems parsing %s list\n", list_name); 1806 return -1; 1807 } 1808 return 0; 1809 } 1810 1811 static bool symbol__read_kptr_restrict(void) 1812 { 1813 bool value = false; 1814 1815 if (geteuid() != 0) { 1816 FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r"); 1817 if (fp != NULL) { 1818 char line[8]; 1819 1820 if (fgets(line, sizeof(line), fp) != NULL) 1821 value = atoi(line) != 0; 1822 1823 fclose(fp); 1824 } 1825 } 1826 1827 return value; 1828 } 1829 1830 int symbol__init(void) 1831 { 1832 const char *symfs; 1833 1834 if (symbol_conf.initialized) 1835 return 0; 1836 1837 symbol_conf.priv_size = PERF_ALIGN(symbol_conf.priv_size, sizeof(u64)); 1838 1839 symbol__elf_init(); 1840 1841 if (symbol_conf.sort_by_name) 1842 symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) - 1843 sizeof(struct symbol)); 1844 1845 if (symbol_conf.try_vmlinux_path && vmlinux_path__init() < 0) 1846 return -1; 1847 1848 if (symbol_conf.field_sep && *symbol_conf.field_sep == '.') { 1849 pr_err("'.' is the only non valid --field-separator argument\n"); 1850 return -1; 1851 } 1852 1853 if (setup_list(&symbol_conf.dso_list, 1854 symbol_conf.dso_list_str, "dso") < 0) 1855 return -1; 1856 1857 if (setup_list(&symbol_conf.comm_list, 1858 symbol_conf.comm_list_str, "comm") < 0) 1859 goto out_free_dso_list; 1860 1861 if (setup_list(&symbol_conf.sym_list, 1862 symbol_conf.sym_list_str, "symbol") < 0) 1863 goto out_free_comm_list; 1864 1865 /* 1866 * A path to symbols of "/" is identical to "" 1867 * reset here for simplicity. 1868 */ 1869 symfs = realpath(symbol_conf.symfs, NULL); 1870 if (symfs == NULL) 1871 symfs = symbol_conf.symfs; 1872 if (strcmp(symfs, "/") == 0) 1873 symbol_conf.symfs = ""; 1874 if (symfs != symbol_conf.symfs) 1875 free((void *)symfs); 1876 1877 symbol_conf.kptr_restrict = symbol__read_kptr_restrict(); 1878 1879 symbol_conf.initialized = true; 1880 return 0; 1881 1882 out_free_comm_list: 1883 strlist__delete(symbol_conf.comm_list); 1884 out_free_dso_list: 1885 strlist__delete(symbol_conf.dso_list); 1886 return -1; 1887 } 1888 1889 void symbol__exit(void) 1890 { 1891 if (!symbol_conf.initialized) 1892 return; 1893 strlist__delete(symbol_conf.sym_list); 1894 strlist__delete(symbol_conf.dso_list); 1895 strlist__delete(symbol_conf.comm_list); 1896 vmlinux_path__exit(); 1897 symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL; 1898 symbol_conf.initialized = false; 1899 } 1900