#include <dirent.h>
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/param.h>
#include <fcntl.h>
#include <unistd.h>
#include <inttypes.h>
#include "build-id.h"
#include "util.h"
#include "debug.h"
#include "machine.h"
#include "symbol.h"
#include "strlist.h"

#include <elf.h>
#include <limits.h>
#include <symbol/kallsyms.h>
#include <sys/utsname.h>

static int dso__load_kernel_sym(struct dso *dso, struct map *map,
				symbol_filter_t filter);
static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map,
				      symbol_filter_t filter);
int vmlinux_path__nr_entries;
char **vmlinux_path;

struct symbol_conf symbol_conf = {
	.use_modules		= true,
	.try_vmlinux_path	= true,
	.annotate_src		= true,
	.demangle		= true,
	.symfs			= "",
};

static enum dso_binary_type binary_type_symtab[] = {
	DSO_BINARY_TYPE__KALLSYMS,
	DSO_BINARY_TYPE__GUEST_KALLSYMS,
	DSO_BINARY_TYPE__JAVA_JIT,
	DSO_BINARY_TYPE__DEBUGLINK,
	DSO_BINARY_TYPE__BUILD_ID_CACHE,
	DSO_BINARY_TYPE__FEDORA_DEBUGINFO,
	DSO_BINARY_TYPE__UBUNTU_DEBUGINFO,
	DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
	DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
	DSO_BINARY_TYPE__GUEST_KMODULE,
	DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE,
	DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
	DSO_BINARY_TYPE__NOT_FOUND,
};

#define DSO_BINARY_TYPE__SYMTAB_CNT ARRAY_SIZE(binary_type_symtab)

bool symbol_type__is_a(char symbol_type, enum map_type map_type)
{
	symbol_type = toupper(symbol_type);

	switch (map_type) {
	case MAP__FUNCTION:
		return symbol_type == 'T' || symbol_type == 'W';
	case MAP__VARIABLE:
		return symbol_type == 'D';
	default:
		return false;
	}
}

static int prefix_underscores_count(const char *str)
{
	const char *tail = str;

	while (*tail == '_')
		tail++;

	return tail - str;
}

#define SYMBOL_A 0
#define SYMBOL_B 1

static int choose_best_symbol(struct symbol *syma, struct symbol *symb)
{
	s64 a;
	s64 b;
	size_t na, nb;

	/* Prefer a symbol with non-zero length */
	a = syma->end - syma->start;
	b = symb->end - symb->start;
	if ((b == 0) && (a > 0))
		return SYMBOL_A;
	else if ((a == 0) && (b > 0))
		return SYMBOL_B;

	/* Prefer a non-weak symbol over a weak one */
	a = syma->binding == STB_WEAK;
	b = symb->binding == STB_WEAK;
	if (b && !a)
		return SYMBOL_A;
	if (a && !b)
		return SYMBOL_B;

	/* Prefer a global symbol over a non-global one */
	a = syma->binding == STB_GLOBAL;
	b = symb->binding == STB_GLOBAL;
	if (a && !b)
		return SYMBOL_A;
	if (b && !a)
		return SYMBOL_B;

	/* Prefer a symbol with fewer leading underscores */
	a = prefix_underscores_count(syma->name);
	b = prefix_underscores_count(symb->name);
	if (b > a)
		return SYMBOL_A;
	else if (a > b)
		return SYMBOL_B;

	/* Choose the symbol with the longest name */
	na = strlen(syma->name);
	nb = strlen(symb->name);
	if (na > nb)
		return SYMBOL_A;
	else if (na < nb)
		return SYMBOL_B;

	/* Avoid "SyS" kernel syscall aliases */
	if (na >= 3 && !strncmp(syma->name, "SyS", 3))
		return SYMBOL_B;
	if (na >= 10 && !strncmp(syma->name, "compat_SyS", 10))
		return SYMBOL_B;

	return SYMBOL_A;
}

void symbols__fixup_duplicate(struct rb_root *symbols)
{
	struct rb_node *nd;
	struct symbol *curr, *next;
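	/*
	 * Walk the tree in address order; whenever two adjacent symbols
	 * share a start address, keep the one choose_best_symbol()
	 * prefers and delete the other, so each address maps to a
	 * single symbol.
	 */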

	nd = rb_first(symbols);

	while (nd) {
		curr = rb_entry(nd, struct symbol, rb_node);
again:
		nd = rb_next(&curr->rb_node);
		next = rb_entry(nd, struct symbol, rb_node);

		if (!nd)
			break;

		if (curr->start != next->start)
			continue;

		if (choose_best_symbol(curr, next) == SYMBOL_A) {
			rb_erase(&next->rb_node, symbols);
			symbol__delete(next);
			goto again;
		} else {
			nd = rb_next(&curr->rb_node);
			rb_erase(&curr->rb_node, symbols);
			symbol__delete(curr);
		}
	}
}

void symbols__fixup_end(struct rb_root *symbols)
{
	struct rb_node *nd, *prevnd = rb_first(symbols);
	struct symbol *curr, *prev;

	if (prevnd == NULL)
		return;

	curr = rb_entry(prevnd, struct symbol, rb_node);

	for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {
		prev = curr;
		curr = rb_entry(nd, struct symbol, rb_node);

		if (prev->end == prev->start && prev->end != curr->start)
			prev->end = curr->start - 1;
	}

	/* Last entry */
	if (curr->end == curr->start)
		curr->end = roundup(curr->start, 4096);
}

void __map_groups__fixup_end(struct map_groups *mg, enum map_type type)
{
	struct map *prev, *curr;
	struct rb_node *nd, *prevnd = rb_first(&mg->maps[type]);

	if (prevnd == NULL)
		return;

	curr = rb_entry(prevnd, struct map, rb_node);

	for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {
		prev = curr;
		curr = rb_entry(nd, struct map, rb_node);
		prev->end = curr->start - 1;
	}

	/*
	 * We don't have the actual symbols yet, so guess the
	 * last map's final address.
	 */
	curr->end = ~0ULL;
}

struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name)
{
	size_t namelen = strlen(name) + 1;
	struct symbol *sym = calloc(1, (symbol_conf.priv_size +
					sizeof(*sym) + namelen));
	if (sym == NULL)
		return NULL;

	if (symbol_conf.priv_size)
		sym = ((void *)sym) + symbol_conf.priv_size;

	sym->start = start;
	sym->end = len ? start + len - 1 : start;
	sym->binding = binding;
	sym->namelen = namelen - 1;

	pr_debug4("%s: %s %#" PRIx64 "-%#" PRIx64 "\n",
		  __func__, name, start, sym->end);
	memcpy(sym->name, name, namelen);

	return sym;
}

void symbol__delete(struct symbol *sym)
{
	free(((void *)sym) - symbol_conf.priv_size);
}

size_t symbol__fprintf(struct symbol *sym, FILE *fp)
{
	return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %c %s\n",
		       sym->start, sym->end,
		       sym->binding == STB_GLOBAL ? 'g' :
		       sym->binding == STB_LOCAL ? 'l' : 'w',
		       sym->name);
}

size_t symbol__fprintf_symname_offs(const struct symbol *sym,
				    const struct addr_location *al, FILE *fp)
{
	unsigned long offset;
	size_t length;

	if (sym && sym->name) {
		length = fprintf(fp, "%s", sym->name);
		if (al) {
			if (al->addr < sym->end)
				offset = al->addr - sym->start;
			else
				offset = al->addr - al->map->start - sym->start;
			length += fprintf(fp, "+0x%lx", offset);
		}
		return length;
	} else
		return fprintf(fp, "[unknown]");
}

size_t symbol__fprintf_symname(const struct symbol *sym, FILE *fp)
{
	return symbol__fprintf_symname_offs(sym, NULL, fp);
}

void symbols__delete(struct rb_root *symbols)
{
	struct symbol *pos;
	struct rb_node *next = rb_first(symbols);

	while (next) {
		pos = rb_entry(next, struct symbol, rb_node);
		next = rb_next(&pos->rb_node);
		rb_erase(&pos->rb_node, symbols);
		symbol__delete(pos);
	}
}

void symbols__insert(struct rb_root *symbols, struct symbol *sym)
{
	struct rb_node **p = &symbols->rb_node;
	struct rb_node *parent = NULL;
	const u64 ip = sym->start;
	struct symbol *s;

	while (*p != NULL) {
		parent = *p;
		s = rb_entry(parent, struct symbol, rb_node);
		if (ip < s->start)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&sym->rb_node, parent, p);
	rb_insert_color(&sym->rb_node, symbols);
}

static struct symbol *symbols__find(struct rb_root *symbols, u64 ip)
{
	struct rb_node *n;

	if (symbols == NULL)
		return NULL;

	n = symbols->rb_node;

	while (n) {
		struct symbol *s = rb_entry(n, struct symbol, rb_node);

		if (ip < s->start)
			n = n->rb_left;
		else if (ip > s->end)
			n = n->rb_right;
		else
			return s;
	}

	return NULL;
}

static struct symbol *symbols__first(struct rb_root *symbols)
{
	struct rb_node *n = rb_first(symbols);

	if (n)
		return rb_entry(n, struct symbol, rb_node);

	return NULL;
}

struct symbol_name_rb_node {
	struct rb_node	rb_node;
	struct symbol	sym;
};

static void symbols__insert_by_name(struct rb_root *symbols, struct symbol *sym)
{
	struct rb_node **p = &symbols->rb_node;
	struct rb_node *parent = NULL;
	struct symbol_name_rb_node *symn, *s;

	symn = container_of(sym, struct symbol_name_rb_node, sym);

	while (*p != NULL) {
		parent = *p;
		s = rb_entry(parent, struct symbol_name_rb_node, rb_node);
		if (strcmp(sym->name, s->sym.name) < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&symn->rb_node, parent, p);
	rb_insert_color(&symn->rb_node, symbols);
}

static void symbols__sort_by_name(struct rb_root *symbols,
				  struct rb_root *source)
{
	struct rb_node *nd;

	for (nd = rb_first(source); nd; nd = rb_next(nd)) {
		struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
		symbols__insert_by_name(symbols, pos);
	}
}

static struct symbol *symbols__find_by_name(struct rb_root *symbols,
					    const char *name)
{
	struct rb_node *n;

	if (symbols == NULL)
		return NULL;

	n = symbols->rb_node;

	while (n) {
		struct symbol_name_rb_node *s;
		int cmp;

		s = rb_entry(n, struct symbol_name_rb_node, rb_node);
		cmp = strcmp(name, s->sym.name);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return &s->sym;
	}

	return NULL;
}

struct symbol *dso__find_symbol(struct dso *dso,
				enum map_type type, u64 addr)
{
	return symbols__find(&dso->symbols[type], addr);
}

struct symbol *dso__first_symbol(struct dso *dso, enum map_type type)
{
	return symbols__first(&dso->symbols[type]);
}

struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type,
					const char *name)
{
	return symbols__find_by_name(&dso->symbol_names[type], name);
}

void dso__sort_by_name(struct dso *dso, enum map_type type)
{
	dso__set_sorted_by_name(dso, type);
	return symbols__sort_by_name(&dso->symbol_names[type],
				     &dso->symbols[type]);
}

size_t dso__fprintf_symbols_by_name(struct dso *dso,
				    enum map_type type, FILE *fp)
{
	size_t ret = 0;
	struct rb_node *nd;
	struct symbol_name_rb_node *pos;

	for (nd = rb_first(&dso->symbol_names[type]); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct symbol_name_rb_node, rb_node);
		fprintf(fp, "%s\n", pos->sym.name);
	}

	return ret;
}

int modules__parse(const char *filename, void *arg,
		   int (*process_module)(void *arg, const char *name,
					 u64 start))
{
	char *line = NULL;
	size_t n;
	FILE *file;
	int err = 0;

	file = fopen(filename, "r");
	if (file == NULL)
		return -1;

	while (1) {
		char name[PATH_MAX];
		u64 start;
		char *sep;
		ssize_t line_len;

		line_len = getline(&line, &n, file);
		if (line_len < 0) {
			if (feof(file))
				break;
			err = -1;
			goto out;
		}

		if (!line) {
			err = -1;
			goto out;
		}

		line[--line_len] = '\0'; /* \n */

		sep = strrchr(line, 'x');
		if (sep == NULL)
			continue;

		hex2u64(sep + 1, &start);

		sep = strchr(line, ' ');
		if (sep == NULL)
			continue;

		*sep = '\0';

		scnprintf(name, sizeof(name), "[%s]", line);

		err = process_module(arg, name, start);
		if (err)
			break;
	}
out:
	free(line);
	fclose(file);
	return err;
}

struct process_kallsyms_args {
	struct map *map;
	struct dso *dso;
};

bool symbol__is_idle(struct symbol *sym)
{
	const char * const idle_symbols[] = {
		"cpu_idle",
		"intel_idle",
		"default_idle",
		"native_safe_halt",
		"enter_idle",
		"exit_idle",
		"mwait_idle",
		"mwait_idle_with_hints",
		"poll_idle",
		"ppc64_runlatch_off",
		"pseries_dedicated_idle_sleep",
		NULL
	};

	int i;

	if (!sym)
		return false;

	for (i = 0; idle_symbols[i]; i++) {
		if (!strcmp(idle_symbols[i], sym->name))
			return true;
	}

	return false;
}

static int map__process_kallsym_symbol(void *arg, const char *name,
				       char type, u64 start)
{
	struct symbol *sym;
	struct process_kallsyms_args *a = arg;
	struct rb_root *root = &a->dso->symbols[a->map->type];

	if (!symbol_type__is_a(type, a->map->type))
		return 0;

	/*
	 * module symbols are not sorted so we add all
	 * symbols, setting length to 0, and rely on
	 * symbols__fixup_end() to fix it up.
	 */
	sym = symbol__new(start, 0, kallsyms2elf_type(type), name);
	if (sym == NULL)
		return -ENOMEM;
	/*
	 * We will pass the symbols to the filter later, in
	 * map__split_kallsyms, when we have split the maps per module
	 */
	symbols__insert(root, sym);

	return 0;
}

/*
 * Loads the function entries in /proc/kallsyms into kernel_map->dso,
 * so that we can in the next step set the symbol ->end address and then
 * call kernel_maps__split_kallsyms.
 */
static int dso__load_all_kallsyms(struct dso *dso, const char *filename,
				  struct map *map)
{
	struct process_kallsyms_args args = { .map = map, .dso = dso, };
	return kallsyms__parse(filename, &args, map__process_kallsym_symbol);
}

static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map,
					 symbol_filter_t filter)
{
	struct map_groups *kmaps = map__kmap(map)->kmaps;
	struct map *curr_map;
	struct symbol *pos;
	int count = 0, moved = 0;
	struct rb_root *root = &dso->symbols[map->type];
	struct rb_node *next = rb_first(root);

	while (next) {
		char *module;

		pos = rb_entry(next, struct symbol, rb_node);
		next = rb_next(&pos->rb_node);

		module = strchr(pos->name, '\t');
		if (module)
			*module = '\0';

		curr_map = map_groups__find(kmaps, map->type, pos->start);

		if (!curr_map || (filter && filter(curr_map, pos))) {
			rb_erase(&pos->rb_node, root);
			symbol__delete(pos);
		} else {
			pos->start -= curr_map->start - curr_map->pgoff;
			if (pos->end)
				pos->end -= curr_map->start - curr_map->pgoff;
			if (curr_map != map) {
				rb_erase(&pos->rb_node, root);
				symbols__insert(
					&curr_map->dso->symbols[curr_map->type],
					pos);
				++moved;
			} else {
				++count;
			}
		}
	}

	/* Symbols have been adjusted */
	dso->adjust_symbols = 1;

	return count + moved;
}

/*
 * Split the symbols into maps, making sure there are no overlaps, i.e. the
 * kernel range is broken in several maps, named [kernel].N, as we don't have
 * the original ELF section names that vmlinux has.
 */
static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta,
			       symbol_filter_t filter)
{
	struct map_groups *kmaps = map__kmap(map)->kmaps;
	struct machine *machine = kmaps->machine;
	struct map *curr_map = map;
	struct symbol *pos;
	int count = 0, moved = 0;
	struct rb_root *root = &dso->symbols[map->type];
	struct rb_node *next = rb_first(root);
	int kernel_range = 0;

	while (next) {
		char *module;

		pos = rb_entry(next, struct symbol, rb_node);
		next = rb_next(&pos->rb_node);

		module = strchr(pos->name, '\t');
		if (module) {
			if (!symbol_conf.use_modules)
				goto discard_symbol;

			*module++ = '\0';

			if (strcmp(curr_map->dso->short_name, module)) {
				if (curr_map != map &&
				    dso->kernel == DSO_TYPE_GUEST_KERNEL &&
				    machine__is_default_guest(machine)) {
					/*
					 * We assume all symbols of a module are
					 * continuous in kallsyms, so curr_map
					 * points to a module and all its
					 * symbols are in its kmap. Mark it as
					 * loaded.
					 */
					dso__set_loaded(curr_map->dso,
							curr_map->type);
				}

				curr_map = map_groups__find_by_name(kmaps,
							map->type, module);
				if (curr_map == NULL) {
					pr_debug("%s/proc/{kallsyms,modules} "
						 "inconsistency while looking "
						 "for \"%s\" module!\n",
						 machine->root_dir, module);
					curr_map = map;
					goto discard_symbol;
				}

				if (curr_map->dso->loaded &&
				    !machine__is_default_guest(machine))
					goto discard_symbol;
			}
			/*
			 * So that we look just like we get from .ko files,
			 * i.e. not prelinked, relative to map->start.
			 */
			pos->start = curr_map->map_ip(curr_map, pos->start);
			pos->end = curr_map->map_ip(curr_map, pos->end);
		} else if (curr_map != map) {
			char dso_name[PATH_MAX];
			struct dso *ndso;

			if (delta) {
				/* Kernel was relocated at boot time */
				pos->start -= delta;
				pos->end -= delta;
			}

			if (count == 0) {
				curr_map = map;
				goto filter_symbol;
			}

			if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
				snprintf(dso_name, sizeof(dso_name),
					 "[guest.kernel].%d",
					 kernel_range++);
			else
				snprintf(dso_name, sizeof(dso_name),
					 "[kernel].%d",
					 kernel_range++);

			ndso = dso__new(dso_name);
			if (ndso == NULL)
				return -1;

			ndso->kernel = dso->kernel;

			curr_map = map__new2(pos->start, ndso, map->type);
			if (curr_map == NULL) {
				dso__delete(ndso);
				return -1;
			}

			curr_map->map_ip = curr_map->unmap_ip = identity__map_ip;
			map_groups__insert(kmaps, curr_map);
			++kernel_range;
		} else if (delta) {
			/* Kernel was relocated at boot time */
			pos->start -= delta;
			pos->end -= delta;
		}
filter_symbol:
		if (filter && filter(curr_map, pos)) {
discard_symbol:		rb_erase(&pos->rb_node, root);
			symbol__delete(pos);
		} else {
			if (curr_map != map) {
				rb_erase(&pos->rb_node, root);
				symbols__insert(&curr_map->dso->symbols[curr_map->type], pos);
				++moved;
			} else
				++count;
		}
	}

	if (curr_map != map &&
	    dso->kernel == DSO_TYPE_GUEST_KERNEL &&
	    machine__is_default_guest(kmaps->machine)) {
		dso__set_loaded(curr_map->dso, curr_map->type);
	}

	return count + moved;
}

bool symbol__restricted_filename(const char *filename,
				 const char *restricted_filename)
{
	bool restricted = false;

	if (symbol_conf.kptr_restrict) {
		char *r = realpath(filename, NULL);

		if (r != NULL) {
			restricted = strcmp(r, restricted_filename) == 0;
			free(r);
			return restricted;
		}
	}

	return restricted;
}

struct module_info {
	struct rb_node rb_node;
	char *name;
	u64 start;
};

static void add_module(struct module_info *mi, struct rb_root *modules)
{
	struct rb_node **p = &modules->rb_node;
	struct rb_node *parent = NULL;
	struct module_info *m;

	while (*p != NULL) {
		parent = *p;
		m = rb_entry(parent, struct module_info, rb_node);
		if (strcmp(mi->name, m->name) < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&mi->rb_node, parent, p);
	rb_insert_color(&mi->rb_node, modules);
}

static void delete_modules(struct rb_root *modules)
{
	struct module_info *mi;
	struct rb_node *next = rb_first(modules);

	while (next) {
		mi = rb_entry(next, struct module_info, rb_node);
		next = rb_next(&mi->rb_node);
		rb_erase(&mi->rb_node, modules);
		zfree(&mi->name);
		free(mi);
	}
}

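/* Look up a module by name in the rb-tree built up by add_module(). */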
static struct module_info *find_module(const char *name,
				       struct rb_root *modules)
{
	struct rb_node *n = modules->rb_node;

	while (n) {
		struct module_info *m;
		int cmp;

		m = rb_entry(n, struct module_info, rb_node);
		cmp = strcmp(name, m->name);
		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return m;
	}

	return NULL;
}

static int __read_proc_modules(void *arg, const char *name, u64 start)
{
	struct rb_root *modules = arg;
	struct module_info *mi;

	mi = zalloc(sizeof(struct module_info));
	if (!mi)
		return -ENOMEM;

	mi->name = strdup(name);
	mi->start = start;

	if (!mi->name) {
		free(mi);
		return -ENOMEM;
	}

	add_module(mi, modules);

	return 0;
}

static int read_proc_modules(const char *filename, struct rb_root *modules)
{
	if (symbol__restricted_filename(filename, "/proc/modules"))
		return -1;

	if (modules__parse(filename, modules, __read_proc_modules)) {
		delete_modules(modules);
		return -1;
	}

	return 0;
}

int compare_proc_modules(const char *from, const char *to)
{
	struct rb_root from_modules = RB_ROOT;
	struct rb_root to_modules = RB_ROOT;
	struct rb_node *from_node, *to_node;
	struct module_info *from_m, *to_m;
	int ret = -1;

	if (read_proc_modules(from, &from_modules))
		return -1;

	if (read_proc_modules(to, &to_modules))
		goto out_delete_from;

	from_node = rb_first(&from_modules);
	to_node = rb_first(&to_modules);
	while (from_node) {
		if (!to_node)
			break;

		from_m = rb_entry(from_node, struct module_info, rb_node);
		to_m = rb_entry(to_node, struct module_info, rb_node);

		if (from_m->start != to_m->start ||
		    strcmp(from_m->name, to_m->name))
			break;

		from_node = rb_next(from_node);
		to_node = rb_next(to_node);
	}

	if (!from_node && !to_node)
		ret = 0;

	delete_modules(&to_modules);
out_delete_from:
	delete_modules(&from_modules);

	return ret;
}

static int do_validate_kcore_modules(const char *filename, struct map *map,
				     struct map_groups *kmaps)
{
	struct rb_root modules = RB_ROOT;
	struct map *old_map;
	int err;

	err = read_proc_modules(filename, &modules);
	if (err)
		return err;

	old_map = map_groups__first(kmaps, map->type);
	while (old_map) {
		struct map *next = map_groups__next(old_map);
		struct module_info *mi;

		if (old_map == map || old_map->start == map->start) {
			/* The kernel map */
			old_map = next;
			continue;
		}

		/* Module must be in memory at the same address */
		mi = find_module(old_map->dso->short_name, &modules);
		if (!mi || mi->start != old_map->start) {
			err = -EINVAL;
			goto out;
		}

		old_map = next;
	}
out:
	delete_modules(&modules);
	return err;
}

/*
 * If kallsyms is referenced by name then we look for filename in the same
 * directory.
 */
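/* e.g. base_name "modules" turns ".../kallsyms" into ".../modules" */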
static bool filename_from_kallsyms_filename(char *filename,
					    const char *base_name,
					    const char *kallsyms_filename)
{
	char *name;

	strcpy(filename, kallsyms_filename);
	name = strrchr(filename, '/');
	if (!name)
		return false;

	name += 1;

	if (!strcmp(name, "kallsyms")) {
		strcpy(name, base_name);
		return true;
	}

	return false;
}

static int validate_kcore_modules(const char *kallsyms_filename,
				  struct map *map)
{
	struct map_groups *kmaps = map__kmap(map)->kmaps;
	char modules_filename[PATH_MAX];

	if (!filename_from_kallsyms_filename(modules_filename, "modules",
					     kallsyms_filename))
		return -EINVAL;

	if (do_validate_kcore_modules(modules_filename, map, kmaps))
		return -EINVAL;

	return 0;
}

static int validate_kcore_addresses(const char *kallsyms_filename,
				    struct map *map)
{
	struct kmap *kmap = map__kmap(map);

	if (kmap->ref_reloc_sym && kmap->ref_reloc_sym->name) {
		u64 start;

		start = kallsyms__get_function_start(kallsyms_filename,
						     kmap->ref_reloc_sym->name);
		if (start != kmap->ref_reloc_sym->addr)
			return -EINVAL;
	}

	return validate_kcore_modules(kallsyms_filename, map);
}

struct kcore_mapfn_data {
	struct dso *dso;
	enum map_type type;
	struct list_head maps;
};

static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data)
{
	struct kcore_mapfn_data *md = data;
	struct map *map;

	map = map__new2(start, md->dso, md->type);
	if (map == NULL)
		return -ENOMEM;

	map->end = map->start + len;
	map->pgoff = pgoff;

	list_add(&map->node, &md->maps);

	return 0;
}

static int dso__load_kcore(struct dso *dso, struct map *map,
			   const char *kallsyms_filename)
{
	struct map_groups *kmaps = map__kmap(map)->kmaps;
	struct machine *machine = kmaps->machine;
	struct kcore_mapfn_data md;
	struct map *old_map, *new_map, *replacement_map = NULL;
	bool is_64_bit;
	int err, fd;
	char kcore_filename[PATH_MAX];
	struct symbol *sym;

	/* This function requires that the map is the kernel map */
	if (map != machine->vmlinux_maps[map->type])
		return -EINVAL;

	if (!filename_from_kallsyms_filename(kcore_filename, "kcore",
					     kallsyms_filename))
		return -EINVAL;

	/* Modules and kernel must be present at their original addresses */
	if (validate_kcore_addresses(kallsyms_filename, map))
		return -EINVAL;

	md.dso = dso;
	md.type = map->type;
	INIT_LIST_HEAD(&md.maps);

	fd = open(kcore_filename, O_RDONLY);
	if (fd < 0)
		return -EINVAL;

	/* Read new maps into temporary lists */
	err = file__read_maps(fd, md.type == MAP__FUNCTION, kcore_mapfn, &md,
			      &is_64_bit);
	if (err)
		goto out_err;

	if (list_empty(&md.maps)) {
		err = -EINVAL;
		goto out_err;
	}

	/* Remove old maps */
	old_map = map_groups__first(kmaps, map->type);
	while (old_map) {
		struct map *next = map_groups__next(old_map);

		if (old_map != map)
			map_groups__remove(kmaps, old_map);
		old_map = next;
	}

	/* Find the kernel map using the first symbol */
	sym = dso__first_symbol(dso, map->type);
	list_for_each_entry(new_map, &md.maps, node) {
		if (sym && sym->start >= new_map->start &&
		    sym->start < new_map->end) {
			replacement_map = new_map;
			break;
		}
	}

	if (!replacement_map)
		replacement_map = list_entry(md.maps.next, struct map, node);

	/* Add new maps */
	while (!list_empty(&md.maps)) {
		new_map = list_entry(md.maps.next, struct map, node);
		list_del(&new_map->node);
		if (new_map == replacement_map) {
			map->start = new_map->start;
			map->end = new_map->end;
			map->pgoff = new_map->pgoff;
			map->map_ip = new_map->map_ip;
			map->unmap_ip = new_map->unmap_ip;
			map__delete(new_map);
			/* Ensure maps are correctly ordered */
			map_groups__remove(kmaps, map);
			map_groups__insert(kmaps, map);
		} else {
			map_groups__insert(kmaps, new_map);
		}
	}

	/*
	 * Set the data type and long name so that kcore can be read via
	 * dso__data_read_addr().
	 */
	if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
		dso->binary_type = DSO_BINARY_TYPE__GUEST_KCORE;
	else
		dso->binary_type = DSO_BINARY_TYPE__KCORE;
	dso__set_long_name(dso, strdup(kcore_filename), true);

	close(fd);

	if (map->type == MAP__FUNCTION)
		pr_debug("Using %s for kernel object code\n", kcore_filename);
	else
		pr_debug("Using %s for kernel data\n", kcore_filename);

	return 0;

out_err:
	while (!list_empty(&md.maps)) {
		map = list_entry(md.maps.next, struct map, node);
		list_del(&map->node);
		map__delete(map);
	}
	close(fd);
	return -EINVAL;
}

/*
 * If the kernel is relocated at boot time, kallsyms won't match. Compute the
 * delta based on the relocation reference symbol.
 */
static int kallsyms__delta(struct map *map, const char *filename, u64 *delta)
{
	struct kmap *kmap = map__kmap(map);
	u64 addr;

	if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->name)
		return 0;

	addr = kallsyms__get_function_start(filename,
					    kmap->ref_reloc_sym->name);
	if (!addr)
		return -1;

	*delta = addr - kmap->ref_reloc_sym->addr;
	return 0;
}

int dso__load_kallsyms(struct dso *dso, const char *filename,
		       struct map *map, symbol_filter_t filter)
{
	u64 delta = 0;

	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
		return -1;

	if (dso__load_all_kallsyms(dso, filename, map) < 0)
		return -1;

	if (kallsyms__delta(map, filename, &delta))
		return -1;

	symbols__fixup_duplicate(&dso->symbols[map->type]);
	symbols__fixup_end(&dso->symbols[map->type]);

	if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
		dso->symtab_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
	else
		dso->symtab_type = DSO_BINARY_TYPE__KALLSYMS;

	if (!dso__load_kcore(dso, map, filename))
		return dso__split_kallsyms_for_kcore(dso, map, filter);
	else
		return dso__split_kallsyms(dso, map, delta, filter);
}

static int dso__load_perf_map(struct dso *dso, struct map *map,
			      symbol_filter_t filter)
{
	char *line = NULL;
	size_t n;
	FILE *file;
	int nr_syms = 0;

	file = fopen(dso->long_name, "r");
	if (file == NULL)
		goto out_failure;

	while (!feof(file)) {
		u64 start, size;
		struct symbol *sym;
		int line_len, len;

		line_len = getline(&line, &n, file);
		if (line_len < 0)
			break;

		if (!line)
			goto out_failure;

		line[--line_len] = '\0'; /* \n */

		len = hex2u64(line, &start);

		len++;
		if (len + 2 >= line_len)
			continue;

		len += hex2u64(line + len, &size);

		len++;
		if (len + 2 >= line_len)
			continue;

		sym = symbol__new(start, size, STB_GLOBAL, line + len);

		if (sym == NULL)
			goto out_delete_line;

		if (filter && filter(map, sym))
			symbol__delete(sym);
		else {
			symbols__insert(&dso->symbols[map->type], sym);
			nr_syms++;
		}
	}

	free(line);
	fclose(file);

	return nr_syms;

out_delete_line:
	free(line);
out_failure:
	return -1;
}

int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
{
	char *name;
	int ret = -1;
	u_int i;
	struct machine *machine;
	char *root_dir = (char *) "";
	int ss_pos = 0;
	struct symsrc ss_[2];
	struct symsrc *syms_ss = NULL, *runtime_ss = NULL;

	dso__set_loaded(dso, map->type);

	if (dso->kernel == DSO_TYPE_KERNEL)
		return dso__load_kernel_sym(dso, map, filter);
	else if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
		return dso__load_guest_kernel_sym(dso, map, filter);

	if (map->groups && map->groups->machine)
		machine = map->groups->machine;
	else
		machine = NULL;

	dso->adjust_symbols = 0;

	if (strncmp(dso->name, "/tmp/perf-", 10) == 0) {
		struct stat st;

		if (lstat(dso->name, &st) < 0)
			return -1;

		if (st.st_uid && (st.st_uid != geteuid())) {
			pr_warning("File %s not owned by current user or root, "
				   "ignoring it.\n", dso->name);
			return -1;
		}

		ret = dso__load_perf_map(dso, map, filter);
		dso->symtab_type = ret > 0 ? DSO_BINARY_TYPE__JAVA_JIT :
					     DSO_BINARY_TYPE__NOT_FOUND;
		return ret;
	}

	if (machine)
		root_dir = machine->root_dir;

	name = malloc(PATH_MAX);
	if (!name)
		return -1;

	/* Iterate over candidate debug images.
	 * Keep track of "interesting" ones (those which have a symtab, dynsym,
	 * and/or opd section) for processing.
	 */
	for (i = 0; i < DSO_BINARY_TYPE__SYMTAB_CNT; i++) {
		struct symsrc *ss = &ss_[ss_pos];
		bool next_slot = false;

		enum dso_binary_type symtab_type = binary_type_symtab[i];

		if (dso__read_binary_type_filename(dso, symtab_type,
						   root_dir, name, PATH_MAX))
			continue;

		/* Name is now the name of the next image to try */
		if (symsrc__init(ss, dso, name, symtab_type) < 0)
			continue;

		if (!syms_ss && symsrc__has_symtab(ss)) {
			syms_ss = ss;
			next_slot = true;
			if (!dso->symsrc_filename)
				dso->symsrc_filename = strdup(name);
		}

		if (!runtime_ss && symsrc__possibly_runtime(ss)) {
			runtime_ss = ss;
			next_slot = true;
		}

		if (next_slot) {
			ss_pos++;

			if (syms_ss && runtime_ss)
				break;
		}

	}

	if (!runtime_ss && !syms_ss)
		goto out_free;

	if (runtime_ss && !syms_ss) {
		syms_ss = runtime_ss;
	}

	/* We'll have to hope for the best */
	if (!runtime_ss && syms_ss)
		runtime_ss = syms_ss;

	if (syms_ss) {
		int km;

		km = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
		     dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
		ret = dso__load_sym(dso, map, syms_ss, runtime_ss, filter, km);
	} else {
		ret = -1;
	}

	if (ret > 0) {
		int nr_plt;

		nr_plt = dso__synthesize_plt_symbols(dso, runtime_ss, map, filter);
		if (nr_plt > 0)
			ret += nr_plt;
	}

	for (; ss_pos > 0; ss_pos--)
		symsrc__destroy(&ss_[ss_pos - 1]);
out_free:
	free(name);
	if (ret < 0 && strstr(dso->name, " (deleted)") != NULL)
		return 0;
	return ret;
}

struct map *map_groups__find_by_name(struct map_groups *mg,
				     enum map_type type, const char *name)
{
	struct rb_node *nd;

	for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) {
		struct map *map = rb_entry(nd, struct map, rb_node);

		if (map->dso && strcmp(map->dso->short_name, name) == 0)
			return map;
	}

	return NULL;
}

int dso__load_vmlinux(struct dso *dso, struct map *map,
		      const char *vmlinux, bool vmlinux_allocated,
		      symbol_filter_t filter)
{
	int err = -1;
	struct symsrc ss;
	char symfs_vmlinux[PATH_MAX];
	enum dso_binary_type symtab_type;

	if (vmlinux[0] == '/')
		snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s", vmlinux);
	else
		snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s%s",
			 symbol_conf.symfs, vmlinux);

	if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
		symtab_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
	else
		symtab_type = DSO_BINARY_TYPE__VMLINUX;

	if (symsrc__init(&ss, dso, symfs_vmlinux, symtab_type))
		return -1;

	err = dso__load_sym(dso, map, &ss, &ss, filter, 0);
	symsrc__destroy(&ss);

	if (err > 0) {
		if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
			dso->binary_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
		else
			dso->binary_type = DSO_BINARY_TYPE__VMLINUX;
		dso__set_long_name(dso, vmlinux, vmlinux_allocated);
		dso__set_loaded(dso, map->type);
		pr_debug("Using %s for symbols\n", symfs_vmlinux);
	}

	return err;
}

int dso__load_vmlinux_path(struct dso *dso, struct map *map,
			   symbol_filter_t filter)
{
	int i, err = 0;
	char *filename;

	pr_debug("Looking at the vmlinux_path (%d entries long)\n",
		 vmlinux_path__nr_entries + 1);

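	/* Try a vmlinux matching this dso's build-id in the build-id cache first. */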
	filename = dso__build_id_filename(dso, NULL, 0);
	if (filename != NULL) {
		err = dso__load_vmlinux(dso, map, filename, true, filter);
		if (err > 0)
			goto out;
		free(filename);
	}

	for (i = 0; i < vmlinux_path__nr_entries; ++i) {
		err = dso__load_vmlinux(dso, map, vmlinux_path[i], false, filter);
		if (err > 0)
			break;
	}
out:
	return err;
}

static int find_matching_kcore(struct map *map, char *dir, size_t dir_sz)
{
	char kallsyms_filename[PATH_MAX];
	struct dirent *dent;
	int ret = -1;
	DIR *d;

	d = opendir(dir);
	if (!d)
		return -1;

	while (1) {
		dent = readdir(d);
		if (!dent)
			break;
		if (dent->d_type != DT_DIR)
			continue;
		scnprintf(kallsyms_filename, sizeof(kallsyms_filename),
			  "%s/%s/kallsyms", dir, dent->d_name);
		if (!validate_kcore_addresses(kallsyms_filename, map)) {
			strlcpy(dir, kallsyms_filename, dir_sz);
			ret = 0;
			break;
		}
	}

	closedir(d);

	return ret;
}

static char *dso__find_kallsyms(struct dso *dso, struct map *map)
{
	u8 host_build_id[BUILD_ID_SIZE];
	char sbuild_id[BUILD_ID_SIZE * 2 + 1];
	bool is_host = false;
	char path[PATH_MAX];

	if (!dso->has_build_id) {
		/*
		 * Last resort, if we don't have a build-id and couldn't find
		 * any vmlinux file, try the running kernel kallsyms table.
		 */
		goto proc_kallsyms;
	}

	if (sysfs__read_build_id("/sys/kernel/notes", host_build_id,
				 sizeof(host_build_id)) == 0)
		is_host = dso__build_id_equal(dso, host_build_id);

	build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);

	scnprintf(path, sizeof(path), "%s/[kernel.kcore]/%s", buildid_dir,
		  sbuild_id);

	/* Use /proc/kallsyms if possible */
	if (is_host) {
		DIR *d;
		int fd;

		/* If no cached kcore go with /proc/kallsyms */
		d = opendir(path);
		if (!d)
			goto proc_kallsyms;
		closedir(d);

		/*
		 * Do not check the build-id cache, until we know we cannot use
		 * /proc/kcore.
		 */
		fd = open("/proc/kcore", O_RDONLY);
		if (fd != -1) {
			close(fd);
			/* If module maps match go with /proc/kallsyms */
			if (!validate_kcore_addresses("/proc/kallsyms", map))
				goto proc_kallsyms;
		}

		/* Find kallsyms in build-id cache with kcore */
		if (!find_matching_kcore(map, path, sizeof(path)))
			return strdup(path);

		goto proc_kallsyms;
	}

	/* Find kallsyms in build-id cache with kcore */
	if (!find_matching_kcore(map, path, sizeof(path)))
		return strdup(path);

	scnprintf(path, sizeof(path), "%s/[kernel.kallsyms]/%s",
		  buildid_dir, sbuild_id);

	if (access(path, F_OK)) {
		pr_err("No kallsyms or vmlinux with build-id %s was found\n",
		       sbuild_id);
		return NULL;
	}

	return strdup(path);

proc_kallsyms:
	return strdup("/proc/kallsyms");
}

static int dso__load_kernel_sym(struct dso *dso, struct map *map,
				symbol_filter_t filter)
{
	int err;
	const char *kallsyms_filename = NULL;
	char *kallsyms_allocated_filename = NULL;
	/*
	 * Step 1: if the user specified a kallsyms or vmlinux filename, use
	 * it and only it, reporting errors to the user if it cannot be used.
	 *
	 * For instance, try to analyse an ARM perf.data file _without_ a
	 * build-id, or if the user specifies the wrong path to the right
	 * vmlinux file, obviously we can't fall back to another vmlinux (an
	 * x86_64 one, on the machine where analysis is being performed, say),
	 * or worse, /proc/kallsyms.
	 *
	 * If the specified file _has_ a build-id and there is a build-id
	 * section in the perf.data file, we will still do the expected
	 * validation in dso__load_vmlinux and will bail out if they don't
	 * match.
	 */
	if (symbol_conf.kallsyms_name != NULL) {
		kallsyms_filename = symbol_conf.kallsyms_name;
		goto do_kallsyms;
	}

	if (!symbol_conf.ignore_vmlinux && symbol_conf.vmlinux_name != NULL) {
		return dso__load_vmlinux(dso, map, symbol_conf.vmlinux_name,
					 false, filter);
	}

	if (!symbol_conf.ignore_vmlinux && vmlinux_path != NULL) {
		err = dso__load_vmlinux_path(dso, map, filter);
		if (err > 0)
			return err;
	}

	/* do not try local files if a symfs was given */
	if (symbol_conf.symfs[0] != 0)
		return -1;

	kallsyms_allocated_filename = dso__find_kallsyms(dso, map);
	if (!kallsyms_allocated_filename)
		return -1;

	kallsyms_filename = kallsyms_allocated_filename;

do_kallsyms:
	err = dso__load_kallsyms(dso, kallsyms_filename, map, filter);
	if (err > 0)
		pr_debug("Using %s for symbols\n", kallsyms_filename);
	free(kallsyms_allocated_filename);

	if (err > 0 && !dso__is_kcore(dso)) {
		dso__set_long_name(dso, "[kernel.kallsyms]", false);
		map__fixup_start(map);
		map__fixup_end(map);
	}

	return err;
}

static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map,
				      symbol_filter_t filter)
{
	int err;
	const char *kallsyms_filename = NULL;
	struct machine *machine;
	char path[PATH_MAX];

	if (!map->groups) {
		pr_debug("Guest kernel map doesn't point to any map groups\n");
		return -1;
	}
	machine = map->groups->machine;

	if (machine__is_default_guest(machine)) {
		/*
		 * if the user specified a vmlinux filename, use it and only
		 * it, reporting errors to the user if it cannot be used.
		 * Otherwise use the guest_kallsyms file supplied by the user
		 * on the command line.
		 */
		if (symbol_conf.default_guest_vmlinux_name != NULL) {
			err = dso__load_vmlinux(dso, map,
						symbol_conf.default_guest_vmlinux_name,
						false, filter);
			return err;
		}

		kallsyms_filename = symbol_conf.default_guest_kallsyms;
		if (!kallsyms_filename)
			return -1;
	} else {
		sprintf(path, "%s/proc/kallsyms", machine->root_dir);
		kallsyms_filename = path;
	}

	err = dso__load_kallsyms(dso, kallsyms_filename, map, filter);
	if (err > 0)
		pr_debug("Using %s for symbols\n", kallsyms_filename);
	if (err > 0 && !dso__is_kcore(dso)) {
		machine__mmap_name(machine, path, sizeof(path));
		dso__set_long_name(dso, strdup(path), true);
		map__fixup_start(map);
		map__fixup_end(map);
	}

	return err;
}

static void vmlinux_path__exit(void)
{
	while (--vmlinux_path__nr_entries >= 0)
		zfree(&vmlinux_path[vmlinux_path__nr_entries]);

	zfree(&vmlinux_path);
}

static int vmlinux_path__init(void)
{
	struct utsname uts;
	char bf[PATH_MAX];

	vmlinux_path = malloc(sizeof(char *) * 5);
	if (vmlinux_path == NULL)
		return -1;

	vmlinux_path[vmlinux_path__nr_entries] = strdup("vmlinux");
	if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
		goto out_fail;
	++vmlinux_path__nr_entries;
	vmlinux_path[vmlinux_path__nr_entries] = strdup("/boot/vmlinux");
	if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
		goto out_fail;
	++vmlinux_path__nr_entries;

	/* only try running kernel version if no symfs was given */
	if (symbol_conf.symfs[0] != 0)
		return 0;

	if (uname(&uts) < 0)
		return -1;

	snprintf(bf, sizeof(bf), "/boot/vmlinux-%s", uts.release);
	vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
	if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
		goto out_fail;
	++vmlinux_path__nr_entries;
	snprintf(bf, sizeof(bf), "/lib/modules/%s/build/vmlinux", uts.release);
	vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
	if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
		goto out_fail;
	++vmlinux_path__nr_entries;
	snprintf(bf, sizeof(bf), "/usr/lib/debug/lib/modules/%s/vmlinux",
		 uts.release);
	vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
	if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
		goto out_fail;
	++vmlinux_path__nr_entries;

	return 0;

out_fail:
	vmlinux_path__exit();
	return -1;
}

int setup_list(struct strlist **list, const char *list_str,
	       const char *list_name)
{
	if (list_str == NULL)
		return 0;

	*list = strlist__new(true, list_str);
	if (!*list) {
		pr_err("problems parsing %s list\n", list_name);
		return -1;
	}
	return 0;
}

static bool symbol__read_kptr_restrict(void)
{
	bool value = false;

	if (geteuid() != 0) {
		FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r");
		if (fp != NULL) {
			char line[8];

			if (fgets(line, sizeof(line), fp) != NULL)
				value = atoi(line) != 0;

			fclose(fp);
		}
	}

	return value;
}

int symbol__init(void)
{
	const char *symfs;

	if (symbol_conf.initialized)
		return 0;

	symbol_conf.priv_size = PERF_ALIGN(symbol_conf.priv_size, sizeof(u64));

	symbol__elf_init();

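	/*
	 * When sorting by name, each symbol also needs room for the
	 * by-name rb-tree node, so grow the per-symbol private area.
	 */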
	if (symbol_conf.sort_by_name)
		symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) -
					  sizeof(struct symbol));

	if (symbol_conf.try_vmlinux_path && vmlinux_path__init() < 0)
		return -1;

	if (symbol_conf.field_sep && *symbol_conf.field_sep == '.') {
		pr_err("'.' is the only invalid --field-separator argument\n");
		return -1;
	}

	if (setup_list(&symbol_conf.dso_list,
		       symbol_conf.dso_list_str, "dso") < 0)
		return -1;

	if (setup_list(&symbol_conf.comm_list,
		       symbol_conf.comm_list_str, "comm") < 0)
		goto out_free_dso_list;

	if (setup_list(&symbol_conf.sym_list,
		       symbol_conf.sym_list_str, "symbol") < 0)
		goto out_free_comm_list;

	/*
	 * A path to symbols of "/" is identical to "",
	 * reset it here for simplicity.
	 */
	symfs = realpath(symbol_conf.symfs, NULL);
	if (symfs == NULL)
		symfs = symbol_conf.symfs;
	if (strcmp(symfs, "/") == 0)
		symbol_conf.symfs = "";
	if (symfs != symbol_conf.symfs)
		free((void *)symfs);

	symbol_conf.kptr_restrict = symbol__read_kptr_restrict();

	symbol_conf.initialized = true;
	return 0;

out_free_comm_list:
	strlist__delete(symbol_conf.comm_list);
out_free_dso_list:
	strlist__delete(symbol_conf.dso_list);
	return -1;
}

void symbol__exit(void)
{
	if (!symbol_conf.initialized)
		return;
	strlist__delete(symbol_conf.sym_list);
	strlist__delete(symbol_conf.dso_list);
	strlist__delete(symbol_conf.comm_list);
	vmlinux_path__exit();
	symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL;
	symbol_conf.initialized = false;
}