#include <dirent.h>
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/param.h>
#include <fcntl.h>
#include <unistd.h>
#include <inttypes.h>
#include "build-id.h"
#include "util.h"
#include "debug.h"
#include "machine.h"
#include "symbol.h"
#include "strlist.h"

#include <elf.h>
#include <limits.h>
#include <symbol/kallsyms.h>
#include <sys/utsname.h>

static int dso__load_kernel_sym(struct dso *dso, struct map *map,
				symbol_filter_t filter);
static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map,
				      symbol_filter_t filter);
int vmlinux_path__nr_entries;
char **vmlinux_path;

struct symbol_conf symbol_conf = {
	.use_modules		= true,
	.try_vmlinux_path	= true,
	.annotate_src		= true,
	.demangle		= true,
	.symfs			= "",
};

static enum dso_binary_type binary_type_symtab[] = {
	DSO_BINARY_TYPE__KALLSYMS,
	DSO_BINARY_TYPE__GUEST_KALLSYMS,
	DSO_BINARY_TYPE__JAVA_JIT,
	DSO_BINARY_TYPE__DEBUGLINK,
	DSO_BINARY_TYPE__BUILD_ID_CACHE,
	DSO_BINARY_TYPE__FEDORA_DEBUGINFO,
	DSO_BINARY_TYPE__UBUNTU_DEBUGINFO,
	DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
	DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
	DSO_BINARY_TYPE__GUEST_KMODULE,
	DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE,
	DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
	DSO_BINARY_TYPE__NOT_FOUND,
};

#define DSO_BINARY_TYPE__SYMTAB_CNT ARRAY_SIZE(binary_type_symtab)

bool symbol_type__is_a(char symbol_type, enum map_type map_type)
{
	symbol_type = toupper(symbol_type);

	switch (map_type) {
	case MAP__FUNCTION:
		return symbol_type == 'T' || symbol_type == 'W';
	case MAP__VARIABLE:
		return symbol_type == 'D';
	default:
		return false;
	}
}

static int prefix_underscores_count(const char *str)
{
	const char *tail = str;

	while (*tail == '_')
		tail++;

	return tail - str;
}

#define SYMBOL_A 0
#define SYMBOL_B 1

static int choose_best_symbol(struct symbol *syma, struct symbol *symb)
{
	s64 a;
	s64 b;
	size_t na, nb;

	/* Prefer a symbol with non zero length */
	a = syma->end - syma->start;
	b = symb->end - symb->start;
	if ((b == 0) && (a > 0))
		return SYMBOL_A;
	else if ((a == 0) && (b > 0))
		return SYMBOL_B;

	/* Prefer a non weak symbol over a weak one */
	a = syma->binding == STB_WEAK;
	b = symb->binding == STB_WEAK;
	if (b && !a)
		return SYMBOL_A;
	if (a && !b)
		return SYMBOL_B;

	/* Prefer a global symbol over a non global one */
	a = syma->binding == STB_GLOBAL;
	b = symb->binding == STB_GLOBAL;
	if (a && !b)
		return SYMBOL_A;
	if (b && !a)
		return SYMBOL_B;

	/* Prefer a symbol with less underscores */
	a = prefix_underscores_count(syma->name);
	b = prefix_underscores_count(symb->name);
	if (b > a)
		return SYMBOL_A;
	else if (a > b)
		return SYMBOL_B;

	/* Choose the symbol with the longest name */
	na = strlen(syma->name);
	nb = strlen(symb->name);
	if (na > nb)
		return SYMBOL_A;
	else if (na < nb)
		return SYMBOL_B;

	/* Avoid "SyS" kernel syscall aliases */
	if (na >= 3 && !strncmp(syma->name, "SyS", 3))
		return SYMBOL_B;
	if (na >= 10 && !strncmp(syma->name, "compat_SyS", 10))
		return SYMBOL_B;

	return SYMBOL_A;
}

void symbols__fixup_duplicate(struct rb_root *symbols)
{
	struct rb_node *nd;
	struct symbol *curr, *next;

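	/*
	 * Walk the symbols in address order; whenever two adjacent entries
	 * share a start address, keep only the one choose_best_symbol()
	 * prefers and drop the other.
	 */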
	nd = rb_first(symbols);

	while (nd) {
		curr = rb_entry(nd, struct symbol, rb_node);
again:
		nd = rb_next(&curr->rb_node);
		next = rb_entry(nd, struct symbol, rb_node);

		if (!nd)
			break;

		if (curr->start != next->start)
			continue;

		if (choose_best_symbol(curr, next) == SYMBOL_A) {
			rb_erase(&next->rb_node, symbols);
			symbol__delete(next);
			goto again;
		} else {
			nd = rb_next(&curr->rb_node);
			rb_erase(&curr->rb_node, symbols);
			symbol__delete(curr);
		}
	}
}

void symbols__fixup_end(struct rb_root *symbols)
{
	struct rb_node *nd, *prevnd = rb_first(symbols);
	struct symbol *curr, *prev;

	if (prevnd == NULL)
		return;

	curr = rb_entry(prevnd, struct symbol, rb_node);

	for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {
		prev = curr;
		curr = rb_entry(nd, struct symbol, rb_node);

		if (prev->end == prev->start && prev->end != curr->start)
			prev->end = curr->start - 1;
	}

	/* Last entry */
	if (curr->end == curr->start)
		curr->end = roundup(curr->start, 4096);
}

void __map_groups__fixup_end(struct map_groups *mg, enum map_type type)
{
	struct map *prev, *curr;
	struct rb_node *nd, *prevnd = rb_first(&mg->maps[type]);

	if (prevnd == NULL)
		return;

	curr = rb_entry(prevnd, struct map, rb_node);

	for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {
		prev = curr;
		curr = rb_entry(nd, struct map, rb_node);
		prev->end = curr->start - 1;
	}

	/*
	 * We still don't have the actual symbols, so guess the
	 * last map's final address.
	 */
	curr->end = ~0ULL;
}

struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name)
{
	size_t namelen = strlen(name) + 1;
	struct symbol *sym = calloc(1, (symbol_conf.priv_size +
					sizeof(*sym) + namelen));
	if (sym == NULL)
		return NULL;

	if (symbol_conf.priv_size)
		sym = ((void *)sym) + symbol_conf.priv_size;

	sym->start = start;
	sym->end = len ? start + len - 1 : start;
	sym->binding = binding;
	sym->namelen = namelen - 1;

	pr_debug4("%s: %s %#" PRIx64 "-%#" PRIx64 "\n",
		  __func__, name, start, sym->end);
	memcpy(sym->name, name, namelen);

	return sym;
}

void symbol__delete(struct symbol *sym)
{
	free(((void *)sym) - symbol_conf.priv_size);
}

size_t symbol__fprintf(struct symbol *sym, FILE *fp)
{
	return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %c %s\n",
		       sym->start, sym->end,
		       sym->binding == STB_GLOBAL ? 'g' :
		       sym->binding == STB_LOCAL ? 'l' : 'w',
		       sym->name);
}

size_t symbol__fprintf_symname_offs(const struct symbol *sym,
				    const struct addr_location *al, FILE *fp)
{
	unsigned long offset;
	size_t length;

	if (sym && sym->name) {
		length = fprintf(fp, "%s", sym->name);
		if (al) {
			if (al->addr < sym->end)
				offset = al->addr - sym->start;
			else
				offset = al->addr - al->map->start - sym->start;
			length += fprintf(fp, "+0x%lx", offset);
		}
		return length;
	} else
		return fprintf(fp, "[unknown]");
}

size_t symbol__fprintf_symname(const struct symbol *sym, FILE *fp)
{
	return symbol__fprintf_symname_offs(sym, NULL, fp);
}

void symbols__delete(struct rb_root *symbols)
{
	struct symbol *pos;
	struct rb_node *next = rb_first(symbols);

	while (next) {
		pos = rb_entry(next, struct symbol, rb_node);
		next = rb_next(&pos->rb_node);
		rb_erase(&pos->rb_node, symbols);
		symbol__delete(pos);
	}
}

void symbols__insert(struct rb_root *symbols, struct symbol *sym)
{
	struct rb_node **p = &symbols->rb_node;
	struct rb_node *parent = NULL;
	const u64 ip = sym->start;
	struct symbol *s;

	while (*p != NULL) {
		parent = *p;
		s = rb_entry(parent, struct symbol, rb_node);
		if (ip < s->start)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&sym->rb_node, parent, p);
	rb_insert_color(&sym->rb_node, symbols);
}

static struct symbol *symbols__find(struct rb_root *symbols, u64 ip)
{
	struct rb_node *n;

	if (symbols == NULL)
		return NULL;

	n = symbols->rb_node;

	while (n) {
		struct symbol *s = rb_entry(n, struct symbol, rb_node);

		if (ip < s->start)
			n = n->rb_left;
		else if (ip > s->end)
			n = n->rb_right;
		else
			return s;
	}

	return NULL;
}

static struct symbol *symbols__first(struct rb_root *symbols)
{
	struct rb_node *n = rb_first(symbols);

	if (n)
		return rb_entry(n, struct symbol, rb_node);

	return NULL;
}

struct symbol_name_rb_node {
	struct rb_node	rb_node;
	struct symbol	sym;
};

static void symbols__insert_by_name(struct rb_root *symbols, struct symbol *sym)
{
	struct rb_node **p = &symbols->rb_node;
	struct rb_node *parent = NULL;
	struct symbol_name_rb_node *symn, *s;

	symn = container_of(sym, struct symbol_name_rb_node, sym);

	while (*p != NULL) {
		parent = *p;
		s = rb_entry(parent, struct symbol_name_rb_node, rb_node);
		if (strcmp(sym->name, s->sym.name) < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&symn->rb_node, parent, p);
	rb_insert_color(&symn->rb_node, symbols);
}

static void symbols__sort_by_name(struct rb_root *symbols,
				  struct rb_root *source)
{
	struct rb_node *nd;

	for (nd = rb_first(source); nd; nd = rb_next(nd)) {
		struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
		symbols__insert_by_name(symbols, pos);
	}
}

static struct symbol *symbols__find_by_name(struct rb_root *symbols,
					    const char *name)
{
	struct rb_node *n;

	if (symbols == NULL)
		return NULL;

	n = symbols->rb_node;

	while (n) {
		struct symbol_name_rb_node *s;
		int cmp;

		s = rb_entry(n, struct symbol_name_rb_node, rb_node);
		cmp = strcmp(name, s->sym.name);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return &s->sym;
	}

	return NULL;
}

struct symbol *dso__find_symbol(struct dso *dso,
				enum map_type type, u64 addr)
{
	return symbols__find(&dso->symbols[type], addr);
}

static struct symbol *dso__first_symbol(struct dso *dso, enum map_type type)
{
	return symbols__first(&dso->symbols[type]);
}

struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type,
					const char *name)
{
	return symbols__find_by_name(&dso->symbol_names[type], name);
}

void dso__sort_by_name(struct dso *dso, enum map_type type)
{
	dso__set_sorted_by_name(dso, type);
	return symbols__sort_by_name(&dso->symbol_names[type],
				     &dso->symbols[type]);
}

size_t dso__fprintf_symbols_by_name(struct dso *dso,
				    enum map_type type, FILE *fp)
{
	size_t ret = 0;
	struct rb_node *nd;
	struct symbol_name_rb_node *pos;

	for (nd = rb_first(&dso->symbol_names[type]); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct symbol_name_rb_node, rb_node);
		fprintf(fp, "%s\n", pos->sym.name);
	}

	return ret;
}

int modules__parse(const char *filename, void *arg,
		   int (*process_module)(void *arg, const char *name,
					 u64 start))
{
	char *line = NULL;
	size_t n;
	FILE *file;
	int err = 0;

	file = fopen(filename, "r");
	if (file == NULL)
		return -1;

	while (1) {
		char name[PATH_MAX];
		u64 start;
		char *sep;
		ssize_t line_len;

		line_len = getline(&line, &n, file);
		if (line_len < 0) {
			if (feof(file))
				break;
			err = -1;
			goto out;
		}

		if (!line) {
			err = -1;
			goto out;
		}

		line[--line_len] = '\0'; /* \n */

		sep = strrchr(line, 'x');
		if (sep == NULL)
			continue;

		hex2u64(sep + 1, &start);

		sep = strchr(line, ' ');
		if (sep == NULL)
			continue;

		*sep = '\0';

		scnprintf(name, sizeof(name), "[%s]", line);

		err = process_module(arg, name, start);
		if (err)
			break;
	}
out:
	free(line);
	fclose(file);
	return err;
}

struct process_kallsyms_args {
	struct map *map;
	struct dso *dso;
};

bool symbol__is_idle(struct symbol *sym)
{
	const char * const idle_symbols[] = {
		"cpu_idle",
		"intel_idle",
		"default_idle",
		"native_safe_halt",
		"enter_idle",
		"exit_idle",
		"mwait_idle",
		"mwait_idle_with_hints",
		"poll_idle",
		"ppc64_runlatch_off",
		"pseries_dedicated_idle_sleep",
		NULL
	};

	int i;

	if (!sym)
		return false;

	for (i = 0; idle_symbols[i]; i++) {
		if (!strcmp(idle_symbols[i], sym->name))
			return true;
	}

	return false;
}

static int map__process_kallsym_symbol(void *arg, const char *name,
				       char type, u64 start)
{
	struct symbol *sym;
	struct process_kallsyms_args *a = arg;
	struct rb_root *root = &a->dso->symbols[a->map->type];

	if (!symbol_type__is_a(type, a->map->type))
		return 0;

	/*
	 * module symbols are not sorted so we add all
	 * symbols, setting length to 0, and rely on
	 * symbols__fixup_end() to fix it up.
	 */
	sym = symbol__new(start, 0, kallsyms2elf_type(type), name);
	if (sym == NULL)
		return -ENOMEM;
	/*
	 * We will pass the symbols to the filter later, in
	 * map__split_kallsyms, when we have split the maps per module
	 */
	symbols__insert(root, sym);

	return 0;
}

/*
 * Loads the function entries in /proc/kallsyms into kernel_map->dso,
 * so that we can in the next step set the symbol ->end address and then
 * call kernel_maps__split_kallsyms.
 */
static int dso__load_all_kallsyms(struct dso *dso, const char *filename,
				  struct map *map)
{
	struct process_kallsyms_args args = { .map = map, .dso = dso, };
	return kallsyms__parse(filename, &args, map__process_kallsym_symbol);
}

static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map,
					 symbol_filter_t filter)
{
	struct map_groups *kmaps = map__kmap(map)->kmaps;
	struct map *curr_map;
	struct symbol *pos;
	int count = 0, moved = 0;
	struct rb_root *root = &dso->symbols[map->type];
	struct rb_node *next = rb_first(root);

	while (next) {
		char *module;

		pos = rb_entry(next, struct symbol, rb_node);
		next = rb_next(&pos->rb_node);

		module = strchr(pos->name, '\t');
		if (module)
			*module = '\0';

		curr_map = map_groups__find(kmaps, map->type, pos->start);

		if (!curr_map || (filter && filter(curr_map, pos))) {
			rb_erase(&pos->rb_node, root);
			symbol__delete(pos);
		} else {
			pos->start -= curr_map->start - curr_map->pgoff;
			if (pos->end)
				pos->end -= curr_map->start - curr_map->pgoff;
			if (curr_map != map) {
				rb_erase(&pos->rb_node, root);
				symbols__insert(
					&curr_map->dso->symbols[curr_map->type],
					pos);
				++moved;
			} else {
				++count;
			}
		}
	}

	/* Symbols have been adjusted */
	dso->adjust_symbols = 1;

	return count + moved;
}

/*
 * Split the symbols into maps, making sure there are no overlaps, i.e. the
 * kernel range is broken in several maps, named [kernel].N, as we don't have
 * the original ELF section names vmlinux have.
 */
static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta,
			       symbol_filter_t filter)
{
	struct map_groups *kmaps = map__kmap(map)->kmaps;
	struct machine *machine = kmaps->machine;
	struct map *curr_map = map;
	struct symbol *pos;
	int count = 0, moved = 0;
	struct rb_root *root = &dso->symbols[map->type];
	struct rb_node *next = rb_first(root);
	int kernel_range = 0;

	while (next) {
		char *module;

		pos = rb_entry(next, struct symbol, rb_node);
		next = rb_next(&pos->rb_node);

		module = strchr(pos->name, '\t');
		if (module) {
			if (!symbol_conf.use_modules)
				goto discard_symbol;

			*module++ = '\0';

			if (strcmp(curr_map->dso->short_name, module)) {
				if (curr_map != map &&
				    dso->kernel == DSO_TYPE_GUEST_KERNEL &&
				    machine__is_default_guest(machine)) {
					/*
					 * We assume all symbols of a module
					 * are contiguous in kallsyms, so
					 * curr_map points to a module and all
					 * its symbols are in its kmap. Mark it
					 * as loaded.
					 */
					dso__set_loaded(curr_map->dso,
							curr_map->type);
				}

				curr_map = map_groups__find_by_name(kmaps,
						map->type, module);
				if (curr_map == NULL) {
					pr_debug("%s/proc/{kallsyms,modules} "
						 "inconsistency while looking "
						 "for \"%s\" module!\n",
						 machine->root_dir, module);
					curr_map = map;
					goto discard_symbol;
				}

				if (curr_map->dso->loaded &&
				    !machine__is_default_guest(machine))
					goto discard_symbol;
			}
			/*
			 * So that we look just like we get from .ko files,
			 * i.e. not prelinked, relative to map->start.
			 */
			pos->start = curr_map->map_ip(curr_map, pos->start);
			pos->end = curr_map->map_ip(curr_map, pos->end);
		} else if (curr_map != map) {
			char dso_name[PATH_MAX];
			struct dso *ndso;

			if (delta) {
				/* Kernel was relocated at boot time */
				pos->start -= delta;
				pos->end -= delta;
			}

			if (count == 0) {
				curr_map = map;
				goto filter_symbol;
			}

			if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
				snprintf(dso_name, sizeof(dso_name),
					 "[guest.kernel].%d",
					 kernel_range++);
			else
				snprintf(dso_name, sizeof(dso_name),
					 "[kernel].%d",
					 kernel_range++);

			ndso = dso__new(dso_name);
			if (ndso == NULL)
				return -1;

			ndso->kernel = dso->kernel;

			curr_map = map__new2(pos->start, ndso, map->type);
			if (curr_map == NULL) {
				dso__delete(ndso);
				return -1;
			}

			curr_map->map_ip = curr_map->unmap_ip = identity__map_ip;
			map_groups__insert(kmaps, curr_map);
			++kernel_range;
		} else if (delta) {
			/* Kernel was relocated at boot time */
			pos->start -= delta;
			pos->end -= delta;
		}
filter_symbol:
		if (filter && filter(curr_map, pos)) {
discard_symbol:		rb_erase(&pos->rb_node, root);
			symbol__delete(pos);
		} else {
			if (curr_map != map) {
				rb_erase(&pos->rb_node, root);
				symbols__insert(&curr_map->dso->symbols[curr_map->type], pos);
				++moved;
			} else
				++count;
		}
	}

	if (curr_map != map &&
	    dso->kernel == DSO_TYPE_GUEST_KERNEL &&
	    machine__is_default_guest(kmaps->machine)) {
		dso__set_loaded(curr_map->dso, curr_map->type);
	}

	return count + moved;
}

bool symbol__restricted_filename(const char *filename,
				 const char *restricted_filename)
{
	bool restricted = false;

	if (symbol_conf.kptr_restrict) {
		char *r = realpath(filename, NULL);

		if (r != NULL) {
			restricted = strcmp(r, restricted_filename) == 0;
			free(r);
			return restricted;
		}
	}

	return restricted;
}

struct module_info {
	struct rb_node rb_node;
	char *name;
	u64 start;
};

static void add_module(struct module_info *mi, struct rb_root *modules)
{
	struct rb_node **p = &modules->rb_node;
	struct rb_node *parent = NULL;
	struct module_info *m;

	while (*p != NULL) {
		parent = *p;
		m = rb_entry(parent, struct module_info, rb_node);
		if (strcmp(mi->name, m->name) < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&mi->rb_node, parent, p);
	rb_insert_color(&mi->rb_node, modules);
}

static void delete_modules(struct rb_root *modules)
{
	struct module_info *mi;
	struct rb_node *next = rb_first(modules);

	while (next) {
		mi = rb_entry(next, struct module_info, rb_node);
		next = rb_next(&mi->rb_node);
		rb_erase(&mi->rb_node, modules);
		zfree(&mi->name);
		free(mi);
	}
}

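/* Look up a module by name in the rb-tree built by read_proc_modules(). */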
static struct module_info *find_module(const char *name,
				       struct rb_root *modules)
{
	struct rb_node *n = modules->rb_node;

	while (n) {
		struct module_info *m;
		int cmp;

		m = rb_entry(n, struct module_info, rb_node);
		cmp = strcmp(name, m->name);
		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return m;
	}

	return NULL;
}

static int __read_proc_modules(void *arg, const char *name, u64 start)
{
	struct rb_root *modules = arg;
	struct module_info *mi;

	mi = zalloc(sizeof(struct module_info));
	if (!mi)
		return -ENOMEM;

	mi->name = strdup(name);
	mi->start = start;

	if (!mi->name) {
		free(mi);
		return -ENOMEM;
	}

	add_module(mi, modules);

	return 0;
}

static int read_proc_modules(const char *filename, struct rb_root *modules)
{
	if (symbol__restricted_filename(filename, "/proc/modules"))
		return -1;

	if (modules__parse(filename, modules, __read_proc_modules)) {
		delete_modules(modules);
		return -1;
	}

	return 0;
}

int compare_proc_modules(const char *from, const char *to)
{
	struct rb_root from_modules = RB_ROOT;
	struct rb_root to_modules = RB_ROOT;
	struct rb_node *from_node, *to_node;
	struct module_info *from_m, *to_m;
	int ret = -1;

	if (read_proc_modules(from, &from_modules))
		return -1;

	if (read_proc_modules(to, &to_modules))
		goto out_delete_from;

	from_node = rb_first(&from_modules);
	to_node = rb_first(&to_modules);
	while (from_node) {
		if (!to_node)
			break;

		from_m = rb_entry(from_node, struct module_info, rb_node);
		to_m = rb_entry(to_node, struct module_info, rb_node);

		if (from_m->start != to_m->start ||
		    strcmp(from_m->name, to_m->name))
			break;

		from_node = rb_next(from_node);
		to_node = rb_next(to_node);
	}

	if (!from_node && !to_node)
		ret = 0;

	delete_modules(&to_modules);
out_delete_from:
	delete_modules(&from_modules);

	return ret;
}

static int do_validate_kcore_modules(const char *filename, struct map *map,
				     struct map_groups *kmaps)
{
	struct rb_root modules = RB_ROOT;
	struct map *old_map;
	int err;

	err = read_proc_modules(filename, &modules);
	if (err)
		return err;

	old_map = map_groups__first(kmaps, map->type);
	while (old_map) {
		struct map *next = map_groups__next(old_map);
		struct module_info *mi;

		if (old_map == map || old_map->start == map->start) {
			/* The kernel map */
			old_map = next;
			continue;
		}

		/* Module must be in memory at the same address */
		mi = find_module(old_map->dso->short_name, &modules);
		if (!mi || mi->start != old_map->start) {
			err = -EINVAL;
			goto out;
		}

		old_map = next;
	}
out:
	delete_modules(&modules);
	return err;
}

/*
 * If kallsyms is referenced by name then we look for filename in the same
 * directory.
 */
static bool filename_from_kallsyms_filename(char *filename,
					    const char *base_name,
					    const char *kallsyms_filename)
{
	char *name;

	strcpy(filename, kallsyms_filename);
	name = strrchr(filename, '/');
	if (!name)
		return false;

	name += 1;

	if (!strcmp(name, "kallsyms")) {
		strcpy(name, base_name);
		return true;
	}

	return false;
}

static int validate_kcore_modules(const char *kallsyms_filename,
				  struct map *map)
{
	struct map_groups *kmaps = map__kmap(map)->kmaps;
	char modules_filename[PATH_MAX];

	if (!filename_from_kallsyms_filename(modules_filename, "modules",
					     kallsyms_filename))
		return -EINVAL;

	if (do_validate_kcore_modules(modules_filename, map, kmaps))
		return -EINVAL;

	return 0;
}

static int validate_kcore_addresses(const char *kallsyms_filename,
				    struct map *map)
{
	struct kmap *kmap = map__kmap(map);

	if (kmap->ref_reloc_sym && kmap->ref_reloc_sym->name) {
		u64 start;

		start = kallsyms__get_function_start(kallsyms_filename,
						     kmap->ref_reloc_sym->name);
		if (start != kmap->ref_reloc_sym->addr)
			return -EINVAL;
	}

	return validate_kcore_modules(kallsyms_filename, map);
}

struct kcore_mapfn_data {
	struct dso *dso;
	enum map_type type;
	struct list_head maps;
};

static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data)
{
	struct kcore_mapfn_data *md = data;
	struct map *map;

	map = map__new2(start, md->dso, md->type);
	if (map == NULL)
		return -ENOMEM;

	map->end = map->start + len;
	map->pgoff = pgoff;

	list_add(&map->node, &md->maps);

	return 0;
}

static int dso__load_kcore(struct dso *dso, struct map *map,
			   const char *kallsyms_filename)
{
	struct map_groups *kmaps = map__kmap(map)->kmaps;
	struct machine *machine = kmaps->machine;
	struct kcore_mapfn_data md;
	struct map *old_map, *new_map, *replacement_map = NULL;
	bool is_64_bit;
	int err, fd;
	char kcore_filename[PATH_MAX];
	struct symbol *sym;

	/* This function requires that the map is the kernel map */
	if (map != machine->vmlinux_maps[map->type])
		return -EINVAL;

	if (!filename_from_kallsyms_filename(kcore_filename, "kcore",
					     kallsyms_filename))
		return -EINVAL;

	/* Modules and kernel must be present at their original addresses */
	if (validate_kcore_addresses(kallsyms_filename, map))
		return -EINVAL;

	md.dso = dso;
	md.type = map->type;
	INIT_LIST_HEAD(&md.maps);

	fd = open(kcore_filename, O_RDONLY);
	if (fd < 0)
		return -EINVAL;

	/* Read new maps into temporary lists */
	err = file__read_maps(fd, md.type == MAP__FUNCTION, kcore_mapfn, &md,
			      &is_64_bit);
	if (err)
		goto out_err;

	if (list_empty(&md.maps)) {
		err = -EINVAL;
		goto out_err;
	}

	/* Remove old maps */
	old_map = map_groups__first(kmaps, map->type);
	while (old_map) {
		struct map *next = map_groups__next(old_map);

		if (old_map != map)
			map_groups__remove(kmaps, old_map);
		old_map = next;
	}

	/* Find the kernel map using the first symbol */
	sym = dso__first_symbol(dso, map->type);
	list_for_each_entry(new_map, &md.maps, node) {
		if (sym && sym->start >= new_map->start &&
		    sym->start < new_map->end) {
			replacement_map = new_map;
			break;
		}
	}

	if (!replacement_map)
		replacement_map = list_entry(md.maps.next, struct map, node);

	/* Add new maps */
	while (!list_empty(&md.maps)) {
		new_map = list_entry(md.maps.next, struct map, node);
		list_del(&new_map->node);
		if (new_map == replacement_map) {
			map->start = new_map->start;
			map->end = new_map->end;
			map->pgoff = new_map->pgoff;
			map->map_ip = new_map->map_ip;
			map->unmap_ip = new_map->unmap_ip;
			map__delete(new_map);
			/* Ensure maps are correctly ordered */
			map_groups__remove(kmaps, map);
			map_groups__insert(kmaps, map);
		} else {
			map_groups__insert(kmaps, new_map);
		}
	}

	/*
	 * Set the data type and long name so that kcore can be read via
	 * dso__data_read_addr().
	 */
	if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
		dso->binary_type = DSO_BINARY_TYPE__GUEST_KCORE;
	else
		dso->binary_type = DSO_BINARY_TYPE__KCORE;
	dso__set_long_name(dso, strdup(kcore_filename), true);

	close(fd);

	if (map->type == MAP__FUNCTION)
		pr_debug("Using %s for kernel object code\n", kcore_filename);
	else
		pr_debug("Using %s for kernel data\n", kcore_filename);

	return 0;

out_err:
	while (!list_empty(&md.maps)) {
		map = list_entry(md.maps.next, struct map, node);
		list_del(&map->node);
		map__delete(map);
	}
	close(fd);
	return -EINVAL;
}

/*
 * If the kernel is relocated at boot time, kallsyms won't match. Compute the
 * delta based on the relocation reference symbol.
 */
static int kallsyms__delta(struct map *map, const char *filename, u64 *delta)
{
	struct kmap *kmap = map__kmap(map);
	u64 addr;

	if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->name)
		return 0;

	addr = kallsyms__get_function_start(filename,
					    kmap->ref_reloc_sym->name);
	if (!addr)
		return -1;

	*delta = addr - kmap->ref_reloc_sym->addr;
	return 0;
}

int dso__load_kallsyms(struct dso *dso, const char *filename,
		       struct map *map, symbol_filter_t filter)
{
	u64 delta = 0;

	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
		return -1;

	if (dso__load_all_kallsyms(dso, filename, map) < 0)
		return -1;

	if (kallsyms__delta(map, filename, &delta))
		return -1;

	symbols__fixup_duplicate(&dso->symbols[map->type]);
	symbols__fixup_end(&dso->symbols[map->type]);

	if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
		dso->symtab_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
	else
		dso->symtab_type = DSO_BINARY_TYPE__KALLSYMS;

	if (!dso__load_kcore(dso, map, filename))
		return dso__split_kallsyms_for_kcore(dso, map, filter);
	else
		return dso__split_kallsyms(dso, map, delta, filter);
}

static int dso__load_perf_map(struct dso *dso, struct map *map,
			      symbol_filter_t filter)
{
	char *line = NULL;
	size_t n;
	FILE *file;
	int nr_syms = 0;

	file = fopen(dso->long_name, "r");
	if (file == NULL)
		goto out_failure;

	while (!feof(file)) {
		u64 start, size;
		struct symbol *sym;
		int line_len, len;

		line_len = getline(&line, &n, file);
		if (line_len < 0)
			break;

		if (!line)
			goto out_failure;

		line[--line_len] = '\0'; /* \n */

		len = hex2u64(line, &start);

		len++;
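		/*
		 * Each perf map line is "<start> <size> <name>"; skip lines
		 * too short to still hold a size and a symbol name.
		 */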
		if (len + 2 >= line_len)
			continue;

		len += hex2u64(line + len, &size);

		len++;
		if (len + 2 >= line_len)
			continue;

		sym = symbol__new(start, size, STB_GLOBAL, line + len);

		if (sym == NULL)
			goto out_delete_line;

		if (filter && filter(map, sym))
			symbol__delete(sym);
		else {
			symbols__insert(&dso->symbols[map->type], sym);
			nr_syms++;
		}
	}

	free(line);
	fclose(file);

	return nr_syms;

out_delete_line:
	free(line);
out_failure:
	return -1;
}

static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod,
					   enum dso_binary_type type)
{
	switch (type) {
	case DSO_BINARY_TYPE__JAVA_JIT:
	case DSO_BINARY_TYPE__DEBUGLINK:
	case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
	case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
	case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
	case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
	case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
		return !kmod && dso->kernel == DSO_TYPE_USER;

	case DSO_BINARY_TYPE__KALLSYMS:
	case DSO_BINARY_TYPE__VMLINUX:
	case DSO_BINARY_TYPE__KCORE:
		return dso->kernel == DSO_TYPE_KERNEL;

	case DSO_BINARY_TYPE__GUEST_KALLSYMS:
	case DSO_BINARY_TYPE__GUEST_VMLINUX:
	case DSO_BINARY_TYPE__GUEST_KCORE:
		return dso->kernel == DSO_TYPE_GUEST_KERNEL;

	case DSO_BINARY_TYPE__GUEST_KMODULE:
	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
		/*
		 * kernel modules know their symtab type - it's set when
		 * creating a module dso in machine__new_module().
		 */
		return kmod && dso->symtab_type == type;

	case DSO_BINARY_TYPE__BUILD_ID_CACHE:
		return true;

	case DSO_BINARY_TYPE__NOT_FOUND:
	default:
		return false;
	}
}

int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
{
	char *name;
	int ret = -1;
	u_int i;
	struct machine *machine;
	char *root_dir = (char *) "";
	int ss_pos = 0;
	struct symsrc ss_[2];
	struct symsrc *syms_ss = NULL, *runtime_ss = NULL;
	bool kmod;

	dso__set_loaded(dso, map->type);

	if (dso->kernel == DSO_TYPE_KERNEL)
		return dso__load_kernel_sym(dso, map, filter);
	else if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
		return dso__load_guest_kernel_sym(dso, map, filter);

	if (map->groups && map->groups->machine)
		machine = map->groups->machine;
	else
		machine = NULL;

	dso->adjust_symbols = 0;

	if (strncmp(dso->name, "/tmp/perf-", 10) == 0) {
		struct stat st;

		if (lstat(dso->name, &st) < 0)
			return -1;

		if (st.st_uid && (st.st_uid != geteuid())) {
			pr_warning("File %s not owned by current user or root, "
				   "ignoring it.\n", dso->name);
			return -1;
		}

		ret = dso__load_perf_map(dso, map, filter);
		dso->symtab_type = ret > 0 ? DSO_BINARY_TYPE__JAVA_JIT :
					     DSO_BINARY_TYPE__NOT_FOUND;
		return ret;
	}

	if (machine)
		root_dir = machine->root_dir;

	name = malloc(PATH_MAX);
	if (!name)
		return -1;

	kmod = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
	       dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;

	/*
	 * Iterate over candidate debug images.
	 * Keep track of "interesting" ones (those which have a symtab, dynsym,
	 * and/or opd section) for processing.
	 */
	for (i = 0; i < DSO_BINARY_TYPE__SYMTAB_CNT; i++) {
		struct symsrc *ss = &ss_[ss_pos];
		bool next_slot = false;

		enum dso_binary_type symtab_type = binary_type_symtab[i];

		if (!dso__is_compatible_symtab_type(dso, kmod, symtab_type))
			continue;

		if (dso__read_binary_type_filename(dso, symtab_type,
						   root_dir, name, PATH_MAX))
			continue;

		/* Name is now the name of the next image to try */
		if (symsrc__init(ss, dso, name, symtab_type) < 0)
			continue;

		if (!syms_ss && symsrc__has_symtab(ss)) {
			syms_ss = ss;
			next_slot = true;
			if (!dso->symsrc_filename)
				dso->symsrc_filename = strdup(name);
		}

		if (!runtime_ss && symsrc__possibly_runtime(ss)) {
			runtime_ss = ss;
			next_slot = true;
		}

		if (next_slot) {
			ss_pos++;

			if (syms_ss && runtime_ss)
				break;
		} else {
			symsrc__destroy(ss);
		}
	}

	if (!runtime_ss && !syms_ss)
		goto out_free;

	if (runtime_ss && !syms_ss) {
		syms_ss = runtime_ss;
	}

	/* We'll have to hope for the best */
	if (!runtime_ss && syms_ss)
		runtime_ss = syms_ss;

	if (syms_ss)
		ret = dso__load_sym(dso, map, syms_ss, runtime_ss, filter, kmod);
	else
		ret = -1;

	if (ret > 0) {
		int nr_plt;

		nr_plt = dso__synthesize_plt_symbols(dso, runtime_ss, map, filter);
		if (nr_plt > 0)
			ret += nr_plt;
	}

	for (; ss_pos > 0; ss_pos--)
		symsrc__destroy(&ss_[ss_pos - 1]);
out_free:
	free(name);
	if (ret < 0 && strstr(dso->name, " (deleted)") != NULL)
		return 0;
	return ret;
}

struct map *map_groups__find_by_name(struct map_groups *mg,
				     enum map_type type, const char *name)
{
	struct rb_node *nd;

	for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) {
		struct map *map = rb_entry(nd, struct map, rb_node);

		if (map->dso && strcmp(map->dso->short_name, name) == 0)
			return map;
	}

	return NULL;
}

int dso__load_vmlinux(struct dso *dso, struct map *map,
		      const char *vmlinux, bool vmlinux_allocated,
		      symbol_filter_t filter)
{
	int err = -1;
	struct symsrc ss;
	char symfs_vmlinux[PATH_MAX];
	enum dso_binary_type symtab_type;

	if (vmlinux[0] == '/')
		snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s", vmlinux);
	else
		snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s%s",
			 symbol_conf.symfs, vmlinux);

	if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
		symtab_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
	else
		symtab_type = DSO_BINARY_TYPE__VMLINUX;

	if (symsrc__init(&ss, dso, symfs_vmlinux, symtab_type))
		return -1;

	err = dso__load_sym(dso, map, &ss, &ss, filter, 0);
	symsrc__destroy(&ss);

	if (err > 0) {
		if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
			dso->binary_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
		else
			dso->binary_type = DSO_BINARY_TYPE__VMLINUX;
		dso__set_long_name(dso, vmlinux, vmlinux_allocated);
		dso__set_loaded(dso, map->type);
		pr_debug("Using %s for symbols\n", symfs_vmlinux);
	}

	return err;
}

int dso__load_vmlinux_path(struct dso *dso, struct map *map,
			   symbol_filter_t filter)
{
	int i, err = 0;
	char *filename;

	pr_debug("Looking at the vmlinux_path (%d entries long)\n",
		 vmlinux_path__nr_entries + 1);

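	/*
	 * Try a vmlinux from the build-id cache first, since it is known to
	 * match the recorded kernel, before walking the vmlinux_path entries.
	 */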
	filename = dso__build_id_filename(dso, NULL, 0);
	if (filename != NULL) {
		err = dso__load_vmlinux(dso, map, filename, true, filter);
		if (err > 0)
			goto out;
		free(filename);
	}

	for (i = 0; i < vmlinux_path__nr_entries; ++i) {
		err = dso__load_vmlinux(dso, map, vmlinux_path[i], false, filter);
		if (err > 0)
			break;
	}
out:
	return err;
}

static int find_matching_kcore(struct map *map, char *dir, size_t dir_sz)
{
	char kallsyms_filename[PATH_MAX];
	struct dirent *dent;
	int ret = -1;
	DIR *d;

	d = opendir(dir);
	if (!d)
		return -1;

	while (1) {
		dent = readdir(d);
		if (!dent)
			break;
		if (dent->d_type != DT_DIR)
			continue;
		scnprintf(kallsyms_filename, sizeof(kallsyms_filename),
			  "%s/%s/kallsyms", dir, dent->d_name);
		if (!validate_kcore_addresses(kallsyms_filename, map)) {
			strlcpy(dir, kallsyms_filename, dir_sz);
			ret = 0;
			break;
		}
	}

	closedir(d);

	return ret;
}

static char *dso__find_kallsyms(struct dso *dso, struct map *map)
{
	u8 host_build_id[BUILD_ID_SIZE];
	char sbuild_id[BUILD_ID_SIZE * 2 + 1];
	bool is_host = false;
	char path[PATH_MAX];

	if (!dso->has_build_id) {
		/*
		 * Last resort, if we don't have a build-id and couldn't find
		 * any vmlinux file, try the running kernel kallsyms table.
		 */
		goto proc_kallsyms;
	}

	if (sysfs__read_build_id("/sys/kernel/notes", host_build_id,
				 sizeof(host_build_id)) == 0)
		is_host = dso__build_id_equal(dso, host_build_id);

	build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);

	scnprintf(path, sizeof(path), "%s/[kernel.kcore]/%s", buildid_dir,
		  sbuild_id);

	/* Use /proc/kallsyms if possible */
	if (is_host) {
		DIR *d;
		int fd;

		/* If no cached kcore go with /proc/kallsyms */
		d = opendir(path);
		if (!d)
			goto proc_kallsyms;
		closedir(d);

		/*
		 * Do not check the build-id cache, until we know we cannot use
		 * /proc/kcore.
		 */
		fd = open("/proc/kcore", O_RDONLY);
		if (fd != -1) {
			close(fd);
			/* If module maps match go with /proc/kallsyms */
			if (!validate_kcore_addresses("/proc/kallsyms", map))
				goto proc_kallsyms;
		}

		/* Find kallsyms in build-id cache with kcore */
		if (!find_matching_kcore(map, path, sizeof(path)))
			return strdup(path);

		goto proc_kallsyms;
	}

	/* Find kallsyms in build-id cache with kcore */
	if (!find_matching_kcore(map, path, sizeof(path)))
		return strdup(path);

	scnprintf(path, sizeof(path), "%s/[kernel.kallsyms]/%s",
		  buildid_dir, sbuild_id);

	if (access(path, F_OK)) {
		pr_err("No kallsyms or vmlinux with build-id %s was found\n",
		       sbuild_id);
		return NULL;
	}

	return strdup(path);

proc_kallsyms:
	return strdup("/proc/kallsyms");
}

static int dso__load_kernel_sym(struct dso *dso, struct map *map,
				symbol_filter_t filter)
{
	int err;
	const char *kallsyms_filename = NULL;
	char *kallsyms_allocated_filename = NULL;
	/*
	 * Step 1: if the user specified a kallsyms or vmlinux filename, use
	 * it and only it, reporting errors to the user if it cannot be used.
1619 * 1620 * For instance, try to analyse an ARM perf.data file _without_ a 1621 * build-id, or if the user specifies the wrong path to the right 1622 * vmlinux file, obviously we can't fallback to another vmlinux (a 1623 * x86_86 one, on the machine where analysis is being performed, say), 1624 * or worse, /proc/kallsyms. 1625 * 1626 * If the specified file _has_ a build-id and there is a build-id 1627 * section in the perf.data file, we will still do the expected 1628 * validation in dso__load_vmlinux and will bail out if they don't 1629 * match. 1630 */ 1631 if (symbol_conf.kallsyms_name != NULL) { 1632 kallsyms_filename = symbol_conf.kallsyms_name; 1633 goto do_kallsyms; 1634 } 1635 1636 if (!symbol_conf.ignore_vmlinux && symbol_conf.vmlinux_name != NULL) { 1637 return dso__load_vmlinux(dso, map, symbol_conf.vmlinux_name, 1638 false, filter); 1639 } 1640 1641 if (!symbol_conf.ignore_vmlinux && vmlinux_path != NULL) { 1642 err = dso__load_vmlinux_path(dso, map, filter); 1643 if (err > 0) 1644 return err; 1645 } 1646 1647 /* do not try local files if a symfs was given */ 1648 if (symbol_conf.symfs[0] != 0) 1649 return -1; 1650 1651 kallsyms_allocated_filename = dso__find_kallsyms(dso, map); 1652 if (!kallsyms_allocated_filename) 1653 return -1; 1654 1655 kallsyms_filename = kallsyms_allocated_filename; 1656 1657 do_kallsyms: 1658 err = dso__load_kallsyms(dso, kallsyms_filename, map, filter); 1659 if (err > 0) 1660 pr_debug("Using %s for symbols\n", kallsyms_filename); 1661 free(kallsyms_allocated_filename); 1662 1663 if (err > 0 && !dso__is_kcore(dso)) { 1664 dso__set_long_name(dso, "[kernel.kallsyms]", false); 1665 map__fixup_start(map); 1666 map__fixup_end(map); 1667 } 1668 1669 return err; 1670 } 1671 1672 static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map, 1673 symbol_filter_t filter) 1674 { 1675 int err; 1676 const char *kallsyms_filename = NULL; 1677 struct machine *machine; 1678 char path[PATH_MAX]; 1679 1680 if (!map->groups) { 1681 pr_debug("Guest kernel map hasn't the point to groups\n"); 1682 return -1; 1683 } 1684 machine = map->groups->machine; 1685 1686 if (machine__is_default_guest(machine)) { 1687 /* 1688 * if the user specified a vmlinux filename, use it and only 1689 * it, reporting errors to the user if it cannot be used. 
		 * Otherwise use the guest kallsyms file the user passed on
		 * the command line.
		 */
		if (symbol_conf.default_guest_vmlinux_name != NULL) {
			err = dso__load_vmlinux(dso, map,
						symbol_conf.default_guest_vmlinux_name,
						false, filter);
			return err;
		}

		kallsyms_filename = symbol_conf.default_guest_kallsyms;
		if (!kallsyms_filename)
			return -1;
	} else {
		sprintf(path, "%s/proc/kallsyms", machine->root_dir);
		kallsyms_filename = path;
	}

	err = dso__load_kallsyms(dso, kallsyms_filename, map, filter);
	if (err > 0)
		pr_debug("Using %s for symbols\n", kallsyms_filename);
	if (err > 0 && !dso__is_kcore(dso)) {
		machine__mmap_name(machine, path, sizeof(path));
		dso__set_long_name(dso, strdup(path), true);
		map__fixup_start(map);
		map__fixup_end(map);
	}

	return err;
}

static void vmlinux_path__exit(void)
{
	while (--vmlinux_path__nr_entries >= 0)
		zfree(&vmlinux_path[vmlinux_path__nr_entries]);

	zfree(&vmlinux_path);
}

static int vmlinux_path__init(void)
{
	struct utsname uts;
	char bf[PATH_MAX];

	vmlinux_path = malloc(sizeof(char *) * 5);
	if (vmlinux_path == NULL)
		return -1;

	vmlinux_path[vmlinux_path__nr_entries] = strdup("vmlinux");
	if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
		goto out_fail;
	++vmlinux_path__nr_entries;
	vmlinux_path[vmlinux_path__nr_entries] = strdup("/boot/vmlinux");
	if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
		goto out_fail;
	++vmlinux_path__nr_entries;

	/* only try running kernel version if no symfs was given */
	if (symbol_conf.symfs[0] != 0)
		return 0;

	if (uname(&uts) < 0)
		return -1;

	snprintf(bf, sizeof(bf), "/boot/vmlinux-%s", uts.release);
	vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
	if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
		goto out_fail;
	++vmlinux_path__nr_entries;
	snprintf(bf, sizeof(bf), "/lib/modules/%s/build/vmlinux", uts.release);
	vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
	if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
		goto out_fail;
	++vmlinux_path__nr_entries;
	snprintf(bf, sizeof(bf), "/usr/lib/debug/lib/modules/%s/vmlinux",
		 uts.release);
	vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
	if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
		goto out_fail;
	++vmlinux_path__nr_entries;

	return 0;

out_fail:
	vmlinux_path__exit();
	return -1;
}

int setup_list(struct strlist **list, const char *list_str,
	       const char *list_name)
{
	if (list_str == NULL)
		return 0;

	*list = strlist__new(true, list_str);
	if (!*list) {
		pr_err("problems parsing %s list\n", list_name);
		return -1;
	}
	return 0;
}

static bool symbol__read_kptr_restrict(void)
{
	bool value = false;

	if (geteuid() != 0) {
		FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r");
		if (fp != NULL) {
			char line[8];

			if (fgets(line, sizeof(line), fp) != NULL)
				value = atoi(line) != 0;

			fclose(fp);
		}
	}

	return value;
}

int symbol__init(void)
{
	const char *symfs;

	if (symbol_conf.initialized)
		return 0;

	symbol_conf.priv_size = PERF_ALIGN(symbol_conf.priv_size, sizeof(u64));

	symbol__elf_init();

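	/*
	 * When symbols will also be sorted by name, reserve the extra room
	 * needed for the symbol_name_rb_node that wraps each struct symbol.
	 */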
	if (symbol_conf.sort_by_name)
		symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) -
					  sizeof(struct symbol));

	if (symbol_conf.try_vmlinux_path && vmlinux_path__init() < 0)
		return -1;

	if (symbol_conf.field_sep && *symbol_conf.field_sep == '.') {
		pr_err("'.' is the only non valid --field-separator argument\n");
		return -1;
	}

	if (setup_list(&symbol_conf.dso_list,
		       symbol_conf.dso_list_str, "dso") < 0)
		return -1;

	if (setup_list(&symbol_conf.comm_list,
		       symbol_conf.comm_list_str, "comm") < 0)
		goto out_free_dso_list;

	if (setup_list(&symbol_conf.sym_list,
		       symbol_conf.sym_list_str, "symbol") < 0)
		goto out_free_comm_list;

	/*
	 * A path to symbols of "/" is identical to "",
	 * reset here for simplicity.
	 */
	symfs = realpath(symbol_conf.symfs, NULL);
	if (symfs == NULL)
		symfs = symbol_conf.symfs;
	if (strcmp(symfs, "/") == 0)
		symbol_conf.symfs = "";
	if (symfs != symbol_conf.symfs)
		free((void *)symfs);

	symbol_conf.kptr_restrict = symbol__read_kptr_restrict();

	symbol_conf.initialized = true;
	return 0;

out_free_comm_list:
	strlist__delete(symbol_conf.comm_list);
out_free_dso_list:
	strlist__delete(symbol_conf.dso_list);
	return -1;
}

void symbol__exit(void)
{
	if (!symbol_conf.initialized)
		return;
	strlist__delete(symbol_conf.sym_list);
	strlist__delete(symbol_conf.dso_list);
	strlist__delete(symbol_conf.comm_list);
	vmlinux_path__exit();
	symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL;
	symbol_conf.initialized = false;
}