// SPDX-License-Identifier: GPL-2.0
#include <dirent.h>
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <linux/kernel.h>
#include <linux/mman.h>
#include <linux/time64.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/param.h>
#include <fcntl.h>
#include <unistd.h>
#include <inttypes.h>
#include "annotate.h"
#include "build-id.h"
#include "util.h"
#include "debug.h"
#include "machine.h"
#include "map.h"
#include "symbol.h"
#include "strlist.h"
#include "intlist.h"
#include "namespaces.h"
#include "header.h"
#include "path.h"
#include <linux/ctype.h>
#include <linux/zalloc.h>

#include <elf.h>
#include <limits.h>
#include <symbol/kallsyms.h>
#include <sys/utsname.h>

static int dso__load_kernel_sym(struct dso *dso, struct map *map);
static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map);
static bool symbol__is_idle(const char *name);

int vmlinux_path__nr_entries;
char **vmlinux_path;

struct symbol_conf symbol_conf = {
	.nanosecs		= false,
	.use_modules		= true,
	.try_vmlinux_path	= true,
	.demangle		= true,
	.demangle_kernel	= false,
	.cumulate_callchain	= true,
	.time_quantum		= 100 * NSEC_PER_MSEC, /* 100ms */
	.show_hist_headers	= true,
	.symfs			= "",
	.event_group		= true,
	.inline_name		= true,
	.res_sample		= 0,
};

static enum dso_binary_type binary_type_symtab[] = {
	DSO_BINARY_TYPE__KALLSYMS,
	DSO_BINARY_TYPE__GUEST_KALLSYMS,
	DSO_BINARY_TYPE__JAVA_JIT,
	DSO_BINARY_TYPE__DEBUGLINK,
	DSO_BINARY_TYPE__BUILD_ID_CACHE,
	DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO,
	DSO_BINARY_TYPE__FEDORA_DEBUGINFO,
	DSO_BINARY_TYPE__UBUNTU_DEBUGINFO,
	DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
	DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
	DSO_BINARY_TYPE__GUEST_KMODULE,
	DSO_BINARY_TYPE__GUEST_KMODULE_COMP,
	DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE,
	DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP,
	DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
	DSO_BINARY_TYPE__NOT_FOUND,
};

#define DSO_BINARY_TYPE__SYMTAB_CNT ARRAY_SIZE(binary_type_symtab)

static bool symbol_type__filter(char symbol_type)
{
	symbol_type = toupper(symbol_type);
	return symbol_type == 'T' ||
	       symbol_type == 'W' ||
	       symbol_type == 'D' ||
	       symbol_type == 'B';
}

static int prefix_underscores_count(const char *str)
{
	const char *tail = str;

	while (*tail == '_')
		tail++;

	return tail - str;
}

void __weak arch__symbols__fixup_end(struct symbol *p, struct symbol *c)
{
	p->end = c->start;
}

const char * __weak arch__normalize_symbol_name(const char *name)
{
	return name;
}

int __weak arch__compare_symbol_names(const char *namea, const char *nameb)
{
	return strcmp(namea, nameb);
}

int __weak arch__compare_symbol_names_n(const char *namea, const char *nameb,
					unsigned int n)
{
	return strncmp(namea, nameb, n);
}

int __weak arch__choose_best_symbol(struct symbol *syma,
				    struct symbol *symb __maybe_unused)
{
	/* Avoid "SyS" kernel syscall aliases */
	if (strlen(syma->name) >= 3 && !strncmp(syma->name, "SyS", 3))
		return SYMBOL_B;
	if (strlen(syma->name) >= 10 && !strncmp(syma->name, "compat_SyS", 10))
		return SYMBOL_B;

	return SYMBOL_A;
}
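/*
 * The arch__*() helpers above are weak symbols, so an architecture can
 * override them from its tools/perf/arch/<arch>/util/ code.  As an
 * illustrative sketch only (the '$' marker below is made up, not an
 * existing perf convention), an override that ignores a leading '$'
 * when comparing names would look like:
 *
 *	int arch__compare_symbol_names(const char *namea, const char *nameb)
 *	{
 *		if (*namea == '$')
 *			namea++;
 *		if (*nameb == '$')
 *			nameb++;
 *		return strcmp(namea, nameb);
 *	}
 */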
static int choose_best_symbol(struct symbol *syma, struct symbol *symb)
{
	s64 a;
	s64 b;
	size_t na, nb;

	/* Prefer a symbol with non-zero length */
	a = syma->end - syma->start;
	b = symb->end - symb->start;
	if ((b == 0) && (a > 0))
		return SYMBOL_A;
	else if ((a == 0) && (b > 0))
		return SYMBOL_B;

	/* Prefer a non-weak symbol over a weak one */
	a = syma->binding == STB_WEAK;
	b = symb->binding == STB_WEAK;
	if (b && !a)
		return SYMBOL_A;
	if (a && !b)
		return SYMBOL_B;

	/* Prefer a global symbol over a non-global one */
	a = syma->binding == STB_GLOBAL;
	b = symb->binding == STB_GLOBAL;
	if (a && !b)
		return SYMBOL_A;
	if (b && !a)
		return SYMBOL_B;

	/* Prefer a symbol with fewer leading underscores */
	a = prefix_underscores_count(syma->name);
	b = prefix_underscores_count(symb->name);
	if (b > a)
		return SYMBOL_A;
	else if (a > b)
		return SYMBOL_B;

	/* Choose the symbol with the longest name */
	na = strlen(syma->name);
	nb = strlen(symb->name);
	if (na > nb)
		return SYMBOL_A;
	else if (na < nb)
		return SYMBOL_B;

	return arch__choose_best_symbol(syma, symb);
}

void symbols__fixup_duplicate(struct rb_root_cached *symbols)
{
	struct rb_node *nd;
	struct symbol *curr, *next;

	if (symbol_conf.allow_aliases)
		return;

	nd = rb_first_cached(symbols);

	while (nd) {
		curr = rb_entry(nd, struct symbol, rb_node);
again:
		nd = rb_next(&curr->rb_node);
		next = rb_entry(nd, struct symbol, rb_node);

		if (!nd)
			break;

		if (curr->start != next->start)
			continue;

		if (choose_best_symbol(curr, next) == SYMBOL_A) {
			rb_erase_cached(&next->rb_node, symbols);
			symbol__delete(next);
			goto again;
		} else {
			nd = rb_next(&curr->rb_node);
			rb_erase_cached(&curr->rb_node, symbols);
			symbol__delete(curr);
		}
	}
}

void symbols__fixup_end(struct rb_root_cached *symbols)
{
	struct rb_node *nd, *prevnd = rb_first_cached(symbols);
	struct symbol *curr, *prev;

	if (prevnd == NULL)
		return;

	curr = rb_entry(prevnd, struct symbol, rb_node);

	for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {
		prev = curr;
		curr = rb_entry(nd, struct symbol, rb_node);

		if (prev->end == prev->start && prev->end != curr->start)
			arch__symbols__fixup_end(prev, curr);
	}

	/* Last entry */
	if (curr->end == curr->start)
		curr->end = roundup(curr->start, 4096) + 4096;
}
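/*
 * Example of the end-address fixup above: kallsyms provides only start
 * addresses, so symbols arrive with end == start.  Given entries at
 * 0x1000 and 0x1040 (made-up addresses), the default
 * arch__symbols__fixup_end() turns the first into [0x1000, 0x1040),
 * while the last symbol in the tree gets its end rounded up to a page
 * boundary plus one page of padding.
 */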
void map_groups__fixup_end(struct map_groups *mg)
{
	struct maps *maps = &mg->maps;
	struct map *next, *curr;

	down_write(&maps->lock);

	curr = maps__first(maps);
	if (curr == NULL)
		goto out_unlock;

	for (next = map__next(curr); next; next = map__next(curr)) {
		if (!curr->end)
			curr->end = next->start;
		curr = next;
	}

	/*
	 * We still don't have the actual symbols, so guess the
	 * final address of the last map.
	 */
	if (!curr->end)
		curr->end = ~0ULL;

out_unlock:
	up_write(&maps->lock);
}

struct symbol *symbol__new(u64 start, u64 len, u8 binding, u8 type, const char *name)
{
	size_t namelen = strlen(name) + 1;
	struct symbol *sym = calloc(1, (symbol_conf.priv_size +
					sizeof(*sym) + namelen));
	if (sym == NULL)
		return NULL;

	if (symbol_conf.priv_size) {
		if (symbol_conf.init_annotation) {
			struct annotation *notes = (void *)sym;
			pthread_mutex_init(&notes->lock, NULL);
		}
		sym = ((void *)sym) + symbol_conf.priv_size;
	}

	sym->start = start;
	sym->end = len ? start + len : start;
	sym->type = type;
	sym->binding = binding;
	sym->namelen = namelen - 1;

	pr_debug4("%s: %s %#" PRIx64 "-%#" PRIx64 "\n",
		  __func__, name, start, sym->end);
	memcpy(sym->name, name, namelen);

	return sym;
}

void symbol__delete(struct symbol *sym)
{
	free(((void *)sym) - symbol_conf.priv_size);
}

void symbols__delete(struct rb_root_cached *symbols)
{
	struct symbol *pos;
	struct rb_node *next = rb_first_cached(symbols);

	while (next) {
		pos = rb_entry(next, struct symbol, rb_node);
		next = rb_next(&pos->rb_node);
		rb_erase_cached(&pos->rb_node, symbols);
		symbol__delete(pos);
	}
}
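/*
 * Typical lifetime of a symbol tree, as a sketch (the values and the
 * "syms" variable are illustrative, not taken from perf itself):
 *
 *	struct rb_root_cached syms = RB_ROOT_CACHED;
 *	struct symbol *sym = symbol__new(0x1000, 0x40, STB_GLOBAL,
 *					 STT_FUNC, "my_func");
 *
 *	if (sym != NULL)
 *		symbols__insert(&syms, sym);
 *	...
 *	symbols__delete(&syms);		// frees every inserted symbol
 *
 * symbol__new() allocates symbol_conf.priv_size bytes of private space
 * in front of the struct symbol, which is why symbol__delete() frees
 * (void *)sym - symbol_conf.priv_size.
 */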
void __symbols__insert(struct rb_root_cached *symbols,
		       struct symbol *sym, bool kernel)
{
	struct rb_node **p = &symbols->rb_root.rb_node;
	struct rb_node *parent = NULL;
	const u64 ip = sym->start;
	struct symbol *s;
	bool leftmost = true;

	if (kernel) {
		const char *name = sym->name;
		/*
		 * ppc64 uses function descriptors and prepends a '.' to
		 * the name of every function entry point. Remove it.
		 */
		if (name[0] == '.')
			name++;
		sym->idle = symbol__is_idle(name);
	}

	while (*p != NULL) {
		parent = *p;
		s = rb_entry(parent, struct symbol, rb_node);
		if (ip < s->start)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}
	rb_link_node(&sym->rb_node, parent, p);
	rb_insert_color_cached(&sym->rb_node, symbols, leftmost);
}

void symbols__insert(struct rb_root_cached *symbols, struct symbol *sym)
{
	__symbols__insert(symbols, sym, false);
}

static struct symbol *symbols__find(struct rb_root_cached *symbols, u64 ip)
{
	struct rb_node *n;

	if (symbols == NULL)
		return NULL;

	n = symbols->rb_root.rb_node;

	while (n) {
		struct symbol *s = rb_entry(n, struct symbol, rb_node);

		if (ip < s->start)
			n = n->rb_left;
		else if (ip > s->end || (ip == s->end && ip != s->start))
			n = n->rb_right;
		else
			return s;
	}

	return NULL;
}

static struct symbol *symbols__first(struct rb_root_cached *symbols)
{
	struct rb_node *n = rb_first_cached(symbols);

	if (n)
		return rb_entry(n, struct symbol, rb_node);

	return NULL;
}

static struct symbol *symbols__last(struct rb_root_cached *symbols)
{
	struct rb_node *n = rb_last(&symbols->rb_root);

	if (n)
		return rb_entry(n, struct symbol, rb_node);

	return NULL;
}

static struct symbol *symbols__next(struct symbol *sym)
{
	struct rb_node *n = rb_next(&sym->rb_node);

	if (n)
		return rb_entry(n, struct symbol, rb_node);

	return NULL;
}

static void symbols__insert_by_name(struct rb_root_cached *symbols, struct symbol *sym)
{
	struct rb_node **p = &symbols->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct symbol_name_rb_node *symn, *s;
	bool leftmost = true;

	symn = container_of(sym, struct symbol_name_rb_node, sym);

	while (*p != NULL) {
		parent = *p;
		s = rb_entry(parent, struct symbol_name_rb_node, rb_node);
		if (strcmp(sym->name, s->sym.name) < 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}
	rb_link_node(&symn->rb_node, parent, p);
	rb_insert_color_cached(&symn->rb_node, symbols, leftmost);
}

static void symbols__sort_by_name(struct rb_root_cached *symbols,
				  struct rb_root_cached *source)
{
	struct rb_node *nd;

	for (nd = rb_first_cached(source); nd; nd = rb_next(nd)) {
		struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
		symbols__insert_by_name(symbols, pos);
	}
}

int symbol__match_symbol_name(const char *name, const char *str,
			      enum symbol_tag_include includes)
{
	const char *versioning;

	if (includes == SYMBOL_TAG_INCLUDE__DEFAULT_ONLY &&
	    (versioning = strstr(name, "@@"))) {
		int len = strlen(str);

		if (len < versioning - name)
			len = versioning - name;

		return arch__compare_symbol_names_n(name, str, len);
	} else
		return arch__compare_symbol_names(name, str);
}
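/*
 * Worked example for the "@@" handling above (names are illustrative):
 * with includes == SYMBOL_TAG_INCLUDE__DEFAULT_ONLY, looking up "malloc"
 * against "malloc@@GLIBC_2.2.5" compares only the bytes before the "@@"
 * default-version tag, so the versioned symbol still matches the plain
 * name.  With any other 'includes' value the full strings are compared
 * and "malloc" does not match "malloc@@GLIBC_2.2.5".
 */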
static struct symbol *symbols__find_by_name(struct rb_root_cached *symbols,
					    const char *name,
					    enum symbol_tag_include includes)
{
	struct rb_node *n;
	struct symbol_name_rb_node *s = NULL;

	if (symbols == NULL)
		return NULL;

	n = symbols->rb_root.rb_node;

	while (n) {
		int cmp;

		s = rb_entry(n, struct symbol_name_rb_node, rb_node);
		cmp = symbol__match_symbol_name(s->sym.name, name, includes);

		if (cmp > 0)
			n = n->rb_left;
		else if (cmp < 0)
			n = n->rb_right;
		else
			break;
	}

	if (n == NULL)
		return NULL;

	if (includes != SYMBOL_TAG_INCLUDE__DEFAULT_ONLY)
		/* return first symbol that has same name (if any) */
		for (n = rb_prev(n); n; n = rb_prev(n)) {
			struct symbol_name_rb_node *tmp;

			tmp = rb_entry(n, struct symbol_name_rb_node, rb_node);
			if (arch__compare_symbol_names(tmp->sym.name, s->sym.name))
				break;

			s = tmp;
		}

	return &s->sym;
}

void dso__reset_find_symbol_cache(struct dso *dso)
{
	dso->last_find_result.addr = 0;
	dso->last_find_result.symbol = NULL;
}

void dso__insert_symbol(struct dso *dso, struct symbol *sym)
{
	__symbols__insert(&dso->symbols, sym, dso->kernel);

	/* update the symbol cache if necessary */
	if (dso->last_find_result.addr >= sym->start &&
	    (dso->last_find_result.addr < sym->end ||
	     sym->start == sym->end)) {
		dso->last_find_result.symbol = sym;
	}
}

struct symbol *dso__find_symbol(struct dso *dso, u64 addr)
{
	if (dso->last_find_result.addr != addr || dso->last_find_result.symbol == NULL) {
		dso->last_find_result.addr = addr;
		dso->last_find_result.symbol = symbols__find(&dso->symbols, addr);
	}

	return dso->last_find_result.symbol;
}

struct symbol *dso__first_symbol(struct dso *dso)
{
	return symbols__first(&dso->symbols);
}

struct symbol *dso__last_symbol(struct dso *dso)
{
	return symbols__last(&dso->symbols);
}

struct symbol *dso__next_symbol(struct symbol *sym)
{
	return symbols__next(sym);
}

struct symbol *symbol__next_by_name(struct symbol *sym)
{
	struct symbol_name_rb_node *s = container_of(sym, struct symbol_name_rb_node, sym);
	struct rb_node *n = rb_next(&s->rb_node);

	return n ? &rb_entry(n, struct symbol_name_rb_node, rb_node)->sym : NULL;
}

/*
 * Returns the first symbol that matches @name.
 */
struct symbol *dso__find_symbol_by_name(struct dso *dso, const char *name)
{
	struct symbol *s = symbols__find_by_name(&dso->symbol_names, name,
						 SYMBOL_TAG_INCLUDE__NONE);
	if (!s)
		s = symbols__find_by_name(&dso->symbol_names, name,
					  SYMBOL_TAG_INCLUDE__DEFAULT_ONLY);
	return s;
}

void dso__sort_by_name(struct dso *dso)
{
	dso__set_sorted_by_name(dso);
	return symbols__sort_by_name(&dso->symbol_names, &dso->symbols);
}

int modules__parse(const char *filename, void *arg,
		   int (*process_module)(void *arg, const char *name,
					 u64 start, u64 size))
{
	char *line = NULL;
	size_t n;
	FILE *file;
	int err = 0;

	file = fopen(filename, "r");
	if (file == NULL)
		return -1;

	while (1) {
		char name[PATH_MAX];
		u64 start, size;
		char *sep, *endptr;
		ssize_t line_len;

		line_len = getline(&line, &n, file);
		if (line_len < 0) {
			if (feof(file))
				break;
			err = -1;
			goto out;
		}

		if (!line) {
			err = -1;
			goto out;
		}

		line[--line_len] = '\0'; /* \n */

		sep = strrchr(line, 'x');
		if (sep == NULL)
			continue;

		hex2u64(sep + 1, &start);

		sep = strchr(line, ' ');
		if (sep == NULL)
			continue;

		*sep = '\0';

		scnprintf(name, sizeof(name), "[%s]", line);

		size = strtoul(sep + 1, &endptr, 0);
		if (*endptr != ' ' && *endptr != '\t')
			continue;

		err = process_module(arg, name, start, size);
		if (err)
			break;
	}
out:
	free(line);
	fclose(file);
	return err;
}

/*
 * These are symbols in the kernel image, so make sure that
 * sym is from a kernel DSO.
 */
static bool symbol__is_idle(const char *name)
{
	const char * const idle_symbols[] = {
		"arch_cpu_idle",
		"cpu_idle",
		"cpu_startup_entry",
		"intel_idle",
		"default_idle",
		"native_safe_halt",
		"enter_idle",
		"exit_idle",
		"mwait_idle",
		"mwait_idle_with_hints",
		"poll_idle",
		"ppc64_runlatch_off",
		"pseries_dedicated_idle_sleep",
		NULL
	};
	int i;

	for (i = 0; idle_symbols[i]; i++) {
		if (!strcmp(idle_symbols[i], name))
			return true;
	}

	return false;
}

static int map__process_kallsym_symbol(void *arg, const char *name,
				       char type, u64 start)
{
	struct symbol *sym;
	struct dso *dso = arg;
	struct rb_root_cached *root = &dso->symbols;

	if (!symbol_type__filter(type))
		return 0;

	/*
	 * module symbols are not sorted so we add all
	 * symbols, setting length to 0, and rely on
	 * symbols__fixup_end() to fix it up.
	 */
	sym = symbol__new(start, 0, kallsyms2elf_binding(type), kallsyms2elf_type(type), name);
	if (sym == NULL)
		return -ENOMEM;
	/*
	 * We will pass the symbols to the filter later, in
	 * map__split_kallsyms, when we have split the maps per module
	 */
	__symbols__insert(root, sym, !strchr(name, '['));

	return 0;
}
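/*
 * For reference, the /proc/kallsyms lines handed to the callback above
 * look like this (addresses are examples only; module symbols carry a
 * tab-separated "[module]" suffix):
 *
 *	ffffffff8100d4a0 T native_safe_halt
 *	ffffffffc04d2000 t drv_probe	[some_module]
 *
 * kallsyms__parse() splits each line into (name, type, start), and
 * symbol_type__filter() keeps only text/data symbols ('T', 'W', 'D',
 * 'B' and their lowercase variants, via toupper()).
 */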
/*
 * Loads the function entries in /proc/kallsyms into kernel_map->dso,
 * so that, in a second step, we can set each symbol's ->end address and
 * then call kernel_maps__split_kallsyms.
 */
static int dso__load_all_kallsyms(struct dso *dso, const char *filename)
{
	return kallsyms__parse(filename, dso, map__process_kallsym_symbol);
}

static int map_groups__split_kallsyms_for_kcore(struct map_groups *kmaps, struct dso *dso)
{
	struct map *curr_map;
	struct symbol *pos;
	int count = 0;
	struct rb_root_cached old_root = dso->symbols;
	struct rb_root_cached *root = &dso->symbols;
	struct rb_node *next = rb_first_cached(root);

	if (!kmaps)
		return -1;

	*root = RB_ROOT_CACHED;

	while (next) {
		char *module;

		pos = rb_entry(next, struct symbol, rb_node);
		next = rb_next(&pos->rb_node);

		rb_erase_cached(&pos->rb_node, &old_root);
		RB_CLEAR_NODE(&pos->rb_node);
		module = strchr(pos->name, '\t');
		if (module)
			*module = '\0';

		curr_map = map_groups__find(kmaps, pos->start);

		if (!curr_map) {
			symbol__delete(pos);
			continue;
		}

		pos->start -= curr_map->start - curr_map->pgoff;
		if (pos->end > curr_map->end)
			pos->end = curr_map->end;
		if (pos->end)
			pos->end -= curr_map->start - curr_map->pgoff;
		symbols__insert(&curr_map->dso->symbols, pos);
		++count;
	}

	/* Symbols have been adjusted */
	dso->adjust_symbols = 1;

	return count;
}
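/*
 * Note on the address arithmetic in the kcore variant above:
 * subtracting (curr_map->start - curr_map->pgoff) is the same mapping
 * that curr_map->map_ip() performs for these maps, i.e. it converts an
 * absolute kallsyms address into the dso-relative address space the
 * kcore maps use.  For instance (illustrative values), an address of
 * 0xffffffff8100d4a0 in a map with start 0xffffffff81000000 and
 * pgoff 0 becomes 0xd4a0.
 */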
/*
 * Split the symbols into maps, making sure there are no overlaps, i.e. the
 * kernel range is broken into several maps, named [kernel].N, as we don't
 * have the original ELF section names that vmlinux has.
 */
static int map_groups__split_kallsyms(struct map_groups *kmaps, struct dso *dso, u64 delta,
				      struct map *initial_map)
{
	struct machine *machine;
	struct map *curr_map = initial_map;
	struct symbol *pos;
	int count = 0, moved = 0;
	struct rb_root_cached *root = &dso->symbols;
	struct rb_node *next = rb_first_cached(root);
	int kernel_range = 0;
	bool x86_64;

	if (!kmaps)
		return -1;

	machine = kmaps->machine;

	x86_64 = machine__is(machine, "x86_64");

	while (next) {
		char *module;

		pos = rb_entry(next, struct symbol, rb_node);
		next = rb_next(&pos->rb_node);

		module = strchr(pos->name, '\t');
		if (module) {
			if (!symbol_conf.use_modules)
				goto discard_symbol;

			*module++ = '\0';

			if (strcmp(curr_map->dso->short_name, module)) {
				if (curr_map != initial_map &&
				    dso->kernel == DSO_TYPE_GUEST_KERNEL &&
				    machine__is_default_guest(machine)) {
					/*
					 * We assume all symbols of a module are
					 * contiguous in kallsyms, so curr_map
					 * points to a module and all its
					 * symbols are in its kmap. Mark it as
					 * loaded.
					 */
					dso__set_loaded(curr_map->dso);
				}

				curr_map = map_groups__find_by_name(kmaps, module);
				if (curr_map == NULL) {
					pr_debug("%s/proc/{kallsyms,modules} "
						 "inconsistency while looking "
						 "for \"%s\" module!\n",
						 machine->root_dir, module);
					curr_map = initial_map;
					goto discard_symbol;
				}

				if (curr_map->dso->loaded &&
				    !machine__is_default_guest(machine))
					goto discard_symbol;
			}
			/*
			 * So that we look just like we get from .ko files,
			 * i.e. not prelinked, relative to initial_map->start.
			 */
			pos->start = curr_map->map_ip(curr_map, pos->start);
			pos->end = curr_map->map_ip(curr_map, pos->end);
		} else if (x86_64 && is_entry_trampoline(pos->name)) {
			/*
			 * These symbols are not needed anymore since the
			 * trampoline maps refer to the text section and its
			 * symbols instead. Avoid having to deal with
			 * relocations, and the assumption that the first symbol
			 * is the start of kernel text, by simply removing the
			 * symbols at this point.
			 */
			goto discard_symbol;
		} else if (curr_map != initial_map) {
			char dso_name[PATH_MAX];
			struct dso *ndso;

			if (delta) {
				/* Kernel was relocated at boot time */
				pos->start -= delta;
				pos->end -= delta;
			}

			if (count == 0) {
				curr_map = initial_map;
				goto add_symbol;
			}

			if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
				snprintf(dso_name, sizeof(dso_name),
					 "[guest.kernel].%d",
					 kernel_range++);
			else
				snprintf(dso_name, sizeof(dso_name),
					 "[kernel].%d",
					 kernel_range++);

			ndso = dso__new(dso_name);
			if (ndso == NULL)
				return -1;

			ndso->kernel = dso->kernel;

			curr_map = map__new2(pos->start, ndso);
			if (curr_map == NULL) {
				dso__put(ndso);
				return -1;
			}

			curr_map->map_ip = curr_map->unmap_ip = identity__map_ip;
			map_groups__insert(kmaps, curr_map);
			++kernel_range;
		} else if (delta) {
			/* Kernel was relocated at boot time */
			pos->start -= delta;
			pos->end -= delta;
		}
add_symbol:
		if (curr_map != initial_map) {
			rb_erase_cached(&pos->rb_node, root);
			symbols__insert(&curr_map->dso->symbols, pos);
			++moved;
		} else
			++count;

		continue;
discard_symbol:
		rb_erase_cached(&pos->rb_node, root);
		symbol__delete(pos);
	}

	if (curr_map != initial_map &&
	    dso->kernel == DSO_TYPE_GUEST_KERNEL &&
	    machine__is_default_guest(kmaps->machine)) {
		dso__set_loaded(curr_map->dso);
	}

	return count + moved;
}

bool symbol__restricted_filename(const char *filename,
				 const char *restricted_filename)
{
	bool restricted = false;

	if (symbol_conf.kptr_restrict) {
		char *r = realpath(filename, NULL);

		if (r != NULL) {
			restricted = strcmp(r, restricted_filename) == 0;
			free(r);
			return restricted;
		}
	}

	return restricted;
}

struct module_info {
	struct rb_node rb_node;
	char *name;
	u64 start;
};

static void add_module(struct module_info *mi, struct rb_root *modules)
{
	struct rb_node **p = &modules->rb_node;
	struct rb_node *parent = NULL;
	struct module_info *m;

	while (*p != NULL) {
		parent = *p;
		m = rb_entry(parent, struct module_info, rb_node);
		if (strcmp(mi->name, m->name) < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&mi->rb_node, parent, p);
	rb_insert_color(&mi->rb_node, modules);
}

static void delete_modules(struct rb_root *modules)
{
	struct module_info *mi;
	struct rb_node *next = rb_first(modules);

	while (next) {
		mi = rb_entry(next, struct module_info, rb_node);
		next = rb_next(&mi->rb_node);
		rb_erase(&mi->rb_node, modules);
		zfree(&mi->name);
		free(mi);
	}
}

static struct module_info *find_module(const char *name,
				       struct rb_root *modules)
{
	struct rb_node *n = modules->rb_node;

	while (n) {
		struct module_info *m;
		int cmp;

		m = rb_entry(n, struct module_info, rb_node);
		cmp = strcmp(name, m->name);
		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return m;
	}

	return NULL;
}

static int __read_proc_modules(void *arg, const char *name, u64 start,
			       u64 size __maybe_unused)
{
	struct rb_root *modules = arg;
	struct module_info *mi;

	mi = zalloc(sizeof(struct module_info));
	if (!mi)
		return -ENOMEM;

	mi->name = strdup(name);
	mi->start = start;

	if (!mi->name) {
		free(mi);
		return -ENOMEM;
	}

	add_module(mi, modules);

	return 0;
}

static int read_proc_modules(const char *filename, struct rb_root *modules)
{
	if (symbol__restricted_filename(filename, "/proc/modules"))
		return -1;

	if (modules__parse(filename, modules, __read_proc_modules)) {
		delete_modules(modules);
		return -1;
	}

	return 0;
}

int compare_proc_modules(const char *from, const char *to)
{
	struct rb_root from_modules = RB_ROOT;
	struct rb_root to_modules = RB_ROOT;
	struct rb_node *from_node, *to_node;
	struct module_info *from_m, *to_m;
	int ret = -1;

	if (read_proc_modules(from, &from_modules))
		return -1;

	if (read_proc_modules(to, &to_modules))
		goto out_delete_from;

	from_node = rb_first(&from_modules);
	to_node = rb_first(&to_modules);
	while (from_node) {
		if (!to_node)
			break;

		from_m = rb_entry(from_node, struct module_info, rb_node);
		to_m = rb_entry(to_node, struct module_info, rb_node);

		if (from_m->start != to_m->start ||
		    strcmp(from_m->name, to_m->name))
			break;

		from_node = rb_next(from_node);
		to_node = rb_next(to_node);
	}

	if (!from_node && !to_node)
		ret = 0;

	delete_modules(&to_modules);
out_delete_from:
	delete_modules(&from_modules);

	return ret;
}

struct map *map_groups__first(struct map_groups *mg)
{
	return maps__first(&mg->maps);
}

static int do_validate_kcore_modules(const char *filename,
				     struct map_groups *kmaps)
{
	struct rb_root modules = RB_ROOT;
	struct map *old_map;
	int err;

	err = read_proc_modules(filename, &modules);
	if (err)
		return err;

	old_map = map_groups__first(kmaps);
	while (old_map) {
		struct map *next = map_groups__next(old_map);
		struct module_info *mi;

		if (!__map__is_kmodule(old_map)) {
			old_map = next;
			continue;
		}

		/* Module must be in memory at the same address */
		mi = find_module(old_map->dso->short_name, &modules);
		if (!mi || mi->start != old_map->start) {
			err = -EINVAL;
			goto out;
		}

		old_map = next;
	}
out:
	delete_modules(&modules);
	return err;
}
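/*
 * For reference, the /proc/modules lines consumed by modules__parse()
 * and __read_proc_modules() look like this (values are examples only):
 *
 *	some_module 16384 1 - Live 0xffffffffc04d0000
 *
 * modules__parse() takes the name from the first field, the size from
 * the second, and the load address from the final "0x..." token, which
 * is why it searches for the last 'x' with strrchr().
 */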
/*
 * If kallsyms is referenced by name then we look for filename in the same
 * directory.
 */
static bool filename_from_kallsyms_filename(char *filename,
					    const char *base_name,
					    const char *kallsyms_filename)
{
	char *name;

	strcpy(filename, kallsyms_filename);
	name = strrchr(filename, '/');
	if (!name)
		return false;

	name += 1;

	if (!strcmp(name, "kallsyms")) {
		strcpy(name, base_name);
		return true;
	}

	return false;
}

static int validate_kcore_modules(const char *kallsyms_filename,
				  struct map *map)
{
	struct map_groups *kmaps = map__kmaps(map);
	char modules_filename[PATH_MAX];

	if (!kmaps)
		return -EINVAL;

	if (!filename_from_kallsyms_filename(modules_filename, "modules",
					     kallsyms_filename))
		return -EINVAL;

	if (do_validate_kcore_modules(modules_filename, kmaps))
		return -EINVAL;

	return 0;
}

static int validate_kcore_addresses(const char *kallsyms_filename,
				    struct map *map)
{
	struct kmap *kmap = map__kmap(map);

	if (!kmap)
		return -EINVAL;

	if (kmap->ref_reloc_sym && kmap->ref_reloc_sym->name) {
		u64 start;

		if (kallsyms__get_function_start(kallsyms_filename,
						 kmap->ref_reloc_sym->name, &start))
			return -ENOENT;
		if (start != kmap->ref_reloc_sym->addr)
			return -EINVAL;
	}

	return validate_kcore_modules(kallsyms_filename, map);
}

struct kcore_mapfn_data {
	struct dso *dso;
	struct list_head maps;
};

static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data)
{
	struct kcore_mapfn_data *md = data;
	struct map *map;

	map = map__new2(start, md->dso);
	if (map == NULL)
		return -ENOMEM;

	map->end = map->start + len;
	map->pgoff = pgoff;

	list_add(&map->node, &md->maps);

	return 0;
}

/*
 * Merges map into map_groups by splitting the new map
 * within the existing map regions.
 */
int map_groups__merge_in(struct map_groups *kmaps, struct map *new_map)
{
	struct map *old_map;
	LIST_HEAD(merged);

	for (old_map = map_groups__first(kmaps); old_map;
	     old_map = map_groups__next(old_map)) {

		/* no overlap with this one */
		if (new_map->end < old_map->start ||
		    new_map->start >= old_map->end)
			continue;

		if (new_map->start < old_map->start) {
			/*
			 * |new......
			 *       |old....
			 */
			if (new_map->end < old_map->end) {
				/*
				 * |new......|     -> |new..|
				 *       |old....| ->       |old....|
				 */
				new_map->end = old_map->start;
			} else {
				/*
				 * |new.............| -> |new..|       |new..|
				 *       |old....|    ->       |old....|
				 */
				struct map *m = map__clone(new_map);

				if (!m)
					return -ENOMEM;

				m->end = old_map->start;
				list_add_tail(&m->node, &merged);
				new_map->start = old_map->end;
			}
		} else {
			/*
			 *      |new......
			 * |old....
			 */
			if (new_map->end < old_map->end) {
				/*
				 *      |new..|   -> x
				 * |old.........| -> |old.........|
				 */
				map__put(new_map);
				new_map = NULL;
				break;
			} else {
				/*
				 *      |new......| ->         |new...|
				 * |old....|        -> |old....|
				 */
				new_map->start = old_map->end;
			}
		}
	}

	while (!list_empty(&merged)) {
		old_map = list_entry(merged.next, struct map, node);
		list_del_init(&old_map->node);
		map_groups__insert(kmaps, old_map);
		map__put(old_map);
	}

	if (new_map) {
		map_groups__insert(kmaps, new_map);
		map__put(new_map);
	}
	return 0;
}

static int dso__load_kcore(struct dso *dso, struct map *map,
			   const char *kallsyms_filename)
{
	struct map_groups *kmaps = map__kmaps(map);
	struct kcore_mapfn_data md;
	struct map *old_map, *new_map, *replacement_map = NULL;
	struct machine *machine;
	bool is_64_bit;
	int err, fd;
	char kcore_filename[PATH_MAX];
	u64 stext;

	if (!kmaps)
		return -EINVAL;

	machine = kmaps->machine;

	/* This function requires that the map is the kernel map */
	if (!__map__is_kernel(map))
		return -EINVAL;

	if (!filename_from_kallsyms_filename(kcore_filename, "kcore",
					     kallsyms_filename))
		return -EINVAL;

	/* Modules and kernel must be present at their original addresses */
	if (validate_kcore_addresses(kallsyms_filename, map))
		return -EINVAL;

	md.dso = dso;
	INIT_LIST_HEAD(&md.maps);

	fd = open(kcore_filename, O_RDONLY);
	if (fd < 0) {
		pr_debug("Failed to open %s. Note /proc/kcore requires CAP_SYS_RAWIO capability to access.\n",
			 kcore_filename);
		return -EINVAL;
	}

	/* Read new maps into temporary lists */
	err = file__read_maps(fd, map->prot & PROT_EXEC, kcore_mapfn, &md,
			      &is_64_bit);
	if (err)
		goto out_err;
	dso->is_64_bit = is_64_bit;

	if (list_empty(&md.maps)) {
		err = -EINVAL;
		goto out_err;
	}

	/* Remove old maps */
	old_map = map_groups__first(kmaps);
	while (old_map) {
		struct map *next = map_groups__next(old_map);

		/*
		 * We need to preserve eBPF maps even if they are
		 * covered by kcore, because we need to access
		 * eBPF dso for source data.
		 */
		if (old_map != map && !__map__is_bpf_prog(old_map))
			map_groups__remove(kmaps, old_map);
		old_map = next;
	}
	machine->trampolines_mapped = false;

	/* Find the kernel map using the '_stext' symbol */
	if (!kallsyms__get_function_start(kallsyms_filename, "_stext", &stext)) {
		list_for_each_entry(new_map, &md.maps, node) {
			if (stext >= new_map->start && stext < new_map->end) {
				replacement_map = new_map;
				break;
			}
		}
	}

	if (!replacement_map)
		replacement_map = list_entry(md.maps.next, struct map, node);

	/* Add new maps */
	while (!list_empty(&md.maps)) {
		new_map = list_entry(md.maps.next, struct map, node);
		list_del_init(&new_map->node);
		if (new_map == replacement_map) {
			map->start = new_map->start;
			map->end = new_map->end;
			map->pgoff = new_map->pgoff;
			map->map_ip = new_map->map_ip;
			map->unmap_ip = new_map->unmap_ip;
			/* Ensure maps are correctly ordered */
			map__get(map);
			map_groups__remove(kmaps, map);
			map_groups__insert(kmaps, map);
			map__put(map);
			map__put(new_map);
		} else {
			/*
			 * Merge kcore map into existing maps,
			 * and ensure that current maps (eBPF)
			 * stay intact.
			 */
			if (map_groups__merge_in(kmaps, new_map))
				goto out_err;
		}
	}

	if (machine__is(machine, "x86_64")) {
		u64 addr;

		/*
		 * If one of the corresponding symbols is there, assume the
		 * entry trampoline maps are too.
		 */
		if (!kallsyms__get_function_start(kallsyms_filename,
						  ENTRY_TRAMPOLINE_NAME,
						  &addr))
			machine->trampolines_mapped = true;
	}

	/*
	 * Set the data type and long name so that kcore can be read via
	 * dso__data_read_addr().
	 */
	if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
		dso->binary_type = DSO_BINARY_TYPE__GUEST_KCORE;
	else
		dso->binary_type = DSO_BINARY_TYPE__KCORE;
	dso__set_long_name(dso, strdup(kcore_filename), true);

	close(fd);

	if (map->prot & PROT_EXEC)
		pr_debug("Using %s for kernel object code\n", kcore_filename);
	else
		pr_debug("Using %s for kernel data\n", kcore_filename);

	return 0;

out_err:
	while (!list_empty(&md.maps)) {
		map = list_entry(md.maps.next, struct map, node);
		list_del_init(&map->node);
		map__put(map);
	}
	close(fd);
	return -EINVAL;
}

/*
 * If the kernel is relocated at boot time, kallsyms won't match. Compute the
 * delta based on the relocation reference symbol.
 */
static int kallsyms__delta(struct kmap *kmap, const char *filename, u64 *delta)
{
	u64 addr;

	if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->name)
		return 0;

	if (kallsyms__get_function_start(filename, kmap->ref_reloc_sym->name, &addr))
		return -1;

	*delta = addr - kmap->ref_reloc_sym->addr;
	return 0;
}

int __dso__load_kallsyms(struct dso *dso, const char *filename,
			 struct map *map, bool no_kcore)
{
	struct kmap *kmap = map__kmap(map);
	u64 delta = 0;

	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
		return -1;

	if (!kmap || !kmap->kmaps)
		return -1;

	if (dso__load_all_kallsyms(dso, filename) < 0)
		return -1;

	if (kallsyms__delta(kmap, filename, &delta))
		return -1;

	symbols__fixup_end(&dso->symbols);
	symbols__fixup_duplicate(&dso->symbols);

	if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
		dso->symtab_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
	else
		dso->symtab_type = DSO_BINARY_TYPE__KALLSYMS;

	if (!no_kcore && !dso__load_kcore(dso, map, filename))
		return map_groups__split_kallsyms_for_kcore(kmap->kmaps, dso);
	else
		return map_groups__split_kallsyms(kmap->kmaps, dso, delta, map);
}

int dso__load_kallsyms(struct dso *dso, const char *filename,
		       struct map *map)
{
	return __dso__load_kallsyms(dso, filename, map, false);
}

static int dso__load_perf_map(const char *map_path, struct dso *dso)
{
	char *line = NULL;
	size_t n;
	FILE *file;
	int nr_syms = 0;

	file = fopen(map_path, "r");
	if (file == NULL)
		goto out_failure;

	while (!feof(file)) {
		u64 start, size;
		struct symbol *sym;
		int line_len, len;

		line_len = getline(&line, &n, file);
		if (line_len < 0)
			break;

		if (!line)
			goto out_failure;

		line[--line_len] = '\0'; /* \n */

		len = hex2u64(line, &start);

		len++;
		if (len + 2 >= line_len)
			continue;

		len += hex2u64(line + len, &size);

		len++;
		if (len + 2 >= line_len)
			continue;

		sym = symbol__new(start, size, STB_GLOBAL, STT_FUNC, line + len);

		if (sym == NULL)
			goto out_delete_line;

		symbols__insert(&dso->symbols, sym);
		nr_syms++;
	}

	free(line);
	fclose(file);

	return nr_syms;

out_delete_line:
	free(line);
out_failure:
	return -1;
}
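/*
 * The /tmp/perf-<pid>.map files parsed above use one "start size name"
 * line per JITted function, with start and size in hex (the values
 * below are examples only):
 *
 *	7f8bfc012000 180 java.lang.String::hashCode
 *	7f8bfc012200 2c0 my_jitted_function
 */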
static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod,
					   enum dso_binary_type type)
{
	switch (type) {
	case DSO_BINARY_TYPE__JAVA_JIT:
	case DSO_BINARY_TYPE__DEBUGLINK:
	case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
	case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
	case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
	case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
	case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
		return !kmod && dso->kernel == DSO_TYPE_USER;

	case DSO_BINARY_TYPE__KALLSYMS:
	case DSO_BINARY_TYPE__VMLINUX:
	case DSO_BINARY_TYPE__KCORE:
		return dso->kernel == DSO_TYPE_KERNEL;

	case DSO_BINARY_TYPE__GUEST_KALLSYMS:
	case DSO_BINARY_TYPE__GUEST_VMLINUX:
	case DSO_BINARY_TYPE__GUEST_KCORE:
		return dso->kernel == DSO_TYPE_GUEST_KERNEL;

	case DSO_BINARY_TYPE__GUEST_KMODULE:
	case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
		/*
		 * kernel modules know their symtab type - it's set when
		 * creating a module dso in machine__findnew_module_map().
		 */
		return kmod && dso->symtab_type == type;

	case DSO_BINARY_TYPE__BUILD_ID_CACHE:
	case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
		return true;

	case DSO_BINARY_TYPE__BPF_PROG_INFO:
	case DSO_BINARY_TYPE__NOT_FOUND:
	default:
		return false;
	}
}

/*
 * Checks for the existence of the perf-<pid>.map file in two different
 * locations. First, if the process is in a separate mount namespace, check in
 * that namespace using the pid of the innermost pid namespace. If it's not in
 * a namespace, or the file can't be found there, try in the mount namespace
 * of the tracing process using our view of its pid.
 */
static int dso__find_perf_map(char *filebuf, size_t bufsz,
			      struct nsinfo **nsip)
{
	struct nscookie nsc;
	struct nsinfo *nsi;
	struct nsinfo *nnsi;
	int rc = -1;

	nsi = *nsip;

	if (nsi->need_setns) {
		snprintf(filebuf, bufsz, "/tmp/perf-%d.map", nsi->nstgid);
		nsinfo__mountns_enter(nsi, &nsc);
		rc = access(filebuf, R_OK);
		nsinfo__mountns_exit(&nsc);
		if (rc == 0)
			return rc;
	}

	nnsi = nsinfo__copy(nsi);
	if (nnsi) {
		nsinfo__put(nsi);

		nnsi->need_setns = false;
		snprintf(filebuf, bufsz, "/tmp/perf-%d.map", nnsi->tgid);
		*nsip = nnsi;
		rc = 0;
	}

	return rc;
}

int dso__load(struct dso *dso, struct map *map)
{
	char *name;
	int ret = -1;
	u_int i;
	struct machine *machine;
	char *root_dir = (char *) "";
	int ss_pos = 0;
	struct symsrc ss_[2];
	struct symsrc *syms_ss = NULL, *runtime_ss = NULL;
	bool kmod;
	bool perfmap;
	unsigned char build_id[BUILD_ID_SIZE];
	struct nscookie nsc;
	char newmapname[PATH_MAX];
	const char *map_path = dso->long_name;

	perfmap = strncmp(dso->name, "/tmp/perf-", 10) == 0;
	if (perfmap) {
		if (dso->nsinfo && (dso__find_perf_map(newmapname,
		    sizeof(newmapname), &dso->nsinfo) == 0)) {
			map_path = newmapname;
		}
	}

	nsinfo__mountns_enter(dso->nsinfo, &nsc);
	pthread_mutex_lock(&dso->lock);

	/* check again under the dso->lock */
	if (dso__loaded(dso)) {
		ret = 1;
		goto out;
	}

	if (map->groups && map->groups->machine)
		machine = map->groups->machine;
	else
		machine = NULL;

	if (dso->kernel) {
		if (dso->kernel == DSO_TYPE_KERNEL)
			ret = dso__load_kernel_sym(dso, map);
		else if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
			ret = dso__load_guest_kernel_sym(dso, map);

		if (machine__is(machine, "x86_64"))
			machine__map_x86_64_entry_trampolines(machine, dso);
		goto out;
	}

	dso->adjust_symbols = 0;

	if (perfmap) {
		ret = dso__load_perf_map(map_path, dso);
		dso->symtab_type = ret > 0 ? DSO_BINARY_TYPE__JAVA_JIT :
					     DSO_BINARY_TYPE__NOT_FOUND;
		goto out;
	}

	if (machine)
		root_dir = machine->root_dir;

	name = malloc(PATH_MAX);
	if (!name)
		goto out;

	kmod = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
		dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
		dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE ||
		dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;

	/*
	 * Read the build id if possible. This is required for
	 * DSO_BINARY_TYPE__BUILDID_DEBUGINFO to work
	 */
	if (!dso->has_build_id &&
	    is_regular_file(dso->long_name)) {
		__symbol__join_symfs(name, PATH_MAX, dso->long_name);
		if (filename__read_build_id(name, build_id, BUILD_ID_SIZE) > 0)
			dso__set_build_id(dso, build_id);
	}

	/*
	 * Iterate over candidate debug images.
	 * Keep track of "interesting" ones (those which have a symtab, dynsym,
	 * and/or opd section) for processing.
	 */
	for (i = 0; i < DSO_BINARY_TYPE__SYMTAB_CNT; i++) {
		struct symsrc *ss = &ss_[ss_pos];
		bool next_slot = false;
		bool is_reg;
		bool nsexit;
		int sirc = -1;

		enum dso_binary_type symtab_type = binary_type_symtab[i];

		nsexit = (symtab_type == DSO_BINARY_TYPE__BUILD_ID_CACHE ||
		    symtab_type == DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO);

		if (!dso__is_compatible_symtab_type(dso, kmod, symtab_type))
			continue;

		if (dso__read_binary_type_filename(dso, symtab_type,
						   root_dir, name, PATH_MAX))
			continue;

		if (nsexit)
			nsinfo__mountns_exit(&nsc);

		is_reg = is_regular_file(name);
		if (is_reg)
			sirc = symsrc__init(ss, dso, name, symtab_type);

		if (nsexit)
			nsinfo__mountns_enter(dso->nsinfo, &nsc);

		if (!is_reg || sirc < 0)
			continue;

		if (!syms_ss && symsrc__has_symtab(ss)) {
			syms_ss = ss;
			next_slot = true;
			if (!dso->symsrc_filename)
				dso->symsrc_filename = strdup(name);
		}

		if (!runtime_ss && symsrc__possibly_runtime(ss)) {
			runtime_ss = ss;
			next_slot = true;
		}

		if (next_slot) {
			ss_pos++;

			if (syms_ss && runtime_ss)
				break;
		} else {
			symsrc__destroy(ss);
		}

	}

	if (!runtime_ss && !syms_ss)
		goto out_free;

	if (runtime_ss && !syms_ss) {
		syms_ss = runtime_ss;
	}

	/* We'll have to hope for the best */
	if (!runtime_ss && syms_ss)
		runtime_ss = syms_ss;

	if (syms_ss)
		ret = dso__load_sym(dso, map, syms_ss, runtime_ss, kmod);
	else
		ret = -1;

	if (ret > 0) {
		int nr_plt;

		nr_plt = dso__synthesize_plt_symbols(dso, runtime_ss);
		if (nr_plt > 0)
			ret += nr_plt;
	}

	for (; ss_pos > 0; ss_pos--)
		symsrc__destroy(&ss_[ss_pos - 1]);
out_free:
	free(name);
	if (ret < 0 && strstr(dso->name, " (deleted)") != NULL)
		ret = 0;
out:
	dso__set_loaded(dso);
	pthread_mutex_unlock(&dso->lock);
	nsinfo__mountns_exit(&nsc);

	return ret;
}

struct map *map_groups__find_by_name(struct map_groups *mg, const char *name)
{
	struct maps *maps = &mg->maps;
	struct map *map;
	struct rb_node *node;

	down_read(&maps->lock);

	for (node = maps->names.rb_node; node; ) {
		int rc;

		map = rb_entry(node, struct map, rb_node_name);

		rc = strcmp(map->dso->short_name, name);
		if (rc < 0)
			node = node->rb_left;
		else if (rc > 0)
			node = node->rb_right;
		else
			goto out_unlock;
	}

	map = NULL;

out_unlock:
	up_read(&maps->lock);
	return map;
}

int dso__load_vmlinux(struct dso *dso, struct map *map,
		      const char *vmlinux, bool vmlinux_allocated)
{
	int err = -1;
	struct symsrc ss;
	char symfs_vmlinux[PATH_MAX];
	enum dso_binary_type symtab_type;

	if (vmlinux[0] == '/')
		snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s", vmlinux);
	else
		symbol__join_symfs(symfs_vmlinux, vmlinux);

	if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
		symtab_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
	else
		symtab_type = DSO_BINARY_TYPE__VMLINUX;

	if (symsrc__init(&ss, dso, symfs_vmlinux, symtab_type))
		return -1;

	err = dso__load_sym(dso, map, &ss, &ss, 0);
	symsrc__destroy(&ss);

	if (err > 0) {
		if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
			dso->binary_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
		else
			dso->binary_type = DSO_BINARY_TYPE__VMLINUX;
		dso__set_long_name(dso, vmlinux, vmlinux_allocated);
		dso__set_loaded(dso);
		pr_debug("Using %s for symbols\n", symfs_vmlinux);
	}

	return err;
}

int dso__load_vmlinux_path(struct dso *dso, struct map *map)
{
	int i, err = 0;
	char *filename = NULL;

	pr_debug("Looking at the vmlinux_path (%d entries long)\n",
		 vmlinux_path__nr_entries + 1);

	for (i = 0; i < vmlinux_path__nr_entries; ++i) {
		err = dso__load_vmlinux(dso, map, vmlinux_path[i], false);
		if (err > 0)
			goto out;
	}

	if (!symbol_conf.ignore_vmlinux_buildid)
		filename = dso__build_id_filename(dso, NULL, 0, false);
	if (filename != NULL) {
		err = dso__load_vmlinux(dso, map, filename, true);
		if (err > 0)
			goto out;
		free(filename);
	}
out:
	return err;
}

static bool visible_dir_filter(const char *name, struct dirent *d)
{
	if (d->d_type != DT_DIR)
		return false;
	return lsdir_no_dot_filter(name, d);
}

static int find_matching_kcore(struct map *map, char *dir, size_t dir_sz)
{
	char kallsyms_filename[PATH_MAX];
	int ret = -1;
	struct strlist *dirs;
	struct str_node *nd;

	dirs = lsdir(dir, visible_dir_filter);
	if (!dirs)
		return -1;

	strlist__for_each_entry(nd, dirs) {
		scnprintf(kallsyms_filename, sizeof(kallsyms_filename),
			  "%s/%s/kallsyms", dir, nd->s);
		if (!validate_kcore_addresses(kallsyms_filename, map)) {
			strlcpy(dir, kallsyms_filename, dir_sz);
			ret = 0;
			break;
		}
	}

	strlist__delete(dirs);

	return ret;
}
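/*
 * A sketch of the on-disk layout that find_matching_kcore() walks when
 * called from dso__find_kallsyms() (the timestamp-style directory name
 * below is only an example of what "perf buildid-cache" creates):
 *
 *	<buildid_dir>/[kcore]/<sbuild-id>/20191201123456/kallsyms
 *	<buildid_dir>/[kcore]/<sbuild-id>/20191201123456/kcore
 *	<buildid_dir>/[kcore]/<sbuild-id>/20191201123456/modules
 *
 * The first subdirectory whose kallsyms still passes
 * validate_kcore_addresses() against the current maps is selected.
 */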
/*
 * Use open(O_RDONLY) to check readability directly instead of access(R_OK)
 * since access(R_OK) only checks with the real UID/GID but open() uses the
 * effective UID/GID and actual capabilities (e.g. /proc/kcore requires
 * CAP_SYS_RAWIO).
 */
static bool filename__readable(const char *file)
{
	int fd = open(file, O_RDONLY);
	if (fd < 0)
		return false;
	close(fd);
	return true;
}

static char *dso__find_kallsyms(struct dso *dso, struct map *map)
{
	u8 host_build_id[BUILD_ID_SIZE];
	char sbuild_id[SBUILD_ID_SIZE];
	bool is_host = false;
	char path[PATH_MAX];

	if (!dso->has_build_id) {
		/*
		 * Last resort, if we don't have a build-id and couldn't find
		 * any vmlinux file, try the running kernel kallsyms table.
		 */
		goto proc_kallsyms;
	}

	if (sysfs__read_build_id("/sys/kernel/notes", host_build_id,
				 sizeof(host_build_id)) == 0)
		is_host = dso__build_id_equal(dso, host_build_id);

	/* Try a fast path for /proc/kallsyms if possible */
	if (is_host) {
		/*
		 * Do not check the build-id cache, unless we know we cannot use
		 * /proc/kcore or module maps don't match to /proc/kallsyms.
		 * To check readability of /proc/kcore, do not use access(R_OK)
		 * since /proc/kcore requires CAP_SYS_RAWIO to read and access
		 * can't check it.
		 */
		if (filename__readable("/proc/kcore") &&
		    !validate_kcore_addresses("/proc/kallsyms", map))
			goto proc_kallsyms;
	}

	build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);

	/* Find kallsyms in build-id cache with kcore */
	scnprintf(path, sizeof(path), "%s/%s/%s",
		  buildid_dir, DSO__NAME_KCORE, sbuild_id);

	if (!find_matching_kcore(map, path, sizeof(path)))
		return strdup(path);

	/* Use current /proc/kallsyms if possible */
	if (is_host) {
proc_kallsyms:
		return strdup("/proc/kallsyms");
	}

	/* Finally, find a cache of kallsyms */
	if (!build_id_cache__kallsyms_path(sbuild_id, path, sizeof(path))) {
		pr_err("No kallsyms or vmlinux with build-id %s was found\n",
		       sbuild_id);
		return NULL;
	}

	return strdup(path);
}

static int dso__load_kernel_sym(struct dso *dso, struct map *map)
{
	int err;
	const char *kallsyms_filename = NULL;
	char *kallsyms_allocated_filename = NULL;
	/*
	 * Step 1: if the user specified a kallsyms or vmlinux filename, use
	 * it and only it, reporting errors to the user if it cannot be used.
	 *
	 * For instance, try to analyse an ARM perf.data file _without_ a
	 * build-id, or if the user specifies the wrong path to the right
	 * vmlinux file, obviously we can't fallback to another vmlinux (an
	 * x86_64 one, on the machine where analysis is being performed, say),
	 * or worse, /proc/kallsyms.
	 *
	 * If the specified file _has_ a build-id and there is a build-id
	 * section in the perf.data file, we will still do the expected
	 * validation in dso__load_vmlinux and will bail out if they don't
	 * match.
	 */
	if (symbol_conf.kallsyms_name != NULL) {
		kallsyms_filename = symbol_conf.kallsyms_name;
		goto do_kallsyms;
	}

	if (!symbol_conf.ignore_vmlinux && symbol_conf.vmlinux_name != NULL) {
		return dso__load_vmlinux(dso, map, symbol_conf.vmlinux_name, false);
	}

	if (!symbol_conf.ignore_vmlinux && vmlinux_path != NULL) {
		err = dso__load_vmlinux_path(dso, map);
		if (err > 0)
			return err;
	}

	/* do not try local files if a symfs was given */
	if (symbol_conf.symfs[0] != 0)
		return -1;

	kallsyms_allocated_filename = dso__find_kallsyms(dso, map);
	if (!kallsyms_allocated_filename)
		return -1;

	kallsyms_filename = kallsyms_allocated_filename;

do_kallsyms:
	err = dso__load_kallsyms(dso, kallsyms_filename, map);
	if (err > 0)
		pr_debug("Using %s for symbols\n", kallsyms_filename);
	free(kallsyms_allocated_filename);

	if (err > 0 && !dso__is_kcore(dso)) {
		dso->binary_type = DSO_BINARY_TYPE__KALLSYMS;
		dso__set_long_name(dso, DSO__NAME_KALLSYMS, false);
		map__fixup_start(map);
		map__fixup_end(map);
	}

	return err;
}

static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map)
{
	int err;
	const char *kallsyms_filename = NULL;
	struct machine *machine;
	char path[PATH_MAX];

	if (!map->groups) {
		pr_debug("Guest kernel map has no pointer to its groups\n");
		return -1;
	}
	machine = map->groups->machine;

	if (machine__is_default_guest(machine)) {
		/*
		 * if the user specified a vmlinux filename, use it and only
		 * it, reporting errors to the user if it cannot be used.
		 * Or use the guest_kallsyms file the user passed on the
		 * command line.
		 */
		if (symbol_conf.default_guest_vmlinux_name != NULL) {
			err = dso__load_vmlinux(dso, map,
						symbol_conf.default_guest_vmlinux_name,
						false);
			return err;
		}

		kallsyms_filename = symbol_conf.default_guest_kallsyms;
		if (!kallsyms_filename)
			return -1;
	} else {
		sprintf(path, "%s/proc/kallsyms", machine->root_dir);
		kallsyms_filename = path;
	}

	err = dso__load_kallsyms(dso, kallsyms_filename, map);
	if (err > 0)
		pr_debug("Using %s for symbols\n", kallsyms_filename);
	if (err > 0 && !dso__is_kcore(dso)) {
		dso->binary_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
		dso__set_long_name(dso, machine->mmap_name, false);
		map__fixup_start(map);
		map__fixup_end(map);
	}

	return err;
}

static void vmlinux_path__exit(void)
{
	while (--vmlinux_path__nr_entries >= 0)
		zfree(&vmlinux_path[vmlinux_path__nr_entries]);
	vmlinux_path__nr_entries = 0;

	zfree(&vmlinux_path);
}

static const char * const vmlinux_paths[] = {
	"vmlinux",
	"/boot/vmlinux"
};

static const char * const vmlinux_paths_upd[] = {
	"/boot/vmlinux-%s",
	"/usr/lib/debug/boot/vmlinux-%s",
	"/lib/modules/%s/build/vmlinux",
	"/usr/lib/debug/lib/modules/%s/vmlinux",
	"/usr/lib/debug/boot/vmlinux-%s.debug"
};

static int vmlinux_path__add(const char *new_entry)
{
	vmlinux_path[vmlinux_path__nr_entries] = strdup(new_entry);
	if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
		return -1;
	++vmlinux_path__nr_entries;

	return 0;
}
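/*
 * With a kernel version of, say, "5.3.0" (example value), the template
 * lists above expand to candidates such as:
 *
 *	vmlinux
 *	/boot/vmlinux
 *	/boot/vmlinux-5.3.0
 *	/usr/lib/debug/boot/vmlinux-5.3.0
 *	/lib/modules/5.3.0/build/vmlinux
 *	/usr/lib/debug/lib/modules/5.3.0/vmlinux
 *	/usr/lib/debug/boot/vmlinux-5.3.0.debug
 *
 * which dso__load_vmlinux_path() then tries in order.
 */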
static int vmlinux_path__init(struct perf_env *env)
{
	struct utsname uts;
	char bf[PATH_MAX];
	char *kernel_version;
	unsigned int i;

	vmlinux_path = malloc(sizeof(char *) * (ARRAY_SIZE(vmlinux_paths) +
			      ARRAY_SIZE(vmlinux_paths_upd)));
	if (vmlinux_path == NULL)
		return -1;

	for (i = 0; i < ARRAY_SIZE(vmlinux_paths); i++)
		if (vmlinux_path__add(vmlinux_paths[i]) < 0)
			goto out_fail;

	/* only try kernel version if no symfs was given */
	if (symbol_conf.symfs[0] != 0)
		return 0;

	if (env) {
		kernel_version = env->os_release;
	} else {
		if (uname(&uts) < 0)
			goto out_fail;

		kernel_version = uts.release;
	}

	for (i = 0; i < ARRAY_SIZE(vmlinux_paths_upd); i++) {
		snprintf(bf, sizeof(bf), vmlinux_paths_upd[i], kernel_version);
		if (vmlinux_path__add(bf) < 0)
			goto out_fail;
	}

	return 0;

out_fail:
	vmlinux_path__exit();
	return -1;
}

int setup_list(struct strlist **list, const char *list_str,
	       const char *list_name)
{
	if (list_str == NULL)
		return 0;

	*list = strlist__new(list_str, NULL);
	if (!*list) {
		pr_err("problems parsing %s list\n", list_name);
		return -1;
	}

	symbol_conf.has_filter = true;
	return 0;
}

int setup_intlist(struct intlist **list, const char *list_str,
		  const char *list_name)
{
	if (list_str == NULL)
		return 0;

	*list = intlist__new(list_str);
	if (!*list) {
		pr_err("problems parsing %s list\n", list_name);
		return -1;
	}
	return 0;
}

static bool symbol__read_kptr_restrict(void)
{
	bool value = false;
	FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r");

	if (fp != NULL) {
		char line[8];

		if (fgets(line, sizeof(line), fp) != NULL)
			value = ((geteuid() != 0) || (getuid() != 0)) ?
					(atoi(line) != 0) :
					(atoi(line) == 2);

		fclose(fp);
	}

	return value;
}

int symbol__annotation_init(void)
{
	if (symbol_conf.init_annotation)
		return 0;

	if (symbol_conf.initialized) {
		pr_err("Annotation needs to be initialized before symbol__init()\n");
		return -1;
	}

	symbol_conf.priv_size += sizeof(struct annotation);
	symbol_conf.init_annotation = true;
	return 0;
}
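/*
 * Memory layout implied by the priv_size bookkeeping above, for one
 * symbol__new() allocation (column widths are illustrative):
 *
 *	+----------------------------+---------------+-------------+
 *	| priv area (e.g. struct     | struct symbol | name[] + \0 |
 *	| annotation when enabled)   |               |             |
 *	+----------------------------+---------------+-------------+
 *	^ calloc() result            ^ pointer returned to callers
 *
 * symbol__delete() therefore frees (void *)sym - symbol_conf.priv_size,
 * and the annotation code finds its data immediately in front of the
 * struct symbol.
 */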
int symbol__init(struct perf_env *env)
{
	const char *symfs;

	if (symbol_conf.initialized)
		return 0;

	symbol_conf.priv_size = PERF_ALIGN(symbol_conf.priv_size, sizeof(u64));

	symbol__elf_init();

	if (symbol_conf.sort_by_name)
		symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) -
					  sizeof(struct symbol));

	if (symbol_conf.try_vmlinux_path && vmlinux_path__init(env) < 0)
		return -1;

	if (symbol_conf.field_sep && *symbol_conf.field_sep == '.') {
		pr_err("'.' is the only non-valid --field-separator argument\n");
		return -1;
	}

	if (setup_list(&symbol_conf.dso_list,
		       symbol_conf.dso_list_str, "dso") < 0)
		return -1;

	if (setup_list(&symbol_conf.comm_list,
		       symbol_conf.comm_list_str, "comm") < 0)
		goto out_free_dso_list;

	if (setup_intlist(&symbol_conf.pid_list,
			  symbol_conf.pid_list_str, "pid") < 0)
		goto out_free_comm_list;

	if (setup_intlist(&symbol_conf.tid_list,
			  symbol_conf.tid_list_str, "tid") < 0)
		goto out_free_pid_list;

	if (setup_list(&symbol_conf.sym_list,
		       symbol_conf.sym_list_str, "symbol") < 0)
		goto out_free_tid_list;

	if (setup_list(&symbol_conf.bt_stop_list,
		       symbol_conf.bt_stop_list_str, "symbol") < 0)
		goto out_free_sym_list;

	/*
	 * A path to symbols of "/" is identical to "",
	 * reset it here for simplicity.
	 */
	symfs = realpath(symbol_conf.symfs, NULL);
	if (symfs == NULL)
		symfs = symbol_conf.symfs;
	if (strcmp(symfs, "/") == 0)
		symbol_conf.symfs = "";
	if (symfs != symbol_conf.symfs)
		free((void *)symfs);

	symbol_conf.kptr_restrict = symbol__read_kptr_restrict();

	symbol_conf.initialized = true;
	return 0;

out_free_sym_list:
	strlist__delete(symbol_conf.sym_list);
out_free_tid_list:
	intlist__delete(symbol_conf.tid_list);
out_free_pid_list:
	intlist__delete(symbol_conf.pid_list);
out_free_comm_list:
	strlist__delete(symbol_conf.comm_list);
out_free_dso_list:
	strlist__delete(symbol_conf.dso_list);
	return -1;
}

void symbol__exit(void)
{
	if (!symbol_conf.initialized)
		return;
	strlist__delete(symbol_conf.bt_stop_list);
	strlist__delete(symbol_conf.sym_list);
	strlist__delete(symbol_conf.dso_list);
	strlist__delete(symbol_conf.comm_list);
	intlist__delete(symbol_conf.tid_list);
	intlist__delete(symbol_conf.pid_list);
	vmlinux_path__exit();
	symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL;
	symbol_conf.bt_stop_list = NULL;
	symbol_conf.initialized = false;
}

int symbol__config_symfs(const struct option *opt __maybe_unused,
			 const char *dir, int unset __maybe_unused)
{
	char *bf = NULL;
	int ret;

	symbol_conf.symfs = strdup(dir);
	if (symbol_conf.symfs == NULL)
		return -ENOMEM;

	/* skip the locally configured cache if a symfs is given, and
	 * config buildid dir to symfs/.debug
	 */
	ret = asprintf(&bf, "%s/%s", dir, ".debug");
	if (ret < 0)
		return -ENOMEM;

	set_buildid_dir(bf);

	free(bf);
	return 0;
}

struct mem_info *mem_info__get(struct mem_info *mi)
{
	if (mi)
		refcount_inc(&mi->refcnt);
	return mi;
}

void mem_info__put(struct mem_info *mi)
{
	if (mi && refcount_dec_and_test(&mi->refcnt))
		free(mi);
}

struct mem_info *mem_info__new(void)
{
	struct mem_info *mi = zalloc(sizeof(*mi));

	if (mi)
		refcount_set(&mi->refcnt, 1);
	return mi;
}

struct block_info *block_info__get(struct block_info *bi)
{
	if (bi)
		refcount_inc(&bi->refcnt);
	return bi;
}

void block_info__put(struct block_info *bi)
{
	if (bi && refcount_dec_and_test(&bi->refcnt))
		free(bi);
}

struct block_info *block_info__new(void)
{
	struct block_info *bi = zalloc(sizeof(*bi));

	if (bi)
		refcount_set(&bi->refcnt, 1);
	return bi;
}