// SPDX-License-Identifier: GPL-2.0
#include <dirent.h>
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/time64.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/param.h>
#include <fcntl.h>
#include <unistd.h>
#include <inttypes.h>
#include "annotate.h"
#include "build-id.h"
#include "cap.h"
#include "dso.h"
#include "util.h" // lsdir()
#include "debug.h"
#include "event.h"
#include "machine.h"
#include "map.h"
#include "symbol.h"
#include "map_symbol.h"
#include "mem-events.h"
#include "symsrc.h"
#include "strlist.h"
#include "intlist.h"
#include "namespaces.h"
#include "header.h"
#include "path.h"
#include <linux/ctype.h>
#include <linux/zalloc.h>

#include <elf.h>
#include <limits.h>
#include <symbol/kallsyms.h>
#include <sys/utsname.h>

static int dso__load_kernel_sym(struct dso *dso, struct map *map);
static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map);
static bool symbol__is_idle(const char *name);

int vmlinux_path__nr_entries;
char **vmlinux_path;

struct symbol_conf symbol_conf = {
	.nanosecs = false,
	.use_modules = true,
	.try_vmlinux_path = true,
	.demangle = true,
	.demangle_kernel = false,
	.cumulate_callchain = true,
	.time_quantum = 100 * NSEC_PER_MSEC, /* 100ms */
	.show_hist_headers = true,
	.symfs = "",
	.event_group = true,
	.inline_name = true,
	.res_sample = 0,
};

static enum dso_binary_type binary_type_symtab[] = {
	DSO_BINARY_TYPE__KALLSYMS,
	DSO_BINARY_TYPE__GUEST_KALLSYMS,
	DSO_BINARY_TYPE__JAVA_JIT,
	DSO_BINARY_TYPE__DEBUGLINK,
	DSO_BINARY_TYPE__BUILD_ID_CACHE,
	DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO,
	DSO_BINARY_TYPE__FEDORA_DEBUGINFO,
	DSO_BINARY_TYPE__UBUNTU_DEBUGINFO,
	DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
	DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
	DSO_BINARY_TYPE__GUEST_KMODULE,
	DSO_BINARY_TYPE__GUEST_KMODULE_COMP,
	DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE,
	DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP,
	DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
	DSO_BINARY_TYPE__NOT_FOUND,
};

#define DSO_BINARY_TYPE__SYMTAB_CNT ARRAY_SIZE(binary_type_symtab)

static bool symbol_type__filter(char symbol_type)
{
	symbol_type = toupper(symbol_type);
	return symbol_type == 'T' || symbol_type == 'W' || symbol_type == 'D' || symbol_type == 'B';
}

static int prefix_underscores_count(const char *str)
{
	const char *tail = str;

	while (*tail == '_')
		tail++;

	return tail - str;
}

void __weak arch__symbols__fixup_end(struct symbol *p, struct symbol *c)
{
	p->end = c->start;
}

const char * __weak arch__normalize_symbol_name(const char *name)
{
	return name;
}

int __weak arch__compare_symbol_names(const char *namea, const char *nameb)
{
	return strcmp(namea, nameb);
}

int __weak arch__compare_symbol_names_n(const char *namea, const char *nameb,
					unsigned int n)
{
	return strncmp(namea, nameb, n);
}

int __weak arch__choose_best_symbol(struct symbol *syma,
				    struct symbol *symb __maybe_unused)
{
	/* Avoid "SyS" kernel syscall aliases */
	if (strlen(syma->name) >= 3 && !strncmp(syma->name, "SyS", 3))
		return SYMBOL_B;
	if (strlen(syma->name) >= 10 && !strncmp(syma->name, "compat_SyS", 10))
		return SYMBOL_B;

	return SYMBOL_A;
}
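/*
 * Pick between two symbols that start at the same address: prefer, in
 * order, the one with a non-zero size, the non-weak one, the global
 * one, the one with fewer leading underscores and, finally, the one
 * with the longer name. Remaining ties go to arch__choose_best_symbol().
 */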
static int choose_best_symbol(struct symbol *syma, struct symbol *symb)
{
	s64 a;
	s64 b;
	size_t na, nb;

	/* Prefer a symbol with non zero length */
	a = syma->end - syma->start;
	b = symb->end - symb->start;
	if ((b == 0) && (a > 0))
		return SYMBOL_A;
	else if ((a == 0) && (b > 0))
		return SYMBOL_B;

	/* Prefer a non weak symbol over a weak one */
	a = syma->binding == STB_WEAK;
	b = symb->binding == STB_WEAK;
	if (b && !a)
		return SYMBOL_A;
	if (a && !b)
		return SYMBOL_B;

	/* Prefer a global symbol over a non global one */
	a = syma->binding == STB_GLOBAL;
	b = symb->binding == STB_GLOBAL;
	if (a && !b)
		return SYMBOL_A;
	if (b && !a)
		return SYMBOL_B;

	/* Prefer a symbol with less underscores */
	a = prefix_underscores_count(syma->name);
	b = prefix_underscores_count(symb->name);
	if (b > a)
		return SYMBOL_A;
	else if (a > b)
		return SYMBOL_B;

	/* Choose the symbol with the longest name */
	na = strlen(syma->name);
	nb = strlen(symb->name);
	if (na > nb)
		return SYMBOL_A;
	else if (na < nb)
		return SYMBOL_B;

	return arch__choose_best_symbol(syma, symb);
}

void symbols__fixup_duplicate(struct rb_root_cached *symbols)
{
	struct rb_node *nd;
	struct symbol *curr, *next;

	if (symbol_conf.allow_aliases)
		return;

	nd = rb_first_cached(symbols);

	while (nd) {
		curr = rb_entry(nd, struct symbol, rb_node);
again:
		nd = rb_next(&curr->rb_node);
		next = rb_entry(nd, struct symbol, rb_node);

		if (!nd)
			break;

		if (curr->start != next->start)
			continue;

		if (choose_best_symbol(curr, next) == SYMBOL_A) {
			rb_erase_cached(&next->rb_node, symbols);
			symbol__delete(next);
			goto again;
		} else {
			nd = rb_next(&curr->rb_node);
			rb_erase_cached(&curr->rb_node, symbols);
			symbol__delete(curr);
		}
	}
}

void symbols__fixup_end(struct rb_root_cached *symbols)
{
	struct rb_node *nd, *prevnd = rb_first_cached(symbols);
	struct symbol *curr, *prev;

	if (prevnd == NULL)
		return;

	curr = rb_entry(prevnd, struct symbol, rb_node);

	for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {
		prev = curr;
		curr = rb_entry(nd, struct symbol, rb_node);

		if (prev->end == prev->start && prev->end != curr->start)
			arch__symbols__fixup_end(prev, curr);
	}

	/* Last entry */
	if (curr->end == curr->start)
		curr->end = roundup(curr->start, 4096) + 4096;
}

void map_groups__fixup_end(struct map_groups *mg)
{
	struct maps *maps = &mg->maps;
	struct map *next, *curr;

	down_write(&maps->lock);

	curr = maps__first(maps);
	if (curr == NULL)
		goto out_unlock;

	for (next = map__next(curr); next; next = map__next(curr)) {
		if (!curr->end)
			curr->end = next->start;
		curr = next;
	}

	/*
	 * We still don't have the actual symbols, so guess the
	 * last map's final address.
	 */
	if (!curr->end)
		curr->end = ~0ULL;

out_unlock:
	up_write(&maps->lock);
}
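/*
 * A symbol is allocated as one block: an optional private area of
 * symbol_conf.priv_size bytes (e.g. struct annotation), then the
 * struct symbol itself, then the NUL-terminated name:
 *
 *   [ priv_size bytes ][ struct symbol ][ name... \0 ]
 *
 * symbol__new() returns a pointer past the private area, and
 * symbol__delete() steps back over it before freeing.
 */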
struct symbol *symbol__new(u64 start, u64 len, u8 binding, u8 type, const char *name)
{
	size_t namelen = strlen(name) + 1;
	struct symbol *sym = calloc(1, (symbol_conf.priv_size +
					sizeof(*sym) + namelen));
	if (sym == NULL)
		return NULL;

	if (symbol_conf.priv_size) {
		if (symbol_conf.init_annotation) {
			struct annotation *notes = (void *)sym;

			pthread_mutex_init(&notes->lock, NULL);
		}
		sym = ((void *)sym) + symbol_conf.priv_size;
	}

	sym->start = start;
	sym->end = len ? start + len : start;
	sym->type = type;
	sym->binding = binding;
	sym->namelen = namelen - 1;

	pr_debug4("%s: %s %#" PRIx64 "-%#" PRIx64 "\n",
		  __func__, name, start, sym->end);
	memcpy(sym->name, name, namelen);

	return sym;
}

void symbol__delete(struct symbol *sym)
{
	free(((void *)sym) - symbol_conf.priv_size);
}

void symbols__delete(struct rb_root_cached *symbols)
{
	struct symbol *pos;
	struct rb_node *next = rb_first_cached(symbols);

	while (next) {
		pos = rb_entry(next, struct symbol, rb_node);
		next = rb_next(&pos->rb_node);
		rb_erase_cached(&pos->rb_node, symbols);
		symbol__delete(pos);
	}
}
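/*
 * Symbols live in an rbtree ordered by start address. Insertion walks
 * down from the root comparing start addresses; the "leftmost" flag
 * keeps the cached-leftmost optimization of rb_root_cached valid.
 */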
void __symbols__insert(struct rb_root_cached *symbols,
		       struct symbol *sym, bool kernel)
{
	struct rb_node **p = &symbols->rb_root.rb_node;
	struct rb_node *parent = NULL;
	const u64 ip = sym->start;
	struct symbol *s;
	bool leftmost = true;

	if (kernel) {
		const char *name = sym->name;
		/*
		 * ppc64 uses function descriptors and appends a '.' to the
		 * start of every instruction address. Remove it.
		 */
		if (name[0] == '.')
			name++;
		sym->idle = symbol__is_idle(name);
	}

	while (*p != NULL) {
		parent = *p;
		s = rb_entry(parent, struct symbol, rb_node);
		if (ip < s->start)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}
	rb_link_node(&sym->rb_node, parent, p);
	rb_insert_color_cached(&sym->rb_node, symbols, leftmost);
}

void symbols__insert(struct rb_root_cached *symbols, struct symbol *sym)
{
	__symbols__insert(symbols, sym, false);
}

static struct symbol *symbols__find(struct rb_root_cached *symbols, u64 ip)
{
	struct rb_node *n;

	if (symbols == NULL)
		return NULL;

	n = symbols->rb_root.rb_node;

	while (n) {
		struct symbol *s = rb_entry(n, struct symbol, rb_node);

		if (ip < s->start)
			n = n->rb_left;
		else if (ip > s->end || (ip == s->end && ip != s->start))
			n = n->rb_right;
		else
			return s;
	}

	return NULL;
}

static struct symbol *symbols__first(struct rb_root_cached *symbols)
{
	struct rb_node *n = rb_first_cached(symbols);

	if (n)
		return rb_entry(n, struct symbol, rb_node);

	return NULL;
}

static struct symbol *symbols__last(struct rb_root_cached *symbols)
{
	struct rb_node *n = rb_last(&symbols->rb_root);

	if (n)
		return rb_entry(n, struct symbol, rb_node);

	return NULL;
}

static struct symbol *symbols__next(struct symbol *sym)
{
	struct rb_node *n = rb_next(&sym->rb_node);

	if (n)
		return rb_entry(n, struct symbol, rb_node);

	return NULL;
}

static void symbols__insert_by_name(struct rb_root_cached *symbols, struct symbol *sym)
{
	struct rb_node **p = &symbols->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct symbol_name_rb_node *symn, *s;
	bool leftmost = true;

	symn = container_of(sym, struct symbol_name_rb_node, sym);

	while (*p != NULL) {
		parent = *p;
		s = rb_entry(parent, struct symbol_name_rb_node, rb_node);
		if (strcmp(sym->name, s->sym.name) < 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}
	rb_link_node(&symn->rb_node, parent, p);
	rb_insert_color_cached(&symn->rb_node, symbols, leftmost);
}

static void symbols__sort_by_name(struct rb_root_cached *symbols,
				  struct rb_root_cached *source)
{
	struct rb_node *nd;

	for (nd = rb_first_cached(source); nd; nd = rb_next(nd)) {
		struct symbol *pos = rb_entry(nd, struct symbol, rb_node);

		symbols__insert_by_name(symbols, pos);
	}
}

int symbol__match_symbol_name(const char *name, const char *str,
			      enum symbol_tag_include includes)
{
	const char *versioning;

	if (includes == SYMBOL_TAG_INCLUDE__DEFAULT_ONLY &&
	    (versioning = strstr(name, "@@"))) {
		int len = strlen(str);

		if (len < versioning - name)
			len = versioning - name;

		return arch__compare_symbol_names_n(name, str, len);
	} else
		return arch__compare_symbol_names(name, str);
}
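/*
 * Name lookup walks the name-sorted tree using the comparator above.
 * With SYMBOL_TAG_INCLUDE__DEFAULT_ONLY, a default-versioned ELF name
 * such as "memcpy@@GLIBC_2.14" (an illustrative example) still matches
 * a plain "memcpy" query, because only the part before "@@" is
 * compared.
 */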
static struct symbol *symbols__find_by_name(struct rb_root_cached *symbols,
					    const char *name,
					    enum symbol_tag_include includes)
{
	struct rb_node *n;
	struct symbol_name_rb_node *s = NULL;

	if (symbols == NULL)
		return NULL;

	n = symbols->rb_root.rb_node;

	while (n) {
		int cmp;

		s = rb_entry(n, struct symbol_name_rb_node, rb_node);
		cmp = symbol__match_symbol_name(s->sym.name, name, includes);

		if (cmp > 0)
			n = n->rb_left;
		else if (cmp < 0)
			n = n->rb_right;
		else
			break;
	}

	if (n == NULL)
		return NULL;

	if (includes != SYMBOL_TAG_INCLUDE__DEFAULT_ONLY)
		/* return first symbol that has same name (if any) */
		for (n = rb_prev(n); n; n = rb_prev(n)) {
			struct symbol_name_rb_node *tmp;

			tmp = rb_entry(n, struct symbol_name_rb_node, rb_node);
			if (arch__compare_symbol_names(tmp->sym.name, s->sym.name))
				break;

			s = tmp;
		}

	return &s->sym;
}

void dso__reset_find_symbol_cache(struct dso *dso)
{
	dso->last_find_result.addr = 0;
	dso->last_find_result.symbol = NULL;
}

void dso__insert_symbol(struct dso *dso, struct symbol *sym)
{
	__symbols__insert(&dso->symbols, sym, dso->kernel);

	/* update the symbol cache if necessary */
	if (dso->last_find_result.addr >= sym->start &&
	    (dso->last_find_result.addr < sym->end ||
	     sym->start == sym->end)) {
		dso->last_find_result.symbol = sym;
	}
}

struct symbol *dso__find_symbol(struct dso *dso, u64 addr)
{
	if (dso->last_find_result.addr != addr || dso->last_find_result.symbol == NULL) {
		dso->last_find_result.addr = addr;
		dso->last_find_result.symbol = symbols__find(&dso->symbols, addr);
	}

	return dso->last_find_result.symbol;
}

struct symbol *dso__first_symbol(struct dso *dso)
{
	return symbols__first(&dso->symbols);
}

struct symbol *dso__last_symbol(struct dso *dso)
{
	return symbols__last(&dso->symbols);
}

struct symbol *dso__next_symbol(struct symbol *sym)
{
	return symbols__next(sym);
}

struct symbol *symbol__next_by_name(struct symbol *sym)
{
	struct symbol_name_rb_node *s = container_of(sym, struct symbol_name_rb_node, sym);
	struct rb_node *n = rb_next(&s->rb_node);

	return n ? &rb_entry(n, struct symbol_name_rb_node, rb_node)->sym : NULL;
}

/*
 * Returns the first symbol that matches @name.
 */
struct symbol *dso__find_symbol_by_name(struct dso *dso, const char *name)
{
	struct symbol *s = symbols__find_by_name(&dso->symbol_names, name,
						 SYMBOL_TAG_INCLUDE__NONE);
	if (!s)
		s = symbols__find_by_name(&dso->symbol_names, name,
					  SYMBOL_TAG_INCLUDE__DEFAULT_ONLY);
	return s;
}

void dso__sort_by_name(struct dso *dso)
{
	dso__set_sorted_by_name(dso);
	return symbols__sort_by_name(&dso->symbol_names, &dso->symbols);
}
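/*
 * Parse /proc/modules style input, where each line looks like (an
 * illustrative example):
 *
 *   nf_nat 40960 2 xt_nat,iptable_nat, Live 0xffffffffc09ab000
 *
 * The module name is everything up to the first space, the size is the
 * second field, and the load address is the hex value after the last
 * 'x' on the line.
 */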
int modules__parse(const char *filename, void *arg,
		   int (*process_module)(void *arg, const char *name,
					 u64 start, u64 size))
{
	char *line = NULL;
	size_t n;
	FILE *file;
	int err = 0;

	file = fopen(filename, "r");
	if (file == NULL)
		return -1;

	while (1) {
		char name[PATH_MAX];
		u64 start, size;
		char *sep, *endptr;
		ssize_t line_len;

		line_len = getline(&line, &n, file);
		if (line_len < 0) {
			if (feof(file))
				break;
			err = -1;
			goto out;
		}

		if (!line) {
			err = -1;
			goto out;
		}

		line[--line_len] = '\0'; /* \n */

		sep = strrchr(line, 'x');
		if (sep == NULL)
			continue;

		hex2u64(sep + 1, &start);

		sep = strchr(line, ' ');
		if (sep == NULL)
			continue;

		*sep = '\0';

		scnprintf(name, sizeof(name), "[%s]", line);

		size = strtoul(sep + 1, &endptr, 0);
		if (*endptr != ' ' && *endptr != '\t')
			continue;

		err = process_module(arg, name, start, size);
		if (err)
			break;
	}
out:
	free(line);
	fclose(file);
	return err;
}

/*
 * These are symbols in the kernel image, so make sure that
 * sym is from a kernel DSO.
 */
static bool symbol__is_idle(const char *name)
{
	const char * const idle_symbols[] = {
		"arch_cpu_idle",
		"cpu_idle",
		"cpu_startup_entry",
		"intel_idle",
		"default_idle",
		"native_safe_halt",
		"enter_idle",
		"exit_idle",
		"mwait_idle",
		"mwait_idle_with_hints",
		"poll_idle",
		"ppc64_runlatch_off",
		"pseries_dedicated_idle_sleep",
		NULL
	};
	int i;

	for (i = 0; idle_symbols[i]; i++) {
		if (!strcmp(idle_symbols[i], name))
			return true;
	}

	return false;
}

static int map__process_kallsym_symbol(void *arg, const char *name,
				       char type, u64 start)
{
	struct symbol *sym;
	struct dso *dso = arg;
	struct rb_root_cached *root = &dso->symbols;

	if (!symbol_type__filter(type))
		return 0;

	/*
	 * module symbols are not sorted so we add all
	 * symbols, setting length to 0, and rely on
	 * symbols__fixup_end() to fix it up.
	 */
	sym = symbol__new(start, 0, kallsyms2elf_binding(type), kallsyms2elf_type(type), name);
	if (sym == NULL)
		return -ENOMEM;
	/*
	 * We will pass the symbols to the filter later, in
	 * map__split_kallsyms, when we have split the maps per module
	 */
	__symbols__insert(root, sym, !strchr(name, '['));

	return 0;
}
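/*
 * A /proc/kallsyms line looks like (an illustrative example):
 *
 *   ffffffff810002b8 T _stext
 *
 * i.e. address, nm(1)-style type character and symbol name, with module
 * symbols carrying a trailing "\t[module]". Only text/weak/data/BSS
 * entries ('T', 'W', 'D', 'B', case-insensitive) pass
 * symbol_type__filter() above.
 */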
/*
 * Loads the function entries in /proc/kallsyms into kernel_map->dso,
 * so that we can in the next step set the symbol ->end address and then
 * call map_groups__split_kallsyms.
 */
static int dso__load_all_kallsyms(struct dso *dso, const char *filename)
{
	return kallsyms__parse(filename, dso, map__process_kallsym_symbol);
}
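/*
 * With kcore we have real maps for the kernel and each module, so each
 * kallsyms symbol just needs to be re-homed: find the map containing
 * its address, make the address map-relative, and move the symbol into
 * that map's dso. Symbols that fall outside every map are dropped.
 */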
static int map_groups__split_kallsyms_for_kcore(struct map_groups *kmaps, struct dso *dso)
{
	struct map *curr_map;
	struct symbol *pos;
	int count = 0;
	struct rb_root_cached old_root = dso->symbols;
	struct rb_root_cached *root = &dso->symbols;
	struct rb_node *next = rb_first_cached(root);

	if (!kmaps)
		return -1;

	*root = RB_ROOT_CACHED;

	while (next) {
		char *module;

		pos = rb_entry(next, struct symbol, rb_node);
		next = rb_next(&pos->rb_node);

		rb_erase_cached(&pos->rb_node, &old_root);
		RB_CLEAR_NODE(&pos->rb_node);
		module = strchr(pos->name, '\t');
		if (module)
			*module = '\0';

		curr_map = map_groups__find(kmaps, pos->start);

		if (!curr_map) {
			symbol__delete(pos);
			continue;
		}

		pos->start -= curr_map->start - curr_map->pgoff;
		if (pos->end > curr_map->end)
			pos->end = curr_map->end;
		if (pos->end)
			pos->end -= curr_map->start - curr_map->pgoff;
		symbols__insert(&curr_map->dso->symbols, pos);
		++count;
	}

	/* Symbols have been adjusted */
	dso->adjust_symbols = 1;

	return count;
}

/*
 * Split the symbols into maps, making sure there are no overlaps, i.e. the
 * kernel range is broken in several maps, named [kernel].N, as we don't have
 * the original ELF section names that vmlinux has.
 */
static int map_groups__split_kallsyms(struct map_groups *kmaps, struct dso *dso, u64 delta,
				      struct map *initial_map)
{
	struct machine *machine;
	struct map *curr_map = initial_map;
	struct symbol *pos;
	int count = 0, moved = 0;
	struct rb_root_cached *root = &dso->symbols;
	struct rb_node *next = rb_first_cached(root);
	int kernel_range = 0;
	bool x86_64;

	if (!kmaps)
		return -1;

	machine = kmaps->machine;

	x86_64 = machine__is(machine, "x86_64");

	while (next) {
		char *module;

		pos = rb_entry(next, struct symbol, rb_node);
		next = rb_next(&pos->rb_node);

		module = strchr(pos->name, '\t');
		if (module) {
			if (!symbol_conf.use_modules)
				goto discard_symbol;

			*module++ = '\0';

			if (strcmp(curr_map->dso->short_name, module)) {
				if (curr_map != initial_map &&
				    dso->kernel == DSO_TYPE_GUEST_KERNEL &&
				    machine__is_default_guest(machine)) {
					/*
					 * We assume all symbols of a module are
					 * continuous in kallsyms, so curr_map
					 * points to a module and all its
					 * symbols are in its kmap. Mark it as
					 * loaded.
					 */
					dso__set_loaded(curr_map->dso);
				}

				curr_map = map_groups__find_by_name(kmaps, module);
				if (curr_map == NULL) {
					pr_debug("%s/proc/{kallsyms,modules} "
						 "inconsistency while looking "
						 "for \"%s\" module!\n",
						 machine->root_dir, module);
					curr_map = initial_map;
					goto discard_symbol;
				}

				if (curr_map->dso->loaded &&
				    !machine__is_default_guest(machine))
					goto discard_symbol;
			}
			/*
			 * So that we look just like we get from .ko files,
			 * i.e. not prelinked, relative to initial_map->start.
			 */
			pos->start = curr_map->map_ip(curr_map, pos->start);
			pos->end = curr_map->map_ip(curr_map, pos->end);
		} else if (x86_64 && is_entry_trampoline(pos->name)) {
			/*
			 * These symbols are not needed anymore since the
			 * trampoline maps refer to the text section and its
			 * symbols instead. Avoid having to deal with
			 * relocations, and the assumption that the first symbol
			 * is the start of kernel text, by simply removing the
			 * symbols at this point.
			 */
			goto discard_symbol;
		} else if (curr_map != initial_map) {
			char dso_name[PATH_MAX];
			struct dso *ndso;

			if (delta) {
				/* Kernel was relocated at boot time */
				pos->start -= delta;
				pos->end -= delta;
			}

			if (count == 0) {
				curr_map = initial_map;
				goto add_symbol;
			}

			if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
				snprintf(dso_name, sizeof(dso_name),
					 "[guest.kernel].%d",
					 kernel_range++);
			else
				snprintf(dso_name, sizeof(dso_name),
					 "[kernel].%d",
					 kernel_range++);

			ndso = dso__new(dso_name);
			if (ndso == NULL)
				return -1;

			ndso->kernel = dso->kernel;

			curr_map = map__new2(pos->start, ndso);
			if (curr_map == NULL) {
				dso__put(ndso);
				return -1;
			}

			curr_map->map_ip = curr_map->unmap_ip = identity__map_ip;
			map_groups__insert(kmaps, curr_map);
		} else if (delta) {
			/* Kernel was relocated at boot time */
			pos->start -= delta;
			pos->end -= delta;
		}
add_symbol:
		if (curr_map != initial_map) {
			rb_erase_cached(&pos->rb_node, root);
			symbols__insert(&curr_map->dso->symbols, pos);
			++moved;
		} else
			++count;

		continue;
discard_symbol:
		rb_erase_cached(&pos->rb_node, root);
		symbol__delete(pos);
	}

	if (curr_map != initial_map &&
	    dso->kernel == DSO_TYPE_GUEST_KERNEL &&
	    machine__is_default_guest(kmaps->machine)) {
		dso__set_loaded(curr_map->dso);
	}

	return count + moved;
}

bool symbol__restricted_filename(const char *filename,
				 const char *restricted_filename)
{
	bool restricted = false;

	if (symbol_conf.kptr_restrict) {
		char *r = realpath(filename, NULL);

		if (r != NULL) {
			restricted = strcmp(r, restricted_filename) == 0;
			free(r);
			return restricted;
		}
	}

	return restricted;
}

struct module_info {
	struct rb_node rb_node;
	char *name;
	u64 start;
};

static void add_module(struct module_info *mi, struct rb_root *modules)
{
	struct rb_node **p = &modules->rb_node;
	struct rb_node *parent = NULL;
	struct module_info *m;

	while (*p != NULL) {
		parent = *p;
		m = rb_entry(parent, struct module_info, rb_node);
		if (strcmp(mi->name, m->name) < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&mi->rb_node, parent, p);
	rb_insert_color(&mi->rb_node, modules);
}

static void delete_modules(struct rb_root *modules)
{
	struct module_info *mi;
	struct rb_node *next = rb_first(modules);

	while (next) {
		mi = rb_entry(next, struct module_info, rb_node);
		next = rb_next(&mi->rb_node);
		rb_erase(&mi->rb_node, modules);
		zfree(&mi->name);
		free(mi);
	}
}
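/*
 * The module_info rbtree built from /proc/modules is keyed by module
 * name; do_validate_kcore_modules() below uses it to check that every
 * module map is still loaded at the address recorded in the companion
 * "modules" file.
 */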
static struct module_info *find_module(const char *name,
				       struct rb_root *modules)
{
	struct rb_node *n = modules->rb_node;

	while (n) {
		struct module_info *m;
		int cmp;

		m = rb_entry(n, struct module_info, rb_node);
		cmp = strcmp(name, m->name);
		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return m;
	}

	return NULL;
}

static int __read_proc_modules(void *arg, const char *name, u64 start,
			       u64 size __maybe_unused)
{
	struct rb_root *modules = arg;
	struct module_info *mi;

	mi = zalloc(sizeof(struct module_info));
	if (!mi)
		return -ENOMEM;

	mi->name = strdup(name);
	mi->start = start;

	if (!mi->name) {
		free(mi);
		return -ENOMEM;
	}

	add_module(mi, modules);

	return 0;
}

static int read_proc_modules(const char *filename, struct rb_root *modules)
{
	if (symbol__restricted_filename(filename, "/proc/modules"))
		return -1;

	if (modules__parse(filename, modules, __read_proc_modules)) {
		delete_modules(modules);
		return -1;
	}

	return 0;
}

int compare_proc_modules(const char *from, const char *to)
{
	struct rb_root from_modules = RB_ROOT;
	struct rb_root to_modules = RB_ROOT;
	struct rb_node *from_node, *to_node;
	struct module_info *from_m, *to_m;
	int ret = -1;

	if (read_proc_modules(from, &from_modules))
		return -1;

	if (read_proc_modules(to, &to_modules))
		goto out_delete_from;

	from_node = rb_first(&from_modules);
	to_node = rb_first(&to_modules);
	while (from_node) {
		if (!to_node)
			break;

		from_m = rb_entry(from_node, struct module_info, rb_node);
		to_m = rb_entry(to_node, struct module_info, rb_node);

		if (from_m->start != to_m->start ||
		    strcmp(from_m->name, to_m->name))
			break;

		from_node = rb_next(from_node);
		to_node = rb_next(to_node);
	}

	if (!from_node && !to_node)
		ret = 0;

	delete_modules(&to_modules);
out_delete_from:
	delete_modules(&from_modules);

	return ret;
}

struct map *map_groups__first(struct map_groups *mg)
{
	return maps__first(&mg->maps);
}

static int do_validate_kcore_modules(const char *filename,
				     struct map_groups *kmaps)
{
	struct rb_root modules = RB_ROOT;
	struct map *old_map;
	int err;

	err = read_proc_modules(filename, &modules);
	if (err)
		return err;

	old_map = map_groups__first(kmaps);
	while (old_map) {
		struct map *next = map_groups__next(old_map);
		struct module_info *mi;

		if (!__map__is_kmodule(old_map)) {
			old_map = next;
			continue;
		}

		/* Module must be in memory at the same address */
		mi = find_module(old_map->dso->short_name, &modules);
		if (!mi || mi->start != old_map->start) {
			err = -EINVAL;
			goto out;
		}

		old_map = next;
	}
out:
	delete_modules(&modules);
	return err;
}

/*
 * If kallsyms is referenced by name then we look for filename in the same
 * directory. E.g. for kallsyms_filename "/path/to/kallsyms" and base_name
 * "modules" this yields "/path/to/modules".
 */
static bool filename_from_kallsyms_filename(char *filename,
					    const char *base_name,
					    const char *kallsyms_filename)
{
	char *name;

	strcpy(filename, kallsyms_filename);
	name = strrchr(filename, '/');
	if (!name)
		return false;

	name += 1;

	if (!strcmp(name, "kallsyms")) {
		strcpy(name, base_name);
		return true;
	}

	return false;
}
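/*
 * A kcore image is only usable if the kernel and modules it goes with
 * are still at the addresses perf expects: check the ref_reloc_sym
 * address (if any) and every module's load address against the
 * companion kallsyms/modules files.
 */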
static int validate_kcore_modules(const char *kallsyms_filename,
				  struct map *map)
{
	struct map_groups *kmaps = map__kmaps(map);
	char modules_filename[PATH_MAX];

	if (!kmaps)
		return -EINVAL;

	if (!filename_from_kallsyms_filename(modules_filename, "modules",
					     kallsyms_filename))
		return -EINVAL;

	if (do_validate_kcore_modules(modules_filename, kmaps))
		return -EINVAL;

	return 0;
}

static int validate_kcore_addresses(const char *kallsyms_filename,
				    struct map *map)
{
	struct kmap *kmap = map__kmap(map);

	if (!kmap)
		return -EINVAL;

	if (kmap->ref_reloc_sym && kmap->ref_reloc_sym->name) {
		u64 start;

		if (kallsyms__get_function_start(kallsyms_filename,
						 kmap->ref_reloc_sym->name, &start))
			return -ENOENT;
		if (start != kmap->ref_reloc_sym->addr)
			return -EINVAL;
	}

	return validate_kcore_modules(kallsyms_filename, map);
}

struct kcore_mapfn_data {
	struct dso *dso;
	struct list_head maps;
};

static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data)
{
	struct kcore_mapfn_data *md = data;
	struct map *map;

	map = map__new2(start, md->dso);
	if (map == NULL)
		return -ENOMEM;

	map->end = map->start + len;
	map->pgoff = pgoff;

	list_add(&map->node, &md->maps);

	return 0;
}
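/*
 * Example: merging a new map covering 0x100-0x500 into groups that
 * already contain 0x200-0x300 leaves the old map alone and inserts the
 * non-overlapping pieces 0x100-0x200 and 0x300-0x500 (addresses are
 * illustrative).
 */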
/*
 * Merges map into map_groups by splitting the new map
 * within the existing map regions.
 */
int map_groups__merge_in(struct map_groups *kmaps, struct map *new_map)
{
	struct map *old_map;
	LIST_HEAD(merged);

	for (old_map = map_groups__first(kmaps); old_map;
	     old_map = map_groups__next(old_map)) {

		/* no overlap with this one */
		if (new_map->end < old_map->start ||
		    new_map->start >= old_map->end)
			continue;

		if (new_map->start < old_map->start) {
			/*
			 * |new......
			 *       |old....
			 */
			if (new_map->end < old_map->end) {
				/*
				 * |new......|     -> |new..|
				 *       |old....| ->       |old....|
				 */
				new_map->end = old_map->start;
			} else {
				/*
				 * |new.............| -> |new..|       |new..|
				 *       |old....|    ->       |old....|
				 */
				struct map *m = map__clone(new_map);

				if (!m)
					return -ENOMEM;

				m->end = old_map->start;
				list_add_tail(&m->node, &merged);
				new_map->start = old_map->end;
			}
		} else {
			/*
			 *      |new......
			 * |old....
			 */
			if (new_map->end < old_map->end) {
				/*
				 *      |new..|   -> x
				 * |old.........| -> |old.........|
				 */
				map__put(new_map);
				new_map = NULL;
				break;
			} else {
				/*
				 *      |new......| ->         |new...|
				 * |old....|        -> |old....|
				 */
				new_map->start = old_map->end;
			}
		}
	}

	while (!list_empty(&merged)) {
		old_map = list_entry(merged.next, struct map, node);
		list_del_init(&old_map->node);
		map_groups__insert(kmaps, old_map);
		map__put(old_map);
	}

	if (new_map) {
		map_groups__insert(kmaps, new_map);
		map__put(new_map);
	}
	return 0;
}
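/*
 * Replace the kallsyms-guessed kernel maps with real ones read from
 * /proc/kcore (or a cached copy): validate addresses, read the program
 * headers into a temporary list, drop the old maps (keeping eBPF ones),
 * then install the new maps, using the map containing '_stext' as the
 * main kernel text map.
 */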
static int dso__load_kcore(struct dso *dso, struct map *map,
			   const char *kallsyms_filename)
{
	struct map_groups *kmaps = map__kmaps(map);
	struct kcore_mapfn_data md;
	struct map *old_map, *new_map, *replacement_map = NULL;
	struct machine *machine;
	bool is_64_bit;
	int err, fd;
	char kcore_filename[PATH_MAX];
	u64 stext;

	if (!kmaps)
		return -EINVAL;

	machine = kmaps->machine;

	/* This function requires that the map is the kernel map */
	if (!__map__is_kernel(map))
		return -EINVAL;

	if (!filename_from_kallsyms_filename(kcore_filename, "kcore",
					     kallsyms_filename))
		return -EINVAL;

	/* Modules and kernel must be present at their original addresses */
	if (validate_kcore_addresses(kallsyms_filename, map))
		return -EINVAL;

	md.dso = dso;
	INIT_LIST_HEAD(&md.maps);

	fd = open(kcore_filename, O_RDONLY);
	if (fd < 0) {
		pr_debug("Failed to open %s. Note /proc/kcore requires CAP_SYS_RAWIO capability to access.\n",
			 kcore_filename);
		return -EINVAL;
	}

	/* Read new maps into temporary lists */
	err = file__read_maps(fd, map->prot & PROT_EXEC, kcore_mapfn, &md,
			      &is_64_bit);
	if (err)
		goto out_err;
	dso->is_64_bit = is_64_bit;

	if (list_empty(&md.maps)) {
		err = -EINVAL;
		goto out_err;
	}

	/* Remove old maps */
	old_map = map_groups__first(kmaps);
	while (old_map) {
		struct map *next = map_groups__next(old_map);

		/*
		 * We need to preserve eBPF maps even if they are
		 * covered by kcore, because we need to access
		 * eBPF dso for source data.
		 */
		if (old_map != map && !__map__is_bpf_prog(old_map))
			map_groups__remove(kmaps, old_map);
		old_map = next;
	}
	machine->trampolines_mapped = false;

	/* Find the kernel map using the '_stext' symbol */
	if (!kallsyms__get_function_start(kallsyms_filename, "_stext", &stext)) {
		list_for_each_entry(new_map, &md.maps, node) {
			if (stext >= new_map->start && stext < new_map->end) {
				replacement_map = new_map;
				break;
			}
		}
	}

	if (!replacement_map)
		replacement_map = list_entry(md.maps.next, struct map, node);

	/* Add new maps */
	while (!list_empty(&md.maps)) {
		new_map = list_entry(md.maps.next, struct map, node);
		list_del_init(&new_map->node);
		if (new_map == replacement_map) {
			map->start = new_map->start;
			map->end = new_map->end;
			map->pgoff = new_map->pgoff;
			map->map_ip = new_map->map_ip;
			map->unmap_ip = new_map->unmap_ip;
			/* Ensure maps are correctly ordered */
			map__get(map);
			map_groups__remove(kmaps, map);
			map_groups__insert(kmaps, map);
			map__put(map);
			map__put(new_map);
		} else {
			/*
			 * Merge kcore map into existing maps,
			 * and ensure that current maps (eBPF)
			 * stay intact.
			 */
			if (map_groups__merge_in(kmaps, new_map))
				goto out_err;
		}
	}

	if (machine__is(machine, "x86_64")) {
		u64 addr;

		/*
		 * If one of the corresponding symbols is there, assume the
		 * entry trampoline maps are too.
		 */
		if (!kallsyms__get_function_start(kallsyms_filename,
						  ENTRY_TRAMPOLINE_NAME,
						  &addr))
			machine->trampolines_mapped = true;
	}

	/*
	 * Set the data type and long name so that kcore can be read via
	 * dso__data_read_addr().
	 */
	if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
		dso->binary_type = DSO_BINARY_TYPE__GUEST_KCORE;
	else
		dso->binary_type = DSO_BINARY_TYPE__KCORE;
	dso__set_long_name(dso, strdup(kcore_filename), true);

	close(fd);

	if (map->prot & PROT_EXEC)
		pr_debug("Using %s for kernel object code\n", kcore_filename);
	else
		pr_debug("Using %s for kernel data\n", kcore_filename);

	return 0;

out_err:
	while (!list_empty(&md.maps)) {
		map = list_entry(md.maps.next, struct map, node);
		list_del_init(&map->node);
		map__put(map);
	}
	close(fd);
	return -EINVAL;
}
/*
 * If the kernel is relocated at boot time, kallsyms won't match. Compute the
 * delta based on the relocation reference symbol.
 */
static int kallsyms__delta(struct kmap *kmap, const char *filename, u64 *delta)
{
	u64 addr;

	if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->name)
		return 0;

	if (kallsyms__get_function_start(filename, kmap->ref_reloc_sym->name, &addr))
		return -1;

	*delta = addr - kmap->ref_reloc_sym->addr;
	return 0;
}

int __dso__load_kallsyms(struct dso *dso, const char *filename,
			 struct map *map, bool no_kcore)
{
	struct kmap *kmap = map__kmap(map);
	u64 delta = 0;

	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
		return -1;

	if (!kmap || !kmap->kmaps)
		return -1;

	if (dso__load_all_kallsyms(dso, filename) < 0)
		return -1;

	if (kallsyms__delta(kmap, filename, &delta))
		return -1;

	symbols__fixup_end(&dso->symbols);
	symbols__fixup_duplicate(&dso->symbols);

	if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
		dso->symtab_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
	else
		dso->symtab_type = DSO_BINARY_TYPE__KALLSYMS;

	if (!no_kcore && !dso__load_kcore(dso, map, filename))
		return map_groups__split_kallsyms_for_kcore(kmap->kmaps, dso);
	else
		return map_groups__split_kallsyms(kmap->kmaps, dso, delta, map);
}

int dso__load_kallsyms(struct dso *dso, const char *filename,
		       struct map *map)
{
	return __dso__load_kallsyms(dso, filename, map, false);
}

static int dso__load_perf_map(const char *map_path, struct dso *dso)
{
	char *line = NULL;
	size_t n;
	FILE *file;
	int nr_syms = 0;

	file = fopen(map_path, "r");
	if (file == NULL)
		goto out_failure;

	while (!feof(file)) {
		u64 start, size;
		struct symbol *sym;
		int line_len, len;

		line_len = getline(&line, &n, file);
		if (line_len < 0)
			break;

		if (!line)
			goto out_failure;

		line[--line_len] = '\0'; /* \n */

		len = hex2u64(line, &start);

		len++;
		if (len + 2 >= line_len)
			continue;

		len += hex2u64(line + len, &size);

		len++;
		if (len + 2 >= line_len)
			continue;

		sym = symbol__new(start, size, STB_GLOBAL, STT_FUNC, line + len);

		if (sym == NULL)
			goto out_delete_line;

		symbols__insert(&dso->symbols, sym);
		nr_syms++;
	}

	free(line);
	fclose(file);

	return nr_syms;

out_delete_line:
	free(line);
out_failure:
	return -1;
}

static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod,
					   enum dso_binary_type type)
{
	switch (type) {
	case DSO_BINARY_TYPE__JAVA_JIT:
	case DSO_BINARY_TYPE__DEBUGLINK:
	case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
	case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
	case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
	case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
	case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
		return !kmod && dso->kernel == DSO_TYPE_USER;

	case DSO_BINARY_TYPE__KALLSYMS:
	case DSO_BINARY_TYPE__VMLINUX:
	case DSO_BINARY_TYPE__KCORE:
		return dso->kernel == DSO_TYPE_KERNEL;

	case DSO_BINARY_TYPE__GUEST_KALLSYMS:
	case DSO_BINARY_TYPE__GUEST_VMLINUX:
	case DSO_BINARY_TYPE__GUEST_KCORE:
		return dso->kernel == DSO_TYPE_GUEST_KERNEL;

	case DSO_BINARY_TYPE__GUEST_KMODULE:
	case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
		/*
		 * kernel modules know their symtab type - it's set when
		 * creating a module dso in machine__findnew_module_map().
		 */
		return kmod && dso->symtab_type == type;

	case DSO_BINARY_TYPE__BUILD_ID_CACHE:
	case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
		return true;

	case DSO_BINARY_TYPE__BPF_PROG_INFO:
	case DSO_BINARY_TYPE__NOT_FOUND:
	default:
		return false;
	}
}

/*
 * Checks for the existence of the perf-<pid>.map file in two different
 * locations. First, if the process is in a separate mount namespace, check in
 * that namespace using the pid of the innermost pid namespace. If it's not in
 * a namespace, or the file can't be found there, try in the mount namespace
 * of the tracing process using our view of its pid.
 */
static int dso__find_perf_map(char *filebuf, size_t bufsz,
			      struct nsinfo **nsip)
{
	struct nscookie nsc;
	struct nsinfo *nsi;
	struct nsinfo *nnsi;
	int rc = -1;

	nsi = *nsip;

	if (nsi->need_setns) {
		snprintf(filebuf, bufsz, "/tmp/perf-%d.map", nsi->nstgid);
		nsinfo__mountns_enter(nsi, &nsc);
		rc = access(filebuf, R_OK);
		nsinfo__mountns_exit(&nsc);
		if (rc == 0)
			return rc;
	}

	nnsi = nsinfo__copy(nsi);
	if (nnsi) {
		nsinfo__put(nsi);

		nnsi->need_setns = false;
		snprintf(filebuf, bufsz, "/tmp/perf-%d.map", nnsi->tgid);
		*nsip = nnsi;
		rc = 0;
	}

	return rc;
}
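/*
 * Top-level symbol loading for a dso: kernel and guest-kernel dsos are
 * handled by their own loaders, /tmp/perf-<pid>.map files are parsed
 * directly, and everything else walks binary_type_symtab[] looking for
 * one image to use as the symbol source (syms_ss) and one as the
 * runtime image (runtime_ss).
 */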
int dso__load(struct dso *dso, struct map *map)
{
	char *name;
	int ret = -1;
	u_int i;
	struct machine *machine;
	char *root_dir = (char *) "";
	int ss_pos = 0;
	struct symsrc ss_[2];
	struct symsrc *syms_ss = NULL, *runtime_ss = NULL;
	bool kmod;
	bool perfmap;
	unsigned char build_id[BUILD_ID_SIZE];
	struct nscookie nsc;
	char newmapname[PATH_MAX];
	const char *map_path = dso->long_name;

	perfmap = strncmp(dso->name, "/tmp/perf-", 10) == 0;
	if (perfmap) {
		if (dso->nsinfo && (dso__find_perf_map(newmapname,
		    sizeof(newmapname), &dso->nsinfo) == 0)) {
			map_path = newmapname;
		}
	}

	nsinfo__mountns_enter(dso->nsinfo, &nsc);
	pthread_mutex_lock(&dso->lock);

	/* check again under the dso->lock */
	if (dso__loaded(dso)) {
		ret = 1;
		goto out;
	}

	if (map->groups && map->groups->machine)
		machine = map->groups->machine;
	else
		machine = NULL;

	if (dso->kernel) {
		if (dso->kernel == DSO_TYPE_KERNEL)
			ret = dso__load_kernel_sym(dso, map);
		else if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
			ret = dso__load_guest_kernel_sym(dso, map);

		if (machine__is(machine, "x86_64"))
			machine__map_x86_64_entry_trampolines(machine, dso);
		goto out;
	}

	dso->adjust_symbols = 0;

	if (perfmap) {
		ret = dso__load_perf_map(map_path, dso);
		dso->symtab_type = ret > 0 ? DSO_BINARY_TYPE__JAVA_JIT :
					     DSO_BINARY_TYPE__NOT_FOUND;
		goto out;
	}

	if (machine)
		root_dir = machine->root_dir;

	name = malloc(PATH_MAX);
	if (!name)
		goto out;

	kmod = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
		dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
		dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE ||
		dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;

	/*
	 * Read the build id if possible. This is required for
	 * DSO_BINARY_TYPE__BUILDID_DEBUGINFO to work
	 */
	if (!dso->has_build_id &&
	    is_regular_file(dso->long_name)) {
		__symbol__join_symfs(name, PATH_MAX, dso->long_name);
		if (filename__read_build_id(name, build_id, BUILD_ID_SIZE) > 0)
			dso__set_build_id(dso, build_id);
	}

	/*
	 * Iterate over candidate debug images.
	 * Keep track of "interesting" ones (those which have a symtab, dynsym,
	 * and/or opd section) for processing.
	 */
	for (i = 0; i < DSO_BINARY_TYPE__SYMTAB_CNT; i++) {
		struct symsrc *ss = &ss_[ss_pos];
		bool next_slot = false;
		bool is_reg;
		bool nsexit;
		int sirc = -1;

		enum dso_binary_type symtab_type = binary_type_symtab[i];

		nsexit = (symtab_type == DSO_BINARY_TYPE__BUILD_ID_CACHE ||
		    symtab_type == DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO);

		if (!dso__is_compatible_symtab_type(dso, kmod, symtab_type))
			continue;

		if (dso__read_binary_type_filename(dso, symtab_type,
						   root_dir, name, PATH_MAX))
			continue;

		if (nsexit)
			nsinfo__mountns_exit(&nsc);

		is_reg = is_regular_file(name);
		if (is_reg)
			sirc = symsrc__init(ss, dso, name, symtab_type);

		if (nsexit)
			nsinfo__mountns_enter(dso->nsinfo, &nsc);

		if (!is_reg || sirc < 0)
			continue;

		if (!syms_ss && symsrc__has_symtab(ss)) {
			syms_ss = ss;
			next_slot = true;
			if (!dso->symsrc_filename)
				dso->symsrc_filename = strdup(name);
		}

		if (!runtime_ss && symsrc__possibly_runtime(ss)) {
			runtime_ss = ss;
			next_slot = true;
		}

		if (next_slot) {
			ss_pos++;

			if (syms_ss && runtime_ss)
				break;
		} else {
			symsrc__destroy(ss);
		}
	}

	if (!runtime_ss && !syms_ss)
		goto out_free;

	if (runtime_ss && !syms_ss) {
		syms_ss = runtime_ss;
	}

	/* We'll have to hope for the best */
	if (!runtime_ss && syms_ss)
		runtime_ss = syms_ss;

	if (syms_ss)
		ret = dso__load_sym(dso, map, syms_ss, runtime_ss, kmod);
	else
		ret = -1;

	if (ret > 0) {
		int nr_plt;

		nr_plt = dso__synthesize_plt_symbols(dso, runtime_ss);
		if (nr_plt > 0)
			ret += nr_plt;
	}

	for (; ss_pos > 0; ss_pos--)
		symsrc__destroy(&ss_[ss_pos - 1]);
out_free:
	free(name);
	if (ret < 0 && strstr(dso->name, " (deleted)") != NULL)
		ret = 0;
out:
	dso__set_loaded(dso);
	pthread_mutex_unlock(&dso->lock);
	nsinfo__mountns_exit(&nsc);

	return ret;
}
struct map *map_groups__find_by_name(struct map_groups *mg, const char *name)
{
	struct maps *maps = &mg->maps;
	struct map *map;
	struct rb_node *node;

	down_read(&maps->lock);

	for (node = maps->names.rb_node; node; ) {
		int rc;

		map = rb_entry(node, struct map, rb_node_name);

		rc = strcmp(map->dso->short_name, name);
		if (rc < 0)
			node = node->rb_left;
		else if (rc > 0)
			node = node->rb_right;
		else
			goto out_unlock;
	}

	map = NULL;

out_unlock:
	up_read(&maps->lock);
	return map;
}

int dso__load_vmlinux(struct dso *dso, struct map *map,
		      const char *vmlinux, bool vmlinux_allocated)
{
	int err = -1;
	struct symsrc ss;
	char symfs_vmlinux[PATH_MAX];
	enum dso_binary_type symtab_type;

	if (vmlinux[0] == '/')
		snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s", vmlinux);
	else
		symbol__join_symfs(symfs_vmlinux, vmlinux);

	if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
		symtab_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
	else
		symtab_type = DSO_BINARY_TYPE__VMLINUX;

	if (symsrc__init(&ss, dso, symfs_vmlinux, symtab_type))
		return -1;

	err = dso__load_sym(dso, map, &ss, &ss, 0);
	symsrc__destroy(&ss);

	if (err > 0) {
		if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
			dso->binary_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
		else
			dso->binary_type = DSO_BINARY_TYPE__VMLINUX;
		dso__set_long_name(dso, vmlinux, vmlinux_allocated);
		dso__set_loaded(dso);
		pr_debug("Using %s for symbols\n", symfs_vmlinux);
	}

	return err;
}

int dso__load_vmlinux_path(struct dso *dso, struct map *map)
{
	int i, err = 0;
	char *filename = NULL;

	pr_debug("Looking at the vmlinux_path (%d entries long)\n",
		 vmlinux_path__nr_entries + 1);

	for (i = 0; i < vmlinux_path__nr_entries; ++i) {
		err = dso__load_vmlinux(dso, map, vmlinux_path[i], false);
		if (err > 0)
			goto out;
	}

	if (!symbol_conf.ignore_vmlinux_buildid)
		filename = dso__build_id_filename(dso, NULL, 0, false);
	if (filename != NULL) {
		err = dso__load_vmlinux(dso, map, filename, true);
		if (err > 0)
			goto out;
		free(filename);
	}
out:
	return err;
}

static bool visible_dir_filter(const char *name, struct dirent *d)
{
	if (d->d_type != DT_DIR)
		return false;
	return lsdir_no_dot_filter(name, d);
}
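/*
 * The build-id cache kcore directory (<buildid_dir>/[kcore]/<build-id>/)
 * holds one subdirectory per saved copy (e.g. created by
 * 'perf buildid-cache'), each with its own kcore, kallsyms and modules
 * files. Pick the first copy whose kallsyms still validates against the
 * current maps.
 */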
static int find_matching_kcore(struct map *map, char *dir, size_t dir_sz)
{
	char kallsyms_filename[PATH_MAX];
	int ret = -1;
	struct strlist *dirs;
	struct str_node *nd;

	dirs = lsdir(dir, visible_dir_filter);
	if (!dirs)
		return -1;

	strlist__for_each_entry(nd, dirs) {
		scnprintf(kallsyms_filename, sizeof(kallsyms_filename),
			  "%s/%s/kallsyms", dir, nd->s);
		if (!validate_kcore_addresses(kallsyms_filename, map)) {
			strlcpy(dir, kallsyms_filename, dir_sz);
			ret = 0;
			break;
		}
	}

	strlist__delete(dirs);

	return ret;
}

/*
 * Use open(O_RDONLY) to check readability directly instead of access(R_OK)
 * since access(R_OK) only checks with the real UID/GID but open() uses the
 * effective UID/GID and actual capabilities (e.g. /proc/kcore requires
 * CAP_SYS_RAWIO).
 */
static bool filename__readable(const char *file)
{
	int fd = open(file, O_RDONLY);

	if (fd < 0)
		return false;
	close(fd);
	return true;
}

static char *dso__find_kallsyms(struct dso *dso, struct map *map)
{
	u8 host_build_id[BUILD_ID_SIZE];
	char sbuild_id[SBUILD_ID_SIZE];
	bool is_host = false;
	char path[PATH_MAX];

	if (!dso->has_build_id) {
		/*
		 * Last resort, if we don't have a build-id and couldn't find
		 * any vmlinux file, try the running kernel kallsyms table.
		 */
		goto proc_kallsyms;
	}

	if (sysfs__read_build_id("/sys/kernel/notes", host_build_id,
				 sizeof(host_build_id)) == 0)
		is_host = dso__build_id_equal(dso, host_build_id);

	/* Try a fast path for /proc/kallsyms if possible */
	if (is_host) {
		/*
		 * Do not check the build-id cache, unless we know we cannot use
		 * /proc/kcore or module maps don't match to /proc/kallsyms.
		 * To check readability of /proc/kcore, do not use access(R_OK)
		 * since /proc/kcore requires CAP_SYS_RAWIO to read and access
		 * can't check it.
		 */
		if (filename__readable("/proc/kcore") &&
		    !validate_kcore_addresses("/proc/kallsyms", map))
			goto proc_kallsyms;
	}

	build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);

	/* Find kallsyms in build-id cache with kcore */
	scnprintf(path, sizeof(path), "%s/%s/%s",
		  buildid_dir, DSO__NAME_KCORE, sbuild_id);

	if (!find_matching_kcore(map, path, sizeof(path)))
		return strdup(path);

	/* Use current /proc/kallsyms if possible */
	if (is_host) {
proc_kallsyms:
		return strdup("/proc/kallsyms");
	}

	/* Finally, find a cache of kallsyms */
	if (!build_id_cache__kallsyms_path(sbuild_id, path, sizeof(path))) {
		pr_err("No kallsyms or vmlinux with build-id %s was found\n",
		       sbuild_id);
		return NULL;
	}

	return strdup(path);
}
static int dso__load_kernel_sym(struct dso *dso, struct map *map)
{
	int err;
	const char *kallsyms_filename = NULL;
	char *kallsyms_allocated_filename = NULL;
	/*
	 * Step 1: if the user specified a kallsyms or vmlinux filename, use
	 * it and only it, reporting errors to the user if it cannot be used.
	 *
	 * For instance, try to analyse an ARM perf.data file _without_ a
	 * build-id, or if the user specifies the wrong path to the right
	 * vmlinux file, obviously we can't fallback to another vmlinux (an
	 * x86_64 one, on the machine where analysis is being performed, say),
	 * or worse, /proc/kallsyms.
	 *
	 * If the specified file _has_ a build-id and there is a build-id
	 * section in the perf.data file, we will still do the expected
	 * validation in dso__load_vmlinux and will bail out if they don't
	 * match.
	 */
	if (symbol_conf.kallsyms_name != NULL) {
		kallsyms_filename = symbol_conf.kallsyms_name;
		goto do_kallsyms;
	}

	if (!symbol_conf.ignore_vmlinux && symbol_conf.vmlinux_name != NULL) {
		return dso__load_vmlinux(dso, map, symbol_conf.vmlinux_name, false);
	}

	if (!symbol_conf.ignore_vmlinux && vmlinux_path != NULL) {
		err = dso__load_vmlinux_path(dso, map);
		if (err > 0)
			return err;
	}

	/* do not try local files if a symfs was given */
	if (symbol_conf.symfs[0] != 0)
		return -1;

	kallsyms_allocated_filename = dso__find_kallsyms(dso, map);
	if (!kallsyms_allocated_filename)
		return -1;

	kallsyms_filename = kallsyms_allocated_filename;

do_kallsyms:
	err = dso__load_kallsyms(dso, kallsyms_filename, map);
	if (err > 0)
		pr_debug("Using %s for symbols\n", kallsyms_filename);
	free(kallsyms_allocated_filename);

	if (err > 0 && !dso__is_kcore(dso)) {
		dso->binary_type = DSO_BINARY_TYPE__KALLSYMS;
		dso__set_long_name(dso, DSO__NAME_KALLSYMS, false);
		map__fixup_start(map);
		map__fixup_end(map);
	}

	return err;
}

static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map)
{
	int err;
	const char *kallsyms_filename = NULL;
	struct machine *machine;
	char path[PATH_MAX];

	if (!map->groups) {
		pr_debug("Guest kernel map has no pointer to groups\n");
		return -1;
	}
	machine = map->groups->machine;

	if (machine__is_default_guest(machine)) {
		/*
		 * If the user specified a vmlinux filename, use it and only
		 * it, reporting errors to the user if it cannot be used.
		 * Otherwise use the guest_kallsyms file the user passed on
		 * the command line.
		 */
		if (symbol_conf.default_guest_vmlinux_name != NULL) {
			err = dso__load_vmlinux(dso, map,
						symbol_conf.default_guest_vmlinux_name,
						false);
			return err;
		}

		kallsyms_filename = symbol_conf.default_guest_kallsyms;
		if (!kallsyms_filename)
			return -1;
	} else {
		sprintf(path, "%s/proc/kallsyms", machine->root_dir);
		kallsyms_filename = path;
	}

	err = dso__load_kallsyms(dso, kallsyms_filename, map);
	if (err > 0)
		pr_debug("Using %s for symbols\n", kallsyms_filename);
	if (err > 0 && !dso__is_kcore(dso)) {
		dso->binary_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
		dso__set_long_name(dso, machine->mmap_name, false);
		map__fixup_start(map);
		map__fixup_end(map);
	}

	return err;
}

static void vmlinux_path__exit(void)
{
	while (--vmlinux_path__nr_entries >= 0)
		zfree(&vmlinux_path[vmlinux_path__nr_entries]);
	vmlinux_path__nr_entries = 0;

	zfree(&vmlinux_path);
}

static const char * const vmlinux_paths[] = {
	"vmlinux",
	"/boot/vmlinux"
};

static const char * const vmlinux_paths_upd[] = {
	"/boot/vmlinux-%s",
	"/usr/lib/debug/boot/vmlinux-%s",
	"/lib/modules/%s/build/vmlinux",
	"/usr/lib/debug/lib/modules/%s/vmlinux",
	"/usr/lib/debug/boot/vmlinux-%s.debug"
};

static int vmlinux_path__add(const char *new_entry)
{
	vmlinux_path[vmlinux_path__nr_entries] = strdup(new_entry);
	if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
		return -1;
	++vmlinux_path__nr_entries;

	return 0;
}
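/*
 * Build the list of vmlinux candidates: the two fixed paths above plus
 * the version-dependent ones, with "%s" filled in from the perf.data
 * env (os_release) or, failing that, uname. E.g. for a (made-up) 5.3.0
 * kernel this adds "/lib/modules/5.3.0/build/vmlinux" and friends.
 */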
static int vmlinux_path__init(struct perf_env *env)
{
	struct utsname uts;
	char bf[PATH_MAX];
	char *kernel_version;
	unsigned int i;

	vmlinux_path = malloc(sizeof(char *) * (ARRAY_SIZE(vmlinux_paths) +
			      ARRAY_SIZE(vmlinux_paths_upd)));
	if (vmlinux_path == NULL)
		return -1;

	for (i = 0; i < ARRAY_SIZE(vmlinux_paths); i++)
		if (vmlinux_path__add(vmlinux_paths[i]) < 0)
			goto out_fail;

	/* only try kernel version if no symfs was given */
	if (symbol_conf.symfs[0] != 0)
		return 0;

	if (env) {
		kernel_version = env->os_release;
	} else {
		if (uname(&uts) < 0)
			goto out_fail;

		kernel_version = uts.release;
	}

	for (i = 0; i < ARRAY_SIZE(vmlinux_paths_upd); i++) {
		snprintf(bf, sizeof(bf), vmlinux_paths_upd[i], kernel_version);
		if (vmlinux_path__add(bf) < 0)
			goto out_fail;
	}

	return 0;

out_fail:
	vmlinux_path__exit();
	return -1;
}

int setup_list(struct strlist **list, const char *list_str,
	       const char *list_name)
{
	if (list_str == NULL)
		return 0;

	*list = strlist__new(list_str, NULL);
	if (!*list) {
		pr_err("problems parsing %s list\n", list_name);
		return -1;
	}

	symbol_conf.has_filter = true;
	return 0;
}

int setup_intlist(struct intlist **list, const char *list_str,
		  const char *list_name)
{
	if (list_str == NULL)
		return 0;

	*list = intlist__new(list_str);
	if (!*list) {
		pr_err("problems parsing %s list\n", list_name);
		return -1;
	}
	return 0;
}
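/*
 * Mirror the kernel's kptr_restrict policy: with CAP_SYSLOG, kernel
 * addresses are only treated as hidden at kptr_restrict >= 2; without
 * it, any non-zero kptr_restrict (or perf_event_paranoid > 1) means
 * kallsyms addresses can't be trusted.
 */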
static bool symbol__read_kptr_restrict(void)
{
	bool value = false;
	FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r");

	if (fp != NULL) {
		char line[8];

		if (fgets(line, sizeof(line), fp) != NULL)
			value = perf_cap__capable(CAP_SYSLOG) ?
					(atoi(line) >= 2) :
					(atoi(line) != 0);

		fclose(fp);
	}

	/* Per kernel/kallsyms.c:
	 * we also restrict when perf_event_paranoid > 1 w/o CAP_SYSLOG
	 */
	if (perf_event_paranoid() > 1 && !perf_cap__capable(CAP_SYSLOG))
		value = true;

	return value;
}

int symbol__annotation_init(void)
{
	if (symbol_conf.init_annotation)
		return 0;

	if (symbol_conf.initialized) {
		pr_err("Annotation needs to be initialized before symbol__init()\n");
		return -1;
	}

	symbol_conf.priv_size += sizeof(struct annotation);
	symbol_conf.init_annotation = true;
	return 0;
}

int symbol__init(struct perf_env *env)
{
	const char *symfs;

	if (symbol_conf.initialized)
		return 0;

	symbol_conf.priv_size = PERF_ALIGN(symbol_conf.priv_size, sizeof(u64));

	symbol__elf_init();

	if (symbol_conf.sort_by_name)
		symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) -
					  sizeof(struct symbol));

	if (symbol_conf.try_vmlinux_path && vmlinux_path__init(env) < 0)
		return -1;

	if (symbol_conf.field_sep && *symbol_conf.field_sep == '.') {
		pr_err("'.' is the only non-valid --field-separator argument\n");
		return -1;
	}

	if (setup_list(&symbol_conf.dso_list,
		       symbol_conf.dso_list_str, "dso") < 0)
		return -1;

	if (setup_list(&symbol_conf.comm_list,
		       symbol_conf.comm_list_str, "comm") < 0)
		goto out_free_dso_list;

	if (setup_intlist(&symbol_conf.pid_list,
			  symbol_conf.pid_list_str, "pid") < 0)
		goto out_free_comm_list;

	if (setup_intlist(&symbol_conf.tid_list,
			  symbol_conf.tid_list_str, "tid") < 0)
		goto out_free_pid_list;

	if (setup_list(&symbol_conf.sym_list,
		       symbol_conf.sym_list_str, "symbol") < 0)
		goto out_free_tid_list;

	if (setup_list(&symbol_conf.bt_stop_list,
		       symbol_conf.bt_stop_list_str, "symbol") < 0)
		goto out_free_sym_list;

	/*
	 * A path to symbols of "/" is identical to "", so reset it here
	 * for simplicity.
	 */
	symfs = realpath(symbol_conf.symfs, NULL);
	if (symfs == NULL)
		symfs = symbol_conf.symfs;
	if (strcmp(symfs, "/") == 0)
		symbol_conf.symfs = "";
	if (symfs != symbol_conf.symfs)
		free((void *)symfs);

	symbol_conf.kptr_restrict = symbol__read_kptr_restrict();

	symbol_conf.initialized = true;
	return 0;

out_free_sym_list:
	strlist__delete(symbol_conf.sym_list);
out_free_tid_list:
	intlist__delete(symbol_conf.tid_list);
out_free_pid_list:
	intlist__delete(symbol_conf.pid_list);
out_free_comm_list:
	strlist__delete(symbol_conf.comm_list);
out_free_dso_list:
	strlist__delete(symbol_conf.dso_list);
	return -1;
}

void symbol__exit(void)
{
	if (!symbol_conf.initialized)
		return;
	strlist__delete(symbol_conf.bt_stop_list);
	strlist__delete(symbol_conf.sym_list);
	strlist__delete(symbol_conf.dso_list);
	strlist__delete(symbol_conf.comm_list);
	intlist__delete(symbol_conf.tid_list);
	intlist__delete(symbol_conf.pid_list);
	vmlinux_path__exit();
	symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL;
	symbol_conf.bt_stop_list = NULL;
	symbol_conf.initialized = false;
}

int symbol__config_symfs(const struct option *opt __maybe_unused,
			 const char *dir, int unset __maybe_unused)
{
	char *bf = NULL;
	int ret;

	symbol_conf.symfs = strdup(dir);
	if (symbol_conf.symfs == NULL)
		return -ENOMEM;

	/* skip the locally configured cache if a symfs is given, and
	 * config buildid dir to symfs/.debug
	 */
	ret = asprintf(&bf, "%s/%s", dir, ".debug");
	if (ret < 0)
		return -ENOMEM;

	set_buildid_dir(bf);

	free(bf);
	return 0;
}

struct mem_info *mem_info__get(struct mem_info *mi)
{
	if (mi)
		refcount_inc(&mi->refcnt);
	return mi;
}

void mem_info__put(struct mem_info *mi)
{
	if (mi && refcount_dec_and_test(&mi->refcnt))
		free(mi);
}

struct mem_info *mem_info__new(void)
{
	struct mem_info *mi = zalloc(sizeof(*mi));

	if (mi)
		refcount_set(&mi->refcnt, 1);
	return mi;
}

struct block_info *block_info__get(struct block_info *bi)
{
	if (bi)
		refcount_inc(&bi->refcnt);
	return bi;
}

void block_info__put(struct block_info *bi)
{
	if (bi && refcount_dec_and_test(&bi->refcnt))
		free(bi);
}
struct block_info *block_info__new(void)
{
	struct block_info *bi = zalloc(sizeof(*bi));

	if (bi)
		refcount_set(&bi->refcnt, 1);
	return bi;
}