// SPDX-License-Identifier: GPL-2.0
#include <asm/bug.h>
#include <linux/kernel.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#include "compress.h"
#include "path.h"
#include "symbol.h"
#include "srcline.h"
#include "dso.h"
#include "machine.h"
#include "auxtrace.h"
#include "util.h"
#include "debug.h"
#include "string2.h"
#include "vdso.h"

static const char * const debuglink_paths[] = {
	"%.0s%s",
	"%s/%s",
	"%s/.debug/%s",
	"/usr/lib/debug%s/%s"
};

char dso__symtab_origin(const struct dso *dso)
{
	static const char origin[] = {
		[DSO_BINARY_TYPE__KALLSYMS]			= 'k',
		[DSO_BINARY_TYPE__VMLINUX]			= 'v',
		[DSO_BINARY_TYPE__JAVA_JIT]			= 'j',
		[DSO_BINARY_TYPE__DEBUGLINK]			= 'l',
		[DSO_BINARY_TYPE__BUILD_ID_CACHE]		= 'B',
		[DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO]	= 'D',
		[DSO_BINARY_TYPE__FEDORA_DEBUGINFO]		= 'f',
		[DSO_BINARY_TYPE__UBUNTU_DEBUGINFO]		= 'u',
		[DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO]	= 'o',
		[DSO_BINARY_TYPE__BUILDID_DEBUGINFO]		= 'b',
		[DSO_BINARY_TYPE__SYSTEM_PATH_DSO]		= 'd',
		[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE]		= 'K',
		[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP]	= 'm',
		[DSO_BINARY_TYPE__GUEST_KALLSYMS]		= 'g',
		[DSO_BINARY_TYPE__GUEST_KMODULE]		= 'G',
		[DSO_BINARY_TYPE__GUEST_KMODULE_COMP]		= 'M',
		[DSO_BINARY_TYPE__GUEST_VMLINUX]		= 'V',
	};

	if (dso == NULL || dso->symtab_type == DSO_BINARY_TYPE__NOT_FOUND)
		return '!';
	return origin[dso->symtab_type];
}

int dso__read_binary_type_filename(const struct dso *dso,
				   enum dso_binary_type type,
				   char *root_dir, char *filename, size_t size)
{
	char build_id_hex[SBUILD_ID_SIZE];
	int ret = 0;
	size_t len;

	switch (type) {
	case DSO_BINARY_TYPE__DEBUGLINK:
	{
		const char *last_slash;
		char dso_dir[PATH_MAX];
		char symfile[PATH_MAX];
		unsigned int i;

		len = __symbol__join_symfs(filename, size, dso->long_name);
		last_slash = filename + len;
		while (last_slash != filename && *last_slash != '/')
			last_slash--;

		strncpy(dso_dir, filename, last_slash - filename);
		dso_dir[last_slash - filename] = '\0';

		if (!is_regular_file(filename)) {
			ret = -1;
			break;
		}

		ret = filename__read_debuglink(filename, symfile, PATH_MAX);
		if (ret)
			break;

		/* Check predefined locations where debug file might reside */
		ret = -1;
		for (i = 0; i < ARRAY_SIZE(debuglink_paths); i++) {
			snprintf(filename, size,
				 debuglink_paths[i], dso_dir, symfile);
			if (is_regular_file(filename)) {
				ret = 0;
				break;
			}
		}

		break;
	}
	case DSO_BINARY_TYPE__BUILD_ID_CACHE:
		if (dso__build_id_filename(dso, filename, size, false) == NULL)
			ret = -1;
		break;

	case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
		if (dso__build_id_filename(dso, filename, size, true) == NULL)
			ret = -1;
		break;

	case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
		snprintf(filename + len, size - len, "%s.debug", dso->long_name);
		break;

	case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
		snprintf(filename + len, size - len, "%s", dso->long_name);
		break;

	case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
	{
		const char *last_slash;
		size_t dir_size;

		last_slash = dso->long_name + dso->long_name_len;
		while (last_slash != dso->long_name && *last_slash != '/')
			last_slash--;

		len = __symbol__join_symfs(filename, size, "");
		dir_size = last_slash - dso->long_name + 2;
		if (dir_size > (size - len)) {
			ret = -1;
			break;
		}
		len += scnprintf(filename + len, dir_size, "%s", dso->long_name);
		len += scnprintf(filename + len, size - len, ".debug%s",
				 last_slash);
		break;
	}

	case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
		if (!dso->has_build_id) {
			ret = -1;
			break;
		}

		build_id__sprintf(dso->build_id,
				  sizeof(dso->build_id),
				  build_id_hex);
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug/.build-id/");
		snprintf(filename + len, size - len, "%.2s/%s.debug",
			 build_id_hex, build_id_hex + 2);
		break;

	case DSO_BINARY_TYPE__VMLINUX:
	case DSO_BINARY_TYPE__GUEST_VMLINUX:
	case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
		__symbol__join_symfs(filename, size, dso->long_name);
		break;

	case DSO_BINARY_TYPE__GUEST_KMODULE:
	case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
		path__join3(filename, size, symbol_conf.symfs,
			    root_dir, dso->long_name);
		break;

	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
		__symbol__join_symfs(filename, size, dso->long_name);
		break;

	case DSO_BINARY_TYPE__KCORE:
	case DSO_BINARY_TYPE__GUEST_KCORE:
		snprintf(filename, size, "%s", dso->long_name);
		break;

	default:
	case DSO_BINARY_TYPE__KALLSYMS:
	case DSO_BINARY_TYPE__GUEST_KALLSYMS:
	case DSO_BINARY_TYPE__JAVA_JIT:
	case DSO_BINARY_TYPE__NOT_FOUND:
		ret = -1;
		break;
	}

	return ret;
}
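
/*
 * Example (illustrative sketch): asking for the debuglink candidate of a dso,
 * assuming "dso" and "machine" are already set up:
 *
 *	char path[PATH_MAX];
 *
 *	if (!dso__read_binary_type_filename(dso, DSO_BINARY_TYPE__DEBUGLINK,
 *					    machine->root_dir, path,
 *					    sizeof(path)))
 *		pr_debug("debug file candidate: %s\n", path);
 *
 * A zero return means @path was filled in (for DSO_BINARY_TYPE__DEBUGLINK it
 * also means the file exists); -1 means no name could be built.
 */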

static const struct {
	const char *fmt;
	int (*decompress)(const char *input, int output);
} compressions[] = {
#ifdef HAVE_ZLIB_SUPPORT
	{ "gz", gzip_decompress_to_file },
#endif
#ifdef HAVE_LZMA_SUPPORT
	{ "xz", lzma_decompress_to_file },
#endif
	{ NULL, NULL },
};

bool is_supported_compression(const char *ext)
{
	unsigned i;

	for (i = 0; compressions[i].fmt; i++) {
		if (!strcmp(ext, compressions[i].fmt))
			return true;
	}
	return false;
}
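
/*
 * The extension strings above are stored without the leading dot, so callers
 * pass e.g. "gz" rather than ".gz" (illustrative):
 *
 *	is_supported_compression("gz")  -> true (with HAVE_ZLIB_SUPPORT)
 *	is_supported_compression(".gz") -> false
 *
 * decompress_to_file() below returns true only when a matching decompressor
 * ran and returned 0.
 */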

bool is_kernel_module(const char *pathname, int cpumode)
{
	struct kmod_path m;
	int mode = cpumode & PERF_RECORD_MISC_CPUMODE_MASK;

	WARN_ONCE(mode != cpumode,
		  "Internal error: passing unmasked cpumode (%x) to is_kernel_module",
		  cpumode);

	switch (mode) {
	case PERF_RECORD_MISC_USER:
	case PERF_RECORD_MISC_HYPERVISOR:
	case PERF_RECORD_MISC_GUEST_USER:
		return false;
	/* Treat PERF_RECORD_MISC_CPUMODE_UNKNOWN as kernel */
	default:
		if (kmod_path__parse(&m, pathname)) {
			pr_err("Failed to check whether %s is a kernel module or not. Assume it is.",
			       pathname);
			return true;
		}
	}

	return m.kmod;
}

bool decompress_to_file(const char *ext, const char *filename, int output_fd)
{
	unsigned i;

	for (i = 0; compressions[i].fmt; i++) {
		if (!strcmp(ext, compressions[i].fmt))
			return !compressions[i].decompress(filename,
							   output_fd);
	}
	return false;
}

bool dso__needs_decompress(struct dso *dso)
{
	return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
		dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
}

static int decompress_kmodule(struct dso *dso, const char *name, char *tmpbuf)
{
	int fd = -1;
	struct kmod_path m;

	if (!dso__needs_decompress(dso))
		return -1;

	if (kmod_path__parse_ext(&m, dso->long_name))
		return -1;

	if (!m.comp)
		goto out;

	fd = mkstemp(tmpbuf);
	if (fd < 0) {
		dso->load_errno = errno;
		goto out;
	}

	if (!decompress_to_file(m.ext, name, fd)) {
		dso->load_errno = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE;
		close(fd);
		fd = -1;
	}

out:
	free(m.ext);
	return fd;
}

int dso__decompress_kmodule_fd(struct dso *dso, const char *name)
{
	char tmpbuf[] = KMOD_DECOMP_NAME;
	int fd;

	fd = decompress_kmodule(dso, name, tmpbuf);
	unlink(tmpbuf);
	return fd;
}

int dso__decompress_kmodule_path(struct dso *dso, const char *name,
				 char *pathname, size_t len)
{
	char tmpbuf[] = KMOD_DECOMP_NAME;
	int fd;

	fd = decompress_kmodule(dso, name, tmpbuf);
	if (fd < 0) {
		unlink(tmpbuf);
		return -1;
	}

	strncpy(pathname, tmpbuf, len);
	close(fd);
	return 0;
}

/*
 * Parses kernel module specified in @path and updates
 * @m argument like:
 *
 *	@comp - true if @path contains supported compression suffix,
 *		false otherwise
 *	@kmod - true if @path contains '.ko' suffix in right position,
 *		false otherwise
 *	@name - if (@alloc_name && @kmod) is true, it contains strdup-ed base name
 *		of the kernel module without suffixes, otherwise strdup-ed
 *		base name of @path
 *	@ext  - if (@alloc_ext && @comp) is true, it contains strdup-ed string
 *		of the compression suffix
 *
 * Returns 0 if there's no strdup error, -ENOMEM otherwise.
 */
int __kmod_path__parse(struct kmod_path *m, const char *path,
		       bool alloc_name, bool alloc_ext)
{
	const char *name = strrchr(path, '/');
	const char *ext  = strrchr(path, '.');
	bool is_simple_name = false;

	memset(m, 0x0, sizeof(*m));
	name = name ? name + 1 : path;

	/*
	 * '.' is also a valid character for module name. For example:
	 * [aaa.bbb] is a valid module name. '[' should have higher
	 * priority than '.ko' suffix.
	 *
	 * The kernel names are from machine__mmap_name. Such
	 * name should belong to kernel itself, not kernel module.
	 */
	if (name[0] == '[') {
		is_simple_name = true;
		if ((strncmp(name, "[kernel.kallsyms]", 17) == 0) ||
		    (strncmp(name, "[guest.kernel.kallsyms", 22) == 0) ||
		    (strncmp(name, "[vdso]", 6) == 0) ||
		    (strncmp(name, "[vsyscall]", 10) == 0)) {
			m->kmod = false;

		} else
			m->kmod = true;
	}

	/* No extension, just return name. */
	if ((ext == NULL) || is_simple_name) {
		if (alloc_name) {
			m->name = strdup(name);
			return m->name ? 0 : -ENOMEM;
		}
		return 0;
	}

	if (is_supported_compression(ext + 1)) {
		m->comp = true;
		ext -= 3;
	}

	/* Check .ko extension only if there's enough name left. */
	if (ext > name)
		m->kmod = !strncmp(ext, ".ko", 3);

	if (alloc_name) {
		if (m->kmod) {
			if (asprintf(&m->name, "[%.*s]", (int) (ext - name), name) == -1)
				return -ENOMEM;
		} else {
			if (asprintf(&m->name, "%s", name) == -1)
				return -ENOMEM;
		}

		strxfrchar(m->name, '-', '_');
	}

	if (alloc_ext && m->comp) {
		m->ext = strdup(ext + 4);
		if (!m->ext) {
			free((void *) m->name);
			return -ENOMEM;
		}
	}

	return 0;
}
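
/*
 * Example (illustrative): parsing "crc32c-intel.ko.gz" with
 * __kmod_path__parse(&m, path, true, true), assuming zlib support is
 * compiled in, yields:
 *
 *	m.kmod = true                '.ko' found in the right position
 *	m.comp = true                'gz' is a supported compression suffix
 *	m.name = "[crc32c_intel]"    strdup-ed, '-' turned into '_'
 *	m.ext  = "gz"                strdup-ed, only because alloc_ext is true
 *
 * The caller owns m.name and m.ext and is responsible for freeing them.
 */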

void dso__set_module_info(struct dso *dso, struct kmod_path *m,
			  struct machine *machine)
{
	if (machine__is_host(machine))
		dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
	else
		dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;

	/* _KMODULE_COMP should be next to _KMODULE */
	if (m->kmod && m->comp)
		dso->symtab_type++;

	dso__set_short_name(dso, strdup(m->name), true);
}

/*
 * Global list of open DSOs and the counter.
 */
static LIST_HEAD(dso__data_open);
static long dso__data_open_cnt;
static pthread_mutex_t dso__data_open_lock = PTHREAD_MUTEX_INITIALIZER;

static void dso__list_add(struct dso *dso)
{
	list_add_tail(&dso->data.open_entry, &dso__data_open);
	dso__data_open_cnt++;
}

static void dso__list_del(struct dso *dso)
{
	list_del(&dso->data.open_entry);
	WARN_ONCE(dso__data_open_cnt <= 0,
		  "DSO data fd counter out of bounds.");
	dso__data_open_cnt--;
}

static void close_first_dso(void);

static int do_open(char *name)
{
	int fd;
	char sbuf[STRERR_BUFSIZE];

	do {
		fd = open(name, O_RDONLY|O_CLOEXEC);
		if (fd >= 0)
			return fd;

		pr_debug("dso open failed: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		if (!dso__data_open_cnt || errno != EMFILE)
			break;

		close_first_dso();
	} while (1);

	return -1;
}

static int __open_dso(struct dso *dso, struct machine *machine)
{
	int fd = -EINVAL;
	char *root_dir = (char *)"";
	char *name = malloc(PATH_MAX);

	if (!name)
		return -ENOMEM;

	if (machine)
		root_dir = machine->root_dir;

	if (dso__read_binary_type_filename(dso, dso->binary_type,
					   root_dir, name, PATH_MAX))
		goto out;

	if (!is_regular_file(name))
		goto out;

	if (dso__needs_decompress(dso)) {
		char newpath[KMOD_DECOMP_LEN];
		size_t len = sizeof(newpath);

		if (dso__decompress_kmodule_path(dso, name, newpath, len) < 0) {
			fd = -dso->load_errno;
			goto out;
		}

		strcpy(name, newpath);
	}

	fd = do_open(name);

	if (dso__needs_decompress(dso))
		unlink(name);

out:
	free(name);
	return fd;
}

static void check_data_close(void);
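
/*
 * Summary of the fd handling in this file (descriptive only): every
 * successfully opened dso is appended to dso__data_open; do_open() retries
 * after closing the oldest entry when open() fails with EMFILE, and
 * check_data_close() keeps the number of cached fds under the limit
 * returned by get_fd_limit() (half of RLIMIT_NOFILE).
 */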

/**
 * open_dso - Open DSO data file
 * @dso: dso object
 *
 * Open @dso's data file descriptor and update the
 * list/count of open DSO objects.
 */
static int open_dso(struct dso *dso, struct machine *machine)
{
	int fd;
	struct nscookie nsc;

	if (dso->binary_type != DSO_BINARY_TYPE__BUILD_ID_CACHE)
		nsinfo__mountns_enter(dso->nsinfo, &nsc);
	fd = __open_dso(dso, machine);
	if (dso->binary_type != DSO_BINARY_TYPE__BUILD_ID_CACHE)
		nsinfo__mountns_exit(&nsc);

	if (fd >= 0) {
		dso__list_add(dso);
		/*
		 * Check if we crossed the allowed number
		 * of opened DSOs and close one if needed.
		 */
		check_data_close();
	}

	return fd;
}

static void close_data_fd(struct dso *dso)
{
	if (dso->data.fd >= 0) {
		close(dso->data.fd);
		dso->data.fd = -1;
		dso->data.file_size = 0;
		dso__list_del(dso);
	}
}

/**
 * close_dso - Close DSO data file
 * @dso: dso object
 *
 * Close @dso's data file descriptor and update the
 * list/count of open DSO objects.
 */
static void close_dso(struct dso *dso)
{
	close_data_fd(dso);
}

static void close_first_dso(void)
{
	struct dso *dso;

	dso = list_first_entry(&dso__data_open, struct dso, data.open_entry);
	close_dso(dso);
}

static rlim_t get_fd_limit(void)
{
	struct rlimit l;
	rlim_t limit = 0;

	/* Allow half of the current open fd limit. */
	if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
		if (l.rlim_cur == RLIM_INFINITY)
			limit = l.rlim_cur;
		else
			limit = l.rlim_cur / 2;
	} else {
		pr_err("failed to get fd limit\n");
		limit = 1;
	}

	return limit;
}

static rlim_t fd_limit;

/*
 * Used only by tests/dso-data.c to reset the environment
 * for tests. I don't expect we should change this during
 * standard runtime.
 */
void reset_fd_limit(void)
{
	fd_limit = 0;
}

static bool may_cache_fd(void)
{
	if (!fd_limit)
		fd_limit = get_fd_limit();

	if (fd_limit == RLIM_INFINITY)
		return true;

	return fd_limit > (rlim_t) dso__data_open_cnt;
}

/*
 * Check and close LRU dso if we crossed allowed limit
 * for opened dso file descriptors. The limit is half
 * of the RLIMIT_NOFILE files opened.
 */
static void check_data_close(void)
{
	bool cache_fd = may_cache_fd();

	if (!cache_fd)
		close_first_dso();
}

/**
 * dso__data_close - Close DSO data file
 * @dso: dso object
 *
 * External interface to close @dso's data file descriptor.
 */
void dso__data_close(struct dso *dso)
{
	pthread_mutex_lock(&dso__data_open_lock);
	close_dso(dso);
	pthread_mutex_unlock(&dso__data_open_lock);
}

static void try_to_open_dso(struct dso *dso, struct machine *machine)
{
	enum dso_binary_type binary_type_data[] = {
		DSO_BINARY_TYPE__BUILD_ID_CACHE,
		DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
		DSO_BINARY_TYPE__NOT_FOUND,
	};
	int i = 0;

	if (dso->data.fd >= 0)
		return;

	if (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND) {
		dso->data.fd = open_dso(dso, machine);
		goto out;
	}

	do {
		dso->binary_type = binary_type_data[i++];

		dso->data.fd = open_dso(dso, machine);
		if (dso->data.fd >= 0)
			goto out;

	} while (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND);
out:
	if (dso->data.fd >= 0)
		dso->data.status = DSO_DATA_STATUS_OK;
	else
		dso->data.status = DSO_DATA_STATUS_ERROR;
}

/**
 * dso__data_get_fd - Get dso's data file descriptor
 * @dso: dso object
 * @machine: machine object
 *
 * External interface to find dso's file, open it and
 * return the file descriptor. It should be paired with
 * dso__data_put_fd() if it returns a non-negative value.
 */
int dso__data_get_fd(struct dso *dso, struct machine *machine)
{
	if (dso->data.status == DSO_DATA_STATUS_ERROR)
		return -1;

	if (pthread_mutex_lock(&dso__data_open_lock) < 0)
		return -1;

	try_to_open_dso(dso, machine);

	if (dso->data.fd < 0)
		pthread_mutex_unlock(&dso__data_open_lock);

	return dso->data.fd;
}

void dso__data_put_fd(struct dso *dso __maybe_unused)
{
	pthread_mutex_unlock(&dso__data_open_lock);
}
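
/*
 * Typical pairing for the two helpers above (illustrative sketch):
 *
 *	int fd = dso__data_get_fd(dso, machine);
 *
 *	if (fd >= 0) {
 *		... read from fd ...
 *		dso__data_put_fd(dso);
 *	}
 *
 * dso__data_get_fd() keeps dso__data_open_lock held on success, and only
 * dso__data_put_fd() releases it, so the window between the two calls
 * should be kept short.
 */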

bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by)
{
	u32 flag = 1 << by;

	if (dso->data.status_seen & flag)
		return true;

	dso->data.status_seen |= flag;

	return false;
}

static void
dso_cache__free(struct dso *dso)
{
	struct rb_root *root = &dso->data.cache;
	struct rb_node *next = rb_first(root);

	pthread_mutex_lock(&dso->lock);
	while (next) {
		struct dso_cache *cache;

		cache = rb_entry(next, struct dso_cache, rb_node);
		next = rb_next(&cache->rb_node);
		rb_erase(&cache->rb_node, root);
		free(cache);
	}
	pthread_mutex_unlock(&dso->lock);
}

static struct dso_cache *dso_cache__find(struct dso *dso, u64 offset)
{
	const struct rb_root *root = &dso->data.cache;
	struct rb_node * const *p = &root->rb_node;
	const struct rb_node *parent = NULL;
	struct dso_cache *cache;

	while (*p != NULL) {
		u64 end;

		parent = *p;
		cache = rb_entry(parent, struct dso_cache, rb_node);
		end = cache->offset + DSO__DATA_CACHE_SIZE;

		if (offset < cache->offset)
			p = &(*p)->rb_left;
		else if (offset >= end)
			p = &(*p)->rb_right;
		else
			return cache;
	}

	return NULL;
}

static struct dso_cache *
dso_cache__insert(struct dso *dso, struct dso_cache *new)
{
	struct rb_root *root = &dso->data.cache;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct dso_cache *cache;
	u64 offset = new->offset;

	pthread_mutex_lock(&dso->lock);
	while (*p != NULL) {
		u64 end;

		parent = *p;
		cache = rb_entry(parent, struct dso_cache, rb_node);
		end = cache->offset + DSO__DATA_CACHE_SIZE;

		if (offset < cache->offset)
			p = &(*p)->rb_left;
		else if (offset >= end)
			p = &(*p)->rb_right;
		else
			goto out;
	}

	rb_link_node(&new->rb_node, parent, p);
	rb_insert_color(&new->rb_node, root);

	cache = NULL;
out:
	pthread_mutex_unlock(&dso->lock);
	return cache;
}

static ssize_t
dso_cache__memcpy(struct dso_cache *cache, u64 offset,
		  u8 *data, u64 size)
{
	u64 cache_offset = offset - cache->offset;
	u64 cache_size   = min(cache->size - cache_offset, size);

	memcpy(data, cache->data + cache_offset, cache_size);
	return cache_size;
}

static ssize_t
dso_cache__read(struct dso *dso, struct machine *machine,
		u64 offset, u8 *data, ssize_t size)
{
	struct dso_cache *cache;
	struct dso_cache *old;
	ssize_t ret;

	do {
		u64 cache_offset;

		cache = zalloc(sizeof(*cache) + DSO__DATA_CACHE_SIZE);
		if (!cache)
			return -ENOMEM;

		pthread_mutex_lock(&dso__data_open_lock);

		/*
		 * dso->data.fd might be closed if other thread opened another
		 * file (dso) due to open file limit (RLIMIT_NOFILE).
		 */
		try_to_open_dso(dso, machine);

		if (dso->data.fd < 0) {
			ret = -errno;
			dso->data.status = DSO_DATA_STATUS_ERROR;
			break;
		}

		cache_offset = offset & DSO__DATA_CACHE_MASK;

		ret = pread(dso->data.fd, cache->data, DSO__DATA_CACHE_SIZE, cache_offset);
		if (ret <= 0)
			break;

		cache->offset = cache_offset;
		cache->size   = ret;
	} while (0);

	pthread_mutex_unlock(&dso__data_open_lock);

	if (ret > 0) {
		old = dso_cache__insert(dso, cache);
		if (old) {
			/* we lose the race */
			free(cache);
			cache = old;
		}

		ret = dso_cache__memcpy(cache, offset, data, size);
	}

	if (ret <= 0)
		free(cache);

	return ret;
}

static ssize_t dso_cache_read(struct dso *dso, struct machine *machine,
			      u64 offset, u8 *data, ssize_t size)
{
	struct dso_cache *cache;

	cache = dso_cache__find(dso, offset);
	if (cache)
		return dso_cache__memcpy(cache, offset, data, size);
	else
		return dso_cache__read(dso, machine, offset, data, size);
}

/*
 * Reads and caches dso data in DSO__DATA_CACHE_SIZE-sized chunks kept
 * in the rb_tree. Any read of already cached data is served from the
 * cached chunks.
 */
static ssize_t cached_read(struct dso *dso, struct machine *machine,
			   u64 offset, u8 *data, ssize_t size)
{
	ssize_t r = 0;
	u8 *p = data;

	do {
		ssize_t ret;

		ret = dso_cache_read(dso, machine, offset, p, size);
		if (ret < 0)
			return ret;

		/* Reached EOF, return what we have. */
		if (!ret)
			break;

		BUG_ON(ret > size);

		r      += ret;
		p      += ret;
		offset += ret;
		size   -= ret;

	} while (size);

	return r;
}
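
/*
 * Illustrative example of the chunking above, assuming the 4 KiB
 * DSO__DATA_CACHE_SIZE from dso.h: a 64 byte read at offset 0x1ff0 is split
 * into two dso_cache_read() calls, 16 bytes from the chunk starting at
 * 0x1000 and 48 bytes from the chunk starting at 0x2000; each chunk is
 * typically pread() once and then served from dso->data.cache.
 */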

static int data_file_size(struct dso *dso, struct machine *machine)
{
	int ret = 0;
	struct stat st;
	char sbuf[STRERR_BUFSIZE];

	if (dso->data.file_size)
		return 0;

	if (dso->data.status == DSO_DATA_STATUS_ERROR)
		return -1;

	pthread_mutex_lock(&dso__data_open_lock);

	/*
	 * dso->data.fd might be closed if other thread opened another
	 * file (dso) due to open file limit (RLIMIT_NOFILE).
	 */
	try_to_open_dso(dso, machine);

	if (dso->data.fd < 0) {
		ret = -errno;
		dso->data.status = DSO_DATA_STATUS_ERROR;
		goto out;
	}

	if (fstat(dso->data.fd, &st) < 0) {
		ret = -errno;
		pr_err("dso cache fstat failed: %s\n",
		       str_error_r(errno, sbuf, sizeof(sbuf)));
		dso->data.status = DSO_DATA_STATUS_ERROR;
		goto out;
	}
	dso->data.file_size = st.st_size;

out:
	pthread_mutex_unlock(&dso__data_open_lock);
	return ret;
}

/**
 * dso__data_size - Return dso data size
 * @dso: dso object
 * @machine: machine object
 *
 * Return: dso data size
 */
off_t dso__data_size(struct dso *dso, struct machine *machine)
{
	if (data_file_size(dso, machine))
		return -1;

	/* For now just estimate dso data size is close to file size */
	return dso->data.file_size;
}

static ssize_t data_read_offset(struct dso *dso, struct machine *machine,
				u64 offset, u8 *data, ssize_t size)
{
	if (data_file_size(dso, machine))
		return -1;

	/* Check the offset sanity. */
	if (offset > dso->data.file_size)
		return -1;

	if (offset + size < offset)
		return -1;

	return cached_read(dso, machine, offset, data, size);
}

/**
 * dso__data_read_offset - Read data from dso file offset
 * @dso: dso object
 * @machine: machine object
 * @offset: file offset
 * @data: buffer to store data
 * @size: size of the @data buffer
 *
 * External interface to read data from dso file offset. Open
 * dso data file and use cached_read to get the data.
 */
ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
			      u64 offset, u8 *data, ssize_t size)
{
	if (dso->data.status == DSO_DATA_STATUS_ERROR)
		return -1;

	return data_read_offset(dso, machine, offset, data, size);
}

/**
 * dso__data_read_addr - Read data from dso address
 * @dso: dso object
 * @map: memory map used to translate @addr to a file offset
 * @machine: machine object
 * @addr: virtual memory address
 * @data: buffer to store data
 * @size: size of the @data buffer
 *
 * External interface to read data from dso address.
 */
ssize_t dso__data_read_addr(struct dso *dso, struct map *map,
			    struct machine *machine, u64 addr,
			    u8 *data, ssize_t size)
{
	u64 offset = map->map_ip(map, addr);
	return dso__data_read_offset(dso, machine, offset, data, size);
}
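
/*
 * Example (illustrative sketch): fetching a few bytes at a sampled virtual
 * address, where "ip" stands for any address that falls inside @map:
 *
 *	u8 buf[16];
 *	ssize_t n = dso__data_read_addr(dso, map, machine, ip, buf, sizeof(buf));
 *
 *	if (n > 0)
 *		... the first n bytes of buf are valid ...
 *
 * The map converts the address to a file offset via map->map_ip() and the
 * read is then served through the chunk cache above.
 */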

struct map *dso__new_map(const char *name)
{
	struct map *map = NULL;
	struct dso *dso = dso__new(name);

	if (dso)
		map = map__new2(0, dso, MAP__FUNCTION);

	return map;
}

struct dso *machine__findnew_kernel(struct machine *machine, const char *name,
				    const char *short_name, int dso_type)
{
	/*
	 * The kernel dso could be created by build_id processing.
	 */
	struct dso *dso = machine__findnew_dso(machine, name);

	/*
	 * We need to run this in all cases, since during the build_id
	 * processing we had no idea this was the kernel dso.
	 */
	if (dso != NULL) {
		dso__set_short_name(dso, short_name, false);
		dso->kernel = dso_type;
	}

	return dso;
}

/*
 * Find a matching entry and/or link current entry to RB tree.
 * Either the dso or the name parameter must be non-NULL or the
 * function will not work.
 */
static struct dso *__dso__findlink_by_longname(struct rb_root *root,
					       struct dso *dso, const char *name)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node  *parent = NULL;

	if (!name)
		name = dso->long_name;
	/*
	 * Find node with the matching name
	 */
	while (*p) {
		struct dso *this = rb_entry(*p, struct dso, rb_node);
		int rc = strcmp(name, this->long_name);

		parent = *p;
		if (rc == 0) {
			/*
			 * In case the new DSO is a duplicate of an existing
			 * one, print a one-time warning & put the new entry
			 * at the end of the list of duplicates.
			 */
			if (!dso || (dso == this))
				return this;	/* Find matching dso */
			/*
			 * The core kernel DSOs may have duplicated long name.
			 * In this case, the short name should be different.
			 * Comparing the short names to differentiate the DSOs.
			 */
			rc = strcmp(dso->short_name, this->short_name);
			if (rc == 0) {
				pr_err("Duplicated dso name: %s\n", name);
				return NULL;
			}
		}
		if (rc < 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	if (dso) {
		/* Add new node and rebalance tree */
		rb_link_node(&dso->rb_node, parent, p);
		rb_insert_color(&dso->rb_node, root);
		dso->root = root;
	}
	return NULL;
}

static inline struct dso *__dso__find_by_longname(struct rb_root *root,
						  const char *name)
{
	return __dso__findlink_by_longname(root, NULL, name);
}

void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated)
{
	struct rb_root *root = dso->root;

	if (name == NULL)
		return;

	if (dso->long_name_allocated)
		free((char *)dso->long_name);

	if (root) {
		rb_erase(&dso->rb_node, root);
		/*
		 * __dso__findlink_by_longname() isn't guaranteed to add it
		 * back, so a clean removal is required here.
		 */
		RB_CLEAR_NODE(&dso->rb_node);
		dso->root = NULL;
	}

	dso->long_name = name;
	dso->long_name_len = strlen(name);
	dso->long_name_allocated = name_allocated;

	if (root)
		__dso__findlink_by_longname(root, dso, NULL);
}

void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
{
	if (name == NULL)
		return;

	if (dso->short_name_allocated)
		free((char *)dso->short_name);

	dso->short_name		  = name;
	dso->short_name_len	  = strlen(name);
	dso->short_name_allocated = name_allocated;
}
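
/*
 * Ownership example for the two setters above (illustrative): passing
 * name_allocated == true hands the string over to the dso, which frees it
 * when the name is replaced or in dso__delete():
 *
 *	dso__set_short_name(dso, strdup("[ext4]"), true);   dso owns the copy
 *	dso__set_long_name(dso, dso->name, false);          borrowed, not freed
 */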

static void dso__set_basename(struct dso *dso)
{
	/*
	 * basename() may modify path buffer, so we must pass
	 * a copy.
	 */
	char *base, *lname = strdup(dso->long_name);

	if (!lname)
		return;

	/*
	 * basename() may return a pointer to internal
	 * storage which is reused in subsequent calls
	 * so copy the result.
	 */
	base = strdup(basename(lname));

	free(lname);

	if (!base)
		return;

	dso__set_short_name(dso, base, true);
}

int dso__name_len(const struct dso *dso)
{
	if (!dso)
		return strlen("[unknown]");
	if (verbose > 0)
		return dso->long_name_len;

	return dso->short_name_len;
}

bool dso__loaded(const struct dso *dso, enum map_type type)
{
	return dso->loaded & (1 << type);
}

bool dso__sorted_by_name(const struct dso *dso, enum map_type type)
{
	return dso->sorted_by_name & (1 << type);
}

void dso__set_sorted_by_name(struct dso *dso, enum map_type type)
{
	dso->sorted_by_name |= (1 << type);
}

struct dso *dso__new(const char *name)
{
	struct dso *dso = calloc(1, sizeof(*dso) + strlen(name) + 1);

	if (dso != NULL) {
		int i;
		strcpy(dso->name, name);
		dso__set_long_name(dso, dso->name, false);
		dso__set_short_name(dso, dso->name, false);
		for (i = 0; i < MAP__NR_TYPES; ++i)
			dso->symbols[i] = dso->symbol_names[i] = RB_ROOT;
		dso->data.cache = RB_ROOT;
		dso->inlined_nodes = RB_ROOT;
		dso->srclines = RB_ROOT;
		dso->data.fd = -1;
		dso->data.status = DSO_DATA_STATUS_UNKNOWN;
		dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND;
		dso->binary_type = DSO_BINARY_TYPE__NOT_FOUND;
		dso->is_64_bit = (sizeof(void *) == 8);
		dso->loaded = 0;
		dso->rel = 0;
		dso->sorted_by_name = 0;
		dso->has_build_id = 0;
		dso->has_srcline = 1;
		dso->a2l_fails = 1;
		dso->kernel = DSO_TYPE_USER;
		dso->needs_swap = DSO_SWAP__UNSET;
		RB_CLEAR_NODE(&dso->rb_node);
		dso->root = NULL;
		INIT_LIST_HEAD(&dso->node);
		INIT_LIST_HEAD(&dso->data.open_entry);
		pthread_mutex_init(&dso->lock, NULL);
		refcount_set(&dso->refcnt, 1);
	}

	return dso;
}

void dso__delete(struct dso *dso)
{
	int i;

	if (!RB_EMPTY_NODE(&dso->rb_node))
		pr_err("DSO %s is still in rbtree when being deleted!\n",
		       dso->long_name);

	/* free inlines first, as they reference symbols */
	inlines__tree_delete(&dso->inlined_nodes);
	srcline__tree_delete(&dso->srclines);
	for (i = 0; i < MAP__NR_TYPES; ++i)
		symbols__delete(&dso->symbols[i]);

	if (dso->short_name_allocated) {
		zfree((char **)&dso->short_name);
		dso->short_name_allocated = false;
	}

	if (dso->long_name_allocated) {
		zfree((char **)&dso->long_name);
		dso->long_name_allocated = false;
	}

	dso__data_close(dso);
	auxtrace_cache__free(dso->auxtrace_cache);
	dso_cache__free(dso);
	dso__free_a2l(dso);
	zfree(&dso->symsrc_filename);
	nsinfo__zput(dso->nsinfo);
	pthread_mutex_destroy(&dso->lock);
	free(dso);
}

struct dso *dso__get(struct dso *dso)
{
	if (dso)
		refcount_inc(&dso->refcnt);
	return dso;
}

void dso__put(struct dso *dso)
{
	if (dso && refcount_dec_and_test(&dso->refcnt))
		dso__delete(dso);
}
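
/*
 * Reference counting sketch based on the helpers above:
 *
 *	struct dso *dso = dso__new(name);    refcnt == 1, caller owns it
 *	dso__get(dso);                       take an extra reference
 *	dso__put(dso);                       drop it again
 *	dso__put(dso);                       last ref, dso__delete() runs
 *
 * Containers such as 'struct dsos' take their own reference when a dso is
 * added (see __dsos__add() below).
 */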

void dso__set_build_id(struct dso *dso, void *build_id)
{
	memcpy(dso->build_id, build_id, sizeof(dso->build_id));
	dso->has_build_id = 1;
}

bool dso__build_id_equal(const struct dso *dso, u8 *build_id)
{
	return memcmp(dso->build_id, build_id, sizeof(dso->build_id)) == 0;
}

void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine)
{
	char path[PATH_MAX];

	if (machine__is_default_guest(machine))
		return;
	sprintf(path, "%s/sys/kernel/notes", machine->root_dir);
	if (sysfs__read_build_id(path, dso->build_id,
				 sizeof(dso->build_id)) == 0)
		dso->has_build_id = true;
}

int dso__kernel_module_get_build_id(struct dso *dso,
				    const char *root_dir)
{
	char filename[PATH_MAX];
	/*
	 * kernel module short names are of the form "[module]" and
	 * we need just "module" here.
	 */
	const char *name = dso->short_name + 1;

	snprintf(filename, sizeof(filename),
		 "%s/sys/module/%.*s/notes/.note.gnu.build-id",
		 root_dir, (int)strlen(name) - 1, name);

	if (sysfs__read_build_id(filename, dso->build_id,
				 sizeof(dso->build_id)) == 0)
		dso->has_build_id = true;

	return 0;
}

bool __dsos__read_build_ids(struct list_head *head, bool with_hits)
{
	bool have_build_id = false;
	struct dso *pos;
	struct nscookie nsc;

	list_for_each_entry(pos, head, node) {
		if (with_hits && !pos->hit && !dso__is_vdso(pos))
			continue;
		if (pos->has_build_id) {
			have_build_id = true;
			continue;
		}
		nsinfo__mountns_enter(pos->nsinfo, &nsc);
		if (filename__read_build_id(pos->long_name, pos->build_id,
					    sizeof(pos->build_id)) > 0) {
			have_build_id	  = true;
			pos->has_build_id = true;
		}
		nsinfo__mountns_exit(&nsc);
	}

	return have_build_id;
}

void __dsos__add(struct dsos *dsos, struct dso *dso)
{
	list_add_tail(&dso->node, &dsos->head);
	__dso__findlink_by_longname(&dsos->root, dso, NULL);
	/*
	 * It is now in the linked list, grab a reference, then garbage collect
	 * this when needing memory, by looking at LRU dso instances in the
	 * list with refcount_read(&dso->refcnt) == 1, i.e. no references
	 * anywhere besides the one for the list, do, under a lock for the
	 * list: remove it from the list, then a dso__put(), that probably will
	 * be the last and will then call dso__delete(), end of life.
	 *
	 * That, or at the end of the 'struct machine' lifetime, when all
	 * 'struct dso' instances will be removed from the list, in
	 * dsos__exit(), if they have no other reference from some other data
	 * structure.
	 *
	 * E.g.: after processing a 'perf.data' file and storing references
	 * to objects instantiated while processing events, we will have
	 * references to the 'thread', 'map', 'dso' structs all from 'struct
	 * hist_entry' instances, but we may not need anything not referenced,
	 * so we might as well call machines__exit()/machines__delete() and
	 * garbage collect it.
	 */
	dso__get(dso);
}

void dsos__add(struct dsos *dsos, struct dso *dso)
{
	down_write(&dsos->lock);
	__dsos__add(dsos, dso);
	up_write(&dsos->lock);
}

struct dso *__dsos__find(struct dsos *dsos, const char *name, bool cmp_short)
{
	struct dso *pos;

	if (cmp_short) {
		list_for_each_entry(pos, &dsos->head, node)
			if (strcmp(pos->short_name, name) == 0)
				return pos;
		return NULL;
	}
	return __dso__find_by_longname(&dsos->root, name);
}

struct dso *dsos__find(struct dsos *dsos, const char *name, bool cmp_short)
{
	struct dso *dso;
	down_read(&dsos->lock);
	dso = __dsos__find(dsos, name, cmp_short);
	up_read(&dsos->lock);
	return dso;
}

struct dso *__dsos__addnew(struct dsos *dsos, const char *name)
{
	struct dso *dso = dso__new(name);

	if (dso != NULL) {
		__dsos__add(dsos, dso);
		dso__set_basename(dso);
		/* Put dso here because __dsos__add already got it */
		dso__put(dso);
	}
	return dso;
}

struct dso *__dsos__findnew(struct dsos *dsos, const char *name)
{
	struct dso *dso = __dsos__find(dsos, name, false);

	return dso ? dso : __dsos__addnew(dsos, name);
}

struct dso *dsos__findnew(struct dsos *dsos, const char *name)
{
	struct dso *dso;
	down_write(&dsos->lock);
	dso = dso__get(__dsos__findnew(dsos, name));
	up_write(&dsos->lock);
	return dso;
}
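
/*
 * Usage sketch for the lookup helpers above: dsos__findnew() returns a dso
 * with an extra reference taken for the caller, so the result must be
 * dropped with dso__put() when no longer needed:
 *
 *	struct dso *dso = dsos__findnew(&machine->dsos, name);
 *
 *	if (dso) {
 *		... use dso ...
 *		dso__put(dso);
 *	}
 */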
"" : "NOT "); 1479 ret += dso__fprintf_buildid(dso, fp); 1480 ret += fprintf(fp, ")\n"); 1481 for (nd = rb_first(&dso->symbols[type]); nd; nd = rb_next(nd)) { 1482 struct symbol *pos = rb_entry(nd, struct symbol, rb_node); 1483 ret += symbol__fprintf(pos, fp); 1484 } 1485 1486 return ret; 1487 } 1488 1489 enum dso_type dso__type(struct dso *dso, struct machine *machine) 1490 { 1491 int fd; 1492 enum dso_type type = DSO__TYPE_UNKNOWN; 1493 1494 fd = dso__data_get_fd(dso, machine); 1495 if (fd >= 0) { 1496 type = dso__type_fd(fd); 1497 dso__data_put_fd(dso); 1498 } 1499 1500 return type; 1501 } 1502 1503 int dso__strerror_load(struct dso *dso, char *buf, size_t buflen) 1504 { 1505 int idx, errnum = dso->load_errno; 1506 /* 1507 * This must have a same ordering as the enum dso_load_errno. 1508 */ 1509 static const char *dso_load__error_str[] = { 1510 "Internal tools/perf/ library error", 1511 "Invalid ELF file", 1512 "Can not read build id", 1513 "Mismatching build id", 1514 "Decompression failure", 1515 }; 1516 1517 BUG_ON(buflen == 0); 1518 1519 if (errnum >= 0) { 1520 const char *err = str_error_r(errnum, buf, buflen); 1521 1522 if (err != buf) 1523 scnprintf(buf, buflen, "%s", err); 1524 1525 return 0; 1526 } 1527 1528 if (errnum < __DSO_LOAD_ERRNO__START || errnum >= __DSO_LOAD_ERRNO__END) 1529 return -1; 1530 1531 idx = errnum - __DSO_LOAD_ERRNO__START; 1532 scnprintf(buf, buflen, "%s", dso_load__error_str[idx]); 1533 return 0; 1534 } 1535