// SPDX-License-Identifier: GPL-2.0
#include <asm/bug.h>
#include <linux/kernel.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <errno.h>
#include "compress.h"
#include "path.h"
#include "symbol.h"
#include "dso.h"
#include "machine.h"
#include "auxtrace.h"
#include "util.h"
#include "debug.h"
#include "string2.h"
#include "vdso.h"

static const char * const debuglink_paths[] = {
	"%.0s%s",
	"%s/%s",
	"%s/.debug/%s",
	"/usr/lib/debug%s/%s"
};

char dso__symtab_origin(const struct dso *dso)
{
	static const char origin[] = {
		[DSO_BINARY_TYPE__KALLSYMS]			= 'k',
		[DSO_BINARY_TYPE__VMLINUX]			= 'v',
		[DSO_BINARY_TYPE__JAVA_JIT]			= 'j',
		[DSO_BINARY_TYPE__DEBUGLINK]			= 'l',
		[DSO_BINARY_TYPE__BUILD_ID_CACHE]		= 'B',
		[DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO]	= 'D',
		[DSO_BINARY_TYPE__FEDORA_DEBUGINFO]		= 'f',
		[DSO_BINARY_TYPE__UBUNTU_DEBUGINFO]		= 'u',
		[DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO]	= 'o',
		[DSO_BINARY_TYPE__BUILDID_DEBUGINFO]		= 'b',
		[DSO_BINARY_TYPE__SYSTEM_PATH_DSO]		= 'd',
		[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE]		= 'K',
		[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP]	= 'm',
		[DSO_BINARY_TYPE__GUEST_KALLSYMS]		= 'g',
		[DSO_BINARY_TYPE__GUEST_KMODULE]		= 'G',
		[DSO_BINARY_TYPE__GUEST_KMODULE_COMP]		= 'M',
		[DSO_BINARY_TYPE__GUEST_VMLINUX]		= 'V',
	};

	if (dso == NULL || dso->symtab_type == DSO_BINARY_TYPE__NOT_FOUND)
		return '!';
	return origin[dso->symtab_type];
}

int dso__read_binary_type_filename(const struct dso *dso,
				   enum dso_binary_type type,
				   char *root_dir, char *filename, size_t size)
{
	char build_id_hex[SBUILD_ID_SIZE];
	int ret = 0;
	size_t len;

	switch (type) {
	case DSO_BINARY_TYPE__DEBUGLINK:
	{
		const char *last_slash;
		char dso_dir[PATH_MAX];
		char symfile[PATH_MAX];
		unsigned int i;

		len = __symbol__join_symfs(filename, size, dso->long_name);
		last_slash = filename + len;
		while (last_slash != filename && *last_slash != '/')
			last_slash--;

		strncpy(dso_dir, filename, last_slash - filename);
		dso_dir[last_slash - filename] = '\0';

		if (!is_regular_file(filename)) {
			ret = -1;
			break;
		}

		ret = filename__read_debuglink(filename, symfile, PATH_MAX);
		if (ret)
			break;

		/* Check predefined locations where debug file might reside */
		ret = -1;
		for (i = 0; i < ARRAY_SIZE(debuglink_paths); i++) {
			snprintf(filename, size,
				 debuglink_paths[i], dso_dir, symfile);
			if (is_regular_file(filename)) {
				ret = 0;
				break;
			}
		}

		break;
	}
	case DSO_BINARY_TYPE__BUILD_ID_CACHE:
		if (dso__build_id_filename(dso, filename, size, false) == NULL)
			ret = -1;
		break;

	case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
		if (dso__build_id_filename(dso, filename, size, true) == NULL)
			ret = -1;
		break;

	case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
		snprintf(filename + len, size - len, "%s.debug", dso->long_name);
		break;

	case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
		snprintf(filename + len, size - len, "%s", dso->long_name);
		break;

	case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
	{
		const char *last_slash;
		size_t dir_size;

		last_slash = dso->long_name + dso->long_name_len;
		while (last_slash != dso->long_name && *last_slash != '/')
			last_slash--;

		len = __symbol__join_symfs(filename, size, "");
		dir_size = last_slash - dso->long_name + 2;
		if (dir_size > (size - len)) {
			ret = -1;
			break;
		}
		len += scnprintf(filename + len, dir_size, "%s", dso->long_name);
		len += scnprintf(filename + len, size - len, ".debug%s",
				 last_slash);
		break;
	}

	case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
		if (!dso->has_build_id) {
			ret = -1;
			break;
		}

		build_id__sprintf(dso->build_id,
				  sizeof(dso->build_id),
				  build_id_hex);
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug/.build-id/");
		snprintf(filename + len, size - len, "%.2s/%s.debug",
			 build_id_hex, build_id_hex + 2);
		break;

	case DSO_BINARY_TYPE__VMLINUX:
	case DSO_BINARY_TYPE__GUEST_VMLINUX:
	case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
		__symbol__join_symfs(filename, size, dso->long_name);
		break;

	case DSO_BINARY_TYPE__GUEST_KMODULE:
	case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
		path__join3(filename, size, symbol_conf.symfs,
			    root_dir, dso->long_name);
		break;

	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
		__symbol__join_symfs(filename, size, dso->long_name);
		break;

	case DSO_BINARY_TYPE__KCORE:
	case DSO_BINARY_TYPE__GUEST_KCORE:
		snprintf(filename, size, "%s", dso->long_name);
		break;

	default:
	case DSO_BINARY_TYPE__KALLSYMS:
	case DSO_BINARY_TYPE__GUEST_KALLSYMS:
	case DSO_BINARY_TYPE__JAVA_JIT:
	case DSO_BINARY_TYPE__NOT_FOUND:
		ret = -1;
		break;
	}

	return ret;
}
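/*
 * A minimal sketch of how a caller resolves an on-disk path for one of the
 * binary types above; the empty root_dir mirrors what __open_dso() uses when
 * no machine is given, and the build id value is illustrative only:
 *
 *	char path[PATH_MAX];
 *
 *	if (!dso__read_binary_type_filename(dso, DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
 *					    (char *)"", path, sizeof(path)))
 *		pr_debug("debuginfo candidate: %s\n", path);
 *
 * For a dso whose build id starts with "abcdef12" this yields something like
 * /usr/lib/debug/.build-id/ab/cdef12....debug, prefixed by symfs if one is
 * configured. The call fails (-1) when the dso has no build id.
 */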
static const struct {
	const char *fmt;
	int (*decompress)(const char *input, int output);
} compressions[] = {
#ifdef HAVE_ZLIB_SUPPORT
	{ "gz", gzip_decompress_to_file },
#endif
#ifdef HAVE_LZMA_SUPPORT
	{ "xz", lzma_decompress_to_file },
#endif
	{ NULL, NULL },
};

bool is_supported_compression(const char *ext)
{
	unsigned i;

	for (i = 0; compressions[i].fmt; i++) {
		if (!strcmp(ext, compressions[i].fmt))
			return true;
	}
	return false;
}

bool is_kernel_module(const char *pathname, int cpumode)
{
	struct kmod_path m;
	int mode = cpumode & PERF_RECORD_MISC_CPUMODE_MASK;

	WARN_ONCE(mode != cpumode,
		  "Internal error: passing unmasked cpumode (%x) to is_kernel_module",
		  cpumode);

	switch (mode) {
	case PERF_RECORD_MISC_USER:
	case PERF_RECORD_MISC_HYPERVISOR:
	case PERF_RECORD_MISC_GUEST_USER:
		return false;
	/* Treat PERF_RECORD_MISC_CPUMODE_UNKNOWN as kernel */
	default:
		if (kmod_path__parse(&m, pathname)) {
			pr_err("Failed to check whether %s is a kernel module or not. Assume it is.\n",
			       pathname);
			return true;
		}
	}

	return m.kmod;
}
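/*
 * For example (the module path is illustrative),
 * is_kernel_module("/lib/modules/.../fs/ext4/ext4.ko", PERF_RECORD_MISC_KERNEL)
 * returns true, while the same path with PERF_RECORD_MISC_USER returns false
 * without parsing the name at all.
 */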
bool decompress_to_file(const char *ext, const char *filename, int output_fd)
{
	unsigned i;

	for (i = 0; compressions[i].fmt; i++) {
		if (!strcmp(ext, compressions[i].fmt))
			return !compressions[i].decompress(filename,
							   output_fd);
	}
	return false;
}

bool dso__needs_decompress(struct dso *dso)
{
	return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
		dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
}

static int decompress_kmodule(struct dso *dso, const char *name, char *tmpbuf)
{
	int fd = -1;
	struct kmod_path m;

	if (!dso__needs_decompress(dso))
		return -1;

	if (kmod_path__parse_ext(&m, dso->long_name))
		return -1;

	if (!m.comp)
		goto out;

	fd = mkstemp(tmpbuf);
	if (fd < 0) {
		dso->load_errno = errno;
		goto out;
	}

	if (!decompress_to_file(m.ext, name, fd)) {
		dso->load_errno = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE;
		close(fd);
		fd = -1;
	}

out:
	free(m.ext);
	return fd;
}

int dso__decompress_kmodule_fd(struct dso *dso, const char *name)
{
	char tmpbuf[] = KMOD_DECOMP_NAME;
	int fd;

	fd = decompress_kmodule(dso, name, tmpbuf);
	unlink(tmpbuf);
	return fd;
}

int dso__decompress_kmodule_path(struct dso *dso, const char *name,
				 char *pathname, size_t len)
{
	char tmpbuf[] = KMOD_DECOMP_NAME;
	int fd;

	fd = decompress_kmodule(dso, name, tmpbuf);
	if (fd < 0) {
		unlink(tmpbuf);
		return -1;
	}

	strncpy(pathname, tmpbuf, len);
	close(fd);
	return 0;
}

/*
 * Parses the kernel module specified in @path and updates the
 * @m argument like so:
 *
 *    @comp - true if @path contains a supported compression suffix,
 *            false otherwise
 *    @kmod - true if @path contains a '.ko' suffix in the right position,
 *            false otherwise
 *    @name - if (@alloc_name && @kmod) is true, it contains the strdup-ed
 *            base name of the kernel module without suffixes, otherwise
 *            the strdup-ed base name of @path
 *    @ext  - if (@alloc_ext && @comp) is true, it contains the strdup-ed
 *            compression suffix
 *
 * Returns 0 if there's no strdup error, -ENOMEM otherwise.
 */
int __kmod_path__parse(struct kmod_path *m, const char *path,
		       bool alloc_name, bool alloc_ext)
{
	const char *name = strrchr(path, '/');
	const char *ext  = strrchr(path, '.');
	bool is_simple_name = false;

	memset(m, 0x0, sizeof(*m));
	name = name ? name + 1 : path;

	/*
	 * '.' is also a valid character for module name. For example:
	 * [aaa.bbb] is a valid module name. '[' should have higher
	 * priority than '.ko' suffix.
	 *
	 * The kernel names are from machine__mmap_name. Such
	 * name should belong to kernel itself, not kernel module.
	 */
	if (name[0] == '[') {
		is_simple_name = true;
		if ((strncmp(name, "[kernel.kallsyms]", 17) == 0) ||
		    (strncmp(name, "[guest.kernel.kallsyms", 22) == 0) ||
		    (strncmp(name, "[vdso]", 6) == 0) ||
		    (strncmp(name, "[vsyscall]", 10) == 0)) {
			m->kmod = false;

		} else
			m->kmod = true;
	}

	/* No extension, just return name. */
	if ((ext == NULL) || is_simple_name) {
		if (alloc_name) {
			m->name = strdup(name);
			return m->name ? 0 : -ENOMEM;
		}
		return 0;
	}

	if (is_supported_compression(ext + 1)) {
		m->comp = true;
		ext -= 3;
	}

	/* Check .ko extension only if there's enough name left. */
	if (ext > name)
		m->kmod = !strncmp(ext, ".ko", 3);

	if (alloc_name) {
		if (m->kmod) {
			if (asprintf(&m->name, "[%.*s]", (int) (ext - name), name) == -1)
				return -ENOMEM;
		} else {
			if (asprintf(&m->name, "%s", name) == -1)
				return -ENOMEM;
		}

		strxfrchar(m->name, '-', '_');
	}

	if (alloc_ext && m->comp) {
		m->ext = strdup(ext + 4);
		if (!m->ext) {
			free((void *) m->name);
			return -ENOMEM;
		}
	}

	return 0;
}
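/*
 * A short sketch of the parsed fields, assuming gzip support is built in
 * (the module path is illustrative):
 *
 *	struct kmod_path m;
 *
 *	__kmod_path__parse(&m, "/lib/modules/.../usb-storage.ko.gz", true, true);
 *	// m.kmod == true, m.comp == true
 *	// m.name == "[usb_storage]"   ('-' is mapped to '_')
 *	// m.ext  == "gz"
 *	free((void *)m.name);
 *	free(m.ext);
 *
 * Simple names such as "[vdso]" or "[kernel.kallsyms]" are recognized early
 * and end up with m.kmod == false.
 */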
void dso__set_module_info(struct dso *dso, struct kmod_path *m,
			  struct machine *machine)
{
	if (machine__is_host(machine))
		dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
	else
		dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;

	/* _KMODULE_COMP should be next to _KMODULE */
	if (m->kmod && m->comp)
		dso->symtab_type++;

	dso__set_short_name(dso, strdup(m->name), true);
}

/*
 * Global list of open DSOs and the counter.
 */
static LIST_HEAD(dso__data_open);
static long dso__data_open_cnt;
static pthread_mutex_t dso__data_open_lock = PTHREAD_MUTEX_INITIALIZER;

static void dso__list_add(struct dso *dso)
{
	list_add_tail(&dso->data.open_entry, &dso__data_open);
	dso__data_open_cnt++;
}

static void dso__list_del(struct dso *dso)
{
	list_del(&dso->data.open_entry);
	WARN_ONCE(dso__data_open_cnt <= 0,
		  "DSO data fd counter out of bounds.");
	dso__data_open_cnt--;
}

static void close_first_dso(void);

static int do_open(char *name)
{
	int fd;
	char sbuf[STRERR_BUFSIZE];

	do {
		fd = open(name, O_RDONLY);
		if (fd >= 0)
			return fd;

		pr_debug("dso open failed: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		if (!dso__data_open_cnt || errno != EMFILE)
			break;

		close_first_dso();
	} while (1);

	return -1;
}

static int __open_dso(struct dso *dso, struct machine *machine)
{
	int fd = -EINVAL;
	char *root_dir = (char *)"";
	char *name = malloc(PATH_MAX);

	if (!name)
		return -ENOMEM;

	if (machine)
		root_dir = machine->root_dir;

	if (dso__read_binary_type_filename(dso, dso->binary_type,
					   root_dir, name, PATH_MAX))
		goto out;

	if (!is_regular_file(name))
		goto out;

	if (dso__needs_decompress(dso)) {
		char newpath[KMOD_DECOMP_LEN];
		size_t len = sizeof(newpath);

		if (dso__decompress_kmodule_path(dso, name, newpath, len) < 0) {
			fd = -dso->load_errno;
			goto out;
		}

		strcpy(name, newpath);
	}

	fd = do_open(name);

	if (dso__needs_decompress(dso))
		unlink(name);

out:
	free(name);
	return fd;
}

static void check_data_close(void);

/**
 * open_dso - Open DSO data file
 * @dso: dso object
 *
 * Opens @dso's data file descriptor and updates the
 * list/count of open DSO objects.
 */
static int open_dso(struct dso *dso, struct machine *machine)
{
	int fd;
	struct nscookie nsc;

	if (dso->binary_type != DSO_BINARY_TYPE__BUILD_ID_CACHE)
		nsinfo__mountns_enter(dso->nsinfo, &nsc);
	fd = __open_dso(dso, machine);
	if (dso->binary_type != DSO_BINARY_TYPE__BUILD_ID_CACHE)
		nsinfo__mountns_exit(&nsc);

	if (fd >= 0) {
		dso__list_add(dso);
		/*
		 * Check if we crossed the allowed number
		 * of opened DSOs and close one if needed.
		 */
		check_data_close();
	}

	return fd;
}
static void close_data_fd(struct dso *dso)
{
	if (dso->data.fd >= 0) {
		close(dso->data.fd);
		dso->data.fd = -1;
		dso->data.file_size = 0;
		dso__list_del(dso);
	}
}

/**
 * close_dso - Close DSO data file
 * @dso: dso object
 *
 * Closes @dso's data file descriptor and updates the
 * list/count of open DSO objects.
 */
static void close_dso(struct dso *dso)
{
	close_data_fd(dso);
}

static void close_first_dso(void)
{
	struct dso *dso;

	dso = list_first_entry(&dso__data_open, struct dso, data.open_entry);
	close_dso(dso);
}

static rlim_t get_fd_limit(void)
{
	struct rlimit l;
	rlim_t limit = 0;

	/* Allow half of the current open fd limit. */
	if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
		if (l.rlim_cur == RLIM_INFINITY)
			limit = l.rlim_cur;
		else
			limit = l.rlim_cur / 2;
	} else {
		pr_err("failed to get fd limit\n");
		limit = 1;
	}

	return limit;
}

static rlim_t fd_limit;

/*
 * Used only by tests/dso-data.c to reset the environment
 * for tests. I don't expect we should change this during
 * standard runtime.
 */
void reset_fd_limit(void)
{
	fd_limit = 0;
}

static bool may_cache_fd(void)
{
	if (!fd_limit)
		fd_limit = get_fd_limit();

	if (fd_limit == RLIM_INFINITY)
		return true;

	return fd_limit > (rlim_t) dso__data_open_cnt;
}

/*
 * Check and close the LRU dso if we crossed the allowed limit
 * for opened dso file descriptors. The limit is half
 * of the RLIMIT_NOFILE files opened.
 */
static void check_data_close(void)
{
	bool cache_fd = may_cache_fd();

	if (!cache_fd)
		close_first_dso();
}

/**
 * dso__data_close - Close DSO data file
 * @dso: dso object
 *
 * External interface to close @dso's data file descriptor.
 */
void dso__data_close(struct dso *dso)
{
	pthread_mutex_lock(&dso__data_open_lock);
	close_dso(dso);
	pthread_mutex_unlock(&dso__data_open_lock);
}

static void try_to_open_dso(struct dso *dso, struct machine *machine)
{
	enum dso_binary_type binary_type_data[] = {
		DSO_BINARY_TYPE__BUILD_ID_CACHE,
		DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
		DSO_BINARY_TYPE__NOT_FOUND,
	};
	int i = 0;

	if (dso->data.fd >= 0)
		return;

	if (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND) {
		dso->data.fd = open_dso(dso, machine);
		goto out;
	}

	do {
		dso->binary_type = binary_type_data[i++];

		dso->data.fd = open_dso(dso, machine);
		if (dso->data.fd >= 0)
			goto out;

	} while (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND);
out:
	if (dso->data.fd >= 0)
		dso->data.status = DSO_DATA_STATUS_OK;
	else
		dso->data.status = DSO_DATA_STATUS_ERROR;
}

/**
 * dso__data_get_fd - Get dso's data file descriptor
 * @dso: dso object
 * @machine: machine object
 *
 * External interface that finds the dso's file, opens it and
 * returns the file descriptor. It should be paired with
 * dso__data_put_fd() if it returns a non-negative value.
 */
int dso__data_get_fd(struct dso *dso, struct machine *machine)
{
	if (dso->data.status == DSO_DATA_STATUS_ERROR)
		return -1;

	if (pthread_mutex_lock(&dso__data_open_lock) < 0)
		return -1;

	try_to_open_dso(dso, machine);

	if (dso->data.fd < 0)
		pthread_mutex_unlock(&dso__data_open_lock);

	return dso->data.fd;
}

void dso__data_put_fd(struct dso *dso __maybe_unused)
{
	pthread_mutex_unlock(&dso__data_open_lock);
}
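/*
 * Typical pairing of the fd interface, roughly as dso__type() at the end of
 * this file does it; the descriptor is only guaranteed to stay open until
 * dso__data_put_fd() drops the lock taken by dso__data_get_fd():
 *
 *	int fd = dso__data_get_fd(dso, machine);
 *
 *	if (fd >= 0) {
 *		// ... use fd (pread(), fstat(), ...) ...
 *		dso__data_put_fd(dso);
 *	}
 */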
bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by)
{
	u32 flag = 1 << by;

	if (dso->data.status_seen & flag)
		return true;

	dso->data.status_seen |= flag;

	return false;
}

static void
dso_cache__free(struct dso *dso)
{
	struct rb_root *root = &dso->data.cache;
	struct rb_node *next = rb_first(root);

	pthread_mutex_lock(&dso->lock);
	while (next) {
		struct dso_cache *cache;

		cache = rb_entry(next, struct dso_cache, rb_node);
		next = rb_next(&cache->rb_node);
		rb_erase(&cache->rb_node, root);
		free(cache);
	}
	pthread_mutex_unlock(&dso->lock);
}

static struct dso_cache *dso_cache__find(struct dso *dso, u64 offset)
{
	const struct rb_root *root = &dso->data.cache;
	struct rb_node * const *p = &root->rb_node;
	const struct rb_node *parent = NULL;
	struct dso_cache *cache;

	while (*p != NULL) {
		u64 end;

		parent = *p;
		cache = rb_entry(parent, struct dso_cache, rb_node);
		end = cache->offset + DSO__DATA_CACHE_SIZE;

		if (offset < cache->offset)
			p = &(*p)->rb_left;
		else if (offset >= end)
			p = &(*p)->rb_right;
		else
			return cache;
	}

	return NULL;
}

static struct dso_cache *
dso_cache__insert(struct dso *dso, struct dso_cache *new)
{
	struct rb_root *root = &dso->data.cache;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct dso_cache *cache;
	u64 offset = new->offset;

	pthread_mutex_lock(&dso->lock);
	while (*p != NULL) {
		u64 end;

		parent = *p;
		cache = rb_entry(parent, struct dso_cache, rb_node);
		end = cache->offset + DSO__DATA_CACHE_SIZE;

		if (offset < cache->offset)
			p = &(*p)->rb_left;
		else if (offset >= end)
			p = &(*p)->rb_right;
		else
			goto out;
	}

	rb_link_node(&new->rb_node, parent, p);
	rb_insert_color(&new->rb_node, root);

	cache = NULL;
out:
	pthread_mutex_unlock(&dso->lock);
	return cache;
}

static ssize_t
dso_cache__memcpy(struct dso_cache *cache, u64 offset,
		  u8 *data, u64 size)
{
	u64 cache_offset = offset - cache->offset;
	u64 cache_size   = min(cache->size - cache_offset, size);

	memcpy(data, cache->data + cache_offset, cache_size);
	return cache_size;
}
static ssize_t
dso_cache__read(struct dso *dso, struct machine *machine,
		u64 offset, u8 *data, ssize_t size)
{
	struct dso_cache *cache;
	struct dso_cache *old;
	ssize_t ret;

	do {
		u64 cache_offset;

		cache = zalloc(sizeof(*cache) + DSO__DATA_CACHE_SIZE);
		if (!cache)
			return -ENOMEM;

		pthread_mutex_lock(&dso__data_open_lock);

		/*
		 * dso->data.fd might be closed if another thread opened a
		 * different file (dso) due to the open file limit
		 * (RLIMIT_NOFILE).
		 */
		try_to_open_dso(dso, machine);

		if (dso->data.fd < 0) {
			ret = -errno;
			dso->data.status = DSO_DATA_STATUS_ERROR;
			break;
		}

		cache_offset = offset & DSO__DATA_CACHE_MASK;

		ret = pread(dso->data.fd, cache->data, DSO__DATA_CACHE_SIZE, cache_offset);
		if (ret <= 0)
			break;

		cache->offset = cache_offset;
		cache->size   = ret;
	} while (0);

	pthread_mutex_unlock(&dso__data_open_lock);

	if (ret > 0) {
		old = dso_cache__insert(dso, cache);
		if (old) {
			/* we lost the race, keep the existing entry */
			free(cache);
			cache = old;
		}

		ret = dso_cache__memcpy(cache, offset, data, size);
	}

	if (ret <= 0)
		free(cache);

	return ret;
}

static ssize_t dso_cache_read(struct dso *dso, struct machine *machine,
			      u64 offset, u8 *data, ssize_t size)
{
	struct dso_cache *cache;

	cache = dso_cache__find(dso, offset);
	if (cache)
		return dso_cache__memcpy(cache, offset, data, size);
	else
		return dso_cache__read(dso, machine, offset, data, size);
}

/*
 * Reads and caches dso data in DSO__DATA_CACHE_SIZE-sized chunks kept
 * in the rb_tree. Any read of already cached data is served
 * from the cache.
 */
static ssize_t cached_read(struct dso *dso, struct machine *machine,
			   u64 offset, u8 *data, ssize_t size)
{
	ssize_t r = 0;
	u8 *p = data;

	do {
		ssize_t ret;

		ret = dso_cache_read(dso, machine, offset, p, size);
		if (ret < 0)
			return ret;

		/* Reached EOF, return what we have. */
		if (!ret)
			break;

		BUG_ON(ret > size);

		r += ret;
		p += ret;
		offset += ret;
		size -= ret;

	} while (size);

	return r;
}

static int data_file_size(struct dso *dso, struct machine *machine)
{
	int ret = 0;
	struct stat st;
	char sbuf[STRERR_BUFSIZE];

	if (dso->data.file_size)
		return 0;

	if (dso->data.status == DSO_DATA_STATUS_ERROR)
		return -1;

	pthread_mutex_lock(&dso__data_open_lock);

	/*
	 * dso->data.fd might be closed if another thread opened a
	 * different file (dso) due to the open file limit (RLIMIT_NOFILE).
	 */
	try_to_open_dso(dso, machine);

	if (dso->data.fd < 0) {
		ret = -errno;
		dso->data.status = DSO_DATA_STATUS_ERROR;
		goto out;
	}

	if (fstat(dso->data.fd, &st) < 0) {
		ret = -errno;
		pr_err("dso cache fstat failed: %s\n",
		       str_error_r(errno, sbuf, sizeof(sbuf)));
		dso->data.status = DSO_DATA_STATUS_ERROR;
		goto out;
	}
	dso->data.file_size = st.st_size;

out:
	pthread_mutex_unlock(&dso__data_open_lock);
	return ret;
}
/**
 * dso__data_size - Return dso data size
 * @dso: dso object
 * @machine: machine object
 *
 * Return: dso data size
 */
off_t dso__data_size(struct dso *dso, struct machine *machine)
{
	if (data_file_size(dso, machine))
		return -1;

	/* For now just estimate dso data size is close to file size */
	return dso->data.file_size;
}

static ssize_t data_read_offset(struct dso *dso, struct machine *machine,
				u64 offset, u8 *data, ssize_t size)
{
	if (data_file_size(dso, machine))
		return -1;

	/* Check the offset sanity. */
	if (offset > dso->data.file_size)
		return -1;

	if (offset + size < offset)
		return -1;

	return cached_read(dso, machine, offset, data, size);
}

/**
 * dso__data_read_offset - Read data from dso file offset
 * @dso: dso object
 * @machine: machine object
 * @offset: file offset
 * @data: buffer to store data
 * @size: size of the @data buffer
 *
 * External interface to read data from a dso file offset. Opens
 * the dso data file and uses cached_read() to get the data.
 */
ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
			      u64 offset, u8 *data, ssize_t size)
{
	if (dso->data.status == DSO_DATA_STATUS_ERROR)
		return -1;

	return data_read_offset(dso, machine, offset, data, size);
}

/**
 * dso__data_read_addr - Read data from dso address
 * @dso: dso object
 * @map: map object used to translate @addr into a file offset
 * @machine: machine object
 * @addr: virtual memory address
 * @data: buffer to store data
 * @size: size of the @data buffer
 *
 * External interface to read data from a dso address.
 */
ssize_t dso__data_read_addr(struct dso *dso, struct map *map,
			    struct machine *machine, u64 addr,
			    u8 *data, ssize_t size)
{
	u64 offset = map->map_ip(map, addr);

	return dso__data_read_offset(dso, machine, offset, data, size);
}
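/*
 * A minimal sketch of reading raw bytes through these interfaces; the buffer
 * size and the address are illustrative only:
 *
 *	u8 buf[64];
 *	ssize_t n;
 *
 *	n = dso__data_read_addr(dso, map, machine, addr, buf, sizeof(buf));
 *	if (n < 0)
 *		pr_debug("dso read failed\n");
 *
 * dso__data_read_offset() can be used directly when a file offset is already
 * known; both may return fewer than @size bytes near EOF.
 */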
struct map *dso__new_map(const char *name)
{
	struct map *map = NULL;
	struct dso *dso = dso__new(name);

	if (dso)
		map = map__new2(0, dso, MAP__FUNCTION);

	return map;
}

struct dso *machine__findnew_kernel(struct machine *machine, const char *name,
				    const char *short_name, int dso_type)
{
	/*
	 * The kernel dso could be created by build_id processing.
	 */
	struct dso *dso = machine__findnew_dso(machine, name);

	/*
	 * We need to run this in all cases, since during the build_id
	 * processing we had no idea this was the kernel dso.
	 */
	if (dso != NULL) {
		dso__set_short_name(dso, short_name, false);
		dso->kernel = dso_type;
	}

	return dso;
}

/*
 * Find a matching entry and/or link the current entry into the RB tree.
 * At least one of the dso or name parameters must be non-NULL or the
 * function will not work.
 */
static struct dso *__dso__findlink_by_longname(struct rb_root *root,
					       struct dso *dso, const char *name)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;

	if (!name)
		name = dso->long_name;
	/*
	 * Find node with the matching name
	 */
	while (*p) {
		struct dso *this = rb_entry(*p, struct dso, rb_node);
		int rc = strcmp(name, this->long_name);

		parent = *p;
		if (rc == 0) {
			/*
			 * In case the new DSO is a duplicate of an existing
			 * one, print a one-time warning & put the new entry
			 * at the end of the list of duplicates.
			 */
			if (!dso || (dso == this))
				return this;	/* Found matching dso */
			/*
			 * The core kernel DSOs may have duplicated long name.
			 * In this case, the short name should be different.
			 * Compare the short names to differentiate the DSOs.
			 */
			rc = strcmp(dso->short_name, this->short_name);
			if (rc == 0) {
				pr_err("Duplicated dso name: %s\n", name);
				return NULL;
			}
		}
		if (rc < 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	if (dso) {
		/* Add new node and rebalance tree */
		rb_link_node(&dso->rb_node, parent, p);
		rb_insert_color(&dso->rb_node, root);
		dso->root = root;
	}
	return NULL;
}

static inline struct dso *__dso__find_by_longname(struct rb_root *root,
						  const char *name)
{
	return __dso__findlink_by_longname(root, NULL, name);
}

void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated)
{
	struct rb_root *root = dso->root;

	if (name == NULL)
		return;

	if (dso->long_name_allocated)
		free((char *)dso->long_name);

	if (root) {
		rb_erase(&dso->rb_node, root);
		/*
		 * __dso__findlink_by_longname() isn't guaranteed to add it
		 * back, so a clean removal is required here.
		 */
		RB_CLEAR_NODE(&dso->rb_node);
		dso->root = NULL;
	}

	dso->long_name = name;
	dso->long_name_len = strlen(name);
	dso->long_name_allocated = name_allocated;

	if (root)
		__dso__findlink_by_longname(root, dso, NULL);
}

void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
{
	if (name == NULL)
		return;

	if (dso->short_name_allocated)
		free((char *)dso->short_name);

	dso->short_name = name;
	dso->short_name_len = strlen(name);
	dso->short_name_allocated = name_allocated;
}

static void dso__set_basename(struct dso *dso)
{
	/*
	 * basename() may modify path buffer, so we must pass
	 * a copy.
	 */
	char *base, *lname = strdup(dso->long_name);

	if (!lname)
		return;

	/*
	 * basename() may return a pointer to internal
	 * storage which is reused in subsequent calls
	 * so copy the result.
	 */
	base = strdup(basename(lname));

	free(lname);

	if (!base)
		return;

	dso__set_short_name(dso, base, true);
}
int dso__name_len(const struct dso *dso)
{
	if (!dso)
		return strlen("[unknown]");
	if (verbose > 0)
		return dso->long_name_len;

	return dso->short_name_len;
}

bool dso__loaded(const struct dso *dso, enum map_type type)
{
	return dso->loaded & (1 << type);
}

bool dso__sorted_by_name(const struct dso *dso, enum map_type type)
{
	return dso->sorted_by_name & (1 << type);
}

void dso__set_sorted_by_name(struct dso *dso, enum map_type type)
{
	dso->sorted_by_name |= (1 << type);
}

struct dso *dso__new(const char *name)
{
	struct dso *dso = calloc(1, sizeof(*dso) + strlen(name) + 1);

	if (dso != NULL) {
		int i;

		strcpy(dso->name, name);
		dso__set_long_name(dso, dso->name, false);
		dso__set_short_name(dso, dso->name, false);
		for (i = 0; i < MAP__NR_TYPES; ++i)
			dso->symbols[i] = dso->symbol_names[i] = RB_ROOT;
		dso->data.cache = RB_ROOT;
		dso->data.fd = -1;
		dso->data.status = DSO_DATA_STATUS_UNKNOWN;
		dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND;
		dso->binary_type = DSO_BINARY_TYPE__NOT_FOUND;
		dso->is_64_bit = (sizeof(void *) == 8);
		dso->loaded = 0;
		dso->rel = 0;
		dso->sorted_by_name = 0;
		dso->has_build_id = 0;
		dso->has_srcline = 1;
		dso->a2l_fails = 1;
		dso->kernel = DSO_TYPE_USER;
		dso->needs_swap = DSO_SWAP__UNSET;
		RB_CLEAR_NODE(&dso->rb_node);
		dso->root = NULL;
		INIT_LIST_HEAD(&dso->node);
		INIT_LIST_HEAD(&dso->data.open_entry);
		pthread_mutex_init(&dso->lock, NULL);
		refcount_set(&dso->refcnt, 1);
	}

	return dso;
}

void dso__delete(struct dso *dso)
{
	int i;

	if (!RB_EMPTY_NODE(&dso->rb_node))
		pr_err("DSO %s is still in rbtree when being deleted!\n",
		       dso->long_name);
	for (i = 0; i < MAP__NR_TYPES; ++i)
		symbols__delete(&dso->symbols[i]);

	if (dso->short_name_allocated) {
		zfree((char **)&dso->short_name);
		dso->short_name_allocated = false;
	}

	if (dso->long_name_allocated) {
		zfree((char **)&dso->long_name);
		dso->long_name_allocated = false;
	}

	dso__data_close(dso);
	auxtrace_cache__free(dso->auxtrace_cache);
	dso_cache__free(dso);
	dso__free_a2l(dso);
	zfree(&dso->symsrc_filename);
	nsinfo__zput(dso->nsinfo);
	pthread_mutex_destroy(&dso->lock);
	free(dso);
}

struct dso *dso__get(struct dso *dso)
{
	if (dso)
		refcount_inc(&dso->refcnt);
	return dso;
}

void dso__put(struct dso *dso)
{
	if (dso && refcount_dec_and_test(&dso->refcnt))
		dso__delete(dso);
}
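/*
 * dso__get()/dso__put() follow the usual refcounting pairing. A sketch of a
 * caller that temporarily holds on to a dso it obtained elsewhere:
 *
 *	struct dso *dso = dso__get(pos);	// 'pos' is illustrative
 *
 *	// ... use dso ...
 *	dso__put(dso);
 *
 * The final dso__put() drops the last reference and ends up in dso__delete().
 */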
void dso__set_build_id(struct dso *dso, void *build_id)
{
	memcpy(dso->build_id, build_id, sizeof(dso->build_id));
	dso->has_build_id = 1;
}

bool dso__build_id_equal(const struct dso *dso, u8 *build_id)
{
	return memcmp(dso->build_id, build_id, sizeof(dso->build_id)) == 0;
}

void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine)
{
	char path[PATH_MAX];

	if (machine__is_default_guest(machine))
		return;
	sprintf(path, "%s/sys/kernel/notes", machine->root_dir);
	if (sysfs__read_build_id(path, dso->build_id,
				 sizeof(dso->build_id)) == 0)
		dso->has_build_id = true;
}

int dso__kernel_module_get_build_id(struct dso *dso,
				    const char *root_dir)
{
	char filename[PATH_MAX];
	/*
	 * kernel module short names are of the form "[module]" and
	 * we need just "module" here.
	 */
	const char *name = dso->short_name + 1;

	snprintf(filename, sizeof(filename),
		 "%s/sys/module/%.*s/notes/.note.gnu.build-id",
		 root_dir, (int)strlen(name) - 1, name);

	if (sysfs__read_build_id(filename, dso->build_id,
				 sizeof(dso->build_id)) == 0)
		dso->has_build_id = true;

	return 0;
}

bool __dsos__read_build_ids(struct list_head *head, bool with_hits)
{
	bool have_build_id = false;
	struct dso *pos;
	struct nscookie nsc;

	list_for_each_entry(pos, head, node) {
		if (with_hits && !pos->hit && !dso__is_vdso(pos))
			continue;
		if (pos->has_build_id) {
			have_build_id = true;
			continue;
		}
		nsinfo__mountns_enter(pos->nsinfo, &nsc);
		if (filename__read_build_id(pos->long_name, pos->build_id,
					    sizeof(pos->build_id)) > 0) {
			have_build_id = true;
			pos->has_build_id = true;
		}
		nsinfo__mountns_exit(&nsc);
	}

	return have_build_id;
}

void __dsos__add(struct dsos *dsos, struct dso *dso)
{
	list_add_tail(&dso->node, &dsos->head);
	__dso__findlink_by_longname(&dsos->root, dso, NULL);
	/*
	 * It is now in the linked list, so grab a reference. Then garbage
	 * collect it when needing memory, by looking at LRU dso instances in
	 * the list with refcount_read(&dso->refcnt) == 1, i.e. no references
	 * anywhere besides the one for the list. Under a lock for the list,
	 * remove it from the list and then do a dso__put(), which probably
	 * will be the last one and will then call dso__delete(), end of life.
	 *
	 * That, or at the end of the 'struct machine' lifetime, when all
	 * 'struct dso' instances will be removed from the list, in
	 * dsos__exit(), if they have no other reference from some other data
	 * structure.
	 *
	 * E.g.: after processing a 'perf.data' file and storing references
	 * to objects instantiated while processing events, we will have
	 * references to the 'thread', 'map', 'dso' structs all from 'struct
	 * hist_entry' instances, but we may not need anything not referenced,
	 * so we might as well call machines__exit()/machines__delete() and
	 * garbage collect it.
	 */
	dso__get(dso);
}
void dsos__add(struct dsos *dsos, struct dso *dso)
{
	pthread_rwlock_wrlock(&dsos->lock);
	__dsos__add(dsos, dso);
	pthread_rwlock_unlock(&dsos->lock);
}

struct dso *__dsos__find(struct dsos *dsos, const char *name, bool cmp_short)
{
	struct dso *pos;

	if (cmp_short) {
		list_for_each_entry(pos, &dsos->head, node)
			if (strcmp(pos->short_name, name) == 0)
				return pos;
		return NULL;
	}
	return __dso__find_by_longname(&dsos->root, name);
}

struct dso *dsos__find(struct dsos *dsos, const char *name, bool cmp_short)
{
	struct dso *dso;

	pthread_rwlock_rdlock(&dsos->lock);
	dso = __dsos__find(dsos, name, cmp_short);
	pthread_rwlock_unlock(&dsos->lock);
	return dso;
}

struct dso *__dsos__addnew(struct dsos *dsos, const char *name)
{
	struct dso *dso = dso__new(name);

	if (dso != NULL) {
		__dsos__add(dsos, dso);
		dso__set_basename(dso);
		/* Put dso here because __dsos__add already got it */
		dso__put(dso);
	}
	return dso;
}

struct dso *__dsos__findnew(struct dsos *dsos, const char *name)
{
	struct dso *dso = __dsos__find(dsos, name, false);

	return dso ? dso : __dsos__addnew(dsos, name);
}

struct dso *dsos__findnew(struct dsos *dsos, const char *name)
{
	struct dso *dso;

	pthread_rwlock_wrlock(&dsos->lock);
	dso = dso__get(__dsos__findnew(dsos, name));
	pthread_rwlock_unlock(&dsos->lock);
	return dso;
}
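/*
 * Note the ownership difference: dsos__findnew() returns a reference owned by
 * the caller (it wraps __dsos__findnew() in dso__get()), while __dsos__find()
 * and __dsos__findnew() return a pointer that may only be pinned by the list
 * itself. A sketch of using the locked variant:
 *
 *	struct dso *dso = dsos__findnew(dsos, name);
 *
 *	if (dso) {
 *		// ... use dso ...
 *		dso__put(dso);
 *	}
 */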
"" : "NOT "); 1471 ret += dso__fprintf_buildid(dso, fp); 1472 ret += fprintf(fp, ")\n"); 1473 for (nd = rb_first(&dso->symbols[type]); nd; nd = rb_next(nd)) { 1474 struct symbol *pos = rb_entry(nd, struct symbol, rb_node); 1475 ret += symbol__fprintf(pos, fp); 1476 } 1477 1478 return ret; 1479 } 1480 1481 enum dso_type dso__type(struct dso *dso, struct machine *machine) 1482 { 1483 int fd; 1484 enum dso_type type = DSO__TYPE_UNKNOWN; 1485 1486 fd = dso__data_get_fd(dso, machine); 1487 if (fd >= 0) { 1488 type = dso__type_fd(fd); 1489 dso__data_put_fd(dso); 1490 } 1491 1492 return type; 1493 } 1494 1495 int dso__strerror_load(struct dso *dso, char *buf, size_t buflen) 1496 { 1497 int idx, errnum = dso->load_errno; 1498 /* 1499 * This must have a same ordering as the enum dso_load_errno. 1500 */ 1501 static const char *dso_load__error_str[] = { 1502 "Internal tools/perf/ library error", 1503 "Invalid ELF file", 1504 "Can not read build id", 1505 "Mismatching build id", 1506 "Decompression failure", 1507 }; 1508 1509 BUG_ON(buflen == 0); 1510 1511 if (errnum >= 0) { 1512 const char *err = str_error_r(errnum, buf, buflen); 1513 1514 if (err != buf) 1515 scnprintf(buf, buflen, "%s", err); 1516 1517 return 0; 1518 } 1519 1520 if (errnum < __DSO_LOAD_ERRNO__START || errnum >= __DSO_LOAD_ERRNO__END) 1521 return -1; 1522 1523 idx = errnum - __DSO_LOAD_ERRNO__START; 1524 scnprintf(buf, buflen, "%s", dso_load__error_str[idx]); 1525 return 0; 1526 } 1527