// SPDX-License-Identifier: GPL-2.0
#include <asm/bug.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#include <stdlib.h>
#ifdef HAVE_LIBBPF_SUPPORT
#include <bpf/libbpf.h>
#include "bpf-event.h"
#include "bpf-utils.h"
#endif
#include "compress.h"
#include "env.h"
#include "namespaces.h"
#include "path.h"
#include "map.h"
#include "symbol.h"
#include "srcline.h"
#include "dso.h"
#include "dsos.h"
#include "machine.h"
#include "auxtrace.h"
#include "util.h" /* O_CLOEXEC for older systems */
#include "debug.h"
#include "string2.h"
#include "vdso.h"

static const char * const debuglink_paths[] = {
	"%.0s%s",
	"%s/%s",
	"%s/.debug/%s",
	"/usr/lib/debug%s/%s"
};

char dso__symtab_origin(const struct dso *dso)
{
	static const char origin[] = {
		[DSO_BINARY_TYPE__KALLSYMS] = 'k',
		[DSO_BINARY_TYPE__VMLINUX] = 'v',
		[DSO_BINARY_TYPE__JAVA_JIT] = 'j',
		[DSO_BINARY_TYPE__DEBUGLINK] = 'l',
		[DSO_BINARY_TYPE__BUILD_ID_CACHE] = 'B',
		[DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO] = 'D',
		[DSO_BINARY_TYPE__FEDORA_DEBUGINFO] = 'f',
		[DSO_BINARY_TYPE__UBUNTU_DEBUGINFO] = 'u',
		[DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO] = 'x',
		[DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO] = 'o',
		[DSO_BINARY_TYPE__BUILDID_DEBUGINFO] = 'b',
		[DSO_BINARY_TYPE__SYSTEM_PATH_DSO] = 'd',
		[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE] = 'K',
		[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP] = 'm',
		[DSO_BINARY_TYPE__GUEST_KALLSYMS] = 'g',
		[DSO_BINARY_TYPE__GUEST_KMODULE] = 'G',
		[DSO_BINARY_TYPE__GUEST_KMODULE_COMP] = 'M',
		[DSO_BINARY_TYPE__GUEST_VMLINUX] = 'V',
	};

	if (dso == NULL || dso->symtab_type == DSO_BINARY_TYPE__NOT_FOUND)
		return '!';
	return origin[dso->symtab_type];
}

int dso__read_binary_type_filename(const struct dso *dso,
				   enum dso_binary_type type,
				   char *root_dir, char *filename, size_t size)
{
	char build_id_hex[SBUILD_ID_SIZE];
	int ret = 0;
	size_t len;

	switch (type) {
	case DSO_BINARY_TYPE__DEBUGLINK:
	{
		const char *last_slash;
		char dso_dir[PATH_MAX];
		char symfile[PATH_MAX];
		unsigned int i;

		len = __symbol__join_symfs(filename, size, dso->long_name);
		last_slash = filename + len;
		while (last_slash != filename && *last_slash != '/')
			last_slash--;

		strncpy(dso_dir, filename, last_slash - filename);
		dso_dir[last_slash - filename] = '\0';

		if (!is_regular_file(filename)) {
			ret = -1;
			break;
		}

		ret = filename__read_debuglink(filename, symfile, PATH_MAX);
		if (ret)
			break;

		/* Check predefined locations where debug file might reside */
		ret = -1;
		for (i = 0; i < ARRAY_SIZE(debuglink_paths); i++) {
			snprintf(filename, size,
				 debuglink_paths[i], dso_dir, symfile);
			if (is_regular_file(filename)) {
				ret = 0;
				break;
			}
		}

		break;
	}
	case DSO_BINARY_TYPE__BUILD_ID_CACHE:
		if (dso__build_id_filename(dso, filename, size, false) == NULL)
			ret = -1;
		break;

	case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
		if (dso__build_id_filename(dso, filename, size, true) == NULL)
			ret = -1;
		break;

	case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
		snprintf(filename + len, size - len,
			 "%s.debug", dso->long_name);
		break;

	case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
		snprintf(filename + len, size - len, "%s", dso->long_name);
		break;

	case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO:
		/*
		 * Ubuntu can mix up /usr/lib with /lib, putting debuginfo in
		 * /usr/lib/debug/lib when it is expected to be in
		 * /usr/lib/debug/usr/lib
		 */
		if (strlen(dso->long_name) < 9 ||
		    strncmp(dso->long_name, "/usr/lib/", 9)) {
			ret = -1;
			break;
		}
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
		snprintf(filename + len, size - len, "%s", dso->long_name + 4);
		break;

	case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
	{
		const char *last_slash;
		size_t dir_size;

		last_slash = dso->long_name + dso->long_name_len;
		while (last_slash != dso->long_name && *last_slash != '/')
			last_slash--;

		len = __symbol__join_symfs(filename, size, "");
		dir_size = last_slash - dso->long_name + 2;
		if (dir_size > (size - len)) {
			ret = -1;
			break;
		}
		len += scnprintf(filename + len, dir_size, "%s", dso->long_name);
		len += scnprintf(filename + len, size - len, ".debug%s",
				 last_slash);
		break;
	}

	case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
		if (!dso->has_build_id) {
			ret = -1;
			break;
		}

		build_id__sprintf(&dso->bid, build_id_hex);
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug/.build-id/");
		snprintf(filename + len, size - len, "%.2s/%s.debug",
			 build_id_hex, build_id_hex + 2);
		break;

	case DSO_BINARY_TYPE__VMLINUX:
	case DSO_BINARY_TYPE__GUEST_VMLINUX:
	case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
		__symbol__join_symfs(filename, size, dso->long_name);
		break;

	case DSO_BINARY_TYPE__GUEST_KMODULE:
	case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
		path__join3(filename, size, symbol_conf.symfs,
			    root_dir, dso->long_name);
		break;

	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
		__symbol__join_symfs(filename, size, dso->long_name);
		break;

	case DSO_BINARY_TYPE__KCORE:
	case DSO_BINARY_TYPE__GUEST_KCORE:
		snprintf(filename, size, "%s", dso->long_name);
		break;

	default:
	case DSO_BINARY_TYPE__KALLSYMS:
	case DSO_BINARY_TYPE__GUEST_KALLSYMS:
	case DSO_BINARY_TYPE__JAVA_JIT:
	case DSO_BINARY_TYPE__BPF_PROG_INFO:
	case DSO_BINARY_TYPE__BPF_IMAGE:
	case DSO_BINARY_TYPE__OOL:
	case DSO_BINARY_TYPE__NOT_FOUND:
		ret = -1;
		break;
	}

	return ret;
}

enum {
	COMP_ID__NONE = 0,
};

static const struct {
	const char *fmt;
	int (*decompress)(const char *input, int output);
	bool (*is_compressed)(const char *input);
} compressions[] = {
	[COMP_ID__NONE] = { .fmt = NULL, },
#ifdef HAVE_ZLIB_SUPPORT
	{ "gz", gzip_decompress_to_file, gzip_is_compressed },
#endif
#ifdef HAVE_LZMA_SUPPORT
	{ "xz", lzma_decompress_to_file, lzma_is_compressed },
#endif
	{ NULL, NULL, NULL },
};

static int is_supported_compression(const char *ext)
{
	unsigned i;

	for (i = 1; compressions[i].fmt; i++) {
		if (!strcmp(ext, compressions[i].fmt))
			return i;
	}
	return COMP_ID__NONE;
}

bool is_kernel_module(const char *pathname, int cpumode)
{
	struct kmod_path m;
	int mode = cpumode & PERF_RECORD_MISC_CPUMODE_MASK;

	WARN_ONCE(mode != cpumode,
		  "Internal error: passing unmasked cpumode (%x) to is_kernel_module",
		  cpumode);

	switch (mode) {
	case PERF_RECORD_MISC_USER:
	case PERF_RECORD_MISC_HYPERVISOR:
	case PERF_RECORD_MISC_GUEST_USER:
		return false;
	/* Treat PERF_RECORD_MISC_CPUMODE_UNKNOWN as kernel */
	default:
		if (kmod_path__parse(&m, pathname)) {
			pr_err("Failed to check whether %s is a kernel module or not. Assume it is.",
			       pathname);
			return true;
		}
	}

	return m.kmod;
}

bool dso__needs_decompress(struct dso *dso)
{
	return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
		dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
}

int filename__decompress(const char *name, char *pathname,
			 size_t len, int comp, int *err)
{
	char tmpbuf[] = KMOD_DECOMP_NAME;
	int fd = -1;

	/*
	 * We have a proper compression id for the DSO and yet the file
	 * behind the 'name' can still be a plain uncompressed object.
	 *
	 * The reason is the logic behind how we open DSO object files:
	 * we try all possible 'debug' objects until we find the data.
	 * So even if the DSO is represented by a 'krava.xz' module,
	 * we can end up here opening a '~/.debug/....23432432/debug' file
	 * which is not compressed.
	 *
	 * To keep this transparent, we detect this and return the file
	 * descriptor to the uncompressed file.
	 */
	if (!compressions[comp].is_compressed(name))
		return open(name, O_RDONLY);

	fd = mkstemp(tmpbuf);
	if (fd < 0) {
		*err = errno;
		return -1;
	}

	if (compressions[comp].decompress(name, fd)) {
		*err = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE;
		close(fd);
		fd = -1;
	}

	if (!pathname || (fd < 0))
		unlink(tmpbuf);

	if (pathname && (fd >= 0))
		strlcpy(pathname, tmpbuf, len);

	return fd;
}

static int decompress_kmodule(struct dso *dso, const char *name,
			      char *pathname, size_t len)
{
	if (!dso__needs_decompress(dso))
		return -1;

	if (dso->comp == COMP_ID__NONE)
		return -1;

	return filename__decompress(name, pathname, len, dso->comp,
				    &dso->load_errno);
}

int dso__decompress_kmodule_fd(struct dso *dso, const char *name)
{
	return decompress_kmodule(dso, name, NULL, 0);
}

int dso__decompress_kmodule_path(struct dso *dso, const char *name,
				 char *pathname, size_t len)
{
	int fd = decompress_kmodule(dso, name, pathname, len);

	close(fd);
	return fd >= 0 ? 0 : -1;
}

/*
 * Parses kernel module specified in @path and updates
 * @m argument like:
 *
 * @comp - true if @path contains supported compression suffix,
 *	   false otherwise
 * @kmod - true if @path contains '.ko' suffix in right position,
 *	   false otherwise
 * @name - if (@alloc_name && @kmod) is true, it contains strdup-ed base name
 *	   of the kernel module without suffixes, otherwise strdup-ed
 *	   base name of @path
 * @ext  - if (@alloc_ext && @comp) is true, it contains a strdup-ed string
 *	   with the compression suffix
 *
 * Returns 0 if there's no strdup error, -ENOMEM otherwise.
 */
int __kmod_path__parse(struct kmod_path *m, const char *path,
		       bool alloc_name)
{
	const char *name = strrchr(path, '/');
	const char *ext  = strrchr(path, '.');
	bool is_simple_name = false;

	memset(m, 0x0, sizeof(*m));
	name = name ? name + 1 : path;

	/*
	 * '.' is also a valid character for a module name. For example:
	 * [aaa.bbb] is a valid module name. '[' should have higher
	 * priority than the '.ko' suffix.
	 *
	 * The kernel names are from machine__mmap_name. Such a
	 * name belongs to the kernel itself, not to a kernel module.
	 */
	if (name[0] == '[') {
		is_simple_name = true;
		if ((strncmp(name, "[kernel.kallsyms]", 17) == 0) ||
		    (strncmp(name, "[guest.kernel.kallsyms", 22) == 0) ||
		    (strncmp(name, "[vdso]", 6) == 0) ||
		    (strncmp(name, "[vdso32]", 8) == 0) ||
		    (strncmp(name, "[vdsox32]", 9) == 0) ||
		    (strncmp(name, "[vsyscall]", 10) == 0)) {
			m->kmod = false;

		} else
			m->kmod = true;
	}

	/* No extension, just return name. */
	if ((ext == NULL) || is_simple_name) {
		if (alloc_name) {
			m->name = strdup(name);
			return m->name ? 0 : -ENOMEM;
		}
		return 0;
	}

	m->comp = is_supported_compression(ext + 1);
	if (m->comp > COMP_ID__NONE)
		ext -= 3;

	/* Check .ko extension only if there's enough name left. */
	if (ext > name)
		m->kmod = !strncmp(ext, ".ko", 3);

	if (alloc_name) {
		if (m->kmod) {
			if (asprintf(&m->name, "[%.*s]", (int) (ext - name), name) == -1)
				return -ENOMEM;
		} else {
			if (asprintf(&m->name, "%s", name) == -1)
				return -ENOMEM;
		}

		strreplace(m->name, '-', '_');
	}

	return 0;
}

void dso__set_module_info(struct dso *dso, struct kmod_path *m,
			  struct machine *machine)
{
	if (machine__is_host(machine))
		dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
	else
		dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;

	/* _KMODULE_COMP should be next to _KMODULE */
	if (m->kmod && m->comp) {
		dso->symtab_type++;
		dso->comp = m->comp;
	}

	dso__set_short_name(dso, strdup(m->name), true);
}

/*
 * Global list of open DSOs and the counter.
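 * Entries are added in open order; close_first_dso() evicts the oldest
 * descriptor once may_cache_fd() reports the fd budget is exceeded.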
 */
static LIST_HEAD(dso__data_open);
static long dso__data_open_cnt;
static pthread_mutex_t dso__data_open_lock = PTHREAD_MUTEX_INITIALIZER;

static void dso__list_add(struct dso *dso)
{
	list_add_tail(&dso->data.open_entry, &dso__data_open);
	dso__data_open_cnt++;
}

static void dso__list_del(struct dso *dso)
{
	list_del_init(&dso->data.open_entry);
	WARN_ONCE(dso__data_open_cnt <= 0,
		  "DSO data fd counter out of bounds.");
	dso__data_open_cnt--;
}

static void close_first_dso(void);

static int do_open(char *name)
{
	int fd;
	char sbuf[STRERR_BUFSIZE];

	do {
		fd = open(name, O_RDONLY|O_CLOEXEC);
		if (fd >= 0)
			return fd;

		pr_debug("dso open failed: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		if (!dso__data_open_cnt || errno != EMFILE)
			break;

		close_first_dso();
	} while (1);

	return -1;
}

static int __open_dso(struct dso *dso, struct machine *machine)
{
	int fd = -EINVAL;
	char *root_dir = (char *)"";
	char *name = malloc(PATH_MAX);
	bool decomp = false;

	if (!name)
		return -ENOMEM;

	mutex_lock(&dso->lock);
	if (machine)
		root_dir = machine->root_dir;

	if (dso__read_binary_type_filename(dso, dso->binary_type,
					   root_dir, name, PATH_MAX))
		goto out;

	if (!is_regular_file(name)) {
		char *new_name;

		if (errno != ENOENT || dso->nsinfo == NULL)
			goto out;

		new_name = filename_with_chroot(dso->nsinfo->pid, name);
		if (!new_name)
			goto out;

		free(name);
		name = new_name;
	}

	if (dso__needs_decompress(dso)) {
		char newpath[KMOD_DECOMP_LEN];
		size_t len = sizeof(newpath);

		if (dso__decompress_kmodule_path(dso, name, newpath, len) < 0) {
			fd = -dso->load_errno;
			goto out;
		}

		decomp = true;
		strcpy(name, newpath);
	}

	fd = do_open(name);

	if (decomp)
		unlink(name);

out:
	mutex_unlock(&dso->lock);
	free(name);
	return fd;
}

static void check_data_close(void);

/**
 * open_dso - Open DSO data file
 * @dso: dso object
 * @machine: machine object
 *
 * Opens @dso's data file descriptor and updates the
 * list/count of open DSO objects.
 */
static int open_dso(struct dso *dso, struct machine *machine)
{
	int fd;
	struct nscookie nsc;

	if (dso->binary_type != DSO_BINARY_TYPE__BUILD_ID_CACHE) {
		mutex_lock(&dso->lock);
		nsinfo__mountns_enter(dso->nsinfo, &nsc);
		mutex_unlock(&dso->lock);
	}
	fd = __open_dso(dso, machine);
	if (dso->binary_type != DSO_BINARY_TYPE__BUILD_ID_CACHE)
		nsinfo__mountns_exit(&nsc);

	if (fd >= 0) {
		dso__list_add(dso);
		/*
		 * Check if we crossed the allowed number
		 * of opened DSOs and close one if needed.
		 */
		check_data_close();
	}

	return fd;
}

static void close_data_fd(struct dso *dso)
{
	if (dso->data.fd >= 0) {
		close(dso->data.fd);
		dso->data.fd = -1;
		dso->data.file_size = 0;
		dso__list_del(dso);
	}
}

/**
 * close_dso - Close DSO data file
 * @dso: dso object
 *
 * Closes @dso's data file descriptor and updates the
 * list/count of open DSO objects.
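 *
 * Called with dso__data_open_lock held.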
 */
static void close_dso(struct dso *dso)
{
	close_data_fd(dso);
}

static void close_first_dso(void)
{
	struct dso *dso;

	dso = list_first_entry(&dso__data_open, struct dso, data.open_entry);
	close_dso(dso);
}

static rlim_t get_fd_limit(void)
{
	struct rlimit l;
	rlim_t limit = 0;

	/* Allow half of the current open fd limit. */
	if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
		if (l.rlim_cur == RLIM_INFINITY)
			limit = l.rlim_cur;
		else
			limit = l.rlim_cur / 2;
	} else {
		pr_err("failed to get fd limit\n");
		limit = 1;
	}

	return limit;
}

static rlim_t fd_limit;

/*
 * Used only by tests/dso-data.c to reset the environment
 * for tests. I don't expect we should change this during
 * standard runtime.
 */
void reset_fd_limit(void)
{
	fd_limit = 0;
}

static bool may_cache_fd(void)
{
	if (!fd_limit)
		fd_limit = get_fd_limit();

	if (fd_limit == RLIM_INFINITY)
		return true;

	return fd_limit > (rlim_t) dso__data_open_cnt;
}

/*
 * Check and close the LRU dso if we crossed the allowed limit
 * for opened dso file descriptors. The limit is half
 * of the RLIMIT_NOFILE files opened.
 */
static void check_data_close(void)
{
	bool cache_fd = may_cache_fd();

	if (!cache_fd)
		close_first_dso();
}

/**
 * dso__data_close - Close DSO data file
 * @dso: dso object
 *
 * External interface to close @dso's data file descriptor.
 */
void dso__data_close(struct dso *dso)
{
	pthread_mutex_lock(&dso__data_open_lock);
	close_dso(dso);
	pthread_mutex_unlock(&dso__data_open_lock);
}

static void try_to_open_dso(struct dso *dso, struct machine *machine)
{
	enum dso_binary_type binary_type_data[] = {
		DSO_BINARY_TYPE__BUILD_ID_CACHE,
		DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
		DSO_BINARY_TYPE__NOT_FOUND,
	};
	int i = 0;

	if (dso->data.fd >= 0)
		return;

	if (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND) {
		dso->data.fd = open_dso(dso, machine);
		goto out;
	}

	do {
		dso->binary_type = binary_type_data[i++];

		dso->data.fd = open_dso(dso, machine);
		if (dso->data.fd >= 0)
			goto out;

	} while (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND);
out:
	if (dso->data.fd >= 0)
		dso->data.status = DSO_DATA_STATUS_OK;
	else
		dso->data.status = DSO_DATA_STATUS_ERROR;
}

/**
 * dso__data_get_fd - Get dso's data file descriptor
 * @dso: dso object
 * @machine: machine object
 *
 * External interface to find dso's file, open it and
 * return the file descriptor. It must be paired with
 * dso__data_put_fd() if it returns a non-negative value.
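 *
 * Note that dso__data_open_lock stays held on a successful return and is
 * only released by dso__data_put_fd().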
 */
int dso__data_get_fd(struct dso *dso, struct machine *machine)
{
	if (dso->data.status == DSO_DATA_STATUS_ERROR)
		return -1;

	if (pthread_mutex_lock(&dso__data_open_lock) < 0)
		return -1;

	try_to_open_dso(dso, machine);

	if (dso->data.fd < 0)
		pthread_mutex_unlock(&dso__data_open_lock);

	return dso->data.fd;
}

void dso__data_put_fd(struct dso *dso __maybe_unused)
{
	pthread_mutex_unlock(&dso__data_open_lock);
}

bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by)
{
	u32 flag = 1 << by;

	if (dso->data.status_seen & flag)
		return true;

	dso->data.status_seen |= flag;

	return false;
}

#ifdef HAVE_LIBBPF_SUPPORT
static ssize_t bpf_read(struct dso *dso, u64 offset, char *data)
{
	struct bpf_prog_info_node *node;
	ssize_t size = DSO__DATA_CACHE_SIZE;
	u64 len;
	u8 *buf;

	node = perf_env__find_bpf_prog_info(dso->bpf_prog.env, dso->bpf_prog.id);
	if (!node || !node->info_linear) {
		dso->data.status = DSO_DATA_STATUS_ERROR;
		return -1;
	}

	len = node->info_linear->info.jited_prog_len;
	buf = (u8 *)(uintptr_t)node->info_linear->info.jited_prog_insns;

	if (offset >= len)
		return -1;

	size = (ssize_t)min(len - offset, (u64)size);
	memcpy(data, buf + offset, size);
	return size;
}

static int bpf_size(struct dso *dso)
{
	struct bpf_prog_info_node *node;

	node = perf_env__find_bpf_prog_info(dso->bpf_prog.env, dso->bpf_prog.id);
	if (!node || !node->info_linear) {
		dso->data.status = DSO_DATA_STATUS_ERROR;
		return -1;
	}

	dso->data.file_size = node->info_linear->info.jited_prog_len;
	return 0;
}
#endif // HAVE_LIBBPF_SUPPORT

static void
dso_cache__free(struct dso *dso)
{
	struct rb_root *root = &dso->data.cache;
	struct rb_node *next = rb_first(root);

	mutex_lock(&dso->lock);
	while (next) {
		struct dso_cache *cache;

		cache = rb_entry(next, struct dso_cache, rb_node);
		next = rb_next(&cache->rb_node);
		rb_erase(&cache->rb_node, root);
		free(cache);
	}
	mutex_unlock(&dso->lock);
}

static struct dso_cache *__dso_cache__find(struct dso *dso, u64 offset)
{
	const struct rb_root *root = &dso->data.cache;
	struct rb_node * const *p = &root->rb_node;
	const struct rb_node *parent = NULL;
	struct dso_cache *cache;

	while (*p != NULL) {
		u64 end;

		parent = *p;
		cache = rb_entry(parent, struct dso_cache, rb_node);
		end = cache->offset + DSO__DATA_CACHE_SIZE;

		if (offset < cache->offset)
			p = &(*p)->rb_left;
		else if (offset >= end)
			p = &(*p)->rb_right;
		else
			return cache;
	}

	return NULL;
}

static struct dso_cache *
dso_cache__insert(struct dso *dso, struct dso_cache *new)
{
	struct rb_root *root = &dso->data.cache;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct dso_cache *cache;
	u64 offset = new->offset;

	mutex_lock(&dso->lock);
	while (*p != NULL) {
		u64 end;

		parent = *p;
		cache = rb_entry(parent, struct dso_cache, rb_node);
		end = cache->offset + DSO__DATA_CACHE_SIZE;

		if (offset < cache->offset)
			p = &(*p)->rb_left;
		else if (offset >= end)
			p = &(*p)->rb_right;
		else
			goto out;
	}

	rb_link_node(&new->rb_node, parent, p);
	rb_insert_color(&new->rb_node, root);

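	/* The new node was linked in: no pre-existing entry to hand back. */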
	cache = NULL;
out:
	mutex_unlock(&dso->lock);
	return cache;
}

static ssize_t dso_cache__memcpy(struct dso_cache *cache, u64 offset, u8 *data,
				 u64 size, bool out)
{
	u64 cache_offset = offset - cache->offset;
	u64 cache_size = min(cache->size - cache_offset, size);

	if (out)
		memcpy(data, cache->data + cache_offset, cache_size);
	else
		memcpy(cache->data + cache_offset, data, cache_size);
	return cache_size;
}

static ssize_t file_read(struct dso *dso, struct machine *machine,
			 u64 offset, char *data)
{
	ssize_t ret;

	pthread_mutex_lock(&dso__data_open_lock);

	/*
	 * dso->data.fd might be closed if another thread opened another
	 * file (dso) due to the open file limit (RLIMIT_NOFILE).
	 */
	try_to_open_dso(dso, machine);

	if (dso->data.fd < 0) {
		dso->data.status = DSO_DATA_STATUS_ERROR;
		ret = -errno;
		goto out;
	}

	ret = pread(dso->data.fd, data, DSO__DATA_CACHE_SIZE, offset);
out:
	pthread_mutex_unlock(&dso__data_open_lock);
	return ret;
}

static struct dso_cache *dso_cache__populate(struct dso *dso,
					     struct machine *machine,
					     u64 offset, ssize_t *ret)
{
	u64 cache_offset = offset & DSO__DATA_CACHE_MASK;
	struct dso_cache *cache;
	struct dso_cache *old;

	cache = zalloc(sizeof(*cache) + DSO__DATA_CACHE_SIZE);
	if (!cache) {
		*ret = -ENOMEM;
		return NULL;
	}
#ifdef HAVE_LIBBPF_SUPPORT
	if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO)
		*ret = bpf_read(dso, cache_offset, cache->data);
	else
#endif
	if (dso->binary_type == DSO_BINARY_TYPE__OOL)
		*ret = DSO__DATA_CACHE_SIZE;
	else
		*ret = file_read(dso, machine, cache_offset, cache->data);

	if (*ret <= 0) {
		free(cache);
		return NULL;
	}

	cache->offset = cache_offset;
	cache->size = *ret;

	old = dso_cache__insert(dso, cache);
	if (old) {
		/* we lose the race */
		free(cache);
		cache = old;
	}

	return cache;
}

static struct dso_cache *dso_cache__find(struct dso *dso,
					 struct machine *machine,
					 u64 offset,
					 ssize_t *ret)
{
	struct dso_cache *cache = __dso_cache__find(dso, offset);

	return cache ? cache : dso_cache__populate(dso, machine, offset, ret);
}

static ssize_t dso_cache_io(struct dso *dso, struct machine *machine,
			    u64 offset, u8 *data, ssize_t size, bool out)
{
	struct dso_cache *cache;
	ssize_t ret = 0;

	cache = dso_cache__find(dso, machine, offset, &ret);
	if (!cache)
		return ret;

	return dso_cache__memcpy(cache, offset, data, size, out);
}

/*
 * Reads and caches dso data in DSO__DATA_CACHE_SIZE chunks
 * in the rb_tree. Any read of already cached data is served
 * from the cache. Writes update the cache only, not the backing file.
 */
static ssize_t cached_io(struct dso *dso, struct machine *machine,
			 u64 offset, u8 *data, ssize_t size, bool out)
{
	ssize_t r = 0;
	u8 *p = data;

	do {
		ssize_t ret;

		ret = dso_cache_io(dso, machine, offset, p, size, out);
		if (ret < 0)
			return ret;

		/* Reached EOF, return what we have. */
		if (!ret)
			break;

		BUG_ON(ret > size);

		r += ret;
		p += ret;
		offset += ret;
		size -= ret;

	} while (size);

	return r;
}

static int file_size(struct dso *dso, struct machine *machine)
{
	int ret = 0;
	struct stat st;
	char sbuf[STRERR_BUFSIZE];

	pthread_mutex_lock(&dso__data_open_lock);

	/*
	 * dso->data.fd might be closed if another thread opened another
	 * file (dso) due to the open file limit (RLIMIT_NOFILE).
	 */
	try_to_open_dso(dso, machine);

	if (dso->data.fd < 0) {
		ret = -errno;
		dso->data.status = DSO_DATA_STATUS_ERROR;
		goto out;
	}

	if (fstat(dso->data.fd, &st) < 0) {
		ret = -errno;
		pr_err("dso cache fstat failed: %s\n",
		       str_error_r(errno, sbuf, sizeof(sbuf)));
		dso->data.status = DSO_DATA_STATUS_ERROR;
		goto out;
	}
	dso->data.file_size = st.st_size;

out:
	pthread_mutex_unlock(&dso__data_open_lock);
	return ret;
}

int dso__data_file_size(struct dso *dso, struct machine *machine)
{
	if (dso->data.file_size)
		return 0;

	if (dso->data.status == DSO_DATA_STATUS_ERROR)
		return -1;
#ifdef HAVE_LIBBPF_SUPPORT
	if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO)
		return bpf_size(dso);
#endif
	return file_size(dso, machine);
}

/**
 * dso__data_size - Return dso data size
 * @dso: dso object
 * @machine: machine object
 *
 * Return: dso data size
 */
off_t dso__data_size(struct dso *dso, struct machine *machine)
{
	if (dso__data_file_size(dso, machine))
		return -1;

	/* For now just estimate dso data size is close to file size */
	return dso->data.file_size;
}

static ssize_t data_read_write_offset(struct dso *dso, struct machine *machine,
				      u64 offset, u8 *data, ssize_t size,
				      bool out)
{
	if (dso__data_file_size(dso, machine))
		return -1;

	/* Check the offset sanity. */
	if (offset > dso->data.file_size)
		return -1;

	if (offset + size < offset)
		return -1;

	return cached_io(dso, machine, offset, data, size, out);
}

/**
 * dso__data_read_offset - Read data from dso file offset
 * @dso: dso object
 * @machine: machine object
 * @offset: file offset
 * @data: buffer to store data
 * @size: size of the @data buffer
 *
 * External interface to read data from a dso file offset. Opens the
 * dso data file and uses cached_io() to get the data.
 */
ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
			      u64 offset, u8 *data, ssize_t size)
{
	if (dso->data.status == DSO_DATA_STATUS_ERROR)
		return -1;

	return data_read_write_offset(dso, machine, offset, data, size, true);
}

/**
 * dso__data_read_addr - Read data from dso address
 * @dso: dso object
 * @map: map used to translate @addr to a file offset
 * @machine: machine object
 * @addr: virtual memory address
 * @data: buffer to store data
 * @size: size of the @data buffer
 *
 * External interface to read data from a dso address.
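 * The @addr is translated to a file offset via @map before reading.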
 */
ssize_t dso__data_read_addr(struct dso *dso, struct map *map,
			    struct machine *machine, u64 addr,
			    u8 *data, ssize_t size)
{
	u64 offset = map->map_ip(map, addr);

	return dso__data_read_offset(dso, machine, offset, data, size);
}

/**
 * dso__data_write_cache_offs - Write data to dso data cache at file offset
 * @dso: dso object
 * @machine: machine object
 * @offset: file offset
 * @data_in: buffer to write
 * @size: size of the @data_in buffer
 *
 * Write into the dso file data cache, but do not change the file itself.
 */
ssize_t dso__data_write_cache_offs(struct dso *dso, struct machine *machine,
				   u64 offset, const u8 *data_in, ssize_t size)
{
	u8 *data = (u8 *)data_in; /* cast away const to use same fns for r/w */

	if (dso->data.status == DSO_DATA_STATUS_ERROR)
		return -1;

	return data_read_write_offset(dso, machine, offset, data, size, false);
}

/**
 * dso__data_write_cache_addr - Write data to dso data cache at dso address
 * @dso: dso object
 * @map: map used to translate @addr to a file offset
 * @machine: machine object
 * @addr: virtual memory address
 * @data: buffer to write
 * @size: size of the @data buffer
 *
 * External interface to write into the dso file data cache, but do not change
 * the file itself.
 */
ssize_t dso__data_write_cache_addr(struct dso *dso, struct map *map,
				   struct machine *machine, u64 addr,
				   const u8 *data, ssize_t size)
{
	u64 offset = map->map_ip(map, addr);

	return dso__data_write_cache_offs(dso, machine, offset, data, size);
}

struct map *dso__new_map(const char *name)
{
	struct map *map = NULL;
	struct dso *dso = dso__new(name);

	if (dso) {
		map = map__new2(0, dso);
		dso__put(dso);
	}

	return map;
}

struct dso *machine__findnew_kernel(struct machine *machine, const char *name,
				    const char *short_name, int dso_type)
{
	/*
	 * The kernel dso could be created by build_id processing.
	 */
	struct dso *dso = machine__findnew_dso(machine, name);

	/*
	 * We need to run this in all cases, since during the build_id
	 * processing we had no idea this was the kernel dso.
	 */
	if (dso != NULL) {
		dso__set_short_name(dso, short_name, false);
		dso->kernel = dso_type;
	}

	return dso;
}

static void dso__set_long_name_id(struct dso *dso, const char *name, struct dso_id *id, bool name_allocated)
{
	struct rb_root *root = dso->root;

	if (name == NULL)
		return;

	if (dso->long_name_allocated)
		free((char *)dso->long_name);

	if (root) {
		rb_erase(&dso->rb_node, root);
		/*
		 * __dsos__findnew_link_by_longname_id() isn't guaranteed to
		 * add it back, so a clean removal is required here.
		 */
		RB_CLEAR_NODE(&dso->rb_node);
		dso->root = NULL;
	}

	dso->long_name = name;
	dso->long_name_len = strlen(name);
	dso->long_name_allocated = name_allocated;

	if (root)
		__dsos__findnew_link_by_longname_id(root, dso, NULL, id);
}

void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated)
{
	dso__set_long_name_id(dso, name, NULL, name_allocated);
}

void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
{
	if (name == NULL)
		return;

	if (dso->short_name_allocated)
		free((char *)dso->short_name);

	dso->short_name = name;
	dso->short_name_len = strlen(name);
	dso->short_name_allocated = name_allocated;
}

int dso__name_len(const struct dso *dso)
{
	if (!dso)
		return strlen("[unknown]");
	if (verbose > 0)
		return dso->long_name_len;

	return dso->short_name_len;
}

bool dso__loaded(const struct dso *dso)
{
	return dso->loaded;
}

bool dso__sorted_by_name(const struct dso *dso)
{
	return dso->sorted_by_name;
}

void dso__set_sorted_by_name(struct dso *dso)
{
	dso->sorted_by_name = true;
}

struct dso *dso__new_id(const char *name, struct dso_id *id)
{
	struct dso *dso = calloc(1, sizeof(*dso) + strlen(name) + 1);

	if (dso != NULL) {
		strcpy(dso->name, name);
		if (id)
			dso->id = *id;
		dso__set_long_name_id(dso, dso->name, id, false);
		dso__set_short_name(dso, dso->name, false);
		dso->symbols = dso->symbol_names = RB_ROOT_CACHED;
		dso->data.cache = RB_ROOT;
		dso->inlined_nodes = RB_ROOT_CACHED;
		dso->srclines = RB_ROOT_CACHED;
		dso->data.fd = -1;
		dso->data.status = DSO_DATA_STATUS_UNKNOWN;
		dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND;
		dso->binary_type = DSO_BINARY_TYPE__NOT_FOUND;
		dso->is_64_bit = (sizeof(void *) == 8);
		dso->loaded = 0;
		dso->rel = 0;
		dso->sorted_by_name = 0;
		dso->has_build_id = 0;
		dso->has_srcline = 1;
		dso->a2l_fails = 1;
		dso->kernel = DSO_SPACE__USER;
		dso->needs_swap = DSO_SWAP__UNSET;
		dso->comp = COMP_ID__NONE;
		RB_CLEAR_NODE(&dso->rb_node);
		dso->root = NULL;
		INIT_LIST_HEAD(&dso->node);
		INIT_LIST_HEAD(&dso->data.open_entry);
		mutex_init(&dso->lock);
		refcount_set(&dso->refcnt, 1);
	}

	return dso;
}

struct dso *dso__new(const char *name)
{
	return dso__new_id(name, NULL);
}

void dso__delete(struct dso *dso)
{
	if (!RB_EMPTY_NODE(&dso->rb_node))
		pr_err("DSO %s is still in rbtree when being deleted!\n",
		       dso->long_name);

	/* free inlines first, as they reference symbols */
	inlines__tree_delete(&dso->inlined_nodes);
	srcline__tree_delete(&dso->srclines);
	symbols__delete(&dso->symbols);

	if (dso->short_name_allocated) {
		zfree((char **)&dso->short_name);
		dso->short_name_allocated = false;
	}

	if (dso->long_name_allocated) {
		zfree((char **)&dso->long_name);
		dso->long_name_allocated = false;
	}

	dso__data_close(dso);
	auxtrace_cache__free(dso->auxtrace_cache);
	dso_cache__free(dso);
	dso__free_a2l(dso);
	zfree(&dso->symsrc_filename);
	nsinfo__zput(dso->nsinfo);
	mutex_destroy(&dso->lock);
	free(dso);
}

struct dso *dso__get(struct dso *dso)
{
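	/* Take a reference; the matching dso__put() frees the dso on the last drop. */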
	if (dso)
		refcount_inc(&dso->refcnt);
	return dso;
}

void dso__put(struct dso *dso)
{
	if (dso && refcount_dec_and_test(&dso->refcnt))
		dso__delete(dso);
}

void dso__set_build_id(struct dso *dso, struct build_id *bid)
{
	dso->bid = *bid;
	dso->has_build_id = 1;
}

bool dso__build_id_equal(const struct dso *dso, struct build_id *bid)
{
	if (dso->bid.size > bid->size && dso->bid.size == BUILD_ID_SIZE) {
		/*
		 * For backward compatibility, allow a build-id to have
		 * trailing zeros.
		 */
		return !memcmp(dso->bid.data, bid->data, bid->size) &&
			!memchr_inv(&dso->bid.data[bid->size], 0,
				    dso->bid.size - bid->size);
	}

	return dso->bid.size == bid->size &&
	       memcmp(dso->bid.data, bid->data, dso->bid.size) == 0;
}

void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine)
{
	char path[PATH_MAX];

	if (machine__is_default_guest(machine))
		return;
	sprintf(path, "%s/sys/kernel/notes", machine->root_dir);
	if (sysfs__read_build_id(path, &dso->bid) == 0)
		dso->has_build_id = true;
}

int dso__kernel_module_get_build_id(struct dso *dso,
				    const char *root_dir)
{
	char filename[PATH_MAX];
	/*
	 * kernel module short names are of the form "[module]" and
	 * we need just "module" here.
	 */
	const char *name = dso->short_name + 1;

	snprintf(filename, sizeof(filename),
		 "%s/sys/module/%.*s/notes/.note.gnu.build-id",
		 root_dir, (int)strlen(name) - 1, name);

	if (sysfs__read_build_id(filename, &dso->bid) == 0)
		dso->has_build_id = true;

	return 0;
}

static size_t dso__fprintf_buildid(struct dso *dso, FILE *fp)
{
	char sbuild_id[SBUILD_ID_SIZE];

	build_id__sprintf(&dso->bid, sbuild_id);
	return fprintf(fp, "%s", sbuild_id);
}

size_t dso__fprintf(struct dso *dso, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = fprintf(fp, "dso: %s (", dso->short_name);

	if (dso->short_name != dso->long_name)
		ret += fprintf(fp, "%s, ", dso->long_name);
	ret += fprintf(fp, "%sloaded, ", dso__loaded(dso) ? "" : "NOT ");
	ret += dso__fprintf_buildid(dso, fp);
	ret += fprintf(fp, ")\n");
	for (nd = rb_first_cached(&dso->symbols); nd; nd = rb_next(nd)) {
		struct symbol *pos = rb_entry(nd, struct symbol, rb_node);

		ret += symbol__fprintf(pos, fp);
	}

	return ret;
}

enum dso_type dso__type(struct dso *dso, struct machine *machine)
{
	int fd;
	enum dso_type type = DSO__TYPE_UNKNOWN;

	fd = dso__data_get_fd(dso, machine);
	if (fd >= 0) {
		type = dso__type_fd(fd);
		dso__data_put_fd(dso);
	}

	return type;
}

int dso__strerror_load(struct dso *dso, char *buf, size_t buflen)
{
	int idx, errnum = dso->load_errno;
	/*
	 * This must have the same ordering as the enum dso_load_errno.
	 */
	static const char *dso_load__error_str[] = {
		"Internal tools/perf/ library error",
		"Invalid ELF file",
		"Can not read build id",
		"Mismatching build id",
		"Decompression failure",
	};

	BUG_ON(buflen == 0);

	if (errnum >= 0) {
		const char *err = str_error_r(errnum, buf, buflen);

		if (err != buf)
			scnprintf(buf, buflen, "%s", err);

		return 0;
	}

	if (errnum < __DSO_LOAD_ERRNO__START || errnum >= __DSO_LOAD_ERRNO__END)
		return -1;

	idx = errnum - __DSO_LOAD_ERRNO__START;
	scnprintf(buf, buflen, "%s", dso_load__error_str[idx]);
	return 0;
}