// SPDX-License-Identifier: GPL-2.0
#include <asm/bug.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#include <stdlib.h>
#include <bpf/libbpf.h>
#include "bpf-event.h"
#include "compress.h"
#include "env.h"
#include "namespaces.h"
#include "path.h"
#include "map.h"
#include "symbol.h"
#include "srcline.h"
#include "dso.h"
#include "dsos.h"
#include "machine.h"
#include "auxtrace.h"
#include "util.h" /* O_CLOEXEC for older systems */
#include "debug.h"
#include "string2.h"
#include "vdso.h"

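/*
 * Format strings used by the DSO_BINARY_TYPE__DEBUGLINK case below.
 * For a DSO living in <dir> whose .gnu_debuglink section names
 * <debugfile>, they expand (in order) to:
 *
 *	<debugfile>				("%.0s" consumes <dir> without printing it)
 *	<dir>/<debugfile>
 *	<dir>/.debug/<debugfile>
 *	/usr/lib/debug<dir>/<debugfile>		(<dir> keeps its leading '/')
 *
 * i.e. the usual debuglink lookup order.
 */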
static const char * const debuglink_paths[] = {
	"%.0s%s",
	"%s/%s",
	"%s/.debug/%s",
	"/usr/lib/debug%s/%s"
};

char dso__symtab_origin(const struct dso *dso)
{
	static const char origin[] = {
		[DSO_BINARY_TYPE__KALLSYMS] = 'k',
		[DSO_BINARY_TYPE__VMLINUX] = 'v',
		[DSO_BINARY_TYPE__JAVA_JIT] = 'j',
		[DSO_BINARY_TYPE__DEBUGLINK] = 'l',
		[DSO_BINARY_TYPE__BUILD_ID_CACHE] = 'B',
		[DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO] = 'D',
		[DSO_BINARY_TYPE__FEDORA_DEBUGINFO] = 'f',
		[DSO_BINARY_TYPE__UBUNTU_DEBUGINFO] = 'u',
		[DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO] = 'x',
		[DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO] = 'o',
		[DSO_BINARY_TYPE__BUILDID_DEBUGINFO] = 'b',
		[DSO_BINARY_TYPE__SYSTEM_PATH_DSO] = 'd',
		[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE] = 'K',
		[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP] = 'm',
		[DSO_BINARY_TYPE__GUEST_KALLSYMS] = 'g',
		[DSO_BINARY_TYPE__GUEST_KMODULE] = 'G',
		[DSO_BINARY_TYPE__GUEST_KMODULE_COMP] = 'M',
		[DSO_BINARY_TYPE__GUEST_VMLINUX] = 'V',
	};

	if (dso == NULL || dso->symtab_type == DSO_BINARY_TYPE__NOT_FOUND)
		return '!';
	return origin[dso->symtab_type];
}

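/*
 * Fill @filename (at most @size bytes) with the on-disk path to try for
 * @dso, given the binary @type. As an illustration, for a DSO whose
 * build-id is "abcdef..." the DSO_BINARY_TYPE__BUILDID_DEBUGINFO case
 * below resolves to
 *
 *	<symfs>/usr/lib/debug/.build-id/ab/cdef....debug
 *
 * where <symfs> is empty unless symbol_conf.symfs is set.
 * Returns 0 when a candidate filename was produced, -1 otherwise.
 */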
int dso__read_binary_type_filename(const struct dso *dso,
				   enum dso_binary_type type,
				   char *root_dir, char *filename, size_t size)
{
	char build_id_hex[SBUILD_ID_SIZE];
	int ret = 0;
	size_t len;

	switch (type) {
	case DSO_BINARY_TYPE__DEBUGLINK:
	{
		const char *last_slash;
		char dso_dir[PATH_MAX];
		char symfile[PATH_MAX];
		unsigned int i;

		len = __symbol__join_symfs(filename, size, dso->long_name);
		last_slash = filename + len;
		while (last_slash != filename && *last_slash != '/')
			last_slash--;

		strncpy(dso_dir, filename, last_slash - filename);
		dso_dir[last_slash - filename] = '\0';

		if (!is_regular_file(filename)) {
			ret = -1;
			break;
		}

		ret = filename__read_debuglink(filename, symfile, PATH_MAX);
		if (ret)
			break;

		/* Check predefined locations where debug file might reside */
		ret = -1;
		for (i = 0; i < ARRAY_SIZE(debuglink_paths); i++) {
			snprintf(filename, size,
				 debuglink_paths[i], dso_dir, symfile);
			if (is_regular_file(filename)) {
				ret = 0;
				break;
			}
		}

		break;
	}
	case DSO_BINARY_TYPE__BUILD_ID_CACHE:
		if (dso__build_id_filename(dso, filename, size, false) == NULL)
			ret = -1;
		break;

	case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
		if (dso__build_id_filename(dso, filename, size, true) == NULL)
			ret = -1;
		break;

	case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
		snprintf(filename + len, size - len, "%s.debug", dso->long_name);
		break;

	case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
		snprintf(filename + len, size - len, "%s", dso->long_name);
		break;

	case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO:
		/*
		 * Ubuntu can mix up /usr/lib with /lib, putting debuginfo in
		 * /usr/lib/debug/lib when it is expected to be in
		 * /usr/lib/debug/usr/lib
		 */
		if (strlen(dso->long_name) < 9 ||
		    strncmp(dso->long_name, "/usr/lib/", 9)) {
			ret = -1;
			break;
		}
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
		snprintf(filename + len, size - len, "%s", dso->long_name + 4);
		break;

	case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
	{
		const char *last_slash;
		size_t dir_size;

		last_slash = dso->long_name + dso->long_name_len;
		while (last_slash != dso->long_name && *last_slash != '/')
			last_slash--;

		len = __symbol__join_symfs(filename, size, "");
		dir_size = last_slash - dso->long_name + 2;
		if (dir_size > (size - len)) {
			ret = -1;
			break;
		}
		len += scnprintf(filename + len, dir_size, "%s", dso->long_name);
		len += scnprintf(filename + len, size - len, ".debug%s",
				 last_slash);
		break;
	}

	case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
		if (!dso->has_build_id) {
			ret = -1;
			break;
		}

		build_id__sprintf(dso->build_id,
				  sizeof(dso->build_id),
				  build_id_hex);
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug/.build-id/");
		snprintf(filename + len, size - len, "%.2s/%s.debug",
			 build_id_hex, build_id_hex + 2);
		break;

	case DSO_BINARY_TYPE__VMLINUX:
	case DSO_BINARY_TYPE__GUEST_VMLINUX:
	case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
		__symbol__join_symfs(filename, size, dso->long_name);
		break;

	case DSO_BINARY_TYPE__GUEST_KMODULE:
	case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
		path__join3(filename, size, symbol_conf.symfs,
			    root_dir, dso->long_name);
		break;

	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
		__symbol__join_symfs(filename, size, dso->long_name);
		break;

	case DSO_BINARY_TYPE__KCORE:
	case DSO_BINARY_TYPE__GUEST_KCORE:
		snprintf(filename, size, "%s", dso->long_name);
		break;

	default:
	case DSO_BINARY_TYPE__KALLSYMS:
	case DSO_BINARY_TYPE__GUEST_KALLSYMS:
	case DSO_BINARY_TYPE__JAVA_JIT:
	case DSO_BINARY_TYPE__BPF_PROG_INFO:
	case DSO_BINARY_TYPE__BPF_IMAGE:
	case DSO_BINARY_TYPE__NOT_FOUND:
		ret = -1;
		break;
	}

	return ret;
}

enum {
	COMP_ID__NONE = 0,
};

static const struct {
	const char *fmt;
	int (*decompress)(const char *input, int output);
	bool (*is_compressed)(const char *input);
} compressions[] = {
	[COMP_ID__NONE] = { .fmt = NULL, },
#ifdef HAVE_ZLIB_SUPPORT
	{ "gz", gzip_decompress_to_file, gzip_is_compressed },
#endif
#ifdef HAVE_LZMA_SUPPORT
	{ "xz", lzma_decompress_to_file, lzma_is_compressed },
#endif
	{ NULL, NULL, NULL },
};

static int is_supported_compression(const char *ext)
{
	unsigned i;

	for (i = 1; compressions[i].fmt; i++) {
		if (!strcmp(ext, compressions[i].fmt))
			return i;
	}
	return COMP_ID__NONE;
}

bool is_kernel_module(const char *pathname, int cpumode)
{
	struct kmod_path m;
	int mode = cpumode & PERF_RECORD_MISC_CPUMODE_MASK;

	WARN_ONCE(mode != cpumode,
		  "Internal error: passing unmasked cpumode (%x) to is_kernel_module",
		  cpumode);

	switch (mode) {
	case PERF_RECORD_MISC_USER:
	case PERF_RECORD_MISC_HYPERVISOR:
	case PERF_RECORD_MISC_GUEST_USER:
		return false;
	/* Treat PERF_RECORD_MISC_CPUMODE_UNKNOWN as kernel */
	default:
		if (kmod_path__parse(&m, pathname)) {
			pr_err("Failed to check whether %s is a kernel module or not. Assume it is.",
			       pathname);
			return true;
		}
	}

	return m.kmod;
}

bool dso__needs_decompress(struct dso *dso)
{
	return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
	       dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
}

static int decompress_kmodule(struct dso *dso, const char *name,
			      char *pathname, size_t len)
{
	char tmpbuf[] = KMOD_DECOMP_NAME;
	int fd = -1;

	if (!dso__needs_decompress(dso))
		return -1;

	if (dso->comp == COMP_ID__NONE)
		return -1;

	/*
	 * We have a proper compression id for the DSO and yet the file
	 * behind 'name' can still be a plain uncompressed object.
	 *
	 * The reason lies in the way we open DSO object files: we try
	 * all possible 'debug' objects until we find the data. So even
	 * if the DSO is represented by a 'krava.xz' module, we can end
	 * up here opening a ~/.debug/....23432432/debug' file which is
	 * not compressed.
	 *
	 * To keep this transparent, we detect this and return a file
	 * descriptor to the uncompressed file.
	 */
	if (!compressions[dso->comp].is_compressed(name))
		return open(name, O_RDONLY);

	fd = mkstemp(tmpbuf);
	if (fd < 0) {
		dso->load_errno = errno;
		return -1;
	}

	if (compressions[dso->comp].decompress(name, fd)) {
		dso->load_errno = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE;
		close(fd);
		fd = -1;
	}

	if (!pathname || (fd < 0))
		unlink(tmpbuf);

	if (pathname && (fd >= 0))
		strlcpy(pathname, tmpbuf, len);

	return fd;
}

int dso__decompress_kmodule_fd(struct dso *dso, const char *name)
{
	return decompress_kmodule(dso, name, NULL, 0);
}

int dso__decompress_kmodule_path(struct dso *dso, const char *name,
				 char *pathname, size_t len)
{
	int fd = decompress_kmodule(dso, name, pathname, len);

	close(fd);
	return fd >= 0 ? 0 : -1;
}

/*
 * Parses the kernel module specified in @path and updates the @m
 * argument as follows:
 *
 *    @comp - true if @path contains a supported compression suffix,
 *            false otherwise
 *    @kmod - true if @path contains a '.ko' suffix in the right position,
 *            false otherwise
 *    @name - if (@alloc_name && @kmod) is true, it contains the strdup-ed
 *            base name of the kernel module without suffixes, otherwise
 *            the strdup-ed base name of @path
 *
 * Returns 0 if there's no strdup error, -ENOMEM otherwise.
 */
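/*
 * For example (path purely illustrative), parsing
 *
 *	/lib/modules/5.4.0/kernel/fs/xfs/xfs.ko.xz
 *
 * with alloc_name=true yields m->kmod = true, m->comp set to the xz
 * entry of the compressions[] table (when lzma support is built in)
 * and m->name = "[xfs]"; any '-' in the module name would be turned
 * into '_' by the strreplace() call below.
 */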
int __kmod_path__parse(struct kmod_path *m, const char *path,
		       bool alloc_name)
{
	const char *name = strrchr(path, '/');
	const char *ext  = strrchr(path, '.');
	bool is_simple_name = false;

	memset(m, 0x0, sizeof(*m));
	name = name ? name + 1 : path;

	/*
	 * '.' is also a valid character for module name. For example:
	 * [aaa.bbb] is a valid module name. '[' should have higher
	 * priority than '.ko' suffix.
	 *
	 * The kernel names are from machine__mmap_name. Such a name
	 * should belong to the kernel itself, not to a kernel module.
	 */
	if (name[0] == '[') {
		is_simple_name = true;
		if ((strncmp(name, "[kernel.kallsyms]", 17) == 0) ||
		    (strncmp(name, "[guest.kernel.kallsyms", 22) == 0) ||
		    (strncmp(name, "[vdso]", 6) == 0) ||
		    (strncmp(name, "[vdso32]", 8) == 0) ||
		    (strncmp(name, "[vdsox32]", 9) == 0) ||
		    (strncmp(name, "[vsyscall]", 10) == 0)) {
			m->kmod = false;

		} else
			m->kmod = true;
	}

	/* No extension, just return name. */
	if ((ext == NULL) || is_simple_name) {
		if (alloc_name) {
			m->name = strdup(name);
			return m->name ? 0 : -ENOMEM;
		}
		return 0;
	}

	m->comp = is_supported_compression(ext + 1);
	if (m->comp > COMP_ID__NONE)
		ext -= 3;

	/* Check .ko extension only if there's enough name left. */
	if (ext > name)
		m->kmod = !strncmp(ext, ".ko", 3);

	if (alloc_name) {
		if (m->kmod) {
			if (asprintf(&m->name, "[%.*s]", (int) (ext - name), name) == -1)
				return -ENOMEM;
		} else {
			if (asprintf(&m->name, "%s", name) == -1)
				return -ENOMEM;
		}

		strreplace(m->name, '-', '_');
	}

	return 0;
}

void dso__set_module_info(struct dso *dso, struct kmod_path *m,
			  struct machine *machine)
{
	if (machine__is_host(machine))
		dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
	else
		dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;

	/* _KMODULE_COMP should be next to _KMODULE */
	if (m->kmod && m->comp) {
		dso->symtab_type++;
		dso->comp = m->comp;
	}

	dso__set_short_name(dso, strdup(m->name), true);
}

/*
 * Global list of open DSOs and the counter.
 */
static LIST_HEAD(dso__data_open);
static long dso__data_open_cnt;
static pthread_mutex_t dso__data_open_lock = PTHREAD_MUTEX_INITIALIZER;

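/*
 * dso__list_add() appends at the tail, so close_first_dso() always
 * drops the descriptor that has been open the longest. do_open()
 * falls back to this when open() runs out of file descriptors
 * (EMFILE), and check_data_close() uses it to stay within the budget
 * computed by may_cache_fd().
 */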
static void dso__list_add(struct dso *dso)
{
	list_add_tail(&dso->data.open_entry, &dso__data_open);
	dso__data_open_cnt++;
}

static void dso__list_del(struct dso *dso)
{
	list_del_init(&dso->data.open_entry);
	WARN_ONCE(dso__data_open_cnt <= 0,
		  "DSO data fd counter out of bounds.");
	dso__data_open_cnt--;
}

static void close_first_dso(void);

static int do_open(char *name)
{
	int fd;
	char sbuf[STRERR_BUFSIZE];

	do {
		fd = open(name, O_RDONLY|O_CLOEXEC);
		if (fd >= 0)
			return fd;

		pr_debug("dso open failed: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		if (!dso__data_open_cnt || errno != EMFILE)
			break;

		close_first_dso();
	} while (1);

	return -1;
}

static int __open_dso(struct dso *dso, struct machine *machine)
{
	int fd = -EINVAL;
	char *root_dir = (char *)"";
	char *name = malloc(PATH_MAX);
	bool decomp = false;

	if (!name)
		return -ENOMEM;

	if (machine)
		root_dir = machine->root_dir;

	if (dso__read_binary_type_filename(dso, dso->binary_type,
					   root_dir, name, PATH_MAX))
		goto out;

	if (!is_regular_file(name))
		goto out;

	if (dso__needs_decompress(dso)) {
		char newpath[KMOD_DECOMP_LEN];
		size_t len = sizeof(newpath);

		if (dso__decompress_kmodule_path(dso, name, newpath, len) < 0) {
			fd = -dso->load_errno;
			goto out;
		}

		decomp = true;
		strcpy(name, newpath);
	}

	fd = do_open(name);

	if (decomp)
		unlink(name);

out:
	free(name);
	return fd;
}

static void check_data_close(void);

/**
 * open_dso - Open DSO data file
 * @dso: dso object
 * @machine: machine object
 *
 * Open @dso's data file descriptor and update the
 * list/count of open DSO objects.
 */
static int open_dso(struct dso *dso, struct machine *machine)
{
	int fd;
	struct nscookie nsc;

	if (dso->binary_type != DSO_BINARY_TYPE__BUILD_ID_CACHE)
		nsinfo__mountns_enter(dso->nsinfo, &nsc);
	fd = __open_dso(dso, machine);
	if (dso->binary_type != DSO_BINARY_TYPE__BUILD_ID_CACHE)
		nsinfo__mountns_exit(&nsc);

	if (fd >= 0) {
		dso__list_add(dso);
		/*
		 * Check if we crossed the allowed number
		 * of opened DSOs and close one if needed.
		 */
		check_data_close();
	}

	return fd;
}

static void close_data_fd(struct dso *dso)
{
	if (dso->data.fd >= 0) {
		close(dso->data.fd);
		dso->data.fd = -1;
		dso->data.file_size = 0;
		dso__list_del(dso);
	}
}

/**
 * close_dso - Close DSO data file
 * @dso: dso object
 *
 * Close @dso's data file descriptor and update the
 * list/count of open DSO objects.
 */
static void close_dso(struct dso *dso)
{
	close_data_fd(dso);
}

static void close_first_dso(void)
{
	struct dso *dso;

	dso = list_first_entry(&dso__data_open, struct dso, data.open_entry);
	close_dso(dso);
}

static rlim_t get_fd_limit(void)
{
	struct rlimit l;
	rlim_t limit = 0;

	/* Allow half of the current open fd limit. */
	if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
		if (l.rlim_cur == RLIM_INFINITY)
			limit = l.rlim_cur;
		else
			limit = l.rlim_cur / 2;
	} else {
		pr_err("failed to get fd limit\n");
		limit = 1;
	}

	return limit;
}

static rlim_t fd_limit;

/*
 * Used only by tests/dso-data.c to reset the environment
 * for tests. I don't expect we should change this during
 * standard runtime.
 */
void reset_fd_limit(void)
{
	fd_limit = 0;
}

static bool may_cache_fd(void)
{
	if (!fd_limit)
		fd_limit = get_fd_limit();

	if (fd_limit == RLIM_INFINITY)
		return true;

	return fd_limit > (rlim_t) dso__data_open_cnt;
}

/*
 * Check and close LRU dso if we crossed allowed limit
 * for opened dso file descriptors. The limit is half
 * of the RLIMIT_NOFILE files opened.
 */
static void check_data_close(void)
{
	bool cache_fd = may_cache_fd();

	if (!cache_fd)
		close_first_dso();
}

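/*
 * Example: with RLIMIT_NOFILE set to 1024, get_fd_limit() allows up to
 * 512 cached descriptors; once that many DSOs are open, each further
 * open_dso() immediately closes the oldest one again via
 * check_data_close(), so the count never exceeds the limit.
 */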

/**
 * dso__data_close - Close DSO data file
 * @dso: dso object
 *
 * External interface to close @dso's data file descriptor.
 */
void dso__data_close(struct dso *dso)
{
	pthread_mutex_lock(&dso__data_open_lock);
	close_dso(dso);
	pthread_mutex_unlock(&dso__data_open_lock);
}

static void try_to_open_dso(struct dso *dso, struct machine *machine)
{
	enum dso_binary_type binary_type_data[] = {
		DSO_BINARY_TYPE__BUILD_ID_CACHE,
		DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
		DSO_BINARY_TYPE__NOT_FOUND,
	};
	int i = 0;

	if (dso->data.fd >= 0)
		return;

	if (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND) {
		dso->data.fd = open_dso(dso, machine);
		goto out;
	}

	do {
		dso->binary_type = binary_type_data[i++];

		dso->data.fd = open_dso(dso, machine);
		if (dso->data.fd >= 0)
			goto out;

	} while (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND);
out:
	if (dso->data.fd >= 0)
		dso->data.status = DSO_DATA_STATUS_OK;
	else
		dso->data.status = DSO_DATA_STATUS_ERROR;
}

/**
 * dso__data_get_fd - Get dso's data file descriptor
 * @dso: dso object
 * @machine: machine object
 *
 * External interface to find dso's file, open it and
 * return its file descriptor. It should be paired with
 * dso__data_put_fd() if it returns a non-negative value.
 */
int dso__data_get_fd(struct dso *dso, struct machine *machine)
{
	if (dso->data.status == DSO_DATA_STATUS_ERROR)
		return -1;

	if (pthread_mutex_lock(&dso__data_open_lock) < 0)
		return -1;

	try_to_open_dso(dso, machine);

	if (dso->data.fd < 0)
		pthread_mutex_unlock(&dso__data_open_lock);

	return dso->data.fd;
}

void dso__data_put_fd(struct dso *dso __maybe_unused)
{
	pthread_mutex_unlock(&dso__data_open_lock);
}

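/*
 * Typical usage of the get/put pair above (dso__type() at the bottom
 * of this file is an in-tree example):
 *
 *	int fd = dso__data_get_fd(dso, machine);
 *
 *	if (fd >= 0) {
 *		... use fd, dso__data_open_lock is held ...
 *		dso__data_put_fd(dso);
 *	}
 */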

bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by)
{
	u32 flag = 1 << by;

	if (dso->data.status_seen & flag)
		return true;

	dso->data.status_seen |= flag;

	return false;
}

static ssize_t bpf_read(struct dso *dso, u64 offset, char *data)
{
	struct bpf_prog_info_node *node;
	ssize_t size = DSO__DATA_CACHE_SIZE;
	u64 len;
	u8 *buf;

	node = perf_env__find_bpf_prog_info(dso->bpf_prog.env, dso->bpf_prog.id);
	if (!node || !node->info_linear) {
		dso->data.status = DSO_DATA_STATUS_ERROR;
		return -1;
	}

	len = node->info_linear->info.jited_prog_len;
	buf = (u8 *)(uintptr_t)node->info_linear->info.jited_prog_insns;

	if (offset >= len)
		return -1;

	size = (ssize_t)min(len - offset, (u64)size);
	memcpy(data, buf + offset, size);
	return size;
}

static int bpf_size(struct dso *dso)
{
	struct bpf_prog_info_node *node;

	node = perf_env__find_bpf_prog_info(dso->bpf_prog.env, dso->bpf_prog.id);
	if (!node || !node->info_linear) {
		dso->data.status = DSO_DATA_STATUS_ERROR;
		return -1;
	}

	dso->data.file_size = node->info_linear->info.jited_prog_len;
	return 0;
}

static void
dso_cache__free(struct dso *dso)
{
	struct rb_root *root = &dso->data.cache;
	struct rb_node *next = rb_first(root);

	pthread_mutex_lock(&dso->lock);
	while (next) {
		struct dso_cache *cache;

		cache = rb_entry(next, struct dso_cache, rb_node);
		next = rb_next(&cache->rb_node);
		rb_erase(&cache->rb_node, root);
		free(cache);
	}
	pthread_mutex_unlock(&dso->lock);
}

static struct dso_cache *__dso_cache__find(struct dso *dso, u64 offset)
{
	const struct rb_root *root = &dso->data.cache;
	struct rb_node * const *p = &root->rb_node;
	const struct rb_node *parent = NULL;
	struct dso_cache *cache;

	while (*p != NULL) {
		u64 end;

		parent = *p;
		cache = rb_entry(parent, struct dso_cache, rb_node);
		end = cache->offset + DSO__DATA_CACHE_SIZE;

		if (offset < cache->offset)
			p = &(*p)->rb_left;
		else if (offset >= end)
			p = &(*p)->rb_right;
		else
			return cache;
	}

	return NULL;
}

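/*
 * dso_cache__insert() returns NULL when @new was linked into the tree,
 * or the node already covering that offset range when another thread
 * beat us to it; dso_cache__populate() then frees its own copy and
 * reuses the existing entry.
 */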
static struct dso_cache *
dso_cache__insert(struct dso *dso, struct dso_cache *new)
{
	struct rb_root *root = &dso->data.cache;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct dso_cache *cache;
	u64 offset = new->offset;

	pthread_mutex_lock(&dso->lock);
	while (*p != NULL) {
		u64 end;

		parent = *p;
		cache = rb_entry(parent, struct dso_cache, rb_node);
		end = cache->offset + DSO__DATA_CACHE_SIZE;

		if (offset < cache->offset)
			p = &(*p)->rb_left;
		else if (offset >= end)
			p = &(*p)->rb_right;
		else
			goto out;
	}

	rb_link_node(&new->rb_node, parent, p);
	rb_insert_color(&new->rb_node, root);

	cache = NULL;
out:
	pthread_mutex_unlock(&dso->lock);
	return cache;
}

static ssize_t dso_cache__memcpy(struct dso_cache *cache, u64 offset, u8 *data,
				 u64 size, bool out)
{
	u64 cache_offset = offset - cache->offset;
	u64 cache_size = min(cache->size - cache_offset, size);

	if (out)
		memcpy(data, cache->data + cache_offset, cache_size);
	else
		memcpy(cache->data + cache_offset, data, cache_size);
	return cache_size;
}

static ssize_t file_read(struct dso *dso, struct machine *machine,
			 u64 offset, char *data)
{
	ssize_t ret;

	pthread_mutex_lock(&dso__data_open_lock);

	/*
	 * dso->data.fd might be closed if other thread opened another
	 * file (dso) due to open file limit (RLIMIT_NOFILE).
	 */
	try_to_open_dso(dso, machine);

	if (dso->data.fd < 0) {
		dso->data.status = DSO_DATA_STATUS_ERROR;
		ret = -errno;
		goto out;
	}

	ret = pread(dso->data.fd, data, DSO__DATA_CACHE_SIZE, offset);
out:
	pthread_mutex_unlock(&dso__data_open_lock);
	return ret;
}

static struct dso_cache *dso_cache__populate(struct dso *dso,
					     struct machine *machine,
					     u64 offset, ssize_t *ret)
{
	u64 cache_offset = offset & DSO__DATA_CACHE_MASK;
	struct dso_cache *cache;
	struct dso_cache *old;

	cache = zalloc(sizeof(*cache) + DSO__DATA_CACHE_SIZE);
	if (!cache) {
		*ret = -ENOMEM;
		return NULL;
	}

	if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO)
		*ret = bpf_read(dso, cache_offset, cache->data);
	else
		*ret = file_read(dso, machine, cache_offset, cache->data);

	if (*ret <= 0) {
		free(cache);
		return NULL;
	}

	cache->offset = cache_offset;
	cache->size = *ret;

	old = dso_cache__insert(dso, cache);
	if (old) {
		/* we lose the race */
		free(cache);
		cache = old;
	}

	return cache;
}

static struct dso_cache *dso_cache__find(struct dso *dso,
					 struct machine *machine,
					 u64 offset,
					 ssize_t *ret)
{
	struct dso_cache *cache = __dso_cache__find(dso, offset);

	return cache ? cache : dso_cache__populate(dso, machine, offset, ret);
}

static ssize_t dso_cache_io(struct dso *dso, struct machine *machine,
			    u64 offset, u8 *data, ssize_t size, bool out)
{
	struct dso_cache *cache;
	ssize_t ret = 0;

	cache = dso_cache__find(dso, machine, offset, &ret);
	if (!cache)
		return ret;

	return dso_cache__memcpy(cache, offset, data, size, out);
}

/*
 * Reads and caches dso data in DSO__DATA_CACHE_SIZE-sized chunks kept
 * in the rb_tree. Any read of already cached data is served from the
 * cache. Writes update the cache only, not the backing file.
 */
static ssize_t cached_io(struct dso *dso, struct machine *machine,
			 u64 offset, u8 *data, ssize_t size, bool out)
{
	ssize_t r = 0;
	u8 *p = data;

	do {
		ssize_t ret;

		ret = dso_cache_io(dso, machine, offset, p, size, out);
		if (ret < 0)
			return ret;

		/* Reached EOF, return what we have. */
		if (!ret)
			break;

		BUG_ON(ret > size);

		r += ret;
		p += ret;
		offset += ret;
		size -= ret;

	} while (size);

	return r;
}

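/*
 * Illustration: a read that starts in the middle of a cache chunk is
 * carried out as several dso_cache_io() calls, each one bounded by the
 * end of a single DSO__DATA_CACHE_SIZE chunk; cached_io() keeps
 * advancing @offset and @p until @size bytes are copied or EOF is hit.
 */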

static int file_size(struct dso *dso, struct machine *machine)
{
	int ret = 0;
	struct stat st;
	char sbuf[STRERR_BUFSIZE];

	pthread_mutex_lock(&dso__data_open_lock);

	/*
	 * dso->data.fd might be closed if other thread opened another
	 * file (dso) due to open file limit (RLIMIT_NOFILE).
	 */
	try_to_open_dso(dso, machine);

	if (dso->data.fd < 0) {
		ret = -errno;
		dso->data.status = DSO_DATA_STATUS_ERROR;
		goto out;
	}

	if (fstat(dso->data.fd, &st) < 0) {
		ret = -errno;
		pr_err("dso cache fstat failed: %s\n",
		       str_error_r(errno, sbuf, sizeof(sbuf)));
		dso->data.status = DSO_DATA_STATUS_ERROR;
		goto out;
	}
	dso->data.file_size = st.st_size;

out:
	pthread_mutex_unlock(&dso__data_open_lock);
	return ret;
}

int dso__data_file_size(struct dso *dso, struct machine *machine)
{
	if (dso->data.file_size)
		return 0;

	if (dso->data.status == DSO_DATA_STATUS_ERROR)
		return -1;

	if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO)
		return bpf_size(dso);

	return file_size(dso, machine);
}

/**
 * dso__data_size - Return dso data size
 * @dso: dso object
 * @machine: machine object
 *
 * Return: dso data size
 */
off_t dso__data_size(struct dso *dso, struct machine *machine)
{
	if (dso__data_file_size(dso, machine))
		return -1;

	/* For now just estimate dso data size is close to file size */
	return dso->data.file_size;
}

static ssize_t data_read_write_offset(struct dso *dso, struct machine *machine,
				      u64 offset, u8 *data, ssize_t size,
				      bool out)
{
	if (dso__data_file_size(dso, machine))
		return -1;

	/* Check the offset sanity. */
	if (offset > dso->data.file_size)
		return -1;

	if (offset + size < offset)
		return -1;

	return cached_io(dso, machine, offset, data, size, out);
}

/**
 * dso__data_read_offset - Read data from dso file offset
 * @dso: dso object
 * @machine: machine object
 * @offset: file offset
 * @data: buffer to store data
 * @size: size of the @data buffer
 *
 * External interface to read data from dso file offset. Open
 * dso data file and use cached_io() to get the data.
 */
ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
			      u64 offset, u8 *data, ssize_t size)
{
	if (dso->data.status == DSO_DATA_STATUS_ERROR)
		return -1;

	return data_read_write_offset(dso, machine, offset, data, size, true);
}

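/*
 * For instance (sketch), reading the first bytes of a DSO through the
 * cache layer:
 *
 *	u8 buf[16];
 *	ssize_t n = dso__data_read_offset(dso, machine, 0, buf, sizeof(buf));
 *
 * where n is the number of bytes copied (possibly short near EOF) or a
 * negative value on error.
 */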

/**
 * dso__data_read_addr - Read data from dso address
 * @dso: dso object
 * @map: memory map of the dso
 * @machine: machine object
 * @addr: virtual memory address
 * @data: buffer to store data
 * @size: size of the @data buffer
 *
 * External interface to read data from dso address.
 */
ssize_t dso__data_read_addr(struct dso *dso, struct map *map,
			    struct machine *machine, u64 addr,
			    u8 *data, ssize_t size)
{
	u64 offset = map->map_ip(map, addr);

	return dso__data_read_offset(dso, machine, offset, data, size);
}

/**
 * dso__data_write_cache_offs - Write data to dso data cache at file offset
 * @dso: dso object
 * @machine: machine object
 * @offset: file offset
 * @data_in: buffer to write
 * @size: size of the @data_in buffer
 *
 * Write into the dso file data cache, but do not change the file itself.
 */
ssize_t dso__data_write_cache_offs(struct dso *dso, struct machine *machine,
				   u64 offset, const u8 *data_in, ssize_t size)
{
	u8 *data = (u8 *)data_in; /* cast away const to use same fns for r/w */

	if (dso->data.status == DSO_DATA_STATUS_ERROR)
		return -1;

	return data_read_write_offset(dso, machine, offset, data, size, false);
}

/**
 * dso__data_write_cache_addr - Write data to dso data cache at dso address
 * @dso: dso object
 * @map: memory map of the dso
 * @machine: machine object
 * @addr: virtual memory address
 * @data: buffer to write
 * @size: size of the @data buffer
 *
 * External interface to write into the dso file data cache, but do not change
 * the file itself.
 */
ssize_t dso__data_write_cache_addr(struct dso *dso, struct map *map,
				   struct machine *machine, u64 addr,
				   const u8 *data, ssize_t size)
{
	u64 offset = map->map_ip(map, addr);

	return dso__data_write_cache_offs(dso, machine, offset, data, size);
}

struct map *dso__new_map(const char *name)
{
	struct map *map = NULL;
	struct dso *dso = dso__new(name);

	if (dso)
		map = map__new2(0, dso);

	return map;
}

struct dso *machine__findnew_kernel(struct machine *machine, const char *name,
				    const char *short_name, int dso_type)
{
	/*
	 * The kernel dso could be created by build_id processing.
	 */
	struct dso *dso = machine__findnew_dso(machine, name);

	/*
	 * We need to run this in all cases, since during the build_id
	 * processing we had no idea this was the kernel dso.
	 */
	if (dso != NULL) {
		dso__set_short_name(dso, short_name, false);
		dso->kernel = dso_type;
	}

	return dso;
}

static void dso__set_long_name_id(struct dso *dso, const char *name, struct dso_id *id, bool name_allocated)
{
	struct rb_root *root = dso->root;

	if (name == NULL)
		return;

	if (dso->long_name_allocated)
		free((char *)dso->long_name);

	if (root) {
		rb_erase(&dso->rb_node, root);
		/*
		 * __dsos__findnew_link_by_longname_id() isn't guaranteed to
		 * add it back, so a clean removal is required here.
		 */
		RB_CLEAR_NODE(&dso->rb_node);
		dso->root = NULL;
	}

	dso->long_name = name;
	dso->long_name_len = strlen(name);
	dso->long_name_allocated = name_allocated;

	if (root)
		__dsos__findnew_link_by_longname_id(root, dso, NULL, id);
}

void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated)
{
	dso__set_long_name_id(dso, name, NULL, name_allocated);
}

void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
{
	if (name == NULL)
		return;

	if (dso->short_name_allocated)
		free((char *)dso->short_name);

	dso->short_name = name;
	dso->short_name_len = strlen(name);
	dso->short_name_allocated = name_allocated;
}

int dso__name_len(const struct dso *dso)
{
	if (!dso)
		return strlen("[unknown]");
	if (verbose > 0)
		return dso->long_name_len;

	return dso->short_name_len;
}

bool dso__loaded(const struct dso *dso)
{
	return dso->loaded;
}

bool dso__sorted_by_name(const struct dso *dso)
{
	return dso->sorted_by_name;
}

void dso__set_sorted_by_name(struct dso *dso)
{
	dso->sorted_by_name = true;
}

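/*
 * Note that dso__new_id() allocates the struct and its name in a single
 * block: @name is copied into the char array at the end of struct dso,
 * so it lives and dies with the dso itself and must not be freed
 * separately.
 */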
struct dso *dso__new_id(const char *name, struct dso_id *id)
{
	struct dso *dso = calloc(1, sizeof(*dso) + strlen(name) + 1);

	if (dso != NULL) {
		strcpy(dso->name, name);
		if (id)
			dso->id = *id;
		dso__set_long_name_id(dso, dso->name, id, false);
		dso__set_short_name(dso, dso->name, false);
		dso->symbols = dso->symbol_names = RB_ROOT_CACHED;
		dso->data.cache = RB_ROOT;
		dso->inlined_nodes = RB_ROOT_CACHED;
		dso->srclines = RB_ROOT_CACHED;
		dso->data.fd = -1;
		dso->data.status = DSO_DATA_STATUS_UNKNOWN;
		dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND;
		dso->binary_type = DSO_BINARY_TYPE__NOT_FOUND;
		dso->is_64_bit = (sizeof(void *) == 8);
		dso->loaded = 0;
		dso->rel = 0;
		dso->sorted_by_name = 0;
		dso->has_build_id = 0;
		dso->has_srcline = 1;
		dso->a2l_fails = 1;
		dso->kernel = DSO_TYPE_USER;
		dso->needs_swap = DSO_SWAP__UNSET;
		dso->comp = COMP_ID__NONE;
		RB_CLEAR_NODE(&dso->rb_node);
		dso->root = NULL;
		INIT_LIST_HEAD(&dso->node);
		INIT_LIST_HEAD(&dso->data.open_entry);
		pthread_mutex_init(&dso->lock, NULL);
		refcount_set(&dso->refcnt, 1);
	}

	return dso;
}

struct dso *dso__new(const char *name)
{
	return dso__new_id(name, NULL);
}

void dso__delete(struct dso *dso)
{
	if (!RB_EMPTY_NODE(&dso->rb_node))
		pr_err("DSO %s is still in rbtree when being deleted!\n",
		       dso->long_name);

	/* free inlines first, as they reference symbols */
	inlines__tree_delete(&dso->inlined_nodes);
	srcline__tree_delete(&dso->srclines);
	symbols__delete(&dso->symbols);

	if (dso->short_name_allocated) {
		zfree((char **)&dso->short_name);
		dso->short_name_allocated = false;
	}

	if (dso->long_name_allocated) {
		zfree((char **)&dso->long_name);
		dso->long_name_allocated = false;
	}

	dso__data_close(dso);
	auxtrace_cache__free(dso->auxtrace_cache);
	dso_cache__free(dso);
	dso__free_a2l(dso);
	zfree(&dso->symsrc_filename);
	nsinfo__zput(dso->nsinfo);
	pthread_mutex_destroy(&dso->lock);
	free(dso);
}

struct dso *dso__get(struct dso *dso)
{
	if (dso)
		refcount_inc(&dso->refcnt);
	return dso;
}

void dso__put(struct dso *dso)
{
	if (dso && refcount_dec_and_test(&dso->refcnt))
		dso__delete(dso);
}

void dso__set_build_id(struct dso *dso, void *build_id)
{
	memcpy(dso->build_id, build_id, sizeof(dso->build_id));
	dso->has_build_id = 1;
}

bool dso__build_id_equal(const struct dso *dso, u8 *build_id)
{
	return memcmp(dso->build_id, build_id, sizeof(dso->build_id)) == 0;
}

void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine)
{
	char path[PATH_MAX];

	if (machine__is_default_guest(machine))
		return;
	sprintf(path, "%s/sys/kernel/notes", machine->root_dir);
	if (sysfs__read_build_id(path, dso->build_id,
				 sizeof(dso->build_id)) == 0)
		dso->has_build_id = true;
}

int dso__kernel_module_get_build_id(struct dso *dso,
				    const char *root_dir)
{
	char filename[PATH_MAX];
	/*
	 * kernel module short names are of the form "[module]" and
	 * we need just "module" here.
	 */
	const char *name = dso->short_name + 1;

	snprintf(filename, sizeof(filename),
		 "%s/sys/module/%.*s/notes/.note.gnu.build-id",
		 root_dir, (int)strlen(name) - 1, name);

	if (sysfs__read_build_id(filename, dso->build_id,
				 sizeof(dso->build_id)) == 0)
		dso->has_build_id = true;

	return 0;
}

size_t dso__fprintf_buildid(struct dso *dso, FILE *fp)
{
	char sbuild_id[SBUILD_ID_SIZE];

	build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
	return fprintf(fp, "%s", sbuild_id);
}

size_t dso__fprintf(struct dso *dso, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = fprintf(fp, "dso: %s (", dso->short_name);

	if (dso->short_name != dso->long_name)
		ret += fprintf(fp, "%s, ", dso->long_name);
	ret += fprintf(fp, "%sloaded, ", dso__loaded(dso) ? "" : "NOT ");
	ret += dso__fprintf_buildid(dso, fp);
	ret += fprintf(fp, ")\n");
	for (nd = rb_first_cached(&dso->symbols); nd; nd = rb_next(nd)) {
		struct symbol *pos = rb_entry(nd, struct symbol, rb_node);

		ret += symbol__fprintf(pos, fp);
	}

	return ret;
}

enum dso_type dso__type(struct dso *dso, struct machine *machine)
{
	int fd;
	enum dso_type type = DSO__TYPE_UNKNOWN;

	fd = dso__data_get_fd(dso, machine);
	if (fd >= 0) {
		type = dso__type_fd(fd);
		dso__data_put_fd(dso);
	}

	return type;
}

int dso__strerror_load(struct dso *dso, char *buf, size_t buflen)
{
	int idx, errnum = dso->load_errno;
	/*
	 * This must have the same ordering as the enum dso_load_errno.
	 */
	static const char *dso_load__error_str[] = {
		"Internal tools/perf/ library error",
		"Invalid ELF file",
		"Can not read build id",
		"Mismatching build id",
		"Decompression failure",
	};

	BUG_ON(buflen == 0);

	if (errnum >= 0) {
		const char *err = str_error_r(errnum, buf, buflen);

		if (err != buf)
			scnprintf(buf, buflen, "%s", err);

		return 0;
	}

	if (errnum < __DSO_LOAD_ERRNO__START || errnum >= __DSO_LOAD_ERRNO__END)
		return -1;

	idx = errnum - __DSO_LOAD_ERRNO__START;
	scnprintf(buf, buflen, "%s", dso_load__error_str[idx]);
	return 0;
}