// SPDX-License-Identifier: GPL-2.0
#include <asm/bug.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#include <stdlib.h>
#ifdef HAVE_LIBBPF_SUPPORT
#include <bpf/libbpf.h>
#include "bpf-event.h"
#endif
#include "compress.h"
#include "env.h"
#include "namespaces.h"
#include "path.h"
#include "map.h"
#include "symbol.h"
#include "srcline.h"
#include "dso.h"
#include "dsos.h"
#include "machine.h"
#include "auxtrace.h"
#include "util.h" /* O_CLOEXEC for older systems */
#include "debug.h"
#include "string2.h"
#include "vdso.h"

static const char * const debuglink_paths[] = {
	"%.0s%s",
	"%s/%s",
	"%s/.debug/%s",
	"/usr/lib/debug%s/%s"
};

char dso__symtab_origin(const struct dso *dso)
{
	static const char origin[] = {
		[DSO_BINARY_TYPE__KALLSYMS] = 'k',
		[DSO_BINARY_TYPE__VMLINUX] = 'v',
		[DSO_BINARY_TYPE__JAVA_JIT] = 'j',
		[DSO_BINARY_TYPE__DEBUGLINK] = 'l',
		[DSO_BINARY_TYPE__BUILD_ID_CACHE] = 'B',
		[DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO] = 'D',
		[DSO_BINARY_TYPE__FEDORA_DEBUGINFO] = 'f',
		[DSO_BINARY_TYPE__UBUNTU_DEBUGINFO] = 'u',
		[DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO] = 'x',
		[DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO] = 'o',
		[DSO_BINARY_TYPE__BUILDID_DEBUGINFO] = 'b',
		[DSO_BINARY_TYPE__SYSTEM_PATH_DSO] = 'd',
		[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE] = 'K',
		[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP] = 'm',
		[DSO_BINARY_TYPE__GUEST_KALLSYMS] = 'g',
		[DSO_BINARY_TYPE__GUEST_KMODULE] = 'G',
		[DSO_BINARY_TYPE__GUEST_KMODULE_COMP] = 'M',
		[DSO_BINARY_TYPE__GUEST_VMLINUX] = 'V',
	};

	if (dso == NULL || dso->symtab_type == DSO_BINARY_TYPE__NOT_FOUND)
		return '!';
	return origin[dso->symtab_type];
}

int dso__read_binary_type_filename(const struct dso *dso,
				   enum dso_binary_type type,
				   char *root_dir, char *filename, size_t size)
{
	char build_id_hex[SBUILD_ID_SIZE];
	int ret = 0;
	size_t len;

	switch (type) {
	case DSO_BINARY_TYPE__DEBUGLINK:
	{
		const char *last_slash;
		char dso_dir[PATH_MAX];
		char symfile[PATH_MAX];
		unsigned int i;

		len = __symbol__join_symfs(filename, size, dso->long_name);
		last_slash = filename + len;
		while (last_slash != filename && *last_slash != '/')
			last_slash--;

		strncpy(dso_dir, filename, last_slash - filename);
		dso_dir[last_slash-filename] = '\0';

		if (!is_regular_file(filename)) {
			ret = -1;
			break;
		}

		ret = filename__read_debuglink(filename, symfile, PATH_MAX);
		if (ret)
			break;

		/* Check predefined locations where debug file might reside */
		ret = -1;
		for (i = 0; i < ARRAY_SIZE(debuglink_paths); i++) {
			snprintf(filename, size,
				 debuglink_paths[i], dso_dir, symfile);
			if (is_regular_file(filename)) {
				ret = 0;
				break;
			}
		}

		break;
	}
	case DSO_BINARY_TYPE__BUILD_ID_CACHE:
		if (dso__build_id_filename(dso, filename, size, false) == NULL)
			ret = -1;
		break;

	case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
		if (dso__build_id_filename(dso, filename, size, true) == NULL)
			ret = -1;
		break;

	case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
		snprintf(filename + len, size - len, "%s.debug",
			 dso->long_name);
		break;

	case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
		snprintf(filename + len, size - len, "%s", dso->long_name);
		break;

	case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO:
		/*
		 * Ubuntu can mix up /usr/lib with /lib, putting debuginfo in
		 * /usr/lib/debug/lib when it is expected to be in
		 * /usr/lib/debug/usr/lib
		 */
		if (strlen(dso->long_name) < 9 ||
		    strncmp(dso->long_name, "/usr/lib/", 9)) {
			ret = -1;
			break;
		}
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
		snprintf(filename + len, size - len, "%s", dso->long_name + 4);
		break;

	case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
	{
		const char *last_slash;
		size_t dir_size;

		last_slash = dso->long_name + dso->long_name_len;
		while (last_slash != dso->long_name && *last_slash != '/')
			last_slash--;

		len = __symbol__join_symfs(filename, size, "");
		dir_size = last_slash - dso->long_name + 2;
		if (dir_size > (size - len)) {
			ret = -1;
			break;
		}
		len += scnprintf(filename + len, dir_size, "%s", dso->long_name);
		len += scnprintf(filename + len, size - len, ".debug%s",
				 last_slash);
		break;
	}

	case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
		if (!dso->has_build_id) {
			ret = -1;
			break;
		}

		build_id__sprintf(&dso->bid, build_id_hex);
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug/.build-id/");
		snprintf(filename + len, size - len, "%.2s/%s.debug",
			 build_id_hex, build_id_hex + 2);
		break;

	case DSO_BINARY_TYPE__VMLINUX:
	case DSO_BINARY_TYPE__GUEST_VMLINUX:
	case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
		__symbol__join_symfs(filename, size, dso->long_name);
		break;

	case DSO_BINARY_TYPE__GUEST_KMODULE:
	case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
		path__join3(filename, size, symbol_conf.symfs,
			    root_dir, dso->long_name);
		break;

	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
		__symbol__join_symfs(filename, size, dso->long_name);
		break;

	case DSO_BINARY_TYPE__KCORE:
	case DSO_BINARY_TYPE__GUEST_KCORE:
		snprintf(filename, size, "%s", dso->long_name);
		break;

	default:
	case DSO_BINARY_TYPE__KALLSYMS:
	case DSO_BINARY_TYPE__GUEST_KALLSYMS:
	case DSO_BINARY_TYPE__JAVA_JIT:
	case DSO_BINARY_TYPE__BPF_PROG_INFO:
	case DSO_BINARY_TYPE__BPF_IMAGE:
	case DSO_BINARY_TYPE__OOL:
	case DSO_BINARY_TYPE__NOT_FOUND:
		ret = -1;
		break;
	}

	return ret;
}

enum {
	COMP_ID__NONE = 0,
};

static const struct {
	const char *fmt;
	int (*decompress)(const char *input, int output);
	bool (*is_compressed)(const char *input);
} compressions[] = {
	[COMP_ID__NONE] = { .fmt = NULL, },
#ifdef HAVE_ZLIB_SUPPORT
	{ "gz", gzip_decompress_to_file, gzip_is_compressed },
#endif
#ifdef HAVE_LZMA_SUPPORT
	{ "xz", lzma_decompress_to_file, lzma_is_compressed },
#endif
	{ NULL, NULL, NULL },
};

static int is_supported_compression(const char *ext)
{
	unsigned i;

	for (i = 1; compressions[i].fmt; i++) {
		if (!strcmp(ext, compressions[i].fmt))
			return i;
	}
	return COMP_ID__NONE;
}

bool is_kernel_module(const char *pathname, int cpumode)
{
	struct kmod_path m;
	int mode = cpumode & PERF_RECORD_MISC_CPUMODE_MASK;

	WARN_ONCE(mode != cpumode,
		  "Internal error: passing unmasked cpumode (%x) to is_kernel_module",
		  cpumode);

	switch (mode) {
	case PERF_RECORD_MISC_USER:
	case PERF_RECORD_MISC_HYPERVISOR:
	case PERF_RECORD_MISC_GUEST_USER:
		return false;
	/* Treat PERF_RECORD_MISC_CPUMODE_UNKNOWN as kernel */
	default:
		if (kmod_path__parse(&m, pathname)) {
			pr_err("Failed to check whether %s is a kernel module or not. Assume it is.",
			       pathname);
			return true;
		}
	}

	return m.kmod;
}

bool dso__needs_decompress(struct dso *dso)
{
	return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
		dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
}

int filename__decompress(const char *name, char *pathname,
			 size_t len, int comp, int *err)
{
	char tmpbuf[] = KMOD_DECOMP_NAME;
	int fd = -1;

	/*
	 * We can have a proper compression id for the DSO and yet the file
	 * behind 'name' can still be a plain uncompressed object.
	 *
	 * The reason is the logic behind how we open the DSO object files:
	 * we try all possible 'debug' objects until we find the data. So
	 * even if the DSO is represented by a 'krava.xz' module, we can end
	 * up here opening a ~/.debug/....23432432/debug file which is not
	 * compressed.
	 *
	 * To keep this transparent, we detect this and return the file
	 * descriptor to the uncompressed file.
	 */
	if (!compressions[comp].is_compressed(name))
		return open(name, O_RDONLY);

	fd = mkstemp(tmpbuf);
	if (fd < 0) {
		*err = errno;
		return -1;
	}

	if (compressions[comp].decompress(name, fd)) {
		*err = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE;
		close(fd);
		fd = -1;
	}

	if (!pathname || (fd < 0))
		unlink(tmpbuf);

	if (pathname && (fd >= 0))
		strlcpy(pathname, tmpbuf, len);

	return fd;
}

static int decompress_kmodule(struct dso *dso, const char *name,
			      char *pathname, size_t len)
{
	if (!dso__needs_decompress(dso))
		return -1;

	if (dso->comp == COMP_ID__NONE)
		return -1;

	return filename__decompress(name, pathname, len, dso->comp,
				    &dso->load_errno);
}

int dso__decompress_kmodule_fd(struct dso *dso, const char *name)
{
	return decompress_kmodule(dso, name, NULL, 0);
}

int dso__decompress_kmodule_path(struct dso *dso, const char *name,
				 char *pathname, size_t len)
{
	int fd = decompress_kmodule(dso, name, pathname, len);

	close(fd);
	return fd >= 0 ? 0 : -1;
}

/*
 * Parses kernel module specified in @path and updates
 * @m argument like:
 *
 *    @comp - true if @path contains supported compression suffix,
 *            false otherwise
 *    @kmod - true if @path contains '.ko' suffix in right position,
 *            false otherwise
 *    @name - if (@alloc_name && @kmod) is true, it contains strdup-ed base name
 *            of the kernel module without suffixes, otherwise strdup-ed
 *            base name of @path
 *    @ext  - if (@alloc_ext && @comp) is true, it contains strdup-ed string of
 *            the compression suffix
 *
 * Returns 0 if there's no strdup error, -ENOMEM otherwise.
 */
int __kmod_path__parse(struct kmod_path *m, const char *path,
		       bool alloc_name)
{
	const char *name = strrchr(path, '/');
	const char *ext = strrchr(path, '.');
	bool is_simple_name = false;

	memset(m, 0x0, sizeof(*m));
	name = name ? name + 1 : path;

	/*
	 * '.' is also a valid character for module name.
	 * For example:
	 * [aaa.bbb] is a valid module name. '[' should have higher
	 * priority than '.ko' suffix.
	 *
	 * The kernel names are from machine__mmap_name. Such a name
	 * belongs to the kernel itself, not to a kernel module.
	 */
	if (name[0] == '[') {
		is_simple_name = true;
		if ((strncmp(name, "[kernel.kallsyms]", 17) == 0) ||
		    (strncmp(name, "[guest.kernel.kallsyms", 22) == 0) ||
		    (strncmp(name, "[vdso]", 6) == 0) ||
		    (strncmp(name, "[vdso32]", 8) == 0) ||
		    (strncmp(name, "[vdsox32]", 9) == 0) ||
		    (strncmp(name, "[vsyscall]", 10) == 0)) {
			m->kmod = false;

		} else
			m->kmod = true;
	}

	/* No extension, just return name. */
	if ((ext == NULL) || is_simple_name) {
		if (alloc_name) {
			m->name = strdup(name);
			return m->name ? 0 : -ENOMEM;
		}
		return 0;
	}

	m->comp = is_supported_compression(ext + 1);
	if (m->comp > COMP_ID__NONE)
		ext -= 3;

	/* Check .ko extension only if there's enough name left. */
	if (ext > name)
		m->kmod = !strncmp(ext, ".ko", 3);

	if (alloc_name) {
		if (m->kmod) {
			if (asprintf(&m->name, "[%.*s]", (int) (ext - name), name) == -1)
				return -ENOMEM;
		} else {
			if (asprintf(&m->name, "%s", name) == -1)
				return -ENOMEM;
		}

		strreplace(m->name, '-', '_');
	}

	return 0;
}

void dso__set_module_info(struct dso *dso, struct kmod_path *m,
			  struct machine *machine)
{
	if (machine__is_host(machine))
		dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
	else
		dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;

	/* _KMODULE_COMP should be next to _KMODULE */
	if (m->kmod && m->comp) {
		dso->symtab_type++;
		dso->comp = m->comp;
	}

	dso__set_short_name(dso, strdup(m->name), true);
}

/*
 * Global list of open DSOs and the counter.
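 * Entries are added by dso__list_add() and removed by dso__list_del();
 * all updates happen with dso__data_open_lock held.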
 */
static LIST_HEAD(dso__data_open);
static long dso__data_open_cnt;
static pthread_mutex_t dso__data_open_lock = PTHREAD_MUTEX_INITIALIZER;

static void dso__list_add(struct dso *dso)
{
	list_add_tail(&dso->data.open_entry, &dso__data_open);
	dso__data_open_cnt++;
}

static void dso__list_del(struct dso *dso)
{
	list_del_init(&dso->data.open_entry);
	WARN_ONCE(dso__data_open_cnt <= 0,
		  "DSO data fd counter out of bounds.");
	dso__data_open_cnt--;
}

static void close_first_dso(void);

static int do_open(char *name)
{
	int fd;
	char sbuf[STRERR_BUFSIZE];

	do {
		fd = open(name, O_RDONLY|O_CLOEXEC);
		if (fd >= 0)
			return fd;

		pr_debug("dso open failed: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		if (!dso__data_open_cnt || errno != EMFILE)
			break;

		close_first_dso();
	} while (1);

	return -1;
}

static int __open_dso(struct dso *dso, struct machine *machine)
{
	int fd = -EINVAL;
	char *root_dir = (char *)"";
	char *name = malloc(PATH_MAX);
	bool decomp = false;

	if (!name)
		return -ENOMEM;

	if (machine)
		root_dir = machine->root_dir;

	if (dso__read_binary_type_filename(dso, dso->binary_type,
					   root_dir, name, PATH_MAX))
		goto out;

	if (!is_regular_file(name))
		goto out;

	if (dso__needs_decompress(dso)) {
		char newpath[KMOD_DECOMP_LEN];
		size_t len = sizeof(newpath);

		if (dso__decompress_kmodule_path(dso, name, newpath, len) < 0) {
			fd = -dso->load_errno;
			goto out;
		}

		decomp = true;
		strcpy(name, newpath);
	}

	fd = do_open(name);

	if (decomp)
		unlink(name);

out:
	free(name);
	return fd;
}

static void check_data_close(void);

/**
 * open_dso - Open DSO data file
 * @dso: dso object
 * @machine: machine object
 *
 * Open @dso's data file descriptor and update the
 * list/count of open DSO objects.
 */
static int open_dso(struct dso *dso, struct machine *machine)
{
	int fd;
	struct nscookie nsc;

	if (dso->binary_type != DSO_BINARY_TYPE__BUILD_ID_CACHE)
		nsinfo__mountns_enter(dso->nsinfo, &nsc);
	fd = __open_dso(dso, machine);
	if (dso->binary_type != DSO_BINARY_TYPE__BUILD_ID_CACHE)
		nsinfo__mountns_exit(&nsc);

	if (fd >= 0) {
		dso__list_add(dso);
		/*
		 * Check if we crossed the allowed number
		 * of opened DSOs and close one if needed.
		 */
		check_data_close();
	}

	return fd;
}

static void close_data_fd(struct dso *dso)
{
	if (dso->data.fd >= 0) {
		close(dso->data.fd);
		dso->data.fd = -1;
		dso->data.file_size = 0;
		dso__list_del(dso);
	}
}

/**
 * close_dso - Close DSO data file
 * @dso: dso object
 *
 * Close @dso's data file descriptor and update the
 * list/count of open DSO objects.
 */
static void close_dso(struct dso *dso)
{
	close_data_fd(dso);
}

static void close_first_dso(void)
{
	struct dso *dso;

	dso = list_first_entry(&dso__data_open, struct dso, data.open_entry);
	close_dso(dso);
}

static rlim_t get_fd_limit(void)
{
	struct rlimit l;
	rlim_t limit = 0;

	/*
	 * Allow half of the current open fd limit.
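	 * If RLIMIT_NOFILE cannot be read, fall back to a limit of a
	 * single cached file descriptor.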
	 */
	if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
		if (l.rlim_cur == RLIM_INFINITY)
			limit = l.rlim_cur;
		else
			limit = l.rlim_cur / 2;
	} else {
		pr_err("failed to get fd limit\n");
		limit = 1;
	}

	return limit;
}

static rlim_t fd_limit;

/*
 * Used only by tests/dso-data.c to reset the environment
 * for tests. I don't expect we should change this during
 * standard runtime.
 */
void reset_fd_limit(void)
{
	fd_limit = 0;
}

static bool may_cache_fd(void)
{
	if (!fd_limit)
		fd_limit = get_fd_limit();

	if (fd_limit == RLIM_INFINITY)
		return true;

	return fd_limit > (rlim_t) dso__data_open_cnt;
}

/*
 * Check and close LRU dso if we crossed the allowed limit
 * for opened dso file descriptors. The limit is half of
 * RLIMIT_NOFILE.
 */
static void check_data_close(void)
{
	bool cache_fd = may_cache_fd();

	if (!cache_fd)
		close_first_dso();
}

/**
 * dso__data_close - Close DSO data file
 * @dso: dso object
 *
 * External interface to close @dso's data file descriptor.
 */
void dso__data_close(struct dso *dso)
{
	pthread_mutex_lock(&dso__data_open_lock);
	close_dso(dso);
	pthread_mutex_unlock(&dso__data_open_lock);
}

static void try_to_open_dso(struct dso *dso, struct machine *machine)
{
	enum dso_binary_type binary_type_data[] = {
		DSO_BINARY_TYPE__BUILD_ID_CACHE,
		DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
		DSO_BINARY_TYPE__NOT_FOUND,
	};
	int i = 0;

	if (dso->data.fd >= 0)
		return;

	if (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND) {
		dso->data.fd = open_dso(dso, machine);
		goto out;
	}

	do {
		dso->binary_type = binary_type_data[i++];

		dso->data.fd = open_dso(dso, machine);
		if (dso->data.fd >= 0)
			goto out;

	} while (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND);
out:
	if (dso->data.fd >= 0)
		dso->data.status = DSO_DATA_STATUS_OK;
	else
		dso->data.status = DSO_DATA_STATUS_ERROR;
}

/**
 * dso__data_get_fd - Get dso's data file descriptor
 * @dso: dso object
 * @machine: machine object
 *
 * External interface to find dso's file, open it and
 * return the file descriptor. It should be paired with
 * dso__data_put_fd() if it returns a non-negative value.
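 *
 * Note: dso__data_open_lock is taken here and stays held while the
 * returned descriptor is in use; dso__data_put_fd() releases it.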
 */
int dso__data_get_fd(struct dso *dso, struct machine *machine)
{
	if (dso->data.status == DSO_DATA_STATUS_ERROR)
		return -1;

	if (pthread_mutex_lock(&dso__data_open_lock) < 0)
		return -1;

	try_to_open_dso(dso, machine);

	if (dso->data.fd < 0)
		pthread_mutex_unlock(&dso__data_open_lock);

	return dso->data.fd;
}

void dso__data_put_fd(struct dso *dso __maybe_unused)
{
	pthread_mutex_unlock(&dso__data_open_lock);
}

bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by)
{
	u32 flag = 1 << by;

	if (dso->data.status_seen & flag)
		return true;

	dso->data.status_seen |= flag;

	return false;
}

#ifdef HAVE_LIBBPF_SUPPORT
static ssize_t bpf_read(struct dso *dso, u64 offset, char *data)
{
	struct bpf_prog_info_node *node;
	ssize_t size = DSO__DATA_CACHE_SIZE;
	u64 len;
	u8 *buf;

	node = perf_env__find_bpf_prog_info(dso->bpf_prog.env, dso->bpf_prog.id);
	if (!node || !node->info_linear) {
		dso->data.status = DSO_DATA_STATUS_ERROR;
		return -1;
	}

	len = node->info_linear->info.jited_prog_len;
	buf = (u8 *)(uintptr_t)node->info_linear->info.jited_prog_insns;

	if (offset >= len)
		return -1;

	size = (ssize_t)min(len - offset, (u64)size);
	memcpy(data, buf + offset, size);
	return size;
}

static int bpf_size(struct dso *dso)
{
	struct bpf_prog_info_node *node;

	node = perf_env__find_bpf_prog_info(dso->bpf_prog.env, dso->bpf_prog.id);
	if (!node || !node->info_linear) {
		dso->data.status = DSO_DATA_STATUS_ERROR;
		return -1;
	}

	dso->data.file_size = node->info_linear->info.jited_prog_len;
	return 0;
}
#endif // HAVE_LIBBPF_SUPPORT

static void
dso_cache__free(struct dso *dso)
{
	struct rb_root *root = &dso->data.cache;
	struct rb_node *next = rb_first(root);

	pthread_mutex_lock(&dso->lock);
	while (next) {
		struct dso_cache *cache;

		cache = rb_entry(next, struct dso_cache, rb_node);
		next = rb_next(&cache->rb_node);
		rb_erase(&cache->rb_node, root);
		free(cache);
	}
	pthread_mutex_unlock(&dso->lock);
}

static struct dso_cache *__dso_cache__find(struct dso *dso, u64 offset)
{
	const struct rb_root *root = &dso->data.cache;
	struct rb_node * const *p = &root->rb_node;
	const struct rb_node *parent = NULL;
	struct dso_cache *cache;

	while (*p != NULL) {
		u64 end;

		parent = *p;
		cache = rb_entry(parent, struct dso_cache, rb_node);
		end = cache->offset + DSO__DATA_CACHE_SIZE;

		if (offset < cache->offset)
			p = &(*p)->rb_left;
		else if (offset >= end)
			p = &(*p)->rb_right;
		else
			return cache;
	}

	return NULL;
}

static struct dso_cache *
dso_cache__insert(struct dso *dso, struct dso_cache *new)
{
	struct rb_root *root = &dso->data.cache;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct dso_cache *cache;
	u64 offset = new->offset;

	pthread_mutex_lock(&dso->lock);
	while (*p != NULL) {
		u64 end;

		parent = *p;
		cache = rb_entry(parent, struct dso_cache, rb_node);
		end = cache->offset + DSO__DATA_CACHE_SIZE;

		if (offset < cache->offset)
			p = &(*p)->rb_left;
		else if (offset >= end)
			p = &(*p)->rb_right;
		else
			goto out;
	}

	rb_link_node(&new->rb_node, parent, p);
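	/* Rebalance the rbtree after linking the new cache entry. */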
	rb_insert_color(&new->rb_node, root);

	cache = NULL;
out:
	pthread_mutex_unlock(&dso->lock);
	return cache;
}

static ssize_t dso_cache__memcpy(struct dso_cache *cache, u64 offset, u8 *data,
				 u64 size, bool out)
{
	u64 cache_offset = offset - cache->offset;
	u64 cache_size = min(cache->size - cache_offset, size);

	if (out)
		memcpy(data, cache->data + cache_offset, cache_size);
	else
		memcpy(cache->data + cache_offset, data, cache_size);
	return cache_size;
}

static ssize_t file_read(struct dso *dso, struct machine *machine,
			 u64 offset, char *data)
{
	ssize_t ret;

	pthread_mutex_lock(&dso__data_open_lock);

	/*
	 * dso->data.fd might be closed if other thread opened another
	 * file (dso) due to open file limit (RLIMIT_NOFILE).
	 */
	try_to_open_dso(dso, machine);

	if (dso->data.fd < 0) {
		dso->data.status = DSO_DATA_STATUS_ERROR;
		ret = -errno;
		goto out;
	}

	ret = pread(dso->data.fd, data, DSO__DATA_CACHE_SIZE, offset);
out:
	pthread_mutex_unlock(&dso__data_open_lock);
	return ret;
}

static struct dso_cache *dso_cache__populate(struct dso *dso,
					     struct machine *machine,
					     u64 offset, ssize_t *ret)
{
	u64 cache_offset = offset & DSO__DATA_CACHE_MASK;
	struct dso_cache *cache;
	struct dso_cache *old;

	cache = zalloc(sizeof(*cache) + DSO__DATA_CACHE_SIZE);
	if (!cache) {
		*ret = -ENOMEM;
		return NULL;
	}
#ifdef HAVE_LIBBPF_SUPPORT
	if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO)
		*ret = bpf_read(dso, cache_offset, cache->data);
	else
#endif
	if (dso->binary_type == DSO_BINARY_TYPE__OOL)
		*ret = DSO__DATA_CACHE_SIZE;
	else
		*ret = file_read(dso, machine, cache_offset, cache->data);

	if (*ret <= 0) {
		free(cache);
		return NULL;
	}

	cache->offset = cache_offset;
	cache->size = *ret;

	old = dso_cache__insert(dso, cache);
	if (old) {
		/* we lose the race */
		free(cache);
		cache = old;
	}

	return cache;
}

static struct dso_cache *dso_cache__find(struct dso *dso,
					 struct machine *machine,
					 u64 offset,
					 ssize_t *ret)
{
	struct dso_cache *cache = __dso_cache__find(dso, offset);

	return cache ? cache : dso_cache__populate(dso, machine, offset, ret);
}

static ssize_t dso_cache_io(struct dso *dso, struct machine *machine,
			    u64 offset, u8 *data, ssize_t size, bool out)
{
	struct dso_cache *cache;
	ssize_t ret = 0;

	cache = dso_cache__find(dso, machine, offset, &ret);
	if (!cache)
		return ret;

	return dso_cache__memcpy(cache, offset, data, size, out);
}

/*
 * Reads and caches dso data in DSO__DATA_CACHE_SIZE sized chunks
 * stored in the rb_tree. Any read of already cached data is served
 * from the cache. Writes update the cache only, not the backing file.
 */
static ssize_t cached_io(struct dso *dso, struct machine *machine,
			 u64 offset, u8 *data, ssize_t size, bool out)
{
	ssize_t r = 0;
	u8 *p = data;

	do {
		ssize_t ret;

		ret = dso_cache_io(dso, machine, offset, p, size, out);
		if (ret < 0)
			return ret;

		/* Reached EOF, return what we have.
		 */
		if (!ret)
			break;

		BUG_ON(ret > size);

		r += ret;
		p += ret;
		offset += ret;
		size -= ret;

	} while (size);

	return r;
}

static int file_size(struct dso *dso, struct machine *machine)
{
	int ret = 0;
	struct stat st;
	char sbuf[STRERR_BUFSIZE];

	pthread_mutex_lock(&dso__data_open_lock);

	/*
	 * dso->data.fd might be closed if other thread opened another
	 * file (dso) due to open file limit (RLIMIT_NOFILE).
	 */
	try_to_open_dso(dso, machine);

	if (dso->data.fd < 0) {
		ret = -errno;
		dso->data.status = DSO_DATA_STATUS_ERROR;
		goto out;
	}

	if (fstat(dso->data.fd, &st) < 0) {
		ret = -errno;
		pr_err("dso cache fstat failed: %s\n",
		       str_error_r(errno, sbuf, sizeof(sbuf)));
		dso->data.status = DSO_DATA_STATUS_ERROR;
		goto out;
	}
	dso->data.file_size = st.st_size;

out:
	pthread_mutex_unlock(&dso__data_open_lock);
	return ret;
}

int dso__data_file_size(struct dso *dso, struct machine *machine)
{
	if (dso->data.file_size)
		return 0;

	if (dso->data.status == DSO_DATA_STATUS_ERROR)
		return -1;
#ifdef HAVE_LIBBPF_SUPPORT
	if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO)
		return bpf_size(dso);
#endif
	return file_size(dso, machine);
}

/**
 * dso__data_size - Return dso data size
 * @dso: dso object
 * @machine: machine object
 *
 * Return: dso data size
 */
off_t dso__data_size(struct dso *dso, struct machine *machine)
{
	if (dso__data_file_size(dso, machine))
		return -1;

	/* For now just estimate dso data size is close to file size */
	return dso->data.file_size;
}

static ssize_t data_read_write_offset(struct dso *dso, struct machine *machine,
				      u64 offset, u8 *data, ssize_t size,
				      bool out)
{
	if (dso__data_file_size(dso, machine))
		return -1;

	/* Check the offset sanity. */
	if (offset > dso->data.file_size)
		return -1;

	if (offset + size < offset)
		return -1;

	return cached_io(dso, machine, offset, data, size, out);
}

/**
 * dso__data_read_offset - Read data from dso file offset
 * @dso: dso object
 * @machine: machine object
 * @offset: file offset
 * @data: buffer to store data
 * @size: size of the @data buffer
 *
 * External interface to read data from dso file offset. Open
 * dso data file and use cached_io() to get the data.
 */
ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
			      u64 offset, u8 *data, ssize_t size)
{
	if (dso->data.status == DSO_DATA_STATUS_ERROR)
		return -1;

	return data_read_write_offset(dso, machine, offset, data, size, true);
}

/**
 * dso__data_read_addr - Read data from dso address
 * @dso: dso object
 * @map: memory map to translate @addr with
 * @machine: machine object
 * @addr: virtual memory address
 * @data: buffer to store data
 * @size: size of the @data buffer
 *
 * External interface to read data from dso address.
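 * Translates @addr to a file offset via @map and then calls
 * dso__data_read_offset().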
 */
ssize_t dso__data_read_addr(struct dso *dso, struct map *map,
			    struct machine *machine, u64 addr,
			    u8 *data, ssize_t size)
{
	u64 offset = map->map_ip(map, addr);
	return dso__data_read_offset(dso, machine, offset, data, size);
}

/**
 * dso__data_write_cache_offs - Write data to dso data cache at file offset
 * @dso: dso object
 * @machine: machine object
 * @offset: file offset
 * @data_in: buffer to write
 * @size: size of the @data_in buffer
 *
 * Write into the dso file data cache, but do not change the file itself.
 */
ssize_t dso__data_write_cache_offs(struct dso *dso, struct machine *machine,
				   u64 offset, const u8 *data_in, ssize_t size)
{
	u8 *data = (u8 *)data_in; /* cast away const to use same fns for r/w */

	if (dso->data.status == DSO_DATA_STATUS_ERROR)
		return -1;

	return data_read_write_offset(dso, machine, offset, data, size, false);
}

/**
 * dso__data_write_cache_addr - Write data to dso data cache at dso address
 * @dso: dso object
 * @map: memory map to translate @addr with
 * @machine: machine object
 * @addr: virtual memory address
 * @data: buffer to write
 * @size: size of the @data buffer
 *
 * External interface to write into the dso file data cache, but do not change
 * the file itself.
 */
ssize_t dso__data_write_cache_addr(struct dso *dso, struct map *map,
				   struct machine *machine, u64 addr,
				   const u8 *data, ssize_t size)
{
	u64 offset = map->map_ip(map, addr);
	return dso__data_write_cache_offs(dso, machine, offset, data, size);
}

struct map *dso__new_map(const char *name)
{
	struct map *map = NULL;
	struct dso *dso = dso__new(name);

	if (dso) {
		map = map__new2(0, dso);
		dso__put(dso);
	}

	return map;
}

struct dso *machine__findnew_kernel(struct machine *machine, const char *name,
				    const char *short_name, int dso_type)
{
	/*
	 * The kernel dso could be created by build_id processing.
	 */
	struct dso *dso = machine__findnew_dso(machine, name);

	/*
	 * We need to run this in all cases, since during the build_id
	 * processing we had no idea this was the kernel dso.
	 */
	if (dso != NULL) {
		dso__set_short_name(dso, short_name, false);
		dso->kernel = dso_type;
	}

	return dso;
}

static void dso__set_long_name_id(struct dso *dso, const char *name, struct dso_id *id, bool name_allocated)
{
	struct rb_root *root = dso->root;

	if (name == NULL)
		return;

	if (dso->long_name_allocated)
		free((char *)dso->long_name);

	if (root) {
		rb_erase(&dso->rb_node, root);
		/*
		 * __dsos__findnew_link_by_longname_id() isn't guaranteed to
		 * add it back, so a clean removal is required here.
		 */
		RB_CLEAR_NODE(&dso->rb_node);
		dso->root = NULL;
	}

	dso->long_name = name;
	dso->long_name_len = strlen(name);
	dso->long_name_allocated = name_allocated;

	if (root)
		__dsos__findnew_link_by_longname_id(root, dso, NULL, id);
}

void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated)
{
	dso__set_long_name_id(dso, name, NULL, name_allocated);
}

void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
{
	if (name == NULL)
		return;

	if (dso->short_name_allocated)
		free((char *)dso->short_name);

	dso->short_name = name;
	dso->short_name_len = strlen(name);
	dso->short_name_allocated = name_allocated;
}

int dso__name_len(const struct dso *dso)
{
	if (!dso)
		return strlen("[unknown]");
	if (verbose > 0)
		return dso->long_name_len;

	return dso->short_name_len;
}

bool dso__loaded(const struct dso *dso)
{
	return dso->loaded;
}

bool dso__sorted_by_name(const struct dso *dso)
{
	return dso->sorted_by_name;
}

void dso__set_sorted_by_name(struct dso *dso)
{
	dso->sorted_by_name = true;
}

struct dso *dso__new_id(const char *name, struct dso_id *id)
{
	struct dso *dso = calloc(1, sizeof(*dso) + strlen(name) + 1);

	if (dso != NULL) {
		strcpy(dso->name, name);
		if (id)
			dso->id = *id;
		dso__set_long_name_id(dso, dso->name, id, false);
		dso__set_short_name(dso, dso->name, false);
		dso->symbols = dso->symbol_names = RB_ROOT_CACHED;
		dso->data.cache = RB_ROOT;
		dso->inlined_nodes = RB_ROOT_CACHED;
		dso->srclines = RB_ROOT_CACHED;
		dso->data.fd = -1;
		dso->data.status = DSO_DATA_STATUS_UNKNOWN;
		dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND;
		dso->binary_type = DSO_BINARY_TYPE__NOT_FOUND;
		dso->is_64_bit = (sizeof(void *) == 8);
		dso->loaded = 0;
		dso->rel = 0;
		dso->sorted_by_name = 0;
		dso->has_build_id = 0;
		dso->has_srcline = 1;
		dso->a2l_fails = 1;
		dso->kernel = DSO_SPACE__USER;
		dso->needs_swap = DSO_SWAP__UNSET;
		dso->comp = COMP_ID__NONE;
		RB_CLEAR_NODE(&dso->rb_node);
		dso->root = NULL;
		INIT_LIST_HEAD(&dso->node);
		INIT_LIST_HEAD(&dso->data.open_entry);
		pthread_mutex_init(&dso->lock, NULL);
		refcount_set(&dso->refcnt, 1);
	}

	return dso;
}

struct dso *dso__new(const char *name)
{
	return dso__new_id(name, NULL);
}

void dso__delete(struct dso *dso)
{
	if (!RB_EMPTY_NODE(&dso->rb_node))
		pr_err("DSO %s is still in rbtree when being deleted!\n",
		       dso->long_name);

	/* free inlines first, as they reference symbols */
	inlines__tree_delete(&dso->inlined_nodes);
	srcline__tree_delete(&dso->srclines);
	symbols__delete(&dso->symbols);

	if (dso->short_name_allocated) {
		zfree((char **)&dso->short_name);
		dso->short_name_allocated = false;
	}

	if (dso->long_name_allocated) {
		zfree((char **)&dso->long_name);
		dso->long_name_allocated = false;
	}

	dso__data_close(dso);
	auxtrace_cache__free(dso->auxtrace_cache);
	dso_cache__free(dso);
	dso__free_a2l(dso);
	zfree(&dso->symsrc_filename);
	nsinfo__zput(dso->nsinfo);
	pthread_mutex_destroy(&dso->lock);
	free(dso);
}

struct dso *dso__get(struct dso *dso)
{
	if (dso)
		refcount_inc(&dso->refcnt);
	return dso;
}

void dso__put(struct dso *dso)
{
	if (dso && refcount_dec_and_test(&dso->refcnt))
		dso__delete(dso);
}

void dso__set_build_id(struct dso *dso, struct build_id *bid)
{
	dso->bid = *bid;
	dso->has_build_id = 1;
}

bool dso__build_id_equal(const struct dso *dso, struct build_id *bid)
{
	if (dso->bid.size > bid->size && dso->bid.size == BUILD_ID_SIZE) {
		/*
		 * For backward compatibility, a build-id is allowed to have
		 * trailing zeros.
		 */
		return !memcmp(dso->bid.data, bid->data, bid->size) &&
			!memchr_inv(&dso->bid.data[bid->size], 0,
				    dso->bid.size - bid->size);
	}

	return dso->bid.size == bid->size &&
	       memcmp(dso->bid.data, bid->data, dso->bid.size) == 0;
}

void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine)
{
	char path[PATH_MAX];

	if (machine__is_default_guest(machine))
		return;
	sprintf(path, "%s/sys/kernel/notes", machine->root_dir);
	if (sysfs__read_build_id(path, &dso->bid) == 0)
		dso->has_build_id = true;
}

int dso__kernel_module_get_build_id(struct dso *dso,
				    const char *root_dir)
{
	char filename[PATH_MAX];
	/*
	 * kernel module short names are of the form "[module]" and
	 * we need just "module" here.
	 */
	const char *name = dso->short_name + 1;

	snprintf(filename, sizeof(filename),
		 "%s/sys/module/%.*s/notes/.note.gnu.build-id",
		 root_dir, (int)strlen(name) - 1, name);

	if (sysfs__read_build_id(filename, &dso->bid) == 0)
		dso->has_build_id = true;

	return 0;
}

static size_t dso__fprintf_buildid(struct dso *dso, FILE *fp)
{
	char sbuild_id[SBUILD_ID_SIZE];

	build_id__sprintf(&dso->bid, sbuild_id);
	return fprintf(fp, "%s", sbuild_id);
}

size_t dso__fprintf(struct dso *dso, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = fprintf(fp, "dso: %s (", dso->short_name);

	if (dso->short_name != dso->long_name)
		ret += fprintf(fp, "%s, ", dso->long_name);
	ret += fprintf(fp, "%sloaded, ", dso__loaded(dso) ? "" : "NOT ");
	ret += dso__fprintf_buildid(dso, fp);
	ret += fprintf(fp, ")\n");
	for (nd = rb_first_cached(&dso->symbols); nd; nd = rb_next(nd)) {
		struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
		ret += symbol__fprintf(pos, fp);
	}

	return ret;
}

enum dso_type dso__type(struct dso *dso, struct machine *machine)
{
	int fd;
	enum dso_type type = DSO__TYPE_UNKNOWN;

	fd = dso__data_get_fd(dso, machine);
	if (fd >= 0) {
		type = dso__type_fd(fd);
		dso__data_put_fd(dso);
	}

	return type;
}

int dso__strerror_load(struct dso *dso, char *buf, size_t buflen)
{
	int idx, errnum = dso->load_errno;
	/*
	 * This must have the same ordering as enum dso_load_errno.
	 */
	static const char *dso_load__error_str[] = {
		"Internal tools/perf/ library error",
		"Invalid ELF file",
		"Can not read build id",
		"Mismatching build id",
		"Decompression failure",
	};

	BUG_ON(buflen == 0);

	if (errnum >= 0) {
		const char *err = str_error_r(errnum, buf, buflen);

		if (err != buf)
			scnprintf(buf, buflen, "%s", err);

		return 0;
	}

	if (errnum < __DSO_LOAD_ERRNO__START || errnum >= __DSO_LOAD_ERRNO__END)
		return -1;

	idx = errnum - __DSO_LOAD_ERRNO__START;
	scnprintf(buf, buflen, "%s", dso_load__error_str[idx]);
	return 0;
}