1 // SPDX-License-Identifier: GPL-2.0 2 #include <asm/bug.h> 3 #include <linux/kernel.h> 4 #include <linux/string.h> 5 #include <linux/zalloc.h> 6 #include <sys/time.h> 7 #include <sys/resource.h> 8 #include <sys/types.h> 9 #include <sys/stat.h> 10 #include <unistd.h> 11 #include <errno.h> 12 #include <fcntl.h> 13 #include <stdlib.h> 14 #ifdef HAVE_LIBBPF_SUPPORT 15 #include <bpf/libbpf.h> 16 #include "bpf-event.h" 17 #include "bpf-utils.h" 18 #endif 19 #include "compress.h" 20 #include "env.h" 21 #include "namespaces.h" 22 #include "path.h" 23 #include "map.h" 24 #include "symbol.h" 25 #include "srcline.h" 26 #include "dso.h" 27 #include "dsos.h" 28 #include "machine.h" 29 #include "auxtrace.h" 30 #include "util.h" /* O_CLOEXEC for older systems */ 31 #include "debug.h" 32 #include "string2.h" 33 #include "vdso.h" 34 35 static const char * const debuglink_paths[] = { 36 "%.0s%s", 37 "%s/%s", 38 "%s/.debug/%s", 39 "/usr/lib/debug%s/%s" 40 }; 41 42 char dso__symtab_origin(const struct dso *dso) 43 { 44 static const char origin[] = { 45 [DSO_BINARY_TYPE__KALLSYMS] = 'k', 46 [DSO_BINARY_TYPE__VMLINUX] = 'v', 47 [DSO_BINARY_TYPE__JAVA_JIT] = 'j', 48 [DSO_BINARY_TYPE__DEBUGLINK] = 'l', 49 [DSO_BINARY_TYPE__BUILD_ID_CACHE] = 'B', 50 [DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO] = 'D', 51 [DSO_BINARY_TYPE__FEDORA_DEBUGINFO] = 'f', 52 [DSO_BINARY_TYPE__UBUNTU_DEBUGINFO] = 'u', 53 [DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO] = 'x', 54 [DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO] = 'o', 55 [DSO_BINARY_TYPE__BUILDID_DEBUGINFO] = 'b', 56 [DSO_BINARY_TYPE__SYSTEM_PATH_DSO] = 'd', 57 [DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE] = 'K', 58 [DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP] = 'm', 59 [DSO_BINARY_TYPE__GUEST_KALLSYMS] = 'g', 60 [DSO_BINARY_TYPE__GUEST_KMODULE] = 'G', 61 [DSO_BINARY_TYPE__GUEST_KMODULE_COMP] = 'M', 62 [DSO_BINARY_TYPE__GUEST_VMLINUX] = 'V', 63 }; 64 65 if (dso == NULL || dso->symtab_type == DSO_BINARY_TYPE__NOT_FOUND) 66 return '!'; 67 return origin[dso->symtab_type]; 68 } 69 70 int dso__read_binary_type_filename(const struct dso *dso, 71 enum dso_binary_type type, 72 char *root_dir, char *filename, size_t size) 73 { 74 char build_id_hex[SBUILD_ID_SIZE]; 75 int ret = 0; 76 size_t len; 77 78 switch (type) { 79 case DSO_BINARY_TYPE__DEBUGLINK: 80 { 81 const char *last_slash; 82 char dso_dir[PATH_MAX]; 83 char symfile[PATH_MAX]; 84 unsigned int i; 85 86 len = __symbol__join_symfs(filename, size, dso->long_name); 87 last_slash = filename + len; 88 while (last_slash != filename && *last_slash != '/') 89 last_slash--; 90 91 strncpy(dso_dir, filename, last_slash - filename); 92 dso_dir[last_slash-filename] = '\0'; 93 94 if (!is_regular_file(filename)) { 95 ret = -1; 96 break; 97 } 98 99 ret = filename__read_debuglink(filename, symfile, PATH_MAX); 100 if (ret) 101 break; 102 103 /* Check predefined locations where debug file might reside */ 104 ret = -1; 105 for (i = 0; i < ARRAY_SIZE(debuglink_paths); i++) { 106 snprintf(filename, size, 107 debuglink_paths[i], dso_dir, symfile); 108 if (is_regular_file(filename)) { 109 ret = 0; 110 break; 111 } 112 } 113 114 break; 115 } 116 case DSO_BINARY_TYPE__BUILD_ID_CACHE: 117 if (dso__build_id_filename(dso, filename, size, false) == NULL) 118 ret = -1; 119 break; 120 121 case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO: 122 if (dso__build_id_filename(dso, filename, size, true) == NULL) 123 ret = -1; 124 break; 125 126 case DSO_BINARY_TYPE__FEDORA_DEBUGINFO: 127 len = __symbol__join_symfs(filename, size, "/usr/lib/debug"); 128 snprintf(filename + len, size - len, "%s.debug", dso->long_name); 129 break; 130 131 case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO: 132 len = __symbol__join_symfs(filename, size, "/usr/lib/debug"); 133 snprintf(filename + len, size - len, "%s", dso->long_name); 134 break; 135 136 case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO: 137 /* 138 * Ubuntu can mixup /usr/lib with /lib, putting debuginfo in 139 * /usr/lib/debug/lib when it is expected to be in 140 * /usr/lib/debug/usr/lib 141 */ 142 if (strlen(dso->long_name) < 9 || 143 strncmp(dso->long_name, "/usr/lib/", 9)) { 144 ret = -1; 145 break; 146 } 147 len = __symbol__join_symfs(filename, size, "/usr/lib/debug"); 148 snprintf(filename + len, size - len, "%s", dso->long_name + 4); 149 break; 150 151 case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO: 152 { 153 const char *last_slash; 154 size_t dir_size; 155 156 last_slash = dso->long_name + dso->long_name_len; 157 while (last_slash != dso->long_name && *last_slash != '/') 158 last_slash--; 159 160 len = __symbol__join_symfs(filename, size, ""); 161 dir_size = last_slash - dso->long_name + 2; 162 if (dir_size > (size - len)) { 163 ret = -1; 164 break; 165 } 166 len += scnprintf(filename + len, dir_size, "%s", dso->long_name); 167 len += scnprintf(filename + len , size - len, ".debug%s", 168 last_slash); 169 break; 170 } 171 172 case DSO_BINARY_TYPE__BUILDID_DEBUGINFO: 173 if (!dso->has_build_id) { 174 ret = -1; 175 break; 176 } 177 178 build_id__sprintf(&dso->bid, build_id_hex); 179 len = __symbol__join_symfs(filename, size, "/usr/lib/debug/.build-id/"); 180 snprintf(filename + len, size - len, "%.2s/%s.debug", 181 build_id_hex, build_id_hex + 2); 182 break; 183 184 case DSO_BINARY_TYPE__VMLINUX: 185 case DSO_BINARY_TYPE__GUEST_VMLINUX: 186 case DSO_BINARY_TYPE__SYSTEM_PATH_DSO: 187 __symbol__join_symfs(filename, size, dso->long_name); 188 break; 189 190 case DSO_BINARY_TYPE__GUEST_KMODULE: 191 case DSO_BINARY_TYPE__GUEST_KMODULE_COMP: 192 path__join3(filename, size, symbol_conf.symfs, 193 root_dir, dso->long_name); 194 break; 195 196 case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE: 197 case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP: 198 __symbol__join_symfs(filename, size, dso->long_name); 199 break; 200 201 case DSO_BINARY_TYPE__KCORE: 202 case DSO_BINARY_TYPE__GUEST_KCORE: 203 snprintf(filename, size, "%s", dso->long_name); 204 break; 205 206 default: 207 case DSO_BINARY_TYPE__KALLSYMS: 208 case DSO_BINARY_TYPE__GUEST_KALLSYMS: 209 case DSO_BINARY_TYPE__JAVA_JIT: 210 case DSO_BINARY_TYPE__BPF_PROG_INFO: 211 case DSO_BINARY_TYPE__BPF_IMAGE: 212 case DSO_BINARY_TYPE__OOL: 213 case DSO_BINARY_TYPE__NOT_FOUND: 214 ret = -1; 215 break; 216 } 217 218 return ret; 219 } 220 221 enum { 222 COMP_ID__NONE = 0, 223 }; 224 225 static const struct { 226 const char *fmt; 227 int (*decompress)(const char *input, int output); 228 bool (*is_compressed)(const char *input); 229 } compressions[] = { 230 [COMP_ID__NONE] = { .fmt = NULL, }, 231 #ifdef HAVE_ZLIB_SUPPORT 232 { "gz", gzip_decompress_to_file, gzip_is_compressed }, 233 #endif 234 #ifdef HAVE_LZMA_SUPPORT 235 { "xz", lzma_decompress_to_file, lzma_is_compressed }, 236 #endif 237 { NULL, NULL, NULL }, 238 }; 239 240 static int is_supported_compression(const char *ext) 241 { 242 unsigned i; 243 244 for (i = 1; compressions[i].fmt; i++) { 245 if (!strcmp(ext, compressions[i].fmt)) 246 return i; 247 } 248 return COMP_ID__NONE; 249 } 250 251 bool is_kernel_module(const char *pathname, int cpumode) 252 { 253 struct kmod_path m; 254 int mode = cpumode & PERF_RECORD_MISC_CPUMODE_MASK; 255 256 WARN_ONCE(mode != cpumode, 257 "Internal error: passing unmasked cpumode (%x) to is_kernel_module", 258 cpumode); 259 260 switch (mode) { 261 case PERF_RECORD_MISC_USER: 262 case PERF_RECORD_MISC_HYPERVISOR: 263 case PERF_RECORD_MISC_GUEST_USER: 264 return false; 265 /* Treat PERF_RECORD_MISC_CPUMODE_UNKNOWN as kernel */ 266 default: 267 if (kmod_path__parse(&m, pathname)) { 268 pr_err("Failed to check whether %s is a kernel module or not. Assume it is.", 269 pathname); 270 return true; 271 } 272 } 273 274 return m.kmod; 275 } 276 277 bool dso__needs_decompress(struct dso *dso) 278 { 279 return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP || 280 dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP; 281 } 282 283 int filename__decompress(const char *name, char *pathname, 284 size_t len, int comp, int *err) 285 { 286 char tmpbuf[] = KMOD_DECOMP_NAME; 287 int fd = -1; 288 289 /* 290 * We have proper compression id for DSO and yet the file 291 * behind the 'name' can still be plain uncompressed object. 292 * 293 * The reason is behind the logic we open the DSO object files, 294 * when we try all possible 'debug' objects until we find the 295 * data. So even if the DSO is represented by 'krava.xz' module, 296 * we can end up here opening ~/.debug/....23432432/debug' file 297 * which is not compressed. 298 * 299 * To keep this transparent, we detect this and return the file 300 * descriptor to the uncompressed file. 301 */ 302 if (!compressions[comp].is_compressed(name)) 303 return open(name, O_RDONLY); 304 305 fd = mkstemp(tmpbuf); 306 if (fd < 0) { 307 *err = errno; 308 return -1; 309 } 310 311 if (compressions[comp].decompress(name, fd)) { 312 *err = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE; 313 close(fd); 314 fd = -1; 315 } 316 317 if (!pathname || (fd < 0)) 318 unlink(tmpbuf); 319 320 if (pathname && (fd >= 0)) 321 strlcpy(pathname, tmpbuf, len); 322 323 return fd; 324 } 325 326 static int decompress_kmodule(struct dso *dso, const char *name, 327 char *pathname, size_t len) 328 { 329 if (!dso__needs_decompress(dso)) 330 return -1; 331 332 if (dso->comp == COMP_ID__NONE) 333 return -1; 334 335 return filename__decompress(name, pathname, len, dso->comp, 336 &dso->load_errno); 337 } 338 339 int dso__decompress_kmodule_fd(struct dso *dso, const char *name) 340 { 341 return decompress_kmodule(dso, name, NULL, 0); 342 } 343 344 int dso__decompress_kmodule_path(struct dso *dso, const char *name, 345 char *pathname, size_t len) 346 { 347 int fd = decompress_kmodule(dso, name, pathname, len); 348 349 close(fd); 350 return fd >= 0 ? 0 : -1; 351 } 352 353 /* 354 * Parses kernel module specified in @path and updates 355 * @m argument like: 356 * 357 * @comp - true if @path contains supported compression suffix, 358 * false otherwise 359 * @kmod - true if @path contains '.ko' suffix in right position, 360 * false otherwise 361 * @name - if (@alloc_name && @kmod) is true, it contains strdup-ed base name 362 * of the kernel module without suffixes, otherwise strudup-ed 363 * base name of @path 364 * @ext - if (@alloc_ext && @comp) is true, it contains strdup-ed string 365 * the compression suffix 366 * 367 * Returns 0 if there's no strdup error, -ENOMEM otherwise. 368 */ 369 int __kmod_path__parse(struct kmod_path *m, const char *path, 370 bool alloc_name) 371 { 372 const char *name = strrchr(path, '/'); 373 const char *ext = strrchr(path, '.'); 374 bool is_simple_name = false; 375 376 memset(m, 0x0, sizeof(*m)); 377 name = name ? name + 1 : path; 378 379 /* 380 * '.' is also a valid character for module name. For example: 381 * [aaa.bbb] is a valid module name. '[' should have higher 382 * priority than '.ko' suffix. 383 * 384 * The kernel names are from machine__mmap_name. Such 385 * name should belong to kernel itself, not kernel module. 386 */ 387 if (name[0] == '[') { 388 is_simple_name = true; 389 if ((strncmp(name, "[kernel.kallsyms]", 17) == 0) || 390 (strncmp(name, "[guest.kernel.kallsyms", 22) == 0) || 391 (strncmp(name, "[vdso]", 6) == 0) || 392 (strncmp(name, "[vdso32]", 8) == 0) || 393 (strncmp(name, "[vdsox32]", 9) == 0) || 394 (strncmp(name, "[vsyscall]", 10) == 0)) { 395 m->kmod = false; 396 397 } else 398 m->kmod = true; 399 } 400 401 /* No extension, just return name. */ 402 if ((ext == NULL) || is_simple_name) { 403 if (alloc_name) { 404 m->name = strdup(name); 405 return m->name ? 0 : -ENOMEM; 406 } 407 return 0; 408 } 409 410 m->comp = is_supported_compression(ext + 1); 411 if (m->comp > COMP_ID__NONE) 412 ext -= 3; 413 414 /* Check .ko extension only if there's enough name left. */ 415 if (ext > name) 416 m->kmod = !strncmp(ext, ".ko", 3); 417 418 if (alloc_name) { 419 if (m->kmod) { 420 if (asprintf(&m->name, "[%.*s]", (int) (ext - name), name) == -1) 421 return -ENOMEM; 422 } else { 423 if (asprintf(&m->name, "%s", name) == -1) 424 return -ENOMEM; 425 } 426 427 strreplace(m->name, '-', '_'); 428 } 429 430 return 0; 431 } 432 433 void dso__set_module_info(struct dso *dso, struct kmod_path *m, 434 struct machine *machine) 435 { 436 if (machine__is_host(machine)) 437 dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE; 438 else 439 dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE; 440 441 /* _KMODULE_COMP should be next to _KMODULE */ 442 if (m->kmod && m->comp) { 443 dso->symtab_type++; 444 dso->comp = m->comp; 445 } 446 447 dso__set_short_name(dso, strdup(m->name), true); 448 } 449 450 /* 451 * Global list of open DSOs and the counter. 452 */ 453 static LIST_HEAD(dso__data_open); 454 static long dso__data_open_cnt; 455 static pthread_mutex_t dso__data_open_lock = PTHREAD_MUTEX_INITIALIZER; 456 457 static void dso__list_add(struct dso *dso) 458 { 459 list_add_tail(&dso->data.open_entry, &dso__data_open); 460 dso__data_open_cnt++; 461 } 462 463 static void dso__list_del(struct dso *dso) 464 { 465 list_del_init(&dso->data.open_entry); 466 WARN_ONCE(dso__data_open_cnt <= 0, 467 "DSO data fd counter out of bounds."); 468 dso__data_open_cnt--; 469 } 470 471 static void close_first_dso(void); 472 473 static int do_open(char *name) 474 { 475 int fd; 476 char sbuf[STRERR_BUFSIZE]; 477 478 do { 479 fd = open(name, O_RDONLY|O_CLOEXEC); 480 if (fd >= 0) 481 return fd; 482 483 pr_debug("dso open failed: %s\n", 484 str_error_r(errno, sbuf, sizeof(sbuf))); 485 if (!dso__data_open_cnt || errno != EMFILE) 486 break; 487 488 close_first_dso(); 489 } while (1); 490 491 return -1; 492 } 493 494 static int __open_dso(struct dso *dso, struct machine *machine) 495 { 496 int fd = -EINVAL; 497 char *root_dir = (char *)""; 498 char *name = malloc(PATH_MAX); 499 bool decomp = false; 500 501 if (!name) 502 return -ENOMEM; 503 504 if (machine) 505 root_dir = machine->root_dir; 506 507 if (dso__read_binary_type_filename(dso, dso->binary_type, 508 root_dir, name, PATH_MAX)) 509 goto out; 510 511 if (!is_regular_file(name)) 512 goto out; 513 514 if (dso__needs_decompress(dso)) { 515 char newpath[KMOD_DECOMP_LEN]; 516 size_t len = sizeof(newpath); 517 518 if (dso__decompress_kmodule_path(dso, name, newpath, len) < 0) { 519 fd = -dso->load_errno; 520 goto out; 521 } 522 523 decomp = true; 524 strcpy(name, newpath); 525 } 526 527 fd = do_open(name); 528 529 if (decomp) 530 unlink(name); 531 532 out: 533 free(name); 534 return fd; 535 } 536 537 static void check_data_close(void); 538 539 /** 540 * dso_close - Open DSO data file 541 * @dso: dso object 542 * 543 * Open @dso's data file descriptor and updates 544 * list/count of open DSO objects. 545 */ 546 static int open_dso(struct dso *dso, struct machine *machine) 547 { 548 int fd; 549 struct nscookie nsc; 550 551 if (dso->binary_type != DSO_BINARY_TYPE__BUILD_ID_CACHE) 552 nsinfo__mountns_enter(dso->nsinfo, &nsc); 553 fd = __open_dso(dso, machine); 554 if (dso->binary_type != DSO_BINARY_TYPE__BUILD_ID_CACHE) 555 nsinfo__mountns_exit(&nsc); 556 557 if (fd >= 0) { 558 dso__list_add(dso); 559 /* 560 * Check if we crossed the allowed number 561 * of opened DSOs and close one if needed. 562 */ 563 check_data_close(); 564 } 565 566 return fd; 567 } 568 569 static void close_data_fd(struct dso *dso) 570 { 571 if (dso->data.fd >= 0) { 572 close(dso->data.fd); 573 dso->data.fd = -1; 574 dso->data.file_size = 0; 575 dso__list_del(dso); 576 } 577 } 578 579 /** 580 * dso_close - Close DSO data file 581 * @dso: dso object 582 * 583 * Close @dso's data file descriptor and updates 584 * list/count of open DSO objects. 585 */ 586 static void close_dso(struct dso *dso) 587 { 588 close_data_fd(dso); 589 } 590 591 static void close_first_dso(void) 592 { 593 struct dso *dso; 594 595 dso = list_first_entry(&dso__data_open, struct dso, data.open_entry); 596 close_dso(dso); 597 } 598 599 static rlim_t get_fd_limit(void) 600 { 601 struct rlimit l; 602 rlim_t limit = 0; 603 604 /* Allow half of the current open fd limit. */ 605 if (getrlimit(RLIMIT_NOFILE, &l) == 0) { 606 if (l.rlim_cur == RLIM_INFINITY) 607 limit = l.rlim_cur; 608 else 609 limit = l.rlim_cur / 2; 610 } else { 611 pr_err("failed to get fd limit\n"); 612 limit = 1; 613 } 614 615 return limit; 616 } 617 618 static rlim_t fd_limit; 619 620 /* 621 * Used only by tests/dso-data.c to reset the environment 622 * for tests. I dont expect we should change this during 623 * standard runtime. 624 */ 625 void reset_fd_limit(void) 626 { 627 fd_limit = 0; 628 } 629 630 static bool may_cache_fd(void) 631 { 632 if (!fd_limit) 633 fd_limit = get_fd_limit(); 634 635 if (fd_limit == RLIM_INFINITY) 636 return true; 637 638 return fd_limit > (rlim_t) dso__data_open_cnt; 639 } 640 641 /* 642 * Check and close LRU dso if we crossed allowed limit 643 * for opened dso file descriptors. The limit is half 644 * of the RLIMIT_NOFILE files opened. 645 */ 646 static void check_data_close(void) 647 { 648 bool cache_fd = may_cache_fd(); 649 650 if (!cache_fd) 651 close_first_dso(); 652 } 653 654 /** 655 * dso__data_close - Close DSO data file 656 * @dso: dso object 657 * 658 * External interface to close @dso's data file descriptor. 659 */ 660 void dso__data_close(struct dso *dso) 661 { 662 pthread_mutex_lock(&dso__data_open_lock); 663 close_dso(dso); 664 pthread_mutex_unlock(&dso__data_open_lock); 665 } 666 667 static void try_to_open_dso(struct dso *dso, struct machine *machine) 668 { 669 enum dso_binary_type binary_type_data[] = { 670 DSO_BINARY_TYPE__BUILD_ID_CACHE, 671 DSO_BINARY_TYPE__SYSTEM_PATH_DSO, 672 DSO_BINARY_TYPE__NOT_FOUND, 673 }; 674 int i = 0; 675 676 if (dso->data.fd >= 0) 677 return; 678 679 if (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND) { 680 dso->data.fd = open_dso(dso, machine); 681 goto out; 682 } 683 684 do { 685 dso->binary_type = binary_type_data[i++]; 686 687 dso->data.fd = open_dso(dso, machine); 688 if (dso->data.fd >= 0) 689 goto out; 690 691 } while (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND); 692 out: 693 if (dso->data.fd >= 0) 694 dso->data.status = DSO_DATA_STATUS_OK; 695 else 696 dso->data.status = DSO_DATA_STATUS_ERROR; 697 } 698 699 /** 700 * dso__data_get_fd - Get dso's data file descriptor 701 * @dso: dso object 702 * @machine: machine object 703 * 704 * External interface to find dso's file, open it and 705 * returns file descriptor. It should be paired with 706 * dso__data_put_fd() if it returns non-negative value. 707 */ 708 int dso__data_get_fd(struct dso *dso, struct machine *machine) 709 { 710 if (dso->data.status == DSO_DATA_STATUS_ERROR) 711 return -1; 712 713 if (pthread_mutex_lock(&dso__data_open_lock) < 0) 714 return -1; 715 716 try_to_open_dso(dso, machine); 717 718 if (dso->data.fd < 0) 719 pthread_mutex_unlock(&dso__data_open_lock); 720 721 return dso->data.fd; 722 } 723 724 void dso__data_put_fd(struct dso *dso __maybe_unused) 725 { 726 pthread_mutex_unlock(&dso__data_open_lock); 727 } 728 729 bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by) 730 { 731 u32 flag = 1 << by; 732 733 if (dso->data.status_seen & flag) 734 return true; 735 736 dso->data.status_seen |= flag; 737 738 return false; 739 } 740 741 #ifdef HAVE_LIBBPF_SUPPORT 742 static ssize_t bpf_read(struct dso *dso, u64 offset, char *data) 743 { 744 struct bpf_prog_info_node *node; 745 ssize_t size = DSO__DATA_CACHE_SIZE; 746 u64 len; 747 u8 *buf; 748 749 node = perf_env__find_bpf_prog_info(dso->bpf_prog.env, dso->bpf_prog.id); 750 if (!node || !node->info_linear) { 751 dso->data.status = DSO_DATA_STATUS_ERROR; 752 return -1; 753 } 754 755 len = node->info_linear->info.jited_prog_len; 756 buf = (u8 *)(uintptr_t)node->info_linear->info.jited_prog_insns; 757 758 if (offset >= len) 759 return -1; 760 761 size = (ssize_t)min(len - offset, (u64)size); 762 memcpy(data, buf + offset, size); 763 return size; 764 } 765 766 static int bpf_size(struct dso *dso) 767 { 768 struct bpf_prog_info_node *node; 769 770 node = perf_env__find_bpf_prog_info(dso->bpf_prog.env, dso->bpf_prog.id); 771 if (!node || !node->info_linear) { 772 dso->data.status = DSO_DATA_STATUS_ERROR; 773 return -1; 774 } 775 776 dso->data.file_size = node->info_linear->info.jited_prog_len; 777 return 0; 778 } 779 #endif // HAVE_LIBBPF_SUPPORT 780 781 static void 782 dso_cache__free(struct dso *dso) 783 { 784 struct rb_root *root = &dso->data.cache; 785 struct rb_node *next = rb_first(root); 786 787 pthread_mutex_lock(&dso->lock); 788 while (next) { 789 struct dso_cache *cache; 790 791 cache = rb_entry(next, struct dso_cache, rb_node); 792 next = rb_next(&cache->rb_node); 793 rb_erase(&cache->rb_node, root); 794 free(cache); 795 } 796 pthread_mutex_unlock(&dso->lock); 797 } 798 799 static struct dso_cache *__dso_cache__find(struct dso *dso, u64 offset) 800 { 801 const struct rb_root *root = &dso->data.cache; 802 struct rb_node * const *p = &root->rb_node; 803 const struct rb_node *parent = NULL; 804 struct dso_cache *cache; 805 806 while (*p != NULL) { 807 u64 end; 808 809 parent = *p; 810 cache = rb_entry(parent, struct dso_cache, rb_node); 811 end = cache->offset + DSO__DATA_CACHE_SIZE; 812 813 if (offset < cache->offset) 814 p = &(*p)->rb_left; 815 else if (offset >= end) 816 p = &(*p)->rb_right; 817 else 818 return cache; 819 } 820 821 return NULL; 822 } 823 824 static struct dso_cache * 825 dso_cache__insert(struct dso *dso, struct dso_cache *new) 826 { 827 struct rb_root *root = &dso->data.cache; 828 struct rb_node **p = &root->rb_node; 829 struct rb_node *parent = NULL; 830 struct dso_cache *cache; 831 u64 offset = new->offset; 832 833 pthread_mutex_lock(&dso->lock); 834 while (*p != NULL) { 835 u64 end; 836 837 parent = *p; 838 cache = rb_entry(parent, struct dso_cache, rb_node); 839 end = cache->offset + DSO__DATA_CACHE_SIZE; 840 841 if (offset < cache->offset) 842 p = &(*p)->rb_left; 843 else if (offset >= end) 844 p = &(*p)->rb_right; 845 else 846 goto out; 847 } 848 849 rb_link_node(&new->rb_node, parent, p); 850 rb_insert_color(&new->rb_node, root); 851 852 cache = NULL; 853 out: 854 pthread_mutex_unlock(&dso->lock); 855 return cache; 856 } 857 858 static ssize_t dso_cache__memcpy(struct dso_cache *cache, u64 offset, u8 *data, 859 u64 size, bool out) 860 { 861 u64 cache_offset = offset - cache->offset; 862 u64 cache_size = min(cache->size - cache_offset, size); 863 864 if (out) 865 memcpy(data, cache->data + cache_offset, cache_size); 866 else 867 memcpy(cache->data + cache_offset, data, cache_size); 868 return cache_size; 869 } 870 871 static ssize_t file_read(struct dso *dso, struct machine *machine, 872 u64 offset, char *data) 873 { 874 ssize_t ret; 875 876 pthread_mutex_lock(&dso__data_open_lock); 877 878 /* 879 * dso->data.fd might be closed if other thread opened another 880 * file (dso) due to open file limit (RLIMIT_NOFILE). 881 */ 882 try_to_open_dso(dso, machine); 883 884 if (dso->data.fd < 0) { 885 dso->data.status = DSO_DATA_STATUS_ERROR; 886 ret = -errno; 887 goto out; 888 } 889 890 ret = pread(dso->data.fd, data, DSO__DATA_CACHE_SIZE, offset); 891 out: 892 pthread_mutex_unlock(&dso__data_open_lock); 893 return ret; 894 } 895 896 static struct dso_cache *dso_cache__populate(struct dso *dso, 897 struct machine *machine, 898 u64 offset, ssize_t *ret) 899 { 900 u64 cache_offset = offset & DSO__DATA_CACHE_MASK; 901 struct dso_cache *cache; 902 struct dso_cache *old; 903 904 cache = zalloc(sizeof(*cache) + DSO__DATA_CACHE_SIZE); 905 if (!cache) { 906 *ret = -ENOMEM; 907 return NULL; 908 } 909 #ifdef HAVE_LIBBPF_SUPPORT 910 if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO) 911 *ret = bpf_read(dso, cache_offset, cache->data); 912 else 913 #endif 914 if (dso->binary_type == DSO_BINARY_TYPE__OOL) 915 *ret = DSO__DATA_CACHE_SIZE; 916 else 917 *ret = file_read(dso, machine, cache_offset, cache->data); 918 919 if (*ret <= 0) { 920 free(cache); 921 return NULL; 922 } 923 924 cache->offset = cache_offset; 925 cache->size = *ret; 926 927 old = dso_cache__insert(dso, cache); 928 if (old) { 929 /* we lose the race */ 930 free(cache); 931 cache = old; 932 } 933 934 return cache; 935 } 936 937 static struct dso_cache *dso_cache__find(struct dso *dso, 938 struct machine *machine, 939 u64 offset, 940 ssize_t *ret) 941 { 942 struct dso_cache *cache = __dso_cache__find(dso, offset); 943 944 return cache ? cache : dso_cache__populate(dso, machine, offset, ret); 945 } 946 947 static ssize_t dso_cache_io(struct dso *dso, struct machine *machine, 948 u64 offset, u8 *data, ssize_t size, bool out) 949 { 950 struct dso_cache *cache; 951 ssize_t ret = 0; 952 953 cache = dso_cache__find(dso, machine, offset, &ret); 954 if (!cache) 955 return ret; 956 957 return dso_cache__memcpy(cache, offset, data, size, out); 958 } 959 960 /* 961 * Reads and caches dso data DSO__DATA_CACHE_SIZE size chunks 962 * in the rb_tree. Any read to already cached data is served 963 * by cached data. Writes update the cache only, not the backing file. 964 */ 965 static ssize_t cached_io(struct dso *dso, struct machine *machine, 966 u64 offset, u8 *data, ssize_t size, bool out) 967 { 968 ssize_t r = 0; 969 u8 *p = data; 970 971 do { 972 ssize_t ret; 973 974 ret = dso_cache_io(dso, machine, offset, p, size, out); 975 if (ret < 0) 976 return ret; 977 978 /* Reached EOF, return what we have. */ 979 if (!ret) 980 break; 981 982 BUG_ON(ret > size); 983 984 r += ret; 985 p += ret; 986 offset += ret; 987 size -= ret; 988 989 } while (size); 990 991 return r; 992 } 993 994 static int file_size(struct dso *dso, struct machine *machine) 995 { 996 int ret = 0; 997 struct stat st; 998 char sbuf[STRERR_BUFSIZE]; 999 1000 pthread_mutex_lock(&dso__data_open_lock); 1001 1002 /* 1003 * dso->data.fd might be closed if other thread opened another 1004 * file (dso) due to open file limit (RLIMIT_NOFILE). 1005 */ 1006 try_to_open_dso(dso, machine); 1007 1008 if (dso->data.fd < 0) { 1009 ret = -errno; 1010 dso->data.status = DSO_DATA_STATUS_ERROR; 1011 goto out; 1012 } 1013 1014 if (fstat(dso->data.fd, &st) < 0) { 1015 ret = -errno; 1016 pr_err("dso cache fstat failed: %s\n", 1017 str_error_r(errno, sbuf, sizeof(sbuf))); 1018 dso->data.status = DSO_DATA_STATUS_ERROR; 1019 goto out; 1020 } 1021 dso->data.file_size = st.st_size; 1022 1023 out: 1024 pthread_mutex_unlock(&dso__data_open_lock); 1025 return ret; 1026 } 1027 1028 int dso__data_file_size(struct dso *dso, struct machine *machine) 1029 { 1030 if (dso->data.file_size) 1031 return 0; 1032 1033 if (dso->data.status == DSO_DATA_STATUS_ERROR) 1034 return -1; 1035 #ifdef HAVE_LIBBPF_SUPPORT 1036 if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO) 1037 return bpf_size(dso); 1038 #endif 1039 return file_size(dso, machine); 1040 } 1041 1042 /** 1043 * dso__data_size - Return dso data size 1044 * @dso: dso object 1045 * @machine: machine object 1046 * 1047 * Return: dso data size 1048 */ 1049 off_t dso__data_size(struct dso *dso, struct machine *machine) 1050 { 1051 if (dso__data_file_size(dso, machine)) 1052 return -1; 1053 1054 /* For now just estimate dso data size is close to file size */ 1055 return dso->data.file_size; 1056 } 1057 1058 static ssize_t data_read_write_offset(struct dso *dso, struct machine *machine, 1059 u64 offset, u8 *data, ssize_t size, 1060 bool out) 1061 { 1062 if (dso__data_file_size(dso, machine)) 1063 return -1; 1064 1065 /* Check the offset sanity. */ 1066 if (offset > dso->data.file_size) 1067 return -1; 1068 1069 if (offset + size < offset) 1070 return -1; 1071 1072 return cached_io(dso, machine, offset, data, size, out); 1073 } 1074 1075 /** 1076 * dso__data_read_offset - Read data from dso file offset 1077 * @dso: dso object 1078 * @machine: machine object 1079 * @offset: file offset 1080 * @data: buffer to store data 1081 * @size: size of the @data buffer 1082 * 1083 * External interface to read data from dso file offset. Open 1084 * dso data file and use cached_read to get the data. 1085 */ 1086 ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine, 1087 u64 offset, u8 *data, ssize_t size) 1088 { 1089 if (dso->data.status == DSO_DATA_STATUS_ERROR) 1090 return -1; 1091 1092 return data_read_write_offset(dso, machine, offset, data, size, true); 1093 } 1094 1095 /** 1096 * dso__data_read_addr - Read data from dso address 1097 * @dso: dso object 1098 * @machine: machine object 1099 * @add: virtual memory address 1100 * @data: buffer to store data 1101 * @size: size of the @data buffer 1102 * 1103 * External interface to read data from dso address. 1104 */ 1105 ssize_t dso__data_read_addr(struct dso *dso, struct map *map, 1106 struct machine *machine, u64 addr, 1107 u8 *data, ssize_t size) 1108 { 1109 u64 offset = map->map_ip(map, addr); 1110 return dso__data_read_offset(dso, machine, offset, data, size); 1111 } 1112 1113 /** 1114 * dso__data_write_cache_offs - Write data to dso data cache at file offset 1115 * @dso: dso object 1116 * @machine: machine object 1117 * @offset: file offset 1118 * @data: buffer to write 1119 * @size: size of the @data buffer 1120 * 1121 * Write into the dso file data cache, but do not change the file itself. 1122 */ 1123 ssize_t dso__data_write_cache_offs(struct dso *dso, struct machine *machine, 1124 u64 offset, const u8 *data_in, ssize_t size) 1125 { 1126 u8 *data = (u8 *)data_in; /* cast away const to use same fns for r/w */ 1127 1128 if (dso->data.status == DSO_DATA_STATUS_ERROR) 1129 return -1; 1130 1131 return data_read_write_offset(dso, machine, offset, data, size, false); 1132 } 1133 1134 /** 1135 * dso__data_write_cache_addr - Write data to dso data cache at dso address 1136 * @dso: dso object 1137 * @machine: machine object 1138 * @add: virtual memory address 1139 * @data: buffer to write 1140 * @size: size of the @data buffer 1141 * 1142 * External interface to write into the dso file data cache, but do not change 1143 * the file itself. 1144 */ 1145 ssize_t dso__data_write_cache_addr(struct dso *dso, struct map *map, 1146 struct machine *machine, u64 addr, 1147 const u8 *data, ssize_t size) 1148 { 1149 u64 offset = map->map_ip(map, addr); 1150 return dso__data_write_cache_offs(dso, machine, offset, data, size); 1151 } 1152 1153 struct map *dso__new_map(const char *name) 1154 { 1155 struct map *map = NULL; 1156 struct dso *dso = dso__new(name); 1157 1158 if (dso) { 1159 map = map__new2(0, dso); 1160 dso__put(dso); 1161 } 1162 1163 return map; 1164 } 1165 1166 struct dso *machine__findnew_kernel(struct machine *machine, const char *name, 1167 const char *short_name, int dso_type) 1168 { 1169 /* 1170 * The kernel dso could be created by build_id processing. 1171 */ 1172 struct dso *dso = machine__findnew_dso(machine, name); 1173 1174 /* 1175 * We need to run this in all cases, since during the build_id 1176 * processing we had no idea this was the kernel dso. 1177 */ 1178 if (dso != NULL) { 1179 dso__set_short_name(dso, short_name, false); 1180 dso->kernel = dso_type; 1181 } 1182 1183 return dso; 1184 } 1185 1186 static void dso__set_long_name_id(struct dso *dso, const char *name, struct dso_id *id, bool name_allocated) 1187 { 1188 struct rb_root *root = dso->root; 1189 1190 if (name == NULL) 1191 return; 1192 1193 if (dso->long_name_allocated) 1194 free((char *)dso->long_name); 1195 1196 if (root) { 1197 rb_erase(&dso->rb_node, root); 1198 /* 1199 * __dsos__findnew_link_by_longname_id() isn't guaranteed to 1200 * add it back, so a clean removal is required here. 1201 */ 1202 RB_CLEAR_NODE(&dso->rb_node); 1203 dso->root = NULL; 1204 } 1205 1206 dso->long_name = name; 1207 dso->long_name_len = strlen(name); 1208 dso->long_name_allocated = name_allocated; 1209 1210 if (root) 1211 __dsos__findnew_link_by_longname_id(root, dso, NULL, id); 1212 } 1213 1214 void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated) 1215 { 1216 dso__set_long_name_id(dso, name, NULL, name_allocated); 1217 } 1218 1219 void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated) 1220 { 1221 if (name == NULL) 1222 return; 1223 1224 if (dso->short_name_allocated) 1225 free((char *)dso->short_name); 1226 1227 dso->short_name = name; 1228 dso->short_name_len = strlen(name); 1229 dso->short_name_allocated = name_allocated; 1230 } 1231 1232 int dso__name_len(const struct dso *dso) 1233 { 1234 if (!dso) 1235 return strlen("[unknown]"); 1236 if (verbose > 0) 1237 return dso->long_name_len; 1238 1239 return dso->short_name_len; 1240 } 1241 1242 bool dso__loaded(const struct dso *dso) 1243 { 1244 return dso->loaded; 1245 } 1246 1247 bool dso__sorted_by_name(const struct dso *dso) 1248 { 1249 return dso->sorted_by_name; 1250 } 1251 1252 void dso__set_sorted_by_name(struct dso *dso) 1253 { 1254 dso->sorted_by_name = true; 1255 } 1256 1257 struct dso *dso__new_id(const char *name, struct dso_id *id) 1258 { 1259 struct dso *dso = calloc(1, sizeof(*dso) + strlen(name) + 1); 1260 1261 if (dso != NULL) { 1262 strcpy(dso->name, name); 1263 if (id) 1264 dso->id = *id; 1265 dso__set_long_name_id(dso, dso->name, id, false); 1266 dso__set_short_name(dso, dso->name, false); 1267 dso->symbols = dso->symbol_names = RB_ROOT_CACHED; 1268 dso->data.cache = RB_ROOT; 1269 dso->inlined_nodes = RB_ROOT_CACHED; 1270 dso->srclines = RB_ROOT_CACHED; 1271 dso->data.fd = -1; 1272 dso->data.status = DSO_DATA_STATUS_UNKNOWN; 1273 dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND; 1274 dso->binary_type = DSO_BINARY_TYPE__NOT_FOUND; 1275 dso->is_64_bit = (sizeof(void *) == 8); 1276 dso->loaded = 0; 1277 dso->rel = 0; 1278 dso->sorted_by_name = 0; 1279 dso->has_build_id = 0; 1280 dso->has_srcline = 1; 1281 dso->a2l_fails = 1; 1282 dso->kernel = DSO_SPACE__USER; 1283 dso->needs_swap = DSO_SWAP__UNSET; 1284 dso->comp = COMP_ID__NONE; 1285 RB_CLEAR_NODE(&dso->rb_node); 1286 dso->root = NULL; 1287 INIT_LIST_HEAD(&dso->node); 1288 INIT_LIST_HEAD(&dso->data.open_entry); 1289 pthread_mutex_init(&dso->lock, NULL); 1290 refcount_set(&dso->refcnt, 1); 1291 } 1292 1293 return dso; 1294 } 1295 1296 struct dso *dso__new(const char *name) 1297 { 1298 return dso__new_id(name, NULL); 1299 } 1300 1301 void dso__delete(struct dso *dso) 1302 { 1303 if (!RB_EMPTY_NODE(&dso->rb_node)) 1304 pr_err("DSO %s is still in rbtree when being deleted!\n", 1305 dso->long_name); 1306 1307 /* free inlines first, as they reference symbols */ 1308 inlines__tree_delete(&dso->inlined_nodes); 1309 srcline__tree_delete(&dso->srclines); 1310 symbols__delete(&dso->symbols); 1311 1312 if (dso->short_name_allocated) { 1313 zfree((char **)&dso->short_name); 1314 dso->short_name_allocated = false; 1315 } 1316 1317 if (dso->long_name_allocated) { 1318 zfree((char **)&dso->long_name); 1319 dso->long_name_allocated = false; 1320 } 1321 1322 dso__data_close(dso); 1323 auxtrace_cache__free(dso->auxtrace_cache); 1324 dso_cache__free(dso); 1325 dso__free_a2l(dso); 1326 zfree(&dso->symsrc_filename); 1327 nsinfo__zput(dso->nsinfo); 1328 pthread_mutex_destroy(&dso->lock); 1329 free(dso); 1330 } 1331 1332 struct dso *dso__get(struct dso *dso) 1333 { 1334 if (dso) 1335 refcount_inc(&dso->refcnt); 1336 return dso; 1337 } 1338 1339 void dso__put(struct dso *dso) 1340 { 1341 if (dso && refcount_dec_and_test(&dso->refcnt)) 1342 dso__delete(dso); 1343 } 1344 1345 void dso__set_build_id(struct dso *dso, struct build_id *bid) 1346 { 1347 dso->bid = *bid; 1348 dso->has_build_id = 1; 1349 } 1350 1351 bool dso__build_id_equal(const struct dso *dso, struct build_id *bid) 1352 { 1353 if (dso->bid.size > bid->size && dso->bid.size == BUILD_ID_SIZE) { 1354 /* 1355 * For the backward compatibility, it allows a build-id has 1356 * trailing zeros. 1357 */ 1358 return !memcmp(dso->bid.data, bid->data, bid->size) && 1359 !memchr_inv(&dso->bid.data[bid->size], 0, 1360 dso->bid.size - bid->size); 1361 } 1362 1363 return dso->bid.size == bid->size && 1364 memcmp(dso->bid.data, bid->data, dso->bid.size) == 0; 1365 } 1366 1367 void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine) 1368 { 1369 char path[PATH_MAX]; 1370 1371 if (machine__is_default_guest(machine)) 1372 return; 1373 sprintf(path, "%s/sys/kernel/notes", machine->root_dir); 1374 if (sysfs__read_build_id(path, &dso->bid) == 0) 1375 dso->has_build_id = true; 1376 } 1377 1378 int dso__kernel_module_get_build_id(struct dso *dso, 1379 const char *root_dir) 1380 { 1381 char filename[PATH_MAX]; 1382 /* 1383 * kernel module short names are of the form "[module]" and 1384 * we need just "module" here. 1385 */ 1386 const char *name = dso->short_name + 1; 1387 1388 snprintf(filename, sizeof(filename), 1389 "%s/sys/module/%.*s/notes/.note.gnu.build-id", 1390 root_dir, (int)strlen(name) - 1, name); 1391 1392 if (sysfs__read_build_id(filename, &dso->bid) == 0) 1393 dso->has_build_id = true; 1394 1395 return 0; 1396 } 1397 1398 static size_t dso__fprintf_buildid(struct dso *dso, FILE *fp) 1399 { 1400 char sbuild_id[SBUILD_ID_SIZE]; 1401 1402 build_id__sprintf(&dso->bid, sbuild_id); 1403 return fprintf(fp, "%s", sbuild_id); 1404 } 1405 1406 size_t dso__fprintf(struct dso *dso, FILE *fp) 1407 { 1408 struct rb_node *nd; 1409 size_t ret = fprintf(fp, "dso: %s (", dso->short_name); 1410 1411 if (dso->short_name != dso->long_name) 1412 ret += fprintf(fp, "%s, ", dso->long_name); 1413 ret += fprintf(fp, "%sloaded, ", dso__loaded(dso) ? "" : "NOT "); 1414 ret += dso__fprintf_buildid(dso, fp); 1415 ret += fprintf(fp, ")\n"); 1416 for (nd = rb_first_cached(&dso->symbols); nd; nd = rb_next(nd)) { 1417 struct symbol *pos = rb_entry(nd, struct symbol, rb_node); 1418 ret += symbol__fprintf(pos, fp); 1419 } 1420 1421 return ret; 1422 } 1423 1424 enum dso_type dso__type(struct dso *dso, struct machine *machine) 1425 { 1426 int fd; 1427 enum dso_type type = DSO__TYPE_UNKNOWN; 1428 1429 fd = dso__data_get_fd(dso, machine); 1430 if (fd >= 0) { 1431 type = dso__type_fd(fd); 1432 dso__data_put_fd(dso); 1433 } 1434 1435 return type; 1436 } 1437 1438 int dso__strerror_load(struct dso *dso, char *buf, size_t buflen) 1439 { 1440 int idx, errnum = dso->load_errno; 1441 /* 1442 * This must have a same ordering as the enum dso_load_errno. 1443 */ 1444 static const char *dso_load__error_str[] = { 1445 "Internal tools/perf/ library error", 1446 "Invalid ELF file", 1447 "Can not read build id", 1448 "Mismatching build id", 1449 "Decompression failure", 1450 }; 1451 1452 BUG_ON(buflen == 0); 1453 1454 if (errnum >= 0) { 1455 const char *err = str_error_r(errnum, buf, buflen); 1456 1457 if (err != buf) 1458 scnprintf(buf, buflen, "%s", err); 1459 1460 return 0; 1461 } 1462 1463 if (errnum < __DSO_LOAD_ERRNO__START || errnum >= __DSO_LOAD_ERRNO__END) 1464 return -1; 1465 1466 idx = errnum - __DSO_LOAD_ERRNO__START; 1467 scnprintf(buf, buflen, "%s", dso_load__error_str[idx]); 1468 return 0; 1469 } 1470