1 // SPDX-License-Identifier: GPL-2.0 2 #include <asm/bug.h> 3 #include <linux/kernel.h> 4 #include <sys/time.h> 5 #include <sys/resource.h> 6 #include <sys/types.h> 7 #include <sys/stat.h> 8 #include <unistd.h> 9 #include <errno.h> 10 #include <fcntl.h> 11 #include <libgen.h> 12 #include "compress.h" 13 #include "namespaces.h" 14 #include "path.h" 15 #include "map.h" 16 #include "symbol.h" 17 #include "srcline.h" 18 #include "dso.h" 19 #include "machine.h" 20 #include "auxtrace.h" 21 #include "util.h" 22 #include "debug.h" 23 #include "string2.h" 24 #include "vdso.h" 25 26 static const char * const debuglink_paths[] = { 27 "%.0s%s", 28 "%s/%s", 29 "%s/.debug/%s", 30 "/usr/lib/debug%s/%s" 31 }; 32 33 char dso__symtab_origin(const struct dso *dso) 34 { 35 static const char origin[] = { 36 [DSO_BINARY_TYPE__KALLSYMS] = 'k', 37 [DSO_BINARY_TYPE__VMLINUX] = 'v', 38 [DSO_BINARY_TYPE__JAVA_JIT] = 'j', 39 [DSO_BINARY_TYPE__DEBUGLINK] = 'l', 40 [DSO_BINARY_TYPE__BUILD_ID_CACHE] = 'B', 41 [DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO] = 'D', 42 [DSO_BINARY_TYPE__FEDORA_DEBUGINFO] = 'f', 43 [DSO_BINARY_TYPE__UBUNTU_DEBUGINFO] = 'u', 44 [DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO] = 'o', 45 [DSO_BINARY_TYPE__BUILDID_DEBUGINFO] = 'b', 46 [DSO_BINARY_TYPE__SYSTEM_PATH_DSO] = 'd', 47 [DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE] = 'K', 48 [DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP] = 'm', 49 [DSO_BINARY_TYPE__GUEST_KALLSYMS] = 'g', 50 [DSO_BINARY_TYPE__GUEST_KMODULE] = 'G', 51 [DSO_BINARY_TYPE__GUEST_KMODULE_COMP] = 'M', 52 [DSO_BINARY_TYPE__GUEST_VMLINUX] = 'V', 53 }; 54 55 if (dso == NULL || dso->symtab_type == DSO_BINARY_TYPE__NOT_FOUND) 56 return '!'; 57 return origin[dso->symtab_type]; 58 } 59 60 int dso__read_binary_type_filename(const struct dso *dso, 61 enum dso_binary_type type, 62 char *root_dir, char *filename, size_t size) 63 { 64 char build_id_hex[SBUILD_ID_SIZE]; 65 int ret = 0; 66 size_t len; 67 68 switch (type) { 69 case DSO_BINARY_TYPE__DEBUGLINK: 70 { 71 const char *last_slash; 72 char dso_dir[PATH_MAX]; 73 char symfile[PATH_MAX]; 74 unsigned int i; 75 76 len = __symbol__join_symfs(filename, size, dso->long_name); 77 last_slash = filename + len; 78 while (last_slash != filename && *last_slash != '/') 79 last_slash--; 80 81 strncpy(dso_dir, filename, last_slash - filename); 82 dso_dir[last_slash-filename] = '\0'; 83 84 if (!is_regular_file(filename)) { 85 ret = -1; 86 break; 87 } 88 89 ret = filename__read_debuglink(filename, symfile, PATH_MAX); 90 if (ret) 91 break; 92 93 /* Check predefined locations where debug file might reside */ 94 ret = -1; 95 for (i = 0; i < ARRAY_SIZE(debuglink_paths); i++) { 96 snprintf(filename, size, 97 debuglink_paths[i], dso_dir, symfile); 98 if (is_regular_file(filename)) { 99 ret = 0; 100 break; 101 } 102 } 103 104 break; 105 } 106 case DSO_BINARY_TYPE__BUILD_ID_CACHE: 107 if (dso__build_id_filename(dso, filename, size, false) == NULL) 108 ret = -1; 109 break; 110 111 case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO: 112 if (dso__build_id_filename(dso, filename, size, true) == NULL) 113 ret = -1; 114 break; 115 116 case DSO_BINARY_TYPE__FEDORA_DEBUGINFO: 117 len = __symbol__join_symfs(filename, size, "/usr/lib/debug"); 118 snprintf(filename + len, size - len, "%s.debug", dso->long_name); 119 break; 120 121 case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO: 122 len = __symbol__join_symfs(filename, size, "/usr/lib/debug"); 123 snprintf(filename + len, size - len, "%s", dso->long_name); 124 break; 125 126 case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO: 127 { 128 const char *last_slash; 129 size_t dir_size; 130 131 last_slash = dso->long_name + dso->long_name_len; 132 while (last_slash != dso->long_name && *last_slash != '/') 133 last_slash--; 134 135 len = __symbol__join_symfs(filename, size, ""); 136 dir_size = last_slash - dso->long_name + 2; 137 if (dir_size > (size - len)) { 138 ret = -1; 139 break; 140 } 141 len += scnprintf(filename + len, dir_size, "%s", dso->long_name); 142 len += scnprintf(filename + len , size - len, ".debug%s", 143 last_slash); 144 break; 145 } 146 147 case DSO_BINARY_TYPE__BUILDID_DEBUGINFO: 148 if (!dso->has_build_id) { 149 ret = -1; 150 break; 151 } 152 153 build_id__sprintf(dso->build_id, 154 sizeof(dso->build_id), 155 build_id_hex); 156 len = __symbol__join_symfs(filename, size, "/usr/lib/debug/.build-id/"); 157 snprintf(filename + len, size - len, "%.2s/%s.debug", 158 build_id_hex, build_id_hex + 2); 159 break; 160 161 case DSO_BINARY_TYPE__VMLINUX: 162 case DSO_BINARY_TYPE__GUEST_VMLINUX: 163 case DSO_BINARY_TYPE__SYSTEM_PATH_DSO: 164 __symbol__join_symfs(filename, size, dso->long_name); 165 break; 166 167 case DSO_BINARY_TYPE__GUEST_KMODULE: 168 case DSO_BINARY_TYPE__GUEST_KMODULE_COMP: 169 path__join3(filename, size, symbol_conf.symfs, 170 root_dir, dso->long_name); 171 break; 172 173 case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE: 174 case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP: 175 __symbol__join_symfs(filename, size, dso->long_name); 176 break; 177 178 case DSO_BINARY_TYPE__KCORE: 179 case DSO_BINARY_TYPE__GUEST_KCORE: 180 snprintf(filename, size, "%s", dso->long_name); 181 break; 182 183 default: 184 case DSO_BINARY_TYPE__KALLSYMS: 185 case DSO_BINARY_TYPE__GUEST_KALLSYMS: 186 case DSO_BINARY_TYPE__JAVA_JIT: 187 case DSO_BINARY_TYPE__BPF_PROG_INFO: 188 case DSO_BINARY_TYPE__NOT_FOUND: 189 ret = -1; 190 break; 191 } 192 193 return ret; 194 } 195 196 enum { 197 COMP_ID__NONE = 0, 198 }; 199 200 static const struct { 201 const char *fmt; 202 int (*decompress)(const char *input, int output); 203 bool (*is_compressed)(const char *input); 204 } compressions[] = { 205 [COMP_ID__NONE] = { .fmt = NULL, }, 206 #ifdef HAVE_ZLIB_SUPPORT 207 { "gz", gzip_decompress_to_file, gzip_is_compressed }, 208 #endif 209 #ifdef HAVE_LZMA_SUPPORT 210 { "xz", lzma_decompress_to_file, lzma_is_compressed }, 211 #endif 212 { NULL, NULL, NULL }, 213 }; 214 215 static int is_supported_compression(const char *ext) 216 { 217 unsigned i; 218 219 for (i = 1; compressions[i].fmt; i++) { 220 if (!strcmp(ext, compressions[i].fmt)) 221 return i; 222 } 223 return COMP_ID__NONE; 224 } 225 226 bool is_kernel_module(const char *pathname, int cpumode) 227 { 228 struct kmod_path m; 229 int mode = cpumode & PERF_RECORD_MISC_CPUMODE_MASK; 230 231 WARN_ONCE(mode != cpumode, 232 "Internal error: passing unmasked cpumode (%x) to is_kernel_module", 233 cpumode); 234 235 switch (mode) { 236 case PERF_RECORD_MISC_USER: 237 case PERF_RECORD_MISC_HYPERVISOR: 238 case PERF_RECORD_MISC_GUEST_USER: 239 return false; 240 /* Treat PERF_RECORD_MISC_CPUMODE_UNKNOWN as kernel */ 241 default: 242 if (kmod_path__parse(&m, pathname)) { 243 pr_err("Failed to check whether %s is a kernel module or not. Assume it is.", 244 pathname); 245 return true; 246 } 247 } 248 249 return m.kmod; 250 } 251 252 bool dso__needs_decompress(struct dso *dso) 253 { 254 return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP || 255 dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP; 256 } 257 258 static int decompress_kmodule(struct dso *dso, const char *name, 259 char *pathname, size_t len) 260 { 261 char tmpbuf[] = KMOD_DECOMP_NAME; 262 int fd = -1; 263 264 if (!dso__needs_decompress(dso)) 265 return -1; 266 267 if (dso->comp == COMP_ID__NONE) 268 return -1; 269 270 /* 271 * We have proper compression id for DSO and yet the file 272 * behind the 'name' can still be plain uncompressed object. 273 * 274 * The reason is behind the logic we open the DSO object files, 275 * when we try all possible 'debug' objects until we find the 276 * data. So even if the DSO is represented by 'krava.xz' module, 277 * we can end up here opening ~/.debug/....23432432/debug' file 278 * which is not compressed. 279 * 280 * To keep this transparent, we detect this and return the file 281 * descriptor to the uncompressed file. 282 */ 283 if (!compressions[dso->comp].is_compressed(name)) 284 return open(name, O_RDONLY); 285 286 fd = mkstemp(tmpbuf); 287 if (fd < 0) { 288 dso->load_errno = errno; 289 return -1; 290 } 291 292 if (compressions[dso->comp].decompress(name, fd)) { 293 dso->load_errno = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE; 294 close(fd); 295 fd = -1; 296 } 297 298 if (!pathname || (fd < 0)) 299 unlink(tmpbuf); 300 301 if (pathname && (fd >= 0)) 302 strlcpy(pathname, tmpbuf, len); 303 304 return fd; 305 } 306 307 int dso__decompress_kmodule_fd(struct dso *dso, const char *name) 308 { 309 return decompress_kmodule(dso, name, NULL, 0); 310 } 311 312 int dso__decompress_kmodule_path(struct dso *dso, const char *name, 313 char *pathname, size_t len) 314 { 315 int fd = decompress_kmodule(dso, name, pathname, len); 316 317 close(fd); 318 return fd >= 0 ? 0 : -1; 319 } 320 321 /* 322 * Parses kernel module specified in @path and updates 323 * @m argument like: 324 * 325 * @comp - true if @path contains supported compression suffix, 326 * false otherwise 327 * @kmod - true if @path contains '.ko' suffix in right position, 328 * false otherwise 329 * @name - if (@alloc_name && @kmod) is true, it contains strdup-ed base name 330 * of the kernel module without suffixes, otherwise strudup-ed 331 * base name of @path 332 * @ext - if (@alloc_ext && @comp) is true, it contains strdup-ed string 333 * the compression suffix 334 * 335 * Returns 0 if there's no strdup error, -ENOMEM otherwise. 336 */ 337 int __kmod_path__parse(struct kmod_path *m, const char *path, 338 bool alloc_name) 339 { 340 const char *name = strrchr(path, '/'); 341 const char *ext = strrchr(path, '.'); 342 bool is_simple_name = false; 343 344 memset(m, 0x0, sizeof(*m)); 345 name = name ? name + 1 : path; 346 347 /* 348 * '.' is also a valid character for module name. For example: 349 * [aaa.bbb] is a valid module name. '[' should have higher 350 * priority than '.ko' suffix. 351 * 352 * The kernel names are from machine__mmap_name. Such 353 * name should belong to kernel itself, not kernel module. 354 */ 355 if (name[0] == '[') { 356 is_simple_name = true; 357 if ((strncmp(name, "[kernel.kallsyms]", 17) == 0) || 358 (strncmp(name, "[guest.kernel.kallsyms", 22) == 0) || 359 (strncmp(name, "[vdso]", 6) == 0) || 360 (strncmp(name, "[vdso32]", 8) == 0) || 361 (strncmp(name, "[vdsox32]", 9) == 0) || 362 (strncmp(name, "[vsyscall]", 10) == 0)) { 363 m->kmod = false; 364 365 } else 366 m->kmod = true; 367 } 368 369 /* No extension, just return name. */ 370 if ((ext == NULL) || is_simple_name) { 371 if (alloc_name) { 372 m->name = strdup(name); 373 return m->name ? 0 : -ENOMEM; 374 } 375 return 0; 376 } 377 378 m->comp = is_supported_compression(ext + 1); 379 if (m->comp > COMP_ID__NONE) 380 ext -= 3; 381 382 /* Check .ko extension only if there's enough name left. */ 383 if (ext > name) 384 m->kmod = !strncmp(ext, ".ko", 3); 385 386 if (alloc_name) { 387 if (m->kmod) { 388 if (asprintf(&m->name, "[%.*s]", (int) (ext - name), name) == -1) 389 return -ENOMEM; 390 } else { 391 if (asprintf(&m->name, "%s", name) == -1) 392 return -ENOMEM; 393 } 394 395 strxfrchar(m->name, '-', '_'); 396 } 397 398 return 0; 399 } 400 401 void dso__set_module_info(struct dso *dso, struct kmod_path *m, 402 struct machine *machine) 403 { 404 if (machine__is_host(machine)) 405 dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE; 406 else 407 dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE; 408 409 /* _KMODULE_COMP should be next to _KMODULE */ 410 if (m->kmod && m->comp) { 411 dso->symtab_type++; 412 dso->comp = m->comp; 413 } 414 415 dso__set_short_name(dso, strdup(m->name), true); 416 } 417 418 /* 419 * Global list of open DSOs and the counter. 420 */ 421 static LIST_HEAD(dso__data_open); 422 static long dso__data_open_cnt; 423 static pthread_mutex_t dso__data_open_lock = PTHREAD_MUTEX_INITIALIZER; 424 425 static void dso__list_add(struct dso *dso) 426 { 427 list_add_tail(&dso->data.open_entry, &dso__data_open); 428 dso__data_open_cnt++; 429 } 430 431 static void dso__list_del(struct dso *dso) 432 { 433 list_del(&dso->data.open_entry); 434 WARN_ONCE(dso__data_open_cnt <= 0, 435 "DSO data fd counter out of bounds."); 436 dso__data_open_cnt--; 437 } 438 439 static void close_first_dso(void); 440 441 static int do_open(char *name) 442 { 443 int fd; 444 char sbuf[STRERR_BUFSIZE]; 445 446 do { 447 fd = open(name, O_RDONLY|O_CLOEXEC); 448 if (fd >= 0) 449 return fd; 450 451 pr_debug("dso open failed: %s\n", 452 str_error_r(errno, sbuf, sizeof(sbuf))); 453 if (!dso__data_open_cnt || errno != EMFILE) 454 break; 455 456 close_first_dso(); 457 } while (1); 458 459 return -1; 460 } 461 462 static int __open_dso(struct dso *dso, struct machine *machine) 463 { 464 int fd = -EINVAL; 465 char *root_dir = (char *)""; 466 char *name = malloc(PATH_MAX); 467 bool decomp = false; 468 469 if (!name) 470 return -ENOMEM; 471 472 if (machine) 473 root_dir = machine->root_dir; 474 475 if (dso__read_binary_type_filename(dso, dso->binary_type, 476 root_dir, name, PATH_MAX)) 477 goto out; 478 479 if (!is_regular_file(name)) 480 goto out; 481 482 if (dso__needs_decompress(dso)) { 483 char newpath[KMOD_DECOMP_LEN]; 484 size_t len = sizeof(newpath); 485 486 if (dso__decompress_kmodule_path(dso, name, newpath, len) < 0) { 487 fd = -dso->load_errno; 488 goto out; 489 } 490 491 decomp = true; 492 strcpy(name, newpath); 493 } 494 495 fd = do_open(name); 496 497 if (decomp) 498 unlink(name); 499 500 out: 501 free(name); 502 return fd; 503 } 504 505 static void check_data_close(void); 506 507 /** 508 * dso_close - Open DSO data file 509 * @dso: dso object 510 * 511 * Open @dso's data file descriptor and updates 512 * list/count of open DSO objects. 513 */ 514 static int open_dso(struct dso *dso, struct machine *machine) 515 { 516 int fd; 517 struct nscookie nsc; 518 519 if (dso->binary_type != DSO_BINARY_TYPE__BUILD_ID_CACHE) 520 nsinfo__mountns_enter(dso->nsinfo, &nsc); 521 fd = __open_dso(dso, machine); 522 if (dso->binary_type != DSO_BINARY_TYPE__BUILD_ID_CACHE) 523 nsinfo__mountns_exit(&nsc); 524 525 if (fd >= 0) { 526 dso__list_add(dso); 527 /* 528 * Check if we crossed the allowed number 529 * of opened DSOs and close one if needed. 530 */ 531 check_data_close(); 532 } 533 534 return fd; 535 } 536 537 static void close_data_fd(struct dso *dso) 538 { 539 if (dso->data.fd >= 0) { 540 close(dso->data.fd); 541 dso->data.fd = -1; 542 dso->data.file_size = 0; 543 dso__list_del(dso); 544 } 545 } 546 547 /** 548 * dso_close - Close DSO data file 549 * @dso: dso object 550 * 551 * Close @dso's data file descriptor and updates 552 * list/count of open DSO objects. 553 */ 554 static void close_dso(struct dso *dso) 555 { 556 close_data_fd(dso); 557 } 558 559 static void close_first_dso(void) 560 { 561 struct dso *dso; 562 563 dso = list_first_entry(&dso__data_open, struct dso, data.open_entry); 564 close_dso(dso); 565 } 566 567 static rlim_t get_fd_limit(void) 568 { 569 struct rlimit l; 570 rlim_t limit = 0; 571 572 /* Allow half of the current open fd limit. */ 573 if (getrlimit(RLIMIT_NOFILE, &l) == 0) { 574 if (l.rlim_cur == RLIM_INFINITY) 575 limit = l.rlim_cur; 576 else 577 limit = l.rlim_cur / 2; 578 } else { 579 pr_err("failed to get fd limit\n"); 580 limit = 1; 581 } 582 583 return limit; 584 } 585 586 static rlim_t fd_limit; 587 588 /* 589 * Used only by tests/dso-data.c to reset the environment 590 * for tests. I dont expect we should change this during 591 * standard runtime. 592 */ 593 void reset_fd_limit(void) 594 { 595 fd_limit = 0; 596 } 597 598 static bool may_cache_fd(void) 599 { 600 if (!fd_limit) 601 fd_limit = get_fd_limit(); 602 603 if (fd_limit == RLIM_INFINITY) 604 return true; 605 606 return fd_limit > (rlim_t) dso__data_open_cnt; 607 } 608 609 /* 610 * Check and close LRU dso if we crossed allowed limit 611 * for opened dso file descriptors. The limit is half 612 * of the RLIMIT_NOFILE files opened. 613 */ 614 static void check_data_close(void) 615 { 616 bool cache_fd = may_cache_fd(); 617 618 if (!cache_fd) 619 close_first_dso(); 620 } 621 622 /** 623 * dso__data_close - Close DSO data file 624 * @dso: dso object 625 * 626 * External interface to close @dso's data file descriptor. 627 */ 628 void dso__data_close(struct dso *dso) 629 { 630 pthread_mutex_lock(&dso__data_open_lock); 631 close_dso(dso); 632 pthread_mutex_unlock(&dso__data_open_lock); 633 } 634 635 static void try_to_open_dso(struct dso *dso, struct machine *machine) 636 { 637 enum dso_binary_type binary_type_data[] = { 638 DSO_BINARY_TYPE__BUILD_ID_CACHE, 639 DSO_BINARY_TYPE__SYSTEM_PATH_DSO, 640 DSO_BINARY_TYPE__NOT_FOUND, 641 }; 642 int i = 0; 643 644 if (dso->data.fd >= 0) 645 return; 646 647 if (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND) { 648 dso->data.fd = open_dso(dso, machine); 649 goto out; 650 } 651 652 do { 653 dso->binary_type = binary_type_data[i++]; 654 655 dso->data.fd = open_dso(dso, machine); 656 if (dso->data.fd >= 0) 657 goto out; 658 659 } while (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND); 660 out: 661 if (dso->data.fd >= 0) 662 dso->data.status = DSO_DATA_STATUS_OK; 663 else 664 dso->data.status = DSO_DATA_STATUS_ERROR; 665 } 666 667 /** 668 * dso__data_get_fd - Get dso's data file descriptor 669 * @dso: dso object 670 * @machine: machine object 671 * 672 * External interface to find dso's file, open it and 673 * returns file descriptor. It should be paired with 674 * dso__data_put_fd() if it returns non-negative value. 675 */ 676 int dso__data_get_fd(struct dso *dso, struct machine *machine) 677 { 678 if (dso->data.status == DSO_DATA_STATUS_ERROR) 679 return -1; 680 681 if (pthread_mutex_lock(&dso__data_open_lock) < 0) 682 return -1; 683 684 try_to_open_dso(dso, machine); 685 686 if (dso->data.fd < 0) 687 pthread_mutex_unlock(&dso__data_open_lock); 688 689 return dso->data.fd; 690 } 691 692 void dso__data_put_fd(struct dso *dso __maybe_unused) 693 { 694 pthread_mutex_unlock(&dso__data_open_lock); 695 } 696 697 bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by) 698 { 699 u32 flag = 1 << by; 700 701 if (dso->data.status_seen & flag) 702 return true; 703 704 dso->data.status_seen |= flag; 705 706 return false; 707 } 708 709 static void 710 dso_cache__free(struct dso *dso) 711 { 712 struct rb_root *root = &dso->data.cache; 713 struct rb_node *next = rb_first(root); 714 715 pthread_mutex_lock(&dso->lock); 716 while (next) { 717 struct dso_cache *cache; 718 719 cache = rb_entry(next, struct dso_cache, rb_node); 720 next = rb_next(&cache->rb_node); 721 rb_erase(&cache->rb_node, root); 722 free(cache); 723 } 724 pthread_mutex_unlock(&dso->lock); 725 } 726 727 static struct dso_cache *dso_cache__find(struct dso *dso, u64 offset) 728 { 729 const struct rb_root *root = &dso->data.cache; 730 struct rb_node * const *p = &root->rb_node; 731 const struct rb_node *parent = NULL; 732 struct dso_cache *cache; 733 734 while (*p != NULL) { 735 u64 end; 736 737 parent = *p; 738 cache = rb_entry(parent, struct dso_cache, rb_node); 739 end = cache->offset + DSO__DATA_CACHE_SIZE; 740 741 if (offset < cache->offset) 742 p = &(*p)->rb_left; 743 else if (offset >= end) 744 p = &(*p)->rb_right; 745 else 746 return cache; 747 } 748 749 return NULL; 750 } 751 752 static struct dso_cache * 753 dso_cache__insert(struct dso *dso, struct dso_cache *new) 754 { 755 struct rb_root *root = &dso->data.cache; 756 struct rb_node **p = &root->rb_node; 757 struct rb_node *parent = NULL; 758 struct dso_cache *cache; 759 u64 offset = new->offset; 760 761 pthread_mutex_lock(&dso->lock); 762 while (*p != NULL) { 763 u64 end; 764 765 parent = *p; 766 cache = rb_entry(parent, struct dso_cache, rb_node); 767 end = cache->offset + DSO__DATA_CACHE_SIZE; 768 769 if (offset < cache->offset) 770 p = &(*p)->rb_left; 771 else if (offset >= end) 772 p = &(*p)->rb_right; 773 else 774 goto out; 775 } 776 777 rb_link_node(&new->rb_node, parent, p); 778 rb_insert_color(&new->rb_node, root); 779 780 cache = NULL; 781 out: 782 pthread_mutex_unlock(&dso->lock); 783 return cache; 784 } 785 786 static ssize_t 787 dso_cache__memcpy(struct dso_cache *cache, u64 offset, 788 u8 *data, u64 size) 789 { 790 u64 cache_offset = offset - cache->offset; 791 u64 cache_size = min(cache->size - cache_offset, size); 792 793 memcpy(data, cache->data + cache_offset, cache_size); 794 return cache_size; 795 } 796 797 static ssize_t 798 dso_cache__read(struct dso *dso, struct machine *machine, 799 u64 offset, u8 *data, ssize_t size) 800 { 801 struct dso_cache *cache; 802 struct dso_cache *old; 803 ssize_t ret; 804 805 do { 806 u64 cache_offset; 807 808 cache = zalloc(sizeof(*cache) + DSO__DATA_CACHE_SIZE); 809 if (!cache) 810 return -ENOMEM; 811 812 pthread_mutex_lock(&dso__data_open_lock); 813 814 /* 815 * dso->data.fd might be closed if other thread opened another 816 * file (dso) due to open file limit (RLIMIT_NOFILE). 817 */ 818 try_to_open_dso(dso, machine); 819 820 if (dso->data.fd < 0) { 821 ret = -errno; 822 dso->data.status = DSO_DATA_STATUS_ERROR; 823 break; 824 } 825 826 cache_offset = offset & DSO__DATA_CACHE_MASK; 827 828 ret = pread(dso->data.fd, cache->data, DSO__DATA_CACHE_SIZE, cache_offset); 829 if (ret <= 0) 830 break; 831 832 cache->offset = cache_offset; 833 cache->size = ret; 834 } while (0); 835 836 pthread_mutex_unlock(&dso__data_open_lock); 837 838 if (ret > 0) { 839 old = dso_cache__insert(dso, cache); 840 if (old) { 841 /* we lose the race */ 842 free(cache); 843 cache = old; 844 } 845 846 ret = dso_cache__memcpy(cache, offset, data, size); 847 } 848 849 if (ret <= 0) 850 free(cache); 851 852 return ret; 853 } 854 855 static ssize_t dso_cache_read(struct dso *dso, struct machine *machine, 856 u64 offset, u8 *data, ssize_t size) 857 { 858 struct dso_cache *cache; 859 860 cache = dso_cache__find(dso, offset); 861 if (cache) 862 return dso_cache__memcpy(cache, offset, data, size); 863 else 864 return dso_cache__read(dso, machine, offset, data, size); 865 } 866 867 /* 868 * Reads and caches dso data DSO__DATA_CACHE_SIZE size chunks 869 * in the rb_tree. Any read to already cached data is served 870 * by cached data. 871 */ 872 static ssize_t cached_read(struct dso *dso, struct machine *machine, 873 u64 offset, u8 *data, ssize_t size) 874 { 875 ssize_t r = 0; 876 u8 *p = data; 877 878 do { 879 ssize_t ret; 880 881 ret = dso_cache_read(dso, machine, offset, p, size); 882 if (ret < 0) 883 return ret; 884 885 /* Reached EOF, return what we have. */ 886 if (!ret) 887 break; 888 889 BUG_ON(ret > size); 890 891 r += ret; 892 p += ret; 893 offset += ret; 894 size -= ret; 895 896 } while (size); 897 898 return r; 899 } 900 901 int dso__data_file_size(struct dso *dso, struct machine *machine) 902 { 903 int ret = 0; 904 struct stat st; 905 char sbuf[STRERR_BUFSIZE]; 906 907 if (dso->data.file_size) 908 return 0; 909 910 if (dso->data.status == DSO_DATA_STATUS_ERROR) 911 return -1; 912 913 pthread_mutex_lock(&dso__data_open_lock); 914 915 /* 916 * dso->data.fd might be closed if other thread opened another 917 * file (dso) due to open file limit (RLIMIT_NOFILE). 918 */ 919 try_to_open_dso(dso, machine); 920 921 if (dso->data.fd < 0) { 922 ret = -errno; 923 dso->data.status = DSO_DATA_STATUS_ERROR; 924 goto out; 925 } 926 927 if (fstat(dso->data.fd, &st) < 0) { 928 ret = -errno; 929 pr_err("dso cache fstat failed: %s\n", 930 str_error_r(errno, sbuf, sizeof(sbuf))); 931 dso->data.status = DSO_DATA_STATUS_ERROR; 932 goto out; 933 } 934 dso->data.file_size = st.st_size; 935 936 out: 937 pthread_mutex_unlock(&dso__data_open_lock); 938 return ret; 939 } 940 941 /** 942 * dso__data_size - Return dso data size 943 * @dso: dso object 944 * @machine: machine object 945 * 946 * Return: dso data size 947 */ 948 off_t dso__data_size(struct dso *dso, struct machine *machine) 949 { 950 if (dso__data_file_size(dso, machine)) 951 return -1; 952 953 /* For now just estimate dso data size is close to file size */ 954 return dso->data.file_size; 955 } 956 957 static ssize_t data_read_offset(struct dso *dso, struct machine *machine, 958 u64 offset, u8 *data, ssize_t size) 959 { 960 if (dso__data_file_size(dso, machine)) 961 return -1; 962 963 /* Check the offset sanity. */ 964 if (offset > dso->data.file_size) 965 return -1; 966 967 if (offset + size < offset) 968 return -1; 969 970 return cached_read(dso, machine, offset, data, size); 971 } 972 973 /** 974 * dso__data_read_offset - Read data from dso file offset 975 * @dso: dso object 976 * @machine: machine object 977 * @offset: file offset 978 * @data: buffer to store data 979 * @size: size of the @data buffer 980 * 981 * External interface to read data from dso file offset. Open 982 * dso data file and use cached_read to get the data. 983 */ 984 ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine, 985 u64 offset, u8 *data, ssize_t size) 986 { 987 if (dso->data.status == DSO_DATA_STATUS_ERROR) 988 return -1; 989 990 return data_read_offset(dso, machine, offset, data, size); 991 } 992 993 /** 994 * dso__data_read_addr - Read data from dso address 995 * @dso: dso object 996 * @machine: machine object 997 * @add: virtual memory address 998 * @data: buffer to store data 999 * @size: size of the @data buffer 1000 * 1001 * External interface to read data from dso address. 1002 */ 1003 ssize_t dso__data_read_addr(struct dso *dso, struct map *map, 1004 struct machine *machine, u64 addr, 1005 u8 *data, ssize_t size) 1006 { 1007 u64 offset = map->map_ip(map, addr); 1008 return dso__data_read_offset(dso, machine, offset, data, size); 1009 } 1010 1011 struct map *dso__new_map(const char *name) 1012 { 1013 struct map *map = NULL; 1014 struct dso *dso = dso__new(name); 1015 1016 if (dso) 1017 map = map__new2(0, dso); 1018 1019 return map; 1020 } 1021 1022 struct dso *machine__findnew_kernel(struct machine *machine, const char *name, 1023 const char *short_name, int dso_type) 1024 { 1025 /* 1026 * The kernel dso could be created by build_id processing. 1027 */ 1028 struct dso *dso = machine__findnew_dso(machine, name); 1029 1030 /* 1031 * We need to run this in all cases, since during the build_id 1032 * processing we had no idea this was the kernel dso. 1033 */ 1034 if (dso != NULL) { 1035 dso__set_short_name(dso, short_name, false); 1036 dso->kernel = dso_type; 1037 } 1038 1039 return dso; 1040 } 1041 1042 /* 1043 * Find a matching entry and/or link current entry to RB tree. 1044 * Either one of the dso or name parameter must be non-NULL or the 1045 * function will not work. 1046 */ 1047 static struct dso *__dso__findlink_by_longname(struct rb_root *root, 1048 struct dso *dso, const char *name) 1049 { 1050 struct rb_node **p = &root->rb_node; 1051 struct rb_node *parent = NULL; 1052 1053 if (!name) 1054 name = dso->long_name; 1055 /* 1056 * Find node with the matching name 1057 */ 1058 while (*p) { 1059 struct dso *this = rb_entry(*p, struct dso, rb_node); 1060 int rc = strcmp(name, this->long_name); 1061 1062 parent = *p; 1063 if (rc == 0) { 1064 /* 1065 * In case the new DSO is a duplicate of an existing 1066 * one, print a one-time warning & put the new entry 1067 * at the end of the list of duplicates. 1068 */ 1069 if (!dso || (dso == this)) 1070 return this; /* Find matching dso */ 1071 /* 1072 * The core kernel DSOs may have duplicated long name. 1073 * In this case, the short name should be different. 1074 * Comparing the short names to differentiate the DSOs. 1075 */ 1076 rc = strcmp(dso->short_name, this->short_name); 1077 if (rc == 0) { 1078 pr_err("Duplicated dso name: %s\n", name); 1079 return NULL; 1080 } 1081 } 1082 if (rc < 0) 1083 p = &parent->rb_left; 1084 else 1085 p = &parent->rb_right; 1086 } 1087 if (dso) { 1088 /* Add new node and rebalance tree */ 1089 rb_link_node(&dso->rb_node, parent, p); 1090 rb_insert_color(&dso->rb_node, root); 1091 dso->root = root; 1092 } 1093 return NULL; 1094 } 1095 1096 static inline struct dso *__dso__find_by_longname(struct rb_root *root, 1097 const char *name) 1098 { 1099 return __dso__findlink_by_longname(root, NULL, name); 1100 } 1101 1102 void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated) 1103 { 1104 struct rb_root *root = dso->root; 1105 1106 if (name == NULL) 1107 return; 1108 1109 if (dso->long_name_allocated) 1110 free((char *)dso->long_name); 1111 1112 if (root) { 1113 rb_erase(&dso->rb_node, root); 1114 /* 1115 * __dso__findlink_by_longname() isn't guaranteed to add it 1116 * back, so a clean removal is required here. 1117 */ 1118 RB_CLEAR_NODE(&dso->rb_node); 1119 dso->root = NULL; 1120 } 1121 1122 dso->long_name = name; 1123 dso->long_name_len = strlen(name); 1124 dso->long_name_allocated = name_allocated; 1125 1126 if (root) 1127 __dso__findlink_by_longname(root, dso, NULL); 1128 } 1129 1130 void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated) 1131 { 1132 if (name == NULL) 1133 return; 1134 1135 if (dso->short_name_allocated) 1136 free((char *)dso->short_name); 1137 1138 dso->short_name = name; 1139 dso->short_name_len = strlen(name); 1140 dso->short_name_allocated = name_allocated; 1141 } 1142 1143 static void dso__set_basename(struct dso *dso) 1144 { 1145 char *base, *lname; 1146 int tid; 1147 1148 if (sscanf(dso->long_name, "/tmp/perf-%d.map", &tid) == 1) { 1149 if (asprintf(&base, "[JIT] tid %d", tid) < 0) 1150 return; 1151 } else { 1152 /* 1153 * basename() may modify path buffer, so we must pass 1154 * a copy. 1155 */ 1156 lname = strdup(dso->long_name); 1157 if (!lname) 1158 return; 1159 1160 /* 1161 * basename() may return a pointer to internal 1162 * storage which is reused in subsequent calls 1163 * so copy the result. 1164 */ 1165 base = strdup(basename(lname)); 1166 1167 free(lname); 1168 1169 if (!base) 1170 return; 1171 } 1172 dso__set_short_name(dso, base, true); 1173 } 1174 1175 int dso__name_len(const struct dso *dso) 1176 { 1177 if (!dso) 1178 return strlen("[unknown]"); 1179 if (verbose > 0) 1180 return dso->long_name_len; 1181 1182 return dso->short_name_len; 1183 } 1184 1185 bool dso__loaded(const struct dso *dso) 1186 { 1187 return dso->loaded; 1188 } 1189 1190 bool dso__sorted_by_name(const struct dso *dso) 1191 { 1192 return dso->sorted_by_name; 1193 } 1194 1195 void dso__set_sorted_by_name(struct dso *dso) 1196 { 1197 dso->sorted_by_name = true; 1198 } 1199 1200 struct dso *dso__new(const char *name) 1201 { 1202 struct dso *dso = calloc(1, sizeof(*dso) + strlen(name) + 1); 1203 1204 if (dso != NULL) { 1205 strcpy(dso->name, name); 1206 dso__set_long_name(dso, dso->name, false); 1207 dso__set_short_name(dso, dso->name, false); 1208 dso->symbols = dso->symbol_names = RB_ROOT_CACHED; 1209 dso->data.cache = RB_ROOT; 1210 dso->inlined_nodes = RB_ROOT_CACHED; 1211 dso->srclines = RB_ROOT_CACHED; 1212 dso->data.fd = -1; 1213 dso->data.status = DSO_DATA_STATUS_UNKNOWN; 1214 dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND; 1215 dso->binary_type = DSO_BINARY_TYPE__NOT_FOUND; 1216 dso->is_64_bit = (sizeof(void *) == 8); 1217 dso->loaded = 0; 1218 dso->rel = 0; 1219 dso->sorted_by_name = 0; 1220 dso->has_build_id = 0; 1221 dso->has_srcline = 1; 1222 dso->a2l_fails = 1; 1223 dso->kernel = DSO_TYPE_USER; 1224 dso->needs_swap = DSO_SWAP__UNSET; 1225 dso->comp = COMP_ID__NONE; 1226 RB_CLEAR_NODE(&dso->rb_node); 1227 dso->root = NULL; 1228 INIT_LIST_HEAD(&dso->node); 1229 INIT_LIST_HEAD(&dso->data.open_entry); 1230 pthread_mutex_init(&dso->lock, NULL); 1231 refcount_set(&dso->refcnt, 1); 1232 } 1233 1234 return dso; 1235 } 1236 1237 void dso__delete(struct dso *dso) 1238 { 1239 if (!RB_EMPTY_NODE(&dso->rb_node)) 1240 pr_err("DSO %s is still in rbtree when being deleted!\n", 1241 dso->long_name); 1242 1243 /* free inlines first, as they reference symbols */ 1244 inlines__tree_delete(&dso->inlined_nodes); 1245 srcline__tree_delete(&dso->srclines); 1246 symbols__delete(&dso->symbols); 1247 1248 if (dso->short_name_allocated) { 1249 zfree((char **)&dso->short_name); 1250 dso->short_name_allocated = false; 1251 } 1252 1253 if (dso->long_name_allocated) { 1254 zfree((char **)&dso->long_name); 1255 dso->long_name_allocated = false; 1256 } 1257 1258 dso__data_close(dso); 1259 auxtrace_cache__free(dso->auxtrace_cache); 1260 dso_cache__free(dso); 1261 dso__free_a2l(dso); 1262 zfree(&dso->symsrc_filename); 1263 nsinfo__zput(dso->nsinfo); 1264 pthread_mutex_destroy(&dso->lock); 1265 free(dso); 1266 } 1267 1268 struct dso *dso__get(struct dso *dso) 1269 { 1270 if (dso) 1271 refcount_inc(&dso->refcnt); 1272 return dso; 1273 } 1274 1275 void dso__put(struct dso *dso) 1276 { 1277 if (dso && refcount_dec_and_test(&dso->refcnt)) 1278 dso__delete(dso); 1279 } 1280 1281 void dso__set_build_id(struct dso *dso, void *build_id) 1282 { 1283 memcpy(dso->build_id, build_id, sizeof(dso->build_id)); 1284 dso->has_build_id = 1; 1285 } 1286 1287 bool dso__build_id_equal(const struct dso *dso, u8 *build_id) 1288 { 1289 return memcmp(dso->build_id, build_id, sizeof(dso->build_id)) == 0; 1290 } 1291 1292 void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine) 1293 { 1294 char path[PATH_MAX]; 1295 1296 if (machine__is_default_guest(machine)) 1297 return; 1298 sprintf(path, "%s/sys/kernel/notes", machine->root_dir); 1299 if (sysfs__read_build_id(path, dso->build_id, 1300 sizeof(dso->build_id)) == 0) 1301 dso->has_build_id = true; 1302 } 1303 1304 int dso__kernel_module_get_build_id(struct dso *dso, 1305 const char *root_dir) 1306 { 1307 char filename[PATH_MAX]; 1308 /* 1309 * kernel module short names are of the form "[module]" and 1310 * we need just "module" here. 1311 */ 1312 const char *name = dso->short_name + 1; 1313 1314 snprintf(filename, sizeof(filename), 1315 "%s/sys/module/%.*s/notes/.note.gnu.build-id", 1316 root_dir, (int)strlen(name) - 1, name); 1317 1318 if (sysfs__read_build_id(filename, dso->build_id, 1319 sizeof(dso->build_id)) == 0) 1320 dso->has_build_id = true; 1321 1322 return 0; 1323 } 1324 1325 bool __dsos__read_build_ids(struct list_head *head, bool with_hits) 1326 { 1327 bool have_build_id = false; 1328 struct dso *pos; 1329 struct nscookie nsc; 1330 1331 list_for_each_entry(pos, head, node) { 1332 if (with_hits && !pos->hit && !dso__is_vdso(pos)) 1333 continue; 1334 if (pos->has_build_id) { 1335 have_build_id = true; 1336 continue; 1337 } 1338 nsinfo__mountns_enter(pos->nsinfo, &nsc); 1339 if (filename__read_build_id(pos->long_name, pos->build_id, 1340 sizeof(pos->build_id)) > 0) { 1341 have_build_id = true; 1342 pos->has_build_id = true; 1343 } 1344 nsinfo__mountns_exit(&nsc); 1345 } 1346 1347 return have_build_id; 1348 } 1349 1350 void __dsos__add(struct dsos *dsos, struct dso *dso) 1351 { 1352 list_add_tail(&dso->node, &dsos->head); 1353 __dso__findlink_by_longname(&dsos->root, dso, NULL); 1354 /* 1355 * It is now in the linked list, grab a reference, then garbage collect 1356 * this when needing memory, by looking at LRU dso instances in the 1357 * list with atomic_read(&dso->refcnt) == 1, i.e. no references 1358 * anywhere besides the one for the list, do, under a lock for the 1359 * list: remove it from the list, then a dso__put(), that probably will 1360 * be the last and will then call dso__delete(), end of life. 1361 * 1362 * That, or at the end of the 'struct machine' lifetime, when all 1363 * 'struct dso' instances will be removed from the list, in 1364 * dsos__exit(), if they have no other reference from some other data 1365 * structure. 1366 * 1367 * E.g.: after processing a 'perf.data' file and storing references 1368 * to objects instantiated while processing events, we will have 1369 * references to the 'thread', 'map', 'dso' structs all from 'struct 1370 * hist_entry' instances, but we may not need anything not referenced, 1371 * so we might as well call machines__exit()/machines__delete() and 1372 * garbage collect it. 1373 */ 1374 dso__get(dso); 1375 } 1376 1377 void dsos__add(struct dsos *dsos, struct dso *dso) 1378 { 1379 down_write(&dsos->lock); 1380 __dsos__add(dsos, dso); 1381 up_write(&dsos->lock); 1382 } 1383 1384 struct dso *__dsos__find(struct dsos *dsos, const char *name, bool cmp_short) 1385 { 1386 struct dso *pos; 1387 1388 if (cmp_short) { 1389 list_for_each_entry(pos, &dsos->head, node) 1390 if (strcmp(pos->short_name, name) == 0) 1391 return pos; 1392 return NULL; 1393 } 1394 return __dso__find_by_longname(&dsos->root, name); 1395 } 1396 1397 struct dso *dsos__find(struct dsos *dsos, const char *name, bool cmp_short) 1398 { 1399 struct dso *dso; 1400 down_read(&dsos->lock); 1401 dso = __dsos__find(dsos, name, cmp_short); 1402 up_read(&dsos->lock); 1403 return dso; 1404 } 1405 1406 struct dso *__dsos__addnew(struct dsos *dsos, const char *name) 1407 { 1408 struct dso *dso = dso__new(name); 1409 1410 if (dso != NULL) { 1411 __dsos__add(dsos, dso); 1412 dso__set_basename(dso); 1413 /* Put dso here because __dsos_add already got it */ 1414 dso__put(dso); 1415 } 1416 return dso; 1417 } 1418 1419 struct dso *__dsos__findnew(struct dsos *dsos, const char *name) 1420 { 1421 struct dso *dso = __dsos__find(dsos, name, false); 1422 1423 return dso ? dso : __dsos__addnew(dsos, name); 1424 } 1425 1426 struct dso *dsos__findnew(struct dsos *dsos, const char *name) 1427 { 1428 struct dso *dso; 1429 down_write(&dsos->lock); 1430 dso = dso__get(__dsos__findnew(dsos, name)); 1431 up_write(&dsos->lock); 1432 return dso; 1433 } 1434 1435 size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp, 1436 bool (skip)(struct dso *dso, int parm), int parm) 1437 { 1438 struct dso *pos; 1439 size_t ret = 0; 1440 1441 list_for_each_entry(pos, head, node) { 1442 if (skip && skip(pos, parm)) 1443 continue; 1444 ret += dso__fprintf_buildid(pos, fp); 1445 ret += fprintf(fp, " %s\n", pos->long_name); 1446 } 1447 return ret; 1448 } 1449 1450 size_t __dsos__fprintf(struct list_head *head, FILE *fp) 1451 { 1452 struct dso *pos; 1453 size_t ret = 0; 1454 1455 list_for_each_entry(pos, head, node) { 1456 ret += dso__fprintf(pos, fp); 1457 } 1458 1459 return ret; 1460 } 1461 1462 size_t dso__fprintf_buildid(struct dso *dso, FILE *fp) 1463 { 1464 char sbuild_id[SBUILD_ID_SIZE]; 1465 1466 build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id); 1467 return fprintf(fp, "%s", sbuild_id); 1468 } 1469 1470 size_t dso__fprintf(struct dso *dso, FILE *fp) 1471 { 1472 struct rb_node *nd; 1473 size_t ret = fprintf(fp, "dso: %s (", dso->short_name); 1474 1475 if (dso->short_name != dso->long_name) 1476 ret += fprintf(fp, "%s, ", dso->long_name); 1477 ret += fprintf(fp, "%sloaded, ", dso__loaded(dso) ? "" : "NOT "); 1478 ret += dso__fprintf_buildid(dso, fp); 1479 ret += fprintf(fp, ")\n"); 1480 for (nd = rb_first_cached(&dso->symbols); nd; nd = rb_next(nd)) { 1481 struct symbol *pos = rb_entry(nd, struct symbol, rb_node); 1482 ret += symbol__fprintf(pos, fp); 1483 } 1484 1485 return ret; 1486 } 1487 1488 enum dso_type dso__type(struct dso *dso, struct machine *machine) 1489 { 1490 int fd; 1491 enum dso_type type = DSO__TYPE_UNKNOWN; 1492 1493 fd = dso__data_get_fd(dso, machine); 1494 if (fd >= 0) { 1495 type = dso__type_fd(fd); 1496 dso__data_put_fd(dso); 1497 } 1498 1499 return type; 1500 } 1501 1502 int dso__strerror_load(struct dso *dso, char *buf, size_t buflen) 1503 { 1504 int idx, errnum = dso->load_errno; 1505 /* 1506 * This must have a same ordering as the enum dso_load_errno. 1507 */ 1508 static const char *dso_load__error_str[] = { 1509 "Internal tools/perf/ library error", 1510 "Invalid ELF file", 1511 "Can not read build id", 1512 "Mismatching build id", 1513 "Decompression failure", 1514 }; 1515 1516 BUG_ON(buflen == 0); 1517 1518 if (errnum >= 0) { 1519 const char *err = str_error_r(errnum, buf, buflen); 1520 1521 if (err != buf) 1522 scnprintf(buf, buflen, "%s", err); 1523 1524 return 0; 1525 } 1526 1527 if (errnum < __DSO_LOAD_ERRNO__START || errnum >= __DSO_LOAD_ERRNO__END) 1528 return -1; 1529 1530 idx = errnum - __DSO_LOAD_ERRNO__START; 1531 scnprintf(buf, buflen, "%s", dso_load__error_str[idx]); 1532 return 0; 1533 } 1534