1 // SPDX-License-Identifier: GPL-2.0
2 #include <asm/bug.h>
3 #include <linux/kernel.h>
4 #include <linux/string.h>
5 #include <linux/zalloc.h>
6 #include <sys/time.h>
7 #include <sys/resource.h>
8 #include <sys/types.h>
9 #include <sys/stat.h>
10 #include <unistd.h>
11 #include <errno.h>
12 #include <fcntl.h>
13 #include <stdlib.h>
14 #ifdef HAVE_LIBBPF_SUPPORT
15 #include <bpf/libbpf.h>
16 #include "bpf-event.h"
17 #include "bpf-utils.h"
18 #endif
19 #include "compress.h"
20 #include "env.h"
21 #include "namespaces.h"
22 #include "path.h"
23 #include "map.h"
24 #include "symbol.h"
25 #include "srcline.h"
26 #include "dso.h"
27 #include "dsos.h"
28 #include "machine.h"
29 #include "auxtrace.h"
30 #include "util.h" /* O_CLOEXEC for older systems */
31 #include "debug.h"
32 #include "string2.h"
33 #include "vdso.h"
34
35 static const char * const debuglink_paths[] = {
36 "%.0s%s",
37 "%s/%s",
38 "%s/.debug/%s",
39 "/usr/lib/debug%s/%s"
40 };
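
/*
 * Illustration (not in the original code): with a hypothetical dso
 * directory "/usr/lib" and debuglink "foo.debug", the patterns above
 * expand to
 *   "foo.debug"                        ("%.0s%s" consumes the dir, prints none of it)
 *   "/usr/lib/foo.debug"
 *   "/usr/lib/.debug/foo.debug"
 *   "/usr/lib/debug/usr/lib/foo.debug"
 */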
41
char dso__symtab_origin(const struct dso *dso)
43 {
44 static const char origin[] = {
45 [DSO_BINARY_TYPE__KALLSYMS] = 'k',
46 [DSO_BINARY_TYPE__VMLINUX] = 'v',
47 [DSO_BINARY_TYPE__JAVA_JIT] = 'j',
48 [DSO_BINARY_TYPE__DEBUGLINK] = 'l',
49 [DSO_BINARY_TYPE__BUILD_ID_CACHE] = 'B',
50 [DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO] = 'D',
51 [DSO_BINARY_TYPE__FEDORA_DEBUGINFO] = 'f',
52 [DSO_BINARY_TYPE__UBUNTU_DEBUGINFO] = 'u',
53 [DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO] = 'x',
54 [DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO] = 'o',
55 [DSO_BINARY_TYPE__BUILDID_DEBUGINFO] = 'b',
56 [DSO_BINARY_TYPE__SYSTEM_PATH_DSO] = 'd',
57 [DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE] = 'K',
58 [DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP] = 'm',
59 [DSO_BINARY_TYPE__GUEST_KALLSYMS] = 'g',
60 [DSO_BINARY_TYPE__GUEST_KMODULE] = 'G',
61 [DSO_BINARY_TYPE__GUEST_KMODULE_COMP] = 'M',
62 [DSO_BINARY_TYPE__GUEST_VMLINUX] = 'V',
63 };
64
65 if (dso == NULL || dso->symtab_type == DSO_BINARY_TYPE__NOT_FOUND)
66 return '!';
67 return origin[dso->symtab_type];
68 }
69
bool dso__is_object_file(const struct dso *dso)
71 {
72 switch (dso->binary_type) {
73 case DSO_BINARY_TYPE__KALLSYMS:
74 case DSO_BINARY_TYPE__GUEST_KALLSYMS:
75 case DSO_BINARY_TYPE__JAVA_JIT:
76 case DSO_BINARY_TYPE__BPF_PROG_INFO:
77 case DSO_BINARY_TYPE__BPF_IMAGE:
78 case DSO_BINARY_TYPE__OOL:
79 return false;
80 case DSO_BINARY_TYPE__VMLINUX:
81 case DSO_BINARY_TYPE__GUEST_VMLINUX:
82 case DSO_BINARY_TYPE__DEBUGLINK:
83 case DSO_BINARY_TYPE__BUILD_ID_CACHE:
84 case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
85 case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
86 case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
87 case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO:
88 case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
89 case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
90 case DSO_BINARY_TYPE__GUEST_KMODULE:
91 case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
92 case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
93 case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
94 case DSO_BINARY_TYPE__KCORE:
95 case DSO_BINARY_TYPE__GUEST_KCORE:
96 case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
97 case DSO_BINARY_TYPE__NOT_FOUND:
98 default:
99 return true;
100 }
101 }
102
int dso__read_binary_type_filename(const struct dso *dso,
104 enum dso_binary_type type,
105 char *root_dir, char *filename, size_t size)
106 {
107 char build_id_hex[SBUILD_ID_SIZE];
108 int ret = 0;
109 size_t len;
110
111 switch (type) {
112 case DSO_BINARY_TYPE__DEBUGLINK:
113 {
114 const char *last_slash;
115 char dso_dir[PATH_MAX];
116 char symfile[PATH_MAX];
117 unsigned int i;
118
119 len = __symbol__join_symfs(filename, size, dso->long_name);
120 last_slash = filename + len;
121 while (last_slash != filename && *last_slash != '/')
122 last_slash--;
123
124 strncpy(dso_dir, filename, last_slash - filename);
125 dso_dir[last_slash-filename] = '\0';
126
127 if (!is_regular_file(filename)) {
128 ret = -1;
129 break;
130 }
131
132 ret = filename__read_debuglink(filename, symfile, PATH_MAX);
133 if (ret)
134 break;
135
136 /* Check predefined locations where debug file might reside */
137 ret = -1;
138 for (i = 0; i < ARRAY_SIZE(debuglink_paths); i++) {
139 snprintf(filename, size,
140 debuglink_paths[i], dso_dir, symfile);
141 if (is_regular_file(filename)) {
142 ret = 0;
143 break;
144 }
145 }
146
147 break;
148 }
149 case DSO_BINARY_TYPE__BUILD_ID_CACHE:
150 if (dso__build_id_filename(dso, filename, size, false) == NULL)
151 ret = -1;
152 break;
153
154 case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
155 if (dso__build_id_filename(dso, filename, size, true) == NULL)
156 ret = -1;
157 break;
158
159 case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
160 len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
161 snprintf(filename + len, size - len, "%s.debug", dso->long_name);
162 break;
163
164 case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
165 len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
166 snprintf(filename + len, size - len, "%s", dso->long_name);
167 break;
168
169 case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO:
		/*
		 * Ubuntu can mix up /usr/lib with /lib, putting debuginfo in
		 * /usr/lib/debug/lib when it is expected to be in
		 * /usr/lib/debug/usr/lib instead.
		 */
175 if (strlen(dso->long_name) < 9 ||
176 strncmp(dso->long_name, "/usr/lib/", 9)) {
177 ret = -1;
178 break;
179 }
180 len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
181 snprintf(filename + len, size - len, "%s", dso->long_name + 4);
182 break;
183
184 case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
185 {
186 const char *last_slash;
187 size_t dir_size;
188
189 last_slash = dso->long_name + dso->long_name_len;
190 while (last_slash != dso->long_name && *last_slash != '/')
191 last_slash--;
192
193 len = __symbol__join_symfs(filename, size, "");
194 dir_size = last_slash - dso->long_name + 2;
195 if (dir_size > (size - len)) {
196 ret = -1;
197 break;
198 }
199 len += scnprintf(filename + len, dir_size, "%s", dso->long_name);
200 len += scnprintf(filename + len , size - len, ".debug%s",
201 last_slash);
202 break;
203 }
204
205 case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
206 if (!dso->has_build_id) {
207 ret = -1;
208 break;
209 }
210
211 build_id__sprintf(&dso->bid, build_id_hex);
212 len = __symbol__join_symfs(filename, size, "/usr/lib/debug/.build-id/");
213 snprintf(filename + len, size - len, "%.2s/%s.debug",
214 build_id_hex, build_id_hex + 2);
215 break;
216
217 case DSO_BINARY_TYPE__VMLINUX:
218 case DSO_BINARY_TYPE__GUEST_VMLINUX:
219 case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
220 __symbol__join_symfs(filename, size, dso->long_name);
221 break;
222
223 case DSO_BINARY_TYPE__GUEST_KMODULE:
224 case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
225 path__join3(filename, size, symbol_conf.symfs,
226 root_dir, dso->long_name);
227 break;
228
229 case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
230 case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
231 __symbol__join_symfs(filename, size, dso->long_name);
232 break;
233
234 case DSO_BINARY_TYPE__KCORE:
235 case DSO_BINARY_TYPE__GUEST_KCORE:
236 snprintf(filename, size, "%s", dso->long_name);
237 break;
238
239 default:
240 case DSO_BINARY_TYPE__KALLSYMS:
241 case DSO_BINARY_TYPE__GUEST_KALLSYMS:
242 case DSO_BINARY_TYPE__JAVA_JIT:
243 case DSO_BINARY_TYPE__BPF_PROG_INFO:
244 case DSO_BINARY_TYPE__BPF_IMAGE:
245 case DSO_BINARY_TYPE__OOL:
246 case DSO_BINARY_TYPE__NOT_FOUND:
247 ret = -1;
248 break;
249 }
250
251 return ret;
252 }
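
/*
 * Illustrative sketch (not part of the original code): how a caller might
 * resolve the on-disk path for one candidate binary type.  The root_dir
 * argument would normally come from machine->root_dir.
 */
static int __maybe_unused example__resolve_debuglink(const struct dso *dso,
						     char *root_dir,
						     char *buf, size_t size)
{
	if (dso__read_binary_type_filename(dso, DSO_BINARY_TYPE__DEBUGLINK,
					   root_dir, buf, size))
		return -1;	/* no debuglink file found */

	pr_debug("debuglink for %s: %s\n", dso->long_name, buf);
	return 0;
}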
253
254 enum {
255 COMP_ID__NONE = 0,
256 };
257
258 static const struct {
259 const char *fmt;
260 int (*decompress)(const char *input, int output);
261 bool (*is_compressed)(const char *input);
262 } compressions[] = {
263 [COMP_ID__NONE] = { .fmt = NULL, },
264 #ifdef HAVE_ZLIB_SUPPORT
265 { "gz", gzip_decompress_to_file, gzip_is_compressed },
266 #endif
267 #ifdef HAVE_LZMA_SUPPORT
268 { "xz", lzma_decompress_to_file, lzma_is_compressed },
269 #endif
270 { NULL, NULL, NULL },
271 };
272
static int is_supported_compression(const char *ext)
274 {
275 unsigned i;
276
277 for (i = 1; compressions[i].fmt; i++) {
278 if (!strcmp(ext, compressions[i].fmt))
279 return i;
280 }
281 return COMP_ID__NONE;
282 }
283
bool is_kernel_module(const char *pathname, int cpumode)
285 {
286 struct kmod_path m;
287 int mode = cpumode & PERF_RECORD_MISC_CPUMODE_MASK;
288
289 WARN_ONCE(mode != cpumode,
290 "Internal error: passing unmasked cpumode (%x) to is_kernel_module",
291 cpumode);
292
293 switch (mode) {
294 case PERF_RECORD_MISC_USER:
295 case PERF_RECORD_MISC_HYPERVISOR:
296 case PERF_RECORD_MISC_GUEST_USER:
297 return false;
298 /* Treat PERF_RECORD_MISC_CPUMODE_UNKNOWN as kernel */
299 default:
300 if (kmod_path__parse(&m, pathname)) {
301 pr_err("Failed to check whether %s is a kernel module or not. Assume it is.",
302 pathname);
303 return true;
304 }
305 }
306
307 return m.kmod;
308 }
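
/*
 * Illustrative sketch (not part of the original code), using hypothetical
 * paths: a user-space DSO is never a module, while a kernel-mode mmap of a
 * ".ko" path parses as one.
 */
static void __maybe_unused example__is_kernel_module(void)
{
	pr_debug("user libc: %d\n",
		 is_kernel_module("/usr/lib/libc.so.6", PERF_RECORD_MISC_USER));
	pr_debug("kernel mod: %d\n",
		 is_kernel_module("/tmp/foo.ko", PERF_RECORD_MISC_KERNEL));
}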
309
bool dso__needs_decompress(struct dso *dso)
311 {
312 return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
313 dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
314 }
315
int filename__decompress(const char *name, char *pathname,
317 size_t len, int comp, int *err)
318 {
319 char tmpbuf[] = KMOD_DECOMP_NAME;
320 int fd = -1;
321
	/*
	 * We may have a proper compression id for the DSO, and yet the file
	 * behind the 'name' can still be a plain uncompressed object.
	 *
	 * The reason lies in how we open DSO object files: we try all
	 * possible 'debug' objects until we find the data. So even if the
	 * DSO is represented by a 'krava.xz' module, we can end up here
	 * opening a '~/.debug/....23432432/debug' file which is not
	 * compressed.
	 *
	 * To keep this transparent, we detect this and return the file
	 * descriptor to the uncompressed file.
	 */
335 if (!compressions[comp].is_compressed(name))
336 return open(name, O_RDONLY);
337
338 fd = mkstemp(tmpbuf);
339 if (fd < 0) {
340 *err = errno;
341 return -1;
342 }
343
344 if (compressions[comp].decompress(name, fd)) {
345 *err = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE;
346 close(fd);
347 fd = -1;
348 }
349
350 if (!pathname || (fd < 0))
351 unlink(tmpbuf);
352
353 if (pathname && (fd >= 0))
354 strlcpy(pathname, tmpbuf, len);
355
356 return fd;
357 }
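
/*
 * Illustrative sketch (not part of the original code): decompress a module
 * file into a temporary path and report where it landed.  The path is
 * assumed to end in a supported suffix such as ".xz" or ".gz".
 */
static int __maybe_unused example__decompress_module(const char *path)
{
	char tmp[KMOD_DECOMP_LEN];
	const char *ext = strrchr(path, '.');
	int comp, err = 0, fd;

	comp = ext ? is_supported_compression(ext + 1) : COMP_ID__NONE;
	if (comp == COMP_ID__NONE)
		return -1;	/* nothing we know how to decompress */

	fd = filename__decompress(path, tmp, sizeof(tmp), comp, &err);
	if (fd < 0) {
		pr_debug("decompression of %s failed (err %d)\n", path, err);
		return -1;
	}

	pr_debug("decompressed %s to %s\n", path, tmp);
	close(fd);
	unlink(tmp);		/* drop the temporary copy again */
	return 0;
}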
358
static int decompress_kmodule(struct dso *dso, const char *name,
360 char *pathname, size_t len)
361 {
362 if (!dso__needs_decompress(dso))
363 return -1;
364
365 if (dso->comp == COMP_ID__NONE)
366 return -1;
367
368 return filename__decompress(name, pathname, len, dso->comp,
369 &dso->load_errno);
370 }
371
int dso__decompress_kmodule_fd(struct dso *dso, const char *name)
373 {
374 return decompress_kmodule(dso, name, NULL, 0);
375 }
376
int dso__decompress_kmodule_path(struct dso *dso, const char *name,
378 char *pathname, size_t len)
379 {
380 int fd = decompress_kmodule(dso, name, pathname, len);
381
382 close(fd);
383 return fd >= 0 ? 0 : -1;
384 }
385
/*
 * Parses the kernel module specified in @path and updates the
 * @m argument as follows:
 *
 * @comp - compression id (COMP_ID__NONE if @path has no supported
 *         compression suffix)
 * @kmod - true if @path contains a '.ko' suffix in the right position,
 *         false otherwise
 * @name - if (@alloc_name && @kmod) is true, it contains the strdup-ed base
 *         name of the kernel module without suffixes, otherwise the
 *         strdup-ed base name of @path
 *
 * Returns 0 if there is no strdup error, -ENOMEM otherwise.
 */
int __kmod_path__parse(struct kmod_path *m, const char *path,
403 bool alloc_name)
404 {
405 const char *name = strrchr(path, '/');
406 const char *ext = strrchr(path, '.');
407 bool is_simple_name = false;
408
409 memset(m, 0x0, sizeof(*m));
410 name = name ? name + 1 : path;
411
	/*
	 * '.' is also a valid character for a module name. For example,
	 * [aaa.bbb] is a valid module name, so '[' should have higher
	 * priority than the '.ko' suffix.
	 *
	 * The kernel names come from machine__mmap_name. Such a name
	 * belongs to the kernel itself, not to a kernel module.
	 */
420 if (name[0] == '[') {
421 is_simple_name = true;
422 if ((strncmp(name, "[kernel.kallsyms]", 17) == 0) ||
423 (strncmp(name, "[guest.kernel.kallsyms", 22) == 0) ||
424 (strncmp(name, "[vdso]", 6) == 0) ||
425 (strncmp(name, "[vdso32]", 8) == 0) ||
426 (strncmp(name, "[vdsox32]", 9) == 0) ||
427 (strncmp(name, "[vsyscall]", 10) == 0)) {
428 m->kmod = false;
429
430 } else
431 m->kmod = true;
432 }
433
434 /* No extension, just return name. */
435 if ((ext == NULL) || is_simple_name) {
436 if (alloc_name) {
437 m->name = strdup(name);
438 return m->name ? 0 : -ENOMEM;
439 }
440 return 0;
441 }
442
443 m->comp = is_supported_compression(ext + 1);
444 if (m->comp > COMP_ID__NONE)
445 ext -= 3;
446
447 /* Check .ko extension only if there's enough name left. */
448 if (ext > name)
449 m->kmod = !strncmp(ext, ".ko", 3);
450
451 if (alloc_name) {
452 if (m->kmod) {
453 if (asprintf(&m->name, "[%.*s]", (int) (ext - name), name) == -1)
454 return -ENOMEM;
455 } else {
456 if (asprintf(&m->name, "%s", name) == -1)
457 return -ENOMEM;
458 }
459
460 strreplace(m->name, '-', '_');
461 }
462
463 return 0;
464 }
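
/*
 * Illustrative sketch (not part of the original code), using a hypothetical
 * path: for "/tmp/nouveau.ko.xz" (with xz support built in) we expect
 * m.kmod == true, m.comp != COMP_ID__NONE and, with name allocation,
 * m.name == "[nouveau]".
 */
static void __maybe_unused example__parse_kmod_path(void)
{
	struct kmod_path m;

	if (__kmod_path__parse(&m, "/tmp/nouveau.ko.xz", /*alloc_name=*/true))
		return;		/* -ENOMEM */

	pr_debug("kmod=%d comp=%d name=%s\n", m.kmod, m.comp, m.name);
	zfree(&m.name);
}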
465
void dso__set_module_info(struct dso *dso, struct kmod_path *m,
467 struct machine *machine)
468 {
469 if (machine__is_host(machine))
470 dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
471 else
472 dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;
473
474 /* _KMODULE_COMP should be next to _KMODULE */
475 if (m->kmod && m->comp) {
476 dso->symtab_type++;
477 dso->comp = m->comp;
478 }
479
480 dso__set_short_name(dso, strdup(m->name), true);
481 }
482
483 /*
484 * Global list of open DSOs and the counter.
485 */
486 static LIST_HEAD(dso__data_open);
487 static long dso__data_open_cnt;
488 static pthread_mutex_t dso__data_open_lock = PTHREAD_MUTEX_INITIALIZER;
489
static void dso__list_add(struct dso *dso)
491 {
492 list_add_tail(&dso->data.open_entry, &dso__data_open);
493 dso__data_open_cnt++;
494 }
495
static void dso__list_del(struct dso *dso)
497 {
498 list_del_init(&dso->data.open_entry);
499 WARN_ONCE(dso__data_open_cnt <= 0,
500 "DSO data fd counter out of bounds.");
501 dso__data_open_cnt--;
502 }
503
504 static void close_first_dso(void);
505
static int do_open(char *name)
507 {
508 int fd;
509 char sbuf[STRERR_BUFSIZE];
510
511 do {
512 fd = open(name, O_RDONLY|O_CLOEXEC);
513 if (fd >= 0)
514 return fd;
515
516 pr_debug("dso open failed: %s\n",
517 str_error_r(errno, sbuf, sizeof(sbuf)));
518 if (!dso__data_open_cnt || errno != EMFILE)
519 break;
520
521 close_first_dso();
522 } while (1);
523
524 return -1;
525 }
526
char *dso__filename_with_chroot(const struct dso *dso, const char *filename)
528 {
529 return filename_with_chroot(nsinfo__pid(dso->nsinfo), filename);
530 }
531
static int __open_dso(struct dso *dso, struct machine *machine)
533 {
534 int fd = -EINVAL;
535 char *root_dir = (char *)"";
536 char *name = malloc(PATH_MAX);
537 bool decomp = false;
538
539 if (!name)
540 return -ENOMEM;
541
542 mutex_lock(&dso->lock);
543 if (machine)
544 root_dir = machine->root_dir;
545
546 if (dso__read_binary_type_filename(dso, dso->binary_type,
547 root_dir, name, PATH_MAX))
548 goto out;
549
550 if (!is_regular_file(name)) {
551 char *new_name;
552
553 if (errno != ENOENT || dso->nsinfo == NULL)
554 goto out;
555
556 new_name = dso__filename_with_chroot(dso, name);
557 if (!new_name)
558 goto out;
559
560 free(name);
561 name = new_name;
562 }
563
564 if (dso__needs_decompress(dso)) {
565 char newpath[KMOD_DECOMP_LEN];
566 size_t len = sizeof(newpath);
567
568 if (dso__decompress_kmodule_path(dso, name, newpath, len) < 0) {
569 fd = -dso->load_errno;
570 goto out;
571 }
572
573 decomp = true;
574 strcpy(name, newpath);
575 }
576
577 fd = do_open(name);
578
579 if (decomp)
580 unlink(name);
581
582 out:
583 mutex_unlock(&dso->lock);
584 free(name);
585 return fd;
586 }
587
588 static void check_data_close(void);
589
/**
 * open_dso - Open DSO data file
 * @dso: dso object
 *
 * Opens @dso's data file descriptor and updates the
 * list/count of open DSO objects.
 */
static int open_dso(struct dso *dso, struct machine *machine)
598 {
599 int fd;
600 struct nscookie nsc;
601
602 if (dso->binary_type != DSO_BINARY_TYPE__BUILD_ID_CACHE) {
603 mutex_lock(&dso->lock);
604 nsinfo__mountns_enter(dso->nsinfo, &nsc);
605 mutex_unlock(&dso->lock);
606 }
607 fd = __open_dso(dso, machine);
608 if (dso->binary_type != DSO_BINARY_TYPE__BUILD_ID_CACHE)
609 nsinfo__mountns_exit(&nsc);
610
611 if (fd >= 0) {
612 dso__list_add(dso);
613 /*
614 * Check if we crossed the allowed number
615 * of opened DSOs and close one if needed.
616 */
617 check_data_close();
618 }
619
620 return fd;
621 }
622
static void close_data_fd(struct dso *dso)
624 {
625 if (dso->data.fd >= 0) {
626 close(dso->data.fd);
627 dso->data.fd = -1;
628 dso->data.file_size = 0;
629 dso__list_del(dso);
630 }
631 }
632
/**
 * close_dso - Close DSO data file
 * @dso: dso object
 *
 * Closes @dso's data file descriptor and updates the
 * list/count of open DSO objects.
 */
static void close_dso(struct dso *dso)
641 {
642 close_data_fd(dso);
643 }
644
static void close_first_dso(void)
646 {
647 struct dso *dso;
648
649 dso = list_first_entry(&dso__data_open, struct dso, data.open_entry);
650 close_dso(dso);
651 }
652
static rlim_t get_fd_limit(void)
654 {
655 struct rlimit l;
656 rlim_t limit = 0;
657
658 /* Allow half of the current open fd limit. */
659 if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
660 if (l.rlim_cur == RLIM_INFINITY)
661 limit = l.rlim_cur;
662 else
663 limit = l.rlim_cur / 2;
664 } else {
665 pr_err("failed to get fd limit\n");
666 limit = 1;
667 }
668
669 return limit;
670 }
671
672 static rlim_t fd_limit;
673
/*
 * Used only by tests/dso-data.c to reset the environment
 * for tests. I don't expect we need to change this during
 * a standard run.
 */
void reset_fd_limit(void)
680 {
681 fd_limit = 0;
682 }
683
static bool may_cache_fd(void)
685 {
686 if (!fd_limit)
687 fd_limit = get_fd_limit();
688
689 if (fd_limit == RLIM_INFINITY)
690 return true;
691
692 return fd_limit > (rlim_t) dso__data_open_cnt;
693 }
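
/*
 * Illustration (not in the original code): with a soft RLIMIT_NOFILE of
 * 1024, get_fd_limit() returns 512, so up to 512 dso file descriptors are
 * kept cached before check_data_close() starts closing the oldest open dso.
 */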
694
/*
 * Check and close the LRU dso if we crossed the allowed limit
 * of open dso file descriptors. The limit is half of the
 * current RLIMIT_NOFILE limit.
 */
static void check_data_close(void)
701 {
702 bool cache_fd = may_cache_fd();
703
704 if (!cache_fd)
705 close_first_dso();
706 }
707
708 /**
709 * dso__data_close - Close DSO data file
710 * @dso: dso object
711 *
712 * External interface to close @dso's data file descriptor.
713 */
void dso__data_close(struct dso *dso)
715 {
716 pthread_mutex_lock(&dso__data_open_lock);
717 close_dso(dso);
718 pthread_mutex_unlock(&dso__data_open_lock);
719 }
720
static void try_to_open_dso(struct dso *dso, struct machine *machine)
722 {
723 enum dso_binary_type binary_type_data[] = {
724 DSO_BINARY_TYPE__BUILD_ID_CACHE,
725 DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
726 DSO_BINARY_TYPE__NOT_FOUND,
727 };
728 int i = 0;
729
730 if (dso->data.fd >= 0)
731 return;
732
733 if (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND) {
734 dso->data.fd = open_dso(dso, machine);
735 goto out;
736 }
737
738 do {
739 dso->binary_type = binary_type_data[i++];
740
741 dso->data.fd = open_dso(dso, machine);
742 if (dso->data.fd >= 0)
743 goto out;
744
745 } while (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND);
746 out:
747 if (dso->data.fd >= 0)
748 dso->data.status = DSO_DATA_STATUS_OK;
749 else
750 dso->data.status = DSO_DATA_STATUS_ERROR;
751 }
752
/**
 * dso__data_get_fd - Get dso's data file descriptor
 * @dso: dso object
 * @machine: machine object
 *
 * External interface to find dso's file, open it and
 * return its file descriptor. It must be paired with
 * dso__data_put_fd() if it returns a non-negative value.
 */
int dso__data_get_fd(struct dso *dso, struct machine *machine)
763 {
764 if (dso->data.status == DSO_DATA_STATUS_ERROR)
765 return -1;
766
767 if (pthread_mutex_lock(&dso__data_open_lock) < 0)
768 return -1;
769
770 try_to_open_dso(dso, machine);
771
772 if (dso->data.fd < 0)
773 pthread_mutex_unlock(&dso__data_open_lock);
774
775 return dso->data.fd;
776 }
777
void dso__data_put_fd(struct dso *dso __maybe_unused)
779 {
780 pthread_mutex_unlock(&dso__data_open_lock);
781 }
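
/*
 * Illustrative sketch (not part of the original code): the expected
 * get/put pairing around a raw read of the DSO data file.  The machine
 * pointer is assumed to come from the caller's session.
 */
static ssize_t __maybe_unused example__peek_dso_header(struct dso *dso,
						       struct machine *machine,
						       u8 *buf, size_t len)
{
	int fd = dso__data_get_fd(dso, machine);
	ssize_t n;

	if (fd < 0)
		return -1;

	n = pread(fd, buf, len, 0);	/* fd stays valid until the put below */
	dso__data_put_fd(dso);
	return n;
}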
782
bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by)
784 {
785 u32 flag = 1 << by;
786
787 if (dso->data.status_seen & flag)
788 return true;
789
790 dso->data.status_seen |= flag;
791
792 return false;
793 }
794
795 #ifdef HAVE_LIBBPF_SUPPORT
static ssize_t bpf_read(struct dso *dso, u64 offset, char *data)
797 {
798 struct bpf_prog_info_node *node;
799 ssize_t size = DSO__DATA_CACHE_SIZE;
800 u64 len;
801 u8 *buf;
802
803 node = perf_env__find_bpf_prog_info(dso->bpf_prog.env, dso->bpf_prog.id);
804 if (!node || !node->info_linear) {
805 dso->data.status = DSO_DATA_STATUS_ERROR;
806 return -1;
807 }
808
809 len = node->info_linear->info.jited_prog_len;
810 buf = (u8 *)(uintptr_t)node->info_linear->info.jited_prog_insns;
811
812 if (offset >= len)
813 return -1;
814
815 size = (ssize_t)min(len - offset, (u64)size);
816 memcpy(data, buf + offset, size);
817 return size;
818 }
819
static int bpf_size(struct dso *dso)
821 {
822 struct bpf_prog_info_node *node;
823
824 node = perf_env__find_bpf_prog_info(dso->bpf_prog.env, dso->bpf_prog.id);
825 if (!node || !node->info_linear) {
826 dso->data.status = DSO_DATA_STATUS_ERROR;
827 return -1;
828 }
829
830 dso->data.file_size = node->info_linear->info.jited_prog_len;
831 return 0;
832 }
833 #endif // HAVE_LIBBPF_SUPPORT
834
835 static void
dso_cache__free(struct dso *dso)
837 {
838 struct rb_root *root = &dso->data.cache;
839 struct rb_node *next = rb_first(root);
840
841 mutex_lock(&dso->lock);
842 while (next) {
843 struct dso_cache *cache;
844
845 cache = rb_entry(next, struct dso_cache, rb_node);
846 next = rb_next(&cache->rb_node);
847 rb_erase(&cache->rb_node, root);
848 free(cache);
849 }
850 mutex_unlock(&dso->lock);
851 }
852
static struct dso_cache *__dso_cache__find(struct dso *dso, u64 offset)
854 {
855 const struct rb_root *root = &dso->data.cache;
856 struct rb_node * const *p = &root->rb_node;
857 const struct rb_node *parent = NULL;
858 struct dso_cache *cache;
859
860 while (*p != NULL) {
861 u64 end;
862
863 parent = *p;
864 cache = rb_entry(parent, struct dso_cache, rb_node);
865 end = cache->offset + DSO__DATA_CACHE_SIZE;
866
867 if (offset < cache->offset)
868 p = &(*p)->rb_left;
869 else if (offset >= end)
870 p = &(*p)->rb_right;
871 else
872 return cache;
873 }
874
875 return NULL;
876 }
877
878 static struct dso_cache *
dso_cache__insert(struct dso *dso, struct dso_cache *new)
880 {
881 struct rb_root *root = &dso->data.cache;
882 struct rb_node **p = &root->rb_node;
883 struct rb_node *parent = NULL;
884 struct dso_cache *cache;
885 u64 offset = new->offset;
886
887 mutex_lock(&dso->lock);
888 while (*p != NULL) {
889 u64 end;
890
891 parent = *p;
892 cache = rb_entry(parent, struct dso_cache, rb_node);
893 end = cache->offset + DSO__DATA_CACHE_SIZE;
894
895 if (offset < cache->offset)
896 p = &(*p)->rb_left;
897 else if (offset >= end)
898 p = &(*p)->rb_right;
899 else
900 goto out;
901 }
902
903 rb_link_node(&new->rb_node, parent, p);
904 rb_insert_color(&new->rb_node, root);
905
906 cache = NULL;
907 out:
908 mutex_unlock(&dso->lock);
909 return cache;
910 }
911
static ssize_t dso_cache__memcpy(struct dso_cache *cache, u64 offset, u8 *data,
913 u64 size, bool out)
914 {
915 u64 cache_offset = offset - cache->offset;
916 u64 cache_size = min(cache->size - cache_offset, size);
917
918 if (out)
919 memcpy(data, cache->data + cache_offset, cache_size);
920 else
921 memcpy(cache->data + cache_offset, data, cache_size);
922 return cache_size;
923 }
924
static ssize_t file_read(struct dso *dso, struct machine *machine,
926 u64 offset, char *data)
927 {
928 ssize_t ret;
929
930 pthread_mutex_lock(&dso__data_open_lock);
931
	/*
	 * dso->data.fd might be closed if another thread opened another
	 * file (dso) due to the open file limit (RLIMIT_NOFILE).
	 */
936 try_to_open_dso(dso, machine);
937
938 if (dso->data.fd < 0) {
939 dso->data.status = DSO_DATA_STATUS_ERROR;
940 ret = -errno;
941 goto out;
942 }
943
944 ret = pread(dso->data.fd, data, DSO__DATA_CACHE_SIZE, offset);
945 out:
946 pthread_mutex_unlock(&dso__data_open_lock);
947 return ret;
948 }
949
static struct dso_cache *dso_cache__populate(struct dso *dso,
951 struct machine *machine,
952 u64 offset, ssize_t *ret)
953 {
954 u64 cache_offset = offset & DSO__DATA_CACHE_MASK;
955 struct dso_cache *cache;
956 struct dso_cache *old;
957
958 cache = zalloc(sizeof(*cache) + DSO__DATA_CACHE_SIZE);
959 if (!cache) {
960 *ret = -ENOMEM;
961 return NULL;
962 }
963 #ifdef HAVE_LIBBPF_SUPPORT
964 if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO)
965 *ret = bpf_read(dso, cache_offset, cache->data);
966 else
967 #endif
968 if (dso->binary_type == DSO_BINARY_TYPE__OOL)
969 *ret = DSO__DATA_CACHE_SIZE;
970 else
971 *ret = file_read(dso, machine, cache_offset, cache->data);
972
973 if (*ret <= 0) {
974 free(cache);
975 return NULL;
976 }
977
978 cache->offset = cache_offset;
979 cache->size = *ret;
980
981 old = dso_cache__insert(dso, cache);
982 if (old) {
983 /* we lose the race */
984 free(cache);
985 cache = old;
986 }
987
988 return cache;
989 }
990
static struct dso_cache *dso_cache__find(struct dso *dso,
992 struct machine *machine,
993 u64 offset,
994 ssize_t *ret)
995 {
996 struct dso_cache *cache = __dso_cache__find(dso, offset);
997
998 return cache ? cache : dso_cache__populate(dso, machine, offset, ret);
999 }
1000
static ssize_t dso_cache_io(struct dso *dso, struct machine *machine,
1002 u64 offset, u8 *data, ssize_t size, bool out)
1003 {
1004 struct dso_cache *cache;
1005 ssize_t ret = 0;
1006
1007 cache = dso_cache__find(dso, machine, offset, &ret);
1008 if (!cache)
1009 return ret;
1010
1011 return dso_cache__memcpy(cache, offset, data, size, out);
1012 }
1013
/*
 * Reads and caches dso data in DSO__DATA_CACHE_SIZE-sized chunks
 * in the rb_tree. Any read of already-cached data is served
 * from the cache. Writes update the cache only, not the backing file.
 */
static ssize_t cached_io(struct dso *dso, struct machine *machine,
1020 u64 offset, u8 *data, ssize_t size, bool out)
1021 {
1022 ssize_t r = 0;
1023 u8 *p = data;
1024
1025 do {
1026 ssize_t ret;
1027
1028 ret = dso_cache_io(dso, machine, offset, p, size, out);
1029 if (ret < 0)
1030 return ret;
1031
1032 /* Reached EOF, return what we have. */
1033 if (!ret)
1034 break;
1035
1036 BUG_ON(ret > size);
1037
1038 r += ret;
1039 p += ret;
1040 offset += ret;
1041 size -= ret;
1042
1043 } while (size);
1044
1045 return r;
1046 }
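
/*
 * Worked example (illustrative, assuming the usual 4 KiB chunk size):
 * a 16-byte read at offset 4090 is split by cached_io() into two
 * dso_cache_io() calls, one served by the chunk starting at offset 0
 * (6 bytes) and one by the chunk starting at offset 4096 (10 bytes).
 */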
1047
static int file_size(struct dso *dso, struct machine *machine)
1049 {
1050 int ret = 0;
1051 struct stat st;
1052 char sbuf[STRERR_BUFSIZE];
1053
1054 pthread_mutex_lock(&dso__data_open_lock);
1055
	/*
	 * dso->data.fd might be closed if another thread opened another
	 * file (dso) due to the open file limit (RLIMIT_NOFILE).
	 */
1060 try_to_open_dso(dso, machine);
1061
1062 if (dso->data.fd < 0) {
1063 ret = -errno;
1064 dso->data.status = DSO_DATA_STATUS_ERROR;
1065 goto out;
1066 }
1067
1068 if (fstat(dso->data.fd, &st) < 0) {
1069 ret = -errno;
1070 pr_err("dso cache fstat failed: %s\n",
1071 str_error_r(errno, sbuf, sizeof(sbuf)));
1072 dso->data.status = DSO_DATA_STATUS_ERROR;
1073 goto out;
1074 }
1075 dso->data.file_size = st.st_size;
1076
1077 out:
1078 pthread_mutex_unlock(&dso__data_open_lock);
1079 return ret;
1080 }
1081
int dso__data_file_size(struct dso *dso, struct machine *machine)
1083 {
1084 if (dso->data.file_size)
1085 return 0;
1086
1087 if (dso->data.status == DSO_DATA_STATUS_ERROR)
1088 return -1;
1089 #ifdef HAVE_LIBBPF_SUPPORT
1090 if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO)
1091 return bpf_size(dso);
1092 #endif
1093 return file_size(dso, machine);
1094 }
1095
1096 /**
1097 * dso__data_size - Return dso data size
1098 * @dso: dso object
1099 * @machine: machine object
1100 *
1101 * Return: dso data size
1102 */
off_t dso__data_size(struct dso *dso, struct machine *machine)
1104 {
1105 if (dso__data_file_size(dso, machine))
1106 return -1;
1107
	/* For now, just estimate that the dso data size is close to the file size */
1109 return dso->data.file_size;
1110 }
1111
static ssize_t data_read_write_offset(struct dso *dso, struct machine *machine,
1113 u64 offset, u8 *data, ssize_t size,
1114 bool out)
1115 {
1116 if (dso__data_file_size(dso, machine))
1117 return -1;
1118
1119 /* Check the offset sanity. */
1120 if (offset > dso->data.file_size)
1121 return -1;
1122
1123 if (offset + size < offset)
1124 return -1;
1125
1126 return cached_io(dso, machine, offset, data, size, out);
1127 }
1128
/**
 * dso__data_read_offset - Read data from dso file offset
 * @dso: dso object
 * @machine: machine object
 * @offset: file offset
 * @data: buffer to store data
 * @size: size of the @data buffer
 *
 * External interface to read data from a dso file offset. Opens the
 * dso data file and uses the chunk cache (cached_io) to get the data.
 */
ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
1141 u64 offset, u8 *data, ssize_t size)
1142 {
1143 if (dso->data.status == DSO_DATA_STATUS_ERROR)
1144 return -1;
1145
1146 return data_read_write_offset(dso, machine, offset, data, size, true);
1147 }
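
/*
 * Illustrative sketch (not part of the original code): read the first
 * bytes of a DSO through the chunk cache rather than through a raw fd.
 */
static ssize_t __maybe_unused example__read_dso_start(struct dso *dso,
						      struct machine *machine,
						      u8 *buf, ssize_t len)
{
	ssize_t n = dso__data_read_offset(dso, machine, 0, buf, len);

	if (n < 0)
		pr_debug("cannot read %s\n", dso->long_name);
	return n;
}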
1148
/**
 * dso__data_read_addr - Read data from dso address
 * @dso: dso object
 * @map: map used to translate @addr into a file offset
 * @machine: machine object
 * @addr: virtual memory address
 * @data: buffer to store data
 * @size: size of the @data buffer
 *
 * External interface to read data from a dso address.
 */
ssize_t dso__data_read_addr(struct dso *dso, struct map *map,
1160 struct machine *machine, u64 addr,
1161 u8 *data, ssize_t size)
1162 {
1163 u64 offset = map__map_ip(map, addr);
1164
1165 return dso__data_read_offset(dso, machine, offset, data, size);
1166 }
1167
1168 /**
1169 * dso__data_write_cache_offs - Write data to dso data cache at file offset
1170 * @dso: dso object
1171 * @machine: machine object
1172 * @offset: file offset
1173 * @data: buffer to write
1174 * @size: size of the @data buffer
1175 *
1176 * Write into the dso file data cache, but do not change the file itself.
1177 */
ssize_t dso__data_write_cache_offs(struct dso *dso, struct machine *machine,
1179 u64 offset, const u8 *data_in, ssize_t size)
1180 {
1181 u8 *data = (u8 *)data_in; /* cast away const to use same fns for r/w */
1182
1183 if (dso->data.status == DSO_DATA_STATUS_ERROR)
1184 return -1;
1185
1186 return data_read_write_offset(dso, machine, offset, data, size, false);
1187 }
1188
/**
 * dso__data_write_cache_addr - Write data to dso data cache at dso address
 * @dso: dso object
 * @map: map used to translate @addr into a file offset
 * @machine: machine object
 * @addr: virtual memory address
 * @data: buffer to write
 * @size: size of the @data buffer
 *
 * External interface to write into the dso file data cache, but do not
 * change the file itself.
 */
ssize_t dso__data_write_cache_addr(struct dso *dso, struct map *map,
1201 struct machine *machine, u64 addr,
1202 const u8 *data, ssize_t size)
1203 {
1204 u64 offset = map__map_ip(map, addr);
1205
1206 return dso__data_write_cache_offs(dso, machine, offset, data, size);
1207 }
1208
struct map *dso__new_map(const char *name)
1210 {
1211 struct map *map = NULL;
1212 struct dso *dso = dso__new(name);
1213
1214 if (dso) {
1215 map = map__new2(0, dso);
1216 dso__put(dso);
1217 }
1218
1219 return map;
1220 }
1221
struct dso *machine__findnew_kernel(struct machine *machine, const char *name,
1223 const char *short_name, int dso_type)
1224 {
1225 /*
1226 * The kernel dso could be created by build_id processing.
1227 */
1228 struct dso *dso = machine__findnew_dso(machine, name);
1229
1230 /*
1231 * We need to run this in all cases, since during the build_id
1232 * processing we had no idea this was the kernel dso.
1233 */
1234 if (dso != NULL) {
1235 dso__set_short_name(dso, short_name, false);
1236 dso->kernel = dso_type;
1237 }
1238
1239 return dso;
1240 }
1241
static void dso__set_long_name_id(struct dso *dso, const char *name, struct dso_id *id, bool name_allocated)
1243 {
1244 struct rb_root *root = dso->root;
1245
1246 if (name == NULL)
1247 return;
1248
1249 if (dso->long_name_allocated)
1250 free((char *)dso->long_name);
1251
1252 if (root) {
1253 rb_erase(&dso->rb_node, root);
1254 /*
1255 * __dsos__findnew_link_by_longname_id() isn't guaranteed to
1256 * add it back, so a clean removal is required here.
1257 */
1258 RB_CLEAR_NODE(&dso->rb_node);
1259 dso->root = NULL;
1260 }
1261
1262 dso->long_name = name;
1263 dso->long_name_len = strlen(name);
1264 dso->long_name_allocated = name_allocated;
1265
1266 if (root)
1267 __dsos__findnew_link_by_longname_id(root, dso, NULL, id);
1268 }
1269
void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated)
1271 {
1272 dso__set_long_name_id(dso, name, NULL, name_allocated);
1273 }
1274
void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
1276 {
1277 if (name == NULL)
1278 return;
1279
1280 if (dso->short_name_allocated)
1281 free((char *)dso->short_name);
1282
1283 dso->short_name = name;
1284 dso->short_name_len = strlen(name);
1285 dso->short_name_allocated = name_allocated;
1286 }
1287
int dso__name_len(const struct dso *dso)
1289 {
1290 if (!dso)
1291 return strlen("[unknown]");
1292 if (verbose > 0)
1293 return dso->long_name_len;
1294
1295 return dso->short_name_len;
1296 }
1297
bool dso__loaded(const struct dso *dso)
1299 {
1300 return dso->loaded;
1301 }
1302
bool dso__sorted_by_name(const struct dso *dso)
1304 {
1305 return dso->sorted_by_name;
1306 }
1307
void dso__set_sorted_by_name(struct dso *dso)
1309 {
1310 dso->sorted_by_name = true;
1311 }
1312
struct dso *dso__new_id(const char *name, struct dso_id *id)
1314 {
1315 struct dso *dso = calloc(1, sizeof(*dso) + strlen(name) + 1);
1316
1317 if (dso != NULL) {
1318 strcpy(dso->name, name);
1319 if (id)
1320 dso->id = *id;
1321 dso__set_long_name_id(dso, dso->name, id, false);
1322 dso__set_short_name(dso, dso->name, false);
1323 dso->symbols = RB_ROOT_CACHED;
1324 dso->symbol_names = NULL;
1325 dso->symbol_names_len = 0;
1326 dso->data.cache = RB_ROOT;
1327 dso->inlined_nodes = RB_ROOT_CACHED;
1328 dso->srclines = RB_ROOT_CACHED;
1329 dso->data.fd = -1;
1330 dso->data.status = DSO_DATA_STATUS_UNKNOWN;
1331 dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND;
1332 dso->binary_type = DSO_BINARY_TYPE__NOT_FOUND;
1333 dso->is_64_bit = (sizeof(void *) == 8);
1334 dso->loaded = 0;
1335 dso->rel = 0;
1336 dso->sorted_by_name = 0;
1337 dso->has_build_id = 0;
1338 dso->has_srcline = 1;
1339 dso->a2l_fails = 1;
1340 dso->kernel = DSO_SPACE__USER;
1341 dso->needs_swap = DSO_SWAP__UNSET;
1342 dso->comp = COMP_ID__NONE;
1343 RB_CLEAR_NODE(&dso->rb_node);
1344 dso->root = NULL;
1345 INIT_LIST_HEAD(&dso->node);
1346 INIT_LIST_HEAD(&dso->data.open_entry);
1347 mutex_init(&dso->lock);
1348 refcount_set(&dso->refcnt, 1);
1349 }
1350
1351 return dso;
1352 }
1353
struct dso *dso__new(const char *name)
1355 {
1356 return dso__new_id(name, NULL);
1357 }
1358
void dso__delete(struct dso *dso)
1360 {
1361 if (!RB_EMPTY_NODE(&dso->rb_node))
1362 pr_err("DSO %s is still in rbtree when being deleted!\n",
1363 dso->long_name);
1364
1365 /* free inlines first, as they reference symbols */
1366 inlines__tree_delete(&dso->inlined_nodes);
1367 srcline__tree_delete(&dso->srclines);
1368 symbols__delete(&dso->symbols);
1369 dso->symbol_names_len = 0;
1370 zfree(&dso->symbol_names);
1371 if (dso->short_name_allocated) {
1372 zfree((char **)&dso->short_name);
1373 dso->short_name_allocated = false;
1374 }
1375
1376 if (dso->long_name_allocated) {
1377 zfree((char **)&dso->long_name);
1378 dso->long_name_allocated = false;
1379 }
1380
1381 dso__data_close(dso);
1382 auxtrace_cache__free(dso->auxtrace_cache);
1383 dso_cache__free(dso);
1384 dso__free_a2l(dso);
1385 zfree(&dso->symsrc_filename);
1386 nsinfo__zput(dso->nsinfo);
1387 mutex_destroy(&dso->lock);
1388 free(dso);
1389 }
1390
struct dso *dso__get(struct dso *dso)
1392 {
1393 if (dso)
1394 refcount_inc(&dso->refcnt);
1395 return dso;
1396 }
1397
void dso__put(struct dso *dso)
1399 {
1400 if (dso && refcount_dec_and_test(&dso->refcnt))
1401 dso__delete(dso);
1402 }
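
/*
 * Illustrative sketch (not part of the original code), with a hypothetical
 * path: the reference counting contract.  dso__new() returns a dso with
 * refcount 1; every dso__get() must be balanced by a dso__put(), and the
 * final put frees the object.
 */
static void __maybe_unused example__dso_refcount(void)
{
	struct dso *dso = dso__new("/tmp/example.so");

	if (!dso)
		return;

	dso__get(dso);		/* second reference, e.g. stored elsewhere */
	dso__put(dso);		/* drop the stored reference */
	dso__put(dso);		/* drop the creation reference, frees dso */
}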
1403
void dso__set_build_id(struct dso *dso, struct build_id *bid)
1405 {
1406 dso->bid = *bid;
1407 dso->has_build_id = 1;
1408 }
1409
bool dso__build_id_equal(const struct dso *dso, struct build_id *bid)
1411 {
1412 if (dso->bid.size > bid->size && dso->bid.size == BUILD_ID_SIZE) {
		/*
		 * For backward compatibility, a build-id is allowed to
		 * have trailing zeros.
		 */
1417 return !memcmp(dso->bid.data, bid->data, bid->size) &&
1418 !memchr_inv(&dso->bid.data[bid->size], 0,
1419 dso->bid.size - bid->size);
1420 }
1421
1422 return dso->bid.size == bid->size &&
1423 memcmp(dso->bid.data, bid->data, dso->bid.size) == 0;
1424 }
1425
void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine)
1427 {
1428 char path[PATH_MAX];
1429
1430 if (machine__is_default_guest(machine))
1431 return;
1432 sprintf(path, "%s/sys/kernel/notes", machine->root_dir);
1433 if (sysfs__read_build_id(path, &dso->bid) == 0)
1434 dso->has_build_id = true;
1435 }
1436
int dso__kernel_module_get_build_id(struct dso *dso,
1438 const char *root_dir)
1439 {
1440 char filename[PATH_MAX];
1441 /*
1442 * kernel module short names are of the form "[module]" and
1443 * we need just "module" here.
1444 */
1445 const char *name = dso->short_name + 1;
1446
1447 snprintf(filename, sizeof(filename),
1448 "%s/sys/module/%.*s/notes/.note.gnu.build-id",
1449 root_dir, (int)strlen(name) - 1, name);
1450
1451 if (sysfs__read_build_id(filename, &dso->bid) == 0)
1452 dso->has_build_id = true;
1453
1454 return 0;
1455 }
1456
static size_t dso__fprintf_buildid(struct dso *dso, FILE *fp)
1458 {
1459 char sbuild_id[SBUILD_ID_SIZE];
1460
1461 build_id__sprintf(&dso->bid, sbuild_id);
1462 return fprintf(fp, "%s", sbuild_id);
1463 }
1464
size_t dso__fprintf(struct dso *dso, FILE *fp)
1466 {
1467 struct rb_node *nd;
1468 size_t ret = fprintf(fp, "dso: %s (", dso->short_name);
1469
1470 if (dso->short_name != dso->long_name)
1471 ret += fprintf(fp, "%s, ", dso->long_name);
1472 ret += fprintf(fp, "%sloaded, ", dso__loaded(dso) ? "" : "NOT ");
1473 ret += dso__fprintf_buildid(dso, fp);
1474 ret += fprintf(fp, ")\n");
1475 for (nd = rb_first_cached(&dso->symbols); nd; nd = rb_next(nd)) {
1476 struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
1477 ret += symbol__fprintf(pos, fp);
1478 }
1479
1480 return ret;
1481 }
1482
enum dso_type dso__type(struct dso *dso, struct machine *machine)
1484 {
1485 int fd;
1486 enum dso_type type = DSO__TYPE_UNKNOWN;
1487
1488 fd = dso__data_get_fd(dso, machine);
1489 if (fd >= 0) {
1490 type = dso__type_fd(fd);
1491 dso__data_put_fd(dso);
1492 }
1493
1494 return type;
1495 }
1496
int dso__strerror_load(struct dso *dso, char *buf, size_t buflen)
1498 {
1499 int idx, errnum = dso->load_errno;
	/*
	 * This must have the same ordering as enum dso_load_errno.
	 */
1503 static const char *dso_load__error_str[] = {
1504 "Internal tools/perf/ library error",
1505 "Invalid ELF file",
1506 "Can not read build id",
1507 "Mismatching build id",
1508 "Decompression failure",
1509 };
1510
1511 BUG_ON(buflen == 0);
1512
1513 if (errnum >= 0) {
1514 const char *err = str_error_r(errnum, buf, buflen);
1515
1516 if (err != buf)
1517 scnprintf(buf, buflen, "%s", err);
1518
1519 return 0;
1520 }
1521
1522 if (errnum < __DSO_LOAD_ERRNO__START || errnum >= __DSO_LOAD_ERRNO__END)
1523 return -1;
1524
1525 idx = errnum - __DSO_LOAD_ERRNO__START;
1526 scnprintf(buf, buflen, "%s", dso_load__error_str[idx]);
1527 return 0;
1528 }
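
/*
 * Illustrative sketch (not part of the original code): turning a failed
 * load into a human readable message, the way callers of this helper do.
 */
static void __maybe_unused example__report_load_error(struct dso *dso)
{
	char buf[128];

	if (dso__strerror_load(dso, buf, sizeof(buf)) == 0)
		pr_debug("failed to load %s: %s\n", dso->long_name, buf);
}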
1529