xref: /openbmc/linux/tools/perf/util/dso.c (revision e6dec923)
1 #include <asm/bug.h>
2 #include <linux/kernel.h>
3 #include <sys/time.h>
4 #include <sys/resource.h>
5 #include <sys/types.h>
6 #include <sys/stat.h>
7 #include <unistd.h>
8 #include <errno.h>
9 #include "compress.h"
10 #include "path.h"
11 #include "symbol.h"
12 #include "dso.h"
13 #include "machine.h"
14 #include "auxtrace.h"
15 #include "util.h"
16 #include "debug.h"
17 #include "string2.h"
18 #include "vdso.h"
19 
/*
 * Candidate printf-style formats for locating a debuglink file.  Each
 * entry is expanded with (dso_dir, debuglink_name) as the arguments:
 *   "%.0s%s"              - the debuglink name alone (dir consumed 0-width)
 *   "%s/%s"               - same directory as the DSO
 *   "%s/.debug/%s"        - .debug subdirectory of the DSO's directory
 *   "/usr/lib/debug%s/%s" - mirror under the global debug directory
 */
static const char * const debuglink_paths[] = {
	"%.0s%s",
	"%s/%s",
	"%s/.debug/%s",
	"/usr/lib/debug%s/%s"
};
26 
/*
 * Return a one-character tag identifying where @dso's symbol table came
 * from (e.g. 'k' kallsyms, 'v' vmlinux, 'B' build-id cache).  Returns
 * '!' for a NULL dso or one whose symtab was never found.
 */
char dso__symtab_origin(const struct dso *dso)
{
	static const char origin[] = {
		[DSO_BINARY_TYPE__KALLSYMS]			= 'k',
		[DSO_BINARY_TYPE__VMLINUX]			= 'v',
		[DSO_BINARY_TYPE__JAVA_JIT]			= 'j',
		[DSO_BINARY_TYPE__DEBUGLINK]			= 'l',
		[DSO_BINARY_TYPE__BUILD_ID_CACHE]		= 'B',
		[DSO_BINARY_TYPE__FEDORA_DEBUGINFO]		= 'f',
		[DSO_BINARY_TYPE__UBUNTU_DEBUGINFO]		= 'u',
		[DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO]	= 'o',
		[DSO_BINARY_TYPE__BUILDID_DEBUGINFO]		= 'b',
		[DSO_BINARY_TYPE__SYSTEM_PATH_DSO]		= 'd',
		[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE]		= 'K',
		[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP]	= 'm',
		[DSO_BINARY_TYPE__GUEST_KALLSYMS]		= 'g',
		[DSO_BINARY_TYPE__GUEST_KMODULE]		= 'G',
		[DSO_BINARY_TYPE__GUEST_KMODULE_COMP]		= 'M',
		[DSO_BINARY_TYPE__GUEST_VMLINUX]		= 'V',
	};

	if (dso == NULL || dso->symtab_type == DSO_BINARY_TYPE__NOT_FOUND)
		return '!';
	return origin[dso->symtab_type];
}
52 
/*
 * Build into @filename (at most @size bytes) the on-disk path from which
 * symbols for @dso could be read, for the given binary @type.  @root_dir
 * is the machine root directory, used for guest kernel modules.
 *
 * Returns 0 on success, -1 when no usable filename exists for @type.
 */
int dso__read_binary_type_filename(const struct dso *dso,
				   enum dso_binary_type type,
				   char *root_dir, char *filename, size_t size)
{
	char build_id_hex[SBUILD_ID_SIZE];
	int ret = 0;
	size_t len;

	switch (type) {
	case DSO_BINARY_TYPE__DEBUGLINK:
	{
		const char *last_slash;
		char dso_dir[PATH_MAX];
		char symfile[PATH_MAX];
		unsigned int i;

		/* Split the symfs-joined DSO path into directory + name. */
		len = __symbol__join_symfs(filename, size, dso->long_name);
		last_slash = filename + len;
		while (last_slash != filename && *last_slash != '/')
			last_slash--;

		strncpy(dso_dir, filename, last_slash - filename);
		dso_dir[last_slash-filename] = '\0';

		if (!is_regular_file(filename)) {
			ret = -1;
			break;
		}

		/* Read the .gnu_debuglink section to get the debug name. */
		ret = filename__read_debuglink(filename, symfile, PATH_MAX);
		if (ret)
			break;

		/* Check predefined locations where debug file might reside */
		ret = -1;
		for (i = 0; i < ARRAY_SIZE(debuglink_paths); i++) {
			snprintf(filename, size,
					debuglink_paths[i], dso_dir, symfile);
			if (is_regular_file(filename)) {
				ret = 0;
				break;
			}
		}

		break;
	}
	case DSO_BINARY_TYPE__BUILD_ID_CACHE:
		if (dso__build_id_filename(dso, filename, size) == NULL)
			ret = -1;
		break;

	case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
		snprintf(filename + len, size - len, "%s.debug", dso->long_name);
		break;

	case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
		snprintf(filename + len, size - len, "%s", dso->long_name);
		break;

	case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
	{
		const char *last_slash;
		size_t dir_size;

		/* Insert ".debug" between the directory and the basename. */
		last_slash = dso->long_name + dso->long_name_len;
		while (last_slash != dso->long_name && *last_slash != '/')
			last_slash--;

		len = __symbol__join_symfs(filename, size, "");
		dir_size = last_slash - dso->long_name + 2;
		if (dir_size > (size - len)) {
			ret = -1;
			break;
		}
		len += scnprintf(filename + len, dir_size, "%s",  dso->long_name);
		len += scnprintf(filename + len , size - len, ".debug%s",
								last_slash);
		break;
	}

	case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
		if (!dso->has_build_id) {
			ret = -1;
			break;
		}

		/* Path layout: .build-id/<first 2 hex chars>/<rest>.debug */
		build_id__sprintf(dso->build_id,
				  sizeof(dso->build_id),
				  build_id_hex);
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug/.build-id/");
		snprintf(filename + len, size - len, "%.2s/%s.debug",
			 build_id_hex, build_id_hex + 2);
		break;

	case DSO_BINARY_TYPE__VMLINUX:
	case DSO_BINARY_TYPE__GUEST_VMLINUX:
	case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
		__symbol__join_symfs(filename, size, dso->long_name);
		break;

	case DSO_BINARY_TYPE__GUEST_KMODULE:
	case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
		path__join3(filename, size, symbol_conf.symfs,
			    root_dir, dso->long_name);
		break;

	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
		__symbol__join_symfs(filename, size, dso->long_name);
		break;

	case DSO_BINARY_TYPE__KCORE:
	case DSO_BINARY_TYPE__GUEST_KCORE:
		snprintf(filename, size, "%s", dso->long_name);
		break;

	default:
	case DSO_BINARY_TYPE__KALLSYMS:
	case DSO_BINARY_TYPE__GUEST_KALLSYMS:
	case DSO_BINARY_TYPE__JAVA_JIT:
	case DSO_BINARY_TYPE__NOT_FOUND:
		/* These types have no backing file to read symbols from. */
		ret = -1;
		break;
	}

	return ret;
}
182 
/*
 * Table of supported kernel-module compression formats, terminated by a
 * NULL entry.  Each pairs a filename extension (without the dot) with a
 * decompressor writing the decompressed image to an open fd.
 */
static const struct {
	const char *fmt;
	int (*decompress)(const char *input, int output);
} compressions[] = {
#ifdef HAVE_ZLIB_SUPPORT
	{ "gz", gzip_decompress_to_file },
#endif
#ifdef HAVE_LZMA_SUPPORT
	{ "xz", lzma_decompress_to_file },
#endif
	{ NULL, NULL },
};
195 
196 bool is_supported_compression(const char *ext)
197 {
198 	unsigned i;
199 
200 	for (i = 0; compressions[i].fmt; i++) {
201 		if (!strcmp(ext, compressions[i].fmt))
202 			return true;
203 	}
204 	return false;
205 }
206 
/*
 * Decide whether @pathname refers to a kernel module, given the (already
 * masked) @cpumode of the sample it came from.  User/hypervisor/guest-user
 * modes can never map a kernel module; for kernel-side modes the path is
 * parsed for a ".ko" suffix.
 */
bool is_kernel_module(const char *pathname, int cpumode)
{
	struct kmod_path m;
	int mode = cpumode & PERF_RECORD_MISC_CPUMODE_MASK;

	/* Callers are expected to pass an already-masked cpumode. */
	WARN_ONCE(mode != cpumode,
		  "Internal error: passing unmasked cpumode (%x) to is_kernel_module",
		  cpumode);

	switch (mode) {
	case PERF_RECORD_MISC_USER:
	case PERF_RECORD_MISC_HYPERVISOR:
	case PERF_RECORD_MISC_GUEST_USER:
		return false;
	/* Treat PERF_RECORD_MISC_CPUMODE_UNKNOWN as kernel */
	default:
		/*
		 * NOTE(review): on parse failure we conservatively claim
		 * "is a module".  kmod_path__parse() here appears not to
		 * allocate name/ext, so nothing needs freeing — confirm
		 * against its definition.
		 */
		if (kmod_path__parse(&m, pathname)) {
			pr_err("Failed to check whether %s is a kernel module or not. Assume it is.",
					pathname);
			return true;
		}
	}

	return m.kmod;
}
232 
233 bool decompress_to_file(const char *ext, const char *filename, int output_fd)
234 {
235 	unsigned i;
236 
237 	for (i = 0; compressions[i].fmt; i++) {
238 		if (!strcmp(ext, compressions[i].fmt))
239 			return !compressions[i].decompress(filename,
240 							   output_fd);
241 	}
242 	return false;
243 }
244 
245 bool dso__needs_decompress(struct dso *dso)
246 {
247 	return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
248 		dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
249 }
250 
/*
 * Decompress the module file @name into a temporary file created from
 * the mkstemp() template in @tmpbuf (modified in place to the actual
 * path).  Returns an open fd to the decompressed file, or -1 on failure.
 * The caller owns both the fd and the temp file.
 */
static int decompress_kmodule(struct dso *dso, const char *name, char *tmpbuf)
{
	int fd = -1;
	struct kmod_path m;

	if (!dso__needs_decompress(dso))
		return -1;

	/* Parse long_name to learn the compression extension (m.ext). */
	if (kmod_path__parse_ext(&m, dso->long_name))
		return -1;

	if (!m.comp)
		goto out;

	fd = mkstemp(tmpbuf);
	if (fd < 0) {
		dso->load_errno = errno;
		goto out;
	}

	if (!decompress_to_file(m.ext, name, fd)) {
		dso->load_errno = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE;
		close(fd);
		fd = -1;
	}

out:
	/* m.ext was strdup-ed by kmod_path__parse_ext(); free(NULL) is ok. */
	free(m.ext);
	return fd;
}
281 
/*
 * Decompress @dso's module file @name and return an fd to the result.
 * The temporary path is unlinked immediately, so the data vanishes once
 * the returned fd is closed.  Returns a negative value on failure.
 */
int dso__decompress_kmodule_fd(struct dso *dso, const char *name)
{
	char tmpbuf[] = KMOD_DECOMP_NAME;
	int fd;

	fd = decompress_kmodule(dso, name, tmpbuf);
	unlink(tmpbuf);
	return fd;
}
291 
292 int dso__decompress_kmodule_path(struct dso *dso, const char *name,
293 				 char *pathname, size_t len)
294 {
295 	char tmpbuf[] = KMOD_DECOMP_NAME;
296 	int fd;
297 
298 	fd = decompress_kmodule(dso, name, tmpbuf);
299 	if (fd < 0) {
300 		unlink(tmpbuf);
301 		return -1;
302 	}
303 
304 	strncpy(pathname, tmpbuf, len);
305 	close(fd);
306 	return 0;
307 }
308 
309 /*
310  * Parses kernel module specified in @path and updates
311  * @m argument like:
312  *
313  *    @comp - true if @path contains supported compression suffix,
314  *            false otherwise
315  *    @kmod - true if @path contains '.ko' suffix in right position,
316  *            false otherwise
317  *    @name - if (@alloc_name && @kmod) is true, it contains strdup-ed base name
 *            of the kernel module without suffixes, otherwise strdup-ed
319  *            base name of @path
320  *    @ext  - if (@alloc_ext && @comp) is true, it contains strdup-ed string
321  *            the compression suffix
322  *
323  * Returns 0 if there's no strdup error, -ENOMEM otherwise.
324  */
int __kmod_path__parse(struct kmod_path *m, const char *path,
		       bool alloc_name, bool alloc_ext)
{
	const char *name = strrchr(path, '/');
	const char *ext  = strrchr(path, '.');
	bool is_simple_name = false;

	memset(m, 0x0, sizeof(*m));
	/* Work on the basename of @path. */
	name = name ? name + 1 : path;

	/*
	 * '.' is also a valid character for module name. For example:
	 * [aaa.bbb] is a valid module name. '[' should have higher
	 * priority than '.ko' suffix.
	 *
	 * The kernel names are from machine__mmap_name. Such
	 * name should belong to kernel itself, not kernel module.
	 */
	if (name[0] == '[') {
		is_simple_name = true;
		if ((strncmp(name, "[kernel.kallsyms]", 17) == 0) ||
		    (strncmp(name, "[guest.kernel.kallsyms", 22) == 0) ||
		    (strncmp(name, "[vdso]", 6) == 0) ||
		    (strncmp(name, "[vsyscall]", 10) == 0)) {
			m->kmod = false;

		} else
			m->kmod = true;
	}

	/* No extension, just return name. */
	if ((ext == NULL) || is_simple_name) {
		if (alloc_name) {
			m->name = strdup(name);
			return m->name ? 0 : -ENOMEM;
		}
		return 0;
	}

	/*
	 * A known compression suffix (e.g. ".gz") follows ".ko"; step
	 * @ext back 3 chars so it points at the ".ko" part.
	 */
	if (is_supported_compression(ext + 1)) {
		m->comp = true;
		ext -= 3;
	}

	/* Check .ko extension only if there's enough name left. */
	if (ext > name)
		m->kmod = !strncmp(ext, ".ko", 3);

	if (alloc_name) {
		if (m->kmod) {
			/* Modules are reported bracketed: "[name]". */
			if (asprintf(&m->name, "[%.*s]", (int) (ext - name), name) == -1)
				return -ENOMEM;
		} else {
			if (asprintf(&m->name, "%s", name) == -1)
				return -ENOMEM;
		}

		/* Module names use '_' where the file name has '-'. */
		strxfrchar(m->name, '-', '_');
	}

	if (alloc_ext && m->comp) {
		/* ext points at ".ko"; the compression suffix starts 4 on. */
		m->ext = strdup(ext + 4);
		if (!m->ext) {
			free((void *) m->name);
			return -ENOMEM;
		}
	}

	return 0;
}
395 
396 void dso__set_module_info(struct dso *dso, struct kmod_path *m,
397 			  struct machine *machine)
398 {
399 	if (machine__is_host(machine))
400 		dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
401 	else
402 		dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;
403 
404 	/* _KMODULE_COMP should be next to _KMODULE */
405 	if (m->kmod && m->comp)
406 		dso->symtab_type++;
407 
408 	dso__set_short_name(dso, strdup(m->name), true);
409 }
410 
411 /*
412  * Global list of open DSOs and the counter.
413  */
414 static LIST_HEAD(dso__data_open);
415 static long dso__data_open_cnt;
416 static pthread_mutex_t dso__data_open_lock = PTHREAD_MUTEX_INITIALIZER;
417 
418 static void dso__list_add(struct dso *dso)
419 {
420 	list_add_tail(&dso->data.open_entry, &dso__data_open);
421 	dso__data_open_cnt++;
422 }
423 
/* Remove @dso from the global open list and drop the counter (lock held). */
static void dso__list_del(struct dso *dso)
{
	list_del(&dso->data.open_entry);
	/* Counter going negative means an add/del imbalance somewhere. */
	WARN_ONCE(dso__data_open_cnt <= 0,
		  "DSO data fd counter out of bounds.");
	dso__data_open_cnt--;
}
431 
432 static void close_first_dso(void);
433 
434 static int do_open(char *name)
435 {
436 	int fd;
437 	char sbuf[STRERR_BUFSIZE];
438 
439 	do {
440 		fd = open(name, O_RDONLY);
441 		if (fd >= 0)
442 			return fd;
443 
444 		pr_debug("dso open failed: %s\n",
445 			 str_error_r(errno, sbuf, sizeof(sbuf)));
446 		if (!dso__data_open_cnt || errno != EMFILE)
447 			break;
448 
449 		close_first_dso();
450 	} while (1);
451 
452 	return -1;
453 }
454 
/*
 * Resolve @dso's on-disk filename for its binary_type (relative to
 * @machine's root, if any), decompress it first when required, and open
 * it.  Returns an open fd, or a negative errno-style value on failure.
 */
static int __open_dso(struct dso *dso, struct machine *machine)
{
	int fd = -EINVAL;
	char *root_dir = (char *)"";
	char *name = malloc(PATH_MAX);

	if (!name)
		return -ENOMEM;

	if (machine)
		root_dir = machine->root_dir;

	if (dso__read_binary_type_filename(dso, dso->binary_type,
					    root_dir, name, PATH_MAX))
		goto out;

	if (!is_regular_file(name))
		goto out;

	if (dso__needs_decompress(dso)) {
		char newpath[KMOD_DECOMP_LEN];
		size_t len = sizeof(newpath);

		if (dso__decompress_kmodule_path(dso, name, newpath, len) < 0) {
			fd = -dso->load_errno;
			goto out;
		}

		/* From here on, operate on the decompressed temp file. */
		strcpy(name, newpath);
	}

	fd = do_open(name);

	/* The decompressed temp file lives on only through the fd. */
	if (dso__needs_decompress(dso))
		unlink(name);

out:
	free(name);
	return fd;
}
495 
496 static void check_data_close(void);
497 
498 /**
499  * dso_close - Open DSO data file
500  * @dso: dso object
501  *
502  * Open @dso's data file descriptor and updates
503  * list/count of open DSO objects.
504  */
505 static int open_dso(struct dso *dso, struct machine *machine)
506 {
507 	int fd = __open_dso(dso, machine);
508 
509 	if (fd >= 0) {
510 		dso__list_add(dso);
511 		/*
512 		 * Check if we crossed the allowed number
513 		 * of opened DSOs and close one if needed.
514 		 */
515 		check_data_close();
516 	}
517 
518 	return fd;
519 }
520 
521 static void close_data_fd(struct dso *dso)
522 {
523 	if (dso->data.fd >= 0) {
524 		close(dso->data.fd);
525 		dso->data.fd = -1;
526 		dso->data.file_size = 0;
527 		dso__list_del(dso);
528 	}
529 }
530 
531 /**
532  * dso_close - Close DSO data file
533  * @dso: dso object
534  *
535  * Close @dso's data file descriptor and updates
536  * list/count of open DSO objects.
537  */
538 static void close_dso(struct dso *dso)
539 {
540 	close_data_fd(dso);
541 }
542 
543 static void close_first_dso(void)
544 {
545 	struct dso *dso;
546 
547 	dso = list_first_entry(&dso__data_open, struct dso, data.open_entry);
548 	close_dso(dso);
549 }
550 
/*
 * Compute how many DSO fds we may keep cached: half the process
 * RLIMIT_NOFILE soft limit (or unlimited when the limit is infinite).
 * Falls back to 1 if getrlimit() fails.
 */
static rlim_t get_fd_limit(void)
{
	struct rlimit rlim;

	if (getrlimit(RLIMIT_NOFILE, &rlim) != 0) {
		pr_err("failed to get fd limit\n");
		return 1;
	}

	/* Allow half of the current open fd limit. */
	return rlim.rlim_cur == RLIM_INFINITY ? rlim.rlim_cur
					      : rlim.rlim_cur / 2;
}
569 
/* Cached fd budget; 0 means "not computed yet" (see may_cache_fd()). */
static rlim_t fd_limit;

/*
 * Used only by tests/dso-data.c to reset the environment
 * for tests. I dont expect we should change this during
 * standard runtime.
 */
void reset_fd_limit(void)
{
	fd_limit = 0;
}
581 
582 static bool may_cache_fd(void)
583 {
584 	if (!fd_limit)
585 		fd_limit = get_fd_limit();
586 
587 	if (fd_limit == RLIM_INFINITY)
588 		return true;
589 
590 	return fd_limit > (rlim_t) dso__data_open_cnt;
591 }
592 
593 /*
594  * Check and close LRU dso if we crossed allowed limit
595  * for opened dso file descriptors. The limit is half
596  * of the RLIMIT_NOFILE files opened.
597 */
598 static void check_data_close(void)
599 {
600 	bool cache_fd = may_cache_fd();
601 
602 	if (!cache_fd)
603 		close_first_dso();
604 }
605 
606 /**
607  * dso__data_close - Close DSO data file
608  * @dso: dso object
609  *
610  * External interface to close @dso's data file descriptor.
611  */
612 void dso__data_close(struct dso *dso)
613 {
614 	pthread_mutex_lock(&dso__data_open_lock);
615 	close_dso(dso);
616 	pthread_mutex_unlock(&dso__data_open_lock);
617 }
618 
/*
 * Ensure dso->data.fd is open (caller holds dso__data_open_lock).  If
 * the binary type is not yet known, probe the candidate types in order
 * until one opens.  Updates dso->data.status to OK or ERROR.
 */
static void try_to_open_dso(struct dso *dso, struct machine *machine)
{
	enum dso_binary_type binary_type_data[] = {
		DSO_BINARY_TYPE__BUILD_ID_CACHE,
		DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
		DSO_BINARY_TYPE__NOT_FOUND,
	};
	int i = 0;

	if (dso->data.fd >= 0)
		return;

	/* Binary type already discovered: reopen directly. */
	if (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND) {
		dso->data.fd = open_dso(dso, machine);
		goto out;
	}

	/* Probe candidates; the NOT_FOUND sentinel terminates the loop. */
	do {
		dso->binary_type = binary_type_data[i++];

		dso->data.fd = open_dso(dso, machine);
		if (dso->data.fd >= 0)
			goto out;

	} while (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND);
out:
	if (dso->data.fd >= 0)
		dso->data.status = DSO_DATA_STATUS_OK;
	else
		dso->data.status = DSO_DATA_STATUS_ERROR;
}
650 
651 /**
652  * dso__data_get_fd - Get dso's data file descriptor
653  * @dso: dso object
654  * @machine: machine object
655  *
656  * External interface to find dso's file, open it and
657  * returns file descriptor.  It should be paired with
658  * dso__data_put_fd() if it returns non-negative value.
659  */
660 int dso__data_get_fd(struct dso *dso, struct machine *machine)
661 {
662 	if (dso->data.status == DSO_DATA_STATUS_ERROR)
663 		return -1;
664 
665 	if (pthread_mutex_lock(&dso__data_open_lock) < 0)
666 		return -1;
667 
668 	try_to_open_dso(dso, machine);
669 
670 	if (dso->data.fd < 0)
671 		pthread_mutex_unlock(&dso__data_open_lock);
672 
673 	return dso->data.fd;
674 }
675 
/**
 * dso__data_put_fd - Release dso data file descriptor
 * @dso: dso object (unused, kept for interface symmetry)
 *
 * Drops the lock held since a successful dso__data_get_fd().
 */
void dso__data_put_fd(struct dso *dso __maybe_unused)
{
	pthread_mutex_unlock(&dso__data_open_lock);
}
680 
681 bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by)
682 {
683 	u32 flag = 1 << by;
684 
685 	if (dso->data.status_seen & flag)
686 		return true;
687 
688 	dso->data.status_seen |= flag;
689 
690 	return false;
691 }
692 
/* Free every node of @dso's data-cache rbtree (taken under dso->lock). */
static void
dso_cache__free(struct dso *dso)
{
	struct rb_root *root = &dso->data.cache;
	struct rb_node *next = rb_first(root);

	pthread_mutex_lock(&dso->lock);
	while (next) {
		struct dso_cache *cache;

		cache = rb_entry(next, struct dso_cache, rb_node);
		/* Advance before erasing: rb_next() needs a live node. */
		next = rb_next(&cache->rb_node);
		rb_erase(&cache->rb_node, root);
		free(cache);
	}
	pthread_mutex_unlock(&dso->lock);
}
710 
711 static struct dso_cache *dso_cache__find(struct dso *dso, u64 offset)
712 {
713 	const struct rb_root *root = &dso->data.cache;
714 	struct rb_node * const *p = &root->rb_node;
715 	const struct rb_node *parent = NULL;
716 	struct dso_cache *cache;
717 
718 	while (*p != NULL) {
719 		u64 end;
720 
721 		parent = *p;
722 		cache = rb_entry(parent, struct dso_cache, rb_node);
723 		end = cache->offset + DSO__DATA_CACHE_SIZE;
724 
725 		if (offset < cache->offset)
726 			p = &(*p)->rb_left;
727 		else if (offset >= end)
728 			p = &(*p)->rb_right;
729 		else
730 			return cache;
731 	}
732 
733 	return NULL;
734 }
735 
/*
 * Insert @new into @dso's data-cache rbtree under dso->lock.  If another
 * thread already inserted a chunk covering the same offset, the new node
 * is NOT linked and the existing (winning) node is returned; on a clean
 * insert, NULL is returned.
 */
static struct dso_cache *
dso_cache__insert(struct dso *dso, struct dso_cache *new)
{
	struct rb_root *root = &dso->data.cache;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct dso_cache *cache;
	u64 offset = new->offset;

	pthread_mutex_lock(&dso->lock);
	while (*p != NULL) {
		u64 end;

		parent = *p;
		cache = rb_entry(parent, struct dso_cache, rb_node);
		end = cache->offset + DSO__DATA_CACHE_SIZE;

		if (offset < cache->offset)
			p = &(*p)->rb_left;
		else if (offset >= end)
			p = &(*p)->rb_right;
		else
			/* Lost the race: an overlapping chunk exists. */
			goto out;
	}

	rb_link_node(&new->rb_node, parent, p);
	rb_insert_color(&new->rb_node, root);

	cache = NULL;
out:
	pthread_mutex_unlock(&dso->lock);
	return cache;
}
769 
770 static ssize_t
771 dso_cache__memcpy(struct dso_cache *cache, u64 offset,
772 		  u8 *data, u64 size)
773 {
774 	u64 cache_offset = offset - cache->offset;
775 	u64 cache_size   = min(cache->size - cache_offset, size);
776 
777 	memcpy(data, cache->data + cache_offset, cache_size);
778 	return cache_size;
779 }
780 
/*
 * Cache miss path: read one DSO__DATA_CACHE_SIZE-aligned chunk covering
 * @offset from the dso file, insert it into the cache and copy the
 * requested bytes into @data.  Returns bytes copied, 0 at EOF, or a
 * negative error.
 */
static ssize_t
dso_cache__read(struct dso *dso, struct machine *machine,
		u64 offset, u8 *data, ssize_t size)
{
	struct dso_cache *cache;
	struct dso_cache *old;
	ssize_t ret;

	/* do { } while (0): "break" jumps straight to the unlock below. */
	do {
		u64 cache_offset;

		cache = zalloc(sizeof(*cache) + DSO__DATA_CACHE_SIZE);
		if (!cache)
			return -ENOMEM;

		pthread_mutex_lock(&dso__data_open_lock);

		/*
		 * dso->data.fd might be closed if other thread opened another
		 * file (dso) due to open file limit (RLIMIT_NOFILE).
		 */
		try_to_open_dso(dso, machine);

		if (dso->data.fd < 0) {
			ret = -errno;
			dso->data.status = DSO_DATA_STATUS_ERROR;
			break;
		}

		/* Align the read to a cache-chunk boundary. */
		cache_offset = offset & DSO__DATA_CACHE_MASK;

		ret = pread(dso->data.fd, cache->data, DSO__DATA_CACHE_SIZE, cache_offset);
		if (ret <= 0)
			break;

		cache->offset = cache_offset;
		cache->size   = ret;
	} while (0);

	pthread_mutex_unlock(&dso__data_open_lock);

	if (ret > 0) {
		/* Another thread may have cached this chunk meanwhile. */
		old = dso_cache__insert(dso, cache);
		if (old) {
			/* we lose the race */
			free(cache);
			cache = old;
		}

		ret = dso_cache__memcpy(cache, offset, data, size);
	}

	if (ret <= 0)
		free(cache);

	return ret;
}
838 
839 static ssize_t dso_cache_read(struct dso *dso, struct machine *machine,
840 			      u64 offset, u8 *data, ssize_t size)
841 {
842 	struct dso_cache *cache;
843 
844 	cache = dso_cache__find(dso, offset);
845 	if (cache)
846 		return dso_cache__memcpy(cache, offset, data, size);
847 	else
848 		return dso_cache__read(dso, machine, offset, data, size);
849 }
850 
851 /*
852  * Reads and caches dso data DSO__DATA_CACHE_SIZE size chunks
853  * in the rb_tree. Any read to already cached data is served
854  * by cached data.
855  */
856 static ssize_t cached_read(struct dso *dso, struct machine *machine,
857 			   u64 offset, u8 *data, ssize_t size)
858 {
859 	ssize_t r = 0;
860 	u8 *p = data;
861 
862 	do {
863 		ssize_t ret;
864 
865 		ret = dso_cache_read(dso, machine, offset, p, size);
866 		if (ret < 0)
867 			return ret;
868 
869 		/* Reached EOF, return what we have. */
870 		if (!ret)
871 			break;
872 
873 		BUG_ON(ret > size);
874 
875 		r      += ret;
876 		p      += ret;
877 		offset += ret;
878 		size   -= ret;
879 
880 	} while (size);
881 
882 	return r;
883 }
884 
/*
 * Populate dso->data.file_size (once) by fstat-ing the dso's data file,
 * opening it if necessary.  Returns 0 on success (or if already known),
 * negative errno on failure.
 */
static int data_file_size(struct dso *dso, struct machine *machine)
{
	int ret = 0;
	struct stat st;
	char sbuf[STRERR_BUFSIZE];

	/* Already determined on a previous call. */
	if (dso->data.file_size)
		return 0;

	if (dso->data.status == DSO_DATA_STATUS_ERROR)
		return -1;

	pthread_mutex_lock(&dso__data_open_lock);

	/*
	 * dso->data.fd might be closed if other thread opened another
	 * file (dso) due to open file limit (RLIMIT_NOFILE).
	 */
	try_to_open_dso(dso, machine);

	if (dso->data.fd < 0) {
		ret = -errno;
		dso->data.status = DSO_DATA_STATUS_ERROR;
		goto out;
	}

	if (fstat(dso->data.fd, &st) < 0) {
		ret = -errno;
		pr_err("dso cache fstat failed: %s\n",
		       str_error_r(errno, sbuf, sizeof(sbuf)));
		dso->data.status = DSO_DATA_STATUS_ERROR;
		goto out;
	}
	dso->data.file_size = st.st_size;

out:
	pthread_mutex_unlock(&dso__data_open_lock);
	return ret;
}
924 
925 /**
926  * dso__data_size - Return dso data size
927  * @dso: dso object
928  * @machine: machine object
929  *
930  * Return: dso data size
931  */
932 off_t dso__data_size(struct dso *dso, struct machine *machine)
933 {
934 	if (data_file_size(dso, machine))
935 		return -1;
936 
937 	/* For now just estimate dso data size is close to file size */
938 	return dso->data.file_size;
939 }
940 
941 static ssize_t data_read_offset(struct dso *dso, struct machine *machine,
942 				u64 offset, u8 *data, ssize_t size)
943 {
944 	if (data_file_size(dso, machine))
945 		return -1;
946 
947 	/* Check the offset sanity. */
948 	if (offset > dso->data.file_size)
949 		return -1;
950 
951 	if (offset + size < offset)
952 		return -1;
953 
954 	return cached_read(dso, machine, offset, data, size);
955 }
956 
957 /**
958  * dso__data_read_offset - Read data from dso file offset
959  * @dso: dso object
960  * @machine: machine object
961  * @offset: file offset
962  * @data: buffer to store data
963  * @size: size of the @data buffer
964  *
965  * External interface to read data from dso file offset. Open
966  * dso data file and use cached_read to get the data.
967  */
968 ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
969 			      u64 offset, u8 *data, ssize_t size)
970 {
971 	if (dso->data.status == DSO_DATA_STATUS_ERROR)
972 		return -1;
973 
974 	return data_read_offset(dso, machine, offset, data, size);
975 }
976 
977 /**
978  * dso__data_read_addr - Read data from dso address
979  * @dso: dso object
980  * @machine: machine object
981  * @add: virtual memory address
982  * @data: buffer to store data
983  * @size: size of the @data buffer
984  *
985  * External interface to read data from dso address.
986  */
987 ssize_t dso__data_read_addr(struct dso *dso, struct map *map,
988 			    struct machine *machine, u64 addr,
989 			    u8 *data, ssize_t size)
990 {
991 	u64 offset = map->map_ip(map, addr);
992 	return dso__data_read_offset(dso, machine, offset, data, size);
993 }
994 
995 struct map *dso__new_map(const char *name)
996 {
997 	struct map *map = NULL;
998 	struct dso *dso = dso__new(name);
999 
1000 	if (dso)
1001 		map = map__new2(0, dso, MAP__FUNCTION);
1002 
1003 	return map;
1004 }
1005 
/*
 * Find or create @machine's kernel dso named @name, then (re)mark it as
 * a kernel dso of @dso_type with the given @short_name.
 */
struct dso *machine__findnew_kernel(struct machine *machine, const char *name,
				    const char *short_name, int dso_type)
{
	/*
	 * The kernel dso could be created by build_id processing.
	 */
	struct dso *dso = machine__findnew_dso(machine, name);

	/*
	 * We need to run this in all cases, since during the build_id
	 * processing we had no idea this was the kernel dso.
	 */
	if (dso != NULL) {
		/* short_name is a static string here, hence "false". */
		dso__set_short_name(dso, short_name, false);
		dso->kernel = dso_type;
	}

	return dso;
}
1025 
1026 /*
1027  * Find a matching entry and/or link current entry to RB tree.
1028  * Either one of the dso or name parameter must be non-NULL or the
1029  * function will not work.
1030  */
1031 static struct dso *__dso__findlink_by_longname(struct rb_root *root,
1032 					       struct dso *dso, const char *name)
1033 {
1034 	struct rb_node **p = &root->rb_node;
1035 	struct rb_node  *parent = NULL;
1036 
1037 	if (!name)
1038 		name = dso->long_name;
1039 	/*
1040 	 * Find node with the matching name
1041 	 */
1042 	while (*p) {
1043 		struct dso *this = rb_entry(*p, struct dso, rb_node);
1044 		int rc = strcmp(name, this->long_name);
1045 
1046 		parent = *p;
1047 		if (rc == 0) {
1048 			/*
1049 			 * In case the new DSO is a duplicate of an existing
1050 			 * one, print a one-time warning & put the new entry
1051 			 * at the end of the list of duplicates.
1052 			 */
1053 			if (!dso || (dso == this))
1054 				return this;	/* Find matching dso */
1055 			/*
1056 			 * The core kernel DSOs may have duplicated long name.
1057 			 * In this case, the short name should be different.
1058 			 * Comparing the short names to differentiate the DSOs.
1059 			 */
1060 			rc = strcmp(dso->short_name, this->short_name);
1061 			if (rc == 0) {
1062 				pr_err("Duplicated dso name: %s\n", name);
1063 				return NULL;
1064 			}
1065 		}
1066 		if (rc < 0)
1067 			p = &parent->rb_left;
1068 		else
1069 			p = &parent->rb_right;
1070 	}
1071 	if (dso) {
1072 		/* Add new node and rebalance tree */
1073 		rb_link_node(&dso->rb_node, parent, p);
1074 		rb_insert_color(&dso->rb_node, root);
1075 		dso->root = root;
1076 	}
1077 	return NULL;
1078 }
1079 
/* Pure lookup by long name: never links a new node into the tree. */
static inline struct dso *__dso__find_by_longname(struct rb_root *root,
						  const char *name)
{
	return __dso__findlink_by_longname(root, NULL, name);
}
1085 
/*
 * Replace @dso's long name.  When @name_allocated, @dso takes ownership
 * of @name and will free it.  Since dsos are keyed by long name in the
 * per-machine rbtree, the dso is unlinked and re-linked around the
 * rename.
 */
void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated)
{
	struct rb_root *root = dso->root;

	if (name == NULL)
		return;

	if (dso->long_name_allocated)
		free((char *)dso->long_name);

	if (root) {
		rb_erase(&dso->rb_node, root);
		/*
		 * __dso__findlink_by_longname() isn't guaranteed to add it
		 * back, so a clean removal is required here.
		 */
		RB_CLEAR_NODE(&dso->rb_node);
		dso->root = NULL;
	}

	dso->long_name		 = name;
	dso->long_name_len	 = strlen(name);
	dso->long_name_allocated = name_allocated;

	/* Re-insert under the new key (may detect a duplicate). */
	if (root)
		__dso__findlink_by_longname(root, dso, NULL);
}
1113 
1114 void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
1115 {
1116 	if (name == NULL)
1117 		return;
1118 
1119 	if (dso->short_name_allocated)
1120 		free((char *)dso->short_name);
1121 
1122 	dso->short_name		  = name;
1123 	dso->short_name_len	  = strlen(name);
1124 	dso->short_name_allocated = name_allocated;
1125 }
1126 
1127 static void dso__set_basename(struct dso *dso)
1128 {
1129        /*
1130         * basename() may modify path buffer, so we must pass
1131         * a copy.
1132         */
1133        char *base, *lname = strdup(dso->long_name);
1134 
1135        if (!lname)
1136                return;
1137 
1138        /*
1139         * basename() may return a pointer to internal
1140         * storage which is reused in subsequent calls
1141         * so copy the result.
1142         */
1143        base = strdup(basename(lname));
1144 
1145        free(lname);
1146 
1147        if (!base)
1148                return;
1149 
1150        dso__set_short_name(dso, base, true);
1151 }
1152 
1153 int dso__name_len(const struct dso *dso)
1154 {
1155 	if (!dso)
1156 		return strlen("[unknown]");
1157 	if (verbose > 0)
1158 		return dso->long_name_len;
1159 
1160 	return dso->short_name_len;
1161 }
1162 
1163 bool dso__loaded(const struct dso *dso, enum map_type type)
1164 {
1165 	return dso->loaded & (1 << type);
1166 }
1167 
1168 bool dso__sorted_by_name(const struct dso *dso, enum map_type type)
1169 {
1170 	return dso->sorted_by_name & (1 << type);
1171 }
1172 
1173 void dso__set_sorted_by_name(struct dso *dso, enum map_type type)
1174 {
1175 	dso->sorted_by_name |= (1 << type);
1176 }
1177 
/*
 * Allocate and initialize a dso.  @name is copied into trailing storage
 * of the allocation (dso->name is a flexible-style buffer) and used as
 * both long and short name.  Starts with refcount 1; release with
 * dso__put().  Returns NULL on allocation failure.
 */
struct dso *dso__new(const char *name)
{
	struct dso *dso = calloc(1, sizeof(*dso) + strlen(name) + 1);

	if (dso != NULL) {
		int i;
		strcpy(dso->name, name);
		/* Both names alias the embedded copy: not separately freed. */
		dso__set_long_name(dso, dso->name, false);
		dso__set_short_name(dso, dso->name, false);
		for (i = 0; i < MAP__NR_TYPES; ++i)
			dso->symbols[i] = dso->symbol_names[i] = RB_ROOT;
		dso->data.cache = RB_ROOT;
		dso->data.fd = -1;
		dso->data.status = DSO_DATA_STATUS_UNKNOWN;
		dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND;
		dso->binary_type = DSO_BINARY_TYPE__NOT_FOUND;
		/* Assume the host's pointer width until symbols say otherwise. */
		dso->is_64_bit = (sizeof(void *) == 8);
		dso->loaded = 0;
		dso->rel = 0;
		dso->sorted_by_name = 0;
		dso->has_build_id = 0;
		dso->has_srcline = 1;
		dso->a2l_fails = 1;
		dso->kernel = DSO_TYPE_USER;
		dso->needs_swap = DSO_SWAP__UNSET;
		RB_CLEAR_NODE(&dso->rb_node);
		dso->root = NULL;
		INIT_LIST_HEAD(&dso->node);
		INIT_LIST_HEAD(&dso->data.open_entry);
		pthread_mutex_init(&dso->lock, NULL);
		refcount_set(&dso->refcnt, 1);
	}

	return dso;
}
1213 
/*
 * Tear down and free a dso.  Normally reached via dso__put() when the last
 * reference is dropped; the dso must already be unlinked from any dsos list.
 */
void dso__delete(struct dso *dso)
{
	int i;

	/* Being in the rbtree means someone still holds a lookup path to us. */
	if (!RB_EMPTY_NODE(&dso->rb_node))
		pr_err("DSO %s is still in rbtree when being deleted!\n",
		       dso->long_name);
	for (i = 0; i < MAP__NR_TYPES; ++i)
		symbols__delete(&dso->symbols[i]);

	/* short/long names may alias the embedded dso->name; free only if owned. */
	if (dso->short_name_allocated) {
		zfree((char **)&dso->short_name);
		dso->short_name_allocated = false;
	}

	if (dso->long_name_allocated) {
		zfree((char **)&dso->long_name);
		dso->long_name_allocated = false;
	}

	/* Close the backing fd before releasing the data cache it fed. */
	dso__data_close(dso);
	auxtrace_cache__free(dso->auxtrace_cache);
	dso_cache__free(dso);
	dso__free_a2l(dso);
	zfree(&dso->symsrc_filename);
	pthread_mutex_destroy(&dso->lock);
	free(dso);
}
1242 
1243 struct dso *dso__get(struct dso *dso)
1244 {
1245 	if (dso)
1246 		refcount_inc(&dso->refcnt);
1247 	return dso;
1248 }
1249 
1250 void dso__put(struct dso *dso)
1251 {
1252 	if (dso && refcount_dec_and_test(&dso->refcnt))
1253 		dso__delete(dso);
1254 }
1255 
1256 void dso__set_build_id(struct dso *dso, void *build_id)
1257 {
1258 	memcpy(dso->build_id, build_id, sizeof(dso->build_id));
1259 	dso->has_build_id = 1;
1260 }
1261 
1262 bool dso__build_id_equal(const struct dso *dso, u8 *build_id)
1263 {
1264 	return memcmp(dso->build_id, build_id, sizeof(dso->build_id)) == 0;
1265 }
1266 
1267 void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine)
1268 {
1269 	char path[PATH_MAX];
1270 
1271 	if (machine__is_default_guest(machine))
1272 		return;
1273 	sprintf(path, "%s/sys/kernel/notes", machine->root_dir);
1274 	if (sysfs__read_build_id(path, dso->build_id,
1275 				 sizeof(dso->build_id)) == 0)
1276 		dso->has_build_id = true;
1277 }
1278 
1279 int dso__kernel_module_get_build_id(struct dso *dso,
1280 				    const char *root_dir)
1281 {
1282 	char filename[PATH_MAX];
1283 	/*
1284 	 * kernel module short names are of the form "[module]" and
1285 	 * we need just "module" here.
1286 	 */
1287 	const char *name = dso->short_name + 1;
1288 
1289 	snprintf(filename, sizeof(filename),
1290 		 "%s/sys/module/%.*s/notes/.note.gnu.build-id",
1291 		 root_dir, (int)strlen(name) - 1, name);
1292 
1293 	if (sysfs__read_build_id(filename, dso->build_id,
1294 				 sizeof(dso->build_id)) == 0)
1295 		dso->has_build_id = true;
1296 
1297 	return 0;
1298 }
1299 
1300 bool __dsos__read_build_ids(struct list_head *head, bool with_hits)
1301 {
1302 	bool have_build_id = false;
1303 	struct dso *pos;
1304 
1305 	list_for_each_entry(pos, head, node) {
1306 		if (with_hits && !pos->hit && !dso__is_vdso(pos))
1307 			continue;
1308 		if (pos->has_build_id) {
1309 			have_build_id = true;
1310 			continue;
1311 		}
1312 		if (filename__read_build_id(pos->long_name, pos->build_id,
1313 					    sizeof(pos->build_id)) > 0) {
1314 			have_build_id	  = true;
1315 			pos->has_build_id = true;
1316 		}
1317 	}
1318 
1319 	return have_build_id;
1320 }
1321 
/*
 * Link @dso into @dsos' list and longname rbtree; callers must hold
 * dsos->lock (or use dsos__add()).  The list takes its own reference.
 */
void __dsos__add(struct dsos *dsos, struct dso *dso)
{
	list_add_tail(&dso->node, &dsos->head);
	__dso__findlink_by_longname(&dsos->root, dso, NULL);
	/*
	 * It is now in the linked list, grab a reference, then garbage collect
	 * this when needing memory, by looking at LRU dso instances in the
	 * list with atomic_read(&dso->refcnt) == 1, i.e. no references
	 * anywhere besides the one for the list, do, under a lock for the
	 * list: remove it from the list, then a dso__put(), that probably will
	 * be the last and will then call dso__delete(), end of life.
	 *
	 * That, or at the end of the 'struct machine' lifetime, when all
	 * 'struct dso' instances will be removed from the list, in
	 * dsos__exit(), if they have no other reference from some other data
	 * structure.
	 *
	 * E.g.: after processing a 'perf.data' file and storing references
	 * to objects instantiated while processing events, we will have
	 * references to the 'thread', 'map', 'dso' structs all from 'struct
	 * hist_entry' instances, but we may not need anything not referenced,
	 * so we might as well call machines__exit()/machines__delete() and
	 * garbage collect it.
	 */
	dso__get(dso);
}
1348 
1349 void dsos__add(struct dsos *dsos, struct dso *dso)
1350 {
1351 	pthread_rwlock_wrlock(&dsos->lock);
1352 	__dsos__add(dsos, dso);
1353 	pthread_rwlock_unlock(&dsos->lock);
1354 }
1355 
1356 struct dso *__dsos__find(struct dsos *dsos, const char *name, bool cmp_short)
1357 {
1358 	struct dso *pos;
1359 
1360 	if (cmp_short) {
1361 		list_for_each_entry(pos, &dsos->head, node)
1362 			if (strcmp(pos->short_name, name) == 0)
1363 				return pos;
1364 		return NULL;
1365 	}
1366 	return __dso__find_by_longname(&dsos->root, name);
1367 }
1368 
1369 struct dso *dsos__find(struct dsos *dsos, const char *name, bool cmp_short)
1370 {
1371 	struct dso *dso;
1372 	pthread_rwlock_rdlock(&dsos->lock);
1373 	dso = __dsos__find(dsos, name, cmp_short);
1374 	pthread_rwlock_unlock(&dsos->lock);
1375 	return dso;
1376 }
1377 
1378 struct dso *__dsos__addnew(struct dsos *dsos, const char *name)
1379 {
1380 	struct dso *dso = dso__new(name);
1381 
1382 	if (dso != NULL) {
1383 		__dsos__add(dsos, dso);
1384 		dso__set_basename(dso);
1385 		/* Put dso here because __dsos_add already got it */
1386 		dso__put(dso);
1387 	}
1388 	return dso;
1389 }
1390 
1391 struct dso *__dsos__findnew(struct dsos *dsos, const char *name)
1392 {
1393 	struct dso *dso = __dsos__find(dsos, name, false);
1394 
1395 	return dso ? dso : __dsos__addnew(dsos, name);
1396 }
1397 
1398 struct dso *dsos__findnew(struct dsos *dsos, const char *name)
1399 {
1400 	struct dso *dso;
1401 	pthread_rwlock_wrlock(&dsos->lock);
1402 	dso = dso__get(__dsos__findnew(dsos, name));
1403 	pthread_rwlock_unlock(&dsos->lock);
1404 	return dso;
1405 }
1406 
1407 size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp,
1408 			       bool (skip)(struct dso *dso, int parm), int parm)
1409 {
1410 	struct dso *pos;
1411 	size_t ret = 0;
1412 
1413 	list_for_each_entry(pos, head, node) {
1414 		if (skip && skip(pos, parm))
1415 			continue;
1416 		ret += dso__fprintf_buildid(pos, fp);
1417 		ret += fprintf(fp, " %s\n", pos->long_name);
1418 	}
1419 	return ret;
1420 }
1421 
1422 size_t __dsos__fprintf(struct list_head *head, FILE *fp)
1423 {
1424 	struct dso *pos;
1425 	size_t ret = 0;
1426 
1427 	list_for_each_entry(pos, head, node) {
1428 		int i;
1429 		for (i = 0; i < MAP__NR_TYPES; ++i)
1430 			ret += dso__fprintf(pos, i, fp);
1431 	}
1432 
1433 	return ret;
1434 }
1435 
1436 size_t dso__fprintf_buildid(struct dso *dso, FILE *fp)
1437 {
1438 	char sbuild_id[SBUILD_ID_SIZE];
1439 
1440 	build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
1441 	return fprintf(fp, "%s", sbuild_id);
1442 }
1443 
1444 size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp)
1445 {
1446 	struct rb_node *nd;
1447 	size_t ret = fprintf(fp, "dso: %s (", dso->short_name);
1448 
1449 	if (dso->short_name != dso->long_name)
1450 		ret += fprintf(fp, "%s, ", dso->long_name);
1451 	ret += fprintf(fp, "%s, %sloaded, ", map_type__name[type],
1452 		       dso__loaded(dso, type) ? "" : "NOT ");
1453 	ret += dso__fprintf_buildid(dso, fp);
1454 	ret += fprintf(fp, ")\n");
1455 	for (nd = rb_first(&dso->symbols[type]); nd; nd = rb_next(nd)) {
1456 		struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
1457 		ret += symbol__fprintf(pos, fp);
1458 	}
1459 
1460 	return ret;
1461 }
1462 
1463 enum dso_type dso__type(struct dso *dso, struct machine *machine)
1464 {
1465 	int fd;
1466 	enum dso_type type = DSO__TYPE_UNKNOWN;
1467 
1468 	fd = dso__data_get_fd(dso, machine);
1469 	if (fd >= 0) {
1470 		type = dso__type_fd(fd);
1471 		dso__data_put_fd(dso);
1472 	}
1473 
1474 	return type;
1475 }
1476 
1477 int dso__strerror_load(struct dso *dso, char *buf, size_t buflen)
1478 {
1479 	int idx, errnum = dso->load_errno;
1480 	/*
1481 	 * This must have a same ordering as the enum dso_load_errno.
1482 	 */
1483 	static const char *dso_load__error_str[] = {
1484 	"Internal tools/perf/ library error",
1485 	"Invalid ELF file",
1486 	"Can not read build id",
1487 	"Mismatching build id",
1488 	"Decompression failure",
1489 	};
1490 
1491 	BUG_ON(buflen == 0);
1492 
1493 	if (errnum >= 0) {
1494 		const char *err = str_error_r(errnum, buf, buflen);
1495 
1496 		if (err != buf)
1497 			scnprintf(buf, buflen, "%s", err);
1498 
1499 		return 0;
1500 	}
1501 
1502 	if (errnum <  __DSO_LOAD_ERRNO__START || errnum >= __DSO_LOAD_ERRNO__END)
1503 		return -1;
1504 
1505 	idx = errnum - __DSO_LOAD_ERRNO__START;
1506 	scnprintf(buf, buflen, "%s", dso_load__error_str[idx]);
1507 	return 0;
1508 }
1509