xref: /openbmc/linux/tools/perf/util/dso.c (revision f3a8b664)
1 #include <asm/bug.h>
2 #include <sys/time.h>
3 #include <sys/resource.h>
4 #include "symbol.h"
5 #include "dso.h"
6 #include "machine.h"
7 #include "auxtrace.h"
8 #include "util.h"
9 #include "debug.h"
10 #include "vdso.h"
11 
12 char dso__symtab_origin(const struct dso *dso)
13 {
14 	static const char origin[] = {
15 		[DSO_BINARY_TYPE__KALLSYMS]			= 'k',
16 		[DSO_BINARY_TYPE__VMLINUX]			= 'v',
17 		[DSO_BINARY_TYPE__JAVA_JIT]			= 'j',
18 		[DSO_BINARY_TYPE__DEBUGLINK]			= 'l',
19 		[DSO_BINARY_TYPE__BUILD_ID_CACHE]		= 'B',
20 		[DSO_BINARY_TYPE__FEDORA_DEBUGINFO]		= 'f',
21 		[DSO_BINARY_TYPE__UBUNTU_DEBUGINFO]		= 'u',
22 		[DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO]	= 'o',
23 		[DSO_BINARY_TYPE__BUILDID_DEBUGINFO]		= 'b',
24 		[DSO_BINARY_TYPE__SYSTEM_PATH_DSO]		= 'd',
25 		[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE]		= 'K',
26 		[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP]	= 'm',
27 		[DSO_BINARY_TYPE__GUEST_KALLSYMS]		= 'g',
28 		[DSO_BINARY_TYPE__GUEST_KMODULE]		= 'G',
29 		[DSO_BINARY_TYPE__GUEST_KMODULE_COMP]		= 'M',
30 		[DSO_BINARY_TYPE__GUEST_VMLINUX]		= 'V',
31 	};
32 
33 	if (dso == NULL || dso->symtab_type == DSO_BINARY_TYPE__NOT_FOUND)
34 		return '!';
35 	return origin[dso->symtab_type];
36 }
37 
/*
 * Build into @filename (of @size bytes) the on-disk path where @dso's
 * image of the given binary @type is expected.  @root_dir is the
 * machine root prefix, used for guest kernel modules.
 *
 * Returns 0 on success, -1 when the type carries no backing file or
 * the name cannot be built.
 */
int dso__read_binary_type_filename(const struct dso *dso,
				   enum dso_binary_type type,
				   char *root_dir, char *filename, size_t size)
{
	char build_id_hex[SBUILD_ID_SIZE];
	int ret = 0;
	size_t len;

	switch (type) {
	case DSO_BINARY_TYPE__DEBUGLINK: {
		char *debuglink;

		len = __symbol__join_symfs(filename, size, dso->long_name);
		/* Back up to the basename within the joined path. */
		debuglink = filename + len;
		while (debuglink != filename && *debuglink != '/')
			debuglink--;
		if (*debuglink == '/')
			debuglink++;

		ret = -1;
		if (!is_regular_file(filename))
			break;

		/* Overwrite the basename with the .gnu_debuglink target. */
		ret = filename__read_debuglink(filename, debuglink,
					       size - (debuglink - filename));
		}
		break;
	case DSO_BINARY_TYPE__BUILD_ID_CACHE:
		if (dso__build_id_filename(dso, filename, size) == NULL)
			ret = -1;
		break;

	case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
		/* /usr/lib/debug/<long_name>.debug */
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
		snprintf(filename + len, size - len, "%s.debug", dso->long_name);
		break;

	case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
		/* /usr/lib/debug/<long_name> */
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
		snprintf(filename + len, size - len, "%s", dso->long_name);
		break;

	case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
	{
		/* <dirname>/.debug/<basename> */
		const char *last_slash;
		size_t dir_size;

		last_slash = dso->long_name + dso->long_name_len;
		while (last_slash != dso->long_name && *last_slash != '/')
			last_slash--;

		len = __symbol__join_symfs(filename, size, "");
		/* Directory part including the '/', plus a NUL terminator. */
		dir_size = last_slash - dso->long_name + 2;
		if (dir_size > (size - len)) {
			ret = -1;
			break;
		}
		len += scnprintf(filename + len, dir_size, "%s",  dso->long_name);
		len += scnprintf(filename + len , size - len, ".debug%s",
								last_slash);
		break;
	}

	case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
		/* /usr/lib/debug/.build-id/ab/cdef...debug */
		if (!dso->has_build_id) {
			ret = -1;
			break;
		}

		build_id__sprintf(dso->build_id,
				  sizeof(dso->build_id),
				  build_id_hex);
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug/.build-id/");
		snprintf(filename + len, size - len, "%.2s/%s.debug",
			 build_id_hex, build_id_hex + 2);
		break;

	case DSO_BINARY_TYPE__VMLINUX:
	case DSO_BINARY_TYPE__GUEST_VMLINUX:
	case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
		__symbol__join_symfs(filename, size, dso->long_name);
		break;

	case DSO_BINARY_TYPE__GUEST_KMODULE:
	case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
		/* Guest modules live under the guest machine's root_dir. */
		path__join3(filename, size, symbol_conf.symfs,
			    root_dir, dso->long_name);
		break;

	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
		__symbol__join_symfs(filename, size, dso->long_name);
		break;

	case DSO_BINARY_TYPE__KCORE:
	case DSO_BINARY_TYPE__GUEST_KCORE:
		/* No symfs prefix here: long_name already is the path. */
		snprintf(filename, size, "%s", dso->long_name);
		break;

	default:
	case DSO_BINARY_TYPE__KALLSYMS:
	case DSO_BINARY_TYPE__GUEST_KALLSYMS:
	case DSO_BINARY_TYPE__JAVA_JIT:
	case DSO_BINARY_TYPE__NOT_FOUND:
		/* These types have no backing file to open. */
		ret = -1;
		break;
	}

	return ret;
}
148 
/*
 * Compression suffixes supported for kernel module files, paired with
 * their decompression helpers; NULL-terminated.  Entries are compiled
 * in only when the matching library support was detected at build time.
 */
static const struct {
	const char *fmt;
	int (*decompress)(const char *input, int output);
} compressions[] = {
#ifdef HAVE_ZLIB_SUPPORT
	{ "gz", gzip_decompress_to_file },
#endif
#ifdef HAVE_LZMA_SUPPORT
	{ "xz", lzma_decompress_to_file },
#endif
	{ NULL, NULL },
};
161 
162 bool is_supported_compression(const char *ext)
163 {
164 	unsigned i;
165 
166 	for (i = 0; compressions[i].fmt; i++) {
167 		if (!strcmp(ext, compressions[i].fmt))
168 			return true;
169 	}
170 	return false;
171 }
172 
173 bool is_kernel_module(const char *pathname, int cpumode)
174 {
175 	struct kmod_path m;
176 	int mode = cpumode & PERF_RECORD_MISC_CPUMODE_MASK;
177 
178 	WARN_ONCE(mode != cpumode,
179 		  "Internal error: passing unmasked cpumode (%x) to is_kernel_module",
180 		  cpumode);
181 
182 	switch (mode) {
183 	case PERF_RECORD_MISC_USER:
184 	case PERF_RECORD_MISC_HYPERVISOR:
185 	case PERF_RECORD_MISC_GUEST_USER:
186 		return false;
187 	/* Treat PERF_RECORD_MISC_CPUMODE_UNKNOWN as kernel */
188 	default:
189 		if (kmod_path__parse(&m, pathname)) {
190 			pr_err("Failed to check whether %s is a kernel module or not. Assume it is.",
191 					pathname);
192 			return true;
193 		}
194 	}
195 
196 	return m.kmod;
197 }
198 
199 bool decompress_to_file(const char *ext, const char *filename, int output_fd)
200 {
201 	unsigned i;
202 
203 	for (i = 0; compressions[i].fmt; i++) {
204 		if (!strcmp(ext, compressions[i].fmt))
205 			return !compressions[i].decompress(filename,
206 							   output_fd);
207 	}
208 	return false;
209 }
210 
211 bool dso__needs_decompress(struct dso *dso)
212 {
213 	return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
214 		dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
215 }
216 
217 /*
218  * Parses kernel module specified in @path and updates
219  * @m argument like:
220  *
221  *    @comp - true if @path contains supported compression suffix,
222  *            false otherwise
223  *    @kmod - true if @path contains '.ko' suffix in right position,
224  *            false otherwise
225  *    @name - if (@alloc_name && @kmod) is true, it contains strdup-ed base name
226  *            of the kernel module without suffixes, otherwise strudup-ed
227  *            base name of @path
228  *    @ext  - if (@alloc_ext && @comp) is true, it contains strdup-ed string
229  *            the compression suffix
230  *
231  * Returns 0 if there's no strdup error, -ENOMEM otherwise.
232  */
233 int __kmod_path__parse(struct kmod_path *m, const char *path,
234 		       bool alloc_name, bool alloc_ext)
235 {
236 	const char *name = strrchr(path, '/');
237 	const char *ext  = strrchr(path, '.');
238 	bool is_simple_name = false;
239 
240 	memset(m, 0x0, sizeof(*m));
241 	name = name ? name + 1 : path;
242 
243 	/*
244 	 * '.' is also a valid character for module name. For example:
245 	 * [aaa.bbb] is a valid module name. '[' should have higher
246 	 * priority than '.ko' suffix.
247 	 *
248 	 * The kernel names are from machine__mmap_name. Such
249 	 * name should belong to kernel itself, not kernel module.
250 	 */
251 	if (name[0] == '[') {
252 		is_simple_name = true;
253 		if ((strncmp(name, "[kernel.kallsyms]", 17) == 0) ||
254 		    (strncmp(name, "[guest.kernel.kallsyms", 22) == 0) ||
255 		    (strncmp(name, "[vdso]", 6) == 0) ||
256 		    (strncmp(name, "[vsyscall]", 10) == 0)) {
257 			m->kmod = false;
258 
259 		} else
260 			m->kmod = true;
261 	}
262 
263 	/* No extension, just return name. */
264 	if ((ext == NULL) || is_simple_name) {
265 		if (alloc_name) {
266 			m->name = strdup(name);
267 			return m->name ? 0 : -ENOMEM;
268 		}
269 		return 0;
270 	}
271 
272 	if (is_supported_compression(ext + 1)) {
273 		m->comp = true;
274 		ext -= 3;
275 	}
276 
277 	/* Check .ko extension only if there's enough name left. */
278 	if (ext > name)
279 		m->kmod = !strncmp(ext, ".ko", 3);
280 
281 	if (alloc_name) {
282 		if (m->kmod) {
283 			if (asprintf(&m->name, "[%.*s]", (int) (ext - name), name) == -1)
284 				return -ENOMEM;
285 		} else {
286 			if (asprintf(&m->name, "%s", name) == -1)
287 				return -ENOMEM;
288 		}
289 
290 		strxfrchar(m->name, '-', '_');
291 	}
292 
293 	if (alloc_ext && m->comp) {
294 		m->ext = strdup(ext + 4);
295 		if (!m->ext) {
296 			free((void *) m->name);
297 			return -ENOMEM;
298 		}
299 	}
300 
301 	return 0;
302 }
303 
/*
 * Global list of open DSOs and the counter.
 */
static LIST_HEAD(dso__data_open);	/* LRU order: oldest fd at the head */
static long dso__data_open_cnt;		/* number of fds currently on the list */
static pthread_mutex_t dso__data_open_lock = PTHREAD_MUTEX_INITIALIZER;
310 
/* Track @dso's newly opened fd on the global LRU list (tail = newest). */
static void dso__list_add(struct dso *dso)
{
	list_add_tail(&dso->data.open_entry, &dso__data_open);
	dso__data_open_cnt++;
}
316 
/* Remove @dso from the global open list and drop the fd counter. */
static void dso__list_del(struct dso *dso)
{
	list_del(&dso->data.open_entry);
	WARN_ONCE(dso__data_open_cnt <= 0,
		  "DSO data fd counter out of bounds.");
	dso__data_open_cnt--;
}
324 
/* Needed by do_open() to make room when the fd limit is hit. */
static void close_first_dso(void);

/*
 * Open @name read-only.  On EMFILE, retry after closing the least
 * recently opened cached dso fd.  Returns the fd, or -1 on failure.
 */
static int do_open(char *name)
{
	int fd;
	char sbuf[STRERR_BUFSIZE];

	do {
		fd = open(name, O_RDONLY);
		if (fd >= 0)
			return fd;

		pr_debug("dso open failed: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		/* Only retry when closing a cached fd could actually help. */
		if (!dso__data_open_cnt || errno != EMFILE)
			break;

		close_first_dso();
	} while (1);

	return -1;
}
347 
348 static int __open_dso(struct dso *dso, struct machine *machine)
349 {
350 	int fd;
351 	char *root_dir = (char *)"";
352 	char *name = malloc(PATH_MAX);
353 
354 	if (!name)
355 		return -ENOMEM;
356 
357 	if (machine)
358 		root_dir = machine->root_dir;
359 
360 	if (dso__read_binary_type_filename(dso, dso->binary_type,
361 					    root_dir, name, PATH_MAX)) {
362 		free(name);
363 		return -EINVAL;
364 	}
365 
366 	if (!is_regular_file(name))
367 		return -EINVAL;
368 
369 	fd = do_open(name);
370 	free(name);
371 	return fd;
372 }
373 
static void check_data_close(void);

/**
 * open_dso - Open DSO data file
 * @dso: dso object
 * @machine: machine object, may be NULL
 *
 * Open @dso's data file descriptor and updates
 * list/count of open DSO objects.
 */
static int open_dso(struct dso *dso, struct machine *machine)
{
	int fd = __open_dso(dso, machine);

	if (fd >= 0) {
		dso__list_add(dso);
		/*
		 * Check if we crossed the allowed number
		 * of opened DSOs and close one if needed.
		 */
		check_data_close();
	}

	return fd;
}
398 
/*
 * Close @dso's cached data fd (if open), reset the cached file size
 * and take the dso off the global open-dso LRU list.
 */
static void close_data_fd(struct dso *dso)
{
	if (dso->data.fd >= 0) {
		close(dso->data.fd);
		dso->data.fd = -1;
		dso->data.file_size = 0;
		dso__list_del(dso);
	}
}
408 
/**
 * close_dso - Close DSO data file
 * @dso: dso object
 *
 * Close @dso's data file descriptor and updates
 * list/count of open DSO objects.
 */
static void close_dso(struct dso *dso)
{
	close_data_fd(dso);
}
420 
/* Close the least recently opened dso fd (head of the LRU list). */
static void close_first_dso(void)
{
	struct dso *dso;

	dso = list_first_entry(&dso__data_open, struct dso, data.open_entry);
	close_dso(dso);
}
428 
/*
 * How many dso fds we may keep cached: half of RLIMIT_NOFILE,
 * RLIM_INFINITY when unlimited, or 1 if the limit cannot be read.
 */
static rlim_t get_fd_limit(void)
{
	struct rlimit rlim;

	if (getrlimit(RLIMIT_NOFILE, &rlim) != 0) {
		pr_err("failed to get fd limit\n");
		return 1;
	}

	if (rlim.rlim_cur == RLIM_INFINITY)
		return rlim.rlim_cur;

	/* Allow half of the current open fd limit. */
	return rlim.rlim_cur / 2;
}
447 
/* Cached result of get_fd_limit(); 0 means "not computed yet". */
static rlim_t fd_limit;

/*
 * Used only by tests/dso-data.c to reset the environment
 * for tests. I don't expect we should change this during
 * standard runtime.
 */
void reset_fd_limit(void)
{
	fd_limit = 0;
}
459 
460 static bool may_cache_fd(void)
461 {
462 	if (!fd_limit)
463 		fd_limit = get_fd_limit();
464 
465 	if (fd_limit == RLIM_INFINITY)
466 		return true;
467 
468 	return fd_limit > (rlim_t) dso__data_open_cnt;
469 }
470 
/*
 * Check and close LRU dso if we crossed allowed limit
 * for opened dso file descriptors. The limit is half
 * of the RLIMIT_NOFILE files opened.
 */
static void check_data_close(void)
{
	if (!may_cache_fd())
		close_first_dso();
}
483 
/**
 * dso__data_close - Close DSO data file
 * @dso: dso object
 *
 * External interface to close @dso's data file descriptor.
 * Takes dso__data_open_lock to serialize with other users of
 * the global open-dso list.
 */
void dso__data_close(struct dso *dso)
{
	pthread_mutex_lock(&dso__data_open_lock);
	close_dso(dso);
	pthread_mutex_unlock(&dso__data_open_lock);
}
496 
/*
 * Make sure dso->data.fd is open: use the already known binary_type
 * when set, otherwise probe the build-id cache and then the system
 * path.  Updates dso->data.status to OK or ERROR accordingly.
 * Callers serialize on dso__data_open_lock.
 */
static void try_to_open_dso(struct dso *dso, struct machine *machine)
{
	enum dso_binary_type binary_type_data[] = {
		DSO_BINARY_TYPE__BUILD_ID_CACHE,
		DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
		DSO_BINARY_TYPE__NOT_FOUND,
	};
	int i = 0;

	if (dso->data.fd >= 0)
		return;

	/* A previously determined type is tried directly. */
	if (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND) {
		dso->data.fd = open_dso(dso, machine);
		goto out;
	}

	do {
		dso->binary_type = binary_type_data[i++];

		dso->data.fd = open_dso(dso, machine);
		if (dso->data.fd >= 0)
			goto out;

	} while (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND);
out:
	if (dso->data.fd >= 0)
		dso->data.status = DSO_DATA_STATUS_OK;
	else
		dso->data.status = DSO_DATA_STATUS_ERROR;
}
528 
529 /**
530  * dso__data_get_fd - Get dso's data file descriptor
531  * @dso: dso object
532  * @machine: machine object
533  *
534  * External interface to find dso's file, open it and
535  * returns file descriptor.  It should be paired with
536  * dso__data_put_fd() if it returns non-negative value.
537  */
538 int dso__data_get_fd(struct dso *dso, struct machine *machine)
539 {
540 	if (dso->data.status == DSO_DATA_STATUS_ERROR)
541 		return -1;
542 
543 	if (pthread_mutex_lock(&dso__data_open_lock) < 0)
544 		return -1;
545 
546 	try_to_open_dso(dso, machine);
547 
548 	if (dso->data.fd < 0)
549 		pthread_mutex_unlock(&dso__data_open_lock);
550 
551 	return dso->data.fd;
552 }
553 
/*
 * Release the fd obtained via dso__data_get_fd(): drops
 * dso__data_open_lock held since that call.  @dso is unused and kept
 * only for interface symmetry.
 */
void dso__data_put_fd(struct dso *dso __maybe_unused)
{
	pthread_mutex_unlock(&dso__data_open_lock);
}
558 
559 bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by)
560 {
561 	u32 flag = 1 << by;
562 
563 	if (dso->data.status_seen & flag)
564 		return true;
565 
566 	dso->data.status_seen |= flag;
567 
568 	return false;
569 }
570 
/*
 * Free every cached data chunk of @dso, emptying its cache rbtree.
 * Takes dso->lock to serialize with dso_cache__insert().
 */
static void
dso_cache__free(struct dso *dso)
{
	struct rb_root *root = &dso->data.cache;
	struct rb_node *next = rb_first(root);

	pthread_mutex_lock(&dso->lock);
	while (next) {
		struct dso_cache *cache;

		cache = rb_entry(next, struct dso_cache, rb_node);
		next = rb_next(&cache->rb_node);
		rb_erase(&cache->rb_node, root);
		free(cache);
	}
	pthread_mutex_unlock(&dso->lock);
}
588 
/*
 * Look up the cached chunk whose [offset, offset + DSO__DATA_CACHE_SIZE)
 * range covers @offset.  Returns NULL when that range is not cached.
 */
static struct dso_cache *dso_cache__find(struct dso *dso, u64 offset)
{
	const struct rb_root *root = &dso->data.cache;
	struct rb_node * const *p = &root->rb_node;
	const struct rb_node *parent = NULL;
	struct dso_cache *cache;

	while (*p != NULL) {
		u64 end;

		parent = *p;
		cache = rb_entry(parent, struct dso_cache, rb_node);
		end = cache->offset + DSO__DATA_CACHE_SIZE;

		if (offset < cache->offset)
			p = &(*p)->rb_left;
		else if (offset >= end)
			p = &(*p)->rb_right;
		else
			return cache;
	}

	return NULL;
}
613 
/*
 * Insert @new into @dso's data cache tree unless a chunk covering its
 * offset already exists.  Returns NULL on successful insertion, or
 * the already-present chunk when losing a race (the caller then frees
 * @new and uses the returned chunk).  Serialized by dso->lock.
 */
static struct dso_cache *
dso_cache__insert(struct dso *dso, struct dso_cache *new)
{
	struct rb_root *root = &dso->data.cache;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct dso_cache *cache;
	u64 offset = new->offset;

	pthread_mutex_lock(&dso->lock);
	while (*p != NULL) {
		u64 end;

		parent = *p;
		cache = rb_entry(parent, struct dso_cache, rb_node);
		end = cache->offset + DSO__DATA_CACHE_SIZE;

		if (offset < cache->offset)
			p = &(*p)->rb_left;
		else if (offset >= end)
			p = &(*p)->rb_right;
		else
			goto out;	/* existing chunk wins */
	}

	rb_link_node(&new->rb_node, parent, p);
	rb_insert_color(&new->rb_node, root);

	cache = NULL;
out:
	pthread_mutex_unlock(&dso->lock);
	return cache;
}
647 
648 static ssize_t
649 dso_cache__memcpy(struct dso_cache *cache, u64 offset,
650 		  u8 *data, u64 size)
651 {
652 	u64 cache_offset = offset - cache->offset;
653 	u64 cache_size   = min(cache->size - cache_offset, size);
654 
655 	memcpy(data, cache->data + cache_offset, cache_size);
656 	return cache_size;
657 }
658 
/*
 * Fill a fresh DSO__DATA_CACHE_SIZE-aligned chunk covering @offset
 * from the dso file, publish it in the cache tree (another thread may
 * win that race, in which case its chunk is used instead) and copy
 * the requested bytes into @data.  Returns bytes copied, 0 at EOF or
 * a negative error.
 */
static ssize_t
dso_cache__read(struct dso *dso, struct machine *machine,
		u64 offset, u8 *data, ssize_t size)
{
	struct dso_cache *cache;
	struct dso_cache *old;
	ssize_t ret;

	do {
		u64 cache_offset;

		cache = zalloc(sizeof(*cache) + DSO__DATA_CACHE_SIZE);
		if (!cache)
			return -ENOMEM;

		pthread_mutex_lock(&dso__data_open_lock);

		/*
		 * dso->data.fd might be closed if other thread opened another
		 * file (dso) due to open file limit (RLIMIT_NOFILE).
		 */
		try_to_open_dso(dso, machine);

		if (dso->data.fd < 0) {
			ret = -errno;
			dso->data.status = DSO_DATA_STATUS_ERROR;
			break;
		}

		/* Align the read down to a cache-chunk boundary. */
		cache_offset = offset & DSO__DATA_CACHE_MASK;

		ret = pread(dso->data.fd, cache->data, DSO__DATA_CACHE_SIZE, cache_offset);
		if (ret <= 0)
			break;

		cache->offset = cache_offset;
		cache->size   = ret;
	} while (0);

	pthread_mutex_unlock(&dso__data_open_lock);

	if (ret > 0) {
		old = dso_cache__insert(dso, cache);
		if (old) {
			/* we lose the race */
			free(cache);
			cache = old;
		}

		ret = dso_cache__memcpy(cache, offset, data, size);
	}

	if (ret <= 0)
		free(cache);

	return ret;
}
716 
717 static ssize_t dso_cache_read(struct dso *dso, struct machine *machine,
718 			      u64 offset, u8 *data, ssize_t size)
719 {
720 	struct dso_cache *cache;
721 
722 	cache = dso_cache__find(dso, offset);
723 	if (cache)
724 		return dso_cache__memcpy(cache, offset, data, size);
725 	else
726 		return dso_cache__read(dso, machine, offset, data, size);
727 }
728 
/*
 * Reads and caches dso data DSO__DATA_CACHE_SIZE size chunks
 * in the rb_tree. Any read to already cached data is served
 * by cached data.
 *
 * Returns total bytes read (possibly short at EOF) or a negative
 * error from the first failing chunk read.
 */
static ssize_t cached_read(struct dso *dso, struct machine *machine,
			   u64 offset, u8 *data, ssize_t size)
{
	ssize_t r = 0;
	u8 *p = data;

	do {
		ssize_t ret;

		/* Each pass serves at most one cache chunk. */
		ret = dso_cache_read(dso, machine, offset, p, size);
		if (ret < 0)
			return ret;

		/* Reached EOF, return what we have. */
		if (!ret)
			break;

		BUG_ON(ret > size);

		r      += ret;
		p      += ret;
		offset += ret;
		size   -= ret;

	} while (size);

	return r;
}
762 
/*
 * Cache @dso's file size in dso->data.file_size using fstat().
 * Returns 0 on success (or when already cached); on failure returns
 * -1 or -errno and flags the dso with DSO_DATA_STATUS_ERROR.
 */
static int data_file_size(struct dso *dso, struct machine *machine)
{
	int ret = 0;
	struct stat st;
	char sbuf[STRERR_BUFSIZE];

	/* Already determined earlier? */
	if (dso->data.file_size)
		return 0;

	if (dso->data.status == DSO_DATA_STATUS_ERROR)
		return -1;

	pthread_mutex_lock(&dso__data_open_lock);

	/*
	 * dso->data.fd might be closed if other thread opened another
	 * file (dso) due to open file limit (RLIMIT_NOFILE).
	 */
	try_to_open_dso(dso, machine);

	if (dso->data.fd < 0) {
		ret = -errno;
		dso->data.status = DSO_DATA_STATUS_ERROR;
		goto out;
	}

	if (fstat(dso->data.fd, &st) < 0) {
		ret = -errno;
		pr_err("dso cache fstat failed: %s\n",
		       str_error_r(errno, sbuf, sizeof(sbuf)));
		dso->data.status = DSO_DATA_STATUS_ERROR;
		goto out;
	}
	dso->data.file_size = st.st_size;

out:
	pthread_mutex_unlock(&dso__data_open_lock);
	return ret;
}
802 
803 /**
804  * dso__data_size - Return dso data size
805  * @dso: dso object
806  * @machine: machine object
807  *
808  * Return: dso data size
809  */
810 off_t dso__data_size(struct dso *dso, struct machine *machine)
811 {
812 	if (data_file_size(dso, machine))
813 		return -1;
814 
815 	/* For now just estimate dso data size is close to file size */
816 	return dso->data.file_size;
817 }
818 
819 static ssize_t data_read_offset(struct dso *dso, struct machine *machine,
820 				u64 offset, u8 *data, ssize_t size)
821 {
822 	if (data_file_size(dso, machine))
823 		return -1;
824 
825 	/* Check the offset sanity. */
826 	if (offset > dso->data.file_size)
827 		return -1;
828 
829 	if (offset + size < offset)
830 		return -1;
831 
832 	return cached_read(dso, machine, offset, data, size);
833 }
834 
/**
 * dso__data_read_offset - Read data from dso file offset
 * @dso: dso object
 * @machine: machine object
 * @offset: file offset
 * @data: buffer to store data
 * @size: size of the @data buffer
 *
 * External interface to read data from dso file offset. Open
 * dso data file and use cached_read to get the data.
 *
 * Return: bytes read, or -1 on error or when the dso is already
 * marked unreadable.
 */
ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
			      u64 offset, u8 *data, ssize_t size)
{
	if (dso->data.status == DSO_DATA_STATUS_ERROR)
		return -1;

	return data_read_offset(dso, machine, offset, data, size);
}
854 
/**
 * dso__data_read_addr - Read data from dso address
 * @dso: dso object
 * @map: map used to translate @addr into a dso file offset
 * @machine: machine object
 * @addr: virtual memory address
 * @data: buffer to store data
 * @size: size of the @data buffer
 *
 * External interface to read data from dso address.
 */
ssize_t dso__data_read_addr(struct dso *dso, struct map *map,
			    struct machine *machine, u64 addr,
			    u8 *data, ssize_t size)
{
	u64 offset = map->map_ip(map, addr);
	return dso__data_read_offset(dso, machine, offset, data, size);
}
872 
873 struct map *dso__new_map(const char *name)
874 {
875 	struct map *map = NULL;
876 	struct dso *dso = dso__new(name);
877 
878 	if (dso)
879 		map = map__new2(0, dso, MAP__FUNCTION);
880 
881 	return map;
882 }
883 
/*
 * Find or create the kernel dso @name on @machine and mark it as a
 * kernel dso of @dso_type with the given @short_name.
 */
struct dso *machine__findnew_kernel(struct machine *machine, const char *name,
				    const char *short_name, int dso_type)
{
	/*
	 * The kernel dso could be created by build_id processing.
	 */
	struct dso *dso = machine__findnew_dso(machine, name);

	/*
	 * We need to run this in all cases, since during the build_id
	 * processing we had no idea this was the kernel dso.
	 */
	if (dso != NULL) {
		dso__set_short_name(dso, short_name, false);
		dso->kernel = dso_type;
	}

	return dso;
}
903 
/*
 * Find a matching entry and/or link current entry to RB tree.
 * Either one of the dso or name parameter must be non-NULL or the
 * function will not work.
 *
 * Returns the matching dso when one exists; returns NULL after
 * linking @dso into the tree, or when a full duplicate (same long
 * and short name) is detected.
 */
static struct dso *__dso__findlink_by_longname(struct rb_root *root,
					       struct dso *dso, const char *name)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node  *parent = NULL;

	if (!name)
		name = dso->long_name;
	/*
	 * Find node with the matching name
	 */
	while (*p) {
		struct dso *this = rb_entry(*p, struct dso, rb_node);
		int rc = strcmp(name, this->long_name);

		parent = *p;
		if (rc == 0) {
			/*
			 * In case the new DSO is a duplicate of an existing
			 * one, print an one-time warning & put the new entry
			 * at the end of the list of duplicates.
			 */
			if (!dso || (dso == this))
				return this;	/* Find matching dso */
			/*
			 * The core kernel DSOs may have duplicated long name.
			 * In this case, the short name should be different.
			 * Comparing the short names to differentiate the DSOs.
			 */
			rc = strcmp(dso->short_name, this->short_name);
			if (rc == 0) {
				pr_err("Duplicated dso name: %s\n", name);
				return NULL;
			}
		}
		if (rc < 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	if (dso) {
		/* Add new node and rebalance tree */
		rb_link_node(&dso->rb_node, parent, p);
		rb_insert_color(&dso->rb_node, root);
		dso->root = root;
	}
	return NULL;
}
957 
/* Rbtree lookup only: passing a NULL dso means "never link, just find". */
static inline struct dso *__dso__find_by_longname(struct rb_root *root,
						  const char *name)
{
	return __dso__findlink_by_longname(root, NULL, name);
}
963 
964 void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated)
965 {
966 	struct rb_root *root = dso->root;
967 
968 	if (name == NULL)
969 		return;
970 
971 	if (dso->long_name_allocated)
972 		free((char *)dso->long_name);
973 
974 	if (root) {
975 		rb_erase(&dso->rb_node, root);
976 		/*
977 		 * __dso__findlink_by_longname() isn't guaranteed to add it
978 		 * back, so a clean removal is required here.
979 		 */
980 		RB_CLEAR_NODE(&dso->rb_node);
981 		dso->root = NULL;
982 	}
983 
984 	dso->long_name		 = name;
985 	dso->long_name_len	 = strlen(name);
986 	dso->long_name_allocated = name_allocated;
987 
988 	if (root)
989 		__dso__findlink_by_longname(root, dso, NULL);
990 }
991 
992 void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
993 {
994 	if (name == NULL)
995 		return;
996 
997 	if (dso->short_name_allocated)
998 		free((char *)dso->short_name);
999 
1000 	dso->short_name		  = name;
1001 	dso->short_name_len	  = strlen(name);
1002 	dso->short_name_allocated = name_allocated;
1003 }
1004 
1005 static void dso__set_basename(struct dso *dso)
1006 {
1007        /*
1008         * basename() may modify path buffer, so we must pass
1009         * a copy.
1010         */
1011        char *base, *lname = strdup(dso->long_name);
1012 
1013        if (!lname)
1014                return;
1015 
1016        /*
1017         * basename() may return a pointer to internal
1018         * storage which is reused in subsequent calls
1019         * so copy the result.
1020         */
1021        base = strdup(basename(lname));
1022 
1023        free(lname);
1024 
1025        if (!base)
1026                return;
1027 
1028        dso__set_short_name(dso, base, true);
1029 }
1030 
1031 int dso__name_len(const struct dso *dso)
1032 {
1033 	if (!dso)
1034 		return strlen("[unknown]");
1035 	if (verbose)
1036 		return dso->long_name_len;
1037 
1038 	return dso->short_name_len;
1039 }
1040 
1041 bool dso__loaded(const struct dso *dso, enum map_type type)
1042 {
1043 	return dso->loaded & (1 << type);
1044 }
1045 
1046 bool dso__sorted_by_name(const struct dso *dso, enum map_type type)
1047 {
1048 	return dso->sorted_by_name & (1 << type);
1049 }
1050 
1051 void dso__set_sorted_by_name(struct dso *dso, enum map_type type)
1052 {
1053 	dso->sorted_by_name |= (1 << type);
1054 }
1055 
/*
 * Allocate and initialize a dso named @name.  The name is copied into
 * storage allocated past the end of the struct; short and long names
 * initially alias it.  The new dso starts with one reference.
 */
struct dso *dso__new(const char *name)
{
	struct dso *dso = calloc(1, sizeof(*dso) + strlen(name) + 1);

	if (dso != NULL) {
		int i;
		strcpy(dso->name, name);
		dso__set_long_name(dso, dso->name, false);
		dso__set_short_name(dso, dso->name, false);
		for (i = 0; i < MAP__NR_TYPES; ++i)
			dso->symbols[i] = dso->symbol_names[i] = RB_ROOT;
		dso->data.cache = RB_ROOT;
		dso->data.fd = -1;
		dso->data.status = DSO_DATA_STATUS_UNKNOWN;
		dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND;
		dso->binary_type = DSO_BINARY_TYPE__NOT_FOUND;
		/* Assume host pointer width until the file says otherwise. */
		dso->is_64_bit = (sizeof(void *) == 8);
		dso->loaded = 0;
		dso->rel = 0;
		dso->sorted_by_name = 0;
		dso->has_build_id = 0;
		dso->has_srcline = 1;
		dso->a2l_fails = 1;
		dso->kernel = DSO_TYPE_USER;
		dso->needs_swap = DSO_SWAP__UNSET;
		RB_CLEAR_NODE(&dso->rb_node);
		dso->root = NULL;
		INIT_LIST_HEAD(&dso->node);
		INIT_LIST_HEAD(&dso->data.open_entry);
		pthread_mutex_init(&dso->lock, NULL);
		atomic_set(&dso->refcnt, 1);
	}

	return dso;
}
1091 
/*
 * Tear down @dso: symbols, cached data, addr2line state and allocated
 * names.  Reached from dso__put() when the last reference is dropped.
 */
void dso__delete(struct dso *dso)
{
	int i;

	/* A dso still linked in a dsos rbtree should not be deleted. */
	if (!RB_EMPTY_NODE(&dso->rb_node))
		pr_err("DSO %s is still in rbtree when being deleted!\n",
		       dso->long_name);
	for (i = 0; i < MAP__NR_TYPES; ++i)
		symbols__delete(&dso->symbols[i]);

	if (dso->short_name_allocated) {
		zfree((char **)&dso->short_name);
		dso->short_name_allocated = false;
	}

	if (dso->long_name_allocated) {
		zfree((char **)&dso->long_name);
		dso->long_name_allocated = false;
	}

	dso__data_close(dso);
	auxtrace_cache__free(dso->auxtrace_cache);
	dso_cache__free(dso);
	dso__free_a2l(dso);
	zfree(&dso->symsrc_filename);
	pthread_mutex_destroy(&dso->lock);
	free(dso);
}
1120 
1121 struct dso *dso__get(struct dso *dso)
1122 {
1123 	if (dso)
1124 		atomic_inc(&dso->refcnt);
1125 	return dso;
1126 }
1127 
1128 void dso__put(struct dso *dso)
1129 {
1130 	if (dso && atomic_dec_and_test(&dso->refcnt))
1131 		dso__delete(dso);
1132 }
1133 
/* Copy @build_id (sizeof(dso->build_id) bytes) into @dso and mark it set. */
void dso__set_build_id(struct dso *dso, void *build_id)
{
	memcpy(dso->build_id, build_id, sizeof(dso->build_id));
	dso->has_build_id = 1;
}
1139 
1140 bool dso__build_id_equal(const struct dso *dso, u8 *build_id)
1141 {
1142 	return memcmp(dso->build_id, build_id, sizeof(dso->build_id)) == 0;
1143 }
1144 
1145 void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine)
1146 {
1147 	char path[PATH_MAX];
1148 
1149 	if (machine__is_default_guest(machine))
1150 		return;
1151 	sprintf(path, "%s/sys/kernel/notes", machine->root_dir);
1152 	if (sysfs__read_build_id(path, dso->build_id,
1153 				 sizeof(dso->build_id)) == 0)
1154 		dso->has_build_id = true;
1155 }
1156 
/*
 * Read a loaded kernel module's build id from
 * <root_dir>/sys/module/<name>/notes/.note.gnu.build-id.
 * Always returns 0; dso->has_build_id records whether it worked.
 */
int dso__kernel_module_get_build_id(struct dso *dso,
				    const char *root_dir)
{
	char filename[PATH_MAX];
	/*
	 * kernel module short names are of the form "[module]" and
	 * we need just "module" here.
	 */
	const char *name = dso->short_name + 1;

	snprintf(filename, sizeof(filename),
		 "%s/sys/module/%.*s/notes/.note.gnu.build-id",
		 root_dir, (int)strlen(name) - 1, name);

	if (sysfs__read_build_id(filename, dso->build_id,
				 sizeof(dso->build_id)) == 0)
		dso->has_build_id = true;

	return 0;
}
1177 
/*
 * Fill in missing build ids for the dsos on @head by reading them
 * from the dso files.  With @with_hits, dsos that were never hit are
 * skipped (except the vdso).  Returns true when at least one
 * considered dso ends up with a build id.
 */
bool __dsos__read_build_ids(struct list_head *head, bool with_hits)
{
	bool have_build_id = false;
	struct dso *pos;

	list_for_each_entry(pos, head, node) {
		if (with_hits && !pos->hit && !dso__is_vdso(pos))
			continue;
		if (pos->has_build_id) {
			have_build_id = true;
			continue;
		}
		if (filename__read_build_id(pos->long_name, pos->build_id,
					    sizeof(pos->build_id)) > 0) {
			have_build_id	  = true;
			pos->has_build_id = true;
		}
	}

	return have_build_id;
}
1199 
1200 void __dsos__add(struct dsos *dsos, struct dso *dso)
1201 {
1202 	list_add_tail(&dso->node, &dsos->head);
1203 	__dso__findlink_by_longname(&dsos->root, dso, NULL);
1204 	/*
1205 	 * It is now in the linked list, grab a reference, then garbage collect
1206 	 * this when needing memory, by looking at LRU dso instances in the
1207 	 * list with atomic_read(&dso->refcnt) == 1, i.e. no references
1208 	 * anywhere besides the one for the list, do, under a lock for the
1209 	 * list: remove it from the list, then a dso__put(), that probably will
1210 	 * be the last and will then call dso__delete(), end of life.
1211 	 *
1212 	 * That, or at the end of the 'struct machine' lifetime, when all
1213 	 * 'struct dso' instances will be removed from the list, in
1214 	 * dsos__exit(), if they have no other reference from some other data
1215 	 * structure.
1216 	 *
1217 	 * E.g.: after processing a 'perf.data' file and storing references
1218 	 * to objects instantiated while processing events, we will have
1219 	 * references to the 'thread', 'map', 'dso' structs all from 'struct
1220 	 * hist_entry' instances, but we may not need anything not referenced,
1221 	 * so we might as well call machines__exit()/machines__delete() and
1222 	 * garbage collect it.
1223 	 */
1224 	dso__get(dso);
1225 }
1226 
1227 void dsos__add(struct dsos *dsos, struct dso *dso)
1228 {
1229 	pthread_rwlock_wrlock(&dsos->lock);
1230 	__dsos__add(dsos, dso);
1231 	pthread_rwlock_unlock(&dsos->lock);
1232 }
1233 
1234 struct dso *__dsos__find(struct dsos *dsos, const char *name, bool cmp_short)
1235 {
1236 	struct dso *pos;
1237 
1238 	if (cmp_short) {
1239 		list_for_each_entry(pos, &dsos->head, node)
1240 			if (strcmp(pos->short_name, name) == 0)
1241 				return pos;
1242 		return NULL;
1243 	}
1244 	return __dso__find_by_longname(&dsos->root, name);
1245 }
1246 
1247 struct dso *dsos__find(struct dsos *dsos, const char *name, bool cmp_short)
1248 {
1249 	struct dso *dso;
1250 	pthread_rwlock_rdlock(&dsos->lock);
1251 	dso = __dsos__find(dsos, name, cmp_short);
1252 	pthread_rwlock_unlock(&dsos->lock);
1253 	return dso;
1254 }
1255 
1256 struct dso *__dsos__addnew(struct dsos *dsos, const char *name)
1257 {
1258 	struct dso *dso = dso__new(name);
1259 
1260 	if (dso != NULL) {
1261 		__dsos__add(dsos, dso);
1262 		dso__set_basename(dso);
1263 		/* Put dso here because __dsos_add already got it */
1264 		dso__put(dso);
1265 	}
1266 	return dso;
1267 }
1268 
1269 struct dso *__dsos__findnew(struct dsos *dsos, const char *name)
1270 {
1271 	struct dso *dso = __dsos__find(dsos, name, false);
1272 
1273 	return dso ? dso : __dsos__addnew(dsos, name);
1274 }
1275 
1276 struct dso *dsos__findnew(struct dsos *dsos, const char *name)
1277 {
1278 	struct dso *dso;
1279 	pthread_rwlock_wrlock(&dsos->lock);
1280 	dso = dso__get(__dsos__findnew(dsos, name));
1281 	pthread_rwlock_unlock(&dsos->lock);
1282 	return dso;
1283 }
1284 
1285 size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp,
1286 			       bool (skip)(struct dso *dso, int parm), int parm)
1287 {
1288 	struct dso *pos;
1289 	size_t ret = 0;
1290 
1291 	list_for_each_entry(pos, head, node) {
1292 		if (skip && skip(pos, parm))
1293 			continue;
1294 		ret += dso__fprintf_buildid(pos, fp);
1295 		ret += fprintf(fp, " %s\n", pos->long_name);
1296 	}
1297 	return ret;
1298 }
1299 
1300 size_t __dsos__fprintf(struct list_head *head, FILE *fp)
1301 {
1302 	struct dso *pos;
1303 	size_t ret = 0;
1304 
1305 	list_for_each_entry(pos, head, node) {
1306 		int i;
1307 		for (i = 0; i < MAP__NR_TYPES; ++i)
1308 			ret += dso__fprintf(pos, i, fp);
1309 	}
1310 
1311 	return ret;
1312 }
1313 
1314 size_t dso__fprintf_buildid(struct dso *dso, FILE *fp)
1315 {
1316 	char sbuild_id[SBUILD_ID_SIZE];
1317 
1318 	build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
1319 	return fprintf(fp, "%s", sbuild_id);
1320 }
1321 
1322 size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp)
1323 {
1324 	struct rb_node *nd;
1325 	size_t ret = fprintf(fp, "dso: %s (", dso->short_name);
1326 
1327 	if (dso->short_name != dso->long_name)
1328 		ret += fprintf(fp, "%s, ", dso->long_name);
1329 	ret += fprintf(fp, "%s, %sloaded, ", map_type__name[type],
1330 		       dso__loaded(dso, type) ? "" : "NOT ");
1331 	ret += dso__fprintf_buildid(dso, fp);
1332 	ret += fprintf(fp, ")\n");
1333 	for (nd = rb_first(&dso->symbols[type]); nd; nd = rb_next(nd)) {
1334 		struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
1335 		ret += symbol__fprintf(pos, fp);
1336 	}
1337 
1338 	return ret;
1339 }
1340 
1341 enum dso_type dso__type(struct dso *dso, struct machine *machine)
1342 {
1343 	int fd;
1344 	enum dso_type type = DSO__TYPE_UNKNOWN;
1345 
1346 	fd = dso__data_get_fd(dso, machine);
1347 	if (fd >= 0) {
1348 		type = dso__type_fd(fd);
1349 		dso__data_put_fd(dso);
1350 	}
1351 
1352 	return type;
1353 }
1354 
1355 int dso__strerror_load(struct dso *dso, char *buf, size_t buflen)
1356 {
1357 	int idx, errnum = dso->load_errno;
1358 	/*
1359 	 * This must have a same ordering as the enum dso_load_errno.
1360 	 */
1361 	static const char *dso_load__error_str[] = {
1362 	"Internal tools/perf/ library error",
1363 	"Invalid ELF file",
1364 	"Can not read build id",
1365 	"Mismatching build id",
1366 	"Decompression failure",
1367 	};
1368 
1369 	BUG_ON(buflen == 0);
1370 
1371 	if (errnum >= 0) {
1372 		const char *err = str_error_r(errnum, buf, buflen);
1373 
1374 		if (err != buf)
1375 			scnprintf(buf, buflen, "%s", err);
1376 
1377 		return 0;
1378 	}
1379 
1380 	if (errnum <  __DSO_LOAD_ERRNO__START || errnum >= __DSO_LOAD_ERRNO__END)
1381 		return -1;
1382 
1383 	idx = errnum - __DSO_LOAD_ERRNO__START;
1384 	scnprintf(buf, buflen, "%s", dso_load__error_str[idx]);
1385 	return 0;
1386 }
1387