xref: /openbmc/linux/tools/perf/util/symbol.c (revision 3d3337de)
1 #include <dirent.h>
2 #include <errno.h>
3 #include <stdlib.h>
4 #include <stdio.h>
5 #include <string.h>
6 #include <sys/types.h>
7 #include <sys/stat.h>
8 #include <sys/param.h>
9 #include <fcntl.h>
10 #include <unistd.h>
11 #include <inttypes.h>
12 #include "build-id.h"
13 #include "util.h"
14 #include "debug.h"
15 #include "machine.h"
16 #include "symbol.h"
17 #include "strlist.h"
18 #include "intlist.h"
19 #include "header.h"
20 
21 #include <elf.h>
22 #include <limits.h>
23 #include <symbol/kallsyms.h>
24 #include <sys/utsname.h>
25 
26 static int dso__load_kernel_sym(struct dso *dso, struct map *map,
27 				symbol_filter_t filter);
28 static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map,
29 			symbol_filter_t filter);
30 int vmlinux_path__nr_entries;
31 char **vmlinux_path;
32 
33 struct symbol_conf symbol_conf = {
34 	.use_modules		= true,
35 	.try_vmlinux_path	= true,
36 	.annotate_src		= true,
37 	.demangle		= true,
38 	.demangle_kernel	= false,
39 	.cumulate_callchain	= true,
40 	.show_hist_headers	= true,
41 	.symfs			= "",
42 };
43 
44 static enum dso_binary_type binary_type_symtab[] = {
45 	DSO_BINARY_TYPE__KALLSYMS,
46 	DSO_BINARY_TYPE__GUEST_KALLSYMS,
47 	DSO_BINARY_TYPE__JAVA_JIT,
48 	DSO_BINARY_TYPE__DEBUGLINK,
49 	DSO_BINARY_TYPE__BUILD_ID_CACHE,
50 	DSO_BINARY_TYPE__FEDORA_DEBUGINFO,
51 	DSO_BINARY_TYPE__UBUNTU_DEBUGINFO,
52 	DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
53 	DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
54 	DSO_BINARY_TYPE__GUEST_KMODULE,
55 	DSO_BINARY_TYPE__GUEST_KMODULE_COMP,
56 	DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE,
57 	DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP,
58 	DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
59 	DSO_BINARY_TYPE__NOT_FOUND,
60 };
61 
62 #define DSO_BINARY_TYPE__SYMTAB_CNT ARRAY_SIZE(binary_type_symtab)
63 
64 bool symbol_type__is_a(char symbol_type, enum map_type map_type)
65 {
66 	symbol_type = toupper(symbol_type);
67 
68 	switch (map_type) {
69 	case MAP__FUNCTION:
70 		return symbol_type == 'T' || symbol_type == 'W';
71 	case MAP__VARIABLE:
72 		return symbol_type == 'D';
73 	default:
74 		return false;
75 	}
76 }
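
/*
 * Editor's note (illustrative, not part of the original file): the
 * symbol_type character comes from /proc/kallsyms and uses the same letters
 * as nm(1), e.g.
 *
 *   ffffffff8109d8a0 T schedule      -> MAP__FUNCTION ('T' or 'W' accepted)
 *   ffffffff81e04000 D jiffies_64    -> MAP__VARIABLE  ('D' accepted)
 *
 * toupper() above folds the lowercase (local) variants, such as 't' for
 * static functions, into the same checks.
 */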
77 
78 static int prefix_underscores_count(const char *str)
79 {
80 	const char *tail = str;
81 
82 	while (*tail == '_')
83 		tail++;
84 
85 	return tail - str;
86 }
87 
88 int __weak arch__choose_best_symbol(struct symbol *syma,
89 				    struct symbol *symb __maybe_unused)
90 {
91 	/* Avoid "SyS" kernel syscall aliases */
92 	if (strlen(syma->name) >= 3 && !strncmp(syma->name, "SyS", 3))
93 		return SYMBOL_B;
94 	if (strlen(syma->name) >= 10 && !strncmp(syma->name, "compat_SyS", 10))
95 		return SYMBOL_B;
96 
97 	return SYMBOL_A;
98 }
99 
100 static int choose_best_symbol(struct symbol *syma, struct symbol *symb)
101 {
102 	s64 a;
103 	s64 b;
104 	size_t na, nb;
105 
106 	/* Prefer a symbol with non-zero length */
107 	a = syma->end - syma->start;
108 	b = symb->end - symb->start;
109 	if ((b == 0) && (a > 0))
110 		return SYMBOL_A;
111 	else if ((a == 0) && (b > 0))
112 		return SYMBOL_B;
113 
114 	/* Prefer a non-weak symbol over a weak one */
115 	a = syma->binding == STB_WEAK;
116 	b = symb->binding == STB_WEAK;
117 	if (b && !a)
118 		return SYMBOL_A;
119 	if (a && !b)
120 		return SYMBOL_B;
121 
122 	/* Prefer a global symbol over a non-global one */
123 	a = syma->binding == STB_GLOBAL;
124 	b = symb->binding == STB_GLOBAL;
125 	if (a && !b)
126 		return SYMBOL_A;
127 	if (b && !a)
128 		return SYMBOL_B;
129 
130 	/* Prefer a symbol with fewer underscores */
131 	a = prefix_underscores_count(syma->name);
132 	b = prefix_underscores_count(symb->name);
133 	if (b > a)
134 		return SYMBOL_A;
135 	else if (a > b)
136 		return SYMBOL_B;
137 
138 	/* Choose the symbol with the longest name */
139 	na = strlen(syma->name);
140 	nb = strlen(symb->name);
141 	if (na > nb)
142 		return SYMBOL_A;
143 	else if (na < nb)
144 		return SYMBOL_B;
145 
146 	return arch__choose_best_symbol(syma, symb);
147 }
148 
149 void symbols__fixup_duplicate(struct rb_root *symbols)
150 {
151 	struct rb_node *nd;
152 	struct symbol *curr, *next;
153 
154 	nd = rb_first(symbols);
155 
156 	while (nd) {
157 		curr = rb_entry(nd, struct symbol, rb_node);
158 again:
159 		nd = rb_next(&curr->rb_node);
160 		if (!nd)
161 			break;
162 
163 		next = rb_entry(nd, struct symbol, rb_node);
164 
165 		if (curr->start != next->start)
166 			continue;
167 
168 		if (choose_best_symbol(curr, next) == SYMBOL_A) {
169 			rb_erase(&next->rb_node, symbols);
170 			symbol__delete(next);
171 			goto again;
172 		} else {
173 			nd = rb_next(&curr->rb_node);
174 			rb_erase(&curr->rb_node, symbols);
175 			symbol__delete(curr);
176 		}
177 	}
178 }
179 
180 void symbols__fixup_end(struct rb_root *symbols)
181 {
182 	struct rb_node *nd, *prevnd = rb_first(symbols);
183 	struct symbol *curr, *prev;
184 
185 	if (prevnd == NULL)
186 		return;
187 
188 	curr = rb_entry(prevnd, struct symbol, rb_node);
189 
190 	for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {
191 		prev = curr;
192 		curr = rb_entry(nd, struct symbol, rb_node);
193 
194 		if (prev->end == prev->start && prev->end != curr->start)
195 			prev->end = curr->start;
196 	}
197 
198 	/* Last entry */
199 	if (curr->end == curr->start)
200 		curr->end = roundup(curr->start, 4096);
201 }
202 
203 void __map_groups__fixup_end(struct map_groups *mg, enum map_type type)
204 {
205 	struct map *prev, *curr;
206 	struct rb_node *nd, *prevnd = rb_first(&mg->maps[type]);
207 
208 	if (prevnd == NULL)
209 		return;
210 
211 	curr = rb_entry(prevnd, struct map, rb_node);
212 
213 	for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {
214 		prev = curr;
215 		curr = rb_entry(nd, struct map, rb_node);
216 		prev->end = curr->start;
217 	}
218 
219 	/*
220 	 * We still don't have the actual symbols, so guess the
221 	 * last map's final address.
222 	 */
223 	curr->end = ~0ULL;
224 }
225 
226 struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name)
227 {
228 	size_t namelen = strlen(name) + 1;
229 	struct symbol *sym = calloc(1, (symbol_conf.priv_size +
230 					sizeof(*sym) + namelen));
231 	if (sym == NULL)
232 		return NULL;
233 
234 	if (symbol_conf.priv_size)
235 		sym = ((void *)sym) + symbol_conf.priv_size;
236 
237 	sym->start   = start;
238 	sym->end     = len ? start + len : start;
239 	sym->binding = binding;
240 	sym->namelen = namelen - 1;
241 
242 	pr_debug4("%s: %s %#" PRIx64 "-%#" PRIx64 "\n",
243 		  __func__, name, start, sym->end);
244 	memcpy(sym->name, name, namelen);
245 
246 	return sym;
247 }
248 
249 void symbol__delete(struct symbol *sym)
250 {
251 	free(((void *)sym) - symbol_conf.priv_size);
252 }
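
/*
 * Editor's note (illustrative, not part of the original file): symbol__new()
 * above allocates a single block laid out as
 *
 *   [ symbol_conf.priv_size bytes ][ struct symbol ][ name bytes + '\0' ]
 *
 * and hands back a pointer just past the private area.  That is why
 * symbol__delete() rewinds the pointer by priv_size before calling free(),
 * and why per-symbol private data can live in front of the symbol without a
 * second allocation.
 */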
253 
254 size_t symbol__fprintf(struct symbol *sym, FILE *fp)
255 {
256 	return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %c %s\n",
257 		       sym->start, sym->end,
258 		       sym->binding == STB_GLOBAL ? 'g' :
259 		       sym->binding == STB_LOCAL  ? 'l' : 'w',
260 		       sym->name);
261 }
262 
263 size_t symbol__fprintf_symname_offs(const struct symbol *sym,
264 				    const struct addr_location *al, FILE *fp)
265 {
266 	unsigned long offset;
267 	size_t length;
268 
269 	if (sym && sym->name) {
270 		length = fprintf(fp, "%s", sym->name);
271 		if (al) {
272 			if (al->addr < sym->end)
273 				offset = al->addr - sym->start;
274 			else
275 				offset = al->addr - al->map->start - sym->start;
276 			length += fprintf(fp, "+0x%lx", offset);
277 		}
278 		return length;
279 	} else
280 		return fprintf(fp, "[unknown]");
281 }
282 
283 size_t symbol__fprintf_symname(const struct symbol *sym, FILE *fp)
284 {
285 	return symbol__fprintf_symname_offs(sym, NULL, fp);
286 }
287 
288 void symbols__delete(struct rb_root *symbols)
289 {
290 	struct symbol *pos;
291 	struct rb_node *next = rb_first(symbols);
292 
293 	while (next) {
294 		pos = rb_entry(next, struct symbol, rb_node);
295 		next = rb_next(&pos->rb_node);
296 		rb_erase(&pos->rb_node, symbols);
297 		symbol__delete(pos);
298 	}
299 }
300 
301 void symbols__insert(struct rb_root *symbols, struct symbol *sym)
302 {
303 	struct rb_node **p = &symbols->rb_node;
304 	struct rb_node *parent = NULL;
305 	const u64 ip = sym->start;
306 	struct symbol *s;
307 
308 	while (*p != NULL) {
309 		parent = *p;
310 		s = rb_entry(parent, struct symbol, rb_node);
311 		if (ip < s->start)
312 			p = &(*p)->rb_left;
313 		else
314 			p = &(*p)->rb_right;
315 	}
316 	rb_link_node(&sym->rb_node, parent, p);
317 	rb_insert_color(&sym->rb_node, symbols);
318 }
319 
320 static struct symbol *symbols__find(struct rb_root *symbols, u64 ip)
321 {
322 	struct rb_node *n;
323 
324 	if (symbols == NULL)
325 		return NULL;
326 
327 	n = symbols->rb_node;
328 
329 	while (n) {
330 		struct symbol *s = rb_entry(n, struct symbol, rb_node);
331 
332 		if (ip < s->start)
333 			n = n->rb_left;
334 		else if (ip >= s->end)
335 			n = n->rb_right;
336 		else
337 			return s;
338 	}
339 
340 	return NULL;
341 }
342 
343 static struct symbol *symbols__first(struct rb_root *symbols)
344 {
345 	struct rb_node *n = rb_first(symbols);
346 
347 	if (n)
348 		return rb_entry(n, struct symbol, rb_node);
349 
350 	return NULL;
351 }
352 
353 static struct symbol *symbols__next(struct symbol *sym)
354 {
355 	struct rb_node *n = rb_next(&sym->rb_node);
356 
357 	if (n)
358 		return rb_entry(n, struct symbol, rb_node);
359 
360 	return NULL;
361 }
362 
363 struct symbol_name_rb_node {
364 	struct rb_node	rb_node;
365 	struct symbol	sym;
366 };
367 
368 static void symbols__insert_by_name(struct rb_root *symbols, struct symbol *sym)
369 {
370 	struct rb_node **p = &symbols->rb_node;
371 	struct rb_node *parent = NULL;
372 	struct symbol_name_rb_node *symn, *s;
373 
374 	symn = container_of(sym, struct symbol_name_rb_node, sym);
375 
376 	while (*p != NULL) {
377 		parent = *p;
378 		s = rb_entry(parent, struct symbol_name_rb_node, rb_node);
379 		if (strcmp(sym->name, s->sym.name) < 0)
380 			p = &(*p)->rb_left;
381 		else
382 			p = &(*p)->rb_right;
383 	}
384 	rb_link_node(&symn->rb_node, parent, p);
385 	rb_insert_color(&symn->rb_node, symbols);
386 }
387 
388 static void symbols__sort_by_name(struct rb_root *symbols,
389 				  struct rb_root *source)
390 {
391 	struct rb_node *nd;
392 
393 	for (nd = rb_first(source); nd; nd = rb_next(nd)) {
394 		struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
395 		symbols__insert_by_name(symbols, pos);
396 	}
397 }
398 
399 static struct symbol *symbols__find_by_name(struct rb_root *symbols,
400 					    const char *name)
401 {
402 	struct rb_node *n;
403 	struct symbol_name_rb_node *s;
404 
405 	if (symbols == NULL)
406 		return NULL;
407 
408 	n = symbols->rb_node;
409 
410 	while (n) {
411 		int cmp;
412 
413 		s = rb_entry(n, struct symbol_name_rb_node, rb_node);
414 		cmp = arch__compare_symbol_names(name, s->sym.name);
415 
416 		if (cmp < 0)
417 			n = n->rb_left;
418 		else if (cmp > 0)
419 			n = n->rb_right;
420 		else
421 			break;
422 	}
423 
424 	if (n == NULL)
425 		return NULL;
426 
427 	/* return first symbol that has same name (if any) */
428 	for (n = rb_prev(n); n; n = rb_prev(n)) {
429 		struct symbol_name_rb_node *tmp;
430 
431 		tmp = rb_entry(n, struct symbol_name_rb_node, rb_node);
432 		if (arch__compare_symbol_names(tmp->sym.name, s->sym.name))
433 			break;
434 
435 		s = tmp;
436 	}
437 
438 	return &s->sym;
439 }
440 
441 struct symbol *dso__find_symbol(struct dso *dso,
442 				enum map_type type, u64 addr)
443 {
444 	return symbols__find(&dso->symbols[type], addr);
445 }
446 
447 struct symbol *dso__first_symbol(struct dso *dso, enum map_type type)
448 {
449 	return symbols__first(&dso->symbols[type]);
450 }
451 
452 struct symbol *dso__next_symbol(struct symbol *sym)
453 {
454 	return symbols__next(sym);
455 }
456 
457 struct symbol *symbol__next_by_name(struct symbol *sym)
458 {
459 	struct symbol_name_rb_node *s = container_of(sym, struct symbol_name_rb_node, sym);
460 	struct rb_node *n = rb_next(&s->rb_node);
461 
462 	return n ? &rb_entry(n, struct symbol_name_rb_node, rb_node)->sym : NULL;
463 }
464 
465 /*
466  * Returns the first symbol that matches @name.
467  */
468 struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type,
469 					const char *name)
470 {
471 	return symbols__find_by_name(&dso->symbol_names[type], name);
472 }
473 
474 void dso__sort_by_name(struct dso *dso, enum map_type type)
475 {
476 	dso__set_sorted_by_name(dso, type);
477 	return symbols__sort_by_name(&dso->symbol_names[type],
478 				     &dso->symbols[type]);
479 }
480 
481 size_t dso__fprintf_symbols_by_name(struct dso *dso,
482 				    enum map_type type, FILE *fp)
483 {
484 	size_t ret = 0;
485 	struct rb_node *nd;
486 	struct symbol_name_rb_node *pos;
487 
488 	for (nd = rb_first(&dso->symbol_names[type]); nd; nd = rb_next(nd)) {
489 		pos = rb_entry(nd, struct symbol_name_rb_node, rb_node);
490 		fprintf(fp, "%s\n", pos->sym.name);
491 	}
492 
493 	return ret;
494 }
495 
496 int modules__parse(const char *filename, void *arg,
497 		   int (*process_module)(void *arg, const char *name,
498 					 u64 start))
499 {
500 	char *line = NULL;
501 	size_t n;
502 	FILE *file;
503 	int err = 0;
504 
505 	file = fopen(filename, "r");
506 	if (file == NULL)
507 		return -1;
508 
509 	while (1) {
510 		char name[PATH_MAX];
511 		u64 start;
512 		char *sep;
513 		ssize_t line_len;
514 
515 		line_len = getline(&line, &n, file);
516 		if (line_len < 0) {
517 			if (feof(file))
518 				break;
519 			err = -1;
520 			goto out;
521 		}
522 
523 		if (!line) {
524 			err = -1;
525 			goto out;
526 		}
527 
528 		line[--line_len] = '\0'; /* \n */
529 
530 		sep = strrchr(line, 'x');
531 		if (sep == NULL)
532 			continue;
533 
534 		hex2u64(sep + 1, &start);
535 
536 		sep = strchr(line, ' ');
537 		if (sep == NULL)
538 			continue;
539 
540 		*sep = '\0';
541 
542 		scnprintf(name, sizeof(name), "[%s]", line);
543 
544 		err = process_module(arg, name, start);
545 		if (err)
546 			break;
547 	}
548 out:
549 	free(line);
550 	fclose(file);
551 	return err;
552 }
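
/*
 * Illustrative example (not part of the original file): a /proc/modules line
 * typically looks like
 *
 *   e1000e 254795 0 - Live 0xffffffffa0048000
 *
 * modules__parse() above keeps the load address found after the last 'x' and
 * the text up to the first space, so process_module() would be called with
 * name = "[e1000e]" and start = 0xffffffffa0048000.
 */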
553 
554 struct process_kallsyms_args {
555 	struct map *map;
556 	struct dso *dso;
557 };
558 
559 /*
560  * These are symbols in the kernel image, so make sure that
561  * sym is from a kernel DSO.
562  */
563 bool symbol__is_idle(struct symbol *sym)
564 {
565 	const char * const idle_symbols[] = {
566 		"cpu_idle",
567 		"cpu_startup_entry",
568 		"intel_idle",
569 		"default_idle",
570 		"native_safe_halt",
571 		"enter_idle",
572 		"exit_idle",
573 		"mwait_idle",
574 		"mwait_idle_with_hints",
575 		"poll_idle",
576 		"ppc64_runlatch_off",
577 		"pseries_dedicated_idle_sleep",
578 		NULL
579 	};
580 
581 	int i;
582 
583 	if (!sym)
584 		return false;
585 
586 	for (i = 0; idle_symbols[i]; i++) {
587 		if (!strcmp(idle_symbols[i], sym->name))
588 			return true;
589 	}
590 
591 	return false;
592 }
593 
594 static int map__process_kallsym_symbol(void *arg, const char *name,
595 				       char type, u64 start)
596 {
597 	struct symbol *sym;
598 	struct process_kallsyms_args *a = arg;
599 	struct rb_root *root = &a->dso->symbols[a->map->type];
600 
601 	if (!symbol_type__is_a(type, a->map->type))
602 		return 0;
603 
604 	/*
605 	 * module symbols are not sorted so we add all
606 	 * symbols, setting length to 0, and rely on
607 	 * symbols__fixup_end() to fix it up.
608 	 */
609 	sym = symbol__new(start, 0, kallsyms2elf_type(type), name);
610 	if (sym == NULL)
611 		return -ENOMEM;
612 	/*
613 	 * We will pass the symbols to the filter later, in
614 	 * dso__split_kallsyms(), when we have split the maps per module
615 	 */
616 	symbols__insert(root, sym);
617 
618 	return 0;
619 }
620 
621 /*
622  * Loads the function entries in /proc/kallsyms into kernel_map->dso,
623  * so that we can in the next step set the symbol ->end address and then
624  * call dso__split_kallsyms().
625  */
626 static int dso__load_all_kallsyms(struct dso *dso, const char *filename,
627 				  struct map *map)
628 {
629 	struct process_kallsyms_args args = { .map = map, .dso = dso, };
630 	return kallsyms__parse(filename, &args, map__process_kallsym_symbol);
631 }
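
/*
 * Editor's note (illustrative, not part of the original file): kallsyms lines
 * have the form "address type name", with module symbols carrying an extra
 * tab-separated suffix, e.g.
 *
 *   ffffffff8109d8a0 T schedule
 *   ffffffffa0049000 t e1000_clean	[e1000e]
 *
 * The "\t[module]" part is kept in the symbol name at this stage and is only
 * split off later, in dso__split_kallsyms(), via strchr(name, '\t').
 */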
632 
633 static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map,
634 					 symbol_filter_t filter)
635 {
636 	struct map_groups *kmaps = map__kmaps(map);
637 	struct map *curr_map;
638 	struct symbol *pos;
639 	int count = 0, moved = 0;
640 	struct rb_root *root = &dso->symbols[map->type];
641 	struct rb_node *next = rb_first(root);
642 
643 	if (!kmaps)
644 		return -1;
645 
646 	while (next) {
647 		char *module;
648 
649 		pos = rb_entry(next, struct symbol, rb_node);
650 		next = rb_next(&pos->rb_node);
651 
652 		module = strchr(pos->name, '\t');
653 		if (module)
654 			*module = '\0';
655 
656 		curr_map = map_groups__find(kmaps, map->type, pos->start);
657 
658 		if (!curr_map || (filter && filter(curr_map, pos))) {
659 			rb_erase(&pos->rb_node, root);
660 			symbol__delete(pos);
661 		} else {
662 			pos->start -= curr_map->start - curr_map->pgoff;
663 			if (pos->end)
664 				pos->end -= curr_map->start - curr_map->pgoff;
665 			if (curr_map != map) {
666 				rb_erase(&pos->rb_node, root);
667 				symbols__insert(
668 					&curr_map->dso->symbols[curr_map->type],
669 					pos);
670 				++moved;
671 			} else {
672 				++count;
673 			}
674 		}
675 	}
676 
677 	/* Symbols have been adjusted */
678 	dso->adjust_symbols = 1;
679 
680 	return count + moved;
681 }
682 
683 /*
684  * Split the symbols into maps, making sure there are no overlaps, i.e. the
685  * kernel range is broken into several maps, named [kernel].N, as we don't have
686  * the original ELF section names that vmlinux has.
687  */
688 static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta,
689 			       symbol_filter_t filter)
690 {
691 	struct map_groups *kmaps = map__kmaps(map);
692 	struct machine *machine;
693 	struct map *curr_map = map;
694 	struct symbol *pos;
695 	int count = 0, moved = 0;
696 	struct rb_root *root = &dso->symbols[map->type];
697 	struct rb_node *next = rb_first(root);
698 	int kernel_range = 0;
699 
700 	if (!kmaps)
701 		return -1;
702 
703 	machine = kmaps->machine;
704 
705 	while (next) {
706 		char *module;
707 
708 		pos = rb_entry(next, struct symbol, rb_node);
709 		next = rb_next(&pos->rb_node);
710 
711 		module = strchr(pos->name, '\t');
712 		if (module) {
713 			if (!symbol_conf.use_modules)
714 				goto discard_symbol;
715 
716 			*module++ = '\0';
717 
718 			if (strcmp(curr_map->dso->short_name, module)) {
719 				if (curr_map != map &&
720 				    dso->kernel == DSO_TYPE_GUEST_KERNEL &&
721 				    machine__is_default_guest(machine)) {
722 					/*
723 					 * We assume all symbols of a module are
724 					 * contiguous in kallsyms, so curr_map
725 					 * points to a module and all its
726 					 * symbols are in its kmap. Mark it as
727 					 * loaded.
728 					 */
729 					dso__set_loaded(curr_map->dso,
730 							curr_map->type);
731 				}
732 
733 				curr_map = map_groups__find_by_name(kmaps,
734 							map->type, module);
735 				if (curr_map == NULL) {
736 					pr_debug("%s/proc/{kallsyms,modules} "
737 					         "inconsistency while looking "
738 						 "for \"%s\" module!\n",
739 						 machine->root_dir, module);
740 					curr_map = map;
741 					goto discard_symbol;
742 				}
743 
744 				if (curr_map->dso->loaded &&
745 				    !machine__is_default_guest(machine))
746 					goto discard_symbol;
747 			}
748 			/*
749 			 * So that we look just like what we get from .ko files,
750 			 * i.e. not prelinked, relative to map->start.
751 			 */
752 			pos->start = curr_map->map_ip(curr_map, pos->start);
753 			pos->end   = curr_map->map_ip(curr_map, pos->end);
754 		} else if (curr_map != map) {
755 			char dso_name[PATH_MAX];
756 			struct dso *ndso;
757 
758 			if (delta) {
759 				/* Kernel was relocated at boot time */
760 				pos->start -= delta;
761 				pos->end -= delta;
762 			}
763 
764 			if (count == 0) {
765 				curr_map = map;
766 				goto filter_symbol;
767 			}
768 
769 			if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
770 				snprintf(dso_name, sizeof(dso_name),
771 					"[guest.kernel].%d",
772 					kernel_range++);
773 			else
774 				snprintf(dso_name, sizeof(dso_name),
775 					"[kernel].%d",
776 					kernel_range++);
777 
778 			ndso = dso__new(dso_name);
779 			if (ndso == NULL)
780 				return -1;
781 
782 			ndso->kernel = dso->kernel;
783 
784 			curr_map = map__new2(pos->start, ndso, map->type);
785 			if (curr_map == NULL) {
786 				dso__delete(ndso);
787 				return -1;
788 			}
789 
790 			curr_map->map_ip = curr_map->unmap_ip = identity__map_ip;
791 			map_groups__insert(kmaps, curr_map);
792 			++kernel_range;
793 		} else if (delta) {
794 			/* Kernel was relocated at boot time */
795 			pos->start -= delta;
796 			pos->end -= delta;
797 		}
798 filter_symbol:
799 		if (filter && filter(curr_map, pos)) {
800 discard_symbol:		rb_erase(&pos->rb_node, root);
801 			symbol__delete(pos);
802 		} else {
803 			if (curr_map != map) {
804 				rb_erase(&pos->rb_node, root);
805 				symbols__insert(&curr_map->dso->symbols[curr_map->type], pos);
806 				++moved;
807 			} else
808 				++count;
809 		}
810 	}
811 
812 	if (curr_map != map &&
813 	    dso->kernel == DSO_TYPE_GUEST_KERNEL &&
814 	    machine__is_default_guest(kmaps->machine)) {
815 		dso__set_loaded(curr_map->dso, curr_map->type);
816 	}
817 
818 	return count + moved;
819 }
820 
821 bool symbol__restricted_filename(const char *filename,
822 				 const char *restricted_filename)
823 {
824 	bool restricted = false;
825 
826 	if (symbol_conf.kptr_restrict) {
827 		char *r = realpath(filename, NULL);
828 
829 		if (r != NULL) {
830 			restricted = strcmp(r, restricted_filename) == 0;
831 			free(r);
832 			return restricted;
833 		}
834 	}
835 
836 	return restricted;
837 }
838 
839 struct module_info {
840 	struct rb_node rb_node;
841 	char *name;
842 	u64 start;
843 };
844 
845 static void add_module(struct module_info *mi, struct rb_root *modules)
846 {
847 	struct rb_node **p = &modules->rb_node;
848 	struct rb_node *parent = NULL;
849 	struct module_info *m;
850 
851 	while (*p != NULL) {
852 		parent = *p;
853 		m = rb_entry(parent, struct module_info, rb_node);
854 		if (strcmp(mi->name, m->name) < 0)
855 			p = &(*p)->rb_left;
856 		else
857 			p = &(*p)->rb_right;
858 	}
859 	rb_link_node(&mi->rb_node, parent, p);
860 	rb_insert_color(&mi->rb_node, modules);
861 }
862 
863 static void delete_modules(struct rb_root *modules)
864 {
865 	struct module_info *mi;
866 	struct rb_node *next = rb_first(modules);
867 
868 	while (next) {
869 		mi = rb_entry(next, struct module_info, rb_node);
870 		next = rb_next(&mi->rb_node);
871 		rb_erase(&mi->rb_node, modules);
872 		zfree(&mi->name);
873 		free(mi);
874 	}
875 }
876 
877 static struct module_info *find_module(const char *name,
878 				       struct rb_root *modules)
879 {
880 	struct rb_node *n = modules->rb_node;
881 
882 	while (n) {
883 		struct module_info *m;
884 		int cmp;
885 
886 		m = rb_entry(n, struct module_info, rb_node);
887 		cmp = strcmp(name, m->name);
888 		if (cmp < 0)
889 			n = n->rb_left;
890 		else if (cmp > 0)
891 			n = n->rb_right;
892 		else
893 			return m;
894 	}
895 
896 	return NULL;
897 }
898 
899 static int __read_proc_modules(void *arg, const char *name, u64 start)
900 {
901 	struct rb_root *modules = arg;
902 	struct module_info *mi;
903 
904 	mi = zalloc(sizeof(struct module_info));
905 	if (!mi)
906 		return -ENOMEM;
907 
908 	mi->name = strdup(name);
909 	mi->start = start;
910 
911 	if (!mi->name) {
912 		free(mi);
913 		return -ENOMEM;
914 	}
915 
916 	add_module(mi, modules);
917 
918 	return 0;
919 }
920 
921 static int read_proc_modules(const char *filename, struct rb_root *modules)
922 {
923 	if (symbol__restricted_filename(filename, "/proc/modules"))
924 		return -1;
925 
926 	if (modules__parse(filename, modules, __read_proc_modules)) {
927 		delete_modules(modules);
928 		return -1;
929 	}
930 
931 	return 0;
932 }
933 
934 int compare_proc_modules(const char *from, const char *to)
935 {
936 	struct rb_root from_modules = RB_ROOT;
937 	struct rb_root to_modules = RB_ROOT;
938 	struct rb_node *from_node, *to_node;
939 	struct module_info *from_m, *to_m;
940 	int ret = -1;
941 
942 	if (read_proc_modules(from, &from_modules))
943 		return -1;
944 
945 	if (read_proc_modules(to, &to_modules))
946 		goto out_delete_from;
947 
948 	from_node = rb_first(&from_modules);
949 	to_node = rb_first(&to_modules);
950 	while (from_node) {
951 		if (!to_node)
952 			break;
953 
954 		from_m = rb_entry(from_node, struct module_info, rb_node);
955 		to_m = rb_entry(to_node, struct module_info, rb_node);
956 
957 		if (from_m->start != to_m->start ||
958 		    strcmp(from_m->name, to_m->name))
959 			break;
960 
961 		from_node = rb_next(from_node);
962 		to_node = rb_next(to_node);
963 	}
964 
965 	if (!from_node && !to_node)
966 		ret = 0;
967 
968 	delete_modules(&to_modules);
969 out_delete_from:
970 	delete_modules(&from_modules);
971 
972 	return ret;
973 }
974 
975 static int do_validate_kcore_modules(const char *filename, struct map *map,
976 				  struct map_groups *kmaps)
977 {
978 	struct rb_root modules = RB_ROOT;
979 	struct map *old_map;
980 	int err;
981 
982 	err = read_proc_modules(filename, &modules);
983 	if (err)
984 		return err;
985 
986 	old_map = map_groups__first(kmaps, map->type);
987 	while (old_map) {
988 		struct map *next = map_groups__next(old_map);
989 		struct module_info *mi;
990 
991 		if (old_map == map || old_map->start == map->start) {
992 			/* The kernel map */
993 			old_map = next;
994 			continue;
995 		}
996 
997 		/* Module must be in memory at the same address */
998 		mi = find_module(old_map->dso->short_name, &modules);
999 		if (!mi || mi->start != old_map->start) {
1000 			err = -EINVAL;
1001 			goto out;
1002 		}
1003 
1004 		old_map = next;
1005 	}
1006 out:
1007 	delete_modules(&modules);
1008 	return err;
1009 }
1010 
1011 /*
1012  * If kallsyms is referenced by name then we look for filename in the same
1013  * directory.
1014  */
1015 static bool filename_from_kallsyms_filename(char *filename,
1016 					    const char *base_name,
1017 					    const char *kallsyms_filename)
1018 {
1019 	char *name;
1020 
1021 	strcpy(filename, kallsyms_filename);
1022 	name = strrchr(filename, '/');
1023 	if (!name)
1024 		return false;
1025 
1026 	name += 1;
1027 
1028 	if (!strcmp(name, "kallsyms")) {
1029 		strcpy(name, base_name);
1030 		return true;
1031 	}
1032 
1033 	return false;
1034 }
1035 
1036 static int validate_kcore_modules(const char *kallsyms_filename,
1037 				  struct map *map)
1038 {
1039 	struct map_groups *kmaps = map__kmaps(map);
1040 	char modules_filename[PATH_MAX];
1041 
1042 	if (!kmaps)
1043 		return -EINVAL;
1044 
1045 	if (!filename_from_kallsyms_filename(modules_filename, "modules",
1046 					     kallsyms_filename))
1047 		return -EINVAL;
1048 
1049 	if (do_validate_kcore_modules(modules_filename, map, kmaps))
1050 		return -EINVAL;
1051 
1052 	return 0;
1053 }
1054 
1055 static int validate_kcore_addresses(const char *kallsyms_filename,
1056 				    struct map *map)
1057 {
1058 	struct kmap *kmap = map__kmap(map);
1059 
1060 	if (!kmap)
1061 		return -EINVAL;
1062 
1063 	if (kmap->ref_reloc_sym && kmap->ref_reloc_sym->name) {
1064 		u64 start;
1065 
1066 		start = kallsyms__get_function_start(kallsyms_filename,
1067 						     kmap->ref_reloc_sym->name);
1068 		if (start != kmap->ref_reloc_sym->addr)
1069 			return -EINVAL;
1070 	}
1071 
1072 	return validate_kcore_modules(kallsyms_filename, map);
1073 }
1074 
1075 struct kcore_mapfn_data {
1076 	struct dso *dso;
1077 	enum map_type type;
1078 	struct list_head maps;
1079 };
1080 
1081 static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data)
1082 {
1083 	struct kcore_mapfn_data *md = data;
1084 	struct map *map;
1085 
1086 	map = map__new2(start, md->dso, md->type);
1087 	if (map == NULL)
1088 		return -ENOMEM;
1089 
1090 	map->end = map->start + len;
1091 	map->pgoff = pgoff;
1092 
1093 	list_add(&map->node, &md->maps);
1094 
1095 	return 0;
1096 }
1097 
1098 static int dso__load_kcore(struct dso *dso, struct map *map,
1099 			   const char *kallsyms_filename)
1100 {
1101 	struct map_groups *kmaps = map__kmaps(map);
1102 	struct machine *machine;
1103 	struct kcore_mapfn_data md;
1104 	struct map *old_map, *new_map, *replacement_map = NULL;
1105 	bool is_64_bit;
1106 	int err, fd;
1107 	char kcore_filename[PATH_MAX];
1108 	struct symbol *sym;
1109 
1110 	if (!kmaps)
1111 		return -EINVAL;
1112 
1113 	machine = kmaps->machine;
1114 
1115 	/* This function requires that the map is the kernel map */
1116 	if (map != machine->vmlinux_maps[map->type])
1117 		return -EINVAL;
1118 
1119 	if (!filename_from_kallsyms_filename(kcore_filename, "kcore",
1120 					     kallsyms_filename))
1121 		return -EINVAL;
1122 
1123 	/* Modules and kernel must be present at their original addresses */
1124 	if (validate_kcore_addresses(kallsyms_filename, map))
1125 		return -EINVAL;
1126 
1127 	md.dso = dso;
1128 	md.type = map->type;
1129 	INIT_LIST_HEAD(&md.maps);
1130 
1131 	fd = open(kcore_filename, O_RDONLY);
1132 	if (fd < 0)
1133 		return -EINVAL;
1134 
1135 	/* Read new maps into temporary lists */
1136 	err = file__read_maps(fd, md.type == MAP__FUNCTION, kcore_mapfn, &md,
1137 			      &is_64_bit);
1138 	if (err)
1139 		goto out_err;
1140 	dso->is_64_bit = is_64_bit;
1141 
1142 	if (list_empty(&md.maps)) {
1143 		err = -EINVAL;
1144 		goto out_err;
1145 	}
1146 
1147 	/* Remove old maps */
1148 	old_map = map_groups__first(kmaps, map->type);
1149 	while (old_map) {
1150 		struct map *next = map_groups__next(old_map);
1151 
1152 		if (old_map != map)
1153 			map_groups__remove(kmaps, old_map);
1154 		old_map = next;
1155 	}
1156 
1157 	/* Find the kernel map using the first symbol */
1158 	sym = dso__first_symbol(dso, map->type);
1159 	list_for_each_entry(new_map, &md.maps, node) {
1160 		if (sym && sym->start >= new_map->start &&
1161 		    sym->start < new_map->end) {
1162 			replacement_map = new_map;
1163 			break;
1164 		}
1165 	}
1166 
1167 	if (!replacement_map)
1168 		replacement_map = list_entry(md.maps.next, struct map, node);
1169 
1170 	/* Add new maps */
1171 	while (!list_empty(&md.maps)) {
1172 		new_map = list_entry(md.maps.next, struct map, node);
1173 		list_del(&new_map->node);
1174 		if (new_map == replacement_map) {
1175 			map->start	= new_map->start;
1176 			map->end	= new_map->end;
1177 			map->pgoff	= new_map->pgoff;
1178 			map->map_ip	= new_map->map_ip;
1179 			map->unmap_ip	= new_map->unmap_ip;
1180 			map__delete(new_map);
1181 			/* Ensure maps are correctly ordered */
1182 			map_groups__remove(kmaps, map);
1183 			map_groups__insert(kmaps, map);
1184 		} else {
1185 			map_groups__insert(kmaps, new_map);
1186 		}
1187 	}
1188 
1189 	/*
1190 	 * Set the data type and long name so that kcore can be read via
1191 	 * dso__data_read_addr().
1192 	 */
1193 	if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1194 		dso->binary_type = DSO_BINARY_TYPE__GUEST_KCORE;
1195 	else
1196 		dso->binary_type = DSO_BINARY_TYPE__KCORE;
1197 	dso__set_long_name(dso, strdup(kcore_filename), true);
1198 
1199 	close(fd);
1200 
1201 	if (map->type == MAP__FUNCTION)
1202 		pr_debug("Using %s for kernel object code\n", kcore_filename);
1203 	else
1204 		pr_debug("Using %s for kernel data\n", kcore_filename);
1205 
1206 	return 0;
1207 
1208 out_err:
1209 	while (!list_empty(&md.maps)) {
1210 		map = list_entry(md.maps.next, struct map, node);
1211 		list_del(&map->node);
1212 		map__delete(map);
1213 	}
1214 	close(fd);
1215 	return -EINVAL;
1216 }
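
/*
 * Editor's note (not part of the original file): when a kcore image is
 * usable, dso__load_kcore() above replaces the guessed kernel/module maps
 * with the ones read from kcore's program headers and points the dso at the
 * kcore file, so later dso__data_read_addr() calls fetch the bytes of the
 * kernel that actually ran (useful for annotation/disassembly) instead of a
 * possibly mismatching vmlinux on disk.
 */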
1217 
1218 /*
1219  * If the kernel is relocated at boot time, kallsyms won't match.  Compute the
1220  * delta based on the relocation reference symbol.
1221  */
1222 static int kallsyms__delta(struct map *map, const char *filename, u64 *delta)
1223 {
1224 	struct kmap *kmap = map__kmap(map);
1225 	u64 addr;
1226 
1227 	if (!kmap)
1228 		return -1;
1229 
1230 	if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->name)
1231 		return 0;
1232 
1233 	addr = kallsyms__get_function_start(filename,
1234 					    kmap->ref_reloc_sym->name);
1235 	if (!addr)
1236 		return -1;
1237 
1238 	*delta = addr - kmap->ref_reloc_sym->addr;
1239 	return 0;
1240 }
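
/*
 * Illustrative example (not part of the original file): if the recorded
 * ref_reloc_sym (typically "_text") was at 0xffffffff81000000 but the
 * kallsyms being parsed now shows it at 0xffffffff90000000 (e.g. a
 * relocated/kASLR boot), *delta becomes 0xf000000 and dso__split_kallsyms()
 * subtracts it from every symbol so the addresses line up with the recorded
 * maps.
 */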
1241 
1242 int dso__load_kallsyms(struct dso *dso, const char *filename,
1243 		       struct map *map, symbol_filter_t filter)
1244 {
1245 	u64 delta = 0;
1246 
1247 	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
1248 		return -1;
1249 
1250 	if (dso__load_all_kallsyms(dso, filename, map) < 0)
1251 		return -1;
1252 
1253 	if (kallsyms__delta(map, filename, &delta))
1254 		return -1;
1255 
1256 	symbols__fixup_duplicate(&dso->symbols[map->type]);
1257 	symbols__fixup_end(&dso->symbols[map->type]);
1258 
1259 	if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1260 		dso->symtab_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
1261 	else
1262 		dso->symtab_type = DSO_BINARY_TYPE__KALLSYMS;
1263 
1264 	if (!dso__load_kcore(dso, map, filename))
1265 		return dso__split_kallsyms_for_kcore(dso, map, filter);
1266 	else
1267 		return dso__split_kallsyms(dso, map, delta, filter);
1268 }
1269 
1270 static int dso__load_perf_map(struct dso *dso, struct map *map,
1271 			      symbol_filter_t filter)
1272 {
1273 	char *line = NULL;
1274 	size_t n;
1275 	FILE *file;
1276 	int nr_syms = 0;
1277 
1278 	file = fopen(dso->long_name, "r");
1279 	if (file == NULL)
1280 		goto out_failure;
1281 
1282 	while (!feof(file)) {
1283 		u64 start, size;
1284 		struct symbol *sym;
1285 		int line_len, len;
1286 
1287 		line_len = getline(&line, &n, file);
1288 		if (line_len < 0)
1289 			break;
1290 
1291 		if (!line)
1292 			goto out_failure;
1293 
1294 		line[--line_len] = '\0'; /* \n */
1295 
1296 		len = hex2u64(line, &start);
1297 
1298 		len++;
1299 		if (len + 2 >= line_len)
1300 			continue;
1301 
1302 		len += hex2u64(line + len, &size);
1303 
1304 		len++;
1305 		if (len + 2 >= line_len)
1306 			continue;
1307 
1308 		sym = symbol__new(start, size, STB_GLOBAL, line + len);
1309 
1310 		if (sym == NULL)
1311 			goto out_delete_line;
1312 
1313 		if (filter && filter(map, sym))
1314 			symbol__delete(sym);
1315 		else {
1316 			symbols__insert(&dso->symbols[map->type], sym);
1317 			nr_syms++;
1318 		}
1319 	}
1320 
1321 	free(line);
1322 	fclose(file);
1323 
1324 	return nr_syms;
1325 
1326 out_delete_line:
1327 	free(line);
1328 out_failure:
1329 	return -1;
1330 }
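
/*
 * Illustrative example (not part of the original file): /tmp/perf-<pid>.map
 * files written by JITs contain one "start size name" line per symbol, with
 * start and size in hex, e.g.
 *
 *   40201000 1c  jit_trampoline
 *   40201020 240 compiled_method_foo
 *
 * which dso__load_perf_map() above turns into STB_GLOBAL symbols.
 */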
1331 
1332 static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod,
1333 					   enum dso_binary_type type)
1334 {
1335 	switch (type) {
1336 	case DSO_BINARY_TYPE__JAVA_JIT:
1337 	case DSO_BINARY_TYPE__DEBUGLINK:
1338 	case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
1339 	case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
1340 	case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
1341 	case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
1342 	case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
1343 		return !kmod && dso->kernel == DSO_TYPE_USER;
1344 
1345 	case DSO_BINARY_TYPE__KALLSYMS:
1346 	case DSO_BINARY_TYPE__VMLINUX:
1347 	case DSO_BINARY_TYPE__KCORE:
1348 		return dso->kernel == DSO_TYPE_KERNEL;
1349 
1350 	case DSO_BINARY_TYPE__GUEST_KALLSYMS:
1351 	case DSO_BINARY_TYPE__GUEST_VMLINUX:
1352 	case DSO_BINARY_TYPE__GUEST_KCORE:
1353 		return dso->kernel == DSO_TYPE_GUEST_KERNEL;
1354 
1355 	case DSO_BINARY_TYPE__GUEST_KMODULE:
1356 	case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
1357 	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
1358 	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
1359 		/*
1360 		 * kernel modules know their symtab type - it's set when
1361 		 * creating a module dso in machine__new_module().
1362 		 */
1363 		return kmod && dso->symtab_type == type;
1364 
1365 	case DSO_BINARY_TYPE__BUILD_ID_CACHE:
1366 		return true;
1367 
1368 	case DSO_BINARY_TYPE__NOT_FOUND:
1369 	default:
1370 		return false;
1371 	}
1372 }
1373 
1374 int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
1375 {
1376 	char *name;
1377 	int ret = -1;
1378 	u_int i;
1379 	struct machine *machine;
1380 	char *root_dir = (char *) "";
1381 	int ss_pos = 0;
1382 	struct symsrc ss_[2];
1383 	struct symsrc *syms_ss = NULL, *runtime_ss = NULL;
1384 	bool kmod;
1385 
1386 	dso__set_loaded(dso, map->type);
1387 
1388 	if (dso->kernel == DSO_TYPE_KERNEL)
1389 		return dso__load_kernel_sym(dso, map, filter);
1390 	else if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1391 		return dso__load_guest_kernel_sym(dso, map, filter);
1392 
1393 	if (map->groups && map->groups->machine)
1394 		machine = map->groups->machine;
1395 	else
1396 		machine = NULL;
1397 
1398 	dso->adjust_symbols = 0;
1399 
1400 	if (strncmp(dso->name, "/tmp/perf-", 10) == 0) {
1401 		struct stat st;
1402 
1403 		if (lstat(dso->name, &st) < 0)
1404 			return -1;
1405 
1406 		if (st.st_uid && (st.st_uid != geteuid())) {
1407 			pr_warning("File %s not owned by current user or root, "
1408 				"ignoring it.\n", dso->name);
1409 			return -1;
1410 		}
1411 
1412 		ret = dso__load_perf_map(dso, map, filter);
1413 		dso->symtab_type = ret > 0 ? DSO_BINARY_TYPE__JAVA_JIT :
1414 					     DSO_BINARY_TYPE__NOT_FOUND;
1415 		return ret;
1416 	}
1417 
1418 	if (machine)
1419 		root_dir = machine->root_dir;
1420 
1421 	name = malloc(PATH_MAX);
1422 	if (!name)
1423 		return -1;
1424 
1425 	kmod = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
1426 		dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
1427 		dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE ||
1428 		dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
1429 
1430 	/*
1431 	 * Iterate over candidate debug images.
1432 	 * Keep track of "interesting" ones (those which have a symtab, dynsym,
1433 	 * and/or opd section) for processing.
1434 	 */
1435 	for (i = 0; i < DSO_BINARY_TYPE__SYMTAB_CNT; i++) {
1436 		struct symsrc *ss = &ss_[ss_pos];
1437 		bool next_slot = false;
1438 
1439 		enum dso_binary_type symtab_type = binary_type_symtab[i];
1440 
1441 		if (!dso__is_compatible_symtab_type(dso, kmod, symtab_type))
1442 			continue;
1443 
1444 		if (dso__read_binary_type_filename(dso, symtab_type,
1445 						   root_dir, name, PATH_MAX))
1446 			continue;
1447 
1448 		/* Name is now the name of the next image to try */
1449 		if (symsrc__init(ss, dso, name, symtab_type) < 0)
1450 			continue;
1451 
1452 		if (!syms_ss && symsrc__has_symtab(ss)) {
1453 			syms_ss = ss;
1454 			next_slot = true;
1455 			if (!dso->symsrc_filename)
1456 				dso->symsrc_filename = strdup(name);
1457 		}
1458 
1459 		if (!runtime_ss && symsrc__possibly_runtime(ss)) {
1460 			runtime_ss = ss;
1461 			next_slot = true;
1462 		}
1463 
1464 		if (next_slot) {
1465 			ss_pos++;
1466 
1467 			if (syms_ss && runtime_ss)
1468 				break;
1469 		} else {
1470 			symsrc__destroy(ss);
1471 		}
1472 
1473 	}
1474 
1475 	if (!runtime_ss && !syms_ss)
1476 		goto out_free;
1477 
1478 	if (runtime_ss && !syms_ss) {
1479 		syms_ss = runtime_ss;
1480 	}
1481 
1482 	/* We'll have to hope for the best */
1483 	if (!runtime_ss && syms_ss)
1484 		runtime_ss = syms_ss;
1485 
1486 	if (syms_ss)
1487 		ret = dso__load_sym(dso, map, syms_ss, runtime_ss, filter, kmod);
1488 	else
1489 		ret = -1;
1490 
1491 	if (ret > 0) {
1492 		int nr_plt;
1493 
1494 		nr_plt = dso__synthesize_plt_symbols(dso, runtime_ss, map, filter);
1495 		if (nr_plt > 0)
1496 			ret += nr_plt;
1497 	}
1498 
1499 	for (; ss_pos > 0; ss_pos--)
1500 		symsrc__destroy(&ss_[ss_pos - 1]);
1501 out_free:
1502 	free(name);
1503 	if (ret < 0 && strstr(dso->name, " (deleted)") != NULL)
1504 		return 0;
1505 	return ret;
1506 }
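
/*
 * Editor's note (not part of the original file): dso__load() above keeps at
 * most two symbol sources: syms_ss, the first candidate with a real symbol
 * table (e.g. a separate debuginfo file), and runtime_ss, the image that was
 * actually mapped at run time, which is also the one handed to
 * dso__synthesize_plt_symbols() so PLT entries resolve against the right
 * file.  When only one of the two is found, it is reused for both roles.
 */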
1507 
1508 struct map *map_groups__find_by_name(struct map_groups *mg,
1509 				     enum map_type type, const char *name)
1510 {
1511 	struct rb_node *nd;
1512 
1513 	for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) {
1514 		struct map *map = rb_entry(nd, struct map, rb_node);
1515 
1516 		if (map->dso && strcmp(map->dso->short_name, name) == 0)
1517 			return map;
1518 	}
1519 
1520 	return NULL;
1521 }
1522 
1523 int dso__load_vmlinux(struct dso *dso, struct map *map,
1524 		      const char *vmlinux, bool vmlinux_allocated,
1525 		      symbol_filter_t filter)
1526 {
1527 	int err = -1;
1528 	struct symsrc ss;
1529 	char symfs_vmlinux[PATH_MAX];
1530 	enum dso_binary_type symtab_type;
1531 
1532 	if (vmlinux[0] == '/')
1533 		snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s", vmlinux);
1534 	else
1535 		symbol__join_symfs(symfs_vmlinux, vmlinux);
1536 
1537 	if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1538 		symtab_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
1539 	else
1540 		symtab_type = DSO_BINARY_TYPE__VMLINUX;
1541 
1542 	if (symsrc__init(&ss, dso, symfs_vmlinux, symtab_type))
1543 		return -1;
1544 
1545 	err = dso__load_sym(dso, map, &ss, &ss, filter, 0);
1546 	symsrc__destroy(&ss);
1547 
1548 	if (err > 0) {
1549 		if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1550 			dso->binary_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
1551 		else
1552 			dso->binary_type = DSO_BINARY_TYPE__VMLINUX;
1553 		dso__set_long_name(dso, vmlinux, vmlinux_allocated);
1554 		dso__set_loaded(dso, map->type);
1555 		pr_debug("Using %s for symbols\n", symfs_vmlinux);
1556 	}
1557 
1558 	return err;
1559 }
1560 
1561 int dso__load_vmlinux_path(struct dso *dso, struct map *map,
1562 			   symbol_filter_t filter)
1563 {
1564 	int i, err = 0;
1565 	char *filename = NULL;
1566 
1567 	if (!symbol_conf.ignore_vmlinux_buildid)
1568 		filename = dso__build_id_filename(dso, NULL, 0);
1569 	if (filename != NULL) {
1570 		err = dso__load_vmlinux(dso, map, filename, true, filter);
1571 		if (err > 0)
1572 			goto out;
1573 		free(filename);
1574 	}
1575 
1576 	pr_debug("Looking at the vmlinux_path (%d entries long)\n",
1577 		 vmlinux_path__nr_entries + 1);
1578 
1579 	for (i = 0; i < vmlinux_path__nr_entries; ++i) {
1580 		err = dso__load_vmlinux(dso, map, vmlinux_path[i], false, filter);
1581 		if (err > 0)
1582 			break;
1583 	}
1584 out:
1585 	return err;
1586 }
1587 
1588 static int find_matching_kcore(struct map *map, char *dir, size_t dir_sz)
1589 {
1590 	char kallsyms_filename[PATH_MAX];
1591 	struct dirent *dent;
1592 	int ret = -1;
1593 	DIR *d;
1594 
1595 	d = opendir(dir);
1596 	if (!d)
1597 		return -1;
1598 
1599 	while (1) {
1600 		dent = readdir(d);
1601 		if (!dent)
1602 			break;
1603 		if (dent->d_type != DT_DIR)
1604 			continue;
1605 		scnprintf(kallsyms_filename, sizeof(kallsyms_filename),
1606 			  "%s/%s/kallsyms", dir, dent->d_name);
1607 		if (!validate_kcore_addresses(kallsyms_filename, map)) {
1608 			strlcpy(dir, kallsyms_filename, dir_sz);
1609 			ret = 0;
1610 			break;
1611 		}
1612 	}
1613 
1614 	closedir(d);
1615 
1616 	return ret;
1617 }
1618 
1619 static char *dso__find_kallsyms(struct dso *dso, struct map *map)
1620 {
1621 	u8 host_build_id[BUILD_ID_SIZE];
1622 	char sbuild_id[BUILD_ID_SIZE * 2 + 1];
1623 	bool is_host = false;
1624 	char path[PATH_MAX];
1625 
1626 	if (!dso->has_build_id) {
1627 		/*
1628 		 * Last resort, if we don't have a build-id and couldn't find
1629 		 * any vmlinux file, try the running kernel kallsyms table.
1630 		 */
1631 		goto proc_kallsyms;
1632 	}
1633 
1634 	if (sysfs__read_build_id("/sys/kernel/notes", host_build_id,
1635 				 sizeof(host_build_id)) == 0)
1636 		is_host = dso__build_id_equal(dso, host_build_id);
1637 
1638 	build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
1639 
1640 	scnprintf(path, sizeof(path), "%s/[kernel.kcore]/%s", buildid_dir,
1641 		  sbuild_id);
1642 
1643 	/* Use /proc/kallsyms if possible */
1644 	if (is_host) {
1645 		DIR *d;
1646 		int fd;
1647 
1648 		/* If no cached kcore go with /proc/kallsyms */
1649 		d = opendir(path);
1650 		if (!d)
1651 			goto proc_kallsyms;
1652 		closedir(d);
1653 
1654 		/*
1655 		 * Do not check the build-id cache until we know we cannot use
1656 		 * /proc/kcore.
1657 		 */
1658 		fd = open("/proc/kcore", O_RDONLY);
1659 		if (fd != -1) {
1660 			close(fd);
1661 			/* If module maps match go with /proc/kallsyms */
1662 			if (!validate_kcore_addresses("/proc/kallsyms", map))
1663 				goto proc_kallsyms;
1664 		}
1665 
1666 		/* Find kallsyms in build-id cache with kcore */
1667 		if (!find_matching_kcore(map, path, sizeof(path)))
1668 			return strdup(path);
1669 
1670 		goto proc_kallsyms;
1671 	}
1672 
1673 	/* Find kallsyms in build-id cache with kcore */
1674 	if (!find_matching_kcore(map, path, sizeof(path)))
1675 		return strdup(path);
1676 
1677 	scnprintf(path, sizeof(path), "%s/[kernel.kallsyms]/%s",
1678 		  buildid_dir, sbuild_id);
1679 
1680 	if (access(path, F_OK)) {
1681 		pr_err("No kallsyms or vmlinux with build-id %s was found\n",
1682 		       sbuild_id);
1683 		return NULL;
1684 	}
1685 
1686 	return strdup(path);
1687 
1688 proc_kallsyms:
1689 	return strdup("/proc/kallsyms");
1690 }
1691 
1692 static int dso__load_kernel_sym(struct dso *dso, struct map *map,
1693 				symbol_filter_t filter)
1694 {
1695 	int err;
1696 	const char *kallsyms_filename = NULL;
1697 	char *kallsyms_allocated_filename = NULL;
1698 	/*
1699 	 * Step 1: if the user specified a kallsyms or vmlinux filename, use
1700 	 * it and only it, reporting errors to the user if it cannot be used.
1701 	 *
1702 	 * For instance, try to analyse an ARM perf.data file _without_ a
1703 	 * build-id, or if the user specifies the wrong path to the right
1704 	 * vmlinux file, obviously we can't fall back to another vmlinux (an
1705 	 * x86_64 one, on the machine where analysis is being performed, say),
1706 	 * or worse, /proc/kallsyms.
1707 	 *
1708 	 * If the specified file _has_ a build-id and there is a build-id
1709 	 * section in the perf.data file, we will still do the expected
1710 	 * validation in dso__load_vmlinux and will bail out if they don't
1711 	 * match.
1712 	 */
1713 	if (symbol_conf.kallsyms_name != NULL) {
1714 		kallsyms_filename = symbol_conf.kallsyms_name;
1715 		goto do_kallsyms;
1716 	}
1717 
1718 	if (!symbol_conf.ignore_vmlinux && symbol_conf.vmlinux_name != NULL) {
1719 		return dso__load_vmlinux(dso, map, symbol_conf.vmlinux_name,
1720 					 false, filter);
1721 	}
1722 
1723 	if (!symbol_conf.ignore_vmlinux && vmlinux_path != NULL) {
1724 		err = dso__load_vmlinux_path(dso, map, filter);
1725 		if (err > 0)
1726 			return err;
1727 	}
1728 
1729 	/* do not try local files if a symfs was given */
1730 	if (symbol_conf.symfs[0] != 0)
1731 		return -1;
1732 
1733 	kallsyms_allocated_filename = dso__find_kallsyms(dso, map);
1734 	if (!kallsyms_allocated_filename)
1735 		return -1;
1736 
1737 	kallsyms_filename = kallsyms_allocated_filename;
1738 
1739 do_kallsyms:
1740 	err = dso__load_kallsyms(dso, kallsyms_filename, map, filter);
1741 	if (err > 0)
1742 		pr_debug("Using %s for symbols\n", kallsyms_filename);
1743 	free(kallsyms_allocated_filename);
1744 
1745 	if (err > 0 && !dso__is_kcore(dso)) {
1746 		dso->binary_type = DSO_BINARY_TYPE__KALLSYMS;
1747 		dso__set_long_name(dso, "[kernel.kallsyms]", false);
1748 		map__fixup_start(map);
1749 		map__fixup_end(map);
1750 	}
1751 
1752 	return err;
1753 }
1754 
1755 static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map,
1756 				      symbol_filter_t filter)
1757 {
1758 	int err;
1759 	const char *kallsyms_filename = NULL;
1760 	struct machine *machine;
1761 	char path[PATH_MAX];
1762 
1763 	if (!map->groups) {
1764 		pr_debug("Guest kernel map hasn't the point to groups\n");
1765 		return -1;
1766 	}
1767 	machine = map->groups->machine;
1768 
1769 	if (machine__is_default_guest(machine)) {
1770 		/*
1771 		 * if the user specified a vmlinux filename, use it and only
1772 		 * it, reporting errors to the user if it cannot be used.
1773 		 * Or use the guest kallsyms file given by the user on the command line.
1774 		 */
1775 		if (symbol_conf.default_guest_vmlinux_name != NULL) {
1776 			err = dso__load_vmlinux(dso, map,
1777 						symbol_conf.default_guest_vmlinux_name,
1778 						false, filter);
1779 			return err;
1780 		}
1781 
1782 		kallsyms_filename = symbol_conf.default_guest_kallsyms;
1783 		if (!kallsyms_filename)
1784 			return -1;
1785 	} else {
1786 		sprintf(path, "%s/proc/kallsyms", machine->root_dir);
1787 		kallsyms_filename = path;
1788 	}
1789 
1790 	err = dso__load_kallsyms(dso, kallsyms_filename, map, filter);
1791 	if (err > 0)
1792 		pr_debug("Using %s for symbols\n", kallsyms_filename);
1793 	if (err > 0 && !dso__is_kcore(dso)) {
1794 		dso->binary_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
1795 		machine__mmap_name(machine, path, sizeof(path));
1796 		dso__set_long_name(dso, strdup(path), true);
1797 		map__fixup_start(map);
1798 		map__fixup_end(map);
1799 	}
1800 
1801 	return err;
1802 }
1803 
1804 static void vmlinux_path__exit(void)
1805 {
1806 	while (--vmlinux_path__nr_entries >= 0)
1807 		zfree(&vmlinux_path[vmlinux_path__nr_entries]);
1808 
1809 	zfree(&vmlinux_path);
1810 }
1811 
1812 static int vmlinux_path__init(struct perf_session_env *env)
1813 {
1814 	struct utsname uts;
1815 	char bf[PATH_MAX];
1816 	char *kernel_version;
1817 
1818 	vmlinux_path = malloc(sizeof(char *) * 6);
1819 	if (vmlinux_path == NULL)
1820 		return -1;
1821 
1822 	vmlinux_path[vmlinux_path__nr_entries] = strdup("vmlinux");
1823 	if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
1824 		goto out_fail;
1825 	++vmlinux_path__nr_entries;
1826 	vmlinux_path[vmlinux_path__nr_entries] = strdup("/boot/vmlinux");
1827 	if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
1828 		goto out_fail;
1829 	++vmlinux_path__nr_entries;
1830 
1831 	/* only try kernel version if no symfs was given */
1832 	if (symbol_conf.symfs[0] != 0)
1833 		return 0;
1834 
1835 	if (env) {
1836 		kernel_version = env->os_release;
1837 	} else {
1838 		if (uname(&uts) < 0)
1839 			goto out_fail;
1840 
1841 		kernel_version = uts.release;
1842 	}
1843 
1844 	snprintf(bf, sizeof(bf), "/boot/vmlinux-%s", kernel_version);
1845 	vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
1846 	if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
1847 		goto out_fail;
1848 	++vmlinux_path__nr_entries;
1849 	snprintf(bf, sizeof(bf), "/usr/lib/debug/boot/vmlinux-%s",
1850 		 kernel_version);
1851 	vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
1852 	if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
1853 		goto out_fail;
1854 	++vmlinux_path__nr_entries;
1855 	snprintf(bf, sizeof(bf), "/lib/modules/%s/build/vmlinux", kernel_version);
1856 	vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
1857 	if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
1858 		goto out_fail;
1859 	++vmlinux_path__nr_entries;
1860 	snprintf(bf, sizeof(bf), "/usr/lib/debug/lib/modules/%s/vmlinux",
1861 		 kernel_version);
1862 	vmlinux_path[vmlinux_path__nr_entries] = strdup(bf);
1863 	if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
1864 		goto out_fail;
1865 	++vmlinux_path__nr_entries;
1866 
1867 	return 0;
1868 
1869 out_fail:
1870 	vmlinux_path__exit();
1871 	return -1;
1872 }
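
/*
 * Editor's note (not part of the original file): with no symfs in effect,
 * the code above ends up probing, in order:
 *
 *   vmlinux
 *   /boot/vmlinux
 *   /boot/vmlinux-<release>
 *   /usr/lib/debug/boot/vmlinux-<release>
 *   /lib/modules/<release>/build/vmlinux
 *   /usr/lib/debug/lib/modules/<release>/vmlinux
 *
 * where <release> comes from the perf.data header if available, otherwise
 * from uname(2).
 */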
1873 
1874 int setup_list(struct strlist **list, const char *list_str,
1875 		      const char *list_name)
1876 {
1877 	if (list_str == NULL)
1878 		return 0;
1879 
1880 	*list = strlist__new(true, list_str);
1881 	if (!*list) {
1882 		pr_err("problems parsing %s list\n", list_name);
1883 		return -1;
1884 	}
1885 	return 0;
1886 }
1887 
1888 int setup_intlist(struct intlist **list, const char *list_str,
1889 		  const char *list_name)
1890 {
1891 	if (list_str == NULL)
1892 		return 0;
1893 
1894 	*list = intlist__new(list_str);
1895 	if (!*list) {
1896 		pr_err("problems parsing %s list\n", list_name);
1897 		return -1;
1898 	}
1899 	return 0;
1900 }
1901 
1902 static bool symbol__read_kptr_restrict(void)
1903 {
1904 	bool value = false;
1905 
1906 	if (geteuid() != 0) {
1907 		FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r");
1908 		if (fp != NULL) {
1909 			char line[8];
1910 
1911 			if (fgets(line, sizeof(line), fp) != NULL)
1912 				value = atoi(line) != 0;
1913 
1914 			fclose(fp);
1915 		}
1916 	}
1917 
1918 	return value;
1919 }
1920 
1921 int symbol__init(struct perf_session_env *env)
1922 {
1923 	const char *symfs;
1924 
1925 	if (symbol_conf.initialized)
1926 		return 0;
1927 
1928 	symbol_conf.priv_size = PERF_ALIGN(symbol_conf.priv_size, sizeof(u64));
1929 
1930 	symbol__elf_init();
1931 
1932 	if (symbol_conf.sort_by_name)
1933 		symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) -
1934 					  sizeof(struct symbol));
1935 
1936 	if (symbol_conf.try_vmlinux_path && vmlinux_path__init(env) < 0)
1937 		return -1;
1938 
1939 	if (symbol_conf.field_sep && *symbol_conf.field_sep == '.') {
1940 		pr_err("'.' is the only non valid --field-separator argument\n");
1941 		return -1;
1942 	}
1943 
1944 	if (setup_list(&symbol_conf.dso_list,
1945 		       symbol_conf.dso_list_str, "dso") < 0)
1946 		return -1;
1947 
1948 	if (setup_list(&symbol_conf.comm_list,
1949 		       symbol_conf.comm_list_str, "comm") < 0)
1950 		goto out_free_dso_list;
1951 
1952 	if (setup_intlist(&symbol_conf.pid_list,
1953 		       symbol_conf.pid_list_str, "pid") < 0)
1954 		goto out_free_comm_list;
1955 
1956 	if (setup_intlist(&symbol_conf.tid_list,
1957 		       symbol_conf.tid_list_str, "tid") < 0)
1958 		goto out_free_pid_list;
1959 
1960 	if (setup_list(&symbol_conf.sym_list,
1961 		       symbol_conf.sym_list_str, "symbol") < 0)
1962 		goto out_free_tid_list;
1963 
1964 	/*
1965 	 * A path to symbols of "/" is identical to ""
1966 	 * reset here for simplicity.
1967 	 */
1968 	symfs = realpath(symbol_conf.symfs, NULL);
1969 	if (symfs == NULL)
1970 		symfs = symbol_conf.symfs;
1971 	if (strcmp(symfs, "/") == 0)
1972 		symbol_conf.symfs = "";
1973 	if (symfs != symbol_conf.symfs)
1974 		free((void *)symfs);
1975 
1976 	symbol_conf.kptr_restrict = symbol__read_kptr_restrict();
1977 
1978 	symbol_conf.initialized = true;
1979 	return 0;
1980 
1981 out_free_tid_list:
1982 	intlist__delete(symbol_conf.tid_list);
1983 out_free_pid_list:
1984 	intlist__delete(symbol_conf.pid_list);
1985 out_free_comm_list:
1986 	strlist__delete(symbol_conf.comm_list);
1987 out_free_dso_list:
1988 	strlist__delete(symbol_conf.dso_list);
1989 	return -1;
1990 }
1991 
1992 void symbol__exit(void)
1993 {
1994 	if (!symbol_conf.initialized)
1995 		return;
1996 	strlist__delete(symbol_conf.sym_list);
1997 	strlist__delete(symbol_conf.dso_list);
1998 	strlist__delete(symbol_conf.comm_list);
1999 	intlist__delete(symbol_conf.tid_list);
2000 	intlist__delete(symbol_conf.pid_list);
2001 	vmlinux_path__exit();
2002 	symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL;
2003 	symbol_conf.initialized = false;
2004 }
2005