xref: /openbmc/linux/tools/perf/util/map.c (revision 7b6d864b)
#include "symbol.h"
#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>
#include "map.h"
#include "thread.h"
#include "strlist.h"
#include "vdso.h"
#include "build-id.h"
#include <linux/string.h>

const char *map_type__name[MAP__NR_TYPES] = {
	[MAP__FUNCTION] = "Functions",
	[MAP__VARIABLE] = "Variables",
};

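/*
 * Helpers that classify a mapping by the pathname reported in
 * /proc/<pid>/maps: is_anon_memory() matches the names the kernel uses
 * for anonymous mappings, is_no_dso_memory() matches the stack and heap
 * pseudo-entries; neither kind is backed by a real DSO on disk.
 */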
static inline int is_anon_memory(const char *filename)
{
	return !strcmp(filename, "//anon") ||
	       !strcmp(filename, "/dev/zero (deleted)") ||
	       !strcmp(filename, "/anon_hugepage (deleted)");
}

static inline int is_no_dso_memory(const char *filename)
{
	return !strncmp(filename, "[stack", 6) ||
	       !strcmp(filename, "[heap]");
}

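/*
 * Fill in an already allocated struct map. The default map_ip/unmap_ip
 * hooks translate between absolute addresses and DSO-relative addresses
 * using start and pgoff; callers dealing with anonymous or no-DSO memory
 * override them with identity__map_ip.
 */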
void map__init(struct map *map, enum map_type type,
	       u64 start, u64 end, u64 pgoff, struct dso *dso)
{
	map->type     = type;
	map->start    = start;
	map->end      = end;
	map->pgoff    = pgoff;
	map->dso      = dso;
	map->map_ip   = map__map_ip;
	map->unmap_ip = map__unmap_ip;
	RB_CLEAR_NODE(&map->rb_node);
	map->groups   = NULL;
	map->referenced = false;
	map->erange_warned = false;
}

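/*
 * Allocate a map for one /proc/<pid>/maps-style entry and find or create
 * the DSO that backs it on dsos__list. Anonymous mappings are redirected
 * to /tmp/perf-<pid>.map so JIT-generated symbols can be resolved, the
 * vDSO gets its synthesized DSO, and stack/heap segments are marked as
 * loaded so that later symbol lookups do not warn.
 */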
struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
		     u64 pgoff, u32 pid, char *filename,
		     enum map_type type)
{
	struct map *map = malloc(sizeof(*map));

	if (map != NULL) {
		char newfilename[PATH_MAX];
		struct dso *dso;
		int anon, no_dso, vdso;

		anon = is_anon_memory(filename);
		vdso = is_vdso_map(filename);
		no_dso = is_no_dso_memory(filename);

		if (anon) {
			snprintf(newfilename, sizeof(newfilename), "/tmp/perf-%d.map", pid);
			filename = newfilename;
		}

		if (vdso) {
			pgoff = 0;
			dso = vdso__dso_findnew(dsos__list);
		} else
			dso = __dsos__findnew(dsos__list, filename);

		if (dso == NULL)
			goto out_delete;

		map__init(map, type, start, start + len, pgoff, dso);

		if (anon || no_dso) {
			map->map_ip = map->unmap_ip = identity__map_ip;

			/*
			 * Set memory without DSO as loaded. All map__find_*
			 * functions still return NULL, and we avoid the
			 * unnecessary map__load warning.
			 */
			if (no_dso)
				dso__set_loaded(dso, map->type);
		}
	}
	return map;
out_delete:
	free(map);
	return NULL;
}

/*
 * Constructor variant for modules (where we know from /proc/modules where
 * they are loaded) and for vmlinux, where we only know where it starts and
 * ends after we load all the symbols.
 */
struct map *map__new2(u64 start, struct dso *dso, enum map_type type)
{
	struct map *map = calloc(1, (sizeof(*map) +
				     (dso->kernel ? sizeof(struct kmap) : 0)));
	if (map != NULL) {
		/*
		 * ->end will be filled after we load all the symbols
		 */
		map__init(map, type, start, 0, 0, dso);
	}

	return map;
}

void map__delete(struct map *map)
{
	free(map);
}

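/*
 * After symbols have been loaded, adjust the map boundaries to the first
 * (map__fixup_start) and last (map__fixup_end) symbol in the DSO's symbol
 * rbtree; map__new2() leaves ->end unset for exactly this reason.
 */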
void map__fixup_start(struct map *map)
{
	struct rb_root *symbols = &map->dso->symbols[map->type];
	struct rb_node *nd = rb_first(symbols);
	if (nd != NULL) {
		struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
		map->start = sym->start;
	}
}

void map__fixup_end(struct map *map)
{
	struct rb_root *symbols = &map->dso->symbols[map->type];
	struct rb_node *nd = rb_last(symbols);
	if (nd != NULL) {
		struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
		map->end = sym->end;
	}
}

#define DSO__DELETED "(deleted)"

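/*
 * Load the symbol table for this map's DSO if it has not been loaded yet.
 * Returns 0 on success and -1 (after printing a warning) when the DSO
 * cannot be opened or contains no symbols. Kernel maps additionally get
 * their addresses adjusted via map__reloc_vmlinux().
 */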
int map__load(struct map *map, symbol_filter_t filter)
{
	const char *name = map->dso->long_name;
	int nr;

	if (dso__loaded(map->dso, map->type))
		return 0;

	nr = dso__load(map->dso, map, filter);
	if (nr < 0) {
		if (map->dso->has_build_id) {
			char sbuild_id[BUILD_ID_SIZE * 2 + 1];

			build_id__sprintf(map->dso->build_id,
					  sizeof(map->dso->build_id),
					  sbuild_id);
			pr_warning("%s with build id %s not found",
				   name, sbuild_id);
		} else
			pr_warning("Failed to open %s", name);

		pr_warning(", continuing without symbols\n");
		return -1;
	} else if (nr == 0) {
#ifdef LIBELF_SUPPORT
		const size_t len = strlen(name);
		const size_t real_len = len - sizeof(DSO__DELETED);

		if (len > sizeof(DSO__DELETED) &&
		    strcmp(name + real_len + 1, DSO__DELETED) == 0) {
			pr_warning("%.*s was updated (is prelink enabled?). "
				"Restart the long running apps that use it!\n",
				   (int)real_len, name);
		} else {
			pr_warning("no symbols found in %s, maybe install "
				   "a debug package?\n", name);
		}
#endif
		return -1;
	}
	/*
	 * Only applies to the kernel, as its symtabs aren't relative like the
	 * module ones.
	 */
	if (map->dso->kernel)
		map__reloc_vmlinux(map);

	return 0;
}

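/*
 * Symbol lookup wrappers that lazily load the DSO's symbols first:
 * map__find_symbol() looks up by address within the DSO, while
 * map__find_symbol_by_name() sorts the symbols by name on first use and
 * then looks up by name.
 */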
struct symbol *map__find_symbol(struct map *map, u64 addr,
				symbol_filter_t filter)
{
	if (map__load(map, filter) < 0)
		return NULL;

	return dso__find_symbol(map->dso, map->type, addr);
}

struct symbol *map__find_symbol_by_name(struct map *map, const char *name,
					symbol_filter_t filter)
{
	if (map__load(map, filter) < 0)
		return NULL;

	if (!dso__sorted_by_name(map->dso, map->type))
		dso__sort_by_name(map->dso, map->type);

	return dso__find_symbol_by_name(map->dso, map->type, name);
}

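/*
 * map__clone() makes a shallow copy (the DSO pointer is shared, not
 * duplicated); map__overlap() reports whether two maps cover intersecting
 * address ranges.
 */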
struct map *map__clone(struct map *map)
{
	return memdup(map, sizeof(*map));
}

int map__overlap(struct map *l, struct map *r)
{
	if (l->start > r->start) {
		struct map *t = l;
		l = r;
		r = t;
	}

	if (l->end > r->start)
		return 1;

	return 0;
}

size_t map__fprintf(struct map *map, FILE *fp)
{
	return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n",
		       map->start, map->end, map->pgoff, map->dso->name);
}

size_t map__fprintf_dsoname(struct map *map, FILE *fp)
{
	const char *dsoname = "[unknown]";

	if (map && map->dso && (map->dso->name || map->dso->long_name)) {
		if (symbol_conf.show_kernel_path && map->dso->long_name)
			dsoname = map->dso->long_name;
		else if (map->dso->name)
			dsoname = map->dso->name;
	}

	return fprintf(fp, "%s", dsoname);
}

/*
 * objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN.
 * map->dso->adjust_symbols==1 for ET_EXEC-like cases.
 */
u64 map__rip_2objdump(struct map *map, u64 rip)
{
	u64 addr = map->dso->adjust_symbols ?
			map->unmap_ip(map, rip) :	/* RIP -> IP */
			rip;
	return addr;
}

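/*
 * A map_groups holds one rbtree of live maps per map_type plus a list of
 * removed maps that may still be referenced elsewhere (e.g. by hist
 * entries). map_groups__init() sets up the empty trees and lists,
 * map_groups__exit() frees everything in them.
 */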
void map_groups__init(struct map_groups *mg)
{
	int i;
	for (i = 0; i < MAP__NR_TYPES; ++i) {
		mg->maps[i] = RB_ROOT;
		INIT_LIST_HEAD(&mg->removed_maps[i]);
	}
	mg->machine = NULL;
}

static void maps__delete(struct rb_root *maps)
{
	struct rb_node *next = rb_first(maps);

	while (next) {
		struct map *pos = rb_entry(next, struct map, rb_node);

		next = rb_next(&pos->rb_node);
		rb_erase(&pos->rb_node, maps);
		map__delete(pos);
	}
}

static void maps__delete_removed(struct list_head *maps)
{
	struct map *pos, *n;

	list_for_each_entry_safe(pos, n, maps, node) {
		list_del(&pos->node);
		map__delete(pos);
	}
}

void map_groups__exit(struct map_groups *mg)
{
	int i;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		maps__delete(&mg->maps[i]);
		maps__delete_removed(&mg->removed_maps[i]);
	}
}

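/*
 * Empty every maps rbtree, but instead of freeing the maps, park them on
 * the corresponding removed_maps list: other objects may still hold
 * pointers to them, and map_groups__exit() will free them later.
 */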
void map_groups__flush(struct map_groups *mg)
{
	int type;

	for (type = 0; type < MAP__NR_TYPES; type++) {
		struct rb_root *root = &mg->maps[type];
		struct rb_node *next = rb_first(root);

		while (next) {
			struct map *pos = rb_entry(next, struct map, rb_node);
			next = rb_next(&pos->rb_node);
			rb_erase(&pos->rb_node, root);
			/*
			 * We may have references to this map, for
			 * instance in some hist_entry instances, so
			 * just move them to a separate list.
			 */
			list_add_tail(&pos->node, &mg->removed_maps[pos->type]);
		}
	}
}

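/*
 * Resolve a symbol across a whole map_groups. The address variant first
 * finds the containing map and then translates the address into the DSO
 * with map->map_ip(); the name variant has to walk every map of the given
 * type until one of its DSOs knows the name. Typical (illustrative) use:
 *
 *	struct map *map;
 *	struct symbol *sym = map_groups__find_symbol(mg, MAP__FUNCTION,
 *						     ip, &map, NULL);
 */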
struct symbol *map_groups__find_symbol(struct map_groups *mg,
				       enum map_type type, u64 addr,
				       struct map **mapp,
				       symbol_filter_t filter)
{
	struct map *map = map_groups__find(mg, type, addr);

	if (map != NULL) {
		if (mapp != NULL)
			*mapp = map;
		return map__find_symbol(map, map->map_ip(map, addr), filter);
	}

	return NULL;
}

struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
					       enum map_type type,
					       const char *name,
					       struct map **mapp,
					       symbol_filter_t filter)
{
	struct rb_node *nd;

	for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node);
		struct symbol *sym = map__find_symbol_by_name(pos, name, filter);

		if (sym == NULL)
			continue;
		if (mapp != NULL)
			*mapp = pos;
		return sym;
	}

	return NULL;
}

size_t __map_groups__fprintf_maps(struct map_groups *mg,
				  enum map_type type, int verbose, FILE *fp)
{
	size_t printed = fprintf(fp, "%s:\n", map_type__name[type]);
	struct rb_node *nd;

	for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node);
		printed += fprintf(fp, "Map:");
		printed += map__fprintf(pos, fp);
		if (verbose > 2) {
			printed += dso__fprintf(pos->dso, type, fp);
			printed += fprintf(fp, "--\n");
		}
	}

	return printed;
}

size_t map_groups__fprintf_maps(struct map_groups *mg, int verbose, FILE *fp)
{
	size_t printed = 0, i;
	for (i = 0; i < MAP__NR_TYPES; ++i)
		printed += __map_groups__fprintf_maps(mg, i, verbose, fp);
	return printed;
}

static size_t __map_groups__fprintf_removed_maps(struct map_groups *mg,
						 enum map_type type,
						 int verbose, FILE *fp)
{
	struct map *pos;
	size_t printed = 0;

	list_for_each_entry(pos, &mg->removed_maps[type], node) {
		printed += fprintf(fp, "Map:");
		printed += map__fprintf(pos, fp);
		if (verbose > 1) {
			printed += dso__fprintf(pos->dso, type, fp);
			printed += fprintf(fp, "--\n");
		}
	}
	return printed;
}

static size_t map_groups__fprintf_removed_maps(struct map_groups *mg,
					       int verbose, FILE *fp)
{
	size_t printed = 0, i;
	for (i = 0; i < MAP__NR_TYPES; ++i)
		printed += __map_groups__fprintf_removed_maps(mg, i, verbose, fp);
	return printed;
}

size_t map_groups__fprintf(struct map_groups *mg, int verbose, FILE *fp)
{
	size_t printed = map_groups__fprintf_maps(mg, verbose, fp);
	printed += fprintf(fp, "Removed maps:\n");
	return printed + map_groups__fprintf_removed_maps(mg, verbose, fp);
}

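/*
 * Called before inserting a new map: any existing map that overlaps it is
 * removed from the tree and, if the new map only partially covers it, the
 * non-overlapped head and/or tail is re-inserted as a cloned map. Removed
 * maps that are still referenced are parked on removed_maps rather than
 * freed.
 */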
int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
				   int verbose, FILE *fp)
{
	struct rb_root *root = &mg->maps[map->type];
	struct rb_node *next = rb_first(root);
	int err = 0;

	while (next) {
		struct map *pos = rb_entry(next, struct map, rb_node);
		next = rb_next(&pos->rb_node);

		if (!map__overlap(pos, map))
			continue;

		if (verbose >= 2) {
			fputs("overlapping maps:\n", fp);
			map__fprintf(map, fp);
			map__fprintf(pos, fp);
		}

		rb_erase(&pos->rb_node, root);
		/*
		 * Now check if we need to create new maps for areas not
		 * overlapped by the new map:
		 */
		if (map->start > pos->start) {
			struct map *before = map__clone(pos);

			if (before == NULL) {
				err = -ENOMEM;
				goto move_map;
			}

			before->end = map->start - 1;
			map_groups__insert(mg, before);
			if (verbose >= 2)
				map__fprintf(before, fp);
		}

		if (map->end < pos->end) {
			struct map *after = map__clone(pos);

			if (after == NULL) {
				err = -ENOMEM;
				goto move_map;
			}

			after->start = map->end + 1;
			map_groups__insert(mg, after);
			if (verbose >= 2)
				map__fprintf(after, fp);
		}
move_map:
		/*
		 * If we have references, just move them to a separate list.
		 */
		if (pos->referenced)
			list_add_tail(&pos->node, &mg->removed_maps[map->type]);
		else
			map__delete(pos);

		if (err)
			return err;
	}

	return 0;
}

/*
 * XXX This should not really _copy_ the maps, but refcount them.
 */
int map_groups__clone(struct map_groups *mg,
		      struct map_groups *parent, enum map_type type)
{
	struct rb_node *nd;
	for (nd = rb_first(&parent->maps[type]); nd; nd = rb_next(nd)) {
		struct map *map = rb_entry(nd, struct map, rb_node);
		struct map *new = map__clone(map);
		if (new == NULL)
			return -ENOMEM;
		map_groups__insert(mg, new);
	}
	return 0;
}

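/*
 * Kernel relocation: if the reference symbol (typically _text or _stext)
 * sits at a different address in the loaded symtab than where it was
 * observed on the running kernel, store the delta in ->pgoff and switch
 * the map_ip/unmap_ip hooks to ones that just add or subtract it.
 */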
static u64 map__reloc_map_ip(struct map *map, u64 ip)
{
	return ip + (s64)map->pgoff;
}

static u64 map__reloc_unmap_ip(struct map *map, u64 ip)
{
	return ip - (s64)map->pgoff;
}

void map__reloc_vmlinux(struct map *map)
{
	struct kmap *kmap = map__kmap(map);
	s64 reloc;

	if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->unrelocated_addr)
		return;

	reloc = (kmap->ref_reloc_sym->unrelocated_addr -
		 kmap->ref_reloc_sym->addr);

	if (!reloc)
		return;

	map->map_ip   = map__reloc_map_ip;
	map->unmap_ip = map__reloc_unmap_ip;
	map->pgoff    = reloc;
}

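/*
 * The maps rbtree is keyed by start address; maps__insert() assumes the
 * caller has already resolved overlaps (map_groups__fixup_overlappings),
 * so it only has to walk down the tree and link the new node.
 */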
void maps__insert(struct rb_root *maps, struct map *map)
{
	struct rb_node **p = &maps->rb_node;
	struct rb_node *parent = NULL;
	const u64 ip = map->start;
	struct map *m;

	while (*p != NULL) {
		parent = *p;
		m = rb_entry(parent, struct map, rb_node);
		if (ip < m->start)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&map->rb_node, parent, p);
	rb_insert_color(&map->rb_node, maps);
}

void maps__remove(struct rb_root *maps, struct map *map)
{
	rb_erase(&map->rb_node, maps);
}

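/*
 * Binary search over the rbtree: returns the map whose [start, end] range
 * contains ip, or NULL if no map covers that address.
 */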
struct map *maps__find(struct rb_root *maps, u64 ip)
{
	struct rb_node **p = &maps->rb_node;
	struct rb_node *parent = NULL;
	struct map *m;

	while (*p != NULL) {
		parent = *p;
		m = rb_entry(parent, struct map, rb_node);
		if (ip < m->start)
			p = &(*p)->rb_left;
		else if (ip > m->end)
			p = &(*p)->rb_right;
		else
			return m;
	}

	return NULL;
}