// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <stdlib.h>
#include <linux/zalloc.h>
#include "debug.h"
#include "dso.h"
#include "map.h"
#include "maps.h"
#include "thread.h"
#include "ui/ui.h"
#include "unwind.h"

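/*
 * Initialize a newly allocated maps: take the initial reference, set up the
 * rw_semaphore and start with an empty rbtree and no by-name array.
 */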
static void maps__init(struct maps *maps, struct machine *machine)
{
	refcount_set(maps__refcnt(maps), 1);
	init_rwsem(maps__lock(maps));
	RC_CHK_ACCESS(maps)->entries = RB_ROOT;
	RC_CHK_ACCESS(maps)->machine = machine;
	RC_CHK_ACCESS(maps)->last_search_by_name = NULL;
	RC_CHK_ACCESS(maps)->nr_maps = 0;
	RC_CHK_ACCESS(maps)->maps_by_name = NULL;
}

static void __maps__free_maps_by_name(struct maps *maps)
{
	/*
	 * Free everything so that the next search by name falls back to the
	 * rbtree and rebuilds this array.
	 */
	for (unsigned int i = 0; i < maps__nr_maps(maps); i++)
		map__put(maps__maps_by_name(maps)[i]);

	zfree(&RC_CHK_ACCESS(maps)->maps_by_name);
	RC_CHK_ACCESS(maps)->nr_maps_allocated = 0;
}

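/*
 * Insert map into the rbtree ordered by start address, taking a reference on
 * the map. Callers are expected to hold the maps lock for writing.
 */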
static int __maps__insert(struct maps *maps, struct map *map)
{
	struct rb_node **p = &maps__entries(maps)->rb_node;
	struct rb_node *parent = NULL;
	const u64 ip = map__start(map);
	struct map_rb_node *m, *new_rb_node;

	new_rb_node = malloc(sizeof(*new_rb_node));
	if (!new_rb_node)
		return -ENOMEM;

	RB_CLEAR_NODE(&new_rb_node->rb_node);
	new_rb_node->map = map__get(map);

	while (*p != NULL) {
		parent = *p;
		m = rb_entry(parent, struct map_rb_node, rb_node);
		if (ip < map__start(m->map))
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&new_rb_node->rb_node, parent, p);
	rb_insert_color(&new_rb_node->rb_node, maps__entries(maps));
	return 0;
}

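/*
 * Insert map under the write lock: add it to the rbtree, account it in
 * nr_maps, hook kernel maps up to their kmaps and, if a by-name array already
 * exists, grow it as needed and re-sort it.
 */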
int maps__insert(struct maps *maps, struct map *map)
{
	int err;
	const struct dso *dso = map__dso(map);

	down_write(maps__lock(maps));
	err = __maps__insert(maps, map);
	if (err)
		goto out;

	++RC_CHK_ACCESS(maps)->nr_maps;

	if (dso && dso->kernel) {
		struct kmap *kmap = map__kmap(map);

		if (kmap)
			kmap->kmaps = maps;
		else
			pr_err("Internal error: kernel dso with non kernel map\n");
	}


	/*
	 * If we already performed some search by name, then we need to add the just
	 * inserted map and resort.
	 */
	if (maps__maps_by_name(maps)) {
		if (maps__nr_maps(maps) > RC_CHK_ACCESS(maps)->nr_maps_allocated) {
			int nr_allocate = maps__nr_maps(maps) * 2;
			struct map **maps_by_name = realloc(maps__maps_by_name(maps),
							    nr_allocate * sizeof(map));

			if (maps_by_name == NULL) {
				__maps__free_maps_by_name(maps);
				err = -ENOMEM;
				goto out;
			}

			RC_CHK_ACCESS(maps)->maps_by_name = maps_by_name;
			RC_CHK_ACCESS(maps)->nr_maps_allocated = nr_allocate;
		}
		maps__maps_by_name(maps)[maps__nr_maps(maps) - 1] = map__get(map);
		__maps__sort_by_name(maps);
	}
 out:
	up_write(maps__lock(maps));
	return err;
}

static void __maps__remove(struct maps *maps, struct map_rb_node *rb_node)
{
	rb_erase_init(&rb_node->rb_node, maps__entries(maps));
	map__put(rb_node->map);
	free(rb_node);
}

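/*
 * Remove map under the write lock: invalidate the cached last_search_by_name
 * result if it points at this map, erase and free its rbtree node and drop
 * the by-name array so it gets rebuilt on the next lookup.
 */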
void maps__remove(struct maps *maps, struct map *map)
{
	struct map_rb_node *rb_node;

	down_write(maps__lock(maps));
	if (RC_CHK_ACCESS(maps)->last_search_by_name == map)
		RC_CHK_ACCESS(maps)->last_search_by_name = NULL;

	rb_node = maps__find_node(maps, map);
	assert(rb_node->RC_CHK_ACCESS(map) == RC_CHK_ACCESS(map));
	__maps__remove(maps, rb_node);
	if (maps__maps_by_name(maps))
		__maps__free_maps_by_name(maps);
	--RC_CHK_ACCESS(maps)->nr_maps;
	up_write(maps__lock(maps));
}

static void __maps__purge(struct maps *maps)
{
	struct map_rb_node *pos, *next;

	if (maps__maps_by_name(maps))
		__maps__free_maps_by_name(maps);

	maps__for_each_entry_safe(maps, pos, next) {
		rb_erase_init(&pos->rb_node, maps__entries(maps));
		map__put(pos->map);
		free(pos);
	}
}

static void maps__exit(struct maps *maps)
{
	down_write(maps__lock(maps));
	__maps__purge(maps);
	up_write(maps__lock(maps));
}

bool maps__empty(struct maps *maps)
{
	return !maps__first(maps);
}

struct maps *maps__new(struct machine *machine)
{
	struct maps *result;
	RC_STRUCT(maps) *maps = zalloc(sizeof(*maps));

	if (ADD_RC_CHK(result, maps))
		maps__init(result, machine);

	return result;
}

static void maps__delete(struct maps *maps)
{
	maps__exit(maps);
	unwind__finish_access(maps);
	RC_CHK_FREE(maps);
}

struct maps *maps__get(struct maps *maps)
{
	struct maps *result;

	if (RC_CHK_GET(result, maps))
		refcount_inc(maps__refcnt(maps));

	return result;
}

void maps__put(struct maps *maps)
{
	if (maps && refcount_dec_and_test(maps__refcnt(maps)))
		maps__delete(maps);
	else
		RC_CHK_PUT(maps);
}

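/*
 * Find the map covering addr and look up the symbol at the corresponding
 * map-relative address. If mapp is non-NULL the map is also returned.
 */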
struct symbol *maps__find_symbol(struct maps *maps, u64 addr, struct map **mapp)
{
	struct map *map = maps__find(maps, addr);

	/* Ensure map is loaded before using map->map_ip */
	if (map != NULL && map__load(map) >= 0) {
		if (mapp != NULL)
			*mapp = map;
		return map__find_symbol(map, map__map_ip(map, addr));
	}

	return NULL;
}

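/*
 * Search every map for a symbol with the given name, skipping hits that fall
 * outside the map that produced them. If mapp is non-NULL the matching map is
 * also returned.
 */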
struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, struct map **mapp)
{
	struct symbol *sym;
	struct map_rb_node *pos;

	down_read(maps__lock(maps));

	maps__for_each_entry(maps, pos) {
		sym = map__find_symbol_by_name(pos->map, name);

		if (sym == NULL)
			continue;
		if (!map__contains_symbol(pos->map, sym)) {
			sym = NULL;
			continue;
		}
		if (mapp != NULL)
			*mapp = pos->map;
		goto out;
	}

	sym = NULL;
out:
	up_read(maps__lock(maps));
	return sym;
}

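/*
 * Resolve an addr_map_symbol: if ams->addr lies outside the current map, look
 * the address up again in maps, then fill in the map-relative address and the
 * symbol. Returns 0 on success, -1 if no map or symbol could be found.
 */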
int maps__find_ams(struct maps *maps, struct addr_map_symbol *ams)
{
	if (ams->addr < map__start(ams->ms.map) || ams->addr >= map__end(ams->ms.map)) {
		if (maps == NULL)
			return -1;
		ams->ms.map = maps__find(maps, ams->addr);
		if (ams->ms.map == NULL)
			return -1;
	}

	ams->al_addr = map__map_ip(ams->ms.map, ams->addr);
	ams->ms.sym = map__find_symbol(ams->ms.map, ams->al_addr);

	return ams->ms.sym ? 0 : -1;
}

size_t maps__fprintf(struct maps *maps, FILE *fp)
{
	size_t printed = 0;
	struct map_rb_node *pos;

	down_read(maps__lock(maps));

	maps__for_each_entry(maps, pos) {
		printed += fprintf(fp, "Map:");
		printed += map__fprintf(pos->map, fp);
		if (verbose > 2) {
			printed += dso__fprintf(map__dso(pos->map), fp);
			printed += fprintf(fp, "--\n");
		}
	}

	up_read(maps__lock(maps));

	return printed;
}

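/*
 * Fix up overlaps caused by map: every existing entry intersecting
 * [map->start, map->end) is removed and, where the old entry extends beyond
 * the new map, clipped "before"/"after" clones are inserted in its place.
 */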
int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
{
	struct rb_root *root;
	struct rb_node *next, *first;
	int err = 0;

	down_write(maps__lock(maps));

	root = maps__entries(maps);

	/*
	 * Find first map where end > map->start.
	 * Same as find_vma() in kernel.
	 */
	next = root->rb_node;
	first = NULL;
	while (next) {
		struct map_rb_node *pos = rb_entry(next, struct map_rb_node, rb_node);

		if (map__end(pos->map) > map__start(map)) {
			first = next;
			if (map__start(pos->map) <= map__start(map))
				break;
			next = next->rb_left;
		} else
			next = next->rb_right;
	}

	next = first;
	while (next && !err) {
		struct map_rb_node *pos = rb_entry(next, struct map_rb_node, rb_node);
		next = rb_next(&pos->rb_node);

		/*
		 * Stop if current map starts after map->end.
		 * Maps are ordered by start: next will not overlap for sure.
		 */
		if (map__start(pos->map) >= map__end(map))
			break;

		if (verbose >= 2) {

			if (use_browser) {
				pr_debug("overlapping maps in %s (disable tui for more info)\n",
					 map__dso(map)->name);
			} else {
				fputs("overlapping maps:\n", fp);
				map__fprintf(map, fp);
				map__fprintf(pos->map, fp);
			}
		}

		rb_erase_init(&pos->rb_node, root);
		/*
		 * Now check if we need to create new maps for areas not
		 * overlapped by the new map:
		 */
		if (map__start(map) > map__start(pos->map)) {
			struct map *before = map__clone(pos->map);

			if (before == NULL) {
				err = -ENOMEM;
				goto put_map;
			}

			map__set_end(before, map__start(map));
			err = __maps__insert(maps, before);
			if (err) {
				map__put(before);
				goto put_map;
			}

			if (verbose >= 2 && !use_browser)
				map__fprintf(before, fp);
			map__put(before);
		}

		if (map__end(map) < map__end(pos->map)) {
			struct map *after = map__clone(pos->map);

			if (after == NULL) {
				err = -ENOMEM;
				goto put_map;
			}

			map__set_start(after, map__end(map));
			map__add_pgoff(after, map__end(map) - map__start(pos->map));
			assert(map__map_ip(pos->map, map__end(map)) ==
				map__map_ip(after, map__end(map)));
			err = __maps__insert(maps, after);
			if (err) {
				map__put(after);
				goto put_map;
			}
			if (verbose >= 2 && !use_browser)
				map__fprintf(after, fp);
			map__put(after);
		}
put_map:
		map__put(pos->map);
		free(pos);
	}
	up_write(maps__lock(maps));
	return err;
}

/*
 * XXX This should not really _copy_ the maps, but refcount them.
 */
int maps__clone(struct thread *thread, struct maps *parent)
{
	struct maps *maps = thread__maps(thread);
	int err;
	struct map_rb_node *rb_node;

	down_read(maps__lock(parent));

	maps__for_each_entry(parent, rb_node) {
		struct map *new = map__clone(rb_node->map);

		if (new == NULL) {
			err = -ENOMEM;
			goto out_unlock;
		}

		err = unwind__prepare_access(maps, new, NULL);
		if (err)
			goto out_unlock;

		err = maps__insert(maps, new);
		if (err)
			goto out_unlock;

		map__put(new);
	}

	err = 0;
out_unlock:
	up_read(maps__lock(parent));
	return err;
}

struct map_rb_node *maps__find_node(struct maps *maps, struct map *map)
{
	struct map_rb_node *rb_node;

	maps__for_each_entry(maps, rb_node) {
		if (rb_node->RC_CHK_ACCESS(map) == RC_CHK_ACCESS(map))
			return rb_node;
	}
	return NULL;
}

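/*
 * Binary search the rbtree for the map containing ip, i.e. the one with
 * start <= ip < end. Returns NULL if no map covers the address.
 */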
struct map *maps__find(struct maps *maps, u64 ip)
{
	struct rb_node *p;
	struct map_rb_node *m;


	down_read(maps__lock(maps));

	p = maps__entries(maps)->rb_node;
	while (p != NULL) {
		m = rb_entry(p, struct map_rb_node, rb_node);
		if (ip < map__start(m->map))
			p = p->rb_left;
		else if (ip >= map__end(m->map))
			p = p->rb_right;
		else
			goto out;
	}

	m = NULL;
out:
	up_read(maps__lock(maps));
	return m ? m->map : NULL;
}

struct map_rb_node *maps__first(struct maps *maps)
{
	struct rb_node *first = rb_first(maps__entries(maps));

	if (first)
		return rb_entry(first, struct map_rb_node, rb_node);
	return NULL;
}

struct map_rb_node *map_rb_node__next(struct map_rb_node *node)
{
	struct rb_node *next;

	if (!node)
		return NULL;

	next = rb_next(&node->rb_node);

	if (!next)
		return NULL;

	return rb_entry(next, struct map_rb_node, rb_node);
}

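/*
 * qsort()/bsearch() comparator for the by-name array: order by DSO short name,
 * then by start address and finally by pointer so that distinct maps with
 * equal names still sort deterministically.
 */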
static int map__strcmp(const void *a, const void *b)
{
	const struct map *map_a = *(const struct map **)a;
	const struct map *map_b = *(const struct map **)b;
	const struct dso *dso_a = map__dso(map_a);
	const struct dso *dso_b = map__dso(map_b);
	int ret = strcmp(dso_a->short_name, dso_b->short_name);

	if (ret == 0 && map_a != map_b) {
		/*
		 * Ensure distinct but name equal maps have an order in part to
		 * aid reference counting.
		 */
		ret = (int)map__start(map_a) - (int)map__start(map_b);
		if (ret == 0)
			ret = (int)((intptr_t)map_a - (intptr_t)map_b);
	}

	return ret;
}

static int map__strcmp_name(const void *name, const void *b)
{
	const struct dso *dso = map__dso(*(const struct map **)b);

	return strcmp(name, dso->short_name);
}

void __maps__sort_by_name(struct maps *maps)
{
	qsort(maps__maps_by_name(maps), maps__nr_maps(maps), sizeof(struct map *), map__strcmp);
}

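/*
 * Build the by-name array from the rbtree and sort it. Entered with the maps
 * lock held for reading; the read lock is dropped and a write lock taken while
 * the array is populated, then swapped back before returning.
 */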
static int map__groups__sort_by_name_from_rbtree(struct maps *maps)
{
	struct map_rb_node *rb_node;
	struct map **maps_by_name = realloc(maps__maps_by_name(maps),
					    maps__nr_maps(maps) * sizeof(struct map *));
	int i = 0;

	if (maps_by_name == NULL)
		return -1;

	up_read(maps__lock(maps));
	down_write(maps__lock(maps));

	RC_CHK_ACCESS(maps)->maps_by_name = maps_by_name;
	RC_CHK_ACCESS(maps)->nr_maps_allocated = maps__nr_maps(maps);

	maps__for_each_entry(maps, rb_node)
		maps_by_name[i++] = map__get(rb_node->map);

	__maps__sort_by_name(maps);

	up_write(maps__lock(maps));
	down_read(maps__lock(maps));

	return 0;
}

static struct map *__maps__find_by_name(struct maps *maps, const char *name)
{
	struct map **mapp;

	if (maps__maps_by_name(maps) == NULL &&
	    map__groups__sort_by_name_from_rbtree(maps))
		return NULL;

	mapp = bsearch(name, maps__maps_by_name(maps), maps__nr_maps(maps),
		       sizeof(*mapp), map__strcmp_name);
	if (mapp)
		return *mapp;
	return NULL;
}

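/*
 * Find a map by DSO short name. Tries the cached result of the last search
 * first, then the sorted by-name array, and finally falls back to walking the
 * rbtree, caching any hit for the next call.
 */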
struct map *maps__find_by_name(struct maps *maps, const char *name)
{
	struct map_rb_node *rb_node;
	struct map *map;

	down_read(maps__lock(maps));


	if (RC_CHK_ACCESS(maps)->last_search_by_name) {
		const struct dso *dso = map__dso(RC_CHK_ACCESS(maps)->last_search_by_name);

		if (strcmp(dso->short_name, name) == 0) {
			map = RC_CHK_ACCESS(maps)->last_search_by_name;
			goto out_unlock;
		}
	}
	/*
	 * If we have maps->maps_by_name, then the name isn't in the rbtree,
	 * as maps->maps_by_name mirrors the rbtree when lookups by name are
	 * made.
	 */
	map = __maps__find_by_name(maps, name);
	if (map || maps__maps_by_name(maps) != NULL)
		goto out_unlock;

	/* Fallback to traversing the rbtree... */
	maps__for_each_entry(maps, rb_node) {
		struct dso *dso;

		map = rb_node->map;
		dso = map__dso(map);
		if (strcmp(dso->short_name, name) == 0) {
			RC_CHK_ACCESS(maps)->last_search_by_name = map;
			goto out_unlock;
		}
	}
	map = NULL;

out_unlock:
	up_read(maps__lock(maps));
	return map;
}

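/*
 * Give maps without an end address a plausible one: each open-ended map is
 * terminated at the start of the next map, and an open-ended last map at
 * ~0ULL.
 */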
void maps__fixup_end(struct maps *maps)
{
	struct map_rb_node *prev = NULL, *curr;

	down_write(maps__lock(maps));

	maps__for_each_entry(maps, curr) {
		if (prev != NULL && !map__end(prev->map))
			map__set_end(prev->map, map__start(curr->map));

		prev = curr;
	}

	/*
	 * We still don't have the actual symbols, so guess the
	 * last map's final address.
	 */
	if (curr && !map__end(curr->map))
		map__set_end(curr->map, ~0ULL);

	up_write(maps__lock(maps));
}

/*
 * Merges map into maps by splitting the new map within the existing map
 * regions.
 */
int maps__merge_in(struct maps *kmaps, struct map *new_map)
{
	struct map_rb_node *rb_node;
	LIST_HEAD(merged);
	int err = 0;

	maps__for_each_entry(kmaps, rb_node) {
		struct map *old_map = rb_node->map;

		/* no overlap with this one */
		if (map__end(new_map) < map__start(old_map) ||
		    map__start(new_map) >= map__end(old_map))
			continue;

		if (map__start(new_map) < map__start(old_map)) {
			/*
			 * |new......
			 *       |old....
			 */
			if (map__end(new_map) < map__end(old_map)) {
				/*
				 * |new......|     -> |new..|
				 *       |old....| ->       |old....|
				 */
				map__set_end(new_map, map__start(old_map));
			} else {
				/*
				 * |new.............| -> |new..|       |new..|
				 *       |old....|    ->       |old....|
				 */
				struct map_list_node *m = map_list_node__new();

				if (!m) {
					err = -ENOMEM;
					goto out;
				}

				m->map = map__clone(new_map);
				if (!m->map) {
					free(m);
					err = -ENOMEM;
					goto out;
				}

				map__set_end(m->map, map__start(old_map));
				list_add_tail(&m->node, &merged);
				map__add_pgoff(new_map, map__end(old_map) - map__start(new_map));
				map__set_start(new_map, map__end(old_map));
			}
		} else {
			/*
			 *      |new......
			 * |old....
			 */
			if (map__end(new_map) < map__end(old_map)) {
				/*
				 *      |new..|   -> x
				 * |old.........| -> |old.........|
				 */
				map__put(new_map);
				new_map = NULL;
				break;
			} else {
				/*
				 *      |new......| ->         |new...|
				 * |old....|        -> |old....|
				 */
				map__add_pgoff(new_map, map__end(old_map) - map__start(new_map));
				map__set_start(new_map, map__end(old_map));
			}
		}
	}

out:
	while (!list_empty(&merged)) {
		struct map_list_node *old_node;

		old_node = list_entry(merged.next, struct map_list_node, node);
		list_del_init(&old_node->node);
		if (!err)
			err = maps__insert(kmaps, old_node->map);
		map__put(old_node->map);
		free(old_node);
	}

	if (new_map) {
		if (!err)
			err = maps__insert(kmaps, new_map);
		map__put(new_map);
	}
	return err;
}
716