xref: /openbmc/linux/tools/perf/util/maps.c (revision 40662333)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <errno.h>
3 #include <stdlib.h>
4 #include <linux/zalloc.h>
5 #include "debug.h"
6 #include "dso.h"
7 #include "map.h"
8 #include "maps.h"
9 #include "thread.h"
10 #include "ui/ui.h"
11 #include "unwind.h"
12 
13 static void __maps__insert(struct maps *maps, struct map *map);
14 
15 static void maps__init(struct maps *maps, struct machine *machine)
16 {
17 	maps->entries = RB_ROOT;
18 	init_rwsem(&maps->lock);
19 	maps->machine = machine;
20 	maps->last_search_by_name = NULL;
21 	maps->nr_maps = 0;
22 	maps->maps_by_name = NULL;
23 	refcount_set(&maps->refcnt, 1);
24 }
25 
26 static void __maps__free_maps_by_name(struct maps *maps)
27 {
28 	/*
29 	 * Free everything to try to do it from the rbtree in the next search
30 	 */
31 	zfree(&maps->maps_by_name);
32 	maps->nr_maps_allocated = 0;
33 }
34 
/*
 * Insert @map into @maps (taking a reference via __maps__insert()) and keep
 * the lazily-built maps_by_name lookup table in sync.  Serialized against
 * all readers and writers by maps->lock.
 */
void maps__insert(struct maps *maps, struct map *map)
{
	down_write(&maps->lock);
	__maps__insert(maps, map);
	++maps->nr_maps;

	/* Kernel maps need a backpointer from their kmap area to this maps. */
	if (map->dso && map->dso->kernel) {
		struct kmap *kmap = map__kmap(map);

		if (kmap)
			kmap->kmaps = maps;
		else
			pr_err("Internal error: kernel dso with non kernel map\n");
	}


	/*
	 * If we already performed some search by name, then we need to add the just
	 * inserted map and resort.
	 */
	if (maps->maps_by_name) {
		if (maps->nr_maps > maps->nr_maps_allocated) {
			/* Grow geometrically to amortize reallocation cost. */
			int nr_allocate = maps->nr_maps * 2;
			/* sizeof(map) is sizeof(struct map *): array of pointers. */
			struct map **maps_by_name = realloc(maps->maps_by_name, nr_allocate * sizeof(map));

			if (maps_by_name == NULL) {
				/*
				 * OOM: drop the by-name table entirely; a later
				 * name search rebuilds it from the rbtree, which
				 * already holds the new map.
				 */
				__maps__free_maps_by_name(maps);
				up_write(&maps->lock);
				return;
			}

			maps->maps_by_name = maps_by_name;
			maps->nr_maps_allocated = nr_allocate;
		}
		maps->maps_by_name[maps->nr_maps - 1] = map;
		__maps__sort_by_name(maps);
	}
	up_write(&maps->lock);
}
74 
/* Unlink @map from the rbtree and drop the tree's reference.
 * Caller must hold maps->lock for writing. */
static void __maps__remove(struct maps *maps, struct map *map)
{
	rb_erase_init(&map->rb_node, &maps->entries);
	map__put(map);
}
80 
/*
 * Remove @map from @maps and drop the tree's reference.  Cached by-name
 * state is invalidated: the last_search_by_name shortcut if it pointed at
 * @map, and the whole maps_by_name table (rebuilt lazily on the next name
 * search).
 */
void maps__remove(struct maps *maps, struct map *map)
{
	down_write(&maps->lock);
	if (maps->last_search_by_name == map)
		maps->last_search_by_name = NULL;

	__maps__remove(maps, map);
	--maps->nr_maps;
	if (maps->maps_by_name)
		__maps__free_maps_by_name(maps);
	up_write(&maps->lock);
}
93 
94 static void __maps__purge(struct maps *maps)
95 {
96 	struct map *pos, *next;
97 
98 	maps__for_each_entry_safe(maps, pos, next) {
99 		rb_erase_init(&pos->rb_node,  &maps->entries);
100 		map__put(pos);
101 	}
102 }
103 
/* Drop every map under the write lock; called when tearing @maps down. */
static void maps__exit(struct maps *maps)
{
	down_write(&maps->lock);
	__maps__purge(maps);
	up_write(&maps->lock);
}
110 
111 bool maps__empty(struct maps *maps)
112 {
113 	return !maps__first(maps);
114 }
115 
116 struct maps *maps__new(struct machine *machine)
117 {
118 	struct maps *maps = zalloc(sizeof(*maps));
119 
120 	if (maps != NULL)
121 		maps__init(maps, machine);
122 
123 	return maps;
124 }
125 
/*
 * Unconditionally destroy @maps: purge all maps, release the unwind state
 * associated with this address space, then free the structure itself.
 * Use maps__put() unless you know you hold the last reference.
 */
void maps__delete(struct maps *maps)
{
	maps__exit(maps);
	unwind__finish_access(maps);
	free(maps);
}
132 
133 void maps__put(struct maps *maps)
134 {
135 	if (maps && refcount_dec_and_test(&maps->refcnt))
136 		maps__delete(maps);
137 }
138 
139 struct symbol *maps__find_symbol(struct maps *maps, u64 addr, struct map **mapp)
140 {
141 	struct map *map = maps__find(maps, addr);
142 
143 	/* Ensure map is loaded before using map->map_ip */
144 	if (map != NULL && map__load(map) >= 0) {
145 		if (mapp != NULL)
146 			*mapp = map;
147 		return map__find_symbol(map, map->map_ip(map, addr));
148 	}
149 
150 	return NULL;
151 }
152 
153 struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, struct map **mapp)
154 {
155 	struct symbol *sym;
156 	struct map *pos;
157 
158 	down_read(&maps->lock);
159 
160 	maps__for_each_entry(maps, pos) {
161 		sym = map__find_symbol_by_name(pos, name);
162 
163 		if (sym == NULL)
164 			continue;
165 		if (!map__contains_symbol(pos, sym)) {
166 			sym = NULL;
167 			continue;
168 		}
169 		if (mapp != NULL)
170 			*mapp = pos;
171 		goto out;
172 	}
173 
174 	sym = NULL;
175 out:
176 	up_read(&maps->lock);
177 	return sym;
178 }
179 
/*
 * Resolve @ams->addr to a map and symbol.  If the current ams->ms.map does
 * not cover the address, re-look it up in @maps.
 * NOTE(review): ams->ms.map is dereferenced before any NULL check, so
 * callers presumably guarantee it is set on entry — confirm at call sites.
 * On success fills in ams->al_addr (map-relative address) and ams->ms.sym.
 * Returns 0 when a symbol was found, -1 otherwise.
 */
int maps__find_ams(struct maps *maps, struct addr_map_symbol *ams)
{
	if (ams->addr < ams->ms.map->start || ams->addr >= ams->ms.map->end) {
		if (maps == NULL)
			return -1;
		ams->ms.map = maps__find(maps, ams->addr);
		if (ams->ms.map == NULL)
			return -1;
	}

	ams->al_addr = ams->ms.map->map_ip(ams->ms.map, ams->addr);
	ams->ms.sym = map__find_symbol(ams->ms.map, ams->al_addr);

	return ams->ms.sym ? 0 : -1;
}
195 
196 size_t maps__fprintf(struct maps *maps, FILE *fp)
197 {
198 	size_t printed = 0;
199 	struct map *pos;
200 
201 	down_read(&maps->lock);
202 
203 	maps__for_each_entry(maps, pos) {
204 		printed += fprintf(fp, "Map:");
205 		printed += map__fprintf(pos, fp);
206 		if (verbose > 2) {
207 			printed += dso__fprintf(pos->dso, fp);
208 			printed += fprintf(fp, "--\n");
209 		}
210 	}
211 
212 	up_read(&maps->lock);
213 
214 	return printed;
215 }
216 
/*
 * Make room for @map: remove every existing map overlapping
 * [map->start, map->end) from @maps, re-inserting clones of any
 * non-overlapped head/tail portions.  Diagnostics go to @fp when
 * verbose >= 2 and the TUI browser is not active.
 * Returns 0 on success, -ENOMEM if cloning a split portion fails.
 */
int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
{
	struct rb_root *root;
	struct rb_node *next, *first;
	int err = 0;

	down_write(&maps->lock);

	root = &maps->entries;

	/*
	 * Find first map where end > map->start.
	 * Same as find_vma() in kernel.
	 */
	next = root->rb_node;
	first = NULL;
	while (next) {
		struct map *pos = rb_entry(next, struct map, rb_node);

		if (pos->end > map->start) {
			first = next;
			if (pos->start <= map->start)
				break;
			next = next->rb_left;
		} else
			next = next->rb_right;
	}

	next = first;
	while (next) {
		struct map *pos = rb_entry(next, struct map, rb_node);
		/* Advance now: pos is erased from the tree below. */
		next = rb_next(&pos->rb_node);

		/*
		 * Stop if current map starts after map->end.
		 * Maps are ordered by start: next will not overlap for sure.
		 */
		if (pos->start >= map->end)
			break;

		if (verbose >= 2) {

			if (use_browser) {
				pr_debug("overlapping maps in %s (disable tui for more info)\n",
					   map->dso->name);
			} else {
				fputs("overlapping maps:\n", fp);
				map__fprintf(map, fp);
				map__fprintf(pos, fp);
			}
		}

		rb_erase_init(&pos->rb_node, root);
		/*
		 * Now check if we need to create new maps for areas not
		 * overlapped by the new map:
		 */
		if (map->start > pos->start) {
			/* pos extends below map->start: keep that head part. */
			struct map *before = map__clone(pos);

			if (before == NULL) {
				err = -ENOMEM;
				goto put_map;
			}

			before->end = map->start;
			__maps__insert(maps, before);
			if (verbose >= 2 && !use_browser)
				map__fprintf(before, fp);
			/* The tree now holds its own reference to 'before'. */
			map__put(before);
		}

		if (map->end < pos->end) {
			/* pos extends past map->end: keep that tail part. */
			struct map *after = map__clone(pos);

			if (after == NULL) {
				err = -ENOMEM;
				goto put_map;
			}

			after->start = map->end;
			after->pgoff += map->end - pos->start;
			/* Splitting must not change the addr -> file offset mapping. */
			assert(pos->map_ip(pos, map->end) == after->map_ip(after, map->end));
			__maps__insert(maps, after);
			if (verbose >= 2 && !use_browser)
				map__fprintf(after, fp);
			map__put(after);
		}
put_map:
		/* Drop the reference the tree held on the erased map. */
		map__put(pos);

		if (err)
			goto out;
	}

	err = 0;
out:
	up_write(&maps->lock);
	return err;
}
317 
318 /*
319  * XXX This should not really _copy_ te maps, but refcount them.
320  */
321 int maps__clone(struct thread *thread, struct maps *parent)
322 {
323 	struct maps *maps = thread->maps;
324 	int err;
325 	struct map *map;
326 
327 	down_read(&parent->lock);
328 
329 	maps__for_each_entry(parent, map) {
330 		struct map *new = map__clone(map);
331 
332 		if (new == NULL) {
333 			err = -ENOMEM;
334 			goto out_unlock;
335 		}
336 
337 		err = unwind__prepare_access(maps, new, NULL);
338 		if (err)
339 			goto out_unlock;
340 
341 		maps__insert(maps, new);
342 		map__put(new);
343 	}
344 
345 	err = 0;
346 out_unlock:
347 	up_read(&parent->lock);
348 	return err;
349 }
350 
351 static void __maps__insert(struct maps *maps, struct map *map)
352 {
353 	struct rb_node **p = &maps->entries.rb_node;
354 	struct rb_node *parent = NULL;
355 	const u64 ip = map->start;
356 	struct map *m;
357 
358 	while (*p != NULL) {
359 		parent = *p;
360 		m = rb_entry(parent, struct map, rb_node);
361 		if (ip < m->start)
362 			p = &(*p)->rb_left;
363 		else
364 			p = &(*p)->rb_right;
365 	}
366 
367 	rb_link_node(&map->rb_node, parent, p);
368 	rb_insert_color(&map->rb_node, &maps->entries);
369 	map__get(map);
370 }
371 
372 struct map *maps__find(struct maps *maps, u64 ip)
373 {
374 	struct rb_node *p;
375 	struct map *m;
376 
377 	down_read(&maps->lock);
378 
379 	p = maps->entries.rb_node;
380 	while (p != NULL) {
381 		m = rb_entry(p, struct map, rb_node);
382 		if (ip < m->start)
383 			p = p->rb_left;
384 		else if (ip >= m->end)
385 			p = p->rb_right;
386 		else
387 			goto out;
388 	}
389 
390 	m = NULL;
391 out:
392 	up_read(&maps->lock);
393 	return m;
394 }
395 
396 struct map *maps__first(struct maps *maps)
397 {
398 	struct rb_node *first = rb_first(&maps->entries);
399 
400 	if (first)
401 		return rb_entry(first, struct map, rb_node);
402 	return NULL;
403 }
404