xref: /openbmc/linux/tools/perf/util/map.c (revision 293d5b43)
1 #include "symbol.h"
2 #include <errno.h>
3 #include <inttypes.h>
4 #include <limits.h>
5 #include <stdlib.h>
6 #include <string.h>
7 #include <stdio.h>
8 #include <unistd.h>
9 #include "map.h"
10 #include "thread.h"
11 #include "strlist.h"
12 #include "vdso.h"
13 #include "build-id.h"
14 #include "util.h"
15 #include "debug.h"
16 #include "machine.h"
17 #include <linux/string.h>
18 #include "unwind.h"
19 
20 static void __maps__insert(struct maps *maps, struct map *map);
21 
22 const char *map_type__name[MAP__NR_TYPES] = {
23 	[MAP__FUNCTION] = "Functions",
24 	[MAP__VARIABLE] = "Variables",
25 };
26 
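/*
 * Does this name denote anonymous memory?  "//anon", "/dev/zero" and
 * "/anon_hugepage" mappings have no backing file to read symbols from;
 * the latter two are matched as prefixes so " (deleted)" variants are
 * caught as well.
 */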
27 static inline int is_anon_memory(const char *filename)
28 {
29 	return !strcmp(filename, "//anon") ||
30 	       !strncmp(filename, "/dev/zero", sizeof("/dev/zero") - 1) ||
31 	       !strncmp(filename, "/anon_hugepage", sizeof("/anon_hugepage") - 1);
32 }
33 
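/*
 * Mappings that have no DSO behind them at all: the stack ("[stack" also
 * matches per-thread "[stack:<tid>]" entries), SysV shared memory segments
 * ("/SYSV...") and the heap.
 */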
34 static inline int is_no_dso_memory(const char *filename)
35 {
36 	return !strncmp(filename, "[stack", 6) ||
37 	       !strncmp(filename, "/SYSV", 5)  ||
38 	       !strcmp(filename, "[heap]");
39 }
40 
41 static inline int is_android_lib(const char *filename)
42 {
43 	return !strncmp(filename, "/data/app-lib", 13) ||
44 	       !strncmp(filename, "/system/lib", 11);
45 }
46 
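/*
 * Android maps its system and app libraries under paths that only exist on
 * the device.  Try to rewrite @filename into a host-side path in
 * @newfilename (assumed to be at least PATH_MAX bytes), using the APP_ABI,
 * APK_PATH, NDK_ROOT and APP_PLATFORM environment variables.  Returns true
 * if @newfilename was filled in.
 */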
47 static inline bool replace_android_lib(const char *filename, char *newfilename)
48 {
49 	const char *libname;
50 	char *app_abi;
51 	size_t app_abi_length, new_length;
52 	size_t lib_length = 0;
53 
54 	libname  = strrchr(filename, '/');
55 	if (libname)
56 		lib_length = strlen(libname);
57 
58 	app_abi = getenv("APP_ABI");
59 	if (!app_abi)
60 		return false;
61 
62 	app_abi_length = strlen(app_abi);
63 
64 	if (!strncmp(filename, "/data/app-lib", 13)) {
65 		char *apk_path;
66 
67 		if (!app_abi_length)
68 			return false;
69 
70 		new_length = 7 + app_abi_length + lib_length;
71 
72 		apk_path = getenv("APK_PATH");
73 		if (apk_path) {
74 			new_length += strlen(apk_path) + 1;
75 			if (new_length > PATH_MAX)
76 				return false;
77 			snprintf(newfilename, new_length,
78 				 "%s/libs/%s/%s", apk_path, app_abi, libname);
79 		} else {
80 			if (new_length > PATH_MAX)
81 				return false;
82 			snprintf(newfilename, new_length,
83 				 "libs/%s/%s", app_abi, libname);
84 		}
85 		return true;
86 	}
87 
88 	if (!strncmp(filename, "/system/lib", 11)) {
89 		char *ndk, *app;
90 		const char *arch;
91 		size_t ndk_length;
92 		size_t app_length;
93 
94 		ndk = getenv("NDK_ROOT");
95 		app = getenv("APP_PLATFORM");
96 
97 		if (!(ndk && app))
98 			return false;
99 
100 		ndk_length = strlen(ndk);
101 		app_length = strlen(app);
102 
103 		if (!(ndk_length && app_length && app_abi_length))
104 			return false;
105 
106 		arch = !strncmp(app_abi, "arm", 3) ? "arm" :
107 		       !strncmp(app_abi, "mips", 4) ? "mips" :
108 		       !strncmp(app_abi, "x86", 3) ? "x86" : NULL;
109 
110 		if (!arch)
111 			return false;
112 
113 		new_length = 27 + ndk_length +
114 			     app_length + lib_length +
115 			     strlen(arch);
116 
117 		if (new_length > PATH_MAX)
118 			return false;
119 		snprintf(newfilename, new_length,
120 			"%s/platforms/%s/arch-%s/usr/lib/%s",
121 			ndk, app, arch, libname);
122 
123 		return true;
124 	}
125 	return false;
126 }
127 
128 void map__init(struct map *map, enum map_type type,
129 	       u64 start, u64 end, u64 pgoff, struct dso *dso)
130 {
131 	map->type     = type;
132 	map->start    = start;
133 	map->end      = end;
134 	map->pgoff    = pgoff;
135 	map->reloc    = 0;
136 	map->dso      = dso__get(dso);
137 	map->map_ip   = map__map_ip;
138 	map->unmap_ip = map__unmap_ip;
139 	RB_CLEAR_NODE(&map->rb_node);
140 	map->groups   = NULL;
141 	map->erange_warned = false;
142 	atomic_set(&map->refcnt, 1);
143 }
144 
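/*
 * Allocate and set up a map from the fields of an mmap event: pick or create
 * the backing DSO (the vdso, a /tmp/perf-<pid>.map file for anonymous
 * executable memory, an Android library path rewritten by
 * replace_android_lib(), or simply the named file) and initialize the
 * address translation callbacks.
 */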
145 struct map *map__new(struct machine *machine, u64 start, u64 len,
146 		     u64 pgoff, u32 pid, u32 d_maj, u32 d_min, u64 ino,
147 		     u64 ino_gen, u32 prot, u32 flags, char *filename,
148 		     enum map_type type, struct thread *thread)
149 {
150 	struct map *map = malloc(sizeof(*map));
151 
152 	if (map != NULL) {
153 		char newfilename[PATH_MAX];
154 		struct dso *dso;
155 		int anon, no_dso, vdso, android;
156 
157 		android = is_android_lib(filename);
158 		anon = is_anon_memory(filename);
159 		vdso = is_vdso_map(filename);
160 		no_dso = is_no_dso_memory(filename);
161 
162 		map->maj = d_maj;
163 		map->min = d_min;
164 		map->ino = ino;
165 		map->ino_generation = ino_gen;
166 		map->prot = prot;
167 		map->flags = flags;
168 
169 		if ((anon || no_dso) && type == MAP__FUNCTION) {
170 			snprintf(newfilename, sizeof(newfilename), "/tmp/perf-%d.map", pid);
171 			filename = newfilename;
172 		}
173 
174 		if (android) {
175 			if (replace_android_lib(filename, newfilename))
176 				filename = newfilename;
177 		}
178 
179 		if (vdso) {
180 			pgoff = 0;
181 			dso = machine__findnew_vdso(machine, thread);
182 		} else
183 			dso = machine__findnew_dso(machine, filename);
184 
185 		if (dso == NULL)
186 			goto out_delete;
187 
188 		map__init(map, type, start, start + len, pgoff, dso);
189 
190 		if (anon || no_dso) {
191 			map->map_ip = map->unmap_ip = identity__map_ip;
192 
193 			/*
194 			 * Set memory without DSO as loaded. All map__find_*
195 			 * functions still return NULL, and we avoid the
196 			 * unnecessary map__load warning.
197 			 */
198 			if (type != MAP__FUNCTION)
199 				dso__set_loaded(dso, map->type);
200 		}
201 		dso__put(dso);
202 	}
203 	return map;
204 out_delete:
205 	free(map);
206 	return NULL;
207 }
208 
209 /*
210  * Constructor variant for modules (where we know from /proc/modules where
211  * they are loaded) and for vmlinux, where we will only know where it
212  * starts and ends after we have loaded all the symbols.
213  */
214 struct map *map__new2(u64 start, struct dso *dso, enum map_type type)
215 {
216 	struct map *map = calloc(1, (sizeof(*map) +
217 				     (dso->kernel ? sizeof(struct kmap) : 0)));
218 	if (map != NULL) {
219 		/*
220 		 * ->end will be filled after we load all the symbols
221 		 */
222 		map__init(map, type, start, 0, 0, dso);
223 	}
224 
225 	return map;
226 }
227 
228 /*
229  * Use this and __map__is_kmodule() for map instances that are in
230  * machine->kmaps, and thus have map->groups->machine all properly set, to
231  * disambiguate between the kernel and modules.
232  *
233  * When the need arises, introduce map__is_{kernel,kmodule}() that
234  * checks (map->groups != NULL && map->groups->machine != NULL &&
235  * map->dso->kernel) before calling __map__is_{kernel,kmodule}().
236  */
237 bool __map__is_kernel(const struct map *map)
238 {
239 	return __machine__kernel_map(map->groups->machine, map->type) == map;
240 }
241 
242 static void map__exit(struct map *map)
243 {
244 	BUG_ON(!RB_EMPTY_NODE(&map->rb_node));
245 	dso__zput(map->dso);
246 }
247 
248 void map__delete(struct map *map)
249 {
250 	map__exit(map);
251 	free(map);
252 }
253 
254 void map__put(struct map *map)
255 {
256 	if (map && atomic_dec_and_test(&map->refcnt))
257 		map__delete(map);
258 }
259 
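/*
 * map__fixup_start()/map__fixup_end(): adjust the map boundaries to the
 * first/last symbol loaded for the DSO, for maps whose real extent is only
 * known after the symbols have been read (see map__new2()).
 */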
260 void map__fixup_start(struct map *map)
261 {
262 	struct rb_root *symbols = &map->dso->symbols[map->type];
263 	struct rb_node *nd = rb_first(symbols);
264 	if (nd != NULL) {
265 		struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
266 		map->start = sym->start;
267 	}
268 }
269 
270 void map__fixup_end(struct map *map)
271 {
272 	struct rb_root *symbols = &map->dso->symbols[map->type];
273 	struct rb_node *nd = rb_last(symbols);
274 	if (nd != NULL) {
275 		struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
276 		map->end = sym->end;
277 	}
278 }
279 
280 #define DSO__DELETED "(deleted)"
281 
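/*
 * Lazily load the symbol table of the map's DSO.  Returns 0 on success (or
 * if it was already loaded), -1 when loading fails or yields no symbols, and
 * -2 when no symbols remained, possibly because @filter rejected all of them.
 */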
282 int map__load(struct map *map, symbol_filter_t filter)
283 {
284 	const char *name = map->dso->long_name;
285 	int nr;
286 
287 	if (dso__loaded(map->dso, map->type))
288 		return 0;
289 
290 	nr = dso__load(map->dso, map, filter);
291 	if (nr < 0) {
292 		if (map->dso->has_build_id) {
293 			char sbuild_id[SBUILD_ID_SIZE];
294 
295 			build_id__sprintf(map->dso->build_id,
296 					  sizeof(map->dso->build_id),
297 					  sbuild_id);
298 			pr_warning("%s with build id %s not found",
299 				   name, sbuild_id);
300 		} else
301 			pr_warning("Failed to open %s", name);
302 
303 		pr_warning(", continuing without symbols\n");
304 		return -1;
305 	} else if (nr == 0) {
306 #ifdef HAVE_LIBELF_SUPPORT
307 		const size_t len = strlen(name);
308 		const size_t real_len = len - sizeof(DSO__DELETED);
309 
310 		if (len > sizeof(DSO__DELETED) &&
311 		    strcmp(name + real_len + 1, DSO__DELETED) == 0) {
312 			pr_warning("%.*s was updated (is prelink enabled?). "
313 				"Restart the long running apps that use it!\n",
314 				   (int)real_len, name);
315 		} else if (filter) {
316 			pr_warning("no symbols passed the given filter.\n");
317 			return -2;	/* Empty but maybe by the filter */
318 		} else {
319 			pr_warning("no symbols found in %s, maybe install "
320 				   "a debug package?\n", name);
321 		}
322 #endif
323 		return -1;
324 	}
325 
326 	return 0;
327 }
328 
329 int __weak arch__compare_symbol_names(const char *namea, const char *nameb)
330 {
331 	return strcmp(namea, nameb);
332 }
333 
334 struct symbol *map__find_symbol(struct map *map, u64 addr,
335 				symbol_filter_t filter)
336 {
337 	if (map__load(map, filter) < 0)
338 		return NULL;
339 
340 	return dso__find_symbol(map->dso, map->type, addr);
341 }
342 
343 struct symbol *map__find_symbol_by_name(struct map *map, const char *name,
344 					symbol_filter_t filter)
345 {
346 	if (map__load(map, filter) < 0)
347 		return NULL;
348 
349 	if (!dso__sorted_by_name(map->dso, map->type))
350 		dso__sort_by_name(map->dso, map->type);
351 
352 	return dso__find_symbol_by_name(map->dso, map->type, name);
353 }
354 
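/*
 * Duplicate @from with a fresh refcount: the copy starts out detached from
 * any rb tree and map_groups, and takes its own reference on the DSO.
 */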
355 struct map *map__clone(struct map *from)
356 {
357 	struct map *map = memdup(from, sizeof(*map));
358 
359 	if (map != NULL) {
360 		atomic_set(&map->refcnt, 1);
361 		RB_CLEAR_NODE(&map->rb_node);
362 		dso__get(map->dso);
363 		map->groups = NULL;
364 	}
365 
366 	return map;
367 }
368 
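/* Return 1 if the address ranges of @l and @r overlap, 0 otherwise. */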
369 int map__overlap(struct map *l, struct map *r)
370 {
371 	if (l->start > r->start) {
372 		struct map *t = l;
373 		l = r;
374 		r = t;
375 	}
376 
377 	if (l->end > r->start)
378 		return 1;
379 
380 	return 0;
381 }
382 
383 size_t map__fprintf(struct map *map, FILE *fp)
384 {
385 	return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n",
386 		       map->start, map->end, map->pgoff, map->dso->name);
387 }
388 
389 size_t map__fprintf_dsoname(struct map *map, FILE *fp)
390 {
391 	const char *dsoname = "[unknown]";
392 
393 	if (map && map->dso && (map->dso->name || map->dso->long_name)) {
394 		if (symbol_conf.show_kernel_path && map->dso->long_name)
395 			dsoname = map->dso->long_name;
396 		else if (map->dso->name)
397 			dsoname = map->dso->name;
398 	}
399 
400 	return fprintf(fp, "%s", dsoname);
401 }
402 
403 int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix,
404 			 FILE *fp)
405 {
406 	char *srcline;
407 	int ret = 0;
408 
409 	if (map && map->dso) {
410 		srcline = get_srcline(map->dso,
411 				      map__rip_2objdump(map, addr), NULL, true);
412 		if (srcline != SRCLINE_UNKNOWN)
413 			ret = fprintf(fp, "%s%s", prefix, srcline);
414 		free_srcline(srcline);
415 	}
416 	return ret;
417 }
418 
419 /**
420  * map__rip_2objdump - convert symbol start address to objdump address.
421  * @map: memory map
422  * @rip: symbol start address
423  *
424  * objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN.
425  * map->dso->adjust_symbols==1 for ET_EXEC-like cases, except ET_REL, which
426  * is relative to section start.
427  *
428  * Return: Address suitable for passing to "objdump --start-address="
429  */
430 u64 map__rip_2objdump(struct map *map, u64 rip)
431 {
432 	if (!map->dso->adjust_symbols)
433 		return rip;
434 
435 	if (map->dso->rel)
436 		return rip - map->pgoff;
437 
438 	/*
439 	 * kernel modules also have DSO_TYPE_USER in dso->kernel,
440 	 * but all kernel modules are ET_REL, so won't get here.
441 	 */
442 	if (map->dso->kernel == DSO_TYPE_USER)
443 		return rip + map->dso->text_offset;
444 
445 	return map->unmap_ip(map, rip) - map->reloc;
446 }
447 
448 /**
449  * map__objdump_2mem - convert objdump address to a memory address.
450  * @map: memory map
451  * @ip: objdump address
452  *
453  * Closely related to map__rip_2objdump(), this function takes an address from
454  * objdump and converts it to a memory address.  Note this assumes that @map
455  * contains the address.  To be sure the result is valid, check it forwards
456  * e.g. map__rip_2objdump(map->map_ip(map, map__objdump_2mem(map, ip))) == ip
457  *
458  * Return: Memory address.
459  */
460 u64 map__objdump_2mem(struct map *map, u64 ip)
461 {
462 	if (!map->dso->adjust_symbols)
463 		return map->unmap_ip(map, ip);
464 
465 	if (map->dso->rel)
466 		return map->unmap_ip(map, ip + map->pgoff);
467 
468 	/*
469 	 * kernel modules also have DSO_TYPE_USER in dso->kernel,
470 	 * but all kernel modules are ET_REL, so won't get here.
471 	 */
472 	if (map->dso->kernel == DSO_TYPE_USER)
473 		return map->unmap_ip(map, ip - map->dso->text_offset);
474 
475 	return ip + map->reloc;
476 }
477 
478 static void maps__init(struct maps *maps)
479 {
480 	maps->entries = RB_ROOT;
481 	pthread_rwlock_init(&maps->lock, NULL);
482 }
483 
484 void map_groups__init(struct map_groups *mg, struct machine *machine)
485 {
486 	int i;
487 	for (i = 0; i < MAP__NR_TYPES; ++i) {
488 		maps__init(&mg->maps[i]);
489 	}
490 	mg->machine = machine;
491 	atomic_set(&mg->refcnt, 1);
492 }
493 
494 static void __maps__purge(struct maps *maps)
495 {
496 	struct rb_root *root = &maps->entries;
497 	struct rb_node *next = rb_first(root);
498 
499 	while (next) {
500 		struct map *pos = rb_entry(next, struct map, rb_node);
501 
502 		next = rb_next(&pos->rb_node);
503 		rb_erase_init(&pos->rb_node, root);
504 		map__put(pos);
505 	}
506 }
507 
508 static void maps__exit(struct maps *maps)
509 {
510 	pthread_rwlock_wrlock(&maps->lock);
511 	__maps__purge(maps);
512 	pthread_rwlock_unlock(&maps->lock);
513 }
514 
515 void map_groups__exit(struct map_groups *mg)
516 {
517 	int i;
518 
519 	for (i = 0; i < MAP__NR_TYPES; ++i)
520 		maps__exit(&mg->maps[i]);
521 }
522 
523 bool map_groups__empty(struct map_groups *mg)
524 {
525 	int i;
526 
527 	for (i = 0; i < MAP__NR_TYPES; ++i) {
528 		if (maps__first(&mg->maps[i]))
529 			return false;
530 	}
531 
532 	return true;
533 }
534 
535 struct map_groups *map_groups__new(struct machine *machine)
536 {
537 	struct map_groups *mg = malloc(sizeof(*mg));
538 
539 	if (mg != NULL)
540 		map_groups__init(mg, machine);
541 
542 	return mg;
543 }
544 
545 void map_groups__delete(struct map_groups *mg)
546 {
547 	map_groups__exit(mg);
548 	free(mg);
549 }
550 
551 void map_groups__put(struct map_groups *mg)
552 {
553 	if (mg && atomic_dec_and_test(&mg->refcnt))
554 		map_groups__delete(mg);
555 }
556 
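/*
 * Find the symbol containing @addr, a task-level address that is translated
 * to a DSO-relative address via map->map_ip() before the lookup.  The map it
 * was found in is optionally returned via @mapp.
 */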
557 struct symbol *map_groups__find_symbol(struct map_groups *mg,
558 				       enum map_type type, u64 addr,
559 				       struct map **mapp,
560 				       symbol_filter_t filter)
561 {
562 	struct map *map = map_groups__find(mg, type, addr);
563 
564 	/* Ensure map is loaded before using map->map_ip */
565 	if (map != NULL && map__load(map, filter) >= 0) {
566 		if (mapp != NULL)
567 			*mapp = map;
568 		return map__find_symbol(map, map->map_ip(map, addr), filter);
569 	}
570 
571 	return NULL;
572 }
573 
574 struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name,
575 					 struct map **mapp, symbol_filter_t filter)
576 {
577 	struct symbol *sym;
578 	struct rb_node *nd;
579 
580 	pthread_rwlock_rdlock(&maps->lock);
581 
582 	for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
583 		struct map *pos = rb_entry(nd, struct map, rb_node);
584 
585 		sym = map__find_symbol_by_name(pos, name, filter);
586 
587 		if (sym == NULL)
588 			continue;
589 		if (mapp != NULL)
590 			*mapp = pos;
591 		goto out;
592 	}
593 
594 	sym = NULL;
595 out:
596 	pthread_rwlock_unlock(&maps->lock);
597 	return sym;
598 }
599 
600 struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
601 					       enum map_type type,
602 					       const char *name,
603 					       struct map **mapp,
604 					       symbol_filter_t filter)
605 {
606 	struct symbol *sym = maps__find_symbol_by_name(&mg->maps[type], name, mapp, filter);
607 
608 	return sym;
609 }
610 
611 int map_groups__find_ams(struct addr_map_symbol *ams, symbol_filter_t filter)
612 {
613 	if (ams->addr < ams->map->start || ams->addr >= ams->map->end) {
614 		if (ams->map->groups == NULL)
615 			return -1;
616 		ams->map = map_groups__find(ams->map->groups, ams->map->type,
617 					    ams->addr);
618 		if (ams->map == NULL)
619 			return -1;
620 	}
621 
622 	ams->al_addr = ams->map->map_ip(ams->map, ams->addr);
623 	ams->sym = map__find_symbol(ams->map, ams->al_addr, filter);
624 
625 	return ams->sym ? 0 : -1;
626 }
627 
628 static size_t maps__fprintf(struct maps *maps, FILE *fp)
629 {
630 	size_t printed = 0;
631 	struct rb_node *nd;
632 
633 	pthread_rwlock_rdlock(&maps->lock);
634 
635 	for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
636 		struct map *pos = rb_entry(nd, struct map, rb_node);
637 		printed += fprintf(fp, "Map:");
638 		printed += map__fprintf(pos, fp);
639 		if (verbose > 2) {
640 			printed += dso__fprintf(pos->dso, pos->type, fp);
641 			printed += fprintf(fp, "--\n");
642 		}
643 	}
644 
645 	pthread_rwlock_unlock(&maps->lock);
646 
647 	return printed;
648 }
649 
650 size_t __map_groups__fprintf_maps(struct map_groups *mg, enum map_type type,
651 				  FILE *fp)
652 {
653 	size_t printed = fprintf(fp, "%s:\n", map_type__name[type]);
654 	return printed += maps__fprintf(&mg->maps[type], fp);
655 }
656 
657 size_t map_groups__fprintf(struct map_groups *mg, FILE *fp)
658 {
659 	size_t printed = 0, i;
660 	for (i = 0; i < MAP__NR_TYPES; ++i)
661 		printed += __map_groups__fprintf_maps(mg, i, fp);
662 	return printed;
663 }
664 
665 static void __map_groups__insert(struct map_groups *mg, struct map *map)
666 {
667 	__maps__insert(&mg->maps[map->type], map);
668 	map->groups = mg;
669 }
670 
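/*
 * A new map may overlap maps we already have, e.g. when a region is unmapped
 * and reused.  Drop every overlapping map and, where an old map extends
 * beyond the new one, re-insert cloned "before"/"after" fragments covering
 * the parts that @map does not shadow.
 */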
671 static int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
672 {
673 	struct rb_root *root;
674 	struct rb_node *next;
675 	int err = 0;
676 
677 	pthread_rwlock_wrlock(&maps->lock);
678 
679 	root = &maps->entries;
680 	next = rb_first(root);
681 
682 	while (next) {
683 		struct map *pos = rb_entry(next, struct map, rb_node);
684 		next = rb_next(&pos->rb_node);
685 
686 		if (!map__overlap(pos, map))
687 			continue;
688 
689 		if (verbose >= 2) {
690 			fputs("overlapping maps:\n", fp);
691 			map__fprintf(map, fp);
692 			map__fprintf(pos, fp);
693 		}
694 
695 		rb_erase_init(&pos->rb_node, root);
696 		/*
697 		 * Now check if we need to create new maps for areas not
698 		 * overlapped by the new map:
699 		 */
700 		if (map->start > pos->start) {
701 			struct map *before = map__clone(pos);
702 
703 			if (before == NULL) {
704 				err = -ENOMEM;
705 				goto put_map;
706 			}
707 
708 			before->end = map->start;
709 			__map_groups__insert(pos->groups, before);
710 			if (verbose >= 2)
711 				map__fprintf(before, fp);
712 			map__put(before);
713 		}
714 
715 		if (map->end < pos->end) {
716 			struct map *after = map__clone(pos);
717 
718 			if (after == NULL) {
719 				err = -ENOMEM;
720 				goto put_map;
721 			}
722 
723 			after->start = map->end;
724 			__map_groups__insert(pos->groups, after);
725 			if (verbose >= 2)
726 				map__fprintf(after, fp);
727 			map__put(after);
728 		}
729 put_map:
730 		map__put(pos);
731 
732 		if (err)
733 			goto out;
734 	}
735 
736 	err = 0;
737 out:
738 	pthread_rwlock_unlock(&maps->lock);
739 	return err;
740 }
741 
742 int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
743 				   FILE *fp)
744 {
745 	return maps__fixup_overlappings(&mg->maps[map->type], map, fp);
746 }
747 
748 /*
749  * XXX This should not really _copy_ the maps, but refcount them.
750  */
751 int map_groups__clone(struct thread *thread,
752 		      struct map_groups *parent, enum map_type type)
753 {
754 	struct map_groups *mg = thread->mg;
755 	int err = -ENOMEM;
756 	struct map *map;
757 	struct maps *maps = &parent->maps[type];
758 
759 	pthread_rwlock_rdlock(&maps->lock);
760 
761 	for (map = maps__first(maps); map; map = map__next(map)) {
762 		struct map *new = map__clone(map);
763 		if (new == NULL)
764 			goto out_unlock;
765 
766 		err = unwind__prepare_access(thread, new, NULL);
767 		if (err)
768 			goto out_unlock;
769 
770 		map_groups__insert(mg, new);
771 		map__put(new);
772 	}
773 
774 	err = 0;
775 out_unlock:
776 	pthread_rwlock_unlock(&maps->lock);
777 	return err;
778 }
779 
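/*
 * Insert @map into the rb tree, keyed by map->start, and grab a reference.
 * The caller must hold maps->lock for writing.
 */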
780 static void __maps__insert(struct maps *maps, struct map *map)
781 {
782 	struct rb_node **p = &maps->entries.rb_node;
783 	struct rb_node *parent = NULL;
784 	const u64 ip = map->start;
785 	struct map *m;
786 
787 	while (*p != NULL) {
788 		parent = *p;
789 		m = rb_entry(parent, struct map, rb_node);
790 		if (ip < m->start)
791 			p = &(*p)->rb_left;
792 		else
793 			p = &(*p)->rb_right;
794 	}
795 
796 	rb_link_node(&map->rb_node, parent, p);
797 	rb_insert_color(&map->rb_node, &maps->entries);
798 	map__get(map);
799 }
800 
801 void maps__insert(struct maps *maps, struct map *map)
802 {
803 	pthread_rwlock_wrlock(&maps->lock);
804 	__maps__insert(maps, map);
805 	pthread_rwlock_unlock(&maps->lock);
806 }
807 
808 static void __maps__remove(struct maps *maps, struct map *map)
809 {
810 	rb_erase_init(&map->rb_node, &maps->entries);
811 	map__put(map);
812 }
813 
814 void maps__remove(struct maps *maps, struct map *map)
815 {
816 	pthread_rwlock_wrlock(&maps->lock);
817 	__maps__remove(maps, map);
818 	pthread_rwlock_unlock(&maps->lock);
819 }
820 
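/*
 * Look up the map containing @ip, i.e. map->start <= ip < map->end, or
 * return NULL if no map covers it.
 */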
821 struct map *maps__find(struct maps *maps, u64 ip)
822 {
823 	struct rb_node **p, *parent = NULL;
824 	struct map *m;
825 
826 	pthread_rwlock_rdlock(&maps->lock);
827 
828 	p = &maps->entries.rb_node;
829 	while (*p != NULL) {
830 		parent = *p;
831 		m = rb_entry(parent, struct map, rb_node);
832 		if (ip < m->start)
833 			p = &(*p)->rb_left;
834 		else if (ip >= m->end)
835 			p = &(*p)->rb_right;
836 		else
837 			goto out;
838 	}
839 
840 	m = NULL;
841 out:
842 	pthread_rwlock_unlock(&maps->lock);
843 	return m;
844 }
845 
846 struct map *maps__first(struct maps *maps)
847 {
848 	struct rb_node *first = rb_first(&maps->entries);
849 
850 	if (first)
851 		return rb_entry(first, struct map, rb_node);
852 	return NULL;
853 }
854 
855 struct map *map__next(struct map *map)
856 {
857 	struct rb_node *next = rb_next(&map->rb_node);
858 
859 	if (next)
860 		return rb_entry(next, struct map, rb_node);
861 	return NULL;
862 }
863 
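/*
 * Kernel maps are allocated with a struct kmap placed right after the map
 * itself (see map__new2()); return a pointer to it.  Only valid for maps
 * whose DSO is a kernel DSO.
 */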
864 struct kmap *map__kmap(struct map *map)
865 {
866 	if (!map->dso || !map->dso->kernel) {
867 		pr_err("Internal error: map__kmap with a non-kernel map\n");
868 		return NULL;
869 	}
870 	return (struct kmap *)(map + 1);
871 }
872 
873 struct map_groups *map__kmaps(struct map *map)
874 {
875 	struct kmap *kmap = map__kmap(map);
876 
877 	if (!kmap || !kmap->kmaps) {
878 		pr_err("Internal error: map__kmaps with a non-kernel map\n");
879 		return NULL;
880 	}
881 	return kmap->kmaps;
882 }
883