xref: /openbmc/linux/tools/perf/util/map.c (revision 8dda2eac)
1 // SPDX-License-Identifier: GPL-2.0
2 #include "symbol.h"
3 #include <assert.h>
4 #include <errno.h>
5 #include <inttypes.h>
6 #include <limits.h>
7 #include <stdlib.h>
8 #include <string.h>
9 #include <stdio.h>
10 #include <unistd.h>
11 #include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
12 #include "dso.h"
13 #include "map.h"
14 #include "map_symbol.h"
15 #include "thread.h"
16 #include "vdso.h"
17 #include "build-id.h"
18 #include "debug.h"
19 #include "machine.h"
20 #include <linux/string.h>
21 #include <linux/zalloc.h>
22 #include "srcline.h"
23 #include "namespaces.h"
24 #include "unwind.h"
25 #include "srccode.h"
26 #include "ui/ui.h"
27 
28 static void __maps__insert(struct maps *maps, struct map *map);
29 
30 static inline int is_android_lib(const char *filename)
31 {
32 	return strstarts(filename, "/data/app-lib/") ||
33 	       strstarts(filename, "/system/lib/");
34 }
35 
36 static inline bool replace_android_lib(const char *filename, char *newfilename)
37 {
38 	const char *libname;
39 	char *app_abi;
40 	size_t app_abi_length, new_length;
41 	size_t lib_length = 0;
42 
43 	libname  = strrchr(filename, '/');
44 	if (libname)
45 		lib_length = strlen(libname);
46 
47 	app_abi = getenv("APP_ABI");
48 	if (!app_abi)
49 		return false;
50 
51 	app_abi_length = strlen(app_abi);
52 
53 	if (strstarts(filename, "/data/app-lib/")) {
54 		char *apk_path;
55 
56 		if (!app_abi_length)
57 			return false;
58 
59 		new_length = 7 + app_abi_length + lib_length;
60 
61 		apk_path = getenv("APK_PATH");
62 		if (apk_path) {
63 			new_length += strlen(apk_path) + 1;
64 			if (new_length > PATH_MAX)
65 				return false;
66 			snprintf(newfilename, new_length,
67 				 "%s/libs/%s/%s", apk_path, app_abi, libname);
68 		} else {
69 			if (new_length > PATH_MAX)
70 				return false;
71 			snprintf(newfilename, new_length,
72 				 "libs/%s/%s", app_abi, libname);
73 		}
74 		return true;
75 	}
76 
77 	if (strstarts(filename, "/system/lib/")) {
78 		char *ndk, *app;
79 		const char *arch;
80 		int ndk_length, app_length;
81 
82 		ndk = getenv("NDK_ROOT");
83 		app = getenv("APP_PLATFORM");
84 
85 		if (!(ndk && app))
86 			return false;
87 
88 		ndk_length = strlen(ndk);
89 		app_length = strlen(app);
90 
91 		if (!(ndk_length && app_length && app_abi_length))
92 			return false;
93 
94 		arch = !strncmp(app_abi, "arm", 3) ? "arm" :
95 		       !strncmp(app_abi, "mips", 4) ? "mips" :
96 		       !strncmp(app_abi, "x86", 3) ? "x86" : NULL;
97 
98 		if (!arch)
99 			return false;
100 
101 		new_length = 27 + ndk_length +
102 			     app_length + lib_length
103 			   + strlen(arch);
104 
105 		if (new_length > PATH_MAX)
106 			return false;
107 		snprintf(newfilename, new_length,
108 			"%.*s/platforms/%.*s/arch-%s/usr/lib/%s",
109 			ndk_length, ndk, app_length, app, arch, libname);
110 
111 		return true;
112 	}
113 	return false;
114 }
115 
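/*
 * Initialize an already allocated map: takes a reference on @dso and installs
 * the default memory <-> dso-relative address converters.
 */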
116 void map__init(struct map *map, u64 start, u64 end, u64 pgoff, struct dso *dso)
117 {
118 	map->start    = start;
119 	map->end      = end;
120 	map->pgoff    = pgoff;
121 	map->reloc    = 0;
122 	map->dso      = dso__get(dso);
123 	map->map_ip   = map__map_ip;
124 	map->unmap_ip = map__unmap_ip;
125 	RB_CLEAR_NODE(&map->rb_node);
126 	map->erange_warned = false;
127 	refcount_set(&map->refcnt, 1);
128 }
129 
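/*
 * Full constructor, typically used for maps coming from mmap records:
 * resolves the backing DSO from the filename, special-casing anonymous,
 * hugetlb, android, vdso and no-dso mappings (executable anon memory is
 * looked up via the /tmp/perf-<pid>.map convention used by JITs).
 */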
130 struct map *map__new(struct machine *machine, u64 start, u64 len,
131 		     u64 pgoff, struct dso_id *id,
132 		     u32 prot, u32 flags, struct build_id *bid,
133 		     char *filename, struct thread *thread)
134 {
135 	struct map *map = malloc(sizeof(*map));
136 	struct nsinfo *nsi = NULL;
137 	struct nsinfo *nnsi;
138 
139 	if (map != NULL) {
140 		char newfilename[PATH_MAX];
141 		struct dso *dso;
142 		int anon, no_dso, vdso, android;
143 
144 		android = is_android_lib(filename);
145 		anon = is_anon_memory(filename) || flags & MAP_HUGETLB;
146 		vdso = is_vdso_map(filename);
147 		no_dso = is_no_dso_memory(filename);
148 		map->prot = prot;
149 		map->flags = flags;
150 		nsi = nsinfo__get(thread->nsinfo);
151 
152 		if ((anon || no_dso) && nsi && (prot & PROT_EXEC)) {
153 			snprintf(newfilename, sizeof(newfilename),
154 				 "/tmp/perf-%d.map", nsi->pid);
155 			filename = newfilename;
156 		}
157 
158 		if (android) {
159 			if (replace_android_lib(filename, newfilename))
160 				filename = newfilename;
161 		}
162 
163 		if (vdso) {
164 			/* The vdso maps are always on the host and not the
165 			 * container.  Ensure that we don't use setns to look
166 			 * them up.
167 			 */
168 			nnsi = nsinfo__copy(nsi);
169 			if (nnsi) {
170 				nsinfo__put(nsi);
171 				nnsi->need_setns = false;
172 				nsi = nnsi;
173 			}
174 			pgoff = 0;
175 			dso = machine__findnew_vdso(machine, thread);
176 		} else
177 			dso = machine__findnew_dso_id(machine, filename, id);
178 
179 		if (dso == NULL)
180 			goto out_delete;
181 
182 		map__init(map, start, start + len, pgoff, dso);
183 
184 		if (anon || no_dso) {
185 			map->map_ip = map->unmap_ip = identity__map_ip;
186 
187 			/*
188 			 * Mark memory without a DSO as loaded. All map__find_*
189 			 * functions still return NULL, and we avoid the
190 			 * unnecessary map__load warning.
191 			 */
192 			if (!(prot & PROT_EXEC))
193 				dso__set_loaded(dso);
194 		}
195 		dso->nsinfo = nsi;
196 
197 		if (build_id__is_defined(bid))
198 			dso__set_build_id(dso, bid);
199 
200 		dso__put(dso);
201 	}
202 	return map;
203 out_delete:
204 	nsinfo__put(nsi);
205 	free(map);
206 	return NULL;
207 }
208 
209 /*
210  * Constructor variant for modules (where we know from /proc/modules where
211  * they are loaded) and for vmlinux, where we'll only know where it starts
212  * and ends after we load all the symbols.
213  */
214 struct map *map__new2(u64 start, struct dso *dso)
215 {
216 	struct map *map = calloc(1, (sizeof(*map) +
217 				     (dso->kernel ? sizeof(struct kmap) : 0)));
218 	if (map != NULL) {
219 		/*
220 		 * ->end will be filled after we load all the symbols
221 		 */
222 		map__init(map, start, 0, 0, dso);
223 	}
224 
225 	return map;
226 }
227 
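/* The kernel map proper: a kernel dso map that is the machine's main kernel map. */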
228 bool __map__is_kernel(const struct map *map)
229 {
230 	if (!map->dso->kernel)
231 		return false;
232 	return machine__kernel_map(map__kmaps((struct map *)map)->machine) == map;
233 }
234 
235 bool __map__is_extra_kernel_map(const struct map *map)
236 {
237 	struct kmap *kmap = __map__kmap((struct map *)map);
238 
239 	return kmap && kmap->name[0];
240 }
241 
242 bool __map__is_bpf_prog(const struct map *map)
243 {
244 	const char *name;
245 
246 	if (map->dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO)
247 		return true;
248 
249 	/*
250 	 * If PERF_RECORD_BPF_EVENT is not included, the dso will not have
251 	 * the binary type DSO_BINARY_TYPE__BPF_PROG_INFO. In such cases, we
252 	 * can only guess the type from the name.
253 	 */
254 	name = map->dso->short_name;
255 	return name && (strstr(name, "bpf_prog_") == name);
256 }
257 
258 bool __map__is_bpf_image(const struct map *map)
259 {
260 	const char *name;
261 
262 	if (map->dso->binary_type == DSO_BINARY_TYPE__BPF_IMAGE)
263 		return true;
264 
265 	/*
266 	 * If PERF_RECORD_KSYMBOL is not included, the dso will not have
267 	 * the binary type DSO_BINARY_TYPE__BPF_IMAGE. In such cases, we
268 	 * can only guess the type from the name.
269 	 */
270 	name = map->dso->short_name;
271 	return name && is_bpf_image(name);
272 }
273 
274 bool __map__is_ool(const struct map *map)
275 {
276 	return map->dso && map->dso->binary_type == DSO_BINARY_TYPE__OOL;
277 }
278 
279 bool map__has_symbols(const struct map *map)
280 {
281 	return dso__has_symbols(map->dso);
282 }
283 
284 static void map__exit(struct map *map)
285 {
286 	BUG_ON(refcount_read(&map->refcnt) != 0);
287 	dso__zput(map->dso);
288 }
289 
290 void map__delete(struct map *map)
291 {
292 	map__exit(map);
293 	free(map);
294 }
295 
296 void map__put(struct map *map)
297 {
298 	if (map && refcount_dec_and_test(&map->refcnt))
299 		map__delete(map);
300 }
301 
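/*
 * Adjust map->start to the lowest symbol address in the DSO.  Together with
 * map__fixup_end() this is typically used for maps created with map__new2(),
 * once their symbols are loaded and the real extent is known.
 */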
302 void map__fixup_start(struct map *map)
303 {
304 	struct rb_root_cached *symbols = &map->dso->symbols;
305 	struct rb_node *nd = rb_first_cached(symbols);
306 	if (nd != NULL) {
307 		struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
308 		map->start = sym->start;
309 	}
310 }
311 
312 void map__fixup_end(struct map *map)
313 {
314 	struct rb_root_cached *symbols = &map->dso->symbols;
315 	struct rb_node *nd = rb_last(&symbols->rb_root);
316 	if (nd != NULL) {
317 		struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
318 		map->end = sym->end;
319 	}
320 }
321 
322 #define DSO__DELETED "(deleted)"
323 
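/*
 * Load the symbol table of the backing DSO, if not already loaded.  Returns 0
 * on success and -1 on failure or when no symbols are found, logging only
 * pr_debug() messages so callers stay quiet by default.
 */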
324 int map__load(struct map *map)
325 {
326 	const char *name = map->dso->long_name;
327 	int nr;
328 
329 	if (dso__loaded(map->dso))
330 		return 0;
331 
332 	nr = dso__load(map->dso, map);
333 	if (nr < 0) {
334 		if (map->dso->has_build_id) {
335 			char sbuild_id[SBUILD_ID_SIZE];
336 
337 			build_id__sprintf(&map->dso->bid, sbuild_id);
338 			pr_debug("%s with build id %s not found", name, sbuild_id);
339 		} else
340 			pr_debug("Failed to open %s", name);
341 
342 		pr_debug(", continuing without symbols\n");
343 		return -1;
344 	} else if (nr == 0) {
345 #ifdef HAVE_LIBELF_SUPPORT
346 		const size_t len = strlen(name);
347 		const size_t real_len = len - sizeof(DSO__DELETED);
348 
349 		if (len > sizeof(DSO__DELETED) &&
350 		    strcmp(name + real_len + 1, DSO__DELETED) == 0) {
351 			pr_debug("%.*s was updated (is prelink enabled?). "
352 				"Restart the long running apps that use it!\n",
353 				   (int)real_len, name);
354 		} else {
355 			pr_debug("no symbols found in %s, maybe install a debug package?\n", name);
356 		}
357 #endif
358 		return -1;
359 	}
360 
361 	return 0;
362 }
363 
364 struct symbol *map__find_symbol(struct map *map, u64 addr)
365 {
366 	if (map__load(map) < 0)
367 		return NULL;
368 
369 	return dso__find_symbol(map->dso, addr);
370 }
371 
372 struct symbol *map__find_symbol_by_name(struct map *map, const char *name)
373 {
374 	if (map__load(map) < 0)
375 		return NULL;
376 
377 	if (!dso__sorted_by_name(map->dso))
378 		dso__sort_by_name(map->dso);
379 
380 	return dso__find_symbol_by_name(map->dso, name);
381 }
382 
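/*
 * Duplicate a map, including the trailing struct kmap for kernel dsos.  The
 * clone starts with its own refcount of one and takes an extra reference on
 * the shared DSO.
 */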
383 struct map *map__clone(struct map *from)
384 {
385 	size_t size = sizeof(struct map);
386 	struct map *map;
387 
388 	if (from->dso && from->dso->kernel)
389 		size += sizeof(struct kmap);
390 
391 	map = memdup(from, size);
392 	if (map != NULL) {
393 		refcount_set(&map->refcnt, 1);
394 		RB_CLEAR_NODE(&map->rb_node);
395 		dso__get(map->dso);
396 	}
397 
398 	return map;
399 }
400 
401 size_t map__fprintf(struct map *map, FILE *fp)
402 {
403 	return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n",
404 		       map->start, map->end, map->pgoff, map->dso->name);
405 }
406 
407 size_t map__fprintf_dsoname(struct map *map, FILE *fp)
408 {
409 	char buf[symbol_conf.pad_output_len_dso + 1];
410 	const char *dsoname = "[unknown]";
411 
412 	if (map && map->dso) {
413 		if (symbol_conf.show_kernel_path && map->dso->long_name)
414 			dsoname = map->dso->long_name;
415 		else
416 			dsoname = map->dso->name;
417 	}
418 
419 	if (symbol_conf.pad_output_len_dso) {
420 		scnprintf_pad(buf, symbol_conf.pad_output_len_dso, "%s", dsoname);
421 		dsoname = buf;
422 	}
423 
424 	return fprintf(fp, "%s", dsoname);
425 }
426 
427 char *map__srcline(struct map *map, u64 addr, struct symbol *sym)
428 {
429 	if (map == NULL)
430 		return SRCLINE_UNKNOWN;
431 	return get_srcline(map->dso, map__rip_2objdump(map, addr), sym, true, true, addr);
432 }
433 
434 int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix,
435 			 FILE *fp)
436 {
437 	int ret = 0;
438 
439 	if (map && map->dso) {
440 		char *srcline = map__srcline(map, addr, NULL);
441 		if (strncmp(srcline, SRCLINE_UNKNOWN, strlen(SRCLINE_UNKNOWN)) != 0)
442 			ret = fprintf(fp, "%s%s", prefix, srcline);
443 		free_srcline(srcline);
444 	}
445 	return ret;
446 }
447 
448 void srccode_state_free(struct srccode_state *state)
449 {
450 	zfree(&state->srcfile);
451 	state->line = 0;
452 }
453 
454 /**
455  * map__rip_2objdump - convert symbol start address to objdump address.
456  * @map: memory map
457  * @rip: symbol start address
458  *
459  * objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN.
460  * map->dso->adjust_symbols==1 for ET_EXEC-like cases except ET_REL which is
461  * relative to section start.
462  *
463  * Return: Address suitable for passing to "objdump --start-address="
464  */
465 u64 map__rip_2objdump(struct map *map, u64 rip)
466 {
467 	struct kmap *kmap = __map__kmap(map);
468 
469 	/*
470 	 * vmlinux does not have program headers for PTI entry trampolines and
471 	 * kcore may not either. However the trampoline object code is on the
472 	 * main kernel map, so just use that instead.
473 	 */
474 	if (kmap && is_entry_trampoline(kmap->name) && kmap->kmaps && kmap->kmaps->machine) {
475 		struct map *kernel_map = machine__kernel_map(kmap->kmaps->machine);
476 
477 		if (kernel_map)
478 			map = kernel_map;
479 	}
480 
481 	if (!map->dso->adjust_symbols)
482 		return rip;
483 
484 	if (map->dso->rel)
485 		return rip - map->pgoff;
486 
487 	/*
488 	 * kernel modules also have DSO_SPACE__USER in dso->kernel,
489 	 * but all kernel modules are ET_REL, so won't get here.
490 	 */
491 	if (map->dso->kernel == DSO_SPACE__USER)
492 		return rip + map->dso->text_offset;
493 
494 	return map->unmap_ip(map, rip) - map->reloc;
495 }
496 
497 /**
498  * map__objdump_2mem - convert objdump address to a memory address.
499  * @map: memory map
500  * @ip: objdump address
501  *
502  * Closely related to map__rip_2objdump(), this function takes an address from
503  * objdump and converts it to a memory address.  Note this assumes that @map
504  * contains the address.  To be sure the result is valid, check the round trip,
505  * e.g. map__rip_2objdump(map, map->map_ip(map, map__objdump_2mem(map, ip))) == ip
506  *
507  * Return: Memory address.
508  */
509 u64 map__objdump_2mem(struct map *map, u64 ip)
510 {
511 	if (!map->dso->adjust_symbols)
512 		return map->unmap_ip(map, ip);
513 
514 	if (map->dso->rel)
515 		return map->unmap_ip(map, ip + map->pgoff);
516 
517 	/*
518 	 * kernel modules also have DSO_SPACE__USER in dso->kernel,
519 	 * but all kernel modules are ET_REL, so won't get here.
520 	 */
521 	if (map->dso->kernel == DSO_SPACE__USER)
522 		return map->unmap_ip(map, ip - map->dso->text_offset);
523 
524 	return ip + map->reloc;
525 }
526 
527 void maps__init(struct maps *maps, struct machine *machine)
528 {
529 	maps->entries = RB_ROOT;
530 	init_rwsem(&maps->lock);
531 	maps->machine = machine;
532 	maps->last_search_by_name = NULL;
533 	maps->nr_maps = 0;
534 	maps->maps_by_name = NULL;
535 	refcount_set(&maps->refcnt, 1);
536 }
537 
538 static void __maps__free_maps_by_name(struct maps *maps)
539 {
540 	/*
541 	 * Free the name-sorted array; the next search by name falls back to the rbtree
542 	 */
543 	zfree(&maps->maps_by_name);
544 	maps->nr_maps_allocated = 0;
545 }
546 
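/*
 * Insert a map into the rbtree (which takes a reference on it) and, if a
 * search by name has been done before, keep the name-sorted array in sync.
 */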
547 void maps__insert(struct maps *maps, struct map *map)
548 {
549 	down_write(&maps->lock);
550 	__maps__insert(maps, map);
551 	++maps->nr_maps;
552 
553 	if (map->dso && map->dso->kernel) {
554 		struct kmap *kmap = map__kmap(map);
555 
556 		if (kmap)
557 			kmap->kmaps = maps;
558 		else
559 			pr_err("Internal error: kernel dso with non kernel map\n");
560 	}
561 
562 
563 	/*
564 	 * If we already performed some search by name, then we need to add the just
565 	 * inserted map and re-sort.
566 	 */
567 	if (maps->maps_by_name) {
568 		if (maps->nr_maps > maps->nr_maps_allocated) {
569 			int nr_allocate = maps->nr_maps * 2;
570 			struct map **maps_by_name = realloc(maps->maps_by_name, nr_allocate * sizeof(map));
571 
572 			if (maps_by_name == NULL) {
573 				__maps__free_maps_by_name(maps);
574 				up_write(&maps->lock);
575 				return;
576 			}
577 
578 			maps->maps_by_name = maps_by_name;
579 			maps->nr_maps_allocated = nr_allocate;
580 		}
581 		maps->maps_by_name[maps->nr_maps - 1] = map;
582 		__maps__sort_by_name(maps);
583 	}
584 	up_write(&maps->lock);
585 }
586 
587 static void __maps__remove(struct maps *maps, struct map *map)
588 {
589 	rb_erase_init(&map->rb_node, &maps->entries);
590 	map__put(map);
591 }
592 
593 void maps__remove(struct maps *maps, struct map *map)
594 {
595 	down_write(&maps->lock);
596 	if (maps->last_search_by_name == map)
597 		maps->last_search_by_name = NULL;
598 
599 	__maps__remove(maps, map);
600 	--maps->nr_maps;
601 	if (maps->maps_by_name)
602 		__maps__free_maps_by_name(maps);
603 	up_write(&maps->lock);
604 }
605 
606 static void __maps__purge(struct maps *maps)
607 {
608 	struct map *pos, *next;
609 
610 	maps__for_each_entry_safe(maps, pos, next) {
611 		rb_erase_init(&pos->rb_node, &maps->entries);
612 		map__put(pos);
613 	}
614 }
615 
616 void maps__exit(struct maps *maps)
617 {
618 	down_write(&maps->lock);
619 	__maps__purge(maps);
620 	up_write(&maps->lock);
621 }
622 
623 bool maps__empty(struct maps *maps)
624 {
625 	return !maps__first(maps);
626 }
627 
628 struct maps *maps__new(struct machine *machine)
629 {
630 	struct maps *maps = zalloc(sizeof(*maps));
631 
632 	if (maps != NULL)
633 		maps__init(maps, machine);
634 
635 	return maps;
636 }
637 
638 void maps__delete(struct maps *maps)
639 {
640 	maps__exit(maps);
641 	unwind__finish_access(maps);
642 	free(maps);
643 }
644 
645 void maps__put(struct maps *maps)
646 {
647 	if (maps && refcount_dec_and_test(&maps->refcnt))
648 		maps__delete(maps);
649 }
650 
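/*
 * Resolve a memory address: find the map covering @addr, convert it to a
 * dso-relative address with map->map_ip() and look it up in the DSO's symbols.
 */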
651 struct symbol *maps__find_symbol(struct maps *maps, u64 addr, struct map **mapp)
652 {
653 	struct map *map = maps__find(maps, addr);
654 
655 	/* Ensure map is loaded before using map->map_ip */
656 	if (map != NULL && map__load(map) >= 0) {
657 		if (mapp != NULL)
658 			*mapp = map;
659 		return map__find_symbol(map, map->map_ip(map, addr));
660 	}
661 
662 	return NULL;
663 }
664 
665 static bool map__contains_symbol(struct map *map, struct symbol *sym)
666 {
667 	u64 ip = map->unmap_ip(map, sym->start);
668 
669 	return ip >= map->start && ip < map->end;
670 }
671 
672 struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, struct map **mapp)
673 {
674 	struct symbol *sym;
675 	struct map *pos;
676 
677 	down_read(&maps->lock);
678 
679 	maps__for_each_entry(maps, pos) {
680 		sym = map__find_symbol_by_name(pos, name);
681 
682 		if (sym == NULL)
683 			continue;
684 		if (!map__contains_symbol(pos, sym)) {
685 			sym = NULL;
686 			continue;
687 		}
688 		if (mapp != NULL)
689 			*mapp = pos;
690 		goto out;
691 	}
692 
693 	sym = NULL;
694 out:
695 	up_read(&maps->lock);
696 	return sym;
697 }
698 
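/*
 * Re-resolve an addr_map_symbol whose address may no longer fall inside its
 * current map; fills in the map-relative al_addr and the matching symbol.
 */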
699 int maps__find_ams(struct maps *maps, struct addr_map_symbol *ams)
700 {
701 	if (ams->addr < ams->ms.map->start || ams->addr >= ams->ms.map->end) {
702 		if (maps == NULL)
703 			return -1;
704 		ams->ms.map = maps__find(maps, ams->addr);
705 		if (ams->ms.map == NULL)
706 			return -1;
707 	}
708 
709 	ams->al_addr = ams->ms.map->map_ip(ams->ms.map, ams->addr);
710 	ams->ms.sym = map__find_symbol(ams->ms.map, ams->al_addr);
711 
712 	return ams->ms.sym ? 0 : -1;
713 }
714 
715 size_t maps__fprintf(struct maps *maps, FILE *fp)
716 {
717 	size_t printed = 0;
718 	struct map *pos;
719 
720 	down_read(&maps->lock);
721 
722 	maps__for_each_entry(maps, pos) {
723 		printed += fprintf(fp, "Map:");
724 		printed += map__fprintf(pos, fp);
725 		if (verbose > 2) {
726 			printed += dso__fprintf(pos->dso, fp);
727 			printed += fprintf(fp, "--\n");
728 		}
729 	}
730 
731 	up_read(&maps->lock);
732 
733 	return printed;
734 }
735 
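/*
 * Make room for a new map: remove every existing map that overlaps it and
 * re-insert clipped "before"/"after" clones for the parts the new map does
 * not cover.  Overlapping maps are reported when verbose >= 2.
 */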
736 int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
737 {
738 	struct rb_root *root;
739 	struct rb_node *next, *first;
740 	int err = 0;
741 
742 	down_write(&maps->lock);
743 
744 	root = &maps->entries;
745 
746 	/*
747 	 * Find first map where end > map->start.
748 	 * Same as find_vma() in kernel.
749 	 */
750 	next = root->rb_node;
751 	first = NULL;
752 	while (next) {
753 		struct map *pos = rb_entry(next, struct map, rb_node);
754 
755 		if (pos->end > map->start) {
756 			first = next;
757 			if (pos->start <= map->start)
758 				break;
759 			next = next->rb_left;
760 		} else
761 			next = next->rb_right;
762 	}
763 
764 	next = first;
765 	while (next) {
766 		struct map *pos = rb_entry(next, struct map, rb_node);
767 		next = rb_next(&pos->rb_node);
768 
769 		/*
770 		 * Stop if the current map starts at or after map->end.
771 		 * Maps are ordered by start, so later ones cannot overlap.
772 		 */
773 		if (pos->start >= map->end)
774 			break;
775 
776 		if (verbose >= 2) {
777 
778 			if (use_browser) {
779 				pr_debug("overlapping maps in %s (disable tui for more info)\n",
780 					   map->dso->name);
781 			} else {
782 				fputs("overlapping maps:\n", fp);
783 				map__fprintf(map, fp);
784 				map__fprintf(pos, fp);
785 			}
786 		}
787 
788 		rb_erase_init(&pos->rb_node, root);
789 		/*
790 		 * Now check if we need to create new maps for areas not
791 		 * overlapped by the new map:
792 		 */
793 		if (map->start > pos->start) {
794 			struct map *before = map__clone(pos);
795 
796 			if (before == NULL) {
797 				err = -ENOMEM;
798 				goto put_map;
799 			}
800 
801 			before->end = map->start;
802 			__maps__insert(maps, before);
803 			if (verbose >= 2 && !use_browser)
804 				map__fprintf(before, fp);
805 			map__put(before);
806 		}
807 
808 		if (map->end < pos->end) {
809 			struct map *after = map__clone(pos);
810 
811 			if (after == NULL) {
812 				err = -ENOMEM;
813 				goto put_map;
814 			}
815 
816 			after->start = map->end;
817 			after->pgoff += map->end - pos->start;
818 			assert(pos->map_ip(pos, map->end) == after->map_ip(after, map->end));
819 			__maps__insert(maps, after);
820 			if (verbose >= 2 && !use_browser)
821 				map__fprintf(after, fp);
822 			map__put(after);
823 		}
824 put_map:
825 		map__put(pos);
826 
827 		if (err)
828 			goto out;
829 	}
830 
831 	err = 0;
832 out:
833 	up_write(&maps->lock);
834 	return err;
835 }
836 
837 /*
838  * XXX This should not really _copy_ the maps, but refcount them.
839  */
840 int maps__clone(struct thread *thread, struct maps *parent)
841 {
842 	struct maps *maps = thread->maps;
843 	int err;
844 	struct map *map;
845 
846 	down_read(&parent->lock);
847 
848 	maps__for_each_entry(parent, map) {
849 		struct map *new = map__clone(map);
850 
851 		if (new == NULL) {
852 			err = -ENOMEM;
853 			goto out_unlock;
854 		}
855 
856 		err = unwind__prepare_access(maps, new, NULL);
857 		if (err)
858 			goto out_unlock;
859 
860 		maps__insert(maps, new);
861 		map__put(new);
862 	}
863 
864 	err = 0;
865 out_unlock:
866 	up_read(&parent->lock);
867 	return err;
868 }
869 
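/*
 * Insert into the rbtree ordered by map->start and take a reference on the
 * map.  The caller must hold maps->lock for writing.
 */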
870 static void __maps__insert(struct maps *maps, struct map *map)
871 {
872 	struct rb_node **p = &maps->entries.rb_node;
873 	struct rb_node *parent = NULL;
874 	const u64 ip = map->start;
875 	struct map *m;
876 
877 	while (*p != NULL) {
878 		parent = *p;
879 		m = rb_entry(parent, struct map, rb_node);
880 		if (ip < m->start)
881 			p = &(*p)->rb_left;
882 		else
883 			p = &(*p)->rb_right;
884 	}
885 
886 	rb_link_node(&map->rb_node, parent, p);
887 	rb_insert_color(&map->rb_node, &maps->entries);
888 	map__get(map);
889 }
890 
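/* Find the map covering @ip, i.e. with start <= ip < end, or NULL if none. */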
891 struct map *maps__find(struct maps *maps, u64 ip)
892 {
893 	struct rb_node *p;
894 	struct map *m;
895 
896 	down_read(&maps->lock);
897 
898 	p = maps->entries.rb_node;
899 	while (p != NULL) {
900 		m = rb_entry(p, struct map, rb_node);
901 		if (ip < m->start)
902 			p = p->rb_left;
903 		else if (ip >= m->end)
904 			p = p->rb_right;
905 		else
906 			goto out;
907 	}
908 
909 	m = NULL;
910 out:
911 	up_read(&maps->lock);
912 	return m;
913 }
914 
915 struct map *maps__first(struct maps *maps)
916 {
917 	struct rb_node *first = rb_first(&maps->entries);
918 
919 	if (first)
920 		return rb_entry(first, struct map, rb_node);
921 	return NULL;
922 }
923 
924 static struct map *__map__next(struct map *map)
925 {
926 	struct rb_node *next = rb_next(&map->rb_node);
927 
928 	if (next)
929 		return rb_entry(next, struct map, rb_node);
930 	return NULL;
931 }
932 
933 struct map *map__next(struct map *map)
934 {
935 	return map ? __map__next(map) : NULL;
936 }
937 
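/*
 * The struct kmap of a kernel map is allocated right after struct map, see
 * map__new2() and map__clone().
 */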
938 struct kmap *__map__kmap(struct map *map)
939 {
940 	if (!map->dso || !map->dso->kernel)
941 		return NULL;
942 	return (struct kmap *)(map + 1);
943 }
944 
945 struct kmap *map__kmap(struct map *map)
946 {
947 	struct kmap *kmap = __map__kmap(map);
948 
949 	if (!kmap)
950 		pr_err("Internal error: map__kmap with a non-kernel map\n");
951 	return kmap;
952 }
953 
954 struct maps *map__kmaps(struct map *map)
955 {
956 	struct kmap *kmap = map__kmap(map);
957 
958 	if (!kmap || !kmap->kmaps) {
959 		pr_err("Internal error: map__kmaps with a non-kernel map\n");
960 		return NULL;
961 	}
962 	return kmap->kmaps;
963 }
964