1 // SPDX-License-Identifier: GPL-2.0 2 #include "symbol.h" 3 #include <errno.h> 4 #include <inttypes.h> 5 #include <limits.h> 6 #include <stdlib.h> 7 #include <string.h> 8 #include <stdio.h> 9 #include <unistd.h> 10 #include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */ 11 #include "map.h" 12 #include "thread.h" 13 #include "vdso.h" 14 #include "build-id.h" 15 #include "debug.h" 16 #include "machine.h" 17 #include <linux/string.h> 18 #include <linux/zalloc.h> 19 #include "srcline.h" 20 #include "namespaces.h" 21 #include "unwind.h" 22 #include "srccode.h" 23 24 static void __maps__insert(struct maps *maps, struct map *map); 25 static void __maps__insert_name(struct maps *maps, struct map *map); 26 27 static inline int is_anon_memory(const char *filename, u32 flags) 28 { 29 return flags & MAP_HUGETLB || 30 !strcmp(filename, "//anon") || 31 !strncmp(filename, "/dev/zero", sizeof("/dev/zero") - 1) || 32 !strncmp(filename, "/anon_hugepage", sizeof("/anon_hugepage") - 1); 33 } 34 35 static inline int is_no_dso_memory(const char *filename) 36 { 37 return !strncmp(filename, "[stack", 6) || 38 !strncmp(filename, "/SYSV",5) || 39 !strcmp(filename, "[heap]"); 40 } 41 42 static inline int is_android_lib(const char *filename) 43 { 44 return !strncmp(filename, "/data/app-lib", 13) || 45 !strncmp(filename, "/system/lib", 11); 46 } 47 48 static inline bool replace_android_lib(const char *filename, char *newfilename) 49 { 50 const char *libname; 51 char *app_abi; 52 size_t app_abi_length, new_length; 53 size_t lib_length = 0; 54 55 libname = strrchr(filename, '/'); 56 if (libname) 57 lib_length = strlen(libname); 58 59 app_abi = getenv("APP_ABI"); 60 if (!app_abi) 61 return false; 62 63 app_abi_length = strlen(app_abi); 64 65 if (!strncmp(filename, "/data/app-lib", 13)) { 66 char *apk_path; 67 68 if (!app_abi_length) 69 return false; 70 71 new_length = 7 + app_abi_length + lib_length; 72 73 apk_path = getenv("APK_PATH"); 74 if (apk_path) { 75 new_length += strlen(apk_path) + 1; 76 if (new_length > PATH_MAX) 77 return false; 78 snprintf(newfilename, new_length, 79 "%s/libs/%s/%s", apk_path, app_abi, libname); 80 } else { 81 if (new_length > PATH_MAX) 82 return false; 83 snprintf(newfilename, new_length, 84 "libs/%s/%s", app_abi, libname); 85 } 86 return true; 87 } 88 89 if (!strncmp(filename, "/system/lib/", 11)) { 90 char *ndk, *app; 91 const char *arch; 92 size_t ndk_length; 93 size_t app_length; 94 95 ndk = getenv("NDK_ROOT"); 96 app = getenv("APP_PLATFORM"); 97 98 if (!(ndk && app)) 99 return false; 100 101 ndk_length = strlen(ndk); 102 app_length = strlen(app); 103 104 if (!(ndk_length && app_length && app_abi_length)) 105 return false; 106 107 arch = !strncmp(app_abi, "arm", 3) ? "arm" : 108 !strncmp(app_abi, "mips", 4) ? "mips" : 109 !strncmp(app_abi, "x86", 3) ? "x86" : NULL; 110 111 if (!arch) 112 return false; 113 114 new_length = 27 + ndk_length + 115 app_length + lib_length 116 + strlen(arch); 117 118 if (new_length > PATH_MAX) 119 return false; 120 snprintf(newfilename, new_length, 121 "%s/platforms/%s/arch-%s/usr/lib/%s", 122 ndk, app, arch, libname); 123 124 return true; 125 } 126 return false; 127 } 128 129 void map__init(struct map *map, u64 start, u64 end, u64 pgoff, struct dso *dso) 130 { 131 map->start = start; 132 map->end = end; 133 map->pgoff = pgoff; 134 map->reloc = 0; 135 map->dso = dso__get(dso); 136 map->map_ip = map__map_ip; 137 map->unmap_ip = map__unmap_ip; 138 RB_CLEAR_NODE(&map->rb_node); 139 map->groups = NULL; 140 map->erange_warned = false; 141 refcount_set(&map->refcnt, 1); 142 } 143 144 struct map *map__new(struct machine *machine, u64 start, u64 len, 145 u64 pgoff, u32 d_maj, u32 d_min, u64 ino, 146 u64 ino_gen, u32 prot, u32 flags, char *filename, 147 struct thread *thread) 148 { 149 struct map *map = malloc(sizeof(*map)); 150 struct nsinfo *nsi = NULL; 151 struct nsinfo *nnsi; 152 153 if (map != NULL) { 154 char newfilename[PATH_MAX]; 155 struct dso *dso; 156 int anon, no_dso, vdso, android; 157 158 android = is_android_lib(filename); 159 anon = is_anon_memory(filename, flags); 160 vdso = is_vdso_map(filename); 161 no_dso = is_no_dso_memory(filename); 162 163 map->maj = d_maj; 164 map->min = d_min; 165 map->ino = ino; 166 map->ino_generation = ino_gen; 167 map->prot = prot; 168 map->flags = flags; 169 nsi = nsinfo__get(thread->nsinfo); 170 171 if ((anon || no_dso) && nsi && (prot & PROT_EXEC)) { 172 snprintf(newfilename, sizeof(newfilename), 173 "/tmp/perf-%d.map", nsi->pid); 174 filename = newfilename; 175 } 176 177 if (android) { 178 if (replace_android_lib(filename, newfilename)) 179 filename = newfilename; 180 } 181 182 if (vdso) { 183 /* The vdso maps are always on the host and not the 184 * container. Ensure that we don't use setns to look 185 * them up. 186 */ 187 nnsi = nsinfo__copy(nsi); 188 if (nnsi) { 189 nsinfo__put(nsi); 190 nnsi->need_setns = false; 191 nsi = nnsi; 192 } 193 pgoff = 0; 194 dso = machine__findnew_vdso(machine, thread); 195 } else 196 dso = machine__findnew_dso(machine, filename); 197 198 if (dso == NULL) 199 goto out_delete; 200 201 map__init(map, start, start + len, pgoff, dso); 202 203 if (anon || no_dso) { 204 map->map_ip = map->unmap_ip = identity__map_ip; 205 206 /* 207 * Set memory without DSO as loaded. All map__find_* 208 * functions still return NULL, and we avoid the 209 * unnecessary map__load warning. 210 */ 211 if (!(prot & PROT_EXEC)) 212 dso__set_loaded(dso); 213 } 214 dso->nsinfo = nsi; 215 dso__put(dso); 216 } 217 return map; 218 out_delete: 219 nsinfo__put(nsi); 220 free(map); 221 return NULL; 222 } 223 224 /* 225 * Constructor variant for modules (where we know from /proc/modules where 226 * they are loaded) and for vmlinux, where only after we load all the 227 * symbols we'll know where it starts and ends. 228 */ 229 struct map *map__new2(u64 start, struct dso *dso) 230 { 231 struct map *map = calloc(1, (sizeof(*map) + 232 (dso->kernel ? sizeof(struct kmap) : 0))); 233 if (map != NULL) { 234 /* 235 * ->end will be filled after we load all the symbols 236 */ 237 map__init(map, start, 0, 0, dso); 238 } 239 240 return map; 241 } 242 243 /* 244 * Use this and __map__is_kmodule() for map instances that are in 245 * machine->kmaps, and thus have map->groups->machine all properly set, to 246 * disambiguate between the kernel and modules. 247 * 248 * When the need arises, introduce map__is_{kernel,kmodule)() that 249 * checks (map->groups != NULL && map->groups->machine != NULL && 250 * map->dso->kernel) before calling __map__is_{kernel,kmodule}()) 251 */ 252 bool __map__is_kernel(const struct map *map) 253 { 254 return machine__kernel_map(map->groups->machine) == map; 255 } 256 257 bool __map__is_extra_kernel_map(const struct map *map) 258 { 259 struct kmap *kmap = __map__kmap((struct map *)map); 260 261 return kmap && kmap->name[0]; 262 } 263 264 bool __map__is_bpf_prog(const struct map *map) 265 { 266 const char *name; 267 268 if (map->dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO) 269 return true; 270 271 /* 272 * If PERF_RECORD_BPF_EVENT is not included, the dso will not have 273 * type of DSO_BINARY_TYPE__BPF_PROG_INFO. In such cases, we can 274 * guess the type based on name. 275 */ 276 name = map->dso->short_name; 277 return name && (strstr(name, "bpf_prog_") == name); 278 } 279 280 bool map__has_symbols(const struct map *map) 281 { 282 return dso__has_symbols(map->dso); 283 } 284 285 static void map__exit(struct map *map) 286 { 287 BUG_ON(!RB_EMPTY_NODE(&map->rb_node)); 288 dso__zput(map->dso); 289 } 290 291 void map__delete(struct map *map) 292 { 293 map__exit(map); 294 free(map); 295 } 296 297 void map__put(struct map *map) 298 { 299 if (map && refcount_dec_and_test(&map->refcnt)) 300 map__delete(map); 301 } 302 303 void map__fixup_start(struct map *map) 304 { 305 struct rb_root_cached *symbols = &map->dso->symbols; 306 struct rb_node *nd = rb_first_cached(symbols); 307 if (nd != NULL) { 308 struct symbol *sym = rb_entry(nd, struct symbol, rb_node); 309 map->start = sym->start; 310 } 311 } 312 313 void map__fixup_end(struct map *map) 314 { 315 struct rb_root_cached *symbols = &map->dso->symbols; 316 struct rb_node *nd = rb_last(&symbols->rb_root); 317 if (nd != NULL) { 318 struct symbol *sym = rb_entry(nd, struct symbol, rb_node); 319 map->end = sym->end; 320 } 321 } 322 323 #define DSO__DELETED "(deleted)" 324 325 int map__load(struct map *map) 326 { 327 const char *name = map->dso->long_name; 328 int nr; 329 330 if (dso__loaded(map->dso)) 331 return 0; 332 333 nr = dso__load(map->dso, map); 334 if (nr < 0) { 335 if (map->dso->has_build_id) { 336 char sbuild_id[SBUILD_ID_SIZE]; 337 338 build_id__sprintf(map->dso->build_id, 339 sizeof(map->dso->build_id), 340 sbuild_id); 341 pr_debug("%s with build id %s not found", name, sbuild_id); 342 } else 343 pr_debug("Failed to open %s", name); 344 345 pr_debug(", continuing without symbols\n"); 346 return -1; 347 } else if (nr == 0) { 348 #ifdef HAVE_LIBELF_SUPPORT 349 const size_t len = strlen(name); 350 const size_t real_len = len - sizeof(DSO__DELETED); 351 352 if (len > sizeof(DSO__DELETED) && 353 strcmp(name + real_len + 1, DSO__DELETED) == 0) { 354 pr_debug("%.*s was updated (is prelink enabled?). " 355 "Restart the long running apps that use it!\n", 356 (int)real_len, name); 357 } else { 358 pr_debug("no symbols found in %s, maybe install a debug package?\n", name); 359 } 360 #endif 361 return -1; 362 } 363 364 return 0; 365 } 366 367 struct symbol *map__find_symbol(struct map *map, u64 addr) 368 { 369 if (map__load(map) < 0) 370 return NULL; 371 372 return dso__find_symbol(map->dso, addr); 373 } 374 375 struct symbol *map__find_symbol_by_name(struct map *map, const char *name) 376 { 377 if (map__load(map) < 0) 378 return NULL; 379 380 if (!dso__sorted_by_name(map->dso)) 381 dso__sort_by_name(map->dso); 382 383 return dso__find_symbol_by_name(map->dso, name); 384 } 385 386 struct map *map__clone(struct map *from) 387 { 388 struct map *map = memdup(from, sizeof(*map)); 389 390 if (map != NULL) { 391 refcount_set(&map->refcnt, 1); 392 RB_CLEAR_NODE(&map->rb_node); 393 dso__get(map->dso); 394 map->groups = NULL; 395 } 396 397 return map; 398 } 399 400 size_t map__fprintf(struct map *map, FILE *fp) 401 { 402 return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n", 403 map->start, map->end, map->pgoff, map->dso->name); 404 } 405 406 size_t map__fprintf_dsoname(struct map *map, FILE *fp) 407 { 408 char buf[symbol_conf.pad_output_len_dso + 1]; 409 const char *dsoname = "[unknown]"; 410 411 if (map && map->dso) { 412 if (symbol_conf.show_kernel_path && map->dso->long_name) 413 dsoname = map->dso->long_name; 414 else 415 dsoname = map->dso->name; 416 } 417 418 if (symbol_conf.pad_output_len_dso) { 419 scnprintf_pad(buf, symbol_conf.pad_output_len_dso, "%s", dsoname); 420 dsoname = buf; 421 } 422 423 return fprintf(fp, "%s", dsoname); 424 } 425 426 char *map__srcline(struct map *map, u64 addr, struct symbol *sym) 427 { 428 if (map == NULL) 429 return SRCLINE_UNKNOWN; 430 return get_srcline(map->dso, map__rip_2objdump(map, addr), sym, true, true, addr); 431 } 432 433 int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix, 434 FILE *fp) 435 { 436 int ret = 0; 437 438 if (map && map->dso) { 439 char *srcline = map__srcline(map, addr, NULL); 440 if (srcline != SRCLINE_UNKNOWN) 441 ret = fprintf(fp, "%s%s", prefix, srcline); 442 free_srcline(srcline); 443 } 444 return ret; 445 } 446 447 int map__fprintf_srccode(struct map *map, u64 addr, 448 FILE *fp, 449 struct srccode_state *state) 450 { 451 char *srcfile; 452 int ret = 0; 453 unsigned line; 454 int len; 455 char *srccode; 456 457 if (!map || !map->dso) 458 return 0; 459 srcfile = get_srcline_split(map->dso, 460 map__rip_2objdump(map, addr), 461 &line); 462 if (!srcfile) 463 return 0; 464 465 /* Avoid redundant printing */ 466 if (state && 467 state->srcfile && 468 !strcmp(state->srcfile, srcfile) && 469 state->line == line) { 470 free(srcfile); 471 return 0; 472 } 473 474 srccode = find_sourceline(srcfile, line, &len); 475 if (!srccode) 476 goto out_free_line; 477 478 ret = fprintf(fp, "|%-8d %.*s", line, len, srccode); 479 480 if (state) { 481 state->srcfile = srcfile; 482 state->line = line; 483 } 484 return ret; 485 486 out_free_line: 487 free(srcfile); 488 return ret; 489 } 490 491 492 void srccode_state_free(struct srccode_state *state) 493 { 494 zfree(&state->srcfile); 495 state->line = 0; 496 } 497 498 /** 499 * map__rip_2objdump - convert symbol start address to objdump address. 500 * @map: memory map 501 * @rip: symbol start address 502 * 503 * objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN. 504 * map->dso->adjust_symbols==1 for ET_EXEC-like cases except ET_REL which is 505 * relative to section start. 506 * 507 * Return: Address suitable for passing to "objdump --start-address=" 508 */ 509 u64 map__rip_2objdump(struct map *map, u64 rip) 510 { 511 struct kmap *kmap = __map__kmap(map); 512 513 /* 514 * vmlinux does not have program headers for PTI entry trampolines and 515 * kcore may not either. However the trampoline object code is on the 516 * main kernel map, so just use that instead. 517 */ 518 if (kmap && is_entry_trampoline(kmap->name) && kmap->kmaps && kmap->kmaps->machine) { 519 struct map *kernel_map = machine__kernel_map(kmap->kmaps->machine); 520 521 if (kernel_map) 522 map = kernel_map; 523 } 524 525 if (!map->dso->adjust_symbols) 526 return rip; 527 528 if (map->dso->rel) 529 return rip - map->pgoff; 530 531 /* 532 * kernel modules also have DSO_TYPE_USER in dso->kernel, 533 * but all kernel modules are ET_REL, so won't get here. 534 */ 535 if (map->dso->kernel == DSO_TYPE_USER) 536 return rip + map->dso->text_offset; 537 538 return map->unmap_ip(map, rip) - map->reloc; 539 } 540 541 /** 542 * map__objdump_2mem - convert objdump address to a memory address. 543 * @map: memory map 544 * @ip: objdump address 545 * 546 * Closely related to map__rip_2objdump(), this function takes an address from 547 * objdump and converts it to a memory address. Note this assumes that @map 548 * contains the address. To be sure the result is valid, check it forwards 549 * e.g. map__rip_2objdump(map->map_ip(map, map__objdump_2mem(map, ip))) == ip 550 * 551 * Return: Memory address. 552 */ 553 u64 map__objdump_2mem(struct map *map, u64 ip) 554 { 555 if (!map->dso->adjust_symbols) 556 return map->unmap_ip(map, ip); 557 558 if (map->dso->rel) 559 return map->unmap_ip(map, ip + map->pgoff); 560 561 /* 562 * kernel modules also have DSO_TYPE_USER in dso->kernel, 563 * but all kernel modules are ET_REL, so won't get here. 564 */ 565 if (map->dso->kernel == DSO_TYPE_USER) 566 return map->unmap_ip(map, ip - map->dso->text_offset); 567 568 return ip + map->reloc; 569 } 570 571 static void maps__init(struct maps *maps) 572 { 573 maps->entries = RB_ROOT; 574 maps->names = RB_ROOT; 575 init_rwsem(&maps->lock); 576 } 577 578 void map_groups__init(struct map_groups *mg, struct machine *machine) 579 { 580 maps__init(&mg->maps); 581 mg->machine = machine; 582 refcount_set(&mg->refcnt, 1); 583 } 584 585 void map_groups__insert(struct map_groups *mg, struct map *map) 586 { 587 maps__insert(&mg->maps, map); 588 map->groups = mg; 589 } 590 591 static void __maps__purge(struct maps *maps) 592 { 593 struct rb_root *root = &maps->entries; 594 struct rb_node *next = rb_first(root); 595 596 while (next) { 597 struct map *pos = rb_entry(next, struct map, rb_node); 598 599 next = rb_next(&pos->rb_node); 600 rb_erase_init(&pos->rb_node, root); 601 map__put(pos); 602 } 603 } 604 605 static void __maps__purge_names(struct maps *maps) 606 { 607 struct rb_root *root = &maps->names; 608 struct rb_node *next = rb_first(root); 609 610 while (next) { 611 struct map *pos = rb_entry(next, struct map, rb_node_name); 612 613 next = rb_next(&pos->rb_node_name); 614 rb_erase_init(&pos->rb_node_name, root); 615 map__put(pos); 616 } 617 } 618 619 static void maps__exit(struct maps *maps) 620 { 621 down_write(&maps->lock); 622 __maps__purge(maps); 623 __maps__purge_names(maps); 624 up_write(&maps->lock); 625 } 626 627 void map_groups__exit(struct map_groups *mg) 628 { 629 maps__exit(&mg->maps); 630 } 631 632 bool map_groups__empty(struct map_groups *mg) 633 { 634 return !maps__first(&mg->maps); 635 } 636 637 struct map_groups *map_groups__new(struct machine *machine) 638 { 639 struct map_groups *mg = malloc(sizeof(*mg)); 640 641 if (mg != NULL) 642 map_groups__init(mg, machine); 643 644 return mg; 645 } 646 647 void map_groups__delete(struct map_groups *mg) 648 { 649 map_groups__exit(mg); 650 free(mg); 651 } 652 653 void map_groups__put(struct map_groups *mg) 654 { 655 if (mg && refcount_dec_and_test(&mg->refcnt)) 656 map_groups__delete(mg); 657 } 658 659 struct symbol *map_groups__find_symbol(struct map_groups *mg, 660 u64 addr, struct map **mapp) 661 { 662 struct map *map = map_groups__find(mg, addr); 663 664 /* Ensure map is loaded before using map->map_ip */ 665 if (map != NULL && map__load(map) >= 0) { 666 if (mapp != NULL) 667 *mapp = map; 668 return map__find_symbol(map, map->map_ip(map, addr)); 669 } 670 671 return NULL; 672 } 673 674 static bool map__contains_symbol(struct map *map, struct symbol *sym) 675 { 676 u64 ip = map->unmap_ip(map, sym->start); 677 678 return ip >= map->start && ip < map->end; 679 } 680 681 struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, 682 struct map **mapp) 683 { 684 struct symbol *sym; 685 struct rb_node *nd; 686 687 down_read(&maps->lock); 688 689 for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) { 690 struct map *pos = rb_entry(nd, struct map, rb_node); 691 692 sym = map__find_symbol_by_name(pos, name); 693 694 if (sym == NULL) 695 continue; 696 if (!map__contains_symbol(pos, sym)) { 697 sym = NULL; 698 continue; 699 } 700 if (mapp != NULL) 701 *mapp = pos; 702 goto out; 703 } 704 705 sym = NULL; 706 out: 707 up_read(&maps->lock); 708 return sym; 709 } 710 711 struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg, 712 const char *name, 713 struct map **mapp) 714 { 715 return maps__find_symbol_by_name(&mg->maps, name, mapp); 716 } 717 718 int map_groups__find_ams(struct addr_map_symbol *ams) 719 { 720 if (ams->addr < ams->map->start || ams->addr >= ams->map->end) { 721 if (ams->map->groups == NULL) 722 return -1; 723 ams->map = map_groups__find(ams->map->groups, ams->addr); 724 if (ams->map == NULL) 725 return -1; 726 } 727 728 ams->al_addr = ams->map->map_ip(ams->map, ams->addr); 729 ams->sym = map__find_symbol(ams->map, ams->al_addr); 730 731 return ams->sym ? 0 : -1; 732 } 733 734 static size_t maps__fprintf(struct maps *maps, FILE *fp) 735 { 736 size_t printed = 0; 737 struct rb_node *nd; 738 739 down_read(&maps->lock); 740 741 for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) { 742 struct map *pos = rb_entry(nd, struct map, rb_node); 743 printed += fprintf(fp, "Map:"); 744 printed += map__fprintf(pos, fp); 745 if (verbose > 2) { 746 printed += dso__fprintf(pos->dso, fp); 747 printed += fprintf(fp, "--\n"); 748 } 749 } 750 751 up_read(&maps->lock); 752 753 return printed; 754 } 755 756 size_t map_groups__fprintf(struct map_groups *mg, FILE *fp) 757 { 758 return maps__fprintf(&mg->maps, fp); 759 } 760 761 static void __map_groups__insert(struct map_groups *mg, struct map *map) 762 { 763 __maps__insert(&mg->maps, map); 764 __maps__insert_name(&mg->maps, map); 765 map->groups = mg; 766 } 767 768 static int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp) 769 { 770 struct rb_root *root; 771 struct rb_node *next, *first; 772 int err = 0; 773 774 down_write(&maps->lock); 775 776 root = &maps->entries; 777 778 /* 779 * Find first map where end > map->start. 780 * Same as find_vma() in kernel. 781 */ 782 next = root->rb_node; 783 first = NULL; 784 while (next) { 785 struct map *pos = rb_entry(next, struct map, rb_node); 786 787 if (pos->end > map->start) { 788 first = next; 789 if (pos->start <= map->start) 790 break; 791 next = next->rb_left; 792 } else 793 next = next->rb_right; 794 } 795 796 next = first; 797 while (next) { 798 struct map *pos = rb_entry(next, struct map, rb_node); 799 next = rb_next(&pos->rb_node); 800 801 /* 802 * Stop if current map starts after map->end. 803 * Maps are ordered by start: next will not overlap for sure. 804 */ 805 if (pos->start >= map->end) 806 break; 807 808 if (verbose >= 2) { 809 810 if (use_browser) { 811 pr_debug("overlapping maps in %s (disable tui for more info)\n", 812 map->dso->name); 813 } else { 814 fputs("overlapping maps:\n", fp); 815 map__fprintf(map, fp); 816 map__fprintf(pos, fp); 817 } 818 } 819 820 rb_erase_init(&pos->rb_node, root); 821 /* 822 * Now check if we need to create new maps for areas not 823 * overlapped by the new map: 824 */ 825 if (map->start > pos->start) { 826 struct map *before = map__clone(pos); 827 828 if (before == NULL) { 829 err = -ENOMEM; 830 goto put_map; 831 } 832 833 before->end = map->start; 834 __map_groups__insert(pos->groups, before); 835 if (verbose >= 2 && !use_browser) 836 map__fprintf(before, fp); 837 map__put(before); 838 } 839 840 if (map->end < pos->end) { 841 struct map *after = map__clone(pos); 842 843 if (after == NULL) { 844 err = -ENOMEM; 845 goto put_map; 846 } 847 848 after->start = map->end; 849 __map_groups__insert(pos->groups, after); 850 if (verbose >= 2 && !use_browser) 851 map__fprintf(after, fp); 852 map__put(after); 853 } 854 put_map: 855 map__put(pos); 856 857 if (err) 858 goto out; 859 } 860 861 err = 0; 862 out: 863 up_write(&maps->lock); 864 return err; 865 } 866 867 int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map, 868 FILE *fp) 869 { 870 return maps__fixup_overlappings(&mg->maps, map, fp); 871 } 872 873 /* 874 * XXX This should not really _copy_ te maps, but refcount them. 875 */ 876 int map_groups__clone(struct thread *thread, struct map_groups *parent) 877 { 878 struct map_groups *mg = thread->mg; 879 int err = -ENOMEM; 880 struct map *map; 881 struct maps *maps = &parent->maps; 882 883 down_read(&maps->lock); 884 885 for (map = maps__first(maps); map; map = map__next(map)) { 886 struct map *new = map__clone(map); 887 if (new == NULL) 888 goto out_unlock; 889 890 err = unwind__prepare_access(thread, new, NULL); 891 if (err) 892 goto out_unlock; 893 894 map_groups__insert(mg, new); 895 map__put(new); 896 } 897 898 err = 0; 899 out_unlock: 900 up_read(&maps->lock); 901 return err; 902 } 903 904 static void __maps__insert(struct maps *maps, struct map *map) 905 { 906 struct rb_node **p = &maps->entries.rb_node; 907 struct rb_node *parent = NULL; 908 const u64 ip = map->start; 909 struct map *m; 910 911 while (*p != NULL) { 912 parent = *p; 913 m = rb_entry(parent, struct map, rb_node); 914 if (ip < m->start) 915 p = &(*p)->rb_left; 916 else 917 p = &(*p)->rb_right; 918 } 919 920 rb_link_node(&map->rb_node, parent, p); 921 rb_insert_color(&map->rb_node, &maps->entries); 922 map__get(map); 923 } 924 925 static void __maps__insert_name(struct maps *maps, struct map *map) 926 { 927 struct rb_node **p = &maps->names.rb_node; 928 struct rb_node *parent = NULL; 929 struct map *m; 930 int rc; 931 932 while (*p != NULL) { 933 parent = *p; 934 m = rb_entry(parent, struct map, rb_node_name); 935 rc = strcmp(m->dso->short_name, map->dso->short_name); 936 if (rc < 0) 937 p = &(*p)->rb_left; 938 else 939 p = &(*p)->rb_right; 940 } 941 rb_link_node(&map->rb_node_name, parent, p); 942 rb_insert_color(&map->rb_node_name, &maps->names); 943 map__get(map); 944 } 945 946 void maps__insert(struct maps *maps, struct map *map) 947 { 948 down_write(&maps->lock); 949 __maps__insert(maps, map); 950 __maps__insert_name(maps, map); 951 up_write(&maps->lock); 952 } 953 954 static void __maps__remove(struct maps *maps, struct map *map) 955 { 956 rb_erase_init(&map->rb_node, &maps->entries); 957 map__put(map); 958 959 rb_erase_init(&map->rb_node_name, &maps->names); 960 map__put(map); 961 } 962 963 void maps__remove(struct maps *maps, struct map *map) 964 { 965 down_write(&maps->lock); 966 __maps__remove(maps, map); 967 up_write(&maps->lock); 968 } 969 970 struct map *maps__find(struct maps *maps, u64 ip) 971 { 972 struct rb_node *p; 973 struct map *m; 974 975 down_read(&maps->lock); 976 977 p = maps->entries.rb_node; 978 while (p != NULL) { 979 m = rb_entry(p, struct map, rb_node); 980 if (ip < m->start) 981 p = p->rb_left; 982 else if (ip >= m->end) 983 p = p->rb_right; 984 else 985 goto out; 986 } 987 988 m = NULL; 989 out: 990 up_read(&maps->lock); 991 return m; 992 } 993 994 struct map *maps__first(struct maps *maps) 995 { 996 struct rb_node *first = rb_first(&maps->entries); 997 998 if (first) 999 return rb_entry(first, struct map, rb_node); 1000 return NULL; 1001 } 1002 1003 struct map *map__next(struct map *map) 1004 { 1005 struct rb_node *next = rb_next(&map->rb_node); 1006 1007 if (next) 1008 return rb_entry(next, struct map, rb_node); 1009 return NULL; 1010 } 1011 1012 struct kmap *__map__kmap(struct map *map) 1013 { 1014 if (!map->dso || !map->dso->kernel) 1015 return NULL; 1016 return (struct kmap *)(map + 1); 1017 } 1018 1019 struct kmap *map__kmap(struct map *map) 1020 { 1021 struct kmap *kmap = __map__kmap(map); 1022 1023 if (!kmap) 1024 pr_err("Internal error: map__kmap with a non-kernel map\n"); 1025 return kmap; 1026 } 1027 1028 struct map_groups *map__kmaps(struct map *map) 1029 { 1030 struct kmap *kmap = map__kmap(map); 1031 1032 if (!kmap || !kmap->kmaps) { 1033 pr_err("Internal error: map__kmaps with a non-kernel map\n"); 1034 return NULL; 1035 } 1036 return kmap->kmaps; 1037 } 1038