// SPDX-License-Identifier: GPL-2.0
#include "symbol.h"
#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>
#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
#include "map.h"
#include "thread.h"
#include "vdso.h"
#include "build-id.h"
#include "util.h"
#include "debug.h"
#include "machine.h"
#include <linux/string.h>
#include "srcline.h"
#include "namespaces.h"
#include "unwind.h"

static void __maps__insert(struct maps *maps, struct map *map);

static inline int is_anon_memory(const char *filename, u32 flags)
{
	return flags & MAP_HUGETLB ||
	       !strcmp(filename, "//anon") ||
	       !strncmp(filename, "/dev/zero", sizeof("/dev/zero") - 1) ||
	       !strncmp(filename, "/anon_hugepage", sizeof("/anon_hugepage") - 1);
}

static inline int is_no_dso_memory(const char *filename)
{
	return !strncmp(filename, "[stack", 6) ||
	       !strncmp(filename, "/SYSV", 5)  ||
	       !strcmp(filename, "[heap]");
}

static inline int is_android_lib(const char *filename)
{
	return !strncmp(filename, "/data/app-lib", 13) ||
	       !strncmp(filename, "/system/lib", 11);
}

static inline bool replace_android_lib(const char *filename, char *newfilename)
{
	const char *libname;
	char *app_abi;
	size_t app_abi_length, new_length;
	size_t lib_length = 0;

	libname = strrchr(filename, '/');
	if (libname)
		lib_length = strlen(libname);

	app_abi = getenv("APP_ABI");
	if (!app_abi)
		return false;

	app_abi_length = strlen(app_abi);

	if (!strncmp(filename, "/data/app-lib", 13)) {
		char *apk_path;

		if (!app_abi_length)
			return false;

		new_length = 7 + app_abi_length + lib_length;

		apk_path = getenv("APK_PATH");
		if (apk_path) {
			new_length += strlen(apk_path) + 1;
			if (new_length > PATH_MAX)
				return false;
			snprintf(newfilename, new_length,
				 "%s/libs/%s/%s", apk_path, app_abi, libname);
		} else {
			if (new_length > PATH_MAX)
				return false;
			snprintf(newfilename, new_length,
				 "libs/%s/%s", app_abi, libname);
		}
		return true;
	}

	if (!strncmp(filename, "/system/lib/", 11)) {
		char *ndk, *app;
		const char *arch;
		size_t ndk_length;
		size_t app_length;

		ndk = getenv("NDK_ROOT");
		app = getenv("APP_PLATFORM");

		if (!(ndk && app))
			return false;

		ndk_length = strlen(ndk);
		app_length = strlen(app);

		if (!(ndk_length && app_length && app_abi_length))
			return false;

		arch = !strncmp(app_abi, "arm", 3) ? "arm" :
		       !strncmp(app_abi, "mips", 4) ? "mips" :
		       !strncmp(app_abi, "x86", 3) ? "x86" : NULL;

		if (!arch)
			return false;

		new_length = 27 + ndk_length +
			     app_length + lib_length + strlen(arch);

		if (new_length > PATH_MAX)
			return false;
		snprintf(newfilename, new_length,
			 "%s/platforms/%s/arch-%s/usr/lib/%s",
			 ndk, app, arch, libname);

		return true;
	}
	return false;
}

void map__init(struct map *map, u64 start, u64 end, u64 pgoff, struct dso *dso)
{
	map->start    = start;
	map->end      = end;
	map->pgoff    = pgoff;
	map->reloc    = 0;
	map->dso      = dso__get(dso);
	map->map_ip   = map__map_ip;
	map->unmap_ip = map__unmap_ip;
	RB_CLEAR_NODE(&map->rb_node);
	map->groups   = NULL;
	map->erange_warned = false;
	refcount_set(&map->refcnt, 1);
}

struct map *map__new(struct machine *machine, u64 start, u64 len,
		     u64 pgoff, u32 d_maj, u32 d_min, u64 ino,
		     u64 ino_gen, u32 prot, u32 flags, char *filename,
		     struct thread *thread)
{
	struct map *map = malloc(sizeof(*map));
	struct nsinfo *nsi = NULL;
	struct nsinfo *nnsi;

	if (map != NULL) {
		char newfilename[PATH_MAX];
		struct dso *dso;
		int anon, no_dso, vdso, android;

		android = is_android_lib(filename);
		anon = is_anon_memory(filename, flags);
		vdso = is_vdso_map(filename);
		no_dso = is_no_dso_memory(filename);

		map->maj = d_maj;
		map->min = d_min;
		map->ino = ino;
		map->ino_generation = ino_gen;
		map->prot = prot;
		map->flags = flags;
		nsi = nsinfo__get(thread->nsinfo);

		if ((anon || no_dso) && nsi && (prot & PROT_EXEC)) {
			snprintf(newfilename, sizeof(newfilename),
				 "/tmp/perf-%d.map", nsi->pid);
			filename = newfilename;
		}

		if (android) {
			if (replace_android_lib(filename, newfilename))
				filename = newfilename;
		}

		if (vdso) {
			/* The vdso maps are always on the host and not the
			 * container.  Ensure that we don't use setns to look
			 * them up.
			 */
			nnsi = nsinfo__copy(nsi);
			if (nnsi) {
				nsinfo__put(nsi);
				nnsi->need_setns = false;
				nsi = nnsi;
			}
			pgoff = 0;
			dso = machine__findnew_vdso(machine, thread);
		} else
			dso = machine__findnew_dso(machine, filename);

		if (dso == NULL)
			goto out_delete;

		map__init(map, start, start + len, pgoff, dso);

		if (anon || no_dso) {
			map->map_ip = map->unmap_ip = identity__map_ip;

			/*
			 * Set memory without DSO as loaded. All map__find_*
			 * functions still return NULL, and we avoid the
			 * unnecessary map__load warning.
			 */
			if (!(prot & PROT_EXEC))
				dso__set_loaded(dso);
		}
		dso->nsinfo = nsi;
		dso__put(dso);
	}
	return map;
out_delete:
	nsinfo__put(nsi);
	free(map);
	return NULL;
}

/*
 * Constructor variant for modules (where we know from /proc/modules where
 * they are loaded) and for vmlinux, where only after we load all the
 * symbols we'll know where it starts and ends.
 */
struct map *map__new2(u64 start, struct dso *dso)
{
	struct map *map = calloc(1, (sizeof(*map) +
				     (dso->kernel ? sizeof(struct kmap) : 0)));
	if (map != NULL) {
		/*
		 * ->end will be filled after we load all the symbols
		 */
		map__init(map, start, 0, 0, dso);
	}

	return map;
}

/*
 * Use this and __map__is_kmodule() for map instances that are in
 * machine->kmaps, and thus have map->groups->machine all properly set, to
 * disambiguate between the kernel and modules.
 *
 * When the need arises, introduce map__is_{kernel,kmodule}() that
 * checks (map->groups != NULL && map->groups->machine != NULL &&
 * map->dso->kernel) before calling __map__is_{kernel,kmodule}()
 */
bool __map__is_kernel(const struct map *map)
{
	return machine__kernel_map(map->groups->machine) == map;
}

bool __map__is_extra_kernel_map(const struct map *map)
{
	struct kmap *kmap = __map__kmap((struct map *)map);

	return kmap && kmap->name[0];
}

bool map__has_symbols(const struct map *map)
{
	return dso__has_symbols(map->dso);
}

static void map__exit(struct map *map)
{
	BUG_ON(!RB_EMPTY_NODE(&map->rb_node));
	dso__zput(map->dso);
}

void map__delete(struct map *map)
{
	map__exit(map);
	free(map);
}

void map__put(struct map *map)
{
	if (map && refcount_dec_and_test(&map->refcnt))
		map__delete(map);
}

void map__fixup_start(struct map *map)
{
	struct rb_root *symbols = &map->dso->symbols;
	struct rb_node *nd = rb_first(symbols);
	if (nd != NULL) {
		struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
		map->start = sym->start;
	}
}

void map__fixup_end(struct map *map)
{
	struct rb_root *symbols = &map->dso->symbols;
	struct rb_node *nd = rb_last(symbols);
	if (nd != NULL) {
		struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
		map->end = sym->end;
	}
}

#define DSO__DELETED "(deleted)"

int map__load(struct map *map)
{
	const char *name = map->dso->long_name;
	int nr;

	if (dso__loaded(map->dso))
		return 0;

	nr = dso__load(map->dso, map);
	if (nr < 0) {
		if (map->dso->has_build_id) {
			char sbuild_id[SBUILD_ID_SIZE];

			build_id__sprintf(map->dso->build_id,
					  sizeof(map->dso->build_id),
					  sbuild_id);
			pr_warning("%s with build id %s not found",
				   name, sbuild_id);
		} else
			pr_warning("Failed to open %s", name);

		pr_warning(", continuing without symbols\n");
		return -1;
	} else if (nr == 0) {
#ifdef HAVE_LIBELF_SUPPORT
		const size_t len = strlen(name);
		const size_t real_len = len - sizeof(DSO__DELETED);

		if (len > sizeof(DSO__DELETED) &&
		    strcmp(name + real_len + 1, DSO__DELETED) == 0) {
			pr_warning("%.*s was updated (is prelink enabled?). "
				   "Restart the long running apps that use it!\n",
				   (int)real_len, name);
		} else {
			pr_warning("no symbols found in %s, maybe install "
				   "a debug package?\n", name);
		}
#endif
		return -1;
	}

	return 0;
}

struct symbol *map__find_symbol(struct map *map, u64 addr)
{
	if (map__load(map) < 0)
		return NULL;

	return dso__find_symbol(map->dso, addr);
}

struct symbol *map__find_symbol_by_name(struct map *map, const char *name)
{
	if (map__load(map) < 0)
		return NULL;

	if (!dso__sorted_by_name(map->dso))
		dso__sort_by_name(map->dso);

	return dso__find_symbol_by_name(map->dso, name);
}

struct map *map__clone(struct map *from)
{
	struct map *map = memdup(from, sizeof(*map));

	if (map != NULL) {
		refcount_set(&map->refcnt, 1);
		RB_CLEAR_NODE(&map->rb_node);
		dso__get(map->dso);
		map->groups = NULL;
	}

	return map;
}

size_t map__fprintf(struct map *map, FILE *fp)
{
	return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n",
		       map->start, map->end, map->pgoff, map->dso->name);
}

size_t map__fprintf_dsoname(struct map *map, FILE *fp)
{
	const char *dsoname = "[unknown]";

	if (map && map->dso) {
		if (symbol_conf.show_kernel_path && map->dso->long_name)
			dsoname = map->dso->long_name;
		else
			dsoname = map->dso->name;
	}

	return fprintf(fp, "%s", dsoname);
}

char *map__srcline(struct map *map, u64 addr, struct symbol *sym)
{
	if (map == NULL)
		return SRCLINE_UNKNOWN;
	return get_srcline(map->dso, map__rip_2objdump(map, addr), sym, true, true, addr);
}

int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix,
			 FILE *fp)
{
	int ret = 0;

	if (map && map->dso) {
		char *srcline = map__srcline(map, addr, NULL);
		if (srcline != SRCLINE_UNKNOWN)
			ret = fprintf(fp, "%s%s", prefix, srcline);
		free_srcline(srcline);
	}
	return ret;
}

/**
 * map__rip_2objdump - convert symbol start address to objdump address.
 * @map: memory map
 * @rip: symbol start address
 *
 * objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN.
 * map->dso->adjust_symbols==1 for ET_EXEC-like cases except ET_REL which is
 * relative to section start.
 *
 * Return: Address suitable for passing to "objdump --start-address="
 */
u64 map__rip_2objdump(struct map *map, u64 rip)
{
	struct kmap *kmap = __map__kmap(map);

	/*
	 * vmlinux does not have program headers for PTI entry trampolines and
	 * kcore may not either. However the trampoline object code is on the
	 * main kernel map, so just use that instead.
	 */
	if (kmap && is_entry_trampoline(kmap->name) && kmap->kmaps && kmap->kmaps->machine) {
		struct map *kernel_map = machine__kernel_map(kmap->kmaps->machine);

		if (kernel_map)
			map = kernel_map;
	}

	if (!map->dso->adjust_symbols)
		return rip;

	if (map->dso->rel)
		return rip - map->pgoff;

	/*
	 * kernel modules also have DSO_TYPE_USER in dso->kernel,
	 * but all kernel modules are ET_REL, so won't get here.
	 */
	if (map->dso->kernel == DSO_TYPE_USER)
		return rip + map->dso->text_offset;

	return map->unmap_ip(map, rip) - map->reloc;
}

/**
 * map__objdump_2mem - convert objdump address to a memory address.
 * @map: memory map
 * @ip: objdump address
 *
 * Closely related to map__rip_2objdump(), this function takes an address from
 * objdump and converts it to a memory address. Note this assumes that @map
 * contains the address. To be sure the result is valid, check it forwards
 * e.g. map__rip_2objdump(map->map_ip(map, map__objdump_2mem(map, ip))) == ip
 *
 * Return: Memory address.
 */
u64 map__objdump_2mem(struct map *map, u64 ip)
{
	if (!map->dso->adjust_symbols)
		return map->unmap_ip(map, ip);

	if (map->dso->rel)
		return map->unmap_ip(map, ip + map->pgoff);

	/*
	 * kernel modules also have DSO_TYPE_USER in dso->kernel,
	 * but all kernel modules are ET_REL, so won't get here.
	 */
	if (map->dso->kernel == DSO_TYPE_USER)
		return map->unmap_ip(map, ip - map->dso->text_offset);

	return ip + map->reloc;
}

static void maps__init(struct maps *maps)
{
	maps->entries = RB_ROOT;
	init_rwsem(&maps->lock);
}

void map_groups__init(struct map_groups *mg, struct machine *machine)
{
	maps__init(&mg->maps);
	mg->machine = machine;
	refcount_set(&mg->refcnt, 1);
}

static void __maps__purge(struct maps *maps)
{
	struct rb_root *root = &maps->entries;
	struct rb_node *next = rb_first(root);

	while (next) {
		struct map *pos = rb_entry(next, struct map, rb_node);

		next = rb_next(&pos->rb_node);
		rb_erase_init(&pos->rb_node, root);
		map__put(pos);
	}
}

static void maps__exit(struct maps *maps)
{
	down_write(&maps->lock);
	__maps__purge(maps);
	up_write(&maps->lock);
}

void map_groups__exit(struct map_groups *mg)
{
	maps__exit(&mg->maps);
}

bool map_groups__empty(struct map_groups *mg)
{
	return !maps__first(&mg->maps);
}

struct map_groups *map_groups__new(struct machine *machine)
{
	struct map_groups *mg = malloc(sizeof(*mg));

	if (mg != NULL)
		map_groups__init(mg, machine);

	return mg;
}

void map_groups__delete(struct map_groups *mg)
{
	map_groups__exit(mg);
	free(mg);
}

void map_groups__put(struct map_groups *mg)
{
	if (mg && refcount_dec_and_test(&mg->refcnt))
		map_groups__delete(mg);
}

struct symbol *map_groups__find_symbol(struct map_groups *mg,
				       u64 addr, struct map **mapp)
{
	struct map *map = map_groups__find(mg, addr);

	/* Ensure map is loaded before using map->map_ip */
	if (map != NULL && map__load(map) >= 0) {
		if (mapp != NULL)
			*mapp = map;
		return map__find_symbol(map, map->map_ip(map, addr));
	}

	return NULL;
}

struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name,
					 struct map **mapp)
{
	struct symbol *sym;
	struct rb_node *nd;

	down_read(&maps->lock);

	for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node);

		sym = map__find_symbol_by_name(pos, name);

		if (sym == NULL)
			continue;
		if (mapp != NULL)
			*mapp = pos;
		goto out;
	}

	sym = NULL;
out:
	up_read(&maps->lock);
	return sym;
}

struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
					       const char *name,
					       struct map **mapp)
{
	return maps__find_symbol_by_name(&mg->maps, name, mapp);
}

int map_groups__find_ams(struct addr_map_symbol *ams)
{
	if (ams->addr < ams->map->start || ams->addr >= ams->map->end) {
		if (ams->map->groups == NULL)
			return -1;
		ams->map = map_groups__find(ams->map->groups, ams->addr);
		if (ams->map == NULL)
			return -1;
	}

	ams->al_addr = ams->map->map_ip(ams->map, ams->addr);
	ams->sym = map__find_symbol(ams->map, ams->al_addr);

	return ams->sym ? 0 : -1;
}

static size_t maps__fprintf(struct maps *maps, FILE *fp)
{
	size_t printed = 0;
	struct rb_node *nd;

	down_read(&maps->lock);

	for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node);
		printed += fprintf(fp, "Map:");
		printed += map__fprintf(pos, fp);
		if (verbose > 2) {
			printed += dso__fprintf(pos->dso, fp);
			printed += fprintf(fp, "--\n");
		}
	}

	up_read(&maps->lock);

	return printed;
}

size_t map_groups__fprintf(struct map_groups *mg, FILE *fp)
{
	return maps__fprintf(&mg->maps, fp);
}

static void __map_groups__insert(struct map_groups *mg, struct map *map)
{
	__maps__insert(&mg->maps, map);
	map->groups = mg;
}

static int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
{
	struct rb_root *root;
	struct rb_node *next, *first;
	int err = 0;

	down_write(&maps->lock);

	root = &maps->entries;

	/*
	 * Find first map where end > map->start.
	 * Same as find_vma() in kernel.
	 */
	next = root->rb_node;
	first = NULL;
	while (next) {
		struct map *pos = rb_entry(next, struct map, rb_node);

		if (pos->end > map->start) {
			first = next;
			if (pos->start <= map->start)
				break;
			next = next->rb_left;
		} else
			next = next->rb_right;
	}

	next = first;
	while (next) {
		struct map *pos = rb_entry(next, struct map, rb_node);
		next = rb_next(&pos->rb_node);

		/*
		 * Stop if current map starts after map->end.
		 * Maps are ordered by start: next will not overlap for sure.
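		 *
		 * Any map that does overlap is erased below; where it extends
		 * beyond the new map on either side, the non-overlapped part
		 * is re-inserted as a cloned "before"/"after" map.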
		 */
		if (pos->start >= map->end)
			break;

		if (verbose >= 2) {

			if (use_browser) {
				pr_warning("overlapping maps in %s "
					   "(disable tui for more info)\n",
					   map->dso->name);
			} else {
				fputs("overlapping maps:\n", fp);
				map__fprintf(map, fp);
				map__fprintf(pos, fp);
			}
		}

		rb_erase_init(&pos->rb_node, root);
		/*
		 * Now check if we need to create new maps for areas not
		 * overlapped by the new map:
		 */
		if (map->start > pos->start) {
			struct map *before = map__clone(pos);

			if (before == NULL) {
				err = -ENOMEM;
				goto put_map;
			}

			before->end = map->start;
			__map_groups__insert(pos->groups, before);
			if (verbose >= 2 && !use_browser)
				map__fprintf(before, fp);
			map__put(before);
		}

		if (map->end < pos->end) {
			struct map *after = map__clone(pos);

			if (after == NULL) {
				err = -ENOMEM;
				goto put_map;
			}

			after->start = map->end;
			__map_groups__insert(pos->groups, after);
			if (verbose >= 2 && !use_browser)
				map__fprintf(after, fp);
			map__put(after);
		}
put_map:
		map__put(pos);

		if (err)
			goto out;
	}

	err = 0;
out:
	up_write(&maps->lock);
	return err;
}

int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
				   FILE *fp)
{
	return maps__fixup_overlappings(&mg->maps, map, fp);
}

/*
 * XXX This should not really _copy_ the maps, but refcount them.
 */
int map_groups__clone(struct thread *thread, struct map_groups *parent)
{
	struct map_groups *mg = thread->mg;
	int err = -ENOMEM;
	struct map *map;
	struct maps *maps = &parent->maps;

	down_read(&maps->lock);

	for (map = maps__first(maps); map; map = map__next(map)) {
		struct map *new = map__clone(map);
		if (new == NULL)
			goto out_unlock;

		err = unwind__prepare_access(thread, new, NULL);
		if (err)
			goto out_unlock;

		map_groups__insert(mg, new);
		map__put(new);
	}

	err = 0;
out_unlock:
	up_read(&maps->lock);
	return err;
}

static void __maps__insert(struct maps *maps, struct map *map)
{
	struct rb_node **p = &maps->entries.rb_node;
	struct rb_node *parent = NULL;
	const u64 ip = map->start;
	struct map *m;

	while (*p != NULL) {
		parent = *p;
		m = rb_entry(parent, struct map, rb_node);
		if (ip < m->start)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&map->rb_node, parent, p);
	rb_insert_color(&map->rb_node, &maps->entries);
	map__get(map);
}

void maps__insert(struct maps *maps, struct map *map)
{
	down_write(&maps->lock);
	__maps__insert(maps, map);
	up_write(&maps->lock);
}

static void __maps__remove(struct maps *maps, struct map *map)
{
	rb_erase_init(&map->rb_node, &maps->entries);
	map__put(map);
}

void maps__remove(struct maps *maps, struct map *map)
{
	down_write(&maps->lock);
	__maps__remove(maps, map);
	up_write(&maps->lock);
}

struct map *maps__find(struct maps *maps, u64 ip)
{
	struct rb_node **p, *parent = NULL;
	struct map *m;

	down_read(&maps->lock);

	p = &maps->entries.rb_node;
	while (*p != NULL) {
		parent = *p;
		m = rb_entry(parent, struct map, rb_node);
		if (ip < m->start)
			p = &(*p)->rb_left;
		else if (ip >= m->end)
			p = &(*p)->rb_right;
		else
			goto out;
	}

	m = NULL;
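	/* No map in the tree contains ip: fall through and return NULL. */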
out:
	up_read(&maps->lock);
	return m;
}

struct map *maps__first(struct maps *maps)
{
	struct rb_node *first = rb_first(&maps->entries);

	if (first)
		return rb_entry(first, struct map, rb_node);
	return NULL;
}

struct map *map__next(struct map *map)
{
	struct rb_node *next = rb_next(&map->rb_node);

	if (next)
		return rb_entry(next, struct map, rb_node);
	return NULL;
}

struct kmap *__map__kmap(struct map *map)
{
	if (!map->dso || !map->dso->kernel)
		return NULL;
	return (struct kmap *)(map + 1);
}

struct kmap *map__kmap(struct map *map)
{
	struct kmap *kmap = __map__kmap(map);

	if (!kmap)
		pr_err("Internal error: map__kmap with a non-kernel map\n");
	return kmap;
}

struct map_groups *map__kmaps(struct map *map)
{
	struct kmap *kmap = map__kmap(map);

	if (!kmap || !kmap->kmaps) {
		pr_err("Internal error: map__kmaps with a non-kernel map\n");
		return NULL;
	}
	return kmap->kmaps;
}