1 #include "symbol.h" 2 #include <errno.h> 3 #include <inttypes.h> 4 #include <limits.h> 5 #include <stdlib.h> 6 #include <string.h> 7 #include <stdio.h> 8 #include <unistd.h> 9 #include "map.h" 10 #include "thread.h" 11 #include "strlist.h" 12 #include "vdso.h" 13 #include "build-id.h" 14 #include "util.h" 15 #include "debug.h" 16 #include "machine.h" 17 #include <linux/string.h> 18 19 static void __maps__insert(struct maps *maps, struct map *map); 20 21 const char *map_type__name[MAP__NR_TYPES] = { 22 [MAP__FUNCTION] = "Functions", 23 [MAP__VARIABLE] = "Variables", 24 }; 25 26 static inline int is_anon_memory(const char *filename) 27 { 28 return !strcmp(filename, "//anon") || 29 !strcmp(filename, "/dev/zero (deleted)") || 30 !strcmp(filename, "/anon_hugepage (deleted)"); 31 } 32 33 static inline int is_no_dso_memory(const char *filename) 34 { 35 return !strncmp(filename, "[stack", 6) || 36 !strncmp(filename, "/SYSV",5) || 37 !strcmp(filename, "[heap]"); 38 } 39 40 static inline int is_android_lib(const char *filename) 41 { 42 return !strncmp(filename, "/data/app-lib", 13) || 43 !strncmp(filename, "/system/lib", 11); 44 } 45 46 static inline bool replace_android_lib(const char *filename, char *newfilename) 47 { 48 const char *libname; 49 char *app_abi; 50 size_t app_abi_length, new_length; 51 size_t lib_length = 0; 52 53 libname = strrchr(filename, '/'); 54 if (libname) 55 lib_length = strlen(libname); 56 57 app_abi = getenv("APP_ABI"); 58 if (!app_abi) 59 return false; 60 61 app_abi_length = strlen(app_abi); 62 63 if (!strncmp(filename, "/data/app-lib", 13)) { 64 char *apk_path; 65 66 if (!app_abi_length) 67 return false; 68 69 new_length = 7 + app_abi_length + lib_length; 70 71 apk_path = getenv("APK_PATH"); 72 if (apk_path) { 73 new_length += strlen(apk_path) + 1; 74 if (new_length > PATH_MAX) 75 return false; 76 snprintf(newfilename, new_length, 77 "%s/libs/%s/%s", apk_path, app_abi, libname); 78 } else { 79 if (new_length > PATH_MAX) 80 return false; 81 snprintf(newfilename, new_length, 82 "libs/%s/%s", app_abi, libname); 83 } 84 return true; 85 } 86 87 if (!strncmp(filename, "/system/lib/", 11)) { 88 char *ndk, *app; 89 const char *arch; 90 size_t ndk_length; 91 size_t app_length; 92 93 ndk = getenv("NDK_ROOT"); 94 app = getenv("APP_PLATFORM"); 95 96 if (!(ndk && app)) 97 return false; 98 99 ndk_length = strlen(ndk); 100 app_length = strlen(app); 101 102 if (!(ndk_length && app_length && app_abi_length)) 103 return false; 104 105 arch = !strncmp(app_abi, "arm", 3) ? "arm" : 106 !strncmp(app_abi, "mips", 4) ? "mips" : 107 !strncmp(app_abi, "x86", 3) ? 
"x86" : NULL; 108 109 if (!arch) 110 return false; 111 112 new_length = 27 + ndk_length + 113 app_length + lib_length 114 + strlen(arch); 115 116 if (new_length > PATH_MAX) 117 return false; 118 snprintf(newfilename, new_length, 119 "%s/platforms/%s/arch-%s/usr/lib/%s", 120 ndk, app, arch, libname); 121 122 return true; 123 } 124 return false; 125 } 126 127 void map__init(struct map *map, enum map_type type, 128 u64 start, u64 end, u64 pgoff, struct dso *dso) 129 { 130 map->type = type; 131 map->start = start; 132 map->end = end; 133 map->pgoff = pgoff; 134 map->reloc = 0; 135 map->dso = dso__get(dso); 136 map->map_ip = map__map_ip; 137 map->unmap_ip = map__unmap_ip; 138 RB_CLEAR_NODE(&map->rb_node); 139 map->groups = NULL; 140 map->erange_warned = false; 141 atomic_set(&map->refcnt, 1); 142 } 143 144 struct map *map__new(struct machine *machine, u64 start, u64 len, 145 u64 pgoff, u32 pid, u32 d_maj, u32 d_min, u64 ino, 146 u64 ino_gen, u32 prot, u32 flags, char *filename, 147 enum map_type type, struct thread *thread) 148 { 149 struct map *map = malloc(sizeof(*map)); 150 151 if (map != NULL) { 152 char newfilename[PATH_MAX]; 153 struct dso *dso; 154 int anon, no_dso, vdso, android; 155 156 android = is_android_lib(filename); 157 anon = is_anon_memory(filename); 158 vdso = is_vdso_map(filename); 159 no_dso = is_no_dso_memory(filename); 160 161 map->maj = d_maj; 162 map->min = d_min; 163 map->ino = ino; 164 map->ino_generation = ino_gen; 165 map->prot = prot; 166 map->flags = flags; 167 168 if ((anon || no_dso) && type == MAP__FUNCTION) { 169 snprintf(newfilename, sizeof(newfilename), "/tmp/perf-%d.map", pid); 170 filename = newfilename; 171 } 172 173 if (android) { 174 if (replace_android_lib(filename, newfilename)) 175 filename = newfilename; 176 } 177 178 if (vdso) { 179 pgoff = 0; 180 dso = machine__findnew_vdso(machine, thread); 181 } else 182 dso = machine__findnew_dso(machine, filename); 183 184 if (dso == NULL) 185 goto out_delete; 186 187 map__init(map, type, start, start + len, pgoff, dso); 188 189 if (anon || no_dso) { 190 map->map_ip = map->unmap_ip = identity__map_ip; 191 192 /* 193 * Set memory without DSO as loaded. All map__find_* 194 * functions still return NULL, and we avoid the 195 * unnecessary map__load warning. 196 */ 197 if (type != MAP__FUNCTION) 198 dso__set_loaded(dso, map->type); 199 } 200 dso__put(dso); 201 } 202 return map; 203 out_delete: 204 free(map); 205 return NULL; 206 } 207 208 /* 209 * Constructor variant for modules (where we know from /proc/modules where 210 * they are loaded) and for vmlinux, where only after we load all the 211 * symbols we'll know where it starts and ends. 212 */ 213 struct map *map__new2(u64 start, struct dso *dso, enum map_type type) 214 { 215 struct map *map = calloc(1, (sizeof(*map) + 216 (dso->kernel ? sizeof(struct kmap) : 0))); 217 if (map != NULL) { 218 /* 219 * ->end will be filled after we load all the symbols 220 */ 221 map__init(map, type, start, 0, 0, dso); 222 } 223 224 return map; 225 } 226 227 /* 228 * Use this and __map__is_kmodule() for map instances that are in 229 * machine->kmaps, and thus have map->groups->machine all properly set, to 230 * disambiguate between the kernel and modules. 

/*
 * Use this and __map__is_kmodule() for map instances that are in
 * machine->kmaps, and thus have map->groups->machine all properly set, to
 * disambiguate between the kernel and modules.
 *
 * When the need arises, introduce map__is_{kernel,kmodule}() that
 * checks (map->groups != NULL && map->groups->machine != NULL &&
 * map->dso->kernel) before calling __map__is_{kernel,kmodule}().
 */
bool __map__is_kernel(const struct map *map)
{
	return __machine__kernel_map(map->groups->machine, map->type) == map;
}
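
/*
 * Illustrative sketch (not part of the original file): the guarded wrapper
 * that the comment above suggests, for maps that may not be in
 * machine->kmaps:
 *
 *	static bool map__is_kernel(const struct map *map)
 *	{
 *		return map->groups != NULL &&
 *		       map->groups->machine != NULL &&
 *		       map->dso->kernel &&
 *		       __map__is_kernel(map);
 *	}
 */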
" 312 "Restart the long running apps that use it!\n", 313 (int)real_len, name); 314 } else { 315 pr_warning("no symbols found in %s, maybe install " 316 "a debug package?\n", name); 317 } 318 #endif 319 return -1; 320 } 321 322 return 0; 323 } 324 325 int __weak arch__compare_symbol_names(const char *namea, const char *nameb) 326 { 327 return strcmp(namea, nameb); 328 } 329 330 struct symbol *map__find_symbol(struct map *map, u64 addr, 331 symbol_filter_t filter) 332 { 333 if (map__load(map, filter) < 0) 334 return NULL; 335 336 return dso__find_symbol(map->dso, map->type, addr); 337 } 338 339 struct symbol *map__find_symbol_by_name(struct map *map, const char *name, 340 symbol_filter_t filter) 341 { 342 if (map__load(map, filter) < 0) 343 return NULL; 344 345 if (!dso__sorted_by_name(map->dso, map->type)) 346 dso__sort_by_name(map->dso, map->type); 347 348 return dso__find_symbol_by_name(map->dso, map->type, name); 349 } 350 351 struct map *map__clone(struct map *from) 352 { 353 struct map *map = memdup(from, sizeof(*map)); 354 355 if (map != NULL) { 356 atomic_set(&map->refcnt, 1); 357 RB_CLEAR_NODE(&map->rb_node); 358 dso__get(map->dso); 359 map->groups = NULL; 360 } 361 362 return map; 363 } 364 365 int map__overlap(struct map *l, struct map *r) 366 { 367 if (l->start > r->start) { 368 struct map *t = l; 369 l = r; 370 r = t; 371 } 372 373 if (l->end > r->start) 374 return 1; 375 376 return 0; 377 } 378 379 size_t map__fprintf(struct map *map, FILE *fp) 380 { 381 return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n", 382 map->start, map->end, map->pgoff, map->dso->name); 383 } 384 385 size_t map__fprintf_dsoname(struct map *map, FILE *fp) 386 { 387 const char *dsoname = "[unknown]"; 388 389 if (map && map->dso && (map->dso->name || map->dso->long_name)) { 390 if (symbol_conf.show_kernel_path && map->dso->long_name) 391 dsoname = map->dso->long_name; 392 else if (map->dso->name) 393 dsoname = map->dso->name; 394 } 395 396 return fprintf(fp, "%s", dsoname); 397 } 398 399 int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix, 400 FILE *fp) 401 { 402 char *srcline; 403 int ret = 0; 404 405 if (map && map->dso) { 406 srcline = get_srcline(map->dso, 407 map__rip_2objdump(map, addr), NULL, true); 408 if (srcline != SRCLINE_UNKNOWN) 409 ret = fprintf(fp, "%s%s", prefix, srcline); 410 free_srcline(srcline); 411 } 412 return ret; 413 } 414 415 /** 416 * map__rip_2objdump - convert symbol start address to objdump address. 417 * @map: memory map 418 * @rip: symbol start address 419 * 420 * objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN. 421 * map->dso->adjust_symbols==1 for ET_EXEC-like cases except ET_REL which is 422 * relative to section start. 423 * 424 * Return: Address suitable for passing to "objdump --start-address=" 425 */ 426 u64 map__rip_2objdump(struct map *map, u64 rip) 427 { 428 if (!map->dso->adjust_symbols) 429 return rip; 430 431 if (map->dso->rel) 432 return rip - map->pgoff; 433 434 return map->unmap_ip(map, rip) - map->reloc; 435 } 436 437 /** 438 * map__objdump_2mem - convert objdump address to a memory address. 439 * @map: memory map 440 * @ip: objdump address 441 * 442 * Closely related to map__rip_2objdump(), this function takes an address from 443 * objdump and converts it to a memory address. Note this assumes that @map 444 * contains the address. To be sure the result is valid, check it forwards 445 * e.g. map__rip_2objdump(map->map_ip(map, map__objdump_2mem(map, ip))) == ip 446 * 447 * Return: Memory address. 

static void maps__init(struct maps *maps)
{
	maps->entries = RB_ROOT;
	pthread_rwlock_init(&maps->lock, NULL);
}

void map_groups__init(struct map_groups *mg, struct machine *machine)
{
	int i;
	for (i = 0; i < MAP__NR_TYPES; ++i) {
		maps__init(&mg->maps[i]);
	}
	mg->machine = machine;
	atomic_set(&mg->refcnt, 1);
}

static void __maps__purge(struct maps *maps)
{
	struct rb_root *root = &maps->entries;
	struct rb_node *next = rb_first(root);

	while (next) {
		struct map *pos = rb_entry(next, struct map, rb_node);

		next = rb_next(&pos->rb_node);
		rb_erase_init(&pos->rb_node, root);
		map__put(pos);
	}
}

static void maps__exit(struct maps *maps)
{
	pthread_rwlock_wrlock(&maps->lock);
	__maps__purge(maps);
	pthread_rwlock_unlock(&maps->lock);
}

void map_groups__exit(struct map_groups *mg)
{
	int i;

	for (i = 0; i < MAP__NR_TYPES; ++i)
		maps__exit(&mg->maps[i]);
}

bool map_groups__empty(struct map_groups *mg)
{
	int i;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		if (maps__first(&mg->maps[i]))
			return false;
	}

	return true;
}

struct map_groups *map_groups__new(struct machine *machine)
{
	struct map_groups *mg = malloc(sizeof(*mg));

	if (mg != NULL)
		map_groups__init(mg, machine);

	return mg;
}

void map_groups__delete(struct map_groups *mg)
{
	map_groups__exit(mg);
	free(mg);
}

void map_groups__put(struct map_groups *mg)
{
	if (mg && atomic_dec_and_test(&mg->refcnt))
		map_groups__delete(mg);
}

struct symbol *map_groups__find_symbol(struct map_groups *mg,
				       enum map_type type, u64 addr,
				       struct map **mapp,
				       symbol_filter_t filter)
{
	struct map *map = map_groups__find(mg, type, addr);

	/* Ensure map is loaded before using map->map_ip */
	if (map != NULL && map__load(map, filter) >= 0) {
		if (mapp != NULL)
			*mapp = map;
		return map__find_symbol(map, map->map_ip(map, addr), filter);
	}

	return NULL;
}

struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name,
					 struct map **mapp, symbol_filter_t filter)
{
	struct symbol *sym;
	struct rb_node *nd;

	pthread_rwlock_rdlock(&maps->lock);

	for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node);

		sym = map__find_symbol_by_name(pos, name, filter);

		if (sym == NULL)
			continue;
		if (mapp != NULL)
			*mapp = pos;
		goto out;
	}

	sym = NULL;
out:
	pthread_rwlock_unlock(&maps->lock);
	return sym;
}

struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
					       enum map_type type,
					       const char *name,
					       struct map **mapp,
					       symbol_filter_t filter)
{
	struct symbol *sym = maps__find_symbol_by_name(&mg->maps[type], name, mapp, filter);

	return sym;
}
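
/*
 * Illustrative sketch (not part of the original file, names assumed):
 * resolving a sample address to a symbol through a thread's map groups,
 * with no symbol filter:
 *
 *	struct map *map;
 *	struct symbol *sym = map_groups__find_symbol(thread->mg, MAP__FUNCTION,
 *						     sample->ip, &map, NULL);
 *
 *	if (sym != NULL)
 *		pr_debug("%#" PRIx64 " is in %s\n", sample->ip, sym->name);
 */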

int map_groups__find_ams(struct addr_map_symbol *ams, symbol_filter_t filter)
{
	if (ams->addr < ams->map->start || ams->addr >= ams->map->end) {
		if (ams->map->groups == NULL)
			return -1;
		ams->map = map_groups__find(ams->map->groups, ams->map->type,
					    ams->addr);
		if (ams->map == NULL)
			return -1;
	}

	ams->al_addr = ams->map->map_ip(ams->map, ams->addr);
	ams->sym = map__find_symbol(ams->map, ams->al_addr, filter);

	return ams->sym ? 0 : -1;
}

static size_t maps__fprintf(struct maps *maps, FILE *fp)
{
	size_t printed = 0;
	struct rb_node *nd;

	pthread_rwlock_rdlock(&maps->lock);

	for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node);
		printed += fprintf(fp, "Map:");
		printed += map__fprintf(pos, fp);
		if (verbose > 2) {
			printed += dso__fprintf(pos->dso, pos->type, fp);
			printed += fprintf(fp, "--\n");
		}
	}

	pthread_rwlock_unlock(&maps->lock);

	return printed;
}

size_t __map_groups__fprintf_maps(struct map_groups *mg, enum map_type type,
				  FILE *fp)
{
	size_t printed = fprintf(fp, "%s:\n", map_type__name[type]);
	return printed += maps__fprintf(&mg->maps[type], fp);
}

size_t map_groups__fprintf(struct map_groups *mg, FILE *fp)
{
	size_t printed = 0, i;
	for (i = 0; i < MAP__NR_TYPES; ++i)
		printed += __map_groups__fprintf_maps(mg, i, fp);
	return printed;
}

static void __map_groups__insert(struct map_groups *mg, struct map *map)
{
	__maps__insert(&mg->maps[map->type], map);
	map->groups = mg;
}

static int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
{
	struct rb_root *root;
	struct rb_node *next;
	int err = 0;

	pthread_rwlock_wrlock(&maps->lock);

	root = &maps->entries;
	next = rb_first(root);

	while (next) {
		struct map *pos = rb_entry(next, struct map, rb_node);
		next = rb_next(&pos->rb_node);

		if (!map__overlap(pos, map))
			continue;

		if (verbose >= 2) {
			fputs("overlapping maps:\n", fp);
			map__fprintf(map, fp);
			map__fprintf(pos, fp);
		}

		rb_erase_init(&pos->rb_node, root);
		/*
		 * Now check if we need to create new maps for areas not
		 * overlapped by the new map:
		 */
		if (map->start > pos->start) {
			struct map *before = map__clone(pos);

			if (before == NULL) {
				err = -ENOMEM;
				goto put_map;
			}

			before->end = map->start;
			__map_groups__insert(pos->groups, before);
			if (verbose >= 2)
				map__fprintf(before, fp);
		}

		if (map->end < pos->end) {
			struct map *after = map__clone(pos);

			if (after == NULL) {
				err = -ENOMEM;
				goto put_map;
			}

			after->start = map->end;
			__map_groups__insert(pos->groups, after);
			if (verbose >= 2)
				map__fprintf(after, fp);
		}
put_map:
		map__put(pos);

		if (err)
			goto out;
	}

	err = 0;
out:
	pthread_rwlock_unlock(&maps->lock);
	return err;
}

int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
				   FILE *fp)
{
	return maps__fixup_overlappings(&mg->maps[map->type], map, fp);
}
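
/*
 * Illustrative sketch (not part of the original file, names assumed): a
 * caller inserting a map for a new mmap event would typically trim any
 * overlapping maps first, then insert the new one:
 *
 *	if (map_groups__fixup_overlappings(mg, map, stderr) < 0)
 *		return -1;
 *
 *	map_groups__insert(mg, map);
 */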

/*
 * XXX This should not really _copy_ the maps, but refcount them.
 */
int map_groups__clone(struct map_groups *mg,
		      struct map_groups *parent, enum map_type type)
{
	int err = -ENOMEM;
	struct map *map;
	struct maps *maps = &parent->maps[type];

	pthread_rwlock_rdlock(&maps->lock);

	for (map = maps__first(maps); map; map = map__next(map)) {
		struct map *new = map__clone(map);
		if (new == NULL)
			goto out_unlock;
		map_groups__insert(mg, new);
	}

	err = 0;
out_unlock:
	pthread_rwlock_unlock(&maps->lock);
	return err;
}

static void __maps__insert(struct maps *maps, struct map *map)
{
	struct rb_node **p = &maps->entries.rb_node;
	struct rb_node *parent = NULL;
	const u64 ip = map->start;
	struct map *m;

	while (*p != NULL) {
		parent = *p;
		m = rb_entry(parent, struct map, rb_node);
		if (ip < m->start)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&map->rb_node, parent, p);
	rb_insert_color(&map->rb_node, &maps->entries);
	map__get(map);
}

void maps__insert(struct maps *maps, struct map *map)
{
	pthread_rwlock_wrlock(&maps->lock);
	__maps__insert(maps, map);
	pthread_rwlock_unlock(&maps->lock);
}

static void __maps__remove(struct maps *maps, struct map *map)
{
	rb_erase_init(&map->rb_node, &maps->entries);
	map__put(map);
}

void maps__remove(struct maps *maps, struct map *map)
{
	pthread_rwlock_wrlock(&maps->lock);
	__maps__remove(maps, map);
	pthread_rwlock_unlock(&maps->lock);
}

struct map *maps__find(struct maps *maps, u64 ip)
{
	struct rb_node **p, *parent = NULL;
	struct map *m;

	pthread_rwlock_rdlock(&maps->lock);

	p = &maps->entries.rb_node;
	while (*p != NULL) {
		parent = *p;
		m = rb_entry(parent, struct map, rb_node);
		if (ip < m->start)
			p = &(*p)->rb_left;
		else if (ip >= m->end)
			p = &(*p)->rb_right;
		else
			goto out;
	}

	m = NULL;
out:
	pthread_rwlock_unlock(&maps->lock);
	return m;
}

struct map *maps__first(struct maps *maps)
{
	struct rb_node *first = rb_first(&maps->entries);

	if (first)
		return rb_entry(first, struct map, rb_node);
	return NULL;
}

struct map *map__next(struct map *map)
{
	struct rb_node *next = rb_next(&map->rb_node);

	if (next)
		return rb_entry(next, struct map, rb_node);
	return NULL;
}

struct kmap *map__kmap(struct map *map)
{
	if (!map->dso || !map->dso->kernel) {
		pr_err("Internal error: map__kmap with a non-kernel map\n");
		return NULL;
	}
	return (struct kmap *)(map + 1);
}

struct map_groups *map__kmaps(struct map *map)
{
	struct kmap *kmap = map__kmap(map);

	if (!kmap || !kmap->kmaps) {
		pr_err("Internal error: map__kmaps with a non-kernel map\n");
		return NULL;
	}
	return kmap->kmaps;
}
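
/*
 * Illustrative sketch (not part of the original file, names assumed): the
 * kmap area only exists for maps created by map__new2() with a kernel dso,
 * and is typically wired up to the machine's kernel map_groups, e.g.:
 *
 *	struct kmap *kmap = map__kmap(kernel_map);
 *
 *	if (kmap != NULL)
 *		kmap->kmaps = &machine->kmaps;
 */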