// SPDX-License-Identifier: GPL-2.0
#include <inttypes.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
#include "debug.h"
#include "dso.h"
#include "map.h"
#include "namespaces.h"
#include "srcline.h"
#include "symbol.h"
#include "thread.h"
#include "vdso.h"

static inline int is_android_lib(const char *filename)
{
	return strstarts(filename, "/data/app-lib/") ||
	       strstarts(filename, "/system/lib/");
}

static inline bool replace_android_lib(const char *filename, char *newfilename)
{
	const char *libname;
	char *app_abi;
	size_t app_abi_length, new_length;
	size_t lib_length = 0;

	libname = strrchr(filename, '/');
	if (libname)
		lib_length = strlen(libname);

	app_abi = getenv("APP_ABI");
	if (!app_abi)
		return false;

	app_abi_length = strlen(app_abi);

	if (strstarts(filename, "/data/app-lib/")) {
		char *apk_path;

		if (!app_abi_length)
			return false;

		new_length = 7 + app_abi_length + lib_length;

		apk_path = getenv("APK_PATH");
		if (apk_path) {
			new_length += strlen(apk_path) + 1;
			if (new_length > PATH_MAX)
				return false;
			snprintf(newfilename, new_length,
				 "%s/libs/%s/%s", apk_path, app_abi, libname);
		} else {
			if (new_length > PATH_MAX)
				return false;
			snprintf(newfilename, new_length,
				 "libs/%s/%s", app_abi, libname);
		}
		return true;
	}

	if (strstarts(filename, "/system/lib/")) {
		char *ndk, *app;
		const char *arch;
		int ndk_length, app_length;

		ndk = getenv("NDK_ROOT");
		app = getenv("APP_PLATFORM");

		if (!(ndk && app))
			return false;

		ndk_length = strlen(ndk);
		app_length = strlen(app);

		if (!(ndk_length && app_length && app_abi_length))
			return false;

		arch = !strncmp(app_abi, "arm", 3) ? "arm" :
		       !strncmp(app_abi, "mips", 4) ? "mips" :
		       !strncmp(app_abi, "x86", 3) ? "x86" : NULL;
"x86" : NULL; 86 87 if (!arch) 88 return false; 89 90 new_length = 27 + ndk_length + 91 app_length + lib_length 92 + strlen(arch); 93 94 if (new_length > PATH_MAX) 95 return false; 96 snprintf(newfilename, new_length, 97 "%.*s/platforms/%.*s/arch-%s/usr/lib/%s", 98 ndk_length, ndk, app_length, app, arch, libname); 99 100 return true; 101 } 102 return false; 103 } 104 105 void map__init(struct map *map, u64 start, u64 end, u64 pgoff, struct dso *dso) 106 { 107 map->start = start; 108 map->end = end; 109 map->pgoff = pgoff; 110 map->reloc = 0; 111 map->dso = dso__get(dso); 112 map->map_ip = map__map_ip; 113 map->unmap_ip = map__unmap_ip; 114 RB_CLEAR_NODE(&map->rb_node); 115 map->erange_warned = false; 116 refcount_set(&map->refcnt, 1); 117 } 118 119 struct map *map__new(struct machine *machine, u64 start, u64 len, 120 u64 pgoff, struct dso_id *id, 121 u32 prot, u32 flags, struct build_id *bid, 122 char *filename, struct thread *thread) 123 { 124 struct map *map = malloc(sizeof(*map)); 125 struct nsinfo *nsi = NULL; 126 struct nsinfo *nnsi; 127 128 if (map != NULL) { 129 char newfilename[PATH_MAX]; 130 struct dso *dso, *header_bid_dso; 131 int anon, no_dso, vdso, android; 132 133 android = is_android_lib(filename); 134 anon = is_anon_memory(filename) || flags & MAP_HUGETLB; 135 vdso = is_vdso_map(filename); 136 no_dso = is_no_dso_memory(filename); 137 map->prot = prot; 138 map->flags = flags; 139 nsi = nsinfo__get(thread->nsinfo); 140 141 if ((anon || no_dso) && nsi && (prot & PROT_EXEC)) { 142 snprintf(newfilename, sizeof(newfilename), 143 "/tmp/perf-%d.map", nsinfo__pid(nsi)); 144 filename = newfilename; 145 } 146 147 if (android) { 148 if (replace_android_lib(filename, newfilename)) 149 filename = newfilename; 150 } 151 152 if (vdso) { 153 /* The vdso maps are always on the host and not the 154 * container. Ensure that we don't use setns to look 155 * them up. 156 */ 157 nnsi = nsinfo__copy(nsi); 158 if (nnsi) { 159 nsinfo__put(nsi); 160 nsinfo__clear_need_setns(nnsi); 161 nsi = nnsi; 162 } 163 pgoff = 0; 164 dso = machine__findnew_vdso(machine, thread); 165 } else 166 dso = machine__findnew_dso_id(machine, filename, id); 167 168 if (dso == NULL) 169 goto out_delete; 170 171 map__init(map, start, start + len, pgoff, dso); 172 173 if (anon || no_dso) { 174 map->map_ip = map->unmap_ip = identity__map_ip; 175 176 /* 177 * Set memory without DSO as loaded. All map__find_* 178 * functions still return NULL, and we avoid the 179 * unnecessary map__load warning. 180 */ 181 if (!(prot & PROT_EXEC)) 182 dso__set_loaded(dso); 183 } 184 mutex_lock(&dso->lock); 185 nsinfo__put(dso->nsinfo); 186 dso->nsinfo = nsi; 187 mutex_unlock(&dso->lock); 188 189 if (build_id__is_defined(bid)) { 190 dso__set_build_id(dso, bid); 191 } else { 192 /* 193 * If the mmap event had no build ID, search for an existing dso from the 194 * build ID header by name. Otherwise only the dso loaded at the time of 195 * reading the header will have the build ID set and all future mmaps will 196 * have it missing. 
			 */
			down_read(&machine->dsos.lock);
			header_bid_dso = __dsos__find(&machine->dsos, filename, false);
			up_read(&machine->dsos.lock);
			if (header_bid_dso && header_bid_dso->header_build_id) {
				dso__set_build_id(dso, &header_bid_dso->bid);
				dso->header_build_id = 1;
			}
		}
		dso__put(dso);
	}
	return map;
out_delete:
	nsinfo__put(nsi);
	free(map);
	return NULL;
}

/*
 * Constructor variant for modules (where we know from /proc/modules where
 * they are loaded) and for vmlinux, where only after we load all the
 * symbols we'll know where it starts and ends.
 */
struct map *map__new2(u64 start, struct dso *dso)
{
	struct map *map = calloc(1, (sizeof(*map) +
				     (dso->kernel ? sizeof(struct kmap) : 0)));
	if (map != NULL) {
		/*
		 * ->end will be filled after we load all the symbols
		 */
		map__init(map, start, 0, 0, dso);
	}

	return map;
}

bool __map__is_kernel(const struct map *map)
{
	if (!map->dso->kernel)
		return false;
	return machine__kernel_map(map__kmaps((struct map *)map)->machine) == map;
}

bool __map__is_extra_kernel_map(const struct map *map)
{
	struct kmap *kmap = __map__kmap((struct map *)map);

	return kmap && kmap->name[0];
}

bool __map__is_bpf_prog(const struct map *map)
{
	const char *name;

	if (map->dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO)
		return true;

	/*
	 * If PERF_RECORD_BPF_EVENT is not included, the dso will not have
	 * type of DSO_BINARY_TYPE__BPF_PROG_INFO. In such cases, we can
	 * guess the type based on name.
	 */
	name = map->dso->short_name;
	return name && (strstr(name, "bpf_prog_") == name);
}

bool __map__is_bpf_image(const struct map *map)
{
	const char *name;

	if (map->dso->binary_type == DSO_BINARY_TYPE__BPF_IMAGE)
		return true;

	/*
	 * If PERF_RECORD_KSYMBOL is not included, the dso will not have
	 * type of DSO_BINARY_TYPE__BPF_IMAGE. In such cases, we can
	 * guess the type based on name.
	 */
	name = map->dso->short_name;
	return name && is_bpf_image(name);
}

bool __map__is_ool(const struct map *map)
{
	return map->dso && map->dso->binary_type == DSO_BINARY_TYPE__OOL;
}

bool map__has_symbols(const struct map *map)
{
	return dso__has_symbols(map->dso);
}

static void map__exit(struct map *map)
{
	BUG_ON(refcount_read(&map->refcnt) != 0);
	dso__zput(map->dso);
}

void map__delete(struct map *map)
{
	map__exit(map);
	free(map);
}

void map__put(struct map *map)
{
	if (map && refcount_dec_and_test(&map->refcnt))
		map__delete(map);
}

void map__fixup_start(struct map *map)
{
	struct rb_root_cached *symbols = &map->dso->symbols;
	struct rb_node *nd = rb_first_cached(symbols);
	if (nd != NULL) {
		struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
		map->start = sym->start;
	}
}

void map__fixup_end(struct map *map)
{
	struct rb_root_cached *symbols = &map->dso->symbols;
	struct rb_node *nd = rb_last(&symbols->rb_root);
	if (nd != NULL) {
		struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
		map->end = sym->end;
	}
}

#define DSO__DELETED "(deleted)"

int map__load(struct map *map)
{
	const char *name = map->dso->long_name;
	int nr;

	if (dso__loaded(map->dso))
		return 0;

	nr = dso__load(map->dso, map);
	if (nr < 0) {
		if (map->dso->has_build_id) {
			char sbuild_id[SBUILD_ID_SIZE];

			build_id__sprintf(&map->dso->bid, sbuild_id);
			pr_debug("%s with build id %s not found", name, sbuild_id);
		} else
			pr_debug("Failed to open %s", name);

		pr_debug(", continuing without symbols\n");
		return -1;
	} else if (nr == 0) {
#ifdef HAVE_LIBELF_SUPPORT
		const size_t len = strlen(name);
		const size_t real_len = len - sizeof(DSO__DELETED);

		if (len > sizeof(DSO__DELETED) &&
		    strcmp(name + real_len + 1, DSO__DELETED) == 0) {
			pr_debug("%.*s was updated (is prelink enabled?). "
" 358 "Restart the long running apps that use it!\n", 359 (int)real_len, name); 360 } else { 361 pr_debug("no symbols found in %s, maybe install a debug package?\n", name); 362 } 363 #endif 364 return -1; 365 } 366 367 return 0; 368 } 369 370 struct symbol *map__find_symbol(struct map *map, u64 addr) 371 { 372 if (map__load(map) < 0) 373 return NULL; 374 375 return dso__find_symbol(map->dso, addr); 376 } 377 378 struct symbol *map__find_symbol_by_name(struct map *map, const char *name) 379 { 380 if (map__load(map) < 0) 381 return NULL; 382 383 if (!dso__sorted_by_name(map->dso)) 384 dso__sort_by_name(map->dso); 385 386 return dso__find_symbol_by_name(map->dso, name); 387 } 388 389 struct map *map__clone(struct map *from) 390 { 391 size_t size = sizeof(struct map); 392 struct map *map; 393 394 if (from->dso && from->dso->kernel) 395 size += sizeof(struct kmap); 396 397 map = memdup(from, size); 398 if (map != NULL) { 399 refcount_set(&map->refcnt, 1); 400 RB_CLEAR_NODE(&map->rb_node); 401 dso__get(map->dso); 402 } 403 404 return map; 405 } 406 407 size_t map__fprintf(struct map *map, FILE *fp) 408 { 409 return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n", 410 map->start, map->end, map->pgoff, map->dso->name); 411 } 412 413 size_t map__fprintf_dsoname(struct map *map, FILE *fp) 414 { 415 char buf[symbol_conf.pad_output_len_dso + 1]; 416 const char *dsoname = "[unknown]"; 417 418 if (map && map->dso) { 419 if (symbol_conf.show_kernel_path && map->dso->long_name) 420 dsoname = map->dso->long_name; 421 else 422 dsoname = map->dso->name; 423 } 424 425 if (symbol_conf.pad_output_len_dso) { 426 scnprintf_pad(buf, symbol_conf.pad_output_len_dso, "%s", dsoname); 427 dsoname = buf; 428 } 429 430 return fprintf(fp, "%s", dsoname); 431 } 432 433 char *map__srcline(struct map *map, u64 addr, struct symbol *sym) 434 { 435 if (map == NULL) 436 return SRCLINE_UNKNOWN; 437 return get_srcline(map->dso, map__rip_2objdump(map, addr), sym, true, true, addr); 438 } 439 440 int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix, 441 FILE *fp) 442 { 443 int ret = 0; 444 445 if (map && map->dso) { 446 char *srcline = map__srcline(map, addr, NULL); 447 if (strncmp(srcline, SRCLINE_UNKNOWN, strlen(SRCLINE_UNKNOWN)) != 0) 448 ret = fprintf(fp, "%s%s", prefix, srcline); 449 free_srcline(srcline); 450 } 451 return ret; 452 } 453 454 void srccode_state_free(struct srccode_state *state) 455 { 456 zfree(&state->srcfile); 457 state->line = 0; 458 } 459 460 /** 461 * map__rip_2objdump - convert symbol start address to objdump address. 462 * @map: memory map 463 * @rip: symbol start address 464 * 465 * objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN. 466 * map->dso->adjust_symbols==1 for ET_EXEC-like cases except ET_REL which is 467 * relative to section start. 468 * 469 * Return: Address suitable for passing to "objdump --start-address=" 470 */ 471 u64 map__rip_2objdump(struct map *map, u64 rip) 472 { 473 struct kmap *kmap = __map__kmap(map); 474 475 /* 476 * vmlinux does not have program headers for PTI entry trampolines and 477 * kcore may not either. However the trampoline object code is on the 478 * main kernel map, so just use that instead. 
	 */
	if (kmap && is_entry_trampoline(kmap->name) && kmap->kmaps && kmap->kmaps->machine) {
		struct map *kernel_map = machine__kernel_map(kmap->kmaps->machine);

		if (kernel_map)
			map = kernel_map;
	}

	if (!map->dso->adjust_symbols)
		return rip;

	if (map->dso->rel)
		return rip - map->pgoff;

	/*
	 * kernel modules also have DSO_TYPE_USER in dso->kernel,
	 * but all kernel modules are ET_REL, so won't get here.
	 */
	if (map->dso->kernel == DSO_SPACE__USER)
		return rip + map->dso->text_offset;

	return map->unmap_ip(map, rip) - map->reloc;
}

/**
 * map__objdump_2mem - convert objdump address to a memory address.
 * @map: memory map
 * @ip: objdump address
 *
 * Closely related to map__rip_2objdump(), this function takes an address from
 * objdump and converts it to a memory address.  Note this assumes that @map
 * contains the address.  To be sure the result is valid, check it forwards
 * e.g. map__rip_2objdump(map->map_ip(map, map__objdump_2mem(map, ip))) == ip
 *
 * Return: Memory address.
 */
u64 map__objdump_2mem(struct map *map, u64 ip)
{
	if (!map->dso->adjust_symbols)
		return map->unmap_ip(map, ip);

	if (map->dso->rel)
		return map->unmap_ip(map, ip + map->pgoff);

	/*
	 * kernel modules also have DSO_TYPE_USER in dso->kernel,
	 * but all kernel modules are ET_REL, so won't get here.
	 */
	if (map->dso->kernel == DSO_SPACE__USER)
		return map->unmap_ip(map, ip - map->dso->text_offset);

	return ip + map->reloc;
}

bool map__contains_symbol(const struct map *map, const struct symbol *sym)
{
	u64 ip = map->unmap_ip(map, sym->start);

	return ip >= map->start && ip < map->end;
}

static struct map *__map__next(struct map *map)
{
	struct rb_node *next = rb_next(&map->rb_node);

	if (next)
		return rb_entry(next, struct map, rb_node);
	return NULL;
}

struct map *map__next(struct map *map)
{
	return map ? __map__next(map) : NULL;
}

struct kmap *__map__kmap(struct map *map)
{
	if (!map->dso || !map->dso->kernel)
		return NULL;
	return (struct kmap *)(map + 1);
}

struct kmap *map__kmap(struct map *map)
{
	struct kmap *kmap = __map__kmap(map);

	if (!kmap)
		pr_err("Internal error: map__kmap with a non-kernel map\n");
	return kmap;
}

struct maps *map__kmaps(struct map *map)
{
	struct kmap *kmap = map__kmap(map);

	if (!kmap || !kmap->kmaps) {
		pr_err("Internal error: map__kmaps with a non-kernel map\n");
		return NULL;
	}
	return kmap->kmaps;
}

u64 map__map_ip(const struct map *map, u64 ip)
{
	return ip - map->start + map->pgoff;
}

u64 map__unmap_ip(const struct map *map, u64 ip)
{
	return ip + map->start - map->pgoff;
}

u64 identity__map_ip(const struct map *map __maybe_unused, u64 ip)
{
	return ip;
}