// SPDX-License-Identifier: GPL-2.0
#include <inttypes.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
#include "debug.h"
#include "dso.h"
#include "map.h"
#include "namespaces.h"
#include "srcline.h"
#include "symbol.h"
#include "thread.h"
#include "vdso.h"

static inline int is_android_lib(const char *filename)
{
	return strstarts(filename, "/data/app-lib/") ||
	       strstarts(filename, "/system/lib/");
}

static inline bool replace_android_lib(const char *filename, char *newfilename)
{
	const char *libname;
	char *app_abi;
	size_t app_abi_length, new_length;
	size_t lib_length = 0;

	libname = strrchr(filename, '/');
	if (libname)
		lib_length = strlen(libname);

	app_abi = getenv("APP_ABI");
	if (!app_abi)
		return false;

	app_abi_length = strlen(app_abi);

	if (strstarts(filename, "/data/app-lib/")) {
		char *apk_path;

		if (!app_abi_length)
			return false;

		new_length = 7 + app_abi_length + lib_length;

		apk_path = getenv("APK_PATH");
		if (apk_path) {
			new_length += strlen(apk_path) + 1;
			if (new_length > PATH_MAX)
				return false;
			snprintf(newfilename, new_length,
				 "%s/libs/%s/%s", apk_path, app_abi, libname);
		} else {
			if (new_length > PATH_MAX)
				return false;
			snprintf(newfilename, new_length,
				 "libs/%s/%s", app_abi, libname);
		}
		return true;
	}

	if (strstarts(filename, "/system/lib/")) {
		char *ndk, *app;
		const char *arch;
		int ndk_length, app_length;

		ndk = getenv("NDK_ROOT");
		app = getenv("APP_PLATFORM");

		if (!(ndk && app))
			return false;

		ndk_length = strlen(ndk);
		app_length = strlen(app);

		if (!(ndk_length && app_length && app_abi_length))
			return false;

		arch = !strncmp(app_abi, "arm", 3) ? "arm" :
		       !strncmp(app_abi, "mips", 4) ? "mips" :
		       !strncmp(app_abi, "x86", 3) ? "x86" : NULL;

		if (!arch)
			return false;

		new_length = 27 + ndk_length +
			     app_length + lib_length
			   + strlen(arch);

		if (new_length > PATH_MAX)
			return false;
		snprintf(newfilename, new_length,
			"%.*s/platforms/%.*s/arch-%s/usr/lib/%s",
			ndk_length, ndk, app_length, app, arch, libname);

		return true;
	}
	return false;
}

void map__init(struct map *map, u64 start, u64 end, u64 pgoff, struct dso *dso)
{
	map__set_start(map, start);
	map__set_end(map, end);
	map__set_pgoff(map, pgoff);
	map__set_reloc(map, 0);
	map__set_dso(map, dso__get(dso));
	map__set_map_ip(map, map__dso_map_ip);
	map__set_unmap_ip(map, map__dso_unmap_ip);
	map__set_erange_warned(map, false);
	refcount_set(map__refcnt(map), 1);
}

struct map *map__new(struct machine *machine, u64 start, u64 len,
		     u64 pgoff, struct dso_id *id,
		     u32 prot, u32 flags, struct build_id *bid,
		     char *filename, struct thread *thread)
{
	struct map *result;
	RC_STRUCT(map) *map;
	struct nsinfo *nsi = NULL;
	struct nsinfo *nnsi;

	map = malloc(sizeof(*map));
	if (ADD_RC_CHK(result, map)) {
		char newfilename[PATH_MAX];
		struct dso *dso, *header_bid_dso;
		int anon, no_dso, vdso, android;

		android = is_android_lib(filename);
		anon = is_anon_memory(filename) || flags & MAP_HUGETLB;
		vdso = is_vdso_map(filename);
		no_dso = is_no_dso_memory(filename);
		map->prot = prot;
		map->flags = flags;
		nsi = nsinfo__get(thread->nsinfo);

		if ((anon || no_dso) && nsi && (prot & PROT_EXEC)) {
			snprintf(newfilename, sizeof(newfilename),
				 "/tmp/perf-%d.map", nsinfo__pid(nsi));
			filename = newfilename;
		}

		if (android) {
			if (replace_android_lib(filename, newfilename))
				filename = newfilename;
		}

		if (vdso) {
			/* The vdso maps are always on the host and not the
			 * container. Ensure that we don't use setns to look
			 * them up.
			 */
			nnsi = nsinfo__copy(nsi);
			if (nnsi) {
				nsinfo__put(nsi);
				nsinfo__clear_need_setns(nnsi);
				nsi = nnsi;
			}
			pgoff = 0;
			dso = machine__findnew_vdso(machine, thread);
		} else
			dso = machine__findnew_dso_id(machine, filename, id);

		if (dso == NULL)
			goto out_delete;

		map__init(result, start, start + len, pgoff, dso);

		if (anon || no_dso) {
			map->map_ip = map->unmap_ip = identity__map_ip;

			/*
			 * Set memory without DSO as loaded. All map__find_*
			 * functions still return NULL, and we avoid the
			 * unnecessary map__load warning.
			 */
			if (!(prot & PROT_EXEC))
				dso__set_loaded(dso);
		}
		mutex_lock(&dso->lock);
		nsinfo__put(dso->nsinfo);
		dso->nsinfo = nsi;
		mutex_unlock(&dso->lock);

		if (build_id__is_defined(bid)) {
			dso__set_build_id(dso, bid);
		} else {
			/*
			 * If the mmap event had no build ID, search for an existing dso from the
			 * build ID header by name. Otherwise only the dso loaded at the time of
			 * reading the header will have the build ID set and all future mmaps will
			 * have it missing.
			 */
			down_read(&machine->dsos.lock);
			header_bid_dso = __dsos__find(&machine->dsos, filename, false);
			up_read(&machine->dsos.lock);
			if (header_bid_dso && header_bid_dso->header_build_id) {
				dso__set_build_id(dso, &header_bid_dso->bid);
				dso->header_build_id = 1;
			}
		}
		dso__put(dso);
	}
	return result;
out_delete:
	nsinfo__put(nsi);
	RC_CHK_FREE(result);
	return NULL;
}

/*
 * Constructor variant for modules (where we know from /proc/modules where
 * they are loaded) and for vmlinux, where only after we load all the
 * symbols we'll know where it starts and ends.
 */
struct map *map__new2(u64 start, struct dso *dso)
{
	struct map *result;
	RC_STRUCT(map) *map;

	map = calloc(1, sizeof(*map) + (dso->kernel ? sizeof(struct kmap) : 0));
	if (ADD_RC_CHK(result, map)) {
		/*
		 * ->end will be filled after we load all the symbols
		 */
		map__init(result, start, 0, 0, dso);
	}

	return result;
}

bool __map__is_kernel(const struct map *map)
{
	if (!map__dso(map)->kernel)
		return false;
	return machine__kernel_map(maps__machine(map__kmaps((struct map *)map))) == map;
}

bool __map__is_extra_kernel_map(const struct map *map)
{
	struct kmap *kmap = __map__kmap((struct map *)map);

	return kmap && kmap->name[0];
}

bool __map__is_bpf_prog(const struct map *map)
{
	const char *name;
	struct dso *dso = map__dso(map);

	if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO)
		return true;

	/*
	 * If PERF_RECORD_BPF_EVENT is not included, the dso will not have
	 * type of DSO_BINARY_TYPE__BPF_PROG_INFO. In such cases, we can
	 * guess the type based on name.
	 */
	name = dso->short_name;
	return name && (strstr(name, "bpf_prog_") == name);
}

bool __map__is_bpf_image(const struct map *map)
{
	const char *name;
	struct dso *dso = map__dso(map);

	if (dso->binary_type == DSO_BINARY_TYPE__BPF_IMAGE)
		return true;

	/*
	 * If PERF_RECORD_KSYMBOL is not included, the dso will not have
	 * type of DSO_BINARY_TYPE__BPF_IMAGE. In such cases, we can
	 * guess the type based on name.
	 */
	name = dso->short_name;
	return name && is_bpf_image(name);
}
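
/*
 * "ool" here refers to out-of-line code, reported by the kernel via
 * PERF_RECORD_KSYMBOL with type PERF_RECORD_KSYMBOL_TYPE_OOL (for example
 * kprobe-replaced instructions or ftrace trampolines); such symbols are
 * attached to a dso whose binary type is DSO_BINARY_TYPE__OOL.
 */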
bool __map__is_ool(const struct map *map)
{
	const struct dso *dso = map__dso(map);

	return dso && dso->binary_type == DSO_BINARY_TYPE__OOL;
}

bool map__has_symbols(const struct map *map)
{
	return dso__has_symbols(map__dso(map));
}

static void map__exit(struct map *map)
{
	BUG_ON(refcount_read(map__refcnt(map)) != 0);
	dso__zput(RC_CHK_ACCESS(map)->dso);
}

void map__delete(struct map *map)
{
	map__exit(map);
	RC_CHK_FREE(map);
}

void map__put(struct map *map)
{
	if (map && refcount_dec_and_test(map__refcnt(map)))
		map__delete(map);
	else
		RC_CHK_PUT(map);
}

void map__fixup_start(struct map *map)
{
	struct dso *dso = map__dso(map);
	struct rb_root_cached *symbols = &dso->symbols;
	struct rb_node *nd = rb_first_cached(symbols);

	if (nd != NULL) {
		struct symbol *sym = rb_entry(nd, struct symbol, rb_node);

		map__set_start(map, sym->start);
	}
}

void map__fixup_end(struct map *map)
{
	struct dso *dso = map__dso(map);
	struct rb_root_cached *symbols = &dso->symbols;
	struct rb_node *nd = rb_last(&symbols->rb_root);

	if (nd != NULL) {
		struct symbol *sym = rb_entry(nd, struct symbol, rb_node);

		map__set_end(map, sym->end);
	}
}

#define DSO__DELETED "(deleted)"

int map__load(struct map *map)
{
	struct dso *dso = map__dso(map);
	const char *name = dso->long_name;
	int nr;

	if (dso__loaded(dso))
		return 0;

	nr = dso__load(dso, map);
	if (nr < 0) {
		if (dso->has_build_id) {
			char sbuild_id[SBUILD_ID_SIZE];

			build_id__sprintf(&dso->bid, sbuild_id);
			pr_debug("%s with build id %s not found", name, sbuild_id);
		} else
			pr_debug("Failed to open %s", name);

		pr_debug(", continuing without symbols\n");
		return -1;
	} else if (nr == 0) {
#ifdef HAVE_LIBELF_SUPPORT
		const size_t len = strlen(name);
		const size_t real_len = len - sizeof(DSO__DELETED);

		if (len > sizeof(DSO__DELETED) &&
		    strcmp(name + real_len + 1, DSO__DELETED) == 0) {
			pr_debug("%.*s was updated (is prelink enabled?). "
				"Restart the long running apps that use it!\n",
				(int)real_len, name);
		} else {
			pr_debug("no symbols found in %s, maybe install a debug package?\n", name);
		}
#endif
		return -1;
	}

	return 0;
}

struct symbol *map__find_symbol(struct map *map, u64 addr)
{
	if (map__load(map) < 0)
		return NULL;

	return dso__find_symbol(map__dso(map), addr);
}

struct symbol *map__find_symbol_by_name(struct map *map, const char *name)
{
	struct dso *dso;

	if (map__load(map) < 0)
		return NULL;

	dso = map__dso(map);
	if (!dso__sorted_by_name(dso))
		dso__sort_by_name(dso);

	return dso__find_symbol_by_name(dso, name);
}

struct map *map__clone(struct map *from)
{
	struct map *result;
	RC_STRUCT(map) *map;
	size_t size = sizeof(RC_STRUCT(map));
	struct dso *dso = map__dso(from);

	if (dso && dso->kernel)
		size += sizeof(struct kmap);

	map = memdup(RC_CHK_ACCESS(from), size);
	if (ADD_RC_CHK(result, map)) {
		refcount_set(&map->refcnt, 1);
		map->dso = dso__get(dso);
	}

	return result;
}

size_t map__fprintf(struct map *map, FILE *fp)
{
	const struct dso *dso = map__dso(map);

	return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n",
		       map__start(map), map__end(map), map__pgoff(map), dso->name);
}

size_t map__fprintf_dsoname(struct map *map, FILE *fp)
{
	char buf[symbol_conf.pad_output_len_dso + 1];
	const char *dsoname = "[unknown]";
	const struct dso *dso = map ? map__dso(map) : NULL;

	if (dso) {
		if (symbol_conf.show_kernel_path && dso->long_name)
			dsoname = dso->long_name;
		else
			dsoname = dso->name;
	}

	if (symbol_conf.pad_output_len_dso) {
		scnprintf_pad(buf, symbol_conf.pad_output_len_dso, "%s", dsoname);
		dsoname = buf;
	}

	return fprintf(fp, "%s", dsoname);
}

char *map__srcline(struct map *map, u64 addr, struct symbol *sym)
{
	if (map == NULL)
		return SRCLINE_UNKNOWN;

	return get_srcline(map__dso(map), map__rip_2objdump(map, addr), sym, true, true, addr);
}

int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix,
			 FILE *fp)
{
	const struct dso *dso = map ? map__dso(map) : NULL;
	int ret = 0;

	if (dso) {
		char *srcline = map__srcline(map, addr, NULL);
		if (strncmp(srcline, SRCLINE_UNKNOWN, strlen(SRCLINE_UNKNOWN)) != 0)
			ret = fprintf(fp, "%s%s", prefix, srcline);
		free_srcline(srcline);
	}
	return ret;
}

void srccode_state_free(struct srccode_state *state)
{
	zfree(&state->srcfile);
	state->line = 0;
}

/**
 * map__rip_2objdump - convert symbol start address to objdump address.
 * @map: memory map
 * @rip: symbol start address
 *
 * objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN.
 * map->dso->adjust_symbols==1 for ET_EXEC-like cases except ET_REL which is
 * relative to section start.
 *
 * Return: Address suitable for passing to "objdump --start-address="
 */
u64 map__rip_2objdump(struct map *map, u64 rip)
{
	struct kmap *kmap = __map__kmap(map);
	const struct dso *dso = map__dso(map);

	/*
	 * vmlinux does not have program headers for PTI entry trampolines and
	 * kcore may not either. However the trampoline object code is on the
	 * main kernel map, so just use that instead.
	 */
	if (kmap && is_entry_trampoline(kmap->name) && kmap->kmaps) {
		struct machine *machine = maps__machine(kmap->kmaps);

		if (machine) {
			struct map *kernel_map = machine__kernel_map(machine);

			if (kernel_map)
				map = kernel_map;
		}
	}

	if (!dso->adjust_symbols)
		return rip;

	if (dso->rel)
		return rip - map__pgoff(map);

	/*
	 * kernel modules also have DSO_TYPE_USER in dso->kernel,
	 * but all kernel modules are ET_REL, so won't get here.
	 */
	if (dso->kernel == DSO_SPACE__USER)
		return rip + dso->text_offset;

	return map__unmap_ip(map, rip) - map__reloc(map);
}

/**
 * map__objdump_2mem - convert objdump address to a memory address.
 * @map: memory map
 * @ip: objdump address
 *
 * Closely related to map__rip_2objdump(), this function takes an address from
 * objdump and converts it to a memory address. Note this assumes that @map
 * contains the address. To be sure the result is valid, check it forwards
 * e.g. map__rip_2objdump(map__map_ip(map, map__objdump_2mem(map, ip))) == ip
 *
 * Return: Memory address.
 */
u64 map__objdump_2mem(struct map *map, u64 ip)
{
	const struct dso *dso = map__dso(map);

	if (!dso->adjust_symbols)
		return map__unmap_ip(map, ip);

	if (dso->rel)
		return map__unmap_ip(map, ip + map__pgoff(map));

	/*
	 * kernel modules also have DSO_TYPE_USER in dso->kernel,
	 * but all kernel modules are ET_REL, so won't get here.
	 */
	if (dso->kernel == DSO_SPACE__USER)
		return map__unmap_ip(map, ip - dso->text_offset);

	return ip + map__reloc(map);
}

bool map__contains_symbol(const struct map *map, const struct symbol *sym)
{
	u64 ip = map__unmap_ip(map, sym->start);

	return ip >= map__start(map) && ip < map__end(map);
}

struct kmap *__map__kmap(struct map *map)
{
	const struct dso *dso = map__dso(map);

	if (!dso || !dso->kernel)
		return NULL;
	return (struct kmap *)(&RC_CHK_ACCESS(map)[1]);
}

struct kmap *map__kmap(struct map *map)
{
	struct kmap *kmap = __map__kmap(map);

	if (!kmap)
		pr_err("Internal error: map__kmap with a non-kernel map\n");
	return kmap;
}

struct maps *map__kmaps(struct map *map)
{
	struct kmap *kmap = map__kmap(map);

	if (!kmap || !kmap->kmaps) {
		pr_err("Internal error: map__kmaps with a non-kernel map\n");
		return NULL;
	}
	return kmap->kmaps;
}

u64 map__dso_map_ip(const struct map *map, u64 ip)
{
	return ip - map__start(map) + map__pgoff(map);
}

u64 map__dso_unmap_ip(const struct map *map, u64 ip)
{
	return ip + map__start(map) - map__pgoff(map);
}

u64 identity__map_ip(const struct map *map __maybe_unused, u64 ip)
{
	return ip;
}