#include "symbol.h"
#include <errno.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>
#include "map.h"

const char *map_type__name[MAP__NR_TYPES] = {
	[MAP__FUNCTION] = "Functions",
	[MAP__VARIABLE] = "Variables",
};

static inline int is_anon_memory(const char *filename)
{
	return strcmp(filename, "//anon") == 0;
}

void map__init(struct map *self, enum map_type type,
	       u64 start, u64 end, u64 pgoff, struct dso *dso)
{
	self->type = type;
	self->start = start;
	self->end = end;
	self->pgoff = pgoff;
	self->dso = dso;
	self->map_ip = map__map_ip;
	self->unmap_ip = map__unmap_ip;
	RB_CLEAR_NODE(&self->rb_node);
	self->groups = NULL;
}

struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
		     u64 pgoff, u32 pid, char *filename,
		     enum map_type type)
{
	struct map *self = malloc(sizeof(*self));

	if (self != NULL) {
		char newfilename[PATH_MAX];
		struct dso *dso;
		int anon;

		anon = is_anon_memory(filename);

		if (anon) {
			snprintf(newfilename, sizeof(newfilename), "/tmp/perf-%d.map", pid);
			filename = newfilename;
		}

		dso = __dsos__findnew(dsos__list, filename);
		if (dso == NULL)
			goto out_delete;

		map__init(self, type, start, start + len, pgoff, dso);

		if (anon) {
set_identity:
			self->map_ip = self->unmap_ip = identity__map_ip;
		} else if (strcmp(filename, "[vdso]") == 0) {
			dso__set_loaded(dso, self->type);
			goto set_identity;
		}
	}
	return self;
out_delete:
	free(self);
	return NULL;
}

void map__delete(struct map *self)
{
	free(self);
}

void map__fixup_start(struct map *self)
{
	struct rb_root *symbols = &self->dso->symbols[self->type];
	struct rb_node *nd = rb_first(symbols);
	if (nd != NULL) {
		struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
		self->start = sym->start;
	}
}

void map__fixup_end(struct map *self)
{
	struct rb_root *symbols = &self->dso->symbols[self->type];
	struct rb_node *nd = rb_last(symbols);
	if (nd != NULL) {
		struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
		self->end = sym->end;
	}
}

#define DSO__DELETED "(deleted)"

int map__load(struct map *self, symbol_filter_t filter)
{
	const char *name = self->dso->long_name;
	int nr;

	if (dso__loaded(self->dso, self->type))
		return 0;

	nr = dso__load(self->dso, self, filter);
	if (nr < 0) {
		if (self->dso->has_build_id) {
			char sbuild_id[BUILD_ID_SIZE * 2 + 1];

			build_id__sprintf(self->dso->build_id,
					  sizeof(self->dso->build_id),
					  sbuild_id);
			pr_warning("%s with build id %s not found",
				   name, sbuild_id);
		} else
			pr_warning("Failed to open %s", name);

		pr_warning(", continuing without symbols\n");
		return -1;
	} else if (nr == 0) {
		const size_t len = strlen(name);
		const size_t real_len = len - sizeof(DSO__DELETED);

		if (len > sizeof(DSO__DELETED) &&
		    strcmp(name + real_len + 1, DSO__DELETED) == 0) {
			pr_warning("%.*s was updated, restart the long "
				   "running apps that use it!\n",
				   (int)real_len, name);
		} else {
			pr_warning("no symbols found in %s, maybe install "
				   "a debug package?\n", name);
		}

		return -1;
	}
	/*
	 * Only applies to the kernel, as its symtabs aren't relative like the
	 * module ones.
	 */
	if (self->dso->kernel)
		map__reloc_vmlinux(self);

	return 0;
}
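/*
 * Usage sketch: how an mmap event handler might create a map for an
 * anonymous executable region with map__new() above.  The "machine", "mg"
 * and "event" variables are assumptions about the caller, not definitions
 * from this file:
 *
 *	struct map *map = map__new(&machine->user_dsos, event->start,
 *				   event->len, event->pgoff, event->pid,
 *				   event->filename, MAP__FUNCTION);
 *
 *	if (map == NULL)
 *		return -ENOMEM;
 *	map_groups__insert(mg, map);
 */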
struct symbol *map__find_symbol(struct map *self, u64 addr,
				symbol_filter_t filter)
{
	if (map__load(self, filter) < 0)
		return NULL;

	return dso__find_symbol(self->dso, self->type, addr);
}

struct symbol *map__find_symbol_by_name(struct map *self, const char *name,
					symbol_filter_t filter)
{
	if (map__load(self, filter) < 0)
		return NULL;

	if (!dso__sorted_by_name(self->dso, self->type))
		dso__sort_by_name(self->dso, self->type);

	return dso__find_symbol_by_name(self->dso, self->type, name);
}

struct map *map__clone(struct map *self)
{
	struct map *map = malloc(sizeof(*self));

	if (!map)
		return NULL;

	memcpy(map, self, sizeof(*self));

	return map;
}

int map__overlap(struct map *l, struct map *r)
{
	if (l->start > r->start) {
		struct map *t = l;
		l = r;
		r = t;
	}

	if (l->end > r->start)
		return 1;

	return 0;
}

size_t map__fprintf(struct map *self, FILE *fp)
{
	return fprintf(fp, " %Lx-%Lx %Lx %s\n",
		       self->start, self->end, self->pgoff, self->dso->name);
}

/*
 * objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN.
 * map->dso->adjust_symbols==1 for ET_EXEC-like cases.
 */
u64 map__rip_2objdump(struct map *map, u64 rip)
{
	u64 addr = map->dso->adjust_symbols ?
			map->unmap_ip(map, rip) :	/* RIP -> IP */
			rip;
	return addr;
}

u64 map__objdump_2ip(struct map *map, u64 addr)
{
	u64 ip = map->dso->adjust_symbols ?
			addr :
			map->unmap_ip(map, addr);	/* RIP -> IP */
	return ip;
}
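/*
 * Usage sketch for the two helpers above, assuming "map" and "sym" come from
 * an already resolved sample: symbol addresses are converted to what objdump
 * expects with map__rip_2objdump(), and addresses parsed back out of the
 * objdump output are converted back with map__objdump_2ip().  "parsed_addr"
 * is a placeholder for whatever the disassembly parser produced:
 *
 *	u64 start = map__rip_2objdump(map, sym->start);
 *	u64 end   = map__rip_2objdump(map, sym->end);
 *	// ...disassemble [start, end), then for each printed address:
 *	u64 ip = map__objdump_2ip(map, parsed_addr);
 */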
void map_groups__init(struct map_groups *self)
{
	int i;
	for (i = 0; i < MAP__NR_TYPES; ++i) {
		self->maps[i] = RB_ROOT;
		INIT_LIST_HEAD(&self->removed_maps[i]);
	}
	self->machine = NULL;
}

static void maps__delete(struct rb_root *self)
{
	struct rb_node *next = rb_first(self);

	while (next) {
		struct map *pos = rb_entry(next, struct map, rb_node);

		next = rb_next(&pos->rb_node);
		rb_erase(&pos->rb_node, self);
		map__delete(pos);
	}
}

static void maps__delete_removed(struct list_head *self)
{
	struct map *pos, *n;

	list_for_each_entry_safe(pos, n, self, node) {
		list_del(&pos->node);
		map__delete(pos);
	}
}

void map_groups__exit(struct map_groups *self)
{
	int i;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		maps__delete(&self->maps[i]);
		maps__delete_removed(&self->removed_maps[i]);
	}
}

void map_groups__flush(struct map_groups *self)
{
	int type;

	for (type = 0; type < MAP__NR_TYPES; type++) {
		struct rb_root *root = &self->maps[type];
		struct rb_node *next = rb_first(root);

		while (next) {
			struct map *pos = rb_entry(next, struct map, rb_node);
			next = rb_next(&pos->rb_node);
			rb_erase(&pos->rb_node, root);
			/*
			 * We may have references to this map, for
			 * instance in some hist_entry instances, so
			 * just move them to a separate list.
			 */
			list_add_tail(&pos->node, &self->removed_maps[pos->type]);
		}
	}
}

struct symbol *map_groups__find_symbol(struct map_groups *self,
				       enum map_type type, u64 addr,
				       struct map **mapp,
				       symbol_filter_t filter)
{
	struct map *map = map_groups__find(self, type, addr);

	if (map != NULL) {
		if (mapp != NULL)
			*mapp = map;
		return map__find_symbol(map, map->map_ip(map, addr), filter);
	}

	return NULL;
}

struct symbol *map_groups__find_symbol_by_name(struct map_groups *self,
					       enum map_type type,
					       const char *name,
					       struct map **mapp,
					       symbol_filter_t filter)
{
	struct rb_node *nd;

	for (nd = rb_first(&self->maps[type]); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node);
		struct symbol *sym = map__find_symbol_by_name(pos, name, filter);

		if (sym == NULL)
			continue;
		if (mapp != NULL)
			*mapp = pos;
		return sym;
	}

	return NULL;
}

size_t __map_groups__fprintf_maps(struct map_groups *self,
				  enum map_type type, int verbose, FILE *fp)
{
	size_t printed = fprintf(fp, "%s:\n", map_type__name[type]);
	struct rb_node *nd;

	for (nd = rb_first(&self->maps[type]); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node);
		printed += fprintf(fp, "Map:");
		printed += map__fprintf(pos, fp);
		if (verbose > 2) {
			printed += dso__fprintf(pos->dso, type, fp);
			printed += fprintf(fp, "--\n");
		}
	}

	return printed;
}

size_t map_groups__fprintf_maps(struct map_groups *self, int verbose, FILE *fp)
{
	size_t printed = 0, i;
	for (i = 0; i < MAP__NR_TYPES; ++i)
		printed += __map_groups__fprintf_maps(self, i, verbose, fp);
	return printed;
}

static size_t __map_groups__fprintf_removed_maps(struct map_groups *self,
						 enum map_type type,
						 int verbose, FILE *fp)
{
	struct map *pos;
	size_t printed = 0;

	list_for_each_entry(pos, &self->removed_maps[type], node) {
		printed += fprintf(fp, "Map:");
		printed += map__fprintf(pos, fp);
		if (verbose > 1) {
			printed += dso__fprintf(pos->dso, type, fp);
			printed += fprintf(fp, "--\n");
		}
	}
	return printed;
}

static size_t map_groups__fprintf_removed_maps(struct map_groups *self,
					       int verbose, FILE *fp)
{
	size_t printed = 0, i;
	for (i = 0; i < MAP__NR_TYPES; ++i)
		printed += __map_groups__fprintf_removed_maps(self, i, verbose, fp);
	return printed;
}

size_t map_groups__fprintf(struct map_groups *self, int verbose, FILE *fp)
{
	size_t printed = map_groups__fprintf_maps(self, verbose, fp);
	printed += fprintf(fp, "Removed maps:\n");
	return printed + map_groups__fprintf_removed_maps(self, verbose, fp);
}

int map_groups__fixup_overlappings(struct map_groups *self, struct map *map,
				   int verbose, FILE *fp)
{
	struct rb_root *root = &self->maps[map->type];
	struct rb_node *next = rb_first(root);

	while (next) {
		struct map *pos = rb_entry(next, struct map, rb_node);
		next = rb_next(&pos->rb_node);

		if (!map__overlap(pos, map))
			continue;

		if (verbose >= 2) {
			fputs("overlapping maps:\n", fp);
			map__fprintf(map, fp);
			map__fprintf(pos, fp);
		}

		rb_erase(&pos->rb_node, root);
		/*
		 * We may have references to this map, for instance in some
		 * hist_entry instances, so just move them to a separate
		 * list.
		 */
		list_add_tail(&pos->node, &self->removed_maps[map->type]);
		/*
		 * Now check if we need to create new maps for areas not
		 * overlapped by the new map:
		 */
		if (map->start > pos->start) {
			struct map *before = map__clone(pos);

			if (before == NULL)
				return -ENOMEM;

			before->end = map->start - 1;
			map_groups__insert(self, before);
			if (verbose >= 2)
				map__fprintf(before, fp);
		}

		if (map->end < pos->end) {
			struct map *after = map__clone(pos);

			if (after == NULL)
				return -ENOMEM;

			after->start = map->end + 1;
			map_groups__insert(self, after);
			if (verbose >= 2)
				map__fprintf(after, fp);
		}
	}

	return 0;
}
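/*
 * Usage sketch: keeping a map_groups tree free of overlaps when a new map
 * arrives.  "mg" is assumed to be the destination struct map_groups and
 * "map" a freshly created struct map; map_groups__insert() is the insertion
 * helper already used above:
 *
 *	if (map_groups__fixup_overlappings(mg, map, verbose, stderr) < 0)
 *		return -ENOMEM;
 *	map_groups__insert(mg, map);
 */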
/*
 * XXX This should not really _copy_ the maps, but refcount them.
 */
int map_groups__clone(struct map_groups *self,
		      struct map_groups *parent, enum map_type type)
{
	struct rb_node *nd;
	for (nd = rb_first(&parent->maps[type]); nd; nd = rb_next(nd)) {
		struct map *map = rb_entry(nd, struct map, rb_node);
		struct map *new = map__clone(map);
		if (new == NULL)
			return -ENOMEM;
		map_groups__insert(self, new);
	}
	return 0;
}

static u64 map__reloc_map_ip(struct map *map, u64 ip)
{
	return ip + (s64)map->pgoff;
}

static u64 map__reloc_unmap_ip(struct map *map, u64 ip)
{
	return ip - (s64)map->pgoff;
}

void map__reloc_vmlinux(struct map *self)
{
	struct kmap *kmap = map__kmap(self);
	s64 reloc;

	if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->unrelocated_addr)
		return;

	reloc = (kmap->ref_reloc_sym->unrelocated_addr -
		 kmap->ref_reloc_sym->addr);

	if (!reloc)
		return;

	self->map_ip = map__reloc_map_ip;
	self->unmap_ip = map__reloc_unmap_ip;
	self->pgoff = reloc;
}

void maps__insert(struct rb_root *maps, struct map *map)
{
	struct rb_node **p = &maps->rb_node;
	struct rb_node *parent = NULL;
	const u64 ip = map->start;
	struct map *m;

	while (*p != NULL) {
		parent = *p;
		m = rb_entry(parent, struct map, rb_node);
		if (ip < m->start)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&map->rb_node, parent, p);
	rb_insert_color(&map->rb_node, maps);
}

struct map *maps__find(struct rb_root *maps, u64 ip)
{
	struct rb_node **p = &maps->rb_node;
	struct rb_node *parent = NULL;
	struct map *m;

	while (*p != NULL) {
		parent = *p;
		m = rb_entry(parent, struct map, rb_node);
		if (ip < m->start)
			p = &(*p)->rb_left;
		else if (ip > m->end)
			p = &(*p)->rb_right;
		else
			return m;
	}

	return NULL;
}

int machine__init(struct machine *self, const char *root_dir, pid_t pid)
{
	map_groups__init(&self->kmaps);
	RB_CLEAR_NODE(&self->rb_node);
	INIT_LIST_HEAD(&self->user_dsos);
	INIT_LIST_HEAD(&self->kernel_dsos);

	self->kmaps.machine = self;
	self->pid = pid;
	self->root_dir = strdup(root_dir);
	return self->root_dir == NULL ? -ENOMEM : 0;
}
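/*
 * Usage sketch: a host machine can also be set up in place with
 * machine__init() instead of going through machines__add() below; the empty
 * root_dir and HOST_KERNEL_ID follow the convention machines__findnew()
 * relies on.  The "host" variable is an assumption about the caller:
 *
 *	struct machine host;
 *
 *	if (machine__init(&host, "", HOST_KERNEL_ID) < 0)
 *		return -ENOMEM;
 */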
struct machine *machines__add(struct rb_root *self, pid_t pid,
			      const char *root_dir)
{
	struct rb_node **p = &self->rb_node;
	struct rb_node *parent = NULL;
	struct machine *pos, *machine = malloc(sizeof(*machine));

	if (!machine)
		return NULL;

	if (machine__init(machine, root_dir, pid) != 0) {
		free(machine);
		return NULL;
	}

	while (*p != NULL) {
		parent = *p;
		pos = rb_entry(parent, struct machine, rb_node);
		if (pid < pos->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&machine->rb_node, parent, p);
	rb_insert_color(&machine->rb_node, self);

	return machine;
}

struct machine *machines__find(struct rb_root *self, pid_t pid)
{
	struct rb_node **p = &self->rb_node;
	struct rb_node *parent = NULL;
	struct machine *machine;
	struct machine *default_machine = NULL;

	while (*p != NULL) {
		parent = *p;
		machine = rb_entry(parent, struct machine, rb_node);
		if (pid < machine->pid)
			p = &(*p)->rb_left;
		else if (pid > machine->pid)
			p = &(*p)->rb_right;
		else
			return machine;
		/* remember the machine with a zero pid as the fallback */
		if (!machine->pid)
			default_machine = machine;
	}

	return default_machine;
}

struct machine *machines__findnew(struct rb_root *self, pid_t pid)
{
	char path[PATH_MAX];
	const char *root_dir;
	struct machine *machine = machines__find(self, pid);

	if (!machine || machine->pid != pid) {
		if (pid == HOST_KERNEL_ID || pid == DEFAULT_GUEST_KERNEL_ID)
			root_dir = "";
		else {
			if (!symbol_conf.guestmount)
				goto out;
			sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
			if (access(path, R_OK)) {
				pr_err("Can't access file %s\n", path);
				goto out;
			}
			root_dir = path;
		}
		machine = machines__add(self, pid, root_dir);
	}

out:
	return machine;
}

void machines__process(struct rb_root *self, machine__process_t process, void *data)
{
	struct rb_node *nd;

	for (nd = rb_first(self); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		process(pos, data);
	}
}

char *machine__mmap_name(struct machine *self, char *bf, size_t size)
{
	if (machine__is_host(self))
		snprintf(bf, size, "[%s]", "kernel.kallsyms");
	else if (machine__is_default_guest(self))
		snprintf(bf, size, "[%s]", "guest.kernel.kallsyms");
	else
		snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms", self->pid);

	return bf;
}
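/*
 * Usage sketch: resolving the machine for a sample and building the
 * synthetic kernel mmap name.  The "machines" tree and "pid" are assumed to
 * come from the session/event being processed:
 *
 *	char name[PATH_MAX];
 *	struct machine *machine = machines__findnew(&machines, pid);
 *
 *	if (machine != NULL)
 *		machine__mmap_name(machine, name, sizeof(name));
 */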