#include "callchain.h"
#include "debug.h"
#include "event.h"
#include "evsel.h"
#include "hist.h"
#include "machine.h"
#include "map.h"
#include "sort.h"
#include "strlist.h"
#include "thread.h"
#include <stdbool.h>
#include <symbol/kallsyms.h>
#include "unwind.h"

int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
{
	map_groups__init(&machine->kmaps);
	RB_CLEAR_NODE(&machine->rb_node);
	INIT_LIST_HEAD(&machine->user_dsos);
	INIT_LIST_HEAD(&machine->kernel_dsos);

	machine->threads = RB_ROOT;
	INIT_LIST_HEAD(&machine->dead_threads);
	machine->last_match = NULL;

	machine->kmaps.machine = machine;
	machine->pid = pid;

	machine->symbol_filter = NULL;
	machine->id_hdr_size = 0;

	machine->root_dir = strdup(root_dir);
	if (machine->root_dir == NULL)
		return -ENOMEM;

	if (pid != HOST_KERNEL_ID) {
		struct thread *thread = machine__findnew_thread(machine, 0,
								pid);
		char comm[64];

		if (thread == NULL) {
			/* Callers only free the machine itself on failure */
			zfree(&machine->root_dir);
			return -ENOMEM;
		}

		snprintf(comm, sizeof(comm), "[guest/%d]", pid);
		thread__set_comm(thread, comm, 0);
	}

	return 0;
}

struct machine *machine__new_host(void)
{
	struct machine *machine = malloc(sizeof(*machine));

	if (machine != NULL) {
		machine__init(machine, "", HOST_KERNEL_ID);

		if (machine__create_kernel_maps(machine) < 0)
			goto out_delete;
	}

	return machine;
out_delete:
	free(machine);
	return NULL;
}

static void dsos__delete(struct list_head *dsos)
{
	struct dso *pos, *n;

	list_for_each_entry_safe(pos, n, dsos, node) {
		list_del(&pos->node);
		dso__delete(pos);
	}
}

void machine__delete_dead_threads(struct machine *machine)
{
	struct thread *n, *t;

	list_for_each_entry_safe(t, n, &machine->dead_threads, node) {
		list_del(&t->node);
		thread__delete(t);
	}
}

void machine__delete_threads(struct machine *machine)
{
	struct rb_node *nd = rb_first(&machine->threads);

	while (nd) {
		struct thread *t = rb_entry(nd, struct thread, rb_node);

		/*
		 * Advance before erasing: rb_next() on an already
		 * erased node is unsafe.
		 */
		nd = rb_next(nd);
		rb_erase(&t->rb_node, &machine->threads);
		thread__delete(t);
	}
}

void machine__exit(struct machine *machine)
{
	map_groups__exit(&machine->kmaps);
	dsos__delete(&machine->user_dsos);
	dsos__delete(&machine->kernel_dsos);
	zfree(&machine->root_dir);
}

void machine__delete(struct machine *machine)
{
	machine__exit(machine);
	free(machine);
}

void machines__init(struct machines *machines)
{
	machine__init(&machines->host, "", HOST_KERNEL_ID);
	machines->guests = RB_ROOT;
	machines->symbol_filter = NULL;
}

void machines__exit(struct machines *machines)
{
	machine__exit(&machines->host);
	/* XXX exit guest */
}

struct machine *machines__add(struct machines *machines, pid_t pid,
			      const char *root_dir)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *pos, *machine = malloc(sizeof(*machine));

	if (machine == NULL)
		return NULL;

	if (machine__init(machine, root_dir, pid) != 0) {
		free(machine);
		return NULL;
	}

	machine->symbol_filter = machines->symbol_filter;

	while (*p != NULL) {
		parent = *p;
		pos = rb_entry(parent, struct machine, rb_node);
		if (pid < pos->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

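	/*
	 * The walk above ended at the leaf slot for the new machine: link
	 * it in, then let the rbtree recolor and rebalance itself.
	 */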
	rb_link_node(&machine->rb_node, parent, p);
	rb_insert_color(&machine->rb_node, &machines->guests);

	return machine;
}

void machines__set_symbol_filter(struct machines *machines,
				 symbol_filter_t symbol_filter)
{
	struct rb_node *nd;

	machines->symbol_filter = symbol_filter;
	machines->host.symbol_filter = symbol_filter;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		machine->symbol_filter = symbol_filter;
	}
}

struct machine *machines__find(struct machines *machines, pid_t pid)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *machine;
	struct machine *default_machine = NULL;

	if (pid == HOST_KERNEL_ID)
		return &machines->host;

	while (*p != NULL) {
		parent = *p;
		machine = rb_entry(parent, struct machine, rb_node);
		if (pid < machine->pid)
			p = &(*p)->rb_left;
		else if (pid > machine->pid)
			p = &(*p)->rb_right;
		else
			return machine;
		if (!machine->pid)
			default_machine = machine;
	}

	return default_machine;
}

struct machine *machines__findnew(struct machines *machines, pid_t pid)
{
	char path[PATH_MAX];
	const char *root_dir = "";
	struct machine *machine = machines__find(machines, pid);

	if (machine && (machine->pid == pid))
		goto out;

	if ((pid != HOST_KERNEL_ID) &&
	    (pid != DEFAULT_GUEST_KERNEL_ID) &&
	    (symbol_conf.guestmount)) {
		snprintf(path, sizeof(path), "%s/%d",
			 symbol_conf.guestmount, pid);
		if (access(path, R_OK)) {
			static struct strlist *seen;

			if (!seen)
				seen = strlist__new(true, NULL);

			if (!strlist__has_entry(seen, path)) {
				pr_err("Can't access file %s\n", path);
				strlist__add(seen, path);
			}
			machine = NULL;
			goto out;
		}
		root_dir = path;
	}

	machine = machines__add(machines, pid, root_dir);
out:
	return machine;
}

void machines__process_guests(struct machines *machines,
			      machine__process_t process, void *data)
{
	struct rb_node *nd;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		process(pos, data);
	}
}

char *machine__mmap_name(struct machine *machine, char *bf, size_t size)
{
	if (machine__is_host(machine))
		snprintf(bf, size, "[%s]", "kernel.kallsyms");
	else if (machine__is_default_guest(machine))
		snprintf(bf, size, "[%s]", "guest.kernel.kallsyms");
	else {
		snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms",
			 machine->pid);
	}

	return bf;
}

void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
{
	struct rb_node *node;
	struct machine *machine;

	machines->host.id_hdr_size = id_hdr_size;

	for (node = rb_first(&machines->guests); node; node = rb_next(node)) {
		machine = rb_entry(node, struct machine, rb_node);
		machine->id_hdr_size = id_hdr_size;
	}
}

static struct thread *__machine__findnew_thread(struct machine *machine,
						pid_t pid, pid_t tid,
						bool create)
{
	struct rb_node **p = &machine->threads.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - TID lookups come in blocks,
	 * so most of the time we don't have to look up
	 * the full rbtree:
	 */
	if (machine->last_match && machine->last_match->tid == tid) {
		if (pid && pid != machine->last_match->pid_)
			machine->last_match->pid_ = pid;
		return machine->last_match;
	}

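	/* Cache miss: fall back to a full walk of the tid-ordered rbtree */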
	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->tid == tid) {
			machine->last_match = th;
			if (pid && pid != th->pid_)
				th->pid_ = pid;
			return th;
		}

		if (tid < th->tid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	if (!create)
		return NULL;

	th = thread__new(pid, tid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, &machine->threads);
		machine->last_match = th;

		/*
		 * We have to initialize map_groups separately, after the
		 * rb tree is updated.
		 *
		 * The reason is that we call machine__findnew_thread within
		 * thread__init_map_groups to find the thread leader, and
		 * that would screw up the rb tree.
		 */
		if (thread__init_map_groups(th, machine))
			return NULL;
	}

	return th;
}

struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
				       pid_t tid)
{
	return __machine__findnew_thread(machine, pid, tid, true);
}

struct thread *machine__find_thread(struct machine *machine, pid_t pid,
				    pid_t tid)
{
	return __machine__findnew_thread(machine, pid, tid, false);
}

int machine__process_comm_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->comm.pid,
							event->comm.tid);

	if (dump_trace)
		perf_event__fprintf_comm(event, stdout);

	if (thread == NULL ||
	    thread__set_comm(thread, event->comm.comm, sample->time)) {
		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
		return -1;
	}

	return 0;
}

int machine__process_lost_event(struct machine *machine __maybe_unused,
				union perf_event *event,
				struct perf_sample *sample __maybe_unused)
{
	dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
		    event->lost.id, event->lost.lost);
	return 0;
}

struct map *machine__new_module(struct machine *machine, u64 start,
				const char *filename)
{
	struct map *map;
	struct dso *dso = __dsos__findnew(&machine->kernel_dsos, filename);

	if (dso == NULL)
		return NULL;

	map = map__new2(start, dso, MAP__FUNCTION);
	if (map == NULL)
		return NULL;

	if (machine__is_host(machine))
		dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
	else
		dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;
	map_groups__insert(&machine->kmaps, map);
	return map;
}

size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = __dsos__fprintf(&machines->host.kernel_dsos, fp) +
		     __dsos__fprintf(&machines->host.user_dsos, fp);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += __dsos__fprintf(&pos->kernel_dsos, fp);
		ret += __dsos__fprintf(&pos->user_dsos, fp);
	}

	return ret;
}

size_t machine__fprintf_dsos_buildid(struct machine *machine, FILE *fp,
				     bool (skip)(struct dso *dso, int parm), int parm)
{
	return __dsos__fprintf_buildid(&machine->kernel_dsos, fp, skip, parm) +
	       __dsos__fprintf_buildid(&machine->user_dsos, fp, skip, parm);
}

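/* As above, but summed over the host and every guest machine */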
size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
				      bool (skip)(struct dso *dso, int parm), int parm)
{
	struct rb_node *nd;
	size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
	}
	return ret;
}

size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
{
	int i;
	size_t printed = 0;
	struct dso *kdso = machine->vmlinux_maps[MAP__FUNCTION]->dso;

	if (kdso->has_build_id) {
		char filename[PATH_MAX];
		if (dso__build_id_filename(kdso, filename, sizeof(filename)))
			printed += fprintf(fp, "[0] %s\n", filename);
	}

	for (i = 0; i < vmlinux_path__nr_entries; ++i)
		printed += fprintf(fp, "[%d] %s\n",
				   i + kdso->has_build_id, vmlinux_path[i]);

	return printed;
}

size_t machine__fprintf(struct machine *machine, FILE *fp)
{
	size_t ret = 0;
	struct rb_node *nd;

	for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
		struct thread *pos = rb_entry(nd, struct thread, rb_node);

		ret += thread__fprintf(pos, fp);
	}

	return ret;
}

static struct dso *machine__get_kernel(struct machine *machine)
{
	const char *vmlinux_name = NULL;
	struct dso *kernel;

	if (machine__is_host(machine)) {
		vmlinux_name = symbol_conf.vmlinux_name;
		if (!vmlinux_name)
			vmlinux_name = "[kernel.kallsyms]";

		kernel = dso__kernel_findnew(machine, vmlinux_name,
					     "[kernel]",
					     DSO_TYPE_KERNEL);
	} else {
		char bf[PATH_MAX];

		if (machine__is_default_guest(machine))
			vmlinux_name = symbol_conf.default_guest_vmlinux_name;
		if (!vmlinux_name)
			vmlinux_name = machine__mmap_name(machine, bf,
							  sizeof(bf));

		kernel = dso__kernel_findnew(machine, vmlinux_name,
					     "[guest.kernel]",
					     DSO_TYPE_GUEST_KERNEL);
	}

	if (kernel != NULL && (!kernel->has_build_id))
		dso__read_running_kernel_build_id(kernel, machine);

	return kernel;
}

struct process_args {
	u64 start;
};

static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
					   size_t bufsz)
{
	if (machine__is_default_guest(machine))
		scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
	else
		scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
}

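/* Candidate symbols marking the start of kernel text, tried in order */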
const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};

/* Figure out the start address of kernel map from /proc/kallsyms.
 * Returns the name of the start symbol in *symbol_name. Pass in NULL as
 * symbol_name if it's not that important.
 */
static u64 machine__get_kernel_start_addr(struct machine *machine,
					  const char **symbol_name)
{
	char filename[PATH_MAX];
	int i;
	const char *name;
	u64 addr = 0;

	machine__get_kallsyms_filename(machine, filename, PATH_MAX);

	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
		return 0;

	for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
		addr = kallsyms__get_function_start(filename, name);
		if (addr)
			break;
	}

	if (symbol_name)
		*symbol_name = name;

	return addr;
}

int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
{
	enum map_type type;
	u64 start = machine__get_kernel_start_addr(machine, NULL);

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;

		machine->vmlinux_maps[type] = map__new2(start, kernel, type);
		if (machine->vmlinux_maps[type] == NULL)
			return -1;

		machine->vmlinux_maps[type]->map_ip =
			machine->vmlinux_maps[type]->unmap_ip =
				identity__map_ip;
		kmap = map__kmap(machine->vmlinux_maps[type]);
		kmap->kmaps = &machine->kmaps;
		map_groups__insert(&machine->kmaps,
				   machine->vmlinux_maps[type]);
	}

	return 0;
}

void machine__destroy_kernel_maps(struct machine *machine)
{
	enum map_type type;

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;

		if (machine->vmlinux_maps[type] == NULL)
			continue;

		kmap = map__kmap(machine->vmlinux_maps[type]);
		map_groups__remove(&machine->kmaps,
				   machine->vmlinux_maps[type]);
		if (kmap->ref_reloc_sym) {
			/*
			 * ref_reloc_sym is shared among all maps, so free just
			 * on one of them.
			 */
			if (type == MAP__FUNCTION) {
				zfree((char **)&kmap->ref_reloc_sym->name);
				zfree(&kmap->ref_reloc_sym);
			} else
				kmap->ref_reloc_sym = NULL;
		}

		map__delete(machine->vmlinux_maps[type]);
		machine->vmlinux_maps[type] = NULL;
	}
}

int machines__create_guest_kernel_maps(struct machines *machines)
{
	int ret = 0;
	struct dirent **namelist = NULL;
	int i, items = 0;
	char path[PATH_MAX];
	pid_t pid;
	char *endp;

	if (symbol_conf.default_guest_vmlinux_name ||
	    symbol_conf.default_guest_modules ||
	    symbol_conf.default_guest_kallsyms) {
		machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
	}

	if (symbol_conf.guestmount) {
		items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
		if (items <= 0)
			return -ENOENT;
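		/*
		 * Each directory under guestmount named after a pid is
		 * treated as the root of one guest machine.
		 */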
		for (i = 0; i < items; i++) {
			if (!isdigit(namelist[i]->d_name[0])) {
				/* Filter out . and .. */
				continue;
			}
			pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
			if ((*endp != '\0') ||
			    (endp == namelist[i]->d_name) ||
			    (errno == ERANGE)) {
				pr_debug("invalid directory (%s). Skipping.\n",
					 namelist[i]->d_name);
				continue;
			}
			snprintf(path, sizeof(path), "%s/%s/proc/kallsyms",
				 symbol_conf.guestmount,
				 namelist[i]->d_name);
			ret = access(path, R_OK);
			if (ret) {
				pr_debug("Can't access file %s\n", path);
				goto failure;
			}
			machines__create_kernel_maps(machines, pid);
		}
failure:
		/* scandir() allocates each entry as well as the array */
		for (i = 0; i < items; i++)
			free(namelist[i]);
		free(namelist);
	}

	return ret;
}

void machines__destroy_kernel_maps(struct machines *machines)
{
	struct rb_node *next = rb_first(&machines->guests);

	machine__destroy_kernel_maps(&machines->host);

	while (next) {
		struct machine *pos = rb_entry(next, struct machine, rb_node);

		next = rb_next(&pos->rb_node);
		rb_erase(&pos->rb_node, &machines->guests);
		machine__delete(pos);
	}
}

int machines__create_kernel_maps(struct machines *machines, pid_t pid)
{
	struct machine *machine = machines__findnew(machines, pid);

	if (machine == NULL)
		return -1;

	return machine__create_kernel_maps(machine);
}

int machine__load_kallsyms(struct machine *machine, const char *filename,
			   enum map_type type, symbol_filter_t filter)
{
	struct map *map = machine->vmlinux_maps[type];
	int ret = dso__load_kallsyms(map->dso, filename, map, filter);

	if (ret > 0) {
		dso__set_loaded(map->dso, type);
		/*
		 * Since /proc/kallsyms will have multiple sections for the
		 * kernel, with modules between them, fixup the end of all
		 * sections.
		 */
		__map_groups__fixup_end(&machine->kmaps, type);
	}

	return ret;
}

int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
			       symbol_filter_t filter)
{
	struct map *map = machine->vmlinux_maps[type];
	int ret = dso__load_vmlinux_path(map->dso, map, filter);

	if (ret > 0)
		dso__set_loaded(map->dso, type);

	return ret;
}

static void map_groups__fixup_end(struct map_groups *mg)
{
	int i;
	for (i = 0; i < MAP__NR_TYPES; ++i)
		__map_groups__fixup_end(mg, i);
}

static char *get_kernel_version(const char *root_dir)
{
	char version[PATH_MAX];
	FILE *file;
	char *name, *tmp;
	const char *prefix = "Linux version ";

	snprintf(version, sizeof(version), "%s/proc/version", root_dir);
	file = fopen(version, "r");
	if (!file)
		return NULL;

	version[0] = '\0';
	tmp = fgets(version, sizeof(version), file);
	fclose(file);

	name = strstr(version, prefix);
	if (!name)
		return NULL;
	name += strlen(prefix);
	tmp = strchr(name, ' ');
	if (tmp)
		*tmp = '\0';

	return strdup(name);
}

static int map_groups__set_modules_path_dir(struct map_groups *mg,
					    const char *dir_name, int depth)
{
	struct dirent *dent;
	DIR *dir = opendir(dir_name);
	int ret = 0;

	if (!dir) {
		pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
		return -1;
	}

	while ((dent = readdir(dir)) != NULL) {
		char path[PATH_MAX];
		struct stat st;

		/* sshfs might return bad dent->d_type, so we have to stat */
		snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
		if (stat(path, &st))
			continue;

		if (S_ISDIR(st.st_mode)) {
			if (!strcmp(dent->d_name, ".") ||
			    !strcmp(dent->d_name, ".."))
				continue;

			/* Do not follow top-level source and build symlinks */
			if (depth == 0) {
				if (!strcmp(dent->d_name, "source") ||
				    !strcmp(dent->d_name, "build"))
					continue;
			}

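			/* Recurse: modules live in nested category directories */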
			ret = map_groups__set_modules_path_dir(mg, path,
							       depth + 1);
			if (ret < 0)
				goto out;
		} else {
			char *dot = strrchr(dent->d_name, '.'),
			     dso_name[PATH_MAX];
			struct map *map;
			char *long_name;

			if (dot == NULL || strcmp(dot, ".ko"))
				continue;
			snprintf(dso_name, sizeof(dso_name), "[%.*s]",
				 (int)(dot - dent->d_name), dent->d_name);

			strxfrchar(dso_name, '-', '_');
			map = map_groups__find_by_name(mg, MAP__FUNCTION,
						       dso_name);
			if (map == NULL)
				continue;

			long_name = strdup(path);
			if (long_name == NULL) {
				ret = -1;
				goto out;
			}
			dso__set_long_name(map->dso, long_name, true);
			dso__kernel_module_get_build_id(map->dso, "");
		}
	}

out:
	closedir(dir);
	return ret;
}

static int machine__set_modules_path(struct machine *machine)
{
	char *version;
	char modules_path[PATH_MAX];

	version = get_kernel_version(machine->root_dir);
	if (!version)
		return -1;

	snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
		 machine->root_dir, version);
	free(version);

	return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
}

static int machine__create_module(void *arg, const char *name, u64 start)
{
	struct machine *machine = arg;
	struct map *map;

	map = machine__new_module(machine, start, name);
	if (map == NULL)
		return -1;

	dso__kernel_module_get_build_id(map->dso, machine->root_dir);

	return 0;
}

static int machine__create_modules(struct machine *machine)
{
	const char *modules;
	char path[PATH_MAX];

	if (machine__is_default_guest(machine)) {
		modules = symbol_conf.default_guest_modules;
	} else {
		snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
		modules = path;
	}

	if (symbol__restricted_filename(modules, "/proc/modules"))
		return -1;

	if (modules__parse(modules, machine, machine__create_module))
		return -1;

	if (!machine__set_modules_path(machine))
		return 0;

	pr_debug("Problems setting modules path maps, continuing anyway...\n");

	return 0;
}

int machine__create_kernel_maps(struct machine *machine)
{
	struct dso *kernel = machine__get_kernel(machine);
	const char *name;
	u64 addr = machine__get_kernel_start_addr(machine, &name);

	if (!addr)
		return -1;

	if (kernel == NULL ||
	    __machine__create_kernel_maps(machine, kernel) < 0)
		return -1;

	if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
		if (machine__is_host(machine))
			pr_debug("Problems creating module maps, "
				 "continuing anyway...\n");
		else
			pr_debug("Problems creating module maps for guest %d, "
				 "continuing anyway...\n", machine->pid);
	}

	/*
	 * Now that we have all the maps created, just set the ->end of them:
	 */
	map_groups__fixup_end(&machine->kmaps);

	if (maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name,
					     addr)) {
		machine__destroy_kernel_maps(machine);
		return -1;
	}

	return 0;
}

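/*
 * Propagate the address range carried by a kernel MMAP event to every
 * per-type vmlinux map.
 */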
static void machine__set_kernel_mmap_len(struct machine *machine,
					 union perf_event *event)
{
	int i;

	for (i = 0; i < MAP__NR_TYPES; i++) {
		machine->vmlinux_maps[i]->start = event->mmap.start;
		machine->vmlinux_maps[i]->end = (event->mmap.start +
						 event->mmap.len);
		/*
		 * Be a bit paranoid here, some perf.data file came with
		 * a zero sized synthesized MMAP event for the kernel.
		 */
		if (machine->vmlinux_maps[i]->end == 0)
			machine->vmlinux_maps[i]->end = ~0ULL;
	}
}

static bool machine__uses_kcore(struct machine *machine)
{
	struct dso *dso;

	list_for_each_entry(dso, &machine->kernel_dsos, node) {
		if (dso__is_kcore(dso))
			return true;
	}

	return false;
}

static int machine__process_kernel_mmap_event(struct machine *machine,
					      union perf_event *event)
{
	struct map *map;
	char kmmap_prefix[PATH_MAX];
	enum dso_kernel_type kernel_type;
	bool is_kernel_mmap;

	/* If we have maps from kcore then we do not need or want any others */
	if (machine__uses_kcore(machine))
		return 0;

	machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
	if (machine__is_host(machine))
		kernel_type = DSO_TYPE_KERNEL;
	else
		kernel_type = DSO_TYPE_GUEST_KERNEL;

	is_kernel_mmap = memcmp(event->mmap.filename,
				kmmap_prefix,
				strlen(kmmap_prefix) - 1) == 0;
	if (event->mmap.filename[0] == '/' ||
	    (!is_kernel_mmap && event->mmap.filename[0] == '[')) {

		char short_module_name[1024];
		char *name, *dot;

		if (event->mmap.filename[0] == '/') {
			name = strrchr(event->mmap.filename, '/');
			if (name == NULL)
				goto out_problem;

			++name; /* skip / */
			dot = strrchr(name, '.');
			if (dot == NULL)
				goto out_problem;
			snprintf(short_module_name, sizeof(short_module_name),
				 "[%.*s]", (int)(dot - name), name);
			strxfrchar(short_module_name, '-', '_');
		} else {
			/* Bounded copy: the filename can be longer than this buffer */
			snprintf(short_module_name, sizeof(short_module_name),
				 "%s", event->mmap.filename);
		}

		map = machine__new_module(machine, event->mmap.start,
					  event->mmap.filename);
		if (map == NULL)
			goto out_problem;

		name = strdup(short_module_name);
		if (name == NULL)
			goto out_problem;

		dso__set_short_name(map->dso, name, true);
		map->end = map->start + event->mmap.len;
	} else if (is_kernel_mmap) {
		const char *symbol_name = (event->mmap.filename +
					   strlen(kmmap_prefix));
		/*
		 * Should be there already, from the build-id table in
		 * the header.
		 */
		struct dso *kernel = __dsos__findnew(&machine->kernel_dsos,
						     kmmap_prefix);
		if (kernel == NULL)
			goto out_problem;

		kernel->kernel = kernel_type;
		if (__machine__create_kernel_maps(machine, kernel) < 0)
			goto out_problem;

		machine__set_kernel_mmap_len(machine, event);

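		/*
		 * The synthesized kernel MMAP event carries the ref reloc
		 * symbol address in ->pgoff.
		 */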
		/*
		 * Avoid using a zero address (kptr_restrict) for the ref reloc
		 * symbol. Effectively having zero here means that at record
		 * time /proc/sys/kernel/kptr_restrict was non zero.
		 */
		if (event->mmap.pgoff != 0) {
			maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
							 symbol_name,
							 event->mmap.pgoff);
		}

		if (machine__is_default_guest(machine)) {
			/*
			 * preload dso of guest kernel and modules
			 */
			dso__load(kernel, machine->vmlinux_maps[MAP__FUNCTION],
				  NULL);
		}
	}
	return 0;
out_problem:
	return -1;
}

int machine__process_mmap2_event(struct machine *machine,
				 union perf_event *event,
				 struct perf_sample *sample __maybe_unused)
{
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread;
	struct map *map;
	enum map_type type;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap2(event, stdout);

	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap2.pid,
					 event->mmap2.tid);
	if (thread == NULL)
		goto out_problem;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
		type = MAP__VARIABLE;
	else
		type = MAP__FUNCTION;

	map = map__new(&machine->user_dsos, event->mmap2.start,
		       event->mmap2.len, event->mmap2.pgoff,
		       event->mmap2.pid, event->mmap2.maj,
		       event->mmap2.min, event->mmap2.ino,
		       event->mmap2.ino_generation,
		       event->mmap2.prot,
		       event->mmap2.flags,
		       event->mmap2.filename, type);

	if (map == NULL)
		goto out_problem;

	thread__insert_map(thread, map);
	return 0;

out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
	return 0;
}

int machine__process_mmap_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample __maybe_unused)
{
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread;
	struct map *map;
	enum map_type type;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap(event, stdout);

	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap.pid,
					 event->mmap.tid);
	if (thread == NULL)
		goto out_problem;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
		type = MAP__VARIABLE;
	else
		type = MAP__FUNCTION;

	map = map__new(&machine->user_dsos, event->mmap.start,
		       event->mmap.len, event->mmap.pgoff,
		       event->mmap.pid, 0, 0, 0, 0, 0, 0,
		       event->mmap.filename,
		       type);

	if (map == NULL)
		goto out_problem;

	thread__insert_map(thread, map);
	return 0;

out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
	return 0;
}

static void machine__remove_thread(struct machine *machine, struct thread *th)
{
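	/* The lookup cache may point at the thread being removed */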
	machine->last_match = NULL;
	rb_erase(&th->rb_node, &machine->threads);
	/*
	 * We may have references to this thread, for instance in some
	 * hist_entry instances, so just move them to a separate list.
	 */
	list_add_tail(&th->node, &machine->dead_threads);
}

int machine__process_fork_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);
	struct thread *parent = machine__findnew_thread(machine,
							event->fork.ppid,
							event->fork.ptid);

	/* if a thread currently exists for the thread id remove it */
	if (thread != NULL)
		machine__remove_thread(machine, thread);

	thread = machine__findnew_thread(machine, event->fork.pid,
					 event->fork.tid);
	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	if (thread == NULL || parent == NULL ||
	    thread__fork(thread, parent, sample->time) < 0) {
		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
		return -1;
	}

	return 0;
}

int machine__process_exit_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample __maybe_unused)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	if (thread != NULL)
		thread__exited(thread);

	return 0;
}

int machine__process_event(struct machine *machine, union perf_event *event,
			   struct perf_sample *sample)
{
	int ret;

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret = machine__process_comm_event(machine, event, sample); break;
	case PERF_RECORD_MMAP:
		ret = machine__process_mmap_event(machine, event, sample); break;
	case PERF_RECORD_MMAP2:
		ret = machine__process_mmap2_event(machine, event, sample); break;
	case PERF_RECORD_FORK:
		ret = machine__process_fork_event(machine, event, sample); break;
	case PERF_RECORD_EXIT:
		ret = machine__process_exit_event(machine, event, sample); break;
	case PERF_RECORD_LOST:
		ret = machine__process_lost_event(machine, event, sample); break;
	default:
		ret = -1;
		break;
	}

	return ret;
}

static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
{
	if (sym->name && !regexec(regex, sym->name, 0, NULL, 0))
		return true;
	return false;
}

static void ip__resolve_ams(struct machine *machine, struct thread *thread,
			    struct addr_map_symbol *ams,
			    u64 ip)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));
	/*
	 * We cannot use the header.misc hint to determine whether a
	 * branch stack address is user, kernel, guest, or hypervisor.
	 * Branches may straddle the kernel/user/hypervisor boundaries.
	 * Thus, we have to try consecutively until we find a match,
	 * or else the symbol is unknown.
	 */
	thread__find_cpumode_addr_location(thread, machine, MAP__FUNCTION, ip, &al);

	ams->addr = ip;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
}

static void ip__resolve_data(struct machine *machine, struct thread *thread,
			     u8 m, struct addr_map_symbol *ams, u64 addr)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));

	thread__find_addr_location(thread, machine, m, MAP__VARIABLE, addr,
				   &al);
	ams->addr = addr;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
}

struct mem_info *sample__resolve_mem(struct perf_sample *sample,
				     struct addr_location *al)
{
	struct mem_info *mi = zalloc(sizeof(*mi));

	if (!mi)
		return NULL;

	ip__resolve_ams(al->machine, al->thread, &mi->iaddr, sample->ip);
	ip__resolve_data(al->machine, al->thread, al->cpumode,
			 &mi->daddr, sample->addr);
	mi->data_src.val = sample->data_src;

	return mi;
}

struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
					   struct addr_location *al)
{
	unsigned int i;
	const struct branch_stack *bs = sample->branch_stack;
	struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));

	if (!bi)
		return NULL;

	for (i = 0; i < bs->nr; i++) {
		ip__resolve_ams(al->machine, al->thread, &bi[i].to, bs->entries[i].to);
		ip__resolve_ams(al->machine, al->thread, &bi[i].from, bs->entries[i].from);
		bi[i].flags = bs->entries[i].flags;
	}
	return bi;
}

static int machine__resolve_callchain_sample(struct machine *machine,
					     struct thread *thread,
					     struct ip_callchain *chain,
					     struct symbol **parent,
					     struct addr_location *root_al,
					     int max_stack)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	int chain_nr = min(max_stack, (int)chain->nr);
	int i;
	int err;

	callchain_cursor_reset(&callchain_cursor);

	if (chain->nr > PERF_MAX_STACK_DEPTH) {
		pr_warning("corrupted callchain. skipping...\n");
		return 0;
	}

	for (i = 0; i < chain_nr; i++) {
		u64 ip;
		struct addr_location al;

		if (callchain_param.order == ORDER_CALLEE)
			ip = chain->ips[i];
		else
			ip = chain->ips[chain->nr - i - 1];

		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR;
				break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL;
				break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER;
				break;
			default:
				pr_debug("invalid callchain context: "
					 "%"PRId64"\n", (s64) ip);
				/*
				 * It seems the callchain is corrupted.
				 * Discard all.
				 */
				callchain_cursor_reset(&callchain_cursor);
				return 0;
			}
			continue;
		}

		al.filtered = 0;
		thread__find_addr_location(thread, machine, cpumode,
					   MAP__FUNCTION, ip, &al);
		if (al.sym != NULL) {
			if (sort__has_parent && !*parent &&
			    symbol__match_regex(al.sym, &parent_regex))
				*parent = al.sym;
			else if (have_ignore_callees && root_al &&
				 symbol__match_regex(al.sym, &ignore_callees_regex)) {
				/*
				 * Treat this symbol as the root,
				 * forgetting its callees.
				 */
				*root_al = al;
				callchain_cursor_reset(&callchain_cursor);
			}
		}

		err = callchain_cursor_append(&callchain_cursor,
					      ip, al.map, al.sym);
		if (err)
			return err;
	}

	return 0;
}

static int unwind_entry(struct unwind_entry *entry, void *arg)
{
	struct callchain_cursor *cursor = arg;
	return callchain_cursor_append(cursor, entry->ip,
				       entry->map, entry->sym);
}

int machine__resolve_callchain(struct machine *machine,
			       struct perf_evsel *evsel,
			       struct thread *thread,
			       struct perf_sample *sample,
			       struct symbol **parent,
			       struct addr_location *root_al,
			       int max_stack)
{
	int ret;

	ret = machine__resolve_callchain_sample(machine, thread,
						sample->callchain, parent,
						root_al, max_stack);
	if (ret)
		return ret;

	/* Can we do dwarf post unwind? */
	if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) &&
	      (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER)))
		return 0;

	/* Bail out if nothing was captured. */
	if ((!sample->user_regs.regs) ||
	    (!sample->user_stack.size))
		return 0;

	return unwind__get_entries(unwind_entry, &callchain_cursor, machine,
				   thread, sample, max_stack);
}

int machine__for_each_thread(struct machine *machine,
			     int (*fn)(struct thread *thread, void *p),
			     void *priv)
{
	struct rb_node *nd;
	struct thread *thread;
	int rc = 0;

	for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
		thread = rb_entry(nd, struct thread, rb_node);
		rc = fn(thread, priv);
		if (rc != 0)
			return rc;
	}

	list_for_each_entry(thread, &machine->dead_threads, node) {
		rc = fn(thread, priv);
		if (rc != 0)
			return rc;
	}
	return rc;
}

int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
				  struct target *target, struct thread_map *threads,
				  perf_event__handler_t process, bool data_mmap)
{
	if (target__has_task(target))
		return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap);
	else if (target__has_cpu(target))
		return perf_event__synthesize_threads(tool, process, machine, data_mmap);
	/* command specified */
	return 0;
}