1 #include "callchain.h" 2 #include "debug.h" 3 #include "event.h" 4 #include "evsel.h" 5 #include "hist.h" 6 #include "machine.h" 7 #include "map.h" 8 #include "sort.h" 9 #include "strlist.h" 10 #include "thread.h" 11 #include <stdbool.h> 12 #include <symbol/kallsyms.h> 13 #include "unwind.h" 14 15 int machine__init(struct machine *machine, const char *root_dir, pid_t pid) 16 { 17 map_groups__init(&machine->kmaps); 18 RB_CLEAR_NODE(&machine->rb_node); 19 INIT_LIST_HEAD(&machine->user_dsos); 20 INIT_LIST_HEAD(&machine->kernel_dsos); 21 22 machine->threads = RB_ROOT; 23 INIT_LIST_HEAD(&machine->dead_threads); 24 machine->last_match = NULL; 25 26 machine->kmaps.machine = machine; 27 machine->pid = pid; 28 29 machine->symbol_filter = NULL; 30 machine->id_hdr_size = 0; 31 32 machine->root_dir = strdup(root_dir); 33 if (machine->root_dir == NULL) 34 return -ENOMEM; 35 36 if (pid != HOST_KERNEL_ID) { 37 struct thread *thread = machine__findnew_thread(machine, 0, 38 pid); 39 char comm[64]; 40 41 if (thread == NULL) 42 return -ENOMEM; 43 44 snprintf(comm, sizeof(comm), "[guest/%d]", pid); 45 thread__set_comm(thread, comm, 0); 46 } 47 48 return 0; 49 } 50 51 struct machine *machine__new_host(void) 52 { 53 struct machine *machine = malloc(sizeof(*machine)); 54 55 if (machine != NULL) { 56 machine__init(machine, "", HOST_KERNEL_ID); 57 58 if (machine__create_kernel_maps(machine) < 0) 59 goto out_delete; 60 } 61 62 return machine; 63 out_delete: 64 free(machine); 65 return NULL; 66 } 67 68 static void dsos__delete(struct list_head *dsos) 69 { 70 struct dso *pos, *n; 71 72 list_for_each_entry_safe(pos, n, dsos, node) { 73 list_del(&pos->node); 74 dso__delete(pos); 75 } 76 } 77 78 void machine__delete_dead_threads(struct machine *machine) 79 { 80 struct thread *n, *t; 81 82 list_for_each_entry_safe(t, n, &machine->dead_threads, node) { 83 list_del(&t->node); 84 thread__delete(t); 85 } 86 } 87 88 void machine__delete_threads(struct machine *machine) 89 { 90 struct rb_node *nd = rb_first(&machine->threads); 91 92 while (nd) { 93 struct thread *t = rb_entry(nd, struct thread, rb_node); 94 95 rb_erase(&t->rb_node, &machine->threads); 96 nd = rb_next(nd); 97 thread__delete(t); 98 } 99 } 100 101 void machine__exit(struct machine *machine) 102 { 103 map_groups__exit(&machine->kmaps); 104 dsos__delete(&machine->user_dsos); 105 dsos__delete(&machine->kernel_dsos); 106 zfree(&machine->root_dir); 107 } 108 109 void machine__delete(struct machine *machine) 110 { 111 machine__exit(machine); 112 free(machine); 113 } 114 115 void machines__init(struct machines *machines) 116 { 117 machine__init(&machines->host, "", HOST_KERNEL_ID); 118 machines->guests = RB_ROOT; 119 machines->symbol_filter = NULL; 120 } 121 122 void machines__exit(struct machines *machines) 123 { 124 machine__exit(&machines->host); 125 /* XXX exit guest */ 126 } 127 128 struct machine *machines__add(struct machines *machines, pid_t pid, 129 const char *root_dir) 130 { 131 struct rb_node **p = &machines->guests.rb_node; 132 struct rb_node *parent = NULL; 133 struct machine *pos, *machine = malloc(sizeof(*machine)); 134 135 if (machine == NULL) 136 return NULL; 137 138 if (machine__init(machine, root_dir, pid) != 0) { 139 free(machine); 140 return NULL; 141 } 142 143 machine->symbol_filter = machines->symbol_filter; 144 145 while (*p != NULL) { 146 parent = *p; 147 pos = rb_entry(parent, struct machine, rb_node); 148 if (pid < pos->pid) 149 p = &(*p)->rb_left; 150 else 151 p = &(*p)->rb_right; 152 } 153 154 rb_link_node(&machine->rb_node, parent, p); 
void machines__set_symbol_filter(struct machines *machines,
				 symbol_filter_t symbol_filter)
{
	struct rb_node *nd;

	machines->symbol_filter = symbol_filter;
	machines->host.symbol_filter = symbol_filter;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		machine->symbol_filter = symbol_filter;
	}
}

struct machine *machines__find(struct machines *machines, pid_t pid)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *machine;
	struct machine *default_machine = NULL;

	if (pid == HOST_KERNEL_ID)
		return &machines->host;

	while (*p != NULL) {
		parent = *p;
		machine = rb_entry(parent, struct machine, rb_node);
		if (pid < machine->pid)
			p = &(*p)->rb_left;
		else if (pid > machine->pid)
			p = &(*p)->rb_right;
		else
			return machine;
		if (!machine->pid)
			default_machine = machine;
	}

	return default_machine;
}

struct machine *machines__findnew(struct machines *machines, pid_t pid)
{
	char path[PATH_MAX];
	const char *root_dir = "";
	struct machine *machine = machines__find(machines, pid);

	if (machine && (machine->pid == pid))
		goto out;

	if ((pid != HOST_KERNEL_ID) &&
	    (pid != DEFAULT_GUEST_KERNEL_ID) &&
	    (symbol_conf.guestmount)) {
		sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
		if (access(path, R_OK)) {
			static struct strlist *seen;

			if (!seen)
				seen = strlist__new(true, NULL);

			if (!strlist__has_entry(seen, path)) {
				pr_err("Can't access file %s\n", path);
				strlist__add(seen, path);
			}
			machine = NULL;
			goto out;
		}
		root_dir = path;
	}

	machine = machines__add(machines, pid, root_dir);
out:
	return machine;
}

void machines__process_guests(struct machines *machines,
			      machine__process_t process, void *data)
{
	struct rb_node *nd;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		process(pos, data);
	}
}

char *machine__mmap_name(struct machine *machine, char *bf, size_t size)
{
	if (machine__is_host(machine))
		snprintf(bf, size, "[%s]", "kernel.kallsyms");
	else if (machine__is_default_guest(machine))
		snprintf(bf, size, "[%s]", "guest.kernel.kallsyms");
	else {
		snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms",
			 machine->pid);
	}

	return bf;
}

void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
{
	struct rb_node *node;
	struct machine *machine;

	machines->host.id_hdr_size = id_hdr_size;

	for (node = rb_first(&machines->guests); node; node = rb_next(node)) {
		machine = rb_entry(node, struct machine, rb_node);
		machine->id_hdr_size = id_hdr_size;
	}
}

static struct thread *__machine__findnew_thread(struct machine *machine,
						pid_t pid, pid_t tid,
						bool create)
{
	struct rb_node **p = &machine->threads.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - TID lookups come in blocks,
	 * so most of the time we don't have to look up
	 * the full rbtree:
	 */
	if (machine->last_match && machine->last_match->tid == tid) {
		if (pid && pid != machine->last_match->pid_)
			machine->last_match->pid_ = pid;
		return machine->last_match;
	}

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->tid == tid) {
			machine->last_match = th;
			if (pid && pid != th->pid_)
				th->pid_ = pid;
			return th;
		}

		if (tid < th->tid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	if (!create)
		return NULL;

	th = thread__new(pid, tid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, &machine->threads);
		machine->last_match = th;
	}

	return th;
}

struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
				       pid_t tid)
{
	return __machine__findnew_thread(machine, pid, tid, true);
}

struct thread *machine__find_thread(struct machine *machine, pid_t tid)
{
	return __machine__findnew_thread(machine, 0, tid, false);
}

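/*
 * Example (sketch): resolving the thread for a sample while processing
 * events. Passing a zero pid leaves an already-known pid_ untouched,
 * which is why the lookup-only machine__find_thread() above passes 0.
 *
 *	struct thread *thread = machine__findnew_thread(machine,
 *							sample->pid,
 *							sample->tid);
 *	if (thread == NULL)
 *		return -1;
 */
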
int machine__process_comm_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->comm.pid,
							event->comm.tid);

	if (dump_trace)
		perf_event__fprintf_comm(event, stdout);

	if (thread == NULL || thread__set_comm(thread, event->comm.comm, sample->time)) {
		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
		return -1;
	}

	return 0;
}

int machine__process_lost_event(struct machine *machine __maybe_unused,
				union perf_event *event, struct perf_sample *sample __maybe_unused)
{
	dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
		    event->lost.id, event->lost.lost);
	return 0;
}

struct map *machine__new_module(struct machine *machine, u64 start,
				const char *filename)
{
	struct map *map;
	struct dso *dso = __dsos__findnew(&machine->kernel_dsos, filename);

	if (dso == NULL)
		return NULL;

	map = map__new2(start, dso, MAP__FUNCTION);
	if (map == NULL)
		return NULL;

	if (machine__is_host(machine))
		dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
	else
		dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;
	map_groups__insert(&machine->kmaps, map);
	return map;
}

size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = __dsos__fprintf(&machines->host.kernel_dsos, fp) +
		     __dsos__fprintf(&machines->host.user_dsos, fp);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += __dsos__fprintf(&pos->kernel_dsos, fp);
		ret += __dsos__fprintf(&pos->user_dsos, fp);
	}

	return ret;
}

size_t machine__fprintf_dsos_buildid(struct machine *machine, FILE *fp,
				     bool (skip)(struct dso *dso, int parm), int parm)
{
	return __dsos__fprintf_buildid(&machine->kernel_dsos, fp, skip, parm) +
	       __dsos__fprintf_buildid(&machine->user_dsos, fp, skip, parm);
}

size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
				      bool (skip)(struct dso *dso, int parm), int parm)
{
	struct rb_node *nd;
	size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
	}
	return ret;
}

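/*
 * Sketch of a skip callback for the two buildid fprintf routines above;
 * the callback name and the dso->hit test are illustrative, a real
 * caller (e.g. build-id writeout) supplies its own filter:
 *
 *	static bool skip_unhit(struct dso *dso, int parm __maybe_unused)
 *	{
 *		return !dso->hit;
 *	}
 *
 *	machines__fprintf_dsos_buildid(&machines, fp, skip_unhit, 0);
 */
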
size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
{
	int i;
	size_t printed = 0;
	struct dso *kdso = machine->vmlinux_maps[MAP__FUNCTION]->dso;

	if (kdso->has_build_id) {
		char filename[PATH_MAX];
		if (dso__build_id_filename(kdso, filename, sizeof(filename)))
			printed += fprintf(fp, "[0] %s\n", filename);
	}

	for (i = 0; i < vmlinux_path__nr_entries; ++i)
		printed += fprintf(fp, "[%d] %s\n",
				   i + kdso->has_build_id, vmlinux_path[i]);

	return printed;
}

size_t machine__fprintf(struct machine *machine, FILE *fp)
{
	size_t ret = 0;
	struct rb_node *nd;

	for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
		struct thread *pos = rb_entry(nd, struct thread, rb_node);

		ret += thread__fprintf(pos, fp);
	}

	return ret;
}

static struct dso *machine__get_kernel(struct machine *machine)
{
	const char *vmlinux_name = NULL;
	struct dso *kernel;

	if (machine__is_host(machine)) {
		vmlinux_name = symbol_conf.vmlinux_name;
		if (!vmlinux_name)
			vmlinux_name = "[kernel.kallsyms]";

		kernel = dso__kernel_findnew(machine, vmlinux_name,
					     "[kernel]",
					     DSO_TYPE_KERNEL);
	} else {
		char bf[PATH_MAX];

		if (machine__is_default_guest(machine))
			vmlinux_name = symbol_conf.default_guest_vmlinux_name;
		if (!vmlinux_name)
			vmlinux_name = machine__mmap_name(machine, bf,
							  sizeof(bf));

		kernel = dso__kernel_findnew(machine, vmlinux_name,
					     "[guest.kernel]",
					     DSO_TYPE_GUEST_KERNEL);
	}

	if (kernel != NULL && (!kernel->has_build_id))
		dso__read_running_kernel_build_id(kernel, machine);

	return kernel;
}

struct process_args {
	u64 start;
};

static int symbol__in_kernel(void *arg, const char *name,
			     char type __maybe_unused, u64 start)
{
	struct process_args *args = arg;

	if (strchr(name, '['))
		return 0;

	args->start = start;
	return 1;
}

/* Figure out the start address of kernel map from /proc/kallsyms */
static u64 machine__get_kernel_start_addr(struct machine *machine)
{
	const char *filename;
	char path[PATH_MAX];
	struct process_args args;

	if (machine__is_default_guest(machine))
		filename = symbol_conf.default_guest_kallsyms;
	else {
		sprintf(path, "%s/proc/kallsyms", machine->root_dir);
		filename = path;
	}

	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
		return 0;

	if (kallsyms__parse(filename, &args, symbol__in_kernel) <= 0)
		return 0;

	return args.start;
}

int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
{
	enum map_type type;
	u64 start = machine__get_kernel_start_addr(machine);

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;

		machine->vmlinux_maps[type] = map__new2(start, kernel, type);
		if (machine->vmlinux_maps[type] == NULL)
			return -1;

		machine->vmlinux_maps[type]->map_ip =
			machine->vmlinux_maps[type]->unmap_ip =
				identity__map_ip;
		kmap = map__kmap(machine->vmlinux_maps[type]);
		kmap->kmaps = &machine->kmaps;
		map_groups__insert(&machine->kmaps,
				   machine->vmlinux_maps[type]);
	}

	return 0;
}

void machine__destroy_kernel_maps(struct machine *machine)
{
	enum map_type type;

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;

		if (machine->vmlinux_maps[type] == NULL)
			continue;

		kmap = map__kmap(machine->vmlinux_maps[type]);
		map_groups__remove(&machine->kmaps,
				   machine->vmlinux_maps[type]);
		if (kmap->ref_reloc_sym) {
			/*
			 * ref_reloc_sym is shared among all maps, so free just
			 * on one of them.
			 */
			if (type == MAP__FUNCTION) {
				zfree((char **)&kmap->ref_reloc_sym->name);
				zfree(&kmap->ref_reloc_sym);
			} else
				kmap->ref_reloc_sym = NULL;
		}

		map__delete(machine->vmlinux_maps[type]);
		machine->vmlinux_maps[type] = NULL;
	}
}

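/*
 * Illustrative pairing (sketch): __machine__create_kernel_maps() and
 * machine__destroy_kernel_maps() bracket the lifetime of the vmlinux
 * maps, one per map type:
 *
 *	struct dso *kernel = machine__get_kernel(machine);
 *
 *	if (kernel == NULL ||
 *	    __machine__create_kernel_maps(machine, kernel) < 0)
 *		return -1;
 *	...
 *	machine__destroy_kernel_maps(machine);
 */
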
int machines__create_guest_kernel_maps(struct machines *machines)
{
	int ret = 0;
	struct dirent **namelist = NULL;
	int i, items = 0;
	char path[PATH_MAX];
	pid_t pid;
	char *endp;

	if (symbol_conf.default_guest_vmlinux_name ||
	    symbol_conf.default_guest_modules ||
	    symbol_conf.default_guest_kallsyms) {
		machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
	}

	if (symbol_conf.guestmount) {
		items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
		if (items <= 0)
			return -ENOENT;
		for (i = 0; i < items; i++) {
			if (!isdigit(namelist[i]->d_name[0])) {
				/* Filter out . and .. */
				continue;
			}
			/* Reset errno so a stale ERANGE can't leak into the check below. */
			errno = 0;
			pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
			if ((*endp != '\0') ||
			    (endp == namelist[i]->d_name) ||
			    (errno == ERANGE)) {
				pr_debug("invalid directory (%s). Skipping.\n",
					 namelist[i]->d_name);
				continue;
			}
			sprintf(path, "%s/%s/proc/kallsyms",
				symbol_conf.guestmount,
				namelist[i]->d_name);
			ret = access(path, R_OK);
			if (ret) {
				pr_debug("Can't access file %s\n", path);
				goto failure;
			}
			machines__create_kernel_maps(machines, pid);
		}
failure:
		free(namelist);
	}

	return ret;
}

void machines__destroy_kernel_maps(struct machines *machines)
{
	struct rb_node *next = rb_first(&machines->guests);

	machine__destroy_kernel_maps(&machines->host);

	while (next) {
		struct machine *pos = rb_entry(next, struct machine, rb_node);

		next = rb_next(&pos->rb_node);
		rb_erase(&pos->rb_node, &machines->guests);
		machine__delete(pos);
	}
}

int machines__create_kernel_maps(struct machines *machines, pid_t pid)
{
	struct machine *machine = machines__findnew(machines, pid);

	if (machine == NULL)
		return -1;

	return machine__create_kernel_maps(machine);
}

int machine__load_kallsyms(struct machine *machine, const char *filename,
			   enum map_type type, symbol_filter_t filter)
{
	struct map *map = machine->vmlinux_maps[type];
	int ret = dso__load_kallsyms(map->dso, filename, map, filter);

	if (ret > 0) {
		dso__set_loaded(map->dso, type);
		/*
		 * Since /proc/kallsyms will have multiple sections for the
		 * kernel, with modules between them, fix up the end of all
		 * sections.
		 */
		__map_groups__fixup_end(&machine->kmaps, type);
	}

	return ret;
}

int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
			       symbol_filter_t filter)
{
	struct map *map = machine->vmlinux_maps[type];
	int ret = dso__load_vmlinux_path(map->dso, map, filter);

	if (ret > 0)
		dso__set_loaded(map->dso, type);

	return ret;
}

static void map_groups__fixup_end(struct map_groups *mg)
{
	int i;
	for (i = 0; i < MAP__NR_TYPES; ++i)
		__map_groups__fixup_end(mg, i);
}

static char *get_kernel_version(const char *root_dir)
{
	char version[PATH_MAX];
	FILE *file;
	char *name, *tmp;
	const char *prefix = "Linux version ";

	sprintf(version, "%s/proc/version", root_dir);
	file = fopen(version, "r");
	if (!file)
		return NULL;

	version[0] = '\0';
	tmp = fgets(version, sizeof(version), file);
	fclose(file);

	name = strstr(version, prefix);
	if (!name)
		return NULL;
	name += strlen(prefix);
	tmp = strchr(name, ' ');
	if (tmp)
		*tmp = '\0';

	return strdup(name);
}

static int map_groups__set_modules_path_dir(struct map_groups *mg,
					    const char *dir_name)
{
	struct dirent *dent;
	DIR *dir = opendir(dir_name);
	int ret = 0;

	if (!dir) {
		pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
		return -1;
	}

	while ((dent = readdir(dir)) != NULL) {
		char path[PATH_MAX];
		struct stat st;

		/* sshfs might return bad dent->d_type, so we have to stat */
		snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
		if (stat(path, &st))
			continue;

		if (S_ISDIR(st.st_mode)) {
			if (!strcmp(dent->d_name, ".") ||
			    !strcmp(dent->d_name, ".."))
				continue;

			ret = map_groups__set_modules_path_dir(mg, path);
			if (ret < 0)
				goto out;
		} else {
			char *dot = strrchr(dent->d_name, '.'),
			     dso_name[PATH_MAX];
			struct map *map;
			char *long_name;

			if (dot == NULL || strcmp(dot, ".ko"))
				continue;
			snprintf(dso_name, sizeof(dso_name), "[%.*s]",
				 (int)(dot - dent->d_name), dent->d_name);

			strxfrchar(dso_name, '-', '_');
			map = map_groups__find_by_name(mg, MAP__FUNCTION,
						       dso_name);
			if (map == NULL)
				continue;

			long_name = strdup(path);
			if (long_name == NULL) {
				ret = -1;
				goto out;
			}
			dso__set_long_name(map->dso, long_name, true);
			dso__kernel_module_get_build_id(map->dso, "");
		}
	}

out:
	closedir(dir);
	return ret;
}

static int machine__set_modules_path(struct machine *machine)
{
	char *version;
	char modules_path[PATH_MAX];

	version = get_kernel_version(machine->root_dir);
	if (!version)
		return -1;

	snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s/kernel",
		 machine->root_dir, version);
	free(version);

	return map_groups__set_modules_path_dir(&machine->kmaps, modules_path);
}

static int machine__create_module(void *arg, const char *name, u64 start)
{
	struct machine *machine = arg;
	struct map *map;

	map = machine__new_module(machine, start, name);
	if (map == NULL)
		return -1;

	dso__kernel_module_get_build_id(map->dso, machine->root_dir);

	return 0;
}

static int machine__create_modules(struct machine *machine)
{
	const char *modules;
	char path[PATH_MAX];

	if (machine__is_default_guest(machine)) {
		modules = symbol_conf.default_guest_modules;
	} else {
		snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
		modules = path;
	}

	if (symbol__restricted_filename(modules, "/proc/modules"))
		return -1;

	if (modules__parse(modules, machine, machine__create_module))
		return -1;

	if (!machine__set_modules_path(machine))
		return 0;

	pr_debug("Problems setting modules path maps, continuing anyway...\n");

	return 0;
}

int machine__create_kernel_maps(struct machine *machine)
{
	struct dso *kernel = machine__get_kernel(machine);

	if (kernel == NULL ||
	    __machine__create_kernel_maps(machine, kernel) < 0)
		return -1;

	if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
		if (machine__is_host(machine))
			pr_debug("Problems creating module maps, "
				 "continuing anyway...\n");
		else
			pr_debug("Problems creating module maps for guest %d, "
				 "continuing anyway...\n", machine->pid);
	}

	/*
	 * Now that we have all the maps created, just set the ->end of them:
	 */
	map_groups__fixup_end(&machine->kmaps);
	return 0;
}

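/*
 * Typical call sequence (sketch): create the kernel maps, then fill the
 * function map with symbols, falling back to the vmlinux search path
 * when kallsyms yields nothing (both loaders return the number of
 * symbols loaded):
 *
 *	if (machine__create_kernel_maps(machine) < 0)
 *		return -1;
 *	if (machine__load_kallsyms(machine, "/proc/kallsyms",
 *				   MAP__FUNCTION, NULL) <= 0)
 *		machine__load_vmlinux_path(machine, MAP__FUNCTION, NULL);
 */
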
static void machine__set_kernel_mmap_len(struct machine *machine,
					 union perf_event *event)
{
	int i;

	for (i = 0; i < MAP__NR_TYPES; i++) {
		machine->vmlinux_maps[i]->start = event->mmap.start;
		machine->vmlinux_maps[i]->end = (event->mmap.start +
						 event->mmap.len);
		/*
		 * Be a bit paranoid here, some perf.data file came with
		 * a zero sized synthesized MMAP event for the kernel.
		 */
		if (machine->vmlinux_maps[i]->end == 0)
			machine->vmlinux_maps[i]->end = ~0ULL;
	}
}

static bool machine__uses_kcore(struct machine *machine)
{
	struct dso *dso;

	list_for_each_entry(dso, &machine->kernel_dsos, node) {
		if (dso__is_kcore(dso))
			return true;
	}

	return false;
}

static int machine__process_kernel_mmap_event(struct machine *machine,
					      union perf_event *event)
{
	struct map *map;
	char kmmap_prefix[PATH_MAX];
	enum dso_kernel_type kernel_type;
	bool is_kernel_mmap;

	/* If we have maps from kcore then we do not need or want any others */
	if (machine__uses_kcore(machine))
		return 0;

	machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
	if (machine__is_host(machine))
		kernel_type = DSO_TYPE_KERNEL;
	else
		kernel_type = DSO_TYPE_GUEST_KERNEL;

	is_kernel_mmap = memcmp(event->mmap.filename,
				kmmap_prefix,
				strlen(kmmap_prefix) - 1) == 0;
	if (event->mmap.filename[0] == '/' ||
	    (!is_kernel_mmap && event->mmap.filename[0] == '[')) {

		char short_module_name[1024];
		char *name, *dot;

		if (event->mmap.filename[0] == '/') {
			name = strrchr(event->mmap.filename, '/');
			if (name == NULL)
				goto out_problem;

			++name; /* skip / */
			dot = strrchr(name, '.');
			if (dot == NULL)
				goto out_problem;
			snprintf(short_module_name, sizeof(short_module_name),
				 "[%.*s]", (int)(dot - name), name);
			strxfrchar(short_module_name, '-', '_');
		} else
			strcpy(short_module_name, event->mmap.filename);

		map = machine__new_module(machine, event->mmap.start,
					  event->mmap.filename);
		if (map == NULL)
			goto out_problem;

		name = strdup(short_module_name);
		if (name == NULL)
			goto out_problem;

		dso__set_short_name(map->dso, name, true);
		map->end = map->start + event->mmap.len;
	} else if (is_kernel_mmap) {
		const char *symbol_name = (event->mmap.filename +
					   strlen(kmmap_prefix));
		/*
		 * Should be there already, from the build-id table in
		 * the header.
		 */
		struct dso *kernel = __dsos__findnew(&machine->kernel_dsos,
						     kmmap_prefix);
		if (kernel == NULL)
			goto out_problem;

		kernel->kernel = kernel_type;
		if (__machine__create_kernel_maps(machine, kernel) < 0)
			goto out_problem;

		machine__set_kernel_mmap_len(machine, event);

		/*
		 * Avoid using a zero address (kptr_restrict) for the ref reloc
		 * symbol. Effectively having zero here means that at record
		 * time /proc/sys/kernel/kptr_restrict was non zero.
		 */
		if (event->mmap.pgoff != 0) {
			maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
							 symbol_name,
							 event->mmap.pgoff);
		}

		if (machine__is_default_guest(machine)) {
			/*
			 * preload dso of guest kernel and modules
			 */
			dso__load(kernel, machine->vmlinux_maps[MAP__FUNCTION],
				  NULL);
		}
	}
	return 0;
out_problem:
	return -1;
}

int machine__process_mmap2_event(struct machine *machine,
				 union perf_event *event,
				 struct perf_sample *sample __maybe_unused)
{
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread;
	struct map *map;
	enum map_type type;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap2(event, stdout);

	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap2.pid,
					 event->mmap2.pid);
	if (thread == NULL)
		goto out_problem;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
		type = MAP__VARIABLE;
	else
		type = MAP__FUNCTION;

	map = map__new(&machine->user_dsos, event->mmap2.start,
		       event->mmap2.len, event->mmap2.pgoff,
		       event->mmap2.pid, event->mmap2.maj,
		       event->mmap2.min, event->mmap2.ino,
		       event->mmap2.ino_generation,
		       event->mmap2.filename, type);

	if (map == NULL)
		goto out_problem;

	thread__insert_map(thread, map);
	return 0;

out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
	return 0;
}

int machine__process_mmap_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample __maybe_unused)
{
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread;
	struct map *map;
	enum map_type type;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap(event, stdout);

	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap.pid,
					 event->mmap.pid);
	if (thread == NULL)
		goto out_problem;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
		type = MAP__VARIABLE;
	else
		type = MAP__FUNCTION;

	map = map__new(&machine->user_dsos, event->mmap.start,
		       event->mmap.len, event->mmap.pgoff,
		       event->mmap.pid, 0, 0, 0, 0,
		       event->mmap.filename,
		       type);

	if (map == NULL)
		goto out_problem;

	thread__insert_map(thread, map);
	return 0;

out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
	return 0;
}

static void machine__remove_thread(struct machine *machine, struct thread *th)
{
	machine->last_match = NULL;
	rb_erase(&th->rb_node, &machine->threads);
	/*
	 * We may have references to this thread, for instance in some hist_entry
	 * instances, so just move them to a separate list.
	 */
	list_add_tail(&th->node, &machine->dead_threads);
}

int machine__process_fork_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__find_thread(machine, event->fork.tid);
	struct thread *parent = machine__findnew_thread(machine,
							event->fork.ppid,
							event->fork.ptid);

	/* if a thread currently exists for the thread id remove it */
	if (thread != NULL)
		machine__remove_thread(machine, thread);

	thread = machine__findnew_thread(machine, event->fork.pid,
					 event->fork.tid);
	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	if (thread == NULL || parent == NULL ||
	    thread__fork(thread, parent, sample->time) < 0) {
		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
		return -1;
	}

	return 0;
}

int machine__process_exit_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample __maybe_unused)
{
	struct thread *thread = machine__find_thread(machine, event->fork.tid);

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	if (thread != NULL)
		thread__exited(thread);

	return 0;
}

int machine__process_event(struct machine *machine, union perf_event *event,
			   struct perf_sample *sample)
{
	int ret;

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret = machine__process_comm_event(machine, event, sample); break;
	case PERF_RECORD_MMAP:
		ret = machine__process_mmap_event(machine, event, sample); break;
	case PERF_RECORD_MMAP2:
		ret = machine__process_mmap2_event(machine, event, sample); break;
	case PERF_RECORD_FORK:
		ret = machine__process_fork_event(machine, event, sample); break;
	case PERF_RECORD_EXIT:
		ret = machine__process_exit_event(machine, event, sample); break;
	case PERF_RECORD_LOST:
		ret = machine__process_lost_event(machine, event, sample); break;
	default:
		ret = -1;
		break;
	}

	return ret;
}

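/*
 * Sketch of how a consumer drives the dispatcher above; the event
 * iteration (next_event() here) is hypothetical, in perf it lives in
 * the session layer:
 *
 *	union perf_event *event;
 *
 *	while ((event = next_event()) != NULL) {
 *		if (machine__process_event(machine, event, &sample) < 0)
 *			pr_debug("skipping unknown event type %d\n",
 *				 event->header.type);
 *	}
 */
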
static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
{
	return sym->name && !regexec(regex, sym->name, 0, NULL, 0);
}

static const u8 cpumodes[] = {
	PERF_RECORD_MISC_USER,
	PERF_RECORD_MISC_KERNEL,
	PERF_RECORD_MISC_GUEST_USER,
	PERF_RECORD_MISC_GUEST_KERNEL
};
#define NCPUMODES (sizeof(cpumodes)/sizeof(u8))

static void ip__resolve_ams(struct machine *machine, struct thread *thread,
			    struct addr_map_symbol *ams,
			    u64 ip)
{
	struct addr_location al;
	size_t i;
	u8 m;

	memset(&al, 0, sizeof(al));

	for (i = 0; i < NCPUMODES; i++) {
		m = cpumodes[i];
		/*
		 * We cannot use the header.misc hint to determine whether a
		 * branch stack address is user, kernel, guest, hypervisor.
		 * Branches may straddle the kernel/user/hypervisor boundaries.
		 * Thus, we have to try consecutively until we find a match
		 * or else, the symbol is unknown
		 */
		thread__find_addr_location(thread, machine, m, MAP__FUNCTION,
					   ip, &al);
		if (al.sym)
			goto found;
	}
found:
	ams->addr = ip;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
}

static void ip__resolve_data(struct machine *machine, struct thread *thread,
			     u8 m, struct addr_map_symbol *ams, u64 addr)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));

	thread__find_addr_location(thread, machine, m, MAP__VARIABLE, addr,
				   &al);
	ams->addr = addr;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
}

struct mem_info *machine__resolve_mem(struct machine *machine,
				      struct thread *thr,
				      struct perf_sample *sample,
				      u8 cpumode)
{
	struct mem_info *mi = zalloc(sizeof(*mi));

	if (!mi)
		return NULL;

	ip__resolve_ams(machine, thr, &mi->iaddr, sample->ip);
	ip__resolve_data(machine, thr, cpumode, &mi->daddr, sample->addr);
	mi->data_src.val = sample->data_src;

	return mi;
}

struct branch_info *machine__resolve_bstack(struct machine *machine,
					    struct thread *thr,
					    struct branch_stack *bs)
{
	struct branch_info *bi;
	unsigned int i;

	bi = calloc(bs->nr, sizeof(struct branch_info));
	if (!bi)
		return NULL;

	for (i = 0; i < bs->nr; i++) {
		ip__resolve_ams(machine, thr, &bi[i].to, bs->entries[i].to);
		ip__resolve_ams(machine, thr, &bi[i].from, bs->entries[i].from);
		bi[i].flags = bs->entries[i].flags;
	}
	return bi;
}

static int machine__resolve_callchain_sample(struct machine *machine,
					     struct thread *thread,
					     struct ip_callchain *chain,
					     struct symbol **parent,
					     struct addr_location *root_al,
					     int max_stack)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	int chain_nr = min(max_stack, (int)chain->nr);
	int i;
	int err;

	callchain_cursor_reset(&callchain_cursor);

	if (chain->nr > PERF_MAX_STACK_DEPTH) {
		pr_warning("corrupted callchain. skipping...\n");
		return 0;
	}

	for (i = 0; i < chain_nr; i++) {
		u64 ip;
		struct addr_location al;

		if (callchain_param.order == ORDER_CALLEE)
			ip = chain->ips[i];
		else
			ip = chain->ips[chain->nr - i - 1];

		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR;
				break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL;
				break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER;
				break;
			default:
				pr_debug("invalid callchain context: "
					 "%"PRId64"\n", (s64) ip);
				/*
				 * It seems the callchain is corrupted.
				 * Discard all.
				 */
				callchain_cursor_reset(&callchain_cursor);
				return 0;
			}
			continue;
		}

		al.filtered = false;
		thread__find_addr_location(thread, machine, cpumode,
					   MAP__FUNCTION, ip, &al);
		if (al.sym != NULL) {
			if (sort__has_parent && !*parent &&
			    symbol__match_regex(al.sym, &parent_regex))
				*parent = al.sym;
			else if (have_ignore_callees && root_al &&
				 symbol__match_regex(al.sym, &ignore_callees_regex)) {
				/*
				 * Treat this symbol as the root,
				 * forgetting its callees.
				 */
				*root_al = al;
				callchain_cursor_reset(&callchain_cursor);
			}
		}

		err = callchain_cursor_append(&callchain_cursor,
					      ip, al.map, al.sym);
		if (err)
			return err;
	}

	return 0;
}

static int unwind_entry(struct unwind_entry *entry, void *arg)
{
	struct callchain_cursor *cursor = arg;
	return callchain_cursor_append(cursor, entry->ip,
				       entry->map, entry->sym);
}

int machine__resolve_callchain(struct machine *machine,
			       struct perf_evsel *evsel,
			       struct thread *thread,
			       struct perf_sample *sample,
			       struct symbol **parent,
			       struct addr_location *root_al,
			       int max_stack)
{
	int ret;

	ret = machine__resolve_callchain_sample(machine, thread,
						sample->callchain, parent,
						root_al, max_stack);
	if (ret)
		return ret;

	/* Can we do dwarf post unwind? */
	if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) &&
	      (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER)))
		return 0;

	/* Bail out if nothing was captured. */
	if ((!sample->user_regs.regs) ||
	    (!sample->user_stack.size))
		return 0;

	return unwind__get_entries(unwind_entry, &callchain_cursor, machine,
				   thread, evsel->attr.sample_regs_user,
				   sample, max_stack);
}

int machine__for_each_thread(struct machine *machine,
			     int (*fn)(struct thread *thread, void *p),
			     void *priv)
{
	struct rb_node *nd;
	struct thread *thread;
	int rc = 0;

	for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
		thread = rb_entry(nd, struct thread, rb_node);
		rc = fn(thread, priv);
		if (rc != 0)
			return rc;
	}

	list_for_each_entry(thread, &machine->dead_threads, node) {
		rc = fn(thread, priv);
		if (rc != 0)
			return rc;
	}
	return rc;
}

int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
				  struct target *target, struct thread_map *threads,
				  perf_event__handler_t process, bool data_mmap)
{
	if (target__has_task(target))
		return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap);
	else if (target__has_cpu(target))
		return perf_event__synthesize_threads(tool, process, machine, data_mmap);
	/* command specified */
	return 0;
}

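/*
 * Example callback for machine__for_each_thread() (illustrative; the
 * counting struct and function are hypothetical):
 *
 *	struct thread_count { int nr; };
 *
 *	static int count_thread(struct thread *thread __maybe_unused, void *p)
 *	{
 *		((struct thread_count *)p)->nr++;
 *		return 0;
 *	}
 *
 *	struct thread_count tc = { .nr = 0 };
 *
 *	machine__for_each_thread(machine, count_thread, &tc);
 */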