#include "callchain.h"
#include "debug.h"
#include "event.h"
#include "evsel.h"
#include "hist.h"
#include "machine.h"
#include "map.h"
#include "sort.h"
#include "strlist.h"
#include "thread.h"
#include "vdso.h"
#include <stdbool.h>
#include <symbol/kallsyms.h>
#include "unwind.h"
#include "linux/hash.h"

static void dsos__init(struct dsos *dsos)
{
	INIT_LIST_HEAD(&dsos->head);
	dsos->root = RB_ROOT;
}

int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
{
	map_groups__init(&machine->kmaps, machine);
	RB_CLEAR_NODE(&machine->rb_node);
	dsos__init(&machine->user_dsos);
	dsos__init(&machine->kernel_dsos);

	machine->threads = RB_ROOT;
	INIT_LIST_HEAD(&machine->dead_threads);
	machine->last_match = NULL;

	machine->vdso_info = NULL;

	machine->pid = pid;

	machine->symbol_filter = NULL;
	machine->id_hdr_size = 0;
	machine->comm_exec = false;
	machine->kernel_start = 0;

	machine->root_dir = strdup(root_dir);
	if (machine->root_dir == NULL)
		return -ENOMEM;

	if (pid != HOST_KERNEL_ID) {
		struct thread *thread = machine__findnew_thread(machine, -1,
								pid);
		char comm[64];

		if (thread == NULL)
			return -ENOMEM;

		snprintf(comm, sizeof(comm), "[guest/%d]", pid);
		thread__set_comm(thread, comm, 0);
	}

	machine->current_tid = NULL;

	return 0;
}

struct machine *machine__new_host(void)
{
	struct machine *machine = malloc(sizeof(*machine));

	if (machine != NULL) {
		machine__init(machine, "", HOST_KERNEL_ID);

		if (machine__create_kernel_maps(machine) < 0)
			goto out_delete;
	}

	return machine;
out_delete:
	free(machine);
	return NULL;
}

static void dsos__delete(struct dsos *dsos)
{
	struct dso *pos, *n;

	list_for_each_entry_safe(pos, n, &dsos->head, node) {
		RB_CLEAR_NODE(&pos->rb_node);
		list_del(&pos->node);
		dso__delete(pos);
	}
}

void machine__delete_threads(struct machine *machine)
{
	struct rb_node *nd = rb_first(&machine->threads);

	while (nd) {
		struct thread *t = rb_entry(nd, struct thread, rb_node);

		nd = rb_next(nd);
		machine__remove_thread(machine, t);
	}
}

void machine__exit(struct machine *machine)
{
	map_groups__exit(&machine->kmaps);
	dsos__delete(&machine->user_dsos);
	dsos__delete(&machine->kernel_dsos);
	vdso__exit(machine);
	zfree(&machine->root_dir);
	zfree(&machine->current_tid);
}

void machine__delete(struct machine *machine)
{
	machine__exit(machine);
	free(machine);
}

void machines__init(struct machines *machines)
{
	machine__init(&machines->host, "", HOST_KERNEL_ID);
	machines->guests = RB_ROOT;
	machines->symbol_filter = NULL;
}

void machines__exit(struct machines *machines)
{
	machine__exit(&machines->host);
	/* XXX exit guest */
}

struct machine *machines__add(struct machines *machines, pid_t pid,
			      const char *root_dir)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *pos, *machine = malloc(sizeof(*machine));

	if (machine == NULL)
		return NULL;

	if (machine__init(machine, root_dir, pid) != 0) {
		free(machine);
		return NULL;
	}

	machine->symbol_filter = machines->symbol_filter;

	while (*p != NULL) {
		parent = *p;
		pos = rb_entry(parent, struct machine, rb_node);
		if (pid < pos->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&machine->rb_node, parent, p);
	rb_insert_color(&machine->rb_node, &machines->guests);

	return machine;
}
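/*
 * Illustrative usage of the machines API (a sketch, not code from this
 * file; the pid and root_dir are hypothetical): the host machine is
 * embedded in struct machines, guests live in the pid-keyed rb tree
 * populated by machines__add():
 *
 *	struct machines machines;
 *
 *	machines__init(&machines);
 *	machines__add(&machines, 4242, "/guests/4242");
 *	...
 *	machines__exit(&machines);
 */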
void machines__set_symbol_filter(struct machines *machines,
				 symbol_filter_t symbol_filter)
{
	struct rb_node *nd;

	machines->symbol_filter = symbol_filter;
	machines->host.symbol_filter = symbol_filter;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		machine->symbol_filter = symbol_filter;
	}
}

void machines__set_comm_exec(struct machines *machines, bool comm_exec)
{
	struct rb_node *nd;

	machines->host.comm_exec = comm_exec;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		machine->comm_exec = comm_exec;
	}
}

struct machine *machines__find(struct machines *machines, pid_t pid)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *machine;
	struct machine *default_machine = NULL;

	if (pid == HOST_KERNEL_ID)
		return &machines->host;

	while (*p != NULL) {
		parent = *p;
		machine = rb_entry(parent, struct machine, rb_node);
		if (pid < machine->pid)
			p = &(*p)->rb_left;
		else if (pid > machine->pid)
			p = &(*p)->rb_right;
		else
			return machine;
		if (!machine->pid)
			default_machine = machine;
	}

	return default_machine;
}

struct machine *machines__findnew(struct machines *machines, pid_t pid)
{
	char path[PATH_MAX];
	const char *root_dir = "";
	struct machine *machine = machines__find(machines, pid);

	if (machine && (machine->pid == pid))
		goto out;

	if ((pid != HOST_KERNEL_ID) &&
	    (pid != DEFAULT_GUEST_KERNEL_ID) &&
	    (symbol_conf.guestmount)) {
		sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
		if (access(path, R_OK)) {
			static struct strlist *seen;

			if (!seen)
				seen = strlist__new(true, NULL);

			if (!strlist__has_entry(seen, path)) {
				pr_err("Can't access file %s\n", path);
				strlist__add(seen, path);
			}
			machine = NULL;
			goto out;
		}
		root_dir = path;
	}

	machine = machines__add(machines, pid, root_dir);
out:
	return machine;
}

void machines__process_guests(struct machines *machines,
			      machine__process_t process, void *data)
{
	struct rb_node *nd;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		process(pos, data);
	}
}

char *machine__mmap_name(struct machine *machine, char *bf, size_t size)
{
	if (machine__is_host(machine))
		snprintf(bf, size, "[%s]", "kernel.kallsyms");
	else if (machine__is_default_guest(machine))
		snprintf(bf, size, "[%s]", "guest.kernel.kallsyms");
	else {
		snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms",
			 machine->pid);
	}

	return bf;
}

void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
{
	struct rb_node *node;
	struct machine *machine;

	machines->host.id_hdr_size = id_hdr_size;

	for (node = rb_first(&machines->guests); node; node = rb_next(node)) {
		machine = rb_entry(node, struct machine, rb_node);
		machine->id_hdr_size = id_hdr_size;
	}
}
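/*
 * Threads can be created from events that carry only a tid (pid unknown,
 * i.e. -1). Once an event arrives that does know the pid, the helper below
 * backfills it and makes the thread share its group leader's map groups.
 */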
static void machine__update_thread_pid(struct machine *machine,
				       struct thread *th, pid_t pid)
{
	struct thread *leader;

	if (pid == th->pid_ || pid == -1 || th->pid_ != -1)
		return;

	th->pid_ = pid;

	if (th->pid_ == th->tid)
		return;

	leader = machine__findnew_thread(machine, th->pid_, th->pid_);
	if (!leader)
		goto out_err;

	if (!leader->mg)
		leader->mg = map_groups__new(machine);

	if (!leader->mg)
		goto out_err;

	if (th->mg == leader->mg)
		return;

	if (th->mg) {
		/*
		 * Maps are created from MMAP events which provide the pid and
		 * tid. Consequently there should never be any maps on a thread
		 * with an unknown pid. Just print an error if there are.
		 */
		if (!map_groups__empty(th->mg))
			pr_err("Discarding thread maps for %d:%d\n",
			       th->pid_, th->tid);
		map_groups__delete(th->mg);
	}

	th->mg = map_groups__get(leader->mg);

	return;

out_err:
	pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid);
}

static struct thread *__machine__findnew_thread(struct machine *machine,
						pid_t pid, pid_t tid,
						bool create)
{
	struct rb_node **p = &machine->threads.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - TID lookups come in blocks,
	 * so most of the time we don't have to look up
	 * the full rbtree:
	 */
	th = machine->last_match;
	if (th != NULL) {
		if (th->tid == tid) {
			machine__update_thread_pid(machine, th, pid);
			return th;
		}

		thread__zput(machine->last_match);
	}

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->tid == tid) {
			machine->last_match = thread__get(th);
			machine__update_thread_pid(machine, th, pid);
			return th;
		}

		if (tid < th->tid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	if (!create)
		return NULL;

	th = thread__new(pid, tid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, &machine->threads);

		/*
		 * We have to initialize map_groups separately after the
		 * rb tree is updated.
		 *
		 * The reason is that we call machine__findnew_thread within
		 * thread__init_map_groups to find the thread leader and that
		 * would screw up the rb tree.
		 */
		if (thread__init_map_groups(th, machine)) {
			rb_erase(&th->rb_node, &machine->threads);
			thread__delete(th);
			return NULL;
		}
		/*
		 * It is now in the rbtree, get a ref
		 */
		thread__get(th);
		machine->last_match = thread__get(th);
	}

	return th;
}
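/*
 * The two wrappers below differ only in the "create" flag:
 * machine__findnew_thread() creates the thread if it is not known yet,
 * machine__find_thread() returns NULL instead.
 */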
struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
				       pid_t tid)
{
	return __machine__findnew_thread(machine, pid, tid, true);
}

struct thread *machine__find_thread(struct machine *machine, pid_t pid,
				    pid_t tid)
{
	return __machine__findnew_thread(machine, pid, tid, false);
}

struct comm *machine__thread_exec_comm(struct machine *machine,
				       struct thread *thread)
{
	if (machine->comm_exec)
		return thread__exec_comm(thread);
	else
		return thread__comm(thread);
}

int machine__process_comm_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->comm.pid,
							event->comm.tid);
	bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;

	if (exec)
		machine->comm_exec = true;

	if (dump_trace)
		perf_event__fprintf_comm(event, stdout);

	if (thread == NULL ||
	    __thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
		return -1;
	}

	return 0;
}

int machine__process_lost_event(struct machine *machine __maybe_unused,
				union perf_event *event, struct perf_sample *sample __maybe_unused)
{
	dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
		    event->lost.id, event->lost.lost);
	return 0;
}

static struct dso*
machine__module_dso(struct machine *machine, struct kmod_path *m,
		    const char *filename)
{
	struct dso *dso;

	dso = dsos__find(&machine->kernel_dsos, m->name, true);
	if (!dso) {
		dso = dsos__addnew(&machine->kernel_dsos, m->name);
		if (dso == NULL)
			return NULL;

		if (machine__is_host(machine))
			dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
		else
			dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;

		/* _KMODULE_COMP should be next to _KMODULE */
		if (m->kmod && m->comp)
			dso->symtab_type++;

		dso__set_short_name(dso, strdup(m->name), true);
		dso__set_long_name(dso, strdup(filename), true);
	}

	return dso;
}

struct map *machine__new_module(struct machine *machine, u64 start,
				const char *filename)
{
	struct map *map = NULL;
	struct dso *dso;
	struct kmod_path m;

	if (kmod_path__parse_name(&m, filename))
		return NULL;

	map = map_groups__find_by_name(&machine->kmaps, MAP__FUNCTION,
				       m.name);
	if (map)
		goto out;

	dso = machine__module_dso(machine, &m, filename);
	if (dso == NULL)
		goto out;

	map = map__new2(start, dso, MAP__FUNCTION);
	if (map == NULL)
		goto out;

	map_groups__insert(&machine->kmaps, map);

out:
	free(m.name);
	return map;
}

size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = __dsos__fprintf(&machines->host.kernel_dsos.head, fp) +
		     __dsos__fprintf(&machines->host.user_dsos.head, fp);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += __dsos__fprintf(&pos->kernel_dsos.head, fp);
		ret += __dsos__fprintf(&pos->user_dsos.head, fp);
	}

	return ret;
}
size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp,
				     bool (skip)(struct dso *dso, int parm), int parm)
{
	return __dsos__fprintf_buildid(&m->kernel_dsos.head, fp, skip, parm) +
	       __dsos__fprintf_buildid(&m->user_dsos.head, fp, skip, parm);
}

size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
				      bool (skip)(struct dso *dso, int parm), int parm)
{
	struct rb_node *nd;
	size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
	}
	return ret;
}

size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
{
	int i;
	size_t printed = 0;
	struct dso *kdso = machine->vmlinux_maps[MAP__FUNCTION]->dso;

	if (kdso->has_build_id) {
		char filename[PATH_MAX];
		if (dso__build_id_filename(kdso, filename, sizeof(filename)))
			printed += fprintf(fp, "[0] %s\n", filename);
	}

	for (i = 0; i < vmlinux_path__nr_entries; ++i)
		printed += fprintf(fp, "[%d] %s\n",
				   i + kdso->has_build_id, vmlinux_path[i]);

	return printed;
}

size_t machine__fprintf(struct machine *machine, FILE *fp)
{
	size_t ret = 0;
	struct rb_node *nd;

	for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
		struct thread *pos = rb_entry(nd, struct thread, rb_node);

		ret += thread__fprintf(pos, fp);
	}

	return ret;
}

static struct dso *machine__get_kernel(struct machine *machine)
{
	const char *vmlinux_name = NULL;
	struct dso *kernel;

	if (machine__is_host(machine)) {
		vmlinux_name = symbol_conf.vmlinux_name;
		if (!vmlinux_name)
			vmlinux_name = "[kernel.kallsyms]";

		kernel = dso__kernel_findnew(machine, vmlinux_name,
					     "[kernel]",
					     DSO_TYPE_KERNEL);
	} else {
		char bf[PATH_MAX];

		if (machine__is_default_guest(machine))
			vmlinux_name = symbol_conf.default_guest_vmlinux_name;
		if (!vmlinux_name)
			vmlinux_name = machine__mmap_name(machine, bf,
							  sizeof(bf));

		kernel = dso__kernel_findnew(machine, vmlinux_name,
					     "[guest.kernel]",
					     DSO_TYPE_GUEST_KERNEL);
	}

	if (kernel != NULL && (!kernel->has_build_id))
		dso__read_running_kernel_build_id(kernel, machine);

	return kernel;
}

struct process_args {
	u64 start;
};

static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
					   size_t bufsz)
{
	if (machine__is_default_guest(machine))
		scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
	else
		scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
}

const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};
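/*
 * A /proc/kallsyms line looks like (illustrative address):
 *
 *	ffffffff81000000 T _text
 *
 * so the kernel map start can be recovered by looking up "_text", falling
 * back to "_stext", per ref_reloc_sym_names above.
 */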
/*
 * Figure out the start address of the kernel map from /proc/kallsyms.
 * Returns the name of the start symbol in *symbol_name. Pass in NULL as
 * symbol_name if it's not that important.
 */
static u64 machine__get_running_kernel_start(struct machine *machine,
					     const char **symbol_name)
{
	char filename[PATH_MAX];
	int i;
	const char *name;
	u64 addr = 0;

	machine__get_kallsyms_filename(machine, filename, PATH_MAX);

	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
		return 0;

	for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
		addr = kallsyms__get_function_start(filename, name);
		if (addr)
			break;
	}

	if (symbol_name)
		*symbol_name = name;

	return addr;
}

int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
{
	enum map_type type;
	u64 start = machine__get_running_kernel_start(machine, NULL);

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;

		machine->vmlinux_maps[type] = map__new2(start, kernel, type);
		if (machine->vmlinux_maps[type] == NULL)
			return -1;

		machine->vmlinux_maps[type]->map_ip =
			machine->vmlinux_maps[type]->unmap_ip =
				identity__map_ip;
		kmap = map__kmap(machine->vmlinux_maps[type]);
		if (!kmap)
			return -1;

		kmap->kmaps = &machine->kmaps;
		map_groups__insert(&machine->kmaps,
				   machine->vmlinux_maps[type]);
	}

	return 0;
}

void machine__destroy_kernel_maps(struct machine *machine)
{
	enum map_type type;

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;

		if (machine->vmlinux_maps[type] == NULL)
			continue;

		kmap = map__kmap(machine->vmlinux_maps[type]);
		map_groups__remove(&machine->kmaps,
				   machine->vmlinux_maps[type]);
		if (kmap && kmap->ref_reloc_sym) {
			/*
			 * ref_reloc_sym is shared among all maps, so free it
			 * on just one of them.
			 */
			if (type == MAP__FUNCTION) {
				zfree((char **)&kmap->ref_reloc_sym->name);
				zfree(&kmap->ref_reloc_sym);
			} else
				kmap->ref_reloc_sym = NULL;
		}

		map__delete(machine->vmlinux_maps[type]);
		machine->vmlinux_maps[type] = NULL;
	}
}
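/*
 * Guest kernels are found under symbol_conf.guestmount, one directory per
 * guest named after the guest's pid, e.g. (hypothetical layout):
 *
 *	$GUESTMOUNT/4242/proc/kallsyms
 *	$GUESTMOUNT/4242/proc/modules
 */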
int machines__create_guest_kernel_maps(struct machines *machines)
{
	int ret = 0;
	struct dirent **namelist = NULL;
	int i, items = 0;
	char path[PATH_MAX];
	pid_t pid;
	char *endp;

	if (symbol_conf.default_guest_vmlinux_name ||
	    symbol_conf.default_guest_modules ||
	    symbol_conf.default_guest_kallsyms) {
		machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
	}

	if (symbol_conf.guestmount) {
		items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
		if (items <= 0)
			return -ENOENT;
		for (i = 0; i < items; i++) {
			if (!isdigit(namelist[i]->d_name[0])) {
				/* Filter out . and .. */
				continue;
			}
			pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
			if ((*endp != '\0') ||
			    (endp == namelist[i]->d_name) ||
			    (errno == ERANGE)) {
				pr_debug("invalid directory (%s). Skipping.\n",
					 namelist[i]->d_name);
				continue;
			}
			sprintf(path, "%s/%s/proc/kallsyms",
				symbol_conf.guestmount,
				namelist[i]->d_name);
			ret = access(path, R_OK);
			if (ret) {
				pr_debug("Can't access file %s\n", path);
				goto failure;
			}
			machines__create_kernel_maps(machines, pid);
		}
failure:
		free(namelist);
	}

	return ret;
}

void machines__destroy_kernel_maps(struct machines *machines)
{
	struct rb_node *next = rb_first(&machines->guests);

	machine__destroy_kernel_maps(&machines->host);

	while (next) {
		struct machine *pos = rb_entry(next, struct machine, rb_node);

		next = rb_next(&pos->rb_node);
		rb_erase(&pos->rb_node, &machines->guests);
		machine__delete(pos);
	}
}

int machines__create_kernel_maps(struct machines *machines, pid_t pid)
{
	struct machine *machine = machines__findnew(machines, pid);

	if (machine == NULL)
		return -1;

	return machine__create_kernel_maps(machine);
}

int machine__load_kallsyms(struct machine *machine, const char *filename,
			   enum map_type type, symbol_filter_t filter)
{
	struct map *map = machine->vmlinux_maps[type];
	int ret = dso__load_kallsyms(map->dso, filename, map, filter);

	if (ret > 0) {
		dso__set_loaded(map->dso, type);
		/*
		 * Since /proc/kallsyms has multiple sections for the kernel,
		 * with modules between them, fix up the end of all sections.
		 */
		__map_groups__fixup_end(&machine->kmaps, type);
	}

	return ret;
}

int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
			       symbol_filter_t filter)
{
	struct map *map = machine->vmlinux_maps[type];
	int ret = dso__load_vmlinux_path(map->dso, map, filter);

	if (ret > 0)
		dso__set_loaded(map->dso, type);

	return ret;
}

static void map_groups__fixup_end(struct map_groups *mg)
{
	int i;
	for (i = 0; i < MAP__NR_TYPES; ++i)
		__map_groups__fixup_end(mg, i);
}

static char *get_kernel_version(const char *root_dir)
{
	char version[PATH_MAX];
	FILE *file;
	char *name, *tmp;
	const char *prefix = "Linux version ";

	sprintf(version, "%s/proc/version", root_dir);
	file = fopen(version, "r");
	if (!file)
		return NULL;

	version[0] = '\0';
	tmp = fgets(version, sizeof(version), file);
	fclose(file);

	name = strstr(version, prefix);
	if (!name)
		return NULL;
	name += strlen(prefix);
	tmp = strchr(name, ' ');
	if (tmp)
		*tmp = '\0';

	return strdup(name);
}

static bool is_kmod_dso(struct dso *dso)
{
	return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
	       dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
}
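/*
 * Module DSOs may be compressed on disk (e.g. foo.ko.gz instead of foo.ko,
 * an illustrative name); kmod_path__parse_name() records this in m->comp,
 * and since the _COMP symtab types are defined right after their
 * uncompressed counterparts, a simple increment switches between them.
 */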
static int map_groups__set_module_path(struct map_groups *mg, const char *path,
				       struct kmod_path *m)
{
	struct map *map;
	char *long_name;

	map = map_groups__find_by_name(mg, MAP__FUNCTION, m->name);
	if (map == NULL)
		return 0;

	long_name = strdup(path);
	if (long_name == NULL)
		return -ENOMEM;

	dso__set_long_name(map->dso, long_name, true);
	dso__kernel_module_get_build_id(map->dso, "");

	/*
	 * The full name could reveal that the kmod is compressed, so
	 * update the symtab_type if needed.
	 */
	if (m->comp && is_kmod_dso(map->dso))
		map->dso->symtab_type++;

	return 0;
}

static int map_groups__set_modules_path_dir(struct map_groups *mg,
					    const char *dir_name, int depth)
{
	struct dirent *dent;
	DIR *dir = opendir(dir_name);
	int ret = 0;

	if (!dir) {
		pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
		return -1;
	}

	while ((dent = readdir(dir)) != NULL) {
		char path[PATH_MAX];
		struct stat st;

		/* sshfs might return a bad dent->d_type, so we have to stat */
		snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
		if (stat(path, &st))
			continue;

		if (S_ISDIR(st.st_mode)) {
			if (!strcmp(dent->d_name, ".") ||
			    !strcmp(dent->d_name, ".."))
				continue;

			/* Do not follow top-level source and build symlinks */
			if (depth == 0) {
				if (!strcmp(dent->d_name, "source") ||
				    !strcmp(dent->d_name, "build"))
					continue;
			}

			ret = map_groups__set_modules_path_dir(mg, path,
							       depth + 1);
			if (ret < 0)
				goto out;
		} else {
			struct kmod_path m;

			ret = kmod_path__parse_name(&m, dent->d_name);
			if (ret)
				goto out;

			if (m.kmod)
				ret = map_groups__set_module_path(mg, path, &m);

			free(m.name);

			if (ret)
				goto out;
		}
	}

out:
	closedir(dir);
	return ret;
}

static int machine__set_modules_path(struct machine *machine)
{
	char *version;
	char modules_path[PATH_MAX];

	version = get_kernel_version(machine->root_dir);
	if (!version)
		return -1;

	snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
		 machine->root_dir, version);
	free(version);

	return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
}

static int machine__create_module(void *arg, const char *name, u64 start)
{
	struct machine *machine = arg;
	struct map *map;

	map = machine__new_module(machine, start, name);
	if (map == NULL)
		return -1;

	dso__kernel_module_get_build_id(map->dso, machine->root_dir);

	return 0;
}

static int machine__create_modules(struct machine *machine)
{
	const char *modules;
	char path[PATH_MAX];

	if (machine__is_default_guest(machine)) {
		modules = symbol_conf.default_guest_modules;
	} else {
		snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
		modules = path;
	}

	if (symbol__restricted_filename(modules, "/proc/modules"))
		return -1;

	if (modules__parse(modules, machine, machine__create_module))
		return -1;

	if (!machine__set_modules_path(machine))
		return 0;

	pr_debug("Problems setting modules path maps, continuing anyway...\n");

	return 0;
}
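/*
 * Creating the kernel maps proceeds in stages, as the code below shows:
 * find the kernel DSO and its start address from kallsyms, create one
 * vmlinux map per map type, add the module maps, fix up the map ends, and
 * finally record the ref_reloc symbol used to relocate kallsyms symbols.
 */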
int machine__create_kernel_maps(struct machine *machine)
{
	struct dso *kernel = machine__get_kernel(machine);
	const char *name;
	u64 addr = machine__get_running_kernel_start(machine, &name);
	if (!addr)
		return -1;

	if (kernel == NULL ||
	    __machine__create_kernel_maps(machine, kernel) < 0)
		return -1;

	if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
		if (machine__is_host(machine))
			pr_debug("Problems creating module maps, "
				 "continuing anyway...\n");
		else
			pr_debug("Problems creating module maps for guest %d, "
				 "continuing anyway...\n", machine->pid);
	}

	/*
	 * Now that we have all the maps created, just set the ->end of them:
	 */
	map_groups__fixup_end(&machine->kmaps);

	if (maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name,
					     addr)) {
		machine__destroy_kernel_maps(machine);
		return -1;
	}

	return 0;
}

static void machine__set_kernel_mmap_len(struct machine *machine,
					 union perf_event *event)
{
	int i;

	for (i = 0; i < MAP__NR_TYPES; i++) {
		machine->vmlinux_maps[i]->start = event->mmap.start;
		machine->vmlinux_maps[i]->end = (event->mmap.start +
						 event->mmap.len);
		/*
		 * Be a bit paranoid here, some perf.data files come with
		 * a zero sized synthesized MMAP event for the kernel.
		 */
		if (machine->vmlinux_maps[i]->end == 0)
			machine->vmlinux_maps[i]->end = ~0ULL;
	}
}

static bool machine__uses_kcore(struct machine *machine)
{
	struct dso *dso;

	list_for_each_entry(dso, &machine->kernel_dsos.head, node) {
		if (dso__is_kcore(dso))
			return true;
	}

	return false;
}
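/*
 * Kernel MMAP events are classified below by filename: a "/..." path or a
 * non-kernel "[...]" name means a module map, while a name starting with
 * the machine's kmmap prefix (e.g. "[kernel.kallsyms]") means the kernel
 * map itself, with the ref_reloc symbol name appended after the prefix.
 */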
static int machine__process_kernel_mmap_event(struct machine *machine,
					      union perf_event *event)
{
	struct map *map;
	char kmmap_prefix[PATH_MAX];
	enum dso_kernel_type kernel_type;
	bool is_kernel_mmap;

	/* If we have maps from kcore then we do not need or want any others */
	if (machine__uses_kcore(machine))
		return 0;

	machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
	if (machine__is_host(machine))
		kernel_type = DSO_TYPE_KERNEL;
	else
		kernel_type = DSO_TYPE_GUEST_KERNEL;

	is_kernel_mmap = memcmp(event->mmap.filename,
				kmmap_prefix,
				strlen(kmmap_prefix) - 1) == 0;
	if (event->mmap.filename[0] == '/' ||
	    (!is_kernel_mmap && event->mmap.filename[0] == '[')) {
		map = machine__new_module(machine, event->mmap.start,
					  event->mmap.filename);
		if (map == NULL)
			goto out_problem;

		map->end = map->start + event->mmap.len;
	} else if (is_kernel_mmap) {
		const char *symbol_name = (event->mmap.filename +
					   strlen(kmmap_prefix));
		/*
		 * Should be there already, from the build-id table in
		 * the header.
		 */
		struct dso *kernel = NULL;
		struct dso *dso;

		list_for_each_entry(dso, &machine->kernel_dsos.head, node) {
			if (is_kernel_module(dso->long_name))
				continue;

			kernel = dso;
			break;
		}

		if (kernel == NULL)
			kernel = __dsos__findnew(&machine->kernel_dsos,
						 kmmap_prefix);
		if (kernel == NULL)
			goto out_problem;

		kernel->kernel = kernel_type;
		if (__machine__create_kernel_maps(machine, kernel) < 0)
			goto out_problem;

		if (strstr(kernel->long_name, "vmlinux"))
			dso__set_short_name(kernel, "[kernel.vmlinux]", false);

		machine__set_kernel_mmap_len(machine, event);

		/*
		 * Avoid using a zero address (kptr_restrict) for the ref reloc
		 * symbol. Effectively having zero here means that at record
		 * time /proc/sys/kernel/kptr_restrict was non-zero.
		 */
		if (event->mmap.pgoff != 0) {
			maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
							 symbol_name,
							 event->mmap.pgoff);
		}

		if (machine__is_default_guest(machine)) {
			/*
			 * preload dso of guest kernel and modules
			 */
			dso__load(kernel, machine->vmlinux_maps[MAP__FUNCTION],
				  NULL);
		}
	}
	return 0;
out_problem:
	return -1;
}

int machine__process_mmap2_event(struct machine *machine,
				 union perf_event *event,
				 struct perf_sample *sample __maybe_unused)
{
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread;
	struct map *map;
	enum map_type type;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap2(event, stdout);

	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap2.pid,
					 event->mmap2.tid);
	if (thread == NULL)
		goto out_problem;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
		type = MAP__VARIABLE;
	else
		type = MAP__FUNCTION;

	map = map__new(machine, event->mmap2.start,
		       event->mmap2.len, event->mmap2.pgoff,
		       event->mmap2.pid, event->mmap2.maj,
		       event->mmap2.min, event->mmap2.ino,
		       event->mmap2.ino_generation,
		       event->mmap2.prot,
		       event->mmap2.flags,
		       event->mmap2.filename, type, thread);

	if (map == NULL)
		goto out_problem;

	thread__insert_map(thread, map);
	return 0;

out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
	return 0;
}

int machine__process_mmap_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample __maybe_unused)
{
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread;
	struct map *map;
	enum map_type type;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap(event, stdout);

	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap.pid,
					 event->mmap.tid);
	if (thread == NULL)
		goto out_problem;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
		type = MAP__VARIABLE;
	else
		type = MAP__FUNCTION;

	map = map__new(machine, event->mmap.start,
		       event->mmap.len, event->mmap.pgoff,
		       event->mmap.pid, 0, 0, 0, 0, 0, 0,
		       event->mmap.filename,
		       type, thread);

	if (map == NULL)
		goto out_problem;

	thread__insert_map(thread, map);
	return 0;

out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
	return 0;
}
void machine__remove_thread(struct machine *machine, struct thread *th)
{
	if (machine->last_match == th)
		thread__zput(machine->last_match);

	rb_erase(&th->rb_node, &machine->threads);
	/*
	 * Move it first to the dead_threads list, then drop the reference.
	 * If this is the last reference, the thread__delete destructor will
	 * be called and will remove it from the dead_threads list.
	 */
	list_add_tail(&th->node, &machine->dead_threads);
	thread__put(th);
}

int machine__process_fork_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);
	struct thread *parent = machine__findnew_thread(machine,
							event->fork.ppid,
							event->fork.ptid);

	/* if a thread currently exists for the thread id remove it */
	if (thread != NULL)
		machine__remove_thread(machine, thread);

	thread = machine__findnew_thread(machine, event->fork.pid,
					 event->fork.tid);
	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	if (thread == NULL || parent == NULL ||
	    thread__fork(thread, parent, sample->time) < 0) {
		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
		return -1;
	}

	return 0;
}

int machine__process_exit_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample __maybe_unused)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	if (thread != NULL)
		thread__exited(thread);

	return 0;
}

int machine__process_event(struct machine *machine, union perf_event *event,
			   struct perf_sample *sample)
{
	int ret;

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret = machine__process_comm_event(machine, event, sample); break;
	case PERF_RECORD_MMAP:
		ret = machine__process_mmap_event(machine, event, sample); break;
	case PERF_RECORD_MMAP2:
		ret = machine__process_mmap2_event(machine, event, sample); break;
	case PERF_RECORD_FORK:
		ret = machine__process_fork_event(machine, event, sample); break;
	case PERF_RECORD_EXIT:
		ret = machine__process_exit_event(machine, event, sample); break;
	case PERF_RECORD_LOST:
		ret = machine__process_lost_event(machine, event, sample); break;
	default:
		ret = -1;
		break;
	}

	return ret;
}

static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
{
	if (sym->name && !regexec(regex, sym->name, 0, NULL, 0))
		return true;
	return false;
}

static void ip__resolve_ams(struct thread *thread,
			    struct addr_map_symbol *ams,
			    u64 ip)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));
	/*
	 * We cannot use the header.misc hint to determine whether a
	 * branch stack address is user, kernel, guest, or hypervisor.
	 * Branches may straddle the kernel/user/hypervisor boundaries.
	 * Thus, we have to try consecutively until we find a match,
	 * or else the symbol is unknown.
	 */
	thread__find_cpumode_addr_location(thread, MAP__FUNCTION, ip, &al);

	ams->addr = ip;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
}
static void ip__resolve_data(struct thread *thread,
			     u8 m, struct addr_map_symbol *ams, u64 addr)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));

	thread__find_addr_location(thread, m, MAP__VARIABLE, addr, &al);
	if (al.map == NULL) {
		/*
		 * Some shared data regions have the execute bit set, which
		 * puts their mapping in the MAP__FUNCTION type array.
		 * Check there as a fallback option before dropping the sample.
		 */
		thread__find_addr_location(thread, m, MAP__FUNCTION, addr, &al);
	}

	ams->addr = addr;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
}

struct mem_info *sample__resolve_mem(struct perf_sample *sample,
				     struct addr_location *al)
{
	struct mem_info *mi = zalloc(sizeof(*mi));

	if (!mi)
		return NULL;

	ip__resolve_ams(al->thread, &mi->iaddr, sample->ip);
	ip__resolve_data(al->thread, al->cpumode, &mi->daddr, sample->addr);
	mi->data_src.val = sample->data_src;

	return mi;
}

static int add_callchain_ip(struct thread *thread,
			    struct symbol **parent,
			    struct addr_location *root_al,
			    u8 *cpumode,
			    u64 ip)
{
	struct addr_location al;

	al.filtered = 0;
	al.sym = NULL;
	if (!cpumode) {
		thread__find_cpumode_addr_location(thread, MAP__FUNCTION,
						   ip, &al);
	} else {
		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				*cpumode = PERF_RECORD_MISC_HYPERVISOR;
				break;
			case PERF_CONTEXT_KERNEL:
				*cpumode = PERF_RECORD_MISC_KERNEL;
				break;
			case PERF_CONTEXT_USER:
				*cpumode = PERF_RECORD_MISC_USER;
				break;
			default:
				pr_debug("invalid callchain context: "
					 "%"PRId64"\n", (s64) ip);
				/*
				 * It seems the callchain is corrupted.
				 * Discard all.
				 */
				callchain_cursor_reset(&callchain_cursor);
				return 1;
			}
			return 0;
		}
		thread__find_addr_location(thread, *cpumode, MAP__FUNCTION,
					   ip, &al);
	}

	if (al.sym != NULL) {
		if (sort__has_parent && !*parent &&
		    symbol__match_regex(al.sym, &parent_regex))
			*parent = al.sym;
		else if (have_ignore_callees && root_al &&
			 symbol__match_regex(al.sym, &ignore_callees_regex)) {
			/*
			 * Treat this symbol as the root,
			 * forgetting its callees.
			 */
			*root_al = al;
			callchain_cursor_reset(&callchain_cursor);
		}
	}

	return callchain_cursor_append(&callchain_cursor, al.addr, al.map, al.sym);
}

struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
					   struct addr_location *al)
{
	unsigned int i;
	const struct branch_stack *bs = sample->branch_stack;
	struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));

	if (!bi)
		return NULL;

	for (i = 0; i < bs->nr; i++) {
		ip__resolve_ams(al->thread, &bi[i].to, bs->entries[i].to);
		ip__resolve_ams(al->thread, &bi[i].from, bs->entries[i].from);
		bi[i].flags = bs->entries[i].flags;
	}
	return bi;
}

#define CHASHSZ 127
#define CHASHBITS 7
#define NO_ENTRY 0xff

#define PERF_MAX_BRANCH_DEPTH 127
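/*
 * Remove loops: collapse repeated runs of branch entries so a loop
 * contributes only a single iteration. For example (hypothetical "from"
 * sequence), A B C B C B C D becomes A B C D. Collisions in the small
 * chash table are deliberately left unhandled.
 */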
static int remove_loops(struct branch_entry *l, int nr)
{
	int i, j, off;
	unsigned char chash[CHASHSZ];

	memset(chash, NO_ENTRY, sizeof(chash));

	BUG_ON(PERF_MAX_BRANCH_DEPTH > 255);

	for (i = 0; i < nr; i++) {
		int h = hash_64(l[i].from, CHASHBITS) % CHASHSZ;

		/* no collision handling for now */
		if (chash[h] == NO_ENTRY) {
			chash[h] = i;
		} else if (l[chash[h]].from == l[i].from) {
			bool is_loop = true;
			/* check if it is a real loop */
			off = 0;
			for (j = chash[h]; j < i && i + off < nr; j++, off++)
				if (l[j].from != l[i + off].from) {
					is_loop = false;
					break;
				}
			if (is_loop) {
				memmove(l + i, l + i + off,
					(nr - (i + off)) * sizeof(*l));
				nr -= off;
			}
		}
	}
	return nr;
}
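/*
 * With LBR callstacks, the sample's callchain carries the kernel part and
 * a PERF_CONTEXT_USER marker, while the user part lives in the branch
 * stack, e.g. (schematic): [k2, k1, PERF_CONTEXT_USER] + LBR entries.
 * The resolver below stitches the two parts together.
 */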
/*
 * Resolve LBR callstack chain sample
 * Return:
 * 1 on success: got LBR callchain information
 * 0 if no LBR callchain information is available; should try fp
 * negative error code on other errors.
 */
static int resolve_lbr_callchain_sample(struct thread *thread,
					struct perf_sample *sample,
					struct symbol **parent,
					struct addr_location *root_al,
					int max_stack)
{
	struct ip_callchain *chain = sample->callchain;
	int chain_nr = min(max_stack, (int)chain->nr);
	u8 cpumode = PERF_RECORD_MISC_USER;
	int i, j, err;
	u64 ip;

	for (i = 0; i < chain_nr; i++) {
		if (chain->ips[i] == PERF_CONTEXT_USER)
			break;
	}

	/* LBR only affects the user callchain */
	if (i != chain_nr) {
		struct branch_stack *lbr_stack = sample->branch_stack;
		int lbr_nr = lbr_stack->nr;
		/*
		 * The LBR callstack can only get the user call chain.
		 * The mix_chain_nr is the kernel call chain
		 * number plus the LBR user call chain number.
		 * i is the kernel call chain number,
		 * 1 is PERF_CONTEXT_USER,
		 * lbr_nr + 1 is the user call chain number.
		 * For details, please refer to the comments
		 * in callchain__printf.
		 */
		int mix_chain_nr = i + 1 + lbr_nr + 1;

		if (mix_chain_nr > PERF_MAX_STACK_DEPTH + PERF_MAX_BRANCH_DEPTH) {
			pr_warning("corrupted callchain. skipping...\n");
			return 0;
		}

		for (j = 0; j < mix_chain_nr; j++) {
			if (callchain_param.order == ORDER_CALLEE) {
				if (j < i + 1)
					ip = chain->ips[j];
				else if (j > i + 1)
					ip = lbr_stack->entries[j - i - 2].from;
				else
					ip = lbr_stack->entries[0].to;
			} else {
				if (j < lbr_nr)
					ip = lbr_stack->entries[lbr_nr - j - 1].from;
				else if (j > lbr_nr)
					ip = chain->ips[i + 1 - (j - lbr_nr)];
				else
					ip = lbr_stack->entries[0].to;
			}

			err = add_callchain_ip(thread, parent, root_al, &cpumode, ip);
			if (err)
				return (err < 0) ? err : 0;
		}
		return 1;
	}

	return 0;
}

static int thread__resolve_callchain_sample(struct thread *thread,
					    struct perf_evsel *evsel,
					    struct perf_sample *sample,
					    struct symbol **parent,
					    struct addr_location *root_al,
					    int max_stack)
{
	struct branch_stack *branch = sample->branch_stack;
	struct ip_callchain *chain = sample->callchain;
	int chain_nr = min(max_stack, (int)chain->nr);
	u8 cpumode = PERF_RECORD_MISC_USER;
	int i, j, err;
	int skip_idx = -1;
	int first_call = 0;

	callchain_cursor_reset(&callchain_cursor);

	if (has_branch_callstack(evsel)) {
		err = resolve_lbr_callchain_sample(thread, sample, parent,
						   root_al, max_stack);
		if (err)
			return (err < 0) ? err : 0;
	}

	/*
	 * Based on DWARF debug information, some architectures skip
	 * a callchain entry saved by the kernel.
	 */
	if (chain->nr < PERF_MAX_STACK_DEPTH)
		skip_idx = arch_skip_callchain_idx(thread, chain);

	/*
	 * Add branches to call stack for easier browsing. This gives
	 * more context for a sample than just the callers.
	 *
	 * This uses individual histograms of paths compared to the
	 * aggregated histograms the normal LBR mode uses.
	 *
	 * Limitations for now:
	 * - No extra filters
	 * - No annotations (should annotate somehow)
	 */

	if (branch && callchain_param.branch_callstack) {
		int nr = min(max_stack, (int)branch->nr);
		struct branch_entry be[nr];

		if (branch->nr > PERF_MAX_BRANCH_DEPTH) {
			pr_warning("corrupted branch chain. skipping...\n");
			goto check_calls;
		}

		for (i = 0; i < nr; i++) {
			if (callchain_param.order == ORDER_CALLEE) {
				be[i] = branch->entries[i];
				/*
				 * Check for overlap into the callchain.
				 * The return address is one off compared to
				 * the branch entry. To adjust for this,
				 * assume the calling instruction is no longer
				 * than 8 bytes.
				 */
				if (i == skip_idx ||
				    chain->ips[first_call] >= PERF_CONTEXT_MAX)
					first_call++;
				else if (be[i].from < chain->ips[first_call] &&
					 be[i].from >= chain->ips[first_call] - 8)
					first_call++;
			} else
				be[i] = branch->entries[branch->nr - i - 1];
		}

		nr = remove_loops(be, nr);

		for (i = 0; i < nr; i++) {
			err = add_callchain_ip(thread, parent, root_al,
					       NULL, be[i].to);
			if (!err)
				err = add_callchain_ip(thread, parent, root_al,
						       NULL, be[i].from);
			if (err == -EINVAL)
				break;
			if (err)
				return err;
		}
		chain_nr -= nr;
	}

check_calls:
	if (chain->nr > PERF_MAX_STACK_DEPTH) {
		pr_warning("corrupted callchain. skipping...\n");
		return 0;
	}

	for (i = first_call; i < chain_nr; i++) {
		u64 ip;

		if (callchain_param.order == ORDER_CALLEE)
			j = i;
		else
			j = chain->nr - i - 1;

#ifdef HAVE_SKIP_CALLCHAIN_IDX
		if (j == skip_idx)
			continue;
#endif
		ip = chain->ips[j];

		err = add_callchain_ip(thread, parent, root_al, &cpumode, ip);

		if (err)
			return (err < 0) ? err : 0;
	}

	return 0;
}
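/*
 * unwind_entry() is the callback that feeds each frame produced by the
 * DWARF post-unwinder (unwind__get_entries() below) into the global
 * callchain cursor, after the kernel-provided chain has been resolved.
 */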
static int unwind_entry(struct unwind_entry *entry, void *arg)
{
	struct callchain_cursor *cursor = arg;
	return callchain_cursor_append(cursor, entry->ip,
				       entry->map, entry->sym);
}

int thread__resolve_callchain(struct thread *thread,
			      struct perf_evsel *evsel,
			      struct perf_sample *sample,
			      struct symbol **parent,
			      struct addr_location *root_al,
			      int max_stack)
{
	int ret = thread__resolve_callchain_sample(thread, evsel,
						   sample, parent,
						   root_al, max_stack);
	if (ret)
		return ret;

	/* Can we do dwarf post unwind? */
	if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) &&
	      (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER)))
		return 0;

	/* Bail out if nothing was captured. */
	if ((!sample->user_regs.regs) ||
	    (!sample->user_stack.size))
		return 0;

	return unwind__get_entries(unwind_entry, &callchain_cursor,
				   thread, sample, max_stack);
}

int machine__for_each_thread(struct machine *machine,
			     int (*fn)(struct thread *thread, void *p),
			     void *priv)
{
	struct rb_node *nd;
	struct thread *thread;
	int rc = 0;

	for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
		thread = rb_entry(nd, struct thread, rb_node);
		rc = fn(thread, priv);
		if (rc != 0)
			return rc;
	}

	list_for_each_entry(thread, &machine->dead_threads, node) {
		rc = fn(thread, priv);
		if (rc != 0)
			return rc;
	}
	return rc;
}

int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
				  struct target *target, struct thread_map *threads,
				  perf_event__handler_t process, bool data_mmap)
{
	if (target__has_task(target))
		return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap);
	else if (target__has_cpu(target))
		return perf_event__synthesize_threads(tool, process, machine, data_mmap);
	/* command specified */
	return 0;
}

pid_t machine__get_current_tid(struct machine *machine, int cpu)
{
	if (cpu < 0 || cpu >= MAX_NR_CPUS || !machine->current_tid)
		return -1;

	return machine->current_tid[cpu];
}

int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
			     pid_t tid)
{
	struct thread *thread;

	if (cpu < 0)
		return -EINVAL;

	if (!machine->current_tid) {
		int i;

		machine->current_tid = calloc(MAX_NR_CPUS, sizeof(pid_t));
		if (!machine->current_tid)
			return -ENOMEM;
		for (i = 0; i < MAX_NR_CPUS; i++)
			machine->current_tid[i] = -1;
	}

	if (cpu >= MAX_NR_CPUS) {
		pr_err("Requested CPU %d too large. ", cpu);
		pr_err("Consider raising MAX_NR_CPUS\n");
		return -EINVAL;
	}

	machine->current_tid[cpu] = tid;

	thread = machine__findnew_thread(machine, pid, tid);
	if (!thread)
		return -ENOMEM;

	thread->cpu = cpu;

	return 0;
}
int machine__get_kernel_start(struct machine *machine)
{
	struct map *map = machine__kernel_map(machine, MAP__FUNCTION);
	int err = 0;

	/*
	 * The only addresses above 2^63 are kernel addresses of a 64-bit
	 * kernel. Note that addresses are unsigned so that on a 32-bit system
	 * all addresses including kernel addresses are less than 2^32. In
	 * that case (32-bit system), if the kernel mapping is unknown, all
	 * addresses will be assumed to be in user space - see
	 * machine__kernel_ip().
	 */
	machine->kernel_start = 1ULL << 63;
	if (map) {
		err = map__load(map, machine->symbol_filter);
		if (map->start)
			machine->kernel_start = map->start;
	}
	return err;
}