1 #include "callchain.h" 2 #include "debug.h" 3 #include "event.h" 4 #include "evsel.h" 5 #include "hist.h" 6 #include "machine.h" 7 #include "map.h" 8 #include "sort.h" 9 #include "strlist.h" 10 #include "thread.h" 11 #include "vdso.h" 12 #include <stdbool.h> 13 #include <symbol/kallsyms.h> 14 #include "unwind.h" 15 #include "linux/hash.h" 16 17 static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock); 18 19 static void dsos__init(struct dsos *dsos) 20 { 21 INIT_LIST_HEAD(&dsos->head); 22 dsos->root = RB_ROOT; 23 pthread_rwlock_init(&dsos->lock, NULL); 24 } 25 26 int machine__init(struct machine *machine, const char *root_dir, pid_t pid) 27 { 28 memset(machine, 0, sizeof(*machine)); 29 map_groups__init(&machine->kmaps, machine); 30 RB_CLEAR_NODE(&machine->rb_node); 31 dsos__init(&machine->dsos); 32 33 machine->threads = RB_ROOT; 34 pthread_rwlock_init(&machine->threads_lock, NULL); 35 INIT_LIST_HEAD(&machine->dead_threads); 36 machine->last_match = NULL; 37 38 machine->vdso_info = NULL; 39 machine->env = NULL; 40 41 machine->pid = pid; 42 43 machine->symbol_filter = NULL; 44 machine->id_hdr_size = 0; 45 machine->comm_exec = false; 46 machine->kernel_start = 0; 47 48 memset(machine->vmlinux_maps, 0, sizeof(machine->vmlinux_maps)); 49 50 machine->root_dir = strdup(root_dir); 51 if (machine->root_dir == NULL) 52 return -ENOMEM; 53 54 if (pid != HOST_KERNEL_ID) { 55 struct thread *thread = machine__findnew_thread(machine, -1, 56 pid); 57 char comm[64]; 58 59 if (thread == NULL) 60 return -ENOMEM; 61 62 snprintf(comm, sizeof(comm), "[guest/%d]", pid); 63 thread__set_comm(thread, comm, 0); 64 thread__put(thread); 65 } 66 67 machine->current_tid = NULL; 68 69 return 0; 70 } 71 72 struct machine *machine__new_host(void) 73 { 74 struct machine *machine = malloc(sizeof(*machine)); 75 76 if (machine != NULL) { 77 machine__init(machine, "", HOST_KERNEL_ID); 78 79 if (machine__create_kernel_maps(machine) < 0) 80 goto out_delete; 81 } 82 83 return machine; 84 out_delete: 85 free(machine); 86 return NULL; 87 } 88 89 static void dsos__purge(struct dsos *dsos) 90 { 91 struct dso *pos, *n; 92 93 pthread_rwlock_wrlock(&dsos->lock); 94 95 list_for_each_entry_safe(pos, n, &dsos->head, node) { 96 RB_CLEAR_NODE(&pos->rb_node); 97 pos->root = NULL; 98 list_del_init(&pos->node); 99 dso__put(pos); 100 } 101 102 pthread_rwlock_unlock(&dsos->lock); 103 } 104 105 static void dsos__exit(struct dsos *dsos) 106 { 107 dsos__purge(dsos); 108 pthread_rwlock_destroy(&dsos->lock); 109 } 110 111 void machine__delete_threads(struct machine *machine) 112 { 113 struct rb_node *nd; 114 115 pthread_rwlock_wrlock(&machine->threads_lock); 116 nd = rb_first(&machine->threads); 117 while (nd) { 118 struct thread *t = rb_entry(nd, struct thread, rb_node); 119 120 nd = rb_next(nd); 121 __machine__remove_thread(machine, t, false); 122 } 123 pthread_rwlock_unlock(&machine->threads_lock); 124 } 125 126 void machine__exit(struct machine *machine) 127 { 128 machine__destroy_kernel_maps(machine); 129 map_groups__exit(&machine->kmaps); 130 dsos__exit(&machine->dsos); 131 machine__exit_vdso(machine); 132 zfree(&machine->root_dir); 133 zfree(&machine->current_tid); 134 pthread_rwlock_destroy(&machine->threads_lock); 135 } 136 137 void machine__delete(struct machine *machine) 138 { 139 machine__exit(machine); 140 free(machine); 141 } 142 143 void machines__init(struct machines *machines) 144 { 145 machine__init(&machines->host, "", HOST_KERNEL_ID); 146 machines->guests = RB_ROOT; 147 machines->symbol_filter = NULL; 148 } 149 150 void machines__exit(struct machines *machines) 151 { 152 machine__exit(&machines->host); 153 /* XXX exit guest */ 154 } 155 156 struct machine *machines__add(struct machines *machines, pid_t pid, 157 const char *root_dir) 158 { 159 struct rb_node **p = &machines->guests.rb_node; 160 struct rb_node *parent = NULL; 161 struct machine *pos, *machine = malloc(sizeof(*machine)); 162 163 if (machine == NULL) 164 return NULL; 165 166 if (machine__init(machine, root_dir, pid) != 0) { 167 free(machine); 168 return NULL; 169 } 170 171 machine->symbol_filter = machines->symbol_filter; 172 173 while (*p != NULL) { 174 parent = *p; 175 pos = rb_entry(parent, struct machine, rb_node); 176 if (pid < pos->pid) 177 p = &(*p)->rb_left; 178 else 179 p = &(*p)->rb_right; 180 } 181 182 rb_link_node(&machine->rb_node, parent, p); 183 rb_insert_color(&machine->rb_node, &machines->guests); 184 185 return machine; 186 } 187 188 void machines__set_symbol_filter(struct machines *machines, 189 symbol_filter_t symbol_filter) 190 { 191 struct rb_node *nd; 192 193 machines->symbol_filter = symbol_filter; 194 machines->host.symbol_filter = symbol_filter; 195 196 for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) { 197 struct machine *machine = rb_entry(nd, struct machine, rb_node); 198 199 machine->symbol_filter = symbol_filter; 200 } 201 } 202 203 void machines__set_comm_exec(struct machines *machines, bool comm_exec) 204 { 205 struct rb_node *nd; 206 207 machines->host.comm_exec = comm_exec; 208 209 for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) { 210 struct machine *machine = rb_entry(nd, struct machine, rb_node); 211 212 machine->comm_exec = comm_exec; 213 } 214 } 215 216 struct machine *machines__find(struct machines *machines, pid_t pid) 217 { 218 struct rb_node **p = &machines->guests.rb_node; 219 struct rb_node *parent = NULL; 220 struct machine *machine; 221 struct machine *default_machine = NULL; 222 223 if (pid == HOST_KERNEL_ID) 224 return &machines->host; 225 226 while (*p != NULL) { 227 parent = *p; 228 machine = rb_entry(parent, struct machine, rb_node); 229 if (pid < machine->pid) 230 p = &(*p)->rb_left; 231 else if (pid > machine->pid) 232 p = &(*p)->rb_right; 233 else 234 return machine; 235 if (!machine->pid) 236 default_machine = machine; 237 } 238 239 return default_machine; 240 } 241 242 struct machine *machines__findnew(struct machines *machines, pid_t pid) 243 { 244 char path[PATH_MAX]; 245 const char *root_dir = ""; 246 struct machine *machine = machines__find(machines, pid); 247 248 if (machine && (machine->pid == pid)) 249 goto out; 250 251 if ((pid != HOST_KERNEL_ID) && 252 (pid != DEFAULT_GUEST_KERNEL_ID) && 253 (symbol_conf.guestmount)) { 254 sprintf(path, "%s/%d", symbol_conf.guestmount, pid); 255 if (access(path, R_OK)) { 256 static struct strlist *seen; 257 258 if (!seen) 259 seen = strlist__new(NULL, NULL); 260 261 if (!strlist__has_entry(seen, path)) { 262 pr_err("Can't access file %s\n", path); 263 strlist__add(seen, path); 264 } 265 machine = NULL; 266 goto out; 267 } 268 root_dir = path; 269 } 270 271 machine = machines__add(machines, pid, root_dir); 272 out: 273 return machine; 274 } 275 276 void machines__process_guests(struct machines *machines, 277 machine__process_t process, void *data) 278 { 279 struct rb_node *nd; 280 281 for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) { 282 struct machine *pos = rb_entry(nd, struct machine, rb_node); 283 process(pos, data); 284 } 285 } 286 287 char *machine__mmap_name(struct machine *machine, char *bf, size_t size) 288 { 289 if (machine__is_host(machine)) 290 snprintf(bf, size, "[%s]", "kernel.kallsyms"); 291 else if (machine__is_default_guest(machine)) 292 snprintf(bf, size, "[%s]", "guest.kernel.kallsyms"); 293 else { 294 snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms", 295 machine->pid); 296 } 297 298 return bf; 299 } 300 301 void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size) 302 { 303 struct rb_node *node; 304 struct machine *machine; 305 306 machines->host.id_hdr_size = id_hdr_size; 307 308 for (node = rb_first(&machines->guests); node; node = rb_next(node)) { 309 machine = rb_entry(node, struct machine, rb_node); 310 machine->id_hdr_size = id_hdr_size; 311 } 312 313 return; 314 } 315 316 static void machine__update_thread_pid(struct machine *machine, 317 struct thread *th, pid_t pid) 318 { 319 struct thread *leader; 320 321 if (pid == th->pid_ || pid == -1 || th->pid_ != -1) 322 return; 323 324 th->pid_ = pid; 325 326 if (th->pid_ == th->tid) 327 return; 328 329 leader = __machine__findnew_thread(machine, th->pid_, th->pid_); 330 if (!leader) 331 goto out_err; 332 333 if (!leader->mg) 334 leader->mg = map_groups__new(machine); 335 336 if (!leader->mg) 337 goto out_err; 338 339 if (th->mg == leader->mg) 340 return; 341 342 if (th->mg) { 343 /* 344 * Maps are created from MMAP events which provide the pid and 345 * tid. Consequently there never should be any maps on a thread 346 * with an unknown pid. Just print an error if there are. 347 */ 348 if (!map_groups__empty(th->mg)) 349 pr_err("Discarding thread maps for %d:%d\n", 350 th->pid_, th->tid); 351 map_groups__put(th->mg); 352 } 353 354 th->mg = map_groups__get(leader->mg); 355 out_put: 356 thread__put(leader); 357 return; 358 out_err: 359 pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid); 360 goto out_put; 361 } 362 363 /* 364 * Caller must eventually drop thread->refcnt returned with a successfull 365 * lookup/new thread inserted. 366 */ 367 static struct thread *____machine__findnew_thread(struct machine *machine, 368 pid_t pid, pid_t tid, 369 bool create) 370 { 371 struct rb_node **p = &machine->threads.rb_node; 372 struct rb_node *parent = NULL; 373 struct thread *th; 374 375 /* 376 * Front-end cache - TID lookups come in blocks, 377 * so most of the time we dont have to look up 378 * the full rbtree: 379 */ 380 th = machine->last_match; 381 if (th != NULL) { 382 if (th->tid == tid) { 383 machine__update_thread_pid(machine, th, pid); 384 return thread__get(th); 385 } 386 387 machine->last_match = NULL; 388 } 389 390 while (*p != NULL) { 391 parent = *p; 392 th = rb_entry(parent, struct thread, rb_node); 393 394 if (th->tid == tid) { 395 machine->last_match = th; 396 machine__update_thread_pid(machine, th, pid); 397 return thread__get(th); 398 } 399 400 if (tid < th->tid) 401 p = &(*p)->rb_left; 402 else 403 p = &(*p)->rb_right; 404 } 405 406 if (!create) 407 return NULL; 408 409 th = thread__new(pid, tid); 410 if (th != NULL) { 411 rb_link_node(&th->rb_node, parent, p); 412 rb_insert_color(&th->rb_node, &machine->threads); 413 414 /* 415 * We have to initialize map_groups separately 416 * after rb tree is updated. 417 * 418 * The reason is that we call machine__findnew_thread 419 * within thread__init_map_groups to find the thread 420 * leader and that would screwed the rb tree. 421 */ 422 if (thread__init_map_groups(th, machine)) { 423 rb_erase_init(&th->rb_node, &machine->threads); 424 RB_CLEAR_NODE(&th->rb_node); 425 thread__put(th); 426 return NULL; 427 } 428 /* 429 * It is now in the rbtree, get a ref 430 */ 431 thread__get(th); 432 machine->last_match = th; 433 } 434 435 return th; 436 } 437 438 struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid) 439 { 440 return ____machine__findnew_thread(machine, pid, tid, true); 441 } 442 443 struct thread *machine__findnew_thread(struct machine *machine, pid_t pid, 444 pid_t tid) 445 { 446 struct thread *th; 447 448 pthread_rwlock_wrlock(&machine->threads_lock); 449 th = __machine__findnew_thread(machine, pid, tid); 450 pthread_rwlock_unlock(&machine->threads_lock); 451 return th; 452 } 453 454 struct thread *machine__find_thread(struct machine *machine, pid_t pid, 455 pid_t tid) 456 { 457 struct thread *th; 458 pthread_rwlock_rdlock(&machine->threads_lock); 459 th = ____machine__findnew_thread(machine, pid, tid, false); 460 pthread_rwlock_unlock(&machine->threads_lock); 461 return th; 462 } 463 464 struct comm *machine__thread_exec_comm(struct machine *machine, 465 struct thread *thread) 466 { 467 if (machine->comm_exec) 468 return thread__exec_comm(thread); 469 else 470 return thread__comm(thread); 471 } 472 473 int machine__process_comm_event(struct machine *machine, union perf_event *event, 474 struct perf_sample *sample) 475 { 476 struct thread *thread = machine__findnew_thread(machine, 477 event->comm.pid, 478 event->comm.tid); 479 bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC; 480 int err = 0; 481 482 if (exec) 483 machine->comm_exec = true; 484 485 if (dump_trace) 486 perf_event__fprintf_comm(event, stdout); 487 488 if (thread == NULL || 489 __thread__set_comm(thread, event->comm.comm, sample->time, exec)) { 490 dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n"); 491 err = -1; 492 } 493 494 thread__put(thread); 495 496 return err; 497 } 498 499 int machine__process_lost_event(struct machine *machine __maybe_unused, 500 union perf_event *event, struct perf_sample *sample __maybe_unused) 501 { 502 dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n", 503 event->lost.id, event->lost.lost); 504 return 0; 505 } 506 507 int machine__process_lost_samples_event(struct machine *machine __maybe_unused, 508 union perf_event *event, struct perf_sample *sample) 509 { 510 dump_printf(": id:%" PRIu64 ": lost samples :%" PRIu64 "\n", 511 sample->id, event->lost_samples.lost); 512 return 0; 513 } 514 515 static struct dso *machine__findnew_module_dso(struct machine *machine, 516 struct kmod_path *m, 517 const char *filename) 518 { 519 struct dso *dso; 520 521 pthread_rwlock_wrlock(&machine->dsos.lock); 522 523 dso = __dsos__find(&machine->dsos, m->name, true); 524 if (!dso) { 525 dso = __dsos__addnew(&machine->dsos, m->name); 526 if (dso == NULL) 527 goto out_unlock; 528 529 if (machine__is_host(machine)) 530 dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE; 531 else 532 dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE; 533 534 /* _KMODULE_COMP should be next to _KMODULE */ 535 if (m->kmod && m->comp) 536 dso->symtab_type++; 537 538 dso__set_short_name(dso, strdup(m->name), true); 539 dso__set_long_name(dso, strdup(filename), true); 540 } 541 542 dso__get(dso); 543 out_unlock: 544 pthread_rwlock_unlock(&machine->dsos.lock); 545 return dso; 546 } 547 548 int machine__process_aux_event(struct machine *machine __maybe_unused, 549 union perf_event *event) 550 { 551 if (dump_trace) 552 perf_event__fprintf_aux(event, stdout); 553 return 0; 554 } 555 556 int machine__process_itrace_start_event(struct machine *machine __maybe_unused, 557 union perf_event *event) 558 { 559 if (dump_trace) 560 perf_event__fprintf_itrace_start(event, stdout); 561 return 0; 562 } 563 564 int machine__process_switch_event(struct machine *machine __maybe_unused, 565 union perf_event *event) 566 { 567 if (dump_trace) 568 perf_event__fprintf_switch(event, stdout); 569 return 0; 570 } 571 572 static void dso__adjust_kmod_long_name(struct dso *dso, const char *filename) 573 { 574 const char *dup_filename; 575 576 if (!filename || !dso || !dso->long_name) 577 return; 578 if (dso->long_name[0] != '[') 579 return; 580 if (!strchr(filename, '/')) 581 return; 582 583 dup_filename = strdup(filename); 584 if (!dup_filename) 585 return; 586 587 dso__set_long_name(dso, dup_filename, true); 588 } 589 590 struct map *machine__findnew_module_map(struct machine *machine, u64 start, 591 const char *filename) 592 { 593 struct map *map = NULL; 594 struct dso *dso = NULL; 595 struct kmod_path m; 596 597 if (kmod_path__parse_name(&m, filename)) 598 return NULL; 599 600 map = map_groups__find_by_name(&machine->kmaps, MAP__FUNCTION, 601 m.name); 602 if (map) { 603 /* 604 * If the map's dso is an offline module, give dso__load() 605 * a chance to find the file path of that module by fixing 606 * long_name. 607 */ 608 dso__adjust_kmod_long_name(map->dso, filename); 609 goto out; 610 } 611 612 dso = machine__findnew_module_dso(machine, &m, filename); 613 if (dso == NULL) 614 goto out; 615 616 map = map__new2(start, dso, MAP__FUNCTION); 617 if (map == NULL) 618 goto out; 619 620 map_groups__insert(&machine->kmaps, map); 621 622 /* Put the map here because map_groups__insert alread got it */ 623 map__put(map); 624 out: 625 /* put the dso here, corresponding to machine__findnew_module_dso */ 626 dso__put(dso); 627 free(m.name); 628 return map; 629 } 630 631 size_t machines__fprintf_dsos(struct machines *machines, FILE *fp) 632 { 633 struct rb_node *nd; 634 size_t ret = __dsos__fprintf(&machines->host.dsos.head, fp); 635 636 for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) { 637 struct machine *pos = rb_entry(nd, struct machine, rb_node); 638 ret += __dsos__fprintf(&pos->dsos.head, fp); 639 } 640 641 return ret; 642 } 643 644 size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp, 645 bool (skip)(struct dso *dso, int parm), int parm) 646 { 647 return __dsos__fprintf_buildid(&m->dsos.head, fp, skip, parm); 648 } 649 650 size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp, 651 bool (skip)(struct dso *dso, int parm), int parm) 652 { 653 struct rb_node *nd; 654 size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm); 655 656 for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) { 657 struct machine *pos = rb_entry(nd, struct machine, rb_node); 658 ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm); 659 } 660 return ret; 661 } 662 663 size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp) 664 { 665 int i; 666 size_t printed = 0; 667 struct dso *kdso = machine__kernel_map(machine)->dso; 668 669 if (kdso->has_build_id) { 670 char filename[PATH_MAX]; 671 if (dso__build_id_filename(kdso, filename, sizeof(filename))) 672 printed += fprintf(fp, "[0] %s\n", filename); 673 } 674 675 for (i = 0; i < vmlinux_path__nr_entries; ++i) 676 printed += fprintf(fp, "[%d] %s\n", 677 i + kdso->has_build_id, vmlinux_path[i]); 678 679 return printed; 680 } 681 682 size_t machine__fprintf(struct machine *machine, FILE *fp) 683 { 684 size_t ret = 0; 685 struct rb_node *nd; 686 687 pthread_rwlock_rdlock(&machine->threads_lock); 688 689 for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) { 690 struct thread *pos = rb_entry(nd, struct thread, rb_node); 691 692 ret += thread__fprintf(pos, fp); 693 } 694 695 pthread_rwlock_unlock(&machine->threads_lock); 696 697 return ret; 698 } 699 700 static struct dso *machine__get_kernel(struct machine *machine) 701 { 702 const char *vmlinux_name = NULL; 703 struct dso *kernel; 704 705 if (machine__is_host(machine)) { 706 vmlinux_name = symbol_conf.vmlinux_name; 707 if (!vmlinux_name) 708 vmlinux_name = "[kernel.kallsyms]"; 709 710 kernel = machine__findnew_kernel(machine, vmlinux_name, 711 "[kernel]", DSO_TYPE_KERNEL); 712 } else { 713 char bf[PATH_MAX]; 714 715 if (machine__is_default_guest(machine)) 716 vmlinux_name = symbol_conf.default_guest_vmlinux_name; 717 if (!vmlinux_name) 718 vmlinux_name = machine__mmap_name(machine, bf, 719 sizeof(bf)); 720 721 kernel = machine__findnew_kernel(machine, vmlinux_name, 722 "[guest.kernel]", 723 DSO_TYPE_GUEST_KERNEL); 724 } 725 726 if (kernel != NULL && (!kernel->has_build_id)) 727 dso__read_running_kernel_build_id(kernel, machine); 728 729 return kernel; 730 } 731 732 struct process_args { 733 u64 start; 734 }; 735 736 static void machine__get_kallsyms_filename(struct machine *machine, char *buf, 737 size_t bufsz) 738 { 739 if (machine__is_default_guest(machine)) 740 scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms); 741 else 742 scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir); 743 } 744 745 const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL}; 746 747 /* Figure out the start address of kernel map from /proc/kallsyms. 748 * Returns the name of the start symbol in *symbol_name. Pass in NULL as 749 * symbol_name if it's not that important. 750 */ 751 static u64 machine__get_running_kernel_start(struct machine *machine, 752 const char **symbol_name) 753 { 754 char filename[PATH_MAX]; 755 int i; 756 const char *name; 757 u64 addr = 0; 758 759 machine__get_kallsyms_filename(machine, filename, PATH_MAX); 760 761 if (symbol__restricted_filename(filename, "/proc/kallsyms")) 762 return 0; 763 764 for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) { 765 addr = kallsyms__get_function_start(filename, name); 766 if (addr) 767 break; 768 } 769 770 if (symbol_name) 771 *symbol_name = name; 772 773 return addr; 774 } 775 776 int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel) 777 { 778 enum map_type type; 779 u64 start = machine__get_running_kernel_start(machine, NULL); 780 781 /* In case of renewal the kernel map, destroy previous one */ 782 machine__destroy_kernel_maps(machine); 783 784 for (type = 0; type < MAP__NR_TYPES; ++type) { 785 struct kmap *kmap; 786 struct map *map; 787 788 machine->vmlinux_maps[type] = map__new2(start, kernel, type); 789 if (machine->vmlinux_maps[type] == NULL) 790 return -1; 791 792 machine->vmlinux_maps[type]->map_ip = 793 machine->vmlinux_maps[type]->unmap_ip = 794 identity__map_ip; 795 map = __machine__kernel_map(machine, type); 796 kmap = map__kmap(map); 797 if (!kmap) 798 return -1; 799 800 kmap->kmaps = &machine->kmaps; 801 map_groups__insert(&machine->kmaps, map); 802 } 803 804 return 0; 805 } 806 807 void machine__destroy_kernel_maps(struct machine *machine) 808 { 809 enum map_type type; 810 811 for (type = 0; type < MAP__NR_TYPES; ++type) { 812 struct kmap *kmap; 813 struct map *map = __machine__kernel_map(machine, type); 814 815 if (map == NULL) 816 continue; 817 818 kmap = map__kmap(map); 819 map_groups__remove(&machine->kmaps, map); 820 if (kmap && kmap->ref_reloc_sym) { 821 /* 822 * ref_reloc_sym is shared among all maps, so free just 823 * on one of them. 824 */ 825 if (type == MAP__FUNCTION) { 826 zfree((char **)&kmap->ref_reloc_sym->name); 827 zfree(&kmap->ref_reloc_sym); 828 } else 829 kmap->ref_reloc_sym = NULL; 830 } 831 832 map__put(machine->vmlinux_maps[type]); 833 machine->vmlinux_maps[type] = NULL; 834 } 835 } 836 837 int machines__create_guest_kernel_maps(struct machines *machines) 838 { 839 int ret = 0; 840 struct dirent **namelist = NULL; 841 int i, items = 0; 842 char path[PATH_MAX]; 843 pid_t pid; 844 char *endp; 845 846 if (symbol_conf.default_guest_vmlinux_name || 847 symbol_conf.default_guest_modules || 848 symbol_conf.default_guest_kallsyms) { 849 machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID); 850 } 851 852 if (symbol_conf.guestmount) { 853 items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL); 854 if (items <= 0) 855 return -ENOENT; 856 for (i = 0; i < items; i++) { 857 if (!isdigit(namelist[i]->d_name[0])) { 858 /* Filter out . and .. */ 859 continue; 860 } 861 pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10); 862 if ((*endp != '\0') || 863 (endp == namelist[i]->d_name) || 864 (errno == ERANGE)) { 865 pr_debug("invalid directory (%s). Skipping.\n", 866 namelist[i]->d_name); 867 continue; 868 } 869 sprintf(path, "%s/%s/proc/kallsyms", 870 symbol_conf.guestmount, 871 namelist[i]->d_name); 872 ret = access(path, R_OK); 873 if (ret) { 874 pr_debug("Can't access file %s\n", path); 875 goto failure; 876 } 877 machines__create_kernel_maps(machines, pid); 878 } 879 failure: 880 free(namelist); 881 } 882 883 return ret; 884 } 885 886 void machines__destroy_kernel_maps(struct machines *machines) 887 { 888 struct rb_node *next = rb_first(&machines->guests); 889 890 machine__destroy_kernel_maps(&machines->host); 891 892 while (next) { 893 struct machine *pos = rb_entry(next, struct machine, rb_node); 894 895 next = rb_next(&pos->rb_node); 896 rb_erase(&pos->rb_node, &machines->guests); 897 machine__delete(pos); 898 } 899 } 900 901 int machines__create_kernel_maps(struct machines *machines, pid_t pid) 902 { 903 struct machine *machine = machines__findnew(machines, pid); 904 905 if (machine == NULL) 906 return -1; 907 908 return machine__create_kernel_maps(machine); 909 } 910 911 int machine__load_kallsyms(struct machine *machine, const char *filename, 912 enum map_type type, symbol_filter_t filter) 913 { 914 struct map *map = machine__kernel_map(machine); 915 int ret = dso__load_kallsyms(map->dso, filename, map, filter); 916 917 if (ret > 0) { 918 dso__set_loaded(map->dso, type); 919 /* 920 * Since /proc/kallsyms will have multiple sessions for the 921 * kernel, with modules between them, fixup the end of all 922 * sections. 923 */ 924 __map_groups__fixup_end(&machine->kmaps, type); 925 } 926 927 return ret; 928 } 929 930 int machine__load_vmlinux_path(struct machine *machine, enum map_type type, 931 symbol_filter_t filter) 932 { 933 struct map *map = machine__kernel_map(machine); 934 int ret = dso__load_vmlinux_path(map->dso, map, filter); 935 936 if (ret > 0) 937 dso__set_loaded(map->dso, type); 938 939 return ret; 940 } 941 942 static void map_groups__fixup_end(struct map_groups *mg) 943 { 944 int i; 945 for (i = 0; i < MAP__NR_TYPES; ++i) 946 __map_groups__fixup_end(mg, i); 947 } 948 949 static char *get_kernel_version(const char *root_dir) 950 { 951 char version[PATH_MAX]; 952 FILE *file; 953 char *name, *tmp; 954 const char *prefix = "Linux version "; 955 956 sprintf(version, "%s/proc/version", root_dir); 957 file = fopen(version, "r"); 958 if (!file) 959 return NULL; 960 961 version[0] = '\0'; 962 tmp = fgets(version, sizeof(version), file); 963 fclose(file); 964 965 name = strstr(version, prefix); 966 if (!name) 967 return NULL; 968 name += strlen(prefix); 969 tmp = strchr(name, ' '); 970 if (tmp) 971 *tmp = '\0'; 972 973 return strdup(name); 974 } 975 976 static bool is_kmod_dso(struct dso *dso) 977 { 978 return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE || 979 dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE; 980 } 981 982 static int map_groups__set_module_path(struct map_groups *mg, const char *path, 983 struct kmod_path *m) 984 { 985 struct map *map; 986 char *long_name; 987 988 map = map_groups__find_by_name(mg, MAP__FUNCTION, m->name); 989 if (map == NULL) 990 return 0; 991 992 long_name = strdup(path); 993 if (long_name == NULL) 994 return -ENOMEM; 995 996 dso__set_long_name(map->dso, long_name, true); 997 dso__kernel_module_get_build_id(map->dso, ""); 998 999 /* 1000 * Full name could reveal us kmod compression, so 1001 * we need to update the symtab_type if needed. 1002 */ 1003 if (m->comp && is_kmod_dso(map->dso)) 1004 map->dso->symtab_type++; 1005 1006 return 0; 1007 } 1008 1009 static int map_groups__set_modules_path_dir(struct map_groups *mg, 1010 const char *dir_name, int depth) 1011 { 1012 struct dirent *dent; 1013 DIR *dir = opendir(dir_name); 1014 int ret = 0; 1015 1016 if (!dir) { 1017 pr_debug("%s: cannot open %s dir\n", __func__, dir_name); 1018 return -1; 1019 } 1020 1021 while ((dent = readdir(dir)) != NULL) { 1022 char path[PATH_MAX]; 1023 struct stat st; 1024 1025 /*sshfs might return bad dent->d_type, so we have to stat*/ 1026 snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name); 1027 if (stat(path, &st)) 1028 continue; 1029 1030 if (S_ISDIR(st.st_mode)) { 1031 if (!strcmp(dent->d_name, ".") || 1032 !strcmp(dent->d_name, "..")) 1033 continue; 1034 1035 /* Do not follow top-level source and build symlinks */ 1036 if (depth == 0) { 1037 if (!strcmp(dent->d_name, "source") || 1038 !strcmp(dent->d_name, "build")) 1039 continue; 1040 } 1041 1042 ret = map_groups__set_modules_path_dir(mg, path, 1043 depth + 1); 1044 if (ret < 0) 1045 goto out; 1046 } else { 1047 struct kmod_path m; 1048 1049 ret = kmod_path__parse_name(&m, dent->d_name); 1050 if (ret) 1051 goto out; 1052 1053 if (m.kmod) 1054 ret = map_groups__set_module_path(mg, path, &m); 1055 1056 free(m.name); 1057 1058 if (ret) 1059 goto out; 1060 } 1061 } 1062 1063 out: 1064 closedir(dir); 1065 return ret; 1066 } 1067 1068 static int machine__set_modules_path(struct machine *machine) 1069 { 1070 char *version; 1071 char modules_path[PATH_MAX]; 1072 1073 version = get_kernel_version(machine->root_dir); 1074 if (!version) 1075 return -1; 1076 1077 snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s", 1078 machine->root_dir, version); 1079 free(version); 1080 1081 return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0); 1082 } 1083 1084 static int machine__create_module(void *arg, const char *name, u64 start) 1085 { 1086 struct machine *machine = arg; 1087 struct map *map; 1088 1089 map = machine__findnew_module_map(machine, start, name); 1090 if (map == NULL) 1091 return -1; 1092 1093 dso__kernel_module_get_build_id(map->dso, machine->root_dir); 1094 1095 return 0; 1096 } 1097 1098 static int machine__create_modules(struct machine *machine) 1099 { 1100 const char *modules; 1101 char path[PATH_MAX]; 1102 1103 if (machine__is_default_guest(machine)) { 1104 modules = symbol_conf.default_guest_modules; 1105 } else { 1106 snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir); 1107 modules = path; 1108 } 1109 1110 if (symbol__restricted_filename(modules, "/proc/modules")) 1111 return -1; 1112 1113 if (modules__parse(modules, machine, machine__create_module)) 1114 return -1; 1115 1116 if (!machine__set_modules_path(machine)) 1117 return 0; 1118 1119 pr_debug("Problems setting modules path maps, continuing anyway...\n"); 1120 1121 return 0; 1122 } 1123 1124 int machine__create_kernel_maps(struct machine *machine) 1125 { 1126 struct dso *kernel = machine__get_kernel(machine); 1127 const char *name; 1128 u64 addr = machine__get_running_kernel_start(machine, &name); 1129 int ret; 1130 1131 if (!addr || kernel == NULL) 1132 return -1; 1133 1134 ret = __machine__create_kernel_maps(machine, kernel); 1135 dso__put(kernel); 1136 if (ret < 0) 1137 return -1; 1138 1139 if (symbol_conf.use_modules && machine__create_modules(machine) < 0) { 1140 if (machine__is_host(machine)) 1141 pr_debug("Problems creating module maps, " 1142 "continuing anyway...\n"); 1143 else 1144 pr_debug("Problems creating module maps for guest %d, " 1145 "continuing anyway...\n", machine->pid); 1146 } 1147 1148 /* 1149 * Now that we have all the maps created, just set the ->end of them: 1150 */ 1151 map_groups__fixup_end(&machine->kmaps); 1152 1153 if (maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name, 1154 addr)) { 1155 machine__destroy_kernel_maps(machine); 1156 return -1; 1157 } 1158 1159 return 0; 1160 } 1161 1162 static void machine__set_kernel_mmap_len(struct machine *machine, 1163 union perf_event *event) 1164 { 1165 int i; 1166 1167 for (i = 0; i < MAP__NR_TYPES; i++) { 1168 machine->vmlinux_maps[i]->start = event->mmap.start; 1169 machine->vmlinux_maps[i]->end = (event->mmap.start + 1170 event->mmap.len); 1171 /* 1172 * Be a bit paranoid here, some perf.data file came with 1173 * a zero sized synthesized MMAP event for the kernel. 1174 */ 1175 if (machine->vmlinux_maps[i]->end == 0) 1176 machine->vmlinux_maps[i]->end = ~0ULL; 1177 } 1178 } 1179 1180 static bool machine__uses_kcore(struct machine *machine) 1181 { 1182 struct dso *dso; 1183 1184 list_for_each_entry(dso, &machine->dsos.head, node) { 1185 if (dso__is_kcore(dso)) 1186 return true; 1187 } 1188 1189 return false; 1190 } 1191 1192 static int machine__process_kernel_mmap_event(struct machine *machine, 1193 union perf_event *event) 1194 { 1195 struct map *map; 1196 char kmmap_prefix[PATH_MAX]; 1197 enum dso_kernel_type kernel_type; 1198 bool is_kernel_mmap; 1199 1200 /* If we have maps from kcore then we do not need or want any others */ 1201 if (machine__uses_kcore(machine)) 1202 return 0; 1203 1204 machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix)); 1205 if (machine__is_host(machine)) 1206 kernel_type = DSO_TYPE_KERNEL; 1207 else 1208 kernel_type = DSO_TYPE_GUEST_KERNEL; 1209 1210 is_kernel_mmap = memcmp(event->mmap.filename, 1211 kmmap_prefix, 1212 strlen(kmmap_prefix) - 1) == 0; 1213 if (event->mmap.filename[0] == '/' || 1214 (!is_kernel_mmap && event->mmap.filename[0] == '[')) { 1215 map = machine__findnew_module_map(machine, event->mmap.start, 1216 event->mmap.filename); 1217 if (map == NULL) 1218 goto out_problem; 1219 1220 map->end = map->start + event->mmap.len; 1221 } else if (is_kernel_mmap) { 1222 const char *symbol_name = (event->mmap.filename + 1223 strlen(kmmap_prefix)); 1224 /* 1225 * Should be there already, from the build-id table in 1226 * the header. 1227 */ 1228 struct dso *kernel = NULL; 1229 struct dso *dso; 1230 1231 pthread_rwlock_rdlock(&machine->dsos.lock); 1232 1233 list_for_each_entry(dso, &machine->dsos.head, node) { 1234 1235 /* 1236 * The cpumode passed to is_kernel_module is not the 1237 * cpumode of *this* event. If we insist on passing 1238 * correct cpumode to is_kernel_module, we should 1239 * record the cpumode when we adding this dso to the 1240 * linked list. 1241 * 1242 * However we don't really need passing correct 1243 * cpumode. We know the correct cpumode must be kernel 1244 * mode (if not, we should not link it onto kernel_dsos 1245 * list). 1246 * 1247 * Therefore, we pass PERF_RECORD_MISC_CPUMODE_UNKNOWN. 1248 * is_kernel_module() treats it as a kernel cpumode. 1249 */ 1250 1251 if (!dso->kernel || 1252 is_kernel_module(dso->long_name, 1253 PERF_RECORD_MISC_CPUMODE_UNKNOWN)) 1254 continue; 1255 1256 1257 kernel = dso; 1258 break; 1259 } 1260 1261 pthread_rwlock_unlock(&machine->dsos.lock); 1262 1263 if (kernel == NULL) 1264 kernel = machine__findnew_dso(machine, kmmap_prefix); 1265 if (kernel == NULL) 1266 goto out_problem; 1267 1268 kernel->kernel = kernel_type; 1269 if (__machine__create_kernel_maps(machine, kernel) < 0) { 1270 dso__put(kernel); 1271 goto out_problem; 1272 } 1273 1274 if (strstr(kernel->long_name, "vmlinux")) 1275 dso__set_short_name(kernel, "[kernel.vmlinux]", false); 1276 1277 machine__set_kernel_mmap_len(machine, event); 1278 1279 /* 1280 * Avoid using a zero address (kptr_restrict) for the ref reloc 1281 * symbol. Effectively having zero here means that at record 1282 * time /proc/sys/kernel/kptr_restrict was non zero. 1283 */ 1284 if (event->mmap.pgoff != 0) { 1285 maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, 1286 symbol_name, 1287 event->mmap.pgoff); 1288 } 1289 1290 if (machine__is_default_guest(machine)) { 1291 /* 1292 * preload dso of guest kernel and modules 1293 */ 1294 dso__load(kernel, machine__kernel_map(machine), NULL); 1295 } 1296 } 1297 return 0; 1298 out_problem: 1299 return -1; 1300 } 1301 1302 int machine__process_mmap2_event(struct machine *machine, 1303 union perf_event *event, 1304 struct perf_sample *sample) 1305 { 1306 struct thread *thread; 1307 struct map *map; 1308 enum map_type type; 1309 int ret = 0; 1310 1311 if (dump_trace) 1312 perf_event__fprintf_mmap2(event, stdout); 1313 1314 if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL || 1315 sample->cpumode == PERF_RECORD_MISC_KERNEL) { 1316 ret = machine__process_kernel_mmap_event(machine, event); 1317 if (ret < 0) 1318 goto out_problem; 1319 return 0; 1320 } 1321 1322 thread = machine__findnew_thread(machine, event->mmap2.pid, 1323 event->mmap2.tid); 1324 if (thread == NULL) 1325 goto out_problem; 1326 1327 if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) 1328 type = MAP__VARIABLE; 1329 else 1330 type = MAP__FUNCTION; 1331 1332 map = map__new(machine, event->mmap2.start, 1333 event->mmap2.len, event->mmap2.pgoff, 1334 event->mmap2.pid, event->mmap2.maj, 1335 event->mmap2.min, event->mmap2.ino, 1336 event->mmap2.ino_generation, 1337 event->mmap2.prot, 1338 event->mmap2.flags, 1339 event->mmap2.filename, type, thread); 1340 1341 if (map == NULL) 1342 goto out_problem_map; 1343 1344 thread__insert_map(thread, map); 1345 thread__put(thread); 1346 map__put(map); 1347 return 0; 1348 1349 out_problem_map: 1350 thread__put(thread); 1351 out_problem: 1352 dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n"); 1353 return 0; 1354 } 1355 1356 int machine__process_mmap_event(struct machine *machine, union perf_event *event, 1357 struct perf_sample *sample) 1358 { 1359 struct thread *thread; 1360 struct map *map; 1361 enum map_type type; 1362 int ret = 0; 1363 1364 if (dump_trace) 1365 perf_event__fprintf_mmap(event, stdout); 1366 1367 if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL || 1368 sample->cpumode == PERF_RECORD_MISC_KERNEL) { 1369 ret = machine__process_kernel_mmap_event(machine, event); 1370 if (ret < 0) 1371 goto out_problem; 1372 return 0; 1373 } 1374 1375 thread = machine__findnew_thread(machine, event->mmap.pid, 1376 event->mmap.tid); 1377 if (thread == NULL) 1378 goto out_problem; 1379 1380 if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) 1381 type = MAP__VARIABLE; 1382 else 1383 type = MAP__FUNCTION; 1384 1385 map = map__new(machine, event->mmap.start, 1386 event->mmap.len, event->mmap.pgoff, 1387 event->mmap.pid, 0, 0, 0, 0, 0, 0, 1388 event->mmap.filename, 1389 type, thread); 1390 1391 if (map == NULL) 1392 goto out_problem_map; 1393 1394 thread__insert_map(thread, map); 1395 thread__put(thread); 1396 map__put(map); 1397 return 0; 1398 1399 out_problem_map: 1400 thread__put(thread); 1401 out_problem: 1402 dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n"); 1403 return 0; 1404 } 1405 1406 static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock) 1407 { 1408 if (machine->last_match == th) 1409 machine->last_match = NULL; 1410 1411 BUG_ON(atomic_read(&th->refcnt) == 0); 1412 if (lock) 1413 pthread_rwlock_wrlock(&machine->threads_lock); 1414 rb_erase_init(&th->rb_node, &machine->threads); 1415 RB_CLEAR_NODE(&th->rb_node); 1416 /* 1417 * Move it first to the dead_threads list, then drop the reference, 1418 * if this is the last reference, then the thread__delete destructor 1419 * will be called and we will remove it from the dead_threads list. 1420 */ 1421 list_add_tail(&th->node, &machine->dead_threads); 1422 if (lock) 1423 pthread_rwlock_unlock(&machine->threads_lock); 1424 thread__put(th); 1425 } 1426 1427 void machine__remove_thread(struct machine *machine, struct thread *th) 1428 { 1429 return __machine__remove_thread(machine, th, true); 1430 } 1431 1432 int machine__process_fork_event(struct machine *machine, union perf_event *event, 1433 struct perf_sample *sample) 1434 { 1435 struct thread *thread = machine__find_thread(machine, 1436 event->fork.pid, 1437 event->fork.tid); 1438 struct thread *parent = machine__findnew_thread(machine, 1439 event->fork.ppid, 1440 event->fork.ptid); 1441 int err = 0; 1442 1443 if (dump_trace) 1444 perf_event__fprintf_task(event, stdout); 1445 1446 /* 1447 * There may be an existing thread that is not actually the parent, 1448 * either because we are processing events out of order, or because the 1449 * (fork) event that would have removed the thread was lost. Assume the 1450 * latter case and continue on as best we can. 1451 */ 1452 if (parent->pid_ != (pid_t)event->fork.ppid) { 1453 dump_printf("removing erroneous parent thread %d/%d\n", 1454 parent->pid_, parent->tid); 1455 machine__remove_thread(machine, parent); 1456 thread__put(parent); 1457 parent = machine__findnew_thread(machine, event->fork.ppid, 1458 event->fork.ptid); 1459 } 1460 1461 /* if a thread currently exists for the thread id remove it */ 1462 if (thread != NULL) { 1463 machine__remove_thread(machine, thread); 1464 thread__put(thread); 1465 } 1466 1467 thread = machine__findnew_thread(machine, event->fork.pid, 1468 event->fork.tid); 1469 1470 if (thread == NULL || parent == NULL || 1471 thread__fork(thread, parent, sample->time) < 0) { 1472 dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n"); 1473 err = -1; 1474 } 1475 thread__put(thread); 1476 thread__put(parent); 1477 1478 return err; 1479 } 1480 1481 int machine__process_exit_event(struct machine *machine, union perf_event *event, 1482 struct perf_sample *sample __maybe_unused) 1483 { 1484 struct thread *thread = machine__find_thread(machine, 1485 event->fork.pid, 1486 event->fork.tid); 1487 1488 if (dump_trace) 1489 perf_event__fprintf_task(event, stdout); 1490 1491 if (thread != NULL) { 1492 thread__exited(thread); 1493 thread__put(thread); 1494 } 1495 1496 return 0; 1497 } 1498 1499 int machine__process_event(struct machine *machine, union perf_event *event, 1500 struct perf_sample *sample) 1501 { 1502 int ret; 1503 1504 switch (event->header.type) { 1505 case PERF_RECORD_COMM: 1506 ret = machine__process_comm_event(machine, event, sample); break; 1507 case PERF_RECORD_MMAP: 1508 ret = machine__process_mmap_event(machine, event, sample); break; 1509 case PERF_RECORD_MMAP2: 1510 ret = machine__process_mmap2_event(machine, event, sample); break; 1511 case PERF_RECORD_FORK: 1512 ret = machine__process_fork_event(machine, event, sample); break; 1513 case PERF_RECORD_EXIT: 1514 ret = machine__process_exit_event(machine, event, sample); break; 1515 case PERF_RECORD_LOST: 1516 ret = machine__process_lost_event(machine, event, sample); break; 1517 case PERF_RECORD_AUX: 1518 ret = machine__process_aux_event(machine, event); break; 1519 case PERF_RECORD_ITRACE_START: 1520 ret = machine__process_itrace_start_event(machine, event); break; 1521 case PERF_RECORD_LOST_SAMPLES: 1522 ret = machine__process_lost_samples_event(machine, event, sample); break; 1523 case PERF_RECORD_SWITCH: 1524 case PERF_RECORD_SWITCH_CPU_WIDE: 1525 ret = machine__process_switch_event(machine, event); break; 1526 default: 1527 ret = -1; 1528 break; 1529 } 1530 1531 return ret; 1532 } 1533 1534 static bool symbol__match_regex(struct symbol *sym, regex_t *regex) 1535 { 1536 if (sym->name && !regexec(regex, sym->name, 0, NULL, 0)) 1537 return 1; 1538 return 0; 1539 } 1540 1541 static void ip__resolve_ams(struct thread *thread, 1542 struct addr_map_symbol *ams, 1543 u64 ip) 1544 { 1545 struct addr_location al; 1546 1547 memset(&al, 0, sizeof(al)); 1548 /* 1549 * We cannot use the header.misc hint to determine whether a 1550 * branch stack address is user, kernel, guest, hypervisor. 1551 * Branches may straddle the kernel/user/hypervisor boundaries. 1552 * Thus, we have to try consecutively until we find a match 1553 * or else, the symbol is unknown 1554 */ 1555 thread__find_cpumode_addr_location(thread, MAP__FUNCTION, ip, &al); 1556 1557 ams->addr = ip; 1558 ams->al_addr = al.addr; 1559 ams->sym = al.sym; 1560 ams->map = al.map; 1561 } 1562 1563 static void ip__resolve_data(struct thread *thread, 1564 u8 m, struct addr_map_symbol *ams, u64 addr) 1565 { 1566 struct addr_location al; 1567 1568 memset(&al, 0, sizeof(al)); 1569 1570 thread__find_addr_location(thread, m, MAP__VARIABLE, addr, &al); 1571 if (al.map == NULL) { 1572 /* 1573 * some shared data regions have execute bit set which puts 1574 * their mapping in the MAP__FUNCTION type array. 1575 * Check there as a fallback option before dropping the sample. 1576 */ 1577 thread__find_addr_location(thread, m, MAP__FUNCTION, addr, &al); 1578 } 1579 1580 ams->addr = addr; 1581 ams->al_addr = al.addr; 1582 ams->sym = al.sym; 1583 ams->map = al.map; 1584 } 1585 1586 struct mem_info *sample__resolve_mem(struct perf_sample *sample, 1587 struct addr_location *al) 1588 { 1589 struct mem_info *mi = zalloc(sizeof(*mi)); 1590 1591 if (!mi) 1592 return NULL; 1593 1594 ip__resolve_ams(al->thread, &mi->iaddr, sample->ip); 1595 ip__resolve_data(al->thread, al->cpumode, &mi->daddr, sample->addr); 1596 mi->data_src.val = sample->data_src; 1597 1598 return mi; 1599 } 1600 1601 static int add_callchain_ip(struct thread *thread, 1602 struct symbol **parent, 1603 struct addr_location *root_al, 1604 u8 *cpumode, 1605 u64 ip) 1606 { 1607 struct addr_location al; 1608 1609 al.filtered = 0; 1610 al.sym = NULL; 1611 if (!cpumode) { 1612 thread__find_cpumode_addr_location(thread, MAP__FUNCTION, 1613 ip, &al); 1614 } else { 1615 if (ip >= PERF_CONTEXT_MAX) { 1616 switch (ip) { 1617 case PERF_CONTEXT_HV: 1618 *cpumode = PERF_RECORD_MISC_HYPERVISOR; 1619 break; 1620 case PERF_CONTEXT_KERNEL: 1621 *cpumode = PERF_RECORD_MISC_KERNEL; 1622 break; 1623 case PERF_CONTEXT_USER: 1624 *cpumode = PERF_RECORD_MISC_USER; 1625 break; 1626 default: 1627 pr_debug("invalid callchain context: " 1628 "%"PRId64"\n", (s64) ip); 1629 /* 1630 * It seems the callchain is corrupted. 1631 * Discard all. 1632 */ 1633 callchain_cursor_reset(&callchain_cursor); 1634 return 1; 1635 } 1636 return 0; 1637 } 1638 thread__find_addr_location(thread, *cpumode, MAP__FUNCTION, 1639 ip, &al); 1640 } 1641 1642 if (al.sym != NULL) { 1643 if (sort__has_parent && !*parent && 1644 symbol__match_regex(al.sym, &parent_regex)) 1645 *parent = al.sym; 1646 else if (have_ignore_callees && root_al && 1647 symbol__match_regex(al.sym, &ignore_callees_regex)) { 1648 /* Treat this symbol as the root, 1649 forgetting its callees. */ 1650 *root_al = al; 1651 callchain_cursor_reset(&callchain_cursor); 1652 } 1653 } 1654 1655 if (symbol_conf.hide_unresolved && al.sym == NULL) 1656 return 0; 1657 return callchain_cursor_append(&callchain_cursor, al.addr, al.map, al.sym); 1658 } 1659 1660 struct branch_info *sample__resolve_bstack(struct perf_sample *sample, 1661 struct addr_location *al) 1662 { 1663 unsigned int i; 1664 const struct branch_stack *bs = sample->branch_stack; 1665 struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info)); 1666 1667 if (!bi) 1668 return NULL; 1669 1670 for (i = 0; i < bs->nr; i++) { 1671 ip__resolve_ams(al->thread, &bi[i].to, bs->entries[i].to); 1672 ip__resolve_ams(al->thread, &bi[i].from, bs->entries[i].from); 1673 bi[i].flags = bs->entries[i].flags; 1674 } 1675 return bi; 1676 } 1677 1678 #define CHASHSZ 127 1679 #define CHASHBITS 7 1680 #define NO_ENTRY 0xff 1681 1682 #define PERF_MAX_BRANCH_DEPTH 127 1683 1684 /* Remove loops. */ 1685 static int remove_loops(struct branch_entry *l, int nr) 1686 { 1687 int i, j, off; 1688 unsigned char chash[CHASHSZ]; 1689 1690 memset(chash, NO_ENTRY, sizeof(chash)); 1691 1692 BUG_ON(PERF_MAX_BRANCH_DEPTH > 255); 1693 1694 for (i = 0; i < nr; i++) { 1695 int h = hash_64(l[i].from, CHASHBITS) % CHASHSZ; 1696 1697 /* no collision handling for now */ 1698 if (chash[h] == NO_ENTRY) { 1699 chash[h] = i; 1700 } else if (l[chash[h]].from == l[i].from) { 1701 bool is_loop = true; 1702 /* check if it is a real loop */ 1703 off = 0; 1704 for (j = chash[h]; j < i && i + off < nr; j++, off++) 1705 if (l[j].from != l[i + off].from) { 1706 is_loop = false; 1707 break; 1708 } 1709 if (is_loop) { 1710 memmove(l + i, l + i + off, 1711 (nr - (i + off)) * sizeof(*l)); 1712 nr -= off; 1713 } 1714 } 1715 } 1716 return nr; 1717 } 1718 1719 /* 1720 * Recolve LBR callstack chain sample 1721 * Return: 1722 * 1 on success get LBR callchain information 1723 * 0 no available LBR callchain information, should try fp 1724 * negative error code on other errors. 1725 */ 1726 static int resolve_lbr_callchain_sample(struct thread *thread, 1727 struct perf_sample *sample, 1728 struct symbol **parent, 1729 struct addr_location *root_al, 1730 int max_stack) 1731 { 1732 struct ip_callchain *chain = sample->callchain; 1733 int chain_nr = min(max_stack, (int)chain->nr); 1734 u8 cpumode = PERF_RECORD_MISC_USER; 1735 int i, j, err; 1736 u64 ip; 1737 1738 for (i = 0; i < chain_nr; i++) { 1739 if (chain->ips[i] == PERF_CONTEXT_USER) 1740 break; 1741 } 1742 1743 /* LBR only affects the user callchain */ 1744 if (i != chain_nr) { 1745 struct branch_stack *lbr_stack = sample->branch_stack; 1746 int lbr_nr = lbr_stack->nr; 1747 /* 1748 * LBR callstack can only get user call chain. 1749 * The mix_chain_nr is kernel call chain 1750 * number plus LBR user call chain number. 1751 * i is kernel call chain number, 1752 * 1 is PERF_CONTEXT_USER, 1753 * lbr_nr + 1 is the user call chain number. 1754 * For details, please refer to the comments 1755 * in callchain__printf 1756 */ 1757 int mix_chain_nr = i + 1 + lbr_nr + 1; 1758 1759 if (mix_chain_nr > PERF_MAX_STACK_DEPTH + PERF_MAX_BRANCH_DEPTH) { 1760 pr_warning("corrupted callchain. skipping...\n"); 1761 return 0; 1762 } 1763 1764 for (j = 0; j < mix_chain_nr; j++) { 1765 if (callchain_param.order == ORDER_CALLEE) { 1766 if (j < i + 1) 1767 ip = chain->ips[j]; 1768 else if (j > i + 1) 1769 ip = lbr_stack->entries[j - i - 2].from; 1770 else 1771 ip = lbr_stack->entries[0].to; 1772 } else { 1773 if (j < lbr_nr) 1774 ip = lbr_stack->entries[lbr_nr - j - 1].from; 1775 else if (j > lbr_nr) 1776 ip = chain->ips[i + 1 - (j - lbr_nr)]; 1777 else 1778 ip = lbr_stack->entries[0].to; 1779 } 1780 1781 err = add_callchain_ip(thread, parent, root_al, &cpumode, ip); 1782 if (err) 1783 return (err < 0) ? err : 0; 1784 } 1785 return 1; 1786 } 1787 1788 return 0; 1789 } 1790 1791 static int thread__resolve_callchain_sample(struct thread *thread, 1792 struct perf_evsel *evsel, 1793 struct perf_sample *sample, 1794 struct symbol **parent, 1795 struct addr_location *root_al, 1796 int max_stack) 1797 { 1798 struct branch_stack *branch = sample->branch_stack; 1799 struct ip_callchain *chain = sample->callchain; 1800 int chain_nr = min(max_stack, (int)chain->nr); 1801 u8 cpumode = PERF_RECORD_MISC_USER; 1802 int i, j, err; 1803 int skip_idx = -1; 1804 int first_call = 0; 1805 1806 callchain_cursor_reset(&callchain_cursor); 1807 1808 if (has_branch_callstack(evsel)) { 1809 err = resolve_lbr_callchain_sample(thread, sample, parent, 1810 root_al, max_stack); 1811 if (err) 1812 return (err < 0) ? err : 0; 1813 } 1814 1815 /* 1816 * Based on DWARF debug information, some architectures skip 1817 * a callchain entry saved by the kernel. 1818 */ 1819 if (chain->nr < PERF_MAX_STACK_DEPTH) 1820 skip_idx = arch_skip_callchain_idx(thread, chain); 1821 1822 /* 1823 * Add branches to call stack for easier browsing. This gives 1824 * more context for a sample than just the callers. 1825 * 1826 * This uses individual histograms of paths compared to the 1827 * aggregated histograms the normal LBR mode uses. 1828 * 1829 * Limitations for now: 1830 * - No extra filters 1831 * - No annotations (should annotate somehow) 1832 */ 1833 1834 if (branch && callchain_param.branch_callstack) { 1835 int nr = min(max_stack, (int)branch->nr); 1836 struct branch_entry be[nr]; 1837 1838 if (branch->nr > PERF_MAX_BRANCH_DEPTH) { 1839 pr_warning("corrupted branch chain. skipping...\n"); 1840 goto check_calls; 1841 } 1842 1843 for (i = 0; i < nr; i++) { 1844 if (callchain_param.order == ORDER_CALLEE) { 1845 be[i] = branch->entries[i]; 1846 /* 1847 * Check for overlap into the callchain. 1848 * The return address is one off compared to 1849 * the branch entry. To adjust for this 1850 * assume the calling instruction is not longer 1851 * than 8 bytes. 1852 */ 1853 if (i == skip_idx || 1854 chain->ips[first_call] >= PERF_CONTEXT_MAX) 1855 first_call++; 1856 else if (be[i].from < chain->ips[first_call] && 1857 be[i].from >= chain->ips[first_call] - 8) 1858 first_call++; 1859 } else 1860 be[i] = branch->entries[branch->nr - i - 1]; 1861 } 1862 1863 nr = remove_loops(be, nr); 1864 1865 for (i = 0; i < nr; i++) { 1866 err = add_callchain_ip(thread, parent, root_al, 1867 NULL, be[i].to); 1868 if (!err) 1869 err = add_callchain_ip(thread, parent, root_al, 1870 NULL, be[i].from); 1871 if (err == -EINVAL) 1872 break; 1873 if (err) 1874 return err; 1875 } 1876 chain_nr -= nr; 1877 } 1878 1879 check_calls: 1880 if (chain->nr > PERF_MAX_STACK_DEPTH && (int)chain->nr > max_stack) { 1881 pr_warning("corrupted callchain. skipping...\n"); 1882 return 0; 1883 } 1884 1885 for (i = first_call; i < chain_nr; i++) { 1886 u64 ip; 1887 1888 if (callchain_param.order == ORDER_CALLEE) 1889 j = i; 1890 else 1891 j = chain->nr - i - 1; 1892 1893 #ifdef HAVE_SKIP_CALLCHAIN_IDX 1894 if (j == skip_idx) 1895 continue; 1896 #endif 1897 ip = chain->ips[j]; 1898 1899 err = add_callchain_ip(thread, parent, root_al, &cpumode, ip); 1900 1901 if (err) 1902 return (err < 0) ? err : 0; 1903 } 1904 1905 return 0; 1906 } 1907 1908 static int unwind_entry(struct unwind_entry *entry, void *arg) 1909 { 1910 struct callchain_cursor *cursor = arg; 1911 1912 if (symbol_conf.hide_unresolved && entry->sym == NULL) 1913 return 0; 1914 return callchain_cursor_append(cursor, entry->ip, 1915 entry->map, entry->sym); 1916 } 1917 1918 int thread__resolve_callchain(struct thread *thread, 1919 struct perf_evsel *evsel, 1920 struct perf_sample *sample, 1921 struct symbol **parent, 1922 struct addr_location *root_al, 1923 int max_stack) 1924 { 1925 int ret = thread__resolve_callchain_sample(thread, evsel, 1926 sample, parent, 1927 root_al, max_stack); 1928 if (ret) 1929 return ret; 1930 1931 /* Can we do dwarf post unwind? */ 1932 if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) && 1933 (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER))) 1934 return 0; 1935 1936 /* Bail out if nothing was captured. */ 1937 if ((!sample->user_regs.regs) || 1938 (!sample->user_stack.size)) 1939 return 0; 1940 1941 return unwind__get_entries(unwind_entry, &callchain_cursor, 1942 thread, sample, max_stack); 1943 1944 } 1945 1946 int machine__for_each_thread(struct machine *machine, 1947 int (*fn)(struct thread *thread, void *p), 1948 void *priv) 1949 { 1950 struct rb_node *nd; 1951 struct thread *thread; 1952 int rc = 0; 1953 1954 for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) { 1955 thread = rb_entry(nd, struct thread, rb_node); 1956 rc = fn(thread, priv); 1957 if (rc != 0) 1958 return rc; 1959 } 1960 1961 list_for_each_entry(thread, &machine->dead_threads, node) { 1962 rc = fn(thread, priv); 1963 if (rc != 0) 1964 return rc; 1965 } 1966 return rc; 1967 } 1968 1969 int machines__for_each_thread(struct machines *machines, 1970 int (*fn)(struct thread *thread, void *p), 1971 void *priv) 1972 { 1973 struct rb_node *nd; 1974 int rc = 0; 1975 1976 rc = machine__for_each_thread(&machines->host, fn, priv); 1977 if (rc != 0) 1978 return rc; 1979 1980 for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) { 1981 struct machine *machine = rb_entry(nd, struct machine, rb_node); 1982 1983 rc = machine__for_each_thread(machine, fn, priv); 1984 if (rc != 0) 1985 return rc; 1986 } 1987 return rc; 1988 } 1989 1990 int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool, 1991 struct target *target, struct thread_map *threads, 1992 perf_event__handler_t process, bool data_mmap, 1993 unsigned int proc_map_timeout) 1994 { 1995 if (target__has_task(target)) 1996 return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap, proc_map_timeout); 1997 else if (target__has_cpu(target)) 1998 return perf_event__synthesize_threads(tool, process, machine, data_mmap, proc_map_timeout); 1999 /* command specified */ 2000 return 0; 2001 } 2002 2003 pid_t machine__get_current_tid(struct machine *machine, int cpu) 2004 { 2005 if (cpu < 0 || cpu >= MAX_NR_CPUS || !machine->current_tid) 2006 return -1; 2007 2008 return machine->current_tid[cpu]; 2009 } 2010 2011 int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid, 2012 pid_t tid) 2013 { 2014 struct thread *thread; 2015 2016 if (cpu < 0) 2017 return -EINVAL; 2018 2019 if (!machine->current_tid) { 2020 int i; 2021 2022 machine->current_tid = calloc(MAX_NR_CPUS, sizeof(pid_t)); 2023 if (!machine->current_tid) 2024 return -ENOMEM; 2025 for (i = 0; i < MAX_NR_CPUS; i++) 2026 machine->current_tid[i] = -1; 2027 } 2028 2029 if (cpu >= MAX_NR_CPUS) { 2030 pr_err("Requested CPU %d too large. ", cpu); 2031 pr_err("Consider raising MAX_NR_CPUS\n"); 2032 return -EINVAL; 2033 } 2034 2035 machine->current_tid[cpu] = tid; 2036 2037 thread = machine__findnew_thread(machine, pid, tid); 2038 if (!thread) 2039 return -ENOMEM; 2040 2041 thread->cpu = cpu; 2042 thread__put(thread); 2043 2044 return 0; 2045 } 2046 2047 int machine__get_kernel_start(struct machine *machine) 2048 { 2049 struct map *map = machine__kernel_map(machine); 2050 int err = 0; 2051 2052 /* 2053 * The only addresses above 2^63 are kernel addresses of a 64-bit 2054 * kernel. Note that addresses are unsigned so that on a 32-bit system 2055 * all addresses including kernel addresses are less than 2^32. In 2056 * that case (32-bit system), if the kernel mapping is unknown, all 2057 * addresses will be assumed to be in user space - see 2058 * machine__kernel_ip(). 2059 */ 2060 machine->kernel_start = 1ULL << 63; 2061 if (map) { 2062 err = map__load(map, machine->symbol_filter); 2063 if (map->start) 2064 machine->kernel_start = map->start; 2065 } 2066 return err; 2067 } 2068 2069 struct dso *machine__findnew_dso(struct machine *machine, const char *filename) 2070 { 2071 return dsos__findnew(&machine->dsos, filename); 2072 } 2073 2074 char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp) 2075 { 2076 struct machine *machine = vmachine; 2077 struct map *map; 2078 struct symbol *sym = map_groups__find_symbol(&machine->kmaps, MAP__FUNCTION, *addrp, &map, NULL); 2079 2080 if (sym == NULL) 2081 return NULL; 2082 2083 *modp = __map__is_kmodule(map) ? (char *)map->dso->short_name : NULL; 2084 *addrp = map->unmap_ip(map, sym->start); 2085 return sym->name; 2086 } 2087