// SPDX-License-Identifier: GPL-2.0
#include <dirent.h>
#include <errno.h>
#include <inttypes.h>
#include <regex.h>
#include <stdlib.h>
#include "callchain.h"
#include "debug.h"
#include "dso.h"
#include "env.h"
#include "event.h"
#include "evsel.h"
#include "hist.h"
#include "machine.h"
#include "map.h"
#include "map_symbol.h"
#include "branch.h"
#include "mem-events.h"
#include "srcline.h"
#include "symbol.h"
#include "sort.h"
#include "strlist.h"
#include "target.h"
#include "thread.h"
#include "util.h"
#include "vdso.h"
#include <stdbool.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include "unwind.h"
#include "linux/hash.h"
#include "asm/bug.h"
#include "bpf-event.h"
#include <internal/lib.h> // page_size

#include <linux/ctype.h>
#include <symbol/kallsyms.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/zalloc.h>

static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock);

static struct dso *machine__kernel_dso(struct machine *machine)
{
	return machine->vmlinux_map->dso;
}

static void dsos__init(struct dsos *dsos)
{
	INIT_LIST_HEAD(&dsos->head);
	dsos->root = RB_ROOT;
	init_rwsem(&dsos->lock);
}

static void machine__threads_init(struct machine *machine)
{
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];
		threads->entries = RB_ROOT_CACHED;
		init_rwsem(&threads->lock);
		threads->nr = 0;
		INIT_LIST_HEAD(&threads->dead);
		threads->last_match = NULL;
	}
}

static int machine__set_mmap_name(struct machine *machine)
{
	if (machine__is_host(machine))
		machine->mmap_name = strdup("[kernel.kallsyms]");
	else if (machine__is_default_guest(machine))
		machine->mmap_name = strdup("[guest.kernel.kallsyms]");
	else if (asprintf(&machine->mmap_name, "[guest.kernel.kallsyms.%d]",
			  machine->pid) < 0)
		machine->mmap_name = NULL;

	return machine->mmap_name ? 0 : -ENOMEM;
}
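
/*
 * machine__init() is also used to (re)initialize the statically allocated
 * machines->host instance, so it must not assume the struct came from the
 * heap; on failure it only releases what it allocated itself.
 */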
int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
{
	int err = -ENOMEM;

	memset(machine, 0, sizeof(*machine));
	maps__init(&machine->kmaps, machine);
	RB_CLEAR_NODE(&machine->rb_node);
	dsos__init(&machine->dsos);

	machine__threads_init(machine);

	machine->vdso_info = NULL;
	machine->env = NULL;

	machine->pid = pid;

	machine->id_hdr_size = 0;
	machine->kptr_restrict_warned = false;
	machine->comm_exec = false;
	machine->kernel_start = 0;
	machine->vmlinux_map = NULL;

	machine->root_dir = strdup(root_dir);
	if (machine->root_dir == NULL)
		return -ENOMEM;

	if (machine__set_mmap_name(machine))
		goto out;

	if (pid != HOST_KERNEL_ID) {
		struct thread *thread = machine__findnew_thread(machine, -1,
								pid);
		char comm[64];

		if (thread == NULL)
			goto out;

		snprintf(comm, sizeof(comm), "[guest/%d]", pid);
		thread__set_comm(thread, comm, 0);
		thread__put(thread);
	}

	machine->current_tid = NULL;
	err = 0;

out:
	if (err) {
		zfree(&machine->root_dir);
		zfree(&machine->mmap_name);
	}
	return err;
}

struct machine *machine__new_host(void)
{
	struct machine *machine = malloc(sizeof(*machine));

	if (machine != NULL) {
		machine__init(machine, "", HOST_KERNEL_ID);

		if (machine__create_kernel_maps(machine) < 0)
			goto out_delete;
	}

	return machine;
out_delete:
	free(machine);
	return NULL;
}
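
/*
 * A minimal usage sketch (assuming the symbol subsystem was already set up
 * with symbol__init()):
 *
 *	struct machine *machine = machine__new_host();
 *
 *	if (machine != NULL) {
 *		... resolve samples against machine ...
 *		machine__delete(machine);
 *	}
 */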
struct machine *machine__new_kallsyms(void)
{
	struct machine *machine = machine__new_host();
	/*
	 * FIXME:
	 * 1) We should switch to machine__load_kallsyms(), i.e. not explicitly
	 *    ask for not using the kcore parsing code, once this one is fixed
	 *    to create a map per module.
	 */
	if (machine && machine__load_kallsyms(machine, "/proc/kallsyms") <= 0) {
		machine__delete(machine);
		machine = NULL;
	}

	return machine;
}

static void dsos__purge(struct dsos *dsos)
{
	struct dso *pos, *n;

	down_write(&dsos->lock);

	list_for_each_entry_safe(pos, n, &dsos->head, node) {
		RB_CLEAR_NODE(&pos->rb_node);
		pos->root = NULL;
		list_del_init(&pos->node);
		dso__put(pos);
	}

	up_write(&dsos->lock);
}

static void dsos__exit(struct dsos *dsos)
{
	dsos__purge(dsos);
	exit_rwsem(&dsos->lock);
}

void machine__delete_threads(struct machine *machine)
{
	struct rb_node *nd;
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];
		down_write(&threads->lock);
		nd = rb_first_cached(&threads->entries);
		while (nd) {
			struct thread *t = rb_entry(nd, struct thread, rb_node);

			nd = rb_next(nd);
			__machine__remove_thread(machine, t, false);
		}
		up_write(&threads->lock);
	}
}
void machine__exit(struct machine *machine)
{
	int i;

	if (machine == NULL)
		return;

	machine__destroy_kernel_maps(machine);
	maps__exit(&machine->kmaps);
	dsos__exit(&machine->dsos);
	machine__exit_vdso(machine);
	zfree(&machine->root_dir);
	zfree(&machine->mmap_name);
	zfree(&machine->current_tid);

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];
		struct thread *thread, *n;
		/*
		 * Forget about the dead: at this point whatever threads were
		 * left in the dead lists better have a reference count taken
		 * by whoever is using them, and then, when they drop those
		 * references and the count finally hits zero, thread__put()
		 * will check and see that it's not in the dead threads list
		 * and will not try to remove it from there, just calling
		 * thread__delete() straight away.
		 */
		list_for_each_entry_safe(thread, n, &threads->dead, node)
			list_del_init(&thread->node);

		exit_rwsem(&threads->lock);
	}
}

void machine__delete(struct machine *machine)
{
	if (machine) {
		machine__exit(machine);
		free(machine);
	}
}

void machines__init(struct machines *machines)
{
	machine__init(&machines->host, "", HOST_KERNEL_ID);
	machines->guests = RB_ROOT_CACHED;
}

void machines__exit(struct machines *machines)
{
	machine__exit(&machines->host);
	/* XXX exit guest */
}

struct machine *machines__add(struct machines *machines, pid_t pid,
			      const char *root_dir)
{
	struct rb_node **p = &machines->guests.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct machine *pos, *machine = malloc(sizeof(*machine));
	bool leftmost = true;

	if (machine == NULL)
		return NULL;

	if (machine__init(machine, root_dir, pid) != 0) {
		free(machine);
		return NULL;
	}

	while (*p != NULL) {
		parent = *p;
		pos = rb_entry(parent, struct machine, rb_node);
		if (pid < pos->pid)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&machine->rb_node, parent, p);
	rb_insert_color_cached(&machine->rb_node, &machines->guests, leftmost);

	return machine;
}

void machines__set_comm_exec(struct machines *machines, bool comm_exec)
{
	struct rb_node *nd;

	machines->host.comm_exec = comm_exec;

	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		machine->comm_exec = comm_exec;
	}
}

struct machine *machines__find(struct machines *machines, pid_t pid)
{
	struct rb_node **p = &machines->guests.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct machine *machine;
	struct machine *default_machine = NULL;

	if (pid == HOST_KERNEL_ID)
		return &machines->host;

	while (*p != NULL) {
		parent = *p;
		machine = rb_entry(parent, struct machine, rb_node);
		if (pid < machine->pid)
			p = &(*p)->rb_left;
		else if (pid > machine->pid)
			p = &(*p)->rb_right;
		else
			return machine;
		if (!machine->pid)
			default_machine = machine;
	}

	return default_machine;
}

struct machine *machines__findnew(struct machines *machines, pid_t pid)
{
	char path[PATH_MAX];
	const char *root_dir = "";
	struct machine *machine = machines__find(machines, pid);

	if (machine && (machine->pid == pid))
		goto out;

	if ((pid != HOST_KERNEL_ID) &&
	    (pid != DEFAULT_GUEST_KERNEL_ID) &&
	    (symbol_conf.guestmount)) {
		sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
		if (access(path, R_OK)) {
			static struct strlist *seen;

			if (!seen)
				seen = strlist__new(NULL, NULL);

			if (!strlist__has_entry(seen, path)) {
				pr_err("Can't access file %s\n", path);
				strlist__add(seen, path);
			}
			machine = NULL;
			goto out;
		}
		root_dir = path;
	}

	machine = machines__add(machines, pid, root_dir);
out:
	return machine;
}

void machines__process_guests(struct machines *machines,
			      machine__process_t process, void *data)
{
	struct rb_node *nd;

	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		process(pos, data);
	}
}
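
/*
 * id_hdr_size is the size of the sample ID trailer that gets appended to
 * non-sample events when sample_id_all is set; it has to be kept in sync
 * across the host and all guest machines so events are parsed consistently.
 */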
void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
{
	struct rb_node *node;
	struct machine *machine;

	machines->host.id_hdr_size = id_hdr_size;

	for (node = rb_first_cached(&machines->guests); node;
	     node = rb_next(node)) {
		machine = rb_entry(node, struct machine, rb_node);
		machine->id_hdr_size = id_hdr_size;
	}
}

static void machine__update_thread_pid(struct machine *machine,
				       struct thread *th, pid_t pid)
{
	struct thread *leader;

	if (pid == th->pid_ || pid == -1 || th->pid_ != -1)
		return;

	th->pid_ = pid;

	if (th->pid_ == th->tid)
		return;

	leader = __machine__findnew_thread(machine, th->pid_, th->pid_);
	if (!leader)
		goto out_err;

	if (!leader->maps)
		leader->maps = maps__new(machine);

	if (!leader->maps)
		goto out_err;

	if (th->maps == leader->maps)
		goto out_put;

	if (th->maps) {
		/*
		 * Maps are created from MMAP events which provide the pid and
		 * tid. Consequently there never should be any maps on a thread
		 * with an unknown pid. Just print an error if there are.
		 */
		if (!maps__empty(th->maps))
			pr_err("Discarding thread maps for %d:%d\n",
			       th->pid_, th->tid);
		maps__put(th->maps);
	}

	th->maps = maps__get(leader->maps);
out_put:
	thread__put(leader);
	return;
out_err:
	pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid);
	goto out_put;
}

/*
 * Front-end cache - TID lookups come in blocks,
 * so most of the time we don't have to look up
 * the full rbtree:
 */
static struct thread*
__threads__get_last_match(struct threads *threads, struct machine *machine,
			  int pid, int tid)
{
	struct thread *th;

	th = threads->last_match;
	if (th != NULL) {
		if (th->tid == tid) {
			machine__update_thread_pid(machine, th, pid);
			return thread__get(th);
		}

		threads->last_match = NULL;
	}

	return NULL;
}

static struct thread*
threads__get_last_match(struct threads *threads, struct machine *machine,
			int pid, int tid)
{
	struct thread *th = NULL;

	if (perf_singlethreaded)
		th = __threads__get_last_match(threads, machine, pid, tid);

	return th;
}

static void
__threads__set_last_match(struct threads *threads, struct thread *th)
{
	threads->last_match = th;
}

static void
threads__set_last_match(struct threads *threads, struct thread *th)
{
	if (perf_singlethreaded)
		__threads__set_last_match(threads, th);
}
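
/*
 * threads->lock must be held by the caller: for reading when the thread is
 * only looked up (create == false), for writing when it may be created and
 * inserted into the rb tree.
 */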
/*
 * Caller must eventually drop thread->refcnt returned with a successful
 * lookup/new thread inserted.
 */
static struct thread *____machine__findnew_thread(struct machine *machine,
						  struct threads *threads,
						  pid_t pid, pid_t tid,
						  bool create)
{
	struct rb_node **p = &threads->entries.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;
	bool leftmost = true;

	th = threads__get_last_match(threads, machine, pid, tid);
	if (th)
		return th;

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->tid == tid) {
			threads__set_last_match(threads, th);
			machine__update_thread_pid(machine, th, pid);
			return thread__get(th);
		}

		if (tid < th->tid)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	if (!create)
		return NULL;

	th = thread__new(pid, tid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color_cached(&th->rb_node, &threads->entries, leftmost);

		/*
		 * We have to initialize maps separately after the rb tree is
		 * updated.
		 *
		 * The reason is that we call machine__findnew_thread within
		 * thread__init_maps to find the thread leader and that would
		 * screw up the rb tree.
		 */
		if (thread__init_maps(th, machine)) {
			rb_erase_cached(&th->rb_node, &threads->entries);
			RB_CLEAR_NODE(&th->rb_node);
			thread__put(th);
			return NULL;
		}
		/*
		 * It is now in the rbtree, get a ref
		 */
		thread__get(th);
		threads__set_last_match(threads, th);
		++threads->nr;
	}

	return th;
}

struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid)
{
	return ____machine__findnew_thread(machine, machine__threads(machine, tid), pid, tid, true);
}

struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
				       pid_t tid)
{
	struct threads *threads = machine__threads(machine, tid);
	struct thread *th;

	down_write(&threads->lock);
	th = __machine__findnew_thread(machine, pid, tid);
	up_write(&threads->lock);
	return th;
}

struct thread *machine__find_thread(struct machine *machine, pid_t pid,
				    pid_t tid)
{
	struct threads *threads = machine__threads(machine, tid);
	struct thread *th;

	down_read(&threads->lock);
	th = ____machine__findnew_thread(machine, threads, pid, tid, false);
	up_read(&threads->lock);
	return th;
}

struct comm *machine__thread_exec_comm(struct machine *machine,
				       struct thread *thread)
{
	if (machine->comm_exec)
		return thread__exec_comm(thread);
	else
		return thread__comm(thread);
}

int machine__process_comm_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->comm.pid,
							event->comm.tid);
	bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;
	int err = 0;

	if (exec)
		machine->comm_exec = true;

	if (dump_trace)
		perf_event__fprintf_comm(event, stdout);

	if (thread == NULL ||
	    __thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
		err = -1;
	}

	thread__put(thread);

	return err;
}
int machine__process_namespaces_event(struct machine *machine,
				      union perf_event *event,
				      struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->namespaces.pid,
							event->namespaces.tid);
	int err = 0;

	WARN_ONCE(event->namespaces.nr_namespaces > NR_NAMESPACES,
		  "\nWARNING: kernel seems to support more namespaces than perf"
		  " tool.\nTry updating the perf tool.\n\n");

	WARN_ONCE(event->namespaces.nr_namespaces < NR_NAMESPACES,
		  "\nWARNING: perf tool seems to support more namespaces than"
		  " the kernel.\nTry updating the kernel.\n\n");

	if (dump_trace)
		perf_event__fprintf_namespaces(event, stdout);

	if (thread == NULL ||
	    thread__set_namespaces(thread, sample->time, &event->namespaces)) {
		dump_printf("problem processing PERF_RECORD_NAMESPACES, skipping event.\n");
		err = -1;
	}

	thread__put(thread);

	return err;
}

int machine__process_lost_event(struct machine *machine __maybe_unused,
				union perf_event *event, struct perf_sample *sample __maybe_unused)
{
	dump_printf(": id:%" PRI_lu64 ": lost:%" PRI_lu64 "\n",
		    event->lost.id, event->lost.lost);
	return 0;
}

int machine__process_lost_samples_event(struct machine *machine __maybe_unused,
					union perf_event *event, struct perf_sample *sample)
{
	dump_printf(": id:%" PRIu64 ": lost samples :%" PRI_lu64 "\n",
		    sample->id, event->lost_samples.lost);
	return 0;
}

static struct dso *machine__findnew_module_dso(struct machine *machine,
					       struct kmod_path *m,
					       const char *filename)
{
	struct dso *dso;

	down_write(&machine->dsos.lock);

	dso = __dsos__find(&machine->dsos, m->name, true);
	if (!dso) {
		dso = __dsos__addnew(&machine->dsos, m->name);
		if (dso == NULL)
			goto out_unlock;

		dso__set_module_info(dso, m, machine);
		dso__set_long_name(dso, strdup(filename), true);
	}

	dso__get(dso);
out_unlock:
	up_write(&machine->dsos.lock);
	return dso;
}

int machine__process_aux_event(struct machine *machine __maybe_unused,
			       union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_aux(event, stdout);
	return 0;
}

int machine__process_itrace_start_event(struct machine *machine __maybe_unused,
					union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_itrace_start(event, stdout);
	return 0;
}

int machine__process_switch_event(struct machine *machine __maybe_unused,
				  union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_switch(event, stdout);
	return 0;
}

static int machine__process_ksymbol_register(struct machine *machine,
					     union perf_event *event,
					     struct perf_sample *sample __maybe_unused)
{
	struct symbol *sym;
	struct map *map = maps__find(&machine->kmaps, event->ksymbol.addr);

	if (!map) {
		map = dso__new_map(event->ksymbol.name);
		if (!map)
			return -ENOMEM;

		map->start = event->ksymbol.addr;
		map->end = map->start + event->ksymbol.len;
		maps__insert(&machine->kmaps, map);
	}

	sym = symbol__new(map->map_ip(map, map->start),
			  event->ksymbol.len,
			  0, 0, event->ksymbol.name);
	if (!sym)
		return -ENOMEM;
	dso__insert_symbol(map->dso, sym);
	return 0;
}

static int machine__process_ksymbol_unregister(struct machine *machine,
					       union perf_event *event,
					       struct perf_sample *sample __maybe_unused)
{
	struct map *map;

	map = maps__find(&machine->kmaps, event->ksymbol.addr);
	if (map)
		maps__remove(&machine->kmaps, map);

	return 0;
}
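
/*
 * PERF_RECORD_KSYMBOL events describe kernel symbols that come and go at
 * runtime, e.g. BPF programs being JITed, so that their addresses can be
 * resolved like those of any other kernel symbol.
 */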
int machine__process_ksymbol(struct machine *machine,
			     union perf_event *event,
			     struct perf_sample *sample)
{
	if (dump_trace)
		perf_event__fprintf_ksymbol(event, stdout);

	if (event->ksymbol.flags & PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER)
		return machine__process_ksymbol_unregister(machine, event,
							   sample);
	return machine__process_ksymbol_register(machine, event, sample);
}

static struct map *machine__addnew_module_map(struct machine *machine, u64 start,
					      const char *filename)
{
	struct map *map = NULL;
	struct kmod_path m;
	struct dso *dso;

	if (kmod_path__parse_name(&m, filename))
		return NULL;

	dso = machine__findnew_module_dso(machine, &m, filename);
	if (dso == NULL)
		goto out;

	map = map__new2(start, dso);
	if (map == NULL)
		goto out;

	maps__insert(&machine->kmaps, map);

	/* Put the map here because maps__insert already got it */
	map__put(map);
out:
	/* put the dso here, corresponding to machine__findnew_module_dso */
	dso__put(dso);
	zfree(&m.name);
	return map;
}

size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = __dsos__fprintf(&machines->host.dsos.head, fp);

	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += __dsos__fprintf(&pos->dsos.head, fp);
	}

	return ret;
}

size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp,
				     bool (skip)(struct dso *dso, int parm), int parm)
{
	return __dsos__fprintf_buildid(&m->dsos.head, fp, skip, parm);
}

size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
				      bool (skip)(struct dso *dso, int parm), int parm)
{
	struct rb_node *nd;
	size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);

	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
	}
	return ret;
}

size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
{
	int i;
	size_t printed = 0;
	struct dso *kdso = machine__kernel_dso(machine);

	if (kdso->has_build_id) {
		char filename[PATH_MAX];
		if (dso__build_id_filename(kdso, filename, sizeof(filename),
					   false))
			printed += fprintf(fp, "[0] %s\n", filename);
	}

	for (i = 0; i < vmlinux_path__nr_entries; ++i)
		printed += fprintf(fp, "[%d] %s\n",
				   i + kdso->has_build_id, vmlinux_path[i]);

	return printed;
}

size_t machine__fprintf(struct machine *machine, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = 0;
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];

		down_read(&threads->lock);

		ret += fprintf(fp, "Threads: %u\n", threads->nr);

		for (nd = rb_first_cached(&threads->entries); nd;
		     nd = rb_next(nd)) {
			struct thread *pos = rb_entry(nd, struct thread, rb_node);

			ret += thread__fprintf(pos, fp);
		}

		up_read(&threads->lock);
	}
	return ret;
}
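
/*
 * Pick the DSO that will represent the kernel: an explicitly configured
 * vmlinux name (e.g. via --vmlinux) takes precedence over the default
 * mmap name such as "[kernel.kallsyms]".
 */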
static struct dso *machine__get_kernel(struct machine *machine)
{
	const char *vmlinux_name = machine->mmap_name;
	struct dso *kernel;

	if (machine__is_host(machine)) {
		if (symbol_conf.vmlinux_name)
			vmlinux_name = symbol_conf.vmlinux_name;

		kernel = machine__findnew_kernel(machine, vmlinux_name,
						 "[kernel]", DSO_TYPE_KERNEL);
	} else {
		if (symbol_conf.default_guest_vmlinux_name)
			vmlinux_name = symbol_conf.default_guest_vmlinux_name;

		kernel = machine__findnew_kernel(machine, vmlinux_name,
						 "[guest.kernel]",
						 DSO_TYPE_GUEST_KERNEL);
	}

	if (kernel != NULL && (!kernel->has_build_id))
		dso__read_running_kernel_build_id(kernel, machine);

	return kernel;
}

struct process_args {
	u64 start;
};

void machine__get_kallsyms_filename(struct machine *machine, char *buf,
				    size_t bufsz)
{
	if (machine__is_default_guest(machine))
		scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
	else
		scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
}

const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};

/* Figure out the start address of kernel map from /proc/kallsyms.
 * Returns the name of the start symbol in *symbol_name. Pass in NULL as
 * symbol_name if it's not that important.
 */
static int machine__get_running_kernel_start(struct machine *machine,
					     const char **symbol_name,
					     u64 *start, u64 *end)
{
	char filename[PATH_MAX];
	int i, err = -1;
	const char *name;
	u64 addr = 0;

	machine__get_kallsyms_filename(machine, filename, PATH_MAX);

	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
		return 0;

	for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
		err = kallsyms__get_function_start(filename, name, &addr);
		if (!err)
			break;
	}

	if (err)
		return -1;

	if (symbol_name)
		*symbol_name = name;

	*start = addr;

	err = kallsyms__get_function_start(filename, "_etext", &addr);
	if (!err)
		*end = addr;

	return 0;
}

int machine__create_extra_kernel_map(struct machine *machine,
				     struct dso *kernel,
				     struct extra_kernel_map *xm)
{
	struct kmap *kmap;
	struct map *map;

	map = map__new2(xm->start, kernel);
	if (!map)
		return -1;

	map->end = xm->end;
	map->pgoff = xm->pgoff;

	kmap = map__kmap(map);

	kmap->kmaps = &machine->kmaps;
	strlcpy(kmap->name, xm->name, KMAP_NAME_LEN);

	maps__insert(&machine->kmaps, map);

	pr_debug2("Added extra kernel map %s %" PRIx64 "-%" PRIx64 "\n",
		  kmap->name, map->start, map->end);

	map__put(map);

	return 0;
}

static u64 find_entry_trampoline(struct dso *dso)
{
	/* Duplicates are removed so lookup all aliases */
	const char *syms[] = {
		"_entry_trampoline",
		"__entry_trampoline_start",
		"entry_SYSCALL_64_trampoline",
	};
	struct symbol *sym = dso__first_symbol(dso);
	unsigned int i;

	for (; sym; sym = dso__next_symbol(sym)) {
		if (sym->binding != STB_GLOBAL)
			continue;
		for (i = 0; i < ARRAY_SIZE(syms); i++) {
			if (!strcmp(sym->name, syms[i]))
				return sym->start;
		}
	}

	return 0;
}

/*
 * These values can be used for kernels that do not have symbols for the entry
 * trampolines in kallsyms.
 */
#define X86_64_CPU_ENTRY_AREA_PER_CPU	0xfffffe0000000000ULL
#define X86_64_CPU_ENTRY_AREA_SIZE	0x2c000
#define X86_64_ENTRY_TRAMPOLINE		0x6000
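
/*
 * A worked example of the layout implied by the values above: the entry
 * trampoline of CPU n is expected at
 *
 *	0xfffffe0000000000 + n * 0x2c000 + 0x6000
 *
 * i.e. CPU 0 maps at 0xfffffe0000006000, CPU 1 at 0xfffffe0000032000, and
 * so on, one page per CPU.
 */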
/* Map x86_64 PTI entry trampolines */
int machine__map_x86_64_entry_trampolines(struct machine *machine,
					  struct dso *kernel)
{
	struct maps *kmaps = &machine->kmaps;
	int nr_cpus_avail, cpu;
	bool found = false;
	struct map *map;
	u64 pgoff;

	/*
	 * In the vmlinux case, pgoff is a virtual address which must now be
	 * mapped to a vmlinux offset.
	 */
	maps__for_each_entry(kmaps, map) {
		struct kmap *kmap = __map__kmap(map);
		struct map *dest_map;

		if (!kmap || !is_entry_trampoline(kmap->name))
			continue;

		dest_map = maps__find(kmaps, map->pgoff);
		if (dest_map && dest_map != map)
			map->pgoff = dest_map->map_ip(dest_map, map->pgoff);
		found = true;
	}
	if (found || machine->trampolines_mapped)
		return 0;

	pgoff = find_entry_trampoline(kernel);
	if (!pgoff)
		return 0;

	nr_cpus_avail = machine__nr_cpus_avail(machine);

	/* Add a 1 page map for each CPU's entry trampoline */
	for (cpu = 0; cpu < nr_cpus_avail; cpu++) {
		u64 va = X86_64_CPU_ENTRY_AREA_PER_CPU +
			 cpu * X86_64_CPU_ENTRY_AREA_SIZE +
			 X86_64_ENTRY_TRAMPOLINE;
		struct extra_kernel_map xm = {
			.start = va,
			.end   = va + page_size,
			.pgoff = pgoff,
		};

		strlcpy(xm.name, ENTRY_TRAMPOLINE_NAME, KMAP_NAME_LEN);

		if (machine__create_extra_kernel_map(machine, kernel, &xm) < 0)
			return -1;
	}

	machine->trampolines_mapped = nr_cpus_avail;

	return 0;
}

int __weak machine__create_extra_kernel_maps(struct machine *machine __maybe_unused,
					     struct dso *kernel __maybe_unused)
{
	return 0;
}

static int
__machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
{
	struct kmap *kmap;
	struct map *map;

	/* In case of renewal of the kernel map, destroy the previous one */
	machine__destroy_kernel_maps(machine);

	machine->vmlinux_map = map__new2(0, kernel);
	if (machine->vmlinux_map == NULL)
		return -1;

	machine->vmlinux_map->map_ip = machine->vmlinux_map->unmap_ip = identity__map_ip;
	map = machine__kernel_map(machine);
	kmap = map__kmap(map);
	if (!kmap)
		return -1;

	kmap->kmaps = &machine->kmaps;
	maps__insert(&machine->kmaps, map);

	return 0;
}

void machine__destroy_kernel_maps(struct machine *machine)
{
	struct kmap *kmap;
	struct map *map = machine__kernel_map(machine);

	if (map == NULL)
		return;

	kmap = map__kmap(map);
	maps__remove(&machine->kmaps, map);
	if (kmap && kmap->ref_reloc_sym) {
		zfree((char **)&kmap->ref_reloc_sym->name);
		zfree(&kmap->ref_reloc_sym);
	}

	map__zput(machine->vmlinux_map);
}
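
/*
 * Guest kernels are looked up under symbol_conf.guestmount, with one
 * directory per guest, named after its pid and expected to provide a
 * <guestmount>/<pid>/proc/kallsyms copy.
 */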
int machines__create_guest_kernel_maps(struct machines *machines)
{
	int ret = 0;
	struct dirent **namelist = NULL;
	int i, items = 0;
	char path[PATH_MAX];
	pid_t pid;
	char *endp;

	if (symbol_conf.default_guest_vmlinux_name ||
	    symbol_conf.default_guest_modules ||
	    symbol_conf.default_guest_kallsyms) {
		machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
	}

	if (symbol_conf.guestmount) {
		items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
		if (items <= 0)
			return -ENOENT;
		for (i = 0; i < items; i++) {
			if (!isdigit(namelist[i]->d_name[0])) {
				/* Filter out . and .. */
				continue;
			}
			pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
			if ((*endp != '\0') ||
			    (endp == namelist[i]->d_name) ||
			    (errno == ERANGE)) {
				pr_debug("invalid directory (%s). Skipping.\n",
					 namelist[i]->d_name);
				continue;
			}
			sprintf(path, "%s/%s/proc/kallsyms",
				symbol_conf.guestmount,
				namelist[i]->d_name);
			ret = access(path, R_OK);
			if (ret) {
				pr_debug("Can't access file %s\n", path);
				goto failure;
			}
			machines__create_kernel_maps(machines, pid);
		}
failure:
		free(namelist);
	}

	return ret;
}

void machines__destroy_kernel_maps(struct machines *machines)
{
	struct rb_node *next = rb_first_cached(&machines->guests);

	machine__destroy_kernel_maps(&machines->host);

	while (next) {
		struct machine *pos = rb_entry(next, struct machine, rb_node);

		next = rb_next(&pos->rb_node);
		rb_erase_cached(&pos->rb_node, &machines->guests);
		machine__delete(pos);
	}
}

int machines__create_kernel_maps(struct machines *machines, pid_t pid)
{
	struct machine *machine = machines__findnew(machines, pid);

	if (machine == NULL)
		return -1;

	return machine__create_kernel_maps(machine);
}
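
/*
 * Follows the dso__load*() convention: returns the number of symbols
 * loaded on success (> 0), 0 if none were found, negative on error.
 */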
int machine__load_kallsyms(struct machine *machine, const char *filename)
{
	struct map *map = machine__kernel_map(machine);
	int ret = __dso__load_kallsyms(map->dso, filename, map, true);

	if (ret > 0) {
		dso__set_loaded(map->dso);
		/*
		 * Since /proc/kallsyms will have multiple sections for the
		 * kernel, with modules between them, fixup the end of all
		 * sections.
		 */
		maps__fixup_end(&machine->kmaps);
	}

	return ret;
}

int machine__load_vmlinux_path(struct machine *machine)
{
	struct map *map = machine__kernel_map(machine);
	int ret = dso__load_vmlinux_path(map->dso, map);

	if (ret > 0)
		dso__set_loaded(map->dso);

	return ret;
}

static char *get_kernel_version(const char *root_dir)
{
	char version[PATH_MAX];
	FILE *file;
	char *name, *tmp;
	const char *prefix = "Linux version ";

	sprintf(version, "%s/proc/version", root_dir);
	file = fopen(version, "r");
	if (!file)
		return NULL;

	tmp = fgets(version, sizeof(version), file);
	fclose(file);
	if (!tmp)
		return NULL;

	name = strstr(version, prefix);
	if (!name)
		return NULL;
	name += strlen(prefix);
	tmp = strchr(name, ' ');
	if (tmp)
		*tmp = '\0';

	return strdup(name);
}

static bool is_kmod_dso(struct dso *dso)
{
	return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
	       dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
}

static int maps__set_module_path(struct maps *maps, const char *path, struct kmod_path *m)
{
	char *long_name;
	struct map *map = maps__find_by_name(maps, m->name);

	if (map == NULL)
		return 0;

	long_name = strdup(path);
	if (long_name == NULL)
		return -ENOMEM;

	dso__set_long_name(map->dso, long_name, true);
	dso__kernel_module_get_build_id(map->dso, "");

	/*
	 * The full name could reveal the kmod compression, so we
	 * need to update the symtab_type if needed.
	 */
	if (m->comp && is_kmod_dso(map->dso)) {
		map->dso->symtab_type++;
		map->dso->comp = m->comp;
	}

	return 0;
}

static int maps__set_modules_path_dir(struct maps *maps, const char *dir_name, int depth)
{
	struct dirent *dent;
	DIR *dir = opendir(dir_name);
	int ret = 0;

	if (!dir) {
		pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
		return -1;
	}

	while ((dent = readdir(dir)) != NULL) {
		char path[PATH_MAX];
		struct stat st;

		/* sshfs might return bad dent->d_type, so we have to stat */
		snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
		if (stat(path, &st))
			continue;

		if (S_ISDIR(st.st_mode)) {
			if (!strcmp(dent->d_name, ".") ||
			    !strcmp(dent->d_name, ".."))
				continue;

			/* Do not follow top-level source and build symlinks */
			if (depth == 0) {
				if (!strcmp(dent->d_name, "source") ||
				    !strcmp(dent->d_name, "build"))
					continue;
			}

			ret = maps__set_modules_path_dir(maps, path, depth + 1);
			if (ret < 0)
				goto out;
		} else {
			struct kmod_path m;

			ret = kmod_path__parse_name(&m, dent->d_name);
			if (ret)
				goto out;

			if (m.kmod)
				ret = maps__set_module_path(maps, path, &m);

			zfree(&m.name);

			if (ret)
				goto out;
		}
	}

out:
	closedir(dir);
	return ret;
}

static int machine__set_modules_path(struct machine *machine)
{
	char *version;
	char modules_path[PATH_MAX];

	version = get_kernel_version(machine->root_dir);
	if (!version)
		return -1;

	snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
		 machine->root_dir, version);
	free(version);

	return maps__set_modules_path_dir(&machine->kmaps, modules_path, 0);
}
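
/*
 * Arch hook to adjust a module's text start and size; a weak no-op here,
 * overridden e.g. on s390 where the module base differs from the start of
 * the text section.
 */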
int __weak arch__fix_module_text_start(u64 *start __maybe_unused,
				       u64 *size __maybe_unused,
				       const char *name __maybe_unused)
{
	return 0;
}

static int machine__create_module(void *arg, const char *name, u64 start,
				  u64 size)
{
	struct machine *machine = arg;
	struct map *map;

	if (arch__fix_module_text_start(&start, &size, name) < 0)
		return -1;

	map = machine__addnew_module_map(machine, start, name);
	if (map == NULL)
		return -1;
	map->end = start + size;

	dso__kernel_module_get_build_id(map->dso, machine->root_dir);

	return 0;
}

static int machine__create_modules(struct machine *machine)
{
	const char *modules;
	char path[PATH_MAX];

	if (machine__is_default_guest(machine)) {
		modules = symbol_conf.default_guest_modules;
	} else {
		snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
		modules = path;
	}

	if (symbol__restricted_filename(modules, "/proc/modules"))
		return -1;

	if (modules__parse(modules, machine, machine__create_module))
		return -1;

	if (!machine__set_modules_path(machine))
		return 0;

	pr_debug("Problems setting modules path maps, continuing anyway...\n");

	return 0;
}

static void machine__set_kernel_mmap(struct machine *machine,
				     u64 start, u64 end)
{
	machine->vmlinux_map->start = start;
	machine->vmlinux_map->end = end;
	/*
	 * Be a bit paranoid here, some perf.data files came with
	 * a zero sized synthesized MMAP event for the kernel.
	 */
	if (start == 0 && end == 0)
		machine->vmlinux_map->end = ~0ULL;
}
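
/*
 * The kmaps tree is ordered by address, so changing the kernel map's range
 * requires removing and re-inserting it; hold a reference across the
 * removal so the map cannot be destroyed in between.
 */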
static void machine__update_kernel_mmap(struct machine *machine,
					u64 start, u64 end)
{
	struct map *map = machine__kernel_map(machine);

	map__get(map);
	maps__remove(&machine->kmaps, map);

	machine__set_kernel_mmap(machine, start, end);

	maps__insert(&machine->kmaps, map);
	map__put(map);
}

int machine__create_kernel_maps(struct machine *machine)
{
	struct dso *kernel = machine__get_kernel(machine);
	const char *name = NULL;
	struct map *map;
	u64 start = 0, end = ~0ULL;
	int ret;

	if (kernel == NULL)
		return -1;

	ret = __machine__create_kernel_maps(machine, kernel);
	if (ret < 0)
		goto out_put;

	if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
		if (machine__is_host(machine))
			pr_debug("Problems creating module maps, "
				 "continuing anyway...\n");
		else
			pr_debug("Problems creating module maps for guest %d, "
				 "continuing anyway...\n", machine->pid);
	}

	if (!machine__get_running_kernel_start(machine, &name, &start, &end)) {
		if (name &&
		    map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map, name, start)) {
			machine__destroy_kernel_maps(machine);
			ret = -1;
			goto out_put;
		}

		/*
		 * We have a real start address now, so re-order the kmaps;
		 * assume it's the last in the kmaps.
		 */
		machine__update_kernel_mmap(machine, start, end);
	}

	if (machine__create_extra_kernel_maps(machine, kernel))
		pr_debug("Problems creating extra kernel maps, continuing anyway...\n");

	if (end == ~0ULL) {
		/* update end address of the kernel map using adjacent module address */
		map = map__next(machine__kernel_map(machine));
		if (map)
			machine__set_kernel_mmap(machine, start, map->start);
	}

out_put:
	dso__put(kernel);
	return ret;
}

static bool machine__uses_kcore(struct machine *machine)
{
	struct dso *dso;

	list_for_each_entry(dso, &machine->dsos.head, node) {
		if (dso__is_kcore(dso))
			return true;
	}

	return false;
}

static bool perf_event__is_extra_kernel_mmap(struct machine *machine,
					     union perf_event *event)
{
	return machine__is(machine, "x86_64") &&
	       is_entry_trampoline(event->mmap.filename);
}

static int machine__process_extra_kernel_map(struct machine *machine,
					     union perf_event *event)
{
	struct dso *kernel = machine__kernel_dso(machine);
	struct extra_kernel_map xm = {
		.start = event->mmap.start,
		.end   = event->mmap.start + event->mmap.len,
		.pgoff = event->mmap.pgoff,
	};

	if (kernel == NULL)
		return -1;

	strlcpy(xm.name, event->mmap.filename, KMAP_NAME_LEN);

	return machine__create_extra_kernel_map(machine, kernel, &xm);
}
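
/*
 * Kernel MMAP events are dispatched three ways below: filenames starting
 * with '/' or '[' that are not the kernel proper become module maps, a
 * filename matching machine->mmap_name updates the main kernel map, and
 * x86_64 entry trampolines are handled as extra kernel maps.
 */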
static int machine__process_kernel_mmap_event(struct machine *machine,
					      union perf_event *event)
{
	struct map *map;
	enum dso_kernel_type kernel_type;
	bool is_kernel_mmap;

	/* If we have maps from kcore then we do not need or want any others */
	if (machine__uses_kcore(machine))
		return 0;

	if (machine__is_host(machine))
		kernel_type = DSO_TYPE_KERNEL;
	else
		kernel_type = DSO_TYPE_GUEST_KERNEL;

	is_kernel_mmap = memcmp(event->mmap.filename,
				machine->mmap_name,
				strlen(machine->mmap_name) - 1) == 0;
	if (event->mmap.filename[0] == '/' ||
	    (!is_kernel_mmap && event->mmap.filename[0] == '[')) {
		map = machine__addnew_module_map(machine, event->mmap.start,
						 event->mmap.filename);
		if (map == NULL)
			goto out_problem;

		map->end = map->start + event->mmap.len;
	} else if (is_kernel_mmap) {
		const char *symbol_name = (event->mmap.filename +
					   strlen(machine->mmap_name));
		/*
		 * Should be there already, from the build-id table in
		 * the header.
		 */
		struct dso *kernel = NULL;
		struct dso *dso;

		down_read(&machine->dsos.lock);

		list_for_each_entry(dso, &machine->dsos.head, node) {

			/*
			 * The cpumode passed to is_kernel_module is not the
			 * cpumode of *this* event. If we insist on passing
			 * the correct cpumode to is_kernel_module, we should
			 * record the cpumode when we add this dso to the
			 * linked list.
			 *
			 * However we don't really need to pass the correct
			 * cpumode. We know the correct cpumode must be kernel
			 * mode (if not, we should not link it onto the
			 * kernel_dsos list).
			 *
			 * Therefore, we pass PERF_RECORD_MISC_CPUMODE_UNKNOWN.
			 * is_kernel_module() treats it as a kernel cpumode.
			 */
			if (!dso->kernel ||
			    is_kernel_module(dso->long_name,
					     PERF_RECORD_MISC_CPUMODE_UNKNOWN))
				continue;

			kernel = dso;
			break;
		}

		up_read(&machine->dsos.lock);

		if (kernel == NULL)
			kernel = machine__findnew_dso(machine, machine->mmap_name);
		if (kernel == NULL)
			goto out_problem;

		kernel->kernel = kernel_type;
		if (__machine__create_kernel_maps(machine, kernel) < 0) {
			dso__put(kernel);
			goto out_problem;
		}

		if (strstr(kernel->long_name, "vmlinux"))
			dso__set_short_name(kernel, "[kernel.vmlinux]", false);

		machine__update_kernel_mmap(machine, event->mmap.start,
					    event->mmap.start + event->mmap.len);

		/*
		 * Avoid using a zero address (kptr_restrict) for the ref reloc
		 * symbol. Effectively having zero here means that at record
		 * time /proc/sys/kernel/kptr_restrict was non zero.
		 */
		if (event->mmap.pgoff != 0) {
			map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map,
							symbol_name,
							event->mmap.pgoff);
		}

		if (machine__is_default_guest(machine)) {
			/*
			 * preload dso of guest kernel and modules
			 */
			dso__load(kernel, machine__kernel_map(machine));
		}
	} else if (perf_event__is_extra_kernel_mmap(machine, event)) {
		return machine__process_extra_kernel_map(machine, event);
	}
	return 0;
out_problem:
	return -1;
}

int machine__process_mmap2_event(struct machine *machine,
				 union perf_event *event,
				 struct perf_sample *sample)
{
	struct thread *thread;
	struct map *map;
	struct dso_id dso_id = {
		.maj = event->mmap2.maj,
		.min = event->mmap2.min,
		.ino = event->mmap2.ino,
		.ino_generation = event->mmap2.ino_generation,
	};
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap2(event, stdout);

	if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    sample->cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap2.pid,
					 event->mmap2.tid);
	if (thread == NULL)
		goto out_problem;

	map = map__new(machine, event->mmap2.start,
		       event->mmap2.len, event->mmap2.pgoff,
		       &dso_id, event->mmap2.prot,
		       event->mmap2.flags,
		       event->mmap2.filename, thread);

	if (map == NULL)
		goto out_problem_map;

	ret = thread__insert_map(thread, map);
	if (ret)
		goto out_problem_insert;

	thread__put(thread);
	map__put(map);
	return 0;

out_problem_insert:
	map__put(map);
out_problem_map:
	thread__put(thread);
out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
	return 0;
}
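
/*
 * Legacy PERF_RECORD_MMAP events carry no dso id, prot or flags, so the
 * protection has to be inferred: anything not marked
 * PERF_RECORD_MISC_MMAP_DATA is assumed to be executable.
 */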
int machine__process_mmap_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread;
	struct map *map;
	u32 prot = 0;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap(event, stdout);

	if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    sample->cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap.pid,
					 event->mmap.tid);
	if (thread == NULL)
		goto out_problem;

	if (!(event->header.misc & PERF_RECORD_MISC_MMAP_DATA))
		prot = PROT_EXEC;

	map = map__new(machine, event->mmap.start,
		       event->mmap.len, event->mmap.pgoff,
		       NULL, prot, 0, event->mmap.filename, thread);

	if (map == NULL)
		goto out_problem_map;

	ret = thread__insert_map(thread, map);
	if (ret)
		goto out_problem_insert;

	thread__put(thread);
	map__put(map);
	return 0;

out_problem_insert:
	map__put(map);
out_problem_map:
	thread__put(thread);
out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
	return 0;
}

static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock)
{
	struct threads *threads = machine__threads(machine, th->tid);

	if (threads->last_match == th)
		threads__set_last_match(threads, NULL);

	if (lock)
		down_write(&threads->lock);

	BUG_ON(refcount_read(&th->refcnt) == 0);

	rb_erase_cached(&th->rb_node, &threads->entries);
	RB_CLEAR_NODE(&th->rb_node);
	--threads->nr;
	/*
	 * Move it first to the dead_threads list, then drop the reference;
	 * if this is the last reference, then the thread__delete destructor
	 * will be called and we will remove it from the dead_threads list.
	 */
	list_add_tail(&th->node, &threads->dead);

	/*
	 * We need to do the put here because if this is the last refcount,
	 * then we will be touching the threads->dead head when removing the
	 * thread.
	 */
	thread__put(th);

	if (lock)
		up_write(&threads->lock);
}

void machine__remove_thread(struct machine *machine, struct thread *th)
{
	return __machine__remove_thread(machine, th, true);
}
int machine__process_fork_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);
	struct thread *parent = machine__findnew_thread(machine,
							event->fork.ppid,
							event->fork.ptid);
	bool do_maps_clone = true;
	int err = 0;

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	/*
	 * There may be an existing thread that is not actually the parent,
	 * either because we are processing events out of order, or because the
	 * (fork) event that would have removed the thread was lost. Assume the
	 * latter case and continue on as best we can.
	 */
	if (parent->pid_ != (pid_t)event->fork.ppid) {
		dump_printf("removing erroneous parent thread %d/%d\n",
			    parent->pid_, parent->tid);
		machine__remove_thread(machine, parent);
		thread__put(parent);
		parent = machine__findnew_thread(machine, event->fork.ppid,
						 event->fork.ptid);
	}

	/* if a thread currently exists for the thread id remove it */
	if (thread != NULL) {
		machine__remove_thread(machine, thread);
		thread__put(thread);
	}

	thread = machine__findnew_thread(machine, event->fork.pid,
					 event->fork.tid);
	/*
	 * When synthesizing FORK events, we are trying to create thread
	 * objects for the already running tasks on the machine.
	 *
	 * Normally, for a kernel FORK event, we want to clone the parent's
	 * maps because that is what the kernel just did.
	 *
	 * But when synthesizing, this should not be done. If we do, we end up
	 * with overlapping maps as we process the synthesized MMAP2 events
	 * that get delivered shortly thereafter.
	 *
	 * Use the FORK event misc flags in an internal way to signal this
	 * situation, so we can elide the map clone when appropriate.
	 */
	if (event->fork.header.misc & PERF_RECORD_MISC_FORK_EXEC)
		do_maps_clone = false;

	if (thread == NULL || parent == NULL ||
	    thread__fork(thread, parent, sample->time, do_maps_clone) < 0) {
		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
		err = -1;
	}
	thread__put(thread);
	thread__put(parent);

	return err;
}

int machine__process_exit_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample __maybe_unused)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	if (thread != NULL) {
		thread__exited(thread);
		thread__put(thread);
	}

	return 0;
}

int machine__process_event(struct machine *machine, union perf_event *event,
			   struct perf_sample *sample)
{
	int ret;

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret = machine__process_comm_event(machine, event, sample); break;
	case PERF_RECORD_MMAP:
		ret = machine__process_mmap_event(machine, event, sample); break;
	case PERF_RECORD_NAMESPACES:
		ret = machine__process_namespaces_event(machine, event, sample); break;
	case PERF_RECORD_MMAP2:
		ret = machine__process_mmap2_event(machine, event, sample); break;
	case PERF_RECORD_FORK:
		ret = machine__process_fork_event(machine, event, sample); break;
	case PERF_RECORD_EXIT:
		ret = machine__process_exit_event(machine, event, sample); break;
	case PERF_RECORD_LOST:
		ret = machine__process_lost_event(machine, event, sample); break;
	case PERF_RECORD_AUX:
		ret = machine__process_aux_event(machine, event); break;
	case PERF_RECORD_ITRACE_START:
		ret = machine__process_itrace_start_event(machine, event); break;
	case PERF_RECORD_LOST_SAMPLES:
		ret = machine__process_lost_samples_event(machine, event, sample); break;
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		ret = machine__process_switch_event(machine, event); break;
	case PERF_RECORD_KSYMBOL:
		ret = machine__process_ksymbol(machine, event, sample); break;
	case PERF_RECORD_BPF_EVENT:
		ret = machine__process_bpf(machine, event, sample); break;
	default:
		ret = -1;
		break;
	}

	return ret;
}

static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
{
	if (!regexec(regex, sym->name, 0, NULL, 0))
		return 1;
	return 0;
}
static void ip__resolve_ams(struct thread *thread,
			    struct addr_map_symbol *ams,
			    u64 ip)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));
	/*
	 * We cannot use the header.misc hint to determine whether a
	 * branch stack address is user, kernel, guest, hypervisor.
	 * Branches may straddle the kernel/user/hypervisor boundaries.
	 * Thus, we have to try consecutively until we find a match
	 * or else the symbol is unknown.
	 */
	thread__find_cpumode_addr_location(thread, ip, &al);

	ams->addr = ip;
	ams->al_addr = al.addr;
	ams->ms.maps = al.maps;
	ams->ms.sym = al.sym;
	ams->ms.map = al.map;
	ams->phys_addr = 0;
}

static void ip__resolve_data(struct thread *thread,
			     u8 m, struct addr_map_symbol *ams,
			     u64 addr, u64 phys_addr)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));

	thread__find_symbol(thread, m, addr, &al);

	ams->addr = addr;
	ams->al_addr = al.addr;
	ams->ms.maps = al.maps;
	ams->ms.sym = al.sym;
	ams->ms.map = al.map;
	ams->phys_addr = phys_addr;
}

struct mem_info *sample__resolve_mem(struct perf_sample *sample,
				     struct addr_location *al)
{
	struct mem_info *mi = mem_info__new();

	if (!mi)
		return NULL;

	ip__resolve_ams(al->thread, &mi->iaddr, sample->ip);
	ip__resolve_data(al->thread, al->cpumode, &mi->daddr,
			 sample->addr, sample->phys_addr);
	mi->data_src.val = sample->data_src;

	return mi;
}

static char *callchain_srcline(struct map_symbol *ms, u64 ip)
{
	struct map *map = ms->map;
	char *srcline = NULL;

	if (!map || callchain_param.key == CCKEY_FUNCTION)
		return srcline;

	srcline = srcline__tree_find(&map->dso->srclines, ip);
	if (!srcline) {
		bool show_sym = false;
		bool show_addr = callchain_param.key == CCKEY_ADDRESS;

		srcline = get_srcline(map->dso, map__rip_2objdump(map, ip),
				      ms->sym, show_sym, show_addr, ip);
		srcline__tree_insert(&map->dso->srclines, ip, srcline);
	}

	return srcline;
}

struct iterations {
	int nr_loop_iter;
	u64 cycles;
};
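
/*
 * Resolve one callchain entry and append it to the cursor. Values at or
 * above PERF_CONTEXT_MAX are not real addresses but context markers that
 * switch the cpumode for the entries that follow them.
 */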
static int add_callchain_ip(struct thread *thread,
			    struct callchain_cursor *cursor,
			    struct symbol **parent,
			    struct addr_location *root_al,
			    u8 *cpumode,
			    u64 ip,
			    bool branch,
			    struct branch_flags *flags,
			    struct iterations *iter,
			    u64 branch_from)
{
	struct map_symbol ms;
	struct addr_location al;
	int nr_loop_iter = 0;
	u64 iter_cycles = 0;
	const char *srcline = NULL;

	al.filtered = 0;
	al.sym = NULL;
	if (!cpumode) {
		thread__find_cpumode_addr_location(thread, ip, &al);
	} else {
		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				*cpumode = PERF_RECORD_MISC_HYPERVISOR;
				break;
			case PERF_CONTEXT_KERNEL:
				*cpumode = PERF_RECORD_MISC_KERNEL;
				break;
			case PERF_CONTEXT_USER:
				*cpumode = PERF_RECORD_MISC_USER;
				break;
			default:
				pr_debug("invalid callchain context: "
					 "%"PRId64"\n", (s64) ip);
				/*
				 * It seems the callchain is corrupted.
				 * Discard all.
				 */
				callchain_cursor_reset(cursor);
				return 1;
			}
			return 0;
		}
		thread__find_symbol(thread, *cpumode, ip, &al);
	}

	if (al.sym != NULL) {
		if (perf_hpp_list.parent && !*parent &&
		    symbol__match_regex(al.sym, &parent_regex))
			*parent = al.sym;
		else if (have_ignore_callees && root_al &&
			 symbol__match_regex(al.sym, &ignore_callees_regex)) {
			/*
			 * Treat this symbol as the root, forgetting its
			 * callees.
			 */
			*root_al = al;
			callchain_cursor_reset(cursor);
		}
	}

	if (symbol_conf.hide_unresolved && al.sym == NULL)
		return 0;

	if (iter) {
		nr_loop_iter = iter->nr_loop_iter;
		iter_cycles = iter->cycles;
	}

	ms.maps = al.maps;
	ms.map = al.map;
	ms.sym = al.sym;
	srcline = callchain_srcline(&ms, al.addr);
	return callchain_cursor_append(cursor, ip, &ms,
				       branch, flags, nr_loop_iter,
				       iter_cycles, branch_from, srcline);
}

struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
					   struct addr_location *al)
{
	unsigned int i;
	const struct branch_stack *bs = sample->branch_stack;
	struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));

	if (!bi)
		return NULL;

	for (i = 0; i < bs->nr; i++) {
		ip__resolve_ams(al->thread, &bi[i].to, bs->entries[i].to);
		ip__resolve_ams(al->thread, &bi[i].from, bs->entries[i].from);
		bi[i].flags = bs->entries[i].flags;
	}
	return bi;
}

static void save_iterations(struct iterations *iter,
			    struct branch_entry *be, int nr)
{
	int i;

	iter->nr_loop_iter++;
	iter->cycles = 0;

	for (i = 0; i < nr; i++)
		iter->cycles += be[i].flags.cycles;
}

#define CHASHSZ 127
#define CHASHBITS 7
#define NO_ENTRY 0xff

#define PERF_MAX_BRANCH_DEPTH 127

/* Remove loops. */
static int remove_loops(struct branch_entry *l, int nr,
			struct iterations *iter)
{
	int i, j, off;
	unsigned char chash[CHASHSZ];

	memset(chash, NO_ENTRY, sizeof(chash));

	BUG_ON(PERF_MAX_BRANCH_DEPTH > 255);

	for (i = 0; i < nr; i++) {
		int h = hash_64(l[i].from, CHASHBITS) % CHASHSZ;

		/* no collision handling for now */
		if (chash[h] == NO_ENTRY) {
			chash[h] = i;
		} else if (l[chash[h]].from == l[i].from) {
			bool is_loop = true;
			/* check if it is a real loop */
			off = 0;
			for (j = chash[h]; j < i && i + off < nr; j++, off++)
				if (l[j].from != l[i + off].from) {
					is_loop = false;
					break;
				}
			if (is_loop) {
				j = nr - (i + off);
				if (j > 0) {
					save_iterations(iter + i + off,
							l + i, off);

					memmove(iter + i, iter + i + off,
						j * sizeof(*iter));

					memmove(l + i, l + i + off,
						j * sizeof(*l));
				}

				nr -= off;
			}
		}
	}
	return nr;
}
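
/*
 * A sketch of the mixed chain built by resolve_lbr_callchain_sample()
 * below: for ORDER_CALLEE, with kernel entries K0..Ki-1 followed by the
 * PERF_CONTEXT_USER marker and lbr_nr LBR entries, the cursor receives
 *
 *	K0, ..., Ki-1, PERF_CONTEXT_USER,
 *	lbr[0].to, lbr[0].from, lbr[1].from, ..., lbr[lbr_nr - 1].from
 */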
/*
 * Resolve LBR callstack chain sample
 * Return:
 * 1 on success: got LBR callchain information
 * 0 when no LBR callchain information is available; fall back to fp
 * negative error code on other errors.
 */
static int resolve_lbr_callchain_sample(struct thread *thread,
					struct callchain_cursor *cursor,
					struct perf_sample *sample,
					struct symbol **parent,
					struct addr_location *root_al,
					int max_stack)
{
	struct ip_callchain *chain = sample->callchain;
	int chain_nr = min(max_stack, (int)chain->nr), i;
	u8 cpumode = PERF_RECORD_MISC_USER;
	u64 ip, branch_from = 0;

	for (i = 0; i < chain_nr; i++) {
		if (chain->ips[i] == PERF_CONTEXT_USER)
			break;
	}

	/* LBR only affects the user callchain */
	if (i != chain_nr) {
		struct branch_stack *lbr_stack = sample->branch_stack;
		int lbr_nr = lbr_stack->nr, j, k;
		bool branch;
		struct branch_flags *flags;
		/*
		 * LBR callstack can only get user call chain.
		 * The mix_chain_nr is kernel call chain
		 * number plus LBR user call chain number.
		 * i is kernel call chain number,
		 * 1 is PERF_CONTEXT_USER,
		 * lbr_nr + 1 is the user call chain number.
		 * For details, please refer to the comments
		 * in callchain__printf
		 */
		int mix_chain_nr = i + 1 + lbr_nr + 1;

		for (j = 0; j < mix_chain_nr; j++) {
			int err;
			branch = false;
			flags = NULL;

			if (callchain_param.order == ORDER_CALLEE) {
				if (j < i + 1)
					ip = chain->ips[j];
				else if (j > i + 1) {
					k = j - i - 2;
					ip = lbr_stack->entries[k].from;
					branch = true;
					flags = &lbr_stack->entries[k].flags;
				} else {
					ip = lbr_stack->entries[0].to;
					branch = true;
					flags = &lbr_stack->entries[0].flags;
					branch_from =
						lbr_stack->entries[0].from;
				}
			} else {
				if (j < lbr_nr) {
					k = lbr_nr - j - 1;
					ip = lbr_stack->entries[k].from;
					branch = true;
					flags = &lbr_stack->entries[k].flags;
				} else if (j > lbr_nr)
					ip = chain->ips[i + 1 - (j - lbr_nr)];
				else {
					ip = lbr_stack->entries[0].to;
					branch = true;
					flags = &lbr_stack->entries[0].flags;
					branch_from =
						lbr_stack->entries[0].from;
				}
			}

			err = add_callchain_ip(thread, cursor, parent,
					       root_al, &cpumode, ip,
					       branch, flags, NULL,
					       branch_from);
			if (err)
				return (err < 0) ? err : 0;
		}
		return 1;
	}

	return 0;
}
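/*
 * Illustrative example of the mixed chain built above, not part of the
 * original file: for ORDER_CALLEE with i = 2 kernel IPs and lbr_nr = 3
 * LBR entries (newest first), mix_chain_nr = 2 + 1 + 3 + 1 = 7 and the
 * IPs are appended in this order:
 *
 *	ips[0], ips[1]			kernel call chain
 *	ips[2] == PERF_CONTEXT_USER	switch cpumode to user
 *	entries[0].to			the current user IP
 *	entries[0].from, entries[1].from, entries[2].from
 *					the LBR call stack, callee first
 */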
static int find_prev_cpumode(struct ip_callchain *chain, struct thread *thread,
			     struct callchain_cursor *cursor,
			     struct symbol **parent,
			     struct addr_location *root_al,
			     u8 *cpumode, int ent)
{
	int err = 0;

	while (--ent >= 0) {
		u64 ip = chain->ips[ent];

		if (ip >= PERF_CONTEXT_MAX) {
			err = add_callchain_ip(thread, cursor, parent,
					       root_al, cpumode, ip,
					       false, NULL, NULL, 0);
			break;
		}
	}
	return err;
}

static int thread__resolve_callchain_sample(struct thread *thread,
					    struct callchain_cursor *cursor,
					    struct evsel *evsel,
					    struct perf_sample *sample,
					    struct symbol **parent,
					    struct addr_location *root_al,
					    int max_stack)
{
	struct branch_stack *branch = sample->branch_stack;
	struct ip_callchain *chain = sample->callchain;
	int chain_nr = 0;
	u8 cpumode = PERF_RECORD_MISC_USER;
	int i, j, err, nr_entries;
	int skip_idx = -1;
	int first_call = 0;

	if (chain)
		chain_nr = chain->nr;

	if (perf_evsel__has_branch_callstack(evsel)) {
		err = resolve_lbr_callchain_sample(thread, cursor, sample, parent,
						   root_al, max_stack);
		if (err)
			return (err < 0) ? err : 0;
	}

	/*
	 * Based on DWARF debug information, some architectures skip
	 * a callchain entry saved by the kernel.
	 */
	skip_idx = arch_skip_callchain_idx(thread, chain);

	/*
	 * Add branches to call stack for easier browsing. This gives
	 * more context for a sample than just the callers.
	 *
	 * This uses individual histograms of paths compared to the
	 * aggregated histograms the normal LBR mode uses.
	 *
	 * Limitations for now:
	 * - No extra filters
	 * - No annotations (should annotate somehow)
	 */

	if (branch && callchain_param.branch_callstack) {
		int nr = min(max_stack, (int)branch->nr);
		struct branch_entry be[nr];
		struct iterations iter[nr];

		if (branch->nr > PERF_MAX_BRANCH_DEPTH) {
			pr_warning("corrupted branch chain. skipping...\n");
			goto check_calls;
		}

		for (i = 0; i < nr; i++) {
			if (callchain_param.order == ORDER_CALLEE) {
				be[i] = branch->entries[i];

				if (chain == NULL)
					continue;

				/*
				 * Check for overlap into the callchain.
				 * The return address is one off compared to
				 * the branch entry. To adjust for this
				 * assume the calling instruction is not longer
				 * than 8 bytes.
				 */
				if (i == skip_idx ||
				    chain->ips[first_call] >= PERF_CONTEXT_MAX)
					first_call++;
				else if (be[i].from < chain->ips[first_call] &&
					 be[i].from >= chain->ips[first_call] - 8)
					first_call++;
			} else
				be[i] = branch->entries[branch->nr - i - 1];
		}

		memset(iter, 0, sizeof(struct iterations) * nr);
		nr = remove_loops(be, nr, iter);

		for (i = 0; i < nr; i++) {
			err = add_callchain_ip(thread, cursor, parent,
					       root_al,
					       NULL, be[i].to,
					       true, &be[i].flags,
					       NULL, be[i].from);

			if (!err)
				err = add_callchain_ip(thread, cursor, parent, root_al,
						       NULL, be[i].from,
						       true, &be[i].flags,
						       &iter[i], 0);
			if (err == -EINVAL)
				break;
			if (err)
				return err;
		}

		if (chain_nr == 0)
			return 0;

		chain_nr -= nr;
	}

check_calls:
	if (chain && callchain_param.order != ORDER_CALLEE) {
		err = find_prev_cpumode(chain, thread, cursor, parent, root_al,
					&cpumode, chain->nr - first_call);
		if (err)
			return (err < 0) ? err : 0;
	}
	for (i = first_call, nr_entries = 0;
	     i < chain_nr && nr_entries < max_stack; i++) {
		u64 ip;

		if (callchain_param.order == ORDER_CALLEE)
			j = i;
		else
			j = chain->nr - i - 1;

#ifdef HAVE_SKIP_CALLCHAIN_IDX
		if (j == skip_idx)
			continue;
#endif
		ip = chain->ips[j];
		if (ip < PERF_CONTEXT_MAX)
			++nr_entries;
		else if (callchain_param.order != ORDER_CALLEE) {
			err = find_prev_cpumode(chain, thread, cursor, parent,
						root_al, &cpumode, j);
			if (err)
				return (err < 0) ? err : 0;
			continue;
		}

		err = add_callchain_ip(thread, cursor, parent,
				       root_al, &cpumode, ip,
				       false, NULL, NULL, 0);

		if (err)
			return (err < 0) ? err : 0;
	}

	return 0;
}
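/*
 * Note on the overlap check in thread__resolve_callchain_sample() above,
 * added for illustration: if the first callchain entry is a return
 * address R, the branch entry for the corresponding call has from ==
 * the address of the call instruction itself, i.e. somewhere in
 * [R - 8, R). Entries in that window duplicate what the branch stack
 * already provides, so first_call is advanced past them.
 */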
static int append_inlines(struct callchain_cursor *cursor, struct map_symbol *ms, u64 ip)
{
	struct symbol *sym = ms->sym;
	struct map *map = ms->map;
	struct inline_node *inline_node;
	struct inline_list *ilist;
	u64 addr;
	int ret = 1;

	if (!symbol_conf.inline_name || !map || !sym)
		return ret;

	addr = map__map_ip(map, ip);
	addr = map__rip_2objdump(map, addr);

	inline_node = inlines__tree_find(&map->dso->inlined_nodes, addr);
	if (!inline_node) {
		inline_node = dso__parse_addr_inlines(map->dso, addr, sym);
		if (!inline_node)
			return ret;
		inlines__tree_insert(&map->dso->inlined_nodes, inline_node);
	}

	list_for_each_entry(ilist, &inline_node->val, list) {
		struct map_symbol ilist_ms = {
			.maps = ms->maps,
			.map = map,
			.sym = ilist->symbol,
		};
		ret = callchain_cursor_append(cursor, ip, &ilist_ms, false,
					      NULL, 0, 0, 0, ilist->srcline);

		if (ret != 0)
			return ret;
	}

	return ret;
}

static int unwind_entry(struct unwind_entry *entry, void *arg)
{
	struct callchain_cursor *cursor = arg;
	const char *srcline = NULL;
	u64 addr = entry->ip;

	if (symbol_conf.hide_unresolved && entry->ms.sym == NULL)
		return 0;

	if (append_inlines(cursor, &entry->ms, entry->ip) == 0)
		return 0;

	/*
	 * Convert entry->ip from a virtual address to an offset in
	 * its corresponding binary.
	 */
	if (entry->ms.map)
		addr = map__map_ip(entry->ms.map, entry->ip);

	srcline = callchain_srcline(&entry->ms, addr);
	return callchain_cursor_append(cursor, entry->ip, &entry->ms,
				       false, NULL, 0, 0, 0, srcline);
}
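/*
 * Background note, added for illustration: the DWARF post-unwind below
 * only has something to work with when the sample carries the user
 * registers and a snapshot of the user stack. perf record sets that up
 * for "--call-graph dwarf[,size]" by requesting PERF_SAMPLE_REGS_USER
 * and PERF_SAMPLE_STACK_USER in the event attributes.
 */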
static int thread__resolve_callchain_unwind(struct thread *thread,
					    struct callchain_cursor *cursor,
					    struct evsel *evsel,
					    struct perf_sample *sample,
					    int max_stack)
{
	/* Can we do dwarf post unwind? */
	if (!((evsel->core.attr.sample_type & PERF_SAMPLE_REGS_USER) &&
	      (evsel->core.attr.sample_type & PERF_SAMPLE_STACK_USER)))
		return 0;

	/* Bail out if nothing was captured. */
	if ((!sample->user_regs.regs) ||
	    (!sample->user_stack.size))
		return 0;

	return unwind__get_entries(unwind_entry, cursor,
				   thread, sample, max_stack);
}

int thread__resolve_callchain(struct thread *thread,
			      struct callchain_cursor *cursor,
			      struct evsel *evsel,
			      struct perf_sample *sample,
			      struct symbol **parent,
			      struct addr_location *root_al,
			      int max_stack)
{
	int ret = 0;

	callchain_cursor_reset(cursor);

	if (callchain_param.order == ORDER_CALLEE) {
		ret = thread__resolve_callchain_sample(thread, cursor,
						       evsel, sample,
						       parent, root_al,
						       max_stack);
		if (ret)
			return ret;
		ret = thread__resolve_callchain_unwind(thread, cursor,
						       evsel, sample,
						       max_stack);
	} else {
		ret = thread__resolve_callchain_unwind(thread, cursor,
						       evsel, sample,
						       max_stack);
		if (ret)
			return ret;
		ret = thread__resolve_callchain_sample(thread, cursor,
						       evsel, sample,
						       parent, root_al,
						       max_stack);
	}

	return ret;
}

int machine__for_each_thread(struct machine *machine,
			     int (*fn)(struct thread *thread, void *p),
			     void *priv)
{
	struct threads *threads;
	struct rb_node *nd;
	struct thread *thread;
	int rc = 0;
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		threads = &machine->threads[i];
		for (nd = rb_first_cached(&threads->entries); nd;
		     nd = rb_next(nd)) {
			thread = rb_entry(nd, struct thread, rb_node);
			rc = fn(thread, priv);
			if (rc != 0)
				return rc;
		}

		list_for_each_entry(thread, &threads->dead, node) {
			rc = fn(thread, priv);
			if (rc != 0)
				return rc;
		}
	}
	return rc;
}

int machines__for_each_thread(struct machines *machines,
			      int (*fn)(struct thread *thread, void *p),
			      void *priv)
{
	struct rb_node *nd;
	int rc = 0;

	rc = machine__for_each_thread(&machines->host, fn, priv);
	if (rc != 0)
		return rc;

	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		rc = machine__for_each_thread(machine, fn, priv);
		if (rc != 0)
			return rc;
	}
	return rc;
}
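/*
 * Minimal usage sketch for machine__for_each_thread(), added for
 * illustration; count_thread and nr_threads are hypothetical names:
 *
 *	static int count_thread(struct thread *thread __maybe_unused,
 *				void *priv)
 *	{
 *		(*(int *)priv)++;
 *		return 0;	(returning non-zero stops the iteration)
 *	}
 *
 *	int nr_threads = 0;
 *	machine__for_each_thread(machine, count_thread, &nr_threads);
 */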
", cpu); 2625 pr_err("Consider raising MAX_NR_CPUS\n"); 2626 return -EINVAL; 2627 } 2628 2629 machine->current_tid[cpu] = tid; 2630 2631 thread = machine__findnew_thread(machine, pid, tid); 2632 if (!thread) 2633 return -ENOMEM; 2634 2635 thread->cpu = cpu; 2636 thread__put(thread); 2637 2638 return 0; 2639 } 2640 2641 /* 2642 * Compares the raw arch string. N.B. see instead perf_env__arch() if a 2643 * normalized arch is needed. 2644 */ 2645 bool machine__is(struct machine *machine, const char *arch) 2646 { 2647 return machine && !strcmp(perf_env__raw_arch(machine->env), arch); 2648 } 2649 2650 int machine__nr_cpus_avail(struct machine *machine) 2651 { 2652 return machine ? perf_env__nr_cpus_avail(machine->env) : 0; 2653 } 2654 2655 int machine__get_kernel_start(struct machine *machine) 2656 { 2657 struct map *map = machine__kernel_map(machine); 2658 int err = 0; 2659 2660 /* 2661 * The only addresses above 2^63 are kernel addresses of a 64-bit 2662 * kernel. Note that addresses are unsigned so that on a 32-bit system 2663 * all addresses including kernel addresses are less than 2^32. In 2664 * that case (32-bit system), if the kernel mapping is unknown, all 2665 * addresses will be assumed to be in user space - see 2666 * machine__kernel_ip(). 2667 */ 2668 machine->kernel_start = 1ULL << 63; 2669 if (map) { 2670 err = map__load(map); 2671 /* 2672 * On x86_64, PTI entry trampolines are less than the 2673 * start of kernel text, but still above 2^63. So leave 2674 * kernel_start = 1ULL << 63 for x86_64. 2675 */ 2676 if (!err && !machine__is(machine, "x86_64")) 2677 machine->kernel_start = map->start; 2678 } 2679 return err; 2680 } 2681 2682 u8 machine__addr_cpumode(struct machine *machine, u8 cpumode, u64 addr) 2683 { 2684 u8 addr_cpumode = cpumode; 2685 bool kernel_ip; 2686 2687 if (!machine->single_address_space) 2688 goto out; 2689 2690 kernel_ip = machine__kernel_ip(machine, addr); 2691 switch (cpumode) { 2692 case PERF_RECORD_MISC_KERNEL: 2693 case PERF_RECORD_MISC_USER: 2694 addr_cpumode = kernel_ip ? PERF_RECORD_MISC_KERNEL : 2695 PERF_RECORD_MISC_USER; 2696 break; 2697 case PERF_RECORD_MISC_GUEST_KERNEL: 2698 case PERF_RECORD_MISC_GUEST_USER: 2699 addr_cpumode = kernel_ip ? PERF_RECORD_MISC_GUEST_KERNEL : 2700 PERF_RECORD_MISC_GUEST_USER; 2701 break; 2702 default: 2703 break; 2704 } 2705 out: 2706 return addr_cpumode; 2707 } 2708 2709 struct dso *machine__findnew_dso_id(struct machine *machine, const char *filename, struct dso_id *id) 2710 { 2711 return dsos__findnew_id(&machine->dsos, filename, id); 2712 } 2713 2714 struct dso *machine__findnew_dso(struct machine *machine, const char *filename) 2715 { 2716 return machine__findnew_dso_id(machine, filename, NULL); 2717 } 2718 2719 char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp) 2720 { 2721 struct machine *machine = vmachine; 2722 struct map *map; 2723 struct symbol *sym = machine__find_kernel_symbol(machine, *addrp, &map); 2724 2725 if (sym == NULL) 2726 return NULL; 2727 2728 *modp = __map__is_kmodule(map) ? (char *)map->dso->short_name : NULL; 2729 *addrp = map->unmap_ip(map, sym->start); 2730 return sym->name; 2731 } 2732