// SPDX-License-Identifier: GPL-2.0
#include <dirent.h>
#include <errno.h>
#include <inttypes.h>
#include <regex.h>
#include <stdlib.h>
#include "callchain.h"
#include "debug.h"
#include "dso.h"
#include "env.h"
#include "event.h"
#include "evsel.h"
#include "hist.h"
#include "machine.h"
#include "map.h"
#include "map_symbol.h"
#include "branch.h"
#include "mem-events.h"
#include "path.h"
#include "srcline.h"
#include "symbol.h"
#include "sort.h"
#include "strlist.h"
#include "target.h"
#include "thread.h"
#include "util.h"
#include "vdso.h"
#include <stdbool.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include "unwind.h"
#include "linux/hash.h"
#include "asm/bug.h"
#include "bpf-event.h"
#include <internal/lib.h> // page_size
#include "cgroup.h"
#include "arm64-frame-pointer-unwind-support.h"

#include <linux/ctype.h>
#include <symbol/kallsyms.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/zalloc.h>

static void __machine__remove_thread(struct machine *machine, struct thread_rb_node *nd,
				     struct thread *th, bool lock);

static struct dso *machine__kernel_dso(struct machine *machine)
{
	return map__dso(machine->vmlinux_map);
}

static void dsos__init(struct dsos *dsos)
{
	INIT_LIST_HEAD(&dsos->head);
	dsos->root = RB_ROOT;
	init_rwsem(&dsos->lock);
}

static void machine__threads_init(struct machine *machine)
{
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];
		threads->entries = RB_ROOT_CACHED;
		init_rwsem(&threads->lock);
		threads->nr = 0;
		INIT_LIST_HEAD(&threads->dead);
		threads->last_match = NULL;
	}
}

static int thread_rb_node__cmp_tid(const void *key, const struct rb_node *nd)
{
	int to_find = (int) *((pid_t *)key);

	return to_find - (int)thread__tid(rb_entry(nd, struct thread_rb_node, rb_node)->thread);
}

static struct thread_rb_node *thread_rb_node__find(const struct thread *th,
						   struct rb_root *tree)
{
	pid_t to_find = thread__tid(th);
	struct rb_node *nd = rb_find(&to_find, tree, thread_rb_node__cmp_tid);

	return rb_entry(nd, struct thread_rb_node, rb_node);
}

static int machine__set_mmap_name(struct machine *machine)
{
	if (machine__is_host(machine))
		machine->mmap_name = strdup("[kernel.kallsyms]");
	else if (machine__is_default_guest(machine))
		machine->mmap_name = strdup("[guest.kernel.kallsyms]");
	else if (asprintf(&machine->mmap_name, "[guest.kernel.kallsyms.%d]",
			  machine->pid) < 0)
		machine->mmap_name = NULL;

	return machine->mmap_name ? 0 : -ENOMEM;
}
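
/*
 * Guest threads get a synthetic comm of the form "[guest/<pid>]" so that
 * they can be told apart from host threads.
 */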
static void thread__set_guest_comm(struct thread *thread, pid_t pid)
{
	char comm[64];

	snprintf(comm, sizeof(comm), "[guest/%d]", pid);
	thread__set_comm(thread, comm, 0);
}

int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
{
	int err = -ENOMEM;

	memset(machine, 0, sizeof(*machine));
	machine->kmaps = maps__new(machine);
	if (machine->kmaps == NULL)
		return -ENOMEM;

	RB_CLEAR_NODE(&machine->rb_node);
	dsos__init(&machine->dsos);

	machine__threads_init(machine);

	machine->vdso_info = NULL;
	machine->env = NULL;

	machine->pid = pid;

	machine->id_hdr_size = 0;
	machine->kptr_restrict_warned = false;
	machine->comm_exec = false;
	machine->kernel_start = 0;
	machine->vmlinux_map = NULL;

	machine->root_dir = strdup(root_dir);
	if (machine->root_dir == NULL)
		goto out;

	if (machine__set_mmap_name(machine))
		goto out;

	if (pid != HOST_KERNEL_ID) {
		struct thread *thread = machine__findnew_thread(machine, -1,
								pid);

		if (thread == NULL)
			goto out;

		thread__set_guest_comm(thread, pid);
		thread__put(thread);
	}

	machine->current_tid = NULL;
	err = 0;

out:
	if (err) {
		zfree(&machine->kmaps);
		zfree(&machine->root_dir);
		zfree(&machine->mmap_name);
	}
	return err;
}

struct machine *machine__new_host(void)
{
	struct machine *machine = malloc(sizeof(*machine));

	if (machine != NULL) {
		machine__init(machine, "", HOST_KERNEL_ID);

		if (machine__create_kernel_maps(machine) < 0)
			goto out_delete;
	}

	return machine;
out_delete:
	free(machine);
	return NULL;
}

struct machine *machine__new_kallsyms(void)
{
	struct machine *machine = machine__new_host();
	/*
	 * FIXME:
	 * 1) We should switch to machine__load_kallsyms(), i.e. not explicitly
	 *    ask for not using the kcore parsing code, once this one is fixed
	 *    to create a map per module.
192 */ 193 if (machine && machine__load_kallsyms(machine, "/proc/kallsyms") <= 0) { 194 machine__delete(machine); 195 machine = NULL; 196 } 197 198 return machine; 199 } 200 201 static void dsos__purge(struct dsos *dsos) 202 { 203 struct dso *pos, *n; 204 205 down_write(&dsos->lock); 206 207 list_for_each_entry_safe(pos, n, &dsos->head, node) { 208 RB_CLEAR_NODE(&pos->rb_node); 209 pos->root = NULL; 210 list_del_init(&pos->node); 211 dso__put(pos); 212 } 213 214 up_write(&dsos->lock); 215 } 216 217 static void dsos__exit(struct dsos *dsos) 218 { 219 dsos__purge(dsos); 220 exit_rwsem(&dsos->lock); 221 } 222 223 void machine__delete_threads(struct machine *machine) 224 { 225 struct rb_node *nd; 226 int i; 227 228 for (i = 0; i < THREADS__TABLE_SIZE; i++) { 229 struct threads *threads = &machine->threads[i]; 230 down_write(&threads->lock); 231 nd = rb_first_cached(&threads->entries); 232 while (nd) { 233 struct thread_rb_node *trb = rb_entry(nd, struct thread_rb_node, rb_node); 234 235 nd = rb_next(nd); 236 __machine__remove_thread(machine, trb, trb->thread, false); 237 } 238 up_write(&threads->lock); 239 } 240 } 241 242 void machine__exit(struct machine *machine) 243 { 244 int i; 245 246 if (machine == NULL) 247 return; 248 249 machine__destroy_kernel_maps(machine); 250 maps__zput(machine->kmaps); 251 dsos__exit(&machine->dsos); 252 machine__exit_vdso(machine); 253 zfree(&machine->root_dir); 254 zfree(&machine->mmap_name); 255 zfree(&machine->current_tid); 256 zfree(&machine->kallsyms_filename); 257 258 machine__delete_threads(machine); 259 for (i = 0; i < THREADS__TABLE_SIZE; i++) { 260 struct threads *threads = &machine->threads[i]; 261 262 exit_rwsem(&threads->lock); 263 } 264 } 265 266 void machine__delete(struct machine *machine) 267 { 268 if (machine) { 269 machine__exit(machine); 270 free(machine); 271 } 272 } 273 274 void machines__init(struct machines *machines) 275 { 276 machine__init(&machines->host, "", HOST_KERNEL_ID); 277 machines->guests = RB_ROOT_CACHED; 278 } 279 280 void machines__exit(struct machines *machines) 281 { 282 machine__exit(&machines->host); 283 /* XXX exit guest */ 284 } 285 286 struct machine *machines__add(struct machines *machines, pid_t pid, 287 const char *root_dir) 288 { 289 struct rb_node **p = &machines->guests.rb_root.rb_node; 290 struct rb_node *parent = NULL; 291 struct machine *pos, *machine = malloc(sizeof(*machine)); 292 bool leftmost = true; 293 294 if (machine == NULL) 295 return NULL; 296 297 if (machine__init(machine, root_dir, pid) != 0) { 298 free(machine); 299 return NULL; 300 } 301 302 while (*p != NULL) { 303 parent = *p; 304 pos = rb_entry(parent, struct machine, rb_node); 305 if (pid < pos->pid) 306 p = &(*p)->rb_left; 307 else { 308 p = &(*p)->rb_right; 309 leftmost = false; 310 } 311 } 312 313 rb_link_node(&machine->rb_node, parent, p); 314 rb_insert_color_cached(&machine->rb_node, &machines->guests, leftmost); 315 316 machine->machines = machines; 317 318 return machine; 319 } 320 321 void machines__set_comm_exec(struct machines *machines, bool comm_exec) 322 { 323 struct rb_node *nd; 324 325 machines->host.comm_exec = comm_exec; 326 327 for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) { 328 struct machine *machine = rb_entry(nd, struct machine, rb_node); 329 330 machine->comm_exec = comm_exec; 331 } 332 } 333 334 struct machine *machines__find(struct machines *machines, pid_t pid) 335 { 336 struct rb_node **p = &machines->guests.rb_root.rb_node; 337 struct rb_node *parent = NULL; 338 struct machine *machine; 339 
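
	/*
	 * A machine whose pid is 0 serves as the default guest: remember it as
	 * the fallback to return when no machine matches the requested pid.
	 */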
struct machine *default_machine = NULL; 340 341 if (pid == HOST_KERNEL_ID) 342 return &machines->host; 343 344 while (*p != NULL) { 345 parent = *p; 346 machine = rb_entry(parent, struct machine, rb_node); 347 if (pid < machine->pid) 348 p = &(*p)->rb_left; 349 else if (pid > machine->pid) 350 p = &(*p)->rb_right; 351 else 352 return machine; 353 if (!machine->pid) 354 default_machine = machine; 355 } 356 357 return default_machine; 358 } 359 360 struct machine *machines__findnew(struct machines *machines, pid_t pid) 361 { 362 char path[PATH_MAX]; 363 const char *root_dir = ""; 364 struct machine *machine = machines__find(machines, pid); 365 366 if (machine && (machine->pid == pid)) 367 goto out; 368 369 if ((pid != HOST_KERNEL_ID) && 370 (pid != DEFAULT_GUEST_KERNEL_ID) && 371 (symbol_conf.guestmount)) { 372 sprintf(path, "%s/%d", symbol_conf.guestmount, pid); 373 if (access(path, R_OK)) { 374 static struct strlist *seen; 375 376 if (!seen) 377 seen = strlist__new(NULL, NULL); 378 379 if (!strlist__has_entry(seen, path)) { 380 pr_err("Can't access file %s\n", path); 381 strlist__add(seen, path); 382 } 383 machine = NULL; 384 goto out; 385 } 386 root_dir = path; 387 } 388 389 machine = machines__add(machines, pid, root_dir); 390 out: 391 return machine; 392 } 393 394 struct machine *machines__find_guest(struct machines *machines, pid_t pid) 395 { 396 struct machine *machine = machines__find(machines, pid); 397 398 if (!machine) 399 machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID); 400 return machine; 401 } 402 403 /* 404 * A common case for KVM test programs is that the test program acts as the 405 * hypervisor, creating, running and destroying the virtual machine, and 406 * providing the guest object code from its own object code. In this case, 407 * the VM is not running an OS, but only the functions loaded into it by the 408 * hypervisor test program, and conveniently, loaded at the same virtual 409 * addresses. 410 * 411 * Normally to resolve addresses, MMAP events are needed to map addresses 412 * back to the object code and debug symbols for that object code. 413 * 414 * Currently, there is no way to get such mapping information from guests 415 * but, in the scenario described above, the guest has the same mappings 416 * as the hypervisor, so support for that scenario can be achieved. 417 * 418 * To support that, copy the host thread's maps to the guest thread's maps. 419 * Note, we do not discover the guest until we encounter a guest event, 420 * which works well because it is not until then that we know that the host 421 * thread's maps have been set up. 422 * 423 * This function returns the guest thread. Apart from keeping the data 424 * structures sane, using a thread belonging to the guest machine, instead 425 * of the host thread, allows it to have its own comm (refer 426 * thread__set_guest_comm()). 
427 */ 428 static struct thread *findnew_guest_code(struct machine *machine, 429 struct machine *host_machine, 430 pid_t pid) 431 { 432 struct thread *host_thread; 433 struct thread *thread; 434 int err; 435 436 if (!machine) 437 return NULL; 438 439 thread = machine__findnew_thread(machine, -1, pid); 440 if (!thread) 441 return NULL; 442 443 /* Assume maps are set up if there are any */ 444 if (maps__nr_maps(thread__maps(thread))) 445 return thread; 446 447 host_thread = machine__find_thread(host_machine, -1, pid); 448 if (!host_thread) 449 goto out_err; 450 451 thread__set_guest_comm(thread, pid); 452 453 /* 454 * Guest code can be found in hypervisor process at the same address 455 * so copy host maps. 456 */ 457 err = maps__clone(thread, thread__maps(host_thread)); 458 thread__put(host_thread); 459 if (err) 460 goto out_err; 461 462 return thread; 463 464 out_err: 465 thread__zput(thread); 466 return NULL; 467 } 468 469 struct thread *machines__findnew_guest_code(struct machines *machines, pid_t pid) 470 { 471 struct machine *host_machine = machines__find(machines, HOST_KERNEL_ID); 472 struct machine *machine = machines__findnew(machines, pid); 473 474 return findnew_guest_code(machine, host_machine, pid); 475 } 476 477 struct thread *machine__findnew_guest_code(struct machine *machine, pid_t pid) 478 { 479 struct machines *machines = machine->machines; 480 struct machine *host_machine; 481 482 if (!machines) 483 return NULL; 484 485 host_machine = machines__find(machines, HOST_KERNEL_ID); 486 487 return findnew_guest_code(machine, host_machine, pid); 488 } 489 490 void machines__process_guests(struct machines *machines, 491 machine__process_t process, void *data) 492 { 493 struct rb_node *nd; 494 495 for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) { 496 struct machine *pos = rb_entry(nd, struct machine, rb_node); 497 process(pos, data); 498 } 499 } 500 501 void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size) 502 { 503 struct rb_node *node; 504 struct machine *machine; 505 506 machines->host.id_hdr_size = id_hdr_size; 507 508 for (node = rb_first_cached(&machines->guests); node; 509 node = rb_next(node)) { 510 machine = rb_entry(node, struct machine, rb_node); 511 machine->id_hdr_size = id_hdr_size; 512 } 513 514 return; 515 } 516 517 static void machine__update_thread_pid(struct machine *machine, 518 struct thread *th, pid_t pid) 519 { 520 struct thread *leader; 521 522 if (pid == thread__pid(th) || pid == -1 || thread__pid(th) != -1) 523 return; 524 525 thread__set_pid(th, pid); 526 527 if (thread__pid(th) == thread__tid(th)) 528 return; 529 530 leader = __machine__findnew_thread(machine, thread__pid(th), thread__pid(th)); 531 if (!leader) 532 goto out_err; 533 534 if (!thread__maps(leader)) 535 thread__set_maps(leader, maps__new(machine)); 536 537 if (!thread__maps(leader)) 538 goto out_err; 539 540 if (thread__maps(th) == thread__maps(leader)) 541 goto out_put; 542 543 if (thread__maps(th)) { 544 /* 545 * Maps are created from MMAP events which provide the pid and 546 * tid. Consequently there never should be any maps on a thread 547 * with an unknown pid. Just print an error if there are. 
548 */ 549 if (!maps__empty(thread__maps(th))) 550 pr_err("Discarding thread maps for %d:%d\n", 551 thread__pid(th), thread__tid(th)); 552 maps__put(thread__maps(th)); 553 } 554 555 thread__set_maps(th, maps__get(thread__maps(leader))); 556 out_put: 557 thread__put(leader); 558 return; 559 out_err: 560 pr_err("Failed to join map groups for %d:%d\n", thread__pid(th), thread__tid(th)); 561 goto out_put; 562 } 563 564 /* 565 * Front-end cache - TID lookups come in blocks, 566 * so most of the time we dont have to look up 567 * the full rbtree: 568 */ 569 static struct thread* 570 __threads__get_last_match(struct threads *threads, struct machine *machine, 571 int pid, int tid) 572 { 573 struct thread *th; 574 575 th = threads->last_match; 576 if (th != NULL) { 577 if (thread__tid(th) == tid) { 578 machine__update_thread_pid(machine, th, pid); 579 return thread__get(th); 580 } 581 thread__put(threads->last_match); 582 threads->last_match = NULL; 583 } 584 585 return NULL; 586 } 587 588 static struct thread* 589 threads__get_last_match(struct threads *threads, struct machine *machine, 590 int pid, int tid) 591 { 592 struct thread *th = NULL; 593 594 if (perf_singlethreaded) 595 th = __threads__get_last_match(threads, machine, pid, tid); 596 597 return th; 598 } 599 600 static void 601 __threads__set_last_match(struct threads *threads, struct thread *th) 602 { 603 thread__put(threads->last_match); 604 threads->last_match = thread__get(th); 605 } 606 607 static void 608 threads__set_last_match(struct threads *threads, struct thread *th) 609 { 610 if (perf_singlethreaded) 611 __threads__set_last_match(threads, th); 612 } 613 614 /* 615 * Caller must eventually drop thread->refcnt returned with a successful 616 * lookup/new thread inserted. 617 */ 618 static struct thread *____machine__findnew_thread(struct machine *machine, 619 struct threads *threads, 620 pid_t pid, pid_t tid, 621 bool create) 622 { 623 struct rb_node **p = &threads->entries.rb_root.rb_node; 624 struct rb_node *parent = NULL; 625 struct thread *th; 626 struct thread_rb_node *nd; 627 bool leftmost = true; 628 629 th = threads__get_last_match(threads, machine, pid, tid); 630 if (th) 631 return th; 632 633 while (*p != NULL) { 634 parent = *p; 635 th = rb_entry(parent, struct thread_rb_node, rb_node)->thread; 636 637 if (thread__tid(th) == tid) { 638 threads__set_last_match(threads, th); 639 machine__update_thread_pid(machine, th, pid); 640 return thread__get(th); 641 } 642 643 if (tid < thread__tid(th)) 644 p = &(*p)->rb_left; 645 else { 646 p = &(*p)->rb_right; 647 leftmost = false; 648 } 649 } 650 651 if (!create) 652 return NULL; 653 654 th = thread__new(pid, tid); 655 if (th == NULL) 656 return NULL; 657 658 nd = malloc(sizeof(*nd)); 659 if (nd == NULL) { 660 thread__put(th); 661 return NULL; 662 } 663 nd->thread = th; 664 665 rb_link_node(&nd->rb_node, parent, p); 666 rb_insert_color_cached(&nd->rb_node, &threads->entries, leftmost); 667 /* 668 * We have to initialize maps separately after rb tree is updated. 669 * 670 * The reason is that we call machine__findnew_thread within 671 * thread__init_maps to find the thread leader and that would screwed 672 * the rb tree. 
673 */ 674 if (thread__init_maps(th, machine)) { 675 pr_err("Thread init failed thread %d\n", pid); 676 rb_erase_cached(&nd->rb_node, &threads->entries); 677 RB_CLEAR_NODE(&nd->rb_node); 678 free(nd); 679 thread__put(th); 680 return NULL; 681 } 682 /* 683 * It is now in the rbtree, get a ref 684 */ 685 threads__set_last_match(threads, th); 686 ++threads->nr; 687 688 return thread__get(th); 689 } 690 691 struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid) 692 { 693 return ____machine__findnew_thread(machine, machine__threads(machine, tid), pid, tid, true); 694 } 695 696 struct thread *machine__findnew_thread(struct machine *machine, pid_t pid, 697 pid_t tid) 698 { 699 struct threads *threads = machine__threads(machine, tid); 700 struct thread *th; 701 702 down_write(&threads->lock); 703 th = __machine__findnew_thread(machine, pid, tid); 704 up_write(&threads->lock); 705 return th; 706 } 707 708 struct thread *machine__find_thread(struct machine *machine, pid_t pid, 709 pid_t tid) 710 { 711 struct threads *threads = machine__threads(machine, tid); 712 struct thread *th; 713 714 down_read(&threads->lock); 715 th = ____machine__findnew_thread(machine, threads, pid, tid, false); 716 up_read(&threads->lock); 717 return th; 718 } 719 720 /* 721 * Threads are identified by pid and tid, and the idle task has pid == tid == 0. 722 * So here a single thread is created for that, but actually there is a separate 723 * idle task per cpu, so there should be one 'struct thread' per cpu, but there 724 * is only 1. That causes problems for some tools, requiring workarounds. For 725 * example get_idle_thread() in builtin-sched.c, or thread_stack__per_cpu(). 726 */ 727 struct thread *machine__idle_thread(struct machine *machine) 728 { 729 struct thread *thread = machine__findnew_thread(machine, 0, 0); 730 731 if (!thread || thread__set_comm(thread, "swapper", 0) || 732 thread__set_namespaces(thread, 0, NULL)) 733 pr_err("problem inserting idle task for machine pid %d\n", machine->pid); 734 735 return thread; 736 } 737 738 struct comm *machine__thread_exec_comm(struct machine *machine, 739 struct thread *thread) 740 { 741 if (machine->comm_exec) 742 return thread__exec_comm(thread); 743 else 744 return thread__comm(thread); 745 } 746 747 int machine__process_comm_event(struct machine *machine, union perf_event *event, 748 struct perf_sample *sample) 749 { 750 struct thread *thread = machine__findnew_thread(machine, 751 event->comm.pid, 752 event->comm.tid); 753 bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC; 754 int err = 0; 755 756 if (exec) 757 machine->comm_exec = true; 758 759 if (dump_trace) 760 perf_event__fprintf_comm(event, stdout); 761 762 if (thread == NULL || 763 __thread__set_comm(thread, event->comm.comm, sample->time, exec)) { 764 dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n"); 765 err = -1; 766 } 767 768 thread__put(thread); 769 770 return err; 771 } 772 773 int machine__process_namespaces_event(struct machine *machine __maybe_unused, 774 union perf_event *event, 775 struct perf_sample *sample __maybe_unused) 776 { 777 struct thread *thread = machine__findnew_thread(machine, 778 event->namespaces.pid, 779 event->namespaces.tid); 780 int err = 0; 781 782 WARN_ONCE(event->namespaces.nr_namespaces > NR_NAMESPACES, 783 "\nWARNING: kernel seems to support more namespaces than perf" 784 " tool.\nTry updating the perf tool..\n\n"); 785 786 WARN_ONCE(event->namespaces.nr_namespaces < NR_NAMESPACES, 787 "\nWARNING: perf tool seems to 
support more namespaces than" 788 " the kernel.\nTry updating the kernel..\n\n"); 789 790 if (dump_trace) 791 perf_event__fprintf_namespaces(event, stdout); 792 793 if (thread == NULL || 794 thread__set_namespaces(thread, sample->time, &event->namespaces)) { 795 dump_printf("problem processing PERF_RECORD_NAMESPACES, skipping event.\n"); 796 err = -1; 797 } 798 799 thread__put(thread); 800 801 return err; 802 } 803 804 int machine__process_cgroup_event(struct machine *machine, 805 union perf_event *event, 806 struct perf_sample *sample __maybe_unused) 807 { 808 struct cgroup *cgrp; 809 810 if (dump_trace) 811 perf_event__fprintf_cgroup(event, stdout); 812 813 cgrp = cgroup__findnew(machine->env, event->cgroup.id, event->cgroup.path); 814 if (cgrp == NULL) 815 return -ENOMEM; 816 817 return 0; 818 } 819 820 int machine__process_lost_event(struct machine *machine __maybe_unused, 821 union perf_event *event, struct perf_sample *sample __maybe_unused) 822 { 823 dump_printf(": id:%" PRI_lu64 ": lost:%" PRI_lu64 "\n", 824 event->lost.id, event->lost.lost); 825 return 0; 826 } 827 828 int machine__process_lost_samples_event(struct machine *machine __maybe_unused, 829 union perf_event *event, struct perf_sample *sample) 830 { 831 dump_printf(": id:%" PRIu64 ": lost samples :%" PRI_lu64 "\n", 832 sample->id, event->lost_samples.lost); 833 return 0; 834 } 835 836 static struct dso *machine__findnew_module_dso(struct machine *machine, 837 struct kmod_path *m, 838 const char *filename) 839 { 840 struct dso *dso; 841 842 down_write(&machine->dsos.lock); 843 844 dso = __dsos__find(&machine->dsos, m->name, true); 845 if (!dso) { 846 dso = __dsos__addnew(&machine->dsos, m->name); 847 if (dso == NULL) 848 goto out_unlock; 849 850 dso__set_module_info(dso, m, machine); 851 dso__set_long_name(dso, strdup(filename), true); 852 dso->kernel = DSO_SPACE__KERNEL; 853 } 854 855 dso__get(dso); 856 out_unlock: 857 up_write(&machine->dsos.lock); 858 return dso; 859 } 860 861 int machine__process_aux_event(struct machine *machine __maybe_unused, 862 union perf_event *event) 863 { 864 if (dump_trace) 865 perf_event__fprintf_aux(event, stdout); 866 return 0; 867 } 868 869 int machine__process_itrace_start_event(struct machine *machine __maybe_unused, 870 union perf_event *event) 871 { 872 if (dump_trace) 873 perf_event__fprintf_itrace_start(event, stdout); 874 return 0; 875 } 876 877 int machine__process_aux_output_hw_id_event(struct machine *machine __maybe_unused, 878 union perf_event *event) 879 { 880 if (dump_trace) 881 perf_event__fprintf_aux_output_hw_id(event, stdout); 882 return 0; 883 } 884 885 int machine__process_switch_event(struct machine *machine __maybe_unused, 886 union perf_event *event) 887 { 888 if (dump_trace) 889 perf_event__fprintf_switch(event, stdout); 890 return 0; 891 } 892 893 static int machine__process_ksymbol_register(struct machine *machine, 894 union perf_event *event, 895 struct perf_sample *sample __maybe_unused) 896 { 897 struct symbol *sym; 898 struct dso *dso; 899 struct map *map = maps__find(machine__kernel_maps(machine), event->ksymbol.addr); 900 bool put_map = false; 901 int err = 0; 902 903 if (!map) { 904 dso = dso__new(event->ksymbol.name); 905 906 if (!dso) { 907 err = -ENOMEM; 908 goto out; 909 } 910 dso->kernel = DSO_SPACE__KERNEL; 911 map = map__new2(0, dso); 912 dso__put(dso); 913 if (!map) { 914 err = -ENOMEM; 915 goto out; 916 } 917 /* 918 * The inserted map has a get on it, we need to put to release 919 * the reference count here, but do it after all accesses are 920 * 
done. 921 */ 922 put_map = true; 923 if (event->ksymbol.ksym_type == PERF_RECORD_KSYMBOL_TYPE_OOL) { 924 dso->binary_type = DSO_BINARY_TYPE__OOL; 925 dso->data.file_size = event->ksymbol.len; 926 dso__set_loaded(dso); 927 } 928 929 map__set_start(map, event->ksymbol.addr); 930 map__set_end(map, map__start(map) + event->ksymbol.len); 931 err = maps__insert(machine__kernel_maps(machine), map); 932 if (err) { 933 err = -ENOMEM; 934 goto out; 935 } 936 937 dso__set_loaded(dso); 938 939 if (is_bpf_image(event->ksymbol.name)) { 940 dso->binary_type = DSO_BINARY_TYPE__BPF_IMAGE; 941 dso__set_long_name(dso, "", false); 942 } 943 } else { 944 dso = map__dso(map); 945 } 946 947 sym = symbol__new(map__map_ip(map, map__start(map)), 948 event->ksymbol.len, 949 0, 0, event->ksymbol.name); 950 if (!sym) { 951 err = -ENOMEM; 952 goto out; 953 } 954 dso__insert_symbol(dso, sym); 955 out: 956 if (put_map) 957 map__put(map); 958 return err; 959 } 960 961 static int machine__process_ksymbol_unregister(struct machine *machine, 962 union perf_event *event, 963 struct perf_sample *sample __maybe_unused) 964 { 965 struct symbol *sym; 966 struct map *map; 967 968 map = maps__find(machine__kernel_maps(machine), event->ksymbol.addr); 969 if (!map) 970 return 0; 971 972 if (RC_CHK_ACCESS(map) != RC_CHK_ACCESS(machine->vmlinux_map)) 973 maps__remove(machine__kernel_maps(machine), map); 974 else { 975 struct dso *dso = map__dso(map); 976 977 sym = dso__find_symbol(dso, map__map_ip(map, map__start(map))); 978 if (sym) 979 dso__delete_symbol(dso, sym); 980 } 981 982 return 0; 983 } 984 985 int machine__process_ksymbol(struct machine *machine __maybe_unused, 986 union perf_event *event, 987 struct perf_sample *sample) 988 { 989 if (dump_trace) 990 perf_event__fprintf_ksymbol(event, stdout); 991 992 if (event->ksymbol.flags & PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER) 993 return machine__process_ksymbol_unregister(machine, event, 994 sample); 995 return machine__process_ksymbol_register(machine, event, sample); 996 } 997 998 int machine__process_text_poke(struct machine *machine, union perf_event *event, 999 struct perf_sample *sample __maybe_unused) 1000 { 1001 struct map *map = maps__find(machine__kernel_maps(machine), event->text_poke.addr); 1002 u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; 1003 struct dso *dso = map ? map__dso(map) : NULL; 1004 1005 if (dump_trace) 1006 perf_event__fprintf_text_poke(event, machine, stdout); 1007 1008 if (!event->text_poke.new_len) 1009 return 0; 1010 1011 if (cpumode != PERF_RECORD_MISC_KERNEL) { 1012 pr_debug("%s: unsupported cpumode - ignoring\n", __func__); 1013 return 0; 1014 } 1015 1016 if (dso) { 1017 u8 *new_bytes = event->text_poke.bytes + event->text_poke.old_len; 1018 int ret; 1019 1020 /* 1021 * Kernel maps might be changed when loading symbols so loading 1022 * must be done prior to using kernel maps. 
1023 */ 1024 map__load(map); 1025 ret = dso__data_write_cache_addr(dso, map, machine, 1026 event->text_poke.addr, 1027 new_bytes, 1028 event->text_poke.new_len); 1029 if (ret != event->text_poke.new_len) 1030 pr_debug("Failed to write kernel text poke at %#" PRI_lx64 "\n", 1031 event->text_poke.addr); 1032 } else { 1033 pr_debug("Failed to find kernel text poke address map for %#" PRI_lx64 "\n", 1034 event->text_poke.addr); 1035 } 1036 1037 return 0; 1038 } 1039 1040 static struct map *machine__addnew_module_map(struct machine *machine, u64 start, 1041 const char *filename) 1042 { 1043 struct map *map = NULL; 1044 struct kmod_path m; 1045 struct dso *dso; 1046 int err; 1047 1048 if (kmod_path__parse_name(&m, filename)) 1049 return NULL; 1050 1051 dso = machine__findnew_module_dso(machine, &m, filename); 1052 if (dso == NULL) 1053 goto out; 1054 1055 map = map__new2(start, dso); 1056 if (map == NULL) 1057 goto out; 1058 1059 err = maps__insert(machine__kernel_maps(machine), map); 1060 /* If maps__insert failed, return NULL. */ 1061 if (err) { 1062 map__put(map); 1063 map = NULL; 1064 } 1065 out: 1066 /* put the dso here, corresponding to machine__findnew_module_dso */ 1067 dso__put(dso); 1068 zfree(&m.name); 1069 return map; 1070 } 1071 1072 size_t machines__fprintf_dsos(struct machines *machines, FILE *fp) 1073 { 1074 struct rb_node *nd; 1075 size_t ret = __dsos__fprintf(&machines->host.dsos.head, fp); 1076 1077 for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) { 1078 struct machine *pos = rb_entry(nd, struct machine, rb_node); 1079 ret += __dsos__fprintf(&pos->dsos.head, fp); 1080 } 1081 1082 return ret; 1083 } 1084 1085 size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp, 1086 bool (skip)(struct dso *dso, int parm), int parm) 1087 { 1088 return __dsos__fprintf_buildid(&m->dsos.head, fp, skip, parm); 1089 } 1090 1091 size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp, 1092 bool (skip)(struct dso *dso, int parm), int parm) 1093 { 1094 struct rb_node *nd; 1095 size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm); 1096 1097 for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) { 1098 struct machine *pos = rb_entry(nd, struct machine, rb_node); 1099 ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm); 1100 } 1101 return ret; 1102 } 1103 1104 size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp) 1105 { 1106 int i; 1107 size_t printed = 0; 1108 struct dso *kdso = machine__kernel_dso(machine); 1109 1110 if (kdso->has_build_id) { 1111 char filename[PATH_MAX]; 1112 if (dso__build_id_filename(kdso, filename, sizeof(filename), 1113 false)) 1114 printed += fprintf(fp, "[0] %s\n", filename); 1115 } 1116 1117 for (i = 0; i < vmlinux_path__nr_entries; ++i) 1118 printed += fprintf(fp, "[%d] %s\n", 1119 i + kdso->has_build_id, vmlinux_path[i]); 1120 1121 return printed; 1122 } 1123 1124 size_t machine__fprintf(struct machine *machine, FILE *fp) 1125 { 1126 struct rb_node *nd; 1127 size_t ret; 1128 int i; 1129 1130 for (i = 0; i < THREADS__TABLE_SIZE; i++) { 1131 struct threads *threads = &machine->threads[i]; 1132 1133 down_read(&threads->lock); 1134 1135 ret = fprintf(fp, "Threads: %u\n", threads->nr); 1136 1137 for (nd = rb_first_cached(&threads->entries); nd; 1138 nd = rb_next(nd)) { 1139 struct thread *pos = rb_entry(nd, struct thread_rb_node, rb_node)->thread; 1140 1141 ret += thread__fprintf(pos, fp); 1142 } 1143 1144 up_read(&threads->lock); 1145 } 1146 return ret; 1147 } 1148 1149 
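
/*
 * Pick the DSO that represents this machine's kernel: the host uses
 * symbol_conf.vmlinux_name and guests use symbol_conf.default_guest_vmlinux_name
 * when set, defaulting to machine->mmap_name otherwise.  The returned dso
 * carries a reference that the caller is expected to drop with dso__put().
 */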
static struct dso *machine__get_kernel(struct machine *machine) 1150 { 1151 const char *vmlinux_name = machine->mmap_name; 1152 struct dso *kernel; 1153 1154 if (machine__is_host(machine)) { 1155 if (symbol_conf.vmlinux_name) 1156 vmlinux_name = symbol_conf.vmlinux_name; 1157 1158 kernel = machine__findnew_kernel(machine, vmlinux_name, 1159 "[kernel]", DSO_SPACE__KERNEL); 1160 } else { 1161 if (symbol_conf.default_guest_vmlinux_name) 1162 vmlinux_name = symbol_conf.default_guest_vmlinux_name; 1163 1164 kernel = machine__findnew_kernel(machine, vmlinux_name, 1165 "[guest.kernel]", 1166 DSO_SPACE__KERNEL_GUEST); 1167 } 1168 1169 if (kernel != NULL && (!kernel->has_build_id)) 1170 dso__read_running_kernel_build_id(kernel, machine); 1171 1172 return kernel; 1173 } 1174 1175 void machine__get_kallsyms_filename(struct machine *machine, char *buf, 1176 size_t bufsz) 1177 { 1178 if (machine__is_default_guest(machine)) 1179 scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms); 1180 else 1181 scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir); 1182 } 1183 1184 const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL}; 1185 1186 /* Figure out the start address of kernel map from /proc/kallsyms. 1187 * Returns the name of the start symbol in *symbol_name. Pass in NULL as 1188 * symbol_name if it's not that important. 1189 */ 1190 static int machine__get_running_kernel_start(struct machine *machine, 1191 const char **symbol_name, 1192 u64 *start, u64 *end) 1193 { 1194 char filename[PATH_MAX]; 1195 int i, err = -1; 1196 const char *name; 1197 u64 addr = 0; 1198 1199 machine__get_kallsyms_filename(machine, filename, PATH_MAX); 1200 1201 if (symbol__restricted_filename(filename, "/proc/kallsyms")) 1202 return 0; 1203 1204 for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) { 1205 err = kallsyms__get_function_start(filename, name, &addr); 1206 if (!err) 1207 break; 1208 } 1209 1210 if (err) 1211 return -1; 1212 1213 if (symbol_name) 1214 *symbol_name = name; 1215 1216 *start = addr; 1217 1218 err = kallsyms__get_symbol_start(filename, "_edata", &addr); 1219 if (err) 1220 err = kallsyms__get_function_start(filename, "_etext", &addr); 1221 if (!err) 1222 *end = addr; 1223 1224 return 0; 1225 } 1226 1227 int machine__create_extra_kernel_map(struct machine *machine, 1228 struct dso *kernel, 1229 struct extra_kernel_map *xm) 1230 { 1231 struct kmap *kmap; 1232 struct map *map; 1233 int err; 1234 1235 map = map__new2(xm->start, kernel); 1236 if (!map) 1237 return -ENOMEM; 1238 1239 map__set_end(map, xm->end); 1240 map__set_pgoff(map, xm->pgoff); 1241 1242 kmap = map__kmap(map); 1243 1244 strlcpy(kmap->name, xm->name, KMAP_NAME_LEN); 1245 1246 err = maps__insert(machine__kernel_maps(machine), map); 1247 1248 if (!err) { 1249 pr_debug2("Added extra kernel map %s %" PRIx64 "-%" PRIx64 "\n", 1250 kmap->name, map__start(map), map__end(map)); 1251 } 1252 1253 map__put(map); 1254 1255 return err; 1256 } 1257 1258 static u64 find_entry_trampoline(struct dso *dso) 1259 { 1260 /* Duplicates are removed so lookup all aliases */ 1261 const char *syms[] = { 1262 "_entry_trampoline", 1263 "__entry_trampoline_start", 1264 "entry_SYSCALL_64_trampoline", 1265 }; 1266 struct symbol *sym = dso__first_symbol(dso); 1267 unsigned int i; 1268 1269 for (; sym; sym = dso__next_symbol(sym)) { 1270 if (sym->binding != STB_GLOBAL) 1271 continue; 1272 for (i = 0; i < ARRAY_SIZE(syms); i++) { 1273 if (!strcmp(sym->name, syms[i])) 1274 return sym->start; 1275 } 1276 } 1277 1278 return 0; 1279 } 1280 1281 /* 1282 
* These values can be used for kernels that do not have symbols for the entry 1283 * trampolines in kallsyms. 1284 */ 1285 #define X86_64_CPU_ENTRY_AREA_PER_CPU 0xfffffe0000000000ULL 1286 #define X86_64_CPU_ENTRY_AREA_SIZE 0x2c000 1287 #define X86_64_ENTRY_TRAMPOLINE 0x6000 1288 1289 /* Map x86_64 PTI entry trampolines */ 1290 int machine__map_x86_64_entry_trampolines(struct machine *machine, 1291 struct dso *kernel) 1292 { 1293 struct maps *kmaps = machine__kernel_maps(machine); 1294 int nr_cpus_avail, cpu; 1295 bool found = false; 1296 struct map_rb_node *rb_node; 1297 u64 pgoff; 1298 1299 /* 1300 * In the vmlinux case, pgoff is a virtual address which must now be 1301 * mapped to a vmlinux offset. 1302 */ 1303 maps__for_each_entry(kmaps, rb_node) { 1304 struct map *dest_map, *map = rb_node->map; 1305 struct kmap *kmap = __map__kmap(map); 1306 1307 if (!kmap || !is_entry_trampoline(kmap->name)) 1308 continue; 1309 1310 dest_map = maps__find(kmaps, map__pgoff(map)); 1311 if (dest_map != map) 1312 map__set_pgoff(map, map__map_ip(dest_map, map__pgoff(map))); 1313 found = true; 1314 } 1315 if (found || machine->trampolines_mapped) 1316 return 0; 1317 1318 pgoff = find_entry_trampoline(kernel); 1319 if (!pgoff) 1320 return 0; 1321 1322 nr_cpus_avail = machine__nr_cpus_avail(machine); 1323 1324 /* Add a 1 page map for each CPU's entry trampoline */ 1325 for (cpu = 0; cpu < nr_cpus_avail; cpu++) { 1326 u64 va = X86_64_CPU_ENTRY_AREA_PER_CPU + 1327 cpu * X86_64_CPU_ENTRY_AREA_SIZE + 1328 X86_64_ENTRY_TRAMPOLINE; 1329 struct extra_kernel_map xm = { 1330 .start = va, 1331 .end = va + page_size, 1332 .pgoff = pgoff, 1333 }; 1334 1335 strlcpy(xm.name, ENTRY_TRAMPOLINE_NAME, KMAP_NAME_LEN); 1336 1337 if (machine__create_extra_kernel_map(machine, kernel, &xm) < 0) 1338 return -1; 1339 } 1340 1341 machine->trampolines_mapped = nr_cpus_avail; 1342 1343 return 0; 1344 } 1345 1346 int __weak machine__create_extra_kernel_maps(struct machine *machine __maybe_unused, 1347 struct dso *kernel __maybe_unused) 1348 { 1349 return 0; 1350 } 1351 1352 static int 1353 __machine__create_kernel_maps(struct machine *machine, struct dso *kernel) 1354 { 1355 /* In case of renewal the kernel map, destroy previous one */ 1356 machine__destroy_kernel_maps(machine); 1357 1358 map__put(machine->vmlinux_map); 1359 machine->vmlinux_map = map__new2(0, kernel); 1360 if (machine->vmlinux_map == NULL) 1361 return -ENOMEM; 1362 1363 map__set_map_ip(machine->vmlinux_map, identity__map_ip); 1364 map__set_unmap_ip(machine->vmlinux_map, identity__map_ip); 1365 return maps__insert(machine__kernel_maps(machine), machine->vmlinux_map); 1366 } 1367 1368 void machine__destroy_kernel_maps(struct machine *machine) 1369 { 1370 struct kmap *kmap; 1371 struct map *map = machine__kernel_map(machine); 1372 1373 if (map == NULL) 1374 return; 1375 1376 kmap = map__kmap(map); 1377 maps__remove(machine__kernel_maps(machine), map); 1378 if (kmap && kmap->ref_reloc_sym) { 1379 zfree((char **)&kmap->ref_reloc_sym->name); 1380 zfree(&kmap->ref_reloc_sym); 1381 } 1382 1383 map__zput(machine->vmlinux_map); 1384 } 1385 1386 int machines__create_guest_kernel_maps(struct machines *machines) 1387 { 1388 int ret = 0; 1389 struct dirent **namelist = NULL; 1390 int i, items = 0; 1391 char path[PATH_MAX]; 1392 pid_t pid; 1393 char *endp; 1394 1395 if (symbol_conf.default_guest_vmlinux_name || 1396 symbol_conf.default_guest_modules || 1397 symbol_conf.default_guest_kallsyms) { 1398 machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID); 1399 } 1400 1401 if 
(symbol_conf.guestmount) { 1402 items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL); 1403 if (items <= 0) 1404 return -ENOENT; 1405 for (i = 0; i < items; i++) { 1406 if (!isdigit(namelist[i]->d_name[0])) { 1407 /* Filter out . and .. */ 1408 continue; 1409 } 1410 pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10); 1411 if ((*endp != '\0') || 1412 (endp == namelist[i]->d_name) || 1413 (errno == ERANGE)) { 1414 pr_debug("invalid directory (%s). Skipping.\n", 1415 namelist[i]->d_name); 1416 continue; 1417 } 1418 sprintf(path, "%s/%s/proc/kallsyms", 1419 symbol_conf.guestmount, 1420 namelist[i]->d_name); 1421 ret = access(path, R_OK); 1422 if (ret) { 1423 pr_debug("Can't access file %s\n", path); 1424 goto failure; 1425 } 1426 machines__create_kernel_maps(machines, pid); 1427 } 1428 failure: 1429 free(namelist); 1430 } 1431 1432 return ret; 1433 } 1434 1435 void machines__destroy_kernel_maps(struct machines *machines) 1436 { 1437 struct rb_node *next = rb_first_cached(&machines->guests); 1438 1439 machine__destroy_kernel_maps(&machines->host); 1440 1441 while (next) { 1442 struct machine *pos = rb_entry(next, struct machine, rb_node); 1443 1444 next = rb_next(&pos->rb_node); 1445 rb_erase_cached(&pos->rb_node, &machines->guests); 1446 machine__delete(pos); 1447 } 1448 } 1449 1450 int machines__create_kernel_maps(struct machines *machines, pid_t pid) 1451 { 1452 struct machine *machine = machines__findnew(machines, pid); 1453 1454 if (machine == NULL) 1455 return -1; 1456 1457 return machine__create_kernel_maps(machine); 1458 } 1459 1460 int machine__load_kallsyms(struct machine *machine, const char *filename) 1461 { 1462 struct map *map = machine__kernel_map(machine); 1463 struct dso *dso = map__dso(map); 1464 int ret = __dso__load_kallsyms(dso, filename, map, true); 1465 1466 if (ret > 0) { 1467 dso__set_loaded(dso); 1468 /* 1469 * Since /proc/kallsyms will have multiple sessions for the 1470 * kernel, with modules between them, fixup the end of all 1471 * sections. 
1472 */ 1473 maps__fixup_end(machine__kernel_maps(machine)); 1474 } 1475 1476 return ret; 1477 } 1478 1479 int machine__load_vmlinux_path(struct machine *machine) 1480 { 1481 struct map *map = machine__kernel_map(machine); 1482 struct dso *dso = map__dso(map); 1483 int ret = dso__load_vmlinux_path(dso, map); 1484 1485 if (ret > 0) 1486 dso__set_loaded(dso); 1487 1488 return ret; 1489 } 1490 1491 static char *get_kernel_version(const char *root_dir) 1492 { 1493 char version[PATH_MAX]; 1494 FILE *file; 1495 char *name, *tmp; 1496 const char *prefix = "Linux version "; 1497 1498 sprintf(version, "%s/proc/version", root_dir); 1499 file = fopen(version, "r"); 1500 if (!file) 1501 return NULL; 1502 1503 tmp = fgets(version, sizeof(version), file); 1504 fclose(file); 1505 if (!tmp) 1506 return NULL; 1507 1508 name = strstr(version, prefix); 1509 if (!name) 1510 return NULL; 1511 name += strlen(prefix); 1512 tmp = strchr(name, ' '); 1513 if (tmp) 1514 *tmp = '\0'; 1515 1516 return strdup(name); 1517 } 1518 1519 static bool is_kmod_dso(struct dso *dso) 1520 { 1521 return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE || 1522 dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE; 1523 } 1524 1525 static int maps__set_module_path(struct maps *maps, const char *path, struct kmod_path *m) 1526 { 1527 char *long_name; 1528 struct dso *dso; 1529 struct map *map = maps__find_by_name(maps, m->name); 1530 1531 if (map == NULL) 1532 return 0; 1533 1534 long_name = strdup(path); 1535 if (long_name == NULL) 1536 return -ENOMEM; 1537 1538 dso = map__dso(map); 1539 dso__set_long_name(dso, long_name, true); 1540 dso__kernel_module_get_build_id(dso, ""); 1541 1542 /* 1543 * Full name could reveal us kmod compression, so 1544 * we need to update the symtab_type if needed. 
1545 */ 1546 if (m->comp && is_kmod_dso(dso)) { 1547 dso->symtab_type++; 1548 dso->comp = m->comp; 1549 } 1550 1551 return 0; 1552 } 1553 1554 static int maps__set_modules_path_dir(struct maps *maps, const char *dir_name, int depth) 1555 { 1556 struct dirent *dent; 1557 DIR *dir = opendir(dir_name); 1558 int ret = 0; 1559 1560 if (!dir) { 1561 pr_debug("%s: cannot open %s dir\n", __func__, dir_name); 1562 return -1; 1563 } 1564 1565 while ((dent = readdir(dir)) != NULL) { 1566 char path[PATH_MAX]; 1567 struct stat st; 1568 1569 /*sshfs might return bad dent->d_type, so we have to stat*/ 1570 path__join(path, sizeof(path), dir_name, dent->d_name); 1571 if (stat(path, &st)) 1572 continue; 1573 1574 if (S_ISDIR(st.st_mode)) { 1575 if (!strcmp(dent->d_name, ".") || 1576 !strcmp(dent->d_name, "..")) 1577 continue; 1578 1579 /* Do not follow top-level source and build symlinks */ 1580 if (depth == 0) { 1581 if (!strcmp(dent->d_name, "source") || 1582 !strcmp(dent->d_name, "build")) 1583 continue; 1584 } 1585 1586 ret = maps__set_modules_path_dir(maps, path, depth + 1); 1587 if (ret < 0) 1588 goto out; 1589 } else { 1590 struct kmod_path m; 1591 1592 ret = kmod_path__parse_name(&m, dent->d_name); 1593 if (ret) 1594 goto out; 1595 1596 if (m.kmod) 1597 ret = maps__set_module_path(maps, path, &m); 1598 1599 zfree(&m.name); 1600 1601 if (ret) 1602 goto out; 1603 } 1604 } 1605 1606 out: 1607 closedir(dir); 1608 return ret; 1609 } 1610 1611 static int machine__set_modules_path(struct machine *machine) 1612 { 1613 char *version; 1614 char modules_path[PATH_MAX]; 1615 1616 version = get_kernel_version(machine->root_dir); 1617 if (!version) 1618 return -1; 1619 1620 snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s", 1621 machine->root_dir, version); 1622 free(version); 1623 1624 return maps__set_modules_path_dir(machine__kernel_maps(machine), modules_path, 0); 1625 } 1626 int __weak arch__fix_module_text_start(u64 *start __maybe_unused, 1627 u64 *size __maybe_unused, 1628 const char *name __maybe_unused) 1629 { 1630 return 0; 1631 } 1632 1633 static int machine__create_module(void *arg, const char *name, u64 start, 1634 u64 size) 1635 { 1636 struct machine *machine = arg; 1637 struct map *map; 1638 1639 if (arch__fix_module_text_start(&start, &size, name) < 0) 1640 return -1; 1641 1642 map = machine__addnew_module_map(machine, start, name); 1643 if (map == NULL) 1644 return -1; 1645 map__set_end(map, start + size); 1646 1647 dso__kernel_module_get_build_id(map__dso(map), machine->root_dir); 1648 map__put(map); 1649 return 0; 1650 } 1651 1652 static int machine__create_modules(struct machine *machine) 1653 { 1654 const char *modules; 1655 char path[PATH_MAX]; 1656 1657 if (machine__is_default_guest(machine)) { 1658 modules = symbol_conf.default_guest_modules; 1659 } else { 1660 snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir); 1661 modules = path; 1662 } 1663 1664 if (symbol__restricted_filename(modules, "/proc/modules")) 1665 return -1; 1666 1667 if (modules__parse(modules, machine, machine__create_module)) 1668 return -1; 1669 1670 if (!machine__set_modules_path(machine)) 1671 return 0; 1672 1673 pr_debug("Problems setting modules path maps, continuing anyway...\n"); 1674 1675 return 0; 1676 } 1677 1678 static void machine__set_kernel_mmap(struct machine *machine, 1679 u64 start, u64 end) 1680 { 1681 map__set_start(machine->vmlinux_map, start); 1682 map__set_end(machine->vmlinux_map, end); 1683 /* 1684 * Be a bit paranoid here, some perf.data file came with 1685 * a zero 
sized synthesized MMAP event for the kernel. 1686 */ 1687 if (start == 0 && end == 0) 1688 map__set_end(machine->vmlinux_map, ~0ULL); 1689 } 1690 1691 static int machine__update_kernel_mmap(struct machine *machine, 1692 u64 start, u64 end) 1693 { 1694 struct map *orig, *updated; 1695 int err; 1696 1697 orig = machine->vmlinux_map; 1698 updated = map__get(orig); 1699 1700 machine->vmlinux_map = updated; 1701 machine__set_kernel_mmap(machine, start, end); 1702 maps__remove(machine__kernel_maps(machine), orig); 1703 err = maps__insert(machine__kernel_maps(machine), updated); 1704 map__put(orig); 1705 1706 return err; 1707 } 1708 1709 int machine__create_kernel_maps(struct machine *machine) 1710 { 1711 struct dso *kernel = machine__get_kernel(machine); 1712 const char *name = NULL; 1713 u64 start = 0, end = ~0ULL; 1714 int ret; 1715 1716 if (kernel == NULL) 1717 return -1; 1718 1719 ret = __machine__create_kernel_maps(machine, kernel); 1720 if (ret < 0) 1721 goto out_put; 1722 1723 if (symbol_conf.use_modules && machine__create_modules(machine) < 0) { 1724 if (machine__is_host(machine)) 1725 pr_debug("Problems creating module maps, " 1726 "continuing anyway...\n"); 1727 else 1728 pr_debug("Problems creating module maps for guest %d, " 1729 "continuing anyway...\n", machine->pid); 1730 } 1731 1732 if (!machine__get_running_kernel_start(machine, &name, &start, &end)) { 1733 if (name && 1734 map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map, name, start)) { 1735 machine__destroy_kernel_maps(machine); 1736 ret = -1; 1737 goto out_put; 1738 } 1739 1740 /* 1741 * we have a real start address now, so re-order the kmaps 1742 * assume it's the last in the kmaps 1743 */ 1744 ret = machine__update_kernel_mmap(machine, start, end); 1745 if (ret < 0) 1746 goto out_put; 1747 } 1748 1749 if (machine__create_extra_kernel_maps(machine, kernel)) 1750 pr_debug("Problems creating extra kernel maps, continuing anyway...\n"); 1751 1752 if (end == ~0ULL) { 1753 /* update end address of the kernel map using adjacent module address */ 1754 struct map_rb_node *rb_node = maps__find_node(machine__kernel_maps(machine), 1755 machine__kernel_map(machine)); 1756 struct map_rb_node *next = map_rb_node__next(rb_node); 1757 1758 if (next) 1759 machine__set_kernel_mmap(machine, start, map__start(next->map)); 1760 } 1761 1762 out_put: 1763 dso__put(kernel); 1764 return ret; 1765 } 1766 1767 static bool machine__uses_kcore(struct machine *machine) 1768 { 1769 struct dso *dso; 1770 1771 list_for_each_entry(dso, &machine->dsos.head, node) { 1772 if (dso__is_kcore(dso)) 1773 return true; 1774 } 1775 1776 return false; 1777 } 1778 1779 static bool perf_event__is_extra_kernel_mmap(struct machine *machine, 1780 struct extra_kernel_map *xm) 1781 { 1782 return machine__is(machine, "x86_64") && 1783 is_entry_trampoline(xm->name); 1784 } 1785 1786 static int machine__process_extra_kernel_map(struct machine *machine, 1787 struct extra_kernel_map *xm) 1788 { 1789 struct dso *kernel = machine__kernel_dso(machine); 1790 1791 if (kernel == NULL) 1792 return -1; 1793 1794 return machine__create_extra_kernel_map(machine, kernel, xm); 1795 } 1796 1797 static int machine__process_kernel_mmap_event(struct machine *machine, 1798 struct extra_kernel_map *xm, 1799 struct build_id *bid) 1800 { 1801 enum dso_space_type dso_space; 1802 bool is_kernel_mmap; 1803 const char *mmap_name = machine->mmap_name; 1804 1805 /* If we have maps from kcore then we do not need or want any others */ 1806 if (machine__uses_kcore(machine)) 1807 return 0; 1808 1809 if 
(machine__is_host(machine)) 1810 dso_space = DSO_SPACE__KERNEL; 1811 else 1812 dso_space = DSO_SPACE__KERNEL_GUEST; 1813 1814 is_kernel_mmap = memcmp(xm->name, mmap_name, strlen(mmap_name) - 1) == 0; 1815 if (!is_kernel_mmap && !machine__is_host(machine)) { 1816 /* 1817 * If the event was recorded inside the guest and injected into 1818 * the host perf.data file, then it will match a host mmap_name, 1819 * so try that - see machine__set_mmap_name(). 1820 */ 1821 mmap_name = "[kernel.kallsyms]"; 1822 is_kernel_mmap = memcmp(xm->name, mmap_name, strlen(mmap_name) - 1) == 0; 1823 } 1824 if (xm->name[0] == '/' || 1825 (!is_kernel_mmap && xm->name[0] == '[')) { 1826 struct map *map = machine__addnew_module_map(machine, xm->start, xm->name); 1827 1828 if (map == NULL) 1829 goto out_problem; 1830 1831 map__set_end(map, map__start(map) + xm->end - xm->start); 1832 1833 if (build_id__is_defined(bid)) 1834 dso__set_build_id(map__dso(map), bid); 1835 1836 map__put(map); 1837 } else if (is_kernel_mmap) { 1838 const char *symbol_name = xm->name + strlen(mmap_name); 1839 /* 1840 * Should be there already, from the build-id table in 1841 * the header. 1842 */ 1843 struct dso *kernel = NULL; 1844 struct dso *dso; 1845 1846 down_read(&machine->dsos.lock); 1847 1848 list_for_each_entry(dso, &machine->dsos.head, node) { 1849 1850 /* 1851 * The cpumode passed to is_kernel_module is not the 1852 * cpumode of *this* event. If we insist on passing 1853 * correct cpumode to is_kernel_module, we should 1854 * record the cpumode when we adding this dso to the 1855 * linked list. 1856 * 1857 * However we don't really need passing correct 1858 * cpumode. We know the correct cpumode must be kernel 1859 * mode (if not, we should not link it onto kernel_dsos 1860 * list). 1861 * 1862 * Therefore, we pass PERF_RECORD_MISC_CPUMODE_UNKNOWN. 1863 * is_kernel_module() treats it as a kernel cpumode. 1864 */ 1865 1866 if (!dso->kernel || 1867 is_kernel_module(dso->long_name, 1868 PERF_RECORD_MISC_CPUMODE_UNKNOWN)) 1869 continue; 1870 1871 1872 kernel = dso__get(dso); 1873 break; 1874 } 1875 1876 up_read(&machine->dsos.lock); 1877 1878 if (kernel == NULL) 1879 kernel = machine__findnew_dso(machine, machine->mmap_name); 1880 if (kernel == NULL) 1881 goto out_problem; 1882 1883 kernel->kernel = dso_space; 1884 if (__machine__create_kernel_maps(machine, kernel) < 0) { 1885 dso__put(kernel); 1886 goto out_problem; 1887 } 1888 1889 if (strstr(kernel->long_name, "vmlinux")) 1890 dso__set_short_name(kernel, "[kernel.vmlinux]", false); 1891 1892 if (machine__update_kernel_mmap(machine, xm->start, xm->end) < 0) { 1893 dso__put(kernel); 1894 goto out_problem; 1895 } 1896 1897 if (build_id__is_defined(bid)) 1898 dso__set_build_id(kernel, bid); 1899 1900 /* 1901 * Avoid using a zero address (kptr_restrict) for the ref reloc 1902 * symbol. Effectively having zero here means that at record 1903 * time /proc/sys/kernel/kptr_restrict was non zero. 
1904 */ 1905 if (xm->pgoff != 0) { 1906 map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map, 1907 symbol_name, 1908 xm->pgoff); 1909 } 1910 1911 if (machine__is_default_guest(machine)) { 1912 /* 1913 * preload dso of guest kernel and modules 1914 */ 1915 dso__load(kernel, machine__kernel_map(machine)); 1916 } 1917 dso__put(kernel); 1918 } else if (perf_event__is_extra_kernel_mmap(machine, xm)) { 1919 return machine__process_extra_kernel_map(machine, xm); 1920 } 1921 return 0; 1922 out_problem: 1923 return -1; 1924 } 1925 1926 int machine__process_mmap2_event(struct machine *machine, 1927 union perf_event *event, 1928 struct perf_sample *sample) 1929 { 1930 struct thread *thread; 1931 struct map *map; 1932 struct dso_id dso_id = { 1933 .maj = event->mmap2.maj, 1934 .min = event->mmap2.min, 1935 .ino = event->mmap2.ino, 1936 .ino_generation = event->mmap2.ino_generation, 1937 }; 1938 struct build_id __bid, *bid = NULL; 1939 int ret = 0; 1940 1941 if (dump_trace) 1942 perf_event__fprintf_mmap2(event, stdout); 1943 1944 if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) { 1945 bid = &__bid; 1946 build_id__init(bid, event->mmap2.build_id, event->mmap2.build_id_size); 1947 } 1948 1949 if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL || 1950 sample->cpumode == PERF_RECORD_MISC_KERNEL) { 1951 struct extra_kernel_map xm = { 1952 .start = event->mmap2.start, 1953 .end = event->mmap2.start + event->mmap2.len, 1954 .pgoff = event->mmap2.pgoff, 1955 }; 1956 1957 strlcpy(xm.name, event->mmap2.filename, KMAP_NAME_LEN); 1958 ret = machine__process_kernel_mmap_event(machine, &xm, bid); 1959 if (ret < 0) 1960 goto out_problem; 1961 return 0; 1962 } 1963 1964 thread = machine__findnew_thread(machine, event->mmap2.pid, 1965 event->mmap2.tid); 1966 if (thread == NULL) 1967 goto out_problem; 1968 1969 map = map__new(machine, event->mmap2.start, 1970 event->mmap2.len, event->mmap2.pgoff, 1971 &dso_id, event->mmap2.prot, 1972 event->mmap2.flags, bid, 1973 event->mmap2.filename, thread); 1974 1975 if (map == NULL) 1976 goto out_problem_map; 1977 1978 ret = thread__insert_map(thread, map); 1979 if (ret) 1980 goto out_problem_insert; 1981 1982 thread__put(thread); 1983 map__put(map); 1984 return 0; 1985 1986 out_problem_insert: 1987 map__put(map); 1988 out_problem_map: 1989 thread__put(thread); 1990 out_problem: 1991 dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n"); 1992 return 0; 1993 } 1994 1995 int machine__process_mmap_event(struct machine *machine, union perf_event *event, 1996 struct perf_sample *sample) 1997 { 1998 struct thread *thread; 1999 struct map *map; 2000 u32 prot = 0; 2001 int ret = 0; 2002 2003 if (dump_trace) 2004 perf_event__fprintf_mmap(event, stdout); 2005 2006 if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL || 2007 sample->cpumode == PERF_RECORD_MISC_KERNEL) { 2008 struct extra_kernel_map xm = { 2009 .start = event->mmap.start, 2010 .end = event->mmap.start + event->mmap.len, 2011 .pgoff = event->mmap.pgoff, 2012 }; 2013 2014 strlcpy(xm.name, event->mmap.filename, KMAP_NAME_LEN); 2015 ret = machine__process_kernel_mmap_event(machine, &xm, NULL); 2016 if (ret < 0) 2017 goto out_problem; 2018 return 0; 2019 } 2020 2021 thread = machine__findnew_thread(machine, event->mmap.pid, 2022 event->mmap.tid); 2023 if (thread == NULL) 2024 goto out_problem; 2025 2026 if (!(event->header.misc & PERF_RECORD_MISC_MMAP_DATA)) 2027 prot = PROT_EXEC; 2028 2029 map = map__new(machine, event->mmap.start, 2030 event->mmap.len, event->mmap.pgoff, 2031 NULL, prot, 0, NULL, 
event->mmap.filename, thread); 2032 2033 if (map == NULL) 2034 goto out_problem_map; 2035 2036 ret = thread__insert_map(thread, map); 2037 if (ret) 2038 goto out_problem_insert; 2039 2040 thread__put(thread); 2041 map__put(map); 2042 return 0; 2043 2044 out_problem_insert: 2045 map__put(map); 2046 out_problem_map: 2047 thread__put(thread); 2048 out_problem: 2049 dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n"); 2050 return 0; 2051 } 2052 2053 static void __machine__remove_thread(struct machine *machine, struct thread_rb_node *nd, 2054 struct thread *th, bool lock) 2055 { 2056 struct threads *threads = machine__threads(machine, thread__tid(th)); 2057 2058 if (!nd) 2059 nd = thread_rb_node__find(th, &threads->entries.rb_root); 2060 2061 if (threads->last_match && RC_CHK_ACCESS(threads->last_match) == RC_CHK_ACCESS(th)) 2062 threads__set_last_match(threads, NULL); 2063 2064 if (lock) 2065 down_write(&threads->lock); 2066 2067 BUG_ON(refcount_read(thread__refcnt(th)) == 0); 2068 2069 thread__put(nd->thread); 2070 rb_erase_cached(&nd->rb_node, &threads->entries); 2071 RB_CLEAR_NODE(&nd->rb_node); 2072 --threads->nr; 2073 2074 free(nd); 2075 2076 if (lock) 2077 up_write(&threads->lock); 2078 } 2079 2080 void machine__remove_thread(struct machine *machine, struct thread *th) 2081 { 2082 return __machine__remove_thread(machine, NULL, th, true); 2083 } 2084 2085 int machine__process_fork_event(struct machine *machine, union perf_event *event, 2086 struct perf_sample *sample) 2087 { 2088 struct thread *thread = machine__find_thread(machine, 2089 event->fork.pid, 2090 event->fork.tid); 2091 struct thread *parent = machine__findnew_thread(machine, 2092 event->fork.ppid, 2093 event->fork.ptid); 2094 bool do_maps_clone = true; 2095 int err = 0; 2096 2097 if (dump_trace) 2098 perf_event__fprintf_task(event, stdout); 2099 2100 /* 2101 * There may be an existing thread that is not actually the parent, 2102 * either because we are processing events out of order, or because the 2103 * (fork) event that would have removed the thread was lost. Assume the 2104 * latter case and continue on as best we can. 2105 */ 2106 if (thread__pid(parent) != (pid_t)event->fork.ppid) { 2107 dump_printf("removing erroneous parent thread %d/%d\n", 2108 thread__pid(parent), thread__tid(parent)); 2109 machine__remove_thread(machine, parent); 2110 thread__put(parent); 2111 parent = machine__findnew_thread(machine, event->fork.ppid, 2112 event->fork.ptid); 2113 } 2114 2115 /* if a thread currently exists for the thread id remove it */ 2116 if (thread != NULL) { 2117 machine__remove_thread(machine, thread); 2118 thread__put(thread); 2119 } 2120 2121 thread = machine__findnew_thread(machine, event->fork.pid, 2122 event->fork.tid); 2123 /* 2124 * When synthesizing FORK events, we are trying to create thread 2125 * objects for the already running tasks on the machine. 2126 * 2127 * Normally, for a kernel FORK event, we want to clone the parent's 2128 * maps because that is what the kernel just did. 2129 * 2130 * But when synthesizing, this should not be done. If we do, we end up 2131 * with overlapping maps as we process the synthesized MMAP2 events that 2132 * get delivered shortly thereafter. 2133 * 2134 * Use the FORK event misc flags in an internal way to signal this 2135 * situation, so we can elide the map clone when appropriate. 
2136 */ 2137 if (event->fork.header.misc & PERF_RECORD_MISC_FORK_EXEC) 2138 do_maps_clone = false; 2139 2140 if (thread == NULL || parent == NULL || 2141 thread__fork(thread, parent, sample->time, do_maps_clone) < 0) { 2142 dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n"); 2143 err = -1; 2144 } 2145 thread__put(thread); 2146 thread__put(parent); 2147 2148 return err; 2149 } 2150 2151 int machine__process_exit_event(struct machine *machine, union perf_event *event, 2152 struct perf_sample *sample __maybe_unused) 2153 { 2154 struct thread *thread = machine__find_thread(machine, 2155 event->fork.pid, 2156 event->fork.tid); 2157 2158 if (dump_trace) 2159 perf_event__fprintf_task(event, stdout); 2160 2161 if (thread != NULL) { 2162 if (symbol_conf.keep_exited_threads) 2163 thread__set_exited(thread, /*exited=*/true); 2164 else 2165 machine__remove_thread(machine, thread); 2166 } 2167 thread__put(thread); 2168 return 0; 2169 } 2170 2171 int machine__process_event(struct machine *machine, union perf_event *event, 2172 struct perf_sample *sample) 2173 { 2174 int ret; 2175 2176 switch (event->header.type) { 2177 case PERF_RECORD_COMM: 2178 ret = machine__process_comm_event(machine, event, sample); break; 2179 case PERF_RECORD_MMAP: 2180 ret = machine__process_mmap_event(machine, event, sample); break; 2181 case PERF_RECORD_NAMESPACES: 2182 ret = machine__process_namespaces_event(machine, event, sample); break; 2183 case PERF_RECORD_CGROUP: 2184 ret = machine__process_cgroup_event(machine, event, sample); break; 2185 case PERF_RECORD_MMAP2: 2186 ret = machine__process_mmap2_event(machine, event, sample); break; 2187 case PERF_RECORD_FORK: 2188 ret = machine__process_fork_event(machine, event, sample); break; 2189 case PERF_RECORD_EXIT: 2190 ret = machine__process_exit_event(machine, event, sample); break; 2191 case PERF_RECORD_LOST: 2192 ret = machine__process_lost_event(machine, event, sample); break; 2193 case PERF_RECORD_AUX: 2194 ret = machine__process_aux_event(machine, event); break; 2195 case PERF_RECORD_ITRACE_START: 2196 ret = machine__process_itrace_start_event(machine, event); break; 2197 case PERF_RECORD_LOST_SAMPLES: 2198 ret = machine__process_lost_samples_event(machine, event, sample); break; 2199 case PERF_RECORD_SWITCH: 2200 case PERF_RECORD_SWITCH_CPU_WIDE: 2201 ret = machine__process_switch_event(machine, event); break; 2202 case PERF_RECORD_KSYMBOL: 2203 ret = machine__process_ksymbol(machine, event, sample); break; 2204 case PERF_RECORD_BPF_EVENT: 2205 ret = machine__process_bpf(machine, event, sample); break; 2206 case PERF_RECORD_TEXT_POKE: 2207 ret = machine__process_text_poke(machine, event, sample); break; 2208 case PERF_RECORD_AUX_OUTPUT_HW_ID: 2209 ret = machine__process_aux_output_hw_id_event(machine, event); break; 2210 default: 2211 ret = -1; 2212 break; 2213 } 2214 2215 return ret; 2216 } 2217 2218 static bool symbol__match_regex(struct symbol *sym, regex_t *regex) 2219 { 2220 if (!regexec(regex, sym->name, 0, NULL, 0)) 2221 return true; 2222 return false; 2223 } 2224 2225 static void ip__resolve_ams(struct thread *thread, 2226 struct addr_map_symbol *ams, 2227 u64 ip) 2228 { 2229 struct addr_location al; 2230 2231 addr_location__init(&al); 2232 /* 2233 * We cannot use the header.misc hint to determine whether a 2234 * branch stack address is user, kernel, guest, hypervisor. 2235 * Branches may straddle the kernel/user/hypervisor boundaries. 
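 * For example, a branch may be taken from a user-space address directly to
 * a kernel address (or vice versa), so a single cpumode cannot describe both
 * of its ends.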
2236 * Thus, we have to try consecutively until we find a match 2237 * or else, the symbol is unknown 2238 */ 2239 thread__find_cpumode_addr_location(thread, ip, &al); 2240 2241 ams->addr = ip; 2242 ams->al_addr = al.addr; 2243 ams->al_level = al.level; 2244 ams->ms.maps = maps__get(al.maps); 2245 ams->ms.sym = al.sym; 2246 ams->ms.map = map__get(al.map); 2247 ams->phys_addr = 0; 2248 ams->data_page_size = 0; 2249 addr_location__exit(&al); 2250 } 2251 2252 static void ip__resolve_data(struct thread *thread, 2253 u8 m, struct addr_map_symbol *ams, 2254 u64 addr, u64 phys_addr, u64 daddr_page_size) 2255 { 2256 struct addr_location al; 2257 2258 addr_location__init(&al); 2259 2260 thread__find_symbol(thread, m, addr, &al); 2261 2262 ams->addr = addr; 2263 ams->al_addr = al.addr; 2264 ams->al_level = al.level; 2265 ams->ms.maps = maps__get(al.maps); 2266 ams->ms.sym = al.sym; 2267 ams->ms.map = map__get(al.map); 2268 ams->phys_addr = phys_addr; 2269 ams->data_page_size = daddr_page_size; 2270 addr_location__exit(&al); 2271 } 2272 2273 struct mem_info *sample__resolve_mem(struct perf_sample *sample, 2274 struct addr_location *al) 2275 { 2276 struct mem_info *mi = mem_info__new(); 2277 2278 if (!mi) 2279 return NULL; 2280 2281 ip__resolve_ams(al->thread, &mi->iaddr, sample->ip); 2282 ip__resolve_data(al->thread, al->cpumode, &mi->daddr, 2283 sample->addr, sample->phys_addr, 2284 sample->data_page_size); 2285 mi->data_src.val = sample->data_src; 2286 2287 return mi; 2288 } 2289 2290 static char *callchain_srcline(struct map_symbol *ms, u64 ip) 2291 { 2292 struct map *map = ms->map; 2293 char *srcline = NULL; 2294 struct dso *dso; 2295 2296 if (!map || callchain_param.key == CCKEY_FUNCTION) 2297 return srcline; 2298 2299 dso = map__dso(map); 2300 srcline = srcline__tree_find(&dso->srclines, ip); 2301 if (!srcline) { 2302 bool show_sym = false; 2303 bool show_addr = callchain_param.key == CCKEY_ADDRESS; 2304 2305 srcline = get_srcline(dso, map__rip_2objdump(map, ip), 2306 ms->sym, show_sym, show_addr, ip); 2307 srcline__tree_insert(&dso->srclines, ip, srcline); 2308 } 2309 2310 return srcline; 2311 } 2312 2313 struct iterations { 2314 int nr_loop_iter; 2315 u64 cycles; 2316 }; 2317 2318 static int add_callchain_ip(struct thread *thread, 2319 struct callchain_cursor *cursor, 2320 struct symbol **parent, 2321 struct addr_location *root_al, 2322 u8 *cpumode, 2323 u64 ip, 2324 bool branch, 2325 struct branch_flags *flags, 2326 struct iterations *iter, 2327 u64 branch_from) 2328 { 2329 struct map_symbol ms = {}; 2330 struct addr_location al; 2331 int nr_loop_iter = 0, err = 0; 2332 u64 iter_cycles = 0; 2333 const char *srcline = NULL; 2334 2335 addr_location__init(&al); 2336 al.filtered = 0; 2337 al.sym = NULL; 2338 al.srcline = NULL; 2339 if (!cpumode) { 2340 thread__find_cpumode_addr_location(thread, ip, &al); 2341 } else { 2342 if (ip >= PERF_CONTEXT_MAX) { 2343 switch (ip) { 2344 case PERF_CONTEXT_HV: 2345 *cpumode = PERF_RECORD_MISC_HYPERVISOR; 2346 break; 2347 case PERF_CONTEXT_KERNEL: 2348 *cpumode = PERF_RECORD_MISC_KERNEL; 2349 break; 2350 case PERF_CONTEXT_USER: 2351 *cpumode = PERF_RECORD_MISC_USER; 2352 break; 2353 default: 2354 pr_debug("invalid callchain context: " 2355 "%"PRId64"\n", (s64) ip); 2356 /* 2357 * It seems the callchain is corrupted. 2358 * Discard all. 
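 * (This is the default arm of the switch above, so it also catches the
 * guest context markers and any other unexpected PERF_CONTEXT_* value.)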
2359 */ 2360 callchain_cursor_reset(cursor); 2361 err = 1; 2362 goto out; 2363 } 2364 goto out; 2365 } 2366 thread__find_symbol(thread, *cpumode, ip, &al); 2367 } 2368 2369 if (al.sym != NULL) { 2370 if (perf_hpp_list.parent && !*parent && 2371 symbol__match_regex(al.sym, &parent_regex)) 2372 *parent = al.sym; 2373 else if (have_ignore_callees && root_al && 2374 symbol__match_regex(al.sym, &ignore_callees_regex)) { 2375 /* Treat this symbol as the root, 2376 forgetting its callees. */ 2377 addr_location__copy(root_al, &al); 2378 callchain_cursor_reset(cursor); 2379 } 2380 } 2381 2382 if (symbol_conf.hide_unresolved && al.sym == NULL) 2383 goto out; 2384 2385 if (iter) { 2386 nr_loop_iter = iter->nr_loop_iter; 2387 iter_cycles = iter->cycles; 2388 } 2389 2390 ms.maps = maps__get(al.maps); 2391 ms.map = map__get(al.map); 2392 ms.sym = al.sym; 2393 srcline = callchain_srcline(&ms, al.addr); 2394 err = callchain_cursor_append(cursor, ip, &ms, 2395 branch, flags, nr_loop_iter, 2396 iter_cycles, branch_from, srcline); 2397 out: 2398 addr_location__exit(&al); 2399 maps__put(ms.maps); 2400 map__put(ms.map); 2401 return err; 2402 } 2403 2404 struct branch_info *sample__resolve_bstack(struct perf_sample *sample, 2405 struct addr_location *al) 2406 { 2407 unsigned int i; 2408 const struct branch_stack *bs = sample->branch_stack; 2409 struct branch_entry *entries = perf_sample__branch_entries(sample); 2410 struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info)); 2411 2412 if (!bi) 2413 return NULL; 2414 2415 for (i = 0; i < bs->nr; i++) { 2416 ip__resolve_ams(al->thread, &bi[i].to, entries[i].to); 2417 ip__resolve_ams(al->thread, &bi[i].from, entries[i].from); 2418 bi[i].flags = entries[i].flags; 2419 } 2420 return bi; 2421 } 2422 2423 static void save_iterations(struct iterations *iter, 2424 struct branch_entry *be, int nr) 2425 { 2426 int i; 2427 2428 iter->nr_loop_iter++; 2429 iter->cycles = 0; 2430 2431 for (i = 0; i < nr; i++) 2432 iter->cycles += be[i].flags.cycles; 2433 } 2434 2435 #define CHASHSZ 127 2436 #define CHASHBITS 7 2437 #define NO_ENTRY 0xff 2438 2439 #define PERF_MAX_BRANCH_DEPTH 127 2440 2441 /* Remove loops. 
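 *
 * Roughly, based on the code below: each entry's 'from' address is hashed
 * into chash[]; when a later entry lands on a slot whose recorded entry has
 * the same 'from', the run of entries starting at the earlier occurrence is
 * compared with the run starting at the current one. If they are identical,
 * the duplicated run is dropped with memmove() and save_iterations() bumps
 * nr_loop_iter and sums the cycles of the removed copy, so a trace like
 * A->B, B->A, A->B, B->A collapses to a single A->B, B->A pair with the
 * extra iteration recorded in struct iterations.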
*/ 2442 static int remove_loops(struct branch_entry *l, int nr, 2443 struct iterations *iter) 2444 { 2445 int i, j, off; 2446 unsigned char chash[CHASHSZ]; 2447 2448 memset(chash, NO_ENTRY, sizeof(chash)); 2449 2450 BUG_ON(PERF_MAX_BRANCH_DEPTH > 255); 2451 2452 for (i = 0; i < nr; i++) { 2453 int h = hash_64(l[i].from, CHASHBITS) % CHASHSZ; 2454 2455 /* no collision handling for now */ 2456 if (chash[h] == NO_ENTRY) { 2457 chash[h] = i; 2458 } else if (l[chash[h]].from == l[i].from) { 2459 bool is_loop = true; 2460 /* check if it is a real loop */ 2461 off = 0; 2462 for (j = chash[h]; j < i && i + off < nr; j++, off++) 2463 if (l[j].from != l[i + off].from) { 2464 is_loop = false; 2465 break; 2466 } 2467 if (is_loop) { 2468 j = nr - (i + off); 2469 if (j > 0) { 2470 save_iterations(iter + i + off, 2471 l + i, off); 2472 2473 memmove(iter + i, iter + i + off, 2474 j * sizeof(*iter)); 2475 2476 memmove(l + i, l + i + off, 2477 j * sizeof(*l)); 2478 } 2479 2480 nr -= off; 2481 } 2482 } 2483 } 2484 return nr; 2485 } 2486 2487 static int lbr_callchain_add_kernel_ip(struct thread *thread, 2488 struct callchain_cursor *cursor, 2489 struct perf_sample *sample, 2490 struct symbol **parent, 2491 struct addr_location *root_al, 2492 u64 branch_from, 2493 bool callee, int end) 2494 { 2495 struct ip_callchain *chain = sample->callchain; 2496 u8 cpumode = PERF_RECORD_MISC_USER; 2497 int err, i; 2498 2499 if (callee) { 2500 for (i = 0; i < end + 1; i++) { 2501 err = add_callchain_ip(thread, cursor, parent, 2502 root_al, &cpumode, chain->ips[i], 2503 false, NULL, NULL, branch_from); 2504 if (err) 2505 return err; 2506 } 2507 return 0; 2508 } 2509 2510 for (i = end; i >= 0; i--) { 2511 err = add_callchain_ip(thread, cursor, parent, 2512 root_al, &cpumode, chain->ips[i], 2513 false, NULL, NULL, branch_from); 2514 if (err) 2515 return err; 2516 } 2517 2518 return 0; 2519 } 2520 2521 static void save_lbr_cursor_node(struct thread *thread, 2522 struct callchain_cursor *cursor, 2523 int idx) 2524 { 2525 struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread); 2526 2527 if (!lbr_stitch) 2528 return; 2529 2530 if (cursor->pos == cursor->nr) { 2531 lbr_stitch->prev_lbr_cursor[idx].valid = false; 2532 return; 2533 } 2534 2535 if (!cursor->curr) 2536 cursor->curr = cursor->first; 2537 else 2538 cursor->curr = cursor->curr->next; 2539 2540 map_symbol__exit(&lbr_stitch->prev_lbr_cursor[idx].ms); 2541 memcpy(&lbr_stitch->prev_lbr_cursor[idx], cursor->curr, 2542 sizeof(struct callchain_cursor_node)); 2543 lbr_stitch->prev_lbr_cursor[idx].ms.maps = maps__get(cursor->curr->ms.maps); 2544 lbr_stitch->prev_lbr_cursor[idx].ms.map = map__get(cursor->curr->ms.map); 2545 2546 lbr_stitch->prev_lbr_cursor[idx].valid = true; 2547 cursor->pos++; 2548 } 2549 2550 static int lbr_callchain_add_lbr_ip(struct thread *thread, 2551 struct callchain_cursor *cursor, 2552 struct perf_sample *sample, 2553 struct symbol **parent, 2554 struct addr_location *root_al, 2555 u64 *branch_from, 2556 bool callee) 2557 { 2558 struct branch_stack *lbr_stack = sample->branch_stack; 2559 struct branch_entry *entries = perf_sample__branch_entries(sample); 2560 u8 cpumode = PERF_RECORD_MISC_USER; 2561 int lbr_nr = lbr_stack->nr; 2562 struct branch_flags *flags; 2563 int err, i; 2564 u64 ip; 2565 2566 /* 2567 * The curr and pos are not used in writing session. They are cleared 2568 * in callchain_cursor_commit() when the writing session is closed. 2569 * Using curr and pos to track the current cursor node. 
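 *
 * (When LBR stitching is enabled, the block below walks curr to the last
 * node and sets pos accordingly, so that save_lbr_cursor_node() can later
 * snapshot each newly appended node into prev_lbr_cursor[] for
 * has_stitched_lbr() to compare against on the next sample.)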
2570 */ 2571 if (thread__lbr_stitch(thread)) { 2572 cursor->curr = NULL; 2573 cursor->pos = cursor->nr; 2574 if (cursor->nr) { 2575 cursor->curr = cursor->first; 2576 for (i = 0; i < (int)(cursor->nr - 1); i++) 2577 cursor->curr = cursor->curr->next; 2578 } 2579 } 2580 2581 if (callee) { 2582 /* Add LBR ip from first entries.to */ 2583 ip = entries[0].to; 2584 flags = &entries[0].flags; 2585 *branch_from = entries[0].from; 2586 err = add_callchain_ip(thread, cursor, parent, 2587 root_al, &cpumode, ip, 2588 true, flags, NULL, 2589 *branch_from); 2590 if (err) 2591 return err; 2592 2593 /* 2594 * The number of cursor node increases. 2595 * Move the current cursor node. 2596 * But does not need to save current cursor node for entry 0. 2597 * It's impossible to stitch the whole LBRs of previous sample. 2598 */ 2599 if (thread__lbr_stitch(thread) && (cursor->pos != cursor->nr)) { 2600 if (!cursor->curr) 2601 cursor->curr = cursor->first; 2602 else 2603 cursor->curr = cursor->curr->next; 2604 cursor->pos++; 2605 } 2606 2607 /* Add LBR ip from entries.from one by one. */ 2608 for (i = 0; i < lbr_nr; i++) { 2609 ip = entries[i].from; 2610 flags = &entries[i].flags; 2611 err = add_callchain_ip(thread, cursor, parent, 2612 root_al, &cpumode, ip, 2613 true, flags, NULL, 2614 *branch_from); 2615 if (err) 2616 return err; 2617 save_lbr_cursor_node(thread, cursor, i); 2618 } 2619 return 0; 2620 } 2621 2622 /* Add LBR ip from entries.from one by one. */ 2623 for (i = lbr_nr - 1; i >= 0; i--) { 2624 ip = entries[i].from; 2625 flags = &entries[i].flags; 2626 err = add_callchain_ip(thread, cursor, parent, 2627 root_al, &cpumode, ip, 2628 true, flags, NULL, 2629 *branch_from); 2630 if (err) 2631 return err; 2632 save_lbr_cursor_node(thread, cursor, i); 2633 } 2634 2635 if (lbr_nr > 0) { 2636 /* Add LBR ip from first entries.to */ 2637 ip = entries[0].to; 2638 flags = &entries[0].flags; 2639 *branch_from = entries[0].from; 2640 err = add_callchain_ip(thread, cursor, parent, 2641 root_al, &cpumode, ip, 2642 true, flags, NULL, 2643 *branch_from); 2644 if (err) 2645 return err; 2646 } 2647 2648 return 0; 2649 } 2650 2651 static int lbr_callchain_add_stitched_lbr_ip(struct thread *thread, 2652 struct callchain_cursor *cursor) 2653 { 2654 struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread); 2655 struct callchain_cursor_node *cnode; 2656 struct stitch_list *stitch_node; 2657 int err; 2658 2659 list_for_each_entry(stitch_node, &lbr_stitch->lists, node) { 2660 cnode = &stitch_node->cursor; 2661 2662 err = callchain_cursor_append(cursor, cnode->ip, 2663 &cnode->ms, 2664 cnode->branch, 2665 &cnode->branch_flags, 2666 cnode->nr_loop_iter, 2667 cnode->iter_cycles, 2668 cnode->branch_from, 2669 cnode->srcline); 2670 if (err) 2671 return err; 2672 } 2673 return 0; 2674 } 2675 2676 static struct stitch_list *get_stitch_node(struct thread *thread) 2677 { 2678 struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread); 2679 struct stitch_list *stitch_node; 2680 2681 if (!list_empty(&lbr_stitch->free_lists)) { 2682 stitch_node = list_first_entry(&lbr_stitch->free_lists, 2683 struct stitch_list, node); 2684 list_del(&stitch_node->node); 2685 2686 return stitch_node; 2687 } 2688 2689 return malloc(sizeof(struct stitch_list)); 2690 } 2691 2692 static bool has_stitched_lbr(struct thread *thread, 2693 struct perf_sample *cur, 2694 struct perf_sample *prev, 2695 unsigned int max_lbr, 2696 bool callee) 2697 { 2698 struct branch_stack *cur_stack = cur->branch_stack; 2699 struct branch_entry *cur_entries = 
perf_sample__branch_entries(cur); 2700 struct branch_stack *prev_stack = prev->branch_stack; 2701 struct branch_entry *prev_entries = perf_sample__branch_entries(prev); 2702 struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread); 2703 int i, j, nr_identical_branches = 0; 2704 struct stitch_list *stitch_node; 2705 u64 cur_base, distance; 2706 2707 if (!cur_stack || !prev_stack) 2708 return false; 2709 2710 /* Find the physical index of the base-of-stack for current sample. */ 2711 cur_base = max_lbr - cur_stack->nr + cur_stack->hw_idx + 1; 2712 2713 distance = (prev_stack->hw_idx > cur_base) ? (prev_stack->hw_idx - cur_base) : 2714 (max_lbr + prev_stack->hw_idx - cur_base); 2715 /* Previous sample has shorter stack. Nothing can be stitched. */ 2716 if (distance + 1 > prev_stack->nr) 2717 return false; 2718 2719 /* 2720 * Check if there are identical LBRs between two samples. 2721 * Identical LBRs must have same from, to and flags values. Also, 2722 * they have to be saved in the same LBR registers (same physical 2723 * index). 2724 * 2725 * Starts from the base-of-stack of current sample. 2726 */ 2727 for (i = distance, j = cur_stack->nr - 1; (i >= 0) && (j >= 0); i--, j--) { 2728 if ((prev_entries[i].from != cur_entries[j].from) || 2729 (prev_entries[i].to != cur_entries[j].to) || 2730 (prev_entries[i].flags.value != cur_entries[j].flags.value)) 2731 break; 2732 nr_identical_branches++; 2733 } 2734 2735 if (!nr_identical_branches) 2736 return false; 2737 2738 /* 2739 * Save the LBRs between the base-of-stack of previous sample 2740 * and the base-of-stack of current sample into lbr_stitch->lists. 2741 * These LBRs will be stitched later. 2742 */ 2743 for (i = prev_stack->nr - 1; i > (int)distance; i--) { 2744 2745 if (!lbr_stitch->prev_lbr_cursor[i].valid) 2746 continue; 2747 2748 stitch_node = get_stitch_node(thread); 2749 if (!stitch_node) 2750 return false; 2751 2752 memcpy(&stitch_node->cursor, &lbr_stitch->prev_lbr_cursor[i], 2753 sizeof(struct callchain_cursor_node)); 2754 2755 stitch_node->cursor.ms.maps = maps__get(lbr_stitch->prev_lbr_cursor[i].ms.maps); 2756 stitch_node->cursor.ms.map = map__get(lbr_stitch->prev_lbr_cursor[i].ms.map); 2757 2758 if (callee) 2759 list_add(&stitch_node->node, &lbr_stitch->lists); 2760 else 2761 list_add_tail(&stitch_node->node, &lbr_stitch->lists); 2762 } 2763 2764 return true; 2765 } 2766 2767 static bool alloc_lbr_stitch(struct thread *thread, unsigned int max_lbr) 2768 { 2769 if (thread__lbr_stitch(thread)) 2770 return true; 2771 2772 thread__set_lbr_stitch(thread, zalloc(sizeof(struct lbr_stitch))); 2773 if (!thread__lbr_stitch(thread)) 2774 goto err; 2775 2776 thread__lbr_stitch(thread)->prev_lbr_cursor = 2777 calloc(max_lbr + 1, sizeof(struct callchain_cursor_node)); 2778 if (!thread__lbr_stitch(thread)->prev_lbr_cursor) 2779 goto free_lbr_stitch; 2780 2781 thread__lbr_stitch(thread)->prev_lbr_cursor_size = max_lbr + 1; 2782 2783 INIT_LIST_HEAD(&thread__lbr_stitch(thread)->lists); 2784 INIT_LIST_HEAD(&thread__lbr_stitch(thread)->free_lists); 2785 2786 return true; 2787 2788 free_lbr_stitch: 2789 free(thread__lbr_stitch(thread)); 2790 thread__set_lbr_stitch(thread, NULL); 2791 err: 2792 pr_warning("Failed to allocate space for stitched LBRs. 
Disable LBR stitch\n"); 2793 thread__set_lbr_stitch_enable(thread, false); 2794 return false; 2795 } 2796 2797 /* 2798 * Resolve LBR callstack chain sample 2799 * Return: 2800 * 1 on success get LBR callchain information 2801 * 0 no available LBR callchain information, should try fp 2802 * negative error code on other errors. 2803 */ 2804 static int resolve_lbr_callchain_sample(struct thread *thread, 2805 struct callchain_cursor *cursor, 2806 struct perf_sample *sample, 2807 struct symbol **parent, 2808 struct addr_location *root_al, 2809 int max_stack, 2810 unsigned int max_lbr) 2811 { 2812 bool callee = (callchain_param.order == ORDER_CALLEE); 2813 struct ip_callchain *chain = sample->callchain; 2814 int chain_nr = min(max_stack, (int)chain->nr), i; 2815 struct lbr_stitch *lbr_stitch; 2816 bool stitched_lbr = false; 2817 u64 branch_from = 0; 2818 int err; 2819 2820 for (i = 0; i < chain_nr; i++) { 2821 if (chain->ips[i] == PERF_CONTEXT_USER) 2822 break; 2823 } 2824 2825 /* LBR only affects the user callchain */ 2826 if (i == chain_nr) 2827 return 0; 2828 2829 if (thread__lbr_stitch_enable(thread) && !sample->no_hw_idx && 2830 (max_lbr > 0) && alloc_lbr_stitch(thread, max_lbr)) { 2831 lbr_stitch = thread__lbr_stitch(thread); 2832 2833 stitched_lbr = has_stitched_lbr(thread, sample, 2834 &lbr_stitch->prev_sample, 2835 max_lbr, callee); 2836 2837 if (!stitched_lbr && !list_empty(&lbr_stitch->lists)) { 2838 struct stitch_list *stitch_node; 2839 2840 list_for_each_entry(stitch_node, &lbr_stitch->lists, node) 2841 map_symbol__exit(&stitch_node->cursor.ms); 2842 2843 list_splice_init(&lbr_stitch->lists, &lbr_stitch->free_lists); 2844 } 2845 memcpy(&lbr_stitch->prev_sample, sample, sizeof(*sample)); 2846 } 2847 2848 if (callee) { 2849 /* Add kernel ip */ 2850 err = lbr_callchain_add_kernel_ip(thread, cursor, sample, 2851 parent, root_al, branch_from, 2852 true, i); 2853 if (err) 2854 goto error; 2855 2856 err = lbr_callchain_add_lbr_ip(thread, cursor, sample, parent, 2857 root_al, &branch_from, true); 2858 if (err) 2859 goto error; 2860 2861 if (stitched_lbr) { 2862 err = lbr_callchain_add_stitched_lbr_ip(thread, cursor); 2863 if (err) 2864 goto error; 2865 } 2866 2867 } else { 2868 if (stitched_lbr) { 2869 err = lbr_callchain_add_stitched_lbr_ip(thread, cursor); 2870 if (err) 2871 goto error; 2872 } 2873 err = lbr_callchain_add_lbr_ip(thread, cursor, sample, parent, 2874 root_al, &branch_from, false); 2875 if (err) 2876 goto error; 2877 2878 /* Add kernel ip */ 2879 err = lbr_callchain_add_kernel_ip(thread, cursor, sample, 2880 parent, root_al, branch_from, 2881 false, i); 2882 if (err) 2883 goto error; 2884 } 2885 return 1; 2886 2887 error: 2888 return (err < 0) ? 
err : 0; 2889 } 2890 2891 static int find_prev_cpumode(struct ip_callchain *chain, struct thread *thread, 2892 struct callchain_cursor *cursor, 2893 struct symbol **parent, 2894 struct addr_location *root_al, 2895 u8 *cpumode, int ent) 2896 { 2897 int err = 0; 2898 2899 while (--ent >= 0) { 2900 u64 ip = chain->ips[ent]; 2901 2902 if (ip >= PERF_CONTEXT_MAX) { 2903 err = add_callchain_ip(thread, cursor, parent, 2904 root_al, cpumode, ip, 2905 false, NULL, NULL, 0); 2906 break; 2907 } 2908 } 2909 return err; 2910 } 2911 2912 static u64 get_leaf_frame_caller(struct perf_sample *sample, 2913 struct thread *thread, int usr_idx) 2914 { 2915 if (machine__normalized_is(maps__machine(thread__maps(thread)), "arm64")) 2916 return get_leaf_frame_caller_aarch64(sample, thread, usr_idx); 2917 else 2918 return 0; 2919 } 2920 2921 static int thread__resolve_callchain_sample(struct thread *thread, 2922 struct callchain_cursor *cursor, 2923 struct evsel *evsel, 2924 struct perf_sample *sample, 2925 struct symbol **parent, 2926 struct addr_location *root_al, 2927 int max_stack) 2928 { 2929 struct branch_stack *branch = sample->branch_stack; 2930 struct branch_entry *entries = perf_sample__branch_entries(sample); 2931 struct ip_callchain *chain = sample->callchain; 2932 int chain_nr = 0; 2933 u8 cpumode = PERF_RECORD_MISC_USER; 2934 int i, j, err, nr_entries, usr_idx; 2935 int skip_idx = -1; 2936 int first_call = 0; 2937 u64 leaf_frame_caller; 2938 2939 if (chain) 2940 chain_nr = chain->nr; 2941 2942 if (evsel__has_branch_callstack(evsel)) { 2943 struct perf_env *env = evsel__env(evsel); 2944 2945 err = resolve_lbr_callchain_sample(thread, cursor, sample, parent, 2946 root_al, max_stack, 2947 !env ? 0 : env->max_branches); 2948 if (err) 2949 return (err < 0) ? err : 0; 2950 } 2951 2952 /* 2953 * Based on DWARF debug information, some architectures skip 2954 * a callchain entry saved by the kernel. 2955 */ 2956 skip_idx = arch_skip_callchain_idx(thread, chain); 2957 2958 /* 2959 * Add branches to call stack for easier browsing. This gives 2960 * more context for a sample than just the callers. 2961 * 2962 * This uses individual histograms of paths compared to the 2963 * aggregated histograms the normal LBR mode uses. 2964 * 2965 * Limitations for now: 2966 * - No extra filters 2967 * - No annotations (should annotate somehow) 2968 */ 2969 2970 if (branch && callchain_param.branch_callstack) { 2971 int nr = min(max_stack, (int)branch->nr); 2972 struct branch_entry be[nr]; 2973 struct iterations iter[nr]; 2974 2975 if (branch->nr > PERF_MAX_BRANCH_DEPTH) { 2976 pr_warning("corrupted branch chain. skipping...\n"); 2977 goto check_calls; 2978 } 2979 2980 for (i = 0; i < nr; i++) { 2981 if (callchain_param.order == ORDER_CALLEE) { 2982 be[i] = entries[i]; 2983 2984 if (chain == NULL) 2985 continue; 2986 2987 /* 2988 * Check for overlap into the callchain. 2989 * The return address is one off compared to 2990 * the branch entry. To adjust for this 2991 * assume the calling instruction is not longer 2992 * than 8 bytes. 
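 *
 * For example, with a call site at 0x1000 and a recorded return address
 * of 0x1005, the "else if" below sees 0x1005 - 8 <= 0x1000 < 0x1005 and
 * advances first_call past that callchain entry.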
2993 */ 2994 if (i == skip_idx || 2995 chain->ips[first_call] >= PERF_CONTEXT_MAX) 2996 first_call++; 2997 else if (be[i].from < chain->ips[first_call] && 2998 be[i].from >= chain->ips[first_call] - 8) 2999 first_call++; 3000 } else 3001 be[i] = entries[branch->nr - i - 1]; 3002 } 3003 3004 memset(iter, 0, sizeof(struct iterations) * nr); 3005 nr = remove_loops(be, nr, iter); 3006 3007 for (i = 0; i < nr; i++) { 3008 err = add_callchain_ip(thread, cursor, parent, 3009 root_al, 3010 NULL, be[i].to, 3011 true, &be[i].flags, 3012 NULL, be[i].from); 3013 3014 if (!err) 3015 err = add_callchain_ip(thread, cursor, parent, root_al, 3016 NULL, be[i].from, 3017 true, &be[i].flags, 3018 &iter[i], 0); 3019 if (err == -EINVAL) 3020 break; 3021 if (err) 3022 return err; 3023 } 3024 3025 if (chain_nr == 0) 3026 return 0; 3027 3028 chain_nr -= nr; 3029 } 3030 3031 check_calls: 3032 if (chain && callchain_param.order != ORDER_CALLEE) { 3033 err = find_prev_cpumode(chain, thread, cursor, parent, root_al, 3034 &cpumode, chain->nr - first_call); 3035 if (err) 3036 return (err < 0) ? err : 0; 3037 } 3038 for (i = first_call, nr_entries = 0; 3039 i < chain_nr && nr_entries < max_stack; i++) { 3040 u64 ip; 3041 3042 if (callchain_param.order == ORDER_CALLEE) 3043 j = i; 3044 else 3045 j = chain->nr - i - 1; 3046 3047 #ifdef HAVE_SKIP_CALLCHAIN_IDX 3048 if (j == skip_idx) 3049 continue; 3050 #endif 3051 ip = chain->ips[j]; 3052 if (ip < PERF_CONTEXT_MAX) 3053 ++nr_entries; 3054 else if (callchain_param.order != ORDER_CALLEE) { 3055 err = find_prev_cpumode(chain, thread, cursor, parent, 3056 root_al, &cpumode, j); 3057 if (err) 3058 return (err < 0) ? err : 0; 3059 continue; 3060 } 3061 3062 /* 3063 * PERF_CONTEXT_USER allows us to locate where the user stack ends. 3064 * Depending on callchain_param.order and the position of PERF_CONTEXT_USER, 3065 * the index will be different in order to add the missing frame 3066 * at the right place. 3067 */ 3068 3069 usr_idx = callchain_param.order == ORDER_CALLEE ? j-2 : j-1; 3070 3071 if (usr_idx >= 0 && chain->ips[usr_idx] == PERF_CONTEXT_USER) { 3072 3073 leaf_frame_caller = get_leaf_frame_caller(sample, thread, usr_idx); 3074 3075 /* 3076 * Check that leaf_frame_caller != ip so the same value is 3077 * not added twice. 3078 */ 3079 3080 if (leaf_frame_caller && leaf_frame_caller != ip) { 3081 3082 err = add_callchain_ip(thread, cursor, parent, 3083 root_al, &cpumode, leaf_frame_caller, 3084 false, NULL, NULL, 0); 3085 if (err) 3086 return (err < 0) ? err : 0; 3087 } 3088 } 3089 3090 err = add_callchain_ip(thread, cursor, parent, 3091 root_al, &cpumode, ip, 3092 false, NULL, NULL, 0); 3093 3094 if (err) 3095 return (err < 0) ?
err : 0; 3096 } 3097 3098 return 0; 3099 } 3100 3101 static int append_inlines(struct callchain_cursor *cursor, struct map_symbol *ms, u64 ip) 3102 { 3103 struct symbol *sym = ms->sym; 3104 struct map *map = ms->map; 3105 struct inline_node *inline_node; 3106 struct inline_list *ilist; 3107 struct dso *dso; 3108 u64 addr; 3109 int ret = 1; 3110 struct map_symbol ilist_ms; 3111 3112 if (!symbol_conf.inline_name || !map || !sym) 3113 return ret; 3114 3115 addr = map__dso_map_ip(map, ip); 3116 addr = map__rip_2objdump(map, addr); 3117 dso = map__dso(map); 3118 3119 inline_node = inlines__tree_find(&dso->inlined_nodes, addr); 3120 if (!inline_node) { 3121 inline_node = dso__parse_addr_inlines(dso, addr, sym); 3122 if (!inline_node) 3123 return ret; 3124 inlines__tree_insert(&dso->inlined_nodes, inline_node); 3125 } 3126 3127 ilist_ms = (struct map_symbol) { 3128 .maps = maps__get(ms->maps), 3129 .map = map__get(map), 3130 }; 3131 list_for_each_entry(ilist, &inline_node->val, list) { 3132 ilist_ms.sym = ilist->symbol; 3133 ret = callchain_cursor_append(cursor, ip, &ilist_ms, false, 3134 NULL, 0, 0, 0, ilist->srcline); 3135 3136 if (ret != 0) 3137 return ret; 3138 } 3139 map__put(ilist_ms.map); 3140 maps__put(ilist_ms.maps); 3141 3142 return ret; 3143 } 3144 3145 static int unwind_entry(struct unwind_entry *entry, void *arg) 3146 { 3147 struct callchain_cursor *cursor = arg; 3148 const char *srcline = NULL; 3149 u64 addr = entry->ip; 3150 3151 if (symbol_conf.hide_unresolved && entry->ms.sym == NULL) 3152 return 0; 3153 3154 if (append_inlines(cursor, &entry->ms, entry->ip) == 0) 3155 return 0; 3156 3157 /* 3158 * Convert entry->ip from a virtual address to an offset in 3159 * its corresponding binary. 3160 */ 3161 if (entry->ms.map) 3162 addr = map__dso_map_ip(entry->ms.map, entry->ip); 3163 3164 srcline = callchain_srcline(&entry->ms, addr); 3165 return callchain_cursor_append(cursor, entry->ip, &entry->ms, 3166 false, NULL, 0, 0, 0, srcline); 3167 } 3168 3169 static int thread__resolve_callchain_unwind(struct thread *thread, 3170 struct callchain_cursor *cursor, 3171 struct evsel *evsel, 3172 struct perf_sample *sample, 3173 int max_stack) 3174 { 3175 /* Can we do dwarf post unwind? */ 3176 if (!((evsel->core.attr.sample_type & PERF_SAMPLE_REGS_USER) && 3177 (evsel->core.attr.sample_type & PERF_SAMPLE_STACK_USER))) 3178 return 0; 3179 3180 /* Bail out if nothing was captured. 
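 * (i.e. the sample is missing the user register set or the user stack
 * snapshot, either of which the DWARF unwinder needs.)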
*/ 3181 if ((!sample->user_regs.regs) || 3182 (!sample->user_stack.size)) 3183 return 0; 3184 3185 return unwind__get_entries(unwind_entry, cursor, 3186 thread, sample, max_stack, false); 3187 } 3188 3189 int thread__resolve_callchain(struct thread *thread, 3190 struct callchain_cursor *cursor, 3191 struct evsel *evsel, 3192 struct perf_sample *sample, 3193 struct symbol **parent, 3194 struct addr_location *root_al, 3195 int max_stack) 3196 { 3197 int ret = 0; 3198 3199 if (cursor == NULL) 3200 return -ENOMEM; 3201 3202 callchain_cursor_reset(cursor); 3203 3204 if (callchain_param.order == ORDER_CALLEE) { 3205 ret = thread__resolve_callchain_sample(thread, cursor, 3206 evsel, sample, 3207 parent, root_al, 3208 max_stack); 3209 if (ret) 3210 return ret; 3211 ret = thread__resolve_callchain_unwind(thread, cursor, 3212 evsel, sample, 3213 max_stack); 3214 } else { 3215 ret = thread__resolve_callchain_unwind(thread, cursor, 3216 evsel, sample, 3217 max_stack); 3218 if (ret) 3219 return ret; 3220 ret = thread__resolve_callchain_sample(thread, cursor, 3221 evsel, sample, 3222 parent, root_al, 3223 max_stack); 3224 } 3225 3226 return ret; 3227 } 3228 3229 int machine__for_each_thread(struct machine *machine, 3230 int (*fn)(struct thread *thread, void *p), 3231 void *priv) 3232 { 3233 struct threads *threads; 3234 struct rb_node *nd; 3235 int rc = 0; 3236 int i; 3237 3238 for (i = 0; i < THREADS__TABLE_SIZE; i++) { 3239 threads = &machine->threads[i]; 3240 for (nd = rb_first_cached(&threads->entries); nd; 3241 nd = rb_next(nd)) { 3242 struct thread_rb_node *trb = rb_entry(nd, struct thread_rb_node, rb_node); 3243 3244 rc = fn(trb->thread, priv); 3245 if (rc != 0) 3246 return rc; 3247 } 3248 } 3249 return rc; 3250 } 3251 3252 int machines__for_each_thread(struct machines *machines, 3253 int (*fn)(struct thread *thread, void *p), 3254 void *priv) 3255 { 3256 struct rb_node *nd; 3257 int rc = 0; 3258 3259 rc = machine__for_each_thread(&machines->host, fn, priv); 3260 if (rc != 0) 3261 return rc; 3262 3263 for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) { 3264 struct machine *machine = rb_entry(nd, struct machine, rb_node); 3265 3266 rc = machine__for_each_thread(machine, fn, priv); 3267 if (rc != 0) 3268 return rc; 3269 } 3270 return rc; 3271 } 3272 3273 pid_t machine__get_current_tid(struct machine *machine, int cpu) 3274 { 3275 if (cpu < 0 || (size_t)cpu >= machine->current_tid_sz) 3276 return -1; 3277 3278 return machine->current_tid[cpu]; 3279 } 3280 3281 int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid, 3282 pid_t tid) 3283 { 3284 struct thread *thread; 3285 const pid_t init_val = -1; 3286 3287 if (cpu < 0) 3288 return -EINVAL; 3289 3290 if (realloc_array_as_needed(machine->current_tid, 3291 machine->current_tid_sz, 3292 (unsigned int)cpu, 3293 &init_val)) 3294 return -ENOMEM; 3295 3296 machine->current_tid[cpu] = tid; 3297 3298 thread = machine__findnew_thread(machine, pid, tid); 3299 if (!thread) 3300 return -ENOMEM; 3301 3302 thread__set_cpu(thread, cpu); 3303 thread__put(thread); 3304 3305 return 0; 3306 } 3307 3308 /* 3309 * Compares the raw arch string. N.B. see instead perf_env__arch() or 3310 * machine__normalized_is() if a normalized arch is needed. 
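 *
 * For instance, machine__is(machine, "x86_64") matches only the literal
 * uname -m string, while machine__normalized_is(machine, "arm64") should
 * also match an "aarch64" raw arch, since perf_env__arch() normalizes it.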
3311 */ 3312 bool machine__is(struct machine *machine, const char *arch) 3313 { 3314 return machine && !strcmp(perf_env__raw_arch(machine->env), arch); 3315 } 3316 3317 bool machine__normalized_is(struct machine *machine, const char *arch) 3318 { 3319 return machine && !strcmp(perf_env__arch(machine->env), arch); 3320 } 3321 3322 int machine__nr_cpus_avail(struct machine *machine) 3323 { 3324 return machine ? perf_env__nr_cpus_avail(machine->env) : 0; 3325 } 3326 3327 int machine__get_kernel_start(struct machine *machine) 3328 { 3329 struct map *map = machine__kernel_map(machine); 3330 int err = 0; 3331 3332 /* 3333 * The only addresses above 2^63 are kernel addresses of a 64-bit 3334 * kernel. Note that addresses are unsigned so that on a 32-bit system 3335 * all addresses including kernel addresses are less than 2^32. In 3336 * that case (32-bit system), if the kernel mapping is unknown, all 3337 * addresses will be assumed to be in user space - see 3338 * machine__kernel_ip(). 3339 */ 3340 machine->kernel_start = 1ULL << 63; 3341 if (map) { 3342 err = map__load(map); 3343 /* 3344 * On x86_64, PTI entry trampolines are less than the 3345 * start of kernel text, but still above 2^63. So leave 3346 * kernel_start = 1ULL << 63 for x86_64. 3347 */ 3348 if (!err && !machine__is(machine, "x86_64")) 3349 machine->kernel_start = map__start(map); 3350 } 3351 return err; 3352 } 3353 3354 u8 machine__addr_cpumode(struct machine *machine, u8 cpumode, u64 addr) 3355 { 3356 u8 addr_cpumode = cpumode; 3357 bool kernel_ip; 3358 3359 if (!machine->single_address_space) 3360 goto out; 3361 3362 kernel_ip = machine__kernel_ip(machine, addr); 3363 switch (cpumode) { 3364 case PERF_RECORD_MISC_KERNEL: 3365 case PERF_RECORD_MISC_USER: 3366 addr_cpumode = kernel_ip ? PERF_RECORD_MISC_KERNEL : 3367 PERF_RECORD_MISC_USER; 3368 break; 3369 case PERF_RECORD_MISC_GUEST_KERNEL: 3370 case PERF_RECORD_MISC_GUEST_USER: 3371 addr_cpumode = kernel_ip ? PERF_RECORD_MISC_GUEST_KERNEL : 3372 PERF_RECORD_MISC_GUEST_USER; 3373 break; 3374 default: 3375 break; 3376 } 3377 out: 3378 return addr_cpumode; 3379 } 3380 3381 struct dso *machine__findnew_dso_id(struct machine *machine, const char *filename, struct dso_id *id) 3382 { 3383 return dsos__findnew_id(&machine->dsos, filename, id); 3384 } 3385 3386 struct dso *machine__findnew_dso(struct machine *machine, const char *filename) 3387 { 3388 return machine__findnew_dso_id(machine, filename, NULL); 3389 } 3390 3391 char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp) 3392 { 3393 struct machine *machine = vmachine; 3394 struct map *map; 3395 struct symbol *sym = machine__find_kernel_symbol(machine, *addrp, &map); 3396 3397 if (sym == NULL) 3398 return NULL; 3399 3400 *modp = __map__is_kmodule(map) ? 
(char *)map__dso(map)->short_name : NULL; 3401 *addrp = map__unmap_ip(map, sym->start); 3402 return sym->name; 3403 } 3404 3405 int machine__for_each_dso(struct machine *machine, machine__dso_t fn, void *priv) 3406 { 3407 struct dso *pos; 3408 int err = 0; 3409 3410 list_for_each_entry(pos, &machine->dsos.head, node) { 3411 if (fn(pos, machine, priv)) 3412 err = -1; 3413 } 3414 return err; 3415 } 3416 3417 int machine__for_each_kernel_map(struct machine *machine, machine__map_t fn, void *priv) 3418 { 3419 struct maps *maps = machine__kernel_maps(machine); 3420 struct map_rb_node *pos; 3421 int err = 0; 3422 3423 maps__for_each_entry(maps, pos) { 3424 err = fn(pos->map, priv); 3425 if (err != 0) { 3426 break; 3427 } 3428 } 3429 return err; 3430 } 3431 3432 bool machine__is_lock_function(struct machine *machine, u64 addr) 3433 { 3434 if (!machine->sched.text_start) { 3435 struct map *kmap; 3436 struct symbol *sym = machine__find_kernel_symbol_by_name(machine, "__sched_text_start", &kmap); 3437 3438 if (!sym) { 3439 /* to avoid retry */ 3440 machine->sched.text_start = 1; 3441 return false; 3442 } 3443 3444 machine->sched.text_start = map__unmap_ip(kmap, sym->start); 3445 3446 /* should not fail from here */ 3447 sym = machine__find_kernel_symbol_by_name(machine, "__sched_text_end", &kmap); 3448 machine->sched.text_end = map__unmap_ip(kmap, sym->start); 3449 3450 sym = machine__find_kernel_symbol_by_name(machine, "__lock_text_start", &kmap); 3451 machine->lock.text_start = map__unmap_ip(kmap, sym->start); 3452 3453 sym = machine__find_kernel_symbol_by_name(machine, "__lock_text_end", &kmap); 3454 machine->lock.text_end = map__unmap_ip(kmap, sym->start); 3455 } 3456 3457 /* failed to get kernel symbols */ 3458 if (machine->sched.text_start == 1) 3459 return false; 3460 3461 /* mutex and rwsem functions are in sched text section */ 3462 if (machine->sched.text_start <= addr && addr < machine->sched.text_end) 3463 return true; 3464 3465 /* spinlock functions are in lock text section */ 3466 if (machine->lock.text_start <= addr && addr < machine->lock.text_end) 3467 return true; 3468 3469 return false; 3470 } 3471
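
/*
 * Illustrative usage sketch: machine__for_each_thread() above walks every
 * thread known to the machine and stops at the first non-zero return from
 * the callback. A minimal, hypothetical caller could look like this:
 *
 *	static int count_thread(struct thread *thread __maybe_unused, void *priv)
 *	{
 *		int *nr = priv;
 *
 *		(*nr)++;
 *		return 0;	// returning non-zero would stop the walk
 *	}
 *
 *	int nr = 0;
 *	machine__for_each_thread(machine, count_thread, &nr);
 */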