// SPDX-License-Identifier: GPL-2.0
#include <dirent.h>
#include <errno.h>
#include <inttypes.h>
#include <regex.h>
#include "callchain.h"
#include "debug.h"
#include "event.h"
#include "evsel.h"
#include "hist.h"
#include "machine.h"
#include "map.h"
#include "sort.h"
#include "strlist.h"
#include "thread.h"
#include "vdso.h"
#include <stdbool.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include "unwind.h"
#include "linux/hash.h"
#include "asm/bug.h"

#include "sane_ctype.h"
#include <symbol/kallsyms.h>

static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock);

static void dsos__init(struct dsos *dsos)
{
	INIT_LIST_HEAD(&dsos->head);
	dsos->root = RB_ROOT;
	init_rwsem(&dsos->lock);
}

static void machine__threads_init(struct machine *machine)
{
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];
		threads->entries = RB_ROOT;
		init_rwsem(&threads->lock);
		threads->nr = 0;
		INIT_LIST_HEAD(&threads->dead);
		threads->last_match = NULL;
	}
}

int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
{
	memset(machine, 0, sizeof(*machine));
	map_groups__init(&machine->kmaps, machine);
	RB_CLEAR_NODE(&machine->rb_node);
	dsos__init(&machine->dsos);

	machine__threads_init(machine);

	machine->vdso_info = NULL;
	machine->env = NULL;

	machine->pid = pid;

	machine->id_hdr_size = 0;
	machine->kptr_restrict_warned = false;
	machine->comm_exec = false;
	machine->kernel_start = 0;

	memset(machine->vmlinux_maps, 0, sizeof(machine->vmlinux_maps));

	machine->root_dir = strdup(root_dir);
	if (machine->root_dir == NULL)
		return -ENOMEM;

	if (pid != HOST_KERNEL_ID) {
		struct thread *thread = machine__findnew_thread(machine, -1,
								pid);
		char comm[64];

		if (thread == NULL)
			return -ENOMEM;

		snprintf(comm, sizeof(comm), "[guest/%d]", pid);
		thread__set_comm(thread, comm, 0);
		thread__put(thread);
	}

	machine->current_tid = NULL;

	return 0;
}

struct machine *machine__new_host(void)
{
	struct machine *machine = malloc(sizeof(*machine));

	if (machine != NULL) {
		/* machine__init() can fail on strdup(), check its result */
		if (machine__init(machine, "", HOST_KERNEL_ID) < 0)
			goto out_delete;

		if (machine__create_kernel_maps(machine) < 0)
			goto out_delete;
	}

	return machine;
out_delete:
	free(machine);
	return NULL;
}
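
/*
 * A minimal usage sketch (not code from this file): callers outside the
 * session machinery pair machine__new_host() with machine__delete():
 *
 *	struct machine *machine = machine__new_host();
 *
 *	if (machine == NULL)
 *		return -1;
 *	...
 *	machine__delete(machine);
 */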

struct machine *machine__new_kallsyms(void)
{
	struct machine *machine = machine__new_host();
	/*
	 * FIXME:
	 * 1) MAP__FUNCTION will go away when we stop loading separate maps for
	 *    functions and data objects.
	 * 2) We should switch to machine__load_kallsyms(), i.e. not explicitly
	 *    ask for not using the kcore parsing code, once this one is fixed
	 *    to create a map per module.
	 */
	if (machine && __machine__load_kallsyms(machine, "/proc/kallsyms", MAP__FUNCTION, true) <= 0) {
		machine__delete(machine);
		machine = NULL;
	}

	return machine;
}

static void dsos__purge(struct dsos *dsos)
{
	struct dso *pos, *n;

	down_write(&dsos->lock);

	list_for_each_entry_safe(pos, n, &dsos->head, node) {
		RB_CLEAR_NODE(&pos->rb_node);
		pos->root = NULL;
		list_del_init(&pos->node);
		dso__put(pos);
	}

	up_write(&dsos->lock);
}

static void dsos__exit(struct dsos *dsos)
{
	dsos__purge(dsos);
	exit_rwsem(&dsos->lock);
}

void machine__delete_threads(struct machine *machine)
{
	struct rb_node *nd;
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];
		down_write(&threads->lock);
		nd = rb_first(&threads->entries);
		while (nd) {
			struct thread *t = rb_entry(nd, struct thread, rb_node);

			nd = rb_next(nd);
			__machine__remove_thread(machine, t, false);
		}
		up_write(&threads->lock);
	}
}

void machine__exit(struct machine *machine)
{
	int i;

	machine__destroy_kernel_maps(machine);
	map_groups__exit(&machine->kmaps);
	dsos__exit(&machine->dsos);
	machine__exit_vdso(machine);
	zfree(&machine->root_dir);
	zfree(&machine->current_tid);

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];
		exit_rwsem(&threads->lock);
	}
}

void machine__delete(struct machine *machine)
{
	if (machine) {
		machine__exit(machine);
		free(machine);
	}
}

void machines__init(struct machines *machines)
{
	machine__init(&machines->host, "", HOST_KERNEL_ID);
	machines->guests = RB_ROOT;
}

void machines__exit(struct machines *machines)
{
	machine__exit(&machines->host);
	/* XXX exit guest */
}

struct machine *machines__add(struct machines *machines, pid_t pid,
			      const char *root_dir)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *pos, *machine = malloc(sizeof(*machine));

	if (machine == NULL)
		return NULL;

	if (machine__init(machine, root_dir, pid) != 0) {
		free(machine);
		return NULL;
	}

	while (*p != NULL) {
		parent = *p;
		pos = rb_entry(parent, struct machine, rb_node);
		if (pid < pos->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&machine->rb_node, parent, p);
	rb_insert_color(&machine->rb_node, &machines->guests);

	return machine;
}

void machines__set_comm_exec(struct machines *machines, bool comm_exec)
{
	struct rb_node *nd;

	machines->host.comm_exec = comm_exec;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		machine->comm_exec = comm_exec;
	}
}

struct machine *machines__find(struct machines *machines, pid_t pid)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *machine;
	struct machine *default_machine = NULL;

	if (pid == HOST_KERNEL_ID)
		return &machines->host;

	while (*p != NULL) {
		parent = *p;
		machine = rb_entry(parent, struct machine, rb_node);
		if (pid < machine->pid)
			p = &(*p)->rb_left;
		else if (pid > machine->pid)
			p = &(*p)->rb_right;
		else
			return machine;
		if (!machine->pid)
			default_machine = machine;
	}

	return default_machine;
}

struct machine *machines__findnew(struct machines *machines, pid_t pid)
{
	char path[PATH_MAX];
	const char *root_dir = "";
	struct machine *machine = machines__find(machines, pid);

	if (machine && (machine->pid == pid))
		goto out;

	if ((pid != HOST_KERNEL_ID) &&
	    (pid != DEFAULT_GUEST_KERNEL_ID) &&
	    (symbol_conf.guestmount)) {
		sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
		if (access(path, R_OK)) {
			static struct strlist *seen;

			if (!seen)
				seen = strlist__new(NULL, NULL);

			if (!strlist__has_entry(seen, path)) {
				pr_err("Can't access file %s\n", path);
				strlist__add(seen, path);
			}
			machine = NULL;
			goto out;
		}
		root_dir = path;
	}

	machine = machines__add(machines, pid, root_dir);
out:
	return machine;
}

void machines__process_guests(struct machines *machines,
			      machine__process_t process, void *data)
{
	struct rb_node *nd;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		process(pos, data);
	}
}

char *machine__mmap_name(struct machine *machine, char *bf, size_t size)
{
	if (machine__is_host(machine))
		snprintf(bf, size, "[%s]", "kernel.kallsyms");
	else if (machine__is_default_guest(machine))
		snprintf(bf, size, "[%s]", "guest.kernel.kallsyms");
	else {
		snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms",
			 machine->pid);
	}

	return bf;
}

void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
{
	struct rb_node *node;
	struct machine *machine;

	machines->host.id_hdr_size = id_hdr_size;

	for (node = rb_first(&machines->guests); node; node = rb_next(node)) {
		machine = rb_entry(node, struct machine, rb_node);
		machine->id_hdr_size = id_hdr_size;
	}
}

static void machine__update_thread_pid(struct machine *machine,
				       struct thread *th, pid_t pid)
{
	struct thread *leader;

	if (pid == th->pid_ || pid == -1 || th->pid_ != -1)
		return;

	th->pid_ = pid;

	if (th->pid_ == th->tid)
		return;

	leader = __machine__findnew_thread(machine, th->pid_, th->pid_);
	if (!leader)
		goto out_err;

	if (!leader->mg)
		leader->mg = map_groups__new(machine);

	if (!leader->mg)
		goto out_err;

	if (th->mg == leader->mg)
		goto out_put;

	if (th->mg) {
		/*
		 * Maps are created from MMAP events which provide the pid and
		 * tid. Consequently there never should be any maps on a thread
		 * with an unknown pid. Just print an error if there are.
		 */
		if (!map_groups__empty(th->mg))
			pr_err("Discarding thread maps for %d:%d\n",
			       th->pid_, th->tid);
		map_groups__put(th->mg);
	}

	th->mg = map_groups__get(leader->mg);
out_put:
	thread__put(leader);
	return;
out_err:
	pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid);
	goto out_put;
}

/*
 * Caller must eventually drop thread->refcnt returned with a successful
 * lookup/new thread inserted.
 */
static struct thread *____machine__findnew_thread(struct machine *machine,
						  struct threads *threads,
						  pid_t pid, pid_t tid,
						  bool create)
{
	struct rb_node **p = &threads->entries.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - TID lookups come in blocks,
	 * so most of the time we don't have to look up
	 * the full rbtree:
	 */
	th = threads->last_match;
	if (th != NULL) {
		if (th->tid == tid) {
			machine__update_thread_pid(machine, th, pid);
			return thread__get(th);
		}

		threads->last_match = NULL;
	}

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->tid == tid) {
			threads->last_match = th;
			machine__update_thread_pid(machine, th, pid);
			return thread__get(th);
		}

		if (tid < th->tid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	if (!create)
		return NULL;

	th = thread__new(pid, tid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, &threads->entries);

		/*
		 * We have to initialize map_groups separately
		 * after rb tree is updated.
		 *
		 * The reason is that we call machine__findnew_thread
		 * within thread__init_map_groups to find the thread
		 * leader and that would screw up the rb tree.
		 */
		if (thread__init_map_groups(th, machine)) {
			rb_erase_init(&th->rb_node, &threads->entries);
			RB_CLEAR_NODE(&th->rb_node);
			thread__put(th);
			return NULL;
		}
		/*
		 * It is now in the rbtree, get a ref
		 */
		thread__get(th);
		threads->last_match = th;
		++threads->nr;
	}

	return th;
}

struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid)
{
	return ____machine__findnew_thread(machine, machine__threads(machine, tid), pid, tid, true);
}

struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
				       pid_t tid)
{
	struct threads *threads = machine__threads(machine, tid);
	struct thread *th;

	down_write(&threads->lock);
	th = __machine__findnew_thread(machine, pid, tid);
	up_write(&threads->lock);
	return th;
}

struct thread *machine__find_thread(struct machine *machine, pid_t pid,
				    pid_t tid)
{
	struct threads *threads = machine__threads(machine, tid);
	struct thread *th;

	down_read(&threads->lock);
	th = ____machine__findnew_thread(machine, threads, pid, tid, false);
	up_read(&threads->lock);
	return th;
}
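
/*
 * Refcounting sketch for the lookup functions above: both
 * machine__findnew_thread() and machine__find_thread() return the
 * thread with its refcount bumped (see the thread__get() calls in
 * ____machine__findnew_thread()), so every successful lookup must be
 * paired with thread__put():
 *
 *	struct thread *thread = machine__find_thread(machine, pid, tid);
 *
 *	if (thread != NULL) {
 *		... use thread ...
 *		thread__put(thread);
 *	}
 */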

struct comm *machine__thread_exec_comm(struct machine *machine,
				       struct thread *thread)
{
	if (machine->comm_exec)
		return thread__exec_comm(thread);
	else
		return thread__comm(thread);
}

int machine__process_comm_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->comm.pid,
							event->comm.tid);
	bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;
	int err = 0;

	if (exec)
		machine->comm_exec = true;

	if (dump_trace)
		perf_event__fprintf_comm(event, stdout);

	if (thread == NULL ||
	    __thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
		err = -1;
	}

	thread__put(thread);

	return err;
}

int machine__process_namespaces_event(struct machine *machine,
				      union perf_event *event,
				      struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->namespaces.pid,
							event->namespaces.tid);
	int err = 0;

	WARN_ONCE(event->namespaces.nr_namespaces > NR_NAMESPACES,
		  "\nWARNING: kernel seems to support more namespaces than perf"
		  " tool.\nTry updating the perf tool..\n\n");

	WARN_ONCE(event->namespaces.nr_namespaces < NR_NAMESPACES,
		  "\nWARNING: perf tool seems to support more namespaces than"
		  " the kernel.\nTry updating the kernel..\n\n");

	if (dump_trace)
		perf_event__fprintf_namespaces(event, stdout);

	if (thread == NULL ||
	    thread__set_namespaces(thread, sample->time, &event->namespaces)) {
		dump_printf("problem processing PERF_RECORD_NAMESPACES, skipping event.\n");
		err = -1;
	}

	thread__put(thread);

	return err;
}

int machine__process_lost_event(struct machine *machine __maybe_unused,
				union perf_event *event, struct perf_sample *sample __maybe_unused)
{
	dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
		    event->lost.id, event->lost.lost);
	return 0;
}

int machine__process_lost_samples_event(struct machine *machine __maybe_unused,
					union perf_event *event, struct perf_sample *sample)
{
	dump_printf(": id:%" PRIu64 ": lost samples :%" PRIu64 "\n",
		    sample->id, event->lost_samples.lost);
	return 0;
}

static struct dso *machine__findnew_module_dso(struct machine *machine,
					       struct kmod_path *m,
					       const char *filename)
{
	struct dso *dso;

	down_write(&machine->dsos.lock);

	dso = __dsos__find(&machine->dsos, m->name, true);
	if (!dso) {
		dso = __dsos__addnew(&machine->dsos, m->name);
		if (dso == NULL)
			goto out_unlock;

		dso__set_module_info(dso, m, machine);
		dso__set_long_name(dso, strdup(filename), true);
	}

	dso__get(dso);
out_unlock:
	up_write(&machine->dsos.lock);
	return dso;
}

int machine__process_aux_event(struct machine *machine __maybe_unused,
			       union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_aux(event, stdout);
	return 0;
}

int machine__process_itrace_start_event(struct machine *machine __maybe_unused,
					union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_itrace_start(event, stdout);
	return 0;
}

int machine__process_switch_event(struct machine *machine __maybe_unused,
				  union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_switch(event, stdout);
	return 0;
}
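
/*
 * Sketch of the fixup done below, assuming the usual kmod_path naming:
 * a module dso first seen via kallsyms gets a placeholder long_name
 * like "[test_module]"; when a later event carries a real path such as
 * "/lib/modules/.../test_module.ko", the placeholder is replaced so
 * that dso__load() can locate the file on disk.
 */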

static void dso__adjust_kmod_long_name(struct dso *dso, const char *filename)
{
	const char *dup_filename;

	if (!filename || !dso || !dso->long_name)
		return;
	if (dso->long_name[0] != '[')
		return;
	if (!strchr(filename, '/'))
		return;

	dup_filename = strdup(filename);
	if (!dup_filename)
		return;

	dso__set_long_name(dso, dup_filename, true);
}

struct map *machine__findnew_module_map(struct machine *machine, u64 start,
					const char *filename)
{
	struct map *map = NULL;
	struct dso *dso = NULL;
	struct kmod_path m;

	if (kmod_path__parse_name(&m, filename))
		return NULL;

	map = map_groups__find_by_name(&machine->kmaps, MAP__FUNCTION,
				       m.name);
	if (map) {
		/*
		 * If the map's dso is an offline module, give dso__load()
		 * a chance to find the file path of that module by fixing
		 * long_name.
		 */
		dso__adjust_kmod_long_name(map->dso, filename);
		goto out;
	}

	dso = machine__findnew_module_dso(machine, &m, filename);
	if (dso == NULL)
		goto out;

	map = map__new2(start, dso, MAP__FUNCTION);
	if (map == NULL)
		goto out;

	map_groups__insert(&machine->kmaps, map);

	/* Put the map here because map_groups__insert already got it */
	map__put(map);
out:
	/* put the dso here, corresponding to machine__findnew_module_dso */
	dso__put(dso);
	free(m.name);
	return map;
}

size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = __dsos__fprintf(&machines->host.dsos.head, fp);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += __dsos__fprintf(&pos->dsos.head, fp);
	}

	return ret;
}

size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp,
				     bool (skip)(struct dso *dso, int parm), int parm)
{
	return __dsos__fprintf_buildid(&m->dsos.head, fp, skip, parm);
}

size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
				      bool (skip)(struct dso *dso, int parm), int parm)
{
	struct rb_node *nd;
	size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
	}
	return ret;
}

size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
{
	int i;
	size_t printed = 0;
	struct dso *kdso = machine__kernel_map(machine)->dso;

	if (kdso->has_build_id) {
		char filename[PATH_MAX];
		if (dso__build_id_filename(kdso, filename, sizeof(filename),
					   false))
			printed += fprintf(fp, "[0] %s\n", filename);
	}

	for (i = 0; i < vmlinux_path__nr_entries; ++i)
		printed += fprintf(fp, "[%d] %s\n",
				   i + kdso->has_build_id, vmlinux_path[i]);

	return printed;
}

size_t machine__fprintf(struct machine *machine, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = 0;
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];

		down_read(&threads->lock);

		ret += fprintf(fp, "Threads: %u\n", threads->nr);

		for (nd = rb_first(&threads->entries); nd; nd = rb_next(nd)) {
			struct thread *pos = rb_entry(nd, struct thread, rb_node);

			ret += thread__fprintf(pos, fp);
		}

		up_read(&threads->lock);
	}
	return ret;
}

static struct dso *machine__get_kernel(struct machine *machine)
{
	const char *vmlinux_name = NULL;
	struct dso *kernel;

	if (machine__is_host(machine)) {
		vmlinux_name = symbol_conf.vmlinux_name;
		if (!vmlinux_name)
			vmlinux_name = DSO__NAME_KALLSYMS;

		kernel = machine__findnew_kernel(machine, vmlinux_name,
						 "[kernel]", DSO_TYPE_KERNEL);
	} else {
		char bf[PATH_MAX];

		if (machine__is_default_guest(machine))
			vmlinux_name = symbol_conf.default_guest_vmlinux_name;
		if (!vmlinux_name)
			vmlinux_name = machine__mmap_name(machine, bf,
							  sizeof(bf));

		kernel = machine__findnew_kernel(machine, vmlinux_name,
						 "[guest.kernel]",
						 DSO_TYPE_GUEST_KERNEL);
	}

	if (kernel != NULL && (!kernel->has_build_id))
		dso__read_running_kernel_build_id(kernel, machine);

	return kernel;
}

struct process_args {
	u64 start;
};

static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
					   size_t bufsz)
{
	if (machine__is_default_guest(machine))
		scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
	else
		scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
}

const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};

/* Figure out the start address of kernel map from /proc/kallsyms.
 * Returns the name of the start symbol in *symbol_name. Pass in NULL as
 * symbol_name if it's not that important.
 */
static int machine__get_running_kernel_start(struct machine *machine,
					     const char **symbol_name, u64 *start)
{
	char filename[PATH_MAX];
	int i, err = -1;
	const char *name;
	u64 addr = 0;

	machine__get_kallsyms_filename(machine, filename, PATH_MAX);

	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
		return 0;

	for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
		err = kallsyms__get_function_start(filename, name, &addr);
		if (!err)
			break;
	}

	if (err)
		return -1;

	if (symbol_name)
		*symbol_name = name;

	*start = addr;
	return 0;
}

int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
{
	int type;
	u64 start = 0;

	if (machine__get_running_kernel_start(machine, NULL, &start))
		return -1;

	/* In case of renewing the kernel map, destroy the previous one */
	machine__destroy_kernel_maps(machine);

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;
		struct map *map;

		machine->vmlinux_maps[type] = map__new2(start, kernel, type);
		if (machine->vmlinux_maps[type] == NULL)
			return -1;

		machine->vmlinux_maps[type]->map_ip =
			machine->vmlinux_maps[type]->unmap_ip =
				identity__map_ip;
		map = __machine__kernel_map(machine, type);
		kmap = map__kmap(map);
		if (!kmap)
			return -1;

		kmap->kmaps = &machine->kmaps;
		map_groups__insert(&machine->kmaps, map);
	}

	return 0;
}
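
/*
 * Worked example for the identity mapping set up above, assuming the
 * kernel text was found at 0xffffffff81000000: the vmlinux maps start
 * there, and since map_ip/unmap_ip are identity__map_ip, resolving
 * 0xffffffff81001234 looks up exactly 0xffffffff81001234 in the dso,
 * i.e. no map-relative translation is applied to kernel addresses.
 */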

void machine__destroy_kernel_maps(struct machine *machine)
{
	int type;

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;
		struct map *map = __machine__kernel_map(machine, type);

		if (map == NULL)
			continue;

		kmap = map__kmap(map);
		map_groups__remove(&machine->kmaps, map);
		if (kmap && kmap->ref_reloc_sym) {
			/*
			 * ref_reloc_sym is shared among all maps, so free just
			 * on one of them.
			 */
			if (type == MAP__FUNCTION) {
				zfree((char **)&kmap->ref_reloc_sym->name);
				zfree(&kmap->ref_reloc_sym);
			} else
				kmap->ref_reloc_sym = NULL;
		}

		map__put(machine->vmlinux_maps[type]);
		machine->vmlinux_maps[type] = NULL;
	}
}

int machines__create_guest_kernel_maps(struct machines *machines)
{
	int ret = 0;
	struct dirent **namelist = NULL;
	int i, items = 0;
	char path[PATH_MAX];
	pid_t pid;
	char *endp;

	if (symbol_conf.default_guest_vmlinux_name ||
	    symbol_conf.default_guest_modules ||
	    symbol_conf.default_guest_kallsyms) {
		machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
	}

	if (symbol_conf.guestmount) {
		items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
		if (items <= 0)
			return -ENOENT;
		for (i = 0; i < items; i++) {
			if (!isdigit(namelist[i]->d_name[0])) {
				/* Filter out . and .. */
				continue;
			}
			pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
			if ((*endp != '\0') ||
			    (endp == namelist[i]->d_name) ||
			    (errno == ERANGE)) {
				pr_debug("invalid directory (%s). Skipping.\n",
					 namelist[i]->d_name);
				continue;
			}
			sprintf(path, "%s/%s/proc/kallsyms",
				symbol_conf.guestmount,
				namelist[i]->d_name);
			ret = access(path, R_OK);
			if (ret) {
				pr_debug("Can't access file %s\n", path);
				goto failure;
			}
			machines__create_kernel_maps(machines, pid);
		}
failure:
		free(namelist);
	}

	return ret;
}

void machines__destroy_kernel_maps(struct machines *machines)
{
	struct rb_node *next = rb_first(&machines->guests);

	machine__destroy_kernel_maps(&machines->host);

	while (next) {
		struct machine *pos = rb_entry(next, struct machine, rb_node);

		next = rb_next(&pos->rb_node);
		rb_erase(&pos->rb_node, &machines->guests);
		machine__delete(pos);
	}
}

int machines__create_kernel_maps(struct machines *machines, pid_t pid)
{
	struct machine *machine = machines__findnew(machines, pid);

	if (machine == NULL)
		return -1;

	return machine__create_kernel_maps(machine);
}
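
/*
 * Guest filesystem layout sketch, derived from the path construction in
 * machines__create_guest_kernel_maps() above and machines__findnew():
 * with --guestmount=/tmp/guests, a guest with pid 4711 typically
 * exports at least:
 *
 *	/tmp/guests/4711/proc/kallsyms
 *	/tmp/guests/4711/proc/modules
 */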

int __machine__load_kallsyms(struct machine *machine, const char *filename,
			     enum map_type type, bool no_kcore)
{
	struct map *map = machine__kernel_map(machine);
	int ret = __dso__load_kallsyms(map->dso, filename, map, no_kcore);

	if (ret > 0) {
		dso__set_loaded(map->dso, type);
		/*
		 * Since /proc/kallsyms will have multiple sections for the
		 * kernel, with modules between them, fixup the end of all
		 * sections.
		 */
		__map_groups__fixup_end(&machine->kmaps, type);
	}

	return ret;
}

int machine__load_kallsyms(struct machine *machine, const char *filename,
			   enum map_type type)
{
	return __machine__load_kallsyms(machine, filename, type, false);
}

int machine__load_vmlinux_path(struct machine *machine, enum map_type type)
{
	struct map *map = machine__kernel_map(machine);
	int ret = dso__load_vmlinux_path(map->dso, map);

	if (ret > 0)
		dso__set_loaded(map->dso, type);

	return ret;
}

static void map_groups__fixup_end(struct map_groups *mg)
{
	int i;
	for (i = 0; i < MAP__NR_TYPES; ++i)
		__map_groups__fixup_end(mg, i);
}

static char *get_kernel_version(const char *root_dir)
{
	char version[PATH_MAX];
	FILE *file;
	char *name, *tmp;
	const char *prefix = "Linux version ";

	sprintf(version, "%s/proc/version", root_dir);
	file = fopen(version, "r");
	if (!file)
		return NULL;

	version[0] = '\0';
	tmp = fgets(version, sizeof(version), file);
	fclose(file);

	name = strstr(version, prefix);
	if (!name)
		return NULL;
	name += strlen(prefix);
	tmp = strchr(name, ' ');
	if (tmp)
		*tmp = '\0';

	return strdup(name);
}

static bool is_kmod_dso(struct dso *dso)
{
	return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
	       dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
}
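
/*
 * Example of the version lookup above, assuming a 4.14.0-rc3 kernel:
 * get_kernel_version() reads /proc/version, whose first line looks
 * like "Linux version 4.14.0-rc3 (user@host) (gcc ...) ...", strips
 * the "Linux version " prefix and everything from the next space, and
 * returns "4.14.0-rc3"; machine__set_modules_path() below then scans
 * /lib/modules/4.14.0-rc3 for the module files.
 */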

static int map_groups__set_module_path(struct map_groups *mg, const char *path,
				       struct kmod_path *m)
{
	struct map *map;
	char *long_name;

	map = map_groups__find_by_name(mg, MAP__FUNCTION, m->name);
	if (map == NULL)
		return 0;

	long_name = strdup(path);
	if (long_name == NULL)
		return -ENOMEM;

	dso__set_long_name(map->dso, long_name, true);
	dso__kernel_module_get_build_id(map->dso, "");

	/*
	 * The full name could reveal kmod compression, so
	 * we need to update the symtab_type if needed.
	 */
	if (m->comp && is_kmod_dso(map->dso))
		map->dso->symtab_type++;

	return 0;
}

static int map_groups__set_modules_path_dir(struct map_groups *mg,
					    const char *dir_name, int depth)
{
	struct dirent *dent;
	DIR *dir = opendir(dir_name);
	int ret = 0;

	if (!dir) {
		pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
		return -1;
	}

	while ((dent = readdir(dir)) != NULL) {
		char path[PATH_MAX];
		struct stat st;

		/* sshfs might return bad dent->d_type, so we have to stat */
		snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
		if (stat(path, &st))
			continue;

		if (S_ISDIR(st.st_mode)) {
			if (!strcmp(dent->d_name, ".") ||
			    !strcmp(dent->d_name, ".."))
				continue;

			/* Do not follow top-level source and build symlinks */
			if (depth == 0) {
				if (!strcmp(dent->d_name, "source") ||
				    !strcmp(dent->d_name, "build"))
					continue;
			}

			ret = map_groups__set_modules_path_dir(mg, path,
							       depth + 1);
			if (ret < 0)
				goto out;
		} else {
			struct kmod_path m;

			ret = kmod_path__parse_name(&m, dent->d_name);
			if (ret)
				goto out;

			if (m.kmod)
				ret = map_groups__set_module_path(mg, path, &m);

			free(m.name);

			if (ret)
				goto out;
		}
	}

out:
	closedir(dir);
	return ret;
}

static int machine__set_modules_path(struct machine *machine)
{
	char *version;
	char modules_path[PATH_MAX];

	version = get_kernel_version(machine->root_dir);
	if (!version)
		return -1;

	snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
		 machine->root_dir, version);
	free(version);

	return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
}

int __weak arch__fix_module_text_start(u64 *start __maybe_unused,
				       const char *name __maybe_unused)
{
	return 0;
}

static int machine__create_module(void *arg, const char *name, u64 start,
				  u64 size)
{
	struct machine *machine = arg;
	struct map *map;

	if (arch__fix_module_text_start(&start, name) < 0)
		return -1;

	map = machine__findnew_module_map(machine, start, name);
	if (map == NULL)
		return -1;
	map->end = start + size;

	dso__kernel_module_get_build_id(map->dso, machine->root_dir);

	return 0;
}

static int machine__create_modules(struct machine *machine)
{
	const char *modules;
	char path[PATH_MAX];

	if (machine__is_default_guest(machine)) {
		modules = symbol_conf.default_guest_modules;
	} else {
		snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
		modules = path;
	}

	if (symbol__restricted_filename(modules, "/proc/modules"))
		return -1;

	if (modules__parse(modules, machine, machine__create_module))
		return -1;

	if (!machine__set_modules_path(machine))
		return 0;

	pr_debug("Problems setting modules path maps, continuing anyway...\n");

	return 0;
}

int machine__create_kernel_maps(struct machine *machine)
{
	struct dso *kernel = machine__get_kernel(machine);
	const char *name = NULL;
	u64 addr = 0;
	int ret;

	if (kernel == NULL)
		return -1;

	ret = __machine__create_kernel_maps(machine, kernel);
	dso__put(kernel);
	if (ret < 0)
		return -1;

	if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
		if (machine__is_host(machine))
			pr_debug("Problems creating module maps, "
				 "continuing anyway...\n");
		else
			pr_debug("Problems creating module maps for guest %d, "
				 "continuing anyway...\n", machine->pid);
	}

	/*
	 * Now that we have all the maps created, just set the ->end of them:
	 */
	map_groups__fixup_end(&machine->kmaps);

	if (!machine__get_running_kernel_start(machine, &name, &addr)) {
		if (name &&
		    maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name, addr)) {
			machine__destroy_kernel_maps(machine);
			return -1;
		}
	}

	return 0;
}

static void machine__set_kernel_mmap_len(struct machine *machine,
					 union perf_event *event)
{
	int i;

	for (i = 0; i < MAP__NR_TYPES; i++) {
		machine->vmlinux_maps[i]->start = event->mmap.start;
		machine->vmlinux_maps[i]->end = (event->mmap.start +
						 event->mmap.len);
		/*
		 * Be a bit paranoid here, some perf.data file came with
		 * a zero sized synthesized MMAP event for the kernel.
		 */
		if (machine->vmlinux_maps[i]->end == 0)
			machine->vmlinux_maps[i]->end = ~0ULL;
	}
}

static bool machine__uses_kcore(struct machine *machine)
{
	struct dso *dso;

	list_for_each_entry(dso, &machine->dsos.head, node) {
		if (dso__is_kcore(dso))
			return true;
	}

	return false;
}
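
/*
 * Filename forms dispatched below, as a sketch (the kernel prefix for
 * the host comes from machine__mmap_name(), i.e. "[kernel.kallsyms]"):
 *
 *	"/lib/modules/.../ext4.ko"	-> module map
 *	"[test_module]"			-> module map (offline module)
 *	"[kernel.kallsyms]_text"	-> kernel map, "_text" as the
 *					   ref reloc symbol name
 */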

static int machine__process_kernel_mmap_event(struct machine *machine,
					      union perf_event *event)
{
	struct map *map;
	char kmmap_prefix[PATH_MAX];
	enum dso_kernel_type kernel_type;
	bool is_kernel_mmap;

	/* If we have maps from kcore then we do not need or want any others */
	if (machine__uses_kcore(machine))
		return 0;

	machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
	if (machine__is_host(machine))
		kernel_type = DSO_TYPE_KERNEL;
	else
		kernel_type = DSO_TYPE_GUEST_KERNEL;

	is_kernel_mmap = memcmp(event->mmap.filename,
				kmmap_prefix,
				strlen(kmmap_prefix) - 1) == 0;
	if (event->mmap.filename[0] == '/' ||
	    (!is_kernel_mmap && event->mmap.filename[0] == '[')) {
		map = machine__findnew_module_map(machine, event->mmap.start,
						  event->mmap.filename);
		if (map == NULL)
			goto out_problem;

		map->end = map->start + event->mmap.len;
	} else if (is_kernel_mmap) {
		const char *symbol_name = (event->mmap.filename +
					   strlen(kmmap_prefix));
		/*
		 * Should be there already, from the build-id table in
		 * the header.
		 */
		struct dso *kernel = NULL;
		struct dso *dso;

		down_read(&machine->dsos.lock);

		list_for_each_entry(dso, &machine->dsos.head, node) {

			/*
			 * The cpumode passed to is_kernel_module is not the
			 * cpumode of *this* event. If we insist on passing
			 * correct cpumode to is_kernel_module, we should
			 * record the cpumode when we add this dso to the
			 * linked list.
			 *
			 * However we don't really need passing correct
			 * cpumode. We know the correct cpumode must be kernel
			 * mode (if not, we should not link it onto kernel_dsos
			 * list).
			 *
			 * Therefore, we pass PERF_RECORD_MISC_CPUMODE_UNKNOWN.
			 * is_kernel_module() treats it as a kernel cpumode.
			 */

			if (!dso->kernel ||
			    is_kernel_module(dso->long_name,
					     PERF_RECORD_MISC_CPUMODE_UNKNOWN))
				continue;

			kernel = dso;
			break;
		}

		up_read(&machine->dsos.lock);

		if (kernel == NULL)
			kernel = machine__findnew_dso(machine, kmmap_prefix);
		if (kernel == NULL)
			goto out_problem;

		kernel->kernel = kernel_type;
		if (__machine__create_kernel_maps(machine, kernel) < 0) {
			dso__put(kernel);
			goto out_problem;
		}

		if (strstr(kernel->long_name, "vmlinux"))
			dso__set_short_name(kernel, "[kernel.vmlinux]", false);

		machine__set_kernel_mmap_len(machine, event);

		/*
		 * Avoid using a zero address (kptr_restrict) for the ref reloc
		 * symbol. Effectively having zero here means that at record
		 * time /proc/sys/kernel/kptr_restrict was non zero.
		 */
		if (event->mmap.pgoff != 0) {
			maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
							 symbol_name,
							 event->mmap.pgoff);
		}

		if (machine__is_default_guest(machine)) {
			/*
			 * preload dso of guest kernel and modules
			 */
			dso__load(kernel, machine__kernel_map(machine));
		}
	}
	return 0;
out_problem:
	return -1;
}

int machine__process_mmap2_event(struct machine *machine,
				 union perf_event *event,
				 struct perf_sample *sample)
{
	struct thread *thread;
	struct map *map;
	enum map_type type;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap2(event, stdout);

	if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    sample->cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap2.pid,
					 event->mmap2.tid);
	if (thread == NULL)
		goto out_problem;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
		type = MAP__VARIABLE;
	else
		type = MAP__FUNCTION;

	map = map__new(machine, event->mmap2.start,
		       event->mmap2.len, event->mmap2.pgoff,
		       event->mmap2.maj,
		       event->mmap2.min, event->mmap2.ino,
		       event->mmap2.ino_generation,
		       event->mmap2.prot,
		       event->mmap2.flags,
		       event->mmap2.filename, type, thread);

	if (map == NULL)
		goto out_problem_map;

	ret = thread__insert_map(thread, map);
	if (ret)
		goto out_problem_insert;

	thread__put(thread);
	map__put(map);
	return 0;

out_problem_insert:
	map__put(map);
out_problem_map:
	thread__put(thread);
out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
	return 0;
}
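
/*
 * PERF_RECORD_MMAP is the legacy form of PERF_RECORD_MMAP2: it lacks
 * the maj/min/ino/ino_generation/prot/flags fields, which is why the
 * map__new() call below passes zeros in those positions.
 */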

int machine__process_mmap_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread;
	struct map *map;
	enum map_type type;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap(event, stdout);

	if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    sample->cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap.pid,
					 event->mmap.tid);
	if (thread == NULL)
		goto out_problem;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
		type = MAP__VARIABLE;
	else
		type = MAP__FUNCTION;

	map = map__new(machine, event->mmap.start,
		       event->mmap.len, event->mmap.pgoff,
		       0, 0, 0, 0, 0, 0,
		       event->mmap.filename,
		       type, thread);

	if (map == NULL)
		goto out_problem_map;

	ret = thread__insert_map(thread, map);
	if (ret)
		goto out_problem_insert;

	thread__put(thread);
	map__put(map);
	return 0;

out_problem_insert:
	map__put(map);
out_problem_map:
	thread__put(thread);
out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
	return 0;
}

static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock)
{
	struct threads *threads = machine__threads(machine, th->tid);

	if (threads->last_match == th)
		threads->last_match = NULL;

	BUG_ON(refcount_read(&th->refcnt) == 0);
	if (lock)
		down_write(&threads->lock);
	rb_erase_init(&th->rb_node, &threads->entries);
	RB_CLEAR_NODE(&th->rb_node);
	--threads->nr;
	/*
	 * Move it first to the dead_threads list, then drop the reference,
	 * if this is the last reference, then the thread__delete destructor
	 * will be called and we will remove it from the dead_threads list.
	 */
	list_add_tail(&th->node, &threads->dead);
	if (lock)
		up_write(&threads->lock);
	thread__put(th);
}

void machine__remove_thread(struct machine *machine, struct thread *th)
{
	return __machine__remove_thread(machine, th, true);
}

int machine__process_fork_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);
	struct thread *parent = machine__findnew_thread(machine,
							event->fork.ppid,
							event->fork.ptid);
	int err = 0;

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	/*
	 * There may be an existing thread that is not actually the parent,
	 * either because we are processing events out of order, or because the
	 * (fork) event that would have removed the thread was lost. Assume the
	 * latter case and continue on as best we can.
	 */
	if (parent->pid_ != (pid_t)event->fork.ppid) {
		dump_printf("removing erroneous parent thread %d/%d\n",
			    parent->pid_, parent->tid);
		machine__remove_thread(machine, parent);
		thread__put(parent);
		parent = machine__findnew_thread(machine, event->fork.ppid,
						 event->fork.ptid);
	}

	/* if a thread currently exists for the thread id remove it */
	if (thread != NULL) {
		machine__remove_thread(machine, thread);
		thread__put(thread);
	}

	thread = machine__findnew_thread(machine, event->fork.pid,
					 event->fork.tid);

	if (thread == NULL || parent == NULL ||
	    thread__fork(thread, parent, sample->time) < 0) {
		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
		err = -1;
	}
	thread__put(thread);
	thread__put(parent);

	return err;
}

int machine__process_exit_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample __maybe_unused)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	if (thread != NULL) {
		thread__exited(thread);
		thread__put(thread);
	}

	return 0;
}

int machine__process_event(struct machine *machine, union perf_event *event,
			   struct perf_sample *sample)
{
	int ret;

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret = machine__process_comm_event(machine, event, sample); break;
	case PERF_RECORD_MMAP:
		ret = machine__process_mmap_event(machine, event, sample); break;
	case PERF_RECORD_NAMESPACES:
		ret = machine__process_namespaces_event(machine, event, sample); break;
	case PERF_RECORD_MMAP2:
		ret = machine__process_mmap2_event(machine, event, sample); break;
	case PERF_RECORD_FORK:
		ret = machine__process_fork_event(machine, event, sample); break;
	case PERF_RECORD_EXIT:
		ret = machine__process_exit_event(machine, event, sample); break;
	case PERF_RECORD_LOST:
		ret = machine__process_lost_event(machine, event, sample); break;
	case PERF_RECORD_AUX:
		ret = machine__process_aux_event(machine, event); break;
	case PERF_RECORD_ITRACE_START:
		ret = machine__process_itrace_start_event(machine, event); break;
	case PERF_RECORD_LOST_SAMPLES:
		ret = machine__process_lost_samples_event(machine, event, sample); break;
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		ret = machine__process_switch_event(machine, event); break;
	default:
		ret = -1;
		break;
	}

	return ret;
}

static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
{
	if (!regexec(regex, sym->name, 0, NULL, 0))
		return 1;
	return 0;
}

static void ip__resolve_ams(struct thread *thread,
			    struct addr_map_symbol *ams,
			    u64 ip)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));
	/*
	 * We cannot use the header.misc hint to determine whether a
	 * branch stack address is user, kernel, guest, hypervisor.
	 * Branches may straddle the kernel/user/hypervisor boundaries.
	 * Thus, we have to try consecutively until we find a match,
	 * or else the symbol is unknown.
	 */
	thread__find_cpumode_addr_location(thread, MAP__FUNCTION, ip, &al);

	ams->addr = ip;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
	ams->phys_addr = 0;
}

static void ip__resolve_data(struct thread *thread,
			     u8 m, struct addr_map_symbol *ams,
			     u64 addr, u64 phys_addr)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));

	thread__find_addr_location(thread, m, MAP__VARIABLE, addr, &al);
	if (al.map == NULL) {
		/*
		 * some shared data regions have execute bit set which puts
		 * their mapping in the MAP__FUNCTION type array.
		 * Check there as a fallback option before dropping the sample.
		 */
		thread__find_addr_location(thread, m, MAP__FUNCTION, addr, &al);
	}

	ams->addr = addr;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
	ams->phys_addr = phys_addr;
}

struct mem_info *sample__resolve_mem(struct perf_sample *sample,
				     struct addr_location *al)
{
	struct mem_info *mi = zalloc(sizeof(*mi));

	if (!mi)
		return NULL;

	ip__resolve_ams(al->thread, &mi->iaddr, sample->ip);
	ip__resolve_data(al->thread, al->cpumode, &mi->daddr,
			 sample->addr, sample->phys_addr);
	mi->data_src.val = sample->data_src;

	return mi;
}

static char *callchain_srcline(struct map *map, struct symbol *sym, u64 ip)
{
	char *srcline = NULL;

	if (!map || callchain_param.key == CCKEY_FUNCTION)
		return srcline;

	srcline = srcline__tree_find(&map->dso->srclines, ip);
	if (!srcline) {
		bool show_sym = false;
		bool show_addr = callchain_param.key == CCKEY_ADDRESS;

		srcline = get_srcline(map->dso, map__rip_2objdump(map, ip),
				      sym, show_sym, show_addr);
		srcline__tree_insert(&map->dso->srclines, ip, srcline);
	}

	return srcline;
}

struct iterations {
	int nr_loop_iter;
	u64 cycles;
};

static int add_callchain_ip(struct thread *thread,
			    struct callchain_cursor *cursor,
			    struct symbol **parent,
			    struct addr_location *root_al,
			    u8 *cpumode,
			    u64 ip,
			    bool branch,
			    struct branch_flags *flags,
			    struct iterations *iter,
			    u64 branch_from)
{
	struct addr_location al;
	int nr_loop_iter = 0;
	u64 iter_cycles = 0;
	const char *srcline = NULL;

	al.filtered = 0;
	al.sym = NULL;
	if (!cpumode) {
		thread__find_cpumode_addr_location(thread, MAP__FUNCTION,
						   ip, &al);
	} else {
		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				*cpumode = PERF_RECORD_MISC_HYPERVISOR;
				break;
			case PERF_CONTEXT_KERNEL:
				*cpumode = PERF_RECORD_MISC_KERNEL;
				break;
			case PERF_CONTEXT_USER:
				*cpumode = PERF_RECORD_MISC_USER;
				break;
			default:
				pr_debug("invalid callchain context: "
					 "%"PRId64"\n", (s64) ip);
				/*
				 * It seems the callchain is corrupted.
				 * Discard all.
				 */
				callchain_cursor_reset(cursor);
				return 1;
			}
			return 0;
		}
		thread__find_addr_location(thread, *cpumode, MAP__FUNCTION,
					   ip, &al);
	}

	if (al.sym != NULL) {
		if (perf_hpp_list.parent && !*parent &&
		    symbol__match_regex(al.sym, &parent_regex))
			*parent = al.sym;
		else if (have_ignore_callees && root_al &&
			 symbol__match_regex(al.sym, &ignore_callees_regex)) {
			/* Treat this symbol as the root,
			   forgetting its callees. */
			*root_al = al;
			callchain_cursor_reset(cursor);
		}
	}

	if (symbol_conf.hide_unresolved && al.sym == NULL)
		return 0;

	if (iter) {
		nr_loop_iter = iter->nr_loop_iter;
		iter_cycles = iter->cycles;
	}

	srcline = callchain_srcline(al.map, al.sym, al.addr);
	return callchain_cursor_append(cursor, al.addr, al.map, al.sym,
				       branch, flags, nr_loop_iter,
				       iter_cycles, branch_from, srcline);
}

struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
					   struct addr_location *al)
{
	unsigned int i;
	const struct branch_stack *bs = sample->branch_stack;
	struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));

	if (!bi)
		return NULL;

	for (i = 0; i < bs->nr; i++) {
		ip__resolve_ams(al->thread, &bi[i].to, bs->entries[i].to);
		ip__resolve_ams(al->thread, &bi[i].from, bs->entries[i].from);
		bi[i].flags = bs->entries[i].flags;
	}
	return bi;
}

static void save_iterations(struct iterations *iter,
			    struct branch_entry *be, int nr)
{
	int i;

	iter->nr_loop_iter = nr;
	iter->cycles = 0;

	for (i = 0; i < nr; i++)
		iter->cycles += be[i].flags.cycles;
}

#define CHASHSZ 127
#define CHASHBITS 7
#define NO_ENTRY 0xff

#define PERF_MAX_BRANCH_DEPTH 127

/* Remove loops. */
static int remove_loops(struct branch_entry *l, int nr,
			struct iterations *iter)
{
	int i, j, off;
	unsigned char chash[CHASHSZ];

	memset(chash, NO_ENTRY, sizeof(chash));

	BUG_ON(PERF_MAX_BRANCH_DEPTH > 255);

	for (i = 0; i < nr; i++) {
		int h = hash_64(l[i].from, CHASHBITS) % CHASHSZ;

		/* no collision handling for now */
		if (chash[h] == NO_ENTRY) {
			chash[h] = i;
		} else if (l[chash[h]].from == l[i].from) {
			bool is_loop = true;
			/* check if it is a real loop */
			off = 0;
			for (j = chash[h]; j < i && i + off < nr; j++, off++)
				if (l[j].from != l[i + off].from) {
					is_loop = false;
					break;
				}
			if (is_loop) {
				j = nr - (i + off);
				if (j > 0) {
					save_iterations(iter + i + off,
							l + i, off);

					memmove(iter + i, iter + i + off,
						j * sizeof(*iter));

					memmove(l + i, l + i + off,
						j * sizeof(*l));
				}

				nr -= off;
			}
		}
	}
	return nr;
}
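
/*
 * remove_loops() (above) sketch: for a branch stack whose "from"
 * addresses run A B C A B C D, the second A B C pass is detected as a
 * repetition of the first, the duplicated entries are dropped, and the
 * iteration count and accumulated cycles land in the struct iterations
 * slot of the entry that follows the collapsed loop.
 */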

/*
 * Resolve LBR callstack chain sample
 * Return:
 * 1 on success, the LBR callchain information is resolved
 * 0 no available LBR callchain information, should try fp
 * negative error code on other errors.
 */
static int resolve_lbr_callchain_sample(struct thread *thread,
					struct callchain_cursor *cursor,
					struct perf_sample *sample,
					struct symbol **parent,
					struct addr_location *root_al,
					int max_stack)
{
	struct ip_callchain *chain = sample->callchain;
	int chain_nr = min(max_stack, (int)chain->nr), i;
	u8 cpumode = PERF_RECORD_MISC_USER;
	u64 ip, branch_from = 0;

	for (i = 0; i < chain_nr; i++) {
		if (chain->ips[i] == PERF_CONTEXT_USER)
			break;
	}

	/* LBR only affects the user callchain */
	if (i != chain_nr) {
		struct branch_stack *lbr_stack = sample->branch_stack;
		int lbr_nr = lbr_stack->nr, j, k;
		bool branch;
		struct branch_flags *flags;
		/*
		 * LBR callstack can only get user call chain.
		 * The mix_chain_nr is kernel call chain
		 * number plus LBR user call chain number.
		 * i is kernel call chain number,
		 * 1 is PERF_CONTEXT_USER,
		 * lbr_nr + 1 is the user call chain number.
		 * For details, please refer to the comments
		 * in callchain__printf
		 */
		int mix_chain_nr = i + 1 + lbr_nr + 1;

		for (j = 0; j < mix_chain_nr; j++) {
			int err;
			branch = false;
			flags = NULL;

			if (callchain_param.order == ORDER_CALLEE) {
				if (j < i + 1)
					ip = chain->ips[j];
				else if (j > i + 1) {
					k = j - i - 2;
					ip = lbr_stack->entries[k].from;
					branch = true;
					flags = &lbr_stack->entries[k].flags;
				} else {
					ip = lbr_stack->entries[0].to;
					branch = true;
					flags = &lbr_stack->entries[0].flags;
					branch_from =
						lbr_stack->entries[0].from;
				}
			} else {
				if (j < lbr_nr) {
					k = lbr_nr - j - 1;
					ip = lbr_stack->entries[k].from;
					branch = true;
					flags = &lbr_stack->entries[k].flags;
				} else if (j > lbr_nr)
					ip = chain->ips[i + 1 - (j - lbr_nr)];
				else {
					ip = lbr_stack->entries[0].to;
					branch = true;
					flags = &lbr_stack->entries[0].flags;
					branch_from =
						lbr_stack->entries[0].from;
				}
			}

			err = add_callchain_ip(thread, cursor, parent,
					       root_al, &cpumode, ip,
					       branch, flags, NULL,
					       branch_from);
			if (err)
				return (err < 0) ? err : 0;
		}
		return 1;
	}

	return 0;
}
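
/*
 * Worked example for mix_chain_nr above, assuming ORDER_CALLEE: with a
 * sampled chain of [k0, k1, PERF_CONTEXT_USER, ...] we get i == 2, and
 * with lbr_nr == 4 the merged chain has 2 + 1 + 4 + 1 == 8 slots: the
 * two kernel ips, the PERF_CONTEXT_USER marker (which only switches
 * cpumode in add_callchain_ip()), the "to" of the most recent LBR
 * entry, and the four LBR "from" addresses.
 */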

static int thread__resolve_callchain_sample(struct thread *thread,
					    struct callchain_cursor *cursor,
					    struct perf_evsel *evsel,
					    struct perf_sample *sample,
					    struct symbol **parent,
					    struct addr_location *root_al,
					    int max_stack)
{
	struct branch_stack *branch = sample->branch_stack;
	struct ip_callchain *chain = sample->callchain;
	int chain_nr = 0;
	u8 cpumode = PERF_RECORD_MISC_USER;
	int i, j, err, nr_entries;
	int skip_idx = -1;
	int first_call = 0;

	if (chain)
		chain_nr = chain->nr;

	if (perf_evsel__has_branch_callstack(evsel)) {
		err = resolve_lbr_callchain_sample(thread, cursor, sample, parent,
						   root_al, max_stack);
		if (err)
			return (err < 0) ? err : 0;
	}

	/*
	 * Based on DWARF debug information, some architectures skip
	 * a callchain entry saved by the kernel.
	 */
	skip_idx = arch_skip_callchain_idx(thread, chain);

	/*
	 * Add branches to call stack for easier browsing. This gives
	 * more context for a sample than just the callers.
	 *
	 * This uses individual histograms of paths compared to the
	 * aggregated histograms the normal LBR mode uses.
	 *
	 * Limitations for now:
	 * - No extra filters
	 * - No annotations (should annotate somehow)
	 */

	if (branch && callchain_param.branch_callstack) {
		int nr = min(max_stack, (int)branch->nr);
		struct branch_entry be[nr];
		struct iterations iter[nr];

		if (branch->nr > PERF_MAX_BRANCH_DEPTH) {
			pr_warning("corrupted branch chain. skipping...\n");
			goto check_calls;
		}

		for (i = 0; i < nr; i++) {
			if (callchain_param.order == ORDER_CALLEE) {
				be[i] = branch->entries[i];

				if (chain == NULL)
					continue;

				/*
				 * Check for overlap into the callchain.
				 * The return address is one off compared to
				 * the branch entry. To adjust for this
				 * assume the calling instruction is not longer
				 * than 8 bytes.
				 */
				if (i == skip_idx ||
				    chain->ips[first_call] >= PERF_CONTEXT_MAX)
					first_call++;
				else if (be[i].from < chain->ips[first_call] &&
					 be[i].from >= chain->ips[first_call] - 8)
					first_call++;
			} else
				be[i] = branch->entries[branch->nr - i - 1];
		}

		memset(iter, 0, sizeof(struct iterations) * nr);
		nr = remove_loops(be, nr, iter);

		for (i = 0; i < nr; i++) {
			err = add_callchain_ip(thread, cursor, parent,
					       root_al,
					       NULL, be[i].to,
					       true, &be[i].flags,
					       NULL, be[i].from);

			if (!err)
				err = add_callchain_ip(thread, cursor, parent, root_al,
						       NULL, be[i].from,
						       true, &be[i].flags,
						       &iter[i], 0);
			if (err == -EINVAL)
				break;
			if (err)
				return err;
		}

		if (chain_nr == 0)
			return 0;

		chain_nr -= nr;
	}

check_calls:
	for (i = first_call, nr_entries = 0;
	     i < chain_nr && nr_entries < max_stack; i++) {
		u64 ip;

		if (callchain_param.order == ORDER_CALLEE)
			j = i;
		else
			j = chain->nr - i - 1;

#ifdef HAVE_SKIP_CALLCHAIN_IDX
		if (j == skip_idx)
			continue;
#endif
		ip = chain->ips[j];

		if (ip < PERF_CONTEXT_MAX)
			++nr_entries;

		err = add_callchain_ip(thread, cursor, parent,
				       root_al, &cpumode, ip,
				       false, NULL, NULL, 0);

		if (err)
			return (err < 0) ? err : 0;
	}

	return 0;
}

static int append_inlines(struct callchain_cursor *cursor,
			  struct map *map, struct symbol *sym, u64 ip)
{
	struct inline_node *inline_node;
	struct inline_list *ilist;
	u64 addr;
	int ret = 1;

	if (!symbol_conf.inline_name || !map || !sym)
		return ret;

	addr = map__rip_2objdump(map, ip);

	inline_node = inlines__tree_find(&map->dso->inlined_nodes, addr);
	if (!inline_node) {
		inline_node = dso__parse_addr_inlines(map->dso, addr, sym);
		if (!inline_node)
			return ret;
		inlines__tree_insert(&map->dso->inlined_nodes, inline_node);
	}

	list_for_each_entry(ilist, &inline_node->val, list) {
		ret = callchain_cursor_append(cursor, ip, map,
					      ilist->symbol, false,
					      NULL, 0, 0, 0, ilist->srcline);

		if (ret != 0)
			return ret;
	}

	return ret;
}

static int unwind_entry(struct unwind_entry *entry, void *arg)
{
	struct callchain_cursor *cursor = arg;
	const char *srcline = NULL;

	if (symbol_conf.hide_unresolved && entry->sym == NULL)
		return 0;

	if (append_inlines(cursor, entry->map, entry->sym, entry->ip) == 0)
		return 0;

	srcline = callchain_srcline(entry->map, entry->sym, entry->ip);
	return callchain_cursor_append(cursor, entry->ip,
				       entry->map, entry->sym,
				       false, NULL, 0, 0, 0, srcline);
}
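
/*
 * The sample_type checks below tie DWARF post-unwinding to the
 * record-time configuration: both user registers and a user stack
 * snapshot must have been sampled (e.g. perf record --call-graph
 * dwarf); otherwise we return 0 and the caller falls back to whatever
 * the sampled callchain itself provided.
 */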

static int thread__resolve_callchain_unwind(struct thread *thread,
					    struct callchain_cursor *cursor,
					    struct perf_evsel *evsel,
					    struct perf_sample *sample,
					    int max_stack)
{
	/* Can we do dwarf post unwind? */
	if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) &&
	      (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER)))
		return 0;

	/* Bail out if nothing was captured. */
	if ((!sample->user_regs.regs) ||
	    (!sample->user_stack.size))
		return 0;

	return unwind__get_entries(unwind_entry, cursor,
				   thread, sample, max_stack);
}

int thread__resolve_callchain(struct thread *thread,
			      struct callchain_cursor *cursor,
			      struct perf_evsel *evsel,
			      struct perf_sample *sample,
			      struct symbol **parent,
			      struct addr_location *root_al,
			      int max_stack)
{
	int ret = 0;

	/* Reset the cursor we are about to append to, not the global one */
	callchain_cursor_reset(cursor);

	if (callchain_param.order == ORDER_CALLEE) {
		ret = thread__resolve_callchain_sample(thread, cursor,
						       evsel, sample,
						       parent, root_al,
						       max_stack);
		if (ret)
			return ret;
		ret = thread__resolve_callchain_unwind(thread, cursor,
						       evsel, sample,
						       max_stack);
	} else {
		ret = thread__resolve_callchain_unwind(thread, cursor,
						       evsel, sample,
						       max_stack);
		if (ret)
			return ret;
		ret = thread__resolve_callchain_sample(thread, cursor,
						       evsel, sample,
						       parent, root_al,
						       max_stack);
	}

	return ret;
}

int machine__for_each_thread(struct machine *machine,
			     int (*fn)(struct thread *thread, void *p),
			     void *priv)
{
	struct threads *threads;
	struct rb_node *nd;
	struct thread *thread;
	int rc = 0;
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		threads = &machine->threads[i];
		for (nd = rb_first(&threads->entries); nd; nd = rb_next(nd)) {
			thread = rb_entry(nd, struct thread, rb_node);
			rc = fn(thread, priv);
			if (rc != 0)
				return rc;
		}

		list_for_each_entry(thread, &threads->dead, node) {
			rc = fn(thread, priv);
			if (rc != 0)
				return rc;
		}
	}
	return rc;
}

int machines__for_each_thread(struct machines *machines,
			      int (*fn)(struct thread *thread, void *p),
			      void *priv)
{
	struct rb_node *nd;
	int rc = 0;

	rc = machine__for_each_thread(&machines->host, fn, priv);
	if (rc != 0)
		return rc;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		rc = machine__for_each_thread(machine, fn, priv);
		if (rc != 0)
			return rc;
	}
	return rc;
}

int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
				  struct target *target, struct thread_map *threads,
				  perf_event__handler_t process, bool data_mmap,
				  unsigned int proc_map_timeout,
				  unsigned int nr_threads_synthesize)
{
	if (target__has_task(target))
		return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap, proc_map_timeout);
	else if (target__has_cpu(target))
		return perf_event__synthesize_threads(tool, process,
						      machine, data_mmap,
						      proc_map_timeout,
						      nr_threads_synthesize);
	/* command specified */
	return 0;
}

pid_t machine__get_current_tid(struct machine *machine, int cpu)
{
	if (cpu < 0 || cpu >= MAX_NR_CPUS || !machine->current_tid)
		return -1;

	return machine->current_tid[cpu];
}

int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
			     pid_t tid)
{
	struct thread *thread;

	if (cpu < 0)
		return -EINVAL;

	if (!machine->current_tid) {
		int i;

		machine->current_tid = calloc(MAX_NR_CPUS, sizeof(pid_t));
		if (!machine->current_tid)
			return -ENOMEM;
		for (i = 0; i < MAX_NR_CPUS; i++)
			machine->current_tid[i] = -1;
	}

	if (cpu >= MAX_NR_CPUS) {
		pr_err("Requested CPU %d too large. ", cpu);
		pr_err("Consider raising MAX_NR_CPUS\n");
		return -EINVAL;
	}

	machine->current_tid[cpu] = tid;

	thread = machine__findnew_thread(machine, pid, tid);
	if (!thread)
		return -ENOMEM;

	thread->cpu = cpu;
	thread__put(thread);

	return 0;
}

int machine__get_kernel_start(struct machine *machine)
{
	struct map *map = machine__kernel_map(machine);
	int err = 0;

	/*
	 * The only addresses above 2^63 are kernel addresses of a 64-bit
	 * kernel. Note that addresses are unsigned so that on a 32-bit system
	 * all addresses including kernel addresses are less than 2^32. In
	 * that case (32-bit system), if the kernel mapping is unknown, all
	 * addresses will be assumed to be in user space - see
	 * machine__kernel_ip().
	 */
	machine->kernel_start = 1ULL << 63;
	if (map) {
		err = map__load(map);
		if (!err)
			machine->kernel_start = map->start;
	}
	return err;
}

struct dso *machine__findnew_dso(struct machine *machine, const char *filename)
{
	return dsos__findnew(&machine->dsos, filename);
}

char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
{
	struct machine *machine = vmachine;
	struct map *map;
	struct symbol *sym = map_groups__find_symbol(&machine->kmaps, MAP__FUNCTION, *addrp, &map);

	if (sym == NULL)
		return NULL;

	*modp = __map__is_kmodule(map) ? (char *)map->dso->short_name : NULL;
	*addrp = map->unmap_ip(map, sym->start);
	return sym->name;
}