1 // SPDX-License-Identifier: GPL-2.0 2 #include <dirent.h> 3 #include <errno.h> 4 #include <inttypes.h> 5 #include <regex.h> 6 #include "callchain.h" 7 #include "debug.h" 8 #include "event.h" 9 #include "evsel.h" 10 #include "hist.h" 11 #include "machine.h" 12 #include "map.h" 13 #include "sort.h" 14 #include "strlist.h" 15 #include "thread.h" 16 #include "vdso.h" 17 #include <stdbool.h> 18 #include <sys/types.h> 19 #include <sys/stat.h> 20 #include <unistd.h> 21 #include "unwind.h" 22 #include "linux/hash.h" 23 #include "asm/bug.h" 24 25 #include "sane_ctype.h" 26 #include <symbol/kallsyms.h> 27 28 static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock); 29 30 static void dsos__init(struct dsos *dsos) 31 { 32 INIT_LIST_HEAD(&dsos->head); 33 dsos->root = RB_ROOT; 34 init_rwsem(&dsos->lock); 35 } 36 37 static void machine__threads_init(struct machine *machine) 38 { 39 int i; 40 41 for (i = 0; i < THREADS__TABLE_SIZE; i++) { 42 struct threads *threads = &machine->threads[i]; 43 threads->entries = RB_ROOT; 44 init_rwsem(&threads->lock); 45 threads->nr = 0; 46 INIT_LIST_HEAD(&threads->dead); 47 threads->last_match = NULL; 48 } 49 } 50 51 static int machine__set_mmap_name(struct machine *machine) 52 { 53 if (machine__is_host(machine)) 54 machine->mmap_name = strdup("[kernel.kallsyms]"); 55 else if (machine__is_default_guest(machine)) 56 machine->mmap_name = strdup("[guest.kernel.kallsyms]"); 57 else if (asprintf(&machine->mmap_name, "[guest.kernel.kallsyms.%d]", 58 machine->pid) < 0) 59 machine->mmap_name = NULL; 60 61 return machine->mmap_name ? 0 : -ENOMEM; 62 } 63 64 int machine__init(struct machine *machine, const char *root_dir, pid_t pid) 65 { 66 int err = -ENOMEM; 67 68 memset(machine, 0, sizeof(*machine)); 69 map_groups__init(&machine->kmaps, machine); 70 RB_CLEAR_NODE(&machine->rb_node); 71 dsos__init(&machine->dsos); 72 73 machine__threads_init(machine); 74 75 machine->vdso_info = NULL; 76 machine->env = NULL; 77 78 machine->pid = pid; 79 80 machine->id_hdr_size = 0; 81 machine->kptr_restrict_warned = false; 82 machine->comm_exec = false; 83 machine->kernel_start = 0; 84 85 memset(machine->vmlinux_maps, 0, sizeof(machine->vmlinux_maps)); 86 87 machine->root_dir = strdup(root_dir); 88 if (machine->root_dir == NULL) 89 return -ENOMEM; 90 91 if (machine__set_mmap_name(machine)) 92 goto out; 93 94 if (pid != HOST_KERNEL_ID) { 95 struct thread *thread = machine__findnew_thread(machine, -1, 96 pid); 97 char comm[64]; 98 99 if (thread == NULL) 100 goto out; 101 102 snprintf(comm, sizeof(comm), "[guest/%d]", pid); 103 thread__set_comm(thread, comm, 0); 104 thread__put(thread); 105 } 106 107 machine->current_tid = NULL; 108 err = 0; 109 110 out: 111 if (err) { 112 zfree(&machine->root_dir); 113 zfree(&machine->mmap_name); 114 } 115 return 0; 116 } 117 118 struct machine *machine__new_host(void) 119 { 120 struct machine *machine = malloc(sizeof(*machine)); 121 122 if (machine != NULL) { 123 machine__init(machine, "", HOST_KERNEL_ID); 124 125 if (machine__create_kernel_maps(machine) < 0) 126 goto out_delete; 127 } 128 129 return machine; 130 out_delete: 131 free(machine); 132 return NULL; 133 } 134 135 struct machine *machine__new_kallsyms(void) 136 { 137 struct machine *machine = machine__new_host(); 138 /* 139 * FIXME: 140 * 1) MAP__FUNCTION will go away when we stop loading separate maps for 141 * functions and data objects. 142 * 2) We should switch to machine__load_kallsyms(), i.e. not explicitely 143 * ask for not using the kcore parsing code, once this one is fixed 144 * to create a map per module. 145 */ 146 if (machine && machine__load_kallsyms(machine, "/proc/kallsyms", MAP__FUNCTION) <= 0) { 147 machine__delete(machine); 148 machine = NULL; 149 } 150 151 return machine; 152 } 153 154 static void dsos__purge(struct dsos *dsos) 155 { 156 struct dso *pos, *n; 157 158 down_write(&dsos->lock); 159 160 list_for_each_entry_safe(pos, n, &dsos->head, node) { 161 RB_CLEAR_NODE(&pos->rb_node); 162 pos->root = NULL; 163 list_del_init(&pos->node); 164 dso__put(pos); 165 } 166 167 up_write(&dsos->lock); 168 } 169 170 static void dsos__exit(struct dsos *dsos) 171 { 172 dsos__purge(dsos); 173 exit_rwsem(&dsos->lock); 174 } 175 176 void machine__delete_threads(struct machine *machine) 177 { 178 struct rb_node *nd; 179 int i; 180 181 for (i = 0; i < THREADS__TABLE_SIZE; i++) { 182 struct threads *threads = &machine->threads[i]; 183 down_write(&threads->lock); 184 nd = rb_first(&threads->entries); 185 while (nd) { 186 struct thread *t = rb_entry(nd, struct thread, rb_node); 187 188 nd = rb_next(nd); 189 __machine__remove_thread(machine, t, false); 190 } 191 up_write(&threads->lock); 192 } 193 } 194 195 void machine__exit(struct machine *machine) 196 { 197 int i; 198 199 if (machine == NULL) 200 return; 201 202 machine__destroy_kernel_maps(machine); 203 map_groups__exit(&machine->kmaps); 204 dsos__exit(&machine->dsos); 205 machine__exit_vdso(machine); 206 zfree(&machine->root_dir); 207 zfree(&machine->mmap_name); 208 zfree(&machine->current_tid); 209 210 for (i = 0; i < THREADS__TABLE_SIZE; i++) { 211 struct threads *threads = &machine->threads[i]; 212 exit_rwsem(&threads->lock); 213 } 214 } 215 216 void machine__delete(struct machine *machine) 217 { 218 if (machine) { 219 machine__exit(machine); 220 free(machine); 221 } 222 } 223 224 void machines__init(struct machines *machines) 225 { 226 machine__init(&machines->host, "", HOST_KERNEL_ID); 227 machines->guests = RB_ROOT; 228 } 229 230 void machines__exit(struct machines *machines) 231 { 232 machine__exit(&machines->host); 233 /* XXX exit guest */ 234 } 235 236 struct machine *machines__add(struct machines *machines, pid_t pid, 237 const char *root_dir) 238 { 239 struct rb_node **p = &machines->guests.rb_node; 240 struct rb_node *parent = NULL; 241 struct machine *pos, *machine = malloc(sizeof(*machine)); 242 243 if (machine == NULL) 244 return NULL; 245 246 if (machine__init(machine, root_dir, pid) != 0) { 247 free(machine); 248 return NULL; 249 } 250 251 while (*p != NULL) { 252 parent = *p; 253 pos = rb_entry(parent, struct machine, rb_node); 254 if (pid < pos->pid) 255 p = &(*p)->rb_left; 256 else 257 p = &(*p)->rb_right; 258 } 259 260 rb_link_node(&machine->rb_node, parent, p); 261 rb_insert_color(&machine->rb_node, &machines->guests); 262 263 return machine; 264 } 265 266 void machines__set_comm_exec(struct machines *machines, bool comm_exec) 267 { 268 struct rb_node *nd; 269 270 machines->host.comm_exec = comm_exec; 271 272 for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) { 273 struct machine *machine = rb_entry(nd, struct machine, rb_node); 274 275 machine->comm_exec = comm_exec; 276 } 277 } 278 279 struct machine *machines__find(struct machines *machines, pid_t pid) 280 { 281 struct rb_node **p = &machines->guests.rb_node; 282 struct rb_node *parent = NULL; 283 struct machine *machine; 284 struct machine *default_machine = NULL; 285 286 if (pid == HOST_KERNEL_ID) 287 return &machines->host; 288 289 while (*p != NULL) { 290 parent = *p; 291 machine = rb_entry(parent, struct machine, rb_node); 292 if (pid < machine->pid) 293 p = &(*p)->rb_left; 294 else if (pid > machine->pid) 295 p = &(*p)->rb_right; 296 else 297 return machine; 298 if (!machine->pid) 299 default_machine = machine; 300 } 301 302 return default_machine; 303 } 304 305 struct machine *machines__findnew(struct machines *machines, pid_t pid) 306 { 307 char path[PATH_MAX]; 308 const char *root_dir = ""; 309 struct machine *machine = machines__find(machines, pid); 310 311 if (machine && (machine->pid == pid)) 312 goto out; 313 314 if ((pid != HOST_KERNEL_ID) && 315 (pid != DEFAULT_GUEST_KERNEL_ID) && 316 (symbol_conf.guestmount)) { 317 sprintf(path, "%s/%d", symbol_conf.guestmount, pid); 318 if (access(path, R_OK)) { 319 static struct strlist *seen; 320 321 if (!seen) 322 seen = strlist__new(NULL, NULL); 323 324 if (!strlist__has_entry(seen, path)) { 325 pr_err("Can't access file %s\n", path); 326 strlist__add(seen, path); 327 } 328 machine = NULL; 329 goto out; 330 } 331 root_dir = path; 332 } 333 334 machine = machines__add(machines, pid, root_dir); 335 out: 336 return machine; 337 } 338 339 void machines__process_guests(struct machines *machines, 340 machine__process_t process, void *data) 341 { 342 struct rb_node *nd; 343 344 for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) { 345 struct machine *pos = rb_entry(nd, struct machine, rb_node); 346 process(pos, data); 347 } 348 } 349 350 void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size) 351 { 352 struct rb_node *node; 353 struct machine *machine; 354 355 machines->host.id_hdr_size = id_hdr_size; 356 357 for (node = rb_first(&machines->guests); node; node = rb_next(node)) { 358 machine = rb_entry(node, struct machine, rb_node); 359 machine->id_hdr_size = id_hdr_size; 360 } 361 362 return; 363 } 364 365 static void machine__update_thread_pid(struct machine *machine, 366 struct thread *th, pid_t pid) 367 { 368 struct thread *leader; 369 370 if (pid == th->pid_ || pid == -1 || th->pid_ != -1) 371 return; 372 373 th->pid_ = pid; 374 375 if (th->pid_ == th->tid) 376 return; 377 378 leader = __machine__findnew_thread(machine, th->pid_, th->pid_); 379 if (!leader) 380 goto out_err; 381 382 if (!leader->mg) 383 leader->mg = map_groups__new(machine); 384 385 if (!leader->mg) 386 goto out_err; 387 388 if (th->mg == leader->mg) 389 return; 390 391 if (th->mg) { 392 /* 393 * Maps are created from MMAP events which provide the pid and 394 * tid. Consequently there never should be any maps on a thread 395 * with an unknown pid. Just print an error if there are. 396 */ 397 if (!map_groups__empty(th->mg)) 398 pr_err("Discarding thread maps for %d:%d\n", 399 th->pid_, th->tid); 400 map_groups__put(th->mg); 401 } 402 403 th->mg = map_groups__get(leader->mg); 404 out_put: 405 thread__put(leader); 406 return; 407 out_err: 408 pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid); 409 goto out_put; 410 } 411 412 /* 413 * Caller must eventually drop thread->refcnt returned with a successful 414 * lookup/new thread inserted. 415 */ 416 static struct thread *____machine__findnew_thread(struct machine *machine, 417 struct threads *threads, 418 pid_t pid, pid_t tid, 419 bool create) 420 { 421 struct rb_node **p = &threads->entries.rb_node; 422 struct rb_node *parent = NULL; 423 struct thread *th; 424 425 /* 426 * Front-end cache - TID lookups come in blocks, 427 * so most of the time we dont have to look up 428 * the full rbtree: 429 */ 430 th = threads->last_match; 431 if (th != NULL) { 432 if (th->tid == tid) { 433 machine__update_thread_pid(machine, th, pid); 434 return thread__get(th); 435 } 436 437 threads->last_match = NULL; 438 } 439 440 while (*p != NULL) { 441 parent = *p; 442 th = rb_entry(parent, struct thread, rb_node); 443 444 if (th->tid == tid) { 445 threads->last_match = th; 446 machine__update_thread_pid(machine, th, pid); 447 return thread__get(th); 448 } 449 450 if (tid < th->tid) 451 p = &(*p)->rb_left; 452 else 453 p = &(*p)->rb_right; 454 } 455 456 if (!create) 457 return NULL; 458 459 th = thread__new(pid, tid); 460 if (th != NULL) { 461 rb_link_node(&th->rb_node, parent, p); 462 rb_insert_color(&th->rb_node, &threads->entries); 463 464 /* 465 * We have to initialize map_groups separately 466 * after rb tree is updated. 467 * 468 * The reason is that we call machine__findnew_thread 469 * within thread__init_map_groups to find the thread 470 * leader and that would screwed the rb tree. 471 */ 472 if (thread__init_map_groups(th, machine)) { 473 rb_erase_init(&th->rb_node, &threads->entries); 474 RB_CLEAR_NODE(&th->rb_node); 475 thread__put(th); 476 return NULL; 477 } 478 /* 479 * It is now in the rbtree, get a ref 480 */ 481 thread__get(th); 482 threads->last_match = th; 483 ++threads->nr; 484 } 485 486 return th; 487 } 488 489 struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid) 490 { 491 return ____machine__findnew_thread(machine, machine__threads(machine, tid), pid, tid, true); 492 } 493 494 struct thread *machine__findnew_thread(struct machine *machine, pid_t pid, 495 pid_t tid) 496 { 497 struct threads *threads = machine__threads(machine, tid); 498 struct thread *th; 499 500 down_write(&threads->lock); 501 th = __machine__findnew_thread(machine, pid, tid); 502 up_write(&threads->lock); 503 return th; 504 } 505 506 struct thread *machine__find_thread(struct machine *machine, pid_t pid, 507 pid_t tid) 508 { 509 struct threads *threads = machine__threads(machine, tid); 510 struct thread *th; 511 512 down_read(&threads->lock); 513 th = ____machine__findnew_thread(machine, threads, pid, tid, false); 514 up_read(&threads->lock); 515 return th; 516 } 517 518 struct comm *machine__thread_exec_comm(struct machine *machine, 519 struct thread *thread) 520 { 521 if (machine->comm_exec) 522 return thread__exec_comm(thread); 523 else 524 return thread__comm(thread); 525 } 526 527 int machine__process_comm_event(struct machine *machine, union perf_event *event, 528 struct perf_sample *sample) 529 { 530 struct thread *thread = machine__findnew_thread(machine, 531 event->comm.pid, 532 event->comm.tid); 533 bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC; 534 int err = 0; 535 536 if (exec) 537 machine->comm_exec = true; 538 539 if (dump_trace) 540 perf_event__fprintf_comm(event, stdout); 541 542 if (thread == NULL || 543 __thread__set_comm(thread, event->comm.comm, sample->time, exec)) { 544 dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n"); 545 err = -1; 546 } 547 548 thread__put(thread); 549 550 return err; 551 } 552 553 int machine__process_namespaces_event(struct machine *machine __maybe_unused, 554 union perf_event *event, 555 struct perf_sample *sample __maybe_unused) 556 { 557 struct thread *thread = machine__findnew_thread(machine, 558 event->namespaces.pid, 559 event->namespaces.tid); 560 int err = 0; 561 562 WARN_ONCE(event->namespaces.nr_namespaces > NR_NAMESPACES, 563 "\nWARNING: kernel seems to support more namespaces than perf" 564 " tool.\nTry updating the perf tool..\n\n"); 565 566 WARN_ONCE(event->namespaces.nr_namespaces < NR_NAMESPACES, 567 "\nWARNING: perf tool seems to support more namespaces than" 568 " the kernel.\nTry updating the kernel..\n\n"); 569 570 if (dump_trace) 571 perf_event__fprintf_namespaces(event, stdout); 572 573 if (thread == NULL || 574 thread__set_namespaces(thread, sample->time, &event->namespaces)) { 575 dump_printf("problem processing PERF_RECORD_NAMESPACES, skipping event.\n"); 576 err = -1; 577 } 578 579 thread__put(thread); 580 581 return err; 582 } 583 584 int machine__process_lost_event(struct machine *machine __maybe_unused, 585 union perf_event *event, struct perf_sample *sample __maybe_unused) 586 { 587 dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n", 588 event->lost.id, event->lost.lost); 589 return 0; 590 } 591 592 int machine__process_lost_samples_event(struct machine *machine __maybe_unused, 593 union perf_event *event, struct perf_sample *sample) 594 { 595 dump_printf(": id:%" PRIu64 ": lost samples :%" PRIu64 "\n", 596 sample->id, event->lost_samples.lost); 597 return 0; 598 } 599 600 static struct dso *machine__findnew_module_dso(struct machine *machine, 601 struct kmod_path *m, 602 const char *filename) 603 { 604 struct dso *dso; 605 606 down_write(&machine->dsos.lock); 607 608 dso = __dsos__find(&machine->dsos, m->name, true); 609 if (!dso) { 610 dso = __dsos__addnew(&machine->dsos, m->name); 611 if (dso == NULL) 612 goto out_unlock; 613 614 dso__set_module_info(dso, m, machine); 615 dso__set_long_name(dso, strdup(filename), true); 616 } 617 618 dso__get(dso); 619 out_unlock: 620 up_write(&machine->dsos.lock); 621 return dso; 622 } 623 624 int machine__process_aux_event(struct machine *machine __maybe_unused, 625 union perf_event *event) 626 { 627 if (dump_trace) 628 perf_event__fprintf_aux(event, stdout); 629 return 0; 630 } 631 632 int machine__process_itrace_start_event(struct machine *machine __maybe_unused, 633 union perf_event *event) 634 { 635 if (dump_trace) 636 perf_event__fprintf_itrace_start(event, stdout); 637 return 0; 638 } 639 640 int machine__process_switch_event(struct machine *machine __maybe_unused, 641 union perf_event *event) 642 { 643 if (dump_trace) 644 perf_event__fprintf_switch(event, stdout); 645 return 0; 646 } 647 648 static void dso__adjust_kmod_long_name(struct dso *dso, const char *filename) 649 { 650 const char *dup_filename; 651 652 if (!filename || !dso || !dso->long_name) 653 return; 654 if (dso->long_name[0] != '[') 655 return; 656 if (!strchr(filename, '/')) 657 return; 658 659 dup_filename = strdup(filename); 660 if (!dup_filename) 661 return; 662 663 dso__set_long_name(dso, dup_filename, true); 664 } 665 666 struct map *machine__findnew_module_map(struct machine *machine, u64 start, 667 const char *filename) 668 { 669 struct map *map = NULL; 670 struct dso *dso = NULL; 671 struct kmod_path m; 672 673 if (kmod_path__parse_name(&m, filename)) 674 return NULL; 675 676 map = map_groups__find_by_name(&machine->kmaps, MAP__FUNCTION, 677 m.name); 678 if (map) { 679 /* 680 * If the map's dso is an offline module, give dso__load() 681 * a chance to find the file path of that module by fixing 682 * long_name. 683 */ 684 dso__adjust_kmod_long_name(map->dso, filename); 685 goto out; 686 } 687 688 dso = machine__findnew_module_dso(machine, &m, filename); 689 if (dso == NULL) 690 goto out; 691 692 map = map__new2(start, dso, MAP__FUNCTION); 693 if (map == NULL) 694 goto out; 695 696 map_groups__insert(&machine->kmaps, map); 697 698 /* Put the map here because map_groups__insert alread got it */ 699 map__put(map); 700 out: 701 /* put the dso here, corresponding to machine__findnew_module_dso */ 702 dso__put(dso); 703 free(m.name); 704 return map; 705 } 706 707 size_t machines__fprintf_dsos(struct machines *machines, FILE *fp) 708 { 709 struct rb_node *nd; 710 size_t ret = __dsos__fprintf(&machines->host.dsos.head, fp); 711 712 for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) { 713 struct machine *pos = rb_entry(nd, struct machine, rb_node); 714 ret += __dsos__fprintf(&pos->dsos.head, fp); 715 } 716 717 return ret; 718 } 719 720 size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp, 721 bool (skip)(struct dso *dso, int parm), int parm) 722 { 723 return __dsos__fprintf_buildid(&m->dsos.head, fp, skip, parm); 724 } 725 726 size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp, 727 bool (skip)(struct dso *dso, int parm), int parm) 728 { 729 struct rb_node *nd; 730 size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm); 731 732 for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) { 733 struct machine *pos = rb_entry(nd, struct machine, rb_node); 734 ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm); 735 } 736 return ret; 737 } 738 739 size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp) 740 { 741 int i; 742 size_t printed = 0; 743 struct dso *kdso = machine__kernel_map(machine)->dso; 744 745 if (kdso->has_build_id) { 746 char filename[PATH_MAX]; 747 if (dso__build_id_filename(kdso, filename, sizeof(filename), 748 false)) 749 printed += fprintf(fp, "[0] %s\n", filename); 750 } 751 752 for (i = 0; i < vmlinux_path__nr_entries; ++i) 753 printed += fprintf(fp, "[%d] %s\n", 754 i + kdso->has_build_id, vmlinux_path[i]); 755 756 return printed; 757 } 758 759 size_t machine__fprintf(struct machine *machine, FILE *fp) 760 { 761 struct rb_node *nd; 762 size_t ret; 763 int i; 764 765 for (i = 0; i < THREADS__TABLE_SIZE; i++) { 766 struct threads *threads = &machine->threads[i]; 767 768 down_read(&threads->lock); 769 770 ret = fprintf(fp, "Threads: %u\n", threads->nr); 771 772 for (nd = rb_first(&threads->entries); nd; nd = rb_next(nd)) { 773 struct thread *pos = rb_entry(nd, struct thread, rb_node); 774 775 ret += thread__fprintf(pos, fp); 776 } 777 778 up_read(&threads->lock); 779 } 780 return ret; 781 } 782 783 static struct dso *machine__get_kernel(struct machine *machine) 784 { 785 const char *vmlinux_name = machine->mmap_name; 786 struct dso *kernel; 787 788 if (machine__is_host(machine)) { 789 if (symbol_conf.vmlinux_name) 790 vmlinux_name = symbol_conf.vmlinux_name; 791 792 kernel = machine__findnew_kernel(machine, vmlinux_name, 793 "[kernel]", DSO_TYPE_KERNEL); 794 } else { 795 if (symbol_conf.default_guest_vmlinux_name) 796 vmlinux_name = symbol_conf.default_guest_vmlinux_name; 797 798 kernel = machine__findnew_kernel(machine, vmlinux_name, 799 "[guest.kernel]", 800 DSO_TYPE_GUEST_KERNEL); 801 } 802 803 if (kernel != NULL && (!kernel->has_build_id)) 804 dso__read_running_kernel_build_id(kernel, machine); 805 806 return kernel; 807 } 808 809 struct process_args { 810 u64 start; 811 }; 812 813 static void machine__get_kallsyms_filename(struct machine *machine, char *buf, 814 size_t bufsz) 815 { 816 if (machine__is_default_guest(machine)) 817 scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms); 818 else 819 scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir); 820 } 821 822 const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL}; 823 824 /* Figure out the start address of kernel map from /proc/kallsyms. 825 * Returns the name of the start symbol in *symbol_name. Pass in NULL as 826 * symbol_name if it's not that important. 827 */ 828 static int machine__get_running_kernel_start(struct machine *machine, 829 const char **symbol_name, u64 *start) 830 { 831 char filename[PATH_MAX]; 832 int i, err = -1; 833 const char *name; 834 u64 addr = 0; 835 836 machine__get_kallsyms_filename(machine, filename, PATH_MAX); 837 838 if (symbol__restricted_filename(filename, "/proc/kallsyms")) 839 return 0; 840 841 for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) { 842 err = kallsyms__get_function_start(filename, name, &addr); 843 if (!err) 844 break; 845 } 846 847 if (err) 848 return -1; 849 850 if (symbol_name) 851 *symbol_name = name; 852 853 *start = addr; 854 return 0; 855 } 856 857 static int 858 __machine__create_kernel_maps(struct machine *machine, struct dso *kernel) 859 { 860 int type; 861 862 /* In case of renewal the kernel map, destroy previous one */ 863 machine__destroy_kernel_maps(machine); 864 865 for (type = 0; type < MAP__NR_TYPES; ++type) { 866 struct kmap *kmap; 867 struct map *map; 868 869 machine->vmlinux_maps[type] = map__new2(0, kernel, type); 870 if (machine->vmlinux_maps[type] == NULL) 871 return -1; 872 873 machine->vmlinux_maps[type]->map_ip = 874 machine->vmlinux_maps[type]->unmap_ip = 875 identity__map_ip; 876 map = __machine__kernel_map(machine, type); 877 kmap = map__kmap(map); 878 if (!kmap) 879 return -1; 880 881 kmap->kmaps = &machine->kmaps; 882 map_groups__insert(&machine->kmaps, map); 883 } 884 885 return 0; 886 } 887 888 void machine__destroy_kernel_maps(struct machine *machine) 889 { 890 int type; 891 892 for (type = 0; type < MAP__NR_TYPES; ++type) { 893 struct kmap *kmap; 894 struct map *map = __machine__kernel_map(machine, type); 895 896 if (map == NULL) 897 continue; 898 899 kmap = map__kmap(map); 900 map_groups__remove(&machine->kmaps, map); 901 if (kmap && kmap->ref_reloc_sym) { 902 /* 903 * ref_reloc_sym is shared among all maps, so free just 904 * on one of them. 905 */ 906 if (type == MAP__FUNCTION) { 907 zfree((char **)&kmap->ref_reloc_sym->name); 908 zfree(&kmap->ref_reloc_sym); 909 } else 910 kmap->ref_reloc_sym = NULL; 911 } 912 913 map__put(machine->vmlinux_maps[type]); 914 machine->vmlinux_maps[type] = NULL; 915 } 916 } 917 918 int machines__create_guest_kernel_maps(struct machines *machines) 919 { 920 int ret = 0; 921 struct dirent **namelist = NULL; 922 int i, items = 0; 923 char path[PATH_MAX]; 924 pid_t pid; 925 char *endp; 926 927 if (symbol_conf.default_guest_vmlinux_name || 928 symbol_conf.default_guest_modules || 929 symbol_conf.default_guest_kallsyms) { 930 machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID); 931 } 932 933 if (symbol_conf.guestmount) { 934 items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL); 935 if (items <= 0) 936 return -ENOENT; 937 for (i = 0; i < items; i++) { 938 if (!isdigit(namelist[i]->d_name[0])) { 939 /* Filter out . and .. */ 940 continue; 941 } 942 pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10); 943 if ((*endp != '\0') || 944 (endp == namelist[i]->d_name) || 945 (errno == ERANGE)) { 946 pr_debug("invalid directory (%s). Skipping.\n", 947 namelist[i]->d_name); 948 continue; 949 } 950 sprintf(path, "%s/%s/proc/kallsyms", 951 symbol_conf.guestmount, 952 namelist[i]->d_name); 953 ret = access(path, R_OK); 954 if (ret) { 955 pr_debug("Can't access file %s\n", path); 956 goto failure; 957 } 958 machines__create_kernel_maps(machines, pid); 959 } 960 failure: 961 free(namelist); 962 } 963 964 return ret; 965 } 966 967 void machines__destroy_kernel_maps(struct machines *machines) 968 { 969 struct rb_node *next = rb_first(&machines->guests); 970 971 machine__destroy_kernel_maps(&machines->host); 972 973 while (next) { 974 struct machine *pos = rb_entry(next, struct machine, rb_node); 975 976 next = rb_next(&pos->rb_node); 977 rb_erase(&pos->rb_node, &machines->guests); 978 machine__delete(pos); 979 } 980 } 981 982 int machines__create_kernel_maps(struct machines *machines, pid_t pid) 983 { 984 struct machine *machine = machines__findnew(machines, pid); 985 986 if (machine == NULL) 987 return -1; 988 989 return machine__create_kernel_maps(machine); 990 } 991 992 int machine__load_kallsyms(struct machine *machine, const char *filename, 993 enum map_type type) 994 { 995 struct map *map = machine__kernel_map(machine); 996 int ret = __dso__load_kallsyms(map->dso, filename, map, true); 997 998 if (ret > 0) { 999 dso__set_loaded(map->dso, type); 1000 /* 1001 * Since /proc/kallsyms will have multiple sessions for the 1002 * kernel, with modules between them, fixup the end of all 1003 * sections. 1004 */ 1005 __map_groups__fixup_end(&machine->kmaps, type); 1006 } 1007 1008 return ret; 1009 } 1010 1011 int machine__load_vmlinux_path(struct machine *machine, enum map_type type) 1012 { 1013 struct map *map = machine__kernel_map(machine); 1014 int ret = dso__load_vmlinux_path(map->dso, map); 1015 1016 if (ret > 0) 1017 dso__set_loaded(map->dso, type); 1018 1019 return ret; 1020 } 1021 1022 static char *get_kernel_version(const char *root_dir) 1023 { 1024 char version[PATH_MAX]; 1025 FILE *file; 1026 char *name, *tmp; 1027 const char *prefix = "Linux version "; 1028 1029 sprintf(version, "%s/proc/version", root_dir); 1030 file = fopen(version, "r"); 1031 if (!file) 1032 return NULL; 1033 1034 version[0] = '\0'; 1035 tmp = fgets(version, sizeof(version), file); 1036 fclose(file); 1037 1038 name = strstr(version, prefix); 1039 if (!name) 1040 return NULL; 1041 name += strlen(prefix); 1042 tmp = strchr(name, ' '); 1043 if (tmp) 1044 *tmp = '\0'; 1045 1046 return strdup(name); 1047 } 1048 1049 static bool is_kmod_dso(struct dso *dso) 1050 { 1051 return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE || 1052 dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE; 1053 } 1054 1055 static int map_groups__set_module_path(struct map_groups *mg, const char *path, 1056 struct kmod_path *m) 1057 { 1058 struct map *map; 1059 char *long_name; 1060 1061 map = map_groups__find_by_name(mg, MAP__FUNCTION, m->name); 1062 if (map == NULL) 1063 return 0; 1064 1065 long_name = strdup(path); 1066 if (long_name == NULL) 1067 return -ENOMEM; 1068 1069 dso__set_long_name(map->dso, long_name, true); 1070 dso__kernel_module_get_build_id(map->dso, ""); 1071 1072 /* 1073 * Full name could reveal us kmod compression, so 1074 * we need to update the symtab_type if needed. 1075 */ 1076 if (m->comp && is_kmod_dso(map->dso)) 1077 map->dso->symtab_type++; 1078 1079 return 0; 1080 } 1081 1082 static int map_groups__set_modules_path_dir(struct map_groups *mg, 1083 const char *dir_name, int depth) 1084 { 1085 struct dirent *dent; 1086 DIR *dir = opendir(dir_name); 1087 int ret = 0; 1088 1089 if (!dir) { 1090 pr_debug("%s: cannot open %s dir\n", __func__, dir_name); 1091 return -1; 1092 } 1093 1094 while ((dent = readdir(dir)) != NULL) { 1095 char path[PATH_MAX]; 1096 struct stat st; 1097 1098 /*sshfs might return bad dent->d_type, so we have to stat*/ 1099 snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name); 1100 if (stat(path, &st)) 1101 continue; 1102 1103 if (S_ISDIR(st.st_mode)) { 1104 if (!strcmp(dent->d_name, ".") || 1105 !strcmp(dent->d_name, "..")) 1106 continue; 1107 1108 /* Do not follow top-level source and build symlinks */ 1109 if (depth == 0) { 1110 if (!strcmp(dent->d_name, "source") || 1111 !strcmp(dent->d_name, "build")) 1112 continue; 1113 } 1114 1115 ret = map_groups__set_modules_path_dir(mg, path, 1116 depth + 1); 1117 if (ret < 0) 1118 goto out; 1119 } else { 1120 struct kmod_path m; 1121 1122 ret = kmod_path__parse_name(&m, dent->d_name); 1123 if (ret) 1124 goto out; 1125 1126 if (m.kmod) 1127 ret = map_groups__set_module_path(mg, path, &m); 1128 1129 free(m.name); 1130 1131 if (ret) 1132 goto out; 1133 } 1134 } 1135 1136 out: 1137 closedir(dir); 1138 return ret; 1139 } 1140 1141 static int machine__set_modules_path(struct machine *machine) 1142 { 1143 char *version; 1144 char modules_path[PATH_MAX]; 1145 1146 version = get_kernel_version(machine->root_dir); 1147 if (!version) 1148 return -1; 1149 1150 snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s", 1151 machine->root_dir, version); 1152 free(version); 1153 1154 return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0); 1155 } 1156 int __weak arch__fix_module_text_start(u64 *start __maybe_unused, 1157 const char *name __maybe_unused) 1158 { 1159 return 0; 1160 } 1161 1162 static int machine__create_module(void *arg, const char *name, u64 start, 1163 u64 size) 1164 { 1165 struct machine *machine = arg; 1166 struct map *map; 1167 1168 if (arch__fix_module_text_start(&start, name) < 0) 1169 return -1; 1170 1171 map = machine__findnew_module_map(machine, start, name); 1172 if (map == NULL) 1173 return -1; 1174 map->end = start + size; 1175 1176 dso__kernel_module_get_build_id(map->dso, machine->root_dir); 1177 1178 return 0; 1179 } 1180 1181 static int machine__create_modules(struct machine *machine) 1182 { 1183 const char *modules; 1184 char path[PATH_MAX]; 1185 1186 if (machine__is_default_guest(machine)) { 1187 modules = symbol_conf.default_guest_modules; 1188 } else { 1189 snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir); 1190 modules = path; 1191 } 1192 1193 if (symbol__restricted_filename(modules, "/proc/modules")) 1194 return -1; 1195 1196 if (modules__parse(modules, machine, machine__create_module)) 1197 return -1; 1198 1199 if (!machine__set_modules_path(machine)) 1200 return 0; 1201 1202 pr_debug("Problems setting modules path maps, continuing anyway...\n"); 1203 1204 return 0; 1205 } 1206 1207 static void machine__set_kernel_mmap(struct machine *machine, 1208 u64 start, u64 end) 1209 { 1210 int i; 1211 1212 for (i = 0; i < MAP__NR_TYPES; i++) { 1213 machine->vmlinux_maps[i]->start = start; 1214 machine->vmlinux_maps[i]->end = end; 1215 1216 /* 1217 * Be a bit paranoid here, some perf.data file came with 1218 * a zero sized synthesized MMAP event for the kernel. 1219 */ 1220 if (start == 0 && end == 0) 1221 machine->vmlinux_maps[i]->end = ~0ULL; 1222 } 1223 } 1224 1225 int machine__create_kernel_maps(struct machine *machine) 1226 { 1227 struct dso *kernel = machine__get_kernel(machine); 1228 const char *name = NULL; 1229 struct map *map; 1230 u64 addr = 0; 1231 int ret; 1232 1233 if (kernel == NULL) 1234 return -1; 1235 1236 ret = __machine__create_kernel_maps(machine, kernel); 1237 dso__put(kernel); 1238 if (ret < 0) 1239 return -1; 1240 1241 if (symbol_conf.use_modules && machine__create_modules(machine) < 0) { 1242 if (machine__is_host(machine)) 1243 pr_debug("Problems creating module maps, " 1244 "continuing anyway...\n"); 1245 else 1246 pr_debug("Problems creating module maps for guest %d, " 1247 "continuing anyway...\n", machine->pid); 1248 } 1249 1250 if (!machine__get_running_kernel_start(machine, &name, &addr)) { 1251 if (name && 1252 maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name, addr)) { 1253 machine__destroy_kernel_maps(machine); 1254 return -1; 1255 } 1256 1257 /* we have a real start address now, so re-order the kmaps */ 1258 map = machine__kernel_map(machine); 1259 1260 map__get(map); 1261 map_groups__remove(&machine->kmaps, map); 1262 1263 /* assume it's the last in the kmaps */ 1264 machine__set_kernel_mmap(machine, addr, ~0ULL); 1265 1266 map_groups__insert(&machine->kmaps, map); 1267 map__put(map); 1268 } 1269 1270 /* update end address of the kernel map using adjacent module address */ 1271 map = map__next(machine__kernel_map(machine)); 1272 if (map) 1273 machine__set_kernel_mmap(machine, addr, map->start); 1274 1275 return 0; 1276 } 1277 1278 static bool machine__uses_kcore(struct machine *machine) 1279 { 1280 struct dso *dso; 1281 1282 list_for_each_entry(dso, &machine->dsos.head, node) { 1283 if (dso__is_kcore(dso)) 1284 return true; 1285 } 1286 1287 return false; 1288 } 1289 1290 static int machine__process_kernel_mmap_event(struct machine *machine, 1291 union perf_event *event) 1292 { 1293 struct map *map; 1294 enum dso_kernel_type kernel_type; 1295 bool is_kernel_mmap; 1296 1297 /* If we have maps from kcore then we do not need or want any others */ 1298 if (machine__uses_kcore(machine)) 1299 return 0; 1300 1301 if (machine__is_host(machine)) 1302 kernel_type = DSO_TYPE_KERNEL; 1303 else 1304 kernel_type = DSO_TYPE_GUEST_KERNEL; 1305 1306 is_kernel_mmap = memcmp(event->mmap.filename, 1307 machine->mmap_name, 1308 strlen(machine->mmap_name) - 1) == 0; 1309 if (event->mmap.filename[0] == '/' || 1310 (!is_kernel_mmap && event->mmap.filename[0] == '[')) { 1311 map = machine__findnew_module_map(machine, event->mmap.start, 1312 event->mmap.filename); 1313 if (map == NULL) 1314 goto out_problem; 1315 1316 map->end = map->start + event->mmap.len; 1317 } else if (is_kernel_mmap) { 1318 const char *symbol_name = (event->mmap.filename + 1319 strlen(machine->mmap_name)); 1320 /* 1321 * Should be there already, from the build-id table in 1322 * the header. 1323 */ 1324 struct dso *kernel = NULL; 1325 struct dso *dso; 1326 1327 down_read(&machine->dsos.lock); 1328 1329 list_for_each_entry(dso, &machine->dsos.head, node) { 1330 1331 /* 1332 * The cpumode passed to is_kernel_module is not the 1333 * cpumode of *this* event. If we insist on passing 1334 * correct cpumode to is_kernel_module, we should 1335 * record the cpumode when we adding this dso to the 1336 * linked list. 1337 * 1338 * However we don't really need passing correct 1339 * cpumode. We know the correct cpumode must be kernel 1340 * mode (if not, we should not link it onto kernel_dsos 1341 * list). 1342 * 1343 * Therefore, we pass PERF_RECORD_MISC_CPUMODE_UNKNOWN. 1344 * is_kernel_module() treats it as a kernel cpumode. 1345 */ 1346 1347 if (!dso->kernel || 1348 is_kernel_module(dso->long_name, 1349 PERF_RECORD_MISC_CPUMODE_UNKNOWN)) 1350 continue; 1351 1352 1353 kernel = dso; 1354 break; 1355 } 1356 1357 up_read(&machine->dsos.lock); 1358 1359 if (kernel == NULL) 1360 kernel = machine__findnew_dso(machine, machine->mmap_name); 1361 if (kernel == NULL) 1362 goto out_problem; 1363 1364 kernel->kernel = kernel_type; 1365 if (__machine__create_kernel_maps(machine, kernel) < 0) { 1366 dso__put(kernel); 1367 goto out_problem; 1368 } 1369 1370 if (strstr(kernel->long_name, "vmlinux")) 1371 dso__set_short_name(kernel, "[kernel.vmlinux]", false); 1372 1373 machine__set_kernel_mmap(machine, event->mmap.start, 1374 event->mmap.start + event->mmap.len); 1375 1376 /* 1377 * Avoid using a zero address (kptr_restrict) for the ref reloc 1378 * symbol. Effectively having zero here means that at record 1379 * time /proc/sys/kernel/kptr_restrict was non zero. 1380 */ 1381 if (event->mmap.pgoff != 0) { 1382 maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, 1383 symbol_name, 1384 event->mmap.pgoff); 1385 } 1386 1387 if (machine__is_default_guest(machine)) { 1388 /* 1389 * preload dso of guest kernel and modules 1390 */ 1391 dso__load(kernel, machine__kernel_map(machine)); 1392 } 1393 } 1394 return 0; 1395 out_problem: 1396 return -1; 1397 } 1398 1399 int machine__process_mmap2_event(struct machine *machine, 1400 union perf_event *event, 1401 struct perf_sample *sample) 1402 { 1403 struct thread *thread; 1404 struct map *map; 1405 enum map_type type; 1406 int ret = 0; 1407 1408 if (dump_trace) 1409 perf_event__fprintf_mmap2(event, stdout); 1410 1411 if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL || 1412 sample->cpumode == PERF_RECORD_MISC_KERNEL) { 1413 ret = machine__process_kernel_mmap_event(machine, event); 1414 if (ret < 0) 1415 goto out_problem; 1416 return 0; 1417 } 1418 1419 thread = machine__findnew_thread(machine, event->mmap2.pid, 1420 event->mmap2.tid); 1421 if (thread == NULL) 1422 goto out_problem; 1423 1424 if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) 1425 type = MAP__VARIABLE; 1426 else 1427 type = MAP__FUNCTION; 1428 1429 map = map__new(machine, event->mmap2.start, 1430 event->mmap2.len, event->mmap2.pgoff, 1431 event->mmap2.maj, 1432 event->mmap2.min, event->mmap2.ino, 1433 event->mmap2.ino_generation, 1434 event->mmap2.prot, 1435 event->mmap2.flags, 1436 event->mmap2.filename, type, thread); 1437 1438 if (map == NULL) 1439 goto out_problem_map; 1440 1441 ret = thread__insert_map(thread, map); 1442 if (ret) 1443 goto out_problem_insert; 1444 1445 thread__put(thread); 1446 map__put(map); 1447 return 0; 1448 1449 out_problem_insert: 1450 map__put(map); 1451 out_problem_map: 1452 thread__put(thread); 1453 out_problem: 1454 dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n"); 1455 return 0; 1456 } 1457 1458 int machine__process_mmap_event(struct machine *machine, union perf_event *event, 1459 struct perf_sample *sample) 1460 { 1461 struct thread *thread; 1462 struct map *map; 1463 enum map_type type; 1464 int ret = 0; 1465 1466 if (dump_trace) 1467 perf_event__fprintf_mmap(event, stdout); 1468 1469 if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL || 1470 sample->cpumode == PERF_RECORD_MISC_KERNEL) { 1471 ret = machine__process_kernel_mmap_event(machine, event); 1472 if (ret < 0) 1473 goto out_problem; 1474 return 0; 1475 } 1476 1477 thread = machine__findnew_thread(machine, event->mmap.pid, 1478 event->mmap.tid); 1479 if (thread == NULL) 1480 goto out_problem; 1481 1482 if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) 1483 type = MAP__VARIABLE; 1484 else 1485 type = MAP__FUNCTION; 1486 1487 map = map__new(machine, event->mmap.start, 1488 event->mmap.len, event->mmap.pgoff, 1489 0, 0, 0, 0, 0, 0, 1490 event->mmap.filename, 1491 type, thread); 1492 1493 if (map == NULL) 1494 goto out_problem_map; 1495 1496 ret = thread__insert_map(thread, map); 1497 if (ret) 1498 goto out_problem_insert; 1499 1500 thread__put(thread); 1501 map__put(map); 1502 return 0; 1503 1504 out_problem_insert: 1505 map__put(map); 1506 out_problem_map: 1507 thread__put(thread); 1508 out_problem: 1509 dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n"); 1510 return 0; 1511 } 1512 1513 static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock) 1514 { 1515 struct threads *threads = machine__threads(machine, th->tid); 1516 1517 if (threads->last_match == th) 1518 threads->last_match = NULL; 1519 1520 BUG_ON(refcount_read(&th->refcnt) == 0); 1521 if (lock) 1522 down_write(&threads->lock); 1523 rb_erase_init(&th->rb_node, &threads->entries); 1524 RB_CLEAR_NODE(&th->rb_node); 1525 --threads->nr; 1526 /* 1527 * Move it first to the dead_threads list, then drop the reference, 1528 * if this is the last reference, then the thread__delete destructor 1529 * will be called and we will remove it from the dead_threads list. 1530 */ 1531 list_add_tail(&th->node, &threads->dead); 1532 if (lock) 1533 up_write(&threads->lock); 1534 thread__put(th); 1535 } 1536 1537 void machine__remove_thread(struct machine *machine, struct thread *th) 1538 { 1539 return __machine__remove_thread(machine, th, true); 1540 } 1541 1542 int machine__process_fork_event(struct machine *machine, union perf_event *event, 1543 struct perf_sample *sample) 1544 { 1545 struct thread *thread = machine__find_thread(machine, 1546 event->fork.pid, 1547 event->fork.tid); 1548 struct thread *parent = machine__findnew_thread(machine, 1549 event->fork.ppid, 1550 event->fork.ptid); 1551 int err = 0; 1552 1553 if (dump_trace) 1554 perf_event__fprintf_task(event, stdout); 1555 1556 /* 1557 * There may be an existing thread that is not actually the parent, 1558 * either because we are processing events out of order, or because the 1559 * (fork) event that would have removed the thread was lost. Assume the 1560 * latter case and continue on as best we can. 1561 */ 1562 if (parent->pid_ != (pid_t)event->fork.ppid) { 1563 dump_printf("removing erroneous parent thread %d/%d\n", 1564 parent->pid_, parent->tid); 1565 machine__remove_thread(machine, parent); 1566 thread__put(parent); 1567 parent = machine__findnew_thread(machine, event->fork.ppid, 1568 event->fork.ptid); 1569 } 1570 1571 /* if a thread currently exists for the thread id remove it */ 1572 if (thread != NULL) { 1573 machine__remove_thread(machine, thread); 1574 thread__put(thread); 1575 } 1576 1577 thread = machine__findnew_thread(machine, event->fork.pid, 1578 event->fork.tid); 1579 1580 if (thread == NULL || parent == NULL || 1581 thread__fork(thread, parent, sample->time) < 0) { 1582 dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n"); 1583 err = -1; 1584 } 1585 thread__put(thread); 1586 thread__put(parent); 1587 1588 return err; 1589 } 1590 1591 int machine__process_exit_event(struct machine *machine, union perf_event *event, 1592 struct perf_sample *sample __maybe_unused) 1593 { 1594 struct thread *thread = machine__find_thread(machine, 1595 event->fork.pid, 1596 event->fork.tid); 1597 1598 if (dump_trace) 1599 perf_event__fprintf_task(event, stdout); 1600 1601 if (thread != NULL) { 1602 thread__exited(thread); 1603 thread__put(thread); 1604 } 1605 1606 return 0; 1607 } 1608 1609 int machine__process_event(struct machine *machine, union perf_event *event, 1610 struct perf_sample *sample) 1611 { 1612 int ret; 1613 1614 switch (event->header.type) { 1615 case PERF_RECORD_COMM: 1616 ret = machine__process_comm_event(machine, event, sample); break; 1617 case PERF_RECORD_MMAP: 1618 ret = machine__process_mmap_event(machine, event, sample); break; 1619 case PERF_RECORD_NAMESPACES: 1620 ret = machine__process_namespaces_event(machine, event, sample); break; 1621 case PERF_RECORD_MMAP2: 1622 ret = machine__process_mmap2_event(machine, event, sample); break; 1623 case PERF_RECORD_FORK: 1624 ret = machine__process_fork_event(machine, event, sample); break; 1625 case PERF_RECORD_EXIT: 1626 ret = machine__process_exit_event(machine, event, sample); break; 1627 case PERF_RECORD_LOST: 1628 ret = machine__process_lost_event(machine, event, sample); break; 1629 case PERF_RECORD_AUX: 1630 ret = machine__process_aux_event(machine, event); break; 1631 case PERF_RECORD_ITRACE_START: 1632 ret = machine__process_itrace_start_event(machine, event); break; 1633 case PERF_RECORD_LOST_SAMPLES: 1634 ret = machine__process_lost_samples_event(machine, event, sample); break; 1635 case PERF_RECORD_SWITCH: 1636 case PERF_RECORD_SWITCH_CPU_WIDE: 1637 ret = machine__process_switch_event(machine, event); break; 1638 default: 1639 ret = -1; 1640 break; 1641 } 1642 1643 return ret; 1644 } 1645 1646 static bool symbol__match_regex(struct symbol *sym, regex_t *regex) 1647 { 1648 if (!regexec(regex, sym->name, 0, NULL, 0)) 1649 return 1; 1650 return 0; 1651 } 1652 1653 static void ip__resolve_ams(struct thread *thread, 1654 struct addr_map_symbol *ams, 1655 u64 ip) 1656 { 1657 struct addr_location al; 1658 1659 memset(&al, 0, sizeof(al)); 1660 /* 1661 * We cannot use the header.misc hint to determine whether a 1662 * branch stack address is user, kernel, guest, hypervisor. 1663 * Branches may straddle the kernel/user/hypervisor boundaries. 1664 * Thus, we have to try consecutively until we find a match 1665 * or else, the symbol is unknown 1666 */ 1667 thread__find_cpumode_addr_location(thread, MAP__FUNCTION, ip, &al); 1668 1669 ams->addr = ip; 1670 ams->al_addr = al.addr; 1671 ams->sym = al.sym; 1672 ams->map = al.map; 1673 ams->phys_addr = 0; 1674 } 1675 1676 static void ip__resolve_data(struct thread *thread, 1677 u8 m, struct addr_map_symbol *ams, 1678 u64 addr, u64 phys_addr) 1679 { 1680 struct addr_location al; 1681 1682 memset(&al, 0, sizeof(al)); 1683 1684 thread__find_addr_location(thread, m, MAP__VARIABLE, addr, &al); 1685 if (al.map == NULL) { 1686 /* 1687 * some shared data regions have execute bit set which puts 1688 * their mapping in the MAP__FUNCTION type array. 1689 * Check there as a fallback option before dropping the sample. 1690 */ 1691 thread__find_addr_location(thread, m, MAP__FUNCTION, addr, &al); 1692 } 1693 1694 ams->addr = addr; 1695 ams->al_addr = al.addr; 1696 ams->sym = al.sym; 1697 ams->map = al.map; 1698 ams->phys_addr = phys_addr; 1699 } 1700 1701 struct mem_info *sample__resolve_mem(struct perf_sample *sample, 1702 struct addr_location *al) 1703 { 1704 struct mem_info *mi = mem_info__new(); 1705 1706 if (!mi) 1707 return NULL; 1708 1709 ip__resolve_ams(al->thread, &mi->iaddr, sample->ip); 1710 ip__resolve_data(al->thread, al->cpumode, &mi->daddr, 1711 sample->addr, sample->phys_addr); 1712 mi->data_src.val = sample->data_src; 1713 1714 return mi; 1715 } 1716 1717 static char *callchain_srcline(struct map *map, struct symbol *sym, u64 ip) 1718 { 1719 char *srcline = NULL; 1720 1721 if (!map || callchain_param.key == CCKEY_FUNCTION) 1722 return srcline; 1723 1724 srcline = srcline__tree_find(&map->dso->srclines, ip); 1725 if (!srcline) { 1726 bool show_sym = false; 1727 bool show_addr = callchain_param.key == CCKEY_ADDRESS; 1728 1729 srcline = get_srcline(map->dso, map__rip_2objdump(map, ip), 1730 sym, show_sym, show_addr, ip); 1731 srcline__tree_insert(&map->dso->srclines, ip, srcline); 1732 } 1733 1734 return srcline; 1735 } 1736 1737 struct iterations { 1738 int nr_loop_iter; 1739 u64 cycles; 1740 }; 1741 1742 static int add_callchain_ip(struct thread *thread, 1743 struct callchain_cursor *cursor, 1744 struct symbol **parent, 1745 struct addr_location *root_al, 1746 u8 *cpumode, 1747 u64 ip, 1748 bool branch, 1749 struct branch_flags *flags, 1750 struct iterations *iter, 1751 u64 branch_from) 1752 { 1753 struct addr_location al; 1754 int nr_loop_iter = 0; 1755 u64 iter_cycles = 0; 1756 const char *srcline = NULL; 1757 1758 al.filtered = 0; 1759 al.sym = NULL; 1760 if (!cpumode) { 1761 thread__find_cpumode_addr_location(thread, MAP__FUNCTION, 1762 ip, &al); 1763 } else { 1764 if (ip >= PERF_CONTEXT_MAX) { 1765 switch (ip) { 1766 case PERF_CONTEXT_HV: 1767 *cpumode = PERF_RECORD_MISC_HYPERVISOR; 1768 break; 1769 case PERF_CONTEXT_KERNEL: 1770 *cpumode = PERF_RECORD_MISC_KERNEL; 1771 break; 1772 case PERF_CONTEXT_USER: 1773 *cpumode = PERF_RECORD_MISC_USER; 1774 break; 1775 default: 1776 pr_debug("invalid callchain context: " 1777 "%"PRId64"\n", (s64) ip); 1778 /* 1779 * It seems the callchain is corrupted. 1780 * Discard all. 1781 */ 1782 callchain_cursor_reset(cursor); 1783 return 1; 1784 } 1785 return 0; 1786 } 1787 thread__find_addr_location(thread, *cpumode, MAP__FUNCTION, 1788 ip, &al); 1789 } 1790 1791 if (al.sym != NULL) { 1792 if (perf_hpp_list.parent && !*parent && 1793 symbol__match_regex(al.sym, &parent_regex)) 1794 *parent = al.sym; 1795 else if (have_ignore_callees && root_al && 1796 symbol__match_regex(al.sym, &ignore_callees_regex)) { 1797 /* Treat this symbol as the root, 1798 forgetting its callees. */ 1799 *root_al = al; 1800 callchain_cursor_reset(cursor); 1801 } 1802 } 1803 1804 if (symbol_conf.hide_unresolved && al.sym == NULL) 1805 return 0; 1806 1807 if (iter) { 1808 nr_loop_iter = iter->nr_loop_iter; 1809 iter_cycles = iter->cycles; 1810 } 1811 1812 srcline = callchain_srcline(al.map, al.sym, al.addr); 1813 return callchain_cursor_append(cursor, al.addr, al.map, al.sym, 1814 branch, flags, nr_loop_iter, 1815 iter_cycles, branch_from, srcline); 1816 } 1817 1818 struct branch_info *sample__resolve_bstack(struct perf_sample *sample, 1819 struct addr_location *al) 1820 { 1821 unsigned int i; 1822 const struct branch_stack *bs = sample->branch_stack; 1823 struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info)); 1824 1825 if (!bi) 1826 return NULL; 1827 1828 for (i = 0; i < bs->nr; i++) { 1829 ip__resolve_ams(al->thread, &bi[i].to, bs->entries[i].to); 1830 ip__resolve_ams(al->thread, &bi[i].from, bs->entries[i].from); 1831 bi[i].flags = bs->entries[i].flags; 1832 } 1833 return bi; 1834 } 1835 1836 static void save_iterations(struct iterations *iter, 1837 struct branch_entry *be, int nr) 1838 { 1839 int i; 1840 1841 iter->nr_loop_iter = nr; 1842 iter->cycles = 0; 1843 1844 for (i = 0; i < nr; i++) 1845 iter->cycles += be[i].flags.cycles; 1846 } 1847 1848 #define CHASHSZ 127 1849 #define CHASHBITS 7 1850 #define NO_ENTRY 0xff 1851 1852 #define PERF_MAX_BRANCH_DEPTH 127 1853 1854 /* Remove loops. */ 1855 static int remove_loops(struct branch_entry *l, int nr, 1856 struct iterations *iter) 1857 { 1858 int i, j, off; 1859 unsigned char chash[CHASHSZ]; 1860 1861 memset(chash, NO_ENTRY, sizeof(chash)); 1862 1863 BUG_ON(PERF_MAX_BRANCH_DEPTH > 255); 1864 1865 for (i = 0; i < nr; i++) { 1866 int h = hash_64(l[i].from, CHASHBITS) % CHASHSZ; 1867 1868 /* no collision handling for now */ 1869 if (chash[h] == NO_ENTRY) { 1870 chash[h] = i; 1871 } else if (l[chash[h]].from == l[i].from) { 1872 bool is_loop = true; 1873 /* check if it is a real loop */ 1874 off = 0; 1875 for (j = chash[h]; j < i && i + off < nr; j++, off++) 1876 if (l[j].from != l[i + off].from) { 1877 is_loop = false; 1878 break; 1879 } 1880 if (is_loop) { 1881 j = nr - (i + off); 1882 if (j > 0) { 1883 save_iterations(iter + i + off, 1884 l + i, off); 1885 1886 memmove(iter + i, iter + i + off, 1887 j * sizeof(*iter)); 1888 1889 memmove(l + i, l + i + off, 1890 j * sizeof(*l)); 1891 } 1892 1893 nr -= off; 1894 } 1895 } 1896 } 1897 return nr; 1898 } 1899 1900 /* 1901 * Recolve LBR callstack chain sample 1902 * Return: 1903 * 1 on success get LBR callchain information 1904 * 0 no available LBR callchain information, should try fp 1905 * negative error code on other errors. 1906 */ 1907 static int resolve_lbr_callchain_sample(struct thread *thread, 1908 struct callchain_cursor *cursor, 1909 struct perf_sample *sample, 1910 struct symbol **parent, 1911 struct addr_location *root_al, 1912 int max_stack) 1913 { 1914 struct ip_callchain *chain = sample->callchain; 1915 int chain_nr = min(max_stack, (int)chain->nr), i; 1916 u8 cpumode = PERF_RECORD_MISC_USER; 1917 u64 ip, branch_from = 0; 1918 1919 for (i = 0; i < chain_nr; i++) { 1920 if (chain->ips[i] == PERF_CONTEXT_USER) 1921 break; 1922 } 1923 1924 /* LBR only affects the user callchain */ 1925 if (i != chain_nr) { 1926 struct branch_stack *lbr_stack = sample->branch_stack; 1927 int lbr_nr = lbr_stack->nr, j, k; 1928 bool branch; 1929 struct branch_flags *flags; 1930 /* 1931 * LBR callstack can only get user call chain. 1932 * The mix_chain_nr is kernel call chain 1933 * number plus LBR user call chain number. 1934 * i is kernel call chain number, 1935 * 1 is PERF_CONTEXT_USER, 1936 * lbr_nr + 1 is the user call chain number. 1937 * For details, please refer to the comments 1938 * in callchain__printf 1939 */ 1940 int mix_chain_nr = i + 1 + lbr_nr + 1; 1941 1942 for (j = 0; j < mix_chain_nr; j++) { 1943 int err; 1944 branch = false; 1945 flags = NULL; 1946 1947 if (callchain_param.order == ORDER_CALLEE) { 1948 if (j < i + 1) 1949 ip = chain->ips[j]; 1950 else if (j > i + 1) { 1951 k = j - i - 2; 1952 ip = lbr_stack->entries[k].from; 1953 branch = true; 1954 flags = &lbr_stack->entries[k].flags; 1955 } else { 1956 ip = lbr_stack->entries[0].to; 1957 branch = true; 1958 flags = &lbr_stack->entries[0].flags; 1959 branch_from = 1960 lbr_stack->entries[0].from; 1961 } 1962 } else { 1963 if (j < lbr_nr) { 1964 k = lbr_nr - j - 1; 1965 ip = lbr_stack->entries[k].from; 1966 branch = true; 1967 flags = &lbr_stack->entries[k].flags; 1968 } 1969 else if (j > lbr_nr) 1970 ip = chain->ips[i + 1 - (j - lbr_nr)]; 1971 else { 1972 ip = lbr_stack->entries[0].to; 1973 branch = true; 1974 flags = &lbr_stack->entries[0].flags; 1975 branch_from = 1976 lbr_stack->entries[0].from; 1977 } 1978 } 1979 1980 err = add_callchain_ip(thread, cursor, parent, 1981 root_al, &cpumode, ip, 1982 branch, flags, NULL, 1983 branch_from); 1984 if (err) 1985 return (err < 0) ? err : 0; 1986 } 1987 return 1; 1988 } 1989 1990 return 0; 1991 } 1992 1993 static int thread__resolve_callchain_sample(struct thread *thread, 1994 struct callchain_cursor *cursor, 1995 struct perf_evsel *evsel, 1996 struct perf_sample *sample, 1997 struct symbol **parent, 1998 struct addr_location *root_al, 1999 int max_stack) 2000 { 2001 struct branch_stack *branch = sample->branch_stack; 2002 struct ip_callchain *chain = sample->callchain; 2003 int chain_nr = 0; 2004 u8 cpumode = PERF_RECORD_MISC_USER; 2005 int i, j, err, nr_entries; 2006 int skip_idx = -1; 2007 int first_call = 0; 2008 2009 if (chain) 2010 chain_nr = chain->nr; 2011 2012 if (perf_evsel__has_branch_callstack(evsel)) { 2013 err = resolve_lbr_callchain_sample(thread, cursor, sample, parent, 2014 root_al, max_stack); 2015 if (err) 2016 return (err < 0) ? err : 0; 2017 } 2018 2019 /* 2020 * Based on DWARF debug information, some architectures skip 2021 * a callchain entry saved by the kernel. 2022 */ 2023 skip_idx = arch_skip_callchain_idx(thread, chain); 2024 2025 /* 2026 * Add branches to call stack for easier browsing. This gives 2027 * more context for a sample than just the callers. 2028 * 2029 * This uses individual histograms of paths compared to the 2030 * aggregated histograms the normal LBR mode uses. 2031 * 2032 * Limitations for now: 2033 * - No extra filters 2034 * - No annotations (should annotate somehow) 2035 */ 2036 2037 if (branch && callchain_param.branch_callstack) { 2038 int nr = min(max_stack, (int)branch->nr); 2039 struct branch_entry be[nr]; 2040 struct iterations iter[nr]; 2041 2042 if (branch->nr > PERF_MAX_BRANCH_DEPTH) { 2043 pr_warning("corrupted branch chain. skipping...\n"); 2044 goto check_calls; 2045 } 2046 2047 for (i = 0; i < nr; i++) { 2048 if (callchain_param.order == ORDER_CALLEE) { 2049 be[i] = branch->entries[i]; 2050 2051 if (chain == NULL) 2052 continue; 2053 2054 /* 2055 * Check for overlap into the callchain. 2056 * The return address is one off compared to 2057 * the branch entry. To adjust for this 2058 * assume the calling instruction is not longer 2059 * than 8 bytes. 2060 */ 2061 if (i == skip_idx || 2062 chain->ips[first_call] >= PERF_CONTEXT_MAX) 2063 first_call++; 2064 else if (be[i].from < chain->ips[first_call] && 2065 be[i].from >= chain->ips[first_call] - 8) 2066 first_call++; 2067 } else 2068 be[i] = branch->entries[branch->nr - i - 1]; 2069 } 2070 2071 memset(iter, 0, sizeof(struct iterations) * nr); 2072 nr = remove_loops(be, nr, iter); 2073 2074 for (i = 0; i < nr; i++) { 2075 err = add_callchain_ip(thread, cursor, parent, 2076 root_al, 2077 NULL, be[i].to, 2078 true, &be[i].flags, 2079 NULL, be[i].from); 2080 2081 if (!err) 2082 err = add_callchain_ip(thread, cursor, parent, root_al, 2083 NULL, be[i].from, 2084 true, &be[i].flags, 2085 &iter[i], 0); 2086 if (err == -EINVAL) 2087 break; 2088 if (err) 2089 return err; 2090 } 2091 2092 if (chain_nr == 0) 2093 return 0; 2094 2095 chain_nr -= nr; 2096 } 2097 2098 check_calls: 2099 for (i = first_call, nr_entries = 0; 2100 i < chain_nr && nr_entries < max_stack; i++) { 2101 u64 ip; 2102 2103 if (callchain_param.order == ORDER_CALLEE) 2104 j = i; 2105 else 2106 j = chain->nr - i - 1; 2107 2108 #ifdef HAVE_SKIP_CALLCHAIN_IDX 2109 if (j == skip_idx) 2110 continue; 2111 #endif 2112 ip = chain->ips[j]; 2113 2114 if (ip < PERF_CONTEXT_MAX) 2115 ++nr_entries; 2116 2117 err = add_callchain_ip(thread, cursor, parent, 2118 root_al, &cpumode, ip, 2119 false, NULL, NULL, 0); 2120 2121 if (err) 2122 return (err < 0) ? err : 0; 2123 } 2124 2125 return 0; 2126 } 2127 2128 static int append_inlines(struct callchain_cursor *cursor, 2129 struct map *map, struct symbol *sym, u64 ip) 2130 { 2131 struct inline_node *inline_node; 2132 struct inline_list *ilist; 2133 u64 addr; 2134 int ret = 1; 2135 2136 if (!symbol_conf.inline_name || !map || !sym) 2137 return ret; 2138 2139 addr = map__rip_2objdump(map, ip); 2140 2141 inline_node = inlines__tree_find(&map->dso->inlined_nodes, addr); 2142 if (!inline_node) { 2143 inline_node = dso__parse_addr_inlines(map->dso, addr, sym); 2144 if (!inline_node) 2145 return ret; 2146 inlines__tree_insert(&map->dso->inlined_nodes, inline_node); 2147 } 2148 2149 list_for_each_entry(ilist, &inline_node->val, list) { 2150 ret = callchain_cursor_append(cursor, ip, map, 2151 ilist->symbol, false, 2152 NULL, 0, 0, 0, ilist->srcline); 2153 2154 if (ret != 0) 2155 return ret; 2156 } 2157 2158 return ret; 2159 } 2160 2161 static int unwind_entry(struct unwind_entry *entry, void *arg) 2162 { 2163 struct callchain_cursor *cursor = arg; 2164 const char *srcline = NULL; 2165 2166 if (symbol_conf.hide_unresolved && entry->sym == NULL) 2167 return 0; 2168 2169 if (append_inlines(cursor, entry->map, entry->sym, entry->ip) == 0) 2170 return 0; 2171 2172 srcline = callchain_srcline(entry->map, entry->sym, entry->ip); 2173 return callchain_cursor_append(cursor, entry->ip, 2174 entry->map, entry->sym, 2175 false, NULL, 0, 0, 0, srcline); 2176 } 2177 2178 static int thread__resolve_callchain_unwind(struct thread *thread, 2179 struct callchain_cursor *cursor, 2180 struct perf_evsel *evsel, 2181 struct perf_sample *sample, 2182 int max_stack) 2183 { 2184 /* Can we do dwarf post unwind? */ 2185 if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) && 2186 (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER))) 2187 return 0; 2188 2189 /* Bail out if nothing was captured. */ 2190 if ((!sample->user_regs.regs) || 2191 (!sample->user_stack.size)) 2192 return 0; 2193 2194 return unwind__get_entries(unwind_entry, cursor, 2195 thread, sample, max_stack); 2196 } 2197 2198 int thread__resolve_callchain(struct thread *thread, 2199 struct callchain_cursor *cursor, 2200 struct perf_evsel *evsel, 2201 struct perf_sample *sample, 2202 struct symbol **parent, 2203 struct addr_location *root_al, 2204 int max_stack) 2205 { 2206 int ret = 0; 2207 2208 callchain_cursor_reset(cursor); 2209 2210 if (callchain_param.order == ORDER_CALLEE) { 2211 ret = thread__resolve_callchain_sample(thread, cursor, 2212 evsel, sample, 2213 parent, root_al, 2214 max_stack); 2215 if (ret) 2216 return ret; 2217 ret = thread__resolve_callchain_unwind(thread, cursor, 2218 evsel, sample, 2219 max_stack); 2220 } else { 2221 ret = thread__resolve_callchain_unwind(thread, cursor, 2222 evsel, sample, 2223 max_stack); 2224 if (ret) 2225 return ret; 2226 ret = thread__resolve_callchain_sample(thread, cursor, 2227 evsel, sample, 2228 parent, root_al, 2229 max_stack); 2230 } 2231 2232 return ret; 2233 } 2234 2235 int machine__for_each_thread(struct machine *machine, 2236 int (*fn)(struct thread *thread, void *p), 2237 void *priv) 2238 { 2239 struct threads *threads; 2240 struct rb_node *nd; 2241 struct thread *thread; 2242 int rc = 0; 2243 int i; 2244 2245 for (i = 0; i < THREADS__TABLE_SIZE; i++) { 2246 threads = &machine->threads[i]; 2247 for (nd = rb_first(&threads->entries); nd; nd = rb_next(nd)) { 2248 thread = rb_entry(nd, struct thread, rb_node); 2249 rc = fn(thread, priv); 2250 if (rc != 0) 2251 return rc; 2252 } 2253 2254 list_for_each_entry(thread, &threads->dead, node) { 2255 rc = fn(thread, priv); 2256 if (rc != 0) 2257 return rc; 2258 } 2259 } 2260 return rc; 2261 } 2262 2263 int machines__for_each_thread(struct machines *machines, 2264 int (*fn)(struct thread *thread, void *p), 2265 void *priv) 2266 { 2267 struct rb_node *nd; 2268 int rc = 0; 2269 2270 rc = machine__for_each_thread(&machines->host, fn, priv); 2271 if (rc != 0) 2272 return rc; 2273 2274 for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) { 2275 struct machine *machine = rb_entry(nd, struct machine, rb_node); 2276 2277 rc = machine__for_each_thread(machine, fn, priv); 2278 if (rc != 0) 2279 return rc; 2280 } 2281 return rc; 2282 } 2283 2284 int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool, 2285 struct target *target, struct thread_map *threads, 2286 perf_event__handler_t process, bool data_mmap, 2287 unsigned int proc_map_timeout, 2288 unsigned int nr_threads_synthesize) 2289 { 2290 if (target__has_task(target)) 2291 return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap, proc_map_timeout); 2292 else if (target__has_cpu(target)) 2293 return perf_event__synthesize_threads(tool, process, 2294 machine, data_mmap, 2295 proc_map_timeout, 2296 nr_threads_synthesize); 2297 /* command specified */ 2298 return 0; 2299 } 2300 2301 pid_t machine__get_current_tid(struct machine *machine, int cpu) 2302 { 2303 if (cpu < 0 || cpu >= MAX_NR_CPUS || !machine->current_tid) 2304 return -1; 2305 2306 return machine->current_tid[cpu]; 2307 } 2308 2309 int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid, 2310 pid_t tid) 2311 { 2312 struct thread *thread; 2313 2314 if (cpu < 0) 2315 return -EINVAL; 2316 2317 if (!machine->current_tid) { 2318 int i; 2319 2320 machine->current_tid = calloc(MAX_NR_CPUS, sizeof(pid_t)); 2321 if (!machine->current_tid) 2322 return -ENOMEM; 2323 for (i = 0; i < MAX_NR_CPUS; i++) 2324 machine->current_tid[i] = -1; 2325 } 2326 2327 if (cpu >= MAX_NR_CPUS) { 2328 pr_err("Requested CPU %d too large. ", cpu); 2329 pr_err("Consider raising MAX_NR_CPUS\n"); 2330 return -EINVAL; 2331 } 2332 2333 machine->current_tid[cpu] = tid; 2334 2335 thread = machine__findnew_thread(machine, pid, tid); 2336 if (!thread) 2337 return -ENOMEM; 2338 2339 thread->cpu = cpu; 2340 thread__put(thread); 2341 2342 return 0; 2343 } 2344 2345 int machine__get_kernel_start(struct machine *machine) 2346 { 2347 struct map *map = machine__kernel_map(machine); 2348 int err = 0; 2349 2350 /* 2351 * The only addresses above 2^63 are kernel addresses of a 64-bit 2352 * kernel. Note that addresses are unsigned so that on a 32-bit system 2353 * all addresses including kernel addresses are less than 2^32. In 2354 * that case (32-bit system), if the kernel mapping is unknown, all 2355 * addresses will be assumed to be in user space - see 2356 * machine__kernel_ip(). 2357 */ 2358 machine->kernel_start = 1ULL << 63; 2359 if (map) { 2360 err = map__load(map); 2361 if (!err) 2362 machine->kernel_start = map->start; 2363 } 2364 return err; 2365 } 2366 2367 struct dso *machine__findnew_dso(struct machine *machine, const char *filename) 2368 { 2369 return dsos__findnew(&machine->dsos, filename); 2370 } 2371 2372 char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp) 2373 { 2374 struct machine *machine = vmachine; 2375 struct map *map; 2376 struct symbol *sym = map_groups__find_symbol(&machine->kmaps, MAP__FUNCTION, *addrp, &map); 2377 2378 if (sym == NULL) 2379 return NULL; 2380 2381 *modp = __map__is_kmodule(map) ? (char *)map->dso->short_name : NULL; 2382 *addrp = map->unmap_ip(map, sym->start); 2383 return sym->name; 2384 } 2385