1 #include "callchain.h" 2 #include "debug.h" 3 #include "event.h" 4 #include "evsel.h" 5 #include "hist.h" 6 #include "machine.h" 7 #include "map.h" 8 #include "sort.h" 9 #include "strlist.h" 10 #include "thread.h" 11 #include "vdso.h" 12 #include <stdbool.h> 13 #include <symbol/kallsyms.h> 14 #include "unwind.h" 15 16 static void dsos__init(struct dsos *dsos) 17 { 18 INIT_LIST_HEAD(&dsos->head); 19 dsos->root = RB_ROOT; 20 } 21 22 int machine__init(struct machine *machine, const char *root_dir, pid_t pid) 23 { 24 map_groups__init(&machine->kmaps); 25 RB_CLEAR_NODE(&machine->rb_node); 26 dsos__init(&machine->user_dsos); 27 dsos__init(&machine->kernel_dsos); 28 29 machine->threads = RB_ROOT; 30 INIT_LIST_HEAD(&machine->dead_threads); 31 machine->last_match = NULL; 32 33 machine->vdso_info = NULL; 34 35 machine->kmaps.machine = machine; 36 machine->pid = pid; 37 38 machine->symbol_filter = NULL; 39 machine->id_hdr_size = 0; 40 machine->comm_exec = false; 41 machine->kernel_start = 0; 42 43 machine->root_dir = strdup(root_dir); 44 if (machine->root_dir == NULL) 45 return -ENOMEM; 46 47 if (pid != HOST_KERNEL_ID) { 48 struct thread *thread = machine__findnew_thread(machine, -1, 49 pid); 50 char comm[64]; 51 52 if (thread == NULL) 53 return -ENOMEM; 54 55 snprintf(comm, sizeof(comm), "[guest/%d]", pid); 56 thread__set_comm(thread, comm, 0); 57 } 58 59 machine->current_tid = NULL; 60 61 return 0; 62 } 63 64 struct machine *machine__new_host(void) 65 { 66 struct machine *machine = malloc(sizeof(*machine)); 67 68 if (machine != NULL) { 69 machine__init(machine, "", HOST_KERNEL_ID); 70 71 if (machine__create_kernel_maps(machine) < 0) 72 goto out_delete; 73 } 74 75 return machine; 76 out_delete: 77 free(machine); 78 return NULL; 79 } 80 81 static void dsos__delete(struct dsos *dsos) 82 { 83 struct dso *pos, *n; 84 85 list_for_each_entry_safe(pos, n, &dsos->head, node) { 86 RB_CLEAR_NODE(&pos->rb_node); 87 list_del(&pos->node); 88 dso__delete(pos); 89 } 90 } 91 92 void machine__delete_dead_threads(struct machine *machine) 93 { 94 struct thread *n, *t; 95 96 list_for_each_entry_safe(t, n, &machine->dead_threads, node) { 97 list_del(&t->node); 98 thread__delete(t); 99 } 100 } 101 102 void machine__delete_threads(struct machine *machine) 103 { 104 struct rb_node *nd = rb_first(&machine->threads); 105 106 while (nd) { 107 struct thread *t = rb_entry(nd, struct thread, rb_node); 108 109 rb_erase(&t->rb_node, &machine->threads); 110 nd = rb_next(nd); 111 thread__delete(t); 112 } 113 } 114 115 void machine__exit(struct machine *machine) 116 { 117 map_groups__exit(&machine->kmaps); 118 dsos__delete(&machine->user_dsos); 119 dsos__delete(&machine->kernel_dsos); 120 vdso__exit(machine); 121 zfree(&machine->root_dir); 122 zfree(&machine->current_tid); 123 } 124 125 void machine__delete(struct machine *machine) 126 { 127 machine__exit(machine); 128 free(machine); 129 } 130 131 void machines__init(struct machines *machines) 132 { 133 machine__init(&machines->host, "", HOST_KERNEL_ID); 134 machines->guests = RB_ROOT; 135 machines->symbol_filter = NULL; 136 } 137 138 void machines__exit(struct machines *machines) 139 { 140 machine__exit(&machines->host); 141 /* XXX exit guest */ 142 } 143 144 struct machine *machines__add(struct machines *machines, pid_t pid, 145 const char *root_dir) 146 { 147 struct rb_node **p = &machines->guests.rb_node; 148 struct rb_node *parent = NULL; 149 struct machine *pos, *machine = malloc(sizeof(*machine)); 150 151 if (machine == NULL) 152 return NULL; 153 154 if 
struct machine *machines__add(struct machines *machines, pid_t pid,
			      const char *root_dir)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *pos, *machine = malloc(sizeof(*machine));

	if (machine == NULL)
		return NULL;

	if (machine__init(machine, root_dir, pid) != 0) {
		free(machine);
		return NULL;
	}

	machine->symbol_filter = machines->symbol_filter;

	while (*p != NULL) {
		parent = *p;
		pos = rb_entry(parent, struct machine, rb_node);
		if (pid < pos->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&machine->rb_node, parent, p);
	rb_insert_color(&machine->rb_node, &machines->guests);

	return machine;
}

void machines__set_symbol_filter(struct machines *machines,
				 symbol_filter_t symbol_filter)
{
	struct rb_node *nd;

	machines->symbol_filter = symbol_filter;
	machines->host.symbol_filter = symbol_filter;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		machine->symbol_filter = symbol_filter;
	}
}

void machines__set_comm_exec(struct machines *machines, bool comm_exec)
{
	struct rb_node *nd;

	machines->host.comm_exec = comm_exec;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		machine->comm_exec = comm_exec;
	}
}

struct machine *machines__find(struct machines *machines, pid_t pid)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *machine;
	struct machine *default_machine = NULL;

	if (pid == HOST_KERNEL_ID)
		return &machines->host;

	while (*p != NULL) {
		parent = *p;
		machine = rb_entry(parent, struct machine, rb_node);
		if (pid < machine->pid)
			p = &(*p)->rb_left;
		else if (pid > machine->pid)
			p = &(*p)->rb_right;
		else
			return machine;
		/* A machine with pid 0 serves as the default guest */
		if (!machine->pid)
			default_machine = machine;
	}

	return default_machine;
}

struct machine *machines__findnew(struct machines *machines, pid_t pid)
{
	char path[PATH_MAX];
	const char *root_dir = "";
	struct machine *machine = machines__find(machines, pid);

	if (machine && (machine->pid == pid))
		goto out;

	if ((pid != HOST_KERNEL_ID) &&
	    (pid != DEFAULT_GUEST_KERNEL_ID) &&
	    (symbol_conf.guestmount)) {
		sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
		if (access(path, R_OK)) {
			static struct strlist *seen;

			if (!seen)
				seen = strlist__new(true, NULL);

			if (!strlist__has_entry(seen, path)) {
				pr_err("Can't access file %s\n", path);
				strlist__add(seen, path);
			}
			machine = NULL;
			goto out;
		}
		root_dir = path;
	}

	machine = machines__add(machines, pid, root_dir);
out:
	return machine;
}

void machines__process_guests(struct machines *machines,
			      machine__process_t process, void *data)
{
	struct rb_node *nd;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		process(pos, data);
	}
}
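/*
 * Format the synthetic mmap filename used for a machine's kernel map:
 * "[kernel.kallsyms]" for the host, "[guest.kernel.kallsyms]" for the
 * default guest and "[guest.kernel.kallsyms.<pid>]" for other guests.
 */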
char *machine__mmap_name(struct machine *machine, char *bf, size_t size)
{
	if (machine__is_host(machine))
		snprintf(bf, size, "[%s]", "kernel.kallsyms");
	else if (machine__is_default_guest(machine))
		snprintf(bf, size, "[%s]", "guest.kernel.kallsyms");
	else {
		snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms",
			 machine->pid);
	}

	return bf;
}

void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
{
	struct rb_node *node;
	struct machine *machine;

	machines->host.id_hdr_size = id_hdr_size;

	for (node = rb_first(&machines->guests); node; node = rb_next(node)) {
		machine = rb_entry(node, struct machine, rb_node);
		machine->id_hdr_size = id_hdr_size;
	}
}

static void machine__update_thread_pid(struct machine *machine,
				       struct thread *th, pid_t pid)
{
	struct thread *leader;

	if (pid == th->pid_ || pid == -1 || th->pid_ != -1)
		return;

	th->pid_ = pid;

	if (th->pid_ == th->tid)
		return;

	leader = machine__findnew_thread(machine, th->pid_, th->pid_);
	if (!leader)
		goto out_err;

	if (!leader->mg)
		leader->mg = map_groups__new();

	if (!leader->mg)
		goto out_err;

	if (th->mg == leader->mg)
		return;

	if (th->mg) {
		/*
		 * Maps are created from MMAP events which provide the pid and
		 * tid. Consequently there never should be any maps on a thread
		 * with an unknown pid. Just print an error if there are.
		 */
		if (!map_groups__empty(th->mg))
			pr_err("Discarding thread maps for %d:%d\n",
			       th->pid_, th->tid);
		map_groups__delete(th->mg);
	}

	th->mg = map_groups__get(leader->mg);

	return;

out_err:
	pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid);
}
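/*
 * Look up a thread by tid in the machine's thread rbtree, optionally
 * creating it when 'create' is true. A one-entry last_match cache
 * short-circuits the common case of repeated lookups for the same tid.
 */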
static struct thread *__machine__findnew_thread(struct machine *machine,
						pid_t pid, pid_t tid,
						bool create)
{
	struct rb_node **p = &machine->threads.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - TID lookups come in blocks,
	 * so most of the time we don't have to look up
	 * the full rbtree:
	 */
	th = machine->last_match;
	if (th && th->tid == tid) {
		machine__update_thread_pid(machine, th, pid);
		return th;
	}

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->tid == tid) {
			machine->last_match = th;
			machine__update_thread_pid(machine, th, pid);
			return th;
		}

		if (tid < th->tid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	if (!create)
		return NULL;

	th = thread__new(pid, tid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, &machine->threads);
		machine->last_match = th;

		/*
		 * We have to initialize map_groups separately
		 * after rb tree is updated.
		 *
		 * The reason is that we call machine__findnew_thread
		 * within thread__init_map_groups to find the thread
		 * leader and that would screw up the rb tree.
		 */
		if (thread__init_map_groups(th, machine)) {
			thread__delete(th);
			return NULL;
		}
	}

	return th;
}

struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
				       pid_t tid)
{
	return __machine__findnew_thread(machine, pid, tid, true);
}

struct thread *machine__find_thread(struct machine *machine, pid_t pid,
				    pid_t tid)
{
	return __machine__findnew_thread(machine, pid, tid, false);
}

struct comm *machine__thread_exec_comm(struct machine *machine,
				       struct thread *thread)
{
	if (machine->comm_exec)
		return thread__exec_comm(thread);
	else
		return thread__comm(thread);
}

int machine__process_comm_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->comm.pid,
							event->comm.tid);
	bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;

	if (exec)
		machine->comm_exec = true;

	if (dump_trace)
		perf_event__fprintf_comm(event, stdout);

	if (thread == NULL ||
	    __thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
		return -1;
	}

	return 0;
}

int machine__process_lost_event(struct machine *machine __maybe_unused,
				union perf_event *event, struct perf_sample *sample __maybe_unused)
{
	dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
		    event->lost.id, event->lost.lost);
	return 0;
}

struct map *machine__new_module(struct machine *machine, u64 start,
				const char *filename)
{
	struct map *map;
	struct dso *dso = __dsos__findnew(&machine->kernel_dsos, filename);

	if (dso == NULL)
		return NULL;

	map = map__new2(start, dso, MAP__FUNCTION);
	if (map == NULL)
		return NULL;

	if (machine__is_host(machine))
		dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
	else
		dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;
	map_groups__insert(&machine->kmaps, map);
	return map;
}

size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = __dsos__fprintf(&machines->host.kernel_dsos.head, fp) +
		     __dsos__fprintf(&machines->host.user_dsos.head, fp);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += __dsos__fprintf(&pos->kernel_dsos.head, fp);
		ret += __dsos__fprintf(&pos->user_dsos.head, fp);
	}

	return ret;
}

size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp,
				     bool (skip)(struct dso *dso, int parm), int parm)
{
	return __dsos__fprintf_buildid(&m->kernel_dsos.head, fp, skip, parm) +
	       __dsos__fprintf_buildid(&m->user_dsos.head, fp, skip, parm);
}

size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
				      bool (skip)(struct dso *dso, int parm), int parm)
{
	struct rb_node *nd;
	size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
	}
	return ret;
}
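/*
 * Print the vmlinux candidates considered for this machine: the build-id
 * cache entry for the kernel dso, if any, followed by the entries in
 * vmlinux_path[].
 */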
size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
{
	int i;
	size_t printed = 0;
	struct dso *kdso = machine->vmlinux_maps[MAP__FUNCTION]->dso;

	if (kdso->has_build_id) {
		char filename[PATH_MAX];
		if (dso__build_id_filename(kdso, filename, sizeof(filename)))
			printed += fprintf(fp, "[0] %s\n", filename);
	}

	for (i = 0; i < vmlinux_path__nr_entries; ++i)
		printed += fprintf(fp, "[%d] %s\n",
				   i + kdso->has_build_id, vmlinux_path[i]);

	return printed;
}

size_t machine__fprintf(struct machine *machine, FILE *fp)
{
	size_t ret = 0;
	struct rb_node *nd;

	for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
		struct thread *pos = rb_entry(nd, struct thread, rb_node);

		ret += thread__fprintf(pos, fp);
	}

	return ret;
}

static struct dso *machine__get_kernel(struct machine *machine)
{
	const char *vmlinux_name = NULL;
	struct dso *kernel;

	if (machine__is_host(machine)) {
		vmlinux_name = symbol_conf.vmlinux_name;
		if (!vmlinux_name)
			vmlinux_name = "[kernel.kallsyms]";

		kernel = dso__kernel_findnew(machine, vmlinux_name,
					     "[kernel]",
					     DSO_TYPE_KERNEL);
	} else {
		char bf[PATH_MAX];

		if (machine__is_default_guest(machine))
			vmlinux_name = symbol_conf.default_guest_vmlinux_name;
		if (!vmlinux_name)
			vmlinux_name = machine__mmap_name(machine, bf,
							  sizeof(bf));

		kernel = dso__kernel_findnew(machine, vmlinux_name,
					     "[guest.kernel]",
					     DSO_TYPE_GUEST_KERNEL);
	}

	if (kernel != NULL && (!kernel->has_build_id))
		dso__read_running_kernel_build_id(kernel, machine);

	return kernel;
}

struct process_args {
	u64 start;
};

static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
					   size_t bufsz)
{
	if (machine__is_default_guest(machine))
		scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
	else
		scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
}
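/*
 * Candidate symbols marking the start of kernel text, tried in order when
 * looking up the running kernel's start address and relocation reference
 * symbol.
 */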
const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};

/* Figure out the start address of kernel map from /proc/kallsyms.
 * Returns the name of the start symbol in *symbol_name. Pass in NULL as
 * symbol_name if it's not that important.
 */
static u64 machine__get_running_kernel_start(struct machine *machine,
					     const char **symbol_name)
{
	char filename[PATH_MAX];
	int i;
	const char *name;
	u64 addr = 0;

	machine__get_kallsyms_filename(machine, filename, PATH_MAX);

	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
		return 0;

	for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
		addr = kallsyms__get_function_start(filename, name);
		if (addr)
			break;
	}

	if (symbol_name)
		*symbol_name = name;

	return addr;
}

int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
{
	enum map_type type;
	u64 start = machine__get_running_kernel_start(machine, NULL);

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;

		machine->vmlinux_maps[type] = map__new2(start, kernel, type);
		if (machine->vmlinux_maps[type] == NULL)
			return -1;

		machine->vmlinux_maps[type]->map_ip =
			machine->vmlinux_maps[type]->unmap_ip =
				identity__map_ip;
		kmap = map__kmap(machine->vmlinux_maps[type]);
		kmap->kmaps = &machine->kmaps;
		map_groups__insert(&machine->kmaps,
				   machine->vmlinux_maps[type]);
	}

	return 0;
}

void machine__destroy_kernel_maps(struct machine *machine)
{
	enum map_type type;

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;

		if (machine->vmlinux_maps[type] == NULL)
			continue;

		kmap = map__kmap(machine->vmlinux_maps[type]);
		map_groups__remove(&machine->kmaps,
				   machine->vmlinux_maps[type]);
		if (kmap->ref_reloc_sym) {
			/*
			 * ref_reloc_sym is shared among all maps, so free just
			 * on one of them.
			 */
			if (type == MAP__FUNCTION) {
				zfree((char **)&kmap->ref_reloc_sym->name);
				zfree(&kmap->ref_reloc_sym);
			} else
				kmap->ref_reloc_sym = NULL;
		}

		map__delete(machine->vmlinux_maps[type]);
		machine->vmlinux_maps[type] = NULL;
	}
}
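/*
 * Create kernel maps for the default guest (if one is configured) and for
 * every numeric per-pid directory found under symbol_conf.guestmount.
 */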
Skipping.\n", 712 namelist[i]->d_name); 713 continue; 714 } 715 sprintf(path, "%s/%s/proc/kallsyms", 716 symbol_conf.guestmount, 717 namelist[i]->d_name); 718 ret = access(path, R_OK); 719 if (ret) { 720 pr_debug("Can't access file %s\n", path); 721 goto failure; 722 } 723 machines__create_kernel_maps(machines, pid); 724 } 725 failure: 726 free(namelist); 727 } 728 729 return ret; 730 } 731 732 void machines__destroy_kernel_maps(struct machines *machines) 733 { 734 struct rb_node *next = rb_first(&machines->guests); 735 736 machine__destroy_kernel_maps(&machines->host); 737 738 while (next) { 739 struct machine *pos = rb_entry(next, struct machine, rb_node); 740 741 next = rb_next(&pos->rb_node); 742 rb_erase(&pos->rb_node, &machines->guests); 743 machine__delete(pos); 744 } 745 } 746 747 int machines__create_kernel_maps(struct machines *machines, pid_t pid) 748 { 749 struct machine *machine = machines__findnew(machines, pid); 750 751 if (machine == NULL) 752 return -1; 753 754 return machine__create_kernel_maps(machine); 755 } 756 757 int machine__load_kallsyms(struct machine *machine, const char *filename, 758 enum map_type type, symbol_filter_t filter) 759 { 760 struct map *map = machine->vmlinux_maps[type]; 761 int ret = dso__load_kallsyms(map->dso, filename, map, filter); 762 763 if (ret > 0) { 764 dso__set_loaded(map->dso, type); 765 /* 766 * Since /proc/kallsyms will have multiple sessions for the 767 * kernel, with modules between them, fixup the end of all 768 * sections. 769 */ 770 __map_groups__fixup_end(&machine->kmaps, type); 771 } 772 773 return ret; 774 } 775 776 int machine__load_vmlinux_path(struct machine *machine, enum map_type type, 777 symbol_filter_t filter) 778 { 779 struct map *map = machine->vmlinux_maps[type]; 780 int ret = dso__load_vmlinux_path(map->dso, map, filter); 781 782 if (ret > 0) 783 dso__set_loaded(map->dso, type); 784 785 return ret; 786 } 787 788 static void map_groups__fixup_end(struct map_groups *mg) 789 { 790 int i; 791 for (i = 0; i < MAP__NR_TYPES; ++i) 792 __map_groups__fixup_end(mg, i); 793 } 794 795 static char *get_kernel_version(const char *root_dir) 796 { 797 char version[PATH_MAX]; 798 FILE *file; 799 char *name, *tmp; 800 const char *prefix = "Linux version "; 801 802 sprintf(version, "%s/proc/version", root_dir); 803 file = fopen(version, "r"); 804 if (!file) 805 return NULL; 806 807 version[0] = '\0'; 808 tmp = fgets(version, sizeof(version), file); 809 fclose(file); 810 811 name = strstr(version, prefix); 812 if (!name) 813 return NULL; 814 name += strlen(prefix); 815 tmp = strchr(name, ' '); 816 if (tmp) 817 *tmp = '\0'; 818 819 return strdup(name); 820 } 821 822 static int map_groups__set_modules_path_dir(struct map_groups *mg, 823 const char *dir_name, int depth) 824 { 825 struct dirent *dent; 826 DIR *dir = opendir(dir_name); 827 int ret = 0; 828 829 if (!dir) { 830 pr_debug("%s: cannot open %s dir\n", __func__, dir_name); 831 return -1; 832 } 833 834 while ((dent = readdir(dir)) != NULL) { 835 char path[PATH_MAX]; 836 struct stat st; 837 838 /*sshfs might return bad dent->d_type, so we have to stat*/ 839 snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name); 840 if (stat(path, &st)) 841 continue; 842 843 if (S_ISDIR(st.st_mode)) { 844 if (!strcmp(dent->d_name, ".") || 845 !strcmp(dent->d_name, "..")) 846 continue; 847 848 /* Do not follow top-level source and build symlinks */ 849 if (depth == 0) { 850 if (!strcmp(dent->d_name, "source") || 851 !strcmp(dent->d_name, "build")) 852 continue; 853 } 854 855 ret = 
static int map_groups__set_modules_path_dir(struct map_groups *mg,
					    const char *dir_name, int depth)
{
	struct dirent *dent;
	DIR *dir = opendir(dir_name);
	int ret = 0;

	if (!dir) {
		pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
		return -1;
	}

	while ((dent = readdir(dir)) != NULL) {
		char path[PATH_MAX];
		struct stat st;

		/* sshfs might return bad dent->d_type, so we have to stat */
		snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
		if (stat(path, &st))
			continue;

		if (S_ISDIR(st.st_mode)) {
			if (!strcmp(dent->d_name, ".") ||
			    !strcmp(dent->d_name, ".."))
				continue;

			/* Do not follow top-level source and build symlinks */
			if (depth == 0) {
				if (!strcmp(dent->d_name, "source") ||
				    !strcmp(dent->d_name, "build"))
					continue;
			}

			ret = map_groups__set_modules_path_dir(mg, path,
							       depth + 1);
			if (ret < 0)
				goto out;
		} else {
			char *dot = strrchr(dent->d_name, '.'),
			     dso_name[PATH_MAX];
			struct map *map;
			char *long_name;

			if (dot == NULL || strcmp(dot, ".ko"))
				continue;
			snprintf(dso_name, sizeof(dso_name), "[%.*s]",
				 (int)(dot - dent->d_name), dent->d_name);

			strxfrchar(dso_name, '-', '_');
			map = map_groups__find_by_name(mg, MAP__FUNCTION,
						       dso_name);
			if (map == NULL)
				continue;

			long_name = strdup(path);
			if (long_name == NULL) {
				ret = -1;
				goto out;
			}
			dso__set_long_name(map->dso, long_name, true);
			dso__kernel_module_get_build_id(map->dso, "");
		}
	}

out:
	closedir(dir);
	return ret;
}

static int machine__set_modules_path(struct machine *machine)
{
	char *version;
	char modules_path[PATH_MAX];

	version = get_kernel_version(machine->root_dir);
	if (!version)
		return -1;

	snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
		 machine->root_dir, version);
	free(version);

	return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
}

static int machine__create_module(void *arg, const char *name, u64 start)
{
	struct machine *machine = arg;
	struct map *map;

	map = machine__new_module(machine, start, name);
	if (map == NULL)
		return -1;

	dso__kernel_module_get_build_id(map->dso, machine->root_dir);

	return 0;
}

static int machine__create_modules(struct machine *machine)
{
	const char *modules;
	char path[PATH_MAX];

	if (machine__is_default_guest(machine)) {
		modules = symbol_conf.default_guest_modules;
	} else {
		snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
		modules = path;
	}

	if (symbol__restricted_filename(modules, "/proc/modules"))
		return -1;

	if (modules__parse(modules, machine, machine__create_module))
		return -1;

	if (!machine__set_modules_path(machine))
		return 0;

	pr_debug("Problems setting modules path maps, continuing anyway...\n");

	return 0;
}

int machine__create_kernel_maps(struct machine *machine)
{
	struct dso *kernel = machine__get_kernel(machine);
	const char *name;
	u64 addr = machine__get_running_kernel_start(machine, &name);

	if (!addr)
		return -1;

	if (kernel == NULL ||
	    __machine__create_kernel_maps(machine, kernel) < 0)
		return -1;

	if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
		if (machine__is_host(machine))
			pr_debug("Problems creating module maps, "
				 "continuing anyway...\n");
		else
			pr_debug("Problems creating module maps for guest %d, "
				 "continuing anyway...\n", machine->pid);
	}

	/*
	 * Now that we have all the maps created, just set the ->end of them:
	 */
	map_groups__fixup_end(&machine->kmaps);

	if (maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name,
					     addr)) {
		machine__destroy_kernel_maps(machine);
		return -1;
	}

	return 0;
}
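/*
 * Size every kernel map type from the start/len pair carried by the kernel
 * MMAP event.
 */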
static void machine__set_kernel_mmap_len(struct machine *machine,
					 union perf_event *event)
{
	int i;

	for (i = 0; i < MAP__NR_TYPES; i++) {
		machine->vmlinux_maps[i]->start = event->mmap.start;
		machine->vmlinux_maps[i]->end = (event->mmap.start +
						 event->mmap.len);
		/*
		 * Be a bit paranoid here, some perf.data file came with
		 * a zero sized synthesized MMAP event for the kernel.
		 */
		if (machine->vmlinux_maps[i]->end == 0)
			machine->vmlinux_maps[i]->end = ~0ULL;
	}
}

static bool machine__uses_kcore(struct machine *machine)
{
	struct dso *dso;

	list_for_each_entry(dso, &machine->kernel_dsos.head, node) {
		if (dso__is_kcore(dso))
			return true;
	}

	return false;
}
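/*
 * Handle MMAP events emitted in kernel context: module mappings (absolute
 * paths or "[module]" names) become module maps, while a mapping matching
 * the "[kernel.kallsyms]"-style prefix (re)creates the kernel maps and
 * relocates the reference symbol.
 */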
static int machine__process_kernel_mmap_event(struct machine *machine,
					      union perf_event *event)
{
	struct map *map;
	char kmmap_prefix[PATH_MAX];
	enum dso_kernel_type kernel_type;
	bool is_kernel_mmap;

	/* If we have maps from kcore then we do not need or want any others */
	if (machine__uses_kcore(machine))
		return 0;

	machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
	if (machine__is_host(machine))
		kernel_type = DSO_TYPE_KERNEL;
	else
		kernel_type = DSO_TYPE_GUEST_KERNEL;

	is_kernel_mmap = memcmp(event->mmap.filename,
				kmmap_prefix,
				strlen(kmmap_prefix) - 1) == 0;
	if (event->mmap.filename[0] == '/' ||
	    (!is_kernel_mmap && event->mmap.filename[0] == '[')) {

		char short_module_name[1024];
		char *name, *dot;

		if (event->mmap.filename[0] == '/') {
			name = strrchr(event->mmap.filename, '/');
			if (name == NULL)
				goto out_problem;

			++name; /* skip / */
			dot = strrchr(name, '.');
			if (dot == NULL)
				goto out_problem;
			snprintf(short_module_name, sizeof(short_module_name),
				 "[%.*s]", (int)(dot - name), name);
			strxfrchar(short_module_name, '-', '_');
		} else {
			/* Bounded copy: the filename may exceed the buffer */
			snprintf(short_module_name, sizeof(short_module_name),
				 "%s", event->mmap.filename);
		}

		map = machine__new_module(machine, event->mmap.start,
					  event->mmap.filename);
		if (map == NULL)
			goto out_problem;

		name = strdup(short_module_name);
		if (name == NULL)
			goto out_problem;

		dso__set_short_name(map->dso, name, true);
		map->end = map->start + event->mmap.len;
	} else if (is_kernel_mmap) {
		const char *symbol_name = (event->mmap.filename +
					   strlen(kmmap_prefix));
		/*
		 * Should be there already, from the build-id table in
		 * the header.
		 */
		struct dso *kernel = __dsos__findnew(&machine->kernel_dsos,
						     kmmap_prefix);
		if (kernel == NULL)
			goto out_problem;

		kernel->kernel = kernel_type;
		if (__machine__create_kernel_maps(machine, kernel) < 0)
			goto out_problem;

		machine__set_kernel_mmap_len(machine, event);

		/*
		 * Avoid using a zero address (kptr_restrict) for the ref reloc
		 * symbol. Effectively having zero here means that at record
		 * time /proc/sys/kernel/kptr_restrict was non zero.
		 */
		if (event->mmap.pgoff != 0) {
			maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
							 symbol_name,
							 event->mmap.pgoff);
		}

		if (machine__is_default_guest(machine)) {
			/*
			 * preload dso of guest kernel and modules
			 */
			dso__load(kernel, machine->vmlinux_maps[MAP__FUNCTION],
				  NULL);
		}
	}
	return 0;
out_problem:
	return -1;
}

int machine__process_mmap2_event(struct machine *machine,
				 union perf_event *event,
				 struct perf_sample *sample __maybe_unused)
{
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread;
	struct map *map;
	enum map_type type;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap2(event, stdout);

	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap2.pid,
					 event->mmap2.tid);
	if (thread == NULL)
		goto out_problem;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
		type = MAP__VARIABLE;
	else
		type = MAP__FUNCTION;

	map = map__new(machine, event->mmap2.start,
		       event->mmap2.len, event->mmap2.pgoff,
		       event->mmap2.pid, event->mmap2.maj,
		       event->mmap2.min, event->mmap2.ino,
		       event->mmap2.ino_generation,
		       event->mmap2.prot,
		       event->mmap2.flags,
		       event->mmap2.filename, type, thread);

	if (map == NULL)
		goto out_problem;

	thread__insert_map(thread, map);
	return 0;

out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
	return 0;
}
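/*
 * Legacy PERF_RECORD_MMAP carries no device/inode/prot/flags details, so
 * the corresponding map__new() arguments are passed as zeroes here;
 * PERF_RECORD_MMAP2 supplies them above.
 */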
int machine__process_mmap_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample __maybe_unused)
{
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread;
	struct map *map;
	enum map_type type;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap(event, stdout);

	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap.pid,
					 event->mmap.tid);
	if (thread == NULL)
		goto out_problem;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
		type = MAP__VARIABLE;
	else
		type = MAP__FUNCTION;

	map = map__new(machine, event->mmap.start,
		       event->mmap.len, event->mmap.pgoff,
		       event->mmap.pid, 0, 0, 0, 0, 0, 0,
		       event->mmap.filename,
		       type, thread);

	if (map == NULL)
		goto out_problem;

	thread__insert_map(thread, map);
	return 0;

out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
	return 0;
}

static void machine__remove_thread(struct machine *machine, struct thread *th)
{
	machine->last_match = NULL;
	rb_erase(&th->rb_node, &machine->threads);
	/*
	 * We may have references to this thread, for instance in some hist_entry
	 * instances, so just move them to a separate list.
	 */
	list_add_tail(&th->node, &machine->dead_threads);
}

int machine__process_fork_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);
	struct thread *parent = machine__findnew_thread(machine,
							event->fork.ppid,
							event->fork.ptid);

	/* if a thread currently exists for the thread id remove it */
	if (thread != NULL)
		machine__remove_thread(machine, thread);

	thread = machine__findnew_thread(machine, event->fork.pid,
					 event->fork.tid);
	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	if (thread == NULL || parent == NULL ||
	    thread__fork(thread, parent, sample->time) < 0) {
		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
		return -1;
	}

	return 0;
}

int machine__process_exit_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample __maybe_unused)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	if (thread != NULL)
		thread__exited(thread);

	return 0;
}

int machine__process_event(struct machine *machine, union perf_event *event,
			   struct perf_sample *sample)
{
	int ret;

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret = machine__process_comm_event(machine, event, sample); break;
	case PERF_RECORD_MMAP:
		ret = machine__process_mmap_event(machine, event, sample); break;
	case PERF_RECORD_MMAP2:
		ret = machine__process_mmap2_event(machine, event, sample); break;
	case PERF_RECORD_FORK:
		ret = machine__process_fork_event(machine, event, sample); break;
	case PERF_RECORD_EXIT:
		ret = machine__process_exit_event(machine, event, sample); break;
	case PERF_RECORD_LOST:
		ret = machine__process_lost_event(machine, event, sample); break;
	default:
		ret = -1;
		break;
	}

	return ret;
}

static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
{
	if (sym->name && !regexec(regex, sym->name, 0, NULL, 0))
		return 1;
	return 0;
}
static void ip__resolve_ams(struct machine *machine, struct thread *thread,
			    struct addr_map_symbol *ams,
			    u64 ip)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));
	/*
	 * We cannot use the header.misc hint to determine whether a
	 * branch stack address is user, kernel, guest, hypervisor.
	 * Branches may straddle the kernel/user/hypervisor boundaries.
	 * Thus, we have to try consecutively until we find a match
	 * or else the symbol is unknown.
	 */
	thread__find_cpumode_addr_location(thread, machine, MAP__FUNCTION, ip, &al);

	ams->addr = ip;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
}

static void ip__resolve_data(struct machine *machine, struct thread *thread,
			     u8 m, struct addr_map_symbol *ams, u64 addr)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));

	thread__find_addr_location(thread, machine, m, MAP__VARIABLE, addr,
				   &al);
	if (al.map == NULL) {
		/*
		 * some shared data regions have execute bit set which puts
		 * their mapping in the MAP__FUNCTION type array.
		 * Check there as a fallback option before dropping the sample.
		 */
		thread__find_addr_location(thread, machine, m, MAP__FUNCTION, addr,
					   &al);
	}

	ams->addr = addr;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
}

struct mem_info *sample__resolve_mem(struct perf_sample *sample,
				     struct addr_location *al)
{
	struct mem_info *mi = zalloc(sizeof(*mi));

	if (!mi)
		return NULL;

	ip__resolve_ams(al->machine, al->thread, &mi->iaddr, sample->ip);
	ip__resolve_data(al->machine, al->thread, al->cpumode,
			 &mi->daddr, sample->addr);
	mi->data_src.val = sample->data_src;

	return mi;
}

struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
					   struct addr_location *al)
{
	unsigned int i;
	const struct branch_stack *bs = sample->branch_stack;
	struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));

	if (!bi)
		return NULL;

	for (i = 0; i < bs->nr; i++) {
		ip__resolve_ams(al->machine, al->thread, &bi[i].to, bs->entries[i].to);
		ip__resolve_ams(al->machine, al->thread, &bi[i].from, bs->entries[i].from);
		bi[i].flags = bs->entries[i].flags;
	}
	return bi;
}
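/*
 * Fill callchain_cursor from a kernel-supplied ip_callchain: PERF_CONTEXT_*
 * marker entries switch the cpumode used to resolve the addresses that
 * follow, and entries are visited in callee or caller order depending on
 * callchain_param.order.
 */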
static int machine__resolve_callchain_sample(struct machine *machine,
					     struct thread *thread,
					     struct ip_callchain *chain,
					     struct symbol **parent,
					     struct addr_location *root_al,
					     int max_stack)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	int chain_nr = min(max_stack, (int)chain->nr);
	int i;
	int j;
	int err;
	int skip_idx __maybe_unused;

	callchain_cursor_reset(&callchain_cursor);

	if (chain->nr > PERF_MAX_STACK_DEPTH) {
		pr_warning("corrupted callchain. skipping...\n");
		return 0;
	}

	/*
	 * Based on DWARF debug information, some architectures skip
	 * a callchain entry saved by the kernel.
	 */
	skip_idx = arch_skip_callchain_idx(machine, thread, chain);

	for (i = 0; i < chain_nr; i++) {
		u64 ip;
		struct addr_location al;

		if (callchain_param.order == ORDER_CALLEE)
			j = i;
		else
			j = chain->nr - i - 1;

#ifdef HAVE_SKIP_CALLCHAIN_IDX
		if (j == skip_idx)
			continue;
#endif
		ip = chain->ips[j];

		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR;
				break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL;
				break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER;
				break;
			default:
				pr_debug("invalid callchain context: "
					 "%"PRId64"\n", (s64) ip);
				/*
				 * It seems the callchain is corrupted.
				 * Discard all.
				 */
				callchain_cursor_reset(&callchain_cursor);
				return 0;
			}
			continue;
		}

		al.filtered = 0;
		thread__find_addr_location(thread, machine, cpumode,
					   MAP__FUNCTION, ip, &al);
		if (al.sym != NULL) {
			if (sort__has_parent && !*parent &&
			    symbol__match_regex(al.sym, &parent_regex))
				*parent = al.sym;
			else if (have_ignore_callees && root_al &&
				 symbol__match_regex(al.sym, &ignore_callees_regex)) {
				/* Treat this symbol as the root,
				   forgetting its callees. */
				*root_al = al;
				callchain_cursor_reset(&callchain_cursor);
			}
		}

		err = callchain_cursor_append(&callchain_cursor,
					      ip, al.map, al.sym);
		if (err)
			return err;
	}

	return 0;
}

static int unwind_entry(struct unwind_entry *entry, void *arg)
{
	struct callchain_cursor *cursor = arg;
	return callchain_cursor_append(cursor, entry->ip,
				       entry->map, entry->sym);
}
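/*
 * Resolve the kernel-recorded callchain first, then, when the sample also
 * captured user registers and a user stack, append frames obtained by
 * DWARF post-unwinding via unwind__get_entries().
 */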
int machine__resolve_callchain(struct machine *machine,
			       struct perf_evsel *evsel,
			       struct thread *thread,
			       struct perf_sample *sample,
			       struct symbol **parent,
			       struct addr_location *root_al,
			       int max_stack)
{
	int ret;

	ret = machine__resolve_callchain_sample(machine, thread,
						sample->callchain, parent,
						root_al, max_stack);
	if (ret)
		return ret;

	/* Can we do dwarf post unwind? */
	if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) &&
	      (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER)))
		return 0;

	/* Bail out if nothing was captured. */
	if ((!sample->user_regs.regs) ||
	    (!sample->user_stack.size))
		return 0;

	return unwind__get_entries(unwind_entry, &callchain_cursor, machine,
				   thread, sample, max_stack);
}

int machine__for_each_thread(struct machine *machine,
			     int (*fn)(struct thread *thread, void *p),
			     void *priv)
{
	struct rb_node *nd;
	struct thread *thread;
	int rc = 0;

	for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
		thread = rb_entry(nd, struct thread, rb_node);
		rc = fn(thread, priv);
		if (rc != 0)
			return rc;
	}

	list_for_each_entry(thread, &machine->dead_threads, node) {
		rc = fn(thread, priv);
		if (rc != 0)
			return rc;
	}
	return rc;
}

int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
				  struct target *target, struct thread_map *threads,
				  perf_event__handler_t process, bool data_mmap)
{
	if (target__has_task(target))
		return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap);
	else if (target__has_cpu(target))
		return perf_event__synthesize_threads(tool, process, machine, data_mmap);
	/* command specified */
	return 0;
}

pid_t machine__get_current_tid(struct machine *machine, int cpu)
{
	if (cpu < 0 || cpu >= MAX_NR_CPUS || !machine->current_tid)
		return -1;

	return machine->current_tid[cpu];
}

int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
			     pid_t tid)
{
	struct thread *thread;

	if (cpu < 0)
		return -EINVAL;

	if (!machine->current_tid) {
		int i;

		machine->current_tid = calloc(MAX_NR_CPUS, sizeof(pid_t));
		if (!machine->current_tid)
			return -ENOMEM;
		for (i = 0; i < MAX_NR_CPUS; i++)
			machine->current_tid[i] = -1;
	}

	if (cpu >= MAX_NR_CPUS) {
		pr_err("Requested CPU %d too large. ", cpu);
		pr_err("Consider raising MAX_NR_CPUS\n");
		return -EINVAL;
	}

	machine->current_tid[cpu] = tid;

	thread = machine__findnew_thread(machine, pid, tid);
	if (!thread)
		return -ENOMEM;

	thread->cpu = cpu;

	return 0;
}

int machine__get_kernel_start(struct machine *machine)
{
	struct map *map = machine__kernel_map(machine, MAP__FUNCTION);
	int err = 0;

	/*
	 * The only addresses above 2^63 are kernel addresses of a 64-bit
	 * kernel. Note that addresses are unsigned so that on a 32-bit system
	 * all addresses including kernel addresses are less than 2^32. In
	 * that case (32-bit system), if the kernel mapping is unknown, all
	 * addresses will be assumed to be in user space - see
	 * machine__kernel_ip().
	 */
	machine->kernel_start = 1ULL << 63;
	if (map) {
		err = map__load(map, machine->symbol_filter);
		if (map->start)
			machine->kernel_start = map->start;
	}
	return err;
}