1 #include <dirent.h> 2 #include <errno.h> 3 #include <inttypes.h> 4 #include <linux/kernel.h> 5 #include <linux/types.h> 6 #include <sys/types.h> 7 #include <sys/stat.h> 8 #include <unistd.h> 9 #include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */ 10 #include <api/fs/fs.h> 11 #include "event.h" 12 #include "debug.h" 13 #include "hist.h" 14 #include "machine.h" 15 #include "sort.h" 16 #include "string2.h" 17 #include "strlist.h" 18 #include "thread.h" 19 #include "thread_map.h" 20 #include "sane_ctype.h" 21 #include "symbol/kallsyms.h" 22 #include "asm/bug.h" 23 #include "stat.h" 24 25 static const char *perf_event__names[] = { 26 [0] = "TOTAL", 27 [PERF_RECORD_MMAP] = "MMAP", 28 [PERF_RECORD_MMAP2] = "MMAP2", 29 [PERF_RECORD_LOST] = "LOST", 30 [PERF_RECORD_COMM] = "COMM", 31 [PERF_RECORD_EXIT] = "EXIT", 32 [PERF_RECORD_THROTTLE] = "THROTTLE", 33 [PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE", 34 [PERF_RECORD_FORK] = "FORK", 35 [PERF_RECORD_READ] = "READ", 36 [PERF_RECORD_SAMPLE] = "SAMPLE", 37 [PERF_RECORD_AUX] = "AUX", 38 [PERF_RECORD_ITRACE_START] = "ITRACE_START", 39 [PERF_RECORD_LOST_SAMPLES] = "LOST_SAMPLES", 40 [PERF_RECORD_SWITCH] = "SWITCH", 41 [PERF_RECORD_SWITCH_CPU_WIDE] = "SWITCH_CPU_WIDE", 42 [PERF_RECORD_NAMESPACES] = "NAMESPACES", 43 [PERF_RECORD_HEADER_ATTR] = "ATTR", 44 [PERF_RECORD_HEADER_EVENT_TYPE] = "EVENT_TYPE", 45 [PERF_RECORD_HEADER_TRACING_DATA] = "TRACING_DATA", 46 [PERF_RECORD_HEADER_BUILD_ID] = "BUILD_ID", 47 [PERF_RECORD_FINISHED_ROUND] = "FINISHED_ROUND", 48 [PERF_RECORD_ID_INDEX] = "ID_INDEX", 49 [PERF_RECORD_AUXTRACE_INFO] = "AUXTRACE_INFO", 50 [PERF_RECORD_AUXTRACE] = "AUXTRACE", 51 [PERF_RECORD_AUXTRACE_ERROR] = "AUXTRACE_ERROR", 52 [PERF_RECORD_THREAD_MAP] = "THREAD_MAP", 53 [PERF_RECORD_CPU_MAP] = "CPU_MAP", 54 [PERF_RECORD_STAT_CONFIG] = "STAT_CONFIG", 55 [PERF_RECORD_STAT] = "STAT", 56 [PERF_RECORD_STAT_ROUND] = "STAT_ROUND", 57 [PERF_RECORD_EVENT_UPDATE] = "EVENT_UPDATE", 58 [PERF_RECORD_TIME_CONV] = "TIME_CONV", 59 }; 60 61 static const char *perf_ns__names[] = { 62 [NET_NS_INDEX] = "net", 63 [UTS_NS_INDEX] = "uts", 64 [IPC_NS_INDEX] = "ipc", 65 [PID_NS_INDEX] = "pid", 66 [USER_NS_INDEX] = "user", 67 [MNT_NS_INDEX] = "mnt", 68 [CGROUP_NS_INDEX] = "cgroup", 69 }; 70 71 const char *perf_event__name(unsigned int id) 72 { 73 if (id >= ARRAY_SIZE(perf_event__names)) 74 return "INVALID"; 75 if (!perf_event__names[id]) 76 return "UNKNOWN"; 77 return perf_event__names[id]; 78 } 79 80 static const char *perf_ns__name(unsigned int id) 81 { 82 if (id >= ARRAY_SIZE(perf_ns__names)) 83 return "UNKNOWN"; 84 return perf_ns__names[id]; 85 } 86 87 static int perf_tool__process_synth_event(struct perf_tool *tool, 88 union perf_event *event, 89 struct machine *machine, 90 perf_event__handler_t process) 91 { 92 struct perf_sample synth_sample = { 93 .pid = -1, 94 .tid = -1, 95 .time = -1, 96 .stream_id = -1, 97 .cpu = -1, 98 .period = 1, 99 .cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK, 100 }; 101 102 return process(tool, event, &synth_sample, machine); 103 }; 104 105 /* 106 * Assumes that the first 4095 bytes of /proc/pid/stat contains 107 * the comm, tgid and ppid. 108 */ 109 static int perf_event__get_comm_ids(pid_t pid, char *comm, size_t len, 110 pid_t *tgid, pid_t *ppid) 111 { 112 char filename[PATH_MAX]; 113 char bf[4096]; 114 int fd; 115 size_t size = 0; 116 ssize_t n; 117 char *name, *tgids, *ppids; 118 119 *tgid = -1; 120 *ppid = -1; 121 122 snprintf(filename, sizeof(filename), "/proc/%d/status", pid); 123 124 fd = open(filename, O_RDONLY); 125 if (fd < 0) { 126 pr_debug("couldn't open %s\n", filename); 127 return -1; 128 } 129 130 n = read(fd, bf, sizeof(bf) - 1); 131 close(fd); 132 if (n <= 0) { 133 pr_warning("Couldn't get COMM, tigd and ppid for pid %d\n", 134 pid); 135 return -1; 136 } 137 bf[n] = '\0'; 138 139 name = strstr(bf, "Name:"); 140 tgids = strstr(bf, "Tgid:"); 141 ppids = strstr(bf, "PPid:"); 142 143 if (name) { 144 char *nl; 145 146 name += 5; /* strlen("Name:") */ 147 name = ltrim(name); 148 149 nl = strchr(name, '\n'); 150 if (nl) 151 *nl = '\0'; 152 153 size = strlen(name); 154 if (size >= len) 155 size = len - 1; 156 memcpy(comm, name, size); 157 comm[size] = '\0'; 158 } else { 159 pr_debug("Name: string not found for pid %d\n", pid); 160 } 161 162 if (tgids) { 163 tgids += 5; /* strlen("Tgid:") */ 164 *tgid = atoi(tgids); 165 } else { 166 pr_debug("Tgid: string not found for pid %d\n", pid); 167 } 168 169 if (ppids) { 170 ppids += 5; /* strlen("PPid:") */ 171 *ppid = atoi(ppids); 172 } else { 173 pr_debug("PPid: string not found for pid %d\n", pid); 174 } 175 176 return 0; 177 } 178 179 static int perf_event__prepare_comm(union perf_event *event, pid_t pid, 180 struct machine *machine, 181 pid_t *tgid, pid_t *ppid) 182 { 183 size_t size; 184 185 *ppid = -1; 186 187 memset(&event->comm, 0, sizeof(event->comm)); 188 189 if (machine__is_host(machine)) { 190 if (perf_event__get_comm_ids(pid, event->comm.comm, 191 sizeof(event->comm.comm), 192 tgid, ppid) != 0) { 193 return -1; 194 } 195 } else { 196 *tgid = machine->pid; 197 } 198 199 if (*tgid < 0) 200 return -1; 201 202 event->comm.pid = *tgid; 203 event->comm.header.type = PERF_RECORD_COMM; 204 205 size = strlen(event->comm.comm) + 1; 206 size = PERF_ALIGN(size, sizeof(u64)); 207 memset(event->comm.comm + size, 0, machine->id_hdr_size); 208 event->comm.header.size = (sizeof(event->comm) - 209 (sizeof(event->comm.comm) - size) + 210 machine->id_hdr_size); 211 event->comm.tid = pid; 212 213 return 0; 214 } 215 216 pid_t perf_event__synthesize_comm(struct perf_tool *tool, 217 union perf_event *event, pid_t pid, 218 perf_event__handler_t process, 219 struct machine *machine) 220 { 221 pid_t tgid, ppid; 222 223 if (perf_event__prepare_comm(event, pid, machine, &tgid, &ppid) != 0) 224 return -1; 225 226 if (perf_tool__process_synth_event(tool, event, machine, process) != 0) 227 return -1; 228 229 return tgid; 230 } 231 232 static void perf_event__get_ns_link_info(pid_t pid, const char *ns, 233 struct perf_ns_link_info *ns_link_info) 234 { 235 struct stat64 st; 236 char proc_ns[128]; 237 238 sprintf(proc_ns, "/proc/%u/ns/%s", pid, ns); 239 if (stat64(proc_ns, &st) == 0) { 240 ns_link_info->dev = st.st_dev; 241 ns_link_info->ino = st.st_ino; 242 } 243 } 244 245 int perf_event__synthesize_namespaces(struct perf_tool *tool, 246 union perf_event *event, 247 pid_t pid, pid_t tgid, 248 perf_event__handler_t process, 249 struct machine *machine) 250 { 251 u32 idx; 252 struct perf_ns_link_info *ns_link_info; 253 254 if (!tool || !tool->namespace_events) 255 return 0; 256 257 memset(&event->namespaces, 0, (sizeof(event->namespaces) + 258 (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) + 259 machine->id_hdr_size)); 260 261 event->namespaces.pid = tgid; 262 event->namespaces.tid = pid; 263 264 event->namespaces.nr_namespaces = NR_NAMESPACES; 265 266 ns_link_info = event->namespaces.link_info; 267 268 for (idx = 0; idx < event->namespaces.nr_namespaces; idx++) 269 perf_event__get_ns_link_info(pid, perf_ns__name(idx), 270 &ns_link_info[idx]); 271 272 event->namespaces.header.type = PERF_RECORD_NAMESPACES; 273 274 event->namespaces.header.size = (sizeof(event->namespaces) + 275 (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) + 276 machine->id_hdr_size); 277 278 if (perf_tool__process_synth_event(tool, event, machine, process) != 0) 279 return -1; 280 281 return 0; 282 } 283 284 static int perf_event__synthesize_fork(struct perf_tool *tool, 285 union perf_event *event, 286 pid_t pid, pid_t tgid, pid_t ppid, 287 perf_event__handler_t process, 288 struct machine *machine) 289 { 290 memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size); 291 292 /* 293 * for main thread set parent to ppid from status file. For other 294 * threads set parent pid to main thread. ie., assume main thread 295 * spawns all threads in a process 296 */ 297 if (tgid == pid) { 298 event->fork.ppid = ppid; 299 event->fork.ptid = ppid; 300 } else { 301 event->fork.ppid = tgid; 302 event->fork.ptid = tgid; 303 } 304 event->fork.pid = tgid; 305 event->fork.tid = pid; 306 event->fork.header.type = PERF_RECORD_FORK; 307 308 event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size); 309 310 if (perf_tool__process_synth_event(tool, event, machine, process) != 0) 311 return -1; 312 313 return 0; 314 } 315 316 int perf_event__synthesize_mmap_events(struct perf_tool *tool, 317 union perf_event *event, 318 pid_t pid, pid_t tgid, 319 perf_event__handler_t process, 320 struct machine *machine, 321 bool mmap_data, 322 unsigned int proc_map_timeout) 323 { 324 char filename[PATH_MAX]; 325 FILE *fp; 326 unsigned long long t; 327 bool truncation = false; 328 unsigned long long timeout = proc_map_timeout * 1000000ULL; 329 int rc = 0; 330 const char *hugetlbfs_mnt = hugetlbfs__mountpoint(); 331 int hugetlbfs_mnt_len = hugetlbfs_mnt ? strlen(hugetlbfs_mnt) : 0; 332 333 if (machine__is_default_guest(machine)) 334 return 0; 335 336 snprintf(filename, sizeof(filename), "%s/proc/%d/task/%d/maps", 337 machine->root_dir, pid, pid); 338 339 fp = fopen(filename, "r"); 340 if (fp == NULL) { 341 /* 342 * We raced with a task exiting - just return: 343 */ 344 pr_debug("couldn't open %s\n", filename); 345 return -1; 346 } 347 348 event->header.type = PERF_RECORD_MMAP2; 349 t = rdclock(); 350 351 while (1) { 352 char bf[BUFSIZ]; 353 char prot[5]; 354 char execname[PATH_MAX]; 355 char anonstr[] = "//anon"; 356 unsigned int ino; 357 size_t size; 358 ssize_t n; 359 360 if (fgets(bf, sizeof(bf), fp) == NULL) 361 break; 362 363 if ((rdclock() - t) > timeout) { 364 pr_warning("Reading %s time out. " 365 "You may want to increase " 366 "the time limit by --proc-map-timeout\n", 367 filename); 368 truncation = true; 369 goto out; 370 } 371 372 /* ensure null termination since stack will be reused. */ 373 strcpy(execname, ""); 374 375 /* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */ 376 n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %[^\n]\n", 377 &event->mmap2.start, &event->mmap2.len, prot, 378 &event->mmap2.pgoff, &event->mmap2.maj, 379 &event->mmap2.min, 380 &ino, execname); 381 382 /* 383 * Anon maps don't have the execname. 384 */ 385 if (n < 7) 386 continue; 387 388 event->mmap2.ino = (u64)ino; 389 390 /* 391 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c 392 */ 393 if (machine__is_host(machine)) 394 event->header.misc = PERF_RECORD_MISC_USER; 395 else 396 event->header.misc = PERF_RECORD_MISC_GUEST_USER; 397 398 /* map protection and flags bits */ 399 event->mmap2.prot = 0; 400 event->mmap2.flags = 0; 401 if (prot[0] == 'r') 402 event->mmap2.prot |= PROT_READ; 403 if (prot[1] == 'w') 404 event->mmap2.prot |= PROT_WRITE; 405 if (prot[2] == 'x') 406 event->mmap2.prot |= PROT_EXEC; 407 408 if (prot[3] == 's') 409 event->mmap2.flags |= MAP_SHARED; 410 else 411 event->mmap2.flags |= MAP_PRIVATE; 412 413 if (prot[2] != 'x') { 414 if (!mmap_data || prot[0] != 'r') 415 continue; 416 417 event->header.misc |= PERF_RECORD_MISC_MMAP_DATA; 418 } 419 420 out: 421 if (truncation) 422 event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT; 423 424 if (!strcmp(execname, "")) 425 strcpy(execname, anonstr); 426 427 if (hugetlbfs_mnt_len && 428 !strncmp(execname, hugetlbfs_mnt, hugetlbfs_mnt_len)) { 429 strcpy(execname, anonstr); 430 event->mmap2.flags |= MAP_HUGETLB; 431 } 432 433 size = strlen(execname) + 1; 434 memcpy(event->mmap2.filename, execname, size); 435 size = PERF_ALIGN(size, sizeof(u64)); 436 event->mmap2.len -= event->mmap.start; 437 event->mmap2.header.size = (sizeof(event->mmap2) - 438 (sizeof(event->mmap2.filename) - size)); 439 memset(event->mmap2.filename + size, 0, machine->id_hdr_size); 440 event->mmap2.header.size += machine->id_hdr_size; 441 event->mmap2.pid = tgid; 442 event->mmap2.tid = pid; 443 444 if (perf_tool__process_synth_event(tool, event, machine, process) != 0) { 445 rc = -1; 446 break; 447 } 448 449 if (truncation) 450 break; 451 } 452 453 fclose(fp); 454 return rc; 455 } 456 457 int perf_event__synthesize_modules(struct perf_tool *tool, 458 perf_event__handler_t process, 459 struct machine *machine) 460 { 461 int rc = 0; 462 struct map *pos; 463 struct map_groups *kmaps = &machine->kmaps; 464 struct maps *maps = &kmaps->maps[MAP__FUNCTION]; 465 union perf_event *event = zalloc((sizeof(event->mmap) + 466 machine->id_hdr_size)); 467 if (event == NULL) { 468 pr_debug("Not enough memory synthesizing mmap event " 469 "for kernel modules\n"); 470 return -1; 471 } 472 473 event->header.type = PERF_RECORD_MMAP; 474 475 /* 476 * kernel uses 0 for user space maps, see kernel/perf_event.c 477 * __perf_event_mmap 478 */ 479 if (machine__is_host(machine)) 480 event->header.misc = PERF_RECORD_MISC_KERNEL; 481 else 482 event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL; 483 484 for (pos = maps__first(maps); pos; pos = map__next(pos)) { 485 size_t size; 486 487 if (__map__is_kernel(pos)) 488 continue; 489 490 size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64)); 491 event->mmap.header.type = PERF_RECORD_MMAP; 492 event->mmap.header.size = (sizeof(event->mmap) - 493 (sizeof(event->mmap.filename) - size)); 494 memset(event->mmap.filename + size, 0, machine->id_hdr_size); 495 event->mmap.header.size += machine->id_hdr_size; 496 event->mmap.start = pos->start; 497 event->mmap.len = pos->end - pos->start; 498 event->mmap.pid = machine->pid; 499 500 memcpy(event->mmap.filename, pos->dso->long_name, 501 pos->dso->long_name_len + 1); 502 if (perf_tool__process_synth_event(tool, event, machine, process) != 0) { 503 rc = -1; 504 break; 505 } 506 } 507 508 free(event); 509 return rc; 510 } 511 512 static int __event__synthesize_thread(union perf_event *comm_event, 513 union perf_event *mmap_event, 514 union perf_event *fork_event, 515 union perf_event *namespaces_event, 516 pid_t pid, int full, 517 perf_event__handler_t process, 518 struct perf_tool *tool, 519 struct machine *machine, 520 bool mmap_data, 521 unsigned int proc_map_timeout) 522 { 523 char filename[PATH_MAX]; 524 DIR *tasks; 525 struct dirent *dirent; 526 pid_t tgid, ppid; 527 int rc = 0; 528 529 /* special case: only send one comm event using passed in pid */ 530 if (!full) { 531 tgid = perf_event__synthesize_comm(tool, comm_event, pid, 532 process, machine); 533 534 if (tgid == -1) 535 return -1; 536 537 if (perf_event__synthesize_namespaces(tool, namespaces_event, pid, 538 tgid, process, machine) < 0) 539 return -1; 540 541 542 return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid, 543 process, machine, mmap_data, 544 proc_map_timeout); 545 } 546 547 if (machine__is_default_guest(machine)) 548 return 0; 549 550 snprintf(filename, sizeof(filename), "%s/proc/%d/task", 551 machine->root_dir, pid); 552 553 tasks = opendir(filename); 554 if (tasks == NULL) { 555 pr_debug("couldn't open %s\n", filename); 556 return 0; 557 } 558 559 while ((dirent = readdir(tasks)) != NULL) { 560 char *end; 561 pid_t _pid; 562 563 _pid = strtol(dirent->d_name, &end, 10); 564 if (*end) 565 continue; 566 567 rc = -1; 568 if (perf_event__prepare_comm(comm_event, _pid, machine, 569 &tgid, &ppid) != 0) 570 break; 571 572 if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid, 573 ppid, process, machine) < 0) 574 break; 575 576 if (perf_event__synthesize_namespaces(tool, namespaces_event, _pid, 577 tgid, process, machine) < 0) 578 break; 579 580 /* 581 * Send the prepared comm event 582 */ 583 if (perf_tool__process_synth_event(tool, comm_event, machine, process) != 0) 584 break; 585 586 rc = 0; 587 if (_pid == pid) { 588 /* process the parent's maps too */ 589 rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid, 590 process, machine, mmap_data, proc_map_timeout); 591 if (rc) 592 break; 593 } 594 } 595 596 closedir(tasks); 597 return rc; 598 } 599 600 int perf_event__synthesize_thread_map(struct perf_tool *tool, 601 struct thread_map *threads, 602 perf_event__handler_t process, 603 struct machine *machine, 604 bool mmap_data, 605 unsigned int proc_map_timeout) 606 { 607 union perf_event *comm_event, *mmap_event, *fork_event; 608 union perf_event *namespaces_event; 609 int err = -1, thread, j; 610 611 comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size); 612 if (comm_event == NULL) 613 goto out; 614 615 mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size); 616 if (mmap_event == NULL) 617 goto out_free_comm; 618 619 fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size); 620 if (fork_event == NULL) 621 goto out_free_mmap; 622 623 namespaces_event = malloc(sizeof(namespaces_event->namespaces) + 624 (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) + 625 machine->id_hdr_size); 626 if (namespaces_event == NULL) 627 goto out_free_fork; 628 629 err = 0; 630 for (thread = 0; thread < threads->nr; ++thread) { 631 if (__event__synthesize_thread(comm_event, mmap_event, 632 fork_event, namespaces_event, 633 thread_map__pid(threads, thread), 0, 634 process, tool, machine, 635 mmap_data, proc_map_timeout)) { 636 err = -1; 637 break; 638 } 639 640 /* 641 * comm.pid is set to thread group id by 642 * perf_event__synthesize_comm 643 */ 644 if ((int) comm_event->comm.pid != thread_map__pid(threads, thread)) { 645 bool need_leader = true; 646 647 /* is thread group leader in thread_map? */ 648 for (j = 0; j < threads->nr; ++j) { 649 if ((int) comm_event->comm.pid == thread_map__pid(threads, j)) { 650 need_leader = false; 651 break; 652 } 653 } 654 655 /* if not, generate events for it */ 656 if (need_leader && 657 __event__synthesize_thread(comm_event, mmap_event, 658 fork_event, namespaces_event, 659 comm_event->comm.pid, 0, 660 process, tool, machine, 661 mmap_data, proc_map_timeout)) { 662 err = -1; 663 break; 664 } 665 } 666 } 667 free(namespaces_event); 668 out_free_fork: 669 free(fork_event); 670 out_free_mmap: 671 free(mmap_event); 672 out_free_comm: 673 free(comm_event); 674 out: 675 return err; 676 } 677 678 int perf_event__synthesize_threads(struct perf_tool *tool, 679 perf_event__handler_t process, 680 struct machine *machine, 681 bool mmap_data, 682 unsigned int proc_map_timeout) 683 { 684 DIR *proc; 685 char proc_path[PATH_MAX]; 686 struct dirent *dirent; 687 union perf_event *comm_event, *mmap_event, *fork_event; 688 union perf_event *namespaces_event; 689 int err = -1; 690 691 if (machine__is_default_guest(machine)) 692 return 0; 693 694 comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size); 695 if (comm_event == NULL) 696 goto out; 697 698 mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size); 699 if (mmap_event == NULL) 700 goto out_free_comm; 701 702 fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size); 703 if (fork_event == NULL) 704 goto out_free_mmap; 705 706 namespaces_event = malloc(sizeof(namespaces_event->namespaces) + 707 (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) + 708 machine->id_hdr_size); 709 if (namespaces_event == NULL) 710 goto out_free_fork; 711 712 snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir); 713 proc = opendir(proc_path); 714 715 if (proc == NULL) 716 goto out_free_namespaces; 717 718 while ((dirent = readdir(proc)) != NULL) { 719 char *end; 720 pid_t pid = strtol(dirent->d_name, &end, 10); 721 722 if (*end) /* only interested in proper numerical dirents */ 723 continue; 724 /* 725 * We may race with exiting thread, so don't stop just because 726 * one thread couldn't be synthesized. 727 */ 728 __event__synthesize_thread(comm_event, mmap_event, fork_event, 729 namespaces_event, pid, 1, process, 730 tool, machine, mmap_data, 731 proc_map_timeout); 732 } 733 734 err = 0; 735 closedir(proc); 736 out_free_namespaces: 737 free(namespaces_event); 738 out_free_fork: 739 free(fork_event); 740 out_free_mmap: 741 free(mmap_event); 742 out_free_comm: 743 free(comm_event); 744 out: 745 return err; 746 } 747 748 struct process_symbol_args { 749 const char *name; 750 u64 start; 751 }; 752 753 static int find_symbol_cb(void *arg, const char *name, char type, 754 u64 start) 755 { 756 struct process_symbol_args *args = arg; 757 758 /* 759 * Must be a function or at least an alias, as in PARISC64, where "_text" is 760 * an 'A' to the same address as "_stext". 761 */ 762 if (!(symbol_type__is_a(type, MAP__FUNCTION) || 763 type == 'A') || strcmp(name, args->name)) 764 return 0; 765 766 args->start = start; 767 return 1; 768 } 769 770 u64 kallsyms__get_function_start(const char *kallsyms_filename, 771 const char *symbol_name) 772 { 773 struct process_symbol_args args = { .name = symbol_name, }; 774 775 if (kallsyms__parse(kallsyms_filename, &args, find_symbol_cb) <= 0) 776 return 0; 777 778 return args.start; 779 } 780 781 int perf_event__synthesize_kernel_mmap(struct perf_tool *tool, 782 perf_event__handler_t process, 783 struct machine *machine) 784 { 785 size_t size; 786 const char *mmap_name; 787 char name_buff[PATH_MAX]; 788 struct map *map = machine__kernel_map(machine); 789 struct kmap *kmap; 790 int err; 791 union perf_event *event; 792 793 if (symbol_conf.kptr_restrict) 794 return -1; 795 if (map == NULL) 796 return -1; 797 798 /* 799 * We should get this from /sys/kernel/sections/.text, but till that is 800 * available use this, and after it is use this as a fallback for older 801 * kernels. 802 */ 803 event = zalloc((sizeof(event->mmap) + machine->id_hdr_size)); 804 if (event == NULL) { 805 pr_debug("Not enough memory synthesizing mmap event " 806 "for kernel modules\n"); 807 return -1; 808 } 809 810 mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff)); 811 if (machine__is_host(machine)) { 812 /* 813 * kernel uses PERF_RECORD_MISC_USER for user space maps, 814 * see kernel/perf_event.c __perf_event_mmap 815 */ 816 event->header.misc = PERF_RECORD_MISC_KERNEL; 817 } else { 818 event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL; 819 } 820 821 kmap = map__kmap(map); 822 size = snprintf(event->mmap.filename, sizeof(event->mmap.filename), 823 "%s%s", mmap_name, kmap->ref_reloc_sym->name) + 1; 824 size = PERF_ALIGN(size, sizeof(u64)); 825 event->mmap.header.type = PERF_RECORD_MMAP; 826 event->mmap.header.size = (sizeof(event->mmap) - 827 (sizeof(event->mmap.filename) - size) + machine->id_hdr_size); 828 event->mmap.pgoff = kmap->ref_reloc_sym->addr; 829 event->mmap.start = map->start; 830 event->mmap.len = map->end - event->mmap.start; 831 event->mmap.pid = machine->pid; 832 833 err = perf_tool__process_synth_event(tool, event, machine, process); 834 free(event); 835 836 return err; 837 } 838 839 int perf_event__synthesize_thread_map2(struct perf_tool *tool, 840 struct thread_map *threads, 841 perf_event__handler_t process, 842 struct machine *machine) 843 { 844 union perf_event *event; 845 int i, err, size; 846 847 size = sizeof(event->thread_map); 848 size += threads->nr * sizeof(event->thread_map.entries[0]); 849 850 event = zalloc(size); 851 if (!event) 852 return -ENOMEM; 853 854 event->header.type = PERF_RECORD_THREAD_MAP; 855 event->header.size = size; 856 event->thread_map.nr = threads->nr; 857 858 for (i = 0; i < threads->nr; i++) { 859 struct thread_map_event_entry *entry = &event->thread_map.entries[i]; 860 char *comm = thread_map__comm(threads, i); 861 862 if (!comm) 863 comm = (char *) ""; 864 865 entry->pid = thread_map__pid(threads, i); 866 strncpy((char *) &entry->comm, comm, sizeof(entry->comm)); 867 } 868 869 err = process(tool, event, NULL, machine); 870 871 free(event); 872 return err; 873 } 874 875 static void synthesize_cpus(struct cpu_map_entries *cpus, 876 struct cpu_map *map) 877 { 878 int i; 879 880 cpus->nr = map->nr; 881 882 for (i = 0; i < map->nr; i++) 883 cpus->cpu[i] = map->map[i]; 884 } 885 886 static void synthesize_mask(struct cpu_map_mask *mask, 887 struct cpu_map *map, int max) 888 { 889 int i; 890 891 mask->nr = BITS_TO_LONGS(max); 892 mask->long_size = sizeof(long); 893 894 for (i = 0; i < map->nr; i++) 895 set_bit(map->map[i], mask->mask); 896 } 897 898 static size_t cpus_size(struct cpu_map *map) 899 { 900 return sizeof(struct cpu_map_entries) + map->nr * sizeof(u16); 901 } 902 903 static size_t mask_size(struct cpu_map *map, int *max) 904 { 905 int i; 906 907 *max = 0; 908 909 for (i = 0; i < map->nr; i++) { 910 /* bit possition of the cpu is + 1 */ 911 int bit = map->map[i] + 1; 912 913 if (bit > *max) 914 *max = bit; 915 } 916 917 return sizeof(struct cpu_map_mask) + BITS_TO_LONGS(*max) * sizeof(long); 918 } 919 920 void *cpu_map_data__alloc(struct cpu_map *map, size_t *size, u16 *type, int *max) 921 { 922 size_t size_cpus, size_mask; 923 bool is_dummy = cpu_map__empty(map); 924 925 /* 926 * Both array and mask data have variable size based 927 * on the number of cpus and their actual values. 928 * The size of the 'struct cpu_map_data' is: 929 * 930 * array = size of 'struct cpu_map_entries' + 931 * number of cpus * sizeof(u64) 932 * 933 * mask = size of 'struct cpu_map_mask' + 934 * maximum cpu bit converted to size of longs 935 * 936 * and finaly + the size of 'struct cpu_map_data'. 937 */ 938 size_cpus = cpus_size(map); 939 size_mask = mask_size(map, max); 940 941 if (is_dummy || (size_cpus < size_mask)) { 942 *size += size_cpus; 943 *type = PERF_CPU_MAP__CPUS; 944 } else { 945 *size += size_mask; 946 *type = PERF_CPU_MAP__MASK; 947 } 948 949 *size += sizeof(struct cpu_map_data); 950 return zalloc(*size); 951 } 952 953 void cpu_map_data__synthesize(struct cpu_map_data *data, struct cpu_map *map, 954 u16 type, int max) 955 { 956 data->type = type; 957 958 switch (type) { 959 case PERF_CPU_MAP__CPUS: 960 synthesize_cpus((struct cpu_map_entries *) data->data, map); 961 break; 962 case PERF_CPU_MAP__MASK: 963 synthesize_mask((struct cpu_map_mask *) data->data, map, max); 964 default: 965 break; 966 }; 967 } 968 969 static struct cpu_map_event* cpu_map_event__new(struct cpu_map *map) 970 { 971 size_t size = sizeof(struct cpu_map_event); 972 struct cpu_map_event *event; 973 int max; 974 u16 type; 975 976 event = cpu_map_data__alloc(map, &size, &type, &max); 977 if (!event) 978 return NULL; 979 980 event->header.type = PERF_RECORD_CPU_MAP; 981 event->header.size = size; 982 event->data.type = type; 983 984 cpu_map_data__synthesize(&event->data, map, type, max); 985 return event; 986 } 987 988 int perf_event__synthesize_cpu_map(struct perf_tool *tool, 989 struct cpu_map *map, 990 perf_event__handler_t process, 991 struct machine *machine) 992 { 993 struct cpu_map_event *event; 994 int err; 995 996 event = cpu_map_event__new(map); 997 if (!event) 998 return -ENOMEM; 999 1000 err = process(tool, (union perf_event *) event, NULL, machine); 1001 1002 free(event); 1003 return err; 1004 } 1005 1006 int perf_event__synthesize_stat_config(struct perf_tool *tool, 1007 struct perf_stat_config *config, 1008 perf_event__handler_t process, 1009 struct machine *machine) 1010 { 1011 struct stat_config_event *event; 1012 int size, i = 0, err; 1013 1014 size = sizeof(*event); 1015 size += (PERF_STAT_CONFIG_TERM__MAX * sizeof(event->data[0])); 1016 1017 event = zalloc(size); 1018 if (!event) 1019 return -ENOMEM; 1020 1021 event->header.type = PERF_RECORD_STAT_CONFIG; 1022 event->header.size = size; 1023 event->nr = PERF_STAT_CONFIG_TERM__MAX; 1024 1025 #define ADD(__term, __val) \ 1026 event->data[i].tag = PERF_STAT_CONFIG_TERM__##__term; \ 1027 event->data[i].val = __val; \ 1028 i++; 1029 1030 ADD(AGGR_MODE, config->aggr_mode) 1031 ADD(INTERVAL, config->interval) 1032 ADD(SCALE, config->scale) 1033 1034 WARN_ONCE(i != PERF_STAT_CONFIG_TERM__MAX, 1035 "stat config terms unbalanced\n"); 1036 #undef ADD 1037 1038 err = process(tool, (union perf_event *) event, NULL, machine); 1039 1040 free(event); 1041 return err; 1042 } 1043 1044 int perf_event__synthesize_stat(struct perf_tool *tool, 1045 u32 cpu, u32 thread, u64 id, 1046 struct perf_counts_values *count, 1047 perf_event__handler_t process, 1048 struct machine *machine) 1049 { 1050 struct stat_event event; 1051 1052 event.header.type = PERF_RECORD_STAT; 1053 event.header.size = sizeof(event); 1054 event.header.misc = 0; 1055 1056 event.id = id; 1057 event.cpu = cpu; 1058 event.thread = thread; 1059 event.val = count->val; 1060 event.ena = count->ena; 1061 event.run = count->run; 1062 1063 return process(tool, (union perf_event *) &event, NULL, machine); 1064 } 1065 1066 int perf_event__synthesize_stat_round(struct perf_tool *tool, 1067 u64 evtime, u64 type, 1068 perf_event__handler_t process, 1069 struct machine *machine) 1070 { 1071 struct stat_round_event event; 1072 1073 event.header.type = PERF_RECORD_STAT_ROUND; 1074 event.header.size = sizeof(event); 1075 event.header.misc = 0; 1076 1077 event.time = evtime; 1078 event.type = type; 1079 1080 return process(tool, (union perf_event *) &event, NULL, machine); 1081 } 1082 1083 void perf_event__read_stat_config(struct perf_stat_config *config, 1084 struct stat_config_event *event) 1085 { 1086 unsigned i; 1087 1088 for (i = 0; i < event->nr; i++) { 1089 1090 switch (event->data[i].tag) { 1091 #define CASE(__term, __val) \ 1092 case PERF_STAT_CONFIG_TERM__##__term: \ 1093 config->__val = event->data[i].val; \ 1094 break; 1095 1096 CASE(AGGR_MODE, aggr_mode) 1097 CASE(SCALE, scale) 1098 CASE(INTERVAL, interval) 1099 #undef CASE 1100 default: 1101 pr_warning("unknown stat config term %" PRIu64 "\n", 1102 event->data[i].tag); 1103 } 1104 } 1105 } 1106 1107 size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp) 1108 { 1109 const char *s; 1110 1111 if (event->header.misc & PERF_RECORD_MISC_COMM_EXEC) 1112 s = " exec"; 1113 else 1114 s = ""; 1115 1116 return fprintf(fp, "%s: %s:%d/%d\n", s, event->comm.comm, event->comm.pid, event->comm.tid); 1117 } 1118 1119 size_t perf_event__fprintf_namespaces(union perf_event *event, FILE *fp) 1120 { 1121 size_t ret = 0; 1122 struct perf_ns_link_info *ns_link_info; 1123 u32 nr_namespaces, idx; 1124 1125 ns_link_info = event->namespaces.link_info; 1126 nr_namespaces = event->namespaces.nr_namespaces; 1127 1128 ret += fprintf(fp, " %d/%d - nr_namespaces: %u\n\t\t[", 1129 event->namespaces.pid, 1130 event->namespaces.tid, 1131 nr_namespaces); 1132 1133 for (idx = 0; idx < nr_namespaces; idx++) { 1134 if (idx && (idx % 4 == 0)) 1135 ret += fprintf(fp, "\n\t\t "); 1136 1137 ret += fprintf(fp, "%u/%s: %" PRIu64 "/%#" PRIx64 "%s", idx, 1138 perf_ns__name(idx), (u64)ns_link_info[idx].dev, 1139 (u64)ns_link_info[idx].ino, 1140 ((idx + 1) != nr_namespaces) ? ", " : "]\n"); 1141 } 1142 1143 return ret; 1144 } 1145 1146 int perf_event__process_comm(struct perf_tool *tool __maybe_unused, 1147 union perf_event *event, 1148 struct perf_sample *sample, 1149 struct machine *machine) 1150 { 1151 return machine__process_comm_event(machine, event, sample); 1152 } 1153 1154 int perf_event__process_namespaces(struct perf_tool *tool __maybe_unused, 1155 union perf_event *event, 1156 struct perf_sample *sample, 1157 struct machine *machine) 1158 { 1159 return machine__process_namespaces_event(machine, event, sample); 1160 } 1161 1162 int perf_event__process_lost(struct perf_tool *tool __maybe_unused, 1163 union perf_event *event, 1164 struct perf_sample *sample, 1165 struct machine *machine) 1166 { 1167 return machine__process_lost_event(machine, event, sample); 1168 } 1169 1170 int perf_event__process_aux(struct perf_tool *tool __maybe_unused, 1171 union perf_event *event, 1172 struct perf_sample *sample __maybe_unused, 1173 struct machine *machine) 1174 { 1175 return machine__process_aux_event(machine, event); 1176 } 1177 1178 int perf_event__process_itrace_start(struct perf_tool *tool __maybe_unused, 1179 union perf_event *event, 1180 struct perf_sample *sample __maybe_unused, 1181 struct machine *machine) 1182 { 1183 return machine__process_itrace_start_event(machine, event); 1184 } 1185 1186 int perf_event__process_lost_samples(struct perf_tool *tool __maybe_unused, 1187 union perf_event *event, 1188 struct perf_sample *sample, 1189 struct machine *machine) 1190 { 1191 return machine__process_lost_samples_event(machine, event, sample); 1192 } 1193 1194 int perf_event__process_switch(struct perf_tool *tool __maybe_unused, 1195 union perf_event *event, 1196 struct perf_sample *sample __maybe_unused, 1197 struct machine *machine) 1198 { 1199 return machine__process_switch_event(machine, event); 1200 } 1201 1202 size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp) 1203 { 1204 return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %c %s\n", 1205 event->mmap.pid, event->mmap.tid, event->mmap.start, 1206 event->mmap.len, event->mmap.pgoff, 1207 (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x', 1208 event->mmap.filename); 1209 } 1210 1211 size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp) 1212 { 1213 return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 1214 " %02x:%02x %"PRIu64" %"PRIu64"]: %c%c%c%c %s\n", 1215 event->mmap2.pid, event->mmap2.tid, event->mmap2.start, 1216 event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj, 1217 event->mmap2.min, event->mmap2.ino, 1218 event->mmap2.ino_generation, 1219 (event->mmap2.prot & PROT_READ) ? 'r' : '-', 1220 (event->mmap2.prot & PROT_WRITE) ? 'w' : '-', 1221 (event->mmap2.prot & PROT_EXEC) ? 'x' : '-', 1222 (event->mmap2.flags & MAP_SHARED) ? 's' : 'p', 1223 event->mmap2.filename); 1224 } 1225 1226 size_t perf_event__fprintf_thread_map(union perf_event *event, FILE *fp) 1227 { 1228 struct thread_map *threads = thread_map__new_event(&event->thread_map); 1229 size_t ret; 1230 1231 ret = fprintf(fp, " nr: "); 1232 1233 if (threads) 1234 ret += thread_map__fprintf(threads, fp); 1235 else 1236 ret += fprintf(fp, "failed to get threads from event\n"); 1237 1238 thread_map__put(threads); 1239 return ret; 1240 } 1241 1242 size_t perf_event__fprintf_cpu_map(union perf_event *event, FILE *fp) 1243 { 1244 struct cpu_map *cpus = cpu_map__new_data(&event->cpu_map.data); 1245 size_t ret; 1246 1247 ret = fprintf(fp, ": "); 1248 1249 if (cpus) 1250 ret += cpu_map__fprintf(cpus, fp); 1251 else 1252 ret += fprintf(fp, "failed to get cpumap from event\n"); 1253 1254 cpu_map__put(cpus); 1255 return ret; 1256 } 1257 1258 int perf_event__process_mmap(struct perf_tool *tool __maybe_unused, 1259 union perf_event *event, 1260 struct perf_sample *sample, 1261 struct machine *machine) 1262 { 1263 return machine__process_mmap_event(machine, event, sample); 1264 } 1265 1266 int perf_event__process_mmap2(struct perf_tool *tool __maybe_unused, 1267 union perf_event *event, 1268 struct perf_sample *sample, 1269 struct machine *machine) 1270 { 1271 return machine__process_mmap2_event(machine, event, sample); 1272 } 1273 1274 size_t perf_event__fprintf_task(union perf_event *event, FILE *fp) 1275 { 1276 return fprintf(fp, "(%d:%d):(%d:%d)\n", 1277 event->fork.pid, event->fork.tid, 1278 event->fork.ppid, event->fork.ptid); 1279 } 1280 1281 int perf_event__process_fork(struct perf_tool *tool __maybe_unused, 1282 union perf_event *event, 1283 struct perf_sample *sample, 1284 struct machine *machine) 1285 { 1286 return machine__process_fork_event(machine, event, sample); 1287 } 1288 1289 int perf_event__process_exit(struct perf_tool *tool __maybe_unused, 1290 union perf_event *event, 1291 struct perf_sample *sample, 1292 struct machine *machine) 1293 { 1294 return machine__process_exit_event(machine, event, sample); 1295 } 1296 1297 size_t perf_event__fprintf_aux(union perf_event *event, FILE *fp) 1298 { 1299 return fprintf(fp, " offset: %#"PRIx64" size: %#"PRIx64" flags: %#"PRIx64" [%s%s%s]\n", 1300 event->aux.aux_offset, event->aux.aux_size, 1301 event->aux.flags, 1302 event->aux.flags & PERF_AUX_FLAG_TRUNCATED ? "T" : "", 1303 event->aux.flags & PERF_AUX_FLAG_OVERWRITE ? "O" : "", 1304 event->aux.flags & PERF_AUX_FLAG_PARTIAL ? "P" : ""); 1305 } 1306 1307 size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp) 1308 { 1309 return fprintf(fp, " pid: %u tid: %u\n", 1310 event->itrace_start.pid, event->itrace_start.tid); 1311 } 1312 1313 size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp) 1314 { 1315 bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT; 1316 const char *in_out = out ? "OUT" : "IN "; 1317 1318 if (event->header.type == PERF_RECORD_SWITCH) 1319 return fprintf(fp, " %s\n", in_out); 1320 1321 return fprintf(fp, " %s %s pid/tid: %5u/%-5u\n", 1322 in_out, out ? "next" : "prev", 1323 event->context_switch.next_prev_pid, 1324 event->context_switch.next_prev_tid); 1325 } 1326 1327 size_t perf_event__fprintf(union perf_event *event, FILE *fp) 1328 { 1329 size_t ret = fprintf(fp, "PERF_RECORD_%s", 1330 perf_event__name(event->header.type)); 1331 1332 switch (event->header.type) { 1333 case PERF_RECORD_COMM: 1334 ret += perf_event__fprintf_comm(event, fp); 1335 break; 1336 case PERF_RECORD_FORK: 1337 case PERF_RECORD_EXIT: 1338 ret += perf_event__fprintf_task(event, fp); 1339 break; 1340 case PERF_RECORD_MMAP: 1341 ret += perf_event__fprintf_mmap(event, fp); 1342 break; 1343 case PERF_RECORD_NAMESPACES: 1344 ret += perf_event__fprintf_namespaces(event, fp); 1345 break; 1346 case PERF_RECORD_MMAP2: 1347 ret += perf_event__fprintf_mmap2(event, fp); 1348 break; 1349 case PERF_RECORD_AUX: 1350 ret += perf_event__fprintf_aux(event, fp); 1351 break; 1352 case PERF_RECORD_ITRACE_START: 1353 ret += perf_event__fprintf_itrace_start(event, fp); 1354 break; 1355 case PERF_RECORD_SWITCH: 1356 case PERF_RECORD_SWITCH_CPU_WIDE: 1357 ret += perf_event__fprintf_switch(event, fp); 1358 break; 1359 default: 1360 ret += fprintf(fp, "\n"); 1361 } 1362 1363 return ret; 1364 } 1365 1366 int perf_event__process(struct perf_tool *tool __maybe_unused, 1367 union perf_event *event, 1368 struct perf_sample *sample, 1369 struct machine *machine) 1370 { 1371 return machine__process_event(machine, event, sample); 1372 } 1373 1374 void thread__find_addr_map(struct thread *thread, u8 cpumode, 1375 enum map_type type, u64 addr, 1376 struct addr_location *al) 1377 { 1378 struct map_groups *mg = thread->mg; 1379 struct machine *machine = mg->machine; 1380 bool load_map = false; 1381 1382 al->machine = machine; 1383 al->thread = thread; 1384 al->addr = addr; 1385 al->cpumode = cpumode; 1386 al->filtered = 0; 1387 1388 if (machine == NULL) { 1389 al->map = NULL; 1390 return; 1391 } 1392 1393 if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) { 1394 al->level = 'k'; 1395 mg = &machine->kmaps; 1396 load_map = true; 1397 } else if (cpumode == PERF_RECORD_MISC_USER && perf_host) { 1398 al->level = '.'; 1399 } else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) { 1400 al->level = 'g'; 1401 mg = &machine->kmaps; 1402 load_map = true; 1403 } else if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest) { 1404 al->level = 'u'; 1405 } else { 1406 al->level = 'H'; 1407 al->map = NULL; 1408 1409 if ((cpumode == PERF_RECORD_MISC_GUEST_USER || 1410 cpumode == PERF_RECORD_MISC_GUEST_KERNEL) && 1411 !perf_guest) 1412 al->filtered |= (1 << HIST_FILTER__GUEST); 1413 if ((cpumode == PERF_RECORD_MISC_USER || 1414 cpumode == PERF_RECORD_MISC_KERNEL) && 1415 !perf_host) 1416 al->filtered |= (1 << HIST_FILTER__HOST); 1417 1418 return; 1419 } 1420 try_again: 1421 al->map = map_groups__find(mg, type, al->addr); 1422 if (al->map == NULL) { 1423 /* 1424 * If this is outside of all known maps, and is a negative 1425 * address, try to look it up in the kernel dso, as it might be 1426 * a vsyscall or vdso (which executes in user-mode). 1427 * 1428 * XXX This is nasty, we should have a symbol list in the 1429 * "[vdso]" dso, but for now lets use the old trick of looking 1430 * in the whole kernel symbol list. 1431 */ 1432 if (cpumode == PERF_RECORD_MISC_USER && machine && 1433 mg != &machine->kmaps && 1434 machine__kernel_ip(machine, al->addr)) { 1435 mg = &machine->kmaps; 1436 load_map = true; 1437 goto try_again; 1438 } 1439 } else { 1440 /* 1441 * Kernel maps might be changed when loading symbols so loading 1442 * must be done prior to using kernel maps. 1443 */ 1444 if (load_map) 1445 map__load(al->map); 1446 al->addr = al->map->map_ip(al->map, al->addr); 1447 } 1448 } 1449 1450 void thread__find_addr_location(struct thread *thread, 1451 u8 cpumode, enum map_type type, u64 addr, 1452 struct addr_location *al) 1453 { 1454 thread__find_addr_map(thread, cpumode, type, addr, al); 1455 if (al->map != NULL) 1456 al->sym = map__find_symbol(al->map, al->addr); 1457 else 1458 al->sym = NULL; 1459 } 1460 1461 /* 1462 * Callers need to drop the reference to al->thread, obtained in 1463 * machine__findnew_thread() 1464 */ 1465 int machine__resolve(struct machine *machine, struct addr_location *al, 1466 struct perf_sample *sample) 1467 { 1468 struct thread *thread = machine__findnew_thread(machine, sample->pid, 1469 sample->tid); 1470 1471 if (thread == NULL) 1472 return -1; 1473 1474 dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid); 1475 /* 1476 * Have we already created the kernel maps for this machine? 1477 * 1478 * This should have happened earlier, when we processed the kernel MMAP 1479 * events, but for older perf.data files there was no such thing, so do 1480 * it now. 1481 */ 1482 if (sample->cpumode == PERF_RECORD_MISC_KERNEL && 1483 machine__kernel_map(machine) == NULL) 1484 machine__create_kernel_maps(machine); 1485 1486 thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, sample->ip, al); 1487 dump_printf(" ...... dso: %s\n", 1488 al->map ? al->map->dso->long_name : 1489 al->level == 'H' ? "[hypervisor]" : "<not found>"); 1490 1491 if (thread__is_filtered(thread)) 1492 al->filtered |= (1 << HIST_FILTER__THREAD); 1493 1494 al->sym = NULL; 1495 al->cpu = sample->cpu; 1496 al->socket = -1; 1497 1498 if (al->cpu >= 0) { 1499 struct perf_env *env = machine->env; 1500 1501 if (env && env->cpu) 1502 al->socket = env->cpu[al->cpu].socket_id; 1503 } 1504 1505 if (al->map) { 1506 struct dso *dso = al->map->dso; 1507 1508 if (symbol_conf.dso_list && 1509 (!dso || !(strlist__has_entry(symbol_conf.dso_list, 1510 dso->short_name) || 1511 (dso->short_name != dso->long_name && 1512 strlist__has_entry(symbol_conf.dso_list, 1513 dso->long_name))))) { 1514 al->filtered |= (1 << HIST_FILTER__DSO); 1515 } 1516 1517 al->sym = map__find_symbol(al->map, al->addr); 1518 } 1519 1520 if (symbol_conf.sym_list && 1521 (!al->sym || !strlist__has_entry(symbol_conf.sym_list, 1522 al->sym->name))) { 1523 al->filtered |= (1 << HIST_FILTER__SYMBOL); 1524 } 1525 1526 return 0; 1527 } 1528 1529 /* 1530 * The preprocess_sample method will return with reference counts for the 1531 * in it, when done using (and perhaps getting ref counts if needing to 1532 * keep a pointer to one of those entries) it must be paired with 1533 * addr_location__put(), so that the refcounts can be decremented. 1534 */ 1535 void addr_location__put(struct addr_location *al) 1536 { 1537 thread__zput(al->thread); 1538 } 1539 1540 bool is_bts_event(struct perf_event_attr *attr) 1541 { 1542 return attr->type == PERF_TYPE_HARDWARE && 1543 (attr->config & PERF_COUNT_HW_BRANCH_INSTRUCTIONS) && 1544 attr->sample_period == 1; 1545 } 1546 1547 bool sample_addr_correlates_sym(struct perf_event_attr *attr) 1548 { 1549 if (attr->type == PERF_TYPE_SOFTWARE && 1550 (attr->config == PERF_COUNT_SW_PAGE_FAULTS || 1551 attr->config == PERF_COUNT_SW_PAGE_FAULTS_MIN || 1552 attr->config == PERF_COUNT_SW_PAGE_FAULTS_MAJ)) 1553 return true; 1554 1555 if (is_bts_event(attr)) 1556 return true; 1557 1558 return false; 1559 } 1560 1561 void thread__resolve(struct thread *thread, struct addr_location *al, 1562 struct perf_sample *sample) 1563 { 1564 thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, sample->addr, al); 1565 if (!al->map) 1566 thread__find_addr_map(thread, sample->cpumode, MAP__VARIABLE, 1567 sample->addr, al); 1568 1569 al->cpu = sample->cpu; 1570 al->sym = NULL; 1571 1572 if (al->map) 1573 al->sym = map__find_symbol(al->map, al->addr); 1574 } 1575