// SPDX-License-Identifier: GPL-2.0
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <limits.h>	/* UINT_MAX */
#include <pthread.h>	/* pthread_create(), used by the threaded synthesizer below */
#include <linux/kernel.h>
#include <linux/types.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
#include <api/fs/fs.h>
#include <linux/perf_event.h>
#include "event.h"
#include "debug.h"
#include "hist.h"
#include "machine.h"
#include "sort.h"
#include "string2.h"
#include "strlist.h"
#include "thread.h"
#include "thread_map.h"
#include "sane_ctype.h"
#include "symbol/kallsyms.h"
#include "asm/bug.h"
#include "stat.h"

#define DEFAULT_PROC_MAP_PARSE_TIMEOUT 500

static const char *perf_event__names[] = {
	[0]					= "TOTAL",
	[PERF_RECORD_MMAP]			= "MMAP",
	[PERF_RECORD_MMAP2]			= "MMAP2",
	[PERF_RECORD_LOST]			= "LOST",
	[PERF_RECORD_COMM]			= "COMM",
	[PERF_RECORD_EXIT]			= "EXIT",
	[PERF_RECORD_THROTTLE]			= "THROTTLE",
	[PERF_RECORD_UNTHROTTLE]		= "UNTHROTTLE",
	[PERF_RECORD_FORK]			= "FORK",
	[PERF_RECORD_READ]			= "READ",
	[PERF_RECORD_SAMPLE]			= "SAMPLE",
	[PERF_RECORD_AUX]			= "AUX",
	[PERF_RECORD_ITRACE_START]		= "ITRACE_START",
	[PERF_RECORD_LOST_SAMPLES]		= "LOST_SAMPLES",
	[PERF_RECORD_SWITCH]			= "SWITCH",
	[PERF_RECORD_SWITCH_CPU_WIDE]		= "SWITCH_CPU_WIDE",
	[PERF_RECORD_NAMESPACES]		= "NAMESPACES",
	[PERF_RECORD_HEADER_ATTR]		= "ATTR",
	[PERF_RECORD_HEADER_EVENT_TYPE]		= "EVENT_TYPE",
	[PERF_RECORD_HEADER_TRACING_DATA]	= "TRACING_DATA",
	[PERF_RECORD_HEADER_BUILD_ID]		= "BUILD_ID",
	[PERF_RECORD_FINISHED_ROUND]		= "FINISHED_ROUND",
	[PERF_RECORD_ID_INDEX]			= "ID_INDEX",
	[PERF_RECORD_AUXTRACE_INFO]		= "AUXTRACE_INFO",
	[PERF_RECORD_AUXTRACE]			= "AUXTRACE",
	[PERF_RECORD_AUXTRACE_ERROR]		= "AUXTRACE_ERROR",
	[PERF_RECORD_THREAD_MAP]		= "THREAD_MAP",
	[PERF_RECORD_CPU_MAP]			= "CPU_MAP",
	[PERF_RECORD_STAT_CONFIG]		= "STAT_CONFIG",
	[PERF_RECORD_STAT]			= "STAT",
	[PERF_RECORD_STAT_ROUND]		= "STAT_ROUND",
	[PERF_RECORD_EVENT_UPDATE]		= "EVENT_UPDATE",
	[PERF_RECORD_TIME_CONV]			= "TIME_CONV",
	[PERF_RECORD_HEADER_FEATURE]		= "FEATURE",
};

static const char *perf_ns__names[] = {
	[NET_NS_INDEX]		= "net",
	[UTS_NS_INDEX]		= "uts",
	[IPC_NS_INDEX]		= "ipc",
	[PID_NS_INDEX]		= "pid",
	[USER_NS_INDEX]		= "user",
	[MNT_NS_INDEX]		= "mnt",
	[CGROUP_NS_INDEX]	= "cgroup",
};

unsigned int proc_map_timeout = DEFAULT_PROC_MAP_PARSE_TIMEOUT;

const char *perf_event__name(unsigned int id)
{
	if (id >= ARRAY_SIZE(perf_event__names))
		return "INVALID";
	if (!perf_event__names[id])
		return "UNKNOWN";
	return perf_event__names[id];
}

static const char *perf_ns__name(unsigned int id)
{
	if (id >= ARRAY_SIZE(perf_ns__names))
		return "UNKNOWN";
	return perf_ns__names[id];
}

int perf_tool__process_synth_event(struct perf_tool *tool,
				   union perf_event *event,
				   struct machine *machine,
				   perf_event__handler_t process)
{
	struct perf_sample synth_sample = {
		.pid	   = -1,
		.tid	   = -1,
		.time	   = -1,
		.stream_id = -1,
		.cpu	   = -1,
		.period	   = 1,
		.cpumode   = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK,
	};

	return process(tool, event, &synth_sample, machine);
}
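
/*
 * Every record synthesized in this file is handed to a user supplied
 * perf_event__handler_t.  A minimal sketch of such a callback (purely
 * illustrative, not part of this file; real consumers write the record out
 * or feed it into a session):
 *
 *	static int my_process(struct perf_tool *tool, union perf_event *event,
 *			      struct perf_sample *sample, struct machine *machine)
 *	{
 *		// e.g. append event->header.size bytes at 'event' to perf.data
 *		return 0;	// a non-zero return aborts the synthesis loop
 *	}
 */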

/*
 * Assumes that the first 4095 bytes of /proc/<pid>/status contain
 * the comm, tgid and ppid.
 */
static int perf_event__get_comm_ids(pid_t pid, char *comm, size_t len,
				    pid_t *tgid, pid_t *ppid)
{
	char filename[PATH_MAX];
	char bf[4096];
	int fd;
	size_t size = 0;
	ssize_t n;
	char *name, *tgids, *ppids;

	*tgid = -1;
	*ppid = -1;

	snprintf(filename, sizeof(filename), "/proc/%d/status", pid);

	fd = open(filename, O_RDONLY);
	if (fd < 0) {
		pr_debug("couldn't open %s\n", filename);
		return -1;
	}

	n = read(fd, bf, sizeof(bf) - 1);
	close(fd);
	if (n <= 0) {
		pr_warning("Couldn't get COMM, tgid and ppid for pid %d\n",
			   pid);
		return -1;
	}
	bf[n] = '\0';

	name = strstr(bf, "Name:");
	tgids = strstr(bf, "Tgid:");
	ppids = strstr(bf, "PPid:");

	if (name) {
		char *nl;

		name += 5;	/* strlen("Name:") */
		name = ltrim(name);

		nl = strchr(name, '\n');
		if (nl)
			*nl = '\0';

		size = strlen(name);
		if (size >= len)
			size = len - 1;
		memcpy(comm, name, size);
		comm[size] = '\0';
	} else {
		pr_debug("Name: string not found for pid %d\n", pid);
	}

	if (tgids) {
		tgids += 5;	/* strlen("Tgid:") */
		*tgid = atoi(tgids);
	} else {
		pr_debug("Tgid: string not found for pid %d\n", pid);
	}

	if (ppids) {
		ppids += 5;	/* strlen("PPid:") */
		*ppid = atoi(ppids);
	} else {
		pr_debug("PPid: string not found for pid %d\n", pid);
	}

	return 0;
}
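
/*
 * For reference, the three fields parsed above show up in
 * /proc/<pid>/status as, e.g. (excerpt, format per proc(5)):
 *
 *	Name:	cat
 *	Tgid:	4242
 *	PPid:	4100
 */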

static int perf_event__prepare_comm(union perf_event *event, pid_t pid,
				    struct machine *machine,
				    pid_t *tgid, pid_t *ppid)
{
	size_t size;

	*ppid = -1;

	memset(&event->comm, 0, sizeof(event->comm));

	if (machine__is_host(machine)) {
		if (perf_event__get_comm_ids(pid, event->comm.comm,
					     sizeof(event->comm.comm),
					     tgid, ppid) != 0) {
			return -1;
		}
	} else {
		*tgid = machine->pid;
	}

	if (*tgid < 0)
		return -1;

	event->comm.pid = *tgid;
	event->comm.header.type = PERF_RECORD_COMM;

	size = strlen(event->comm.comm) + 1;
	size = PERF_ALIGN(size, sizeof(u64));
	memset(event->comm.comm + size, 0, machine->id_hdr_size);
	event->comm.header.size = (sizeof(event->comm) -
				   (sizeof(event->comm.comm) - size) +
				   machine->id_hdr_size);
	event->comm.tid = pid;

	return 0;
}

pid_t perf_event__synthesize_comm(struct perf_tool *tool,
				  union perf_event *event, pid_t pid,
				  perf_event__handler_t process,
				  struct machine *machine)
{
	pid_t tgid, ppid;

	if (perf_event__prepare_comm(event, pid, machine, &tgid, &ppid) != 0)
		return -1;

	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
		return -1;

	return tgid;
}

static void perf_event__get_ns_link_info(pid_t pid, const char *ns,
					 struct perf_ns_link_info *ns_link_info)
{
	struct stat64 st;
	char proc_ns[128];

	sprintf(proc_ns, "/proc/%u/ns/%s", pid, ns);
	if (stat64(proc_ns, &st) == 0) {
		ns_link_info->dev = st.st_dev;
		ns_link_info->ino = st.st_ino;
	}
}

int perf_event__synthesize_namespaces(struct perf_tool *tool,
				      union perf_event *event,
				      pid_t pid, pid_t tgid,
				      perf_event__handler_t process,
				      struct machine *machine)
{
	u32 idx;
	struct perf_ns_link_info *ns_link_info;

	if (!tool || !tool->namespace_events)
		return 0;

	memset(&event->namespaces, 0, (sizeof(event->namespaces) +
	       (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
	       machine->id_hdr_size));

	event->namespaces.pid = tgid;
	event->namespaces.tid = pid;

	event->namespaces.nr_namespaces = NR_NAMESPACES;

	ns_link_info = event->namespaces.link_info;

	for (idx = 0; idx < event->namespaces.nr_namespaces; idx++)
		perf_event__get_ns_link_info(pid, perf_ns__name(idx),
					     &ns_link_info[idx]);

	event->namespaces.header.type = PERF_RECORD_NAMESPACES;

	event->namespaces.header.size = (sizeof(event->namespaces) +
					 (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
					 machine->id_hdr_size);

	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
		return -1;

	return 0;
}

static int perf_event__synthesize_fork(struct perf_tool *tool,
				       union perf_event *event,
				       pid_t pid, pid_t tgid, pid_t ppid,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);

	/*
	 * For the main thread, set the parent to the ppid from the status
	 * file.  For other threads, set the parent pid to the main thread,
	 * i.e. assume the main thread spawns all threads in a process.
	 */
	if (tgid == pid) {
		event->fork.ppid = ppid;
		event->fork.ptid = ppid;
	} else {
		event->fork.ppid = tgid;
		event->fork.ptid = tgid;
	}
	event->fork.pid = tgid;
	event->fork.tid = pid;
	event->fork.header.type = PERF_RECORD_FORK;
	event->fork.header.misc = PERF_RECORD_MISC_FORK_EXEC;

	event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);

	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
		return -1;

	return 0;
}
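
/*
 * The synthesized records above all use the same variable-size layout:
 * the trailing string is shrunk to its u64-aligned length and the
 * per-session sample-id space is appended.  Worked example (illustrative
 * numbers): for comm "cat", strlen("cat") + 1 = 4 is PERF_ALIGN()ed up
 * to 8, so:
 *
 *	header.size = sizeof(event->comm)
 *		    - (sizeof(event->comm.comm) - 8)
 *		    + machine->id_hdr_size;
 */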

int perf_event__synthesize_mmap_events(struct perf_tool *tool,
				       union perf_event *event,
				       pid_t pid, pid_t tgid,
				       perf_event__handler_t process,
				       struct machine *machine,
				       bool mmap_data)
{
	char filename[PATH_MAX];
	FILE *fp;
	unsigned long long t;
	bool truncation = false;
	unsigned long long timeout = proc_map_timeout * 1000000ULL;
	int rc = 0;
	const char *hugetlbfs_mnt = hugetlbfs__mountpoint();
	int hugetlbfs_mnt_len = hugetlbfs_mnt ? strlen(hugetlbfs_mnt) : 0;

	if (machine__is_default_guest(machine))
		return 0;

	snprintf(filename, sizeof(filename), "%s/proc/%d/task/%d/maps",
		 machine->root_dir, pid, pid);

	fp = fopen(filename, "r");
	if (fp == NULL) {
		/*
		 * We raced with a task exiting - just return:
		 */
		pr_debug("couldn't open %s\n", filename);
		return -1;
	}

	event->header.type = PERF_RECORD_MMAP2;
	t = rdclock();

	while (1) {
		char bf[BUFSIZ];
		char prot[5];
		char execname[PATH_MAX];
		char anonstr[] = "//anon";
		unsigned int ino;
		size_t size;
		ssize_t n;

		if (fgets(bf, sizeof(bf), fp) == NULL)
			break;

		if ((rdclock() - t) > timeout) {
			pr_warning("Reading %s timed out. "
				   "You may want to increase "
				   "the time limit with --proc-map-timeout\n",
				   filename);
			truncation = true;
			goto out;
		}

		/* ensure null termination since stack will be reused. */
		strcpy(execname, "");

		/* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */
		n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %[^\n]\n",
			   &event->mmap2.start, &event->mmap2.len, prot,
			   &event->mmap2.pgoff, &event->mmap2.maj,
			   &event->mmap2.min,
			   &ino, execname);

		/*
		 * Anon maps don't have the execname.
		 */
		if (n < 7)
			continue;

		event->mmap2.ino = (u64)ino;

		/*
		 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
		 */
		if (machine__is_host(machine))
			event->header.misc = PERF_RECORD_MISC_USER;
		else
			event->header.misc = PERF_RECORD_MISC_GUEST_USER;

		/* map protection and flags bits */
		event->mmap2.prot = 0;
		event->mmap2.flags = 0;
		if (prot[0] == 'r')
			event->mmap2.prot |= PROT_READ;
		if (prot[1] == 'w')
			event->mmap2.prot |= PROT_WRITE;
		if (prot[2] == 'x')
			event->mmap2.prot |= PROT_EXEC;

		if (prot[3] == 's')
			event->mmap2.flags |= MAP_SHARED;
		else
			event->mmap2.flags |= MAP_PRIVATE;

		if (prot[2] != 'x') {
			if (!mmap_data || prot[0] != 'r')
				continue;

			event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
		}

out:
		if (truncation)
			event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT;

		if (!strcmp(execname, ""))
			strcpy(execname, anonstr);

		if (hugetlbfs_mnt_len &&
		    !strncmp(execname, hugetlbfs_mnt, hugetlbfs_mnt_len)) {
			strcpy(execname, anonstr);
			event->mmap2.flags |= MAP_HUGETLB;
		}

		size = strlen(execname) + 1;
		memcpy(event->mmap2.filename, execname, size);
		size = PERF_ALIGN(size, sizeof(u64));
		event->mmap2.len -= event->mmap2.start;
		event->mmap2.header.size = (sizeof(event->mmap2) -
					    (sizeof(event->mmap2.filename) - size));
		memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
		event->mmap2.header.size += machine->id_hdr_size;
		event->mmap2.pid = tgid;
		event->mmap2.tid = pid;

		if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
			rc = -1;
			break;
		}

		if (truncation)
			break;
	}

	fclose(fp);
	return rc;
}
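
/*
 * Example of the perms decoding above (illustrative): a maps line with
 * "r-xp" yields prot == PROT_READ | PROT_EXEC and flags == MAP_PRIVATE,
 * while "rw-s" yields prot == PROT_READ | PROT_WRITE and flags == MAP_SHARED.
 */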

int perf_event__synthesize_modules(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine)
{
	int rc = 0;
	struct map *pos;
	struct maps *maps = machine__kernel_maps(machine);
	union perf_event *event = zalloc((sizeof(event->mmap) +
					  machine->id_hdr_size));
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for kernel modules\n");
		return -1;
	}

	event->header.type = PERF_RECORD_MMAP;

	/*
	 * kernel uses 0 for user space maps, see kernel/perf_event.c
	 * __perf_event_mmap
	 */
	if (machine__is_host(machine))
		event->header.misc = PERF_RECORD_MISC_KERNEL;
	else
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;

	for (pos = maps__first(maps); pos; pos = map__next(pos)) {
		size_t size;

		if (!__map__is_kmodule(pos))
			continue;

		size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
		event->mmap.header.type = PERF_RECORD_MMAP;
		event->mmap.header.size = (sizeof(event->mmap) -
					   (sizeof(event->mmap.filename) - size));
		memset(event->mmap.filename + size, 0, machine->id_hdr_size);
		event->mmap.header.size += machine->id_hdr_size;
		event->mmap.start = pos->start;
		event->mmap.len = pos->end - pos->start;
		event->mmap.pid = machine->pid;

		memcpy(event->mmap.filename, pos->dso->long_name,
		       pos->dso->long_name_len + 1);
		if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
			rc = -1;
			break;
		}
	}

	free(event);
	return rc;
}

static int __event__synthesize_thread(union perf_event *comm_event,
				      union perf_event *mmap_event,
				      union perf_event *fork_event,
				      union perf_event *namespaces_event,
				      pid_t pid, int full,
				      perf_event__handler_t process,
				      struct perf_tool *tool,
				      struct machine *machine,
				      bool mmap_data)
{
	char filename[PATH_MAX];
	DIR *tasks;
	struct dirent *dirent;
	pid_t tgid, ppid;
	int rc = 0;

	/* special case: only send one comm event using passed in pid */
	if (!full) {
		tgid = perf_event__synthesize_comm(tool, comm_event, pid,
						   process, machine);

		if (tgid == -1)
			return -1;

		if (perf_event__synthesize_namespaces(tool, namespaces_event, pid,
						      tgid, process, machine) < 0)
			return -1;

		/*
		 * send mmap only for thread group leader
		 * see thread__init_map_groups
		 */
		if (pid == tgid &&
		    perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
						       process, machine, mmap_data))
			return -1;

		return 0;
	}

	if (machine__is_default_guest(machine))
		return 0;

	snprintf(filename, sizeof(filename), "%s/proc/%d/task",
		 machine->root_dir, pid);

	tasks = opendir(filename);
	if (tasks == NULL) {
		pr_debug("couldn't open %s\n", filename);
		return 0;
	}

	while ((dirent = readdir(tasks)) != NULL) {
		char *end;
		pid_t _pid;

		_pid = strtol(dirent->d_name, &end, 10);
		if (*end)
			continue;

		rc = -1;
		if (perf_event__prepare_comm(comm_event, _pid, machine,
					     &tgid, &ppid) != 0)
			break;

		if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
						ppid, process, machine) < 0)
			break;

		if (perf_event__synthesize_namespaces(tool, namespaces_event, _pid,
						      tgid, process, machine) < 0)
			break;

		/*
		 * Send the prepared comm event
		 */
		if (perf_tool__process_synth_event(tool, comm_event, machine, process) != 0)
			break;

		rc = 0;
		if (_pid == pid) {
			/* process the parent's maps too */
			rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
								process, machine, mmap_data);
			if (rc)
				break;
		}
	}

	closedir(tasks);
	return rc;
}
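
/*
 * To summarize the "full" path above: for every task in /proc/<pid>/task/
 * it emits a COMM record, a FORK record pointing at the parent and a
 * NAMESPACES record, plus the MMAP2 records for the maps of the thread
 * group leader.
 */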

int perf_event__synthesize_thread_map(struct perf_tool *tool,
				      struct thread_map *threads,
				      perf_event__handler_t process,
				      struct machine *machine,
				      bool mmap_data)
{
	union perf_event *comm_event, *mmap_event, *fork_event;
	union perf_event *namespaces_event;
	int err = -1, thread, j;

	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
	if (fork_event == NULL)
		goto out_free_mmap;

	namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
				  (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
				  machine->id_hdr_size);
	if (namespaces_event == NULL)
		goto out_free_fork;

	err = 0;
	for (thread = 0; thread < threads->nr; ++thread) {
		if (__event__synthesize_thread(comm_event, mmap_event,
					       fork_event, namespaces_event,
					       thread_map__pid(threads, thread), 0,
					       process, tool, machine,
					       mmap_data)) {
			err = -1;
			break;
		}

		/*
		 * comm.pid is set to thread group id by
		 * perf_event__synthesize_comm
		 */
		if ((int) comm_event->comm.pid != thread_map__pid(threads, thread)) {
			bool need_leader = true;

			/* is thread group leader in thread_map? */
			for (j = 0; j < threads->nr; ++j) {
				if ((int) comm_event->comm.pid == thread_map__pid(threads, j)) {
					need_leader = false;
					break;
				}
			}

			/* if not, generate events for it */
			if (need_leader &&
			    __event__synthesize_thread(comm_event, mmap_event,
						       fork_event, namespaces_event,
						       comm_event->comm.pid, 0,
						       process, tool, machine,
						       mmap_data)) {
				err = -1;
				break;
			}
		}
	}
	free(namespaces_event);
out_free_fork:
	free(fork_event);
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}
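
/*
 * A minimal usage sketch for the function above (illustrative; error
 * handling elided, tool/process/machine assumed to be set up by the
 * caller):
 *
 *	struct thread_map *threads = thread_map__new_by_pid(pid);
 *
 *	if (threads) {
 *		perf_event__synthesize_thread_map(tool, threads, process,
 *						  machine, false);
 *		thread_map__put(threads);
 *	}
 */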

static int __perf_event__synthesize_threads(struct perf_tool *tool,
					    perf_event__handler_t process,
					    struct machine *machine,
					    bool mmap_data,
					    struct dirent **dirent,
					    int start,
					    int num)
{
	union perf_event *comm_event, *mmap_event, *fork_event;
	union perf_event *namespaces_event;
	int err = -1;
	char *end;
	pid_t pid;
	int i;

	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
	if (fork_event == NULL)
		goto out_free_mmap;

	namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
				  (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
				  machine->id_hdr_size);
	if (namespaces_event == NULL)
		goto out_free_fork;

	for (i = start; i < start + num; i++) {
		if (!isdigit(dirent[i]->d_name[0]))
			continue;

		pid = (pid_t)strtol(dirent[i]->d_name, &end, 10);
		/* only interested in proper numerical dirents */
		if (*end)
			continue;
		/*
		 * We may race with exiting thread, so don't stop just because
		 * one thread couldn't be synthesized.
		 */
		__event__synthesize_thread(comm_event, mmap_event, fork_event,
					   namespaces_event, pid, 1, process,
					   tool, machine, mmap_data);
	}
	err = 0;

	free(namespaces_event);
out_free_fork:
	free(fork_event);
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}

struct synthesize_threads_arg {
	struct perf_tool *tool;
	perf_event__handler_t process;
	struct machine *machine;
	bool mmap_data;
	struct dirent **dirent;
	int num;
	int start;
};

static void *synthesize_threads_worker(void *arg)
{
	struct synthesize_threads_arg *args = arg;

	__perf_event__synthesize_threads(args->tool, args->process,
					 args->machine, args->mmap_data,
					 args->dirent,
					 args->start, args->num);
	return NULL;
}
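
/*
 * The function below splits the /proc dirents evenly across the worker
 * threads, giving the first (n % thread_nr) workers one extra entry each.
 * Worked example (illustrative): n = 10 dirents and thread_nr = 4 yields
 * chunks of 3, 3, 2 and 2 entries starting at indexes 0, 3, 6 and 8.
 */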

int perf_event__synthesize_threads(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine,
				   bool mmap_data,
				   unsigned int nr_threads_synthesize)
{
	struct synthesize_threads_arg *args = NULL;
	pthread_t *synthesize_threads = NULL;
	char proc_path[PATH_MAX];
	struct dirent **dirent;
	int num_per_thread;
	int m, n, i, j;
	int thread_nr;
	int base = 0;
	int err = -1;

	if (machine__is_default_guest(machine))
		return 0;

	snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
	n = scandir(proc_path, &dirent, 0, alphasort);
	if (n < 0)
		return err;

	if (nr_threads_synthesize == UINT_MAX)
		thread_nr = sysconf(_SC_NPROCESSORS_ONLN);
	else
		thread_nr = nr_threads_synthesize;

	if (thread_nr <= 1) {
		err = __perf_event__synthesize_threads(tool, process,
						       machine, mmap_data,
						       dirent, base, n);
		goto free_dirent;
	}
	if (thread_nr > n)
		thread_nr = n;

	synthesize_threads = calloc(thread_nr, sizeof(pthread_t));
	if (synthesize_threads == NULL)
		goto free_dirent;

	args = calloc(thread_nr, sizeof(*args));
	if (args == NULL)
		goto free_threads;

	num_per_thread = n / thread_nr;
	m = n % thread_nr;
	for (i = 0; i < thread_nr; i++) {
		args[i].tool = tool;
		args[i].process = process;
		args[i].machine = machine;
		args[i].mmap_data = mmap_data;
		args[i].dirent = dirent;
	}
	/* the first 'm' workers get one extra dirent each */
	for (i = 0; i < m; i++) {
		args[i].num = num_per_thread + 1;
		args[i].start = i * args[i].num;
	}
	if (i != 0)
		base = args[i-1].start + args[i-1].num;
	for (j = i; j < thread_nr; j++) {
		args[j].num = num_per_thread;
		args[j].start = base + (j - i) * num_per_thread;
	}

	for (i = 0; i < thread_nr; i++) {
		if (pthread_create(&synthesize_threads[i], NULL,
				   synthesize_threads_worker, &args[i]))
			goto out_join;
	}
	err = 0;
out_join:
	for (i = 0; i < thread_nr; i++)
		pthread_join(synthesize_threads[i], NULL);
	free(args);
free_threads:
	free(synthesize_threads);
free_dirent:
	for (i = 0; i < n; i++)
		free(dirent[i]);
	free(dirent);

	return err;
}

struct process_symbol_args {
	const char *name;
	u64	   start;
};

static int find_symbol_cb(void *arg, const char *name, char type,
			  u64 start)
{
	struct process_symbol_args *args = arg;

	/*
	 * Must be a function or at least an alias, as in PARISC64, where "_text" is
	 * an 'A' to the same address as "_stext".
	 */
	if (!(kallsyms__is_function(type) ||
	      type == 'A') || strcmp(name, args->name))
		return 0;

	args->start = start;
	return 1;
}

int kallsyms__get_function_start(const char *kallsyms_filename,
				 const char *symbol_name, u64 *addr)
{
	struct process_symbol_args args = { .name = symbol_name, };

	if (kallsyms__parse(kallsyms_filename, &args, find_symbol_cb) <= 0)
		return -1;

	*addr = args.start;
	return 0;
}
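
/*
 * Typical use of the helper above (illustrative sketch):
 *
 *	u64 addr;
 *
 *	if (kallsyms__get_function_start("/proc/kallsyms", "_text", &addr) == 0)
 *		pr_debug("_text starts at %#" PRIx64 "\n", addr);
 */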

int __weak perf_event__synthesize_extra_kmaps(struct perf_tool *tool __maybe_unused,
					      perf_event__handler_t process __maybe_unused,
					      struct machine *machine __maybe_unused)
{
	return 0;
}

static int __perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
						perf_event__handler_t process,
						struct machine *machine)
{
	size_t size;
	struct map *map = machine__kernel_map(machine);
	struct kmap *kmap;
	int err;
	union perf_event *event;

	if (symbol_conf.kptr_restrict)
		return -1;
	if (map == NULL)
		return -1;

	/*
	 * We should get this from /sys/kernel/sections/.text, but till that is
	 * available use this, and after it is use this as a fallback for older
	 * kernels.
	 */
	event = zalloc((sizeof(event->mmap) + machine->id_hdr_size));
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for the kernel map\n");
		return -1;
	}

	if (machine__is_host(machine)) {
		/*
		 * kernel uses PERF_RECORD_MISC_USER for user space maps,
		 * see kernel/perf_event.c __perf_event_mmap
		 */
		event->header.misc = PERF_RECORD_MISC_KERNEL;
	} else {
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
	}

	kmap = map__kmap(map);
	size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
			"%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1;
	size = PERF_ALIGN(size, sizeof(u64));
	event->mmap.header.type = PERF_RECORD_MMAP;
	event->mmap.header.size = (sizeof(event->mmap) -
				   (sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
	event->mmap.pgoff = kmap->ref_reloc_sym->addr;
	event->mmap.start = map->start;
	event->mmap.len = map->end - event->mmap.start;
	event->mmap.pid = machine->pid;

	err = perf_tool__process_synth_event(tool, event, machine, process);
	free(event);

	return err;
}

int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	int err;

	err = __perf_event__synthesize_kernel_mmap(tool, process, machine);
	if (err < 0)
		return err;

	return perf_event__synthesize_extra_kmaps(tool, process, machine);
}

int perf_event__synthesize_thread_map2(struct perf_tool *tool,
				       struct thread_map *threads,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	union perf_event *event;
	int i, err, size;

	size = sizeof(event->thread_map);
	size += threads->nr * sizeof(event->thread_map.entries[0]);

	event = zalloc(size);
	if (!event)
		return -ENOMEM;

	event->header.type = PERF_RECORD_THREAD_MAP;
	event->header.size = size;
	event->thread_map.nr = threads->nr;

	for (i = 0; i < threads->nr; i++) {
		struct thread_map_event_entry *entry = &event->thread_map.entries[i];
		char *comm = thread_map__comm(threads, i);

		if (!comm)
			comm = (char *) "";

		entry->pid = thread_map__pid(threads, i);
		strncpy((char *) &entry->comm, comm, sizeof(entry->comm));
	}

	err = process(tool, event, NULL, machine);

	free(event);
	return err;
}
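
/*
 * PERF_RECORD_CPU_MAP payloads come in two encodings and the helpers below
 * pick whichever is smaller: a plain array of u16 cpu numbers, or a bitmask
 * of longs.  Worked example (illustrative): cpus {0,1,2,3} cost
 * 4 * sizeof(u16) = 8 bytes as an array and BITS_TO_LONGS(4) * sizeof(long)
 * = 8 bytes as a mask, each plus its respective struct header.
 */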

static void synthesize_cpus(struct cpu_map_entries *cpus,
			    struct cpu_map *map)
{
	int i;

	cpus->nr = map->nr;

	for (i = 0; i < map->nr; i++)
		cpus->cpu[i] = map->map[i];
}

static void synthesize_mask(struct cpu_map_mask *mask,
			    struct cpu_map *map, int max)
{
	int i;

	mask->nr = BITS_TO_LONGS(max);
	mask->long_size = sizeof(long);

	for (i = 0; i < map->nr; i++)
		set_bit(map->map[i], mask->mask);
}

static size_t cpus_size(struct cpu_map *map)
{
	return sizeof(struct cpu_map_entries) + map->nr * sizeof(u16);
}

static size_t mask_size(struct cpu_map *map, int *max)
{
	int i;

	*max = 0;

	for (i = 0; i < map->nr; i++) {
		/* the mask must cover bit 'cpu', hence cpu + 1 bits */
		int bit = map->map[i] + 1;

		if (bit > *max)
			*max = bit;
	}

	return sizeof(struct cpu_map_mask) + BITS_TO_LONGS(*max) * sizeof(long);
}

void *cpu_map_data__alloc(struct cpu_map *map, size_t *size, u16 *type, int *max)
{
	size_t size_cpus, size_mask;
	bool is_dummy = cpu_map__empty(map);

	/*
	 * Both array and mask data have variable size based
	 * on the number of cpus and their actual values.
	 * The size of the 'struct cpu_map_data' is:
	 *
	 *   array = size of 'struct cpu_map_entries' +
	 *           number of cpus * sizeof(u16)
	 *
	 *   mask  = size of 'struct cpu_map_mask' +
	 *           maximum cpu bit converted to size of longs
	 *
	 * and finally + the size of 'struct cpu_map_data'.
	 */
	size_cpus = cpus_size(map);
	size_mask = mask_size(map, max);

	if (is_dummy || (size_cpus < size_mask)) {
		*size += size_cpus;
		*type = PERF_CPU_MAP__CPUS;
	} else {
		*size += size_mask;
		*type = PERF_CPU_MAP__MASK;
	}

	*size += sizeof(struct cpu_map_data);
	*size = PERF_ALIGN(*size, sizeof(u64));
	return zalloc(*size);
}

void cpu_map_data__synthesize(struct cpu_map_data *data, struct cpu_map *map,
			      u16 type, int max)
{
	data->type = type;

	switch (type) {
	case PERF_CPU_MAP__CPUS:
		synthesize_cpus((struct cpu_map_entries *) data->data, map);
		break;
	case PERF_CPU_MAP__MASK:
		synthesize_mask((struct cpu_map_mask *) data->data, map, max);
		break;
	default:
		break;
	}
}

static struct cpu_map_event* cpu_map_event__new(struct cpu_map *map)
{
	size_t size = sizeof(struct cpu_map_event);
	struct cpu_map_event *event;
	int max;
	u16 type;

	event = cpu_map_data__alloc(map, &size, &type, &max);
	if (!event)
		return NULL;

	event->header.type = PERF_RECORD_CPU_MAP;
	event->header.size = size;
	event->data.type = type;

	cpu_map_data__synthesize(&event->data, map, type, max);
	return event;
}

int perf_event__synthesize_cpu_map(struct perf_tool *tool,
				   struct cpu_map *map,
				   perf_event__handler_t process,
				   struct machine *machine)
{
	struct cpu_map_event *event;
	int err;

	event = cpu_map_event__new(map);
	if (!event)
		return -ENOMEM;

	err = process(tool, (union perf_event *) event, NULL, machine);

	free(event);
	return err;
}

int perf_event__synthesize_stat_config(struct perf_tool *tool,
				       struct perf_stat_config *config,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	struct stat_config_event *event;
	int size, i = 0, err;

	size = sizeof(*event);
	size += (PERF_STAT_CONFIG_TERM__MAX * sizeof(event->data[0]));

	event = zalloc(size);
	if (!event)
		return -ENOMEM;

	event->header.type = PERF_RECORD_STAT_CONFIG;
	event->header.size = size;
	event->nr          = PERF_STAT_CONFIG_TERM__MAX;

#define ADD(__term, __val)					\
	event->data[i].tag = PERF_STAT_CONFIG_TERM__##__term;	\
	event->data[i].val = __val;				\
	i++;

	ADD(AGGR_MODE,	config->aggr_mode)
	ADD(INTERVAL,	config->interval)
	ADD(SCALE,	config->scale)

	WARN_ONCE(i != PERF_STAT_CONFIG_TERM__MAX,
		  "stat config terms unbalanced\n");
#undef ADD

	err = process(tool, (union perf_event *) event, NULL, machine);

	free(event);
	return err;
}

int perf_event__synthesize_stat(struct perf_tool *tool,
				u32 cpu, u32 thread, u64 id,
				struct perf_counts_values *count,
				perf_event__handler_t process,
				struct machine *machine)
{
	struct stat_event event;

	event.header.type = PERF_RECORD_STAT;
	event.header.size = sizeof(event);
	event.header.misc = 0;

	event.id     = id;
	event.cpu    = cpu;
	event.thread = thread;
	event.val    = count->val;
	event.ena    = count->ena;
	event.run    = count->run;

	return process(tool, (union perf_event *) &event, NULL, machine);
}

int perf_event__synthesize_stat_round(struct perf_tool *tool,
				      u64 evtime, u64 type,
				      perf_event__handler_t process,
				      struct machine *machine)
{
	struct stat_round_event event;

	event.header.type = PERF_RECORD_STAT_ROUND;
	event.header.size = sizeof(event);
	event.header.misc = 0;

	event.time = evtime;
	event.type = type;

	return process(tool, (union perf_event *) &event, NULL, machine);
}

void perf_event__read_stat_config(struct perf_stat_config *config,
				  struct stat_config_event *event)
{
	unsigned i;

	for (i = 0; i < event->nr; i++) {

		switch (event->data[i].tag) {
#define CASE(__term, __val)					\
		case PERF_STAT_CONFIG_TERM__##__term:		\
			config->__val = event->data[i].val;	\
			break;

		CASE(AGGR_MODE, aggr_mode)
		CASE(SCALE,     scale)
		CASE(INTERVAL,  interval)
#undef CASE
		default:
			pr_warning("unknown stat config term %" PRIu64 "\n",
				   event->data[i].tag);
		}
	}
}

size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp)
{
	const char *s;

	if (event->header.misc & PERF_RECORD_MISC_COMM_EXEC)
		s = " exec";
	else
		s = "";

	return fprintf(fp, "%s: %s:%d/%d\n", s, event->comm.comm, event->comm.pid, event->comm.tid);
}

size_t perf_event__fprintf_namespaces(union perf_event *event, FILE *fp)
{
	size_t ret = 0;
	struct perf_ns_link_info *ns_link_info;
	u32 nr_namespaces, idx;

	ns_link_info = event->namespaces.link_info;
	nr_namespaces = event->namespaces.nr_namespaces;

	ret += fprintf(fp, " %d/%d - nr_namespaces: %u\n\t\t[",
		       event->namespaces.pid,
		       event->namespaces.tid,
		       nr_namespaces);

	for (idx = 0; idx < nr_namespaces; idx++) {
		if (idx && (idx % 4 == 0))
			ret += fprintf(fp, "\n\t\t ");

		ret += fprintf(fp, "%u/%s: %" PRIu64 "/%#" PRIx64 "%s", idx,
			       perf_ns__name(idx), (u64)ns_link_info[idx].dev,
			       (u64)ns_link_info[idx].ino,
			       ((idx + 1) != nr_namespaces) ? ", " : "]\n");
	}

	return ret;
}

int perf_event__process_comm(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_comm_event(machine, event, sample);
}

int perf_event__process_namespaces(struct perf_tool *tool __maybe_unused,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine)
{
	return machine__process_namespaces_event(machine, event, sample);
}

int perf_event__process_lost(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_lost_event(machine, event, sample);
}

int perf_event__process_aux(struct perf_tool *tool __maybe_unused,
			    union perf_event *event,
			    struct perf_sample *sample __maybe_unused,
			    struct machine *machine)
{
	return machine__process_aux_event(machine, event);
}

int perf_event__process_itrace_start(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine)
{
	return machine__process_itrace_start_event(machine, event);
}

int perf_event__process_lost_samples(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_sample *sample,
				     struct machine *machine)
{
	return machine__process_lost_samples_event(machine, event, sample);
}

int perf_event__process_switch(struct perf_tool *tool __maybe_unused,
			       union perf_event *event,
			       struct perf_sample *sample __maybe_unused,
			       struct machine *machine)
{
	return machine__process_switch_event(machine, event);
}

size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %c %s\n",
		       event->mmap.pid, event->mmap.tid, event->mmap.start,
		       event->mmap.len, event->mmap.pgoff,
		       (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x',
		       event->mmap.filename);
}

size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64
			   " %02x:%02x %"PRIu64" %"PRIu64"]: %c%c%c%c %s\n",
		       event->mmap2.pid, event->mmap2.tid, event->mmap2.start,
		       event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj,
		       event->mmap2.min, event->mmap2.ino,
		       event->mmap2.ino_generation,
		       (event->mmap2.prot & PROT_READ) ? 'r' : '-',
		       (event->mmap2.prot & PROT_WRITE) ? 'w' : '-',
		       (event->mmap2.prot & PROT_EXEC) ? 'x' : '-',
		       (event->mmap2.flags & MAP_SHARED) ? 's' : 'p',
		       event->mmap2.filename);
}

size_t perf_event__fprintf_thread_map(union perf_event *event, FILE *fp)
{
	struct thread_map *threads = thread_map__new_event(&event->thread_map);
	size_t ret;

	ret = fprintf(fp, " nr: ");

	if (threads)
		ret += thread_map__fprintf(threads, fp);
	else
		ret += fprintf(fp, "failed to get threads from event\n");

	thread_map__put(threads);
	return ret;
}

size_t perf_event__fprintf_cpu_map(union perf_event *event, FILE *fp)
{
	struct cpu_map *cpus = cpu_map__new_data(&event->cpu_map.data);
	size_t ret;

	ret = fprintf(fp, ": ");

	if (cpus)
		ret += cpu_map__fprintf(cpus, fp);
	else
		ret += fprintf(fp, "failed to get cpumap from event\n");

	cpu_map__put(cpus);
	return ret;
}

int perf_event__process_mmap(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_mmap_event(machine, event, sample);
}

int perf_event__process_mmap2(struct perf_tool *tool __maybe_unused,
			      union perf_event *event,
			      struct perf_sample *sample,
			      struct machine *machine)
{
	return machine__process_mmap2_event(machine, event, sample);
}

size_t perf_event__fprintf_task(union perf_event *event, FILE *fp)
{
	return fprintf(fp, "(%d:%d):(%d:%d)\n",
		       event->fork.pid, event->fork.tid,
		       event->fork.ppid, event->fork.ptid);
}

int perf_event__process_fork(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_fork_event(machine, event, sample);
}

int perf_event__process_exit(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_exit_event(machine, event, sample);
}

size_t perf_event__fprintf_aux(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " offset: %#"PRIx64" size: %#"PRIx64" flags: %#"PRIx64" [%s%s%s]\n",
		       event->aux.aux_offset, event->aux.aux_size,
		       event->aux.flags,
		       event->aux.flags & PERF_AUX_FLAG_TRUNCATED ? "T" : "",
		       event->aux.flags & PERF_AUX_FLAG_OVERWRITE ? "O" : "",
		       event->aux.flags & PERF_AUX_FLAG_PARTIAL   ? "P" : "");
}

size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " pid: %u tid: %u\n",
		       event->itrace_start.pid, event->itrace_start.tid);
}

size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp)
{
	bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
	const char *in_out = !out ? "IN " :
		!(event->header.misc & PERF_RECORD_MISC_SWITCH_OUT_PREEMPT) ?
			"OUT " : "OUT preempt";

	if (event->header.type == PERF_RECORD_SWITCH)
		return fprintf(fp, " %s\n", in_out);

	return fprintf(fp, " %s %s pid/tid: %5u/%-5u\n",
		       in_out, out ? "next" : "prev",
		       event->context_switch.next_prev_pid,
		       event->context_switch.next_prev_tid);
}

static size_t perf_event__fprintf_lost(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " lost %" PRIu64 "\n", event->lost.lost);
}

size_t perf_event__fprintf(union perf_event *event, FILE *fp)
{
	size_t ret = fprintf(fp, "PERF_RECORD_%s",
			     perf_event__name(event->header.type));

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret += perf_event__fprintf_comm(event, fp);
		break;
	case PERF_RECORD_FORK:
	case PERF_RECORD_EXIT:
		ret += perf_event__fprintf_task(event, fp);
		break;
	case PERF_RECORD_MMAP:
		ret += perf_event__fprintf_mmap(event, fp);
		break;
	case PERF_RECORD_NAMESPACES:
		ret += perf_event__fprintf_namespaces(event, fp);
		break;
	case PERF_RECORD_MMAP2:
		ret += perf_event__fprintf_mmap2(event, fp);
		break;
	case PERF_RECORD_AUX:
		ret += perf_event__fprintf_aux(event, fp);
		break;
	case PERF_RECORD_ITRACE_START:
		ret += perf_event__fprintf_itrace_start(event, fp);
		break;
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		ret += perf_event__fprintf_switch(event, fp);
		break;
	case PERF_RECORD_LOST:
		ret += perf_event__fprintf_lost(event, fp);
		break;
	default:
		ret += fprintf(fp, "\n");
	}

	return ret;
}

int perf_event__process(struct perf_tool *tool __maybe_unused,
			union perf_event *event,
			struct perf_sample *sample,
			struct machine *machine)
{
	return machine__process_event(machine, event, sample);
}

struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr,
			     struct addr_location *al)
{
	struct map_groups *mg = thread->mg;
	struct machine *machine = mg->machine;
	bool load_map = false;

	al->machine = machine;
	al->thread = thread;
	al->addr = addr;
	al->cpumode = cpumode;
	al->filtered = 0;

	if (machine == NULL) {
		al->map = NULL;
		return NULL;
	}

	if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
		al->level = 'k';
		mg = &machine->kmaps;
		load_map = true;
	} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
		al->level = '.';
	} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
		al->level = 'g';
		mg = &machine->kmaps;
		load_map = true;
	} else if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest) {
		al->level = 'u';
	} else {
		al->level = 'H';
		al->map = NULL;

		if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
		     cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
		    !perf_guest)
			al->filtered |= (1 << HIST_FILTER__GUEST);
		if ((cpumode == PERF_RECORD_MISC_USER ||
		     cpumode == PERF_RECORD_MISC_KERNEL) &&
		    !perf_host)
			al->filtered |= (1 << HIST_FILTER__HOST);

		return NULL;
	}

	al->map = map_groups__find(mg, al->addr);
	if (al->map != NULL) {
		/*
		 * Kernel maps might be changed when loading symbols so loading
		 * must be done prior to using kernel maps.
		 */
		if (load_map)
			map__load(al->map);
		al->addr = al->map->map_ip(al->map, al->addr);
	}

	return al->map;
}

/*
 * For branch stacks or branch samples, the sample cpumode might not be correct
 * because it applies only to the sample 'ip' and not necessarily to 'addr' or
 * the branch stack addresses. If possible, use a fallback to deal with those cases.
 */
struct map *thread__find_map_fb(struct thread *thread, u8 cpumode, u64 addr,
				struct addr_location *al)
{
	struct map *map = thread__find_map(thread, cpumode, addr, al);
	struct machine *machine = thread->mg->machine;
	u8 addr_cpumode = machine__addr_cpumode(machine, cpumode, addr);

	if (map || addr_cpumode == cpumode)
		return map;

	return thread__find_map(thread, addr_cpumode, addr, al);
}

struct symbol *thread__find_symbol(struct thread *thread, u8 cpumode,
				   u64 addr, struct addr_location *al)
{
	al->sym = NULL;
	if (thread__find_map(thread, cpumode, addr, al))
		al->sym = map__find_symbol(al->map, al->addr);
	return al->sym;
}

struct symbol *thread__find_symbol_fb(struct thread *thread, u8 cpumode,
				      u64 addr, struct addr_location *al)
{
	al->sym = NULL;
	if (thread__find_map_fb(thread, cpumode, addr, al))
		al->sym = map__find_symbol(al->map, al->addr);
	return al->sym;
}
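
/*
 * Resolution sketch (illustrative): turning a sample ip into a symbol goes
 * through the helpers above, e.g.:
 *
 *	struct addr_location al;
 *	struct symbol *sym = thread__find_symbol(thread, sample->cpumode,
 *						 sample->ip, &al);
 *	// al.map/al.addr now hold the map and the map-relative address,
 *	// sym may be NULL if nothing was resolved
 */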
1673 */ 1674 void addr_location__put(struct addr_location *al) 1675 { 1676 thread__zput(al->thread); 1677 } 1678 1679 bool is_bts_event(struct perf_event_attr *attr) 1680 { 1681 return attr->type == PERF_TYPE_HARDWARE && 1682 (attr->config & PERF_COUNT_HW_BRANCH_INSTRUCTIONS) && 1683 attr->sample_period == 1; 1684 } 1685 1686 bool sample_addr_correlates_sym(struct perf_event_attr *attr) 1687 { 1688 if (attr->type == PERF_TYPE_SOFTWARE && 1689 (attr->config == PERF_COUNT_SW_PAGE_FAULTS || 1690 attr->config == PERF_COUNT_SW_PAGE_FAULTS_MIN || 1691 attr->config == PERF_COUNT_SW_PAGE_FAULTS_MAJ)) 1692 return true; 1693 1694 if (is_bts_event(attr)) 1695 return true; 1696 1697 return false; 1698 } 1699 1700 void thread__resolve(struct thread *thread, struct addr_location *al, 1701 struct perf_sample *sample) 1702 { 1703 thread__find_map_fb(thread, sample->cpumode, sample->addr, al); 1704 1705 al->cpu = sample->cpu; 1706 al->sym = NULL; 1707 1708 if (al->map) 1709 al->sym = map__find_symbol(al->map, al->addr); 1710 } 1711