// SPDX-License-Identifier: GPL-2.0
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <pthread.h> /* pthread_create()/pthread_join(), used by the threaded synthesis below */
#include <linux/kernel.h>
#include <linux/types.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
#include <api/fs/fs.h>
#include <linux/perf_event.h>
#include "event.h"
#include "debug.h"
#include "hist.h"
#include "machine.h"
#include "sort.h"
#include "string2.h"
#include "strlist.h"
#include "thread.h"
#include "thread_map.h"
#include "sane_ctype.h"
#include "symbol/kallsyms.h"
#include "asm/bug.h"
#include "stat.h"

static const char *perf_event__names[] = {
	[0] = "TOTAL",
	[PERF_RECORD_MMAP] = "MMAP",
	[PERF_RECORD_MMAP2] = "MMAP2",
	[PERF_RECORD_LOST] = "LOST",
	[PERF_RECORD_COMM] = "COMM",
	[PERF_RECORD_EXIT] = "EXIT",
	[PERF_RECORD_THROTTLE] = "THROTTLE",
	[PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE",
	[PERF_RECORD_FORK] = "FORK",
	[PERF_RECORD_READ] = "READ",
	[PERF_RECORD_SAMPLE] = "SAMPLE",
	[PERF_RECORD_AUX] = "AUX",
	[PERF_RECORD_ITRACE_START] = "ITRACE_START",
	[PERF_RECORD_LOST_SAMPLES] = "LOST_SAMPLES",
	[PERF_RECORD_SWITCH] = "SWITCH",
	[PERF_RECORD_SWITCH_CPU_WIDE] = "SWITCH_CPU_WIDE",
	[PERF_RECORD_NAMESPACES] = "NAMESPACES",
	[PERF_RECORD_HEADER_ATTR] = "ATTR",
	[PERF_RECORD_HEADER_EVENT_TYPE] = "EVENT_TYPE",
	[PERF_RECORD_HEADER_TRACING_DATA] = "TRACING_DATA",
	[PERF_RECORD_HEADER_BUILD_ID] = "BUILD_ID",
	[PERF_RECORD_FINISHED_ROUND] = "FINISHED_ROUND",
	[PERF_RECORD_ID_INDEX] = "ID_INDEX",
	[PERF_RECORD_AUXTRACE_INFO] = "AUXTRACE_INFO",
	[PERF_RECORD_AUXTRACE] = "AUXTRACE",
	[PERF_RECORD_AUXTRACE_ERROR] = "AUXTRACE_ERROR",
	[PERF_RECORD_THREAD_MAP] = "THREAD_MAP",
	[PERF_RECORD_CPU_MAP] = "CPU_MAP",
	[PERF_RECORD_STAT_CONFIG] = "STAT_CONFIG",
	[PERF_RECORD_STAT] = "STAT",
	[PERF_RECORD_STAT_ROUND] = "STAT_ROUND",
	[PERF_RECORD_EVENT_UPDATE] = "EVENT_UPDATE",
	[PERF_RECORD_TIME_CONV] = "TIME_CONV",
	[PERF_RECORD_HEADER_FEATURE] = "FEATURE",
};

static const char *perf_ns__names[] = {
	[NET_NS_INDEX] = "net",
	[UTS_NS_INDEX] = "uts",
	[IPC_NS_INDEX] = "ipc",
	[PID_NS_INDEX] = "pid",
	[USER_NS_INDEX] = "user",
	[MNT_NS_INDEX] = "mnt",
	[CGROUP_NS_INDEX] = "cgroup",
};

const char *perf_event__name(unsigned int id)
{
	if (id >= ARRAY_SIZE(perf_event__names))
		return "INVALID";
	if (!perf_event__names[id])
		return "UNKNOWN";
	return perf_event__names[id];
}

static const char *perf_ns__name(unsigned int id)
{
	if (id >= ARRAY_SIZE(perf_ns__names))
		return "UNKNOWN";
	return perf_ns__names[id];
}

static int perf_tool__process_synth_event(struct perf_tool *tool,
					  union perf_event *event,
					  struct machine *machine,
					  perf_event__handler_t process)
{
	/* synthesized events carry a dummy sample; only the cpumode is real */
	struct perf_sample synth_sample = {
		.pid	   = -1,
		.tid	   = -1,
		.time	   = -1,
		.stream_id = -1,
		.cpu	   = -1,
		.period	   = 1,
		.cpumode   = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK,
	};

	return process(tool, event, &synth_sample, machine);
}
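/*
 * Example of the /proc/<pid>/status lines that perf_event__get_comm_ids()
 * below picks out (the field values are, of course, hypothetical):
 *
 *	Name:	perf
 *	...
 *	Tgid:	4242
 *	...
 *	PPid:	4200
 */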
/*
 * Assumes that the first 4095 bytes of /proc/<pid>/status contain
 * the comm, tgid and ppid.
 */
static int perf_event__get_comm_ids(pid_t pid, char *comm, size_t len,
				    pid_t *tgid, pid_t *ppid)
{
	char filename[PATH_MAX];
	char bf[4096];
	int fd;
	size_t size = 0;
	ssize_t n;
	char *name, *tgids, *ppids;

	*tgid = -1;
	*ppid = -1;

	snprintf(filename, sizeof(filename), "/proc/%d/status", pid);

	fd = open(filename, O_RDONLY);
	if (fd < 0) {
		pr_debug("couldn't open %s\n", filename);
		return -1;
	}

	n = read(fd, bf, sizeof(bf) - 1);
	close(fd);
	if (n <= 0) {
		pr_warning("Couldn't get COMM, tgid and ppid for pid %d\n",
			   pid);
		return -1;
	}
	bf[n] = '\0';

	name = strstr(bf, "Name:");
	tgids = strstr(bf, "Tgid:");
	ppids = strstr(bf, "PPid:");

	if (name) {
		char *nl;

		name += 5;  /* strlen("Name:") */
		name = ltrim(name);

		nl = strchr(name, '\n');
		if (nl)
			*nl = '\0';

		size = strlen(name);
		if (size >= len)
			size = len - 1;
		memcpy(comm, name, size);
		comm[size] = '\0';
	} else {
		pr_debug("Name: string not found for pid %d\n", pid);
	}

	if (tgids) {
		tgids += 5;  /* strlen("Tgid:") */
		*tgid = atoi(tgids);
	} else {
		pr_debug("Tgid: string not found for pid %d\n", pid);
	}

	if (ppids) {
		ppids += 5;  /* strlen("PPid:") */
		*ppid = atoi(ppids);
	} else {
		pr_debug("PPid: string not found for pid %d\n", pid);
	}

	return 0;
}

static int perf_event__prepare_comm(union perf_event *event, pid_t pid,
				    struct machine *machine,
				    pid_t *tgid, pid_t *ppid)
{
	size_t size;

	*ppid = -1;

	memset(&event->comm, 0, sizeof(event->comm));

	if (machine__is_host(machine)) {
		if (perf_event__get_comm_ids(pid, event->comm.comm,
					     sizeof(event->comm.comm),
					     tgid, ppid) != 0) {
			return -1;
		}
	} else {
		*tgid = machine->pid;
	}

	if (*tgid < 0)
		return -1;

	event->comm.pid = *tgid;
	event->comm.header.type = PERF_RECORD_COMM;

	size = strlen(event->comm.comm) + 1;
	size = PERF_ALIGN(size, sizeof(u64));
	memset(event->comm.comm + size, 0, machine->id_hdr_size);
	event->comm.header.size = (sizeof(event->comm) -
				   (sizeof(event->comm.comm) - size) +
				   machine->id_hdr_size);
	event->comm.tid = pid;

	return 0;
}

pid_t perf_event__synthesize_comm(struct perf_tool *tool,
				  union perf_event *event, pid_t pid,
				  perf_event__handler_t process,
				  struct machine *machine)
{
	pid_t tgid, ppid;

	if (perf_event__prepare_comm(event, pid, machine, &tgid, &ppid) != 0)
		return -1;

	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
		return -1;

	return tgid;
}
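/*
 * The synthesized COMM and MMAP2 events above and below carry a
 * variable-length string followed by a per-sample id area of
 * machine->id_hdr_size bytes.  The header.size arithmetic always has the
 * same shape:
 *
 *	size = sizeof(event) - (sizeof(string_field) - aligned_len)
 *	       + machine->id_hdr_size
 *
 * where aligned_len is strlen(string) + 1 rounded up to a multiple of
 * sizeof(u64), so the id area that follows the string stays 8-byte
 * aligned.
 */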
static void perf_event__get_ns_link_info(pid_t pid, const char *ns,
					 struct perf_ns_link_info *ns_link_info)
{
	struct stat64 st;
	char proc_ns[128];

	sprintf(proc_ns, "/proc/%u/ns/%s", pid, ns);
	if (stat64(proc_ns, &st) == 0) {
		ns_link_info->dev = st.st_dev;
		ns_link_info->ino = st.st_ino;
	}
}

int perf_event__synthesize_namespaces(struct perf_tool *tool,
				      union perf_event *event,
				      pid_t pid, pid_t tgid,
				      perf_event__handler_t process,
				      struct machine *machine)
{
	u32 idx;
	struct perf_ns_link_info *ns_link_info;

	if (!tool || !tool->namespace_events)
		return 0;

	memset(&event->namespaces, 0, (sizeof(event->namespaces) +
	       (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
	       machine->id_hdr_size));

	event->namespaces.pid = tgid;
	event->namespaces.tid = pid;

	event->namespaces.nr_namespaces = NR_NAMESPACES;

	ns_link_info = event->namespaces.link_info;

	for (idx = 0; idx < event->namespaces.nr_namespaces; idx++)
		perf_event__get_ns_link_info(pid, perf_ns__name(idx),
					     &ns_link_info[idx]);

	event->namespaces.header.type = PERF_RECORD_NAMESPACES;

	event->namespaces.header.size = (sizeof(event->namespaces) +
		(NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
		machine->id_hdr_size);

	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
		return -1;

	return 0;
}

static int perf_event__synthesize_fork(struct perf_tool *tool,
				       union perf_event *event,
				       pid_t pid, pid_t tgid, pid_t ppid,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);

	/*
	 * For the main thread, set the parent to the ppid from the status
	 * file.  For other threads, set the parent pid to the main thread,
	 * i.e. assume the main thread spawns all threads in a process.
	 */
	if (tgid == pid) {
		event->fork.ppid = ppid;
		event->fork.ptid = ppid;
	} else {
		event->fork.ppid = tgid;
		event->fork.ptid = tgid;
	}
	event->fork.pid = tgid;
	event->fork.tid = pid;
	event->fork.header.type = PERF_RECORD_FORK;

	event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);

	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
		return -1;

	return 0;
}
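/*
 * Synthesize one PERF_RECORD_MMAP2 per line of /proc/<pid>/task/<pid>/maps.
 * Parsing is bounded by proc_map_timeout (milliseconds): once it expires,
 * the current event is flagged with
 * PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT and the remaining maps are
 * dropped.
 */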
int perf_event__synthesize_mmap_events(struct perf_tool *tool,
				       union perf_event *event,
				       pid_t pid, pid_t tgid,
				       perf_event__handler_t process,
				       struct machine *machine,
				       bool mmap_data,
				       unsigned int proc_map_timeout)
{
	char filename[PATH_MAX];
	FILE *fp;
	unsigned long long t;
	bool truncation = false;
	unsigned long long timeout = proc_map_timeout * 1000000ULL;
	int rc = 0;
	const char *hugetlbfs_mnt = hugetlbfs__mountpoint();
	int hugetlbfs_mnt_len = hugetlbfs_mnt ? strlen(hugetlbfs_mnt) : 0;

	if (machine__is_default_guest(machine))
		return 0;

	snprintf(filename, sizeof(filename), "%s/proc/%d/task/%d/maps",
		 machine->root_dir, pid, pid);

	fp = fopen(filename, "r");
	if (fp == NULL) {
		/*
		 * We raced with a task exiting - just return:
		 */
		pr_debug("couldn't open %s\n", filename);
		return -1;
	}

	event->header.type = PERF_RECORD_MMAP2;
	t = rdclock();

	while (1) {
		char bf[BUFSIZ];
		char prot[5];
		char execname[PATH_MAX];
		char anonstr[] = "//anon";
		unsigned int ino;
		size_t size;
		ssize_t n;

		if (fgets(bf, sizeof(bf), fp) == NULL)
			break;

		if ((rdclock() - t) > timeout) {
			pr_warning("Reading %s timed out. "
				   "You may want to increase "
				   "the time limit by --proc-map-timeout\n",
				   filename);
			truncation = true;
			goto out;
		}

		/* ensure null termination since stack will be reused. */
		strcpy(execname, "");

		/* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */
		n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %[^\n]\n",
		       &event->mmap2.start, &event->mmap2.len, prot,
		       &event->mmap2.pgoff, &event->mmap2.maj,
		       &event->mmap2.min,
		       &ino, execname);

		/*
		 * Anon maps don't have the execname.
		 */
		if (n < 7)
			continue;

		event->mmap2.ino = (u64)ino;

		/*
		 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
		 */
		if (machine__is_host(machine))
			event->header.misc = PERF_RECORD_MISC_USER;
		else
			event->header.misc = PERF_RECORD_MISC_GUEST_USER;

		/* map protection and flags bits */
		event->mmap2.prot = 0;
		event->mmap2.flags = 0;
		if (prot[0] == 'r')
			event->mmap2.prot |= PROT_READ;
		if (prot[1] == 'w')
			event->mmap2.prot |= PROT_WRITE;
		if (prot[2] == 'x')
			event->mmap2.prot |= PROT_EXEC;

		if (prot[3] == 's')
			event->mmap2.flags |= MAP_SHARED;
		else
			event->mmap2.flags |= MAP_PRIVATE;

		if (prot[2] != 'x') {
			if (!mmap_data || prot[0] != 'r')
				continue;

			event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
		}

out:
		if (truncation)
			event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT;

		if (!strcmp(execname, ""))
			strcpy(execname, anonstr);

		if (hugetlbfs_mnt_len &&
		    !strncmp(execname, hugetlbfs_mnt, hugetlbfs_mnt_len)) {
			strcpy(execname, anonstr);
			event->mmap2.flags |= MAP_HUGETLB;
		}

		size = strlen(execname) + 1;
		memcpy(event->mmap2.filename, execname, size);
		size = PERF_ALIGN(size, sizeof(u64));
		/* 'len' was parsed as the map's end address, turn it into a length */
		event->mmap2.len -= event->mmap2.start;
		event->mmap2.header.size = (sizeof(event->mmap2) -
					(sizeof(event->mmap2.filename) - size));
		memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
		event->mmap2.header.size += machine->id_hdr_size;
		event->mmap2.pid = tgid;
		event->mmap2.tid = pid;

		if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
			rc = -1;
			break;
		}

		if (truncation)
			break;
	}

	fclose(fp);
	return rc;
}

int perf_event__synthesize_modules(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine)
{
	int rc = 0;
	struct map *pos;
	struct map_groups *kmaps = &machine->kmaps;
	struct maps *maps = &kmaps->maps[MAP__FUNCTION];
	union perf_event *event = zalloc((sizeof(event->mmap) +
					  machine->id_hdr_size));
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for kernel modules\n");
		return -1;
	}

	event->header.type = PERF_RECORD_MMAP;

	/*
	 * kernel uses 0 for user space maps, see kernel/perf_event.c
	 * __perf_event_mmap
	 */
	if (machine__is_host(machine))
		event->header.misc = PERF_RECORD_MISC_KERNEL;
	else
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;

	for (pos = maps__first(maps); pos; pos = map__next(pos)) {
		size_t size;

		if (__map__is_kernel(pos))
			continue;

		size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
		event->mmap.header.type = PERF_RECORD_MMAP;
		event->mmap.header.size = (sizeof(event->mmap) -
					   (sizeof(event->mmap.filename) - size));
		memset(event->mmap.filename + size, 0, machine->id_hdr_size);
		event->mmap.header.size += machine->id_hdr_size;
		event->mmap.start = pos->start;
		event->mmap.len = pos->end - pos->start;
		event->mmap.pid = machine->pid;

		memcpy(event->mmap.filename, pos->dso->long_name,
		       pos->dso->long_name_len + 1);
		if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
			rc = -1;
			break;
		}
	}

	free(event);
	return rc;
}
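/*
 * With full == 0, only the passed-in pid gets COMM, NAMESPACES and MMAP2
 * events.  With full != 0, every task in /proc/<pid>/task/ gets FORK,
 * NAMESPACES and COMM events, and the task matching pid itself
 * additionally gets its MMAP2 events synthesized.
 */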
static int __event__synthesize_thread(union perf_event *comm_event,
				      union perf_event *mmap_event,
				      union perf_event *fork_event,
				      union perf_event *namespaces_event,
				      pid_t pid, int full,
				      perf_event__handler_t process,
				      struct perf_tool *tool,
				      struct machine *machine,
				      bool mmap_data,
				      unsigned int proc_map_timeout)
{
	char filename[PATH_MAX];
	DIR *tasks;
	struct dirent *dirent;
	pid_t tgid, ppid;
	int rc = 0;

	/* special case: only send one comm event using passed in pid */
	if (!full) {
		tgid = perf_event__synthesize_comm(tool, comm_event, pid,
						   process, machine);

		if (tgid == -1)
			return -1;

		if (perf_event__synthesize_namespaces(tool, namespaces_event, pid,
						      tgid, process, machine) < 0)
			return -1;

		return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
							  process, machine, mmap_data,
							  proc_map_timeout);
	}

	if (machine__is_default_guest(machine))
		return 0;

	snprintf(filename, sizeof(filename), "%s/proc/%d/task",
		 machine->root_dir, pid);

	tasks = opendir(filename);
	if (tasks == NULL) {
		pr_debug("couldn't open %s\n", filename);
		return 0;
	}

	while ((dirent = readdir(tasks)) != NULL) {
		char *end;
		pid_t _pid;

		_pid = strtol(dirent->d_name, &end, 10);
		if (*end)
			continue;

		rc = -1;
		if (perf_event__prepare_comm(comm_event, _pid, machine,
					     &tgid, &ppid) != 0)
			break;

		if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
						ppid, process, machine) < 0)
			break;

		if (perf_event__synthesize_namespaces(tool, namespaces_event, _pid,
						      tgid, process, machine) < 0)
			break;

		/*
		 * Send the prepared comm event
		 */
		if (perf_tool__process_synth_event(tool, comm_event, machine, process) != 0)
			break;

		rc = 0;
		if (_pid == pid) {
			/* process the parent's maps too */
			rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
								process, machine, mmap_data,
								proc_map_timeout);
			if (rc)
				break;
		}
	}

	closedir(tasks);
	return rc;
}
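/*
 * Note that the thread group leader may not itself be in the thread map:
 * since COMM events are synthesized with comm.pid set to the leader's
 * pid, the loop below detects that case and synthesizes events for the
 * leader too.
 */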
int perf_event__synthesize_thread_map(struct perf_tool *tool,
				      struct thread_map *threads,
				      perf_event__handler_t process,
				      struct machine *machine,
				      bool mmap_data,
				      unsigned int proc_map_timeout)
{
	union perf_event *comm_event, *mmap_event, *fork_event;
	union perf_event *namespaces_event;
	int err = -1, thread, j;

	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
	if (fork_event == NULL)
		goto out_free_mmap;

	namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
				  (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
				  machine->id_hdr_size);
	if (namespaces_event == NULL)
		goto out_free_fork;

	err = 0;
	for (thread = 0; thread < threads->nr; ++thread) {
		if (__event__synthesize_thread(comm_event, mmap_event,
					       fork_event, namespaces_event,
					       thread_map__pid(threads, thread), 0,
					       process, tool, machine,
					       mmap_data, proc_map_timeout)) {
			err = -1;
			break;
		}

		/*
		 * comm.pid is set to thread group id by
		 * perf_event__synthesize_comm
		 */
		if ((int) comm_event->comm.pid != thread_map__pid(threads, thread)) {
			bool need_leader = true;

			/* is thread group leader in thread_map? */
			for (j = 0; j < threads->nr; ++j) {
				if ((int) comm_event->comm.pid == thread_map__pid(threads, j)) {
					need_leader = false;
					break;
				}
			}

			/* if not, generate events for it */
			if (need_leader &&
			    __event__synthesize_thread(comm_event, mmap_event,
						       fork_event, namespaces_event,
						       comm_event->comm.pid, 0,
						       process, tool, machine,
						       mmap_data, proc_map_timeout)) {
				err = -1;
				break;
			}
		}
	}
	free(namespaces_event);
out_free_fork:
	free(fork_event);
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}
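/*
 * System-wide synthesis walks the scandir() result for /proc, possibly
 * spread over several worker threads (see perf_event__synthesize_threads()
 * below): the first n % thread_nr workers take num_per_thread + 1 dirents
 * each and the rest take num_per_thread, e.g. 10 dirents over 4 workers
 * gives chunks of 3, 3, 2 and 2.
 */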
static int __perf_event__synthesize_threads(struct perf_tool *tool,
					    perf_event__handler_t process,
					    struct machine *machine,
					    bool mmap_data,
					    unsigned int proc_map_timeout,
					    struct dirent **dirent,
					    int start,
					    int num)
{
	union perf_event *comm_event, *mmap_event, *fork_event;
	union perf_event *namespaces_event;
	int err = -1;
	char *end;
	pid_t pid;
	int i;

	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
	if (fork_event == NULL)
		goto out_free_mmap;

	namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
				  (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
				  machine->id_hdr_size);
	if (namespaces_event == NULL)
		goto out_free_fork;

	for (i = start; i < start + num; i++) {
		if (!isdigit(dirent[i]->d_name[0]))
			continue;

		pid = (pid_t)strtol(dirent[i]->d_name, &end, 10);
		/* only interested in proper numerical dirents */
		if (*end)
			continue;
		/*
		 * We may race with exiting thread, so don't stop just because
		 * one thread couldn't be synthesized.
		 */
		__event__synthesize_thread(comm_event, mmap_event, fork_event,
					   namespaces_event, pid, 1, process,
					   tool, machine, mmap_data,
					   proc_map_timeout);
	}
	err = 0;

	free(namespaces_event);
out_free_fork:
	free(fork_event);
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}

struct synthesize_threads_arg {
	struct perf_tool *tool;
	perf_event__handler_t process;
	struct machine *machine;
	bool mmap_data;
	unsigned int proc_map_timeout;
	struct dirent **dirent;
	int num;
	int start;
};

static void *synthesize_threads_worker(void *arg)
{
	struct synthesize_threads_arg *args = arg;

	__perf_event__synthesize_threads(args->tool, args->process,
					 args->machine, args->mmap_data,
					 args->proc_map_timeout, args->dirent,
					 args->start, args->num);
	return NULL;
}

int perf_event__synthesize_threads(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine,
				   bool mmap_data,
				   unsigned int proc_map_timeout,
				   unsigned int nr_threads_synthesize)
{
	struct synthesize_threads_arg *args = NULL;
	pthread_t *synthesize_threads = NULL;
	char proc_path[PATH_MAX];
	struct dirent **dirent;
	int num_per_thread;
	int m, n, i, j;
	int thread_nr;
	int base = 0;
	int err = -1;

	if (machine__is_default_guest(machine))
		return 0;

	snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
	n = scandir(proc_path, &dirent, 0, alphasort);
	if (n < 0)
		return err;

	if (nr_threads_synthesize == UINT_MAX)
		thread_nr = sysconf(_SC_NPROCESSORS_ONLN);
	else
		thread_nr = nr_threads_synthesize;

	if (thread_nr <= 1) {
		err = __perf_event__synthesize_threads(tool, process,
						       machine, mmap_data,
						       proc_map_timeout,
						       dirent, base, n);
		goto free_dirent;
	}
	if (thread_nr > n)
		thread_nr = n;

	synthesize_threads = calloc(thread_nr, sizeof(pthread_t));
	if (synthesize_threads == NULL)
		goto free_dirent;

	args = calloc(thread_nr, sizeof(*args));
	if (args == NULL)
		goto free_threads;

	num_per_thread = n / thread_nr;
	m = n % thread_nr;
	for (i = 0; i < thread_nr; i++) {
		args[i].tool = tool;
		args[i].process = process;
		args[i].machine = machine;
		args[i].mmap_data = mmap_data;
		args[i].proc_map_timeout = proc_map_timeout;
		args[i].dirent = dirent;
	}
	/* the first 'm' workers take one extra dirent each ... */
	for (i = 0; i < m; i++) {
		args[i].num = num_per_thread + 1;
		args[i].start = i * args[i].num;
	}
	if (i != 0)
		base = args[i-1].start + args[i-1].num;
	/* ... and the remaining workers take num_per_thread each */
	for (j = i; j < thread_nr; j++) {
		args[j].num = num_per_thread;
		args[j].start = base + (j - i) * args[i].num;
	}

	for (i = 0; i < thread_nr; i++) {
		if (pthread_create(&synthesize_threads[i], NULL,
				   synthesize_threads_worker, &args[i]))
			goto out_join;
	}
	err = 0;
out_join:
	for (i = 0; i < thread_nr; i++)
		pthread_join(synthesize_threads[i], NULL);
	free(args);
free_threads:
	free(synthesize_threads);
free_dirent:
	for (i = 0; i < n; i++)
		free(dirent[i]);
	free(dirent);

	return err;
}
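/*
 * Example of a /proc/kallsyms line fed to find_symbol_cb() via
 * kallsyms__parse() (address and symbol are illustrative):
 *
 *	ffffffff81000000 T _stext
 *
 * i.e. "<address> <type> <name>", with the type character following
 * nm(1) conventions.
 */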
struct process_symbol_args {
	const char *name;
	u64 start;
};

static int find_symbol_cb(void *arg, const char *name, char type,
			  u64 start)
{
	struct process_symbol_args *args = arg;

	/*
	 * Must be a function or at least an alias, as in PARISC64, where
	 * "_text" is an 'A' symbol at the same address as "_stext".
	 */
	if (!(symbol_type__is_a(type, MAP__FUNCTION) ||
	      type == 'A') || strcmp(name, args->name))
		return 0;

	args->start = start;
	return 1;
}

int kallsyms__get_function_start(const char *kallsyms_filename,
				 const char *symbol_name, u64 *addr)
{
	struct process_symbol_args args = { .name = symbol_name, };

	if (kallsyms__parse(kallsyms_filename, &args, find_symbol_cb) <= 0)
		return -1;

	*addr = args.start;
	return 0;
}

int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	size_t size;
	struct map *map = machine__kernel_map(machine);
	struct kmap *kmap;
	int err;
	union perf_event *event;

	if (symbol_conf.kptr_restrict)
		return -1;
	if (map == NULL)
		return -1;

	/*
	 * We should get this from /sys/kernel/sections/.text, but until
	 * that is available use this, keeping it afterwards as a fallback
	 * for older kernels.
	 */
	event = zalloc((sizeof(event->mmap) + machine->id_hdr_size));
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for the kernel map\n");
		return -1;
	}

	if (machine__is_host(machine)) {
		/*
		 * kernel uses PERF_RECORD_MISC_USER for user space maps,
		 * see kernel/perf_event.c __perf_event_mmap
		 */
		event->header.misc = PERF_RECORD_MISC_KERNEL;
	} else {
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
	}

	kmap = map__kmap(map);
	size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
			"%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1;
	size = PERF_ALIGN(size, sizeof(u64));
	event->mmap.header.type = PERF_RECORD_MMAP;
	event->mmap.header.size = (sizeof(event->mmap) -
			(sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
	event->mmap.pgoff = kmap->ref_reloc_sym->addr;
	event->mmap.start = map->start;
	event->mmap.len = map->end - event->mmap.start;
	event->mmap.pid = machine->pid;

	err = perf_tool__process_synth_event(tool, event, machine, process);
	free(event);

	return err;
}

int perf_event__synthesize_thread_map2(struct perf_tool *tool,
				       struct thread_map *threads,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	union perf_event *event;
	int i, err, size;

	size = sizeof(event->thread_map);
	size += threads->nr * sizeof(event->thread_map.entries[0]);

	event = zalloc(size);
	if (!event)
		return -ENOMEM;

	event->header.type = PERF_RECORD_THREAD_MAP;
	event->header.size = size;
	event->thread_map.nr = threads->nr;

	for (i = 0; i < threads->nr; i++) {
		struct thread_map_event_entry *entry = &event->thread_map.entries[i];
		char *comm = thread_map__comm(threads, i);

		if (!comm)
			comm = (char *) "";

		entry->pid = thread_map__pid(threads, i);
		strncpy((char *) &entry->comm, comm, sizeof(entry->comm));
	}

	err = process(tool, event, NULL, machine);

	free(event);
	return err;
}
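/*
 * PERF_RECORD_CPU_MAP data comes in two encodings: a plain array of u16
 * cpu numbers, or a bitmask with one bit per cpu; cpu_map_data__alloc()
 * below picks whichever encoding is smaller for the map at hand.
 */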
static void synthesize_cpus(struct cpu_map_entries *cpus,
			    struct cpu_map *map)
{
	int i;

	cpus->nr = map->nr;

	for (i = 0; i < map->nr; i++)
		cpus->cpu[i] = map->map[i];
}

static void synthesize_mask(struct cpu_map_mask *mask,
			    struct cpu_map *map, int max)
{
	int i;

	mask->nr = BITS_TO_LONGS(max);
	mask->long_size = sizeof(long);

	for (i = 0; i < map->nr; i++)
		set_bit(map->map[i], mask->mask);
}

static size_t cpus_size(struct cpu_map *map)
{
	return sizeof(struct cpu_map_entries) + map->nr * sizeof(u16);
}

static size_t mask_size(struct cpu_map *map, int *max)
{
	int i;

	*max = 0;

	for (i = 0; i < map->nr; i++) {
		/* bit position of the cpu is its number + 1 */
		int bit = map->map[i] + 1;

		if (bit > *max)
			*max = bit;
	}

	return sizeof(struct cpu_map_mask) + BITS_TO_LONGS(*max) * sizeof(long);
}

void *cpu_map_data__alloc(struct cpu_map *map, size_t *size, u16 *type, int *max)
{
	size_t size_cpus, size_mask;
	bool is_dummy = cpu_map__empty(map);

	/*
	 * Both array and mask data have variable size based
	 * on the number of cpus and their actual values.
	 * The size of the 'struct cpu_map_data' is:
	 *
	 *	array = size of 'struct cpu_map_entries' +
	 *		number of cpus * sizeof(u64)
	 *
	 *	mask  = size of 'struct cpu_map_mask' +
	 *		maximum cpu bit converted to size of longs
	 *
	 * and finally + the size of 'struct cpu_map_data'.
	 */
	size_cpus = cpus_size(map);
	size_mask = mask_size(map, max);

	if (is_dummy || (size_cpus < size_mask)) {
		*size += size_cpus;
		*type = PERF_CPU_MAP__CPUS;
	} else {
		*size += size_mask;
		*type = PERF_CPU_MAP__MASK;
	}

	*size += sizeof(struct cpu_map_data);
	return zalloc(*size);
}

void cpu_map_data__synthesize(struct cpu_map_data *data, struct cpu_map *map,
			      u16 type, int max)
{
	data->type = type;

	switch (type) {
	case PERF_CPU_MAP__CPUS:
		synthesize_cpus((struct cpu_map_entries *) data->data, map);
		break;
	case PERF_CPU_MAP__MASK:
		synthesize_mask((struct cpu_map_mask *) data->data, map, max);
		break;
	default:
		break;
	}
}

static struct cpu_map_event* cpu_map_event__new(struct cpu_map *map)
{
	size_t size = sizeof(struct cpu_map_event);
	struct cpu_map_event *event;
	int max;
	u16 type;

	event = cpu_map_data__alloc(map, &size, &type, &max);
	if (!event)
		return NULL;

	event->header.type = PERF_RECORD_CPU_MAP;
	event->header.size = size;
	event->data.type = type;

	cpu_map_data__synthesize(&event->data, map, type, max);
	return event;
}

int perf_event__synthesize_cpu_map(struct perf_tool *tool,
				   struct cpu_map *map,
				   perf_event__handler_t process,
				   struct machine *machine)
{
	struct cpu_map_event *event;
	int err;

	event = cpu_map_event__new(map);
	if (!event)
		return -ENOMEM;

	err = process(tool, (union perf_event *) event, NULL, machine);

	free(event);
	return err;
}
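/*
 * PERF_RECORD_STAT_CONFIG carries an array of (tag, val) pairs, one per
 * PERF_STAT_CONFIG_TERM__* value, so that the reader side
 * (perf_event__read_stat_config() below) can pick out the terms it knows
 * about and warn on the ones it doesn't.
 */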
int perf_event__synthesize_stat_config(struct perf_tool *tool,
				       struct perf_stat_config *config,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	struct stat_config_event *event;
	int size, i = 0, err;

	size = sizeof(*event);
	size += (PERF_STAT_CONFIG_TERM__MAX * sizeof(event->data[0]));

	event = zalloc(size);
	if (!event)
		return -ENOMEM;

	event->header.type = PERF_RECORD_STAT_CONFIG;
	event->header.size = size;
	event->nr = PERF_STAT_CONFIG_TERM__MAX;

#define ADD(__term, __val)					\
	event->data[i].tag = PERF_STAT_CONFIG_TERM__##__term;	\
	event->data[i].val = __val;				\
	i++;

	ADD(AGGR_MODE, config->aggr_mode)
	ADD(INTERVAL, config->interval)
	ADD(SCALE, config->scale)

	WARN_ONCE(i != PERF_STAT_CONFIG_TERM__MAX,
		  "stat config terms unbalanced\n");
#undef ADD

	err = process(tool, (union perf_event *) event, NULL, machine);

	free(event);
	return err;
}

int perf_event__synthesize_stat(struct perf_tool *tool,
				u32 cpu, u32 thread, u64 id,
				struct perf_counts_values *count,
				perf_event__handler_t process,
				struct machine *machine)
{
	struct stat_event event;

	event.header.type = PERF_RECORD_STAT;
	event.header.size = sizeof(event);
	event.header.misc = 0;

	event.id = id;
	event.cpu = cpu;
	event.thread = thread;
	event.val = count->val;
	event.ena = count->ena;
	event.run = count->run;

	return process(tool, (union perf_event *) &event, NULL, machine);
}

int perf_event__synthesize_stat_round(struct perf_tool *tool,
				      u64 evtime, u64 type,
				      perf_event__handler_t process,
				      struct machine *machine)
{
	struct stat_round_event event;

	event.header.type = PERF_RECORD_STAT_ROUND;
	event.header.size = sizeof(event);
	event.header.misc = 0;

	event.time = evtime;
	event.type = type;

	return process(tool, (union perf_event *) &event, NULL, machine);
}

void perf_event__read_stat_config(struct perf_stat_config *config,
				  struct stat_config_event *event)
{
	unsigned i;

	for (i = 0; i < event->nr; i++) {

		switch (event->data[i].tag) {
#define CASE(__term, __val)					\
		case PERF_STAT_CONFIG_TERM__##__term:		\
			config->__val = event->data[i].val;	\
			break;

		CASE(AGGR_MODE, aggr_mode)
		CASE(SCALE, scale)
		CASE(INTERVAL, interval)
#undef CASE
		default:
			pr_warning("unknown stat config term %" PRIu64 "\n",
				   event->data[i].tag);
		}
	}
}

size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp)
{
	const char *s;

	if (event->header.misc & PERF_RECORD_MISC_COMM_EXEC)
		s = " exec";
	else
		s = "";

	return fprintf(fp, "%s: %s:%d/%d\n", s, event->comm.comm, event->comm.pid, event->comm.tid);
}

size_t perf_event__fprintf_namespaces(union perf_event *event, FILE *fp)
{
	size_t ret = 0;
	struct perf_ns_link_info *ns_link_info;
	u32 nr_namespaces, idx;

	ns_link_info = event->namespaces.link_info;
	nr_namespaces = event->namespaces.nr_namespaces;

	ret += fprintf(fp, " %d/%d - nr_namespaces: %u\n\t\t[",
		       event->namespaces.pid,
		       event->namespaces.tid,
		       nr_namespaces);

	for (idx = 0; idx < nr_namespaces; idx++) {
		if (idx && (idx % 4 == 0))
			ret += fprintf(fp, "\n\t\t ");

		ret += fprintf(fp, "%u/%s: %" PRIu64 "/%#" PRIx64 "%s", idx,
			       perf_ns__name(idx), (u64)ns_link_info[idx].dev,
			       (u64)ns_link_info[idx].ino,
			       ((idx + 1) != nr_namespaces) ? ", " : "]\n");
	}

	return ret;
}

int perf_event__process_comm(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_comm_event(machine, event, sample);
}

int perf_event__process_namespaces(struct perf_tool *tool __maybe_unused,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine)
{
	return machine__process_namespaces_event(machine, event, sample);
}

int perf_event__process_lost(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_lost_event(machine, event, sample);
}

int perf_event__process_aux(struct perf_tool *tool __maybe_unused,
			    union perf_event *event,
			    struct perf_sample *sample __maybe_unused,
			    struct machine *machine)
{
	return machine__process_aux_event(machine, event);
}

int perf_event__process_itrace_start(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine)
{
	return machine__process_itrace_start_event(machine, event);
}

int perf_event__process_lost_samples(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_sample *sample,
				     struct machine *machine)
{
	return machine__process_lost_samples_event(machine, event, sample);
}

int perf_event__process_switch(struct perf_tool *tool __maybe_unused,
			       union perf_event *event,
			       struct perf_sample *sample __maybe_unused,
			       struct machine *machine)
{
	return machine__process_switch_event(machine, event);
}

size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %c %s\n",
		       event->mmap.pid, event->mmap.tid, event->mmap.start,
		       event->mmap.len, event->mmap.pgoff,
		       (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x',
		       event->mmap.filename);
}

size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64
			   " %02x:%02x %"PRIu64" %"PRIu64"]: %c%c%c%c %s\n",
		       event->mmap2.pid, event->mmap2.tid, event->mmap2.start,
		       event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj,
		       event->mmap2.min, event->mmap2.ino,
		       event->mmap2.ino_generation,
		       (event->mmap2.prot & PROT_READ) ? 'r' : '-',
		       (event->mmap2.prot & PROT_WRITE) ? 'w' : '-',
		       (event->mmap2.prot & PROT_EXEC) ? 'x' : '-',
		       (event->mmap2.flags & MAP_SHARED) ? 's' : 'p',
		       event->mmap2.filename);
}
size_t perf_event__fprintf_thread_map(union perf_event *event, FILE *fp)
{
	struct thread_map *threads = thread_map__new_event(&event->thread_map);
	size_t ret;

	ret = fprintf(fp, " nr: ");

	if (threads)
		ret += thread_map__fprintf(threads, fp);
	else
		ret += fprintf(fp, "failed to get threads from event\n");

	thread_map__put(threads);
	return ret;
}

size_t perf_event__fprintf_cpu_map(union perf_event *event, FILE *fp)
{
	struct cpu_map *cpus = cpu_map__new_data(&event->cpu_map.data);
	size_t ret;

	ret = fprintf(fp, ": ");

	if (cpus)
		ret += cpu_map__fprintf(cpus, fp);
	else
		ret += fprintf(fp, "failed to get cpumap from event\n");

	cpu_map__put(cpus);
	return ret;
}

int perf_event__process_mmap(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_mmap_event(machine, event, sample);
}

int perf_event__process_mmap2(struct perf_tool *tool __maybe_unused,
			      union perf_event *event,
			      struct perf_sample *sample,
			      struct machine *machine)
{
	return machine__process_mmap2_event(machine, event, sample);
}

size_t perf_event__fprintf_task(union perf_event *event, FILE *fp)
{
	return fprintf(fp, "(%d:%d):(%d:%d)\n",
		       event->fork.pid, event->fork.tid,
		       event->fork.ppid, event->fork.ptid);
}

int perf_event__process_fork(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_fork_event(machine, event, sample);
}

int perf_event__process_exit(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_exit_event(machine, event, sample);
}

size_t perf_event__fprintf_aux(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " offset: %#"PRIx64" size: %#"PRIx64" flags: %#"PRIx64" [%s%s%s]\n",
		       event->aux.aux_offset, event->aux.aux_size,
		       event->aux.flags,
		       event->aux.flags & PERF_AUX_FLAG_TRUNCATED ? "T" : "",
		       event->aux.flags & PERF_AUX_FLAG_OVERWRITE ? "O" : "",
		       event->aux.flags & PERF_AUX_FLAG_PARTIAL ? "P" : "");
}

size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " pid: %u tid: %u\n",
		       event->itrace_start.pid, event->itrace_start.tid);
}

size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp)
{
	bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
	const char *in_out = !out ? "IN " :
		!(event->header.misc & PERF_RECORD_MISC_SWITCH_OUT_PREEMPT) ?
		"OUT " : "OUT preempt";

	if (event->header.type == PERF_RECORD_SWITCH)
		return fprintf(fp, " %s\n", in_out);

	return fprintf(fp, " %s %s pid/tid: %5u/%-5u\n",
		       in_out, out ? "next" : "prev",
		       event->context_switch.next_prev_pid,
		       event->context_switch.next_prev_tid);
}

static size_t perf_event__fprintf_lost(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " lost %" PRIu64 "\n", event->lost.lost);
}

size_t perf_event__fprintf(union perf_event *event, FILE *fp)
{
	size_t ret = fprintf(fp, "PERF_RECORD_%s",
			     perf_event__name(event->header.type));

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret += perf_event__fprintf_comm(event, fp);
		break;
	case PERF_RECORD_FORK:
	case PERF_RECORD_EXIT:
		ret += perf_event__fprintf_task(event, fp);
		break;
	case PERF_RECORD_MMAP:
		ret += perf_event__fprintf_mmap(event, fp);
		break;
	case PERF_RECORD_NAMESPACES:
		ret += perf_event__fprintf_namespaces(event, fp);
		break;
	case PERF_RECORD_MMAP2:
		ret += perf_event__fprintf_mmap2(event, fp);
		break;
	case PERF_RECORD_AUX:
		ret += perf_event__fprintf_aux(event, fp);
		break;
	case PERF_RECORD_ITRACE_START:
		ret += perf_event__fprintf_itrace_start(event, fp);
		break;
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		ret += perf_event__fprintf_switch(event, fp);
		break;
	case PERF_RECORD_LOST:
		ret += perf_event__fprintf_lost(event, fp);
		break;
	default:
		ret += fprintf(fp, "\n");
	}

	return ret;
}

int perf_event__process(struct perf_tool *tool __maybe_unused,
			union perf_event *event,
			struct perf_sample *sample,
			struct machine *machine)
{
	return machine__process_event(machine, event, sample);
}
void thread__find_addr_map(struct thread *thread, u8 cpumode,
			   enum map_type type, u64 addr,
			   struct addr_location *al)
{
	struct map_groups *mg = thread->mg;
	struct machine *machine = mg->machine;
	bool load_map = false;

	al->machine = machine;
	al->thread = thread;
	al->addr = addr;
	al->cpumode = cpumode;
	al->filtered = 0;

	if (machine == NULL) {
		al->map = NULL;
		return;
	}

	/*
	 * al->level: 'k' host kernel, '.' host user, 'g' guest kernel,
	 * 'u' guest user, 'H' hypervisor or otherwise unresolvable.
	 */
	if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
		al->level = 'k';
		mg = &machine->kmaps;
		load_map = true;
	} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
		al->level = '.';
	} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
		al->level = 'g';
		mg = &machine->kmaps;
		load_map = true;
	} else if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest) {
		al->level = 'u';
	} else {
		al->level = 'H';
		al->map = NULL;

		if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
		     cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
		    !perf_guest)
			al->filtered |= (1 << HIST_FILTER__GUEST);
		if ((cpumode == PERF_RECORD_MISC_USER ||
		     cpumode == PERF_RECORD_MISC_KERNEL) &&
		    !perf_host)
			al->filtered |= (1 << HIST_FILTER__HOST);

		return;
	}
try_again:
	al->map = map_groups__find(mg, type, al->addr);
	if (al->map == NULL) {
		/*
		 * If this is outside of all known maps, and is a negative
		 * address, try to look it up in the kernel dso, as it might be
		 * a vsyscall or vdso (which executes in user-mode).
		 *
		 * XXX This is nasty, we should have a symbol list in the
		 * "[vdso]" dso, but for now let's use the old trick of looking
		 * in the whole kernel symbol list.
		 */
		if (cpumode == PERF_RECORD_MISC_USER && machine &&
		    mg != &machine->kmaps &&
		    machine__kernel_ip(machine, al->addr)) {
			mg = &machine->kmaps;
			load_map = true;
			goto try_again;
		}
	} else {
		/*
		 * Kernel maps might be changed when loading symbols so loading
		 * must be done prior to using kernel maps.
		 */
		if (load_map)
			map__load(al->map);
		al->addr = al->map->map_ip(al->map, al->addr);
	}
}

void thread__find_addr_location(struct thread *thread,
				u8 cpumode, enum map_type type, u64 addr,
				struct addr_location *al)
{
	thread__find_addr_map(thread, cpumode, type, addr, al);
	if (al->map != NULL)
		al->sym = map__find_symbol(al->map, al->addr);
	else
		al->sym = NULL;
}

/*
 * Callers need to drop the reference to al->thread, obtained in
 * machine__findnew_thread()
 */
int machine__resolve(struct machine *machine, struct addr_location *al,
		     struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine, sample->pid,
							sample->tid);

	if (thread == NULL)
		return -1;

	dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);
	thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, sample->ip, al);
	dump_printf(" ...... dso: %s\n",
		    al->map ? al->map->dso->long_name :
			al->level == 'H' ? "[hypervisor]" : "<not found>");

	if (thread__is_filtered(thread))
		al->filtered |= (1 << HIST_FILTER__THREAD);

	al->sym = NULL;
	al->cpu = sample->cpu;
	al->socket = -1;
	al->srcline = NULL;

	if (al->cpu >= 0) {
		struct perf_env *env = machine->env;

		if (env && env->cpu)
			al->socket = env->cpu[al->cpu].socket_id;
	}

	if (al->map) {
		struct dso *dso = al->map->dso;

		if (symbol_conf.dso_list &&
		    (!dso || !(strlist__has_entry(symbol_conf.dso_list,
						  dso->short_name) ||
			       (dso->short_name != dso->long_name &&
				strlist__has_entry(symbol_conf.dso_list,
						   dso->long_name))))) {
			al->filtered |= (1 << HIST_FILTER__DSO);
		}

		al->sym = map__find_symbol(al->map, al->addr);
	}

	if (symbol_conf.sym_list &&
	    (!al->sym || !strlist__has_entry(symbol_conf.sym_list,
					     al->sym->name))) {
		al->filtered |= (1 << HIST_FILTER__SYMBOL);
	}

	return 0;
}
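/*
 * Sketch of the expected calling pattern (hypothetical tool code):
 *
 *	struct addr_location al;
 *
 *	if (machine__resolve(machine, &al, sample) < 0)
 *		return -1;
 *	// ... use al.map, al.sym, al.thread ...
 *	addr_location__put(&al);
 */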
/*
 * machine__resolve() and thread__resolve() return with a reference count
 * held on al->thread; when callers are done using it (perhaps after
 * grabbing references to entries they need to keep pointers to), the call
 * must be paired with addr_location__put(), so that the refcounts can be
 * decremented.
 */
void addr_location__put(struct addr_location *al)
{
	thread__zput(al->thread);
}

bool is_bts_event(struct perf_event_attr *attr)
{
	/* Intel BTS is set up as a branch-instructions event with period 1 */
	return attr->type == PERF_TYPE_HARDWARE &&
	       attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
	       attr->sample_period == 1;
}

bool sample_addr_correlates_sym(struct perf_event_attr *attr)
{
	if (attr->type == PERF_TYPE_SOFTWARE &&
	    (attr->config == PERF_COUNT_SW_PAGE_FAULTS ||
	     attr->config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
	     attr->config == PERF_COUNT_SW_PAGE_FAULTS_MAJ))
		return true;

	if (is_bts_event(attr))
		return true;

	return false;
}

void thread__resolve(struct thread *thread, struct addr_location *al,
		     struct perf_sample *sample)
{
	thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, sample->addr, al);
	if (!al->map)
		thread__find_addr_map(thread, sample->cpumode, MAP__VARIABLE,
				      sample->addr, al);

	al->cpu = sample->cpu;
	al->sym = NULL;

	if (al->map)
		al->sym = map__find_symbol(al->map, al->addr);
}