1 // SPDX-License-Identifier: GPL-2.0-only 2 3 #include "util/cgroup.h" 4 #include "util/data.h" 5 #include "util/debug.h" 6 #include "util/dso.h" 7 #include "util/event.h" 8 #include "util/evlist.h" 9 #include "util/machine.h" 10 #include "util/map.h" 11 #include "util/map_symbol.h" 12 #include "util/branch.h" 13 #include "util/memswap.h" 14 #include "util/namespaces.h" 15 #include "util/session.h" 16 #include "util/stat.h" 17 #include "util/symbol.h" 18 #include "util/synthetic-events.h" 19 #include "util/target.h" 20 #include "util/time-utils.h" 21 #include <linux/bitops.h> 22 #include <linux/kernel.h> 23 #include <linux/string.h> 24 #include <linux/zalloc.h> 25 #include <linux/perf_event.h> 26 #include <asm/bug.h> 27 #include <perf/evsel.h> 28 #include <perf/cpumap.h> 29 #include <internal/lib.h> // page_size 30 #include <internal/threadmap.h> 31 #include <perf/threadmap.h> 32 #include <symbol/kallsyms.h> 33 #include <dirent.h> 34 #include <errno.h> 35 #include <inttypes.h> 36 #include <stdio.h> 37 #include <string.h> 38 #include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */ 39 #include <api/fs/fs.h> 40 #include <api/io.h> 41 #include <sys/types.h> 42 #include <sys/stat.h> 43 #include <fcntl.h> 44 #include <unistd.h> 45 46 #define DEFAULT_PROC_MAP_PARSE_TIMEOUT 500 47 48 unsigned int proc_map_timeout = DEFAULT_PROC_MAP_PARSE_TIMEOUT; 49 50 int perf_tool__process_synth_event(struct perf_tool *tool, 51 union perf_event *event, 52 struct machine *machine, 53 perf_event__handler_t process) 54 { 55 struct perf_sample synth_sample = { 56 .pid = -1, 57 .tid = -1, 58 .time = -1, 59 .stream_id = -1, 60 .cpu = -1, 61 .period = 1, 62 .cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK, 63 }; 64 65 return process(tool, event, &synth_sample, machine); 66 }; 67 68 /* 69 * Assumes that the first 4095 bytes of /proc/pid/stat contains 70 * the comm, tgid and ppid. 71 */ 72 static int perf_event__get_comm_ids(pid_t pid, pid_t tid, char *comm, size_t len, 73 pid_t *tgid, pid_t *ppid, bool *kernel) 74 { 75 char bf[4096]; 76 int fd; 77 size_t size = 0; 78 ssize_t n; 79 char *name, *tgids, *ppids, *vmpeak, *threads; 80 81 *tgid = -1; 82 *ppid = -1; 83 84 if (pid) 85 snprintf(bf, sizeof(bf), "/proc/%d/task/%d/status", pid, tid); 86 else 87 snprintf(bf, sizeof(bf), "/proc/%d/status", tid); 88 89 fd = open(bf, O_RDONLY); 90 if (fd < 0) { 91 pr_debug("couldn't open %s\n", bf); 92 return -1; 93 } 94 95 n = read(fd, bf, sizeof(bf) - 1); 96 close(fd); 97 if (n <= 0) { 98 pr_warning("Couldn't get COMM, tigd and ppid for pid %d\n", 99 tid); 100 return -1; 101 } 102 bf[n] = '\0'; 103 104 name = strstr(bf, "Name:"); 105 tgids = strstr(name ?: bf, "Tgid:"); 106 ppids = strstr(tgids ?: bf, "PPid:"); 107 vmpeak = strstr(ppids ?: bf, "VmPeak:"); 108 109 if (vmpeak) 110 threads = NULL; 111 else 112 threads = strstr(ppids ?: bf, "Threads:"); 113 114 if (name) { 115 char *nl; 116 117 name = skip_spaces(name + 5); /* strlen("Name:") */ 118 nl = strchr(name, '\n'); 119 if (nl) 120 *nl = '\0'; 121 122 size = strlen(name); 123 if (size >= len) 124 size = len - 1; 125 memcpy(comm, name, size); 126 comm[size] = '\0'; 127 } else { 128 pr_debug("Name: string not found for pid %d\n", tid); 129 } 130 131 if (tgids) { 132 tgids += 5; /* strlen("Tgid:") */ 133 *tgid = atoi(tgids); 134 } else { 135 pr_debug("Tgid: string not found for pid %d\n", tid); 136 } 137 138 if (ppids) { 139 ppids += 5; /* strlen("PPid:") */ 140 *ppid = atoi(ppids); 141 } else { 142 pr_debug("PPid: string not found for pid %d\n", tid); 143 } 144 145 if (!vmpeak && threads) 146 *kernel = true; 147 else 148 *kernel = false; 149 150 return 0; 151 } 152 153 static int perf_event__prepare_comm(union perf_event *event, pid_t pid, pid_t tid, 154 struct machine *machine, 155 pid_t *tgid, pid_t *ppid, bool *kernel) 156 { 157 size_t size; 158 159 *ppid = -1; 160 161 memset(&event->comm, 0, sizeof(event->comm)); 162 163 if (machine__is_host(machine)) { 164 if (perf_event__get_comm_ids(pid, tid, event->comm.comm, 165 sizeof(event->comm.comm), 166 tgid, ppid, kernel) != 0) { 167 return -1; 168 } 169 } else { 170 *tgid = machine->pid; 171 } 172 173 if (*tgid < 0) 174 return -1; 175 176 event->comm.pid = *tgid; 177 event->comm.header.type = PERF_RECORD_COMM; 178 179 size = strlen(event->comm.comm) + 1; 180 size = PERF_ALIGN(size, sizeof(u64)); 181 memset(event->comm.comm + size, 0, machine->id_hdr_size); 182 event->comm.header.size = (sizeof(event->comm) - 183 (sizeof(event->comm.comm) - size) + 184 machine->id_hdr_size); 185 event->comm.tid = tid; 186 187 return 0; 188 } 189 190 pid_t perf_event__synthesize_comm(struct perf_tool *tool, 191 union perf_event *event, pid_t pid, 192 perf_event__handler_t process, 193 struct machine *machine) 194 { 195 pid_t tgid, ppid; 196 bool kernel_thread; 197 198 if (perf_event__prepare_comm(event, 0, pid, machine, &tgid, &ppid, 199 &kernel_thread) != 0) 200 return -1; 201 202 if (perf_tool__process_synth_event(tool, event, machine, process) != 0) 203 return -1; 204 205 return tgid; 206 } 207 208 static void perf_event__get_ns_link_info(pid_t pid, const char *ns, 209 struct perf_ns_link_info *ns_link_info) 210 { 211 struct stat64 st; 212 char proc_ns[128]; 213 214 sprintf(proc_ns, "/proc/%u/ns/%s", pid, ns); 215 if (stat64(proc_ns, &st) == 0) { 216 ns_link_info->dev = st.st_dev; 217 ns_link_info->ino = st.st_ino; 218 } 219 } 220 221 int perf_event__synthesize_namespaces(struct perf_tool *tool, 222 union perf_event *event, 223 pid_t pid, pid_t tgid, 224 perf_event__handler_t process, 225 struct machine *machine) 226 { 227 u32 idx; 228 struct perf_ns_link_info *ns_link_info; 229 230 if (!tool || !tool->namespace_events) 231 return 0; 232 233 memset(&event->namespaces, 0, (sizeof(event->namespaces) + 234 (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) + 235 machine->id_hdr_size)); 236 237 event->namespaces.pid = tgid; 238 event->namespaces.tid = pid; 239 240 event->namespaces.nr_namespaces = NR_NAMESPACES; 241 242 ns_link_info = event->namespaces.link_info; 243 244 for (idx = 0; idx < event->namespaces.nr_namespaces; idx++) 245 perf_event__get_ns_link_info(pid, perf_ns__name(idx), 246 &ns_link_info[idx]); 247 248 event->namespaces.header.type = PERF_RECORD_NAMESPACES; 249 250 event->namespaces.header.size = (sizeof(event->namespaces) + 251 (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) + 252 machine->id_hdr_size); 253 254 if (perf_tool__process_synth_event(tool, event, machine, process) != 0) 255 return -1; 256 257 return 0; 258 } 259 260 static int perf_event__synthesize_fork(struct perf_tool *tool, 261 union perf_event *event, 262 pid_t pid, pid_t tgid, pid_t ppid, 263 perf_event__handler_t process, 264 struct machine *machine) 265 { 266 memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size); 267 268 /* 269 * for main thread set parent to ppid from status file. For other 270 * threads set parent pid to main thread. ie., assume main thread 271 * spawns all threads in a process 272 */ 273 if (tgid == pid) { 274 event->fork.ppid = ppid; 275 event->fork.ptid = ppid; 276 } else { 277 event->fork.ppid = tgid; 278 event->fork.ptid = tgid; 279 } 280 event->fork.pid = tgid; 281 event->fork.tid = pid; 282 event->fork.header.type = PERF_RECORD_FORK; 283 event->fork.header.misc = PERF_RECORD_MISC_FORK_EXEC; 284 285 event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size); 286 287 if (perf_tool__process_synth_event(tool, event, machine, process) != 0) 288 return -1; 289 290 return 0; 291 } 292 293 static bool read_proc_maps_line(struct io *io, __u64 *start, __u64 *end, 294 u32 *prot, u32 *flags, __u64 *offset, 295 u32 *maj, u32 *min, 296 __u64 *inode, 297 ssize_t pathname_size, char *pathname) 298 { 299 __u64 temp; 300 int ch; 301 char *start_pathname = pathname; 302 303 if (io__get_hex(io, start) != '-') 304 return false; 305 if (io__get_hex(io, end) != ' ') 306 return false; 307 308 /* map protection and flags bits */ 309 *prot = 0; 310 ch = io__get_char(io); 311 if (ch == 'r') 312 *prot |= PROT_READ; 313 else if (ch != '-') 314 return false; 315 ch = io__get_char(io); 316 if (ch == 'w') 317 *prot |= PROT_WRITE; 318 else if (ch != '-') 319 return false; 320 ch = io__get_char(io); 321 if (ch == 'x') 322 *prot |= PROT_EXEC; 323 else if (ch != '-') 324 return false; 325 ch = io__get_char(io); 326 if (ch == 's') 327 *flags = MAP_SHARED; 328 else if (ch == 'p') 329 *flags = MAP_PRIVATE; 330 else 331 return false; 332 if (io__get_char(io) != ' ') 333 return false; 334 335 if (io__get_hex(io, offset) != ' ') 336 return false; 337 338 if (io__get_hex(io, &temp) != ':') 339 return false; 340 *maj = temp; 341 if (io__get_hex(io, &temp) != ' ') 342 return false; 343 *min = temp; 344 345 ch = io__get_dec(io, inode); 346 if (ch != ' ') { 347 *pathname = '\0'; 348 return ch == '\n'; 349 } 350 do { 351 ch = io__get_char(io); 352 } while (ch == ' '); 353 while (true) { 354 if (ch < 0) 355 return false; 356 if (ch == '\0' || ch == '\n' || 357 (pathname + 1 - start_pathname) >= pathname_size) { 358 *pathname = '\0'; 359 return true; 360 } 361 *pathname++ = ch; 362 ch = io__get_char(io); 363 } 364 } 365 366 static void perf_record_mmap2__read_build_id(struct perf_record_mmap2 *event, 367 struct machine *machine, 368 bool is_kernel) 369 { 370 struct build_id bid; 371 struct nsinfo *nsi; 372 struct nscookie nc; 373 struct dso *dso = NULL; 374 struct dso_id id; 375 int rc; 376 377 if (is_kernel) { 378 rc = sysfs__read_build_id("/sys/kernel/notes", &bid); 379 goto out; 380 } 381 382 id.maj = event->maj; 383 id.min = event->min; 384 id.ino = event->ino; 385 id.ino_generation = event->ino_generation; 386 387 dso = dsos__findnew_id(&machine->dsos, event->filename, &id); 388 if (dso && dso->has_build_id) { 389 bid = dso->bid; 390 rc = 0; 391 goto out; 392 } 393 394 nsi = nsinfo__new(event->pid); 395 nsinfo__mountns_enter(nsi, &nc); 396 397 rc = filename__read_build_id(event->filename, &bid) > 0 ? 0 : -1; 398 399 nsinfo__mountns_exit(&nc); 400 nsinfo__put(nsi); 401 402 out: 403 if (rc == 0) { 404 memcpy(event->build_id, bid.data, sizeof(bid.data)); 405 event->build_id_size = (u8) bid.size; 406 event->header.misc |= PERF_RECORD_MISC_MMAP_BUILD_ID; 407 event->__reserved_1 = 0; 408 event->__reserved_2 = 0; 409 410 if (dso && !dso->has_build_id) 411 dso__set_build_id(dso, &bid); 412 } else { 413 if (event->filename[0] == '/') { 414 pr_debug2("Failed to read build ID for %s\n", 415 event->filename); 416 } 417 } 418 dso__put(dso); 419 } 420 421 int perf_event__synthesize_mmap_events(struct perf_tool *tool, 422 union perf_event *event, 423 pid_t pid, pid_t tgid, 424 perf_event__handler_t process, 425 struct machine *machine, 426 bool mmap_data) 427 { 428 unsigned long long t; 429 char bf[BUFSIZ]; 430 struct io io; 431 bool truncation = false; 432 unsigned long long timeout = proc_map_timeout * 1000000ULL; 433 int rc = 0; 434 const char *hugetlbfs_mnt = hugetlbfs__mountpoint(); 435 int hugetlbfs_mnt_len = hugetlbfs_mnt ? strlen(hugetlbfs_mnt) : 0; 436 437 if (machine__is_default_guest(machine)) 438 return 0; 439 440 snprintf(bf, sizeof(bf), "%s/proc/%d/task/%d/maps", 441 machine->root_dir, pid, pid); 442 443 io.fd = open(bf, O_RDONLY, 0); 444 if (io.fd < 0) { 445 /* 446 * We raced with a task exiting - just return: 447 */ 448 pr_debug("couldn't open %s\n", bf); 449 return -1; 450 } 451 io__init(&io, io.fd, bf, sizeof(bf)); 452 453 event->header.type = PERF_RECORD_MMAP2; 454 t = rdclock(); 455 456 while (!io.eof) { 457 static const char anonstr[] = "//anon"; 458 size_t size, aligned_size; 459 460 /* ensure null termination since stack will be reused. */ 461 event->mmap2.filename[0] = '\0'; 462 463 /* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */ 464 if (!read_proc_maps_line(&io, 465 &event->mmap2.start, 466 &event->mmap2.len, 467 &event->mmap2.prot, 468 &event->mmap2.flags, 469 &event->mmap2.pgoff, 470 &event->mmap2.maj, 471 &event->mmap2.min, 472 &event->mmap2.ino, 473 sizeof(event->mmap2.filename), 474 event->mmap2.filename)) 475 continue; 476 477 if ((rdclock() - t) > timeout) { 478 pr_warning("Reading %s/proc/%d/task/%d/maps time out. " 479 "You may want to increase " 480 "the time limit by --proc-map-timeout\n", 481 machine->root_dir, pid, pid); 482 truncation = true; 483 goto out; 484 } 485 486 event->mmap2.ino_generation = 0; 487 488 /* 489 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c 490 */ 491 if (machine__is_host(machine)) 492 event->header.misc = PERF_RECORD_MISC_USER; 493 else 494 event->header.misc = PERF_RECORD_MISC_GUEST_USER; 495 496 if ((event->mmap2.prot & PROT_EXEC) == 0) { 497 if (!mmap_data || (event->mmap2.prot & PROT_READ) == 0) 498 continue; 499 500 event->header.misc |= PERF_RECORD_MISC_MMAP_DATA; 501 } 502 503 out: 504 if (truncation) 505 event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT; 506 507 if (!strcmp(event->mmap2.filename, "")) 508 strcpy(event->mmap2.filename, anonstr); 509 510 if (hugetlbfs_mnt_len && 511 !strncmp(event->mmap2.filename, hugetlbfs_mnt, 512 hugetlbfs_mnt_len)) { 513 strcpy(event->mmap2.filename, anonstr); 514 event->mmap2.flags |= MAP_HUGETLB; 515 } 516 517 size = strlen(event->mmap2.filename) + 1; 518 aligned_size = PERF_ALIGN(size, sizeof(u64)); 519 event->mmap2.len -= event->mmap.start; 520 event->mmap2.header.size = (sizeof(event->mmap2) - 521 (sizeof(event->mmap2.filename) - aligned_size)); 522 memset(event->mmap2.filename + size, 0, machine->id_hdr_size + 523 (aligned_size - size)); 524 event->mmap2.header.size += machine->id_hdr_size; 525 event->mmap2.pid = tgid; 526 event->mmap2.tid = pid; 527 528 if (symbol_conf.buildid_mmap2) 529 perf_record_mmap2__read_build_id(&event->mmap2, machine, false); 530 531 if (perf_tool__process_synth_event(tool, event, machine, process) != 0) { 532 rc = -1; 533 break; 534 } 535 536 if (truncation) 537 break; 538 } 539 540 close(io.fd); 541 return rc; 542 } 543 544 #ifdef HAVE_FILE_HANDLE 545 static int perf_event__synthesize_cgroup(struct perf_tool *tool, 546 union perf_event *event, 547 char *path, size_t mount_len, 548 perf_event__handler_t process, 549 struct machine *machine) 550 { 551 size_t event_size = sizeof(event->cgroup) - sizeof(event->cgroup.path); 552 size_t path_len = strlen(path) - mount_len + 1; 553 struct { 554 struct file_handle fh; 555 uint64_t cgroup_id; 556 } handle; 557 int mount_id; 558 559 while (path_len % sizeof(u64)) 560 path[mount_len + path_len++] = '\0'; 561 562 memset(&event->cgroup, 0, event_size); 563 564 event->cgroup.header.type = PERF_RECORD_CGROUP; 565 event->cgroup.header.size = event_size + path_len + machine->id_hdr_size; 566 567 handle.fh.handle_bytes = sizeof(handle.cgroup_id); 568 if (name_to_handle_at(AT_FDCWD, path, &handle.fh, &mount_id, 0) < 0) { 569 pr_debug("stat failed: %s\n", path); 570 return -1; 571 } 572 573 event->cgroup.id = handle.cgroup_id; 574 strncpy(event->cgroup.path, path + mount_len, path_len); 575 memset(event->cgroup.path + path_len, 0, machine->id_hdr_size); 576 577 if (perf_tool__process_synth_event(tool, event, machine, process) < 0) { 578 pr_debug("process synth event failed\n"); 579 return -1; 580 } 581 582 return 0; 583 } 584 585 static int perf_event__walk_cgroup_tree(struct perf_tool *tool, 586 union perf_event *event, 587 char *path, size_t mount_len, 588 perf_event__handler_t process, 589 struct machine *machine) 590 { 591 size_t pos = strlen(path); 592 DIR *d; 593 struct dirent *dent; 594 int ret = 0; 595 596 if (perf_event__synthesize_cgroup(tool, event, path, mount_len, 597 process, machine) < 0) 598 return -1; 599 600 d = opendir(path); 601 if (d == NULL) { 602 pr_debug("failed to open directory: %s\n", path); 603 return -1; 604 } 605 606 while ((dent = readdir(d)) != NULL) { 607 if (dent->d_type != DT_DIR) 608 continue; 609 if (!strcmp(dent->d_name, ".") || 610 !strcmp(dent->d_name, "..")) 611 continue; 612 613 /* any sane path should be less than PATH_MAX */ 614 if (strlen(path) + strlen(dent->d_name) + 1 >= PATH_MAX) 615 continue; 616 617 if (path[pos - 1] != '/') 618 strcat(path, "/"); 619 strcat(path, dent->d_name); 620 621 ret = perf_event__walk_cgroup_tree(tool, event, path, 622 mount_len, process, machine); 623 if (ret < 0) 624 break; 625 626 path[pos] = '\0'; 627 } 628 629 closedir(d); 630 return ret; 631 } 632 633 int perf_event__synthesize_cgroups(struct perf_tool *tool, 634 perf_event__handler_t process, 635 struct machine *machine) 636 { 637 union perf_event event; 638 char cgrp_root[PATH_MAX]; 639 size_t mount_len; /* length of mount point in the path */ 640 641 if (!tool || !tool->cgroup_events) 642 return 0; 643 644 if (cgroupfs_find_mountpoint(cgrp_root, PATH_MAX, "perf_event") < 0) { 645 pr_debug("cannot find cgroup mount point\n"); 646 return -1; 647 } 648 649 mount_len = strlen(cgrp_root); 650 /* make sure the path starts with a slash (after mount point) */ 651 strcat(cgrp_root, "/"); 652 653 if (perf_event__walk_cgroup_tree(tool, &event, cgrp_root, mount_len, 654 process, machine) < 0) 655 return -1; 656 657 return 0; 658 } 659 #else 660 int perf_event__synthesize_cgroups(struct perf_tool *tool __maybe_unused, 661 perf_event__handler_t process __maybe_unused, 662 struct machine *machine __maybe_unused) 663 { 664 return -1; 665 } 666 #endif 667 668 int perf_event__synthesize_modules(struct perf_tool *tool, perf_event__handler_t process, 669 struct machine *machine) 670 { 671 int rc = 0; 672 struct map_rb_node *pos; 673 struct maps *maps = machine__kernel_maps(machine); 674 union perf_event *event; 675 size_t size = symbol_conf.buildid_mmap2 ? 676 sizeof(event->mmap2) : sizeof(event->mmap); 677 678 event = zalloc(size + machine->id_hdr_size); 679 if (event == NULL) { 680 pr_debug("Not enough memory synthesizing mmap event " 681 "for kernel modules\n"); 682 return -1; 683 } 684 685 /* 686 * kernel uses 0 for user space maps, see kernel/perf_event.c 687 * __perf_event_mmap 688 */ 689 if (machine__is_host(machine)) 690 event->header.misc = PERF_RECORD_MISC_KERNEL; 691 else 692 event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL; 693 694 maps__for_each_entry(maps, pos) { 695 struct map *map = pos->map; 696 struct dso *dso; 697 698 if (!__map__is_kmodule(map)) 699 continue; 700 701 dso = map__dso(map); 702 if (symbol_conf.buildid_mmap2) { 703 size = PERF_ALIGN(dso->long_name_len + 1, sizeof(u64)); 704 event->mmap2.header.type = PERF_RECORD_MMAP2; 705 event->mmap2.header.size = (sizeof(event->mmap2) - 706 (sizeof(event->mmap2.filename) - size)); 707 memset(event->mmap2.filename + size, 0, machine->id_hdr_size); 708 event->mmap2.header.size += machine->id_hdr_size; 709 event->mmap2.start = map__start(map); 710 event->mmap2.len = map__size(map); 711 event->mmap2.pid = machine->pid; 712 713 memcpy(event->mmap2.filename, dso->long_name, dso->long_name_len + 1); 714 715 perf_record_mmap2__read_build_id(&event->mmap2, machine, false); 716 } else { 717 size = PERF_ALIGN(dso->long_name_len + 1, sizeof(u64)); 718 event->mmap.header.type = PERF_RECORD_MMAP; 719 event->mmap.header.size = (sizeof(event->mmap) - 720 (sizeof(event->mmap.filename) - size)); 721 memset(event->mmap.filename + size, 0, machine->id_hdr_size); 722 event->mmap.header.size += machine->id_hdr_size; 723 event->mmap.start = map__start(map); 724 event->mmap.len = map__size(map); 725 event->mmap.pid = machine->pid; 726 727 memcpy(event->mmap.filename, dso->long_name, dso->long_name_len + 1); 728 } 729 730 if (perf_tool__process_synth_event(tool, event, machine, process) != 0) { 731 rc = -1; 732 break; 733 } 734 } 735 736 free(event); 737 return rc; 738 } 739 740 static int filter_task(const struct dirent *dirent) 741 { 742 return isdigit(dirent->d_name[0]); 743 } 744 745 static int __event__synthesize_thread(union perf_event *comm_event, 746 union perf_event *mmap_event, 747 union perf_event *fork_event, 748 union perf_event *namespaces_event, 749 pid_t pid, int full, perf_event__handler_t process, 750 struct perf_tool *tool, struct machine *machine, 751 bool needs_mmap, bool mmap_data) 752 { 753 char filename[PATH_MAX]; 754 struct dirent **dirent; 755 pid_t tgid, ppid; 756 int rc = 0; 757 int i, n; 758 759 /* special case: only send one comm event using passed in pid */ 760 if (!full) { 761 tgid = perf_event__synthesize_comm(tool, comm_event, pid, 762 process, machine); 763 764 if (tgid == -1) 765 return -1; 766 767 if (perf_event__synthesize_namespaces(tool, namespaces_event, pid, 768 tgid, process, machine) < 0) 769 return -1; 770 771 /* 772 * send mmap only for thread group leader 773 * see thread__init_maps() 774 */ 775 if (pid == tgid && needs_mmap && 776 perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid, 777 process, machine, mmap_data)) 778 return -1; 779 780 return 0; 781 } 782 783 if (machine__is_default_guest(machine)) 784 return 0; 785 786 snprintf(filename, sizeof(filename), "%s/proc/%d/task", 787 machine->root_dir, pid); 788 789 n = scandir(filename, &dirent, filter_task, NULL); 790 if (n < 0) 791 return n; 792 793 for (i = 0; i < n; i++) { 794 char *end; 795 pid_t _pid; 796 bool kernel_thread = false; 797 798 _pid = strtol(dirent[i]->d_name, &end, 10); 799 if (*end) 800 continue; 801 802 /* some threads may exit just after scan, ignore it */ 803 if (perf_event__prepare_comm(comm_event, pid, _pid, machine, 804 &tgid, &ppid, &kernel_thread) != 0) 805 continue; 806 807 rc = -1; 808 if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid, 809 ppid, process, machine) < 0) 810 break; 811 812 if (perf_event__synthesize_namespaces(tool, namespaces_event, _pid, 813 tgid, process, machine) < 0) 814 break; 815 816 /* 817 * Send the prepared comm event 818 */ 819 if (perf_tool__process_synth_event(tool, comm_event, machine, process) != 0) 820 break; 821 822 rc = 0; 823 if (_pid == pid && !kernel_thread && needs_mmap) { 824 /* process the parent's maps too */ 825 rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid, 826 process, machine, mmap_data); 827 if (rc) 828 break; 829 } 830 } 831 832 for (i = 0; i < n; i++) 833 zfree(&dirent[i]); 834 free(dirent); 835 836 return rc; 837 } 838 839 int perf_event__synthesize_thread_map(struct perf_tool *tool, 840 struct perf_thread_map *threads, 841 perf_event__handler_t process, 842 struct machine *machine, 843 bool needs_mmap, bool mmap_data) 844 { 845 union perf_event *comm_event, *mmap_event, *fork_event; 846 union perf_event *namespaces_event; 847 int err = -1, thread, j; 848 849 comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size); 850 if (comm_event == NULL) 851 goto out; 852 853 mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size); 854 if (mmap_event == NULL) 855 goto out_free_comm; 856 857 fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size); 858 if (fork_event == NULL) 859 goto out_free_mmap; 860 861 namespaces_event = malloc(sizeof(namespaces_event->namespaces) + 862 (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) + 863 machine->id_hdr_size); 864 if (namespaces_event == NULL) 865 goto out_free_fork; 866 867 err = 0; 868 for (thread = 0; thread < threads->nr; ++thread) { 869 if (__event__synthesize_thread(comm_event, mmap_event, 870 fork_event, namespaces_event, 871 perf_thread_map__pid(threads, thread), 0, 872 process, tool, machine, 873 needs_mmap, mmap_data)) { 874 err = -1; 875 break; 876 } 877 878 /* 879 * comm.pid is set to thread group id by 880 * perf_event__synthesize_comm 881 */ 882 if ((int) comm_event->comm.pid != perf_thread_map__pid(threads, thread)) { 883 bool need_leader = true; 884 885 /* is thread group leader in thread_map? */ 886 for (j = 0; j < threads->nr; ++j) { 887 if ((int) comm_event->comm.pid == perf_thread_map__pid(threads, j)) { 888 need_leader = false; 889 break; 890 } 891 } 892 893 /* if not, generate events for it */ 894 if (need_leader && 895 __event__synthesize_thread(comm_event, mmap_event, 896 fork_event, namespaces_event, 897 comm_event->comm.pid, 0, 898 process, tool, machine, 899 needs_mmap, mmap_data)) { 900 err = -1; 901 break; 902 } 903 } 904 } 905 free(namespaces_event); 906 out_free_fork: 907 free(fork_event); 908 out_free_mmap: 909 free(mmap_event); 910 out_free_comm: 911 free(comm_event); 912 out: 913 return err; 914 } 915 916 static int __perf_event__synthesize_threads(struct perf_tool *tool, 917 perf_event__handler_t process, 918 struct machine *machine, 919 bool needs_mmap, 920 bool mmap_data, 921 struct dirent **dirent, 922 int start, 923 int num) 924 { 925 union perf_event *comm_event, *mmap_event, *fork_event; 926 union perf_event *namespaces_event; 927 int err = -1; 928 char *end; 929 pid_t pid; 930 int i; 931 932 comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size); 933 if (comm_event == NULL) 934 goto out; 935 936 mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size); 937 if (mmap_event == NULL) 938 goto out_free_comm; 939 940 fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size); 941 if (fork_event == NULL) 942 goto out_free_mmap; 943 944 namespaces_event = malloc(sizeof(namespaces_event->namespaces) + 945 (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) + 946 machine->id_hdr_size); 947 if (namespaces_event == NULL) 948 goto out_free_fork; 949 950 for (i = start; i < start + num; i++) { 951 if (!isdigit(dirent[i]->d_name[0])) 952 continue; 953 954 pid = (pid_t)strtol(dirent[i]->d_name, &end, 10); 955 /* only interested in proper numerical dirents */ 956 if (*end) 957 continue; 958 /* 959 * We may race with exiting thread, so don't stop just because 960 * one thread couldn't be synthesized. 961 */ 962 __event__synthesize_thread(comm_event, mmap_event, fork_event, 963 namespaces_event, pid, 1, process, 964 tool, machine, needs_mmap, mmap_data); 965 } 966 err = 0; 967 968 free(namespaces_event); 969 out_free_fork: 970 free(fork_event); 971 out_free_mmap: 972 free(mmap_event); 973 out_free_comm: 974 free(comm_event); 975 out: 976 return err; 977 } 978 979 struct synthesize_threads_arg { 980 struct perf_tool *tool; 981 perf_event__handler_t process; 982 struct machine *machine; 983 bool needs_mmap; 984 bool mmap_data; 985 struct dirent **dirent; 986 int num; 987 int start; 988 }; 989 990 static void *synthesize_threads_worker(void *arg) 991 { 992 struct synthesize_threads_arg *args = arg; 993 994 __perf_event__synthesize_threads(args->tool, args->process, 995 args->machine, 996 args->needs_mmap, args->mmap_data, 997 args->dirent, 998 args->start, args->num); 999 return NULL; 1000 } 1001 1002 int perf_event__synthesize_threads(struct perf_tool *tool, 1003 perf_event__handler_t process, 1004 struct machine *machine, 1005 bool needs_mmap, bool mmap_data, 1006 unsigned int nr_threads_synthesize) 1007 { 1008 struct synthesize_threads_arg *args = NULL; 1009 pthread_t *synthesize_threads = NULL; 1010 char proc_path[PATH_MAX]; 1011 struct dirent **dirent; 1012 int num_per_thread; 1013 int m, n, i, j; 1014 int thread_nr; 1015 int base = 0; 1016 int err = -1; 1017 1018 1019 if (machine__is_default_guest(machine)) 1020 return 0; 1021 1022 snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir); 1023 n = scandir(proc_path, &dirent, filter_task, NULL); 1024 if (n < 0) 1025 return err; 1026 1027 if (nr_threads_synthesize == UINT_MAX) 1028 thread_nr = sysconf(_SC_NPROCESSORS_ONLN); 1029 else 1030 thread_nr = nr_threads_synthesize; 1031 1032 if (thread_nr <= 1) { 1033 err = __perf_event__synthesize_threads(tool, process, 1034 machine, 1035 needs_mmap, mmap_data, 1036 dirent, base, n); 1037 goto free_dirent; 1038 } 1039 if (thread_nr > n) 1040 thread_nr = n; 1041 1042 synthesize_threads = calloc(sizeof(pthread_t), thread_nr); 1043 if (synthesize_threads == NULL) 1044 goto free_dirent; 1045 1046 args = calloc(sizeof(*args), thread_nr); 1047 if (args == NULL) 1048 goto free_threads; 1049 1050 num_per_thread = n / thread_nr; 1051 m = n % thread_nr; 1052 for (i = 0; i < thread_nr; i++) { 1053 args[i].tool = tool; 1054 args[i].process = process; 1055 args[i].machine = machine; 1056 args[i].needs_mmap = needs_mmap; 1057 args[i].mmap_data = mmap_data; 1058 args[i].dirent = dirent; 1059 } 1060 for (i = 0; i < m; i++) { 1061 args[i].num = num_per_thread + 1; 1062 args[i].start = i * args[i].num; 1063 } 1064 if (i != 0) 1065 base = args[i-1].start + args[i-1].num; 1066 for (j = i; j < thread_nr; j++) { 1067 args[j].num = num_per_thread; 1068 args[j].start = base + (j - i) * args[i].num; 1069 } 1070 1071 for (i = 0; i < thread_nr; i++) { 1072 if (pthread_create(&synthesize_threads[i], NULL, 1073 synthesize_threads_worker, &args[i])) 1074 goto out_join; 1075 } 1076 err = 0; 1077 out_join: 1078 for (i = 0; i < thread_nr; i++) 1079 pthread_join(synthesize_threads[i], NULL); 1080 free(args); 1081 free_threads: 1082 free(synthesize_threads); 1083 free_dirent: 1084 for (i = 0; i < n; i++) 1085 zfree(&dirent[i]); 1086 free(dirent); 1087 1088 return err; 1089 } 1090 1091 int __weak perf_event__synthesize_extra_kmaps(struct perf_tool *tool __maybe_unused, 1092 perf_event__handler_t process __maybe_unused, 1093 struct machine *machine __maybe_unused) 1094 { 1095 return 0; 1096 } 1097 1098 static int __perf_event__synthesize_kernel_mmap(struct perf_tool *tool, 1099 perf_event__handler_t process, 1100 struct machine *machine) 1101 { 1102 union perf_event *event; 1103 size_t size = symbol_conf.buildid_mmap2 ? 1104 sizeof(event->mmap2) : sizeof(event->mmap); 1105 struct map *map = machine__kernel_map(machine); 1106 struct kmap *kmap; 1107 int err; 1108 1109 if (map == NULL) 1110 return -1; 1111 1112 kmap = map__kmap(map); 1113 if (!kmap->ref_reloc_sym) 1114 return -1; 1115 1116 /* 1117 * We should get this from /sys/kernel/sections/.text, but till that is 1118 * available use this, and after it is use this as a fallback for older 1119 * kernels. 1120 */ 1121 event = zalloc(size + machine->id_hdr_size); 1122 if (event == NULL) { 1123 pr_debug("Not enough memory synthesizing mmap event " 1124 "for kernel modules\n"); 1125 return -1; 1126 } 1127 1128 if (machine__is_host(machine)) { 1129 /* 1130 * kernel uses PERF_RECORD_MISC_USER for user space maps, 1131 * see kernel/perf_event.c __perf_event_mmap 1132 */ 1133 event->header.misc = PERF_RECORD_MISC_KERNEL; 1134 } else { 1135 event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL; 1136 } 1137 1138 if (symbol_conf.buildid_mmap2) { 1139 size = snprintf(event->mmap2.filename, sizeof(event->mmap2.filename), 1140 "%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1; 1141 size = PERF_ALIGN(size, sizeof(u64)); 1142 event->mmap2.header.type = PERF_RECORD_MMAP2; 1143 event->mmap2.header.size = (sizeof(event->mmap2) - 1144 (sizeof(event->mmap2.filename) - size) + machine->id_hdr_size); 1145 event->mmap2.pgoff = kmap->ref_reloc_sym->addr; 1146 event->mmap2.start = map__start(map); 1147 event->mmap2.len = map__end(map) - event->mmap.start; 1148 event->mmap2.pid = machine->pid; 1149 1150 perf_record_mmap2__read_build_id(&event->mmap2, machine, true); 1151 } else { 1152 size = snprintf(event->mmap.filename, sizeof(event->mmap.filename), 1153 "%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1; 1154 size = PERF_ALIGN(size, sizeof(u64)); 1155 event->mmap.header.type = PERF_RECORD_MMAP; 1156 event->mmap.header.size = (sizeof(event->mmap) - 1157 (sizeof(event->mmap.filename) - size) + machine->id_hdr_size); 1158 event->mmap.pgoff = kmap->ref_reloc_sym->addr; 1159 event->mmap.start = map__start(map); 1160 event->mmap.len = map__end(map) - event->mmap.start; 1161 event->mmap.pid = machine->pid; 1162 } 1163 1164 err = perf_tool__process_synth_event(tool, event, machine, process); 1165 free(event); 1166 1167 return err; 1168 } 1169 1170 int perf_event__synthesize_kernel_mmap(struct perf_tool *tool, 1171 perf_event__handler_t process, 1172 struct machine *machine) 1173 { 1174 int err; 1175 1176 err = __perf_event__synthesize_kernel_mmap(tool, process, machine); 1177 if (err < 0) 1178 return err; 1179 1180 return perf_event__synthesize_extra_kmaps(tool, process, machine); 1181 } 1182 1183 int perf_event__synthesize_thread_map2(struct perf_tool *tool, 1184 struct perf_thread_map *threads, 1185 perf_event__handler_t process, 1186 struct machine *machine) 1187 { 1188 union perf_event *event; 1189 int i, err, size; 1190 1191 size = sizeof(event->thread_map); 1192 size += threads->nr * sizeof(event->thread_map.entries[0]); 1193 1194 event = zalloc(size); 1195 if (!event) 1196 return -ENOMEM; 1197 1198 event->header.type = PERF_RECORD_THREAD_MAP; 1199 event->header.size = size; 1200 event->thread_map.nr = threads->nr; 1201 1202 for (i = 0; i < threads->nr; i++) { 1203 struct perf_record_thread_map_entry *entry = &event->thread_map.entries[i]; 1204 char *comm = perf_thread_map__comm(threads, i); 1205 1206 if (!comm) 1207 comm = (char *) ""; 1208 1209 entry->pid = perf_thread_map__pid(threads, i); 1210 strncpy((char *) &entry->comm, comm, sizeof(entry->comm)); 1211 } 1212 1213 err = process(tool, event, NULL, machine); 1214 1215 free(event); 1216 return err; 1217 } 1218 1219 struct synthesize_cpu_map_data { 1220 const struct perf_cpu_map *map; 1221 int nr; 1222 int min_cpu; 1223 int max_cpu; 1224 int has_any_cpu; 1225 int type; 1226 size_t size; 1227 struct perf_record_cpu_map_data *data; 1228 }; 1229 1230 static void synthesize_cpus(struct synthesize_cpu_map_data *data) 1231 { 1232 data->data->type = PERF_CPU_MAP__CPUS; 1233 data->data->cpus_data.nr = data->nr; 1234 for (int i = 0; i < data->nr; i++) 1235 data->data->cpus_data.cpu[i] = perf_cpu_map__cpu(data->map, i).cpu; 1236 } 1237 1238 static void synthesize_mask(struct synthesize_cpu_map_data *data) 1239 { 1240 int idx; 1241 struct perf_cpu cpu; 1242 1243 /* Due to padding, the 4bytes per entry mask variant is always smaller. */ 1244 data->data->type = PERF_CPU_MAP__MASK; 1245 data->data->mask32_data.nr = BITS_TO_U32(data->max_cpu); 1246 data->data->mask32_data.long_size = 4; 1247 1248 perf_cpu_map__for_each_cpu(cpu, idx, data->map) { 1249 int bit_word = cpu.cpu / 32; 1250 u32 bit_mask = 1U << (cpu.cpu & 31); 1251 1252 data->data->mask32_data.mask[bit_word] |= bit_mask; 1253 } 1254 } 1255 1256 static void synthesize_range_cpus(struct synthesize_cpu_map_data *data) 1257 { 1258 data->data->type = PERF_CPU_MAP__RANGE_CPUS; 1259 data->data->range_cpu_data.any_cpu = data->has_any_cpu; 1260 data->data->range_cpu_data.start_cpu = data->min_cpu; 1261 data->data->range_cpu_data.end_cpu = data->max_cpu; 1262 } 1263 1264 static void *cpu_map_data__alloc(struct synthesize_cpu_map_data *syn_data, 1265 size_t header_size) 1266 { 1267 size_t size_cpus, size_mask; 1268 1269 syn_data->nr = perf_cpu_map__nr(syn_data->map); 1270 syn_data->has_any_cpu = (perf_cpu_map__cpu(syn_data->map, 0).cpu == -1) ? 1 : 0; 1271 1272 syn_data->min_cpu = perf_cpu_map__cpu(syn_data->map, syn_data->has_any_cpu).cpu; 1273 syn_data->max_cpu = perf_cpu_map__max(syn_data->map).cpu; 1274 if (syn_data->max_cpu - syn_data->min_cpu + 1 == syn_data->nr - syn_data->has_any_cpu) { 1275 /* A consecutive range of CPUs can be encoded using a range. */ 1276 assert(sizeof(u16) + sizeof(struct perf_record_range_cpu_map) == sizeof(u64)); 1277 syn_data->type = PERF_CPU_MAP__RANGE_CPUS; 1278 syn_data->size = header_size + sizeof(u64); 1279 return zalloc(syn_data->size); 1280 } 1281 1282 size_cpus = sizeof(u16) + sizeof(struct cpu_map_entries) + syn_data->nr * sizeof(u16); 1283 /* Due to padding, the 4bytes per entry mask variant is always smaller. */ 1284 size_mask = sizeof(u16) + sizeof(struct perf_record_mask_cpu_map32) + 1285 BITS_TO_U32(syn_data->max_cpu) * sizeof(__u32); 1286 if (syn_data->has_any_cpu || size_cpus < size_mask) { 1287 /* Follow the CPU map encoding. */ 1288 syn_data->type = PERF_CPU_MAP__CPUS; 1289 syn_data->size = header_size + PERF_ALIGN(size_cpus, sizeof(u64)); 1290 return zalloc(syn_data->size); 1291 } 1292 /* Encode using a bitmask. */ 1293 syn_data->type = PERF_CPU_MAP__MASK; 1294 syn_data->size = header_size + PERF_ALIGN(size_mask, sizeof(u64)); 1295 return zalloc(syn_data->size); 1296 } 1297 1298 static void cpu_map_data__synthesize(struct synthesize_cpu_map_data *data) 1299 { 1300 switch (data->type) { 1301 case PERF_CPU_MAP__CPUS: 1302 synthesize_cpus(data); 1303 break; 1304 case PERF_CPU_MAP__MASK: 1305 synthesize_mask(data); 1306 break; 1307 case PERF_CPU_MAP__RANGE_CPUS: 1308 synthesize_range_cpus(data); 1309 break; 1310 default: 1311 break; 1312 } 1313 } 1314 1315 static struct perf_record_cpu_map *cpu_map_event__new(const struct perf_cpu_map *map) 1316 { 1317 struct synthesize_cpu_map_data syn_data = { .map = map }; 1318 struct perf_record_cpu_map *event; 1319 1320 1321 event = cpu_map_data__alloc(&syn_data, sizeof(struct perf_event_header)); 1322 if (!event) 1323 return NULL; 1324 1325 syn_data.data = &event->data; 1326 event->header.type = PERF_RECORD_CPU_MAP; 1327 event->header.size = syn_data.size; 1328 cpu_map_data__synthesize(&syn_data); 1329 return event; 1330 } 1331 1332 1333 int perf_event__synthesize_cpu_map(struct perf_tool *tool, 1334 const struct perf_cpu_map *map, 1335 perf_event__handler_t process, 1336 struct machine *machine) 1337 { 1338 struct perf_record_cpu_map *event; 1339 int err; 1340 1341 event = cpu_map_event__new(map); 1342 if (!event) 1343 return -ENOMEM; 1344 1345 err = process(tool, (union perf_event *) event, NULL, machine); 1346 1347 free(event); 1348 return err; 1349 } 1350 1351 int perf_event__synthesize_stat_config(struct perf_tool *tool, 1352 struct perf_stat_config *config, 1353 perf_event__handler_t process, 1354 struct machine *machine) 1355 { 1356 struct perf_record_stat_config *event; 1357 int size, i = 0, err; 1358 1359 size = sizeof(*event); 1360 size += (PERF_STAT_CONFIG_TERM__MAX * sizeof(event->data[0])); 1361 1362 event = zalloc(size); 1363 if (!event) 1364 return -ENOMEM; 1365 1366 event->header.type = PERF_RECORD_STAT_CONFIG; 1367 event->header.size = size; 1368 event->nr = PERF_STAT_CONFIG_TERM__MAX; 1369 1370 #define ADD(__term, __val) \ 1371 event->data[i].tag = PERF_STAT_CONFIG_TERM__##__term; \ 1372 event->data[i].val = __val; \ 1373 i++; 1374 1375 ADD(AGGR_MODE, config->aggr_mode) 1376 ADD(INTERVAL, config->interval) 1377 ADD(SCALE, config->scale) 1378 1379 WARN_ONCE(i != PERF_STAT_CONFIG_TERM__MAX, 1380 "stat config terms unbalanced\n"); 1381 #undef ADD 1382 1383 err = process(tool, (union perf_event *) event, NULL, machine); 1384 1385 free(event); 1386 return err; 1387 } 1388 1389 int perf_event__synthesize_stat(struct perf_tool *tool, 1390 struct perf_cpu cpu, u32 thread, u64 id, 1391 struct perf_counts_values *count, 1392 perf_event__handler_t process, 1393 struct machine *machine) 1394 { 1395 struct perf_record_stat event; 1396 1397 event.header.type = PERF_RECORD_STAT; 1398 event.header.size = sizeof(event); 1399 event.header.misc = 0; 1400 1401 event.id = id; 1402 event.cpu = cpu.cpu; 1403 event.thread = thread; 1404 event.val = count->val; 1405 event.ena = count->ena; 1406 event.run = count->run; 1407 1408 return process(tool, (union perf_event *) &event, NULL, machine); 1409 } 1410 1411 int perf_event__synthesize_stat_round(struct perf_tool *tool, 1412 u64 evtime, u64 type, 1413 perf_event__handler_t process, 1414 struct machine *machine) 1415 { 1416 struct perf_record_stat_round event; 1417 1418 event.header.type = PERF_RECORD_STAT_ROUND; 1419 event.header.size = sizeof(event); 1420 event.header.misc = 0; 1421 1422 event.time = evtime; 1423 event.type = type; 1424 1425 return process(tool, (union perf_event *) &event, NULL, machine); 1426 } 1427 1428 size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type, u64 read_format) 1429 { 1430 size_t sz, result = sizeof(struct perf_record_sample); 1431 1432 if (type & PERF_SAMPLE_IDENTIFIER) 1433 result += sizeof(u64); 1434 1435 if (type & PERF_SAMPLE_IP) 1436 result += sizeof(u64); 1437 1438 if (type & PERF_SAMPLE_TID) 1439 result += sizeof(u64); 1440 1441 if (type & PERF_SAMPLE_TIME) 1442 result += sizeof(u64); 1443 1444 if (type & PERF_SAMPLE_ADDR) 1445 result += sizeof(u64); 1446 1447 if (type & PERF_SAMPLE_ID) 1448 result += sizeof(u64); 1449 1450 if (type & PERF_SAMPLE_STREAM_ID) 1451 result += sizeof(u64); 1452 1453 if (type & PERF_SAMPLE_CPU) 1454 result += sizeof(u64); 1455 1456 if (type & PERF_SAMPLE_PERIOD) 1457 result += sizeof(u64); 1458 1459 if (type & PERF_SAMPLE_READ) { 1460 result += sizeof(u64); 1461 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 1462 result += sizeof(u64); 1463 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 1464 result += sizeof(u64); 1465 /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */ 1466 if (read_format & PERF_FORMAT_GROUP) { 1467 sz = sample_read_value_size(read_format); 1468 result += sz * sample->read.group.nr; 1469 } else { 1470 result += sizeof(u64); 1471 if (read_format & PERF_FORMAT_LOST) 1472 result += sizeof(u64); 1473 } 1474 } 1475 1476 if (type & PERF_SAMPLE_CALLCHAIN) { 1477 sz = (sample->callchain->nr + 1) * sizeof(u64); 1478 result += sz; 1479 } 1480 1481 if (type & PERF_SAMPLE_RAW) { 1482 result += sizeof(u32); 1483 result += sample->raw_size; 1484 } 1485 1486 if (type & PERF_SAMPLE_BRANCH_STACK) { 1487 sz = sample->branch_stack->nr * sizeof(struct branch_entry); 1488 /* nr, hw_idx */ 1489 sz += 2 * sizeof(u64); 1490 result += sz; 1491 } 1492 1493 if (type & PERF_SAMPLE_REGS_USER) { 1494 if (sample->user_regs.abi) { 1495 result += sizeof(u64); 1496 sz = hweight64(sample->user_regs.mask) * sizeof(u64); 1497 result += sz; 1498 } else { 1499 result += sizeof(u64); 1500 } 1501 } 1502 1503 if (type & PERF_SAMPLE_STACK_USER) { 1504 sz = sample->user_stack.size; 1505 result += sizeof(u64); 1506 if (sz) { 1507 result += sz; 1508 result += sizeof(u64); 1509 } 1510 } 1511 1512 if (type & PERF_SAMPLE_WEIGHT_TYPE) 1513 result += sizeof(u64); 1514 1515 if (type & PERF_SAMPLE_DATA_SRC) 1516 result += sizeof(u64); 1517 1518 if (type & PERF_SAMPLE_TRANSACTION) 1519 result += sizeof(u64); 1520 1521 if (type & PERF_SAMPLE_REGS_INTR) { 1522 if (sample->intr_regs.abi) { 1523 result += sizeof(u64); 1524 sz = hweight64(sample->intr_regs.mask) * sizeof(u64); 1525 result += sz; 1526 } else { 1527 result += sizeof(u64); 1528 } 1529 } 1530 1531 if (type & PERF_SAMPLE_PHYS_ADDR) 1532 result += sizeof(u64); 1533 1534 if (type & PERF_SAMPLE_CGROUP) 1535 result += sizeof(u64); 1536 1537 if (type & PERF_SAMPLE_DATA_PAGE_SIZE) 1538 result += sizeof(u64); 1539 1540 if (type & PERF_SAMPLE_CODE_PAGE_SIZE) 1541 result += sizeof(u64); 1542 1543 if (type & PERF_SAMPLE_AUX) { 1544 result += sizeof(u64); 1545 result += sample->aux_sample.size; 1546 } 1547 1548 return result; 1549 } 1550 1551 void __weak arch_perf_synthesize_sample_weight(const struct perf_sample *data, 1552 __u64 *array, u64 type __maybe_unused) 1553 { 1554 *array = data->weight; 1555 } 1556 1557 static __u64 *copy_read_group_values(__u64 *array, __u64 read_format, 1558 const struct perf_sample *sample) 1559 { 1560 size_t sz = sample_read_value_size(read_format); 1561 struct sample_read_value *v = sample->read.group.values; 1562 1563 sample_read_group__for_each(v, sample->read.group.nr, read_format) { 1564 /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */ 1565 memcpy(array, v, sz); 1566 array = (void *)array + sz; 1567 } 1568 return array; 1569 } 1570 1571 int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_format, 1572 const struct perf_sample *sample) 1573 { 1574 __u64 *array; 1575 size_t sz; 1576 /* 1577 * used for cross-endian analysis. See git commit 65014ab3 1578 * for why this goofiness is needed. 1579 */ 1580 union u64_swap u; 1581 1582 array = event->sample.array; 1583 1584 if (type & PERF_SAMPLE_IDENTIFIER) { 1585 *array = sample->id; 1586 array++; 1587 } 1588 1589 if (type & PERF_SAMPLE_IP) { 1590 *array = sample->ip; 1591 array++; 1592 } 1593 1594 if (type & PERF_SAMPLE_TID) { 1595 u.val32[0] = sample->pid; 1596 u.val32[1] = sample->tid; 1597 *array = u.val64; 1598 array++; 1599 } 1600 1601 if (type & PERF_SAMPLE_TIME) { 1602 *array = sample->time; 1603 array++; 1604 } 1605 1606 if (type & PERF_SAMPLE_ADDR) { 1607 *array = sample->addr; 1608 array++; 1609 } 1610 1611 if (type & PERF_SAMPLE_ID) { 1612 *array = sample->id; 1613 array++; 1614 } 1615 1616 if (type & PERF_SAMPLE_STREAM_ID) { 1617 *array = sample->stream_id; 1618 array++; 1619 } 1620 1621 if (type & PERF_SAMPLE_CPU) { 1622 u.val32[0] = sample->cpu; 1623 u.val32[1] = 0; 1624 *array = u.val64; 1625 array++; 1626 } 1627 1628 if (type & PERF_SAMPLE_PERIOD) { 1629 *array = sample->period; 1630 array++; 1631 } 1632 1633 if (type & PERF_SAMPLE_READ) { 1634 if (read_format & PERF_FORMAT_GROUP) 1635 *array = sample->read.group.nr; 1636 else 1637 *array = sample->read.one.value; 1638 array++; 1639 1640 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { 1641 *array = sample->read.time_enabled; 1642 array++; 1643 } 1644 1645 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { 1646 *array = sample->read.time_running; 1647 array++; 1648 } 1649 1650 /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */ 1651 if (read_format & PERF_FORMAT_GROUP) { 1652 array = copy_read_group_values(array, read_format, 1653 sample); 1654 } else { 1655 *array = sample->read.one.id; 1656 array++; 1657 1658 if (read_format & PERF_FORMAT_LOST) { 1659 *array = sample->read.one.lost; 1660 array++; 1661 } 1662 } 1663 } 1664 1665 if (type & PERF_SAMPLE_CALLCHAIN) { 1666 sz = (sample->callchain->nr + 1) * sizeof(u64); 1667 memcpy(array, sample->callchain, sz); 1668 array = (void *)array + sz; 1669 } 1670 1671 if (type & PERF_SAMPLE_RAW) { 1672 u.val32[0] = sample->raw_size; 1673 *array = u.val64; 1674 array = (void *)array + sizeof(u32); 1675 1676 memcpy(array, sample->raw_data, sample->raw_size); 1677 array = (void *)array + sample->raw_size; 1678 } 1679 1680 if (type & PERF_SAMPLE_BRANCH_STACK) { 1681 sz = sample->branch_stack->nr * sizeof(struct branch_entry); 1682 /* nr, hw_idx */ 1683 sz += 2 * sizeof(u64); 1684 memcpy(array, sample->branch_stack, sz); 1685 array = (void *)array + sz; 1686 } 1687 1688 if (type & PERF_SAMPLE_REGS_USER) { 1689 if (sample->user_regs.abi) { 1690 *array++ = sample->user_regs.abi; 1691 sz = hweight64(sample->user_regs.mask) * sizeof(u64); 1692 memcpy(array, sample->user_regs.regs, sz); 1693 array = (void *)array + sz; 1694 } else { 1695 *array++ = 0; 1696 } 1697 } 1698 1699 if (type & PERF_SAMPLE_STACK_USER) { 1700 sz = sample->user_stack.size; 1701 *array++ = sz; 1702 if (sz) { 1703 memcpy(array, sample->user_stack.data, sz); 1704 array = (void *)array + sz; 1705 *array++ = sz; 1706 } 1707 } 1708 1709 if (type & PERF_SAMPLE_WEIGHT_TYPE) { 1710 arch_perf_synthesize_sample_weight(sample, array, type); 1711 array++; 1712 } 1713 1714 if (type & PERF_SAMPLE_DATA_SRC) { 1715 *array = sample->data_src; 1716 array++; 1717 } 1718 1719 if (type & PERF_SAMPLE_TRANSACTION) { 1720 *array = sample->transaction; 1721 array++; 1722 } 1723 1724 if (type & PERF_SAMPLE_REGS_INTR) { 1725 if (sample->intr_regs.abi) { 1726 *array++ = sample->intr_regs.abi; 1727 sz = hweight64(sample->intr_regs.mask) * sizeof(u64); 1728 memcpy(array, sample->intr_regs.regs, sz); 1729 array = (void *)array + sz; 1730 } else { 1731 *array++ = 0; 1732 } 1733 } 1734 1735 if (type & PERF_SAMPLE_PHYS_ADDR) { 1736 *array = sample->phys_addr; 1737 array++; 1738 } 1739 1740 if (type & PERF_SAMPLE_CGROUP) { 1741 *array = sample->cgroup; 1742 array++; 1743 } 1744 1745 if (type & PERF_SAMPLE_DATA_PAGE_SIZE) { 1746 *array = sample->data_page_size; 1747 array++; 1748 } 1749 1750 if (type & PERF_SAMPLE_CODE_PAGE_SIZE) { 1751 *array = sample->code_page_size; 1752 array++; 1753 } 1754 1755 if (type & PERF_SAMPLE_AUX) { 1756 sz = sample->aux_sample.size; 1757 *array++ = sz; 1758 memcpy(array, sample->aux_sample.data, sz); 1759 array = (void *)array + sz; 1760 } 1761 1762 return 0; 1763 } 1764 1765 int perf_event__synthesize_id_sample(__u64 *array, u64 type, const struct perf_sample *sample) 1766 { 1767 __u64 *start = array; 1768 1769 /* 1770 * used for cross-endian analysis. See git commit 65014ab3 1771 * for why this goofiness is needed. 1772 */ 1773 union u64_swap u; 1774 1775 if (type & PERF_SAMPLE_TID) { 1776 u.val32[0] = sample->pid; 1777 u.val32[1] = sample->tid; 1778 *array = u.val64; 1779 array++; 1780 } 1781 1782 if (type & PERF_SAMPLE_TIME) { 1783 *array = sample->time; 1784 array++; 1785 } 1786 1787 if (type & PERF_SAMPLE_ID) { 1788 *array = sample->id; 1789 array++; 1790 } 1791 1792 if (type & PERF_SAMPLE_STREAM_ID) { 1793 *array = sample->stream_id; 1794 array++; 1795 } 1796 1797 if (type & PERF_SAMPLE_CPU) { 1798 u.val32[0] = sample->cpu; 1799 u.val32[1] = 0; 1800 *array = u.val64; 1801 array++; 1802 } 1803 1804 if (type & PERF_SAMPLE_IDENTIFIER) { 1805 *array = sample->id; 1806 array++; 1807 } 1808 1809 return (void *)array - (void *)start; 1810 } 1811 1812 int __perf_event__synthesize_id_index(struct perf_tool *tool, perf_event__handler_t process, 1813 struct evlist *evlist, struct machine *machine, size_t from) 1814 { 1815 union perf_event *ev; 1816 struct evsel *evsel; 1817 size_t nr = 0, i = 0, sz, max_nr, n, pos; 1818 size_t e1_sz = sizeof(struct id_index_entry); 1819 size_t e2_sz = sizeof(struct id_index_entry_2); 1820 size_t etot_sz = e1_sz + e2_sz; 1821 bool e2_needed = false; 1822 int err; 1823 1824 max_nr = (UINT16_MAX - sizeof(struct perf_record_id_index)) / etot_sz; 1825 1826 pos = 0; 1827 evlist__for_each_entry(evlist, evsel) { 1828 if (pos++ < from) 1829 continue; 1830 nr += evsel->core.ids; 1831 } 1832 1833 if (!nr) 1834 return 0; 1835 1836 pr_debug2("Synthesizing id index\n"); 1837 1838 n = nr > max_nr ? max_nr : nr; 1839 sz = sizeof(struct perf_record_id_index) + n * etot_sz; 1840 ev = zalloc(sz); 1841 if (!ev) 1842 return -ENOMEM; 1843 1844 sz = sizeof(struct perf_record_id_index) + n * e1_sz; 1845 1846 ev->id_index.header.type = PERF_RECORD_ID_INDEX; 1847 ev->id_index.nr = n; 1848 1849 pos = 0; 1850 evlist__for_each_entry(evlist, evsel) { 1851 u32 j; 1852 1853 if (pos++ < from) 1854 continue; 1855 for (j = 0; j < evsel->core.ids; j++, i++) { 1856 struct id_index_entry *e; 1857 struct id_index_entry_2 *e2; 1858 struct perf_sample_id *sid; 1859 1860 if (i >= n) { 1861 ev->id_index.header.size = sz + (e2_needed ? n * e2_sz : 0); 1862 err = process(tool, ev, NULL, machine); 1863 if (err) 1864 goto out_err; 1865 nr -= n; 1866 i = 0; 1867 e2_needed = false; 1868 } 1869 1870 e = &ev->id_index.entries[i]; 1871 1872 e->id = evsel->core.id[j]; 1873 1874 sid = evlist__id2sid(evlist, e->id); 1875 if (!sid) { 1876 free(ev); 1877 return -ENOENT; 1878 } 1879 1880 e->idx = sid->idx; 1881 e->cpu = sid->cpu.cpu; 1882 e->tid = sid->tid; 1883 1884 if (sid->machine_pid) 1885 e2_needed = true; 1886 1887 e2 = (void *)ev + sz; 1888 e2[i].machine_pid = sid->machine_pid; 1889 e2[i].vcpu = sid->vcpu.cpu; 1890 } 1891 } 1892 1893 sz = sizeof(struct perf_record_id_index) + nr * e1_sz; 1894 ev->id_index.header.size = sz + (e2_needed ? nr * e2_sz : 0); 1895 ev->id_index.nr = nr; 1896 1897 err = process(tool, ev, NULL, machine); 1898 out_err: 1899 free(ev); 1900 1901 return err; 1902 } 1903 1904 int perf_event__synthesize_id_index(struct perf_tool *tool, perf_event__handler_t process, 1905 struct evlist *evlist, struct machine *machine) 1906 { 1907 return __perf_event__synthesize_id_index(tool, process, evlist, machine, 0); 1908 } 1909 1910 int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool, 1911 struct target *target, struct perf_thread_map *threads, 1912 perf_event__handler_t process, bool needs_mmap, 1913 bool data_mmap, unsigned int nr_threads_synthesize) 1914 { 1915 /* 1916 * When perf runs in non-root PID namespace, and the namespace's proc FS 1917 * is not mounted, nsinfo__is_in_root_namespace() returns false. 1918 * In this case, the proc FS is coming for the parent namespace, thus 1919 * perf tool will wrongly gather process info from its parent PID 1920 * namespace. 1921 * 1922 * To avoid the confusion that the perf tool runs in a child PID 1923 * namespace but it synthesizes thread info from its parent PID 1924 * namespace, returns failure with warning. 1925 */ 1926 if (!nsinfo__is_in_root_namespace()) { 1927 pr_err("Perf runs in non-root PID namespace but it tries to "); 1928 pr_err("gather process info from its parent PID namespace.\n"); 1929 pr_err("Please mount the proc file system properly, e.g. "); 1930 pr_err("add the option '--mount-proc' for unshare command.\n"); 1931 return -EPERM; 1932 } 1933 1934 if (target__has_task(target)) 1935 return perf_event__synthesize_thread_map(tool, threads, process, machine, 1936 needs_mmap, data_mmap); 1937 else if (target__has_cpu(target)) 1938 return perf_event__synthesize_threads(tool, process, machine, 1939 needs_mmap, data_mmap, 1940 nr_threads_synthesize); 1941 /* command specified */ 1942 return 0; 1943 } 1944 1945 int machine__synthesize_threads(struct machine *machine, struct target *target, 1946 struct perf_thread_map *threads, bool needs_mmap, 1947 bool data_mmap, unsigned int nr_threads_synthesize) 1948 { 1949 return __machine__synthesize_threads(machine, NULL, target, threads, 1950 perf_event__process, needs_mmap, 1951 data_mmap, nr_threads_synthesize); 1952 } 1953 1954 static struct perf_record_event_update *event_update_event__new(size_t size, u64 type, u64 id) 1955 { 1956 struct perf_record_event_update *ev; 1957 1958 size += sizeof(*ev); 1959 size = PERF_ALIGN(size, sizeof(u64)); 1960 1961 ev = zalloc(size); 1962 if (ev) { 1963 ev->header.type = PERF_RECORD_EVENT_UPDATE; 1964 ev->header.size = (u16)size; 1965 ev->type = type; 1966 ev->id = id; 1967 } 1968 return ev; 1969 } 1970 1971 int perf_event__synthesize_event_update_unit(struct perf_tool *tool, struct evsel *evsel, 1972 perf_event__handler_t process) 1973 { 1974 size_t size = strlen(evsel->unit); 1975 struct perf_record_event_update *ev; 1976 int err; 1977 1978 ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->core.id[0]); 1979 if (ev == NULL) 1980 return -ENOMEM; 1981 1982 strlcpy(ev->unit, evsel->unit, size + 1); 1983 err = process(tool, (union perf_event *)ev, NULL, NULL); 1984 free(ev); 1985 return err; 1986 } 1987 1988 int perf_event__synthesize_event_update_scale(struct perf_tool *tool, struct evsel *evsel, 1989 perf_event__handler_t process) 1990 { 1991 struct perf_record_event_update *ev; 1992 struct perf_record_event_update_scale *ev_data; 1993 int err; 1994 1995 ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->core.id[0]); 1996 if (ev == NULL) 1997 return -ENOMEM; 1998 1999 ev->scale.scale = evsel->scale; 2000 err = process(tool, (union perf_event *)ev, NULL, NULL); 2001 free(ev); 2002 return err; 2003 } 2004 2005 int perf_event__synthesize_event_update_name(struct perf_tool *tool, struct evsel *evsel, 2006 perf_event__handler_t process) 2007 { 2008 struct perf_record_event_update *ev; 2009 size_t len = strlen(evsel__name(evsel)); 2010 int err; 2011 2012 ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->core.id[0]); 2013 if (ev == NULL) 2014 return -ENOMEM; 2015 2016 strlcpy(ev->name, evsel->name, len + 1); 2017 err = process(tool, (union perf_event *)ev, NULL, NULL); 2018 free(ev); 2019 return err; 2020 } 2021 2022 int perf_event__synthesize_event_update_cpus(struct perf_tool *tool, struct evsel *evsel, 2023 perf_event__handler_t process) 2024 { 2025 struct synthesize_cpu_map_data syn_data = { .map = evsel->core.own_cpus }; 2026 struct perf_record_event_update *ev; 2027 int err; 2028 2029 ev = cpu_map_data__alloc(&syn_data, sizeof(struct perf_event_header) + 2 * sizeof(u64)); 2030 if (!ev) 2031 return -ENOMEM; 2032 2033 syn_data.data = &ev->cpus.cpus; 2034 ev->header.type = PERF_RECORD_EVENT_UPDATE; 2035 ev->header.size = (u16)syn_data.size; 2036 ev->type = PERF_EVENT_UPDATE__CPUS; 2037 ev->id = evsel->core.id[0]; 2038 cpu_map_data__synthesize(&syn_data); 2039 2040 err = process(tool, (union perf_event *)ev, NULL, NULL); 2041 free(ev); 2042 return err; 2043 } 2044 2045 int perf_event__synthesize_attrs(struct perf_tool *tool, struct evlist *evlist, 2046 perf_event__handler_t process) 2047 { 2048 struct evsel *evsel; 2049 int err = 0; 2050 2051 evlist__for_each_entry(evlist, evsel) { 2052 err = perf_event__synthesize_attr(tool, &evsel->core.attr, evsel->core.ids, 2053 evsel->core.id, process); 2054 if (err) { 2055 pr_debug("failed to create perf header attribute\n"); 2056 return err; 2057 } 2058 } 2059 2060 return err; 2061 } 2062 2063 static bool has_unit(struct evsel *evsel) 2064 { 2065 return evsel->unit && *evsel->unit; 2066 } 2067 2068 static bool has_scale(struct evsel *evsel) 2069 { 2070 return evsel->scale != 1; 2071 } 2072 2073 int perf_event__synthesize_extra_attr(struct perf_tool *tool, struct evlist *evsel_list, 2074 perf_event__handler_t process, bool is_pipe) 2075 { 2076 struct evsel *evsel; 2077 int err; 2078 2079 /* 2080 * Synthesize other events stuff not carried within 2081 * attr event - unit, scale, name 2082 */ 2083 evlist__for_each_entry(evsel_list, evsel) { 2084 if (!evsel->supported) 2085 continue; 2086 2087 /* 2088 * Synthesize unit and scale only if it's defined. 2089 */ 2090 if (has_unit(evsel)) { 2091 err = perf_event__synthesize_event_update_unit(tool, evsel, process); 2092 if (err < 0) { 2093 pr_err("Couldn't synthesize evsel unit.\n"); 2094 return err; 2095 } 2096 } 2097 2098 if (has_scale(evsel)) { 2099 err = perf_event__synthesize_event_update_scale(tool, evsel, process); 2100 if (err < 0) { 2101 pr_err("Couldn't synthesize evsel evsel.\n"); 2102 return err; 2103 } 2104 } 2105 2106 if (evsel->core.own_cpus) { 2107 err = perf_event__synthesize_event_update_cpus(tool, evsel, process); 2108 if (err < 0) { 2109 pr_err("Couldn't synthesize evsel cpus.\n"); 2110 return err; 2111 } 2112 } 2113 2114 /* 2115 * Name is needed only for pipe output, 2116 * perf.data carries event names. 2117 */ 2118 if (is_pipe) { 2119 err = perf_event__synthesize_event_update_name(tool, evsel, process); 2120 if (err < 0) { 2121 pr_err("Couldn't synthesize evsel name.\n"); 2122 return err; 2123 } 2124 } 2125 } 2126 return 0; 2127 } 2128 2129 int perf_event__synthesize_attr(struct perf_tool *tool, struct perf_event_attr *attr, 2130 u32 ids, u64 *id, perf_event__handler_t process) 2131 { 2132 union perf_event *ev; 2133 size_t size; 2134 int err; 2135 2136 size = sizeof(struct perf_event_attr); 2137 size = PERF_ALIGN(size, sizeof(u64)); 2138 size += sizeof(struct perf_event_header); 2139 size += ids * sizeof(u64); 2140 2141 ev = zalloc(size); 2142 2143 if (ev == NULL) 2144 return -ENOMEM; 2145 2146 ev->attr.attr = *attr; 2147 memcpy(ev->attr.id, id, ids * sizeof(u64)); 2148 2149 ev->attr.header.type = PERF_RECORD_HEADER_ATTR; 2150 ev->attr.header.size = (u16)size; 2151 2152 if (ev->attr.header.size == size) 2153 err = process(tool, ev, NULL, NULL); 2154 else 2155 err = -E2BIG; 2156 2157 free(ev); 2158 2159 return err; 2160 } 2161 2162 #ifdef HAVE_LIBTRACEEVENT 2163 int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd, struct evlist *evlist, 2164 perf_event__handler_t process) 2165 { 2166 union perf_event ev; 2167 struct tracing_data *tdata; 2168 ssize_t size = 0, aligned_size = 0, padding; 2169 struct feat_fd ff; 2170 2171 /* 2172 * We are going to store the size of the data followed 2173 * by the data contents. Since the fd descriptor is a pipe, 2174 * we cannot seek back to store the size of the data once 2175 * we know it. Instead we: 2176 * 2177 * - write the tracing data to the temp file 2178 * - get/write the data size to pipe 2179 * - write the tracing data from the temp file 2180 * to the pipe 2181 */ 2182 tdata = tracing_data_get(&evlist->core.entries, fd, true); 2183 if (!tdata) 2184 return -1; 2185 2186 memset(&ev, 0, sizeof(ev)); 2187 2188 ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA; 2189 size = tdata->size; 2190 aligned_size = PERF_ALIGN(size, sizeof(u64)); 2191 padding = aligned_size - size; 2192 ev.tracing_data.header.size = sizeof(ev.tracing_data); 2193 ev.tracing_data.size = aligned_size; 2194 2195 process(tool, &ev, NULL, NULL); 2196 2197 /* 2198 * The put function will copy all the tracing data 2199 * stored in temp file to the pipe. 2200 */ 2201 tracing_data_put(tdata); 2202 2203 ff = (struct feat_fd){ .fd = fd }; 2204 if (write_padded(&ff, NULL, 0, padding)) 2205 return -1; 2206 2207 return aligned_size; 2208 } 2209 #endif 2210 2211 int perf_event__synthesize_build_id(struct perf_tool *tool, struct dso *pos, u16 misc, 2212 perf_event__handler_t process, struct machine *machine) 2213 { 2214 union perf_event ev; 2215 size_t len; 2216 2217 if (!pos->hit) 2218 return 0; 2219 2220 memset(&ev, 0, sizeof(ev)); 2221 2222 len = pos->long_name_len + 1; 2223 len = PERF_ALIGN(len, NAME_ALIGN); 2224 ev.build_id.size = min(pos->bid.size, sizeof(pos->bid.data)); 2225 memcpy(&ev.build_id.build_id, pos->bid.data, ev.build_id.size); 2226 ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID; 2227 ev.build_id.header.misc = misc | PERF_RECORD_MISC_BUILD_ID_SIZE; 2228 ev.build_id.pid = machine->pid; 2229 ev.build_id.header.size = sizeof(ev.build_id) + len; 2230 memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len); 2231 2232 return process(tool, &ev, NULL, machine); 2233 } 2234 2235 int perf_event__synthesize_stat_events(struct perf_stat_config *config, struct perf_tool *tool, 2236 struct evlist *evlist, perf_event__handler_t process, bool attrs) 2237 { 2238 int err; 2239 2240 if (attrs) { 2241 err = perf_event__synthesize_attrs(tool, evlist, process); 2242 if (err < 0) { 2243 pr_err("Couldn't synthesize attrs.\n"); 2244 return err; 2245 } 2246 } 2247 2248 err = perf_event__synthesize_extra_attr(tool, evlist, process, attrs); 2249 err = perf_event__synthesize_thread_map2(tool, evlist->core.threads, process, NULL); 2250 if (err < 0) { 2251 pr_err("Couldn't synthesize thread map.\n"); 2252 return err; 2253 } 2254 2255 err = perf_event__synthesize_cpu_map(tool, evlist->core.user_requested_cpus, process, NULL); 2256 if (err < 0) { 2257 pr_err("Couldn't synthesize thread map.\n"); 2258 return err; 2259 } 2260 2261 err = perf_event__synthesize_stat_config(tool, config, process, NULL); 2262 if (err < 0) { 2263 pr_err("Couldn't synthesize config.\n"); 2264 return err; 2265 } 2266 2267 return 0; 2268 } 2269 2270 extern const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE]; 2271 2272 int perf_event__synthesize_features(struct perf_tool *tool, struct perf_session *session, 2273 struct evlist *evlist, perf_event__handler_t process) 2274 { 2275 struct perf_header *header = &session->header; 2276 struct perf_record_header_feature *fe; 2277 struct feat_fd ff; 2278 size_t sz, sz_hdr; 2279 int feat, ret; 2280 2281 sz_hdr = sizeof(fe->header); 2282 sz = sizeof(union perf_event); 2283 /* get a nice alignment */ 2284 sz = PERF_ALIGN(sz, page_size); 2285 2286 memset(&ff, 0, sizeof(ff)); 2287 2288 ff.buf = malloc(sz); 2289 if (!ff.buf) 2290 return -ENOMEM; 2291 2292 ff.size = sz - sz_hdr; 2293 ff.ph = &session->header; 2294 2295 for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) { 2296 if (!feat_ops[feat].synthesize) { 2297 pr_debug("No record header feature for header :%d\n", feat); 2298 continue; 2299 } 2300 2301 ff.offset = sizeof(*fe); 2302 2303 ret = feat_ops[feat].write(&ff, evlist); 2304 if (ret || ff.offset <= (ssize_t)sizeof(*fe)) { 2305 pr_debug("Error writing feature\n"); 2306 continue; 2307 } 2308 /* ff.buf may have changed due to realloc in do_write() */ 2309 fe = ff.buf; 2310 memset(fe, 0, sizeof(*fe)); 2311 2312 fe->feat_id = feat; 2313 fe->header.type = PERF_RECORD_HEADER_FEATURE; 2314 fe->header.size = ff.offset; 2315 2316 ret = process(tool, ff.buf, NULL, NULL); 2317 if (ret) { 2318 free(ff.buf); 2319 return ret; 2320 } 2321 } 2322 2323 /* Send HEADER_LAST_FEATURE mark. */ 2324 fe = ff.buf; 2325 fe->feat_id = HEADER_LAST_FEATURE; 2326 fe->header.type = PERF_RECORD_HEADER_FEATURE; 2327 fe->header.size = sizeof(*fe); 2328 2329 ret = process(tool, ff.buf, NULL, NULL); 2330 2331 free(ff.buf); 2332 return ret; 2333 } 2334 2335 int perf_event__synthesize_for_pipe(struct perf_tool *tool, 2336 struct perf_session *session, 2337 struct perf_data *data, 2338 perf_event__handler_t process) 2339 { 2340 int err; 2341 int ret = 0; 2342 struct evlist *evlist = session->evlist; 2343 2344 /* 2345 * We need to synthesize events first, because some 2346 * features works on top of them (on report side). 2347 */ 2348 err = perf_event__synthesize_attrs(tool, evlist, process); 2349 if (err < 0) { 2350 pr_err("Couldn't synthesize attrs.\n"); 2351 return err; 2352 } 2353 ret += err; 2354 2355 err = perf_event__synthesize_features(tool, session, evlist, process); 2356 if (err < 0) { 2357 pr_err("Couldn't synthesize features.\n"); 2358 return err; 2359 } 2360 ret += err; 2361 2362 #ifdef HAVE_LIBTRACEEVENT 2363 if (have_tracepoints(&evlist->core.entries)) { 2364 int fd = perf_data__fd(data); 2365 2366 /* 2367 * FIXME err <= 0 here actually means that 2368 * there were no tracepoints so its not really 2369 * an error, just that we don't need to 2370 * synthesize anything. We really have to 2371 * return this more properly and also 2372 * propagate errors that now are calling die() 2373 */ 2374 err = perf_event__synthesize_tracing_data(tool, fd, evlist, 2375 process); 2376 if (err <= 0) { 2377 pr_err("Couldn't record tracing data.\n"); 2378 return err; 2379 } 2380 ret += err; 2381 } 2382 #else 2383 (void)data; 2384 #endif 2385 2386 return ret; 2387 } 2388 2389 int parse_synth_opt(char *synth) 2390 { 2391 char *p, *q; 2392 int ret = 0; 2393 2394 if (synth == NULL) 2395 return -1; 2396 2397 for (q = synth; (p = strsep(&q, ",")); p = q) { 2398 if (!strcasecmp(p, "no") || !strcasecmp(p, "none")) 2399 return 0; 2400 2401 if (!strcasecmp(p, "all")) 2402 return PERF_SYNTH_ALL; 2403 2404 if (!strcasecmp(p, "task")) 2405 ret |= PERF_SYNTH_TASK; 2406 else if (!strcasecmp(p, "mmap")) 2407 ret |= PERF_SYNTH_TASK | PERF_SYNTH_MMAP; 2408 else if (!strcasecmp(p, "cgroup")) 2409 ret |= PERF_SYNTH_CGROUP; 2410 else 2411 return -1; 2412 } 2413 2414 return ret; 2415 } 2416