// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include "string2.h"
#include <sys/param.h>
#include <sys/types.h>
#include <byteswap.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/string.h>
#include <linux/stringify.h>
#include <linux/zalloc.h>
#include <sys/stat.h>
#include <sys/utsname.h>
#include <linux/time64.h>
#include <dirent.h>
#include <bpf/libbpf.h>
#include <perf/cpumap.h>

#include "dso.h"
#include "evlist.h"
#include "evsel.h"
#include "header.h"
#include "memswap.h"
#include "trace-event.h"
#include "session.h"
#include "symbol.h"
#include "debug.h"
#include "cpumap.h"
#include "pmu.h"
#include "vdso.h"
#include "strbuf.h"
#include "build-id.h"
#include "data.h"
#include <api/fs/fs.h>
#include "asm/bug.h"
#include "tool.h"
#include "time-utils.h"
#include "units.h"
#include "util.h"
#include "cputopo.h"
#include "bpf-event.h"

#include <linux/ctype.h>

/*
 * magic2 = "PERFILE2"
 * must be a numerical value so that the endianness of the stored
 * value determines the memory layout. That way we are able to detect
 * the endianness when reading the perf.data file back.
 *
 * we also check for the legacy (PERFFILE) format.
 */
static const char *__perf_magic1 = "PERFFILE";
static const u64 __perf_magic2    = 0x32454c4946524550ULL;
static const u64 __perf_magic2_sw = 0x50455246494c4532ULL;

#define PERF_MAGIC	__perf_magic2

const char perf_version_string[] = PERF_VERSION;

struct perf_file_attr {
	struct perf_event_attr	attr;
	struct perf_file_section	ids;
};

struct feat_fd {
	struct perf_header	*ph;
	int			fd;
	void			*buf;	/* Either buf != NULL or fd >= 0 */
	ssize_t			offset;
	size_t			size;
	struct evsel		*events;
};

void perf_header__set_feat(struct perf_header *header, int feat)
{
	set_bit(feat, header->adds_features);
}

void perf_header__clear_feat(struct perf_header *header, int feat)
{
	clear_bit(feat, header->adds_features);
}

bool perf_header__has_feat(const struct perf_header *header, int feat)
{
	return test_bit(feat, header->adds_features);
}

static int __do_write_fd(struct feat_fd *ff, const void *buf, size_t size)
{
	ssize_t ret = writen(ff->fd, buf, size);

	if (ret != (ssize_t)size)
		return ret < 0 ? (int)ret : -1;
	return 0;
}

static int __do_write_buf(struct feat_fd *ff, const void *buf, size_t size)
{
	/* struct perf_event_header::size is u16 */
	const size_t max_size = 0xffff - sizeof(struct perf_event_header);
	size_t new_size = ff->size;
	void *addr;

	if (size + ff->offset > max_size)
		return -E2BIG;

	while (size > (new_size - ff->offset))
		new_size <<= 1;
	new_size = min(max_size, new_size);

	if (ff->size < new_size) {
		addr = realloc(ff->buf, new_size);
		if (!addr)
			return -ENOMEM;
		ff->buf = addr;
		ff->size = new_size;
	}

	memcpy(ff->buf + ff->offset, buf, size);
	ff->offset += size;

	return 0;
}
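
/*
 * A feat_fd writes either to a file descriptor or, in pipe mode, to a
 * growing in-memory buffer (see struct feat_fd above: either buf != NULL
 * or fd >= 0). __do_write_buf() doubles the buffer as needed, capped at
 * the largest feature record that perf_event_header::size (a u16) can
 * describe.
 */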
/* Return: 0 if succeeded, -ERR if failed. */
int do_write(struct feat_fd *ff, const void *buf, size_t size)
{
	if (!ff->buf)
		return __do_write_fd(ff, buf, size);
	return __do_write_buf(ff, buf, size);
}

/* Return: 0 if succeeded, -ERR if failed. */
static int do_write_bitmap(struct feat_fd *ff, unsigned long *set, u64 size)
{
	u64 *p = (u64 *) set;
	int i, ret;

	ret = do_write(ff, &size, sizeof(size));
	if (ret < 0)
		return ret;

	for (i = 0; (u64) i < BITS_TO_U64(size); i++) {
		ret = do_write(ff, p + i, sizeof(*p));
		if (ret < 0)
			return ret;
	}

	return 0;
}

/* Return: 0 if succeeded, -ERR if failed. */
int write_padded(struct feat_fd *ff, const void *bf,
		 size_t count, size_t count_aligned)
{
	static const char zero_buf[NAME_ALIGN];
	int err = do_write(ff, bf, count);

	if (!err)
		err = do_write(ff, zero_buf, count_aligned - count);

	return err;
}

#define string_size(str)						\
	(PERF_ALIGN((strlen(str) + 1), NAME_ALIGN) + sizeof(u32))

/* Return: 0 if succeeded, -ERR if failed. */
static int do_write_string(struct feat_fd *ff, const char *str)
{
	u32 len, olen;
	int ret;

	olen = strlen(str) + 1;
	len = PERF_ALIGN(olen, NAME_ALIGN);

	/* write len, incl. \0 */
	ret = do_write(ff, &len, sizeof(len));
	if (ret < 0)
		return ret;

	return write_padded(ff, str, olen, len);
}

static int __do_read_fd(struct feat_fd *ff, void *addr, ssize_t size)
{
	ssize_t ret = readn(ff->fd, addr, size);

	if (ret != size)
		return ret < 0 ? (int)ret : -1;
	return 0;
}

static int __do_read_buf(struct feat_fd *ff, void *addr, ssize_t size)
{
	if (size > (ssize_t)ff->size - ff->offset)
		return -1;

	memcpy(addr, ff->buf + ff->offset, size);
	ff->offset += size;

	return 0;
}

static int __do_read(struct feat_fd *ff, void *addr, ssize_t size)
{
	if (!ff->buf)
		return __do_read_fd(ff, addr, size);
	return __do_read_buf(ff, addr, size);
}

static int do_read_u32(struct feat_fd *ff, u32 *addr)
{
	int ret;

	ret = __do_read(ff, addr, sizeof(*addr));
	if (ret)
		return ret;

	if (ff->ph->needs_swap)
		*addr = bswap_32(*addr);
	return 0;
}

static int do_read_u64(struct feat_fd *ff, u64 *addr)
{
	int ret;

	ret = __do_read(ff, addr, sizeof(*addr));
	if (ret)
		return ret;

	if (ff->ph->needs_swap)
		*addr = bswap_64(*addr);
	return 0;
}

static char *do_read_string(struct feat_fd *ff)
{
	u32 len;
	char *buf;

	if (do_read_u32(ff, &len))
		return NULL;

	buf = malloc(len);
	if (!buf)
		return NULL;

	if (!__do_read(ff, buf, len)) {
		/*
		 * strings are padded by zeroes
		 * thus the actual strlen of buf
		 * may be less than len
		 */
		return buf;
	}

	free(buf);
	return NULL;
}
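
/*
 * Counterpart of do_write_bitmap(): the on-file layout is the bitmap
 * size in bits as a u64, followed by BITS_TO_U64(size) u64 words of
 * bitmap payload.
 */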
/* Return: 0 if succeeded, -ERR if failed. */
static int do_read_bitmap(struct feat_fd *ff, unsigned long **pset, u64 *psize)
{
	unsigned long *set;
	u64 size, *p;
	int i, ret;

	ret = do_read_u64(ff, &size);
	if (ret)
		return ret;

	set = bitmap_alloc(size);
	if (!set)
		return -ENOMEM;

	p = (u64 *) set;

	for (i = 0; (u64) i < BITS_TO_U64(size); i++) {
		ret = do_read_u64(ff, p + i);
		if (ret < 0) {
			free(set);
			return ret;
		}
	}

	*pset  = set;
	*psize = size;
	return 0;
}

static int write_tracing_data(struct feat_fd *ff,
			      struct evlist *evlist)
{
	if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
		return -1;

	return read_tracing_data(ff->fd, &evlist->core.entries);
}

static int write_build_id(struct feat_fd *ff,
			  struct evlist *evlist __maybe_unused)
{
	struct perf_session *session;
	int err;

	session = container_of(ff->ph, struct perf_session, header);

	if (!perf_session__read_build_ids(session, true))
		return -1;

	if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
		return -1;

	err = perf_session__write_buildid_table(session, ff);
	if (err < 0) {
		pr_debug("failed to write buildid table\n");
		return err;
	}
	perf_session__cache_build_ids(session);

	return 0;
}

static int write_hostname(struct feat_fd *ff,
			  struct evlist *evlist __maybe_unused)
{
	struct utsname uts;
	int ret;

	ret = uname(&uts);
	if (ret < 0)
		return -1;

	return do_write_string(ff, uts.nodename);
}

static int write_osrelease(struct feat_fd *ff,
			   struct evlist *evlist __maybe_unused)
{
	struct utsname uts;
	int ret;

	ret = uname(&uts);
	if (ret < 0)
		return -1;

	return do_write_string(ff, uts.release);
}

static int write_arch(struct feat_fd *ff,
		      struct evlist *evlist __maybe_unused)
{
	struct utsname uts;
	int ret;

	ret = uname(&uts);
	if (ret < 0)
		return -1;

	return do_write_string(ff, uts.machine);
}

static int write_version(struct feat_fd *ff,
			 struct evlist *evlist __maybe_unused)
{
	return do_write_string(ff, perf_version_string);
}

static int __write_cpudesc(struct feat_fd *ff, const char *cpuinfo_proc)
{
	FILE *file;
	char *buf = NULL;
	char *s, *p;
	const char *search = cpuinfo_proc;
	size_t len = 0;
	int ret = -1;

	if (!search)
		return -1;

	file = fopen("/proc/cpuinfo", "r");
	if (!file)
		return -1;

	while (getline(&buf, &len, file) > 0) {
		ret = strncmp(buf, search, strlen(search));
		if (!ret)
			break;
	}

	if (ret) {
		ret = -1;
		goto done;
	}

	s = buf;

	p = strchr(buf, ':');
	if (p && *(p+1) == ' ' && *(p+2))
		s = p + 2;
	p = strchr(s, '\n');
	if (p)
		*p = '\0';

	/* squash extra space characters (branding string) */
	p = s;
	while (*p) {
		if (isspace(*p)) {
			char *r = p + 1;
			char *q = skip_spaces(r);
			*p = ' ';
			if (q != (p+1))
				while ((*r++ = *q++));
		}
		p++;
	}
	ret = do_write_string(ff, s);
done:
	free(buf);
	fclose(file);
	return ret;
}
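
/*
 * CPUDESC: pick the /proc/cpuinfo field that names the CPU on this
 * architecture and record its value; the first candidate string that
 * __write_cpudesc() matches wins.
 */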
{ "vendor_id", } 443 #elif defined(__sh__) 444 #define CPUINFO_PROC { "cpu type", } 445 #elif defined(__alpha__) || defined(__mips__) 446 #define CPUINFO_PROC { "cpu model", } 447 #elif defined(__arm__) 448 #define CPUINFO_PROC { "model name", "Processor", } 449 #elif defined(__arc__) 450 #define CPUINFO_PROC { "Processor", } 451 #elif defined(__xtensa__) 452 #define CPUINFO_PROC { "core ID", } 453 #else 454 #define CPUINFO_PROC { "model name", } 455 #endif 456 const char *cpuinfo_procs[] = CPUINFO_PROC; 457 #undef CPUINFO_PROC 458 unsigned int i; 459 460 for (i = 0; i < ARRAY_SIZE(cpuinfo_procs); i++) { 461 int ret; 462 ret = __write_cpudesc(ff, cpuinfo_procs[i]); 463 if (ret >= 0) 464 return ret; 465 } 466 return -1; 467 } 468 469 470 static int write_nrcpus(struct feat_fd *ff, 471 struct evlist *evlist __maybe_unused) 472 { 473 long nr; 474 u32 nrc, nra; 475 int ret; 476 477 nrc = cpu__max_present_cpu(); 478 479 nr = sysconf(_SC_NPROCESSORS_ONLN); 480 if (nr < 0) 481 return -1; 482 483 nra = (u32)(nr & UINT_MAX); 484 485 ret = do_write(ff, &nrc, sizeof(nrc)); 486 if (ret < 0) 487 return ret; 488 489 return do_write(ff, &nra, sizeof(nra)); 490 } 491 492 static int write_event_desc(struct feat_fd *ff, 493 struct evlist *evlist) 494 { 495 struct evsel *evsel; 496 u32 nre, nri, sz; 497 int ret; 498 499 nre = evlist->core.nr_entries; 500 501 /* 502 * write number of events 503 */ 504 ret = do_write(ff, &nre, sizeof(nre)); 505 if (ret < 0) 506 return ret; 507 508 /* 509 * size of perf_event_attr struct 510 */ 511 sz = (u32)sizeof(evsel->core.attr); 512 ret = do_write(ff, &sz, sizeof(sz)); 513 if (ret < 0) 514 return ret; 515 516 evlist__for_each_entry(evlist, evsel) { 517 ret = do_write(ff, &evsel->core.attr, sz); 518 if (ret < 0) 519 return ret; 520 /* 521 * write number of unique id per event 522 * there is one id per instance of an event 523 * 524 * copy into an nri to be independent of the 525 * type of ids, 526 */ 527 nri = evsel->ids; 528 ret = do_write(ff, &nri, sizeof(nri)); 529 if (ret < 0) 530 return ret; 531 532 /* 533 * write event string as passed on cmdline 534 */ 535 ret = do_write_string(ff, perf_evsel__name(evsel)); 536 if (ret < 0) 537 return ret; 538 /* 539 * write unique ids for this event 540 */ 541 ret = do_write(ff, evsel->id, evsel->ids * sizeof(u64)); 542 if (ret < 0) 543 return ret; 544 } 545 return 0; 546 } 547 548 static int write_cmdline(struct feat_fd *ff, 549 struct evlist *evlist __maybe_unused) 550 { 551 char pbuf[MAXPATHLEN], *buf; 552 int i, ret, n; 553 554 /* actual path to perf binary */ 555 buf = perf_exe(pbuf, MAXPATHLEN); 556 557 /* account for binary path */ 558 n = perf_env.nr_cmdline + 1; 559 560 ret = do_write(ff, &n, sizeof(n)); 561 if (ret < 0) 562 return ret; 563 564 ret = do_write_string(ff, buf); 565 if (ret < 0) 566 return ret; 567 568 for (i = 0 ; i < perf_env.nr_cmdline; i++) { 569 ret = do_write_string(ff, perf_env.cmdline_argv[i]); 570 if (ret < 0) 571 return ret; 572 } 573 return 0; 574 } 575 576 577 static int write_cpu_topology(struct feat_fd *ff, 578 struct evlist *evlist __maybe_unused) 579 { 580 struct cpu_topology *tp; 581 u32 i; 582 int ret, j; 583 584 tp = cpu_topology__new(); 585 if (!tp) 586 return -1; 587 588 ret = do_write(ff, &tp->core_sib, sizeof(tp->core_sib)); 589 if (ret < 0) 590 goto done; 591 592 for (i = 0; i < tp->core_sib; i++) { 593 ret = do_write_string(ff, tp->core_siblings[i]); 594 if (ret < 0) 595 goto done; 596 } 597 ret = do_write(ff, &tp->thread_sib, sizeof(tp->thread_sib)); 598 if (ret < 0) 599 goto done; 
static int write_cpu_topology(struct feat_fd *ff,
			      struct evlist *evlist __maybe_unused)
{
	struct cpu_topology *tp;
	u32 i;
	int ret, j;

	tp = cpu_topology__new();
	if (!tp)
		return -1;

	ret = do_write(ff, &tp->core_sib, sizeof(tp->core_sib));
	if (ret < 0)
		goto done;

	for (i = 0; i < tp->core_sib; i++) {
		ret = do_write_string(ff, tp->core_siblings[i]);
		if (ret < 0)
			goto done;
	}
	ret = do_write(ff, &tp->thread_sib, sizeof(tp->thread_sib));
	if (ret < 0)
		goto done;

	for (i = 0; i < tp->thread_sib; i++) {
		ret = do_write_string(ff, tp->thread_siblings[i]);
		if (ret < 0)
			break;
	}

	ret = perf_env__read_cpu_topology_map(&perf_env);
	if (ret < 0)
		goto done;

	for (j = 0; j < perf_env.nr_cpus_avail; j++) {
		ret = do_write(ff, &perf_env.cpu[j].core_id,
			       sizeof(perf_env.cpu[j].core_id));
		if (ret < 0)
			return ret;
		ret = do_write(ff, &perf_env.cpu[j].socket_id,
			       sizeof(perf_env.cpu[j].socket_id));
		if (ret < 0)
			return ret;
	}

	if (!tp->die_sib)
		goto done;

	ret = do_write(ff, &tp->die_sib, sizeof(tp->die_sib));
	if (ret < 0)
		goto done;

	for (i = 0; i < tp->die_sib; i++) {
		ret = do_write_string(ff, tp->die_siblings[i]);
		if (ret < 0)
			goto done;
	}

	for (j = 0; j < perf_env.nr_cpus_avail; j++) {
		ret = do_write(ff, &perf_env.cpu[j].die_id,
			       sizeof(perf_env.cpu[j].die_id));
		if (ret < 0)
			return ret;
	}

done:
	cpu_topology__delete(tp);
	return ret;
}

static int write_total_mem(struct feat_fd *ff,
			   struct evlist *evlist __maybe_unused)
{
	char *buf = NULL;
	FILE *fp;
	size_t len = 0;
	int ret = -1, n;
	uint64_t mem;

	fp = fopen("/proc/meminfo", "r");
	if (!fp)
		return -1;

	while (getline(&buf, &len, fp) > 0) {
		ret = strncmp(buf, "MemTotal:", 9);
		if (!ret)
			break;
	}
	if (!ret) {
		n = sscanf(buf, "%*s %"PRIu64, &mem);
		if (n == 1)
			ret = do_write(ff, &mem, sizeof(mem));
	} else
		ret = -1;
	free(buf);
	fclose(fp);
	return ret;
}
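
/*
 * File format (as written by write_numa_topology() below):
 *
 * struct numa_topology {
 *	u32	nr;
 *	struct numa_node {
 *		u32	node;
 *		u64	mem_total;
 *		u64	mem_free;
 *		char	cpus[];
 *	}[nr];
 * };
 */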
static int write_numa_topology(struct feat_fd *ff,
			       struct evlist *evlist __maybe_unused)
{
	struct numa_topology *tp;
	int ret = -1;
	u32 i;

	tp = numa_topology__new();
	if (!tp)
		return -ENOMEM;

	ret = do_write(ff, &tp->nr, sizeof(u32));
	if (ret < 0)
		goto err;

	for (i = 0; i < tp->nr; i++) {
		struct numa_topology_node *n = &tp->nodes[i];

		ret = do_write(ff, &n->node, sizeof(u32));
		if (ret < 0)
			goto err;

		ret = do_write(ff, &n->mem_total, sizeof(u64));
		if (ret)
			goto err;

		ret = do_write(ff, &n->mem_free, sizeof(u64));
		if (ret)
			goto err;

		ret = do_write_string(ff, n->cpus);
		if (ret < 0)
			goto err;
	}

	ret = 0;

err:
	numa_topology__delete(tp);
	return ret;
}

/*
 * File format:
 *
 * struct pmu_mappings {
 *	u32	pmu_num;
 *	struct pmu_map {
 *		u32	type;
 *		char	name[];
 *	}[pmu_num];
 * };
 */

static int write_pmu_mappings(struct feat_fd *ff,
			      struct evlist *evlist __maybe_unused)
{
	struct perf_pmu *pmu = NULL;
	u32 pmu_num = 0;
	int ret;

	/*
	 * Do a first pass to count the number of PMUs, to avoid an
	 * lseek(); this way it also works in pipe mode.
	 */
	while ((pmu = perf_pmu__scan(pmu))) {
		if (!pmu->name)
			continue;
		pmu_num++;
	}

	ret = do_write(ff, &pmu_num, sizeof(pmu_num));
	if (ret < 0)
		return ret;

	while ((pmu = perf_pmu__scan(pmu))) {
		if (!pmu->name)
			continue;

		ret = do_write(ff, &pmu->type, sizeof(pmu->type));
		if (ret < 0)
			return ret;

		ret = do_write_string(ff, pmu->name);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/*
 * File format:
 *
 * struct group_descs {
 *	u32	nr_groups;
 *	struct group_desc {
 *		char	name[];
 *		u32	leader_idx;
 *		u32	nr_members;
 *	}[nr_groups];
 * };
 */
static int write_group_desc(struct feat_fd *ff,
			    struct evlist *evlist)
{
	u32 nr_groups = evlist->nr_groups;
	struct evsel *evsel;
	int ret;

	ret = do_write(ff, &nr_groups, sizeof(nr_groups));
	if (ret < 0)
		return ret;

	evlist__for_each_entry(evlist, evsel) {
		if (perf_evsel__is_group_leader(evsel) &&
		    evsel->core.nr_members > 1) {
			const char *name = evsel->group_name ?: "{anon_group}";
			u32 leader_idx = evsel->idx;
			u32 nr_members = evsel->core.nr_members;

			ret = do_write_string(ff, name);
			if (ret < 0)
				return ret;

			ret = do_write(ff, &leader_idx, sizeof(leader_idx));
			if (ret < 0)
				return ret;

			ret = do_write(ff, &nr_members, sizeof(nr_members));
			if (ret < 0)
				return ret;
		}
	}
	return 0;
}

/*
 * Return the CPU id as a raw string.
 *
 * Each architecture should provide a more precise id string that
 * can be used to match the architecture's "mapfile".
 */
char * __weak get_cpuid_str(struct perf_pmu *pmu __maybe_unused)
{
	return NULL;
}

/* Return zero when the cpuid from the mapfile.csv matches the
 * cpuid string generated on this platform.
 * Otherwise return non-zero.
 */
int __weak strcmp_cpuid_str(const char *mapcpuid, const char *cpuid)
{
	regex_t re;
	regmatch_t pmatch[1];
	int match;

	if (regcomp(&re, mapcpuid, REG_EXTENDED) != 0) {
		/* Warn that the match expression could not be compiled. */
		pr_info("Invalid regular expression %s\n", mapcpuid);
		return 1;
	}

	match = !regexec(&re, cpuid, 1, pmatch, 0);
	regfree(&re);
	if (match) {
		size_t match_len = (pmatch[0].rm_eo - pmatch[0].rm_so);

		/* Verify the entire string matched. */
		if (match_len == strlen(cpuid))
			return 0;
	}
	return 1;
}

/*
 * default get_cpuid(): nothing gets recorded
 * actual implementation must be in arch/$(SRCARCH)/util/header.c
 */
int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused)
{
	return -1;
}

static int write_cpuid(struct feat_fd *ff,
		       struct evlist *evlist __maybe_unused)
{
	char buffer[64];
	int ret;

	ret = get_cpuid(buffer, sizeof(buffer));
	if (ret)
		return -1;

	return do_write_string(ff, buffer);
}

static int write_branch_stack(struct feat_fd *ff __maybe_unused,
			      struct evlist *evlist __maybe_unused)
{
	return 0;
}

static int write_auxtrace(struct feat_fd *ff,
			  struct evlist *evlist __maybe_unused)
{
	struct perf_session *session;
	int err;

	if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
		return -1;

	session = container_of(ff->ph, struct perf_session, header);

	err = auxtrace_index__write(ff->fd, &session->auxtrace_index);
	if (err < 0)
		pr_err("Failed to write auxtrace index\n");
	return err;
}

static int write_clockid(struct feat_fd *ff,
			 struct evlist *evlist __maybe_unused)
{
	return do_write(ff, &ff->ph->env.clockid_res_ns,
			sizeof(ff->ph->env.clockid_res_ns));
}

static int write_dir_format(struct feat_fd *ff,
			    struct evlist *evlist __maybe_unused)
{
	struct perf_session *session;
	struct perf_data *data;

	session = container_of(ff->ph, struct perf_session, header);
	data = session->data;

	if (WARN_ON(!perf_data__is_dir(data)))
		return -1;

	return do_write(ff, &data->dir.version, sizeof(data->dir.version));
}
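
/*
 * BPF_PROG_INFO: infos_cnt followed by one bpf_prog_info_linear record
 * (header plus data_len bytes of arrays) per program. Pointers are
 * translated to file offsets before writing and restored afterwards,
 * so writing never modifies the in-memory copy.
 */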
#ifdef HAVE_LIBBPF_SUPPORT
static int write_bpf_prog_info(struct feat_fd *ff,
			       struct evlist *evlist __maybe_unused)
{
	struct perf_env *env = &ff->ph->env;
	struct rb_root *root;
	struct rb_node *next;
	int ret;

	down_read(&env->bpf_progs.lock);

	ret = do_write(ff, &env->bpf_progs.infos_cnt,
		       sizeof(env->bpf_progs.infos_cnt));
	if (ret < 0)
		goto out;

	root = &env->bpf_progs.infos;
	next = rb_first(root);
	while (next) {
		struct bpf_prog_info_node *node;
		size_t len;

		node = rb_entry(next, struct bpf_prog_info_node, rb_node);
		next = rb_next(&node->rb_node);
		len = sizeof(struct bpf_prog_info_linear) +
			node->info_linear->data_len;

		/* before writing to file, translate address to offset */
		bpf_program__bpil_addr_to_offs(node->info_linear);
		ret = do_write(ff, node->info_linear, len);
		/*
		 * translate back to address even when do_write() fails,
		 * so that this function never changes the data.
		 */
		bpf_program__bpil_offs_to_addr(node->info_linear);
		if (ret < 0)
			goto out;
	}
out:
	up_read(&env->bpf_progs.lock);
	return ret;
}
#else // HAVE_LIBBPF_SUPPORT
static int write_bpf_prog_info(struct feat_fd *ff __maybe_unused,
			       struct evlist *evlist __maybe_unused)
{
	return 0;
}
#endif // HAVE_LIBBPF_SUPPORT

static int write_bpf_btf(struct feat_fd *ff,
			 struct evlist *evlist __maybe_unused)
{
	struct perf_env *env = &ff->ph->env;
	struct rb_root *root;
	struct rb_node *next;
	int ret;

	down_read(&env->bpf_progs.lock);

	ret = do_write(ff, &env->bpf_progs.btfs_cnt,
		       sizeof(env->bpf_progs.btfs_cnt));

	if (ret < 0)
		goto out;

	root = &env->bpf_progs.btfs;
	next = rb_first(root);
	while (next) {
		struct btf_node *node;

		node = rb_entry(next, struct btf_node, rb_node);
		next = rb_next(&node->rb_node);
		ret = do_write(ff, &node->id,
			       sizeof(u32) * 2 + node->data_size);
		if (ret < 0)
			goto out;
	}
out:
	up_read(&env->bpf_progs.lock);
	return ret;
}

static int cpu_cache_level__sort(const void *a, const void *b)
{
	struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a;
	struct cpu_cache_level *cache_b = (struct cpu_cache_level *)b;

	return cache_a->level - cache_b->level;
}

static bool cpu_cache_level__cmp(struct cpu_cache_level *a, struct cpu_cache_level *b)
{
	if (a->level != b->level)
		return false;

	if (a->line_size != b->line_size)
		return false;

	if (a->sets != b->sets)
		return false;

	if (a->ways != b->ways)
		return false;

	if (strcmp(a->type, b->type))
		return false;

	if (strcmp(a->size, b->size))
		return false;

	if (strcmp(a->map, b->map))
		return false;

	return true;
}
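
/*
 * Read one cache level of one CPU from sysfs. Returns 0 on success,
 * 1 when the index<level> directory does not exist (i.e. no more
 * levels for this CPU), and -1 on error; build_caches() below relies
 * on this distinction.
 */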
static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 level)
{
	char path[PATH_MAX], file[PATH_MAX];
	struct stat st;
	size_t len;

	scnprintf(path, PATH_MAX, "devices/system/cpu/cpu%d/cache/index%d/", cpu, level);
	scnprintf(file, PATH_MAX, "%s/%s", sysfs__mountpoint(), path);

	if (stat(file, &st))
		return 1;

	scnprintf(file, PATH_MAX, "%s/level", path);
	if (sysfs__read_int(file, (int *) &cache->level))
		return -1;

	scnprintf(file, PATH_MAX, "%s/coherency_line_size", path);
	if (sysfs__read_int(file, (int *) &cache->line_size))
		return -1;

	scnprintf(file, PATH_MAX, "%s/number_of_sets", path);
	if (sysfs__read_int(file, (int *) &cache->sets))
		return -1;

	scnprintf(file, PATH_MAX, "%s/ways_of_associativity", path);
	if (sysfs__read_int(file, (int *) &cache->ways))
		return -1;

	scnprintf(file, PATH_MAX, "%s/type", path);
	if (sysfs__read_str(file, &cache->type, &len))
		return -1;

	cache->type[len] = 0;
	cache->type = strim(cache->type);

	scnprintf(file, PATH_MAX, "%s/size", path);
	if (sysfs__read_str(file, &cache->size, &len)) {
		zfree(&cache->type);
		return -1;
	}

	cache->size[len] = 0;
	cache->size = strim(cache->size);

	scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path);
	if (sysfs__read_str(file, &cache->map, &len)) {
		zfree(&cache->size);
		zfree(&cache->type);
		return -1;
	}

	cache->map[len] = 0;
	cache->map = strim(cache->map);
	return 0;
}

static void cpu_cache_level__fprintf(FILE *out, struct cpu_cache_level *c)
{
	fprintf(out, "L%d %-15s %8s [%s]\n", c->level, c->type, c->size, c->map);
}

static int build_caches(struct cpu_cache_level caches[], u32 size, u32 *cntp)
{
	u32 i, cnt = 0;
	long ncpus;
	u32 nr, cpu;
	u16 level;

	ncpus = sysconf(_SC_NPROCESSORS_CONF);
	if (ncpus < 0)
		return -1;

	nr = (u32)(ncpus & UINT_MAX);

	for (cpu = 0; cpu < nr; cpu++) {
		for (level = 0; level < 10; level++) {
			struct cpu_cache_level c;
			int err;

			err = cpu_cache_level__read(&c, cpu, level);
			if (err < 0)
				return err;

			if (err == 1)
				break;

			for (i = 0; i < cnt; i++) {
				if (cpu_cache_level__cmp(&c, &caches[i]))
					break;
			}

			if (i == cnt)
				caches[cnt++] = c;
			else
				cpu_cache_level__free(&c);

			if (WARN_ONCE(cnt == size, "way too many cpu caches.."))
				goto out;
		}
	}
out:
	*cntp = cnt;
	return 0;
}

#define MAX_CACHE_LVL 4

static int write_cache(struct feat_fd *ff,
		       struct evlist *evlist __maybe_unused)
{
	u32 max_caches = cpu__max_cpu() * MAX_CACHE_LVL;
	struct cpu_cache_level caches[max_caches];
	u32 cnt = 0, i, version = 1;
	int ret;

	ret = build_caches(caches, max_caches, &cnt);
	if (ret)
		goto out;

	qsort(&caches, cnt, sizeof(struct cpu_cache_level), cpu_cache_level__sort);

	ret = do_write(ff, &version, sizeof(u32));
	if (ret < 0)
		goto out;

	ret = do_write(ff, &cnt, sizeof(u32));
	if (ret < 0)
		goto out;

	for (i = 0; i < cnt; i++) {
		struct cpu_cache_level *c = &caches[i];

		#define _W(v)					\
			ret = do_write(ff, &c->v, sizeof(u32));	\
			if (ret < 0)				\
				goto out;

		_W(level)
		_W(line_size)
		_W(sets)
		_W(ways)
		#undef _W

		#define _W(v)						\
			ret = do_write_string(ff, (const char *) c->v);	\
			if (ret < 0)					\
				goto out;

		_W(type)
		_W(size)
		_W(map)
		#undef _W
	}

out:
	for (i = 0; i < cnt; i++)
		cpu_cache_level__free(&caches[i]);
	return ret;
}

static int write_stat(struct feat_fd *ff __maybe_unused,
		      struct evlist *evlist __maybe_unused)
{
	return 0;
}

static int write_sample_time(struct feat_fd *ff,
			     struct evlist *evlist)
{
	int ret;

	ret = do_write(ff, &evlist->first_sample_time,
		       sizeof(evlist->first_sample_time));
	if (ret < 0)
		return ret;

	return do_write(ff, &evlist->last_sample_time,
			sizeof(evlist->last_sample_time));
}
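
/*
 * Build the memory map of one NUMA node from
 * /sys/devices/system/node/node<idx>: every memory<N> entry
 * contributes bit N to the node's bitmap. The first directory pass
 * sizes the bitmap, the second one fills it.
 */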
static int memory_node__read(struct memory_node *n, unsigned long idx)
{
	unsigned int phys, size = 0;
	char path[PATH_MAX];
	struct dirent *ent;
	DIR *dir;

#define for_each_memory(mem, dir)					\
	while ((ent = readdir(dir)))					\
		if (strcmp(ent->d_name, ".") &&				\
		    strcmp(ent->d_name, "..") &&			\
		    sscanf(ent->d_name, "memory%u", &mem) == 1)

	scnprintf(path, PATH_MAX,
		  "%s/devices/system/node/node%lu",
		  sysfs__mountpoint(), idx);

	dir = opendir(path);
	if (!dir) {
		pr_warning("failed: can't open memory sysfs data\n");
		return -1;
	}

	for_each_memory(phys, dir) {
		size = max(phys, size);
	}

	size++;

	n->set = bitmap_alloc(size);
	if (!n->set) {
		closedir(dir);
		return -ENOMEM;
	}

	n->node = idx;
	n->size = size;

	rewinddir(dir);

	for_each_memory(phys, dir) {
		set_bit(phys, n->set);
	}

	closedir(dir);
	return 0;
}

static int memory_node__sort(const void *a, const void *b)
{
	const struct memory_node *na = a;
	const struct memory_node *nb = b;

	return na->node - nb->node;
}

static int build_mem_topology(struct memory_node *nodes, u64 size, u64 *cntp)
{
	char path[PATH_MAX];
	struct dirent *ent;
	DIR *dir;
	u64 cnt = 0;
	int ret = 0;

	scnprintf(path, PATH_MAX, "%s/devices/system/node/",
		  sysfs__mountpoint());

	dir = opendir(path);
	if (!dir) {
		pr_debug2("%s: couldn't read %s, does this arch have topology information?\n",
			  __func__, path);
		return -1;
	}

	while (!ret && (ent = readdir(dir))) {
		unsigned int idx;
		int r;

		if (!strcmp(ent->d_name, ".") ||
		    !strcmp(ent->d_name, ".."))
			continue;

		r = sscanf(ent->d_name, "node%u", &idx);
		if (r != 1)
			continue;

		if (WARN_ONCE(cnt >= size,
			"failed to write MEM_TOPOLOGY, way too many nodes\n"))
			return -1;

		ret = memory_node__read(&nodes[cnt++], idx);
	}

	*cntp = cnt;
	closedir(dir);

	if (!ret)
		qsort(nodes, cnt, sizeof(nodes[0]), memory_node__sort);

	return ret;
}

#define MAX_MEMORY_NODES 2000

/*
 * The MEM_TOPOLOGY feature holds the physical memory map for every
 * node in the system. The format of the data is as follows:
 *
 *  0 - version          | for future changes
 *  8 - block_size_bytes | /sys/devices/system/memory/block_size_bytes
 * 16 - count            | number of nodes
 *
 * For each node we store a map of physical indexes:
 *
 * 32 - node id          | node index
 * 40 - size             | size of bitmap
 * 48 - bitmap           | bitmap of memory indexes that belong to the node
 */
static int write_mem_topology(struct feat_fd *ff __maybe_unused,
			      struct evlist *evlist __maybe_unused)
{
	static struct memory_node nodes[MAX_MEMORY_NODES];
	u64 bsize, version = 1, i, nr;
	int ret;

	ret = sysfs__read_xll("devices/system/memory/block_size_bytes",
			      (unsigned long long *) &bsize);
	if (ret)
		return ret;

	ret = build_mem_topology(&nodes[0], MAX_MEMORY_NODES, &nr);
	if (ret)
		return ret;

	ret = do_write(ff, &version, sizeof(version));
	if (ret < 0)
		goto out;

	ret = do_write(ff, &bsize, sizeof(bsize));
	if (ret < 0)
		goto out;

	ret = do_write(ff, &nr, sizeof(nr));
	if (ret < 0)
		goto out;

	for (i = 0; i < nr; i++) {
		struct memory_node *n = &nodes[i];

#define _W(v)						\
		ret = do_write(ff, &n->v, sizeof(n->v));	\
		if (ret < 0)				\
			goto out;

		_W(node)
		_W(size)

#undef _W

		ret = do_write_bitmap(ff, n->set, n->size);
		if (ret < 0)
			goto out;
	}

out:
	return ret;
}
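
/*
 * COMPRESSED: comp_ver, comp_type, comp_level, comp_ratio and
 * comp_mmap_len from perf_env, written back to back in that order.
 */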
static int write_compressed(struct feat_fd *ff __maybe_unused,
			    struct evlist *evlist __maybe_unused)
{
	int ret;

	ret = do_write(ff, &(ff->ph->env.comp_ver), sizeof(ff->ph->env.comp_ver));
	if (ret)
		return ret;

	ret = do_write(ff, &(ff->ph->env.comp_type), sizeof(ff->ph->env.comp_type));
	if (ret)
		return ret;

	ret = do_write(ff, &(ff->ph->env.comp_level), sizeof(ff->ph->env.comp_level));
	if (ret)
		return ret;

	ret = do_write(ff, &(ff->ph->env.comp_ratio), sizeof(ff->ph->env.comp_ratio));
	if (ret)
		return ret;

	return do_write(ff, &(ff->ph->env.comp_mmap_len), sizeof(ff->ph->env.comp_mmap_len));
}

static void print_hostname(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# hostname : %s\n", ff->ph->env.hostname);
}

static void print_osrelease(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# os release : %s\n", ff->ph->env.os_release);
}

static void print_arch(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# arch : %s\n", ff->ph->env.arch);
}

static void print_cpudesc(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# cpudesc : %s\n", ff->ph->env.cpu_desc);
}

static void print_nrcpus(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# nrcpus online : %u\n", ff->ph->env.nr_cpus_online);
	fprintf(fp, "# nrcpus avail : %u\n", ff->ph->env.nr_cpus_avail);
}

static void print_version(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# perf version : %s\n", ff->ph->env.version);
}

static void print_cmdline(struct feat_fd *ff, FILE *fp)
{
	int nr, i;

	nr = ff->ph->env.nr_cmdline;

	fprintf(fp, "# cmdline : ");

	for (i = 0; i < nr; i++) {
		char *argv_i = strdup(ff->ph->env.cmdline_argv[i]);
		if (!argv_i) {
			fprintf(fp, "%s ", ff->ph->env.cmdline_argv[i]);
		} else {
			char *mem = argv_i;
			do {
				char *quote = strchr(argv_i, '\'');
				if (!quote)
					break;
				*quote++ = '\0';
				fprintf(fp, "%s\\\'", argv_i);
				argv_i = quote;
			} while (1);
			fprintf(fp, "%s ", argv_i);
			free(mem);
		}
	}
	fputc('\n', fp);
}

static void print_cpu_topology(struct feat_fd *ff, FILE *fp)
{
	struct perf_header *ph = ff->ph;
	int cpu_nr = ph->env.nr_cpus_avail;
	int nr, i;
	char *str;

	nr = ph->env.nr_sibling_cores;
	str = ph->env.sibling_cores;

	for (i = 0; i < nr; i++) {
		fprintf(fp, "# sibling sockets : %s\n", str);
		str += strlen(str) + 1;
	}

	if (ph->env.nr_sibling_dies) {
		nr = ph->env.nr_sibling_dies;
		str = ph->env.sibling_dies;

		for (i = 0; i < nr; i++) {
			fprintf(fp, "# sibling dies : %s\n", str);
			str += strlen(str) + 1;
		}
	}

	nr = ph->env.nr_sibling_threads;
	str = ph->env.sibling_threads;

	for (i = 0; i < nr; i++) {
		fprintf(fp, "# sibling threads : %s\n", str);
		str += strlen(str) + 1;
	}

	if (ph->env.nr_sibling_dies) {
		if (ph->env.cpu != NULL) {
			for (i = 0; i < cpu_nr; i++)
				fprintf(fp, "# CPU %d: Core ID %d, "
					    "Die ID %d, Socket ID %d\n",
					    i, ph->env.cpu[i].core_id,
					    ph->env.cpu[i].die_id,
					    ph->env.cpu[i].socket_id);
		} else
			fprintf(fp, "# Core ID, Die ID and Socket ID "
				    "information is not available\n");
	} else {
		if (ph->env.cpu != NULL) {
			for (i = 0; i < cpu_nr; i++)
				fprintf(fp, "# CPU %d: Core ID %d, "
					    "Socket ID %d\n",
					    i, ph->env.cpu[i].core_id,
					    ph->env.cpu[i].socket_id);
		} else
			fprintf(fp, "# Core ID and Socket ID "
				    "information is not available\n");
	}
}
static void print_clockid(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# clockid frequency: %"PRIu64" MHz\n",
		ff->ph->env.clockid_res_ns * 1000);
}

static void print_dir_format(struct feat_fd *ff, FILE *fp)
{
	struct perf_session *session;
	struct perf_data *data;

	session = container_of(ff->ph, struct perf_session, header);
	data = session->data;

	fprintf(fp, "# directory data version : %"PRIu64"\n", data->dir.version);
}

static void print_bpf_prog_info(struct feat_fd *ff, FILE *fp)
{
	struct perf_env *env = &ff->ph->env;
	struct rb_root *root;
	struct rb_node *next;

	down_read(&env->bpf_progs.lock);

	root = &env->bpf_progs.infos;
	next = rb_first(root);

	while (next) {
		struct bpf_prog_info_node *node;

		node = rb_entry(next, struct bpf_prog_info_node, rb_node);
		next = rb_next(&node->rb_node);

		bpf_event__print_bpf_prog_info(&node->info_linear->info,
					       env, fp);
	}

	up_read(&env->bpf_progs.lock);
}

static void print_bpf_btf(struct feat_fd *ff, FILE *fp)
{
	struct perf_env *env = &ff->ph->env;
	struct rb_root *root;
	struct rb_node *next;

	down_read(&env->bpf_progs.lock);

	root = &env->bpf_progs.btfs;
	next = rb_first(root);

	while (next) {
		struct btf_node *node;

		node = rb_entry(next, struct btf_node, rb_node);
		next = rb_next(&node->rb_node);
		fprintf(fp, "# btf info of id %u\n", node->id);
	}

	up_read(&env->bpf_progs.lock);
}

static void free_event_desc(struct evsel *events)
{
	struct evsel *evsel;

	if (!events)
		return;

	for (evsel = events; evsel->core.attr.size; evsel++) {
		zfree(&evsel->name);
		zfree(&evsel->id);
	}

	free(events);
}
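
/*
 * Parse the EVENT_DESC blob written by write_event_desc(): the number
 * of events and the on-file attr size, then per event the attr, the
 * number of ids, the event name and the id array. The returned array
 * is terminated by an entry with attr.size == 0.
 */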
static struct evsel *read_event_desc(struct feat_fd *ff)
{
	struct evsel *evsel, *events = NULL;
	u64 *id;
	void *buf = NULL;
	u32 nre, sz, nr, i, j;
	size_t msz;

	/* number of events */
	if (do_read_u32(ff, &nre))
		goto error;

	if (do_read_u32(ff, &sz))
		goto error;

	/* buffer to hold on-file attr struct */
	buf = malloc(sz);
	if (!buf)
		goto error;

	/* the last event terminates with evsel->core.attr.size == 0: */
	events = calloc(nre + 1, sizeof(*events));
	if (!events)
		goto error;

	msz = sizeof(evsel->core.attr);
	if (sz < msz)
		msz = sz;

	for (i = 0, evsel = events; i < nre; evsel++, i++) {
		evsel->idx = i;

		/*
		 * must read entire on-file attr struct to
		 * sync up with layout.
		 */
		if (__do_read(ff, buf, sz))
			goto error;

		if (ff->ph->needs_swap)
			perf_event__attr_swap(buf);

		memcpy(&evsel->core.attr, buf, msz);

		if (do_read_u32(ff, &nr))
			goto error;

		if (ff->ph->needs_swap)
			evsel->needs_swap = true;

		evsel->name = do_read_string(ff);
		if (!evsel->name)
			goto error;

		if (!nr)
			continue;

		id = calloc(nr, sizeof(*id));
		if (!id)
			goto error;
		evsel->ids = nr;
		evsel->id = id;

		for (j = 0 ; j < nr; j++) {
			if (do_read_u64(ff, id))
				goto error;
			id++;
		}
	}
out:
	free(buf);
	return events;
error:
	free_event_desc(events);
	events = NULL;
	goto out;
}

static int __desc_attr__fprintf(FILE *fp, const char *name, const char *val,
				void *priv __maybe_unused)
{
	return fprintf(fp, ", %s = %s", name, val);
}

static void print_event_desc(struct feat_fd *ff, FILE *fp)
{
	struct evsel *evsel, *events;
	u32 j;
	u64 *id;

	if (ff->events)
		events = ff->events;
	else
		events = read_event_desc(ff);

	if (!events) {
		fprintf(fp, "# event desc: not available or unable to read\n");
		return;
	}

	for (evsel = events; evsel->core.attr.size; evsel++) {
		fprintf(fp, "# event : name = %s, ", evsel->name);

		if (evsel->ids) {
			fprintf(fp, ", id = {");
			for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) {
				if (j)
					fputc(',', fp);
				fprintf(fp, " %"PRIu64, *id);
			}
			fprintf(fp, " }");
		}

		perf_event_attr__fprintf(fp, &evsel->core.attr, __desc_attr__fprintf, NULL);

		fputc('\n', fp);
	}

	free_event_desc(events);
	ff->events = NULL;
}

static void print_total_mem(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# total memory : %llu kB\n", ff->ph->env.total_mem);
}

static void print_numa_topology(struct feat_fd *ff, FILE *fp)
{
	int i;
	struct numa_node *n;

	for (i = 0; i < ff->ph->env.nr_numa_nodes; i++) {
		n = &ff->ph->env.numa_nodes[i];

		fprintf(fp, "# node%u meminfo : total = %"PRIu64" kB,"
			    " free = %"PRIu64" kB\n",
			n->node, n->mem_total, n->mem_free);

		fprintf(fp, "# node%u cpu list : ", n->node);
		cpu_map__fprintf(n->map, fp);
	}
}

static void print_cpuid(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# cpuid : %s\n", ff->ph->env.cpuid);
}

static void print_branch_stack(struct feat_fd *ff __maybe_unused, FILE *fp)
{
	fprintf(fp, "# contains samples with branch stack\n");
}

static void print_auxtrace(struct feat_fd *ff __maybe_unused, FILE *fp)
{
	fprintf(fp, "# contains AUX area data (e.g. instruction trace)\n");
}

static void print_stat(struct feat_fd *ff __maybe_unused, FILE *fp)
{
	fprintf(fp, "# contains stat data\n");
}

static void print_cache(struct feat_fd *ff, FILE *fp __maybe_unused)
{
	int i;

	fprintf(fp, "# CPU cache info:\n");
	for (i = 0; i < ff->ph->env.caches_cnt; i++) {
		fprintf(fp, "#  ");
		cpu_cache_level__fprintf(fp, &ff->ph->env.caches[i]);
	}
}
"Zstd" : "Unknown", 1786 ff->ph->env.comp_level, ff->ph->env.comp_ratio); 1787 } 1788 1789 static void print_pmu_mappings(struct feat_fd *ff, FILE *fp) 1790 { 1791 const char *delimiter = "# pmu mappings: "; 1792 char *str, *tmp; 1793 u32 pmu_num; 1794 u32 type; 1795 1796 pmu_num = ff->ph->env.nr_pmu_mappings; 1797 if (!pmu_num) { 1798 fprintf(fp, "# pmu mappings: not available\n"); 1799 return; 1800 } 1801 1802 str = ff->ph->env.pmu_mappings; 1803 1804 while (pmu_num) { 1805 type = strtoul(str, &tmp, 0); 1806 if (*tmp != ':') 1807 goto error; 1808 1809 str = tmp + 1; 1810 fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type); 1811 1812 delimiter = ", "; 1813 str += strlen(str) + 1; 1814 pmu_num--; 1815 } 1816 1817 fprintf(fp, "\n"); 1818 1819 if (!pmu_num) 1820 return; 1821 error: 1822 fprintf(fp, "# pmu mappings: unable to read\n"); 1823 } 1824 1825 static void print_group_desc(struct feat_fd *ff, FILE *fp) 1826 { 1827 struct perf_session *session; 1828 struct evsel *evsel; 1829 u32 nr = 0; 1830 1831 session = container_of(ff->ph, struct perf_session, header); 1832 1833 evlist__for_each_entry(session->evlist, evsel) { 1834 if (perf_evsel__is_group_leader(evsel) && 1835 evsel->core.nr_members > 1) { 1836 fprintf(fp, "# group: %s{%s", evsel->group_name ?: "", 1837 perf_evsel__name(evsel)); 1838 1839 nr = evsel->core.nr_members - 1; 1840 } else if (nr) { 1841 fprintf(fp, ",%s", perf_evsel__name(evsel)); 1842 1843 if (--nr == 0) 1844 fprintf(fp, "}\n"); 1845 } 1846 } 1847 } 1848 1849 static void print_sample_time(struct feat_fd *ff, FILE *fp) 1850 { 1851 struct perf_session *session; 1852 char time_buf[32]; 1853 double d; 1854 1855 session = container_of(ff->ph, struct perf_session, header); 1856 1857 timestamp__scnprintf_usec(session->evlist->first_sample_time, 1858 time_buf, sizeof(time_buf)); 1859 fprintf(fp, "# time of first sample : %s\n", time_buf); 1860 1861 timestamp__scnprintf_usec(session->evlist->last_sample_time, 1862 time_buf, sizeof(time_buf)); 1863 fprintf(fp, "# time of last sample : %s\n", time_buf); 1864 1865 d = (double)(session->evlist->last_sample_time - 1866 session->evlist->first_sample_time) / NSEC_PER_MSEC; 1867 1868 fprintf(fp, "# sample duration : %10.3f ms\n", d); 1869 } 1870 1871 static void memory_node__fprintf(struct memory_node *n, 1872 unsigned long long bsize, FILE *fp) 1873 { 1874 char buf_map[100], buf_size[50]; 1875 unsigned long long size; 1876 1877 size = bsize * bitmap_weight(n->set, n->size); 1878 unit_number__scnprintf(buf_size, 50, size); 1879 1880 bitmap_scnprintf(n->set, n->size, buf_map, 100); 1881 fprintf(fp, "# %3" PRIu64 " [%s]: %s\n", n->node, buf_size, buf_map); 1882 } 1883 1884 static void print_mem_topology(struct feat_fd *ff, FILE *fp) 1885 { 1886 struct memory_node *nodes; 1887 int i, nr; 1888 1889 nodes = ff->ph->env.memory_nodes; 1890 nr = ff->ph->env.nr_memory_nodes; 1891 1892 fprintf(fp, "# memory nodes (nr %d, block size 0x%llx):\n", 1893 nr, ff->ph->env.memory_bsize); 1894 1895 for (i = 0; i < nr; i++) { 1896 memory_node__fprintf(&nodes[i], ff->ph->env.memory_bsize, fp); 1897 } 1898 } 1899 1900 static int __event_process_build_id(struct perf_record_header_build_id *bev, 1901 char *filename, 1902 struct perf_session *session) 1903 { 1904 int err = -1; 1905 struct machine *machine; 1906 u16 cpumode; 1907 struct dso *dso; 1908 enum dso_kernel_type dso_type; 1909 1910 machine = perf_session__findnew_machine(session, bev->pid); 1911 if (!machine) 1912 goto out; 1913 1914 cpumode = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; 1915 
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		dso_type = DSO_TYPE_KERNEL;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		dso_type = DSO_TYPE_GUEST_KERNEL;
		break;
	case PERF_RECORD_MISC_USER:
	case PERF_RECORD_MISC_GUEST_USER:
		dso_type = DSO_TYPE_USER;
		break;
	default:
		goto out;
	}

	dso = machine__findnew_dso(machine, filename);
	if (dso != NULL) {
		char sbuild_id[SBUILD_ID_SIZE];

		dso__set_build_id(dso, &bev->build_id);

		if (dso_type != DSO_TYPE_USER) {
			struct kmod_path m = { .name = NULL, };

			if (!kmod_path__parse_name(&m, filename) && m.kmod)
				dso__set_module_info(dso, &m, machine);
			else
				dso->kernel = dso_type;

			free(m.name);
		}

		build_id__sprintf(dso->build_id, sizeof(dso->build_id),
				  sbuild_id);
		pr_debug("build id event received for %s: %s\n",
			 dso->long_name, sbuild_id);
		dso__put(dso);
	}

	err = 0;
out:
	return err;
}

static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
						 int input, u64 offset, u64 size)
{
	struct perf_session *session = container_of(header, struct perf_session, header);
	struct {
		struct perf_event_header   header;
		u8			   build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
		char			   filename[0];
	} old_bev;
	struct perf_record_header_build_id bev;
	char filename[PATH_MAX];
	u64 limit = offset + size;

	while (offset < limit) {
		ssize_t len;

		if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
			return -1;

		if (header->needs_swap)
			perf_event_header__bswap(&old_bev.header);

		len = old_bev.header.size - sizeof(old_bev);
		if (readn(input, filename, len) != len)
			return -1;

		bev.header = old_bev.header;

		/*
		 * As the pid is the missing value, we need to fill
		 * it properly. The header.misc value gives us a nice hint.
		 */
		bev.pid	= HOST_KERNEL_ID;
		if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
		    bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
			bev.pid	= DEFAULT_GUEST_KERNEL_ID;

		memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
		__event_process_build_id(&bev, filename, session);

		offset += bev.header.size;
	}

	return 0;
}

static int perf_header__read_build_ids(struct perf_header *header,
				       int input, u64 offset, u64 size)
{
	struct perf_session *session = container_of(header, struct perf_session, header);
	struct perf_record_header_build_id bev;
	char filename[PATH_MAX];
	u64 limit = offset + size, orig_offset = offset;
	int err = -1;

	while (offset < limit) {
		ssize_t len;

		if (readn(input, &bev, sizeof(bev)) != sizeof(bev))
			goto out;

		if (header->needs_swap)
			perf_event_header__bswap(&bev.header);

		len = bev.header.size - sizeof(bev);
		if (readn(input, filename, len) != len)
			goto out;
		/*
		 * The a1645ce1 changeset:
		 *
		 * "perf: 'perf kvm' tool for monitoring guest performance from host"
		 *
		 * Added a field to struct perf_record_header_build_id that broke the file
		 * format.
		 *
		 * Since the kernel build-id is the first entry, process the
		 * table using the old format if the well known
		 * '[kernel.kallsyms]' string for the kernel build-id has the
		 * first 4 characters chopped off (where the pid_t sits).
		 */
		if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
			if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
				return -1;
			return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
		}

		__event_process_build_id(&bev, filename, session);

		offset += bev.header.size;
	}
	err = 0;
out:
	return err;
}

/* Macro for features that simply need to read and store a string. */
#define FEAT_PROCESS_STR_FUN(__feat, __feat_env) \
static int process_##__feat(struct feat_fd *ff, void *data __maybe_unused) \
{\
	ff->ph->env.__feat_env = do_read_string(ff); \
	return ff->ph->env.__feat_env ? 0 : -ENOMEM; \
}

FEAT_PROCESS_STR_FUN(hostname, hostname);
FEAT_PROCESS_STR_FUN(osrelease, os_release);
FEAT_PROCESS_STR_FUN(version, version);
FEAT_PROCESS_STR_FUN(arch, arch);
FEAT_PROCESS_STR_FUN(cpudesc, cpu_desc);
FEAT_PROCESS_STR_FUN(cpuid, cpuid);

static int process_tracing_data(struct feat_fd *ff, void *data)
{
	ssize_t ret = trace_report(ff->fd, data, false);

	return ret < 0 ? -1 : 0;
}

static int process_build_id(struct feat_fd *ff, void *data __maybe_unused)
{
	if (perf_header__read_build_ids(ff->ph, ff->fd, ff->offset, ff->size))
		pr_debug("Failed to read buildids, continuing...\n");
	return 0;
}

static int process_nrcpus(struct feat_fd *ff, void *data __maybe_unused)
{
	int ret;
	u32 nr_cpus_avail, nr_cpus_online;

	ret = do_read_u32(ff, &nr_cpus_avail);
	if (ret)
		return ret;

	ret = do_read_u32(ff, &nr_cpus_online);
	if (ret)
		return ret;
	ff->ph->env.nr_cpus_avail = (int)nr_cpus_avail;
	ff->ph->env.nr_cpus_online = (int)nr_cpus_online;
	return 0;
}

static int process_total_mem(struct feat_fd *ff, void *data __maybe_unused)
{
	u64 total_mem;
	int ret;

	ret = do_read_u64(ff, &total_mem);
	if (ret)
		return -1;
	ff->ph->env.total_mem = (unsigned long long)total_mem;
	return 0;
}

static struct evsel *
perf_evlist__find_by_index(struct evlist *evlist, int idx)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->idx == idx)
			return evsel;
	}

	return NULL;
}

static void
perf_evlist__set_event_name(struct evlist *evlist,
			    struct evsel *event)
{
	struct evsel *evsel;

	if (!event->name)
		return;

	evsel = perf_evlist__find_by_index(evlist, event->idx);
	if (!evsel)
		return;

	if (evsel->name)
		return;

	evsel->name = strdup(event->name);
}

static int
process_event_desc(struct feat_fd *ff, void *data __maybe_unused)
{
	struct perf_session *session;
	struct evsel *evsel, *events = read_event_desc(ff);

	if (!events)
		return 0;

	session = container_of(ff->ph, struct perf_session, header);

	if (session->data->is_pipe) {
		/* Save events for reading later by print_event_desc,
		 * since they can't be read again in pipe mode.
		 */
		ff->events = events;
	}

	for (evsel = events; evsel->core.attr.size; evsel++)
		perf_evlist__set_event_name(session->evlist, evsel);

	if (!session->data->is_pipe)
		free_event_desc(events);

	return 0;
}
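
/*
 * All cmdline strings are unpacked into a single allocation and
 * env.cmdline_argv[] points into it; ff->size + nr + 1 bytes serves
 * as an upper bound for the unpacked, NUL-separated strings.
 */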
static int process_cmdline(struct feat_fd *ff, void *data __maybe_unused)
{
	char *str, *cmdline = NULL, **argv = NULL;
	u32 nr, i, len = 0;

	if (do_read_u32(ff, &nr))
		return -1;

	ff->ph->env.nr_cmdline = nr;

	cmdline = zalloc(ff->size + nr + 1);
	if (!cmdline)
		return -1;

	argv = zalloc(sizeof(char *) * (nr + 1));
	if (!argv)
		goto error;

	for (i = 0; i < nr; i++) {
		str = do_read_string(ff);
		if (!str)
			goto error;

		argv[i] = cmdline + len;
		memcpy(argv[i], str, strlen(str) + 1);
		len += strlen(str) + 1;
		free(str);
	}
	ff->ph->env.cmdline = cmdline;
	ff->ph->env.cmdline_argv = (const char **) argv;
	return 0;

error:
	free(argv);
	free(cmdline);
	return -1;
}

static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
{
	u32 nr, i;
	char *str;
	struct strbuf sb;
	int cpu_nr = ff->ph->env.nr_cpus_avail;
	u64 size = 0;
	struct perf_header *ph = ff->ph;
	bool do_core_id_test = true;

	ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu));
	if (!ph->env.cpu)
		return -1;

	if (do_read_u32(ff, &nr))
		goto free_cpu;

	ph->env.nr_sibling_cores = nr;
	size += sizeof(u32);
	if (strbuf_init(&sb, 128) < 0)
		goto free_cpu;

	for (i = 0; i < nr; i++) {
		str = do_read_string(ff);
		if (!str)
			goto error;

		/* include a NULL character at the end */
		if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
			goto error;
		size += string_size(str);
		free(str);
	}
	ph->env.sibling_cores = strbuf_detach(&sb, NULL);

	if (do_read_u32(ff, &nr))
		return -1;

	ph->env.nr_sibling_threads = nr;
	size += sizeof(u32);

	for (i = 0; i < nr; i++) {
		str = do_read_string(ff);
		if (!str)
			goto error;

		/* include a NULL character at the end */
		if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
			goto error;
		size += string_size(str);
		free(str);
	}
	ph->env.sibling_threads = strbuf_detach(&sb, NULL);

	/*
	 * The header may be from old perf,
	 * which doesn't include core id and socket id information.
	 */
	if (ff->size <= size) {
		zfree(&ph->env.cpu);
		return 0;
	}

	/* On s390 the socket_id number is not related to the number of cpus.
	 * The socket_id number might be higher than the number of cpus.
	 * This depends on the configuration.
	 * AArch64 is the same.
	 */
	if (ph->env.arch && (!strncmp(ph->env.arch, "s390", 4)
			  || !strncmp(ph->env.arch, "aarch64", 7)))
		do_core_id_test = false;

	for (i = 0; i < (u32)cpu_nr; i++) {
		if (do_read_u32(ff, &nr))
			goto free_cpu;

		ph->env.cpu[i].core_id = nr;
		size += sizeof(u32);

		if (do_read_u32(ff, &nr))
			goto free_cpu;

		if (do_core_id_test && nr != (u32)-1 && nr > (u32)cpu_nr) {
2293 "You may need to upgrade the perf tool.\n"); 2294 goto free_cpu; 2295 } 2296 2297 ph->env.cpu[i].socket_id = nr; 2298 size += sizeof(u32); 2299 } 2300 2301 /* 2302 * The header may be from old perf, 2303 * which doesn't include die information. 2304 */ 2305 if (ff->size <= size) 2306 return 0; 2307 2308 if (do_read_u32(ff, &nr)) 2309 return -1; 2310 2311 ph->env.nr_sibling_dies = nr; 2312 size += sizeof(u32); 2313 2314 for (i = 0; i < nr; i++) { 2315 str = do_read_string(ff); 2316 if (!str) 2317 goto error; 2318 2319 /* include a NULL character at the end */ 2320 if (strbuf_add(&sb, str, strlen(str) + 1) < 0) 2321 goto error; 2322 size += string_size(str); 2323 free(str); 2324 } 2325 ph->env.sibling_dies = strbuf_detach(&sb, NULL); 2326 2327 for (i = 0; i < (u32)cpu_nr; i++) { 2328 if (do_read_u32(ff, &nr)) 2329 goto free_cpu; 2330 2331 ph->env.cpu[i].die_id = nr; 2332 } 2333 2334 return 0; 2335 2336 error: 2337 strbuf_release(&sb); 2338 free_cpu: 2339 zfree(&ph->env.cpu); 2340 return -1; 2341 } 2342 2343 static int process_numa_topology(struct feat_fd *ff, void *data __maybe_unused) 2344 { 2345 struct numa_node *nodes, *n; 2346 u32 nr, i; 2347 char *str; 2348 2349 /* nr nodes */ 2350 if (do_read_u32(ff, &nr)) 2351 return -1; 2352 2353 nodes = zalloc(sizeof(*nodes) * nr); 2354 if (!nodes) 2355 return -ENOMEM; 2356 2357 for (i = 0; i < nr; i++) { 2358 n = &nodes[i]; 2359 2360 /* node number */ 2361 if (do_read_u32(ff, &n->node)) 2362 goto error; 2363 2364 if (do_read_u64(ff, &n->mem_total)) 2365 goto error; 2366 2367 if (do_read_u64(ff, &n->mem_free)) 2368 goto error; 2369 2370 str = do_read_string(ff); 2371 if (!str) 2372 goto error; 2373 2374 n->map = perf_cpu_map__new(str); 2375 if (!n->map) 2376 goto error; 2377 2378 free(str); 2379 } 2380 ff->ph->env.nr_numa_nodes = nr; 2381 ff->ph->env.numa_nodes = nodes; 2382 return 0; 2383 2384 error: 2385 free(nodes); 2386 return -1; 2387 } 2388 2389 static int process_pmu_mappings(struct feat_fd *ff, void *data __maybe_unused) 2390 { 2391 char *name; 2392 u32 pmu_num; 2393 u32 type; 2394 struct strbuf sb; 2395 2396 if (do_read_u32(ff, &pmu_num)) 2397 return -1; 2398 2399 if (!pmu_num) { 2400 pr_debug("pmu mappings not available\n"); 2401 return 0; 2402 } 2403 2404 ff->ph->env.nr_pmu_mappings = pmu_num; 2405 if (strbuf_init(&sb, 128) < 0) 2406 return -1; 2407 2408 while (pmu_num) { 2409 if (do_read_u32(ff, &type)) 2410 goto error; 2411 2412 name = do_read_string(ff); 2413 if (!name) 2414 goto error; 2415 2416 if (strbuf_addf(&sb, "%u:%s", type, name) < 0) 2417 goto error; 2418 /* include a NULL character at the end */ 2419 if (strbuf_add(&sb, "", 1) < 0) 2420 goto error; 2421 2422 if (!strcmp(name, "msr")) 2423 ff->ph->env.msr_pmu_type = type; 2424 2425 free(name); 2426 pmu_num--; 2427 } 2428 ff->ph->env.pmu_mappings = strbuf_detach(&sb, NULL); 2429 return 0; 2430 2431 error: 2432 strbuf_release(&sb); 2433 return -1; 2434 } 2435 2436 static int process_group_desc(struct feat_fd *ff, void *data __maybe_unused) 2437 { 2438 size_t ret = -1; 2439 u32 i, nr, nr_groups; 2440 struct perf_session *session; 2441 struct evsel *evsel, *leader = NULL; 2442 struct group_desc { 2443 char *name; 2444 u32 leader_idx; 2445 u32 nr_members; 2446 } *desc; 2447 2448 if (do_read_u32(ff, &nr_groups)) 2449 return -1; 2450 2451 ff->ph->env.nr_groups = nr_groups; 2452 if (!nr_groups) { 2453 pr_debug("group desc not available\n"); 2454 return 0; 2455 } 2456 2457 desc = calloc(nr_groups, sizeof(*desc)); 2458 if (!desc) 2459 return -1; 2460 2461 for (i = 0; i < nr_groups; 
i++) { 2462 desc[i].name = do_read_string(ff); 2463 if (!desc[i].name) 2464 goto out_free; 2465 2466 if (do_read_u32(ff, &desc[i].leader_idx)) 2467 goto out_free; 2468 2469 if (do_read_u32(ff, &desc[i].nr_members)) 2470 goto out_free; 2471 } 2472 2473 /* 2474 * Rebuild group relationship based on the group_desc 2475 */ 2476 session = container_of(ff->ph, struct perf_session, header); 2477 session->evlist->nr_groups = nr_groups; 2478 2479 i = nr = 0; 2480 evlist__for_each_entry(session->evlist, evsel) { 2481 /* don't read desc[] past the last group */ if (i < nr_groups && evsel->idx == (int) desc[i].leader_idx) { 2482 evsel->leader = evsel; 2483 /* {anon_group} is a dummy name */ 2484 if (strcmp(desc[i].name, "{anon_group}")) { 2485 evsel->group_name = desc[i].name; 2486 desc[i].name = NULL; 2487 } 2488 evsel->core.nr_members = desc[i].nr_members; 2489 2490 /* the previous group must be complete before a new one starts */ if (nr > 0) { 2491 pr_debug("invalid group desc\n"); 2492 goto out_free; 2493 } 2494 2495 leader = evsel; 2496 nr = evsel->core.nr_members - 1; 2497 i++; 2498 } else if (nr) { 2499 /* This is a group member */ 2500 evsel->leader = leader; 2501 2502 nr--; 2503 } 2504 } 2505 2506 if (i != nr_groups || nr != 0) { 2507 pr_debug("invalid group desc\n"); 2508 goto out_free; 2509 } 2510 2511 ret = 0; 2512 out_free: 2513 for (i = 0; i < nr_groups; i++) 2514 zfree(&desc[i].name); 2515 free(desc); 2516 2517 return ret; 2518 } 2519 2520 static int process_auxtrace(struct feat_fd *ff, void *data __maybe_unused) 2521 { 2522 struct perf_session *session; 2523 int err; 2524 2525 session = container_of(ff->ph, struct perf_session, header); 2526 2527 err = auxtrace_index__process(ff->fd, ff->size, session, 2528 ff->ph->needs_swap); 2529 if (err < 0) 2530 pr_err("Failed to process auxtrace index\n"); 2531 return err; 2532 } 2533 2534 static int process_cache(struct feat_fd *ff, void *data __maybe_unused) 2535 { 2536 struct cpu_cache_level *caches; 2537 u32 cnt, i, version; 2538 2539 if (do_read_u32(ff, &version)) 2540 return -1; 2541 2542 if (version != 1) 2543 return -1; 2544 2545 if (do_read_u32(ff, &cnt)) 2546 return -1; 2547 2548 caches = zalloc(sizeof(*caches) * cnt); 2549 if (!caches) 2550 return -1; 2551 2552 for (i = 0; i < cnt; i++) { 2553 struct cpu_cache_level c; 2554 2555 #define _R(v) \ 2556 if (do_read_u32(ff, &c.v))\ 2557 goto out_free_caches; \ 2558 2559 _R(level) 2560 _R(line_size) 2561 _R(sets) 2562 _R(ways) 2563 #undef _R 2564 2565 #define _R(v) \ 2566 c.v = do_read_string(ff); \ 2567 if (!c.v) \ 2568 goto out_free_caches; 2569 2570 _R(type) 2571 _R(size) 2572 _R(map) 2573 #undef _R 2574 2575 caches[i] = c; 2576 } 2577 2578 ff->ph->env.caches = caches; 2579 ff->ph->env.caches_cnt = cnt; 2580 return 0; 2581 out_free_caches: 2582 /* strings of entries not yet stored are still NULL from zalloc() */ for (i = 0; i < cnt; i++) { free(caches[i].type); free(caches[i].size); free(caches[i].map); } 2583 free(caches); return -1; 2584 } 2585 2586 static int process_sample_time(struct feat_fd *ff, void *data __maybe_unused) 2587 { 2588 struct perf_session *session; 2589 u64 first_sample_time, last_sample_time; 2590 int ret; 2591 2592 session = container_of(ff->ph, struct perf_session, header); 2593 2594 ret = do_read_u64(ff, &first_sample_time); 2595 if (ret) 2596 return -1; 2597 2598 ret = do_read_u64(ff, &last_sample_time); 2599 if (ret) 2600 return -1; 2601 2602 session->evlist->first_sample_time = first_sample_time; 2603 session->evlist->last_sample_time = last_sample_time; 2604 return 0; 2605 } 2606 2607 static int process_mem_topology(struct feat_fd *ff, 2608 void *data __maybe_unused) 2609 { 2610 struct memory_node *nodes; 2611 u64 version, i, nr, bsize; 2612 int ret = -1; 2613 2614 if (do_read_u64(ff, &version)) 2615 return -1; 2616 2617 if
(version != 1) 2618 return -1; 2619 2620 if (do_read_u64(ff, &bsize)) 2621 return -1; 2622 2623 if (do_read_u64(ff, &nr)) 2624 return -1; 2625 2626 nodes = zalloc(sizeof(*nodes) * nr); 2627 if (!nodes) 2628 return -1; 2629 2630 for (i = 0; i < nr; i++) { 2631 struct memory_node n; 2632 2633 #define _R(v) \ 2634 if (do_read_u64(ff, &n.v)) \ 2635 goto out; \ 2636 2637 _R(node) 2638 _R(size) 2639 2640 #undef _R 2641 2642 if (do_read_bitmap(ff, &n.set, &n.size)) 2643 goto out; 2644 2645 nodes[i] = n; 2646 } 2647 2648 ff->ph->env.memory_bsize = bsize; 2649 ff->ph->env.memory_nodes = nodes; 2650 ff->ph->env.nr_memory_nodes = nr; 2651 ret = 0; 2652 2653 out: 2654 if (ret) { /* drop the bitmaps of the nodes read so far, then the array */ while (i--) free(nodes[i].set); 2655 free(nodes); } 2656 return ret; 2657 } 2658 2659 static int process_clockid(struct feat_fd *ff, 2660 void *data __maybe_unused) 2661 { 2662 if (do_read_u64(ff, &ff->ph->env.clockid_res_ns)) 2663 return -1; 2664 2665 return 0; 2666 } 2667 2668 static int process_dir_format(struct feat_fd *ff, 2669 void *_data __maybe_unused) 2670 { 2671 struct perf_session *session; 2672 struct perf_data *data; 2673 2674 session = container_of(ff->ph, struct perf_session, header); 2675 data = session->data; 2676 2677 if (WARN_ON(!perf_data__is_dir(data))) 2678 return -1; 2679 2680 return do_read_u64(ff, &data->dir.version); 2681 } 2682 2683 #ifdef HAVE_LIBBPF_SUPPORT 2684 static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused) 2685 { 2686 struct bpf_prog_info_linear *info_linear; 2687 struct bpf_prog_info_node *info_node; 2688 struct perf_env *env = &ff->ph->env; 2689 u32 count, i; 2690 int err = -1; 2691 2692 if (ff->ph->needs_swap) { 2693 pr_warning("interpreting bpf_prog_info from systems with a different endianness is not yet supported\n"); 2694 return 0; 2695 } 2696 2697 if (do_read_u32(ff, &count)) 2698 return -1; 2699 2700 down_write(&env->bpf_progs.lock); 2701 2702 for (i = 0; i < count; ++i) { 2703 u32 info_len, data_len; 2704 2705 info_linear = NULL; 2706 info_node = NULL; 2707 if (do_read_u32(ff, &info_len)) 2708 goto out; 2709 if (do_read_u32(ff, &data_len)) 2710 goto out; 2711 2712 if (info_len > sizeof(struct bpf_prog_info)) { 2713 pr_warning("detected invalid bpf_prog_info\n"); 2714 goto out; 2715 } 2716 2717 info_linear = malloc(sizeof(struct bpf_prog_info_linear) + 2718 data_len); 2719 if (!info_linear) 2720 goto out; 2721 info_linear->info_len = sizeof(struct bpf_prog_info); 2722 info_linear->data_len = data_len; 2723 if (do_read_u64(ff, (u64 *)(&info_linear->arrays))) 2724 goto out; 2725 if (__do_read(ff, &info_linear->info, info_len)) 2726 goto out; 2727 if (info_len < sizeof(struct bpf_prog_info)) 2728 memset(((void *)(&info_linear->info)) + info_len, 0, 2729 sizeof(struct bpf_prog_info) - info_len); 2730 2731 if (__do_read(ff, info_linear->data, data_len)) 2732 goto out; 2733 2734 info_node = malloc(sizeof(struct bpf_prog_info_node)); 2735 if (!info_node) 2736 goto out; 2737 2738 /* after reading from file, translate offset to address */ 2739 bpf_program__bpil_offs_to_addr(info_linear); 2740 info_node->info_linear = info_linear; 2741 perf_env__insert_bpf_prog_info(env, info_node); 2742 } 2743 2744 up_write(&env->bpf_progs.lock); 2745 return 0; 2746 out: 2747 free(info_linear); 2748 free(info_node); 2749 up_write(&env->bpf_progs.lock); 2750 return err; 2751 } 2752 #else // HAVE_LIBBPF_SUPPORT 2753 static int process_bpf_prog_info(struct feat_fd *ff __maybe_unused, void *data __maybe_unused) 2754 { 2755 return 0; 2756 } 2757 #endif // HAVE_LIBBPF_SUPPORT 2758 2759 static int process_bpf_btf(struct feat_fd *ff,
void *data __maybe_unused) 2760 { 2761 struct perf_env *env = &ff->ph->env; 2762 struct btf_node *node = NULL; 2763 u32 count, i; 2764 int err = -1; 2765 2766 if (ff->ph->needs_swap) { 2767 pr_warning("interpreting btf from systems with a different endianness is not yet supported\n"); 2768 return 0; 2769 } 2770 2771 if (do_read_u32(ff, &count)) 2772 return -1; 2773 2774 down_write(&env->bpf_progs.lock); 2775 2776 for (i = 0; i < count; ++i) { 2777 u32 id, data_size; 2778 2779 if (do_read_u32(ff, &id)) 2780 goto out; 2781 if (do_read_u32(ff, &data_size)) 2782 goto out; 2783 2784 node = malloc(sizeof(struct btf_node) + data_size); 2785 if (!node) 2786 goto out; 2787 2788 node->id = id; 2789 node->data_size = data_size; 2790 2791 if (__do_read(ff, node->data, data_size)) 2792 goto out; 2793 2794 perf_env__insert_btf(env, node); 2795 node = NULL; 2796 } 2797 2798 err = 0; 2799 out: 2800 up_write(&env->bpf_progs.lock); 2801 free(node); 2802 return err; 2803 } 2804 2805 static int process_compressed(struct feat_fd *ff, 2806 void *data __maybe_unused) 2807 { 2808 if (do_read_u32(ff, &(ff->ph->env.comp_ver))) 2809 return -1; 2810 2811 if (do_read_u32(ff, &(ff->ph->env.comp_type))) 2812 return -1; 2813 2814 if (do_read_u32(ff, &(ff->ph->env.comp_level))) 2815 return -1; 2816 2817 if (do_read_u32(ff, &(ff->ph->env.comp_ratio))) 2818 return -1; 2819 2820 if (do_read_u32(ff, &(ff->ph->env.comp_mmap_len))) 2821 return -1; 2822 2823 return 0; 2824 } 2825 2826 struct feature_ops { 2827 int (*write)(struct feat_fd *ff, struct evlist *evlist); 2828 void (*print)(struct feat_fd *ff, FILE *fp); 2829 int (*process)(struct feat_fd *ff, void *data); 2830 const char *name; 2831 bool full_only; 2832 bool synthesize; 2833 }; 2834 2835 #define FEAT_OPR(n, func, __full_only) \ 2836 [HEADER_##n] = { \ 2837 .name = __stringify(n), \ 2838 .write = write_##func, \ 2839 .print = print_##func, \ 2840 .full_only = __full_only, \ 2841 .process = process_##func, \ 2842 .synthesize = true \ 2843 } 2844 2845 #define FEAT_OPN(n, func, __full_only) \ 2846 [HEADER_##n] = { \ 2847 .name = __stringify(n), \ 2848 .write = write_##func, \ 2849 .print = print_##func, \ 2850 .full_only = __full_only, \ 2851 .process = process_##func \ 2852 } /* * For example, FEAT_OPR(HOSTNAME, hostname, false) expands to * [HEADER_HOSTNAME] = { .name = "HOSTNAME", .write = write_hostname, * .print = print_hostname, .full_only = false, .process = process_hostname, * .synthesize = true }. FEAT_OPN is identical except that it leaves * .synthesize unset, so the feature is not synthesized as a * PERF_RECORD_HEADER_FEATURE event. */ 2853 2854 /* feature_ops not implemented: */ 2855 #define print_tracing_data NULL 2856 #define print_build_id NULL 2857 2858 #define process_branch_stack NULL 2859 #define process_stat NULL 2860 2861 2862 static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = { 2863 FEAT_OPN(TRACING_DATA, tracing_data, false), 2864 FEAT_OPN(BUILD_ID, build_id, false), 2865 FEAT_OPR(HOSTNAME, hostname, false), 2866 FEAT_OPR(OSRELEASE, osrelease, false), 2867 FEAT_OPR(VERSION, version, false), 2868 FEAT_OPR(ARCH, arch, false), 2869 FEAT_OPR(NRCPUS, nrcpus, false), 2870 FEAT_OPR(CPUDESC, cpudesc, false), 2871 FEAT_OPR(CPUID, cpuid, false), 2872 FEAT_OPR(TOTAL_MEM, total_mem, false), 2873 FEAT_OPR(EVENT_DESC, event_desc, false), 2874 FEAT_OPR(CMDLINE, cmdline, false), 2875 FEAT_OPR(CPU_TOPOLOGY, cpu_topology, true), 2876 FEAT_OPR(NUMA_TOPOLOGY, numa_topology, true), 2877 FEAT_OPN(BRANCH_STACK, branch_stack, false), 2878 FEAT_OPR(PMU_MAPPINGS, pmu_mappings, false), 2879 FEAT_OPR(GROUP_DESC, group_desc, false), 2880 FEAT_OPN(AUXTRACE, auxtrace, false), 2881 FEAT_OPN(STAT, stat, false), 2882 FEAT_OPN(CACHE, cache, true), 2883 FEAT_OPR(SAMPLE_TIME, sample_time, false), 2884 FEAT_OPR(MEM_TOPOLOGY, mem_topology, true), 2885 FEAT_OPR(CLOCKID, clockid, false), 2886 FEAT_OPN(DIR_FORMAT, dir_format, false), 2887
FEAT_OPR(BPF_PROG_INFO, bpf_prog_info, false), 2888 FEAT_OPR(BPF_BTF, bpf_btf, false), 2889 FEAT_OPR(COMPRESSED, compressed, false), 2890 }; 2891 2892 struct header_print_data { 2893 FILE *fp; 2894 bool full; /* extended list of headers */ 2895 }; 2896 2897 static int perf_file_section__fprintf_info(struct perf_file_section *section, 2898 struct perf_header *ph, 2899 int feat, int fd, void *data) 2900 { 2901 struct header_print_data *hd = data; 2902 struct feat_fd ff; 2903 2904 if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) { 2905 pr_debug("Failed to lseek to %" PRIu64 " offset for feature " 2906 "%d, continuing...\n", section->offset, feat); 2907 return 0; 2908 } 2909 if (feat >= HEADER_LAST_FEATURE) { 2910 pr_warning("unknown feature %d\n", feat); 2911 return 0; 2912 } 2913 if (!feat_ops[feat].print) 2914 return 0; 2915 2916 ff = (struct feat_fd) { 2917 .fd = fd, 2918 .ph = ph, 2919 }; 2920 2921 if (!feat_ops[feat].full_only || hd->full) 2922 feat_ops[feat].print(&ff, hd->fp); 2923 else 2924 fprintf(hd->fp, "# %s info available, use -I to display\n", 2925 feat_ops[feat].name); 2926 2927 return 0; 2928 } 2929 2930 int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full) 2931 { 2932 struct header_print_data hd; 2933 struct perf_header *header = &session->header; 2934 int fd = perf_data__fd(session->data); 2935 struct stat st; 2936 time_t stctime; 2937 int ret, bit; 2938 2939 hd.fp = fp; 2940 hd.full = full; 2941 2942 ret = fstat(fd, &st); 2943 if (ret == -1) 2944 return -1; 2945 2946 stctime = st.st_ctime; 2947 fprintf(fp, "# captured on : %s", ctime(&stctime)); 2948 2949 fprintf(fp, "# header version : %u\n", header->version); 2950 fprintf(fp, "# data offset : %" PRIu64 "\n", header->data_offset); 2951 fprintf(fp, "# data size : %" PRIu64 "\n", header->data_size); 2952 fprintf(fp, "# feat offset : %" PRIu64 "\n", header->feat_offset); 2953 2954 perf_header__process_sections(header, fd, &hd, 2955 perf_file_section__fprintf_info); 2956 2957 if (session->data->is_pipe) 2958 return 0; 2959 2960 fprintf(fp, "# missing features: "); 2961 for_each_clear_bit(bit, header->adds_features, HEADER_LAST_FEATURE) { 2962 if (bit) 2963 fprintf(fp, "%s ", feat_ops[bit].name); 2964 } 2965 2966 fprintf(fp, "\n"); 2967 return 0; 2968 } 2969 2970 static int do_write_feat(struct feat_fd *ff, int type, 2971 struct perf_file_section **p, 2972 struct evlist *evlist) 2973 { 2974 int err; 2975 int ret = 0; 2976 2977 if (perf_header__has_feat(ff->ph, type)) { 2978 if (!feat_ops[type].write) 2979 return -1; 2980 2981 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__)) 2982 return -1; 2983 2984 (*p)->offset = lseek(ff->fd, 0, SEEK_CUR); 2985 2986 err = feat_ops[type].write(ff, evlist); 2987 if (err < 0) { 2988 pr_debug("failed to write feature %s\n", feat_ops[type].name); 2989 2990 /* undo anything written */ 2991 lseek(ff->fd, (*p)->offset, SEEK_SET); 2992 2993 return -1; 2994 } 2995 (*p)->size = lseek(ff->fd, 0, SEEK_CUR) - (*p)->offset; 2996 (*p)++; 2997 } 2998 return ret; 2999 } 3000 3001 static int perf_header__adds_write(struct perf_header *header, 3002 struct evlist *evlist, int fd) 3003 { 3004 int nr_sections; 3005 struct feat_fd ff; 3006 struct perf_file_section *feat_sec, *p; 3007 int sec_size; 3008 u64 sec_start; 3009 int feat; 3010 int err; 3011 3012 ff = (struct feat_fd){ 3013 .fd = fd, 3014 .ph = header, 3015 }; 3016 3017 nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS); 3018 if (!nr_sections) 3019 return 0; 3020 3021 feat_sec = p = 
calloc(nr_sections, sizeof(*feat_sec)); 3022 if (feat_sec == NULL) 3023 return -ENOMEM; 3024 3025 sec_size = sizeof(*feat_sec) * nr_sections; 3026 3027 sec_start = header->feat_offset; 3028 lseek(fd, sec_start + sec_size, SEEK_SET); 3029 3030 for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) { 3031 if (do_write_feat(&ff, feat, &p, evlist)) 3032 perf_header__clear_feat(header, feat); 3033 } 3034 3035 lseek(fd, sec_start, SEEK_SET); 3036 /* 3037 * may write more than needed due to dropped feature, but 3038 * this is okay, reader will skip the missing entries 3039 */ 3040 err = do_write(&ff, feat_sec, sec_size); 3041 if (err < 0) 3042 pr_debug("failed to write feature section\n"); 3043 free(feat_sec); 3044 return err; 3045 } 3046 3047 int perf_header__write_pipe(int fd) 3048 { 3049 struct perf_pipe_file_header f_header; 3050 struct feat_fd ff; 3051 int err; 3052 3053 ff = (struct feat_fd){ .fd = fd }; 3054 3055 f_header = (struct perf_pipe_file_header){ 3056 .magic = PERF_MAGIC, 3057 .size = sizeof(f_header), 3058 }; 3059 3060 err = do_write(&ff, &f_header, sizeof(f_header)); 3061 if (err < 0) { 3062 pr_debug("failed to write perf pipe header\n"); 3063 return err; 3064 } 3065 3066 return 0; 3067 } 3068 3069 int perf_session__write_header(struct perf_session *session, 3070 struct evlist *evlist, 3071 int fd, bool at_exit) 3072 { 3073 struct perf_file_header f_header; 3074 struct perf_file_attr f_attr; 3075 struct perf_header *header = &session->header; 3076 struct evsel *evsel; 3077 struct feat_fd ff; 3078 u64 attr_offset; 3079 int err; 3080 3081 ff = (struct feat_fd){ .fd = fd}; 3082 lseek(fd, sizeof(f_header), SEEK_SET); 3083 3084 evlist__for_each_entry(session->evlist, evsel) { 3085 evsel->id_offset = lseek(fd, 0, SEEK_CUR); 3086 err = do_write(&ff, evsel->id, evsel->ids * sizeof(u64)); 3087 if (err < 0) { 3088 pr_debug("failed to write perf header\n"); 3089 return err; 3090 } 3091 } 3092 3093 attr_offset = lseek(ff.fd, 0, SEEK_CUR); 3094 3095 evlist__for_each_entry(evlist, evsel) { 3096 f_attr = (struct perf_file_attr){ 3097 .attr = evsel->core.attr, 3098 .ids = { 3099 .offset = evsel->id_offset, 3100 .size = evsel->ids * sizeof(u64), 3101 } 3102 }; 3103 err = do_write(&ff, &f_attr, sizeof(f_attr)); 3104 if (err < 0) { 3105 pr_debug("failed to write perf header attribute\n"); 3106 return err; 3107 } 3108 } 3109 3110 if (!header->data_offset) 3111 header->data_offset = lseek(fd, 0, SEEK_CUR); 3112 header->feat_offset = header->data_offset + header->data_size; 3113 3114 if (at_exit) { 3115 err = perf_header__adds_write(header, evlist, fd); 3116 if (err < 0) 3117 return err; 3118 } 3119 3120 f_header = (struct perf_file_header){ 3121 .magic = PERF_MAGIC, 3122 .size = sizeof(f_header), 3123 .attr_size = sizeof(f_attr), 3124 .attrs = { 3125 .offset = attr_offset, 3126 .size = evlist->core.nr_entries * sizeof(f_attr), 3127 }, 3128 .data = { 3129 .offset = header->data_offset, 3130 .size = header->data_size, 3131 }, 3132 /* event_types is ignored, store zeros */ 3133 }; 3134 3135 memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features)); 3136 3137 lseek(fd, 0, SEEK_SET); 3138 err = do_write(&ff, &f_header, sizeof(f_header)); 3139 if (err < 0) { 3140 pr_debug("failed to write perf header\n"); 3141 return err; 3142 } 3143 lseek(fd, header->data_offset + header->data_size, SEEK_SET); 3144 3145 return 0; 3146 } 3147 3148 static int perf_header__getbuffer64(struct perf_header *header, 3149 int fd, void *buf, size_t size) 3150 { 3151 if (readn(fd, buf, size) 
<= 0) 3152 return -1; 3153 3154 if (header->needs_swap) 3155 mem_bswap_64(buf, size); 3156 3157 return 0; 3158 } 3159 3160 int perf_header__process_sections(struct perf_header *header, int fd, 3161 void *data, 3162 int (*process)(struct perf_file_section *section, 3163 struct perf_header *ph, 3164 int feat, int fd, void *data)) 3165 { 3166 struct perf_file_section *feat_sec, *sec; 3167 int nr_sections; 3168 int sec_size; 3169 int feat; 3170 int err; 3171 3172 nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS); 3173 if (!nr_sections) 3174 return 0; 3175 3176 feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec)); 3177 if (!feat_sec) 3178 return -1; 3179 3180 sec_size = sizeof(*feat_sec) * nr_sections; 3181 3182 lseek(fd, header->feat_offset, SEEK_SET); 3183 3184 err = perf_header__getbuffer64(header, fd, feat_sec, sec_size); 3185 if (err < 0) 3186 goto out_free; 3187 3188 for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) { 3189 err = process(sec++, header, feat, fd, data); 3190 if (err < 0) 3191 goto out_free; 3192 } 3193 err = 0; 3194 out_free: 3195 free(feat_sec); 3196 return err; 3197 } 3198 3199 static const int attr_file_abi_sizes[] = { 3200 [0] = PERF_ATTR_SIZE_VER0, 3201 [1] = PERF_ATTR_SIZE_VER1, 3202 [2] = PERF_ATTR_SIZE_VER2, 3203 [3] = PERF_ATTR_SIZE_VER3, 3204 [4] = PERF_ATTR_SIZE_VER4, 3205 0, 3206 }; 3207 3208 /* 3209 * In the legacy file format, the magic number does not encode endianness; 3210 * hdr_sz was used for that instead. But given that hdr_sz varies with the 3211 * ABI revision, we need to try every known ABI size in both byte orders to 3212 * detect the endianness. 3213 */ 3214 static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph) 3215 { 3216 uint64_t ref_size, attr_size; 3217 int i; 3218 3219 for (i = 0 ; attr_file_abi_sizes[i]; i++) { 3220 ref_size = attr_file_abi_sizes[i] 3221 + sizeof(struct perf_file_section); 3222 if (hdr_sz != ref_size) { 3223 attr_size = bswap_64(hdr_sz); 3224 if (attr_size != ref_size) 3225 continue; 3226 3227 ph->needs_swap = true; 3228 } 3229 pr_debug("ABI%d perf.data file detected, need_swap=%d\n", 3230 i, 3231 ph->needs_swap); 3232 return 0; 3233 } 3234 /* could not determine endianness */ 3235 return -1; 3236 } 3237 3238 #define PERF_PIPE_HDR_VER0 16 3239 3240 static const size_t attr_pipe_abi_sizes[] = { 3241 [0] = PERF_PIPE_HDR_VER0, 3242 0, 3243 }; 3244 3245 /* 3246 * In the legacy pipe format, there is an implicit assumption that the 3247 * endianness of the host recording the samples and of the host parsing them 3248 * is the same. This is not always the case, given that the pipe output can 3249 * be redirected into a file and analyzed on a different machine with a 3250 * possibly different endianness and perf_event ABI revision in the perf tool itself.
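 *
 * A rough sketch of the probe performed below (illustrative only; this
 * hypothetical helper is not used anywhere in this file): a candidate
 * header size matches either natively or after a byte swap, and the
 * latter case tells us the file needs swapping:
 */
static bool __maybe_unused hdr_sz_matches(uint64_t hdr_sz, uint64_t ref_size,
					  bool *needs_swap)
{
	if (hdr_sz == ref_size)
		return true;		/* same byte order as the host */
	if (bswap_64(hdr_sz) == ref_size) {
		*needs_swap = true;	/* valid, but opposite byte order */
		return true;
	}
	return false;
}
/*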
3251 */ 3252 static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph) 3253 { 3254 u64 attr_size; 3255 int i; 3256 3257 for (i = 0 ; attr_pipe_abi_sizes[i]; i++) { 3258 if (hdr_sz != attr_pipe_abi_sizes[i]) { 3259 attr_size = bswap_64(hdr_sz); 3260 if (attr_size != attr_pipe_abi_sizes[i]) 3261 continue; 3262 3263 ph->needs_swap = true; 3264 } 3265 pr_debug("Pipe ABI%d perf.data file detected\n", i); 3266 return 0; 3267 } 3268 return -1; 3269 } 3270 3271 bool is_perf_magic(u64 magic) 3272 { 3273 if (!memcmp(&magic, __perf_magic1, sizeof(magic)) 3274 || magic == __perf_magic2 3275 || magic == __perf_magic2_sw) 3276 return true; 3277 3278 return false; 3279 } 3280 3281 static int check_magic_endian(u64 magic, uint64_t hdr_sz, 3282 bool is_pipe, struct perf_header *ph) 3283 { 3284 int ret; 3285 3286 /* check for legacy format */ 3287 ret = memcmp(&magic, __perf_magic1, sizeof(magic)); 3288 if (ret == 0) { 3289 ph->version = PERF_HEADER_VERSION_1; 3290 pr_debug("legacy perf.data format\n"); 3291 if (is_pipe) 3292 return try_all_pipe_abis(hdr_sz, ph); 3293 3294 return try_all_file_abis(hdr_sz, ph); 3295 } 3296 /* 3297 * the new magic number serves two purposes: 3298 * - unique number to identify actual perf.data files 3299 * - encode endianness of file 3300 */ 3301 ph->version = PERF_HEADER_VERSION_2; 3302 3303 /* check magic number with one endianness */ 3304 if (magic == __perf_magic2) 3305 return 0; 3306 3307 /* check magic number with opposite endianness */ 3308 if (magic != __perf_magic2_sw) 3309 return -1; 3310 3311 ph->needs_swap = true; 3312 3313 return 0; 3314 } 3315 3316 int perf_file_header__read(struct perf_file_header *header, 3317 struct perf_header *ph, int fd) 3318 { 3319 ssize_t ret; 3320 3321 lseek(fd, 0, SEEK_SET); 3322 3323 ret = readn(fd, header, sizeof(*header)); 3324 if (ret <= 0) 3325 return -1; 3326 3327 if (check_magic_endian(header->magic, 3328 header->attr_size, false, ph) < 0) { 3329 pr_debug("magic/endian check failed\n"); 3330 return -1; 3331 } 3332 3333 if (ph->needs_swap) { 3334 mem_bswap_64(header, offsetof(struct perf_file_header, 3335 adds_features)); 3336 } 3337 3338 if (header->size != sizeof(*header)) { 3339 /* Support the previous format */ 3340 if (header->size == offsetof(typeof(*header), adds_features)) 3341 bitmap_zero(header->adds_features, HEADER_FEAT_BITS); 3342 else 3343 return -1; 3344 } else if (ph->needs_swap) { 3345 /* 3346 * feature bitmap is declared as an array of unsigned longs -- 3347 * not good since its size can differ between the host that 3348 * generated the data file and the host analyzing the file. 3349 * 3350 * We need to handle endianness, but we don't know the size of 3351 * the unsigned long where the file was generated. Take a best 3352 * guess at determining it: try 64-bit swap first (ie., file 3353 * created on a 64-bit host), and check if the hostname feature 3354 * bit is set (this feature bit is forced on as of fbe96f2). 3355 * If the bit is not set, undo the 64-bit swap and try a 32-bit 3356 * swap. If the hostname bit is still not set (e.g., older data 3357 * file), punt and fall back to the original behavior -- 3358 * clearing all feature bits and setting buildid.
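		 *
		 * Illustration (roughly speaking): a u64 swap on a bitmap
		 * that was written as u32 words also exchanges adjacent
		 * words, stranding the low-order feature bits (among them
		 * HEADER_HOSTNAME) in the wrong half of the word -- which
		 * is why a failed probe is undone and retried as a u32
		 * swap below.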
3359 */ 3360 mem_bswap_64(&header->adds_features, 3361 BITS_TO_U64(HEADER_FEAT_BITS)); 3362 3363 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) { 3364 /* unswap as u64 */ 3365 mem_bswap_64(&header->adds_features, 3366 BITS_TO_U64(HEADER_FEAT_BITS)); 3367 3368 /* unswap as u32 */ 3369 mem_bswap_32(&header->adds_features, 3370 BITS_TO_U32(HEADER_FEAT_BITS)); 3371 } 3372 3373 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) { 3374 bitmap_zero(header->adds_features, HEADER_FEAT_BITS); 3375 set_bit(HEADER_BUILD_ID, header->adds_features); 3376 } 3377 } 3378 3379 memcpy(&ph->adds_features, &header->adds_features, 3380 sizeof(ph->adds_features)); 3381 3382 ph->data_offset = header->data.offset; 3383 ph->data_size = header->data.size; 3384 ph->feat_offset = header->data.offset + header->data.size; 3385 return 0; 3386 } 3387 3388 static int perf_file_section__process(struct perf_file_section *section, 3389 struct perf_header *ph, 3390 int feat, int fd, void *data) 3391 { 3392 struct feat_fd fdd = { 3393 .fd = fd, 3394 .ph = ph, 3395 .size = section->size, 3396 .offset = section->offset, 3397 }; 3398 3399 if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) { 3400 pr_debug("Failed to lseek to %" PRIu64 " offset for feature " 3401 "%d, continuing...\n", section->offset, feat); 3402 return 0; 3403 } 3404 3405 if (feat >= HEADER_LAST_FEATURE) { 3406 pr_debug("unknown feature %d, continuing...\n", feat); 3407 return 0; 3408 } 3409 3410 if (!feat_ops[feat].process) 3411 return 0; 3412 3413 return feat_ops[feat].process(&fdd, data); 3414 } 3415 3416 static int perf_file_header__read_pipe(struct perf_pipe_file_header *header, 3417 struct perf_header *ph, int fd, 3418 bool repipe) 3419 { 3420 struct feat_fd ff = { 3421 .fd = STDOUT_FILENO, 3422 .ph = ph, 3423 }; 3424 ssize_t ret; 3425 3426 ret = readn(fd, header, sizeof(*header)); 3427 if (ret <= 0) 3428 return -1; 3429 3430 if (check_magic_endian(header->magic, header->size, true, ph) < 0) { 3431 pr_debug("endian/magic failed\n"); 3432 return -1; 3433 } 3434 3435 if (ph->needs_swap) 3436 header->size = bswap_64(header->size); 3437 3438 if (repipe && do_write(&ff, header, sizeof(*header)) < 0) 3439 return -1; 3440 3441 return 0; 3442 } 3443 3444 static int perf_header__read_pipe(struct perf_session *session) 3445 { 3446 struct perf_header *header = &session->header; 3447 struct perf_pipe_file_header f_header; 3448 3449 if (perf_file_header__read_pipe(&f_header, header, 3450 perf_data__fd(session->data), 3451 session->repipe) < 0) { 3452 pr_debug("incompatible file format\n"); 3453 return -EINVAL; 3454 } 3455 3456 return 0; 3457 } 3458 3459 static int read_attr(int fd, struct perf_header *ph, 3460 struct perf_file_attr *f_attr) 3461 { 3462 struct perf_event_attr *attr = &f_attr->attr; 3463 size_t sz, left; 3464 size_t our_sz = sizeof(f_attr->attr); 3465 ssize_t ret; 3466 3467 memset(f_attr, 0, sizeof(*f_attr)); 3468 3469 /* read minimal guaranteed structure */ 3470 ret = readn(fd, attr, PERF_ATTR_SIZE_VER0); 3471 if (ret <= 0) { 3472 pr_debug("cannot read %d bytes of header attr\n", 3473 PERF_ATTR_SIZE_VER0); 3474 return -1; 3475 } 3476 3477 /* on file perf_event_attr size */ 3478 sz = attr->size; 3479 3480 if (ph->needs_swap) 3481 sz = bswap_32(sz); 3482 3483 if (sz == 0) { 3484 /* assume ABI0 */ 3485 sz = PERF_ATTR_SIZE_VER0; 3486 } else if (sz > our_sz) { 3487 pr_debug("file uses a more recent and unsupported ABI" 3488 " (%zu bytes extra)\n", sz - our_sz); 3489 return -1; 3490 } 3491 /* what we have not yet read and that we know about */ 
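	/*
	 * e.g. an ABI0 file has sz == PERF_ATTR_SIZE_VER0 (64 bytes), so
	 * left == 0 and nothing more is read here; an ABI4 file has
	 * sz == PERF_ATTR_SIZE_VER4 (104 bytes), so the remaining 40 bytes
	 * of the attr are read next.
	 */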
3492 left = sz - PERF_ATTR_SIZE_VER0; 3493 if (left) { 3494 void *ptr = attr; 3495 ptr += PERF_ATTR_SIZE_VER0; 3496 3497 ret = readn(fd, ptr, left); if (ret <= 0) return -1; 3498 } 3499 /* read perf_file_section, ids are read in caller */ 3500 ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids)); 3501 3502 return ret <= 0 ? -1 : 0; 3503 } 3504 3505 static int perf_evsel__prepare_tracepoint_event(struct evsel *evsel, 3506 struct tep_handle *pevent) 3507 { 3508 struct tep_event *event; 3509 char bf[128]; 3510 3511 /* already prepared */ 3512 if (evsel->tp_format) 3513 return 0; 3514 3515 if (pevent == NULL) { 3516 pr_debug("broken or missing trace data\n"); 3517 return -1; 3518 } 3519 3520 event = tep_find_event(pevent, evsel->core.attr.config); 3521 if (event == NULL) { 3522 pr_debug("cannot find event format for %d\n", (int)evsel->core.attr.config); 3523 return -1; 3524 } 3525 3526 if (!evsel->name) { 3527 snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name); 3528 evsel->name = strdup(bf); 3529 if (evsel->name == NULL) 3530 return -1; 3531 } 3532 3533 evsel->tp_format = event; 3534 return 0; 3535 } 3536 3537 static int perf_evlist__prepare_tracepoint_events(struct evlist *evlist, 3538 struct tep_handle *pevent) 3539 { 3540 struct evsel *pos; 3541 3542 evlist__for_each_entry(evlist, pos) { 3543 if (pos->core.attr.type == PERF_TYPE_TRACEPOINT && 3544 perf_evsel__prepare_tracepoint_event(pos, pevent)) 3545 return -1; 3546 } 3547 3548 return 0; 3549 } 3550 3551 int perf_session__read_header(struct perf_session *session) 3552 { 3553 struct perf_data *data = session->data; 3554 struct perf_header *header = &session->header; 3555 struct perf_file_header f_header; 3556 struct perf_file_attr f_attr; 3557 u64 f_id; 3558 int nr_attrs, nr_ids, i, j; 3559 int fd = perf_data__fd(data); 3560 3561 session->evlist = evlist__new(); 3562 if (session->evlist == NULL) 3563 return -ENOMEM; 3564 3565 session->evlist->env = &header->env; 3566 session->machines.host.env = &header->env; 3567 if (perf_data__is_pipe(data)) 3568 return perf_header__read_pipe(session); 3569 3570 if (perf_file_header__read(&f_header, header, fd) < 0) 3571 return -EINVAL; 3572 3573 /* 3574 * Sanity check that perf.data was written cleanly; data size is 3575 * initialized to 0 and updated only if the on_exit function is run. 3576 * If data size is still 0 then the file contains only partial 3577 * information. Just warn the user and process as much as we can.
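	 * (This happens, for instance, when 'perf record' is terminated by
	 * SIGKILL, so the at-exit header rewrite that fills in data.size
	 * never runs.)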
3578 */ 3579 if (f_header.data.size == 0) { 3580 pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n" 3581 "Was the 'perf record' command properly terminated?\n", 3582 data->file.path); 3583 } 3584 3585 if (f_header.attr_size == 0) { 3586 pr_err("ERROR: The %s file's attr size field is 0 which is unexpected.\n" 3587 "Was the 'perf record' command properly terminated?\n", 3588 data->file.path); 3589 return -EINVAL; 3590 } 3591 3592 nr_attrs = f_header.attrs.size / f_header.attr_size; 3593 lseek(fd, f_header.attrs.offset, SEEK_SET); 3594 3595 for (i = 0; i < nr_attrs; i++) { 3596 struct evsel *evsel; 3597 off_t tmp; 3598 3599 if (read_attr(fd, header, &f_attr) < 0) 3600 goto out_errno; 3601 3602 if (header->needs_swap) { 3603 f_attr.ids.size = bswap_64(f_attr.ids.size); 3604 f_attr.ids.offset = bswap_64(f_attr.ids.offset); 3605 perf_event__attr_swap(&f_attr.attr); 3606 } 3607 3608 tmp = lseek(fd, 0, SEEK_CUR); 3609 evsel = evsel__new(&f_attr.attr); 3610 3611 if (evsel == NULL) 3612 goto out_delete_evlist; 3613 3614 evsel->needs_swap = header->needs_swap; 3615 /* 3616 * Do it before so that if perf_evsel__alloc_id fails, this 3617 * entry gets purged too at evlist__delete(). 3618 */ 3619 evlist__add(session->evlist, evsel); 3620 3621 nr_ids = f_attr.ids.size / sizeof(u64); 3622 /* 3623 * We don't have the cpu and thread maps on the header, so 3624 * for allocating the perf_sample_id table we fake 1 cpu and 3625 * hattr->ids threads. 3626 */ 3627 if (perf_evsel__alloc_id(evsel, 1, nr_ids)) 3628 goto out_delete_evlist; 3629 3630 lseek(fd, f_attr.ids.offset, SEEK_SET); 3631 3632 for (j = 0; j < nr_ids; j++) { 3633 if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id))) 3634 goto out_errno; 3635 3636 perf_evlist__id_add(session->evlist, evsel, 0, j, f_id); 3637 } 3638 3639 lseek(fd, tmp, SEEK_SET); 3640 } 3641 3642 perf_header__process_sections(header, fd, &session->tevent, 3643 perf_file_section__process); 3644 3645 if (perf_evlist__prepare_tracepoint_events(session->evlist, 3646 session->tevent.pevent)) 3647 goto out_delete_evlist; 3648 3649 return 0; 3650 out_errno: 3651 return -errno; 3652 3653 out_delete_evlist: 3654 evlist__delete(session->evlist); 3655 session->evlist = NULL; 3656 return -ENOMEM; 3657 } 3658 3659 int perf_event__synthesize_attr(struct perf_tool *tool, 3660 struct perf_event_attr *attr, u32 ids, u64 *id, 3661 perf_event__handler_t process) 3662 { 3663 union perf_event *ev; 3664 size_t size; 3665 int err; 3666 3667 size = sizeof(struct perf_event_attr); 3668 size = PERF_ALIGN(size, sizeof(u64)); 3669 size += sizeof(struct perf_event_header); 3670 size += ids * sizeof(u64); 3671 3672 ev = zalloc(size); 3673 3674 if (ev == NULL) 3675 return -ENOMEM; 3676 3677 ev->attr.attr = *attr; 3678 memcpy(ev->attr.id, id, ids * sizeof(u64)); 3679 3680 ev->attr.header.type = PERF_RECORD_HEADER_ATTR; 3681 ev->attr.header.size = (u16)size; 3682 3683 if (ev->attr.header.size == size) 3684 err = process(tool, ev, NULL, NULL); 3685 else 3686 err = -E2BIG; 3687 3688 free(ev); 3689 3690 return err; 3691 } 3692 3693 int perf_event__synthesize_features(struct perf_tool *tool, 3694 struct perf_session *session, 3695 struct evlist *evlist, 3696 perf_event__handler_t process) 3697 { 3698 struct perf_header *header = &session->header; 3699 struct feat_fd ff; 3700 struct perf_record_header_feature *fe; 3701 size_t sz, sz_hdr; 3702 int feat, ret; 3703 3704 sz_hdr = sizeof(fe->header); 3705 sz = sizeof(union perf_event); 3706 /* get a nice alignment */ 3707 sz = 
PERF_ALIGN(sz, page_size); 3708 3709 memset(&ff, 0, sizeof(ff)); 3710 3711 ff.buf = malloc(sz); 3712 if (!ff.buf) 3713 return -ENOMEM; 3714 3715 ff.size = sz - sz_hdr; 3716 ff.ph = &session->header; 3717 3718 for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) { 3719 if (!feat_ops[feat].synthesize) { 3720 pr_debug("No record header feature for feature %d\n", feat); 3721 continue; 3722 } 3723 3724 ff.offset = sizeof(*fe); 3725 3726 ret = feat_ops[feat].write(&ff, evlist); 3727 if (ret || ff.offset <= (ssize_t)sizeof(*fe)) { 3728 pr_debug("Error writing feature\n"); 3729 continue; 3730 } 3731 /* ff.buf may have changed due to realloc in do_write() */ 3732 fe = ff.buf; 3733 memset(fe, 0, sizeof(*fe)); 3734 3735 fe->feat_id = feat; 3736 fe->header.type = PERF_RECORD_HEADER_FEATURE; 3737 fe->header.size = ff.offset; 3738 3739 ret = process(tool, ff.buf, NULL, NULL); 3740 if (ret) { 3741 free(ff.buf); 3742 return ret; 3743 } 3744 } 3745 3746 /* Send HEADER_LAST_FEATURE mark. */ 3747 fe = ff.buf; 3748 fe->feat_id = HEADER_LAST_FEATURE; 3749 fe->header.type = PERF_RECORD_HEADER_FEATURE; 3750 fe->header.size = sizeof(*fe); 3751 3752 ret = process(tool, ff.buf, NULL, NULL); 3753 3754 free(ff.buf); 3755 return ret; 3756 } 3757 3758 int perf_event__process_feature(struct perf_session *session, 3759 union perf_event *event) 3760 { 3761 struct perf_tool *tool = session->tool; 3762 struct feat_fd ff = { .fd = 0 }; 3763 struct perf_record_header_feature *fe = (struct perf_record_header_feature *)event; 3764 int type = fe->header.type; 3765 u64 feat = fe->feat_id; 3766 3767 if (type < 0 || type >= PERF_RECORD_HEADER_MAX) { 3768 pr_warning("invalid record type %d in pipe-mode\n", type); 3769 return 0; 3770 } 3771 if (feat == HEADER_RESERVED || feat >= HEADER_LAST_FEATURE) { 3772 pr_warning("invalid feature id %" PRI_lu64 " in pipe-mode\n", feat); 3773 return -1; 3774 } 3775 3776 if (!feat_ops[feat].process) 3777 return 0; 3778 3779 ff.buf = (void *)fe->data; 3780 ff.size = event->header.size - sizeof(*fe); 3781 ff.ph = &session->header; 3782 3783 if (feat_ops[feat].process(&ff, NULL)) 3784 return -1; 3785 3786 if (!feat_ops[feat].print || !tool->show_feat_hdr) 3787 return 0; 3788 3789 if (!feat_ops[feat].full_only || 3790 tool->show_feat_hdr >= SHOW_FEAT_HEADER_FULL_INFO) { 3791 feat_ops[feat].print(&ff, stdout); 3792 } else { 3793 fprintf(stdout, "# %s info available, use -I to display\n", 3794 feat_ops[feat].name); 3795 } 3796 3797 return 0; 3798 } 3799 3800 static struct perf_record_event_update * 3801 event_update_event__new(size_t size, u64 type, u64 id) 3802 { 3803 struct perf_record_event_update *ev; 3804 3805 size += sizeof(*ev); 3806 size = PERF_ALIGN(size, sizeof(u64)); 3807 3808 ev = zalloc(size); 3809 if (ev) { 3810 ev->header.type = PERF_RECORD_EVENT_UPDATE; 3811 ev->header.size = (u16)size; 3812 ev->type = type; 3813 ev->id = id; 3814 } 3815 return ev; 3816 } 3817 3818 int 3819 perf_event__synthesize_event_update_unit(struct perf_tool *tool, 3820 struct evsel *evsel, 3821 perf_event__handler_t process) 3822 { 3823 struct perf_record_event_update *ev; 3824 size_t size = strlen(evsel->unit); 3825 int err; 3826 3827 ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->id[0]); 3828 if (ev == NULL) 3829 return -ENOMEM; 3830 3831 strlcpy(ev->data, evsel->unit, size + 1); 3832 err = process(tool, (union perf_event *)ev, NULL, NULL); 3833 free(ev); 3834 return err; 3835 } 3836 3837 int 3838 perf_event__synthesize_event_update_scale(struct perf_tool *tool, 3839 struct evsel *evsel,
3840 perf_event__handler_t process) 3841 { 3842 struct perf_record_event_update *ev; 3843 struct perf_record_event_update_scale *ev_data; 3844 int err; 3845 3846 ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->id[0]); 3847 if (ev == NULL) 3848 return -ENOMEM; 3849 3850 ev_data = (struct perf_record_event_update_scale *)ev->data; 3851 ev_data->scale = evsel->scale; 3852 err = process(tool, (union perf_event*) ev, NULL, NULL); 3853 free(ev); 3854 return err; 3855 } 3856 3857 int 3858 perf_event__synthesize_event_update_name(struct perf_tool *tool, 3859 struct evsel *evsel, 3860 perf_event__handler_t process) 3861 { 3862 struct perf_record_event_update *ev; 3863 size_t len = strlen(evsel->name); 3864 int err; 3865 3866 ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->id[0]); 3867 if (ev == NULL) 3868 return -ENOMEM; 3869 3870 strlcpy(ev->data, evsel->name, len + 1); 3871 err = process(tool, (union perf_event*) ev, NULL, NULL); 3872 free(ev); 3873 return err; 3874 } 3875 3876 int 3877 perf_event__synthesize_event_update_cpus(struct perf_tool *tool, 3878 struct evsel *evsel, 3879 perf_event__handler_t process) 3880 { 3881 size_t size = sizeof(struct perf_record_event_update); 3882 struct perf_record_event_update *ev; 3883 int max, err; 3884 u16 type; 3885 3886 if (!evsel->core.own_cpus) 3887 return 0; 3888 3889 ev = cpu_map_data__alloc(evsel->core.own_cpus, &size, &type, &max); 3890 if (!ev) 3891 return -ENOMEM; 3892 3893 ev->header.type = PERF_RECORD_EVENT_UPDATE; 3894 ev->header.size = (u16)size; 3895 ev->type = PERF_EVENT_UPDATE__CPUS; 3896 ev->id = evsel->id[0]; 3897 3898 cpu_map_data__synthesize((struct perf_record_cpu_map_data *)ev->data, 3899 evsel->core.own_cpus, 3900 type, max); 3901 3902 err = process(tool, (union perf_event*) ev, NULL, NULL); 3903 free(ev); 3904 return err; 3905 } 3906 3907 size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp) 3908 { 3909 struct perf_record_event_update *ev = &event->event_update; 3910 struct perf_record_event_update_scale *ev_scale; 3911 struct perf_record_event_update_cpus *ev_cpus; 3912 struct perf_cpu_map *map; 3913 size_t ret; 3914 3915 ret = fprintf(fp, "\n... id: %" PRI_lu64 "\n", ev->id); 3916 3917 switch (ev->type) { 3918 case PERF_EVENT_UPDATE__SCALE: 3919 ev_scale = (struct perf_record_event_update_scale *)ev->data; 3920 ret += fprintf(fp, "... scale: %f\n", ev_scale->scale); 3921 break; 3922 case PERF_EVENT_UPDATE__UNIT: 3923 ret += fprintf(fp, "... unit: %s\n", ev->data); 3924 break; 3925 case PERF_EVENT_UPDATE__NAME: 3926 ret += fprintf(fp, "... name: %s\n", ev->data); 3927 break; 3928 case PERF_EVENT_UPDATE__CPUS: 3929 ev_cpus = (struct perf_record_event_update_cpus *)ev->data; 3930 ret += fprintf(fp, "... "); 3931 3932 map = cpu_map__new_data(&ev_cpus->cpus); 3933 if (map) 3934 ret += cpu_map__fprintf(map, fp); 3935 else 3936 ret += fprintf(fp, "failed to get cpus\n"); 3937 break; 3938 default: 3939 ret += fprintf(fp, "... 
unknown type\n"); 3940 break; 3941 } 3942 3943 return ret; 3944 } 3945 3946 int perf_event__synthesize_attrs(struct perf_tool *tool, 3947 struct evlist *evlist, 3948 perf_event__handler_t process) 3949 { 3950 struct evsel *evsel; 3951 int err = 0; 3952 3953 evlist__for_each_entry(evlist, evsel) { 3954 err = perf_event__synthesize_attr(tool, &evsel->core.attr, evsel->ids, 3955 evsel->id, process); 3956 if (err) { 3957 pr_debug("failed to create perf header attribute\n"); 3958 return err; 3959 } 3960 } 3961 3962 return err; 3963 } 3964 3965 static bool has_unit(struct evsel *counter) 3966 { 3967 return counter->unit && *counter->unit; 3968 } 3969 3970 static bool has_scale(struct evsel *counter) 3971 { 3972 return counter->scale != 1; 3973 } 3974 3975 int perf_event__synthesize_extra_attr(struct perf_tool *tool, 3976 struct evlist *evsel_list, 3977 perf_event__handler_t process, 3978 bool is_pipe) 3979 { 3980 struct evsel *counter; 3981 int err; 3982 3983 /* 3984 * Synthesize other event details not carried within the 3985 * attr event - unit, scale, name 3986 */ 3987 evlist__for_each_entry(evsel_list, counter) { 3988 if (!counter->supported) 3989 continue; 3990 3991 /* 3992 * Synthesize unit and scale only if they are defined. 3993 */ 3994 if (has_unit(counter)) { 3995 err = perf_event__synthesize_event_update_unit(tool, counter, process); 3996 if (err < 0) { 3997 pr_err("Couldn't synthesize evsel unit.\n"); 3998 return err; 3999 } 4000 } 4001 4002 if (has_scale(counter)) { 4003 err = perf_event__synthesize_event_update_scale(tool, counter, process); 4004 if (err < 0) { 4005 pr_err("Couldn't synthesize evsel scale.\n"); 4006 return err; 4007 } 4008 } 4009 4010 if (counter->core.own_cpus) { 4011 err = perf_event__synthesize_event_update_cpus(tool, counter, process); 4012 if (err < 0) { 4013 pr_err("Couldn't synthesize evsel cpus.\n"); 4014 return err; 4015 } 4016 } 4017 4018 /* 4019 * Name is needed only for pipe output, 4020 * perf.data carries event names. 4021 */ 4022 if (is_pipe) { 4023 err = perf_event__synthesize_event_update_name(tool, counter, process); 4024 if (err < 0) { 4025 pr_err("Couldn't synthesize evsel name.\n"); 4026 return err; 4027 } 4028 } 4029 } 4030 return 0; 4031 } 4032 4033 int perf_event__process_attr(struct perf_tool *tool __maybe_unused, 4034 union perf_event *event, 4035 struct evlist **pevlist) 4036 { 4037 u32 i, ids, n_ids; 4038 struct evsel *evsel; 4039 struct evlist *evlist = *pevlist; 4040 4041 if (evlist == NULL) { 4042 *pevlist = evlist = evlist__new(); 4043 if (evlist == NULL) 4044 return -ENOMEM; 4045 } 4046 4047 evsel = evsel__new(&event->attr.attr); 4048 if (evsel == NULL) 4049 return -ENOMEM; 4050 4051 evlist__add(evlist, evsel); 4052 4053 ids = event->header.size; 4054 ids -= (void *)&event->attr.id - (void *)event; 4055 n_ids = ids / sizeof(u64); 4056 /* 4057 * We don't have the cpu and thread maps on the header, so 4058 * for allocating the perf_sample_id table we fake 1 cpu and 4059 * hattr->ids threads.
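	 *
	 * e.g. (illustrative numbers) a PERF_RECORD_HEADER_ATTR event with
	 * header.size == 128 whose id[] array starts 112 bytes into the
	 * event carries (128 - 112) / sizeof(u64) == 2 ids, so n_ids is 2
	 * and the table below is sized for 1 cpu x 2 threads.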
4060 */ 4061 if (perf_evsel__alloc_id(evsel, 1, n_ids)) 4062 return -ENOMEM; 4063 4064 for (i = 0; i < n_ids; i++) { 4065 perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]); 4066 } 4067 4068 return 0; 4069 } 4070 4071 int perf_event__process_event_update(struct perf_tool *tool __maybe_unused, 4072 union perf_event *event, 4073 struct evlist **pevlist) 4074 { 4075 struct perf_record_event_update *ev = &event->event_update; 4076 struct perf_record_event_update_scale *ev_scale; 4077 struct perf_record_event_update_cpus *ev_cpus; 4078 struct evlist *evlist; 4079 struct evsel *evsel; 4080 struct perf_cpu_map *map; 4081 4082 if (!pevlist || *pevlist == NULL) 4083 return -EINVAL; 4084 4085 evlist = *pevlist; 4086 4087 evsel = perf_evlist__id2evsel(evlist, ev->id); 4088 if (evsel == NULL) 4089 return -EINVAL; 4090 4091 switch (ev->type) { 4092 case PERF_EVENT_UPDATE__UNIT: 4093 evsel->unit = strdup(ev->data); 4094 break; 4095 case PERF_EVENT_UPDATE__NAME: 4096 evsel->name = strdup(ev->data); 4097 break; 4098 case PERF_EVENT_UPDATE__SCALE: 4099 ev_scale = (struct perf_record_event_update_scale *)ev->data; 4100 evsel->scale = ev_scale->scale; 4101 break; 4102 case PERF_EVENT_UPDATE__CPUS: 4103 ev_cpus = (struct perf_record_event_update_cpus *)ev->data; 4104 4105 map = cpu_map__new_data(&ev_cpus->cpus); 4106 if (map) 4107 evsel->core.own_cpus = map; 4108 else 4109 pr_err("failed to get event_update cpus\n"); 4110 default: 4111 break; 4112 } 4113 4114 return 0; 4115 } 4116 4117 int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd, 4118 struct evlist *evlist, 4119 perf_event__handler_t process) 4120 { 4121 union perf_event ev; 4122 struct tracing_data *tdata; 4123 ssize_t size = 0, aligned_size = 0, padding; 4124 struct feat_fd ff; 4125 int err __maybe_unused = 0; 4126 4127 /* 4128 * We are going to store the size of the data followed 4129 * by the data contents. Since the fd descriptor is a pipe, 4130 * we cannot seek back to store the size of the data once 4131 * we know it. Instead we: 4132 * 4133 * - write the tracing data to the temp file 4134 * - get/write the data size to pipe 4135 * - write the tracing data from the temp file 4136 * to the pipe 4137 */ 4138 tdata = tracing_data_get(&evlist->core.entries, fd, true); 4139 if (!tdata) 4140 return -1; 4141 4142 memset(&ev, 0, sizeof(ev)); 4143 4144 ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA; 4145 size = tdata->size; 4146 aligned_size = PERF_ALIGN(size, sizeof(u64)); 4147 padding = aligned_size - size; 4148 ev.tracing_data.header.size = sizeof(ev.tracing_data); 4149 ev.tracing_data.size = aligned_size; 4150 4151 process(tool, &ev, NULL, NULL); 4152 4153 /* 4154 * The put function will copy all the tracing data 4155 * stored in temp file to the pipe. 
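	 *
	 * Worked example (illustrative): tdata->size == 1021 gives
	 * aligned_size == PERF_ALIGN(1021, 8) == 1024 and padding == 3, so
	 * three zero bytes follow the copied data; the reader side,
	 * perf_event__process_tracing_data(), recomputes the same padding
	 * from the size it read back.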
4156 */ 4157 tracing_data_put(tdata); 4158 4159 ff = (struct feat_fd){ .fd = fd }; 4160 if (write_padded(&ff, NULL, 0, padding)) 4161 return -1; 4162 4163 return aligned_size; 4164 } 4165 4166 int perf_event__process_tracing_data(struct perf_session *session, 4167 union perf_event *event) 4168 { 4169 ssize_t size_read, padding, size = event->tracing_data.size; 4170 int fd = perf_data__fd(session->data); 4171 off_t offset = lseek(fd, 0, SEEK_CUR); 4172 char buf[BUFSIZ]; 4173 4174 /* setup for reading amidst mmap */ 4175 lseek(fd, offset + sizeof(struct perf_record_header_tracing_data), 4176 SEEK_SET); 4177 4178 size_read = trace_report(fd, &session->tevent, 4179 session->repipe); 4180 padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read; 4181 4182 if (readn(fd, buf, padding) < 0) { 4183 pr_err("%s: reading input file", __func__); 4184 return -1; 4185 } 4186 if (session->repipe) { 4187 int retw = write(STDOUT_FILENO, buf, padding); 4188 if (retw <= 0 || retw != padding) { 4189 pr_err("%s: repiping tracing data padding", __func__); 4190 return -1; 4191 } 4192 } 4193 4194 if (size_read + padding != size) { 4195 pr_err("%s: tracing data size mismatch", __func__); 4196 return -1; 4197 } 4198 4199 perf_evlist__prepare_tracepoint_events(session->evlist, 4200 session->tevent.pevent); 4201 4202 return size_read + padding; 4203 } 4204 4205 int perf_event__synthesize_build_id(struct perf_tool *tool, 4206 struct dso *pos, u16 misc, 4207 perf_event__handler_t process, 4208 struct machine *machine) 4209 { 4210 union perf_event ev; 4211 size_t len; 4212 int err = 0; 4213 4214 if (!pos->hit) 4215 return err; 4216 4217 memset(&ev, 0, sizeof(ev)); 4218 4219 len = pos->long_name_len + 1; 4220 len = PERF_ALIGN(len, NAME_ALIGN); 4221 memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id)); 4222 ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID; 4223 ev.build_id.header.misc = misc; 4224 ev.build_id.pid = machine->pid; 4225 ev.build_id.header.size = sizeof(ev.build_id) + len; 4226 memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len); 4227 4228 err = process(tool, &ev, NULL, machine); 4229 4230 return err; 4231 } 4232 4233 int perf_event__process_build_id(struct perf_session *session, 4234 union perf_event *event) 4235 { 4236 __event_process_build_id(&event->build_id, 4237 event->build_id.filename, 4238 session); 4239 return 0; 4240 } 4241
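/*
 * A minimal sketch (illustrative only; this hypothetical helper is not part
 * of perf) of the sizing rule used by perf_event__synthesize_build_id()
 * above: the filename is NUL-terminated, padded to a NAME_ALIGN boundary,
 * and that padded length is added to the fixed part of the event.
 */
static size_t __maybe_unused build_id_event_size(size_t long_name_len)
{
	union perf_event ev __maybe_unused;
	size_t len = PERF_ALIGN(long_name_len + 1, NAME_ALIGN);

	return sizeof(ev.build_id) + len;
}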