1 // SPDX-License-Identifier: GPL-2.0 2 #include <errno.h> 3 #include <inttypes.h> 4 #include "string2.h" 5 #include <sys/param.h> 6 #include <sys/types.h> 7 #include <byteswap.h> 8 #include <unistd.h> 9 #include <stdio.h> 10 #include <stdlib.h> 11 #include <linux/compiler.h> 12 #include <linux/list.h> 13 #include <linux/kernel.h> 14 #include <linux/bitops.h> 15 #include <linux/string.h> 16 #include <linux/stringify.h> 17 #include <linux/zalloc.h> 18 #include <sys/stat.h> 19 #include <sys/utsname.h> 20 #include <linux/time64.h> 21 #include <dirent.h> 22 #include <bpf/libbpf.h> 23 24 #include "evlist.h" 25 #include "evsel.h" 26 #include "header.h" 27 #include "memswap.h" 28 #include "../perf.h" 29 #include "trace-event.h" 30 #include "session.h" 31 #include "symbol.h" 32 #include "debug.h" 33 #include "cpumap.h" 34 #include "pmu.h" 35 #include "vdso.h" 36 #include "strbuf.h" 37 #include "build-id.h" 38 #include "data.h" 39 #include <api/fs/fs.h> 40 #include "asm/bug.h" 41 #include "tool.h" 42 #include "time-utils.h" 43 #include "units.h" 44 #include "cputopo.h" 45 #include "bpf-event.h" 46 47 #include <linux/ctype.h> 48 49 /* 50 * magic2 = "PERFILE2" 51 * must be a numerical value to let the endianness 52 * determine the memory layout. That way we are able 53 * to detect endianness when reading the perf.data file 54 * back. 55 * 56 * we check for legacy (PERFFILE) format. 57 */ 58 static const char *__perf_magic1 = "PERFFILE"; 59 static const u64 __perf_magic2 = 0x32454c4946524550ULL; 60 static const u64 __perf_magic2_sw = 0x50455246494c4532ULL; 61 62 #define PERF_MAGIC __perf_magic2 63 64 const char perf_version_string[] = PERF_VERSION; 65 66 struct perf_file_attr { 67 struct perf_event_attr attr; 68 struct perf_file_section ids; 69 }; 70 71 struct feat_fd { 72 struct perf_header *ph; 73 int fd; 74 void *buf; /* Either buf != NULL or fd >= 0 */ 75 ssize_t offset; 76 size_t size; 77 struct perf_evsel *events; 78 }; 79 80 void perf_header__set_feat(struct perf_header *header, int feat) 81 { 82 set_bit(feat, header->adds_features); 83 } 84 85 void perf_header__clear_feat(struct perf_header *header, int feat) 86 { 87 clear_bit(feat, header->adds_features); 88 } 89 90 bool perf_header__has_feat(const struct perf_header *header, int feat) 91 { 92 return test_bit(feat, header->adds_features); 93 } 94 95 static int __do_write_fd(struct feat_fd *ff, const void *buf, size_t size) 96 { 97 ssize_t ret = writen(ff->fd, buf, size); 98 99 if (ret != (ssize_t)size) 100 return ret < 0 ? (int)ret : -1; 101 return 0; 102 } 103 104 static int __do_write_buf(struct feat_fd *ff, const void *buf, size_t size) 105 { 106 /* struct perf_event_header::size is u16 */ 107 const size_t max_size = 0xffff - sizeof(struct perf_event_header); 108 size_t new_size = ff->size; 109 void *addr; 110 111 if (size + ff->offset > max_size) 112 return -E2BIG; 113 114 while (size > (new_size - ff->offset)) 115 new_size <<= 1; 116 new_size = min(max_size, new_size); 117 118 if (ff->size < new_size) { 119 addr = realloc(ff->buf, new_size); 120 if (!addr) 121 return -ENOMEM; 122 ff->buf = addr; 123 ff->size = new_size; 124 } 125 126 memcpy(ff->buf + ff->offset, buf, size); 127 ff->offset += size; 128 129 return 0; 130 } 131 132 /* Return: 0 if succeded, -ERR if failed. */ 133 int do_write(struct feat_fd *ff, const void *buf, size_t size) 134 { 135 if (!ff->buf) 136 return __do_write_fd(ff, buf, size); 137 return __do_write_buf(ff, buf, size); 138 } 139 140 /* Return: 0 if succeded, -ERR if failed. */ 141 static int do_write_bitmap(struct feat_fd *ff, unsigned long *set, u64 size) 142 { 143 u64 *p = (u64 *) set; 144 int i, ret; 145 146 ret = do_write(ff, &size, sizeof(size)); 147 if (ret < 0) 148 return ret; 149 150 for (i = 0; (u64) i < BITS_TO_U64(size); i++) { 151 ret = do_write(ff, p + i, sizeof(*p)); 152 if (ret < 0) 153 return ret; 154 } 155 156 return 0; 157 } 158 159 /* Return: 0 if succeded, -ERR if failed. */ 160 int write_padded(struct feat_fd *ff, const void *bf, 161 size_t count, size_t count_aligned) 162 { 163 static const char zero_buf[NAME_ALIGN]; 164 int err = do_write(ff, bf, count); 165 166 if (!err) 167 err = do_write(ff, zero_buf, count_aligned - count); 168 169 return err; 170 } 171 172 #define string_size(str) \ 173 (PERF_ALIGN((strlen(str) + 1), NAME_ALIGN) + sizeof(u32)) 174 175 /* Return: 0 if succeded, -ERR if failed. */ 176 static int do_write_string(struct feat_fd *ff, const char *str) 177 { 178 u32 len, olen; 179 int ret; 180 181 olen = strlen(str) + 1; 182 len = PERF_ALIGN(olen, NAME_ALIGN); 183 184 /* write len, incl. \0 */ 185 ret = do_write(ff, &len, sizeof(len)); 186 if (ret < 0) 187 return ret; 188 189 return write_padded(ff, str, olen, len); 190 } 191 192 static int __do_read_fd(struct feat_fd *ff, void *addr, ssize_t size) 193 { 194 ssize_t ret = readn(ff->fd, addr, size); 195 196 if (ret != size) 197 return ret < 0 ? (int)ret : -1; 198 return 0; 199 } 200 201 static int __do_read_buf(struct feat_fd *ff, void *addr, ssize_t size) 202 { 203 if (size > (ssize_t)ff->size - ff->offset) 204 return -1; 205 206 memcpy(addr, ff->buf + ff->offset, size); 207 ff->offset += size; 208 209 return 0; 210 211 } 212 213 static int __do_read(struct feat_fd *ff, void *addr, ssize_t size) 214 { 215 if (!ff->buf) 216 return __do_read_fd(ff, addr, size); 217 return __do_read_buf(ff, addr, size); 218 } 219 220 static int do_read_u32(struct feat_fd *ff, u32 *addr) 221 { 222 int ret; 223 224 ret = __do_read(ff, addr, sizeof(*addr)); 225 if (ret) 226 return ret; 227 228 if (ff->ph->needs_swap) 229 *addr = bswap_32(*addr); 230 return 0; 231 } 232 233 static int do_read_u64(struct feat_fd *ff, u64 *addr) 234 { 235 int ret; 236 237 ret = __do_read(ff, addr, sizeof(*addr)); 238 if (ret) 239 return ret; 240 241 if (ff->ph->needs_swap) 242 *addr = bswap_64(*addr); 243 return 0; 244 } 245 246 static char *do_read_string(struct feat_fd *ff) 247 { 248 u32 len; 249 char *buf; 250 251 if (do_read_u32(ff, &len)) 252 return NULL; 253 254 buf = malloc(len); 255 if (!buf) 256 return NULL; 257 258 if (!__do_read(ff, buf, len)) { 259 /* 260 * strings are padded by zeroes 261 * thus the actual strlen of buf 262 * may be less than len 263 */ 264 return buf; 265 } 266 267 free(buf); 268 return NULL; 269 } 270 271 /* Return: 0 if succeded, -ERR if failed. */ 272 static int do_read_bitmap(struct feat_fd *ff, unsigned long **pset, u64 *psize) 273 { 274 unsigned long *set; 275 u64 size, *p; 276 int i, ret; 277 278 ret = do_read_u64(ff, &size); 279 if (ret) 280 return ret; 281 282 set = bitmap_alloc(size); 283 if (!set) 284 return -ENOMEM; 285 286 p = (u64 *) set; 287 288 for (i = 0; (u64) i < BITS_TO_U64(size); i++) { 289 ret = do_read_u64(ff, p + i); 290 if (ret < 0) { 291 free(set); 292 return ret; 293 } 294 } 295 296 *pset = set; 297 *psize = size; 298 return 0; 299 } 300 301 static int write_tracing_data(struct feat_fd *ff, 302 struct perf_evlist *evlist) 303 { 304 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__)) 305 return -1; 306 307 return read_tracing_data(ff->fd, &evlist->entries); 308 } 309 310 static int write_build_id(struct feat_fd *ff, 311 struct perf_evlist *evlist __maybe_unused) 312 { 313 struct perf_session *session; 314 int err; 315 316 session = container_of(ff->ph, struct perf_session, header); 317 318 if (!perf_session__read_build_ids(session, true)) 319 return -1; 320 321 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__)) 322 return -1; 323 324 err = perf_session__write_buildid_table(session, ff); 325 if (err < 0) { 326 pr_debug("failed to write buildid table\n"); 327 return err; 328 } 329 perf_session__cache_build_ids(session); 330 331 return 0; 332 } 333 334 static int write_hostname(struct feat_fd *ff, 335 struct perf_evlist *evlist __maybe_unused) 336 { 337 struct utsname uts; 338 int ret; 339 340 ret = uname(&uts); 341 if (ret < 0) 342 return -1; 343 344 return do_write_string(ff, uts.nodename); 345 } 346 347 static int write_osrelease(struct feat_fd *ff, 348 struct perf_evlist *evlist __maybe_unused) 349 { 350 struct utsname uts; 351 int ret; 352 353 ret = uname(&uts); 354 if (ret < 0) 355 return -1; 356 357 return do_write_string(ff, uts.release); 358 } 359 360 static int write_arch(struct feat_fd *ff, 361 struct perf_evlist *evlist __maybe_unused) 362 { 363 struct utsname uts; 364 int ret; 365 366 ret = uname(&uts); 367 if (ret < 0) 368 return -1; 369 370 return do_write_string(ff, uts.machine); 371 } 372 373 static int write_version(struct feat_fd *ff, 374 struct perf_evlist *evlist __maybe_unused) 375 { 376 return do_write_string(ff, perf_version_string); 377 } 378 379 static int __write_cpudesc(struct feat_fd *ff, const char *cpuinfo_proc) 380 { 381 FILE *file; 382 char *buf = NULL; 383 char *s, *p; 384 const char *search = cpuinfo_proc; 385 size_t len = 0; 386 int ret = -1; 387 388 if (!search) 389 return -1; 390 391 file = fopen("/proc/cpuinfo", "r"); 392 if (!file) 393 return -1; 394 395 while (getline(&buf, &len, file) > 0) { 396 ret = strncmp(buf, search, strlen(search)); 397 if (!ret) 398 break; 399 } 400 401 if (ret) { 402 ret = -1; 403 goto done; 404 } 405 406 s = buf; 407 408 p = strchr(buf, ':'); 409 if (p && *(p+1) == ' ' && *(p+2)) 410 s = p + 2; 411 p = strchr(s, '\n'); 412 if (p) 413 *p = '\0'; 414 415 /* squash extra space characters (branding string) */ 416 p = s; 417 while (*p) { 418 if (isspace(*p)) { 419 char *r = p + 1; 420 char *q = skip_spaces(r); 421 *p = ' '; 422 if (q != (p+1)) 423 while ((*r++ = *q++)); 424 } 425 p++; 426 } 427 ret = do_write_string(ff, s); 428 done: 429 free(buf); 430 fclose(file); 431 return ret; 432 } 433 434 static int write_cpudesc(struct feat_fd *ff, 435 struct perf_evlist *evlist __maybe_unused) 436 { 437 const char *cpuinfo_procs[] = CPUINFO_PROC; 438 unsigned int i; 439 440 for (i = 0; i < ARRAY_SIZE(cpuinfo_procs); i++) { 441 int ret; 442 ret = __write_cpudesc(ff, cpuinfo_procs[i]); 443 if (ret >= 0) 444 return ret; 445 } 446 return -1; 447 } 448 449 450 static int write_nrcpus(struct feat_fd *ff, 451 struct perf_evlist *evlist __maybe_unused) 452 { 453 long nr; 454 u32 nrc, nra; 455 int ret; 456 457 nrc = cpu__max_present_cpu(); 458 459 nr = sysconf(_SC_NPROCESSORS_ONLN); 460 if (nr < 0) 461 return -1; 462 463 nra = (u32)(nr & UINT_MAX); 464 465 ret = do_write(ff, &nrc, sizeof(nrc)); 466 if (ret < 0) 467 return ret; 468 469 return do_write(ff, &nra, sizeof(nra)); 470 } 471 472 static int write_event_desc(struct feat_fd *ff, 473 struct perf_evlist *evlist) 474 { 475 struct perf_evsel *evsel; 476 u32 nre, nri, sz; 477 int ret; 478 479 nre = evlist->nr_entries; 480 481 /* 482 * write number of events 483 */ 484 ret = do_write(ff, &nre, sizeof(nre)); 485 if (ret < 0) 486 return ret; 487 488 /* 489 * size of perf_event_attr struct 490 */ 491 sz = (u32)sizeof(evsel->attr); 492 ret = do_write(ff, &sz, sizeof(sz)); 493 if (ret < 0) 494 return ret; 495 496 evlist__for_each_entry(evlist, evsel) { 497 ret = do_write(ff, &evsel->attr, sz); 498 if (ret < 0) 499 return ret; 500 /* 501 * write number of unique id per event 502 * there is one id per instance of an event 503 * 504 * copy into an nri to be independent of the 505 * type of ids, 506 */ 507 nri = evsel->ids; 508 ret = do_write(ff, &nri, sizeof(nri)); 509 if (ret < 0) 510 return ret; 511 512 /* 513 * write event string as passed on cmdline 514 */ 515 ret = do_write_string(ff, perf_evsel__name(evsel)); 516 if (ret < 0) 517 return ret; 518 /* 519 * write unique ids for this event 520 */ 521 ret = do_write(ff, evsel->id, evsel->ids * sizeof(u64)); 522 if (ret < 0) 523 return ret; 524 } 525 return 0; 526 } 527 528 static int write_cmdline(struct feat_fd *ff, 529 struct perf_evlist *evlist __maybe_unused) 530 { 531 char pbuf[MAXPATHLEN], *buf; 532 int i, ret, n; 533 534 /* actual path to perf binary */ 535 buf = perf_exe(pbuf, MAXPATHLEN); 536 537 /* account for binary path */ 538 n = perf_env.nr_cmdline + 1; 539 540 ret = do_write(ff, &n, sizeof(n)); 541 if (ret < 0) 542 return ret; 543 544 ret = do_write_string(ff, buf); 545 if (ret < 0) 546 return ret; 547 548 for (i = 0 ; i < perf_env.nr_cmdline; i++) { 549 ret = do_write_string(ff, perf_env.cmdline_argv[i]); 550 if (ret < 0) 551 return ret; 552 } 553 return 0; 554 } 555 556 557 static int write_cpu_topology(struct feat_fd *ff, 558 struct perf_evlist *evlist __maybe_unused) 559 { 560 struct cpu_topology *tp; 561 u32 i; 562 int ret, j; 563 564 tp = cpu_topology__new(); 565 if (!tp) 566 return -1; 567 568 ret = do_write(ff, &tp->core_sib, sizeof(tp->core_sib)); 569 if (ret < 0) 570 goto done; 571 572 for (i = 0; i < tp->core_sib; i++) { 573 ret = do_write_string(ff, tp->core_siblings[i]); 574 if (ret < 0) 575 goto done; 576 } 577 ret = do_write(ff, &tp->thread_sib, sizeof(tp->thread_sib)); 578 if (ret < 0) 579 goto done; 580 581 for (i = 0; i < tp->thread_sib; i++) { 582 ret = do_write_string(ff, tp->thread_siblings[i]); 583 if (ret < 0) 584 break; 585 } 586 587 ret = perf_env__read_cpu_topology_map(&perf_env); 588 if (ret < 0) 589 goto done; 590 591 for (j = 0; j < perf_env.nr_cpus_avail; j++) { 592 ret = do_write(ff, &perf_env.cpu[j].core_id, 593 sizeof(perf_env.cpu[j].core_id)); 594 if (ret < 0) 595 return ret; 596 ret = do_write(ff, &perf_env.cpu[j].socket_id, 597 sizeof(perf_env.cpu[j].socket_id)); 598 if (ret < 0) 599 return ret; 600 } 601 602 if (!tp->die_sib) 603 goto done; 604 605 ret = do_write(ff, &tp->die_sib, sizeof(tp->die_sib)); 606 if (ret < 0) 607 goto done; 608 609 for (i = 0; i < tp->die_sib; i++) { 610 ret = do_write_string(ff, tp->die_siblings[i]); 611 if (ret < 0) 612 goto done; 613 } 614 615 for (j = 0; j < perf_env.nr_cpus_avail; j++) { 616 ret = do_write(ff, &perf_env.cpu[j].die_id, 617 sizeof(perf_env.cpu[j].die_id)); 618 if (ret < 0) 619 return ret; 620 } 621 622 done: 623 cpu_topology__delete(tp); 624 return ret; 625 } 626 627 628 629 static int write_total_mem(struct feat_fd *ff, 630 struct perf_evlist *evlist __maybe_unused) 631 { 632 char *buf = NULL; 633 FILE *fp; 634 size_t len = 0; 635 int ret = -1, n; 636 uint64_t mem; 637 638 fp = fopen("/proc/meminfo", "r"); 639 if (!fp) 640 return -1; 641 642 while (getline(&buf, &len, fp) > 0) { 643 ret = strncmp(buf, "MemTotal:", 9); 644 if (!ret) 645 break; 646 } 647 if (!ret) { 648 n = sscanf(buf, "%*s %"PRIu64, &mem); 649 if (n == 1) 650 ret = do_write(ff, &mem, sizeof(mem)); 651 } else 652 ret = -1; 653 free(buf); 654 fclose(fp); 655 return ret; 656 } 657 658 static int write_numa_topology(struct feat_fd *ff, 659 struct perf_evlist *evlist __maybe_unused) 660 { 661 struct numa_topology *tp; 662 int ret = -1; 663 u32 i; 664 665 tp = numa_topology__new(); 666 if (!tp) 667 return -ENOMEM; 668 669 ret = do_write(ff, &tp->nr, sizeof(u32)); 670 if (ret < 0) 671 goto err; 672 673 for (i = 0; i < tp->nr; i++) { 674 struct numa_topology_node *n = &tp->nodes[i]; 675 676 ret = do_write(ff, &n->node, sizeof(u32)); 677 if (ret < 0) 678 goto err; 679 680 ret = do_write(ff, &n->mem_total, sizeof(u64)); 681 if (ret) 682 goto err; 683 684 ret = do_write(ff, &n->mem_free, sizeof(u64)); 685 if (ret) 686 goto err; 687 688 ret = do_write_string(ff, n->cpus); 689 if (ret < 0) 690 goto err; 691 } 692 693 ret = 0; 694 695 err: 696 numa_topology__delete(tp); 697 return ret; 698 } 699 700 /* 701 * File format: 702 * 703 * struct pmu_mappings { 704 * u32 pmu_num; 705 * struct pmu_map { 706 * u32 type; 707 * char name[]; 708 * }[pmu_num]; 709 * }; 710 */ 711 712 static int write_pmu_mappings(struct feat_fd *ff, 713 struct perf_evlist *evlist __maybe_unused) 714 { 715 struct perf_pmu *pmu = NULL; 716 u32 pmu_num = 0; 717 int ret; 718 719 /* 720 * Do a first pass to count number of pmu to avoid lseek so this 721 * works in pipe mode as well. 722 */ 723 while ((pmu = perf_pmu__scan(pmu))) { 724 if (!pmu->name) 725 continue; 726 pmu_num++; 727 } 728 729 ret = do_write(ff, &pmu_num, sizeof(pmu_num)); 730 if (ret < 0) 731 return ret; 732 733 while ((pmu = perf_pmu__scan(pmu))) { 734 if (!pmu->name) 735 continue; 736 737 ret = do_write(ff, &pmu->type, sizeof(pmu->type)); 738 if (ret < 0) 739 return ret; 740 741 ret = do_write_string(ff, pmu->name); 742 if (ret < 0) 743 return ret; 744 } 745 746 return 0; 747 } 748 749 /* 750 * File format: 751 * 752 * struct group_descs { 753 * u32 nr_groups; 754 * struct group_desc { 755 * char name[]; 756 * u32 leader_idx; 757 * u32 nr_members; 758 * }[nr_groups]; 759 * }; 760 */ 761 static int write_group_desc(struct feat_fd *ff, 762 struct perf_evlist *evlist) 763 { 764 u32 nr_groups = evlist->nr_groups; 765 struct perf_evsel *evsel; 766 int ret; 767 768 ret = do_write(ff, &nr_groups, sizeof(nr_groups)); 769 if (ret < 0) 770 return ret; 771 772 evlist__for_each_entry(evlist, evsel) { 773 if (perf_evsel__is_group_leader(evsel) && 774 evsel->nr_members > 1) { 775 const char *name = evsel->group_name ?: "{anon_group}"; 776 u32 leader_idx = evsel->idx; 777 u32 nr_members = evsel->nr_members; 778 779 ret = do_write_string(ff, name); 780 if (ret < 0) 781 return ret; 782 783 ret = do_write(ff, &leader_idx, sizeof(leader_idx)); 784 if (ret < 0) 785 return ret; 786 787 ret = do_write(ff, &nr_members, sizeof(nr_members)); 788 if (ret < 0) 789 return ret; 790 } 791 } 792 return 0; 793 } 794 795 /* 796 * Return the CPU id as a raw string. 797 * 798 * Each architecture should provide a more precise id string that 799 * can be use to match the architecture's "mapfile". 800 */ 801 char * __weak get_cpuid_str(struct perf_pmu *pmu __maybe_unused) 802 { 803 return NULL; 804 } 805 806 /* Return zero when the cpuid from the mapfile.csv matches the 807 * cpuid string generated on this platform. 808 * Otherwise return non-zero. 809 */ 810 int __weak strcmp_cpuid_str(const char *mapcpuid, const char *cpuid) 811 { 812 regex_t re; 813 regmatch_t pmatch[1]; 814 int match; 815 816 if (regcomp(&re, mapcpuid, REG_EXTENDED) != 0) { 817 /* Warn unable to generate match particular string. */ 818 pr_info("Invalid regular expression %s\n", mapcpuid); 819 return 1; 820 } 821 822 match = !regexec(&re, cpuid, 1, pmatch, 0); 823 regfree(&re); 824 if (match) { 825 size_t match_len = (pmatch[0].rm_eo - pmatch[0].rm_so); 826 827 /* Verify the entire string matched. */ 828 if (match_len == strlen(cpuid)) 829 return 0; 830 } 831 return 1; 832 } 833 834 /* 835 * default get_cpuid(): nothing gets recorded 836 * actual implementation must be in arch/$(SRCARCH)/util/header.c 837 */ 838 int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused) 839 { 840 return -1; 841 } 842 843 static int write_cpuid(struct feat_fd *ff, 844 struct perf_evlist *evlist __maybe_unused) 845 { 846 char buffer[64]; 847 int ret; 848 849 ret = get_cpuid(buffer, sizeof(buffer)); 850 if (ret) 851 return -1; 852 853 return do_write_string(ff, buffer); 854 } 855 856 static int write_branch_stack(struct feat_fd *ff __maybe_unused, 857 struct perf_evlist *evlist __maybe_unused) 858 { 859 return 0; 860 } 861 862 static int write_auxtrace(struct feat_fd *ff, 863 struct perf_evlist *evlist __maybe_unused) 864 { 865 struct perf_session *session; 866 int err; 867 868 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__)) 869 return -1; 870 871 session = container_of(ff->ph, struct perf_session, header); 872 873 err = auxtrace_index__write(ff->fd, &session->auxtrace_index); 874 if (err < 0) 875 pr_err("Failed to write auxtrace index\n"); 876 return err; 877 } 878 879 static int write_clockid(struct feat_fd *ff, 880 struct perf_evlist *evlist __maybe_unused) 881 { 882 return do_write(ff, &ff->ph->env.clockid_res_ns, 883 sizeof(ff->ph->env.clockid_res_ns)); 884 } 885 886 static int write_dir_format(struct feat_fd *ff, 887 struct perf_evlist *evlist __maybe_unused) 888 { 889 struct perf_session *session; 890 struct perf_data *data; 891 892 session = container_of(ff->ph, struct perf_session, header); 893 data = session->data; 894 895 if (WARN_ON(!perf_data__is_dir(data))) 896 return -1; 897 898 return do_write(ff, &data->dir.version, sizeof(data->dir.version)); 899 } 900 901 #ifdef HAVE_LIBBPF_SUPPORT 902 static int write_bpf_prog_info(struct feat_fd *ff, 903 struct perf_evlist *evlist __maybe_unused) 904 { 905 struct perf_env *env = &ff->ph->env; 906 struct rb_root *root; 907 struct rb_node *next; 908 int ret; 909 910 down_read(&env->bpf_progs.lock); 911 912 ret = do_write(ff, &env->bpf_progs.infos_cnt, 913 sizeof(env->bpf_progs.infos_cnt)); 914 if (ret < 0) 915 goto out; 916 917 root = &env->bpf_progs.infos; 918 next = rb_first(root); 919 while (next) { 920 struct bpf_prog_info_node *node; 921 size_t len; 922 923 node = rb_entry(next, struct bpf_prog_info_node, rb_node); 924 next = rb_next(&node->rb_node); 925 len = sizeof(struct bpf_prog_info_linear) + 926 node->info_linear->data_len; 927 928 /* before writing to file, translate address to offset */ 929 bpf_program__bpil_addr_to_offs(node->info_linear); 930 ret = do_write(ff, node->info_linear, len); 931 /* 932 * translate back to address even when do_write() fails, 933 * so that this function never changes the data. 934 */ 935 bpf_program__bpil_offs_to_addr(node->info_linear); 936 if (ret < 0) 937 goto out; 938 } 939 out: 940 up_read(&env->bpf_progs.lock); 941 return ret; 942 } 943 #else // HAVE_LIBBPF_SUPPORT 944 static int write_bpf_prog_info(struct feat_fd *ff __maybe_unused, 945 struct perf_evlist *evlist __maybe_unused) 946 { 947 return 0; 948 } 949 #endif // HAVE_LIBBPF_SUPPORT 950 951 static int write_bpf_btf(struct feat_fd *ff, 952 struct perf_evlist *evlist __maybe_unused) 953 { 954 struct perf_env *env = &ff->ph->env; 955 struct rb_root *root; 956 struct rb_node *next; 957 int ret; 958 959 down_read(&env->bpf_progs.lock); 960 961 ret = do_write(ff, &env->bpf_progs.btfs_cnt, 962 sizeof(env->bpf_progs.btfs_cnt)); 963 964 if (ret < 0) 965 goto out; 966 967 root = &env->bpf_progs.btfs; 968 next = rb_first(root); 969 while (next) { 970 struct btf_node *node; 971 972 node = rb_entry(next, struct btf_node, rb_node); 973 next = rb_next(&node->rb_node); 974 ret = do_write(ff, &node->id, 975 sizeof(u32) * 2 + node->data_size); 976 if (ret < 0) 977 goto out; 978 } 979 out: 980 up_read(&env->bpf_progs.lock); 981 return ret; 982 } 983 984 static int cpu_cache_level__sort(const void *a, const void *b) 985 { 986 struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a; 987 struct cpu_cache_level *cache_b = (struct cpu_cache_level *)b; 988 989 return cache_a->level - cache_b->level; 990 } 991 992 static bool cpu_cache_level__cmp(struct cpu_cache_level *a, struct cpu_cache_level *b) 993 { 994 if (a->level != b->level) 995 return false; 996 997 if (a->line_size != b->line_size) 998 return false; 999 1000 if (a->sets != b->sets) 1001 return false; 1002 1003 if (a->ways != b->ways) 1004 return false; 1005 1006 if (strcmp(a->type, b->type)) 1007 return false; 1008 1009 if (strcmp(a->size, b->size)) 1010 return false; 1011 1012 if (strcmp(a->map, b->map)) 1013 return false; 1014 1015 return true; 1016 } 1017 1018 static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 level) 1019 { 1020 char path[PATH_MAX], file[PATH_MAX]; 1021 struct stat st; 1022 size_t len; 1023 1024 scnprintf(path, PATH_MAX, "devices/system/cpu/cpu%d/cache/index%d/", cpu, level); 1025 scnprintf(file, PATH_MAX, "%s/%s", sysfs__mountpoint(), path); 1026 1027 if (stat(file, &st)) 1028 return 1; 1029 1030 scnprintf(file, PATH_MAX, "%s/level", path); 1031 if (sysfs__read_int(file, (int *) &cache->level)) 1032 return -1; 1033 1034 scnprintf(file, PATH_MAX, "%s/coherency_line_size", path); 1035 if (sysfs__read_int(file, (int *) &cache->line_size)) 1036 return -1; 1037 1038 scnprintf(file, PATH_MAX, "%s/number_of_sets", path); 1039 if (sysfs__read_int(file, (int *) &cache->sets)) 1040 return -1; 1041 1042 scnprintf(file, PATH_MAX, "%s/ways_of_associativity", path); 1043 if (sysfs__read_int(file, (int *) &cache->ways)) 1044 return -1; 1045 1046 scnprintf(file, PATH_MAX, "%s/type", path); 1047 if (sysfs__read_str(file, &cache->type, &len)) 1048 return -1; 1049 1050 cache->type[len] = 0; 1051 cache->type = strim(cache->type); 1052 1053 scnprintf(file, PATH_MAX, "%s/size", path); 1054 if (sysfs__read_str(file, &cache->size, &len)) { 1055 zfree(&cache->type); 1056 return -1; 1057 } 1058 1059 cache->size[len] = 0; 1060 cache->size = strim(cache->size); 1061 1062 scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path); 1063 if (sysfs__read_str(file, &cache->map, &len)) { 1064 zfree(&cache->map); 1065 zfree(&cache->type); 1066 return -1; 1067 } 1068 1069 cache->map[len] = 0; 1070 cache->map = strim(cache->map); 1071 return 0; 1072 } 1073 1074 static void cpu_cache_level__fprintf(FILE *out, struct cpu_cache_level *c) 1075 { 1076 fprintf(out, "L%d %-15s %8s [%s]\n", c->level, c->type, c->size, c->map); 1077 } 1078 1079 static int build_caches(struct cpu_cache_level caches[], u32 size, u32 *cntp) 1080 { 1081 u32 i, cnt = 0; 1082 long ncpus; 1083 u32 nr, cpu; 1084 u16 level; 1085 1086 ncpus = sysconf(_SC_NPROCESSORS_CONF); 1087 if (ncpus < 0) 1088 return -1; 1089 1090 nr = (u32)(ncpus & UINT_MAX); 1091 1092 for (cpu = 0; cpu < nr; cpu++) { 1093 for (level = 0; level < 10; level++) { 1094 struct cpu_cache_level c; 1095 int err; 1096 1097 err = cpu_cache_level__read(&c, cpu, level); 1098 if (err < 0) 1099 return err; 1100 1101 if (err == 1) 1102 break; 1103 1104 for (i = 0; i < cnt; i++) { 1105 if (cpu_cache_level__cmp(&c, &caches[i])) 1106 break; 1107 } 1108 1109 if (i == cnt) 1110 caches[cnt++] = c; 1111 else 1112 cpu_cache_level__free(&c); 1113 1114 if (WARN_ONCE(cnt == size, "way too many cpu caches..")) 1115 goto out; 1116 } 1117 } 1118 out: 1119 *cntp = cnt; 1120 return 0; 1121 } 1122 1123 #define MAX_CACHES (MAX_NR_CPUS * 4) 1124 1125 static int write_cache(struct feat_fd *ff, 1126 struct perf_evlist *evlist __maybe_unused) 1127 { 1128 struct cpu_cache_level caches[MAX_CACHES]; 1129 u32 cnt = 0, i, version = 1; 1130 int ret; 1131 1132 ret = build_caches(caches, MAX_CACHES, &cnt); 1133 if (ret) 1134 goto out; 1135 1136 qsort(&caches, cnt, sizeof(struct cpu_cache_level), cpu_cache_level__sort); 1137 1138 ret = do_write(ff, &version, sizeof(u32)); 1139 if (ret < 0) 1140 goto out; 1141 1142 ret = do_write(ff, &cnt, sizeof(u32)); 1143 if (ret < 0) 1144 goto out; 1145 1146 for (i = 0; i < cnt; i++) { 1147 struct cpu_cache_level *c = &caches[i]; 1148 1149 #define _W(v) \ 1150 ret = do_write(ff, &c->v, sizeof(u32)); \ 1151 if (ret < 0) \ 1152 goto out; 1153 1154 _W(level) 1155 _W(line_size) 1156 _W(sets) 1157 _W(ways) 1158 #undef _W 1159 1160 #define _W(v) \ 1161 ret = do_write_string(ff, (const char *) c->v); \ 1162 if (ret < 0) \ 1163 goto out; 1164 1165 _W(type) 1166 _W(size) 1167 _W(map) 1168 #undef _W 1169 } 1170 1171 out: 1172 for (i = 0; i < cnt; i++) 1173 cpu_cache_level__free(&caches[i]); 1174 return ret; 1175 } 1176 1177 static int write_stat(struct feat_fd *ff __maybe_unused, 1178 struct perf_evlist *evlist __maybe_unused) 1179 { 1180 return 0; 1181 } 1182 1183 static int write_sample_time(struct feat_fd *ff, 1184 struct perf_evlist *evlist) 1185 { 1186 int ret; 1187 1188 ret = do_write(ff, &evlist->first_sample_time, 1189 sizeof(evlist->first_sample_time)); 1190 if (ret < 0) 1191 return ret; 1192 1193 return do_write(ff, &evlist->last_sample_time, 1194 sizeof(evlist->last_sample_time)); 1195 } 1196 1197 1198 static int memory_node__read(struct memory_node *n, unsigned long idx) 1199 { 1200 unsigned int phys, size = 0; 1201 char path[PATH_MAX]; 1202 struct dirent *ent; 1203 DIR *dir; 1204 1205 #define for_each_memory(mem, dir) \ 1206 while ((ent = readdir(dir))) \ 1207 if (strcmp(ent->d_name, ".") && \ 1208 strcmp(ent->d_name, "..") && \ 1209 sscanf(ent->d_name, "memory%u", &mem) == 1) 1210 1211 scnprintf(path, PATH_MAX, 1212 "%s/devices/system/node/node%lu", 1213 sysfs__mountpoint(), idx); 1214 1215 dir = opendir(path); 1216 if (!dir) { 1217 pr_warning("failed: cant' open memory sysfs data\n"); 1218 return -1; 1219 } 1220 1221 for_each_memory(phys, dir) { 1222 size = max(phys, size); 1223 } 1224 1225 size++; 1226 1227 n->set = bitmap_alloc(size); 1228 if (!n->set) { 1229 closedir(dir); 1230 return -ENOMEM; 1231 } 1232 1233 n->node = idx; 1234 n->size = size; 1235 1236 rewinddir(dir); 1237 1238 for_each_memory(phys, dir) { 1239 set_bit(phys, n->set); 1240 } 1241 1242 closedir(dir); 1243 return 0; 1244 } 1245 1246 static int memory_node__sort(const void *a, const void *b) 1247 { 1248 const struct memory_node *na = a; 1249 const struct memory_node *nb = b; 1250 1251 return na->node - nb->node; 1252 } 1253 1254 static int build_mem_topology(struct memory_node *nodes, u64 size, u64 *cntp) 1255 { 1256 char path[PATH_MAX]; 1257 struct dirent *ent; 1258 DIR *dir; 1259 u64 cnt = 0; 1260 int ret = 0; 1261 1262 scnprintf(path, PATH_MAX, "%s/devices/system/node/", 1263 sysfs__mountpoint()); 1264 1265 dir = opendir(path); 1266 if (!dir) { 1267 pr_debug2("%s: could't read %s, does this arch have topology information?\n", 1268 __func__, path); 1269 return -1; 1270 } 1271 1272 while (!ret && (ent = readdir(dir))) { 1273 unsigned int idx; 1274 int r; 1275 1276 if (!strcmp(ent->d_name, ".") || 1277 !strcmp(ent->d_name, "..")) 1278 continue; 1279 1280 r = sscanf(ent->d_name, "node%u", &idx); 1281 if (r != 1) 1282 continue; 1283 1284 if (WARN_ONCE(cnt >= size, 1285 "failed to write MEM_TOPOLOGY, way too many nodes\n")) 1286 return -1; 1287 1288 ret = memory_node__read(&nodes[cnt++], idx); 1289 } 1290 1291 *cntp = cnt; 1292 closedir(dir); 1293 1294 if (!ret) 1295 qsort(nodes, cnt, sizeof(nodes[0]), memory_node__sort); 1296 1297 return ret; 1298 } 1299 1300 #define MAX_MEMORY_NODES 2000 1301 1302 /* 1303 * The MEM_TOPOLOGY holds physical memory map for every 1304 * node in system. The format of data is as follows: 1305 * 1306 * 0 - version | for future changes 1307 * 8 - block_size_bytes | /sys/devices/system/memory/block_size_bytes 1308 * 16 - count | number of nodes 1309 * 1310 * For each node we store map of physical indexes for 1311 * each node: 1312 * 1313 * 32 - node id | node index 1314 * 40 - size | size of bitmap 1315 * 48 - bitmap | bitmap of memory indexes that belongs to node 1316 */ 1317 static int write_mem_topology(struct feat_fd *ff __maybe_unused, 1318 struct perf_evlist *evlist __maybe_unused) 1319 { 1320 static struct memory_node nodes[MAX_MEMORY_NODES]; 1321 u64 bsize, version = 1, i, nr; 1322 int ret; 1323 1324 ret = sysfs__read_xll("devices/system/memory/block_size_bytes", 1325 (unsigned long long *) &bsize); 1326 if (ret) 1327 return ret; 1328 1329 ret = build_mem_topology(&nodes[0], MAX_MEMORY_NODES, &nr); 1330 if (ret) 1331 return ret; 1332 1333 ret = do_write(ff, &version, sizeof(version)); 1334 if (ret < 0) 1335 goto out; 1336 1337 ret = do_write(ff, &bsize, sizeof(bsize)); 1338 if (ret < 0) 1339 goto out; 1340 1341 ret = do_write(ff, &nr, sizeof(nr)); 1342 if (ret < 0) 1343 goto out; 1344 1345 for (i = 0; i < nr; i++) { 1346 struct memory_node *n = &nodes[i]; 1347 1348 #define _W(v) \ 1349 ret = do_write(ff, &n->v, sizeof(n->v)); \ 1350 if (ret < 0) \ 1351 goto out; 1352 1353 _W(node) 1354 _W(size) 1355 1356 #undef _W 1357 1358 ret = do_write_bitmap(ff, n->set, n->size); 1359 if (ret < 0) 1360 goto out; 1361 } 1362 1363 out: 1364 return ret; 1365 } 1366 1367 static int write_compressed(struct feat_fd *ff __maybe_unused, 1368 struct perf_evlist *evlist __maybe_unused) 1369 { 1370 int ret; 1371 1372 ret = do_write(ff, &(ff->ph->env.comp_ver), sizeof(ff->ph->env.comp_ver)); 1373 if (ret) 1374 return ret; 1375 1376 ret = do_write(ff, &(ff->ph->env.comp_type), sizeof(ff->ph->env.comp_type)); 1377 if (ret) 1378 return ret; 1379 1380 ret = do_write(ff, &(ff->ph->env.comp_level), sizeof(ff->ph->env.comp_level)); 1381 if (ret) 1382 return ret; 1383 1384 ret = do_write(ff, &(ff->ph->env.comp_ratio), sizeof(ff->ph->env.comp_ratio)); 1385 if (ret) 1386 return ret; 1387 1388 return do_write(ff, &(ff->ph->env.comp_mmap_len), sizeof(ff->ph->env.comp_mmap_len)); 1389 } 1390 1391 static void print_hostname(struct feat_fd *ff, FILE *fp) 1392 { 1393 fprintf(fp, "# hostname : %s\n", ff->ph->env.hostname); 1394 } 1395 1396 static void print_osrelease(struct feat_fd *ff, FILE *fp) 1397 { 1398 fprintf(fp, "# os release : %s\n", ff->ph->env.os_release); 1399 } 1400 1401 static void print_arch(struct feat_fd *ff, FILE *fp) 1402 { 1403 fprintf(fp, "# arch : %s\n", ff->ph->env.arch); 1404 } 1405 1406 static void print_cpudesc(struct feat_fd *ff, FILE *fp) 1407 { 1408 fprintf(fp, "# cpudesc : %s\n", ff->ph->env.cpu_desc); 1409 } 1410 1411 static void print_nrcpus(struct feat_fd *ff, FILE *fp) 1412 { 1413 fprintf(fp, "# nrcpus online : %u\n", ff->ph->env.nr_cpus_online); 1414 fprintf(fp, "# nrcpus avail : %u\n", ff->ph->env.nr_cpus_avail); 1415 } 1416 1417 static void print_version(struct feat_fd *ff, FILE *fp) 1418 { 1419 fprintf(fp, "# perf version : %s\n", ff->ph->env.version); 1420 } 1421 1422 static void print_cmdline(struct feat_fd *ff, FILE *fp) 1423 { 1424 int nr, i; 1425 1426 nr = ff->ph->env.nr_cmdline; 1427 1428 fprintf(fp, "# cmdline : "); 1429 1430 for (i = 0; i < nr; i++) { 1431 char *argv_i = strdup(ff->ph->env.cmdline_argv[i]); 1432 if (!argv_i) { 1433 fprintf(fp, "%s ", ff->ph->env.cmdline_argv[i]); 1434 } else { 1435 char *mem = argv_i; 1436 do { 1437 char *quote = strchr(argv_i, '\''); 1438 if (!quote) 1439 break; 1440 *quote++ = '\0'; 1441 fprintf(fp, "%s\\\'", argv_i); 1442 argv_i = quote; 1443 } while (1); 1444 fprintf(fp, "%s ", argv_i); 1445 free(mem); 1446 } 1447 } 1448 fputc('\n', fp); 1449 } 1450 1451 static void print_cpu_topology(struct feat_fd *ff, FILE *fp) 1452 { 1453 struct perf_header *ph = ff->ph; 1454 int cpu_nr = ph->env.nr_cpus_avail; 1455 int nr, i; 1456 char *str; 1457 1458 nr = ph->env.nr_sibling_cores; 1459 str = ph->env.sibling_cores; 1460 1461 for (i = 0; i < nr; i++) { 1462 fprintf(fp, "# sibling sockets : %s\n", str); 1463 str += strlen(str) + 1; 1464 } 1465 1466 if (ph->env.nr_sibling_dies) { 1467 nr = ph->env.nr_sibling_dies; 1468 str = ph->env.sibling_dies; 1469 1470 for (i = 0; i < nr; i++) { 1471 fprintf(fp, "# sibling dies : %s\n", str); 1472 str += strlen(str) + 1; 1473 } 1474 } 1475 1476 nr = ph->env.nr_sibling_threads; 1477 str = ph->env.sibling_threads; 1478 1479 for (i = 0; i < nr; i++) { 1480 fprintf(fp, "# sibling threads : %s\n", str); 1481 str += strlen(str) + 1; 1482 } 1483 1484 if (ph->env.nr_sibling_dies) { 1485 if (ph->env.cpu != NULL) { 1486 for (i = 0; i < cpu_nr; i++) 1487 fprintf(fp, "# CPU %d: Core ID %d, " 1488 "Die ID %d, Socket ID %d\n", 1489 i, ph->env.cpu[i].core_id, 1490 ph->env.cpu[i].die_id, 1491 ph->env.cpu[i].socket_id); 1492 } else 1493 fprintf(fp, "# Core ID, Die ID and Socket ID " 1494 "information is not available\n"); 1495 } else { 1496 if (ph->env.cpu != NULL) { 1497 for (i = 0; i < cpu_nr; i++) 1498 fprintf(fp, "# CPU %d: Core ID %d, " 1499 "Socket ID %d\n", 1500 i, ph->env.cpu[i].core_id, 1501 ph->env.cpu[i].socket_id); 1502 } else 1503 fprintf(fp, "# Core ID and Socket ID " 1504 "information is not available\n"); 1505 } 1506 } 1507 1508 static void print_clockid(struct feat_fd *ff, FILE *fp) 1509 { 1510 fprintf(fp, "# clockid frequency: %"PRIu64" MHz\n", 1511 ff->ph->env.clockid_res_ns * 1000); 1512 } 1513 1514 static void print_dir_format(struct feat_fd *ff, FILE *fp) 1515 { 1516 struct perf_session *session; 1517 struct perf_data *data; 1518 1519 session = container_of(ff->ph, struct perf_session, header); 1520 data = session->data; 1521 1522 fprintf(fp, "# directory data version : %"PRIu64"\n", data->dir.version); 1523 } 1524 1525 static void print_bpf_prog_info(struct feat_fd *ff, FILE *fp) 1526 { 1527 struct perf_env *env = &ff->ph->env; 1528 struct rb_root *root; 1529 struct rb_node *next; 1530 1531 down_read(&env->bpf_progs.lock); 1532 1533 root = &env->bpf_progs.infos; 1534 next = rb_first(root); 1535 1536 while (next) { 1537 struct bpf_prog_info_node *node; 1538 1539 node = rb_entry(next, struct bpf_prog_info_node, rb_node); 1540 next = rb_next(&node->rb_node); 1541 1542 bpf_event__print_bpf_prog_info(&node->info_linear->info, 1543 env, fp); 1544 } 1545 1546 up_read(&env->bpf_progs.lock); 1547 } 1548 1549 static void print_bpf_btf(struct feat_fd *ff, FILE *fp) 1550 { 1551 struct perf_env *env = &ff->ph->env; 1552 struct rb_root *root; 1553 struct rb_node *next; 1554 1555 down_read(&env->bpf_progs.lock); 1556 1557 root = &env->bpf_progs.btfs; 1558 next = rb_first(root); 1559 1560 while (next) { 1561 struct btf_node *node; 1562 1563 node = rb_entry(next, struct btf_node, rb_node); 1564 next = rb_next(&node->rb_node); 1565 fprintf(fp, "# btf info of id %u\n", node->id); 1566 } 1567 1568 up_read(&env->bpf_progs.lock); 1569 } 1570 1571 static void free_event_desc(struct perf_evsel *events) 1572 { 1573 struct perf_evsel *evsel; 1574 1575 if (!events) 1576 return; 1577 1578 for (evsel = events; evsel->attr.size; evsel++) { 1579 zfree(&evsel->name); 1580 zfree(&evsel->id); 1581 } 1582 1583 free(events); 1584 } 1585 1586 static struct perf_evsel *read_event_desc(struct feat_fd *ff) 1587 { 1588 struct perf_evsel *evsel, *events = NULL; 1589 u64 *id; 1590 void *buf = NULL; 1591 u32 nre, sz, nr, i, j; 1592 size_t msz; 1593 1594 /* number of events */ 1595 if (do_read_u32(ff, &nre)) 1596 goto error; 1597 1598 if (do_read_u32(ff, &sz)) 1599 goto error; 1600 1601 /* buffer to hold on file attr struct */ 1602 buf = malloc(sz); 1603 if (!buf) 1604 goto error; 1605 1606 /* the last event terminates with evsel->attr.size == 0: */ 1607 events = calloc(nre + 1, sizeof(*events)); 1608 if (!events) 1609 goto error; 1610 1611 msz = sizeof(evsel->attr); 1612 if (sz < msz) 1613 msz = sz; 1614 1615 for (i = 0, evsel = events; i < nre; evsel++, i++) { 1616 evsel->idx = i; 1617 1618 /* 1619 * must read entire on-file attr struct to 1620 * sync up with layout. 1621 */ 1622 if (__do_read(ff, buf, sz)) 1623 goto error; 1624 1625 if (ff->ph->needs_swap) 1626 perf_event__attr_swap(buf); 1627 1628 memcpy(&evsel->attr, buf, msz); 1629 1630 if (do_read_u32(ff, &nr)) 1631 goto error; 1632 1633 if (ff->ph->needs_swap) 1634 evsel->needs_swap = true; 1635 1636 evsel->name = do_read_string(ff); 1637 if (!evsel->name) 1638 goto error; 1639 1640 if (!nr) 1641 continue; 1642 1643 id = calloc(nr, sizeof(*id)); 1644 if (!id) 1645 goto error; 1646 evsel->ids = nr; 1647 evsel->id = id; 1648 1649 for (j = 0 ; j < nr; j++) { 1650 if (do_read_u64(ff, id)) 1651 goto error; 1652 id++; 1653 } 1654 } 1655 out: 1656 free(buf); 1657 return events; 1658 error: 1659 free_event_desc(events); 1660 events = NULL; 1661 goto out; 1662 } 1663 1664 static int __desc_attr__fprintf(FILE *fp, const char *name, const char *val, 1665 void *priv __maybe_unused) 1666 { 1667 return fprintf(fp, ", %s = %s", name, val); 1668 } 1669 1670 static void print_event_desc(struct feat_fd *ff, FILE *fp) 1671 { 1672 struct perf_evsel *evsel, *events; 1673 u32 j; 1674 u64 *id; 1675 1676 if (ff->events) 1677 events = ff->events; 1678 else 1679 events = read_event_desc(ff); 1680 1681 if (!events) { 1682 fprintf(fp, "# event desc: not available or unable to read\n"); 1683 return; 1684 } 1685 1686 for (evsel = events; evsel->attr.size; evsel++) { 1687 fprintf(fp, "# event : name = %s, ", evsel->name); 1688 1689 if (evsel->ids) { 1690 fprintf(fp, ", id = {"); 1691 for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) { 1692 if (j) 1693 fputc(',', fp); 1694 fprintf(fp, " %"PRIu64, *id); 1695 } 1696 fprintf(fp, " }"); 1697 } 1698 1699 perf_event_attr__fprintf(fp, &evsel->attr, __desc_attr__fprintf, NULL); 1700 1701 fputc('\n', fp); 1702 } 1703 1704 free_event_desc(events); 1705 ff->events = NULL; 1706 } 1707 1708 static void print_total_mem(struct feat_fd *ff, FILE *fp) 1709 { 1710 fprintf(fp, "# total memory : %llu kB\n", ff->ph->env.total_mem); 1711 } 1712 1713 static void print_numa_topology(struct feat_fd *ff, FILE *fp) 1714 { 1715 int i; 1716 struct numa_node *n; 1717 1718 for (i = 0; i < ff->ph->env.nr_numa_nodes; i++) { 1719 n = &ff->ph->env.numa_nodes[i]; 1720 1721 fprintf(fp, "# node%u meminfo : total = %"PRIu64" kB," 1722 " free = %"PRIu64" kB\n", 1723 n->node, n->mem_total, n->mem_free); 1724 1725 fprintf(fp, "# node%u cpu list : ", n->node); 1726 cpu_map__fprintf(n->map, fp); 1727 } 1728 } 1729 1730 static void print_cpuid(struct feat_fd *ff, FILE *fp) 1731 { 1732 fprintf(fp, "# cpuid : %s\n", ff->ph->env.cpuid); 1733 } 1734 1735 static void print_branch_stack(struct feat_fd *ff __maybe_unused, FILE *fp) 1736 { 1737 fprintf(fp, "# contains samples with branch stack\n"); 1738 } 1739 1740 static void print_auxtrace(struct feat_fd *ff __maybe_unused, FILE *fp) 1741 { 1742 fprintf(fp, "# contains AUX area data (e.g. instruction trace)\n"); 1743 } 1744 1745 static void print_stat(struct feat_fd *ff __maybe_unused, FILE *fp) 1746 { 1747 fprintf(fp, "# contains stat data\n"); 1748 } 1749 1750 static void print_cache(struct feat_fd *ff, FILE *fp __maybe_unused) 1751 { 1752 int i; 1753 1754 fprintf(fp, "# CPU cache info:\n"); 1755 for (i = 0; i < ff->ph->env.caches_cnt; i++) { 1756 fprintf(fp, "# "); 1757 cpu_cache_level__fprintf(fp, &ff->ph->env.caches[i]); 1758 } 1759 } 1760 1761 static void print_compressed(struct feat_fd *ff, FILE *fp) 1762 { 1763 fprintf(fp, "# compressed : %s, level = %d, ratio = %d\n", 1764 ff->ph->env.comp_type == PERF_COMP_ZSTD ? "Zstd" : "Unknown", 1765 ff->ph->env.comp_level, ff->ph->env.comp_ratio); 1766 } 1767 1768 static void print_pmu_mappings(struct feat_fd *ff, FILE *fp) 1769 { 1770 const char *delimiter = "# pmu mappings: "; 1771 char *str, *tmp; 1772 u32 pmu_num; 1773 u32 type; 1774 1775 pmu_num = ff->ph->env.nr_pmu_mappings; 1776 if (!pmu_num) { 1777 fprintf(fp, "# pmu mappings: not available\n"); 1778 return; 1779 } 1780 1781 str = ff->ph->env.pmu_mappings; 1782 1783 while (pmu_num) { 1784 type = strtoul(str, &tmp, 0); 1785 if (*tmp != ':') 1786 goto error; 1787 1788 str = tmp + 1; 1789 fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type); 1790 1791 delimiter = ", "; 1792 str += strlen(str) + 1; 1793 pmu_num--; 1794 } 1795 1796 fprintf(fp, "\n"); 1797 1798 if (!pmu_num) 1799 return; 1800 error: 1801 fprintf(fp, "# pmu mappings: unable to read\n"); 1802 } 1803 1804 static void print_group_desc(struct feat_fd *ff, FILE *fp) 1805 { 1806 struct perf_session *session; 1807 struct perf_evsel *evsel; 1808 u32 nr = 0; 1809 1810 session = container_of(ff->ph, struct perf_session, header); 1811 1812 evlist__for_each_entry(session->evlist, evsel) { 1813 if (perf_evsel__is_group_leader(evsel) && 1814 evsel->nr_members > 1) { 1815 fprintf(fp, "# group: %s{%s", evsel->group_name ?: "", 1816 perf_evsel__name(evsel)); 1817 1818 nr = evsel->nr_members - 1; 1819 } else if (nr) { 1820 fprintf(fp, ",%s", perf_evsel__name(evsel)); 1821 1822 if (--nr == 0) 1823 fprintf(fp, "}\n"); 1824 } 1825 } 1826 } 1827 1828 static void print_sample_time(struct feat_fd *ff, FILE *fp) 1829 { 1830 struct perf_session *session; 1831 char time_buf[32]; 1832 double d; 1833 1834 session = container_of(ff->ph, struct perf_session, header); 1835 1836 timestamp__scnprintf_usec(session->evlist->first_sample_time, 1837 time_buf, sizeof(time_buf)); 1838 fprintf(fp, "# time of first sample : %s\n", time_buf); 1839 1840 timestamp__scnprintf_usec(session->evlist->last_sample_time, 1841 time_buf, sizeof(time_buf)); 1842 fprintf(fp, "# time of last sample : %s\n", time_buf); 1843 1844 d = (double)(session->evlist->last_sample_time - 1845 session->evlist->first_sample_time) / NSEC_PER_MSEC; 1846 1847 fprintf(fp, "# sample duration : %10.3f ms\n", d); 1848 } 1849 1850 static void memory_node__fprintf(struct memory_node *n, 1851 unsigned long long bsize, FILE *fp) 1852 { 1853 char buf_map[100], buf_size[50]; 1854 unsigned long long size; 1855 1856 size = bsize * bitmap_weight(n->set, n->size); 1857 unit_number__scnprintf(buf_size, 50, size); 1858 1859 bitmap_scnprintf(n->set, n->size, buf_map, 100); 1860 fprintf(fp, "# %3" PRIu64 " [%s]: %s\n", n->node, buf_size, buf_map); 1861 } 1862 1863 static void print_mem_topology(struct feat_fd *ff, FILE *fp) 1864 { 1865 struct memory_node *nodes; 1866 int i, nr; 1867 1868 nodes = ff->ph->env.memory_nodes; 1869 nr = ff->ph->env.nr_memory_nodes; 1870 1871 fprintf(fp, "# memory nodes (nr %d, block size 0x%llx):\n", 1872 nr, ff->ph->env.memory_bsize); 1873 1874 for (i = 0; i < nr; i++) { 1875 memory_node__fprintf(&nodes[i], ff->ph->env.memory_bsize, fp); 1876 } 1877 } 1878 1879 static int __event_process_build_id(struct build_id_event *bev, 1880 char *filename, 1881 struct perf_session *session) 1882 { 1883 int err = -1; 1884 struct machine *machine; 1885 u16 cpumode; 1886 struct dso *dso; 1887 enum dso_kernel_type dso_type; 1888 1889 machine = perf_session__findnew_machine(session, bev->pid); 1890 if (!machine) 1891 goto out; 1892 1893 cpumode = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; 1894 1895 switch (cpumode) { 1896 case PERF_RECORD_MISC_KERNEL: 1897 dso_type = DSO_TYPE_KERNEL; 1898 break; 1899 case PERF_RECORD_MISC_GUEST_KERNEL: 1900 dso_type = DSO_TYPE_GUEST_KERNEL; 1901 break; 1902 case PERF_RECORD_MISC_USER: 1903 case PERF_RECORD_MISC_GUEST_USER: 1904 dso_type = DSO_TYPE_USER; 1905 break; 1906 default: 1907 goto out; 1908 } 1909 1910 dso = machine__findnew_dso(machine, filename); 1911 if (dso != NULL) { 1912 char sbuild_id[SBUILD_ID_SIZE]; 1913 1914 dso__set_build_id(dso, &bev->build_id); 1915 1916 if (dso_type != DSO_TYPE_USER) { 1917 struct kmod_path m = { .name = NULL, }; 1918 1919 if (!kmod_path__parse_name(&m, filename) && m.kmod) 1920 dso__set_module_info(dso, &m, machine); 1921 else 1922 dso->kernel = dso_type; 1923 1924 free(m.name); 1925 } 1926 1927 build_id__sprintf(dso->build_id, sizeof(dso->build_id), 1928 sbuild_id); 1929 pr_debug("build id event received for %s: %s\n", 1930 dso->long_name, sbuild_id); 1931 dso__put(dso); 1932 } 1933 1934 err = 0; 1935 out: 1936 return err; 1937 } 1938 1939 static int perf_header__read_build_ids_abi_quirk(struct perf_header *header, 1940 int input, u64 offset, u64 size) 1941 { 1942 struct perf_session *session = container_of(header, struct perf_session, header); 1943 struct { 1944 struct perf_event_header header; 1945 u8 build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))]; 1946 char filename[0]; 1947 } old_bev; 1948 struct build_id_event bev; 1949 char filename[PATH_MAX]; 1950 u64 limit = offset + size; 1951 1952 while (offset < limit) { 1953 ssize_t len; 1954 1955 if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev)) 1956 return -1; 1957 1958 if (header->needs_swap) 1959 perf_event_header__bswap(&old_bev.header); 1960 1961 len = old_bev.header.size - sizeof(old_bev); 1962 if (readn(input, filename, len) != len) 1963 return -1; 1964 1965 bev.header = old_bev.header; 1966 1967 /* 1968 * As the pid is the missing value, we need to fill 1969 * it properly. The header.misc value give us nice hint. 1970 */ 1971 bev.pid = HOST_KERNEL_ID; 1972 if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER || 1973 bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL) 1974 bev.pid = DEFAULT_GUEST_KERNEL_ID; 1975 1976 memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id)); 1977 __event_process_build_id(&bev, filename, session); 1978 1979 offset += bev.header.size; 1980 } 1981 1982 return 0; 1983 } 1984 1985 static int perf_header__read_build_ids(struct perf_header *header, 1986 int input, u64 offset, u64 size) 1987 { 1988 struct perf_session *session = container_of(header, struct perf_session, header); 1989 struct build_id_event bev; 1990 char filename[PATH_MAX]; 1991 u64 limit = offset + size, orig_offset = offset; 1992 int err = -1; 1993 1994 while (offset < limit) { 1995 ssize_t len; 1996 1997 if (readn(input, &bev, sizeof(bev)) != sizeof(bev)) 1998 goto out; 1999 2000 if (header->needs_swap) 2001 perf_event_header__bswap(&bev.header); 2002 2003 len = bev.header.size - sizeof(bev); 2004 if (readn(input, filename, len) != len) 2005 goto out; 2006 /* 2007 * The a1645ce1 changeset: 2008 * 2009 * "perf: 'perf kvm' tool for monitoring guest performance from host" 2010 * 2011 * Added a field to struct build_id_event that broke the file 2012 * format. 2013 * 2014 * Since the kernel build-id is the first entry, process the 2015 * table using the old format if the well known 2016 * '[kernel.kallsyms]' string for the kernel build-id has the 2017 * first 4 characters chopped off (where the pid_t sits). 2018 */ 2019 if (memcmp(filename, "nel.kallsyms]", 13) == 0) { 2020 if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1) 2021 return -1; 2022 return perf_header__read_build_ids_abi_quirk(header, input, offset, size); 2023 } 2024 2025 __event_process_build_id(&bev, filename, session); 2026 2027 offset += bev.header.size; 2028 } 2029 err = 0; 2030 out: 2031 return err; 2032 } 2033 2034 /* Macro for features that simply need to read and store a string. */ 2035 #define FEAT_PROCESS_STR_FUN(__feat, __feat_env) \ 2036 static int process_##__feat(struct feat_fd *ff, void *data __maybe_unused) \ 2037 {\ 2038 ff->ph->env.__feat_env = do_read_string(ff); \ 2039 return ff->ph->env.__feat_env ? 0 : -ENOMEM; \ 2040 } 2041 2042 FEAT_PROCESS_STR_FUN(hostname, hostname); 2043 FEAT_PROCESS_STR_FUN(osrelease, os_release); 2044 FEAT_PROCESS_STR_FUN(version, version); 2045 FEAT_PROCESS_STR_FUN(arch, arch); 2046 FEAT_PROCESS_STR_FUN(cpudesc, cpu_desc); 2047 FEAT_PROCESS_STR_FUN(cpuid, cpuid); 2048 2049 static int process_tracing_data(struct feat_fd *ff, void *data) 2050 { 2051 ssize_t ret = trace_report(ff->fd, data, false); 2052 2053 return ret < 0 ? -1 : 0; 2054 } 2055 2056 static int process_build_id(struct feat_fd *ff, void *data __maybe_unused) 2057 { 2058 if (perf_header__read_build_ids(ff->ph, ff->fd, ff->offset, ff->size)) 2059 pr_debug("Failed to read buildids, continuing...\n"); 2060 return 0; 2061 } 2062 2063 static int process_nrcpus(struct feat_fd *ff, void *data __maybe_unused) 2064 { 2065 int ret; 2066 u32 nr_cpus_avail, nr_cpus_online; 2067 2068 ret = do_read_u32(ff, &nr_cpus_avail); 2069 if (ret) 2070 return ret; 2071 2072 ret = do_read_u32(ff, &nr_cpus_online); 2073 if (ret) 2074 return ret; 2075 ff->ph->env.nr_cpus_avail = (int)nr_cpus_avail; 2076 ff->ph->env.nr_cpus_online = (int)nr_cpus_online; 2077 return 0; 2078 } 2079 2080 static int process_total_mem(struct feat_fd *ff, void *data __maybe_unused) 2081 { 2082 u64 total_mem; 2083 int ret; 2084 2085 ret = do_read_u64(ff, &total_mem); 2086 if (ret) 2087 return -1; 2088 ff->ph->env.total_mem = (unsigned long long)total_mem; 2089 return 0; 2090 } 2091 2092 static struct perf_evsel * 2093 perf_evlist__find_by_index(struct perf_evlist *evlist, int idx) 2094 { 2095 struct perf_evsel *evsel; 2096 2097 evlist__for_each_entry(evlist, evsel) { 2098 if (evsel->idx == idx) 2099 return evsel; 2100 } 2101 2102 return NULL; 2103 } 2104 2105 static void 2106 perf_evlist__set_event_name(struct perf_evlist *evlist, 2107 struct perf_evsel *event) 2108 { 2109 struct perf_evsel *evsel; 2110 2111 if (!event->name) 2112 return; 2113 2114 evsel = perf_evlist__find_by_index(evlist, event->idx); 2115 if (!evsel) 2116 return; 2117 2118 if (evsel->name) 2119 return; 2120 2121 evsel->name = strdup(event->name); 2122 } 2123 2124 static int 2125 process_event_desc(struct feat_fd *ff, void *data __maybe_unused) 2126 { 2127 struct perf_session *session; 2128 struct perf_evsel *evsel, *events = read_event_desc(ff); 2129 2130 if (!events) 2131 return 0; 2132 2133 session = container_of(ff->ph, struct perf_session, header); 2134 2135 if (session->data->is_pipe) { 2136 /* Save events for reading later by print_event_desc, 2137 * since they can't be read again in pipe mode. */ 2138 ff->events = events; 2139 } 2140 2141 for (evsel = events; evsel->attr.size; evsel++) 2142 perf_evlist__set_event_name(session->evlist, evsel); 2143 2144 if (!session->data->is_pipe) 2145 free_event_desc(events); 2146 2147 return 0; 2148 } 2149 2150 static int process_cmdline(struct feat_fd *ff, void *data __maybe_unused) 2151 { 2152 char *str, *cmdline = NULL, **argv = NULL; 2153 u32 nr, i, len = 0; 2154 2155 if (do_read_u32(ff, &nr)) 2156 return -1; 2157 2158 ff->ph->env.nr_cmdline = nr; 2159 2160 cmdline = zalloc(ff->size + nr + 1); 2161 if (!cmdline) 2162 return -1; 2163 2164 argv = zalloc(sizeof(char *) * (nr + 1)); 2165 if (!argv) 2166 goto error; 2167 2168 for (i = 0; i < nr; i++) { 2169 str = do_read_string(ff); 2170 if (!str) 2171 goto error; 2172 2173 argv[i] = cmdline + len; 2174 memcpy(argv[i], str, strlen(str) + 1); 2175 len += strlen(str) + 1; 2176 free(str); 2177 } 2178 ff->ph->env.cmdline = cmdline; 2179 ff->ph->env.cmdline_argv = (const char **) argv; 2180 return 0; 2181 2182 error: 2183 free(argv); 2184 free(cmdline); 2185 return -1; 2186 } 2187 2188 static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused) 2189 { 2190 u32 nr, i; 2191 char *str; 2192 struct strbuf sb; 2193 int cpu_nr = ff->ph->env.nr_cpus_avail; 2194 u64 size = 0; 2195 struct perf_header *ph = ff->ph; 2196 bool do_core_id_test = true; 2197 2198 ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu)); 2199 if (!ph->env.cpu) 2200 return -1; 2201 2202 if (do_read_u32(ff, &nr)) 2203 goto free_cpu; 2204 2205 ph->env.nr_sibling_cores = nr; 2206 size += sizeof(u32); 2207 if (strbuf_init(&sb, 128) < 0) 2208 goto free_cpu; 2209 2210 for (i = 0; i < nr; i++) { 2211 str = do_read_string(ff); 2212 if (!str) 2213 goto error; 2214 2215 /* include a NULL character at the end */ 2216 if (strbuf_add(&sb, str, strlen(str) + 1) < 0) 2217 goto error; 2218 size += string_size(str); 2219 free(str); 2220 } 2221 ph->env.sibling_cores = strbuf_detach(&sb, NULL); 2222 2223 if (do_read_u32(ff, &nr)) 2224 return -1; 2225 2226 ph->env.nr_sibling_threads = nr; 2227 size += sizeof(u32); 2228 2229 for (i = 0; i < nr; i++) { 2230 str = do_read_string(ff); 2231 if (!str) 2232 goto error; 2233 2234 /* include a NULL character at the end */ 2235 if (strbuf_add(&sb, str, strlen(str) + 1) < 0) 2236 goto error; 2237 size += string_size(str); 2238 free(str); 2239 } 2240 ph->env.sibling_threads = strbuf_detach(&sb, NULL); 2241 2242 /* 2243 * The header may be from old perf, 2244 * which doesn't include core id and socket id information. 2245 */ 2246 if (ff->size <= size) { 2247 zfree(&ph->env.cpu); 2248 return 0; 2249 } 2250 2251 /* On s390 the socket_id number is not related to the numbers of cpus. 2252 * The socket_id number might be higher than the numbers of cpus. 2253 * This depends on the configuration. 2254 */ 2255 if (ph->env.arch && !strncmp(ph->env.arch, "s390", 4)) 2256 do_core_id_test = false; 2257 2258 for (i = 0; i < (u32)cpu_nr; i++) { 2259 if (do_read_u32(ff, &nr)) 2260 goto free_cpu; 2261 2262 ph->env.cpu[i].core_id = nr; 2263 size += sizeof(u32); 2264 2265 if (do_read_u32(ff, &nr)) 2266 goto free_cpu; 2267 2268 if (do_core_id_test && nr != (u32)-1 && nr > (u32)cpu_nr) { 2269 pr_debug("socket_id number is too big." 2270 "You may need to upgrade the perf tool.\n"); 2271 goto free_cpu; 2272 } 2273 2274 ph->env.cpu[i].socket_id = nr; 2275 size += sizeof(u32); 2276 } 2277 2278 /* 2279 * The header may be from old perf, 2280 * which doesn't include die information. 2281 */ 2282 if (ff->size <= size) 2283 return 0; 2284 2285 if (do_read_u32(ff, &nr)) 2286 return -1; 2287 2288 ph->env.nr_sibling_dies = nr; 2289 size += sizeof(u32); 2290 2291 for (i = 0; i < nr; i++) { 2292 str = do_read_string(ff); 2293 if (!str) 2294 goto error; 2295 2296 /* include a NULL character at the end */ 2297 if (strbuf_add(&sb, str, strlen(str) + 1) < 0) 2298 goto error; 2299 size += string_size(str); 2300 free(str); 2301 } 2302 ph->env.sibling_dies = strbuf_detach(&sb, NULL); 2303 2304 for (i = 0; i < (u32)cpu_nr; i++) { 2305 if (do_read_u32(ff, &nr)) 2306 goto free_cpu; 2307 2308 ph->env.cpu[i].die_id = nr; 2309 } 2310 2311 return 0; 2312 2313 error: 2314 strbuf_release(&sb); 2315 free_cpu: 2316 zfree(&ph->env.cpu); 2317 return -1; 2318 } 2319 2320 static int process_numa_topology(struct feat_fd *ff, void *data __maybe_unused) 2321 { 2322 struct numa_node *nodes, *n; 2323 u32 nr, i; 2324 char *str; 2325 2326 /* nr nodes */ 2327 if (do_read_u32(ff, &nr)) 2328 return -1; 2329 2330 nodes = zalloc(sizeof(*nodes) * nr); 2331 if (!nodes) 2332 return -ENOMEM; 2333 2334 for (i = 0; i < nr; i++) { 2335 n = &nodes[i]; 2336 2337 /* node number */ 2338 if (do_read_u32(ff, &n->node)) 2339 goto error; 2340 2341 if (do_read_u64(ff, &n->mem_total)) 2342 goto error; 2343 2344 if (do_read_u64(ff, &n->mem_free)) 2345 goto error; 2346 2347 str = do_read_string(ff); 2348 if (!str) 2349 goto error; 2350 2351 n->map = cpu_map__new(str); 2352 if (!n->map) 2353 goto error; 2354 2355 free(str); 2356 } 2357 ff->ph->env.nr_numa_nodes = nr; 2358 ff->ph->env.numa_nodes = nodes; 2359 return 0; 2360 2361 error: 2362 free(nodes); 2363 return -1; 2364 } 2365 2366 static int process_pmu_mappings(struct feat_fd *ff, void *data __maybe_unused) 2367 { 2368 char *name; 2369 u32 pmu_num; 2370 u32 type; 2371 struct strbuf sb; 2372 2373 if (do_read_u32(ff, &pmu_num)) 2374 return -1; 2375 2376 if (!pmu_num) { 2377 pr_debug("pmu mappings not available\n"); 2378 return 0; 2379 } 2380 2381 ff->ph->env.nr_pmu_mappings = pmu_num; 2382 if (strbuf_init(&sb, 128) < 0) 2383 return -1; 2384 2385 while (pmu_num) { 2386 if (do_read_u32(ff, &type)) 2387 goto error; 2388 2389 name = do_read_string(ff); 2390 if (!name) 2391 goto error; 2392 2393 if (strbuf_addf(&sb, "%u:%s", type, name) < 0) 2394 goto error; 2395 /* include a NULL character at the end */ 2396 if (strbuf_add(&sb, "", 1) < 0) 2397 goto error; 2398 2399 if (!strcmp(name, "msr")) 2400 ff->ph->env.msr_pmu_type = type; 2401 2402 free(name); 2403 pmu_num--; 2404 } 2405 ff->ph->env.pmu_mappings = strbuf_detach(&sb, NULL); 2406 return 0; 2407 2408 error: 2409 strbuf_release(&sb); 2410 return -1; 2411 } 2412 2413 static int process_group_desc(struct feat_fd *ff, void *data __maybe_unused) 2414 { 2415 size_t ret = -1; 2416 u32 i, nr, nr_groups; 2417 struct perf_session *session; 2418 struct perf_evsel *evsel, *leader = NULL; 2419 struct group_desc { 2420 char *name; 2421 u32 leader_idx; 2422 u32 nr_members; 2423 } *desc; 2424 2425 if (do_read_u32(ff, &nr_groups)) 2426 return -1; 2427 2428 ff->ph->env.nr_groups = nr_groups; 2429 if (!nr_groups) { 2430 pr_debug("group desc not available\n"); 2431 return 0; 2432 } 2433 2434 desc = calloc(nr_groups, sizeof(*desc)); 2435 if (!desc) 2436 return -1; 2437 2438 for (i = 0; i < nr_groups; i++) { 2439 desc[i].name = do_read_string(ff); 2440 if (!desc[i].name) 2441 goto out_free; 2442 2443 if (do_read_u32(ff, &desc[i].leader_idx)) 2444 goto out_free; 2445 2446 if (do_read_u32(ff, &desc[i].nr_members)) 2447 goto out_free; 2448 } 2449 2450 /* 2451 * Rebuild group relationship based on the group_desc 2452 */ 2453 session = container_of(ff->ph, struct perf_session, header); 2454 session->evlist->nr_groups = nr_groups; 2455 2456 i = nr = 0; 2457 evlist__for_each_entry(session->evlist, evsel) { 2458 if (evsel->idx == (int) desc[i].leader_idx) { 2459 evsel->leader = evsel; 2460 /* {anon_group} is a dummy name */ 2461 if (strcmp(desc[i].name, "{anon_group}")) { 2462 evsel->group_name = desc[i].name; 2463 desc[i].name = NULL; 2464 } 2465 evsel->nr_members = desc[i].nr_members; 2466 2467 if (i >= nr_groups || nr > 0) { 2468 pr_debug("invalid group desc\n"); 2469 goto out_free; 2470 } 2471 2472 leader = evsel; 2473 nr = evsel->nr_members - 1; 2474 i++; 2475 } else if (nr) { 2476 /* This is a group member */ 2477 evsel->leader = leader; 2478 2479 nr--; 2480 } 2481 } 2482 2483 if (i != nr_groups || nr != 0) { 2484 pr_debug("invalid group desc\n"); 2485 goto out_free; 2486 } 2487 2488 ret = 0; 2489 out_free: 2490 for (i = 0; i < nr_groups; i++) 2491 zfree(&desc[i].name); 2492 free(desc); 2493 2494 return ret; 2495 } 2496 2497 static int process_auxtrace(struct feat_fd *ff, void *data __maybe_unused) 2498 { 2499 struct perf_session *session; 2500 int err; 2501 2502 session = container_of(ff->ph, struct perf_session, header); 2503 2504 err = auxtrace_index__process(ff->fd, ff->size, session, 2505 ff->ph->needs_swap); 2506 if (err < 0) 2507 pr_err("Failed to process auxtrace index\n"); 2508 return err; 2509 } 2510 2511 static int process_cache(struct feat_fd *ff, void *data __maybe_unused) 2512 { 2513 struct cpu_cache_level *caches; 2514 u32 cnt, i, version; 2515 2516 if (do_read_u32(ff, &version)) 2517 return -1; 2518 2519 if (version != 1) 2520 return -1; 2521 2522 if (do_read_u32(ff, &cnt)) 2523 return -1; 2524 2525 caches = zalloc(sizeof(*caches) * cnt); 2526 if (!caches) 2527 return -1; 2528 2529 for (i = 0; i < cnt; i++) { 2530 struct cpu_cache_level c; 2531 2532 #define _R(v) \ 2533 if (do_read_u32(ff, &c.v))\ 2534 goto out_free_caches; \ 2535 2536 _R(level) 2537 _R(line_size) 2538 _R(sets) 2539 _R(ways) 2540 #undef _R 2541 2542 #define _R(v) \ 2543 c.v = do_read_string(ff); \ 2544 if (!c.v) \ 2545 goto out_free_caches; 2546 2547 _R(type) 2548 _R(size) 2549 _R(map) 2550 #undef _R 2551 2552 caches[i] = c; 2553 } 2554 2555 ff->ph->env.caches = caches; 2556 ff->ph->env.caches_cnt = cnt; 2557 return 0; 2558 out_free_caches: 2559 free(caches); 2560 return -1; 2561 } 2562 2563 static int process_sample_time(struct feat_fd *ff, void *data __maybe_unused) 2564 { 2565 struct perf_session *session; 2566 u64 first_sample_time, last_sample_time; 2567 int ret; 2568 2569 session = container_of(ff->ph, struct perf_session, header); 2570 2571 ret = do_read_u64(ff, &first_sample_time); 2572 if (ret) 2573 return -1; 2574 2575 ret = do_read_u64(ff, &last_sample_time); 2576 if (ret) 2577 return -1; 2578 2579 session->evlist->first_sample_time = first_sample_time; 2580 session->evlist->last_sample_time = last_sample_time; 2581 return 0; 2582 } 2583 2584 static int process_mem_topology(struct feat_fd *ff, 2585 void *data __maybe_unused) 2586 { 2587 struct memory_node *nodes; 2588 u64 version, i, nr, bsize; 2589 int ret = -1; 2590 2591 if (do_read_u64(ff, &version)) 2592 return -1; 2593 2594 if (version != 1) 2595 return -1; 2596 2597 if (do_read_u64(ff, &bsize)) 2598 return -1; 2599 2600 if (do_read_u64(ff, &nr)) 2601 return -1; 2602 2603 nodes = zalloc(sizeof(*nodes) * nr); 2604 if (!nodes) 2605 return -1; 2606 2607 for (i = 0; i < nr; i++) { 2608 struct memory_node n; 2609 2610 #define _R(v) \ 2611 if (do_read_u64(ff, &n.v)) \ 2612 goto out; \ 2613 2614 _R(node) 2615 _R(size) 2616 2617 #undef _R 2618 2619 if (do_read_bitmap(ff, &n.set, &n.size)) 2620 goto out; 2621 2622 nodes[i] = n; 2623 } 2624 2625 ff->ph->env.memory_bsize = bsize; 2626 ff->ph->env.memory_nodes = nodes; 2627 ff->ph->env.nr_memory_nodes = nr; 2628 ret = 0; 2629 2630 out: 2631 if (ret) 2632 free(nodes); 2633 return ret; 2634 } 2635 2636 static int process_clockid(struct feat_fd *ff, 2637 void *data __maybe_unused) 2638 { 2639 if (do_read_u64(ff, &ff->ph->env.clockid_res_ns)) 2640 return -1; 2641 2642 return 0; 2643 } 2644 2645 static int process_dir_format(struct feat_fd *ff, 2646 void *_data __maybe_unused) 2647 { 2648 struct perf_session *session; 2649 struct perf_data *data; 2650 2651 session = container_of(ff->ph, struct perf_session, header); 2652 data = session->data; 2653 2654 if (WARN_ON(!perf_data__is_dir(data))) 2655 return -1; 2656 2657 return do_read_u64(ff, &data->dir.version); 2658 } 2659 2660 #ifdef HAVE_LIBBPF_SUPPORT 2661 static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused) 2662 { 2663 struct bpf_prog_info_linear *info_linear; 2664 struct bpf_prog_info_node *info_node; 2665 struct perf_env *env = &ff->ph->env; 2666 u32 count, i; 2667 int err = -1; 2668 2669 if (ff->ph->needs_swap) { 2670 pr_warning("interpreting bpf_prog_info from systems with endianity is not yet supported\n"); 2671 return 0; 2672 } 2673 2674 if (do_read_u32(ff, &count)) 2675 return -1; 2676 2677 down_write(&env->bpf_progs.lock); 2678 2679 for (i = 0; i < count; ++i) { 2680 u32 info_len, data_len; 2681 2682 info_linear = NULL; 2683 info_node = NULL; 2684 if (do_read_u32(ff, &info_len)) 2685 goto out; 2686 if (do_read_u32(ff, &data_len)) 2687 goto out; 2688 2689 if (info_len > sizeof(struct bpf_prog_info)) { 2690 pr_warning("detected invalid bpf_prog_info\n"); 2691 goto out; 2692 } 2693 2694 info_linear = malloc(sizeof(struct bpf_prog_info_linear) + 2695 data_len); 2696 if (!info_linear) 2697 goto out; 2698 info_linear->info_len = sizeof(struct bpf_prog_info); 2699 info_linear->data_len = data_len; 2700 if (do_read_u64(ff, (u64 *)(&info_linear->arrays))) 2701 goto out; 2702 if (__do_read(ff, &info_linear->info, info_len)) 2703 goto out; 2704 if (info_len < sizeof(struct bpf_prog_info)) 2705 memset(((void *)(&info_linear->info)) + info_len, 0, 2706 sizeof(struct bpf_prog_info) - info_len); 2707 2708 if (__do_read(ff, info_linear->data, data_len)) 2709 goto out; 2710 2711 info_node = malloc(sizeof(struct bpf_prog_info_node)); 2712 if (!info_node) 2713 goto out; 2714 2715 /* after reading from file, translate offset to address */ 2716 bpf_program__bpil_offs_to_addr(info_linear); 2717 info_node->info_linear = info_linear; 2718 perf_env__insert_bpf_prog_info(env, info_node); 2719 } 2720 2721 up_write(&env->bpf_progs.lock); 2722 return 0; 2723 out: 2724 free(info_linear); 2725 free(info_node); 2726 up_write(&env->bpf_progs.lock); 2727 return err; 2728 } 2729 #else // HAVE_LIBBPF_SUPPORT 2730 static int process_bpf_prog_info(struct feat_fd *ff __maybe_unused, void *data __maybe_unused) 2731 { 2732 return 0; 2733 } 2734 #endif // HAVE_LIBBPF_SUPPORT 2735 2736 static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused) 2737 { 2738 struct perf_env *env = &ff->ph->env; 2739 struct btf_node *node = NULL; 2740 u32 count, i; 2741 int err = -1; 2742 2743 if (ff->ph->needs_swap) { 2744 pr_warning("interpreting btf from systems with endianity is not yet supported\n"); 2745 return 0; 2746 } 2747 2748 if (do_read_u32(ff, &count)) 2749 return -1; 2750 2751 down_write(&env->bpf_progs.lock); 2752 2753 for (i = 0; i < count; ++i) { 2754 u32 id, data_size; 2755 2756 if (do_read_u32(ff, &id)) 2757 goto out; 2758 if (do_read_u32(ff, &data_size)) 2759 goto out; 2760 2761 node = malloc(sizeof(struct btf_node) + data_size); 2762 if (!node) 2763 goto out; 2764 2765 node->id = id; 2766 node->data_size = data_size; 2767 2768 if (__do_read(ff, node->data, data_size)) 2769 goto out; 2770 2771 perf_env__insert_btf(env, node); 2772 node = NULL; 2773 } 2774 2775 err = 0; 2776 out: 2777 up_write(&env->bpf_progs.lock); 2778 free(node); 2779 return err; 2780 } 2781 2782 static int process_compressed(struct feat_fd *ff, 2783 void *data __maybe_unused) 2784 { 2785 if (do_read_u32(ff, &(ff->ph->env.comp_ver))) 2786 return -1; 2787 2788 if (do_read_u32(ff, &(ff->ph->env.comp_type))) 2789 return -1; 2790 2791 if (do_read_u32(ff, &(ff->ph->env.comp_level))) 2792 return -1; 2793 2794 if (do_read_u32(ff, &(ff->ph->env.comp_ratio))) 2795 return -1; 2796 2797 if (do_read_u32(ff, &(ff->ph->env.comp_mmap_len))) 2798 return -1; 2799 2800 return 0; 2801 } 2802 2803 struct feature_ops { 2804 int (*write)(struct feat_fd *ff, struct perf_evlist *evlist); 2805 void (*print)(struct feat_fd *ff, FILE *fp); 2806 int (*process)(struct feat_fd *ff, void *data); 2807 const char *name; 2808 bool full_only; 2809 bool synthesize; 2810 }; 2811 2812 #define FEAT_OPR(n, func, __full_only) \ 2813 [HEADER_##n] = { \ 2814 .name = __stringify(n), \ 2815 .write = write_##func, \ 2816 .print = print_##func, \ 2817 .full_only = __full_only, \ 2818 .process = process_##func, \ 2819 .synthesize = true \ 2820 } 2821 2822 #define FEAT_OPN(n, func, __full_only) \ 2823 [HEADER_##n] = { \ 2824 .name = __stringify(n), \ 2825 .write = write_##func, \ 2826 .print = print_##func, \ 2827 .full_only = __full_only, \ 2828 .process = process_##func \ 2829 } 2830 2831 /* feature_ops not implemented: */ 2832 #define print_tracing_data NULL 2833 #define print_build_id NULL 2834 2835 #define process_branch_stack NULL 2836 #define process_stat NULL 2837 2838 2839 static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = { 2840 FEAT_OPN(TRACING_DATA, tracing_data, false), 2841 FEAT_OPN(BUILD_ID, build_id, false), 2842 FEAT_OPR(HOSTNAME, hostname, false), 2843 FEAT_OPR(OSRELEASE, osrelease, false), 2844 FEAT_OPR(VERSION, version, false), 2845 FEAT_OPR(ARCH, arch, false), 2846 FEAT_OPR(NRCPUS, nrcpus, false), 2847 FEAT_OPR(CPUDESC, cpudesc, false), 2848 FEAT_OPR(CPUID, cpuid, false), 2849 FEAT_OPR(TOTAL_MEM, total_mem, false), 2850 FEAT_OPR(EVENT_DESC, event_desc, false), 2851 FEAT_OPR(CMDLINE, cmdline, false), 2852 FEAT_OPR(CPU_TOPOLOGY, cpu_topology, true), 2853 FEAT_OPR(NUMA_TOPOLOGY, numa_topology, true), 2854 FEAT_OPN(BRANCH_STACK, branch_stack, false), 2855 FEAT_OPR(PMU_MAPPINGS, pmu_mappings, false), 2856 FEAT_OPR(GROUP_DESC, group_desc, false), 2857 FEAT_OPN(AUXTRACE, auxtrace, false), 2858 FEAT_OPN(STAT, stat, false), 2859 FEAT_OPN(CACHE, cache, true), 2860 FEAT_OPR(SAMPLE_TIME, sample_time, false), 2861 FEAT_OPR(MEM_TOPOLOGY, mem_topology, true), 2862 FEAT_OPR(CLOCKID, clockid, false), 2863 FEAT_OPN(DIR_FORMAT, dir_format, false), 2864 FEAT_OPR(BPF_PROG_INFO, bpf_prog_info, false), 2865 FEAT_OPR(BPF_BTF, bpf_btf, false), 2866 FEAT_OPR(COMPRESSED, compressed, false), 2867 }; 2868 2869 struct header_print_data { 2870 FILE *fp; 2871 bool full; /* extended list of headers */ 2872 }; 2873 2874 static int perf_file_section__fprintf_info(struct perf_file_section *section, 2875 struct perf_header *ph, 2876 int feat, int fd, void *data) 2877 { 2878 struct header_print_data *hd = data; 2879 struct feat_fd ff; 2880 2881 if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) { 2882 pr_debug("Failed to lseek to %" PRIu64 " offset for feature " 2883 "%d, continuing...\n", section->offset, feat); 2884 return 0; 2885 } 2886 if (feat >= HEADER_LAST_FEATURE) { 2887 pr_warning("unknown feature %d\n", feat); 2888 return 0; 2889 } 2890 if (!feat_ops[feat].print) 2891 return 0; 2892 2893 ff = (struct feat_fd) { 2894 .fd = fd, 2895 .ph = ph, 2896 }; 2897 2898 if (!feat_ops[feat].full_only || hd->full) 2899 feat_ops[feat].print(&ff, hd->fp); 2900 else 2901 fprintf(hd->fp, "# %s info available, use -I to display\n", 2902 feat_ops[feat].name); 2903 2904 return 0; 2905 } 2906 2907 int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full) 2908 { 2909 struct header_print_data hd; 2910 struct perf_header *header = &session->header; 2911 int fd = perf_data__fd(session->data); 2912 struct stat st; 2913 time_t stctime; 2914 int ret, bit; 2915 2916 hd.fp = fp; 2917 hd.full = full; 2918 2919 ret = fstat(fd, &st); 2920 if (ret == -1) 2921 return -1; 2922 2923 stctime = st.st_ctime; 2924 fprintf(fp, "# captured on : %s", ctime(&stctime)); 2925 2926 fprintf(fp, "# header version : %u\n", header->version); 2927 fprintf(fp, "# data offset : %" PRIu64 "\n", header->data_offset); 2928 fprintf(fp, "# data size : %" PRIu64 "\n", header->data_size); 2929 fprintf(fp, "# feat offset : %" PRIu64 "\n", header->feat_offset); 2930 2931 perf_header__process_sections(header, fd, &hd, 2932 perf_file_section__fprintf_info); 2933 2934 if (session->data->is_pipe) 2935 return 0; 2936 2937 fprintf(fp, "# missing features: "); 2938 for_each_clear_bit(bit, header->adds_features, HEADER_LAST_FEATURE) { 2939 if (bit) 2940 fprintf(fp, "%s ", feat_ops[bit].name); 2941 } 2942 2943 fprintf(fp, "\n"); 2944 return 0; 2945 } 2946 2947 static int do_write_feat(struct feat_fd *ff, int type, 2948 struct perf_file_section **p, 2949 struct perf_evlist *evlist) 2950 { 2951 int err; 2952 int ret = 0; 2953 2954 if (perf_header__has_feat(ff->ph, type)) { 2955 if (!feat_ops[type].write) 2956 return -1; 2957 2958 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__)) 2959 return -1; 2960 2961 (*p)->offset = lseek(ff->fd, 0, SEEK_CUR); 2962 2963 err = feat_ops[type].write(ff, evlist); 2964 if (err < 0) { 2965 pr_debug("failed to write feature %s\n", feat_ops[type].name); 2966 2967 /* undo anything written */ 2968 lseek(ff->fd, (*p)->offset, SEEK_SET); 2969 2970 return -1; 2971 } 2972 (*p)->size = lseek(ff->fd, 0, SEEK_CUR) - (*p)->offset; 2973 (*p)++; 2974 } 2975 return ret; 2976 } 2977 2978 static int perf_header__adds_write(struct perf_header *header, 2979 struct perf_evlist *evlist, int fd) 2980 { 2981 int nr_sections; 2982 struct feat_fd ff; 2983 struct perf_file_section *feat_sec, *p; 2984 int sec_size; 2985 u64 sec_start; 2986 int feat; 2987 int err; 2988 2989 ff = (struct feat_fd){ 2990 .fd = fd, 2991 .ph = header, 2992 }; 2993 2994 nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS); 2995 if (!nr_sections) 2996 return 0; 2997 2998 feat_sec = p = calloc(nr_sections, sizeof(*feat_sec)); 2999 if (feat_sec == NULL) 3000 return -ENOMEM; 3001 3002 sec_size = sizeof(*feat_sec) * nr_sections; 3003 3004 sec_start = header->feat_offset; 3005 lseek(fd, sec_start + sec_size, SEEK_SET); 3006 3007 for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) { 3008 if (do_write_feat(&ff, feat, &p, evlist)) 3009 perf_header__clear_feat(header, feat); 3010 } 3011 3012 lseek(fd, sec_start, SEEK_SET); 3013 /* 3014 * may write more than needed due to dropped feature, but 3015 * this is okay, reader will skip the missing entries 3016 */ 3017 err = do_write(&ff, feat_sec, sec_size); 3018 if (err < 0) 3019 pr_debug("failed to write feature section\n"); 3020 free(feat_sec); 3021 return err; 3022 } 3023 3024 int perf_header__write_pipe(int fd) 3025 { 3026 struct perf_pipe_file_header f_header; 3027 struct feat_fd ff; 3028 int err; 3029 3030 ff = (struct feat_fd){ .fd = fd }; 3031 3032 f_header = (struct perf_pipe_file_header){ 3033 .magic = PERF_MAGIC, 3034 .size = sizeof(f_header), 3035 }; 3036 3037 err = do_write(&ff, &f_header, sizeof(f_header)); 3038 if (err < 0) { 3039 pr_debug("failed to write perf pipe header\n"); 3040 return err; 3041 } 3042 3043 return 0; 3044 } 3045 3046 int perf_session__write_header(struct perf_session *session, 3047 struct perf_evlist *evlist, 3048 int fd, bool at_exit) 3049 { 3050 struct perf_file_header f_header; 3051 struct perf_file_attr f_attr; 3052 struct perf_header *header = &session->header; 3053 struct perf_evsel *evsel; 3054 struct feat_fd ff; 3055 u64 attr_offset; 3056 int err; 3057 3058 ff = (struct feat_fd){ .fd = fd}; 3059 lseek(fd, sizeof(f_header), SEEK_SET); 3060 3061 evlist__for_each_entry(session->evlist, evsel) { 3062 evsel->id_offset = lseek(fd, 0, SEEK_CUR); 3063 err = do_write(&ff, evsel->id, evsel->ids * sizeof(u64)); 3064 if (err < 0) { 3065 pr_debug("failed to write perf header\n"); 3066 return err; 3067 } 3068 } 3069 3070 attr_offset = lseek(ff.fd, 0, SEEK_CUR); 3071 3072 evlist__for_each_entry(evlist, evsel) { 3073 f_attr = (struct perf_file_attr){ 3074 .attr = evsel->attr, 3075 .ids = { 3076 .offset = evsel->id_offset, 3077 .size = evsel->ids * sizeof(u64), 3078 } 3079 }; 3080 err = do_write(&ff, &f_attr, sizeof(f_attr)); 3081 if (err < 0) { 3082 pr_debug("failed to write perf header attribute\n"); 3083 return err; 3084 } 3085 } 3086 3087 if (!header->data_offset) 3088 header->data_offset = lseek(fd, 0, SEEK_CUR); 3089 header->feat_offset = header->data_offset + header->data_size; 3090 3091 if (at_exit) { 3092 err = perf_header__adds_write(header, evlist, fd); 3093 if (err < 0) 3094 return err; 3095 } 3096 3097 f_header = (struct perf_file_header){ 3098 .magic = PERF_MAGIC, 3099 .size = sizeof(f_header), 3100 .attr_size = sizeof(f_attr), 3101 .attrs = { 3102 .offset = attr_offset, 3103 .size = evlist->nr_entries * sizeof(f_attr), 3104 }, 3105 .data = { 3106 .offset = header->data_offset, 3107 .size = header->data_size, 3108 }, 3109 /* event_types is ignored, store zeros */ 3110 }; 3111 3112 memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features)); 3113 3114 lseek(fd, 0, SEEK_SET); 3115 err = do_write(&ff, &f_header, sizeof(f_header)); 3116 if (err < 0) { 3117 pr_debug("failed to write perf header\n"); 3118 return err; 3119 } 3120 lseek(fd, header->data_offset + header->data_size, SEEK_SET); 3121 3122 return 0; 3123 } 3124 3125 static int perf_header__getbuffer64(struct perf_header *header, 3126 int fd, void *buf, size_t size) 3127 { 3128 if (readn(fd, buf, size) <= 0) 3129 return -1; 3130 3131 if (header->needs_swap) 3132 mem_bswap_64(buf, size); 3133 3134 return 0; 3135 } 3136 3137 int perf_header__process_sections(struct perf_header *header, int fd, 3138 void *data, 3139 int (*process)(struct perf_file_section *section, 3140 struct perf_header *ph, 3141 int feat, int fd, void *data)) 3142 { 3143 struct perf_file_section *feat_sec, *sec; 3144 int nr_sections; 3145 int sec_size; 3146 int feat; 3147 int err; 3148 3149 nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS); 3150 if (!nr_sections) 3151 return 0; 3152 3153 feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec)); 3154 if (!feat_sec) 3155 return -1; 3156 3157 sec_size = sizeof(*feat_sec) * nr_sections; 3158 3159 lseek(fd, header->feat_offset, SEEK_SET); 3160 3161 err = perf_header__getbuffer64(header, fd, feat_sec, sec_size); 3162 if (err < 0) 3163 goto out_free; 3164 3165 for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) { 3166 err = process(sec++, header, feat, fd, data); 3167 if (err < 0) 3168 goto out_free; 3169 } 3170 err = 0; 3171 out_free: 3172 free(feat_sec); 3173 return err; 3174 } 3175 3176 static const int attr_file_abi_sizes[] = { 3177 [0] = PERF_ATTR_SIZE_VER0, 3178 [1] = PERF_ATTR_SIZE_VER1, 3179 [2] = PERF_ATTR_SIZE_VER2, 3180 [3] = PERF_ATTR_SIZE_VER3, 3181 [4] = PERF_ATTR_SIZE_VER4, 3182 0, 3183 }; 3184 3185 /* 3186 * In the legacy file format, the magic number is not used to encode endianness. 3187 * hdr_sz was used to encode endianness. But given that hdr_sz can vary based 3188 * on ABI revisions, we need to try all combinations for all endianness to 3189 * detect the endianness. 3190 */ 3191 static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph) 3192 { 3193 uint64_t ref_size, attr_size; 3194 int i; 3195 3196 for (i = 0 ; attr_file_abi_sizes[i]; i++) { 3197 ref_size = attr_file_abi_sizes[i] 3198 + sizeof(struct perf_file_section); 3199 if (hdr_sz != ref_size) { 3200 attr_size = bswap_64(hdr_sz); 3201 if (attr_size != ref_size) 3202 continue; 3203 3204 ph->needs_swap = true; 3205 } 3206 pr_debug("ABI%d perf.data file detected, need_swap=%d\n", 3207 i, 3208 ph->needs_swap); 3209 return 0; 3210 } 3211 /* could not determine endianness */ 3212 return -1; 3213 } 3214 3215 #define PERF_PIPE_HDR_VER0 16 3216 3217 static const size_t attr_pipe_abi_sizes[] = { 3218 [0] = PERF_PIPE_HDR_VER0, 3219 0, 3220 }; 3221 3222 /* 3223 * In the legacy pipe format, there is an implicit assumption that endiannesss 3224 * between host recording the samples, and host parsing the samples is the 3225 * same. This is not always the case given that the pipe output may always be 3226 * redirected into a file and analyzed on a different machine with possibly a 3227 * different endianness and perf_event ABI revsions in the perf tool itself. 3228 */ 3229 static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph) 3230 { 3231 u64 attr_size; 3232 int i; 3233 3234 for (i = 0 ; attr_pipe_abi_sizes[i]; i++) { 3235 if (hdr_sz != attr_pipe_abi_sizes[i]) { 3236 attr_size = bswap_64(hdr_sz); 3237 if (attr_size != hdr_sz) 3238 continue; 3239 3240 ph->needs_swap = true; 3241 } 3242 pr_debug("Pipe ABI%d perf.data file detected\n", i); 3243 return 0; 3244 } 3245 return -1; 3246 } 3247 3248 bool is_perf_magic(u64 magic) 3249 { 3250 if (!memcmp(&magic, __perf_magic1, sizeof(magic)) 3251 || magic == __perf_magic2 3252 || magic == __perf_magic2_sw) 3253 return true; 3254 3255 return false; 3256 } 3257 3258 static int check_magic_endian(u64 magic, uint64_t hdr_sz, 3259 bool is_pipe, struct perf_header *ph) 3260 { 3261 int ret; 3262 3263 /* check for legacy format */ 3264 ret = memcmp(&magic, __perf_magic1, sizeof(magic)); 3265 if (ret == 0) { 3266 ph->version = PERF_HEADER_VERSION_1; 3267 pr_debug("legacy perf.data format\n"); 3268 if (is_pipe) 3269 return try_all_pipe_abis(hdr_sz, ph); 3270 3271 return try_all_file_abis(hdr_sz, ph); 3272 } 3273 /* 3274 * the new magic number serves two purposes: 3275 * - unique number to identify actual perf.data files 3276 * - encode endianness of file 3277 */ 3278 ph->version = PERF_HEADER_VERSION_2; 3279 3280 /* check magic number with one endianness */ 3281 if (magic == __perf_magic2) 3282 return 0; 3283 3284 /* check magic number with opposite endianness */ 3285 if (magic != __perf_magic2_sw) 3286 return -1; 3287 3288 ph->needs_swap = true; 3289 3290 return 0; 3291 } 3292 3293 int perf_file_header__read(struct perf_file_header *header, 3294 struct perf_header *ph, int fd) 3295 { 3296 ssize_t ret; 3297 3298 lseek(fd, 0, SEEK_SET); 3299 3300 ret = readn(fd, header, sizeof(*header)); 3301 if (ret <= 0) 3302 return -1; 3303 3304 if (check_magic_endian(header->magic, 3305 header->attr_size, false, ph) < 0) { 3306 pr_debug("magic/endian check failed\n"); 3307 return -1; 3308 } 3309 3310 if (ph->needs_swap) { 3311 mem_bswap_64(header, offsetof(struct perf_file_header, 3312 adds_features)); 3313 } 3314 3315 if (header->size != sizeof(*header)) { 3316 /* Support the previous format */ 3317 if (header->size == offsetof(typeof(*header), adds_features)) 3318 bitmap_zero(header->adds_features, HEADER_FEAT_BITS); 3319 else 3320 return -1; 3321 } else if (ph->needs_swap) { 3322 /* 3323 * feature bitmap is declared as an array of unsigned longs -- 3324 * not good since its size can differ between the host that 3325 * generated the data file and the host analyzing the file. 3326 * 3327 * We need to handle endianness, but we don't know the size of 3328 * the unsigned long where the file was generated. Take a best 3329 * guess at determining it: try 64-bit swap first (ie., file 3330 * created on a 64-bit host), and check if the hostname feature 3331 * bit is set (this feature bit is forced on as of fbe96f2). 3332 * If the bit is not, undo the 64-bit swap and try a 32-bit 3333 * swap. If the hostname bit is still not set (e.g., older data 3334 * file), punt and fallback to the original behavior -- 3335 * clearing all feature bits and setting buildid. 3336 */ 3337 mem_bswap_64(&header->adds_features, 3338 BITS_TO_U64(HEADER_FEAT_BITS)); 3339 3340 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) { 3341 /* unswap as u64 */ 3342 mem_bswap_64(&header->adds_features, 3343 BITS_TO_U64(HEADER_FEAT_BITS)); 3344 3345 /* unswap as u32 */ 3346 mem_bswap_32(&header->adds_features, 3347 BITS_TO_U32(HEADER_FEAT_BITS)); 3348 } 3349 3350 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) { 3351 bitmap_zero(header->adds_features, HEADER_FEAT_BITS); 3352 set_bit(HEADER_BUILD_ID, header->adds_features); 3353 } 3354 } 3355 3356 memcpy(&ph->adds_features, &header->adds_features, 3357 sizeof(ph->adds_features)); 3358 3359 ph->data_offset = header->data.offset; 3360 ph->data_size = header->data.size; 3361 ph->feat_offset = header->data.offset + header->data.size; 3362 return 0; 3363 } 3364 3365 static int perf_file_section__process(struct perf_file_section *section, 3366 struct perf_header *ph, 3367 int feat, int fd, void *data) 3368 { 3369 struct feat_fd fdd = { 3370 .fd = fd, 3371 .ph = ph, 3372 .size = section->size, 3373 .offset = section->offset, 3374 }; 3375 3376 if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) { 3377 pr_debug("Failed to lseek to %" PRIu64 " offset for feature " 3378 "%d, continuing...\n", section->offset, feat); 3379 return 0; 3380 } 3381 3382 if (feat >= HEADER_LAST_FEATURE) { 3383 pr_debug("unknown feature %d, continuing...\n", feat); 3384 return 0; 3385 } 3386 3387 if (!feat_ops[feat].process) 3388 return 0; 3389 3390 return feat_ops[feat].process(&fdd, data); 3391 } 3392 3393 static int perf_file_header__read_pipe(struct perf_pipe_file_header *header, 3394 struct perf_header *ph, int fd, 3395 bool repipe) 3396 { 3397 struct feat_fd ff = { 3398 .fd = STDOUT_FILENO, 3399 .ph = ph, 3400 }; 3401 ssize_t ret; 3402 3403 ret = readn(fd, header, sizeof(*header)); 3404 if (ret <= 0) 3405 return -1; 3406 3407 if (check_magic_endian(header->magic, header->size, true, ph) < 0) { 3408 pr_debug("endian/magic failed\n"); 3409 return -1; 3410 } 3411 3412 if (ph->needs_swap) 3413 header->size = bswap_64(header->size); 3414 3415 if (repipe && do_write(&ff, header, sizeof(*header)) < 0) 3416 return -1; 3417 3418 return 0; 3419 } 3420 3421 static int perf_header__read_pipe(struct perf_session *session) 3422 { 3423 struct perf_header *header = &session->header; 3424 struct perf_pipe_file_header f_header; 3425 3426 if (perf_file_header__read_pipe(&f_header, header, 3427 perf_data__fd(session->data), 3428 session->repipe) < 0) { 3429 pr_debug("incompatible file format\n"); 3430 return -EINVAL; 3431 } 3432 3433 return 0; 3434 } 3435 3436 static int read_attr(int fd, struct perf_header *ph, 3437 struct perf_file_attr *f_attr) 3438 { 3439 struct perf_event_attr *attr = &f_attr->attr; 3440 size_t sz, left; 3441 size_t our_sz = sizeof(f_attr->attr); 3442 ssize_t ret; 3443 3444 memset(f_attr, 0, sizeof(*f_attr)); 3445 3446 /* read minimal guaranteed structure */ 3447 ret = readn(fd, attr, PERF_ATTR_SIZE_VER0); 3448 if (ret <= 0) { 3449 pr_debug("cannot read %d bytes of header attr\n", 3450 PERF_ATTR_SIZE_VER0); 3451 return -1; 3452 } 3453 3454 /* on file perf_event_attr size */ 3455 sz = attr->size; 3456 3457 if (ph->needs_swap) 3458 sz = bswap_32(sz); 3459 3460 if (sz == 0) { 3461 /* assume ABI0 */ 3462 sz = PERF_ATTR_SIZE_VER0; 3463 } else if (sz > our_sz) { 3464 pr_debug("file uses a more recent and unsupported ABI" 3465 " (%zu bytes extra)\n", sz - our_sz); 3466 return -1; 3467 } 3468 /* what we have not yet read and that we know about */ 3469 left = sz - PERF_ATTR_SIZE_VER0; 3470 if (left) { 3471 void *ptr = attr; 3472 ptr += PERF_ATTR_SIZE_VER0; 3473 3474 ret = readn(fd, ptr, left); 3475 } 3476 /* read perf_file_section, ids are read in caller */ 3477 ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids)); 3478 3479 return ret <= 0 ? -1 : 0; 3480 } 3481 3482 static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel, 3483 struct tep_handle *pevent) 3484 { 3485 struct tep_event *event; 3486 char bf[128]; 3487 3488 /* already prepared */ 3489 if (evsel->tp_format) 3490 return 0; 3491 3492 if (pevent == NULL) { 3493 pr_debug("broken or missing trace data\n"); 3494 return -1; 3495 } 3496 3497 event = tep_find_event(pevent, evsel->attr.config); 3498 if (event == NULL) { 3499 pr_debug("cannot find event format for %d\n", (int)evsel->attr.config); 3500 return -1; 3501 } 3502 3503 if (!evsel->name) { 3504 snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name); 3505 evsel->name = strdup(bf); 3506 if (evsel->name == NULL) 3507 return -1; 3508 } 3509 3510 evsel->tp_format = event; 3511 return 0; 3512 } 3513 3514 static int perf_evlist__prepare_tracepoint_events(struct perf_evlist *evlist, 3515 struct tep_handle *pevent) 3516 { 3517 struct perf_evsel *pos; 3518 3519 evlist__for_each_entry(evlist, pos) { 3520 if (pos->attr.type == PERF_TYPE_TRACEPOINT && 3521 perf_evsel__prepare_tracepoint_event(pos, pevent)) 3522 return -1; 3523 } 3524 3525 return 0; 3526 } 3527 3528 int perf_session__read_header(struct perf_session *session) 3529 { 3530 struct perf_data *data = session->data; 3531 struct perf_header *header = &session->header; 3532 struct perf_file_header f_header; 3533 struct perf_file_attr f_attr; 3534 u64 f_id; 3535 int nr_attrs, nr_ids, i, j; 3536 int fd = perf_data__fd(data); 3537 3538 session->evlist = perf_evlist__new(); 3539 if (session->evlist == NULL) 3540 return -ENOMEM; 3541 3542 session->evlist->env = &header->env; 3543 session->machines.host.env = &header->env; 3544 if (perf_data__is_pipe(data)) 3545 return perf_header__read_pipe(session); 3546 3547 if (perf_file_header__read(&f_header, header, fd) < 0) 3548 return -EINVAL; 3549 3550 /* 3551 * Sanity check that perf.data was written cleanly; data size is 3552 * initialized to 0 and updated only if the on_exit function is run. 3553 * If data size is still 0 then the file contains only partial 3554 * information. Just warn user and process it as much as it can. 3555 */ 3556 if (f_header.data.size == 0) { 3557 pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n" 3558 "Was the 'perf record' command properly terminated?\n", 3559 data->file.path); 3560 } 3561 3562 nr_attrs = f_header.attrs.size / f_header.attr_size; 3563 lseek(fd, f_header.attrs.offset, SEEK_SET); 3564 3565 for (i = 0; i < nr_attrs; i++) { 3566 struct perf_evsel *evsel; 3567 off_t tmp; 3568 3569 if (read_attr(fd, header, &f_attr) < 0) 3570 goto out_errno; 3571 3572 if (header->needs_swap) { 3573 f_attr.ids.size = bswap_64(f_attr.ids.size); 3574 f_attr.ids.offset = bswap_64(f_attr.ids.offset); 3575 perf_event__attr_swap(&f_attr.attr); 3576 } 3577 3578 tmp = lseek(fd, 0, SEEK_CUR); 3579 evsel = perf_evsel__new(&f_attr.attr); 3580 3581 if (evsel == NULL) 3582 goto out_delete_evlist; 3583 3584 evsel->needs_swap = header->needs_swap; 3585 /* 3586 * Do it before so that if perf_evsel__alloc_id fails, this 3587 * entry gets purged too at perf_evlist__delete(). 3588 */ 3589 perf_evlist__add(session->evlist, evsel); 3590 3591 nr_ids = f_attr.ids.size / sizeof(u64); 3592 /* 3593 * We don't have the cpu and thread maps on the header, so 3594 * for allocating the perf_sample_id table we fake 1 cpu and 3595 * hattr->ids threads. 3596 */ 3597 if (perf_evsel__alloc_id(evsel, 1, nr_ids)) 3598 goto out_delete_evlist; 3599 3600 lseek(fd, f_attr.ids.offset, SEEK_SET); 3601 3602 for (j = 0; j < nr_ids; j++) { 3603 if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id))) 3604 goto out_errno; 3605 3606 perf_evlist__id_add(session->evlist, evsel, 0, j, f_id); 3607 } 3608 3609 lseek(fd, tmp, SEEK_SET); 3610 } 3611 3612 perf_header__process_sections(header, fd, &session->tevent, 3613 perf_file_section__process); 3614 3615 if (perf_evlist__prepare_tracepoint_events(session->evlist, 3616 session->tevent.pevent)) 3617 goto out_delete_evlist; 3618 3619 return 0; 3620 out_errno: 3621 return -errno; 3622 3623 out_delete_evlist: 3624 perf_evlist__delete(session->evlist); 3625 session->evlist = NULL; 3626 return -ENOMEM; 3627 } 3628 3629 int perf_event__synthesize_attr(struct perf_tool *tool, 3630 struct perf_event_attr *attr, u32 ids, u64 *id, 3631 perf_event__handler_t process) 3632 { 3633 union perf_event *ev; 3634 size_t size; 3635 int err; 3636 3637 size = sizeof(struct perf_event_attr); 3638 size = PERF_ALIGN(size, sizeof(u64)); 3639 size += sizeof(struct perf_event_header); 3640 size += ids * sizeof(u64); 3641 3642 ev = malloc(size); 3643 3644 if (ev == NULL) 3645 return -ENOMEM; 3646 3647 ev->attr.attr = *attr; 3648 memcpy(ev->attr.id, id, ids * sizeof(u64)); 3649 3650 ev->attr.header.type = PERF_RECORD_HEADER_ATTR; 3651 ev->attr.header.size = (u16)size; 3652 3653 if (ev->attr.header.size == size) 3654 err = process(tool, ev, NULL, NULL); 3655 else 3656 err = -E2BIG; 3657 3658 free(ev); 3659 3660 return err; 3661 } 3662 3663 int perf_event__synthesize_features(struct perf_tool *tool, 3664 struct perf_session *session, 3665 struct perf_evlist *evlist, 3666 perf_event__handler_t process) 3667 { 3668 struct perf_header *header = &session->header; 3669 struct feat_fd ff; 3670 struct feature_event *fe; 3671 size_t sz, sz_hdr; 3672 int feat, ret; 3673 3674 sz_hdr = sizeof(fe->header); 3675 sz = sizeof(union perf_event); 3676 /* get a nice alignment */ 3677 sz = PERF_ALIGN(sz, page_size); 3678 3679 memset(&ff, 0, sizeof(ff)); 3680 3681 ff.buf = malloc(sz); 3682 if (!ff.buf) 3683 return -ENOMEM; 3684 3685 ff.size = sz - sz_hdr; 3686 ff.ph = &session->header; 3687 3688 for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) { 3689 if (!feat_ops[feat].synthesize) { 3690 pr_debug("No record header feature for header :%d\n", feat); 3691 continue; 3692 } 3693 3694 ff.offset = sizeof(*fe); 3695 3696 ret = feat_ops[feat].write(&ff, evlist); 3697 if (ret || ff.offset <= (ssize_t)sizeof(*fe)) { 3698 pr_debug("Error writing feature\n"); 3699 continue; 3700 } 3701 /* ff.buf may have changed due to realloc in do_write() */ 3702 fe = ff.buf; 3703 memset(fe, 0, sizeof(*fe)); 3704 3705 fe->feat_id = feat; 3706 fe->header.type = PERF_RECORD_HEADER_FEATURE; 3707 fe->header.size = ff.offset; 3708 3709 ret = process(tool, ff.buf, NULL, NULL); 3710 if (ret) { 3711 free(ff.buf); 3712 return ret; 3713 } 3714 } 3715 3716 /* Send HEADER_LAST_FEATURE mark. */ 3717 fe = ff.buf; 3718 fe->feat_id = HEADER_LAST_FEATURE; 3719 fe->header.type = PERF_RECORD_HEADER_FEATURE; 3720 fe->header.size = sizeof(*fe); 3721 3722 ret = process(tool, ff.buf, NULL, NULL); 3723 3724 free(ff.buf); 3725 return ret; 3726 } 3727 3728 int perf_event__process_feature(struct perf_session *session, 3729 union perf_event *event) 3730 { 3731 struct perf_tool *tool = session->tool; 3732 struct feat_fd ff = { .fd = 0 }; 3733 struct feature_event *fe = (struct feature_event *)event; 3734 int type = fe->header.type; 3735 u64 feat = fe->feat_id; 3736 3737 if (type < 0 || type >= PERF_RECORD_HEADER_MAX) { 3738 pr_warning("invalid record type %d in pipe-mode\n", type); 3739 return 0; 3740 } 3741 if (feat == HEADER_RESERVED || feat >= HEADER_LAST_FEATURE) { 3742 pr_warning("invalid record type %d in pipe-mode\n", type); 3743 return -1; 3744 } 3745 3746 if (!feat_ops[feat].process) 3747 return 0; 3748 3749 ff.buf = (void *)fe->data; 3750 ff.size = event->header.size - sizeof(*fe); 3751 ff.ph = &session->header; 3752 3753 if (feat_ops[feat].process(&ff, NULL)) 3754 return -1; 3755 3756 if (!feat_ops[feat].print || !tool->show_feat_hdr) 3757 return 0; 3758 3759 if (!feat_ops[feat].full_only || 3760 tool->show_feat_hdr >= SHOW_FEAT_HEADER_FULL_INFO) { 3761 feat_ops[feat].print(&ff, stdout); 3762 } else { 3763 fprintf(stdout, "# %s info available, use -I to display\n", 3764 feat_ops[feat].name); 3765 } 3766 3767 return 0; 3768 } 3769 3770 static struct event_update_event * 3771 event_update_event__new(size_t size, u64 type, u64 id) 3772 { 3773 struct event_update_event *ev; 3774 3775 size += sizeof(*ev); 3776 size = PERF_ALIGN(size, sizeof(u64)); 3777 3778 ev = zalloc(size); 3779 if (ev) { 3780 ev->header.type = PERF_RECORD_EVENT_UPDATE; 3781 ev->header.size = (u16)size; 3782 ev->type = type; 3783 ev->id = id; 3784 } 3785 return ev; 3786 } 3787 3788 int 3789 perf_event__synthesize_event_update_unit(struct perf_tool *tool, 3790 struct perf_evsel *evsel, 3791 perf_event__handler_t process) 3792 { 3793 struct event_update_event *ev; 3794 size_t size = strlen(evsel->unit); 3795 int err; 3796 3797 ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->id[0]); 3798 if (ev == NULL) 3799 return -ENOMEM; 3800 3801 strlcpy(ev->data, evsel->unit, size + 1); 3802 err = process(tool, (union perf_event *)ev, NULL, NULL); 3803 free(ev); 3804 return err; 3805 } 3806 3807 int 3808 perf_event__synthesize_event_update_scale(struct perf_tool *tool, 3809 struct perf_evsel *evsel, 3810 perf_event__handler_t process) 3811 { 3812 struct event_update_event *ev; 3813 struct event_update_event_scale *ev_data; 3814 int err; 3815 3816 ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->id[0]); 3817 if (ev == NULL) 3818 return -ENOMEM; 3819 3820 ev_data = (struct event_update_event_scale *) ev->data; 3821 ev_data->scale = evsel->scale; 3822 err = process(tool, (union perf_event*) ev, NULL, NULL); 3823 free(ev); 3824 return err; 3825 } 3826 3827 int 3828 perf_event__synthesize_event_update_name(struct perf_tool *tool, 3829 struct perf_evsel *evsel, 3830 perf_event__handler_t process) 3831 { 3832 struct event_update_event *ev; 3833 size_t len = strlen(evsel->name); 3834 int err; 3835 3836 ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->id[0]); 3837 if (ev == NULL) 3838 return -ENOMEM; 3839 3840 strlcpy(ev->data, evsel->name, len + 1); 3841 err = process(tool, (union perf_event*) ev, NULL, NULL); 3842 free(ev); 3843 return err; 3844 } 3845 3846 int 3847 perf_event__synthesize_event_update_cpus(struct perf_tool *tool, 3848 struct perf_evsel *evsel, 3849 perf_event__handler_t process) 3850 { 3851 size_t size = sizeof(struct event_update_event); 3852 struct event_update_event *ev; 3853 int max, err; 3854 u16 type; 3855 3856 if (!evsel->own_cpus) 3857 return 0; 3858 3859 ev = cpu_map_data__alloc(evsel->own_cpus, &size, &type, &max); 3860 if (!ev) 3861 return -ENOMEM; 3862 3863 ev->header.type = PERF_RECORD_EVENT_UPDATE; 3864 ev->header.size = (u16)size; 3865 ev->type = PERF_EVENT_UPDATE__CPUS; 3866 ev->id = evsel->id[0]; 3867 3868 cpu_map_data__synthesize((struct cpu_map_data *) ev->data, 3869 evsel->own_cpus, 3870 type, max); 3871 3872 err = process(tool, (union perf_event*) ev, NULL, NULL); 3873 free(ev); 3874 return err; 3875 } 3876 3877 size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp) 3878 { 3879 struct event_update_event *ev = &event->event_update; 3880 struct event_update_event_scale *ev_scale; 3881 struct event_update_event_cpus *ev_cpus; 3882 struct cpu_map *map; 3883 size_t ret; 3884 3885 ret = fprintf(fp, "\n... id: %" PRIu64 "\n", ev->id); 3886 3887 switch (ev->type) { 3888 case PERF_EVENT_UPDATE__SCALE: 3889 ev_scale = (struct event_update_event_scale *) ev->data; 3890 ret += fprintf(fp, "... scale: %f\n", ev_scale->scale); 3891 break; 3892 case PERF_EVENT_UPDATE__UNIT: 3893 ret += fprintf(fp, "... unit: %s\n", ev->data); 3894 break; 3895 case PERF_EVENT_UPDATE__NAME: 3896 ret += fprintf(fp, "... name: %s\n", ev->data); 3897 break; 3898 case PERF_EVENT_UPDATE__CPUS: 3899 ev_cpus = (struct event_update_event_cpus *) ev->data; 3900 ret += fprintf(fp, "... "); 3901 3902 map = cpu_map__new_data(&ev_cpus->cpus); 3903 if (map) 3904 ret += cpu_map__fprintf(map, fp); 3905 else 3906 ret += fprintf(fp, "failed to get cpus\n"); 3907 break; 3908 default: 3909 ret += fprintf(fp, "... unknown type\n"); 3910 break; 3911 } 3912 3913 return ret; 3914 } 3915 3916 int perf_event__synthesize_attrs(struct perf_tool *tool, 3917 struct perf_evlist *evlist, 3918 perf_event__handler_t process) 3919 { 3920 struct perf_evsel *evsel; 3921 int err = 0; 3922 3923 evlist__for_each_entry(evlist, evsel) { 3924 err = perf_event__synthesize_attr(tool, &evsel->attr, evsel->ids, 3925 evsel->id, process); 3926 if (err) { 3927 pr_debug("failed to create perf header attribute\n"); 3928 return err; 3929 } 3930 } 3931 3932 return err; 3933 } 3934 3935 static bool has_unit(struct perf_evsel *counter) 3936 { 3937 return counter->unit && *counter->unit; 3938 } 3939 3940 static bool has_scale(struct perf_evsel *counter) 3941 { 3942 return counter->scale != 1; 3943 } 3944 3945 int perf_event__synthesize_extra_attr(struct perf_tool *tool, 3946 struct perf_evlist *evsel_list, 3947 perf_event__handler_t process, 3948 bool is_pipe) 3949 { 3950 struct perf_evsel *counter; 3951 int err; 3952 3953 /* 3954 * Synthesize other events stuff not carried within 3955 * attr event - unit, scale, name 3956 */ 3957 evlist__for_each_entry(evsel_list, counter) { 3958 if (!counter->supported) 3959 continue; 3960 3961 /* 3962 * Synthesize unit and scale only if it's defined. 3963 */ 3964 if (has_unit(counter)) { 3965 err = perf_event__synthesize_event_update_unit(tool, counter, process); 3966 if (err < 0) { 3967 pr_err("Couldn't synthesize evsel unit.\n"); 3968 return err; 3969 } 3970 } 3971 3972 if (has_scale(counter)) { 3973 err = perf_event__synthesize_event_update_scale(tool, counter, process); 3974 if (err < 0) { 3975 pr_err("Couldn't synthesize evsel counter.\n"); 3976 return err; 3977 } 3978 } 3979 3980 if (counter->own_cpus) { 3981 err = perf_event__synthesize_event_update_cpus(tool, counter, process); 3982 if (err < 0) { 3983 pr_err("Couldn't synthesize evsel cpus.\n"); 3984 return err; 3985 } 3986 } 3987 3988 /* 3989 * Name is needed only for pipe output, 3990 * perf.data carries event names. 3991 */ 3992 if (is_pipe) { 3993 err = perf_event__synthesize_event_update_name(tool, counter, process); 3994 if (err < 0) { 3995 pr_err("Couldn't synthesize evsel name.\n"); 3996 return err; 3997 } 3998 } 3999 } 4000 return 0; 4001 } 4002 4003 int perf_event__process_attr(struct perf_tool *tool __maybe_unused, 4004 union perf_event *event, 4005 struct perf_evlist **pevlist) 4006 { 4007 u32 i, ids, n_ids; 4008 struct perf_evsel *evsel; 4009 struct perf_evlist *evlist = *pevlist; 4010 4011 if (evlist == NULL) { 4012 *pevlist = evlist = perf_evlist__new(); 4013 if (evlist == NULL) 4014 return -ENOMEM; 4015 } 4016 4017 evsel = perf_evsel__new(&event->attr.attr); 4018 if (evsel == NULL) 4019 return -ENOMEM; 4020 4021 perf_evlist__add(evlist, evsel); 4022 4023 ids = event->header.size; 4024 ids -= (void *)&event->attr.id - (void *)event; 4025 n_ids = ids / sizeof(u64); 4026 /* 4027 * We don't have the cpu and thread maps on the header, so 4028 * for allocating the perf_sample_id table we fake 1 cpu and 4029 * hattr->ids threads. 4030 */ 4031 if (perf_evsel__alloc_id(evsel, 1, n_ids)) 4032 return -ENOMEM; 4033 4034 for (i = 0; i < n_ids; i++) { 4035 perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]); 4036 } 4037 4038 return 0; 4039 } 4040 4041 int perf_event__process_event_update(struct perf_tool *tool __maybe_unused, 4042 union perf_event *event, 4043 struct perf_evlist **pevlist) 4044 { 4045 struct event_update_event *ev = &event->event_update; 4046 struct event_update_event_scale *ev_scale; 4047 struct event_update_event_cpus *ev_cpus; 4048 struct perf_evlist *evlist; 4049 struct perf_evsel *evsel; 4050 struct cpu_map *map; 4051 4052 if (!pevlist || *pevlist == NULL) 4053 return -EINVAL; 4054 4055 evlist = *pevlist; 4056 4057 evsel = perf_evlist__id2evsel(evlist, ev->id); 4058 if (evsel == NULL) 4059 return -EINVAL; 4060 4061 switch (ev->type) { 4062 case PERF_EVENT_UPDATE__UNIT: 4063 evsel->unit = strdup(ev->data); 4064 break; 4065 case PERF_EVENT_UPDATE__NAME: 4066 evsel->name = strdup(ev->data); 4067 break; 4068 case PERF_EVENT_UPDATE__SCALE: 4069 ev_scale = (struct event_update_event_scale *) ev->data; 4070 evsel->scale = ev_scale->scale; 4071 break; 4072 case PERF_EVENT_UPDATE__CPUS: 4073 ev_cpus = (struct event_update_event_cpus *) ev->data; 4074 4075 map = cpu_map__new_data(&ev_cpus->cpus); 4076 if (map) 4077 evsel->own_cpus = map; 4078 else 4079 pr_err("failed to get event_update cpus\n"); 4080 default: 4081 break; 4082 } 4083 4084 return 0; 4085 } 4086 4087 int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd, 4088 struct perf_evlist *evlist, 4089 perf_event__handler_t process) 4090 { 4091 union perf_event ev; 4092 struct tracing_data *tdata; 4093 ssize_t size = 0, aligned_size = 0, padding; 4094 struct feat_fd ff; 4095 int err __maybe_unused = 0; 4096 4097 /* 4098 * We are going to store the size of the data followed 4099 * by the data contents. Since the fd descriptor is a pipe, 4100 * we cannot seek back to store the size of the data once 4101 * we know it. Instead we: 4102 * 4103 * - write the tracing data to the temp file 4104 * - get/write the data size to pipe 4105 * - write the tracing data from the temp file 4106 * to the pipe 4107 */ 4108 tdata = tracing_data_get(&evlist->entries, fd, true); 4109 if (!tdata) 4110 return -1; 4111 4112 memset(&ev, 0, sizeof(ev)); 4113 4114 ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA; 4115 size = tdata->size; 4116 aligned_size = PERF_ALIGN(size, sizeof(u64)); 4117 padding = aligned_size - size; 4118 ev.tracing_data.header.size = sizeof(ev.tracing_data); 4119 ev.tracing_data.size = aligned_size; 4120 4121 process(tool, &ev, NULL, NULL); 4122 4123 /* 4124 * The put function will copy all the tracing data 4125 * stored in temp file to the pipe. 4126 */ 4127 tracing_data_put(tdata); 4128 4129 ff = (struct feat_fd){ .fd = fd }; 4130 if (write_padded(&ff, NULL, 0, padding)) 4131 return -1; 4132 4133 return aligned_size; 4134 } 4135 4136 int perf_event__process_tracing_data(struct perf_session *session, 4137 union perf_event *event) 4138 { 4139 ssize_t size_read, padding, size = event->tracing_data.size; 4140 int fd = perf_data__fd(session->data); 4141 off_t offset = lseek(fd, 0, SEEK_CUR); 4142 char buf[BUFSIZ]; 4143 4144 /* setup for reading amidst mmap */ 4145 lseek(fd, offset + sizeof(struct tracing_data_event), 4146 SEEK_SET); 4147 4148 size_read = trace_report(fd, &session->tevent, 4149 session->repipe); 4150 padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read; 4151 4152 if (readn(fd, buf, padding) < 0) { 4153 pr_err("%s: reading input file", __func__); 4154 return -1; 4155 } 4156 if (session->repipe) { 4157 int retw = write(STDOUT_FILENO, buf, padding); 4158 if (retw <= 0 || retw != padding) { 4159 pr_err("%s: repiping tracing data padding", __func__); 4160 return -1; 4161 } 4162 } 4163 4164 if (size_read + padding != size) { 4165 pr_err("%s: tracing data size mismatch", __func__); 4166 return -1; 4167 } 4168 4169 perf_evlist__prepare_tracepoint_events(session->evlist, 4170 session->tevent.pevent); 4171 4172 return size_read + padding; 4173 } 4174 4175 int perf_event__synthesize_build_id(struct perf_tool *tool, 4176 struct dso *pos, u16 misc, 4177 perf_event__handler_t process, 4178 struct machine *machine) 4179 { 4180 union perf_event ev; 4181 size_t len; 4182 int err = 0; 4183 4184 if (!pos->hit) 4185 return err; 4186 4187 memset(&ev, 0, sizeof(ev)); 4188 4189 len = pos->long_name_len + 1; 4190 len = PERF_ALIGN(len, NAME_ALIGN); 4191 memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id)); 4192 ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID; 4193 ev.build_id.header.misc = misc; 4194 ev.build_id.pid = machine->pid; 4195 ev.build_id.header.size = sizeof(ev.build_id) + len; 4196 memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len); 4197 4198 err = process(tool, &ev, NULL, machine); 4199 4200 return err; 4201 } 4202 4203 int perf_event__process_build_id(struct perf_session *session, 4204 union perf_event *event) 4205 { 4206 __event_process_build_id(&event->build_id, 4207 event->build_id.filename, 4208 session); 4209 return 0; 4210 } 4211