1 // SPDX-License-Identifier: GPL-2.0 2 #include <errno.h> 3 #include <inttypes.h> 4 #include "util.h" 5 #include "string2.h" 6 #include <sys/param.h> 7 #include <sys/types.h> 8 #include <byteswap.h> 9 #include <unistd.h> 10 #include <stdio.h> 11 #include <stdlib.h> 12 #include <linux/compiler.h> 13 #include <linux/list.h> 14 #include <linux/kernel.h> 15 #include <linux/bitops.h> 16 #include <linux/stringify.h> 17 #include <sys/stat.h> 18 #include <sys/utsname.h> 19 #include <linux/time64.h> 20 #include <dirent.h> 21 22 #include "evlist.h" 23 #include "evsel.h" 24 #include "header.h" 25 #include "memswap.h" 26 #include "../perf.h" 27 #include "trace-event.h" 28 #include "session.h" 29 #include "symbol.h" 30 #include "debug.h" 31 #include "cpumap.h" 32 #include "pmu.h" 33 #include "vdso.h" 34 #include "strbuf.h" 35 #include "build-id.h" 36 #include "data.h" 37 #include <api/fs/fs.h> 38 #include "asm/bug.h" 39 #include "tool.h" 40 #include "time-utils.h" 41 #include "units.h" 42 43 #include "sane_ctype.h" 44 45 /* 46 * magic2 = "PERFILE2" 47 * must be a numerical value to let the endianness 48 * determine the memory layout. That way we are able 49 * to detect endianness when reading the perf.data file 50 * back. 51 * 52 * we check for legacy (PERFFILE) format. 53 */ 54 static const char *__perf_magic1 = "PERFFILE"; 55 static const u64 __perf_magic2 = 0x32454c4946524550ULL; 56 static const u64 __perf_magic2_sw = 0x50455246494c4532ULL; 57 58 #define PERF_MAGIC __perf_magic2 59 60 const char perf_version_string[] = PERF_VERSION; 61 62 struct perf_file_attr { 63 struct perf_event_attr attr; 64 struct perf_file_section ids; 65 }; 66 67 struct feat_fd { 68 struct perf_header *ph; 69 int fd; 70 void *buf; /* Either buf != NULL or fd >= 0 */ 71 ssize_t offset; 72 size_t size; 73 struct perf_evsel *events; 74 }; 75 76 void perf_header__set_feat(struct perf_header *header, int feat) 77 { 78 set_bit(feat, header->adds_features); 79 } 80 81 void perf_header__clear_feat(struct perf_header *header, int feat) 82 { 83 clear_bit(feat, header->adds_features); 84 } 85 86 bool perf_header__has_feat(const struct perf_header *header, int feat) 87 { 88 return test_bit(feat, header->adds_features); 89 } 90 91 static int __do_write_fd(struct feat_fd *ff, const void *buf, size_t size) 92 { 93 ssize_t ret = writen(ff->fd, buf, size); 94 95 if (ret != (ssize_t)size) 96 return ret < 0 ? (int)ret : -1; 97 return 0; 98 } 99 100 static int __do_write_buf(struct feat_fd *ff, const void *buf, size_t size) 101 { 102 /* struct perf_event_header::size is u16 */ 103 const size_t max_size = 0xffff - sizeof(struct perf_event_header); 104 size_t new_size = ff->size; 105 void *addr; 106 107 if (size + ff->offset > max_size) 108 return -E2BIG; 109 110 while (size > (new_size - ff->offset)) 111 new_size <<= 1; 112 new_size = min(max_size, new_size); 113 114 if (ff->size < new_size) { 115 addr = realloc(ff->buf, new_size); 116 if (!addr) 117 return -ENOMEM; 118 ff->buf = addr; 119 ff->size = new_size; 120 } 121 122 memcpy(ff->buf + ff->offset, buf, size); 123 ff->offset += size; 124 125 return 0; 126 } 127 128 /* Return: 0 if succeded, -ERR if failed. */ 129 int do_write(struct feat_fd *ff, const void *buf, size_t size) 130 { 131 if (!ff->buf) 132 return __do_write_fd(ff, buf, size); 133 return __do_write_buf(ff, buf, size); 134 } 135 136 /* Return: 0 if succeded, -ERR if failed. */ 137 static int do_write_bitmap(struct feat_fd *ff, unsigned long *set, u64 size) 138 { 139 u64 *p = (u64 *) set; 140 int i, ret; 141 142 ret = do_write(ff, &size, sizeof(size)); 143 if (ret < 0) 144 return ret; 145 146 for (i = 0; (u64) i < BITS_TO_U64(size); i++) { 147 ret = do_write(ff, p + i, sizeof(*p)); 148 if (ret < 0) 149 return ret; 150 } 151 152 return 0; 153 } 154 155 /* Return: 0 if succeded, -ERR if failed. */ 156 int write_padded(struct feat_fd *ff, const void *bf, 157 size_t count, size_t count_aligned) 158 { 159 static const char zero_buf[NAME_ALIGN]; 160 int err = do_write(ff, bf, count); 161 162 if (!err) 163 err = do_write(ff, zero_buf, count_aligned - count); 164 165 return err; 166 } 167 168 #define string_size(str) \ 169 (PERF_ALIGN((strlen(str) + 1), NAME_ALIGN) + sizeof(u32)) 170 171 /* Return: 0 if succeded, -ERR if failed. */ 172 static int do_write_string(struct feat_fd *ff, const char *str) 173 { 174 u32 len, olen; 175 int ret; 176 177 olen = strlen(str) + 1; 178 len = PERF_ALIGN(olen, NAME_ALIGN); 179 180 /* write len, incl. \0 */ 181 ret = do_write(ff, &len, sizeof(len)); 182 if (ret < 0) 183 return ret; 184 185 return write_padded(ff, str, olen, len); 186 } 187 188 static int __do_read_fd(struct feat_fd *ff, void *addr, ssize_t size) 189 { 190 ssize_t ret = readn(ff->fd, addr, size); 191 192 if (ret != size) 193 return ret < 0 ? (int)ret : -1; 194 return 0; 195 } 196 197 static int __do_read_buf(struct feat_fd *ff, void *addr, ssize_t size) 198 { 199 if (size > (ssize_t)ff->size - ff->offset) 200 return -1; 201 202 memcpy(addr, ff->buf + ff->offset, size); 203 ff->offset += size; 204 205 return 0; 206 207 } 208 209 static int __do_read(struct feat_fd *ff, void *addr, ssize_t size) 210 { 211 if (!ff->buf) 212 return __do_read_fd(ff, addr, size); 213 return __do_read_buf(ff, addr, size); 214 } 215 216 static int do_read_u32(struct feat_fd *ff, u32 *addr) 217 { 218 int ret; 219 220 ret = __do_read(ff, addr, sizeof(*addr)); 221 if (ret) 222 return ret; 223 224 if (ff->ph->needs_swap) 225 *addr = bswap_32(*addr); 226 return 0; 227 } 228 229 static int do_read_u64(struct feat_fd *ff, u64 *addr) 230 { 231 int ret; 232 233 ret = __do_read(ff, addr, sizeof(*addr)); 234 if (ret) 235 return ret; 236 237 if (ff->ph->needs_swap) 238 *addr = bswap_64(*addr); 239 return 0; 240 } 241 242 static char *do_read_string(struct feat_fd *ff) 243 { 244 u32 len; 245 char *buf; 246 247 if (do_read_u32(ff, &len)) 248 return NULL; 249 250 buf = malloc(len); 251 if (!buf) 252 return NULL; 253 254 if (!__do_read(ff, buf, len)) { 255 /* 256 * strings are padded by zeroes 257 * thus the actual strlen of buf 258 * may be less than len 259 */ 260 return buf; 261 } 262 263 free(buf); 264 return NULL; 265 } 266 267 /* Return: 0 if succeded, -ERR if failed. */ 268 static int do_read_bitmap(struct feat_fd *ff, unsigned long **pset, u64 *psize) 269 { 270 unsigned long *set; 271 u64 size, *p; 272 int i, ret; 273 274 ret = do_read_u64(ff, &size); 275 if (ret) 276 return ret; 277 278 set = bitmap_alloc(size); 279 if (!set) 280 return -ENOMEM; 281 282 bitmap_zero(set, size); 283 284 p = (u64 *) set; 285 286 for (i = 0; (u64) i < BITS_TO_U64(size); i++) { 287 ret = do_read_u64(ff, p + i); 288 if (ret < 0) { 289 free(set); 290 return ret; 291 } 292 } 293 294 *pset = set; 295 *psize = size; 296 return 0; 297 } 298 299 static int write_tracing_data(struct feat_fd *ff, 300 struct perf_evlist *evlist) 301 { 302 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__)) 303 return -1; 304 305 return read_tracing_data(ff->fd, &evlist->entries); 306 } 307 308 static int write_build_id(struct feat_fd *ff, 309 struct perf_evlist *evlist __maybe_unused) 310 { 311 struct perf_session *session; 312 int err; 313 314 session = container_of(ff->ph, struct perf_session, header); 315 316 if (!perf_session__read_build_ids(session, true)) 317 return -1; 318 319 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__)) 320 return -1; 321 322 err = perf_session__write_buildid_table(session, ff); 323 if (err < 0) { 324 pr_debug("failed to write buildid table\n"); 325 return err; 326 } 327 perf_session__cache_build_ids(session); 328 329 return 0; 330 } 331 332 static int write_hostname(struct feat_fd *ff, 333 struct perf_evlist *evlist __maybe_unused) 334 { 335 struct utsname uts; 336 int ret; 337 338 ret = uname(&uts); 339 if (ret < 0) 340 return -1; 341 342 return do_write_string(ff, uts.nodename); 343 } 344 345 static int write_osrelease(struct feat_fd *ff, 346 struct perf_evlist *evlist __maybe_unused) 347 { 348 struct utsname uts; 349 int ret; 350 351 ret = uname(&uts); 352 if (ret < 0) 353 return -1; 354 355 return do_write_string(ff, uts.release); 356 } 357 358 static int write_arch(struct feat_fd *ff, 359 struct perf_evlist *evlist __maybe_unused) 360 { 361 struct utsname uts; 362 int ret; 363 364 ret = uname(&uts); 365 if (ret < 0) 366 return -1; 367 368 return do_write_string(ff, uts.machine); 369 } 370 371 static int write_version(struct feat_fd *ff, 372 struct perf_evlist *evlist __maybe_unused) 373 { 374 return do_write_string(ff, perf_version_string); 375 } 376 377 static int __write_cpudesc(struct feat_fd *ff, const char *cpuinfo_proc) 378 { 379 FILE *file; 380 char *buf = NULL; 381 char *s, *p; 382 const char *search = cpuinfo_proc; 383 size_t len = 0; 384 int ret = -1; 385 386 if (!search) 387 return -1; 388 389 file = fopen("/proc/cpuinfo", "r"); 390 if (!file) 391 return -1; 392 393 while (getline(&buf, &len, file) > 0) { 394 ret = strncmp(buf, search, strlen(search)); 395 if (!ret) 396 break; 397 } 398 399 if (ret) { 400 ret = -1; 401 goto done; 402 } 403 404 s = buf; 405 406 p = strchr(buf, ':'); 407 if (p && *(p+1) == ' ' && *(p+2)) 408 s = p + 2; 409 p = strchr(s, '\n'); 410 if (p) 411 *p = '\0'; 412 413 /* squash extra space characters (branding string) */ 414 p = s; 415 while (*p) { 416 if (isspace(*p)) { 417 char *r = p + 1; 418 char *q = r; 419 *p = ' '; 420 while (*q && isspace(*q)) 421 q++; 422 if (q != (p+1)) 423 while ((*r++ = *q++)); 424 } 425 p++; 426 } 427 ret = do_write_string(ff, s); 428 done: 429 free(buf); 430 fclose(file); 431 return ret; 432 } 433 434 static int write_cpudesc(struct feat_fd *ff, 435 struct perf_evlist *evlist __maybe_unused) 436 { 437 const char *cpuinfo_procs[] = CPUINFO_PROC; 438 unsigned int i; 439 440 for (i = 0; i < ARRAY_SIZE(cpuinfo_procs); i++) { 441 int ret; 442 ret = __write_cpudesc(ff, cpuinfo_procs[i]); 443 if (ret >= 0) 444 return ret; 445 } 446 return -1; 447 } 448 449 450 static int write_nrcpus(struct feat_fd *ff, 451 struct perf_evlist *evlist __maybe_unused) 452 { 453 long nr; 454 u32 nrc, nra; 455 int ret; 456 457 nrc = cpu__max_present_cpu(); 458 459 nr = sysconf(_SC_NPROCESSORS_ONLN); 460 if (nr < 0) 461 return -1; 462 463 nra = (u32)(nr & UINT_MAX); 464 465 ret = do_write(ff, &nrc, sizeof(nrc)); 466 if (ret < 0) 467 return ret; 468 469 return do_write(ff, &nra, sizeof(nra)); 470 } 471 472 static int write_event_desc(struct feat_fd *ff, 473 struct perf_evlist *evlist) 474 { 475 struct perf_evsel *evsel; 476 u32 nre, nri, sz; 477 int ret; 478 479 nre = evlist->nr_entries; 480 481 /* 482 * write number of events 483 */ 484 ret = do_write(ff, &nre, sizeof(nre)); 485 if (ret < 0) 486 return ret; 487 488 /* 489 * size of perf_event_attr struct 490 */ 491 sz = (u32)sizeof(evsel->attr); 492 ret = do_write(ff, &sz, sizeof(sz)); 493 if (ret < 0) 494 return ret; 495 496 evlist__for_each_entry(evlist, evsel) { 497 ret = do_write(ff, &evsel->attr, sz); 498 if (ret < 0) 499 return ret; 500 /* 501 * write number of unique id per event 502 * there is one id per instance of an event 503 * 504 * copy into an nri to be independent of the 505 * type of ids, 506 */ 507 nri = evsel->ids; 508 ret = do_write(ff, &nri, sizeof(nri)); 509 if (ret < 0) 510 return ret; 511 512 /* 513 * write event string as passed on cmdline 514 */ 515 ret = do_write_string(ff, perf_evsel__name(evsel)); 516 if (ret < 0) 517 return ret; 518 /* 519 * write unique ids for this event 520 */ 521 ret = do_write(ff, evsel->id, evsel->ids * sizeof(u64)); 522 if (ret < 0) 523 return ret; 524 } 525 return 0; 526 } 527 528 static int write_cmdline(struct feat_fd *ff, 529 struct perf_evlist *evlist __maybe_unused) 530 { 531 char buf[MAXPATHLEN]; 532 u32 n; 533 int i, ret; 534 535 /* actual path to perf binary */ 536 ret = readlink("/proc/self/exe", buf, sizeof(buf) - 1); 537 if (ret <= 0) 538 return -1; 539 540 /* readlink() does not add null termination */ 541 buf[ret] = '\0'; 542 543 /* account for binary path */ 544 n = perf_env.nr_cmdline + 1; 545 546 ret = do_write(ff, &n, sizeof(n)); 547 if (ret < 0) 548 return ret; 549 550 ret = do_write_string(ff, buf); 551 if (ret < 0) 552 return ret; 553 554 for (i = 0 ; i < perf_env.nr_cmdline; i++) { 555 ret = do_write_string(ff, perf_env.cmdline_argv[i]); 556 if (ret < 0) 557 return ret; 558 } 559 return 0; 560 } 561 562 #define CORE_SIB_FMT \ 563 "/sys/devices/system/cpu/cpu%d/topology/core_siblings_list" 564 #define THRD_SIB_FMT \ 565 "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list" 566 567 struct cpu_topo { 568 u32 cpu_nr; 569 u32 core_sib; 570 u32 thread_sib; 571 char **core_siblings; 572 char **thread_siblings; 573 }; 574 575 static int build_cpu_topo(struct cpu_topo *tp, int cpu) 576 { 577 FILE *fp; 578 char filename[MAXPATHLEN]; 579 char *buf = NULL, *p; 580 size_t len = 0; 581 ssize_t sret; 582 u32 i = 0; 583 int ret = -1; 584 585 sprintf(filename, CORE_SIB_FMT, cpu); 586 fp = fopen(filename, "r"); 587 if (!fp) 588 goto try_threads; 589 590 sret = getline(&buf, &len, fp); 591 fclose(fp); 592 if (sret <= 0) 593 goto try_threads; 594 595 p = strchr(buf, '\n'); 596 if (p) 597 *p = '\0'; 598 599 for (i = 0; i < tp->core_sib; i++) { 600 if (!strcmp(buf, tp->core_siblings[i])) 601 break; 602 } 603 if (i == tp->core_sib) { 604 tp->core_siblings[i] = buf; 605 tp->core_sib++; 606 buf = NULL; 607 len = 0; 608 } 609 ret = 0; 610 611 try_threads: 612 sprintf(filename, THRD_SIB_FMT, cpu); 613 fp = fopen(filename, "r"); 614 if (!fp) 615 goto done; 616 617 if (getline(&buf, &len, fp) <= 0) 618 goto done; 619 620 p = strchr(buf, '\n'); 621 if (p) 622 *p = '\0'; 623 624 for (i = 0; i < tp->thread_sib; i++) { 625 if (!strcmp(buf, tp->thread_siblings[i])) 626 break; 627 } 628 if (i == tp->thread_sib) { 629 tp->thread_siblings[i] = buf; 630 tp->thread_sib++; 631 buf = NULL; 632 } 633 ret = 0; 634 done: 635 if(fp) 636 fclose(fp); 637 free(buf); 638 return ret; 639 } 640 641 static void free_cpu_topo(struct cpu_topo *tp) 642 { 643 u32 i; 644 645 if (!tp) 646 return; 647 648 for (i = 0 ; i < tp->core_sib; i++) 649 zfree(&tp->core_siblings[i]); 650 651 for (i = 0 ; i < tp->thread_sib; i++) 652 zfree(&tp->thread_siblings[i]); 653 654 free(tp); 655 } 656 657 static struct cpu_topo *build_cpu_topology(void) 658 { 659 struct cpu_topo *tp = NULL; 660 void *addr; 661 u32 nr, i; 662 size_t sz; 663 long ncpus; 664 int ret = -1; 665 struct cpu_map *map; 666 667 ncpus = cpu__max_present_cpu(); 668 669 /* build online CPU map */ 670 map = cpu_map__new(NULL); 671 if (map == NULL) { 672 pr_debug("failed to get system cpumap\n"); 673 return NULL; 674 } 675 676 nr = (u32)(ncpus & UINT_MAX); 677 678 sz = nr * sizeof(char *); 679 addr = calloc(1, sizeof(*tp) + 2 * sz); 680 if (!addr) 681 goto out_free; 682 683 tp = addr; 684 tp->cpu_nr = nr; 685 addr += sizeof(*tp); 686 tp->core_siblings = addr; 687 addr += sz; 688 tp->thread_siblings = addr; 689 690 for (i = 0; i < nr; i++) { 691 if (!cpu_map__has(map, i)) 692 continue; 693 694 ret = build_cpu_topo(tp, i); 695 if (ret < 0) 696 break; 697 } 698 699 out_free: 700 cpu_map__put(map); 701 if (ret) { 702 free_cpu_topo(tp); 703 tp = NULL; 704 } 705 return tp; 706 } 707 708 static int write_cpu_topology(struct feat_fd *ff, 709 struct perf_evlist *evlist __maybe_unused) 710 { 711 struct cpu_topo *tp; 712 u32 i; 713 int ret, j; 714 715 tp = build_cpu_topology(); 716 if (!tp) 717 return -1; 718 719 ret = do_write(ff, &tp->core_sib, sizeof(tp->core_sib)); 720 if (ret < 0) 721 goto done; 722 723 for (i = 0; i < tp->core_sib; i++) { 724 ret = do_write_string(ff, tp->core_siblings[i]); 725 if (ret < 0) 726 goto done; 727 } 728 ret = do_write(ff, &tp->thread_sib, sizeof(tp->thread_sib)); 729 if (ret < 0) 730 goto done; 731 732 for (i = 0; i < tp->thread_sib; i++) { 733 ret = do_write_string(ff, tp->thread_siblings[i]); 734 if (ret < 0) 735 break; 736 } 737 738 ret = perf_env__read_cpu_topology_map(&perf_env); 739 if (ret < 0) 740 goto done; 741 742 for (j = 0; j < perf_env.nr_cpus_avail; j++) { 743 ret = do_write(ff, &perf_env.cpu[j].core_id, 744 sizeof(perf_env.cpu[j].core_id)); 745 if (ret < 0) 746 return ret; 747 ret = do_write(ff, &perf_env.cpu[j].socket_id, 748 sizeof(perf_env.cpu[j].socket_id)); 749 if (ret < 0) 750 return ret; 751 } 752 done: 753 free_cpu_topo(tp); 754 return ret; 755 } 756 757 758 759 static int write_total_mem(struct feat_fd *ff, 760 struct perf_evlist *evlist __maybe_unused) 761 { 762 char *buf = NULL; 763 FILE *fp; 764 size_t len = 0; 765 int ret = -1, n; 766 uint64_t mem; 767 768 fp = fopen("/proc/meminfo", "r"); 769 if (!fp) 770 return -1; 771 772 while (getline(&buf, &len, fp) > 0) { 773 ret = strncmp(buf, "MemTotal:", 9); 774 if (!ret) 775 break; 776 } 777 if (!ret) { 778 n = sscanf(buf, "%*s %"PRIu64, &mem); 779 if (n == 1) 780 ret = do_write(ff, &mem, sizeof(mem)); 781 } else 782 ret = -1; 783 free(buf); 784 fclose(fp); 785 return ret; 786 } 787 788 static int write_topo_node(struct feat_fd *ff, int node) 789 { 790 char str[MAXPATHLEN]; 791 char field[32]; 792 char *buf = NULL, *p; 793 size_t len = 0; 794 FILE *fp; 795 u64 mem_total, mem_free, mem; 796 int ret = -1; 797 798 sprintf(str, "/sys/devices/system/node/node%d/meminfo", node); 799 fp = fopen(str, "r"); 800 if (!fp) 801 return -1; 802 803 while (getline(&buf, &len, fp) > 0) { 804 /* skip over invalid lines */ 805 if (!strchr(buf, ':')) 806 continue; 807 if (sscanf(buf, "%*s %*d %31s %"PRIu64, field, &mem) != 2) 808 goto done; 809 if (!strcmp(field, "MemTotal:")) 810 mem_total = mem; 811 if (!strcmp(field, "MemFree:")) 812 mem_free = mem; 813 } 814 815 fclose(fp); 816 fp = NULL; 817 818 ret = do_write(ff, &mem_total, sizeof(u64)); 819 if (ret) 820 goto done; 821 822 ret = do_write(ff, &mem_free, sizeof(u64)); 823 if (ret) 824 goto done; 825 826 ret = -1; 827 sprintf(str, "/sys/devices/system/node/node%d/cpulist", node); 828 829 fp = fopen(str, "r"); 830 if (!fp) 831 goto done; 832 833 if (getline(&buf, &len, fp) <= 0) 834 goto done; 835 836 p = strchr(buf, '\n'); 837 if (p) 838 *p = '\0'; 839 840 ret = do_write_string(ff, buf); 841 done: 842 free(buf); 843 if (fp) 844 fclose(fp); 845 return ret; 846 } 847 848 static int write_numa_topology(struct feat_fd *ff, 849 struct perf_evlist *evlist __maybe_unused) 850 { 851 char *buf = NULL; 852 size_t len = 0; 853 FILE *fp; 854 struct cpu_map *node_map = NULL; 855 char *c; 856 u32 nr, i, j; 857 int ret = -1; 858 859 fp = fopen("/sys/devices/system/node/online", "r"); 860 if (!fp) 861 return -1; 862 863 if (getline(&buf, &len, fp) <= 0) 864 goto done; 865 866 c = strchr(buf, '\n'); 867 if (c) 868 *c = '\0'; 869 870 node_map = cpu_map__new(buf); 871 if (!node_map) 872 goto done; 873 874 nr = (u32)node_map->nr; 875 876 ret = do_write(ff, &nr, sizeof(nr)); 877 if (ret < 0) 878 goto done; 879 880 for (i = 0; i < nr; i++) { 881 j = (u32)node_map->map[i]; 882 ret = do_write(ff, &j, sizeof(j)); 883 if (ret < 0) 884 break; 885 886 ret = write_topo_node(ff, i); 887 if (ret < 0) 888 break; 889 } 890 done: 891 free(buf); 892 fclose(fp); 893 cpu_map__put(node_map); 894 return ret; 895 } 896 897 /* 898 * File format: 899 * 900 * struct pmu_mappings { 901 * u32 pmu_num; 902 * struct pmu_map { 903 * u32 type; 904 * char name[]; 905 * }[pmu_num]; 906 * }; 907 */ 908 909 static int write_pmu_mappings(struct feat_fd *ff, 910 struct perf_evlist *evlist __maybe_unused) 911 { 912 struct perf_pmu *pmu = NULL; 913 u32 pmu_num = 0; 914 int ret; 915 916 /* 917 * Do a first pass to count number of pmu to avoid lseek so this 918 * works in pipe mode as well. 919 */ 920 while ((pmu = perf_pmu__scan(pmu))) { 921 if (!pmu->name) 922 continue; 923 pmu_num++; 924 } 925 926 ret = do_write(ff, &pmu_num, sizeof(pmu_num)); 927 if (ret < 0) 928 return ret; 929 930 while ((pmu = perf_pmu__scan(pmu))) { 931 if (!pmu->name) 932 continue; 933 934 ret = do_write(ff, &pmu->type, sizeof(pmu->type)); 935 if (ret < 0) 936 return ret; 937 938 ret = do_write_string(ff, pmu->name); 939 if (ret < 0) 940 return ret; 941 } 942 943 return 0; 944 } 945 946 /* 947 * File format: 948 * 949 * struct group_descs { 950 * u32 nr_groups; 951 * struct group_desc { 952 * char name[]; 953 * u32 leader_idx; 954 * u32 nr_members; 955 * }[nr_groups]; 956 * }; 957 */ 958 static int write_group_desc(struct feat_fd *ff, 959 struct perf_evlist *evlist) 960 { 961 u32 nr_groups = evlist->nr_groups; 962 struct perf_evsel *evsel; 963 int ret; 964 965 ret = do_write(ff, &nr_groups, sizeof(nr_groups)); 966 if (ret < 0) 967 return ret; 968 969 evlist__for_each_entry(evlist, evsel) { 970 if (perf_evsel__is_group_leader(evsel) && 971 evsel->nr_members > 1) { 972 const char *name = evsel->group_name ?: "{anon_group}"; 973 u32 leader_idx = evsel->idx; 974 u32 nr_members = evsel->nr_members; 975 976 ret = do_write_string(ff, name); 977 if (ret < 0) 978 return ret; 979 980 ret = do_write(ff, &leader_idx, sizeof(leader_idx)); 981 if (ret < 0) 982 return ret; 983 984 ret = do_write(ff, &nr_members, sizeof(nr_members)); 985 if (ret < 0) 986 return ret; 987 } 988 } 989 return 0; 990 } 991 992 /* 993 * default get_cpuid(): nothing gets recorded 994 * actual implementation must be in arch/$(SRCARCH)/util/header.c 995 */ 996 int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused) 997 { 998 return -1; 999 } 1000 1001 static int write_cpuid(struct feat_fd *ff, 1002 struct perf_evlist *evlist __maybe_unused) 1003 { 1004 char buffer[64]; 1005 int ret; 1006 1007 ret = get_cpuid(buffer, sizeof(buffer)); 1008 if (!ret) 1009 goto write_it; 1010 1011 return -1; 1012 write_it: 1013 return do_write_string(ff, buffer); 1014 } 1015 1016 static int write_branch_stack(struct feat_fd *ff __maybe_unused, 1017 struct perf_evlist *evlist __maybe_unused) 1018 { 1019 return 0; 1020 } 1021 1022 static int write_auxtrace(struct feat_fd *ff, 1023 struct perf_evlist *evlist __maybe_unused) 1024 { 1025 struct perf_session *session; 1026 int err; 1027 1028 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__)) 1029 return -1; 1030 1031 session = container_of(ff->ph, struct perf_session, header); 1032 1033 err = auxtrace_index__write(ff->fd, &session->auxtrace_index); 1034 if (err < 0) 1035 pr_err("Failed to write auxtrace index\n"); 1036 return err; 1037 } 1038 1039 static int cpu_cache_level__sort(const void *a, const void *b) 1040 { 1041 struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a; 1042 struct cpu_cache_level *cache_b = (struct cpu_cache_level *)b; 1043 1044 return cache_a->level - cache_b->level; 1045 } 1046 1047 static bool cpu_cache_level__cmp(struct cpu_cache_level *a, struct cpu_cache_level *b) 1048 { 1049 if (a->level != b->level) 1050 return false; 1051 1052 if (a->line_size != b->line_size) 1053 return false; 1054 1055 if (a->sets != b->sets) 1056 return false; 1057 1058 if (a->ways != b->ways) 1059 return false; 1060 1061 if (strcmp(a->type, b->type)) 1062 return false; 1063 1064 if (strcmp(a->size, b->size)) 1065 return false; 1066 1067 if (strcmp(a->map, b->map)) 1068 return false; 1069 1070 return true; 1071 } 1072 1073 static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 level) 1074 { 1075 char path[PATH_MAX], file[PATH_MAX]; 1076 struct stat st; 1077 size_t len; 1078 1079 scnprintf(path, PATH_MAX, "devices/system/cpu/cpu%d/cache/index%d/", cpu, level); 1080 scnprintf(file, PATH_MAX, "%s/%s", sysfs__mountpoint(), path); 1081 1082 if (stat(file, &st)) 1083 return 1; 1084 1085 scnprintf(file, PATH_MAX, "%s/level", path); 1086 if (sysfs__read_int(file, (int *) &cache->level)) 1087 return -1; 1088 1089 scnprintf(file, PATH_MAX, "%s/coherency_line_size", path); 1090 if (sysfs__read_int(file, (int *) &cache->line_size)) 1091 return -1; 1092 1093 scnprintf(file, PATH_MAX, "%s/number_of_sets", path); 1094 if (sysfs__read_int(file, (int *) &cache->sets)) 1095 return -1; 1096 1097 scnprintf(file, PATH_MAX, "%s/ways_of_associativity", path); 1098 if (sysfs__read_int(file, (int *) &cache->ways)) 1099 return -1; 1100 1101 scnprintf(file, PATH_MAX, "%s/type", path); 1102 if (sysfs__read_str(file, &cache->type, &len)) 1103 return -1; 1104 1105 cache->type[len] = 0; 1106 cache->type = rtrim(cache->type); 1107 1108 scnprintf(file, PATH_MAX, "%s/size", path); 1109 if (sysfs__read_str(file, &cache->size, &len)) { 1110 free(cache->type); 1111 return -1; 1112 } 1113 1114 cache->size[len] = 0; 1115 cache->size = rtrim(cache->size); 1116 1117 scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path); 1118 if (sysfs__read_str(file, &cache->map, &len)) { 1119 free(cache->map); 1120 free(cache->type); 1121 return -1; 1122 } 1123 1124 cache->map[len] = 0; 1125 cache->map = rtrim(cache->map); 1126 return 0; 1127 } 1128 1129 static void cpu_cache_level__fprintf(FILE *out, struct cpu_cache_level *c) 1130 { 1131 fprintf(out, "L%d %-15s %8s [%s]\n", c->level, c->type, c->size, c->map); 1132 } 1133 1134 static int build_caches(struct cpu_cache_level caches[], u32 size, u32 *cntp) 1135 { 1136 u32 i, cnt = 0; 1137 long ncpus; 1138 u32 nr, cpu; 1139 u16 level; 1140 1141 ncpus = sysconf(_SC_NPROCESSORS_CONF); 1142 if (ncpus < 0) 1143 return -1; 1144 1145 nr = (u32)(ncpus & UINT_MAX); 1146 1147 for (cpu = 0; cpu < nr; cpu++) { 1148 for (level = 0; level < 10; level++) { 1149 struct cpu_cache_level c; 1150 int err; 1151 1152 err = cpu_cache_level__read(&c, cpu, level); 1153 if (err < 0) 1154 return err; 1155 1156 if (err == 1) 1157 break; 1158 1159 for (i = 0; i < cnt; i++) { 1160 if (cpu_cache_level__cmp(&c, &caches[i])) 1161 break; 1162 } 1163 1164 if (i == cnt) 1165 caches[cnt++] = c; 1166 else 1167 cpu_cache_level__free(&c); 1168 1169 if (WARN_ONCE(cnt == size, "way too many cpu caches..")) 1170 goto out; 1171 } 1172 } 1173 out: 1174 *cntp = cnt; 1175 return 0; 1176 } 1177 1178 #define MAX_CACHES 2000 1179 1180 static int write_cache(struct feat_fd *ff, 1181 struct perf_evlist *evlist __maybe_unused) 1182 { 1183 struct cpu_cache_level caches[MAX_CACHES]; 1184 u32 cnt = 0, i, version = 1; 1185 int ret; 1186 1187 ret = build_caches(caches, MAX_CACHES, &cnt); 1188 if (ret) 1189 goto out; 1190 1191 qsort(&caches, cnt, sizeof(struct cpu_cache_level), cpu_cache_level__sort); 1192 1193 ret = do_write(ff, &version, sizeof(u32)); 1194 if (ret < 0) 1195 goto out; 1196 1197 ret = do_write(ff, &cnt, sizeof(u32)); 1198 if (ret < 0) 1199 goto out; 1200 1201 for (i = 0; i < cnt; i++) { 1202 struct cpu_cache_level *c = &caches[i]; 1203 1204 #define _W(v) \ 1205 ret = do_write(ff, &c->v, sizeof(u32)); \ 1206 if (ret < 0) \ 1207 goto out; 1208 1209 _W(level) 1210 _W(line_size) 1211 _W(sets) 1212 _W(ways) 1213 #undef _W 1214 1215 #define _W(v) \ 1216 ret = do_write_string(ff, (const char *) c->v); \ 1217 if (ret < 0) \ 1218 goto out; 1219 1220 _W(type) 1221 _W(size) 1222 _W(map) 1223 #undef _W 1224 } 1225 1226 out: 1227 for (i = 0; i < cnt; i++) 1228 cpu_cache_level__free(&caches[i]); 1229 return ret; 1230 } 1231 1232 static int write_stat(struct feat_fd *ff __maybe_unused, 1233 struct perf_evlist *evlist __maybe_unused) 1234 { 1235 return 0; 1236 } 1237 1238 static int write_sample_time(struct feat_fd *ff, 1239 struct perf_evlist *evlist) 1240 { 1241 int ret; 1242 1243 ret = do_write(ff, &evlist->first_sample_time, 1244 sizeof(evlist->first_sample_time)); 1245 if (ret < 0) 1246 return ret; 1247 1248 return do_write(ff, &evlist->last_sample_time, 1249 sizeof(evlist->last_sample_time)); 1250 } 1251 1252 1253 static int memory_node__read(struct memory_node *n, unsigned long idx) 1254 { 1255 unsigned int phys, size = 0; 1256 char path[PATH_MAX]; 1257 struct dirent *ent; 1258 DIR *dir; 1259 1260 #define for_each_memory(mem, dir) \ 1261 while ((ent = readdir(dir))) \ 1262 if (strcmp(ent->d_name, ".") && \ 1263 strcmp(ent->d_name, "..") && \ 1264 sscanf(ent->d_name, "memory%u", &mem) == 1) 1265 1266 scnprintf(path, PATH_MAX, 1267 "%s/devices/system/node/node%lu", 1268 sysfs__mountpoint(), idx); 1269 1270 dir = opendir(path); 1271 if (!dir) { 1272 pr_warning("failed: cant' open memory sysfs data\n"); 1273 return -1; 1274 } 1275 1276 for_each_memory(phys, dir) { 1277 size = max(phys, size); 1278 } 1279 1280 size++; 1281 1282 n->set = bitmap_alloc(size); 1283 if (!n->set) { 1284 closedir(dir); 1285 return -ENOMEM; 1286 } 1287 1288 bitmap_zero(n->set, size); 1289 n->node = idx; 1290 n->size = size; 1291 1292 rewinddir(dir); 1293 1294 for_each_memory(phys, dir) { 1295 set_bit(phys, n->set); 1296 } 1297 1298 closedir(dir); 1299 return 0; 1300 } 1301 1302 static int memory_node__sort(const void *a, const void *b) 1303 { 1304 const struct memory_node *na = a; 1305 const struct memory_node *nb = b; 1306 1307 return na->node - nb->node; 1308 } 1309 1310 static int build_mem_topology(struct memory_node *nodes, u64 size, u64 *cntp) 1311 { 1312 char path[PATH_MAX]; 1313 struct dirent *ent; 1314 DIR *dir; 1315 u64 cnt = 0; 1316 int ret = 0; 1317 1318 scnprintf(path, PATH_MAX, "%s/devices/system/node/", 1319 sysfs__mountpoint()); 1320 1321 dir = opendir(path); 1322 if (!dir) { 1323 pr_debug2("%s: could't read %s, does this arch have topology information?\n", 1324 __func__, path); 1325 return -1; 1326 } 1327 1328 while (!ret && (ent = readdir(dir))) { 1329 unsigned int idx; 1330 int r; 1331 1332 if (!strcmp(ent->d_name, ".") || 1333 !strcmp(ent->d_name, "..")) 1334 continue; 1335 1336 r = sscanf(ent->d_name, "node%u", &idx); 1337 if (r != 1) 1338 continue; 1339 1340 if (WARN_ONCE(cnt >= size, 1341 "failed to write MEM_TOPOLOGY, way too many nodes\n")) 1342 return -1; 1343 1344 ret = memory_node__read(&nodes[cnt++], idx); 1345 } 1346 1347 *cntp = cnt; 1348 closedir(dir); 1349 1350 if (!ret) 1351 qsort(nodes, cnt, sizeof(nodes[0]), memory_node__sort); 1352 1353 return ret; 1354 } 1355 1356 #define MAX_MEMORY_NODES 2000 1357 1358 /* 1359 * The MEM_TOPOLOGY holds physical memory map for every 1360 * node in system. The format of data is as follows: 1361 * 1362 * 0 - version | for future changes 1363 * 8 - block_size_bytes | /sys/devices/system/memory/block_size_bytes 1364 * 16 - count | number of nodes 1365 * 1366 * For each node we store map of physical indexes for 1367 * each node: 1368 * 1369 * 32 - node id | node index 1370 * 40 - size | size of bitmap 1371 * 48 - bitmap | bitmap of memory indexes that belongs to node 1372 */ 1373 static int write_mem_topology(struct feat_fd *ff __maybe_unused, 1374 struct perf_evlist *evlist __maybe_unused) 1375 { 1376 static struct memory_node nodes[MAX_MEMORY_NODES]; 1377 u64 bsize, version = 1, i, nr; 1378 int ret; 1379 1380 ret = sysfs__read_xll("devices/system/memory/block_size_bytes", 1381 (unsigned long long *) &bsize); 1382 if (ret) 1383 return ret; 1384 1385 ret = build_mem_topology(&nodes[0], MAX_MEMORY_NODES, &nr); 1386 if (ret) 1387 return ret; 1388 1389 ret = do_write(ff, &version, sizeof(version)); 1390 if (ret < 0) 1391 goto out; 1392 1393 ret = do_write(ff, &bsize, sizeof(bsize)); 1394 if (ret < 0) 1395 goto out; 1396 1397 ret = do_write(ff, &nr, sizeof(nr)); 1398 if (ret < 0) 1399 goto out; 1400 1401 for (i = 0; i < nr; i++) { 1402 struct memory_node *n = &nodes[i]; 1403 1404 #define _W(v) \ 1405 ret = do_write(ff, &n->v, sizeof(n->v)); \ 1406 if (ret < 0) \ 1407 goto out; 1408 1409 _W(node) 1410 _W(size) 1411 1412 #undef _W 1413 1414 ret = do_write_bitmap(ff, n->set, n->size); 1415 if (ret < 0) 1416 goto out; 1417 } 1418 1419 out: 1420 return ret; 1421 } 1422 1423 static void print_hostname(struct feat_fd *ff, FILE *fp) 1424 { 1425 fprintf(fp, "# hostname : %s\n", ff->ph->env.hostname); 1426 } 1427 1428 static void print_osrelease(struct feat_fd *ff, FILE *fp) 1429 { 1430 fprintf(fp, "# os release : %s\n", ff->ph->env.os_release); 1431 } 1432 1433 static void print_arch(struct feat_fd *ff, FILE *fp) 1434 { 1435 fprintf(fp, "# arch : %s\n", ff->ph->env.arch); 1436 } 1437 1438 static void print_cpudesc(struct feat_fd *ff, FILE *fp) 1439 { 1440 fprintf(fp, "# cpudesc : %s\n", ff->ph->env.cpu_desc); 1441 } 1442 1443 static void print_nrcpus(struct feat_fd *ff, FILE *fp) 1444 { 1445 fprintf(fp, "# nrcpus online : %u\n", ff->ph->env.nr_cpus_online); 1446 fprintf(fp, "# nrcpus avail : %u\n", ff->ph->env.nr_cpus_avail); 1447 } 1448 1449 static void print_version(struct feat_fd *ff, FILE *fp) 1450 { 1451 fprintf(fp, "# perf version : %s\n", ff->ph->env.version); 1452 } 1453 1454 static void print_cmdline(struct feat_fd *ff, FILE *fp) 1455 { 1456 int nr, i; 1457 1458 nr = ff->ph->env.nr_cmdline; 1459 1460 fprintf(fp, "# cmdline : "); 1461 1462 for (i = 0; i < nr; i++) 1463 fprintf(fp, "%s ", ff->ph->env.cmdline_argv[i]); 1464 fputc('\n', fp); 1465 } 1466 1467 static void print_cpu_topology(struct feat_fd *ff, FILE *fp) 1468 { 1469 struct perf_header *ph = ff->ph; 1470 int cpu_nr = ph->env.nr_cpus_avail; 1471 int nr, i; 1472 char *str; 1473 1474 nr = ph->env.nr_sibling_cores; 1475 str = ph->env.sibling_cores; 1476 1477 for (i = 0; i < nr; i++) { 1478 fprintf(fp, "# sibling cores : %s\n", str); 1479 str += strlen(str) + 1; 1480 } 1481 1482 nr = ph->env.nr_sibling_threads; 1483 str = ph->env.sibling_threads; 1484 1485 for (i = 0; i < nr; i++) { 1486 fprintf(fp, "# sibling threads : %s\n", str); 1487 str += strlen(str) + 1; 1488 } 1489 1490 if (ph->env.cpu != NULL) { 1491 for (i = 0; i < cpu_nr; i++) 1492 fprintf(fp, "# CPU %d: Core ID %d, Socket ID %d\n", i, 1493 ph->env.cpu[i].core_id, ph->env.cpu[i].socket_id); 1494 } else 1495 fprintf(fp, "# Core ID and Socket ID information is not available\n"); 1496 } 1497 1498 static void free_event_desc(struct perf_evsel *events) 1499 { 1500 struct perf_evsel *evsel; 1501 1502 if (!events) 1503 return; 1504 1505 for (evsel = events; evsel->attr.size; evsel++) { 1506 zfree(&evsel->name); 1507 zfree(&evsel->id); 1508 } 1509 1510 free(events); 1511 } 1512 1513 static struct perf_evsel *read_event_desc(struct feat_fd *ff) 1514 { 1515 struct perf_evsel *evsel, *events = NULL; 1516 u64 *id; 1517 void *buf = NULL; 1518 u32 nre, sz, nr, i, j; 1519 size_t msz; 1520 1521 /* number of events */ 1522 if (do_read_u32(ff, &nre)) 1523 goto error; 1524 1525 if (do_read_u32(ff, &sz)) 1526 goto error; 1527 1528 /* buffer to hold on file attr struct */ 1529 buf = malloc(sz); 1530 if (!buf) 1531 goto error; 1532 1533 /* the last event terminates with evsel->attr.size == 0: */ 1534 events = calloc(nre + 1, sizeof(*events)); 1535 if (!events) 1536 goto error; 1537 1538 msz = sizeof(evsel->attr); 1539 if (sz < msz) 1540 msz = sz; 1541 1542 for (i = 0, evsel = events; i < nre; evsel++, i++) { 1543 evsel->idx = i; 1544 1545 /* 1546 * must read entire on-file attr struct to 1547 * sync up with layout. 1548 */ 1549 if (__do_read(ff, buf, sz)) 1550 goto error; 1551 1552 if (ff->ph->needs_swap) 1553 perf_event__attr_swap(buf); 1554 1555 memcpy(&evsel->attr, buf, msz); 1556 1557 if (do_read_u32(ff, &nr)) 1558 goto error; 1559 1560 if (ff->ph->needs_swap) 1561 evsel->needs_swap = true; 1562 1563 evsel->name = do_read_string(ff); 1564 if (!evsel->name) 1565 goto error; 1566 1567 if (!nr) 1568 continue; 1569 1570 id = calloc(nr, sizeof(*id)); 1571 if (!id) 1572 goto error; 1573 evsel->ids = nr; 1574 evsel->id = id; 1575 1576 for (j = 0 ; j < nr; j++) { 1577 if (do_read_u64(ff, id)) 1578 goto error; 1579 id++; 1580 } 1581 } 1582 out: 1583 free(buf); 1584 return events; 1585 error: 1586 free_event_desc(events); 1587 events = NULL; 1588 goto out; 1589 } 1590 1591 static int __desc_attr__fprintf(FILE *fp, const char *name, const char *val, 1592 void *priv __maybe_unused) 1593 { 1594 return fprintf(fp, ", %s = %s", name, val); 1595 } 1596 1597 static void print_event_desc(struct feat_fd *ff, FILE *fp) 1598 { 1599 struct perf_evsel *evsel, *events; 1600 u32 j; 1601 u64 *id; 1602 1603 if (ff->events) 1604 events = ff->events; 1605 else 1606 events = read_event_desc(ff); 1607 1608 if (!events) { 1609 fprintf(fp, "# event desc: not available or unable to read\n"); 1610 return; 1611 } 1612 1613 for (evsel = events; evsel->attr.size; evsel++) { 1614 fprintf(fp, "# event : name = %s, ", evsel->name); 1615 1616 if (evsel->ids) { 1617 fprintf(fp, ", id = {"); 1618 for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) { 1619 if (j) 1620 fputc(',', fp); 1621 fprintf(fp, " %"PRIu64, *id); 1622 } 1623 fprintf(fp, " }"); 1624 } 1625 1626 perf_event_attr__fprintf(fp, &evsel->attr, __desc_attr__fprintf, NULL); 1627 1628 fputc('\n', fp); 1629 } 1630 1631 free_event_desc(events); 1632 ff->events = NULL; 1633 } 1634 1635 static void print_total_mem(struct feat_fd *ff, FILE *fp) 1636 { 1637 fprintf(fp, "# total memory : %llu kB\n", ff->ph->env.total_mem); 1638 } 1639 1640 static void print_numa_topology(struct feat_fd *ff, FILE *fp) 1641 { 1642 int i; 1643 struct numa_node *n; 1644 1645 for (i = 0; i < ff->ph->env.nr_numa_nodes; i++) { 1646 n = &ff->ph->env.numa_nodes[i]; 1647 1648 fprintf(fp, "# node%u meminfo : total = %"PRIu64" kB," 1649 " free = %"PRIu64" kB\n", 1650 n->node, n->mem_total, n->mem_free); 1651 1652 fprintf(fp, "# node%u cpu list : ", n->node); 1653 cpu_map__fprintf(n->map, fp); 1654 } 1655 } 1656 1657 static void print_cpuid(struct feat_fd *ff, FILE *fp) 1658 { 1659 fprintf(fp, "# cpuid : %s\n", ff->ph->env.cpuid); 1660 } 1661 1662 static void print_branch_stack(struct feat_fd *ff __maybe_unused, FILE *fp) 1663 { 1664 fprintf(fp, "# contains samples with branch stack\n"); 1665 } 1666 1667 static void print_auxtrace(struct feat_fd *ff __maybe_unused, FILE *fp) 1668 { 1669 fprintf(fp, "# contains AUX area data (e.g. instruction trace)\n"); 1670 } 1671 1672 static void print_stat(struct feat_fd *ff __maybe_unused, FILE *fp) 1673 { 1674 fprintf(fp, "# contains stat data\n"); 1675 } 1676 1677 static void print_cache(struct feat_fd *ff, FILE *fp __maybe_unused) 1678 { 1679 int i; 1680 1681 fprintf(fp, "# CPU cache info:\n"); 1682 for (i = 0; i < ff->ph->env.caches_cnt; i++) { 1683 fprintf(fp, "# "); 1684 cpu_cache_level__fprintf(fp, &ff->ph->env.caches[i]); 1685 } 1686 } 1687 1688 static void print_pmu_mappings(struct feat_fd *ff, FILE *fp) 1689 { 1690 const char *delimiter = "# pmu mappings: "; 1691 char *str, *tmp; 1692 u32 pmu_num; 1693 u32 type; 1694 1695 pmu_num = ff->ph->env.nr_pmu_mappings; 1696 if (!pmu_num) { 1697 fprintf(fp, "# pmu mappings: not available\n"); 1698 return; 1699 } 1700 1701 str = ff->ph->env.pmu_mappings; 1702 1703 while (pmu_num) { 1704 type = strtoul(str, &tmp, 0); 1705 if (*tmp != ':') 1706 goto error; 1707 1708 str = tmp + 1; 1709 fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type); 1710 1711 delimiter = ", "; 1712 str += strlen(str) + 1; 1713 pmu_num--; 1714 } 1715 1716 fprintf(fp, "\n"); 1717 1718 if (!pmu_num) 1719 return; 1720 error: 1721 fprintf(fp, "# pmu mappings: unable to read\n"); 1722 } 1723 1724 static void print_group_desc(struct feat_fd *ff, FILE *fp) 1725 { 1726 struct perf_session *session; 1727 struct perf_evsel *evsel; 1728 u32 nr = 0; 1729 1730 session = container_of(ff->ph, struct perf_session, header); 1731 1732 evlist__for_each_entry(session->evlist, evsel) { 1733 if (perf_evsel__is_group_leader(evsel) && 1734 evsel->nr_members > 1) { 1735 fprintf(fp, "# group: %s{%s", evsel->group_name ?: "", 1736 perf_evsel__name(evsel)); 1737 1738 nr = evsel->nr_members - 1; 1739 } else if (nr) { 1740 fprintf(fp, ",%s", perf_evsel__name(evsel)); 1741 1742 if (--nr == 0) 1743 fprintf(fp, "}\n"); 1744 } 1745 } 1746 } 1747 1748 static void print_sample_time(struct feat_fd *ff, FILE *fp) 1749 { 1750 struct perf_session *session; 1751 char time_buf[32]; 1752 double d; 1753 1754 session = container_of(ff->ph, struct perf_session, header); 1755 1756 timestamp__scnprintf_usec(session->evlist->first_sample_time, 1757 time_buf, sizeof(time_buf)); 1758 fprintf(fp, "# time of first sample : %s\n", time_buf); 1759 1760 timestamp__scnprintf_usec(session->evlist->last_sample_time, 1761 time_buf, sizeof(time_buf)); 1762 fprintf(fp, "# time of last sample : %s\n", time_buf); 1763 1764 d = (double)(session->evlist->last_sample_time - 1765 session->evlist->first_sample_time) / NSEC_PER_MSEC; 1766 1767 fprintf(fp, "# sample duration : %10.3f ms\n", d); 1768 } 1769 1770 static void memory_node__fprintf(struct memory_node *n, 1771 unsigned long long bsize, FILE *fp) 1772 { 1773 char buf_map[100], buf_size[50]; 1774 unsigned long long size; 1775 1776 size = bsize * bitmap_weight(n->set, n->size); 1777 unit_number__scnprintf(buf_size, 50, size); 1778 1779 bitmap_scnprintf(n->set, n->size, buf_map, 100); 1780 fprintf(fp, "# %3" PRIu64 " [%s]: %s\n", n->node, buf_size, buf_map); 1781 } 1782 1783 static void print_mem_topology(struct feat_fd *ff, FILE *fp) 1784 { 1785 struct memory_node *nodes; 1786 int i, nr; 1787 1788 nodes = ff->ph->env.memory_nodes; 1789 nr = ff->ph->env.nr_memory_nodes; 1790 1791 fprintf(fp, "# memory nodes (nr %d, block size 0x%llx):\n", 1792 nr, ff->ph->env.memory_bsize); 1793 1794 for (i = 0; i < nr; i++) { 1795 memory_node__fprintf(&nodes[i], ff->ph->env.memory_bsize, fp); 1796 } 1797 } 1798 1799 static int __event_process_build_id(struct build_id_event *bev, 1800 char *filename, 1801 struct perf_session *session) 1802 { 1803 int err = -1; 1804 struct machine *machine; 1805 u16 cpumode; 1806 struct dso *dso; 1807 enum dso_kernel_type dso_type; 1808 1809 machine = perf_session__findnew_machine(session, bev->pid); 1810 if (!machine) 1811 goto out; 1812 1813 cpumode = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; 1814 1815 switch (cpumode) { 1816 case PERF_RECORD_MISC_KERNEL: 1817 dso_type = DSO_TYPE_KERNEL; 1818 break; 1819 case PERF_RECORD_MISC_GUEST_KERNEL: 1820 dso_type = DSO_TYPE_GUEST_KERNEL; 1821 break; 1822 case PERF_RECORD_MISC_USER: 1823 case PERF_RECORD_MISC_GUEST_USER: 1824 dso_type = DSO_TYPE_USER; 1825 break; 1826 default: 1827 goto out; 1828 } 1829 1830 dso = machine__findnew_dso(machine, filename); 1831 if (dso != NULL) { 1832 char sbuild_id[SBUILD_ID_SIZE]; 1833 1834 dso__set_build_id(dso, &bev->build_id); 1835 1836 if (dso_type != DSO_TYPE_USER) { 1837 struct kmod_path m = { .name = NULL, }; 1838 1839 if (!kmod_path__parse_name(&m, filename) && m.kmod) 1840 dso__set_module_info(dso, &m, machine); 1841 else 1842 dso->kernel = dso_type; 1843 1844 free(m.name); 1845 } 1846 1847 build_id__sprintf(dso->build_id, sizeof(dso->build_id), 1848 sbuild_id); 1849 pr_debug("build id event received for %s: %s\n", 1850 dso->long_name, sbuild_id); 1851 dso__put(dso); 1852 } 1853 1854 err = 0; 1855 out: 1856 return err; 1857 } 1858 1859 static int perf_header__read_build_ids_abi_quirk(struct perf_header *header, 1860 int input, u64 offset, u64 size) 1861 { 1862 struct perf_session *session = container_of(header, struct perf_session, header); 1863 struct { 1864 struct perf_event_header header; 1865 u8 build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))]; 1866 char filename[0]; 1867 } old_bev; 1868 struct build_id_event bev; 1869 char filename[PATH_MAX]; 1870 u64 limit = offset + size; 1871 1872 while (offset < limit) { 1873 ssize_t len; 1874 1875 if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev)) 1876 return -1; 1877 1878 if (header->needs_swap) 1879 perf_event_header__bswap(&old_bev.header); 1880 1881 len = old_bev.header.size - sizeof(old_bev); 1882 if (readn(input, filename, len) != len) 1883 return -1; 1884 1885 bev.header = old_bev.header; 1886 1887 /* 1888 * As the pid is the missing value, we need to fill 1889 * it properly. The header.misc value give us nice hint. 1890 */ 1891 bev.pid = HOST_KERNEL_ID; 1892 if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER || 1893 bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL) 1894 bev.pid = DEFAULT_GUEST_KERNEL_ID; 1895 1896 memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id)); 1897 __event_process_build_id(&bev, filename, session); 1898 1899 offset += bev.header.size; 1900 } 1901 1902 return 0; 1903 } 1904 1905 static int perf_header__read_build_ids(struct perf_header *header, 1906 int input, u64 offset, u64 size) 1907 { 1908 struct perf_session *session = container_of(header, struct perf_session, header); 1909 struct build_id_event bev; 1910 char filename[PATH_MAX]; 1911 u64 limit = offset + size, orig_offset = offset; 1912 int err = -1; 1913 1914 while (offset < limit) { 1915 ssize_t len; 1916 1917 if (readn(input, &bev, sizeof(bev)) != sizeof(bev)) 1918 goto out; 1919 1920 if (header->needs_swap) 1921 perf_event_header__bswap(&bev.header); 1922 1923 len = bev.header.size - sizeof(bev); 1924 if (readn(input, filename, len) != len) 1925 goto out; 1926 /* 1927 * The a1645ce1 changeset: 1928 * 1929 * "perf: 'perf kvm' tool for monitoring guest performance from host" 1930 * 1931 * Added a field to struct build_id_event that broke the file 1932 * format. 1933 * 1934 * Since the kernel build-id is the first entry, process the 1935 * table using the old format if the well known 1936 * '[kernel.kallsyms]' string for the kernel build-id has the 1937 * first 4 characters chopped off (where the pid_t sits). 1938 */ 1939 if (memcmp(filename, "nel.kallsyms]", 13) == 0) { 1940 if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1) 1941 return -1; 1942 return perf_header__read_build_ids_abi_quirk(header, input, offset, size); 1943 } 1944 1945 __event_process_build_id(&bev, filename, session); 1946 1947 offset += bev.header.size; 1948 } 1949 err = 0; 1950 out: 1951 return err; 1952 } 1953 1954 /* Macro for features that simply need to read and store a string. */ 1955 #define FEAT_PROCESS_STR_FUN(__feat, __feat_env) \ 1956 static int process_##__feat(struct feat_fd *ff, void *data __maybe_unused) \ 1957 {\ 1958 ff->ph->env.__feat_env = do_read_string(ff); \ 1959 return ff->ph->env.__feat_env ? 0 : -ENOMEM; \ 1960 } 1961 1962 FEAT_PROCESS_STR_FUN(hostname, hostname); 1963 FEAT_PROCESS_STR_FUN(osrelease, os_release); 1964 FEAT_PROCESS_STR_FUN(version, version); 1965 FEAT_PROCESS_STR_FUN(arch, arch); 1966 FEAT_PROCESS_STR_FUN(cpudesc, cpu_desc); 1967 FEAT_PROCESS_STR_FUN(cpuid, cpuid); 1968 1969 static int process_tracing_data(struct feat_fd *ff, void *data) 1970 { 1971 ssize_t ret = trace_report(ff->fd, data, false); 1972 1973 return ret < 0 ? -1 : 0; 1974 } 1975 1976 static int process_build_id(struct feat_fd *ff, void *data __maybe_unused) 1977 { 1978 if (perf_header__read_build_ids(ff->ph, ff->fd, ff->offset, ff->size)) 1979 pr_debug("Failed to read buildids, continuing...\n"); 1980 return 0; 1981 } 1982 1983 static int process_nrcpus(struct feat_fd *ff, void *data __maybe_unused) 1984 { 1985 int ret; 1986 u32 nr_cpus_avail, nr_cpus_online; 1987 1988 ret = do_read_u32(ff, &nr_cpus_avail); 1989 if (ret) 1990 return ret; 1991 1992 ret = do_read_u32(ff, &nr_cpus_online); 1993 if (ret) 1994 return ret; 1995 ff->ph->env.nr_cpus_avail = (int)nr_cpus_avail; 1996 ff->ph->env.nr_cpus_online = (int)nr_cpus_online; 1997 return 0; 1998 } 1999 2000 static int process_total_mem(struct feat_fd *ff, void *data __maybe_unused) 2001 { 2002 u64 total_mem; 2003 int ret; 2004 2005 ret = do_read_u64(ff, &total_mem); 2006 if (ret) 2007 return -1; 2008 ff->ph->env.total_mem = (unsigned long long)total_mem; 2009 return 0; 2010 } 2011 2012 static struct perf_evsel * 2013 perf_evlist__find_by_index(struct perf_evlist *evlist, int idx) 2014 { 2015 struct perf_evsel *evsel; 2016 2017 evlist__for_each_entry(evlist, evsel) { 2018 if (evsel->idx == idx) 2019 return evsel; 2020 } 2021 2022 return NULL; 2023 } 2024 2025 static void 2026 perf_evlist__set_event_name(struct perf_evlist *evlist, 2027 struct perf_evsel *event) 2028 { 2029 struct perf_evsel *evsel; 2030 2031 if (!event->name) 2032 return; 2033 2034 evsel = perf_evlist__find_by_index(evlist, event->idx); 2035 if (!evsel) 2036 return; 2037 2038 if (evsel->name) 2039 return; 2040 2041 evsel->name = strdup(event->name); 2042 } 2043 2044 static int 2045 process_event_desc(struct feat_fd *ff, void *data __maybe_unused) 2046 { 2047 struct perf_session *session; 2048 struct perf_evsel *evsel, *events = read_event_desc(ff); 2049 2050 if (!events) 2051 return 0; 2052 2053 session = container_of(ff->ph, struct perf_session, header); 2054 2055 if (session->data->is_pipe) { 2056 /* Save events for reading later by print_event_desc, 2057 * since they can't be read again in pipe mode. */ 2058 ff->events = events; 2059 } 2060 2061 for (evsel = events; evsel->attr.size; evsel++) 2062 perf_evlist__set_event_name(session->evlist, evsel); 2063 2064 if (!session->data->is_pipe) 2065 free_event_desc(events); 2066 2067 return 0; 2068 } 2069 2070 static int process_cmdline(struct feat_fd *ff, void *data __maybe_unused) 2071 { 2072 char *str, *cmdline = NULL, **argv = NULL; 2073 u32 nr, i, len = 0; 2074 2075 if (do_read_u32(ff, &nr)) 2076 return -1; 2077 2078 ff->ph->env.nr_cmdline = nr; 2079 2080 cmdline = zalloc(ff->size + nr + 1); 2081 if (!cmdline) 2082 return -1; 2083 2084 argv = zalloc(sizeof(char *) * (nr + 1)); 2085 if (!argv) 2086 goto error; 2087 2088 for (i = 0; i < nr; i++) { 2089 str = do_read_string(ff); 2090 if (!str) 2091 goto error; 2092 2093 argv[i] = cmdline + len; 2094 memcpy(argv[i], str, strlen(str) + 1); 2095 len += strlen(str) + 1; 2096 free(str); 2097 } 2098 ff->ph->env.cmdline = cmdline; 2099 ff->ph->env.cmdline_argv = (const char **) argv; 2100 return 0; 2101 2102 error: 2103 free(argv); 2104 free(cmdline); 2105 return -1; 2106 } 2107 2108 static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused) 2109 { 2110 u32 nr, i; 2111 char *str; 2112 struct strbuf sb; 2113 int cpu_nr = ff->ph->env.nr_cpus_avail; 2114 u64 size = 0; 2115 struct perf_header *ph = ff->ph; 2116 2117 ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu)); 2118 if (!ph->env.cpu) 2119 return -1; 2120 2121 if (do_read_u32(ff, &nr)) 2122 goto free_cpu; 2123 2124 ph->env.nr_sibling_cores = nr; 2125 size += sizeof(u32); 2126 if (strbuf_init(&sb, 128) < 0) 2127 goto free_cpu; 2128 2129 for (i = 0; i < nr; i++) { 2130 str = do_read_string(ff); 2131 if (!str) 2132 goto error; 2133 2134 /* include a NULL character at the end */ 2135 if (strbuf_add(&sb, str, strlen(str) + 1) < 0) 2136 goto error; 2137 size += string_size(str); 2138 free(str); 2139 } 2140 ph->env.sibling_cores = strbuf_detach(&sb, NULL); 2141 2142 if (do_read_u32(ff, &nr)) 2143 return -1; 2144 2145 ph->env.nr_sibling_threads = nr; 2146 size += sizeof(u32); 2147 2148 for (i = 0; i < nr; i++) { 2149 str = do_read_string(ff); 2150 if (!str) 2151 goto error; 2152 2153 /* include a NULL character at the end */ 2154 if (strbuf_add(&sb, str, strlen(str) + 1) < 0) 2155 goto error; 2156 size += string_size(str); 2157 free(str); 2158 } 2159 ph->env.sibling_threads = strbuf_detach(&sb, NULL); 2160 2161 /* 2162 * The header may be from old perf, 2163 * which doesn't include core id and socket id information. 2164 */ 2165 if (ff->size <= size) { 2166 zfree(&ph->env.cpu); 2167 return 0; 2168 } 2169 2170 for (i = 0; i < (u32)cpu_nr; i++) { 2171 if (do_read_u32(ff, &nr)) 2172 goto free_cpu; 2173 2174 ph->env.cpu[i].core_id = nr; 2175 2176 if (do_read_u32(ff, &nr)) 2177 goto free_cpu; 2178 2179 if (nr != (u32)-1 && nr > (u32)cpu_nr) { 2180 pr_debug("socket_id number is too big." 2181 "You may need to upgrade the perf tool.\n"); 2182 goto free_cpu; 2183 } 2184 2185 ph->env.cpu[i].socket_id = nr; 2186 } 2187 2188 return 0; 2189 2190 error: 2191 strbuf_release(&sb); 2192 free_cpu: 2193 zfree(&ph->env.cpu); 2194 return -1; 2195 } 2196 2197 static int process_numa_topology(struct feat_fd *ff, void *data __maybe_unused) 2198 { 2199 struct numa_node *nodes, *n; 2200 u32 nr, i; 2201 char *str; 2202 2203 /* nr nodes */ 2204 if (do_read_u32(ff, &nr)) 2205 return -1; 2206 2207 nodes = zalloc(sizeof(*nodes) * nr); 2208 if (!nodes) 2209 return -ENOMEM; 2210 2211 for (i = 0; i < nr; i++) { 2212 n = &nodes[i]; 2213 2214 /* node number */ 2215 if (do_read_u32(ff, &n->node)) 2216 goto error; 2217 2218 if (do_read_u64(ff, &n->mem_total)) 2219 goto error; 2220 2221 if (do_read_u64(ff, &n->mem_free)) 2222 goto error; 2223 2224 str = do_read_string(ff); 2225 if (!str) 2226 goto error; 2227 2228 n->map = cpu_map__new(str); 2229 if (!n->map) 2230 goto error; 2231 2232 free(str); 2233 } 2234 ff->ph->env.nr_numa_nodes = nr; 2235 ff->ph->env.numa_nodes = nodes; 2236 return 0; 2237 2238 error: 2239 free(nodes); 2240 return -1; 2241 } 2242 2243 static int process_pmu_mappings(struct feat_fd *ff, void *data __maybe_unused) 2244 { 2245 char *name; 2246 u32 pmu_num; 2247 u32 type; 2248 struct strbuf sb; 2249 2250 if (do_read_u32(ff, &pmu_num)) 2251 return -1; 2252 2253 if (!pmu_num) { 2254 pr_debug("pmu mappings not available\n"); 2255 return 0; 2256 } 2257 2258 ff->ph->env.nr_pmu_mappings = pmu_num; 2259 if (strbuf_init(&sb, 128) < 0) 2260 return -1; 2261 2262 while (pmu_num) { 2263 if (do_read_u32(ff, &type)) 2264 goto error; 2265 2266 name = do_read_string(ff); 2267 if (!name) 2268 goto error; 2269 2270 if (strbuf_addf(&sb, "%u:%s", type, name) < 0) 2271 goto error; 2272 /* include a NULL character at the end */ 2273 if (strbuf_add(&sb, "", 1) < 0) 2274 goto error; 2275 2276 if (!strcmp(name, "msr")) 2277 ff->ph->env.msr_pmu_type = type; 2278 2279 free(name); 2280 pmu_num--; 2281 } 2282 ff->ph->env.pmu_mappings = strbuf_detach(&sb, NULL); 2283 return 0; 2284 2285 error: 2286 strbuf_release(&sb); 2287 return -1; 2288 } 2289 2290 static int process_group_desc(struct feat_fd *ff, void *data __maybe_unused) 2291 { 2292 size_t ret = -1; 2293 u32 i, nr, nr_groups; 2294 struct perf_session *session; 2295 struct perf_evsel *evsel, *leader = NULL; 2296 struct group_desc { 2297 char *name; 2298 u32 leader_idx; 2299 u32 nr_members; 2300 } *desc; 2301 2302 if (do_read_u32(ff, &nr_groups)) 2303 return -1; 2304 2305 ff->ph->env.nr_groups = nr_groups; 2306 if (!nr_groups) { 2307 pr_debug("group desc not available\n"); 2308 return 0; 2309 } 2310 2311 desc = calloc(nr_groups, sizeof(*desc)); 2312 if (!desc) 2313 return -1; 2314 2315 for (i = 0; i < nr_groups; i++) { 2316 desc[i].name = do_read_string(ff); 2317 if (!desc[i].name) 2318 goto out_free; 2319 2320 if (do_read_u32(ff, &desc[i].leader_idx)) 2321 goto out_free; 2322 2323 if (do_read_u32(ff, &desc[i].nr_members)) 2324 goto out_free; 2325 } 2326 2327 /* 2328 * Rebuild group relationship based on the group_desc 2329 */ 2330 session = container_of(ff->ph, struct perf_session, header); 2331 session->evlist->nr_groups = nr_groups; 2332 2333 i = nr = 0; 2334 evlist__for_each_entry(session->evlist, evsel) { 2335 if (evsel->idx == (int) desc[i].leader_idx) { 2336 evsel->leader = evsel; 2337 /* {anon_group} is a dummy name */ 2338 if (strcmp(desc[i].name, "{anon_group}")) { 2339 evsel->group_name = desc[i].name; 2340 desc[i].name = NULL; 2341 } 2342 evsel->nr_members = desc[i].nr_members; 2343 2344 if (i >= nr_groups || nr > 0) { 2345 pr_debug("invalid group desc\n"); 2346 goto out_free; 2347 } 2348 2349 leader = evsel; 2350 nr = evsel->nr_members - 1; 2351 i++; 2352 } else if (nr) { 2353 /* This is a group member */ 2354 evsel->leader = leader; 2355 2356 nr--; 2357 } 2358 } 2359 2360 if (i != nr_groups || nr != 0) { 2361 pr_debug("invalid group desc\n"); 2362 goto out_free; 2363 } 2364 2365 ret = 0; 2366 out_free: 2367 for (i = 0; i < nr_groups; i++) 2368 zfree(&desc[i].name); 2369 free(desc); 2370 2371 return ret; 2372 } 2373 2374 static int process_auxtrace(struct feat_fd *ff, void *data __maybe_unused) 2375 { 2376 struct perf_session *session; 2377 int err; 2378 2379 session = container_of(ff->ph, struct perf_session, header); 2380 2381 err = auxtrace_index__process(ff->fd, ff->size, session, 2382 ff->ph->needs_swap); 2383 if (err < 0) 2384 pr_err("Failed to process auxtrace index\n"); 2385 return err; 2386 } 2387 2388 static int process_cache(struct feat_fd *ff, void *data __maybe_unused) 2389 { 2390 struct cpu_cache_level *caches; 2391 u32 cnt, i, version; 2392 2393 if (do_read_u32(ff, &version)) 2394 return -1; 2395 2396 if (version != 1) 2397 return -1; 2398 2399 if (do_read_u32(ff, &cnt)) 2400 return -1; 2401 2402 caches = zalloc(sizeof(*caches) * cnt); 2403 if (!caches) 2404 return -1; 2405 2406 for (i = 0; i < cnt; i++) { 2407 struct cpu_cache_level c; 2408 2409 #define _R(v) \ 2410 if (do_read_u32(ff, &c.v))\ 2411 goto out_free_caches; \ 2412 2413 _R(level) 2414 _R(line_size) 2415 _R(sets) 2416 _R(ways) 2417 #undef _R 2418 2419 #define _R(v) \ 2420 c.v = do_read_string(ff); \ 2421 if (!c.v) \ 2422 goto out_free_caches; 2423 2424 _R(type) 2425 _R(size) 2426 _R(map) 2427 #undef _R 2428 2429 caches[i] = c; 2430 } 2431 2432 ff->ph->env.caches = caches; 2433 ff->ph->env.caches_cnt = cnt; 2434 return 0; 2435 out_free_caches: 2436 free(caches); 2437 return -1; 2438 } 2439 2440 static int process_sample_time(struct feat_fd *ff, void *data __maybe_unused) 2441 { 2442 struct perf_session *session; 2443 u64 first_sample_time, last_sample_time; 2444 int ret; 2445 2446 session = container_of(ff->ph, struct perf_session, header); 2447 2448 ret = do_read_u64(ff, &first_sample_time); 2449 if (ret) 2450 return -1; 2451 2452 ret = do_read_u64(ff, &last_sample_time); 2453 if (ret) 2454 return -1; 2455 2456 session->evlist->first_sample_time = first_sample_time; 2457 session->evlist->last_sample_time = last_sample_time; 2458 return 0; 2459 } 2460 2461 static int process_mem_topology(struct feat_fd *ff, 2462 void *data __maybe_unused) 2463 { 2464 struct memory_node *nodes; 2465 u64 version, i, nr, bsize; 2466 int ret = -1; 2467 2468 if (do_read_u64(ff, &version)) 2469 return -1; 2470 2471 if (version != 1) 2472 return -1; 2473 2474 if (do_read_u64(ff, &bsize)) 2475 return -1; 2476 2477 if (do_read_u64(ff, &nr)) 2478 return -1; 2479 2480 nodes = zalloc(sizeof(*nodes) * nr); 2481 if (!nodes) 2482 return -1; 2483 2484 for (i = 0; i < nr; i++) { 2485 struct memory_node n; 2486 2487 #define _R(v) \ 2488 if (do_read_u64(ff, &n.v)) \ 2489 goto out; \ 2490 2491 _R(node) 2492 _R(size) 2493 2494 #undef _R 2495 2496 if (do_read_bitmap(ff, &n.set, &n.size)) 2497 goto out; 2498 2499 nodes[i] = n; 2500 } 2501 2502 ff->ph->env.memory_bsize = bsize; 2503 ff->ph->env.memory_nodes = nodes; 2504 ff->ph->env.nr_memory_nodes = nr; 2505 ret = 0; 2506 2507 out: 2508 if (ret) 2509 free(nodes); 2510 return ret; 2511 } 2512 2513 struct feature_ops { 2514 int (*write)(struct feat_fd *ff, struct perf_evlist *evlist); 2515 void (*print)(struct feat_fd *ff, FILE *fp); 2516 int (*process)(struct feat_fd *ff, void *data); 2517 const char *name; 2518 bool full_only; 2519 bool synthesize; 2520 }; 2521 2522 #define FEAT_OPR(n, func, __full_only) \ 2523 [HEADER_##n] = { \ 2524 .name = __stringify(n), \ 2525 .write = write_##func, \ 2526 .print = print_##func, \ 2527 .full_only = __full_only, \ 2528 .process = process_##func, \ 2529 .synthesize = true \ 2530 } 2531 2532 #define FEAT_OPN(n, func, __full_only) \ 2533 [HEADER_##n] = { \ 2534 .name = __stringify(n), \ 2535 .write = write_##func, \ 2536 .print = print_##func, \ 2537 .full_only = __full_only, \ 2538 .process = process_##func \ 2539 } 2540 2541 /* feature_ops not implemented: */ 2542 #define print_tracing_data NULL 2543 #define print_build_id NULL 2544 2545 #define process_branch_stack NULL 2546 #define process_stat NULL 2547 2548 2549 static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = { 2550 FEAT_OPN(TRACING_DATA, tracing_data, false), 2551 FEAT_OPN(BUILD_ID, build_id, false), 2552 FEAT_OPR(HOSTNAME, hostname, false), 2553 FEAT_OPR(OSRELEASE, osrelease, false), 2554 FEAT_OPR(VERSION, version, false), 2555 FEAT_OPR(ARCH, arch, false), 2556 FEAT_OPR(NRCPUS, nrcpus, false), 2557 FEAT_OPR(CPUDESC, cpudesc, false), 2558 FEAT_OPR(CPUID, cpuid, false), 2559 FEAT_OPR(TOTAL_MEM, total_mem, false), 2560 FEAT_OPR(EVENT_DESC, event_desc, false), 2561 FEAT_OPR(CMDLINE, cmdline, false), 2562 FEAT_OPR(CPU_TOPOLOGY, cpu_topology, true), 2563 FEAT_OPR(NUMA_TOPOLOGY, numa_topology, true), 2564 FEAT_OPN(BRANCH_STACK, branch_stack, false), 2565 FEAT_OPR(PMU_MAPPINGS, pmu_mappings, false), 2566 FEAT_OPN(GROUP_DESC, group_desc, false), 2567 FEAT_OPN(AUXTRACE, auxtrace, false), 2568 FEAT_OPN(STAT, stat, false), 2569 FEAT_OPN(CACHE, cache, true), 2570 FEAT_OPR(SAMPLE_TIME, sample_time, false), 2571 FEAT_OPR(MEM_TOPOLOGY, mem_topology, true), 2572 }; 2573 2574 struct header_print_data { 2575 FILE *fp; 2576 bool full; /* extended list of headers */ 2577 }; 2578 2579 static int perf_file_section__fprintf_info(struct perf_file_section *section, 2580 struct perf_header *ph, 2581 int feat, int fd, void *data) 2582 { 2583 struct header_print_data *hd = data; 2584 struct feat_fd ff; 2585 2586 if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) { 2587 pr_debug("Failed to lseek to %" PRIu64 " offset for feature " 2588 "%d, continuing...\n", section->offset, feat); 2589 return 0; 2590 } 2591 if (feat >= HEADER_LAST_FEATURE) { 2592 pr_warning("unknown feature %d\n", feat); 2593 return 0; 2594 } 2595 if (!feat_ops[feat].print) 2596 return 0; 2597 2598 ff = (struct feat_fd) { 2599 .fd = fd, 2600 .ph = ph, 2601 }; 2602 2603 if (!feat_ops[feat].full_only || hd->full) 2604 feat_ops[feat].print(&ff, hd->fp); 2605 else 2606 fprintf(hd->fp, "# %s info available, use -I to display\n", 2607 feat_ops[feat].name); 2608 2609 return 0; 2610 } 2611 2612 int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full) 2613 { 2614 struct header_print_data hd; 2615 struct perf_header *header = &session->header; 2616 int fd = perf_data__fd(session->data); 2617 struct stat st; 2618 int ret, bit; 2619 2620 hd.fp = fp; 2621 hd.full = full; 2622 2623 ret = fstat(fd, &st); 2624 if (ret == -1) 2625 return -1; 2626 2627 fprintf(fp, "# captured on : %s", ctime(&st.st_ctime)); 2628 2629 fprintf(fp, "# header version : %u\n", header->version); 2630 fprintf(fp, "# data offset : %" PRIu64 "\n", header->data_offset); 2631 fprintf(fp, "# data size : %" PRIu64 "\n", header->data_size); 2632 fprintf(fp, "# feat offset : %" PRIu64 "\n", header->feat_offset); 2633 2634 perf_header__process_sections(header, fd, &hd, 2635 perf_file_section__fprintf_info); 2636 2637 if (session->data->is_pipe) 2638 return 0; 2639 2640 fprintf(fp, "# missing features: "); 2641 for_each_clear_bit(bit, header->adds_features, HEADER_LAST_FEATURE) { 2642 if (bit) 2643 fprintf(fp, "%s ", feat_ops[bit].name); 2644 } 2645 2646 fprintf(fp, "\n"); 2647 return 0; 2648 } 2649 2650 static int do_write_feat(struct feat_fd *ff, int type, 2651 struct perf_file_section **p, 2652 struct perf_evlist *evlist) 2653 { 2654 int err; 2655 int ret = 0; 2656 2657 if (perf_header__has_feat(ff->ph, type)) { 2658 if (!feat_ops[type].write) 2659 return -1; 2660 2661 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__)) 2662 return -1; 2663 2664 (*p)->offset = lseek(ff->fd, 0, SEEK_CUR); 2665 2666 err = feat_ops[type].write(ff, evlist); 2667 if (err < 0) { 2668 pr_debug("failed to write feature %s\n", feat_ops[type].name); 2669 2670 /* undo anything written */ 2671 lseek(ff->fd, (*p)->offset, SEEK_SET); 2672 2673 return -1; 2674 } 2675 (*p)->size = lseek(ff->fd, 0, SEEK_CUR) - (*p)->offset; 2676 (*p)++; 2677 } 2678 return ret; 2679 } 2680 2681 static int perf_header__adds_write(struct perf_header *header, 2682 struct perf_evlist *evlist, int fd) 2683 { 2684 int nr_sections; 2685 struct feat_fd ff; 2686 struct perf_file_section *feat_sec, *p; 2687 int sec_size; 2688 u64 sec_start; 2689 int feat; 2690 int err; 2691 2692 ff = (struct feat_fd){ 2693 .fd = fd, 2694 .ph = header, 2695 }; 2696 2697 nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS); 2698 if (!nr_sections) 2699 return 0; 2700 2701 feat_sec = p = calloc(nr_sections, sizeof(*feat_sec)); 2702 if (feat_sec == NULL) 2703 return -ENOMEM; 2704 2705 sec_size = sizeof(*feat_sec) * nr_sections; 2706 2707 sec_start = header->feat_offset; 2708 lseek(fd, sec_start + sec_size, SEEK_SET); 2709 2710 for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) { 2711 if (do_write_feat(&ff, feat, &p, evlist)) 2712 perf_header__clear_feat(header, feat); 2713 } 2714 2715 lseek(fd, sec_start, SEEK_SET); 2716 /* 2717 * may write more than needed due to dropped feature, but 2718 * this is okay, reader will skip the mising entries 2719 */ 2720 err = do_write(&ff, feat_sec, sec_size); 2721 if (err < 0) 2722 pr_debug("failed to write feature section\n"); 2723 free(feat_sec); 2724 return err; 2725 } 2726 2727 int perf_header__write_pipe(int fd) 2728 { 2729 struct perf_pipe_file_header f_header; 2730 struct feat_fd ff; 2731 int err; 2732 2733 ff = (struct feat_fd){ .fd = fd }; 2734 2735 f_header = (struct perf_pipe_file_header){ 2736 .magic = PERF_MAGIC, 2737 .size = sizeof(f_header), 2738 }; 2739 2740 err = do_write(&ff, &f_header, sizeof(f_header)); 2741 if (err < 0) { 2742 pr_debug("failed to write perf pipe header\n"); 2743 return err; 2744 } 2745 2746 return 0; 2747 } 2748 2749 int perf_session__write_header(struct perf_session *session, 2750 struct perf_evlist *evlist, 2751 int fd, bool at_exit) 2752 { 2753 struct perf_file_header f_header; 2754 struct perf_file_attr f_attr; 2755 struct perf_header *header = &session->header; 2756 struct perf_evsel *evsel; 2757 struct feat_fd ff; 2758 u64 attr_offset; 2759 int err; 2760 2761 ff = (struct feat_fd){ .fd = fd}; 2762 lseek(fd, sizeof(f_header), SEEK_SET); 2763 2764 evlist__for_each_entry(session->evlist, evsel) { 2765 evsel->id_offset = lseek(fd, 0, SEEK_CUR); 2766 err = do_write(&ff, evsel->id, evsel->ids * sizeof(u64)); 2767 if (err < 0) { 2768 pr_debug("failed to write perf header\n"); 2769 return err; 2770 } 2771 } 2772 2773 attr_offset = lseek(ff.fd, 0, SEEK_CUR); 2774 2775 evlist__for_each_entry(evlist, evsel) { 2776 f_attr = (struct perf_file_attr){ 2777 .attr = evsel->attr, 2778 .ids = { 2779 .offset = evsel->id_offset, 2780 .size = evsel->ids * sizeof(u64), 2781 } 2782 }; 2783 err = do_write(&ff, &f_attr, sizeof(f_attr)); 2784 if (err < 0) { 2785 pr_debug("failed to write perf header attribute\n"); 2786 return err; 2787 } 2788 } 2789 2790 if (!header->data_offset) 2791 header->data_offset = lseek(fd, 0, SEEK_CUR); 2792 header->feat_offset = header->data_offset + header->data_size; 2793 2794 if (at_exit) { 2795 err = perf_header__adds_write(header, evlist, fd); 2796 if (err < 0) 2797 return err; 2798 } 2799 2800 f_header = (struct perf_file_header){ 2801 .magic = PERF_MAGIC, 2802 .size = sizeof(f_header), 2803 .attr_size = sizeof(f_attr), 2804 .attrs = { 2805 .offset = attr_offset, 2806 .size = evlist->nr_entries * sizeof(f_attr), 2807 }, 2808 .data = { 2809 .offset = header->data_offset, 2810 .size = header->data_size, 2811 }, 2812 /* event_types is ignored, store zeros */ 2813 }; 2814 2815 memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features)); 2816 2817 lseek(fd, 0, SEEK_SET); 2818 err = do_write(&ff, &f_header, sizeof(f_header)); 2819 if (err < 0) { 2820 pr_debug("failed to write perf header\n"); 2821 return err; 2822 } 2823 lseek(fd, header->data_offset + header->data_size, SEEK_SET); 2824 2825 return 0; 2826 } 2827 2828 static int perf_header__getbuffer64(struct perf_header *header, 2829 int fd, void *buf, size_t size) 2830 { 2831 if (readn(fd, buf, size) <= 0) 2832 return -1; 2833 2834 if (header->needs_swap) 2835 mem_bswap_64(buf, size); 2836 2837 return 0; 2838 } 2839 2840 int perf_header__process_sections(struct perf_header *header, int fd, 2841 void *data, 2842 int (*process)(struct perf_file_section *section, 2843 struct perf_header *ph, 2844 int feat, int fd, void *data)) 2845 { 2846 struct perf_file_section *feat_sec, *sec; 2847 int nr_sections; 2848 int sec_size; 2849 int feat; 2850 int err; 2851 2852 nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS); 2853 if (!nr_sections) 2854 return 0; 2855 2856 feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec)); 2857 if (!feat_sec) 2858 return -1; 2859 2860 sec_size = sizeof(*feat_sec) * nr_sections; 2861 2862 lseek(fd, header->feat_offset, SEEK_SET); 2863 2864 err = perf_header__getbuffer64(header, fd, feat_sec, sec_size); 2865 if (err < 0) 2866 goto out_free; 2867 2868 for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) { 2869 err = process(sec++, header, feat, fd, data); 2870 if (err < 0) 2871 goto out_free; 2872 } 2873 err = 0; 2874 out_free: 2875 free(feat_sec); 2876 return err; 2877 } 2878 2879 static const int attr_file_abi_sizes[] = { 2880 [0] = PERF_ATTR_SIZE_VER0, 2881 [1] = PERF_ATTR_SIZE_VER1, 2882 [2] = PERF_ATTR_SIZE_VER2, 2883 [3] = PERF_ATTR_SIZE_VER3, 2884 [4] = PERF_ATTR_SIZE_VER4, 2885 0, 2886 }; 2887 2888 /* 2889 * In the legacy file format, the magic number is not used to encode endianness. 2890 * hdr_sz was used to encode endianness. But given that hdr_sz can vary based 2891 * on ABI revisions, we need to try all combinations for all endianness to 2892 * detect the endianness. 2893 */ 2894 static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph) 2895 { 2896 uint64_t ref_size, attr_size; 2897 int i; 2898 2899 for (i = 0 ; attr_file_abi_sizes[i]; i++) { 2900 ref_size = attr_file_abi_sizes[i] 2901 + sizeof(struct perf_file_section); 2902 if (hdr_sz != ref_size) { 2903 attr_size = bswap_64(hdr_sz); 2904 if (attr_size != ref_size) 2905 continue; 2906 2907 ph->needs_swap = true; 2908 } 2909 pr_debug("ABI%d perf.data file detected, need_swap=%d\n", 2910 i, 2911 ph->needs_swap); 2912 return 0; 2913 } 2914 /* could not determine endianness */ 2915 return -1; 2916 } 2917 2918 #define PERF_PIPE_HDR_VER0 16 2919 2920 static const size_t attr_pipe_abi_sizes[] = { 2921 [0] = PERF_PIPE_HDR_VER0, 2922 0, 2923 }; 2924 2925 /* 2926 * In the legacy pipe format, there is an implicit assumption that endiannesss 2927 * between host recording the samples, and host parsing the samples is the 2928 * same. This is not always the case given that the pipe output may always be 2929 * redirected into a file and analyzed on a different machine with possibly a 2930 * different endianness and perf_event ABI revsions in the perf tool itself. 2931 */ 2932 static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph) 2933 { 2934 u64 attr_size; 2935 int i; 2936 2937 for (i = 0 ; attr_pipe_abi_sizes[i]; i++) { 2938 if (hdr_sz != attr_pipe_abi_sizes[i]) { 2939 attr_size = bswap_64(hdr_sz); 2940 if (attr_size != hdr_sz) 2941 continue; 2942 2943 ph->needs_swap = true; 2944 } 2945 pr_debug("Pipe ABI%d perf.data file detected\n", i); 2946 return 0; 2947 } 2948 return -1; 2949 } 2950 2951 bool is_perf_magic(u64 magic) 2952 { 2953 if (!memcmp(&magic, __perf_magic1, sizeof(magic)) 2954 || magic == __perf_magic2 2955 || magic == __perf_magic2_sw) 2956 return true; 2957 2958 return false; 2959 } 2960 2961 static int check_magic_endian(u64 magic, uint64_t hdr_sz, 2962 bool is_pipe, struct perf_header *ph) 2963 { 2964 int ret; 2965 2966 /* check for legacy format */ 2967 ret = memcmp(&magic, __perf_magic1, sizeof(magic)); 2968 if (ret == 0) { 2969 ph->version = PERF_HEADER_VERSION_1; 2970 pr_debug("legacy perf.data format\n"); 2971 if (is_pipe) 2972 return try_all_pipe_abis(hdr_sz, ph); 2973 2974 return try_all_file_abis(hdr_sz, ph); 2975 } 2976 /* 2977 * the new magic number serves two purposes: 2978 * - unique number to identify actual perf.data files 2979 * - encode endianness of file 2980 */ 2981 ph->version = PERF_HEADER_VERSION_2; 2982 2983 /* check magic number with one endianness */ 2984 if (magic == __perf_magic2) 2985 return 0; 2986 2987 /* check magic number with opposite endianness */ 2988 if (magic != __perf_magic2_sw) 2989 return -1; 2990 2991 ph->needs_swap = true; 2992 2993 return 0; 2994 } 2995 2996 int perf_file_header__read(struct perf_file_header *header, 2997 struct perf_header *ph, int fd) 2998 { 2999 ssize_t ret; 3000 3001 lseek(fd, 0, SEEK_SET); 3002 3003 ret = readn(fd, header, sizeof(*header)); 3004 if (ret <= 0) 3005 return -1; 3006 3007 if (check_magic_endian(header->magic, 3008 header->attr_size, false, ph) < 0) { 3009 pr_debug("magic/endian check failed\n"); 3010 return -1; 3011 } 3012 3013 if (ph->needs_swap) { 3014 mem_bswap_64(header, offsetof(struct perf_file_header, 3015 adds_features)); 3016 } 3017 3018 if (header->size != sizeof(*header)) { 3019 /* Support the previous format */ 3020 if (header->size == offsetof(typeof(*header), adds_features)) 3021 bitmap_zero(header->adds_features, HEADER_FEAT_BITS); 3022 else 3023 return -1; 3024 } else if (ph->needs_swap) { 3025 /* 3026 * feature bitmap is declared as an array of unsigned longs -- 3027 * not good since its size can differ between the host that 3028 * generated the data file and the host analyzing the file. 3029 * 3030 * We need to handle endianness, but we don't know the size of 3031 * the unsigned long where the file was generated. Take a best 3032 * guess at determining it: try 64-bit swap first (ie., file 3033 * created on a 64-bit host), and check if the hostname feature 3034 * bit is set (this feature bit is forced on as of fbe96f2). 3035 * If the bit is not, undo the 64-bit swap and try a 32-bit 3036 * swap. If the hostname bit is still not set (e.g., older data 3037 * file), punt and fallback to the original behavior -- 3038 * clearing all feature bits and setting buildid. 3039 */ 3040 mem_bswap_64(&header->adds_features, 3041 BITS_TO_U64(HEADER_FEAT_BITS)); 3042 3043 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) { 3044 /* unswap as u64 */ 3045 mem_bswap_64(&header->adds_features, 3046 BITS_TO_U64(HEADER_FEAT_BITS)); 3047 3048 /* unswap as u32 */ 3049 mem_bswap_32(&header->adds_features, 3050 BITS_TO_U32(HEADER_FEAT_BITS)); 3051 } 3052 3053 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) { 3054 bitmap_zero(header->adds_features, HEADER_FEAT_BITS); 3055 set_bit(HEADER_BUILD_ID, header->adds_features); 3056 } 3057 } 3058 3059 memcpy(&ph->adds_features, &header->adds_features, 3060 sizeof(ph->adds_features)); 3061 3062 ph->data_offset = header->data.offset; 3063 ph->data_size = header->data.size; 3064 ph->feat_offset = header->data.offset + header->data.size; 3065 return 0; 3066 } 3067 3068 static int perf_file_section__process(struct perf_file_section *section, 3069 struct perf_header *ph, 3070 int feat, int fd, void *data) 3071 { 3072 struct feat_fd fdd = { 3073 .fd = fd, 3074 .ph = ph, 3075 .size = section->size, 3076 .offset = section->offset, 3077 }; 3078 3079 if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) { 3080 pr_debug("Failed to lseek to %" PRIu64 " offset for feature " 3081 "%d, continuing...\n", section->offset, feat); 3082 return 0; 3083 } 3084 3085 if (feat >= HEADER_LAST_FEATURE) { 3086 pr_debug("unknown feature %d, continuing...\n", feat); 3087 return 0; 3088 } 3089 3090 if (!feat_ops[feat].process) 3091 return 0; 3092 3093 return feat_ops[feat].process(&fdd, data); 3094 } 3095 3096 static int perf_file_header__read_pipe(struct perf_pipe_file_header *header, 3097 struct perf_header *ph, int fd, 3098 bool repipe) 3099 { 3100 struct feat_fd ff = { 3101 .fd = STDOUT_FILENO, 3102 .ph = ph, 3103 }; 3104 ssize_t ret; 3105 3106 ret = readn(fd, header, sizeof(*header)); 3107 if (ret <= 0) 3108 return -1; 3109 3110 if (check_magic_endian(header->magic, header->size, true, ph) < 0) { 3111 pr_debug("endian/magic failed\n"); 3112 return -1; 3113 } 3114 3115 if (ph->needs_swap) 3116 header->size = bswap_64(header->size); 3117 3118 if (repipe && do_write(&ff, header, sizeof(*header)) < 0) 3119 return -1; 3120 3121 return 0; 3122 } 3123 3124 static int perf_header__read_pipe(struct perf_session *session) 3125 { 3126 struct perf_header *header = &session->header; 3127 struct perf_pipe_file_header f_header; 3128 3129 if (perf_file_header__read_pipe(&f_header, header, 3130 perf_data__fd(session->data), 3131 session->repipe) < 0) { 3132 pr_debug("incompatible file format\n"); 3133 return -EINVAL; 3134 } 3135 3136 return 0; 3137 } 3138 3139 static int read_attr(int fd, struct perf_header *ph, 3140 struct perf_file_attr *f_attr) 3141 { 3142 struct perf_event_attr *attr = &f_attr->attr; 3143 size_t sz, left; 3144 size_t our_sz = sizeof(f_attr->attr); 3145 ssize_t ret; 3146 3147 memset(f_attr, 0, sizeof(*f_attr)); 3148 3149 /* read minimal guaranteed structure */ 3150 ret = readn(fd, attr, PERF_ATTR_SIZE_VER0); 3151 if (ret <= 0) { 3152 pr_debug("cannot read %d bytes of header attr\n", 3153 PERF_ATTR_SIZE_VER0); 3154 return -1; 3155 } 3156 3157 /* on file perf_event_attr size */ 3158 sz = attr->size; 3159 3160 if (ph->needs_swap) 3161 sz = bswap_32(sz); 3162 3163 if (sz == 0) { 3164 /* assume ABI0 */ 3165 sz = PERF_ATTR_SIZE_VER0; 3166 } else if (sz > our_sz) { 3167 pr_debug("file uses a more recent and unsupported ABI" 3168 " (%zu bytes extra)\n", sz - our_sz); 3169 return -1; 3170 } 3171 /* what we have not yet read and that we know about */ 3172 left = sz - PERF_ATTR_SIZE_VER0; 3173 if (left) { 3174 void *ptr = attr; 3175 ptr += PERF_ATTR_SIZE_VER0; 3176 3177 ret = readn(fd, ptr, left); 3178 } 3179 /* read perf_file_section, ids are read in caller */ 3180 ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids)); 3181 3182 return ret <= 0 ? -1 : 0; 3183 } 3184 3185 static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel, 3186 struct pevent *pevent) 3187 { 3188 struct event_format *event; 3189 char bf[128]; 3190 3191 /* already prepared */ 3192 if (evsel->tp_format) 3193 return 0; 3194 3195 if (pevent == NULL) { 3196 pr_debug("broken or missing trace data\n"); 3197 return -1; 3198 } 3199 3200 event = pevent_find_event(pevent, evsel->attr.config); 3201 if (event == NULL) { 3202 pr_debug("cannot find event format for %d\n", (int)evsel->attr.config); 3203 return -1; 3204 } 3205 3206 if (!evsel->name) { 3207 snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name); 3208 evsel->name = strdup(bf); 3209 if (evsel->name == NULL) 3210 return -1; 3211 } 3212 3213 evsel->tp_format = event; 3214 return 0; 3215 } 3216 3217 static int perf_evlist__prepare_tracepoint_events(struct perf_evlist *evlist, 3218 struct pevent *pevent) 3219 { 3220 struct perf_evsel *pos; 3221 3222 evlist__for_each_entry(evlist, pos) { 3223 if (pos->attr.type == PERF_TYPE_TRACEPOINT && 3224 perf_evsel__prepare_tracepoint_event(pos, pevent)) 3225 return -1; 3226 } 3227 3228 return 0; 3229 } 3230 3231 int perf_session__read_header(struct perf_session *session) 3232 { 3233 struct perf_data *data = session->data; 3234 struct perf_header *header = &session->header; 3235 struct perf_file_header f_header; 3236 struct perf_file_attr f_attr; 3237 u64 f_id; 3238 int nr_attrs, nr_ids, i, j; 3239 int fd = perf_data__fd(data); 3240 3241 session->evlist = perf_evlist__new(); 3242 if (session->evlist == NULL) 3243 return -ENOMEM; 3244 3245 session->evlist->env = &header->env; 3246 session->machines.host.env = &header->env; 3247 if (perf_data__is_pipe(data)) 3248 return perf_header__read_pipe(session); 3249 3250 if (perf_file_header__read(&f_header, header, fd) < 0) 3251 return -EINVAL; 3252 3253 /* 3254 * Sanity check that perf.data was written cleanly; data size is 3255 * initialized to 0 and updated only if the on_exit function is run. 3256 * If data size is still 0 then the file contains only partial 3257 * information. Just warn user and process it as much as it can. 3258 */ 3259 if (f_header.data.size == 0) { 3260 pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n" 3261 "Was the 'perf record' command properly terminated?\n", 3262 data->file.path); 3263 } 3264 3265 nr_attrs = f_header.attrs.size / f_header.attr_size; 3266 lseek(fd, f_header.attrs.offset, SEEK_SET); 3267 3268 for (i = 0; i < nr_attrs; i++) { 3269 struct perf_evsel *evsel; 3270 off_t tmp; 3271 3272 if (read_attr(fd, header, &f_attr) < 0) 3273 goto out_errno; 3274 3275 if (header->needs_swap) { 3276 f_attr.ids.size = bswap_64(f_attr.ids.size); 3277 f_attr.ids.offset = bswap_64(f_attr.ids.offset); 3278 perf_event__attr_swap(&f_attr.attr); 3279 } 3280 3281 tmp = lseek(fd, 0, SEEK_CUR); 3282 evsel = perf_evsel__new(&f_attr.attr); 3283 3284 if (evsel == NULL) 3285 goto out_delete_evlist; 3286 3287 evsel->needs_swap = header->needs_swap; 3288 /* 3289 * Do it before so that if perf_evsel__alloc_id fails, this 3290 * entry gets purged too at perf_evlist__delete(). 3291 */ 3292 perf_evlist__add(session->evlist, evsel); 3293 3294 nr_ids = f_attr.ids.size / sizeof(u64); 3295 /* 3296 * We don't have the cpu and thread maps on the header, so 3297 * for allocating the perf_sample_id table we fake 1 cpu and 3298 * hattr->ids threads. 3299 */ 3300 if (perf_evsel__alloc_id(evsel, 1, nr_ids)) 3301 goto out_delete_evlist; 3302 3303 lseek(fd, f_attr.ids.offset, SEEK_SET); 3304 3305 for (j = 0; j < nr_ids; j++) { 3306 if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id))) 3307 goto out_errno; 3308 3309 perf_evlist__id_add(session->evlist, evsel, 0, j, f_id); 3310 } 3311 3312 lseek(fd, tmp, SEEK_SET); 3313 } 3314 3315 symbol_conf.nr_events = nr_attrs; 3316 3317 perf_header__process_sections(header, fd, &session->tevent, 3318 perf_file_section__process); 3319 3320 if (perf_evlist__prepare_tracepoint_events(session->evlist, 3321 session->tevent.pevent)) 3322 goto out_delete_evlist; 3323 3324 return 0; 3325 out_errno: 3326 return -errno; 3327 3328 out_delete_evlist: 3329 perf_evlist__delete(session->evlist); 3330 session->evlist = NULL; 3331 return -ENOMEM; 3332 } 3333 3334 int perf_event__synthesize_attr(struct perf_tool *tool, 3335 struct perf_event_attr *attr, u32 ids, u64 *id, 3336 perf_event__handler_t process) 3337 { 3338 union perf_event *ev; 3339 size_t size; 3340 int err; 3341 3342 size = sizeof(struct perf_event_attr); 3343 size = PERF_ALIGN(size, sizeof(u64)); 3344 size += sizeof(struct perf_event_header); 3345 size += ids * sizeof(u64); 3346 3347 ev = malloc(size); 3348 3349 if (ev == NULL) 3350 return -ENOMEM; 3351 3352 ev->attr.attr = *attr; 3353 memcpy(ev->attr.id, id, ids * sizeof(u64)); 3354 3355 ev->attr.header.type = PERF_RECORD_HEADER_ATTR; 3356 ev->attr.header.size = (u16)size; 3357 3358 if (ev->attr.header.size == size) 3359 err = process(tool, ev, NULL, NULL); 3360 else 3361 err = -E2BIG; 3362 3363 free(ev); 3364 3365 return err; 3366 } 3367 3368 int perf_event__synthesize_features(struct perf_tool *tool, 3369 struct perf_session *session, 3370 struct perf_evlist *evlist, 3371 perf_event__handler_t process) 3372 { 3373 struct perf_header *header = &session->header; 3374 struct feat_fd ff; 3375 struct feature_event *fe; 3376 size_t sz, sz_hdr; 3377 int feat, ret; 3378 3379 sz_hdr = sizeof(fe->header); 3380 sz = sizeof(union perf_event); 3381 /* get a nice alignment */ 3382 sz = PERF_ALIGN(sz, page_size); 3383 3384 memset(&ff, 0, sizeof(ff)); 3385 3386 ff.buf = malloc(sz); 3387 if (!ff.buf) 3388 return -ENOMEM; 3389 3390 ff.size = sz - sz_hdr; 3391 3392 for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) { 3393 if (!feat_ops[feat].synthesize) { 3394 pr_debug("No record header feature for header :%d\n", feat); 3395 continue; 3396 } 3397 3398 ff.offset = sizeof(*fe); 3399 3400 ret = feat_ops[feat].write(&ff, evlist); 3401 if (ret || ff.offset <= (ssize_t)sizeof(*fe)) { 3402 pr_debug("Error writing feature\n"); 3403 continue; 3404 } 3405 /* ff.buf may have changed due to realloc in do_write() */ 3406 fe = ff.buf; 3407 memset(fe, 0, sizeof(*fe)); 3408 3409 fe->feat_id = feat; 3410 fe->header.type = PERF_RECORD_HEADER_FEATURE; 3411 fe->header.size = ff.offset; 3412 3413 ret = process(tool, ff.buf, NULL, NULL); 3414 if (ret) { 3415 free(ff.buf); 3416 return ret; 3417 } 3418 } 3419 3420 /* Send HEADER_LAST_FEATURE mark. */ 3421 fe = ff.buf; 3422 fe->feat_id = HEADER_LAST_FEATURE; 3423 fe->header.type = PERF_RECORD_HEADER_FEATURE; 3424 fe->header.size = sizeof(*fe); 3425 3426 ret = process(tool, ff.buf, NULL, NULL); 3427 3428 free(ff.buf); 3429 return ret; 3430 } 3431 3432 int perf_event__process_feature(struct perf_tool *tool, 3433 union perf_event *event, 3434 struct perf_session *session __maybe_unused) 3435 { 3436 struct feat_fd ff = { .fd = 0 }; 3437 struct feature_event *fe = (struct feature_event *)event; 3438 int type = fe->header.type; 3439 u64 feat = fe->feat_id; 3440 3441 if (type < 0 || type >= PERF_RECORD_HEADER_MAX) { 3442 pr_warning("invalid record type %d in pipe-mode\n", type); 3443 return 0; 3444 } 3445 if (feat == HEADER_RESERVED || feat > HEADER_LAST_FEATURE) { 3446 pr_warning("invalid record type %d in pipe-mode\n", type); 3447 return -1; 3448 } 3449 3450 if (!feat_ops[feat].process) 3451 return 0; 3452 3453 ff.buf = (void *)fe->data; 3454 ff.size = event->header.size - sizeof(event->header); 3455 ff.ph = &session->header; 3456 3457 if (feat_ops[feat].process(&ff, NULL)) 3458 return -1; 3459 3460 if (!feat_ops[feat].print || !tool->show_feat_hdr) 3461 return 0; 3462 3463 if (!feat_ops[feat].full_only || 3464 tool->show_feat_hdr >= SHOW_FEAT_HEADER_FULL_INFO) { 3465 feat_ops[feat].print(&ff, stdout); 3466 } else { 3467 fprintf(stdout, "# %s info available, use -I to display\n", 3468 feat_ops[feat].name); 3469 } 3470 3471 return 0; 3472 } 3473 3474 static struct event_update_event * 3475 event_update_event__new(size_t size, u64 type, u64 id) 3476 { 3477 struct event_update_event *ev; 3478 3479 size += sizeof(*ev); 3480 size = PERF_ALIGN(size, sizeof(u64)); 3481 3482 ev = zalloc(size); 3483 if (ev) { 3484 ev->header.type = PERF_RECORD_EVENT_UPDATE; 3485 ev->header.size = (u16)size; 3486 ev->type = type; 3487 ev->id = id; 3488 } 3489 return ev; 3490 } 3491 3492 int 3493 perf_event__synthesize_event_update_unit(struct perf_tool *tool, 3494 struct perf_evsel *evsel, 3495 perf_event__handler_t process) 3496 { 3497 struct event_update_event *ev; 3498 size_t size = strlen(evsel->unit); 3499 int err; 3500 3501 ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->id[0]); 3502 if (ev == NULL) 3503 return -ENOMEM; 3504 3505 strncpy(ev->data, evsel->unit, size); 3506 err = process(tool, (union perf_event *)ev, NULL, NULL); 3507 free(ev); 3508 return err; 3509 } 3510 3511 int 3512 perf_event__synthesize_event_update_scale(struct perf_tool *tool, 3513 struct perf_evsel *evsel, 3514 perf_event__handler_t process) 3515 { 3516 struct event_update_event *ev; 3517 struct event_update_event_scale *ev_data; 3518 int err; 3519 3520 ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->id[0]); 3521 if (ev == NULL) 3522 return -ENOMEM; 3523 3524 ev_data = (struct event_update_event_scale *) ev->data; 3525 ev_data->scale = evsel->scale; 3526 err = process(tool, (union perf_event*) ev, NULL, NULL); 3527 free(ev); 3528 return err; 3529 } 3530 3531 int 3532 perf_event__synthesize_event_update_name(struct perf_tool *tool, 3533 struct perf_evsel *evsel, 3534 perf_event__handler_t process) 3535 { 3536 struct event_update_event *ev; 3537 size_t len = strlen(evsel->name); 3538 int err; 3539 3540 ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->id[0]); 3541 if (ev == NULL) 3542 return -ENOMEM; 3543 3544 strncpy(ev->data, evsel->name, len); 3545 err = process(tool, (union perf_event*) ev, NULL, NULL); 3546 free(ev); 3547 return err; 3548 } 3549 3550 int 3551 perf_event__synthesize_event_update_cpus(struct perf_tool *tool, 3552 struct perf_evsel *evsel, 3553 perf_event__handler_t process) 3554 { 3555 size_t size = sizeof(struct event_update_event); 3556 struct event_update_event *ev; 3557 int max, err; 3558 u16 type; 3559 3560 if (!evsel->own_cpus) 3561 return 0; 3562 3563 ev = cpu_map_data__alloc(evsel->own_cpus, &size, &type, &max); 3564 if (!ev) 3565 return -ENOMEM; 3566 3567 ev->header.type = PERF_RECORD_EVENT_UPDATE; 3568 ev->header.size = (u16)size; 3569 ev->type = PERF_EVENT_UPDATE__CPUS; 3570 ev->id = evsel->id[0]; 3571 3572 cpu_map_data__synthesize((struct cpu_map_data *) ev->data, 3573 evsel->own_cpus, 3574 type, max); 3575 3576 err = process(tool, (union perf_event*) ev, NULL, NULL); 3577 free(ev); 3578 return err; 3579 } 3580 3581 size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp) 3582 { 3583 struct event_update_event *ev = &event->event_update; 3584 struct event_update_event_scale *ev_scale; 3585 struct event_update_event_cpus *ev_cpus; 3586 struct cpu_map *map; 3587 size_t ret; 3588 3589 ret = fprintf(fp, "\n... id: %" PRIu64 "\n", ev->id); 3590 3591 switch (ev->type) { 3592 case PERF_EVENT_UPDATE__SCALE: 3593 ev_scale = (struct event_update_event_scale *) ev->data; 3594 ret += fprintf(fp, "... scale: %f\n", ev_scale->scale); 3595 break; 3596 case PERF_EVENT_UPDATE__UNIT: 3597 ret += fprintf(fp, "... unit: %s\n", ev->data); 3598 break; 3599 case PERF_EVENT_UPDATE__NAME: 3600 ret += fprintf(fp, "... name: %s\n", ev->data); 3601 break; 3602 case PERF_EVENT_UPDATE__CPUS: 3603 ev_cpus = (struct event_update_event_cpus *) ev->data; 3604 ret += fprintf(fp, "... "); 3605 3606 map = cpu_map__new_data(&ev_cpus->cpus); 3607 if (map) 3608 ret += cpu_map__fprintf(map, fp); 3609 else 3610 ret += fprintf(fp, "failed to get cpus\n"); 3611 break; 3612 default: 3613 ret += fprintf(fp, "... unknown type\n"); 3614 break; 3615 } 3616 3617 return ret; 3618 } 3619 3620 int perf_event__synthesize_attrs(struct perf_tool *tool, 3621 struct perf_session *session, 3622 perf_event__handler_t process) 3623 { 3624 struct perf_evsel *evsel; 3625 int err = 0; 3626 3627 evlist__for_each_entry(session->evlist, evsel) { 3628 err = perf_event__synthesize_attr(tool, &evsel->attr, evsel->ids, 3629 evsel->id, process); 3630 if (err) { 3631 pr_debug("failed to create perf header attribute\n"); 3632 return err; 3633 } 3634 } 3635 3636 return err; 3637 } 3638 3639 static bool has_unit(struct perf_evsel *counter) 3640 { 3641 return counter->unit && *counter->unit; 3642 } 3643 3644 static bool has_scale(struct perf_evsel *counter) 3645 { 3646 return counter->scale != 1; 3647 } 3648 3649 int perf_event__synthesize_extra_attr(struct perf_tool *tool, 3650 struct perf_evlist *evsel_list, 3651 perf_event__handler_t process, 3652 bool is_pipe) 3653 { 3654 struct perf_evsel *counter; 3655 int err; 3656 3657 /* 3658 * Synthesize other events stuff not carried within 3659 * attr event - unit, scale, name 3660 */ 3661 evlist__for_each_entry(evsel_list, counter) { 3662 if (!counter->supported) 3663 continue; 3664 3665 /* 3666 * Synthesize unit and scale only if it's defined. 3667 */ 3668 if (has_unit(counter)) { 3669 err = perf_event__synthesize_event_update_unit(tool, counter, process); 3670 if (err < 0) { 3671 pr_err("Couldn't synthesize evsel unit.\n"); 3672 return err; 3673 } 3674 } 3675 3676 if (has_scale(counter)) { 3677 err = perf_event__synthesize_event_update_scale(tool, counter, process); 3678 if (err < 0) { 3679 pr_err("Couldn't synthesize evsel counter.\n"); 3680 return err; 3681 } 3682 } 3683 3684 if (counter->own_cpus) { 3685 err = perf_event__synthesize_event_update_cpus(tool, counter, process); 3686 if (err < 0) { 3687 pr_err("Couldn't synthesize evsel cpus.\n"); 3688 return err; 3689 } 3690 } 3691 3692 /* 3693 * Name is needed only for pipe output, 3694 * perf.data carries event names. 3695 */ 3696 if (is_pipe) { 3697 err = perf_event__synthesize_event_update_name(tool, counter, process); 3698 if (err < 0) { 3699 pr_err("Couldn't synthesize evsel name.\n"); 3700 return err; 3701 } 3702 } 3703 } 3704 return 0; 3705 } 3706 3707 int perf_event__process_attr(struct perf_tool *tool __maybe_unused, 3708 union perf_event *event, 3709 struct perf_evlist **pevlist) 3710 { 3711 u32 i, ids, n_ids; 3712 struct perf_evsel *evsel; 3713 struct perf_evlist *evlist = *pevlist; 3714 3715 if (evlist == NULL) { 3716 *pevlist = evlist = perf_evlist__new(); 3717 if (evlist == NULL) 3718 return -ENOMEM; 3719 } 3720 3721 evsel = perf_evsel__new(&event->attr.attr); 3722 if (evsel == NULL) 3723 return -ENOMEM; 3724 3725 perf_evlist__add(evlist, evsel); 3726 3727 ids = event->header.size; 3728 ids -= (void *)&event->attr.id - (void *)event; 3729 n_ids = ids / sizeof(u64); 3730 /* 3731 * We don't have the cpu and thread maps on the header, so 3732 * for allocating the perf_sample_id table we fake 1 cpu and 3733 * hattr->ids threads. 3734 */ 3735 if (perf_evsel__alloc_id(evsel, 1, n_ids)) 3736 return -ENOMEM; 3737 3738 for (i = 0; i < n_ids; i++) { 3739 perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]); 3740 } 3741 3742 symbol_conf.nr_events = evlist->nr_entries; 3743 3744 return 0; 3745 } 3746 3747 int perf_event__process_event_update(struct perf_tool *tool __maybe_unused, 3748 union perf_event *event, 3749 struct perf_evlist **pevlist) 3750 { 3751 struct event_update_event *ev = &event->event_update; 3752 struct event_update_event_scale *ev_scale; 3753 struct event_update_event_cpus *ev_cpus; 3754 struct perf_evlist *evlist; 3755 struct perf_evsel *evsel; 3756 struct cpu_map *map; 3757 3758 if (!pevlist || *pevlist == NULL) 3759 return -EINVAL; 3760 3761 evlist = *pevlist; 3762 3763 evsel = perf_evlist__id2evsel(evlist, ev->id); 3764 if (evsel == NULL) 3765 return -EINVAL; 3766 3767 switch (ev->type) { 3768 case PERF_EVENT_UPDATE__UNIT: 3769 evsel->unit = strdup(ev->data); 3770 break; 3771 case PERF_EVENT_UPDATE__NAME: 3772 evsel->name = strdup(ev->data); 3773 break; 3774 case PERF_EVENT_UPDATE__SCALE: 3775 ev_scale = (struct event_update_event_scale *) ev->data; 3776 evsel->scale = ev_scale->scale; 3777 break; 3778 case PERF_EVENT_UPDATE__CPUS: 3779 ev_cpus = (struct event_update_event_cpus *) ev->data; 3780 3781 map = cpu_map__new_data(&ev_cpus->cpus); 3782 if (map) 3783 evsel->own_cpus = map; 3784 else 3785 pr_err("failed to get event_update cpus\n"); 3786 default: 3787 break; 3788 } 3789 3790 return 0; 3791 } 3792 3793 int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd, 3794 struct perf_evlist *evlist, 3795 perf_event__handler_t process) 3796 { 3797 union perf_event ev; 3798 struct tracing_data *tdata; 3799 ssize_t size = 0, aligned_size = 0, padding; 3800 struct feat_fd ff; 3801 int err __maybe_unused = 0; 3802 3803 /* 3804 * We are going to store the size of the data followed 3805 * by the data contents. Since the fd descriptor is a pipe, 3806 * we cannot seek back to store the size of the data once 3807 * we know it. Instead we: 3808 * 3809 * - write the tracing data to the temp file 3810 * - get/write the data size to pipe 3811 * - write the tracing data from the temp file 3812 * to the pipe 3813 */ 3814 tdata = tracing_data_get(&evlist->entries, fd, true); 3815 if (!tdata) 3816 return -1; 3817 3818 memset(&ev, 0, sizeof(ev)); 3819 3820 ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA; 3821 size = tdata->size; 3822 aligned_size = PERF_ALIGN(size, sizeof(u64)); 3823 padding = aligned_size - size; 3824 ev.tracing_data.header.size = sizeof(ev.tracing_data); 3825 ev.tracing_data.size = aligned_size; 3826 3827 process(tool, &ev, NULL, NULL); 3828 3829 /* 3830 * The put function will copy all the tracing data 3831 * stored in temp file to the pipe. 3832 */ 3833 tracing_data_put(tdata); 3834 3835 ff = (struct feat_fd){ .fd = fd }; 3836 if (write_padded(&ff, NULL, 0, padding)) 3837 return -1; 3838 3839 return aligned_size; 3840 } 3841 3842 int perf_event__process_tracing_data(struct perf_tool *tool __maybe_unused, 3843 union perf_event *event, 3844 struct perf_session *session) 3845 { 3846 ssize_t size_read, padding, size = event->tracing_data.size; 3847 int fd = perf_data__fd(session->data); 3848 off_t offset = lseek(fd, 0, SEEK_CUR); 3849 char buf[BUFSIZ]; 3850 3851 /* setup for reading amidst mmap */ 3852 lseek(fd, offset + sizeof(struct tracing_data_event), 3853 SEEK_SET); 3854 3855 size_read = trace_report(fd, &session->tevent, 3856 session->repipe); 3857 padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read; 3858 3859 if (readn(fd, buf, padding) < 0) { 3860 pr_err("%s: reading input file", __func__); 3861 return -1; 3862 } 3863 if (session->repipe) { 3864 int retw = write(STDOUT_FILENO, buf, padding); 3865 if (retw <= 0 || retw != padding) { 3866 pr_err("%s: repiping tracing data padding", __func__); 3867 return -1; 3868 } 3869 } 3870 3871 if (size_read + padding != size) { 3872 pr_err("%s: tracing data size mismatch", __func__); 3873 return -1; 3874 } 3875 3876 perf_evlist__prepare_tracepoint_events(session->evlist, 3877 session->tevent.pevent); 3878 3879 return size_read + padding; 3880 } 3881 3882 int perf_event__synthesize_build_id(struct perf_tool *tool, 3883 struct dso *pos, u16 misc, 3884 perf_event__handler_t process, 3885 struct machine *machine) 3886 { 3887 union perf_event ev; 3888 size_t len; 3889 int err = 0; 3890 3891 if (!pos->hit) 3892 return err; 3893 3894 memset(&ev, 0, sizeof(ev)); 3895 3896 len = pos->long_name_len + 1; 3897 len = PERF_ALIGN(len, NAME_ALIGN); 3898 memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id)); 3899 ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID; 3900 ev.build_id.header.misc = misc; 3901 ev.build_id.pid = machine->pid; 3902 ev.build_id.header.size = sizeof(ev.build_id) + len; 3903 memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len); 3904 3905 err = process(tool, &ev, NULL, machine); 3906 3907 return err; 3908 } 3909 3910 int perf_event__process_build_id(struct perf_tool *tool __maybe_unused, 3911 union perf_event *event, 3912 struct perf_session *session) 3913 { 3914 __event_process_build_id(&event->build_id, 3915 event->build_id.filename, 3916 session); 3917 return 0; 3918 } 3919