1 #include "util.h" 2 #include <sys/types.h> 3 #include <byteswap.h> 4 #include <unistd.h> 5 #include <stdio.h> 6 #include <stdlib.h> 7 #include <linux/list.h> 8 #include <linux/kernel.h> 9 #include <linux/bitops.h> 10 #include <sys/utsname.h> 11 12 #include "evlist.h" 13 #include "evsel.h" 14 #include "header.h" 15 #include "../perf.h" 16 #include "trace-event.h" 17 #include "session.h" 18 #include "symbol.h" 19 #include "debug.h" 20 #include "cpumap.h" 21 #include "pmu.h" 22 #include "vdso.h" 23 #include "strbuf.h" 24 #include "build-id.h" 25 #include "data.h" 26 #include <api/fs/fs.h> 27 #include "asm/bug.h" 28 29 /* 30 * magic2 = "PERFILE2" 31 * must be a numerical value to let the endianness 32 * determine the memory layout. That way we are able 33 * to detect endianness when reading the perf.data file 34 * back. 35 * 36 * we check for legacy (PERFFILE) format. 37 */ 38 static const char *__perf_magic1 = "PERFFILE"; 39 static const u64 __perf_magic2 = 0x32454c4946524550ULL; 40 static const u64 __perf_magic2_sw = 0x50455246494c4532ULL; 41 42 #define PERF_MAGIC __perf_magic2 43 44 const char perf_version_string[] = PERF_VERSION; 45 46 struct perf_file_attr { 47 struct perf_event_attr attr; 48 struct perf_file_section ids; 49 }; 50 51 void perf_header__set_feat(struct perf_header *header, int feat) 52 { 53 set_bit(feat, header->adds_features); 54 } 55 56 void perf_header__clear_feat(struct perf_header *header, int feat) 57 { 58 clear_bit(feat, header->adds_features); 59 } 60 61 bool perf_header__has_feat(const struct perf_header *header, int feat) 62 { 63 return test_bit(feat, header->adds_features); 64 } 65 66 static int do_write(int fd, const void *buf, size_t size) 67 { 68 while (size) { 69 int ret = write(fd, buf, size); 70 71 if (ret < 0) 72 return -errno; 73 74 size -= ret; 75 buf += ret; 76 } 77 78 return 0; 79 } 80 81 int write_padded(int fd, const void *bf, size_t count, size_t count_aligned) 82 { 83 static const char zero_buf[NAME_ALIGN]; 84 int err = 
do_write(fd, bf, count); 85 86 if (!err) 87 err = do_write(fd, zero_buf, count_aligned - count); 88 89 return err; 90 } 91 92 #define string_size(str) \ 93 (PERF_ALIGN((strlen(str) + 1), NAME_ALIGN) + sizeof(u32)) 94 95 static int do_write_string(int fd, const char *str) 96 { 97 u32 len, olen; 98 int ret; 99 100 olen = strlen(str) + 1; 101 len = PERF_ALIGN(olen, NAME_ALIGN); 102 103 /* write len, incl. \0 */ 104 ret = do_write(fd, &len, sizeof(len)); 105 if (ret < 0) 106 return ret; 107 108 return write_padded(fd, str, olen, len); 109 } 110 111 static char *do_read_string(int fd, struct perf_header *ph) 112 { 113 ssize_t sz, ret; 114 u32 len; 115 char *buf; 116 117 sz = readn(fd, &len, sizeof(len)); 118 if (sz < (ssize_t)sizeof(len)) 119 return NULL; 120 121 if (ph->needs_swap) 122 len = bswap_32(len); 123 124 buf = malloc(len); 125 if (!buf) 126 return NULL; 127 128 ret = readn(fd, buf, len); 129 if (ret == (ssize_t)len) { 130 /* 131 * strings are padded by zeroes 132 * thus the actual strlen of buf 133 * may be less than len 134 */ 135 return buf; 136 } 137 138 free(buf); 139 return NULL; 140 } 141 142 static int write_tracing_data(int fd, struct perf_header *h __maybe_unused, 143 struct perf_evlist *evlist) 144 { 145 return read_tracing_data(fd, &evlist->entries); 146 } 147 148 149 static int write_build_id(int fd, struct perf_header *h, 150 struct perf_evlist *evlist __maybe_unused) 151 { 152 struct perf_session *session; 153 int err; 154 155 session = container_of(h, struct perf_session, header); 156 157 if (!perf_session__read_build_ids(session, true)) 158 return -1; 159 160 err = perf_session__write_buildid_table(session, fd); 161 if (err < 0) { 162 pr_debug("failed to write buildid table\n"); 163 return err; 164 } 165 perf_session__cache_build_ids(session); 166 167 return 0; 168 } 169 170 static int write_hostname(int fd, struct perf_header *h __maybe_unused, 171 struct perf_evlist *evlist __maybe_unused) 172 { 173 struct utsname uts; 174 int ret; 175 176 
ret = uname(&uts); 177 if (ret < 0) 178 return -1; 179 180 return do_write_string(fd, uts.nodename); 181 } 182 183 static int write_osrelease(int fd, struct perf_header *h __maybe_unused, 184 struct perf_evlist *evlist __maybe_unused) 185 { 186 struct utsname uts; 187 int ret; 188 189 ret = uname(&uts); 190 if (ret < 0) 191 return -1; 192 193 return do_write_string(fd, uts.release); 194 } 195 196 static int write_arch(int fd, struct perf_header *h __maybe_unused, 197 struct perf_evlist *evlist __maybe_unused) 198 { 199 struct utsname uts; 200 int ret; 201 202 ret = uname(&uts); 203 if (ret < 0) 204 return -1; 205 206 return do_write_string(fd, uts.machine); 207 } 208 209 static int write_version(int fd, struct perf_header *h __maybe_unused, 210 struct perf_evlist *evlist __maybe_unused) 211 { 212 return do_write_string(fd, perf_version_string); 213 } 214 215 static int __write_cpudesc(int fd, const char *cpuinfo_proc) 216 { 217 FILE *file; 218 char *buf = NULL; 219 char *s, *p; 220 const char *search = cpuinfo_proc; 221 size_t len = 0; 222 int ret = -1; 223 224 if (!search) 225 return -1; 226 227 file = fopen("/proc/cpuinfo", "r"); 228 if (!file) 229 return -1; 230 231 while (getline(&buf, &len, file) > 0) { 232 ret = strncmp(buf, search, strlen(search)); 233 if (!ret) 234 break; 235 } 236 237 if (ret) { 238 ret = -1; 239 goto done; 240 } 241 242 s = buf; 243 244 p = strchr(buf, ':'); 245 if (p && *(p+1) == ' ' && *(p+2)) 246 s = p + 2; 247 p = strchr(s, '\n'); 248 if (p) 249 *p = '\0'; 250 251 /* squash extra space characters (branding string) */ 252 p = s; 253 while (*p) { 254 if (isspace(*p)) { 255 char *r = p + 1; 256 char *q = r; 257 *p = ' '; 258 while (*q && isspace(*q)) 259 q++; 260 if (q != (p+1)) 261 while ((*r++ = *q++)); 262 } 263 p++; 264 } 265 ret = do_write_string(fd, s); 266 done: 267 free(buf); 268 fclose(file); 269 return ret; 270 } 271 272 static int write_cpudesc(int fd, struct perf_header *h __maybe_unused, 273 struct perf_evlist *evlist 
__maybe_unused) 274 { 275 #ifndef CPUINFO_PROC 276 #define CPUINFO_PROC {"model name", } 277 #endif 278 const char *cpuinfo_procs[] = CPUINFO_PROC; 279 unsigned int i; 280 281 for (i = 0; i < ARRAY_SIZE(cpuinfo_procs); i++) { 282 int ret; 283 ret = __write_cpudesc(fd, cpuinfo_procs[i]); 284 if (ret >= 0) 285 return ret; 286 } 287 return -1; 288 } 289 290 291 static int write_nrcpus(int fd, struct perf_header *h __maybe_unused, 292 struct perf_evlist *evlist __maybe_unused) 293 { 294 long nr; 295 u32 nrc, nra; 296 int ret; 297 298 nrc = cpu__max_present_cpu(); 299 300 nr = sysconf(_SC_NPROCESSORS_ONLN); 301 if (nr < 0) 302 return -1; 303 304 nra = (u32)(nr & UINT_MAX); 305 306 ret = do_write(fd, &nrc, sizeof(nrc)); 307 if (ret < 0) 308 return ret; 309 310 return do_write(fd, &nra, sizeof(nra)); 311 } 312 313 static int write_event_desc(int fd, struct perf_header *h __maybe_unused, 314 struct perf_evlist *evlist) 315 { 316 struct perf_evsel *evsel; 317 u32 nre, nri, sz; 318 int ret; 319 320 nre = evlist->nr_entries; 321 322 /* 323 * write number of events 324 */ 325 ret = do_write(fd, &nre, sizeof(nre)); 326 if (ret < 0) 327 return ret; 328 329 /* 330 * size of perf_event_attr struct 331 */ 332 sz = (u32)sizeof(evsel->attr); 333 ret = do_write(fd, &sz, sizeof(sz)); 334 if (ret < 0) 335 return ret; 336 337 evlist__for_each_entry(evlist, evsel) { 338 ret = do_write(fd, &evsel->attr, sz); 339 if (ret < 0) 340 return ret; 341 /* 342 * write number of unique id per event 343 * there is one id per instance of an event 344 * 345 * copy into an nri to be independent of the 346 * type of ids, 347 */ 348 nri = evsel->ids; 349 ret = do_write(fd, &nri, sizeof(nri)); 350 if (ret < 0) 351 return ret; 352 353 /* 354 * write event string as passed on cmdline 355 */ 356 ret = do_write_string(fd, perf_evsel__name(evsel)); 357 if (ret < 0) 358 return ret; 359 /* 360 * write unique ids for this event 361 */ 362 ret = do_write(fd, evsel->id, evsel->ids * sizeof(u64)); 363 if (ret < 0) 
364 return ret; 365 } 366 return 0; 367 } 368 369 static int write_cmdline(int fd, struct perf_header *h __maybe_unused, 370 struct perf_evlist *evlist __maybe_unused) 371 { 372 char buf[MAXPATHLEN]; 373 char proc[32]; 374 u32 n; 375 int i, ret; 376 377 /* 378 * actual atual path to perf binary 379 */ 380 sprintf(proc, "/proc/%d/exe", getpid()); 381 ret = readlink(proc, buf, sizeof(buf)); 382 if (ret <= 0) 383 return -1; 384 385 /* readlink() does not add null termination */ 386 buf[ret] = '\0'; 387 388 /* account for binary path */ 389 n = perf_env.nr_cmdline + 1; 390 391 ret = do_write(fd, &n, sizeof(n)); 392 if (ret < 0) 393 return ret; 394 395 ret = do_write_string(fd, buf); 396 if (ret < 0) 397 return ret; 398 399 for (i = 0 ; i < perf_env.nr_cmdline; i++) { 400 ret = do_write_string(fd, perf_env.cmdline_argv[i]); 401 if (ret < 0) 402 return ret; 403 } 404 return 0; 405 } 406 407 #define CORE_SIB_FMT \ 408 "/sys/devices/system/cpu/cpu%d/topology/core_siblings_list" 409 #define THRD_SIB_FMT \ 410 "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list" 411 412 struct cpu_topo { 413 u32 cpu_nr; 414 u32 core_sib; 415 u32 thread_sib; 416 char **core_siblings; 417 char **thread_siblings; 418 }; 419 420 static int build_cpu_topo(struct cpu_topo *tp, int cpu) 421 { 422 FILE *fp; 423 char filename[MAXPATHLEN]; 424 char *buf = NULL, *p; 425 size_t len = 0; 426 ssize_t sret; 427 u32 i = 0; 428 int ret = -1; 429 430 sprintf(filename, CORE_SIB_FMT, cpu); 431 fp = fopen(filename, "r"); 432 if (!fp) 433 goto try_threads; 434 435 sret = getline(&buf, &len, fp); 436 fclose(fp); 437 if (sret <= 0) 438 goto try_threads; 439 440 p = strchr(buf, '\n'); 441 if (p) 442 *p = '\0'; 443 444 for (i = 0; i < tp->core_sib; i++) { 445 if (!strcmp(buf, tp->core_siblings[i])) 446 break; 447 } 448 if (i == tp->core_sib) { 449 tp->core_siblings[i] = buf; 450 tp->core_sib++; 451 buf = NULL; 452 len = 0; 453 } 454 ret = 0; 455 456 try_threads: 457 sprintf(filename, THRD_SIB_FMT, cpu); 458 
fp = fopen(filename, "r"); 459 if (!fp) 460 goto done; 461 462 if (getline(&buf, &len, fp) <= 0) 463 goto done; 464 465 p = strchr(buf, '\n'); 466 if (p) 467 *p = '\0'; 468 469 for (i = 0; i < tp->thread_sib; i++) { 470 if (!strcmp(buf, tp->thread_siblings[i])) 471 break; 472 } 473 if (i == tp->thread_sib) { 474 tp->thread_siblings[i] = buf; 475 tp->thread_sib++; 476 buf = NULL; 477 } 478 ret = 0; 479 done: 480 if(fp) 481 fclose(fp); 482 free(buf); 483 return ret; 484 } 485 486 static void free_cpu_topo(struct cpu_topo *tp) 487 { 488 u32 i; 489 490 if (!tp) 491 return; 492 493 for (i = 0 ; i < tp->core_sib; i++) 494 zfree(&tp->core_siblings[i]); 495 496 for (i = 0 ; i < tp->thread_sib; i++) 497 zfree(&tp->thread_siblings[i]); 498 499 free(tp); 500 } 501 502 static struct cpu_topo *build_cpu_topology(void) 503 { 504 struct cpu_topo *tp = NULL; 505 void *addr; 506 u32 nr, i; 507 size_t sz; 508 long ncpus; 509 int ret = -1; 510 struct cpu_map *map; 511 512 ncpus = cpu__max_present_cpu(); 513 514 /* build online CPU map */ 515 map = cpu_map__new(NULL); 516 if (map == NULL) { 517 pr_debug("failed to get system cpumap\n"); 518 return NULL; 519 } 520 521 nr = (u32)(ncpus & UINT_MAX); 522 523 sz = nr * sizeof(char *); 524 addr = calloc(1, sizeof(*tp) + 2 * sz); 525 if (!addr) 526 goto out_free; 527 528 tp = addr; 529 tp->cpu_nr = nr; 530 addr += sizeof(*tp); 531 tp->core_siblings = addr; 532 addr += sz; 533 tp->thread_siblings = addr; 534 535 for (i = 0; i < nr; i++) { 536 if (!cpu_map__has(map, i)) 537 continue; 538 539 ret = build_cpu_topo(tp, i); 540 if (ret < 0) 541 break; 542 } 543 544 out_free: 545 cpu_map__put(map); 546 if (ret) { 547 free_cpu_topo(tp); 548 tp = NULL; 549 } 550 return tp; 551 } 552 553 static int write_cpu_topology(int fd, struct perf_header *h __maybe_unused, 554 struct perf_evlist *evlist __maybe_unused) 555 { 556 struct cpu_topo *tp; 557 u32 i; 558 int ret, j; 559 560 tp = build_cpu_topology(); 561 if (!tp) 562 return -1; 563 564 ret = 
do_write(fd, &tp->core_sib, sizeof(tp->core_sib)); 565 if (ret < 0) 566 goto done; 567 568 for (i = 0; i < tp->core_sib; i++) { 569 ret = do_write_string(fd, tp->core_siblings[i]); 570 if (ret < 0) 571 goto done; 572 } 573 ret = do_write(fd, &tp->thread_sib, sizeof(tp->thread_sib)); 574 if (ret < 0) 575 goto done; 576 577 for (i = 0; i < tp->thread_sib; i++) { 578 ret = do_write_string(fd, tp->thread_siblings[i]); 579 if (ret < 0) 580 break; 581 } 582 583 ret = perf_env__read_cpu_topology_map(&perf_env); 584 if (ret < 0) 585 goto done; 586 587 for (j = 0; j < perf_env.nr_cpus_avail; j++) { 588 ret = do_write(fd, &perf_env.cpu[j].core_id, 589 sizeof(perf_env.cpu[j].core_id)); 590 if (ret < 0) 591 return ret; 592 ret = do_write(fd, &perf_env.cpu[j].socket_id, 593 sizeof(perf_env.cpu[j].socket_id)); 594 if (ret < 0) 595 return ret; 596 } 597 done: 598 free_cpu_topo(tp); 599 return ret; 600 } 601 602 603 604 static int write_total_mem(int fd, struct perf_header *h __maybe_unused, 605 struct perf_evlist *evlist __maybe_unused) 606 { 607 char *buf = NULL; 608 FILE *fp; 609 size_t len = 0; 610 int ret = -1, n; 611 uint64_t mem; 612 613 fp = fopen("/proc/meminfo", "r"); 614 if (!fp) 615 return -1; 616 617 while (getline(&buf, &len, fp) > 0) { 618 ret = strncmp(buf, "MemTotal:", 9); 619 if (!ret) 620 break; 621 } 622 if (!ret) { 623 n = sscanf(buf, "%*s %"PRIu64, &mem); 624 if (n == 1) 625 ret = do_write(fd, &mem, sizeof(mem)); 626 } else 627 ret = -1; 628 free(buf); 629 fclose(fp); 630 return ret; 631 } 632 633 static int write_topo_node(int fd, int node) 634 { 635 char str[MAXPATHLEN]; 636 char field[32]; 637 char *buf = NULL, *p; 638 size_t len = 0; 639 FILE *fp; 640 u64 mem_total, mem_free, mem; 641 int ret = -1; 642 643 sprintf(str, "/sys/devices/system/node/node%d/meminfo", node); 644 fp = fopen(str, "r"); 645 if (!fp) 646 return -1; 647 648 while (getline(&buf, &len, fp) > 0) { 649 /* skip over invalid lines */ 650 if (!strchr(buf, ':')) 651 continue; 652 if 
(sscanf(buf, "%*s %*d %31s %"PRIu64, field, &mem) != 2) 653 goto done; 654 if (!strcmp(field, "MemTotal:")) 655 mem_total = mem; 656 if (!strcmp(field, "MemFree:")) 657 mem_free = mem; 658 } 659 660 fclose(fp); 661 fp = NULL; 662 663 ret = do_write(fd, &mem_total, sizeof(u64)); 664 if (ret) 665 goto done; 666 667 ret = do_write(fd, &mem_free, sizeof(u64)); 668 if (ret) 669 goto done; 670 671 ret = -1; 672 sprintf(str, "/sys/devices/system/node/node%d/cpulist", node); 673 674 fp = fopen(str, "r"); 675 if (!fp) 676 goto done; 677 678 if (getline(&buf, &len, fp) <= 0) 679 goto done; 680 681 p = strchr(buf, '\n'); 682 if (p) 683 *p = '\0'; 684 685 ret = do_write_string(fd, buf); 686 done: 687 free(buf); 688 if (fp) 689 fclose(fp); 690 return ret; 691 } 692 693 static int write_numa_topology(int fd, struct perf_header *h __maybe_unused, 694 struct perf_evlist *evlist __maybe_unused) 695 { 696 char *buf = NULL; 697 size_t len = 0; 698 FILE *fp; 699 struct cpu_map *node_map = NULL; 700 char *c; 701 u32 nr, i, j; 702 int ret = -1; 703 704 fp = fopen("/sys/devices/system/node/online", "r"); 705 if (!fp) 706 return -1; 707 708 if (getline(&buf, &len, fp) <= 0) 709 goto done; 710 711 c = strchr(buf, '\n'); 712 if (c) 713 *c = '\0'; 714 715 node_map = cpu_map__new(buf); 716 if (!node_map) 717 goto done; 718 719 nr = (u32)node_map->nr; 720 721 ret = do_write(fd, &nr, sizeof(nr)); 722 if (ret < 0) 723 goto done; 724 725 for (i = 0; i < nr; i++) { 726 j = (u32)node_map->map[i]; 727 ret = do_write(fd, &j, sizeof(j)); 728 if (ret < 0) 729 break; 730 731 ret = write_topo_node(fd, i); 732 if (ret < 0) 733 break; 734 } 735 done: 736 free(buf); 737 fclose(fp); 738 cpu_map__put(node_map); 739 return ret; 740 } 741 742 /* 743 * File format: 744 * 745 * struct pmu_mappings { 746 * u32 pmu_num; 747 * struct pmu_map { 748 * u32 type; 749 * char name[]; 750 * }[pmu_num]; 751 * }; 752 */ 753 754 static int write_pmu_mappings(int fd, struct perf_header *h __maybe_unused, 755 struct perf_evlist 
*evlist __maybe_unused) 756 { 757 struct perf_pmu *pmu = NULL; 758 off_t offset = lseek(fd, 0, SEEK_CUR); 759 __u32 pmu_num = 0; 760 int ret; 761 762 /* write real pmu_num later */ 763 ret = do_write(fd, &pmu_num, sizeof(pmu_num)); 764 if (ret < 0) 765 return ret; 766 767 while ((pmu = perf_pmu__scan(pmu))) { 768 if (!pmu->name) 769 continue; 770 pmu_num++; 771 772 ret = do_write(fd, &pmu->type, sizeof(pmu->type)); 773 if (ret < 0) 774 return ret; 775 776 ret = do_write_string(fd, pmu->name); 777 if (ret < 0) 778 return ret; 779 } 780 781 if (pwrite(fd, &pmu_num, sizeof(pmu_num), offset) != sizeof(pmu_num)) { 782 /* discard all */ 783 lseek(fd, offset, SEEK_SET); 784 return -1; 785 } 786 787 return 0; 788 } 789 790 /* 791 * File format: 792 * 793 * struct group_descs { 794 * u32 nr_groups; 795 * struct group_desc { 796 * char name[]; 797 * u32 leader_idx; 798 * u32 nr_members; 799 * }[nr_groups]; 800 * }; 801 */ 802 static int write_group_desc(int fd, struct perf_header *h __maybe_unused, 803 struct perf_evlist *evlist) 804 { 805 u32 nr_groups = evlist->nr_groups; 806 struct perf_evsel *evsel; 807 int ret; 808 809 ret = do_write(fd, &nr_groups, sizeof(nr_groups)); 810 if (ret < 0) 811 return ret; 812 813 evlist__for_each_entry(evlist, evsel) { 814 if (perf_evsel__is_group_leader(evsel) && 815 evsel->nr_members > 1) { 816 const char *name = evsel->group_name ?: "{anon_group}"; 817 u32 leader_idx = evsel->idx; 818 u32 nr_members = evsel->nr_members; 819 820 ret = do_write_string(fd, name); 821 if (ret < 0) 822 return ret; 823 824 ret = do_write(fd, &leader_idx, sizeof(leader_idx)); 825 if (ret < 0) 826 return ret; 827 828 ret = do_write(fd, &nr_members, sizeof(nr_members)); 829 if (ret < 0) 830 return ret; 831 } 832 } 833 return 0; 834 } 835 836 /* 837 * default get_cpuid(): nothing gets recorded 838 * actual implementation must be in arch/$(ARCH)/util/header.c 839 */ 840 int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused) 841 { 842 return -1; 
843 } 844 845 static int write_cpuid(int fd, struct perf_header *h __maybe_unused, 846 struct perf_evlist *evlist __maybe_unused) 847 { 848 char buffer[64]; 849 int ret; 850 851 ret = get_cpuid(buffer, sizeof(buffer)); 852 if (!ret) 853 goto write_it; 854 855 return -1; 856 write_it: 857 return do_write_string(fd, buffer); 858 } 859 860 static int write_branch_stack(int fd __maybe_unused, 861 struct perf_header *h __maybe_unused, 862 struct perf_evlist *evlist __maybe_unused) 863 { 864 return 0; 865 } 866 867 static int write_auxtrace(int fd, struct perf_header *h, 868 struct perf_evlist *evlist __maybe_unused) 869 { 870 struct perf_session *session; 871 int err; 872 873 session = container_of(h, struct perf_session, header); 874 875 err = auxtrace_index__write(fd, &session->auxtrace_index); 876 if (err < 0) 877 pr_err("Failed to write auxtrace index\n"); 878 return err; 879 } 880 881 static int cpu_cache_level__sort(const void *a, const void *b) 882 { 883 struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a; 884 struct cpu_cache_level *cache_b = (struct cpu_cache_level *)b; 885 886 return cache_a->level - cache_b->level; 887 } 888 889 static bool cpu_cache_level__cmp(struct cpu_cache_level *a, struct cpu_cache_level *b) 890 { 891 if (a->level != b->level) 892 return false; 893 894 if (a->line_size != b->line_size) 895 return false; 896 897 if (a->sets != b->sets) 898 return false; 899 900 if (a->ways != b->ways) 901 return false; 902 903 if (strcmp(a->type, b->type)) 904 return false; 905 906 if (strcmp(a->size, b->size)) 907 return false; 908 909 if (strcmp(a->map, b->map)) 910 return false; 911 912 return true; 913 } 914 915 static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 level) 916 { 917 char path[PATH_MAX], file[PATH_MAX]; 918 struct stat st; 919 size_t len; 920 921 scnprintf(path, PATH_MAX, "devices/system/cpu/cpu%d/cache/index%d/", cpu, level); 922 scnprintf(file, PATH_MAX, "%s/%s", sysfs__mountpoint(), path); 923 924 if 
(stat(file, &st)) 925 return 1; 926 927 scnprintf(file, PATH_MAX, "%s/level", path); 928 if (sysfs__read_int(file, (int *) &cache->level)) 929 return -1; 930 931 scnprintf(file, PATH_MAX, "%s/coherency_line_size", path); 932 if (sysfs__read_int(file, (int *) &cache->line_size)) 933 return -1; 934 935 scnprintf(file, PATH_MAX, "%s/number_of_sets", path); 936 if (sysfs__read_int(file, (int *) &cache->sets)) 937 return -1; 938 939 scnprintf(file, PATH_MAX, "%s/ways_of_associativity", path); 940 if (sysfs__read_int(file, (int *) &cache->ways)) 941 return -1; 942 943 scnprintf(file, PATH_MAX, "%s/type", path); 944 if (sysfs__read_str(file, &cache->type, &len)) 945 return -1; 946 947 cache->type[len] = 0; 948 cache->type = rtrim(cache->type); 949 950 scnprintf(file, PATH_MAX, "%s/size", path); 951 if (sysfs__read_str(file, &cache->size, &len)) { 952 free(cache->type); 953 return -1; 954 } 955 956 cache->size[len] = 0; 957 cache->size = rtrim(cache->size); 958 959 scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path); 960 if (sysfs__read_str(file, &cache->map, &len)) { 961 free(cache->map); 962 free(cache->type); 963 return -1; 964 } 965 966 cache->map[len] = 0; 967 cache->map = rtrim(cache->map); 968 return 0; 969 } 970 971 static void cpu_cache_level__fprintf(FILE *out, struct cpu_cache_level *c) 972 { 973 fprintf(out, "L%d %-15s %8s [%s]\n", c->level, c->type, c->size, c->map); 974 } 975 976 static int build_caches(struct cpu_cache_level caches[], u32 size, u32 *cntp) 977 { 978 u32 i, cnt = 0; 979 long ncpus; 980 u32 nr, cpu; 981 u16 level; 982 983 ncpus = sysconf(_SC_NPROCESSORS_CONF); 984 if (ncpus < 0) 985 return -1; 986 987 nr = (u32)(ncpus & UINT_MAX); 988 989 for (cpu = 0; cpu < nr; cpu++) { 990 for (level = 0; level < 10; level++) { 991 struct cpu_cache_level c; 992 int err; 993 994 err = cpu_cache_level__read(&c, cpu, level); 995 if (err < 0) 996 return err; 997 998 if (err == 1) 999 break; 1000 1001 for (i = 0; i < cnt; i++) { 1002 if (cpu_cache_level__cmp(&c, 
&caches[i])) 1003 break; 1004 } 1005 1006 if (i == cnt) 1007 caches[cnt++] = c; 1008 else 1009 cpu_cache_level__free(&c); 1010 1011 if (WARN_ONCE(cnt == size, "way too many cpu caches..")) 1012 goto out; 1013 } 1014 } 1015 out: 1016 *cntp = cnt; 1017 return 0; 1018 } 1019 1020 #define MAX_CACHES 2000 1021 1022 static int write_cache(int fd, struct perf_header *h __maybe_unused, 1023 struct perf_evlist *evlist __maybe_unused) 1024 { 1025 struct cpu_cache_level caches[MAX_CACHES]; 1026 u32 cnt = 0, i, version = 1; 1027 int ret; 1028 1029 ret = build_caches(caches, MAX_CACHES, &cnt); 1030 if (ret) 1031 goto out; 1032 1033 qsort(&caches, cnt, sizeof(struct cpu_cache_level), cpu_cache_level__sort); 1034 1035 ret = do_write(fd, &version, sizeof(u32)); 1036 if (ret < 0) 1037 goto out; 1038 1039 ret = do_write(fd, &cnt, sizeof(u32)); 1040 if (ret < 0) 1041 goto out; 1042 1043 for (i = 0; i < cnt; i++) { 1044 struct cpu_cache_level *c = &caches[i]; 1045 1046 #define _W(v) \ 1047 ret = do_write(fd, &c->v, sizeof(u32)); \ 1048 if (ret < 0) \ 1049 goto out; 1050 1051 _W(level) 1052 _W(line_size) 1053 _W(sets) 1054 _W(ways) 1055 #undef _W 1056 1057 #define _W(v) \ 1058 ret = do_write_string(fd, (const char *) c->v); \ 1059 if (ret < 0) \ 1060 goto out; 1061 1062 _W(type) 1063 _W(size) 1064 _W(map) 1065 #undef _W 1066 } 1067 1068 out: 1069 for (i = 0; i < cnt; i++) 1070 cpu_cache_level__free(&caches[i]); 1071 return ret; 1072 } 1073 1074 static int write_stat(int fd __maybe_unused, 1075 struct perf_header *h __maybe_unused, 1076 struct perf_evlist *evlist __maybe_unused) 1077 { 1078 return 0; 1079 } 1080 1081 static void print_hostname(struct perf_header *ph, int fd __maybe_unused, 1082 FILE *fp) 1083 { 1084 fprintf(fp, "# hostname : %s\n", ph->env.hostname); 1085 } 1086 1087 static void print_osrelease(struct perf_header *ph, int fd __maybe_unused, 1088 FILE *fp) 1089 { 1090 fprintf(fp, "# os release : %s\n", ph->env.os_release); 1091 } 1092 1093 static void print_arch(struct 
perf_header *ph, int fd __maybe_unused, FILE *fp) 1094 { 1095 fprintf(fp, "# arch : %s\n", ph->env.arch); 1096 } 1097 1098 static void print_cpudesc(struct perf_header *ph, int fd __maybe_unused, 1099 FILE *fp) 1100 { 1101 fprintf(fp, "# cpudesc : %s\n", ph->env.cpu_desc); 1102 } 1103 1104 static void print_nrcpus(struct perf_header *ph, int fd __maybe_unused, 1105 FILE *fp) 1106 { 1107 fprintf(fp, "# nrcpus online : %u\n", ph->env.nr_cpus_online); 1108 fprintf(fp, "# nrcpus avail : %u\n", ph->env.nr_cpus_avail); 1109 } 1110 1111 static void print_version(struct perf_header *ph, int fd __maybe_unused, 1112 FILE *fp) 1113 { 1114 fprintf(fp, "# perf version : %s\n", ph->env.version); 1115 } 1116 1117 static void print_cmdline(struct perf_header *ph, int fd __maybe_unused, 1118 FILE *fp) 1119 { 1120 int nr, i; 1121 1122 nr = ph->env.nr_cmdline; 1123 1124 fprintf(fp, "# cmdline : "); 1125 1126 for (i = 0; i < nr; i++) 1127 fprintf(fp, "%s ", ph->env.cmdline_argv[i]); 1128 fputc('\n', fp); 1129 } 1130 1131 static void print_cpu_topology(struct perf_header *ph, int fd __maybe_unused, 1132 FILE *fp) 1133 { 1134 int nr, i; 1135 char *str; 1136 int cpu_nr = ph->env.nr_cpus_avail; 1137 1138 nr = ph->env.nr_sibling_cores; 1139 str = ph->env.sibling_cores; 1140 1141 for (i = 0; i < nr; i++) { 1142 fprintf(fp, "# sibling cores : %s\n", str); 1143 str += strlen(str) + 1; 1144 } 1145 1146 nr = ph->env.nr_sibling_threads; 1147 str = ph->env.sibling_threads; 1148 1149 for (i = 0; i < nr; i++) { 1150 fprintf(fp, "# sibling threads : %s\n", str); 1151 str += strlen(str) + 1; 1152 } 1153 1154 if (ph->env.cpu != NULL) { 1155 for (i = 0; i < cpu_nr; i++) 1156 fprintf(fp, "# CPU %d: Core ID %d, Socket ID %d\n", i, 1157 ph->env.cpu[i].core_id, ph->env.cpu[i].socket_id); 1158 } else 1159 fprintf(fp, "# Core ID and Socket ID information is not available\n"); 1160 } 1161 1162 static void free_event_desc(struct perf_evsel *events) 1163 { 1164 struct perf_evsel *evsel; 1165 1166 if (!events) 
1167 return; 1168 1169 for (evsel = events; evsel->attr.size; evsel++) { 1170 zfree(&evsel->name); 1171 zfree(&evsel->id); 1172 } 1173 1174 free(events); 1175 } 1176 1177 static struct perf_evsel * 1178 read_event_desc(struct perf_header *ph, int fd) 1179 { 1180 struct perf_evsel *evsel, *events = NULL; 1181 u64 *id; 1182 void *buf = NULL; 1183 u32 nre, sz, nr, i, j; 1184 ssize_t ret; 1185 size_t msz; 1186 1187 /* number of events */ 1188 ret = readn(fd, &nre, sizeof(nre)); 1189 if (ret != (ssize_t)sizeof(nre)) 1190 goto error; 1191 1192 if (ph->needs_swap) 1193 nre = bswap_32(nre); 1194 1195 ret = readn(fd, &sz, sizeof(sz)); 1196 if (ret != (ssize_t)sizeof(sz)) 1197 goto error; 1198 1199 if (ph->needs_swap) 1200 sz = bswap_32(sz); 1201 1202 /* buffer to hold on file attr struct */ 1203 buf = malloc(sz); 1204 if (!buf) 1205 goto error; 1206 1207 /* the last event terminates with evsel->attr.size == 0: */ 1208 events = calloc(nre + 1, sizeof(*events)); 1209 if (!events) 1210 goto error; 1211 1212 msz = sizeof(evsel->attr); 1213 if (sz < msz) 1214 msz = sz; 1215 1216 for (i = 0, evsel = events; i < nre; evsel++, i++) { 1217 evsel->idx = i; 1218 1219 /* 1220 * must read entire on-file attr struct to 1221 * sync up with layout. 
1222 */ 1223 ret = readn(fd, buf, sz); 1224 if (ret != (ssize_t)sz) 1225 goto error; 1226 1227 if (ph->needs_swap) 1228 perf_event__attr_swap(buf); 1229 1230 memcpy(&evsel->attr, buf, msz); 1231 1232 ret = readn(fd, &nr, sizeof(nr)); 1233 if (ret != (ssize_t)sizeof(nr)) 1234 goto error; 1235 1236 if (ph->needs_swap) { 1237 nr = bswap_32(nr); 1238 evsel->needs_swap = true; 1239 } 1240 1241 evsel->name = do_read_string(fd, ph); 1242 1243 if (!nr) 1244 continue; 1245 1246 id = calloc(nr, sizeof(*id)); 1247 if (!id) 1248 goto error; 1249 evsel->ids = nr; 1250 evsel->id = id; 1251 1252 for (j = 0 ; j < nr; j++) { 1253 ret = readn(fd, id, sizeof(*id)); 1254 if (ret != (ssize_t)sizeof(*id)) 1255 goto error; 1256 if (ph->needs_swap) 1257 *id = bswap_64(*id); 1258 id++; 1259 } 1260 } 1261 out: 1262 free(buf); 1263 return events; 1264 error: 1265 free_event_desc(events); 1266 events = NULL; 1267 goto out; 1268 } 1269 1270 static int __desc_attr__fprintf(FILE *fp, const char *name, const char *val, 1271 void *priv __attribute__((unused))) 1272 { 1273 return fprintf(fp, ", %s = %s", name, val); 1274 } 1275 1276 static void print_event_desc(struct perf_header *ph, int fd, FILE *fp) 1277 { 1278 struct perf_evsel *evsel, *events = read_event_desc(ph, fd); 1279 u32 j; 1280 u64 *id; 1281 1282 if (!events) { 1283 fprintf(fp, "# event desc: not available or unable to read\n"); 1284 return; 1285 } 1286 1287 for (evsel = events; evsel->attr.size; evsel++) { 1288 fprintf(fp, "# event : name = %s, ", evsel->name); 1289 1290 if (evsel->ids) { 1291 fprintf(fp, ", id = {"); 1292 for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) { 1293 if (j) 1294 fputc(',', fp); 1295 fprintf(fp, " %"PRIu64, *id); 1296 } 1297 fprintf(fp, " }"); 1298 } 1299 1300 perf_event_attr__fprintf(fp, &evsel->attr, __desc_attr__fprintf, NULL); 1301 1302 fputc('\n', fp); 1303 } 1304 1305 free_event_desc(events); 1306 } 1307 1308 static void print_total_mem(struct perf_header *ph, int fd __maybe_unused, 1309 FILE 
*fp) 1310 { 1311 fprintf(fp, "# total memory : %Lu kB\n", ph->env.total_mem); 1312 } 1313 1314 static void print_numa_topology(struct perf_header *ph, int fd __maybe_unused, 1315 FILE *fp) 1316 { 1317 int i; 1318 struct numa_node *n; 1319 1320 for (i = 0; i < ph->env.nr_numa_nodes; i++) { 1321 n = &ph->env.numa_nodes[i]; 1322 1323 fprintf(fp, "# node%u meminfo : total = %"PRIu64" kB," 1324 " free = %"PRIu64" kB\n", 1325 n->node, n->mem_total, n->mem_free); 1326 1327 fprintf(fp, "# node%u cpu list : ", n->node); 1328 cpu_map__fprintf(n->map, fp); 1329 } 1330 } 1331 1332 static void print_cpuid(struct perf_header *ph, int fd __maybe_unused, FILE *fp) 1333 { 1334 fprintf(fp, "# cpuid : %s\n", ph->env.cpuid); 1335 } 1336 1337 static void print_branch_stack(struct perf_header *ph __maybe_unused, 1338 int fd __maybe_unused, FILE *fp) 1339 { 1340 fprintf(fp, "# contains samples with branch stack\n"); 1341 } 1342 1343 static void print_auxtrace(struct perf_header *ph __maybe_unused, 1344 int fd __maybe_unused, FILE *fp) 1345 { 1346 fprintf(fp, "# contains AUX area data (e.g. 
instruction trace)\n"); 1347 } 1348 1349 static void print_stat(struct perf_header *ph __maybe_unused, 1350 int fd __maybe_unused, FILE *fp) 1351 { 1352 fprintf(fp, "# contains stat data\n"); 1353 } 1354 1355 static void print_cache(struct perf_header *ph __maybe_unused, 1356 int fd __maybe_unused, FILE *fp __maybe_unused) 1357 { 1358 int i; 1359 1360 fprintf(fp, "# CPU cache info:\n"); 1361 for (i = 0; i < ph->env.caches_cnt; i++) { 1362 fprintf(fp, "# "); 1363 cpu_cache_level__fprintf(fp, &ph->env.caches[i]); 1364 } 1365 } 1366 1367 static void print_pmu_mappings(struct perf_header *ph, int fd __maybe_unused, 1368 FILE *fp) 1369 { 1370 const char *delimiter = "# pmu mappings: "; 1371 char *str, *tmp; 1372 u32 pmu_num; 1373 u32 type; 1374 1375 pmu_num = ph->env.nr_pmu_mappings; 1376 if (!pmu_num) { 1377 fprintf(fp, "# pmu mappings: not available\n"); 1378 return; 1379 } 1380 1381 str = ph->env.pmu_mappings; 1382 1383 while (pmu_num) { 1384 type = strtoul(str, &tmp, 0); 1385 if (*tmp != ':') 1386 goto error; 1387 1388 str = tmp + 1; 1389 fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type); 1390 1391 delimiter = ", "; 1392 str += strlen(str) + 1; 1393 pmu_num--; 1394 } 1395 1396 fprintf(fp, "\n"); 1397 1398 if (!pmu_num) 1399 return; 1400 error: 1401 fprintf(fp, "# pmu mappings: unable to read\n"); 1402 } 1403 1404 static void print_group_desc(struct perf_header *ph, int fd __maybe_unused, 1405 FILE *fp) 1406 { 1407 struct perf_session *session; 1408 struct perf_evsel *evsel; 1409 u32 nr = 0; 1410 1411 session = container_of(ph, struct perf_session, header); 1412 1413 evlist__for_each_entry(session->evlist, evsel) { 1414 if (perf_evsel__is_group_leader(evsel) && 1415 evsel->nr_members > 1) { 1416 fprintf(fp, "# group: %s{%s", evsel->group_name ?: "", 1417 perf_evsel__name(evsel)); 1418 1419 nr = evsel->nr_members - 1; 1420 } else if (nr) { 1421 fprintf(fp, ",%s", perf_evsel__name(evsel)); 1422 1423 if (--nr == 0) 1424 fprintf(fp, "}\n"); 1425 } 1426 } 1427 } 1428 
1429 static int __event_process_build_id(struct build_id_event *bev, 1430 char *filename, 1431 struct perf_session *session) 1432 { 1433 int err = -1; 1434 struct machine *machine; 1435 u16 cpumode; 1436 struct dso *dso; 1437 enum dso_kernel_type dso_type; 1438 1439 machine = perf_session__findnew_machine(session, bev->pid); 1440 if (!machine) 1441 goto out; 1442 1443 cpumode = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; 1444 1445 switch (cpumode) { 1446 case PERF_RECORD_MISC_KERNEL: 1447 dso_type = DSO_TYPE_KERNEL; 1448 break; 1449 case PERF_RECORD_MISC_GUEST_KERNEL: 1450 dso_type = DSO_TYPE_GUEST_KERNEL; 1451 break; 1452 case PERF_RECORD_MISC_USER: 1453 case PERF_RECORD_MISC_GUEST_USER: 1454 dso_type = DSO_TYPE_USER; 1455 break; 1456 default: 1457 goto out; 1458 } 1459 1460 dso = machine__findnew_dso(machine, filename); 1461 if (dso != NULL) { 1462 char sbuild_id[SBUILD_ID_SIZE]; 1463 1464 dso__set_build_id(dso, &bev->build_id); 1465 1466 if (!is_kernel_module(filename, cpumode)) 1467 dso->kernel = dso_type; 1468 1469 build_id__sprintf(dso->build_id, sizeof(dso->build_id), 1470 sbuild_id); 1471 pr_debug("build id event received for %s: %s\n", 1472 dso->long_name, sbuild_id); 1473 dso__put(dso); 1474 } 1475 1476 err = 0; 1477 out: 1478 return err; 1479 } 1480 1481 static int perf_header__read_build_ids_abi_quirk(struct perf_header *header, 1482 int input, u64 offset, u64 size) 1483 { 1484 struct perf_session *session = container_of(header, struct perf_session, header); 1485 struct { 1486 struct perf_event_header header; 1487 u8 build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))]; 1488 char filename[0]; 1489 } old_bev; 1490 struct build_id_event bev; 1491 char filename[PATH_MAX]; 1492 u64 limit = offset + size; 1493 1494 while (offset < limit) { 1495 ssize_t len; 1496 1497 if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev)) 1498 return -1; 1499 1500 if (header->needs_swap) 1501 perf_event_header__bswap(&old_bev.header); 1502 1503 len = 
old_bev.header.size - sizeof(old_bev); 1504 if (readn(input, filename, len) != len) 1505 return -1; 1506 1507 bev.header = old_bev.header; 1508 1509 /* 1510 * As the pid is the missing value, we need to fill 1511 * it properly. The header.misc value give us nice hint. 1512 */ 1513 bev.pid = HOST_KERNEL_ID; 1514 if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER || 1515 bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL) 1516 bev.pid = DEFAULT_GUEST_KERNEL_ID; 1517 1518 memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id)); 1519 __event_process_build_id(&bev, filename, session); 1520 1521 offset += bev.header.size; 1522 } 1523 1524 return 0; 1525 } 1526 1527 static int perf_header__read_build_ids(struct perf_header *header, 1528 int input, u64 offset, u64 size) 1529 { 1530 struct perf_session *session = container_of(header, struct perf_session, header); 1531 struct build_id_event bev; 1532 char filename[PATH_MAX]; 1533 u64 limit = offset + size, orig_offset = offset; 1534 int err = -1; 1535 1536 while (offset < limit) { 1537 ssize_t len; 1538 1539 if (readn(input, &bev, sizeof(bev)) != sizeof(bev)) 1540 goto out; 1541 1542 if (header->needs_swap) 1543 perf_event_header__bswap(&bev.header); 1544 1545 len = bev.header.size - sizeof(bev); 1546 if (readn(input, filename, len) != len) 1547 goto out; 1548 /* 1549 * The a1645ce1 changeset: 1550 * 1551 * "perf: 'perf kvm' tool for monitoring guest performance from host" 1552 * 1553 * Added a field to struct build_id_event that broke the file 1554 * format. 1555 * 1556 * Since the kernel build-id is the first entry, process the 1557 * table using the old format if the well known 1558 * '[kernel.kallsyms]' string for the kernel build-id has the 1559 * first 4 characters chopped off (where the pid_t sits). 
1560 */ 1561 if (memcmp(filename, "nel.kallsyms]", 13) == 0) { 1562 if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1) 1563 return -1; 1564 return perf_header__read_build_ids_abi_quirk(header, input, offset, size); 1565 } 1566 1567 __event_process_build_id(&bev, filename, session); 1568 1569 offset += bev.header.size; 1570 } 1571 err = 0; 1572 out: 1573 return err; 1574 } 1575 1576 static int process_tracing_data(struct perf_file_section *section __maybe_unused, 1577 struct perf_header *ph __maybe_unused, 1578 int fd, void *data) 1579 { 1580 ssize_t ret = trace_report(fd, data, false); 1581 return ret < 0 ? -1 : 0; 1582 } 1583 1584 static int process_build_id(struct perf_file_section *section, 1585 struct perf_header *ph, int fd, 1586 void *data __maybe_unused) 1587 { 1588 if (perf_header__read_build_ids(ph, fd, section->offset, section->size)) 1589 pr_debug("Failed to read buildids, continuing...\n"); 1590 return 0; 1591 } 1592 1593 static int process_hostname(struct perf_file_section *section __maybe_unused, 1594 struct perf_header *ph, int fd, 1595 void *data __maybe_unused) 1596 { 1597 ph->env.hostname = do_read_string(fd, ph); 1598 return ph->env.hostname ? 0 : -ENOMEM; 1599 } 1600 1601 static int process_osrelease(struct perf_file_section *section __maybe_unused, 1602 struct perf_header *ph, int fd, 1603 void *data __maybe_unused) 1604 { 1605 ph->env.os_release = do_read_string(fd, ph); 1606 return ph->env.os_release ? 0 : -ENOMEM; 1607 } 1608 1609 static int process_version(struct perf_file_section *section __maybe_unused, 1610 struct perf_header *ph, int fd, 1611 void *data __maybe_unused) 1612 { 1613 ph->env.version = do_read_string(fd, ph); 1614 return ph->env.version ? 0 : -ENOMEM; 1615 } 1616 1617 static int process_arch(struct perf_file_section *section __maybe_unused, 1618 struct perf_header *ph, int fd, 1619 void *data __maybe_unused) 1620 { 1621 ph->env.arch = do_read_string(fd, ph); 1622 return ph->env.arch ? 
0 : -ENOMEM;
}

/* Read nr_cpus_avail then nr_cpus_online (two u32s) into the env. */
static int process_nrcpus(struct perf_file_section *section __maybe_unused,
			  struct perf_header *ph, int fd,
			  void *data __maybe_unused)
{
	ssize_t ret;
	u32 nr;

	ret = readn(fd, &nr, sizeof(nr));
	if (ret != sizeof(nr))
		return -1;

	if (ph->needs_swap)
		nr = bswap_32(nr);

	ph->env.nr_cpus_avail = nr;

	ret = readn(fd, &nr, sizeof(nr));
	if (ret != sizeof(nr))
		return -1;

	if (ph->needs_swap)
		nr = bswap_32(nr);

	ph->env.nr_cpus_online = nr;
	return 0;
}

static int process_cpudesc(struct perf_file_section *section __maybe_unused,
			   struct perf_header *ph, int fd,
			   void *data __maybe_unused)
{
	ph->env.cpu_desc = do_read_string(fd, ph);
	return ph->env.cpu_desc ? 0 : -ENOMEM;
}

static int process_cpuid(struct perf_file_section *section __maybe_unused,
			 struct perf_header *ph, int fd,
			 void *data __maybe_unused)
{
	ph->env.cpuid = do_read_string(fd, ph);
	return ph->env.cpuid ? 0 : -ENOMEM;
}

/* Read total system memory (u64, kB) into the env. */
static int process_total_mem(struct perf_file_section *section __maybe_unused,
			     struct perf_header *ph, int fd,
			     void *data __maybe_unused)
{
	uint64_t mem;
	ssize_t ret;

	ret = readn(fd, &mem, sizeof(mem));
	if (ret != sizeof(mem))
		return -1;

	if (ph->needs_swap)
		mem = bswap_64(mem);

	ph->env.total_mem = mem;
	return 0;
}

/* Find the evsel with the given index, or NULL if none matches. */
static struct perf_evsel *
perf_evlist__find_by_index(struct perf_evlist *evlist, int idx)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->idx == idx)
			return evsel;
	}

	return NULL;
}

/*
 * Copy 'event's name onto the evsel in 'evlist' with the same index,
 * but never overwrite a name the evsel already has.
 */
static void
perf_evlist__set_event_name(struct perf_evlist *evlist,
			    struct perf_evsel *event)
{
	struct perf_evsel *evsel;

	if (!event->name)
		return;

	evsel = perf_evlist__find_by_index(evlist, event->idx);
	if (!evsel)
		return;

	if (evsel->name)
		return;

	evsel->name = strdup(event->name);
}

/*
 * Read the serialized event descriptions and propagate their names to
 * the matching evsels of the current session.
 */
static int
process_event_desc(struct perf_file_section *section __maybe_unused,
		   struct perf_header *header, int fd,
		   void *data __maybe_unused)
{
	struct perf_session *session;
	struct perf_evsel *evsel, *events = read_event_desc(header, fd);

	if (!events)
		return 0;

	session = container_of(header, struct perf_session, header);
	/* the events array is terminated by an entry with attr.size == 0 */
	for (evsel = events; evsel->attr.size; evsel++)
		perf_evlist__set_event_name(session->evlist, evsel);

	free_event_desc(events);

	return 0;
}

/*
 * Read the recorded command line: a u32 argument count followed by that
 * many padded strings.  Builds both the flat env.cmdline buffer and the
 * env.cmdline_argv pointer array into it.
 */
static int process_cmdline(struct perf_file_section *section,
			   struct perf_header *ph, int fd,
			   void *data __maybe_unused)
{
	ssize_t ret;
	char *str, *cmdline = NULL, **argv = NULL;
	u32 nr, i, len = 0;

	ret = readn(fd, &nr, sizeof(nr));
	if (ret != sizeof(nr))
		return -1;

	if
(ph->needs_swap) 1751 nr = bswap_32(nr); 1752 1753 ph->env.nr_cmdline = nr; 1754 1755 cmdline = zalloc(section->size + nr + 1); 1756 if (!cmdline) 1757 return -1; 1758 1759 argv = zalloc(sizeof(char *) * (nr + 1)); 1760 if (!argv) 1761 goto error; 1762 1763 for (i = 0; i < nr; i++) { 1764 str = do_read_string(fd, ph); 1765 if (!str) 1766 goto error; 1767 1768 argv[i] = cmdline + len; 1769 memcpy(argv[i], str, strlen(str) + 1); 1770 len += strlen(str) + 1; 1771 free(str); 1772 } 1773 ph->env.cmdline = cmdline; 1774 ph->env.cmdline_argv = (const char **) argv; 1775 return 0; 1776 1777 error: 1778 free(argv); 1779 free(cmdline); 1780 return -1; 1781 } 1782 1783 static int process_cpu_topology(struct perf_file_section *section, 1784 struct perf_header *ph, int fd, 1785 void *data __maybe_unused) 1786 { 1787 ssize_t ret; 1788 u32 nr, i; 1789 char *str; 1790 struct strbuf sb; 1791 int cpu_nr = ph->env.nr_cpus_avail; 1792 u64 size = 0; 1793 1794 ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu)); 1795 if (!ph->env.cpu) 1796 return -1; 1797 1798 ret = readn(fd, &nr, sizeof(nr)); 1799 if (ret != sizeof(nr)) 1800 goto free_cpu; 1801 1802 if (ph->needs_swap) 1803 nr = bswap_32(nr); 1804 1805 ph->env.nr_sibling_cores = nr; 1806 size += sizeof(u32); 1807 if (strbuf_init(&sb, 128) < 0) 1808 goto free_cpu; 1809 1810 for (i = 0; i < nr; i++) { 1811 str = do_read_string(fd, ph); 1812 if (!str) 1813 goto error; 1814 1815 /* include a NULL character at the end */ 1816 if (strbuf_add(&sb, str, strlen(str) + 1) < 0) 1817 goto error; 1818 size += string_size(str); 1819 free(str); 1820 } 1821 ph->env.sibling_cores = strbuf_detach(&sb, NULL); 1822 1823 ret = readn(fd, &nr, sizeof(nr)); 1824 if (ret != sizeof(nr)) 1825 return -1; 1826 1827 if (ph->needs_swap) 1828 nr = bswap_32(nr); 1829 1830 ph->env.nr_sibling_threads = nr; 1831 size += sizeof(u32); 1832 1833 for (i = 0; i < nr; i++) { 1834 str = do_read_string(fd, ph); 1835 if (!str) 1836 goto error; 1837 1838 /* include a NULL character 
at the end */ 1839 if (strbuf_add(&sb, str, strlen(str) + 1) < 0) 1840 goto error; 1841 size += string_size(str); 1842 free(str); 1843 } 1844 ph->env.sibling_threads = strbuf_detach(&sb, NULL); 1845 1846 /* 1847 * The header may be from old perf, 1848 * which doesn't include core id and socket id information. 1849 */ 1850 if (section->size <= size) { 1851 zfree(&ph->env.cpu); 1852 return 0; 1853 } 1854 1855 for (i = 0; i < (u32)cpu_nr; i++) { 1856 ret = readn(fd, &nr, sizeof(nr)); 1857 if (ret != sizeof(nr)) 1858 goto free_cpu; 1859 1860 if (ph->needs_swap) 1861 nr = bswap_32(nr); 1862 1863 ph->env.cpu[i].core_id = nr; 1864 1865 ret = readn(fd, &nr, sizeof(nr)); 1866 if (ret != sizeof(nr)) 1867 goto free_cpu; 1868 1869 if (ph->needs_swap) 1870 nr = bswap_32(nr); 1871 1872 if (nr != (u32)-1 && nr > (u32)cpu_nr) { 1873 pr_debug("socket_id number is too big." 1874 "You may need to upgrade the perf tool.\n"); 1875 goto free_cpu; 1876 } 1877 1878 ph->env.cpu[i].socket_id = nr; 1879 } 1880 1881 return 0; 1882 1883 error: 1884 strbuf_release(&sb); 1885 free_cpu: 1886 zfree(&ph->env.cpu); 1887 return -1; 1888 } 1889 1890 static int process_numa_topology(struct perf_file_section *section __maybe_unused, 1891 struct perf_header *ph, int fd, 1892 void *data __maybe_unused) 1893 { 1894 struct numa_node *nodes, *n; 1895 ssize_t ret; 1896 u32 nr, i; 1897 char *str; 1898 1899 /* nr nodes */ 1900 ret = readn(fd, &nr, sizeof(nr)); 1901 if (ret != sizeof(nr)) 1902 return -1; 1903 1904 if (ph->needs_swap) 1905 nr = bswap_32(nr); 1906 1907 nodes = zalloc(sizeof(*nodes) * nr); 1908 if (!nodes) 1909 return -ENOMEM; 1910 1911 for (i = 0; i < nr; i++) { 1912 n = &nodes[i]; 1913 1914 /* node number */ 1915 ret = readn(fd, &n->node, sizeof(u32)); 1916 if (ret != sizeof(n->node)) 1917 goto error; 1918 1919 ret = readn(fd, &n->mem_total, sizeof(u64)); 1920 if (ret != sizeof(u64)) 1921 goto error; 1922 1923 ret = readn(fd, &n->mem_free, sizeof(u64)); 1924 if (ret != sizeof(u64)) 1925 goto 
error; 1926 1927 if (ph->needs_swap) { 1928 n->node = bswap_32(n->node); 1929 n->mem_total = bswap_64(n->mem_total); 1930 n->mem_free = bswap_64(n->mem_free); 1931 } 1932 1933 str = do_read_string(fd, ph); 1934 if (!str) 1935 goto error; 1936 1937 n->map = cpu_map__new(str); 1938 if (!n->map) 1939 goto error; 1940 1941 free(str); 1942 } 1943 ph->env.nr_numa_nodes = nr; 1944 ph->env.numa_nodes = nodes; 1945 return 0; 1946 1947 error: 1948 free(nodes); 1949 return -1; 1950 } 1951 1952 static int process_pmu_mappings(struct perf_file_section *section __maybe_unused, 1953 struct perf_header *ph, int fd, 1954 void *data __maybe_unused) 1955 { 1956 ssize_t ret; 1957 char *name; 1958 u32 pmu_num; 1959 u32 type; 1960 struct strbuf sb; 1961 1962 ret = readn(fd, &pmu_num, sizeof(pmu_num)); 1963 if (ret != sizeof(pmu_num)) 1964 return -1; 1965 1966 if (ph->needs_swap) 1967 pmu_num = bswap_32(pmu_num); 1968 1969 if (!pmu_num) { 1970 pr_debug("pmu mappings not available\n"); 1971 return 0; 1972 } 1973 1974 ph->env.nr_pmu_mappings = pmu_num; 1975 if (strbuf_init(&sb, 128) < 0) 1976 return -1; 1977 1978 while (pmu_num) { 1979 if (readn(fd, &type, sizeof(type)) != sizeof(type)) 1980 goto error; 1981 if (ph->needs_swap) 1982 type = bswap_32(type); 1983 1984 name = do_read_string(fd, ph); 1985 if (!name) 1986 goto error; 1987 1988 if (strbuf_addf(&sb, "%u:%s", type, name) < 0) 1989 goto error; 1990 /* include a NULL character at the end */ 1991 if (strbuf_add(&sb, "", 1) < 0) 1992 goto error; 1993 1994 if (!strcmp(name, "msr")) 1995 ph->env.msr_pmu_type = type; 1996 1997 free(name); 1998 pmu_num--; 1999 } 2000 ph->env.pmu_mappings = strbuf_detach(&sb, NULL); 2001 return 0; 2002 2003 error: 2004 strbuf_release(&sb); 2005 return -1; 2006 } 2007 2008 static int process_group_desc(struct perf_file_section *section __maybe_unused, 2009 struct perf_header *ph, int fd, 2010 void *data __maybe_unused) 2011 { 2012 size_t ret = -1; 2013 u32 i, nr, nr_groups; 2014 struct perf_session *session; 
2015 struct perf_evsel *evsel, *leader = NULL; 2016 struct group_desc { 2017 char *name; 2018 u32 leader_idx; 2019 u32 nr_members; 2020 } *desc; 2021 2022 if (readn(fd, &nr_groups, sizeof(nr_groups)) != sizeof(nr_groups)) 2023 return -1; 2024 2025 if (ph->needs_swap) 2026 nr_groups = bswap_32(nr_groups); 2027 2028 ph->env.nr_groups = nr_groups; 2029 if (!nr_groups) { 2030 pr_debug("group desc not available\n"); 2031 return 0; 2032 } 2033 2034 desc = calloc(nr_groups, sizeof(*desc)); 2035 if (!desc) 2036 return -1; 2037 2038 for (i = 0; i < nr_groups; i++) { 2039 desc[i].name = do_read_string(fd, ph); 2040 if (!desc[i].name) 2041 goto out_free; 2042 2043 if (readn(fd, &desc[i].leader_idx, sizeof(u32)) != sizeof(u32)) 2044 goto out_free; 2045 2046 if (readn(fd, &desc[i].nr_members, sizeof(u32)) != sizeof(u32)) 2047 goto out_free; 2048 2049 if (ph->needs_swap) { 2050 desc[i].leader_idx = bswap_32(desc[i].leader_idx); 2051 desc[i].nr_members = bswap_32(desc[i].nr_members); 2052 } 2053 } 2054 2055 /* 2056 * Rebuild group relationship based on the group_desc 2057 */ 2058 session = container_of(ph, struct perf_session, header); 2059 session->evlist->nr_groups = nr_groups; 2060 2061 i = nr = 0; 2062 evlist__for_each_entry(session->evlist, evsel) { 2063 if (evsel->idx == (int) desc[i].leader_idx) { 2064 evsel->leader = evsel; 2065 /* {anon_group} is a dummy name */ 2066 if (strcmp(desc[i].name, "{anon_group}")) { 2067 evsel->group_name = desc[i].name; 2068 desc[i].name = NULL; 2069 } 2070 evsel->nr_members = desc[i].nr_members; 2071 2072 if (i >= nr_groups || nr > 0) { 2073 pr_debug("invalid group desc\n"); 2074 goto out_free; 2075 } 2076 2077 leader = evsel; 2078 nr = evsel->nr_members - 1; 2079 i++; 2080 } else if (nr) { 2081 /* This is a group member */ 2082 evsel->leader = leader; 2083 2084 nr--; 2085 } 2086 } 2087 2088 if (i != nr_groups || nr != 0) { 2089 pr_debug("invalid group desc\n"); 2090 goto out_free; 2091 } 2092 2093 ret = 0; 2094 out_free: 2095 for (i = 0; i 
< nr_groups; i++) 2096 zfree(&desc[i].name); 2097 free(desc); 2098 2099 return ret; 2100 } 2101 2102 static int process_auxtrace(struct perf_file_section *section, 2103 struct perf_header *ph, int fd, 2104 void *data __maybe_unused) 2105 { 2106 struct perf_session *session; 2107 int err; 2108 2109 session = container_of(ph, struct perf_session, header); 2110 2111 err = auxtrace_index__process(fd, section->size, session, 2112 ph->needs_swap); 2113 if (err < 0) 2114 pr_err("Failed to process auxtrace index\n"); 2115 return err; 2116 } 2117 2118 static int process_cache(struct perf_file_section *section __maybe_unused, 2119 struct perf_header *ph __maybe_unused, int fd __maybe_unused, 2120 void *data __maybe_unused) 2121 { 2122 struct cpu_cache_level *caches; 2123 u32 cnt, i, version; 2124 2125 if (readn(fd, &version, sizeof(version)) != sizeof(version)) 2126 return -1; 2127 2128 if (ph->needs_swap) 2129 version = bswap_32(version); 2130 2131 if (version != 1) 2132 return -1; 2133 2134 if (readn(fd, &cnt, sizeof(cnt)) != sizeof(cnt)) 2135 return -1; 2136 2137 if (ph->needs_swap) 2138 cnt = bswap_32(cnt); 2139 2140 caches = zalloc(sizeof(*caches) * cnt); 2141 if (!caches) 2142 return -1; 2143 2144 for (i = 0; i < cnt; i++) { 2145 struct cpu_cache_level c; 2146 2147 #define _R(v) \ 2148 if (readn(fd, &c.v, sizeof(u32)) != sizeof(u32))\ 2149 goto out_free_caches; \ 2150 if (ph->needs_swap) \ 2151 c.v = bswap_32(c.v); \ 2152 2153 _R(level) 2154 _R(line_size) 2155 _R(sets) 2156 _R(ways) 2157 #undef _R 2158 2159 #define _R(v) \ 2160 c.v = do_read_string(fd, ph); \ 2161 if (!c.v) \ 2162 goto out_free_caches; 2163 2164 _R(type) 2165 _R(size) 2166 _R(map) 2167 #undef _R 2168 2169 caches[i] = c; 2170 } 2171 2172 ph->env.caches = caches; 2173 ph->env.caches_cnt = cnt; 2174 return 0; 2175 out_free_caches: 2176 free(caches); 2177 return -1; 2178 } 2179 2180 struct feature_ops { 2181 int (*write)(int fd, struct perf_header *h, struct perf_evlist *evlist); 2182 void (*print)(struct 
perf_header *h, int fd, FILE *fp);
	int (*process)(struct perf_file_section *section,
		       struct perf_header *h, int fd, void *data);
	const char *name;
	bool full_only;		/* only printed with the extended (-I) listing */
};

/* FEAT_OPA: write + print only; FEAT_OPP: + process; FEAT_OPF: + full_only. */
#define FEAT_OPA(n, func) \
	[n] = { .name = #n, .write = write_##func, .print = print_##func }
#define FEAT_OPP(n, func) \
	[n] = { .name = #n, .write = write_##func, .print = print_##func, \
		.process = process_##func }
#define FEAT_OPF(n, func) \
	[n] = { .name = #n, .write = write_##func, .print = print_##func, \
		.process = process_##func, .full_only = true }

/* feature_ops not implemented: */
#define print_tracing_data	NULL
#define print_build_id		NULL

/* Indexed by the HEADER_* feature bit number. */
static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
	FEAT_OPP(HEADER_TRACING_DATA,	tracing_data),
	FEAT_OPP(HEADER_BUILD_ID,	build_id),
	FEAT_OPP(HEADER_HOSTNAME,	hostname),
	FEAT_OPP(HEADER_OSRELEASE,	osrelease),
	FEAT_OPP(HEADER_VERSION,	version),
	FEAT_OPP(HEADER_ARCH,		arch),
	FEAT_OPP(HEADER_NRCPUS,		nrcpus),
	FEAT_OPP(HEADER_CPUDESC,	cpudesc),
	FEAT_OPP(HEADER_CPUID,		cpuid),
	FEAT_OPP(HEADER_TOTAL_MEM,	total_mem),
	FEAT_OPP(HEADER_EVENT_DESC,	event_desc),
	FEAT_OPP(HEADER_CMDLINE,	cmdline),
	FEAT_OPF(HEADER_CPU_TOPOLOGY,	cpu_topology),
	FEAT_OPF(HEADER_NUMA_TOPOLOGY,	numa_topology),
	FEAT_OPA(HEADER_BRANCH_STACK,	branch_stack),
	FEAT_OPP(HEADER_PMU_MAPPINGS,	pmu_mappings),
	FEAT_OPP(HEADER_GROUP_DESC,	group_desc),
	FEAT_OPP(HEADER_AUXTRACE,	auxtrace),
	FEAT_OPA(HEADER_STAT,		stat),
	FEAT_OPF(HEADER_CACHE,		cache),
};

struct header_print_data {
	FILE *fp;
	bool full; /* extended list of headers */
};

/*
 * perf_header__process_sections() callback: seek to the section and run
 * the feature's print hook (or a one-line hint when full_only and the
 * user did not ask for the full listing).  Always returns 0 so printing
 * continues past unknown or broken features.
 */
static int perf_file_section__fprintf_info(struct perf_file_section *section,
					   struct perf_header *ph,
					   int feat, int fd, void *data)
{
	struct header_print_data *hd = data;

	if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
		pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
				"%d, continuing...\n", section->offset, feat);
		return 0;
	}
	if (feat >= HEADER_LAST_FEATURE) {
		pr_warning("unknown feature %d\n", feat);
		return 0;
	}
	if (!feat_ops[feat].print)
		return 0;

	if (!feat_ops[feat].full_only || hd->full)
		feat_ops[feat].print(ph, fd, hd->fp);
	else
		fprintf(hd->fp, "# %s info available, use -I to display\n",
			feat_ops[feat].name);

	return 0;
}

/* Print all recorded header features of 'session' to 'fp'. */
int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
{
	struct header_print_data hd;
	struct perf_header *header = &session->header;
	int fd = perf_data_file__fd(session->file);
	struct stat st;
	int ret, bit;

	hd.fp = fp;
	hd.full = full;

	ret = fstat(fd, &st);
	if (ret == -1)
		return -1;

	fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));

	perf_header__process_sections(header, fd, &hd,
				      perf_file_section__fprintf_info);

	fprintf(fp, "# missing features: ");
	for_each_clear_bit(bit, header->adds_features, HEADER_LAST_FEATURE) {
		/* bit 0 (HEADER_TRACING_DATA) is deliberately not reported */
		if (bit)
			fprintf(fp, "%s ", feat_ops[bit].name);
	}

	fprintf(fp, "\n");
	return 0;
}

/*
 * Write one feature section at the current file position and record its
 * offset/size in *p.  On write failure the file position is rewound and
 * -1 returned so the caller can drop the feature bit.
 */
static int do_write_feat(int fd, struct perf_header *h, int type,
			 struct perf_file_section **p,
			 struct perf_evlist *evlist)
{
	int err;
	int ret = 0;

	if (perf_header__has_feat(h, type)) {
		if (!feat_ops[type].write)
			return -1;

		(*p)->offset = lseek(fd, 0, SEEK_CUR);

		err = feat_ops[type].write(fd, h, evlist);
		if (err < 0) {
			pr_debug("failed to write feature %s\n", feat_ops[type].name);

			/* undo anything written */
			lseek(fd, (*p)->offset, SEEK_SET);

			return -1;
		}
		(*p)->size = lseek(fd, 0, SEEK_CUR) - (*p)->offset;
		(*p)++;
} 2312 return ret; 2313 } 2314 2315 static int perf_header__adds_write(struct perf_header *header, 2316 struct perf_evlist *evlist, int fd) 2317 { 2318 int nr_sections; 2319 struct perf_file_section *feat_sec, *p; 2320 int sec_size; 2321 u64 sec_start; 2322 int feat; 2323 int err; 2324 2325 nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS); 2326 if (!nr_sections) 2327 return 0; 2328 2329 feat_sec = p = calloc(nr_sections, sizeof(*feat_sec)); 2330 if (feat_sec == NULL) 2331 return -ENOMEM; 2332 2333 sec_size = sizeof(*feat_sec) * nr_sections; 2334 2335 sec_start = header->feat_offset; 2336 lseek(fd, sec_start + sec_size, SEEK_SET); 2337 2338 for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) { 2339 if (do_write_feat(fd, header, feat, &p, evlist)) 2340 perf_header__clear_feat(header, feat); 2341 } 2342 2343 lseek(fd, sec_start, SEEK_SET); 2344 /* 2345 * may write more than needed due to dropped feature, but 2346 * this is okay, reader will skip the mising entries 2347 */ 2348 err = do_write(fd, feat_sec, sec_size); 2349 if (err < 0) 2350 pr_debug("failed to write feature section\n"); 2351 free(feat_sec); 2352 return err; 2353 } 2354 2355 int perf_header__write_pipe(int fd) 2356 { 2357 struct perf_pipe_file_header f_header; 2358 int err; 2359 2360 f_header = (struct perf_pipe_file_header){ 2361 .magic = PERF_MAGIC, 2362 .size = sizeof(f_header), 2363 }; 2364 2365 err = do_write(fd, &f_header, sizeof(f_header)); 2366 if (err < 0) { 2367 pr_debug("failed to write perf pipe header\n"); 2368 return err; 2369 } 2370 2371 return 0; 2372 } 2373 2374 int perf_session__write_header(struct perf_session *session, 2375 struct perf_evlist *evlist, 2376 int fd, bool at_exit) 2377 { 2378 struct perf_file_header f_header; 2379 struct perf_file_attr f_attr; 2380 struct perf_header *header = &session->header; 2381 struct perf_evsel *evsel; 2382 u64 attr_offset; 2383 int err; 2384 2385 lseek(fd, sizeof(f_header), SEEK_SET); 2386 2387 
evlist__for_each_entry(session->evlist, evsel) { 2388 evsel->id_offset = lseek(fd, 0, SEEK_CUR); 2389 err = do_write(fd, evsel->id, evsel->ids * sizeof(u64)); 2390 if (err < 0) { 2391 pr_debug("failed to write perf header\n"); 2392 return err; 2393 } 2394 } 2395 2396 attr_offset = lseek(fd, 0, SEEK_CUR); 2397 2398 evlist__for_each_entry(evlist, evsel) { 2399 f_attr = (struct perf_file_attr){ 2400 .attr = evsel->attr, 2401 .ids = { 2402 .offset = evsel->id_offset, 2403 .size = evsel->ids * sizeof(u64), 2404 } 2405 }; 2406 err = do_write(fd, &f_attr, sizeof(f_attr)); 2407 if (err < 0) { 2408 pr_debug("failed to write perf header attribute\n"); 2409 return err; 2410 } 2411 } 2412 2413 if (!header->data_offset) 2414 header->data_offset = lseek(fd, 0, SEEK_CUR); 2415 header->feat_offset = header->data_offset + header->data_size; 2416 2417 if (at_exit) { 2418 err = perf_header__adds_write(header, evlist, fd); 2419 if (err < 0) 2420 return err; 2421 } 2422 2423 f_header = (struct perf_file_header){ 2424 .magic = PERF_MAGIC, 2425 .size = sizeof(f_header), 2426 .attr_size = sizeof(f_attr), 2427 .attrs = { 2428 .offset = attr_offset, 2429 .size = evlist->nr_entries * sizeof(f_attr), 2430 }, 2431 .data = { 2432 .offset = header->data_offset, 2433 .size = header->data_size, 2434 }, 2435 /* event_types is ignored, store zeros */ 2436 }; 2437 2438 memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features)); 2439 2440 lseek(fd, 0, SEEK_SET); 2441 err = do_write(fd, &f_header, sizeof(f_header)); 2442 if (err < 0) { 2443 pr_debug("failed to write perf header\n"); 2444 return err; 2445 } 2446 lseek(fd, header->data_offset + header->data_size, SEEK_SET); 2447 2448 return 0; 2449 } 2450 2451 static int perf_header__getbuffer64(struct perf_header *header, 2452 int fd, void *buf, size_t size) 2453 { 2454 if (readn(fd, buf, size) <= 0) 2455 return -1; 2456 2457 if (header->needs_swap) 2458 mem_bswap_64(buf, size); 2459 2460 return 0; 2461 } 2462 2463 int 
perf_header__process_sections(struct perf_header *header, int fd,
			      void *data,
			      int (*process)(struct perf_file_section *section,
					     struct perf_header *ph,
					     int feat, int fd, void *data))
{
	struct perf_file_section *feat_sec, *sec;
	int nr_sections;
	int sec_size;
	int feat;
	int err;

	/* one on-disk section descriptor exists per set feature bit */
	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
	if (!nr_sections)
		return 0;

	feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec));
	if (!feat_sec)
		return -1;

	sec_size = sizeof(*feat_sec) * nr_sections;

	lseek(fd, header->feat_offset, SEEK_SET);

	err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
	if (err < 0)
		goto out_free;

	/* only iterate features this tool knows; later bits are ignored */
	for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
		err = process(sec++, header, feat, fd, data);
		if (err < 0)
			goto out_free;
	}
	err = 0;
out_free:
	free(feat_sec);
	return err;
}

/* zero-terminated table of per-ABI-revision attr sizes */
static const int attr_file_abi_sizes[] = {
	[0] = PERF_ATTR_SIZE_VER0,
	[1] = PERF_ATTR_SIZE_VER1,
	[2] = PERF_ATTR_SIZE_VER2,
	[3] = PERF_ATTR_SIZE_VER3,
	[4] = PERF_ATTR_SIZE_VER4,
	0,
};

/*
 * In the legacy file format, the magic number is not used to encode endianness.
 * hdr_sz was used to encode endianness. But given that hdr_sz can vary based
 * on ABI revisions, we need to try all combinations for all endianness to
 * detect the endianness.
 */
static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph)
{
	uint64_t ref_size, attr_size;
	int i;

	for (i = 0 ; attr_file_abi_sizes[i]; i++) {
		ref_size = attr_file_abi_sizes[i]
			 + sizeof(struct perf_file_section);
		if (hdr_sz != ref_size) {
			/* try the byte-swapped size before moving on */
			attr_size = bswap_64(hdr_sz);
			if (attr_size != ref_size)
				continue;

			ph->needs_swap = true;
		}
		pr_debug("ABI%d perf.data file detected, need_swap=%d\n",
			 i,
			 ph->needs_swap);
		return 0;
	}
	/* could not determine endianness */
	return -1;
}

#define PERF_PIPE_HDR_VER0	16

static const size_t attr_pipe_abi_sizes[] = {
	[0] = PERF_PIPE_HDR_VER0,
	0,
};

/*
 * In the legacy pipe format, there is an implicit assumption that endianness
 * between host recording the samples, and host parsing the samples is the
 * same. This is not always the case given that the pipe output may always be
 * redirected into a file and analyzed on a different machine with possibly a
 * different endianness and perf_event ABI revisions in the perf tool itself.
 */
static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph)
{
	u64 attr_size;
	int i;

	for (i = 0 ; attr_pipe_abi_sizes[i]; i++) {
		if (hdr_sz != attr_pipe_abi_sizes[i]) {
			/* retry with the byte-swapped size */
			attr_size = bswap_64(hdr_sz);
			if (attr_size != hdr_sz)
				continue;

			ph->needs_swap = true;
		}
		pr_debug("Pipe ABI%d perf.data file detected\n", i);
		return 0;
	}
	return -1;
}

/* True for the legacy magic or either byte order of the v2 magic. */
bool is_perf_magic(u64 magic)
{
	if (!memcmp(&magic, __perf_magic1, sizeof(magic))
		|| magic == __perf_magic2
		|| magic == __perf_magic2_sw)
		return true;

	return false;
}

/*
 * Identify the file format version from 'magic' and set ph->version
 * and ph->needs_swap accordingly.  For the legacy format the header
 * size is used to guess the endianness instead.
 */
static int check_magic_endian(u64 magic, uint64_t hdr_sz,
			      bool is_pipe, struct perf_header *ph)
{
	int ret;

	/* check for legacy format */
	ret = memcmp(&magic, __perf_magic1, sizeof(magic));
	if (ret == 0) {
		ph->version = PERF_HEADER_VERSION_1;
		pr_debug("legacy perf.data format\n");
		if (is_pipe)
			return try_all_pipe_abis(hdr_sz, ph);

		return try_all_file_abis(hdr_sz, ph);
	}
	/*
	 * the new magic number serves two purposes:
	 * - unique number to identify actual perf.data files
	 * - encode endianness of file
	 */
	ph->version = PERF_HEADER_VERSION_2;

	/* check magic number with one endianness */
	if (magic == __perf_magic2)
		return 0;

	/* check magic number with opposite endianness */
	if (magic != __perf_magic2_sw)
		return -1;

	ph->needs_swap = true;

	return 0;
}

/*
 * Read and validate the fixed file header at offset 0, fixing up
 * endianness, the feature bitmap and the data/feature offsets in 'ph'.
 */
int perf_file_header__read(struct perf_file_header *header,
			   struct perf_header *ph, int fd)
{
	ssize_t ret;

	lseek(fd, 0, SEEK_SET);

	ret = readn(fd, header, sizeof(*header));
	if (ret <= 0)
		return -1;

	if (check_magic_endian(header->magic,
			       header->attr_size, false, ph) < 0) {
		pr_debug("magic/endian check failed\n");
		return -1;
	}

	if (ph->needs_swap) {
		/* swap everything up to (not including) the feature bitmap */
		mem_bswap_64(header, offsetof(struct perf_file_header,
			     adds_features));
	}

	if (header->size != sizeof(*header)) {
		/* Support the previous format */
		if (header->size == offsetof(typeof(*header), adds_features))
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
		else
			return -1;
	} else if (ph->needs_swap) {
		/*
		 * feature bitmap is declared as an array of unsigned longs --
		 * not good since its size can differ between the host that
		 * generated the data file and the host analyzing the file.
		 *
		 * We need to handle endianness, but we don't know the size of
		 * the unsigned long where the file was generated. Take a best
		 * guess at determining it: try 64-bit swap first (ie., file
		 * created on a 64-bit host), and check if the hostname feature
		 * bit is set (this feature bit is forced on as of fbe96f2).
		 * If the bit is not, undo the 64-bit swap and try a 32-bit
		 * swap. If the hostname bit is still not set (e.g., older data
		 * file), punt and fallback to the original behavior --
		 * clearing all feature bits and setting buildid.
		 */
		mem_bswap_64(&header->adds_features,
			    BITS_TO_U64(HEADER_FEAT_BITS));

		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
			/* unswap as u64 */
			mem_bswap_64(&header->adds_features,
				    BITS_TO_U64(HEADER_FEAT_BITS));

			/* unswap as u32 */
			mem_bswap_32(&header->adds_features,
				    BITS_TO_U32(HEADER_FEAT_BITS));
		}

		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
			set_bit(HEADER_BUILD_ID, header->adds_features);
		}
	}

	memcpy(&ph->adds_features, &header->adds_features,
	       sizeof(ph->adds_features));

	ph->data_offset  = header->data.offset;
	ph->data_size	 = header->data.size;
	ph->feat_offset  = header->data.offset + header->data.size;
	return 0;
}

/*
 * perf_header__process_sections() callback: seek to the section and run
 * the feature's process hook.  Unknown or unseekable features are
 * skipped (return 0) so the remaining sections are still processed.
 */
static int perf_file_section__process(struct perf_file_section *section,
				      struct perf_header *ph,
				      int feat, int fd, void *data)
{
	if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
		pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
			  "%d, continuing...\n", section->offset, feat);
		return 0;
	}

	if (feat >= HEADER_LAST_FEATURE) {
		pr_debug("unknown feature %d, continuing...\n", feat);
		return 0;
	}

	if (!feat_ops[feat].process)
		return 0;

	return feat_ops[feat].process(section, ph, fd, data);
}

/* Read and validate the pipe-mode header, optionally re-emitting it. */
static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
				       struct perf_header *ph, int fd,
				       bool repipe)
{
	ssize_t ret;

	ret = readn(fd, header, sizeof(*header));
	if (ret <= 0)
		return -1;

	if (check_magic_endian(header->magic, header->size, true, ph) < 0) {
		pr_debug("endian/magic failed\n");
		return -1;
	}

	if (ph->needs_swap)
		header->size = bswap_64(header->size);

	if (repipe && do_write(STDOUT_FILENO, header, sizeof(*header)) < 0)
return -1; 2732 2733 return 0; 2734 } 2735 2736 static int perf_header__read_pipe(struct perf_session *session) 2737 { 2738 struct perf_header *header = &session->header; 2739 struct perf_pipe_file_header f_header; 2740 2741 if (perf_file_header__read_pipe(&f_header, header, 2742 perf_data_file__fd(session->file), 2743 session->repipe) < 0) { 2744 pr_debug("incompatible file format\n"); 2745 return -EINVAL; 2746 } 2747 2748 return 0; 2749 } 2750 2751 static int read_attr(int fd, struct perf_header *ph, 2752 struct perf_file_attr *f_attr) 2753 { 2754 struct perf_event_attr *attr = &f_attr->attr; 2755 size_t sz, left; 2756 size_t our_sz = sizeof(f_attr->attr); 2757 ssize_t ret; 2758 2759 memset(f_attr, 0, sizeof(*f_attr)); 2760 2761 /* read minimal guaranteed structure */ 2762 ret = readn(fd, attr, PERF_ATTR_SIZE_VER0); 2763 if (ret <= 0) { 2764 pr_debug("cannot read %d bytes of header attr\n", 2765 PERF_ATTR_SIZE_VER0); 2766 return -1; 2767 } 2768 2769 /* on file perf_event_attr size */ 2770 sz = attr->size; 2771 2772 if (ph->needs_swap) 2773 sz = bswap_32(sz); 2774 2775 if (sz == 0) { 2776 /* assume ABI0 */ 2777 sz = PERF_ATTR_SIZE_VER0; 2778 } else if (sz > our_sz) { 2779 pr_debug("file uses a more recent and unsupported ABI" 2780 " (%zu bytes extra)\n", sz - our_sz); 2781 return -1; 2782 } 2783 /* what we have not yet read and that we know about */ 2784 left = sz - PERF_ATTR_SIZE_VER0; 2785 if (left) { 2786 void *ptr = attr; 2787 ptr += PERF_ATTR_SIZE_VER0; 2788 2789 ret = readn(fd, ptr, left); 2790 } 2791 /* read perf_file_section, ids are read in caller */ 2792 ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids)); 2793 2794 return ret <= 0 ? 
-1 : 0; 2795 } 2796 2797 static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel, 2798 struct pevent *pevent) 2799 { 2800 struct event_format *event; 2801 char bf[128]; 2802 2803 /* already prepared */ 2804 if (evsel->tp_format) 2805 return 0; 2806 2807 if (pevent == NULL) { 2808 pr_debug("broken or missing trace data\n"); 2809 return -1; 2810 } 2811 2812 event = pevent_find_event(pevent, evsel->attr.config); 2813 if (event == NULL) { 2814 pr_debug("cannot find event format for %d\n", (int)evsel->attr.config); 2815 return -1; 2816 } 2817 2818 if (!evsel->name) { 2819 snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name); 2820 evsel->name = strdup(bf); 2821 if (evsel->name == NULL) 2822 return -1; 2823 } 2824 2825 evsel->tp_format = event; 2826 return 0; 2827 } 2828 2829 static int perf_evlist__prepare_tracepoint_events(struct perf_evlist *evlist, 2830 struct pevent *pevent) 2831 { 2832 struct perf_evsel *pos; 2833 2834 evlist__for_each_entry(evlist, pos) { 2835 if (pos->attr.type == PERF_TYPE_TRACEPOINT && 2836 perf_evsel__prepare_tracepoint_event(pos, pevent)) 2837 return -1; 2838 } 2839 2840 return 0; 2841 } 2842 2843 int perf_session__read_header(struct perf_session *session) 2844 { 2845 struct perf_data_file *file = session->file; 2846 struct perf_header *header = &session->header; 2847 struct perf_file_header f_header; 2848 struct perf_file_attr f_attr; 2849 u64 f_id; 2850 int nr_attrs, nr_ids, i, j; 2851 int fd = perf_data_file__fd(file); 2852 2853 session->evlist = perf_evlist__new(); 2854 if (session->evlist == NULL) 2855 return -ENOMEM; 2856 2857 session->evlist->env = &header->env; 2858 session->machines.host.env = &header->env; 2859 if (perf_data_file__is_pipe(file)) 2860 return perf_header__read_pipe(session); 2861 2862 if (perf_file_header__read(&f_header, header, fd) < 0) 2863 return -EINVAL; 2864 2865 /* 2866 * Sanity check that perf.data was written cleanly; data size is 2867 * initialized to 0 and updated only if the 
on_exit function is run. 2868 * If data size is still 0 then the file contains only partial 2869 * information. Just warn user and process it as much as it can. 2870 */ 2871 if (f_header.data.size == 0) { 2872 pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n" 2873 "Was the 'perf record' command properly terminated?\n", 2874 file->path); 2875 } 2876 2877 nr_attrs = f_header.attrs.size / f_header.attr_size; 2878 lseek(fd, f_header.attrs.offset, SEEK_SET); 2879 2880 for (i = 0; i < nr_attrs; i++) { 2881 struct perf_evsel *evsel; 2882 off_t tmp; 2883 2884 if (read_attr(fd, header, &f_attr) < 0) 2885 goto out_errno; 2886 2887 if (header->needs_swap) { 2888 f_attr.ids.size = bswap_64(f_attr.ids.size); 2889 f_attr.ids.offset = bswap_64(f_attr.ids.offset); 2890 perf_event__attr_swap(&f_attr.attr); 2891 } 2892 2893 tmp = lseek(fd, 0, SEEK_CUR); 2894 evsel = perf_evsel__new(&f_attr.attr); 2895 2896 if (evsel == NULL) 2897 goto out_delete_evlist; 2898 2899 evsel->needs_swap = header->needs_swap; 2900 /* 2901 * Do it before so that if perf_evsel__alloc_id fails, this 2902 * entry gets purged too at perf_evlist__delete(). 2903 */ 2904 perf_evlist__add(session->evlist, evsel); 2905 2906 nr_ids = f_attr.ids.size / sizeof(u64); 2907 /* 2908 * We don't have the cpu and thread maps on the header, so 2909 * for allocating the perf_sample_id table we fake 1 cpu and 2910 * hattr->ids threads. 
2911 */ 2912 if (perf_evsel__alloc_id(evsel, 1, nr_ids)) 2913 goto out_delete_evlist; 2914 2915 lseek(fd, f_attr.ids.offset, SEEK_SET); 2916 2917 for (j = 0; j < nr_ids; j++) { 2918 if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id))) 2919 goto out_errno; 2920 2921 perf_evlist__id_add(session->evlist, evsel, 0, j, f_id); 2922 } 2923 2924 lseek(fd, tmp, SEEK_SET); 2925 } 2926 2927 symbol_conf.nr_events = nr_attrs; 2928 2929 perf_header__process_sections(header, fd, &session->tevent, 2930 perf_file_section__process); 2931 2932 if (perf_evlist__prepare_tracepoint_events(session->evlist, 2933 session->tevent.pevent)) 2934 goto out_delete_evlist; 2935 2936 return 0; 2937 out_errno: 2938 return -errno; 2939 2940 out_delete_evlist: 2941 perf_evlist__delete(session->evlist); 2942 session->evlist = NULL; 2943 return -ENOMEM; 2944 } 2945 2946 int perf_event__synthesize_attr(struct perf_tool *tool, 2947 struct perf_event_attr *attr, u32 ids, u64 *id, 2948 perf_event__handler_t process) 2949 { 2950 union perf_event *ev; 2951 size_t size; 2952 int err; 2953 2954 size = sizeof(struct perf_event_attr); 2955 size = PERF_ALIGN(size, sizeof(u64)); 2956 size += sizeof(struct perf_event_header); 2957 size += ids * sizeof(u64); 2958 2959 ev = malloc(size); 2960 2961 if (ev == NULL) 2962 return -ENOMEM; 2963 2964 ev->attr.attr = *attr; 2965 memcpy(ev->attr.id, id, ids * sizeof(u64)); 2966 2967 ev->attr.header.type = PERF_RECORD_HEADER_ATTR; 2968 ev->attr.header.size = (u16)size; 2969 2970 if (ev->attr.header.size == size) 2971 err = process(tool, ev, NULL, NULL); 2972 else 2973 err = -E2BIG; 2974 2975 free(ev); 2976 2977 return err; 2978 } 2979 2980 static struct event_update_event * 2981 event_update_event__new(size_t size, u64 type, u64 id) 2982 { 2983 struct event_update_event *ev; 2984 2985 size += sizeof(*ev); 2986 size = PERF_ALIGN(size, sizeof(u64)); 2987 2988 ev = zalloc(size); 2989 if (ev) { 2990 ev->header.type = PERF_RECORD_EVENT_UPDATE; 2991 ev->header.size = 
(u16)size; 2992 ev->type = type; 2993 ev->id = id; 2994 } 2995 return ev; 2996 } 2997 2998 int 2999 perf_event__synthesize_event_update_unit(struct perf_tool *tool, 3000 struct perf_evsel *evsel, 3001 perf_event__handler_t process) 3002 { 3003 struct event_update_event *ev; 3004 size_t size = strlen(evsel->unit); 3005 int err; 3006 3007 ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->id[0]); 3008 if (ev == NULL) 3009 return -ENOMEM; 3010 3011 strncpy(ev->data, evsel->unit, size); 3012 err = process(tool, (union perf_event *)ev, NULL, NULL); 3013 free(ev); 3014 return err; 3015 } 3016 3017 int 3018 perf_event__synthesize_event_update_scale(struct perf_tool *tool, 3019 struct perf_evsel *evsel, 3020 perf_event__handler_t process) 3021 { 3022 struct event_update_event *ev; 3023 struct event_update_event_scale *ev_data; 3024 int err; 3025 3026 ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->id[0]); 3027 if (ev == NULL) 3028 return -ENOMEM; 3029 3030 ev_data = (struct event_update_event_scale *) ev->data; 3031 ev_data->scale = evsel->scale; 3032 err = process(tool, (union perf_event*) ev, NULL, NULL); 3033 free(ev); 3034 return err; 3035 } 3036 3037 int 3038 perf_event__synthesize_event_update_name(struct perf_tool *tool, 3039 struct perf_evsel *evsel, 3040 perf_event__handler_t process) 3041 { 3042 struct event_update_event *ev; 3043 size_t len = strlen(evsel->name); 3044 int err; 3045 3046 ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->id[0]); 3047 if (ev == NULL) 3048 return -ENOMEM; 3049 3050 strncpy(ev->data, evsel->name, len); 3051 err = process(tool, (union perf_event*) ev, NULL, NULL); 3052 free(ev); 3053 return err; 3054 } 3055 3056 int 3057 perf_event__synthesize_event_update_cpus(struct perf_tool *tool, 3058 struct perf_evsel *evsel, 3059 perf_event__handler_t process) 3060 { 3061 size_t size = sizeof(struct event_update_event); 3062 struct event_update_event *ev; 3063 int max, err; 
3064 u16 type; 3065 3066 if (!evsel->own_cpus) 3067 return 0; 3068 3069 ev = cpu_map_data__alloc(evsel->own_cpus, &size, &type, &max); 3070 if (!ev) 3071 return -ENOMEM; 3072 3073 ev->header.type = PERF_RECORD_EVENT_UPDATE; 3074 ev->header.size = (u16)size; 3075 ev->type = PERF_EVENT_UPDATE__CPUS; 3076 ev->id = evsel->id[0]; 3077 3078 cpu_map_data__synthesize((struct cpu_map_data *) ev->data, 3079 evsel->own_cpus, 3080 type, max); 3081 3082 err = process(tool, (union perf_event*) ev, NULL, NULL); 3083 free(ev); 3084 return err; 3085 } 3086 3087 size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp) 3088 { 3089 struct event_update_event *ev = &event->event_update; 3090 struct event_update_event_scale *ev_scale; 3091 struct event_update_event_cpus *ev_cpus; 3092 struct cpu_map *map; 3093 size_t ret; 3094 3095 ret = fprintf(fp, "\n... id: %" PRIu64 "\n", ev->id); 3096 3097 switch (ev->type) { 3098 case PERF_EVENT_UPDATE__SCALE: 3099 ev_scale = (struct event_update_event_scale *) ev->data; 3100 ret += fprintf(fp, "... scale: %f\n", ev_scale->scale); 3101 break; 3102 case PERF_EVENT_UPDATE__UNIT: 3103 ret += fprintf(fp, "... unit: %s\n", ev->data); 3104 break; 3105 case PERF_EVENT_UPDATE__NAME: 3106 ret += fprintf(fp, "... name: %s\n", ev->data); 3107 break; 3108 case PERF_EVENT_UPDATE__CPUS: 3109 ev_cpus = (struct event_update_event_cpus *) ev->data; 3110 ret += fprintf(fp, "... "); 3111 3112 map = cpu_map__new_data(&ev_cpus->cpus); 3113 if (map) 3114 ret += cpu_map__fprintf(map, fp); 3115 else 3116 ret += fprintf(fp, "failed to get cpus\n"); 3117 break; 3118 default: 3119 ret += fprintf(fp, "... 
unknown type\n"); 3120 break; 3121 } 3122 3123 return ret; 3124 } 3125 3126 int perf_event__synthesize_attrs(struct perf_tool *tool, 3127 struct perf_session *session, 3128 perf_event__handler_t process) 3129 { 3130 struct perf_evsel *evsel; 3131 int err = 0; 3132 3133 evlist__for_each_entry(session->evlist, evsel) { 3134 err = perf_event__synthesize_attr(tool, &evsel->attr, evsel->ids, 3135 evsel->id, process); 3136 if (err) { 3137 pr_debug("failed to create perf header attribute\n"); 3138 return err; 3139 } 3140 } 3141 3142 return err; 3143 } 3144 3145 int perf_event__process_attr(struct perf_tool *tool __maybe_unused, 3146 union perf_event *event, 3147 struct perf_evlist **pevlist) 3148 { 3149 u32 i, ids, n_ids; 3150 struct perf_evsel *evsel; 3151 struct perf_evlist *evlist = *pevlist; 3152 3153 if (evlist == NULL) { 3154 *pevlist = evlist = perf_evlist__new(); 3155 if (evlist == NULL) 3156 return -ENOMEM; 3157 } 3158 3159 evsel = perf_evsel__new(&event->attr.attr); 3160 if (evsel == NULL) 3161 return -ENOMEM; 3162 3163 perf_evlist__add(evlist, evsel); 3164 3165 ids = event->header.size; 3166 ids -= (void *)&event->attr.id - (void *)event; 3167 n_ids = ids / sizeof(u64); 3168 /* 3169 * We don't have the cpu and thread maps on the header, so 3170 * for allocating the perf_sample_id table we fake 1 cpu and 3171 * hattr->ids threads. 
3172 */ 3173 if (perf_evsel__alloc_id(evsel, 1, n_ids)) 3174 return -ENOMEM; 3175 3176 for (i = 0; i < n_ids; i++) { 3177 perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]); 3178 } 3179 3180 symbol_conf.nr_events = evlist->nr_entries; 3181 3182 return 0; 3183 } 3184 3185 int perf_event__process_event_update(struct perf_tool *tool __maybe_unused, 3186 union perf_event *event, 3187 struct perf_evlist **pevlist) 3188 { 3189 struct event_update_event *ev = &event->event_update; 3190 struct event_update_event_scale *ev_scale; 3191 struct event_update_event_cpus *ev_cpus; 3192 struct perf_evlist *evlist; 3193 struct perf_evsel *evsel; 3194 struct cpu_map *map; 3195 3196 if (!pevlist || *pevlist == NULL) 3197 return -EINVAL; 3198 3199 evlist = *pevlist; 3200 3201 evsel = perf_evlist__id2evsel(evlist, ev->id); 3202 if (evsel == NULL) 3203 return -EINVAL; 3204 3205 switch (ev->type) { 3206 case PERF_EVENT_UPDATE__UNIT: 3207 evsel->unit = strdup(ev->data); 3208 break; 3209 case PERF_EVENT_UPDATE__NAME: 3210 evsel->name = strdup(ev->data); 3211 break; 3212 case PERF_EVENT_UPDATE__SCALE: 3213 ev_scale = (struct event_update_event_scale *) ev->data; 3214 evsel->scale = ev_scale->scale; 3215 break; 3216 case PERF_EVENT_UPDATE__CPUS: 3217 ev_cpus = (struct event_update_event_cpus *) ev->data; 3218 3219 map = cpu_map__new_data(&ev_cpus->cpus); 3220 if (map) 3221 evsel->own_cpus = map; 3222 else 3223 pr_err("failed to get event_update cpus\n"); 3224 default: 3225 break; 3226 } 3227 3228 return 0; 3229 } 3230 3231 int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd, 3232 struct perf_evlist *evlist, 3233 perf_event__handler_t process) 3234 { 3235 union perf_event ev; 3236 struct tracing_data *tdata; 3237 ssize_t size = 0, aligned_size = 0, padding; 3238 int err __maybe_unused = 0; 3239 3240 /* 3241 * We are going to store the size of the data followed 3242 * by the data contents. 
Since the fd descriptor is a pipe, 3243 * we cannot seek back to store the size of the data once 3244 * we know it. Instead we: 3245 * 3246 * - write the tracing data to the temp file 3247 * - get/write the data size to pipe 3248 * - write the tracing data from the temp file 3249 * to the pipe 3250 */ 3251 tdata = tracing_data_get(&evlist->entries, fd, true); 3252 if (!tdata) 3253 return -1; 3254 3255 memset(&ev, 0, sizeof(ev)); 3256 3257 ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA; 3258 size = tdata->size; 3259 aligned_size = PERF_ALIGN(size, sizeof(u64)); 3260 padding = aligned_size - size; 3261 ev.tracing_data.header.size = sizeof(ev.tracing_data); 3262 ev.tracing_data.size = aligned_size; 3263 3264 process(tool, &ev, NULL, NULL); 3265 3266 /* 3267 * The put function will copy all the tracing data 3268 * stored in temp file to the pipe. 3269 */ 3270 tracing_data_put(tdata); 3271 3272 write_padded(fd, NULL, 0, padding); 3273 3274 return aligned_size; 3275 } 3276 3277 int perf_event__process_tracing_data(struct perf_tool *tool __maybe_unused, 3278 union perf_event *event, 3279 struct perf_session *session) 3280 { 3281 ssize_t size_read, padding, size = event->tracing_data.size; 3282 int fd = perf_data_file__fd(session->file); 3283 off_t offset = lseek(fd, 0, SEEK_CUR); 3284 char buf[BUFSIZ]; 3285 3286 /* setup for reading amidst mmap */ 3287 lseek(fd, offset + sizeof(struct tracing_data_event), 3288 SEEK_SET); 3289 3290 size_read = trace_report(fd, &session->tevent, 3291 session->repipe); 3292 padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read; 3293 3294 if (readn(fd, buf, padding) < 0) { 3295 pr_err("%s: reading input file", __func__); 3296 return -1; 3297 } 3298 if (session->repipe) { 3299 int retw = write(STDOUT_FILENO, buf, padding); 3300 if (retw <= 0 || retw != padding) { 3301 pr_err("%s: repiping tracing data padding", __func__); 3302 return -1; 3303 } 3304 } 3305 3306 if (size_read + padding != size) { 3307 pr_err("%s: tracing data 
size mismatch", __func__); 3308 return -1; 3309 } 3310 3311 perf_evlist__prepare_tracepoint_events(session->evlist, 3312 session->tevent.pevent); 3313 3314 return size_read + padding; 3315 } 3316 3317 int perf_event__synthesize_build_id(struct perf_tool *tool, 3318 struct dso *pos, u16 misc, 3319 perf_event__handler_t process, 3320 struct machine *machine) 3321 { 3322 union perf_event ev; 3323 size_t len; 3324 int err = 0; 3325 3326 if (!pos->hit) 3327 return err; 3328 3329 memset(&ev, 0, sizeof(ev)); 3330 3331 len = pos->long_name_len + 1; 3332 len = PERF_ALIGN(len, NAME_ALIGN); 3333 memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id)); 3334 ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID; 3335 ev.build_id.header.misc = misc; 3336 ev.build_id.pid = machine->pid; 3337 ev.build_id.header.size = sizeof(ev.build_id) + len; 3338 memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len); 3339 3340 err = process(tool, &ev, NULL, machine); 3341 3342 return err; 3343 } 3344 3345 int perf_event__process_build_id(struct perf_tool *tool __maybe_unused, 3346 union perf_event *event, 3347 struct perf_session *session) 3348 { 3349 __event_process_build_id(&event->build_id, 3350 event->build_id.filename, 3351 session); 3352 return 0; 3353 } 3354