xref: /openbmc/linux/tools/perf/util/header.c (revision a977d045)
1 #include <errno.h>
2 #include <inttypes.h>
3 #include "util.h"
4 #include "string2.h"
5 #include <sys/param.h>
6 #include <sys/types.h>
7 #include <byteswap.h>
8 #include <unistd.h>
9 #include <stdio.h>
10 #include <stdlib.h>
11 #include <linux/compiler.h>
12 #include <linux/list.h>
13 #include <linux/kernel.h>
14 #include <linux/bitops.h>
15 #include <sys/stat.h>
16 #include <sys/types.h>
17 #include <sys/utsname.h>
18 #include <unistd.h>
19 
20 #include "evlist.h"
21 #include "evsel.h"
22 #include "header.h"
23 #include "memswap.h"
24 #include "../perf.h"
25 #include "trace-event.h"
26 #include "session.h"
27 #include "symbol.h"
28 #include "debug.h"
29 #include "cpumap.h"
30 #include "pmu.h"
31 #include "vdso.h"
32 #include "strbuf.h"
33 #include "build-id.h"
34 #include "data.h"
35 #include <api/fs/fs.h>
36 #include "asm/bug.h"
37 
38 #include "sane_ctype.h"
39 
40 /*
41  * magic2 = "PERFILE2"
42  * must be a numerical value to let the endianness
43  * determine the memory layout. That way we are able
44  * to detect endianness when reading the perf.data file
45  * back.
46  *
47  * we check for legacy (PERFFILE) format.
48  */
/* legacy signature ("PERFFILE"); only used to recognize old data files */
static const char *__perf_magic1 = "PERFFILE";
/* "PERFILE2" stored as a numeric u64 so the reader can detect endianness */
static const u64 __perf_magic2    = 0x32454c4946524550ULL;
/* byte-swapped __perf_magic2: what a cross-endian reader will see */
static const u64 __perf_magic2_sw = 0x50455246494c4532ULL;

#define PERF_MAGIC	__perf_magic2

/* perf version recorded into the header (PERF_VERSION is set at build time) */
const char perf_version_string[] = PERF_VERSION;
56 
/* on-file record for one event: the attr plus the location of its ids */
struct perf_file_attr {
	struct perf_event_attr	attr;
	struct perf_file_section	ids;
};
61 
/* Mark feature @feat as present in @header's feature bitmap. */
void perf_header__set_feat(struct perf_header *header, int feat)
{
	set_bit(feat, header->adds_features);
}
66 
/* Mark feature @feat as absent in @header's feature bitmap. */
void perf_header__clear_feat(struct perf_header *header, int feat)
{
	clear_bit(feat, header->adds_features);
}
71 
/* Return true if feature @feat is set in @header's feature bitmap. */
bool perf_header__has_feat(const struct perf_header *header, int feat)
{
	return test_bit(feat, header->adds_features);
}
76 
/*
 * Write exactly @size bytes from @buf to @fd, retrying on short writes.
 * Returns 0 on success, -errno on the first write() failure.
 *
 * Fixes: write() returns ssize_t, which the old 'int ret' could truncate
 * for very large writes; also avoid arithmetic on 'void *' (a GNU
 * extension) by stepping a char pointer.
 */
static int do_write(int fd, const void *buf, size_t size)
{
	const char *p = buf;

	while (size) {
		ssize_t ret = write(fd, p, size);

		if (ret < 0)
			return -errno;

		size -= ret;
		p += ret;
	}

	return 0;
}
91 
92 int write_padded(int fd, const void *bf, size_t count, size_t count_aligned)
93 {
94 	static const char zero_buf[NAME_ALIGN];
95 	int err = do_write(fd, bf, count);
96 
97 	if (!err)
98 		err = do_write(fd, zero_buf, count_aligned - count);
99 
100 	return err;
101 }
102 
103 #define string_size(str)						\
104 	(PERF_ALIGN((strlen(str) + 1), NAME_ALIGN) + sizeof(u32))
105 
106 static int do_write_string(int fd, const char *str)
107 {
108 	u32 len, olen;
109 	int ret;
110 
111 	olen = strlen(str) + 1;
112 	len = PERF_ALIGN(olen, NAME_ALIGN);
113 
114 	/* write len, incl. \0 */
115 	ret = do_write(fd, &len, sizeof(len));
116 	if (ret < 0)
117 		return ret;
118 
119 	return write_padded(fd, str, olen, len);
120 }
121 
122 static char *do_read_string(int fd, struct perf_header *ph)
123 {
124 	ssize_t sz, ret;
125 	u32 len;
126 	char *buf;
127 
128 	sz = readn(fd, &len, sizeof(len));
129 	if (sz < (ssize_t)sizeof(len))
130 		return NULL;
131 
132 	if (ph->needs_swap)
133 		len = bswap_32(len);
134 
135 	buf = malloc(len);
136 	if (!buf)
137 		return NULL;
138 
139 	ret = readn(fd, buf, len);
140 	if (ret == (ssize_t)len) {
141 		/*
142 		 * strings are padded by zeroes
143 		 * thus the actual strlen of buf
144 		 * may be less than len
145 		 */
146 		return buf;
147 	}
148 
149 	free(buf);
150 	return NULL;
151 }
152 
/*
 * HEADER_TRACING_DATA: dump the tracing data for all events in @evlist
 * via read_tracing_data().  Its return value is passed through.
 */
static int write_tracing_data(int fd, struct perf_header *h __maybe_unused,
			    struct perf_evlist *evlist)
{
	return read_tracing_data(fd, &evlist->entries);
}
158 
159 
/*
 * HEADER_BUILD_ID: write the build-id table for the session containing
 * @h, then cache the build ids.  Returns 0 on success, negative on error.
 */
static int write_build_id(int fd, struct perf_header *h,
			  struct perf_evlist *evlist __maybe_unused)
{
	struct perf_session *session;
	int err;

	/* the header is embedded in the session; recover the session */
	session = container_of(h, struct perf_session, header);

	/* with_hits == true: only DSOs that actually got samples */
	if (!perf_session__read_build_ids(session, true))
		return -1;

	err = perf_session__write_buildid_table(session, fd);
	if (err < 0) {
		pr_debug("failed to write buildid table\n");
		return err;
	}
	/* best effort: caching failure is not propagated */
	perf_session__cache_build_ids(session);

	return 0;
}
180 
181 static int write_hostname(int fd, struct perf_header *h __maybe_unused,
182 			  struct perf_evlist *evlist __maybe_unused)
183 {
184 	struct utsname uts;
185 	int ret;
186 
187 	ret = uname(&uts);
188 	if (ret < 0)
189 		return -1;
190 
191 	return do_write_string(fd, uts.nodename);
192 }
193 
194 static int write_osrelease(int fd, struct perf_header *h __maybe_unused,
195 			   struct perf_evlist *evlist __maybe_unused)
196 {
197 	struct utsname uts;
198 	int ret;
199 
200 	ret = uname(&uts);
201 	if (ret < 0)
202 		return -1;
203 
204 	return do_write_string(fd, uts.release);
205 }
206 
207 static int write_arch(int fd, struct perf_header *h __maybe_unused,
208 		      struct perf_evlist *evlist __maybe_unused)
209 {
210 	struct utsname uts;
211 	int ret;
212 
213 	ret = uname(&uts);
214 	if (ret < 0)
215 		return -1;
216 
217 	return do_write_string(fd, uts.machine);
218 }
219 
/* HEADER_VERSION: record the perf version string this file was written by. */
static int write_version(int fd, struct perf_header *h __maybe_unused,
			 struct perf_evlist *evlist __maybe_unused)
{
	return do_write_string(fd, perf_version_string);
}
225 
/*
 * Scan /proc/cpuinfo for the first line starting with @cpuinfo_proc
 * (e.g. "model name"), extract the value after ": ", collapse runs of
 * whitespace to single spaces, and write it with do_write_string().
 * Returns 0 on success, -1 if the key is missing or on any error.
 */
static int __write_cpudesc(int fd, const char *cpuinfo_proc)
{
	FILE *file;
	char *buf = NULL;
	char *s, *p;
	const char *search = cpuinfo_proc;
	size_t len = 0;
	int ret = -1;

	if (!search)
		return -1;

	file = fopen("/proc/cpuinfo", "r");
	if (!file)
		return -1;

	/* ret becomes 0 only when a line starts with the search key */
	while (getline(&buf, &len, file) > 0) {
		ret = strncmp(buf, search, strlen(search));
		if (!ret)
			break;
	}

	if (ret) {
		ret = -1;
		goto done;
	}

	s = buf;

	/* skip past "key : " to the value, if the separator looks sane */
	p = strchr(buf, ':');
	if (p && *(p+1) == ' ' && *(p+2))
		s = p + 2;
	p = strchr(s, '\n');
	if (p)
		*p = '\0';

	/* squash extra space characters (branding string) */
	p = s;
	while (*p) {
		if (isspace(*p)) {
			char *r = p + 1;
			char *q = r;
			*p = ' ';
			/* find end of the whitespace run ... */
			while (*q && isspace(*q))
				q++;
			/* ... and shift the remainder left over it */
			if (q != (p+1))
				while ((*r++ = *q++));
		}
		p++;
	}
	ret = do_write_string(fd, s);
done:
	free(buf);
	fclose(file);
	return ret;
}
282 
283 static int write_cpudesc(int fd, struct perf_header *h __maybe_unused,
284 		       struct perf_evlist *evlist __maybe_unused)
285 {
286 #ifndef CPUINFO_PROC
287 #define CPUINFO_PROC {"model name", }
288 #endif
289 	const char *cpuinfo_procs[] = CPUINFO_PROC;
290 	unsigned int i;
291 
292 	for (i = 0; i < ARRAY_SIZE(cpuinfo_procs); i++) {
293 		int ret;
294 		ret = __write_cpudesc(fd, cpuinfo_procs[i]);
295 		if (ret >= 0)
296 			return ret;
297 	}
298 	return -1;
299 }
300 
301 
302 static int write_nrcpus(int fd, struct perf_header *h __maybe_unused,
303 			struct perf_evlist *evlist __maybe_unused)
304 {
305 	long nr;
306 	u32 nrc, nra;
307 	int ret;
308 
309 	nrc = cpu__max_present_cpu();
310 
311 	nr = sysconf(_SC_NPROCESSORS_ONLN);
312 	if (nr < 0)
313 		return -1;
314 
315 	nra = (u32)(nr & UINT_MAX);
316 
317 	ret = do_write(fd, &nrc, sizeof(nrc));
318 	if (ret < 0)
319 		return ret;
320 
321 	return do_write(fd, &nra, sizeof(nra));
322 }
323 
/*
 * HEADER_EVENT_DESC: for each event write its perf_event_attr, the
 * number of ids, the event name string, and the id values themselves.
 * Layout: u32 nr_events, u32 attr_size, then per event
 * { attr, u32 nr_ids, string name, u64 ids[nr_ids] }.
 */
static int write_event_desc(int fd, struct perf_header *h __maybe_unused,
			    struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	u32 nre, nri, sz;
	int ret;

	nre = evlist->nr_entries;

	/*
	 * write number of events
	 */
	ret = do_write(fd, &nre, sizeof(nre));
	if (ret < 0)
		return ret;

	/*
	 * size of perf_event_attr struct
	 */
	sz = (u32)sizeof(evsel->attr);
	ret = do_write(fd, &sz, sizeof(sz));
	if (ret < 0)
		return ret;

	evlist__for_each_entry(evlist, evsel) {
		ret = do_write(fd, &evsel->attr, sz);
		if (ret < 0)
			return ret;
		/*
		 * write number of unique id per event
		 * there is one id per instance of an event
		 *
		 * copy into an nri to be independent of the
		 * type of ids,
		 */
		nri = evsel->ids;
		ret = do_write(fd, &nri, sizeof(nri));
		if (ret < 0)
			return ret;

		/*
		 * write event string as passed on cmdline
		 */
		ret = do_write_string(fd, perf_evsel__name(evsel));
		if (ret < 0)
			return ret;
		/*
		 * write unique ids for this event
		 */
		ret = do_write(fd, evsel->id, evsel->ids * sizeof(u64));
		if (ret < 0)
			return ret;
	}
	return 0;
}
379 
/*
 * HEADER_CMDLINE: write a u32 argument count followed by the perf
 * binary path (from /proc/self/exe) and each recorded cmdline argument
 * as length-prefixed strings.
 */
static int write_cmdline(int fd, struct perf_header *h __maybe_unused,
			 struct perf_evlist *evlist __maybe_unused)
{
	char buf[MAXPATHLEN];
	u32 n;
	int i, ret;

	/* actual path to perf binary */
	ret = readlink("/proc/self/exe", buf, sizeof(buf) - 1);
	if (ret <= 0)
		return -1;

	/* readlink() does not add null termination */
	buf[ret] = '\0';

	/* account for binary path */
	n = perf_env.nr_cmdline + 1;

	ret = do_write(fd, &n, sizeof(n));
	if (ret < 0)
		return ret;

	ret = do_write_string(fd, buf);
	if (ret < 0)
		return ret;

	for (i = 0 ; i < perf_env.nr_cmdline; i++) {
		ret = do_write_string(fd, perf_env.cmdline_argv[i]);
		if (ret < 0)
			return ret;
	}
	return 0;
}
413 
/* sysfs paths for a CPU's core- and thread-sibling lists */
#define CORE_SIB_FMT \
	"/sys/devices/system/cpu/cpu%d/topology/core_siblings_list"
#define THRD_SIB_FMT \
	"/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list"

/*
 * Deduplicated CPU topology: core_siblings/thread_siblings hold the
 * distinct sibling-list strings seen so far; core_sib/thread_sib are
 * the counts of used entries.  Both arrays have cpu_nr slots.
 */
struct cpu_topo {
	u32 cpu_nr;
	u32 core_sib;
	u32 thread_sib;
	char **core_siblings;
	char **thread_siblings;
};
426 
/*
 * Read @cpu's core- and thread-sibling lists from sysfs and add each to
 * @tp if not already present (deduplication by string compare).  Returns
 * 0 if either list was read, -1 if both failed.  Ownership of the
 * getline() buffer transfers into @tp when a new entry is stored.
 */
static int build_cpu_topo(struct cpu_topo *tp, int cpu)
{
	FILE *fp;
	char filename[MAXPATHLEN];
	char *buf = NULL, *p;
	size_t len = 0;
	ssize_t sret;
	u32 i = 0;
	int ret = -1;

	sprintf(filename, CORE_SIB_FMT, cpu);
	fp = fopen(filename, "r");
	if (!fp)
		goto try_threads;

	sret = getline(&buf, &len, fp);
	fclose(fp);
	if (sret <= 0)
		goto try_threads;

	/* strip the trailing newline so strings compare cleanly */
	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	for (i = 0; i < tp->core_sib; i++) {
		if (!strcmp(buf, tp->core_siblings[i]))
			break;
	}
	if (i == tp->core_sib) {
		/* new sibling list: hand the buffer over to tp */
		tp->core_siblings[i] = buf;
		tp->core_sib++;
		buf = NULL;
		len = 0;
	}
	ret = 0;

try_threads:
	sprintf(filename, THRD_SIB_FMT, cpu);
	fp = fopen(filename, "r");
	if (!fp)
		goto done;

	/* getline() reuses or reallocates buf as needed */
	if (getline(&buf, &len, fp) <= 0)
		goto done;

	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	for (i = 0; i < tp->thread_sib; i++) {
		if (!strcmp(buf, tp->thread_siblings[i]))
			break;
	}
	if (i == tp->thread_sib) {
		tp->thread_siblings[i] = buf;
		tp->thread_sib++;
		buf = NULL;
	}
	ret = 0;
done:
	if(fp)
		fclose(fp);
	free(buf);
	return ret;
}
492 
493 static void free_cpu_topo(struct cpu_topo *tp)
494 {
495 	u32 i;
496 
497 	if (!tp)
498 		return;
499 
500 	for (i = 0 ; i < tp->core_sib; i++)
501 		zfree(&tp->core_siblings[i]);
502 
503 	for (i = 0 ; i < tp->thread_sib; i++)
504 		zfree(&tp->thread_siblings[i]);
505 
506 	free(tp);
507 }
508 
/*
 * Allocate a cpu_topo sized for all present CPUs and populate it from
 * sysfs for every online CPU.  Returns NULL on failure.  The struct and
 * its two pointer arrays come from one calloc() block.
 * NOTE(review): if the loop body never runs (no online CPU passes
 * cpu_map__has), ret stays -1 and NULL is returned — confirm intended.
 */
static struct cpu_topo *build_cpu_topology(void)
{
	struct cpu_topo *tp = NULL;
	void *addr;
	u32 nr, i;
	size_t sz;
	long ncpus;
	int ret = -1;
	struct cpu_map *map;

	ncpus = cpu__max_present_cpu();

	/* build online CPU map */
	map = cpu_map__new(NULL);
	if (map == NULL) {
		pr_debug("failed to get system cpumap\n");
		return NULL;
	}

	nr = (u32)(ncpus & UINT_MAX);

	/* one block: struct, then core_siblings[], then thread_siblings[] */
	sz = nr * sizeof(char *);
	addr = calloc(1, sizeof(*tp) + 2 * sz);
	if (!addr)
		goto out_free;

	tp = addr;
	tp->cpu_nr = nr;
	addr += sizeof(*tp);
	tp->core_siblings = addr;
	addr += sz;
	tp->thread_siblings = addr;

	for (i = 0; i < nr; i++) {
		if (!cpu_map__has(map, i))
			continue;

		ret = build_cpu_topo(tp, i);
		if (ret < 0)
			break;
	}

out_free:
	cpu_map__put(map);
	if (ret) {
		free_cpu_topo(tp);
		tp = NULL;
	}
	return tp;
}
559 
560 static int write_cpu_topology(int fd, struct perf_header *h __maybe_unused,
561 			  struct perf_evlist *evlist __maybe_unused)
562 {
563 	struct cpu_topo *tp;
564 	u32 i;
565 	int ret, j;
566 
567 	tp = build_cpu_topology();
568 	if (!tp)
569 		return -1;
570 
571 	ret = do_write(fd, &tp->core_sib, sizeof(tp->core_sib));
572 	if (ret < 0)
573 		goto done;
574 
575 	for (i = 0; i < tp->core_sib; i++) {
576 		ret = do_write_string(fd, tp->core_siblings[i]);
577 		if (ret < 0)
578 			goto done;
579 	}
580 	ret = do_write(fd, &tp->thread_sib, sizeof(tp->thread_sib));
581 	if (ret < 0)
582 		goto done;
583 
584 	for (i = 0; i < tp->thread_sib; i++) {
585 		ret = do_write_string(fd, tp->thread_siblings[i]);
586 		if (ret < 0)
587 			break;
588 	}
589 
590 	ret = perf_env__read_cpu_topology_map(&perf_env);
591 	if (ret < 0)
592 		goto done;
593 
594 	for (j = 0; j < perf_env.nr_cpus_avail; j++) {
595 		ret = do_write(fd, &perf_env.cpu[j].core_id,
596 			       sizeof(perf_env.cpu[j].core_id));
597 		if (ret < 0)
598 			return ret;
599 		ret = do_write(fd, &perf_env.cpu[j].socket_id,
600 			       sizeof(perf_env.cpu[j].socket_id));
601 		if (ret < 0)
602 			return ret;
603 	}
604 done:
605 	free_cpu_topo(tp);
606 	return ret;
607 }
608 
609 
610 
611 static int write_total_mem(int fd, struct perf_header *h __maybe_unused,
612 			  struct perf_evlist *evlist __maybe_unused)
613 {
614 	char *buf = NULL;
615 	FILE *fp;
616 	size_t len = 0;
617 	int ret = -1, n;
618 	uint64_t mem;
619 
620 	fp = fopen("/proc/meminfo", "r");
621 	if (!fp)
622 		return -1;
623 
624 	while (getline(&buf, &len, fp) > 0) {
625 		ret = strncmp(buf, "MemTotal:", 9);
626 		if (!ret)
627 			break;
628 	}
629 	if (!ret) {
630 		n = sscanf(buf, "%*s %"PRIu64, &mem);
631 		if (n == 1)
632 			ret = do_write(fd, &mem, sizeof(mem));
633 	} else
634 		ret = -1;
635 	free(buf);
636 	fclose(fp);
637 	return ret;
638 }
639 
640 static int write_topo_node(int fd, int node)
641 {
642 	char str[MAXPATHLEN];
643 	char field[32];
644 	char *buf = NULL, *p;
645 	size_t len = 0;
646 	FILE *fp;
647 	u64 mem_total, mem_free, mem;
648 	int ret = -1;
649 
650 	sprintf(str, "/sys/devices/system/node/node%d/meminfo", node);
651 	fp = fopen(str, "r");
652 	if (!fp)
653 		return -1;
654 
655 	while (getline(&buf, &len, fp) > 0) {
656 		/* skip over invalid lines */
657 		if (!strchr(buf, ':'))
658 			continue;
659 		if (sscanf(buf, "%*s %*d %31s %"PRIu64, field, &mem) != 2)
660 			goto done;
661 		if (!strcmp(field, "MemTotal:"))
662 			mem_total = mem;
663 		if (!strcmp(field, "MemFree:"))
664 			mem_free = mem;
665 	}
666 
667 	fclose(fp);
668 	fp = NULL;
669 
670 	ret = do_write(fd, &mem_total, sizeof(u64));
671 	if (ret)
672 		goto done;
673 
674 	ret = do_write(fd, &mem_free, sizeof(u64));
675 	if (ret)
676 		goto done;
677 
678 	ret = -1;
679 	sprintf(str, "/sys/devices/system/node/node%d/cpulist", node);
680 
681 	fp = fopen(str, "r");
682 	if (!fp)
683 		goto done;
684 
685 	if (getline(&buf, &len, fp) <= 0)
686 		goto done;
687 
688 	p = strchr(buf, '\n');
689 	if (p)
690 		*p = '\0';
691 
692 	ret = do_write_string(fd, buf);
693 done:
694 	free(buf);
695 	if (fp)
696 		fclose(fp);
697 	return ret;
698 }
699 
/*
 * HEADER_NUMA_TOPOLOGY: write a u32 node count, then for each online
 * node its u32 node number followed by the node record from
 * write_topo_node().
 */
static int write_numa_topology(int fd, struct perf_header *h __maybe_unused,
			  struct perf_evlist *evlist __maybe_unused)
{
	char *buf = NULL;
	size_t len = 0;
	FILE *fp;
	struct cpu_map *node_map = NULL;
	char *c;
	u32 nr, i, j;
	int ret = -1;

	fp = fopen("/sys/devices/system/node/online", "r");
	if (!fp)
		return -1;

	if (getline(&buf, &len, fp) <= 0)
		goto done;

	c = strchr(buf, '\n');
	if (c)
		*c = '\0';

	/* parse the online-node range string (e.g. "0-3") into a map */
	node_map = cpu_map__new(buf);
	if (!node_map)
		goto done;

	nr = (u32)node_map->nr;

	ret = do_write(fd, &nr, sizeof(nr));
	if (ret < 0)
		goto done;

	for (i = 0; i < nr; i++) {
		j = (u32)node_map->map[i];
		ret = do_write(fd, &j, sizeof(j));
		if (ret < 0)
			break;

		ret = write_topo_node(fd, i);
		if (ret < 0)
			break;
	}
done:
	free(buf);
	fclose(fp);
	cpu_map__put(node_map);
	return ret;
}
748 
749 /*
750  * File format:
751  *
752  * struct pmu_mappings {
753  *	u32	pmu_num;
754  *	struct pmu_map {
755  *		u32	type;
756  *		char	name[];
757  *	}[pmu_num];
758  * };
759  */
760 
/*
 * HEADER_PMU_MAPPINGS: write a placeholder u32 count, then each named
 * PMU's type and name; finally patch the real count back in with
 * pwrite() at the remembered offset.
 * NOTE(review): relies on fd being seekable — pwrite()/lseek() cannot
 * work on a pipe; confirm callers only pass regular files.
 */
static int write_pmu_mappings(int fd, struct perf_header *h __maybe_unused,
			      struct perf_evlist *evlist __maybe_unused)
{
	struct perf_pmu *pmu = NULL;
	off_t offset = lseek(fd, 0, SEEK_CUR);
	__u32 pmu_num = 0;
	int ret;

	/* write real pmu_num later */
	ret = do_write(fd, &pmu_num, sizeof(pmu_num));
	if (ret < 0)
		return ret;

	while ((pmu = perf_pmu__scan(pmu))) {
		if (!pmu->name)
			continue;
		pmu_num++;

		ret = do_write(fd, &pmu->type, sizeof(pmu->type));
		if (ret < 0)
			return ret;

		ret = do_write_string(fd, pmu->name);
		if (ret < 0)
			return ret;
	}

	if (pwrite(fd, &pmu_num, sizeof(pmu_num), offset) != sizeof(pmu_num)) {
		/* discard all */
		lseek(fd, offset, SEEK_SET);
		return -1;
	}

	return 0;
}
796 
797 /*
798  * File format:
799  *
800  * struct group_descs {
801  *	u32	nr_groups;
802  *	struct group_desc {
803  *		char	name[];
804  *		u32	leader_idx;
805  *		u32	nr_members;
806  *	}[nr_groups];
807  * };
808  */
/*
 * HEADER_GROUP_DESC: write a u32 group count, then for each multi-member
 * group its name, leader index, and member count (see the file-format
 * comment above).
 */
static int write_group_desc(int fd, struct perf_header *h __maybe_unused,
			    struct perf_evlist *evlist)
{
	u32 nr_groups = evlist->nr_groups;
	struct perf_evsel *evsel;
	int ret;

	ret = do_write(fd, &nr_groups, sizeof(nr_groups));
	if (ret < 0)
		return ret;

	evlist__for_each_entry(evlist, evsel) {
		/* only record leaders of groups with more than one member */
		if (perf_evsel__is_group_leader(evsel) &&
		    evsel->nr_members > 1) {
			const char *name = evsel->group_name ?: "{anon_group}";
			u32 leader_idx = evsel->idx;
			u32 nr_members = evsel->nr_members;

			ret = do_write_string(fd, name);
			if (ret < 0)
				return ret;

			ret = do_write(fd, &leader_idx, sizeof(leader_idx));
			if (ret < 0)
				return ret;

			ret = do_write(fd, &nr_members, sizeof(nr_members));
			if (ret < 0)
				return ret;
		}
	}
	return 0;
}
842 
843 /*
844  * default get_cpuid(): nothing gets recorded
845  * actual implementation must be in arch/$(SRCARCH)/util/header.c
846  */
/*
 * Weak default get_cpuid(): always fails (-1), so nothing is recorded
 * unless an arch override exists in arch/$(SRCARCH)/util/header.c.
 */
int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused)
{
	return -1;
}
851 
852 static int write_cpuid(int fd, struct perf_header *h __maybe_unused,
853 		       struct perf_evlist *evlist __maybe_unused)
854 {
855 	char buffer[64];
856 	int ret;
857 
858 	ret = get_cpuid(buffer, sizeof(buffer));
859 	if (!ret)
860 		goto write_it;
861 
862 	return -1;
863 write_it:
864 	return do_write_string(fd, buffer);
865 }
866 
/* HEADER_BRANCH_STACK carries no payload; the feature bit itself is the data. */
static int write_branch_stack(int fd __maybe_unused,
			      struct perf_header *h __maybe_unused,
		       struct perf_evlist *evlist __maybe_unused)
{
	return 0;
}
873 
/*
 * HEADER_AUXTRACE: write the session's auxtrace index.  Returns the
 * auxtrace_index__write() result (negative on error).
 */
static int write_auxtrace(int fd, struct perf_header *h,
			  struct perf_evlist *evlist __maybe_unused)
{
	struct perf_session *session;
	int err;

	session = container_of(h, struct perf_session, header);

	err = auxtrace_index__write(fd, &session->auxtrace_index);
	if (err < 0)
		pr_err("Failed to write auxtrace index\n");
	return err;
}
887 
888 static int cpu_cache_level__sort(const void *a, const void *b)
889 {
890 	struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a;
891 	struct cpu_cache_level *cache_b = (struct cpu_cache_level *)b;
892 
893 	return cache_a->level - cache_b->level;
894 }
895 
896 static bool cpu_cache_level__cmp(struct cpu_cache_level *a, struct cpu_cache_level *b)
897 {
898 	if (a->level != b->level)
899 		return false;
900 
901 	if (a->line_size != b->line_size)
902 		return false;
903 
904 	if (a->sets != b->sets)
905 		return false;
906 
907 	if (a->ways != b->ways)
908 		return false;
909 
910 	if (strcmp(a->type, b->type))
911 		return false;
912 
913 	if (strcmp(a->size, b->size))
914 		return false;
915 
916 	if (strcmp(a->map, b->map))
917 		return false;
918 
919 	return true;
920 }
921 
922 static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 level)
923 {
924 	char path[PATH_MAX], file[PATH_MAX];
925 	struct stat st;
926 	size_t len;
927 
928 	scnprintf(path, PATH_MAX, "devices/system/cpu/cpu%d/cache/index%d/", cpu, level);
929 	scnprintf(file, PATH_MAX, "%s/%s", sysfs__mountpoint(), path);
930 
931 	if (stat(file, &st))
932 		return 1;
933 
934 	scnprintf(file, PATH_MAX, "%s/level", path);
935 	if (sysfs__read_int(file, (int *) &cache->level))
936 		return -1;
937 
938 	scnprintf(file, PATH_MAX, "%s/coherency_line_size", path);
939 	if (sysfs__read_int(file, (int *) &cache->line_size))
940 		return -1;
941 
942 	scnprintf(file, PATH_MAX, "%s/number_of_sets", path);
943 	if (sysfs__read_int(file, (int *) &cache->sets))
944 		return -1;
945 
946 	scnprintf(file, PATH_MAX, "%s/ways_of_associativity", path);
947 	if (sysfs__read_int(file, (int *) &cache->ways))
948 		return -1;
949 
950 	scnprintf(file, PATH_MAX, "%s/type", path);
951 	if (sysfs__read_str(file, &cache->type, &len))
952 		return -1;
953 
954 	cache->type[len] = 0;
955 	cache->type = rtrim(cache->type);
956 
957 	scnprintf(file, PATH_MAX, "%s/size", path);
958 	if (sysfs__read_str(file, &cache->size, &len)) {
959 		free(cache->type);
960 		return -1;
961 	}
962 
963 	cache->size[len] = 0;
964 	cache->size = rtrim(cache->size);
965 
966 	scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path);
967 	if (sysfs__read_str(file, &cache->map, &len)) {
968 		free(cache->map);
969 		free(cache->type);
970 		return -1;
971 	}
972 
973 	cache->map[len] = 0;
974 	cache->map = rtrim(cache->map);
975 	return 0;
976 }
977 
/* Print one cache description, e.g. "L1 Data  32K [0-1]". */
static void cpu_cache_level__fprintf(FILE *out, struct cpu_cache_level *c)
{
	fprintf(out, "L%d %-15s %8s [%s]\n", c->level, c->type, c->size, c->map);
}
982 
/*
 * Enumerate all distinct CPU caches into @caches (capacity @size),
 * storing the count in *@cntp.  Per-CPU duplicates (caches shared
 * between CPUs) are collapsed via cpu_cache_level__cmp().  Returns 0 on
 * success, negative on read error.
 */
static int build_caches(struct cpu_cache_level caches[], u32 size, u32 *cntp)
{
	u32 i, cnt = 0;
	long ncpus;
	u32 nr, cpu;
	u16 level;

	ncpus = sysconf(_SC_NPROCESSORS_CONF);
	if (ncpus < 0)
		return -1;

	nr = (u32)(ncpus & UINT_MAX);

	for (cpu = 0; cpu < nr; cpu++) {
		/* sysfs cache indexes are probed up to index9 */
		for (level = 0; level < 10; level++) {
			struct cpu_cache_level c;
			int err;

			err = cpu_cache_level__read(&c, cpu, level);
			if (err < 0)
				return err;

			/* err == 1: no such index for this cpu, stop probing */
			if (err == 1)
				break;

			for (i = 0; i < cnt; i++) {
				if (cpu_cache_level__cmp(&c, &caches[i]))
					break;
			}

			if (i == cnt)
				caches[cnt++] = c;
			else
				cpu_cache_level__free(&c);

			if (WARN_ONCE(cnt == size, "way too many cpu caches.."))
				goto out;
		}
	}
 out:
	*cntp = cnt;
	return 0;
}
1026 
1027 #define MAX_CACHES 2000
1028 
/*
 * HEADER_CACHE: write a u32 version (1), a u32 cache count, then per
 * cache four u32 fields (level, line_size, sets, ways) and three
 * strings (type, size, map), sorted by level.
 */
static int write_cache(int fd, struct perf_header *h __maybe_unused,
			  struct perf_evlist *evlist __maybe_unused)
{
	struct cpu_cache_level caches[MAX_CACHES];
	u32 cnt = 0, i, version = 1;
	int ret;

	ret = build_caches(caches, MAX_CACHES, &cnt);
	if (ret)
		goto out;

	qsort(&caches, cnt, sizeof(struct cpu_cache_level), cpu_cache_level__sort);

	ret = do_write(fd, &version, sizeof(u32));
	if (ret < 0)
		goto out;

	ret = do_write(fd, &cnt, sizeof(u32));
	if (ret < 0)
		goto out;

	for (i = 0; i < cnt; i++) {
		struct cpu_cache_level *c = &caches[i];

		/* write one u32 member of the cache struct */
		#define _W(v)					\
			ret = do_write(fd, &c->v, sizeof(u32));	\
			if (ret < 0)				\
				goto out;

		_W(level)
		_W(line_size)
		_W(sets)
		_W(ways)
		#undef _W

		/* write one string member of the cache struct */
		#define _W(v)						\
			ret = do_write_string(fd, (const char *) c->v);	\
			if (ret < 0)					\
				goto out;

		_W(type)
		_W(size)
		_W(map)
		#undef _W
	}

out:
	/* strings inside each entry were allocated by cpu_cache_level__read() */
	for (i = 0; i < cnt; i++)
		cpu_cache_level__free(&caches[i]);
	return ret;
}
1080 
/* HEADER_STAT carries no payload; the feature bit itself is the data. */
static int write_stat(int fd __maybe_unused,
		      struct perf_header *h __maybe_unused,
		      struct perf_evlist *evlist __maybe_unused)
{
	return 0;
}
1087 
/* Print the recorded hostname from the parsed header environment. */
static void print_hostname(struct perf_header *ph, int fd __maybe_unused,
			   FILE *fp)
{
	fprintf(fp, "# hostname : %s\n", ph->env.hostname);
}
1093 
/* Print the recorded OS release string. */
static void print_osrelease(struct perf_header *ph, int fd __maybe_unused,
			    FILE *fp)
{
	fprintf(fp, "# os release : %s\n", ph->env.os_release);
}
1099 
/* Print the recorded machine architecture string. */
static void print_arch(struct perf_header *ph, int fd __maybe_unused, FILE *fp)
{
	fprintf(fp, "# arch : %s\n", ph->env.arch);
}
1104 
/* Print the recorded CPU description (e.g. the cpuinfo model name). */
static void print_cpudesc(struct perf_header *ph, int fd __maybe_unused,
			  FILE *fp)
{
	fprintf(fp, "# cpudesc : %s\n", ph->env.cpu_desc);
}
1110 
/* Print the recorded online and available CPU counts. */
static void print_nrcpus(struct perf_header *ph, int fd __maybe_unused,
			 FILE *fp)
{
	fprintf(fp, "# nrcpus online : %u\n", ph->env.nr_cpus_online);
	fprintf(fp, "# nrcpus avail : %u\n", ph->env.nr_cpus_avail);
}
1117 
/* Print the perf version the file was recorded with. */
static void print_version(struct perf_header *ph, int fd __maybe_unused,
			  FILE *fp)
{
	fprintf(fp, "# perf version : %s\n", ph->env.version);
}
1123 
1124 static void print_cmdline(struct perf_header *ph, int fd __maybe_unused,
1125 			  FILE *fp)
1126 {
1127 	int nr, i;
1128 
1129 	nr = ph->env.nr_cmdline;
1130 
1131 	fprintf(fp, "# cmdline : ");
1132 
1133 	for (i = 0; i < nr; i++)
1134 		fprintf(fp, "%s ", ph->env.cmdline_argv[i]);
1135 	fputc('\n', fp);
1136 }
1137 
/*
 * Print the recorded CPU topology: the deduplicated core/thread sibling
 * lists (stored as consecutive NUL-separated strings) and, when
 * available, each CPU's core/socket id.
 */
static void print_cpu_topology(struct perf_header *ph, int fd __maybe_unused,
			       FILE *fp)
{
	int nr, i;
	char *str;
	int cpu_nr = ph->env.nr_cpus_avail;

	nr = ph->env.nr_sibling_cores;
	str = ph->env.sibling_cores;

	for (i = 0; i < nr; i++) {
		fprintf(fp, "# sibling cores   : %s\n", str);
		/* advance to the next NUL-separated string */
		str += strlen(str) + 1;
	}

	nr = ph->env.nr_sibling_threads;
	str = ph->env.sibling_threads;

	for (i = 0; i < nr; i++) {
		fprintf(fp, "# sibling threads : %s\n", str);
		str += strlen(str) + 1;
	}

	if (ph->env.cpu != NULL) {
		for (i = 0; i < cpu_nr; i++)
			fprintf(fp, "# CPU %d: Core ID %d, Socket ID %d\n", i,
				ph->env.cpu[i].core_id, ph->env.cpu[i].socket_id);
	} else
		fprintf(fp, "# Core ID and Socket ID information is not available\n");
}
1168 
1169 static void free_event_desc(struct perf_evsel *events)
1170 {
1171 	struct perf_evsel *evsel;
1172 
1173 	if (!events)
1174 		return;
1175 
1176 	for (evsel = events; evsel->attr.size; evsel++) {
1177 		zfree(&evsel->name);
1178 		zfree(&evsel->id);
1179 	}
1180 
1181 	free(events);
1182 }
1183 
/*
 * Parse the HEADER_EVENT_DESC section written by write_event_desc():
 * u32 nr_events, u32 attr_size, then per event { attr, u32 nr_ids,
 * string name, u64 ids[] }, byte-swapping as needed.  Returns a
 * calloc'd, attr.size==0-terminated perf_evsel array to be released
 * with free_event_desc(), or NULL on error.
 */
static struct perf_evsel *
read_event_desc(struct perf_header *ph, int fd)
{
	struct perf_evsel *evsel, *events = NULL;
	u64 *id;
	void *buf = NULL;
	u32 nre, sz, nr, i, j;
	ssize_t ret;
	size_t msz;

	/* number of events */
	ret = readn(fd, &nre, sizeof(nre));
	if (ret != (ssize_t)sizeof(nre))
		goto error;

	if (ph->needs_swap)
		nre = bswap_32(nre);

	ret = readn(fd, &sz, sizeof(sz));
	if (ret != (ssize_t)sizeof(sz))
		goto error;

	if (ph->needs_swap)
		sz = bswap_32(sz);

	/* buffer to hold on file attr struct */
	buf = malloc(sz);
	if (!buf)
		goto error;

	/* the last event terminates with evsel->attr.size == 0: */
	events = calloc(nre + 1, sizeof(*events));
	if (!events)
		goto error;

	/* copy at most our attr size; the file's may be older/newer */
	msz = sizeof(evsel->attr);
	if (sz < msz)
		msz = sz;

	for (i = 0, evsel = events; i < nre; evsel++, i++) {
		evsel->idx = i;

		/*
		 * must read entire on-file attr struct to
		 * sync up with layout.
		 */
		ret = readn(fd, buf, sz);
		if (ret != (ssize_t)sz)
			goto error;

		if (ph->needs_swap)
			perf_event__attr_swap(buf);

		memcpy(&evsel->attr, buf, msz);

		ret = readn(fd, &nr, sizeof(nr));
		if (ret != (ssize_t)sizeof(nr))
			goto error;

		if (ph->needs_swap) {
			nr = bswap_32(nr);
			evsel->needs_swap = true;
		}

		evsel->name = do_read_string(fd, ph);

		/* events without ids have no id array to read */
		if (!nr)
			continue;

		id = calloc(nr, sizeof(*id));
		if (!id)
			goto error;
		evsel->ids = nr;
		evsel->id = id;

		for (j = 0 ; j < nr; j++) {
			ret = readn(fd, id, sizeof(*id));
			if (ret != (ssize_t)sizeof(*id))
				goto error;
			if (ph->needs_swap)
				*id = bswap_64(*id);
			id++;
		}
	}
out:
	free(buf);
	return events;
error:
	free_event_desc(events);
	events = NULL;
	goto out;
}
1276 
/* perf_event_attr__fprintf() callback: print one attr field as ", name = val". */
static int __desc_attr__fprintf(FILE *fp, const char *name, const char *val,
				void *priv __maybe_unused)
{
	return fprintf(fp, ", %s = %s", name, val);
}
1282 
/*
 * Print every event description read back from HEADER_EVENT_DESC:
 * name, sample ids (if any), and the decoded attr fields.
 * NOTE(review): "name = %s, " followed by ", id = {" emits a doubled
 * comma — cosmetic, left as-is here.
 */
static void print_event_desc(struct perf_header *ph, int fd, FILE *fp)
{
	struct perf_evsel *evsel, *events = read_event_desc(ph, fd);
	u32 j;
	u64 *id;

	if (!events) {
		fprintf(fp, "# event desc: not available or unable to read\n");
		return;
	}

	/* array is terminated by an entry with attr.size == 0 */
	for (evsel = events; evsel->attr.size; evsel++) {
		fprintf(fp, "# event : name = %s, ", evsel->name);

		if (evsel->ids) {
			fprintf(fp, ", id = {");
			for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) {
				if (j)
					fputc(',', fp);
				fprintf(fp, " %"PRIu64, *id);
			}
			fprintf(fp, " }");
		}

		perf_event_attr__fprintf(fp, &evsel->attr, __desc_attr__fprintf, NULL);

		fputc('\n', fp);
	}

	free_event_desc(events);
}
1314 
1315 static void print_total_mem(struct perf_header *ph, int fd __maybe_unused,
1316 			    FILE *fp)
1317 {
1318 	fprintf(fp, "# total memory : %Lu kB\n", ph->env.total_mem);
1319 }
1320 
/* Print each recorded NUMA node: total/free memory and its cpu list. */
static void print_numa_topology(struct perf_header *ph, int fd __maybe_unused,
				FILE *fp)
{
	int i;
	struct numa_node *n;

	for (i = 0; i < ph->env.nr_numa_nodes; i++) {
		n = &ph->env.numa_nodes[i];

		fprintf(fp, "# node%u meminfo  : total = %"PRIu64" kB,"
			    " free = %"PRIu64" kB\n",
			n->node, n->mem_total, n->mem_free);

		fprintf(fp, "# node%u cpu list : ", n->node);
		cpu_map__fprintf(n->map, fp);
	}
}
1338 
/* Print the recorded arch-specific cpuid string. */
static void print_cpuid(struct perf_header *ph, int fd __maybe_unused, FILE *fp)
{
	fprintf(fp, "# cpuid : %s\n", ph->env.cpuid);
}
1343 
1344 static void print_branch_stack(struct perf_header *ph __maybe_unused,
1345 			       int fd __maybe_unused, FILE *fp)
1346 {
1347 	fprintf(fp, "# contains samples with branch stack\n");
1348 }
1349 
1350 static void print_auxtrace(struct perf_header *ph __maybe_unused,
1351 			   int fd __maybe_unused, FILE *fp)
1352 {
1353 	fprintf(fp, "# contains AUX area data (e.g. instruction trace)\n");
1354 }
1355 
1356 static void print_stat(struct perf_header *ph __maybe_unused,
1357 		       int fd __maybe_unused, FILE *fp)
1358 {
1359 	fprintf(fp, "# contains stat data\n");
1360 }
1361 
1362 static void print_cache(struct perf_header *ph __maybe_unused,
1363 			int fd __maybe_unused, FILE *fp __maybe_unused)
1364 {
1365 	int i;
1366 
1367 	fprintf(fp, "# CPU cache info:\n");
1368 	for (i = 0; i < ph->env.caches_cnt; i++) {
1369 		fprintf(fp, "#  ");
1370 		cpu_cache_level__fprintf(fp, &ph->env.caches[i]);
1371 	}
1372 }
1373 
1374 static void print_pmu_mappings(struct perf_header *ph, int fd __maybe_unused,
1375 			       FILE *fp)
1376 {
1377 	const char *delimiter = "# pmu mappings: ";
1378 	char *str, *tmp;
1379 	u32 pmu_num;
1380 	u32 type;
1381 
1382 	pmu_num = ph->env.nr_pmu_mappings;
1383 	if (!pmu_num) {
1384 		fprintf(fp, "# pmu mappings: not available\n");
1385 		return;
1386 	}
1387 
1388 	str = ph->env.pmu_mappings;
1389 
1390 	while (pmu_num) {
1391 		type = strtoul(str, &tmp, 0);
1392 		if (*tmp != ':')
1393 			goto error;
1394 
1395 		str = tmp + 1;
1396 		fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type);
1397 
1398 		delimiter = ", ";
1399 		str += strlen(str) + 1;
1400 		pmu_num--;
1401 	}
1402 
1403 	fprintf(fp, "\n");
1404 
1405 	if (!pmu_num)
1406 		return;
1407 error:
1408 	fprintf(fp, "# pmu mappings: unable to read\n");
1409 }
1410 
1411 static void print_group_desc(struct perf_header *ph, int fd __maybe_unused,
1412 			     FILE *fp)
1413 {
1414 	struct perf_session *session;
1415 	struct perf_evsel *evsel;
1416 	u32 nr = 0;
1417 
1418 	session = container_of(ph, struct perf_session, header);
1419 
1420 	evlist__for_each_entry(session->evlist, evsel) {
1421 		if (perf_evsel__is_group_leader(evsel) &&
1422 		    evsel->nr_members > 1) {
1423 			fprintf(fp, "# group: %s{%s", evsel->group_name ?: "",
1424 				perf_evsel__name(evsel));
1425 
1426 			nr = evsel->nr_members - 1;
1427 		} else if (nr) {
1428 			fprintf(fp, ",%s", perf_evsel__name(evsel));
1429 
1430 			if (--nr == 0)
1431 				fprintf(fp, "}\n");
1432 		}
1433 	}
1434 }
1435 
/*
 * Install the build-id carried by @bev into the dso for @filename on the
 * machine identified by bev->pid, so later symbol resolution matches
 * samples against the right binary images.
 *
 * Returns 0 on success (including when the dso lookup fails), -1 if the
 * machine cannot be found/created or the record's cpumode is unhandled.
 */
static int __event_process_build_id(struct build_id_event *bev,
				    char *filename,
				    struct perf_session *session)
{
	int err = -1;
	struct machine *machine;
	u16 cpumode;
	struct dso *dso;
	enum dso_kernel_type dso_type;

	machine = perf_session__findnew_machine(session, bev->pid);
	if (!machine)
		goto out;

	cpumode = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	/* Map the record's cpumode onto the kind of dso it describes. */
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		dso_type = DSO_TYPE_KERNEL;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		dso_type = DSO_TYPE_GUEST_KERNEL;
		break;
	case PERF_RECORD_MISC_USER:
	case PERF_RECORD_MISC_GUEST_USER:
		dso_type = DSO_TYPE_USER;
		break;
	default:
		goto out;
	}

	dso = machine__findnew_dso(machine, filename);
	if (dso != NULL) {
		char sbuild_id[SBUILD_ID_SIZE];

		dso__set_build_id(dso, &bev->build_id);

		/*
		 * Kernel-side entries may actually be modules: let the
		 * kmod path parser decide, falling back to flagging the
		 * dso as the appropriate kernel flavour.
		 */
		if (dso_type != DSO_TYPE_USER) {
			struct kmod_path m = { .name = NULL, };

			if (!kmod_path__parse_name(&m, filename) && m.kmod)
				dso__set_module_info(dso, &m, machine);
			else
				dso->kernel = dso_type;

			free(m.name);
		}

		build_id__sprintf(dso->build_id, sizeof(dso->build_id),
				  sbuild_id);
		pr_debug("build id event received for %s: %s\n",
			 dso->long_name, sbuild_id);
		/* dso__put() balances the machine__findnew_dso() above */
		dso__put(dso);
	}

	err = 0;
out:
	return err;
}
1495 
/*
 * Read a HEADER_BUILD_ID table written by a pre-a1645ce1 perf, where
 * struct build_id_event had no pid field.  Each old-layout record is
 * converted to the current layout — synthesizing the pid from
 * header.misc — and handed to __event_process_build_id().
 *
 * Returns 0 on success, -1 on a short read.
 */
static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
						 int input, u64 offset, u64 size)
{
	struct perf_session *session = container_of(header, struct perf_session, header);
	/* on-disk layout used before the pid field was added */
	struct {
		struct perf_event_header   header;
		u8			   build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
		char			   filename[0];
	} old_bev;
	struct build_id_event bev;
	char filename[PATH_MAX];
	u64 limit = offset + size;

	while (offset < limit) {
		ssize_t len;

		if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
			return -1;

		if (header->needs_swap)
			perf_event_header__bswap(&old_bev.header);

		/* remainder of the record is the NUL-padded filename */
		len = old_bev.header.size - sizeof(old_bev);
		/*
		 * NOTE(review): len comes straight from the file; a corrupt
		 * header.size (< sizeof(old_bev) or > PATH_MAX) would make
		 * this read misbehave — confirm callers only feed trusted
		 * perf.data files here.
		 */
		if (readn(input, filename, len) != len)
			return -1;

		bev.header = old_bev.header;

		/*
		 * As the pid is the missing value, we need to fill
		 * it properly. The header.misc value give us nice hint.
		 */
		bev.pid	= HOST_KERNEL_ID;
		if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
		    bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
			bev.pid	= DEFAULT_GUEST_KERNEL_ID;

		memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
		__event_process_build_id(&bev, filename, session);

		offset += bev.header.size;
	}

	return 0;
}
1541 
/*
 * Read the HEADER_BUILD_ID table in the current record layout, falling
 * back to perf_header__read_build_ids_abi_quirk() when the table turns
 * out to use the old (pid-less) layout.
 *
 * Returns 0 on success, -1 on a short read.
 */
static int perf_header__read_build_ids(struct perf_header *header,
				       int input, u64 offset, u64 size)
{
	struct perf_session *session = container_of(header, struct perf_session, header);
	struct build_id_event bev;
	char filename[PATH_MAX];
	u64 limit = offset + size, orig_offset = offset;
	int err = -1;

	while (offset < limit) {
		ssize_t len;

		if (readn(input, &bev, sizeof(bev)) != sizeof(bev))
			goto out;

		if (header->needs_swap)
			perf_event_header__bswap(&bev.header);

		/* remainder of the record is the NUL-padded filename */
		len = bev.header.size - sizeof(bev);
		if (readn(input, filename, len) != len)
			goto out;
		/*
		 * The a1645ce1 changeset:
		 *
		 * "perf: 'perf kvm' tool for monitoring guest performance from host"
		 *
		 * Added a field to struct build_id_event that broke the file
		 * format.
		 *
		 * Since the kernel build-id is the first entry, process the
		 * table using the old format if the well known
		 * '[kernel.kallsyms]' string for the kernel build-id has the
		 * first 4 characters chopped off (where the pid_t sits).
		 */
		if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
			/* rewind and re-parse the whole table as old layout */
			if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
				return -1;
			return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
		}

		__event_process_build_id(&bev, filename, session);

		offset += bev.header.size;
	}
	err = 0;
out:
	return err;
}
1590 
1591 static int process_tracing_data(struct perf_file_section *section __maybe_unused,
1592 				struct perf_header *ph __maybe_unused,
1593 				int fd, void *data)
1594 {
1595 	ssize_t ret = trace_report(fd, data, false);
1596 	return ret < 0 ? -1 : 0;
1597 }
1598 
1599 static int process_build_id(struct perf_file_section *section,
1600 			    struct perf_header *ph, int fd,
1601 			    void *data __maybe_unused)
1602 {
1603 	if (perf_header__read_build_ids(ph, fd, section->offset, section->size))
1604 		pr_debug("Failed to read buildids, continuing...\n");
1605 	return 0;
1606 }
1607 
1608 static int process_hostname(struct perf_file_section *section __maybe_unused,
1609 			    struct perf_header *ph, int fd,
1610 			    void *data __maybe_unused)
1611 {
1612 	ph->env.hostname = do_read_string(fd, ph);
1613 	return ph->env.hostname ? 0 : -ENOMEM;
1614 }
1615 
1616 static int process_osrelease(struct perf_file_section *section __maybe_unused,
1617 			     struct perf_header *ph, int fd,
1618 			     void *data __maybe_unused)
1619 {
1620 	ph->env.os_release = do_read_string(fd, ph);
1621 	return ph->env.os_release ? 0 : -ENOMEM;
1622 }
1623 
1624 static int process_version(struct perf_file_section *section __maybe_unused,
1625 			   struct perf_header *ph, int fd,
1626 			   void *data __maybe_unused)
1627 {
1628 	ph->env.version = do_read_string(fd, ph);
1629 	return ph->env.version ? 0 : -ENOMEM;
1630 }
1631 
1632 static int process_arch(struct perf_file_section *section __maybe_unused,
1633 			struct perf_header *ph,	int fd,
1634 			void *data __maybe_unused)
1635 {
1636 	ph->env.arch = do_read_string(fd, ph);
1637 	return ph->env.arch ? 0 : -ENOMEM;
1638 }
1639 
1640 static int process_nrcpus(struct perf_file_section *section __maybe_unused,
1641 			  struct perf_header *ph, int fd,
1642 			  void *data __maybe_unused)
1643 {
1644 	ssize_t ret;
1645 	u32 nr;
1646 
1647 	ret = readn(fd, &nr, sizeof(nr));
1648 	if (ret != sizeof(nr))
1649 		return -1;
1650 
1651 	if (ph->needs_swap)
1652 		nr = bswap_32(nr);
1653 
1654 	ph->env.nr_cpus_avail = nr;
1655 
1656 	ret = readn(fd, &nr, sizeof(nr));
1657 	if (ret != sizeof(nr))
1658 		return -1;
1659 
1660 	if (ph->needs_swap)
1661 		nr = bswap_32(nr);
1662 
1663 	ph->env.nr_cpus_online = nr;
1664 	return 0;
1665 }
1666 
1667 static int process_cpudesc(struct perf_file_section *section __maybe_unused,
1668 			   struct perf_header *ph, int fd,
1669 			   void *data __maybe_unused)
1670 {
1671 	ph->env.cpu_desc = do_read_string(fd, ph);
1672 	return ph->env.cpu_desc ? 0 : -ENOMEM;
1673 }
1674 
1675 static int process_cpuid(struct perf_file_section *section __maybe_unused,
1676 			 struct perf_header *ph,  int fd,
1677 			 void *data __maybe_unused)
1678 {
1679 	ph->env.cpuid = do_read_string(fd, ph);
1680 	return ph->env.cpuid ? 0 : -ENOMEM;
1681 }
1682 
1683 static int process_total_mem(struct perf_file_section *section __maybe_unused,
1684 			     struct perf_header *ph, int fd,
1685 			     void *data __maybe_unused)
1686 {
1687 	uint64_t mem;
1688 	ssize_t ret;
1689 
1690 	ret = readn(fd, &mem, sizeof(mem));
1691 	if (ret != sizeof(mem))
1692 		return -1;
1693 
1694 	if (ph->needs_swap)
1695 		mem = bswap_64(mem);
1696 
1697 	ph->env.total_mem = mem;
1698 	return 0;
1699 }
1700 
1701 static struct perf_evsel *
1702 perf_evlist__find_by_index(struct perf_evlist *evlist, int idx)
1703 {
1704 	struct perf_evsel *evsel;
1705 
1706 	evlist__for_each_entry(evlist, evsel) {
1707 		if (evsel->idx == idx)
1708 			return evsel;
1709 	}
1710 
1711 	return NULL;
1712 }
1713 
1714 static void
1715 perf_evlist__set_event_name(struct perf_evlist *evlist,
1716 			    struct perf_evsel *event)
1717 {
1718 	struct perf_evsel *evsel;
1719 
1720 	if (!event->name)
1721 		return;
1722 
1723 	evsel = perf_evlist__find_by_index(evlist, event->idx);
1724 	if (!evsel)
1725 		return;
1726 
1727 	if (evsel->name)
1728 		return;
1729 
1730 	evsel->name = strdup(event->name);
1731 }
1732 
1733 static int
1734 process_event_desc(struct perf_file_section *section __maybe_unused,
1735 		   struct perf_header *header, int fd,
1736 		   void *data __maybe_unused)
1737 {
1738 	struct perf_session *session;
1739 	struct perf_evsel *evsel, *events = read_event_desc(header, fd);
1740 
1741 	if (!events)
1742 		return 0;
1743 
1744 	session = container_of(header, struct perf_session, header);
1745 	for (evsel = events; evsel->attr.size; evsel++)
1746 		perf_evlist__set_event_name(session->evlist, evsel);
1747 
1748 	free_event_desc(events);
1749 
1750 	return 0;
1751 }
1752 
1753 static int process_cmdline(struct perf_file_section *section,
1754 			   struct perf_header *ph, int fd,
1755 			   void *data __maybe_unused)
1756 {
1757 	ssize_t ret;
1758 	char *str, *cmdline = NULL, **argv = NULL;
1759 	u32 nr, i, len = 0;
1760 
1761 	ret = readn(fd, &nr, sizeof(nr));
1762 	if (ret != sizeof(nr))
1763 		return -1;
1764 
1765 	if (ph->needs_swap)
1766 		nr = bswap_32(nr);
1767 
1768 	ph->env.nr_cmdline = nr;
1769 
1770 	cmdline = zalloc(section->size + nr + 1);
1771 	if (!cmdline)
1772 		return -1;
1773 
1774 	argv = zalloc(sizeof(char *) * (nr + 1));
1775 	if (!argv)
1776 		goto error;
1777 
1778 	for (i = 0; i < nr; i++) {
1779 		str = do_read_string(fd, ph);
1780 		if (!str)
1781 			goto error;
1782 
1783 		argv[i] = cmdline + len;
1784 		memcpy(argv[i], str, strlen(str) + 1);
1785 		len += strlen(str) + 1;
1786 		free(str);
1787 	}
1788 	ph->env.cmdline = cmdline;
1789 	ph->env.cmdline_argv = (const char **) argv;
1790 	return 0;
1791 
1792 error:
1793 	free(argv);
1794 	free(cmdline);
1795 	return -1;
1796 }
1797 
1798 static int process_cpu_topology(struct perf_file_section *section,
1799 				struct perf_header *ph, int fd,
1800 				void *data __maybe_unused)
1801 {
1802 	ssize_t ret;
1803 	u32 nr, i;
1804 	char *str;
1805 	struct strbuf sb;
1806 	int cpu_nr = ph->env.nr_cpus_avail;
1807 	u64 size = 0;
1808 
1809 	ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu));
1810 	if (!ph->env.cpu)
1811 		return -1;
1812 
1813 	ret = readn(fd, &nr, sizeof(nr));
1814 	if (ret != sizeof(nr))
1815 		goto free_cpu;
1816 
1817 	if (ph->needs_swap)
1818 		nr = bswap_32(nr);
1819 
1820 	ph->env.nr_sibling_cores = nr;
1821 	size += sizeof(u32);
1822 	if (strbuf_init(&sb, 128) < 0)
1823 		goto free_cpu;
1824 
1825 	for (i = 0; i < nr; i++) {
1826 		str = do_read_string(fd, ph);
1827 		if (!str)
1828 			goto error;
1829 
1830 		/* include a NULL character at the end */
1831 		if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
1832 			goto error;
1833 		size += string_size(str);
1834 		free(str);
1835 	}
1836 	ph->env.sibling_cores = strbuf_detach(&sb, NULL);
1837 
1838 	ret = readn(fd, &nr, sizeof(nr));
1839 	if (ret != sizeof(nr))
1840 		return -1;
1841 
1842 	if (ph->needs_swap)
1843 		nr = bswap_32(nr);
1844 
1845 	ph->env.nr_sibling_threads = nr;
1846 	size += sizeof(u32);
1847 
1848 	for (i = 0; i < nr; i++) {
1849 		str = do_read_string(fd, ph);
1850 		if (!str)
1851 			goto error;
1852 
1853 		/* include a NULL character at the end */
1854 		if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
1855 			goto error;
1856 		size += string_size(str);
1857 		free(str);
1858 	}
1859 	ph->env.sibling_threads = strbuf_detach(&sb, NULL);
1860 
1861 	/*
1862 	 * The header may be from old perf,
1863 	 * which doesn't include core id and socket id information.
1864 	 */
1865 	if (section->size <= size) {
1866 		zfree(&ph->env.cpu);
1867 		return 0;
1868 	}
1869 
1870 	for (i = 0; i < (u32)cpu_nr; i++) {
1871 		ret = readn(fd, &nr, sizeof(nr));
1872 		if (ret != sizeof(nr))
1873 			goto free_cpu;
1874 
1875 		if (ph->needs_swap)
1876 			nr = bswap_32(nr);
1877 
1878 		ph->env.cpu[i].core_id = nr;
1879 
1880 		ret = readn(fd, &nr, sizeof(nr));
1881 		if (ret != sizeof(nr))
1882 			goto free_cpu;
1883 
1884 		if (ph->needs_swap)
1885 			nr = bswap_32(nr);
1886 
1887 		if (nr != (u32)-1 && nr > (u32)cpu_nr) {
1888 			pr_debug("socket_id number is too big."
1889 				 "You may need to upgrade the perf tool.\n");
1890 			goto free_cpu;
1891 		}
1892 
1893 		ph->env.cpu[i].socket_id = nr;
1894 	}
1895 
1896 	return 0;
1897 
1898 error:
1899 	strbuf_release(&sb);
1900 free_cpu:
1901 	zfree(&ph->env.cpu);
1902 	return -1;
1903 }
1904 
1905 static int process_numa_topology(struct perf_file_section *section __maybe_unused,
1906 				 struct perf_header *ph, int fd,
1907 				 void *data __maybe_unused)
1908 {
1909 	struct numa_node *nodes, *n;
1910 	ssize_t ret;
1911 	u32 nr, i;
1912 	char *str;
1913 
1914 	/* nr nodes */
1915 	ret = readn(fd, &nr, sizeof(nr));
1916 	if (ret != sizeof(nr))
1917 		return -1;
1918 
1919 	if (ph->needs_swap)
1920 		nr = bswap_32(nr);
1921 
1922 	nodes = zalloc(sizeof(*nodes) * nr);
1923 	if (!nodes)
1924 		return -ENOMEM;
1925 
1926 	for (i = 0; i < nr; i++) {
1927 		n = &nodes[i];
1928 
1929 		/* node number */
1930 		ret = readn(fd, &n->node, sizeof(u32));
1931 		if (ret != sizeof(n->node))
1932 			goto error;
1933 
1934 		ret = readn(fd, &n->mem_total, sizeof(u64));
1935 		if (ret != sizeof(u64))
1936 			goto error;
1937 
1938 		ret = readn(fd, &n->mem_free, sizeof(u64));
1939 		if (ret != sizeof(u64))
1940 			goto error;
1941 
1942 		if (ph->needs_swap) {
1943 			n->node      = bswap_32(n->node);
1944 			n->mem_total = bswap_64(n->mem_total);
1945 			n->mem_free  = bswap_64(n->mem_free);
1946 		}
1947 
1948 		str = do_read_string(fd, ph);
1949 		if (!str)
1950 			goto error;
1951 
1952 		n->map = cpu_map__new(str);
1953 		if (!n->map)
1954 			goto error;
1955 
1956 		free(str);
1957 	}
1958 	ph->env.nr_numa_nodes = nr;
1959 	ph->env.numa_nodes = nodes;
1960 	return 0;
1961 
1962 error:
1963 	free(nodes);
1964 	return -1;
1965 }
1966 
1967 static int process_pmu_mappings(struct perf_file_section *section __maybe_unused,
1968 				struct perf_header *ph, int fd,
1969 				void *data __maybe_unused)
1970 {
1971 	ssize_t ret;
1972 	char *name;
1973 	u32 pmu_num;
1974 	u32 type;
1975 	struct strbuf sb;
1976 
1977 	ret = readn(fd, &pmu_num, sizeof(pmu_num));
1978 	if (ret != sizeof(pmu_num))
1979 		return -1;
1980 
1981 	if (ph->needs_swap)
1982 		pmu_num = bswap_32(pmu_num);
1983 
1984 	if (!pmu_num) {
1985 		pr_debug("pmu mappings not available\n");
1986 		return 0;
1987 	}
1988 
1989 	ph->env.nr_pmu_mappings = pmu_num;
1990 	if (strbuf_init(&sb, 128) < 0)
1991 		return -1;
1992 
1993 	while (pmu_num) {
1994 		if (readn(fd, &type, sizeof(type)) != sizeof(type))
1995 			goto error;
1996 		if (ph->needs_swap)
1997 			type = bswap_32(type);
1998 
1999 		name = do_read_string(fd, ph);
2000 		if (!name)
2001 			goto error;
2002 
2003 		if (strbuf_addf(&sb, "%u:%s", type, name) < 0)
2004 			goto error;
2005 		/* include a NULL character at the end */
2006 		if (strbuf_add(&sb, "", 1) < 0)
2007 			goto error;
2008 
2009 		if (!strcmp(name, "msr"))
2010 			ph->env.msr_pmu_type = type;
2011 
2012 		free(name);
2013 		pmu_num--;
2014 	}
2015 	ph->env.pmu_mappings = strbuf_detach(&sb, NULL);
2016 	return 0;
2017 
2018 error:
2019 	strbuf_release(&sb);
2020 	return -1;
2021 }
2022 
2023 static int process_group_desc(struct perf_file_section *section __maybe_unused,
2024 			      struct perf_header *ph, int fd,
2025 			      void *data __maybe_unused)
2026 {
2027 	size_t ret = -1;
2028 	u32 i, nr, nr_groups;
2029 	struct perf_session *session;
2030 	struct perf_evsel *evsel, *leader = NULL;
2031 	struct group_desc {
2032 		char *name;
2033 		u32 leader_idx;
2034 		u32 nr_members;
2035 	} *desc;
2036 
2037 	if (readn(fd, &nr_groups, sizeof(nr_groups)) != sizeof(nr_groups))
2038 		return -1;
2039 
2040 	if (ph->needs_swap)
2041 		nr_groups = bswap_32(nr_groups);
2042 
2043 	ph->env.nr_groups = nr_groups;
2044 	if (!nr_groups) {
2045 		pr_debug("group desc not available\n");
2046 		return 0;
2047 	}
2048 
2049 	desc = calloc(nr_groups, sizeof(*desc));
2050 	if (!desc)
2051 		return -1;
2052 
2053 	for (i = 0; i < nr_groups; i++) {
2054 		desc[i].name = do_read_string(fd, ph);
2055 		if (!desc[i].name)
2056 			goto out_free;
2057 
2058 		if (readn(fd, &desc[i].leader_idx, sizeof(u32)) != sizeof(u32))
2059 			goto out_free;
2060 
2061 		if (readn(fd, &desc[i].nr_members, sizeof(u32)) != sizeof(u32))
2062 			goto out_free;
2063 
2064 		if (ph->needs_swap) {
2065 			desc[i].leader_idx = bswap_32(desc[i].leader_idx);
2066 			desc[i].nr_members = bswap_32(desc[i].nr_members);
2067 		}
2068 	}
2069 
2070 	/*
2071 	 * Rebuild group relationship based on the group_desc
2072 	 */
2073 	session = container_of(ph, struct perf_session, header);
2074 	session->evlist->nr_groups = nr_groups;
2075 
2076 	i = nr = 0;
2077 	evlist__for_each_entry(session->evlist, evsel) {
2078 		if (evsel->idx == (int) desc[i].leader_idx) {
2079 			evsel->leader = evsel;
2080 			/* {anon_group} is a dummy name */
2081 			if (strcmp(desc[i].name, "{anon_group}")) {
2082 				evsel->group_name = desc[i].name;
2083 				desc[i].name = NULL;
2084 			}
2085 			evsel->nr_members = desc[i].nr_members;
2086 
2087 			if (i >= nr_groups || nr > 0) {
2088 				pr_debug("invalid group desc\n");
2089 				goto out_free;
2090 			}
2091 
2092 			leader = evsel;
2093 			nr = evsel->nr_members - 1;
2094 			i++;
2095 		} else if (nr) {
2096 			/* This is a group member */
2097 			evsel->leader = leader;
2098 
2099 			nr--;
2100 		}
2101 	}
2102 
2103 	if (i != nr_groups || nr != 0) {
2104 		pr_debug("invalid group desc\n");
2105 		goto out_free;
2106 	}
2107 
2108 	ret = 0;
2109 out_free:
2110 	for (i = 0; i < nr_groups; i++)
2111 		zfree(&desc[i].name);
2112 	free(desc);
2113 
2114 	return ret;
2115 }
2116 
2117 static int process_auxtrace(struct perf_file_section *section,
2118 			    struct perf_header *ph, int fd,
2119 			    void *data __maybe_unused)
2120 {
2121 	struct perf_session *session;
2122 	int err;
2123 
2124 	session = container_of(ph, struct perf_session, header);
2125 
2126 	err = auxtrace_index__process(fd, section->size, session,
2127 				      ph->needs_swap);
2128 	if (err < 0)
2129 		pr_err("Failed to process auxtrace index\n");
2130 	return err;
2131 }
2132 
2133 static int process_cache(struct perf_file_section *section __maybe_unused,
2134 			 struct perf_header *ph __maybe_unused, int fd __maybe_unused,
2135 			 void *data __maybe_unused)
2136 {
2137 	struct cpu_cache_level *caches;
2138 	u32 cnt, i, version;
2139 
2140 	if (readn(fd, &version, sizeof(version)) != sizeof(version))
2141 		return -1;
2142 
2143 	if (ph->needs_swap)
2144 		version = bswap_32(version);
2145 
2146 	if (version != 1)
2147 		return -1;
2148 
2149 	if (readn(fd, &cnt, sizeof(cnt)) != sizeof(cnt))
2150 		return -1;
2151 
2152 	if (ph->needs_swap)
2153 		cnt = bswap_32(cnt);
2154 
2155 	caches = zalloc(sizeof(*caches) * cnt);
2156 	if (!caches)
2157 		return -1;
2158 
2159 	for (i = 0; i < cnt; i++) {
2160 		struct cpu_cache_level c;
2161 
2162 		#define _R(v)						\
2163 			if (readn(fd, &c.v, sizeof(u32)) != sizeof(u32))\
2164 				goto out_free_caches;			\
2165 			if (ph->needs_swap)				\
2166 				c.v = bswap_32(c.v);			\
2167 
2168 		_R(level)
2169 		_R(line_size)
2170 		_R(sets)
2171 		_R(ways)
2172 		#undef _R
2173 
2174 		#define _R(v)				\
2175 			c.v = do_read_string(fd, ph);	\
2176 			if (!c.v)			\
2177 				goto out_free_caches;
2178 
2179 		_R(type)
2180 		_R(size)
2181 		_R(map)
2182 		#undef _R
2183 
2184 		caches[i] = c;
2185 	}
2186 
2187 	ph->env.caches = caches;
2188 	ph->env.caches_cnt = cnt;
2189 	return 0;
2190 out_free_caches:
2191 	free(caches);
2192 	return -1;
2193 }
2194 
/*
 * Per-feature vtable: how to write a feature section, print it for
 * "perf report --header", and process (read back) its contents.
 */
struct feature_ops {
	int (*write)(int fd, struct perf_header *h, struct perf_evlist *evlist);
	void (*print)(struct perf_header *h, int fd, FILE *fp);
	int (*process)(struct perf_file_section *section,
		       struct perf_header *h, int fd, void *data);
	const char *name;
	/* printed only on a full dump; otherwise just noted as available */
	bool full_only;
};

/* feature with write/print callbacks only (no process callback) */
#define FEAT_OPA(n, func) \
	[n] = { .name = #n, .write = write_##func, .print = print_##func }
/* feature that can also be processed (read back) from a perf.data file */
#define FEAT_OPP(n, func) \
	[n] = { .name = #n, .write = write_##func, .print = print_##func, \
		.process = process_##func }
/* like FEAT_OPP, but printed only when a full header dump is requested */
#define FEAT_OPF(n, func) \
	[n] = { .name = #n, .write = write_##func, .print = print_##func, \
		.process = process_##func, .full_only = true }

/* feature_ops not implemented: */
#define print_tracing_data	NULL
#define print_build_id		NULL

/* one entry per HEADER_* feature bit, indexed by the feature id */
static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
	FEAT_OPP(HEADER_TRACING_DATA,	tracing_data),
	FEAT_OPP(HEADER_BUILD_ID,	build_id),
	FEAT_OPP(HEADER_HOSTNAME,	hostname),
	FEAT_OPP(HEADER_OSRELEASE,	osrelease),
	FEAT_OPP(HEADER_VERSION,	version),
	FEAT_OPP(HEADER_ARCH,		arch),
	FEAT_OPP(HEADER_NRCPUS,		nrcpus),
	FEAT_OPP(HEADER_CPUDESC,	cpudesc),
	FEAT_OPP(HEADER_CPUID,		cpuid),
	FEAT_OPP(HEADER_TOTAL_MEM,	total_mem),
	FEAT_OPP(HEADER_EVENT_DESC,	event_desc),
	FEAT_OPP(HEADER_CMDLINE,	cmdline),
	FEAT_OPF(HEADER_CPU_TOPOLOGY,	cpu_topology),
	FEAT_OPF(HEADER_NUMA_TOPOLOGY,	numa_topology),
	FEAT_OPA(HEADER_BRANCH_STACK,	branch_stack),
	FEAT_OPP(HEADER_PMU_MAPPINGS,	pmu_mappings),
	FEAT_OPP(HEADER_GROUP_DESC,	group_desc),
	FEAT_OPP(HEADER_AUXTRACE,	auxtrace),
	FEAT_OPA(HEADER_STAT,		stat),
	FEAT_OPF(HEADER_CACHE,		cache),
};
2239 
/* Context passed through perf_header__process_sections() when printing. */
struct header_print_data {
	FILE *fp;	/* output stream for the header dump */
	bool full; /* extended list of headers */
};
2244 
2245 static int perf_file_section__fprintf_info(struct perf_file_section *section,
2246 					   struct perf_header *ph,
2247 					   int feat, int fd, void *data)
2248 {
2249 	struct header_print_data *hd = data;
2250 
2251 	if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
2252 		pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
2253 				"%d, continuing...\n", section->offset, feat);
2254 		return 0;
2255 	}
2256 	if (feat >= HEADER_LAST_FEATURE) {
2257 		pr_warning("unknown feature %d\n", feat);
2258 		return 0;
2259 	}
2260 	if (!feat_ops[feat].print)
2261 		return 0;
2262 
2263 	if (!feat_ops[feat].full_only || hd->full)
2264 		feat_ops[feat].print(ph, fd, hd->fp);
2265 	else
2266 		fprintf(hd->fp, "# %s info available, use -I to display\n",
2267 			feat_ops[feat].name);
2268 
2269 	return 0;
2270 }
2271 
2272 int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
2273 {
2274 	struct header_print_data hd;
2275 	struct perf_header *header = &session->header;
2276 	int fd = perf_data_file__fd(session->file);
2277 	struct stat st;
2278 	int ret, bit;
2279 
2280 	hd.fp = fp;
2281 	hd.full = full;
2282 
2283 	ret = fstat(fd, &st);
2284 	if (ret == -1)
2285 		return -1;
2286 
2287 	fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));
2288 
2289 	perf_header__process_sections(header, fd, &hd,
2290 				      perf_file_section__fprintf_info);
2291 
2292 	if (session->file->is_pipe)
2293 		return 0;
2294 
2295 	fprintf(fp, "# missing features: ");
2296 	for_each_clear_bit(bit, header->adds_features, HEADER_LAST_FEATURE) {
2297 		if (bit)
2298 			fprintf(fp, "%s ", feat_ops[bit].name);
2299 	}
2300 
2301 	fprintf(fp, "\n");
2302 	return 0;
2303 }
2304 
2305 static int do_write_feat(int fd, struct perf_header *h, int type,
2306 			 struct perf_file_section **p,
2307 			 struct perf_evlist *evlist)
2308 {
2309 	int err;
2310 	int ret = 0;
2311 
2312 	if (perf_header__has_feat(h, type)) {
2313 		if (!feat_ops[type].write)
2314 			return -1;
2315 
2316 		(*p)->offset = lseek(fd, 0, SEEK_CUR);
2317 
2318 		err = feat_ops[type].write(fd, h, evlist);
2319 		if (err < 0) {
2320 			pr_debug("failed to write feature %s\n", feat_ops[type].name);
2321 
2322 			/* undo anything written */
2323 			lseek(fd, (*p)->offset, SEEK_SET);
2324 
2325 			return -1;
2326 		}
2327 		(*p)->size = lseek(fd, 0, SEEK_CUR) - (*p)->offset;
2328 		(*p)++;
2329 	}
2330 	return ret;
2331 }
2332 
2333 static int perf_header__adds_write(struct perf_header *header,
2334 				   struct perf_evlist *evlist, int fd)
2335 {
2336 	int nr_sections;
2337 	struct perf_file_section *feat_sec, *p;
2338 	int sec_size;
2339 	u64 sec_start;
2340 	int feat;
2341 	int err;
2342 
2343 	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
2344 	if (!nr_sections)
2345 		return 0;
2346 
2347 	feat_sec = p = calloc(nr_sections, sizeof(*feat_sec));
2348 	if (feat_sec == NULL)
2349 		return -ENOMEM;
2350 
2351 	sec_size = sizeof(*feat_sec) * nr_sections;
2352 
2353 	sec_start = header->feat_offset;
2354 	lseek(fd, sec_start + sec_size, SEEK_SET);
2355 
2356 	for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
2357 		if (do_write_feat(fd, header, feat, &p, evlist))
2358 			perf_header__clear_feat(header, feat);
2359 	}
2360 
2361 	lseek(fd, sec_start, SEEK_SET);
2362 	/*
2363 	 * may write more than needed due to dropped feature, but
2364 	 * this is okay, reader will skip the mising entries
2365 	 */
2366 	err = do_write(fd, feat_sec, sec_size);
2367 	if (err < 0)
2368 		pr_debug("failed to write feature section\n");
2369 	free(feat_sec);
2370 	return err;
2371 }
2372 
2373 int perf_header__write_pipe(int fd)
2374 {
2375 	struct perf_pipe_file_header f_header;
2376 	int err;
2377 
2378 	f_header = (struct perf_pipe_file_header){
2379 		.magic	   = PERF_MAGIC,
2380 		.size	   = sizeof(f_header),
2381 	};
2382 
2383 	err = do_write(fd, &f_header, sizeof(f_header));
2384 	if (err < 0) {
2385 		pr_debug("failed to write perf pipe header\n");
2386 		return err;
2387 	}
2388 
2389 	return 0;
2390 }
2391 
/*
 * Write the perf.data file header: the per-evsel sample-id arrays, the
 * perf_file_attr table, optionally (on the final @at_exit pass) the
 * feature sections, and last of all the fixed struct perf_file_header
 * at offset 0.  Leaves the file offset at the end of the data section.
 */
int perf_session__write_header(struct perf_session *session,
			       struct perf_evlist *evlist,
			       int fd, bool at_exit)
{
	struct perf_file_header f_header;
	struct perf_file_attr   f_attr;
	struct perf_header *header = &session->header;
	struct perf_evsel *evsel;
	u64 attr_offset;
	int err;

	/* reserve room for the fixed header; it is written last */
	lseek(fd, sizeof(f_header), SEEK_SET);

	/* first the id arrays, remembering where each landed */
	evlist__for_each_entry(session->evlist, evsel) {
		evsel->id_offset = lseek(fd, 0, SEEK_CUR);
		err = do_write(fd, evsel->id, evsel->ids * sizeof(u64));
		if (err < 0) {
			pr_debug("failed to write perf header\n");
			return err;
		}
	}

	/* the attr table starts right after the id arrays */
	attr_offset = lseek(fd, 0, SEEK_CUR);

	evlist__for_each_entry(evlist, evsel) {
		f_attr = (struct perf_file_attr){
			.attr = evsel->attr,
			.ids  = {
				.offset = evsel->id_offset,
				.size   = evsel->ids * sizeof(u64),
			}
		};
		err = do_write(fd, &f_attr, sizeof(f_attr));
		if (err < 0) {
			pr_debug("failed to write perf header attribute\n");
			return err;
		}
	}

	if (!header->data_offset)
		header->data_offset = lseek(fd, 0, SEEK_CUR);
	/* feature sections go after the sample data */
	header->feat_offset = header->data_offset + header->data_size;

	if (at_exit) {
		err = perf_header__adds_write(header, evlist, fd);
		if (err < 0)
			return err;
	}

	f_header = (struct perf_file_header){
		.magic	   = PERF_MAGIC,
		.size	   = sizeof(f_header),
		.attr_size = sizeof(f_attr),
		.attrs = {
			.offset = attr_offset,
			.size   = evlist->nr_entries * sizeof(f_attr),
		},
		.data = {
			.offset = header->data_offset,
			.size	= header->data_size,
		},
		/* event_types is ignored, store zeros */
	};

	memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));

	/* now that every offset is known, write the fixed header */
	lseek(fd, 0, SEEK_SET);
	err = do_write(fd, &f_header, sizeof(f_header));
	if (err < 0) {
		pr_debug("failed to write perf header\n");
		return err;
	}
	/* leave fd positioned at the end of the data section */
	lseek(fd, header->data_offset + header->data_size, SEEK_SET);

	return 0;
}
2468 
2469 static int perf_header__getbuffer64(struct perf_header *header,
2470 				    int fd, void *buf, size_t size)
2471 {
2472 	if (readn(fd, buf, size) <= 0)
2473 		return -1;
2474 
2475 	if (header->needs_swap)
2476 		mem_bswap_64(buf, size);
2477 
2478 	return 0;
2479 }
2480 
/*
 * Walk the on-file feature section table and invoke @process on each entry.
 * The table lives at header->feat_offset and holds one perf_file_section
 * per bit set in header->adds_features.
 *
 * Returns 0 on success (including when no feature bits are set), -1 on
 * allocation or read failure, or the first negative value returned by
 * @process.
 */
int perf_header__process_sections(struct perf_header *header, int fd,
				  void *data,
				  int (*process)(struct perf_file_section *section,
						 struct perf_header *ph,
						 int feat, int fd, void *data))
{
	struct perf_file_section *feat_sec, *sec;
	int nr_sections;
	int sec_size;
	int feat;
	int err;

	/* one on-disk section per advertised feature bit */
	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
	if (!nr_sections)
		return 0;

	feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec));
	if (!feat_sec)
		return -1;

	sec_size = sizeof(*feat_sec) * nr_sections;

	lseek(fd, header->feat_offset, SEEK_SET);

	err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
	if (err < 0)
		goto out_free;

	/*
	 * NOTE(review): the table size above is counted over HEADER_FEAT_BITS
	 * but this walk stops at HEADER_LAST_FEATURE; bits set beyond
	 * HEADER_LAST_FEATURE (e.g. by a newer writer) would leave trailing
	 * sections unconsumed -- confirm this asymmetry is intended.
	 */
	for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
		err = process(sec++, header, feat, fd, data);
		if (err < 0)
			goto out_free;
	}
	err = 0;
out_free:
	free(feat_sec);
	return err;
}
2519 
/*
 * On-file perf_event_attr sizes for each published ABI revision; the
 * trailing 0 terminates the table for the probing loops below.
 */
static const int attr_file_abi_sizes[] = {
	[0] = PERF_ATTR_SIZE_VER0,
	[1] = PERF_ATTR_SIZE_VER1,
	[2] = PERF_ATTR_SIZE_VER2,
	[3] = PERF_ATTR_SIZE_VER3,
	[4] = PERF_ATTR_SIZE_VER4,
	0,
};
2528 
2529 /*
2530  * In the legacy file format, the magic number is not used to encode endianness.
2531  * hdr_sz was used to encode endianness. But given that hdr_sz can vary based
2532  * on ABI revisions, we need to try all combinations for all endianness to
2533  * detect the endianness.
2534  */
2535 static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph)
2536 {
2537 	uint64_t ref_size, attr_size;
2538 	int i;
2539 
2540 	for (i = 0 ; attr_file_abi_sizes[i]; i++) {
2541 		ref_size = attr_file_abi_sizes[i]
2542 			 + sizeof(struct perf_file_section);
2543 		if (hdr_sz != ref_size) {
2544 			attr_size = bswap_64(hdr_sz);
2545 			if (attr_size != ref_size)
2546 				continue;
2547 
2548 			ph->needs_swap = true;
2549 		}
2550 		pr_debug("ABI%d perf.data file detected, need_swap=%d\n",
2551 			 i,
2552 			 ph->needs_swap);
2553 		return 0;
2554 	}
2555 	/* could not determine endianness */
2556 	return -1;
2557 }
2558 
/* size in bytes of the ABI0 pipe-mode file header */
#define PERF_PIPE_HDR_VER0	16

/* known pipe-mode header sizes, zero-terminated for the probe loop */
static const size_t attr_pipe_abi_sizes[] = {
	[0] = PERF_PIPE_HDR_VER0,
	0,
};
2565 
2566 /*
2567  * In the legacy pipe format, there is an implicit assumption that endiannesss
2568  * between host recording the samples, and host parsing the samples is the
2569  * same. This is not always the case given that the pipe output may always be
2570  * redirected into a file and analyzed on a different machine with possibly a
2571  * different endianness and perf_event ABI revsions in the perf tool itself.
2572  */
2573 static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph)
2574 {
2575 	u64 attr_size;
2576 	int i;
2577 
2578 	for (i = 0 ; attr_pipe_abi_sizes[i]; i++) {
2579 		if (hdr_sz != attr_pipe_abi_sizes[i]) {
2580 			attr_size = bswap_64(hdr_sz);
2581 			if (attr_size != hdr_sz)
2582 				continue;
2583 
2584 			ph->needs_swap = true;
2585 		}
2586 		pr_debug("Pipe ABI%d perf.data file detected\n", i);
2587 		return 0;
2588 	}
2589 	return -1;
2590 }
2591 
2592 bool is_perf_magic(u64 magic)
2593 {
2594 	if (!memcmp(&magic, __perf_magic1, sizeof(magic))
2595 		|| magic == __perf_magic2
2596 		|| magic == __perf_magic2_sw)
2597 		return true;
2598 
2599 	return false;
2600 }
2601 
2602 static int check_magic_endian(u64 magic, uint64_t hdr_sz,
2603 			      bool is_pipe, struct perf_header *ph)
2604 {
2605 	int ret;
2606 
2607 	/* check for legacy format */
2608 	ret = memcmp(&magic, __perf_magic1, sizeof(magic));
2609 	if (ret == 0) {
2610 		ph->version = PERF_HEADER_VERSION_1;
2611 		pr_debug("legacy perf.data format\n");
2612 		if (is_pipe)
2613 			return try_all_pipe_abis(hdr_sz, ph);
2614 
2615 		return try_all_file_abis(hdr_sz, ph);
2616 	}
2617 	/*
2618 	 * the new magic number serves two purposes:
2619 	 * - unique number to identify actual perf.data files
2620 	 * - encode endianness of file
2621 	 */
2622 	ph->version = PERF_HEADER_VERSION_2;
2623 
2624 	/* check magic number with one endianness */
2625 	if (magic == __perf_magic2)
2626 		return 0;
2627 
2628 	/* check magic number with opposite endianness */
2629 	if (magic != __perf_magic2_sw)
2630 		return -1;
2631 
2632 	ph->needs_swap = true;
2633 
2634 	return 0;
2635 }
2636 
/*
 * Read and validate the file-mode perf.data header from the start of @fd,
 * fixing up endianness and legacy layouts, and populate @ph (version,
 * needs_swap, feature bits, data offsets).  Returns 0 on success, -1 on
 * read failure, bad magic, or an unrecognized header size.
 */
int perf_file_header__read(struct perf_file_header *header,
			   struct perf_header *ph, int fd)
{
	ssize_t ret;

	lseek(fd, 0, SEEK_SET);

	ret = readn(fd, header, sizeof(*header));
	if (ret <= 0)
		return -1;

	if (check_magic_endian(header->magic,
			       header->attr_size, false, ph) < 0) {
		pr_debug("magic/endian check failed\n");
		return -1;
	}

	if (ph->needs_swap) {
		/* swap the fixed u64 fields; adds_features is handled below */
		mem_bswap_64(header, offsetof(struct perf_file_header,
			     adds_features));
	}

	if (header->size != sizeof(*header)) {
		/* Support the previous format */
		if (header->size == offsetof(typeof(*header), adds_features))
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
		else
			return -1;
	} else if (ph->needs_swap) {
		/*
		 * feature bitmap is declared as an array of unsigned longs --
		 * not good since its size can differ between the host that
		 * generated the data file and the host analyzing the file.
		 *
		 * We need to handle endianness, but we don't know the size of
		 * the unsigned long where the file was generated. Take a best
		 * guess at determining it: try 64-bit swap first (ie., file
		 * created on a 64-bit host), and check if the hostname feature
		 * bit is set (this feature bit is forced on as of fbe96f2).
		 * If the bit is not, undo the 64-bit swap and try a 32-bit
		 * swap. If the hostname bit is still not set (e.g., older data
		 * file), punt and fallback to the original behavior --
		 * clearing all feature bits and setting buildid.
		 */
		mem_bswap_64(&header->adds_features,
			    BITS_TO_U64(HEADER_FEAT_BITS));

		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
			/* unswap as u64 */
			mem_bswap_64(&header->adds_features,
				    BITS_TO_U64(HEADER_FEAT_BITS));

			/* unswap as u32 */
			mem_bswap_32(&header->adds_features,
				    BITS_TO_U32(HEADER_FEAT_BITS));
		}

		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
			set_bit(HEADER_BUILD_ID, header->adds_features);
		}
	}

	memcpy(&ph->adds_features, &header->adds_features,
	       sizeof(ph->adds_features));

	ph->data_offset  = header->data.offset;
	ph->data_size	 = header->data.size;
	/* feature sections are laid out immediately after the data section */
	ph->feat_offset  = header->data.offset + header->data.size;
	return 0;
}
2708 
/*
 * Callback for perf_header__process_sections(): seek to the section's
 * payload and run the feature's ->process hook.  Unseekable or unknown
 * features are skipped (returning 0) so a single bad section does not
 * abort header processing.
 */
static int perf_file_section__process(struct perf_file_section *section,
				      struct perf_header *ph,
				      int feat, int fd, void *data)
{
	if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
		pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
			  "%d, continuing...\n", section->offset, feat);
		return 0;
	}

	/* file may have been written by a newer perf with more features */
	if (feat >= HEADER_LAST_FEATURE) {
		pr_debug("unknown feature %d, continuing...\n", feat);
		return 0;
	}

	if (!feat_ops[feat].process)
		return 0;

	return feat_ops[feat].process(section, ph, fd, data);
}
2729 
2730 static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
2731 				       struct perf_header *ph, int fd,
2732 				       bool repipe)
2733 {
2734 	ssize_t ret;
2735 
2736 	ret = readn(fd, header, sizeof(*header));
2737 	if (ret <= 0)
2738 		return -1;
2739 
2740 	if (check_magic_endian(header->magic, header->size, true, ph) < 0) {
2741 		pr_debug("endian/magic failed\n");
2742 		return -1;
2743 	}
2744 
2745 	if (ph->needs_swap)
2746 		header->size = bswap_64(header->size);
2747 
2748 	if (repipe && do_write(STDOUT_FILENO, header, sizeof(*header)) < 0)
2749 		return -1;
2750 
2751 	return 0;
2752 }
2753 
2754 static int perf_header__read_pipe(struct perf_session *session)
2755 {
2756 	struct perf_header *header = &session->header;
2757 	struct perf_pipe_file_header f_header;
2758 
2759 	if (perf_file_header__read_pipe(&f_header, header,
2760 					perf_data_file__fd(session->file),
2761 					session->repipe) < 0) {
2762 		pr_debug("incompatible file format\n");
2763 		return -EINVAL;
2764 	}
2765 
2766 	return 0;
2767 }
2768 
2769 static int read_attr(int fd, struct perf_header *ph,
2770 		     struct perf_file_attr *f_attr)
2771 {
2772 	struct perf_event_attr *attr = &f_attr->attr;
2773 	size_t sz, left;
2774 	size_t our_sz = sizeof(f_attr->attr);
2775 	ssize_t ret;
2776 
2777 	memset(f_attr, 0, sizeof(*f_attr));
2778 
2779 	/* read minimal guaranteed structure */
2780 	ret = readn(fd, attr, PERF_ATTR_SIZE_VER0);
2781 	if (ret <= 0) {
2782 		pr_debug("cannot read %d bytes of header attr\n",
2783 			 PERF_ATTR_SIZE_VER0);
2784 		return -1;
2785 	}
2786 
2787 	/* on file perf_event_attr size */
2788 	sz = attr->size;
2789 
2790 	if (ph->needs_swap)
2791 		sz = bswap_32(sz);
2792 
2793 	if (sz == 0) {
2794 		/* assume ABI0 */
2795 		sz =  PERF_ATTR_SIZE_VER0;
2796 	} else if (sz > our_sz) {
2797 		pr_debug("file uses a more recent and unsupported ABI"
2798 			 " (%zu bytes extra)\n", sz - our_sz);
2799 		return -1;
2800 	}
2801 	/* what we have not yet read and that we know about */
2802 	left = sz - PERF_ATTR_SIZE_VER0;
2803 	if (left) {
2804 		void *ptr = attr;
2805 		ptr += PERF_ATTR_SIZE_VER0;
2806 
2807 		ret = readn(fd, ptr, left);
2808 	}
2809 	/* read perf_file_section, ids are read in caller */
2810 	ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids));
2811 
2812 	return ret <= 0 ? -1 : 0;
2813 }
2814 
2815 static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel,
2816 						struct pevent *pevent)
2817 {
2818 	struct event_format *event;
2819 	char bf[128];
2820 
2821 	/* already prepared */
2822 	if (evsel->tp_format)
2823 		return 0;
2824 
2825 	if (pevent == NULL) {
2826 		pr_debug("broken or missing trace data\n");
2827 		return -1;
2828 	}
2829 
2830 	event = pevent_find_event(pevent, evsel->attr.config);
2831 	if (event == NULL) {
2832 		pr_debug("cannot find event format for %d\n", (int)evsel->attr.config);
2833 		return -1;
2834 	}
2835 
2836 	if (!evsel->name) {
2837 		snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
2838 		evsel->name = strdup(bf);
2839 		if (evsel->name == NULL)
2840 			return -1;
2841 	}
2842 
2843 	evsel->tp_format = event;
2844 	return 0;
2845 }
2846 
2847 static int perf_evlist__prepare_tracepoint_events(struct perf_evlist *evlist,
2848 						  struct pevent *pevent)
2849 {
2850 	struct perf_evsel *pos;
2851 
2852 	evlist__for_each_entry(evlist, pos) {
2853 		if (pos->attr.type == PERF_TYPE_TRACEPOINT &&
2854 		    perf_evsel__prepare_tracepoint_event(pos, pevent))
2855 			return -1;
2856 	}
2857 
2858 	return 0;
2859 }
2860 
/*
 * Top-level header reader: allocate the session evlist, parse the
 * file header (or pipe header), reconstruct one evsel per on-file attr
 * (including its sample ids), then process the feature sections and
 * tracepoint formats.  Returns 0 on success, a negative errno-style
 * value on failure.
 */
int perf_session__read_header(struct perf_session *session)
{
	struct perf_data_file *file = session->file;
	struct perf_header *header = &session->header;
	struct perf_file_header	f_header;
	struct perf_file_attr	f_attr;
	u64			f_id;
	int nr_attrs, nr_ids, i, j;
	int fd = perf_data_file__fd(file);

	session->evlist = perf_evlist__new();
	if (session->evlist == NULL)
		return -ENOMEM;

	session->evlist->env = &header->env;
	session->machines.host.env = &header->env;
	/* pipe input uses a much smaller streaming header */
	if (perf_data_file__is_pipe(file))
		return perf_header__read_pipe(session);

	if (perf_file_header__read(&f_header, header, fd) < 0)
		return -EINVAL;

	/*
	 * Sanity check that perf.data was written cleanly; data size is
	 * initialized to 0 and updated only if the on_exit function is run.
	 * If data size is still 0 then the file contains only partial
	 * information.  Just warn user and process it as much as it can.
	 */
	if (f_header.data.size == 0) {
		pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n"
			   "Was the 'perf record' command properly terminated?\n",
			   file->path);
	}

	nr_attrs = f_header.attrs.size / f_header.attr_size;
	lseek(fd, f_header.attrs.offset, SEEK_SET);

	for (i = 0; i < nr_attrs; i++) {
		struct perf_evsel *evsel;
		off_t tmp;

		if (read_attr(fd, header, &f_attr) < 0)
			goto out_errno;

		if (header->needs_swap) {
			f_attr.ids.size   = bswap_64(f_attr.ids.size);
			f_attr.ids.offset = bswap_64(f_attr.ids.offset);
			perf_event__attr_swap(&f_attr.attr);
		}

		/* remember position after the attr so we can resume the walk */
		tmp = lseek(fd, 0, SEEK_CUR);
		evsel = perf_evsel__new(&f_attr.attr);

		if (evsel == NULL)
			goto out_delete_evlist;

		evsel->needs_swap = header->needs_swap;
		/*
		 * Do it before so that if perf_evsel__alloc_id fails, this
		 * entry gets purged too at perf_evlist__delete().
		 */
		perf_evlist__add(session->evlist, evsel);

		nr_ids = f_attr.ids.size / sizeof(u64);
		/*
		 * We don't have the cpu and thread maps on the header, so
		 * for allocating the perf_sample_id table we fake 1 cpu and
		 * hattr->ids threads.
		 */
		if (perf_evsel__alloc_id(evsel, 1, nr_ids))
			goto out_delete_evlist;

		/* ids live elsewhere in the file; jump there and back */
		lseek(fd, f_attr.ids.offset, SEEK_SET);

		for (j = 0; j < nr_ids; j++) {
			if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
				goto out_errno;

			perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);
		}

		lseek(fd, tmp, SEEK_SET);
	}

	symbol_conf.nr_events = nr_attrs;

	perf_header__process_sections(header, fd, &session->tevent,
				      perf_file_section__process);

	if (perf_evlist__prepare_tracepoint_events(session->evlist,
						   session->tevent.pevent))
		goto out_delete_evlist;

	return 0;
out_errno:
	return -errno;

out_delete_evlist:
	perf_evlist__delete(session->evlist);
	session->evlist = NULL;
	return -ENOMEM;
}
2963 
2964 int perf_event__synthesize_attr(struct perf_tool *tool,
2965 				struct perf_event_attr *attr, u32 ids, u64 *id,
2966 				perf_event__handler_t process)
2967 {
2968 	union perf_event *ev;
2969 	size_t size;
2970 	int err;
2971 
2972 	size = sizeof(struct perf_event_attr);
2973 	size = PERF_ALIGN(size, sizeof(u64));
2974 	size += sizeof(struct perf_event_header);
2975 	size += ids * sizeof(u64);
2976 
2977 	ev = malloc(size);
2978 
2979 	if (ev == NULL)
2980 		return -ENOMEM;
2981 
2982 	ev->attr.attr = *attr;
2983 	memcpy(ev->attr.id, id, ids * sizeof(u64));
2984 
2985 	ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
2986 	ev->attr.header.size = (u16)size;
2987 
2988 	if (ev->attr.header.size == size)
2989 		err = process(tool, ev, NULL, NULL);
2990 	else
2991 		err = -E2BIG;
2992 
2993 	free(ev);
2994 
2995 	return err;
2996 }
2997 
2998 static struct event_update_event *
2999 event_update_event__new(size_t size, u64 type, u64 id)
3000 {
3001 	struct event_update_event *ev;
3002 
3003 	size += sizeof(*ev);
3004 	size  = PERF_ALIGN(size, sizeof(u64));
3005 
3006 	ev = zalloc(size);
3007 	if (ev) {
3008 		ev->header.type = PERF_RECORD_EVENT_UPDATE;
3009 		ev->header.size = (u16)size;
3010 		ev->type = type;
3011 		ev->id = id;
3012 	}
3013 	return ev;
3014 }
3015 
3016 int
3017 perf_event__synthesize_event_update_unit(struct perf_tool *tool,
3018 					 struct perf_evsel *evsel,
3019 					 perf_event__handler_t process)
3020 {
3021 	struct event_update_event *ev;
3022 	size_t size = strlen(evsel->unit);
3023 	int err;
3024 
3025 	ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->id[0]);
3026 	if (ev == NULL)
3027 		return -ENOMEM;
3028 
3029 	strncpy(ev->data, evsel->unit, size);
3030 	err = process(tool, (union perf_event *)ev, NULL, NULL);
3031 	free(ev);
3032 	return err;
3033 }
3034 
3035 int
3036 perf_event__synthesize_event_update_scale(struct perf_tool *tool,
3037 					  struct perf_evsel *evsel,
3038 					  perf_event__handler_t process)
3039 {
3040 	struct event_update_event *ev;
3041 	struct event_update_event_scale *ev_data;
3042 	int err;
3043 
3044 	ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->id[0]);
3045 	if (ev == NULL)
3046 		return -ENOMEM;
3047 
3048 	ev_data = (struct event_update_event_scale *) ev->data;
3049 	ev_data->scale = evsel->scale;
3050 	err = process(tool, (union perf_event*) ev, NULL, NULL);
3051 	free(ev);
3052 	return err;
3053 }
3054 
3055 int
3056 perf_event__synthesize_event_update_name(struct perf_tool *tool,
3057 					 struct perf_evsel *evsel,
3058 					 perf_event__handler_t process)
3059 {
3060 	struct event_update_event *ev;
3061 	size_t len = strlen(evsel->name);
3062 	int err;
3063 
3064 	ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->id[0]);
3065 	if (ev == NULL)
3066 		return -ENOMEM;
3067 
3068 	strncpy(ev->data, evsel->name, len);
3069 	err = process(tool, (union perf_event*) ev, NULL, NULL);
3070 	free(ev);
3071 	return err;
3072 }
3073 
3074 int
3075 perf_event__synthesize_event_update_cpus(struct perf_tool *tool,
3076 					struct perf_evsel *evsel,
3077 					perf_event__handler_t process)
3078 {
3079 	size_t size = sizeof(struct event_update_event);
3080 	struct event_update_event *ev;
3081 	int max, err;
3082 	u16 type;
3083 
3084 	if (!evsel->own_cpus)
3085 		return 0;
3086 
3087 	ev = cpu_map_data__alloc(evsel->own_cpus, &size, &type, &max);
3088 	if (!ev)
3089 		return -ENOMEM;
3090 
3091 	ev->header.type = PERF_RECORD_EVENT_UPDATE;
3092 	ev->header.size = (u16)size;
3093 	ev->type = PERF_EVENT_UPDATE__CPUS;
3094 	ev->id   = evsel->id[0];
3095 
3096 	cpu_map_data__synthesize((struct cpu_map_data *) ev->data,
3097 				 evsel->own_cpus,
3098 				 type, max);
3099 
3100 	err = process(tool, (union perf_event*) ev, NULL, NULL);
3101 	free(ev);
3102 	return err;
3103 }
3104 
3105 size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
3106 {
3107 	struct event_update_event *ev = &event->event_update;
3108 	struct event_update_event_scale *ev_scale;
3109 	struct event_update_event_cpus *ev_cpus;
3110 	struct cpu_map *map;
3111 	size_t ret;
3112 
3113 	ret = fprintf(fp, "\n... id:    %" PRIu64 "\n", ev->id);
3114 
3115 	switch (ev->type) {
3116 	case PERF_EVENT_UPDATE__SCALE:
3117 		ev_scale = (struct event_update_event_scale *) ev->data;
3118 		ret += fprintf(fp, "... scale: %f\n", ev_scale->scale);
3119 		break;
3120 	case PERF_EVENT_UPDATE__UNIT:
3121 		ret += fprintf(fp, "... unit:  %s\n", ev->data);
3122 		break;
3123 	case PERF_EVENT_UPDATE__NAME:
3124 		ret += fprintf(fp, "... name:  %s\n", ev->data);
3125 		break;
3126 	case PERF_EVENT_UPDATE__CPUS:
3127 		ev_cpus = (struct event_update_event_cpus *) ev->data;
3128 		ret += fprintf(fp, "... ");
3129 
3130 		map = cpu_map__new_data(&ev_cpus->cpus);
3131 		if (map)
3132 			ret += cpu_map__fprintf(map, fp);
3133 		else
3134 			ret += fprintf(fp, "failed to get cpus\n");
3135 		break;
3136 	default:
3137 		ret += fprintf(fp, "... unknown type\n");
3138 		break;
3139 	}
3140 
3141 	return ret;
3142 }
3143 
3144 int perf_event__synthesize_attrs(struct perf_tool *tool,
3145 				   struct perf_session *session,
3146 				   perf_event__handler_t process)
3147 {
3148 	struct perf_evsel *evsel;
3149 	int err = 0;
3150 
3151 	evlist__for_each_entry(session->evlist, evsel) {
3152 		err = perf_event__synthesize_attr(tool, &evsel->attr, evsel->ids,
3153 						  evsel->id, process);
3154 		if (err) {
3155 			pr_debug("failed to create perf header attribute\n");
3156 			return err;
3157 		}
3158 	}
3159 
3160 	return err;
3161 }
3162 
3163 int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
3164 			     union perf_event *event,
3165 			     struct perf_evlist **pevlist)
3166 {
3167 	u32 i, ids, n_ids;
3168 	struct perf_evsel *evsel;
3169 	struct perf_evlist *evlist = *pevlist;
3170 
3171 	if (evlist == NULL) {
3172 		*pevlist = evlist = perf_evlist__new();
3173 		if (evlist == NULL)
3174 			return -ENOMEM;
3175 	}
3176 
3177 	evsel = perf_evsel__new(&event->attr.attr);
3178 	if (evsel == NULL)
3179 		return -ENOMEM;
3180 
3181 	perf_evlist__add(evlist, evsel);
3182 
3183 	ids = event->header.size;
3184 	ids -= (void *)&event->attr.id - (void *)event;
3185 	n_ids = ids / sizeof(u64);
3186 	/*
3187 	 * We don't have the cpu and thread maps on the header, so
3188 	 * for allocating the perf_sample_id table we fake 1 cpu and
3189 	 * hattr->ids threads.
3190 	 */
3191 	if (perf_evsel__alloc_id(evsel, 1, n_ids))
3192 		return -ENOMEM;
3193 
3194 	for (i = 0; i < n_ids; i++) {
3195 		perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]);
3196 	}
3197 
3198 	symbol_conf.nr_events = evlist->nr_entries;
3199 
3200 	return 0;
3201 }
3202 
3203 int perf_event__process_event_update(struct perf_tool *tool __maybe_unused,
3204 				     union perf_event *event,
3205 				     struct perf_evlist **pevlist)
3206 {
3207 	struct event_update_event *ev = &event->event_update;
3208 	struct event_update_event_scale *ev_scale;
3209 	struct event_update_event_cpus *ev_cpus;
3210 	struct perf_evlist *evlist;
3211 	struct perf_evsel *evsel;
3212 	struct cpu_map *map;
3213 
3214 	if (!pevlist || *pevlist == NULL)
3215 		return -EINVAL;
3216 
3217 	evlist = *pevlist;
3218 
3219 	evsel = perf_evlist__id2evsel(evlist, ev->id);
3220 	if (evsel == NULL)
3221 		return -EINVAL;
3222 
3223 	switch (ev->type) {
3224 	case PERF_EVENT_UPDATE__UNIT:
3225 		evsel->unit = strdup(ev->data);
3226 		break;
3227 	case PERF_EVENT_UPDATE__NAME:
3228 		evsel->name = strdup(ev->data);
3229 		break;
3230 	case PERF_EVENT_UPDATE__SCALE:
3231 		ev_scale = (struct event_update_event_scale *) ev->data;
3232 		evsel->scale = ev_scale->scale;
3233 		break;
3234 	case PERF_EVENT_UPDATE__CPUS:
3235 		ev_cpus = (struct event_update_event_cpus *) ev->data;
3236 
3237 		map = cpu_map__new_data(&ev_cpus->cpus);
3238 		if (map)
3239 			evsel->own_cpus = map;
3240 		else
3241 			pr_err("failed to get event_update cpus\n");
3242 	default:
3243 		break;
3244 	}
3245 
3246 	return 0;
3247 }
3248 
/*
 * Emit a PERF_RECORD_HEADER_TRACING_DATA event followed by the tracing
 * data itself (written to @fd by tracing_data_put()), padded to a u64
 * boundary.  Returns the aligned data size on success, -1 if the tracing
 * data could not be gathered.
 */
int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd,
					struct perf_evlist *evlist,
					perf_event__handler_t process)
{
	union perf_event ev;
	struct tracing_data *tdata;
	ssize_t size = 0, aligned_size = 0, padding;
	int err __maybe_unused = 0;

	/*
	 * We are going to store the size of the data followed
	 * by the data contents. Since the fd descriptor is a pipe,
	 * we cannot seek back to store the size of the data once
	 * we know it. Instead we:
	 *
	 * - write the tracing data to the temp file
	 * - get/write the data size to pipe
	 * - write the tracing data from the temp file
	 *   to the pipe
	 */
	tdata = tracing_data_get(&evlist->entries, fd, true);
	if (!tdata)
		return -1;

	memset(&ev, 0, sizeof(ev));

	ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
	size = tdata->size;
	aligned_size = PERF_ALIGN(size, sizeof(u64));
	padding = aligned_size - size;
	ev.tracing_data.header.size = sizeof(ev.tracing_data);
	ev.tracing_data.size = aligned_size;

	/* NOTE(review): the handler's return value is ignored here -- confirm
	 * a failed process() is acceptable to callers. */
	process(tool, &ev, NULL, NULL);

	/*
	 * The put function will copy all the tracing data
	 * stored in temp file to the pipe.
	 */
	tracing_data_put(tdata);

	/* pad the stream so the next record starts u64-aligned */
	write_padded(fd, NULL, 0, padding);

	return aligned_size;
}
3294 
/*
 * Consume a PERF_RECORD_HEADER_TRACING_DATA event: parse the tracing data
 * that follows the record in the stream, skip (and optionally repipe) the
 * alignment padding, and prepare tracepoint evsels.  Returns the number of
 * bytes consumed after the record header, or -1 on error.
 */
int perf_event__process_tracing_data(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_session *session)
{
	ssize_t size_read, padding, size = event->tracing_data.size;
	int fd = perf_data_file__fd(session->file);
	off_t offset = lseek(fd, 0, SEEK_CUR);
	char buf[BUFSIZ];

	/* setup for reading amidst mmap */
	lseek(fd, offset + sizeof(struct tracing_data_event),
	      SEEK_SET);

	size_read = trace_report(fd, &session->tevent,
				 session->repipe);
	/* the writer padded the data up to the next u64 boundary */
	padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read;

	if (readn(fd, buf, padding) < 0) {
		pr_err("%s: reading input file", __func__);
		return -1;
	}
	if (session->repipe) {
		int retw = write(STDOUT_FILENO, buf, padding);
		if (retw <= 0 || retw != padding) {
			pr_err("%s: repiping tracing data padding", __func__);
			return -1;
		}
	}

	/* what we parsed must account for exactly the advertised size */
	if (size_read + padding != size) {
		pr_err("%s: tracing data size mismatch", __func__);
		return -1;
	}

	perf_evlist__prepare_tracepoint_events(session->evlist,
					       session->tevent.pevent);

	return size_read + padding;
}
3334 
3335 int perf_event__synthesize_build_id(struct perf_tool *tool,
3336 				    struct dso *pos, u16 misc,
3337 				    perf_event__handler_t process,
3338 				    struct machine *machine)
3339 {
3340 	union perf_event ev;
3341 	size_t len;
3342 	int err = 0;
3343 
3344 	if (!pos->hit)
3345 		return err;
3346 
3347 	memset(&ev, 0, sizeof(ev));
3348 
3349 	len = pos->long_name_len + 1;
3350 	len = PERF_ALIGN(len, NAME_ALIGN);
3351 	memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
3352 	ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
3353 	ev.build_id.header.misc = misc;
3354 	ev.build_id.pid = machine->pid;
3355 	ev.build_id.header.size = sizeof(ev.build_id) + len;
3356 	memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
3357 
3358 	err = process(tool, &ev, NULL, machine);
3359 
3360 	return err;
3361 }
3362 
/*
 * Handle a PERF_RECORD_HEADER_BUILD_ID record by registering the
 * filename -> build-id mapping with the session.  Always returns 0.
 */
int perf_event__process_build_id(struct perf_tool *tool __maybe_unused,
				 union perf_event *event,
				 struct perf_session *session)
{
	__event_process_build_id(&event->build_id,
				 event->build_id.filename,
				 session);
	return 0;
}
3372