xref: /openbmc/linux/tools/perf/util/header.c (revision 609e478b)
1 #include "util.h"
2 #include <sys/types.h>
3 #include <byteswap.h>
4 #include <unistd.h>
5 #include <stdio.h>
6 #include <stdlib.h>
7 #include <linux/list.h>
8 #include <linux/kernel.h>
9 #include <linux/bitops.h>
10 #include <sys/utsname.h>
11 
12 #include "evlist.h"
13 #include "evsel.h"
14 #include "header.h"
15 #include "../perf.h"
16 #include "trace-event.h"
17 #include "session.h"
18 #include "symbol.h"
19 #include "debug.h"
20 #include "cpumap.h"
21 #include "pmu.h"
22 #include "vdso.h"
23 #include "strbuf.h"
24 #include "build-id.h"
25 #include "data.h"
26 
27 static bool no_buildid_cache = false;
28 
29 static u32 header_argc;
30 static const char **header_argv;
31 
32 /*
33  * magic2 = "PERFILE2"
34  * must be a numerical value to let the endianness
35  * determine the memory layout. That way we are able
36  * to detect endianness when reading the perf.data file
37  * back.
38  *
39  * we check for legacy (PERFFILE) format.
40  */
41 static const char *__perf_magic1 = "PERFFILE";
42 static const u64 __perf_magic2    = 0x32454c4946524550ULL;
43 static const u64 __perf_magic2_sw = 0x50455246494c4532ULL;
44 
45 #define PERF_MAGIC	__perf_magic2
46 
47 struct perf_file_attr {
48 	struct perf_event_attr	attr;
49 	struct perf_file_section	ids;
50 };
51 
52 void perf_header__set_feat(struct perf_header *header, int feat)
53 {
54 	set_bit(feat, header->adds_features);
55 }
56 
57 void perf_header__clear_feat(struct perf_header *header, int feat)
58 {
59 	clear_bit(feat, header->adds_features);
60 }
61 
62 bool perf_header__has_feat(const struct perf_header *header, int feat)
63 {
64 	return test_bit(feat, header->adds_features);
65 }
66 
67 static int do_write(int fd, const void *buf, size_t size)
68 {
69 	while (size) {
70 		int ret = write(fd, buf, size);
71 
72 		if (ret < 0)
73 			return -errno;
74 
75 		size -= ret;
76 		buf += ret;
77 	}
78 
79 	return 0;
80 }
81 
82 #define NAME_ALIGN 64
83 
84 static int write_padded(int fd, const void *bf, size_t count,
85 			size_t count_aligned)
86 {
87 	static const char zero_buf[NAME_ALIGN];
88 	int err = do_write(fd, bf, count);
89 
90 	if (!err)
91 		err = do_write(fd, zero_buf, count_aligned - count);
92 
93 	return err;
94 }
95 
96 static int do_write_string(int fd, const char *str)
97 {
98 	u32 len, olen;
99 	int ret;
100 
101 	olen = strlen(str) + 1;
102 	len = PERF_ALIGN(olen, NAME_ALIGN);
103 
104 	/* write len, incl. \0 */
105 	ret = do_write(fd, &len, sizeof(len));
106 	if (ret < 0)
107 		return ret;
108 
109 	return write_padded(fd, str, olen, len);
110 }
111 
112 static char *do_read_string(int fd, struct perf_header *ph)
113 {
114 	ssize_t sz, ret;
115 	u32 len;
116 	char *buf;
117 
118 	sz = readn(fd, &len, sizeof(len));
119 	if (sz < (ssize_t)sizeof(len))
120 		return NULL;
121 
122 	if (ph->needs_swap)
123 		len = bswap_32(len);
124 
125 	buf = malloc(len);
126 	if (!buf)
127 		return NULL;
128 
129 	ret = readn(fd, buf, len);
130 	if (ret == (ssize_t)len) {
131 		/*
132 		 * strings are padded by zeroes
133 		 * thus the actual strlen of buf
134 		 * may be less than len
135 		 */
136 		return buf;
137 	}
138 
139 	free(buf);
140 	return NULL;
141 }
142 
143 int
144 perf_header__set_cmdline(int argc, const char **argv)
145 {
146 	int i;
147 
148 	/*
149 	 * If header_argv has already been set, do not override it.
150 	 * This allows a command to set the cmdline, parse args and
151 	 * then call another builtin function that implements a
152 	 * command -- e.g, cmd_kvm calling cmd_record.
153 	 */
154 	if (header_argv)
155 		return 0;
156 
157 	header_argc = (u32)argc;
158 
159 	/* do not include NULL termination */
160 	header_argv = calloc(argc, sizeof(char *));
161 	if (!header_argv)
162 		return -ENOMEM;
163 
164 	/*
165 	 * must copy argv contents because it gets moved
166 	 * around during option parsing
167 	 */
168 	for (i = 0; i < argc ; i++)
169 		header_argv[i] = argv[i];
170 
171 	return 0;
172 }
173 
174 #define dsos__for_each_with_build_id(pos, head)	\
175 	list_for_each_entry(pos, head, node)	\
176 		if (!pos->has_build_id)		\
177 			continue;		\
178 		else
179 
180 static int write_buildid(const char *name, size_t name_len, u8 *build_id,
181 			 pid_t pid, u16 misc, int fd)
182 {
183 	int err;
184 	struct build_id_event b;
185 	size_t len;
186 
187 	len = name_len + 1;
188 	len = PERF_ALIGN(len, NAME_ALIGN);
189 
190 	memset(&b, 0, sizeof(b));
191 	memcpy(&b.build_id, build_id, BUILD_ID_SIZE);
192 	b.pid = pid;
193 	b.header.misc = misc;
194 	b.header.size = sizeof(b) + len;
195 
196 	err = do_write(fd, &b, sizeof(b));
197 	if (err < 0)
198 		return err;
199 
200 	return write_padded(fd, name, name_len + 1, len);
201 }
202 
203 static int __dsos__hit_all(struct list_head *head)
204 {
205 	struct dso *pos;
206 
207 	list_for_each_entry(pos, head, node)
208 		pos->hit = true;
209 
210 	return 0;
211 }
212 
213 static int machine__hit_all_dsos(struct machine *machine)
214 {
215 	int err;
216 
217 	err = __dsos__hit_all(&machine->kernel_dsos.head);
218 	if (err)
219 		return err;
220 
221 	return __dsos__hit_all(&machine->user_dsos.head);
222 }
223 
224 int dsos__hit_all(struct perf_session *session)
225 {
226 	struct rb_node *nd;
227 	int err;
228 
229 	err = machine__hit_all_dsos(&session->machines.host);
230 	if (err)
231 		return err;
232 
233 	for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
234 		struct machine *pos = rb_entry(nd, struct machine, rb_node);
235 
236 		err = machine__hit_all_dsos(pos);
237 		if (err)
238 			return err;
239 	}
240 
241 	return 0;
242 }
243 
244 static int __dsos__write_buildid_table(struct list_head *head,
245 				       struct machine *machine,
246 				       pid_t pid, u16 misc, int fd)
247 {
248 	char nm[PATH_MAX];
249 	struct dso *pos;
250 
251 	dsos__for_each_with_build_id(pos, head) {
252 		int err;
253 		const char *name;
254 		size_t name_len;
255 
256 		if (!pos->hit)
257 			continue;
258 
259 		if (dso__is_vdso(pos)) {
260 			name = pos->short_name;
261 			name_len = pos->short_name_len + 1;
262 		} else if (dso__is_kcore(pos)) {
263 			machine__mmap_name(machine, nm, sizeof(nm));
264 			name = nm;
265 			name_len = strlen(nm) + 1;
266 		} else {
267 			name = pos->long_name;
268 			name_len = pos->long_name_len + 1;
269 		}
270 
271 		err = write_buildid(name, name_len, pos->build_id,
272 				    pid, misc, fd);
273 		if (err)
274 			return err;
275 	}
276 
277 	return 0;
278 }
279 
280 static int machine__write_buildid_table(struct machine *machine, int fd)
281 {
282 	int err;
283 	u16 kmisc = PERF_RECORD_MISC_KERNEL,
284 	    umisc = PERF_RECORD_MISC_USER;
285 
286 	if (!machine__is_host(machine)) {
287 		kmisc = PERF_RECORD_MISC_GUEST_KERNEL;
288 		umisc = PERF_RECORD_MISC_GUEST_USER;
289 	}
290 
291 	err = __dsos__write_buildid_table(&machine->kernel_dsos.head, machine,
292 					  machine->pid, kmisc, fd);
293 	if (err == 0)
294 		err = __dsos__write_buildid_table(&machine->user_dsos.head,
295 						  machine, machine->pid, umisc,
296 						  fd);
297 	return err;
298 }
299 
300 static int dsos__write_buildid_table(struct perf_header *header, int fd)
301 {
302 	struct perf_session *session = container_of(header,
303 			struct perf_session, header);
304 	struct rb_node *nd;
305 	int err = machine__write_buildid_table(&session->machines.host, fd);
306 
307 	if (err)
308 		return err;
309 
310 	for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
311 		struct machine *pos = rb_entry(nd, struct machine, rb_node);
312 		err = machine__write_buildid_table(pos, fd);
313 		if (err)
314 			break;
315 	}
316 	return err;
317 }
318 
319 int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
320 			  const char *name, bool is_kallsyms, bool is_vdso)
321 {
322 	const size_t size = PATH_MAX;
323 	char *realname, *filename = zalloc(size),
324 	     *linkname = zalloc(size), *targetname;
325 	int len, err = -1;
326 	bool slash = is_kallsyms || is_vdso;
327 
328 	if (is_kallsyms) {
329 		if (symbol_conf.kptr_restrict) {
330 			pr_debug("Not caching a kptr_restrict'ed /proc/kallsyms\n");
331 			err = 0;
332 			goto out_free;
333 		}
334 		realname = (char *) name;
335 	} else
336 		realname = realpath(name, NULL);
337 
338 	if (realname == NULL || filename == NULL || linkname == NULL)
339 		goto out_free;
340 
341 	len = scnprintf(filename, size, "%s%s%s",
342 		       debugdir, slash ? "/" : "",
343 		       is_vdso ? DSO__NAME_VDSO : realname);
344 	if (mkdir_p(filename, 0755))
345 		goto out_free;
346 
347 	snprintf(filename + len, size - len, "/%s", sbuild_id);
348 
349 	if (access(filename, F_OK)) {
350 		if (is_kallsyms) {
351 			 if (copyfile("/proc/kallsyms", filename))
352 				goto out_free;
353 		} else if (link(realname, filename) && copyfile(name, filename))
354 			goto out_free;
355 	}
356 
357 	len = scnprintf(linkname, size, "%s/.build-id/%.2s",
358 		       debugdir, sbuild_id);
359 
360 	if (access(linkname, X_OK) && mkdir_p(linkname, 0755))
361 		goto out_free;
362 
363 	snprintf(linkname + len, size - len, "/%s", sbuild_id + 2);
364 	targetname = filename + strlen(debugdir) - 5;
365 	memcpy(targetname, "../..", 5);
366 
367 	if (symlink(targetname, linkname) == 0)
368 		err = 0;
369 out_free:
370 	if (!is_kallsyms)
371 		free(realname);
372 	free(filename);
373 	free(linkname);
374 	return err;
375 }
376 
377 static int build_id_cache__add_b(const u8 *build_id, size_t build_id_size,
378 				 const char *name, const char *debugdir,
379 				 bool is_kallsyms, bool is_vdso)
380 {
381 	char sbuild_id[BUILD_ID_SIZE * 2 + 1];
382 
383 	build_id__sprintf(build_id, build_id_size, sbuild_id);
384 
385 	return build_id_cache__add_s(sbuild_id, debugdir, name,
386 				     is_kallsyms, is_vdso);
387 }
388 
389 int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir)
390 {
391 	const size_t size = PATH_MAX;
392 	char *filename = zalloc(size),
393 	     *linkname = zalloc(size);
394 	int err = -1;
395 
396 	if (filename == NULL || linkname == NULL)
397 		goto out_free;
398 
399 	snprintf(linkname, size, "%s/.build-id/%.2s/%s",
400 		 debugdir, sbuild_id, sbuild_id + 2);
401 
402 	if (access(linkname, F_OK))
403 		goto out_free;
404 
405 	if (readlink(linkname, filename, size - 1) < 0)
406 		goto out_free;
407 
408 	if (unlink(linkname))
409 		goto out_free;
410 
411 	/*
412 	 * Since the link is relative, we must make it absolute:
413 	 */
414 	snprintf(linkname, size, "%s/.build-id/%.2s/%s",
415 		 debugdir, sbuild_id, filename);
416 
417 	if (unlink(linkname))
418 		goto out_free;
419 
420 	err = 0;
421 out_free:
422 	free(filename);
423 	free(linkname);
424 	return err;
425 }
426 
427 static int dso__cache_build_id(struct dso *dso, struct machine *machine,
428 			       const char *debugdir)
429 {
430 	bool is_kallsyms = dso->kernel && dso->long_name[0] != '/';
431 	bool is_vdso = dso__is_vdso(dso);
432 	const char *name = dso->long_name;
433 	char nm[PATH_MAX];
434 
435 	if (dso__is_kcore(dso)) {
436 		is_kallsyms = true;
437 		machine__mmap_name(machine, nm, sizeof(nm));
438 		name = nm;
439 	}
440 	return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id), name,
441 				     debugdir, is_kallsyms, is_vdso);
442 }
443 
444 static int __dsos__cache_build_ids(struct list_head *head,
445 				   struct machine *machine, const char *debugdir)
446 {
447 	struct dso *pos;
448 	int err = 0;
449 
450 	dsos__for_each_with_build_id(pos, head)
451 		if (dso__cache_build_id(pos, machine, debugdir))
452 			err = -1;
453 
454 	return err;
455 }
456 
457 static int machine__cache_build_ids(struct machine *machine, const char *debugdir)
458 {
459 	int ret = __dsos__cache_build_ids(&machine->kernel_dsos.head, machine,
460 					  debugdir);
461 	ret |= __dsos__cache_build_ids(&machine->user_dsos.head, machine,
462 				       debugdir);
463 	return ret;
464 }
465 
466 static int perf_session__cache_build_ids(struct perf_session *session)
467 {
468 	struct rb_node *nd;
469 	int ret;
470 	char debugdir[PATH_MAX];
471 
472 	snprintf(debugdir, sizeof(debugdir), "%s", buildid_dir);
473 
474 	if (mkdir(debugdir, 0755) != 0 && errno != EEXIST)
475 		return -1;
476 
477 	ret = machine__cache_build_ids(&session->machines.host, debugdir);
478 
479 	for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
480 		struct machine *pos = rb_entry(nd, struct machine, rb_node);
481 		ret |= machine__cache_build_ids(pos, debugdir);
482 	}
483 	return ret ? -1 : 0;
484 }
485 
486 static bool machine__read_build_ids(struct machine *machine, bool with_hits)
487 {
488 	bool ret;
489 
490 	ret  = __dsos__read_build_ids(&machine->kernel_dsos.head, with_hits);
491 	ret |= __dsos__read_build_ids(&machine->user_dsos.head, with_hits);
492 	return ret;
493 }
494 
495 static bool perf_session__read_build_ids(struct perf_session *session, bool with_hits)
496 {
497 	struct rb_node *nd;
498 	bool ret = machine__read_build_ids(&session->machines.host, with_hits);
499 
500 	for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
501 		struct machine *pos = rb_entry(nd, struct machine, rb_node);
502 		ret |= machine__read_build_ids(pos, with_hits);
503 	}
504 
505 	return ret;
506 }
507 
508 static int write_tracing_data(int fd, struct perf_header *h __maybe_unused,
509 			    struct perf_evlist *evlist)
510 {
511 	return read_tracing_data(fd, &evlist->entries);
512 }
513 
514 
515 static int write_build_id(int fd, struct perf_header *h,
516 			  struct perf_evlist *evlist __maybe_unused)
517 {
518 	struct perf_session *session;
519 	int err;
520 
521 	session = container_of(h, struct perf_session, header);
522 
523 	if (!perf_session__read_build_ids(session, true))
524 		return -1;
525 
526 	err = dsos__write_buildid_table(h, fd);
527 	if (err < 0) {
528 		pr_debug("failed to write buildid table\n");
529 		return err;
530 	}
531 	if (!no_buildid_cache)
532 		perf_session__cache_build_ids(session);
533 
534 	return 0;
535 }
536 
537 static int write_hostname(int fd, struct perf_header *h __maybe_unused,
538 			  struct perf_evlist *evlist __maybe_unused)
539 {
540 	struct utsname uts;
541 	int ret;
542 
543 	ret = uname(&uts);
544 	if (ret < 0)
545 		return -1;
546 
547 	return do_write_string(fd, uts.nodename);
548 }
549 
550 static int write_osrelease(int fd, struct perf_header *h __maybe_unused,
551 			   struct perf_evlist *evlist __maybe_unused)
552 {
553 	struct utsname uts;
554 	int ret;
555 
556 	ret = uname(&uts);
557 	if (ret < 0)
558 		return -1;
559 
560 	return do_write_string(fd, uts.release);
561 }
562 
563 static int write_arch(int fd, struct perf_header *h __maybe_unused,
564 		      struct perf_evlist *evlist __maybe_unused)
565 {
566 	struct utsname uts;
567 	int ret;
568 
569 	ret = uname(&uts);
570 	if (ret < 0)
571 		return -1;
572 
573 	return do_write_string(fd, uts.machine);
574 }
575 
576 static int write_version(int fd, struct perf_header *h __maybe_unused,
577 			 struct perf_evlist *evlist __maybe_unused)
578 {
579 	return do_write_string(fd, perf_version_string);
580 }
581 
582 static int write_cpudesc(int fd, struct perf_header *h __maybe_unused,
583 		       struct perf_evlist *evlist __maybe_unused)
584 {
585 #ifndef CPUINFO_PROC
586 #define CPUINFO_PROC NULL
587 #endif
588 	FILE *file;
589 	char *buf = NULL;
590 	char *s, *p;
591 	const char *search = CPUINFO_PROC;
592 	size_t len = 0;
593 	int ret = -1;
594 
595 	if (!search)
596 		return -1;
597 
598 	file = fopen("/proc/cpuinfo", "r");
599 	if (!file)
600 		return -1;
601 
602 	while (getline(&buf, &len, file) > 0) {
603 		ret = strncmp(buf, search, strlen(search));
604 		if (!ret)
605 			break;
606 	}
607 
608 	if (ret)
609 		goto done;
610 
611 	s = buf;
612 
613 	p = strchr(buf, ':');
614 	if (p && *(p+1) == ' ' && *(p+2))
615 		s = p + 2;
616 	p = strchr(s, '\n');
617 	if (p)
618 		*p = '\0';
619 
620 	/* squash extra space characters (branding string) */
621 	p = s;
622 	while (*p) {
623 		if (isspace(*p)) {
624 			char *r = p + 1;
625 			char *q = r;
626 			*p = ' ';
627 			while (*q && isspace(*q))
628 				q++;
629 			if (q != (p+1))
630 				while ((*r++ = *q++));
631 		}
632 		p++;
633 	}
634 	ret = do_write_string(fd, s);
635 done:
636 	free(buf);
637 	fclose(file);
638 	return ret;
639 }
640 
641 static int write_nrcpus(int fd, struct perf_header *h __maybe_unused,
642 			struct perf_evlist *evlist __maybe_unused)
643 {
644 	long nr;
645 	u32 nrc, nra;
646 	int ret;
647 
648 	nr = sysconf(_SC_NPROCESSORS_CONF);
649 	if (nr < 0)
650 		return -1;
651 
652 	nrc = (u32)(nr & UINT_MAX);
653 
654 	nr = sysconf(_SC_NPROCESSORS_ONLN);
655 	if (nr < 0)
656 		return -1;
657 
658 	nra = (u32)(nr & UINT_MAX);
659 
660 	ret = do_write(fd, &nrc, sizeof(nrc));
661 	if (ret < 0)
662 		return ret;
663 
664 	return do_write(fd, &nra, sizeof(nra));
665 }
666 
667 static int write_event_desc(int fd, struct perf_header *h __maybe_unused,
668 			    struct perf_evlist *evlist)
669 {
670 	struct perf_evsel *evsel;
671 	u32 nre, nri, sz;
672 	int ret;
673 
674 	nre = evlist->nr_entries;
675 
676 	/*
677 	 * write number of events
678 	 */
679 	ret = do_write(fd, &nre, sizeof(nre));
680 	if (ret < 0)
681 		return ret;
682 
683 	/*
684 	 * size of perf_event_attr struct
685 	 */
686 	sz = (u32)sizeof(evsel->attr);
687 	ret = do_write(fd, &sz, sizeof(sz));
688 	if (ret < 0)
689 		return ret;
690 
691 	evlist__for_each(evlist, evsel) {
692 		ret = do_write(fd, &evsel->attr, sz);
693 		if (ret < 0)
694 			return ret;
695 		/*
696 		 * write number of unique id per event
697 		 * there is one id per instance of an event
698 		 *
699 		 * copy into an nri to be independent of the
700 		 * type of ids,
701 		 */
702 		nri = evsel->ids;
703 		ret = do_write(fd, &nri, sizeof(nri));
704 		if (ret < 0)
705 			return ret;
706 
707 		/*
708 		 * write event string as passed on cmdline
709 		 */
710 		ret = do_write_string(fd, perf_evsel__name(evsel));
711 		if (ret < 0)
712 			return ret;
713 		/*
714 		 * write unique ids for this event
715 		 */
716 		ret = do_write(fd, evsel->id, evsel->ids * sizeof(u64));
717 		if (ret < 0)
718 			return ret;
719 	}
720 	return 0;
721 }
722 
723 static int write_cmdline(int fd, struct perf_header *h __maybe_unused,
724 			 struct perf_evlist *evlist __maybe_unused)
725 {
726 	char buf[MAXPATHLEN];
727 	char proc[32];
728 	u32 i, n;
729 	int ret;
730 
731 	/*
732 	 * actual atual path to perf binary
733 	 */
734 	sprintf(proc, "/proc/%d/exe", getpid());
735 	ret = readlink(proc, buf, sizeof(buf));
736 	if (ret <= 0)
737 		return -1;
738 
739 	/* readlink() does not add null termination */
740 	buf[ret] = '\0';
741 
742 	/* account for binary path */
743 	n = header_argc + 1;
744 
745 	ret = do_write(fd, &n, sizeof(n));
746 	if (ret < 0)
747 		return ret;
748 
749 	ret = do_write_string(fd, buf);
750 	if (ret < 0)
751 		return ret;
752 
753 	for (i = 0 ; i < header_argc; i++) {
754 		ret = do_write_string(fd, header_argv[i]);
755 		if (ret < 0)
756 			return ret;
757 	}
758 	return 0;
759 }
760 
761 #define CORE_SIB_FMT \
762 	"/sys/devices/system/cpu/cpu%d/topology/core_siblings_list"
763 #define THRD_SIB_FMT \
764 	"/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list"
765 
766 struct cpu_topo {
767 	u32 core_sib;
768 	u32 thread_sib;
769 	char **core_siblings;
770 	char **thread_siblings;
771 };
772 
773 static int build_cpu_topo(struct cpu_topo *tp, int cpu)
774 {
775 	FILE *fp;
776 	char filename[MAXPATHLEN];
777 	char *buf = NULL, *p;
778 	size_t len = 0;
779 	ssize_t sret;
780 	u32 i = 0;
781 	int ret = -1;
782 
783 	sprintf(filename, CORE_SIB_FMT, cpu);
784 	fp = fopen(filename, "r");
785 	if (!fp)
786 		goto try_threads;
787 
788 	sret = getline(&buf, &len, fp);
789 	fclose(fp);
790 	if (sret <= 0)
791 		goto try_threads;
792 
793 	p = strchr(buf, '\n');
794 	if (p)
795 		*p = '\0';
796 
797 	for (i = 0; i < tp->core_sib; i++) {
798 		if (!strcmp(buf, tp->core_siblings[i]))
799 			break;
800 	}
801 	if (i == tp->core_sib) {
802 		tp->core_siblings[i] = buf;
803 		tp->core_sib++;
804 		buf = NULL;
805 		len = 0;
806 	}
807 	ret = 0;
808 
809 try_threads:
810 	sprintf(filename, THRD_SIB_FMT, cpu);
811 	fp = fopen(filename, "r");
812 	if (!fp)
813 		goto done;
814 
815 	if (getline(&buf, &len, fp) <= 0)
816 		goto done;
817 
818 	p = strchr(buf, '\n');
819 	if (p)
820 		*p = '\0';
821 
822 	for (i = 0; i < tp->thread_sib; i++) {
823 		if (!strcmp(buf, tp->thread_siblings[i]))
824 			break;
825 	}
826 	if (i == tp->thread_sib) {
827 		tp->thread_siblings[i] = buf;
828 		tp->thread_sib++;
829 		buf = NULL;
830 	}
831 	ret = 0;
832 done:
833 	if(fp)
834 		fclose(fp);
835 	free(buf);
836 	return ret;
837 }
838 
839 static void free_cpu_topo(struct cpu_topo *tp)
840 {
841 	u32 i;
842 
843 	if (!tp)
844 		return;
845 
846 	for (i = 0 ; i < tp->core_sib; i++)
847 		zfree(&tp->core_siblings[i]);
848 
849 	for (i = 0 ; i < tp->thread_sib; i++)
850 		zfree(&tp->thread_siblings[i]);
851 
852 	free(tp);
853 }
854 
855 static struct cpu_topo *build_cpu_topology(void)
856 {
857 	struct cpu_topo *tp;
858 	void *addr;
859 	u32 nr, i;
860 	size_t sz;
861 	long ncpus;
862 	int ret = -1;
863 
864 	ncpus = sysconf(_SC_NPROCESSORS_CONF);
865 	if (ncpus < 0)
866 		return NULL;
867 
868 	nr = (u32)(ncpus & UINT_MAX);
869 
870 	sz = nr * sizeof(char *);
871 
872 	addr = calloc(1, sizeof(*tp) + 2 * sz);
873 	if (!addr)
874 		return NULL;
875 
876 	tp = addr;
877 
878 	addr += sizeof(*tp);
879 	tp->core_siblings = addr;
880 	addr += sz;
881 	tp->thread_siblings = addr;
882 
883 	for (i = 0; i < nr; i++) {
884 		ret = build_cpu_topo(tp, i);
885 		if (ret < 0)
886 			break;
887 	}
888 	if (ret) {
889 		free_cpu_topo(tp);
890 		tp = NULL;
891 	}
892 	return tp;
893 }
894 
895 static int write_cpu_topology(int fd, struct perf_header *h __maybe_unused,
896 			  struct perf_evlist *evlist __maybe_unused)
897 {
898 	struct cpu_topo *tp;
899 	u32 i;
900 	int ret;
901 
902 	tp = build_cpu_topology();
903 	if (!tp)
904 		return -1;
905 
906 	ret = do_write(fd, &tp->core_sib, sizeof(tp->core_sib));
907 	if (ret < 0)
908 		goto done;
909 
910 	for (i = 0; i < tp->core_sib; i++) {
911 		ret = do_write_string(fd, tp->core_siblings[i]);
912 		if (ret < 0)
913 			goto done;
914 	}
915 	ret = do_write(fd, &tp->thread_sib, sizeof(tp->thread_sib));
916 	if (ret < 0)
917 		goto done;
918 
919 	for (i = 0; i < tp->thread_sib; i++) {
920 		ret = do_write_string(fd, tp->thread_siblings[i]);
921 		if (ret < 0)
922 			break;
923 	}
924 done:
925 	free_cpu_topo(tp);
926 	return ret;
927 }
928 
929 
930 
931 static int write_total_mem(int fd, struct perf_header *h __maybe_unused,
932 			  struct perf_evlist *evlist __maybe_unused)
933 {
934 	char *buf = NULL;
935 	FILE *fp;
936 	size_t len = 0;
937 	int ret = -1, n;
938 	uint64_t mem;
939 
940 	fp = fopen("/proc/meminfo", "r");
941 	if (!fp)
942 		return -1;
943 
944 	while (getline(&buf, &len, fp) > 0) {
945 		ret = strncmp(buf, "MemTotal:", 9);
946 		if (!ret)
947 			break;
948 	}
949 	if (!ret) {
950 		n = sscanf(buf, "%*s %"PRIu64, &mem);
951 		if (n == 1)
952 			ret = do_write(fd, &mem, sizeof(mem));
953 	}
954 	free(buf);
955 	fclose(fp);
956 	return ret;
957 }
958 
959 static int write_topo_node(int fd, int node)
960 {
961 	char str[MAXPATHLEN];
962 	char field[32];
963 	char *buf = NULL, *p;
964 	size_t len = 0;
965 	FILE *fp;
966 	u64 mem_total, mem_free, mem;
967 	int ret = -1;
968 
969 	sprintf(str, "/sys/devices/system/node/node%d/meminfo", node);
970 	fp = fopen(str, "r");
971 	if (!fp)
972 		return -1;
973 
974 	while (getline(&buf, &len, fp) > 0) {
975 		/* skip over invalid lines */
976 		if (!strchr(buf, ':'))
977 			continue;
978 		if (sscanf(buf, "%*s %*d %31s %"PRIu64, field, &mem) != 2)
979 			goto done;
980 		if (!strcmp(field, "MemTotal:"))
981 			mem_total = mem;
982 		if (!strcmp(field, "MemFree:"))
983 			mem_free = mem;
984 	}
985 
986 	fclose(fp);
987 	fp = NULL;
988 
989 	ret = do_write(fd, &mem_total, sizeof(u64));
990 	if (ret)
991 		goto done;
992 
993 	ret = do_write(fd, &mem_free, sizeof(u64));
994 	if (ret)
995 		goto done;
996 
997 	ret = -1;
998 	sprintf(str, "/sys/devices/system/node/node%d/cpulist", node);
999 
1000 	fp = fopen(str, "r");
1001 	if (!fp)
1002 		goto done;
1003 
1004 	if (getline(&buf, &len, fp) <= 0)
1005 		goto done;
1006 
1007 	p = strchr(buf, '\n');
1008 	if (p)
1009 		*p = '\0';
1010 
1011 	ret = do_write_string(fd, buf);
1012 done:
1013 	free(buf);
1014 	if (fp)
1015 		fclose(fp);
1016 	return ret;
1017 }
1018 
1019 static int write_numa_topology(int fd, struct perf_header *h __maybe_unused,
1020 			  struct perf_evlist *evlist __maybe_unused)
1021 {
1022 	char *buf = NULL;
1023 	size_t len = 0;
1024 	FILE *fp;
1025 	struct cpu_map *node_map = NULL;
1026 	char *c;
1027 	u32 nr, i, j;
1028 	int ret = -1;
1029 
1030 	fp = fopen("/sys/devices/system/node/online", "r");
1031 	if (!fp)
1032 		return -1;
1033 
1034 	if (getline(&buf, &len, fp) <= 0)
1035 		goto done;
1036 
1037 	c = strchr(buf, '\n');
1038 	if (c)
1039 		*c = '\0';
1040 
1041 	node_map = cpu_map__new(buf);
1042 	if (!node_map)
1043 		goto done;
1044 
1045 	nr = (u32)node_map->nr;
1046 
1047 	ret = do_write(fd, &nr, sizeof(nr));
1048 	if (ret < 0)
1049 		goto done;
1050 
1051 	for (i = 0; i < nr; i++) {
1052 		j = (u32)node_map->map[i];
1053 		ret = do_write(fd, &j, sizeof(j));
1054 		if (ret < 0)
1055 			break;
1056 
1057 		ret = write_topo_node(fd, i);
1058 		if (ret < 0)
1059 			break;
1060 	}
1061 done:
1062 	free(buf);
1063 	fclose(fp);
1064 	free(node_map);
1065 	return ret;
1066 }
1067 
1068 /*
1069  * File format:
1070  *
1071  * struct pmu_mappings {
1072  *	u32	pmu_num;
1073  *	struct pmu_map {
1074  *		u32	type;
1075  *		char	name[];
1076  *	}[pmu_num];
1077  * };
1078  */
1079 
1080 static int write_pmu_mappings(int fd, struct perf_header *h __maybe_unused,
1081 			      struct perf_evlist *evlist __maybe_unused)
1082 {
1083 	struct perf_pmu *pmu = NULL;
1084 	off_t offset = lseek(fd, 0, SEEK_CUR);
1085 	__u32 pmu_num = 0;
1086 	int ret;
1087 
1088 	/* write real pmu_num later */
1089 	ret = do_write(fd, &pmu_num, sizeof(pmu_num));
1090 	if (ret < 0)
1091 		return ret;
1092 
1093 	while ((pmu = perf_pmu__scan(pmu))) {
1094 		if (!pmu->name)
1095 			continue;
1096 		pmu_num++;
1097 
1098 		ret = do_write(fd, &pmu->type, sizeof(pmu->type));
1099 		if (ret < 0)
1100 			return ret;
1101 
1102 		ret = do_write_string(fd, pmu->name);
1103 		if (ret < 0)
1104 			return ret;
1105 	}
1106 
1107 	if (pwrite(fd, &pmu_num, sizeof(pmu_num), offset) != sizeof(pmu_num)) {
1108 		/* discard all */
1109 		lseek(fd, offset, SEEK_SET);
1110 		return -1;
1111 	}
1112 
1113 	return 0;
1114 }
1115 
1116 /*
1117  * File format:
1118  *
1119  * struct group_descs {
1120  *	u32	nr_groups;
1121  *	struct group_desc {
1122  *		char	name[];
1123  *		u32	leader_idx;
1124  *		u32	nr_members;
1125  *	}[nr_groups];
1126  * };
1127  */
1128 static int write_group_desc(int fd, struct perf_header *h __maybe_unused,
1129 			    struct perf_evlist *evlist)
1130 {
1131 	u32 nr_groups = evlist->nr_groups;
1132 	struct perf_evsel *evsel;
1133 	int ret;
1134 
1135 	ret = do_write(fd, &nr_groups, sizeof(nr_groups));
1136 	if (ret < 0)
1137 		return ret;
1138 
1139 	evlist__for_each(evlist, evsel) {
1140 		if (perf_evsel__is_group_leader(evsel) &&
1141 		    evsel->nr_members > 1) {
1142 			const char *name = evsel->group_name ?: "{anon_group}";
1143 			u32 leader_idx = evsel->idx;
1144 			u32 nr_members = evsel->nr_members;
1145 
1146 			ret = do_write_string(fd, name);
1147 			if (ret < 0)
1148 				return ret;
1149 
1150 			ret = do_write(fd, &leader_idx, sizeof(leader_idx));
1151 			if (ret < 0)
1152 				return ret;
1153 
1154 			ret = do_write(fd, &nr_members, sizeof(nr_members));
1155 			if (ret < 0)
1156 				return ret;
1157 		}
1158 	}
1159 	return 0;
1160 }
1161 
1162 /*
1163  * default get_cpuid(): nothing gets recorded
1164  * actual implementation must be in arch/$(ARCH)/util/header.c
1165  */
1166 int __attribute__ ((weak)) get_cpuid(char *buffer __maybe_unused,
1167 				     size_t sz __maybe_unused)
1168 {
1169 	return -1;
1170 }
1171 
1172 static int write_cpuid(int fd, struct perf_header *h __maybe_unused,
1173 		       struct perf_evlist *evlist __maybe_unused)
1174 {
1175 	char buffer[64];
1176 	int ret;
1177 
1178 	ret = get_cpuid(buffer, sizeof(buffer));
1179 	if (!ret)
1180 		goto write_it;
1181 
1182 	return -1;
1183 write_it:
1184 	return do_write_string(fd, buffer);
1185 }
1186 
1187 static int write_branch_stack(int fd __maybe_unused,
1188 			      struct perf_header *h __maybe_unused,
1189 		       struct perf_evlist *evlist __maybe_unused)
1190 {
1191 	return 0;
1192 }
1193 
1194 static void print_hostname(struct perf_header *ph, int fd __maybe_unused,
1195 			   FILE *fp)
1196 {
1197 	fprintf(fp, "# hostname : %s\n", ph->env.hostname);
1198 }
1199 
1200 static void print_osrelease(struct perf_header *ph, int fd __maybe_unused,
1201 			    FILE *fp)
1202 {
1203 	fprintf(fp, "# os release : %s\n", ph->env.os_release);
1204 }
1205 
1206 static void print_arch(struct perf_header *ph, int fd __maybe_unused, FILE *fp)
1207 {
1208 	fprintf(fp, "# arch : %s\n", ph->env.arch);
1209 }
1210 
1211 static void print_cpudesc(struct perf_header *ph, int fd __maybe_unused,
1212 			  FILE *fp)
1213 {
1214 	fprintf(fp, "# cpudesc : %s\n", ph->env.cpu_desc);
1215 }
1216 
1217 static void print_nrcpus(struct perf_header *ph, int fd __maybe_unused,
1218 			 FILE *fp)
1219 {
1220 	fprintf(fp, "# nrcpus online : %u\n", ph->env.nr_cpus_online);
1221 	fprintf(fp, "# nrcpus avail : %u\n", ph->env.nr_cpus_avail);
1222 }
1223 
1224 static void print_version(struct perf_header *ph, int fd __maybe_unused,
1225 			  FILE *fp)
1226 {
1227 	fprintf(fp, "# perf version : %s\n", ph->env.version);
1228 }
1229 
1230 static void print_cmdline(struct perf_header *ph, int fd __maybe_unused,
1231 			  FILE *fp)
1232 {
1233 	int nr, i;
1234 	char *str;
1235 
1236 	nr = ph->env.nr_cmdline;
1237 	str = ph->env.cmdline;
1238 
1239 	fprintf(fp, "# cmdline : ");
1240 
1241 	for (i = 0; i < nr; i++) {
1242 		fprintf(fp, "%s ", str);
1243 		str += strlen(str) + 1;
1244 	}
1245 	fputc('\n', fp);
1246 }
1247 
1248 static void print_cpu_topology(struct perf_header *ph, int fd __maybe_unused,
1249 			       FILE *fp)
1250 {
1251 	int nr, i;
1252 	char *str;
1253 
1254 	nr = ph->env.nr_sibling_cores;
1255 	str = ph->env.sibling_cores;
1256 
1257 	for (i = 0; i < nr; i++) {
1258 		fprintf(fp, "# sibling cores   : %s\n", str);
1259 		str += strlen(str) + 1;
1260 	}
1261 
1262 	nr = ph->env.nr_sibling_threads;
1263 	str = ph->env.sibling_threads;
1264 
1265 	for (i = 0; i < nr; i++) {
1266 		fprintf(fp, "# sibling threads : %s\n", str);
1267 		str += strlen(str) + 1;
1268 	}
1269 }
1270 
1271 static void free_event_desc(struct perf_evsel *events)
1272 {
1273 	struct perf_evsel *evsel;
1274 
1275 	if (!events)
1276 		return;
1277 
1278 	for (evsel = events; evsel->attr.size; evsel++) {
1279 		zfree(&evsel->name);
1280 		zfree(&evsel->id);
1281 	}
1282 
1283 	free(events);
1284 }
1285 
1286 static struct perf_evsel *
1287 read_event_desc(struct perf_header *ph, int fd)
1288 {
1289 	struct perf_evsel *evsel, *events = NULL;
1290 	u64 *id;
1291 	void *buf = NULL;
1292 	u32 nre, sz, nr, i, j;
1293 	ssize_t ret;
1294 	size_t msz;
1295 
1296 	/* number of events */
1297 	ret = readn(fd, &nre, sizeof(nre));
1298 	if (ret != (ssize_t)sizeof(nre))
1299 		goto error;
1300 
1301 	if (ph->needs_swap)
1302 		nre = bswap_32(nre);
1303 
1304 	ret = readn(fd, &sz, sizeof(sz));
1305 	if (ret != (ssize_t)sizeof(sz))
1306 		goto error;
1307 
1308 	if (ph->needs_swap)
1309 		sz = bswap_32(sz);
1310 
1311 	/* buffer to hold on file attr struct */
1312 	buf = malloc(sz);
1313 	if (!buf)
1314 		goto error;
1315 
1316 	/* the last event terminates with evsel->attr.size == 0: */
1317 	events = calloc(nre + 1, sizeof(*events));
1318 	if (!events)
1319 		goto error;
1320 
1321 	msz = sizeof(evsel->attr);
1322 	if (sz < msz)
1323 		msz = sz;
1324 
1325 	for (i = 0, evsel = events; i < nre; evsel++, i++) {
1326 		evsel->idx = i;
1327 
1328 		/*
1329 		 * must read entire on-file attr struct to
1330 		 * sync up with layout.
1331 		 */
1332 		ret = readn(fd, buf, sz);
1333 		if (ret != (ssize_t)sz)
1334 			goto error;
1335 
1336 		if (ph->needs_swap)
1337 			perf_event__attr_swap(buf);
1338 
1339 		memcpy(&evsel->attr, buf, msz);
1340 
1341 		ret = readn(fd, &nr, sizeof(nr));
1342 		if (ret != (ssize_t)sizeof(nr))
1343 			goto error;
1344 
1345 		if (ph->needs_swap) {
1346 			nr = bswap_32(nr);
1347 			evsel->needs_swap = true;
1348 		}
1349 
1350 		evsel->name = do_read_string(fd, ph);
1351 
1352 		if (!nr)
1353 			continue;
1354 
1355 		id = calloc(nr, sizeof(*id));
1356 		if (!id)
1357 			goto error;
1358 		evsel->ids = nr;
1359 		evsel->id = id;
1360 
1361 		for (j = 0 ; j < nr; j++) {
1362 			ret = readn(fd, id, sizeof(*id));
1363 			if (ret != (ssize_t)sizeof(*id))
1364 				goto error;
1365 			if (ph->needs_swap)
1366 				*id = bswap_64(*id);
1367 			id++;
1368 		}
1369 	}
1370 out:
1371 	free(buf);
1372 	return events;
1373 error:
1374 	if (events)
1375 		free_event_desc(events);
1376 	events = NULL;
1377 	goto out;
1378 }
1379 
1380 static void print_event_desc(struct perf_header *ph, int fd, FILE *fp)
1381 {
1382 	struct perf_evsel *evsel, *events = read_event_desc(ph, fd);
1383 	u32 j;
1384 	u64 *id;
1385 
1386 	if (!events) {
1387 		fprintf(fp, "# event desc: not available or unable to read\n");
1388 		return;
1389 	}
1390 
1391 	for (evsel = events; evsel->attr.size; evsel++) {
1392 		fprintf(fp, "# event : name = %s, ", evsel->name);
1393 
1394 		fprintf(fp, "type = %d, config = 0x%"PRIx64
1395 			    ", config1 = 0x%"PRIx64", config2 = 0x%"PRIx64,
1396 				evsel->attr.type,
1397 				(u64)evsel->attr.config,
1398 				(u64)evsel->attr.config1,
1399 				(u64)evsel->attr.config2);
1400 
1401 		fprintf(fp, ", excl_usr = %d, excl_kern = %d",
1402 				evsel->attr.exclude_user,
1403 				evsel->attr.exclude_kernel);
1404 
1405 		fprintf(fp, ", excl_host = %d, excl_guest = %d",
1406 				evsel->attr.exclude_host,
1407 				evsel->attr.exclude_guest);
1408 
1409 		fprintf(fp, ", precise_ip = %d", evsel->attr.precise_ip);
1410 
1411 		fprintf(fp, ", attr_mmap2 = %d", evsel->attr.mmap2);
1412 		fprintf(fp, ", attr_mmap  = %d", evsel->attr.mmap);
1413 		fprintf(fp, ", attr_mmap_data = %d", evsel->attr.mmap_data);
1414 		if (evsel->ids) {
1415 			fprintf(fp, ", id = {");
1416 			for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) {
1417 				if (j)
1418 					fputc(',', fp);
1419 				fprintf(fp, " %"PRIu64, *id);
1420 			}
1421 			fprintf(fp, " }");
1422 		}
1423 
1424 		fputc('\n', fp);
1425 	}
1426 
1427 	free_event_desc(events);
1428 }
1429 
1430 static void print_total_mem(struct perf_header *ph, int fd __maybe_unused,
1431 			    FILE *fp)
1432 {
1433 	fprintf(fp, "# total memory : %Lu kB\n", ph->env.total_mem);
1434 }
1435 
1436 static void print_numa_topology(struct perf_header *ph, int fd __maybe_unused,
1437 				FILE *fp)
1438 {
1439 	u32 nr, c, i;
1440 	char *str, *tmp;
1441 	uint64_t mem_total, mem_free;
1442 
1443 	/* nr nodes */
1444 	nr = ph->env.nr_numa_nodes;
1445 	str = ph->env.numa_nodes;
1446 
1447 	for (i = 0; i < nr; i++) {
1448 		/* node number */
1449 		c = strtoul(str, &tmp, 0);
1450 		if (*tmp != ':')
1451 			goto error;
1452 
1453 		str = tmp + 1;
1454 		mem_total = strtoull(str, &tmp, 0);
1455 		if (*tmp != ':')
1456 			goto error;
1457 
1458 		str = tmp + 1;
1459 		mem_free = strtoull(str, &tmp, 0);
1460 		if (*tmp != ':')
1461 			goto error;
1462 
1463 		fprintf(fp, "# node%u meminfo  : total = %"PRIu64" kB,"
1464 			    " free = %"PRIu64" kB\n",
1465 			c, mem_total, mem_free);
1466 
1467 		str = tmp + 1;
1468 		fprintf(fp, "# node%u cpu list : %s\n", c, str);
1469 
1470 		str += strlen(str) + 1;
1471 	}
1472 	return;
1473 error:
1474 	fprintf(fp, "# numa topology : not available\n");
1475 }
1476 
1477 static void print_cpuid(struct perf_header *ph, int fd __maybe_unused, FILE *fp)
1478 {
1479 	fprintf(fp, "# cpuid : %s\n", ph->env.cpuid);
1480 }
1481 
1482 static void print_branch_stack(struct perf_header *ph __maybe_unused,
1483 			       int fd __maybe_unused, FILE *fp)
1484 {
1485 	fprintf(fp, "# contains samples with branch stack\n");
1486 }
1487 
1488 static void print_pmu_mappings(struct perf_header *ph, int fd __maybe_unused,
1489 			       FILE *fp)
1490 {
1491 	const char *delimiter = "# pmu mappings: ";
1492 	char *str, *tmp;
1493 	u32 pmu_num;
1494 	u32 type;
1495 
1496 	pmu_num = ph->env.nr_pmu_mappings;
1497 	if (!pmu_num) {
1498 		fprintf(fp, "# pmu mappings: not available\n");
1499 		return;
1500 	}
1501 
1502 	str = ph->env.pmu_mappings;
1503 
1504 	while (pmu_num) {
1505 		type = strtoul(str, &tmp, 0);
1506 		if (*tmp != ':')
1507 			goto error;
1508 
1509 		str = tmp + 1;
1510 		fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type);
1511 
1512 		delimiter = ", ";
1513 		str += strlen(str) + 1;
1514 		pmu_num--;
1515 	}
1516 
1517 	fprintf(fp, "\n");
1518 
1519 	if (!pmu_num)
1520 		return;
1521 error:
1522 	fprintf(fp, "# pmu mappings: unable to read\n");
1523 }
1524 
1525 static void print_group_desc(struct perf_header *ph, int fd __maybe_unused,
1526 			     FILE *fp)
1527 {
1528 	struct perf_session *session;
1529 	struct perf_evsel *evsel;
1530 	u32 nr = 0;
1531 
1532 	session = container_of(ph, struct perf_session, header);
1533 
1534 	evlist__for_each(session->evlist, evsel) {
1535 		if (perf_evsel__is_group_leader(evsel) &&
1536 		    evsel->nr_members > 1) {
1537 			fprintf(fp, "# group: %s{%s", evsel->group_name ?: "",
1538 				perf_evsel__name(evsel));
1539 
1540 			nr = evsel->nr_members - 1;
1541 		} else if (nr) {
1542 			fprintf(fp, ",%s", perf_evsel__name(evsel));
1543 
1544 			if (--nr == 0)
1545 				fprintf(fp, "}\n");
1546 		}
1547 	}
1548 }
1549 
1550 static int __event_process_build_id(struct build_id_event *bev,
1551 				    char *filename,
1552 				    struct perf_session *session)
1553 {
1554 	int err = -1;
1555 	struct dsos *dsos;
1556 	struct machine *machine;
1557 	u16 misc;
1558 	struct dso *dso;
1559 	enum dso_kernel_type dso_type;
1560 
1561 	machine = perf_session__findnew_machine(session, bev->pid);
1562 	if (!machine)
1563 		goto out;
1564 
1565 	misc = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
1566 
1567 	switch (misc) {
1568 	case PERF_RECORD_MISC_KERNEL:
1569 		dso_type = DSO_TYPE_KERNEL;
1570 		dsos = &machine->kernel_dsos;
1571 		break;
1572 	case PERF_RECORD_MISC_GUEST_KERNEL:
1573 		dso_type = DSO_TYPE_GUEST_KERNEL;
1574 		dsos = &machine->kernel_dsos;
1575 		break;
1576 	case PERF_RECORD_MISC_USER:
1577 	case PERF_RECORD_MISC_GUEST_USER:
1578 		dso_type = DSO_TYPE_USER;
1579 		dsos = &machine->user_dsos;
1580 		break;
1581 	default:
1582 		goto out;
1583 	}
1584 
1585 	dso = __dsos__findnew(dsos, filename);
1586 	if (dso != NULL) {
1587 		char sbuild_id[BUILD_ID_SIZE * 2 + 1];
1588 
1589 		dso__set_build_id(dso, &bev->build_id);
1590 
1591 		if (filename[0] == '[')
1592 			dso->kernel = dso_type;
1593 
1594 		build_id__sprintf(dso->build_id, sizeof(dso->build_id),
1595 				  sbuild_id);
1596 		pr_debug("build id event received for %s: %s\n",
1597 			 dso->long_name, sbuild_id);
1598 	}
1599 
1600 	err = 0;
1601 out:
1602 	return err;
1603 }
1604 
1605 static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
1606 						 int input, u64 offset, u64 size)
1607 {
1608 	struct perf_session *session = container_of(header, struct perf_session, header);
1609 	struct {
1610 		struct perf_event_header   header;
1611 		u8			   build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
1612 		char			   filename[0];
1613 	} old_bev;
1614 	struct build_id_event bev;
1615 	char filename[PATH_MAX];
1616 	u64 limit = offset + size;
1617 
1618 	while (offset < limit) {
1619 		ssize_t len;
1620 
1621 		if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
1622 			return -1;
1623 
1624 		if (header->needs_swap)
1625 			perf_event_header__bswap(&old_bev.header);
1626 
1627 		len = old_bev.header.size - sizeof(old_bev);
1628 		if (readn(input, filename, len) != len)
1629 			return -1;
1630 
1631 		bev.header = old_bev.header;
1632 
1633 		/*
1634 		 * As the pid is the missing value, we need to fill
1635 		 * it properly. The header.misc value give us nice hint.
1636 		 */
1637 		bev.pid	= HOST_KERNEL_ID;
1638 		if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
1639 		    bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
1640 			bev.pid	= DEFAULT_GUEST_KERNEL_ID;
1641 
1642 		memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
1643 		__event_process_build_id(&bev, filename, session);
1644 
1645 		offset += bev.header.size;
1646 	}
1647 
1648 	return 0;
1649 }
1650 
1651 static int perf_header__read_build_ids(struct perf_header *header,
1652 				       int input, u64 offset, u64 size)
1653 {
1654 	struct perf_session *session = container_of(header, struct perf_session, header);
1655 	struct build_id_event bev;
1656 	char filename[PATH_MAX];
1657 	u64 limit = offset + size, orig_offset = offset;
1658 	int err = -1;
1659 
1660 	while (offset < limit) {
1661 		ssize_t len;
1662 
1663 		if (readn(input, &bev, sizeof(bev)) != sizeof(bev))
1664 			goto out;
1665 
1666 		if (header->needs_swap)
1667 			perf_event_header__bswap(&bev.header);
1668 
1669 		len = bev.header.size - sizeof(bev);
1670 		if (readn(input, filename, len) != len)
1671 			goto out;
1672 		/*
1673 		 * The a1645ce1 changeset:
1674 		 *
1675 		 * "perf: 'perf kvm' tool for monitoring guest performance from host"
1676 		 *
1677 		 * Added a field to struct build_id_event that broke the file
1678 		 * format.
1679 		 *
1680 		 * Since the kernel build-id is the first entry, process the
1681 		 * table using the old format if the well known
1682 		 * '[kernel.kallsyms]' string for the kernel build-id has the
1683 		 * first 4 characters chopped off (where the pid_t sits).
1684 		 */
1685 		if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
1686 			if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
1687 				return -1;
1688 			return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
1689 		}
1690 
1691 		__event_process_build_id(&bev, filename, session);
1692 
1693 		offset += bev.header.size;
1694 	}
1695 	err = 0;
1696 out:
1697 	return err;
1698 }
1699 
1700 static int process_tracing_data(struct perf_file_section *section __maybe_unused,
1701 				struct perf_header *ph __maybe_unused,
1702 				int fd, void *data)
1703 {
1704 	ssize_t ret = trace_report(fd, data, false);
1705 	return ret < 0 ? -1 : 0;
1706 }
1707 
1708 static int process_build_id(struct perf_file_section *section,
1709 			    struct perf_header *ph, int fd,
1710 			    void *data __maybe_unused)
1711 {
1712 	if (perf_header__read_build_ids(ph, fd, section->offset, section->size))
1713 		pr_debug("Failed to read buildids, continuing...\n");
1714 	return 0;
1715 }
1716 
1717 static int process_hostname(struct perf_file_section *section __maybe_unused,
1718 			    struct perf_header *ph, int fd,
1719 			    void *data __maybe_unused)
1720 {
1721 	ph->env.hostname = do_read_string(fd, ph);
1722 	return ph->env.hostname ? 0 : -ENOMEM;
1723 }
1724 
1725 static int process_osrelease(struct perf_file_section *section __maybe_unused,
1726 			     struct perf_header *ph, int fd,
1727 			     void *data __maybe_unused)
1728 {
1729 	ph->env.os_release = do_read_string(fd, ph);
1730 	return ph->env.os_release ? 0 : -ENOMEM;
1731 }
1732 
1733 static int process_version(struct perf_file_section *section __maybe_unused,
1734 			   struct perf_header *ph, int fd,
1735 			   void *data __maybe_unused)
1736 {
1737 	ph->env.version = do_read_string(fd, ph);
1738 	return ph->env.version ? 0 : -ENOMEM;
1739 }
1740 
1741 static int process_arch(struct perf_file_section *section __maybe_unused,
1742 			struct perf_header *ph,	int fd,
1743 			void *data __maybe_unused)
1744 {
1745 	ph->env.arch = do_read_string(fd, ph);
1746 	return ph->env.arch ? 0 : -ENOMEM;
1747 }
1748 
1749 static int process_nrcpus(struct perf_file_section *section __maybe_unused,
1750 			  struct perf_header *ph, int fd,
1751 			  void *data __maybe_unused)
1752 {
1753 	ssize_t ret;
1754 	u32 nr;
1755 
1756 	ret = readn(fd, &nr, sizeof(nr));
1757 	if (ret != sizeof(nr))
1758 		return -1;
1759 
1760 	if (ph->needs_swap)
1761 		nr = bswap_32(nr);
1762 
1763 	ph->env.nr_cpus_online = nr;
1764 
1765 	ret = readn(fd, &nr, sizeof(nr));
1766 	if (ret != sizeof(nr))
1767 		return -1;
1768 
1769 	if (ph->needs_swap)
1770 		nr = bswap_32(nr);
1771 
1772 	ph->env.nr_cpus_avail = nr;
1773 	return 0;
1774 }
1775 
1776 static int process_cpudesc(struct perf_file_section *section __maybe_unused,
1777 			   struct perf_header *ph, int fd,
1778 			   void *data __maybe_unused)
1779 {
1780 	ph->env.cpu_desc = do_read_string(fd, ph);
1781 	return ph->env.cpu_desc ? 0 : -ENOMEM;
1782 }
1783 
1784 static int process_cpuid(struct perf_file_section *section __maybe_unused,
1785 			 struct perf_header *ph,  int fd,
1786 			 void *data __maybe_unused)
1787 {
1788 	ph->env.cpuid = do_read_string(fd, ph);
1789 	return ph->env.cpuid ? 0 : -ENOMEM;
1790 }
1791 
1792 static int process_total_mem(struct perf_file_section *section __maybe_unused,
1793 			     struct perf_header *ph, int fd,
1794 			     void *data __maybe_unused)
1795 {
1796 	uint64_t mem;
1797 	ssize_t ret;
1798 
1799 	ret = readn(fd, &mem, sizeof(mem));
1800 	if (ret != sizeof(mem))
1801 		return -1;
1802 
1803 	if (ph->needs_swap)
1804 		mem = bswap_64(mem);
1805 
1806 	ph->env.total_mem = mem;
1807 	return 0;
1808 }
1809 
1810 static struct perf_evsel *
1811 perf_evlist__find_by_index(struct perf_evlist *evlist, int idx)
1812 {
1813 	struct perf_evsel *evsel;
1814 
1815 	evlist__for_each(evlist, evsel) {
1816 		if (evsel->idx == idx)
1817 			return evsel;
1818 	}
1819 
1820 	return NULL;
1821 }
1822 
1823 static void
1824 perf_evlist__set_event_name(struct perf_evlist *evlist,
1825 			    struct perf_evsel *event)
1826 {
1827 	struct perf_evsel *evsel;
1828 
1829 	if (!event->name)
1830 		return;
1831 
1832 	evsel = perf_evlist__find_by_index(evlist, event->idx);
1833 	if (!evsel)
1834 		return;
1835 
1836 	if (evsel->name)
1837 		return;
1838 
1839 	evsel->name = strdup(event->name);
1840 }
1841 
1842 static int
1843 process_event_desc(struct perf_file_section *section __maybe_unused,
1844 		   struct perf_header *header, int fd,
1845 		   void *data __maybe_unused)
1846 {
1847 	struct perf_session *session;
1848 	struct perf_evsel *evsel, *events = read_event_desc(header, fd);
1849 
1850 	if (!events)
1851 		return 0;
1852 
1853 	session = container_of(header, struct perf_session, header);
1854 	for (evsel = events; evsel->attr.size; evsel++)
1855 		perf_evlist__set_event_name(session->evlist, evsel);
1856 
1857 	free_event_desc(events);
1858 
1859 	return 0;
1860 }
1861 
1862 static int process_cmdline(struct perf_file_section *section __maybe_unused,
1863 			   struct perf_header *ph, int fd,
1864 			   void *data __maybe_unused)
1865 {
1866 	ssize_t ret;
1867 	char *str;
1868 	u32 nr, i;
1869 	struct strbuf sb;
1870 
1871 	ret = readn(fd, &nr, sizeof(nr));
1872 	if (ret != sizeof(nr))
1873 		return -1;
1874 
1875 	if (ph->needs_swap)
1876 		nr = bswap_32(nr);
1877 
1878 	ph->env.nr_cmdline = nr;
1879 	strbuf_init(&sb, 128);
1880 
1881 	for (i = 0; i < nr; i++) {
1882 		str = do_read_string(fd, ph);
1883 		if (!str)
1884 			goto error;
1885 
1886 		/* include a NULL character at the end */
1887 		strbuf_add(&sb, str, strlen(str) + 1);
1888 		free(str);
1889 	}
1890 	ph->env.cmdline = strbuf_detach(&sb, NULL);
1891 	return 0;
1892 
1893 error:
1894 	strbuf_release(&sb);
1895 	return -1;
1896 }
1897 
1898 static int process_cpu_topology(struct perf_file_section *section __maybe_unused,
1899 				struct perf_header *ph, int fd,
1900 				void *data __maybe_unused)
1901 {
1902 	ssize_t ret;
1903 	u32 nr, i;
1904 	char *str;
1905 	struct strbuf sb;
1906 
1907 	ret = readn(fd, &nr, sizeof(nr));
1908 	if (ret != sizeof(nr))
1909 		return -1;
1910 
1911 	if (ph->needs_swap)
1912 		nr = bswap_32(nr);
1913 
1914 	ph->env.nr_sibling_cores = nr;
1915 	strbuf_init(&sb, 128);
1916 
1917 	for (i = 0; i < nr; i++) {
1918 		str = do_read_string(fd, ph);
1919 		if (!str)
1920 			goto error;
1921 
1922 		/* include a NULL character at the end */
1923 		strbuf_add(&sb, str, strlen(str) + 1);
1924 		free(str);
1925 	}
1926 	ph->env.sibling_cores = strbuf_detach(&sb, NULL);
1927 
1928 	ret = readn(fd, &nr, sizeof(nr));
1929 	if (ret != sizeof(nr))
1930 		return -1;
1931 
1932 	if (ph->needs_swap)
1933 		nr = bswap_32(nr);
1934 
1935 	ph->env.nr_sibling_threads = nr;
1936 
1937 	for (i = 0; i < nr; i++) {
1938 		str = do_read_string(fd, ph);
1939 		if (!str)
1940 			goto error;
1941 
1942 		/* include a NULL character at the end */
1943 		strbuf_add(&sb, str, strlen(str) + 1);
1944 		free(str);
1945 	}
1946 	ph->env.sibling_threads = strbuf_detach(&sb, NULL);
1947 	return 0;
1948 
1949 error:
1950 	strbuf_release(&sb);
1951 	return -1;
1952 }
1953 
1954 static int process_numa_topology(struct perf_file_section *section __maybe_unused,
1955 				 struct perf_header *ph, int fd,
1956 				 void *data __maybe_unused)
1957 {
1958 	ssize_t ret;
1959 	u32 nr, node, i;
1960 	char *str;
1961 	uint64_t mem_total, mem_free;
1962 	struct strbuf sb;
1963 
1964 	/* nr nodes */
1965 	ret = readn(fd, &nr, sizeof(nr));
1966 	if (ret != sizeof(nr))
1967 		goto error;
1968 
1969 	if (ph->needs_swap)
1970 		nr = bswap_32(nr);
1971 
1972 	ph->env.nr_numa_nodes = nr;
1973 	strbuf_init(&sb, 256);
1974 
1975 	for (i = 0; i < nr; i++) {
1976 		/* node number */
1977 		ret = readn(fd, &node, sizeof(node));
1978 		if (ret != sizeof(node))
1979 			goto error;
1980 
1981 		ret = readn(fd, &mem_total, sizeof(u64));
1982 		if (ret != sizeof(u64))
1983 			goto error;
1984 
1985 		ret = readn(fd, &mem_free, sizeof(u64));
1986 		if (ret != sizeof(u64))
1987 			goto error;
1988 
1989 		if (ph->needs_swap) {
1990 			node = bswap_32(node);
1991 			mem_total = bswap_64(mem_total);
1992 			mem_free = bswap_64(mem_free);
1993 		}
1994 
1995 		strbuf_addf(&sb, "%u:%"PRIu64":%"PRIu64":",
1996 			    node, mem_total, mem_free);
1997 
1998 		str = do_read_string(fd, ph);
1999 		if (!str)
2000 			goto error;
2001 
2002 		/* include a NULL character at the end */
2003 		strbuf_add(&sb, str, strlen(str) + 1);
2004 		free(str);
2005 	}
2006 	ph->env.numa_nodes = strbuf_detach(&sb, NULL);
2007 	return 0;
2008 
2009 error:
2010 	strbuf_release(&sb);
2011 	return -1;
2012 }
2013 
2014 static int process_pmu_mappings(struct perf_file_section *section __maybe_unused,
2015 				struct perf_header *ph, int fd,
2016 				void *data __maybe_unused)
2017 {
2018 	ssize_t ret;
2019 	char *name;
2020 	u32 pmu_num;
2021 	u32 type;
2022 	struct strbuf sb;
2023 
2024 	ret = readn(fd, &pmu_num, sizeof(pmu_num));
2025 	if (ret != sizeof(pmu_num))
2026 		return -1;
2027 
2028 	if (ph->needs_swap)
2029 		pmu_num = bswap_32(pmu_num);
2030 
2031 	if (!pmu_num) {
2032 		pr_debug("pmu mappings not available\n");
2033 		return 0;
2034 	}
2035 
2036 	ph->env.nr_pmu_mappings = pmu_num;
2037 	strbuf_init(&sb, 128);
2038 
2039 	while (pmu_num) {
2040 		if (readn(fd, &type, sizeof(type)) != sizeof(type))
2041 			goto error;
2042 		if (ph->needs_swap)
2043 			type = bswap_32(type);
2044 
2045 		name = do_read_string(fd, ph);
2046 		if (!name)
2047 			goto error;
2048 
2049 		strbuf_addf(&sb, "%u:%s", type, name);
2050 		/* include a NULL character at the end */
2051 		strbuf_add(&sb, "", 1);
2052 
2053 		free(name);
2054 		pmu_num--;
2055 	}
2056 	ph->env.pmu_mappings = strbuf_detach(&sb, NULL);
2057 	return 0;
2058 
2059 error:
2060 	strbuf_release(&sb);
2061 	return -1;
2062 }
2063 
2064 static int process_group_desc(struct perf_file_section *section __maybe_unused,
2065 			      struct perf_header *ph, int fd,
2066 			      void *data __maybe_unused)
2067 {
2068 	size_t ret = -1;
2069 	u32 i, nr, nr_groups;
2070 	struct perf_session *session;
2071 	struct perf_evsel *evsel, *leader = NULL;
2072 	struct group_desc {
2073 		char *name;
2074 		u32 leader_idx;
2075 		u32 nr_members;
2076 	} *desc;
2077 
2078 	if (readn(fd, &nr_groups, sizeof(nr_groups)) != sizeof(nr_groups))
2079 		return -1;
2080 
2081 	if (ph->needs_swap)
2082 		nr_groups = bswap_32(nr_groups);
2083 
2084 	ph->env.nr_groups = nr_groups;
2085 	if (!nr_groups) {
2086 		pr_debug("group desc not available\n");
2087 		return 0;
2088 	}
2089 
2090 	desc = calloc(nr_groups, sizeof(*desc));
2091 	if (!desc)
2092 		return -1;
2093 
2094 	for (i = 0; i < nr_groups; i++) {
2095 		desc[i].name = do_read_string(fd, ph);
2096 		if (!desc[i].name)
2097 			goto out_free;
2098 
2099 		if (readn(fd, &desc[i].leader_idx, sizeof(u32)) != sizeof(u32))
2100 			goto out_free;
2101 
2102 		if (readn(fd, &desc[i].nr_members, sizeof(u32)) != sizeof(u32))
2103 			goto out_free;
2104 
2105 		if (ph->needs_swap) {
2106 			desc[i].leader_idx = bswap_32(desc[i].leader_idx);
2107 			desc[i].nr_members = bswap_32(desc[i].nr_members);
2108 		}
2109 	}
2110 
2111 	/*
2112 	 * Rebuild group relationship based on the group_desc
2113 	 */
2114 	session = container_of(ph, struct perf_session, header);
2115 	session->evlist->nr_groups = nr_groups;
2116 
2117 	i = nr = 0;
2118 	evlist__for_each(session->evlist, evsel) {
2119 		if (evsel->idx == (int) desc[i].leader_idx) {
2120 			evsel->leader = evsel;
2121 			/* {anon_group} is a dummy name */
2122 			if (strcmp(desc[i].name, "{anon_group}")) {
2123 				evsel->group_name = desc[i].name;
2124 				desc[i].name = NULL;
2125 			}
2126 			evsel->nr_members = desc[i].nr_members;
2127 
2128 			if (i >= nr_groups || nr > 0) {
2129 				pr_debug("invalid group desc\n");
2130 				goto out_free;
2131 			}
2132 
2133 			leader = evsel;
2134 			nr = evsel->nr_members - 1;
2135 			i++;
2136 		} else if (nr) {
2137 			/* This is a group member */
2138 			evsel->leader = leader;
2139 
2140 			nr--;
2141 		}
2142 	}
2143 
2144 	if (i != nr_groups || nr != 0) {
2145 		pr_debug("invalid group desc\n");
2146 		goto out_free;
2147 	}
2148 
2149 	ret = 0;
2150 out_free:
2151 	for (i = 0; i < nr_groups; i++)
2152 		zfree(&desc[i].name);
2153 	free(desc);
2154 
2155 	return ret;
2156 }
2157 
2158 struct feature_ops {
2159 	int (*write)(int fd, struct perf_header *h, struct perf_evlist *evlist);
2160 	void (*print)(struct perf_header *h, int fd, FILE *fp);
2161 	int (*process)(struct perf_file_section *section,
2162 		       struct perf_header *h, int fd, void *data);
2163 	const char *name;
2164 	bool full_only;
2165 };
2166 
2167 #define FEAT_OPA(n, func) \
2168 	[n] = { .name = #n, .write = write_##func, .print = print_##func }
2169 #define FEAT_OPP(n, func) \
2170 	[n] = { .name = #n, .write = write_##func, .print = print_##func, \
2171 		.process = process_##func }
2172 #define FEAT_OPF(n, func) \
2173 	[n] = { .name = #n, .write = write_##func, .print = print_##func, \
2174 		.process = process_##func, .full_only = true }
2175 
2176 /* feature_ops not implemented: */
2177 #define print_tracing_data	NULL
2178 #define print_build_id		NULL
2179 
2180 static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
2181 	FEAT_OPP(HEADER_TRACING_DATA,	tracing_data),
2182 	FEAT_OPP(HEADER_BUILD_ID,	build_id),
2183 	FEAT_OPP(HEADER_HOSTNAME,	hostname),
2184 	FEAT_OPP(HEADER_OSRELEASE,	osrelease),
2185 	FEAT_OPP(HEADER_VERSION,	version),
2186 	FEAT_OPP(HEADER_ARCH,		arch),
2187 	FEAT_OPP(HEADER_NRCPUS,		nrcpus),
2188 	FEAT_OPP(HEADER_CPUDESC,	cpudesc),
2189 	FEAT_OPP(HEADER_CPUID,		cpuid),
2190 	FEAT_OPP(HEADER_TOTAL_MEM,	total_mem),
2191 	FEAT_OPP(HEADER_EVENT_DESC,	event_desc),
2192 	FEAT_OPP(HEADER_CMDLINE,	cmdline),
2193 	FEAT_OPF(HEADER_CPU_TOPOLOGY,	cpu_topology),
2194 	FEAT_OPF(HEADER_NUMA_TOPOLOGY,	numa_topology),
2195 	FEAT_OPA(HEADER_BRANCH_STACK,	branch_stack),
2196 	FEAT_OPP(HEADER_PMU_MAPPINGS,	pmu_mappings),
2197 	FEAT_OPP(HEADER_GROUP_DESC,	group_desc),
2198 };
2199 
2200 struct header_print_data {
2201 	FILE *fp;
2202 	bool full; /* extended list of headers */
2203 };
2204 
2205 static int perf_file_section__fprintf_info(struct perf_file_section *section,
2206 					   struct perf_header *ph,
2207 					   int feat, int fd, void *data)
2208 {
2209 	struct header_print_data *hd = data;
2210 
2211 	if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
2212 		pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
2213 				"%d, continuing...\n", section->offset, feat);
2214 		return 0;
2215 	}
2216 	if (feat >= HEADER_LAST_FEATURE) {
2217 		pr_warning("unknown feature %d\n", feat);
2218 		return 0;
2219 	}
2220 	if (!feat_ops[feat].print)
2221 		return 0;
2222 
2223 	if (!feat_ops[feat].full_only || hd->full)
2224 		feat_ops[feat].print(ph, fd, hd->fp);
2225 	else
2226 		fprintf(hd->fp, "# %s info available, use -I to display\n",
2227 			feat_ops[feat].name);
2228 
2229 	return 0;
2230 }
2231 
2232 int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
2233 {
2234 	struct header_print_data hd;
2235 	struct perf_header *header = &session->header;
2236 	int fd = perf_data_file__fd(session->file);
2237 	hd.fp = fp;
2238 	hd.full = full;
2239 
2240 	perf_header__process_sections(header, fd, &hd,
2241 				      perf_file_section__fprintf_info);
2242 	return 0;
2243 }
2244 
2245 static int do_write_feat(int fd, struct perf_header *h, int type,
2246 			 struct perf_file_section **p,
2247 			 struct perf_evlist *evlist)
2248 {
2249 	int err;
2250 	int ret = 0;
2251 
2252 	if (perf_header__has_feat(h, type)) {
2253 		if (!feat_ops[type].write)
2254 			return -1;
2255 
2256 		(*p)->offset = lseek(fd, 0, SEEK_CUR);
2257 
2258 		err = feat_ops[type].write(fd, h, evlist);
2259 		if (err < 0) {
2260 			pr_debug("failed to write feature %d\n", type);
2261 
2262 			/* undo anything written */
2263 			lseek(fd, (*p)->offset, SEEK_SET);
2264 
2265 			return -1;
2266 		}
2267 		(*p)->size = lseek(fd, 0, SEEK_CUR) - (*p)->offset;
2268 		(*p)++;
2269 	}
2270 	return ret;
2271 }
2272 
2273 static int perf_header__adds_write(struct perf_header *header,
2274 				   struct perf_evlist *evlist, int fd)
2275 {
2276 	int nr_sections;
2277 	struct perf_file_section *feat_sec, *p;
2278 	int sec_size;
2279 	u64 sec_start;
2280 	int feat;
2281 	int err;
2282 
2283 	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
2284 	if (!nr_sections)
2285 		return 0;
2286 
2287 	feat_sec = p = calloc(nr_sections, sizeof(*feat_sec));
2288 	if (feat_sec == NULL)
2289 		return -ENOMEM;
2290 
2291 	sec_size = sizeof(*feat_sec) * nr_sections;
2292 
2293 	sec_start = header->feat_offset;
2294 	lseek(fd, sec_start + sec_size, SEEK_SET);
2295 
2296 	for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
2297 		if (do_write_feat(fd, header, feat, &p, evlist))
2298 			perf_header__clear_feat(header, feat);
2299 	}
2300 
2301 	lseek(fd, sec_start, SEEK_SET);
2302 	/*
2303 	 * may write more than needed due to dropped feature, but
2304 	 * this is okay, reader will skip the mising entries
2305 	 */
2306 	err = do_write(fd, feat_sec, sec_size);
2307 	if (err < 0)
2308 		pr_debug("failed to write feature section\n");
2309 	free(feat_sec);
2310 	return err;
2311 }
2312 
2313 int perf_header__write_pipe(int fd)
2314 {
2315 	struct perf_pipe_file_header f_header;
2316 	int err;
2317 
2318 	f_header = (struct perf_pipe_file_header){
2319 		.magic	   = PERF_MAGIC,
2320 		.size	   = sizeof(f_header),
2321 	};
2322 
2323 	err = do_write(fd, &f_header, sizeof(f_header));
2324 	if (err < 0) {
2325 		pr_debug("failed to write perf pipe header\n");
2326 		return err;
2327 	}
2328 
2329 	return 0;
2330 }
2331 
2332 int perf_session__write_header(struct perf_session *session,
2333 			       struct perf_evlist *evlist,
2334 			       int fd, bool at_exit)
2335 {
2336 	struct perf_file_header f_header;
2337 	struct perf_file_attr   f_attr;
2338 	struct perf_header *header = &session->header;
2339 	struct perf_evsel *evsel;
2340 	u64 attr_offset;
2341 	int err;
2342 
2343 	lseek(fd, sizeof(f_header), SEEK_SET);
2344 
2345 	evlist__for_each(session->evlist, evsel) {
2346 		evsel->id_offset = lseek(fd, 0, SEEK_CUR);
2347 		err = do_write(fd, evsel->id, evsel->ids * sizeof(u64));
2348 		if (err < 0) {
2349 			pr_debug("failed to write perf header\n");
2350 			return err;
2351 		}
2352 	}
2353 
2354 	attr_offset = lseek(fd, 0, SEEK_CUR);
2355 
2356 	evlist__for_each(evlist, evsel) {
2357 		f_attr = (struct perf_file_attr){
2358 			.attr = evsel->attr,
2359 			.ids  = {
2360 				.offset = evsel->id_offset,
2361 				.size   = evsel->ids * sizeof(u64),
2362 			}
2363 		};
2364 		err = do_write(fd, &f_attr, sizeof(f_attr));
2365 		if (err < 0) {
2366 			pr_debug("failed to write perf header attribute\n");
2367 			return err;
2368 		}
2369 	}
2370 
2371 	if (!header->data_offset)
2372 		header->data_offset = lseek(fd, 0, SEEK_CUR);
2373 	header->feat_offset = header->data_offset + header->data_size;
2374 
2375 	if (at_exit) {
2376 		err = perf_header__adds_write(header, evlist, fd);
2377 		if (err < 0)
2378 			return err;
2379 	}
2380 
2381 	f_header = (struct perf_file_header){
2382 		.magic	   = PERF_MAGIC,
2383 		.size	   = sizeof(f_header),
2384 		.attr_size = sizeof(f_attr),
2385 		.attrs = {
2386 			.offset = attr_offset,
2387 			.size   = evlist->nr_entries * sizeof(f_attr),
2388 		},
2389 		.data = {
2390 			.offset = header->data_offset,
2391 			.size	= header->data_size,
2392 		},
2393 		/* event_types is ignored, store zeros */
2394 	};
2395 
2396 	memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));
2397 
2398 	lseek(fd, 0, SEEK_SET);
2399 	err = do_write(fd, &f_header, sizeof(f_header));
2400 	if (err < 0) {
2401 		pr_debug("failed to write perf header\n");
2402 		return err;
2403 	}
2404 	lseek(fd, header->data_offset + header->data_size, SEEK_SET);
2405 
2406 	return 0;
2407 }
2408 
2409 static int perf_header__getbuffer64(struct perf_header *header,
2410 				    int fd, void *buf, size_t size)
2411 {
2412 	if (readn(fd, buf, size) <= 0)
2413 		return -1;
2414 
2415 	if (header->needs_swap)
2416 		mem_bswap_64(buf, size);
2417 
2418 	return 0;
2419 }
2420 
2421 int perf_header__process_sections(struct perf_header *header, int fd,
2422 				  void *data,
2423 				  int (*process)(struct perf_file_section *section,
2424 						 struct perf_header *ph,
2425 						 int feat, int fd, void *data))
2426 {
2427 	struct perf_file_section *feat_sec, *sec;
2428 	int nr_sections;
2429 	int sec_size;
2430 	int feat;
2431 	int err;
2432 
2433 	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
2434 	if (!nr_sections)
2435 		return 0;
2436 
2437 	feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec));
2438 	if (!feat_sec)
2439 		return -1;
2440 
2441 	sec_size = sizeof(*feat_sec) * nr_sections;
2442 
2443 	lseek(fd, header->feat_offset, SEEK_SET);
2444 
2445 	err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
2446 	if (err < 0)
2447 		goto out_free;
2448 
2449 	for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
2450 		err = process(sec++, header, feat, fd, data);
2451 		if (err < 0)
2452 			goto out_free;
2453 	}
2454 	err = 0;
2455 out_free:
2456 	free(feat_sec);
2457 	return err;
2458 }
2459 
2460 static const int attr_file_abi_sizes[] = {
2461 	[0] = PERF_ATTR_SIZE_VER0,
2462 	[1] = PERF_ATTR_SIZE_VER1,
2463 	[2] = PERF_ATTR_SIZE_VER2,
2464 	[3] = PERF_ATTR_SIZE_VER3,
2465 	0,
2466 };
2467 
2468 /*
2469  * In the legacy file format, the magic number is not used to encode endianness.
2470  * hdr_sz was used to encode endianness. But given that hdr_sz can vary based
2471  * on ABI revisions, we need to try all combinations for all endianness to
2472  * detect the endianness.
2473  */
2474 static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph)
2475 {
2476 	uint64_t ref_size, attr_size;
2477 	int i;
2478 
2479 	for (i = 0 ; attr_file_abi_sizes[i]; i++) {
2480 		ref_size = attr_file_abi_sizes[i]
2481 			 + sizeof(struct perf_file_section);
2482 		if (hdr_sz != ref_size) {
2483 			attr_size = bswap_64(hdr_sz);
2484 			if (attr_size != ref_size)
2485 				continue;
2486 
2487 			ph->needs_swap = true;
2488 		}
2489 		pr_debug("ABI%d perf.data file detected, need_swap=%d\n",
2490 			 i,
2491 			 ph->needs_swap);
2492 		return 0;
2493 	}
2494 	/* could not determine endianness */
2495 	return -1;
2496 }
2497 
2498 #define PERF_PIPE_HDR_VER0	16
2499 
2500 static const size_t attr_pipe_abi_sizes[] = {
2501 	[0] = PERF_PIPE_HDR_VER0,
2502 	0,
2503 };
2504 
2505 /*
2506  * In the legacy pipe format, there is an implicit assumption that endiannesss
2507  * between host recording the samples, and host parsing the samples is the
2508  * same. This is not always the case given that the pipe output may always be
2509  * redirected into a file and analyzed on a different machine with possibly a
2510  * different endianness and perf_event ABI revsions in the perf tool itself.
2511  */
2512 static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph)
2513 {
2514 	u64 attr_size;
2515 	int i;
2516 
2517 	for (i = 0 ; attr_pipe_abi_sizes[i]; i++) {
2518 		if (hdr_sz != attr_pipe_abi_sizes[i]) {
2519 			attr_size = bswap_64(hdr_sz);
2520 			if (attr_size != hdr_sz)
2521 				continue;
2522 
2523 			ph->needs_swap = true;
2524 		}
2525 		pr_debug("Pipe ABI%d perf.data file detected\n", i);
2526 		return 0;
2527 	}
2528 	return -1;
2529 }
2530 
2531 bool is_perf_magic(u64 magic)
2532 {
2533 	if (!memcmp(&magic, __perf_magic1, sizeof(magic))
2534 		|| magic == __perf_magic2
2535 		|| magic == __perf_magic2_sw)
2536 		return true;
2537 
2538 	return false;
2539 }
2540 
2541 static int check_magic_endian(u64 magic, uint64_t hdr_sz,
2542 			      bool is_pipe, struct perf_header *ph)
2543 {
2544 	int ret;
2545 
2546 	/* check for legacy format */
2547 	ret = memcmp(&magic, __perf_magic1, sizeof(magic));
2548 	if (ret == 0) {
2549 		ph->version = PERF_HEADER_VERSION_1;
2550 		pr_debug("legacy perf.data format\n");
2551 		if (is_pipe)
2552 			return try_all_pipe_abis(hdr_sz, ph);
2553 
2554 		return try_all_file_abis(hdr_sz, ph);
2555 	}
2556 	/*
2557 	 * the new magic number serves two purposes:
2558 	 * - unique number to identify actual perf.data files
2559 	 * - encode endianness of file
2560 	 */
2561 
2562 	/* check magic number with one endianness */
2563 	if (magic == __perf_magic2)
2564 		return 0;
2565 
2566 	/* check magic number with opposite endianness */
2567 	if (magic != __perf_magic2_sw)
2568 		return -1;
2569 
2570 	ph->needs_swap = true;
2571 	ph->version = PERF_HEADER_VERSION_2;
2572 
2573 	return 0;
2574 }
2575 
2576 int perf_file_header__read(struct perf_file_header *header,
2577 			   struct perf_header *ph, int fd)
2578 {
2579 	ssize_t ret;
2580 
2581 	lseek(fd, 0, SEEK_SET);
2582 
2583 	ret = readn(fd, header, sizeof(*header));
2584 	if (ret <= 0)
2585 		return -1;
2586 
2587 	if (check_magic_endian(header->magic,
2588 			       header->attr_size, false, ph) < 0) {
2589 		pr_debug("magic/endian check failed\n");
2590 		return -1;
2591 	}
2592 
2593 	if (ph->needs_swap) {
2594 		mem_bswap_64(header, offsetof(struct perf_file_header,
2595 			     adds_features));
2596 	}
2597 
2598 	if (header->size != sizeof(*header)) {
2599 		/* Support the previous format */
2600 		if (header->size == offsetof(typeof(*header), adds_features))
2601 			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
2602 		else
2603 			return -1;
2604 	} else if (ph->needs_swap) {
2605 		/*
2606 		 * feature bitmap is declared as an array of unsigned longs --
2607 		 * not good since its size can differ between the host that
2608 		 * generated the data file and the host analyzing the file.
2609 		 *
2610 		 * We need to handle endianness, but we don't know the size of
2611 		 * the unsigned long where the file was generated. Take a best
2612 		 * guess at determining it: try 64-bit swap first (ie., file
2613 		 * created on a 64-bit host), and check if the hostname feature
2614 		 * bit is set (this feature bit is forced on as of fbe96f2).
2615 		 * If the bit is not, undo the 64-bit swap and try a 32-bit
2616 		 * swap. If the hostname bit is still not set (e.g., older data
2617 		 * file), punt and fallback to the original behavior --
2618 		 * clearing all feature bits and setting buildid.
2619 		 */
2620 		mem_bswap_64(&header->adds_features,
2621 			    BITS_TO_U64(HEADER_FEAT_BITS));
2622 
2623 		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
2624 			/* unswap as u64 */
2625 			mem_bswap_64(&header->adds_features,
2626 				    BITS_TO_U64(HEADER_FEAT_BITS));
2627 
2628 			/* unswap as u32 */
2629 			mem_bswap_32(&header->adds_features,
2630 				    BITS_TO_U32(HEADER_FEAT_BITS));
2631 		}
2632 
2633 		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
2634 			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
2635 			set_bit(HEADER_BUILD_ID, header->adds_features);
2636 		}
2637 	}
2638 
2639 	memcpy(&ph->adds_features, &header->adds_features,
2640 	       sizeof(ph->adds_features));
2641 
2642 	ph->data_offset  = header->data.offset;
2643 	ph->data_size	 = header->data.size;
2644 	ph->feat_offset  = header->data.offset + header->data.size;
2645 	return 0;
2646 }
2647 
2648 static int perf_file_section__process(struct perf_file_section *section,
2649 				      struct perf_header *ph,
2650 				      int feat, int fd, void *data)
2651 {
2652 	if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
2653 		pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
2654 			  "%d, continuing...\n", section->offset, feat);
2655 		return 0;
2656 	}
2657 
2658 	if (feat >= HEADER_LAST_FEATURE) {
2659 		pr_debug("unknown feature %d, continuing...\n", feat);
2660 		return 0;
2661 	}
2662 
2663 	if (!feat_ops[feat].process)
2664 		return 0;
2665 
2666 	return feat_ops[feat].process(section, ph, fd, data);
2667 }
2668 
2669 static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
2670 				       struct perf_header *ph, int fd,
2671 				       bool repipe)
2672 {
2673 	ssize_t ret;
2674 
2675 	ret = readn(fd, header, sizeof(*header));
2676 	if (ret <= 0)
2677 		return -1;
2678 
2679 	if (check_magic_endian(header->magic, header->size, true, ph) < 0) {
2680 		pr_debug("endian/magic failed\n");
2681 		return -1;
2682 	}
2683 
2684 	if (ph->needs_swap)
2685 		header->size = bswap_64(header->size);
2686 
2687 	if (repipe && do_write(STDOUT_FILENO, header, sizeof(*header)) < 0)
2688 		return -1;
2689 
2690 	return 0;
2691 }
2692 
2693 static int perf_header__read_pipe(struct perf_session *session)
2694 {
2695 	struct perf_header *header = &session->header;
2696 	struct perf_pipe_file_header f_header;
2697 
2698 	if (perf_file_header__read_pipe(&f_header, header,
2699 					perf_data_file__fd(session->file),
2700 					session->repipe) < 0) {
2701 		pr_debug("incompatible file format\n");
2702 		return -EINVAL;
2703 	}
2704 
2705 	return 0;
2706 }
2707 
2708 static int read_attr(int fd, struct perf_header *ph,
2709 		     struct perf_file_attr *f_attr)
2710 {
2711 	struct perf_event_attr *attr = &f_attr->attr;
2712 	size_t sz, left;
2713 	size_t our_sz = sizeof(f_attr->attr);
2714 	ssize_t ret;
2715 
2716 	memset(f_attr, 0, sizeof(*f_attr));
2717 
2718 	/* read minimal guaranteed structure */
2719 	ret = readn(fd, attr, PERF_ATTR_SIZE_VER0);
2720 	if (ret <= 0) {
2721 		pr_debug("cannot read %d bytes of header attr\n",
2722 			 PERF_ATTR_SIZE_VER0);
2723 		return -1;
2724 	}
2725 
2726 	/* on file perf_event_attr size */
2727 	sz = attr->size;
2728 
2729 	if (ph->needs_swap)
2730 		sz = bswap_32(sz);
2731 
2732 	if (sz == 0) {
2733 		/* assume ABI0 */
2734 		sz =  PERF_ATTR_SIZE_VER0;
2735 	} else if (sz > our_sz) {
2736 		pr_debug("file uses a more recent and unsupported ABI"
2737 			 " (%zu bytes extra)\n", sz - our_sz);
2738 		return -1;
2739 	}
2740 	/* what we have not yet read and that we know about */
2741 	left = sz - PERF_ATTR_SIZE_VER0;
2742 	if (left) {
2743 		void *ptr = attr;
2744 		ptr += PERF_ATTR_SIZE_VER0;
2745 
2746 		ret = readn(fd, ptr, left);
2747 	}
2748 	/* read perf_file_section, ids are read in caller */
2749 	ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids));
2750 
2751 	return ret <= 0 ? -1 : 0;
2752 }
2753 
2754 static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel,
2755 						struct pevent *pevent)
2756 {
2757 	struct event_format *event;
2758 	char bf[128];
2759 
2760 	/* already prepared */
2761 	if (evsel->tp_format)
2762 		return 0;
2763 
2764 	if (pevent == NULL) {
2765 		pr_debug("broken or missing trace data\n");
2766 		return -1;
2767 	}
2768 
2769 	event = pevent_find_event(pevent, evsel->attr.config);
2770 	if (event == NULL)
2771 		return -1;
2772 
2773 	if (!evsel->name) {
2774 		snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
2775 		evsel->name = strdup(bf);
2776 		if (evsel->name == NULL)
2777 			return -1;
2778 	}
2779 
2780 	evsel->tp_format = event;
2781 	return 0;
2782 }
2783 
2784 static int perf_evlist__prepare_tracepoint_events(struct perf_evlist *evlist,
2785 						  struct pevent *pevent)
2786 {
2787 	struct perf_evsel *pos;
2788 
2789 	evlist__for_each(evlist, pos) {
2790 		if (pos->attr.type == PERF_TYPE_TRACEPOINT &&
2791 		    perf_evsel__prepare_tracepoint_event(pos, pevent))
2792 			return -1;
2793 	}
2794 
2795 	return 0;
2796 }
2797 
2798 int perf_session__read_header(struct perf_session *session)
2799 {
2800 	struct perf_data_file *file = session->file;
2801 	struct perf_header *header = &session->header;
2802 	struct perf_file_header	f_header;
2803 	struct perf_file_attr	f_attr;
2804 	u64			f_id;
2805 	int nr_attrs, nr_ids, i, j;
2806 	int fd = perf_data_file__fd(file);
2807 
2808 	session->evlist = perf_evlist__new();
2809 	if (session->evlist == NULL)
2810 		return -ENOMEM;
2811 
2812 	if (perf_data_file__is_pipe(file))
2813 		return perf_header__read_pipe(session);
2814 
2815 	if (perf_file_header__read(&f_header, header, fd) < 0)
2816 		return -EINVAL;
2817 
2818 	/*
2819 	 * Sanity check that perf.data was written cleanly; data size is
2820 	 * initialized to 0 and updated only if the on_exit function is run.
2821 	 * If data size is still 0 then the file contains only partial
2822 	 * information.  Just warn user and process it as much as it can.
2823 	 */
2824 	if (f_header.data.size == 0) {
2825 		pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n"
2826 			   "Was the 'perf record' command properly terminated?\n",
2827 			   file->path);
2828 	}
2829 
2830 	nr_attrs = f_header.attrs.size / f_header.attr_size;
2831 	lseek(fd, f_header.attrs.offset, SEEK_SET);
2832 
2833 	for (i = 0; i < nr_attrs; i++) {
2834 		struct perf_evsel *evsel;
2835 		off_t tmp;
2836 
2837 		if (read_attr(fd, header, &f_attr) < 0)
2838 			goto out_errno;
2839 
2840 		if (header->needs_swap)
2841 			perf_event__attr_swap(&f_attr.attr);
2842 
2843 		tmp = lseek(fd, 0, SEEK_CUR);
2844 		evsel = perf_evsel__new(&f_attr.attr);
2845 
2846 		if (evsel == NULL)
2847 			goto out_delete_evlist;
2848 
2849 		evsel->needs_swap = header->needs_swap;
2850 		/*
2851 		 * Do it before so that if perf_evsel__alloc_id fails, this
2852 		 * entry gets purged too at perf_evlist__delete().
2853 		 */
2854 		perf_evlist__add(session->evlist, evsel);
2855 
2856 		nr_ids = f_attr.ids.size / sizeof(u64);
2857 		/*
2858 		 * We don't have the cpu and thread maps on the header, so
2859 		 * for allocating the perf_sample_id table we fake 1 cpu and
2860 		 * hattr->ids threads.
2861 		 */
2862 		if (perf_evsel__alloc_id(evsel, 1, nr_ids))
2863 			goto out_delete_evlist;
2864 
2865 		lseek(fd, f_attr.ids.offset, SEEK_SET);
2866 
2867 		for (j = 0; j < nr_ids; j++) {
2868 			if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
2869 				goto out_errno;
2870 
2871 			perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);
2872 		}
2873 
2874 		lseek(fd, tmp, SEEK_SET);
2875 	}
2876 
2877 	symbol_conf.nr_events = nr_attrs;
2878 
2879 	perf_header__process_sections(header, fd, &session->tevent,
2880 				      perf_file_section__process);
2881 
2882 	if (perf_evlist__prepare_tracepoint_events(session->evlist,
2883 						   session->tevent.pevent))
2884 		goto out_delete_evlist;
2885 
2886 	return 0;
2887 out_errno:
2888 	return -errno;
2889 
2890 out_delete_evlist:
2891 	perf_evlist__delete(session->evlist);
2892 	session->evlist = NULL;
2893 	return -ENOMEM;
2894 }
2895 
2896 int perf_event__synthesize_attr(struct perf_tool *tool,
2897 				struct perf_event_attr *attr, u32 ids, u64 *id,
2898 				perf_event__handler_t process)
2899 {
2900 	union perf_event *ev;
2901 	size_t size;
2902 	int err;
2903 
2904 	size = sizeof(struct perf_event_attr);
2905 	size = PERF_ALIGN(size, sizeof(u64));
2906 	size += sizeof(struct perf_event_header);
2907 	size += ids * sizeof(u64);
2908 
2909 	ev = malloc(size);
2910 
2911 	if (ev == NULL)
2912 		return -ENOMEM;
2913 
2914 	ev->attr.attr = *attr;
2915 	memcpy(ev->attr.id, id, ids * sizeof(u64));
2916 
2917 	ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
2918 	ev->attr.header.size = (u16)size;
2919 
2920 	if (ev->attr.header.size == size)
2921 		err = process(tool, ev, NULL, NULL);
2922 	else
2923 		err = -E2BIG;
2924 
2925 	free(ev);
2926 
2927 	return err;
2928 }
2929 
2930 int perf_event__synthesize_attrs(struct perf_tool *tool,
2931 				   struct perf_session *session,
2932 				   perf_event__handler_t process)
2933 {
2934 	struct perf_evsel *evsel;
2935 	int err = 0;
2936 
2937 	evlist__for_each(session->evlist, evsel) {
2938 		err = perf_event__synthesize_attr(tool, &evsel->attr, evsel->ids,
2939 						  evsel->id, process);
2940 		if (err) {
2941 			pr_debug("failed to create perf header attribute\n");
2942 			return err;
2943 		}
2944 	}
2945 
2946 	return err;
2947 }
2948 
2949 int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
2950 			     union perf_event *event,
2951 			     struct perf_evlist **pevlist)
2952 {
2953 	u32 i, ids, n_ids;
2954 	struct perf_evsel *evsel;
2955 	struct perf_evlist *evlist = *pevlist;
2956 
2957 	if (evlist == NULL) {
2958 		*pevlist = evlist = perf_evlist__new();
2959 		if (evlist == NULL)
2960 			return -ENOMEM;
2961 	}
2962 
2963 	evsel = perf_evsel__new(&event->attr.attr);
2964 	if (evsel == NULL)
2965 		return -ENOMEM;
2966 
2967 	perf_evlist__add(evlist, evsel);
2968 
2969 	ids = event->header.size;
2970 	ids -= (void *)&event->attr.id - (void *)event;
2971 	n_ids = ids / sizeof(u64);
2972 	/*
2973 	 * We don't have the cpu and thread maps on the header, so
2974 	 * for allocating the perf_sample_id table we fake 1 cpu and
2975 	 * hattr->ids threads.
2976 	 */
2977 	if (perf_evsel__alloc_id(evsel, 1, n_ids))
2978 		return -ENOMEM;
2979 
2980 	for (i = 0; i < n_ids; i++) {
2981 		perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]);
2982 	}
2983 
2984 	symbol_conf.nr_events = evlist->nr_entries;
2985 
2986 	return 0;
2987 }
2988 
2989 int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd,
2990 					struct perf_evlist *evlist,
2991 					perf_event__handler_t process)
2992 {
2993 	union perf_event ev;
2994 	struct tracing_data *tdata;
2995 	ssize_t size = 0, aligned_size = 0, padding;
2996 	int err __maybe_unused = 0;
2997 
2998 	/*
2999 	 * We are going to store the size of the data followed
3000 	 * by the data contents. Since the fd descriptor is a pipe,
3001 	 * we cannot seek back to store the size of the data once
3002 	 * we know it. Instead we:
3003 	 *
3004 	 * - write the tracing data to the temp file
3005 	 * - get/write the data size to pipe
3006 	 * - write the tracing data from the temp file
3007 	 *   to the pipe
3008 	 */
3009 	tdata = tracing_data_get(&evlist->entries, fd, true);
3010 	if (!tdata)
3011 		return -1;
3012 
3013 	memset(&ev, 0, sizeof(ev));
3014 
3015 	ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
3016 	size = tdata->size;
3017 	aligned_size = PERF_ALIGN(size, sizeof(u64));
3018 	padding = aligned_size - size;
3019 	ev.tracing_data.header.size = sizeof(ev.tracing_data);
3020 	ev.tracing_data.size = aligned_size;
3021 
3022 	process(tool, &ev, NULL, NULL);
3023 
3024 	/*
3025 	 * The put function will copy all the tracing data
3026 	 * stored in temp file to the pipe.
3027 	 */
3028 	tracing_data_put(tdata);
3029 
3030 	write_padded(fd, NULL, 0, padding);
3031 
3032 	return aligned_size;
3033 }
3034 
3035 int perf_event__process_tracing_data(struct perf_tool *tool __maybe_unused,
3036 				     union perf_event *event,
3037 				     struct perf_session *session)
3038 {
3039 	ssize_t size_read, padding, size = event->tracing_data.size;
3040 	int fd = perf_data_file__fd(session->file);
3041 	off_t offset = lseek(fd, 0, SEEK_CUR);
3042 	char buf[BUFSIZ];
3043 
3044 	/* setup for reading amidst mmap */
3045 	lseek(fd, offset + sizeof(struct tracing_data_event),
3046 	      SEEK_SET);
3047 
3048 	size_read = trace_report(fd, &session->tevent,
3049 				 session->repipe);
3050 	padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read;
3051 
3052 	if (readn(fd, buf, padding) < 0) {
3053 		pr_err("%s: reading input file", __func__);
3054 		return -1;
3055 	}
3056 	if (session->repipe) {
3057 		int retw = write(STDOUT_FILENO, buf, padding);
3058 		if (retw <= 0 || retw != padding) {
3059 			pr_err("%s: repiping tracing data padding", __func__);
3060 			return -1;
3061 		}
3062 	}
3063 
3064 	if (size_read + padding != size) {
3065 		pr_err("%s: tracing data size mismatch", __func__);
3066 		return -1;
3067 	}
3068 
3069 	perf_evlist__prepare_tracepoint_events(session->evlist,
3070 					       session->tevent.pevent);
3071 
3072 	return size_read + padding;
3073 }
3074 
3075 int perf_event__synthesize_build_id(struct perf_tool *tool,
3076 				    struct dso *pos, u16 misc,
3077 				    perf_event__handler_t process,
3078 				    struct machine *machine)
3079 {
3080 	union perf_event ev;
3081 	size_t len;
3082 	int err = 0;
3083 
3084 	if (!pos->hit)
3085 		return err;
3086 
3087 	memset(&ev, 0, sizeof(ev));
3088 
3089 	len = pos->long_name_len + 1;
3090 	len = PERF_ALIGN(len, NAME_ALIGN);
3091 	memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
3092 	ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
3093 	ev.build_id.header.misc = misc;
3094 	ev.build_id.pid = machine->pid;
3095 	ev.build_id.header.size = sizeof(ev.build_id) + len;
3096 	memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
3097 
3098 	err = process(tool, &ev, NULL, machine);
3099 
3100 	return err;
3101 }
3102 
3103 int perf_event__process_build_id(struct perf_tool *tool __maybe_unused,
3104 				 union perf_event *event,
3105 				 struct perf_session *session)
3106 {
3107 	__event_process_build_id(&event->build_id,
3108 				 event->build_id.filename,
3109 				 session);
3110 	return 0;
3111 }
3112 
3113 void disable_buildid_cache(void)
3114 {
3115 	no_buildid_cache = true;
3116 }
3117