xref: /openbmc/linux/tools/perf/util/header.c (revision 95e9fd10)
1 #define _FILE_OFFSET_BITS 64
2 
3 #include "util.h"
4 #include <sys/types.h>
5 #include <byteswap.h>
6 #include <unistd.h>
7 #include <stdio.h>
8 #include <stdlib.h>
9 #include <linux/list.h>
10 #include <linux/kernel.h>
11 #include <linux/bitops.h>
12 #include <sys/utsname.h>
13 
14 #include "evlist.h"
15 #include "evsel.h"
16 #include "header.h"
17 #include "../perf.h"
18 #include "trace-event.h"
19 #include "session.h"
20 #include "symbol.h"
21 #include "debug.h"
22 #include "cpumap.h"
23 
/* When true, callers skip populating the on-disk ~/.debug build-id cache. */
static bool no_buildid_cache = false;

/* Table of user-named events written to the header; grown one entry at a
 * time by perf_header__push_event(). */
static int event_count;
static struct perf_trace_event_type *events;

/* Saved copy of the perf command line, set once via perf_header__set_cmdline(). */
static u32 header_argc;
static const char **header_argv;
31 
32 int perf_header__push_event(u64 id, const char *name)
33 {
34 	struct perf_trace_event_type *nevents;
35 
36 	if (strlen(name) > MAX_EVENT_NAME)
37 		pr_warning("Event %s will be truncated\n", name);
38 
39 	nevents = realloc(events, (event_count + 1) * sizeof(*events));
40 	if (nevents == NULL)
41 		return -ENOMEM;
42 	events = nevents;
43 
44 	memset(&events[event_count], 0, sizeof(struct perf_trace_event_type));
45 	events[event_count].event_id = id;
46 	strncpy(events[event_count].name, name, MAX_EVENT_NAME - 1);
47 	event_count++;
48 	return 0;
49 }
50 
51 char *perf_header__find_event(u64 id)
52 {
53 	int i;
54 	for (i = 0 ; i < event_count; i++) {
55 		if (events[i].event_id == id)
56 			return events[i].name;
57 	}
58 	return NULL;
59 }
60 
61 /*
62  * magic2 = "PERFILE2"
63  * must be a numerical value to let the endianness
64  * determine the memory layout. That way we are able
65  * to detect endianness when reading the perf.data file
66  * back.
67  *
68  * we check for legacy (PERFFILE) format.
69  */
/* Legacy (pre-v2) header magic: a plain string with no endianness info. */
static const char *__perf_magic1 = "PERFFILE";
/* "PERFILE2" encoded as a native-endian u64, and its byte-swapped form. */
static const u64 __perf_magic2    = 0x32454c4946524550ULL;
static const u64 __perf_magic2_sw = 0x50455246494c4532ULL;

#define PERF_MAGIC	__perf_magic2

/* On-disk layout: an event attr followed by the file section holding its ids. */
struct perf_file_attr {
	struct perf_event_attr	attr;
	struct perf_file_section	ids;
};
80 
/* Mark feature 'feat' as present in the header's feature bitmap. */
void perf_header__set_feat(struct perf_header *header, int feat)
{
	set_bit(feat, header->adds_features);
}
85 
/* Remove feature 'feat' from the header's feature bitmap. */
void perf_header__clear_feat(struct perf_header *header, int feat)
{
	clear_bit(feat, header->adds_features);
}
90 
/* Test whether feature 'feat' is present in the header's feature bitmap. */
bool perf_header__has_feat(const struct perf_header *header, int feat)
{
	return test_bit(feat, header->adds_features);
}
95 
/*
 * Write the entire buffer to 'fd', retrying on short writes.
 * Returns 0 on success or -errno on the first failing write().
 */
static int do_write(int fd, const void *buf, size_t size)
{
	/*
	 * char * cursor instead of the original's non-standard void *
	 * arithmetic (a GNU extension).
	 */
	const char *p = buf;

	while (size) {
		/*
		 * ssize_t, not int: a single write() may legally return
		 * more than INT_MAX on 64-bit systems and must not be
		 * truncated.
		 */
		ssize_t ret = write(fd, p, size);

		if (ret < 0)
			return -errno;

		size -= ret;
		p += ret;
	}

	return 0;
}
110 
#define NAME_ALIGN 64

/* Emit 'count' payload bytes, then zero bytes up to 'count_aligned'. */
static int write_padded(int fd, const void *bf, size_t count,
			size_t count_aligned)
{
	static const char zero_buf[NAME_ALIGN];
	int err;

	err = do_write(fd, bf, count);
	if (err)
		return err;

	return do_write(fd, zero_buf, count_aligned - count);
}
124 
/*
 * Write a string as <u32 padded-length><string + NUL, zero-padded>, with
 * the length rounded up to NAME_ALIGN. Counterpart of do_read_string().
 */
static int do_write_string(int fd, const char *str)
{
	u32 len, olen;
	int ret;

	olen = strlen(str) + 1;
	len = ALIGN(olen, NAME_ALIGN);

	/* write len, incl. \0 */
	ret = do_write(fd, &len, sizeof(len));
	if (ret < 0)
		return ret;

	return write_padded(fd, str, olen, len);
}
140 
141 static char *do_read_string(int fd, struct perf_header *ph)
142 {
143 	ssize_t sz, ret;
144 	u32 len;
145 	char *buf;
146 
147 	sz = read(fd, &len, sizeof(len));
148 	if (sz < (ssize_t)sizeof(len))
149 		return NULL;
150 
151 	if (ph->needs_swap)
152 		len = bswap_32(len);
153 
154 	buf = malloc(len);
155 	if (!buf)
156 		return NULL;
157 
158 	ret = read(fd, buf, len);
159 	if (ret == (ssize_t)len) {
160 		/*
161 		 * strings are padded by zeroes
162 		 * thus the actual strlen of buf
163 		 * may be less than len
164 		 */
165 		return buf;
166 	}
167 
168 	free(buf);
169 	return NULL;
170 }
171 
/*
 * Record the command line for the HEADER_CMDLINE feature.
 * Returns 0 (also when already set) or -ENOMEM.
 */
int
perf_header__set_cmdline(int argc, const char **argv)
{
	int i;

	/*
	 * If header_argv has already been set, do not override it.
	 * This allows a command to set the cmdline, parse args and
	 * then call another builtin function that implements a
	 * command -- e.g, cmd_kvm calling cmd_record.
	 */
	if (header_argv)
		return 0;

	header_argc = (u32)argc;

	/* do not include NULL termination */
	header_argv = calloc(argc, sizeof(char *));
	if (!header_argv)
		return -ENOMEM;

	/*
	 * must copy argv contents because it gets moved
	 * around during option parsing
	 * (a shallow copy: only the pointers are duplicated, so the
	 * strings themselves must stay alive for the process lifetime)
	 */
	for (i = 0; i < argc ; i++)
		header_argv[i] = argv[i];

	return 0;
}
202 
/*
 * Iterate 'pos' over the dsos on 'head' that carry a build id; the
 * if/continue/else trick keeps the construct usable as a single
 * statement head like a plain for loop.
 */
#define dsos__for_each_with_build_id(pos, head)	\
	list_for_each_entry(pos, head, node)	\
		if (!pos->has_build_id)		\
			continue;		\
		else
208 
/*
 * Emit one build_id_event (header + build id + padded long name) for
 * every hit dso on 'head'. 'pid' and 'misc' tag which machine/cpumode
 * the dsos belong to. Returns 0 or a negative do_write() error.
 */
static int __dsos__write_buildid_table(struct list_head *head, pid_t pid,
				u16 misc, int fd)
{
	struct dso *pos;

	dsos__for_each_with_build_id(pos, head) {
		int err;
		struct build_id_event b;
		size_t len;

		/* only dsos actually sampled in this session are written */
		if (!pos->hit)
			continue;
		len = pos->long_name_len + 1;
		len = ALIGN(len, NAME_ALIGN);
		memset(&b, 0, sizeof(b));
		memcpy(&b.build_id, pos->build_id, sizeof(pos->build_id));
		b.pid = pid;
		b.header.misc = misc;
		/* on-disk record size includes the zero-padded name below */
		b.header.size = sizeof(b) + len;
		err = do_write(fd, &b, sizeof(b));
		if (err < 0)
			return err;
		err = write_padded(fd, pos->long_name,
				   pos->long_name_len + 1, len);
		if (err < 0)
			return err;
	}

	return 0;
}
239 
/*
 * Write the build-id table for one machine: kernel dsos first, then
 * user dsos, using guest-specific misc flags when 'machine' is a guest.
 */
static int machine__write_buildid_table(struct machine *machine, int fd)
{
	int err;
	u16 kmisc = PERF_RECORD_MISC_KERNEL,
	    umisc = PERF_RECORD_MISC_USER;

	if (!machine__is_host(machine)) {
		kmisc = PERF_RECORD_MISC_GUEST_KERNEL;
		umisc = PERF_RECORD_MISC_GUEST_USER;
	}

	err = __dsos__write_buildid_table(&machine->kernel_dsos, machine->pid,
					  kmisc, fd);
	if (err == 0)
		err = __dsos__write_buildid_table(&machine->user_dsos,
						  machine->pid, umisc, fd);
	return err;
}
258 
/*
 * Write build-id tables for the host machine and then for every guest
 * machine kept in the session's rb tree; stops at the first error.
 */
static int dsos__write_buildid_table(struct perf_header *header, int fd)
{
	struct perf_session *session = container_of(header,
			struct perf_session, header);
	struct rb_node *nd;
	int err = machine__write_buildid_table(&session->host_machine, fd);

	if (err)
		return err;

	for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		err = machine__write_buildid_table(pos, fd);
		if (err)
			break;
	}
	return err;
}
277 
/*
 * Install 'name' (or /proc/kallsyms) into the build-id cache below
 * 'debugdir' and create the .build-id/xx/yyy... symlink pointing at it.
 * Returns 0 on success, -1 on failure.
 */
int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
			  const char *name, bool is_kallsyms)
{
	const size_t size = PATH_MAX;
	char *realname, *filename = zalloc(size),
	     *linkname = zalloc(size), *targetname;
	int len, err = -1;

	if (is_kallsyms) {
		/* a kptr_restrict'ed kallsyms would cache useless zeroed addresses */
		if (symbol_conf.kptr_restrict) {
			pr_debug("Not caching a kptr_restrict'ed /proc/kallsyms\n");
			return 0;
		}
		realname = (char *)name;
	} else
		realname = realpath(name, NULL);

	if (realname == NULL || filename == NULL || linkname == NULL)
		goto out_free;

	/* <debugdir>[/]<realname> is the cache directory for this dso */
	len = scnprintf(filename, size, "%s%s%s",
		       debugdir, is_kallsyms ? "/" : "", realname);
	if (mkdir_p(filename, 0755))
		goto out_free;

	snprintf(filename + len, size - len, "/%s", sbuild_id);

	if (access(filename, F_OK)) {
		if (is_kallsyms) {
			 if (copyfile("/proc/kallsyms", filename))
				goto out_free;
		} else if (link(realname, filename) && copyfile(name, filename))
			/* hard link failed (e.g. cross-device): copy fallback also failed */
			goto out_free;
	}

	/* .build-id/<first two hex chars> is the link's parent directory */
	len = scnprintf(linkname, size, "%s/.build-id/%.2s",
		       debugdir, sbuild_id);

	if (access(linkname, X_OK) && mkdir_p(linkname, 0755))
		goto out_free;

	snprintf(linkname + len, size - len, "/%s", sbuild_id + 2);
	/*
	 * Overwrite the tail of the "<debugdir>" prefix inside 'filename'
	 * with "../.." so the symlink target becomes relative to the
	 * .build-id/xx directory the link lives in.
	 */
	targetname = filename + strlen(debugdir) - 5;
	memcpy(targetname, "../..", 5);

	if (symlink(targetname, linkname) == 0)
		err = 0;
out_free:
	if (!is_kallsyms)
		free(realname);
	free(filename);
	free(linkname);
	return err;
}
332 
/* Format the raw build id as hex and add it to the cache (see __add_s). */
static int build_id_cache__add_b(const u8 *build_id, size_t build_id_size,
				 const char *name, const char *debugdir,
				 bool is_kallsyms)
{
	char sbuild_id[BUILD_ID_SIZE * 2 + 1];

	build_id__sprintf(build_id, build_id_size, sbuild_id);

	return build_id_cache__add_s(sbuild_id, debugdir, name, is_kallsyms);
}
343 
/*
 * Remove a build id from the cache: delete the cached file the
 * .build-id symlink points at, then the symlink itself.
 * Returns 0 on success, -1 on failure.
 */
int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir)
{
	const size_t size = PATH_MAX;
	char *filename = zalloc(size),
	     *linkname = zalloc(size);
	int err = -1;

	if (filename == NULL || linkname == NULL)
		goto out_free;

	snprintf(linkname, size, "%s/.build-id/%.2s/%s",
		 debugdir, sbuild_id, sbuild_id + 2);

	if (access(linkname, F_OK))
		goto out_free;

	/* readlink() does not NUL-terminate; the zalloc'ed buffer provides it */
	if (readlink(linkname, filename, size - 1) < 0)
		goto out_free;

	if (unlink(linkname))
		goto out_free;

	/*
	 * Since the link is relative, we must make it absolute:
	 */
	snprintf(linkname, size, "%s/.build-id/%.2s/%s",
		 debugdir, sbuild_id, filename);

	if (unlink(linkname))
		goto out_free;

	err = 0;
out_free:
	free(filename);
	free(linkname);
	return err;
}
381 
/*
 * Cache one dso; a kernel dso whose long name is not an absolute path
 * is taken to be kallsyms.
 */
static int dso__cache_build_id(struct dso *dso, const char *debugdir)
{
	bool is_kallsyms = dso->kernel && dso->long_name[0] != '/';

	return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id),
				     dso->long_name, debugdir, is_kallsyms);
}
389 
/* Cache every build-id-carrying dso on 'head'; -1 if any one fails. */
static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir)
{
	struct dso *pos;
	int err = 0;

	dsos__for_each_with_build_id(pos, head)
		if (dso__cache_build_id(pos, debugdir))
			err = -1;

	return err;
}
401 
/* Cache kernel and user dsos of one machine; nonzero if either pass fails. */
static int machine__cache_build_ids(struct machine *machine, const char *debugdir)
{
	int ret = __dsos__cache_build_ids(&machine->kernel_dsos, debugdir);
	ret |= __dsos__cache_build_ids(&machine->user_dsos, debugdir);
	return ret;
}
408 
/*
 * Copy every build-id-carrying dso of the session (host + guests) into
 * the cache directory 'buildid_dir'. Returns 0 on success, -1 otherwise.
 */
static int perf_session__cache_build_ids(struct perf_session *session)
{
	struct rb_node *nd;
	int ret;
	char debugdir[PATH_MAX];

	snprintf(debugdir, sizeof(debugdir), "%s", buildid_dir);

	/* an already existing cache directory is fine */
	if (mkdir(debugdir, 0755) != 0 && errno != EEXIST)
		return -1;

	ret = machine__cache_build_ids(&session->host_machine, debugdir);

	/* guest machines live in an rb tree next to the host */
	for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret |= machine__cache_build_ids(pos, debugdir);
	}
	return ret ? -1 : 0;
}
428 
/* True if any kernel or user dso of 'machine' yielded a build id. */
static bool machine__read_build_ids(struct machine *machine, bool with_hits)
{
	bool ret = __dsos__read_build_ids(&machine->kernel_dsos, with_hits);
	ret |= __dsos__read_build_ids(&machine->user_dsos, with_hits);
	return ret;
}
435 
/* True if any machine in the session (host or guest) yielded a build id. */
static bool perf_session__read_build_ids(struct perf_session *session, bool with_hits)
{
	struct rb_node *nd;
	bool ret = machine__read_build_ids(&session->host_machine, with_hits);

	for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret |= machine__read_build_ids(pos, with_hits);
	}

	return ret;
}
448 
/*
 * HEADER_TRACE_INFO writer. Despite its name, read_tracing_data()
 * gathers the tracepoint metadata and writes it to 'fd'.
 */
static int write_tracing_data(int fd, struct perf_header *h __used,
			    struct perf_evlist *evlist)
{
	return read_tracing_data(fd, &evlist->entries);
}
454 
455 
/*
 * HEADER_BUILD_ID writer: dump the build-id table for all machines in
 * the session and (unless disabled) populate the on-disk cache.
 */
static int write_build_id(int fd, struct perf_header *h,
			  struct perf_evlist *evlist __used)
{
	struct perf_session *session;
	int err;

	session = container_of(h, struct perf_session, header);

	/* nothing with a build id was hit: the feature would be empty */
	if (!perf_session__read_build_ids(session, true))
		return -1;

	err = dsos__write_buildid_table(h, fd);
	if (err < 0) {
		pr_debug("failed to write buildid table\n");
		return err;
	}
	/* best effort: cache failures do not fail the header write */
	if (!no_buildid_cache)
		perf_session__cache_build_ids(session);

	return 0;
}
477 
478 static int write_hostname(int fd, struct perf_header *h __used,
479 			  struct perf_evlist *evlist __used)
480 {
481 	struct utsname uts;
482 	int ret;
483 
484 	ret = uname(&uts);
485 	if (ret < 0)
486 		return -1;
487 
488 	return do_write_string(fd, uts.nodename);
489 }
490 
491 static int write_osrelease(int fd, struct perf_header *h __used,
492 			   struct perf_evlist *evlist __used)
493 {
494 	struct utsname uts;
495 	int ret;
496 
497 	ret = uname(&uts);
498 	if (ret < 0)
499 		return -1;
500 
501 	return do_write_string(fd, uts.release);
502 }
503 
504 static int write_arch(int fd, struct perf_header *h __used,
505 		      struct perf_evlist *evlist __used)
506 {
507 	struct utsname uts;
508 	int ret;
509 
510 	ret = uname(&uts);
511 	if (ret < 0)
512 		return -1;
513 
514 	return do_write_string(fd, uts.machine);
515 }
516 
/* HEADER_VERSION writer: the perf tool's own version string. */
static int write_version(int fd, struct perf_header *h __used,
			 struct perf_evlist *evlist __used)
{
	return do_write_string(fd, perf_version_string);
}
522 
523 static int write_cpudesc(int fd, struct perf_header *h __used,
524 		       struct perf_evlist *evlist __used)
525 {
526 #ifndef CPUINFO_PROC
527 #define CPUINFO_PROC NULL
528 #endif
529 	FILE *file;
530 	char *buf = NULL;
531 	char *s, *p;
532 	const char *search = CPUINFO_PROC;
533 	size_t len = 0;
534 	int ret = -1;
535 
536 	if (!search)
537 		return -1;
538 
539 	file = fopen("/proc/cpuinfo", "r");
540 	if (!file)
541 		return -1;
542 
543 	while (getline(&buf, &len, file) > 0) {
544 		ret = strncmp(buf, search, strlen(search));
545 		if (!ret)
546 			break;
547 	}
548 
549 	if (ret)
550 		goto done;
551 
552 	s = buf;
553 
554 	p = strchr(buf, ':');
555 	if (p && *(p+1) == ' ' && *(p+2))
556 		s = p + 2;
557 	p = strchr(s, '\n');
558 	if (p)
559 		*p = '\0';
560 
561 	/* squash extra space characters (branding string) */
562 	p = s;
563 	while (*p) {
564 		if (isspace(*p)) {
565 			char *r = p + 1;
566 			char *q = r;
567 			*p = ' ';
568 			while (*q && isspace(*q))
569 				q++;
570 			if (q != (p+1))
571 				while ((*r++ = *q++));
572 		}
573 		p++;
574 	}
575 	ret = do_write_string(fd, s);
576 done:
577 	free(buf);
578 	fclose(file);
579 	return ret;
580 }
581 
582 static int write_nrcpus(int fd, struct perf_header *h __used,
583 			struct perf_evlist *evlist __used)
584 {
585 	long nr;
586 	u32 nrc, nra;
587 	int ret;
588 
589 	nr = sysconf(_SC_NPROCESSORS_CONF);
590 	if (nr < 0)
591 		return -1;
592 
593 	nrc = (u32)(nr & UINT_MAX);
594 
595 	nr = sysconf(_SC_NPROCESSORS_ONLN);
596 	if (nr < 0)
597 		return -1;
598 
599 	nra = (u32)(nr & UINT_MAX);
600 
601 	ret = do_write(fd, &nrc, sizeof(nrc));
602 	if (ret < 0)
603 		return ret;
604 
605 	return do_write(fd, &nra, sizeof(nra));
606 }
607 
/*
 * HEADER_EVENT_DESC writer: event count, attr size, then for each event
 * its perf_event_attr, id count, name string and id array.
 */
static int write_event_desc(int fd, struct perf_header *h __used,
			    struct perf_evlist *evlist)
{
	struct perf_evsel *attr;
	u32 nre = 0, nri, sz;
	int ret;

	list_for_each_entry(attr, &evlist->entries, node)
		nre++;

	/*
	 * write number of events
	 */
	ret = do_write(fd, &nre, sizeof(nre));
	if (ret < 0)
		return ret;

	/*
	 * size of perf_event_attr struct
	 */
	sz = (u32)sizeof(attr->attr);
	ret = do_write(fd, &sz, sizeof(sz));
	if (ret < 0)
		return ret;

	list_for_each_entry(attr, &evlist->entries, node) {

		ret = do_write(fd, &attr->attr, sz);
		if (ret < 0)
			return ret;
		/*
		 * write number of unique id per event
		 * there is one id per instance of an event
		 *
		 * copy into an nri to be independent of the
		 * type of ids,
		 */
		nri = attr->ids;
		ret = do_write(fd, &nri, sizeof(nri));
		if (ret < 0)
			return ret;

		/*
		 * write event string as passed on cmdline
		 */
		ret = do_write_string(fd, perf_evsel__name(attr));
		if (ret < 0)
			return ret;
		/*
		 * write unique ids for this event
		 */
		ret = do_write(fd, attr->id, attr->ids * sizeof(u64));
		if (ret < 0)
			return ret;
	}
	return 0;
}
665 
666 static int write_cmdline(int fd, struct perf_header *h __used,
667 			 struct perf_evlist *evlist __used)
668 {
669 	char buf[MAXPATHLEN];
670 	char proc[32];
671 	u32 i, n;
672 	int ret;
673 
674 	/*
675 	 * actual atual path to perf binary
676 	 */
677 	sprintf(proc, "/proc/%d/exe", getpid());
678 	ret = readlink(proc, buf, sizeof(buf));
679 	if (ret <= 0)
680 		return -1;
681 
682 	/* readlink() does not add null termination */
683 	buf[ret] = '\0';
684 
685 	/* account for binary path */
686 	n = header_argc + 1;
687 
688 	ret = do_write(fd, &n, sizeof(n));
689 	if (ret < 0)
690 		return ret;
691 
692 	ret = do_write_string(fd, buf);
693 	if (ret < 0)
694 		return ret;
695 
696 	for (i = 0 ; i < header_argc; i++) {
697 		ret = do_write_string(fd, header_argv[i]);
698 		if (ret < 0)
699 			return ret;
700 	}
701 	return 0;
702 }
703 
/* sysfs locations of a cpu's sibling masks, in human-readable list form */
#define CORE_SIB_FMT \
	"/sys/devices/system/cpu/cpu%d/topology/core_siblings_list"
#define THRD_SIB_FMT \
	"/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list"

/* Deduplicated sibling-list strings collected from sysfs. */
struct cpu_topo {
	u32 core_sib;		/* entries used in core_siblings */
	u32 thread_sib;		/* entries used in thread_siblings */
	char **core_siblings;
	char **thread_siblings;
};
715 
/*
 * Append 'cpu's core/thread sibling lists to 'tp', de-duplicating
 * against lists already collected from earlier cpus.
 * Returns 0 on success, -1 on failure.
 */
static int build_cpu_topo(struct cpu_topo *tp, int cpu)
{
	FILE *fp;
	char filename[MAXPATHLEN];
	char *buf = NULL, *p;
	size_t len = 0;
	u32 i = 0;
	int ret = -1;

	sprintf(filename, CORE_SIB_FMT, cpu);
	fp = fopen(filename, "r");
	if (!fp)
		return -1;

	if (getline(&buf, &len, fp) <= 0)
		goto done;

	fclose(fp);

	/* strip the trailing newline */
	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	/* only record a sibling list not already seen for a previous cpu */
	for (i = 0; i < tp->core_sib; i++) {
		if (!strcmp(buf, tp->core_siblings[i]))
			break;
	}
	if (i == tp->core_sib) {
		tp->core_siblings[i] = buf;
		tp->core_sib++;
		/* ownership moved into tp: force getline() to reallocate */
		buf = NULL;
		len = 0;
	}

	sprintf(filename, THRD_SIB_FMT, cpu);
	fp = fopen(filename, "r");
	if (!fp)
		goto done;

	if (getline(&buf, &len, fp) <= 0)
		goto done;

	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	for (i = 0; i < tp->thread_sib; i++) {
		if (!strcmp(buf, tp->thread_siblings[i]))
			break;
	}
	if (i == tp->thread_sib) {
		tp->thread_siblings[i] = buf;
		tp->thread_sib++;
		buf = NULL;
	}
	ret = 0;
done:
	if(fp)
		fclose(fp);
	free(buf);
	return ret;
}
778 
779 static void free_cpu_topo(struct cpu_topo *tp)
780 {
781 	u32 i;
782 
783 	if (!tp)
784 		return;
785 
786 	for (i = 0 ; i < tp->core_sib; i++)
787 		free(tp->core_siblings[i]);
788 
789 	for (i = 0 ; i < tp->thread_sib; i++)
790 		free(tp->thread_siblings[i]);
791 
792 	free(tp);
793 }
794 
/*
 * Collect the deduplicated core/thread sibling lists for all configured
 * CPUs. A single allocation holds the struct followed by both pointer
 * arrays. Returns NULL on failure.
 */
static struct cpu_topo *build_cpu_topology(void)
{
	struct cpu_topo *tp;
	void *addr;
	u32 nr, i;
	size_t sz;
	long ncpus;
	int ret = -1;

	ncpus = sysconf(_SC_NPROCESSORS_CONF);
	if (ncpus < 0)
		return NULL;

	nr = (u32)(ncpus & UINT_MAX);

	sz = nr * sizeof(char *);

	/* one block: the struct, then core_siblings[nr], then thread_siblings[nr] */
	addr = calloc(1, sizeof(*tp) + 2 * sz);
	if (!addr)
		return NULL;

	tp = addr;

	/* NOTE(review): arithmetic on void * is a GNU extension */
	addr += sizeof(*tp);
	tp->core_siblings = addr;
	addr += sz;
	tp->thread_siblings = addr;

	for (i = 0; i < nr; i++) {
		ret = build_cpu_topo(tp, i);
		if (ret < 0)
			break;
	}
	if (ret) {
		free_cpu_topo(tp);
		tp = NULL;
	}
	return tp;
}
834 
/*
 * HEADER_CPU_TOPOLOGY writer: core sibling count and strings, then
 * thread sibling count and strings.
 */
static int write_cpu_topology(int fd, struct perf_header *h __used,
			  struct perf_evlist *evlist __used)
{
	struct cpu_topo *tp;
	u32 i;
	int ret;

	tp = build_cpu_topology();
	if (!tp)
		return -1;

	ret = do_write(fd, &tp->core_sib, sizeof(tp->core_sib));
	if (ret < 0)
		goto done;

	for (i = 0; i < tp->core_sib; i++) {
		ret = do_write_string(fd, tp->core_siblings[i]);
		if (ret < 0)
			goto done;
	}
	ret = do_write(fd, &tp->thread_sib, sizeof(tp->thread_sib));
	if (ret < 0)
		goto done;

	for (i = 0; i < tp->thread_sib; i++) {
		ret = do_write_string(fd, tp->thread_siblings[i]);
		if (ret < 0)
			break;
	}
done:
	free_cpu_topo(tp);
	return ret;
}
868 
869 
870 
871 static int write_total_mem(int fd, struct perf_header *h __used,
872 			  struct perf_evlist *evlist __used)
873 {
874 	char *buf = NULL;
875 	FILE *fp;
876 	size_t len = 0;
877 	int ret = -1, n;
878 	uint64_t mem;
879 
880 	fp = fopen("/proc/meminfo", "r");
881 	if (!fp)
882 		return -1;
883 
884 	while (getline(&buf, &len, fp) > 0) {
885 		ret = strncmp(buf, "MemTotal:", 9);
886 		if (!ret)
887 			break;
888 	}
889 	if (!ret) {
890 		n = sscanf(buf, "%*s %"PRIu64, &mem);
891 		if (n == 1)
892 			ret = do_write(fd, &mem, sizeof(mem));
893 	}
894 	free(buf);
895 	fclose(fp);
896 	return ret;
897 }
898 
899 static int write_topo_node(int fd, int node)
900 {
901 	char str[MAXPATHLEN];
902 	char field[32];
903 	char *buf = NULL, *p;
904 	size_t len = 0;
905 	FILE *fp;
906 	u64 mem_total, mem_free, mem;
907 	int ret = -1;
908 
909 	sprintf(str, "/sys/devices/system/node/node%d/meminfo", node);
910 	fp = fopen(str, "r");
911 	if (!fp)
912 		return -1;
913 
914 	while (getline(&buf, &len, fp) > 0) {
915 		/* skip over invalid lines */
916 		if (!strchr(buf, ':'))
917 			continue;
918 		if (sscanf(buf, "%*s %*d %s %"PRIu64, field, &mem) != 2)
919 			goto done;
920 		if (!strcmp(field, "MemTotal:"))
921 			mem_total = mem;
922 		if (!strcmp(field, "MemFree:"))
923 			mem_free = mem;
924 	}
925 
926 	fclose(fp);
927 
928 	ret = do_write(fd, &mem_total, sizeof(u64));
929 	if (ret)
930 		goto done;
931 
932 	ret = do_write(fd, &mem_free, sizeof(u64));
933 	if (ret)
934 		goto done;
935 
936 	ret = -1;
937 	sprintf(str, "/sys/devices/system/node/node%d/cpulist", node);
938 
939 	fp = fopen(str, "r");
940 	if (!fp)
941 		goto done;
942 
943 	if (getline(&buf, &len, fp) <= 0)
944 		goto done;
945 
946 	p = strchr(buf, '\n');
947 	if (p)
948 		*p = '\0';
949 
950 	ret = do_write_string(fd, buf);
951 done:
952 	free(buf);
953 	fclose(fp);
954 	return ret;
955 }
956 
957 static int write_numa_topology(int fd, struct perf_header *h __used,
958 			  struct perf_evlist *evlist __used)
959 {
960 	char *buf = NULL;
961 	size_t len = 0;
962 	FILE *fp;
963 	struct cpu_map *node_map = NULL;
964 	char *c;
965 	u32 nr, i, j;
966 	int ret = -1;
967 
968 	fp = fopen("/sys/devices/system/node/online", "r");
969 	if (!fp)
970 		return -1;
971 
972 	if (getline(&buf, &len, fp) <= 0)
973 		goto done;
974 
975 	c = strchr(buf, '\n');
976 	if (c)
977 		*c = '\0';
978 
979 	node_map = cpu_map__new(buf);
980 	if (!node_map)
981 		goto done;
982 
983 	nr = (u32)node_map->nr;
984 
985 	ret = do_write(fd, &nr, sizeof(nr));
986 	if (ret < 0)
987 		goto done;
988 
989 	for (i = 0; i < nr; i++) {
990 		j = (u32)node_map->map[i];
991 		ret = do_write(fd, &j, sizeof(j));
992 		if (ret < 0)
993 			break;
994 
995 		ret = write_topo_node(fd, i);
996 		if (ret < 0)
997 			break;
998 	}
999 done:
1000 	free(buf);
1001 	fclose(fp);
1002 	free(node_map);
1003 	return ret;
1004 }
1005 
1006 /*
1007  * default get_cpuid(): nothing gets recorded
1008  * actual implementation must be in arch/$(ARCH)/util/header.c
1009  */
int __attribute__((weak)) get_cpuid(char *buffer __used, size_t sz __used)
{
	/* weak stub: returning -1 makes write_cpuid() skip the feature */
	return -1;
}
1014 
1015 static int write_cpuid(int fd, struct perf_header *h __used,
1016 		       struct perf_evlist *evlist __used)
1017 {
1018 	char buffer[64];
1019 	int ret;
1020 
1021 	ret = get_cpuid(buffer, sizeof(buffer));
1022 	if (!ret)
1023 		goto write_it;
1024 
1025 	return -1;
1026 write_it:
1027 	return do_write_string(fd, buffer);
1028 }
1029 
/*
 * HEADER_BRANCH_STACK writer: the feature carries no payload -- its
 * mere presence marks that samples contain branch stacks.
 */
static int write_branch_stack(int fd __used, struct perf_header *h __used,
		       struct perf_evlist *evlist __used)
{
	return 0;
}
1035 
/* Read back and pretty-print the HEADER_HOSTNAME string. */
static void print_hostname(struct perf_header *ph, int fd, FILE *fp)
{
	char *str = do_read_string(fd, ph);

	/* do_read_string() may fail: NULL with "%s" is undefined behavior */
	fprintf(fp, "# hostname : %s\n", str ? str : "(unknown)");
	free(str);
}
1042 
/* Read back and pretty-print the HEADER_OSRELEASE string. */
static void print_osrelease(struct perf_header *ph, int fd, FILE *fp)
{
	char *str = do_read_string(fd, ph);

	/* do_read_string() may fail: NULL with "%s" is undefined behavior */
	fprintf(fp, "# os release : %s\n", str ? str : "(unknown)");
	free(str);
}
1049 
/* Read back and pretty-print the HEADER_ARCH string. */
static void print_arch(struct perf_header *ph, int fd, FILE *fp)
{
	char *str = do_read_string(fd, ph);

	/* do_read_string() may fail: NULL with "%s" is undefined behavior */
	fprintf(fp, "# arch : %s\n", str ? str : "(unknown)");
	free(str);
}
1056 
/* Read back and pretty-print the HEADER_CPUDESC string. */
static void print_cpudesc(struct perf_header *ph, int fd, FILE *fp)
{
	char *str = do_read_string(fd, ph);

	/* do_read_string() may fail: NULL with "%s" is undefined behavior */
	fprintf(fp, "# cpudesc : %s\n", str ? str : "(unknown)");
	free(str);
}
1063 
/* Read back and print the two u32 CPU counts written by write_nrcpus(). */
static void print_nrcpus(struct perf_header *ph, int fd, FILE *fp)
{
	ssize_t ret;
	u32 nr;

	ret = read(fd, &nr, sizeof(nr));
	if (ret != (ssize_t)sizeof(nr))
		nr = -1; /* interpreted as error */

	if (ph->needs_swap)
		nr = bswap_32(nr);

	/*
	 * NOTE(review): write_nrcpus() emits the _SC_NPROCESSORS_CONF value
	 * first, yet the first value is labelled "online" here -- the labels
	 * look swapped. Verify against the writer before changing either side.
	 */
	fprintf(fp, "# nrcpus online : %u\n", nr);

	ret = read(fd, &nr, sizeof(nr));
	if (ret != (ssize_t)sizeof(nr))
		nr = -1; /* interpreted as error */

	if (ph->needs_swap)
		nr = bswap_32(nr);

	fprintf(fp, "# nrcpus avail : %u\n", nr);
}
1087 
/* Read back and pretty-print the HEADER_VERSION string. */
static void print_version(struct perf_header *ph, int fd, FILE *fp)
{
	char *str = do_read_string(fd, ph);

	/* do_read_string() may fail: NULL with "%s" is undefined behavior */
	fprintf(fp, "# perf version : %s\n", str ? str : "(unknown)");
	free(str);
}
1094 
1095 static void print_cmdline(struct perf_header *ph, int fd, FILE *fp)
1096 {
1097 	ssize_t ret;
1098 	char *str;
1099 	u32 nr, i;
1100 
1101 	ret = read(fd, &nr, sizeof(nr));
1102 	if (ret != (ssize_t)sizeof(nr))
1103 		return;
1104 
1105 	if (ph->needs_swap)
1106 		nr = bswap_32(nr);
1107 
1108 	fprintf(fp, "# cmdline : ");
1109 
1110 	for (i = 0; i < nr; i++) {
1111 		str = do_read_string(fd, ph);
1112 		fprintf(fp, "%s ", str);
1113 		free(str);
1114 	}
1115 	fputc('\n', fp);
1116 }
1117 
1118 static void print_cpu_topology(struct perf_header *ph, int fd, FILE *fp)
1119 {
1120 	ssize_t ret;
1121 	u32 nr, i;
1122 	char *str;
1123 
1124 	ret = read(fd, &nr, sizeof(nr));
1125 	if (ret != (ssize_t)sizeof(nr))
1126 		return;
1127 
1128 	if (ph->needs_swap)
1129 		nr = bswap_32(nr);
1130 
1131 	for (i = 0; i < nr; i++) {
1132 		str = do_read_string(fd, ph);
1133 		fprintf(fp, "# sibling cores   : %s\n", str);
1134 		free(str);
1135 	}
1136 
1137 	ret = read(fd, &nr, sizeof(nr));
1138 	if (ret != (ssize_t)sizeof(nr))
1139 		return;
1140 
1141 	if (ph->needs_swap)
1142 		nr = bswap_32(nr);
1143 
1144 	for (i = 0; i < nr; i++) {
1145 		str = do_read_string(fd, ph);
1146 		fprintf(fp, "# sibling threads : %s\n", str);
1147 		free(str);
1148 	}
1149 }
1150 
1151 static void print_event_desc(struct perf_header *ph, int fd, FILE *fp)
1152 {
1153 	struct perf_event_attr attr;
1154 	uint64_t id;
1155 	void *buf = NULL;
1156 	char *str;
1157 	u32 nre, sz, nr, i, j;
1158 	ssize_t ret;
1159 	size_t msz;
1160 
1161 	/* number of events */
1162 	ret = read(fd, &nre, sizeof(nre));
1163 	if (ret != (ssize_t)sizeof(nre))
1164 		goto error;
1165 
1166 	if (ph->needs_swap)
1167 		nre = bswap_32(nre);
1168 
1169 	ret = read(fd, &sz, sizeof(sz));
1170 	if (ret != (ssize_t)sizeof(sz))
1171 		goto error;
1172 
1173 	if (ph->needs_swap)
1174 		sz = bswap_32(sz);
1175 
1176 	memset(&attr, 0, sizeof(attr));
1177 
1178 	/* buffer to hold on file attr struct */
1179 	buf = malloc(sz);
1180 	if (!buf)
1181 		goto error;
1182 
1183 	msz = sizeof(attr);
1184 	if (sz < msz)
1185 		msz = sz;
1186 
1187 	for (i = 0 ; i < nre; i++) {
1188 
1189 		/*
1190 		 * must read entire on-file attr struct to
1191 		 * sync up with layout.
1192 		 */
1193 		ret = read(fd, buf, sz);
1194 		if (ret != (ssize_t)sz)
1195 			goto error;
1196 
1197 		if (ph->needs_swap)
1198 			perf_event__attr_swap(buf);
1199 
1200 		memcpy(&attr, buf, msz);
1201 
1202 		ret = read(fd, &nr, sizeof(nr));
1203 		if (ret != (ssize_t)sizeof(nr))
1204 			goto error;
1205 
1206 		if (ph->needs_swap)
1207 			nr = bswap_32(nr);
1208 
1209 		str = do_read_string(fd, ph);
1210 		fprintf(fp, "# event : name = %s, ", str);
1211 		free(str);
1212 
1213 		fprintf(fp, "type = %d, config = 0x%"PRIx64
1214 			    ", config1 = 0x%"PRIx64", config2 = 0x%"PRIx64,
1215 				attr.type,
1216 				(u64)attr.config,
1217 				(u64)attr.config1,
1218 				(u64)attr.config2);
1219 
1220 		fprintf(fp, ", excl_usr = %d, excl_kern = %d",
1221 				attr.exclude_user,
1222 				attr.exclude_kernel);
1223 
1224 		fprintf(fp, ", excl_host = %d, excl_guest = %d",
1225 				attr.exclude_host,
1226 				attr.exclude_guest);
1227 
1228 		fprintf(fp, ", precise_ip = %d", attr.precise_ip);
1229 
1230 		if (nr)
1231 			fprintf(fp, ", id = {");
1232 
1233 		for (j = 0 ; j < nr; j++) {
1234 			ret = read(fd, &id, sizeof(id));
1235 			if (ret != (ssize_t)sizeof(id))
1236 				goto error;
1237 
1238 			if (ph->needs_swap)
1239 				id = bswap_64(id);
1240 
1241 			if (j)
1242 				fputc(',', fp);
1243 
1244 			fprintf(fp, " %"PRIu64, id);
1245 		}
1246 		if (nr && j == nr)
1247 			fprintf(fp, " }");
1248 		fputc('\n', fp);
1249 	}
1250 	free(buf);
1251 	return;
1252 error:
1253 	fprintf(fp, "# event desc: not available or unable to read\n");
1254 }
1255 
1256 static void print_total_mem(struct perf_header *h __used, int fd, FILE *fp)
1257 {
1258 	uint64_t mem;
1259 	ssize_t ret;
1260 
1261 	ret = read(fd, &mem, sizeof(mem));
1262 	if (ret != sizeof(mem))
1263 		goto error;
1264 
1265 	if (h->needs_swap)
1266 		mem = bswap_64(mem);
1267 
1268 	fprintf(fp, "# total memory : %"PRIu64" kB\n", mem);
1269 	return;
1270 error:
1271 	fprintf(fp, "# total memory : unknown\n");
1272 }
1273 
1274 static void print_numa_topology(struct perf_header *h __used, int fd, FILE *fp)
1275 {
1276 	ssize_t ret;
1277 	u32 nr, c, i;
1278 	char *str;
1279 	uint64_t mem_total, mem_free;
1280 
1281 	/* nr nodes */
1282 	ret = read(fd, &nr, sizeof(nr));
1283 	if (ret != (ssize_t)sizeof(nr))
1284 		goto error;
1285 
1286 	if (h->needs_swap)
1287 		nr = bswap_32(nr);
1288 
1289 	for (i = 0; i < nr; i++) {
1290 
1291 		/* node number */
1292 		ret = read(fd, &c, sizeof(c));
1293 		if (ret != (ssize_t)sizeof(c))
1294 			goto error;
1295 
1296 		if (h->needs_swap)
1297 			c = bswap_32(c);
1298 
1299 		ret = read(fd, &mem_total, sizeof(u64));
1300 		if (ret != sizeof(u64))
1301 			goto error;
1302 
1303 		ret = read(fd, &mem_free, sizeof(u64));
1304 		if (ret != sizeof(u64))
1305 			goto error;
1306 
1307 		if (h->needs_swap) {
1308 			mem_total = bswap_64(mem_total);
1309 			mem_free = bswap_64(mem_free);
1310 		}
1311 
1312 		fprintf(fp, "# node%u meminfo  : total = %"PRIu64" kB,"
1313 			    " free = %"PRIu64" kB\n",
1314 			c,
1315 			mem_total,
1316 			mem_free);
1317 
1318 		str = do_read_string(fd, h);
1319 		fprintf(fp, "# node%u cpu list : %s\n", c, str);
1320 		free(str);
1321 	}
1322 	return;
1323 error:
1324 	fprintf(fp, "# numa topology : not available\n");
1325 }
1326 
/* Read back and pretty-print the HEADER_CPUID string. */
static void print_cpuid(struct perf_header *ph, int fd, FILE *fp)
{
	char *str = do_read_string(fd, ph);

	/* do_read_string() may fail: NULL with "%s" is undefined behavior */
	fprintf(fp, "# cpuid : %s\n", str ? str : "(unknown)");
	free(str);
}
1333 
1334 static void print_branch_stack(struct perf_header *ph __used, int fd __used,
1335 			       FILE *fp)
1336 {
1337 	fprintf(fp, "# contains samples with branch stack\n");
1338 }
1339 
/*
 * Record the build-id carried by @bev for the DSO named @filename:
 * find (or create) the owning machine and the DSO in the appropriate
 * kernel/user DSO list, then attach the build-id to it.
 *
 * Returns 0 on success, -1 if the machine cannot be found or the
 * cpumode in the event header is unknown.
 */
static int __event_process_build_id(struct build_id_event *bev,
				    char *filename,
				    struct perf_session *session)
{
	int err = -1;
	struct list_head *head;
	struct machine *machine;
	u16 misc;
	struct dso *dso;
	enum dso_kernel_type dso_type;

	/* bev->pid selects host vs guest machine */
	machine = perf_session__findnew_machine(session, bev->pid);
	if (!machine)
		goto out;

	misc = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	/* cpumode decides which DSO list the entry belongs to */
	switch (misc) {
	case PERF_RECORD_MISC_KERNEL:
		dso_type = DSO_TYPE_KERNEL;
		head = &machine->kernel_dsos;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		dso_type = DSO_TYPE_GUEST_KERNEL;
		head = &machine->kernel_dsos;
		break;
	case PERF_RECORD_MISC_USER:
	case PERF_RECORD_MISC_GUEST_USER:
		dso_type = DSO_TYPE_USER;
		head = &machine->user_dsos;
		break;
	default:
		goto out;
	}

	dso = __dsos__findnew(head, filename);
	if (dso != NULL) {
		/* hex representation: two chars per byte plus NUL */
		char sbuild_id[BUILD_ID_SIZE * 2 + 1];

		dso__set_build_id(dso, &bev->build_id);

		/* names like "[kernel.kallsyms]" denote kernel-level DSOs */
		if (filename[0] == '[')
			dso->kernel = dso_type;

		build_id__sprintf(dso->build_id, sizeof(dso->build_id),
				  sbuild_id);
		pr_debug("build id event received for %s: %s\n",
			 dso->long_name, sbuild_id);
	}

	err = 0;
out:
	return err;
}
1394 
1395 static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
1396 						 int input, u64 offset, u64 size)
1397 {
1398 	struct perf_session *session = container_of(header, struct perf_session, header);
1399 	struct {
1400 		struct perf_event_header   header;
1401 		u8			   build_id[ALIGN(BUILD_ID_SIZE, sizeof(u64))];
1402 		char			   filename[0];
1403 	} old_bev;
1404 	struct build_id_event bev;
1405 	char filename[PATH_MAX];
1406 	u64 limit = offset + size;
1407 
1408 	while (offset < limit) {
1409 		ssize_t len;
1410 
1411 		if (read(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
1412 			return -1;
1413 
1414 		if (header->needs_swap)
1415 			perf_event_header__bswap(&old_bev.header);
1416 
1417 		len = old_bev.header.size - sizeof(old_bev);
1418 		if (read(input, filename, len) != len)
1419 			return -1;
1420 
1421 		bev.header = old_bev.header;
1422 
1423 		/*
1424 		 * As the pid is the missing value, we need to fill
1425 		 * it properly. The header.misc value give us nice hint.
1426 		 */
1427 		bev.pid	= HOST_KERNEL_ID;
1428 		if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
1429 		    bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
1430 			bev.pid	= DEFAULT_GUEST_KERNEL_ID;
1431 
1432 		memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
1433 		__event_process_build_id(&bev, filename, session);
1434 
1435 		offset += bev.header.size;
1436 	}
1437 
1438 	return 0;
1439 }
1440 
1441 static int perf_header__read_build_ids(struct perf_header *header,
1442 				       int input, u64 offset, u64 size)
1443 {
1444 	struct perf_session *session = container_of(header, struct perf_session, header);
1445 	struct build_id_event bev;
1446 	char filename[PATH_MAX];
1447 	u64 limit = offset + size, orig_offset = offset;
1448 	int err = -1;
1449 
1450 	while (offset < limit) {
1451 		ssize_t len;
1452 
1453 		if (read(input, &bev, sizeof(bev)) != sizeof(bev))
1454 			goto out;
1455 
1456 		if (header->needs_swap)
1457 			perf_event_header__bswap(&bev.header);
1458 
1459 		len = bev.header.size - sizeof(bev);
1460 		if (read(input, filename, len) != len)
1461 			goto out;
1462 		/*
1463 		 * The a1645ce1 changeset:
1464 		 *
1465 		 * "perf: 'perf kvm' tool for monitoring guest performance from host"
1466 		 *
1467 		 * Added a field to struct build_id_event that broke the file
1468 		 * format.
1469 		 *
1470 		 * Since the kernel build-id is the first entry, process the
1471 		 * table using the old format if the well known
1472 		 * '[kernel.kallsyms]' string for the kernel build-id has the
1473 		 * first 4 characters chopped off (where the pid_t sits).
1474 		 */
1475 		if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
1476 			if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
1477 				return -1;
1478 			return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
1479 		}
1480 
1481 		__event_process_build_id(&bev, filename, session);
1482 
1483 		offset += bev.header.size;
1484 	}
1485 	err = 0;
1486 out:
1487 	return err;
1488 }
1489 
/*
 * HEADER_TRACING_DATA process callback: hand the section contents to
 * trace_report(), which parses the tracing metadata into *data
 * (the session's pevent pointer). Always reports success.
 */
static int process_tracing_data(struct perf_file_section *section __unused,
			      struct perf_header *ph __unused,
			      int feat __unused, int fd, void *data)
{
	trace_report(fd, data, false);
	return 0;
}
1497 
1498 static int process_build_id(struct perf_file_section *section,
1499 			    struct perf_header *ph,
1500 			    int feat __unused, int fd, void *data __used)
1501 {
1502 	if (perf_header__read_build_ids(ph, fd, section->offset, section->size))
1503 		pr_debug("Failed to read buildids, continuing...\n");
1504 	return 0;
1505 }
1506 
/* Per-feature callbacks, indexed by HEADER_* bit in feat_ops[] below. */
struct feature_ops {
	int (*write)(int fd, struct perf_header *h, struct perf_evlist *evlist);
	void (*print)(struct perf_header *h, int fd, FILE *fp);
	int (*process)(struct perf_file_section *section,
		       struct perf_header *h, int feat, int fd, void *data);
	const char *name;	/* stringified HEADER_* constant */
	bool full_only;		/* print only with the full (-I) listing */
};
1515 
/*
 * FEAT_OPA: feature with write/print callbacks only.
 * FEAT_OPP: feature that is additionally processed when reading a file.
 * FEAT_OPF: feature printed only when the full header listing is asked.
 */
#define FEAT_OPA(n, func) \
	[n] = { .name = #n, .write = write_##func, .print = print_##func }
#define FEAT_OPP(n, func) \
	[n] = { .name = #n, .write = write_##func, .print = print_##func, \
		.process = process_##func }
#define FEAT_OPF(n, func) \
	[n] = { .name = #n, .write = write_##func, .print = print_##func, \
		.full_only = true }

/* feature_ops not implemented: */
#define print_tracing_data	NULL
#define print_build_id		NULL

/* dispatch table, indexed by HEADER_* feature bit */
static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
	FEAT_OPP(HEADER_TRACING_DATA,	tracing_data),
	FEAT_OPP(HEADER_BUILD_ID,	build_id),
	FEAT_OPA(HEADER_HOSTNAME,	hostname),
	FEAT_OPA(HEADER_OSRELEASE,	osrelease),
	FEAT_OPA(HEADER_VERSION,	version),
	FEAT_OPA(HEADER_ARCH,		arch),
	FEAT_OPA(HEADER_NRCPUS,		nrcpus),
	FEAT_OPA(HEADER_CPUDESC,	cpudesc),
	FEAT_OPA(HEADER_CPUID,		cpuid),
	FEAT_OPA(HEADER_TOTAL_MEM,	total_mem),
	FEAT_OPA(HEADER_EVENT_DESC,	event_desc),
	FEAT_OPA(HEADER_CMDLINE,	cmdline),
	FEAT_OPF(HEADER_CPU_TOPOLOGY,	cpu_topology),
	FEAT_OPF(HEADER_NUMA_TOPOLOGY,	numa_topology),
	FEAT_OPA(HEADER_BRANCH_STACK,	branch_stack),
};
1546 
/* Context passed to perf_file_section__fprintf_info() via 'data'. */
struct header_print_data {
	FILE *fp;	/* destination stream */
	bool full; /* extended list of headers */
};
1551 
1552 static int perf_file_section__fprintf_info(struct perf_file_section *section,
1553 					   struct perf_header *ph,
1554 					   int feat, int fd, void *data)
1555 {
1556 	struct header_print_data *hd = data;
1557 
1558 	if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
1559 		pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
1560 				"%d, continuing...\n", section->offset, feat);
1561 		return 0;
1562 	}
1563 	if (feat >= HEADER_LAST_FEATURE) {
1564 		pr_warning("unknown feature %d\n", feat);
1565 		return 0;
1566 	}
1567 	if (!feat_ops[feat].print)
1568 		return 0;
1569 
1570 	if (!feat_ops[feat].full_only || hd->full)
1571 		feat_ops[feat].print(ph, fd, hd->fp);
1572 	else
1573 		fprintf(hd->fp, "# %s info available, use -I to display\n",
1574 			feat_ops[feat].name);
1575 
1576 	return 0;
1577 }
1578 
1579 int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
1580 {
1581 	struct header_print_data hd;
1582 	struct perf_header *header = &session->header;
1583 	int fd = session->fd;
1584 	hd.fp = fp;
1585 	hd.full = full;
1586 
1587 	perf_header__process_sections(header, fd, &hd,
1588 				      perf_file_section__fprintf_info);
1589 	return 0;
1590 }
1591 
1592 static int do_write_feat(int fd, struct perf_header *h, int type,
1593 			 struct perf_file_section **p,
1594 			 struct perf_evlist *evlist)
1595 {
1596 	int err;
1597 	int ret = 0;
1598 
1599 	if (perf_header__has_feat(h, type)) {
1600 		if (!feat_ops[type].write)
1601 			return -1;
1602 
1603 		(*p)->offset = lseek(fd, 0, SEEK_CUR);
1604 
1605 		err = feat_ops[type].write(fd, h, evlist);
1606 		if (err < 0) {
1607 			pr_debug("failed to write feature %d\n", type);
1608 
1609 			/* undo anything written */
1610 			lseek(fd, (*p)->offset, SEEK_SET);
1611 
1612 			return -1;
1613 		}
1614 		(*p)->size = lseek(fd, 0, SEEK_CUR) - (*p)->offset;
1615 		(*p)++;
1616 	}
1617 	return ret;
1618 }
1619 
1620 static int perf_header__adds_write(struct perf_header *header,
1621 				   struct perf_evlist *evlist, int fd)
1622 {
1623 	int nr_sections;
1624 	struct perf_file_section *feat_sec, *p;
1625 	int sec_size;
1626 	u64 sec_start;
1627 	int feat;
1628 	int err;
1629 
1630 	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
1631 	if (!nr_sections)
1632 		return 0;
1633 
1634 	feat_sec = p = calloc(sizeof(*feat_sec), nr_sections);
1635 	if (feat_sec == NULL)
1636 		return -ENOMEM;
1637 
1638 	sec_size = sizeof(*feat_sec) * nr_sections;
1639 
1640 	sec_start = header->data_offset + header->data_size;
1641 	lseek(fd, sec_start + sec_size, SEEK_SET);
1642 
1643 	for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
1644 		if (do_write_feat(fd, header, feat, &p, evlist))
1645 			perf_header__clear_feat(header, feat);
1646 	}
1647 
1648 	lseek(fd, sec_start, SEEK_SET);
1649 	/*
1650 	 * may write more than needed due to dropped feature, but
1651 	 * this is okay, reader will skip the mising entries
1652 	 */
1653 	err = do_write(fd, feat_sec, sec_size);
1654 	if (err < 0)
1655 		pr_debug("failed to write feature section\n");
1656 	free(feat_sec);
1657 	return err;
1658 }
1659 
1660 int perf_header__write_pipe(int fd)
1661 {
1662 	struct perf_pipe_file_header f_header;
1663 	int err;
1664 
1665 	f_header = (struct perf_pipe_file_header){
1666 		.magic	   = PERF_MAGIC,
1667 		.size	   = sizeof(f_header),
1668 	};
1669 
1670 	err = do_write(fd, &f_header, sizeof(f_header));
1671 	if (err < 0) {
1672 		pr_debug("failed to write perf pipe header\n");
1673 		return err;
1674 	}
1675 
1676 	return 0;
1677 }
1678 
/*
 * Write a complete perf.data file header around the already-written
 * data section: sample ids, attribute table, trace event types and —
 * when @at_exit is set — the feature sections, finishing with the
 * fixed-size file header at offset 0.
 *
 * When session->evlist differs from @evlist, the two lists are merged
 * pairwise (each evsel's ids are concatenated with its pair's).
 * Returns 0 on success, a negative do_write() error code on failure.
 */
int perf_session__write_header(struct perf_session *session,
			       struct perf_evlist *evlist,
			       int fd, bool at_exit)
{
	struct perf_file_header f_header;
	struct perf_file_attr   f_attr;
	struct perf_header *header = &session->header;
	struct perf_evsel *attr, *pair = NULL;
	int err;

	/* leave room for the fixed header, written last */
	lseek(fd, sizeof(f_header), SEEK_SET);

	if (session->evlist != evlist)
		pair = list_entry(session->evlist->entries.next, struct perf_evsel, node);

	/* first pass: write each evsel's sample ids, remembering offsets */
	list_for_each_entry(attr, &evlist->entries, node) {
		attr->id_offset = lseek(fd, 0, SEEK_CUR);
		err = do_write(fd, attr->id, attr->ids * sizeof(u64));
		if (err < 0) {
out_err_write:
			pr_debug("failed to write perf header\n");
			return err;
		}
		/* merged session: append the paired evsel's ids too */
		if (session->evlist != evlist) {
			err = do_write(fd, pair->id, pair->ids * sizeof(u64));
			if (err < 0)
				goto out_err_write;
			attr->ids += pair->ids;
			pair = list_entry(pair->node.next, struct perf_evsel, node);
		}
	}

	header->attr_offset = lseek(fd, 0, SEEK_CUR);

	/* second pass: the attribute table, pointing back at the ids */
	list_for_each_entry(attr, &evlist->entries, node) {
		f_attr = (struct perf_file_attr){
			.attr = attr->attr,
			.ids  = {
				.offset = attr->id_offset,
				.size   = attr->ids * sizeof(u64),
			}
		};
		err = do_write(fd, &f_attr, sizeof(f_attr));
		if (err < 0) {
			pr_debug("failed to write perf header attribute\n");
			return err;
		}
	}

	/* trace event type table (file-scope 'events' array) */
	header->event_offset = lseek(fd, 0, SEEK_CUR);
	header->event_size = event_count * sizeof(struct perf_trace_event_type);
	if (events) {
		err = do_write(fd, events, header->event_size);
		if (err < 0) {
			pr_debug("failed to write perf header events\n");
			return err;
		}
	}

	header->data_offset = lseek(fd, 0, SEEK_CUR);

	/* feature sections are only final once recording is done */
	if (at_exit) {
		err = perf_header__adds_write(header, evlist, fd);
		if (err < 0)
			return err;
	}

	f_header = (struct perf_file_header){
		.magic	   = PERF_MAGIC,
		.size	   = sizeof(f_header),
		.attr_size = sizeof(f_attr),
		.attrs = {
			.offset = header->attr_offset,
			.size   = evlist->nr_entries * sizeof(f_attr),
		},
		.data = {
			.offset = header->data_offset,
			.size	= header->data_size,
		},
		.event_types = {
			.offset = header->event_offset,
			.size	= header->event_size,
		},
	};

	memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));

	/* finally, the fixed header at the front of the file */
	lseek(fd, 0, SEEK_SET);
	err = do_write(fd, &f_header, sizeof(f_header));
	if (err < 0) {
		pr_debug("failed to write perf header\n");
		return err;
	}
	/* leave the fd positioned at the end of the data section */
	lseek(fd, header->data_offset + header->data_size, SEEK_SET);

	header->frozen = 1;
	return 0;
}
1777 
1778 static int perf_header__getbuffer64(struct perf_header *header,
1779 				    int fd, void *buf, size_t size)
1780 {
1781 	if (readn(fd, buf, size) <= 0)
1782 		return -1;
1783 
1784 	if (header->needs_swap)
1785 		mem_bswap_64(buf, size);
1786 
1787 	return 0;
1788 }
1789 
1790 int perf_header__process_sections(struct perf_header *header, int fd,
1791 				  void *data,
1792 				  int (*process)(struct perf_file_section *section,
1793 						 struct perf_header *ph,
1794 						 int feat, int fd, void *data))
1795 {
1796 	struct perf_file_section *feat_sec, *sec;
1797 	int nr_sections;
1798 	int sec_size;
1799 	int feat;
1800 	int err;
1801 
1802 	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
1803 	if (!nr_sections)
1804 		return 0;
1805 
1806 	feat_sec = sec = calloc(sizeof(*feat_sec), nr_sections);
1807 	if (!feat_sec)
1808 		return -1;
1809 
1810 	sec_size = sizeof(*feat_sec) * nr_sections;
1811 
1812 	lseek(fd, header->data_offset + header->data_size, SEEK_SET);
1813 
1814 	err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
1815 	if (err < 0)
1816 		goto out_free;
1817 
1818 	for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
1819 		err = process(sec++, header, feat, fd, data);
1820 		if (err < 0)
1821 			goto out_free;
1822 	}
1823 	err = 0;
1824 out_free:
1825 	free(feat_sec);
1826 	return err;
1827 }
1828 
/*
 * On-disk size of perf_event_attr for each file-format ABI revision;
 * the zero entry terminates the table for try_all_file_abis().
 */
static const int attr_file_abi_sizes[] = {
	[0] = PERF_ATTR_SIZE_VER0,
	[1] = PERF_ATTR_SIZE_VER1,
	0,
};
1834 
1835 /*
1836  * In the legacy file format, the magic number is not used to encode endianness.
1837  * hdr_sz was used to encode endianness. But given that hdr_sz can vary based
1838  * on ABI revisions, we need to try all combinations for all endianness to
1839  * detect the endianness.
1840  */
1841 static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph)
1842 {
1843 	uint64_t ref_size, attr_size;
1844 	int i;
1845 
1846 	for (i = 0 ; attr_file_abi_sizes[i]; i++) {
1847 		ref_size = attr_file_abi_sizes[i]
1848 			 + sizeof(struct perf_file_section);
1849 		if (hdr_sz != ref_size) {
1850 			attr_size = bswap_64(hdr_sz);
1851 			if (attr_size != ref_size)
1852 				continue;
1853 
1854 			ph->needs_swap = true;
1855 		}
1856 		pr_debug("ABI%d perf.data file detected, need_swap=%d\n",
1857 			 i,
1858 			 ph->needs_swap);
1859 		return 0;
1860 	}
1861 	/* could not determine endianness */
1862 	return -1;
1863 }
1864 
/* Pipe-mode header size for ABI revision 0 (magic + size, 2 x u64). */
#define PERF_PIPE_HDR_VER0	16

/* zero entry terminates the table for try_all_pipe_abis() */
static const size_t attr_pipe_abi_sizes[] = {
	[0] = PERF_PIPE_HDR_VER0,
	0,
};
1871 
1872 /*
1873  * In the legacy pipe format, there is an implicit assumption that endiannesss
1874  * between host recording the samples, and host parsing the samples is the
1875  * same. This is not always the case given that the pipe output may always be
1876  * redirected into a file and analyzed on a different machine with possibly a
1877  * different endianness and perf_event ABI revsions in the perf tool itself.
1878  */
1879 static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph)
1880 {
1881 	u64 attr_size;
1882 	int i;
1883 
1884 	for (i = 0 ; attr_pipe_abi_sizes[i]; i++) {
1885 		if (hdr_sz != attr_pipe_abi_sizes[i]) {
1886 			attr_size = bswap_64(hdr_sz);
1887 			if (attr_size != hdr_sz)
1888 				continue;
1889 
1890 			ph->needs_swap = true;
1891 		}
1892 		pr_debug("Pipe ABI%d perf.data file detected\n", i);
1893 		return 0;
1894 	}
1895 	return -1;
1896 }
1897 
1898 static int check_magic_endian(u64 magic, uint64_t hdr_sz,
1899 			      bool is_pipe, struct perf_header *ph)
1900 {
1901 	int ret;
1902 
1903 	/* check for legacy format */
1904 	ret = memcmp(&magic, __perf_magic1, sizeof(magic));
1905 	if (ret == 0) {
1906 		pr_debug("legacy perf.data format\n");
1907 		if (is_pipe)
1908 			return try_all_pipe_abis(hdr_sz, ph);
1909 
1910 		return try_all_file_abis(hdr_sz, ph);
1911 	}
1912 	/*
1913 	 * the new magic number serves two purposes:
1914 	 * - unique number to identify actual perf.data files
1915 	 * - encode endianness of file
1916 	 */
1917 
1918 	/* check magic number with one endianness */
1919 	if (magic == __perf_magic2)
1920 		return 0;
1921 
1922 	/* check magic number with opposite endianness */
1923 	if (magic != __perf_magic2_sw)
1924 		return -1;
1925 
1926 	ph->needs_swap = true;
1927 
1928 	return 0;
1929 }
1930 
/*
 * Read and validate the fixed perf.data file header from the start of
 * @fd, filling @header (raw, possibly swapped on the way in) and the
 * in-memory @ph (endianness flag, feature bits, section offsets).
 * Returns 0 on success, -1 on read/magic/size failure.
 */
int perf_file_header__read(struct perf_file_header *header,
			   struct perf_header *ph, int fd)
{
	int ret;

	lseek(fd, 0, SEEK_SET);

	ret = readn(fd, header, sizeof(*header));
	if (ret <= 0)
		return -1;

	if (check_magic_endian(header->magic,
			       header->attr_size, false, ph) < 0) {
		pr_debug("magic/endian check failed\n");
		return -1;
	}

	/* swap everything up to (but not including) the feature bitmap */
	if (ph->needs_swap) {
		mem_bswap_64(header, offsetof(struct perf_file_header,
			     adds_features));
	}

	if (header->size != sizeof(*header)) {
		/* Support the previous format */
		if (header->size == offsetof(typeof(*header), adds_features))
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
		else
			return -1;
	} else if (ph->needs_swap) {
		/*
		 * feature bitmap is declared as an array of unsigned longs --
		 * not good since its size can differ between the host that
		 * generated the data file and the host analyzing the file.
		 *
		 * We need to handle endianness, but we don't know the size of
		 * the unsigned long where the file was generated. Take a best
		 * guess at determining it: try 64-bit swap first (ie., file
		 * created on a 64-bit host), and check if the hostname feature
		 * bit is set (this feature bit is forced on as of fbe96f2).
		 * If the bit is not, undo the 64-bit swap and try a 32-bit
		 * swap. If the hostname bit is still not set (e.g., older data
		 * file), punt and fallback to the original behavior --
		 * clearing all feature bits and setting buildid.
		 */
		mem_bswap_64(&header->adds_features,
			    BITS_TO_U64(HEADER_FEAT_BITS));

		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
			/* unswap as u64 */
			mem_bswap_64(&header->adds_features,
				    BITS_TO_U64(HEADER_FEAT_BITS));

			/* unswap as u32 */
			mem_bswap_32(&header->adds_features,
				    BITS_TO_U32(HEADER_FEAT_BITS));
		}

		/* neither guess worked: assume only build ids are present */
		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
			set_bit(HEADER_BUILD_ID, header->adds_features);
		}
	}

	memcpy(&ph->adds_features, &header->adds_features,
	       sizeof(ph->adds_features));

	ph->event_offset = header->event_types.offset;
	ph->event_size   = header->event_types.size;
	ph->data_offset  = header->data.offset;
	ph->data_size	 = header->data.size;
	return 0;
}
2003 
2004 static int perf_file_section__process(struct perf_file_section *section,
2005 				      struct perf_header *ph,
2006 				      int feat, int fd, void *data)
2007 {
2008 	if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
2009 		pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
2010 			  "%d, continuing...\n", section->offset, feat);
2011 		return 0;
2012 	}
2013 
2014 	if (feat >= HEADER_LAST_FEATURE) {
2015 		pr_debug("unknown feature %d, continuing...\n", feat);
2016 		return 0;
2017 	}
2018 
2019 	if (!feat_ops[feat].process)
2020 		return 0;
2021 
2022 	return feat_ops[feat].process(section, ph, feat, fd, data);
2023 }
2024 
2025 static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
2026 				       struct perf_header *ph, int fd,
2027 				       bool repipe)
2028 {
2029 	int ret;
2030 
2031 	ret = readn(fd, header, sizeof(*header));
2032 	if (ret <= 0)
2033 		return -1;
2034 
2035 	if (check_magic_endian(header->magic, header->size, true, ph) < 0) {
2036 		pr_debug("endian/magic failed\n");
2037 		return -1;
2038 	}
2039 
2040 	if (ph->needs_swap)
2041 		header->size = bswap_64(header->size);
2042 
2043 	if (repipe && do_write(STDOUT_FILENO, header, sizeof(*header)) < 0)
2044 		return -1;
2045 
2046 	return 0;
2047 }
2048 
2049 static int perf_header__read_pipe(struct perf_session *session, int fd)
2050 {
2051 	struct perf_header *header = &session->header;
2052 	struct perf_pipe_file_header f_header;
2053 
2054 	if (perf_file_header__read_pipe(&f_header, header, fd,
2055 					session->repipe) < 0) {
2056 		pr_debug("incompatible file format\n");
2057 		return -EINVAL;
2058 	}
2059 
2060 	session->fd = fd;
2061 
2062 	return 0;
2063 }
2064 
2065 static int read_attr(int fd, struct perf_header *ph,
2066 		     struct perf_file_attr *f_attr)
2067 {
2068 	struct perf_event_attr *attr = &f_attr->attr;
2069 	size_t sz, left;
2070 	size_t our_sz = sizeof(f_attr->attr);
2071 	int ret;
2072 
2073 	memset(f_attr, 0, sizeof(*f_attr));
2074 
2075 	/* read minimal guaranteed structure */
2076 	ret = readn(fd, attr, PERF_ATTR_SIZE_VER0);
2077 	if (ret <= 0) {
2078 		pr_debug("cannot read %d bytes of header attr\n",
2079 			 PERF_ATTR_SIZE_VER0);
2080 		return -1;
2081 	}
2082 
2083 	/* on file perf_event_attr size */
2084 	sz = attr->size;
2085 
2086 	if (ph->needs_swap)
2087 		sz = bswap_32(sz);
2088 
2089 	if (sz == 0) {
2090 		/* assume ABI0 */
2091 		sz =  PERF_ATTR_SIZE_VER0;
2092 	} else if (sz > our_sz) {
2093 		pr_debug("file uses a more recent and unsupported ABI"
2094 			 " (%zu bytes extra)\n", sz - our_sz);
2095 		return -1;
2096 	}
2097 	/* what we have not yet read and that we know about */
2098 	left = sz - PERF_ATTR_SIZE_VER0;
2099 	if (left) {
2100 		void *ptr = attr;
2101 		ptr += PERF_ATTR_SIZE_VER0;
2102 
2103 		ret = readn(fd, ptr, left);
2104 	}
2105 	/* read perf_file_section, ids are read in caller */
2106 	ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids));
2107 
2108 	return ret <= 0 ? -1 : 0;
2109 }
2110 
2111 static int perf_evsel__set_tracepoint_name(struct perf_evsel *evsel,
2112 					   struct pevent *pevent)
2113 {
2114 	struct event_format *event = pevent_find_event(pevent,
2115 						       evsel->attr.config);
2116 	char bf[128];
2117 
2118 	if (event == NULL)
2119 		return -1;
2120 
2121 	snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
2122 	evsel->name = strdup(bf);
2123 	if (event->name == NULL)
2124 		return -1;
2125 
2126 	return 0;
2127 }
2128 
2129 static int perf_evlist__set_tracepoint_names(struct perf_evlist *evlist,
2130 					     struct pevent *pevent)
2131 {
2132 	struct perf_evsel *pos;
2133 
2134 	list_for_each_entry(pos, &evlist->entries, node) {
2135 		if (pos->attr.type == PERF_TYPE_TRACEPOINT &&
2136 		    perf_evsel__set_tracepoint_name(pos, pevent))
2137 			return -1;
2138 	}
2139 
2140 	return 0;
2141 }
2142 
/*
 * Parse the perf.data header from @fd into @session: build the evlist
 * from the attribute table (with per-evsel sample ids), load the trace
 * event type table, process the feature sections and leave the fd
 * positioned at the start of the data section.
 *
 * Returns 0 on success, or a negative errno-style code on failure.
 */
int perf_session__read_header(struct perf_session *session, int fd)
{
	struct perf_header *header = &session->header;
	struct perf_file_header	f_header;
	struct perf_file_attr	f_attr;
	u64			f_id;
	int nr_attrs, nr_ids, i, j;

	session->evlist = perf_evlist__new(NULL, NULL);
	if (session->evlist == NULL)
		return -ENOMEM;

	/* pipe input has its own, much smaller, header */
	if (session->fd_pipe)
		return perf_header__read_pipe(session, fd);

	if (perf_file_header__read(&f_header, header, fd) < 0)
		return -EINVAL;

	nr_attrs = f_header.attrs.size / f_header.attr_size;
	lseek(fd, f_header.attrs.offset, SEEK_SET);

	for (i = 0; i < nr_attrs; i++) {
		struct perf_evsel *evsel;
		off_t tmp;

		if (read_attr(fd, header, &f_attr) < 0)
			goto out_errno;

		if (header->needs_swap)
			perf_event__attr_swap(&f_attr.attr);

		/* remember our place in the attr table before seeking away */
		tmp = lseek(fd, 0, SEEK_CUR);
		evsel = perf_evsel__new(&f_attr.attr, i);

		if (evsel == NULL)
			goto out_delete_evlist;
		/*
		 * Do it before so that if perf_evsel__alloc_id fails, this
		 * entry gets purged too at perf_evlist__delete().
		 */
		perf_evlist__add(session->evlist, evsel);

		nr_ids = f_attr.ids.size / sizeof(u64);
		/*
		 * We don't have the cpu and thread maps on the header, so
		 * for allocating the perf_sample_id table we fake 1 cpu and
		 * hattr->ids threads.
		 */
		if (perf_evsel__alloc_id(evsel, 1, nr_ids))
			goto out_delete_evlist;

		lseek(fd, f_attr.ids.offset, SEEK_SET);

		for (j = 0; j < nr_ids; j++) {
			if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
				goto out_errno;

			perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);
		}

		/* back to the next attr table entry */
		lseek(fd, tmp, SEEK_SET);
	}

	symbol_conf.nr_events = nr_attrs;

	/* trace event type table, stored in the file-scope 'events' array */
	if (f_header.event_types.size) {
		lseek(fd, f_header.event_types.offset, SEEK_SET);
		/*
		 * NOTE(review): 'events' is leaked (and the evlist too) if
		 * getbuffer64 below fails — presumably acceptable since the
		 * tool exits on header errors; confirm.
		 */
		events = malloc(f_header.event_types.size);
		if (events == NULL)
			return -ENOMEM;
		if (perf_header__getbuffer64(header, fd, events,
					     f_header.event_types.size))
			goto out_errno;
		event_count =  f_header.event_types.size / sizeof(struct perf_trace_event_type);
	}

	perf_header__process_sections(header, fd, &session->pevent,
				      perf_file_section__process);

	lseek(fd, header->data_offset, SEEK_SET);

	if (perf_evlist__set_tracepoint_names(session->evlist, session->pevent))
		goto out_delete_evlist;

	header->frozen = 1;
	return 0;
out_errno:
	return -errno;

out_delete_evlist:
	perf_evlist__delete(session->evlist);
	session->evlist = NULL;
	return -ENOMEM;
}
2237 
2238 int perf_event__synthesize_attr(struct perf_tool *tool,
2239 				struct perf_event_attr *attr, u16 ids, u64 *id,
2240 				perf_event__handler_t process)
2241 {
2242 	union perf_event *ev;
2243 	size_t size;
2244 	int err;
2245 
2246 	size = sizeof(struct perf_event_attr);
2247 	size = ALIGN(size, sizeof(u64));
2248 	size += sizeof(struct perf_event_header);
2249 	size += ids * sizeof(u64);
2250 
2251 	ev = malloc(size);
2252 
2253 	if (ev == NULL)
2254 		return -ENOMEM;
2255 
2256 	ev->attr.attr = *attr;
2257 	memcpy(ev->attr.id, id, ids * sizeof(u64));
2258 
2259 	ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
2260 	ev->attr.header.size = size;
2261 
2262 	err = process(tool, ev, NULL, NULL);
2263 
2264 	free(ev);
2265 
2266 	return err;
2267 }
2268 
2269 int perf_event__synthesize_attrs(struct perf_tool *tool,
2270 				   struct perf_session *session,
2271 				   perf_event__handler_t process)
2272 {
2273 	struct perf_evsel *attr;
2274 	int err = 0;
2275 
2276 	list_for_each_entry(attr, &session->evlist->entries, node) {
2277 		err = perf_event__synthesize_attr(tool, &attr->attr, attr->ids,
2278 						  attr->id, process);
2279 		if (err) {
2280 			pr_debug("failed to create perf header attribute\n");
2281 			return err;
2282 		}
2283 	}
2284 
2285 	return err;
2286 }
2287 
2288 int perf_event__process_attr(union perf_event *event,
2289 			     struct perf_evlist **pevlist)
2290 {
2291 	unsigned int i, ids, n_ids;
2292 	struct perf_evsel *evsel;
2293 	struct perf_evlist *evlist = *pevlist;
2294 
2295 	if (evlist == NULL) {
2296 		*pevlist = evlist = perf_evlist__new(NULL, NULL);
2297 		if (evlist == NULL)
2298 			return -ENOMEM;
2299 	}
2300 
2301 	evsel = perf_evsel__new(&event->attr.attr, evlist->nr_entries);
2302 	if (evsel == NULL)
2303 		return -ENOMEM;
2304 
2305 	perf_evlist__add(evlist, evsel);
2306 
2307 	ids = event->header.size;
2308 	ids -= (void *)&event->attr.id - (void *)event;
2309 	n_ids = ids / sizeof(u64);
2310 	/*
2311 	 * We don't have the cpu and thread maps on the header, so
2312 	 * for allocating the perf_sample_id table we fake 1 cpu and
2313 	 * hattr->ids threads.
2314 	 */
2315 	if (perf_evsel__alloc_id(evsel, 1, n_ids))
2316 		return -ENOMEM;
2317 
2318 	for (i = 0; i < n_ids; i++) {
2319 		perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]);
2320 	}
2321 
2322 	return 0;
2323 }
2324 
2325 int perf_event__synthesize_event_type(struct perf_tool *tool,
2326 				      u64 event_id, char *name,
2327 				      perf_event__handler_t process,
2328 				      struct machine *machine)
2329 {
2330 	union perf_event ev;
2331 	size_t size = 0;
2332 	int err = 0;
2333 
2334 	memset(&ev, 0, sizeof(ev));
2335 
2336 	ev.event_type.event_type.event_id = event_id;
2337 	memset(ev.event_type.event_type.name, 0, MAX_EVENT_NAME);
2338 	strncpy(ev.event_type.event_type.name, name, MAX_EVENT_NAME - 1);
2339 
2340 	ev.event_type.header.type = PERF_RECORD_HEADER_EVENT_TYPE;
2341 	size = strlen(ev.event_type.event_type.name);
2342 	size = ALIGN(size, sizeof(u64));
2343 	ev.event_type.header.size = sizeof(ev.event_type) -
2344 		(sizeof(ev.event_type.event_type.name) - size);
2345 
2346 	err = process(tool, &ev, NULL, machine);
2347 
2348 	return err;
2349 }
2350 
2351 int perf_event__synthesize_event_types(struct perf_tool *tool,
2352 				       perf_event__handler_t process,
2353 				       struct machine *machine)
2354 {
2355 	struct perf_trace_event_type *type;
2356 	int i, err = 0;
2357 
2358 	for (i = 0; i < event_count; i++) {
2359 		type = &events[i];
2360 
2361 		err = perf_event__synthesize_event_type(tool, type->event_id,
2362 							type->name, process,
2363 							machine);
2364 		if (err) {
2365 			pr_debug("failed to create perf header event type\n");
2366 			return err;
2367 		}
2368 	}
2369 
2370 	return err;
2371 }
2372 
2373 int perf_event__process_event_type(struct perf_tool *tool __unused,
2374 				   union perf_event *event)
2375 {
2376 	if (perf_header__push_event(event->event_type.event_type.event_id,
2377 				    event->event_type.event_type.name) < 0)
2378 		return -ENOMEM;
2379 
2380 	return 0;
2381 }
2382 
2383 int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd,
2384 					struct perf_evlist *evlist,
2385 					perf_event__handler_t process)
2386 {
2387 	union perf_event ev;
2388 	struct tracing_data *tdata;
2389 	ssize_t size = 0, aligned_size = 0, padding;
2390 	int err __used = 0;
2391 
2392 	/*
2393 	 * We are going to store the size of the data followed
2394 	 * by the data contents. Since the fd descriptor is a pipe,
2395 	 * we cannot seek back to store the size of the data once
2396 	 * we know it. Instead we:
2397 	 *
2398 	 * - write the tracing data to the temp file
2399 	 * - get/write the data size to pipe
2400 	 * - write the tracing data from the temp file
2401 	 *   to the pipe
2402 	 */
2403 	tdata = tracing_data_get(&evlist->entries, fd, true);
2404 	if (!tdata)
2405 		return -1;
2406 
2407 	memset(&ev, 0, sizeof(ev));
2408 
2409 	ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
2410 	size = tdata->size;
2411 	aligned_size = ALIGN(size, sizeof(u64));
2412 	padding = aligned_size - size;
2413 	ev.tracing_data.header.size = sizeof(ev.tracing_data);
2414 	ev.tracing_data.size = aligned_size;
2415 
2416 	process(tool, &ev, NULL, NULL);
2417 
2418 	/*
2419 	 * The put function will copy all the tracing data
2420 	 * stored in temp file to the pipe.
2421 	 */
2422 	tracing_data_put(tdata);
2423 
2424 	write_padded(fd, NULL, 0, padding);
2425 
2426 	return aligned_size;
2427 }
2428 
2429 int perf_event__process_tracing_data(union perf_event *event,
2430 				     struct perf_session *session)
2431 {
2432 	ssize_t size_read, padding, size = event->tracing_data.size;
2433 	off_t offset = lseek(session->fd, 0, SEEK_CUR);
2434 	char buf[BUFSIZ];
2435 
2436 	/* setup for reading amidst mmap */
2437 	lseek(session->fd, offset + sizeof(struct tracing_data_event),
2438 	      SEEK_SET);
2439 
2440 	size_read = trace_report(session->fd, &session->pevent,
2441 				 session->repipe);
2442 	padding = ALIGN(size_read, sizeof(u64)) - size_read;
2443 
2444 	if (read(session->fd, buf, padding) < 0)
2445 		die("reading input file");
2446 	if (session->repipe) {
2447 		int retw = write(STDOUT_FILENO, buf, padding);
2448 		if (retw <= 0 || retw != padding)
2449 			die("repiping tracing data padding");
2450 	}
2451 
2452 	if (size_read + padding != size)
2453 		die("tracing data size mismatch");
2454 
2455 	return size_read + padding;
2456 }
2457 
2458 int perf_event__synthesize_build_id(struct perf_tool *tool,
2459 				    struct dso *pos, u16 misc,
2460 				    perf_event__handler_t process,
2461 				    struct machine *machine)
2462 {
2463 	union perf_event ev;
2464 	size_t len;
2465 	int err = 0;
2466 
2467 	if (!pos->hit)
2468 		return err;
2469 
2470 	memset(&ev, 0, sizeof(ev));
2471 
2472 	len = pos->long_name_len + 1;
2473 	len = ALIGN(len, NAME_ALIGN);
2474 	memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
2475 	ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
2476 	ev.build_id.header.misc = misc;
2477 	ev.build_id.pid = machine->pid;
2478 	ev.build_id.header.size = sizeof(ev.build_id) + len;
2479 	memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
2480 
2481 	err = process(tool, &ev, NULL, machine);
2482 
2483 	return err;
2484 }
2485 
2486 int perf_event__process_build_id(struct perf_tool *tool __used,
2487 				 union perf_event *event,
2488 				 struct perf_session *session)
2489 {
2490 	__event_process_build_id(&event->build_id,
2491 				 event->build_id.filename,
2492 				 session);
2493 	return 0;
2494 }
2495 
/*
 * Turn off caching of build-ids for this run; checked elsewhere via the
 * file-local no_buildid_cache flag.
 */
void disable_buildid_cache(void)
{
	no_buildid_cache = true;
}
2500