xref: /openbmc/linux/tools/perf/util/header.c (revision 171f1bc7)
1 #define _FILE_OFFSET_BITS 64
2 
3 #include "util.h"
4 #include <sys/types.h>
5 #include <byteswap.h>
6 #include <unistd.h>
7 #include <stdio.h>
8 #include <stdlib.h>
9 #include <linux/list.h>
10 #include <linux/kernel.h>
11 #include <sys/utsname.h>
12 
13 #include "evlist.h"
14 #include "evsel.h"
15 #include "header.h"
16 #include "../perf.h"
17 #include "trace-event.h"
18 #include "session.h"
19 #include "symbol.h"
20 #include "debug.h"
21 #include "cpumap.h"
22 
/* When true, write_build_id() skips populating the on-disk build-id cache.
 * Never set in this chunk — presumably toggled elsewhere; confirm. */
static bool no_buildid_cache = false;

/* Growable array of user-defined trace event types, managed by
 * perf_header__push_event() / perf_header__find_event(). */
static int event_count;
static struct perf_trace_event_type *events;

/* Snapshot of the perf command line, recorded by perf_header__set_cmdline()
 * and emitted by write_cmdline(). */
static u32 header_argc;
static const char **header_argv;

/* Forward declarations: defined later in this file. */
static int dsos__write_buildid_table(struct perf_header *header, int fd);
static int perf_session__cache_build_ids(struct perf_session *session);
33 
34 int perf_header__push_event(u64 id, const char *name)
35 {
36 	if (strlen(name) > MAX_EVENT_NAME)
37 		pr_warning("Event %s will be truncated\n", name);
38 
39 	if (!events) {
40 		events = malloc(sizeof(struct perf_trace_event_type));
41 		if (events == NULL)
42 			return -ENOMEM;
43 	} else {
44 		struct perf_trace_event_type *nevents;
45 
46 		nevents = realloc(events, (event_count + 1) * sizeof(*events));
47 		if (nevents == NULL)
48 			return -ENOMEM;
49 		events = nevents;
50 	}
51 	memset(&events[event_count], 0, sizeof(struct perf_trace_event_type));
52 	events[event_count].event_id = id;
53 	strncpy(events[event_count].name, name, MAX_EVENT_NAME - 1);
54 	event_count++;
55 	return 0;
56 }
57 
58 char *perf_header__find_event(u64 id)
59 {
60 	int i;
61 	for (i = 0 ; i < event_count; i++) {
62 		if (events[i].event_id == id)
63 			return events[i].name;
64 	}
65 	return NULL;
66 }
67 
/* 64-bit file magic: the bytes "PERFFILE" reinterpreted as a u64.
 * NOTE(review): the cast assumes the literal is readable as an aligned
 * u64 (strict-aliasing gray area) — long-standing idiom, but worth
 * confirming on strict platforms. */
static const char *__perf_magic = "PERFFILE";

#define PERF_MAGIC	(*(u64 *)__perf_magic)
71 
/* On-disk layout of one attribute entry in the perf.data header:
 * the event attr followed by the file section holding its ids. */
struct perf_file_attr {
	struct perf_event_attr	attr;
	struct perf_file_section	ids;
};
76 
/* Mark feature @feat as present in the header's feature bitmap. */
void perf_header__set_feat(struct perf_header *header, int feat)
{
	set_bit(feat, header->adds_features);
}
81 
/* Remove feature @feat from the header's feature bitmap. */
void perf_header__clear_feat(struct perf_header *header, int feat)
{
	clear_bit(feat, header->adds_features);
}
86 
/* Test whether feature @feat is present in the header's feature bitmap. */
bool perf_header__has_feat(const struct perf_header *header, int feat)
{
	return test_bit(feat, header->adds_features);
}
91 
/*
 * Write all @size bytes of @buf to @fd, retrying on short writes.
 * Returns 0 on success, -errno on write failure.
 *
 * Fixes: the original advanced a void pointer (`buf += ret`), which is
 * a GCC extension and undefined in ISO C, and stored write()'s ssize_t
 * result in an int.  Use a char cursor and ssize_t instead.
 */
static int do_write(int fd, const void *buf, size_t size)
{
	const char *p = buf;

	while (size) {
		ssize_t ret = write(fd, p, size);

		if (ret < 0)
			return -errno;

		size -= ret;
		p += ret;
	}

	return 0;
}
106 
107 #define NAME_ALIGN 64
108 
109 static int write_padded(int fd, const void *bf, size_t count,
110 			size_t count_aligned)
111 {
112 	static const char zero_buf[NAME_ALIGN];
113 	int err = do_write(fd, bf, count);
114 
115 	if (!err)
116 		err = do_write(fd, zero_buf, count_aligned - count);
117 
118 	return err;
119 }
120 
121 static int do_write_string(int fd, const char *str)
122 {
123 	u32 len, olen;
124 	int ret;
125 
126 	olen = strlen(str) + 1;
127 	len = ALIGN(olen, NAME_ALIGN);
128 
129 	/* write len, incl. \0 */
130 	ret = do_write(fd, &len, sizeof(len));
131 	if (ret < 0)
132 		return ret;
133 
134 	return write_padded(fd, str, olen, len);
135 }
136 
137 static char *do_read_string(int fd, struct perf_header *ph)
138 {
139 	ssize_t sz, ret;
140 	u32 len;
141 	char *buf;
142 
143 	sz = read(fd, &len, sizeof(len));
144 	if (sz < (ssize_t)sizeof(len))
145 		return NULL;
146 
147 	if (ph->needs_swap)
148 		len = bswap_32(len);
149 
150 	buf = malloc(len);
151 	if (!buf)
152 		return NULL;
153 
154 	ret = read(fd, buf, len);
155 	if (ret == (ssize_t)len) {
156 		/*
157 		 * strings are padded by zeroes
158 		 * thus the actual strlen of buf
159 		 * may be less than len
160 		 */
161 		return buf;
162 	}
163 
164 	free(buf);
165 	return NULL;
166 }
167 
168 int
169 perf_header__set_cmdline(int argc, const char **argv)
170 {
171 	int i;
172 
173 	header_argc = (u32)argc;
174 
175 	/* do not include NULL termination */
176 	header_argv = calloc(argc, sizeof(char *));
177 	if (!header_argv)
178 		return -ENOMEM;
179 
180 	/*
181 	 * must copy argv contents because it gets moved
182 	 * around during option parsing
183 	 */
184 	for (i = 0; i < argc ; i++)
185 		header_argv[i] = argv[i];
186 
187 	return 0;
188 }
189 
/* HEADER_TRACE_INFO: delegate to read_tracing_data(), which emits the
 * tracing metadata for the evlist's events into @fd (see trace-event.h). */
static int write_trace_info(int fd, struct perf_header *h __used,
			    struct perf_evlist *evlist)
{
	return read_tracing_data(fd, &evlist->entries);
}
195 
196 
/*
 * HEADER_BUILD_ID: dump the build-id table for all DSOs and, unless
 * disabled via no_buildid_cache, populate the on-disk build-id cache
 * as a side effect.
 */
static int write_build_id(int fd, struct perf_header *h,
			  struct perf_evlist *evlist __used)
{
	struct perf_session *session;
	int err;

	/* the header is embedded in a perf_session; recover the container */
	session = container_of(h, struct perf_session, header);

	err = dsos__write_buildid_table(h, fd);
	if (err < 0) {
		pr_debug("failed to write buildid table\n");
		return err;
	}
	/* caching failures are deliberately ignored (best effort) */
	if (!no_buildid_cache)
		perf_session__cache_build_ids(session);

	return 0;
}
215 
216 static int write_hostname(int fd, struct perf_header *h __used,
217 			  struct perf_evlist *evlist __used)
218 {
219 	struct utsname uts;
220 	int ret;
221 
222 	ret = uname(&uts);
223 	if (ret < 0)
224 		return -1;
225 
226 	return do_write_string(fd, uts.nodename);
227 }
228 
229 static int write_osrelease(int fd, struct perf_header *h __used,
230 			   struct perf_evlist *evlist __used)
231 {
232 	struct utsname uts;
233 	int ret;
234 
235 	ret = uname(&uts);
236 	if (ret < 0)
237 		return -1;
238 
239 	return do_write_string(fd, uts.release);
240 }
241 
242 static int write_arch(int fd, struct perf_header *h __used,
243 		      struct perf_evlist *evlist __used)
244 {
245 	struct utsname uts;
246 	int ret;
247 
248 	ret = uname(&uts);
249 	if (ret < 0)
250 		return -1;
251 
252 	return do_write_string(fd, uts.machine);
253 }
254 
/* HEADER_VERSION: record the perf version string. */
static int write_version(int fd, struct perf_header *h __used,
			 struct perf_evlist *evlist __used)
{
	return do_write_string(fd, perf_version_string);
}
260 
261 static int write_cpudesc(int fd, struct perf_header *h __used,
262 		       struct perf_evlist *evlist __used)
263 {
264 #ifndef CPUINFO_PROC
265 #define CPUINFO_PROC NULL
266 #endif
267 	FILE *file;
268 	char *buf = NULL;
269 	char *s, *p;
270 	const char *search = CPUINFO_PROC;
271 	size_t len = 0;
272 	int ret = -1;
273 
274 	if (!search)
275 		return -1;
276 
277 	file = fopen("/proc/cpuinfo", "r");
278 	if (!file)
279 		return -1;
280 
281 	while (getline(&buf, &len, file) > 0) {
282 		ret = strncmp(buf, search, strlen(search));
283 		if (!ret)
284 			break;
285 	}
286 
287 	if (ret)
288 		goto done;
289 
290 	s = buf;
291 
292 	p = strchr(buf, ':');
293 	if (p && *(p+1) == ' ' && *(p+2))
294 		s = p + 2;
295 	p = strchr(s, '\n');
296 	if (p)
297 		*p = '\0';
298 
299 	/* squash extra space characters (branding string) */
300 	p = s;
301 	while (*p) {
302 		if (isspace(*p)) {
303 			char *r = p + 1;
304 			char *q = r;
305 			*p = ' ';
306 			while (*q && isspace(*q))
307 				q++;
308 			if (q != (p+1))
309 				while ((*r++ = *q++));
310 		}
311 		p++;
312 	}
313 	ret = do_write_string(fd, s);
314 done:
315 	free(buf);
316 	fclose(file);
317 	return ret;
318 }
319 
320 static int write_nrcpus(int fd, struct perf_header *h __used,
321 			struct perf_evlist *evlist __used)
322 {
323 	long nr;
324 	u32 nrc, nra;
325 	int ret;
326 
327 	nr = sysconf(_SC_NPROCESSORS_CONF);
328 	if (nr < 0)
329 		return -1;
330 
331 	nrc = (u32)(nr & UINT_MAX);
332 
333 	nr = sysconf(_SC_NPROCESSORS_ONLN);
334 	if (nr < 0)
335 		return -1;
336 
337 	nra = (u32)(nr & UINT_MAX);
338 
339 	ret = do_write(fd, &nrc, sizeof(nrc));
340 	if (ret < 0)
341 		return ret;
342 
343 	return do_write(fd, &nra, sizeof(nra));
344 }
345 
/*
 * HEADER_EVENT_DESC layout: event count, sizeof(perf_event_attr), then
 * per event: the attr, its id count, its name string, and the ids.
 * Mirrors the reader in print_event_desc().
 */
static int write_event_desc(int fd, struct perf_header *h __used,
			    struct perf_evlist *evlist)
{
	struct perf_evsel *attr;
	u32 nre = 0, nri, sz;
	int ret;

	list_for_each_entry(attr, &evlist->entries, node)
		nre++;

	/*
	 * write number of events
	 */
	ret = do_write(fd, &nre, sizeof(nre));
	if (ret < 0)
		return ret;

	/*
	 * size of perf_event_attr struct
	 */
	sz = (u32)sizeof(attr->attr);
	ret = do_write(fd, &sz, sizeof(sz));
	if (ret < 0)
		return ret;

	list_for_each_entry(attr, &evlist->entries, node) {

		ret = do_write(fd, &attr->attr, sz);
		if (ret < 0)
			return ret;
		/*
		 * write number of unique id per event
		 * there is one id per instance of an event
		 *
		 * copy into an nri to be independent of the
		 * type of ids,
		 */
		nri = attr->ids;
		ret = do_write(fd, &nri, sizeof(nri));
		if (ret < 0)
			return ret;

		/*
		 * write event string as passed on cmdline
		 */
		ret = do_write_string(fd, attr->name);
		if (ret < 0)
			return ret;
		/*
		 * write unique ids for this event
		 */
		ret = do_write(fd, attr->id, attr->ids * sizeof(u64));
		if (ret < 0)
			return ret;
	}
	return 0;
}
403 
404 static int write_cmdline(int fd, struct perf_header *h __used,
405 			 struct perf_evlist *evlist __used)
406 {
407 	char buf[MAXPATHLEN];
408 	char proc[32];
409 	u32 i, n;
410 	int ret;
411 
412 	/*
413 	 * actual atual path to perf binary
414 	 */
415 	sprintf(proc, "/proc/%d/exe", getpid());
416 	ret = readlink(proc, buf, sizeof(buf));
417 	if (ret <= 0)
418 		return -1;
419 
420 	/* readlink() does not add null termination */
421 	buf[ret] = '\0';
422 
423 	/* account for binary path */
424 	n = header_argc + 1;
425 
426 	ret = do_write(fd, &n, sizeof(n));
427 	if (ret < 0)
428 		return ret;
429 
430 	ret = do_write_string(fd, buf);
431 	if (ret < 0)
432 		return ret;
433 
434 	for (i = 0 ; i < header_argc; i++) {
435 		ret = do_write_string(fd, header_argv[i]);
436 		if (ret < 0)
437 			return ret;
438 	}
439 	return 0;
440 }
441 
/* sysfs locations of the per-cpu core/thread sibling lists */
#define CORE_SIB_FMT \
	"/sys/devices/system/cpu/cpu%d/topology/core_siblings_list"
#define THRD_SIB_FMT \
	"/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list"

/* Deduplicated sibling-list strings collected from sysfs; both arrays
 * are tail-allocated with the struct by build_cpu_topology(). */
struct cpu_topo {
	u32 core_sib;		/* entries used in core_siblings */
	u32 thread_sib;		/* entries used in thread_siblings */
	char **core_siblings;
	char **thread_siblings;
};
453 
/*
 * Read @cpu's core and thread sibling lists from sysfs and append each
 * to @tp unless an identical string was already collected.  Returns 0
 * on success, -1 on any open/read failure.
 */
static int build_cpu_topo(struct cpu_topo *tp, int cpu)
{
	FILE *fp;
	char filename[MAXPATHLEN];
	char *buf = NULL, *p;
	size_t len = 0;
	u32 i = 0;
	int ret = -1;

	sprintf(filename, CORE_SIB_FMT, cpu);
	fp = fopen(filename, "r");
	if (!fp)
		return -1;

	if (getline(&buf, &len, fp) <= 0)
		goto done;

	fclose(fp);

	/* strip the trailing newline */
	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	/* only record a sibling list we haven't seen yet */
	for (i = 0; i < tp->core_sib; i++) {
		if (!strcmp(buf, tp->core_siblings[i]))
			break;
	}
	if (i == tp->core_sib) {
		tp->core_siblings[i] = buf;
		tp->core_sib++;
		buf = NULL;	/* ownership moved into tp */
		len = 0;
	}

	sprintf(filename, THRD_SIB_FMT, cpu);
	fp = fopen(filename, "r");
	if (!fp)
		goto done;

	if (getline(&buf, &len, fp) <= 0)
		goto done;

	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	/* same dedup for the thread sibling list */
	for (i = 0; i < tp->thread_sib; i++) {
		if (!strcmp(buf, tp->thread_siblings[i]))
			break;
	}
	if (i == tp->thread_sib) {
		tp->thread_siblings[i] = buf;
		tp->thread_sib++;
		buf = NULL;	/* ownership moved into tp */
	}
	ret = 0;
done:
	/* fp is NULL here only when the second fopen() failed */
	if(fp)
		fclose(fp);
	free(buf);
	return ret;
}
516 
517 static void free_cpu_topo(struct cpu_topo *tp)
518 {
519 	u32 i;
520 
521 	if (!tp)
522 		return;
523 
524 	for (i = 0 ; i < tp->core_sib; i++)
525 		free(tp->core_siblings[i]);
526 
527 	for (i = 0 ; i < tp->thread_sib; i++)
528 		free(tp->thread_siblings[i]);
529 
530 	free(tp);
531 }
532 
/*
 * Allocate a cpu_topo in one chunk (struct followed by the two pointer
 * arrays, each sized for every configured CPU) and populate it via
 * build_cpu_topo().  Returns NULL on failure; free with free_cpu_topo().
 */
static struct cpu_topo *build_cpu_topology(void)
{
	struct cpu_topo *tp;
	void *addr;
	u32 nr, i;
	size_t sz;
	long ncpus;
	int ret = -1;

	ncpus = sysconf(_SC_NPROCESSORS_CONF);
	if (ncpus < 0)
		return NULL;

	nr = (u32)(ncpus & UINT_MAX);

	sz = nr * sizeof(char *);

	/* one allocation: the struct, then the two sibling arrays */
	addr = calloc(1, sizeof(*tp) + 2 * sz);
	if (!addr)
		return NULL;

	tp = addr;

	addr += sizeof(*tp);
	tp->core_siblings = addr;
	addr += sz;
	tp->thread_siblings = addr;

	for (i = 0; i < nr; i++) {
		ret = build_cpu_topo(tp, i);
		if (ret < 0)
			break;
	}
	/* NOTE(review): if nr == 0 the loop never runs, ret stays -1 and
	 * NULL is returned — presumably intended. */
	if (ret) {
		free_cpu_topo(tp);
		tp = NULL;
	}
	return tp;
}
572 
573 static int write_cpu_topology(int fd, struct perf_header *h __used,
574 			  struct perf_evlist *evlist __used)
575 {
576 	struct cpu_topo *tp;
577 	u32 i;
578 	int ret;
579 
580 	tp = build_cpu_topology();
581 	if (!tp)
582 		return -1;
583 
584 	ret = do_write(fd, &tp->core_sib, sizeof(tp->core_sib));
585 	if (ret < 0)
586 		goto done;
587 
588 	for (i = 0; i < tp->core_sib; i++) {
589 		ret = do_write_string(fd, tp->core_siblings[i]);
590 		if (ret < 0)
591 			goto done;
592 	}
593 	ret = do_write(fd, &tp->thread_sib, sizeof(tp->thread_sib));
594 	if (ret < 0)
595 		goto done;
596 
597 	for (i = 0; i < tp->thread_sib; i++) {
598 		ret = do_write_string(fd, tp->thread_siblings[i]);
599 		if (ret < 0)
600 			break;
601 	}
602 done:
603 	free_cpu_topo(tp);
604 	return ret;
605 }
606 
607 
608 
609 static int write_total_mem(int fd, struct perf_header *h __used,
610 			  struct perf_evlist *evlist __used)
611 {
612 	char *buf = NULL;
613 	FILE *fp;
614 	size_t len = 0;
615 	int ret = -1, n;
616 	uint64_t mem;
617 
618 	fp = fopen("/proc/meminfo", "r");
619 	if (!fp)
620 		return -1;
621 
622 	while (getline(&buf, &len, fp) > 0) {
623 		ret = strncmp(buf, "MemTotal:", 9);
624 		if (!ret)
625 			break;
626 	}
627 	if (!ret) {
628 		n = sscanf(buf, "%*s %"PRIu64, &mem);
629 		if (n == 1)
630 			ret = do_write(fd, &mem, sizeof(mem));
631 	}
632 	free(buf);
633 	fclose(fp);
634 	return ret;
635 }
636 
637 static int write_topo_node(int fd, int node)
638 {
639 	char str[MAXPATHLEN];
640 	char field[32];
641 	char *buf = NULL, *p;
642 	size_t len = 0;
643 	FILE *fp;
644 	u64 mem_total, mem_free, mem;
645 	int ret = -1;
646 
647 	sprintf(str, "/sys/devices/system/node/node%d/meminfo", node);
648 	fp = fopen(str, "r");
649 	if (!fp)
650 		return -1;
651 
652 	while (getline(&buf, &len, fp) > 0) {
653 		/* skip over invalid lines */
654 		if (!strchr(buf, ':'))
655 			continue;
656 		if (sscanf(buf, "%*s %*d %s %"PRIu64, field, &mem) != 2)
657 			goto done;
658 		if (!strcmp(field, "MemTotal:"))
659 			mem_total = mem;
660 		if (!strcmp(field, "MemFree:"))
661 			mem_free = mem;
662 	}
663 
664 	fclose(fp);
665 
666 	ret = do_write(fd, &mem_total, sizeof(u64));
667 	if (ret)
668 		goto done;
669 
670 	ret = do_write(fd, &mem_free, sizeof(u64));
671 	if (ret)
672 		goto done;
673 
674 	ret = -1;
675 	sprintf(str, "/sys/devices/system/node/node%d/cpulist", node);
676 
677 	fp = fopen(str, "r");
678 	if (!fp)
679 		goto done;
680 
681 	if (getline(&buf, &len, fp) <= 0)
682 		goto done;
683 
684 	p = strchr(buf, '\n');
685 	if (p)
686 		*p = '\0';
687 
688 	ret = do_write_string(fd, buf);
689 done:
690 	free(buf);
691 	fclose(fp);
692 	return ret;
693 }
694 
/*
 * HEADER_NUMA_TOPOLOGY: number of online nodes, then for each node its
 * id followed by the per-node payload from write_topo_node().
 */
static int write_numa_topology(int fd, struct perf_header *h __used,
			  struct perf_evlist *evlist __used)
{
	char *buf = NULL;
	size_t len = 0;
	FILE *fp;
	struct cpu_map *node_map = NULL;
	char *c;
	u32 nr, i, j;
	int ret = -1;

	fp = fopen("/sys/devices/system/node/online", "r");
	if (!fp)
		return -1;

	if (getline(&buf, &len, fp) <= 0)
		goto done;

	/* strip the trailing newline before parsing the node list */
	c = strchr(buf, '\n');
	if (c)
		*c = '\0';

	node_map = cpu_map__new(buf);
	if (!node_map)
		goto done;

	nr = (u32)node_map->nr;

	ret = do_write(fd, &nr, sizeof(nr));
	if (ret < 0)
		goto done;

	for (i = 0; i < nr; i++) {
		j = (u32)node_map->map[i];
		ret = do_write(fd, &j, sizeof(j));
		if (ret < 0)
			break;

		ret = write_topo_node(fd, i);
		if (ret < 0)
			break;
	}
done:
	free(buf);
	fclose(fp);
	/* NOTE(review): node_map is released with plain free() — confirm
	 * cpu_map__new() makes a single allocation. */
	free(node_map);
	return ret;
}
743 
744 /*
745  * default get_cpuid(): nothing gets recorded
746  * actual implementation must be in arch/$(ARCH)/util/header.c
747  */
748 int __attribute__((weak)) get_cpuid(char *buffer __used, size_t sz __used)
749 {
750 	return -1;
751 }
752 
753 static int write_cpuid(int fd, struct perf_header *h __used,
754 		       struct perf_evlist *evlist __used)
755 {
756 	char buffer[64];
757 	int ret;
758 
759 	ret = get_cpuid(buffer, sizeof(buffer));
760 	if (!ret)
761 		goto write_it;
762 
763 	return -1;
764 write_it:
765 	return do_write_string(fd, buffer);
766 }
767 
/* Print the recorded hostname.
 * NOTE(review): str is NULL on read failure and is passed to %s as-is. */
static void print_hostname(struct perf_header *ph, int fd, FILE *fp)
{
	char *str = do_read_string(fd, ph);
	fprintf(fp, "# hostname : %s\n", str);
	free(str);
}
774 
/* Print the recorded OS release string.
 * NOTE(review): str is NULL on read failure and is passed to %s as-is. */
static void print_osrelease(struct perf_header *ph, int fd, FILE *fp)
{
	char *str = do_read_string(fd, ph);
	fprintf(fp, "# os release : %s\n", str);
	free(str);
}
781 
/* Print the recorded architecture string. */
static void print_arch(struct perf_header *ph, int fd, FILE *fp)
{
	char *str = do_read_string(fd, ph);
	fprintf(fp, "# arch : %s\n", str);
	free(str);
}
788 
/* Print the recorded CPU description (model/branding) string. */
static void print_cpudesc(struct perf_header *ph, int fd, FILE *fp)
{
	char *str = do_read_string(fd, ph);
	fprintf(fp, "# cpudesc : %s\n", str);
	free(str);
}
795 
796 static void print_nrcpus(struct perf_header *ph, int fd, FILE *fp)
797 {
798 	ssize_t ret;
799 	u32 nr;
800 
801 	ret = read(fd, &nr, sizeof(nr));
802 	if (ret != (ssize_t)sizeof(nr))
803 		nr = -1; /* interpreted as error */
804 
805 	if (ph->needs_swap)
806 		nr = bswap_32(nr);
807 
808 	fprintf(fp, "# nrcpus online : %u\n", nr);
809 
810 	ret = read(fd, &nr, sizeof(nr));
811 	if (ret != (ssize_t)sizeof(nr))
812 		nr = -1; /* interpreted as error */
813 
814 	if (ph->needs_swap)
815 		nr = bswap_32(nr);
816 
817 	fprintf(fp, "# nrcpus avail : %u\n", nr);
818 }
819 
/* Print the recorded perf version string. */
static void print_version(struct perf_header *ph, int fd, FILE *fp)
{
	char *str = do_read_string(fd, ph);
	fprintf(fp, "# perf version : %s\n", str);
	free(str);
}
826 
827 static void print_cmdline(struct perf_header *ph, int fd, FILE *fp)
828 {
829 	ssize_t ret;
830 	char *str;
831 	u32 nr, i;
832 
833 	ret = read(fd, &nr, sizeof(nr));
834 	if (ret != (ssize_t)sizeof(nr))
835 		return;
836 
837 	if (ph->needs_swap)
838 		nr = bswap_32(nr);
839 
840 	fprintf(fp, "# cmdline : ");
841 
842 	for (i = 0; i < nr; i++) {
843 		str = do_read_string(fd, ph);
844 		fprintf(fp, "%s ", str);
845 		free(str);
846 	}
847 	fputc('\n', fp);
848 }
849 
850 static void print_cpu_topology(struct perf_header *ph, int fd, FILE *fp)
851 {
852 	ssize_t ret;
853 	u32 nr, i;
854 	char *str;
855 
856 	ret = read(fd, &nr, sizeof(nr));
857 	if (ret != (ssize_t)sizeof(nr))
858 		return;
859 
860 	if (ph->needs_swap)
861 		nr = bswap_32(nr);
862 
863 	for (i = 0; i < nr; i++) {
864 		str = do_read_string(fd, ph);
865 		fprintf(fp, "# sibling cores   : %s\n", str);
866 		free(str);
867 	}
868 
869 	ret = read(fd, &nr, sizeof(nr));
870 	if (ret != (ssize_t)sizeof(nr))
871 		return;
872 
873 	if (ph->needs_swap)
874 		nr = bswap_32(nr);
875 
876 	for (i = 0; i < nr; i++) {
877 		str = do_read_string(fd, ph);
878 		fprintf(fp, "# sibling threads : %s\n", str);
879 		free(str);
880 	}
881 }
882 
883 static void print_event_desc(struct perf_header *ph, int fd, FILE *fp)
884 {
885 	struct perf_event_attr attr;
886 	uint64_t id;
887 	void *buf = NULL;
888 	char *str;
889 	u32 nre, sz, nr, i, j, msz;
890 	int ret;
891 
892 	/* number of events */
893 	ret = read(fd, &nre, sizeof(nre));
894 	if (ret != (ssize_t)sizeof(nre))
895 		goto error;
896 
897 	if (ph->needs_swap)
898 		nre = bswap_32(nre);
899 
900 	ret = read(fd, &sz, sizeof(sz));
901 	if (ret != (ssize_t)sizeof(sz))
902 		goto error;
903 
904 	if (ph->needs_swap)
905 		sz = bswap_32(sz);
906 
907 	/*
908 	 * ensure it is at least to our ABI rev
909 	 */
910 	if (sz < (u32)sizeof(attr))
911 		goto error;
912 
913 	memset(&attr, 0, sizeof(attr));
914 
915 	/* read entire region to sync up to next field */
916 	buf = malloc(sz);
917 	if (!buf)
918 		goto error;
919 
920 	msz = sizeof(attr);
921 	if (sz < msz)
922 		msz = sz;
923 
924 	for (i = 0 ; i < nre; i++) {
925 
926 		ret = read(fd, buf, sz);
927 		if (ret != (ssize_t)sz)
928 			goto error;
929 
930 		if (ph->needs_swap)
931 			perf_event__attr_swap(buf);
932 
933 		memcpy(&attr, buf, msz);
934 
935 		ret = read(fd, &nr, sizeof(nr));
936 		if (ret != (ssize_t)sizeof(nr))
937 			goto error;
938 
939 		if (ph->needs_swap)
940 			nr = bswap_32(nr);
941 
942 		str = do_read_string(fd, ph);
943 		fprintf(fp, "# event : name = %s, ", str);
944 		free(str);
945 
946 		fprintf(fp, "type = %d, config = 0x%"PRIx64
947 			    ", config1 = 0x%"PRIx64", config2 = 0x%"PRIx64,
948 				attr.type,
949 				(u64)attr.config,
950 				(u64)attr.config1,
951 				(u64)attr.config2);
952 
953 		fprintf(fp, ", excl_usr = %d, excl_kern = %d",
954 				attr.exclude_user,
955 				attr.exclude_kernel);
956 
957 		if (nr)
958 			fprintf(fp, ", id = {");
959 
960 		for (j = 0 ; j < nr; j++) {
961 			ret = read(fd, &id, sizeof(id));
962 			if (ret != (ssize_t)sizeof(id))
963 				goto error;
964 
965 			if (ph->needs_swap)
966 				id = bswap_64(id);
967 
968 			if (j)
969 				fputc(',', fp);
970 
971 			fprintf(fp, " %"PRIu64, id);
972 		}
973 		if (nr && j == nr)
974 			fprintf(fp, " }");
975 		fputc('\n', fp);
976 	}
977 	free(buf);
978 	return;
979 error:
980 	fprintf(fp, "# event desc: not available or unable to read\n");
981 }
982 
983 static void print_total_mem(struct perf_header *h __used, int fd, FILE *fp)
984 {
985 	uint64_t mem;
986 	ssize_t ret;
987 
988 	ret = read(fd, &mem, sizeof(mem));
989 	if (ret != sizeof(mem))
990 		goto error;
991 
992 	if (h->needs_swap)
993 		mem = bswap_64(mem);
994 
995 	fprintf(fp, "# total memory : %"PRIu64" kB\n", mem);
996 	return;
997 error:
998 	fprintf(fp, "# total memory : unknown\n");
999 }
1000 
/*
 * Print the recorded NUMA topology: node count, then per node its id,
 * MemTotal/MemFree (u64 kB each) and cpulist string — the layout
 * produced by write_numa_topology()/write_topo_node().
 */
static void print_numa_topology(struct perf_header *h __used, int fd, FILE *fp)
{
	ssize_t ret;
	u32 nr, c, i;
	char *str;
	uint64_t mem_total, mem_free;

	/* nr nodes */
	ret = read(fd, &nr, sizeof(nr));
	if (ret != (ssize_t)sizeof(nr))
		goto error;

	if (h->needs_swap)
		nr = bswap_32(nr);

	for (i = 0; i < nr; i++) {

		/* node number */
		ret = read(fd, &c, sizeof(c));
		if (ret != (ssize_t)sizeof(c))
			goto error;

		if (h->needs_swap)
			c = bswap_32(c);

		ret = read(fd, &mem_total, sizeof(u64));
		if (ret != sizeof(u64))
			goto error;

		ret = read(fd, &mem_free, sizeof(u64));
		if (ret != sizeof(u64))
			goto error;

		if (h->needs_swap) {
			mem_total = bswap_64(mem_total);
			mem_free = bswap_64(mem_free);
		}

		fprintf(fp, "# node%u meminfo  : total = %"PRIu64" kB,"
			    " free = %"PRIu64" kB\n",
			c,
			mem_total,
			mem_free);

		/* NOTE(review): str may be NULL on a truncated section */
		str = do_read_string(fd, h);
		fprintf(fp, "# node%u cpu list : %s\n", c, str);
		free(str);
	}
	return;
error:
	fprintf(fp, "# numa topology : not available\n");
}
1053 
/* Print the recorded arch-specific CPU id string. */
static void print_cpuid(struct perf_header *ph, int fd, FILE *fp)
{
	char *str = do_read_string(fd, ph);
	fprintf(fp, "# cpuid : %s\n", str);
	free(str);
}
1060 
/* Per-feature dispatch: how to write the feature section and how to
 * print it back.  full_only features are shown only with -I. */
struct feature_ops {
	int (*write)(int fd, struct perf_header *h, struct perf_evlist *evlist);
	void (*print)(struct perf_header *h, int fd, FILE *fp);
	const char *name;
	bool full_only;
};

/* FEAT_OPA: always printed; FEAT_OPF: printed only in "full" mode. */
#define FEAT_OPA(n, w, p) \
	[n] = { .name = #n, .write = w, .print = p }
#define FEAT_OPF(n, w, p) \
	[n] = { .name = #n, .write = w, .print = p, .full_only = true }

/* Table indexed by the HEADER_* feature id. */
static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
	FEAT_OPA(HEADER_TRACE_INFO, write_trace_info, NULL),
	FEAT_OPA(HEADER_BUILD_ID, write_build_id, NULL),
	FEAT_OPA(HEADER_HOSTNAME, write_hostname, print_hostname),
	FEAT_OPA(HEADER_OSRELEASE, write_osrelease, print_osrelease),
	FEAT_OPA(HEADER_VERSION, write_version, print_version),
	FEAT_OPA(HEADER_ARCH, write_arch, print_arch),
	FEAT_OPA(HEADER_NRCPUS, write_nrcpus, print_nrcpus),
	FEAT_OPA(HEADER_CPUDESC, write_cpudesc, print_cpudesc),
	FEAT_OPA(HEADER_CPUID, write_cpuid, print_cpuid),
	FEAT_OPA(HEADER_TOTAL_MEM, write_total_mem, print_total_mem),
	FEAT_OPA(HEADER_EVENT_DESC, write_event_desc, print_event_desc),
	FEAT_OPA(HEADER_CMDLINE, write_cmdline, print_cmdline),
	FEAT_OPF(HEADER_CPU_TOPOLOGY, write_cpu_topology, print_cpu_topology),
	FEAT_OPF(HEADER_NUMA_TOPOLOGY, write_numa_topology, print_numa_topology),
};
1089 
/* Cookie passed through perf_header__process_sections() to the
 * per-section printing callback. */
struct header_print_data {
	FILE *fp;
	bool full; /* extended list of headers */
};
1094 
/*
 * Section callback: seek to the feature's data and invoke its print
 * handler (or a "use -I" hint for full_only features).  Returns 0 to
 * keep iterating, -1 on an unknown feature id.
 */
static int perf_file_section__fprintf_info(struct perf_file_section *section,
					   struct perf_header *ph,
					   int feat, int fd, void *data)
{
	struct header_print_data *hd = data;

	/* a failed seek skips this feature but continues the walk */
	if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
		pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
				"%d, continuing...\n", section->offset, feat);
		return 0;
	}
	if (feat < HEADER_TRACE_INFO || feat >= HEADER_LAST_FEATURE) {
		pr_warning("unknown feature %d\n", feat);
		return -1;
	}
	if (!feat_ops[feat].print)
		return 0;

	if (!feat_ops[feat].full_only || hd->full)
		feat_ops[feat].print(ph, fd, hd->fp);
	else
		fprintf(hd->fp, "# %s info available, use -I to display\n",
			feat_ops[feat].name);

	return 0;
}
1121 
1122 int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
1123 {
1124 	struct header_print_data hd;
1125 	struct perf_header *header = &session->header;
1126 	int fd = session->fd;
1127 	hd.fp = fp;
1128 	hd.full = full;
1129 
1130 	perf_header__process_sections(header, fd, &hd,
1131 				      perf_file_section__fprintf_info);
1132 	return 0;
1133 }
1134 
1135 #define dsos__for_each_with_build_id(pos, head)	\
1136 	list_for_each_entry(pos, head, node)	\
1137 		if (!pos->has_build_id)		\
1138 			continue;		\
1139 		else
1140 
/*
 * Emit one build_id_event (header + build id + NAME_ALIGN-padded long
 * name) for every DSO on @head that has a build id and was hit.
 * Returns 0 or the first do_write()/write_padded() error.
 */
static int __dsos__write_buildid_table(struct list_head *head, pid_t pid,
				u16 misc, int fd)
{
	struct dso *pos;

	dsos__for_each_with_build_id(pos, head) {
		int err;
		struct build_id_event b;
		size_t len;

		if (!pos->hit)
			continue;
		/* long name is written NUL-terminated, padded to alignment */
		len = pos->long_name_len + 1;
		len = ALIGN(len, NAME_ALIGN);
		memset(&b, 0, sizeof(b));
		memcpy(&b.build_id, pos->build_id, sizeof(pos->build_id));
		b.pid = pid;
		b.header.misc = misc;
		b.header.size = sizeof(b) + len;
		err = do_write(fd, &b, sizeof(b));
		if (err < 0)
			return err;
		err = write_padded(fd, pos->long_name,
				   pos->long_name_len + 1, len);
		if (err < 0)
			return err;
	}

	return 0;
}
1171 
1172 static int machine__write_buildid_table(struct machine *machine, int fd)
1173 {
1174 	int err;
1175 	u16 kmisc = PERF_RECORD_MISC_KERNEL,
1176 	    umisc = PERF_RECORD_MISC_USER;
1177 
1178 	if (!machine__is_host(machine)) {
1179 		kmisc = PERF_RECORD_MISC_GUEST_KERNEL;
1180 		umisc = PERF_RECORD_MISC_GUEST_USER;
1181 	}
1182 
1183 	err = __dsos__write_buildid_table(&machine->kernel_dsos, machine->pid,
1184 					  kmisc, fd);
1185 	if (err == 0)
1186 		err = __dsos__write_buildid_table(&machine->user_dsos,
1187 						  machine->pid, umisc, fd);
1188 	return err;
1189 }
1190 
/*
 * Write the build-id tables for the host machine and every guest
 * machine in the session; stops at the first error.
 */
static int dsos__write_buildid_table(struct perf_header *header, int fd)
{
	struct perf_session *session = container_of(header,
			struct perf_session, header);
	struct rb_node *nd;
	int err = machine__write_buildid_table(&session->host_machine, fd);

	if (err)
		return err;

	for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		err = machine__write_buildid_table(pos, fd);
		if (err)
			break;
	}
	return err;
}
1209 
1210 int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
1211 			  const char *name, bool is_kallsyms)
1212 {
1213 	const size_t size = PATH_MAX;
1214 	char *realname, *filename = zalloc(size),
1215 	     *linkname = zalloc(size), *targetname;
1216 	int len, err = -1;
1217 
1218 	if (is_kallsyms) {
1219 		if (symbol_conf.kptr_restrict) {
1220 			pr_debug("Not caching a kptr_restrict'ed /proc/kallsyms\n");
1221 			return 0;
1222 		}
1223 		realname = (char *)name;
1224 	} else
1225 		realname = realpath(name, NULL);
1226 
1227 	if (realname == NULL || filename == NULL || linkname == NULL)
1228 		goto out_free;
1229 
1230 	len = snprintf(filename, size, "%s%s%s",
1231 		       debugdir, is_kallsyms ? "/" : "", realname);
1232 	if (mkdir_p(filename, 0755))
1233 		goto out_free;
1234 
1235 	snprintf(filename + len, sizeof(filename) - len, "/%s", sbuild_id);
1236 
1237 	if (access(filename, F_OK)) {
1238 		if (is_kallsyms) {
1239 			 if (copyfile("/proc/kallsyms", filename))
1240 				goto out_free;
1241 		} else if (link(realname, filename) && copyfile(name, filename))
1242 			goto out_free;
1243 	}
1244 
1245 	len = snprintf(linkname, size, "%s/.build-id/%.2s",
1246 		       debugdir, sbuild_id);
1247 
1248 	if (access(linkname, X_OK) && mkdir_p(linkname, 0755))
1249 		goto out_free;
1250 
1251 	snprintf(linkname + len, size - len, "/%s", sbuild_id + 2);
1252 	targetname = filename + strlen(debugdir) - 5;
1253 	memcpy(targetname, "../..", 5);
1254 
1255 	if (symlink(targetname, linkname) == 0)
1256 		err = 0;
1257 out_free:
1258 	if (!is_kallsyms)
1259 		free(realname);
1260 	free(filename);
1261 	free(linkname);
1262 	return err;
1263 }
1264 
/* Convenience wrapper: format the raw build id as hex, then add it to
 * the cache via build_id_cache__add_s(). */
static int build_id_cache__add_b(const u8 *build_id, size_t build_id_size,
				 const char *name, const char *debugdir,
				 bool is_kallsyms)
{
	/* two hex chars per byte, plus the NUL */
	char sbuild_id[BUILD_ID_SIZE * 2 + 1];

	build_id__sprintf(build_id, build_id_size, sbuild_id);

	return build_id_cache__add_s(sbuild_id, debugdir, name, is_kallsyms);
}
1275 
/*
 * Remove @sbuild_id from the cache: unlink the .build-id/xx/rest
 * symlink, then the cached file it pointed at.  Returns 0 on success,
 * -1 on any failure.
 */
int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir)
{
	const size_t size = PATH_MAX;
	char *filename = zalloc(size),
	     *linkname = zalloc(size);
	int err = -1;

	if (filename == NULL || linkname == NULL)
		goto out_free;

	snprintf(linkname, size, "%s/.build-id/%.2s/%s",
		 debugdir, sbuild_id, sbuild_id + 2);

	if (access(linkname, F_OK))
		goto out_free;

	/* resolve the (relative) symlink target before removing the link */
	if (readlink(linkname, filename, size - 1) < 0)
		goto out_free;

	if (unlink(linkname))
		goto out_free;

	/*
	 * Since the link is relative, we must make it absolute:
	 */
	snprintf(linkname, size, "%s/.build-id/%.2s/%s",
		 debugdir, sbuild_id, filename);

	if (unlink(linkname))
		goto out_free;

	err = 0;
out_free:
	free(filename);
	free(linkname);
	return err;
}
1313 
/* Cache one DSO's build id; a kernel DSO whose long name is not an
 * absolute path is treated as kallsyms-backed. */
static int dso__cache_build_id(struct dso *dso, const char *debugdir)
{
	bool is_kallsyms = dso->kernel && dso->long_name[0] != '/';

	return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id),
				     dso->long_name, debugdir, is_kallsyms);
}
1321 
1322 static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir)
1323 {
1324 	struct dso *pos;
1325 	int err = 0;
1326 
1327 	dsos__for_each_with_build_id(pos, head)
1328 		if (dso__cache_build_id(pos, debugdir))
1329 			err = -1;
1330 
1331 	return err;
1332 }
1333 
1334 static int machine__cache_build_ids(struct machine *machine, const char *debugdir)
1335 {
1336 	int ret = __dsos__cache_build_ids(&machine->kernel_dsos, debugdir);
1337 	ret |= __dsos__cache_build_ids(&machine->user_dsos, debugdir);
1338 	return ret;
1339 }
1340 
1341 static int perf_session__cache_build_ids(struct perf_session *session)
1342 {
1343 	struct rb_node *nd;
1344 	int ret;
1345 	char debugdir[PATH_MAX];
1346 
1347 	snprintf(debugdir, sizeof(debugdir), "%s", buildid_dir);
1348 
1349 	if (mkdir(debugdir, 0755) != 0 && errno != EEXIST)
1350 		return -1;
1351 
1352 	ret = machine__cache_build_ids(&session->host_machine, debugdir);
1353 
1354 	for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
1355 		struct machine *pos = rb_entry(nd, struct machine, rb_node);
1356 		ret |= machine__cache_build_ids(pos, debugdir);
1357 	}
1358 	return ret ? -1 : 0;
1359 }
1360 
1361 static bool machine__read_build_ids(struct machine *machine, bool with_hits)
1362 {
1363 	bool ret = __dsos__read_build_ids(&machine->kernel_dsos, with_hits);
1364 	ret |= __dsos__read_build_ids(&machine->user_dsos, with_hits);
1365 	return ret;
1366 }
1367 
1368 static bool perf_session__read_build_ids(struct perf_session *session, bool with_hits)
1369 {
1370 	struct rb_node *nd;
1371 	bool ret = machine__read_build_ids(&session->host_machine, with_hits);
1372 
1373 	for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
1374 		struct machine *pos = rb_entry(nd, struct machine, rb_node);
1375 		ret |= machine__read_build_ids(pos, with_hits);
1376 	}
1377 
1378 	return ret;
1379 }
1380 
1381 static int do_write_feat(int fd, struct perf_header *h, int type,
1382 			 struct perf_file_section **p,
1383 			 struct perf_evlist *evlist)
1384 {
1385 	int err;
1386 	int ret = 0;
1387 
1388 	if (perf_header__has_feat(h, type)) {
1389 
1390 		(*p)->offset = lseek(fd, 0, SEEK_CUR);
1391 
1392 		err = feat_ops[type].write(fd, h, evlist);
1393 		if (err < 0) {
1394 			pr_debug("failed to write feature %d\n", type);
1395 
1396 			/* undo anything written */
1397 			lseek(fd, (*p)->offset, SEEK_SET);
1398 
1399 			return -1;
1400 		}
1401 		(*p)->size = lseek(fd, 0, SEEK_CUR) - (*p)->offset;
1402 		(*p)++;
1403 	}
1404 	return ret;
1405 }
1406 
1407 static int perf_header__adds_write(struct perf_header *header,
1408 				   struct perf_evlist *evlist, int fd)
1409 {
1410 	int nr_sections;
1411 	struct perf_session *session;
1412 	struct perf_file_section *feat_sec, *p;
1413 	int sec_size;
1414 	u64 sec_start;
1415 	int err;
1416 
1417 	session = container_of(header, struct perf_session, header);
1418 
1419 	if (perf_header__has_feat(header, HEADER_BUILD_ID &&
1420 	    !perf_session__read_build_ids(session, true)))
1421 		perf_header__clear_feat(header, HEADER_BUILD_ID);
1422 
1423 	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
1424 	if (!nr_sections)
1425 		return 0;
1426 
1427 	feat_sec = p = calloc(sizeof(*feat_sec), nr_sections);
1428 	if (feat_sec == NULL)
1429 		return -ENOMEM;
1430 
1431 	sec_size = sizeof(*feat_sec) * nr_sections;
1432 
1433 	sec_start = header->data_offset + header->data_size;
1434 	lseek(fd, sec_start + sec_size, SEEK_SET);
1435 
1436 	err = do_write_feat(fd, header, HEADER_TRACE_INFO, &p, evlist);
1437 	if (err)
1438 		goto out_free;
1439 
1440 	err = do_write_feat(fd, header, HEADER_BUILD_ID, &p, evlist);
1441 	if (err) {
1442 		perf_header__clear_feat(header, HEADER_BUILD_ID);
1443 		goto out_free;
1444 	}
1445 
1446 	err = do_write_feat(fd, header, HEADER_HOSTNAME, &p, evlist);
1447 	if (err)
1448 		perf_header__clear_feat(header, HEADER_HOSTNAME);
1449 
1450 	err = do_write_feat(fd, header, HEADER_OSRELEASE, &p, evlist);
1451 	if (err)
1452 		perf_header__clear_feat(header, HEADER_OSRELEASE);
1453 
1454 	err = do_write_feat(fd, header, HEADER_VERSION, &p, evlist);
1455 	if (err)
1456 		perf_header__clear_feat(header, HEADER_VERSION);
1457 
1458 	err = do_write_feat(fd, header, HEADER_ARCH, &p, evlist);
1459 	if (err)
1460 		perf_header__clear_feat(header, HEADER_ARCH);
1461 
1462 	err = do_write_feat(fd, header, HEADER_NRCPUS, &p, evlist);
1463 	if (err)
1464 		perf_header__clear_feat(header, HEADER_NRCPUS);
1465 
1466 	err = do_write_feat(fd, header, HEADER_CPUDESC, &p, evlist);
1467 	if (err)
1468 		perf_header__clear_feat(header, HEADER_CPUDESC);
1469 
1470 	err = do_write_feat(fd, header, HEADER_CPUID, &p, evlist);
1471 	if (err)
1472 		perf_header__clear_feat(header, HEADER_CPUID);
1473 
1474 	err = do_write_feat(fd, header, HEADER_TOTAL_MEM, &p, evlist);
1475 	if (err)
1476 		perf_header__clear_feat(header, HEADER_TOTAL_MEM);
1477 
1478 	err = do_write_feat(fd, header, HEADER_CMDLINE, &p, evlist);
1479 	if (err)
1480 		perf_header__clear_feat(header, HEADER_CMDLINE);
1481 
1482 	err = do_write_feat(fd, header, HEADER_EVENT_DESC, &p, evlist);
1483 	if (err)
1484 		perf_header__clear_feat(header, HEADER_EVENT_DESC);
1485 
1486 	err = do_write_feat(fd, header, HEADER_CPU_TOPOLOGY, &p, evlist);
1487 	if (err)
1488 		perf_header__clear_feat(header, HEADER_CPU_TOPOLOGY);
1489 
1490 	err = do_write_feat(fd, header, HEADER_NUMA_TOPOLOGY, &p, evlist);
1491 	if (err)
1492 		perf_header__clear_feat(header, HEADER_NUMA_TOPOLOGY);
1493 
1494 	lseek(fd, sec_start, SEEK_SET);
1495 	/*
1496 	 * may write more than needed due to dropped feature, but
1497 	 * this is okay, reader will skip the mising entries
1498 	 */
1499 	err = do_write(fd, feat_sec, sec_size);
1500 	if (err < 0)
1501 		pr_debug("failed to write feature section\n");
1502 out_free:
1503 	free(feat_sec);
1504 	return err;
1505 }
1506 
1507 int perf_header__write_pipe(int fd)
1508 {
1509 	struct perf_pipe_file_header f_header;
1510 	int err;
1511 
1512 	f_header = (struct perf_pipe_file_header){
1513 		.magic	   = PERF_MAGIC,
1514 		.size	   = sizeof(f_header),
1515 	};
1516 
1517 	err = do_write(fd, &f_header, sizeof(f_header));
1518 	if (err < 0) {
1519 		pr_debug("failed to write perf pipe header\n");
1520 		return err;
1521 	}
1522 
1523 	return 0;
1524 }
1525 
/*
 * Write the on-disk perf.data layout: per-evsel id arrays, the
 * attribute table, the trace event-type table, optionally the
 * feature sections (only when @at_exit), and finally the file
 * header itself at offset 0. Leaves the file offset at the end
 * of the data area and marks the header frozen.
 */
int perf_session__write_header(struct perf_session *session,
			       struct perf_evlist *evlist,
			       int fd, bool at_exit)
{
	struct perf_file_header f_header;
	struct perf_file_attr   f_attr;
	struct perf_header *header = &session->header;
	struct perf_evsel *attr, *pair = NULL;
	int err;

	/* Skip past the file header; it is written last, at offset 0. */
	lseek(fd, sizeof(f_header), SEEK_SET);

	/*
	 * When asked to write an evlist other than the session's own
	 * (e.g. perf inject), walk both lists in lock-step so each
	 * evsel also emits its counterpart's ids.
	 * NOTE(review): assumes both lists have matching length/order
	 * -- confirm at the callers.
	 */
	if (session->evlist != evlist)
		pair = list_entry(session->evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry(attr, &evlist->entries, node) {
		/* Remember where this evsel's id array starts. */
		attr->id_offset = lseek(fd, 0, SEEK_CUR);
		err = do_write(fd, attr->id, attr->ids * sizeof(u64));
		if (err < 0) {
			/* label inside the loop: shared error exit for both writes */
out_err_write:
			pr_debug("failed to write perf header\n");
			return err;
		}
		if (session->evlist != evlist) {
			err = do_write(fd, pair->id, pair->ids * sizeof(u64));
			if (err < 0)
				goto out_err_write;
			attr->ids += pair->ids;
			pair = list_entry(pair->node.next, struct perf_evsel, node);
		}
	}

	/* Attribute table: one perf_file_attr per evsel, pointing at its ids. */
	header->attr_offset = lseek(fd, 0, SEEK_CUR);

	list_for_each_entry(attr, &evlist->entries, node) {
		f_attr = (struct perf_file_attr){
			.attr = attr->attr,
			.ids  = {
				.offset = attr->id_offset,
				.size   = attr->ids * sizeof(u64),
			}
		};
		err = do_write(fd, &f_attr, sizeof(f_attr));
		if (err < 0) {
			pr_debug("failed to write perf header attribute\n");
			return err;
		}
	}

	/* Trace event-type table (file-scope 'events' array, if any). */
	header->event_offset = lseek(fd, 0, SEEK_CUR);
	header->event_size = event_count * sizeof(struct perf_trace_event_type);
	if (events) {
		err = do_write(fd, events, header->event_size);
		if (err < 0) {
			pr_debug("failed to write perf header events\n");
			return err;
		}
	}

	/* Sample data begins here. */
	header->data_offset = lseek(fd, 0, SEEK_CUR);

	/* Feature sections go after the data, so only at final write. */
	if (at_exit) {
		err = perf_header__adds_write(header, evlist, fd);
		if (err < 0)
			return err;
	}

	f_header = (struct perf_file_header){
		.magic	   = PERF_MAGIC,
		.size	   = sizeof(f_header),
		.attr_size = sizeof(f_attr),
		.attrs = {
			.offset = header->attr_offset,
			.size   = evlist->nr_entries * sizeof(f_attr),
		},
		.data = {
			.offset = header->data_offset,
			.size	= header->data_size,
		},
		.event_types = {
			.offset = header->event_offset,
			.size	= header->event_size,
		},
	};

	memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));

	/* Now that all offsets are known, write the header at the front. */
	lseek(fd, 0, SEEK_SET);
	err = do_write(fd, &f_header, sizeof(f_header));
	if (err < 0) {
		pr_debug("failed to write perf header\n");
		return err;
	}
	/* Position after the data area for any subsequent appends. */
	lseek(fd, header->data_offset + header->data_size, SEEK_SET);

	header->frozen = 1;
	return 0;
}
1624 
1625 static int perf_header__getbuffer64(struct perf_header *header,
1626 				    int fd, void *buf, size_t size)
1627 {
1628 	if (readn(fd, buf, size) <= 0)
1629 		return -1;
1630 
1631 	if (header->needs_swap)
1632 		mem_bswap_64(buf, size);
1633 
1634 	return 0;
1635 }
1636 
1637 int perf_header__process_sections(struct perf_header *header, int fd,
1638 				  void *data,
1639 				  int (*process)(struct perf_file_section *section,
1640 				  struct perf_header *ph,
1641 				  int feat, int fd, void *data))
1642 {
1643 	struct perf_file_section *feat_sec;
1644 	int nr_sections;
1645 	int sec_size;
1646 	int idx = 0;
1647 	int err = -1, feat = 1;
1648 
1649 	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
1650 	if (!nr_sections)
1651 		return 0;
1652 
1653 	feat_sec = calloc(sizeof(*feat_sec), nr_sections);
1654 	if (!feat_sec)
1655 		return -1;
1656 
1657 	sec_size = sizeof(*feat_sec) * nr_sections;
1658 
1659 	lseek(fd, header->data_offset + header->data_size, SEEK_SET);
1660 
1661 	if (perf_header__getbuffer64(header, fd, feat_sec, sec_size))
1662 		goto out_free;
1663 
1664 	err = 0;
1665 	while (idx < nr_sections && feat < HEADER_LAST_FEATURE) {
1666 		if (perf_header__has_feat(header, feat)) {
1667 			struct perf_file_section *sec = &feat_sec[idx++];
1668 
1669 			err = process(sec, header, feat, fd, data);
1670 			if (err < 0)
1671 				break;
1672 		}
1673 		++feat;
1674 	}
1675 out_free:
1676 	free(feat_sec);
1677 	return err;
1678 }
1679 
/*
 * Read and validate the perf.data file header at offset 0, detecting
 * files written on opposite-endian hosts and older header layouts.
 * On success copies the feature bitmap and section geometry into @ph.
 * Returns 0 on success, -1 on a short read or unrecognized layout.
 */
int perf_file_header__read(struct perf_file_header *header,
			   struct perf_header *ph, int fd)
{
	lseek(fd, 0, SEEK_SET);

	if (readn(fd, header, sizeof(*header)) <= 0 ||
	    memcmp(&header->magic, __perf_magic, sizeof(header->magic)))
		return -1;

	/* attr_size mismatch: either a cross-endian file or corruption. */
	if (header->attr_size != sizeof(struct perf_file_attr)) {
		u64 attr_size = bswap_64(header->attr_size);

		if (attr_size != sizeof(struct perf_file_attr))
			return -1;

		/* Swap every u64 up to (not including) the feature bitmap. */
		mem_bswap_64(header, offsetof(struct perf_file_header,
					    adds_features));
		ph->needs_swap = true;
	}

	if (header->size != sizeof(*header)) {
		/* Support the previous format */
		if (header->size == offsetof(typeof(*header), adds_features))
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
		else
			return -1;
	} else if (ph->needs_swap) {
		unsigned int i;
		/*
		 * feature bitmap is declared as an array of unsigned longs --
		 * not good since its size can differ between the host that
		 * generated the data file and the host analyzing the file.
		 *
		 * We need to handle endianness, but we don't know the size of
		 * the unsigned long where the file was generated. Take a best
		 * guess at determining it: try 64-bit swap first (ie., file
		 * created on a 64-bit host), and check if the hostname feature
		 * bit is set (this feature bit is forced on as of fbe96f2).
		 * If the bit is not, undo the 64-bit swap and try a 32-bit
		 * swap. If the hostname bit is still not set (e.g., older data
		 * file), punt and fallback to the original behavior --
		 * clearing all feature bits and setting buildid.
		 */
		for (i = 0; i < BITS_TO_LONGS(HEADER_FEAT_BITS); ++i)
			header->adds_features[i] = bswap_64(header->adds_features[i]);

		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
			/* 64-bit guess failed: undo it, then try 32-bit swap. */
			for (i = 0; i < BITS_TO_LONGS(HEADER_FEAT_BITS); ++i) {
				header->adds_features[i] = bswap_64(header->adds_features[i]);
				header->adds_features[i] = bswap_32(header->adds_features[i]);
			}
		}

		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
			/* Neither guess worked: assume only build-ids exist. */
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
			set_bit(HEADER_BUILD_ID, header->adds_features);
		}
	}

	memcpy(&ph->adds_features, &header->adds_features,
	       sizeof(ph->adds_features));

	ph->event_offset = header->event_types.offset;
	ph->event_size   = header->event_types.size;
	ph->data_offset  = header->data.offset;
	ph->data_size	 = header->data.size;
	return 0;
}
1748 
1749 static int __event_process_build_id(struct build_id_event *bev,
1750 				    char *filename,
1751 				    struct perf_session *session)
1752 {
1753 	int err = -1;
1754 	struct list_head *head;
1755 	struct machine *machine;
1756 	u16 misc;
1757 	struct dso *dso;
1758 	enum dso_kernel_type dso_type;
1759 
1760 	machine = perf_session__findnew_machine(session, bev->pid);
1761 	if (!machine)
1762 		goto out;
1763 
1764 	misc = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
1765 
1766 	switch (misc) {
1767 	case PERF_RECORD_MISC_KERNEL:
1768 		dso_type = DSO_TYPE_KERNEL;
1769 		head = &machine->kernel_dsos;
1770 		break;
1771 	case PERF_RECORD_MISC_GUEST_KERNEL:
1772 		dso_type = DSO_TYPE_GUEST_KERNEL;
1773 		head = &machine->kernel_dsos;
1774 		break;
1775 	case PERF_RECORD_MISC_USER:
1776 	case PERF_RECORD_MISC_GUEST_USER:
1777 		dso_type = DSO_TYPE_USER;
1778 		head = &machine->user_dsos;
1779 		break;
1780 	default:
1781 		goto out;
1782 	}
1783 
1784 	dso = __dsos__findnew(head, filename);
1785 	if (dso != NULL) {
1786 		char sbuild_id[BUILD_ID_SIZE * 2 + 1];
1787 
1788 		dso__set_build_id(dso, &bev->build_id);
1789 
1790 		if (filename[0] == '[')
1791 			dso->kernel = dso_type;
1792 
1793 		build_id__sprintf(dso->build_id, sizeof(dso->build_id),
1794 				  sbuild_id);
1795 		pr_debug("build id event received for %s: %s\n",
1796 			 dso->long_name, sbuild_id);
1797 	}
1798 
1799 	err = 0;
1800 out:
1801 	return err;
1802 }
1803 
/*
 * Read a build-id table written in the pre-a1645ce1 layout, where
 * struct build_id_event lacked the 'pid' field. Each old record is
 * converted to the current layout before processing; the pid is
 * reconstructed from header.misc (host vs. guest).
 */
static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
						 int input, u64 offset, u64 size)
{
	struct perf_session *session = container_of(header, struct perf_session, header);
	struct {
		struct perf_event_header   header;
		u8			   build_id[ALIGN(BUILD_ID_SIZE, sizeof(u64))];
		char			   filename[0];
	} old_bev;
	struct build_id_event bev;
	char filename[PATH_MAX];
	u64 limit = offset + size;

	while (offset < limit) {
		ssize_t len;

		if (read(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
			return -1;

		if (header->needs_swap)
			perf_event_header__bswap(&old_bev.header);

		/* The filename immediately follows the fixed-size record. */
		len = old_bev.header.size - sizeof(old_bev);
		if (read(input, filename, len) != len)
			return -1;

		bev.header = old_bev.header;

		/*
		 * As the pid is the missing value, we need to fill
		 * it properly. The header.misc value give us nice hint.
		 */
		bev.pid	= HOST_KERNEL_ID;
		if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
		    bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
			bev.pid	= DEFAULT_GUEST_KERNEL_ID;

		memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
		__event_process_build_id(&bev, filename, session);

		/* header.size is the full old-record size incl. filename. */
		offset += bev.header.size;
	}

	return 0;
}
1849 
/*
 * Parse the HEADER_BUILD_ID feature section: a sequence of
 * build_id_event records, each followed by the DSO file name.
 * Falls back to the old (pre-pid) record layout when the telltale
 * truncated '[kernel.kallsyms]' name is detected.
 * Returns 0 on success, -1 on a short read.
 */
static int perf_header__read_build_ids(struct perf_header *header,
				       int input, u64 offset, u64 size)
{
	struct perf_session *session = container_of(header, struct perf_session, header);
	struct build_id_event bev;
	char filename[PATH_MAX];
	u64 limit = offset + size, orig_offset = offset;
	int err = -1;

	while (offset < limit) {
		ssize_t len;

		if (read(input, &bev, sizeof(bev)) != sizeof(bev))
			goto out;

		if (header->needs_swap)
			perf_event_header__bswap(&bev.header);

		/* File name trails the fixed-size record. */
		len = bev.header.size - sizeof(bev);
		if (read(input, filename, len) != len)
			goto out;
		/*
		 * The a1645ce1 changeset:
		 *
		 * "perf: 'perf kvm' tool for monitoring guest performance from host"
		 *
		 * Added a field to struct build_id_event that broke the file
		 * format.
		 *
		 * Since the kernel build-id is the first entry, process the
		 * table using the old format if the well known
		 * '[kernel.kallsyms]' string for the kernel build-id has the
		 * first 4 characters chopped off (where the pid_t sits).
		 */
		if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
			/* Rewind and re-parse the whole section as old-format. */
			if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
				return -1;
			return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
		}

		__event_process_build_id(&bev, filename, session);

		offset += bev.header.size;
	}
	err = 0;
out:
	return err;
}
1898 
/*
 * perf_header__process_sections() callback: consume one feature
 * section at read time. Only trace info and build-ids carry data
 * that must be parsed here; the descriptive features are no-ops.
 * Errors are logged but never abort the read (always returns 0).
 */
static int perf_file_section__process(struct perf_file_section *section,
				      struct perf_header *ph,
				      int feat, int fd, void *data __used)
{
	if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
		pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
			  "%d, continuing...\n", section->offset, feat);
		return 0;
	}

	switch (feat) {
	case HEADER_TRACE_INFO:
		trace_report(fd, false);
		break;

	case HEADER_BUILD_ID:
		if (perf_header__read_build_ids(ph, fd, section->offset, section->size))
			pr_debug("Failed to read buildids, continuing...\n");
		break;

	/* Descriptive features: recognized, but nothing to do at read time. */
	case HEADER_HOSTNAME:
	case HEADER_OSRELEASE:
	case HEADER_VERSION:
	case HEADER_ARCH:
	case HEADER_NRCPUS:
	case HEADER_CPUDESC:
	case HEADER_CPUID:
	case HEADER_TOTAL_MEM:
	case HEADER_CMDLINE:
	case HEADER_EVENT_DESC:
	case HEADER_CPU_TOPOLOGY:
	case HEADER_NUMA_TOPOLOGY:
		break;

	default:
		pr_debug("unknown feature %d, continuing...\n", feat);
	}

	return 0;
}
1939 
1940 static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
1941 				       struct perf_header *ph, int fd,
1942 				       bool repipe)
1943 {
1944 	if (readn(fd, header, sizeof(*header)) <= 0 ||
1945 	    memcmp(&header->magic, __perf_magic, sizeof(header->magic)))
1946 		return -1;
1947 
1948 	if (repipe && do_write(STDOUT_FILENO, header, sizeof(*header)) < 0)
1949 		return -1;
1950 
1951 	if (header->size != sizeof(*header)) {
1952 		u64 size = bswap_64(header->size);
1953 
1954 		if (size != sizeof(*header))
1955 			return -1;
1956 
1957 		ph->needs_swap = true;
1958 	}
1959 
1960 	return 0;
1961 }
1962 
1963 static int perf_header__read_pipe(struct perf_session *session, int fd)
1964 {
1965 	struct perf_header *header = &session->header;
1966 	struct perf_pipe_file_header f_header;
1967 
1968 	if (perf_file_header__read_pipe(&f_header, header, fd,
1969 					session->repipe) < 0) {
1970 		pr_debug("incompatible file format\n");
1971 		return -EINVAL;
1972 	}
1973 
1974 	session->fd = fd;
1975 
1976 	return 0;
1977 }
1978 
/*
 * Read the perf.data header and rebuild the session's evlist from it:
 * the attribute table, each evsel's sample ids, the trace event-type
 * table, and all feature sections. Positions the fd at the start of
 * the data area on success.
 */
int perf_session__read_header(struct perf_session *session, int fd)
{
	struct perf_header *header = &session->header;
	struct perf_file_header	f_header;
	struct perf_file_attr	f_attr;
	u64			f_id;
	int nr_attrs, nr_ids, i, j;

	session->evlist = perf_evlist__new(NULL, NULL);
	if (session->evlist == NULL)
		return -ENOMEM;

	/* Pipe input has a minimal header handled separately. */
	if (session->fd_pipe)
		return perf_header__read_pipe(session, fd);

	if (perf_file_header__read(&f_header, header, fd) < 0) {
		pr_debug("incompatible file format\n");
		return -EINVAL;
	}

	nr_attrs = f_header.attrs.size / sizeof(f_attr);
	lseek(fd, f_header.attrs.offset, SEEK_SET);

	for (i = 0; i < nr_attrs; i++) {
		struct perf_evsel *evsel;
		off_t tmp;

		if (readn(fd, &f_attr, sizeof(f_attr)) <= 0)
			goto out_errno;

		if (header->needs_swap)
			perf_event__attr_swap(&f_attr.attr);

		/* Save our spot in the attr table before seeking to the ids. */
		tmp = lseek(fd, 0, SEEK_CUR);
		evsel = perf_evsel__new(&f_attr.attr, i);

		if (evsel == NULL)
			goto out_delete_evlist;
		/*
		 * Do it before so that if perf_evsel__alloc_id fails, this
		 * entry gets purged too at perf_evlist__delete().
		 */
		perf_evlist__add(session->evlist, evsel);

		nr_ids = f_attr.ids.size / sizeof(u64);
		/*
		 * We don't have the cpu and thread maps on the header, so
		 * for allocating the perf_sample_id table we fake 1 cpu and
		 * hattr->ids threads.
		 */
		if (perf_evsel__alloc_id(evsel, 1, nr_ids))
			goto out_delete_evlist;

		lseek(fd, f_attr.ids.offset, SEEK_SET);

		for (j = 0; j < nr_ids; j++) {
			if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
				goto out_errno;

			perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);
		}

		/* Back to the attr table for the next entry. */
		lseek(fd, tmp, SEEK_SET);
	}

	/* Trace event-type table feeds the file-scope 'events' array. */
	if (f_header.event_types.size) {
		lseek(fd, f_header.event_types.offset, SEEK_SET);
		events = malloc(f_header.event_types.size);
		if (events == NULL)
			return -ENOMEM;
		/* NOTE(review): 'events' is not freed on the error paths below. */
		if (perf_header__getbuffer64(header, fd, events,
					     f_header.event_types.size))
			goto out_errno;
		event_count =  f_header.event_types.size / sizeof(struct perf_trace_event_type);
	}

	perf_header__process_sections(header, fd, NULL,
				      perf_file_section__process);

	lseek(fd, header->data_offset, SEEK_SET);

	header->frozen = 1;
	return 0;
out_errno:
	return -errno;

out_delete_evlist:
	perf_evlist__delete(session->evlist);
	session->evlist = NULL;
	return -ENOMEM;
}
2070 
2071 int perf_event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id,
2072 				perf_event__handler_t process,
2073 				struct perf_session *session)
2074 {
2075 	union perf_event *ev;
2076 	size_t size;
2077 	int err;
2078 
2079 	size = sizeof(struct perf_event_attr);
2080 	size = ALIGN(size, sizeof(u64));
2081 	size += sizeof(struct perf_event_header);
2082 	size += ids * sizeof(u64);
2083 
2084 	ev = malloc(size);
2085 
2086 	if (ev == NULL)
2087 		return -ENOMEM;
2088 
2089 	ev->attr.attr = *attr;
2090 	memcpy(ev->attr.id, id, ids * sizeof(u64));
2091 
2092 	ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
2093 	ev->attr.header.size = size;
2094 
2095 	err = process(ev, NULL, session);
2096 
2097 	free(ev);
2098 
2099 	return err;
2100 }
2101 
2102 int perf_session__synthesize_attrs(struct perf_session *session,
2103 				   perf_event__handler_t process)
2104 {
2105 	struct perf_evsel *attr;
2106 	int err = 0;
2107 
2108 	list_for_each_entry(attr, &session->evlist->entries, node) {
2109 		err = perf_event__synthesize_attr(&attr->attr, attr->ids,
2110 						  attr->id, process, session);
2111 		if (err) {
2112 			pr_debug("failed to create perf header attribute\n");
2113 			return err;
2114 		}
2115 	}
2116 
2117 	return err;
2118 }
2119 
2120 int perf_event__process_attr(union perf_event *event,
2121 			     struct perf_session *session)
2122 {
2123 	unsigned int i, ids, n_ids;
2124 	struct perf_evsel *evsel;
2125 
2126 	if (session->evlist == NULL) {
2127 		session->evlist = perf_evlist__new(NULL, NULL);
2128 		if (session->evlist == NULL)
2129 			return -ENOMEM;
2130 	}
2131 
2132 	evsel = perf_evsel__new(&event->attr.attr,
2133 				session->evlist->nr_entries);
2134 	if (evsel == NULL)
2135 		return -ENOMEM;
2136 
2137 	perf_evlist__add(session->evlist, evsel);
2138 
2139 	ids = event->header.size;
2140 	ids -= (void *)&event->attr.id - (void *)event;
2141 	n_ids = ids / sizeof(u64);
2142 	/*
2143 	 * We don't have the cpu and thread maps on the header, so
2144 	 * for allocating the perf_sample_id table we fake 1 cpu and
2145 	 * hattr->ids threads.
2146 	 */
2147 	if (perf_evsel__alloc_id(evsel, 1, n_ids))
2148 		return -ENOMEM;
2149 
2150 	for (i = 0; i < n_ids; i++) {
2151 		perf_evlist__id_add(session->evlist, evsel, 0, i,
2152 				    event->attr.id[i]);
2153 	}
2154 
2155 	perf_session__update_sample_type(session);
2156 
2157 	return 0;
2158 }
2159 
2160 int perf_event__synthesize_event_type(u64 event_id, char *name,
2161 				      perf_event__handler_t process,
2162 				      struct perf_session *session)
2163 {
2164 	union perf_event ev;
2165 	size_t size = 0;
2166 	int err = 0;
2167 
2168 	memset(&ev, 0, sizeof(ev));
2169 
2170 	ev.event_type.event_type.event_id = event_id;
2171 	memset(ev.event_type.event_type.name, 0, MAX_EVENT_NAME);
2172 	strncpy(ev.event_type.event_type.name, name, MAX_EVENT_NAME - 1);
2173 
2174 	ev.event_type.header.type = PERF_RECORD_HEADER_EVENT_TYPE;
2175 	size = strlen(name);
2176 	size = ALIGN(size, sizeof(u64));
2177 	ev.event_type.header.size = sizeof(ev.event_type) -
2178 		(sizeof(ev.event_type.event_type.name) - size);
2179 
2180 	err = process(&ev, NULL, session);
2181 
2182 	return err;
2183 }
2184 
2185 int perf_event__synthesize_event_types(perf_event__handler_t process,
2186 				       struct perf_session *session)
2187 {
2188 	struct perf_trace_event_type *type;
2189 	int i, err = 0;
2190 
2191 	for (i = 0; i < event_count; i++) {
2192 		type = &events[i];
2193 
2194 		err = perf_event__synthesize_event_type(type->event_id,
2195 							type->name, process,
2196 							session);
2197 		if (err) {
2198 			pr_debug("failed to create perf header event type\n");
2199 			return err;
2200 		}
2201 	}
2202 
2203 	return err;
2204 }
2205 
2206 int perf_event__process_event_type(union perf_event *event,
2207 				   struct perf_session *session __unused)
2208 {
2209 	if (perf_header__push_event(event->event_type.event_type.event_id,
2210 				    event->event_type.event_type.name) < 0)
2211 		return -ENOMEM;
2212 
2213 	return 0;
2214 }
2215 
2216 int perf_event__synthesize_tracing_data(int fd, struct perf_evlist *evlist,
2217 					 perf_event__handler_t process,
2218 				   struct perf_session *session __unused)
2219 {
2220 	union perf_event ev;
2221 	struct tracing_data *tdata;
2222 	ssize_t size = 0, aligned_size = 0, padding;
2223 	int err __used = 0;
2224 
2225 	/*
2226 	 * We are going to store the size of the data followed
2227 	 * by the data contents. Since the fd descriptor is a pipe,
2228 	 * we cannot seek back to store the size of the data once
2229 	 * we know it. Instead we:
2230 	 *
2231 	 * - write the tracing data to the temp file
2232 	 * - get/write the data size to pipe
2233 	 * - write the tracing data from the temp file
2234 	 *   to the pipe
2235 	 */
2236 	tdata = tracing_data_get(&evlist->entries, fd, true);
2237 	if (!tdata)
2238 		return -1;
2239 
2240 	memset(&ev, 0, sizeof(ev));
2241 
2242 	ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
2243 	size = tdata->size;
2244 	aligned_size = ALIGN(size, sizeof(u64));
2245 	padding = aligned_size - size;
2246 	ev.tracing_data.header.size = sizeof(ev.tracing_data);
2247 	ev.tracing_data.size = aligned_size;
2248 
2249 	process(&ev, NULL, session);
2250 
2251 	/*
2252 	 * The put function will copy all the tracing data
2253 	 * stored in temp file to the pipe.
2254 	 */
2255 	tracing_data_put(tdata);
2256 
2257 	write_padded(fd, NULL, 0, padding);
2258 
2259 	return aligned_size;
2260 }
2261 
2262 int perf_event__process_tracing_data(union perf_event *event,
2263 				     struct perf_session *session)
2264 {
2265 	ssize_t size_read, padding, size = event->tracing_data.size;
2266 	off_t offset = lseek(session->fd, 0, SEEK_CUR);
2267 	char buf[BUFSIZ];
2268 
2269 	/* setup for reading amidst mmap */
2270 	lseek(session->fd, offset + sizeof(struct tracing_data_event),
2271 	      SEEK_SET);
2272 
2273 	size_read = trace_report(session->fd, session->repipe);
2274 
2275 	padding = ALIGN(size_read, sizeof(u64)) - size_read;
2276 
2277 	if (read(session->fd, buf, padding) < 0)
2278 		die("reading input file");
2279 	if (session->repipe) {
2280 		int retw = write(STDOUT_FILENO, buf, padding);
2281 		if (retw <= 0 || retw != padding)
2282 			die("repiping tracing data padding");
2283 	}
2284 
2285 	if (size_read + padding != size)
2286 		die("tracing data size mismatch");
2287 
2288 	return size_read + padding;
2289 }
2290 
2291 int perf_event__synthesize_build_id(struct dso *pos, u16 misc,
2292 				    perf_event__handler_t process,
2293 				    struct machine *machine,
2294 				    struct perf_session *session)
2295 {
2296 	union perf_event ev;
2297 	size_t len;
2298 	int err = 0;
2299 
2300 	if (!pos->hit)
2301 		return err;
2302 
2303 	memset(&ev, 0, sizeof(ev));
2304 
2305 	len = pos->long_name_len + 1;
2306 	len = ALIGN(len, NAME_ALIGN);
2307 	memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
2308 	ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
2309 	ev.build_id.header.misc = misc;
2310 	ev.build_id.pid = machine->pid;
2311 	ev.build_id.header.size = sizeof(ev.build_id) + len;
2312 	memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
2313 
2314 	err = process(&ev, NULL, session);
2315 
2316 	return err;
2317 }
2318 
2319 int perf_event__process_build_id(union perf_event *event,
2320 				 struct perf_session *session)
2321 {
2322 	__event_process_build_id(&event->build_id,
2323 				 event->build_id.filename,
2324 				 session);
2325 	return 0;
2326 }
2327 
/* Disable caching of build-ids (checked before writing the cache). */
void disable_buildid_cache(void)
{
	no_buildid_cache = true;
}
2332