1 // SPDX-License-Identifier: GPL-2.0-only
2 
3 #include "util/cgroup.h"
4 #include "util/data.h"
5 #include "util/debug.h"
6 #include "util/dso.h"
7 #include "util/event.h"
8 #include "util/evlist.h"
9 #include "util/machine.h"
10 #include "util/map.h"
11 #include "util/map_symbol.h"
12 #include "util/branch.h"
13 #include "util/memswap.h"
14 #include "util/namespaces.h"
15 #include "util/session.h"
16 #include "util/stat.h"
17 #include "util/symbol.h"
18 #include "util/synthetic-events.h"
19 #include "util/target.h"
20 #include "util/time-utils.h"
21 #include <linux/bitops.h>
22 #include <linux/kernel.h>
23 #include <linux/string.h>
24 #include <linux/zalloc.h>
25 #include <linux/perf_event.h>
26 #include <asm/bug.h>
27 #include <perf/evsel.h>
28 #include <perf/cpumap.h>
29 #include <internal/lib.h> // page_size
30 #include <internal/threadmap.h>
31 #include <perf/threadmap.h>
32 #include <symbol/kallsyms.h>
33 #include <dirent.h>
34 #include <errno.h>
35 #include <inttypes.h>
36 #include <stdio.h>
37 #include <string.h>
38 #include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
39 #include <api/fs/fs.h>
40 #include <api/io.h>
41 #include <sys/types.h>
42 #include <sys/stat.h>
43 #include <fcntl.h>
44 #include <unistd.h>
45 
46 #define DEFAULT_PROC_MAP_PARSE_TIMEOUT 500
47 
48 unsigned int proc_map_timeout = DEFAULT_PROC_MAP_PARSE_TIMEOUT;
49 
50 int perf_tool__process_synth_event(struct perf_tool *tool,
51 				   union perf_event *event,
52 				   struct machine *machine,
53 				   perf_event__handler_t process)
54 {
55 	struct perf_sample synth_sample = {
56 		.pid	   = -1,
57 		.tid	   = -1,
58 		.time	   = -1,
59 		.stream_id = -1,
60 		.cpu	   = -1,
61 		.period	   = 1,
62 		.cpumode   = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK,
63 	};
64 
65 	return process(tool, event, &synth_sample, machine);
66 };
67 
68 /*
69  * Assumes that the first 4095 bytes of /proc/pid/stat contains
70  * the comm, tgid and ppid.
71  */
72 static int perf_event__get_comm_ids(pid_t pid, pid_t tid, char *comm, size_t len,
73 				    pid_t *tgid, pid_t *ppid, bool *kernel)
74 {
75 	char bf[4096];
76 	int fd;
77 	size_t size = 0;
78 	ssize_t n;
79 	char *name, *tgids, *ppids, *vmpeak, *threads;
80 
81 	*tgid = -1;
82 	*ppid = -1;
83 
84 	if (pid)
85 		snprintf(bf, sizeof(bf), "/proc/%d/task/%d/status", pid, tid);
86 	else
87 		snprintf(bf, sizeof(bf), "/proc/%d/status", tid);
88 
89 	fd = open(bf, O_RDONLY);
90 	if (fd < 0) {
91 		pr_debug("couldn't open %s\n", bf);
92 		return -1;
93 	}
94 
95 	n = read(fd, bf, sizeof(bf) - 1);
96 	close(fd);
97 	if (n <= 0) {
98 		pr_warning("Couldn't get COMM, tigd and ppid for pid %d\n",
99 			   tid);
100 		return -1;
101 	}
102 	bf[n] = '\0';
103 
104 	name = strstr(bf, "Name:");
105 	tgids = strstr(name ?: bf, "Tgid:");
106 	ppids = strstr(tgids ?: bf, "PPid:");
107 	vmpeak = strstr(ppids ?: bf, "VmPeak:");
108 
109 	if (vmpeak)
110 		threads = NULL;
111 	else
112 		threads = strstr(ppids ?: bf, "Threads:");
113 
114 	if (name) {
115 		char *nl;
116 
117 		name = skip_spaces(name + 5);  /* strlen("Name:") */
118 		nl = strchr(name, '\n');
119 		if (nl)
120 			*nl = '\0';
121 
122 		size = strlen(name);
123 		if (size >= len)
124 			size = len - 1;
125 		memcpy(comm, name, size);
126 		comm[size] = '\0';
127 	} else {
128 		pr_debug("Name: string not found for pid %d\n", tid);
129 	}
130 
131 	if (tgids) {
132 		tgids += 5;  /* strlen("Tgid:") */
133 		*tgid = atoi(tgids);
134 	} else {
135 		pr_debug("Tgid: string not found for pid %d\n", tid);
136 	}
137 
138 	if (ppids) {
139 		ppids += 5;  /* strlen("PPid:") */
140 		*ppid = atoi(ppids);
141 	} else {
142 		pr_debug("PPid: string not found for pid %d\n", tid);
143 	}
144 
145 	if (!vmpeak && threads)
146 		*kernel = true;
147 	else
148 		*kernel = false;
149 
150 	return 0;
151 }
152 
153 static int perf_event__prepare_comm(union perf_event *event, pid_t pid, pid_t tid,
154 				    struct machine *machine,
155 				    pid_t *tgid, pid_t *ppid, bool *kernel)
156 {
157 	size_t size;
158 
159 	*ppid = -1;
160 
161 	memset(&event->comm, 0, sizeof(event->comm));
162 
163 	if (machine__is_host(machine)) {
164 		if (perf_event__get_comm_ids(pid, tid, event->comm.comm,
165 					     sizeof(event->comm.comm),
166 					     tgid, ppid, kernel) != 0) {
167 			return -1;
168 		}
169 	} else {
170 		*tgid = machine->pid;
171 	}
172 
173 	if (*tgid < 0)
174 		return -1;
175 
176 	event->comm.pid = *tgid;
177 	event->comm.header.type = PERF_RECORD_COMM;
178 
179 	size = strlen(event->comm.comm) + 1;
180 	size = PERF_ALIGN(size, sizeof(u64));
181 	memset(event->comm.comm + size, 0, machine->id_hdr_size);
182 	event->comm.header.size = (sizeof(event->comm) -
183 				(sizeof(event->comm.comm) - size) +
184 				machine->id_hdr_size);
185 	event->comm.tid = tid;
186 
187 	return 0;
188 }
189 
190 pid_t perf_event__synthesize_comm(struct perf_tool *tool,
191 					 union perf_event *event, pid_t pid,
192 					 perf_event__handler_t process,
193 					 struct machine *machine)
194 {
195 	pid_t tgid, ppid;
196 	bool kernel_thread;
197 
198 	if (perf_event__prepare_comm(event, 0, pid, machine, &tgid, &ppid,
199 				     &kernel_thread) != 0)
200 		return -1;
201 
202 	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
203 		return -1;
204 
205 	return tgid;
206 }
207 
208 static void perf_event__get_ns_link_info(pid_t pid, const char *ns,
209 					 struct perf_ns_link_info *ns_link_info)
210 {
211 	struct stat64 st;
212 	char proc_ns[128];
213 
214 	sprintf(proc_ns, "/proc/%u/ns/%s", pid, ns);
215 	if (stat64(proc_ns, &st) == 0) {
216 		ns_link_info->dev = st.st_dev;
217 		ns_link_info->ino = st.st_ino;
218 	}
219 }
220 
221 int perf_event__synthesize_namespaces(struct perf_tool *tool,
222 				      union perf_event *event,
223 				      pid_t pid, pid_t tgid,
224 				      perf_event__handler_t process,
225 				      struct machine *machine)
226 {
227 	u32 idx;
228 	struct perf_ns_link_info *ns_link_info;
229 
230 	if (!tool || !tool->namespace_events)
231 		return 0;
232 
233 	memset(&event->namespaces, 0, (sizeof(event->namespaces) +
234 	       (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
235 	       machine->id_hdr_size));
236 
237 	event->namespaces.pid = tgid;
238 	event->namespaces.tid = pid;
239 
240 	event->namespaces.nr_namespaces = NR_NAMESPACES;
241 
242 	ns_link_info = event->namespaces.link_info;
243 
244 	for (idx = 0; idx < event->namespaces.nr_namespaces; idx++)
245 		perf_event__get_ns_link_info(pid, perf_ns__name(idx),
246 					     &ns_link_info[idx]);
247 
248 	event->namespaces.header.type = PERF_RECORD_NAMESPACES;
249 
250 	event->namespaces.header.size = (sizeof(event->namespaces) +
251 			(NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
252 			machine->id_hdr_size);
253 
254 	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
255 		return -1;
256 
257 	return 0;
258 }
259 
260 static int perf_event__synthesize_fork(struct perf_tool *tool,
261 				       union perf_event *event,
262 				       pid_t pid, pid_t tgid, pid_t ppid,
263 				       perf_event__handler_t process,
264 				       struct machine *machine)
265 {
266 	memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);
267 
268 	/*
269 	 * for main thread set parent to ppid from status file. For other
270 	 * threads set parent pid to main thread. ie., assume main thread
271 	 * spawns all threads in a process
272 	*/
273 	if (tgid == pid) {
274 		event->fork.ppid = ppid;
275 		event->fork.ptid = ppid;
276 	} else {
277 		event->fork.ppid = tgid;
278 		event->fork.ptid = tgid;
279 	}
280 	event->fork.pid  = tgid;
281 	event->fork.tid  = pid;
282 	event->fork.header.type = PERF_RECORD_FORK;
283 	event->fork.header.misc = PERF_RECORD_MISC_FORK_EXEC;
284 
285 	event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);
286 
287 	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
288 		return -1;
289 
290 	return 0;
291 }
292 
293 static bool read_proc_maps_line(struct io *io, __u64 *start, __u64 *end,
294 				u32 *prot, u32 *flags, __u64 *offset,
295 				u32 *maj, u32 *min,
296 				__u64 *inode,
297 				ssize_t pathname_size, char *pathname)
298 {
299 	__u64 temp;
300 	int ch;
301 	char *start_pathname = pathname;
302 
303 	if (io__get_hex(io, start) != '-')
304 		return false;
305 	if (io__get_hex(io, end) != ' ')
306 		return false;
307 
308 	/* map protection and flags bits */
309 	*prot = 0;
310 	ch = io__get_char(io);
311 	if (ch == 'r')
312 		*prot |= PROT_READ;
313 	else if (ch != '-')
314 		return false;
315 	ch = io__get_char(io);
316 	if (ch == 'w')
317 		*prot |= PROT_WRITE;
318 	else if (ch != '-')
319 		return false;
320 	ch = io__get_char(io);
321 	if (ch == 'x')
322 		*prot |= PROT_EXEC;
323 	else if (ch != '-')
324 		return false;
325 	ch = io__get_char(io);
326 	if (ch == 's')
327 		*flags = MAP_SHARED;
328 	else if (ch == 'p')
329 		*flags = MAP_PRIVATE;
330 	else
331 		return false;
332 	if (io__get_char(io) != ' ')
333 		return false;
334 
335 	if (io__get_hex(io, offset) != ' ')
336 		return false;
337 
338 	if (io__get_hex(io, &temp) != ':')
339 		return false;
340 	*maj = temp;
341 	if (io__get_hex(io, &temp) != ' ')
342 		return false;
343 	*min = temp;
344 
345 	ch = io__get_dec(io, inode);
346 	if (ch != ' ') {
347 		*pathname = '\0';
348 		return ch == '\n';
349 	}
350 	do {
351 		ch = io__get_char(io);
352 	} while (ch == ' ');
353 	while (true) {
354 		if (ch < 0)
355 			return false;
356 		if (ch == '\0' || ch == '\n' ||
357 		    (pathname + 1 - start_pathname) >= pathname_size) {
358 			*pathname = '\0';
359 			return true;
360 		}
361 		*pathname++ = ch;
362 		ch = io__get_char(io);
363 	}
364 }
365 
366 static void perf_record_mmap2__read_build_id(struct perf_record_mmap2 *event,
367 					     bool is_kernel)
368 {
369 	struct build_id bid;
370 	struct nsinfo *nsi;
371 	struct nscookie nc;
372 	int rc;
373 
374 	if (is_kernel) {
375 		rc = sysfs__read_build_id("/sys/kernel/notes", &bid);
376 		goto out;
377 	}
378 
379 	nsi = nsinfo__new(event->pid);
380 	nsinfo__mountns_enter(nsi, &nc);
381 
382 	rc = filename__read_build_id(event->filename, &bid) > 0 ? 0 : -1;
383 
384 	nsinfo__mountns_exit(&nc);
385 	nsinfo__put(nsi);
386 
387 out:
388 	if (rc == 0) {
389 		memcpy(event->build_id, bid.data, sizeof(bid.data));
390 		event->build_id_size = (u8) bid.size;
391 		event->header.misc |= PERF_RECORD_MISC_MMAP_BUILD_ID;
392 		event->__reserved_1 = 0;
393 		event->__reserved_2 = 0;
394 	} else {
395 		if (event->filename[0] == '/') {
396 			pr_debug2("Failed to read build ID for %s\n",
397 				  event->filename);
398 		}
399 	}
400 }
401 
402 int perf_event__synthesize_mmap_events(struct perf_tool *tool,
403 				       union perf_event *event,
404 				       pid_t pid, pid_t tgid,
405 				       perf_event__handler_t process,
406 				       struct machine *machine,
407 				       bool mmap_data)
408 {
409 	unsigned long long t;
410 	char bf[BUFSIZ];
411 	struct io io;
412 	bool truncation = false;
413 	unsigned long long timeout = proc_map_timeout * 1000000ULL;
414 	int rc = 0;
415 	const char *hugetlbfs_mnt = hugetlbfs__mountpoint();
416 	int hugetlbfs_mnt_len = hugetlbfs_mnt ? strlen(hugetlbfs_mnt) : 0;
417 
418 	if (machine__is_default_guest(machine))
419 		return 0;
420 
421 	snprintf(bf, sizeof(bf), "%s/proc/%d/task/%d/maps",
422 		machine->root_dir, pid, pid);
423 
424 	io.fd = open(bf, O_RDONLY, 0);
425 	if (io.fd < 0) {
426 		/*
427 		 * We raced with a task exiting - just return:
428 		 */
429 		pr_debug("couldn't open %s\n", bf);
430 		return -1;
431 	}
432 	io__init(&io, io.fd, bf, sizeof(bf));
433 
434 	event->header.type = PERF_RECORD_MMAP2;
435 	t = rdclock();
436 
437 	while (!io.eof) {
438 		static const char anonstr[] = "//anon";
439 		size_t size, aligned_size;
440 
441 		/* ensure null termination since stack will be reused. */
442 		event->mmap2.filename[0] = '\0';
443 
444 		/* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
445 		if (!read_proc_maps_line(&io,
446 					&event->mmap2.start,
447 					&event->mmap2.len,
448 					&event->mmap2.prot,
449 					&event->mmap2.flags,
450 					&event->mmap2.pgoff,
451 					&event->mmap2.maj,
452 					&event->mmap2.min,
453 					&event->mmap2.ino,
454 					sizeof(event->mmap2.filename),
455 					event->mmap2.filename))
456 			continue;
457 
458 		if ((rdclock() - t) > timeout) {
459 			pr_warning("Reading %s/proc/%d/task/%d/maps time out. "
460 				   "You may want to increase "
461 				   "the time limit by --proc-map-timeout\n",
462 				   machine->root_dir, pid, pid);
463 			truncation = true;
464 			goto out;
465 		}
466 
467 		event->mmap2.ino_generation = 0;
468 
469 		/*
470 		 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
471 		 */
472 		if (machine__is_host(machine))
473 			event->header.misc = PERF_RECORD_MISC_USER;
474 		else
475 			event->header.misc = PERF_RECORD_MISC_GUEST_USER;
476 
477 		if ((event->mmap2.prot & PROT_EXEC) == 0) {
478 			if (!mmap_data || (event->mmap2.prot & PROT_READ) == 0)
479 				continue;
480 
481 			event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
482 		}
483 
484 out:
485 		if (truncation)
486 			event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT;
487 
488 		if (!strcmp(event->mmap2.filename, ""))
489 			strcpy(event->mmap2.filename, anonstr);
490 
491 		if (hugetlbfs_mnt_len &&
492 		    !strncmp(event->mmap2.filename, hugetlbfs_mnt,
493 			     hugetlbfs_mnt_len)) {
494 			strcpy(event->mmap2.filename, anonstr);
495 			event->mmap2.flags |= MAP_HUGETLB;
496 		}
497 
498 		size = strlen(event->mmap2.filename) + 1;
499 		aligned_size = PERF_ALIGN(size, sizeof(u64));
500 		event->mmap2.len -= event->mmap.start;
501 		event->mmap2.header.size = (sizeof(event->mmap2) -
502 					(sizeof(event->mmap2.filename) - aligned_size));
503 		memset(event->mmap2.filename + size, 0, machine->id_hdr_size +
504 			(aligned_size - size));
505 		event->mmap2.header.size += machine->id_hdr_size;
506 		event->mmap2.pid = tgid;
507 		event->mmap2.tid = pid;
508 
509 		if (symbol_conf.buildid_mmap2)
510 			perf_record_mmap2__read_build_id(&event->mmap2, false);
511 
512 		if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
513 			rc = -1;
514 			break;
515 		}
516 
517 		if (truncation)
518 			break;
519 	}
520 
521 	close(io.fd);
522 	return rc;
523 }
524 
525 #ifdef HAVE_FILE_HANDLE
526 static int perf_event__synthesize_cgroup(struct perf_tool *tool,
527 					 union perf_event *event,
528 					 char *path, size_t mount_len,
529 					 perf_event__handler_t process,
530 					 struct machine *machine)
531 {
532 	size_t event_size = sizeof(event->cgroup) - sizeof(event->cgroup.path);
533 	size_t path_len = strlen(path) - mount_len + 1;
534 	struct {
535 		struct file_handle fh;
536 		uint64_t cgroup_id;
537 	} handle;
538 	int mount_id;
539 
540 	while (path_len % sizeof(u64))
541 		path[mount_len + path_len++] = '\0';
542 
543 	memset(&event->cgroup, 0, event_size);
544 
545 	event->cgroup.header.type = PERF_RECORD_CGROUP;
546 	event->cgroup.header.size = event_size + path_len + machine->id_hdr_size;
547 
548 	handle.fh.handle_bytes = sizeof(handle.cgroup_id);
549 	if (name_to_handle_at(AT_FDCWD, path, &handle.fh, &mount_id, 0) < 0) {
550 		pr_debug("stat failed: %s\n", path);
551 		return -1;
552 	}
553 
554 	event->cgroup.id = handle.cgroup_id;
555 	strncpy(event->cgroup.path, path + mount_len, path_len);
556 	memset(event->cgroup.path + path_len, 0, machine->id_hdr_size);
557 
558 	if (perf_tool__process_synth_event(tool, event, machine, process) < 0) {
559 		pr_debug("process synth event failed\n");
560 		return -1;
561 	}
562 
563 	return 0;
564 }
565 
566 static int perf_event__walk_cgroup_tree(struct perf_tool *tool,
567 					union perf_event *event,
568 					char *path, size_t mount_len,
569 					perf_event__handler_t process,
570 					struct machine *machine)
571 {
572 	size_t pos = strlen(path);
573 	DIR *d;
574 	struct dirent *dent;
575 	int ret = 0;
576 
577 	if (perf_event__synthesize_cgroup(tool, event, path, mount_len,
578 					  process, machine) < 0)
579 		return -1;
580 
581 	d = opendir(path);
582 	if (d == NULL) {
583 		pr_debug("failed to open directory: %s\n", path);
584 		return -1;
585 	}
586 
587 	while ((dent = readdir(d)) != NULL) {
588 		if (dent->d_type != DT_DIR)
589 			continue;
590 		if (!strcmp(dent->d_name, ".") ||
591 		    !strcmp(dent->d_name, ".."))
592 			continue;
593 
594 		/* any sane path should be less than PATH_MAX */
595 		if (strlen(path) + strlen(dent->d_name) + 1 >= PATH_MAX)
596 			continue;
597 
598 		if (path[pos - 1] != '/')
599 			strcat(path, "/");
600 		strcat(path, dent->d_name);
601 
602 		ret = perf_event__walk_cgroup_tree(tool, event, path,
603 						   mount_len, process, machine);
604 		if (ret < 0)
605 			break;
606 
607 		path[pos] = '\0';
608 	}
609 
610 	closedir(d);
611 	return ret;
612 }
613 
614 int perf_event__synthesize_cgroups(struct perf_tool *tool,
615 				   perf_event__handler_t process,
616 				   struct machine *machine)
617 {
618 	union perf_event event;
619 	char cgrp_root[PATH_MAX];
620 	size_t mount_len;  /* length of mount point in the path */
621 
622 	if (!tool || !tool->cgroup_events)
623 		return 0;
624 
625 	if (cgroupfs_find_mountpoint(cgrp_root, PATH_MAX, "perf_event") < 0) {
626 		pr_debug("cannot find cgroup mount point\n");
627 		return -1;
628 	}
629 
630 	mount_len = strlen(cgrp_root);
631 	/* make sure the path starts with a slash (after mount point) */
632 	strcat(cgrp_root, "/");
633 
634 	if (perf_event__walk_cgroup_tree(tool, &event, cgrp_root, mount_len,
635 					 process, machine) < 0)
636 		return -1;
637 
638 	return 0;
639 }
640 #else
641 int perf_event__synthesize_cgroups(struct perf_tool *tool __maybe_unused,
642 				   perf_event__handler_t process __maybe_unused,
643 				   struct machine *machine __maybe_unused)
644 {
645 	return -1;
646 }
647 #endif
648 
649 int perf_event__synthesize_modules(struct perf_tool *tool, perf_event__handler_t process,
650 				   struct machine *machine)
651 {
652 	int rc = 0;
653 	struct map *pos;
654 	struct maps *maps = machine__kernel_maps(machine);
655 	union perf_event *event;
656 	size_t size = symbol_conf.buildid_mmap2 ?
657 			sizeof(event->mmap2) : sizeof(event->mmap);
658 
659 	event = zalloc(size + machine->id_hdr_size);
660 	if (event == NULL) {
661 		pr_debug("Not enough memory synthesizing mmap event "
662 			 "for kernel modules\n");
663 		return -1;
664 	}
665 
666 	/*
667 	 * kernel uses 0 for user space maps, see kernel/perf_event.c
668 	 * __perf_event_mmap
669 	 */
670 	if (machine__is_host(machine))
671 		event->header.misc = PERF_RECORD_MISC_KERNEL;
672 	else
673 		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
674 
675 	maps__for_each_entry(maps, pos) {
676 		if (!__map__is_kmodule(pos))
677 			continue;
678 
679 		if (symbol_conf.buildid_mmap2) {
680 			size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
681 			event->mmap2.header.type = PERF_RECORD_MMAP2;
682 			event->mmap2.header.size = (sizeof(event->mmap2) -
683 						(sizeof(event->mmap2.filename) - size));
684 			memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
685 			event->mmap2.header.size += machine->id_hdr_size;
686 			event->mmap2.start = pos->start;
687 			event->mmap2.len   = pos->end - pos->start;
688 			event->mmap2.pid   = machine->pid;
689 
690 			memcpy(event->mmap2.filename, pos->dso->long_name,
691 			       pos->dso->long_name_len + 1);
692 
693 			perf_record_mmap2__read_build_id(&event->mmap2, false);
694 		} else {
695 			size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
696 			event->mmap.header.type = PERF_RECORD_MMAP;
697 			event->mmap.header.size = (sizeof(event->mmap) -
698 						(sizeof(event->mmap.filename) - size));
699 			memset(event->mmap.filename + size, 0, machine->id_hdr_size);
700 			event->mmap.header.size += machine->id_hdr_size;
701 			event->mmap.start = pos->start;
702 			event->mmap.len   = pos->end - pos->start;
703 			event->mmap.pid   = machine->pid;
704 
705 			memcpy(event->mmap.filename, pos->dso->long_name,
706 			       pos->dso->long_name_len + 1);
707 		}
708 
709 		if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
710 			rc = -1;
711 			break;
712 		}
713 	}
714 
715 	free(event);
716 	return rc;
717 }
718 
719 static int filter_task(const struct dirent *dirent)
720 {
721 	return isdigit(dirent->d_name[0]);
722 }
723 
724 static int __event__synthesize_thread(union perf_event *comm_event,
725 				      union perf_event *mmap_event,
726 				      union perf_event *fork_event,
727 				      union perf_event *namespaces_event,
728 				      pid_t pid, int full, perf_event__handler_t process,
729 				      struct perf_tool *tool, struct machine *machine,
730 				      bool needs_mmap, bool mmap_data)
731 {
732 	char filename[PATH_MAX];
733 	struct dirent **dirent;
734 	pid_t tgid, ppid;
735 	int rc = 0;
736 	int i, n;
737 
738 	/* special case: only send one comm event using passed in pid */
739 	if (!full) {
740 		tgid = perf_event__synthesize_comm(tool, comm_event, pid,
741 						   process, machine);
742 
743 		if (tgid == -1)
744 			return -1;
745 
746 		if (perf_event__synthesize_namespaces(tool, namespaces_event, pid,
747 						      tgid, process, machine) < 0)
748 			return -1;
749 
750 		/*
751 		 * send mmap only for thread group leader
752 		 * see thread__init_maps()
753 		 */
754 		if (pid == tgid && needs_mmap &&
755 		    perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
756 						       process, machine, mmap_data))
757 			return -1;
758 
759 		return 0;
760 	}
761 
762 	if (machine__is_default_guest(machine))
763 		return 0;
764 
765 	snprintf(filename, sizeof(filename), "%s/proc/%d/task",
766 		 machine->root_dir, pid);
767 
768 	n = scandir(filename, &dirent, filter_task, NULL);
769 	if (n < 0)
770 		return n;
771 
772 	for (i = 0; i < n; i++) {
773 		char *end;
774 		pid_t _pid;
775 		bool kernel_thread = false;
776 
777 		_pid = strtol(dirent[i]->d_name, &end, 10);
778 		if (*end)
779 			continue;
780 
781 		/* some threads may exit just after scan, ignore it */
782 		if (perf_event__prepare_comm(comm_event, pid, _pid, machine,
783 					     &tgid, &ppid, &kernel_thread) != 0)
784 			continue;
785 
786 		rc = -1;
787 		if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
788 						ppid, process, machine) < 0)
789 			break;
790 
791 		if (perf_event__synthesize_namespaces(tool, namespaces_event, _pid,
792 						      tgid, process, machine) < 0)
793 			break;
794 
795 		/*
796 		 * Send the prepared comm event
797 		 */
798 		if (perf_tool__process_synth_event(tool, comm_event, machine, process) != 0)
799 			break;
800 
801 		rc = 0;
802 		if (_pid == pid && !kernel_thread && needs_mmap) {
803 			/* process the parent's maps too */
804 			rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
805 						process, machine, mmap_data);
806 			if (rc)
807 				break;
808 		}
809 	}
810 
811 	for (i = 0; i < n; i++)
812 		zfree(&dirent[i]);
813 	free(dirent);
814 
815 	return rc;
816 }
817 
818 int perf_event__synthesize_thread_map(struct perf_tool *tool,
819 				      struct perf_thread_map *threads,
820 				      perf_event__handler_t process,
821 				      struct machine *machine,
822 				      bool needs_mmap, bool mmap_data)
823 {
824 	union perf_event *comm_event, *mmap_event, *fork_event;
825 	union perf_event *namespaces_event;
826 	int err = -1, thread, j;
827 
828 	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
829 	if (comm_event == NULL)
830 		goto out;
831 
832 	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
833 	if (mmap_event == NULL)
834 		goto out_free_comm;
835 
836 	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
837 	if (fork_event == NULL)
838 		goto out_free_mmap;
839 
840 	namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
841 				  (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
842 				  machine->id_hdr_size);
843 	if (namespaces_event == NULL)
844 		goto out_free_fork;
845 
846 	err = 0;
847 	for (thread = 0; thread < threads->nr; ++thread) {
848 		if (__event__synthesize_thread(comm_event, mmap_event,
849 					       fork_event, namespaces_event,
850 					       perf_thread_map__pid(threads, thread), 0,
851 					       process, tool, machine,
852 					       needs_mmap, mmap_data)) {
853 			err = -1;
854 			break;
855 		}
856 
857 		/*
858 		 * comm.pid is set to thread group id by
859 		 * perf_event__synthesize_comm
860 		 */
861 		if ((int) comm_event->comm.pid != perf_thread_map__pid(threads, thread)) {
862 			bool need_leader = true;
863 
864 			/* is thread group leader in thread_map? */
865 			for (j = 0; j < threads->nr; ++j) {
866 				if ((int) comm_event->comm.pid == perf_thread_map__pid(threads, j)) {
867 					need_leader = false;
868 					break;
869 				}
870 			}
871 
872 			/* if not, generate events for it */
873 			if (need_leader &&
874 			    __event__synthesize_thread(comm_event, mmap_event,
875 						       fork_event, namespaces_event,
876 						       comm_event->comm.pid, 0,
877 						       process, tool, machine,
878 						       needs_mmap, mmap_data)) {
879 				err = -1;
880 				break;
881 			}
882 		}
883 	}
884 	free(namespaces_event);
885 out_free_fork:
886 	free(fork_event);
887 out_free_mmap:
888 	free(mmap_event);
889 out_free_comm:
890 	free(comm_event);
891 out:
892 	return err;
893 }
894 
895 static int __perf_event__synthesize_threads(struct perf_tool *tool,
896 					    perf_event__handler_t process,
897 					    struct machine *machine,
898 					    bool needs_mmap,
899 					    bool mmap_data,
900 					    struct dirent **dirent,
901 					    int start,
902 					    int num)
903 {
904 	union perf_event *comm_event, *mmap_event, *fork_event;
905 	union perf_event *namespaces_event;
906 	int err = -1;
907 	char *end;
908 	pid_t pid;
909 	int i;
910 
911 	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
912 	if (comm_event == NULL)
913 		goto out;
914 
915 	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
916 	if (mmap_event == NULL)
917 		goto out_free_comm;
918 
919 	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
920 	if (fork_event == NULL)
921 		goto out_free_mmap;
922 
923 	namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
924 				  (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
925 				  machine->id_hdr_size);
926 	if (namespaces_event == NULL)
927 		goto out_free_fork;
928 
929 	for (i = start; i < start + num; i++) {
930 		if (!isdigit(dirent[i]->d_name[0]))
931 			continue;
932 
933 		pid = (pid_t)strtol(dirent[i]->d_name, &end, 10);
934 		/* only interested in proper numerical dirents */
935 		if (*end)
936 			continue;
937 		/*
938 		 * We may race with exiting thread, so don't stop just because
939 		 * one thread couldn't be synthesized.
940 		 */
941 		__event__synthesize_thread(comm_event, mmap_event, fork_event,
942 					   namespaces_event, pid, 1, process,
943 					   tool, machine, needs_mmap, mmap_data);
944 	}
945 	err = 0;
946 
947 	free(namespaces_event);
948 out_free_fork:
949 	free(fork_event);
950 out_free_mmap:
951 	free(mmap_event);
952 out_free_comm:
953 	free(comm_event);
954 out:
955 	return err;
956 }
957 
958 struct synthesize_threads_arg {
959 	struct perf_tool *tool;
960 	perf_event__handler_t process;
961 	struct machine *machine;
962 	bool needs_mmap;
963 	bool mmap_data;
964 	struct dirent **dirent;
965 	int num;
966 	int start;
967 };
968 
969 static void *synthesize_threads_worker(void *arg)
970 {
971 	struct synthesize_threads_arg *args = arg;
972 
973 	__perf_event__synthesize_threads(args->tool, args->process,
974 					 args->machine,
975 					 args->needs_mmap, args->mmap_data,
976 					 args->dirent,
977 					 args->start, args->num);
978 	return NULL;
979 }
980 
981 int perf_event__synthesize_threads(struct perf_tool *tool,
982 				   perf_event__handler_t process,
983 				   struct machine *machine,
984 				   bool needs_mmap, bool mmap_data,
985 				   unsigned int nr_threads_synthesize)
986 {
987 	struct synthesize_threads_arg *args = NULL;
988 	pthread_t *synthesize_threads = NULL;
989 	char proc_path[PATH_MAX];
990 	struct dirent **dirent;
991 	int num_per_thread;
992 	int m, n, i, j;
993 	int thread_nr;
994 	int base = 0;
995 	int err = -1;
996 
997 
998 	if (machine__is_default_guest(machine))
999 		return 0;
1000 
1001 	snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
1002 	n = scandir(proc_path, &dirent, filter_task, NULL);
1003 	if (n < 0)
1004 		return err;
1005 
1006 	if (nr_threads_synthesize == UINT_MAX)
1007 		thread_nr = sysconf(_SC_NPROCESSORS_ONLN);
1008 	else
1009 		thread_nr = nr_threads_synthesize;
1010 
1011 	if (thread_nr <= 1) {
1012 		err = __perf_event__synthesize_threads(tool, process,
1013 						       machine,
1014 						       needs_mmap, mmap_data,
1015 						       dirent, base, n);
1016 		goto free_dirent;
1017 	}
1018 	if (thread_nr > n)
1019 		thread_nr = n;
1020 
1021 	synthesize_threads = calloc(sizeof(pthread_t), thread_nr);
1022 	if (synthesize_threads == NULL)
1023 		goto free_dirent;
1024 
1025 	args = calloc(sizeof(*args), thread_nr);
1026 	if (args == NULL)
1027 		goto free_threads;
1028 
1029 	num_per_thread = n / thread_nr;
1030 	m = n % thread_nr;
1031 	for (i = 0; i < thread_nr; i++) {
1032 		args[i].tool = tool;
1033 		args[i].process = process;
1034 		args[i].machine = machine;
1035 		args[i].needs_mmap = needs_mmap;
1036 		args[i].mmap_data = mmap_data;
1037 		args[i].dirent = dirent;
1038 	}
1039 	for (i = 0; i < m; i++) {
1040 		args[i].num = num_per_thread + 1;
1041 		args[i].start = i * args[i].num;
1042 	}
1043 	if (i != 0)
1044 		base = args[i-1].start + args[i-1].num;
1045 	for (j = i; j < thread_nr; j++) {
1046 		args[j].num = num_per_thread;
1047 		args[j].start = base + (j - i) * args[i].num;
1048 	}
1049 
1050 	for (i = 0; i < thread_nr; i++) {
1051 		if (pthread_create(&synthesize_threads[i], NULL,
1052 				   synthesize_threads_worker, &args[i]))
1053 			goto out_join;
1054 	}
1055 	err = 0;
1056 out_join:
1057 	for (i = 0; i < thread_nr; i++)
1058 		pthread_join(synthesize_threads[i], NULL);
1059 	free(args);
1060 free_threads:
1061 	free(synthesize_threads);
1062 free_dirent:
1063 	for (i = 0; i < n; i++)
1064 		zfree(&dirent[i]);
1065 	free(dirent);
1066 
1067 	return err;
1068 }
1069 
1070 int __weak perf_event__synthesize_extra_kmaps(struct perf_tool *tool __maybe_unused,
1071 					      perf_event__handler_t process __maybe_unused,
1072 					      struct machine *machine __maybe_unused)
1073 {
1074 	return 0;
1075 }
1076 
1077 static int __perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
1078 						perf_event__handler_t process,
1079 						struct machine *machine)
1080 {
1081 	union perf_event *event;
1082 	size_t size = symbol_conf.buildid_mmap2 ?
1083 			sizeof(event->mmap2) : sizeof(event->mmap);
1084 	struct map *map = machine__kernel_map(machine);
1085 	struct kmap *kmap;
1086 	int err;
1087 
1088 	if (map == NULL)
1089 		return -1;
1090 
1091 	kmap = map__kmap(map);
1092 	if (!kmap->ref_reloc_sym)
1093 		return -1;
1094 
1095 	/*
1096 	 * We should get this from /sys/kernel/sections/.text, but till that is
1097 	 * available use this, and after it is use this as a fallback for older
1098 	 * kernels.
1099 	 */
1100 	event = zalloc(size + machine->id_hdr_size);
1101 	if (event == NULL) {
1102 		pr_debug("Not enough memory synthesizing mmap event "
1103 			 "for kernel modules\n");
1104 		return -1;
1105 	}
1106 
1107 	if (machine__is_host(machine)) {
1108 		/*
1109 		 * kernel uses PERF_RECORD_MISC_USER for user space maps,
1110 		 * see kernel/perf_event.c __perf_event_mmap
1111 		 */
1112 		event->header.misc = PERF_RECORD_MISC_KERNEL;
1113 	} else {
1114 		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
1115 	}
1116 
1117 	if (symbol_conf.buildid_mmap2) {
1118 		size = snprintf(event->mmap2.filename, sizeof(event->mmap2.filename),
1119 				"%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1;
1120 		size = PERF_ALIGN(size, sizeof(u64));
1121 		event->mmap2.header.type = PERF_RECORD_MMAP2;
1122 		event->mmap2.header.size = (sizeof(event->mmap2) -
1123 				(sizeof(event->mmap2.filename) - size) + machine->id_hdr_size);
1124 		event->mmap2.pgoff = kmap->ref_reloc_sym->addr;
1125 		event->mmap2.start = map->start;
1126 		event->mmap2.len   = map->end - event->mmap.start;
1127 		event->mmap2.pid   = machine->pid;
1128 
1129 		perf_record_mmap2__read_build_id(&event->mmap2, true);
1130 	} else {
1131 		size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
1132 				"%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1;
1133 		size = PERF_ALIGN(size, sizeof(u64));
1134 		event->mmap.header.type = PERF_RECORD_MMAP;
1135 		event->mmap.header.size = (sizeof(event->mmap) -
1136 				(sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
1137 		event->mmap.pgoff = kmap->ref_reloc_sym->addr;
1138 		event->mmap.start = map->start;
1139 		event->mmap.len   = map->end - event->mmap.start;
1140 		event->mmap.pid   = machine->pid;
1141 	}
1142 
1143 	err = perf_tool__process_synth_event(tool, event, machine, process);
1144 	free(event);
1145 
1146 	return err;
1147 }
1148 
1149 int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
1150 				       perf_event__handler_t process,
1151 				       struct machine *machine)
1152 {
1153 	int err;
1154 
1155 	err = __perf_event__synthesize_kernel_mmap(tool, process, machine);
1156 	if (err < 0)
1157 		return err;
1158 
1159 	return perf_event__synthesize_extra_kmaps(tool, process, machine);
1160 }
1161 
1162 int perf_event__synthesize_thread_map2(struct perf_tool *tool,
1163 				      struct perf_thread_map *threads,
1164 				      perf_event__handler_t process,
1165 				      struct machine *machine)
1166 {
1167 	union perf_event *event;
1168 	int i, err, size;
1169 
1170 	size  = sizeof(event->thread_map);
1171 	size +=	threads->nr * sizeof(event->thread_map.entries[0]);
1172 
1173 	event = zalloc(size);
1174 	if (!event)
1175 		return -ENOMEM;
1176 
1177 	event->header.type = PERF_RECORD_THREAD_MAP;
1178 	event->header.size = size;
1179 	event->thread_map.nr = threads->nr;
1180 
1181 	for (i = 0; i < threads->nr; i++) {
1182 		struct perf_record_thread_map_entry *entry = &event->thread_map.entries[i];
1183 		char *comm = perf_thread_map__comm(threads, i);
1184 
1185 		if (!comm)
1186 			comm = (char *) "";
1187 
1188 		entry->pid = perf_thread_map__pid(threads, i);
1189 		strncpy((char *) &entry->comm, comm, sizeof(entry->comm));
1190 	}
1191 
1192 	err = process(tool, event, NULL, machine);
1193 
1194 	free(event);
1195 	return err;
1196 }
1197 
1198 static void synthesize_cpus(struct perf_record_cpu_map_data *data,
1199 			    const struct perf_cpu_map *map)
1200 {
1201 	int i, map_nr = perf_cpu_map__nr(map);
1202 
1203 	data->cpus_data.nr = map_nr;
1204 
1205 	for (i = 0; i < map_nr; i++)
1206 		data->cpus_data.cpu[i] = perf_cpu_map__cpu(map, i).cpu;
1207 }
1208 
1209 static void synthesize_mask(struct perf_record_cpu_map_data *data,
1210 			    const struct perf_cpu_map *map, int max)
1211 {
1212 	int idx;
1213 	struct perf_cpu cpu;
1214 
1215 	/* Due to padding, the 4bytes per entry mask variant is always smaller. */
1216 	data->mask32_data.nr = BITS_TO_U32(max);
1217 	data->mask32_data.long_size = 4;
1218 
1219 	perf_cpu_map__for_each_cpu(cpu, idx, map) {
1220 		int bit_word = cpu.cpu / 32;
1221 		__u32 bit_mask = 1U << (cpu.cpu & 31);
1222 
1223 		data->mask32_data.mask[bit_word] |= bit_mask;
1224 	}
1225 }
1226 
1227 static size_t cpus_size(const struct perf_cpu_map *map)
1228 {
1229 	return sizeof(struct cpu_map_entries) + perf_cpu_map__nr(map) * sizeof(u16);
1230 }
1231 
1232 static size_t mask_size(const struct perf_cpu_map *map, int *max)
1233 {
1234 	*max = perf_cpu_map__max(map).cpu;
1235 	return sizeof(struct perf_record_mask_cpu_map32) + BITS_TO_U32(*max) * sizeof(__u32);
1236 }
1237 
1238 static void *cpu_map_data__alloc(const struct perf_cpu_map *map, size_t *size,
1239 				 u16 *type, int *max)
1240 {
1241 	size_t size_cpus, size_mask;
1242 	bool is_dummy = perf_cpu_map__empty(map);
1243 
1244 	/*
1245 	 * Both array and mask data have variable size based
1246 	 * on the number of cpus and their actual values.
1247 	 * The size of the 'struct perf_record_cpu_map_data' is:
1248 	 *
1249 	 *   array = size of 'struct cpu_map_entries' +
1250 	 *           number of cpus * sizeof(u64)
1251 	 *
1252 	 *   mask  = size of 'struct perf_record_record_cpu_map' +
1253 	 *           maximum cpu bit converted to size of longs
1254 	 *
1255 	 * and finally + the size of 'struct perf_record_cpu_map_data'.
1256 	 */
1257 	size_cpus = cpus_size(map);
1258 	size_mask = mask_size(map, max);
1259 
1260 	if (is_dummy || (size_cpus < size_mask)) {
1261 		*size += size_cpus;
1262 		*type  = PERF_CPU_MAP__CPUS;
1263 	} else {
1264 		*size += size_mask;
1265 		*type  = PERF_CPU_MAP__MASK;
1266 	}
1267 
1268 	*size += sizeof(__u16); /* For perf_record_cpu_map_data.type. */
1269 	*size = PERF_ALIGN(*size, sizeof(u64));
1270 	return zalloc(*size);
1271 }
1272 
1273 static void cpu_map_data__synthesize(struct perf_record_cpu_map_data *data,
1274 				     const struct perf_cpu_map *map,
1275 				     u16 type, int max)
1276 {
1277 	data->type = type;
1278 
1279 	switch (type) {
1280 	case PERF_CPU_MAP__CPUS:
1281 		synthesize_cpus(data, map);
1282 		break;
1283 	case PERF_CPU_MAP__MASK:
1284 		synthesize_mask(data, map, max);
1285 	default:
1286 		break;
1287 	}
1288 }
1289 
1290 static struct perf_record_cpu_map *cpu_map_event__new(const struct perf_cpu_map *map)
1291 {
1292 	size_t size = sizeof(struct perf_event_header);
1293 	struct perf_record_cpu_map *event;
1294 	int max;
1295 	u16 type;
1296 
1297 	event = cpu_map_data__alloc(map, &size, &type, &max);
1298 	if (!event)
1299 		return NULL;
1300 
1301 	event->header.type = PERF_RECORD_CPU_MAP;
1302 	event->header.size = size;
1303 	event->data.type   = type;
1304 
1305 	cpu_map_data__synthesize(&event->data, map, type, max);
1306 	return event;
1307 }
1308 
1309 int perf_event__synthesize_cpu_map(struct perf_tool *tool,
1310 				   const struct perf_cpu_map *map,
1311 				   perf_event__handler_t process,
1312 				   struct machine *machine)
1313 {
1314 	struct perf_record_cpu_map *event;
1315 	int err;
1316 
1317 	event = cpu_map_event__new(map);
1318 	if (!event)
1319 		return -ENOMEM;
1320 
1321 	err = process(tool, (union perf_event *) event, NULL, machine);
1322 
1323 	free(event);
1324 	return err;
1325 }
1326 
1327 int perf_event__synthesize_stat_config(struct perf_tool *tool,
1328 				       struct perf_stat_config *config,
1329 				       perf_event__handler_t process,
1330 				       struct machine *machine)
1331 {
1332 	struct perf_record_stat_config *event;
1333 	int size, i = 0, err;
1334 
1335 	size  = sizeof(*event);
1336 	size += (PERF_STAT_CONFIG_TERM__MAX * sizeof(event->data[0]));
1337 
1338 	event = zalloc(size);
1339 	if (!event)
1340 		return -ENOMEM;
1341 
1342 	event->header.type = PERF_RECORD_STAT_CONFIG;
1343 	event->header.size = size;
1344 	event->nr          = PERF_STAT_CONFIG_TERM__MAX;
1345 
1346 #define ADD(__term, __val)					\
1347 	event->data[i].tag = PERF_STAT_CONFIG_TERM__##__term;	\
1348 	event->data[i].val = __val;				\
1349 	i++;
1350 
1351 	ADD(AGGR_MODE,	config->aggr_mode)
1352 	ADD(INTERVAL,	config->interval)
1353 	ADD(SCALE,	config->scale)
1354 
1355 	WARN_ONCE(i != PERF_STAT_CONFIG_TERM__MAX,
1356 		  "stat config terms unbalanced\n");
1357 #undef ADD
1358 
1359 	err = process(tool, (union perf_event *) event, NULL, machine);
1360 
1361 	free(event);
1362 	return err;
1363 }
1364 
1365 int perf_event__synthesize_stat(struct perf_tool *tool,
1366 				struct perf_cpu cpu, u32 thread, u64 id,
1367 				struct perf_counts_values *count,
1368 				perf_event__handler_t process,
1369 				struct machine *machine)
1370 {
1371 	struct perf_record_stat event;
1372 
1373 	event.header.type = PERF_RECORD_STAT;
1374 	event.header.size = sizeof(event);
1375 	event.header.misc = 0;
1376 
1377 	event.id        = id;
1378 	event.cpu       = cpu.cpu;
1379 	event.thread    = thread;
1380 	event.val       = count->val;
1381 	event.ena       = count->ena;
1382 	event.run       = count->run;
1383 
1384 	return process(tool, (union perf_event *) &event, NULL, machine);
1385 }
1386 
1387 int perf_event__synthesize_stat_round(struct perf_tool *tool,
1388 				      u64 evtime, u64 type,
1389 				      perf_event__handler_t process,
1390 				      struct machine *machine)
1391 {
1392 	struct perf_record_stat_round event;
1393 
1394 	event.header.type = PERF_RECORD_STAT_ROUND;
1395 	event.header.size = sizeof(event);
1396 	event.header.misc = 0;
1397 
1398 	event.time = evtime;
1399 	event.type = type;
1400 
1401 	return process(tool, (union perf_event *) &event, NULL, machine);
1402 }
1403 
1404 size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type, u64 read_format)
1405 {
1406 	size_t sz, result = sizeof(struct perf_record_sample);
1407 
1408 	if (type & PERF_SAMPLE_IDENTIFIER)
1409 		result += sizeof(u64);
1410 
1411 	if (type & PERF_SAMPLE_IP)
1412 		result += sizeof(u64);
1413 
1414 	if (type & PERF_SAMPLE_TID)
1415 		result += sizeof(u64);
1416 
1417 	if (type & PERF_SAMPLE_TIME)
1418 		result += sizeof(u64);
1419 
1420 	if (type & PERF_SAMPLE_ADDR)
1421 		result += sizeof(u64);
1422 
1423 	if (type & PERF_SAMPLE_ID)
1424 		result += sizeof(u64);
1425 
1426 	if (type & PERF_SAMPLE_STREAM_ID)
1427 		result += sizeof(u64);
1428 
1429 	if (type & PERF_SAMPLE_CPU)
1430 		result += sizeof(u64);
1431 
1432 	if (type & PERF_SAMPLE_PERIOD)
1433 		result += sizeof(u64);
1434 
1435 	if (type & PERF_SAMPLE_READ) {
1436 		result += sizeof(u64);
1437 		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1438 			result += sizeof(u64);
1439 		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1440 			result += sizeof(u64);
1441 		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
1442 		if (read_format & PERF_FORMAT_GROUP) {
1443 			sz = sample_read_value_size(read_format);
1444 			result += sz * sample->read.group.nr;
1445 		} else {
1446 			result += sizeof(u64);
1447 			if (read_format & PERF_FORMAT_LOST)
1448 				result += sizeof(u64);
1449 		}
1450 	}
1451 
1452 	if (type & PERF_SAMPLE_CALLCHAIN) {
1453 		sz = (sample->callchain->nr + 1) * sizeof(u64);
1454 		result += sz;
1455 	}
1456 
1457 	if (type & PERF_SAMPLE_RAW) {
1458 		result += sizeof(u32);
1459 		result += sample->raw_size;
1460 	}
1461 
1462 	if (type & PERF_SAMPLE_BRANCH_STACK) {
1463 		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
1464 		/* nr, hw_idx */
1465 		sz += 2 * sizeof(u64);
1466 		result += sz;
1467 	}
1468 
1469 	if (type & PERF_SAMPLE_REGS_USER) {
1470 		if (sample->user_regs.abi) {
1471 			result += sizeof(u64);
1472 			sz = hweight64(sample->user_regs.mask) * sizeof(u64);
1473 			result += sz;
1474 		} else {
1475 			result += sizeof(u64);
1476 		}
1477 	}
1478 
1479 	if (type & PERF_SAMPLE_STACK_USER) {
1480 		sz = sample->user_stack.size;
1481 		result += sizeof(u64);
1482 		if (sz) {
1483 			result += sz;
1484 			result += sizeof(u64);
1485 		}
1486 	}
1487 
1488 	if (type & PERF_SAMPLE_WEIGHT_TYPE)
1489 		result += sizeof(u64);
1490 
1491 	if (type & PERF_SAMPLE_DATA_SRC)
1492 		result += sizeof(u64);
1493 
1494 	if (type & PERF_SAMPLE_TRANSACTION)
1495 		result += sizeof(u64);
1496 
1497 	if (type & PERF_SAMPLE_REGS_INTR) {
1498 		if (sample->intr_regs.abi) {
1499 			result += sizeof(u64);
1500 			sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
1501 			result += sz;
1502 		} else {
1503 			result += sizeof(u64);
1504 		}
1505 	}
1506 
1507 	if (type & PERF_SAMPLE_PHYS_ADDR)
1508 		result += sizeof(u64);
1509 
1510 	if (type & PERF_SAMPLE_CGROUP)
1511 		result += sizeof(u64);
1512 
1513 	if (type & PERF_SAMPLE_DATA_PAGE_SIZE)
1514 		result += sizeof(u64);
1515 
1516 	if (type & PERF_SAMPLE_CODE_PAGE_SIZE)
1517 		result += sizeof(u64);
1518 
1519 	if (type & PERF_SAMPLE_AUX) {
1520 		result += sizeof(u64);
1521 		result += sample->aux_sample.size;
1522 	}
1523 
1524 	return result;
1525 }
1526 
1527 void __weak arch_perf_synthesize_sample_weight(const struct perf_sample *data,
1528 					       __u64 *array, u64 type __maybe_unused)
1529 {
1530 	*array = data->weight;
1531 }
1532 
1533 static __u64 *copy_read_group_values(__u64 *array, __u64 read_format,
1534 				     const struct perf_sample *sample)
1535 {
1536 	size_t sz = sample_read_value_size(read_format);
1537 	struct sample_read_value *v = sample->read.group.values;
1538 
1539 	sample_read_group__for_each(v, sample->read.group.nr, read_format) {
1540 		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
1541 		memcpy(array, v, sz);
1542 		array = (void *)array + sz;
1543 	}
1544 	return array;
1545 }
1546 
1547 int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_format,
1548 				  const struct perf_sample *sample)
1549 {
1550 	__u64 *array;
1551 	size_t sz;
1552 	/*
1553 	 * used for cross-endian analysis. See git commit 65014ab3
1554 	 * for why this goofiness is needed.
1555 	 */
1556 	union u64_swap u;
1557 
1558 	array = event->sample.array;
1559 
1560 	if (type & PERF_SAMPLE_IDENTIFIER) {
1561 		*array = sample->id;
1562 		array++;
1563 	}
1564 
1565 	if (type & PERF_SAMPLE_IP) {
1566 		*array = sample->ip;
1567 		array++;
1568 	}
1569 
1570 	if (type & PERF_SAMPLE_TID) {
1571 		u.val32[0] = sample->pid;
1572 		u.val32[1] = sample->tid;
1573 		*array = u.val64;
1574 		array++;
1575 	}
1576 
1577 	if (type & PERF_SAMPLE_TIME) {
1578 		*array = sample->time;
1579 		array++;
1580 	}
1581 
1582 	if (type & PERF_SAMPLE_ADDR) {
1583 		*array = sample->addr;
1584 		array++;
1585 	}
1586 
1587 	if (type & PERF_SAMPLE_ID) {
1588 		*array = sample->id;
1589 		array++;
1590 	}
1591 
1592 	if (type & PERF_SAMPLE_STREAM_ID) {
1593 		*array = sample->stream_id;
1594 		array++;
1595 	}
1596 
1597 	if (type & PERF_SAMPLE_CPU) {
1598 		u.val32[0] = sample->cpu;
1599 		u.val32[1] = 0;
1600 		*array = u.val64;
1601 		array++;
1602 	}
1603 
1604 	if (type & PERF_SAMPLE_PERIOD) {
1605 		*array = sample->period;
1606 		array++;
1607 	}
1608 
1609 	if (type & PERF_SAMPLE_READ) {
1610 		if (read_format & PERF_FORMAT_GROUP)
1611 			*array = sample->read.group.nr;
1612 		else
1613 			*array = sample->read.one.value;
1614 		array++;
1615 
1616 		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
1617 			*array = sample->read.time_enabled;
1618 			array++;
1619 		}
1620 
1621 		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
1622 			*array = sample->read.time_running;
1623 			array++;
1624 		}
1625 
1626 		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
1627 		if (read_format & PERF_FORMAT_GROUP) {
1628 			array = copy_read_group_values(array, read_format,
1629 						       sample);
1630 		} else {
1631 			*array = sample->read.one.id;
1632 			array++;
1633 
1634 			if (read_format & PERF_FORMAT_LOST) {
1635 				*array = sample->read.one.lost;
1636 				array++;
1637 			}
1638 		}
1639 	}
1640 
1641 	if (type & PERF_SAMPLE_CALLCHAIN) {
1642 		sz = (sample->callchain->nr + 1) * sizeof(u64);
1643 		memcpy(array, sample->callchain, sz);
1644 		array = (void *)array + sz;
1645 	}
1646 
1647 	if (type & PERF_SAMPLE_RAW) {
1648 		u.val32[0] = sample->raw_size;
1649 		*array = u.val64;
1650 		array = (void *)array + sizeof(u32);
1651 
1652 		memcpy(array, sample->raw_data, sample->raw_size);
1653 		array = (void *)array + sample->raw_size;
1654 	}
1655 
1656 	if (type & PERF_SAMPLE_BRANCH_STACK) {
1657 		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
1658 		/* nr, hw_idx */
1659 		sz += 2 * sizeof(u64);
1660 		memcpy(array, sample->branch_stack, sz);
1661 		array = (void *)array + sz;
1662 	}
1663 
1664 	if (type & PERF_SAMPLE_REGS_USER) {
1665 		if (sample->user_regs.abi) {
1666 			*array++ = sample->user_regs.abi;
1667 			sz = hweight64(sample->user_regs.mask) * sizeof(u64);
1668 			memcpy(array, sample->user_regs.regs, sz);
1669 			array = (void *)array + sz;
1670 		} else {
1671 			*array++ = 0;
1672 		}
1673 	}
1674 
1675 	if (type & PERF_SAMPLE_STACK_USER) {
1676 		sz = sample->user_stack.size;
1677 		*array++ = sz;
1678 		if (sz) {
1679 			memcpy(array, sample->user_stack.data, sz);
1680 			array = (void *)array + sz;
1681 			*array++ = sz;
1682 		}
1683 	}
1684 
1685 	if (type & PERF_SAMPLE_WEIGHT_TYPE) {
1686 		arch_perf_synthesize_sample_weight(sample, array, type);
1687 		array++;
1688 	}
1689 
1690 	if (type & PERF_SAMPLE_DATA_SRC) {
1691 		*array = sample->data_src;
1692 		array++;
1693 	}
1694 
1695 	if (type & PERF_SAMPLE_TRANSACTION) {
1696 		*array = sample->transaction;
1697 		array++;
1698 	}
1699 
1700 	if (type & PERF_SAMPLE_REGS_INTR) {
1701 		if (sample->intr_regs.abi) {
1702 			*array++ = sample->intr_regs.abi;
1703 			sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
1704 			memcpy(array, sample->intr_regs.regs, sz);
1705 			array = (void *)array + sz;
1706 		} else {
1707 			*array++ = 0;
1708 		}
1709 	}
1710 
1711 	if (type & PERF_SAMPLE_PHYS_ADDR) {
1712 		*array = sample->phys_addr;
1713 		array++;
1714 	}
1715 
1716 	if (type & PERF_SAMPLE_CGROUP) {
1717 		*array = sample->cgroup;
1718 		array++;
1719 	}
1720 
1721 	if (type & PERF_SAMPLE_DATA_PAGE_SIZE) {
1722 		*array = sample->data_page_size;
1723 		array++;
1724 	}
1725 
1726 	if (type & PERF_SAMPLE_CODE_PAGE_SIZE) {
1727 		*array = sample->code_page_size;
1728 		array++;
1729 	}
1730 
1731 	if (type & PERF_SAMPLE_AUX) {
1732 		sz = sample->aux_sample.size;
1733 		*array++ = sz;
1734 		memcpy(array, sample->aux_sample.data, sz);
1735 		array = (void *)array + sz;
1736 	}
1737 
1738 	return 0;
1739 }
1740 
1741 int perf_event__synthesize_id_sample(__u64 *array, u64 type, const struct perf_sample *sample)
1742 {
1743 	__u64 *start = array;
1744 
1745 	/*
1746 	 * used for cross-endian analysis. See git commit 65014ab3
1747 	 * for why this goofiness is needed.
1748 	 */
1749 	union u64_swap u;
1750 
1751 	if (type & PERF_SAMPLE_TID) {
1752 		u.val32[0] = sample->pid;
1753 		u.val32[1] = sample->tid;
1754 		*array = u.val64;
1755 		array++;
1756 	}
1757 
1758 	if (type & PERF_SAMPLE_TIME) {
1759 		*array = sample->time;
1760 		array++;
1761 	}
1762 
1763 	if (type & PERF_SAMPLE_ID) {
1764 		*array = sample->id;
1765 		array++;
1766 	}
1767 
1768 	if (type & PERF_SAMPLE_STREAM_ID) {
1769 		*array = sample->stream_id;
1770 		array++;
1771 	}
1772 
1773 	if (type & PERF_SAMPLE_CPU) {
1774 		u.val32[0] = sample->cpu;
1775 		u.val32[1] = 0;
1776 		*array = u.val64;
1777 		array++;
1778 	}
1779 
1780 	if (type & PERF_SAMPLE_IDENTIFIER) {
1781 		*array = sample->id;
1782 		array++;
1783 	}
1784 
1785 	return (void *)array - (void *)start;
1786 }
1787 
1788 int __perf_event__synthesize_id_index(struct perf_tool *tool, perf_event__handler_t process,
1789 				      struct evlist *evlist, struct machine *machine, size_t from)
1790 {
1791 	union perf_event *ev;
1792 	struct evsel *evsel;
1793 	size_t nr = 0, i = 0, sz, max_nr, n, pos;
1794 	size_t e1_sz = sizeof(struct id_index_entry);
1795 	size_t e2_sz = sizeof(struct id_index_entry_2);
1796 	size_t etot_sz = e1_sz + e2_sz;
1797 	bool e2_needed = false;
1798 	int err;
1799 
1800 	max_nr = (UINT16_MAX - sizeof(struct perf_record_id_index)) / etot_sz;
1801 
1802 	pos = 0;
1803 	evlist__for_each_entry(evlist, evsel) {
1804 		if (pos++ < from)
1805 			continue;
1806 		nr += evsel->core.ids;
1807 	}
1808 
1809 	if (!nr)
1810 		return 0;
1811 
1812 	pr_debug2("Synthesizing id index\n");
1813 
1814 	n = nr > max_nr ? max_nr : nr;
1815 	sz = sizeof(struct perf_record_id_index) + n * etot_sz;
1816 	ev = zalloc(sz);
1817 	if (!ev)
1818 		return -ENOMEM;
1819 
1820 	sz = sizeof(struct perf_record_id_index) + n * e1_sz;
1821 
1822 	ev->id_index.header.type = PERF_RECORD_ID_INDEX;
1823 	ev->id_index.nr = n;
1824 
1825 	pos = 0;
1826 	evlist__for_each_entry(evlist, evsel) {
1827 		u32 j;
1828 
1829 		if (pos++ < from)
1830 			continue;
1831 		for (j = 0; j < evsel->core.ids; j++, i++) {
1832 			struct id_index_entry *e;
1833 			struct id_index_entry_2 *e2;
1834 			struct perf_sample_id *sid;
1835 
1836 			if (i >= n) {
1837 				ev->id_index.header.size = sz + (e2_needed ? n * e2_sz : 0);
1838 				err = process(tool, ev, NULL, machine);
1839 				if (err)
1840 					goto out_err;
1841 				nr -= n;
1842 				i = 0;
1843 				e2_needed = false;
1844 			}
1845 
1846 			e = &ev->id_index.entries[i];
1847 
1848 			e->id = evsel->core.id[j];
1849 
1850 			sid = evlist__id2sid(evlist, e->id);
1851 			if (!sid) {
1852 				free(ev);
1853 				return -ENOENT;
1854 			}
1855 
1856 			e->idx = sid->idx;
1857 			e->cpu = sid->cpu.cpu;
1858 			e->tid = sid->tid;
1859 
1860 			if (sid->machine_pid)
1861 				e2_needed = true;
1862 
1863 			e2 = (void *)ev + sz;
1864 			e2[i].machine_pid = sid->machine_pid;
1865 			e2[i].vcpu        = sid->vcpu.cpu;
1866 		}
1867 	}
1868 
1869 	sz = sizeof(struct perf_record_id_index) + nr * e1_sz;
1870 	ev->id_index.header.size = sz + (e2_needed ? nr * e2_sz : 0);
1871 	ev->id_index.nr = nr;
1872 
1873 	err = process(tool, ev, NULL, machine);
1874 out_err:
1875 	free(ev);
1876 
1877 	return err;
1878 }
1879 
1880 int perf_event__synthesize_id_index(struct perf_tool *tool, perf_event__handler_t process,
1881 				    struct evlist *evlist, struct machine *machine)
1882 {
1883 	return __perf_event__synthesize_id_index(tool, process, evlist, machine, 0);
1884 }
1885 
1886 int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
1887 				  struct target *target, struct perf_thread_map *threads,
1888 				  perf_event__handler_t process, bool needs_mmap,
1889 				  bool data_mmap, unsigned int nr_threads_synthesize)
1890 {
1891 	/*
1892 	 * When perf runs in non-root PID namespace, and the namespace's proc FS
1893 	 * is not mounted, nsinfo__is_in_root_namespace() returns false.
1894 	 * In this case, the proc FS is coming for the parent namespace, thus
1895 	 * perf tool will wrongly gather process info from its parent PID
1896 	 * namespace.
1897 	 *
1898 	 * To avoid the confusion that the perf tool runs in a child PID
1899 	 * namespace but it synthesizes thread info from its parent PID
1900 	 * namespace, returns failure with warning.
1901 	 */
1902 	if (!nsinfo__is_in_root_namespace()) {
1903 		pr_err("Perf runs in non-root PID namespace but it tries to ");
1904 		pr_err("gather process info from its parent PID namespace.\n");
1905 		pr_err("Please mount the proc file system properly, e.g. ");
1906 		pr_err("add the option '--mount-proc' for unshare command.\n");
1907 		return -EPERM;
1908 	}
1909 
1910 	if (target__has_task(target))
1911 		return perf_event__synthesize_thread_map(tool, threads, process, machine,
1912 							 needs_mmap, data_mmap);
1913 	else if (target__has_cpu(target))
1914 		return perf_event__synthesize_threads(tool, process, machine,
1915 						      needs_mmap, data_mmap,
1916 						      nr_threads_synthesize);
1917 	/* command specified */
1918 	return 0;
1919 }
1920 
1921 int machine__synthesize_threads(struct machine *machine, struct target *target,
1922 				struct perf_thread_map *threads, bool needs_mmap,
1923 				bool data_mmap, unsigned int nr_threads_synthesize)
1924 {
1925 	return __machine__synthesize_threads(machine, NULL, target, threads,
1926 					     perf_event__process, needs_mmap,
1927 					     data_mmap, nr_threads_synthesize);
1928 }
1929 
1930 static struct perf_record_event_update *event_update_event__new(size_t size, u64 type, u64 id)
1931 {
1932 	struct perf_record_event_update *ev;
1933 
1934 	size += sizeof(*ev);
1935 	size  = PERF_ALIGN(size, sizeof(u64));
1936 
1937 	ev = zalloc(size);
1938 	if (ev) {
1939 		ev->header.type = PERF_RECORD_EVENT_UPDATE;
1940 		ev->header.size = (u16)size;
1941 		ev->type	= type;
1942 		ev->id		= id;
1943 	}
1944 	return ev;
1945 }
1946 
1947 int perf_event__synthesize_event_update_unit(struct perf_tool *tool, struct evsel *evsel,
1948 					     perf_event__handler_t process)
1949 {
1950 	size_t size = strlen(evsel->unit);
1951 	struct perf_record_event_update *ev;
1952 	int err;
1953 
1954 	ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->core.id[0]);
1955 	if (ev == NULL)
1956 		return -ENOMEM;
1957 
1958 	strlcpy(ev->data, evsel->unit, size + 1);
1959 	err = process(tool, (union perf_event *)ev, NULL, NULL);
1960 	free(ev);
1961 	return err;
1962 }
1963 
1964 int perf_event__synthesize_event_update_scale(struct perf_tool *tool, struct evsel *evsel,
1965 					      perf_event__handler_t process)
1966 {
1967 	struct perf_record_event_update *ev;
1968 	struct perf_record_event_update_scale *ev_data;
1969 	int err;
1970 
1971 	ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->core.id[0]);
1972 	if (ev == NULL)
1973 		return -ENOMEM;
1974 
1975 	ev_data = (struct perf_record_event_update_scale *)ev->data;
1976 	ev_data->scale = evsel->scale;
1977 	err = process(tool, (union perf_event *)ev, NULL, NULL);
1978 	free(ev);
1979 	return err;
1980 }
1981 
1982 int perf_event__synthesize_event_update_name(struct perf_tool *tool, struct evsel *evsel,
1983 					     perf_event__handler_t process)
1984 {
1985 	struct perf_record_event_update *ev;
1986 	size_t len = strlen(evsel->name);
1987 	int err;
1988 
1989 	ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->core.id[0]);
1990 	if (ev == NULL)
1991 		return -ENOMEM;
1992 
1993 	strlcpy(ev->data, evsel->name, len + 1);
1994 	err = process(tool, (union perf_event *)ev, NULL, NULL);
1995 	free(ev);
1996 	return err;
1997 }
1998 
1999 int perf_event__synthesize_event_update_cpus(struct perf_tool *tool, struct evsel *evsel,
2000 					     perf_event__handler_t process)
2001 {
2002 	size_t size = sizeof(struct perf_record_event_update);
2003 	struct perf_record_event_update *ev;
2004 	int max, err;
2005 	u16 type;
2006 
2007 	if (!evsel->core.own_cpus)
2008 		return 0;
2009 
2010 	ev = cpu_map_data__alloc(evsel->core.own_cpus, &size, &type, &max);
2011 	if (!ev)
2012 		return -ENOMEM;
2013 
2014 	ev->header.type = PERF_RECORD_EVENT_UPDATE;
2015 	ev->header.size = (u16)size;
2016 	ev->type	= PERF_EVENT_UPDATE__CPUS;
2017 	ev->id		= evsel->core.id[0];
2018 
2019 	cpu_map_data__synthesize((struct perf_record_cpu_map_data *)ev->data,
2020 				 evsel->core.own_cpus, type, max);
2021 
2022 	err = process(tool, (union perf_event *)ev, NULL, NULL);
2023 	free(ev);
2024 	return err;
2025 }
2026 
2027 int perf_event__synthesize_attrs(struct perf_tool *tool, struct evlist *evlist,
2028 				 perf_event__handler_t process)
2029 {
2030 	struct evsel *evsel;
2031 	int err = 0;
2032 
2033 	evlist__for_each_entry(evlist, evsel) {
2034 		err = perf_event__synthesize_attr(tool, &evsel->core.attr, evsel->core.ids,
2035 						  evsel->core.id, process);
2036 		if (err) {
2037 			pr_debug("failed to create perf header attribute\n");
2038 			return err;
2039 		}
2040 	}
2041 
2042 	return err;
2043 }
2044 
2045 static bool has_unit(struct evsel *evsel)
2046 {
2047 	return evsel->unit && *evsel->unit;
2048 }
2049 
2050 static bool has_scale(struct evsel *evsel)
2051 {
2052 	return evsel->scale != 1;
2053 }
2054 
2055 int perf_event__synthesize_extra_attr(struct perf_tool *tool, struct evlist *evsel_list,
2056 				      perf_event__handler_t process, bool is_pipe)
2057 {
2058 	struct evsel *evsel;
2059 	int err;
2060 
2061 	/*
2062 	 * Synthesize other events stuff not carried within
2063 	 * attr event - unit, scale, name
2064 	 */
2065 	evlist__for_each_entry(evsel_list, evsel) {
2066 		if (!evsel->supported)
2067 			continue;
2068 
2069 		/*
2070 		 * Synthesize unit and scale only if it's defined.
2071 		 */
2072 		if (has_unit(evsel)) {
2073 			err = perf_event__synthesize_event_update_unit(tool, evsel, process);
2074 			if (err < 0) {
2075 				pr_err("Couldn't synthesize evsel unit.\n");
2076 				return err;
2077 			}
2078 		}
2079 
2080 		if (has_scale(evsel)) {
2081 			err = perf_event__synthesize_event_update_scale(tool, evsel, process);
2082 			if (err < 0) {
2083 				pr_err("Couldn't synthesize evsel evsel.\n");
2084 				return err;
2085 			}
2086 		}
2087 
2088 		if (evsel->core.own_cpus) {
2089 			err = perf_event__synthesize_event_update_cpus(tool, evsel, process);
2090 			if (err < 0) {
2091 				pr_err("Couldn't synthesize evsel cpus.\n");
2092 				return err;
2093 			}
2094 		}
2095 
2096 		/*
2097 		 * Name is needed only for pipe output,
2098 		 * perf.data carries event names.
2099 		 */
2100 		if (is_pipe) {
2101 			err = perf_event__synthesize_event_update_name(tool, evsel, process);
2102 			if (err < 0) {
2103 				pr_err("Couldn't synthesize evsel name.\n");
2104 				return err;
2105 			}
2106 		}
2107 	}
2108 	return 0;
2109 }
2110 
2111 int perf_event__synthesize_attr(struct perf_tool *tool, struct perf_event_attr *attr,
2112 				u32 ids, u64 *id, perf_event__handler_t process)
2113 {
2114 	union perf_event *ev;
2115 	size_t size;
2116 	int err;
2117 
2118 	size = sizeof(struct perf_event_attr);
2119 	size = PERF_ALIGN(size, sizeof(u64));
2120 	size += sizeof(struct perf_event_header);
2121 	size += ids * sizeof(u64);
2122 
2123 	ev = zalloc(size);
2124 
2125 	if (ev == NULL)
2126 		return -ENOMEM;
2127 
2128 	ev->attr.attr = *attr;
2129 	memcpy(ev->attr.id, id, ids * sizeof(u64));
2130 
2131 	ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
2132 	ev->attr.header.size = (u16)size;
2133 
2134 	if (ev->attr.header.size == size)
2135 		err = process(tool, ev, NULL, NULL);
2136 	else
2137 		err = -E2BIG;
2138 
2139 	free(ev);
2140 
2141 	return err;
2142 }
2143 
2144 int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd, struct evlist *evlist,
2145 					perf_event__handler_t process)
2146 {
2147 	union perf_event ev;
2148 	struct tracing_data *tdata;
2149 	ssize_t size = 0, aligned_size = 0, padding;
2150 	struct feat_fd ff;
2151 
2152 	/*
2153 	 * We are going to store the size of the data followed
2154 	 * by the data contents. Since the fd descriptor is a pipe,
2155 	 * we cannot seek back to store the size of the data once
2156 	 * we know it. Instead we:
2157 	 *
2158 	 * - write the tracing data to the temp file
2159 	 * - get/write the data size to pipe
2160 	 * - write the tracing data from the temp file
2161 	 *   to the pipe
2162 	 */
2163 	tdata = tracing_data_get(&evlist->core.entries, fd, true);
2164 	if (!tdata)
2165 		return -1;
2166 
2167 	memset(&ev, 0, sizeof(ev));
2168 
2169 	ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
2170 	size = tdata->size;
2171 	aligned_size = PERF_ALIGN(size, sizeof(u64));
2172 	padding = aligned_size - size;
2173 	ev.tracing_data.header.size = sizeof(ev.tracing_data);
2174 	ev.tracing_data.size = aligned_size;
2175 
2176 	process(tool, &ev, NULL, NULL);
2177 
2178 	/*
2179 	 * The put function will copy all the tracing data
2180 	 * stored in temp file to the pipe.
2181 	 */
2182 	tracing_data_put(tdata);
2183 
2184 	ff = (struct feat_fd){ .fd = fd };
2185 	if (write_padded(&ff, NULL, 0, padding))
2186 		return -1;
2187 
2188 	return aligned_size;
2189 }
2190 
2191 int perf_event__synthesize_build_id(struct perf_tool *tool, struct dso *pos, u16 misc,
2192 				    perf_event__handler_t process, struct machine *machine)
2193 {
2194 	union perf_event ev;
2195 	size_t len;
2196 
2197 	if (!pos->hit)
2198 		return 0;
2199 
2200 	memset(&ev, 0, sizeof(ev));
2201 
2202 	len = pos->long_name_len + 1;
2203 	len = PERF_ALIGN(len, NAME_ALIGN);
2204 	memcpy(&ev.build_id.build_id, pos->bid.data, sizeof(pos->bid.data));
2205 	ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
2206 	ev.build_id.header.misc = misc;
2207 	ev.build_id.pid = machine->pid;
2208 	ev.build_id.header.size = sizeof(ev.build_id) + len;
2209 	memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
2210 
2211 	return process(tool, &ev, NULL, machine);
2212 }
2213 
2214 int perf_event__synthesize_stat_events(struct perf_stat_config *config, struct perf_tool *tool,
2215 				       struct evlist *evlist, perf_event__handler_t process, bool attrs)
2216 {
2217 	int err;
2218 
2219 	if (attrs) {
2220 		err = perf_event__synthesize_attrs(tool, evlist, process);
2221 		if (err < 0) {
2222 			pr_err("Couldn't synthesize attrs.\n");
2223 			return err;
2224 		}
2225 	}
2226 
2227 	err = perf_event__synthesize_extra_attr(tool, evlist, process, attrs);
2228 	err = perf_event__synthesize_thread_map2(tool, evlist->core.threads, process, NULL);
2229 	if (err < 0) {
2230 		pr_err("Couldn't synthesize thread map.\n");
2231 		return err;
2232 	}
2233 
2234 	err = perf_event__synthesize_cpu_map(tool, evlist->core.user_requested_cpus, process, NULL);
2235 	if (err < 0) {
2236 		pr_err("Couldn't synthesize thread map.\n");
2237 		return err;
2238 	}
2239 
2240 	err = perf_event__synthesize_stat_config(tool, config, process, NULL);
2241 	if (err < 0) {
2242 		pr_err("Couldn't synthesize config.\n");
2243 		return err;
2244 	}
2245 
2246 	return 0;
2247 }
2248 
2249 extern const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE];
2250 
2251 int perf_event__synthesize_features(struct perf_tool *tool, struct perf_session *session,
2252 				    struct evlist *evlist, perf_event__handler_t process)
2253 {
2254 	struct perf_header *header = &session->header;
2255 	struct perf_record_header_feature *fe;
2256 	struct feat_fd ff;
2257 	size_t sz, sz_hdr;
2258 	int feat, ret;
2259 
2260 	sz_hdr = sizeof(fe->header);
2261 	sz = sizeof(union perf_event);
2262 	/* get a nice alignment */
2263 	sz = PERF_ALIGN(sz, page_size);
2264 
2265 	memset(&ff, 0, sizeof(ff));
2266 
2267 	ff.buf = malloc(sz);
2268 	if (!ff.buf)
2269 		return -ENOMEM;
2270 
2271 	ff.size = sz - sz_hdr;
2272 	ff.ph = &session->header;
2273 
2274 	for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
2275 		if (!feat_ops[feat].synthesize) {
2276 			pr_debug("No record header feature for header :%d\n", feat);
2277 			continue;
2278 		}
2279 
2280 		ff.offset = sizeof(*fe);
2281 
2282 		ret = feat_ops[feat].write(&ff, evlist);
2283 		if (ret || ff.offset <= (ssize_t)sizeof(*fe)) {
2284 			pr_debug("Error writing feature\n");
2285 			continue;
2286 		}
2287 		/* ff.buf may have changed due to realloc in do_write() */
2288 		fe = ff.buf;
2289 		memset(fe, 0, sizeof(*fe));
2290 
2291 		fe->feat_id = feat;
2292 		fe->header.type = PERF_RECORD_HEADER_FEATURE;
2293 		fe->header.size = ff.offset;
2294 
2295 		ret = process(tool, ff.buf, NULL, NULL);
2296 		if (ret) {
2297 			free(ff.buf);
2298 			return ret;
2299 		}
2300 	}
2301 
2302 	/* Send HEADER_LAST_FEATURE mark. */
2303 	fe = ff.buf;
2304 	fe->feat_id     = HEADER_LAST_FEATURE;
2305 	fe->header.type = PERF_RECORD_HEADER_FEATURE;
2306 	fe->header.size = sizeof(*fe);
2307 
2308 	ret = process(tool, ff.buf, NULL, NULL);
2309 
2310 	free(ff.buf);
2311 	return ret;
2312 }
2313 
2314 int perf_event__synthesize_for_pipe(struct perf_tool *tool,
2315 				    struct perf_session *session,
2316 				    struct perf_data *data,
2317 				    perf_event__handler_t process)
2318 {
2319 	int err;
2320 	int ret = 0;
2321 	struct evlist *evlist = session->evlist;
2322 
2323 	/*
2324 	 * We need to synthesize events first, because some
2325 	 * features works on top of them (on report side).
2326 	 */
2327 	err = perf_event__synthesize_attrs(tool, evlist, process);
2328 	if (err < 0) {
2329 		pr_err("Couldn't synthesize attrs.\n");
2330 		return err;
2331 	}
2332 	ret += err;
2333 
2334 	err = perf_event__synthesize_features(tool, session, evlist, process);
2335 	if (err < 0) {
2336 		pr_err("Couldn't synthesize features.\n");
2337 		return err;
2338 	}
2339 	ret += err;
2340 
2341 	if (have_tracepoints(&evlist->core.entries)) {
2342 		int fd = perf_data__fd(data);
2343 
2344 		/*
2345 		 * FIXME err <= 0 here actually means that
2346 		 * there were no tracepoints so its not really
2347 		 * an error, just that we don't need to
2348 		 * synthesize anything.  We really have to
2349 		 * return this more properly and also
2350 		 * propagate errors that now are calling die()
2351 		 */
2352 		err = perf_event__synthesize_tracing_data(tool,	fd, evlist,
2353 							  process);
2354 		if (err <= 0) {
2355 			pr_err("Couldn't record tracing data.\n");
2356 			return err;
2357 		}
2358 		ret += err;
2359 	}
2360 
2361 	return ret;
2362 }
2363 
2364 int parse_synth_opt(char *synth)
2365 {
2366 	char *p, *q;
2367 	int ret = 0;
2368 
2369 	if (synth == NULL)
2370 		return -1;
2371 
2372 	for (q = synth; (p = strsep(&q, ",")); p = q) {
2373 		if (!strcasecmp(p, "no") || !strcasecmp(p, "none"))
2374 			return 0;
2375 
2376 		if (!strcasecmp(p, "all"))
2377 			return PERF_SYNTH_ALL;
2378 
2379 		if (!strcasecmp(p, "task"))
2380 			ret |= PERF_SYNTH_TASK;
2381 		else if (!strcasecmp(p, "mmap"))
2382 			ret |= PERF_SYNTH_TASK | PERF_SYNTH_MMAP;
2383 		else if (!strcasecmp(p, "cgroup"))
2384 			ret |= PERF_SYNTH_CGROUP;
2385 		else
2386 			return -1;
2387 	}
2388 
2389 	return ret;
2390 }
2391