xref: /openbmc/linux/tools/perf/util/event.c (revision 5e012745)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <dirent.h>
3 #include <errno.h>
4 #include <fcntl.h>
5 #include <inttypes.h>
6 #include <linux/kernel.h>
7 #include <linux/types.h>
8 #include <sys/types.h>
9 #include <sys/stat.h>
10 #include <unistd.h>
11 #include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
12 #include <api/fs/fs.h>
13 #include <linux/perf_event.h>
14 #include <linux/zalloc.h>
15 #include "event.h"
16 #include "debug.h"
17 #include "hist.h"
18 #include "machine.h"
19 #include "sort.h"
20 #include "string2.h"
21 #include "strlist.h"
22 #include "thread.h"
23 #include "thread_map.h"
24 #include <linux/ctype.h>
25 #include "map.h"
26 #include "symbol.h"
27 #include "symbol/kallsyms.h"
28 #include "asm/bug.h"
29 #include "stat.h"
30 #include "session.h"
31 #include "bpf-event.h"
32 
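/*
 * Default time budget, in milliseconds, for parsing a single
 * /proc/<pid>/task/<pid>/maps file; it is compared against rdclock()
 * deltas (nanoseconds) after being multiplied by 1000000 below.
 */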
33 #define DEFAULT_PROC_MAP_PARSE_TIMEOUT 500
34 
35 static const char *perf_event__names[] = {
36 	[0]					= "TOTAL",
37 	[PERF_RECORD_MMAP]			= "MMAP",
38 	[PERF_RECORD_MMAP2]			= "MMAP2",
39 	[PERF_RECORD_LOST]			= "LOST",
40 	[PERF_RECORD_COMM]			= "COMM",
41 	[PERF_RECORD_EXIT]			= "EXIT",
42 	[PERF_RECORD_THROTTLE]			= "THROTTLE",
43 	[PERF_RECORD_UNTHROTTLE]		= "UNTHROTTLE",
44 	[PERF_RECORD_FORK]			= "FORK",
45 	[PERF_RECORD_READ]			= "READ",
46 	[PERF_RECORD_SAMPLE]			= "SAMPLE",
47 	[PERF_RECORD_AUX]			= "AUX",
48 	[PERF_RECORD_ITRACE_START]		= "ITRACE_START",
49 	[PERF_RECORD_LOST_SAMPLES]		= "LOST_SAMPLES",
50 	[PERF_RECORD_SWITCH]			= "SWITCH",
51 	[PERF_RECORD_SWITCH_CPU_WIDE]		= "SWITCH_CPU_WIDE",
52 	[PERF_RECORD_NAMESPACES]		= "NAMESPACES",
53 	[PERF_RECORD_KSYMBOL]			= "KSYMBOL",
54 	[PERF_RECORD_BPF_EVENT]			= "BPF_EVENT",
55 	[PERF_RECORD_HEADER_ATTR]		= "ATTR",
56 	[PERF_RECORD_HEADER_EVENT_TYPE]		= "EVENT_TYPE",
57 	[PERF_RECORD_HEADER_TRACING_DATA]	= "TRACING_DATA",
58 	[PERF_RECORD_HEADER_BUILD_ID]		= "BUILD_ID",
59 	[PERF_RECORD_FINISHED_ROUND]		= "FINISHED_ROUND",
60 	[PERF_RECORD_ID_INDEX]			= "ID_INDEX",
61 	[PERF_RECORD_AUXTRACE_INFO]		= "AUXTRACE_INFO",
62 	[PERF_RECORD_AUXTRACE]			= "AUXTRACE",
63 	[PERF_RECORD_AUXTRACE_ERROR]		= "AUXTRACE_ERROR",
64 	[PERF_RECORD_THREAD_MAP]		= "THREAD_MAP",
65 	[PERF_RECORD_CPU_MAP]			= "CPU_MAP",
66 	[PERF_RECORD_STAT_CONFIG]		= "STAT_CONFIG",
67 	[PERF_RECORD_STAT]			= "STAT",
68 	[PERF_RECORD_STAT_ROUND]		= "STAT_ROUND",
69 	[PERF_RECORD_EVENT_UPDATE]		= "EVENT_UPDATE",
70 	[PERF_RECORD_TIME_CONV]			= "TIME_CONV",
71 	[PERF_RECORD_HEADER_FEATURE]		= "FEATURE",
72 	[PERF_RECORD_COMPRESSED]		= "COMPRESSED",
73 };
74 
75 static const char *perf_ns__names[] = {
76 	[NET_NS_INDEX]		= "net",
77 	[UTS_NS_INDEX]		= "uts",
78 	[IPC_NS_INDEX]		= "ipc",
79 	[PID_NS_INDEX]		= "pid",
80 	[USER_NS_INDEX]		= "user",
81 	[MNT_NS_INDEX]		= "mnt",
82 	[CGROUP_NS_INDEX]	= "cgroup",
83 };
84 
85 unsigned int proc_map_timeout = DEFAULT_PROC_MAP_PARSE_TIMEOUT;
86 
87 const char *perf_event__name(unsigned int id)
88 {
89 	if (id >= ARRAY_SIZE(perf_event__names))
90 		return "INVALID";
91 	if (!perf_event__names[id])
92 		return "UNKNOWN";
93 	return perf_event__names[id];
94 }
95 
96 static const char *perf_ns__name(unsigned int id)
97 {
98 	if (id >= ARRAY_SIZE(perf_ns__names))
99 		return "UNKNOWN";
100 	return perf_ns__names[id];
101 }
102 
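/*
 * Feed a synthesized event to the tool's 'process' callback together with a
 * minimal, mostly-invalid perf_sample (pid/tid/time/stream_id/cpu all -1),
 * since synthesized records carry no real sample data of their own.
 */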
103 int perf_tool__process_synth_event(struct perf_tool *tool,
104 				   union perf_event *event,
105 				   struct machine *machine,
106 				   perf_event__handler_t process)
107 {
108 	struct perf_sample synth_sample = {
109 	.pid	   = -1,
110 	.tid	   = -1,
111 	.time	   = -1,
112 	.stream_id = -1,
113 	.cpu	   = -1,
114 	.period	   = 1,
115 	.cpumode   = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK,
116 	};
117 
118 	return process(tool, event, &synth_sample, machine);
119 };
120 
121 /*
122  * Assumes that the first 4095 bytes of /proc/pid/status contain
123  * the comm, tgid and ppid.
124  */
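/*
 * The fields of interest look, for example, like:
 *
 *   Name:	cat
 *   Tgid:	2345
 *   PPid:	1234
 *
 * (illustrative values; the real file has many more lines).
 */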
125 static int perf_event__get_comm_ids(pid_t pid, char *comm, size_t len,
126 				    pid_t *tgid, pid_t *ppid)
127 {
128 	char filename[PATH_MAX];
129 	char bf[4096];
130 	int fd;
131 	size_t size = 0;
132 	ssize_t n;
133 	char *name, *tgids, *ppids;
134 
135 	*tgid = -1;
136 	*ppid = -1;
137 
138 	snprintf(filename, sizeof(filename), "/proc/%d/status", pid);
139 
140 	fd = open(filename, O_RDONLY);
141 	if (fd < 0) {
142 		pr_debug("couldn't open %s\n", filename);
143 		return -1;
144 	}
145 
146 	n = read(fd, bf, sizeof(bf) - 1);
147 	close(fd);
148 	if (n <= 0) {
149 		pr_warning("Couldn't get COMM, tgid and ppid for pid %d\n",
150 			   pid);
151 		return -1;
152 	}
153 	bf[n] = '\0';
154 
155 	name = strstr(bf, "Name:");
156 	tgids = strstr(bf, "Tgid:");
157 	ppids = strstr(bf, "PPid:");
158 
159 	if (name) {
160 		char *nl;
161 
162 		name = skip_spaces(name + 5);  /* strlen("Name:") */
163 		nl = strchr(name, '\n');
164 		if (nl)
165 			*nl = '\0';
166 
167 		size = strlen(name);
168 		if (size >= len)
169 			size = len - 1;
170 		memcpy(comm, name, size);
171 		comm[size] = '\0';
172 	} else {
173 		pr_debug("Name: string not found for pid %d\n", pid);
174 	}
175 
176 	if (tgids) {
177 		tgids += 5;  /* strlen("Tgid:") */
178 		*tgid = atoi(tgids);
179 	} else {
180 		pr_debug("Tgid: string not found for pid %d\n", pid);
181 	}
182 
183 	if (ppids) {
184 		ppids += 5;  /* strlen("PPid:") */
185 		*ppid = atoi(ppids);
186 	} else {
187 		pr_debug("PPid: string not found for pid %d\n", pid);
188 	}
189 
190 	return 0;
191 }
192 
193 static int perf_event__prepare_comm(union perf_event *event, pid_t pid,
194 				    struct machine *machine,
195 				    pid_t *tgid, pid_t *ppid)
196 {
197 	size_t size;
198 
199 	*ppid = -1;
200 
201 	memset(&event->comm, 0, sizeof(event->comm));
202 
203 	if (machine__is_host(machine)) {
204 		if (perf_event__get_comm_ids(pid, event->comm.comm,
205 					     sizeof(event->comm.comm),
206 					     tgid, ppid) != 0) {
207 			return -1;
208 		}
209 	} else {
210 		*tgid = machine->pid;
211 	}
212 
213 	if (*tgid < 0)
214 		return -1;
215 
216 	event->comm.pid = *tgid;
217 	event->comm.header.type = PERF_RECORD_COMM;
218 
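	/*
	 * Trim the unused tail of comm.comm from the record: keep only the
	 * u64-aligned portion actually used, followed by machine->id_hdr_size
	 * bytes of zeroed space for the sample id.
	 */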
219 	size = strlen(event->comm.comm) + 1;
220 	size = PERF_ALIGN(size, sizeof(u64));
221 	memset(event->comm.comm + size, 0, machine->id_hdr_size);
222 	event->comm.header.size = (sizeof(event->comm) -
223 				(sizeof(event->comm.comm) - size) +
224 				machine->id_hdr_size);
225 	event->comm.tid = pid;
226 
227 	return 0;
228 }
229 
230 pid_t perf_event__synthesize_comm(struct perf_tool *tool,
231 					 union perf_event *event, pid_t pid,
232 					 perf_event__handler_t process,
233 					 struct machine *machine)
234 {
235 	pid_t tgid, ppid;
236 
237 	if (perf_event__prepare_comm(event, pid, machine, &tgid, &ppid) != 0)
238 		return -1;
239 
240 	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
241 		return -1;
242 
243 	return tgid;
244 }
245 
246 static void perf_event__get_ns_link_info(pid_t pid, const char *ns,
247 					 struct perf_ns_link_info *ns_link_info)
248 {
249 	struct stat64 st;
250 	char proc_ns[128];
251 
252 	sprintf(proc_ns, "/proc/%u/ns/%s", pid, ns);
253 	if (stat64(proc_ns, &st) == 0) {
254 		ns_link_info->dev = st.st_dev;
255 		ns_link_info->ino = st.st_ino;
256 	}
257 }
258 
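/*
 * A synthesized PERF_RECORD_NAMESPACES record is the fixed namespaces event
 * followed by NR_NAMESPACES perf_ns_link_info entries (the dev/ino of each
 * /proc/<pid>/ns/<name> link) and the machine's id header.
 */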
259 int perf_event__synthesize_namespaces(struct perf_tool *tool,
260 				      union perf_event *event,
261 				      pid_t pid, pid_t tgid,
262 				      perf_event__handler_t process,
263 				      struct machine *machine)
264 {
265 	u32 idx;
266 	struct perf_ns_link_info *ns_link_info;
267 
268 	if (!tool || !tool->namespace_events)
269 		return 0;
270 
271 	memset(&event->namespaces, 0, (sizeof(event->namespaces) +
272 	       (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
273 	       machine->id_hdr_size));
274 
275 	event->namespaces.pid = tgid;
276 	event->namespaces.tid = pid;
277 
278 	event->namespaces.nr_namespaces = NR_NAMESPACES;
279 
280 	ns_link_info = event->namespaces.link_info;
281 
282 	for (idx = 0; idx < event->namespaces.nr_namespaces; idx++)
283 		perf_event__get_ns_link_info(pid, perf_ns__name(idx),
284 					     &ns_link_info[idx]);
285 
286 	event->namespaces.header.type = PERF_RECORD_NAMESPACES;
287 
288 	event->namespaces.header.size = (sizeof(event->namespaces) +
289 			(NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
290 			machine->id_hdr_size);
291 
292 	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
293 		return -1;
294 
295 	return 0;
296 }
297 
298 static int perf_event__synthesize_fork(struct perf_tool *tool,
299 				       union perf_event *event,
300 				       pid_t pid, pid_t tgid, pid_t ppid,
301 				       perf_event__handler_t process,
302 				       struct machine *machine)
303 {
304 	memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);
305 
306 	/*
307 	 * For the main thread, set the parent to the ppid from the status
308 	 * file. For other threads, set the parent pid to the main thread,
309 	 * i.e. assume the main thread spawns all threads in a process.
310 	 */
311 	if (tgid == pid) {
312 		event->fork.ppid = ppid;
313 		event->fork.ptid = ppid;
314 	} else {
315 		event->fork.ppid = tgid;
316 		event->fork.ptid = tgid;
317 	}
318 	event->fork.pid  = tgid;
319 	event->fork.tid  = pid;
320 	event->fork.header.type = PERF_RECORD_FORK;
321 	event->fork.header.misc = PERF_RECORD_MISC_FORK_EXEC;
322 
323 	event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);
324 
325 	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
326 		return -1;
327 
328 	return 0;
329 }
330 
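/*
 * Walk /proc/<pid>/task/<pid>/maps and synthesize one PERF_RECORD_MMAP2 per
 * executable mapping (or per readable mapping too, when 'mmap_data' is set),
 * bailing out with a truncated record if parsing exceeds proc_map_timeout.
 */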
331 int perf_event__synthesize_mmap_events(struct perf_tool *tool,
332 				       union perf_event *event,
333 				       pid_t pid, pid_t tgid,
334 				       perf_event__handler_t process,
335 				       struct machine *machine,
336 				       bool mmap_data)
337 {
338 	char filename[PATH_MAX];
339 	FILE *fp;
340 	unsigned long long t;
341 	bool truncation = false;
342 	unsigned long long timeout = proc_map_timeout * 1000000ULL;
343 	int rc = 0;
344 	const char *hugetlbfs_mnt = hugetlbfs__mountpoint();
345 	int hugetlbfs_mnt_len = hugetlbfs_mnt ? strlen(hugetlbfs_mnt) : 0;
346 
347 	if (machine__is_default_guest(machine))
348 		return 0;
349 
350 	snprintf(filename, sizeof(filename), "%s/proc/%d/task/%d/maps",
351 		 machine->root_dir, pid, pid);
352 
353 	fp = fopen(filename, "r");
354 	if (fp == NULL) {
355 		/*
356 		 * We raced with a task exiting - just return:
357 		 */
358 		pr_debug("couldn't open %s\n", filename);
359 		return -1;
360 	}
361 
362 	event->header.type = PERF_RECORD_MMAP2;
363 	t = rdclock();
364 
365 	while (1) {
366 		char bf[BUFSIZ];
367 		char prot[5];
368 		char execname[PATH_MAX];
369 		char anonstr[] = "//anon";
370 		unsigned int ino;
371 		size_t size;
372 		ssize_t n;
373 
374 		if (fgets(bf, sizeof(bf), fp) == NULL)
375 			break;
376 
377 		if ((rdclock() - t) > timeout) {
378 			pr_warning("Reading %s timed out. "
379 				   "You may want to increase "
380 				   "the time limit with --proc-map-timeout\n",
381 				   filename);
382 			truncation = true;
383 			goto out;
384 		}
385 
386 		/* ensure null termination since stack will be reused. */
387 		strcpy(execname, "");
388 
389 		/* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
390 		n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %[^\n]\n",
391 		       &event->mmap2.start, &event->mmap2.len, prot,
392 		       &event->mmap2.pgoff, &event->mmap2.maj,
393 		       &event->mmap2.min,
394 		       &ino, execname);
395 
396 		/*
397 		 * Anon maps don't have the execname.
398 		 */
399 		if (n < 7)
400 			continue;
401 
402 		event->mmap2.ino = (u64)ino;
403 
404 		/*
405 		 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
406 		 */
407 		if (machine__is_host(machine))
408 			event->header.misc = PERF_RECORD_MISC_USER;
409 		else
410 			event->header.misc = PERF_RECORD_MISC_GUEST_USER;
411 
412 		/* map protection and flags bits */
413 		event->mmap2.prot = 0;
414 		event->mmap2.flags = 0;
415 		if (prot[0] == 'r')
416 			event->mmap2.prot |= PROT_READ;
417 		if (prot[1] == 'w')
418 			event->mmap2.prot |= PROT_WRITE;
419 		if (prot[2] == 'x')
420 			event->mmap2.prot |= PROT_EXEC;
421 
422 		if (prot[3] == 's')
423 			event->mmap2.flags |= MAP_SHARED;
424 		else
425 			event->mmap2.flags |= MAP_PRIVATE;
426 
427 		if (prot[2] != 'x') {
428 			if (!mmap_data || prot[0] != 'r')
429 				continue;
430 
431 			event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
432 		}
433 
434 out:
435 		if (truncation)
436 			event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT;
437 
438 		if (!strcmp(execname, ""))
439 			strcpy(execname, anonstr);
440 
441 		if (hugetlbfs_mnt_len &&
442 		    !strncmp(execname, hugetlbfs_mnt, hugetlbfs_mnt_len)) {
443 			strcpy(execname, anonstr);
444 			event->mmap2.flags |= MAP_HUGETLB;
445 		}
446 
447 		size = strlen(execname) + 1;
448 		memcpy(event->mmap2.filename, execname, size);
449 		size = PERF_ALIGN(size, sizeof(u64));
450 		event->mmap2.len -= event->mmap.start;
451 		event->mmap2.header.size = (sizeof(event->mmap2) -
452 					(sizeof(event->mmap2.filename) - size));
453 		memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
454 		event->mmap2.header.size += machine->id_hdr_size;
455 		event->mmap2.pid = tgid;
456 		event->mmap2.tid = pid;
457 
458 		if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
459 			rc = -1;
460 			break;
461 		}
462 
463 		if (truncation)
464 			break;
465 	}
466 
467 	fclose(fp);
468 	return rc;
469 }
470 
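/*
 * Synthesize one PERF_RECORD_MMAP per kernel module map currently known to
 * 'machine', using the module DSO's long name as the filename.
 */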
471 int perf_event__synthesize_modules(struct perf_tool *tool,
472 				   perf_event__handler_t process,
473 				   struct machine *machine)
474 {
475 	int rc = 0;
476 	struct map *pos;
477 	struct maps *maps = machine__kernel_maps(machine);
478 	union perf_event *event = zalloc((sizeof(event->mmap) +
479 					  machine->id_hdr_size));
480 	if (event == NULL) {
481 		pr_debug("Not enough memory synthesizing mmap event "
482 			 "for kernel modules\n");
483 		return -1;
484 	}
485 
486 	event->header.type = PERF_RECORD_MMAP;
487 
488 	/*
489 	 * kernel uses 0 for user space maps, see kernel/perf_event.c
490 	 * __perf_event_mmap
491 	 */
492 	if (machine__is_host(machine))
493 		event->header.misc = PERF_RECORD_MISC_KERNEL;
494 	else
495 		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
496 
497 	for (pos = maps__first(maps); pos; pos = map__next(pos)) {
498 		size_t size;
499 
500 		if (!__map__is_kmodule(pos))
501 			continue;
502 
503 		size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
504 		event->mmap.header.type = PERF_RECORD_MMAP;
505 		event->mmap.header.size = (sizeof(event->mmap) -
506 				        (sizeof(event->mmap.filename) - size));
507 		memset(event->mmap.filename + size, 0, machine->id_hdr_size);
508 		event->mmap.header.size += machine->id_hdr_size;
509 		event->mmap.start = pos->start;
510 		event->mmap.len   = pos->end - pos->start;
511 		event->mmap.pid   = machine->pid;
512 
513 		memcpy(event->mmap.filename, pos->dso->long_name,
514 		       pos->dso->long_name_len + 1);
515 		if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
516 			rc = -1;
517 			break;
518 		}
519 	}
520 
521 	free(event);
522 	return rc;
523 }
524 
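/*
 * Synthesize COMM/NAMESPACES (and MMAP2 for the group leader) for a single
 * thread when !full, or FORK/COMM/NAMESPACES for every task listed under
 * /proc/<pid>/task plus MMAP2 events for the thread whose pid was passed in
 * when full.
 */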
525 static int __event__synthesize_thread(union perf_event *comm_event,
526 				      union perf_event *mmap_event,
527 				      union perf_event *fork_event,
528 				      union perf_event *namespaces_event,
529 				      pid_t pid, int full,
530 				      perf_event__handler_t process,
531 				      struct perf_tool *tool,
532 				      struct machine *machine,
533 				      bool mmap_data)
534 {
535 	char filename[PATH_MAX];
536 	DIR *tasks;
537 	struct dirent *dirent;
538 	pid_t tgid, ppid;
539 	int rc = 0;
540 
541 	/* special case: only send one comm event using passed in pid */
542 	if (!full) {
543 		tgid = perf_event__synthesize_comm(tool, comm_event, pid,
544 						   process, machine);
545 
546 		if (tgid == -1)
547 			return -1;
548 
549 		if (perf_event__synthesize_namespaces(tool, namespaces_event, pid,
550 						      tgid, process, machine) < 0)
551 			return -1;
552 
553 		/*
554 		 * send mmap only for thread group leader
555 		 * see thread__init_map_groups
556 		 */
557 		if (pid == tgid &&
558 		    perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
559 						       process, machine, mmap_data))
560 			return -1;
561 
562 		return 0;
563 	}
564 
565 	if (machine__is_default_guest(machine))
566 		return 0;
567 
568 	snprintf(filename, sizeof(filename), "%s/proc/%d/task",
569 		 machine->root_dir, pid);
570 
571 	tasks = opendir(filename);
572 	if (tasks == NULL) {
573 		pr_debug("couldn't open %s\n", filename);
574 		return 0;
575 	}
576 
577 	while ((dirent = readdir(tasks)) != NULL) {
578 		char *end;
579 		pid_t _pid;
580 
581 		_pid = strtol(dirent->d_name, &end, 10);
582 		if (*end)
583 			continue;
584 
585 		rc = -1;
586 		if (perf_event__prepare_comm(comm_event, _pid, machine,
587 					     &tgid, &ppid) != 0)
588 			break;
589 
590 		if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
591 						ppid, process, machine) < 0)
592 			break;
593 
594 		if (perf_event__synthesize_namespaces(tool, namespaces_event, _pid,
595 						      tgid, process, machine) < 0)
596 			break;
597 
598 		/*
599 		 * Send the prepared comm event
600 		 */
601 		if (perf_tool__process_synth_event(tool, comm_event, machine, process) != 0)
602 			break;
603 
604 		rc = 0;
605 		if (_pid == pid) {
606 			/* process the parent's maps too */
607 			rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
608 						process, machine, mmap_data);
609 			if (rc)
610 				break;
611 		}
612 	}
613 
614 	closedir(tasks);
615 	return rc;
616 }
617 
618 int perf_event__synthesize_thread_map(struct perf_tool *tool,
619 				      struct thread_map *threads,
620 				      perf_event__handler_t process,
621 				      struct machine *machine,
622 				      bool mmap_data)
623 {
624 	union perf_event *comm_event, *mmap_event, *fork_event;
625 	union perf_event *namespaces_event;
626 	int err = -1, thread, j;
627 
628 	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
629 	if (comm_event == NULL)
630 		goto out;
631 
632 	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
633 	if (mmap_event == NULL)
634 		goto out_free_comm;
635 
636 	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
637 	if (fork_event == NULL)
638 		goto out_free_mmap;
639 
640 	namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
641 				  (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
642 				  machine->id_hdr_size);
643 	if (namespaces_event == NULL)
644 		goto out_free_fork;
645 
646 	err = 0;
647 	for (thread = 0; thread < threads->nr; ++thread) {
648 		if (__event__synthesize_thread(comm_event, mmap_event,
649 					       fork_event, namespaces_event,
650 					       thread_map__pid(threads, thread), 0,
651 					       process, tool, machine,
652 					       mmap_data)) {
653 			err = -1;
654 			break;
655 		}
656 
657 		/*
658 		 * comm.pid is set to thread group id by
659 		 * perf_event__synthesize_comm
660 		 */
661 		if ((int) comm_event->comm.pid != thread_map__pid(threads, thread)) {
662 			bool need_leader = true;
663 
664 			/* is thread group leader in thread_map? */
665 			for (j = 0; j < threads->nr; ++j) {
666 				if ((int) comm_event->comm.pid == thread_map__pid(threads, j)) {
667 					need_leader = false;
668 					break;
669 				}
670 			}
671 
672 			/* if not, generate events for it */
673 			if (need_leader &&
674 			    __event__synthesize_thread(comm_event, mmap_event,
675 						       fork_event, namespaces_event,
676 						       comm_event->comm.pid, 0,
677 						       process, tool, machine,
678 						       mmap_data)) {
679 				err = -1;
680 				break;
681 			}
682 		}
683 	}
684 	free(namespaces_event);
685 out_free_fork:
686 	free(fork_event);
687 out_free_mmap:
688 	free(mmap_event);
689 out_free_comm:
690 	free(comm_event);
691 out:
692 	return err;
693 }
694 
695 static int __perf_event__synthesize_threads(struct perf_tool *tool,
696 					    perf_event__handler_t process,
697 					    struct machine *machine,
698 					    bool mmap_data,
699 					    struct dirent **dirent,
700 					    int start,
701 					    int num)
702 {
703 	union perf_event *comm_event, *mmap_event, *fork_event;
704 	union perf_event *namespaces_event;
705 	int err = -1;
706 	char *end;
707 	pid_t pid;
708 	int i;
709 
710 	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
711 	if (comm_event == NULL)
712 		goto out;
713 
714 	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
715 	if (mmap_event == NULL)
716 		goto out_free_comm;
717 
718 	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
719 	if (fork_event == NULL)
720 		goto out_free_mmap;
721 
722 	namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
723 				  (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
724 				  machine->id_hdr_size);
725 	if (namespaces_event == NULL)
726 		goto out_free_fork;
727 
728 	for (i = start; i < start + num; i++) {
729 		if (!isdigit(dirent[i]->d_name[0]))
730 			continue;
731 
732 		pid = (pid_t)strtol(dirent[i]->d_name, &end, 10);
733 		/* only interested in proper numerical dirents */
734 		if (*end)
735 			continue;
736 		/*
737 		 * We may race with exiting thread, so don't stop just because
738 		 * one thread couldn't be synthesized.
739 		 */
740 		__event__synthesize_thread(comm_event, mmap_event, fork_event,
741 					   namespaces_event, pid, 1, process,
742 					   tool, machine, mmap_data);
743 	}
744 	err = 0;
745 
746 	free(namespaces_event);
747 out_free_fork:
748 	free(fork_event);
749 out_free_mmap:
750 	free(mmap_event);
751 out_free_comm:
752 	free(comm_event);
753 out:
754 	return err;
755 }
756 
757 struct synthesize_threads_arg {
758 	struct perf_tool *tool;
759 	perf_event__handler_t process;
760 	struct machine *machine;
761 	bool mmap_data;
762 	struct dirent **dirent;
763 	int num;
764 	int start;
765 };
766 
767 static void *synthesize_threads_worker(void *arg)
768 {
769 	struct synthesize_threads_arg *args = arg;
770 
771 	__perf_event__synthesize_threads(args->tool, args->process,
772 					 args->machine, args->mmap_data,
773 					 args->dirent,
774 					 args->start, args->num);
775 	return NULL;
776 }
777 
778 int perf_event__synthesize_threads(struct perf_tool *tool,
779 				   perf_event__handler_t process,
780 				   struct machine *machine,
781 				   bool mmap_data,
782 				   unsigned int nr_threads_synthesize)
783 {
784 	struct synthesize_threads_arg *args = NULL;
785 	pthread_t *synthesize_threads = NULL;
786 	char proc_path[PATH_MAX];
787 	struct dirent **dirent;
788 	int num_per_thread;
789 	int m, n, i, j;
790 	int thread_nr;
791 	int base = 0;
792 	int err = -1;
793 
794 
795 	if (machine__is_default_guest(machine))
796 		return 0;
797 
798 	snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
799 	n = scandir(proc_path, &dirent, 0, alphasort);
800 	if (n < 0)
801 		return err;
802 
803 	if (nr_threads_synthesize == UINT_MAX)
804 		thread_nr = sysconf(_SC_NPROCESSORS_ONLN);
805 	else
806 		thread_nr = nr_threads_synthesize;
807 
808 	if (thread_nr <= 1) {
809 		err = __perf_event__synthesize_threads(tool, process,
810 						       machine, mmap_data,
811 						       dirent, base, n);
812 		goto free_dirent;
813 	}
814 	if (thread_nr > n)
815 		thread_nr = n;
816 
817 	synthesize_threads = calloc(sizeof(pthread_t), thread_nr);
818 	if (synthesize_threads == NULL)
819 		goto free_dirent;
820 
821 	args = calloc(sizeof(*args), thread_nr);
822 	if (args == NULL)
823 		goto free_threads;
824 
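	/*
	 * Distribute the n /proc entries across thread_nr workers: the first
	 * n % thread_nr workers each take one extra entry so every entry is
	 * covered exactly once.
	 */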
825 	num_per_thread = n / thread_nr;
826 	m = n % thread_nr;
827 	for (i = 0; i < thread_nr; i++) {
828 		args[i].tool = tool;
829 		args[i].process = process;
830 		args[i].machine = machine;
831 		args[i].mmap_data = mmap_data;
832 		args[i].dirent = dirent;
833 	}
834 	for (i = 0; i < m; i++) {
835 		args[i].num = num_per_thread + 1;
836 		args[i].start = i * args[i].num;
837 	}
838 	if (i != 0)
839 		base = args[i-1].start + args[i-1].num;
840 	for (j = i; j < thread_nr; j++) {
841 		args[j].num = num_per_thread;
842 		args[j].start = base + (j - i) * args[i].num;
843 	}
844 
845 	for (i = 0; i < thread_nr; i++) {
846 		if (pthread_create(&synthesize_threads[i], NULL,
847 				   synthesize_threads_worker, &args[i]))
848 			goto out_join;
849 	}
850 	err = 0;
851 out_join:
852 	for (i = 0; i < thread_nr; i++)
853 		pthread_join(synthesize_threads[i], NULL);
854 	free(args);
855 free_threads:
856 	free(synthesize_threads);
857 free_dirent:
858 	for (i = 0; i < n; i++)
859 		zfree(&dirent[i]);
860 	free(dirent);
861 
862 	return err;
863 }
864 
865 struct process_symbol_args {
866 	const char *name;
867 	u64	   start;
868 };
869 
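/*
 * Callback for kallsyms__parse(): invoked once per kallsyms line, e.g.
 * "ffffffff81000000 T _text", with the parsed address, type and name.
 */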
870 static int find_symbol_cb(void *arg, const char *name, char type,
871 			  u64 start)
872 {
873 	struct process_symbol_args *args = arg;
874 
875 	/*
876 	 * Must be a function or at least an alias, as on PARISC64, where "_text" is
877 	 * an 'A'-type alias for the same address as "_stext".
878 	 */
879 	if (!(kallsyms__is_function(type) ||
880 	      type == 'A') || strcmp(name, args->name))
881 		return 0;
882 
883 	args->start = start;
884 	return 1;
885 }
886 
887 int kallsyms__get_function_start(const char *kallsyms_filename,
888 				 const char *symbol_name, u64 *addr)
889 {
890 	struct process_symbol_args args = { .name = symbol_name, };
891 
892 	if (kallsyms__parse(kallsyms_filename, &args, find_symbol_cb) <= 0)
893 		return -1;
894 
895 	*addr = args.start;
896 	return 0;
897 }
898 
899 int __weak perf_event__synthesize_extra_kmaps(struct perf_tool *tool __maybe_unused,
900 					      perf_event__handler_t process __maybe_unused,
901 					      struct machine *machine __maybe_unused)
902 {
903 	return 0;
904 }
905 
906 static int __perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
907 						perf_event__handler_t process,
908 						struct machine *machine)
909 {
910 	size_t size;
911 	struct map *map = machine__kernel_map(machine);
912 	struct kmap *kmap;
913 	int err;
914 	union perf_event *event;
915 
916 	if (symbol_conf.kptr_restrict)
917 		return -1;
918 	if (map == NULL)
919 		return -1;
920 
921 	/*
922 	 * We should get this from /sys/kernel/sections/.text, but until that is
923 	 * available use this, and once it is, keep this as a fallback for older
924 	 * kernels.
925 	 */
926 	event = zalloc((sizeof(event->mmap) + machine->id_hdr_size));
927 	if (event == NULL) {
928 		pr_debug("Not enough memory synthesizing mmap event "
929 			 "for kernel modules\n");
930 		return -1;
931 	}
932 
933 	if (machine__is_host(machine)) {
934 		/*
935 		 * kernel uses PERF_RECORD_MISC_USER for user space maps,
936 		 * see kernel/perf_event.c __perf_event_mmap
937 		 */
938 		event->header.misc = PERF_RECORD_MISC_KERNEL;
939 	} else {
940 		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
941 	}
942 
943 	kmap = map__kmap(map);
944 	size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
945 			"%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1;
946 	size = PERF_ALIGN(size, sizeof(u64));
947 	event->mmap.header.type = PERF_RECORD_MMAP;
948 	event->mmap.header.size = (sizeof(event->mmap) -
949 			(sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
950 	event->mmap.pgoff = kmap->ref_reloc_sym->addr;
951 	event->mmap.start = map->start;
952 	event->mmap.len   = map->end - event->mmap.start;
953 	event->mmap.pid   = machine->pid;
954 
955 	err = perf_tool__process_synth_event(tool, event, machine, process);
956 	free(event);
957 
958 	return err;
959 }
960 
961 int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
962 				       perf_event__handler_t process,
963 				       struct machine *machine)
964 {
965 	int err;
966 
967 	err = __perf_event__synthesize_kernel_mmap(tool, process, machine);
968 	if (err < 0)
969 		return err;
970 
971 	return perf_event__synthesize_extra_kmaps(tool, process, machine);
972 }
973 
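/*
 * Synthesize a single PERF_RECORD_THREAD_MAP record: the number of threads
 * followed by one entry per thread carrying its pid and comm string.
 */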
974 int perf_event__synthesize_thread_map2(struct perf_tool *tool,
975 				      struct thread_map *threads,
976 				      perf_event__handler_t process,
977 				      struct machine *machine)
978 {
979 	union perf_event *event;
980 	int i, err, size;
981 
982 	size  = sizeof(event->thread_map);
983 	size +=	threads->nr * sizeof(event->thread_map.entries[0]);
984 
985 	event = zalloc(size);
986 	if (!event)
987 		return -ENOMEM;
988 
989 	event->header.type = PERF_RECORD_THREAD_MAP;
990 	event->header.size = size;
991 	event->thread_map.nr = threads->nr;
992 
993 	for (i = 0; i < threads->nr; i++) {
994 		struct thread_map_event_entry *entry = &event->thread_map.entries[i];
995 		char *comm = thread_map__comm(threads, i);
996 
997 		if (!comm)
998 			comm = (char *) "";
999 
1000 		entry->pid = thread_map__pid(threads, i);
1001 		strncpy((char *) &entry->comm, comm, sizeof(entry->comm));
1002 	}
1003 
1004 	err = process(tool, event, NULL, machine);
1005 
1006 	free(event);
1007 	return err;
1008 }
1009 
1010 static void synthesize_cpus(struct cpu_map_entries *cpus,
1011 			    struct cpu_map *map)
1012 {
1013 	int i;
1014 
1015 	cpus->nr = map->nr;
1016 
1017 	for (i = 0; i < map->nr; i++)
1018 		cpus->cpu[i] = map->map[i];
1019 }
1020 
1021 static void synthesize_mask(struct cpu_map_mask *mask,
1022 			    struct cpu_map *map, int max)
1023 {
1024 	int i;
1025 
1026 	mask->nr = BITS_TO_LONGS(max);
1027 	mask->long_size = sizeof(long);
1028 
1029 	for (i = 0; i < map->nr; i++)
1030 		set_bit(map->map[i], mask->mask);
1031 }
1032 
1033 static size_t cpus_size(struct cpu_map *map)
1034 {
1035 	return sizeof(struct cpu_map_entries) + map->nr * sizeof(u16);
1036 }
1037 
1038 static size_t mask_size(struct cpu_map *map, int *max)
1039 {
1040 	int i;
1041 
1042 	*max = 0;
1043 
1044 	for (i = 0; i < map->nr; i++) {
1045 		/* bit position of the cpu is its value + 1 */
1046 		int bit = map->map[i] + 1;
1047 
1048 		if (bit > *max)
1049 			*max = bit;
1050 	}
1051 
1052 	return sizeof(struct cpu_map_mask) + BITS_TO_LONGS(*max) * sizeof(long);
1053 }
1054 
1055 void *cpu_map_data__alloc(struct cpu_map *map, size_t *size, u16 *type, int *max)
1056 {
1057 	size_t size_cpus, size_mask;
1058 	bool is_dummy = cpu_map__empty(map);
1059 
1060 	/*
1061 	 * Both array and mask data have variable size based
1062 	 * on the number of cpus and their actual values.
1063 	 * The size of the 'struct cpu_map_data' is:
1064 	 *
1065 	 *   array = size of 'struct cpu_map_entries' +
1066 	 *           number of cpus * sizeof(u16)
1067 	 *
1068 	 *   mask  = size of 'struct cpu_map_mask' +
1069 	 *           maximum cpu bit converted to size of longs
1070 	 *
1071 	 * and finally + the size of 'struct cpu_map_data'.
1072 	 */
1073 	size_cpus = cpus_size(map);
1074 	size_mask = mask_size(map, max);
1075 
1076 	if (is_dummy || (size_cpus < size_mask)) {
1077 		*size += size_cpus;
1078 		*type  = PERF_CPU_MAP__CPUS;
1079 	} else {
1080 		*size += size_mask;
1081 		*type  = PERF_CPU_MAP__MASK;
1082 	}
1083 
1084 	*size += sizeof(struct cpu_map_data);
1085 	*size = PERF_ALIGN(*size, sizeof(u64));
1086 	return zalloc(*size);
1087 }
1088 
1089 void cpu_map_data__synthesize(struct cpu_map_data *data, struct cpu_map *map,
1090 			      u16 type, int max)
1091 {
1092 	data->type = type;
1093 
1094 	switch (type) {
1095 	case PERF_CPU_MAP__CPUS:
1096 		synthesize_cpus((struct cpu_map_entries *) data->data, map);
1097 		break;
1098 	case PERF_CPU_MAP__MASK:
1099 		synthesize_mask((struct cpu_map_mask *) data->data, map, max);
1100 	default:
1101 		break;
1102 	};
1103 }
1104 
1105 static struct cpu_map_event* cpu_map_event__new(struct cpu_map *map)
1106 {
1107 	size_t size = sizeof(struct cpu_map_event);
1108 	struct cpu_map_event *event;
1109 	int max;
1110 	u16 type;
1111 
1112 	event = cpu_map_data__alloc(map, &size, &type, &max);
1113 	if (!event)
1114 		return NULL;
1115 
1116 	event->header.type = PERF_RECORD_CPU_MAP;
1117 	event->header.size = size;
1118 	event->data.type   = type;
1119 
1120 	cpu_map_data__synthesize(&event->data, map, type, max);
1121 	return event;
1122 }
1123 
1124 int perf_event__synthesize_cpu_map(struct perf_tool *tool,
1125 				   struct cpu_map *map,
1126 				   perf_event__handler_t process,
1127 				   struct machine *machine)
1128 {
1129 	struct cpu_map_event *event;
1130 	int err;
1131 
1132 	event = cpu_map_event__new(map);
1133 	if (!event)
1134 		return -ENOMEM;
1135 
1136 	err = process(tool, (union perf_event *) event, NULL, machine);
1137 
1138 	free(event);
1139 	return err;
1140 }
1141 
1142 int perf_event__synthesize_stat_config(struct perf_tool *tool,
1143 				       struct perf_stat_config *config,
1144 				       perf_event__handler_t process,
1145 				       struct machine *machine)
1146 {
1147 	struct stat_config_event *event;
1148 	int size, i = 0, err;
1149 
1150 	size  = sizeof(*event);
1151 	size += (PERF_STAT_CONFIG_TERM__MAX * sizeof(event->data[0]));
1152 
1153 	event = zalloc(size);
1154 	if (!event)
1155 		return -ENOMEM;
1156 
1157 	event->header.type = PERF_RECORD_STAT_CONFIG;
1158 	event->header.size = size;
1159 	event->nr          = PERF_STAT_CONFIG_TERM__MAX;
1160 
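/*
 * Append one (tag, value) pair per config term; the WARN_ONCE below fires
 * if PERF_STAT_CONFIG_TERM__MAX grows without a matching ADD() here.
 */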
1161 #define ADD(__term, __val)					\
1162 	event->data[i].tag = PERF_STAT_CONFIG_TERM__##__term;	\
1163 	event->data[i].val = __val;				\
1164 	i++;
1165 
1166 	ADD(AGGR_MODE,	config->aggr_mode)
1167 	ADD(INTERVAL,	config->interval)
1168 	ADD(SCALE,	config->scale)
1169 
1170 	WARN_ONCE(i != PERF_STAT_CONFIG_TERM__MAX,
1171 		  "stat config terms unbalanced\n");
1172 #undef ADD
1173 
1174 	err = process(tool, (union perf_event *) event, NULL, machine);
1175 
1176 	free(event);
1177 	return err;
1178 }
1179 
1180 int perf_event__synthesize_stat(struct perf_tool *tool,
1181 				u32 cpu, u32 thread, u64 id,
1182 				struct perf_counts_values *count,
1183 				perf_event__handler_t process,
1184 				struct machine *machine)
1185 {
1186 	struct stat_event event;
1187 
1188 	event.header.type = PERF_RECORD_STAT;
1189 	event.header.size = sizeof(event);
1190 	event.header.misc = 0;
1191 
1192 	event.id        = id;
1193 	event.cpu       = cpu;
1194 	event.thread    = thread;
1195 	event.val       = count->val;
1196 	event.ena       = count->ena;
1197 	event.run       = count->run;
1198 
1199 	return process(tool, (union perf_event *) &event, NULL, machine);
1200 }
1201 
1202 int perf_event__synthesize_stat_round(struct perf_tool *tool,
1203 				      u64 evtime, u64 type,
1204 				      perf_event__handler_t process,
1205 				      struct machine *machine)
1206 {
1207 	struct stat_round_event event;
1208 
1209 	event.header.type = PERF_RECORD_STAT_ROUND;
1210 	event.header.size = sizeof(event);
1211 	event.header.misc = 0;
1212 
1213 	event.time = evtime;
1214 	event.type = type;
1215 
1216 	return process(tool, (union perf_event *) &event, NULL, machine);
1217 }
1218 
1219 void perf_event__read_stat_config(struct perf_stat_config *config,
1220 				  struct stat_config_event *event)
1221 {
1222 	unsigned i;
1223 
1224 	for (i = 0; i < event->nr; i++) {
1225 
1226 		switch (event->data[i].tag) {
1227 #define CASE(__term, __val)					\
1228 		case PERF_STAT_CONFIG_TERM__##__term:		\
1229 			config->__val = event->data[i].val;	\
1230 			break;
1231 
1232 		CASE(AGGR_MODE, aggr_mode)
1233 		CASE(SCALE,     scale)
1234 		CASE(INTERVAL,  interval)
1235 #undef CASE
1236 		default:
1237 			pr_warning("unknown stat config term %" PRIu64 "\n",
1238 				   event->data[i].tag);
1239 		}
1240 	}
1241 }
1242 
1243 size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp)
1244 {
1245 	const char *s;
1246 
1247 	if (event->header.misc & PERF_RECORD_MISC_COMM_EXEC)
1248 		s = " exec";
1249 	else
1250 		s = "";
1251 
1252 	return fprintf(fp, "%s: %s:%d/%d\n", s, event->comm.comm, event->comm.pid, event->comm.tid);
1253 }
1254 
1255 size_t perf_event__fprintf_namespaces(union perf_event *event, FILE *fp)
1256 {
1257 	size_t ret = 0;
1258 	struct perf_ns_link_info *ns_link_info;
1259 	u32 nr_namespaces, idx;
1260 
1261 	ns_link_info = event->namespaces.link_info;
1262 	nr_namespaces = event->namespaces.nr_namespaces;
1263 
1264 	ret += fprintf(fp, " %d/%d - nr_namespaces: %u\n\t\t[",
1265 		       event->namespaces.pid,
1266 		       event->namespaces.tid,
1267 		       nr_namespaces);
1268 
1269 	for (idx = 0; idx < nr_namespaces; idx++) {
1270 		if (idx && (idx % 4 == 0))
1271 			ret += fprintf(fp, "\n\t\t ");
1272 
1273 		ret  += fprintf(fp, "%u/%s: %" PRIu64 "/%#" PRIx64 "%s", idx,
1274 				perf_ns__name(idx), (u64)ns_link_info[idx].dev,
1275 				(u64)ns_link_info[idx].ino,
1276 				((idx + 1) != nr_namespaces) ? ", " : "]\n");
1277 	}
1278 
1279 	return ret;
1280 }
1281 
1282 int perf_event__process_comm(struct perf_tool *tool __maybe_unused,
1283 			     union perf_event *event,
1284 			     struct perf_sample *sample,
1285 			     struct machine *machine)
1286 {
1287 	return machine__process_comm_event(machine, event, sample);
1288 }
1289 
1290 int perf_event__process_namespaces(struct perf_tool *tool __maybe_unused,
1291 				   union perf_event *event,
1292 				   struct perf_sample *sample,
1293 				   struct machine *machine)
1294 {
1295 	return machine__process_namespaces_event(machine, event, sample);
1296 }
1297 
1298 int perf_event__process_lost(struct perf_tool *tool __maybe_unused,
1299 			     union perf_event *event,
1300 			     struct perf_sample *sample,
1301 			     struct machine *machine)
1302 {
1303 	return machine__process_lost_event(machine, event, sample);
1304 }
1305 
1306 int perf_event__process_aux(struct perf_tool *tool __maybe_unused,
1307 			    union perf_event *event,
1308 			    struct perf_sample *sample __maybe_unused,
1309 			    struct machine *machine)
1310 {
1311 	return machine__process_aux_event(machine, event);
1312 }
1313 
1314 int perf_event__process_itrace_start(struct perf_tool *tool __maybe_unused,
1315 				     union perf_event *event,
1316 				     struct perf_sample *sample __maybe_unused,
1317 				     struct machine *machine)
1318 {
1319 	return machine__process_itrace_start_event(machine, event);
1320 }
1321 
1322 int perf_event__process_lost_samples(struct perf_tool *tool __maybe_unused,
1323 				     union perf_event *event,
1324 				     struct perf_sample *sample,
1325 				     struct machine *machine)
1326 {
1327 	return machine__process_lost_samples_event(machine, event, sample);
1328 }
1329 
1330 int perf_event__process_switch(struct perf_tool *tool __maybe_unused,
1331 			       union perf_event *event,
1332 			       struct perf_sample *sample __maybe_unused,
1333 			       struct machine *machine)
1334 {
1335 	return machine__process_switch_event(machine, event);
1336 }
1337 
1338 int perf_event__process_ksymbol(struct perf_tool *tool __maybe_unused,
1339 				union perf_event *event,
1340 				struct perf_sample *sample __maybe_unused,
1341 				struct machine *machine)
1342 {
1343 	return machine__process_ksymbol(machine, event, sample);
1344 }
1345 
1346 int perf_event__process_bpf_event(struct perf_tool *tool __maybe_unused,
1347 				  union perf_event *event,
1348 				  struct perf_sample *sample __maybe_unused,
1349 				  struct machine *machine)
1350 {
1351 	return machine__process_bpf_event(machine, event, sample);
1352 }
1353 
1354 size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
1355 {
1356 	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %c %s\n",
1357 		       event->mmap.pid, event->mmap.tid, event->mmap.start,
1358 		       event->mmap.len, event->mmap.pgoff,
1359 		       (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x',
1360 		       event->mmap.filename);
1361 }
1362 
1363 size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp)
1364 {
1365 	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64
1366 			   " %02x:%02x %"PRIu64" %"PRIu64"]: %c%c%c%c %s\n",
1367 		       event->mmap2.pid, event->mmap2.tid, event->mmap2.start,
1368 		       event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj,
1369 		       event->mmap2.min, event->mmap2.ino,
1370 		       event->mmap2.ino_generation,
1371 		       (event->mmap2.prot & PROT_READ) ? 'r' : '-',
1372 		       (event->mmap2.prot & PROT_WRITE) ? 'w' : '-',
1373 		       (event->mmap2.prot & PROT_EXEC) ? 'x' : '-',
1374 		       (event->mmap2.flags & MAP_SHARED) ? 's' : 'p',
1375 		       event->mmap2.filename);
1376 }
1377 
1378 size_t perf_event__fprintf_thread_map(union perf_event *event, FILE *fp)
1379 {
1380 	struct thread_map *threads = thread_map__new_event(&event->thread_map);
1381 	size_t ret;
1382 
1383 	ret = fprintf(fp, " nr: ");
1384 
1385 	if (threads)
1386 		ret += thread_map__fprintf(threads, fp);
1387 	else
1388 		ret += fprintf(fp, "failed to get threads from event\n");
1389 
1390 	thread_map__put(threads);
1391 	return ret;
1392 }
1393 
1394 size_t perf_event__fprintf_cpu_map(union perf_event *event, FILE *fp)
1395 {
1396 	struct cpu_map *cpus = cpu_map__new_data(&event->cpu_map.data);
1397 	size_t ret;
1398 
1399 	ret = fprintf(fp, ": ");
1400 
1401 	if (cpus)
1402 		ret += cpu_map__fprintf(cpus, fp);
1403 	else
1404 		ret += fprintf(fp, "failed to get cpumap from event\n");
1405 
1406 	cpu_map__put(cpus);
1407 	return ret;
1408 }
1409 
1410 int perf_event__process_mmap(struct perf_tool *tool __maybe_unused,
1411 			     union perf_event *event,
1412 			     struct perf_sample *sample,
1413 			     struct machine *machine)
1414 {
1415 	return machine__process_mmap_event(machine, event, sample);
1416 }
1417 
1418 int perf_event__process_mmap2(struct perf_tool *tool __maybe_unused,
1419 			     union perf_event *event,
1420 			     struct perf_sample *sample,
1421 			     struct machine *machine)
1422 {
1423 	return machine__process_mmap2_event(machine, event, sample);
1424 }
1425 
1426 size_t perf_event__fprintf_task(union perf_event *event, FILE *fp)
1427 {
1428 	return fprintf(fp, "(%d:%d):(%d:%d)\n",
1429 		       event->fork.pid, event->fork.tid,
1430 		       event->fork.ppid, event->fork.ptid);
1431 }
1432 
1433 int perf_event__process_fork(struct perf_tool *tool __maybe_unused,
1434 			     union perf_event *event,
1435 			     struct perf_sample *sample,
1436 			     struct machine *machine)
1437 {
1438 	return machine__process_fork_event(machine, event, sample);
1439 }
1440 
1441 int perf_event__process_exit(struct perf_tool *tool __maybe_unused,
1442 			     union perf_event *event,
1443 			     struct perf_sample *sample,
1444 			     struct machine *machine)
1445 {
1446 	return machine__process_exit_event(machine, event, sample);
1447 }
1448 
1449 size_t perf_event__fprintf_aux(union perf_event *event, FILE *fp)
1450 {
1451 	return fprintf(fp, " offset: %#"PRIx64" size: %#"PRIx64" flags: %#"PRIx64" [%s%s%s]\n",
1452 		       event->aux.aux_offset, event->aux.aux_size,
1453 		       event->aux.flags,
1454 		       event->aux.flags & PERF_AUX_FLAG_TRUNCATED ? "T" : "",
1455 		       event->aux.flags & PERF_AUX_FLAG_OVERWRITE ? "O" : "",
1456 		       event->aux.flags & PERF_AUX_FLAG_PARTIAL   ? "P" : "");
1457 }
1458 
1459 size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp)
1460 {
1461 	return fprintf(fp, " pid: %u tid: %u\n",
1462 		       event->itrace_start.pid, event->itrace_start.tid);
1463 }
1464 
1465 size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp)
1466 {
1467 	bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
1468 	const char *in_out = !out ? "IN         " :
1469 		!(event->header.misc & PERF_RECORD_MISC_SWITCH_OUT_PREEMPT) ?
1470 				    "OUT        " : "OUT preempt";
1471 
1472 	if (event->header.type == PERF_RECORD_SWITCH)
1473 		return fprintf(fp, " %s\n", in_out);
1474 
1475 	return fprintf(fp, " %s  %s pid/tid: %5u/%-5u\n",
1476 		       in_out, out ? "next" : "prev",
1477 		       event->context_switch.next_prev_pid,
1478 		       event->context_switch.next_prev_tid);
1479 }
1480 
1481 static size_t perf_event__fprintf_lost(union perf_event *event, FILE *fp)
1482 {
1483 	return fprintf(fp, " lost %" PRIu64 "\n", event->lost.lost);
1484 }
1485 
1486 size_t perf_event__fprintf_ksymbol(union perf_event *event, FILE *fp)
1487 {
1488 	return fprintf(fp, " addr %" PRIx64 " len %u type %u flags 0x%x name %s\n",
1489 		       event->ksymbol_event.addr, event->ksymbol_event.len,
1490 		       event->ksymbol_event.ksym_type,
1491 		       event->ksymbol_event.flags, event->ksymbol_event.name);
1492 }
1493 
1494 size_t perf_event__fprintf_bpf_event(union perf_event *event, FILE *fp)
1495 {
1496 	return fprintf(fp, " type %u, flags %u, id %u\n",
1497 		       event->bpf_event.type, event->bpf_event.flags,
1498 		       event->bpf_event.id);
1499 }
1500 
1501 size_t perf_event__fprintf(union perf_event *event, FILE *fp)
1502 {
1503 	size_t ret = fprintf(fp, "PERF_RECORD_%s",
1504 			     perf_event__name(event->header.type));
1505 
1506 	switch (event->header.type) {
1507 	case PERF_RECORD_COMM:
1508 		ret += perf_event__fprintf_comm(event, fp);
1509 		break;
1510 	case PERF_RECORD_FORK:
1511 	case PERF_RECORD_EXIT:
1512 		ret += perf_event__fprintf_task(event, fp);
1513 		break;
1514 	case PERF_RECORD_MMAP:
1515 		ret += perf_event__fprintf_mmap(event, fp);
1516 		break;
1517 	case PERF_RECORD_NAMESPACES:
1518 		ret += perf_event__fprintf_namespaces(event, fp);
1519 		break;
1520 	case PERF_RECORD_MMAP2:
1521 		ret += perf_event__fprintf_mmap2(event, fp);
1522 		break;
1523 	case PERF_RECORD_AUX:
1524 		ret += perf_event__fprintf_aux(event, fp);
1525 		break;
1526 	case PERF_RECORD_ITRACE_START:
1527 		ret += perf_event__fprintf_itrace_start(event, fp);
1528 		break;
1529 	case PERF_RECORD_SWITCH:
1530 	case PERF_RECORD_SWITCH_CPU_WIDE:
1531 		ret += perf_event__fprintf_switch(event, fp);
1532 		break;
1533 	case PERF_RECORD_LOST:
1534 		ret += perf_event__fprintf_lost(event, fp);
1535 		break;
1536 	case PERF_RECORD_KSYMBOL:
1537 		ret += perf_event__fprintf_ksymbol(event, fp);
1538 		break;
1539 	case PERF_RECORD_BPF_EVENT:
1540 		ret += perf_event__fprintf_bpf_event(event, fp);
1541 		break;
1542 	default:
1543 		ret += fprintf(fp, "\n");
1544 	}
1545 
1546 	return ret;
1547 }
1548 
1549 int perf_event__process(struct perf_tool *tool __maybe_unused,
1550 			union perf_event *event,
1551 			struct perf_sample *sample,
1552 			struct machine *machine)
1553 {
1554 	return machine__process_event(machine, event, sample);
1555 }
1556 
1557 struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr,
1558 			     struct addr_location *al)
1559 {
1560 	struct map_groups *mg = thread->mg;
1561 	struct machine *machine = mg->machine;
1562 	bool load_map = false;
1563 
1564 	al->machine = machine;
1565 	al->thread = thread;
1566 	al->addr = addr;
1567 	al->cpumode = cpumode;
1568 	al->filtered = 0;
1569 
1570 	if (machine == NULL) {
1571 		al->map = NULL;
1572 		return NULL;
1573 	}
1574 
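	/*
	 * al->level encodes where the address resolved: 'k' host kernel,
	 * '.' host user, 'g' guest kernel, 'u' guest user, 'H' anything
	 * else (hypervisor or a cpumode we cannot handle here).
	 */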
1575 	if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
1576 		al->level = 'k';
1577 		mg = &machine->kmaps;
1578 		load_map = true;
1579 	} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
1580 		al->level = '.';
1581 	} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
1582 		al->level = 'g';
1583 		mg = &machine->kmaps;
1584 		load_map = true;
1585 	} else if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest) {
1586 		al->level = 'u';
1587 	} else {
1588 		al->level = 'H';
1589 		al->map = NULL;
1590 
1591 		if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
1592 			cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
1593 			!perf_guest)
1594 			al->filtered |= (1 << HIST_FILTER__GUEST);
1595 		if ((cpumode == PERF_RECORD_MISC_USER ||
1596 			cpumode == PERF_RECORD_MISC_KERNEL) &&
1597 			!perf_host)
1598 			al->filtered |= (1 << HIST_FILTER__HOST);
1599 
1600 		return NULL;
1601 	}
1602 
1603 	al->map = map_groups__find(mg, al->addr);
1604 	if (al->map != NULL) {
1605 		/*
1606 		 * Kernel maps might be changed when loading symbols so loading
1607 		 * must be done prior to using kernel maps.
1608 		 */
1609 		if (load_map)
1610 			map__load(al->map);
1611 		al->addr = al->map->map_ip(al->map, al->addr);
1612 	}
1613 
1614 	return al->map;
1615 }
1616 
1617 /*
1618  * For branch stacks or branch samples, the sample cpumode might not be correct
1619  * because it applies only to the sample 'ip' and not necessarily to 'addr' or
1620  * branch stack addresses. If possible, use a fallback to deal with those cases.
1621  */
1622 struct map *thread__find_map_fb(struct thread *thread, u8 cpumode, u64 addr,
1623 				struct addr_location *al)
1624 {
1625 	struct map *map = thread__find_map(thread, cpumode, addr, al);
1626 	struct machine *machine = thread->mg->machine;
1627 	u8 addr_cpumode = machine__addr_cpumode(machine, cpumode, addr);
1628 
1629 	if (map || addr_cpumode == cpumode)
1630 		return map;
1631 
1632 	return thread__find_map(thread, addr_cpumode, addr, al);
1633 }
1634 
1635 struct symbol *thread__find_symbol(struct thread *thread, u8 cpumode,
1636 				   u64 addr, struct addr_location *al)
1637 {
1638 	al->sym = NULL;
1639 	if (thread__find_map(thread, cpumode, addr, al))
1640 		al->sym = map__find_symbol(al->map, al->addr);
1641 	return al->sym;
1642 }
1643 
1644 struct symbol *thread__find_symbol_fb(struct thread *thread, u8 cpumode,
1645 				      u64 addr, struct addr_location *al)
1646 {
1647 	al->sym = NULL;
1648 	if (thread__find_map_fb(thread, cpumode, addr, al))
1649 		al->sym = map__find_symbol(al->map, al->addr);
1650 	return al->sym;
1651 }
1652 
1653 /*
1654  * Callers need to drop the reference to al->thread, obtained in
1655  * machine__findnew_thread()
1656  */
1657 int machine__resolve(struct machine *machine, struct addr_location *al,
1658 		     struct perf_sample *sample)
1659 {
1660 	struct thread *thread = machine__findnew_thread(machine, sample->pid,
1661 							sample->tid);
1662 
1663 	if (thread == NULL)
1664 		return -1;
1665 
1666 	dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);
1667 	thread__find_map(thread, sample->cpumode, sample->ip, al);
1668 	dump_printf(" ...... dso: %s\n",
1669 		    al->map ? al->map->dso->long_name :
1670 			al->level == 'H' ? "[hypervisor]" : "<not found>");
1671 
1672 	if (thread__is_filtered(thread))
1673 		al->filtered |= (1 << HIST_FILTER__THREAD);
1674 
1675 	al->sym = NULL;
1676 	al->cpu = sample->cpu;
1677 	al->socket = -1;
1678 	al->srcline = NULL;
1679 
1680 	if (al->cpu >= 0) {
1681 		struct perf_env *env = machine->env;
1682 
1683 		if (env && env->cpu)
1684 			al->socket = env->cpu[al->cpu].socket_id;
1685 	}
1686 
1687 	if (al->map) {
1688 		struct dso *dso = al->map->dso;
1689 
1690 		if (symbol_conf.dso_list &&
1691 		    (!dso || !(strlist__has_entry(symbol_conf.dso_list,
1692 						  dso->short_name) ||
1693 			       (dso->short_name != dso->long_name &&
1694 				strlist__has_entry(symbol_conf.dso_list,
1695 						   dso->long_name))))) {
1696 			al->filtered |= (1 << HIST_FILTER__DSO);
1697 		}
1698 
1699 		al->sym = map__find_symbol(al->map, al->addr);
1700 	}
1701 
1702 	if (symbol_conf.sym_list &&
1703 		(!al->sym || !strlist__has_entry(symbol_conf.sym_list,
1704 						al->sym->name))) {
1705 		al->filtered |= (1 << HIST_FILTER__SYMBOL);
1706 	}
1707 
1708 	return 0;
1709 }
1710 
1711 /*
1712  * The preprocess_sample method will return with reference counts for the
1713  * entries in it; when done using them (and perhaps getting ref counts if needing to
1714  * keep a pointer to one of those entries) it must be paired with
1715  * addr_location__put(), so that the refcounts can be decremented.
1716  */
1717 void addr_location__put(struct addr_location *al)
1718 {
1719 	thread__zput(al->thread);
1720 }
1721 
1722 bool is_bts_event(struct perf_event_attr *attr)
1723 {
1724 	return attr->type == PERF_TYPE_HARDWARE &&
1725 	       (attr->config & PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
1726 	       attr->sample_period == 1;
1727 }
1728 
1729 bool sample_addr_correlates_sym(struct perf_event_attr *attr)
1730 {
1731 	if (attr->type == PERF_TYPE_SOFTWARE &&
1732 	    (attr->config == PERF_COUNT_SW_PAGE_FAULTS ||
1733 	     attr->config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
1734 	     attr->config == PERF_COUNT_SW_PAGE_FAULTS_MAJ))
1735 		return true;
1736 
1737 	if (is_bts_event(attr))
1738 		return true;
1739 
1740 	return false;
1741 }
1742 
1743 void thread__resolve(struct thread *thread, struct addr_location *al,
1744 		     struct perf_sample *sample)
1745 {
1746 	thread__find_map_fb(thread, sample->cpumode, sample->addr, al);
1747 
1748 	al->cpu = sample->cpu;
1749 	al->sym = NULL;
1750 
1751 	if (al->map)
1752 		al->sym = map__find_symbol(al->map, al->addr);
1753 }
1754