xref: /openbmc/linux/tools/perf/util/event.c (revision ba61bb17)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <dirent.h>
3 #include <errno.h>
4 #include <fcntl.h>
5 #include <inttypes.h>
6 #include <linux/kernel.h>
7 #include <linux/types.h>
8 #include <sys/types.h>
9 #include <sys/stat.h>
10 #include <unistd.h>
11 #include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
12 #include <api/fs/fs.h>
13 #include <linux/perf_event.h>
14 #include "event.h"
15 #include "debug.h"
16 #include "hist.h"
17 #include "machine.h"
18 #include "sort.h"
19 #include "string2.h"
20 #include "strlist.h"
21 #include "thread.h"
22 #include "thread_map.h"
23 #include "sane_ctype.h"
24 #include "symbol/kallsyms.h"
25 #include "asm/bug.h"
26 #include "stat.h"
27 
28 static const char *perf_event__names[] = {
29 	[0]					= "TOTAL",
30 	[PERF_RECORD_MMAP]			= "MMAP",
31 	[PERF_RECORD_MMAP2]			= "MMAP2",
32 	[PERF_RECORD_LOST]			= "LOST",
33 	[PERF_RECORD_COMM]			= "COMM",
34 	[PERF_RECORD_EXIT]			= "EXIT",
35 	[PERF_RECORD_THROTTLE]			= "THROTTLE",
36 	[PERF_RECORD_UNTHROTTLE]		= "UNTHROTTLE",
37 	[PERF_RECORD_FORK]			= "FORK",
38 	[PERF_RECORD_READ]			= "READ",
39 	[PERF_RECORD_SAMPLE]			= "SAMPLE",
40 	[PERF_RECORD_AUX]			= "AUX",
41 	[PERF_RECORD_ITRACE_START]		= "ITRACE_START",
42 	[PERF_RECORD_LOST_SAMPLES]		= "LOST_SAMPLES",
43 	[PERF_RECORD_SWITCH]			= "SWITCH",
44 	[PERF_RECORD_SWITCH_CPU_WIDE]		= "SWITCH_CPU_WIDE",
45 	[PERF_RECORD_NAMESPACES]		= "NAMESPACES",
46 	[PERF_RECORD_HEADER_ATTR]		= "ATTR",
47 	[PERF_RECORD_HEADER_EVENT_TYPE]		= "EVENT_TYPE",
48 	[PERF_RECORD_HEADER_TRACING_DATA]	= "TRACING_DATA",
49 	[PERF_RECORD_HEADER_BUILD_ID]		= "BUILD_ID",
50 	[PERF_RECORD_FINISHED_ROUND]		= "FINISHED_ROUND",
51 	[PERF_RECORD_ID_INDEX]			= "ID_INDEX",
52 	[PERF_RECORD_AUXTRACE_INFO]		= "AUXTRACE_INFO",
53 	[PERF_RECORD_AUXTRACE]			= "AUXTRACE",
54 	[PERF_RECORD_AUXTRACE_ERROR]		= "AUXTRACE_ERROR",
55 	[PERF_RECORD_THREAD_MAP]		= "THREAD_MAP",
56 	[PERF_RECORD_CPU_MAP]			= "CPU_MAP",
57 	[PERF_RECORD_STAT_CONFIG]		= "STAT_CONFIG",
58 	[PERF_RECORD_STAT]			= "STAT",
59 	[PERF_RECORD_STAT_ROUND]		= "STAT_ROUND",
60 	[PERF_RECORD_EVENT_UPDATE]		= "EVENT_UPDATE",
61 	[PERF_RECORD_TIME_CONV]			= "TIME_CONV",
62 	[PERF_RECORD_HEADER_FEATURE]		= "FEATURE",
63 };
64 
65 static const char *perf_ns__names[] = {
66 	[NET_NS_INDEX]		= "net",
67 	[UTS_NS_INDEX]		= "uts",
68 	[IPC_NS_INDEX]		= "ipc",
69 	[PID_NS_INDEX]		= "pid",
70 	[USER_NS_INDEX]		= "user",
71 	[MNT_NS_INDEX]		= "mnt",
72 	[CGROUP_NS_INDEX]	= "cgroup",
73 };
74 
75 const char *perf_event__name(unsigned int id)
76 {
77 	if (id >= ARRAY_SIZE(perf_event__names))
78 		return "INVALID";
79 	if (!perf_event__names[id])
80 		return "UNKNOWN";
81 	return perf_event__names[id];
82 }
83 
84 static const char *perf_ns__name(unsigned int id)
85 {
86 	if (id >= ARRAY_SIZE(perf_ns__names))
87 		return "UNKNOWN";
88 	return perf_ns__names[id];
89 }
90 
91 int perf_tool__process_synth_event(struct perf_tool *tool,
92 				   union perf_event *event,
93 				   struct machine *machine,
94 				   perf_event__handler_t process)
95 {
96 	struct perf_sample synth_sample = {
97 	.pid	   = -1,
98 	.tid	   = -1,
99 	.time	   = -1,
100 	.stream_id = -1,
101 	.cpu	   = -1,
102 	.period	   = 1,
103 	.cpumode   = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK,
104 	};
105 
106 	return process(tool, event, &synth_sample, machine);
107 }
108 
109 /*
110  * Assumes that the first 4095 bytes of /proc/pid/status contain
111  * the comm, tgid and ppid.
112  */
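/*
 * The "Name:", "Tgid:" and "PPid:" fields parsed below appear in
 * /proc/<pid>/status as, for example:
 *
 *   Name:   cat
 *   Tgid:   1234
 *   PPid:   1
 */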
113 static int perf_event__get_comm_ids(pid_t pid, char *comm, size_t len,
114 				    pid_t *tgid, pid_t *ppid)
115 {
116 	char filename[PATH_MAX];
117 	char bf[4096];
118 	int fd;
119 	size_t size = 0;
120 	ssize_t n;
121 	char *name, *tgids, *ppids;
122 
123 	*tgid = -1;
124 	*ppid = -1;
125 
126 	snprintf(filename, sizeof(filename), "/proc/%d/status", pid);
127 
128 	fd = open(filename, O_RDONLY);
129 	if (fd < 0) {
130 		pr_debug("couldn't open %s\n", filename);
131 		return -1;
132 	}
133 
134 	n = read(fd, bf, sizeof(bf) - 1);
135 	close(fd);
136 	if (n <= 0) {
137 		pr_warning("Couldn't get COMM, tgid and ppid for pid %d\n",
138 			   pid);
139 		return -1;
140 	}
141 	bf[n] = '\0';
142 
143 	name = strstr(bf, "Name:");
144 	tgids = strstr(bf, "Tgid:");
145 	ppids = strstr(bf, "PPid:");
146 
147 	if (name) {
148 		char *nl;
149 
150 		name += 5;  /* strlen("Name:") */
151 		name = ltrim(name);
152 
153 		nl = strchr(name, '\n');
154 		if (nl)
155 			*nl = '\0';
156 
157 		size = strlen(name);
158 		if (size >= len)
159 			size = len - 1;
160 		memcpy(comm, name, size);
161 		comm[size] = '\0';
162 	} else {
163 		pr_debug("Name: string not found for pid %d\n", pid);
164 	}
165 
166 	if (tgids) {
167 		tgids += 5;  /* strlen("Tgid:") */
168 		*tgid = atoi(tgids);
169 	} else {
170 		pr_debug("Tgid: string not found for pid %d\n", pid);
171 	}
172 
173 	if (ppids) {
174 		ppids += 5;  /* strlen("PPid:") */
175 		*ppid = atoi(ppids);
176 	} else {
177 		pr_debug("PPid: string not found for pid %d\n", pid);
178 	}
179 
180 	return 0;
181 }
182 
183 static int perf_event__prepare_comm(union perf_event *event, pid_t pid,
184 				    struct machine *machine,
185 				    pid_t *tgid, pid_t *ppid)
186 {
187 	size_t size;
188 
189 	*ppid = -1;
190 
191 	memset(&event->comm, 0, sizeof(event->comm));
192 
193 	if (machine__is_host(machine)) {
194 		if (perf_event__get_comm_ids(pid, event->comm.comm,
195 					     sizeof(event->comm.comm),
196 					     tgid, ppid) != 0) {
197 			return -1;
198 		}
199 	} else {
200 		*tgid = machine->pid;
201 	}
202 
203 	if (*tgid < 0)
204 		return -1;
205 
206 	event->comm.pid = *tgid;
207 	event->comm.header.type = PERF_RECORD_COMM;
208 
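	/*
	 * Trim the unused tail of the fixed-size comm[] buffer: e.g. for a
	 * comm of "cat" the payload is strlen + 1 = 4 bytes, aligned up to 8,
	 * so header.size drops everything in comm[] past those 8 bytes and the
	 * id sample area (machine->id_hdr_size) follows right after.
	 */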
209 	size = strlen(event->comm.comm) + 1;
210 	size = PERF_ALIGN(size, sizeof(u64));
211 	memset(event->comm.comm + size, 0, machine->id_hdr_size);
212 	event->comm.header.size = (sizeof(event->comm) -
213 				(sizeof(event->comm.comm) - size) +
214 				machine->id_hdr_size);
215 	event->comm.tid = pid;
216 
217 	return 0;
218 }
219 
220 pid_t perf_event__synthesize_comm(struct perf_tool *tool,
221 					 union perf_event *event, pid_t pid,
222 					 perf_event__handler_t process,
223 					 struct machine *machine)
224 {
225 	pid_t tgid, ppid;
226 
227 	if (perf_event__prepare_comm(event, pid, machine, &tgid, &ppid) != 0)
228 		return -1;
229 
230 	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
231 		return -1;
232 
233 	return tgid;
234 }
235 
236 static void perf_event__get_ns_link_info(pid_t pid, const char *ns,
237 					 struct perf_ns_link_info *ns_link_info)
238 {
239 	struct stat64 st;
240 	char proc_ns[128];
241 
242 	sprintf(proc_ns, "/proc/%u/ns/%s", pid, ns);
243 	if (stat64(proc_ns, &st) == 0) {
244 		ns_link_info->dev = st.st_dev;
245 		ns_link_info->ino = st.st_ino;
246 	}
247 }
248 
249 int perf_event__synthesize_namespaces(struct perf_tool *tool,
250 				      union perf_event *event,
251 				      pid_t pid, pid_t tgid,
252 				      perf_event__handler_t process,
253 				      struct machine *machine)
254 {
255 	u32 idx;
256 	struct perf_ns_link_info *ns_link_info;
257 
258 	if (!tool || !tool->namespace_events)
259 		return 0;
260 
261 	memset(&event->namespaces, 0, (sizeof(event->namespaces) +
262 	       (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
263 	       machine->id_hdr_size));
264 
265 	event->namespaces.pid = tgid;
266 	event->namespaces.tid = pid;
267 
268 	event->namespaces.nr_namespaces = NR_NAMESPACES;
269 
270 	ns_link_info = event->namespaces.link_info;
271 
272 	for (idx = 0; idx < event->namespaces.nr_namespaces; idx++)
273 		perf_event__get_ns_link_info(pid, perf_ns__name(idx),
274 					     &ns_link_info[idx]);
275 
276 	event->namespaces.header.type = PERF_RECORD_NAMESPACES;
277 
278 	event->namespaces.header.size = (sizeof(event->namespaces) +
279 			(NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
280 			machine->id_hdr_size);
281 
282 	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
283 		return -1;
284 
285 	return 0;
286 }
287 
288 static int perf_event__synthesize_fork(struct perf_tool *tool,
289 				       union perf_event *event,
290 				       pid_t pid, pid_t tgid, pid_t ppid,
291 				       perf_event__handler_t process,
292 				       struct machine *machine)
293 {
294 	memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);
295 
296 	/*
297 	 * For the main thread, set the parent to the ppid from the status file.
298 	 * For other threads, set the parent pid to the main thread, i.e. assume
299 	 * the main thread spawns all threads in a process.
300 	 */
301 	if (tgid == pid) {
302 		event->fork.ppid = ppid;
303 		event->fork.ptid = ppid;
304 	} else {
305 		event->fork.ppid = tgid;
306 		event->fork.ptid = tgid;
307 	}
308 	event->fork.pid  = tgid;
309 	event->fork.tid  = pid;
310 	event->fork.header.type = PERF_RECORD_FORK;
311 
312 	event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);
313 
314 	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
315 		return -1;
316 
317 	return 0;
318 }
319 
320 int perf_event__synthesize_mmap_events(struct perf_tool *tool,
321 				       union perf_event *event,
322 				       pid_t pid, pid_t tgid,
323 				       perf_event__handler_t process,
324 				       struct machine *machine,
325 				       bool mmap_data,
326 				       unsigned int proc_map_timeout)
327 {
328 	char filename[PATH_MAX];
329 	FILE *fp;
330 	unsigned long long t;
331 	bool truncation = false;
332 	unsigned long long timeout = proc_map_timeout * 1000000ULL;
333 	int rc = 0;
334 	const char *hugetlbfs_mnt = hugetlbfs__mountpoint();
335 	int hugetlbfs_mnt_len = hugetlbfs_mnt ? strlen(hugetlbfs_mnt) : 0;
336 
337 	if (machine__is_default_guest(machine))
338 		return 0;
339 
340 	snprintf(filename, sizeof(filename), "%s/proc/%d/task/%d/maps",
341 		 machine->root_dir, pid, pid);
342 
343 	fp = fopen(filename, "r");
344 	if (fp == NULL) {
345 		/*
346 		 * We raced with a task exiting - just return:
347 		 */
348 		pr_debug("couldn't open %s\n", filename);
349 		return -1;
350 	}
351 
352 	event->header.type = PERF_RECORD_MMAP2;
353 	t = rdclock();
354 
355 	while (1) {
356 		char bf[BUFSIZ];
357 		char prot[5];
358 		char execname[PATH_MAX];
359 		char anonstr[] = "//anon";
360 		unsigned int ino;
361 		size_t size;
362 		ssize_t n;
363 
364 		if (fgets(bf, sizeof(bf), fp) == NULL)
365 			break;
366 
367 		if ((rdclock() - t) > timeout) {
368 			pr_warning("Reading %s timed out. "
369 				   "You may want to increase "
370 				   "the time limit with --proc-map-timeout\n",
371 				   filename);
372 			truncation = true;
373 			goto out;
374 		}
375 
376 		/* ensure null termination since stack will be reused. */
377 		strcpy(execname, "");
378 
379 		/* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
380 		n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %[^\n]\n",
381 		       &event->mmap2.start, &event->mmap2.len, prot,
382 		       &event->mmap2.pgoff, &event->mmap2.maj,
383 		       &event->mmap2.min,
384 		       &ino, execname);
385 
386 		/*
387  		 * Anon maps don't have the execname.
388  		 */
389 		if (n < 7)
390 			continue;
391 
392 		event->mmap2.ino = (u64)ino;
393 
394 		/*
395 		 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
396 		 */
397 		if (machine__is_host(machine))
398 			event->header.misc = PERF_RECORD_MISC_USER;
399 		else
400 			event->header.misc = PERF_RECORD_MISC_GUEST_USER;
401 
402 		/* map protection and flags bits */
403 		event->mmap2.prot = 0;
404 		event->mmap2.flags = 0;
405 		if (prot[0] == 'r')
406 			event->mmap2.prot |= PROT_READ;
407 		if (prot[1] == 'w')
408 			event->mmap2.prot |= PROT_WRITE;
409 		if (prot[2] == 'x')
410 			event->mmap2.prot |= PROT_EXEC;
411 
412 		if (prot[3] == 's')
413 			event->mmap2.flags |= MAP_SHARED;
414 		else
415 			event->mmap2.flags |= MAP_PRIVATE;
416 
417 		if (prot[2] != 'x') {
418 			if (!mmap_data || prot[0] != 'r')
419 				continue;
420 
421 			event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
422 		}
423 
424 out:
425 		if (truncation)
426 			event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT;
427 
428 		if (!strcmp(execname, ""))
429 			strcpy(execname, anonstr);
430 
431 		if (hugetlbfs_mnt_len &&
432 		    !strncmp(execname, hugetlbfs_mnt, hugetlbfs_mnt_len)) {
433 			strcpy(execname, anonstr);
434 			event->mmap2.flags |= MAP_HUGETLB;
435 		}
436 
437 		size = strlen(execname) + 1;
438 		memcpy(event->mmap2.filename, execname, size);
439 		size = PERF_ALIGN(size, sizeof(u64));
440 		event->mmap2.len -= event->mmap2.start;
441 		event->mmap2.header.size = (sizeof(event->mmap2) -
442 					(sizeof(event->mmap2.filename) - size));
443 		memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
444 		event->mmap2.header.size += machine->id_hdr_size;
445 		event->mmap2.pid = tgid;
446 		event->mmap2.tid = pid;
447 
448 		if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
449 			rc = -1;
450 			break;
451 		}
452 
453 		if (truncation)
454 			break;
455 	}
456 
457 	fclose(fp);
458 	return rc;
459 }
460 
461 int perf_event__synthesize_modules(struct perf_tool *tool,
462 				   perf_event__handler_t process,
463 				   struct machine *machine)
464 {
465 	int rc = 0;
466 	struct map *pos;
467 	struct maps *maps = machine__kernel_maps(machine);
468 	union perf_event *event = zalloc((sizeof(event->mmap) +
469 					  machine->id_hdr_size));
470 	if (event == NULL) {
471 		pr_debug("Not enough memory synthesizing mmap event "
472 			 "for kernel modules\n");
473 		return -1;
474 	}
475 
476 	event->header.type = PERF_RECORD_MMAP;
477 
478 	/*
479 	 * kernel uses 0 for user space maps, see kernel/perf_event.c
480 	 * __perf_event_mmap
481 	 */
482 	if (machine__is_host(machine))
483 		event->header.misc = PERF_RECORD_MISC_KERNEL;
484 	else
485 		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
486 
487 	for (pos = maps__first(maps); pos; pos = map__next(pos)) {
488 		size_t size;
489 
490 		if (!__map__is_kmodule(pos))
491 			continue;
492 
493 		size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
494 		event->mmap.header.type = PERF_RECORD_MMAP;
495 		event->mmap.header.size = (sizeof(event->mmap) -
496 				        (sizeof(event->mmap.filename) - size));
497 		memset(event->mmap.filename + size, 0, machine->id_hdr_size);
498 		event->mmap.header.size += machine->id_hdr_size;
499 		event->mmap.start = pos->start;
500 		event->mmap.len   = pos->end - pos->start;
501 		event->mmap.pid   = machine->pid;
502 
503 		memcpy(event->mmap.filename, pos->dso->long_name,
504 		       pos->dso->long_name_len + 1);
505 		if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
506 			rc = -1;
507 			break;
508 		}
509 	}
510 
511 	free(event);
512 	return rc;
513 }
514 
515 static int __event__synthesize_thread(union perf_event *comm_event,
516 				      union perf_event *mmap_event,
517 				      union perf_event *fork_event,
518 				      union perf_event *namespaces_event,
519 				      pid_t pid, int full,
520 				      perf_event__handler_t process,
521 				      struct perf_tool *tool,
522 				      struct machine *machine,
523 				      bool mmap_data,
524 				      unsigned int proc_map_timeout)
525 {
526 	char filename[PATH_MAX];
527 	DIR *tasks;
528 	struct dirent *dirent;
529 	pid_t tgid, ppid;
530 	int rc = 0;
531 
532 	/* special case: only send one comm event using passed in pid */
533 	if (!full) {
534 		tgid = perf_event__synthesize_comm(tool, comm_event, pid,
535 						   process, machine);
536 
537 		if (tgid == -1)
538 			return -1;
539 
540 		if (perf_event__synthesize_namespaces(tool, namespaces_event, pid,
541 						      tgid, process, machine) < 0)
542 			return -1;
543 
544 
545 		return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
546 							  process, machine, mmap_data,
547 							  proc_map_timeout);
548 	}
549 
550 	if (machine__is_default_guest(machine))
551 		return 0;
552 
553 	snprintf(filename, sizeof(filename), "%s/proc/%d/task",
554 		 machine->root_dir, pid);
555 
556 	tasks = opendir(filename);
557 	if (tasks == NULL) {
558 		pr_debug("couldn't open %s\n", filename);
559 		return 0;
560 	}
561 
562 	while ((dirent = readdir(tasks)) != NULL) {
563 		char *end;
564 		pid_t _pid;
565 
566 		_pid = strtol(dirent->d_name, &end, 10);
567 		if (*end)
568 			continue;
569 
570 		rc = -1;
571 		if (perf_event__prepare_comm(comm_event, _pid, machine,
572 					     &tgid, &ppid) != 0)
573 			break;
574 
575 		if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
576 						ppid, process, machine) < 0)
577 			break;
578 
579 		if (perf_event__synthesize_namespaces(tool, namespaces_event, _pid,
580 						      tgid, process, machine) < 0)
581 			break;
582 
583 		/*
584 		 * Send the prepared comm event
585 		 */
586 		if (perf_tool__process_synth_event(tool, comm_event, machine, process) != 0)
587 			break;
588 
589 		rc = 0;
590 		if (_pid == pid) {
591 			/* process the parent's maps too */
592 			rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
593 						process, machine, mmap_data, proc_map_timeout);
594 			if (rc)
595 				break;
596 		}
597 	}
598 
599 	closedir(tasks);
600 	return rc;
601 }
602 
603 int perf_event__synthesize_thread_map(struct perf_tool *tool,
604 				      struct thread_map *threads,
605 				      perf_event__handler_t process,
606 				      struct machine *machine,
607 				      bool mmap_data,
608 				      unsigned int proc_map_timeout)
609 {
610 	union perf_event *comm_event, *mmap_event, *fork_event;
611 	union perf_event *namespaces_event;
612 	int err = -1, thread, j;
613 
614 	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
615 	if (comm_event == NULL)
616 		goto out;
617 
618 	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
619 	if (mmap_event == NULL)
620 		goto out_free_comm;
621 
622 	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
623 	if (fork_event == NULL)
624 		goto out_free_mmap;
625 
626 	namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
627 				  (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
628 				  machine->id_hdr_size);
629 	if (namespaces_event == NULL)
630 		goto out_free_fork;
631 
632 	err = 0;
633 	for (thread = 0; thread < threads->nr; ++thread) {
634 		if (__event__synthesize_thread(comm_event, mmap_event,
635 					       fork_event, namespaces_event,
636 					       thread_map__pid(threads, thread), 0,
637 					       process, tool, machine,
638 					       mmap_data, proc_map_timeout)) {
639 			err = -1;
640 			break;
641 		}
642 
643 		/*
644 		 * comm.pid is set to thread group id by
645 		 * perf_event__synthesize_comm
646 		 */
647 		if ((int) comm_event->comm.pid != thread_map__pid(threads, thread)) {
648 			bool need_leader = true;
649 
650 			/* is thread group leader in thread_map? */
651 			for (j = 0; j < threads->nr; ++j) {
652 				if ((int) comm_event->comm.pid == thread_map__pid(threads, j)) {
653 					need_leader = false;
654 					break;
655 				}
656 			}
657 
658 			/* if not, generate events for it */
659 			if (need_leader &&
660 			    __event__synthesize_thread(comm_event, mmap_event,
661 						       fork_event, namespaces_event,
662 						       comm_event->comm.pid, 0,
663 						       process, tool, machine,
664 						       mmap_data, proc_map_timeout)) {
665 				err = -1;
666 				break;
667 			}
668 		}
669 	}
670 	free(namespaces_event);
671 out_free_fork:
672 	free(fork_event);
673 out_free_mmap:
674 	free(mmap_event);
675 out_free_comm:
676 	free(comm_event);
677 out:
678 	return err;
679 }
680 
681 static int __perf_event__synthesize_threads(struct perf_tool *tool,
682 					    perf_event__handler_t process,
683 					    struct machine *machine,
684 					    bool mmap_data,
685 					    unsigned int proc_map_timeout,
686 					    struct dirent **dirent,
687 					    int start,
688 					    int num)
689 {
690 	union perf_event *comm_event, *mmap_event, *fork_event;
691 	union perf_event *namespaces_event;
692 	int err = -1;
693 	char *end;
694 	pid_t pid;
695 	int i;
696 
697 	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
698 	if (comm_event == NULL)
699 		goto out;
700 
701 	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
702 	if (mmap_event == NULL)
703 		goto out_free_comm;
704 
705 	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
706 	if (fork_event == NULL)
707 		goto out_free_mmap;
708 
709 	namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
710 				  (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
711 				  machine->id_hdr_size);
712 	if (namespaces_event == NULL)
713 		goto out_free_fork;
714 
715 	for (i = start; i < start + num; i++) {
716 		if (!isdigit(dirent[i]->d_name[0]))
717 			continue;
718 
719 		pid = (pid_t)strtol(dirent[i]->d_name, &end, 10);
720 		/* only interested in proper numerical dirents */
721 		if (*end)
722 			continue;
723 		/*
724 		 * We may race with an exiting thread, so don't stop just because
725 		 * one thread couldn't be synthesized.
726 		 */
727 		__event__synthesize_thread(comm_event, mmap_event, fork_event,
728 					   namespaces_event, pid, 1, process,
729 					   tool, machine, mmap_data,
730 					   proc_map_timeout);
731 	}
732 	err = 0;
733 
734 	free(namespaces_event);
735 out_free_fork:
736 	free(fork_event);
737 out_free_mmap:
738 	free(mmap_event);
739 out_free_comm:
740 	free(comm_event);
741 out:
742 	return err;
743 }
744 
745 struct synthesize_threads_arg {
746 	struct perf_tool *tool;
747 	perf_event__handler_t process;
748 	struct machine *machine;
749 	bool mmap_data;
750 	unsigned int proc_map_timeout;
751 	struct dirent **dirent;
752 	int num;
753 	int start;
754 };
755 
756 static void *synthesize_threads_worker(void *arg)
757 {
758 	struct synthesize_threads_arg *args = arg;
759 
760 	__perf_event__synthesize_threads(args->tool, args->process,
761 					 args->machine, args->mmap_data,
762 					 args->proc_map_timeout, args->dirent,
763 					 args->start, args->num);
764 	return NULL;
765 }
766 
767 int perf_event__synthesize_threads(struct perf_tool *tool,
768 				   perf_event__handler_t process,
769 				   struct machine *machine,
770 				   bool mmap_data,
771 				   unsigned int proc_map_timeout,
772 				   unsigned int nr_threads_synthesize)
773 {
774 	struct synthesize_threads_arg *args = NULL;
775 	pthread_t *synthesize_threads = NULL;
776 	char proc_path[PATH_MAX];
777 	struct dirent **dirent;
778 	int num_per_thread;
779 	int m, n, i, j;
780 	int thread_nr;
781 	int base = 0;
782 	int err = -1;
783 
784 
785 	if (machine__is_default_guest(machine))
786 		return 0;
787 
788 	snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
789 	n = scandir(proc_path, &dirent, 0, alphasort);
790 	if (n < 0)
791 		return err;
792 
793 	if (nr_threads_synthesize == UINT_MAX)
794 		thread_nr = sysconf(_SC_NPROCESSORS_ONLN);
795 	else
796 		thread_nr = nr_threads_synthesize;
797 
798 	if (thread_nr <= 1) {
799 		err = __perf_event__synthesize_threads(tool, process,
800 						       machine, mmap_data,
801 						       proc_map_timeout,
802 						       dirent, base, n);
803 		goto free_dirent;
804 	}
805 	if (thread_nr > n)
806 		thread_nr = n;
807 
808 	synthesize_threads = calloc(thread_nr, sizeof(pthread_t));
809 	if (synthesize_threads == NULL)
810 		goto free_dirent;
811 
812 	args = calloc(thread_nr, sizeof(*args));
813 	if (args == NULL)
814 		goto free_threads;
815 
816 	num_per_thread = n / thread_nr;
817 	m = n % thread_nr;
818 	for (i = 0; i < thread_nr; i++) {
819 		args[i].tool = tool;
820 		args[i].process = process;
821 		args[i].machine = machine;
822 		args[i].mmap_data = mmap_data;
823 		args[i].proc_map_timeout = proc_map_timeout;
824 		args[i].dirent = dirent;
825 	}
826 	for (i = 0; i < m; i++) {
827 		args[i].num = num_per_thread + 1;
828 		args[i].start = i * args[i].num;
829 	}
830 	if (i != 0)
831 		base = args[i-1].start + args[i-1].num;
832 	for (j = i; j < thread_nr; j++) {
833 		args[j].num = num_per_thread;
834 		args[j].start = base + (j - i) * args[i].num;
835 	}
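	/*
	 * Example of the split above: with n = 10 /proc entries and
	 * thread_nr = 4, num_per_thread = 2 and m = 2, so workers 0 and 1
	 * get 3 entries each (starting at indexes 0 and 3) and workers 2
	 * and 3 get 2 entries each (indexes 6 and 8).
	 */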
836 
837 	for (i = 0; i < thread_nr; i++) {
838 		if (pthread_create(&synthesize_threads[i], NULL,
839 				   synthesize_threads_worker, &args[i]))
840 			goto out_join;
841 	}
842 	err = 0;
843 out_join:
844 	for (i = 0; i < thread_nr; i++)
845 		pthread_join(synthesize_threads[i], NULL);
846 	free(args);
847 free_threads:
848 	free(synthesize_threads);
849 free_dirent:
850 	for (i = 0; i < n; i++)
851 		free(dirent[i]);
852 	free(dirent);
853 
854 	return err;
855 }
856 
857 struct process_symbol_args {
858 	const char *name;
859 	u64	   start;
860 };
861 
862 static int find_symbol_cb(void *arg, const char *name, char type,
863 			  u64 start)
864 {
865 	struct process_symbol_args *args = arg;
866 
867 	/*
868 	 * Must be a function or at least an alias, as in PARISC64, where "_text" is
869 	 * an 'A' to the same address as "_stext".
870 	 */
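	/*
	 * kallsyms__parse() calls this for every symbol in the given kallsyms
	 * file; for a line like "ffffffff81000000 T _text" (address, type,
	 * name) it hands us the already parsed type letter and start address.
	 */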
871 	if (!(kallsyms__is_function(type) ||
872 	      type == 'A') || strcmp(name, args->name))
873 		return 0;
874 
875 	args->start = start;
876 	return 1;
877 }
878 
879 int kallsyms__get_function_start(const char *kallsyms_filename,
880 				 const char *symbol_name, u64 *addr)
881 {
882 	struct process_symbol_args args = { .name = symbol_name, };
883 
884 	if (kallsyms__parse(kallsyms_filename, &args, find_symbol_cb) <= 0)
885 		return -1;
886 
887 	*addr = args.start;
888 	return 0;
889 }
890 
891 int __weak perf_event__synthesize_extra_kmaps(struct perf_tool *tool __maybe_unused,
892 					      perf_event__handler_t process __maybe_unused,
893 					      struct machine *machine __maybe_unused)
894 {
895 	return 0;
896 }
897 
898 static int __perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
899 						perf_event__handler_t process,
900 						struct machine *machine)
901 {
902 	size_t size;
903 	struct map *map = machine__kernel_map(machine);
904 	struct kmap *kmap;
905 	int err;
906 	union perf_event *event;
907 
908 	if (symbol_conf.kptr_restrict)
909 		return -1;
910 	if (map == NULL)
911 		return -1;
912 
913 	/*
914 	 * We should get this from /sys/kernel/sections/.text, but until that is
915 	 * available use this, and once it is available keep this approach as a
916 	 * fallback for older kernels.
917 	 */
918 	event = zalloc((sizeof(event->mmap) + machine->id_hdr_size));
919 	if (event == NULL) {
920 		pr_debug("Not enough memory synthesizing mmap event "
921 			 "for the kernel\n");
922 		return -1;
923 	}
924 
925 	if (machine__is_host(machine)) {
926 		/*
927 		 * kernel uses PERF_RECORD_MISC_USER for user space maps,
928 		 * see kernel/perf_event.c __perf_event_mmap
929 		 */
930 		event->header.misc = PERF_RECORD_MISC_KERNEL;
931 	} else {
932 		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
933 	}
934 
935 	kmap = map__kmap(map);
936 	size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
937 			"%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1;
938 	size = PERF_ALIGN(size, sizeof(u64));
939 	event->mmap.header.type = PERF_RECORD_MMAP;
940 	event->mmap.header.size = (sizeof(event->mmap) -
941 			(sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
942 	event->mmap.pgoff = kmap->ref_reloc_sym->addr;
943 	event->mmap.start = map->start;
944 	event->mmap.len   = map->end - event->mmap.start;
945 	event->mmap.pid   = machine->pid;
946 
947 	err = perf_tool__process_synth_event(tool, event, machine, process);
948 	free(event);
949 
950 	return err;
951 }
952 
953 int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
954 				       perf_event__handler_t process,
955 				       struct machine *machine)
956 {
957 	int err;
958 
959 	err = __perf_event__synthesize_kernel_mmap(tool, process, machine);
960 	if (err < 0)
961 		return err;
962 
963 	return perf_event__synthesize_extra_kmaps(tool, process, machine);
964 }
965 
966 int perf_event__synthesize_thread_map2(struct perf_tool *tool,
967 				      struct thread_map *threads,
968 				      perf_event__handler_t process,
969 				      struct machine *machine)
970 {
971 	union perf_event *event;
972 	int i, err, size;
973 
974 	size  = sizeof(event->thread_map);
975 	size +=	threads->nr * sizeof(event->thread_map.entries[0]);
976 
977 	event = zalloc(size);
978 	if (!event)
979 		return -ENOMEM;
980 
981 	event->header.type = PERF_RECORD_THREAD_MAP;
982 	event->header.size = size;
983 	event->thread_map.nr = threads->nr;
984 
985 	for (i = 0; i < threads->nr; i++) {
986 		struct thread_map_event_entry *entry = &event->thread_map.entries[i];
987 		char *comm = thread_map__comm(threads, i);
988 
989 		if (!comm)
990 			comm = (char *) "";
991 
992 		entry->pid = thread_map__pid(threads, i);
993 		strncpy((char *) &entry->comm, comm, sizeof(entry->comm));
994 	}
995 
996 	err = process(tool, event, NULL, machine);
997 
998 	free(event);
999 	return err;
1000 }
1001 
1002 static void synthesize_cpus(struct cpu_map_entries *cpus,
1003 			    struct cpu_map *map)
1004 {
1005 	int i;
1006 
1007 	cpus->nr = map->nr;
1008 
1009 	for (i = 0; i < map->nr; i++)
1010 		cpus->cpu[i] = map->map[i];
1011 }
1012 
1013 static void synthesize_mask(struct cpu_map_mask *mask,
1014 			    struct cpu_map *map, int max)
1015 {
1016 	int i;
1017 
1018 	mask->nr = BITS_TO_LONGS(max);
1019 	mask->long_size = sizeof(long);
1020 
1021 	for (i = 0; i < map->nr; i++)
1022 		set_bit(map->map[i], mask->mask);
1023 }
1024 
1025 static size_t cpus_size(struct cpu_map *map)
1026 {
1027 	return sizeof(struct cpu_map_entries) + map->nr * sizeof(u16);
1028 }
1029 
1030 static size_t mask_size(struct cpu_map *map, int *max)
1031 {
1032 	int i;
1033 
1034 	*max = 0;
1035 
1036 	for (i = 0; i < map->nr; i++) {
1037 		/* bit position of the cpu is its value + 1 */
1038 		int bit = map->map[i] + 1;
1039 
1040 		if (bit > *max)
1041 			*max = bit;
1042 	}
1043 
1044 	return sizeof(struct cpu_map_mask) + BITS_TO_LONGS(*max) * sizeof(long);
1045 }
1046 
1047 void *cpu_map_data__alloc(struct cpu_map *map, size_t *size, u16 *type, int *max)
1048 {
1049 	size_t size_cpus, size_mask;
1050 	bool is_dummy = cpu_map__empty(map);
1051 
1052 	/*
1053 	 * Both array and mask data have variable size based
1054 	 * on the number of cpus and their actual values.
1055 	 * The size of the 'struct cpu_map_data' is:
1056 	 *
1057 	 *   array = size of 'struct cpu_map_entries' +
1058 	 *           number of cpus * sizeof(u16)
1059 	 *
1060 	 *   mask  = size of 'struct cpu_map_mask' +
1061 	 *           maximum cpu bit converted to size of longs
1062 	 *
1063 	 * and finally + the size of 'struct cpu_map_data'.
1064 	 */
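	/*
	 * For example, a dense map of cpus 0-3 needs 4 * sizeof(u16) payload
	 * bytes in array form but only BITS_TO_LONGS(4) == 1 long in mask
	 * form, while a single sparse cpu such as 255 needs one u16 as an
	 * array but BITS_TO_LONGS(256) == 4 longs as a mask; the array
	 * encoding is used below when it is smaller (or the map is empty),
	 * the mask otherwise.
	 */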
1065 	size_cpus = cpus_size(map);
1066 	size_mask = mask_size(map, max);
1067 
1068 	if (is_dummy || (size_cpus < size_mask)) {
1069 		*size += size_cpus;
1070 		*type  = PERF_CPU_MAP__CPUS;
1071 	} else {
1072 		*size += size_mask;
1073 		*type  = PERF_CPU_MAP__MASK;
1074 	}
1075 
1076 	*size += sizeof(struct cpu_map_data);
1077 	return zalloc(*size);
1078 }
1079 
1080 void cpu_map_data__synthesize(struct cpu_map_data *data, struct cpu_map *map,
1081 			      u16 type, int max)
1082 {
1083 	data->type = type;
1084 
1085 	switch (type) {
1086 	case PERF_CPU_MAP__CPUS:
1087 		synthesize_cpus((struct cpu_map_entries *) data->data, map);
1088 		break;
1089 	case PERF_CPU_MAP__MASK:
1090 		synthesize_mask((struct cpu_map_mask *) data->data, map, max);
1091 	default:
1092 		break;
1093 	}
1094 }
1095 
1096 static struct cpu_map_event* cpu_map_event__new(struct cpu_map *map)
1097 {
1098 	size_t size = sizeof(struct cpu_map_event);
1099 	struct cpu_map_event *event;
1100 	int max;
1101 	u16 type;
1102 
1103 	event = cpu_map_data__alloc(map, &size, &type, &max);
1104 	if (!event)
1105 		return NULL;
1106 
1107 	event->header.type = PERF_RECORD_CPU_MAP;
1108 	event->header.size = size;
1109 	event->data.type   = type;
1110 
1111 	cpu_map_data__synthesize(&event->data, map, type, max);
1112 	return event;
1113 }
1114 
1115 int perf_event__synthesize_cpu_map(struct perf_tool *tool,
1116 				   struct cpu_map *map,
1117 				   perf_event__handler_t process,
1118 				   struct machine *machine)
1119 {
1120 	struct cpu_map_event *event;
1121 	int err;
1122 
1123 	event = cpu_map_event__new(map);
1124 	if (!event)
1125 		return -ENOMEM;
1126 
1127 	err = process(tool, (union perf_event *) event, NULL, machine);
1128 
1129 	free(event);
1130 	return err;
1131 }
1132 
1133 int perf_event__synthesize_stat_config(struct perf_tool *tool,
1134 				       struct perf_stat_config *config,
1135 				       perf_event__handler_t process,
1136 				       struct machine *machine)
1137 {
1138 	struct stat_config_event *event;
1139 	int size, i = 0, err;
1140 
1141 	size  = sizeof(*event);
1142 	size += (PERF_STAT_CONFIG_TERM__MAX * sizeof(event->data[0]));
1143 
1144 	event = zalloc(size);
1145 	if (!event)
1146 		return -ENOMEM;
1147 
1148 	event->header.type = PERF_RECORD_STAT_CONFIG;
1149 	event->header.size = size;
1150 	event->nr          = PERF_STAT_CONFIG_TERM__MAX;
1151 
1152 #define ADD(__term, __val)					\
1153 	event->data[i].tag = PERF_STAT_CONFIG_TERM__##__term;	\
1154 	event->data[i].val = __val;				\
1155 	i++;
1156 
1157 	ADD(AGGR_MODE,	config->aggr_mode)
1158 	ADD(INTERVAL,	config->interval)
1159 	ADD(SCALE,	config->scale)
1160 
1161 	WARN_ONCE(i != PERF_STAT_CONFIG_TERM__MAX,
1162 		  "stat config terms unbalanced\n");
1163 #undef ADD
1164 
1165 	err = process(tool, (union perf_event *) event, NULL, machine);
1166 
1167 	free(event);
1168 	return err;
1169 }
1170 
1171 int perf_event__synthesize_stat(struct perf_tool *tool,
1172 				u32 cpu, u32 thread, u64 id,
1173 				struct perf_counts_values *count,
1174 				perf_event__handler_t process,
1175 				struct machine *machine)
1176 {
1177 	struct stat_event event;
1178 
1179 	event.header.type = PERF_RECORD_STAT;
1180 	event.header.size = sizeof(event);
1181 	event.header.misc = 0;
1182 
1183 	event.id        = id;
1184 	event.cpu       = cpu;
1185 	event.thread    = thread;
1186 	event.val       = count->val;
1187 	event.ena       = count->ena;
1188 	event.run       = count->run;
1189 
1190 	return process(tool, (union perf_event *) &event, NULL, machine);
1191 }
1192 
1193 int perf_event__synthesize_stat_round(struct perf_tool *tool,
1194 				      u64 evtime, u64 type,
1195 				      perf_event__handler_t process,
1196 				      struct machine *machine)
1197 {
1198 	struct stat_round_event event;
1199 
1200 	event.header.type = PERF_RECORD_STAT_ROUND;
1201 	event.header.size = sizeof(event);
1202 	event.header.misc = 0;
1203 
1204 	event.time = evtime;
1205 	event.type = type;
1206 
1207 	return process(tool, (union perf_event *) &event, NULL, machine);
1208 }
1209 
1210 void perf_event__read_stat_config(struct perf_stat_config *config,
1211 				  struct stat_config_event *event)
1212 {
1213 	unsigned i;
1214 
1215 	for (i = 0; i < event->nr; i++) {
1216 
1217 		switch (event->data[i].tag) {
1218 #define CASE(__term, __val)					\
1219 		case PERF_STAT_CONFIG_TERM__##__term:		\
1220 			config->__val = event->data[i].val;	\
1221 			break;
1222 
1223 		CASE(AGGR_MODE, aggr_mode)
1224 		CASE(SCALE,     scale)
1225 		CASE(INTERVAL,  interval)
1226 #undef CASE
1227 		default:
1228 			pr_warning("unknown stat config term %" PRIu64 "\n",
1229 				   event->data[i].tag);
1230 		}
1231 	}
1232 }
1233 
1234 size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp)
1235 {
1236 	const char *s;
1237 
1238 	if (event->header.misc & PERF_RECORD_MISC_COMM_EXEC)
1239 		s = " exec";
1240 	else
1241 		s = "";
1242 
1243 	return fprintf(fp, "%s: %s:%d/%d\n", s, event->comm.comm, event->comm.pid, event->comm.tid);
1244 }
1245 
1246 size_t perf_event__fprintf_namespaces(union perf_event *event, FILE *fp)
1247 {
1248 	size_t ret = 0;
1249 	struct perf_ns_link_info *ns_link_info;
1250 	u32 nr_namespaces, idx;
1251 
1252 	ns_link_info = event->namespaces.link_info;
1253 	nr_namespaces = event->namespaces.nr_namespaces;
1254 
1255 	ret += fprintf(fp, " %d/%d - nr_namespaces: %u\n\t\t[",
1256 		       event->namespaces.pid,
1257 		       event->namespaces.tid,
1258 		       nr_namespaces);
1259 
1260 	for (idx = 0; idx < nr_namespaces; idx++) {
1261 		if (idx && (idx % 4 == 0))
1262 			ret += fprintf(fp, "\n\t\t ");
1263 
1264 		ret  += fprintf(fp, "%u/%s: %" PRIu64 "/%#" PRIx64 "%s", idx,
1265 				perf_ns__name(idx), (u64)ns_link_info[idx].dev,
1266 				(u64)ns_link_info[idx].ino,
1267 				((idx + 1) != nr_namespaces) ? ", " : "]\n");
1268 	}
1269 
1270 	return ret;
1271 }
1272 
1273 int perf_event__process_comm(struct perf_tool *tool __maybe_unused,
1274 			     union perf_event *event,
1275 			     struct perf_sample *sample,
1276 			     struct machine *machine)
1277 {
1278 	return machine__process_comm_event(machine, event, sample);
1279 }
1280 
1281 int perf_event__process_namespaces(struct perf_tool *tool __maybe_unused,
1282 				   union perf_event *event,
1283 				   struct perf_sample *sample,
1284 				   struct machine *machine)
1285 {
1286 	return machine__process_namespaces_event(machine, event, sample);
1287 }
1288 
1289 int perf_event__process_lost(struct perf_tool *tool __maybe_unused,
1290 			     union perf_event *event,
1291 			     struct perf_sample *sample,
1292 			     struct machine *machine)
1293 {
1294 	return machine__process_lost_event(machine, event, sample);
1295 }
1296 
1297 int perf_event__process_aux(struct perf_tool *tool __maybe_unused,
1298 			    union perf_event *event,
1299 			    struct perf_sample *sample __maybe_unused,
1300 			    struct machine *machine)
1301 {
1302 	return machine__process_aux_event(machine, event);
1303 }
1304 
1305 int perf_event__process_itrace_start(struct perf_tool *tool __maybe_unused,
1306 				     union perf_event *event,
1307 				     struct perf_sample *sample __maybe_unused,
1308 				     struct machine *machine)
1309 {
1310 	return machine__process_itrace_start_event(machine, event);
1311 }
1312 
1313 int perf_event__process_lost_samples(struct perf_tool *tool __maybe_unused,
1314 				     union perf_event *event,
1315 				     struct perf_sample *sample,
1316 				     struct machine *machine)
1317 {
1318 	return machine__process_lost_samples_event(machine, event, sample);
1319 }
1320 
1321 int perf_event__process_switch(struct perf_tool *tool __maybe_unused,
1322 			       union perf_event *event,
1323 			       struct perf_sample *sample __maybe_unused,
1324 			       struct machine *machine)
1325 {
1326 	return machine__process_switch_event(machine, event);
1327 }
1328 
1329 size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
1330 {
1331 	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %c %s\n",
1332 		       event->mmap.pid, event->mmap.tid, event->mmap.start,
1333 		       event->mmap.len, event->mmap.pgoff,
1334 		       (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x',
1335 		       event->mmap.filename);
1336 }
1337 
1338 size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp)
1339 {
1340 	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64
1341 			   " %02x:%02x %"PRIu64" %"PRIu64"]: %c%c%c%c %s\n",
1342 		       event->mmap2.pid, event->mmap2.tid, event->mmap2.start,
1343 		       event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj,
1344 		       event->mmap2.min, event->mmap2.ino,
1345 		       event->mmap2.ino_generation,
1346 		       (event->mmap2.prot & PROT_READ) ? 'r' : '-',
1347 		       (event->mmap2.prot & PROT_WRITE) ? 'w' : '-',
1348 		       (event->mmap2.prot & PROT_EXEC) ? 'x' : '-',
1349 		       (event->mmap2.flags & MAP_SHARED) ? 's' : 'p',
1350 		       event->mmap2.filename);
1351 }
1352 
1353 size_t perf_event__fprintf_thread_map(union perf_event *event, FILE *fp)
1354 {
1355 	struct thread_map *threads = thread_map__new_event(&event->thread_map);
1356 	size_t ret;
1357 
1358 	ret = fprintf(fp, " nr: ");
1359 
1360 	if (threads)
1361 		ret += thread_map__fprintf(threads, fp);
1362 	else
1363 		ret += fprintf(fp, "failed to get threads from event\n");
1364 
1365 	thread_map__put(threads);
1366 	return ret;
1367 }
1368 
1369 size_t perf_event__fprintf_cpu_map(union perf_event *event, FILE *fp)
1370 {
1371 	struct cpu_map *cpus = cpu_map__new_data(&event->cpu_map.data);
1372 	size_t ret;
1373 
1374 	ret = fprintf(fp, ": ");
1375 
1376 	if (cpus)
1377 		ret += cpu_map__fprintf(cpus, fp);
1378 	else
1379 		ret += fprintf(fp, "failed to get cpumap from event\n");
1380 
1381 	cpu_map__put(cpus);
1382 	return ret;
1383 }
1384 
1385 int perf_event__process_mmap(struct perf_tool *tool __maybe_unused,
1386 			     union perf_event *event,
1387 			     struct perf_sample *sample,
1388 			     struct machine *machine)
1389 {
1390 	return machine__process_mmap_event(machine, event, sample);
1391 }
1392 
1393 int perf_event__process_mmap2(struct perf_tool *tool __maybe_unused,
1394 			     union perf_event *event,
1395 			     struct perf_sample *sample,
1396 			     struct machine *machine)
1397 {
1398 	return machine__process_mmap2_event(machine, event, sample);
1399 }
1400 
1401 size_t perf_event__fprintf_task(union perf_event *event, FILE *fp)
1402 {
1403 	return fprintf(fp, "(%d:%d):(%d:%d)\n",
1404 		       event->fork.pid, event->fork.tid,
1405 		       event->fork.ppid, event->fork.ptid);
1406 }
1407 
1408 int perf_event__process_fork(struct perf_tool *tool __maybe_unused,
1409 			     union perf_event *event,
1410 			     struct perf_sample *sample,
1411 			     struct machine *machine)
1412 {
1413 	return machine__process_fork_event(machine, event, sample);
1414 }
1415 
1416 int perf_event__process_exit(struct perf_tool *tool __maybe_unused,
1417 			     union perf_event *event,
1418 			     struct perf_sample *sample,
1419 			     struct machine *machine)
1420 {
1421 	return machine__process_exit_event(machine, event, sample);
1422 }
1423 
1424 size_t perf_event__fprintf_aux(union perf_event *event, FILE *fp)
1425 {
1426 	return fprintf(fp, " offset: %#"PRIx64" size: %#"PRIx64" flags: %#"PRIx64" [%s%s%s]\n",
1427 		       event->aux.aux_offset, event->aux.aux_size,
1428 		       event->aux.flags,
1429 		       event->aux.flags & PERF_AUX_FLAG_TRUNCATED ? "T" : "",
1430 		       event->aux.flags & PERF_AUX_FLAG_OVERWRITE ? "O" : "",
1431 		       event->aux.flags & PERF_AUX_FLAG_PARTIAL   ? "P" : "");
1432 }
1433 
1434 size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp)
1435 {
1436 	return fprintf(fp, " pid: %u tid: %u\n",
1437 		       event->itrace_start.pid, event->itrace_start.tid);
1438 }
1439 
1440 size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp)
1441 {
1442 	bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
1443 	const char *in_out = !out ? "IN         " :
1444 		!(event->header.misc & PERF_RECORD_MISC_SWITCH_OUT_PREEMPT) ?
1445 				    "OUT        " : "OUT preempt";
1446 
1447 	if (event->header.type == PERF_RECORD_SWITCH)
1448 		return fprintf(fp, " %s\n", in_out);
1449 
1450 	return fprintf(fp, " %s  %s pid/tid: %5u/%-5u\n",
1451 		       in_out, out ? "next" : "prev",
1452 		       event->context_switch.next_prev_pid,
1453 		       event->context_switch.next_prev_tid);
1454 }
1455 
1456 static size_t perf_event__fprintf_lost(union perf_event *event, FILE *fp)
1457 {
1458 	return fprintf(fp, " lost %" PRIu64 "\n", event->lost.lost);
1459 }
1460 
1461 size_t perf_event__fprintf(union perf_event *event, FILE *fp)
1462 {
1463 	size_t ret = fprintf(fp, "PERF_RECORD_%s",
1464 			     perf_event__name(event->header.type));
1465 
1466 	switch (event->header.type) {
1467 	case PERF_RECORD_COMM:
1468 		ret += perf_event__fprintf_comm(event, fp);
1469 		break;
1470 	case PERF_RECORD_FORK:
1471 	case PERF_RECORD_EXIT:
1472 		ret += perf_event__fprintf_task(event, fp);
1473 		break;
1474 	case PERF_RECORD_MMAP:
1475 		ret += perf_event__fprintf_mmap(event, fp);
1476 		break;
1477 	case PERF_RECORD_NAMESPACES:
1478 		ret += perf_event__fprintf_namespaces(event, fp);
1479 		break;
1480 	case PERF_RECORD_MMAP2:
1481 		ret += perf_event__fprintf_mmap2(event, fp);
1482 		break;
1483 	case PERF_RECORD_AUX:
1484 		ret += perf_event__fprintf_aux(event, fp);
1485 		break;
1486 	case PERF_RECORD_ITRACE_START:
1487 		ret += perf_event__fprintf_itrace_start(event, fp);
1488 		break;
1489 	case PERF_RECORD_SWITCH:
1490 	case PERF_RECORD_SWITCH_CPU_WIDE:
1491 		ret += perf_event__fprintf_switch(event, fp);
1492 		break;
1493 	case PERF_RECORD_LOST:
1494 		ret += perf_event__fprintf_lost(event, fp);
1495 		break;
1496 	default:
1497 		ret += fprintf(fp, "\n");
1498 	}
1499 
1500 	return ret;
1501 }
1502 
1503 int perf_event__process(struct perf_tool *tool __maybe_unused,
1504 			union perf_event *event,
1505 			struct perf_sample *sample,
1506 			struct machine *machine)
1507 {
1508 	return machine__process_event(machine, event, sample);
1509 }
1510 
1511 struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr,
1512 			     struct addr_location *al)
1513 {
1514 	struct map_groups *mg = thread->mg;
1515 	struct machine *machine = mg->machine;
1516 	bool load_map = false;
1517 
1518 	al->machine = machine;
1519 	al->thread = thread;
1520 	al->addr = addr;
1521 	al->cpumode = cpumode;
1522 	al->filtered = 0;
1523 
1524 	if (machine == NULL) {
1525 		al->map = NULL;
1526 		return NULL;
1527 	}
1528 
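	/*
	 * Pick the map group and the single-letter map level used by the rest
	 * of the tools: 'k' host kernel, '.' host user, 'g' guest kernel,
	 * 'u' guest user, and 'H' for hypervisor samples or anything that
	 * cannot be resolved here (machine__resolve() prints it as
	 * "[hypervisor]").
	 */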
1529 	if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
1530 		al->level = 'k';
1531 		mg = &machine->kmaps;
1532 		load_map = true;
1533 	} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
1534 		al->level = '.';
1535 	} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
1536 		al->level = 'g';
1537 		mg = &machine->kmaps;
1538 		load_map = true;
1539 	} else if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest) {
1540 		al->level = 'u';
1541 	} else {
1542 		al->level = 'H';
1543 		al->map = NULL;
1544 
1545 		if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
1546 			cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
1547 			!perf_guest)
1548 			al->filtered |= (1 << HIST_FILTER__GUEST);
1549 		if ((cpumode == PERF_RECORD_MISC_USER ||
1550 			cpumode == PERF_RECORD_MISC_KERNEL) &&
1551 			!perf_host)
1552 			al->filtered |= (1 << HIST_FILTER__HOST);
1553 
1554 		return NULL;
1555 	}
1556 try_again:
1557 	al->map = map_groups__find(mg, al->addr);
1558 	if (al->map == NULL) {
1559 		/*
1560 		 * If this is outside of all known maps, and is a negative
1561 		 * address, try to look it up in the kernel dso, as it might be
1562 		 * a vsyscall or vdso (which executes in user-mode).
1563 		 *
1564 		 * XXX This is nasty, we should have a symbol list in the
1565 		 * "[vdso]" dso, but for now lets use the old trick of looking
1566 		 * in the whole kernel symbol list.
1567 		 */
1568 		if (cpumode == PERF_RECORD_MISC_USER && machine &&
1569 		    mg != &machine->kmaps &&
1570 		    machine__kernel_ip(machine, al->addr)) {
1571 			mg = &machine->kmaps;
1572 			load_map = true;
1573 			goto try_again;
1574 		}
1575 	} else {
1576 		/*
1577 		 * Kernel maps might be changed when loading symbols so loading
1578 		 * must be done prior to using kernel maps.
1579 		 */
1580 		if (load_map)
1581 			map__load(al->map);
1582 		al->addr = al->map->map_ip(al->map, al->addr);
1583 	}
1584 
1585 	return al->map;
1586 }
1587 
1588 struct symbol *thread__find_symbol(struct thread *thread, u8 cpumode,
1589 				   u64 addr, struct addr_location *al)
1590 {
1591 	al->sym = NULL;
1592 	if (thread__find_map(thread, cpumode, addr, al))
1593 		al->sym = map__find_symbol(al->map, al->addr);
1594 	return al->sym;
1595 }
1596 
1597 /*
1598  * Callers need to drop the reference to al->thread, obtained in
1599  * machine__findnew_thread()
1600  */
1601 int machine__resolve(struct machine *machine, struct addr_location *al,
1602 		     struct perf_sample *sample)
1603 {
1604 	struct thread *thread = machine__findnew_thread(machine, sample->pid,
1605 							sample->tid);
1606 
1607 	if (thread == NULL)
1608 		return -1;
1609 
1610 	dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);
1611 	thread__find_map(thread, sample->cpumode, sample->ip, al);
1612 	dump_printf(" ...... dso: %s\n",
1613 		    al->map ? al->map->dso->long_name :
1614 			al->level == 'H' ? "[hypervisor]" : "<not found>");
1615 
1616 	if (thread__is_filtered(thread))
1617 		al->filtered |= (1 << HIST_FILTER__THREAD);
1618 
1619 	al->sym = NULL;
1620 	al->cpu = sample->cpu;
1621 	al->socket = -1;
1622 	al->srcline = NULL;
1623 
1624 	if (al->cpu >= 0) {
1625 		struct perf_env *env = machine->env;
1626 
1627 		if (env && env->cpu)
1628 			al->socket = env->cpu[al->cpu].socket_id;
1629 	}
1630 
1631 	if (al->map) {
1632 		struct dso *dso = al->map->dso;
1633 
1634 		if (symbol_conf.dso_list &&
1635 		    (!dso || !(strlist__has_entry(symbol_conf.dso_list,
1636 						  dso->short_name) ||
1637 			       (dso->short_name != dso->long_name &&
1638 				strlist__has_entry(symbol_conf.dso_list,
1639 						   dso->long_name))))) {
1640 			al->filtered |= (1 << HIST_FILTER__DSO);
1641 		}
1642 
1643 		al->sym = map__find_symbol(al->map, al->addr);
1644 	}
1645 
1646 	if (symbol_conf.sym_list &&
1647 		(!al->sym || !strlist__has_entry(symbol_conf.sym_list,
1648 						al->sym->name))) {
1649 		al->filtered |= (1 << HIST_FILTER__SYMBOL);
1650 	}
1651 
1652 	return 0;
1653 }
1654 
1655 /*
1656  * machine__resolve() returns with reference counts taken on the entries
1657  * in the addr_location; when done using it (and perhaps after getting extra
1658  * ref counts to keep a pointer to one of those entries) it must be paired
1659  * with addr_location__put(), so that the refcounts can be decremented.
1660  */
1661 void addr_location__put(struct addr_location *al)
1662 {
1663 	thread__zput(al->thread);
1664 }
1665 
1666 bool is_bts_event(struct perf_event_attr *attr)
1667 {
1668 	return attr->type == PERF_TYPE_HARDWARE &&
1669 	       attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
1670 	       attr->sample_period == 1;
1671 }
1672 
1673 bool sample_addr_correlates_sym(struct perf_event_attr *attr)
1674 {
1675 	if (attr->type == PERF_TYPE_SOFTWARE &&
1676 	    (attr->config == PERF_COUNT_SW_PAGE_FAULTS ||
1677 	     attr->config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
1678 	     attr->config == PERF_COUNT_SW_PAGE_FAULTS_MAJ))
1679 		return true;
1680 
1681 	if (is_bts_event(attr))
1682 		return true;
1683 
1684 	return false;
1685 }
1686 
1687 void thread__resolve(struct thread *thread, struct addr_location *al,
1688 		     struct perf_sample *sample)
1689 {
1690 	thread__find_map(thread, sample->cpumode, sample->addr, al);
1691 
1692 	al->cpu = sample->cpu;
1693 	al->sym = NULL;
1694 
1695 	if (al->map)
1696 		al->sym = map__find_symbol(al->map, al->addr);
1697 }
1698