/* tools/perf/util/event.c (revision b802fb99) */
#include <linux/types.h>
#include <sys/mman.h>
#include "event.h"
#include "debug.h"
#include "hist.h"
#include "machine.h"
#include "sort.h"
#include "string.h"
#include "strlist.h"
#include "thread.h"
#include "thread_map.h"
#include "symbol/kallsyms.h"
#include "asm/bug.h"
#include "stat.h"

static const char *perf_event__names[] = {
	[0]					= "TOTAL",
	[PERF_RECORD_MMAP]			= "MMAP",
	[PERF_RECORD_MMAP2]			= "MMAP2",
	[PERF_RECORD_LOST]			= "LOST",
	[PERF_RECORD_COMM]			= "COMM",
	[PERF_RECORD_EXIT]			= "EXIT",
	[PERF_RECORD_THROTTLE]			= "THROTTLE",
	[PERF_RECORD_UNTHROTTLE]		= "UNTHROTTLE",
	[PERF_RECORD_FORK]			= "FORK",
	[PERF_RECORD_READ]			= "READ",
	[PERF_RECORD_SAMPLE]			= "SAMPLE",
	[PERF_RECORD_AUX]			= "AUX",
	[PERF_RECORD_ITRACE_START]		= "ITRACE_START",
	[PERF_RECORD_LOST_SAMPLES]		= "LOST_SAMPLES",
	[PERF_RECORD_SWITCH]			= "SWITCH",
	[PERF_RECORD_SWITCH_CPU_WIDE]		= "SWITCH_CPU_WIDE",
	[PERF_RECORD_HEADER_ATTR]		= "ATTR",
	[PERF_RECORD_HEADER_EVENT_TYPE]		= "EVENT_TYPE",
	[PERF_RECORD_HEADER_TRACING_DATA]	= "TRACING_DATA",
	[PERF_RECORD_HEADER_BUILD_ID]		= "BUILD_ID",
	[PERF_RECORD_FINISHED_ROUND]		= "FINISHED_ROUND",
	[PERF_RECORD_ID_INDEX]			= "ID_INDEX",
	[PERF_RECORD_AUXTRACE_INFO]		= "AUXTRACE_INFO",
	[PERF_RECORD_AUXTRACE]			= "AUXTRACE",
	[PERF_RECORD_AUXTRACE_ERROR]		= "AUXTRACE_ERROR",
	[PERF_RECORD_THREAD_MAP]		= "THREAD_MAP",
	[PERF_RECORD_CPU_MAP]			= "CPU_MAP",
	[PERF_RECORD_STAT_CONFIG]		= "STAT_CONFIG",
	[PERF_RECORD_STAT]			= "STAT",
	[PERF_RECORD_STAT_ROUND]		= "STAT_ROUND",
	[PERF_RECORD_EVENT_UPDATE]		= "EVENT_UPDATE",
};

const char *perf_event__name(unsigned int id)
{
	if (id >= ARRAY_SIZE(perf_event__names))
		return "INVALID";
	if (!perf_event__names[id])
		return "UNKNOWN";
	return perf_event__names[id];
}

static struct perf_sample synth_sample = {
	.pid	   = -1,
	.tid	   = -1,
	.time	   = -1,
	.stream_id = -1,
	.cpu	   = -1,
	.period	   = 1,
};

/*
 * Assumes that the first 4095 bytes of /proc/pid/status contains
 * the comm, tgid and ppid.
 */
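/*
 * A minimal sketch of the /proc/<pid>/status fields parsed below
 * (hypothetical values):
 *
 *   Name:   bash
 *   Tgid:   1234
 *   PPid:   1000
 *
 * The fields are located with strstr() rather than positionally,
 * since other lines sit between them.
 */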
static int perf_event__get_comm_ids(pid_t pid, char *comm, size_t len,
				    pid_t *tgid, pid_t *ppid)
{
	char filename[PATH_MAX];
	char bf[4096];
	int fd;
	size_t size = 0;
	ssize_t n;
	char *nl, *name, *tgids, *ppids;

	*tgid = -1;
	*ppid = -1;

	snprintf(filename, sizeof(filename), "/proc/%d/status", pid);

	fd = open(filename, O_RDONLY);
	if (fd < 0) {
		pr_debug("couldn't open %s\n", filename);
		return -1;
	}

	n = read(fd, bf, sizeof(bf) - 1);
	close(fd);
	if (n <= 0) {
		pr_warning("Couldn't get COMM, tgid and ppid for pid %d\n",
			   pid);
		return -1;
	}
	bf[n] = '\0';

	name = strstr(bf, "Name:");
	tgids = strstr(bf, "Tgid:");
	ppids = strstr(bf, "PPid:");

	if (name) {
		name += 5;  /* strlen("Name:") */

		while (*name && isspace(*name))
			++name;

		nl = strchr(name, '\n');
		if (nl)
			*nl = '\0';

		size = strlen(name);
		if (size >= len)
			size = len - 1;
		memcpy(comm, name, size);
		comm[size] = '\0';
	} else {
		pr_debug("Name: string not found for pid %d\n", pid);
	}

	if (tgids) {
		tgids += 5;  /* strlen("Tgid:") */
		*tgid = atoi(tgids);
	} else {
		pr_debug("Tgid: string not found for pid %d\n", pid);
	}

	if (ppids) {
		ppids += 5;  /* strlen("PPid:") */
		*ppid = atoi(ppids);
	} else {
		pr_debug("PPid: string not found for pid %d\n", pid);
	}

	return 0;
}

static int perf_event__prepare_comm(union perf_event *event, pid_t pid,
				    struct machine *machine,
				    pid_t *tgid, pid_t *ppid)
{
	size_t size;

	*ppid = -1;

	memset(&event->comm, 0, sizeof(event->comm));

	if (machine__is_host(machine)) {
		if (perf_event__get_comm_ids(pid, event->comm.comm,
					     sizeof(event->comm.comm),
					     tgid, ppid) != 0) {
			return -1;
		}
	} else {
		*tgid = machine->pid;
	}

	if (*tgid < 0)
		return -1;

	event->comm.pid = *tgid;
	event->comm.header.type = PERF_RECORD_COMM;

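	/*
	 * Trim the unused tail of the fixed-size comm[] array from the
	 * on-disk record while keeping the string u64-aligned. E.g.,
	 * assuming a 16-byte comm[] as in this tree, "bash" needs 5
	 * bytes, PERF_ALIGN rounds that up to 8, so header.size drops
	 * the remaining 8 bytes.
	 */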
	size = strlen(event->comm.comm) + 1;
	size = PERF_ALIGN(size, sizeof(u64));
	memset(event->comm.comm + size, 0, machine->id_hdr_size);
	event->comm.header.size = (sizeof(event->comm) -
				(sizeof(event->comm.comm) - size) +
				machine->id_hdr_size);
	event->comm.tid = pid;

	return 0;
}

pid_t perf_event__synthesize_comm(struct perf_tool *tool,
					 union perf_event *event, pid_t pid,
					 perf_event__handler_t process,
					 struct machine *machine)
{
	pid_t tgid, ppid;

	if (perf_event__prepare_comm(event, pid, machine, &tgid, &ppid) != 0)
		return -1;

	if (process(tool, event, &synth_sample, machine) != 0)
		return -1;

	return tgid;
}

static int perf_event__synthesize_fork(struct perf_tool *tool,
				       union perf_event *event,
				       pid_t pid, pid_t tgid, pid_t ppid,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);

	/*
	 * For the main thread, set the parent to the ppid from the status
	 * file. For other threads, set the parent pid to the main thread,
	 * i.e. assume the main thread spawns all threads in a process.
	 */
	if (tgid == pid) {
		event->fork.ppid = ppid;
		event->fork.ptid = ppid;
	} else {
		event->fork.ppid = tgid;
		event->fork.ptid = tgid;
	}
	event->fork.pid  = tgid;
	event->fork.tid  = pid;
	event->fork.header.type = PERF_RECORD_FORK;

	event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);

	if (process(tool, event, &synth_sample, machine) != 0)
		return -1;

	return 0;
}

int perf_event__synthesize_mmap_events(struct perf_tool *tool,
				       union perf_event *event,
				       pid_t pid, pid_t tgid,
				       perf_event__handler_t process,
				       struct machine *machine,
				       bool mmap_data,
				       unsigned int proc_map_timeout)
{
	char filename[PATH_MAX];
	FILE *fp;
	unsigned long long t;
	bool truncation = false;
	unsigned long long timeout = proc_map_timeout * 1000000ULL;
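	/* proc_map_timeout is in milliseconds; rdclock() below returns ns. */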
	int rc = 0;

	if (machine__is_default_guest(machine))
		return 0;

	snprintf(filename, sizeof(filename), "%s/proc/%d/maps",
		 machine->root_dir, pid);

	fp = fopen(filename, "r");
	if (fp == NULL) {
		/*
		 * We raced with a task exiting - just return:
		 */
		pr_debug("couldn't open %s\n", filename);
		return -1;
	}

	event->header.type = PERF_RECORD_MMAP2;
	t = rdclock();

	while (1) {
		char bf[BUFSIZ];
		char prot[5];
		char execname[PATH_MAX];
		char anonstr[] = "//anon";
		unsigned int ino;
		size_t size;
		ssize_t n;

		if (fgets(bf, sizeof(bf), fp) == NULL)
			break;

		if ((rdclock() - t) > timeout) {
			pr_warning("Reading %s timed out. "
				   "You may want to increase "
				   "the time limit with --proc-map-timeout\n",
				   filename);
			truncation = true;
			goto out;
		}

		/* ensure null termination since stack will be reused. */
		strcpy(execname, "");

		/* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
		n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %s\n",
		       &event->mmap2.start, &event->mmap2.len, prot,
		       &event->mmap2.pgoff, &event->mmap2.maj,
		       &event->mmap2.min,
		       &ino, execname);

		/*
		 * Anon maps don't have the execname.
		 */
		if (n < 7)
			continue;

		event->mmap2.ino = (u64)ino;

		/*
		 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
		 */
		if (machine__is_host(machine))
			event->header.misc = PERF_RECORD_MISC_USER;
		else
			event->header.misc = PERF_RECORD_MISC_GUEST_USER;

		/* map protection and flags bits */
		event->mmap2.prot = 0;
		event->mmap2.flags = 0;
		if (prot[0] == 'r')
			event->mmap2.prot |= PROT_READ;
		if (prot[1] == 'w')
			event->mmap2.prot |= PROT_WRITE;
		if (prot[2] == 'x')
			event->mmap2.prot |= PROT_EXEC;

		if (prot[3] == 's')
			event->mmap2.flags |= MAP_SHARED;
		else
			event->mmap2.flags |= MAP_PRIVATE;
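		/* e.g. "r-xp" above yields PROT_READ | PROT_EXEC with MAP_PRIVATE. */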

		if (prot[2] != 'x') {
			if (!mmap_data || prot[0] != 'r')
				continue;

			event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
		}

out:
		if (truncation)
			event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT;

		if (!strcmp(execname, ""))
			strcpy(execname, anonstr);

		size = strlen(execname) + 1;
		memcpy(event->mmap2.filename, execname, size);
		size = PERF_ALIGN(size, sizeof(u64));
		event->mmap2.len -= event->mmap2.start;
		event->mmap2.header.size = (sizeof(event->mmap2) -
					(sizeof(event->mmap2.filename) - size));
		memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
		event->mmap2.header.size += machine->id_hdr_size;
		event->mmap2.pid = tgid;
		event->mmap2.tid = pid;

		if (process(tool, event, &synth_sample, machine) != 0) {
			rc = -1;
			break;
		}

		if (truncation)
			break;
	}

	fclose(fp);
	return rc;
}

int perf_event__synthesize_modules(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine)
{
	int rc = 0;
	struct map *pos;
	struct map_groups *kmaps = &machine->kmaps;
	struct maps *maps = &kmaps->maps[MAP__FUNCTION];
	union perf_event *event = zalloc((sizeof(event->mmap) +
					  machine->id_hdr_size));
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for kernel modules\n");
		return -1;
	}

	event->header.type = PERF_RECORD_MMAP;

	/*
	 * kernel uses 0 for user space maps, see kernel/perf_event.c
	 * __perf_event_mmap
	 */
	if (machine__is_host(machine))
		event->header.misc = PERF_RECORD_MISC_KERNEL;
	else
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;

	for (pos = maps__first(maps); pos; pos = map__next(pos)) {
		size_t size;

		if (__map__is_kernel(pos))
			continue;

		size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
		event->mmap.header.type = PERF_RECORD_MMAP;
		event->mmap.header.size = (sizeof(event->mmap) -
					(sizeof(event->mmap.filename) - size));
		memset(event->mmap.filename + size, 0, machine->id_hdr_size);
		event->mmap.header.size += machine->id_hdr_size;
		event->mmap.start = pos->start;
		event->mmap.len   = pos->end - pos->start;
		event->mmap.pid   = machine->pid;

		memcpy(event->mmap.filename, pos->dso->long_name,
		       pos->dso->long_name_len + 1);
		if (process(tool, event, &synth_sample, machine) != 0) {
			rc = -1;
			break;
		}
	}

	free(event);
	return rc;
}

static int __event__synthesize_thread(union perf_event *comm_event,
				      union perf_event *mmap_event,
				      union perf_event *fork_event,
				      pid_t pid, int full,
				      perf_event__handler_t process,
				      struct perf_tool *tool,
				      struct machine *machine,
				      bool mmap_data,
				      unsigned int proc_map_timeout)
{
	char filename[PATH_MAX];
	DIR *tasks;
	struct dirent dirent, *next;
	pid_t tgid, ppid;
	int rc = 0;

	/* special case: only send one comm event using passed in pid */
	if (!full) {
		tgid = perf_event__synthesize_comm(tool, comm_event, pid,
						   process, machine);

		if (tgid == -1)
			return -1;

		return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
							  process, machine, mmap_data,
							  proc_map_timeout);
	}

	if (machine__is_default_guest(machine))
		return 0;

	snprintf(filename, sizeof(filename), "%s/proc/%d/task",
		 machine->root_dir, pid);

	tasks = opendir(filename);
	if (tasks == NULL) {
		pr_debug("couldn't open %s\n", filename);
		return 0;
	}

	while (!readdir_r(tasks, &dirent, &next) && next) {
		char *end;
		pid_t _pid;

		_pid = strtol(dirent.d_name, &end, 10);
		if (*end)
			continue;

		rc = -1;
		if (perf_event__prepare_comm(comm_event, _pid, machine,
					     &tgid, &ppid) != 0)
			break;

		if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
						ppid, process, machine) < 0)
			break;
		/*
		 * Send the prepared comm event
		 */
		if (process(tool, comm_event, &synth_sample, machine) != 0)
			break;

		rc = 0;
		if (_pid == pid) {
			/* process the parent's maps too */
			rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
						process, machine, mmap_data, proc_map_timeout);
			if (rc)
				break;
		}
	}

	closedir(tasks);
	return rc;
}

int perf_event__synthesize_thread_map(struct perf_tool *tool,
				      struct thread_map *threads,
				      perf_event__handler_t process,
				      struct machine *machine,
				      bool mmap_data,
				      unsigned int proc_map_timeout)
{
	union perf_event *comm_event, *mmap_event, *fork_event;
	int err = -1, thread, j;

	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
	if (fork_event == NULL)
		goto out_free_mmap;

	err = 0;
	for (thread = 0; thread < threads->nr; ++thread) {
		if (__event__synthesize_thread(comm_event, mmap_event,
					       fork_event,
					       thread_map__pid(threads, thread), 0,
					       process, tool, machine,
					       mmap_data, proc_map_timeout)) {
			err = -1;
			break;
		}

		/*
		 * comm.pid is set to thread group id by
		 * perf_event__synthesize_comm
		 */
		if ((int) comm_event->comm.pid != thread_map__pid(threads, thread)) {
			bool need_leader = true;

			/* is thread group leader in thread_map? */
			for (j = 0; j < threads->nr; ++j) {
				if ((int) comm_event->comm.pid == thread_map__pid(threads, j)) {
					need_leader = false;
					break;
				}
			}

			/* if not, generate events for it */
			if (need_leader &&
			    __event__synthesize_thread(comm_event, mmap_event,
						       fork_event,
						       comm_event->comm.pid, 0,
						       process, tool, machine,
						       mmap_data, proc_map_timeout)) {
				err = -1;
				break;
			}
		}
	}
	free(fork_event);
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}

int perf_event__synthesize_threads(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine,
				   bool mmap_data,
				   unsigned int proc_map_timeout)
{
	DIR *proc;
	char proc_path[PATH_MAX];
	struct dirent dirent, *next;
	union perf_event *comm_event, *mmap_event, *fork_event;
	int err = -1;

	if (machine__is_default_guest(machine))
		return 0;

	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
	if (fork_event == NULL)
		goto out_free_mmap;

	snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
	proc = opendir(proc_path);

	if (proc == NULL)
		goto out_free_fork;

	while (!readdir_r(proc, &dirent, &next) && next) {
		char *end;
		pid_t pid = strtol(dirent.d_name, &end, 10);

		if (*end) /* only interested in proper numerical dirents */
			continue;
		/*
		 * We may race with exiting thread, so don't stop just because
		 * one thread couldn't be synthesized.
		 */
		__event__synthesize_thread(comm_event, mmap_event, fork_event, pid,
					   1, process, tool, machine, mmap_data,
					   proc_map_timeout);
	}

	err = 0;
	closedir(proc);
out_free_fork:
	free(fork_event);
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}

struct process_symbol_args {
	const char *name;
	u64	   start;
};

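/*
 * kallsyms lines have the form "<addr> <type> <name>", e.g.:
 *
 *   ffffffff81000000 T _text
 *
 * kallsyms__parse() feeds each (name, type, start) triple to the
 * callback below.
 */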
static int find_symbol_cb(void *arg, const char *name, char type,
			  u64 start)
{
	struct process_symbol_args *args = arg;

	/*
	 * Must be a function or at least an alias, as in PARISC64, where "_text" is
	 * an 'A' to the same address as "_stext".
	 */
	if (!(symbol_type__is_a(type, MAP__FUNCTION) ||
	      type == 'A') || strcmp(name, args->name))
		return 0;

	args->start = start;
	return 1;
}

u64 kallsyms__get_function_start(const char *kallsyms_filename,
				 const char *symbol_name)
{
	struct process_symbol_args args = { .name = symbol_name, };

	if (kallsyms__parse(kallsyms_filename, &args, find_symbol_cb) <= 0)
		return 0;

	return args.start;
}

int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	size_t size;
	const char *mmap_name;
	char name_buff[PATH_MAX];
	struct map *map = machine__kernel_map(machine);
	struct kmap *kmap;
	int err;
	union perf_event *event;

	if (map == NULL)
		return -1;

	/*
	 * We should get this from /sys/kernel/sections/.text, but until
	 * that is available use this, and after it is use it as a
	 * fallback for older kernels.
	 */
	event = zalloc((sizeof(event->mmap) + machine->id_hdr_size));
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for the kernel map\n");
		return -1;
	}

	mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff));
	if (machine__is_host(machine)) {
		/*
		 * kernel uses PERF_RECORD_MISC_USER for user space maps,
		 * see kernel/perf_event.c __perf_event_mmap
		 */
		event->header.misc = PERF_RECORD_MISC_KERNEL;
	} else {
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
	}

	kmap = map__kmap(map);
	size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
			"%s%s", mmap_name, kmap->ref_reloc_sym->name) + 1;
	size = PERF_ALIGN(size, sizeof(u64));
	event->mmap.header.type = PERF_RECORD_MMAP;
	event->mmap.header.size = (sizeof(event->mmap) -
			(sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
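	/*
	 * pgoff carries the address of the relocation reference symbol
	 * (e.g. _text), which report-time code can use to re-relocate
	 * kernel symbols when the recorded kernel's layout differs.
	 */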
	event->mmap.pgoff = kmap->ref_reloc_sym->addr;
	event->mmap.start = map->start;
	event->mmap.len   = map->end - event->mmap.start;
	event->mmap.pid   = machine->pid;

	err = process(tool, event, &synth_sample, machine);
	free(event);

	return err;
}

int perf_event__synthesize_thread_map2(struct perf_tool *tool,
				      struct thread_map *threads,
				      perf_event__handler_t process,
				      struct machine *machine)
{
	union perf_event *event;
	int i, err, size;

	size  = sizeof(event->thread_map);
	size += threads->nr * sizeof(event->thread_map.entries[0]);

	event = zalloc(size);
	if (!event)
		return -ENOMEM;

	event->header.type = PERF_RECORD_THREAD_MAP;
	event->header.size = size;
	event->thread_map.nr = threads->nr;

	for (i = 0; i < threads->nr; i++) {
		struct thread_map_event_entry *entry = &event->thread_map.entries[i];
		char *comm = thread_map__comm(threads, i);

		if (!comm)
			comm = (char *) "";

		entry->pid = thread_map__pid(threads, i);
		strncpy((char *) &entry->comm, comm, sizeof(entry->comm));
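		/*
		 * Note: the event was zalloc'ed, but a comm of exactly
		 * sizeof(entry->comm) bytes would leave the entry
		 * unterminated, so consumers must bound their reads.
		 */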
	}

	err = process(tool, event, NULL, machine);

	free(event);
	return err;
}

static void synthesize_cpus(struct cpu_map_entries *cpus,
			    struct cpu_map *map)
{
	int i;

	cpus->nr = map->nr;

	for (i = 0; i < map->nr; i++)
		cpus->cpu[i] = map->map[i];
}

static void synthesize_mask(struct cpu_map_mask *mask,
			    struct cpu_map *map, int max)
{
	int i;

	mask->nr = BITS_TO_LONGS(max);
	mask->long_size = sizeof(long);

	for (i = 0; i < map->nr; i++)
		set_bit(map->map[i], mask->mask);
}

static size_t cpus_size(struct cpu_map *map)
{
	return sizeof(struct cpu_map_entries) + map->nr * sizeof(u16);
}

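/*
 * Size of the mask encoding: one bit per possible cpu, rounded up to
 * whole longs. E.g. a map holding cpus {0, 4, 95} needs 96 bits, i.e.
 * two longs (16 bytes of mask) on a 64-bit host.
 */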
static size_t mask_size(struct cpu_map *map, int *max)
{
	int i;

	*max = 0;

	for (i = 0; i < map->nr; i++) {
		/* cpu N occupies bit N, so N + 1 bits are needed */
		int bit = map->map[i] + 1;

		if (bit > *max)
			*max = bit;
	}

	return sizeof(struct cpu_map_mask) + BITS_TO_LONGS(*max) * sizeof(long);
}

void *cpu_map_data__alloc(struct cpu_map *map, size_t *size, u16 *type, int *max)
{
	size_t size_cpus, size_mask;
	bool is_dummy = cpu_map__empty(map);

	/*
	 * Both array and mask data have variable size based
	 * on the number of cpus and their actual values.
	 * The size of the 'struct cpu_map_data' is:
	 *
	 *   array = size of 'struct cpu_map_entries' +
	 *           number of cpus * sizeof(u16)
	 *
	 *   mask  = size of 'struct cpu_map_mask' +
	 *           maximum cpu bit converted to size of longs
	 *
	 * and finally + the size of 'struct cpu_map_data'.
	 */
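	/*
	 * Example of the trade-off: a map holding only cpu 255 costs one
	 * u16 array entry, but 256 mask bits (four longs on a 64-bit
	 * host), so the array encoding is chosen; a dense map of cpus
	 * 0-255 flips that in favour of the mask.
	 */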
	size_cpus = cpus_size(map);
	size_mask = mask_size(map, max);

	if (is_dummy || (size_cpus < size_mask)) {
		*size += size_cpus;
		*type  = PERF_CPU_MAP__CPUS;
	} else {
		*size += size_mask;
		*type  = PERF_CPU_MAP__MASK;
	}

	*size += sizeof(struct cpu_map_data);
	return zalloc(*size);
}

void cpu_map_data__synthesize(struct cpu_map_data *data, struct cpu_map *map,
			      u16 type, int max)
{
	data->type = type;

	switch (type) {
	case PERF_CPU_MAP__CPUS:
		synthesize_cpus((struct cpu_map_entries *) data->data, map);
		break;
	case PERF_CPU_MAP__MASK:
		synthesize_mask((struct cpu_map_mask *) data->data, map, max);
		break;
	default:
		break;
	}
}

static struct cpu_map_event *cpu_map_event__new(struct cpu_map *map)
{
	size_t size = sizeof(struct cpu_map_event);
	struct cpu_map_event *event;
	int max;
	u16 type;

	event = cpu_map_data__alloc(map, &size, &type, &max);
	if (!event)
		return NULL;

	event->header.type = PERF_RECORD_CPU_MAP;
	event->header.size = size;
	event->data.type   = type;

	cpu_map_data__synthesize(&event->data, map, type, max);
	return event;
}

int perf_event__synthesize_cpu_map(struct perf_tool *tool,
				   struct cpu_map *map,
				   perf_event__handler_t process,
				   struct machine *machine)
{
	struct cpu_map_event *event;
	int err;

	event = cpu_map_event__new(map);
	if (!event)
		return -ENOMEM;

	err = process(tool, (union perf_event *) event, NULL, machine);

	free(event);
	return err;
}

int perf_event__synthesize_stat_config(struct perf_tool *tool,
				       struct perf_stat_config *config,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	struct stat_config_event *event;
	int size, i = 0, err;

	size  = sizeof(*event);
	size += (PERF_STAT_CONFIG_TERM__MAX * sizeof(event->data[0]));
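	/* One (tag, val) pair per config term follows the fixed part of the record. */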

	event = zalloc(size);
	if (!event)
		return -ENOMEM;

	event->header.type = PERF_RECORD_STAT_CONFIG;
	event->header.size = size;
	event->nr          = PERF_STAT_CONFIG_TERM__MAX;

#define ADD(__term, __val)					\
	event->data[i].tag = PERF_STAT_CONFIG_TERM__##__term;	\
	event->data[i].val = __val;				\
	i++;

	ADD(AGGR_MODE,	config->aggr_mode)
	ADD(INTERVAL,	config->interval)
	ADD(SCALE,	config->scale)

	WARN_ONCE(i != PERF_STAT_CONFIG_TERM__MAX,
		  "stat config terms unbalanced\n");
#undef ADD

	err = process(tool, (union perf_event *) event, NULL, machine);

	free(event);
	return err;
}

int perf_event__synthesize_stat(struct perf_tool *tool,
				u32 cpu, u32 thread, u64 id,
				struct perf_counts_values *count,
				perf_event__handler_t process,
				struct machine *machine)
{
	struct stat_event event;

	event.header.type = PERF_RECORD_STAT;
	event.header.size = sizeof(event);
	event.header.misc = 0;

	event.id        = id;
	event.cpu       = cpu;
	event.thread    = thread;
	event.val       = count->val;
	event.ena       = count->ena;
	event.run       = count->run;

	return process(tool, (union perf_event *) &event, NULL, machine);
}

int perf_event__synthesize_stat_round(struct perf_tool *tool,
				      u64 evtime, u64 type,
				      perf_event__handler_t process,
				      struct machine *machine)
{
	struct stat_round_event event;

	event.header.type = PERF_RECORD_STAT_ROUND;
	event.header.size = sizeof(event);
	event.header.misc = 0;

	event.time = evtime;
	event.type = type;

	return process(tool, (union perf_event *) &event, NULL, machine);
}

void perf_event__read_stat_config(struct perf_stat_config *config,
				  struct stat_config_event *event)
{
	unsigned i;

	for (i = 0; i < event->nr; i++) {

		switch (event->data[i].tag) {
#define CASE(__term, __val)					\
		case PERF_STAT_CONFIG_TERM__##__term:		\
			config->__val = event->data[i].val;	\
			break;

		CASE(AGGR_MODE, aggr_mode)
		CASE(SCALE,     scale)
		CASE(INTERVAL,  interval)
#undef CASE
		default:
			pr_warning("unknown stat config term %" PRIu64 "\n",
				   event->data[i].tag);
		}
	}
}

size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp)
{
	const char *s;

	if (event->header.misc & PERF_RECORD_MISC_COMM_EXEC)
		s = " exec";
	else
		s = "";

	return fprintf(fp, "%s: %s:%d/%d\n", s, event->comm.comm, event->comm.pid, event->comm.tid);
}

int perf_event__process_comm(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_comm_event(machine, event, sample);
}

int perf_event__process_lost(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_lost_event(machine, event, sample);
}

int perf_event__process_aux(struct perf_tool *tool __maybe_unused,
			    union perf_event *event,
			    struct perf_sample *sample __maybe_unused,
			    struct machine *machine)
{
	return machine__process_aux_event(machine, event);
}

int perf_event__process_itrace_start(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine)
{
	return machine__process_itrace_start_event(machine, event);
}

int perf_event__process_lost_samples(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_sample *sample,
				     struct machine *machine)
{
	return machine__process_lost_samples_event(machine, event, sample);
}

int perf_event__process_switch(struct perf_tool *tool __maybe_unused,
			       union perf_event *event,
			       struct perf_sample *sample __maybe_unused,
			       struct machine *machine)
{
	return machine__process_switch_event(machine, event);
}

size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %c %s\n",
		       event->mmap.pid, event->mmap.tid, event->mmap.start,
		       event->mmap.len, event->mmap.pgoff,
		       (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x',
		       event->mmap.filename);
}

size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64
			   " %02x:%02x %"PRIu64" %"PRIu64"]: %c%c%c%c %s\n",
		       event->mmap2.pid, event->mmap2.tid, event->mmap2.start,
		       event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj,
		       event->mmap2.min, event->mmap2.ino,
		       event->mmap2.ino_generation,
		       (event->mmap2.prot & PROT_READ) ? 'r' : '-',
		       (event->mmap2.prot & PROT_WRITE) ? 'w' : '-',
		       (event->mmap2.prot & PROT_EXEC) ? 'x' : '-',
		       (event->mmap2.flags & MAP_SHARED) ? 's' : 'p',
		       event->mmap2.filename);
}

size_t perf_event__fprintf_thread_map(union perf_event *event, FILE *fp)
{
	struct thread_map *threads = thread_map__new_event(&event->thread_map);
	size_t ret;

	ret = fprintf(fp, " nr: ");

	if (threads)
		ret += thread_map__fprintf(threads, fp);
	else
		ret += fprintf(fp, "failed to get threads from event\n");

	thread_map__put(threads);
	return ret;
}

size_t perf_event__fprintf_cpu_map(union perf_event *event, FILE *fp)
{
	struct cpu_map *cpus = cpu_map__new_data(&event->cpu_map.data);
	size_t ret;

	ret = fprintf(fp, " nr: ");

	if (cpus)
		ret += cpu_map__fprintf(cpus, fp);
	else
		ret += fprintf(fp, "failed to get cpumap from event\n");

	cpu_map__put(cpus);
	return ret;
}

int perf_event__process_mmap(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_mmap_event(machine, event, sample);
}

int perf_event__process_mmap2(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_mmap2_event(machine, event, sample);
}

size_t perf_event__fprintf_task(union perf_event *event, FILE *fp)
{
	return fprintf(fp, "(%d:%d):(%d:%d)\n",
		       event->fork.pid, event->fork.tid,
		       event->fork.ppid, event->fork.ptid);
}

int perf_event__process_fork(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_fork_event(machine, event, sample);
}

int perf_event__process_exit(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_exit_event(machine, event, sample);
}

size_t perf_event__fprintf_aux(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " offset: %#"PRIx64" size: %#"PRIx64" flags: %#"PRIx64" [%s%s]\n",
		       event->aux.aux_offset, event->aux.aux_size,
		       event->aux.flags,
		       event->aux.flags & PERF_AUX_FLAG_TRUNCATED ? "T" : "",
		       event->aux.flags & PERF_AUX_FLAG_OVERWRITE ? "O" : "");
}

size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " pid: %u tid: %u\n",
		       event->itrace_start.pid, event->itrace_start.tid);
}

size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp)
{
	bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
	const char *in_out = out ? "OUT" : "IN ";

	if (event->header.type == PERF_RECORD_SWITCH)
		return fprintf(fp, " %s\n", in_out);

	return fprintf(fp, " %s  %s pid/tid: %5u/%-5u\n",
		       in_out, out ? "next" : "prev",
		       event->context_switch.next_prev_pid,
		       event->context_switch.next_prev_tid);
}

size_t perf_event__fprintf(union perf_event *event, FILE *fp)
{
	size_t ret = fprintf(fp, "PERF_RECORD_%s",
			     perf_event__name(event->header.type));

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret += perf_event__fprintf_comm(event, fp);
		break;
	case PERF_RECORD_FORK:
	case PERF_RECORD_EXIT:
		ret += perf_event__fprintf_task(event, fp);
		break;
	case PERF_RECORD_MMAP:
		ret += perf_event__fprintf_mmap(event, fp);
		break;
	case PERF_RECORD_MMAP2:
		ret += perf_event__fprintf_mmap2(event, fp);
		break;
	case PERF_RECORD_AUX:
		ret += perf_event__fprintf_aux(event, fp);
		break;
	case PERF_RECORD_ITRACE_START:
		ret += perf_event__fprintf_itrace_start(event, fp);
		break;
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		ret += perf_event__fprintf_switch(event, fp);
		break;
	default:
		ret += fprintf(fp, "\n");
	}

	return ret;
}

int perf_event__process(struct perf_tool *tool __maybe_unused,
			union perf_event *event,
			struct perf_sample *sample,
			struct machine *machine)
{
	return machine__process_event(machine, event, sample);
}

void thread__find_addr_map(struct thread *thread, u8 cpumode,
			   enum map_type type, u64 addr,
			   struct addr_location *al)
{
	struct map_groups *mg = thread->mg;
	struct machine *machine = mg->machine;
	bool load_map = false;

	al->machine = machine;
	al->thread = thread;
	al->addr = addr;
	al->cpumode = cpumode;
	al->filtered = 0;

	if (machine == NULL) {
		al->map = NULL;
		return;
	}

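	/*
	 * Map the sample's cpumode to an addr_location level and pick the
	 * map groups to search: 'k' host kernel, '.' host userspace, 'g'
	 * guest kernel, 'u' guest userspace, 'H' for everything else
	 * (hypervisor, or a mode this session isn't collecting).
	 */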
	if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
		al->level = 'k';
		mg = &machine->kmaps;
		load_map = true;
	} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
		al->level = '.';
	} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
		al->level = 'g';
		mg = &machine->kmaps;
		load_map = true;
	} else if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest) {
		al->level = 'u';
	} else {
		al->level = 'H';
		al->map = NULL;

		if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
			cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
			!perf_guest)
			al->filtered |= (1 << HIST_FILTER__GUEST);
		if ((cpumode == PERF_RECORD_MISC_USER ||
			cpumode == PERF_RECORD_MISC_KERNEL) &&
			!perf_host)
			al->filtered |= (1 << HIST_FILTER__HOST);

		return;
	}
try_again:
	al->map = map_groups__find(mg, type, al->addr);
	if (al->map == NULL) {
		/*
		 * If this is outside of all known maps, and is a negative
		 * address, try to look it up in the kernel dso, as it might be
		 * a vsyscall or vdso (which executes in user-mode).
		 *
		 * XXX This is nasty, we should have a symbol list in the
		 * "[vdso]" dso, but for now lets use the old trick of looking
		 * in the whole kernel symbol list.
		 */
		if (cpumode == PERF_RECORD_MISC_USER && machine &&
		    mg != &machine->kmaps &&
		    machine__kernel_ip(machine, al->addr)) {
			mg = &machine->kmaps;
			load_map = true;
			goto try_again;
		}
	} else {
		/*
		 * Kernel maps might be changed when loading symbols so loading
		 * must be done prior to using kernel maps.
		 */
		if (load_map)
			map__load(al->map, machine->symbol_filter);
		al->addr = al->map->map_ip(al->map, al->addr);
	}
}

void thread__find_addr_location(struct thread *thread,
				u8 cpumode, enum map_type type, u64 addr,
				struct addr_location *al)
{
	thread__find_addr_map(thread, cpumode, type, addr, al);
	if (al->map != NULL)
		al->sym = map__find_symbol(al->map, al->addr,
					   thread->mg->machine->symbol_filter);
	else
		al->sym = NULL;
}

/*
 * Callers need to drop the reference to al->thread, obtained in
 * machine__findnew_thread()
 */
int perf_event__preprocess_sample(const union perf_event *event,
				  struct machine *machine,
				  struct addr_location *al,
				  struct perf_sample *sample)
{
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread = machine__findnew_thread(machine, sample->pid,
							sample->tid);

	if (thread == NULL)
		return -1;

	dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);
	/*
	 * Have we already created the kernel maps for this machine?
	 *
	 * This should have happened earlier, when we processed the kernel MMAP
	 * events, but for older perf.data files there was no such thing, so do
	 * it now.
	 */
	if (cpumode == PERF_RECORD_MISC_KERNEL &&
	    machine__kernel_map(machine) == NULL)
		machine__create_kernel_maps(machine);

	thread__find_addr_map(thread, cpumode, MAP__FUNCTION, sample->ip, al);
	dump_printf(" ...... dso: %s\n",
		    al->map ? al->map->dso->long_name :
			al->level == 'H' ? "[hypervisor]" : "<not found>");

	if (thread__is_filtered(thread))
		al->filtered |= (1 << HIST_FILTER__THREAD);

	al->sym = NULL;
	al->cpu = sample->cpu;
	al->socket = -1;

	if (al->cpu >= 0) {
		struct perf_env *env = machine->env;

		if (env && env->cpu)
			al->socket = env->cpu[al->cpu].socket_id;
	}

	if (al->map) {
		struct dso *dso = al->map->dso;

		if (symbol_conf.dso_list &&
		    (!dso || !(strlist__has_entry(symbol_conf.dso_list,
						  dso->short_name) ||
			       (dso->short_name != dso->long_name &&
				strlist__has_entry(symbol_conf.dso_list,
						   dso->long_name))))) {
			al->filtered |= (1 << HIST_FILTER__DSO);
		}

		al->sym = map__find_symbol(al->map, al->addr,
					   machine->symbol_filter);
	}

	if (symbol_conf.sym_list &&
		(!al->sym || !strlist__has_entry(symbol_conf.sym_list,
						al->sym->name))) {
		al->filtered |= (1 << HIST_FILTER__SYMBOL);
	}

	return 0;
}

/*
 * The preprocess_sample method will return with reference counts for
 * the entries in it; when done using them (and perhaps after grabbing
 * extra references if keeping a pointer to one of those entries) it
 * must be paired with addr_location__put(), so that the refcounts can
 * be decremented.
 */
void addr_location__put(struct addr_location *al)
{
	thread__zput(al->thread);
}

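/*
 * Intel BTS sessions show up as a hardware branch-instructions event
 * with sample_period == 1; for such samples the 'addr' field is a
 * branch target and thus correlates with a symbol.
 */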
bool is_bts_event(struct perf_event_attr *attr)
{
	return attr->type == PERF_TYPE_HARDWARE &&
	       (attr->config & PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
	       attr->sample_period == 1;
}

bool sample_addr_correlates_sym(struct perf_event_attr *attr)
{
	if (attr->type == PERF_TYPE_SOFTWARE &&
	    (attr->config == PERF_COUNT_SW_PAGE_FAULTS ||
	     attr->config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
	     attr->config == PERF_COUNT_SW_PAGE_FAULTS_MAJ))
		return true;

	if (is_bts_event(attr))
		return true;

	return false;
}

void perf_event__preprocess_sample_addr(union perf_event *event,
					struct perf_sample *sample,
					struct thread *thread,
					struct addr_location *al)
{
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	thread__find_addr_map(thread, cpumode, MAP__FUNCTION, sample->addr, al);
	if (!al->map)
		thread__find_addr_map(thread, cpumode, MAP__VARIABLE,
				      sample->addr, al);

	al->cpu = sample->cpu;
	al->sym = NULL;

	if (al->map)
		al->sym = map__find_symbol(al->map, al->addr, NULL);
}