xref: /openbmc/linux/tools/perf/util/event.c (revision 7bcae826)
1 #include <linux/types.h>
2 #include <linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
3 #include <api/fs/fs.h>
4 #include "event.h"
5 #include "debug.h"
6 #include "hist.h"
7 #include "machine.h"
8 #include "sort.h"
9 #include "string.h"
10 #include "strlist.h"
11 #include "thread.h"
12 #include "thread_map.h"
13 #include "symbol/kallsyms.h"
14 #include "asm/bug.h"
15 #include "stat.h"
16 
17 static const char *perf_event__names[] = {
18 	[0]					= "TOTAL",
19 	[PERF_RECORD_MMAP]			= "MMAP",
20 	[PERF_RECORD_MMAP2]			= "MMAP2",
21 	[PERF_RECORD_LOST]			= "LOST",
22 	[PERF_RECORD_COMM]			= "COMM",
23 	[PERF_RECORD_EXIT]			= "EXIT",
24 	[PERF_RECORD_THROTTLE]			= "THROTTLE",
25 	[PERF_RECORD_UNTHROTTLE]		= "UNTHROTTLE",
26 	[PERF_RECORD_FORK]			= "FORK",
27 	[PERF_RECORD_READ]			= "READ",
28 	[PERF_RECORD_SAMPLE]			= "SAMPLE",
29 	[PERF_RECORD_AUX]			= "AUX",
30 	[PERF_RECORD_ITRACE_START]		= "ITRACE_START",
31 	[PERF_RECORD_LOST_SAMPLES]		= "LOST_SAMPLES",
32 	[PERF_RECORD_SWITCH]			= "SWITCH",
33 	[PERF_RECORD_SWITCH_CPU_WIDE]		= "SWITCH_CPU_WIDE",
34 	[PERF_RECORD_HEADER_ATTR]		= "ATTR",
35 	[PERF_RECORD_HEADER_EVENT_TYPE]		= "EVENT_TYPE",
36 	[PERF_RECORD_HEADER_TRACING_DATA]	= "TRACING_DATA",
37 	[PERF_RECORD_HEADER_BUILD_ID]		= "BUILD_ID",
38 	[PERF_RECORD_FINISHED_ROUND]		= "FINISHED_ROUND",
39 	[PERF_RECORD_ID_INDEX]			= "ID_INDEX",
40 	[PERF_RECORD_AUXTRACE_INFO]		= "AUXTRACE_INFO",
41 	[PERF_RECORD_AUXTRACE]			= "AUXTRACE",
42 	[PERF_RECORD_AUXTRACE_ERROR]		= "AUXTRACE_ERROR",
43 	[PERF_RECORD_THREAD_MAP]		= "THREAD_MAP",
44 	[PERF_RECORD_CPU_MAP]			= "CPU_MAP",
45 	[PERF_RECORD_STAT_CONFIG]		= "STAT_CONFIG",
46 	[PERF_RECORD_STAT]			= "STAT",
47 	[PERF_RECORD_STAT_ROUND]		= "STAT_ROUND",
48 	[PERF_RECORD_EVENT_UPDATE]		= "EVENT_UPDATE",
49 	[PERF_RECORD_TIME_CONV]			= "TIME_CONV",
50 };
51 
52 const char *perf_event__name(unsigned int id)
53 {
54 	if (id >= ARRAY_SIZE(perf_event__names))
55 		return "INVALID";
56 	if (!perf_event__names[id])
57 		return "UNKNOWN";
58 	return perf_event__names[id];
59 }
60 
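/*
 * Synthesized events carry no real sample payload, so wrap them in a
 * mostly empty perf_sample (identifiers set to -1, period 1) that carries
 * only the cpumode from the event header before calling the tool's callback.
 */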
61 static int perf_tool__process_synth_event(struct perf_tool *tool,
62 					  union perf_event *event,
63 					  struct machine *machine,
64 					  perf_event__handler_t process)
65 {
66 	struct perf_sample synth_sample = {
67 	.pid	   = -1,
68 	.tid	   = -1,
69 	.time	   = -1,
70 	.stream_id = -1,
71 	.cpu	   = -1,
72 	.period	   = 1,
73 	.cpumode   = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK,
74 	};
75 
76 	return process(tool, event, &synth_sample, machine);
77 }
78 
79 /*
80  * Assumes that the first 4095 bytes of /proc/pid/status contain
81  * the comm, tgid and ppid.
82  */
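/*
 * A typical /proc/<pid>/status excerpt (only the Name:, Tgid: and PPid:
 * lines are used below), e.g.:
 *
 *   Name:   cat
 *   Tgid:   4281
 *   PPid:   4280
 */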
83 static int perf_event__get_comm_ids(pid_t pid, char *comm, size_t len,
84 				    pid_t *tgid, pid_t *ppid)
85 {
86 	char filename[PATH_MAX];
87 	char bf[4096];
88 	int fd;
89 	size_t size = 0;
90 	ssize_t n;
91 	char *nl, *name, *tgids, *ppids;
92 
93 	*tgid = -1;
94 	*ppid = -1;
95 
96 	snprintf(filename, sizeof(filename), "/proc/%d/status", pid);
97 
98 	fd = open(filename, O_RDONLY);
99 	if (fd < 0) {
100 		pr_debug("couldn't open %s\n", filename);
101 		return -1;
102 	}
103 
104 	n = read(fd, bf, sizeof(bf) - 1);
105 	close(fd);
106 	if (n <= 0) {
107 		pr_warning("Couldn't get COMM, tgid and ppid for pid %d\n",
108 			   pid);
109 		return -1;
110 	}
111 	bf[n] = '\0';
112 
113 	name = strstr(bf, "Name:");
114 	tgids = strstr(bf, "Tgid:");
115 	ppids = strstr(bf, "PPid:");
116 
117 	if (name) {
118 		name += 5;  /* strlen("Name:") */
119 
120 		while (*name && isspace(*name))
121 			++name;
122 
123 		nl = strchr(name, '\n');
124 		if (nl)
125 			*nl = '\0';
126 
127 		size = strlen(name);
128 		if (size >= len)
129 			size = len - 1;
130 		memcpy(comm, name, size);
131 		comm[size] = '\0';
132 	} else {
133 		pr_debug("Name: string not found for pid %d\n", pid);
134 	}
135 
136 	if (tgids) {
137 		tgids += 5;  /* strlen("Tgid:") */
138 		*tgid = atoi(tgids);
139 	} else {
140 		pr_debug("Tgid: string not found for pid %d\n", pid);
141 	}
142 
143 	if (ppids) {
144 		ppids += 5;  /* strlen("PPid:") */
145 		*ppid = atoi(ppids);
146 	} else {
147 		pr_debug("PPid: string not found for pid %d\n", pid);
148 	}
149 
150 	return 0;
151 }
152 
153 static int perf_event__prepare_comm(union perf_event *event, pid_t pid,
154 				    struct machine *machine,
155 				    pid_t *tgid, pid_t *ppid)
156 {
157 	size_t size;
158 
159 	*ppid = -1;
160 
161 	memset(&event->comm, 0, sizeof(event->comm));
162 
163 	if (machine__is_host(machine)) {
164 		if (perf_event__get_comm_ids(pid, event->comm.comm,
165 					     sizeof(event->comm.comm),
166 					     tgid, ppid) != 0) {
167 			return -1;
168 		}
169 	} else {
170 		*tgid = machine->pid;
171 	}
172 
173 	if (*tgid < 0)
174 		return -1;
175 
176 	event->comm.pid = *tgid;
177 	event->comm.header.type = PERF_RECORD_COMM;
178 
179 	size = strlen(event->comm.comm) + 1;
180 	size = PERF_ALIGN(size, sizeof(u64));
181 	memset(event->comm.comm + size, 0, machine->id_hdr_size);
182 	event->comm.header.size = (sizeof(event->comm) -
183 				(sizeof(event->comm.comm) - size) +
184 				machine->id_hdr_size);
185 	event->comm.tid = pid;
186 
187 	return 0;
188 }
189 
190 pid_t perf_event__synthesize_comm(struct perf_tool *tool,
191 					 union perf_event *event, pid_t pid,
192 					 perf_event__handler_t process,
193 					 struct machine *machine)
194 {
195 	pid_t tgid, ppid;
196 
197 	if (perf_event__prepare_comm(event, pid, machine, &tgid, &ppid) != 0)
198 		return -1;
199 
200 	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
201 		return -1;
202 
203 	return tgid;
204 }
205 
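/*
 * Synthesize a PERF_RECORD_FORK for an already-running task so that the
 * parent/child relationships can be reconstructed at processing time.
 */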
206 static int perf_event__synthesize_fork(struct perf_tool *tool,
207 				       union perf_event *event,
208 				       pid_t pid, pid_t tgid, pid_t ppid,
209 				       perf_event__handler_t process,
210 				       struct machine *machine)
211 {
212 	memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);
213 
214 	/*
215 	 * For the main thread, set the parent to the ppid from the status
216 	 * file. For other threads, set the parent pid to the main thread,
217 	 * i.e. assume the main thread spawns all threads in a process.
218 	 */
219 	if (tgid == pid) {
220 		event->fork.ppid = ppid;
221 		event->fork.ptid = ppid;
222 	} else {
223 		event->fork.ppid = tgid;
224 		event->fork.ptid = tgid;
225 	}
226 	event->fork.pid  = tgid;
227 	event->fork.tid  = pid;
228 	event->fork.header.type = PERF_RECORD_FORK;
229 
230 	event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);
231 
232 	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
233 		return -1;
234 
235 	return 0;
236 }
237 
238 int perf_event__synthesize_mmap_events(struct perf_tool *tool,
239 				       union perf_event *event,
240 				       pid_t pid, pid_t tgid,
241 				       perf_event__handler_t process,
242 				       struct machine *machine,
243 				       bool mmap_data,
244 				       unsigned int proc_map_timeout)
245 {
246 	char filename[PATH_MAX];
247 	FILE *fp;
248 	unsigned long long t;
249 	bool truncation = false;
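	/* --proc-map-timeout is given in milliseconds; rdclock() below returns nanoseconds */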
250 	unsigned long long timeout = proc_map_timeout * 1000000ULL;
251 	int rc = 0;
252 	const char *hugetlbfs_mnt = hugetlbfs__mountpoint();
253 	int hugetlbfs_mnt_len = hugetlbfs_mnt ? strlen(hugetlbfs_mnt) : 0;
254 
255 	if (machine__is_default_guest(machine))
256 		return 0;
257 
258 	snprintf(filename, sizeof(filename), "%s/proc/%d/maps",
259 		 machine->root_dir, pid);
260 
261 	fp = fopen(filename, "r");
262 	if (fp == NULL) {
263 		/*
264 		 * We raced with a task exiting - just return:
265 		 */
266 		pr_debug("couldn't open %s\n", filename);
267 		return -1;
268 	}
269 
270 	event->header.type = PERF_RECORD_MMAP2;
271 	t = rdclock();
272 
273 	while (1) {
274 		char bf[BUFSIZ];
275 		char prot[5];
276 		char execname[PATH_MAX];
277 		char anonstr[] = "//anon";
278 		unsigned int ino;
279 		size_t size;
280 		ssize_t n;
281 
282 		if (fgets(bf, sizeof(bf), fp) == NULL)
283 			break;
284 
285 		if ((rdclock() - t) > timeout) {
286 			pr_warning("Reading %s timed out. "
287 				   "You may want to increase "
288 				   "the time limit with --proc-map-timeout\n",
289 				   filename);
290 			truncation = true;
291 			goto out;
292 		}
293 
294 		/* ensure null termination since stack will be reused. */
295 		strcpy(execname, "");
296 
297 		/* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
298 		n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %[^\n]\n",
299 		       &event->mmap2.start, &event->mmap2.len, prot,
300 		       &event->mmap2.pgoff, &event->mmap2.maj,
301 		       &event->mmap2.min,
302 		       &ino, execname);
303 
304 		/*
305 		 * Anon maps don't have the execname.
306 		 */
307 		if (n < 7)
308 			continue;
309 
310 		event->mmap2.ino = (u64)ino;
311 
312 		/*
313 		 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
314 		 */
315 		if (machine__is_host(machine))
316 			event->header.misc = PERF_RECORD_MISC_USER;
317 		else
318 			event->header.misc = PERF_RECORD_MISC_GUEST_USER;
319 
320 		/* map protection and flags bits */
321 		event->mmap2.prot = 0;
322 		event->mmap2.flags = 0;
323 		if (prot[0] == 'r')
324 			event->mmap2.prot |= PROT_READ;
325 		if (prot[1] == 'w')
326 			event->mmap2.prot |= PROT_WRITE;
327 		if (prot[2] == 'x')
328 			event->mmap2.prot |= PROT_EXEC;
329 
330 		if (prot[3] == 's')
331 			event->mmap2.flags |= MAP_SHARED;
332 		else
333 			event->mmap2.flags |= MAP_PRIVATE;
334 
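		/*
		 * Non-executable mappings are synthesized only when the user
		 * asked for data mmaps, and even then only readable ones;
		 * everything else is skipped.
		 */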
335 		if (prot[2] != 'x') {
336 			if (!mmap_data || prot[0] != 'r')
337 				continue;
338 
339 			event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
340 		}
341 
342 out:
343 		if (truncation)
344 			event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT;
345 
346 		if (!strcmp(execname, ""))
347 			strcpy(execname, anonstr);
348 
349 		if (hugetlbfs_mnt_len &&
350 		    !strncmp(execname, hugetlbfs_mnt, hugetlbfs_mnt_len)) {
351 			strcpy(execname, anonstr);
352 			event->mmap2.flags |= MAP_HUGETLB;
353 		}
354 
355 		size = strlen(execname) + 1;
356 		memcpy(event->mmap2.filename, execname, size);
357 		size = PERF_ALIGN(size, sizeof(u64));
358 		event->mmap2.len -= event->mmap2.start;
359 		event->mmap2.header.size = (sizeof(event->mmap2) -
360 					(sizeof(event->mmap2.filename) - size));
361 		memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
362 		event->mmap2.header.size += machine->id_hdr_size;
363 		event->mmap2.pid = tgid;
364 		event->mmap2.tid = pid;
365 
366 		if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
367 			rc = -1;
368 			break;
369 		}
370 
371 		if (truncation)
372 			break;
373 	}
374 
375 	fclose(fp);
376 	return rc;
377 }
378 
379 int perf_event__synthesize_modules(struct perf_tool *tool,
380 				   perf_event__handler_t process,
381 				   struct machine *machine)
382 {
383 	int rc = 0;
384 	struct map *pos;
385 	struct map_groups *kmaps = &machine->kmaps;
386 	struct maps *maps = &kmaps->maps[MAP__FUNCTION];
387 	union perf_event *event = zalloc((sizeof(event->mmap) +
388 					  machine->id_hdr_size));
389 	if (event == NULL) {
390 		pr_debug("Not enough memory synthesizing mmap event "
391 			 "for kernel modules\n");
392 		return -1;
393 	}
394 
395 	event->header.type = PERF_RECORD_MMAP;
396 
397 	/*
398 	 * kernel uses 0 for user space maps, see kernel/perf_event.c
399 	 * __perf_event_mmap
400 	 */
401 	if (machine__is_host(machine))
402 		event->header.misc = PERF_RECORD_MISC_KERNEL;
403 	else
404 		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
405 
406 	for (pos = maps__first(maps); pos; pos = map__next(pos)) {
407 		size_t size;
408 
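		/* the kernel text map itself is synthesized separately, skip it here */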
409 		if (__map__is_kernel(pos))
410 			continue;
411 
412 		size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
413 		event->mmap.header.type = PERF_RECORD_MMAP;
414 		event->mmap.header.size = (sizeof(event->mmap) -
415 				        (sizeof(event->mmap.filename) - size));
416 		memset(event->mmap.filename + size, 0, machine->id_hdr_size);
417 		event->mmap.header.size += machine->id_hdr_size;
418 		event->mmap.start = pos->start;
419 		event->mmap.len   = pos->end - pos->start;
420 		event->mmap.pid   = machine->pid;
421 
422 		memcpy(event->mmap.filename, pos->dso->long_name,
423 		       pos->dso->long_name_len + 1);
424 		if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
425 			rc = -1;
426 			break;
427 		}
428 	}
429 
430 	free(event);
431 	return rc;
432 }
433 
434 static int __event__synthesize_thread(union perf_event *comm_event,
435 				      union perf_event *mmap_event,
436 				      union perf_event *fork_event,
437 				      pid_t pid, int full,
438 				      perf_event__handler_t process,
439 				      struct perf_tool *tool,
440 				      struct machine *machine,
441 				      bool mmap_data,
442 				      unsigned int proc_map_timeout)
443 {
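	/*
	 * 'full' selects between synthesizing only the given pid (comm + maps)
	 * and walking /proc/<pid>/task to cover every thread of the process.
	 */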
444 	char filename[PATH_MAX];
445 	DIR *tasks;
446 	struct dirent *dirent;
447 	pid_t tgid, ppid;
448 	int rc = 0;
449 
450 	/* special case: only send one comm event using passed in pid */
451 	if (!full) {
452 		tgid = perf_event__synthesize_comm(tool, comm_event, pid,
453 						   process, machine);
454 
455 		if (tgid == -1)
456 			return -1;
457 
458 		return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
459 							  process, machine, mmap_data,
460 							  proc_map_timeout);
461 	}
462 
463 	if (machine__is_default_guest(machine))
464 		return 0;
465 
466 	snprintf(filename, sizeof(filename), "%s/proc/%d/task",
467 		 machine->root_dir, pid);
468 
469 	tasks = opendir(filename);
470 	if (tasks == NULL) {
471 		pr_debug("couldn't open %s\n", filename);
472 		return 0;
473 	}
474 
475 	while ((dirent = readdir(tasks)) != NULL) {
476 		char *end;
477 		pid_t _pid;
478 
479 		_pid = strtol(dirent->d_name, &end, 10);
480 		if (*end)
481 			continue;
482 
483 		rc = -1;
484 		if (perf_event__prepare_comm(comm_event, _pid, machine,
485 					     &tgid, &ppid) != 0)
486 			break;
487 
488 		if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
489 						ppid, process, machine) < 0)
490 			break;
491 		/*
492 		 * Send the prepared comm event
493 		 */
494 		if (perf_tool__process_synth_event(tool, comm_event, machine, process) != 0)
495 			break;
496 
497 		rc = 0;
498 		if (_pid == pid) {
499 			/* process the parent's maps too */
500 			rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
501 						process, machine, mmap_data, proc_map_timeout);
502 			if (rc)
503 				break;
504 		}
505 	}
506 
507 	closedir(tasks);
508 	return rc;
509 }
510 
511 int perf_event__synthesize_thread_map(struct perf_tool *tool,
512 				      struct thread_map *threads,
513 				      perf_event__handler_t process,
514 				      struct machine *machine,
515 				      bool mmap_data,
516 				      unsigned int proc_map_timeout)
517 {
518 	union perf_event *comm_event, *mmap_event, *fork_event;
519 	int err = -1, thread, j;
520 
521 	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
522 	if (comm_event == NULL)
523 		goto out;
524 
525 	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
526 	if (mmap_event == NULL)
527 		goto out_free_comm;
528 
529 	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
530 	if (fork_event == NULL)
531 		goto out_free_mmap;
532 
533 	err = 0;
534 	for (thread = 0; thread < threads->nr; ++thread) {
535 		if (__event__synthesize_thread(comm_event, mmap_event,
536 					       fork_event,
537 					       thread_map__pid(threads, thread), 0,
538 					       process, tool, machine,
539 					       mmap_data, proc_map_timeout)) {
540 			err = -1;
541 			break;
542 		}
543 
544 		/*
545 		 * comm.pid is set to thread group id by
546 		 * perf_event__synthesize_comm
547 		 */
548 		if ((int) comm_event->comm.pid != thread_map__pid(threads, thread)) {
549 			bool need_leader = true;
550 
551 			/* is thread group leader in thread_map? */
552 			for (j = 0; j < threads->nr; ++j) {
553 				if ((int) comm_event->comm.pid == thread_map__pid(threads, j)) {
554 					need_leader = false;
555 					break;
556 				}
557 			}
558 
559 			/* if not, generate events for it */
560 			if (need_leader &&
561 			    __event__synthesize_thread(comm_event, mmap_event,
562 						       fork_event,
563 						       comm_event->comm.pid, 0,
564 						       process, tool, machine,
565 						       mmap_data, proc_map_timeout)) {
566 				err = -1;
567 				break;
568 			}
569 		}
570 	}
571 	free(fork_event);
572 out_free_mmap:
573 	free(mmap_event);
574 out_free_comm:
575 	free(comm_event);
576 out:
577 	return err;
578 }
579 
580 int perf_event__synthesize_threads(struct perf_tool *tool,
581 				   perf_event__handler_t process,
582 				   struct machine *machine,
583 				   bool mmap_data,
584 				   unsigned int proc_map_timeout)
585 {
586 	DIR *proc;
587 	char proc_path[PATH_MAX];
588 	struct dirent *dirent;
589 	union perf_event *comm_event, *mmap_event, *fork_event;
590 	int err = -1;
591 
592 	if (machine__is_default_guest(machine))
593 		return 0;
594 
595 	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
596 	if (comm_event == NULL)
597 		goto out;
598 
599 	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
600 	if (mmap_event == NULL)
601 		goto out_free_comm;
602 
603 	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
604 	if (fork_event == NULL)
605 		goto out_free_mmap;
606 
607 	snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
608 	proc = opendir(proc_path);
609 
610 	if (proc == NULL)
611 		goto out_free_fork;
612 
613 	while ((dirent = readdir(proc)) != NULL) {
614 		char *end;
615 		pid_t pid = strtol(dirent->d_name, &end, 10);
616 
617 		if (*end) /* only interested in proper numerical dirents */
618 			continue;
619 		/*
620 		 * We may race with an exiting thread, so don't stop just because
621 		 * one thread couldn't be synthesized.
622 		 */
623 		__event__synthesize_thread(comm_event, mmap_event, fork_event, pid,
624 					   1, process, tool, machine, mmap_data,
625 					   proc_map_timeout);
626 	}
627 
628 	err = 0;
629 	closedir(proc);
630 out_free_fork:
631 	free(fork_event);
632 out_free_mmap:
633 	free(mmap_event);
634 out_free_comm:
635 	free(comm_event);
636 out:
637 	return err;
638 }
639 
640 struct process_symbol_args {
641 	const char *name;
642 	u64	   start;
643 };
644 
645 static int find_symbol_cb(void *arg, const char *name, char type,
646 			  u64 start)
647 {
648 	struct process_symbol_args *args = arg;
649 
650 	/*
651 	 * Must be a function or at least an alias, as on PARISC64, where "_text"
652 	 * is an 'A'-type symbol at the same address as "_stext".
653 	 */
654 	if (!(symbol_type__is_a(type, MAP__FUNCTION) ||
655 	      type == 'A') || strcmp(name, args->name))
656 		return 0;
657 
658 	args->start = start;
659 	return 1;
660 }
661 
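/* Returns the address of @symbol_name, or 0 if it is not found in kallsyms. */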
662 u64 kallsyms__get_function_start(const char *kallsyms_filename,
663 				 const char *symbol_name)
664 {
665 	struct process_symbol_args args = { .name = symbol_name, };
666 
667 	if (kallsyms__parse(kallsyms_filename, &args, find_symbol_cb) <= 0)
668 		return 0;
669 
670 	return args.start;
671 }
672 
673 int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
674 				       perf_event__handler_t process,
675 				       struct machine *machine)
676 {
677 	size_t size;
678 	const char *mmap_name;
679 	char name_buff[PATH_MAX];
680 	struct map *map = machine__kernel_map(machine);
681 	struct kmap *kmap;
682 	int err;
683 	union perf_event *event;
684 
685 	if (symbol_conf.kptr_restrict)
686 		return -1;
687 	if (map == NULL)
688 		return -1;
689 
690 	/*
691 	 * We should get this from /sys/kernel/sections/.text, but until that is
692 	 * available use this approach, and once it is, keep this as a fallback
693 	 * for older kernels.
694 	 */
695 	event = zalloc((sizeof(event->mmap) + machine->id_hdr_size));
696 	if (event == NULL) {
697 		pr_debug("Not enough memory synthesizing mmap event "
698 			 "for the kernel\n");
699 		return -1;
700 	}
701 
702 	mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff));
703 	if (machine__is_host(machine)) {
704 		/*
705 		 * kernel uses PERF_RECORD_MISC_USER for user space maps,
706 		 * see kernel/perf_event.c __perf_event_mmap
707 		 */
708 		event->header.misc = PERF_RECORD_MISC_KERNEL;
709 	} else {
710 		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
711 	}
712 
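	/*
	 * kmap->ref_reloc_sym is typically "_text" (or "_stext"), resolved via
	 * kallsyms; recording its address in pgoff lets later processing
	 * correct for any relocation of the running kernel.
	 */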
713 	kmap = map__kmap(map);
714 	size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
715 			"%s%s", mmap_name, kmap->ref_reloc_sym->name) + 1;
716 	size = PERF_ALIGN(size, sizeof(u64));
717 	event->mmap.header.type = PERF_RECORD_MMAP;
718 	event->mmap.header.size = (sizeof(event->mmap) -
719 			(sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
720 	event->mmap.pgoff = kmap->ref_reloc_sym->addr;
721 	event->mmap.start = map->start;
722 	event->mmap.len   = map->end - event->mmap.start;
723 	event->mmap.pid   = machine->pid;
724 
725 	err = perf_tool__process_synth_event(tool, event, machine, process);
726 	free(event);
727 
728 	return err;
729 }
730 
731 int perf_event__synthesize_thread_map2(struct perf_tool *tool,
732 				      struct thread_map *threads,
733 				      perf_event__handler_t process,
734 				      struct machine *machine)
735 {
736 	union perf_event *event;
737 	int i, err, size;
738 
739 	size  = sizeof(event->thread_map);
740 	size +=	threads->nr * sizeof(event->thread_map.entries[0]);
741 
742 	event = zalloc(size);
743 	if (!event)
744 		return -ENOMEM;
745 
746 	event->header.type = PERF_RECORD_THREAD_MAP;
747 	event->header.size = size;
748 	event->thread_map.nr = threads->nr;
749 
750 	for (i = 0; i < threads->nr; i++) {
751 		struct thread_map_event_entry *entry = &event->thread_map.entries[i];
752 		char *comm = thread_map__comm(threads, i);
753 
754 		if (!comm)
755 			comm = (char *) "";
756 
757 		entry->pid = thread_map__pid(threads, i);
758 		strncpy((char *) &entry->comm, comm, sizeof(entry->comm));
759 	}
760 
761 	err = process(tool, event, NULL, machine);
762 
763 	free(event);
764 	return err;
765 }
766 
767 static void synthesize_cpus(struct cpu_map_entries *cpus,
768 			    struct cpu_map *map)
769 {
770 	int i;
771 
772 	cpus->nr = map->nr;
773 
774 	for (i = 0; i < map->nr; i++)
775 		cpus->cpu[i] = map->map[i];
776 }
777 
778 static void synthesize_mask(struct cpu_map_mask *mask,
779 			    struct cpu_map *map, int max)
780 {
781 	int i;
782 
783 	mask->nr = BITS_TO_LONGS(max);
784 	mask->long_size = sizeof(long);
785 
786 	for (i = 0; i < map->nr; i++)
787 		set_bit(map->map[i], mask->mask);
788 }
789 
790 static size_t cpus_size(struct cpu_map *map)
791 {
792 	return sizeof(struct cpu_map_entries) + map->nr * sizeof(u16);
793 }
794 
795 static size_t mask_size(struct cpu_map *map, int *max)
796 {
797 	int i;
798 
799 	*max = 0;
800 
801 	for (i = 0; i < map->nr; i++) {
802 		/* the mask must hold bit 'cpu', i.e. cpu + 1 bits in total */
803 		int bit = map->map[i] + 1;
804 
805 		if (bit > *max)
806 			*max = bit;
807 	}
808 
809 	return sizeof(struct cpu_map_mask) + BITS_TO_LONGS(*max) * sizeof(long);
810 }
811 
812 void *cpu_map_data__alloc(struct cpu_map *map, size_t *size, u16 *type, int *max)
813 {
814 	size_t size_cpus, size_mask;
815 	bool is_dummy = cpu_map__empty(map);
816 
817 	/*
818 	 * Both array and mask data have variable size based
819 	 * on the number of cpus and their actual values.
820 	 * The size of the 'struct cpu_map_data' is:
821 	 *
822 	 *   array = size of 'struct cpu_map_entries' +
823 	 *           number of cpus * sizeof(u64)
824 	 *
825 	 *   mask  = size of 'struct cpu_map_mask' +
826 	 *           maximum cpu bit converted to size of longs
827 	 *
828 	 * and finally + the size of 'struct cpu_map_data'.
829 	 */
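	/*
	 * For example, a map holding only CPU 255 is smaller as a cpu list
	 * (a single u16 entry) than as a 256-bit mask, while a dense map of
	 * CPUs 0-127 is smaller as a mask, so pick whichever encoding wins.
	 */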
830 	size_cpus = cpus_size(map);
831 	size_mask = mask_size(map, max);
832 
833 	if (is_dummy || (size_cpus < size_mask)) {
834 		*size += size_cpus;
835 		*type  = PERF_CPU_MAP__CPUS;
836 	} else {
837 		*size += size_mask;
838 		*type  = PERF_CPU_MAP__MASK;
839 	}
840 
841 	*size += sizeof(struct cpu_map_data);
842 	return zalloc(*size);
843 }
844 
845 void cpu_map_data__synthesize(struct cpu_map_data *data, struct cpu_map *map,
846 			      u16 type, int max)
847 {
848 	data->type = type;
849 
850 	switch (type) {
851 	case PERF_CPU_MAP__CPUS:
852 		synthesize_cpus((struct cpu_map_entries *) data->data, map);
853 		break;
854 	case PERF_CPU_MAP__MASK:
855 		synthesize_mask((struct cpu_map_mask *) data->data, map, max);
856 	default:
857 		break;
858 	}
859 }
860 
861 static struct cpu_map_event* cpu_map_event__new(struct cpu_map *map)
862 {
863 	size_t size = sizeof(struct cpu_map_event);
864 	struct cpu_map_event *event;
865 	int max;
866 	u16 type;
867 
868 	event = cpu_map_data__alloc(map, &size, &type, &max);
869 	if (!event)
870 		return NULL;
871 
872 	event->header.type = PERF_RECORD_CPU_MAP;
873 	event->header.size = size;
874 	event->data.type   = type;
875 
876 	cpu_map_data__synthesize(&event->data, map, type, max);
877 	return event;
878 }
879 
880 int perf_event__synthesize_cpu_map(struct perf_tool *tool,
881 				   struct cpu_map *map,
882 				   perf_event__handler_t process,
883 				   struct machine *machine)
884 {
885 	struct cpu_map_event *event;
886 	int err;
887 
888 	event = cpu_map_event__new(map);
889 	if (!event)
890 		return -ENOMEM;
891 
892 	err = process(tool, (union perf_event *) event, NULL, machine);
893 
894 	free(event);
895 	return err;
896 }
897 
898 int perf_event__synthesize_stat_config(struct perf_tool *tool,
899 				       struct perf_stat_config *config,
900 				       perf_event__handler_t process,
901 				       struct machine *machine)
902 {
903 	struct stat_config_event *event;
904 	int size, i = 0, err;
905 
906 	size  = sizeof(*event);
907 	size += (PERF_STAT_CONFIG_TERM__MAX * sizeof(event->data[0]));
908 
909 	event = zalloc(size);
910 	if (!event)
911 		return -ENOMEM;
912 
913 	event->header.type = PERF_RECORD_STAT_CONFIG;
914 	event->header.size = size;
915 	event->nr          = PERF_STAT_CONFIG_TERM__MAX;
916 
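/*
 * Emit one (tag, value) pair per config term; the WARN_ONCE below catches
 * the case where a term is added to the enum but not synthesized here.
 */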
917 #define ADD(__term, __val)					\
918 	event->data[i].tag = PERF_STAT_CONFIG_TERM__##__term;	\
919 	event->data[i].val = __val;				\
920 	i++;
921 
922 	ADD(AGGR_MODE,	config->aggr_mode)
923 	ADD(INTERVAL,	config->interval)
924 	ADD(SCALE,	config->scale)
925 
926 	WARN_ONCE(i != PERF_STAT_CONFIG_TERM__MAX,
927 		  "stat config terms unbalanced\n");
928 #undef ADD
929 
930 	err = process(tool, (union perf_event *) event, NULL, machine);
931 
932 	free(event);
933 	return err;
934 }
935 
936 int perf_event__synthesize_stat(struct perf_tool *tool,
937 				u32 cpu, u32 thread, u64 id,
938 				struct perf_counts_values *count,
939 				perf_event__handler_t process,
940 				struct machine *machine)
941 {
942 	struct stat_event event;
943 
944 	event.header.type = PERF_RECORD_STAT;
945 	event.header.size = sizeof(event);
946 	event.header.misc = 0;
947 
948 	event.id        = id;
949 	event.cpu       = cpu;
950 	event.thread    = thread;
951 	event.val       = count->val;
952 	event.ena       = count->ena;
953 	event.run       = count->run;
954 
955 	return process(tool, (union perf_event *) &event, NULL, machine);
956 }
957 
958 int perf_event__synthesize_stat_round(struct perf_tool *tool,
959 				      u64 evtime, u64 type,
960 				      perf_event__handler_t process,
961 				      struct machine *machine)
962 {
963 	struct stat_round_event event;
964 
965 	event.header.type = PERF_RECORD_STAT_ROUND;
966 	event.header.size = sizeof(event);
967 	event.header.misc = 0;
968 
969 	event.time = evtime;
970 	event.type = type;
971 
972 	return process(tool, (union perf_event *) &event, NULL, machine);
973 }
974 
975 void perf_event__read_stat_config(struct perf_stat_config *config,
976 				  struct stat_config_event *event)
977 {
978 	unsigned i;
979 
980 	for (i = 0; i < event->nr; i++) {
981 
982 		switch (event->data[i].tag) {
983 #define CASE(__term, __val)					\
984 		case PERF_STAT_CONFIG_TERM__##__term:		\
985 			config->__val = event->data[i].val;	\
986 			break;
987 
988 		CASE(AGGR_MODE, aggr_mode)
989 		CASE(SCALE,     scale)
990 		CASE(INTERVAL,  interval)
991 #undef CASE
992 		default:
993 			pr_warning("unknown stat config term %" PRIu64 "\n",
994 				   event->data[i].tag);
995 		}
996 	}
997 }
998 
999 size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp)
1000 {
1001 	const char *s;
1002 
1003 	if (event->header.misc & PERF_RECORD_MISC_COMM_EXEC)
1004 		s = " exec";
1005 	else
1006 		s = "";
1007 
1008 	return fprintf(fp, "%s: %s:%d/%d\n", s, event->comm.comm, event->comm.pid, event->comm.tid);
1009 }
1010 
1011 int perf_event__process_comm(struct perf_tool *tool __maybe_unused,
1012 			     union perf_event *event,
1013 			     struct perf_sample *sample,
1014 			     struct machine *machine)
1015 {
1016 	return machine__process_comm_event(machine, event, sample);
1017 }
1018 
1019 int perf_event__process_lost(struct perf_tool *tool __maybe_unused,
1020 			     union perf_event *event,
1021 			     struct perf_sample *sample,
1022 			     struct machine *machine)
1023 {
1024 	return machine__process_lost_event(machine, event, sample);
1025 }
1026 
1027 int perf_event__process_aux(struct perf_tool *tool __maybe_unused,
1028 			    union perf_event *event,
1029 			    struct perf_sample *sample __maybe_unused,
1030 			    struct machine *machine)
1031 {
1032 	return machine__process_aux_event(machine, event);
1033 }
1034 
1035 int perf_event__process_itrace_start(struct perf_tool *tool __maybe_unused,
1036 				     union perf_event *event,
1037 				     struct perf_sample *sample __maybe_unused,
1038 				     struct machine *machine)
1039 {
1040 	return machine__process_itrace_start_event(machine, event);
1041 }
1042 
1043 int perf_event__process_lost_samples(struct perf_tool *tool __maybe_unused,
1044 				     union perf_event *event,
1045 				     struct perf_sample *sample,
1046 				     struct machine *machine)
1047 {
1048 	return machine__process_lost_samples_event(machine, event, sample);
1049 }
1050 
1051 int perf_event__process_switch(struct perf_tool *tool __maybe_unused,
1052 			       union perf_event *event,
1053 			       struct perf_sample *sample __maybe_unused,
1054 			       struct machine *machine)
1055 {
1056 	return machine__process_switch_event(machine, event);
1057 }
1058 
1059 size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
1060 {
1061 	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %c %s\n",
1062 		       event->mmap.pid, event->mmap.tid, event->mmap.start,
1063 		       event->mmap.len, event->mmap.pgoff,
1064 		       (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x',
1065 		       event->mmap.filename);
1066 }
1067 
1068 size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp)
1069 {
1070 	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64
1071 			   " %02x:%02x %"PRIu64" %"PRIu64"]: %c%c%c%c %s\n",
1072 		       event->mmap2.pid, event->mmap2.tid, event->mmap2.start,
1073 		       event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj,
1074 		       event->mmap2.min, event->mmap2.ino,
1075 		       event->mmap2.ino_generation,
1076 		       (event->mmap2.prot & PROT_READ) ? 'r' : '-',
1077 		       (event->mmap2.prot & PROT_WRITE) ? 'w' : '-',
1078 		       (event->mmap2.prot & PROT_EXEC) ? 'x' : '-',
1079 		       (event->mmap2.flags & MAP_SHARED) ? 's' : 'p',
1080 		       event->mmap2.filename);
1081 }
1082 
1083 size_t perf_event__fprintf_thread_map(union perf_event *event, FILE *fp)
1084 {
1085 	struct thread_map *threads = thread_map__new_event(&event->thread_map);
1086 	size_t ret;
1087 
1088 	ret = fprintf(fp, " nr: ");
1089 
1090 	if (threads)
1091 		ret += thread_map__fprintf(threads, fp);
1092 	else
1093 		ret += fprintf(fp, "failed to get threads from event\n");
1094 
1095 	thread_map__put(threads);
1096 	return ret;
1097 }
1098 
1099 size_t perf_event__fprintf_cpu_map(union perf_event *event, FILE *fp)
1100 {
1101 	struct cpu_map *cpus = cpu_map__new_data(&event->cpu_map.data);
1102 	size_t ret;
1103 
1104 	ret = fprintf(fp, ": ");
1105 
1106 	if (cpus)
1107 		ret += cpu_map__fprintf(cpus, fp);
1108 	else
1109 		ret += fprintf(fp, "failed to get cpumap from event\n");
1110 
1111 	cpu_map__put(cpus);
1112 	return ret;
1113 }
1114 
1115 int perf_event__process_mmap(struct perf_tool *tool __maybe_unused,
1116 			     union perf_event *event,
1117 			     struct perf_sample *sample,
1118 			     struct machine *machine)
1119 {
1120 	return machine__process_mmap_event(machine, event, sample);
1121 }
1122 
1123 int perf_event__process_mmap2(struct perf_tool *tool __maybe_unused,
1124 			     union perf_event *event,
1125 			     struct perf_sample *sample,
1126 			     struct machine *machine)
1127 {
1128 	return machine__process_mmap2_event(machine, event, sample);
1129 }
1130 
1131 size_t perf_event__fprintf_task(union perf_event *event, FILE *fp)
1132 {
1133 	return fprintf(fp, "(%d:%d):(%d:%d)\n",
1134 		       event->fork.pid, event->fork.tid,
1135 		       event->fork.ppid, event->fork.ptid);
1136 }
1137 
1138 int perf_event__process_fork(struct perf_tool *tool __maybe_unused,
1139 			     union perf_event *event,
1140 			     struct perf_sample *sample,
1141 			     struct machine *machine)
1142 {
1143 	return machine__process_fork_event(machine, event, sample);
1144 }
1145 
1146 int perf_event__process_exit(struct perf_tool *tool __maybe_unused,
1147 			     union perf_event *event,
1148 			     struct perf_sample *sample,
1149 			     struct machine *machine)
1150 {
1151 	return machine__process_exit_event(machine, event, sample);
1152 }
1153 
1154 size_t perf_event__fprintf_aux(union perf_event *event, FILE *fp)
1155 {
1156 	return fprintf(fp, " offset: %#"PRIx64" size: %#"PRIx64" flags: %#"PRIx64" [%s%s]\n",
1157 		       event->aux.aux_offset, event->aux.aux_size,
1158 		       event->aux.flags,
1159 		       event->aux.flags & PERF_AUX_FLAG_TRUNCATED ? "T" : "",
1160 		       event->aux.flags & PERF_AUX_FLAG_OVERWRITE ? "O" : "");
1161 }
1162 
1163 size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp)
1164 {
1165 	return fprintf(fp, " pid: %u tid: %u\n",
1166 		       event->itrace_start.pid, event->itrace_start.tid);
1167 }
1168 
1169 size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp)
1170 {
1171 	bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
1172 	const char *in_out = out ? "OUT" : "IN ";
1173 
1174 	if (event->header.type == PERF_RECORD_SWITCH)
1175 		return fprintf(fp, " %s\n", in_out);
1176 
1177 	return fprintf(fp, " %s  %s pid/tid: %5u/%-5u\n",
1178 		       in_out, out ? "next" : "prev",
1179 		       event->context_switch.next_prev_pid,
1180 		       event->context_switch.next_prev_tid);
1181 }
1182 
1183 size_t perf_event__fprintf(union perf_event *event, FILE *fp)
1184 {
1185 	size_t ret = fprintf(fp, "PERF_RECORD_%s",
1186 			     perf_event__name(event->header.type));
1187 
1188 	switch (event->header.type) {
1189 	case PERF_RECORD_COMM:
1190 		ret += perf_event__fprintf_comm(event, fp);
1191 		break;
1192 	case PERF_RECORD_FORK:
1193 	case PERF_RECORD_EXIT:
1194 		ret += perf_event__fprintf_task(event, fp);
1195 		break;
1196 	case PERF_RECORD_MMAP:
1197 		ret += perf_event__fprintf_mmap(event, fp);
1198 		break;
1199 	case PERF_RECORD_MMAP2:
1200 		ret += perf_event__fprintf_mmap2(event, fp);
1201 		break;
1202 	case PERF_RECORD_AUX:
1203 		ret += perf_event__fprintf_aux(event, fp);
1204 		break;
1205 	case PERF_RECORD_ITRACE_START:
1206 		ret += perf_event__fprintf_itrace_start(event, fp);
1207 		break;
1208 	case PERF_RECORD_SWITCH:
1209 	case PERF_RECORD_SWITCH_CPU_WIDE:
1210 		ret += perf_event__fprintf_switch(event, fp);
1211 		break;
1212 	default:
1213 		ret += fprintf(fp, "\n");
1214 	}
1215 
1216 	return ret;
1217 }
1218 
1219 int perf_event__process(struct perf_tool *tool __maybe_unused,
1220 			union perf_event *event,
1221 			struct perf_sample *sample,
1222 			struct machine *machine)
1223 {
1224 	return machine__process_event(machine, event, sample);
1225 }
1226 
1227 void thread__find_addr_map(struct thread *thread, u8 cpumode,
1228 			   enum map_type type, u64 addr,
1229 			   struct addr_location *al)
1230 {
1231 	struct map_groups *mg = thread->mg;
1232 	struct machine *machine = mg->machine;
1233 	bool load_map = false;
1234 
1235 	al->machine = machine;
1236 	al->thread = thread;
1237 	al->addr = addr;
1238 	al->cpumode = cpumode;
1239 	al->filtered = 0;
1240 
1241 	if (machine == NULL) {
1242 		al->map = NULL;
1243 		return;
1244 	}
1245 
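	/*
	 * Map the sample cpumode to an addr_location level character:
	 * 'k' host kernel, '.' host user, 'g' guest kernel, 'u' guest user,
	 * and 'H' for anything that cannot be resolved (e.g. hypervisor).
	 */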
1246 	if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
1247 		al->level = 'k';
1248 		mg = &machine->kmaps;
1249 		load_map = true;
1250 	} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
1251 		al->level = '.';
1252 	} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
1253 		al->level = 'g';
1254 		mg = &machine->kmaps;
1255 		load_map = true;
1256 	} else if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest) {
1257 		al->level = 'u';
1258 	} else {
1259 		al->level = 'H';
1260 		al->map = NULL;
1261 
1262 		if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
1263 			cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
1264 			!perf_guest)
1265 			al->filtered |= (1 << HIST_FILTER__GUEST);
1266 		if ((cpumode == PERF_RECORD_MISC_USER ||
1267 			cpumode == PERF_RECORD_MISC_KERNEL) &&
1268 			!perf_host)
1269 			al->filtered |= (1 << HIST_FILTER__HOST);
1270 
1271 		return;
1272 	}
1273 try_again:
1274 	al->map = map_groups__find(mg, type, al->addr);
1275 	if (al->map == NULL) {
1276 		/*
1277 		 * If this is outside of all known maps, and is a negative
1278 		 * address, try to look it up in the kernel dso, as it might be
1279 		 * a vsyscall or vdso (which executes in user-mode).
1280 		 *
1281 		 * XXX This is nasty, we should have a symbol list in the
1282 		 * "[vdso]" dso, but for now let's use the old trick of looking
1283 		 * in the whole kernel symbol list.
1284 		 */
1285 		if (cpumode == PERF_RECORD_MISC_USER && machine &&
1286 		    mg != &machine->kmaps &&
1287 		    machine__kernel_ip(machine, al->addr)) {
1288 			mg = &machine->kmaps;
1289 			load_map = true;
1290 			goto try_again;
1291 		}
1292 	} else {
1293 		/*
1294 		 * Kernel maps might be changed when loading symbols so loading
1295 		 * must be done prior to using kernel maps.
1296 		 */
1297 		if (load_map)
1298 			map__load(al->map);
1299 		al->addr = al->map->map_ip(al->map, al->addr);
1300 	}
1301 }
1302 
1303 void thread__find_addr_location(struct thread *thread,
1304 				u8 cpumode, enum map_type type, u64 addr,
1305 				struct addr_location *al)
1306 {
1307 	thread__find_addr_map(thread, cpumode, type, addr, al);
1308 	if (al->map != NULL)
1309 		al->sym = map__find_symbol(al->map, al->addr);
1310 	else
1311 		al->sym = NULL;
1312 }
1313 
1314 /*
1315  * Callers need to drop the reference to al->thread, obtained in
1316  * machine__findnew_thread()
1317  */
1318 int machine__resolve(struct machine *machine, struct addr_location *al,
1319 		     struct perf_sample *sample)
1320 {
1321 	struct thread *thread = machine__findnew_thread(machine, sample->pid,
1322 							sample->tid);
1323 
1324 	if (thread == NULL)
1325 		return -1;
1326 
1327 	dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);
1328 	/*
1329 	 * Have we already created the kernel maps for this machine?
1330 	 *
1331 	 * This should have happened earlier, when we processed the kernel MMAP
1332 	 * events, but for older perf.data files there was no such thing, so do
1333 	 * it now.
1334 	 */
1335 	if (sample->cpumode == PERF_RECORD_MISC_KERNEL &&
1336 	    machine__kernel_map(machine) == NULL)
1337 		machine__create_kernel_maps(machine);
1338 
1339 	thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, sample->ip, al);
1340 	dump_printf(" ...... dso: %s\n",
1341 		    al->map ? al->map->dso->long_name :
1342 			al->level == 'H' ? "[hypervisor]" : "<not found>");
1343 
1344 	if (thread__is_filtered(thread))
1345 		al->filtered |= (1 << HIST_FILTER__THREAD);
1346 
1347 	al->sym = NULL;
1348 	al->cpu = sample->cpu;
1349 	al->socket = -1;
1350 
1351 	if (al->cpu >= 0) {
1352 		struct perf_env *env = machine->env;
1353 
1354 		if (env && env->cpu)
1355 			al->socket = env->cpu[al->cpu].socket_id;
1356 	}
1357 
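	/*
	 * Filtering only marks the sample via bits in al->filtered; it is up
	 * to the caller to decide whether to actually drop it.
	 */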
1358 	if (al->map) {
1359 		struct dso *dso = al->map->dso;
1360 
1361 		if (symbol_conf.dso_list &&
1362 		    (!dso || !(strlist__has_entry(symbol_conf.dso_list,
1363 						  dso->short_name) ||
1364 			       (dso->short_name != dso->long_name &&
1365 				strlist__has_entry(symbol_conf.dso_list,
1366 						   dso->long_name))))) {
1367 			al->filtered |= (1 << HIST_FILTER__DSO);
1368 		}
1369 
1370 		al->sym = map__find_symbol(al->map, al->addr);
1371 	}
1372 
1373 	if (symbol_conf.sym_list &&
1374 		(!al->sym || !strlist__has_entry(symbol_conf.sym_list,
1375 						al->sym->name))) {
1376 		al->filtered |= (1 << HIST_FILTER__SYMBOL);
1377 	}
1378 
1379 	return 0;
1380 }
1381 
1382 /*
1383  * The preprocess_sample method will return with reference counts for the
1384  * entries it fills in; when done using them (and perhaps after taking extra
1385  * ref counts to keep a pointer to one of those entries) it must be paired
1386  * with addr_location__put(), so that the refcounts can be decremented.
1387  */
1388 void addr_location__put(struct addr_location *al)
1389 {
1390 	thread__zput(al->thread);
1391 }
1392 
1393 bool is_bts_event(struct perf_event_attr *attr)
1394 {
1395 	return attr->type == PERF_TYPE_HARDWARE &&
1396 	       (attr->config & PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
1397 	       attr->sample_period == 1;
1398 }
1399 
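/*
 * Only some events carry a meaningful 'addr' field that can be resolved to
 * a symbol: page-fault software events and BTS branch records.
 */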
1400 bool sample_addr_correlates_sym(struct perf_event_attr *attr)
1401 {
1402 	if (attr->type == PERF_TYPE_SOFTWARE &&
1403 	    (attr->config == PERF_COUNT_SW_PAGE_FAULTS ||
1404 	     attr->config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
1405 	     attr->config == PERF_COUNT_SW_PAGE_FAULTS_MAJ))
1406 		return true;
1407 
1408 	if (is_bts_event(attr))
1409 		return true;
1410 
1411 	return false;
1412 }
1413 
1414 void thread__resolve(struct thread *thread, struct addr_location *al,
1415 		     struct perf_sample *sample)
1416 {
1417 	thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, sample->addr, al);
1418 	if (!al->map)
1419 		thread__find_addr_map(thread, sample->cpumode, MAP__VARIABLE,
1420 				      sample->addr, al);
1421 
1422 	al->cpu = sample->cpu;
1423 	al->sym = NULL;
1424 
1425 	if (al->map)
1426 		al->sym = map__find_symbol(al->map, al->addr);
1427 }
1428