/* xref: /openbmc/linux/tools/perf/util/event.c (revision b9890054) */
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <perf/cpumap.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
#include <linux/perf_event.h>
#include <linux/zalloc.h>
#include "cpumap.h"
#include "dso.h"
#include "event.h"
#include "debug.h"
#include "hist.h"
#include "machine.h"
#include "sort.h"
#include "string2.h"
#include "strlist.h"
#include "thread.h"
#include "thread_map.h"
#include "time-utils.h"
#include <linux/ctype.h>
#include "map.h"
#include "util/namespaces.h"
#include "symbol.h"
#include "symbol/kallsyms.h"
#include "asm/bug.h"
#include "stat.h"
#include "session.h"
#include "bpf-event.h"
#include "tool.h"
#include "../perf.h"

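/* Printable names for the PERF_RECORD_* event types, indexed by type. */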
static const char *perf_event__names[] = {
	[0]					= "TOTAL",
	[PERF_RECORD_MMAP]			= "MMAP",
	[PERF_RECORD_MMAP2]			= "MMAP2",
	[PERF_RECORD_LOST]			= "LOST",
	[PERF_RECORD_COMM]			= "COMM",
	[PERF_RECORD_EXIT]			= "EXIT",
	[PERF_RECORD_THROTTLE]			= "THROTTLE",
	[PERF_RECORD_UNTHROTTLE]		= "UNTHROTTLE",
	[PERF_RECORD_FORK]			= "FORK",
	[PERF_RECORD_READ]			= "READ",
	[PERF_RECORD_SAMPLE]			= "SAMPLE",
	[PERF_RECORD_AUX]			= "AUX",
	[PERF_RECORD_ITRACE_START]		= "ITRACE_START",
	[PERF_RECORD_LOST_SAMPLES]		= "LOST_SAMPLES",
	[PERF_RECORD_SWITCH]			= "SWITCH",
	[PERF_RECORD_SWITCH_CPU_WIDE]		= "SWITCH_CPU_WIDE",
	[PERF_RECORD_NAMESPACES]		= "NAMESPACES",
	[PERF_RECORD_KSYMBOL]			= "KSYMBOL",
	[PERF_RECORD_BPF_EVENT]			= "BPF_EVENT",
	[PERF_RECORD_HEADER_ATTR]		= "ATTR",
	[PERF_RECORD_HEADER_EVENT_TYPE]		= "EVENT_TYPE",
	[PERF_RECORD_HEADER_TRACING_DATA]	= "TRACING_DATA",
	[PERF_RECORD_HEADER_BUILD_ID]		= "BUILD_ID",
	[PERF_RECORD_FINISHED_ROUND]		= "FINISHED_ROUND",
	[PERF_RECORD_ID_INDEX]			= "ID_INDEX",
	[PERF_RECORD_AUXTRACE_INFO]		= "AUXTRACE_INFO",
	[PERF_RECORD_AUXTRACE]			= "AUXTRACE",
	[PERF_RECORD_AUXTRACE_ERROR]		= "AUXTRACE_ERROR",
	[PERF_RECORD_THREAD_MAP]		= "THREAD_MAP",
	[PERF_RECORD_CPU_MAP]			= "CPU_MAP",
	[PERF_RECORD_STAT_CONFIG]		= "STAT_CONFIG",
	[PERF_RECORD_STAT]			= "STAT",
	[PERF_RECORD_STAT_ROUND]		= "STAT_ROUND",
	[PERF_RECORD_EVENT_UPDATE]		= "EVENT_UPDATE",
	[PERF_RECORD_TIME_CONV]			= "TIME_CONV",
	[PERF_RECORD_HEADER_FEATURE]		= "FEATURE",
	[PERF_RECORD_COMPRESSED]		= "COMPRESSED",
};

const char *perf_event__name(unsigned int id)
{
	if (id >= ARRAY_SIZE(perf_event__names))
		return "INVALID";
	if (!perf_event__names[id])
		return "UNKNOWN";
	return perf_event__names[id];
}

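/*
 * Helper state and callback for kallsyms__parse(): record the start address
 * of the symbol whose name matches args->name.
 */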
struct process_symbol_args {
	const char *name;
	u64	   start;
};

static int find_symbol_cb(void *arg, const char *name, char type,
			  u64 start)
{
	struct process_symbol_args *args = arg;

	/*
	 * Must be a function, or at least an alias, as on PARISC64, where "_text"
	 * is an 'A' (absolute) symbol with the same address as "_stext".
	 */
	if (!(kallsyms__is_function(type) ||
	      type == 'A') || strcmp(name, args->name))
		return 0;

	args->start = start;
	return 1;
}

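/*
 * Look up @symbol_name in @kallsyms_filename and store its start address in
 * *addr. Returns 0 on success, -1 if the symbol was not found or the file
 * could not be parsed.
 */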
int kallsyms__get_function_start(const char *kallsyms_filename,
				 const char *symbol_name, u64 *addr)
{
	struct process_symbol_args args = { .name = symbol_name, };

	if (kallsyms__parse(kallsyms_filename, &args, find_symbol_cb) <= 0)
		return -1;

	*addr = args.start;
	return 0;
}

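/*
 * Copy the terms carried in a PERF_RECORD_STAT_CONFIG event into a
 * perf_stat_config, warning about any term this code does not know about.
 */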
void perf_event__read_stat_config(struct perf_stat_config *config,
				  struct perf_record_stat_config *event)
{
	unsigned i;

	for (i = 0; i < event->nr; i++) {

		switch (event->data[i].tag) {
#define CASE(__term, __val)					\
		case PERF_STAT_CONFIG_TERM__##__term:		\
			config->__val = event->data[i].val;	\
			break;

		CASE(AGGR_MODE, aggr_mode)
		CASE(SCALE,     scale)
		CASE(INTERVAL,  interval)
#undef CASE
		default:
			pr_warning("unknown stat config term %" PRI_lu64 "\n",
				   event->data[i].tag);
		}
	}
}

size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp)
{
	const char *s;

	if (event->header.misc & PERF_RECORD_MISC_COMM_EXEC)
		s = " exec";
	else
		s = "";

	return fprintf(fp, "%s: %s:%d/%d\n", s, event->comm.comm, event->comm.pid, event->comm.tid);
}

size_t perf_event__fprintf_namespaces(union perf_event *event, FILE *fp)
{
	size_t ret = 0;
	struct perf_ns_link_info *ns_link_info;
	u32 nr_namespaces, idx;

	ns_link_info = event->namespaces.link_info;
	nr_namespaces = event->namespaces.nr_namespaces;

	ret += fprintf(fp, " %d/%d - nr_namespaces: %u\n\t\t[",
		       event->namespaces.pid,
		       event->namespaces.tid,
		       nr_namespaces);

	for (idx = 0; idx < nr_namespaces; idx++) {
		if (idx && (idx % 4 == 0))
			ret += fprintf(fp, "\n\t\t ");

		ret  += fprintf(fp, "%u/%s: %" PRIu64 "/%#" PRIx64 "%s", idx,
				perf_ns__name(idx), (u64)ns_link_info[idx].dev,
				(u64)ns_link_info[idx].ino,
				((idx + 1) != nr_namespaces) ? ", " : "]\n");
	}

	return ret;
}

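/*
 * perf_tool callbacks for the various event types: each one simply forwards
 * the event to the corresponding machine__process_*() handler.
 */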
int perf_event__process_comm(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_comm_event(machine, event, sample);
}

int perf_event__process_namespaces(struct perf_tool *tool __maybe_unused,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine)
{
	return machine__process_namespaces_event(machine, event, sample);
}

int perf_event__process_lost(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_lost_event(machine, event, sample);
}

int perf_event__process_aux(struct perf_tool *tool __maybe_unused,
			    union perf_event *event,
			    struct perf_sample *sample __maybe_unused,
			    struct machine *machine)
{
	return machine__process_aux_event(machine, event);
}

int perf_event__process_itrace_start(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine)
{
	return machine__process_itrace_start_event(machine, event);
}

int perf_event__process_lost_samples(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_sample *sample,
				     struct machine *machine)
{
	return machine__process_lost_samples_event(machine, event, sample);
}

int perf_event__process_switch(struct perf_tool *tool __maybe_unused,
			       union perf_event *event,
			       struct perf_sample *sample __maybe_unused,
			       struct machine *machine)
{
	return machine__process_switch_event(machine, event);
}

int perf_event__process_ksymbol(struct perf_tool *tool __maybe_unused,
				union perf_event *event,
				struct perf_sample *sample __maybe_unused,
				struct machine *machine)
{
	return machine__process_ksymbol(machine, event, sample);
}

int perf_event__process_bpf(struct perf_tool *tool __maybe_unused,
			    union perf_event *event,
			    struct perf_sample *sample,
			    struct machine *machine)
{
	return machine__process_bpf(machine, event, sample);
}

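/* Pretty-printers for the payload of individual event record types. */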
size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " %d/%d: [%#" PRI_lx64 "(%#" PRI_lx64 ") @ %#" PRI_lx64 "]: %c %s\n",
		       event->mmap.pid, event->mmap.tid, event->mmap.start,
		       event->mmap.len, event->mmap.pgoff,
		       (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x',
		       event->mmap.filename);
}

size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " %d/%d: [%#" PRI_lx64 "(%#" PRI_lx64 ") @ %#" PRI_lx64
			   " %02x:%02x %"PRI_lu64" %"PRI_lu64"]: %c%c%c%c %s\n",
		       event->mmap2.pid, event->mmap2.tid, event->mmap2.start,
		       event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj,
		       event->mmap2.min, event->mmap2.ino,
		       event->mmap2.ino_generation,
		       (event->mmap2.prot & PROT_READ) ? 'r' : '-',
		       (event->mmap2.prot & PROT_WRITE) ? 'w' : '-',
		       (event->mmap2.prot & PROT_EXEC) ? 'x' : '-',
		       (event->mmap2.flags & MAP_SHARED) ? 's' : 'p',
		       event->mmap2.filename);
}

size_t perf_event__fprintf_thread_map(union perf_event *event, FILE *fp)
{
	struct perf_thread_map *threads = thread_map__new_event(&event->thread_map);
	size_t ret;

	ret = fprintf(fp, " nr: ");

	if (threads)
		ret += thread_map__fprintf(threads, fp);
	else
		ret += fprintf(fp, "failed to get threads from event\n");

	perf_thread_map__put(threads);
	return ret;
}

size_t perf_event__fprintf_cpu_map(union perf_event *event, FILE *fp)
{
	struct perf_cpu_map *cpus = cpu_map__new_data(&event->cpu_map.data);
	size_t ret;

	ret = fprintf(fp, ": ");

	if (cpus)
		ret += cpu_map__fprintf(cpus, fp);
	else
		ret += fprintf(fp, "failed to get cpumap from event\n");

	perf_cpu_map__put(cpus);
	return ret;
}

int perf_event__process_mmap(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_mmap_event(machine, event, sample);
}

int perf_event__process_mmap2(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_mmap2_event(machine, event, sample);
}

size_t perf_event__fprintf_task(union perf_event *event, FILE *fp)
{
	return fprintf(fp, "(%d:%d):(%d:%d)\n",
		       event->fork.pid, event->fork.tid,
		       event->fork.ppid, event->fork.ptid);
}

int perf_event__process_fork(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_fork_event(machine, event, sample);
}

int perf_event__process_exit(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_exit_event(machine, event, sample);
}

size_t perf_event__fprintf_aux(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " offset: %#"PRI_lx64" size: %#"PRI_lx64" flags: %#"PRI_lx64" [%s%s%s]\n",
		       event->aux.aux_offset, event->aux.aux_size,
		       event->aux.flags,
		       event->aux.flags & PERF_AUX_FLAG_TRUNCATED ? "T" : "",
		       event->aux.flags & PERF_AUX_FLAG_OVERWRITE ? "O" : "",
		       event->aux.flags & PERF_AUX_FLAG_PARTIAL   ? "P" : "");
}

size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " pid: %u tid: %u\n",
		       event->itrace_start.pid, event->itrace_start.tid);
}

size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp)
{
	bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
	const char *in_out = !out ? "IN         " :
		!(event->header.misc & PERF_RECORD_MISC_SWITCH_OUT_PREEMPT) ?
				    "OUT        " : "OUT preempt";

	if (event->header.type == PERF_RECORD_SWITCH)
		return fprintf(fp, " %s\n", in_out);

	return fprintf(fp, " %s  %s pid/tid: %5u/%-5u\n",
		       in_out, out ? "next" : "prev",
		       event->context_switch.next_prev_pid,
		       event->context_switch.next_prev_tid);
}

static size_t perf_event__fprintf_lost(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " lost %" PRI_lu64 "\n", event->lost.lost);
}

size_t perf_event__fprintf_ksymbol(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " addr %" PRI_lx64 " len %u type %u flags 0x%x name %s\n",
		       event->ksymbol.addr, event->ksymbol.len,
		       event->ksymbol.ksym_type,
		       event->ksymbol.flags, event->ksymbol.name);
}

size_t perf_event__fprintf_bpf(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " type %u, flags %u, id %u\n",
		       event->bpf.type, event->bpf.flags, event->bpf.id);
}

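/*
 * Print "PERF_RECORD_<name>" followed by a per-type summary of the event
 * payload, or just a newline for types that have no printer here.
 */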
size_t perf_event__fprintf(union perf_event *event, FILE *fp)
{
	size_t ret = fprintf(fp, "PERF_RECORD_%s",
			     perf_event__name(event->header.type));

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret += perf_event__fprintf_comm(event, fp);
		break;
	case PERF_RECORD_FORK:
	case PERF_RECORD_EXIT:
		ret += perf_event__fprintf_task(event, fp);
		break;
	case PERF_RECORD_MMAP:
		ret += perf_event__fprintf_mmap(event, fp);
		break;
	case PERF_RECORD_NAMESPACES:
		ret += perf_event__fprintf_namespaces(event, fp);
		break;
	case PERF_RECORD_MMAP2:
		ret += perf_event__fprintf_mmap2(event, fp);
		break;
	case PERF_RECORD_AUX:
		ret += perf_event__fprintf_aux(event, fp);
		break;
	case PERF_RECORD_ITRACE_START:
		ret += perf_event__fprintf_itrace_start(event, fp);
		break;
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		ret += perf_event__fprintf_switch(event, fp);
		break;
	case PERF_RECORD_LOST:
		ret += perf_event__fprintf_lost(event, fp);
		break;
	case PERF_RECORD_KSYMBOL:
		ret += perf_event__fprintf_ksymbol(event, fp);
		break;
	case PERF_RECORD_BPF_EVENT:
		ret += perf_event__fprintf_bpf(event, fp);
		break;
	default:
		ret += fprintf(fp, "\n");
	}

	return ret;
}

int perf_event__process(struct perf_tool *tool __maybe_unused,
			union perf_event *event,
			struct perf_sample *sample,
			struct machine *machine)
{
	return machine__process_event(machine, event, sample);
}

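/*
 * Resolve @addr to the map containing it in @thread's address space, filling
 * in @al. Kernel and guest kernel addresses are looked up in the machine's
 * kernel maps; cpumodes we are not processing (e.g. guest addresses when
 * perf_guest is not set) get al->filtered marked and a NULL return. On
 * success al->addr is translated through the map's map_ip() hook.
 */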
struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr,
			     struct addr_location *al)
{
	struct maps *maps = thread->maps;
	struct machine *machine = maps->machine;
	bool load_map = false;

	al->maps = maps;
	al->thread = thread;
	al->addr = addr;
	al->cpumode = cpumode;
	al->filtered = 0;

	if (machine == NULL) {
		al->map = NULL;
		return NULL;
	}

	if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
		al->level = 'k';
		al->maps = maps = &machine->kmaps;
		load_map = true;
	} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
		al->level = '.';
	} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
		al->level = 'g';
		al->maps = maps = &machine->kmaps;
		load_map = true;
	} else if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest) {
		al->level = 'u';
	} else {
		al->level = 'H';
		al->map = NULL;

		if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
			cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
			!perf_guest)
			al->filtered |= (1 << HIST_FILTER__GUEST);
		if ((cpumode == PERF_RECORD_MISC_USER ||
			cpumode == PERF_RECORD_MISC_KERNEL) &&
			!perf_host)
			al->filtered |= (1 << HIST_FILTER__HOST);

		return NULL;
	}

	al->map = maps__find(maps, al->addr);
	if (al->map != NULL) {
		/*
		 * Kernel maps might be changed when loading symbols so loading
		 * must be done prior to using kernel maps.
		 */
		if (load_map)
			map__load(al->map);
		al->addr = al->map->map_ip(al->map, al->addr);
	}

	return al->map;
}

/*
 * For branch stacks or branch samples, the sample cpumode might not be
 * correct because it applies only to the sample 'ip' and not necessarily to
 * 'addr' or the branch stack addresses. If possible, use a fallback to deal
 * with those cases.
 */
struct map *thread__find_map_fb(struct thread *thread, u8 cpumode, u64 addr,
				struct addr_location *al)
{
	struct map *map = thread__find_map(thread, cpumode, addr, al);
	struct machine *machine = thread->maps->machine;
	u8 addr_cpumode = machine__addr_cpumode(machine, cpumode, addr);

	if (map || addr_cpumode == cpumode)
		return map;

	return thread__find_map(thread, addr_cpumode, addr, al);
}

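/*
 * Same as the map lookups above, but also resolve the symbol at the
 * translated address, storing it in al->sym; the _fb variant uses the
 * cpumode fallback.
 */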
struct symbol *thread__find_symbol(struct thread *thread, u8 cpumode,
				   u64 addr, struct addr_location *al)
{
	al->sym = NULL;
	if (thread__find_map(thread, cpumode, addr, al))
		al->sym = map__find_symbol(al->map, al->addr);
	return al->sym;
}

struct symbol *thread__find_symbol_fb(struct thread *thread, u8 cpumode,
				      u64 addr, struct addr_location *al)
{
	al->sym = NULL;
	if (thread__find_map_fb(thread, cpumode, addr, al))
		al->sym = map__find_symbol(al->map, al->addr);
	return al->sym;
}

/*
 * Callers need to drop the reference to al->thread, obtained in
 * machine__findnew_thread()
 */
int machine__resolve(struct machine *machine, struct addr_location *al,
		     struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine, sample->pid,
							sample->tid);

	if (thread == NULL)
		return -1;

	dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);
	thread__find_map(thread, sample->cpumode, sample->ip, al);
	dump_printf(" ...... dso: %s\n",
		    al->map ? al->map->dso->long_name :
			al->level == 'H' ? "[hypervisor]" : "<not found>");

	if (thread__is_filtered(thread))
		al->filtered |= (1 << HIST_FILTER__THREAD);

	al->sym = NULL;
	al->cpu = sample->cpu;
	al->socket = -1;
	al->srcline = NULL;

	if (al->cpu >= 0) {
		struct perf_env *env = machine->env;

		if (env && env->cpu)
			al->socket = env->cpu[al->cpu].socket_id;
	}

	if (al->map) {
		struct dso *dso = al->map->dso;

		if (symbol_conf.dso_list &&
		    (!dso || !(strlist__has_entry(symbol_conf.dso_list,
						  dso->short_name) ||
			       (dso->short_name != dso->long_name &&
				strlist__has_entry(symbol_conf.dso_list,
						   dso->long_name))))) {
			al->filtered |= (1 << HIST_FILTER__DSO);
		}

		al->sym = map__find_symbol(al->map, al->addr);
	}

	if (symbol_conf.sym_list &&
		(!al->sym || !strlist__has_entry(symbol_conf.sym_list,
						al->sym->name))) {
		al->filtered |= (1 << HIST_FILTER__SYMBOL);
	}

	return 0;
}

/*
 * machine__resolve() returns with reference counts held on the entries in the
 * addr_location it fills in (currently al->thread). When done using it (and
 * perhaps after getting extra ref counts if keeping a pointer to one of those
 * entries), it must be paired with addr_location__put(), so that the
 * refcounts can be decremented.
 */
void addr_location__put(struct addr_location *al)
{
	thread__zput(al->thread);
}

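/*
 * A hardware branch-instructions event with a sample period of 1 is treated
 * as an Intel BTS style event. For those, and for the page fault software
 * events, the sample 'addr' field correlates with a symbol.
 */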
bool is_bts_event(struct perf_event_attr *attr)
{
	return attr->type == PERF_TYPE_HARDWARE &&
	       (attr->config & PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
	       attr->sample_period == 1;
}

bool sample_addr_correlates_sym(struct perf_event_attr *attr)
{
	if (attr->type == PERF_TYPE_SOFTWARE &&
	    (attr->config == PERF_COUNT_SW_PAGE_FAULTS ||
	     attr->config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
	     attr->config == PERF_COUNT_SW_PAGE_FAULTS_MAJ))
		return true;

	if (is_bts_event(attr))
		return true;

	return false;
}

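/*
 * Resolve sample->addr (as opposed to sample->ip, which machine__resolve()
 * handles) for @thread, using the cpumode fallback, and fill in @al.
 */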
void thread__resolve(struct thread *thread, struct addr_location *al,
		     struct perf_sample *sample)
{
	thread__find_map_fb(thread, sample->cpumode, sample->addr, al);

	al->cpu = sample->cpu;
	al->sym = NULL;

	if (al->map)
		al->sym = map__find_symbol(al->map, al->addr);
}