xref: /openbmc/linux/tools/perf/util/evsel.c (revision 050bb587)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
4  *
5  * Parts came from builtin-{top,stat,record}.c, see those files for further
6  * copyright notes.
7  */
8 
9 #include <byteswap.h>
10 #include <errno.h>
11 #include <inttypes.h>
12 #include <linux/bitops.h>
13 #include <api/fs/fs.h>
14 #include <api/fs/tracing_path.h>
15 #include <linux/hw_breakpoint.h>
16 #include <linux/perf_event.h>
17 #include <linux/compiler.h>
18 #include <linux/err.h>
19 #include <linux/zalloc.h>
20 #include <sys/ioctl.h>
21 #include <sys/resource.h>
22 #include <sys/types.h>
23 #include <dirent.h>
24 #include <stdlib.h>
25 #include <perf/evsel.h>
26 #include "asm/bug.h"
27 #include "bpf_counter.h"
28 #include "callchain.h"
29 #include "cgroup.h"
30 #include "counts.h"
31 #include "event.h"
32 #include "evsel.h"
33 #include "util/env.h"
34 #include "util/evsel_config.h"
35 #include "util/evsel_fprintf.h"
36 #include "evlist.h"
37 #include <perf/cpumap.h>
38 #include "thread_map.h"
39 #include "target.h"
40 #include "perf_regs.h"
41 #include "record.h"
42 #include "debug.h"
43 #include "trace-event.h"
44 #include "stat.h"
45 #include "string2.h"
46 #include "memswap.h"
47 #include "util.h"
48 #include "util/hashmap.h"
49 #include "off_cpu.h"
50 #include "pmu.h"
51 #include "pmus.h"
52 #include "../perf-sys.h"
53 #include "util/parse-branch-options.h"
54 #include "util/bpf-filter.h"
55 #include <internal/xyarray.h>
56 #include <internal/lib.h>
57 #include <internal/threadmap.h>
58 
59 #include <linux/ctype.h>
60 
61 #ifdef HAVE_LIBTRACEEVENT
62 #include <traceevent/event-parse.h>
63 #endif
64 
65 struct perf_missing_features perf_missing_features;
66 
67 static clockid_t clockid;
68 
69 static const char *const perf_tool_event__tool_names[PERF_TOOL_MAX] = {
70 	NULL,
71 	"duration_time",
72 	"user_time",
73 	"system_time",
74 };
75 
76 const char *perf_tool_event__to_str(enum perf_tool_event ev)
77 {
78 	if (ev > PERF_TOOL_NONE && ev < PERF_TOOL_MAX)
79 		return perf_tool_event__tool_names[ev];
80 
81 	return NULL;
82 }
83 
84 enum perf_tool_event perf_tool_event__from_str(const char *str)
85 {
86 	int i;
87 
88 	perf_tool_event__for_each_event(i) {
89 		if (!strcmp(str, perf_tool_event__tool_names[i]))
90 			return i;
91 	}
92 	return PERF_TOOL_NONE;
93 }
94 
95 
96 static int evsel__no_extra_init(struct evsel *evsel __maybe_unused)
97 {
98 	return 0;
99 }
100 
101 void __weak test_attr__ready(void) { }
102 
103 static void evsel__no_extra_fini(struct evsel *evsel __maybe_unused)
104 {
105 }
106 
107 static struct {
108 	size_t	size;
109 	int	(*init)(struct evsel *evsel);
110 	void	(*fini)(struct evsel *evsel);
111 } perf_evsel__object = {
112 	.size = sizeof(struct evsel),
113 	.init = evsel__no_extra_init,
114 	.fini = evsel__no_extra_fini,
115 };
116 
117 int evsel__object_config(size_t object_size, int (*init)(struct evsel *evsel),
118 			 void (*fini)(struct evsel *evsel))
119 {
120 
121 	if (object_size == 0)
122 		goto set_methods;
123 
124 	if (perf_evsel__object.size > object_size)
125 		return -EINVAL;
126 
127 	perf_evsel__object.size = object_size;
128 
129 set_methods:
130 	if (init != NULL)
131 		perf_evsel__object.init = init;
132 
133 	if (fini != NULL)
134 		perf_evsel__object.fini = fini;
135 
136 	return 0;
137 }
138 
139 #define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y))
140 
141 int __evsel__sample_size(u64 sample_type)
142 {
143 	u64 mask = sample_type & PERF_SAMPLE_MASK;
144 	int size = 0;
145 	int i;
146 
147 	for (i = 0; i < 64; i++) {
148 		if (mask & (1ULL << i))
149 			size++;
150 	}
151 
152 	size *= sizeof(u64);
153 
154 	return size;
155 }
156 
157 /**
158  * __perf_evsel__calc_id_pos - calculate id_pos.
159  * @sample_type: sample type
160  *
161  * This function returns the position of the event id (PERF_SAMPLE_ID or
162  * PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of struct
163  * perf_record_sample.
164  */
165 static int __perf_evsel__calc_id_pos(u64 sample_type)
166 {
167 	int idx = 0;
168 
169 	if (sample_type & PERF_SAMPLE_IDENTIFIER)
170 		return 0;
171 
172 	if (!(sample_type & PERF_SAMPLE_ID))
173 		return -1;
174 
175 	if (sample_type & PERF_SAMPLE_IP)
176 		idx += 1;
177 
178 	if (sample_type & PERF_SAMPLE_TID)
179 		idx += 1;
180 
181 	if (sample_type & PERF_SAMPLE_TIME)
182 		idx += 1;
183 
184 	if (sample_type & PERF_SAMPLE_ADDR)
185 		idx += 1;
186 
187 	return idx;
188 }
189 
190 /**
191  * __perf_evsel__calc_is_pos - calculate is_pos.
192  * @sample_type: sample type
193  *
194  * This function returns the position (counting backwards) of the event id
195  * (PERF_SAMPLE_ID or PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if
196  * sample_id_all is used there is an id sample appended to non-sample events.
197  */
198 static int __perf_evsel__calc_is_pos(u64 sample_type)
199 {
200 	int idx = 1;
201 
202 	if (sample_type & PERF_SAMPLE_IDENTIFIER)
203 		return 1;
204 
205 	if (!(sample_type & PERF_SAMPLE_ID))
206 		return -1;
207 
208 	if (sample_type & PERF_SAMPLE_CPU)
209 		idx += 1;
210 
211 	if (sample_type & PERF_SAMPLE_STREAM_ID)
212 		idx += 1;
213 
214 	return idx;
215 }
216 
217 void evsel__calc_id_pos(struct evsel *evsel)
218 {
219 	evsel->id_pos = __perf_evsel__calc_id_pos(evsel->core.attr.sample_type);
220 	evsel->is_pos = __perf_evsel__calc_is_pos(evsel->core.attr.sample_type);
221 }
222 
223 void __evsel__set_sample_bit(struct evsel *evsel,
224 				  enum perf_event_sample_format bit)
225 {
226 	if (!(evsel->core.attr.sample_type & bit)) {
227 		evsel->core.attr.sample_type |= bit;
228 		evsel->sample_size += sizeof(u64);
229 		evsel__calc_id_pos(evsel);
230 	}
231 }
232 
233 void __evsel__reset_sample_bit(struct evsel *evsel,
234 				    enum perf_event_sample_format bit)
235 {
236 	if (evsel->core.attr.sample_type & bit) {
237 		evsel->core.attr.sample_type &= ~bit;
238 		evsel->sample_size -= sizeof(u64);
239 		evsel__calc_id_pos(evsel);
240 	}
241 }
242 
243 void evsel__set_sample_id(struct evsel *evsel,
244 			       bool can_sample_identifier)
245 {
246 	if (can_sample_identifier) {
247 		evsel__reset_sample_bit(evsel, ID);
248 		evsel__set_sample_bit(evsel, IDENTIFIER);
249 	} else {
250 		evsel__set_sample_bit(evsel, ID);
251 	}
252 	evsel->core.attr.read_format |= PERF_FORMAT_ID;
253 }
254 
255 /**
256  * evsel__is_function_event - Return whether given evsel is a function
257  * trace event
258  *
259  * @evsel - evsel selector to be tested
260  *
261  * Return %true if event is function trace event
262  */
263 bool evsel__is_function_event(struct evsel *evsel)
264 {
265 #define FUNCTION_EVENT "ftrace:function"
266 
267 	return evsel->name &&
268 	       !strncmp(FUNCTION_EVENT, evsel->name, sizeof(FUNCTION_EVENT));
269 
270 #undef FUNCTION_EVENT
271 }
272 
273 void evsel__init(struct evsel *evsel,
274 		 struct perf_event_attr *attr, int idx)
275 {
276 	perf_evsel__init(&evsel->core, attr, idx);
277 	evsel->tracking	   = !idx;
278 	evsel->unit	   = strdup("");
279 	evsel->scale	   = 1.0;
280 	evsel->max_events  = ULONG_MAX;
281 	evsel->evlist	   = NULL;
282 	evsel->bpf_obj	   = NULL;
283 	evsel->bpf_fd	   = -1;
284 	INIT_LIST_HEAD(&evsel->config_terms);
285 	INIT_LIST_HEAD(&evsel->bpf_counter_list);
286 	INIT_LIST_HEAD(&evsel->bpf_filters);
287 	perf_evsel__object.init(evsel);
288 	evsel->sample_size = __evsel__sample_size(attr->sample_type);
289 	evsel__calc_id_pos(evsel);
290 	evsel->cmdline_group_boundary = false;
291 	evsel->metric_events = NULL;
292 	evsel->per_pkg_mask  = NULL;
293 	evsel->collect_stat  = false;
294 	evsel->pmu_name      = NULL;
295 	evsel->group_pmu_name = NULL;
296 	evsel->skippable     = false;
297 }
298 
299 struct evsel *evsel__new_idx(struct perf_event_attr *attr, int idx)
300 {
301 	struct evsel *evsel = zalloc(perf_evsel__object.size);
302 
303 	if (!evsel)
304 		return NULL;
305 	evsel__init(evsel, attr, idx);
306 
307 	if (evsel__is_bpf_output(evsel) && !attr->sample_type) {
308 		evsel->core.attr.sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
309 					    PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
310 		evsel->core.attr.sample_period = 1;
311 	}
312 
313 	if (evsel__is_clock(evsel)) {
314 		free((char *)evsel->unit);
315 		evsel->unit = strdup("msec");
316 		evsel->scale = 1e-6;
317 	}
318 
319 	return evsel;
320 }
321 
322 int copy_config_terms(struct list_head *dst, struct list_head *src)
323 {
324 	struct evsel_config_term *pos, *tmp;
325 
326 	list_for_each_entry(pos, src, list) {
327 		tmp = malloc(sizeof(*tmp));
328 		if (tmp == NULL)
329 			return -ENOMEM;
330 
331 		*tmp = *pos;
332 		if (tmp->free_str) {
333 			tmp->val.str = strdup(pos->val.str);
334 			if (tmp->val.str == NULL) {
335 				free(tmp);
336 				return -ENOMEM;
337 			}
338 		}
339 		list_add_tail(&tmp->list, dst);
340 	}
341 	return 0;
342 }
343 
344 static int evsel__copy_config_terms(struct evsel *dst, struct evsel *src)
345 {
346 	return copy_config_terms(&dst->config_terms, &src->config_terms);
347 }
348 
349 /**
350  * evsel__clone - create a new evsel copied from @orig
351  * @orig: original evsel
352  *
353  * The assumption is that @orig is not configured nor opened yet.
354  * So we only care about the attributes that can be set while it's parsed.
355  */
356 struct evsel *evsel__clone(struct evsel *orig)
357 {
358 	struct evsel *evsel;
359 
360 	BUG_ON(orig->core.fd);
361 	BUG_ON(orig->counts);
362 	BUG_ON(orig->priv);
363 	BUG_ON(orig->per_pkg_mask);
364 
365 	/* cannot handle BPF objects for now */
366 	if (orig->bpf_obj)
367 		return NULL;
368 
369 	evsel = evsel__new(&orig->core.attr);
370 	if (evsel == NULL)
371 		return NULL;
372 
373 	evsel->core.cpus = perf_cpu_map__get(orig->core.cpus);
374 	evsel->core.own_cpus = perf_cpu_map__get(orig->core.own_cpus);
375 	evsel->core.threads = perf_thread_map__get(orig->core.threads);
376 	evsel->core.nr_members = orig->core.nr_members;
377 	evsel->core.system_wide = orig->core.system_wide;
378 	evsel->core.requires_cpu = orig->core.requires_cpu;
379 	evsel->core.is_pmu_core = orig->core.is_pmu_core;
380 
381 	if (orig->name) {
382 		evsel->name = strdup(orig->name);
383 		if (evsel->name == NULL)
384 			goto out_err;
385 	}
386 	if (orig->group_name) {
387 		evsel->group_name = strdup(orig->group_name);
388 		if (evsel->group_name == NULL)
389 			goto out_err;
390 	}
391 	if (orig->pmu_name) {
392 		evsel->pmu_name = strdup(orig->pmu_name);
393 		if (evsel->pmu_name == NULL)
394 			goto out_err;
395 	}
396 	if (orig->group_pmu_name) {
397 		evsel->group_pmu_name = strdup(orig->group_pmu_name);
398 		if (evsel->group_pmu_name == NULL)
399 			goto out_err;
400 	}
401 	if (orig->filter) {
402 		evsel->filter = strdup(orig->filter);
403 		if (evsel->filter == NULL)
404 			goto out_err;
405 	}
406 	if (orig->metric_id) {
407 		evsel->metric_id = strdup(orig->metric_id);
408 		if (evsel->metric_id == NULL)
409 			goto out_err;
410 	}
411 	evsel->cgrp = cgroup__get(orig->cgrp);
412 #ifdef HAVE_LIBTRACEEVENT
413 	evsel->tp_format = orig->tp_format;
414 #endif
415 	evsel->handler = orig->handler;
416 	evsel->core.leader = orig->core.leader;
417 
418 	evsel->max_events = orig->max_events;
419 	evsel->tool_event = orig->tool_event;
420 	free((char *)evsel->unit);
421 	evsel->unit = strdup(orig->unit);
422 	if (evsel->unit == NULL)
423 		goto out_err;
424 
425 	evsel->scale = orig->scale;
426 	evsel->snapshot = orig->snapshot;
427 	evsel->per_pkg = orig->per_pkg;
428 	evsel->percore = orig->percore;
429 	evsel->precise_max = orig->precise_max;
430 	evsel->is_libpfm_event = orig->is_libpfm_event;
431 
432 	evsel->exclude_GH = orig->exclude_GH;
433 	evsel->sample_read = orig->sample_read;
434 	evsel->auto_merge_stats = orig->auto_merge_stats;
435 	evsel->collect_stat = orig->collect_stat;
436 	evsel->weak_group = orig->weak_group;
437 	evsel->use_config_name = orig->use_config_name;
438 	evsel->pmu = orig->pmu;
439 
440 	if (evsel__copy_config_terms(evsel, orig) < 0)
441 		goto out_err;
442 
443 	return evsel;
444 
445 out_err:
446 	evsel__delete(evsel);
447 	return NULL;
448 }
449 
450 /*
451  * Returns pointer with encoded error via <linux/err.h> interface.
452  */
453 #ifdef HAVE_LIBTRACEEVENT
454 struct evsel *evsel__newtp_idx(const char *sys, const char *name, int idx)
455 {
456 	struct evsel *evsel = zalloc(perf_evsel__object.size);
457 	int err = -ENOMEM;
458 
459 	if (evsel == NULL) {
460 		goto out_err;
461 	} else {
462 		struct perf_event_attr attr = {
463 			.type	       = PERF_TYPE_TRACEPOINT,
464 			.sample_type   = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
465 					  PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
466 		};
467 
468 		if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
469 			goto out_free;
470 
471 		evsel->tp_format = trace_event__tp_format(sys, name);
472 		if (IS_ERR(evsel->tp_format)) {
473 			err = PTR_ERR(evsel->tp_format);
474 			goto out_free;
475 		}
476 
477 		event_attr_init(&attr);
478 		attr.config = evsel->tp_format->id;
479 		attr.sample_period = 1;
480 		evsel__init(evsel, &attr, idx);
481 	}
482 
483 	return evsel;
484 
485 out_free:
486 	zfree(&evsel->name);
487 	free(evsel);
488 out_err:
489 	return ERR_PTR(err);
490 }
491 #endif
492 
493 const char *const evsel__hw_names[PERF_COUNT_HW_MAX] = {
494 	"cycles",
495 	"instructions",
496 	"cache-references",
497 	"cache-misses",
498 	"branches",
499 	"branch-misses",
500 	"bus-cycles",
501 	"stalled-cycles-frontend",
502 	"stalled-cycles-backend",
503 	"ref-cycles",
504 };
505 
506 char *evsel__bpf_counter_events;
507 
508 bool evsel__match_bpf_counter_events(const char *name)
509 {
510 	int name_len;
511 	bool match;
512 	char *ptr;
513 
514 	if (!evsel__bpf_counter_events)
515 		return false;
516 
517 	ptr = strstr(evsel__bpf_counter_events, name);
518 	name_len = strlen(name);
519 
520 	/* check name matches a full token in evsel__bpf_counter_events */
521 	match = (ptr != NULL) &&
522 		((ptr == evsel__bpf_counter_events) || (*(ptr - 1) == ',')) &&
523 		((*(ptr + name_len) == ',') || (*(ptr + name_len) == '\0'));
524 
525 	return match;
526 }
527 
528 static const char *__evsel__hw_name(u64 config)
529 {
530 	if (config < PERF_COUNT_HW_MAX && evsel__hw_names[config])
531 		return evsel__hw_names[config];
532 
533 	return "unknown-hardware";
534 }
535 
536 static int evsel__add_modifiers(struct evsel *evsel, char *bf, size_t size)
537 {
538 	int colon = 0, r = 0;
539 	struct perf_event_attr *attr = &evsel->core.attr;
540 	bool exclude_guest_default = false;
541 
542 #define MOD_PRINT(context, mod)	do {					\
543 		if (!attr->exclude_##context) {				\
544 			if (!colon) colon = ++r;			\
545 			r += scnprintf(bf + r, size - r, "%c", mod);	\
546 		} } while(0)
547 
548 	if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
549 		MOD_PRINT(kernel, 'k');
550 		MOD_PRINT(user, 'u');
551 		MOD_PRINT(hv, 'h');
552 		exclude_guest_default = true;
553 	}
554 
555 	if (attr->precise_ip) {
556 		if (!colon)
557 			colon = ++r;
558 		r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
559 		exclude_guest_default = true;
560 	}
561 
562 	if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
563 		MOD_PRINT(host, 'H');
564 		MOD_PRINT(guest, 'G');
565 	}
566 #undef MOD_PRINT
567 	if (colon)
568 		bf[colon - 1] = ':';
569 	return r;
570 }
571 
572 int __weak arch_evsel__hw_name(struct evsel *evsel, char *bf, size_t size)
573 {
574 	return scnprintf(bf, size, "%s", __evsel__hw_name(evsel->core.attr.config));
575 }
576 
577 static int evsel__hw_name(struct evsel *evsel, char *bf, size_t size)
578 {
579 	int r = arch_evsel__hw_name(evsel, bf, size);
580 	return r + evsel__add_modifiers(evsel, bf + r, size - r);
581 }
582 
583 const char *const evsel__sw_names[PERF_COUNT_SW_MAX] = {
584 	"cpu-clock",
585 	"task-clock",
586 	"page-faults",
587 	"context-switches",
588 	"cpu-migrations",
589 	"minor-faults",
590 	"major-faults",
591 	"alignment-faults",
592 	"emulation-faults",
593 	"dummy",
594 };
595 
596 static const char *__evsel__sw_name(u64 config)
597 {
598 	if (config < PERF_COUNT_SW_MAX && evsel__sw_names[config])
599 		return evsel__sw_names[config];
600 	return "unknown-software";
601 }
602 
603 static int evsel__sw_name(struct evsel *evsel, char *bf, size_t size)
604 {
605 	int r = scnprintf(bf, size, "%s", __evsel__sw_name(evsel->core.attr.config));
606 	return r + evsel__add_modifiers(evsel, bf + r, size - r);
607 }
608 
609 static int evsel__tool_name(enum perf_tool_event ev, char *bf, size_t size)
610 {
611 	return scnprintf(bf, size, "%s", perf_tool_event__to_str(ev));
612 }
613 
614 static int __evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
615 {
616 	int r;
617 
618 	r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);
619 
620 	if (type & HW_BREAKPOINT_R)
621 		r += scnprintf(bf + r, size - r, "r");
622 
623 	if (type & HW_BREAKPOINT_W)
624 		r += scnprintf(bf + r, size - r, "w");
625 
626 	if (type & HW_BREAKPOINT_X)
627 		r += scnprintf(bf + r, size - r, "x");
628 
629 	return r;
630 }
631 
632 static int evsel__bp_name(struct evsel *evsel, char *bf, size_t size)
633 {
634 	struct perf_event_attr *attr = &evsel->core.attr;
635 	int r = __evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);
636 	return r + evsel__add_modifiers(evsel, bf + r, size - r);
637 }
638 
639 const char *const evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX][EVSEL__MAX_ALIASES] = {
640  { "L1-dcache",	"l1-d",		"l1d",		"L1-data",		},
641  { "L1-icache",	"l1-i",		"l1i",		"L1-instruction",	},
642  { "LLC",	"L2",							},
643  { "dTLB",	"d-tlb",	"Data-TLB",				},
644  { "iTLB",	"i-tlb",	"Instruction-TLB",			},
645  { "branch",	"branches",	"bpu",		"btb",		"bpc",	},
646  { "node",								},
647 };
648 
649 const char *const evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX][EVSEL__MAX_ALIASES] = {
650  { "load",	"loads",	"read",					},
651  { "store",	"stores",	"write",				},
652  { "prefetch",	"prefetches",	"speculative-read", "speculative-load",	},
653 };
654 
655 const char *const evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX][EVSEL__MAX_ALIASES] = {
656  { "refs",	"Reference",	"ops",		"access",		},
657  { "misses",	"miss",							},
658 };
659 
660 #define C(x)		PERF_COUNT_HW_CACHE_##x
661 #define CACHE_READ	(1 << C(OP_READ))
662 #define CACHE_WRITE	(1 << C(OP_WRITE))
663 #define CACHE_PREFETCH	(1 << C(OP_PREFETCH))
664 #define COP(x)		(1 << x)
665 
666 /*
667  * cache operation stat
668  * L1I : Read and prefetch only
669  * ITLB and BPU : Read-only
670  */
671 static const unsigned long evsel__hw_cache_stat[C(MAX)] = {
672  [C(L1D)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
673  [C(L1I)]	= (CACHE_READ | CACHE_PREFETCH),
674  [C(LL)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
675  [C(DTLB)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
676  [C(ITLB)]	= (CACHE_READ),
677  [C(BPU)]	= (CACHE_READ),
678  [C(NODE)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
679 };
680 
681 bool evsel__is_cache_op_valid(u8 type, u8 op)
682 {
683 	if (evsel__hw_cache_stat[type] & COP(op))
684 		return true;	/* valid */
685 	else
686 		return false;	/* invalid */
687 }
688 
689 int __evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result, char *bf, size_t size)
690 {
691 	if (result) {
692 		return scnprintf(bf, size, "%s-%s-%s", evsel__hw_cache[type][0],
693 				 evsel__hw_cache_op[op][0],
694 				 evsel__hw_cache_result[result][0]);
695 	}
696 
697 	return scnprintf(bf, size, "%s-%s", evsel__hw_cache[type][0],
698 			 evsel__hw_cache_op[op][1]);
699 }
700 
701 static int __evsel__hw_cache_name(u64 config, char *bf, size_t size)
702 {
703 	u8 op, result, type = (config >>  0) & 0xff;
704 	const char *err = "unknown-ext-hardware-cache-type";
705 
706 	if (type >= PERF_COUNT_HW_CACHE_MAX)
707 		goto out_err;
708 
709 	op = (config >>  8) & 0xff;
710 	err = "unknown-ext-hardware-cache-op";
711 	if (op >= PERF_COUNT_HW_CACHE_OP_MAX)
712 		goto out_err;
713 
714 	result = (config >> 16) & 0xff;
715 	err = "unknown-ext-hardware-cache-result";
716 	if (result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
717 		goto out_err;
718 
719 	err = "invalid-cache";
720 	if (!evsel__is_cache_op_valid(type, op))
721 		goto out_err;
722 
723 	return __evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
724 out_err:
725 	return scnprintf(bf, size, "%s", err);
726 }
727 
728 static int evsel__hw_cache_name(struct evsel *evsel, char *bf, size_t size)
729 {
730 	int ret = __evsel__hw_cache_name(evsel->core.attr.config, bf, size);
731 	return ret + evsel__add_modifiers(evsel, bf + ret, size - ret);
732 }
733 
734 static int evsel__raw_name(struct evsel *evsel, char *bf, size_t size)
735 {
736 	int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->core.attr.config);
737 	return ret + evsel__add_modifiers(evsel, bf + ret, size - ret);
738 }
739 
740 const char *evsel__name(struct evsel *evsel)
741 {
742 	char bf[128];
743 
744 	if (!evsel)
745 		goto out_unknown;
746 
747 	if (evsel->name)
748 		return evsel->name;
749 
750 	switch (evsel->core.attr.type) {
751 	case PERF_TYPE_RAW:
752 		evsel__raw_name(evsel, bf, sizeof(bf));
753 		break;
754 
755 	case PERF_TYPE_HARDWARE:
756 		evsel__hw_name(evsel, bf, sizeof(bf));
757 		break;
758 
759 	case PERF_TYPE_HW_CACHE:
760 		evsel__hw_cache_name(evsel, bf, sizeof(bf));
761 		break;
762 
763 	case PERF_TYPE_SOFTWARE:
764 		if (evsel__is_tool(evsel))
765 			evsel__tool_name(evsel->tool_event, bf, sizeof(bf));
766 		else
767 			evsel__sw_name(evsel, bf, sizeof(bf));
768 		break;
769 
770 	case PERF_TYPE_TRACEPOINT:
771 		scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
772 		break;
773 
774 	case PERF_TYPE_BREAKPOINT:
775 		evsel__bp_name(evsel, bf, sizeof(bf));
776 		break;
777 
778 	default:
779 		scnprintf(bf, sizeof(bf), "unknown attr type: %d",
780 			  evsel->core.attr.type);
781 		break;
782 	}
783 
784 	evsel->name = strdup(bf);
785 
786 	if (evsel->name)
787 		return evsel->name;
788 out_unknown:
789 	return "unknown";
790 }
791 
792 bool evsel__name_is(struct evsel *evsel, const char *name)
793 {
794 	return !strcmp(evsel__name(evsel), name);
795 }
796 
797 const char *evsel__metric_id(const struct evsel *evsel)
798 {
799 	if (evsel->metric_id)
800 		return evsel->metric_id;
801 
802 	if (evsel__is_tool(evsel))
803 		return perf_tool_event__to_str(evsel->tool_event);
804 
805 	return "unknown";
806 }
807 
808 const char *evsel__group_name(struct evsel *evsel)
809 {
810 	return evsel->group_name ?: "anon group";
811 }
812 
813 /*
814  * Returns the group details for the specified leader,
815  * with following rules.
816  *
817  *  For record -e '{cycles,instructions}'
818  *    'anon group { cycles:u, instructions:u }'
819  *
820  *  For record -e 'cycles,instructions' and report --group
821  *    'cycles:u, instructions:u'
822  */
823 int evsel__group_desc(struct evsel *evsel, char *buf, size_t size)
824 {
825 	int ret = 0;
826 	struct evsel *pos;
827 	const char *group_name = evsel__group_name(evsel);
828 
829 	if (!evsel->forced_leader)
830 		ret = scnprintf(buf, size, "%s { ", group_name);
831 
832 	ret += scnprintf(buf + ret, size - ret, "%s", evsel__name(evsel));
833 
834 	for_each_group_member(pos, evsel)
835 		ret += scnprintf(buf + ret, size - ret, ", %s", evsel__name(pos));
836 
837 	if (!evsel->forced_leader)
838 		ret += scnprintf(buf + ret, size - ret, " }");
839 
840 	return ret;
841 }
842 
843 static void __evsel__config_callchain(struct evsel *evsel, struct record_opts *opts,
844 				      struct callchain_param *param)
845 {
846 	bool function = evsel__is_function_event(evsel);
847 	struct perf_event_attr *attr = &evsel->core.attr;
848 
849 	evsel__set_sample_bit(evsel, CALLCHAIN);
850 
851 	attr->sample_max_stack = param->max_stack;
852 
853 	if (opts->kernel_callchains)
854 		attr->exclude_callchain_user = 1;
855 	if (opts->user_callchains)
856 		attr->exclude_callchain_kernel = 1;
857 	if (param->record_mode == CALLCHAIN_LBR) {
858 		if (!opts->branch_stack) {
859 			if (attr->exclude_user) {
860 				pr_warning("LBR callstack option is only available "
861 					   "to get user callchain information. "
862 					   "Falling back to framepointers.\n");
863 			} else {
864 				evsel__set_sample_bit(evsel, BRANCH_STACK);
865 				attr->branch_sample_type = PERF_SAMPLE_BRANCH_USER |
866 							PERF_SAMPLE_BRANCH_CALL_STACK |
867 							PERF_SAMPLE_BRANCH_NO_CYCLES |
868 							PERF_SAMPLE_BRANCH_NO_FLAGS |
869 							PERF_SAMPLE_BRANCH_HW_INDEX;
870 			}
871 		} else
872 			 pr_warning("Cannot use LBR callstack with branch stack. "
873 				    "Falling back to framepointers.\n");
874 	}
875 
876 	if (param->record_mode == CALLCHAIN_DWARF) {
877 		if (!function) {
878 			evsel__set_sample_bit(evsel, REGS_USER);
879 			evsel__set_sample_bit(evsel, STACK_USER);
880 			if (opts->sample_user_regs && DWARF_MINIMAL_REGS != PERF_REGS_MASK) {
881 				attr->sample_regs_user |= DWARF_MINIMAL_REGS;
882 				pr_warning("WARNING: The use of --call-graph=dwarf may require all the user registers, "
883 					   "specifying a subset with --user-regs may render DWARF unwinding unreliable, "
884 					   "so the minimal registers set (IP, SP) is explicitly forced.\n");
885 			} else {
886 				attr->sample_regs_user |= arch__user_reg_mask();
887 			}
888 			attr->sample_stack_user = param->dump_size;
889 			attr->exclude_callchain_user = 1;
890 		} else {
891 			pr_info("Cannot use DWARF unwind for function trace event,"
892 				" falling back to framepointers.\n");
893 		}
894 	}
895 
896 	if (function) {
897 		pr_info("Disabling user space callchains for function trace event.\n");
898 		attr->exclude_callchain_user = 1;
899 	}
900 }
901 
902 void evsel__config_callchain(struct evsel *evsel, struct record_opts *opts,
903 			     struct callchain_param *param)
904 {
905 	if (param->enabled)
906 		return __evsel__config_callchain(evsel, opts, param);
907 }
908 
909 static void evsel__reset_callgraph(struct evsel *evsel, struct callchain_param *param)
910 {
911 	struct perf_event_attr *attr = &evsel->core.attr;
912 
913 	evsel__reset_sample_bit(evsel, CALLCHAIN);
914 	if (param->record_mode == CALLCHAIN_LBR) {
915 		evsel__reset_sample_bit(evsel, BRANCH_STACK);
916 		attr->branch_sample_type &= ~(PERF_SAMPLE_BRANCH_USER |
917 					      PERF_SAMPLE_BRANCH_CALL_STACK |
918 					      PERF_SAMPLE_BRANCH_HW_INDEX);
919 	}
920 	if (param->record_mode == CALLCHAIN_DWARF) {
921 		evsel__reset_sample_bit(evsel, REGS_USER);
922 		evsel__reset_sample_bit(evsel, STACK_USER);
923 	}
924 }
925 
926 static void evsel__apply_config_terms(struct evsel *evsel,
927 				      struct record_opts *opts, bool track)
928 {
929 	struct evsel_config_term *term;
930 	struct list_head *config_terms = &evsel->config_terms;
931 	struct perf_event_attr *attr = &evsel->core.attr;
932 	/* callgraph default */
933 	struct callchain_param param = {
934 		.record_mode = callchain_param.record_mode,
935 	};
936 	u32 dump_size = 0;
937 	int max_stack = 0;
938 	const char *callgraph_buf = NULL;
939 
940 	list_for_each_entry(term, config_terms, list) {
941 		switch (term->type) {
942 		case EVSEL__CONFIG_TERM_PERIOD:
943 			if (!(term->weak && opts->user_interval != ULLONG_MAX)) {
944 				attr->sample_period = term->val.period;
945 				attr->freq = 0;
946 				evsel__reset_sample_bit(evsel, PERIOD);
947 			}
948 			break;
949 		case EVSEL__CONFIG_TERM_FREQ:
950 			if (!(term->weak && opts->user_freq != UINT_MAX)) {
951 				attr->sample_freq = term->val.freq;
952 				attr->freq = 1;
953 				evsel__set_sample_bit(evsel, PERIOD);
954 			}
955 			break;
956 		case EVSEL__CONFIG_TERM_TIME:
957 			if (term->val.time)
958 				evsel__set_sample_bit(evsel, TIME);
959 			else
960 				evsel__reset_sample_bit(evsel, TIME);
961 			break;
962 		case EVSEL__CONFIG_TERM_CALLGRAPH:
963 			callgraph_buf = term->val.str;
964 			break;
965 		case EVSEL__CONFIG_TERM_BRANCH:
966 			if (term->val.str && strcmp(term->val.str, "no")) {
967 				evsel__set_sample_bit(evsel, BRANCH_STACK);
968 				parse_branch_str(term->val.str,
969 						 &attr->branch_sample_type);
970 			} else
971 				evsel__reset_sample_bit(evsel, BRANCH_STACK);
972 			break;
973 		case EVSEL__CONFIG_TERM_STACK_USER:
974 			dump_size = term->val.stack_user;
975 			break;
976 		case EVSEL__CONFIG_TERM_MAX_STACK:
977 			max_stack = term->val.max_stack;
978 			break;
979 		case EVSEL__CONFIG_TERM_MAX_EVENTS:
980 			evsel->max_events = term->val.max_events;
981 			break;
982 		case EVSEL__CONFIG_TERM_INHERIT:
983 			/*
984 			 * attr->inherit should has already been set by
985 			 * evsel__config. If user explicitly set
986 			 * inherit using config terms, override global
987 			 * opt->no_inherit setting.
988 			 */
989 			attr->inherit = term->val.inherit ? 1 : 0;
990 			break;
991 		case EVSEL__CONFIG_TERM_OVERWRITE:
992 			attr->write_backward = term->val.overwrite ? 1 : 0;
993 			break;
994 		case EVSEL__CONFIG_TERM_DRV_CFG:
995 			break;
996 		case EVSEL__CONFIG_TERM_PERCORE:
997 			break;
998 		case EVSEL__CONFIG_TERM_AUX_OUTPUT:
999 			attr->aux_output = term->val.aux_output ? 1 : 0;
1000 			break;
1001 		case EVSEL__CONFIG_TERM_AUX_SAMPLE_SIZE:
1002 			/* Already applied by auxtrace */
1003 			break;
1004 		case EVSEL__CONFIG_TERM_CFG_CHG:
1005 			break;
1006 		default:
1007 			break;
1008 		}
1009 	}
1010 
1011 	/* User explicitly set per-event callgraph, clear the old setting and reset. */
1012 	if ((callgraph_buf != NULL) || (dump_size > 0) || max_stack) {
1013 		bool sample_address = false;
1014 
1015 		if (max_stack) {
1016 			param.max_stack = max_stack;
1017 			if (callgraph_buf == NULL)
1018 				callgraph_buf = "fp";
1019 		}
1020 
1021 		/* parse callgraph parameters */
1022 		if (callgraph_buf != NULL) {
1023 			if (!strcmp(callgraph_buf, "no")) {
1024 				param.enabled = false;
1025 				param.record_mode = CALLCHAIN_NONE;
1026 			} else {
1027 				param.enabled = true;
1028 				if (parse_callchain_record(callgraph_buf, &param)) {
1029 					pr_err("per-event callgraph setting for %s failed. "
1030 					       "Apply callgraph global setting for it\n",
1031 					       evsel->name);
1032 					return;
1033 				}
1034 				if (param.record_mode == CALLCHAIN_DWARF)
1035 					sample_address = true;
1036 			}
1037 		}
1038 		if (dump_size > 0) {
1039 			dump_size = round_up(dump_size, sizeof(u64));
1040 			param.dump_size = dump_size;
1041 		}
1042 
1043 		/* If global callgraph set, clear it */
1044 		if (callchain_param.enabled)
1045 			evsel__reset_callgraph(evsel, &callchain_param);
1046 
1047 		/* set perf-event callgraph */
1048 		if (param.enabled) {
1049 			if (sample_address) {
1050 				evsel__set_sample_bit(evsel, ADDR);
1051 				evsel__set_sample_bit(evsel, DATA_SRC);
1052 				evsel->core.attr.mmap_data = track;
1053 			}
1054 			evsel__config_callchain(evsel, opts, &param);
1055 		}
1056 	}
1057 }
1058 
1059 struct evsel_config_term *__evsel__get_config_term(struct evsel *evsel, enum evsel_term_type type)
1060 {
1061 	struct evsel_config_term *term, *found_term = NULL;
1062 
1063 	list_for_each_entry(term, &evsel->config_terms, list) {
1064 		if (term->type == type)
1065 			found_term = term;
1066 	}
1067 
1068 	return found_term;
1069 }
1070 
1071 void __weak arch_evsel__set_sample_weight(struct evsel *evsel)
1072 {
1073 	evsel__set_sample_bit(evsel, WEIGHT);
1074 }
1075 
1076 void __weak arch__post_evsel_config(struct evsel *evsel __maybe_unused,
1077 				    struct perf_event_attr *attr __maybe_unused)
1078 {
1079 }
1080 
1081 static void evsel__set_default_freq_period(struct record_opts *opts,
1082 					   struct perf_event_attr *attr)
1083 {
1084 	if (opts->freq) {
1085 		attr->freq = 1;
1086 		attr->sample_freq = opts->freq;
1087 	} else {
1088 		attr->sample_period = opts->default_interval;
1089 	}
1090 }
1091 
1092 static bool evsel__is_offcpu_event(struct evsel *evsel)
1093 {
1094 	return evsel__is_bpf_output(evsel) && evsel__name_is(evsel, OFFCPU_EVENT);
1095 }
1096 
1097 /*
1098  * The enable_on_exec/disabled value strategy:
1099  *
1100  *  1) For any type of traced program:
1101  *    - all independent events and group leaders are disabled
1102  *    - all group members are enabled
1103  *
1104  *     Group members are ruled by group leaders. They need to
1105  *     be enabled, because the group scheduling relies on that.
1106  *
1107  *  2) For traced programs executed by perf:
1108  *     - all independent events and group leaders have
1109  *       enable_on_exec set
1110  *     - we don't specifically enable or disable any event during
1111  *       the record command
1112  *
1113  *     Independent events and group leaders are initially disabled
1114  *     and get enabled by exec. Group members are ruled by group
1115  *     leaders as stated in 1).
1116  *
1117  *  3) For traced programs attached by perf (pid/tid):
1118  *     - we specifically enable or disable all events during
1119  *       the record command
1120  *
1121  *     When attaching events to already running traced we
1122  *     enable/disable events specifically, as there's no
1123  *     initial traced exec call.
1124  */
1125 void evsel__config(struct evsel *evsel, struct record_opts *opts,
1126 		   struct callchain_param *callchain)
1127 {
1128 	struct evsel *leader = evsel__leader(evsel);
1129 	struct perf_event_attr *attr = &evsel->core.attr;
1130 	int track = evsel->tracking;
1131 	bool per_cpu = opts->target.default_per_cpu && !opts->target.per_thread;
1132 
1133 	attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1;
1134 	attr->inherit	    = !opts->no_inherit;
1135 	attr->write_backward = opts->overwrite ? 1 : 0;
1136 	attr->read_format   = PERF_FORMAT_LOST;
1137 
1138 	evsel__set_sample_bit(evsel, IP);
1139 	evsel__set_sample_bit(evsel, TID);
1140 
1141 	if (evsel->sample_read) {
1142 		evsel__set_sample_bit(evsel, READ);
1143 
1144 		/*
1145 		 * We need ID even in case of single event, because
1146 		 * PERF_SAMPLE_READ process ID specific data.
1147 		 */
1148 		evsel__set_sample_id(evsel, false);
1149 
1150 		/*
1151 		 * Apply group format only if we belong to group
1152 		 * with more than one members.
1153 		 */
1154 		if (leader->core.nr_members > 1) {
1155 			attr->read_format |= PERF_FORMAT_GROUP;
1156 			attr->inherit = 0;
1157 		}
1158 	}
1159 
1160 	/*
1161 	 * We default some events to have a default interval. But keep
1162 	 * it a weak assumption overridable by the user.
1163 	 */
1164 	if ((evsel->is_libpfm_event && !attr->sample_period) ||
1165 	    (!evsel->is_libpfm_event && (!attr->sample_period ||
1166 					 opts->user_freq != UINT_MAX ||
1167 					 opts->user_interval != ULLONG_MAX)))
1168 		evsel__set_default_freq_period(opts, attr);
1169 
1170 	/*
1171 	 * If attr->freq was set (here or earlier), ask for period
1172 	 * to be sampled.
1173 	 */
1174 	if (attr->freq)
1175 		evsel__set_sample_bit(evsel, PERIOD);
1176 
1177 	if (opts->no_samples)
1178 		attr->sample_freq = 0;
1179 
1180 	if (opts->inherit_stat) {
1181 		evsel->core.attr.read_format |=
1182 			PERF_FORMAT_TOTAL_TIME_ENABLED |
1183 			PERF_FORMAT_TOTAL_TIME_RUNNING |
1184 			PERF_FORMAT_ID;
1185 		attr->inherit_stat = 1;
1186 	}
1187 
1188 	if (opts->sample_address) {
1189 		evsel__set_sample_bit(evsel, ADDR);
1190 		attr->mmap_data = track;
1191 	}
1192 
1193 	/*
1194 	 * We don't allow user space callchains for  function trace
1195 	 * event, due to issues with page faults while tracing page
1196 	 * fault handler and its overall trickiness nature.
1197 	 */
1198 	if (evsel__is_function_event(evsel))
1199 		evsel->core.attr.exclude_callchain_user = 1;
1200 
1201 	if (callchain && callchain->enabled && !evsel->no_aux_samples)
1202 		evsel__config_callchain(evsel, opts, callchain);
1203 
1204 	if (opts->sample_intr_regs && !evsel->no_aux_samples &&
1205 	    !evsel__is_dummy_event(evsel)) {
1206 		attr->sample_regs_intr = opts->sample_intr_regs;
1207 		evsel__set_sample_bit(evsel, REGS_INTR);
1208 	}
1209 
1210 	if (opts->sample_user_regs && !evsel->no_aux_samples &&
1211 	    !evsel__is_dummy_event(evsel)) {
1212 		attr->sample_regs_user |= opts->sample_user_regs;
1213 		evsel__set_sample_bit(evsel, REGS_USER);
1214 	}
1215 
1216 	if (target__has_cpu(&opts->target) || opts->sample_cpu)
1217 		evsel__set_sample_bit(evsel, CPU);
1218 
1219 	/*
1220 	 * When the user explicitly disabled time don't force it here.
1221 	 */
1222 	if (opts->sample_time &&
1223 	    (!perf_missing_features.sample_id_all &&
1224 	    (!opts->no_inherit || target__has_cpu(&opts->target) || per_cpu ||
1225 	     opts->sample_time_set)))
1226 		evsel__set_sample_bit(evsel, TIME);
1227 
1228 	if (opts->raw_samples && !evsel->no_aux_samples) {
1229 		evsel__set_sample_bit(evsel, TIME);
1230 		evsel__set_sample_bit(evsel, RAW);
1231 		evsel__set_sample_bit(evsel, CPU);
1232 	}
1233 
1234 	if (opts->sample_address)
1235 		evsel__set_sample_bit(evsel, DATA_SRC);
1236 
1237 	if (opts->sample_phys_addr)
1238 		evsel__set_sample_bit(evsel, PHYS_ADDR);
1239 
1240 	if (opts->no_buffering) {
1241 		attr->watermark = 0;
1242 		attr->wakeup_events = 1;
1243 	}
1244 	if (opts->branch_stack && !evsel->no_aux_samples) {
1245 		evsel__set_sample_bit(evsel, BRANCH_STACK);
1246 		attr->branch_sample_type = opts->branch_stack;
1247 	}
1248 
1249 	if (opts->sample_weight)
1250 		arch_evsel__set_sample_weight(evsel);
1251 
1252 	attr->task     = track;
1253 	attr->mmap     = track;
1254 	attr->mmap2    = track && !perf_missing_features.mmap2;
1255 	attr->comm     = track;
1256 	attr->build_id = track && opts->build_id;
1257 
1258 	/*
1259 	 * ksymbol is tracked separately with text poke because it needs to be
1260 	 * system wide and enabled immediately.
1261 	 */
1262 	if (!opts->text_poke)
1263 		attr->ksymbol = track && !perf_missing_features.ksymbol;
1264 	attr->bpf_event = track && !opts->no_bpf_event && !perf_missing_features.bpf;
1265 
1266 	if (opts->record_namespaces)
1267 		attr->namespaces  = track;
1268 
1269 	if (opts->record_cgroup) {
1270 		attr->cgroup = track && !perf_missing_features.cgroup;
1271 		evsel__set_sample_bit(evsel, CGROUP);
1272 	}
1273 
1274 	if (opts->sample_data_page_size)
1275 		evsel__set_sample_bit(evsel, DATA_PAGE_SIZE);
1276 
1277 	if (opts->sample_code_page_size)
1278 		evsel__set_sample_bit(evsel, CODE_PAGE_SIZE);
1279 
1280 	if (opts->record_switch_events)
1281 		attr->context_switch = track;
1282 
1283 	if (opts->sample_transaction)
1284 		evsel__set_sample_bit(evsel, TRANSACTION);
1285 
1286 	if (opts->running_time) {
1287 		evsel->core.attr.read_format |=
1288 			PERF_FORMAT_TOTAL_TIME_ENABLED |
1289 			PERF_FORMAT_TOTAL_TIME_RUNNING;
1290 	}
1291 
1292 	/*
1293 	 * XXX see the function comment above
1294 	 *
1295 	 * Disabling only independent events or group leaders,
1296 	 * keeping group members enabled.
1297 	 */
1298 	if (evsel__is_group_leader(evsel))
1299 		attr->disabled = 1;
1300 
1301 	/*
1302 	 * Setting enable_on_exec for independent events and
1303 	 * group leaders for traced executed by perf.
1304 	 */
1305 	if (target__none(&opts->target) && evsel__is_group_leader(evsel) &&
1306 	    !opts->target.initial_delay)
1307 		attr->enable_on_exec = 1;
1308 
1309 	if (evsel->immediate) {
1310 		attr->disabled = 0;
1311 		attr->enable_on_exec = 0;
1312 	}
1313 
1314 	clockid = opts->clockid;
1315 	if (opts->use_clockid) {
1316 		attr->use_clockid = 1;
1317 		attr->clockid = opts->clockid;
1318 	}
1319 
1320 	if (evsel->precise_max)
1321 		attr->precise_ip = 3;
1322 
1323 	if (opts->all_user) {
1324 		attr->exclude_kernel = 1;
1325 		attr->exclude_user   = 0;
1326 	}
1327 
1328 	if (opts->all_kernel) {
1329 		attr->exclude_kernel = 0;
1330 		attr->exclude_user   = 1;
1331 	}
1332 
1333 	if (evsel->core.own_cpus || evsel->unit)
1334 		evsel->core.attr.read_format |= PERF_FORMAT_ID;
1335 
1336 	/*
1337 	 * Apply event specific term settings,
1338 	 * it overloads any global configuration.
1339 	 */
1340 	evsel__apply_config_terms(evsel, opts, track);
1341 
1342 	evsel->ignore_missing_thread = opts->ignore_missing_thread;
1343 
1344 	/* The --period option takes the precedence. */
1345 	if (opts->period_set) {
1346 		if (opts->period)
1347 			evsel__set_sample_bit(evsel, PERIOD);
1348 		else
1349 			evsel__reset_sample_bit(evsel, PERIOD);
1350 	}
1351 
1352 	/*
1353 	 * A dummy event never triggers any actual counter and therefore
1354 	 * cannot be used with branch_stack.
1355 	 *
1356 	 * For initial_delay, a dummy event is added implicitly.
1357 	 * The software event will trigger -EOPNOTSUPP error out,
1358 	 * if BRANCH_STACK bit is set.
1359 	 */
1360 	if (evsel__is_dummy_event(evsel))
1361 		evsel__reset_sample_bit(evsel, BRANCH_STACK);
1362 
1363 	if (evsel__is_offcpu_event(evsel))
1364 		evsel->core.attr.sample_type &= OFFCPU_SAMPLE_TYPES;
1365 
1366 	arch__post_evsel_config(evsel, attr);
1367 }
1368 
1369 int evsel__set_filter(struct evsel *evsel, const char *filter)
1370 {
1371 	char *new_filter = strdup(filter);
1372 
1373 	if (new_filter != NULL) {
1374 		free(evsel->filter);
1375 		evsel->filter = new_filter;
1376 		return 0;
1377 	}
1378 
1379 	return -1;
1380 }
1381 
1382 static int evsel__append_filter(struct evsel *evsel, const char *fmt, const char *filter)
1383 {
1384 	char *new_filter;
1385 
1386 	if (evsel->filter == NULL)
1387 		return evsel__set_filter(evsel, filter);
1388 
1389 	if (asprintf(&new_filter, fmt, evsel->filter, filter) > 0) {
1390 		free(evsel->filter);
1391 		evsel->filter = new_filter;
1392 		return 0;
1393 	}
1394 
1395 	return -1;
1396 }
1397 
1398 int evsel__append_tp_filter(struct evsel *evsel, const char *filter)
1399 {
1400 	return evsel__append_filter(evsel, "(%s) && (%s)", filter);
1401 }
1402 
1403 int evsel__append_addr_filter(struct evsel *evsel, const char *filter)
1404 {
1405 	return evsel__append_filter(evsel, "%s,%s", filter);
1406 }
1407 
1408 /* Caller has to clear disabled after going through all CPUs. */
1409 int evsel__enable_cpu(struct evsel *evsel, int cpu_map_idx)
1410 {
1411 	return perf_evsel__enable_cpu(&evsel->core, cpu_map_idx);
1412 }
1413 
1414 int evsel__enable(struct evsel *evsel)
1415 {
1416 	int err = perf_evsel__enable(&evsel->core);
1417 
1418 	if (!err)
1419 		evsel->disabled = false;
1420 	return err;
1421 }
1422 
1423 /* Caller has to set disabled after going through all CPUs. */
1424 int evsel__disable_cpu(struct evsel *evsel, int cpu_map_idx)
1425 {
1426 	return perf_evsel__disable_cpu(&evsel->core, cpu_map_idx);
1427 }
1428 
1429 int evsel__disable(struct evsel *evsel)
1430 {
1431 	int err = perf_evsel__disable(&evsel->core);
1432 	/*
1433 	 * We mark it disabled here so that tools that disable a event can
1434 	 * ignore events after they disable it. I.e. the ring buffer may have
1435 	 * already a few more events queued up before the kernel got the stop
1436 	 * request.
1437 	 */
1438 	if (!err)
1439 		evsel->disabled = true;
1440 
1441 	return err;
1442 }
1443 
1444 void free_config_terms(struct list_head *config_terms)
1445 {
1446 	struct evsel_config_term *term, *h;
1447 
1448 	list_for_each_entry_safe(term, h, config_terms, list) {
1449 		list_del_init(&term->list);
1450 		if (term->free_str)
1451 			zfree(&term->val.str);
1452 		free(term);
1453 	}
1454 }
1455 
1456 static void evsel__free_config_terms(struct evsel *evsel)
1457 {
1458 	free_config_terms(&evsel->config_terms);
1459 }
1460 
1461 void evsel__exit(struct evsel *evsel)
1462 {
1463 	assert(list_empty(&evsel->core.node));
1464 	assert(evsel->evlist == NULL);
1465 	bpf_counter__destroy(evsel);
1466 	perf_bpf_filter__destroy(evsel);
1467 	evsel__free_counts(evsel);
1468 	perf_evsel__free_fd(&evsel->core);
1469 	perf_evsel__free_id(&evsel->core);
1470 	evsel__free_config_terms(evsel);
1471 	cgroup__put(evsel->cgrp);
1472 	perf_cpu_map__put(evsel->core.cpus);
1473 	perf_cpu_map__put(evsel->core.own_cpus);
1474 	perf_thread_map__put(evsel->core.threads);
1475 	zfree(&evsel->group_name);
1476 	zfree(&evsel->name);
1477 	zfree(&evsel->pmu_name);
1478 	zfree(&evsel->group_pmu_name);
1479 	zfree(&evsel->unit);
1480 	zfree(&evsel->metric_id);
1481 	evsel__zero_per_pkg(evsel);
1482 	hashmap__free(evsel->per_pkg_mask);
1483 	evsel->per_pkg_mask = NULL;
1484 	zfree(&evsel->metric_events);
1485 	perf_evsel__object.fini(evsel);
1486 }
1487 
1488 void evsel__delete(struct evsel *evsel)
1489 {
1490 	if (!evsel)
1491 		return;
1492 
1493 	evsel__exit(evsel);
1494 	free(evsel);
1495 }
1496 
1497 void evsel__compute_deltas(struct evsel *evsel, int cpu_map_idx, int thread,
1498 			   struct perf_counts_values *count)
1499 {
1500 	struct perf_counts_values tmp;
1501 
1502 	if (!evsel->prev_raw_counts)
1503 		return;
1504 
1505 	tmp = *perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread);
1506 	*perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread) = *count;
1507 
1508 	count->val = count->val - tmp.val;
1509 	count->ena = count->ena - tmp.ena;
1510 	count->run = count->run - tmp.run;
1511 }
1512 
1513 static int evsel__read_one(struct evsel *evsel, int cpu_map_idx, int thread)
1514 {
1515 	struct perf_counts_values *count = perf_counts(evsel->counts, cpu_map_idx, thread);
1516 
1517 	return perf_evsel__read(&evsel->core, cpu_map_idx, thread, count);
1518 }
1519 
1520 static void evsel__set_count(struct evsel *counter, int cpu_map_idx, int thread,
1521 			     u64 val, u64 ena, u64 run, u64 lost)
1522 {
1523 	struct perf_counts_values *count;
1524 
1525 	count = perf_counts(counter->counts, cpu_map_idx, thread);
1526 
1527 	count->val    = val;
1528 	count->ena    = ena;
1529 	count->run    = run;
1530 	count->lost   = lost;
1531 
1532 	perf_counts__set_loaded(counter->counts, cpu_map_idx, thread, true);
1533 }
1534 
1535 static int evsel__process_group_data(struct evsel *leader, int cpu_map_idx, int thread, u64 *data)
1536 {
1537 	u64 read_format = leader->core.attr.read_format;
1538 	struct sample_read_value *v;
1539 	u64 nr, ena = 0, run = 0, lost = 0;
1540 
1541 	nr = *data++;
1542 
1543 	if (nr != (u64) leader->core.nr_members)
1544 		return -EINVAL;
1545 
1546 	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1547 		ena = *data++;
1548 
1549 	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1550 		run = *data++;
1551 
1552 	v = (void *)data;
1553 	sample_read_group__for_each(v, nr, read_format) {
1554 		struct evsel *counter;
1555 
1556 		counter = evlist__id2evsel(leader->evlist, v->id);
1557 		if (!counter)
1558 			return -EINVAL;
1559 
1560 		if (read_format & PERF_FORMAT_LOST)
1561 			lost = v->lost;
1562 
1563 		evsel__set_count(counter, cpu_map_idx, thread, v->value, ena, run, lost);
1564 	}
1565 
1566 	return 0;
1567 }
1568 
1569 static int evsel__read_group(struct evsel *leader, int cpu_map_idx, int thread)
1570 {
1571 	struct perf_stat_evsel *ps = leader->stats;
1572 	u64 read_format = leader->core.attr.read_format;
1573 	int size = perf_evsel__read_size(&leader->core);
1574 	u64 *data = ps->group_data;
1575 
1576 	if (!(read_format & PERF_FORMAT_ID))
1577 		return -EINVAL;
1578 
1579 	if (!evsel__is_group_leader(leader))
1580 		return -EINVAL;
1581 
1582 	if (!data) {
1583 		data = zalloc(size);
1584 		if (!data)
1585 			return -ENOMEM;
1586 
1587 		ps->group_data = data;
1588 	}
1589 
1590 	if (FD(leader, cpu_map_idx, thread) < 0)
1591 		return -EINVAL;
1592 
1593 	if (readn(FD(leader, cpu_map_idx, thread), data, size) <= 0)
1594 		return -errno;
1595 
1596 	return evsel__process_group_data(leader, cpu_map_idx, thread, data);
1597 }
1598 
1599 int evsel__read_counter(struct evsel *evsel, int cpu_map_idx, int thread)
1600 {
1601 	u64 read_format = evsel->core.attr.read_format;
1602 
1603 	if (read_format & PERF_FORMAT_GROUP)
1604 		return evsel__read_group(evsel, cpu_map_idx, thread);
1605 
1606 	return evsel__read_one(evsel, cpu_map_idx, thread);
1607 }
1608 
1609 int __evsel__read_on_cpu(struct evsel *evsel, int cpu_map_idx, int thread, bool scale)
1610 {
1611 	struct perf_counts_values count;
1612 	size_t nv = scale ? 3 : 1;
1613 
1614 	if (FD(evsel, cpu_map_idx, thread) < 0)
1615 		return -EINVAL;
1616 
1617 	if (evsel->counts == NULL && evsel__alloc_counts(evsel) < 0)
1618 		return -ENOMEM;
1619 
1620 	if (readn(FD(evsel, cpu_map_idx, thread), &count, nv * sizeof(u64)) <= 0)
1621 		return -errno;
1622 
1623 	evsel__compute_deltas(evsel, cpu_map_idx, thread, &count);
1624 	perf_counts_values__scale(&count, scale, NULL);
1625 	*perf_counts(evsel->counts, cpu_map_idx, thread) = count;
1626 	return 0;
1627 }
1628 
1629 static int evsel__match_other_cpu(struct evsel *evsel, struct evsel *other,
1630 				  int cpu_map_idx)
1631 {
1632 	struct perf_cpu cpu;
1633 
1634 	cpu = perf_cpu_map__cpu(evsel->core.cpus, cpu_map_idx);
1635 	return perf_cpu_map__idx(other->core.cpus, cpu);
1636 }
1637 
1638 static int evsel__hybrid_group_cpu_map_idx(struct evsel *evsel, int cpu_map_idx)
1639 {
1640 	struct evsel *leader = evsel__leader(evsel);
1641 
1642 	if ((evsel__is_hybrid(evsel) && !evsel__is_hybrid(leader)) ||
1643 	    (!evsel__is_hybrid(evsel) && evsel__is_hybrid(leader))) {
1644 		return evsel__match_other_cpu(evsel, leader, cpu_map_idx);
1645 	}
1646 
1647 	return cpu_map_idx;
1648 }
1649 
1650 static int get_group_fd(struct evsel *evsel, int cpu_map_idx, int thread)
1651 {
1652 	struct evsel *leader = evsel__leader(evsel);
1653 	int fd;
1654 
1655 	if (evsel__is_group_leader(evsel))
1656 		return -1;
1657 
1658 	/*
1659 	 * Leader must be already processed/open,
1660 	 * if not it's a bug.
1661 	 */
1662 	BUG_ON(!leader->core.fd);
1663 
1664 	cpu_map_idx = evsel__hybrid_group_cpu_map_idx(evsel, cpu_map_idx);
1665 	if (cpu_map_idx == -1)
1666 		return -1;
1667 
1668 	fd = FD(leader, cpu_map_idx, thread);
1669 	BUG_ON(fd == -1 && !leader->skippable);
1670 
1671 	/*
1672 	 * When the leader has been skipped, return -2 to distinguish from no
1673 	 * group leader case.
1674 	 */
1675 	return fd == -1 ? -2 : fd;
1676 }
1677 
1678 static void evsel__remove_fd(struct evsel *pos, int nr_cpus, int nr_threads, int thread_idx)
1679 {
1680 	for (int cpu = 0; cpu < nr_cpus; cpu++)
1681 		for (int thread = thread_idx; thread < nr_threads - 1; thread++)
1682 			FD(pos, cpu, thread) = FD(pos, cpu, thread + 1);
1683 }
1684 
1685 static int update_fds(struct evsel *evsel,
1686 		      int nr_cpus, int cpu_map_idx,
1687 		      int nr_threads, int thread_idx)
1688 {
1689 	struct evsel *pos;
1690 
1691 	if (cpu_map_idx >= nr_cpus || thread_idx >= nr_threads)
1692 		return -EINVAL;
1693 
1694 	evlist__for_each_entry(evsel->evlist, pos) {
1695 		nr_cpus = pos != evsel ? nr_cpus : cpu_map_idx;
1696 
1697 		evsel__remove_fd(pos, nr_cpus, nr_threads, thread_idx);
1698 
1699 		/*
1700 		 * Since fds for next evsel has not been created,
1701 		 * there is no need to iterate whole event list.
1702 		 */
1703 		if (pos == evsel)
1704 			break;
1705 	}
1706 	return 0;
1707 }
1708 
1709 static bool evsel__ignore_missing_thread(struct evsel *evsel,
1710 					 int nr_cpus, int cpu_map_idx,
1711 					 struct perf_thread_map *threads,
1712 					 int thread, int err)
1713 {
1714 	pid_t ignore_pid = perf_thread_map__pid(threads, thread);
1715 
1716 	if (!evsel->ignore_missing_thread)
1717 		return false;
1718 
1719 	/* The system wide setup does not work with threads. */
1720 	if (evsel->core.system_wide)
1721 		return false;
1722 
1723 	/* The -ESRCH is perf event syscall errno for pid's not found. */
1724 	if (err != -ESRCH)
1725 		return false;
1726 
1727 	/* If there's only one thread, let it fail. */
1728 	if (threads->nr == 1)
1729 		return false;
1730 
1731 	/*
1732 	 * We should remove fd for missing_thread first
1733 	 * because thread_map__remove() will decrease threads->nr.
1734 	 */
1735 	if (update_fds(evsel, nr_cpus, cpu_map_idx, threads->nr, thread))
1736 		return false;
1737 
1738 	if (thread_map__remove(threads, thread))
1739 		return false;
1740 
1741 	pr_warning("WARNING: Ignored open failure for pid %d\n",
1742 		   ignore_pid);
1743 	return true;
1744 }
1745 
1746 static int __open_attr__fprintf(FILE *fp, const char *name, const char *val,
1747 				void *priv __maybe_unused)
1748 {
1749 	return fprintf(fp, "  %-32s %s\n", name, val);
1750 }
1751 
1752 static void display_attr(struct perf_event_attr *attr)
1753 {
1754 	if (verbose >= 2 || debug_peo_args) {
1755 		fprintf(stderr, "%.60s\n", graph_dotted_line);
1756 		fprintf(stderr, "perf_event_attr:\n");
1757 		perf_event_attr__fprintf(stderr, attr, __open_attr__fprintf, NULL);
1758 		fprintf(stderr, "%.60s\n", graph_dotted_line);
1759 	}
1760 }
1761 
1762 bool evsel__precise_ip_fallback(struct evsel *evsel)
1763 {
1764 	/* Do not try less precise if not requested. */
1765 	if (!evsel->precise_max)
1766 		return false;
1767 
1768 	/*
1769 	 * We tried all the precise_ip values, and it's
1770 	 * still failing, so leave it to standard fallback.
1771 	 */
1772 	if (!evsel->core.attr.precise_ip) {
1773 		evsel->core.attr.precise_ip = evsel->precise_ip_original;
1774 		return false;
1775 	}
1776 
1777 	if (!evsel->precise_ip_original)
1778 		evsel->precise_ip_original = evsel->core.attr.precise_ip;
1779 
1780 	evsel->core.attr.precise_ip--;
1781 	pr_debug2_peo("decreasing precise_ip by one (%d)\n", evsel->core.attr.precise_ip);
1782 	display_attr(&evsel->core.attr);
1783 	return true;
1784 }
1785 
1786 static struct perf_cpu_map *empty_cpu_map;
1787 static struct perf_thread_map *empty_thread_map;
1788 
1789 static int __evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus,
1790 		struct perf_thread_map *threads)
1791 {
1792 	int nthreads = perf_thread_map__nr(threads);
1793 
1794 	if ((perf_missing_features.write_backward && evsel->core.attr.write_backward) ||
1795 	    (perf_missing_features.aux_output     && evsel->core.attr.aux_output))
1796 		return -EINVAL;
1797 
1798 	if (cpus == NULL) {
1799 		if (empty_cpu_map == NULL) {
1800 			empty_cpu_map = perf_cpu_map__dummy_new();
1801 			if (empty_cpu_map == NULL)
1802 				return -ENOMEM;
1803 		}
1804 
1805 		cpus = empty_cpu_map;
1806 	}
1807 
1808 	if (threads == NULL) {
1809 		if (empty_thread_map == NULL) {
1810 			empty_thread_map = thread_map__new_by_tid(-1);
1811 			if (empty_thread_map == NULL)
1812 				return -ENOMEM;
1813 		}
1814 
1815 		threads = empty_thread_map;
1816 	}
1817 
1818 	if (evsel->core.fd == NULL &&
1819 	    perf_evsel__alloc_fd(&evsel->core, perf_cpu_map__nr(cpus), nthreads) < 0)
1820 		return -ENOMEM;
1821 
1822 	evsel->open_flags = PERF_FLAG_FD_CLOEXEC;
1823 	if (evsel->cgrp)
1824 		evsel->open_flags |= PERF_FLAG_PID_CGROUP;
1825 
1826 	return 0;
1827 }
1828 
1829 static void evsel__disable_missing_features(struct evsel *evsel)
1830 {
1831 	if (perf_missing_features.read_lost)
1832 		evsel->core.attr.read_format &= ~PERF_FORMAT_LOST;
1833 	if (perf_missing_features.weight_struct) {
1834 		evsel__set_sample_bit(evsel, WEIGHT);
1835 		evsel__reset_sample_bit(evsel, WEIGHT_STRUCT);
1836 	}
1837 	if (perf_missing_features.clockid_wrong)
1838 		evsel->core.attr.clockid = CLOCK_MONOTONIC; /* should always work */
1839 	if (perf_missing_features.clockid) {
1840 		evsel->core.attr.use_clockid = 0;
1841 		evsel->core.attr.clockid = 0;
1842 	}
1843 	if (perf_missing_features.cloexec)
1844 		evsel->open_flags &= ~(unsigned long)PERF_FLAG_FD_CLOEXEC;
1845 	if (perf_missing_features.mmap2)
1846 		evsel->core.attr.mmap2 = 0;
1847 	if (evsel->pmu && evsel->pmu->missing_features.exclude_guest)
1848 		evsel->core.attr.exclude_guest = evsel->core.attr.exclude_host = 0;
1849 	if (perf_missing_features.lbr_flags)
1850 		evsel->core.attr.branch_sample_type &= ~(PERF_SAMPLE_BRANCH_NO_FLAGS |
1851 				     PERF_SAMPLE_BRANCH_NO_CYCLES);
1852 	if (perf_missing_features.group_read && evsel->core.attr.inherit)
1853 		evsel->core.attr.read_format &= ~(PERF_FORMAT_GROUP|PERF_FORMAT_ID);
1854 	if (perf_missing_features.ksymbol)
1855 		evsel->core.attr.ksymbol = 0;
1856 	if (perf_missing_features.bpf)
1857 		evsel->core.attr.bpf_event = 0;
1858 	if (perf_missing_features.branch_hw_idx)
1859 		evsel->core.attr.branch_sample_type &= ~PERF_SAMPLE_BRANCH_HW_INDEX;
1860 	if (perf_missing_features.sample_id_all)
1861 		evsel->core.attr.sample_id_all = 0;
1862 }
1863 
1864 int evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus,
1865 			struct perf_thread_map *threads)
1866 {
1867 	int err;
1868 
1869 	err = __evsel__prepare_open(evsel, cpus, threads);
1870 	if (err)
1871 		return err;
1872 
1873 	evsel__disable_missing_features(evsel);
1874 
1875 	return err;
1876 }
1877 
1878 bool evsel__detect_missing_features(struct evsel *evsel)
1879 {
1880 	/*
1881 	 * Must probe features in the order they were added to the
1882 	 * perf_event_attr interface.
1883 	 */
1884 	if (!perf_missing_features.read_lost &&
1885 	    (evsel->core.attr.read_format & PERF_FORMAT_LOST)) {
1886 		perf_missing_features.read_lost = true;
1887 		pr_debug2("switching off PERF_FORMAT_LOST support\n");
1888 		return true;
1889 	} else if (!perf_missing_features.weight_struct &&
1890 	    (evsel->core.attr.sample_type & PERF_SAMPLE_WEIGHT_STRUCT)) {
1891 		perf_missing_features.weight_struct = true;
1892 		pr_debug2("switching off weight struct support\n");
1893 		return true;
1894 	} else if (!perf_missing_features.code_page_size &&
1895 	    (evsel->core.attr.sample_type & PERF_SAMPLE_CODE_PAGE_SIZE)) {
1896 		perf_missing_features.code_page_size = true;
1897 		pr_debug2_peo("Kernel has no PERF_SAMPLE_CODE_PAGE_SIZE support, bailing out\n");
1898 		return false;
1899 	} else if (!perf_missing_features.data_page_size &&
1900 	    (evsel->core.attr.sample_type & PERF_SAMPLE_DATA_PAGE_SIZE)) {
1901 		perf_missing_features.data_page_size = true;
1902 		pr_debug2_peo("Kernel has no PERF_SAMPLE_DATA_PAGE_SIZE support, bailing out\n");
1903 		return false;
1904 	} else if (!perf_missing_features.cgroup && evsel->core.attr.cgroup) {
1905 		perf_missing_features.cgroup = true;
1906 		pr_debug2_peo("Kernel has no cgroup sampling support, bailing out\n");
1907 		return false;
1908 	} else if (!perf_missing_features.branch_hw_idx &&
1909 	    (evsel->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_HW_INDEX)) {
1910 		perf_missing_features.branch_hw_idx = true;
1911 		pr_debug2("switching off branch HW index support\n");
1912 		return true;
1913 	} else if (!perf_missing_features.aux_output && evsel->core.attr.aux_output) {
1914 		perf_missing_features.aux_output = true;
1915 		pr_debug2_peo("Kernel has no attr.aux_output support, bailing out\n");
1916 		return false;
1917 	} else if (!perf_missing_features.bpf && evsel->core.attr.bpf_event) {
1918 		perf_missing_features.bpf = true;
1919 		pr_debug2_peo("switching off bpf_event\n");
1920 		return true;
1921 	} else if (!perf_missing_features.ksymbol && evsel->core.attr.ksymbol) {
1922 		perf_missing_features.ksymbol = true;
1923 		pr_debug2_peo("switching off ksymbol\n");
1924 		return true;
1925 	} else if (!perf_missing_features.write_backward && evsel->core.attr.write_backward) {
1926 		perf_missing_features.write_backward = true;
1927 		pr_debug2_peo("switching off write_backward\n");
1928 		return false;
1929 	} else if (!perf_missing_features.clockid_wrong && evsel->core.attr.use_clockid) {
1930 		perf_missing_features.clockid_wrong = true;
1931 		pr_debug2_peo("switching off clockid\n");
1932 		return true;
1933 	} else if (!perf_missing_features.clockid && evsel->core.attr.use_clockid) {
1934 		perf_missing_features.clockid = true;
1935 		pr_debug2_peo("switching off use_clockid\n");
1936 		return true;
1937 	} else if (!perf_missing_features.cloexec && (evsel->open_flags & PERF_FLAG_FD_CLOEXEC)) {
1938 		perf_missing_features.cloexec = true;
1939 		pr_debug2_peo("switching off cloexec flag\n");
1940 		return true;
1941 	} else if (!perf_missing_features.mmap2 && evsel->core.attr.mmap2) {
1942 		perf_missing_features.mmap2 = true;
1943 		pr_debug2_peo("switching off mmap2\n");
1944 		return true;
1945 	} else if (evsel->core.attr.exclude_guest || evsel->core.attr.exclude_host) {
1946 		if (evsel->pmu == NULL)
1947 			evsel->pmu = evsel__find_pmu(evsel);
1948 
1949 		if (evsel->pmu)
1950 			evsel->pmu->missing_features.exclude_guest = true;
1951 		else {
1952 			/* we cannot find PMU, disable attrs now */
1953 			evsel->core.attr.exclude_host = false;
1954 			evsel->core.attr.exclude_guest = false;
1955 		}
1956 
1957 		if (evsel->exclude_GH) {
1958 			pr_debug2_peo("PMU has no exclude_host/guest support, bailing out\n");
1959 			return false;
1960 		}
1961 		if (!perf_missing_features.exclude_guest) {
1962 			perf_missing_features.exclude_guest = true;
1963 			pr_debug2_peo("switching off exclude_guest, exclude_host\n");
1964 		}
1965 		return true;
1966 	} else if (!perf_missing_features.sample_id_all) {
1967 		perf_missing_features.sample_id_all = true;
1968 		pr_debug2_peo("switching off sample_id_all\n");
1969 		return true;
1970 	} else if (!perf_missing_features.lbr_flags &&
1971 			(evsel->core.attr.branch_sample_type &
1972 			 (PERF_SAMPLE_BRANCH_NO_CYCLES |
1973 			  PERF_SAMPLE_BRANCH_NO_FLAGS))) {
1974 		perf_missing_features.lbr_flags = true;
1975 		pr_debug2_peo("switching off branch sample type no (cycles/flags)\n");
1976 		return true;
1977 	} else if (!perf_missing_features.group_read &&
1978 		    evsel->core.attr.inherit &&
1979 		   (evsel->core.attr.read_format & PERF_FORMAT_GROUP) &&
1980 		   evsel__is_group_leader(evsel)) {
1981 		perf_missing_features.group_read = true;
1982 		pr_debug2_peo("switching off group read\n");
1983 		return true;
1984 	} else {
1985 		return false;
1986 	}
1987 }
1988 
1989 bool evsel__increase_rlimit(enum rlimit_action *set_rlimit)
1990 {
1991 	int old_errno;
1992 	struct rlimit l;
1993 
1994 	if (*set_rlimit < INCREASED_MAX) {
1995 		old_errno = errno;
1996 
1997 		if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
1998 			if (*set_rlimit == NO_CHANGE) {
1999 				l.rlim_cur = l.rlim_max;
2000 			} else {
2001 				l.rlim_cur = l.rlim_max + 1000;
2002 				l.rlim_max = l.rlim_cur;
2003 			}
2004 			if (setrlimit(RLIMIT_NOFILE, &l) == 0) {
2005 				(*set_rlimit) += 1;
2006 				errno = old_errno;
2007 				return true;
2008 			}
2009 		}
2010 		errno = old_errno;
2011 	}
2012 
2013 	return false;
2014 }
2015 
2016 static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus,
2017 		struct perf_thread_map *threads,
2018 		int start_cpu_map_idx, int end_cpu_map_idx)
2019 {
2020 	int idx, thread, nthreads;
2021 	int pid = -1, err, old_errno;
2022 	enum rlimit_action set_rlimit = NO_CHANGE;
2023 
2024 	err = __evsel__prepare_open(evsel, cpus, threads);
2025 	if (err)
2026 		return err;
2027 
2028 	if (cpus == NULL)
2029 		cpus = empty_cpu_map;
2030 
2031 	if (threads == NULL)
2032 		threads = empty_thread_map;
2033 
2034 	nthreads = perf_thread_map__nr(threads);
2035 
2036 	if (evsel->cgrp)
2037 		pid = evsel->cgrp->fd;
2038 
2039 fallback_missing_features:
2040 	evsel__disable_missing_features(evsel);
2041 
2042 	pr_debug3("Opening: %s\n", evsel__name(evsel));
2043 	display_attr(&evsel->core.attr);
2044 
2045 	for (idx = start_cpu_map_idx; idx < end_cpu_map_idx; idx++) {
2046 
2047 		for (thread = 0; thread < nthreads; thread++) {
2048 			int fd, group_fd;
2049 retry_open:
2050 			if (thread >= nthreads)
2051 				break;
2052 
2053 			if (!evsel->cgrp && !evsel->core.system_wide)
2054 				pid = perf_thread_map__pid(threads, thread);
2055 
2056 			group_fd = get_group_fd(evsel, idx, thread);
2057 
2058 			if (group_fd == -2) {
2059 				pr_debug("broken group leader for %s\n", evsel->name);
2060 				err = -EINVAL;
2061 				goto out_close;
2062 			}
2063 
2064 			test_attr__ready();
2065 
2066 			/* Debug message used by test scripts */
2067 			pr_debug2_peo("sys_perf_event_open: pid %d  cpu %d  group_fd %d  flags %#lx",
2068 				pid, perf_cpu_map__cpu(cpus, idx).cpu, group_fd, evsel->open_flags);
2069 
2070 			fd = sys_perf_event_open(&evsel->core.attr, pid,
2071 						perf_cpu_map__cpu(cpus, idx).cpu,
2072 						group_fd, evsel->open_flags);
2073 
2074 			FD(evsel, idx, thread) = fd;
2075 
2076 			if (fd < 0) {
2077 				err = -errno;
2078 
2079 				pr_debug2_peo("\nsys_perf_event_open failed, error %d\n",
2080 					  err);
2081 				goto try_fallback;
2082 			}
2083 
2084 			bpf_counter__install_pe(evsel, idx, fd);
2085 
2086 			if (unlikely(test_attr__enabled)) {
2087 				test_attr__open(&evsel->core.attr, pid,
2088 						perf_cpu_map__cpu(cpus, idx),
2089 						fd, group_fd, evsel->open_flags);
2090 			}
2091 
2092 			/* Debug message used by test scripts */
2093 			pr_debug2_peo(" = %d\n", fd);
2094 
2095 			if (evsel->bpf_fd >= 0) {
2096 				int evt_fd = fd;
2097 				int bpf_fd = evsel->bpf_fd;
2098 
2099 				err = ioctl(evt_fd,
2100 					    PERF_EVENT_IOC_SET_BPF,
2101 					    bpf_fd);
2102 				if (err && errno != EEXIST) {
2103 					pr_err("failed to attach bpf fd %d: %s\n",
2104 					       bpf_fd, strerror(errno));
2105 					err = -EINVAL;
2106 					goto out_close;
2107 				}
2108 			}
2109 
2110 			set_rlimit = NO_CHANGE;
2111 
2112 			/*
2113 			 * If we succeeded but had to kill clockid, fail and
2114 			 * have evsel__open_strerror() print us a nice error.
2115 			 */
2116 			if (perf_missing_features.clockid ||
2117 			    perf_missing_features.clockid_wrong) {
2118 				err = -EINVAL;
2119 				goto out_close;
2120 			}
2121 		}
2122 	}
2123 
2124 	return 0;
2125 
2126 try_fallback:
2127 	if (evsel__precise_ip_fallback(evsel))
2128 		goto retry_open;
2129 
2130 	if (evsel__ignore_missing_thread(evsel, perf_cpu_map__nr(cpus),
2131 					 idx, threads, thread, err)) {
2132 		/* We just removed 1 thread, so lower the upper nthreads limit. */
2133 		nthreads--;
2134 
2135 		/* ... and pretend like nothing have happened. */
2136 		err = 0;
2137 		goto retry_open;
2138 	}
2139 	/*
2140 	 * perf stat needs between 5 and 22 fds per CPU. When we run out
2141 	 * of them try to increase the limits.
2142 	 */
2143 	if (err == -EMFILE && evsel__increase_rlimit(&set_rlimit))
2144 		goto retry_open;
2145 
2146 	if (err != -EINVAL || idx > 0 || thread > 0)
2147 		goto out_close;
2148 
2149 	if (evsel__detect_missing_features(evsel))
2150 		goto fallback_missing_features;
2151 out_close:
2152 	if (err)
2153 		threads->err_thread = thread;
2154 
2155 	old_errno = errno;
2156 	do {
2157 		while (--thread >= 0) {
2158 			if (FD(evsel, idx, thread) >= 0)
2159 				close(FD(evsel, idx, thread));
2160 			FD(evsel, idx, thread) = -1;
2161 		}
2162 		thread = nthreads;
2163 	} while (--idx >= 0);
2164 	errno = old_errno;
2165 	return err;
2166 }
2167 
2168 int evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus,
2169 		struct perf_thread_map *threads)
2170 {
2171 	return evsel__open_cpu(evsel, cpus, threads, 0, perf_cpu_map__nr(cpus));
2172 }
2173 
2174 void evsel__close(struct evsel *evsel)
2175 {
2176 	perf_evsel__close(&evsel->core);
2177 	perf_evsel__free_id(&evsel->core);
2178 }
2179 
2180 int evsel__open_per_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, int cpu_map_idx)
2181 {
2182 	if (cpu_map_idx == -1)
2183 		return evsel__open_cpu(evsel, cpus, NULL, 0, perf_cpu_map__nr(cpus));
2184 
2185 	return evsel__open_cpu(evsel, cpus, NULL, cpu_map_idx, cpu_map_idx + 1);
2186 }
2187 
2188 int evsel__open_per_thread(struct evsel *evsel, struct perf_thread_map *threads)
2189 {
2190 	return evsel__open(evsel, NULL, threads);
2191 }
2192 
2193 static int perf_evsel__parse_id_sample(const struct evsel *evsel,
2194 				       const union perf_event *event,
2195 				       struct perf_sample *sample)
2196 {
2197 	u64 type = evsel->core.attr.sample_type;
2198 	const __u64 *array = event->sample.array;
2199 	bool swapped = evsel->needs_swap;
2200 	union u64_swap u;
2201 
2202 	array += ((event->header.size -
2203 		   sizeof(event->header)) / sizeof(u64)) - 1;
2204 
2205 	if (type & PERF_SAMPLE_IDENTIFIER) {
2206 		sample->id = *array;
2207 		array--;
2208 	}
2209 
2210 	if (type & PERF_SAMPLE_CPU) {
2211 		u.val64 = *array;
2212 		if (swapped) {
2213 			/* undo swap of u64, then swap on individual u32s */
2214 			u.val64 = bswap_64(u.val64);
2215 			u.val32[0] = bswap_32(u.val32[0]);
2216 		}
2217 
2218 		sample->cpu = u.val32[0];
2219 		array--;
2220 	}
2221 
2222 	if (type & PERF_SAMPLE_STREAM_ID) {
2223 		sample->stream_id = *array;
2224 		array--;
2225 	}
2226 
2227 	if (type & PERF_SAMPLE_ID) {
2228 		sample->id = *array;
2229 		array--;
2230 	}
2231 
2232 	if (type & PERF_SAMPLE_TIME) {
2233 		sample->time = *array;
2234 		array--;
2235 	}
2236 
2237 	if (type & PERF_SAMPLE_TID) {
2238 		u.val64 = *array;
2239 		if (swapped) {
2240 			/* undo swap of u64, then swap on individual u32s */
2241 			u.val64 = bswap_64(u.val64);
2242 			u.val32[0] = bswap_32(u.val32[0]);
2243 			u.val32[1] = bswap_32(u.val32[1]);
2244 		}
2245 
2246 		sample->pid = u.val32[0];
2247 		sample->tid = u.val32[1];
2248 		array--;
2249 	}
2250 
2251 	return 0;
2252 }
2253 
2254 static inline bool overflow(const void *endp, u16 max_size, const void *offset,
2255 			    u64 size)
2256 {
2257 	return size > max_size || offset + size > endp;
2258 }
2259 
2260 #define OVERFLOW_CHECK(offset, size, max_size)				\
2261 	do {								\
2262 		if (overflow(endp, (max_size), (offset), (size)))	\
2263 			return -EFAULT;					\
2264 	} while (0)
2265 
2266 #define OVERFLOW_CHECK_u64(offset) \
2267 	OVERFLOW_CHECK(offset, sizeof(u64), sizeof(u64))
2268 
2269 static int
2270 perf_event__check_size(union perf_event *event, unsigned int sample_size)
2271 {
2272 	/*
2273 	 * The evsel's sample_size is based on PERF_SAMPLE_MASK which includes
2274 	 * up to PERF_SAMPLE_PERIOD.  After that overflow() must be used to
2275 	 * check the format does not go past the end of the event.
2276 	 */
2277 	if (sample_size + sizeof(event->header) > event->header.size)
2278 		return -EFAULT;
2279 
2280 	return 0;
2281 }
2282 
2283 void __weak arch_perf_parse_sample_weight(struct perf_sample *data,
2284 					  const __u64 *array,
2285 					  u64 type __maybe_unused)
2286 {
2287 	data->weight = *array;
2288 }
2289 
2290 u64 evsel__bitfield_swap_branch_flags(u64 value)
2291 {
2292 	u64 new_val = 0;
2293 
2294 	/*
2295 	 * branch_flags
2296 	 * union {
2297 	 * 	u64 values;
2298 	 * 	struct {
2299 	 * 		mispred:1	//target mispredicted
2300 	 * 		predicted:1	//target predicted
2301 	 * 		in_tx:1		//in transaction
2302 	 * 		abort:1		//transaction abort
2303 	 * 		cycles:16	//cycle count to last branch
2304 	 * 		type:4		//branch type
2305 	 * 		spec:2		//branch speculation info
2306 	 * 		new_type:4	//additional branch type
2307 	 * 		priv:3		//privilege level
2308 	 * 		reserved:31
2309 	 * 	}
2310 	 * }
2311 	 *
2312 	 * Avoid bswap64() the entire branch_flag.value,
2313 	 * as it has variable bit-field sizes. Instead the
2314 	 * macro takes the bit-field position/size,
2315 	 * swaps it based on the host endianness.
2316 	 */
2317 	if (host_is_bigendian()) {
2318 		new_val = bitfield_swap(value, 0, 1);
2319 		new_val |= bitfield_swap(value, 1, 1);
2320 		new_val |= bitfield_swap(value, 2, 1);
2321 		new_val |= bitfield_swap(value, 3, 1);
2322 		new_val |= bitfield_swap(value, 4, 16);
2323 		new_val |= bitfield_swap(value, 20, 4);
2324 		new_val |= bitfield_swap(value, 24, 2);
2325 		new_val |= bitfield_swap(value, 26, 4);
2326 		new_val |= bitfield_swap(value, 30, 3);
2327 		new_val |= bitfield_swap(value, 33, 31);
2328 	} else {
2329 		new_val = bitfield_swap(value, 63, 1);
2330 		new_val |= bitfield_swap(value, 62, 1);
2331 		new_val |= bitfield_swap(value, 61, 1);
2332 		new_val |= bitfield_swap(value, 60, 1);
2333 		new_val |= bitfield_swap(value, 44, 16);
2334 		new_val |= bitfield_swap(value, 40, 4);
2335 		new_val |= bitfield_swap(value, 38, 2);
2336 		new_val |= bitfield_swap(value, 34, 4);
2337 		new_val |= bitfield_swap(value, 31, 3);
2338 		new_val |= bitfield_swap(value, 0, 31);
2339 	}
2340 
2341 	return new_val;
2342 }
2343 
2344 int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
2345 			struct perf_sample *data)
2346 {
2347 	u64 type = evsel->core.attr.sample_type;
2348 	bool swapped = evsel->needs_swap;
2349 	const __u64 *array;
2350 	u16 max_size = event->header.size;
2351 	const void *endp = (void *)event + max_size;
2352 	u64 sz;
2353 
2354 	/*
2355 	 * used for cross-endian analysis. See git commit 65014ab3
2356 	 * for why this goofiness is needed.
2357 	 */
2358 	union u64_swap u;
2359 
2360 	memset(data, 0, sizeof(*data));
2361 	data->cpu = data->pid = data->tid = -1;
2362 	data->stream_id = data->id = data->time = -1ULL;
2363 	data->period = evsel->core.attr.sample_period;
2364 	data->cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
2365 	data->misc    = event->header.misc;
2366 	data->id = -1ULL;
2367 	data->data_src = PERF_MEM_DATA_SRC_NONE;
2368 	data->vcpu = -1;
2369 
2370 	if (event->header.type != PERF_RECORD_SAMPLE) {
2371 		if (!evsel->core.attr.sample_id_all)
2372 			return 0;
2373 		return perf_evsel__parse_id_sample(evsel, event, data);
2374 	}
2375 
2376 	array = event->sample.array;
2377 
2378 	if (perf_event__check_size(event, evsel->sample_size))
2379 		return -EFAULT;
2380 
2381 	if (type & PERF_SAMPLE_IDENTIFIER) {
2382 		data->id = *array;
2383 		array++;
2384 	}
2385 
2386 	if (type & PERF_SAMPLE_IP) {
2387 		data->ip = *array;
2388 		array++;
2389 	}
2390 
2391 	if (type & PERF_SAMPLE_TID) {
2392 		u.val64 = *array;
2393 		if (swapped) {
2394 			/* undo swap of u64, then swap on individual u32s */
2395 			u.val64 = bswap_64(u.val64);
2396 			u.val32[0] = bswap_32(u.val32[0]);
2397 			u.val32[1] = bswap_32(u.val32[1]);
2398 		}
2399 
2400 		data->pid = u.val32[0];
2401 		data->tid = u.val32[1];
2402 		array++;
2403 	}
2404 
2405 	if (type & PERF_SAMPLE_TIME) {
2406 		data->time = *array;
2407 		array++;
2408 	}
2409 
2410 	if (type & PERF_SAMPLE_ADDR) {
2411 		data->addr = *array;
2412 		array++;
2413 	}
2414 
2415 	if (type & PERF_SAMPLE_ID) {
2416 		data->id = *array;
2417 		array++;
2418 	}
2419 
2420 	if (type & PERF_SAMPLE_STREAM_ID) {
2421 		data->stream_id = *array;
2422 		array++;
2423 	}
2424 
2425 	if (type & PERF_SAMPLE_CPU) {
2426 
2427 		u.val64 = *array;
2428 		if (swapped) {
2429 			/* undo swap of u64, then swap on individual u32s */
2430 			u.val64 = bswap_64(u.val64);
2431 			u.val32[0] = bswap_32(u.val32[0]);
2432 		}
2433 
2434 		data->cpu = u.val32[0];
2435 		array++;
2436 	}
2437 
2438 	if (type & PERF_SAMPLE_PERIOD) {
2439 		data->period = *array;
2440 		array++;
2441 	}
2442 
2443 	if (type & PERF_SAMPLE_READ) {
2444 		u64 read_format = evsel->core.attr.read_format;
2445 
2446 		OVERFLOW_CHECK_u64(array);
2447 		if (read_format & PERF_FORMAT_GROUP)
2448 			data->read.group.nr = *array;
2449 		else
2450 			data->read.one.value = *array;
2451 
2452 		array++;
2453 
2454 		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
2455 			OVERFLOW_CHECK_u64(array);
2456 			data->read.time_enabled = *array;
2457 			array++;
2458 		}
2459 
2460 		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
2461 			OVERFLOW_CHECK_u64(array);
2462 			data->read.time_running = *array;
2463 			array++;
2464 		}
2465 
2466 		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
2467 		if (read_format & PERF_FORMAT_GROUP) {
2468 			const u64 max_group_nr = UINT64_MAX /
2469 					sizeof(struct sample_read_value);
2470 
2471 			if (data->read.group.nr > max_group_nr)
2472 				return -EFAULT;
2473 
2474 			sz = data->read.group.nr * sample_read_value_size(read_format);
2475 			OVERFLOW_CHECK(array, sz, max_size);
2476 			data->read.group.values =
2477 					(struct sample_read_value *)array;
2478 			array = (void *)array + sz;
2479 		} else {
2480 			OVERFLOW_CHECK_u64(array);
2481 			data->read.one.id = *array;
2482 			array++;
2483 
2484 			if (read_format & PERF_FORMAT_LOST) {
2485 				OVERFLOW_CHECK_u64(array);
2486 				data->read.one.lost = *array;
2487 				array++;
2488 			}
2489 		}
2490 	}
2491 
2492 	if (type & PERF_SAMPLE_CALLCHAIN) {
2493 		const u64 max_callchain_nr = UINT64_MAX / sizeof(u64);
2494 
2495 		OVERFLOW_CHECK_u64(array);
2496 		data->callchain = (struct ip_callchain *)array++;
2497 		if (data->callchain->nr > max_callchain_nr)
2498 			return -EFAULT;
2499 		sz = data->callchain->nr * sizeof(u64);
2500 		OVERFLOW_CHECK(array, sz, max_size);
2501 		array = (void *)array + sz;
2502 	}
2503 
2504 	if (type & PERF_SAMPLE_RAW) {
2505 		OVERFLOW_CHECK_u64(array);
2506 		u.val64 = *array;
2507 
2508 		/*
2509 		 * Undo swap of u64, then swap on individual u32s,
2510 		 * get the size of the raw area and undo all of the
2511 		 * swap. The pevent interface handles endianness by
2512 		 * itself.
2513 		 */
2514 		if (swapped) {
2515 			u.val64 = bswap_64(u.val64);
2516 			u.val32[0] = bswap_32(u.val32[0]);
2517 			u.val32[1] = bswap_32(u.val32[1]);
2518 		}
2519 		data->raw_size = u.val32[0];
2520 
2521 		/*
2522 		 * The raw data is aligned on 64bits including the
2523 		 * u32 size, so it's safe to use mem_bswap_64.
2524 		 */
2525 		if (swapped)
2526 			mem_bswap_64((void *) array, data->raw_size);
2527 
2528 		array = (void *)array + sizeof(u32);
2529 
2530 		OVERFLOW_CHECK(array, data->raw_size, max_size);
2531 		data->raw_data = (void *)array;
2532 		array = (void *)array + data->raw_size;
2533 	}
2534 
2535 	if (type & PERF_SAMPLE_BRANCH_STACK) {
2536 		const u64 max_branch_nr = UINT64_MAX /
2537 					  sizeof(struct branch_entry);
2538 		struct branch_entry *e;
2539 		unsigned int i;
2540 
2541 		OVERFLOW_CHECK_u64(array);
2542 		data->branch_stack = (struct branch_stack *)array++;
2543 
2544 		if (data->branch_stack->nr > max_branch_nr)
2545 			return -EFAULT;
2546 
2547 		sz = data->branch_stack->nr * sizeof(struct branch_entry);
2548 		if (evsel__has_branch_hw_idx(evsel)) {
2549 			sz += sizeof(u64);
2550 			e = &data->branch_stack->entries[0];
2551 		} else {
2552 			data->no_hw_idx = true;
2553 			/*
2554 			 * if the PERF_SAMPLE_BRANCH_HW_INDEX is not applied,
2555 			 * only nr and entries[] will be output by kernel.
2556 			 */
2557 			e = (struct branch_entry *)&data->branch_stack->hw_idx;
2558 		}
2559 
2560 		if (swapped) {
2561 			/*
2562 			 * struct branch_flag does not have endian
2563 			 * specific bit field definition. And bswap
2564 			 * will not resolve the issue, since these
2565 			 * are bit fields.
2566 			 *
2567 			 * evsel__bitfield_swap_branch_flags() uses a
2568 			 * bitfield_swap macro to swap the bit position
2569 			 * based on the host endians.
2570 			 */
2571 			for (i = 0; i < data->branch_stack->nr; i++, e++)
2572 				e->flags.value = evsel__bitfield_swap_branch_flags(e->flags.value);
2573 		}
2574 
2575 		OVERFLOW_CHECK(array, sz, max_size);
2576 		array = (void *)array + sz;
2577 	}
2578 
2579 	if (type & PERF_SAMPLE_REGS_USER) {
2580 		OVERFLOW_CHECK_u64(array);
2581 		data->user_regs.abi = *array;
2582 		array++;
2583 
2584 		if (data->user_regs.abi) {
2585 			u64 mask = evsel->core.attr.sample_regs_user;
2586 
2587 			sz = hweight64(mask) * sizeof(u64);
2588 			OVERFLOW_CHECK(array, sz, max_size);
2589 			data->user_regs.mask = mask;
2590 			data->user_regs.regs = (u64 *)array;
2591 			array = (void *)array + sz;
2592 		}
2593 	}
2594 
2595 	if (type & PERF_SAMPLE_STACK_USER) {
2596 		OVERFLOW_CHECK_u64(array);
2597 		sz = *array++;
2598 
2599 		data->user_stack.offset = ((char *)(array - 1)
2600 					  - (char *) event);
2601 
2602 		if (!sz) {
2603 			data->user_stack.size = 0;
2604 		} else {
2605 			OVERFLOW_CHECK(array, sz, max_size);
2606 			data->user_stack.data = (char *)array;
2607 			array = (void *)array + sz;
2608 			OVERFLOW_CHECK_u64(array);
2609 			data->user_stack.size = *array++;
2610 			if (WARN_ONCE(data->user_stack.size > sz,
2611 				      "user stack dump failure\n"))
2612 				return -EFAULT;
2613 		}
2614 	}
2615 
2616 	if (type & PERF_SAMPLE_WEIGHT_TYPE) {
2617 		OVERFLOW_CHECK_u64(array);
2618 		arch_perf_parse_sample_weight(data, array, type);
2619 		array++;
2620 	}
2621 
2622 	if (type & PERF_SAMPLE_DATA_SRC) {
2623 		OVERFLOW_CHECK_u64(array);
2624 		data->data_src = *array;
2625 		array++;
2626 	}
2627 
2628 	if (type & PERF_SAMPLE_TRANSACTION) {
2629 		OVERFLOW_CHECK_u64(array);
2630 		data->transaction = *array;
2631 		array++;
2632 	}
2633 
2634 	data->intr_regs.abi = PERF_SAMPLE_REGS_ABI_NONE;
2635 	if (type & PERF_SAMPLE_REGS_INTR) {
2636 		OVERFLOW_CHECK_u64(array);
2637 		data->intr_regs.abi = *array;
2638 		array++;
2639 
2640 		if (data->intr_regs.abi != PERF_SAMPLE_REGS_ABI_NONE) {
2641 			u64 mask = evsel->core.attr.sample_regs_intr;
2642 
2643 			sz = hweight64(mask) * sizeof(u64);
2644 			OVERFLOW_CHECK(array, sz, max_size);
2645 			data->intr_regs.mask = mask;
2646 			data->intr_regs.regs = (u64 *)array;
2647 			array = (void *)array + sz;
2648 		}
2649 	}
2650 
2651 	data->phys_addr = 0;
2652 	if (type & PERF_SAMPLE_PHYS_ADDR) {
2653 		data->phys_addr = *array;
2654 		array++;
2655 	}
2656 
2657 	data->cgroup = 0;
2658 	if (type & PERF_SAMPLE_CGROUP) {
2659 		data->cgroup = *array;
2660 		array++;
2661 	}
2662 
2663 	data->data_page_size = 0;
2664 	if (type & PERF_SAMPLE_DATA_PAGE_SIZE) {
2665 		data->data_page_size = *array;
2666 		array++;
2667 	}
2668 
2669 	data->code_page_size = 0;
2670 	if (type & PERF_SAMPLE_CODE_PAGE_SIZE) {
2671 		data->code_page_size = *array;
2672 		array++;
2673 	}
2674 
2675 	if (type & PERF_SAMPLE_AUX) {
2676 		OVERFLOW_CHECK_u64(array);
2677 		sz = *array++;
2678 
2679 		OVERFLOW_CHECK(array, sz, max_size);
2680 		/* Undo swap of data */
2681 		if (swapped)
2682 			mem_bswap_64((char *)array, sz);
2683 		data->aux_sample.size = sz;
2684 		data->aux_sample.data = (char *)array;
2685 		array = (void *)array + sz;
2686 	}
2687 
2688 	return 0;
2689 }
2690 
2691 int evsel__parse_sample_timestamp(struct evsel *evsel, union perf_event *event,
2692 				  u64 *timestamp)
2693 {
2694 	u64 type = evsel->core.attr.sample_type;
2695 	const __u64 *array;
2696 
2697 	if (!(type & PERF_SAMPLE_TIME))
2698 		return -1;
2699 
2700 	if (event->header.type != PERF_RECORD_SAMPLE) {
2701 		struct perf_sample data = {
2702 			.time = -1ULL,
2703 		};
2704 
2705 		if (!evsel->core.attr.sample_id_all)
2706 			return -1;
2707 		if (perf_evsel__parse_id_sample(evsel, event, &data))
2708 			return -1;
2709 
2710 		*timestamp = data.time;
2711 		return 0;
2712 	}
2713 
2714 	array = event->sample.array;
2715 
2716 	if (perf_event__check_size(event, evsel->sample_size))
2717 		return -EFAULT;
2718 
2719 	if (type & PERF_SAMPLE_IDENTIFIER)
2720 		array++;
2721 
2722 	if (type & PERF_SAMPLE_IP)
2723 		array++;
2724 
2725 	if (type & PERF_SAMPLE_TID)
2726 		array++;
2727 
2728 	if (type & PERF_SAMPLE_TIME)
2729 		*timestamp = *array;
2730 
2731 	return 0;
2732 }
2733 
2734 u16 evsel__id_hdr_size(struct evsel *evsel)
2735 {
2736 	u64 sample_type = evsel->core.attr.sample_type;
2737 	u16 size = 0;
2738 
2739 	if (sample_type & PERF_SAMPLE_TID)
2740 		size += sizeof(u64);
2741 
2742 	if (sample_type & PERF_SAMPLE_TIME)
2743 		size += sizeof(u64);
2744 
2745 	if (sample_type & PERF_SAMPLE_ID)
2746 		size += sizeof(u64);
2747 
2748 	if (sample_type & PERF_SAMPLE_STREAM_ID)
2749 		size += sizeof(u64);
2750 
2751 	if (sample_type & PERF_SAMPLE_CPU)
2752 		size += sizeof(u64);
2753 
2754 	if (sample_type & PERF_SAMPLE_IDENTIFIER)
2755 		size += sizeof(u64);
2756 
2757 	return size;
2758 }
2759 
2760 #ifdef HAVE_LIBTRACEEVENT
2761 struct tep_format_field *evsel__field(struct evsel *evsel, const char *name)
2762 {
2763 	return tep_find_field(evsel->tp_format, name);
2764 }
2765 
2766 void *evsel__rawptr(struct evsel *evsel, struct perf_sample *sample, const char *name)
2767 {
2768 	struct tep_format_field *field = evsel__field(evsel, name);
2769 	int offset;
2770 
2771 	if (!field)
2772 		return NULL;
2773 
2774 	offset = field->offset;
2775 
2776 	if (field->flags & TEP_FIELD_IS_DYNAMIC) {
2777 		offset = *(int *)(sample->raw_data + field->offset);
2778 		offset &= 0xffff;
2779 		if (tep_field_is_relative(field->flags))
2780 			offset += field->offset + field->size;
2781 	}
2782 
2783 	return sample->raw_data + offset;
2784 }
2785 
2786 u64 format_field__intval(struct tep_format_field *field, struct perf_sample *sample,
2787 			 bool needs_swap)
2788 {
2789 	u64 value;
2790 	void *ptr = sample->raw_data + field->offset;
2791 
2792 	switch (field->size) {
2793 	case 1:
2794 		return *(u8 *)ptr;
2795 	case 2:
2796 		value = *(u16 *)ptr;
2797 		break;
2798 	case 4:
2799 		value = *(u32 *)ptr;
2800 		break;
2801 	case 8:
2802 		memcpy(&value, ptr, sizeof(u64));
2803 		break;
2804 	default:
2805 		return 0;
2806 	}
2807 
2808 	if (!needs_swap)
2809 		return value;
2810 
2811 	switch (field->size) {
2812 	case 2:
2813 		return bswap_16(value);
2814 	case 4:
2815 		return bswap_32(value);
2816 	case 8:
2817 		return bswap_64(value);
2818 	default:
2819 		return 0;
2820 	}
2821 
2822 	return 0;
2823 }
2824 
2825 u64 evsel__intval(struct evsel *evsel, struct perf_sample *sample, const char *name)
2826 {
2827 	struct tep_format_field *field = evsel__field(evsel, name);
2828 
2829 	if (!field)
2830 		return 0;
2831 
2832 	return field ? format_field__intval(field, sample, evsel->needs_swap) : 0;
2833 }
2834 #endif
2835 
2836 bool evsel__fallback(struct evsel *evsel, int err, char *msg, size_t msgsize)
2837 {
2838 	int paranoid;
2839 
2840 	if ((err == ENOENT || err == ENXIO || err == ENODEV) &&
2841 	    evsel->core.attr.type   == PERF_TYPE_HARDWARE &&
2842 	    evsel->core.attr.config == PERF_COUNT_HW_CPU_CYCLES) {
2843 		/*
2844 		 * If it's cycles then fall back to hrtimer based
2845 		 * cpu-clock-tick sw counter, which is always available even if
2846 		 * no PMU support.
2847 		 *
2848 		 * PPC returns ENXIO until 2.6.37 (behavior changed with commit
2849 		 * b0a873e).
2850 		 */
2851 		scnprintf(msg, msgsize, "%s",
2852 "The cycles event is not supported, trying to fall back to cpu-clock-ticks");
2853 
2854 		evsel->core.attr.type   = PERF_TYPE_SOFTWARE;
2855 		evsel->core.attr.config = PERF_COUNT_SW_CPU_CLOCK;
2856 
2857 		zfree(&evsel->name);
2858 		return true;
2859 	} else if (err == EACCES && !evsel->core.attr.exclude_kernel &&
2860 		   (paranoid = perf_event_paranoid()) > 1) {
2861 		const char *name = evsel__name(evsel);
2862 		char *new_name;
2863 		const char *sep = ":";
2864 
2865 		/* If event has exclude user then don't exclude kernel. */
2866 		if (evsel->core.attr.exclude_user)
2867 			return false;
2868 
2869 		/* Is there already the separator in the name. */
2870 		if (strchr(name, '/') ||
2871 		    (strchr(name, ':') && !evsel->is_libpfm_event))
2872 			sep = "";
2873 
2874 		if (asprintf(&new_name, "%s%su", name, sep) < 0)
2875 			return false;
2876 
2877 		free(evsel->name);
2878 		evsel->name = new_name;
2879 		scnprintf(msg, msgsize, "kernel.perf_event_paranoid=%d, trying "
2880 			  "to fall back to excluding kernel and hypervisor "
2881 			  " samples", paranoid);
2882 		evsel->core.attr.exclude_kernel = 1;
2883 		evsel->core.attr.exclude_hv     = 1;
2884 
2885 		return true;
2886 	}
2887 
2888 	return false;
2889 }
2890 
2891 static bool find_process(const char *name)
2892 {
2893 	size_t len = strlen(name);
2894 	DIR *dir;
2895 	struct dirent *d;
2896 	int ret = -1;
2897 
2898 	dir = opendir(procfs__mountpoint());
2899 	if (!dir)
2900 		return false;
2901 
2902 	/* Walk through the directory. */
2903 	while (ret && (d = readdir(dir)) != NULL) {
2904 		char path[PATH_MAX];
2905 		char *data;
2906 		size_t size;
2907 
2908 		if ((d->d_type != DT_DIR) ||
2909 		     !strcmp(".", d->d_name) ||
2910 		     !strcmp("..", d->d_name))
2911 			continue;
2912 
2913 		scnprintf(path, sizeof(path), "%s/%s/comm",
2914 			  procfs__mountpoint(), d->d_name);
2915 
2916 		if (filename__read_str(path, &data, &size))
2917 			continue;
2918 
2919 		ret = strncmp(name, data, len);
2920 		free(data);
2921 	}
2922 
2923 	closedir(dir);
2924 	return ret ? false : true;
2925 }
2926 
2927 int __weak arch_evsel__open_strerror(struct evsel *evsel __maybe_unused,
2928 				     char *msg __maybe_unused,
2929 				     size_t size __maybe_unused)
2930 {
2931 	return 0;
2932 }
2933 
2934 int evsel__open_strerror(struct evsel *evsel, struct target *target,
2935 			 int err, char *msg, size_t size)
2936 {
2937 	char sbuf[STRERR_BUFSIZE];
2938 	int printed = 0, enforced = 0;
2939 	int ret;
2940 
2941 	switch (err) {
2942 	case EPERM:
2943 	case EACCES:
2944 		printed += scnprintf(msg + printed, size - printed,
2945 			"Access to performance monitoring and observability operations is limited.\n");
2946 
2947 		if (!sysfs__read_int("fs/selinux/enforce", &enforced)) {
2948 			if (enforced) {
2949 				printed += scnprintf(msg + printed, size - printed,
2950 					"Enforced MAC policy settings (SELinux) can limit access to performance\n"
2951 					"monitoring and observability operations. Inspect system audit records for\n"
2952 					"more perf_event access control information and adjusting the policy.\n");
2953 			}
2954 		}
2955 
2956 		if (err == EPERM)
2957 			printed += scnprintf(msg, size,
2958 				"No permission to enable %s event.\n\n", evsel__name(evsel));
2959 
2960 		return scnprintf(msg + printed, size - printed,
2961 		 "Consider adjusting /proc/sys/kernel/perf_event_paranoid setting to open\n"
2962 		 "access to performance monitoring and observability operations for processes\n"
2963 		 "without CAP_PERFMON, CAP_SYS_PTRACE or CAP_SYS_ADMIN Linux capability.\n"
2964 		 "More information can be found at 'Perf events and tool security' document:\n"
2965 		 "https://www.kernel.org/doc/html/latest/admin-guide/perf-security.html\n"
2966 		 "perf_event_paranoid setting is %d:\n"
2967 		 "  -1: Allow use of (almost) all events by all users\n"
2968 		 "      Ignore mlock limit after perf_event_mlock_kb without CAP_IPC_LOCK\n"
2969 		 ">= 0: Disallow raw and ftrace function tracepoint access\n"
2970 		 ">= 1: Disallow CPU event access\n"
2971 		 ">= 2: Disallow kernel profiling\n"
2972 		 "To make the adjusted perf_event_paranoid setting permanent preserve it\n"
2973 		 "in /etc/sysctl.conf (e.g. kernel.perf_event_paranoid = <setting>)",
2974 		 perf_event_paranoid());
2975 	case ENOENT:
2976 		return scnprintf(msg, size, "The %s event is not supported.", evsel__name(evsel));
2977 	case EMFILE:
2978 		return scnprintf(msg, size, "%s",
2979 			 "Too many events are opened.\n"
2980 			 "Probably the maximum number of open file descriptors has been reached.\n"
2981 			 "Hint: Try again after reducing the number of events.\n"
2982 			 "Hint: Try increasing the limit with 'ulimit -n <limit>'");
2983 	case ENOMEM:
2984 		if (evsel__has_callchain(evsel) &&
2985 		    access("/proc/sys/kernel/perf_event_max_stack", F_OK) == 0)
2986 			return scnprintf(msg, size,
2987 					 "Not enough memory to setup event with callchain.\n"
2988 					 "Hint: Try tweaking /proc/sys/kernel/perf_event_max_stack\n"
2989 					 "Hint: Current value: %d", sysctl__max_stack());
2990 		break;
2991 	case ENODEV:
2992 		if (target->cpu_list)
2993 			return scnprintf(msg, size, "%s",
2994 	 "No such device - did you specify an out-of-range profile CPU?");
2995 		break;
2996 	case EOPNOTSUPP:
2997 		if (evsel->core.attr.sample_type & PERF_SAMPLE_BRANCH_STACK)
2998 			return scnprintf(msg, size,
2999 	"%s: PMU Hardware or event type doesn't support branch stack sampling.",
3000 					 evsel__name(evsel));
3001 		if (evsel->core.attr.aux_output)
3002 			return scnprintf(msg, size,
3003 	"%s: PMU Hardware doesn't support 'aux_output' feature",
3004 					 evsel__name(evsel));
3005 		if (evsel->core.attr.sample_period != 0)
3006 			return scnprintf(msg, size,
3007 	"%s: PMU Hardware doesn't support sampling/overflow-interrupts. Try 'perf stat'",
3008 					 evsel__name(evsel));
3009 		if (evsel->core.attr.precise_ip)
3010 			return scnprintf(msg, size, "%s",
3011 	"\'precise\' request may not be supported. Try removing 'p' modifier.");
3012 #if defined(__i386__) || defined(__x86_64__)
3013 		if (evsel->core.attr.type == PERF_TYPE_HARDWARE)
3014 			return scnprintf(msg, size, "%s",
3015 	"No hardware sampling interrupt available.\n");
3016 #endif
3017 		break;
3018 	case EBUSY:
3019 		if (find_process("oprofiled"))
3020 			return scnprintf(msg, size,
3021 	"The PMU counters are busy/taken by another profiler.\n"
3022 	"We found oprofile daemon running, please stop it and try again.");
3023 		break;
3024 	case EINVAL:
3025 		if (evsel->core.attr.sample_type & PERF_SAMPLE_CODE_PAGE_SIZE && perf_missing_features.code_page_size)
3026 			return scnprintf(msg, size, "Asking for the code page size isn't supported by this kernel.");
3027 		if (evsel->core.attr.sample_type & PERF_SAMPLE_DATA_PAGE_SIZE && perf_missing_features.data_page_size)
3028 			return scnprintf(msg, size, "Asking for the data page size isn't supported by this kernel.");
3029 		if (evsel->core.attr.write_backward && perf_missing_features.write_backward)
3030 			return scnprintf(msg, size, "Reading from overwrite event is not supported by this kernel.");
3031 		if (perf_missing_features.clockid)
3032 			return scnprintf(msg, size, "clockid feature not supported.");
3033 		if (perf_missing_features.clockid_wrong)
3034 			return scnprintf(msg, size, "wrong clockid (%d).", clockid);
3035 		if (perf_missing_features.aux_output)
3036 			return scnprintf(msg, size, "The 'aux_output' feature is not supported, update the kernel.");
3037 		if (!target__has_cpu(target))
3038 			return scnprintf(msg, size,
3039 	"Invalid event (%s) in per-thread mode, enable system wide with '-a'.",
3040 					evsel__name(evsel));
3041 
3042 		break;
3043 	case ENODATA:
3044 		return scnprintf(msg, size, "Cannot collect data source with the load latency event alone. "
3045 				 "Please add an auxiliary event in front of the load latency event.");
3046 	default:
3047 		break;
3048 	}
3049 
3050 	ret = arch_evsel__open_strerror(evsel, msg, size);
3051 	if (ret)
3052 		return ret;
3053 
3054 	return scnprintf(msg, size,
3055 	"The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n"
3056 	"/bin/dmesg | grep -i perf may provide additional information.\n",
3057 			 err, str_error_r(err, sbuf, sizeof(sbuf)), evsel__name(evsel));
3058 }
3059 
3060 struct perf_env *evsel__env(struct evsel *evsel)
3061 {
3062 	if (evsel && evsel->evlist && evsel->evlist->env)
3063 		return evsel->evlist->env;
3064 	return &perf_env;
3065 }
3066 
3067 static int store_evsel_ids(struct evsel *evsel, struct evlist *evlist)
3068 {
3069 	int cpu_map_idx, thread;
3070 
3071 	for (cpu_map_idx = 0; cpu_map_idx < xyarray__max_x(evsel->core.fd); cpu_map_idx++) {
3072 		for (thread = 0; thread < xyarray__max_y(evsel->core.fd);
3073 		     thread++) {
3074 			int fd = FD(evsel, cpu_map_idx, thread);
3075 
3076 			if (perf_evlist__id_add_fd(&evlist->core, &evsel->core,
3077 						   cpu_map_idx, thread, fd) < 0)
3078 				return -1;
3079 		}
3080 	}
3081 
3082 	return 0;
3083 }
3084 
3085 int evsel__store_ids(struct evsel *evsel, struct evlist *evlist)
3086 {
3087 	struct perf_cpu_map *cpus = evsel->core.cpus;
3088 	struct perf_thread_map *threads = evsel->core.threads;
3089 
3090 	if (perf_evsel__alloc_id(&evsel->core, perf_cpu_map__nr(cpus), threads->nr))
3091 		return -ENOMEM;
3092 
3093 	return store_evsel_ids(evsel, evlist);
3094 }
3095 
3096 void evsel__zero_per_pkg(struct evsel *evsel)
3097 {
3098 	struct hashmap_entry *cur;
3099 	size_t bkt;
3100 
3101 	if (evsel->per_pkg_mask) {
3102 		hashmap__for_each_entry(evsel->per_pkg_mask, cur, bkt)
3103 			zfree(&cur->pkey);
3104 
3105 		hashmap__clear(evsel->per_pkg_mask);
3106 	}
3107 }
3108 
3109 /**
3110  * evsel__is_hybrid - does the evsel have a known PMU that is hybrid. Note, this
3111  *                    will be false on hybrid systems for hardware and legacy
3112  *                    cache events.
3113  */
3114 bool evsel__is_hybrid(const struct evsel *evsel)
3115 {
3116 	if (perf_pmus__num_core_pmus() == 1)
3117 		return false;
3118 
3119 	return evsel->core.is_pmu_core;
3120 }
3121 
3122 struct evsel *evsel__leader(const struct evsel *evsel)
3123 {
3124 	return container_of(evsel->core.leader, struct evsel, core);
3125 }
3126 
3127 bool evsel__has_leader(struct evsel *evsel, struct evsel *leader)
3128 {
3129 	return evsel->core.leader == &leader->core;
3130 }
3131 
3132 bool evsel__is_leader(struct evsel *evsel)
3133 {
3134 	return evsel__has_leader(evsel, evsel);
3135 }
3136 
3137 void evsel__set_leader(struct evsel *evsel, struct evsel *leader)
3138 {
3139 	evsel->core.leader = &leader->core;
3140 }
3141 
3142 int evsel__source_count(const struct evsel *evsel)
3143 {
3144 	struct evsel *pos;
3145 	int count = 0;
3146 
3147 	evlist__for_each_entry(evsel->evlist, pos) {
3148 		if (pos->metric_leader == evsel)
3149 			count++;
3150 	}
3151 	return count;
3152 }
3153 
3154 bool __weak arch_evsel__must_be_in_group(const struct evsel *evsel __maybe_unused)
3155 {
3156 	return false;
3157 }
3158 
3159 /*
3160  * Remove an event from a given group (leader).
3161  * Some events, e.g., perf metrics Topdown events,
3162  * must always be grouped. Ignore the events.
3163  */
3164 void evsel__remove_from_group(struct evsel *evsel, struct evsel *leader)
3165 {
3166 	if (!arch_evsel__must_be_in_group(evsel) && evsel != leader) {
3167 		evsel__set_leader(evsel, evsel);
3168 		evsel->core.nr_members = 0;
3169 		leader->core.nr_members--;
3170 	}
3171 }
3172