/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PERF_EVSEL_H
#define __PERF_EVSEL_H 1

#include <linux/list.h>
#include <stdbool.h>
#include <sys/types.h>
#include <linux/perf_event.h>
#include <linux/types.h>
#include <internal/evsel.h>
#include <perf/evsel.h>
#include "symbol_conf.h"

struct bpf_object;
struct cgroup;
struct perf_counts;
struct perf_stat_evsel;
union perf_event;
struct bpf_counter_ops;
struct target;
struct hashmap;
struct bperf_leader_bpf;
struct bperf_follower_bpf;
struct perf_pmu;

typedef int (evsel__sb_cb_t)(union perf_event *event, void *data);
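
/*
 * Example (illustrative sketch): a side-band callback matching
 * evsel__sb_cb_t. The function name, the my_tool_ctx type and the
 * handle_side_band_event() helper are hypothetical; a real tool would
 * pass its own context object via @data.
 *
 *	static int example_sb_cb(union perf_event *event, void *data)
 *	{
 *		struct my_tool_ctx *ctx = data;
 *
 *		return handle_side_band_event(ctx, event);
 *	}
 */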

enum perf_tool_event {
	PERF_TOOL_NONE		= 0,
	PERF_TOOL_DURATION_TIME = 1,
	PERF_TOOL_USER_TIME = 2,
	PERF_TOOL_SYSTEM_TIME = 3,

	PERF_TOOL_MAX,
};

const char *perf_tool_event__to_str(enum perf_tool_event ev);
enum perf_tool_event perf_tool_event__from_str(const char *str);

#define perf_tool_event__for_each_event(ev)		\
	for ((ev) = PERF_TOOL_DURATION_TIME; (ev) < PERF_TOOL_MAX; ev++)
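
/*
 * Example (illustrative sketch): the iterator starts at
 * PERF_TOOL_DURATION_TIME, so PERF_TOOL_NONE is deliberately skipped.
 *
 *	enum perf_tool_event ev;
 *
 *	perf_tool_event__for_each_event(ev)
 *		printf("%s\n", perf_tool_event__to_str(ev));
 */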

/** struct evsel - event selector
 *
 * @evlist: evlist this evsel is in, if it is in one.
 * @core: libperf evsel object
 * @name: Can be set to retain the original event name passed by the user,
 *        so that when showing results in tools such as 'perf stat', we
 *        show the name used, not some alias.
 * @id_pos: the position of the event id (PERF_SAMPLE_ID or
 *          PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of
 *          struct perf_record_sample
 * @is_pos: the position (counting backwards) of the event id (PERF_SAMPLE_ID or
 *          PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if sample_id_all
 *          is used there is an id sample appended to non-sample events
 * @priv: tool-specific data; the other members of its containing unnamed
 *        union are tool specific as well
 */
struct evsel {
	struct perf_evsel	core;
	struct evlist		*evlist;
	off_t			id_offset;
	int			id_pos;
	int			is_pos;
	unsigned int		sample_size;

	/*
	 * These fields can be set in the parse-events code or similar.
	 * Please check evsel__clone() to copy them properly so that
	 * they can be released properly.
	 */
	struct {
		char			*name;
		char			*group_name;
		const char		*pmu_name;
#ifdef HAVE_LIBTRACEEVENT
		struct tep_event	*tp_format;
#endif
		char			*filter;
		unsigned long		max_events;
		double			scale;
		const char		*unit;
		struct cgroup		*cgrp;
		const char		*metric_id;
		enum perf_tool_event	tool_event;
		/* parse modifier helper */
		int			exclude_GH;
		int			sample_read;
		bool			snapshot;
		bool			per_pkg;
		bool			percore;
		bool			precise_max;
		bool			use_uncore_alias;
		bool			is_libpfm_event;
		bool			auto_merge_stats;
		bool			collect_stat;
		bool			weak_group;
		bool			bpf_counter;
		bool			use_config_name;
		int			bpf_fd;
		struct bpf_object	*bpf_obj;
		struct list_head	config_terms;
	};

	/*
	 * Metric fields are similar, but need more care as they can have
	 * references to other metrics (evsels).
	 */
	const char *		metric_expr;
	const char *		metric_name;
	struct evsel		**metric_events;
	struct evsel		*metric_leader;

	void			*handler;
	struct perf_counts	*counts;
	struct perf_counts	*prev_raw_counts;
	unsigned long		nr_events_printed;
	struct perf_stat_evsel  *stats;
	void			*priv;
	u64			db_id;
	bool			uniquified_name;
	bool			supported;
	bool			needs_swap;
	bool			disabled;
	bool			no_aux_samples;
	bool			immediate;
	bool			tracking;
	bool			ignore_missing_thread;
	bool			forced_leader;
	bool			cmdline_group_boundary;
	bool			merged_stat;
	bool			reset_group;
	bool			errored;
	bool			needs_auxtrace_mmap;
	struct hashmap		*per_pkg_mask;
	int			err;
	struct {
		evsel__sb_cb_t	*cb;
		void		*data;
	} side_band;
	/*
	 * For reporting purposes, an evsel sample can have a callchain
	 * synthesized from AUX area data. Keep track of synthesized sample
	 * types here. Note, the recorded sample_type cannot be changed because
	 * it is needed to continue to parse events.
	 * See also evsel__has_callchain().
	 */
	__u64			synth_sample_type;

	/*
	 * bpf_counter_ops serves two use cases:
	 *   1. perf-stat -b          counting events used by BPF programs
	 *   2. perf-stat --use-bpf   use BPF programs to aggregate counts
	 */
	struct bpf_counter_ops	*bpf_counter_ops;

	/* for perf-stat -b */
	struct list_head	bpf_counter_list;

	/* for perf-stat --use-bpf */
	int			bperf_leader_prog_fd;
	int			bperf_leader_link_fd;
	union {
		struct bperf_leader_bpf *leader_skel;
		struct bperf_follower_bpf *follower_skel;
	};
	unsigned long		open_flags;
	int			precise_ip_original;

	/* for missing_features */
	struct perf_pmu		*pmu;
};

struct perf_missing_features {
	bool sample_id_all;
	bool exclude_guest;
	bool mmap2;
	bool cloexec;
	bool clockid;
	bool clockid_wrong;
	bool lbr_flags;
	bool write_backward;
	bool group_read;
	bool ksymbol;
	bool bpf;
	bool aux_output;
	bool branch_hw_idx;
	bool cgroup;
	bool data_page_size;
	bool code_page_size;
	bool weight_struct;
	bool read_lost;
};

extern struct perf_missing_features perf_missing_features;

struct perf_cpu_map;
struct thread_map;
struct record_opts;

static inline struct perf_cpu_map *evsel__cpus(struct evsel *evsel)
{
	return perf_evsel__cpus(&evsel->core);
}

static inline int evsel__nr_cpus(struct evsel *evsel)
{
	return perf_cpu_map__nr(evsel__cpus(evsel));
}

void evsel__compute_deltas(struct evsel *evsel, int cpu, int thread,
			   struct perf_counts_values *count);

int evsel__object_config(size_t object_size,
			 int (*init)(struct evsel *evsel),
			 void (*fini)(struct evsel *evsel));

struct perf_pmu *evsel__find_pmu(struct evsel *evsel);
bool evsel__is_aux_event(struct evsel *evsel);

struct evsel *evsel__new_idx(struct perf_event_attr *attr, int idx);

static inline struct evsel *evsel__new(struct perf_event_attr *attr)
{
	return evsel__new_idx(attr, 0);
}
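
/*
 * Example (illustrative sketch; error handling elided): allocating an
 * evsel for the hardware cycles event. The "attr" variable is local to
 * the example.
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_HARDWARE,
 *		.config	= PERF_COUNT_HW_CPU_CYCLES,
 *	};
 *	struct evsel *evsel = evsel__new(&attr);
 *
 *	if (evsel == NULL)
 *		return -ENOMEM;
 */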

struct evsel *evsel__clone(struct evsel *orig);

int copy_config_terms(struct list_head *dst, struct list_head *src);
void free_config_terms(struct list_head *config_terms);

#ifdef HAVE_LIBTRACEEVENT
struct evsel *evsel__newtp_idx(const char *sys, const char *name, int idx);

/*
 * Returns a pointer with an encoded error via the <linux/err.h> interface.
 */
static inline struct evsel *evsel__newtp(const char *sys, const char *name)
{
	return evsel__newtp_idx(sys, name, 0);
}
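
/*
 * Example (illustrative sketch): creating a tracepoint evsel. Unlike
 * evsel__new(), failure is reported via an encoded error pointer, so
 * check with the <linux/err.h> helpers rather than against NULL.
 *
 *	struct evsel *evsel = evsel__newtp("sched", "sched_switch");
 *
 *	if (IS_ERR(evsel))
 *		return PTR_ERR(evsel);
 */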
#endif

struct evsel *evsel__new_cycles(bool precise, __u32 type, __u64 config);

#ifdef HAVE_LIBTRACEEVENT
struct tep_event *event_format__new(const char *sys, const char *name);
#endif

void evsel__init(struct evsel *evsel, struct perf_event_attr *attr, int idx);
void evsel__exit(struct evsel *evsel);
void evsel__delete(struct evsel *evsel);

struct callchain_param;

void evsel__config(struct evsel *evsel, struct record_opts *opts,
		   struct callchain_param *callchain);
void evsel__config_callchain(struct evsel *evsel, struct record_opts *opts,
			     struct callchain_param *callchain);

int __evsel__sample_size(u64 sample_type);
void evsel__calc_id_pos(struct evsel *evsel);

bool evsel__is_cache_op_valid(u8 type, u8 op);

static inline bool evsel__is_bpf(struct evsel *evsel)
{
	return evsel->bpf_counter_ops != NULL;
}

#define EVSEL__MAX_ALIASES 8

extern const char *const evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX][EVSEL__MAX_ALIASES];
extern const char *const evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX][EVSEL__MAX_ALIASES];
extern const char *const evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX][EVSEL__MAX_ALIASES];
extern const char *const evsel__hw_names[PERF_COUNT_HW_MAX];
extern const char *const evsel__sw_names[PERF_COUNT_SW_MAX];
extern char *evsel__bpf_counter_events;
bool evsel__match_bpf_counter_events(const char *name);
int arch_evsel__hw_name(struct evsel *evsel, char *bf, size_t size);

int __evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result, char *bf, size_t size);
const char *evsel__name(struct evsel *evsel);
const char *evsel__metric_id(const struct evsel *evsel);

static inline bool evsel__is_tool(const struct evsel *evsel)
{
	return evsel->tool_event != PERF_TOOL_NONE;
}

const char *evsel__group_name(struct evsel *evsel);
int evsel__group_desc(struct evsel *evsel, char *buf, size_t size);

void __evsel__set_sample_bit(struct evsel *evsel, enum perf_event_sample_format bit);
void __evsel__reset_sample_bit(struct evsel *evsel, enum perf_event_sample_format bit);

#define evsel__set_sample_bit(evsel, bit) \
	__evsel__set_sample_bit(evsel, PERF_SAMPLE_##bit)

#define evsel__reset_sample_bit(evsel, bit) \
	__evsel__reset_sample_bit(evsel, PERF_SAMPLE_##bit)
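
/*
 * Example (illustrative sketch): the "bit" argument is the suffix of a
 * PERF_SAMPLE_* flag, pasted together by the preprocessor, so
 *
 *	evsel__set_sample_bit(evsel, CALLCHAIN);
 *
 * expands to
 *
 *	__evsel__set_sample_bit(evsel, PERF_SAMPLE_CALLCHAIN);
 */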

void evsel__set_sample_id(struct evsel *evsel, bool use_sample_identifier);

void arch_evsel__set_sample_weight(struct evsel *evsel);
void arch_evsel__fixup_new_cycles(struct perf_event_attr *attr);
void arch__post_evsel_config(struct evsel *evsel, struct perf_event_attr *attr);

int evsel__set_filter(struct evsel *evsel, const char *filter);
int evsel__append_tp_filter(struct evsel *evsel, const char *filter);
int evsel__append_addr_filter(struct evsel *evsel, const char *filter);
int evsel__enable_cpu(struct evsel *evsel, int cpu_map_idx);
int evsel__enable(struct evsel *evsel);
int evsel__disable(struct evsel *evsel);
int evsel__disable_cpu(struct evsel *evsel, int cpu_map_idx);

int evsel__open_per_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, int cpu_map_idx);
int evsel__open_per_thread(struct evsel *evsel, struct perf_thread_map *threads);
int evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus,
		struct perf_thread_map *threads);
void evsel__close(struct evsel *evsel);
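
/*
 * Example (illustrative sketch; error handling elided): a typical
 * counting life cycle. The "cpus" and "threads" maps are hypothetical
 * and owned by the caller.
 *
 *	if (evsel__open(evsel, cpus, threads) < 0)
 *		return -1;
 *	evsel__enable(evsel);
 *	... workload runs ...
 *	evsel__disable(evsel);
 *	evsel__read_counter(evsel, 0, 0);
 *	evsel__close(evsel);
 */
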
int evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus,
		struct perf_thread_map *threads);
bool evsel__detect_missing_features(struct evsel *evsel);

enum rlimit_action { NO_CHANGE, SET_TO_MAX, INCREASED_MAX };
bool evsel__increase_rlimit(enum rlimit_action *set_rlimit);

bool evsel__precise_ip_fallback(struct evsel *evsel);

struct perf_sample;

#ifdef HAVE_LIBTRACEEVENT
void *evsel__rawptr(struct evsel *evsel, struct perf_sample *sample, const char *name);
u64 evsel__intval(struct evsel *evsel, struct perf_sample *sample, const char *name);

static inline char *evsel__strval(struct evsel *evsel, struct perf_sample *sample, const char *name)
{
	return evsel__rawptr(evsel, sample, name);
}
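
/*
 * Example (illustrative sketch): pulling typed fields out of a
 * tracepoint sample; the field names here are those of
 * sched:sched_switch.
 *
 *	u64 prev_pid = evsel__intval(evsel, sample, "prev_pid");
 *	char *prev_comm = evsel__strval(evsel, sample, "prev_comm");
 */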
#endif

struct tep_format_field;

u64 format_field__intval(struct tep_format_field *field, struct perf_sample *sample, bool needs_swap);

struct tep_format_field *evsel__field(struct evsel *evsel, const char *name);

#define evsel__match(evsel, t, c)		\
	(evsel->core.attr.type == PERF_TYPE_##t &&	\
	 evsel->core.attr.config == PERF_COUNT_##c)
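
/*
 * Example (illustrative sketch): "t" and "c" are the suffixes of the
 * PERF_TYPE_* and PERF_COUNT_* constants, so testing for the hardware
 * cycles event reads
 *
 *	if (evsel__match(evsel, HARDWARE, HW_CPU_CYCLES))
 *		...
 */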

static inline bool evsel__match2(struct evsel *e1, struct evsel *e2)
{
	return (e1->core.attr.type == e2->core.attr.type) &&
	       (e1->core.attr.config == e2->core.attr.config);
}

int evsel__read_counter(struct evsel *evsel, int cpu_map_idx, int thread);

int __evsel__read_on_cpu(struct evsel *evsel, int cpu_map_idx, int thread, bool scale);

/**
 * evsel__read_on_cpu - Read out the results on a CPU and thread
 *
 * @evsel: event selector to read value
 * @cpu_map_idx: CPU of interest
 * @thread: thread of interest
 */
static inline int evsel__read_on_cpu(struct evsel *evsel, int cpu_map_idx, int thread)
{
	return __evsel__read_on_cpu(evsel, cpu_map_idx, thread, false);
}

/**
 * evsel__read_on_cpu_scaled - Read out the results on a CPU and thread, scaled
 *
 * @evsel: event selector to read value
 * @cpu_map_idx: CPU of interest
 * @thread: thread of interest
 */
static inline int evsel__read_on_cpu_scaled(struct evsel *evsel, int cpu_map_idx, int thread)
{
	return __evsel__read_on_cpu(evsel, cpu_map_idx, thread, true);
}
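
/*
 * A minimal usage sketch: the scaled variant compensates for event
 * multiplexing by scaling the raw count by time_enabled/time_running,
 * which only matters when the event was not scheduled in the whole time.
 *
 *	evsel__read_on_cpu(evsel, 0, 0);	raw count
 *	evsel__read_on_cpu_scaled(evsel, 0, 0);	scaled count
 */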

int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
			struct perf_sample *sample);

int evsel__parse_sample_timestamp(struct evsel *evsel, union perf_event *event,
				  u64 *timestamp);

u16 evsel__id_hdr_size(struct evsel *evsel);

static inline struct evsel *evsel__next(struct evsel *evsel)
{
	return list_entry(evsel->core.node.next, struct evsel, core.node);
}

static inline struct evsel *evsel__prev(struct evsel *evsel)
{
	return list_entry(evsel->core.node.prev, struct evsel, core.node);
}

/**
 * evsel__is_group_leader - Return whether given evsel is a leader event
 *
 * @evsel: evsel selector to be tested
 *
 * Return %true if @evsel is a group leader or a stand-alone event
 */
static inline bool evsel__is_group_leader(const struct evsel *evsel)
{
	return evsel->core.leader == &evsel->core;
}

/**
 * evsel__is_group_event - Return whether given evsel is a group event
 *
 * @evsel: evsel selector to be tested
 *
 * Return %true iff event group view is enabled and @evsel is an actual group
 * leader which has other members in the group
 */
static inline bool evsel__is_group_event(struct evsel *evsel)
{
	if (!symbol_conf.event_group)
		return false;

	return evsel__is_group_leader(evsel) && evsel->core.nr_members > 1;
}

bool evsel__is_function_event(struct evsel *evsel);

static inline bool evsel__is_bpf_output(struct evsel *evsel)
{
	return evsel__match(evsel, SOFTWARE, SW_BPF_OUTPUT);
}

static inline bool evsel__is_clock(struct evsel *evsel)
{
	return evsel__match(evsel, SOFTWARE, SW_CPU_CLOCK) ||
	       evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK);
}

bool evsel__fallback(struct evsel *evsel, int err, char *msg, size_t msgsize);
int evsel__open_strerror(struct evsel *evsel, struct target *target,
			 int err, char *msg, size_t size);

static inline int evsel__group_idx(struct evsel *evsel)
{
	return evsel->core.idx - evsel->core.leader->idx;
}

/* Iterates group WITHOUT the leader. */
#define for_each_group_member(_evsel, _leader)					\
for ((_evsel) = list_entry((_leader)->core.node.next, struct evsel, core.node);	\
     (_evsel) && (_evsel)->core.leader == (&_leader->core);			\
     (_evsel) = list_entry((_evsel)->core.node.next, struct evsel, core.node))

/* Iterates group WITH the leader. */
#define for_each_group_evsel(_evsel, _leader)					\
for ((_evsel) = _leader;							\
     (_evsel) && (_evsel)->core.leader == (&_leader->core);			\
     (_evsel) = list_entry((_evsel)->core.node.next, struct evsel, core.node))
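
/*
 * Example (illustrative sketch): visiting every event of a group,
 * leader included; "pos" and "leader" are hypothetical evsels.
 *
 *	struct evsel *pos;
 *
 *	for_each_group_evsel(pos, leader)
 *		pr_debug("%s\n", evsel__name(pos));
 */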

static inline bool evsel__has_branch_callstack(const struct evsel *evsel)
{
	return evsel->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK;
}

static inline bool evsel__has_branch_hw_idx(const struct evsel *evsel)
{
	return evsel->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_HW_INDEX;
}

static inline bool evsel__has_callchain(const struct evsel *evsel)
{
	/*
	 * For reporting purposes, an evsel sample can have a recorded callchain
	 * or a callchain synthesized from AUX area data.
	 */
	return evsel->core.attr.sample_type & PERF_SAMPLE_CALLCHAIN ||
	       evsel->synth_sample_type & PERF_SAMPLE_CALLCHAIN;
}

static inline bool evsel__has_br_stack(const struct evsel *evsel)
{
	/*
	 * For reporting purposes, an evsel sample can have a recorded branch
	 * stack or a branch stack synthesized from AUX area data.
	 */
	return evsel->core.attr.sample_type & PERF_SAMPLE_BRANCH_STACK ||
	       evsel->synth_sample_type & PERF_SAMPLE_BRANCH_STACK;
}

static inline bool evsel__is_dummy_event(struct evsel *evsel)
{
	return (evsel->core.attr.type == PERF_TYPE_SOFTWARE) &&
	       (evsel->core.attr.config == PERF_COUNT_SW_DUMMY);
}

struct perf_env *evsel__env(struct evsel *evsel);

int evsel__store_ids(struct evsel *evsel, struct evlist *evlist);

void evsel__zero_per_pkg(struct evsel *evsel);
bool evsel__is_hybrid(const struct evsel *evsel);
struct evsel *evsel__leader(struct evsel *evsel);
bool evsel__has_leader(struct evsel *evsel, struct evsel *leader);
bool evsel__is_leader(struct evsel *evsel);
void evsel__set_leader(struct evsel *evsel, struct evsel *leader);
int evsel__source_count(const struct evsel *evsel);
void evsel__remove_from_group(struct evsel *evsel, struct evsel *leader);

bool arch_evsel__must_be_in_group(const struct evsel *evsel);

/*
 * Macro to swap a bit-field's position and size.
 * Used when:
 * - the entire u64 does not need to be swapped, and
 * - the u64 has variable bit-field sizes, and
 * - the data is presented in a host endianness different from
 *   the source endianness of the perf.data file
 */
#define bitfield_swap(src, pos, size)	\
	((((src) >> (pos)) & ((1ull << (size)) - 1)) << (63 - ((pos) + (size) - 1)))
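
/*
 * Worked example (illustrative): for pos = 0 and size = 4, the low
 * nibble is extracted and shifted left by 63 - 3 = 60 bits, i.e. the
 * field is mirrored to the opposite end of the u64:
 *
 *	bitfield_swap(0x000000000000000full, 0, 4) == 0xf000000000000000ull
 */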

u64 evsel__bitfield_swap_branch_flags(u64 value);
#endif /* __PERF_EVSEL_H */