/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PERF_EVSEL_H
#define __PERF_EVSEL_H 1

#include <linux/list.h>
#include <stdbool.h>
#include <stddef.h>
#include <linux/perf_event.h>
#include <linux/types.h>
#include "xyarray.h"
#include "symbol.h"
#include "cpumap.h"
#include "counts.h"

struct perf_evsel;

/*
 * Per fd, to map back from PERF_SAMPLE_ID to evsel, only used when there is
 * more than one entry in the evlist.
 */
struct perf_sample_id {
	struct hlist_node 	node;
	u64		 	id;
	struct perf_evsel	*evsel;
	int			idx;
	int			cpu;
	pid_t			tid;

	/* Holds total ID period value for PERF_SAMPLE_READ processing. */
	u64			period;
};
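
/*
 * A minimal lookup sketch (illustrative only, assuming the
 * perf_evlist__id2evsel() helper declared in evlist.h): a sample that carried
 * PERF_SAMPLE_ID is hashed back to the evsel that produced it via these
 * entries.
 *
 *	struct perf_evsel *evsel = perf_evlist__id2evsel(evlist, sample->id);
 *
 *	if (evsel == NULL)
 *		return -1;	// id unknown to this evlist
 */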

struct cgroup;

/*
 * The 'struct perf_evsel_config_term' is used to pass event
 * specific configuration data to perf_evsel__config routine.
 * It is allocated within event parsing and attached to
 * perf_evsel::config_terms list head.
 */
enum term_type {
	PERF_EVSEL__CONFIG_TERM_PERIOD,
	PERF_EVSEL__CONFIG_TERM_FREQ,
	PERF_EVSEL__CONFIG_TERM_TIME,
	PERF_EVSEL__CONFIG_TERM_CALLGRAPH,
	PERF_EVSEL__CONFIG_TERM_STACK_USER,
	PERF_EVSEL__CONFIG_TERM_INHERIT,
	PERF_EVSEL__CONFIG_TERM_MAX_STACK,
	PERF_EVSEL__CONFIG_TERM_MAX_EVENTS,
	PERF_EVSEL__CONFIG_TERM_OVERWRITE,
	PERF_EVSEL__CONFIG_TERM_DRV_CFG,
	PERF_EVSEL__CONFIG_TERM_BRANCH,
};

struct perf_evsel_config_term {
	struct list_head	list;
	enum term_type	type;
	union {
		u64	period;
		u64	freq;
		bool	time;
		char	*callgraph;
		char	*drv_cfg;
		u64	stack_user;
		int	max_stack;
		bool	inherit;
		bool	overwrite;
		char	*branch;
		unsigned long max_events;
	} val;
	bool weak;
};
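
/*
 * A minimal construction sketch (illustrative only, error handling trimmed):
 * event parsing allocates a term, fills in its type and value, and appends it
 * to the evsel's config_terms list, which perf_evsel__config() later walks.
 *
 *	struct perf_evsel_config_term *term = calloc(1, sizeof(*term));
 *
 *	if (term == NULL)
 *		return -ENOMEM;
 *	term->type       = PERF_EVSEL__CONFIG_TERM_PERIOD;
 *	term->val.period = 4000;
 *	term->weak       = false;
 *	list_add_tail(&term->list, &evsel->config_terms);
 */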

struct perf_stat_evsel;

/** struct perf_evsel - event selector
 *
 * @evlist: the evlist this evsel is in, if it is in one.
 * @node: used to insert it into evlist->entries or into other list_heads,
 *        say in the event parsing routines.
 * @name: can be set to retain the original event name passed by the user,
 *        so that when showing results in tools such as 'perf stat', we
 *        show the name used, not some alias.
 * @id_pos: the position of the event id (PERF_SAMPLE_ID or
 *          PERF_SAMPLE_IDENTIFIER) in a sample event, i.e. in the array of
 *          struct sample_event
 * @is_pos: the position (counting backwards) of the event id (PERF_SAMPLE_ID or
 *          PERF_SAMPLE_IDENTIFIER) in a non-sample event, i.e. if sample_id_all
 *          is used there is an id sample appended to non-sample events
 * @priv: tool-specific private data
 */
struct perf_evsel {
	struct list_head	node;
	struct perf_evlist	*evlist;
	struct perf_event_attr	attr;
	char			*filter;
	struct xyarray		*fd;
	struct xyarray		*sample_id;
	u64			*id;
	struct perf_counts	*counts;
	struct perf_counts	*prev_raw_counts;
	int			idx;
	u32			ids;
	unsigned long		max_events;
	unsigned long		nr_events_printed;
	char			*name;
	double			scale;
	const char		*unit;
	struct tep_event	*tp_format;
	off_t			id_offset;
	struct perf_stat_evsel  *stats;
	void			*priv;
	u64			db_id;
	struct cgroup		*cgrp;
	void			*handler;
	struct cpu_map		*cpus;
	struct cpu_map		*own_cpus;
	struct thread_map	*threads;
	unsigned int		sample_size;
	int			id_pos;
	int			is_pos;
	bool			uniquified_name;
	bool			snapshot;
	bool 			supported;
	bool 			needs_swap;
	bool 			disabled;
	bool			no_aux_samples;
	bool			immediate;
	bool			system_wide;
	bool			tracking;
	bool			per_pkg;
	bool			precise_max;
	bool			ignore_missing_thread;
	bool			forced_leader;
	bool			use_uncore_alias;
	/* parse modifier helper */
	int			exclude_GH;
	int			nr_members;
	int			sample_read;
	unsigned long		*per_pkg_mask;
	struct perf_evsel	*leader;
	char			*group_name;
	bool			cmdline_group_boundary;
	struct list_head	config_terms;
	int			bpf_fd;
	bool			auto_merge_stats;
	bool			merged_stat;
	const char *		metric_expr;
	const char *		metric_name;
	struct perf_evsel	**metric_events;
	bool			collect_stat;
	bool			weak_group;
	const char		*pmu_name;
};

union u64_swap {
	u64 val64;
	u32 val32[2];
};

struct perf_missing_features {
	bool sample_id_all;
	bool exclude_guest;
	bool mmap2;
	bool cloexec;
	bool clockid;
	bool clockid_wrong;
	bool lbr_flags;
	bool write_backward;
	bool group_read;
};

extern struct perf_missing_features perf_missing_features;

struct cpu_map;
struct target;
struct thread_map;
struct record_opts;

static inline struct cpu_map *perf_evsel__cpus(struct perf_evsel *evsel)
{
	return evsel->cpus;
}

static inline int perf_evsel__nr_cpus(struct perf_evsel *evsel)
{
	return perf_evsel__cpus(evsel)->nr;
}

void perf_counts_values__scale(struct perf_counts_values *count,
			       bool scale, s8 *pscaled);
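
/*
 * Scaling sketch (illustrative only; the val/ena/run field names follow
 * struct perf_counts_values in counts.h): when a counter was time-multiplexed,
 * the raw value is extrapolated as val * time_enabled / time_running.
 *
 *	struct perf_counts_values count;
 *	s8 scaled;
 *
 *	perf_evsel__read(evsel, cpu, thread, &count);
 *	perf_counts_values__scale(&count, true, &scaled);
 *	// count.val now approximates the fully-enabled count; 'scaled'
 *	// records whether scaling was applied or the counter never ran.
 */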

void perf_evsel__compute_deltas(struct perf_evsel *evsel, int cpu, int thread,
				struct perf_counts_values *count);

int perf_evsel__object_config(size_t object_size,
			      int (*init)(struct perf_evsel *evsel),
			      void (*fini)(struct perf_evsel *evsel));

struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx);

static inline struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr)
{
	return perf_evsel__new_idx(attr, 0);
}

struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx);

/*
 * Returns a pointer with the error encoded via the <linux/err.h> interface.
 */
static inline struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name)
{
	return perf_evsel__newtp_idx(sys, name, 0);
}
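
/*
 * Because the error is encoded in the returned pointer, callers should check
 * it with the <linux/err.h> helpers rather than against NULL, e.g.:
 *
 *	struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch");
 *
 *	if (IS_ERR(evsel))
 *		return PTR_ERR(evsel);
 */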

struct perf_evsel *perf_evsel__new_cycles(bool precise);

struct tep_event *event_format__new(const char *sys, const char *name);

void perf_evsel__init(struct perf_evsel *evsel,
		      struct perf_event_attr *attr, int idx);
void perf_evsel__exit(struct perf_evsel *evsel);
void perf_evsel__delete(struct perf_evsel *evsel);

struct callchain_param;

void perf_evsel__config(struct perf_evsel *evsel,
			struct record_opts *opts,
			struct callchain_param *callchain);
void perf_evsel__config_callchain(struct perf_evsel *evsel,
				  struct record_opts *opts,
				  struct callchain_param *callchain);

int __perf_evsel__sample_size(u64 sample_type);
void perf_evsel__calc_id_pos(struct perf_evsel *evsel);

bool perf_evsel__is_cache_op_valid(u8 type, u8 op);

#define PERF_EVSEL__MAX_ALIASES 8

extern const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
				       [PERF_EVSEL__MAX_ALIASES];
extern const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
					  [PERF_EVSEL__MAX_ALIASES];
extern const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
					      [PERF_EVSEL__MAX_ALIASES];
extern const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX];
extern const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX];
int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
					    char *bf, size_t size);
const char *perf_evsel__name(struct perf_evsel *evsel);

const char *perf_evsel__group_name(struct perf_evsel *evsel);
int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size);

int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
void perf_evsel__close_fd(struct perf_evsel *evsel);

void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
				  enum perf_event_sample_format bit);
void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
				    enum perf_event_sample_format bit);

#define perf_evsel__set_sample_bit(evsel, bit) \
	__perf_evsel__set_sample_bit(evsel, PERF_SAMPLE_##bit)

#define perf_evsel__reset_sample_bit(evsel, bit) \
	__perf_evsel__reset_sample_bit(evsel, PERF_SAMPLE_##bit)
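
/*
 * Usage sketch: the bit name is pasted onto PERF_SAMPLE_, so
 *
 *	perf_evsel__set_sample_bit(evsel, TIME);
 *
 * expands to __perf_evsel__set_sample_bit(evsel, PERF_SAMPLE_TIME), setting
 * PERF_SAMPLE_TIME in evsel->attr.sample_type and keeping the evsel's derived
 * sample bookkeeping in sync.
 */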

void perf_evsel__set_sample_id(struct perf_evsel *evsel,
			       bool use_sample_identifier);

int perf_evsel__set_filter(struct perf_evsel *evsel, const char *filter);
int perf_evsel__append_tp_filter(struct perf_evsel *evsel, const char *filter);
int perf_evsel__append_addr_filter(struct perf_evsel *evsel,
				   const char *filter);
int perf_evsel__apply_filter(struct perf_evsel *evsel, const char *filter);
int perf_evsel__enable(struct perf_evsel *evsel);
int perf_evsel__disable(struct perf_evsel *evsel);

int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
			     struct cpu_map *cpus);
int perf_evsel__open_per_thread(struct perf_evsel *evsel,
				struct thread_map *threads);
int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
		     struct thread_map *threads);
void perf_evsel__close(struct perf_evsel *evsel);
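
/*
 * A minimal counting sketch (illustrative only, error handling trimmed;
 * thread_map__new_by_tid() is declared in thread_map.h):
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_HARDWARE,
 *		.config	= PERF_COUNT_HW_CPU_CYCLES,
 *	};
 *	struct perf_evsel *evsel = perf_evsel__new(&attr);
 *	struct thread_map *threads = thread_map__new_by_tid(getpid());
 *
 *	perf_evsel__open(evsel, NULL, threads);
 *	// ... run the workload, then read, close and free ...
 *	perf_evsel__read_on_cpu(evsel, 0, 0);
 *	perf_evsel__close(evsel);
 *	perf_evsel__delete(evsel);
 */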

struct perf_sample;

void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
			 const char *name);
u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
		       const char *name);

static inline char *perf_evsel__strval(struct perf_evsel *evsel,
				       struct perf_sample *sample,
				       const char *name)
{
	return perf_evsel__rawptr(evsel, sample, name);
}
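
/*
 * Typical tracepoint field access from a sample handler (sketch; the field
 * names below follow the sched:sched_switch tracepoint format):
 *
 *	u64   prev_pid  = perf_evsel__intval(evsel, sample, "prev_pid");
 *	char *prev_comm = perf_evsel__strval(evsel, sample, "prev_comm");
 */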

struct tep_format_field;

u64 format_field__intval(struct tep_format_field *field, struct perf_sample *sample, bool needs_swap);

struct tep_format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name);

#define perf_evsel__match(evsel, t, c)		\
	(evsel->attr.type == PERF_TYPE_##t &&	\
	 evsel->attr.config == PERF_COUNT_##c)
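
/*
 * Usage sketch: perf_evsel__match(evsel, HARDWARE, HW_CPU_CYCLES) tests for a
 * PERF_TYPE_HARDWARE / PERF_COUNT_HW_CPU_CYCLES event, mirroring how the
 * perf_evsel__is_clock() and perf_evsel__is_bpf_output() helpers below use it.
 */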

static inline bool perf_evsel__match2(struct perf_evsel *e1,
				      struct perf_evsel *e2)
{
	return (e1->attr.type == e2->attr.type) &&
	       (e1->attr.config == e2->attr.config);
}

#define perf_evsel__cmp(a, b)			\
	((a) &&					\
	 (b) &&					\
	 (a)->attr.type == (b)->attr.type &&	\
	 (a)->attr.config == (b)->attr.config)

int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
		     struct perf_counts_values *count);

int perf_evsel__read_counter(struct perf_evsel *evsel, int cpu, int thread);

int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale);

/**
 * perf_evsel__read_on_cpu - Read out the results on a CPU and thread
 *
 * @evsel: event selector to read from
 * @cpu: CPU of interest
 * @thread: thread of interest
 */
static inline int perf_evsel__read_on_cpu(struct perf_evsel *evsel,
					  int cpu, int thread)
{
	return __perf_evsel__read_on_cpu(evsel, cpu, thread, false);
}

/**
 * perf_evsel__read_on_cpu_scaled - Read out the results on a CPU and thread, scaled
 *
 * @evsel: event selector to read from
 * @cpu: CPU of interest
 * @thread: thread of interest
 */
static inline int perf_evsel__read_on_cpu_scaled(struct perf_evsel *evsel,
						 int cpu, int thread)
{
	return __perf_evsel__read_on_cpu(evsel, cpu, thread, true);
}
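
/*
 * Note (sketch): both variants store the value in the evsel's counts and
 * return 0 or a negative errno; the scaled form additionally extrapolates
 * multiplexed counters from their time_enabled/time_running ratio.
 *
 *	if (perf_evsel__read_on_cpu_scaled(evsel, cpu, 0) < 0)
 *		return -1;
 */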

int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
			     struct perf_sample *sample);

int perf_evsel__parse_sample_timestamp(struct perf_evsel *evsel,
				       union perf_event *event,
				       u64 *timestamp);

static inline struct perf_evsel *perf_evsel__next(struct perf_evsel *evsel)
{
	return list_entry(evsel->node.next, struct perf_evsel, node);
}

static inline struct perf_evsel *perf_evsel__prev(struct perf_evsel *evsel)
{
	return list_entry(evsel->node.prev, struct perf_evsel, node);
}

/**
 * perf_evsel__is_group_leader - Return whether given evsel is a leader event
 *
 * @evsel: event selector to be tested
 *
 * Return %true if @evsel is a group leader or a stand-alone event
 */
static inline bool perf_evsel__is_group_leader(const struct perf_evsel *evsel)
{
	return evsel->leader == evsel;
}

/**
 * perf_evsel__is_group_event - Return whether given evsel is a group event
 *
 * @evsel: event selector to be tested
 *
 * Return %true iff event group view is enabled and @evsel is an actual group
 * leader which has other members in the group
 */
static inline bool perf_evsel__is_group_event(struct perf_evsel *evsel)
{
	if (!symbol_conf.event_group)
		return false;

	return perf_evsel__is_group_leader(evsel) && evsel->nr_members > 1;
}

bool perf_evsel__is_function_event(struct perf_evsel *evsel);

static inline bool perf_evsel__is_bpf_output(struct perf_evsel *evsel)
{
	return perf_evsel__match(evsel, SOFTWARE, SW_BPF_OUTPUT);
}

static inline bool perf_evsel__is_clock(struct perf_evsel *evsel)
{
	return perf_evsel__match(evsel, SOFTWARE, SW_CPU_CLOCK) ||
	       perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK);
}

struct perf_attr_details {
	bool freq;
	bool verbose;
	bool event_group;
	bool force;
	bool trace_fields;
};

int perf_evsel__fprintf(struct perf_evsel *evsel,
			struct perf_attr_details *details, FILE *fp);

#define EVSEL__PRINT_IP			(1<<0)
#define EVSEL__PRINT_SYM		(1<<1)
#define EVSEL__PRINT_DSO		(1<<2)
#define EVSEL__PRINT_SYMOFFSET		(1<<3)
#define EVSEL__PRINT_ONELINE		(1<<4)
#define EVSEL__PRINT_SRCLINE		(1<<5)
#define EVSEL__PRINT_UNKNOWN_AS_ADDR	(1<<6)
#define EVSEL__PRINT_CALLCHAIN_ARROW	(1<<7)
#define EVSEL__PRINT_SKIP_IGNORED	(1<<8)

struct callchain_cursor;

int sample__fprintf_callchain(struct perf_sample *sample, int left_alignment,
			      unsigned int print_opts,
			      struct callchain_cursor *cursor, FILE *fp);

int sample__fprintf_sym(struct perf_sample *sample, struct addr_location *al,
			int left_alignment, unsigned int print_opts,
			struct callchain_cursor *cursor, FILE *fp);
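
/*
 * Flags sketch: the EVSEL__PRINT_* bits combine into a mask, e.g. a one-line
 * resolved callchain print to stdout:
 *
 *	unsigned int print_opts = EVSEL__PRINT_IP | EVSEL__PRINT_SYM |
 *				  EVSEL__PRINT_DSO | EVSEL__PRINT_ONELINE;
 *
 *	sample__fprintf_callchain(sample, 8, print_opts, cursor, stdout);
 */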

bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
			  char *msg, size_t msgsize);
int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
			      int err, char *msg, size_t size);

static inline int perf_evsel__group_idx(struct perf_evsel *evsel)
{
	return evsel->idx - evsel->leader->idx;
}

/* Iterates group WITHOUT the leader. */
#define for_each_group_member(_evsel, _leader)					\
for ((_evsel) = list_entry((_leader)->node.next, struct perf_evsel, node);	\
     (_evsel) && (_evsel)->leader == (_leader);					\
     (_evsel) = list_entry((_evsel)->node.next, struct perf_evsel, node))

/* Iterates group WITH the leader. */
#define for_each_group_evsel(_evsel, _leader)					\
for ((_evsel) = _leader;							\
     (_evsel) && (_evsel)->leader == (_leader);					\
     (_evsel) = list_entry((_evsel)->node.next, struct perf_evsel, node))
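
/*
 * Iteration sketch (assuming 'leader' is a group leader evsel): print the
 * leader followed by each of its members.
 *
 *	struct perf_evsel *pos;
 *
 *	fprintf(stdout, "leader: %s\n", perf_evsel__name(leader));
 *	for_each_group_member(pos, leader)
 *		fprintf(stdout, "  member: %s\n", perf_evsel__name(pos));
 */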

static inline bool perf_evsel__has_branch_callstack(const struct perf_evsel *evsel)
{
	return evsel->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK;
}

static inline bool evsel__has_callchain(const struct perf_evsel *evsel)
{
	return (evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN) != 0;
}

typedef int (*attr__fprintf_f)(FILE *, const char *, const char *, void *);

int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
			     attr__fprintf_f attr__fprintf, void *priv);

struct perf_env *perf_evsel__env(struct perf_evsel *evsel);

int perf_evsel__store_ids(struct perf_evsel *evsel, struct perf_evlist *evlist);
#endif /* __PERF_EVSEL_H */