/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LIBPERF_INTERNAL_EVSEL_H
#define __LIBPERF_INTERNAL_EVSEL_H

#include <linux/types.h>
#include <linux/perf_event.h>
#include <stdbool.h>
#include <sys/types.h>
#include <internal/cpumap.h>

struct perf_thread_map;
struct xyarray;

/*
 * Per fd, to map back from PERF_SAMPLE_ID to evsel, only used when there is
 * more than one entry in the evlist. An illustrative lookup sketch follows
 * the struct below.
 */
struct perf_sample_id {
	struct hlist_node	 node;
	u64			 id;
	struct perf_evsel	*evsel;
	/*
	 * 'idx' will be used for AUX area sampling. A sample will have AUX area
	 * data that will be queued for decoding, where there are separate
	 * queues for each CPU (per-cpu tracing) or task (per-thread tracing).
	 * The sample ID can be used to look up 'idx', which is effectively the
	 * queue number.
	 */
	int			 idx;
	struct perf_cpu		 cpu;
	pid_t			 tid;

	/* Holds total ID period value for PERF_SAMPLE_READ processing. */
	u64			 period;
};
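
/*
 * Illustrative sketch only (not part of libperf): perf_sample_id entries are
 * chained by 'node' into hash buckets keyed by 'id', so the evsel that
 * produced a sample can be recovered from its PERF_SAMPLE_ID. The function
 * name, the 'heads' bucket table and the modulo hash below are assumptions
 * made for illustration; the real lookup lives in the evlist code.
 */
#if 0
static struct perf_evsel *id_to_evsel(struct hlist_head *heads,
				      int nr_buckets, u64 id)
{
	struct perf_sample_id *sid;

	/* Hash the sample ID to a bucket, then walk its chain. */
	hlist_for_each_entry(sid, &heads[id % nr_buckets], node) {
		if (sid->id == id)
			return sid->evsel;
	}
	return NULL;
}
#endif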

struct perf_evsel {
	struct list_head	 node;
	struct perf_event_attr	 attr;
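	/*
	 * 'own_cpus' is the CPU map the event itself was created with (for
	 * example a PMU's cpumask), while 'cpus' and 'threads' are what the
	 * event actually gets opened on once the evlist has propagated its
	 * maps.
	 */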
	struct perf_cpu_map	*cpus;
	struct perf_cpu_map	*own_cpus;
	struct perf_thread_map	*threads;
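	/*
	 * Per-fd bookkeeping: the xyarrays are indexed by CPU and thread.
	 * 'fd' holds the perf_event_open() file descriptors, 'mmap' the
	 * per-fd mmapped regions (e.g. for userspace counter reads) and
	 * 'sample_id' the perf_sample_id entries; 'id' is the array of
	 * sample IDs and 'ids' its length.
	 */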
	struct xyarray		*fd;
	struct xyarray		*mmap;
	struct xyarray		*sample_id;
	u64			*id;
	u32			 ids;
	struct perf_evsel	*leader;

	/* parse modifier helper */
	int			 nr_members;
	/*
	 * system_wide is for events that need to be on every CPU, irrespective
	 * of user requested CPUs or threads. Map propagation will set cpus to
	 * this event's own_cpus, whereby they will contribute to the evlist's
	 * all_cpus.
	 */
	bool			 system_wide;
	/*
	 * Some events, for example uncore events, require a CPU, i.e. the CPU
	 * they are opened on cannot be the 'any CPU' value of -1.
	 */
	bool			 requires_cpu;
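	/* Index of this evsel within its evlist, as passed to perf_evsel__init(). */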
	int			 idx;
};
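
/*
 * Lifecycle helpers: perf_evsel__init() fills in 'attr' and 'idx',
 * perf_evsel__alloc_fd() sizes the fd xyarray for ncpus x nthreads,
 * perf_evsel__close_fd() closes any open descriptors and
 * perf_evsel__free_fd() releases the array. perf_evsel__read_size()
 * returns the number of bytes a read() of the event is expected to
 * produce given attr.read_format, and perf_evsel__apply_filter() sets a
 * tracepoint filter string on every open fd. perf_evsel__alloc_id() and
 * perf_evsel__free_id() manage the 'sample_id' and 'id' arrays. A rough
 * usage sketch follows the declarations below.
 */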
void perf_evsel__init(struct perf_evsel *evsel, struct perf_event_attr *attr,
		      int idx);
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
void perf_evsel__close_fd(struct perf_evsel *evsel);
void perf_evsel__free_fd(struct perf_evsel *evsel);
int perf_evsel__read_size(struct perf_evsel *evsel);
int perf_evsel__apply_filter(struct perf_evsel *evsel, const char *filter);

int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
void perf_evsel__free_id(struct perf_evsel *evsel);
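
/*
 * Rough usage sketch (illustration only, not buildable as-is): the helpers
 * above are normally driven by the evlist code in roughly this order. The
 * function name, the literal filter string and the open/read steps hinted
 * at in comments are assumptions made for illustration.
 */
#if 0
static int evsel_lifecycle_sketch(struct perf_evsel *evsel,
				  struct perf_event_attr *attr,
				  int ncpus, int nthreads)
{
	perf_evsel__init(evsel, attr, /*idx=*/0);

	if (perf_evsel__alloc_fd(evsel, ncpus, nthreads) < 0)
		return -1;

	/* ... perf_event_open() per CPU/thread, storing fds in evsel->fd ... */

	/* Tracepoint events only: push a filter string down to every fd. */
	perf_evsel__apply_filter(evsel, "common_pid != 0");

	/* ... read()s of perf_evsel__read_size(evsel) bytes each, etc ... */

	perf_evsel__close_fd(evsel);
	perf_evsel__free_fd(evsel);
	return 0;
}
#endif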

#endif /* __LIBPERF_INTERNAL_EVSEL_H */