13ce311afSJiri Olsa /* SPDX-License-Identifier: GPL-2.0 */
23ce311afSJiri Olsa #ifndef __LIBPERF_INTERNAL_EVSEL_H
33ce311afSJiri Olsa #define __LIBPERF_INTERNAL_EVSEL_H
43ce311afSJiri Olsa 
53ce311afSJiri Olsa #include <linux/types.h>
63ce311afSJiri Olsa #include <linux/perf_event.h>
73ce311afSJiri Olsa #include <stdbool.h>
83ce311afSJiri Olsa #include <sys/types.h>
96d18804bSIan Rogers #include <internal/cpumap.h>
103ce311afSJiri Olsa 
113ce311afSJiri Olsa struct perf_thread_map;
123ce311afSJiri Olsa struct xyarray;
133ce311afSJiri Olsa 
143ce311afSJiri Olsa /*
153ce311afSJiri Olsa  * Per fd, to map back from PERF_SAMPLE_ID to evsel, only used when there are
163ce311afSJiri Olsa  * more than one entry in the evlist.
173ce311afSJiri Olsa  */
struct perf_sample_id {
	/* Node in the evlist hash table used to look entries up by 'id'. */
	struct hlist_node	 node;
	/* The PERF_SAMPLE_ID value this entry maps from. */
	u64			 id;
	/* The evsel this sample ID maps back to. */
	struct perf_evsel	*evsel;
	/*
	 * 'idx' will be used for AUX area sampling. A sample will have AUX area
	 * data that will be queued for decoding, where there are separate
	 * queues for each CPU (per-cpu tracing) or task (per-thread tracing).
	 * The sample ID can be used to lookup 'idx' which is effectively the
	 * queue number.
	 */
	int			 idx;
	/* CPU recorded for this sample ID — presumably -1 when not per-CPU; confirm at the fill site. */
	struct perf_cpu		 cpu;
	/* Thread id recorded for this sample ID. */
	pid_t			 tid;

	/* Guest machine pid and VCPU, valid only if machine_pid is non-zero */
	pid_t			 machine_pid;
	struct perf_cpu		 vcpu;

	/* Holds total ID period value for PERF_SAMPLE_READ processing. */
	u64			 period;
};
403ce311afSJiri Olsa 
struct perf_evsel {
	/* Node in the owning evlist's list of events. */
	struct list_head	 node;
	/* Attribute describing the event to perf_event_open(). */
	struct perf_event_attr	 attr;
	/** The commonly used cpu map of CPUs the event should be opened upon, etc. */
	struct perf_cpu_map	*cpus;
	/**
	 * The cpu map read from the PMU. For core PMUs this is the list of all
	 * CPUs the event can be opened upon. For other PMUs this is the default
	 * cpu map for opening the event on, for example, the first CPU on a
	 * socket for an uncore event.
	 */
	struct perf_cpu_map	*own_cpus;
	/* Threads the event should be opened upon. */
	struct perf_thread_map	*threads;
	/* Per CPU/thread array of event file descriptors. */
	struct xyarray		*fd;
	/* Per CPU/thread array of mmap state — see perf_evsel mmap helpers for the element type. */
	struct xyarray		*mmap;
	/* Per CPU/thread array of sample ID state. */
	struct xyarray		*sample_id;
	/* Array of sample IDs belonging to this evsel; 'ids' entries long. */
	u64			*id;
	/* Number of entries in 'id'. */
	u32			 ids;
	/* The group leader evsel for this event. */
	struct perf_evsel	*leader;

	/* parse modifier helper */
	int			 nr_members;
	/*
	 * system_wide is for events that need to be on every CPU, irrespective
	 * of user requested CPUs or threads. The main example of this is the
	 * dummy event. Map propagation will set cpus for this event to all CPUs
	 * as software PMU events like dummy have a CPU map that is empty.
	 */
	bool			 system_wide;
	/*
	 * Some events, for example uncore events, require a CPU.
	 * i.e. it cannot be the 'any CPU' value of -1.
	 */
	bool			 requires_cpu;
	/** Is the PMU for the event a core one? Affects the handling of own_cpus. */
	bool			 is_pmu_core;
	/* Index of this evsel, set at perf_evsel__init() time. */
	int			 idx;
};
793ce311afSJiri Olsa 
/* Initialize an already-allocated evsel with 'attr' and index 'idx'. */
void perf_evsel__init(struct perf_evsel *evsel, struct perf_event_attr *attr,
		      int idx);
/* Allocate the ncpus x nthreads 'fd' array. NOTE(review): return convention not visible here — presumably 0 on success, negative on error. */
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
/* Close all open file descriptors in 'fd' without freeing the array. */
void perf_evsel__close_fd(struct perf_evsel *evsel);
/* Free the 'fd' array itself. */
void perf_evsel__free_fd(struct perf_evsel *evsel);
/* Size in bytes of one read() of this event, derived from attr.read_format. */
int perf_evsel__read_size(struct perf_evsel *evsel);
/* Apply 'filter' to every open fd of the event. */
int perf_evsel__apply_filter(struct perf_evsel *evsel, const char *filter);

/* Allocate the 'sample_id' array and the 'id' array for ncpus x nthreads entries. */
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
/* Free the 'sample_id' and 'id' arrays and reset 'ids'. */
void perf_evsel__free_id(struct perf_evsel *evsel);
903ce311afSJiri Olsa 
913ce311afSJiri Olsa #endif /* __LIBPERF_INTERNAL_EVSEL_H */
92