xref: /openbmc/linux/tools/perf/util/env.h (revision 177fe2a7)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PERF_ENV_H
#define __PERF_ENV_H

#include <linux/types.h>
#include <linux/rbtree.h>
#include "cpumap.h"
#include "rwsem.h"

struct perf_cpu_map;

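/* Per-CPU physical topology identifiers. */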
struct cpu_topology_map {
	int	socket_id;
	int	die_id;
	int	core_id;
};

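/*
 * One level of the CPU cache hierarchy; type, size and the shared CPU
 * map are kept as the strings read from sysfs.
 */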
struct cpu_cache_level {
	u32	level;
	u32	line_size;
	u32	sets;
	u32	ways;
	char	*type;
	char	*size;
	char	*map;
};

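/* Memory size and CPU map of one NUMA node. */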
struct numa_node {
	u32		 node;
	u64		 mem_total;
	u64		 mem_free;
	struct perf_cpu_map	*map;
};

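/*
 * One node of the memory topology: @set is a bitmap of the memory
 * blocks present on @node (the block size is perf_env::memory_bsize).
 */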
struct memory_node {
	u64		 node;
	u64		 size;
	unsigned long	*set;
};

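/* Name and CPU list of one hybrid PMU (e.g. cpu_core/cpu_atom). */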
struct hybrid_node {
	char	*pmu_name;
	char	*cpus;
};

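/* Capability ("name=value") strings advertised by one PMU. */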
struct pmu_caps {
	int		nr_caps;
	unsigned int    max_branches;
	char            **caps;
	char            *pmu_name;
};

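/*
 * Environment information about the system a perf.data file was recorded
 * on (or about the running host), as carried in the perf.data header
 * features.
 */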
struct perf_env {
	char			*hostname;
	char			*os_release;
	char			*version;
	char			*arch;
	int			nr_cpus_online;
	int			nr_cpus_avail;
	char			*cpu_desc;
	char			*cpuid;
	unsigned long long	total_mem;
	unsigned int		msr_pmu_type;
	unsigned int		max_branches;
	int			kernel_is_64_bit;

	int			nr_cmdline;
	int			nr_sibling_cores;
	int			nr_sibling_dies;
	int			nr_sibling_threads;
	int			nr_numa_nodes;
	int			nr_memory_nodes;
	int			nr_pmu_mappings;
	int			nr_groups;
	int			nr_cpu_pmu_caps;
	int			nr_hybrid_nodes;
	int			nr_pmus_with_caps;
	char			*cmdline;
	const char		**cmdline_argv;
	char			*sibling_cores;
	char			*sibling_dies;
	char			*sibling_threads;
	char			*pmu_mappings;
	char			**cpu_pmu_caps;
	struct cpu_topology_map	*cpu;
	struct cpu_cache_level	*caches;
	int			 caches_cnt;
	u32			comp_ratio;
	u32			comp_ver;
	u32			comp_type;
	u32			comp_level;
	u32			comp_mmap_len;
	struct numa_node	*numa_nodes;
	struct memory_node	*memory_nodes;
	unsigned long long	 memory_bsize;
	struct hybrid_node	*hybrid_nodes;
	struct pmu_caps		*pmu_caps;
#ifdef HAVE_LIBBPF_SUPPORT
	/*
	 * bpf_progs.lock protects the bpf rbtrees below. This is needed
	 * because the trees are accessed by different threads in perf-top.
	 */
	struct {
		struct rw_semaphore	lock;
		struct rb_root		infos;
		u32			infos_cnt;
		struct rb_root		btfs;
		u32			btfs_cnt;
	} bpf_progs;
#endif // HAVE_LIBBPF_SUPPORT
	/* same reason as above (for perf-top) */
	struct {
		struct rw_semaphore	lock;
		struct rb_root		tree;
	} cgroups;

	/* For fast cpu to numa node lookup via perf_env__numa_node */
	int			*numa_map;
	int			 nr_numa_map;

	/* For real clock time reference. */
	struct {
		u64	tod_ns;
		u64	clockid_ns;
		u64     clockid_res_ns;
		int	clockid;
		/*
		 * enabled is only valid in report mode; it is true if the
		 * above values are set (done in process_clock_data()).
		 */
		bool	enabled;
	} clock;
};

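/* Compression types for compressed records (PERF_COMP_ZSTD is Zstandard). */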
enum perf_compress_type {
	PERF_COMP_NONE = 0,
	PERF_COMP_ZSTD,
	PERF_COMP_MAX
};

struct bpf_prog_info_node;
struct btf_node;

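/* Global fallback environment, used when no session-specific perf_env is available. */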
extern struct perf_env perf_env;

void perf_env__exit(struct perf_env *env);

int perf_env__kernel_is_64_bit(struct perf_env *env);

int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[]);

int perf_env__read_cpuid(struct perf_env *env);
int perf_env__read_pmu_mappings(struct perf_env *env);
int perf_env__nr_pmu_mappings(struct perf_env *env);
const char *perf_env__pmu_mappings(struct perf_env *env);

int perf_env__read_cpu_topology_map(struct perf_env *env);

void cpu_cache_level__free(struct cpu_cache_level *cache);

const char *perf_env__arch(struct perf_env *env);
const char *perf_env__cpuid(struct perf_env *env);
const char *perf_env__raw_arch(struct perf_env *env);
int perf_env__nr_cpus_avail(struct perf_env *env);

void perf_env__init(struct perf_env *env);
void __perf_env__insert_bpf_prog_info(struct perf_env *env,
				      struct bpf_prog_info_node *info_node);
void perf_env__insert_bpf_prog_info(struct perf_env *env,
				    struct bpf_prog_info_node *info_node);
struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
							__u32 prog_id);
bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
bool __perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id);
struct btf_node *__perf_env__find_btf(struct perf_env *env, __u32 btf_id);

int perf_env__numa_node(struct perf_env *env, struct perf_cpu cpu);
char *perf_env__find_pmu_cap(struct perf_env *env, const char *pmu_name,
			     const char *cap);
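/*
 * Illustrative sketch only (not part of this header's API contract): a
 * locally initialized perf_env can be filled from the running system and
 * queried through the accessors above.  The "branches" capability name is
 * just an example and may not be present.
 *
 *	struct perf_env env;
 *
 *	perf_env__init(&env);
 *	if (!perf_env__read_cpuid(&env))
 *		printf("cpuid: %s\n", perf_env__cpuid(&env));
 *	printf("arch: %s\n", perf_env__arch(&env));
 *	printf("cpus: %d\n", perf_env__nr_cpus_avail(&env));
 *	printf("branches cap: %s\n",
 *	       perf_env__find_pmu_cap(&env, "cpu", "branches") ?: "n/a");
 *	perf_env__exit(&env);
 */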
#endif /* __PERF_ENV_H */