xref: /openbmc/linux/tools/perf/util/env.c (revision f8e17c17)
// SPDX-License-Identifier: GPL-2.0
#include "cpumap.h"
#include "debug.h"
#include "env.h"
#include "util/header.h"
#include <linux/ctype.h>
#include <linux/zalloc.h>
#include "bpf-event.h"
#include <errno.h>
#include <sys/utsname.h>
#include <bpf/libbpf.h>
#include <stdlib.h>
#include <string.h>

struct perf_env perf_env;

void perf_env__insert_bpf_prog_info(struct perf_env *env,
				    struct bpf_prog_info_node *info_node)
{
	__u32 prog_id = info_node->info_linear->info.id;
	struct bpf_prog_info_node *node;
	struct rb_node *parent = NULL;
	struct rb_node **p;

	down_write(&env->bpf_progs.lock);
	p = &env->bpf_progs.infos.rb_node;

	while (*p != NULL) {
		parent = *p;
		node = rb_entry(parent, struct bpf_prog_info_node, rb_node);
		if (prog_id < node->info_linear->info.id) {
			p = &(*p)->rb_left;
		} else if (prog_id > node->info_linear->info.id) {
			p = &(*p)->rb_right;
		} else {
			pr_debug("duplicated bpf prog info %u\n", prog_id);
			goto out;
		}
	}

	rb_link_node(&info_node->rb_node, parent, p);
	rb_insert_color(&info_node->rb_node, &env->bpf_progs.infos);
	env->bpf_progs.infos_cnt++;
out:
	up_write(&env->bpf_progs.lock);
}

struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
							__u32 prog_id)
{
	struct bpf_prog_info_node *node = NULL;
	struct rb_node *n;

	down_read(&env->bpf_progs.lock);
	n = env->bpf_progs.infos.rb_node;

	while (n) {
		node = rb_entry(n, struct bpf_prog_info_node, rb_node);
		if (prog_id < node->info_linear->info.id)
			n = n->rb_left;
		else if (prog_id > node->info_linear->info.id)
			n = n->rb_right;
		else
			goto out;
	}
	node = NULL;

out:
	up_read(&env->bpf_progs.lock);
	return node;
}

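/*
 * A minimal usage sketch (not part of this file) pairing the two helpers
 * above. It assumes "info_linear" was obtained elsewhere, e.g. by the BPF
 * side-band event code in bpf-event.c, which builds these nodes:
 *
 *	struct bpf_prog_info_node *node = zalloc(sizeof(*node));
 *
 *	if (node) {
 *		node->info_linear = info_linear;
 *		perf_env__insert_bpf_prog_info(env, node);
 *	}
 *
 * Later, the program can be looked back up by its kernel-assigned id:
 *
 *	node = perf_env__find_bpf_prog_info(env, info_linear->info.id);
 */
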
void perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
{
	struct rb_node *parent = NULL;
	__u32 btf_id = btf_node->id;
	struct btf_node *node;
	struct rb_node **p;

	down_write(&env->bpf_progs.lock);
	p = &env->bpf_progs.btfs.rb_node;

	while (*p != NULL) {
		parent = *p;
		node = rb_entry(parent, struct btf_node, rb_node);
		if (btf_id < node->id) {
			p = &(*p)->rb_left;
		} else if (btf_id > node->id) {
			p = &(*p)->rb_right;
		} else {
			pr_debug("duplicated btf %u\n", btf_id);
			goto out;
		}
	}

	rb_link_node(&btf_node->rb_node, parent, p);
	rb_insert_color(&btf_node->rb_node, &env->bpf_progs.btfs);
	env->bpf_progs.btfs_cnt++;
out:
	up_write(&env->bpf_progs.lock);
}

struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
{
	struct btf_node *node = NULL;
	struct rb_node *n;

	down_read(&env->bpf_progs.lock);
	n = env->bpf_progs.btfs.rb_node;

	while (n) {
		node = rb_entry(n, struct btf_node, rb_node);
		if (btf_id < node->id)
			n = n->rb_left;
		else if (btf_id > node->id)
			n = n->rb_right;
		else
			goto out;
	}
	node = NULL;

out:
	up_read(&env->bpf_progs.lock);
	return node;
}

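/*
 * A minimal sketch (not from this file) of tying a program to its BTF:
 * struct bpf_prog_info carries the id of the BTF object the program was
 * loaded with, which serves as the key into the btfs tree:
 *
 *	struct bpf_prog_info *info = &info_node->info_linear->info;
 *	struct btf_node *btf = perf_env__find_btf(env, info->btf_id);
 *
 *	if (btf == NULL)
 *		pr_debug("no BTF for prog %u\n", info->id);
 */
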
/* purge data in bpf_progs.infos and bpf_progs.btfs trees */
static void perf_env__purge_bpf(struct perf_env *env)
{
	struct rb_root *root;
	struct rb_node *next;

	down_write(&env->bpf_progs.lock);

	root = &env->bpf_progs.infos;
	next = rb_first(root);

	while (next) {
		struct bpf_prog_info_node *node;

		node = rb_entry(next, struct bpf_prog_info_node, rb_node);
		next = rb_next(&node->rb_node);
		rb_erase(&node->rb_node, root);
		free(node->info_linear);	/* the node owns its info_linear blob */
		free(node);
	}

	env->bpf_progs.infos_cnt = 0;

	root = &env->bpf_progs.btfs;
	next = rb_first(root);

	while (next) {
		struct btf_node *node;

		node = rb_entry(next, struct btf_node, rb_node);
		next = rb_next(&node->rb_node);
		rb_erase(&node->rb_node, root);
		free(node);
	}

	env->bpf_progs.btfs_cnt = 0;

	up_write(&env->bpf_progs.lock);
}

void perf_env__exit(struct perf_env *env)
{
	int i;

	perf_env__purge_bpf(env);
	zfree(&env->hostname);
	zfree(&env->os_release);
	zfree(&env->version);
	zfree(&env->arch);
	zfree(&env->cpu_desc);
	zfree(&env->cpuid);
	zfree(&env->cmdline);
	for (i = 0; i < env->nr_cmdline; i++)
		zfree(&env->cmdline_argv[i]);	/* free the copies made by perf_env__set_cmdline() */
	zfree(&env->cmdline_argv);
	zfree(&env->sibling_cores);
	zfree(&env->sibling_threads);
	zfree(&env->pmu_mappings);
	zfree(&env->cpu);
	zfree(&env->numa_map);

	for (i = 0; i < env->nr_numa_nodes; i++)
		perf_cpu_map__put(env->numa_nodes[i].map);
	zfree(&env->numa_nodes);

	for (i = 0; i < env->caches_cnt; i++)
		cpu_cache_level__free(&env->caches[i]);
	zfree(&env->caches);

	for (i = 0; i < env->nr_memory_nodes; i++)
		zfree(&env->memory_nodes[i].set);
	zfree(&env->memory_nodes);
}

void perf_env__init(struct perf_env *env)
{
	env->bpf_progs.infos = RB_ROOT;
	env->bpf_progs.btfs = RB_ROOT;
	init_rwsem(&env->bpf_progs.lock);
}

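/*
 * A minimal lifecycle sketch (assumed caller, not from this file): the BPF
 * trees and their lock must be set up before the insert/find helpers are
 * used, and torn down exactly once when the env is done with:
 *
 *	struct perf_env env = { 0 };
 *
 *	perf_env__init(&env);
 *	...
 *	perf_env__exit(&env);
 */
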
int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[])
{
	int i;

	/* do not include NULL termination */
	env->cmdline_argv = calloc(argc, sizeof(char *));
	if (env->cmdline_argv == NULL)
		goto out_enomem;

	/*
	 * Must copy argv contents because it gets moved around during option
	 * parsing:
	 */
	for (i = 0; i < argc; i++) {
		env->cmdline_argv[i] = strdup(argv[i]);
		if (env->cmdline_argv[i] == NULL)
			goto out_free;
	}

	env->nr_cmdline = argc;

	return 0;
out_free:
	zfree(&env->cmdline_argv);
out_enomem:
	return -ENOMEM;
}

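/*
 * A minimal usage sketch (assumed caller, not from this file): record the
 * command line before option parsing rewrites argv, as perf's own main()
 * does with the global perf_env:
 *
 *	if (perf_env__set_cmdline(&perf_env, argc, argv) < 0)
 *		pr_debug("failed to copy the command line\n");
 */
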
int perf_env__read_cpu_topology_map(struct perf_env *env)
{
	int cpu, nr_cpus;

	if (env->cpu != NULL)
		return 0;

	if (env->nr_cpus_avail == 0)
		env->nr_cpus_avail = cpu__max_present_cpu();

	nr_cpus = env->nr_cpus_avail;
	if (nr_cpus == -1)
		return -EINVAL;

	env->cpu = calloc(nr_cpus, sizeof(env->cpu[0]));
	if (env->cpu == NULL)
		return -ENOMEM;

	for (cpu = 0; cpu < nr_cpus; ++cpu) {
		env->cpu[cpu].core_id	= cpu_map__get_core_id(cpu);
		env->cpu[cpu].socket_id	= cpu_map__get_socket_id(cpu);
		env->cpu[cpu].die_id	= cpu_map__get_die_id(cpu);
	}

	env->nr_cpus_avail = nr_cpus;
	return 0;
}

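/*
 * A minimal sketch (assumed caller, not from this file) of consuming the
 * map built above: after a successful call, env->cpu[] holds one entry per
 * logical CPU, indexed by CPU number:
 *
 *	if (perf_env__read_cpu_topology_map(env) == 0) {
 *		for (int cpu = 0; cpu < env->nr_cpus_avail; cpu++)
 *			pr_debug("cpu%d: socket %d die %d core %d\n", cpu,
 *				 env->cpu[cpu].socket_id,
 *				 env->cpu[cpu].die_id,
 *				 env->cpu[cpu].core_id);
 *	}
 */
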
int perf_env__read_cpuid(struct perf_env *env)
{
	char cpuid[128];
	int err = get_cpuid(cpuid, sizeof(cpuid));

	if (err)
		return err;

	free(env->cpuid);
	env->cpuid = strdup(cpuid);
	if (env->cpuid == NULL)
		return -ENOMEM;
	return 0;
}

static int perf_env__read_arch(struct perf_env *env)
{
	struct utsname uts;

	if (env->arch)
		return 0;

	if (!uname(&uts))
		env->arch = strdup(uts.machine);

	return env->arch ? 0 : -ENOMEM;
}

static int perf_env__read_nr_cpus_avail(struct perf_env *env)
{
	if (env->nr_cpus_avail == 0)
		env->nr_cpus_avail = cpu__max_present_cpu();

	return env->nr_cpus_avail ? 0 : -ENOENT;
}

const char *perf_env__raw_arch(struct perf_env *env)
{
	return env && !perf_env__read_arch(env) ? env->arch : "unknown";
}

int perf_env__nr_cpus_avail(struct perf_env *env)
{
	return env && !perf_env__read_nr_cpus_avail(env) ? env->nr_cpus_avail : 0;
}

void cpu_cache_level__free(struct cpu_cache_level *cache)
{
	zfree(&cache->type);
	zfree(&cache->map);
	zfree(&cache->size);
}

/*
 * Return architecture name in a normalized form.
 * The conversion logic comes from the Makefile.
 */
static const char *normalize_arch(char *arch)
{
	if (!strcmp(arch, "x86_64"))
		return "x86";
	/* matches i386, i486, i586 and i686 */
	if (arch[0] == 'i' && arch[2] == '8' && arch[3] == '6')
		return "x86";
	if (!strcmp(arch, "sun4u") || !strncmp(arch, "sparc", 5))
		return "sparc";
	if (!strcmp(arch, "aarch64") || !strcmp(arch, "arm64"))
		return "arm64";
	if (!strncmp(arch, "arm", 3) || !strcmp(arch, "sa110"))
		return "arm";
	if (!strncmp(arch, "s390", 4))
		return "s390";
	if (!strncmp(arch, "parisc", 6))
		return "parisc";
	if (!strncmp(arch, "powerpc", 7) || !strncmp(arch, "ppc", 3))
		return "powerpc";
	if (!strncmp(arch, "mips", 4))
		return "mips";
	if (!strncmp(arch, "sh", 2) && isdigit(arch[2]))
		return "sh";

	return arch;
}

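/*
 * A few example mappings for normalize_arch() (illustrative, derived from
 * the checks above):
 *
 *	normalize_arch("x86_64");   -> "x86"
 *	normalize_arch("i686");     -> "x86"
 *	normalize_arch("aarch64");  -> "arm64"
 *	normalize_arch("ppc64le");  -> "powerpc"
 *	normalize_arch("riscv64");  -> "riscv64" (returned unchanged)
 */
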
const char *perf_env__arch(struct perf_env *env)
{
	struct utsname uts;
	char *arch_name;

	if (!env || !env->arch) { /* Assume local operation */
		if (uname(&uts) < 0)
			return NULL;
		arch_name = uts.machine;
	} else
		arch_name = env->arch;

	return normalize_arch(arch_name);
}

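/*
 * A minimal usage sketch (assumed caller, not from this file): resolve the
 * NUMA node of a sampled CPU, getting -1 for CPUs absent from every node:
 *
 *	int node = perf_env__numa_node(env, sample->cpu);
 *
 *	if (node >= 0)
 *		pr_debug("cpu %u is on node %d\n", sample->cpu, node);
 */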
int perf_env__numa_node(struct perf_env *env, int cpu)
{
	if (!env->nr_numa_map) {
		struct numa_node *nn;
		int i, nr = 0;

		for (i = 0; i < env->nr_numa_nodes; i++) {
			nn = &env->numa_nodes[i];
			nr = max(nr, perf_cpu_map__max(nn->map));
		}

		nr++;

		/*
		 * Initialize the whole numa_map array to -1, so that CPUs
		 * missing from every node report node -1.
		 */
		env->numa_map = malloc(nr * sizeof(int));
		if (!env->numa_map)
			return -1;

		for (i = 0; i < nr; i++)
			env->numa_map[i] = -1;

		env->nr_numa_map = nr;

		for (i = 0; i < env->nr_numa_nodes; i++) {
			int tmp, j;

			nn = &env->numa_nodes[i];
			perf_cpu_map__for_each_cpu(j, tmp, nn->map)
				env->numa_map[j] = i;
		}
	}

	return cpu >= 0 && cpu < env->nr_numa_map ? env->numa_map[cpu] : -1;
}
397