/* xref: /openbmc/linux/tools/perf/util/cpumap.c (revision a8da474e) */
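/*
 * Editorial summary (not part of the original source): this file builds
 * refcounted struct cpu_map objects from CPU list strings and from sysfs,
 * and provides helpers that map CPUs to socket, core and NUMA node ids
 * for the perf tools.
 */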
#include "util.h"
#include <api/fs/fs.h>
#include "../perf.h"
#include "cpumap.h"
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include "asm/bug.h"

static struct cpu_map *cpu_map__default_new(void)
{
	struct cpu_map *cpus;
	int nr_cpus;

	nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
	if (nr_cpus < 0)
		return NULL;

	cpus = malloc(sizeof(*cpus) + nr_cpus * sizeof(int));
	if (cpus != NULL) {
		int i;
		for (i = 0; i < nr_cpus; ++i)
			cpus->map[i] = i;

		cpus->nr = nr_cpus;
		atomic_set(&cpus->refcnt, 1);
	}

	return cpus;
}

static struct cpu_map *cpu_map__trim_new(int nr_cpus, int *tmp_cpus)
{
	size_t payload_size = nr_cpus * sizeof(int);
	struct cpu_map *cpus = malloc(sizeof(*cpus) + payload_size);

	if (cpus != NULL) {
		cpus->nr = nr_cpus;
		memcpy(cpus->map, tmp_cpus, payload_size);
		atomic_set(&cpus->refcnt, 1);
	}

	return cpus;
}

struct cpu_map *cpu_map__read(FILE *file)
{
	struct cpu_map *cpus = NULL;
	int nr_cpus = 0;
	int *tmp_cpus = NULL, *tmp;
	int max_entries = 0;
	int n, cpu, prev;
	char sep;

	sep = 0;
	prev = -1;
	for (;;) {
		n = fscanf(file, "%u%c", &cpu, &sep);
		if (n <= 0)
			break;
		if (prev >= 0) {
			int new_max = nr_cpus + cpu - prev - 1;

			if (new_max >= max_entries) {
				max_entries = new_max + MAX_NR_CPUS / 2;
				tmp = realloc(tmp_cpus, max_entries * sizeof(int));
				if (tmp == NULL)
					goto out_free_tmp;
				tmp_cpus = tmp;
			}

			while (++prev < cpu)
				tmp_cpus[nr_cpus++] = prev;
		}
		if (nr_cpus == max_entries) {
			max_entries += MAX_NR_CPUS;
			tmp = realloc(tmp_cpus, max_entries * sizeof(int));
			if (tmp == NULL)
				goto out_free_tmp;
			tmp_cpus = tmp;
		}

		tmp_cpus[nr_cpus++] = cpu;
		if (n == 2 && sep == '-')
			prev = cpu;
		else
			prev = -1;
		if (n == 1 || sep == '\n')
			break;
	}

	if (nr_cpus > 0)
		cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
	else
		cpus = cpu_map__default_new();
out_free_tmp:
	free(tmp_cpus);
	return cpus;
}
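
/*
 * Illustrative walkthrough (editorial addition, not part of the original
 * source): for an input such as "0-2,7\n" the loop above first reads "0-"
 * and records prev = 0, then reads "2," and fills the gap with 1 before
 * appending 2, and finally reads "7\n" and appends 7, so cpu_map__trim_new()
 * is called with tmp_cpus = {0, 1, 2, 7} and nr_cpus = 4.  An empty or
 * unparsable stream falls back to cpu_map__default_new().
 */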

static struct cpu_map *cpu_map__read_all_cpu_map(void)
{
	struct cpu_map *cpus = NULL;
	FILE *onlnf;

	onlnf = fopen("/sys/devices/system/cpu/online", "r");
	if (!onlnf)
		return cpu_map__default_new();

	cpus = cpu_map__read(onlnf);
	fclose(onlnf);
	return cpus;
}

struct cpu_map *cpu_map__new(const char *cpu_list)
{
	struct cpu_map *cpus = NULL;
	unsigned long start_cpu, end_cpu = 0;
	char *p = NULL;
	int i, nr_cpus = 0;
	int *tmp_cpus = NULL, *tmp;
	int max_entries = 0;

	if (!cpu_list)
		return cpu_map__read_all_cpu_map();

	if (!isdigit(*cpu_list))
		goto out;

	while (isdigit(*cpu_list)) {
		p = NULL;
		start_cpu = strtoul(cpu_list, &p, 0);
		if (start_cpu >= INT_MAX
		    || (*p != '\0' && *p != ',' && *p != '-'))
			goto invalid;

		if (*p == '-') {
			cpu_list = ++p;
			p = NULL;
			end_cpu = strtoul(cpu_list, &p, 0);

			if (end_cpu >= INT_MAX || (*p != '\0' && *p != ','))
				goto invalid;

			if (end_cpu < start_cpu)
				goto invalid;
		} else {
			end_cpu = start_cpu;
		}

		for (; start_cpu <= end_cpu; start_cpu++) {
			/* check for duplicates */
			for (i = 0; i < nr_cpus; i++)
				if (tmp_cpus[i] == (int)start_cpu)
					goto invalid;

			if (nr_cpus == max_entries) {
				max_entries += MAX_NR_CPUS;
				tmp = realloc(tmp_cpus, max_entries * sizeof(int));
				if (tmp == NULL)
					goto invalid;
				tmp_cpus = tmp;
			}
			tmp_cpus[nr_cpus++] = (int)start_cpu;
		}
		if (*p)
			++p;

		cpu_list = p;
	}

	if (nr_cpus > 0)
		cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
	else
		cpus = cpu_map__default_new();
invalid:
	free(tmp_cpus);
out:
	return cpus;
}
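
/*
 * Illustrative usage (editorial addition, not part of the original source):
 *
 *	struct cpu_map *map    = cpu_map__new("0-2,5");	// CPUs {0, 1, 2, 5}
 *	struct cpu_map *online = cpu_map__new(NULL);	// all online CPUs
 *	...
 *	cpu_map__put(map);
 *	cpu_map__put(online);
 *
 * A list that does not start with a digit, a descending range such as
 * "3-1", or a duplicated CPU number takes the error paths above and the
 * function returns NULL.
 */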

size_t cpu_map__fprintf(struct cpu_map *map, FILE *fp)
{
	int i;
	size_t printed = fprintf(fp, "%d cpu%s: ",
				 map->nr, map->nr > 1 ? "s" : "");
	for (i = 0; i < map->nr; ++i)
		printed += fprintf(fp, "%s%d", i ? ", " : "", map->map[i]);

	return printed + fprintf(fp, "\n");
}

struct cpu_map *cpu_map__dummy_new(void)
{
	struct cpu_map *cpus = malloc(sizeof(*cpus) + sizeof(int));

	if (cpus != NULL) {
		cpus->nr = 1;
		cpus->map[0] = -1;
		atomic_set(&cpus->refcnt, 1);
	}

	return cpus;
}
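
/*
 * Editorial note (not part of the original source): the single -1 entry
 * mirrors the cpu == -1 convention of perf_event_open(2), i.e. the event
 * is not bound to any particular CPU, which is what per-thread monitoring
 * uses.
 */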

struct cpu_map *cpu_map__empty_new(int nr)
{
	struct cpu_map *cpus = malloc(sizeof(*cpus) + sizeof(int) * nr);

	if (cpus != NULL) {
		int i;

		cpus->nr = nr;
		for (i = 0; i < nr; i++)
			cpus->map[i] = -1;

		atomic_set(&cpus->refcnt, 1);
	}

	return cpus;
}

static void cpu_map__delete(struct cpu_map *map)
{
	if (map) {
		WARN_ONCE(atomic_read(&map->refcnt) != 0,
			  "cpu_map refcnt unbalanced\n");
		free(map);
	}
}

struct cpu_map *cpu_map__get(struct cpu_map *map)
{
	if (map)
		atomic_inc(&map->refcnt);
	return map;
}

void cpu_map__put(struct cpu_map *map)
{
	if (map && atomic_dec_and_test(&map->refcnt))
		cpu_map__delete(map);
}
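
/*
 * Illustrative refcounting example (editorial addition, not part of the
 * original source): the reference returned by a constructor and every
 * cpu_map__get() must each be balanced by a cpu_map__put(); the map is
 * freed when the last reference is dropped.
 *
 *	struct cpu_map *map = cpu_map__new("0-3");	// refcnt == 1
 *	struct cpu_map *ref = cpu_map__get(map);	// refcnt == 2
 *	cpu_map__put(ref);				// refcnt == 1
 *	cpu_map__put(map);				// refcnt == 0, freed
 */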

static int cpu__get_topology_int(int cpu, const char *name, int *value)
{
	char path[PATH_MAX];

	snprintf(path, PATH_MAX,
		"devices/system/cpu/cpu%d/topology/%s", cpu, name);

	return sysfs__read_int(path, value);
}

int cpu_map__get_socket_id(int cpu)
{
	int value, ret = cpu__get_topology_int(cpu, "physical_package_id", &value);
	return ret ?: value;
}

int cpu_map__get_socket(struct cpu_map *map, int idx, void *data __maybe_unused)
{
	int cpu;

	if (idx > map->nr)
		return -1;

	cpu = map->map[idx];

	return cpu_map__get_socket_id(cpu);
}

static int cmp_ids(const void *a, const void *b)
{
	return *(int *)a - *(int *)b;
}

int cpu_map__build_map(struct cpu_map *cpus, struct cpu_map **res,
		       int (*f)(struct cpu_map *map, int cpu, void *data),
		       void *data)
{
	struct cpu_map *c;
	int nr = cpus->nr;
	int cpu, s1, s2;

	/* allocate as much as possible: one slot per input cpu */
	c = calloc(1, sizeof(*c) + nr * sizeof(int));
	if (!c)
		return -1;

	for (cpu = 0; cpu < nr; cpu++) {
		s1 = f(cpus, cpu, data);
		for (s2 = 0; s2 < c->nr; s2++) {
			if (s1 == c->map[s2])
				break;
		}
		if (s2 == c->nr) {
			c->map[c->nr] = s1;
			c->nr++;
		}
	}
	/* ensure we process ids in increasing order */
	qsort(c->map, c->nr, sizeof(int), cmp_ids);

	atomic_set(&c->refcnt, 1);
	*res = c;
	return 0;
}
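
/*
 * Illustrative example (editorial addition, not part of the original
 * source): for an input map of CPUs {0, 1, 2, 3} where CPUs 0-1 sit on
 * socket 0 and CPUs 2-3 on socket 1,
 * cpu_map__build_map(cpus, &res, cpu_map__get_socket, NULL) yields the
 * deduplicated, sorted map res = {0, 1}.  Note that the callback's second
 * argument is an index into @cpus, not a CPU number.
 */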

int cpu_map__get_core_id(int cpu)
{
	int value, ret = cpu__get_topology_int(cpu, "core_id", &value);
	return ret ?: value;
}

int cpu_map__get_core(struct cpu_map *map, int idx, void *data)
{
	int cpu, s;

	if (idx > map->nr)
		return -1;

	cpu = map->map[idx];

	cpu = cpu_map__get_core_id(cpu);

	s = cpu_map__get_socket(map, idx, data);
	if (s == -1)
		return -1;

	/*
	 * Encode the socket in the upper 16 bits: core_id is only unique
	 * within a socket, and we need a global id, so we combine the
	 * socket and core ids.
	 */
	return (s << 16) | (cpu & 0xffff);
}
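
/*
 * Illustrative example (editorial addition, not part of the original
 * source): a CPU on socket 1 with core_id 2 is encoded as
 * (1 << 16) | 2 == 0x10002, so cores that share a core_id but live on
 * different sockets still get distinct identifiers.
 */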

int cpu_map__build_socket_map(struct cpu_map *cpus, struct cpu_map **sockp)
{
	return cpu_map__build_map(cpus, sockp, cpu_map__get_socket, NULL);
}

int cpu_map__build_core_map(struct cpu_map *cpus, struct cpu_map **corep)
{
	return cpu_map__build_map(cpus, corep, cpu_map__get_core, NULL);
}

/* Set up simple routines to easily access node numbers given a cpu number. */
static int get_max_num(char *path, int *max)
{
	size_t num;
	char *buf;
	int err = 0;

	if (filename__read_str(path, &buf, &num))
		return -1;

	buf[num] = '\0';

	/* start on the right, to find the highest id */
	while (--num) {
		if ((buf[num] == ',') || (buf[num] == '-')) {
			num++;
			break;
		}
	}
	if (sscanf(&buf[num], "%d", max) < 1) {
		err = -1;
		goto out;
	}

	/* convert the highest 0-based id into a count */
	(*max)++;

out:
	free(buf);
	return err;
}
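
/*
 * Illustrative example (editorial addition, not part of the original
 * source): for a sysfs "possible" file containing "0-7" the scan from the
 * right stops just after the '-', sscanf() reads 7 and *max becomes 8;
 * for a file containing just "0", *max becomes 1.
 */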

/* Determine highest possible cpu in the system for sparse allocation */
static void set_max_cpu_num(void)
{
	const char *mnt;
	char path[PATH_MAX];
	int ret = -1;

	/* set up default */
	max_cpu_num = 4096;

	mnt = sysfs__mountpoint();
	if (!mnt)
		goto out;

	/* get the highest possible cpu number for a sparse allocation */
	ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/possible", mnt);
	if (ret == PATH_MAX) {
		pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
		goto out;
	}

	ret = get_max_num(path, &max_cpu_num);

out:
	if (ret)
		pr_err("Failed to read max cpus, using default of %d\n", max_cpu_num);
}

/* Determine highest possible node in the system for sparse allocation */
static void set_max_node_num(void)
{
	const char *mnt;
	char path[PATH_MAX];
	int ret = -1;

	/* set up default */
	max_node_num = 8;

	mnt = sysfs__mountpoint();
	if (!mnt)
		goto out;

	/* get the highest possible node number for a sparse allocation */
	ret = snprintf(path, PATH_MAX, "%s/devices/system/node/possible", mnt);
	if (ret == PATH_MAX) {
		pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
		goto out;
	}

	ret = get_max_num(path, &max_node_num);

out:
	if (ret)
		pr_err("Failed to read max nodes, using default of %d\n", max_node_num);
}

static int init_cpunode_map(void)
{
	int i;

	set_max_cpu_num();
	set_max_node_num();

	cpunode_map = calloc(max_cpu_num, sizeof(int));
	if (!cpunode_map) {
		pr_err("%s: calloc failed\n", __func__);
		return -1;
	}

	for (i = 0; i < max_cpu_num; i++)
		cpunode_map[i] = -1;

	return 0;
}

int cpu__setup_cpunode_map(void)
{
	struct dirent *dent1, *dent2;
	DIR *dir1, *dir2;
	unsigned int cpu, mem;
	char buf[PATH_MAX];
	char path[PATH_MAX];
	const char *mnt;
	int n;

	/* initialize globals */
	if (init_cpunode_map())
		return -1;

	mnt = sysfs__mountpoint();
	if (!mnt)
		return 0;

	n = snprintf(path, PATH_MAX, "%s/devices/system/node", mnt);
	if (n == PATH_MAX) {
		pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
		return -1;
	}

	dir1 = opendir(path);
	if (!dir1)
		return 0;

	/* walk the node tree and set up the cpu -> node map */
	while ((dent1 = readdir(dir1)) != NULL) {
		if (dent1->d_type != DT_DIR || sscanf(dent1->d_name, "node%u", &mem) < 1)
			continue;

		n = snprintf(buf, PATH_MAX, "%s/%s", path, dent1->d_name);
		if (n == PATH_MAX) {
			pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
			continue;
		}

		dir2 = opendir(buf);
		if (!dir2)
			continue;
		while ((dent2 = readdir(dir2)) != NULL) {
			if (dent2->d_type != DT_LNK || sscanf(dent2->d_name, "cpu%u", &cpu) < 1)
				continue;
			cpunode_map[cpu] = mem;
		}
		closedir(dir2);
	}
	closedir(dir1);
	return 0;
}
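
/*
 * Illustrative example (editorial addition, not part of the original
 * source): on a system where /sys/devices/system/node/node0 contains the
 * symlinks cpu0..cpu3 and node1 contains cpu4..cpu7, the walk above sets
 * cpunode_map[0..3] = 0 and cpunode_map[4..7] = 1; CPUs not listed under
 * any node keep the -1 written by init_cpunode_map().
 */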