// SPDX-License-Identifier: GPL-2.0
#include <api/fs/fs.h>
#include "cpumap.h"
#include "debug.h"
#include "event.h"
#include <assert.h>
#include <dirent.h>
#include <stdio.h>
#include <stdlib.h>
#include <linux/bitmap.h>
#include "asm/bug.h"

#include <linux/ctype.h>
#include <linux/zalloc.h>

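/*
 * System-wide maxima, lazily read from sysfs on first use; zero means
 * "not initialized yet".
 */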
static int max_cpu_num;
static int max_present_cpu_num;
static int max_node_num;
/*
 * The NUMA node X, as read from /sys/devices/system/node/nodeX, indexed by
 * CPU number.
 */
static int *cpunode_map;

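/*
 * Build a perf_cpu_map from the u16 CPU list carried in a synthesized
 * cpu map event.
 */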
static struct perf_cpu_map *cpu_map__from_entries(struct cpu_map_entries *cpus)
{
	struct perf_cpu_map *map;

	map = perf_cpu_map__empty_new(cpus->nr);
	if (map) {
		unsigned i;

		for (i = 0; i < cpus->nr; i++) {
			/*
			 * Special treatment for -1, which is not a real CPU
			 * number: it must be stored as (int) -1, otherwise
			 * the u16 value would widen to 65535.
			 */
			if (cpus->cpu[i] == (u16) -1)
				map->map[i] = -1;
			else
				map->map[i] = (int) cpus->cpu[i];
		}
	}

	return map;
}

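/*
 * Build a perf_cpu_map from the bitmask form of a cpu map event; the
 * result has one entry per set bit.
 */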
static struct perf_cpu_map *cpu_map__from_mask(struct perf_record_record_cpu_map *mask)
{
	struct perf_cpu_map *map;
	int nr, nbits = mask->nr * mask->long_size * BITS_PER_BYTE;

	nr = bitmap_weight(mask->mask, nbits);

	map = perf_cpu_map__empty_new(nr);
	if (map) {
		int cpu, i = 0;

		for_each_set_bit(cpu, mask->mask, nbits)
			map->map[i++] = cpu;
	}
	return map;
}

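/*
 * Decode a cpu map event into a perf_cpu_map, dispatching on whether
 * the payload is a CPU list or a bitmask.
 */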
struct perf_cpu_map *cpu_map__new_data(struct perf_record_cpu_map_data *data)
{
	if (data->type == PERF_CPU_MAP__CPUS)
		return cpu_map__from_entries((struct cpu_map_entries *)data->data);
	else
		return cpu_map__from_mask((struct perf_record_record_cpu_map *)data->data);
}

size_t cpu_map__fprintf(struct perf_cpu_map *map, FILE *fp)
{
#define BUFSIZE 1024
	char buf[BUFSIZE];

	cpu_map__snprint(map, buf, sizeof(buf));
	return fprintf(fp, "%s\n", buf);
#undef BUFSIZE
}

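/*
 * Allocate a perf_cpu_map of nr entries, each initialized to the -1
 * "no CPU" sentinel, with a refcount of one.
 */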
struct perf_cpu_map *perf_cpu_map__empty_new(int nr)
{
	struct perf_cpu_map *cpus = malloc(sizeof(*cpus) + sizeof(int) * nr);

	if (cpus != NULL) {
		int i;

		cpus->nr = nr;
		for (i = 0; i < nr; i++)
			cpus->map[i] = -1;

		refcount_set(&cpus->refcnt, 1);
	}

	return cpus;
}

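/*
 * Allocate a cpu_aggr_map of nr entries, each initialized to the empty
 * aggregate id, with a refcount of one.
 */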
struct cpu_aggr_map *cpu_aggr_map__empty_new(int nr)
{
	struct cpu_aggr_map *cpus = malloc(sizeof(*cpus) + sizeof(struct aggr_cpu_id) * nr);

	if (cpus != NULL) {
		int i;

		cpus->nr = nr;
		for (i = 0; i < nr; i++)
			cpus->map[i] = aggr_cpu_id__empty();

		refcount_set(&cpus->refcnt, 1);
	}

	return cpus;
}

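/*
 * Read an integer topology attribute, e.g.
 * /sys/devices/system/cpu/cpu0/topology/core_id, returning zero on
 * success and a nonzero error value otherwise.
 */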
static int cpu__get_topology_int(int cpu, const char *name, int *value)
{
	char path[PATH_MAX];

	snprintf(path, PATH_MAX,
		"devices/system/cpu/cpu%d/topology/%s", cpu, name);

	return sysfs__read_int(path, value);
}

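/*
 * The *_id getters below return the sysfs value on success or the
 * nonzero error from cpu__get_topology_int() on failure; "ret ?: value"
 * picks whichever applies.
 */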
int cpu__get_socket_id(int cpu)
{
	int value, ret = cpu__get_topology_int(cpu, "physical_package_id", &value);
	return ret ?: value;
}

struct aggr_cpu_id aggr_cpu_id__socket(int cpu, void *data __maybe_unused)
{
	struct aggr_cpu_id id = aggr_cpu_id__empty();

	id.socket = cpu__get_socket_id(cpu);
	return id;
}

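/*
 * qsort() comparator ordering aggregate ids by node, then socket, die,
 * core and thread, i.e. from the coarsest topology level down.
 */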
static int aggr_cpu_id__cmp(const void *a_pointer, const void *b_pointer)
{
	struct aggr_cpu_id *a = (struct aggr_cpu_id *)a_pointer;
	struct aggr_cpu_id *b = (struct aggr_cpu_id *)b_pointer;

	if (a->node != b->node)
		return a->node - b->node;
	else if (a->socket != b->socket)
		return a->socket - b->socket;
	else if (a->die != b->die)
		return a->die - b->die;
	else if (a->core != b->core)
		return a->core - b->core;
	else
		return a->thread - b->thread;
}

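/*
 * Build the aggregation map for a cpu map: compute get_id() for every
 * CPU, keep each distinct id once, then sort with aggr_cpu_id__cmp().
 * The duplicate scan is quadratic in the number of distinct ids.
 */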
struct cpu_aggr_map *cpu_aggr_map__new(const struct perf_cpu_map *cpus,
				       aggr_cpu_id_get_t get_id,
				       void *data)
{
	int cpu, idx;
	struct cpu_aggr_map *c = cpu_aggr_map__empty_new(cpus->nr);

	if (!c)
		return NULL;

	/* Reset size as it may only be partially filled */
	c->nr = 0;

	perf_cpu_map__for_each_cpu(cpu, idx, cpus) {
		bool duplicate = false;
		struct aggr_cpu_id cpu_id = get_id(cpu, data);

		for (int j = 0; j < c->nr; j++) {
			if (aggr_cpu_id__equal(&cpu_id, &c->map[j])) {
				duplicate = true;
				break;
			}
		}
		if (!duplicate) {
			c->map[c->nr] = cpu_id;
			c->nr++;
		}
	}

	/* ensure we process ids in increasing order */
	qsort(c->map, c->nr, sizeof(struct aggr_cpu_id), aggr_cpu_id__cmp);

	return c;
}

int cpu__get_die_id(int cpu)
{
	int value, ret = cpu__get_topology_int(cpu, "die_id", &value);

	return ret ?: value;
}

struct aggr_cpu_id aggr_cpu_id__die(int cpu, void *data)
{
	struct aggr_cpu_id id;
	int die;

	die = cpu__get_die_id(cpu);
	/* There is no die_id on legacy systems. */
	if (die == -1)
		die = 0;

	/*
	 * die_id is relative to the socket, so start with the socket ID
	 * and then add the die to make a unique ID.
	 */
	id = aggr_cpu_id__socket(cpu, data);
	if (aggr_cpu_id__is_empty(&id))
		return id;

	id.die = die;
	return id;
}

int cpu__get_core_id(int cpu)
{
	int value, ret = cpu__get_topology_int(cpu, "core_id", &value);
	return ret ?: value;
}

struct aggr_cpu_id aggr_cpu_id__core(int cpu, void *data)
{
	struct aggr_cpu_id id;
	int core = cpu__get_core_id(cpu);

	/* aggr_cpu_id__die() returns a struct with socket and die set. */
	id = aggr_cpu_id__die(cpu, data);
	if (aggr_cpu_id__is_empty(&id))
		return id;

	/*
	 * core_id is relative to the socket and die, but we need a global
	 * id, so combine the result of aggr_cpu_id__die() with the core id.
	 */
	id.core = core;
	return id;
}

struct aggr_cpu_id aggr_cpu_id__node(int cpu, void *data __maybe_unused)
{
	struct aggr_cpu_id id = aggr_cpu_id__empty();

	id.node = cpu__get_node(cpu);
	return id;
}

/* setup simple routines to easily access node numbers given a cpu number */
static int get_max_num(char *path, int *max)
{
	size_t num;
	char *buf;
	int err = 0;

	if (filename__read_str(path, &buf, &num))
		return -1;

	buf[num] = '\0';

	/* start on the right, to find the highest id in the list */
	while (--num) {
		if ((buf[num] == ',') || (buf[num] == '-')) {
			num++;
			break;
		}
	}
	if (sscanf(&buf[num], "%d", max) < 1) {
		err = -1;
		goto out;
	}

	/* convert the highest id (0-based) into a count */
	(*max)++;

out:
	free(buf);
	return err;
}

/* Determine highest possible cpu in the system for sparse allocation */
static void set_max_cpu_num(void)
{
	const char *mnt;
	char path[PATH_MAX];
	int ret = -1;

	/* set up default */
	max_cpu_num = 4096;
	max_present_cpu_num = 4096;

	mnt = sysfs__mountpoint();
	if (!mnt)
		goto out;

	/* get the highest possible cpu number for a sparse allocation */
	ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/possible", mnt);
	if (ret >= PATH_MAX) {
		pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
		goto out;
	}

	ret = get_max_num(path, &max_cpu_num);
	if (ret)
		goto out;

	/* get the highest present cpu number for a sparse allocation */
	ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/present", mnt);
	if (ret >= PATH_MAX) {
		pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
		goto out;
	}

	ret = get_max_num(path, &max_present_cpu_num);

out:
	if (ret)
		pr_err("Failed to read max cpus, using default of %d\n", max_cpu_num);
}

/* Determine highest possible node in the system for sparse allocation */
static void set_max_node_num(void)
{
	const char *mnt;
	char path[PATH_MAX];
	int ret = -1;

	/* set up default */
	max_node_num = 8;

	mnt = sysfs__mountpoint();
	if (!mnt)
		goto out;

	/* get the highest possible node number for a sparse allocation */
	ret = snprintf(path, PATH_MAX, "%s/devices/system/node/possible", mnt);
	if (ret >= PATH_MAX) {
		pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
		goto out;
	}

	ret = get_max_num(path, &max_node_num);

out:
	if (ret)
		pr_err("Failed to read max nodes, using default of %d\n", max_node_num);
}

int cpu__max_node(void)
{
	if (unlikely(!max_node_num))
		set_max_node_num();

	return max_node_num;
}

int cpu__max_cpu(void)
{
	if (unlikely(!max_cpu_num))
		set_max_cpu_num();

	return max_cpu_num;
}

int cpu__max_present_cpu(void)
{
	if (unlikely(!max_present_cpu_num))
		set_max_cpu_num();

	return max_present_cpu_num;
}

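/*
 * Look up the NUMA node of a CPU; cpu__setup_cpunode_map() must have
 * been called first, otherwise -1 is returned.
 */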
int cpu__get_node(int cpu)
{
	if (unlikely(cpunode_map == NULL)) {
		pr_debug("cpu_map not initialized\n");
		return -1;
	}

	return cpunode_map[cpu];
}

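/*
 * Allocate cpunode_map with one slot per possible CPU, all set to the
 * -1 "unknown node" sentinel.
 */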
static int init_cpunode_map(void)
{
	int i;

	set_max_cpu_num();
	set_max_node_num();

	cpunode_map = calloc(max_cpu_num, sizeof(int));
	if (!cpunode_map) {
		pr_err("%s: calloc failed\n", __func__);
		return -1;
	}

	for (i = 0; i < max_cpu_num; i++)
		cpunode_map[i] = -1;

	return 0;
}

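/*
 * Fill cpunode_map by walking /sys/devices/system/node: each nodeX
 * directory contains cpuY symlinks for the CPUs on that node. A missing
 * sysfs mount or node directory is not treated as an error.
 */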
int cpu__setup_cpunode_map(void)
{
	struct dirent *dent1, *dent2;
	DIR *dir1, *dir2;
	unsigned int cpu, mem;
	char buf[PATH_MAX];
	char path[PATH_MAX];
	const char *mnt;
	int n;

	/* initialize globals */
	if (init_cpunode_map())
		return -1;

	mnt = sysfs__mountpoint();
	if (!mnt)
		return 0;

	n = snprintf(path, PATH_MAX, "%s/devices/system/node", mnt);
	if (n >= PATH_MAX) {
		pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
		return -1;
	}

	dir1 = opendir(path);
	if (!dir1)
		return 0;

	/* walk tree and setup map */
	while ((dent1 = readdir(dir1)) != NULL) {
		if (dent1->d_type != DT_DIR || sscanf(dent1->d_name, "node%u", &mem) < 1)
			continue;

		n = snprintf(buf, PATH_MAX, "%s/%s", path, dent1->d_name);
		if (n >= PATH_MAX) {
			pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
			continue;
		}

		dir2 = opendir(buf);
		if (!dir2)
			continue;
		while ((dent2 = readdir(dir2)) != NULL) {
			if (dent2->d_type != DT_LNK || sscanf(dent2->d_name, "cpu%u", &cpu) < 1)
				continue;
			cpunode_map[cpu] = mem;
		}
		closedir(dir2);
	}
	closedir(dir1);
	return 0;
}

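/*
 * Format a cpu map as a human-readable list, collapsing consecutive
 * CPUs into ranges, e.g. a map holding {0,1,2,4,5} prints as "0-2,4-5".
 * The loop runs one step past the end with an INT_MAX sentinel so the
 * final run is flushed like any other.
 */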
size_t cpu_map__snprint(struct perf_cpu_map *map, char *buf, size_t size)
{
	int i, cpu, start = -1;
	bool first = true;
	size_t ret = 0;

#define COMMA first ? "" : ","

	for (i = 0; i < map->nr + 1; i++) {
		bool last = i == map->nr;

		cpu = last ? INT_MAX : map->map[i];

		if (start == -1) {
			start = i;
			if (last) {
				ret += snprintf(buf + ret, size - ret,
						"%s%d", COMMA,
						map->map[i]);
			}
		} else if (((i - start) != (cpu - map->map[start])) || last) {
			int end = i - 1;

			if (start == end) {
				ret += snprintf(buf + ret, size - ret,
						"%s%d", COMMA,
						map->map[start]);
			} else {
				ret += snprintf(buf + ret, size - ret,
						"%s%d-%d", COMMA,
						map->map[start], map->map[end]);
			}
			first = false;
			start = i;
		}
	}

#undef COMMA

	pr_debug2("cpumask list: %s\n", buf);
	return ret;
}

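/* Map a nibble (0-15) to its lowercase hex digit. */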
static char hex_char(unsigned char val)
{
	if (val < 10)
		return val + '0';
	if (val < 16)
		return val - 10 + 'a';
	return '?';
}

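/*
 * Format a cpu map as a hex mask, highest nibble first, with a ','
 * every 32 bits, similar to the kernel's cpumask format: e.g. a map
 * holding {0,1,2,4} prints as "17".
 */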
size_t cpu_map__snprint_mask(struct perf_cpu_map *map, char *buf, size_t size)
{
	int i, cpu;
	char *ptr = buf;
	unsigned char *bitmap;
	int last_cpu = perf_cpu_map__cpu(map, map->nr - 1);

	if (buf == NULL)
		return 0;

	bitmap = zalloc(last_cpu / 8 + 1);
	if (bitmap == NULL) {
		buf[0] = '\0';
		return 0;
	}

	for (i = 0; i < map->nr; i++) {
		cpu = perf_cpu_map__cpu(map, i);
		bitmap[cpu / 8] |= 1 << (cpu % 8);
	}

	for (cpu = last_cpu / 4 * 4; cpu >= 0; cpu -= 4) {
		unsigned char bits = bitmap[cpu / 8];

		if (cpu % 8)
			bits >>= 4;
		else
			bits &= 0xf;

		*ptr++ = hex_char(bits);
		if ((cpu % 32) == 0 && cpu > 0)
			*ptr++ = ',';
	}
	*ptr = '\0';
	free(bitmap);

	buf[size - 1] = '\0';
	return ptr - buf;
}

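/*
 * Return the cached map of online CPUs, read from
 * /sys/devices/system/cpu/online on first use; the map is never freed
 * and, as noted, initialization is not thread safe.
 */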
const struct perf_cpu_map *cpu_map__online(void) /* thread unsafe */
{
	static const struct perf_cpu_map *online = NULL;

	if (!online)
		online = perf_cpu_map__new(NULL); /* from /sys/devices/system/cpu/online */

	return online;
}

bool aggr_cpu_id__equal(const struct aggr_cpu_id *a, const struct aggr_cpu_id *b)
{
	return a->thread == b->thread &&
		a->node == b->node &&
		a->socket == b->socket &&
		a->die == b->die &&
		a->core == b->core;
}

bool aggr_cpu_id__is_empty(const struct aggr_cpu_id *a)
{
	return a->thread == -1 &&
		a->node == -1 &&
		a->socket == -1 &&
		a->die == -1 &&
		a->core == -1;
}

struct aggr_cpu_id aggr_cpu_id__empty(void)
{
	struct aggr_cpu_id ret = {
		.thread = -1,
		.node = -1,
		.socket = -1,
		.die = -1,
		.core = -1
	};
	return ret;
}
597