// xref: /openbmc/linux/tools/lib/perf/cpumap.c (revision 35e6bcd1)
// SPDX-License-Identifier: GPL-2.0-only
#include <perf/cpumap.h>
#include <stdlib.h>
#include <linux/refcount.h>
#include <internal/cpumap.h>
#include <asm/bug.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <ctype.h>
#include <limits.h>
#include <assert.h>	/* for assert() */

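/*
 * The RC_STRUCT()/RC_CHK_*()/ADD_RC_CHK() helpers used throughout this
 * file belong to libperf's optional reference-count checking layer: with
 * checking enabled they add an indirection that catches use-after-put
 * bugs, and with it disabled they reduce to plain struct accesses.
 */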
void perf_cpu_map__set_nr(struct perf_cpu_map *map, int nr_cpus)
{
	RC_CHK_ACCESS(map)->nr = nr_cpus;
}

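/*
 * Allocate a map with room for nr_cpus entries: the header and the
 * payload share a single malloc() block, and the reference count starts
 * at 1, owned by the caller.
 */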
struct perf_cpu_map *perf_cpu_map__alloc(int nr_cpus)
{
	RC_STRUCT(perf_cpu_map) *cpus = malloc(sizeof(*cpus) + sizeof(struct perf_cpu) * nr_cpus);
	struct perf_cpu_map *result;

	if (ADD_RC_CHK(result, cpus)) {
		cpus->nr = nr_cpus;
		refcount_set(&cpus->refcnt, 1);
	}
	return result;
}

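/*
 * A "dummy" map holds the single entry cpu == -1, the libperf convention
 * for "any CPU", e.g. for per-thread events not bound to a CPU.
 */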
struct perf_cpu_map *perf_cpu_map__dummy_new(void)
{
	struct perf_cpu_map *cpus = perf_cpu_map__alloc(1);

	if (cpus)
		RC_CHK_ACCESS(cpus)->map[0].cpu = -1;

	return cpus;
}

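/*
 * Free the map storage. Reached only from perf_cpu_map__put() once the
 * reference count has dropped to zero, hence the warning if it has not.
 */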
static void cpu_map__delete(struct perf_cpu_map *map)
{
	if (map) {
		WARN_ONCE(refcount_read(perf_cpu_map__refcnt(map)) != 0,
			  "cpu_map refcnt unbalanced\n");
		RC_CHK_FREE(map);
	}
}

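/*
 * perf_cpu_map__get()/perf_cpu_map__put() form the usual acquire/release
 * pair: every successful get, and every constructor, must be balanced by
 * a put. A minimal usage sketch (illustrative only, not part of the
 * kernel source; error handling omitted):
 *
 *	struct perf_cpu_map *map = perf_cpu_map__new("0-3");
 *	struct perf_cpu_map *ref = perf_cpu_map__get(map);
 *
 *	... use map and ref ...
 *
 *	perf_cpu_map__put(ref);
 *	perf_cpu_map__put(map);
 */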
struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map)
{
	struct perf_cpu_map *result;

	if (RC_CHK_GET(result, map))
		refcount_inc(perf_cpu_map__refcnt(map));

	return result;
}

void perf_cpu_map__put(struct perf_cpu_map *map)
{
	if (map) {
		if (refcount_dec_and_test(perf_cpu_map__refcnt(map)))
			cpu_map__delete(map);
		else
			RC_CHK_PUT(map);
	}
}

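/*
 * Build the default map from the number of online processors, assuming
 * CPU ids 0..nr_cpus-1. sysconf() only reports a count, so sparse online
 * sets are handled by the sysfs-based path further below instead.
 */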
static struct perf_cpu_map *cpu_map__default_new(void)
{
	struct perf_cpu_map *cpus;
	int nr_cpus;

	nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
	if (nr_cpus < 0)
		return NULL;

	cpus = perf_cpu_map__alloc(nr_cpus);
	if (cpus != NULL) {
		int i;

		for (i = 0; i < nr_cpus; ++i)
			RC_CHK_ACCESS(cpus)->map[i].cpu = i;
	}

	return cpus;
}

struct perf_cpu_map *perf_cpu_map__default_new(void)
{
	return cpu_map__default_new();
}

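/*
 * cpu_map__trim_new() turns a scratch array into a canonical map: copy,
 * qsort() with the comparator below, then squeeze out duplicates in
 * place. Only nr is shrunk; the allocation keeps its original size.
 */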
static int cmp_cpu(const void *a, const void *b)
{
	const struct perf_cpu *cpu_a = a, *cpu_b = b;

	return cpu_a->cpu - cpu_b->cpu;
}

static struct perf_cpu_map *cpu_map__trim_new(int nr_cpus, const struct perf_cpu *tmp_cpus)
{
	size_t payload_size = nr_cpus * sizeof(struct perf_cpu);
	struct perf_cpu_map *cpus = perf_cpu_map__alloc(nr_cpus);
	int i, j;

	if (cpus != NULL) {
		memcpy(RC_CHK_ACCESS(cpus)->map, tmp_cpus, payload_size);
		qsort(RC_CHK_ACCESS(cpus)->map, nr_cpus, sizeof(struct perf_cpu), cmp_cpu);
		/* Remove dups */
		j = 0;
		for (i = 0; i < nr_cpus; i++) {
			if (i == 0 || RC_CHK_ACCESS(cpus)->map[i].cpu != RC_CHK_ACCESS(cpus)->map[i - 1].cpu)
				RC_CHK_ACCESS(cpus)->map[j++].cpu = RC_CHK_ACCESS(cpus)->map[i].cpu;
		}
		perf_cpu_map__set_nr(cpus, j);
		assert(j <= nr_cpus);
	}
	return cpus;
}

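/*
 * Parse a kernel-style CPU list such as "0-3,7\n" (the format of the
 * sysfs "online" file) from an already-open FILE. Ranges are expanded
 * one CPU at a time; if nothing is parsed, fall back to the default map.
 */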
struct perf_cpu_map *perf_cpu_map__read(FILE *file)
{
	struct perf_cpu_map *cpus = NULL;
	int nr_cpus = 0;
	struct perf_cpu *tmp_cpus = NULL, *tmp;
	int max_entries = 0;
	int n, cpu, prev;
	char sep;

	sep = 0;
	prev = -1;
	for (;;) {
		n = fscanf(file, "%u%c", &cpu, &sep);
		if (n <= 0)
			break;
		if (prev >= 0) {
			int new_max = nr_cpus + cpu - prev - 1;

			WARN_ONCE(new_max >= MAX_NR_CPUS, "Perf can support %d CPUs. "
							  "Consider raising MAX_NR_CPUS\n", MAX_NR_CPUS);

			if (new_max >= max_entries) {
				max_entries = new_max + MAX_NR_CPUS / 2;
				tmp = realloc(tmp_cpus, max_entries * sizeof(struct perf_cpu));
				if (tmp == NULL)
					goto out_free_tmp;
				tmp_cpus = tmp;
			}

			while (++prev < cpu)
				tmp_cpus[nr_cpus++].cpu = prev;
		}
		if (nr_cpus == max_entries) {
			max_entries += MAX_NR_CPUS;
			tmp = realloc(tmp_cpus, max_entries * sizeof(struct perf_cpu));
			if (tmp == NULL)
				goto out_free_tmp;
			tmp_cpus = tmp;
		}

		tmp_cpus[nr_cpus++].cpu = cpu;
		if (n == 2 && sep == '-')
			prev = cpu;
		else
			prev = -1;
		if (n == 1 || sep == '\n')
			break;
	}

	if (nr_cpus > 0)
		cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
	else
		cpus = cpu_map__default_new();
out_free_tmp:
	free(tmp_cpus);
	return cpus;
}

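/*
 * Read the online map from sysfs; if the file cannot be opened (e.g. in
 * a minimal chroot), fall back to the sysconf()-based default map.
 */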
static struct perf_cpu_map *cpu_map__read_all_cpu_map(void)
{
	struct perf_cpu_map *cpus = NULL;
	FILE *onlnf;

	onlnf = fopen("/sys/devices/system/cpu/online", "r");
	if (!onlnf)
		return cpu_map__default_new();

	cpus = perf_cpu_map__read(onlnf);
	fclose(onlnf);
	return cpus;
}

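/*
 * Build a map from a user-supplied CPU list string such as "0,2-4".
 * A NULL cpu_list means "all online CPUs"; an empty string yields the
 * dummy (cpu == -1) map, as explained in the comment in the body.
 */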
struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list)
{
	struct perf_cpu_map *cpus = NULL;
	unsigned long start_cpu, end_cpu = 0;
	char *p = NULL;
	int i, nr_cpus = 0;
	struct perf_cpu *tmp_cpus = NULL, *tmp;
	int max_entries = 0;

	if (!cpu_list)
		return cpu_map__read_all_cpu_map();

	/*
	 * must handle the case of empty cpumap to cover
	 * TOPOLOGY header for NUMA nodes with no CPU
	 * (e.g., because of CPU hotplug)
	 */
	if (!isdigit(*cpu_list) && *cpu_list != '\0')
		goto out;

	while (isdigit(*cpu_list)) {
		p = NULL;
		start_cpu = strtoul(cpu_list, &p, 0);
		if (start_cpu >= INT_MAX
		    || (*p != '\0' && *p != ',' && *p != '-'))
			goto invalid;

		if (*p == '-') {
			cpu_list = ++p;
			p = NULL;
			end_cpu = strtoul(cpu_list, &p, 0);

			if (end_cpu >= INT_MAX || (*p != '\0' && *p != ','))
				goto invalid;

			if (end_cpu < start_cpu)
				goto invalid;
		} else {
			end_cpu = start_cpu;
		}

		WARN_ONCE(end_cpu >= MAX_NR_CPUS, "Perf can support %d CPUs. "
						  "Consider raising MAX_NR_CPUS\n", MAX_NR_CPUS);

		for (; start_cpu <= end_cpu; start_cpu++) {
			/* check for duplicates */
			for (i = 0; i < nr_cpus; i++)
				if (tmp_cpus[i].cpu == (int)start_cpu)
					goto invalid;

			if (nr_cpus == max_entries) {
				max_entries += MAX_NR_CPUS;
				tmp = realloc(tmp_cpus, max_entries * sizeof(struct perf_cpu));
				if (tmp == NULL)
					goto invalid;
				tmp_cpus = tmp;
			}
			tmp_cpus[nr_cpus++].cpu = (int)start_cpu;
		}
		if (*p)
			++p;

		cpu_list = p;
	}

	if (nr_cpus > 0)
		cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
	else if (*cpu_list != '\0')
		cpus = cpu_map__default_new();
	else
		cpus = perf_cpu_map__dummy_new();
invalid:
	free(tmp_cpus);
out:
	return cpus;
}

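/*
 * Indexed accessor: returns the idx-th entry, or { .cpu = -1 } when the
 * map is NULL or idx is past the end (idx is assumed non-negative).
 */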
struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx)
{
	struct perf_cpu result = {
		.cpu = -1
	};

	if (cpus && idx < RC_CHK_ACCESS(cpus)->nr)
		return RC_CHK_ACCESS(cpus)->map[idx];

	return result;
}

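/*
 * NULL conventions: a NULL map counts as one "any CPU" entry for
 * perf_cpu_map__nr() and as empty for perf_cpu_map__empty(), matching
 * the dummy map produced by perf_cpu_map__dummy_new().
 */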
int perf_cpu_map__nr(const struct perf_cpu_map *cpus)
{
	return cpus ? RC_CHK_ACCESS(cpus)->nr : 1;
}

bool perf_cpu_map__empty(const struct perf_cpu_map *map)
{
	return map ? RC_CHK_ACCESS(map)->map[0].cpu == -1 : true;
}

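/*
 * Binary search for cpu: maps are kept sorted and duplicate-free by
 * cpu_map__trim_new()/cpu_map__default_new(), so lookup is O(log n).
 * Returns the entry's index, or -1 when the CPU is not in the map.
 */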
int perf_cpu_map__idx(const struct perf_cpu_map *cpus, struct perf_cpu cpu)
{
	int low, high;

	if (!cpus)
		return -1;

	low = 0;
	high = RC_CHK_ACCESS(cpus)->nr;
	while (low < high) {
		int idx = (low + high) / 2;
		struct perf_cpu cpu_at_idx = RC_CHK_ACCESS(cpus)->map[idx];

		if (cpu_at_idx.cpu == cpu.cpu)
			return idx;

		if (cpu_at_idx.cpu > cpu.cpu)
			high = idx;
		else
			low = idx + 1;
	}

	return -1;
}

bool perf_cpu_map__has(const struct perf_cpu_map *cpus, struct perf_cpu cpu)
{
	return perf_cpu_map__idx(cpus, cpu) != -1;
}

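/*
 * Highest CPU id in the map: entries are sorted, so it is the last one.
 * The map must be non-NULL; cpu == -1 is returned only when it is empty.
 */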
struct perf_cpu perf_cpu_map__max(const struct perf_cpu_map *map)
{
	struct perf_cpu result = {
		.cpu = -1
	};

	// cpu_map__trim_new() qsort()s it, cpu_map__default_new() sorts it as well.
	return RC_CHK_ACCESS(map)->nr > 0 ? RC_CHK_ACCESS(map)->map[RC_CHK_ACCESS(map)->nr - 1] : result;
}

/** Is 'b' a subset of 'a'? */
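/* Linear two-pointer scan; relies on both maps being sorted and duplicate-free. */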
bool perf_cpu_map__is_subset(const struct perf_cpu_map *a, const struct perf_cpu_map *b)
{
	if (a == b || !b)
		return true;
	if (!a || RC_CHK_ACCESS(b)->nr > RC_CHK_ACCESS(a)->nr)
		return false;

	for (int i = 0, j = 0; i < RC_CHK_ACCESS(a)->nr; i++) {
		if (RC_CHK_ACCESS(a)->map[i].cpu > RC_CHK_ACCESS(b)->map[j].cpu)
			return false;
		if (RC_CHK_ACCESS(a)->map[i].cpu == RC_CHK_ACCESS(b)->map[j].cpu) {
			j++;
			if (j == RC_CHK_ACCESS(b)->nr)
				return true;
		}
	}
	return false;
}

/*
 * Merge two cpumaps.
 *
 * orig either gets freed and replaced with a new map, or reused with no
 * reference count change (similar to "realloc"). other has its reference
 * count increased.
 */

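/*
 * Typical call pattern under the "realloc"-style ownership rules above
 * (an illustrative sketch, not part of the kernel source):
 *
 *	map = perf_cpu_map__merge(map, other);
 *	perf_cpu_map__put(other);
 *
 * After the call the old 'map' reference must not be used directly. On
 * allocation failure NULL is returned and 'orig' is not freed, again
 * like realloc().
 */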
struct perf_cpu_map *perf_cpu_map__merge(struct perf_cpu_map *orig,
					 struct perf_cpu_map *other)
{
	struct perf_cpu *tmp_cpus;
	int tmp_len;
	int i, j, k;
	struct perf_cpu_map *merged;

	if (perf_cpu_map__is_subset(orig, other))
		return orig;
	if (perf_cpu_map__is_subset(other, orig)) {
		perf_cpu_map__put(orig);
		return perf_cpu_map__get(other);
	}

	tmp_len = RC_CHK_ACCESS(orig)->nr + RC_CHK_ACCESS(other)->nr;
	tmp_cpus = malloc(tmp_len * sizeof(struct perf_cpu));
	if (!tmp_cpus)
		return NULL;

	/* Standard merge algorithm from wikipedia */
	i = j = k = 0;
	while (i < RC_CHK_ACCESS(orig)->nr && j < RC_CHK_ACCESS(other)->nr) {
		if (RC_CHK_ACCESS(orig)->map[i].cpu <= RC_CHK_ACCESS(other)->map[j].cpu) {
			if (RC_CHK_ACCESS(orig)->map[i].cpu == RC_CHK_ACCESS(other)->map[j].cpu)
				j++;
			tmp_cpus[k++] = RC_CHK_ACCESS(orig)->map[i++];
		} else
			tmp_cpus[k++] = RC_CHK_ACCESS(other)->map[j++];
	}

	while (i < RC_CHK_ACCESS(orig)->nr)
		tmp_cpus[k++] = RC_CHK_ACCESS(orig)->map[i++];

	while (j < RC_CHK_ACCESS(other)->nr)
		tmp_cpus[k++] = RC_CHK_ACCESS(other)->map[j++];
	assert(k <= tmp_len);

	merged = cpu_map__trim_new(k, tmp_cpus);
	free(tmp_cpus);
	perf_cpu_map__put(orig);
	return merged;
}
405