xref: /openbmc/linux/tools/perf/util/pmus.c (revision f0168042)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/list.h>
3 #include <linux/zalloc.h>
4 #include <subcmd/pager.h>
5 #include <sys/types.h>
6 #include <dirent.h>
7 #include <pthread.h>
8 #include <string.h>
9 #include <unistd.h>
10 #include "debug.h"
11 #include "evsel.h"
12 #include "pmus.h"
13 #include "pmu.h"
14 #include "print-events.h"
15 
/*
 * core_pmus:  A PMU belongs to core_pmus if its name is "cpu" or its sysfs
 *             directory contains a "cpus" file. All PMUs belonging to
 *             core_pmus must have pmu->is_core=1. If there is more than one
 *             PMU in this list, perf interprets it as a heterogeneous
 *             platform. (FWIW, certain ARM platforms with heterogeneous
 *             cores use a homogeneous PMU, and thus they are treated as a
 *             homogeneous platform by perf because core_pmus will have only
 *             one entry.)
 * other_pmus: All other PMUs which are not part of the core_pmus list. It
 *             doesn't matter whether the PMU is present per SMT-thread or
 *             outside of the core in the hw. E.g., an instance of the AMD
 *             ibs_fetch// and ibs_op// PMUs is present in each hw SMT
 *             thread, however they are captured under other_pmus. PMUs
 *             belonging to other_pmus must have pmu->is_core=0, but
 *             pmu->is_uncore could be 0 or 1.
 */
/* Lists of core and all other (uncore etc.) PMUs discovered so far. */
static LIST_HEAD(core_pmus);
static LIST_HEAD(other_pmus);
/* Set once every core PMU in sysfs has been read into core_pmus. */
static bool read_sysfs_core_pmus;
/* Set once every PMU in sysfs has been read into the two lists above. */
static bool read_sysfs_all_pmus;
35 
36 void perf_pmus__destroy(void)
37 {
38 	struct perf_pmu *pmu, *tmp;
39 
40 	list_for_each_entry_safe(pmu, tmp, &core_pmus, list) {
41 		list_del(&pmu->list);
42 
43 		perf_pmu__delete(pmu);
44 	}
45 	list_for_each_entry_safe(pmu, tmp, &other_pmus, list) {
46 		list_del(&pmu->list);
47 
48 		perf_pmu__delete(pmu);
49 	}
50 	read_sysfs_core_pmus = false;
51 	read_sysfs_all_pmus = false;
52 }
53 
54 static struct perf_pmu *pmu_find(const char *name)
55 {
56 	struct perf_pmu *pmu;
57 
58 	list_for_each_entry(pmu, &core_pmus, list) {
59 		if (!strcmp(pmu->name, name) ||
60 		    (pmu->alias_name && !strcmp(pmu->alias_name, name)))
61 			return pmu;
62 	}
63 	list_for_each_entry(pmu, &other_pmus, list) {
64 		if (!strcmp(pmu->name, name) ||
65 		    (pmu->alias_name && !strcmp(pmu->alias_name, name)))
66 			return pmu;
67 	}
68 
69 	return NULL;
70 }
71 
72 struct perf_pmu *perf_pmus__find(const char *name)
73 {
74 	struct perf_pmu *pmu;
75 	int dirfd;
76 	bool core_pmu;
77 
78 	/*
79 	 * Once PMU is loaded it stays in the list,
80 	 * so we keep us from multiple reading/parsing
81 	 * the pmu format definitions.
82 	 */
83 	pmu = pmu_find(name);
84 	if (pmu)
85 		return pmu;
86 
87 	if (read_sysfs_all_pmus)
88 		return NULL;
89 
90 	core_pmu = is_pmu_core(name);
91 	if (core_pmu && read_sysfs_core_pmus)
92 		return NULL;
93 
94 	dirfd = perf_pmu__event_source_devices_fd();
95 	pmu = perf_pmu__lookup(core_pmu ? &core_pmus : &other_pmus, dirfd, name);
96 	close(dirfd);
97 
98 	return pmu;
99 }
100 
101 static struct perf_pmu *perf_pmu__find2(int dirfd, const char *name)
102 {
103 	struct perf_pmu *pmu;
104 	bool core_pmu;
105 
106 	/*
107 	 * Once PMU is loaded it stays in the list,
108 	 * so we keep us from multiple reading/parsing
109 	 * the pmu format definitions.
110 	 */
111 	pmu = pmu_find(name);
112 	if (pmu)
113 		return pmu;
114 
115 	if (read_sysfs_all_pmus)
116 		return NULL;
117 
118 	core_pmu = is_pmu_core(name);
119 	if (core_pmu && read_sysfs_core_pmus)
120 		return NULL;
121 
122 	return perf_pmu__lookup(core_pmu ? &core_pmus : &other_pmus, dirfd, name);
123 }
124 
125 /* Add all pmus in sysfs to pmu list: */
126 static void pmu_read_sysfs(bool core_only)
127 {
128 	int fd;
129 	DIR *dir;
130 	struct dirent *dent;
131 
132 	if (read_sysfs_all_pmus || (core_only && read_sysfs_core_pmus))
133 		return;
134 
135 	fd = perf_pmu__event_source_devices_fd();
136 	if (fd < 0)
137 		return;
138 
139 	dir = fdopendir(fd);
140 	if (!dir) {
141 		close(fd);
142 		return;
143 	}
144 
145 	while ((dent = readdir(dir))) {
146 		if (!strcmp(dent->d_name, ".") || !strcmp(dent->d_name, ".."))
147 			continue;
148 		if (core_only && !is_pmu_core(dent->d_name))
149 			continue;
150 		/* add to static LIST_HEAD(core_pmus) or LIST_HEAD(other_pmus): */
151 		perf_pmu__find2(fd, dent->d_name);
152 	}
153 
154 	closedir(dir);
155 	if (core_only) {
156 		if (!list_empty(&core_pmus))
157 			read_sysfs_core_pmus = true;
158 		else {
159 			if (perf_pmu__create_placeholder_core_pmu(&core_pmus))
160 				read_sysfs_core_pmus = true;
161 		}
162 	} else {
163 		read_sysfs_core_pmus = true;
164 		read_sysfs_all_pmus = true;
165 	}
166 }
167 
168 static struct perf_pmu *__perf_pmus__find_by_type(unsigned int type)
169 {
170 	struct perf_pmu *pmu;
171 
172 	list_for_each_entry(pmu, &core_pmus, list) {
173 		if (pmu->type == type)
174 			return pmu;
175 	}
176 
177 	list_for_each_entry(pmu, &other_pmus, list) {
178 		if (pmu->type == type)
179 			return pmu;
180 	}
181 	return NULL;
182 }
183 
184 struct perf_pmu *perf_pmus__find_by_type(unsigned int type)
185 {
186 	struct perf_pmu *pmu = __perf_pmus__find_by_type(type);
187 
188 	if (pmu || read_sysfs_all_pmus)
189 		return pmu;
190 
191 	pmu_read_sysfs(/*core_only=*/false);
192 	pmu = __perf_pmus__find_by_type(type);
193 	return pmu;
194 }
195 
/*
 * pmu iterator: If pmu is NULL, we start at the begin, otherwise return the
 * next pmu. Returns NULL on end. Iterates core_pmus first, then other_pmus.
 */
struct perf_pmu *perf_pmus__scan(struct perf_pmu *pmu)
{
	bool use_core_pmus = !pmu || pmu->is_core;

	if (!pmu) {
		/* First call: make sure both lists are populated from sysfs. */
		pmu_read_sysfs(/*core_only=*/false);
		/*
		 * list_prepare_entry() with a NULL entry yields a pseudo
		 * position just before the first element, so the _continue
		 * loop below starts at the head of core_pmus.
		 */
		pmu = list_prepare_entry(pmu, &core_pmus, list);
	}
	if (use_core_pmus) {
		/* Returns the entry after pmu, if any remain on core_pmus. */
		list_for_each_entry_continue(pmu, &core_pmus, list)
			return pmu;

		/* core_pmus exhausted: restart just before the head of other_pmus. */
		pmu = NULL;
		pmu = list_prepare_entry(pmu, &other_pmus, list);
	}
	/* Returns the entry after pmu on other_pmus, or NULL at the end. */
	list_for_each_entry_continue(pmu, &other_pmus, list)
		return pmu;
	return NULL;
}
219 
220 struct perf_pmu *perf_pmus__scan_core(struct perf_pmu *pmu)
221 {
222 	if (!pmu) {
223 		pmu_read_sysfs(/*core_only=*/true);
224 		pmu = list_prepare_entry(pmu, &core_pmus, list);
225 	}
226 	list_for_each_entry_continue(pmu, &core_pmus, list)
227 		return pmu;
228 
229 	return NULL;
230 }
231 
232 const struct perf_pmu *perf_pmus__pmu_for_pmu_filter(const char *str)
233 {
234 	struct perf_pmu *pmu = NULL;
235 
236 	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
237 		if (!strcmp(pmu->name, str))
238 			return pmu;
239 		/* Ignore "uncore_" prefix. */
240 		if (!strncmp(pmu->name, "uncore_", 7)) {
241 			if (!strcmp(pmu->name + 7, str))
242 				return pmu;
243 		}
244 		/* Ignore "cpu_" prefix on Intel hybrid PMUs. */
245 		if (!strncmp(pmu->name, "cpu_", 4)) {
246 			if (!strcmp(pmu->name + 4, str))
247 				return pmu;
248 		}
249 	}
250 	return NULL;
251 }
252 
/*
 * Number of PMUs usable for memory events. Weak default counts the core
 * PMUs; an architecture may provide a strong override.
 */
int __weak perf_pmus__num_mem_pmus(void)
{
	/* All core PMUs are for mem events. */
	return perf_pmus__num_core_pmus();
}
258 
/** Struct for ordering events as output in perf list. */
struct sevent {
	/** PMU for event. */
	const struct perf_pmu *pmu;
	/**
	 * Optional event for name, desc, etc. If not present then this is a
	 * selectable PMU and the event name is shown as "//".
	 */
	const struct perf_pmu_alias *event;
	/** Is the PMU a CPU core PMU (copied from pmu->is_core)? */
	bool is_cpu;
};
271 
272 static int cmp_sevent(const void *a, const void *b)
273 {
274 	const struct sevent *as = a;
275 	const struct sevent *bs = b;
276 	const char *a_pmu_name = NULL, *b_pmu_name = NULL;
277 	const char *a_name = "//", *a_desc = NULL, *a_topic = "";
278 	const char *b_name = "//", *b_desc = NULL, *b_topic = "";
279 	int ret;
280 
281 	if (as->event) {
282 		a_name = as->event->name;
283 		a_desc = as->event->desc;
284 		a_topic = as->event->topic ?: "";
285 		a_pmu_name = as->event->pmu_name;
286 	}
287 	if (bs->event) {
288 		b_name = bs->event->name;
289 		b_desc = bs->event->desc;
290 		b_topic = bs->event->topic ?: "";
291 		b_pmu_name = bs->event->pmu_name;
292 	}
293 	/* Put extra events last. */
294 	if (!!a_desc != !!b_desc)
295 		return !!a_desc - !!b_desc;
296 
297 	/* Order by topics. */
298 	ret = strcmp(a_topic, b_topic);
299 	if (ret)
300 		return ret;
301 
302 	/* Order CPU core events to be first */
303 	if (as->is_cpu != bs->is_cpu)
304 		return as->is_cpu ? -1 : 1;
305 
306 	/* Order by PMU name. */
307 	if (as->pmu != bs->pmu) {
308 		a_pmu_name = a_pmu_name ?: (as->pmu->name ?: "");
309 		b_pmu_name = b_pmu_name ?: (bs->pmu->name ?: "");
310 		ret = strcmp(a_pmu_name, b_pmu_name);
311 		if (ret)
312 			return ret;
313 	}
314 
315 	/* Order by event name. */
316 	return strcmp(a_name, b_name);
317 }
318 
319 static bool pmu_alias_is_duplicate(struct sevent *alias_a,
320 				   struct sevent *alias_b)
321 {
322 	const char *a_pmu_name = NULL, *b_pmu_name = NULL;
323 	const char *a_name = "//", *b_name = "//";
324 
325 
326 	if (alias_a->event) {
327 		a_name = alias_a->event->name;
328 		a_pmu_name = alias_a->event->pmu_name;
329 	}
330 	if (alias_b->event) {
331 		b_name = alias_b->event->name;
332 		b_pmu_name = alias_b->event->pmu_name;
333 	}
334 
335 	/* Different names -> never duplicates */
336 	if (strcmp(a_name, b_name))
337 		return false;
338 
339 	/* Don't remove duplicates for different PMUs */
340 	a_pmu_name = a_pmu_name ?: (alias_a->pmu->name ?: "");
341 	b_pmu_name = b_pmu_name ?: (alias_b->pmu->name ?: "");
342 	return strcmp(a_pmu_name, b_pmu_name) == 0;
343 }
344 
/* Subtract b from a, clamping the result at zero (never negative). */
static int sub_non_neg(int a, int b)
{
	return b > a ? 0 : a - b;
}
351 
/*
 * Format "pmu/alias,term=str,.../" into buf (len bytes) and return buf.
 * Only string-valued terms are appended; output is truncated to fit and
 * always NUL-terminated.
 */
static char *format_alias(char *buf, int len, const struct perf_pmu *pmu,
			  const struct perf_pmu_alias *alias)
{
	struct parse_events_term *term;
	/* snprintf returns the would-be length, so used can exceed len. */
	int used = snprintf(buf, len, "%s/%s", pmu->name, alias->name);

	list_for_each_entry(term, &alias->terms, list) {
		if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR)
			/*
			 * sub_non_neg() clamps the remaining size at 0 on
			 * overflow. NOTE(review): once used > len, buf + used
			 * points past the buffer even though size is 0 —
			 * confirm this is acceptable here.
			 */
			used += snprintf(buf + used, sub_non_neg(len, used),
					",%s=%s", term->config,
					term->val.str);
	}

	/* Append the trailing '/' and NUL if they still fit... */
	if (sub_non_neg(len, used) > 0) {
		buf[used] = '/';
		used++;
	}
	if (sub_non_neg(len, used) > 0) {
		buf[used] = '\0';
		used++;
	} else
		/* ...otherwise terminate the truncated string in place. */
		buf[len - 1] = '\0';

	return buf;
}
377 
/*
 * Print all PMU events via the given callbacks: gather every alias (plus a
 * "pmu//" entry per selectable PMU), sort them, drop duplicates and emit
 * each through print_cb->print_event().
 */
void perf_pmus__print_pmu_events(const struct print_callbacks *print_cb, void *print_state)
{
	struct perf_pmu *pmu;
	struct perf_pmu_alias *event;
	char buf[1024];
	int printed = 0; /* NOTE(review): never incremented, so the trailing-newline check below is dead — confirm intent. */
	int len, j;
	struct sevent *aliases;

	/* First pass: count entries so the sort array can be sized. */
	pmu = NULL;
	len = 0;
	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
		list_for_each_entry(event, &pmu->aliases, list)
			len++;
		if (pmu->selectable)
			len++;
	}
	aliases = zalloc(sizeof(struct sevent) * len);
	if (!aliases) {
		pr_err("FATAL: not enough memory to print PMU events\n");
		return;
	}
	/* Second pass: fill the array. */
	pmu = NULL;
	j = 0;
	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
		bool is_cpu = pmu->is_core;

		list_for_each_entry(event, &pmu->aliases, list) {
			aliases[j].event = event;
			aliases[j].pmu = pmu;
			aliases[j].is_cpu = is_cpu;
			j++;
		}
		/* A selectable PMU gets a NULL event, shown as "pmu//". */
		if (pmu->selectable) {
			aliases[j].event = NULL;
			aliases[j].pmu = pmu;
			aliases[j].is_cpu = is_cpu;
			j++;
		}
	}
	len = j;
	qsort(aliases, len, sizeof(struct sevent), cmp_sevent);
	for (j = 0; j < len; j++) {
		const char *name, *alias = NULL, *scale_unit = NULL,
			*desc = NULL, *long_desc = NULL,
			*encoding_desc = NULL, *topic = NULL,
			*pmu_name = NULL;
		bool deprecated = false;
		/*
		 * buf is packed with several NUL-separated strings; buf_used
		 * tracks the next free byte (each snprintf +1 keeps the NUL).
		 */
		size_t buf_used;

		/* Skip duplicates (sorting placed them adjacently). */
		if (j > 0 && pmu_alias_is_duplicate(&aliases[j], &aliases[j - 1]))
			continue;

		if (!aliases[j].event) {
			/* A selectable event, printed as "pmu//". */
			pmu_name = aliases[j].pmu->name;
			buf_used = snprintf(buf, sizeof(buf), "%s//", pmu_name) + 1;
			name = buf;
		} else {
			if (aliases[j].event->desc) {
				name = aliases[j].event->name;
				buf_used = 0;
			} else {
				/* No description: show the expanded "pmu/alias,terms/" form. */
				name = format_alias(buf, sizeof(buf), aliases[j].pmu,
						    aliases[j].event);
				if (aliases[j].is_cpu) {
					/* For core PMUs the plain name is primary, the expansion is the alias. */
					alias = name;
					name = aliases[j].event->name;
				}
				buf_used = strlen(buf) + 1;
			}
			pmu_name = aliases[j].event->pmu_name ?: (aliases[j].pmu->name ?: "");
			/* Only mention scale/unit when they carry information. */
			if (strlen(aliases[j].event->unit) || aliases[j].event->scale != 1.0) {
				scale_unit = buf + buf_used;
				buf_used += snprintf(buf + buf_used, sizeof(buf) - buf_used,
						"%G%s", aliases[j].event->scale,
						aliases[j].event->unit) + 1;
			}
			desc = aliases[j].event->desc;
			long_desc = aliases[j].event->long_desc;
			topic = aliases[j].event->topic;
			encoding_desc = buf + buf_used;
			buf_used += snprintf(buf + buf_used, sizeof(buf) - buf_used,
					"%s/%s/", pmu_name, aliases[j].event->str) + 1;
			deprecated = aliases[j].event->deprecated;
		}
		print_cb->print_event(print_state,
				pmu_name,
				topic,
				name,
				alias,
				scale_unit,
				deprecated,
				"Kernel PMU event",
				desc,
				long_desc,
				encoding_desc);
	}
	if (printed && pager_in_use())
		printf("\n");

	zfree(&aliases);
}
482 
483 bool perf_pmus__have_event(const char *pname, const char *name)
484 {
485 	struct perf_pmu *pmu = perf_pmus__find(pname);
486 
487 	return pmu && perf_pmu__have_event(pmu, name);
488 }
489 
490 int perf_pmus__num_core_pmus(void)
491 {
492 	static int count;
493 
494 	if (!count) {
495 		struct perf_pmu *pmu = NULL;
496 
497 		while ((pmu = perf_pmus__scan_core(pmu)) != NULL)
498 			count++;
499 	}
500 	return count;
501 }
502 
503 static bool __perf_pmus__supports_extended_type(void)
504 {
505 	struct perf_pmu *pmu = NULL;
506 
507 	if (perf_pmus__num_core_pmus() <= 1)
508 		return false;
509 
510 	while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
511 		if (!is_event_supported(PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES | ((__u64)pmu->type << PERF_PMU_TYPE_SHIFT)))
512 			return false;
513 	}
514 
515 	return true;
516 }
517 
/* Cached probe result, written once by the pthread_once() callback below. */
static bool perf_pmus__do_support_extended_type;

/* pthread_once() callback: run the probe and cache its answer. */
static void perf_pmus__init_supports_extended_type(void)
{
	perf_pmus__do_support_extended_type = __perf_pmus__supports_extended_type();
}
524 
/*
 * Do all core PMUs accept the extended-type event encoding? The probe runs
 * at most once, even with concurrent callers, and the result is cached.
 */
bool perf_pmus__supports_extended_type(void)
{
	static pthread_once_t extended_type_once = PTHREAD_ONCE_INIT;

	pthread_once(&extended_type_once, perf_pmus__init_supports_extended_type);

	return perf_pmus__do_support_extended_type;
}
533 
534 char *perf_pmus__default_pmu_name(void)
535 {
536 	int fd;
537 	DIR *dir;
538 	struct dirent *dent;
539 	char *result = NULL;
540 
541 	if (!list_empty(&core_pmus))
542 		return strdup(list_first_entry(&core_pmus, struct perf_pmu, list)->name);
543 
544 	fd = perf_pmu__event_source_devices_fd();
545 	if (fd < 0)
546 		return strdup("cpu");
547 
548 	dir = fdopendir(fd);
549 	if (!dir) {
550 		close(fd);
551 		return strdup("cpu");
552 	}
553 
554 	while ((dent = readdir(dir))) {
555 		if (!strcmp(dent->d_name, ".") || !strcmp(dent->d_name, ".."))
556 			continue;
557 		if (is_pmu_core(dent->d_name)) {
558 			result = strdup(dent->d_name);
559 			break;
560 		}
561 	}
562 
563 	closedir(dir);
564 	return result ?: strdup("cpu");
565 }
566 
/*
 * Find the PMU matching evsel's attr.type, memoizing the result on the
 * evsel itself so repeated lookups are cheap.
 */
struct perf_pmu *evsel__find_pmu(const struct evsel *evsel)
{
	struct perf_pmu *pmu = evsel->pmu;

	if (!pmu) {
		pmu = perf_pmus__find_by_type(evsel->core.attr.type);
		/* Cast away const to cache the lookup; the evsel is otherwise unchanged. */
		((struct evsel *)evsel)->pmu = pmu;
	}
	return pmu;
}
577