xref: /openbmc/linux/tools/perf/util/pmus.c (revision 8e7d8a2e)
// SPDX-License-Identifier: GPL-2.0
#include <linux/list.h>
#include <linux/zalloc.h>
#include <subcmd/pager.h>
#include <sys/types.h>
#include <dirent.h>
#include <string.h>
#include <unistd.h>
#include "debug.h"
#include "evsel.h"
#include "pmus.h"
#include "pmu.h"
#include "print-events.h"

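/*
 * Core PMUs (pmu->is_core, i.e. the CPU PMUs) are kept on core_pmus; all
 * other PMUs, such as uncore PMUs, are kept on other_pmus. The two flags
 * below record whether sysfs has already been scanned for just the core
 * PMUs or for all PMUs, so repeated scans can be avoided.
 */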
static LIST_HEAD(core_pmus);
static LIST_HEAD(other_pmus);
static bool read_sysfs_core_pmus;
static bool read_sysfs_all_pmus;

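/*
 * Free every PMU on both lists and clear the sysfs-read flags so that a
 * later lookup rescans sysfs.
 */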
void perf_pmus__destroy(void)
{
	struct perf_pmu *pmu, *tmp;

	list_for_each_entry_safe(pmu, tmp, &core_pmus, list) {
		list_del(&pmu->list);

		perf_pmu__delete(pmu);
	}
	list_for_each_entry_safe(pmu, tmp, &other_pmus, list) {
		list_del(&pmu->list);

		perf_pmu__delete(pmu);
	}
	read_sysfs_core_pmus = false;
	read_sysfs_all_pmus = false;
}

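/* Look up an already-loaded PMU by name or alias name on either list. */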
static struct perf_pmu *pmu_find(const char *name)
{
	struct perf_pmu *pmu;

	list_for_each_entry(pmu, &core_pmus, list) {
		if (!strcmp(pmu->name, name) ||
		    (pmu->alias_name && !strcmp(pmu->alias_name, name)))
			return pmu;
	}
	list_for_each_entry(pmu, &other_pmus, list) {
		if (!strcmp(pmu->name, name) ||
		    (pmu->alias_name && !strcmp(pmu->alias_name, name)))
			return pmu;
	}

	return NULL;
}

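/* Find a PMU by name, loading it from sysfs if it isn't already on a list. */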
struct perf_pmu *perf_pmus__find(const char *name)
{
	struct perf_pmu *pmu;
	int dirfd;
	bool core_pmu;

	/*
	 * Once a PMU is loaded it stays in the list, which avoids reading
	 * and parsing the PMU format definitions more than once.
	 */
	pmu = pmu_find(name);
	if (pmu)
		return pmu;

	if (read_sysfs_all_pmus)
		return NULL;

	core_pmu = is_pmu_core(name);
	if (core_pmu && read_sysfs_core_pmus)
		return NULL;

	dirfd = perf_pmu__event_source_devices_fd();
	pmu = perf_pmu__lookup(core_pmu ? &core_pmus : &other_pmus, dirfd, name);
	close(dirfd);

	return pmu;
}

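/* As perf_pmus__find() but reuse an already-open sysfs devices directory fd. */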
static struct perf_pmu *perf_pmu__find2(int dirfd, const char *name)
{
	struct perf_pmu *pmu;
	bool core_pmu;

	/*
	 * Once a PMU is loaded it stays in the list, which avoids reading
	 * and parsing the PMU format definitions more than once.
	 */
	pmu = pmu_find(name);
	if (pmu)
		return pmu;

	if (read_sysfs_all_pmus)
		return NULL;

	core_pmu = is_pmu_core(name);
	if (core_pmu && read_sysfs_core_pmus)
		return NULL;

	return perf_pmu__lookup(core_pmu ? &core_pmus : &other_pmus, dirfd, name);
}

/* Add all pmus in sysfs to pmu list: */
static void pmu_read_sysfs(bool core_only)
{
	int fd;
	DIR *dir;
	struct dirent *dent;

	if (read_sysfs_all_pmus || (core_only && read_sysfs_core_pmus))
		return;

	fd = perf_pmu__event_source_devices_fd();
	if (fd < 0)
		return;

	dir = fdopendir(fd);
	if (!dir)
		return;

	while ((dent = readdir(dir))) {
		if (!strcmp(dent->d_name, ".") || !strcmp(dent->d_name, ".."))
			continue;
		if (core_only && !is_pmu_core(dent->d_name))
			continue;
		/* add to static LIST_HEAD(core_pmus) or LIST_HEAD(other_pmus): */
		perf_pmu__find2(fd, dent->d_name);
	}

	closedir(dir);
	if (core_only) {
		read_sysfs_core_pmus = true;
	} else {
		read_sysfs_core_pmus = true;
		read_sysfs_all_pmus = true;
	}
}

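/* Find an already-loaded PMU by its perf_event_attr::type value. */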
struct perf_pmu *perf_pmus__find_by_type(unsigned int type)
{
	struct perf_pmu *pmu;

	list_for_each_entry(pmu, &core_pmus, list) {
		if (pmu->type == type)
			return pmu;
	}
	list_for_each_entry(pmu, &other_pmus, list) {
		if (pmu->type == type)
			return pmu;
	}
	return NULL;
}

/*
 * PMU iterator: if @pmu is NULL, start at the beginning, otherwise return
 * the next PMU. Returns NULL when the end is reached. Core PMUs are
 * walked first, then the other PMUs.
 */
struct perf_pmu *perf_pmus__scan(struct perf_pmu *pmu)
{
	bool use_core_pmus = !pmu || pmu->is_core;

	if (!pmu) {
		pmu_read_sysfs(/*core_only=*/false);
		pmu = list_prepare_entry(pmu, &core_pmus, list);
	}
	if (use_core_pmus) {
		list_for_each_entry_continue(pmu, &core_pmus, list)
			return pmu;

		pmu = NULL;
		pmu = list_prepare_entry(pmu, &other_pmus, list);
	}
	list_for_each_entry_continue(pmu, &other_pmus, list)
		return pmu;
	return NULL;
}

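/* As perf_pmus__scan() but iterate only the core PMUs. */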
struct perf_pmu *perf_pmus__scan_core(struct perf_pmu *pmu)
{
	if (!pmu) {
		pmu_read_sysfs(/*core_only=*/true);
		pmu = list_prepare_entry(pmu, &core_pmus, list);
	}
	list_for_each_entry_continue(pmu, &core_pmus, list)
		return pmu;

	return NULL;
}

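/*
 * Match @str against the PMU names, also accepting a match with an
 * "uncore_" prefix or the "cpu_" prefix used by Intel hybrid PMUs
 * stripped, and return the first matching PMU.
 */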
const struct perf_pmu *perf_pmus__pmu_for_pmu_filter(const char *str)
{
	struct perf_pmu *pmu = NULL;

	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
		if (!strcmp(pmu->name, str))
			return pmu;
		/* Ignore "uncore_" prefix. */
		if (!strncmp(pmu->name, "uncore_", 7)) {
			if (!strcmp(pmu->name + 7, str))
				return pmu;
		}
		/* Ignore "cpu_" prefix on Intel hybrid PMUs. */
		if (!strncmp(pmu->name, "cpu_", 4)) {
			if (!strcmp(pmu->name + 4, str))
				return pmu;
		}
	}
	return NULL;
}

int perf_pmus__num_mem_pmus(void)
{
	struct perf_pmu *pmu = NULL;
	int count = 0;

	/* All core PMUs are for mem events. */
	while ((pmu = perf_pmus__scan_core(pmu)) != NULL)
		count++;

	return count;
}

/** Struct for ordering events as output in perf list. */
struct sevent {
	/** PMU for event. */
	const struct perf_pmu *pmu;
	/**
	 * Optional event for name, desc, etc. If not present then this is a
	 * selectable PMU and the event name is shown as "//".
	 */
	const struct perf_pmu_alias *event;
	/** Is the PMU for the CPU? */
	bool is_cpu;
};

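/*
 * qsort() comparator for struct sevent: events without a description sort
 * before those with one, then order is by topic, CPU core events before
 * others, then PMU name, then event name.
 */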
static int cmp_sevent(const void *a, const void *b)
{
	const struct sevent *as = a;
	const struct sevent *bs = b;
	const char *a_pmu_name = NULL, *b_pmu_name = NULL;
	const char *a_name = "//", *a_desc = NULL, *a_topic = "";
	const char *b_name = "//", *b_desc = NULL, *b_topic = "";
	int ret;

	if (as->event) {
		a_name = as->event->name;
		a_desc = as->event->desc;
		a_topic = as->event->topic ?: "";
		a_pmu_name = as->event->pmu_name;
	}
	if (bs->event) {
		b_name = bs->event->name;
		b_desc = bs->event->desc;
		b_topic = bs->event->topic ?: "";
		b_pmu_name = bs->event->pmu_name;
	}
	/* Put extra events last. */
	if (!!a_desc != !!b_desc)
		return !!a_desc - !!b_desc;

	/* Order by topics. */
	ret = strcmp(a_topic, b_topic);
	if (ret)
		return ret;

	/* Order CPU core events to be first */
	if (as->is_cpu != bs->is_cpu)
		return as->is_cpu ? -1 : 1;

	/* Order by PMU name. */
	if (as->pmu != bs->pmu) {
		a_pmu_name = a_pmu_name ?: (as->pmu->name ?: "");
		b_pmu_name = b_pmu_name ?: (bs->pmu->name ?: "");
		ret = strcmp(a_pmu_name, b_pmu_name);
		if (ret)
			return ret;
	}

	/* Order by event name. */
	return strcmp(a_name, b_name);
}

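/* Two sorted entries are duplicates if both the event name and PMU name match. */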
static bool pmu_alias_is_duplicate(struct sevent *alias_a,
				   struct sevent *alias_b)
{
	const char *a_pmu_name = NULL, *b_pmu_name = NULL;
	const char *a_name = "//", *b_name = "//";

	if (alias_a->event) {
		a_name = alias_a->event->name;
		a_pmu_name = alias_a->event->pmu_name;
	}
	if (alias_b->event) {
		b_name = alias_b->event->name;
		b_pmu_name = alias_b->event->pmu_name;
	}

	/* Different names -> never duplicates */
	if (strcmp(a_name, b_name))
		return false;

	/* Don't remove duplicates for different PMUs */
	a_pmu_name = a_pmu_name ?: (alias_a->pmu->name ?: "");
	b_pmu_name = b_pmu_name ?: (alias_b->pmu->name ?: "");
	return strcmp(a_pmu_name, b_pmu_name) == 0;
}

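/* Subtract b from a, clamping at zero; used to compute remaining buffer space. */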
static int sub_non_neg(int a, int b)
{
	if (b > a)
		return 0;
	return a - b;
}

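/*
 * Format an alias as "pmu/alias_name,term=value,.../" into @buf, truncating
 * the output if the buffer is too small.
 */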
static char *format_alias(char *buf, int len, const struct perf_pmu *pmu,
			  const struct perf_pmu_alias *alias)
{
	struct parse_events_term *term;
	int used = snprintf(buf, len, "%s/%s", pmu->name, alias->name);

	list_for_each_entry(term, &alias->terms, list) {
		if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR)
			used += snprintf(buf + used, sub_non_neg(len, used),
					",%s=%s", term->config,
					term->val.str);
	}

	if (sub_non_neg(len, used) > 0) {
		buf[used] = '/';
		used++;
	}
	if (sub_non_neg(len, used) > 0) {
		buf[used] = '\0';
		used++;
	} else
		buf[len - 1] = '\0';

	return buf;
}

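/*
 * Collect every alias from every PMU (plus one entry per selectable PMU),
 * sort them, skip duplicates and emit each entry through the print callbacks.
 */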
void perf_pmus__print_pmu_events(const struct print_callbacks *print_cb, void *print_state)
{
	struct perf_pmu *pmu;
	struct perf_pmu_alias *event;
	char buf[1024];
	int printed = 0;
	int len, j;
	struct sevent *aliases;

	pmu = NULL;
	len = 0;
	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
		list_for_each_entry(event, &pmu->aliases, list)
			len++;
		if (pmu->selectable)
			len++;
	}
	aliases = zalloc(sizeof(struct sevent) * len);
	if (!aliases) {
		pr_err("FATAL: not enough memory to print PMU events\n");
		return;
	}
	pmu = NULL;
	j = 0;
	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
		bool is_cpu = pmu->is_core;

		list_for_each_entry(event, &pmu->aliases, list) {
			aliases[j].event = event;
			aliases[j].pmu = pmu;
			aliases[j].is_cpu = is_cpu;
			j++;
		}
		if (pmu->selectable) {
			aliases[j].event = NULL;
			aliases[j].pmu = pmu;
			aliases[j].is_cpu = is_cpu;
			j++;
		}
	}
	len = j;
	qsort(aliases, len, sizeof(struct sevent), cmp_sevent);
	for (j = 0; j < len; j++) {
		const char *name, *alias = NULL, *scale_unit = NULL,
			*desc = NULL, *long_desc = NULL,
			*encoding_desc = NULL, *topic = NULL,
			*pmu_name = NULL;
		bool deprecated = false;
		size_t buf_used;

		/* Skip duplicates */
		if (j > 0 && pmu_alias_is_duplicate(&aliases[j], &aliases[j - 1]))
			continue;

		if (!aliases[j].event) {
			/* A selectable event. */
			pmu_name = aliases[j].pmu->name;
			buf_used = snprintf(buf, sizeof(buf), "%s//", pmu_name) + 1;
			name = buf;
		} else {
			if (aliases[j].event->desc) {
				name = aliases[j].event->name;
				buf_used = 0;
			} else {
				name = format_alias(buf, sizeof(buf), aliases[j].pmu,
						    aliases[j].event);
				if (aliases[j].is_cpu) {
					alias = name;
					name = aliases[j].event->name;
				}
				buf_used = strlen(buf) + 1;
			}
			pmu_name = aliases[j].event->pmu_name ?: (aliases[j].pmu->name ?: "");
			if (strlen(aliases[j].event->unit) || aliases[j].event->scale != 1.0) {
				scale_unit = buf + buf_used;
				buf_used += snprintf(buf + buf_used, sizeof(buf) - buf_used,
						"%G%s", aliases[j].event->scale,
						aliases[j].event->unit) + 1;
			}
			desc = aliases[j].event->desc;
			long_desc = aliases[j].event->long_desc;
			topic = aliases[j].event->topic;
			encoding_desc = buf + buf_used;
			buf_used += snprintf(buf + buf_used, sizeof(buf) - buf_used,
					"%s/%s/", pmu_name, aliases[j].event->str) + 1;
			deprecated = aliases[j].event->deprecated;
		}
		print_cb->print_event(print_state,
				pmu_name,
				topic,
				name,
				alias,
				scale_unit,
				deprecated,
				"Kernel PMU event",
				desc,
				long_desc,
				encoding_desc);
	}
	if (printed && pager_in_use())
		printf("\n");

	zfree(&aliases);
}

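/* Does the PMU named @pname have an event named @name? */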
bool perf_pmus__have_event(const char *pname, const char *name)
{
	struct perf_pmu *pmu = perf_pmus__find(pname);

	return pmu && perf_pmu__have_event(pmu, name);
}

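/*
 * Report whether any core PMU is a hybrid PMU (per is_pmu_hybrid()); the
 * result of the scan is cached after the first call.
 */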
bool perf_pmus__has_hybrid(void)
{
	static bool hybrid_scanned, has_hybrid;

	if (!hybrid_scanned) {
		struct perf_pmu *pmu = NULL;

		while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
			if (is_pmu_hybrid(pmu->name)) {
				has_hybrid = true;
				break;
			}
		}
		hybrid_scanned = true;
	}
	return has_hybrid;
}

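/*
 * Find the PMU for an evsel by its attr type and cache it on the evsel
 * (casting away const for the cache write).
 */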
struct perf_pmu *evsel__find_pmu(const struct evsel *evsel)
{
	struct perf_pmu *pmu = evsel->pmu;

	if (!pmu) {
		pmu = perf_pmus__find_by_type(evsel->core.attr.type);
		((struct evsel *)evsel)->pmu = pmu;
	}
	return pmu;
}