xref: /openbmc/linux/tools/perf/util/pmus.c (revision 9d6a1df9)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/list.h>
3 #include <linux/zalloc.h>
4 #include <subcmd/pager.h>
5 #include <sys/types.h>
6 #include <dirent.h>
7 #include <string.h>
8 #include <unistd.h>
9 #include "debug.h"
10 #include "evsel.h"
11 #include "pmus.h"
12 #include "pmu.h"
13 #include "print-events.h"
14 
15 static LIST_HEAD(core_pmus);
16 static LIST_HEAD(other_pmus);
17 
18 void perf_pmus__destroy(void)
19 {
20 	struct perf_pmu *pmu, *tmp;
21 
22 	list_for_each_entry_safe(pmu, tmp, &core_pmus, list) {
23 		list_del(&pmu->list);
24 
25 		perf_pmu__delete(pmu);
26 	}
27 	list_for_each_entry_safe(pmu, tmp, &other_pmus, list) {
28 		list_del(&pmu->list);
29 
30 		perf_pmu__delete(pmu);
31 	}
32 }
33 
34 static struct perf_pmu *pmu_find(const char *name)
35 {
36 	struct perf_pmu *pmu;
37 
38 	list_for_each_entry(pmu, &core_pmus, list) {
39 		if (!strcmp(pmu->name, name) ||
40 		    (pmu->alias_name && !strcmp(pmu->alias_name, name)))
41 			return pmu;
42 	}
43 	list_for_each_entry(pmu, &other_pmus, list) {
44 		if (!strcmp(pmu->name, name) ||
45 		    (pmu->alias_name && !strcmp(pmu->alias_name, name)))
46 			return pmu;
47 	}
48 
49 	return NULL;
50 }
51 
52 struct perf_pmu *perf_pmus__find(const char *name)
53 {
54 	struct perf_pmu *pmu;
55 	int dirfd;
56 
57 	/*
58 	 * Once PMU is loaded it stays in the list,
59 	 * so we keep us from multiple reading/parsing
60 	 * the pmu format definitions.
61 	 */
62 	pmu = pmu_find(name);
63 	if (pmu)
64 		return pmu;
65 
66 	dirfd = perf_pmu__event_source_devices_fd();
67 	pmu = perf_pmu__lookup(is_pmu_core(name) ? &core_pmus : &other_pmus, dirfd, name);
68 	close(dirfd);
69 
70 	return pmu;
71 }
72 
73 static struct perf_pmu *perf_pmu__find2(int dirfd, const char *name)
74 {
75 	struct perf_pmu *pmu;
76 
77 	/*
78 	 * Once PMU is loaded it stays in the list,
79 	 * so we keep us from multiple reading/parsing
80 	 * the pmu format definitions.
81 	 */
82 	pmu = pmu_find(name);
83 	if (pmu)
84 		return pmu;
85 
86 	return perf_pmu__lookup(is_pmu_core(name) ? &core_pmus : &other_pmus, dirfd, name);
87 }
88 
/*
 * Add all pmus in sysfs to pmu list. When @core_only is true, only
 * directories whose name is a core PMU are loaded.
 */
static void pmu_read_sysfs(bool core_only)
{
	int fd;
	DIR *dir;
	struct dirent *dent;

	fd = perf_pmu__event_source_devices_fd();
	if (fd < 0)
		return;

	dir = fdopendir(fd);
	if (!dir) {
		/*
		 * fdopendir() failed, so it did not take ownership of fd;
		 * close it here to avoid leaking the descriptor.
		 */
		close(fd);
		return;
	}

	while ((dent = readdir(dir))) {
		if (!strcmp(dent->d_name, ".") || !strcmp(dent->d_name, ".."))
			continue;
		if (core_only && !is_pmu_core(dent->d_name))
			continue;
		/* add to static LIST_HEAD(core_pmus) or LIST_HEAD(other_pmus): */
		perf_pmu__find2(fd, dent->d_name);
	}

	/* closedir() also closes the underlying fd. */
	closedir(dir);
}
115 
116 struct perf_pmu *perf_pmus__find_by_type(unsigned int type)
117 {
118 	struct perf_pmu *pmu;
119 
120 	list_for_each_entry(pmu, &core_pmus, list) {
121 		if (pmu->type == type)
122 			return pmu;
123 	}
124 	list_for_each_entry(pmu, &other_pmus, list) {
125 		if (pmu->type == type)
126 			return pmu;
127 	}
128 	return NULL;
129 }
130 
/*
 * pmu iterator: If pmu is NULL, we start at the begin, otherwise return the
 * next pmu. Returns NULL on end.
 */
struct perf_pmu *perf_pmus__scan(struct perf_pmu *pmu)
{
	/* A fresh scan (NULL argument) begins on the core list. */
	bool use_core_pmus = !pmu || pmu->is_core;

	if (!pmu) {
		/* First call: make sure all PMUs are loaded from sysfs. */
		pmu_read_sysfs(/*core_only=*/false);
		/* Position at the list head so _continue yields the first entry. */
		pmu = list_prepare_entry(pmu, &core_pmus, list);
	}
	if (use_core_pmus) {
		/* Advance one step within the core list, if any entry remains. */
		list_for_each_entry_continue(pmu, &core_pmus, list)
			return pmu;

		/* Core list exhausted: re-position at the head of the other list. */
		pmu = NULL;
		pmu = list_prepare_entry(pmu, &other_pmus, list);
	}
	/* Advance within (or start on) the other list. */
	list_for_each_entry_continue(pmu, &other_pmus, list)
		return pmu;
	return NULL;
}
154 
/*
 * Core-only pmu iterator: NULL starts the scan, each call returns the next
 * core PMU, NULL return marks the end.
 */
struct perf_pmu *perf_pmus__scan_core(struct perf_pmu *pmu)
{
	if (!pmu) {
		/* First call: load only core PMUs from sysfs. */
		pmu_read_sysfs(/*core_only=*/true);
		/* Position at the list head so _continue yields the first entry. */
		pmu = list_prepare_entry(pmu, &core_pmus, list);
	}
	list_for_each_entry_continue(pmu, &core_pmus, list)
		return pmu;

	return NULL;
}
166 
167 const struct perf_pmu *perf_pmus__pmu_for_pmu_filter(const char *str)
168 {
169 	struct perf_pmu *pmu = NULL;
170 
171 	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
172 		if (!strcmp(pmu->name, str))
173 			return pmu;
174 		/* Ignore "uncore_" prefix. */
175 		if (!strncmp(pmu->name, "uncore_", 7)) {
176 			if (!strcmp(pmu->name + 7, str))
177 				return pmu;
178 		}
179 		/* Ignore "cpu_" prefix on Intel hybrid PMUs. */
180 		if (!strncmp(pmu->name, "cpu_", 4)) {
181 			if (!strcmp(pmu->name + 4, str))
182 				return pmu;
183 		}
184 	}
185 	return NULL;
186 }
187 
188 int perf_pmus__num_mem_pmus(void)
189 {
190 	struct perf_pmu *pmu = NULL;
191 	int count = 0;
192 
193 	/* All core PMUs are for mem events. */
194 	while ((pmu = perf_pmus__scan_core(pmu)) != NULL)
195 		count++;
196 
197 	return count;
198 }
199 
/** Struct for ordering events as output in perf list. */
struct sevent {
	/** PMU the event belongs to. */
	const struct perf_pmu *pmu;
	/**
	 * Optional event for name, desc, etc. If not present then this is a
	 * selectable PMU and the event name is shown as "//".
	 */
	const struct perf_pmu_alias *event;
	/** Is the PMU for the CPU (a core PMU)? Core events sort first. */
	bool is_cpu;
};
212 
213 static int cmp_sevent(const void *a, const void *b)
214 {
215 	const struct sevent *as = a;
216 	const struct sevent *bs = b;
217 	const char *a_pmu_name = NULL, *b_pmu_name = NULL;
218 	const char *a_name = "//", *a_desc = NULL, *a_topic = "";
219 	const char *b_name = "//", *b_desc = NULL, *b_topic = "";
220 	int ret;
221 
222 	if (as->event) {
223 		a_name = as->event->name;
224 		a_desc = as->event->desc;
225 		a_topic = as->event->topic ?: "";
226 		a_pmu_name = as->event->pmu_name;
227 	}
228 	if (bs->event) {
229 		b_name = bs->event->name;
230 		b_desc = bs->event->desc;
231 		b_topic = bs->event->topic ?: "";
232 		b_pmu_name = bs->event->pmu_name;
233 	}
234 	/* Put extra events last. */
235 	if (!!a_desc != !!b_desc)
236 		return !!a_desc - !!b_desc;
237 
238 	/* Order by topics. */
239 	ret = strcmp(a_topic, b_topic);
240 	if (ret)
241 		return ret;
242 
243 	/* Order CPU core events to be first */
244 	if (as->is_cpu != bs->is_cpu)
245 		return as->is_cpu ? -1 : 1;
246 
247 	/* Order by PMU name. */
248 	if (as->pmu != bs->pmu) {
249 		a_pmu_name = a_pmu_name ?: (as->pmu->name ?: "");
250 		b_pmu_name = b_pmu_name ?: (bs->pmu->name ?: "");
251 		ret = strcmp(a_pmu_name, b_pmu_name);
252 		if (ret)
253 			return ret;
254 	}
255 
256 	/* Order by event name. */
257 	return strcmp(a_name, b_name);
258 }
259 
260 static bool pmu_alias_is_duplicate(struct sevent *alias_a,
261 				   struct sevent *alias_b)
262 {
263 	const char *a_pmu_name = NULL, *b_pmu_name = NULL;
264 	const char *a_name = "//", *b_name = "//";
265 
266 
267 	if (alias_a->event) {
268 		a_name = alias_a->event->name;
269 		a_pmu_name = alias_a->event->pmu_name;
270 	}
271 	if (alias_b->event) {
272 		b_name = alias_b->event->name;
273 		b_pmu_name = alias_b->event->pmu_name;
274 	}
275 
276 	/* Different names -> never duplicates */
277 	if (strcmp(a_name, b_name))
278 		return false;
279 
280 	/* Don't remove duplicates for different PMUs */
281 	a_pmu_name = a_pmu_name ?: (alias_a->pmu->name ?: "");
282 	b_pmu_name = b_pmu_name ?: (alias_b->pmu->name ?: "");
283 	return strcmp(a_pmu_name, b_pmu_name) == 0;
284 }
285 
/* Saturating subtraction: a - b, clamped at zero (never negative). */
static int sub_non_neg(int a, int b)
{
	return b > a ? 0 : a - b;
}
292 
/*
 * Format "<pmu>/<alias>[,term=val...]/"" into @buf of @len bytes, appending
 * each string-valued term of the alias as ",name=value". Output may be
 * truncated but is always NUL-terminated. Returns @buf.
 */
static char *format_alias(char *buf, int len, const struct perf_pmu *pmu,
			  const struct perf_pmu_alias *alias)
{
	struct parse_events_term *term;
	int used = snprintf(buf, len, "%s/%s", pmu->name, alias->name);

	list_for_each_entry(term, &alias->terms, list) {
		/* Only string-valued terms are rendered here. */
		if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR)
			used += snprintf(buf + used, sub_non_neg(len, used),
					",%s=%s", term->config,
					term->val.str);
	}

	/*
	 * snprintf() returns the would-be length, so 'used' can exceed 'len'
	 * on truncation; sub_non_neg() clamps the remaining space at zero so
	 * the trailing '/' and NUL are only written when they fit.
	 */
	if (sub_non_neg(len, used) > 0) {
		buf[used] = '/';
		used++;
	}
	if (sub_non_neg(len, used) > 0) {
		buf[used] = '\0';
		used++;
	} else
		buf[len - 1] = '\0';	/* truncated: force termination */

	return buf;
}
318 
/*
 * Print all PMU events via @print_cb: gather every alias (plus one "//"
 * entry per selectable PMU), sort them, drop duplicates, then emit each
 * through print_cb->print_event().
 */
void perf_pmus__print_pmu_events(const struct print_callbacks *print_cb, void *print_state)
{
	struct perf_pmu *pmu;
	struct perf_pmu_alias *event;
	char buf[1024];
	/*
	 * NOTE(review): 'printed' is initialized to 0 and never updated, so
	 * the trailing-newline branch at the end is dead code — confirm
	 * whether it should track print_event() calls.
	 */
	int printed = 0;
	int len, j;
	struct sevent *aliases;

	/* Pass 1: count entries so we can size the aliases array. */
	pmu = NULL;
	len = 0;
	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
		list_for_each_entry(event, &pmu->aliases, list)
			len++;
		if (pmu->selectable)
			len++;
	}
	aliases = zalloc(sizeof(struct sevent) * len);
	if (!aliases) {
		pr_err("FATAL: not enough memory to print PMU events\n");
		return;
	}
	/* Pass 2: fill the array; selectable PMUs get a NULL event entry. */
	pmu = NULL;
	j = 0;
	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
		bool is_cpu = pmu->is_core;

		list_for_each_entry(event, &pmu->aliases, list) {
			aliases[j].event = event;
			aliases[j].pmu = pmu;
			aliases[j].is_cpu = is_cpu;
			j++;
		}
		if (pmu->selectable) {
			aliases[j].event = NULL;
			aliases[j].pmu = pmu;
			aliases[j].is_cpu = is_cpu;
			j++;
		}
	}
	len = j;
	qsort(aliases, len, sizeof(struct sevent), cmp_sevent);
	for (j = 0; j < len; j++) {
		const char *name, *alias = NULL, *scale_unit = NULL,
			*desc = NULL, *long_desc = NULL,
			*encoding_desc = NULL, *topic = NULL,
			*pmu_name = NULL;
		bool deprecated = false;
		/*
		 * Several strings are packed back-to-back into 'buf';
		 * buf_used tracks the next free offset (each string keeps
		 * its own NUL, hence the "+ 1" after every snprintf).
		 */
		size_t buf_used;

		/* Skip duplicates */
		if (j > 0 && pmu_alias_is_duplicate(&aliases[j], &aliases[j - 1]))
			continue;

		if (!aliases[j].event) {
			/* A selectable event, shown as "<pmu>//". */
			pmu_name = aliases[j].pmu->name;
			buf_used = snprintf(buf, sizeof(buf), "%s//", pmu_name) + 1;
			name = buf;
		} else {
			if (aliases[j].event->desc) {
				name = aliases[j].event->name;
				buf_used = 0;
			} else {
				/* No description: show the full "<pmu>/<event>/" form. */
				name = format_alias(buf, sizeof(buf), aliases[j].pmu,
						    aliases[j].event);
				if (aliases[j].is_cpu) {
					/* Core events: plain name, formatted form as alias. */
					alias = name;
					name = aliases[j].event->name;
				}
				buf_used = strlen(buf) + 1;
			}
			pmu_name = aliases[j].event->pmu_name ?: (aliases[j].pmu->name ?: "");
			if (strlen(aliases[j].event->unit) || aliases[j].event->scale != 1.0) {
				scale_unit = buf + buf_used;
				buf_used += snprintf(buf + buf_used, sizeof(buf) - buf_used,
						"%G%s", aliases[j].event->scale,
						aliases[j].event->unit) + 1;
			}
			desc = aliases[j].event->desc;
			long_desc = aliases[j].event->long_desc;
			topic = aliases[j].event->topic;
			encoding_desc = buf + buf_used;
			buf_used += snprintf(buf + buf_used, sizeof(buf) - buf_used,
					"%s/%s/", pmu_name, aliases[j].event->str) + 1;
			deprecated = aliases[j].event->deprecated;
		}
		print_cb->print_event(print_state,
				pmu_name,
				topic,
				name,
				alias,
				scale_unit,
				deprecated,
				"Kernel PMU event",
				desc,
				long_desc,
				encoding_desc);
	}
	if (printed && pager_in_use())
		printf("\n");

	zfree(&aliases);
}
423 
/* True when a PMU named @pname exists and provides an event named @name. */
bool perf_pmus__have_event(const char *pname, const char *name)
{
	struct perf_pmu *pmu = perf_pmus__find(pname);

	if (!pmu)
		return false;

	return perf_pmu__have_event(pmu, name);
}
430 
431 bool perf_pmus__has_hybrid(void)
432 {
433 	static bool hybrid_scanned, has_hybrid;
434 
435 	if (!hybrid_scanned) {
436 		struct perf_pmu *pmu = NULL;
437 
438 		while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
439 			if (is_pmu_hybrid(pmu->name)) {
440 				has_hybrid = true;
441 				break;
442 			}
443 		}
444 		hybrid_scanned = true;
445 	}
446 	return has_hybrid;
447 }
448 
449 struct perf_pmu *evsel__find_pmu(const struct evsel *evsel)
450 {
451 	struct perf_pmu *pmu = evsel->pmu;
452 
453 	if (!pmu) {
454 		pmu = perf_pmus__find_by_type(evsel->core.attr.type);
455 		((struct evsel *)evsel)->pmu = pmu;
456 	}
457 	return pmu;
458 }
459