xref: /openbmc/linux/tools/perf/arch/x86/util/topdown.c (revision 6726d552)
// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include "api/fs/fs.h"
#include "util/pmu.h"
#include "util/topdown.h"
#include "util/evlist.h"
#include "util/debug.h"
#include "util/pmu-hybrid.h"
#include "topdown.h"
#include "evsel.h"

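/*
 * Note added for clarity (not in the original file): the L1 strings group
 * the four top-level topdown metrics behind the slots leader; the L2
 * strings additionally request the four Level 2 sub-metrics (heavy-ops,
 * br-mispredict, fetch-lat, mem-bound). The *_CORE variants qualify each
 * event with cpu_core// so that, on hybrid systems, the group is bound to
 * the big-core PMU.
 */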
#define TOPDOWN_L1_EVENTS       "{slots,topdown-retiring,topdown-bad-spec,topdown-fe-bound,topdown-be-bound}"
#define TOPDOWN_L1_EVENTS_CORE  "{slots,cpu_core/topdown-retiring/,cpu_core/topdown-bad-spec/,cpu_core/topdown-fe-bound/,cpu_core/topdown-be-bound/}"
#define TOPDOWN_L2_EVENTS       "{slots,topdown-retiring,topdown-bad-spec,topdown-fe-bound,topdown-be-bound,topdown-heavy-ops,topdown-br-mispredict,topdown-fetch-lat,topdown-mem-bound}"
#define TOPDOWN_L2_EVENTS_CORE  "{slots,cpu_core/topdown-retiring/,cpu_core/topdown-bad-spec/,cpu_core/topdown-fe-bound/,cpu_core/topdown-be-bound/,cpu_core/topdown-heavy-ops/,cpu_core/topdown-br-mispredict/,cpu_core/topdown-fetch-lat/,cpu_core/topdown-mem-bound/}"

/* Check whether there is a PMU which supports the perf metrics. */
bool topdown_sys_has_perf_metrics(void)
{
	static bool has_perf_metrics;
	static bool cached;
	struct perf_pmu *pmu;

	if (cached)
		return has_perf_metrics;

	/*
	 * The perf metrics feature is a core PMU feature.
	 * The PERF_TYPE_RAW type is the type of a core PMU.
	 * The slots event is only available when the core PMU
	 * supports the perf metrics feature.
	 */
	pmu = perf_pmu__find_by_type(PERF_TYPE_RAW);
	if (pmu && pmu_have_event(pmu->name, "slots"))
		has_perf_metrics = true;

	cached = true;
	return has_perf_metrics;
}

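/*
 * Illustrative note, not part of the original file: on a perf-metrics
 * capable kernel the probe above is roughly the C equivalent of checking
 * for a "slots" event alias exported by the core PMU in sysfs, e.g.
 * (path layout assumed for illustration):
 *
 *	$ ls /sys/bus/event_source/devices/cpu/events/slots
 *
 * A caller would typically just gate topdown handling on the cached result:
 *
 *	if (topdown_sys_has_perf_metrics())
 *		... set up the slots/topdown-* events ...
 */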
/*
 * Check whether we can use a group for topdown events.
 * Without a group, the results may be inaccurate due to multiplexing.
 */
bool arch_topdown_check_group(bool *warn)
{
	int n;

	if (sysctl__read_int("kernel/nmi_watchdog", &n) < 0)
		return false;
	if (n > 0) {
		*warn = true;
		return false;
	}
	return true;
}

void arch_topdown_group_warn(void)
{
	fprintf(stderr,
		"nmi_watchdog enabled with topdown. May give wrong results.\n"
		"Disable with echo 0 > /proc/sys/kernel/nmi_watchdog\n");
}

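/*
 * Illustrative sketch, not part of the original file: how a caller might
 * combine the two hooks above before grouping topdown events. The caller
 * shape is hypothetical; only the two arch_* functions come from this file.
 *
 *	bool warn = false;
 *
 *	if (arch_topdown_check_group(&warn)) {
 *		// safe to keep slots and topdown-* events in one group
 *	} else {
 *		if (warn)
 *			arch_topdown_group_warn();
 *		// fall back to ungrouped events and accept multiplexing error
 *	}
 */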
/* Raw encoding of the slots event on the core PMU (umask 0x4, event 0x0). */
#define TOPDOWN_SLOTS		0x0400

/*
 * Check whether a topdown group supports sample-read.
 *
 * Only the topdown metric events support sample-read. The slots
 * event must be the leader of the topdown group.
 */
bool arch_topdown_sample_read(struct evsel *leader)
{
	if (!evsel__sys_has_perf_metrics(leader))
		return false;

	if (leader->core.attr.config == TOPDOWN_SLOTS)
		return true;

	return false;
}

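/*
 * Illustrative sketch, not part of the original file: a record-side caller
 * (hypothetical placement) could use the helper above to decide whether a
 * group member may inherit PERF_SAMPLE_READ from a sampling slots leader:
 *
 *	if (sample_read && arch_topdown_sample_read(leader))
 *		evsel__set_sample_bit(member, READ);
 *
 * evsel__set_sample_bit() is the existing helper for toggling sample_type
 * bits; whether it is applied at this exact spot is an assumption.
 */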
const char *arch_get_topdown_pmu_name(struct evlist *evlist, bool warn)
{
	const char *pmu_name;

	if (!perf_pmu__has_hybrid())
		return "cpu";

	if (!evlist->hybrid_pmu_name) {
		if (warn)
			pr_warning("WARNING: defaulting to cpu_core topdown events\n");
		evlist->hybrid_pmu_name = perf_pmu__hybrid_type_to_pmu("core");
	}

	pmu_name = evlist->hybrid_pmu_name;

	return pmu_name;
}

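/*
 * Illustrative note, not part of the original file: on a hybrid system
 * perf_pmu__hybrid_type_to_pmu("core") resolves to the "cpu_core" PMU name,
 * so the string returned above can be fed straight into pmu_have_event(),
 * as topdown_parse_events() does below:
 *
 *	const char *name = arch_get_topdown_pmu_name(evlist, true);
 *
 *	if (pmu_have_event(name, "topdown-heavy-ops"))
 *		... Level 2 topdown events are available ...
 */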
int topdown_parse_events(struct evlist *evlist)
{
	const char *topdown_events;
	const char *pmu_name;

	if (!topdown_sys_has_perf_metrics())
		return 0;

	pmu_name = arch_get_topdown_pmu_name(evlist, false);

	if (pmu_have_event(pmu_name, "topdown-heavy-ops")) {
		if (!strcmp(pmu_name, "cpu_core"))
			topdown_events = TOPDOWN_L2_EVENTS_CORE;
		else
			topdown_events = TOPDOWN_L2_EVENTS;
	} else {
		if (!strcmp(pmu_name, "cpu_core"))
			topdown_events = TOPDOWN_L1_EVENTS_CORE;
		else
			topdown_events = TOPDOWN_L1_EVENTS;
	}

	return parse_event(evlist, topdown_events);
}
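
/*
 * Illustrative note, not part of the original file: on a non-hybrid CPU that
 * only exposes the Level 1 metrics, the parse_event() call above is roughly
 * equivalent to the user having asked for (command line for illustration):
 *
 *	perf stat -e '{slots,topdown-retiring,topdown-bad-spec,topdown-fe-bound,topdown-be-bound}'
 *
 * A hypothetical caller only needs to check the return value:
 *
 *	if (topdown_parse_events(evlist))
 *		pr_err("failed to set up the default topdown events\n");
 */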