xref: /openbmc/linux/tools/perf/arch/x86/util/evsel.c (revision 9221b289)
// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include <stdlib.h>
#include "util/evsel.h"
#include "util/env.h"
#include "util/pmu.h"
#include "linux/string.h"
#include "evsel.h"
#include "util/debug.h"

#define IBS_FETCH_L3MISSONLY   (1ULL << 59)
#define IBS_OP_L3MISSONLY      (1ULL << 16)
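
/*
 * These masks are meant to match the "l3missonly" format fields exported
 * by the AMD IBS PMUs (ibs_fetch: config bit 59, ibs_op: config bit 16),
 * so the filter is typically enabled straight from the command line, e.g.:
 *
 *   perf record -e ibs_op/l3missonly=1/ -- <workload>
 *
 * arch__post_evsel_config() below checks these bits to warn about
 * sampling period skew.
 */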

void arch_evsel__set_sample_weight(struct evsel *evsel)
{
	evsel__set_sample_bit(evsel, WEIGHT_STRUCT);
}
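
/*
 * A sketch of what WEIGHT_STRUCT buys (layout per the perf_event UAPI,
 * little-endian case):
 *
 *   union perf_sample_weight {
 *           __u64 full;
 *           struct {
 *                   __u32 var1_dw;  // e.g. cache/memory access latency
 *                   __u16 var2_w;   // e.g. instruction latency
 *                   __u16 var3_w;
 *           };
 *   };
 *
 * i.e. PERF_SAMPLE_WEIGHT_STRUCT lets x86 report more than one latency
 * value per sample, which plain PERF_SAMPLE_WEIGHT cannot.
 */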

void arch_evsel__fixup_new_cycles(struct perf_event_attr *attr)
{
	struct perf_env env = { .total_mem = 0, };

	if (!perf_env__cpuid(&env))
		return;

	/*
	 * On AMD, precise cycles event sampling is implemented internally
	 * on top of the IBS PMU. IBS has no filtering capabilities, but
	 * perf sets exclude_guest = 1 by default. That makes IBS event
	 * init fail, and perf ends up doing non-precise sampling. Avoid
	 * it by clearing exclude_guest.
	 */
	if (env.cpuid && strstarts(env.cpuid, "AuthenticAMD"))
		attr->exclude_guest = 0;

	free(env.cpuid);
}
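
/*
 * Illustrative example (not from this file): without the fixup above,
 *
 *   perf record -e cycles:pp -- <workload>
 *
 * on an AMD system maps the precise request onto IBS, IBS rejects the
 * default exclude_guest = 1, and perf quietly falls back to non-precise
 * cycles sampling.
 */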

/* Check whether the evsel's PMU supports the perf metrics */
bool evsel__sys_has_perf_metrics(const struct evsel *evsel)
{
	const char *pmu_name = evsel->pmu_name ? evsel->pmu_name : "cpu";

	/*
	 * The PERF_TYPE_RAW type is the core PMU type, e.g., "cpu" PMU
	 * on a non-hybrid machine, "cpu_core" PMU on a hybrid machine.
	 * The slots event is only available for the core PMU, which
	 * supports the perf metrics feature.
	 * Checking both the PERF_TYPE_RAW type and the slots event
	 * should be good enough to detect the perf metrics feature.
	 */
	if ((evsel->core.attr.type == PERF_TYPE_RAW) &&
	    pmu_have_event(pmu_name, "slots"))
		return true;

	return false;
}
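
/*
 * Note on the check above: pmu_have_event() walks the PMU's event
 * aliases (populated from sysfs .../<pmu>/events and the JSON event
 * tables), so the "slots" test roughly amounts to asking whether e.g.
 * /sys/bus/event_source/devices/cpu/events/slots exists on a non-hybrid
 * part, or .../cpu_core/events/slots on a hybrid one.
 */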

bool arch_evsel__must_be_in_group(const struct evsel *evsel)
{
	if (!evsel__sys_has_perf_metrics(evsel))
		return false;

	return evsel->name &&
		(strcasestr(evsel->name, "slots") ||
		 strcasestr(evsel->name, "topdown"));
}
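
/*
 * Example of the constraint this enforces: the topdown events only
 * count as part of a group led by slots, e.g.
 *
 *   perf stat -e '{slots,topdown-retiring,topdown-bad-spec}' -- <workload>
 *
 * so slots/topdown members are kept together even where perf would
 * otherwise break a group apart.
 */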

int arch_evsel__hw_name(struct evsel *evsel, char *bf, size_t size)
{
	u64 event = evsel->core.attr.config & PERF_HW_EVENT_MASK;
	u64 pmu = evsel->core.attr.config >> PERF_PMU_TYPE_SHIFT;
	const char *event_name;

	if (event < PERF_COUNT_HW_MAX && evsel__hw_names[event])
		event_name = evsel__hw_names[event];
	else
		event_name = "unknown-hardware";

	/* The PMU type is not required on a non-hybrid platform. */
	if (!pmu)
		return scnprintf(bf, size, "%s", event_name);

	return scnprintf(bf, size, "%s/%s/",
			 evsel->pmu_name ? evsel->pmu_name : "cpu",
			 event_name);
}
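
/*
 * Sketch of the encoding decoded above (constants from the perf_event
 * UAPI, where PERF_PMU_TYPE_SHIFT is 32 and PERF_HW_EVENT_MASK covers
 * the low 32 bits): a hybrid-aware tool would build a cpu_core cycles
 * event roughly as
 *
 *   attr.config = ((__u64)core_pmu_type << PERF_PMU_TYPE_SHIFT) |
 *                 PERF_COUNT_HW_CPU_CYCLES;
 *
 * which arch_evsel__hw_name() then prints as "cpu_core/cycles/".
 */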

static void ibs_l3miss_warn(void)
{
	pr_warning(
"WARNING: Hw internally resets sampling period when L3 Miss Filtering is enabled\n"
"and tagged operation does not cause L3 Miss. This causes sampling period skew.\n");
}

void arch__post_evsel_config(struct evsel *evsel, struct perf_event_attr *attr)
{
	struct perf_pmu *evsel_pmu, *ibs_fetch_pmu, *ibs_op_pmu;
	static int warned_once;
	/* 0: Uninitialized, 1: Yes, -1: No */
	static int is_amd;

	if (warned_once || is_amd == -1)
		return;

	if (!is_amd) {
		struct perf_env *env = evsel__env(evsel);

		if (!perf_env__cpuid(env) || !env->cpuid ||
		    !strstarts(env->cpuid, "AuthenticAMD")) {
			is_amd = -1;
			return;
		}
		is_amd = 1;
	}

	evsel_pmu = evsel__find_pmu(evsel);
	if (!evsel_pmu)
		return;

	ibs_fetch_pmu = perf_pmu__find("ibs_fetch");
	ibs_op_pmu = perf_pmu__find("ibs_op");

	if (ibs_fetch_pmu && ibs_fetch_pmu->type == evsel_pmu->type) {
		if (attr->config & IBS_FETCH_L3MISSONLY) {
			ibs_l3miss_warn();
			warned_once = 1;
		}
	} else if (ibs_op_pmu && ibs_op_pmu->type == evsel_pmu->type) {
		if (attr->config & IBS_OP_L3MISSONLY) {
			ibs_l3miss_warn();
			warned_once = 1;
		}
	}
}
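
/*
 * When the warning fires (illustrative): a sampling session with the
 * L3 miss filter enabled, e.g.
 *
 *   perf record -e ibs_op/l3missonly=1/ -c 10000 -- <workload>
 *
 * prints the skew warning once per session (warned_once is static),
 * because the hardware resets the sampling period whenever a tagged op
 * turns out not to miss in L3.
 */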