xref: /openbmc/linux/arch/x86/events/msr.c (revision 7587eb18)
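/*
 * perf events 'msr' PMU: exposes a handful of free-running, read-only
 * x86 MSR counters (TSC, APERF, MPERF, PPERF, SMI count, PTSC, IRPERF)
 * as software events.  Counting only, no sampling; once registered the
 * events can be read from userspace via e.g. "perf stat -e msr/tsc/".
 */
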
#include <linux/perf_event.h>

enum perf_msr_id {
	PERF_MSR_TSC			= 0,
	PERF_MSR_APERF			= 1,
	PERF_MSR_MPERF			= 2,
	PERF_MSR_PPERF			= 3,
	PERF_MSR_SMI			= 4,
	PERF_MSR_PTSC			= 5,
	PERF_MSR_IRPERF			= 6,

	PERF_MSR_EVENT_MAX,
};

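/*
 * Per-event availability tests: each returns true when the boot CPU
 * advertises the CPUID feature backing the corresponding MSR.
 */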
static bool test_aperfmperf(int idx)
{
	return boot_cpu_has(X86_FEATURE_APERFMPERF);
}

static bool test_ptsc(int idx)
{
	return boot_cpu_has(X86_FEATURE_PTSC);
}

static bool test_irperf(int idx)
{
	return boot_cpu_has(X86_FEATURE_IRPERF);
}

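/*
 * PPERF and the SMI count MSR have no CPUID feature flag; whitelist
 * them per Intel Family 6 model instead.
 */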
static bool test_intel(int idx)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
	    boot_cpu_data.x86 != 6)
		return false;

	switch (boot_cpu_data.x86_model) {
	case 30: /* 45nm Nehalem    */
	case 26: /* 45nm Nehalem-EP */
	case 46: /* 45nm Nehalem-EX */

	case 37: /* 32nm Westmere    */
	case 44: /* 32nm Westmere-EP */
	case 47: /* 32nm Westmere-EX */

	case 42: /* 32nm SandyBridge         */
	case 45: /* 32nm SandyBridge-E/EN/EP */

	case 58: /* 22nm IvyBridge       */
	case 62: /* 22nm IvyBridge-EP/EX */

	case 60: /* 22nm Haswell Core */
	case 63: /* 22nm Haswell Server */
	case 69: /* 22nm Haswell ULT */
	case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */

	case 61: /* 14nm Broadwell Core-M */
	case 86: /* 14nm Broadwell Xeon D */
	case 71: /* 14nm Broadwell + GT3e (Intel Iris Pro graphics) */
	case 79: /* 14nm Broadwell Server */

	case 55: /* 22nm Atom "Silvermont"                */
	case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
	case 76: /* 14nm Atom "Airmont"                   */
		if (idx == PERF_MSR_SMI)
			return true;
		break;

	case 78: /* 14nm Skylake Mobile */
	case 94: /* 14nm Skylake Desktop */
		if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF)
			return true;
		break;
	}

	return false;
}

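/*
 * One entry per enum perf_msr_id: the MSR to read, its sysfs event
 * string (the "event=0xNN" value matches the enum index), and an
 * optional availability test.
 */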
struct perf_msr {
	u64	msr;
	struct	perf_pmu_events_attr *attr;
	bool	(*test)(int idx);
};

PMU_EVENT_ATTR_STRING(tsc,    evattr_tsc,    "event=0x00");
PMU_EVENT_ATTR_STRING(aperf,  evattr_aperf,  "event=0x01");
PMU_EVENT_ATTR_STRING(mperf,  evattr_mperf,  "event=0x02");
PMU_EVENT_ATTR_STRING(pperf,  evattr_pperf,  "event=0x03");
PMU_EVENT_ATTR_STRING(smi,    evattr_smi,    "event=0x04");
PMU_EVENT_ATTR_STRING(ptsc,   evattr_ptsc,   "event=0x05");
PMU_EVENT_ATTR_STRING(irperf, evattr_irperf, "event=0x06");

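/*
 * PERF_MSR_TSC uses msr 0 as a sentinel: msr_read_counter() falls back
 * to RDTSC for it.  A NULL test means "always present"; the TSC itself
 * is checked once in msr_init().
 */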
static struct perf_msr msr[] = {
	[PERF_MSR_TSC]    = { 0,		&evattr_tsc,	NULL,		 },
	[PERF_MSR_APERF]  = { MSR_IA32_APERF,	&evattr_aperf,	test_aperfmperf, },
	[PERF_MSR_MPERF]  = { MSR_IA32_MPERF,	&evattr_mperf,	test_aperfmperf, },
	[PERF_MSR_PPERF]  = { MSR_PPERF,	&evattr_pperf,	test_intel,	 },
	[PERF_MSR_SMI]    = { MSR_SMI_COUNT,	&evattr_smi,	test_intel,	 },
	[PERF_MSR_PTSC]   = { MSR_F15H_PTSC,	&evattr_ptsc,	test_ptsc,	 },
	[PERF_MSR_IRPERF] = { MSR_F17H_IRPERF,	&evattr_irperf,	test_irperf,	 },
};

static struct attribute *events_attrs[PERF_MSR_EVENT_MAX + 1] = {
	NULL,
};

static struct attribute_group events_attr_group = {
	.name = "events",
	.attrs = events_attrs,
};

PMU_FORMAT_ATTR(event, "config:0-63");
static struct attribute *format_attrs[] = {
	&format_attr_event.attr,
	NULL,
};
static struct attribute_group format_attr_group = {
	.name = "format",
	.attrs = format_attrs,
};

static const struct attribute_group *attr_groups[] = {
	&events_attr_group,
	&format_attr_group,
	NULL,
};

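/*
 * Validate a new event: only plain counting is supported; sampling and
 * privilege filters are rejected, as are events whose MSR failed the
 * probe in msr_init() (their attr pointer was cleared to NULL).
 */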
static int msr_event_init(struct perf_event *event)
{
	u64 cfg = event->attr.config;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	if (cfg >= PERF_MSR_EVENT_MAX)
		return -EINVAL;

	/* unsupported modes and filters */
	if (event->attr.exclude_user   ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv     ||
	    event->attr.exclude_idle   ||
	    event->attr.exclude_host   ||
	    event->attr.exclude_guest  ||
	    event->attr.sample_period) /* no sampling */
		return -EINVAL;

	if (!msr[cfg].attr)
		return -EINVAL;

	event->hw.idx = -1;
	event->hw.event_base = msr[cfg].msr;
	event->hw.config = cfg;

	return 0;
}

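/* Read the raw counter: event_base 0 means the TSC, read via RDTSC. */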
static inline u64 msr_read_counter(struct perf_event *event)
{
	u64 now;

	if (event->hw.event_base)
		rdmsrl(event->hw.event_base, now);
	else
		rdtscll(now);

	return now;
}
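
/*
 * Fold the counter delta into event->count.  The cmpxchg loop guards
 * against an NMI updating prev_count concurrently; the SMI count MSR is
 * treated as a 32-bit counter, hence the sign extension of its delta.
 */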
static void msr_event_update(struct perf_event *event)
{
	u64 prev, now;
	s64 delta;

	/* Careful, an NMI might modify the previous event value. */
again:
	prev = local64_read(&event->hw.prev_count);
	now = msr_read_counter(event);

	if (local64_cmpxchg(&event->hw.prev_count, prev, now) != prev)
		goto again;

	delta = now - prev;
	if (unlikely(event->hw.event_base == MSR_SMI_COUNT))
		delta = sign_extend64(delta, 31);

	local64_add(delta, &event->count);
}

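/*
 * pmu::start/stop/add/del: the counters free-run in hardware, so
 * "starting" just snapshots the current value and "stopping" folds the
 * accumulated delta into the event count.
 */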
static void msr_event_start(struct perf_event *event, int flags)
{
	u64 now;

	now = msr_read_counter(event);
	local64_set(&event->hw.prev_count, now);
}

static void msr_event_stop(struct perf_event *event, int flags)
{
	msr_event_update(event);
}

static void msr_event_del(struct perf_event *event, int flags)
{
	msr_event_stop(event, PERF_EF_UPDATE);
}

static int msr_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		msr_event_start(event, flags);

	return 0;
}

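/*
 * These counters cannot raise an interrupt, so the PMU is marked
 * PERF_PMU_CAP_NO_INTERRUPT and registered in the software context.
 */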
static struct pmu pmu_msr = {
	.task_ctx_nr	= perf_sw_context,
	.attr_groups	= attr_groups,
	.event_init	= msr_event_init,
	.add		= msr_event_add,
	.del		= msr_event_del,
	.start		= msr_event_start,
	.stop		= msr_event_stop,
	.read		= msr_event_update,
	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT,
};

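/*
 * Probe each MSR at boot: run its availability test and a trial
 * rdmsrl_safe() read, drop the sysfs attribute for anything that fails,
 * then register the "msr" PMU with the surviving events.
 */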
static int __init msr_init(void)
{
	int i, j = 0;

	if (!boot_cpu_has(X86_FEATURE_TSC)) {
		pr_cont("no MSR PMU driver.\n");
		return 0;
	}

	/* Probe the MSRs. */
	for (i = PERF_MSR_TSC + 1; i < PERF_MSR_EVENT_MAX; i++) {
		u64 val;

		/*
		 * Under virtualization there is no reliable way to tell
		 * whether a read-only MSR is actually implemented, so also
		 * probe it with a trial read.
		 */
		if (!msr[i].test(i) || rdmsrl_safe(msr[i].msr, &val))
			msr[i].attr = NULL;
	}

	/* List remaining MSRs in the sysfs attrs. */
	for (i = 0; i < PERF_MSR_EVENT_MAX; i++) {
		if (msr[i].attr)
			events_attrs[j++] = &msr[i].attr->attr.attr;
	}
	events_attrs[j] = NULL;

	perf_pmu_register(&pmu_msr, "msr", -1);

	return 0;
}
device_initcall(msr_init);