xref: /openbmc/linux/arch/x86/events/intel/cstate.c (revision e5c86679)
/*
 * Support cstate residency counters
 *
 * Copyright (C) 2015, Intel Corp.
 * Author: Kan Liang (kan.liang@intel.com)
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Library General Public License for more details.
 *
 */

/*
 * This file exports cstate-related free-running (read-only) counters
 * for perf. These counters may be used simultaneously by other tools,
 * such as turbostat. However, it still makes sense to implement them
 * in perf, because we can conveniently collect them together with
 * other events, and because tools can use them without special MSR
 * access code.
 *
 * The events only support system-wide mode counting. There is no
 * sampling support because it is not supported by the hardware.
 *
 * According to the counters' scope and category, two PMUs are
 * registered with the perf_event core subsystem.
 *  - 'cstate_core': The counter is available for each physical core.
 *    The counters include CORE_C*_RESIDENCY.
 *  - 'cstate_pkg': The counter is available for each physical package.
 *    The counters include PKG_C*_RESIDENCY.
 *
 * All of these counters are specified in the Intel® 64 and IA-32
 * Architectures Software Developer's Manual Vol3b.
 *
 * Model specific counters:
 *	MSR_CORE_C1_RES: CORE C1 Residency Counter
 *			 perf code: 0x00
 *			 Available model: SLM,AMT
 *			 Scope: Core (each processor core has an MSR)
 *	MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter
 *			       perf code: 0x01
 *			       Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL
 *			       Scope: Core
 *	MSR_CORE_C6_RESIDENCY: CORE C6 Residency Counter
 *			       perf code: 0x02
 *			       Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
 *						SKL,KNL
 *			       Scope: Core
 *	MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
 *			       perf code: 0x03
 *			       Available model: SNB,IVB,HSW,BDW,SKL
 *			       Scope: Core
 *	MSR_PKG_C2_RESIDENCY:  Package C2 Residency Counter.
 *			       perf code: 0x00
 *			       Available model: SNB,IVB,HSW,BDW,SKL,KNL
 *			       Scope: Package (physical package)
 *	MSR_PKG_C3_RESIDENCY:  Package C3 Residency Counter.
 *			       perf code: 0x01
 *			       Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL
 *			       Scope: Package (physical package)
 *	MSR_PKG_C6_RESIDENCY:  Package C6 Residency Counter.
 *			       perf code: 0x02
 *			       Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
 *						SKL,KNL
 *			       Scope: Package (physical package)
 *	MSR_PKG_C7_RESIDENCY:  Package C7 Residency Counter.
 *			       perf code: 0x03
 *			       Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL
 *			       Scope: Package (physical package)
 *	MSR_PKG_C8_RESIDENCY:  Package C8 Residency Counter.
 *			       perf code: 0x04
 *			       Available model: HSW ULT only
 *			       Scope: Package (physical package)
 *	MSR_PKG_C9_RESIDENCY:  Package C9 Residency Counter.
 *			       perf code: 0x05
 *			       Available model: HSW ULT only
 *			       Scope: Package (physical package)
 *	MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
 *			       perf code: 0x06
 *			       Available model: HSW ULT only
 *			       Scope: Package (physical package)
 *
 */

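/*
 * Example usage (illustrative, not part of the driver): the PMUs show up
 * under /sys/bus/event_source/devices/ as "cstate_core" and "cstate_pkg",
 * so the counters can be read in counting mode with perf stat, e.g.:
 *
 *   perf stat -a -e cstate_core/c6-residency/ -- sleep 1
 *   perf stat -a -e cstate_pkg/c2-residency/ -- sleep 1
 *
 * Counts are the raw residency values read from the MSRs; the driver
 * applies no unit conversion.
 */
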
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include "../perf_event.h"

MODULE_LICENSE("GPL");

#define DEFINE_CSTATE_FORMAT_ATTR(_var, _name, _format)		\
static ssize_t __cstate_##_var##_show(struct kobject *kobj,	\
				struct kobj_attribute *attr,	\
				char *page)			\
{								\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);		\
	return sprintf(page, _format "\n");			\
}								\
static struct kobj_attribute format_attr_##_var =		\
	__ATTR(_name, 0444, __cstate_##_var##_show, NULL)

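/*
 * For illustration: DEFINE_CSTATE_FORMAT_ATTR(core_event, event,
 * "config:0-63") (used below) creates a read-only sysfs attribute that
 * prints "config:0-63", exported via the "format" attribute group as
 *
 *   /sys/bus/event_source/devices/cstate_core/format/event
 *
 * which tells perf userspace that the event selector occupies bits 0-63
 * of perf_event_attr::config.
 */
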
static ssize_t cstate_get_attr_cpumask(struct device *dev,
				       struct device_attribute *attr,
				       char *buf);

/* Model -> events mapping */
struct cstate_model {
	unsigned long		core_events;
	unsigned long		pkg_events;
	unsigned long		quirks;
};

/* Quirk flags */
#define SLM_PKG_C6_USE_C7_MSR	(1UL << 0)
#define KNL_CORE_C6_MSR		(1UL << 1)

struct perf_cstate_msr {
	u64	msr;
	struct	perf_pmu_events_attr *attr;
};


/* cstate_core PMU */
static struct pmu cstate_core_pmu;
static bool has_cstate_core;

enum perf_cstate_core_events {
	PERF_CSTATE_CORE_C1_RES = 0,
	PERF_CSTATE_CORE_C3_RES,
	PERF_CSTATE_CORE_C6_RES,
	PERF_CSTATE_CORE_C7_RES,

	PERF_CSTATE_CORE_EVENT_MAX,
};

PMU_EVENT_ATTR_STRING(c1-residency, evattr_cstate_core_c1, "event=0x00");
PMU_EVENT_ATTR_STRING(c3-residency, evattr_cstate_core_c3, "event=0x01");
PMU_EVENT_ATTR_STRING(c6-residency, evattr_cstate_core_c6, "event=0x02");
PMU_EVENT_ATTR_STRING(c7-residency, evattr_cstate_core_c7, "event=0x03");

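/*
 * The strings above become event aliases under
 * /sys/bus/event_source/devices/cstate_core/events/, so the following
 * two invocations are equivalent (illustrative example):
 *
 *   perf stat -a -e cstate_core/c6-residency/ -- sleep 1
 *   perf stat -a -e cstate_core/event=0x02/ -- sleep 1
 */
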
static struct perf_cstate_msr core_msr[] = {
	[PERF_CSTATE_CORE_C1_RES] = { MSR_CORE_C1_RES,		&evattr_cstate_core_c1 },
	[PERF_CSTATE_CORE_C3_RES] = { MSR_CORE_C3_RESIDENCY,	&evattr_cstate_core_c3 },
	[PERF_CSTATE_CORE_C6_RES] = { MSR_CORE_C6_RESIDENCY,	&evattr_cstate_core_c6 },
	[PERF_CSTATE_CORE_C7_RES] = { MSR_CORE_C7_RESIDENCY,	&evattr_cstate_core_c7 },
};

static struct attribute *core_events_attrs[PERF_CSTATE_CORE_EVENT_MAX + 1] = {
	NULL,
};

static struct attribute_group core_events_attr_group = {
	.name = "events",
	.attrs = core_events_attrs,
};

DEFINE_CSTATE_FORMAT_ATTR(core_event, event, "config:0-63");
static struct attribute *core_format_attrs[] = {
	&format_attr_core_event.attr,
	NULL,
};

static struct attribute_group core_format_attr_group = {
	.name = "format",
	.attrs = core_format_attrs,
};

static cpumask_t cstate_core_cpu_mask;
static DEVICE_ATTR(cpumask, S_IRUGO, cstate_get_attr_cpumask, NULL);

static struct attribute *cstate_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group cpumask_attr_group = {
	.attrs = cstate_cpumask_attrs,
};

static const struct attribute_group *core_attr_groups[] = {
	&core_events_attr_group,
	&core_format_attr_group,
	&cpumask_attr_group,
	NULL,
};

/* cstate_pkg PMU */
static struct pmu cstate_pkg_pmu;
static bool has_cstate_pkg;

enum perf_cstate_pkg_events {
	PERF_CSTATE_PKG_C2_RES = 0,
	PERF_CSTATE_PKG_C3_RES,
	PERF_CSTATE_PKG_C6_RES,
	PERF_CSTATE_PKG_C7_RES,
	PERF_CSTATE_PKG_C8_RES,
	PERF_CSTATE_PKG_C9_RES,
	PERF_CSTATE_PKG_C10_RES,

	PERF_CSTATE_PKG_EVENT_MAX,
};

PMU_EVENT_ATTR_STRING(c2-residency, evattr_cstate_pkg_c2, "event=0x00");
PMU_EVENT_ATTR_STRING(c3-residency, evattr_cstate_pkg_c3, "event=0x01");
PMU_EVENT_ATTR_STRING(c6-residency, evattr_cstate_pkg_c6, "event=0x02");
PMU_EVENT_ATTR_STRING(c7-residency, evattr_cstate_pkg_c7, "event=0x03");
PMU_EVENT_ATTR_STRING(c8-residency, evattr_cstate_pkg_c8, "event=0x04");
PMU_EVENT_ATTR_STRING(c9-residency, evattr_cstate_pkg_c9, "event=0x05");
PMU_EVENT_ATTR_STRING(c10-residency, evattr_cstate_pkg_c10, "event=0x06");

static struct perf_cstate_msr pkg_msr[] = {
	[PERF_CSTATE_PKG_C2_RES] = { MSR_PKG_C2_RESIDENCY,	&evattr_cstate_pkg_c2 },
	[PERF_CSTATE_PKG_C3_RES] = { MSR_PKG_C3_RESIDENCY,	&evattr_cstate_pkg_c3 },
	[PERF_CSTATE_PKG_C6_RES] = { MSR_PKG_C6_RESIDENCY,	&evattr_cstate_pkg_c6 },
	[PERF_CSTATE_PKG_C7_RES] = { MSR_PKG_C7_RESIDENCY,	&evattr_cstate_pkg_c7 },
	[PERF_CSTATE_PKG_C8_RES] = { MSR_PKG_C8_RESIDENCY,	&evattr_cstate_pkg_c8 },
	[PERF_CSTATE_PKG_C9_RES] = { MSR_PKG_C9_RESIDENCY,	&evattr_cstate_pkg_c9 },
	[PERF_CSTATE_PKG_C10_RES] = { MSR_PKG_C10_RESIDENCY,	&evattr_cstate_pkg_c10 },
};

static struct attribute *pkg_events_attrs[PERF_CSTATE_PKG_EVENT_MAX + 1] = {
	NULL,
};

static struct attribute_group pkg_events_attr_group = {
	.name = "events",
	.attrs = pkg_events_attrs,
};

DEFINE_CSTATE_FORMAT_ATTR(pkg_event, event, "config:0-63");
static struct attribute *pkg_format_attrs[] = {
	&format_attr_pkg_event.attr,
	NULL,
};
static struct attribute_group pkg_format_attr_group = {
	.name = "format",
	.attrs = pkg_format_attrs,
};

static cpumask_t cstate_pkg_cpu_mask;

static const struct attribute_group *pkg_attr_groups[] = {
	&pkg_events_attr_group,
	&pkg_format_attr_group,
	&cpumask_attr_group,
	NULL,
};

static ssize_t cstate_get_attr_cpumask(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct pmu *pmu = dev_get_drvdata(dev);

	if (pmu == &cstate_core_pmu)
		return cpumap_print_to_pagebuf(true, buf, &cstate_core_cpu_mask);
	else if (pmu == &cstate_pkg_pmu)
		return cpumap_print_to_pagebuf(true, buf, &cstate_pkg_cpu_mask);
	else
		return 0;
}

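/*
 * Illustrative example: userspace can read the designated reader CPUs
 * from the cpumask attribute, e.g.
 *
 *   $ cat /sys/bus/event_source/devices/cstate_pkg/cpumask
 *   0,4
 *
 * (one CPU per package; the exact value depends on the topology).
 * perf uses this mask to open one event per package/core in
 * system-wide mode.
 */
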
static int cstate_pmu_event_init(struct perf_event *event)
{
	u64 cfg = event->attr.config;
	int cpu;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* unsupported modes and filters */
	if (event->attr.exclude_user   ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv     ||
	    event->attr.exclude_idle   ||
	    event->attr.exclude_host   ||
	    event->attr.exclude_guest  ||
	    event->attr.sample_period) /* no sampling */
		return -EINVAL;

	if (event->cpu < 0)
		return -EINVAL;

	if (event->pmu == &cstate_core_pmu) {
		if (cfg >= PERF_CSTATE_CORE_EVENT_MAX)
			return -EINVAL;
		if (!core_msr[cfg].attr)
			return -EINVAL;
		event->hw.event_base = core_msr[cfg].msr;
		cpu = cpumask_any_and(&cstate_core_cpu_mask,
				      topology_sibling_cpumask(event->cpu));
	} else if (event->pmu == &cstate_pkg_pmu) {
		if (cfg >= PERF_CSTATE_PKG_EVENT_MAX)
			return -EINVAL;
		if (!pkg_msr[cfg].attr)
			return -EINVAL;
		event->hw.event_base = pkg_msr[cfg].msr;
		cpu = cpumask_any_and(&cstate_pkg_cpu_mask,
				      topology_core_cpumask(event->cpu));
	} else {
		return -ENOENT;
	}

	if (cpu >= nr_cpu_ids)
		return -ENODEV;

	event->cpu = cpu;
	event->hw.config = cfg;
	event->hw.idx = -1;
	return 0;
}
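
/*
 * Worked example (hypothetical topology): if a cstate_pkg event is
 * opened on CPU 3 and the designated reader of that package is CPU 0,
 * the code above rewrites event->cpu from 3 to 0, so all events for
 * one package are counted on its single designated reader CPU.
 */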

static inline u64 cstate_pmu_read_counter(struct perf_event *event)
{
	u64 val;

	rdmsrl(event->hw.event_base, val);
	return val;
}

static void cstate_pmu_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev_raw_count, new_raw_count;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = cstate_pmu_read_counter(event);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	local64_add(new_raw_count - prev_raw_count, &event->count);
}
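
/*
 * Note on the loop above: the counters are free-running, so an update
 * accumulates the delta between the last snapshot (prev_count) and the
 * current MSR value. The local64_cmpxchg() retry makes concurrent
 * updates of the same event safe without a lock: only the updater that
 * wins the cmpxchg adds its delta to the event count.
 */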

static void cstate_pmu_event_start(struct perf_event *event, int mode)
{
	local64_set(&event->hw.prev_count, cstate_pmu_read_counter(event));
}

static void cstate_pmu_event_stop(struct perf_event *event, int mode)
{
	cstate_pmu_event_update(event);
}

static void cstate_pmu_event_del(struct perf_event *event, int mode)
{
	cstate_pmu_event_stop(event, PERF_EF_UPDATE);
}

static int cstate_pmu_event_add(struct perf_event *event, int mode)
{
	if (mode & PERF_EF_START)
		cstate_pmu_event_start(event, mode);

	return 0;
}
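
/*
 * The read-only hardware counters cannot actually be started or
 * stopped, so start only takes a snapshot as the new baseline, and
 * stop/del just fold the accumulated delta into the event count.
 */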

/*
 * Check if the exiting CPU is the designated reader. If so, migrate the
 * events to a valid target, if one is available.
 */
static int cstate_cpu_exit(unsigned int cpu)
{
	unsigned int target;

	if (has_cstate_core &&
	    cpumask_test_and_clear_cpu(cpu, &cstate_core_cpu_mask)) {

		target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
		/* Migrate events if there is a valid target */
		if (target < nr_cpu_ids) {
			cpumask_set_cpu(target, &cstate_core_cpu_mask);
			perf_pmu_migrate_context(&cstate_core_pmu, cpu, target);
		}
	}

	if (has_cstate_pkg &&
	    cpumask_test_and_clear_cpu(cpu, &cstate_pkg_cpu_mask)) {

		target = cpumask_any_but(topology_core_cpumask(cpu), cpu);
		/* Migrate events if there is a valid target */
		if (target < nr_cpu_ids) {
			cpumask_set_cpu(target, &cstate_pkg_cpu_mask);
			perf_pmu_migrate_context(&cstate_pkg_pmu, cpu, target);
		}
	}
	return 0;
}

static int cstate_cpu_init(unsigned int cpu)
{
	unsigned int target;

	/*
	 * If this is the first online thread of that core, set it in
	 * the core cpu mask as the designated reader.
	 */
	target = cpumask_any_and(&cstate_core_cpu_mask,
				 topology_sibling_cpumask(cpu));

	if (has_cstate_core && target >= nr_cpu_ids)
		cpumask_set_cpu(cpu, &cstate_core_cpu_mask);

	/*
	 * If this is the first online thread of that package, set it
	 * in the package cpu mask as the designated reader.
	 */
	target = cpumask_any_and(&cstate_pkg_cpu_mask,
				 topology_core_cpumask(cpu));
	if (has_cstate_pkg && target >= nr_cpu_ids)
		cpumask_set_cpu(cpu, &cstate_pkg_cpu_mask);

	return 0;
}

static struct pmu cstate_core_pmu = {
	.attr_groups	= core_attr_groups,
	.name		= "cstate_core",
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= cstate_pmu_event_init,
	.add		= cstate_pmu_event_add,
	.del		= cstate_pmu_event_del,
	.start		= cstate_pmu_event_start,
	.stop		= cstate_pmu_event_stop,
	.read		= cstate_pmu_event_update,
	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT,
	.module		= THIS_MODULE,
};

static struct pmu cstate_pkg_pmu = {
	.attr_groups	= pkg_attr_groups,
	.name		= "cstate_pkg",
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= cstate_pmu_event_init,
	.add		= cstate_pmu_event_add,
	.del		= cstate_pmu_event_del,
	.start		= cstate_pmu_event_start,
	.stop		= cstate_pmu_event_stop,
	.read		= cstate_pmu_event_update,
	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT,
	.module		= THIS_MODULE,
};
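
/*
 * PERF_PMU_CAP_NO_INTERRUPT indicates the counters cannot raise an
 * interrupt, and event_init rejects any sample_period. In practice,
 * "perf record" on these events fails and counting mode ("perf stat")
 * must be used instead.
 */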

static const struct cstate_model nhm_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C3_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C7_RES),
};

static const struct cstate_model snb_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C3_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES) |
				  BIT(PERF_CSTATE_CORE_C7_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C7_RES),
};

static const struct cstate_model hswult_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C3_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES) |
				  BIT(PERF_CSTATE_CORE_C7_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C7_RES) |
				  BIT(PERF_CSTATE_PKG_C8_RES) |
				  BIT(PERF_CSTATE_PKG_C9_RES) |
				  BIT(PERF_CSTATE_PKG_C10_RES),
};

static const struct cstate_model slm_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C1_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C6_RES),
	.quirks			= SLM_PKG_C6_USE_C7_MSR,
};

static const struct cstate_model knl_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C6_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES),
	.quirks			= KNL_CORE_C6_MSR,
};

#define X86_CSTATES_MODEL(model, states)				\
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long) &(states) }

static const struct x86_cpu_id intel_cstates_match[] __initconst = {
	X86_CSTATES_MODEL(INTEL_FAM6_NEHALEM,    nhm_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_NEHALEM_EP, nhm_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_NEHALEM_EX, nhm_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_WESTMERE,    nhm_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_WESTMERE_EP, nhm_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_WESTMERE_EX, nhm_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_SANDYBRIDGE,   snb_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_SANDYBRIDGE_X, snb_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_IVYBRIDGE,   snb_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_IVYBRIDGE_X, snb_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_CORE, snb_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_X,	   snb_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_GT3E, snb_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_ULT, hswult_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT1, slm_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT2, slm_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_AIRMONT,     slm_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_CORE,   snb_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_XEON_D, snb_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_GT3E,   snb_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_X,      snb_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_MOBILE,  snb_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_DESKTOP, snb_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_MOBILE,  snb_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_DESKTOP, snb_cstates),

	X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNL, knl_cstates),
	X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNM, knl_cstates),
	{ },
};
MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);

/*
 * Probe the cstate events and insert the available ones into the sysfs
 * attrs. Return false if there are no available events.
 */
static bool __init cstate_probe_msr(const unsigned long evmsk, int max,
				    struct perf_cstate_msr *msr,
				    struct attribute **attrs)
{
	bool found = false;
	unsigned int bit;
	u64 val;

	for (bit = 0; bit < max; bit++) {
		if (test_bit(bit, &evmsk) && !rdmsrl_safe(msr[bit].msr, &val)) {
			*attrs++ = &msr[bit].attr->attr.attr;
			found = true;
		} else {
			msr[bit].attr = NULL;
		}
	}
	*attrs = NULL;

	return found;
}
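
/*
 * Illustrative example: with evmsk selecting the core C3 and C6 events
 * on a system where rdmsrl_safe() faults on the C3 MSR, only the C6
 * attribute is appended to attrs and msr[PERF_CSTATE_CORE_C3_RES].attr
 * is set to NULL, which event_init later uses to reject the
 * unavailable event with -EINVAL.
 */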

static int __init cstate_probe(const struct cstate_model *cm)
{
	/* SLM has different MSR for PKG C6 */
	if (cm->quirks & SLM_PKG_C6_USE_C7_MSR)
		pkg_msr[PERF_CSTATE_PKG_C6_RES].msr = MSR_PKG_C7_RESIDENCY;

	/* KNL has different MSR for CORE C6 */
	if (cm->quirks & KNL_CORE_C6_MSR)
		core_msr[PERF_CSTATE_CORE_C6_RES].msr = MSR_KNL_CORE_C6_RESIDENCY;

	has_cstate_core = cstate_probe_msr(cm->core_events,
					   PERF_CSTATE_CORE_EVENT_MAX,
					   core_msr, core_events_attrs);

	has_cstate_pkg = cstate_probe_msr(cm->pkg_events,
					  PERF_CSTATE_PKG_EVENT_MAX,
					  pkg_msr, pkg_events_attrs);

	return (has_cstate_core || has_cstate_pkg) ? 0 : -ENODEV;
}

static inline void cstate_cleanup(void)
{
	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_ONLINE);
	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_STARTING);

	if (has_cstate_core)
		perf_pmu_unregister(&cstate_core_pmu);

	if (has_cstate_pkg)
		perf_pmu_unregister(&cstate_pkg_pmu);
}

static int __init cstate_init(void)
{
	int err;

	cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_STARTING,
			  "perf/x86/cstate:starting", cstate_cpu_init, NULL);
	cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_ONLINE,
			  "perf/x86/cstate:online", NULL, cstate_cpu_exit);

	if (has_cstate_core) {
		err = perf_pmu_register(&cstate_core_pmu, cstate_core_pmu.name, -1);
		if (err) {
			has_cstate_core = false;
			pr_info("Failed to register cstate core pmu\n");
			cstate_cleanup();
			return err;
		}
	}

	if (has_cstate_pkg) {
		err = perf_pmu_register(&cstate_pkg_pmu, cstate_pkg_pmu.name, -1);
		if (err) {
			has_cstate_pkg = false;
			pr_info("Failed to register cstate pkg pmu\n");
			cstate_cleanup();
			return err;
		}
	}
	return 0;
}
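
/*
 * Note on ordering: the hotplug states are set up before the PMUs are
 * registered, so cstate_cpu_init() has already populated the designated
 * reader masks for all online CPUs by the time events can be created.
 */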

static int __init cstate_pmu_init(void)
{
	const struct x86_cpu_id *id;
	int err;

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return -ENODEV;

	id = x86_match_cpu(intel_cstates_match);
	if (!id)
		return -ENODEV;

	err = cstate_probe((const struct cstate_model *) id->driver_data);
	if (err)
		return err;

	return cstate_init();
}
module_init(cstate_pmu_init);

static void __exit cstate_pmu_exit(void)
{
	cstate_cleanup();
}
module_exit(cstate_pmu_exit);