xref: /openbmc/linux/arch/x86/events/intel/cstate.c (revision 2ff40250)
16aec1ad7SBorislav Petkov /*
2940b2f2fSBorislav Petkov  * Support cstate residency counters
36aec1ad7SBorislav Petkov  *
46aec1ad7SBorislav Petkov  * Copyright (C) 2015, Intel Corp.
56aec1ad7SBorislav Petkov  * Author: Kan Liang (kan.liang@intel.com)
66aec1ad7SBorislav Petkov  *
76aec1ad7SBorislav Petkov  * This library is free software; you can redistribute it and/or
86aec1ad7SBorislav Petkov  * modify it under the terms of the GNU Library General Public
96aec1ad7SBorislav Petkov  * License as published by the Free Software Foundation; either
106aec1ad7SBorislav Petkov  * version 2 of the License, or (at your option) any later version.
116aec1ad7SBorislav Petkov  *
126aec1ad7SBorislav Petkov  * This library is distributed in the hope that it will be useful,
136aec1ad7SBorislav Petkov  * but WITHOUT ANY WARRANTY; without even the implied warranty of
146aec1ad7SBorislav Petkov  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
156aec1ad7SBorislav Petkov  * Library General Public License for more details.
166aec1ad7SBorislav Petkov  *
176aec1ad7SBorislav Petkov  */
186aec1ad7SBorislav Petkov 
196aec1ad7SBorislav Petkov /*
206aec1ad7SBorislav Petkov  * This file exports cstate-related free running (read-only) counters
216aec1ad7SBorislav Petkov  * for perf. These counters may be used simultaneously by other tools,
226aec1ad7SBorislav Petkov  * such as turbostat. However, it still makes sense to implement them
236aec1ad7SBorislav Petkov  * in perf, because they can then be collected conveniently together
246aec1ad7SBorislav Petkov  * with other events, and tools can use them without needing special
256aec1ad7SBorislav Petkov  * MSR access code.
266aec1ad7SBorislav Petkov  *
276aec1ad7SBorislav Petkov  * The events only support system-wide mode counting. There is no
286aec1ad7SBorislav Petkov  * sampling support because the hardware does not provide it.
296aec1ad7SBorislav Petkov  *
306aec1ad7SBorislav Petkov  * According to the counters' scope and category, two PMUs are registered
316aec1ad7SBorislav Petkov  * with the perf_event core subsystem.
326aec1ad7SBorislav Petkov  *  - 'cstate_core': The counters are available for each physical core.
336aec1ad7SBorislav Petkov  *    The counters include CORE_C*_RESIDENCY.
346aec1ad7SBorislav Petkov  *  - 'cstate_pkg': The counters are available for each physical package.
356aec1ad7SBorislav Petkov  *    The counters include PKG_C*_RESIDENCY.
366aec1ad7SBorislav Petkov  *
376aec1ad7SBorislav Petkov  * All of these counters are specified in the Intel® 64 and IA-32
386aec1ad7SBorislav Petkov  * Architectures Software Developer's Manual Vol3b.
396aec1ad7SBorislav Petkov  *
406aec1ad7SBorislav Petkov  * Model specific counters:
416aec1ad7SBorislav Petkov  *	MSR_CORE_C1_RES: CORE C1 Residency Counter
426aec1ad7SBorislav Petkov  *			 perf code: 0x00
431159e094SHarry Pan  *			 Available model: SLM,AMT,GLM,CNL
446aec1ad7SBorislav Petkov  *			 Scope: Core (each processor core has an MSR)
456aec1ad7SBorislav Petkov  *	MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter
466aec1ad7SBorislav Petkov  *			       perf code: 0x01
471159e094SHarry Pan  *			       Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,GLM,
481159e094SHarry Pan  *						CNL
496aec1ad7SBorislav Petkov  *			       Scope: Core
506aec1ad7SBorislav Petkov  *	MSR_CORE_C6_RESIDENCY: CORE C6 Residency Counter
516aec1ad7SBorislav Petkov  *			       perf code: 0x02
521159e094SHarry Pan  *			       Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
531159e094SHarry Pan  *						SKL,KNL,GLM,CNL
546aec1ad7SBorislav Petkov  *			       Scope: Core
556aec1ad7SBorislav Petkov  *	MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
566aec1ad7SBorislav Petkov  *			       perf code: 0x03
571159e094SHarry Pan  *			       Available model: SNB,IVB,HSW,BDW,SKL,CNL
586aec1ad7SBorislav Petkov  *			       Scope: Core
596aec1ad7SBorislav Petkov  *	MSR_PKG_C2_RESIDENCY:  Package C2 Residency Counter.
606aec1ad7SBorislav Petkov  *			       perf code: 0x00
611159e094SHarry Pan  *			       Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM,CNL
626aec1ad7SBorislav Petkov  *			       Scope: Package (physical package)
636aec1ad7SBorislav Petkov  *	MSR_PKG_C3_RESIDENCY:  Package C3 Residency Counter.
646aec1ad7SBorislav Petkov  *			       perf code: 0x01
651159e094SHarry Pan  *			       Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL,
661159e094SHarry Pan  *						GLM,CNL
676aec1ad7SBorislav Petkov  *			       Scope: Package (physical package)
686aec1ad7SBorislav Petkov  *	MSR_PKG_C6_RESIDENCY:  Package C6 Residency Counter.
696aec1ad7SBorislav Petkov  *			       perf code: 0x02
70889882bcSLukasz Odzioba  *			       Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
711159e094SHarry Pan  *						SKL,KNL,GLM,CNL
726aec1ad7SBorislav Petkov  *			       Scope: Package (physical package)
736aec1ad7SBorislav Petkov  *	MSR_PKG_C7_RESIDENCY:  Package C7 Residency Counter.
746aec1ad7SBorislav Petkov  *			       perf code: 0x03
751159e094SHarry Pan  *			       Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,CNL
766aec1ad7SBorislav Petkov  *			       Scope: Package (physical package)
776aec1ad7SBorislav Petkov  *	MSR_PKG_C8_RESIDENCY:  Package C8 Residency Counter.
786aec1ad7SBorislav Petkov  *			       perf code: 0x04
791159e094SHarry Pan  *			       Available model: HSW ULT,CNL
806aec1ad7SBorislav Petkov  *			       Scope: Package (physical package)
816aec1ad7SBorislav Petkov  *	MSR_PKG_C9_RESIDENCY:  Package C9 Residency Counter.
826aec1ad7SBorislav Petkov  *			       perf code: 0x05
831159e094SHarry Pan  *			       Available model: HSW ULT,CNL
846aec1ad7SBorislav Petkov  *			       Scope: Package (physical package)
856aec1ad7SBorislav Petkov  *	MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
866aec1ad7SBorislav Petkov  *			       perf code: 0x06
871159e094SHarry Pan  *			       Available model: HSW ULT,GLM,CNL
886aec1ad7SBorislav Petkov  *			       Scope: Package (physical package)
896aec1ad7SBorislav Petkov  *
906aec1ad7SBorislav Petkov  */
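/*
 * Illustrative usage: once this module is loaded, the free running counters
 * can be read in system-wide mode with standard perf tooling, e.g.
 *
 *	perf stat -e cstate_core/c6-residency/ -a sleep 1
 *	perf stat -e cstate_pkg/c2-residency/,cstate_pkg/c10-residency/ -a sleep 1
 *
 * Which events are actually exposed depends on the CPU model probed below.
 */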
916aec1ad7SBorislav Petkov 
926aec1ad7SBorislav Petkov #include <linux/module.h>
936aec1ad7SBorislav Petkov #include <linux/slab.h>
946aec1ad7SBorislav Petkov #include <linux/perf_event.h>
95a5f81290SPeter Zijlstra #include <linux/nospec.h>
966aec1ad7SBorislav Petkov #include <asm/cpu_device_id.h>
97bf4ad541SDave Hansen #include <asm/intel-family.h>
9827f6d22bSBorislav Petkov #include "../perf_event.h"
996aec1ad7SBorislav Petkov 
100c7afba32SThomas Gleixner MODULE_LICENSE("GPL");
101c7afba32SThomas Gleixner 
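/*
 * Generates the sysfs show routine and attribute that populate an entry in a
 * PMU's "format" directory (here: the "event" field covering config:0-63).
 */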
1026aec1ad7SBorislav Petkov #define DEFINE_CSTATE_FORMAT_ATTR(_var, _name, _format)		\
1036aec1ad7SBorislav Petkov static ssize_t __cstate_##_var##_show(struct kobject *kobj,	\
1046aec1ad7SBorislav Petkov 				struct kobj_attribute *attr,	\
1056aec1ad7SBorislav Petkov 				char *page)			\
1066aec1ad7SBorislav Petkov {								\
1076aec1ad7SBorislav Petkov 	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);		\
1086aec1ad7SBorislav Petkov 	return sprintf(page, _format "\n");			\
1096aec1ad7SBorislav Petkov }								\
1106aec1ad7SBorislav Petkov static struct kobj_attribute format_attr_##_var =		\
1116aec1ad7SBorislav Petkov 	__ATTR(_name, 0444, __cstate_##_var##_show, NULL)
1126aec1ad7SBorislav Petkov 
1136aec1ad7SBorislav Petkov static ssize_t cstate_get_attr_cpumask(struct device *dev,
1146aec1ad7SBorislav Petkov 				       struct device_attribute *attr,
1156aec1ad7SBorislav Petkov 				       char *buf);
1166aec1ad7SBorislav Petkov 
117424646eeSThomas Gleixner /* Model -> events mapping */
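/*
 * core_events and pkg_events are bitmasks over enum perf_cstate_core_events
 * and enum perf_cstate_pkg_events respectively; quirks is a bitmask of the
 * quirk flags defined below.
 */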
118424646eeSThomas Gleixner struct cstate_model {
119424646eeSThomas Gleixner 	unsigned long		core_events;
120424646eeSThomas Gleixner 	unsigned long		pkg_events;
121424646eeSThomas Gleixner 	unsigned long		quirks;
122424646eeSThomas Gleixner };
123424646eeSThomas Gleixner 
124424646eeSThomas Gleixner /* Quirk flags */
125424646eeSThomas Gleixner #define SLM_PKG_C6_USE_C7_MSR	(1UL << 0)
126889882bcSLukasz Odzioba #define KNL_CORE_C6_MSR		(1UL << 1)
127424646eeSThomas Gleixner 
1286aec1ad7SBorislav Petkov struct perf_cstate_msr {
1296aec1ad7SBorislav Petkov 	u64	msr;
1306aec1ad7SBorislav Petkov 	struct	perf_pmu_events_attr *attr;
1316aec1ad7SBorislav Petkov };
1326aec1ad7SBorislav Petkov 
1336aec1ad7SBorislav Petkov 
1346aec1ad7SBorislav Petkov /* cstate_core PMU */
1356aec1ad7SBorislav Petkov static struct pmu cstate_core_pmu;
1366aec1ad7SBorislav Petkov static bool has_cstate_core;
1376aec1ad7SBorislav Petkov 
138424646eeSThomas Gleixner enum perf_cstate_core_events {
1396aec1ad7SBorislav Petkov 	PERF_CSTATE_CORE_C1_RES = 0,
1406aec1ad7SBorislav Petkov 	PERF_CSTATE_CORE_C3_RES,
1416aec1ad7SBorislav Petkov 	PERF_CSTATE_CORE_C6_RES,
1426aec1ad7SBorislav Petkov 	PERF_CSTATE_CORE_C7_RES,
1436aec1ad7SBorislav Petkov 
1446aec1ad7SBorislav Petkov 	PERF_CSTATE_CORE_EVENT_MAX,
1456aec1ad7SBorislav Petkov };
1466aec1ad7SBorislav Petkov 
1476aec1ad7SBorislav Petkov PMU_EVENT_ATTR_STRING(c1-residency, evattr_cstate_core_c1, "event=0x00");
1486aec1ad7SBorislav Petkov PMU_EVENT_ATTR_STRING(c3-residency, evattr_cstate_core_c3, "event=0x01");
1496aec1ad7SBorislav Petkov PMU_EVENT_ATTR_STRING(c6-residency, evattr_cstate_core_c6, "event=0x02");
1506aec1ad7SBorislav Petkov PMU_EVENT_ATTR_STRING(c7-residency, evattr_cstate_core_c7, "event=0x03");
1516aec1ad7SBorislav Petkov 
1526aec1ad7SBorislav Petkov static struct perf_cstate_msr core_msr[] = {
153424646eeSThomas Gleixner 	[PERF_CSTATE_CORE_C1_RES] = { MSR_CORE_C1_RES,		&evattr_cstate_core_c1 },
154424646eeSThomas Gleixner 	[PERF_CSTATE_CORE_C3_RES] = { MSR_CORE_C3_RESIDENCY,	&evattr_cstate_core_c3 },
155424646eeSThomas Gleixner 	[PERF_CSTATE_CORE_C6_RES] = { MSR_CORE_C6_RESIDENCY,	&evattr_cstate_core_c6 },
156424646eeSThomas Gleixner 	[PERF_CSTATE_CORE_C7_RES] = { MSR_CORE_C7_RESIDENCY,	&evattr_cstate_core_c7 },
1576aec1ad7SBorislav Petkov };
1586aec1ad7SBorislav Petkov 
1596aec1ad7SBorislav Petkov static struct attribute *core_events_attrs[PERF_CSTATE_CORE_EVENT_MAX + 1] = {
1606aec1ad7SBorislav Petkov 	NULL,
1616aec1ad7SBorislav Petkov };
1626aec1ad7SBorislav Petkov 
1636aec1ad7SBorislav Petkov static struct attribute_group core_events_attr_group = {
1646aec1ad7SBorislav Petkov 	.name = "events",
1656aec1ad7SBorislav Petkov 	.attrs = core_events_attrs,
1666aec1ad7SBorislav Petkov };
1676aec1ad7SBorislav Petkov 
1686aec1ad7SBorislav Petkov DEFINE_CSTATE_FORMAT_ATTR(core_event, event, "config:0-63");
1696aec1ad7SBorislav Petkov static struct attribute *core_format_attrs[] = {
1706aec1ad7SBorislav Petkov 	&format_attr_core_event.attr,
1716aec1ad7SBorislav Petkov 	NULL,
1726aec1ad7SBorislav Petkov };
1736aec1ad7SBorislav Petkov 
1746aec1ad7SBorislav Petkov static struct attribute_group core_format_attr_group = {
1756aec1ad7SBorislav Petkov 	.name = "format",
1766aec1ad7SBorislav Petkov 	.attrs = core_format_attrs,
1776aec1ad7SBorislav Petkov };
1786aec1ad7SBorislav Petkov 
1796aec1ad7SBorislav Petkov static cpumask_t cstate_core_cpu_mask;
1806aec1ad7SBorislav Petkov static DEVICE_ATTR(cpumask, S_IRUGO, cstate_get_attr_cpumask, NULL);
1816aec1ad7SBorislav Petkov 
1826aec1ad7SBorislav Petkov static struct attribute *cstate_cpumask_attrs[] = {
1836aec1ad7SBorislav Petkov 	&dev_attr_cpumask.attr,
1846aec1ad7SBorislav Petkov 	NULL,
1856aec1ad7SBorislav Petkov };
1866aec1ad7SBorislav Petkov 
1876aec1ad7SBorislav Petkov static struct attribute_group cpumask_attr_group = {
1886aec1ad7SBorislav Petkov 	.attrs = cstate_cpumask_attrs,
1896aec1ad7SBorislav Petkov };
1906aec1ad7SBorislav Petkov 
1916aec1ad7SBorislav Petkov static const struct attribute_group *core_attr_groups[] = {
1926aec1ad7SBorislav Petkov 	&core_events_attr_group,
1936aec1ad7SBorislav Petkov 	&core_format_attr_group,
1946aec1ad7SBorislav Petkov 	&cpumask_attr_group,
1956aec1ad7SBorislav Petkov 	NULL,
1966aec1ad7SBorislav Petkov };
1976aec1ad7SBorislav Petkov 
1986aec1ad7SBorislav Petkov /* cstate_pkg PMU */
1996aec1ad7SBorislav Petkov static struct pmu cstate_pkg_pmu;
2006aec1ad7SBorislav Petkov static bool has_cstate_pkg;
2016aec1ad7SBorislav Petkov 
202424646eeSThomas Gleixner enum perf_cstate_pkg_events {
2036aec1ad7SBorislav Petkov 	PERF_CSTATE_PKG_C2_RES = 0,
2046aec1ad7SBorislav Petkov 	PERF_CSTATE_PKG_C3_RES,
2056aec1ad7SBorislav Petkov 	PERF_CSTATE_PKG_C6_RES,
2066aec1ad7SBorislav Petkov 	PERF_CSTATE_PKG_C7_RES,
2076aec1ad7SBorislav Petkov 	PERF_CSTATE_PKG_C8_RES,
2086aec1ad7SBorislav Petkov 	PERF_CSTATE_PKG_C9_RES,
2096aec1ad7SBorislav Petkov 	PERF_CSTATE_PKG_C10_RES,
2106aec1ad7SBorislav Petkov 
2116aec1ad7SBorislav Petkov 	PERF_CSTATE_PKG_EVENT_MAX,
2126aec1ad7SBorislav Petkov };
2136aec1ad7SBorislav Petkov 
2146aec1ad7SBorislav Petkov PMU_EVENT_ATTR_STRING(c2-residency, evattr_cstate_pkg_c2, "event=0x00");
2156aec1ad7SBorislav Petkov PMU_EVENT_ATTR_STRING(c3-residency, evattr_cstate_pkg_c3, "event=0x01");
2166aec1ad7SBorislav Petkov PMU_EVENT_ATTR_STRING(c6-residency, evattr_cstate_pkg_c6, "event=0x02");
2176aec1ad7SBorislav Petkov PMU_EVENT_ATTR_STRING(c7-residency, evattr_cstate_pkg_c7, "event=0x03");
2186aec1ad7SBorislav Petkov PMU_EVENT_ATTR_STRING(c8-residency, evattr_cstate_pkg_c8, "event=0x04");
2196aec1ad7SBorislav Petkov PMU_EVENT_ATTR_STRING(c9-residency, evattr_cstate_pkg_c9, "event=0x05");
2206aec1ad7SBorislav Petkov PMU_EVENT_ATTR_STRING(c10-residency, evattr_cstate_pkg_c10, "event=0x06");
2216aec1ad7SBorislav Petkov 
2226aec1ad7SBorislav Petkov static struct perf_cstate_msr pkg_msr[] = {
223424646eeSThomas Gleixner 	[PERF_CSTATE_PKG_C2_RES] = { MSR_PKG_C2_RESIDENCY,	&evattr_cstate_pkg_c2 },
224424646eeSThomas Gleixner 	[PERF_CSTATE_PKG_C3_RES] = { MSR_PKG_C3_RESIDENCY,	&evattr_cstate_pkg_c3 },
225424646eeSThomas Gleixner 	[PERF_CSTATE_PKG_C6_RES] = { MSR_PKG_C6_RESIDENCY,	&evattr_cstate_pkg_c6 },
226424646eeSThomas Gleixner 	[PERF_CSTATE_PKG_C7_RES] = { MSR_PKG_C7_RESIDENCY,	&evattr_cstate_pkg_c7 },
227424646eeSThomas Gleixner 	[PERF_CSTATE_PKG_C8_RES] = { MSR_PKG_C8_RESIDENCY,	&evattr_cstate_pkg_c8 },
228424646eeSThomas Gleixner 	[PERF_CSTATE_PKG_C9_RES] = { MSR_PKG_C9_RESIDENCY,	&evattr_cstate_pkg_c9 },
229424646eeSThomas Gleixner 	[PERF_CSTATE_PKG_C10_RES] = { MSR_PKG_C10_RESIDENCY,	&evattr_cstate_pkg_c10 },
2306aec1ad7SBorislav Petkov };
2316aec1ad7SBorislav Petkov 
2326aec1ad7SBorislav Petkov static struct attribute *pkg_events_attrs[PERF_CSTATE_PKG_EVENT_MAX + 1] = {
2336aec1ad7SBorislav Petkov 	NULL,
2346aec1ad7SBorislav Petkov };
2356aec1ad7SBorislav Petkov 
2366aec1ad7SBorislav Petkov static struct attribute_group pkg_events_attr_group = {
2376aec1ad7SBorislav Petkov 	.name = "events",
2386aec1ad7SBorislav Petkov 	.attrs = pkg_events_attrs,
2396aec1ad7SBorislav Petkov };
2406aec1ad7SBorislav Petkov 
2416aec1ad7SBorislav Petkov DEFINE_CSTATE_FORMAT_ATTR(pkg_event, event, "config:0-63");
2426aec1ad7SBorislav Petkov static struct attribute *pkg_format_attrs[] = {
2436aec1ad7SBorislav Petkov 	&format_attr_pkg_event.attr,
2446aec1ad7SBorislav Petkov 	NULL,
2456aec1ad7SBorislav Petkov };
2466aec1ad7SBorislav Petkov static struct attribute_group pkg_format_attr_group = {
2476aec1ad7SBorislav Petkov 	.name = "format",
2486aec1ad7SBorislav Petkov 	.attrs = pkg_format_attrs,
2496aec1ad7SBorislav Petkov };
2506aec1ad7SBorislav Petkov 
2516aec1ad7SBorislav Petkov static cpumask_t cstate_pkg_cpu_mask;
2526aec1ad7SBorislav Petkov 
2536aec1ad7SBorislav Petkov static const struct attribute_group *pkg_attr_groups[] = {
2546aec1ad7SBorislav Petkov 	&pkg_events_attr_group,
2556aec1ad7SBorislav Petkov 	&pkg_format_attr_group,
2566aec1ad7SBorislav Petkov 	&cpumask_attr_group,
2576aec1ad7SBorislav Petkov 	NULL,
2586aec1ad7SBorislav Petkov };
2596aec1ad7SBorislav Petkov 
2606aec1ad7SBorislav Petkov static ssize_t cstate_get_attr_cpumask(struct device *dev,
2616aec1ad7SBorislav Petkov 				       struct device_attribute *attr,
2626aec1ad7SBorislav Petkov 				       char *buf)
2636aec1ad7SBorislav Petkov {
2646aec1ad7SBorislav Petkov 	struct pmu *pmu = dev_get_drvdata(dev);
2656aec1ad7SBorislav Petkov 
2666aec1ad7SBorislav Petkov 	if (pmu == &cstate_core_pmu)
2676aec1ad7SBorislav Petkov 		return cpumap_print_to_pagebuf(true, buf, &cstate_core_cpu_mask);
2686aec1ad7SBorislav Petkov 	else if (pmu == &cstate_pkg_pmu)
2696aec1ad7SBorislav Petkov 		return cpumap_print_to_pagebuf(true, buf, &cstate_pkg_cpu_mask);
2706aec1ad7SBorislav Petkov 	else
2716aec1ad7SBorislav Petkov 		return 0;
2726aec1ad7SBorislav Petkov }
2736aec1ad7SBorislav Petkov 
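/*
 * Event init pins each event to the designated reader CPU of the requested
 * CPU's core (cstate_core) or package (cstate_pkg); the CPU hotplug
 * callbacks further down keep those reader masks up to date.
 */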
2746aec1ad7SBorislav Petkov static int cstate_pmu_event_init(struct perf_event *event)
2756aec1ad7SBorislav Petkov {
2766aec1ad7SBorislav Petkov 	u64 cfg = event->attr.config;
27749de0493SThomas Gleixner 	int cpu;
2786aec1ad7SBorislav Petkov 
2796aec1ad7SBorislav Petkov 	if (event->attr.type != event->pmu->type)
2806aec1ad7SBorislav Petkov 		return -ENOENT;
2816aec1ad7SBorislav Petkov 
2826aec1ad7SBorislav Petkov 	/* unsupported modes and filters */
2832ff40250SAndrew Murray 	if (event->attr.sample_period) /* no sampling */
2846aec1ad7SBorislav Petkov 		return -EINVAL;
2856aec1ad7SBorislav Petkov 
28649de0493SThomas Gleixner 	if (event->cpu < 0)
28749de0493SThomas Gleixner 		return -EINVAL;
28849de0493SThomas Gleixner 
2896aec1ad7SBorislav Petkov 	if (event->pmu == &cstate_core_pmu) {
2906aec1ad7SBorislav Petkov 		if (cfg >= PERF_CSTATE_CORE_EVENT_MAX)
2916aec1ad7SBorislav Petkov 			return -EINVAL;
2926aec1ad7SBorislav Petkov 		if (!core_msr[cfg].attr)
2936aec1ad7SBorislav Petkov 			return -EINVAL;
2946aec1ad7SBorislav Petkov 		event->hw.event_base = core_msr[cfg].msr;
29549de0493SThomas Gleixner 		cpu = cpumask_any_and(&cstate_core_cpu_mask,
29649de0493SThomas Gleixner 				      topology_sibling_cpumask(event->cpu));
2976aec1ad7SBorislav Petkov 	} else if (event->pmu == &cstate_pkg_pmu) {
2986aec1ad7SBorislav Petkov 		if (cfg >= PERF_CSTATE_PKG_EVENT_MAX)
2996aec1ad7SBorislav Petkov 			return -EINVAL;
300a5f81290SPeter Zijlstra 		cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_PKG_EVENT_MAX);
3016aec1ad7SBorislav Petkov 		if (!pkg_msr[cfg].attr)
3026aec1ad7SBorislav Petkov 			return -EINVAL;
3036aec1ad7SBorislav Petkov 		event->hw.event_base = pkg_msr[cfg].msr;
30449de0493SThomas Gleixner 		cpu = cpumask_any_and(&cstate_pkg_cpu_mask,
30549de0493SThomas Gleixner 				      topology_core_cpumask(event->cpu));
30649de0493SThomas Gleixner 	} else {
3076aec1ad7SBorislav Petkov 		return -ENOENT;
30849de0493SThomas Gleixner 	}
3096aec1ad7SBorislav Petkov 
31049de0493SThomas Gleixner 	if (cpu >= nr_cpu_ids)
31149de0493SThomas Gleixner 		return -ENODEV;
31249de0493SThomas Gleixner 
31349de0493SThomas Gleixner 	event->cpu = cpu;
3146aec1ad7SBorislav Petkov 	event->hw.config = cfg;
3156aec1ad7SBorislav Petkov 	event->hw.idx = -1;
31649de0493SThomas Gleixner 	return 0;
3176aec1ad7SBorislav Petkov }
3186aec1ad7SBorislav Petkov 
3196aec1ad7SBorislav Petkov static inline u64 cstate_pmu_read_counter(struct perf_event *event)
3206aec1ad7SBorislav Petkov {
3216aec1ad7SBorislav Petkov 	u64 val;
3226aec1ad7SBorislav Petkov 
3236aec1ad7SBorislav Petkov 	rdmsrl(event->hw.event_base, val);
3246aec1ad7SBorislav Petkov 	return val;
3256aec1ad7SBorislav Petkov }
3266aec1ad7SBorislav Petkov 
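/*
 * The residency MSRs are free running, so the update path accumulates the
 * delta against the last snapshot in prev_count; the cmpxchg retry loop
 * handles a concurrent update of prev_count by another reader.
 */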
3276aec1ad7SBorislav Petkov static void cstate_pmu_event_update(struct perf_event *event)
3286aec1ad7SBorislav Petkov {
3296aec1ad7SBorislav Petkov 	struct hw_perf_event *hwc = &event->hw;
3306aec1ad7SBorislav Petkov 	u64 prev_raw_count, new_raw_count;
3316aec1ad7SBorislav Petkov 
3326aec1ad7SBorislav Petkov again:
3336aec1ad7SBorislav Petkov 	prev_raw_count = local64_read(&hwc->prev_count);
3346aec1ad7SBorislav Petkov 	new_raw_count = cstate_pmu_read_counter(event);
3356aec1ad7SBorislav Petkov 
3366aec1ad7SBorislav Petkov 	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
3376aec1ad7SBorislav Petkov 			    new_raw_count) != prev_raw_count)
3386aec1ad7SBorislav Petkov 		goto again;
3396aec1ad7SBorislav Petkov 
3406aec1ad7SBorislav Petkov 	local64_add(new_raw_count - prev_raw_count, &event->count);
3416aec1ad7SBorislav Petkov }
3426aec1ad7SBorislav Petkov 
3436aec1ad7SBorislav Petkov static void cstate_pmu_event_start(struct perf_event *event, int mode)
3446aec1ad7SBorislav Petkov {
3456aec1ad7SBorislav Petkov 	local64_set(&event->hw.prev_count, cstate_pmu_read_counter(event));
3466aec1ad7SBorislav Petkov }
3476aec1ad7SBorislav Petkov 
3486aec1ad7SBorislav Petkov static void cstate_pmu_event_stop(struct perf_event *event, int mode)
3496aec1ad7SBorislav Petkov {
3506aec1ad7SBorislav Petkov 	cstate_pmu_event_update(event);
3516aec1ad7SBorislav Petkov }
3526aec1ad7SBorislav Petkov 
3536aec1ad7SBorislav Petkov static void cstate_pmu_event_del(struct perf_event *event, int mode)
3546aec1ad7SBorislav Petkov {
3556aec1ad7SBorislav Petkov 	cstate_pmu_event_stop(event, PERF_EF_UPDATE);
3566aec1ad7SBorislav Petkov }
3576aec1ad7SBorislav Petkov 
3586aec1ad7SBorislav Petkov static int cstate_pmu_event_add(struct perf_event *event, int mode)
3596aec1ad7SBorislav Petkov {
3606aec1ad7SBorislav Petkov 	if (mode & PERF_EF_START)
3616aec1ad7SBorislav Petkov 		cstate_pmu_event_start(event, mode);
3626aec1ad7SBorislav Petkov 
3636aec1ad7SBorislav Petkov 	return 0;
3646aec1ad7SBorislav Petkov }
3656aec1ad7SBorislav Petkov 
36649de0493SThomas Gleixner /*
36749de0493SThomas Gleixner  * Check if the exiting CPU is the designated reader. If so, migrate the
36849de0493SThomas Gleixner  * events to a valid target, if one is available.
36949de0493SThomas Gleixner  */
37077c34ef1SSebastian Andrzej Siewior static int cstate_cpu_exit(unsigned int cpu)
3716aec1ad7SBorislav Petkov {
37249de0493SThomas Gleixner 	unsigned int target;
3736aec1ad7SBorislav Petkov 
37449de0493SThomas Gleixner 	if (has_cstate_core &&
37549de0493SThomas Gleixner 	    cpumask_test_and_clear_cpu(cpu, &cstate_core_cpu_mask)) {
3766aec1ad7SBorislav Petkov 
37749de0493SThomas Gleixner 		target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
37849de0493SThomas Gleixner 		/* Migrate events if there is a valid target */
37949de0493SThomas Gleixner 		if (target < nr_cpu_ids) {
3806aec1ad7SBorislav Petkov 			cpumask_set_cpu(target, &cstate_core_cpu_mask);
3816aec1ad7SBorislav Petkov 			perf_pmu_migrate_context(&cstate_core_pmu, cpu, target);
3826aec1ad7SBorislav Petkov 		}
3836aec1ad7SBorislav Petkov 	}
38449de0493SThomas Gleixner 
38549de0493SThomas Gleixner 	if (has_cstate_pkg &&
38649de0493SThomas Gleixner 	    cpumask_test_and_clear_cpu(cpu, &cstate_pkg_cpu_mask)) {
38749de0493SThomas Gleixner 
38849de0493SThomas Gleixner 		target = cpumask_any_but(topology_core_cpumask(cpu), cpu);
38949de0493SThomas Gleixner 		/* Migrate events if there is a valid target */
39049de0493SThomas Gleixner 		if (target < nr_cpu_ids) {
3916aec1ad7SBorislav Petkov 			cpumask_set_cpu(target, &cstate_pkg_cpu_mask);
3926aec1ad7SBorislav Petkov 			perf_pmu_migrate_context(&cstate_pkg_pmu, cpu, target);
3936aec1ad7SBorislav Petkov 		}
3946aec1ad7SBorislav Petkov 	}
39577c34ef1SSebastian Andrzej Siewior 	return 0;
39649de0493SThomas Gleixner }
3976aec1ad7SBorislav Petkov 
39877c34ef1SSebastian Andrzej Siewior static int cstate_cpu_init(unsigned int cpu)
3996aec1ad7SBorislav Petkov {
40049de0493SThomas Gleixner 	unsigned int target;
4016aec1ad7SBorislav Petkov 
40249de0493SThomas Gleixner 	/*
40349de0493SThomas Gleixner 	 * If this is the first online thread of that core, set it in
40449de0493SThomas Gleixner 	 * the core cpu mask as the designated reader.
40549de0493SThomas Gleixner 	 */
40649de0493SThomas Gleixner 	target = cpumask_any_and(&cstate_core_cpu_mask,
40749de0493SThomas Gleixner 				 topology_sibling_cpumask(cpu));
40849de0493SThomas Gleixner 
40949de0493SThomas Gleixner 	if (has_cstate_core && target >= nr_cpu_ids)
4106aec1ad7SBorislav Petkov 		cpumask_set_cpu(cpu, &cstate_core_cpu_mask);
4116aec1ad7SBorislav Petkov 
41249de0493SThomas Gleixner 	/*
41349de0493SThomas Gleixner 	 * If this is the first online thread of that package, set it
41449de0493SThomas Gleixner 	 * in the package cpu mask as the designated reader.
41549de0493SThomas Gleixner 	 */
41649de0493SThomas Gleixner 	target = cpumask_any_and(&cstate_pkg_cpu_mask,
41749de0493SThomas Gleixner 				 topology_core_cpumask(cpu));
41849de0493SThomas Gleixner 	if (has_cstate_pkg && target >= nr_cpu_ids)
4196aec1ad7SBorislav Petkov 		cpumask_set_cpu(cpu, &cstate_pkg_cpu_mask);
4206aec1ad7SBorislav Petkov 
42177c34ef1SSebastian Andrzej Siewior 	return 0;
4226aec1ad7SBorislav Petkov }
423c7afba32SThomas Gleixner 
424424646eeSThomas Gleixner static struct pmu cstate_core_pmu = {
425424646eeSThomas Gleixner 	.attr_groups	= core_attr_groups,
426424646eeSThomas Gleixner 	.name		= "cstate_core",
427424646eeSThomas Gleixner 	.task_ctx_nr	= perf_invalid_context,
428424646eeSThomas Gleixner 	.event_init	= cstate_pmu_event_init,
429424646eeSThomas Gleixner 	.add		= cstate_pmu_event_add,
430424646eeSThomas Gleixner 	.del		= cstate_pmu_event_del,
431424646eeSThomas Gleixner 	.start		= cstate_pmu_event_start,
432424646eeSThomas Gleixner 	.stop		= cstate_pmu_event_stop,
433424646eeSThomas Gleixner 	.read		= cstate_pmu_event_update,
4342ff40250SAndrew Murray 	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE,
43574545f63SDavid Carrillo-Cisneros 	.module		= THIS_MODULE,
436424646eeSThomas Gleixner };
437424646eeSThomas Gleixner 
438424646eeSThomas Gleixner static struct pmu cstate_pkg_pmu = {
439424646eeSThomas Gleixner 	.attr_groups	= pkg_attr_groups,
440424646eeSThomas Gleixner 	.name		= "cstate_pkg",
441424646eeSThomas Gleixner 	.task_ctx_nr	= perf_invalid_context,
442424646eeSThomas Gleixner 	.event_init	= cstate_pmu_event_init,
443424646eeSThomas Gleixner 	.add		= cstate_pmu_event_add,
444424646eeSThomas Gleixner 	.del		= cstate_pmu_event_del,
445424646eeSThomas Gleixner 	.start		= cstate_pmu_event_start,
446424646eeSThomas Gleixner 	.stop		= cstate_pmu_event_stop,
447424646eeSThomas Gleixner 	.read		= cstate_pmu_event_update,
4482ff40250SAndrew Murray 	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE,
44974545f63SDavid Carrillo-Cisneros 	.module		= THIS_MODULE,
450424646eeSThomas Gleixner };
451424646eeSThomas Gleixner 
452424646eeSThomas Gleixner static const struct cstate_model nhm_cstates __initconst = {
453424646eeSThomas Gleixner 	.core_events		= BIT(PERF_CSTATE_CORE_C3_RES) |
454424646eeSThomas Gleixner 				  BIT(PERF_CSTATE_CORE_C6_RES),
455424646eeSThomas Gleixner 
456424646eeSThomas Gleixner 	.pkg_events		= BIT(PERF_CSTATE_PKG_C3_RES) |
457424646eeSThomas Gleixner 				  BIT(PERF_CSTATE_PKG_C6_RES) |
458424646eeSThomas Gleixner 				  BIT(PERF_CSTATE_PKG_C7_RES),
459424646eeSThomas Gleixner };
460424646eeSThomas Gleixner 
461424646eeSThomas Gleixner static const struct cstate_model snb_cstates __initconst = {
462424646eeSThomas Gleixner 	.core_events		= BIT(PERF_CSTATE_CORE_C3_RES) |
463424646eeSThomas Gleixner 				  BIT(PERF_CSTATE_CORE_C6_RES) |
464424646eeSThomas Gleixner 				  BIT(PERF_CSTATE_CORE_C7_RES),
465424646eeSThomas Gleixner 
466424646eeSThomas Gleixner 	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
467424646eeSThomas Gleixner 				  BIT(PERF_CSTATE_PKG_C3_RES) |
468424646eeSThomas Gleixner 				  BIT(PERF_CSTATE_PKG_C6_RES) |
469424646eeSThomas Gleixner 				  BIT(PERF_CSTATE_PKG_C7_RES),
470424646eeSThomas Gleixner };
471424646eeSThomas Gleixner 
472424646eeSThomas Gleixner static const struct cstate_model hswult_cstates __initconst = {
473424646eeSThomas Gleixner 	.core_events		= BIT(PERF_CSTATE_CORE_C3_RES) |
474424646eeSThomas Gleixner 				  BIT(PERF_CSTATE_CORE_C6_RES) |
475424646eeSThomas Gleixner 				  BIT(PERF_CSTATE_CORE_C7_RES),
476424646eeSThomas Gleixner 
477424646eeSThomas Gleixner 	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
478424646eeSThomas Gleixner 				  BIT(PERF_CSTATE_PKG_C3_RES) |
479424646eeSThomas Gleixner 				  BIT(PERF_CSTATE_PKG_C6_RES) |
480424646eeSThomas Gleixner 				  BIT(PERF_CSTATE_PKG_C7_RES) |
481424646eeSThomas Gleixner 				  BIT(PERF_CSTATE_PKG_C8_RES) |
482424646eeSThomas Gleixner 				  BIT(PERF_CSTATE_PKG_C9_RES) |
483424646eeSThomas Gleixner 				  BIT(PERF_CSTATE_PKG_C10_RES),
484424646eeSThomas Gleixner };
485424646eeSThomas Gleixner 
4861159e094SHarry Pan static const struct cstate_model cnl_cstates __initconst = {
4871159e094SHarry Pan 	.core_events		= BIT(PERF_CSTATE_CORE_C1_RES) |
4881159e094SHarry Pan 				  BIT(PERF_CSTATE_CORE_C3_RES) |
4891159e094SHarry Pan 				  BIT(PERF_CSTATE_CORE_C6_RES) |
4901159e094SHarry Pan 				  BIT(PERF_CSTATE_CORE_C7_RES),
4911159e094SHarry Pan 
4921159e094SHarry Pan 	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
4931159e094SHarry Pan 				  BIT(PERF_CSTATE_PKG_C3_RES) |
4941159e094SHarry Pan 				  BIT(PERF_CSTATE_PKG_C6_RES) |
4951159e094SHarry Pan 				  BIT(PERF_CSTATE_PKG_C7_RES) |
4961159e094SHarry Pan 				  BIT(PERF_CSTATE_PKG_C8_RES) |
4971159e094SHarry Pan 				  BIT(PERF_CSTATE_PKG_C9_RES) |
4981159e094SHarry Pan 				  BIT(PERF_CSTATE_PKG_C10_RES),
4991159e094SHarry Pan };
5001159e094SHarry Pan 
501424646eeSThomas Gleixner static const struct cstate_model slm_cstates __initconst = {
502424646eeSThomas Gleixner 	.core_events		= BIT(PERF_CSTATE_CORE_C1_RES) |
503424646eeSThomas Gleixner 				  BIT(PERF_CSTATE_CORE_C6_RES),
504424646eeSThomas Gleixner 
505424646eeSThomas Gleixner 	.pkg_events		= BIT(PERF_CSTATE_PKG_C6_RES),
506424646eeSThomas Gleixner 	.quirks			= SLM_PKG_C6_USE_C7_MSR,
507424646eeSThomas Gleixner };
508424646eeSThomas Gleixner 
509889882bcSLukasz Odzioba 
510889882bcSLukasz Odzioba static const struct cstate_model knl_cstates __initconst = {
511889882bcSLukasz Odzioba 	.core_events		= BIT(PERF_CSTATE_CORE_C6_RES),
512889882bcSLukasz Odzioba 
513889882bcSLukasz Odzioba 	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
514889882bcSLukasz Odzioba 				  BIT(PERF_CSTATE_PKG_C3_RES) |
515889882bcSLukasz Odzioba 				  BIT(PERF_CSTATE_PKG_C6_RES),
516889882bcSLukasz Odzioba 	.quirks			= KNL_CORE_C6_MSR,
517889882bcSLukasz Odzioba };
518889882bcSLukasz Odzioba 
519889882bcSLukasz Odzioba 
5205c10b048SHarry Pan static const struct cstate_model glm_cstates __initconst = {
5215c10b048SHarry Pan 	.core_events		= BIT(PERF_CSTATE_CORE_C1_RES) |
5225c10b048SHarry Pan 				  BIT(PERF_CSTATE_CORE_C3_RES) |
5235c10b048SHarry Pan 				  BIT(PERF_CSTATE_CORE_C6_RES),
5245c10b048SHarry Pan 
5255c10b048SHarry Pan 	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
5265c10b048SHarry Pan 				  BIT(PERF_CSTATE_PKG_C3_RES) |
5275c10b048SHarry Pan 				  BIT(PERF_CSTATE_PKG_C6_RES) |
5285c10b048SHarry Pan 				  BIT(PERF_CSTATE_PKG_C10_RES),
5295c10b048SHarry Pan };
5305c10b048SHarry Pan 
531889882bcSLukasz Odzioba 
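/*
 * Ties a family 6 model number to its cstate_model description; driver_data
 * carries the pointer that cstate_probe() consumes at module init time.
 */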
532424646eeSThomas Gleixner #define X86_CSTATES_MODEL(model, states)				\
533424646eeSThomas Gleixner 	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long) &(states) }
534424646eeSThomas Gleixner 
535424646eeSThomas Gleixner static const struct x86_cpu_id intel_cstates_match[] __initconst = {
536bf4ad541SDave Hansen 	X86_CSTATES_MODEL(INTEL_FAM6_NEHALEM,    nhm_cstates),
537bf4ad541SDave Hansen 	X86_CSTATES_MODEL(INTEL_FAM6_NEHALEM_EP, nhm_cstates),
538bf4ad541SDave Hansen 	X86_CSTATES_MODEL(INTEL_FAM6_NEHALEM_EX, nhm_cstates),
539424646eeSThomas Gleixner 
540bf4ad541SDave Hansen 	X86_CSTATES_MODEL(INTEL_FAM6_WESTMERE,    nhm_cstates),
541bf4ad541SDave Hansen 	X86_CSTATES_MODEL(INTEL_FAM6_WESTMERE_EP, nhm_cstates),
542bf4ad541SDave Hansen 	X86_CSTATES_MODEL(INTEL_FAM6_WESTMERE_EX, nhm_cstates),
543424646eeSThomas Gleixner 
544bf4ad541SDave Hansen 	X86_CSTATES_MODEL(INTEL_FAM6_SANDYBRIDGE,   snb_cstates),
545bf4ad541SDave Hansen 	X86_CSTATES_MODEL(INTEL_FAM6_SANDYBRIDGE_X, snb_cstates),
546424646eeSThomas Gleixner 
547bf4ad541SDave Hansen 	X86_CSTATES_MODEL(INTEL_FAM6_IVYBRIDGE,   snb_cstates),
548bf4ad541SDave Hansen 	X86_CSTATES_MODEL(INTEL_FAM6_IVYBRIDGE_X, snb_cstates),
549424646eeSThomas Gleixner 
550bf4ad541SDave Hansen 	X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_CORE, snb_cstates),
551bf4ad541SDave Hansen 	X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_X,	   snb_cstates),
552bf4ad541SDave Hansen 	X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_GT3E, snb_cstates),
553424646eeSThomas Gleixner 
554bf4ad541SDave Hansen 	X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_ULT, hswult_cstates),
555424646eeSThomas Gleixner 
556f2c4db1bSPeter Zijlstra 	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT, slm_cstates),
557f2c4db1bSPeter Zijlstra 	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT_X, slm_cstates),
558bf4ad541SDave Hansen 	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_AIRMONT,     slm_cstates),
559424646eeSThomas Gleixner 
560bf4ad541SDave Hansen 	X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_CORE,   snb_cstates),
561bf4ad541SDave Hansen 	X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_XEON_D, snb_cstates),
562bf4ad541SDave Hansen 	X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_GT3E,   snb_cstates),
563bf4ad541SDave Hansen 	X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_X,      snb_cstates),
564424646eeSThomas Gleixner 
565bf4ad541SDave Hansen 	X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_MOBILE,  snb_cstates),
566bf4ad541SDave Hansen 	X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_DESKTOP, snb_cstates),
567b09c146fSKan Liang 	X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_X, snb_cstates),
568889882bcSLukasz Odzioba 
569f2029b1eSSrinivas Pandruvada 	X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_MOBILE,  snb_cstates),
570f2029b1eSSrinivas Pandruvada 	X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_DESKTOP, snb_cstates),
571f2029b1eSSrinivas Pandruvada 
5721159e094SHarry Pan 	X86_CSTATES_MODEL(INTEL_FAM6_CANNONLAKE_MOBILE, cnl_cstates),
5731159e094SHarry Pan 
574889882bcSLukasz Odzioba 	X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNL, knl_cstates),
5751dba23b1SPiotr Luc 	X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNM, knl_cstates),
5765c10b048SHarry Pan 
5775c10b048SHarry Pan 	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT, glm_cstates),
578f2c4db1bSPeter Zijlstra 	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_X, glm_cstates),
579b09c146fSKan Liang 
580f2c4db1bSPeter Zijlstra 	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_PLUS, glm_cstates),
581424646eeSThomas Gleixner 	{ },
582424646eeSThomas Gleixner };
583424646eeSThomas Gleixner MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
584424646eeSThomas Gleixner 
5856aec1ad7SBorislav Petkov /*
5866aec1ad7SBorislav Petkov  * Probe the cstate events and insert the available ones into the sysfs attrs.
587424646eeSThomas Gleixner  * Return false if there are no available events.
5886aec1ad7SBorislav Petkov  */
589424646eeSThomas Gleixner static bool __init cstate_probe_msr(const unsigned long evmsk, int max,
590424646eeSThomas Gleixner                                    struct perf_cstate_msr *msr,
591424646eeSThomas Gleixner                                    struct attribute **attrs)
5926aec1ad7SBorislav Petkov {
593424646eeSThomas Gleixner 	bool found = false;
594424646eeSThomas Gleixner 	unsigned int bit;
5956aec1ad7SBorislav Petkov 	u64 val;
5966aec1ad7SBorislav Petkov 
597424646eeSThomas Gleixner 	for (bit = 0; bit < max; bit++) {
598424646eeSThomas Gleixner 		if (test_bit(bit, &evmsk) && !rdmsrl_safe(msr[bit].msr, &val)) {
599424646eeSThomas Gleixner 			*attrs++ = &msr[bit].attr->attr.attr;
600424646eeSThomas Gleixner 			found = true;
601424646eeSThomas Gleixner 		} else {
602424646eeSThomas Gleixner 			msr[bit].attr = NULL;
603424646eeSThomas Gleixner 		}
604424646eeSThomas Gleixner 	}
605424646eeSThomas Gleixner 	*attrs = NULL;
606424646eeSThomas Gleixner 
607424646eeSThomas Gleixner 	return found;
6086aec1ad7SBorislav Petkov }
6096aec1ad7SBorislav Petkov 
610424646eeSThomas Gleixner static int __init cstate_probe(const struct cstate_model *cm)
6116aec1ad7SBorislav Petkov {
6126aec1ad7SBorislav Petkov 	/* SLM has different MSR for PKG C6 */
613424646eeSThomas Gleixner 	if (cm->quirks & SLM_PKG_C6_USE_C7_MSR)
6146aec1ad7SBorislav Petkov 		pkg_msr[PERF_CSTATE_PKG_C6_RES].msr = MSR_PKG_C7_RESIDENCY;
6156aec1ad7SBorislav Petkov 
616889882bcSLukasz Odzioba 	/* KNL has different MSR for CORE C6 */
617889882bcSLukasz Odzioba 	if (cm->quirks & KNL_CORE_C6_MSR)
618889882bcSLukasz Odzioba 		pkg_msr[PERF_CSTATE_CORE_C6_RES].msr = MSR_KNL_CORE_C6_RESIDENCY;
619889882bcSLukasz Odzioba 
620889882bcSLukasz Odzioba 
621424646eeSThomas Gleixner 	has_cstate_core = cstate_probe_msr(cm->core_events,
622424646eeSThomas Gleixner 					   PERF_CSTATE_CORE_EVENT_MAX,
623424646eeSThomas Gleixner 					   core_msr, core_events_attrs);
6246aec1ad7SBorislav Petkov 
625424646eeSThomas Gleixner 	has_cstate_pkg = cstate_probe_msr(cm->pkg_events,
626424646eeSThomas Gleixner 					  PERF_CSTATE_PKG_EVENT_MAX,
627424646eeSThomas Gleixner 					  pkg_msr, pkg_events_attrs);
6286aec1ad7SBorislav Petkov 
6296aec1ad7SBorislav Petkov 	return (has_cstate_core || has_cstate_pkg) ? 0 : -ENODEV;
6306aec1ad7SBorislav Petkov }
6316aec1ad7SBorislav Petkov 
632c7afba32SThomas Gleixner static inline void cstate_cleanup(void)
6336aec1ad7SBorislav Petkov {
634834fcd29SThomas Gleixner 	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_ONLINE);
635834fcd29SThomas Gleixner 	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_STARTING);
636834fcd29SThomas Gleixner 
637d29859e7SThomas Gleixner 	if (has_cstate_core)
638d29859e7SThomas Gleixner 		perf_pmu_unregister(&cstate_core_pmu);
639d29859e7SThomas Gleixner 
640d29859e7SThomas Gleixner 	if (has_cstate_pkg)
641d29859e7SThomas Gleixner 		perf_pmu_unregister(&cstate_pkg_pmu);
642d29859e7SThomas Gleixner }
643d29859e7SThomas Gleixner 
644d29859e7SThomas Gleixner static int __init cstate_init(void)
645d29859e7SThomas Gleixner {
64677c34ef1SSebastian Andrzej Siewior 	int err;
6476aec1ad7SBorislav Petkov 
64877c34ef1SSebastian Andrzej Siewior 	cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_STARTING,
649834fcd29SThomas Gleixner 			  "perf/x86/cstate:starting", cstate_cpu_init, NULL);
65077c34ef1SSebastian Andrzej Siewior 	cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_ONLINE,
651834fcd29SThomas Gleixner 			  "perf/x86/cstate:online", NULL, cstate_cpu_exit);
6526aec1ad7SBorislav Petkov 
6536aec1ad7SBorislav Petkov 	if (has_cstate_core) {
6546aec1ad7SBorislav Petkov 		err = perf_pmu_register(&cstate_core_pmu, cstate_core_pmu.name, -1);
655d29859e7SThomas Gleixner 		if (err) {
656d29859e7SThomas Gleixner 			has_cstate_core = false;
657d29859e7SThomas Gleixner 			pr_info("Failed to register cstate core pmu\n");
658834fcd29SThomas Gleixner 			cstate_cleanup();
65977c34ef1SSebastian Andrzej Siewior 			return err;
660d29859e7SThomas Gleixner 		}
6616aec1ad7SBorislav Petkov 	}
6626aec1ad7SBorislav Petkov 
6636aec1ad7SBorislav Petkov 	if (has_cstate_pkg) {
6646aec1ad7SBorislav Petkov 		err = perf_pmu_register(&cstate_pkg_pmu, cstate_pkg_pmu.name, -1);
665d29859e7SThomas Gleixner 		if (err) {
666d29859e7SThomas Gleixner 			has_cstate_pkg = false;
667d29859e7SThomas Gleixner 			pr_info("Failed to register cstate pkg pmu\n");
668d29859e7SThomas Gleixner 			cstate_cleanup();
66977c34ef1SSebastian Andrzej Siewior 			return err;
6706aec1ad7SBorislav Petkov 		}
6716aec1ad7SBorislav Petkov 	}
672834fcd29SThomas Gleixner 	return 0;
673d29859e7SThomas Gleixner }
6746aec1ad7SBorislav Petkov 
6756aec1ad7SBorislav Petkov static int __init cstate_pmu_init(void)
6766aec1ad7SBorislav Petkov {
677424646eeSThomas Gleixner 	const struct x86_cpu_id *id;
6786aec1ad7SBorislav Petkov 	int err;
6796aec1ad7SBorislav Petkov 
680424646eeSThomas Gleixner 	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
6816aec1ad7SBorislav Petkov 		return -ENODEV;
6826aec1ad7SBorislav Petkov 
683424646eeSThomas Gleixner 	id = x86_match_cpu(intel_cstates_match);
684424646eeSThomas Gleixner 	if (!id)
685424646eeSThomas Gleixner 		return -ENODEV;
686424646eeSThomas Gleixner 
687424646eeSThomas Gleixner 	err = cstate_probe((const struct cstate_model *) id->driver_data);
6886aec1ad7SBorislav Petkov 	if (err)
6896aec1ad7SBorislav Petkov 		return err;
6906aec1ad7SBorislav Petkov 
691d29859e7SThomas Gleixner 	return cstate_init();
6926aec1ad7SBorislav Petkov }
693c7afba32SThomas Gleixner module_init(cstate_pmu_init);
694c7afba32SThomas Gleixner 
695c7afba32SThomas Gleixner static void __exit cstate_pmu_exit(void)
696c7afba32SThomas Gleixner {
697c7afba32SThomas Gleixner 	cstate_cleanup();
698c7afba32SThomas Gleixner }
699c7afba32SThomas Gleixner module_exit(cstate_pmu_exit);
700