xref: /openbmc/linux/arch/x86/events/intel/cstate.c (revision 882cdb06)
16aec1ad7SBorislav Petkov /*
2940b2f2fSBorislav Petkov  * Support cstate residency counters
36aec1ad7SBorislav Petkov  *
46aec1ad7SBorislav Petkov  * Copyright (C) 2015, Intel Corp.
56aec1ad7SBorislav Petkov  * Author: Kan Liang (kan.liang@intel.com)
66aec1ad7SBorislav Petkov  *
76aec1ad7SBorislav Petkov  * This library is free software; you can redistribute it and/or
86aec1ad7SBorislav Petkov  * modify it under the terms of the GNU Library General Public
96aec1ad7SBorislav Petkov  * License as published by the Free Software Foundation; either
106aec1ad7SBorislav Petkov  * version 2 of the License, or (at your option) any later version.
116aec1ad7SBorislav Petkov  *
126aec1ad7SBorislav Petkov  * This library is distributed in the hope that it will be useful,
136aec1ad7SBorislav Petkov  * but WITHOUT ANY WARRANTY; without even the implied warranty of
146aec1ad7SBorislav Petkov  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
156aec1ad7SBorislav Petkov  * Library General Public License for more details.
166aec1ad7SBorislav Petkov  *
176aec1ad7SBorislav Petkov  */
186aec1ad7SBorislav Petkov 
196aec1ad7SBorislav Petkov /*
206aec1ad7SBorislav Petkov  * This file exports cstate related free running (read-only) counters
216aec1ad7SBorislav Petkov  * for perf. These counters may be used simultaneously by other tools,
226aec1ad7SBorislav Petkov  * such as turbostat. However, it still makes sense to implement them
236aec1ad7SBorislav Petkov  * in perf. Because we can conveniently collect them together with
246aec1ad7SBorislav Petkov  * other events, and allow to use them from tools without special MSR
256aec1ad7SBorislav Petkov  * access code.
266aec1ad7SBorislav Petkov  *
276aec1ad7SBorislav Petkov  * The events only support system-wide mode counting. There is no
286aec1ad7SBorislav Petkov  * sampling support because it is not supported by the hardware.
296aec1ad7SBorislav Petkov  *
306aec1ad7SBorislav Petkov  * According to counters' scope and category, two PMUs are registered
316aec1ad7SBorislav Petkov  * with the perf_event core subsystem.
326aec1ad7SBorislav Petkov  *  - 'cstate_core': The counter is available for each physical core.
336aec1ad7SBorislav Petkov  *    The counters include CORE_C*_RESIDENCY.
346aec1ad7SBorislav Petkov  *  - 'cstate_pkg': The counter is available for each physical package.
356aec1ad7SBorislav Petkov  *    The counters include PKG_C*_RESIDENCY.
366aec1ad7SBorislav Petkov  *
376aec1ad7SBorislav Petkov  * All of these counters are specified in the Intel® 64 and IA-32
386aec1ad7SBorislav Petkov  * Architectures Software Developer's Manual Vol3b.
396aec1ad7SBorislav Petkov  *
406aec1ad7SBorislav Petkov  * Model specific counters:
416aec1ad7SBorislav Petkov  *	MSR_CORE_C1_RES: CORE C1 Residency Counter
426aec1ad7SBorislav Petkov  *			 perf code: 0x00
432da202aaSKan Liang  *			 Available model: SLM,AMT,GLM,CNL,ICX,TNT,ADL,RPL
4401f2ea5bSKan Liang  *					  MTL
456aec1ad7SBorislav Petkov  *			 Scope: Core (each processor core has a MSR)
466aec1ad7SBorislav Petkov  *	MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter
476aec1ad7SBorislav Petkov  *			       perf code: 0x01
481159e094SHarry Pan  *			       Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,GLM,
49ecf71fbcSKan Liang  *						CNL,KBL,CML,TNT
506aec1ad7SBorislav Petkov  *			       Scope: Core
516aec1ad7SBorislav Petkov  *	MSR_CORE_C6_RESIDENCY: CORE C6 Residency Counter
526aec1ad7SBorislav Petkov  *			       perf code: 0x02
531159e094SHarry Pan  *			       Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
5487bf399fSZhang Rui  *						SKL,KNL,GLM,CNL,KBL,CML,ICL,ICX,
5501f2ea5bSKan Liang  *						TGL,TNT,RKL,ADL,RPL,SPR,MTL
566aec1ad7SBorislav Petkov  *			       Scope: Core
576aec1ad7SBorislav Petkov  *	MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
586aec1ad7SBorislav Petkov  *			       perf code: 0x03
59f1857a24SKan Liang  *			       Available model: SNB,IVB,HSW,BDW,SKL,CNL,KBL,CML,
6001f2ea5bSKan Liang  *						ICL,TGL,RKL,ADL,RPL,MTL
616aec1ad7SBorislav Petkov  *			       Scope: Core
626aec1ad7SBorislav Petkov  *	MSR_PKG_C2_RESIDENCY:  Package C2 Residency Counter.
636aec1ad7SBorislav Petkov  *			       perf code: 0x00
641ffa6c04SKan Liang  *			       Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM,CNL,
652da202aaSKan Liang  *						KBL,CML,ICL,ICX,TGL,TNT,RKL,ADL,
6601f2ea5bSKan Liang  *						RPL,SPR,MTL
676aec1ad7SBorislav Petkov  *			       Scope: Package (physical package)
686aec1ad7SBorislav Petkov  *	MSR_PKG_C3_RESIDENCY:  Package C3 Residency Counter.
696aec1ad7SBorislav Petkov  *			       perf code: 0x01
701159e094SHarry Pan  *			       Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL,
71d0ca946bSKan Liang  *						GLM,CNL,KBL,CML,ICL,TGL,TNT,RKL,
7201f2ea5bSKan Liang  *						ADL,RPL,MTL
736aec1ad7SBorislav Petkov  *			       Scope: Package (physical package)
746aec1ad7SBorislav Petkov  *	MSR_PKG_C6_RESIDENCY:  Package C6 Residency Counter.
756aec1ad7SBorislav Petkov  *			       perf code: 0x02
76ecf71fbcSKan Liang  *			       Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
7787bf399fSZhang Rui  *						SKL,KNL,GLM,CNL,KBL,CML,ICL,ICX,
7801f2ea5bSKan Liang  *						TGL,TNT,RKL,ADL,RPL,SPR,MTL
796aec1ad7SBorislav Petkov  *			       Scope: Package (physical package)
806aec1ad7SBorislav Petkov  *	MSR_PKG_C7_RESIDENCY:  Package C7 Residency Counter.
816aec1ad7SBorislav Petkov  *			       perf code: 0x03
821ffa6c04SKan Liang  *			       Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,CNL,
8301f2ea5bSKan Liang  *						KBL,CML,ICL,TGL,RKL,ADL,RPL,MTL
846aec1ad7SBorislav Petkov  *			       Scope: Package (physical package)
856aec1ad7SBorislav Petkov  *	MSR_PKG_C8_RESIDENCY:  Package C8 Residency Counter.
866aec1ad7SBorislav Petkov  *			       perf code: 0x04
87d0ca946bSKan Liang  *			       Available model: HSW ULT,KBL,CNL,CML,ICL,TGL,RKL,
8801f2ea5bSKan Liang  *						ADL,RPL,MTL
896aec1ad7SBorislav Petkov  *			       Scope: Package (physical package)
906aec1ad7SBorislav Petkov  *	MSR_PKG_C9_RESIDENCY:  Package C9 Residency Counter.
916aec1ad7SBorislav Petkov  *			       perf code: 0x05
92d0ca946bSKan Liang  *			       Available model: HSW ULT,KBL,CNL,CML,ICL,TGL,RKL,
9301f2ea5bSKan Liang  *						ADL,RPL,MTL
946aec1ad7SBorislav Petkov  *			       Scope: Package (physical package)
956aec1ad7SBorislav Petkov  *	MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
966aec1ad7SBorislav Petkov  *			       perf code: 0x06
97ecf71fbcSKan Liang  *			       Available model: HSW ULT,KBL,GLM,CNL,CML,ICL,TGL,
9801f2ea5bSKan Liang  *						TNT,RKL,ADL,RPL,MTL
996aec1ad7SBorislav Petkov  *			       Scope: Package (physical package)
1006aec1ad7SBorislav Petkov  *
1016aec1ad7SBorislav Petkov  */
1026aec1ad7SBorislav Petkov 
1036aec1ad7SBorislav Petkov #include <linux/module.h>
1046aec1ad7SBorislav Petkov #include <linux/slab.h>
1056aec1ad7SBorislav Petkov #include <linux/perf_event.h>
106a5f81290SPeter Zijlstra #include <linux/nospec.h>
1076aec1ad7SBorislav Petkov #include <asm/cpu_device_id.h>
108bf4ad541SDave Hansen #include <asm/intel-family.h>
10927f6d22bSBorislav Petkov #include "../perf_event.h"
1108f2a28c5SJiri Olsa #include "../probe.h"
1116aec1ad7SBorislav Petkov 
112c7afba32SThomas Gleixner MODULE_LICENSE("GPL");
113c7afba32SThomas Gleixner 
1146aec1ad7SBorislav Petkov #define DEFINE_CSTATE_FORMAT_ATTR(_var, _name, _format)		\
115ebd19fc3SSami Tolvanen static ssize_t __cstate_##_var##_show(struct device *dev,	\
116ebd19fc3SSami Tolvanen 				struct device_attribute *attr,	\
1176aec1ad7SBorislav Petkov 				char *page)			\
1186aec1ad7SBorislav Petkov {								\
1196aec1ad7SBorislav Petkov 	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);		\
1206aec1ad7SBorislav Petkov 	return sprintf(page, _format "\n");			\
1216aec1ad7SBorislav Petkov }								\
122ebd19fc3SSami Tolvanen static struct device_attribute format_attr_##_var =		\
1236aec1ad7SBorislav Petkov 	__ATTR(_name, 0444, __cstate_##_var##_show, NULL)
1246aec1ad7SBorislav Petkov 
1256aec1ad7SBorislav Petkov static ssize_t cstate_get_attr_cpumask(struct device *dev,
1266aec1ad7SBorislav Petkov 				       struct device_attribute *attr,
1276aec1ad7SBorislav Petkov 				       char *buf);
1286aec1ad7SBorislav Petkov 
129424646eeSThomas Gleixner /* Model -> events mapping */
130424646eeSThomas Gleixner struct cstate_model {
131424646eeSThomas Gleixner 	unsigned long		core_events;
132424646eeSThomas Gleixner 	unsigned long		pkg_events;
133424646eeSThomas Gleixner 	unsigned long		quirks;
134424646eeSThomas Gleixner };
135424646eeSThomas Gleixner 
136424646eeSThomas Gleixner /* Quirk flags */
137424646eeSThomas Gleixner #define SLM_PKG_C6_USE_C7_MSR	(1UL << 0)
138889882bcSLukasz Odzioba #define KNL_CORE_C6_MSR		(1UL << 1)
139424646eeSThomas Gleixner 
1406aec1ad7SBorislav Petkov struct perf_cstate_msr {
1416aec1ad7SBorislav Petkov 	u64	msr;
1426aec1ad7SBorislav Petkov 	struct	perf_pmu_events_attr *attr;
1436aec1ad7SBorislav Petkov };
1446aec1ad7SBorislav Petkov 
1456aec1ad7SBorislav Petkov 
1466aec1ad7SBorislav Petkov /* cstate_core PMU */
1476aec1ad7SBorislav Petkov static struct pmu cstate_core_pmu;
1486aec1ad7SBorislav Petkov static bool has_cstate_core;
1496aec1ad7SBorislav Petkov 
150424646eeSThomas Gleixner enum perf_cstate_core_events {
1516aec1ad7SBorislav Petkov 	PERF_CSTATE_CORE_C1_RES = 0,
1526aec1ad7SBorislav Petkov 	PERF_CSTATE_CORE_C3_RES,
1536aec1ad7SBorislav Petkov 	PERF_CSTATE_CORE_C6_RES,
1546aec1ad7SBorislav Petkov 	PERF_CSTATE_CORE_C7_RES,
1556aec1ad7SBorislav Petkov 
1566aec1ad7SBorislav Petkov 	PERF_CSTATE_CORE_EVENT_MAX,
1576aec1ad7SBorislav Petkov };
1586aec1ad7SBorislav Petkov 
1598f2a28c5SJiri Olsa PMU_EVENT_ATTR_STRING(c1-residency, attr_cstate_core_c1, "event=0x00");
1608f2a28c5SJiri Olsa PMU_EVENT_ATTR_STRING(c3-residency, attr_cstate_core_c3, "event=0x01");
1618f2a28c5SJiri Olsa PMU_EVENT_ATTR_STRING(c6-residency, attr_cstate_core_c6, "event=0x02");
1628f2a28c5SJiri Olsa PMU_EVENT_ATTR_STRING(c7-residency, attr_cstate_core_c7, "event=0x03");
1636aec1ad7SBorislav Petkov 
1648f2a28c5SJiri Olsa static unsigned long core_msr_mask;
1658f2a28c5SJiri Olsa 
1668f2a28c5SJiri Olsa PMU_EVENT_GROUP(events, cstate_core_c1);
1678f2a28c5SJiri Olsa PMU_EVENT_GROUP(events, cstate_core_c3);
1688f2a28c5SJiri Olsa PMU_EVENT_GROUP(events, cstate_core_c6);
1698f2a28c5SJiri Olsa PMU_EVENT_GROUP(events, cstate_core_c7);
1708f2a28c5SJiri Olsa 
test_msr(int idx,void * data)1718f2a28c5SJiri Olsa static bool test_msr(int idx, void *data)
1728f2a28c5SJiri Olsa {
1738f2a28c5SJiri Olsa 	return test_bit(idx, (unsigned long *) data);
1748f2a28c5SJiri Olsa }
1758f2a28c5SJiri Olsa 
1768f2a28c5SJiri Olsa static struct perf_msr core_msr[] = {
1778f2a28c5SJiri Olsa 	[PERF_CSTATE_CORE_C1_RES] = { MSR_CORE_C1_RES,		&group_cstate_core_c1,	test_msr },
1788f2a28c5SJiri Olsa 	[PERF_CSTATE_CORE_C3_RES] = { MSR_CORE_C3_RESIDENCY,	&group_cstate_core_c3,	test_msr },
1798f2a28c5SJiri Olsa 	[PERF_CSTATE_CORE_C6_RES] = { MSR_CORE_C6_RESIDENCY,	&group_cstate_core_c6,	test_msr },
1808f2a28c5SJiri Olsa 	[PERF_CSTATE_CORE_C7_RES] = { MSR_CORE_C7_RESIDENCY,	&group_cstate_core_c7,	test_msr },
1816aec1ad7SBorislav Petkov };
1826aec1ad7SBorislav Petkov 
1838f2a28c5SJiri Olsa static struct attribute *attrs_empty[] = {
1846aec1ad7SBorislav Petkov 	NULL,
1856aec1ad7SBorislav Petkov };
1866aec1ad7SBorislav Petkov 
1878f2a28c5SJiri Olsa /*
1888f2a28c5SJiri Olsa  * There are no default events, but we need to create
1898f2a28c5SJiri Olsa  * "events" group (with empty attrs) before updating
1908f2a28c5SJiri Olsa  * it with detected events.
1918f2a28c5SJiri Olsa  */
1926aec1ad7SBorislav Petkov static struct attribute_group core_events_attr_group = {
1936aec1ad7SBorislav Petkov 	.name = "events",
1948f2a28c5SJiri Olsa 	.attrs = attrs_empty,
1956aec1ad7SBorislav Petkov };
1966aec1ad7SBorislav Petkov 
1976aec1ad7SBorislav Petkov DEFINE_CSTATE_FORMAT_ATTR(core_event, event, "config:0-63");
1986aec1ad7SBorislav Petkov static struct attribute *core_format_attrs[] = {
1996aec1ad7SBorislav Petkov 	&format_attr_core_event.attr,
2006aec1ad7SBorislav Petkov 	NULL,
2016aec1ad7SBorislav Petkov };
2026aec1ad7SBorislav Petkov 
2036aec1ad7SBorislav Petkov static struct attribute_group core_format_attr_group = {
2046aec1ad7SBorislav Petkov 	.name = "format",
2056aec1ad7SBorislav Petkov 	.attrs = core_format_attrs,
2066aec1ad7SBorislav Petkov };
2076aec1ad7SBorislav Petkov 
2086aec1ad7SBorislav Petkov static cpumask_t cstate_core_cpu_mask;
2096aec1ad7SBorislav Petkov static DEVICE_ATTR(cpumask, S_IRUGO, cstate_get_attr_cpumask, NULL);
2106aec1ad7SBorislav Petkov 
2116aec1ad7SBorislav Petkov static struct attribute *cstate_cpumask_attrs[] = {
2126aec1ad7SBorislav Petkov 	&dev_attr_cpumask.attr,
2136aec1ad7SBorislav Petkov 	NULL,
2146aec1ad7SBorislav Petkov };
2156aec1ad7SBorislav Petkov 
2166aec1ad7SBorislav Petkov static struct attribute_group cpumask_attr_group = {
2176aec1ad7SBorislav Petkov 	.attrs = cstate_cpumask_attrs,
2186aec1ad7SBorislav Petkov };
2196aec1ad7SBorislav Petkov 
2206aec1ad7SBorislav Petkov static const struct attribute_group *core_attr_groups[] = {
2216aec1ad7SBorislav Petkov 	&core_events_attr_group,
2226aec1ad7SBorislav Petkov 	&core_format_attr_group,
2236aec1ad7SBorislav Petkov 	&cpumask_attr_group,
2246aec1ad7SBorislav Petkov 	NULL,
2256aec1ad7SBorislav Petkov };
2266aec1ad7SBorislav Petkov 
2276aec1ad7SBorislav Petkov /* cstate_pkg PMU */
2286aec1ad7SBorislav Petkov static struct pmu cstate_pkg_pmu;
2296aec1ad7SBorislav Petkov static bool has_cstate_pkg;
2306aec1ad7SBorislav Petkov 
231424646eeSThomas Gleixner enum perf_cstate_pkg_events {
2326aec1ad7SBorislav Petkov 	PERF_CSTATE_PKG_C2_RES = 0,
2336aec1ad7SBorislav Petkov 	PERF_CSTATE_PKG_C3_RES,
2346aec1ad7SBorislav Petkov 	PERF_CSTATE_PKG_C6_RES,
2356aec1ad7SBorislav Petkov 	PERF_CSTATE_PKG_C7_RES,
2366aec1ad7SBorislav Petkov 	PERF_CSTATE_PKG_C8_RES,
2376aec1ad7SBorislav Petkov 	PERF_CSTATE_PKG_C9_RES,
2386aec1ad7SBorislav Petkov 	PERF_CSTATE_PKG_C10_RES,
2396aec1ad7SBorislav Petkov 
2406aec1ad7SBorislav Petkov 	PERF_CSTATE_PKG_EVENT_MAX,
2416aec1ad7SBorislav Petkov };
2426aec1ad7SBorislav Petkov 
2438f2a28c5SJiri Olsa PMU_EVENT_ATTR_STRING(c2-residency,  attr_cstate_pkg_c2,  "event=0x00");
2448f2a28c5SJiri Olsa PMU_EVENT_ATTR_STRING(c3-residency,  attr_cstate_pkg_c3,  "event=0x01");
2458f2a28c5SJiri Olsa PMU_EVENT_ATTR_STRING(c6-residency,  attr_cstate_pkg_c6,  "event=0x02");
2468f2a28c5SJiri Olsa PMU_EVENT_ATTR_STRING(c7-residency,  attr_cstate_pkg_c7,  "event=0x03");
2478f2a28c5SJiri Olsa PMU_EVENT_ATTR_STRING(c8-residency,  attr_cstate_pkg_c8,  "event=0x04");
2488f2a28c5SJiri Olsa PMU_EVENT_ATTR_STRING(c9-residency,  attr_cstate_pkg_c9,  "event=0x05");
2498f2a28c5SJiri Olsa PMU_EVENT_ATTR_STRING(c10-residency, attr_cstate_pkg_c10, "event=0x06");
2506aec1ad7SBorislav Petkov 
2518f2a28c5SJiri Olsa static unsigned long pkg_msr_mask;
2526aec1ad7SBorislav Petkov 
2538f2a28c5SJiri Olsa PMU_EVENT_GROUP(events, cstate_pkg_c2);
2548f2a28c5SJiri Olsa PMU_EVENT_GROUP(events, cstate_pkg_c3);
2558f2a28c5SJiri Olsa PMU_EVENT_GROUP(events, cstate_pkg_c6);
2568f2a28c5SJiri Olsa PMU_EVENT_GROUP(events, cstate_pkg_c7);
2578f2a28c5SJiri Olsa PMU_EVENT_GROUP(events, cstate_pkg_c8);
2588f2a28c5SJiri Olsa PMU_EVENT_GROUP(events, cstate_pkg_c9);
2598f2a28c5SJiri Olsa PMU_EVENT_GROUP(events, cstate_pkg_c10);
2608f2a28c5SJiri Olsa 
2618f2a28c5SJiri Olsa static struct perf_msr pkg_msr[] = {
2628f2a28c5SJiri Olsa 	[PERF_CSTATE_PKG_C2_RES]  = { MSR_PKG_C2_RESIDENCY,	&group_cstate_pkg_c2,	test_msr },
2638f2a28c5SJiri Olsa 	[PERF_CSTATE_PKG_C3_RES]  = { MSR_PKG_C3_RESIDENCY,	&group_cstate_pkg_c3,	test_msr },
2648f2a28c5SJiri Olsa 	[PERF_CSTATE_PKG_C6_RES]  = { MSR_PKG_C6_RESIDENCY,	&group_cstate_pkg_c6,	test_msr },
2658f2a28c5SJiri Olsa 	[PERF_CSTATE_PKG_C7_RES]  = { MSR_PKG_C7_RESIDENCY,	&group_cstate_pkg_c7,	test_msr },
2668f2a28c5SJiri Olsa 	[PERF_CSTATE_PKG_C8_RES]  = { MSR_PKG_C8_RESIDENCY,	&group_cstate_pkg_c8,	test_msr },
2678f2a28c5SJiri Olsa 	[PERF_CSTATE_PKG_C9_RES]  = { MSR_PKG_C9_RESIDENCY,	&group_cstate_pkg_c9,	test_msr },
2688f2a28c5SJiri Olsa 	[PERF_CSTATE_PKG_C10_RES] = { MSR_PKG_C10_RESIDENCY,	&group_cstate_pkg_c10,	test_msr },
2696aec1ad7SBorislav Petkov };
2706aec1ad7SBorislav Petkov 
2716aec1ad7SBorislav Petkov static struct attribute_group pkg_events_attr_group = {
2726aec1ad7SBorislav Petkov 	.name = "events",
2738f2a28c5SJiri Olsa 	.attrs = attrs_empty,
2746aec1ad7SBorislav Petkov };
2756aec1ad7SBorislav Petkov 
2766aec1ad7SBorislav Petkov DEFINE_CSTATE_FORMAT_ATTR(pkg_event, event, "config:0-63");
2776aec1ad7SBorislav Petkov static struct attribute *pkg_format_attrs[] = {
2786aec1ad7SBorislav Petkov 	&format_attr_pkg_event.attr,
2796aec1ad7SBorislav Petkov 	NULL,
2806aec1ad7SBorislav Petkov };
2816aec1ad7SBorislav Petkov static struct attribute_group pkg_format_attr_group = {
2826aec1ad7SBorislav Petkov 	.name = "format",
2836aec1ad7SBorislav Petkov 	.attrs = pkg_format_attrs,
2846aec1ad7SBorislav Petkov };
2856aec1ad7SBorislav Petkov 
2866aec1ad7SBorislav Petkov static cpumask_t cstate_pkg_cpu_mask;
2876aec1ad7SBorislav Petkov 
2886aec1ad7SBorislav Petkov static const struct attribute_group *pkg_attr_groups[] = {
2896aec1ad7SBorislav Petkov 	&pkg_events_attr_group,
2906aec1ad7SBorislav Petkov 	&pkg_format_attr_group,
2916aec1ad7SBorislav Petkov 	&cpumask_attr_group,
2926aec1ad7SBorislav Petkov 	NULL,
2936aec1ad7SBorislav Petkov };
2946aec1ad7SBorislav Petkov 
cstate_get_attr_cpumask(struct device * dev,struct device_attribute * attr,char * buf)2956aec1ad7SBorislav Petkov static ssize_t cstate_get_attr_cpumask(struct device *dev,
2966aec1ad7SBorislav Petkov 				       struct device_attribute *attr,
2976aec1ad7SBorislav Petkov 				       char *buf)
2986aec1ad7SBorislav Petkov {
2996aec1ad7SBorislav Petkov 	struct pmu *pmu = dev_get_drvdata(dev);
3006aec1ad7SBorislav Petkov 
3016aec1ad7SBorislav Petkov 	if (pmu == &cstate_core_pmu)
3026aec1ad7SBorislav Petkov 		return cpumap_print_to_pagebuf(true, buf, &cstate_core_cpu_mask);
3036aec1ad7SBorislav Petkov 	else if (pmu == &cstate_pkg_pmu)
3046aec1ad7SBorislav Petkov 		return cpumap_print_to_pagebuf(true, buf, &cstate_pkg_cpu_mask);
3056aec1ad7SBorislav Petkov 	else
3066aec1ad7SBorislav Petkov 		return 0;
3076aec1ad7SBorislav Petkov }
3086aec1ad7SBorislav Petkov 
cstate_pmu_event_init(struct perf_event * event)3096aec1ad7SBorislav Petkov static int cstate_pmu_event_init(struct perf_event *event)
3106aec1ad7SBorislav Petkov {
3116aec1ad7SBorislav Petkov 	u64 cfg = event->attr.config;
31249de0493SThomas Gleixner 	int cpu;
3136aec1ad7SBorislav Petkov 
3146aec1ad7SBorislav Petkov 	if (event->attr.type != event->pmu->type)
3156aec1ad7SBorislav Petkov 		return -ENOENT;
3166aec1ad7SBorislav Petkov 
3176aec1ad7SBorislav Petkov 	/* unsupported modes and filters */
3182ff40250SAndrew Murray 	if (event->attr.sample_period) /* no sampling */
3196aec1ad7SBorislav Petkov 		return -EINVAL;
3206aec1ad7SBorislav Petkov 
32149de0493SThomas Gleixner 	if (event->cpu < 0)
32249de0493SThomas Gleixner 		return -EINVAL;
32349de0493SThomas Gleixner 
3246aec1ad7SBorislav Petkov 	if (event->pmu == &cstate_core_pmu) {
3256aec1ad7SBorislav Petkov 		if (cfg >= PERF_CSTATE_CORE_EVENT_MAX)
3266aec1ad7SBorislav Petkov 			return -EINVAL;
3278f2a28c5SJiri Olsa 		cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_CORE_EVENT_MAX);
3288f2a28c5SJiri Olsa 		if (!(core_msr_mask & (1 << cfg)))
3296aec1ad7SBorislav Petkov 			return -EINVAL;
3306aec1ad7SBorislav Petkov 		event->hw.event_base = core_msr[cfg].msr;
33149de0493SThomas Gleixner 		cpu = cpumask_any_and(&cstate_core_cpu_mask,
33249de0493SThomas Gleixner 				      topology_sibling_cpumask(event->cpu));
3336aec1ad7SBorislav Petkov 	} else if (event->pmu == &cstate_pkg_pmu) {
3346aec1ad7SBorislav Petkov 		if (cfg >= PERF_CSTATE_PKG_EVENT_MAX)
3356aec1ad7SBorislav Petkov 			return -EINVAL;
336a5f81290SPeter Zijlstra 		cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_PKG_EVENT_MAX);
3378f2a28c5SJiri Olsa 		if (!(pkg_msr_mask & (1 << cfg)))
3386aec1ad7SBorislav Petkov 			return -EINVAL;
3396aec1ad7SBorislav Petkov 		event->hw.event_base = pkg_msr[cfg].msr;
34049de0493SThomas Gleixner 		cpu = cpumask_any_and(&cstate_pkg_cpu_mask,
341cb63ba0fSKan Liang 				      topology_die_cpumask(event->cpu));
34249de0493SThomas Gleixner 	} else {
3436aec1ad7SBorislav Petkov 		return -ENOENT;
34449de0493SThomas Gleixner 	}
3456aec1ad7SBorislav Petkov 
34649de0493SThomas Gleixner 	if (cpu >= nr_cpu_ids)
34749de0493SThomas Gleixner 		return -ENODEV;
34849de0493SThomas Gleixner 
34949de0493SThomas Gleixner 	event->cpu = cpu;
3506aec1ad7SBorislav Petkov 	event->hw.config = cfg;
3516aec1ad7SBorislav Petkov 	event->hw.idx = -1;
35249de0493SThomas Gleixner 	return 0;
3536aec1ad7SBorislav Petkov }
3546aec1ad7SBorislav Petkov 
cstate_pmu_read_counter(struct perf_event * event)3556aec1ad7SBorislav Petkov static inline u64 cstate_pmu_read_counter(struct perf_event *event)
3566aec1ad7SBorislav Petkov {
3576aec1ad7SBorislav Petkov 	u64 val;
3586aec1ad7SBorislav Petkov 
3596aec1ad7SBorislav Petkov 	rdmsrl(event->hw.event_base, val);
3606aec1ad7SBorislav Petkov 	return val;
3616aec1ad7SBorislav Petkov }
3626aec1ad7SBorislav Petkov 
cstate_pmu_event_update(struct perf_event * event)3636aec1ad7SBorislav Petkov static void cstate_pmu_event_update(struct perf_event *event)
3646aec1ad7SBorislav Petkov {
3656aec1ad7SBorislav Petkov 	struct hw_perf_event *hwc = &event->hw;
3666aec1ad7SBorislav Petkov 	u64 prev_raw_count, new_raw_count;
3676aec1ad7SBorislav Petkov 
3686aec1ad7SBorislav Petkov 	prev_raw_count = local64_read(&hwc->prev_count);
3694c1c9deaSUros Bizjak 	do {
3706aec1ad7SBorislav Petkov 		new_raw_count = cstate_pmu_read_counter(event);
3714c1c9deaSUros Bizjak 	} while (!local64_try_cmpxchg(&hwc->prev_count,
3724c1c9deaSUros Bizjak 				      &prev_raw_count, new_raw_count));
3736aec1ad7SBorislav Petkov 
3746aec1ad7SBorislav Petkov 	local64_add(new_raw_count - prev_raw_count, &event->count);
3756aec1ad7SBorislav Petkov }
3766aec1ad7SBorislav Petkov 
cstate_pmu_event_start(struct perf_event * event,int mode)3776aec1ad7SBorislav Petkov static void cstate_pmu_event_start(struct perf_event *event, int mode)
3786aec1ad7SBorislav Petkov {
3796aec1ad7SBorislav Petkov 	local64_set(&event->hw.prev_count, cstate_pmu_read_counter(event));
3806aec1ad7SBorislav Petkov }
3816aec1ad7SBorislav Petkov 
cstate_pmu_event_stop(struct perf_event * event,int mode)3826aec1ad7SBorislav Petkov static void cstate_pmu_event_stop(struct perf_event *event, int mode)
3836aec1ad7SBorislav Petkov {
3846aec1ad7SBorislav Petkov 	cstate_pmu_event_update(event);
3856aec1ad7SBorislav Petkov }
3866aec1ad7SBorislav Petkov 
cstate_pmu_event_del(struct perf_event * event,int mode)3876aec1ad7SBorislav Petkov static void cstate_pmu_event_del(struct perf_event *event, int mode)
3886aec1ad7SBorislav Petkov {
3896aec1ad7SBorislav Petkov 	cstate_pmu_event_stop(event, PERF_EF_UPDATE);
3906aec1ad7SBorislav Petkov }
3916aec1ad7SBorislav Petkov 
cstate_pmu_event_add(struct perf_event * event,int mode)3926aec1ad7SBorislav Petkov static int cstate_pmu_event_add(struct perf_event *event, int mode)
3936aec1ad7SBorislav Petkov {
3946aec1ad7SBorislav Petkov 	if (mode & PERF_EF_START)
3956aec1ad7SBorislav Petkov 		cstate_pmu_event_start(event, mode);
3966aec1ad7SBorislav Petkov 
3976aec1ad7SBorislav Petkov 	return 0;
3986aec1ad7SBorislav Petkov }
3996aec1ad7SBorislav Petkov 
40049de0493SThomas Gleixner /*
40149de0493SThomas Gleixner  * Check if exiting cpu is the designated reader. If so migrate the
40249de0493SThomas Gleixner  * events when there is a valid target available
40349de0493SThomas Gleixner  */
cstate_cpu_exit(unsigned int cpu)40477c34ef1SSebastian Andrzej Siewior static int cstate_cpu_exit(unsigned int cpu)
4056aec1ad7SBorislav Petkov {
40649de0493SThomas Gleixner 	unsigned int target;
4076aec1ad7SBorislav Petkov 
40849de0493SThomas Gleixner 	if (has_cstate_core &&
40949de0493SThomas Gleixner 	    cpumask_test_and_clear_cpu(cpu, &cstate_core_cpu_mask)) {
4106aec1ad7SBorislav Petkov 
41149de0493SThomas Gleixner 		target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
41249de0493SThomas Gleixner 		/* Migrate events if there is a valid target */
41349de0493SThomas Gleixner 		if (target < nr_cpu_ids) {
4146aec1ad7SBorislav Petkov 			cpumask_set_cpu(target, &cstate_core_cpu_mask);
4156aec1ad7SBorislav Petkov 			perf_pmu_migrate_context(&cstate_core_pmu, cpu, target);
4166aec1ad7SBorislav Petkov 		}
4176aec1ad7SBorislav Petkov 	}
41849de0493SThomas Gleixner 
41949de0493SThomas Gleixner 	if (has_cstate_pkg &&
42049de0493SThomas Gleixner 	    cpumask_test_and_clear_cpu(cpu, &cstate_pkg_cpu_mask)) {
42149de0493SThomas Gleixner 
422cb63ba0fSKan Liang 		target = cpumask_any_but(topology_die_cpumask(cpu), cpu);
42349de0493SThomas Gleixner 		/* Migrate events if there is a valid target */
42449de0493SThomas Gleixner 		if (target < nr_cpu_ids) {
4256aec1ad7SBorislav Petkov 			cpumask_set_cpu(target, &cstate_pkg_cpu_mask);
4266aec1ad7SBorislav Petkov 			perf_pmu_migrate_context(&cstate_pkg_pmu, cpu, target);
4276aec1ad7SBorislav Petkov 		}
4286aec1ad7SBorislav Petkov 	}
42977c34ef1SSebastian Andrzej Siewior 	return 0;
43049de0493SThomas Gleixner }
4316aec1ad7SBorislav Petkov 
cstate_cpu_init(unsigned int cpu)43277c34ef1SSebastian Andrzej Siewior static int cstate_cpu_init(unsigned int cpu)
4336aec1ad7SBorislav Petkov {
43449de0493SThomas Gleixner 	unsigned int target;
4356aec1ad7SBorislav Petkov 
43649de0493SThomas Gleixner 	/*
43749de0493SThomas Gleixner 	 * If this is the first online thread of that core, set it in
43849de0493SThomas Gleixner 	 * the core cpu mask as the designated reader.
43949de0493SThomas Gleixner 	 */
44049de0493SThomas Gleixner 	target = cpumask_any_and(&cstate_core_cpu_mask,
44149de0493SThomas Gleixner 				 topology_sibling_cpumask(cpu));
44249de0493SThomas Gleixner 
44349de0493SThomas Gleixner 	if (has_cstate_core && target >= nr_cpu_ids)
4446aec1ad7SBorislav Petkov 		cpumask_set_cpu(cpu, &cstate_core_cpu_mask);
4456aec1ad7SBorislav Petkov 
44649de0493SThomas Gleixner 	/*
44749de0493SThomas Gleixner 	 * If this is the first online thread of that package, set it
44849de0493SThomas Gleixner 	 * in the package cpu mask as the designated reader.
44949de0493SThomas Gleixner 	 */
45049de0493SThomas Gleixner 	target = cpumask_any_and(&cstate_pkg_cpu_mask,
451cb63ba0fSKan Liang 				 topology_die_cpumask(cpu));
45249de0493SThomas Gleixner 	if (has_cstate_pkg && target >= nr_cpu_ids)
4536aec1ad7SBorislav Petkov 		cpumask_set_cpu(cpu, &cstate_pkg_cpu_mask);
4546aec1ad7SBorislav Petkov 
45577c34ef1SSebastian Andrzej Siewior 	return 0;
4566aec1ad7SBorislav Petkov }
457c7afba32SThomas Gleixner 
458d9f3b450SValdis Klētnieks static const struct attribute_group *core_attr_update[] = {
4598f2a28c5SJiri Olsa 	&group_cstate_core_c1,
4608f2a28c5SJiri Olsa 	&group_cstate_core_c3,
4618f2a28c5SJiri Olsa 	&group_cstate_core_c6,
4628f2a28c5SJiri Olsa 	&group_cstate_core_c7,
4638f2a28c5SJiri Olsa 	NULL,
4648f2a28c5SJiri Olsa };
4658f2a28c5SJiri Olsa 
466d9f3b450SValdis Klētnieks static const struct attribute_group *pkg_attr_update[] = {
4678f2a28c5SJiri Olsa 	&group_cstate_pkg_c2,
4688f2a28c5SJiri Olsa 	&group_cstate_pkg_c3,
4698f2a28c5SJiri Olsa 	&group_cstate_pkg_c6,
4708f2a28c5SJiri Olsa 	&group_cstate_pkg_c7,
4718f2a28c5SJiri Olsa 	&group_cstate_pkg_c8,
4728f2a28c5SJiri Olsa 	&group_cstate_pkg_c9,
4738f2a28c5SJiri Olsa 	&group_cstate_pkg_c10,
4748f2a28c5SJiri Olsa 	NULL,
4758f2a28c5SJiri Olsa };
4768f2a28c5SJiri Olsa 
477424646eeSThomas Gleixner static struct pmu cstate_core_pmu = {
478424646eeSThomas Gleixner 	.attr_groups	= core_attr_groups,
4798f2a28c5SJiri Olsa 	.attr_update	= core_attr_update,
480424646eeSThomas Gleixner 	.name		= "cstate_core",
481424646eeSThomas Gleixner 	.task_ctx_nr	= perf_invalid_context,
482424646eeSThomas Gleixner 	.event_init	= cstate_pmu_event_init,
483424646eeSThomas Gleixner 	.add		= cstate_pmu_event_add,
484424646eeSThomas Gleixner 	.del		= cstate_pmu_event_del,
485424646eeSThomas Gleixner 	.start		= cstate_pmu_event_start,
486424646eeSThomas Gleixner 	.stop		= cstate_pmu_event_stop,
487424646eeSThomas Gleixner 	.read		= cstate_pmu_event_update,
4882ff40250SAndrew Murray 	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE,
48974545f63SDavid Carrillo-Cisneros 	.module		= THIS_MODULE,
490424646eeSThomas Gleixner };
491424646eeSThomas Gleixner 
492424646eeSThomas Gleixner static struct pmu cstate_pkg_pmu = {
493424646eeSThomas Gleixner 	.attr_groups	= pkg_attr_groups,
4948f2a28c5SJiri Olsa 	.attr_update	= pkg_attr_update,
495424646eeSThomas Gleixner 	.name		= "cstate_pkg",
496424646eeSThomas Gleixner 	.task_ctx_nr	= perf_invalid_context,
497424646eeSThomas Gleixner 	.event_init	= cstate_pmu_event_init,
498424646eeSThomas Gleixner 	.add		= cstate_pmu_event_add,
499424646eeSThomas Gleixner 	.del		= cstate_pmu_event_del,
500424646eeSThomas Gleixner 	.start		= cstate_pmu_event_start,
501424646eeSThomas Gleixner 	.stop		= cstate_pmu_event_stop,
502424646eeSThomas Gleixner 	.read		= cstate_pmu_event_update,
5032ff40250SAndrew Murray 	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE,
50474545f63SDavid Carrillo-Cisneros 	.module		= THIS_MODULE,
505424646eeSThomas Gleixner };
506424646eeSThomas Gleixner 
507424646eeSThomas Gleixner static const struct cstate_model nhm_cstates __initconst = {
508424646eeSThomas Gleixner 	.core_events		= BIT(PERF_CSTATE_CORE_C3_RES) |
509424646eeSThomas Gleixner 				  BIT(PERF_CSTATE_CORE_C6_RES),
510424646eeSThomas Gleixner 
511424646eeSThomas Gleixner 	.pkg_events		= BIT(PERF_CSTATE_PKG_C3_RES) |
512424646eeSThomas Gleixner 				  BIT(PERF_CSTATE_PKG_C6_RES) |
513424646eeSThomas Gleixner 				  BIT(PERF_CSTATE_PKG_C7_RES),
514424646eeSThomas Gleixner };
515424646eeSThomas Gleixner 
516424646eeSThomas Gleixner static const struct cstate_model snb_cstates __initconst = {
517424646eeSThomas Gleixner 	.core_events		= BIT(PERF_CSTATE_CORE_C3_RES) |
518424646eeSThomas Gleixner 				  BIT(PERF_CSTATE_CORE_C6_RES) |
519424646eeSThomas Gleixner 				  BIT(PERF_CSTATE_CORE_C7_RES),
520424646eeSThomas Gleixner 
521424646eeSThomas Gleixner 	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
522424646eeSThomas Gleixner 				  BIT(PERF_CSTATE_PKG_C3_RES) |
523424646eeSThomas Gleixner 				  BIT(PERF_CSTATE_PKG_C6_RES) |
524424646eeSThomas Gleixner 				  BIT(PERF_CSTATE_PKG_C7_RES),
525424646eeSThomas Gleixner };
526424646eeSThomas Gleixner 
527424646eeSThomas Gleixner static const struct cstate_model hswult_cstates __initconst = {
528424646eeSThomas Gleixner 	.core_events		= BIT(PERF_CSTATE_CORE_C3_RES) |
529424646eeSThomas Gleixner 				  BIT(PERF_CSTATE_CORE_C6_RES) |
530424646eeSThomas Gleixner 				  BIT(PERF_CSTATE_CORE_C7_RES),
531424646eeSThomas Gleixner 
532424646eeSThomas Gleixner 	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
533424646eeSThomas Gleixner 				  BIT(PERF_CSTATE_PKG_C3_RES) |
534424646eeSThomas Gleixner 				  BIT(PERF_CSTATE_PKG_C6_RES) |
535424646eeSThomas Gleixner 				  BIT(PERF_CSTATE_PKG_C7_RES) |
536424646eeSThomas Gleixner 				  BIT(PERF_CSTATE_PKG_C8_RES) |
537424646eeSThomas Gleixner 				  BIT(PERF_CSTATE_PKG_C9_RES) |
538424646eeSThomas Gleixner 				  BIT(PERF_CSTATE_PKG_C10_RES),
539424646eeSThomas Gleixner };
540424646eeSThomas Gleixner 
/*
 * Cannon Lake layout: like HSW-ULT but with an additional core C1
 * residency counter.
 */
static const struct cstate_model cnl_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C1_RES) |
				  BIT(PERF_CSTATE_CORE_C3_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES) |
				  BIT(PERF_CSTATE_CORE_C7_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C7_RES) |
				  BIT(PERF_CSTATE_PKG_C8_RES) |
				  BIT(PERF_CSTATE_PKG_C9_RES) |
				  BIT(PERF_CSTATE_PKG_C10_RES),
};
5551159e094SHarry Pan 
/*
 * Ice Lake (client) layout: only core C6/C7 on the core side; full set
 * of package counters. Also used for TGL/RKL entries below.
 */
static const struct cstate_model icl_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C6_RES) |
				  BIT(PERF_CSTATE_CORE_C7_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C7_RES) |
				  BIT(PERF_CSTATE_PKG_C8_RES) |
				  BIT(PERF_CSTATE_PKG_C9_RES) |
				  BIT(PERF_CSTATE_PKG_C10_RES),
};
568f1857a24SKan Liang 
/*
 * Ice Lake server layout: core C1/C6 and package C2/C6 only. Also used
 * for SPR/EMR/GNR entries below.
 */
static const struct cstate_model icx_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C1_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES),
};
57687bf399fSZhang Rui 
/*
 * Alder Lake layout: core C1/C6/C7 plus the full package counter set.
 * Also used for RPL/MTL/Gracemont entries below.
 */
static const struct cstate_model adl_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C1_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES) |
				  BIT(PERF_CSTATE_CORE_C7_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C7_RES) |
				  BIT(PERF_CSTATE_PKG_C8_RES) |
				  BIT(PERF_CSTATE_PKG_C9_RES) |
				  BIT(PERF_CSTATE_PKG_C10_RES),
};
590d0ca946bSKan Liang 
/*
 * Silvermont/Airmont layout: core C1/C6, package C6 only. The quirk
 * makes cstate_probe() redirect the PKG C6 event to the C7 residency
 * MSR (see SLM_PKG_C6_USE_C7_MSR handling there).
 */
static const struct cstate_model slm_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C1_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C6_RES),
	.quirks			= SLM_PKG_C6_USE_C7_MSR,
};
598424646eeSThomas Gleixner 
599889882bcSLukasz Odzioba 
/*
 * Knights Landing/Mill layout: core C6 and package C2/C3/C6. The quirk
 * makes cstate_probe() use the KNL-specific C6 residency MSR (see
 * KNL_CORE_C6_MSR handling there).
 */
static const struct cstate_model knl_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C6_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES),
	.quirks			= KNL_CORE_C6_MSR,
};
608889882bcSLukasz Odzioba 
609889882bcSLukasz Odzioba 
/*
 * Goldmont layout: core C1/C3/C6 and package C2/C3/C6/C10. Also used
 * for the Tremont entries below.
 */
static const struct cstate_model glm_cstates __initconst = {
	.core_events		= BIT(PERF_CSTATE_CORE_C1_RES) |
				  BIT(PERF_CSTATE_CORE_C3_RES) |
				  BIT(PERF_CSTATE_CORE_C6_RES),

	.pkg_events		= BIT(PERF_CSTATE_PKG_C2_RES) |
				  BIT(PERF_CSTATE_PKG_C3_RES) |
				  BIT(PERF_CSTATE_PKG_C6_RES) |
				  BIT(PERF_CSTATE_PKG_C10_RES),
};
6205c10b048SHarry Pan 
621889882bcSLukasz Odzioba 
/*
 * CPU model -> counter-layout table. driver_data points at one of the
 * cstate_model definitions above; cstate_pmu_init() looks the boot CPU
 * up here and passes the match to cstate_probe().
 */
static const struct x86_cpu_id intel_cstates_match[] __initconst = {
	X86_MATCH_INTEL_FAM6_MODEL(NEHALEM,		&nhm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EP,		&nhm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EX,		&nhm_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(WESTMERE,		&nhm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(WESTMERE_EP,		&nhm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(WESTMERE_EX,		&nhm_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE,		&snb_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE_X,	&snb_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE,		&snb_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X,		&snb_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(HASWELL,		&snb_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X,		&snb_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_G,		&snb_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_L,		&hswult_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT,	&slm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_D,	&slm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT,	&slm_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL,		&snb_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D,		&snb_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_G,		&snb_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X,		&snb_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L,		&snb_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE,		&snb_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X,		&snb_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L,		&hswult_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE,		&hswult_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L,		&hswult_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE,		&hswult_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(CANNONLAKE_L,	&cnl_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL,	&knl_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM,	&knl_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT,	&glm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D,	&glm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS,	&glm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D,	&glm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT,	&glm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L,	&glm_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT,	&adl_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L,		&icl_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE,		&icl_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X,		&icx_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D,		&icx_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X,	&icx_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X,	&icx_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_X,	&icx_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_D,	&icx_cstates),

	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L,		&icl_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE,		&icl_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE,		&icl_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE,		&adl_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L,		&adl_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE,		&adl_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P,	&adl_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S,	&adl_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE,		&adl_cstates),
	X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L,	&adl_cstates),
	{ },
};
MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
696424646eeSThomas Gleixner 
/*
 * Apply model-specific MSR quirks, then probe which core/package
 * residency MSRs are actually readable and record the result in the
 * core_msr_mask/pkg_msr_mask and has_cstate_core/has_cstate_pkg
 * globals.
 *
 * @cm: counter layout for the detected CPU model.
 *
 * Returns 0 if at least one core or package event is usable,
 * -ENODEV otherwise.
 */
static int __init cstate_probe(const struct cstate_model *cm)
{
	/* SLM has different MSR for PKG C6 */
	if (cm->quirks & SLM_PKG_C6_USE_C7_MSR)
		pkg_msr[PERF_CSTATE_PKG_C6_RES].msr = MSR_PKG_C7_RESIDENCY;

	/* KNL has different MSR for CORE C6 */
	/*
	 * NOTE(review): pkg_msr[] is indexed with a PERF_CSTATE_CORE_*
	 * constant here, mixing the core and package enum namespaces.
	 * This matches the original KNL quirk code, but confirm it is
	 * intentional rather than a core_msr[]/pkg_msr[] mix-up.
	 */
	if (cm->quirks & KNL_CORE_C6_MSR)
		pkg_msr[PERF_CSTATE_CORE_C6_RES].msr = MSR_KNL_CORE_C6_RESIDENCY;

	/* Keep only the events the model advertises AND the MSRs answer for. */
	core_msr_mask = perf_msr_probe(core_msr, PERF_CSTATE_CORE_EVENT_MAX,
				       true, (void *) &cm->core_events);

	pkg_msr_mask = perf_msr_probe(pkg_msr, PERF_CSTATE_PKG_EVENT_MAX,
				      true, (void *) &cm->pkg_events);

	has_cstate_core = !!core_msr_mask;
	has_cstate_pkg  = !!pkg_msr_mask;

	return (has_cstate_core || has_cstate_pkg) ? 0 : -ENODEV;
}
7196aec1ad7SBorislav Petkov 
/*
 * Tear down everything cstate_init() set up. The hotplug states are
 * removed before the PMUs are unregistered so no CPU callbacks can run
 * against a half-torn-down PMU. Safe to call from the partial-failure
 * paths in cstate_init() as well as from module exit.
 */
static inline void cstate_cleanup(void)
{
	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_ONLINE);
	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_STARTING);

	if (has_cstate_core)
		perf_pmu_unregister(&cstate_core_pmu);

	if (has_cstate_pkg)
		perf_pmu_unregister(&cstate_pkg_pmu);
}
731d29859e7SThomas Gleixner 
cstate_init(void)732d29859e7SThomas Gleixner static int __init cstate_init(void)
733d29859e7SThomas Gleixner {
73477c34ef1SSebastian Andrzej Siewior 	int err;
7356aec1ad7SBorislav Petkov 
73677c34ef1SSebastian Andrzej Siewior 	cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_STARTING,
737834fcd29SThomas Gleixner 			  "perf/x86/cstate:starting", cstate_cpu_init, NULL);
73877c34ef1SSebastian Andrzej Siewior 	cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_ONLINE,
739834fcd29SThomas Gleixner 			  "perf/x86/cstate:online", NULL, cstate_cpu_exit);
7406aec1ad7SBorislav Petkov 
7416aec1ad7SBorislav Petkov 	if (has_cstate_core) {
7426aec1ad7SBorislav Petkov 		err = perf_pmu_register(&cstate_core_pmu, cstate_core_pmu.name, -1);
743d29859e7SThomas Gleixner 		if (err) {
744d29859e7SThomas Gleixner 			has_cstate_core = false;
745d29859e7SThomas Gleixner 			pr_info("Failed to register cstate core pmu\n");
746834fcd29SThomas Gleixner 			cstate_cleanup();
74777c34ef1SSebastian Andrzej Siewior 			return err;
748d29859e7SThomas Gleixner 		}
7496aec1ad7SBorislav Petkov 	}
7506aec1ad7SBorislav Petkov 
7516aec1ad7SBorislav Petkov 	if (has_cstate_pkg) {
752cb63ba0fSKan Liang 		if (topology_max_die_per_package() > 1) {
753cb63ba0fSKan Liang 			err = perf_pmu_register(&cstate_pkg_pmu,
754cb63ba0fSKan Liang 						"cstate_die", -1);
755cb63ba0fSKan Liang 		} else {
756cb63ba0fSKan Liang 			err = perf_pmu_register(&cstate_pkg_pmu,
757cb63ba0fSKan Liang 						cstate_pkg_pmu.name, -1);
758cb63ba0fSKan Liang 		}
759d29859e7SThomas Gleixner 		if (err) {
760d29859e7SThomas Gleixner 			has_cstate_pkg = false;
761d29859e7SThomas Gleixner 			pr_info("Failed to register cstate pkg pmu\n");
762d29859e7SThomas Gleixner 			cstate_cleanup();
76377c34ef1SSebastian Andrzej Siewior 			return err;
7646aec1ad7SBorislav Petkov 		}
7656aec1ad7SBorislav Petkov 	}
766834fcd29SThomas Gleixner 	return 0;
767d29859e7SThomas Gleixner }
7686aec1ad7SBorislav Petkov 
cstate_pmu_init(void)7696aec1ad7SBorislav Petkov static int __init cstate_pmu_init(void)
7706aec1ad7SBorislav Petkov {
771424646eeSThomas Gleixner 	const struct x86_cpu_id *id;
7726aec1ad7SBorislav Petkov 	int err;
7736aec1ad7SBorislav Petkov 
774424646eeSThomas Gleixner 	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
7756aec1ad7SBorislav Petkov 		return -ENODEV;
7766aec1ad7SBorislav Petkov 
777424646eeSThomas Gleixner 	id = x86_match_cpu(intel_cstates_match);
778424646eeSThomas Gleixner 	if (!id)
779424646eeSThomas Gleixner 		return -ENODEV;
780424646eeSThomas Gleixner 
781424646eeSThomas Gleixner 	err = cstate_probe((const struct cstate_model *) id->driver_data);
7826aec1ad7SBorislav Petkov 	if (err)
7836aec1ad7SBorislav Petkov 		return err;
7846aec1ad7SBorislav Petkov 
785d29859e7SThomas Gleixner 	return cstate_init();
7866aec1ad7SBorislav Petkov }
787c7afba32SThomas Gleixner module_init(cstate_pmu_init);
788c7afba32SThomas Gleixner 
/* Module exit point: undo everything cstate_pmu_init() registered. */
static void __exit cstate_pmu_exit(void)
{
	cstate_cleanup();
}
module_exit(cstate_pmu_exit);
794