xref: /openbmc/linux/drivers/perf/cxl_pmu.c (revision 75bb19ed)
15d7107c7SJonathan Cameron // SPDX-License-Identifier: GPL-2.0-only
25d7107c7SJonathan Cameron 
35d7107c7SJonathan Cameron /*
45d7107c7SJonathan Cameron  * Copyright(c) 2023 Huawei
55d7107c7SJonathan Cameron  *
65d7107c7SJonathan Cameron  * The CXL 3.0 specification includes a standard Performance Monitoring Unit,
75d7107c7SJonathan Cameron  * called the CXL PMU, or CPMU. In order to allow a high degree of
85d7107c7SJonathan Cameron  * implementation flexibility the specification provides a wide range of
95d7107c7SJonathan Cameron  * options all of which are self describing.
105d7107c7SJonathan Cameron  *
115d7107c7SJonathan Cameron  * Details in CXL rev 3.0 section 8.2.7 CPMU Register Interface
125d7107c7SJonathan Cameron  */
135d7107c7SJonathan Cameron 
145d7107c7SJonathan Cameron #include <linux/io-64-nonatomic-lo-hi.h>
155d7107c7SJonathan Cameron #include <linux/perf_event.h>
165d7107c7SJonathan Cameron #include <linux/bitops.h>
175d7107c7SJonathan Cameron #include <linux/device.h>
185d7107c7SJonathan Cameron #include <linux/bits.h>
195d7107c7SJonathan Cameron #include <linux/list.h>
205d7107c7SJonathan Cameron #include <linux/bug.h>
215d7107c7SJonathan Cameron #include <linux/pci.h>
225d7107c7SJonathan Cameron 
235d7107c7SJonathan Cameron #include "../cxl/cxlpci.h"
245d7107c7SJonathan Cameron #include "../cxl/cxl.h"
255d7107c7SJonathan Cameron #include "../cxl/pmu.h"
265d7107c7SJonathan Cameron 
/* Register layout per CXL rev 3.0 section 8.2.7 CPMU Register Interface */
#define CXL_PMU_CAP_REG			0x0
#define   CXL_PMU_CAP_NUM_COUNTERS_MSK			GENMASK_ULL(5, 0)
#define   CXL_PMU_CAP_COUNTER_WIDTH_MSK			GENMASK_ULL(15, 8)
#define   CXL_PMU_CAP_NUM_EVN_CAP_REG_SUP_MSK		GENMASK_ULL(24, 20)
#define   CXL_PMU_CAP_FILTERS_SUP_MSK			GENMASK_ULL(39, 32)
#define     CXL_PMU_FILTER_HDM				BIT(0)
#define     CXL_PMU_FILTER_CHAN_RANK_BANK		BIT(1)
#define   CXL_PMU_CAP_MSI_N_MSK				GENMASK_ULL(47, 44)
#define   CXL_PMU_CAP_WRITEABLE_WHEN_FROZEN		BIT_ULL(48)
#define   CXL_PMU_CAP_FREEZE				BIT_ULL(49)
#define   CXL_PMU_CAP_INT				BIT_ULL(50)
#define   CXL_PMU_CAP_VERSION_MSK			GENMASK_ULL(63, 60)

#define CXL_PMU_OVERFLOW_REG		0x10
#define CXL_PMU_FREEZE_REG		0x18
/* One Event Capability register per event group the CPMU can count */
#define CXL_PMU_EVENT_CAP_REG(n)	(0x100 + 8 * (n))
#define   CXL_PMU_EVENT_CAP_SUPPORTED_EVENTS_MSK	GENMASK_ULL(31, 0)
#define   CXL_PMU_EVENT_CAP_GROUP_ID_MSK		GENMASK_ULL(47, 32)
#define   CXL_PMU_EVENT_CAP_VENDOR_ID_MSK		GENMASK_ULL(63, 48)

/* One configuration register per counter */
#define CXL_PMU_COUNTER_CFG_REG(n)	(0x200 + 8 * (n))
#define   CXL_PMU_COUNTER_CFG_TYPE_MSK			GENMASK_ULL(1, 0)
#define     CXL_PMU_COUNTER_CFG_TYPE_FREE_RUN		0
#define     CXL_PMU_COUNTER_CFG_TYPE_FIXED_FUN		1
#define     CXL_PMU_COUNTER_CFG_TYPE_CONFIGURABLE	2
#define   CXL_PMU_COUNTER_CFG_ENABLE			BIT_ULL(8)
#define   CXL_PMU_COUNTER_CFG_INT_ON_OVRFLW		BIT_ULL(9)
#define   CXL_PMU_COUNTER_CFG_FREEZE_ON_OVRFLW		BIT_ULL(10)
#define   CXL_PMU_COUNTER_CFG_EDGE			BIT_ULL(11)
#define   CXL_PMU_COUNTER_CFG_INVERT			BIT_ULL(12)
#define   CXL_PMU_COUNTER_CFG_THRESHOLD_MSK		GENMASK_ULL(23, 16)
#define   CXL_PMU_COUNTER_CFG_EVENTS_MSK		GENMASK_ULL(55, 24)
#define   CXL_PMU_COUNTER_CFG_EVENT_GRP_ID_IDX_MSK	GENMASK_ULL(63, 59)

/* Eight 32-bit filter value registers per counter */
#define CXL_PMU_FILTER_CFG_REG(n, f)	(0x400 + 4 * ((f) + (n) * 8))
#define   CXL_PMU_FILTER_CFG_VALUE_MSK			GENMASK(31, 0)

#define CXL_PMU_COUNTER_REG(n)		(0xc00 + 8 * (n))

/* CXL rev 3.0 Table 13-5 Events under CXL Vendor ID */
#define CXL_PMU_GID_CLOCK_TICKS		0x00
#define CXL_PMU_GID_D2H_REQ		0x0010
#define CXL_PMU_GID_D2H_RSP		0x0011
#define CXL_PMU_GID_H2D_REQ		0x0012
#define CXL_PMU_GID_H2D_RSP		0x0013
#define CXL_PMU_GID_CACHE_DATA		0x0014
#define CXL_PMU_GID_M2S_REQ		0x0020
#define CXL_PMU_GID_M2S_RWD		0x0021
#define CXL_PMU_GID_M2S_BIRSP		0x0022
#define CXL_PMU_GID_S2M_BISNP		0x0023
#define CXL_PMU_GID_S2M_NDR		0x0024
#define CXL_PMU_GID_S2M_DRS		0x0025
#define CXL_PMU_GID_DDR			0x8000
805d7107c7SJonathan Cameron 
/* cpuhp state number for this driver; presumably allocated dynamically at init - confirm in module init (not visible in this chunk) */
static int cxl_pmu_cpuhp_state_num;
825d7107c7SJonathan Cameron 
/*
 * Description of one Event Capability: which Vendor ID / Group ID it
 * covers and which events within that group may be counted.
 */
struct cxl_pmu_ev_cap {
	u16 vid;	/* Vendor ID from the Event Capability register */
	u16 gid;	/* Group ID from the Event Capability register */
	u32 msk;	/* Bitmask of countable events within the group */
	union {
		int counter_idx; /* fixed counters */
		int event_idx; /* configurable counters */
	};
	struct list_head node;	/* entry in info->event_caps_fixed or ->event_caps_configurable */
};
935d7107c7SJonathan Cameron 
/* Upper bound on counters; NUM_COUNTERS field is 6 bits wide (N - 1 encoded) */
#define CXL_PMU_MAX_COUNTERS 64
/* Per CPMU instance driver state */
struct cxl_pmu_info {
	struct pmu pmu;				/* embedded perf PMU, see pmu_to_cxl_pmu_info() */
	void __iomem *base;			/* MMIO base of the CPMU register block */
	struct perf_event **hw_events;		/* per-counter events - assumed indexed by counter; confirm against add/del (not in this chunk) */
	struct list_head event_caps_configurable;	/* cxl_pmu_ev_cap entries for configurable counters */
	struct list_head event_caps_fixed;	/* cxl_pmu_ev_cap entries for fixed function counters */
	DECLARE_BITMAP(used_counter_bm, CXL_PMU_MAX_COUNTERS);	/* counters currently in use - presumably; set elsewhere */
	DECLARE_BITMAP(conf_counter_bm, CXL_PMU_MAX_COUNTERS);	/* counters of CONFIGURABLE type (set in cxl_pmu_parse_caps()) */
	u16 counter_width;	/* counter width in bits, from the CAP register */
	u8 num_counters;	/* number of counters, from the CAP register */
	u8 num_event_capabilities;	/* number of Event Capability registers, from the CAP register */
	int on_cpu;		/* CPU this PMU's events run on - presumably managed via cpuhp; confirm */
	struct hlist_node node;	/* cpuhp instance list linkage - assumed; confirm */
	bool filter_hdm;	/* HDM decoder filter supported (CXL_PMU_FILTER_HDM in CAP) */
	int irq;		/* MSI/MSI-X message number from CAP, or -1 if no interrupt */
};
1115d7107c7SJonathan Cameron 
/* Resolve an embedded struct pmu back to its containing cxl_pmu_info */
#define pmu_to_cxl_pmu_info(_pmu) container_of(_pmu, struct cxl_pmu_info, pmu)
1135d7107c7SJonathan Cameron 
1145d7107c7SJonathan Cameron /*
1155d7107c7SJonathan Cameron  * All CPMU counters are discoverable via the Event Capabilities Registers.
 * Each Event Capability register contains a VID / GroupID.
1175d7107c7SJonathan Cameron  * A counter may then count any combination (by summing) of events in
1185d7107c7SJonathan Cameron  * that group which are in the Supported Events Bitmask.
1195d7107c7SJonathan Cameron  * However, there are some complexities to the scheme.
1205d7107c7SJonathan Cameron  *  - Fixed function counters refer to an Event Capabilities register.
1215d7107c7SJonathan Cameron  *    That event capability register is not then used for Configurable
1225d7107c7SJonathan Cameron  *    counters.
1235d7107c7SJonathan Cameron  */
/*
 * Discover this CPMU instance's capabilities from its registers.
 *
 * Fills in counter count/width, number of Event Capability registers,
 * HDM filter support and interrupt message number, then builds two lists:
 *  - info->event_caps_fixed: one entry per fixed function counter.
 *  - info->event_caps_configurable: one entry per Event Capability
 *    register not claimed by a fixed function counter.
 *
 * Returns 0 on success, -ENODEV if the implementation cannot write
 * counters while frozen (this driver requires that), or -ENOMEM.
 */
static int cxl_pmu_parse_caps(struct device *dev, struct cxl_pmu_info *info)
{
	unsigned long fixed_counter_event_cap_bm = 0;
	void __iomem *base = info->base;
	bool freeze_for_enable;
	u64 val, eval;
	int i;

	val = readq(base + CXL_PMU_CAP_REG);
	/* The driver programs counters while they are frozen; both caps needed */
	freeze_for_enable = FIELD_GET(CXL_PMU_CAP_WRITEABLE_WHEN_FROZEN, val) &&
		FIELD_GET(CXL_PMU_CAP_FREEZE, val);
	if (!freeze_for_enable) {
		dev_err(dev, "Counters not writable while frozen\n");
		return -ENODEV;
	}

	/* Counts are encoded as N - 1 in the capability register */
	info->num_counters = FIELD_GET(CXL_PMU_CAP_NUM_COUNTERS_MSK, val) + 1;
	info->counter_width = FIELD_GET(CXL_PMU_CAP_COUNTER_WIDTH_MSK, val);
	info->num_event_capabilities = FIELD_GET(CXL_PMU_CAP_NUM_EVN_CAP_REG_SUP_MSK, val) + 1;

	info->filter_hdm = FIELD_GET(CXL_PMU_CAP_FILTERS_SUP_MSK, val) & CXL_PMU_FILTER_HDM;
	/* irq holds the MSI/MSI-X message number here, or -1 if unsupported */
	if (FIELD_GET(CXL_PMU_CAP_INT, val))
		info->irq = FIELD_GET(CXL_PMU_CAP_MSI_N_MSK, val);
	else
		info->irq = -1;

	/* First handle fixed function counters; note if configurable counters found */
	for (i = 0; i < info->num_counters; i++) {
		struct cxl_pmu_ev_cap *pmu_ev;
		u32 events_msk;
		u8 group_idx;

		val = readq(base + CXL_PMU_COUNTER_CFG_REG(i));

		if (FIELD_GET(CXL_PMU_COUNTER_CFG_TYPE_MSK, val) ==
			CXL_PMU_COUNTER_CFG_TYPE_CONFIGURABLE) {
			set_bit(i, info->conf_counter_bm);
		}

		if (FIELD_GET(CXL_PMU_COUNTER_CFG_TYPE_MSK, val) !=
		    CXL_PMU_COUNTER_CFG_TYPE_FIXED_FUN)
			continue;

		/* In this case we know which fields are const */
		group_idx = FIELD_GET(CXL_PMU_COUNTER_CFG_EVENT_GRP_ID_IDX_MSK, val);
		events_msk = FIELD_GET(CXL_PMU_COUNTER_CFG_EVENTS_MSK, val);
		eval = readq(base + CXL_PMU_EVENT_CAP_REG(group_idx));
		pmu_ev = devm_kzalloc(dev, sizeof(*pmu_ev), GFP_KERNEL);
		if (!pmu_ev)
			return -ENOMEM;

		pmu_ev->vid = FIELD_GET(CXL_PMU_EVENT_CAP_VENDOR_ID_MSK, eval);
		pmu_ev->gid = FIELD_GET(CXL_PMU_EVENT_CAP_GROUP_ID_MSK, eval);
		/* For a fixed purpose counter use the events mask from the counter CFG */
		pmu_ev->msk = events_msk;
		pmu_ev->counter_idx = i;
		/* This list add is never unwound as all entries deleted on remove */
		list_add(&pmu_ev->node, &info->event_caps_fixed);
		/*
		 * Configurable counters must not use an Event Capability registers that
		 * is in use for a Fixed counter
		 */
		set_bit(group_idx, &fixed_counter_event_cap_bm);
	}

	if (!bitmap_empty(info->conf_counter_bm, CXL_PMU_MAX_COUNTERS)) {
		struct cxl_pmu_ev_cap *pmu_ev;
		int j;
		/* Walk event capabilities unused by fixed counters */
		for_each_clear_bit(j, &fixed_counter_event_cap_bm,
				   info->num_event_capabilities) {
			pmu_ev = devm_kzalloc(dev, sizeof(*pmu_ev), GFP_KERNEL);
			if (!pmu_ev)
				return -ENOMEM;

			/* Configurable counters take VID/GID/mask from the cap register itself */
			eval = readq(base + CXL_PMU_EVENT_CAP_REG(j));
			pmu_ev->vid = FIELD_GET(CXL_PMU_EVENT_CAP_VENDOR_ID_MSK, eval);
			pmu_ev->gid = FIELD_GET(CXL_PMU_EVENT_CAP_GROUP_ID_MSK, eval);
			pmu_ev->msk = FIELD_GET(CXL_PMU_EVENT_CAP_SUPPORTED_EVENTS_MSK, eval);
			pmu_ev->event_idx = j;
			list_add(&pmu_ev->node, &info->event_caps_configurable);
		}
	}

	return 0;
}
2105d7107c7SJonathan Cameron 
cxl_pmu_format_sysfs_show(struct device * dev,struct device_attribute * attr,char * buf)2115d7107c7SJonathan Cameron static ssize_t cxl_pmu_format_sysfs_show(struct device *dev,
2125d7107c7SJonathan Cameron 					 struct device_attribute *attr, char *buf)
2135d7107c7SJonathan Cameron {
2145d7107c7SJonathan Cameron 	struct dev_ext_attribute *eattr;
2155d7107c7SJonathan Cameron 
2165d7107c7SJonathan Cameron 	eattr = container_of(attr, struct dev_ext_attribute, attr);
2175d7107c7SJonathan Cameron 
2185d7107c7SJonathan Cameron 	return sysfs_emit(buf, "%s\n", (char *)eattr->var);
2195d7107c7SJonathan Cameron }
2205d7107c7SJonathan Cameron 
/*
 * Build a pointer to a statically allocated dev_ext_attribute exposing
 * _format (e.g. "config:0-31") via cxl_pmu_format_sysfs_show. The
 * compound-literal array gives the attribute static storage without
 * needing a separately named variable per attribute.
 */
#define CXL_PMU_FORMAT_ATTR(_name, _format)\
	(&((struct dev_ext_attribute[]) {					\
		{								\
			.attr = __ATTR(_name, 0444,				\
				       cxl_pmu_format_sysfs_show, NULL),	\
			.var = (void *)_format					\
		}								\
		})[0].attr.attr)
2295d7107c7SJonathan Cameron 
/* Indices into cxl_pmu_format_attr[] */
enum {
	cxl_pmu_mask_attr,
	cxl_pmu_gid_attr,
	cxl_pmu_vid_attr,
	cxl_pmu_threshold_attr,
	cxl_pmu_invert_attr,
	cxl_pmu_edge_attr,
	cxl_pmu_hdm_filter_en_attr,
	cxl_pmu_hdm_attr,
};
2405d7107c7SJonathan Cameron 
/* perf "format" sysfs entries describing how config/config1/config2 are laid out */
static struct attribute *cxl_pmu_format_attr[] = {
	[cxl_pmu_mask_attr] = CXL_PMU_FORMAT_ATTR(mask, "config:0-31"),
	[cxl_pmu_gid_attr] = CXL_PMU_FORMAT_ATTR(gid, "config:32-47"),
	[cxl_pmu_vid_attr] = CXL_PMU_FORMAT_ATTR(vid, "config:48-63"),
	[cxl_pmu_threshold_attr] = CXL_PMU_FORMAT_ATTR(threshold, "config1:0-15"),
	[cxl_pmu_invert_attr] = CXL_PMU_FORMAT_ATTR(invert, "config1:16"),
	[cxl_pmu_edge_attr] = CXL_PMU_FORMAT_ATTR(edge, "config1:17"),
	[cxl_pmu_hdm_filter_en_attr] = CXL_PMU_FORMAT_ATTR(hdm_filter_en, "config1:18"),
	[cxl_pmu_hdm_attr] = CXL_PMU_FORMAT_ATTR(hdm, "config2:0-15"),
	NULL
};
2525d7107c7SJonathan Cameron 
/*
 * Field layout of perf_event_attr config/config1/config2.
 * Must stay in sync with the format strings in cxl_pmu_format_attr[].
 */
#define CXL_PMU_ATTR_CONFIG_MASK_MSK		GENMASK_ULL(31, 0)
#define CXL_PMU_ATTR_CONFIG_GID_MSK		GENMASK_ULL(47, 32)
#define CXL_PMU_ATTR_CONFIG_VID_MSK		GENMASK_ULL(63, 48)
#define CXL_PMU_ATTR_CONFIG1_THRESHOLD_MSK	GENMASK_ULL(15, 0)
#define CXL_PMU_ATTR_CONFIG1_INVERT_MSK		BIT(16)
#define CXL_PMU_ATTR_CONFIG1_EDGE_MSK		BIT(17)
#define CXL_PMU_ATTR_CONFIG1_FILTER_EN_MSK	BIT(18)
#define CXL_PMU_ATTR_CONFIG2_HDM_MSK		GENMASK(15, 0)
2615d7107c7SJonathan Cameron 
cxl_pmu_format_is_visible(struct kobject * kobj,struct attribute * attr,int a)2625d7107c7SJonathan Cameron static umode_t cxl_pmu_format_is_visible(struct kobject *kobj,
2635d7107c7SJonathan Cameron 					 struct attribute *attr, int a)
2645d7107c7SJonathan Cameron {
2655d7107c7SJonathan Cameron 	struct device *dev = kobj_to_dev(kobj);
2665d7107c7SJonathan Cameron 	struct cxl_pmu_info *info = dev_get_drvdata(dev);
2675d7107c7SJonathan Cameron 
2685d7107c7SJonathan Cameron 	/*
2695d7107c7SJonathan Cameron 	 * Filter capability at the CPMU level, so hide the attributes if the particular
2705d7107c7SJonathan Cameron 	 * filter is not supported.
2715d7107c7SJonathan Cameron 	 */
2725d7107c7SJonathan Cameron 	if (!info->filter_hdm &&
2735d7107c7SJonathan Cameron 	    (attr == cxl_pmu_format_attr[cxl_pmu_hdm_filter_en_attr] ||
2745d7107c7SJonathan Cameron 	     attr == cxl_pmu_format_attr[cxl_pmu_hdm_attr]))
2755d7107c7SJonathan Cameron 		return 0;
2765d7107c7SJonathan Cameron 
2775d7107c7SJonathan Cameron 	return attr->mode;
2785d7107c7SJonathan Cameron }
2795d7107c7SJonathan Cameron 
/* "format" directory in this PMU's sysfs tree */
static const struct attribute_group cxl_pmu_format_group = {
	.name = "format",
	.attrs = cxl_pmu_format_attr,
	.is_visible = cxl_pmu_format_is_visible,
};
2855d7107c7SJonathan Cameron 
cxl_pmu_config_get_mask(struct perf_event * event)2865d7107c7SJonathan Cameron static u32 cxl_pmu_config_get_mask(struct perf_event *event)
2875d7107c7SJonathan Cameron {
2885d7107c7SJonathan Cameron 	return FIELD_GET(CXL_PMU_ATTR_CONFIG_MASK_MSK, event->attr.config);
2895d7107c7SJonathan Cameron }
2905d7107c7SJonathan Cameron 
cxl_pmu_config_get_gid(struct perf_event * event)2915d7107c7SJonathan Cameron static u16 cxl_pmu_config_get_gid(struct perf_event *event)
2925d7107c7SJonathan Cameron {
2935d7107c7SJonathan Cameron 	return FIELD_GET(CXL_PMU_ATTR_CONFIG_GID_MSK, event->attr.config);
2945d7107c7SJonathan Cameron }
2955d7107c7SJonathan Cameron 
cxl_pmu_config_get_vid(struct perf_event * event)2965d7107c7SJonathan Cameron static u16 cxl_pmu_config_get_vid(struct perf_event *event)
2975d7107c7SJonathan Cameron {
2985d7107c7SJonathan Cameron 	return FIELD_GET(CXL_PMU_ATTR_CONFIG_VID_MSK, event->attr.config);
2995d7107c7SJonathan Cameron }
3005d7107c7SJonathan Cameron 
cxl_pmu_config1_get_threshold(struct perf_event * event)3015d7107c7SJonathan Cameron static u8 cxl_pmu_config1_get_threshold(struct perf_event *event)
3025d7107c7SJonathan Cameron {
3035d7107c7SJonathan Cameron 	return FIELD_GET(CXL_PMU_ATTR_CONFIG1_THRESHOLD_MSK, event->attr.config1);
3045d7107c7SJonathan Cameron }
3055d7107c7SJonathan Cameron 
cxl_pmu_config1_get_invert(struct perf_event * event)3065d7107c7SJonathan Cameron static bool cxl_pmu_config1_get_invert(struct perf_event *event)
3075d7107c7SJonathan Cameron {
3085d7107c7SJonathan Cameron 	return FIELD_GET(CXL_PMU_ATTR_CONFIG1_INVERT_MSK, event->attr.config1);
3095d7107c7SJonathan Cameron }
3105d7107c7SJonathan Cameron 
cxl_pmu_config1_get_edge(struct perf_event * event)3115d7107c7SJonathan Cameron static bool cxl_pmu_config1_get_edge(struct perf_event *event)
3125d7107c7SJonathan Cameron {
3135d7107c7SJonathan Cameron 	return FIELD_GET(CXL_PMU_ATTR_CONFIG1_EDGE_MSK, event->attr.config1);
3145d7107c7SJonathan Cameron }
3155d7107c7SJonathan Cameron 
3165d7107c7SJonathan Cameron /*
317*75bb19edSHojin Nam  * CPMU specification allows for 8 filters, each with a 32 bit value...
318*75bb19edSHojin Nam  * So we need to find 8x32bits to store it in.
319*75bb19edSHojin Nam  * As the value used for disable is 0xffff_ffff, a separate enable switch
3205d7107c7SJonathan Cameron  * is needed.
3215d7107c7SJonathan Cameron  */
3225d7107c7SJonathan Cameron 
cxl_pmu_config1_hdm_filter_en(struct perf_event * event)3235d7107c7SJonathan Cameron static bool cxl_pmu_config1_hdm_filter_en(struct perf_event *event)
3245d7107c7SJonathan Cameron {
3255d7107c7SJonathan Cameron 	return FIELD_GET(CXL_PMU_ATTR_CONFIG1_FILTER_EN_MSK, event->attr.config1);
3265d7107c7SJonathan Cameron }
3275d7107c7SJonathan Cameron 
cxl_pmu_config2_get_hdm_decoder(struct perf_event * event)3285d7107c7SJonathan Cameron static u16 cxl_pmu_config2_get_hdm_decoder(struct perf_event *event)
3295d7107c7SJonathan Cameron {
3305d7107c7SJonathan Cameron 	return FIELD_GET(CXL_PMU_ATTR_CONFIG2_HDM_MSK, event->attr.config2);
3315d7107c7SJonathan Cameron }
3325d7107c7SJonathan Cameron 
cxl_pmu_event_sysfs_show(struct device * dev,struct device_attribute * attr,char * buf)3335d7107c7SJonathan Cameron static ssize_t cxl_pmu_event_sysfs_show(struct device *dev,
3345d7107c7SJonathan Cameron 					struct device_attribute *attr, char *buf)
3355d7107c7SJonathan Cameron {
3365d7107c7SJonathan Cameron 	struct perf_pmu_events_attr *pmu_attr =
3375d7107c7SJonathan Cameron 		container_of(attr, struct perf_pmu_events_attr, attr);
3385d7107c7SJonathan Cameron 
3395d7107c7SJonathan Cameron 	return sysfs_emit(buf, "config=%#llx\n", pmu_attr->id);
3405d7107c7SJonathan Cameron }
3415d7107c7SJonathan Cameron 
/*
 * Encode vid/gid/mask into the attribute id using the same layout as
 * perf_event_attr.config (vid 63:48, gid 47:32, mask 31:0).
 */
#define CXL_PMU_EVENT_ATTR(_name, _vid, _gid, _msk)			\
	PMU_EVENT_ATTR_ID(_name, cxl_pmu_event_sysfs_show,		\
			  ((u64)(_vid) << 48) | ((u64)(_gid) << 32) | (u64)(_msk))

/* For CXL spec defined events */
#define CXL_PMU_EVENT_CXL_ATTR(_name, _gid, _msk)			\
	CXL_PMU_EVENT_ATTR(_name, PCI_DVSEC_VENDOR_ID_CXL, _gid, _msk)
3495d7107c7SJonathan Cameron 
/* Events advertised via sysfs; all are under the CXL spec Vendor ID */
static struct attribute *cxl_pmu_event_attrs[] = {
	CXL_PMU_EVENT_CXL_ATTR(clock_ticks,			CXL_PMU_GID_CLOCK_TICKS, BIT(0)),
	/* CXL rev 3.0 Table 3-17 - Device to Host Requests */
	CXL_PMU_EVENT_CXL_ATTR(d2h_req_rdcurr,			CXL_PMU_GID_D2H_REQ, BIT(1)),
	CXL_PMU_EVENT_CXL_ATTR(d2h_req_rdown,			CXL_PMU_GID_D2H_REQ, BIT(2)),
	CXL_PMU_EVENT_CXL_ATTR(d2h_req_rdshared,		CXL_PMU_GID_D2H_REQ, BIT(3)),
	CXL_PMU_EVENT_CXL_ATTR(d2h_req_rdany,			CXL_PMU_GID_D2H_REQ, BIT(4)),
	CXL_PMU_EVENT_CXL_ATTR(d2h_req_rdownnodata,		CXL_PMU_GID_D2H_REQ, BIT(5)),
	CXL_PMU_EVENT_CXL_ATTR(d2h_req_itomwr,			CXL_PMU_GID_D2H_REQ, BIT(6)),
	CXL_PMU_EVENT_CXL_ATTR(d2h_req_wrcurr,			CXL_PMU_GID_D2H_REQ, BIT(7)),
	CXL_PMU_EVENT_CXL_ATTR(d2h_req_clflush,			CXL_PMU_GID_D2H_REQ, BIT(8)),
	CXL_PMU_EVENT_CXL_ATTR(d2h_req_cleanevict,		CXL_PMU_GID_D2H_REQ, BIT(9)),
	CXL_PMU_EVENT_CXL_ATTR(d2h_req_dirtyevict,		CXL_PMU_GID_D2H_REQ, BIT(10)),
	CXL_PMU_EVENT_CXL_ATTR(d2h_req_cleanevictnodata,	CXL_PMU_GID_D2H_REQ, BIT(11)),
	CXL_PMU_EVENT_CXL_ATTR(d2h_req_wowrinv,			CXL_PMU_GID_D2H_REQ, BIT(12)),
	CXL_PMU_EVENT_CXL_ATTR(d2h_req_wowrinvf,		CXL_PMU_GID_D2H_REQ, BIT(13)),
	CXL_PMU_EVENT_CXL_ATTR(d2h_req_wrinv,			CXL_PMU_GID_D2H_REQ, BIT(14)),
	CXL_PMU_EVENT_CXL_ATTR(d2h_req_cacheflushed,		CXL_PMU_GID_D2H_REQ, BIT(16)),
	/* CXL rev 3.0 Table 3-20 - D2H Response Encodings */
	CXL_PMU_EVENT_CXL_ATTR(d2h_rsp_rspihiti,		CXL_PMU_GID_D2H_RSP, BIT(4)),
	CXL_PMU_EVENT_CXL_ATTR(d2h_rsp_rspvhitv,		CXL_PMU_GID_D2H_RSP, BIT(6)),
	CXL_PMU_EVENT_CXL_ATTR(d2h_rsp_rspihitse,		CXL_PMU_GID_D2H_RSP, BIT(5)),
	CXL_PMU_EVENT_CXL_ATTR(d2h_rsp_rspshitse,		CXL_PMU_GID_D2H_RSP, BIT(1)),
	CXL_PMU_EVENT_CXL_ATTR(d2h_rsp_rspsfwdm,		CXL_PMU_GID_D2H_RSP, BIT(7)),
	CXL_PMU_EVENT_CXL_ATTR(d2h_rsp_rspifwdm,		CXL_PMU_GID_D2H_RSP, BIT(15)),
	CXL_PMU_EVENT_CXL_ATTR(d2h_rsp_rspvfwdv,		CXL_PMU_GID_D2H_RSP, BIT(22)),
	/* CXL rev 3.0 Table 3-21 - CXL.cache - Mapping of H2D Requests to D2H Responses */
	CXL_PMU_EVENT_CXL_ATTR(h2d_req_snpdata,			CXL_PMU_GID_H2D_REQ, BIT(1)),
	CXL_PMU_EVENT_CXL_ATTR(h2d_req_snpinv,			CXL_PMU_GID_H2D_REQ, BIT(2)),
	CXL_PMU_EVENT_CXL_ATTR(h2d_req_snpcur,			CXL_PMU_GID_H2D_REQ, BIT(3)),
	/* CXL rev 3.0 Table 3-22 - H2D Response Opcode Encodings */
	CXL_PMU_EVENT_CXL_ATTR(h2d_rsp_writepull,		CXL_PMU_GID_H2D_RSP, BIT(1)),
	CXL_PMU_EVENT_CXL_ATTR(h2d_rsp_go,			CXL_PMU_GID_H2D_RSP, BIT(4)),
	CXL_PMU_EVENT_CXL_ATTR(h2d_rsp_gowritepull,		CXL_PMU_GID_H2D_RSP, BIT(5)),
	CXL_PMU_EVENT_CXL_ATTR(h2d_rsp_extcmp,			CXL_PMU_GID_H2D_RSP, BIT(6)),
	CXL_PMU_EVENT_CXL_ATTR(h2d_rsp_gowritepulldrop,		CXL_PMU_GID_H2D_RSP, BIT(8)),
	CXL_PMU_EVENT_CXL_ATTR(h2d_rsp_fastgowritepull,		CXL_PMU_GID_H2D_RSP, BIT(13)),
	CXL_PMU_EVENT_CXL_ATTR(h2d_rsp_goerrwritepull,		CXL_PMU_GID_H2D_RSP, BIT(15)),
	/* CXL rev 3.0 Table 13-5 directly lists these */
	CXL_PMU_EVENT_CXL_ATTR(cachedata_d2h_data,		CXL_PMU_GID_CACHE_DATA, BIT(0)),
	CXL_PMU_EVENT_CXL_ATTR(cachedata_h2d_data,		CXL_PMU_GID_CACHE_DATA, BIT(1)),
	/* CXL rev 3.0 Table 3-29 M2S Req Memory Opcodes */
	CXL_PMU_EVENT_CXL_ATTR(m2s_req_meminv,			CXL_PMU_GID_M2S_REQ, BIT(0)),
	CXL_PMU_EVENT_CXL_ATTR(m2s_req_memrd,			CXL_PMU_GID_M2S_REQ, BIT(1)),
	CXL_PMU_EVENT_CXL_ATTR(m2s_req_memrddata,		CXL_PMU_GID_M2S_REQ, BIT(2)),
	CXL_PMU_EVENT_CXL_ATTR(m2s_req_memrdfwd,		CXL_PMU_GID_M2S_REQ, BIT(3)),
	CXL_PMU_EVENT_CXL_ATTR(m2s_req_memwrfwd,		CXL_PMU_GID_M2S_REQ, BIT(4)),
	CXL_PMU_EVENT_CXL_ATTR(m2s_req_memspecrd,		CXL_PMU_GID_M2S_REQ, BIT(8)),
	CXL_PMU_EVENT_CXL_ATTR(m2s_req_meminvnt,		CXL_PMU_GID_M2S_REQ, BIT(9)),
	CXL_PMU_EVENT_CXL_ATTR(m2s_req_memcleanevict,		CXL_PMU_GID_M2S_REQ, BIT(10)),
	/* CXL rev 3.0 Table 3-35 M2S RwD Memory Opcodes */
	CXL_PMU_EVENT_CXL_ATTR(m2s_rwd_memwr,			CXL_PMU_GID_M2S_RWD, BIT(1)),
	CXL_PMU_EVENT_CXL_ATTR(m2s_rwd_memwrptl,		CXL_PMU_GID_M2S_RWD, BIT(2)),
	CXL_PMU_EVENT_CXL_ATTR(m2s_rwd_biconflict,		CXL_PMU_GID_M2S_RWD, BIT(4)),
	/* CXL rev 3.0 Table 3-38 M2S BIRsp Memory Opcodes */
	CXL_PMU_EVENT_CXL_ATTR(m2s_birsp_i,			CXL_PMU_GID_M2S_BIRSP, BIT(0)),
	CXL_PMU_EVENT_CXL_ATTR(m2s_birsp_s,			CXL_PMU_GID_M2S_BIRSP, BIT(1)),
	CXL_PMU_EVENT_CXL_ATTR(m2s_birsp_e,			CXL_PMU_GID_M2S_BIRSP, BIT(2)),
	CXL_PMU_EVENT_CXL_ATTR(m2s_birsp_iblk,			CXL_PMU_GID_M2S_BIRSP, BIT(4)),
	CXL_PMU_EVENT_CXL_ATTR(m2s_birsp_sblk,			CXL_PMU_GID_M2S_BIRSP, BIT(5)),
	CXL_PMU_EVENT_CXL_ATTR(m2s_birsp_eblk,			CXL_PMU_GID_M2S_BIRSP, BIT(6)),
	/* CXL rev 3.0 Table 3-40 S2M BISnp Opcodes */
	CXL_PMU_EVENT_CXL_ATTR(s2m_bisnp_cur,			CXL_PMU_GID_S2M_BISNP, BIT(0)),
	CXL_PMU_EVENT_CXL_ATTR(s2m_bisnp_data,			CXL_PMU_GID_S2M_BISNP, BIT(1)),
	CXL_PMU_EVENT_CXL_ATTR(s2m_bisnp_inv,			CXL_PMU_GID_S2M_BISNP, BIT(2)),
	CXL_PMU_EVENT_CXL_ATTR(s2m_bisnp_curblk,		CXL_PMU_GID_S2M_BISNP, BIT(4)),
	CXL_PMU_EVENT_CXL_ATTR(s2m_bisnp_datblk,		CXL_PMU_GID_S2M_BISNP, BIT(5)),
	CXL_PMU_EVENT_CXL_ATTR(s2m_bisnp_invblk,		CXL_PMU_GID_S2M_BISNP, BIT(6)),
	/* CXL rev 3.0 Table 3-43 S2M NDR Opcodes */
	CXL_PMU_EVENT_CXL_ATTR(s2m_ndr_cmp,			CXL_PMU_GID_S2M_NDR, BIT(0)),
	CXL_PMU_EVENT_CXL_ATTR(s2m_ndr_cmps,			CXL_PMU_GID_S2M_NDR, BIT(1)),
	CXL_PMU_EVENT_CXL_ATTR(s2m_ndr_cmpe,			CXL_PMU_GID_S2M_NDR, BIT(2)),
	CXL_PMU_EVENT_CXL_ATTR(s2m_ndr_biconflictack,		CXL_PMU_GID_S2M_NDR, BIT(4)),
	/* CXL rev 3.0 Table 3-46 S2M DRS opcodes */
	CXL_PMU_EVENT_CXL_ATTR(s2m_drs_memdata,			CXL_PMU_GID_S2M_DRS, BIT(0)),
	CXL_PMU_EVENT_CXL_ATTR(s2m_drs_memdatanxm,		CXL_PMU_GID_S2M_DRS, BIT(1)),
	/* CXL rev 3.0 Table 13-5 directly lists these */
	CXL_PMU_EVENT_CXL_ATTR(ddr_act,				CXL_PMU_GID_DDR, BIT(0)),
	CXL_PMU_EVENT_CXL_ATTR(ddr_pre,				CXL_PMU_GID_DDR, BIT(1)),
	CXL_PMU_EVENT_CXL_ATTR(ddr_casrd,			CXL_PMU_GID_DDR, BIT(2)),
	CXL_PMU_EVENT_CXL_ATTR(ddr_caswr,			CXL_PMU_GID_DDR, BIT(3)),
	CXL_PMU_EVENT_CXL_ATTR(ddr_refresh,			CXL_PMU_GID_DDR, BIT(4)),
	CXL_PMU_EVENT_CXL_ATTR(ddr_selfrefreshent,		CXL_PMU_GID_DDR, BIT(5)),
	CXL_PMU_EVENT_CXL_ATTR(ddr_rfm,				CXL_PMU_GID_DDR, BIT(6)),
	NULL
};
4365d7107c7SJonathan Cameron 
cxl_pmu_find_fixed_counter_ev_cap(struct cxl_pmu_info * info,int vid,int gid,int msk)4375d7107c7SJonathan Cameron static struct cxl_pmu_ev_cap *cxl_pmu_find_fixed_counter_ev_cap(struct cxl_pmu_info *info,
4385d7107c7SJonathan Cameron 								int vid, int gid, int msk)
4395d7107c7SJonathan Cameron {
4405d7107c7SJonathan Cameron 	struct cxl_pmu_ev_cap *pmu_ev;
4415d7107c7SJonathan Cameron 
4425d7107c7SJonathan Cameron 	list_for_each_entry(pmu_ev, &info->event_caps_fixed, node) {
4435d7107c7SJonathan Cameron 		if (vid != pmu_ev->vid || gid != pmu_ev->gid)
4445d7107c7SJonathan Cameron 			continue;
4455d7107c7SJonathan Cameron 
4465d7107c7SJonathan Cameron 		/* Precise match for fixed counter */
4475d7107c7SJonathan Cameron 		if (msk == pmu_ev->msk)
4485d7107c7SJonathan Cameron 			return pmu_ev;
4495d7107c7SJonathan Cameron 	}
4505d7107c7SJonathan Cameron 
4515d7107c7SJonathan Cameron 	return ERR_PTR(-EINVAL);
4525d7107c7SJonathan Cameron }
4535d7107c7SJonathan Cameron 
cxl_pmu_find_config_counter_ev_cap(struct cxl_pmu_info * info,int vid,int gid,int msk)4545d7107c7SJonathan Cameron static struct cxl_pmu_ev_cap *cxl_pmu_find_config_counter_ev_cap(struct cxl_pmu_info *info,
4555d7107c7SJonathan Cameron 								 int vid, int gid, int msk)
4565d7107c7SJonathan Cameron {
4575d7107c7SJonathan Cameron 	struct cxl_pmu_ev_cap *pmu_ev;
4585d7107c7SJonathan Cameron 
4595d7107c7SJonathan Cameron 	list_for_each_entry(pmu_ev, &info->event_caps_configurable, node) {
4605d7107c7SJonathan Cameron 		if (vid != pmu_ev->vid || gid != pmu_ev->gid)
4615d7107c7SJonathan Cameron 			continue;
4625d7107c7SJonathan Cameron 
4635d7107c7SJonathan Cameron 		/* Request mask must be subset of supported */
4645d7107c7SJonathan Cameron 		if (msk & ~pmu_ev->msk)
4655d7107c7SJonathan Cameron 			continue;
4665d7107c7SJonathan Cameron 
4675d7107c7SJonathan Cameron 		return pmu_ev;
4685d7107c7SJonathan Cameron 	}
4695d7107c7SJonathan Cameron 
4705d7107c7SJonathan Cameron 	return ERR_PTR(-EINVAL);
4715d7107c7SJonathan Cameron }
4725d7107c7SJonathan Cameron 
cxl_pmu_event_is_visible(struct kobject * kobj,struct attribute * attr,int a)4735d7107c7SJonathan Cameron static umode_t cxl_pmu_event_is_visible(struct kobject *kobj, struct attribute *attr, int a)
4745d7107c7SJonathan Cameron {
4755d7107c7SJonathan Cameron 	struct device_attribute *dev_attr = container_of(attr, struct device_attribute, attr);
4765d7107c7SJonathan Cameron 	struct perf_pmu_events_attr *pmu_attr =
4775d7107c7SJonathan Cameron 		container_of(dev_attr, struct perf_pmu_events_attr, attr);
4785d7107c7SJonathan Cameron 	struct device *dev = kobj_to_dev(kobj);
4795d7107c7SJonathan Cameron 	struct cxl_pmu_info *info = dev_get_drvdata(dev);
4805d7107c7SJonathan Cameron 	int vid = FIELD_GET(CXL_PMU_ATTR_CONFIG_VID_MSK, pmu_attr->id);
4815d7107c7SJonathan Cameron 	int gid = FIELD_GET(CXL_PMU_ATTR_CONFIG_GID_MSK, pmu_attr->id);
4825d7107c7SJonathan Cameron 	int msk = FIELD_GET(CXL_PMU_ATTR_CONFIG_MASK_MSK, pmu_attr->id);
4835d7107c7SJonathan Cameron 
4845d7107c7SJonathan Cameron 	if (!IS_ERR(cxl_pmu_find_fixed_counter_ev_cap(info, vid, gid, msk)))
4855d7107c7SJonathan Cameron 		return attr->mode;
4865d7107c7SJonathan Cameron 
4875d7107c7SJonathan Cameron 	if (!IS_ERR(cxl_pmu_find_config_counter_ev_cap(info, vid, gid, msk)))
4885d7107c7SJonathan Cameron 		return attr->mode;
4895d7107c7SJonathan Cameron 
4905d7107c7SJonathan Cameron 	return 0;
4915d7107c7SJonathan Cameron }
4925d7107c7SJonathan Cameron 
/* "events" sysfs group; entries filtered per device by the is_visible hook */
static const struct attribute_group cxl_pmu_events = {
	.name = "events",
	.attrs = cxl_pmu_event_attrs,
	.is_visible = cxl_pmu_event_is_visible,
};
4985d7107c7SJonathan Cameron 
/*
 * sysfs "cpumask" attribute: reports the single CPU currently servicing
 * this PMU (perf tool uses this to know where to open the events).
 */
static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct cxl_pmu_info *info = dev_get_drvdata(dev);

	return cpumap_print_to_pagebuf(true, buf, cpumask_of(info->on_cpu));
}
static DEVICE_ATTR_RO(cpumask);
5075d7107c7SJonathan Cameron 
/* Attribute list and anonymous group holding the cpumask attribute */
static struct attribute *cxl_pmu_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL
};

static const struct attribute_group cxl_pmu_cpumask_group = {
	.attrs = cxl_pmu_cpumask_attrs,
};
5165d7107c7SJonathan Cameron 
/* All sysfs groups registered with the perf core via struct pmu */
static const struct attribute_group *cxl_pmu_attr_groups[] = {
	&cxl_pmu_events,
	&cxl_pmu_format_group,
	&cxl_pmu_cpumask_group,
	NULL
};
5235d7107c7SJonathan Cameron 
/*
 * Check whether the event (VID/GID/mask from the perf config) can be
 * counted and, optionally, pick a counter for it.  A fixed counter with an
 * exact capability match is preferred; otherwise any free configurable
 * counter whose supported mask covers the request is used.
 *
 * If counter_idx == NULL, don't try to allocate a counter (validation only,
 * as used from cxl_pmu_event_init()).
 *
 * NOTE(review): *event_idx is never written here, so callers see the value
 * they initialized it to - confirm this is intended for the event group
 * index programmed into CXL_PMU_COUNTER_CFG_EVENT_GRP_ID_IDX_MSK.
 *
 * Returns 0 on success, -EINVAL if the event cannot be counted or no
 * counter is currently free.
 */
static int cxl_pmu_get_event_idx(struct perf_event *event, int *counter_idx,
				 int *event_idx)
{
	struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(event->pmu);
	DECLARE_BITMAP(configurable_and_free, CXL_PMU_MAX_COUNTERS);
	struct cxl_pmu_ev_cap *pmu_ev;
	u32 mask;
	u16 gid, vid;
	int i;

	vid = cxl_pmu_config_get_vid(event);
	gid = cxl_pmu_config_get_gid(event);
	mask = cxl_pmu_config_get_mask(event);

	pmu_ev = cxl_pmu_find_fixed_counter_ev_cap(info, vid, gid, mask);
	if (!IS_ERR(pmu_ev)) {
		if (!counter_idx)
			return 0;
		/* Fixed counters are tied to a specific counter index */
		if (!test_bit(pmu_ev->counter_idx, info->used_counter_bm)) {
			*counter_idx = pmu_ev->counter_idx;
			return 0;
		}
		/* Fixed counter is in use, but maybe a configurable one? */
	}

	pmu_ev = cxl_pmu_find_config_counter_ev_cap(info, vid, gid, mask);
	if (!IS_ERR(pmu_ev)) {
		if (!counter_idx)
			return 0;

		/* Any configurable counter not currently in use will do */
		bitmap_andnot(configurable_and_free, info->conf_counter_bm,
			info->used_counter_bm, CXL_PMU_MAX_COUNTERS);

		i = find_first_bit(configurable_and_free, CXL_PMU_MAX_COUNTERS);
		if (i == CXL_PMU_MAX_COUNTERS)
			return -EINVAL;

		*counter_idx = i;
		return 0;
	}

	return -EINVAL;
}
5685d7107c7SJonathan Cameron 
cxl_pmu_event_init(struct perf_event * event)5695d7107c7SJonathan Cameron static int cxl_pmu_event_init(struct perf_event *event)
5705d7107c7SJonathan Cameron {
5715d7107c7SJonathan Cameron 	struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(event->pmu);
5725d7107c7SJonathan Cameron 	int rc;
5735d7107c7SJonathan Cameron 
5745d7107c7SJonathan Cameron 	/* Top level type sanity check - is this a Hardware Event being requested */
5755d7107c7SJonathan Cameron 	if (event->attr.type != event->pmu->type)
5765d7107c7SJonathan Cameron 		return -ENOENT;
5775d7107c7SJonathan Cameron 
5785d7107c7SJonathan Cameron 	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
5795d7107c7SJonathan Cameron 		return -EOPNOTSUPP;
5805d7107c7SJonathan Cameron 	/* TODO: Validation of any filter */
5815d7107c7SJonathan Cameron 
5825d7107c7SJonathan Cameron 	/*
5835d7107c7SJonathan Cameron 	 * Verify that it is possible to count what was requested. Either must
5845d7107c7SJonathan Cameron 	 * be a fixed counter that is a precise match or a configurable counter
5855d7107c7SJonathan Cameron 	 * where this is a subset.
5865d7107c7SJonathan Cameron 	 */
5875d7107c7SJonathan Cameron 	rc = cxl_pmu_get_event_idx(event, NULL, NULL);
5885d7107c7SJonathan Cameron 	if (rc < 0)
5895d7107c7SJonathan Cameron 		return rc;
5905d7107c7SJonathan Cameron 
5915d7107c7SJonathan Cameron 	event->cpu = info->on_cpu;
5925d7107c7SJonathan Cameron 
5935d7107c7SJonathan Cameron 	return 0;
5945d7107c7SJonathan Cameron }
5955d7107c7SJonathan Cameron 
cxl_pmu_enable(struct pmu * pmu)5965d7107c7SJonathan Cameron static void cxl_pmu_enable(struct pmu *pmu)
5975d7107c7SJonathan Cameron {
5985d7107c7SJonathan Cameron 	struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(pmu);
5995d7107c7SJonathan Cameron 	void __iomem *base = info->base;
6005d7107c7SJonathan Cameron 
6015d7107c7SJonathan Cameron 	/* Can assume frozen at this stage */
6025d7107c7SJonathan Cameron 	writeq(0, base + CXL_PMU_FREEZE_REG);
6035d7107c7SJonathan Cameron }
6045d7107c7SJonathan Cameron 
cxl_pmu_disable(struct pmu * pmu)6055d7107c7SJonathan Cameron static void cxl_pmu_disable(struct pmu *pmu)
6065d7107c7SJonathan Cameron {
6075d7107c7SJonathan Cameron 	struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(pmu);
6085d7107c7SJonathan Cameron 	void __iomem *base = info->base;
6095d7107c7SJonathan Cameron 
6105d7107c7SJonathan Cameron 	/*
6115d7107c7SJonathan Cameron 	 * Whilst bits above number of counters are RsvdZ
6125d7107c7SJonathan Cameron 	 * they are unlikely to be repurposed given
6135d7107c7SJonathan Cameron 	 * number of counters is allowed to be 64 leaving
6145d7107c7SJonathan Cameron 	 * no reserved bits.  Hence this is only slightly
6155d7107c7SJonathan Cameron 	 * naughty.
6165d7107c7SJonathan Cameron 	 */
6175d7107c7SJonathan Cameron 	writeq(GENMASK_ULL(63, 0), base + CXL_PMU_FREEZE_REG);
6185d7107c7SJonathan Cameron }
6195d7107c7SJonathan Cameron 
/*
 * perf ->start() callback: program the counter configuration (and HDM
 * filter if supported), zero the hardware counter and let it run.
 */
static void cxl_pmu_event_start(struct perf_event *event, int flags)
{
	struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	void __iomem *base = info->base;
	u64 cfg;

	/*
	 * All paths to here should either set these flags directly or
	 * call cxl_pmu_event_stop() which will ensure the correct state.
	 */
	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
	hwc->state = 0;

	/*
	 * Currently only hdm filter control is implemented, this code will
	 * want generalizing when more filters are added.
	 */
	if (info->filter_hdm) {
		if (cxl_pmu_config1_hdm_filter_en(event))
			cfg = cxl_pmu_config2_get_hdm_decoder(event);
		else
			cfg = GENMASK(31, 0); /* No filtering if 0xFFFF_FFFF */
		writeq(cfg, base + CXL_PMU_FILTER_CFG_REG(hwc->idx, 0));
	}

	/* Read/modify/write the counter configuration register */
	cfg = readq(base + CXL_PMU_COUNTER_CFG_REG(hwc->idx));
	cfg |= FIELD_PREP(CXL_PMU_COUNTER_CFG_INT_ON_OVRFLW, 1);
	cfg |= FIELD_PREP(CXL_PMU_COUNTER_CFG_FREEZE_ON_OVRFLW, 1);
	cfg |= FIELD_PREP(CXL_PMU_COUNTER_CFG_ENABLE, 1);
	cfg |= FIELD_PREP(CXL_PMU_COUNTER_CFG_EDGE,
			  cxl_pmu_config1_get_edge(event) ? 1 : 0);
	cfg |= FIELD_PREP(CXL_PMU_COUNTER_CFG_INVERT,
			  cxl_pmu_config1_get_invert(event) ? 1 : 0);

	/* Fixed purpose counters have next two fields RO */
	if (test_bit(hwc->idx, info->conf_counter_bm)) {
		/* event_base holds the event group index for this counter */
		cfg |= FIELD_PREP(CXL_PMU_COUNTER_CFG_EVENT_GRP_ID_IDX_MSK,
				  hwc->event_base);
		cfg |= FIELD_PREP(CXL_PMU_COUNTER_CFG_EVENTS_MSK,
				  cxl_pmu_config_get_mask(event));
	}
	cfg &= ~CXL_PMU_COUNTER_CFG_THRESHOLD_MSK;
	/*
	 * For events that generate only 1 count per clock the CXL 3.0 spec
	 * states the threshold shall be set to 1 but if set to 0 it will
	 * count the raw value anyway?
	 * There is no definition of what events will count multiple per cycle
	 * and hence to which non 1 values of threshold can apply.
	 * (CXL 3.0 8.2.7.2.1 Counter Configuration - threshold field definition)
	 */
	cfg |= FIELD_PREP(CXL_PMU_COUNTER_CFG_THRESHOLD_MSK,
			  cxl_pmu_config1_get_threshold(event));
	writeq(cfg, base + CXL_PMU_COUNTER_CFG_REG(hwc->idx));

	/* Start counting from zero; prev_count must agree with the HW */
	local64_set(&hwc->prev_count, 0);
	writeq(0, base + CXL_PMU_COUNTER_REG(hwc->idx));

	perf_event_update_userpage(event);
}
6835d7107c7SJonathan Cameron 
cxl_pmu_read_counter(struct perf_event * event)6845d7107c7SJonathan Cameron static u64 cxl_pmu_read_counter(struct perf_event *event)
6855d7107c7SJonathan Cameron {
6865d7107c7SJonathan Cameron 	struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(event->pmu);
6875d7107c7SJonathan Cameron 	void __iomem *base = info->base;
6885d7107c7SJonathan Cameron 
6895d7107c7SJonathan Cameron 	return readq(base + CXL_PMU_COUNTER_REG(event->hw.idx));
6905d7107c7SJonathan Cameron }
6915d7107c7SJonathan Cameron 
/*
 * Accumulate the delta since the last read into event->count.
 *
 * @overflow: true when called from the overflow interrupt, in which case
 *	      a wrap of the (possibly < 64 bit) hardware counter is known
 *	      to have occurred and is added back into the delta.
 *
 * The cmpxchg loop guards against a concurrent reader updating prev_count
 * between our read of prev_count and of the hardware counter.
 */
static void __cxl_pmu_read(struct perf_event *event, bool overflow)
{
	struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 new_cnt, prev_cnt, delta;

	do {
		prev_cnt = local64_read(&hwc->prev_count);
		new_cnt = cxl_pmu_read_counter(event);
	} while (local64_cmpxchg(&hwc->prev_count, prev_cnt, new_cnt) != prev_cnt);

	/*
	 * If we know an overflow occurred then take that into account.
	 * Note counter is not reset as that would lose events.
	 *
	 * Use 1ULL (not 1UL) so the shift is done in 64 bits: counter_width
	 * may exceed 32 and unsigned long is 32 bits on 32-bit kernels,
	 * where the original 1UL shift was undefined behaviour.  For a full
	 * 64-bit counter the wrap is already folded into the subtraction,
	 * so skip the (would-be undefined) shift by 64 entirely.
	 */
	delta = (new_cnt - prev_cnt) & GENMASK_ULL(info->counter_width - 1, 0);
	if (overflow && info->counter_width < 64 &&
	    delta < GENMASK_ULL(info->counter_width - 1, 0))
		delta += (1ULL << info->counter_width);

	local64_add(delta, &event->count);
}
7135d7107c7SJonathan Cameron 
/* perf ->read() callback: update the count; no known overflow occurred */
static void cxl_pmu_read(struct perf_event *event)
{
	__cxl_pmu_read(event, false);
}
7185d7107c7SJonathan Cameron 
/*
 * perf ->stop() callback: fold in the final count, then disable the
 * counter and its overflow interrupt.
 */
static void cxl_pmu_event_stop(struct perf_event *event, int flags)
{
	struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(event->pmu);
	void __iomem *base = info->base;
	struct hw_perf_event *hwc = &event->hw;
	u64 cfg;

	/* Capture the count before the counter is disabled */
	cxl_pmu_read(event);
	WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
	hwc->state |= PERF_HES_STOPPED;

	/* Clear enable and interrupt-on-overflow; leave other config bits */
	cfg = readq(base + CXL_PMU_COUNTER_CFG_REG(hwc->idx));
	cfg &= ~(FIELD_PREP(CXL_PMU_COUNTER_CFG_INT_ON_OVRFLW, 1) |
		 FIELD_PREP(CXL_PMU_COUNTER_CFG_ENABLE, 1));
	writeq(cfg, base + CXL_PMU_COUNTER_CFG_REG(hwc->idx));

	hwc->state |= PERF_HES_UPTODATE;
}
7375d7107c7SJonathan Cameron 
cxl_pmu_event_add(struct perf_event * event,int flags)7385d7107c7SJonathan Cameron static int cxl_pmu_event_add(struct perf_event *event, int flags)
7395d7107c7SJonathan Cameron {
7405d7107c7SJonathan Cameron 	struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(event->pmu);
7415d7107c7SJonathan Cameron 	struct hw_perf_event *hwc = &event->hw;
7425d7107c7SJonathan Cameron 	int idx, rc;
7435d7107c7SJonathan Cameron 	int event_idx = 0;
7445d7107c7SJonathan Cameron 
7455d7107c7SJonathan Cameron 	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
7465d7107c7SJonathan Cameron 
7475d7107c7SJonathan Cameron 	rc = cxl_pmu_get_event_idx(event, &idx, &event_idx);
7485d7107c7SJonathan Cameron 	if (rc < 0)
7495d7107c7SJonathan Cameron 		return rc;
7505d7107c7SJonathan Cameron 
7515d7107c7SJonathan Cameron 	hwc->idx = idx;
7525d7107c7SJonathan Cameron 
7535d7107c7SJonathan Cameron 	/* Only set for configurable counters */
7545d7107c7SJonathan Cameron 	hwc->event_base = event_idx;
7555d7107c7SJonathan Cameron 	info->hw_events[idx] = event;
7565d7107c7SJonathan Cameron 	set_bit(idx, info->used_counter_bm);
7575d7107c7SJonathan Cameron 
7585d7107c7SJonathan Cameron 	if (flags & PERF_EF_START)
7595d7107c7SJonathan Cameron 		cxl_pmu_event_start(event, PERF_EF_RELOAD);
7605d7107c7SJonathan Cameron 
7615d7107c7SJonathan Cameron 	return 0;
7625d7107c7SJonathan Cameron }
7635d7107c7SJonathan Cameron 
/* perf ->del() callback: stop the event and release its counter */
static void cxl_pmu_event_del(struct perf_event *event, int flags)
{
	struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/* Stop first so the final count is folded in before release */
	cxl_pmu_event_stop(event, PERF_EF_UPDATE);
	clear_bit(hwc->idx, info->used_counter_bm);
	info->hw_events[hwc->idx] = NULL;
	perf_event_update_userpage(event);
}
7745d7107c7SJonathan Cameron 
/*
 * Overflow interrupt handler: fold the wrapped counts into their events
 * and acknowledge the overflow bits.
 */
static irqreturn_t cxl_pmu_irq(int irq, void *data)
{
	struct cxl_pmu_info *info = data;
	void __iomem *base = info->base;
	u64 overflowed;
	DECLARE_BITMAP(overflowedbm, 64);
	int i;

	overflowed = readq(base + CXL_PMU_OVERFLOW_REG);

	/* Interrupt may be shared, so maybe it isn't ours */
	if (!overflowed)
		return IRQ_NONE;

	bitmap_from_arr64(overflowedbm, &overflowed, 64);
	for_each_set_bit(i, overflowedbm, info->num_counters) {
		struct perf_event *event = info->hw_events[i];

		if (!event) {
			dev_dbg(info->pmu.dev,
				"overflow but on non enabled counter %d\n", i);
			continue;
		}

		/* overflow == true so the wrap is added back into the delta */
		__cxl_pmu_read(event, true);
	}

	/* Write one to clear the overflow status bits we observed */
	writeq(overflowed, base + CXL_PMU_OVERFLOW_REG);

	return IRQ_HANDLED;
}
8065d7107c7SJonathan Cameron 
/* devm action: unregister the PMU from the perf core on device teardown */
static void cxl_pmu_perf_unregister(void *_info)
{
	struct cxl_pmu_info *info = _info;

	perf_pmu_unregister(&info->pmu);
}
8135d7107c7SJonathan Cameron 
/* devm action: drop this instance from the CPU hotplug state machine */
static void cxl_pmu_cpuhp_remove(void *_info)
{
	struct cxl_pmu_info *info = _info;

	cpuhp_state_remove_instance_nocalls(cxl_pmu_cpuhp_state_num, &info->node);
}
8205d7107c7SJonathan Cameron 
cxl_pmu_probe(struct device * dev)8215d7107c7SJonathan Cameron static int cxl_pmu_probe(struct device *dev)
8225d7107c7SJonathan Cameron {
8235d7107c7SJonathan Cameron 	struct cxl_pmu *pmu = to_cxl_pmu(dev);
8245d7107c7SJonathan Cameron 	struct pci_dev *pdev = to_pci_dev(dev->parent);
8255d7107c7SJonathan Cameron 	struct cxl_pmu_info *info;
8265d7107c7SJonathan Cameron 	char *irq_name;
8275d7107c7SJonathan Cameron 	char *dev_name;
8285d7107c7SJonathan Cameron 	int rc, irq;
8295d7107c7SJonathan Cameron 
8305d7107c7SJonathan Cameron 	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
8315d7107c7SJonathan Cameron 	if (!info)
8325d7107c7SJonathan Cameron 		return -ENOMEM;
8335d7107c7SJonathan Cameron 
8345d7107c7SJonathan Cameron 	dev_set_drvdata(dev, info);
8355d7107c7SJonathan Cameron 	INIT_LIST_HEAD(&info->event_caps_fixed);
8365d7107c7SJonathan Cameron 	INIT_LIST_HEAD(&info->event_caps_configurable);
8375d7107c7SJonathan Cameron 
8385d7107c7SJonathan Cameron 	info->base = pmu->base;
8395d7107c7SJonathan Cameron 
8405d7107c7SJonathan Cameron 	info->on_cpu = -1;
8415d7107c7SJonathan Cameron 	rc = cxl_pmu_parse_caps(dev, info);
8425d7107c7SJonathan Cameron 	if (rc)
8435d7107c7SJonathan Cameron 		return rc;
8445d7107c7SJonathan Cameron 
8455d7107c7SJonathan Cameron 	info->hw_events = devm_kcalloc(dev, sizeof(*info->hw_events),
8465d7107c7SJonathan Cameron 				       info->num_counters, GFP_KERNEL);
8475d7107c7SJonathan Cameron 	if (!info->hw_events)
8485d7107c7SJonathan Cameron 		return -ENOMEM;
8495d7107c7SJonathan Cameron 
8505d7107c7SJonathan Cameron 	switch (pmu->type) {
8515d7107c7SJonathan Cameron 	case CXL_PMU_MEMDEV:
8525d7107c7SJonathan Cameron 		dev_name = devm_kasprintf(dev, GFP_KERNEL, "cxl_pmu_mem%d.%d",
8535d7107c7SJonathan Cameron 					  pmu->assoc_id, pmu->index);
8545d7107c7SJonathan Cameron 		break;
8555d7107c7SJonathan Cameron 	}
8565d7107c7SJonathan Cameron 	if (!dev_name)
8575d7107c7SJonathan Cameron 		return -ENOMEM;
8585d7107c7SJonathan Cameron 
8595d7107c7SJonathan Cameron 	info->pmu = (struct pmu) {
8605d7107c7SJonathan Cameron 		.name = dev_name,
8615d7107c7SJonathan Cameron 		.parent = dev,
8625d7107c7SJonathan Cameron 		.module = THIS_MODULE,
8635d7107c7SJonathan Cameron 		.event_init = cxl_pmu_event_init,
8645d7107c7SJonathan Cameron 		.pmu_enable = cxl_pmu_enable,
8655d7107c7SJonathan Cameron 		.pmu_disable = cxl_pmu_disable,
8665d7107c7SJonathan Cameron 		.add = cxl_pmu_event_add,
8675d7107c7SJonathan Cameron 		.del = cxl_pmu_event_del,
8685d7107c7SJonathan Cameron 		.start = cxl_pmu_event_start,
8695d7107c7SJonathan Cameron 		.stop = cxl_pmu_event_stop,
8705d7107c7SJonathan Cameron 		.read = cxl_pmu_read,
8715d7107c7SJonathan Cameron 		.task_ctx_nr = perf_invalid_context,
8725d7107c7SJonathan Cameron 		.attr_groups = cxl_pmu_attr_groups,
8735d7107c7SJonathan Cameron 		.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
8745d7107c7SJonathan Cameron 	};
8755d7107c7SJonathan Cameron 
8765d7107c7SJonathan Cameron 	if (info->irq <= 0)
8775d7107c7SJonathan Cameron 		return -EINVAL;
8785d7107c7SJonathan Cameron 
8795d7107c7SJonathan Cameron 	rc = pci_irq_vector(pdev, info->irq);
8805d7107c7SJonathan Cameron 	if (rc < 0)
8815d7107c7SJonathan Cameron 		return rc;
8825d7107c7SJonathan Cameron 	irq = rc;
8835d7107c7SJonathan Cameron 
8845d7107c7SJonathan Cameron 	irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_overflow\n", dev_name);
8855d7107c7SJonathan Cameron 	if (!irq_name)
8865d7107c7SJonathan Cameron 		return -ENOMEM;
8875d7107c7SJonathan Cameron 
8885d7107c7SJonathan Cameron 	rc = devm_request_irq(dev, irq, cxl_pmu_irq, IRQF_SHARED | IRQF_ONESHOT,
8895d7107c7SJonathan Cameron 			      irq_name, info);
8905d7107c7SJonathan Cameron 	if (rc)
8915d7107c7SJonathan Cameron 		return rc;
8925d7107c7SJonathan Cameron 	info->irq = irq;
8935d7107c7SJonathan Cameron 
8945d7107c7SJonathan Cameron 	rc = cpuhp_state_add_instance(cxl_pmu_cpuhp_state_num, &info->node);
8955d7107c7SJonathan Cameron 	if (rc)
8965d7107c7SJonathan Cameron 		return rc;
8975d7107c7SJonathan Cameron 
8985d7107c7SJonathan Cameron 	rc = devm_add_action_or_reset(dev, cxl_pmu_cpuhp_remove, info);
8995d7107c7SJonathan Cameron 	if (rc)
9005d7107c7SJonathan Cameron 		return rc;
9015d7107c7SJonathan Cameron 
9025d7107c7SJonathan Cameron 	rc = perf_pmu_register(&info->pmu, info->pmu.name, -1);
9035d7107c7SJonathan Cameron 	if (rc)
9045d7107c7SJonathan Cameron 		return rc;
9055d7107c7SJonathan Cameron 
9065d7107c7SJonathan Cameron 	rc = devm_add_action_or_reset(dev, cxl_pmu_perf_unregister, info);
9075d7107c7SJonathan Cameron 	if (rc)
9085d7107c7SJonathan Cameron 		return rc;
9095d7107c7SJonathan Cameron 
9105d7107c7SJonathan Cameron 	return 0;
9115d7107c7SJonathan Cameron }
9125d7107c7SJonathan Cameron 
/* Binds to CXL_DEVICE_PMU devices registered on the cxl bus */
static struct cxl_driver cxl_pmu_driver = {
	.name = "cxl_pmu",
	.probe = cxl_pmu_probe,
	.id = CXL_DEVICE_PMU,
};
9185d7107c7SJonathan Cameron 
/*
 * CPU hotplug online callback: adopt the first CPU to come online as the
 * CPU servicing this PMU (on_cpu == -1 means none assigned yet).
 */
static int cxl_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct cxl_pmu_info *info = hlist_entry_safe(node, struct cxl_pmu_info, node);

	/* Already have a servicing CPU; nothing to do */
	if (info->on_cpu != -1)
		return 0;

	info->on_cpu = cpu;
	/*
	 * CPU HP lock is held so we should be guaranteed that the CPU hasn't yet
	 * gone away again.
	 */
	WARN_ON(irq_set_affinity(info->irq, cpumask_of(cpu)));

	return 0;
}
9355d7107c7SJonathan Cameron 
/*
 * CPU hotplug offline callback: if the departing CPU was servicing this
 * PMU, migrate the perf context and IRQ affinity to another online CPU.
 */
static int cxl_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct cxl_pmu_info *info = hlist_entry_safe(node, struct cxl_pmu_info, node);
	unsigned int target;

	/* Not our servicing CPU; nothing to do */
	if (info->on_cpu != cpu)
		return 0;

	info->on_cpu = -1;
	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids) {
		/* Last CPU going down; leave the PMU unserviced */
		dev_err(info->pmu.dev, "Unable to find a suitable CPU\n");
		return 0;
	}

	perf_pmu_migrate_context(&info->pmu, cpu, target);
	info->on_cpu = target;
	/*
	 * CPU HP lock is held so we should be guaranteed that this CPU hasn't yet
	 * gone away.
	 */
	WARN_ON(irq_set_affinity(info->irq, cpumask_of(target)));

	return 0;
}
9615d7107c7SJonathan Cameron 
/*
 * Module init: register a dynamic CPU hotplug state shared by all CXL PMU
 * instances, then register the driver on the cxl bus.  The hotplug state
 * is torn down again if driver registration fails.
 */
static __init int cxl_pmu_init(void)
{
	int rc;

	rc = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
				     "AP_PERF_CXL_PMU_ONLINE",
				     cxl_pmu_online_cpu, cxl_pmu_offline_cpu);
	if (rc < 0)
		return rc;
	cxl_pmu_cpuhp_state_num = rc;

	rc = cxl_driver_register(&cxl_pmu_driver);
	if (rc)
		cpuhp_remove_multi_state(cxl_pmu_cpuhp_state_num);

	return rc;
}
9795d7107c7SJonathan Cameron 
/* Module exit: unregister the driver, then remove the hotplug state */
static __exit void cxl_pmu_exit(void)
{
	cxl_driver_unregister(&cxl_pmu_driver);
	cpuhp_remove_multi_state(cxl_pmu_cpuhp_state_num);
}
9855d7107c7SJonathan Cameron 
MODULE_LICENSE("GPL");
/* Required for symbols exported in the CXL symbol namespace */
MODULE_IMPORT_NS(CXL);
module_init(cxl_pmu_init);
module_exit(cxl_pmu_exit);
/* Autoload when a CXL_DEVICE_PMU device appears on the cxl bus */
MODULE_ALIAS_CXL(CXL_DEVICE_PMU);
991