xref: /openbmc/linux/drivers/dma/idxd/perfmon.h (revision 81dd4d4d)
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2020 Intel Corporation. All rights rsvd. */

#ifndef _PERFMON_H_
#define _PERFMON_H_

#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/sbitmap.h>
#include <linux/dmaengine.h>
#include <linux/percpu-rwsem.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/uuid.h>
#include <linux/idxd.h>
#include <linux/perf_event.h>
#include "registers.h"

static inline struct idxd_pmu *event_to_pmu(struct perf_event *event)
{
	struct idxd_pmu *idxd_pmu;
	struct pmu *pmu;

	pmu = event->pmu;
	idxd_pmu = container_of(pmu, struct idxd_pmu, pmu);

	return idxd_pmu;
}

static inline struct idxd_device *event_to_idxd(struct perf_event *event)
{
	struct idxd_pmu *idxd_pmu;
	struct pmu *pmu;

	pmu = event->pmu;
	idxd_pmu = container_of(pmu, struct idxd_pmu, pmu);

	return idxd_pmu->idxd;
}

static inline struct idxd_device *pmu_to_idxd(struct pmu *pmu)
{
	struct idxd_pmu *idxd_pmu;

	idxd_pmu = container_of(pmu, struct idxd_pmu, pmu);

	return idxd_pmu->idxd;
}

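/*
 * Illustrative only: a minimal sketch of how the container_of() helpers
 * above would typically be used from a pmu callback.
 * perfmon_example_init() is a hypothetical name, not part of this driver.
 *
 *	static int perfmon_example_init(struct perf_event *event)
 *	{
 *		struct idxd_pmu *idxd_pmu = event_to_pmu(event);
 *		struct idxd_device *idxd = event_to_idxd(event);
 *
 *		// e.g. validate event->attr against idxd_pmu capabilities
 *		return 0;
 *	}
 */
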
enum dsa_perf_events {
	DSA_PERF_EVENT_WQ = 0,
	DSA_PERF_EVENT_ENGINE,
	DSA_PERF_EVENT_ADDR_TRANS,
	DSA_PERF_EVENT_OP,
	DSA_PERF_EVENT_COMPL,
	DSA_PERF_EVENT_MAX,
};

enum filter_enc {
	FLT_WQ = 0,
	FLT_TC,
	FLT_PG_SZ,
	FLT_XFER_SZ,
	FLT_ENG,
	FLT_MAX,
};

#define CONFIG_RESET		0x0000000000000001
#define CNTR_RESET		0x0000000000000002
#define CNTR_ENABLE		0x0000000000000001
#define INTR_OVFL		0x0000000000000002

#define COUNTER_FREEZE		0x00000000FFFFFFFF
#define COUNTER_UNFREEZE	0x0000000000000000
#define OVERFLOW_SIZE		32

#define CNTRCFG_ENABLE		BIT(0)
#define CNTRCFG_IRQ_OVERFLOW	BIT(1)
#define CNTRCFG_CATEGORY_SHIFT	8
#define CNTRCFG_EVENT_SHIFT	32

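/*
 * A counter configuration word is assembled from the bits above.  Sketch
 * only; 'category' and 'event' stand for values decoded from perf_event
 * attributes, and the u64 casts matter because CNTRCFG_EVENT_SHIFT is 32:
 *
 *	u64 cntr_cfg = CNTRCFG_ENABLE | CNTRCFG_IRQ_OVERFLOW |
 *		       ((u64)category << CNTRCFG_CATEGORY_SHIFT) |
 *		       ((u64)event << CNTRCFG_EVENT_SHIFT);
 */
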
#define PERFMON_TABLE_OFFSET(_idxd)				\
({								\
	typeof(_idxd) __idxd = (_idxd);				\
	((__idxd)->reg_base + (__idxd)->perfmon_offset);	\
})
#define PERFMON_REG_OFFSET(idxd, offset)			\
	(PERFMON_TABLE_OFFSET(idxd) + (offset))

#define PERFCAP_REG(idxd)	(PERFMON_REG_OFFSET(idxd, IDXD_PERFCAP_OFFSET))
#define PERFRST_REG(idxd)	(PERFMON_REG_OFFSET(idxd, IDXD_PERFRST_OFFSET))
#define OVFSTATUS_REG(idxd)	(PERFMON_REG_OFFSET(idxd, IDXD_OVFSTATUS_OFFSET))
#define PERFFRZ_REG(idxd)	(PERFMON_REG_OFFSET(idxd, IDXD_PERFFRZ_OFFSET))

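/*
 * Sketch of freezing and unfreezing all counters around a sampling pass,
 * assuming PERFFRZ is a 32-bit register with one freeze bit per counter,
 * as the COUNTER_FREEZE/COUNTER_UNFREEZE masks above suggest:
 *
 *	iowrite32(COUNTER_FREEZE, PERFFRZ_REG(idxd));
 *	... sample the CNTRDATA registers ...
 *	iowrite32(COUNTER_UNFREEZE, PERFFRZ_REG(idxd));
 */
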
#define FLTCFG_REG(idxd, cntr, flt)				\
	(PERFMON_REG_OFFSET(idxd, IDXD_FLTCFG_OFFSET) + ((cntr) * 32) + ((flt) * 4))

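/*
 * Per the macro above, each counter owns a 32-byte block of 32-bit filter
 * registers indexed by enum filter_enc.  Hypothetical example restricting
 * counter 'cntr' to a set of work queues ('wq_mask' is an assumed value):
 *
 *	iowrite32(wq_mask, FLTCFG_REG(idxd, cntr, FLT_WQ));
 */
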
#define CNTRCFG_REG(idxd, cntr)					\
	(PERFMON_REG_OFFSET(idxd, IDXD_CNTRCFG_OFFSET) + ((cntr) * 8))
#define CNTRDATA_REG(idxd, cntr)				\
	(PERFMON_REG_OFFSET(idxd, IDXD_CNTRDATA_OFFSET) + ((cntr) * 8))
#define CNTRCAP_REG(idxd, cntr)					\
	(PERFMON_REG_OFFSET(idxd, IDXD_CNTRCAP_OFFSET) + ((cntr) * 8))

#define EVNTCAP_REG(idxd, category) \
	(PERFMON_REG_OFFSET(idxd, IDXD_EVNTCAP_OFFSET) + ((category) * 8))

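/*
 * The CNTRCFG/CNTRDATA/CNTRCAP/EVNTCAP registers are 64 bits wide at an
 * 8-byte stride, so a raw counter value would be sampled along these
 * lines (sketch; assumes a 64-bit MMIO accessor is available):
 *
 *	u64 count = ioread64(CNTRDATA_REG(idxd, cntr));
 */
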
#define DEFINE_PERFMON_FORMAT_ATTR(_name, _format)			\
static ssize_t __perfmon_idxd_##_name##_show(struct kobject *kobj,	\
				struct kobj_attribute *attr,		\
				char *page)				\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
static struct kobj_attribute format_attr_idxd_##_name =		\
	__ATTR(_name, 0444, __perfmon_idxd_##_name##_show, NULL)

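/*
 * DEFINE_PERFMON_FORMAT_ATTR() generates a read-only sysfs attribute that
 * simply prints the given format string.  Illustrative use (the exact
 * config field layout shown is an example, not a statement of the
 * hardware ABI):
 *
 *	DEFINE_PERFMON_FORMAT_ATTR(event_category, "config:28-31");
 *
 * which yields 'format_attr_idxd_event_category' for inclusion in the
 * pmu's format attribute group.
 */
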
#endif