1 // SPDX-License-Identifier: GPL-2.0
2 /* SandyBridge-EP/IvyTown uncore support */
3 #include "uncore.h"
4 #include "uncore_discovery.h"
5 
6 /* SNB-EP pci bus to socket mapping */
7 #define SNBEP_CPUNODEID			0x40
8 #define SNBEP_GIDNIDMAP			0x54
9 
10 /* SNB-EP Box level control */
11 #define SNBEP_PMON_BOX_CTL_RST_CTRL	(1 << 0)
12 #define SNBEP_PMON_BOX_CTL_RST_CTRS	(1 << 1)
13 #define SNBEP_PMON_BOX_CTL_FRZ		(1 << 8)
14 #define SNBEP_PMON_BOX_CTL_FRZ_EN	(1 << 16)
15 #define SNBEP_PMON_BOX_CTL_INT		(SNBEP_PMON_BOX_CTL_RST_CTRL | \
16 					 SNBEP_PMON_BOX_CTL_RST_CTRS | \
17 					 SNBEP_PMON_BOX_CTL_FRZ_EN)
18 /* SNB-EP event control */
19 #define SNBEP_PMON_CTL_EV_SEL_MASK	0x000000ff
20 #define SNBEP_PMON_CTL_UMASK_MASK	0x0000ff00
21 #define SNBEP_PMON_CTL_RST		(1 << 17)
22 #define SNBEP_PMON_CTL_EDGE_DET		(1 << 18)
23 #define SNBEP_PMON_CTL_EV_SEL_EXT	(1 << 21)
24 #define SNBEP_PMON_CTL_EN		(1 << 22)
25 #define SNBEP_PMON_CTL_INVERT		(1 << 23)
26 #define SNBEP_PMON_CTL_TRESH_MASK	0xff000000
27 #define SNBEP_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
28 					 SNBEP_PMON_CTL_UMASK_MASK | \
29 					 SNBEP_PMON_CTL_EDGE_DET | \
30 					 SNBEP_PMON_CTL_INVERT | \
31 					 SNBEP_PMON_CTL_TRESH_MASK)
32 
33 /* SNB-EP Ubox event control */
34 #define SNBEP_U_MSR_PMON_CTL_TRESH_MASK		0x1f000000
35 #define SNBEP_U_MSR_PMON_RAW_EVENT_MASK		\
36 				(SNBEP_PMON_CTL_EV_SEL_MASK | \
37 				 SNBEP_PMON_CTL_UMASK_MASK | \
38 				 SNBEP_PMON_CTL_EDGE_DET | \
39 				 SNBEP_PMON_CTL_INVERT | \
40 				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
41 
42 #define SNBEP_CBO_PMON_CTL_TID_EN		(1 << 19)
43 #define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK	(SNBEP_PMON_RAW_EVENT_MASK | \
44 						 SNBEP_CBO_PMON_CTL_TID_EN)
45 
46 /* SNB-EP PCU event control */
47 #define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK	0x0000c000
48 #define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK	0x1f000000
49 #define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT	(1 << 30)
50 #define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET	(1 << 31)
51 #define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
52 				(SNBEP_PMON_CTL_EV_SEL_MASK | \
53 				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
54 				 SNBEP_PMON_CTL_EDGE_DET | \
55 				 SNBEP_PMON_CTL_INVERT | \
56 				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
57 				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
58 				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
59 
60 #define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
61 				(SNBEP_PMON_RAW_EVENT_MASK | \
62 				 SNBEP_PMON_CTL_EV_SEL_EXT)
63 
64 /* SNB-EP pci control register */
65 #define SNBEP_PCI_PMON_BOX_CTL			0xf4
66 #define SNBEP_PCI_PMON_CTL0			0xd8
67 /* SNB-EP pci counter register */
68 #define SNBEP_PCI_PMON_CTR0			0xa0
69 
70 /* SNB-EP home agent register */
71 #define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0	0x40
72 #define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1	0x44
73 #define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH	0x48
74 /* SNB-EP memory controller register */
75 #define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL		0xf0
76 #define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR		0xd0
77 /* SNB-EP QPI register */
78 #define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0		0x228
79 #define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1		0x22c
80 #define SNBEP_Q_Py_PCI_PMON_PKT_MASK0		0x238
81 #define SNBEP_Q_Py_PCI_PMON_PKT_MASK1		0x23c
82 
83 /* SNB-EP Ubox register */
84 #define SNBEP_U_MSR_PMON_CTR0			0xc16
85 #define SNBEP_U_MSR_PMON_CTL0			0xc10
86 
87 #define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL		0xc08
88 #define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR		0xc09
89 
90 /* SNB-EP Cbo register */
91 #define SNBEP_C0_MSR_PMON_CTR0			0xd16
92 #define SNBEP_C0_MSR_PMON_CTL0			0xd10
93 #define SNBEP_C0_MSR_PMON_BOX_CTL		0xd04
94 #define SNBEP_C0_MSR_PMON_BOX_FILTER		0xd14
95 #define SNBEP_CBO_MSR_OFFSET			0x20
96 
97 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID	0x1f
98 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID	0x3fc00
99 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE	0x7c0000
100 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC	0xff800000
101 
102 #define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) {	\
103 	.event = (e),				\
104 	.msr = SNBEP_C0_MSR_PMON_BOX_FILTER,	\
105 	.config_mask = (m),			\
106 	.idx = (i)				\
107 }
108 
109 /* SNB-EP PCU register */
110 #define SNBEP_PCU_MSR_PMON_CTR0			0xc36
111 #define SNBEP_PCU_MSR_PMON_CTL0			0xc30
112 #define SNBEP_PCU_MSR_PMON_BOX_CTL		0xc24
113 #define SNBEP_PCU_MSR_PMON_BOX_FILTER		0xc34
114 #define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK	0xffffffff
115 #define SNBEP_PCU_MSR_CORE_C3_CTR		0x3fc
116 #define SNBEP_PCU_MSR_CORE_C6_CTR		0x3fd
117 
118 /* IVBEP event control */
119 #define IVBEP_PMON_BOX_CTL_INT		(SNBEP_PMON_BOX_CTL_RST_CTRL | \
120 					 SNBEP_PMON_BOX_CTL_RST_CTRS)
121 #define IVBEP_PMON_RAW_EVENT_MASK		(SNBEP_PMON_CTL_EV_SEL_MASK | \
122 					 SNBEP_PMON_CTL_UMASK_MASK | \
123 					 SNBEP_PMON_CTL_EDGE_DET | \
124 					 SNBEP_PMON_CTL_TRESH_MASK)
125 /* IVBEP Ubox */
126 #define IVBEP_U_MSR_PMON_GLOBAL_CTL		0xc00
127 #define IVBEP_U_PMON_GLOBAL_FRZ_ALL		(1 << 31)
128 #define IVBEP_U_PMON_GLOBAL_UNFRZ_ALL		(1 << 29)
129 
130 #define IVBEP_U_MSR_PMON_RAW_EVENT_MASK	\
131 				(SNBEP_PMON_CTL_EV_SEL_MASK | \
132 				 SNBEP_PMON_CTL_UMASK_MASK | \
133 				 SNBEP_PMON_CTL_EDGE_DET | \
134 				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
135 /* IVBEP Cbo */
136 #define IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK		(IVBEP_PMON_RAW_EVENT_MASK | \
137 						 SNBEP_CBO_PMON_CTL_TID_EN)
138 
139 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_TID		(0x1fULL << 0)
140 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 5)
141 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x3fULL << 17)
142 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_NID		(0xffffULL << 32)
143 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC		(0x1ffULL << 52)
144 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_C6		(0x1ULL << 61)
145 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_NC		(0x1ULL << 62)
146 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)
147 
148 /* IVBEP home agent */
149 #define IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST		(1 << 16)
150 #define IVBEP_HA_PCI_PMON_RAW_EVENT_MASK		\
151 				(IVBEP_PMON_RAW_EVENT_MASK | \
152 				 IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST)
153 /* IVBEP PCU */
154 #define IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
155 				(SNBEP_PMON_CTL_EV_SEL_MASK | \
156 				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
157 				 SNBEP_PMON_CTL_EDGE_DET | \
158 				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
159 				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
160 				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
161 /* IVBEP QPI */
162 #define IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
163 				(IVBEP_PMON_RAW_EVENT_MASK | \
164 				 SNBEP_PMON_CTL_EV_SEL_EXT)
165 
166 #define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
167 				((1ULL << (n)) - 1)))
168 
169 /* Haswell-EP Ubox */
170 #define HSWEP_U_MSR_PMON_CTR0			0x709
171 #define HSWEP_U_MSR_PMON_CTL0			0x705
172 #define HSWEP_U_MSR_PMON_FILTER			0x707
173 
174 #define HSWEP_U_MSR_PMON_UCLK_FIXED_CTL		0x703
175 #define HSWEP_U_MSR_PMON_UCLK_FIXED_CTR		0x704
176 
177 #define HSWEP_U_MSR_PMON_BOX_FILTER_TID		(0x1 << 0)
178 #define HSWEP_U_MSR_PMON_BOX_FILTER_CID		(0x1fULL << 1)
179 #define HSWEP_U_MSR_PMON_BOX_FILTER_MASK \
180 					(HSWEP_U_MSR_PMON_BOX_FILTER_TID | \
181 					 HSWEP_U_MSR_PMON_BOX_FILTER_CID)
182 
183 /* Haswell-EP CBo */
184 #define HSWEP_C0_MSR_PMON_CTR0			0xe08
185 #define HSWEP_C0_MSR_PMON_CTL0			0xe01
186 #define HSWEP_C0_MSR_PMON_BOX_CTL			0xe00
187 #define HSWEP_C0_MSR_PMON_BOX_FILTER0		0xe05
188 #define HSWEP_CBO_MSR_OFFSET			0x10
189 
190 
191 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_TID		(0x3fULL << 0)
192 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 6)
193 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x7fULL << 17)
194 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_NID		(0xffffULL << 32)
195 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC		(0x1ffULL << 52)
196 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_C6		(0x1ULL << 61)
197 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_NC		(0x1ULL << 62)
198 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)
199 
200 
201 /* Haswell-EP Sbox */
202 #define HSWEP_S0_MSR_PMON_CTR0			0x726
203 #define HSWEP_S0_MSR_PMON_CTL0			0x721
204 #define HSWEP_S0_MSR_PMON_BOX_CTL			0x720
205 #define HSWEP_SBOX_MSR_OFFSET			0xa
206 #define HSWEP_S_MSR_PMON_RAW_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
207 						 SNBEP_CBO_PMON_CTL_TID_EN)
208 
209 /* Haswell-EP PCU */
210 #define HSWEP_PCU_MSR_PMON_CTR0			0x717
211 #define HSWEP_PCU_MSR_PMON_CTL0			0x711
212 #define HSWEP_PCU_MSR_PMON_BOX_CTL		0x710
213 #define HSWEP_PCU_MSR_PMON_BOX_FILTER		0x715
214 
215 /* KNL Ubox */
216 #define KNL_U_MSR_PMON_RAW_EVENT_MASK \
217 					(SNBEP_U_MSR_PMON_RAW_EVENT_MASK | \
218 						SNBEP_CBO_PMON_CTL_TID_EN)
219 /* KNL CHA */
220 #define KNL_CHA_MSR_OFFSET			0xc
221 #define KNL_CHA_MSR_PMON_CTL_QOR		(1 << 16)
222 #define KNL_CHA_MSR_PMON_RAW_EVENT_MASK \
223 					(SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK | \
224 					 KNL_CHA_MSR_PMON_CTL_QOR)
225 #define KNL_CHA_MSR_PMON_BOX_FILTER_TID		0x1ff
226 #define KNL_CHA_MSR_PMON_BOX_FILTER_STATE	(7 << 18)
227 #define KNL_CHA_MSR_PMON_BOX_FILTER_OP		(0xfffffe2aULL << 32)
228 #define KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE	(0x1ULL << 32)
229 #define KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE	(0x1ULL << 33)
230 #define KNL_CHA_MSR_PMON_BOX_FILTER_NNC		(0x1ULL << 37)
231 
232 /* KNL EDC/MC UCLK */
233 #define KNL_UCLK_MSR_PMON_CTR0_LOW		0x400
234 #define KNL_UCLK_MSR_PMON_CTL0			0x420
235 #define KNL_UCLK_MSR_PMON_BOX_CTL		0x430
236 #define KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW	0x44c
237 #define KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL	0x454
238 #define KNL_PMON_FIXED_CTL_EN			0x1
239 
240 /* KNL EDC */
241 #define KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW		0xa00
242 #define KNL_EDC0_ECLK_MSR_PMON_CTL0		0xa20
243 #define KNL_EDC0_ECLK_MSR_PMON_BOX_CTL		0xa30
244 #define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW	0xa3c
245 #define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL	0xa44
246 
247 /* KNL MC */
248 #define KNL_MC0_CH0_MSR_PMON_CTR0_LOW		0xb00
249 #define KNL_MC0_CH0_MSR_PMON_CTL0		0xb20
250 #define KNL_MC0_CH0_MSR_PMON_BOX_CTL		0xb30
251 #define KNL_MC0_CH0_MSR_PMON_FIXED_LOW		0xb3c
252 #define KNL_MC0_CH0_MSR_PMON_FIXED_CTL		0xb44
253 
254 /* KNL IRP */
255 #define KNL_IRP_PCI_PMON_BOX_CTL		0xf0
256 #define KNL_IRP_PCI_PMON_RAW_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
257 						 KNL_CHA_MSR_PMON_CTL_QOR)
258 /* KNL PCU */
259 #define KNL_PCU_PMON_CTL_EV_SEL_MASK		0x0000007f
260 #define KNL_PCU_PMON_CTL_USE_OCC_CTR		(1 << 7)
261 #define KNL_PCU_MSR_PMON_CTL_TRESH_MASK		0x3f000000
262 #define KNL_PCU_MSR_PMON_RAW_EVENT_MASK	\
263 				(KNL_PCU_PMON_CTL_EV_SEL_MASK | \
264 				 KNL_PCU_PMON_CTL_USE_OCC_CTR | \
265 				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
266 				 SNBEP_PMON_CTL_EDGE_DET | \
267 				 SNBEP_CBO_PMON_CTL_TID_EN | \
268 				 SNBEP_PMON_CTL_INVERT | \
269 				 KNL_PCU_MSR_PMON_CTL_TRESH_MASK | \
270 				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
271 				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
272 
273 /* SKX pci bus to socket mapping */
274 #define SKX_CPUNODEID			0xc0
275 #define SKX_GIDNIDMAP			0xd4
276 
277 /*
278  * The CPU_BUS_NUMBER MSR returns the values of the respective CPUBUSNO CSR
279  * that BIOS programmed. MSR has package scope.
280  * |  Bit  |  Default  |  Description
281  * | [63]  |    00h    | VALID - When set, indicates the CPU bus
282  *                       numbers have been initialized. (RO)
283  * |[62:48]|    ---    | Reserved
284  * |[47:40]|    00h    | BUS_NUM_5 - Return the bus number BIOS assigned
285  *                       CPUBUSNO(5). (RO)
286  * |[39:32]|    00h    | BUS_NUM_4 - Return the bus number BIOS assigned
287  *                       CPUBUSNO(4). (RO)
288  * |[31:24]|    00h    | BUS_NUM_3 - Return the bus number BIOS assigned
289  *                       CPUBUSNO(3). (RO)
290  * |[23:16]|    00h    | BUS_NUM_2 - Return the bus number BIOS assigned
291  *                       CPUBUSNO(2). (RO)
292  * |[15:8] |    00h    | BUS_NUM_1 - Return the bus number BIOS assigned
293  *                       CPUBUSNO(1). (RO)
294  * | [7:0] |    00h    | BUS_NUM_0 - Return the bus number BIOS assigned
295  *                       CPUBUSNO(0). (RO)
296  */
297 #define SKX_MSR_CPU_BUS_NUMBER		0x300
298 #define SKX_MSR_CPU_BUS_VALID_BIT	(1ULL << 63)
299 #define BUS_NUM_STRIDE			8
300 
301 /* SKX CHA */
302 #define SKX_CHA_MSR_PMON_BOX_FILTER_TID		(0x1ffULL << 0)
303 #define SKX_CHA_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 9)
304 #define SKX_CHA_MSR_PMON_BOX_FILTER_STATE	(0x3ffULL << 17)
305 #define SKX_CHA_MSR_PMON_BOX_FILTER_REM		(0x1ULL << 32)
306 #define SKX_CHA_MSR_PMON_BOX_FILTER_LOC		(0x1ULL << 33)
307 #define SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC	(0x1ULL << 35)
308 #define SKX_CHA_MSR_PMON_BOX_FILTER_NM		(0x1ULL << 36)
309 #define SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM	(0x1ULL << 37)
310 #define SKX_CHA_MSR_PMON_BOX_FILTER_OPC0	(0x3ffULL << 41)
311 #define SKX_CHA_MSR_PMON_BOX_FILTER_OPC1	(0x3ffULL << 51)
312 #define SKX_CHA_MSR_PMON_BOX_FILTER_C6		(0x1ULL << 61)
313 #define SKX_CHA_MSR_PMON_BOX_FILTER_NC		(0x1ULL << 62)
314 #define SKX_CHA_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)
315 
316 /* SKX IIO */
317 #define SKX_IIO0_MSR_PMON_CTL0		0xa48
318 #define SKX_IIO0_MSR_PMON_CTR0		0xa41
319 #define SKX_IIO0_MSR_PMON_BOX_CTL	0xa40
320 #define SKX_IIO_MSR_OFFSET		0x20
321 
322 #define SKX_PMON_CTL_TRESH_MASK		(0xff << 24)
323 #define SKX_PMON_CTL_TRESH_MASK_EXT	(0xf)
324 #define SKX_PMON_CTL_CH_MASK		(0xff << 4)
325 #define SKX_PMON_CTL_FC_MASK		(0x7 << 12)
326 #define SKX_IIO_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
327 					 SNBEP_PMON_CTL_UMASK_MASK | \
328 					 SNBEP_PMON_CTL_EDGE_DET | \
329 					 SNBEP_PMON_CTL_INVERT | \
330 					 SKX_PMON_CTL_TRESH_MASK)
331 #define SKX_IIO_PMON_RAW_EVENT_MASK_EXT	(SKX_PMON_CTL_TRESH_MASK_EXT | \
332 					 SKX_PMON_CTL_CH_MASK | \
333 					 SKX_PMON_CTL_FC_MASK)
334 
335 /* SKX IRP */
336 #define SKX_IRP0_MSR_PMON_CTL0		0xa5b
337 #define SKX_IRP0_MSR_PMON_CTR0		0xa59
338 #define SKX_IRP0_MSR_PMON_BOX_CTL	0xa58
339 #define SKX_IRP_MSR_OFFSET		0x20
340 
341 /* SKX UPI */
342 #define SKX_UPI_PCI_PMON_CTL0		0x350
343 #define SKX_UPI_PCI_PMON_CTR0		0x318
344 #define SKX_UPI_PCI_PMON_BOX_CTL	0x378
345 #define SKX_UPI_CTL_UMASK_EXT		0xffefff
346 
347 /* SKX M2M */
348 #define SKX_M2M_PCI_PMON_CTL0		0x228
349 #define SKX_M2M_PCI_PMON_CTR0		0x200
350 #define SKX_M2M_PCI_PMON_BOX_CTL	0x258
351 
352 /* Memory Map registers device ID */
353 #define SNR_ICX_MESH2IIO_MMAP_DID		0x9a2
354 #define SNR_ICX_SAD_CONTROL_CFG		0x3f4
355 
/* Getting I/O stack id in SAD_CONTROL_CFG notation */
357 #define SAD_CONTROL_STACK_ID(data)		(((data) >> 4) & 0x7)
358 
359 /* SNR Ubox */
360 #define SNR_U_MSR_PMON_CTR0			0x1f98
361 #define SNR_U_MSR_PMON_CTL0			0x1f91
362 #define SNR_U_MSR_PMON_UCLK_FIXED_CTL		0x1f93
363 #define SNR_U_MSR_PMON_UCLK_FIXED_CTR		0x1f94
364 
365 /* SNR CHA */
366 #define SNR_CHA_RAW_EVENT_MASK_EXT		0x3ffffff
367 #define SNR_CHA_MSR_PMON_CTL0			0x1c01
368 #define SNR_CHA_MSR_PMON_CTR0			0x1c08
369 #define SNR_CHA_MSR_PMON_BOX_CTL		0x1c00
370 #define SNR_C0_MSR_PMON_BOX_FILTER0		0x1c05
371 
372 
373 /* SNR IIO */
374 #define SNR_IIO_MSR_PMON_CTL0			0x1e08
375 #define SNR_IIO_MSR_PMON_CTR0			0x1e01
376 #define SNR_IIO_MSR_PMON_BOX_CTL		0x1e00
377 #define SNR_IIO_MSR_OFFSET			0x10
378 #define SNR_IIO_PMON_RAW_EVENT_MASK_EXT		0x7ffff
379 
380 /* SNR IRP */
381 #define SNR_IRP0_MSR_PMON_CTL0			0x1ea8
382 #define SNR_IRP0_MSR_PMON_CTR0			0x1ea1
383 #define SNR_IRP0_MSR_PMON_BOX_CTL		0x1ea0
384 #define SNR_IRP_MSR_OFFSET			0x10
385 
386 /* SNR M2PCIE */
387 #define SNR_M2PCIE_MSR_PMON_CTL0		0x1e58
388 #define SNR_M2PCIE_MSR_PMON_CTR0		0x1e51
389 #define SNR_M2PCIE_MSR_PMON_BOX_CTL		0x1e50
390 #define SNR_M2PCIE_MSR_OFFSET			0x10
391 
392 /* SNR PCU */
393 #define SNR_PCU_MSR_PMON_CTL0			0x1ef1
394 #define SNR_PCU_MSR_PMON_CTR0			0x1ef8
395 #define SNR_PCU_MSR_PMON_BOX_CTL		0x1ef0
396 #define SNR_PCU_MSR_PMON_BOX_FILTER		0x1efc
397 
398 /* SNR M2M */
399 #define SNR_M2M_PCI_PMON_CTL0			0x468
400 #define SNR_M2M_PCI_PMON_CTR0			0x440
401 #define SNR_M2M_PCI_PMON_BOX_CTL		0x438
402 #define SNR_M2M_PCI_PMON_UMASK_EXT		0xff
403 
404 /* SNR PCIE3 */
405 #define SNR_PCIE3_PCI_PMON_CTL0			0x508
406 #define SNR_PCIE3_PCI_PMON_CTR0			0x4e8
407 #define SNR_PCIE3_PCI_PMON_BOX_CTL		0x4e0
408 
409 /* SNR IMC */
410 #define SNR_IMC_MMIO_PMON_FIXED_CTL		0x54
411 #define SNR_IMC_MMIO_PMON_FIXED_CTR		0x38
412 #define SNR_IMC_MMIO_PMON_CTL0			0x40
413 #define SNR_IMC_MMIO_PMON_CTR0			0x8
414 #define SNR_IMC_MMIO_PMON_BOX_CTL		0x22800
415 #define SNR_IMC_MMIO_OFFSET			0x4000
416 #define SNR_IMC_MMIO_SIZE			0x4000
417 #define SNR_IMC_MMIO_BASE_OFFSET		0xd0
418 #define SNR_IMC_MMIO_BASE_MASK			0x1FFFFFFF
419 #define SNR_IMC_MMIO_MEM0_OFFSET		0xd8
420 #define SNR_IMC_MMIO_MEM0_MASK			0x7FF
421 
422 /* ICX CHA */
423 #define ICX_C34_MSR_PMON_CTR0			0xb68
424 #define ICX_C34_MSR_PMON_CTL0			0xb61
425 #define ICX_C34_MSR_PMON_BOX_CTL		0xb60
426 #define ICX_C34_MSR_PMON_BOX_FILTER0		0xb65
427 
428 /* ICX IIO */
429 #define ICX_IIO_MSR_PMON_CTL0			0xa58
430 #define ICX_IIO_MSR_PMON_CTR0			0xa51
431 #define ICX_IIO_MSR_PMON_BOX_CTL		0xa50
432 
433 /* ICX IRP */
434 #define ICX_IRP0_MSR_PMON_CTL0			0xa4d
435 #define ICX_IRP0_MSR_PMON_CTR0			0xa4b
436 #define ICX_IRP0_MSR_PMON_BOX_CTL		0xa4a
437 
438 /* ICX M2PCIE */
439 #define ICX_M2PCIE_MSR_PMON_CTL0		0xa46
440 #define ICX_M2PCIE_MSR_PMON_CTR0		0xa41
441 #define ICX_M2PCIE_MSR_PMON_BOX_CTL		0xa40
442 
443 /* ICX UPI */
444 #define ICX_UPI_PCI_PMON_CTL0			0x350
445 #define ICX_UPI_PCI_PMON_CTR0			0x320
446 #define ICX_UPI_PCI_PMON_BOX_CTL		0x318
447 #define ICX_UPI_CTL_UMASK_EXT			0xffffff
448 #define ICX_UBOX_DID				0x3450
449 
/* ICX M3UPI */
451 #define ICX_M3UPI_PCI_PMON_CTL0			0xd8
452 #define ICX_M3UPI_PCI_PMON_CTR0			0xa8
453 #define ICX_M3UPI_PCI_PMON_BOX_CTL		0xa0
454 
455 /* ICX IMC */
456 #define ICX_NUMBER_IMC_CHN			3
457 #define ICX_IMC_MEM_STRIDE			0x4
458 
459 /* SPR */
460 #define SPR_RAW_EVENT_MASK_EXT			0xffffff
461 #define SPR_UBOX_DID				0x3250
462 
463 /* SPR CHA */
464 #define SPR_CHA_PMON_CTL_TID_EN			(1 << 16)
465 #define SPR_CHA_PMON_EVENT_MASK			(SNBEP_PMON_RAW_EVENT_MASK | \
466 						 SPR_CHA_PMON_CTL_TID_EN)
467 #define SPR_CHA_PMON_BOX_FILTER_TID		0x3ff
468 
469 #define SPR_C0_MSR_PMON_BOX_FILTER0		0x200e
470 
471 DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
472 DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6");
473 DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
474 DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7");
475 DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
476 DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-43,45-55");
477 DEFINE_UNCORE_FORMAT_ATTR(umask_ext2, umask, "config:8-15,32-57");
478 DEFINE_UNCORE_FORMAT_ATTR(umask_ext3, umask, "config:8-15,32-39");
479 DEFINE_UNCORE_FORMAT_ATTR(umask_ext4, umask, "config:8-15,32-55");
480 DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
481 DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
482 DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
483 DEFINE_UNCORE_FORMAT_ATTR(tid_en2, tid_en, "config:16");
484 DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
485 DEFINE_UNCORE_FORMAT_ATTR(thresh9, thresh, "config:24-35");
486 DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
487 DEFINE_UNCORE_FORMAT_ATTR(thresh6, thresh, "config:24-29");
488 DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
489 DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
490 DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
491 DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
492 DEFINE_UNCORE_FORMAT_ATTR(occ_edge_det, occ_edge_det, "config:31");
493 DEFINE_UNCORE_FORMAT_ATTR(ch_mask, ch_mask, "config:36-43");
494 DEFINE_UNCORE_FORMAT_ATTR(ch_mask2, ch_mask, "config:36-47");
495 DEFINE_UNCORE_FORMAT_ATTR(fc_mask, fc_mask, "config:44-46");
496 DEFINE_UNCORE_FORMAT_ATTR(fc_mask2, fc_mask, "config:48-50");
497 DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
498 DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0");
499 DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5");
500 DEFINE_UNCORE_FORMAT_ATTR(filter_tid4, filter_tid, "config1:0-8");
501 DEFINE_UNCORE_FORMAT_ATTR(filter_tid5, filter_tid, "config1:0-9");
502 DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
503 DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
504 DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
505 DEFINE_UNCORE_FORMAT_ATTR(filter_link3, filter_link, "config1:12");
506 DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
507 DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
508 DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
509 DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
510 DEFINE_UNCORE_FORMAT_ATTR(filter_state3, filter_state, "config1:17-23");
511 DEFINE_UNCORE_FORMAT_ATTR(filter_state4, filter_state, "config1:18-20");
512 DEFINE_UNCORE_FORMAT_ATTR(filter_state5, filter_state, "config1:17-26");
513 DEFINE_UNCORE_FORMAT_ATTR(filter_rem, filter_rem, "config1:32");
514 DEFINE_UNCORE_FORMAT_ATTR(filter_loc, filter_loc, "config1:33");
515 DEFINE_UNCORE_FORMAT_ATTR(filter_nm, filter_nm, "config1:36");
516 DEFINE_UNCORE_FORMAT_ATTR(filter_not_nm, filter_not_nm, "config1:37");
517 DEFINE_UNCORE_FORMAT_ATTR(filter_local, filter_local, "config1:33");
518 DEFINE_UNCORE_FORMAT_ATTR(filter_all_op, filter_all_op, "config1:35");
519 DEFINE_UNCORE_FORMAT_ATTR(filter_nnm, filter_nnm, "config1:37");
520 DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
521 DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
522 DEFINE_UNCORE_FORMAT_ATTR(filter_opc3, filter_opc, "config1:41-60");
523 DEFINE_UNCORE_FORMAT_ATTR(filter_opc_0, filter_opc0, "config1:41-50");
524 DEFINE_UNCORE_FORMAT_ATTR(filter_opc_1, filter_opc1, "config1:51-60");
525 DEFINE_UNCORE_FORMAT_ATTR(filter_nc, filter_nc, "config1:62");
526 DEFINE_UNCORE_FORMAT_ATTR(filter_c6, filter_c6, "config1:61");
527 DEFINE_UNCORE_FORMAT_ATTR(filter_isoc, filter_isoc, "config1:63");
528 DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
529 DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
530 DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
531 DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
532 DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
533 DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
534 DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
535 DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
536 DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
537 DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
538 DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
539 DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
540 DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
541 DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
542 DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
543 DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
544 DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
545 DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
546 DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
547 DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
548 DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
549 DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");
550 
snbep_uncore_pci_disable_box(struct intel_uncore_box * box)551 static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
552 {
553 	struct pci_dev *pdev = box->pci_dev;
554 	int box_ctl = uncore_pci_box_ctl(box);
555 	u32 config = 0;
556 
557 	if (!pci_read_config_dword(pdev, box_ctl, &config)) {
558 		config |= SNBEP_PMON_BOX_CTL_FRZ;
559 		pci_write_config_dword(pdev, box_ctl, config);
560 	}
561 }
562 
snbep_uncore_pci_enable_box(struct intel_uncore_box * box)563 static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
564 {
565 	struct pci_dev *pdev = box->pci_dev;
566 	int box_ctl = uncore_pci_box_ctl(box);
567 	u32 config = 0;
568 
569 	if (!pci_read_config_dword(pdev, box_ctl, &config)) {
570 		config &= ~SNBEP_PMON_BOX_CTL_FRZ;
571 		pci_write_config_dword(pdev, box_ctl, config);
572 	}
573 }
574 
snbep_uncore_pci_enable_event(struct intel_uncore_box * box,struct perf_event * event)575 static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
576 {
577 	struct pci_dev *pdev = box->pci_dev;
578 	struct hw_perf_event *hwc = &event->hw;
579 
580 	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
581 }
582 
snbep_uncore_pci_disable_event(struct intel_uncore_box * box,struct perf_event * event)583 static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
584 {
585 	struct pci_dev *pdev = box->pci_dev;
586 	struct hw_perf_event *hwc = &event->hw;
587 
588 	pci_write_config_dword(pdev, hwc->config_base, hwc->config);
589 }
590 
snbep_uncore_pci_read_counter(struct intel_uncore_box * box,struct perf_event * event)591 static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
592 {
593 	struct pci_dev *pdev = box->pci_dev;
594 	struct hw_perf_event *hwc = &event->hw;
595 	u64 count = 0;
596 
597 	pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
598 	pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);
599 
600 	return count;
601 }
602 
snbep_uncore_pci_init_box(struct intel_uncore_box * box)603 static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
604 {
605 	struct pci_dev *pdev = box->pci_dev;
606 	int box_ctl = uncore_pci_box_ctl(box);
607 
608 	pci_write_config_dword(pdev, box_ctl, SNBEP_PMON_BOX_CTL_INT);
609 }
610 
snbep_uncore_msr_disable_box(struct intel_uncore_box * box)611 static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
612 {
613 	u64 config;
614 	unsigned msr;
615 
616 	msr = uncore_msr_box_ctl(box);
617 	if (msr) {
618 		rdmsrl(msr, config);
619 		config |= SNBEP_PMON_BOX_CTL_FRZ;
620 		wrmsrl(msr, config);
621 	}
622 }
623 
snbep_uncore_msr_enable_box(struct intel_uncore_box * box)624 static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
625 {
626 	u64 config;
627 	unsigned msr;
628 
629 	msr = uncore_msr_box_ctl(box);
630 	if (msr) {
631 		rdmsrl(msr, config);
632 		config &= ~SNBEP_PMON_BOX_CTL_FRZ;
633 		wrmsrl(msr, config);
634 	}
635 }
636 
snbep_uncore_msr_enable_event(struct intel_uncore_box * box,struct perf_event * event)637 static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
638 {
639 	struct hw_perf_event *hwc = &event->hw;
640 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
641 
642 	if (reg1->idx != EXTRA_REG_NONE)
643 		wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));
644 
645 	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
646 }
647 
snbep_uncore_msr_disable_event(struct intel_uncore_box * box,struct perf_event * event)648 static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
649 					struct perf_event *event)
650 {
651 	struct hw_perf_event *hwc = &event->hw;
652 
653 	wrmsrl(hwc->config_base, hwc->config);
654 }
655 
snbep_uncore_msr_init_box(struct intel_uncore_box * box)656 static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
657 {
658 	unsigned msr = uncore_msr_box_ctl(box);
659 
660 	if (msr)
661 		wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
662 }
663 
/*
 * Generic SNB-EP PMON sysfs format: event/umask/edge/inv plus the
 * 8-bit threshold field (config bits 24-31, see thresh8 above).
 */
static struct attribute *snbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
672 
/*
 * SNB-EP Ubox format: same as the generic set but the threshold is only
 * 5 bits wide (config bits 24-28, matching
 * SNBEP_U_MSR_PMON_CTL_TRESH_MASK).
 */
static struct attribute *snbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};
681 
/*
 * SNB-EP Cbox format: generic fields plus tid_en (config bit 19) and the
 * filter register fields (tid/nid/state/opc in config1), which map onto
 * the SNBEP_CB0_MSR_PMON_BOX_FILTER_* masks defined above.
 */
static struct attribute *snbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_nid.attr,
	&format_attr_filter_state.attr,
	&format_attr_filter_opc.attr,
	NULL,
};
695 
/*
 * SNB-EP PCU format: occupancy-counter controls (occ_sel/occ_invert/
 * occ_edge) replace the umask, and four band filters live in config1
 * (8 bits each, see filter_band0..3 above).
 */
static struct attribute *snbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};
710 
/*
 * SNB-EP QPI format: event_ext adds the event-select extension bit
 * (config bit 21); the match_*/mask_* fields expose the packet
 * match/mask registers via config1 and config2.
 */
static struct attribute *snbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};
737 
/*
 * Predefined IMC events.  The CAS count scale 6.103515625e-5 is
 * 64 / 2^20, i.e. 64-byte transactions reported in MiB (presumably
 * one CAS per cache line — matches the "MiB" unit below).
 */
static struct uncore_event_desc snbep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};
748 
/*
 * Predefined QPI events.  Event codes above 0xff (0x102, 0x103) use the
 * event-select extension bit (config bit 21, SNBEP_PMON_CTL_EV_SEL_EXT).
 */
static struct uncore_event_desc snbep_uncore_qpi_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,       "event=0x14"),
	INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
	INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x102,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x103,umask=0x04"),
	{ /* end: all zeroes */ },
};
756 
/*
 * sysfs "format" attribute groups, one per box flavour, each wrapping
 * the matching *_formats_attr[] table above.
 */
static const struct attribute_group snbep_uncore_format_group = {
	.name = "format",
	.attrs = snbep_uncore_formats_attr,
};

static const struct attribute_group snbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_ubox_formats_attr,
};

static const struct attribute_group snbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_cbox_formats_attr,
};

static const struct attribute_group snbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = snbep_uncore_pcu_formats_attr,
};

static const struct attribute_group snbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = snbep_uncore_qpi_formats_attr,
};
781 
782 #define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
783 	.disable_box	= snbep_uncore_msr_disable_box,		\
784 	.enable_box	= snbep_uncore_msr_enable_box,		\
785 	.disable_event	= snbep_uncore_msr_disable_event,	\
786 	.enable_event	= snbep_uncore_msr_enable_event,	\
787 	.read_counter	= uncore_msr_read_counter
788 
/*
 * Full SNB-EP MSR ops: the common set plus the SNB-EP init_box.
 * Note: no trailing backslash after the last line -- a stray line
 * continuation here would silently absorb the following source line
 * into the macro body.
 */
#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),			\
	.init_box	= snbep_uncore_msr_init_box
792 
/* Default MSR ops used by the SNB-EP Ubox (and reusable by other MSR boxes). */
static struct intel_uncore_ops snbep_uncore_msr_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};
796 
/*
 * Common PCI-based box ops.  enable_event is intentionally omitted so
 * each user can supply its own (generic for snbep_uncore_pci_ops below,
 * filter-aware snbep_qpi_enable_event for the QPI boxes).
 */
#define SNBEP_UNCORE_PCI_OPS_COMMON_INIT()			\
	.init_box	= snbep_uncore_pci_init_box,		\
	.disable_box	= snbep_uncore_pci_disable_box,		\
	.enable_box	= snbep_uncore_pci_enable_box,		\
	.disable_event	= snbep_uncore_pci_disable_event,	\
	.read_counter	= snbep_uncore_pci_read_counter
803 
804 static struct intel_uncore_ops snbep_uncore_pci_ops = {
805 	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
806 	.enable_event	= snbep_uncore_pci_enable_event,	\
807 };
808 
/*
 * C-Box event constraints: (event code, counter mask) pairs restricting
 * which of the box's counters may count a given event.
 */
static struct event_constraint snbep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0xe),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	EVENT_CONSTRAINT_END
};
838 
/* R2PCIe event constraints (same (event, counter mask) scheme as the C-Box table). */
static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	EVENT_CONSTRAINT_END
};
852 
/* R3QPI event constraints (same (event, counter mask) scheme as the C-Box table). */
static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};
884 
/* SNB-EP Ubox: one box, 2 general counters (44 bit) + fixed UCLK counter (48 bit). */
static struct intel_uncore_type snbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &snbep_uncore_msr_ops,
	.format_group	= &snbep_uncore_ubox_format_group,
};
899 
/*
 * Extra-register rules consumed by snbep_cbox_hw_config(): an event whose
 * (hw.config & config_mask) equals the entry's event value requires the
 * filter fields named by idx (one bit per field, decoded by
 * snbep_cbox_filter_mask()).
 */
static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
	EVENT_EXTRA_END
};
928 
snbep_cbox_put_constraint(struct intel_uncore_box * box,struct perf_event * event)929 static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
930 {
931 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
932 	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
933 	int i;
934 
935 	if (uncore_box_is_fake(box))
936 		return;
937 
938 	for (i = 0; i < 5; i++) {
939 		if (reg1->alloc & (0x1 << i))
940 			atomic_sub(1 << (i * 6), &er->ref);
941 	}
942 	reg1->alloc = 0;
943 }
944 
/*
 * Try to allocate the shared C-Box filter-register fields an event needs.
 * Returns NULL on success, or the empty constraint when a required field
 * is already programmed to a conflicting value by another event.
 */
static struct event_constraint *
__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
			    u64 (*cbox_filter_mask)(int fields))
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i, alloc = 0;
	unsigned long flags;
	u64 mask;

	/* No filter fields requested: nothing to allocate. */
	if (reg1->idx == EXTRA_REG_NONE)
		return NULL;

	raw_spin_lock_irqsave(&er->lock, flags);
	/*
	 * Claim each needed field.  er->ref packs one 6-bit refcount per
	 * field; a field is shareable if it is unused or already holds the
	 * same filter value this event wants.
	 */
	for (i = 0; i < 5; i++) {
		if (!(reg1->idx & (0x1 << i)))
			continue;
		/* Real boxes skip fields this event already holds. */
		if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
			continue;

		mask = cbox_filter_mask(0x1 << i);
		if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
		    !((reg1->config ^ er->config) & mask)) {
			atomic_add(1 << (i * 6), &er->ref);
			er->config &= ~mask;
			er->config |= reg1->config & mask;
			alloc |= (0x1 << i);
		} else {
			break;	/* conflicting value in a shared field */
		}
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);
	if (i < 5)
		goto fail;

	/* Record the allocation so put_constraint can release it later. */
	if (!uncore_box_is_fake(box))
		reg1->alloc |= alloc;

	return NULL;
fail:
	/* Roll back only the references taken in this call. */
	for (; i >= 0; i--) {
		if (alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	return &uncore_constraint_empty;
}
991 
snbep_cbox_filter_mask(int fields)992 static u64 snbep_cbox_filter_mask(int fields)
993 {
994 	u64 mask = 0;
995 
996 	if (fields & 0x1)
997 		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
998 	if (fields & 0x2)
999 		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
1000 	if (fields & 0x4)
1001 		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
1002 	if (fields & 0x8)
1003 		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
1004 
1005 	return mask;
1006 }
1007 
static struct event_constraint *
snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	/* Shared filter allocation using the SNB-EP C-Box field layout. */
	return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
}
1013 
snbep_cbox_hw_config(struct intel_uncore_box * box,struct perf_event * event)1014 static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1015 {
1016 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1017 	struct extra_reg *er;
1018 	int idx = 0;
1019 
1020 	for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
1021 		if (er->event != (event->hw.config & er->config_mask))
1022 			continue;
1023 		idx |= er->idx;
1024 	}
1025 
1026 	if (idx) {
1027 		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
1028 			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1029 		reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
1030 		reg1->idx = idx;
1031 	}
1032 	return 0;
1033 }
1034 
/* C-Box ops: common MSR ops plus filter handling via the shared register. */
static struct intel_uncore_ops snbep_uncore_cbox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_cbox_hw_config,
	.get_constraint		= snbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
1041 
/* SNB-EP C-Box: up to 8 boxes (clamped to core count in snbep_uncore_cpu_init). */
static struct intel_uncore_type snbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,	/* the per-box filter register */
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &snbep_uncore_cbox_ops,
	.format_group		= &snbep_uncore_cbox_format_group,
};
1057 
snbep_pcu_alter_er(struct perf_event * event,int new_idx,bool modify)1058 static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
1059 {
1060 	struct hw_perf_event *hwc = &event->hw;
1061 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1062 	u64 config = reg1->config;
1063 
1064 	if (new_idx > reg1->idx)
1065 		config <<= 8 * (new_idx - reg1->idx);
1066 	else
1067 		config >>= 8 * (reg1->idx - new_idx);
1068 
1069 	if (modify) {
1070 		hwc->config += new_idx - reg1->idx;
1071 		reg1->config = config;
1072 		reg1->idx = new_idx;
1073 	}
1074 	return config;
1075 }
1076 
/*
 * Allocate one byte-wide band of the shared PCU filter register.  If the
 * preferred band conflicts, try the other three (relocating the event's
 * filter value); give up with the empty constraint after a full cycle.
 */
static struct event_constraint *
snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	unsigned long flags;
	int idx = reg1->idx;
	u64 mask, config1 = reg1->config;
	bool ok = false;

	/* No filter needed, or this (real) event already holds its band. */
	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;
again:
	/* One byte of the shared filter register per band index. */
	mask = 0xffULL << (idx * 8);
	raw_spin_lock_irqsave(&er->lock, flags);
	/* Claimable when unused, or already programmed to the same value. */
	if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
	    !((config1 ^ er->config) & mask)) {
		atomic_add(1 << (idx * 8), &er->ref);
		er->config &= ~mask;
		er->config |= config1 & mask;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (!ok) {
		/* Rotate to the next band and retry until back at the start. */
		idx = (idx + 1) % 4;
		if (idx != reg1->idx) {
			config1 = snbep_pcu_alter_er(event, idx, false);
			goto again;
		}
		return &uncore_constraint_empty;
	}

	if (!uncore_box_is_fake(box)) {
		/* Commit a relocated band and remember the allocation. */
		if (idx != reg1->idx)
			snbep_pcu_alter_er(event, idx, true);
		reg1->alloc = 1;
	}
	return NULL;
}
1118 
snbep_pcu_put_constraint(struct intel_uncore_box * box,struct perf_event * event)1119 static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
1120 {
1121 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1122 	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
1123 
1124 	if (uncore_box_is_fake(box) || !reg1->alloc)
1125 		return;
1126 
1127 	atomic_sub(1 << (reg1->idx * 8), &er->ref);
1128 	reg1->alloc = 0;
1129 }
1130 
snbep_pcu_hw_config(struct intel_uncore_box * box,struct perf_event * event)1131 static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1132 {
1133 	struct hw_perf_event *hwc = &event->hw;
1134 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1135 	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
1136 
1137 	if (ev_sel >= 0xb && ev_sel <= 0xe) {
1138 		reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
1139 		reg1->idx = ev_sel - 0xb;
1140 		reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
1141 	}
1142 	return 0;
1143 }
1144 
/* PCU ops: common MSR ops plus shared filter-band allocation. */
static struct intel_uncore_ops snbep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};
1151 
/* SNB-EP PCU (power control unit): one box, 4 counters, one shared filter reg. */
static struct intel_uncore_type snbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};
1165 
/* NULL-terminated list of the MSR-accessed SNB-EP uncore PMU types. */
static struct intel_uncore_type *snbep_msr_uncores[] = {
	&snbep_uncore_ubox,
	&snbep_uncore_cbox,
	&snbep_uncore_pcu,
	NULL,
};
1172 
snbep_uncore_cpu_init(void)1173 void snbep_uncore_cpu_init(void)
1174 {
1175 	if (snbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
1176 		snbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
1177 	uncore_msr_uncores = snbep_msr_uncores;
1178 }
1179 
/* Slot indices into uncore_extra_pci_dev[die].dev[] for QPI port filter devices. */
enum {
	SNBEP_PCI_QPI_PORT0_FILTER,
	SNBEP_PCI_QPI_PORT1_FILTER,
	BDX_PCI_QPI_PORT2_FILTER,
};
1185 
snbep_qpi_hw_config(struct intel_uncore_box * box,struct perf_event * event)1186 static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1187 {
1188 	struct hw_perf_event *hwc = &event->hw;
1189 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1190 	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1191 
1192 	if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
1193 		reg1->idx = 0;
1194 		reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
1195 		reg1->config = event->attr.config1;
1196 		reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
1197 		reg2->config = event->attr.config2;
1198 	}
1199 	return 0;
1200 }
1201 
/*
 * Enable a QPI event: first program the packet match/mask registers on the
 * separate per-port filter PCI device (if the event uses them), then set
 * the enable bit on the counter's own control register.
 */
static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		/* Look up the filter device by die and QPI port index. */
		int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
		int die = box->dieid;
		struct pci_dev *filter_pdev = uncore_extra_pci_dev[die].dev[idx];

		/* The filter device may be absent; skip programming then. */
		if (filter_pdev) {
			/* each 64-bit match/mask value is written as two dwords */
			pci_write_config_dword(filter_pdev, reg1->reg,
						(u32)reg1->config);
			pci_write_config_dword(filter_pdev, reg1->reg + 4,
						(u32)(reg1->config >> 32));
			pci_write_config_dword(filter_pdev, reg2->reg,
						(u32)reg2->config);
			pci_write_config_dword(filter_pdev, reg2->reg + 4,
						(u32)(reg2->config >> 32));
		}
	}

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
1228 
/* QPI ops: common PCI ops with filter-aware enable_event and generic constraints. */
static struct intel_uncore_ops snbep_uncore_qpi_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event		= snbep_qpi_enable_event,
	.hw_config		= snbep_qpi_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};
1236 
/* Register layout, ops and format group shared by the SNB-EP PCI uncore types. */
#define SNBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &snbep_uncore_pci_ops,		\
	.format_group	= &snbep_uncore_format_group
1244 
/* SNB-EP Home Agent: one PCI box with 4 general 48-bit counters. */
static struct intel_uncore_type snbep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
1252 
/* SNB-EP IMC: 4 memory channels, each with 4 counters plus a fixed (DCLK) counter. */
static struct intel_uncore_type snbep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
1264 
/* SNB-EP QPI: two link boxes; uses its own ops/format for match/mask filtering. */
static struct intel_uncore_type snbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.event_descs		= snbep_uncore_qpi_events,
	.format_group		= &snbep_uncore_qpi_format_group,
};
1279 
1280 
/* SNB-EP R2PCIe ring-to-PCIe box: one box, 4 counters, constrained events. */
static struct intel_uncore_type snbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
1289 
/* SNB-EP R3QPI ring-to-QPI box: two boxes, 3 counters, constrained events. */
static struct intel_uncore_type snbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
1298 
/* Indices into snbep_pci_uncores[], referenced from the PCI ID table below. */
enum {
	SNBEP_PCI_UNCORE_HA,
	SNBEP_PCI_UNCORE_IMC,
	SNBEP_PCI_UNCORE_QPI,
	SNBEP_PCI_UNCORE_R2PCIE,
	SNBEP_PCI_UNCORE_R3QPI,
};
1306 
/* NULL-terminated list of the PCI-accessed SNB-EP uncore PMU types. */
static struct intel_uncore_type *snbep_pci_uncores[] = {
	[SNBEP_PCI_UNCORE_HA]		= &snbep_uncore_ha,
	[SNBEP_PCI_UNCORE_IMC]		= &snbep_uncore_imc,
	[SNBEP_PCI_UNCORE_QPI]		= &snbep_uncore_qpi,
	[SNBEP_PCI_UNCORE_R2PCIE]	= &snbep_uncore_r2pcie,
	[SNBEP_PCI_UNCORE_R3QPI]	= &snbep_uncore_r3qpi,
	NULL,
};
1315 
/*
 * PCI IDs of the SNB-EP uncore devices.  driver_data packs the uncore type
 * index and box id (or UNCORE_EXTRA_PCI_DEV + filter slot) via
 * UNCORE_PCI_DEV_DATA().
 */
static const struct pci_device_id snbep_uncore_pci_ids[] = {
	{ /* Home Agent */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
	},
	{ /* MC Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* QPI Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};
1369 
/* PCI driver stub: supplies the ID table; probing is done by the uncore core. */
static struct pci_driver snbep_uncore_pci_driver = {
	.name		= "snbep_uncore",
	.id_table	= snbep_uncore_pci_ids,
};
1374 
/* Low 3 bits of the CPUNODEID register hold the local node ID. */
#define NODE_ID_MASK	0x7

/* Each three bits from 0 to 23 of GIDNIDMAP register correspond Node ID. */
#define GIDNIDMAP(config, id)	(((config) >> (3 * (id))) & 0x7)
1379 
upi_nodeid_groupid(struct pci_dev * ubox_dev,int nodeid_loc,int idmap_loc,int * nodeid,int * groupid)1380 static int upi_nodeid_groupid(struct pci_dev *ubox_dev, int nodeid_loc, int idmap_loc,
1381 			      int *nodeid, int *groupid)
1382 {
1383 	int ret;
1384 
1385 	/* get the Node ID of the local register */
1386 	ret = pci_read_config_dword(ubox_dev, nodeid_loc, nodeid);
1387 	if (ret)
1388 		goto err;
1389 
1390 	*nodeid = *nodeid & NODE_ID_MASK;
1391 	/* get the Node ID mapping */
1392 	ret = pci_read_config_dword(ubox_dev, idmap_loc, groupid);
1393 	if (ret)
1394 		goto err;
1395 err:
1396 	return ret;
1397 }
1398 
/*
 * build pci bus to socket mapping
 *
 * Walks every UBOX PCI device, resolves the die its bus belongs to (via the
 * nodeid/idmap registers for <= 8 nodes, NUMA info otherwise), records it in
 * the per-segment pci2phy map, then propagates mappings to buses that have
 * no UBOX of their own.  Returns 0 or a negative errno.
 */
static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool reverse)
{
	struct pci_dev *ubox_dev = NULL;
	int i, bus, nodeid, segment, die_id;
	struct pci2phy_map *map;
	int err = 0;
	u32 config = 0;

	while (1) {
		/* find the UBOX device */
		ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
		if (!ubox_dev)
			break;
		bus = ubox_dev->bus->number;
		/*
		 * The nodeid and idmap registers only contain enough
		 * information to handle 8 nodes.  On systems with more
		 * than 8 nodes, we need to rely on NUMA information,
		 * filled in from BIOS supplied information, to determine
		 * the topology.
		 */
		if (nr_node_ids <= 8) {
			err = upi_nodeid_groupid(ubox_dev, nodeid_loc, idmap_loc,
						 &nodeid, &config);
			if (err)
				break;

			segment = pci_domain_nr(ubox_dev->bus);
			raw_spin_lock(&pci2phy_map_lock);
			map = __find_pci2phy_map(segment);
			if (!map) {
				raw_spin_unlock(&pci2phy_map_lock);
				err = -ENOMEM;
				break;
			}

			/*
			 * every three bits in the Node ID mapping register maps
			 * to a particular node.
			 */
			for (i = 0; i < 8; i++) {
				if (nodeid == GIDNIDMAP(config, i)) {
					if (topology_max_die_per_package() > 1)
						die_id = i;
					else
						die_id = topology_phys_to_logical_pkg(i);
					if (die_id < 0)
						die_id = -ENODEV;
					map->pbus_to_dieid[bus] = die_id;
					break;
				}
			}
			raw_spin_unlock(&pci2phy_map_lock);
		} else {
			/* > 8 nodes: derive the die directly from NUMA info. */
			segment = pci_domain_nr(ubox_dev->bus);
			raw_spin_lock(&pci2phy_map_lock);
			map = __find_pci2phy_map(segment);
			if (!map) {
				raw_spin_unlock(&pci2phy_map_lock);
				err = -ENOMEM;
				break;
			}

			map->pbus_to_dieid[bus] = die_id = uncore_device_to_die(ubox_dev);

			raw_spin_unlock(&pci2phy_map_lock);

			if (WARN_ON_ONCE(die_id == -1)) {
				err = -EINVAL;
				break;
			}
		}
	}

	if (!err) {
		/*
		 * For PCI bus with no UBOX device, find the next bus
		 * that has UBOX device and use its mapping.
		 */
		raw_spin_lock(&pci2phy_map_lock);
		list_for_each_entry(map, &pci2phy_map_head, list) {
			i = -1;
			if (reverse) {
				/* walk buses downward, propagating the last seen die id */
				for (bus = 255; bus >= 0; bus--) {
					if (map->pbus_to_dieid[bus] != -1)
						i = map->pbus_to_dieid[bus];
					else
						map->pbus_to_dieid[bus] = i;
				}
			} else {
				/* walk buses upward, propagating the last seen die id */
				for (bus = 0; bus <= 255; bus++) {
					if (map->pbus_to_dieid[bus] != -1)
						i = map->pbus_to_dieid[bus];
					else
						map->pbus_to_dieid[bus] = i;
				}
			}
		}
		raw_spin_unlock(&pci2phy_map_lock);
	}

	/* Drop the reference held on the last (or only) device looked at. */
	pci_dev_put(ubox_dev);

	return pcibios_err_to_errno(err);
}
1507 
snbep_uncore_pci_init(void)1508 int snbep_uncore_pci_init(void)
1509 {
1510 	int ret = snbep_pci2phy_map_init(0x3ce0, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
1511 	if (ret)
1512 		return ret;
1513 	uncore_pci_uncores = snbep_pci_uncores;
1514 	uncore_pci_driver = &snbep_uncore_pci_driver;
1515 	return 0;
1516 }
1517 /* end of Sandy Bridge-EP uncore support */
1518 
1519 /* IvyTown uncore support */
ivbep_uncore_msr_init_box(struct intel_uncore_box * box)1520 static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box)
1521 {
1522 	unsigned msr = uncore_msr_box_ctl(box);
1523 	if (msr)
1524 		wrmsrl(msr, IVBEP_PMON_BOX_CTL_INT);
1525 }
1526 
ivbep_uncore_pci_init_box(struct intel_uncore_box * box)1527 static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box)
1528 {
1529 	struct pci_dev *pdev = box->pci_dev;
1530 
1531 	pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
1532 }
1533 
/* IVB-EP MSR ops: SNB-EP's handlers with the IVB-specific init_box. */
#define IVBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.init_box	= ivbep_uncore_msr_init_box,		\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter
1541 
/* Default IVB-EP MSR ops (used by the IVB-EP Ubox below). */
static struct intel_uncore_ops ivbep_uncore_msr_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};
1545 
/* IVB-EP PCI ops: SNB-EP handlers with the IVB-specific init_box. */
static struct intel_uncore_ops ivbep_uncore_pci_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};
1554 
/* Register layout, ops and format group shared by the IVB-EP PCI uncore types. */
#define IVBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= IVBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &ivbep_uncore_pci_ops,			\
	.format_group	= &ivbep_uncore_format_group
1562 
/* Generic IVB-EP event format fields (event/umask/edge/inv/thresh8). */
static struct attribute *ivbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

/* Ubox format: same as generic but with the narrower thresh5 field. */
static struct attribute *ivbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

/* C-Box format: adds tid_en plus the IVB filter fields (tid/link/state/nid/opc/...). */
static struct attribute *ivbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_link.attr,
	&format_attr_filter_state2.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

/* PCU format: occupancy select/edge/invert and the four filter bands. */
static struct attribute *ivbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

/* QPI format: extended event select plus packet match/mask fields. */
static struct attribute *ivbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};
1637 
/* "format" attribute groups for each IVB-EP box flavour. */
static const struct attribute_group ivbep_uncore_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_formats_attr,
};

static const struct attribute_group ivbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_ubox_formats_attr,
};

static const struct attribute_group ivbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_cbox_formats_attr,
};

static const struct attribute_group ivbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_pcu_formats_attr,
};

static const struct attribute_group ivbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_qpi_formats_attr,
};
1662 
/* IVB-EP Ubox: same register layout as SNB-EP, IVB event mask and ops. */
static struct intel_uncore_type ivbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= IVBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_ubox_format_group,
};
1677 
/*
 * IVB-EP C-Box extra-register rules: same matching scheme as the SNB-EP
 * table above, with idx bits decoded by ivbep_cbox_filter_mask().
 */
static struct extra_reg ivbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	EVENT_EXTRA_END
};
1718 
ivbep_cbox_filter_mask(int fields)1719 static u64 ivbep_cbox_filter_mask(int fields)
1720 {
1721 	u64 mask = 0;
1722 
1723 	if (fields & 0x1)
1724 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_TID;
1725 	if (fields & 0x2)
1726 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK;
1727 	if (fields & 0x4)
1728 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
1729 	if (fields & 0x8)
1730 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NID;
1731 	if (fields & 0x10) {
1732 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
1733 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NC;
1734 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_C6;
1735 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
1736 	}
1737 
1738 	return mask;
1739 }
1740 
/* Constraint lookup: delegate to the common Cbox helper with the IVT filter decoder. */
static struct event_constraint *
ivbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, ivbep_cbox_filter_mask);
}
1746 
ivbep_cbox_hw_config(struct intel_uncore_box * box,struct perf_event * event)1747 static int ivbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1748 {
1749 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1750 	struct extra_reg *er;
1751 	int idx = 0;
1752 
1753 	for (er = ivbep_uncore_cbox_extra_regs; er->msr; er++) {
1754 		if (er->event != (event->hw.config & er->config_mask))
1755 			continue;
1756 		idx |= er->idx;
1757 	}
1758 
1759 	if (idx) {
1760 		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
1761 			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1762 		reg1->config = event->attr.config1 & ivbep_cbox_filter_mask(idx);
1763 		reg1->idx = idx;
1764 	}
1765 	return 0;
1766 }
1767 
/*
 * Program the Cbox filter register (if the event uses one) and then
 * enable the event counter.
 */
static void ivbep_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		u64 filter = uncore_shared_reg_config(box, 0);
		/*
		 * The 64-bit filter value is split across two MSRs: the low
		 * dword goes to the filter register itself, the high dword
		 * to reg + 6 (NOTE(review): presumably the second filter
		 * MSR in the IVT Cbox layout — confirm against the uncore
		 * reference manual).
		 */
		wrmsrl(reg1->reg, filter & 0xffffffff);
		wrmsrl(reg1->reg + 6, filter >> 32);
	}

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
1781 
/* IVT Cbox ops: SNB-EP box control with IVT-specific event enable/config hooks */
static struct intel_uncore_ops ivbep_uncore_cbox_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= ivbep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= ivbep_cbox_hw_config,
	.get_constraint		= ivbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
1793 
/*
 * IVT Cbox: up to 15 boxes (clamped to the core count in
 * ivbep_uncore_cpu_init()), one shared filter register per box.
 */
static struct intel_uncore_type ivbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 15,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &ivbep_uncore_cbox_ops,
	.format_group		= &ivbep_uncore_cbox_format_group,
};
1809 
/* IVT PCU ops: common IVT MSR ops plus the SNB-EP PCU filter handling */
static struct intel_uncore_ops ivbep_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};
1816 
/* IVT power control unit PMU: single box, one shared (filter) register */
static struct intel_uncore_type ivbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_pcu_ops,
	.format_group		= &ivbep_uncore_pcu_format_group,
};
1830 
/* All MSR-based IVT uncore PMU types; NULL-terminated */
static struct intel_uncore_type *ivbep_msr_uncores[] = {
	&ivbep_uncore_ubox,
	&ivbep_uncore_cbox,
	&ivbep_uncore_pcu,
	NULL,
};
1837 
ivbep_uncore_cpu_init(void)1838 void ivbep_uncore_cpu_init(void)
1839 {
1840 	if (ivbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
1841 		ivbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
1842 	uncore_msr_uncores = ivbep_msr_uncores;
1843 }
1844 
/* IVT home agent PMU: two PCI boxes using the common IVT PCI setup */
static struct intel_uncore_type ivbep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};
1852 
/* IVT memory controller PMU: 8 channel boxes plus a fixed (DCLK) counter */
static struct intel_uncore_type ivbep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};
1864 
/*
 * Registers in IRP boxes are not properly aligned, so the per-counter
 * control and counter config-space offsets are looked up from these
 * tables (indexed by hwc->idx) instead of being computed from a stride.
 */
static unsigned ivbep_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
static unsigned ivbep_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};
1868 
ivbep_uncore_irp_enable_event(struct intel_uncore_box * box,struct perf_event * event)1869 static void ivbep_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1870 {
1871 	struct pci_dev *pdev = box->pci_dev;
1872 	struct hw_perf_event *hwc = &event->hw;
1873 
1874 	pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx],
1875 			       hwc->config | SNBEP_PMON_CTL_EN);
1876 }
1877 
ivbep_uncore_irp_disable_event(struct intel_uncore_box * box,struct perf_event * event)1878 static void ivbep_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1879 {
1880 	struct pci_dev *pdev = box->pci_dev;
1881 	struct hw_perf_event *hwc = &event->hw;
1882 
1883 	pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], hwc->config);
1884 }
1885 
/*
 * Read an IRP counter as two 32-bit PCI config-space accesses into the
 * low and high halves of @count.  The (u32 *)&count + 1 trick addresses
 * the high dword directly (valid on x86, which is little-endian).
 */
static u64 ivbep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
	pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);

	return count;
}
1897 
/* IRP ops: common PCI box control with IRP-specific event/counter accessors */
static struct intel_uncore_ops ivbep_uncore_irp_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= ivbep_uncore_irp_disable_event,
	.enable_event	= ivbep_uncore_irp_enable_event,
	.read_counter	= ivbep_uncore_irp_read_counter,
};
1906 
/*
 * IVT IRP PMU.  No .perf_ctr/.event_ctl here: register offsets come
 * from the ivbep_uncore_irp_ctls/ctrs tables via the custom ops.
 */
static struct intel_uncore_type ivbep_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= IVBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &ivbep_uncore_irp_ops,
	.format_group		= &ivbep_uncore_format_group,
};
1917 
/* QPI ops: reuse the SNB-EP QPI match/mask handling on the IVT PCI boxes */
static struct intel_uncore_ops ivbep_uncore_qpi_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_qpi_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
	.hw_config	= snbep_qpi_hw_config,
	.get_constraint	= uncore_get_constraint,
	.put_constraint	= uncore_put_constraint,
};
1929 
/* IVT QPI link layer PMU: three port boxes with one shared (match/mask) register */
static struct intel_uncore_type ivbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_qpi_ops,
	.format_group		= &ivbep_uncore_qpi_format_group,
};
1943 
/* IVT R2PCIe (ring-to-PCIe) PMU, with the SNB-EP counter constraints */
static struct intel_uncore_type ivbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};
1952 
/* IVT R3QPI (ring-to-QPI) PMU, with the SNB-EP counter constraints */
static struct intel_uncore_type ivbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};
1961 
/* Indices into ivbep_pci_uncores[], referenced by the PCI device table below */
enum {
	IVBEP_PCI_UNCORE_HA,
	IVBEP_PCI_UNCORE_IMC,
	IVBEP_PCI_UNCORE_IRP,
	IVBEP_PCI_UNCORE_QPI,
	IVBEP_PCI_UNCORE_R2PCIE,
	IVBEP_PCI_UNCORE_R3QPI,
};
1970 
/* All PCI-based IVT uncore PMU types, indexed by the enum above; NULL-terminated */
static struct intel_uncore_type *ivbep_pci_uncores[] = {
	[IVBEP_PCI_UNCORE_HA]	= &ivbep_uncore_ha,
	[IVBEP_PCI_UNCORE_IMC]	= &ivbep_uncore_imc,
	[IVBEP_PCI_UNCORE_IRP]	= &ivbep_uncore_irp,
	[IVBEP_PCI_UNCORE_QPI]	= &ivbep_uncore_qpi,
	[IVBEP_PCI_UNCORE_R2PCIE]	= &ivbep_uncore_r2pcie,
	[IVBEP_PCI_UNCORE_R3QPI]	= &ivbep_uncore_r3qpi,
	NULL,
};
1980 
/*
 * PCI device table for the IVT uncore units.  driver_data encodes the
 * PMU type index plus the box instance; the two trailing entries are
 * extra (non-PMON) devices carrying the QPI port filter registers.
 */
static const struct pci_device_id ivbep_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};
2066 
/* PCI driver stub: the uncore core layer does the probing via this id table */
static struct pci_driver ivbep_uncore_pci_driver = {
	.name		= "ivbep_uncore",
	.id_table	= ivbep_uncore_pci_ids,
};
2071 
ivbep_uncore_pci_init(void)2072 int ivbep_uncore_pci_init(void)
2073 {
2074 	int ret = snbep_pci2phy_map_init(0x0e1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
2075 	if (ret)
2076 		return ret;
2077 	uncore_pci_uncores = ivbep_pci_uncores;
2078 	uncore_pci_driver = &ivbep_uncore_pci_driver;
2079 	return 0;
2080 }
2081 /* end of IvyTown uncore support */
2082 
2083 /* KNL uncore support */
/* Event format fields exposed for the KNL Ubox (5-bit threshold) */
static struct attribute *knl_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};
2093 
/* sysfs "format" directory for the KNL Ubox PMU */
static const struct attribute_group knl_uncore_ubox_format_group = {
	.name = "format",
	.attrs = knl_uncore_ubox_formats_attr,
};
2098 
/* KNL Ubox: reuses the HSW-EP register layout with the common SNB-EP ops */
static struct intel_uncore_type knl_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= KNL_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops			= &snbep_uncore_msr_ops,
	.format_group		= &knl_uncore_ubox_format_group,
};
2113 
/* Event format fields exposed for the KNL CHA, including its filter fields */
static struct attribute *knl_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_qor.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid4.attr,
	&format_attr_filter_link3.attr,
	&format_attr_filter_state4.attr,
	&format_attr_filter_local.attr,
	&format_attr_filter_all_op.attr,
	&format_attr_filter_nnm.attr,
	&format_attr_filter_opc3.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};
2133 
/* sysfs "format" directory for the KNL CHA PMU */
static const struct attribute_group knl_uncore_cha_format_group = {
	.name = "format",
	.attrs = knl_uncore_cha_formats_attr,
};
2138 
/* KNL CHA events restricted to counter 0 */
static struct event_constraint knl_uncore_cha_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	EVENT_CONSTRAINT_END
};
2145 
/*
 * KNL CHA extra-register table: (event match, config mask, filter-field
 * selector bits decoded by knl_cha_filter_mask()).
 */
static struct extra_reg knl_uncore_cha_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x3d, 0xff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x4),
	EVENT_EXTRA_END
};
2154 
knl_cha_filter_mask(int fields)2155 static u64 knl_cha_filter_mask(int fields)
2156 {
2157 	u64 mask = 0;
2158 
2159 	if (fields & 0x1)
2160 		mask |= KNL_CHA_MSR_PMON_BOX_FILTER_TID;
2161 	if (fields & 0x2)
2162 		mask |= KNL_CHA_MSR_PMON_BOX_FILTER_STATE;
2163 	if (fields & 0x4)
2164 		mask |= KNL_CHA_MSR_PMON_BOX_FILTER_OP;
2165 	return mask;
2166 }
2167 
/* Constraint lookup: delegate to the common Cbox helper with the KNL filter decoder. */
static struct event_constraint *
knl_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, knl_cha_filter_mask);
}
2173 
/*
 * Collect the filter fields this event needs and program the event's
 * extra register to point at this CHA's filter MSR.
 */
static int knl_cha_hw_config(struct intel_uncore_box *box,
			     struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	/* OR together the filter-field selectors of all matching entries. */
	for (er = knl_uncore_cha_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
			    KNL_CHA_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & knl_cha_filter_mask(idx);

		/*
		 * Always enable the remote-node, local-node and NNC filter
		 * bits on top of the user-selected filter value.
		 */
		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE;
		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE;
		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_NNC;
		reg1->idx = idx;
	}
	return 0;
}
2199 
/* Forward declaration: defined later in the Haswell-EP section of this file. */
static void hswep_cbox_enable_event(struct intel_uncore_box *box,
				    struct perf_event *event);

/* KNL CHA ops: SNB-EP box control with the HSW-EP enable and KNL config hooks */
static struct intel_uncore_ops knl_uncore_cha_ops = {
	.init_box		= snbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= knl_cha_hw_config,
	.get_constraint		= knl_cha_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
2214 
/* KNL caching/home agent PMU: 38 tile boxes with one shared filter register each */
static struct intel_uncore_type knl_uncore_cha = {
	.name			= "cha",
	.num_counters		= 4,
	.num_boxes		= 38,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= KNL_CHA_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= KNL_CHA_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= knl_uncore_cha_constraints,
	.ops			= &knl_uncore_cha_ops,
	.format_group		= &knl_uncore_cha_format_group,
};
2230 
/* Event format fields exposed for the KNL PCU (occupancy-capable counters) */
static struct attribute *knl_uncore_pcu_formats_attr[] = {
	&format_attr_event2.attr,
	&format_attr_use_occ_ctr.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh6.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge_det.attr,
	NULL,
};
2243 
/* sysfs "format" directory for the KNL PCU PMU */
static const struct attribute_group knl_uncore_pcu_format_group = {
	.name = "format",
	.attrs = knl_uncore_pcu_formats_attr,
};
2248 
/* KNL power control unit PMU: HSW-EP register layout, common SNB-EP ops */
static struct intel_uncore_type knl_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= KNL_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.ops			= &snbep_uncore_msr_ops,
	.format_group		= &knl_uncore_pcu_format_group,
};
2261 
/* All MSR-based KNL uncore PMU types; NULL-terminated */
static struct intel_uncore_type *knl_msr_uncores[] = {
	&knl_uncore_ubox,
	&knl_uncore_cha,
	&knl_uncore_pcu,
	NULL,
};
2268 
/* Register the MSR-based KNL uncore PMUs with the core uncore layer. */
void knl_uncore_cpu_init(void)
{
	uncore_msr_uncores = knl_msr_uncores;
}
2273 
/*
 * Enable a KNL IMC/EDC box by writing 0 to its box control register,
 * clearing every control bit (including the freeze bit).
 */
static void knl_uncore_imc_enable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	pci_write_config_dword(pdev, box_ctl, 0);
}
2281 
knl_uncore_imc_enable_event(struct intel_uncore_box * box,struct perf_event * event)2282 static void knl_uncore_imc_enable_event(struct intel_uncore_box *box,
2283 					struct perf_event *event)
2284 {
2285 	struct pci_dev *pdev = box->pci_dev;
2286 	struct hw_perf_event *hwc = &event->hw;
2287 
2288 	if ((event->attr.config & SNBEP_PMON_CTL_EV_SEL_MASK)
2289 							== UNCORE_FIXED_EVENT)
2290 		pci_write_config_dword(pdev, hwc->config_base,
2291 				       hwc->config | KNL_PMON_FIXED_CTL_EN);
2292 	else
2293 		pci_write_config_dword(pdev, hwc->config_base,
2294 				       hwc->config | SNBEP_PMON_CTL_EN);
2295 }
2296 
/* Shared ops for all KNL IMC/EDC units (custom enable_box/enable_event) */
static struct intel_uncore_ops knl_uncore_imc_ops = {
	.init_box	= snbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= knl_uncore_imc_enable_box,
	.read_counter	= snbep_uncore_pci_read_counter,
	.enable_event	= knl_uncore_imc_enable_event,
	.disable_event	= snbep_uncore_pci_disable_event,
};
2305 
/* KNL memory controller UClk PMU: one box per MC */
static struct intel_uncore_type knl_uncore_imc_uclk = {
	.name			= "imc_uclk",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};
2321 
/* KNL memory controller DClk (channel) PMU: 3 channels per MC, 2 MCs */
static struct intel_uncore_type knl_uncore_imc_dclk = {
	.name			= "imc",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_MC0_CH0_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_MC0_CH0_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_MC0_CH0_MSR_PMON_FIXED_LOW,
	.fixed_ctl		= KNL_MC0_CH0_MSR_PMON_FIXED_CTL,
	.box_ctl		= KNL_MC0_CH0_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};
2337 
/* KNL embedded DRAM controller UClk PMU: one box per EDC */
static struct intel_uncore_type knl_uncore_edc_uclk = {
	.name			= "edc_uclk",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};
2353 
/* KNL embedded DRAM controller EClk PMU: one box per EDC */
static struct intel_uncore_type knl_uncore_edc_eclk = {
	.name			= "edc_eclk",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_EDC0_ECLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW,
	.fixed_ctl		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL,
	.box_ctl		= KNL_EDC0_ECLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};
2369 
/* KNL M2PCIe event 0x23 restricted to counters 0-1 */
static struct event_constraint knl_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	EVENT_CONSTRAINT_END
};
2374 
/* KNL M2PCIe PMU: standard SNB-EP PCI box with one constrained event */
static struct intel_uncore_type knl_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= knl_uncore_m2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2383 
/* Event format fields exposed for the KNL IRP */
static struct attribute *knl_uncore_irp_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_qor.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
2393 
/* sysfs "format" directory for the KNL IRP PMU */
static const struct attribute_group knl_uncore_irp_format_group = {
	.name = "format",
	.attrs = knl_uncore_irp_formats_attr,
};
2398 
/* KNL IRP PMU: properly aligned registers here, so generic PCI ops suffice */
static struct intel_uncore_type knl_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= KNL_IRP_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= KNL_IRP_PCI_PMON_BOX_CTL,
	.ops			= &snbep_uncore_pci_ops,
	.format_group		= &knl_uncore_irp_format_group,
};
2411 
/* Indices into knl_pci_uncores[], referenced by the KNL PCI device table */
enum {
	KNL_PCI_UNCORE_MC_UCLK,
	KNL_PCI_UNCORE_MC_DCLK,
	KNL_PCI_UNCORE_EDC_UCLK,
	KNL_PCI_UNCORE_EDC_ECLK,
	KNL_PCI_UNCORE_M2PCIE,
	KNL_PCI_UNCORE_IRP,
};
2420 
/* All PCI-based KNL uncore PMU types, indexed by the enum above; NULL-terminated */
static struct intel_uncore_type *knl_pci_uncores[] = {
	[KNL_PCI_UNCORE_MC_UCLK]	= &knl_uncore_imc_uclk,
	[KNL_PCI_UNCORE_MC_DCLK]	= &knl_uncore_imc_dclk,
	[KNL_PCI_UNCORE_EDC_UCLK]	= &knl_uncore_edc_uclk,
	[KNL_PCI_UNCORE_EDC_ECLK]	= &knl_uncore_edc_eclk,
	[KNL_PCI_UNCORE_M2PCIE]		= &knl_uncore_m2pcie,
	[KNL_PCI_UNCORE_IRP]		= &knl_uncore_irp,
	NULL,
};
2430 
2431 /*
2432  * KNL uses a common PCI device ID for multiple instances of an Uncore PMU
2433  * device type. prior to KNL, each instance of a PMU device type had a unique
2434  * device ID.
2435  *
2436  *	PCI Device ID	Uncore PMU Devices
2437  *	----------------------------------
2438  *	0x7841		MC0 UClk, MC1 UClk
2439  *	0x7843		MC0 DClk CH 0, MC0 DClk CH 1, MC0 DClk CH 2,
2440  *			MC1 DClk CH 0, MC1 DClk CH 1, MC1 DClk CH 2
2441  *	0x7833		EDC0 UClk, EDC1 UClk, EDC2 UClk, EDC3 UClk,
2442  *			EDC4 UClk, EDC5 UClk, EDC6 UClk, EDC7 UClk
2443  *	0x7835		EDC0 EClk, EDC1 EClk, EDC2 EClk, EDC3 EClk,
2444  *			EDC4 EClk, EDC5 EClk, EDC6 EClk, EDC7 EClk
2445  *	0x7817		M2PCIe
2446  *	0x7814		IRP
2447 */
2448 
/*
 * KNL PCI device table.  Because one device ID covers several PMU
 * instances (see the comment above), driver_data additionally encodes
 * the PCI device/function to disambiguate: (dev, fn, PMU type, index).
 */
static const struct pci_device_id knl_uncore_pci_ids[] = {
	{ /* MC0 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 0, KNL_PCI_UNCORE_MC_UCLK, 0),
	},
	{ /* MC1 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 0, KNL_PCI_UNCORE_MC_UCLK, 1),
	},
	{ /* MC0 DClk CH 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 2, KNL_PCI_UNCORE_MC_DCLK, 0),
	},
	{ /* MC0 DClk CH 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 3, KNL_PCI_UNCORE_MC_DCLK, 1),
	},
	{ /* MC0 DClk CH 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 4, KNL_PCI_UNCORE_MC_DCLK, 2),
	},
	{ /* MC1 DClk CH 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 2, KNL_PCI_UNCORE_MC_DCLK, 3),
	},
	{ /* MC1 DClk CH 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 3, KNL_PCI_UNCORE_MC_DCLK, 4),
	},
	{ /* MC1 DClk CH 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 4, KNL_PCI_UNCORE_MC_DCLK, 5),
	},
	{ /* EDC0 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, KNL_PCI_UNCORE_EDC_UCLK, 0),
	},
	{ /* EDC1 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, KNL_PCI_UNCORE_EDC_UCLK, 1),
	},
	{ /* EDC2 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(17, 0, KNL_PCI_UNCORE_EDC_UCLK, 2),
	},
	{ /* EDC3 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, KNL_PCI_UNCORE_EDC_UCLK, 3),
	},
	{ /* EDC4 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(19, 0, KNL_PCI_UNCORE_EDC_UCLK, 4),
	},
	{ /* EDC5 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(20, 0, KNL_PCI_UNCORE_EDC_UCLK, 5),
	},
	{ /* EDC6 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 0, KNL_PCI_UNCORE_EDC_UCLK, 6),
	},
	{ /* EDC7 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 0, KNL_PCI_UNCORE_EDC_UCLK, 7),
	},
	{ /* EDC0 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(24, 2, KNL_PCI_UNCORE_EDC_ECLK, 0),
	},
	{ /* EDC1 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(25, 2, KNL_PCI_UNCORE_EDC_ECLK, 1),
	},
	{ /* EDC2 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(26, 2, KNL_PCI_UNCORE_EDC_ECLK, 2),
	},
	{ /* EDC3 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(27, 2, KNL_PCI_UNCORE_EDC_ECLK, 3),
	},
	{ /* EDC4 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(28, 2, KNL_PCI_UNCORE_EDC_ECLK, 4),
	},
	{ /* EDC5 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(29, 2, KNL_PCI_UNCORE_EDC_ECLK, 5),
	},
	{ /* EDC6 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(30, 2, KNL_PCI_UNCORE_EDC_ECLK, 6),
	},
	{ /* EDC7 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(31, 2, KNL_PCI_UNCORE_EDC_ECLK, 7),
	},
	{ /* M2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7817),
		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_M2PCIE, 0),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7814),
		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_IRP, 0),
	},
	{ /* end: all zeroes */ }
};
2556 
/* PCI driver stub: the uncore core layer does the probing via this id table */
static struct pci_driver knl_uncore_pci_driver = {
	.name		= "knl_uncore",
	.id_table	= knl_uncore_pci_ids,
};
2561 
knl_uncore_pci_init(void)2562 int knl_uncore_pci_init(void)
2563 {
2564 	int ret;
2565 
2566 	/* All KNL PCI based PMON units are on the same PCI bus except IRP */
2567 	ret = snb_pci2phy_map_init(0x7814); /* IRP */
2568 	if (ret)
2569 		return ret;
2570 	ret = snb_pci2phy_map_init(0x7817); /* M2PCIe */
2571 	if (ret)
2572 		return ret;
2573 	uncore_pci_uncores = knl_pci_uncores;
2574 	uncore_pci_driver = &knl_uncore_pci_driver;
2575 	return 0;
2576 }
2577 
2578 /* end of KNL uncore support */
2579 
2580 /* Haswell-EP uncore support */
/* sysfs "format" fields exposed for the HSW-EP Ubox PMU */
static struct attribute *hswep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,	/* Ubox threshold field is only 5 bits wide */
	&format_attr_filter_tid2.attr,
	&format_attr_filter_cid.attr,
	NULL,
};

static const struct attribute_group hswep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_ubox_formats_attr,
};
2596 
hswep_ubox_hw_config(struct intel_uncore_box * box,struct perf_event * event)2597 static int hswep_ubox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2598 {
2599 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2600 	reg1->reg = HSWEP_U_MSR_PMON_FILTER;
2601 	reg1->config = event->attr.config1 & HSWEP_U_MSR_PMON_BOX_FILTER_MASK;
2602 	reg1->idx = 0;
2603 	return 0;
2604 }
2605 
/* Common SNB-EP MSR ops plus Ubox-specific filter config and shared-reg arbitration. */
static struct intel_uncore_ops hswep_uncore_ubox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_ubox_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};
2612 
/* HSW-EP Ubox: 2 general 44-bit counters plus a 48-bit fixed UCLK counter. */
static struct intel_uncore_type hswep_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 44,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.num_shared_regs	= 1,	/* one shared slot for the Ubox filter MSR */
	.ops			= &hswep_uncore_ubox_ops,
	.format_group		= &hswep_uncore_ubox_format_group,
};
2628 
/* sysfs "format" fields for the HSW-EP Cbox PMU, including all filter knobs. */
static struct attribute *hswep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid3.attr,
	&format_attr_filter_link2.attr,
	&format_attr_filter_state3.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static const struct attribute_group hswep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_cbox_formats_attr,
};
2650 
/* HSW-EP Cbox events restricted to specific counters (counter bitmask per event). */
static struct event_constraint hswep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
	EVENT_CONSTRAINT_END
};
2661 
/*
 * Cbox events that consume the filter registers. Each entry maps an
 * event/umask pattern (matched under config_mask) to the filter-field
 * index bits later decoded by hswep_cbox_filter_mask()
 * (0x1 TID, 0x2 LINK, 0x4 STATE, 0x8 NID, 0x10 OPC/NC/C6/ISOC group).
 */
static struct extra_reg hswep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4028, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4032, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4029, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4033, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x402A, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x12),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	EVENT_EXTRA_END
};
2703 
hswep_cbox_filter_mask(int fields)2704 static u64 hswep_cbox_filter_mask(int fields)
2705 {
2706 	u64 mask = 0;
2707 	if (fields & 0x1)
2708 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_TID;
2709 	if (fields & 0x2)
2710 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK;
2711 	if (fields & 0x4)
2712 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE;
2713 	if (fields & 0x8)
2714 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NID;
2715 	if (fields & 0x10) {
2716 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC;
2717 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NC;
2718 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_C6;
2719 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
2720 	}
2721 	return mask;
2722 }
2723 
/* Delegate to the common SNB-EP Cbox constraint logic, using the HSW-EP filter layout. */
static struct event_constraint *
hswep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, hswep_cbox_filter_mask);
}
2729 
hswep_cbox_hw_config(struct intel_uncore_box * box,struct perf_event * event)2730 static int hswep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2731 {
2732 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2733 	struct extra_reg *er;
2734 	int idx = 0;
2735 
2736 	for (er = hswep_uncore_cbox_extra_regs; er->msr; er++) {
2737 		if (er->event != (event->hw.config & er->config_mask))
2738 			continue;
2739 		idx |= er->idx;
2740 	}
2741 
2742 	if (idx) {
2743 		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
2744 			    HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
2745 		reg1->config = event->attr.config1 & hswep_cbox_filter_mask(idx);
2746 		reg1->idx = idx;
2747 	}
2748 	return 0;
2749 }
2750 
/*
 * Enable a Cbox event. The 64-bit filter value lives in two adjacent
 * 32-bit MSRs (FILTER0/FILTER1), so it is programmed in two halves
 * before the counter's enable bit is set.
 */
static void hswep_cbox_enable_event(struct intel_uncore_box *box,
				  struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		u64 filter = uncore_shared_reg_config(box, 0);
		wrmsrl(reg1->reg, filter & 0xffffffff);	/* FILTER0: low 32 bits */
		wrmsrl(reg1->reg + 1, filter >> 32);	/* FILTER1: high 32 bits */
	}

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
2765 
/* Cbox ops: common SNB-EP box control plus split-MSR filter programming. */
static struct intel_uncore_ops hswep_uncore_cbox_ops = {
	.init_box		= snbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= hswep_cbox_hw_config,
	.get_constraint		= hswep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
2777 
/* HSW-EP Cbox: up to 18 boxes; num_boxes is trimmed to the core count at init. */
static struct intel_uncore_type hswep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 18,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,	/* one shared slot for the filter pair */
	.constraints		= hswep_uncore_cbox_constraints,
	.ops			= &hswep_uncore_cbox_ops,
	.format_group		= &hswep_uncore_cbox_format_group,
};
2793 
2794 /*
2795  * Write SBOX Initialization register bit by bit to avoid spurious #GPs
2796  */
static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);

	if (msr) {
		u64 init = SNBEP_PMON_BOX_CTL_INT;
		u64 flags = 0;
		int i;

		/*
		 * Setting several control bits with one write can raise a
		 * spurious #GP, so accumulate the init bits one at a time,
		 * rewriting the growing mask after each added bit.
		 */
		for_each_set_bit(i, (unsigned long *)&init, 64) {
			flags |= (1ULL << i);
			wrmsrl(msr, flags);
		}
	}
}
2812 
/* Common SNB-EP MSR ops with the #GP-safe bit-by-bit Sbox init override. */
static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = {
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.init_box		= hswep_uncore_sbox_msr_init_box
};
2817 
/* sysfs "format" fields for the HSW-EP Sbox PMU (no filter registers). */
static struct attribute *hswep_uncore_sbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group hswep_uncore_sbox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_sbox_formats_attr,
};
2832 
/* HSW-EP Sbox: num_boxes may be reduced to 2 at init on 6-8 core parts. */
static struct intel_uncore_type hswep_uncore_sbox = {
	.name			= "sbox",
	.num_counters		= 4,
	.num_boxes		= 4,
	.perf_ctr_bits		= 44,
	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
	.ops			= &hswep_uncore_sbox_msr_ops,
	.format_group		= &hswep_uncore_sbox_format_group,
};
2846 
hswep_pcu_hw_config(struct intel_uncore_box * box,struct perf_event * event)2847 static int hswep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2848 {
2849 	struct hw_perf_event *hwc = &event->hw;
2850 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2851 	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
2852 
2853 	if (ev_sel >= 0xb && ev_sel <= 0xe) {
2854 		reg1->reg = HSWEP_PCU_MSR_PMON_BOX_FILTER;
2855 		reg1->idx = ev_sel - 0xb;
2856 		reg1->config = event->attr.config1 & (0xff << reg1->idx);
2857 	}
2858 	return 0;
2859 }
2860 
/* PCU ops: common SNB-EP MSR ops plus band-filter config and occupancy constraints. */
static struct intel_uncore_ops hswep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};
2867 
/* HSW-EP power control unit PMU; BDX later overrides .constraints at init. */
static struct intel_uncore_type hswep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,	/* shared slot for the band filter MSR */
	.ops			= &hswep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};
2881 
/* MSR-accessed uncore PMUs on HSW-EP, NULL-terminated for the core iterator. */
static struct intel_uncore_type *hswep_msr_uncores[] = {
	&hswep_uncore_ubox,
	&hswep_uncore_cbox,
	&hswep_uncore_sbox,
	&hswep_uncore_pcu,
	NULL,
};
2889 
#define HSWEP_PCU_DID			0x2fc0	/* PCI device ID of the HSW-EP PCU */
#define HSWEP_PCU_CAPID4_OFFET		0x94	/* CAPID4 config-space offset */
#define hswep_get_chop(_cap)		(((_cap) >> 6) & 0x3)	/* die "chop" field */
2893 
hswep_has_limit_sbox(unsigned int device)2894 static bool hswep_has_limit_sbox(unsigned int device)
2895 {
2896 	struct pci_dev *dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
2897 	u32 capid4;
2898 
2899 	if (!dev)
2900 		return false;
2901 
2902 	pci_read_config_dword(dev, HSWEP_PCU_CAPID4_OFFET, &capid4);
2903 	pci_dev_put(dev);
2904 	if (!hswep_get_chop(capid4))
2905 		return true;
2906 
2907 	return false;
2908 }
2909 
/* Register the HSW-EP MSR-based uncore PMUs, trimming box counts to the hardware. */
void hswep_uncore_cpu_init(void)
{
	/* Never expose more Cboxes than cores present in the package. */
	if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;

	/* Detect 6-8 core systems with only two SBOXes */
	if (hswep_has_limit_sbox(HSWEP_PCU_DID))
		hswep_uncore_sbox.num_boxes = 2;

	uncore_msr_uncores = hswep_msr_uncores;
}
2921 
/* HSW-EP Home Agent PMU (PCI-based); generic SNB-EP PCI register layout. */
static struct intel_uncore_type hswep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2929 
/*
 * Named IMC events. CAS counts are scaled by 64/2^20 (6.103515625e-5)
 * so one 64-byte cacheline transfer reads out directly in MiB.
 */
static struct uncore_event_desc hswep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};
2940 
/* HSW-EP integrated memory controller PMU: 8 channels, plus a fixed DCLK counter. */
static struct intel_uncore_type hswep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2952 
/* PCI config-space offsets of the four IRP counters (each 2 dwords wide). */
static unsigned hswep_uncore_irp_ctrs[] = {0xa0, 0xa8, 0xb0, 0xb8};
2954 
/*
 * Read a 64-bit IRP counter as two 32-bit PCI config reads. The second
 * dword is stored through ((u32 *)&count + 1); on little-endian x86 that
 * is the upper half of the 64-bit value.
 */
static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
	pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);

	return count;
}
2966 
/* IRP ops: SNB-EP PCI box control with IVB-EP event enable/disable and split reads. */
static struct intel_uncore_ops hswep_uncore_irp_ops = {
	.init_box	= snbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= ivbep_uncore_irp_disable_event,
	.enable_event	= ivbep_uncore_irp_enable_event,
	.read_counter	= hswep_uncore_irp_read_counter,
};
2975 
/* HSW-EP IRP PMU; counters live at fixed config offsets, hence no perf_ctr/event_ctl. */
static struct intel_uncore_type hswep_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &hswep_uncore_irp_ops,
	.format_group		= &snbep_uncore_format_group,
};
2986 
/* HSW-EP QPI link-layer PMU; uses the shared-reg based SNB-EP QPI match/mask ops. */
static struct intel_uncore_type hswep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.format_group		= &snbep_uncore_qpi_format_group,
};
3000 
/* HSW-EP R2PCIe events with restricted counter placement. */
static struct event_constraint hswep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x27, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	EVENT_CONSTRAINT_END
};
3022 
/* HSW-EP ring-to-PCIe interface PMU. */
static struct intel_uncore_type hswep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= hswep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3031 
/* HSW-EP R3QPI events with restricted counter placement. */
static struct event_constraint hswep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};
3068 
/* HSW-EP ring-to-QPI interface PMU. */
static struct intel_uncore_type hswep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 44,
	.constraints	= hswep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3077 
/* Indices into hswep_pci_uncores[], referenced from the PCI ID table driver_data. */
enum {
	HSWEP_PCI_UNCORE_HA,
	HSWEP_PCI_UNCORE_IMC,
	HSWEP_PCI_UNCORE_IRP,
	HSWEP_PCI_UNCORE_QPI,
	HSWEP_PCI_UNCORE_R2PCIE,
	HSWEP_PCI_UNCORE_R3QPI,
};
3086 
/* PCI-accessed uncore PMUs on HSW-EP, indexed by the HSWEP_PCI_UNCORE_* enum. */
static struct intel_uncore_type *hswep_pci_uncores[] = {
	[HSWEP_PCI_UNCORE_HA]	= &hswep_uncore_ha,
	[HSWEP_PCI_UNCORE_IMC]	= &hswep_uncore_imc,
	[HSWEP_PCI_UNCORE_IRP]	= &hswep_uncore_irp,
	[HSWEP_PCI_UNCORE_QPI]	= &hswep_uncore_qpi,
	[HSWEP_PCI_UNCORE_R2PCIE]	= &hswep_uncore_r2pcie,
	[HSWEP_PCI_UNCORE_R3QPI]	= &hswep_uncore_r3qpi,
	NULL,
};
3096 
/*
 * PCI IDs of the HSW-EP uncore PMON devices. driver_data encodes the
 * (uncore type, box index) pair; the filter entries are extra devices
 * the QPI code reads match/mask registers from.
 */
static const struct pci_device_id hswep_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f30),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f38),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb0),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb1),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb4),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb5),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd0),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd1),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd4),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd5),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f39),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f32),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f33),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3a),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f34),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f36),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f37),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3e),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};
3182 
/* PCI driver stub for the HSW-EP uncore PMON units (registered by the uncore core). */
static struct pci_driver hswep_uncore_pci_driver = {
	.name		= "hswep_uncore",
	.id_table	= hswep_uncore_pci_ids,
};
3187 
hswep_uncore_pci_init(void)3188 int hswep_uncore_pci_init(void)
3189 {
3190 	int ret = snbep_pci2phy_map_init(0x2f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
3191 	if (ret)
3192 		return ret;
3193 	uncore_pci_uncores = hswep_pci_uncores;
3194 	uncore_pci_driver = &hswep_uncore_pci_driver;
3195 	return 0;
3196 }
3197 /* end of Haswell-EP uncore support */
3198 
3199 /* BDX uncore support */
3200 
/* BDX Ubox: HSW-EP register layout but with IVB-EP style ops and formats. */
static struct intel_uncore_type bdx_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_ubox_format_group,
};
3216 
/* BDX Cbox events with restricted counter placement. */
static struct event_constraint bdx_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
	EVENT_CONSTRAINT_END
};
3224 
/* BDX Cbox: up to 24 boxes; num_boxes is trimmed to the core count at init. */
static struct intel_uncore_type bdx_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 24,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= bdx_uncore_cbox_constraints,
	.ops			= &hswep_uncore_cbox_ops,
	.format_group		= &hswep_uncore_cbox_format_group,
};
3240 
/* BDX Sbox; the whole unit may be removed from the list at init (no-SBOX parts). */
static struct intel_uncore_type bdx_uncore_sbox = {
	.name			= "sbox",
	.num_counters		= 4,
	.num_boxes		= 4,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
	.ops			= &hswep_uncore_sbox_msr_ops,
	.format_group		= &hswep_uncore_sbox_format_group,
};
3254 
/* Index of the Sbox slot below; must match its position in bdx_msr_uncores[]. */
#define BDX_MSR_UNCORE_SBOX	3

/* MSR-accessed uncore PMUs on BDX; the Sbox entry may be NULLed at init. */
static struct intel_uncore_type *bdx_msr_uncores[] = {
	&bdx_uncore_ubox,
	&bdx_uncore_cbox,
	&hswep_uncore_pcu,
	&bdx_uncore_sbox,
	NULL,
};
3264 
/* Bit 7 'Use Occupancy' is not available for counter 0 on BDX */
static struct event_constraint bdx_uncore_pcu_constraints[] = {
	EVENT_CONSTRAINT(0x80, 0xe, 0x80),	/* occupancy events: counters 1-3 only */
	EVENT_CONSTRAINT_END
};
3270 
#define BDX_PCU_DID			0x6fc0	/* PCI device ID of the BDX PCU */

/* Register the BDX MSR-based uncore PMUs, trimming/removing units per hardware. */
void bdx_uncore_cpu_init(void)
{
	/* Never expose more Cboxes than cores present in the package. */
	if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	uncore_msr_uncores = bdx_msr_uncores;

	/* Detect systems with no SBOXes */
	if ((boot_cpu_data.x86_model == 86) || hswep_has_limit_sbox(BDX_PCU_DID))
		uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;

	hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints;
}
3285 
/* BDX Home Agent PMU (PCI-based). */
static struct intel_uncore_type bdx_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3293 
/* BDX IMC PMU; reuses the HSW-EP event descriptions (same CAS scaling). */
static struct intel_uncore_type bdx_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3305 
/* BDX IRP PMU; counters read via the HSW-EP split config-space scheme. */
static struct intel_uncore_type bdx_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &hswep_uncore_irp_ops,
	.format_group		= &snbep_uncore_format_group,
};
3316 
/* BDX QPI link-layer PMU with SNB-EP match/mask shared-reg ops. */
static struct intel_uncore_type bdx_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.format_group		= &snbep_uncore_qpi_format_group,
};
3330 
/* BDX R2PCIe events with restricted counter placement. */
static struct event_constraint bdx_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	EVENT_CONSTRAINT_END
};
3343 
/* BDX ring-to-PCIe interface PMU. */
static struct intel_uncore_type bdx_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= bdx_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3352 
/* BDX R3QPI events with restricted counter placement. */
static struct event_constraint bdx_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};
3386 
/* BDX ring-to-QPI interface PMU. */
static struct intel_uncore_type bdx_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.constraints	= bdx_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3395 
/* Indices into bdx_pci_uncores[], referenced from the PCI ID table driver_data. */
enum {
	BDX_PCI_UNCORE_HA,
	BDX_PCI_UNCORE_IMC,
	BDX_PCI_UNCORE_IRP,
	BDX_PCI_UNCORE_QPI,
	BDX_PCI_UNCORE_R2PCIE,
	BDX_PCI_UNCORE_R3QPI,
};
3404 
/* PCI-accessed uncore PMUs on BDX, indexed by the BDX_PCI_UNCORE_* enum. */
static struct intel_uncore_type *bdx_pci_uncores[] = {
	[BDX_PCI_UNCORE_HA]	= &bdx_uncore_ha,
	[BDX_PCI_UNCORE_IMC]	= &bdx_uncore_imc,
	[BDX_PCI_UNCORE_IRP]	= &bdx_uncore_irp,
	[BDX_PCI_UNCORE_QPI]	= &bdx_uncore_qpi,
	[BDX_PCI_UNCORE_R2PCIE]	= &bdx_uncore_r2pcie,
	[BDX_PCI_UNCORE_R3QPI]	= &bdx_uncore_r3qpi,
	NULL,
};
3414 
/*
 * PCI IDs of the BDX uncore PMON devices. driver_data encodes the
 * (uncore type, box index) pair; the three filter entries are extra
 * devices the QPI code reads match/mask registers from.
 */
static const struct pci_device_id bdx_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f30),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f38),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb0),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb1),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb4),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb5),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd0),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd1),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd4),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd5),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f39),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f32),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f33),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3a),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f34),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f36),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f37),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3e),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* QPI Port 2 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   BDX_PCI_QPI_PORT2_FILTER),
	},
	{ /* end: all zeroes */ }
};
3505 
/* PCI driver used for BDX uncore device-ID matching; no probe/remove
 * callbacks are set here. */
static struct pci_driver bdx_uncore_pci_driver = {
	.name		= "bdx_uncore",
	.id_table	= bdx_uncore_pci_ids,
};
3510 
bdx_uncore_pci_init(void)3511 int bdx_uncore_pci_init(void)
3512 {
3513 	int ret = snbep_pci2phy_map_init(0x6f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
3514 
3515 	if (ret)
3516 		return ret;
3517 	uncore_pci_uncores = bdx_pci_uncores;
3518 	uncore_pci_driver = &bdx_uncore_pci_driver;
3519 	return 0;
3520 }
3521 
3522 /* end of BDX uncore support */
3523 
3524 /* SKX uncore support */
3525 
/* SKX Ubox: one box with two 48-bit general counters plus the fixed
 * UCLK counter; reuses the IVB-EP MSR ops and Ubox format group. */
static struct intel_uncore_type skx_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_ubox_format_group,
};
3540 
/* sysfs format attributes for SKX CHA events, including the filter-register
 * sub-fields exposed via config1. */
static struct attribute *skx_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid4.attr,
	&format_attr_filter_state5.attr,
	&format_attr_filter_rem.attr,
	&format_attr_filter_loc.attr,
	&format_attr_filter_nm.attr,
	&format_attr_filter_all_op.attr,
	&format_attr_filter_not_nm.attr,
	&format_attr_filter_opc_0.attr,
	&format_attr_filter_opc_1.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static const struct attribute_group skx_uncore_chabox_format_group = {
	.name = "format",
	.attrs = skx_uncore_cha_formats_attr,
};
3566 
/* Counter constraints for SKX CHA events. */
static struct event_constraint skx_uncore_chabox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	EVENT_CONSTRAINT_END
};

/* Maps event/umask selections to the CHA filter-field flags consumed by
 * skx_cha_filter_mask() (via er->idx in skx_cha_hw_config()). */
static struct extra_reg skx_uncore_cha_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x3134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x9134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x38, 0xff, 0x3),
	EVENT_EXTRA_END
};
3585 
skx_cha_filter_mask(int fields)3586 static u64 skx_cha_filter_mask(int fields)
3587 {
3588 	u64 mask = 0;
3589 
3590 	if (fields & 0x1)
3591 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_TID;
3592 	if (fields & 0x2)
3593 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LINK;
3594 	if (fields & 0x4)
3595 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_STATE;
3596 	if (fields & 0x8) {
3597 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_REM;
3598 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LOC;
3599 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC;
3600 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NM;
3601 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM;
3602 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC0;
3603 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC1;
3604 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NC;
3605 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ISOC;
3606 	}
3607 	return mask;
3608 }
3609 
/* Delegate CHA constraint lookup to the shared C-box helper, using the
 * SKX-specific filter-mask translation. */
static struct event_constraint *
skx_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, skx_cha_filter_mask);
}
3615 
skx_cha_hw_config(struct intel_uncore_box * box,struct perf_event * event)3616 static int skx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
3617 {
3618 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
3619 	struct extra_reg *er;
3620 	int idx = 0;
3621 	/* Any of the CHA events may be filtered by Thread/Core-ID.*/
3622 	if (event->hw.config & SNBEP_CBO_PMON_CTL_TID_EN)
3623 		idx = SKX_CHA_MSR_PMON_BOX_FILTER_TID;
3624 
3625 	for (er = skx_uncore_cha_extra_regs; er->msr; er++) {
3626 		if (er->event != (event->hw.config & er->config_mask))
3627 			continue;
3628 		idx |= er->idx;
3629 	}
3630 
3631 	if (idx) {
3632 		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
3633 			    HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
3634 		reg1->config = event->attr.config1 & skx_cha_filter_mask(idx);
3635 		reg1->idx = idx;
3636 	}
3637 	return 0;
3638 }
3639 
static struct intel_uncore_ops skx_uncore_chabox_ops = {
	/* There is no frz_en for chabox ctl */
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= skx_cha_hw_config,
	.get_constraint		= skx_cha_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

/* SKX CHA boxes; num_boxes is filled in at runtime by
 * skx_uncore_cpu_init() from the CAPID6 register. */
static struct intel_uncore_type skx_uncore_chabox = {
	.name			= "cha",
	.num_counters		= 4,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= skx_uncore_chabox_constraints,
	.ops			= &skx_uncore_chabox_ops,
	.format_group		= &skx_uncore_chabox_format_group,
};
3667 
/* sysfs format attributes for SKX IIO events (channel/FC masks included). */
static struct attribute *skx_uncore_iio_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh9.attr,
	&format_attr_ch_mask.attr,
	&format_attr_fc_mask.attr,
	NULL,
};

static const struct attribute_group skx_uncore_iio_format_group = {
	.name = "format",
	.attrs = skx_uncore_iio_formats_attr,
};

/* Counter constraints for SKX IIO events. */
static struct event_constraint skx_uncore_iio_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x88, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x95, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xd4, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xd5, 0xc),
	EVENT_CONSTRAINT_END
};
3694 
skx_iio_enable_event(struct intel_uncore_box * box,struct perf_event * event)3695 static void skx_iio_enable_event(struct intel_uncore_box *box,
3696 				 struct perf_event *event)
3697 {
3698 	struct hw_perf_event *hwc = &event->hw;
3699 
3700 	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
3701 }
3702 
/* MSR PMON ops for the SKX IIO boxes. */
static struct intel_uncore_ops skx_uncore_iio_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= skx_iio_enable_event,
	.read_counter		= uncore_msr_read_counter,
};
3711 
pmu_topology(struct intel_uncore_pmu * pmu,int die)3712 static struct intel_uncore_topology *pmu_topology(struct intel_uncore_pmu *pmu, int die)
3713 {
3714 	int idx;
3715 
3716 	for (idx = 0; idx < pmu->type->num_boxes; idx++) {
3717 		if (pmu->type->topology[die][idx].pmu_idx == pmu->pmu_idx)
3718 			return &pmu->type->topology[die][idx];
3719 	}
3720 
3721 	return NULL;
3722 }
3723 
3724 static umode_t
pmu_iio_mapping_visible(struct kobject * kobj,struct attribute * attr,int die,int zero_bus_pmu)3725 pmu_iio_mapping_visible(struct kobject *kobj, struct attribute *attr,
3726 			 int die, int zero_bus_pmu)
3727 {
3728 	struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(kobj_to_dev(kobj));
3729 	struct intel_uncore_topology *pmut = pmu_topology(pmu, die);
3730 
3731 	return (pmut && !pmut->iio->pci_bus_no && pmu->pmu_idx != zero_bus_pmu) ? 0 : attr->mode;
3732 }
3733 
/* SKX-specific IIO mapping visibility callback. */
static umode_t
skx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
{
	/* Root bus 0x00 is valid only for pmu_idx = 0. */
	return pmu_iio_mapping_visible(kobj, attr, die, 0);
}
3740 
skx_iio_mapping_show(struct device * dev,struct device_attribute * attr,char * buf)3741 static ssize_t skx_iio_mapping_show(struct device *dev,
3742 				    struct device_attribute *attr, char *buf)
3743 {
3744 	struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(dev);
3745 	struct dev_ext_attribute *ea = to_dev_ext_attribute(attr);
3746 	long die = (long)ea->var;
3747 	struct intel_uncore_topology *pmut = pmu_topology(pmu, die);
3748 
3749 	return sprintf(buf, "%04x:%02x\n", pmut ? pmut->iio->segment : 0,
3750 					   pmut ? pmut->iio->pci_bus_no : 0);
3751 }
3752 
/*
 * Read the SKX CPU bus-number MSR on @cpu into *@topology.
 * Returns -ENXIO if the MSR read fails or the valid bit is clear.
 */
static int skx_msr_cpu_bus_read(int cpu, u64 *topology)
{
	u64 val;

	if (rdmsrl_on_cpu(cpu, SKX_MSR_CPU_BUS_NUMBER, &val))
		return -ENXIO;
	if (!(val & SKX_MSR_CPU_BUS_VALID_BIT))
		return -ENXIO;

	*topology = val;

	return 0;
}
3765 
die_to_cpu(int die)3766 static int die_to_cpu(int die)
3767 {
3768 	int res = 0, cpu, current_die;
3769 	/*
3770 	 * Using cpus_read_lock() to ensure cpu is not going down between
3771 	 * looking at cpu_online_mask.
3772 	 */
3773 	cpus_read_lock();
3774 	for_each_online_cpu(cpu) {
3775 		current_die = topology_logical_die_id(cpu);
3776 		if (current_die == die) {
3777 			res = cpu;
3778 			break;
3779 		}
3780 	}
3781 	cpus_read_unlock();
3782 	return res;
3783 }
3784 
/* Kinds of type-specific payload stored behind intel_uncore_topology. */
enum {
	IIO_TOPOLOGY_TYPE,
	UPI_TOPOLOGY_TYPE,
	TOPOLOGY_MAX
};

/* Size of the per-kind payload, indexed by the enum above. */
static const size_t topology_size[TOPOLOGY_MAX] = {
	sizeof(*((struct intel_uncore_topology *)NULL)->iio),
	sizeof(*((struct intel_uncore_topology *)NULL)->upi)
};
3795 
pmu_alloc_topology(struct intel_uncore_type * type,int topology_type)3796 static int pmu_alloc_topology(struct intel_uncore_type *type, int topology_type)
3797 {
3798 	int die, idx;
3799 	struct intel_uncore_topology **topology;
3800 
3801 	if (!type->num_boxes)
3802 		return -EPERM;
3803 
3804 	topology = kcalloc(uncore_max_dies(), sizeof(*topology), GFP_KERNEL);
3805 	if (!topology)
3806 		goto err;
3807 
3808 	for (die = 0; die < uncore_max_dies(); die++) {
3809 		topology[die] = kcalloc(type->num_boxes, sizeof(**topology), GFP_KERNEL);
3810 		if (!topology[die])
3811 			goto clear;
3812 		for (idx = 0; idx < type->num_boxes; idx++) {
3813 			topology[die][idx].untyped = kcalloc(type->num_boxes,
3814 							     topology_size[topology_type],
3815 							     GFP_KERNEL);
3816 			if (!topology[die][idx].untyped)
3817 				goto clear;
3818 		}
3819 	}
3820 
3821 	type->topology = topology;
3822 
3823 	return 0;
3824 clear:
3825 	for (; die >= 0; die--) {
3826 		for (idx = 0; idx < type->num_boxes; idx++)
3827 			kfree(topology[die][idx].untyped);
3828 		kfree(topology[die]);
3829 	}
3830 	kfree(topology);
3831 err:
3832 	return -ENOMEM;
3833 }
3834 
pmu_free_topology(struct intel_uncore_type * type)3835 static void pmu_free_topology(struct intel_uncore_type *type)
3836 {
3837 	int die, idx;
3838 
3839 	if (type->topology) {
3840 		for (die = 0; die < uncore_max_dies(); die++) {
3841 			for (idx = 0; idx < type->num_boxes; idx++)
3842 				kfree(type->topology[die][idx].untyped);
3843 			kfree(type->topology[die]);
3844 		}
3845 		kfree(type->topology);
3846 		type->topology = NULL;
3847 	}
3848 }
3849 
/*
 * Walk every die, read its CPU bus-number MSR and PCI segment, and hand
 * both to @topology_cb.  Returns 0 on success, -EPERM when there are no
 * dies, or the first error encountered.
 */
static int skx_pmu_get_topology(struct intel_uncore_type *type,
				 int (*topology_cb)(struct intel_uncore_type*, int, int, u64))
{
	int die, ret = -EPERM;
	u64 cpu_bus_msr;

	for (die = 0; die < uncore_max_dies(); die++) {
		ret = skx_msr_cpu_bus_read(die_to_cpu(die), &cpu_bus_msr);
		if (ret)
			break;

		/* 'ret' temporarily holds the die's PCI segment number. */
		ret = uncore_die_to_segment(die);
		if (ret < 0)
			break;

		ret = topology_cb(type, ret, die, cpu_bus_msr);
		if (ret)
			break;
	}

	return ret;
}
3872 
/* Record segment and root bus number for every IIO box on @die. */
static int skx_iio_topology_cb(struct intel_uncore_type *type, int segment,
				int die, u64 cpu_bus_msr)
{
	struct intel_uncore_topology *t = type->topology[die];
	int idx;

	for (idx = 0; idx < type->num_boxes; idx++, t++) {
		t->pmu_idx = idx;
		t->iio->segment = segment;
		/* One byte of the MSR per IIO stack. */
		t->iio->pci_bus_no = (cpu_bus_msr >> (idx * BUS_NUM_STRIDE)) & 0xff;
	}

	return 0;
}
3888 
/* SKX hook: discover the IIO topology of all dies. */
static int skx_iio_get_topology(struct intel_uncore_type *type)
{
	return skx_pmu_get_topology(type, skx_iio_topology_cb);
}
3893 
/* Mapping attribute group; .attrs is filled in by pmu_set_mapping(). */
static struct attribute_group skx_iio_mapping_group = {
	.is_visible	= skx_iio_mapping_visible,
};

static const struct attribute_group *skx_iio_attr_update[] = {
	&skx_iio_mapping_group,
	NULL,
};
3902 
/*
 * Remove @ag from the NULL-terminated @groups list, shifting the
 * following entries down so the list stays dense and NULL-terminated.
 */
static void pmu_clear_mapping_attr(const struct attribute_group **groups,
				   struct attribute_group *ag)
{
	int i;

	for (i = 0; groups[i]; i++) {
		if (groups[i] == ag) {
			/* Shift the tail (including the final NULL) left by one. */
			for (i++; groups[i]; i++)
				groups[i - 1] = groups[i];
			groups[i - 1] = NULL;
			break;
		}
	}
}
3917 
/*
 * Build the per-die "dieN" sysfs mapping attributes for @type and attach
 * them to @ag.  @show formats one die's mapping; @topology_type selects
 * the payload allocated per box.  On any failure, all partial allocations
 * are rolled back and @ag is removed from type->attr_update so the group
 * is never exposed half-initialized.
 */
static void
pmu_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag,
		ssize_t (*show)(struct device*, struct device_attribute*, char*),
		int topology_type)
{
	char buf[64];
	int ret;
	long die = -1;
	struct attribute **attrs = NULL;
	struct dev_ext_attribute *eas = NULL;

	ret = pmu_alloc_topology(type, topology_type);
	if (ret < 0)
		goto clear_attr_update;

	ret = type->get_topology(type);
	if (ret < 0)
		goto clear_topology;

	/* One more for NULL. */
	attrs = kcalloc((uncore_max_dies() + 1), sizeof(*attrs), GFP_KERNEL);
	if (!attrs)
		goto clear_topology;

	/* All dev_ext_attributes live in one block; freed as one in cleanup. */
	eas = kcalloc(uncore_max_dies(), sizeof(*eas), GFP_KERNEL);
	if (!eas)
		goto clear_attrs;

	for (die = 0; die < uncore_max_dies(); die++) {
		snprintf(buf, sizeof(buf), "die%ld", die);
		sysfs_attr_init(&eas[die].attr.attr);
		eas[die].attr.attr.name = kstrdup(buf, GFP_KERNEL);
		if (!eas[die].attr.attr.name)
			goto err;
		eas[die].attr.attr.mode = 0444;
		eas[die].attr.show = show;
		eas[die].attr.store = NULL;
		/* Stash the die index for the show() callback. */
		eas[die].var = (void *)die;
		attrs[die] = &eas[die].attr.attr;
	}
	ag->attrs = attrs;

	return;
err:
	/* kfree(NULL) is fine for the die whose kstrdup() failed. */
	for (; die >= 0; die--)
		kfree(eas[die].attr.attr.name);
	kfree(eas);
clear_attrs:
	kfree(attrs);
clear_topology:
	pmu_free_topology(type);
clear_attr_update:
	pmu_clear_mapping_attr(type->attr_update, ag);
}
3972 
/*
 * Undo pmu_set_mapping(): free the attribute names, the dev_ext_attribute
 * array (allocated as a single block, so freeing the ext-attr of the first
 * attribute releases all of them), the attrs array, and the topology.
 */
static void
pmu_cleanup_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
{
	struct attribute **attr = ag->attrs;

	if (!attr)
		return;

	for (; *attr; attr++)
		kfree((*attr)->name);
	kfree(attr_to_ext_attr(*ag->attrs));
	kfree(ag->attrs);
	ag->attrs = NULL;
	pmu_free_topology(type);
}
3988 
/* Create IIO ("segment:bus") mapping attributes for @type under @ag. */
static void
pmu_iio_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
{
	pmu_set_mapping(type, ag, skx_iio_mapping_show, IIO_TOPOLOGY_TYPE);
}

/* SKX hook: build the IIO mapping attribute group. */
static void skx_iio_set_mapping(struct intel_uncore_type *type)
{
	pmu_iio_set_mapping(type, &skx_iio_mapping_group);
}

/* SKX hook: tear down the IIO mapping attribute group. */
static void skx_iio_cleanup_mapping(struct intel_uncore_type *type)
{
	pmu_cleanup_mapping(type, &skx_iio_mapping_group);
}
4004 
/* SKX IIO stacks: six boxes with die-mapping sysfs attributes. */
static struct intel_uncore_type skx_uncore_iio = {
	.name			= "iio",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= SKX_IIO0_MSR_PMON_CTL0,
	.perf_ctr		= SKX_IIO0_MSR_PMON_CTR0,
	.event_mask		= SKX_IIO_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl		= SKX_IIO0_MSR_PMON_BOX_CTL,
	.msr_offset		= SKX_IIO_MSR_OFFSET,
	.constraints		= skx_uncore_iio_constraints,
	.ops			= &skx_uncore_iio_ops,
	.format_group		= &skx_uncore_iio_format_group,
	.attr_update		= skx_iio_attr_update,
	.get_topology		= skx_iio_get_topology,
	.set_mapping		= skx_iio_set_mapping,
	.cleanup_mapping	= skx_iio_cleanup_mapping,
};
4024 
enum perf_uncore_iio_freerunning_type_id {
	SKX_IIO_MSR_IOCLK			= 0,
	SKX_IIO_MSR_BW				= 1,
	SKX_IIO_MSR_UTIL			= 2,

	SKX_IIO_FREERUNNING_TYPE_MAX,
};


/* Initializer order follows struct freerunning_counters — NOTE(review):
 * believed to be { counter base, counter offset, box offset, #counters,
 * counter width }; confirm against uncore.h. */
static struct freerunning_counters skx_iio_freerunning[] = {
	[SKX_IIO_MSR_IOCLK]	= { 0xa45, 0x1, 0x20, 1, 36 },
	[SKX_IIO_MSR_BW]	= { 0xb00, 0x1, 0x10, 8, 36 },
	[SKX_IIO_MSR_UTIL]	= { 0xb08, 0x1, 0x10, 8, 36 },
};
4039 
/*
 * Event descriptors for the SKX IIO free-running counters.  The bandwidth
 * events carry a scale of 3.814697266e-6 (= 4/2^20), i.e. raw counts are
 * presumably in 4-byte units reported as MiB — confirm against the Intel
 * uncore documentation.
 */
static struct uncore_event_desc skx_uncore_iio_freerunning_events[] = {
	/* Free-Running IO CLOCKS Counter */
	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
	/* Free-Running IIO BANDWIDTH Counters */
	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port0,		"event=0xff,umask=0x24"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1,		"event=0xff,umask=0x25"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2,		"event=0xff,umask=0x26"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3,		"event=0xff,umask=0x27"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit,	"MiB"),
	/* Free-running IIO UTILIZATION Counters */
	INTEL_UNCORE_EVENT_DESC(util_in_port0,		"event=0xff,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(util_out_port0,		"event=0xff,umask=0x31"),
	INTEL_UNCORE_EVENT_DESC(util_in_port1,		"event=0xff,umask=0x32"),
	INTEL_UNCORE_EVENT_DESC(util_out_port1,		"event=0xff,umask=0x33"),
	INTEL_UNCORE_EVENT_DESC(util_in_port2,		"event=0xff,umask=0x34"),
	INTEL_UNCORE_EVENT_DESC(util_out_port2,		"event=0xff,umask=0x35"),
	INTEL_UNCORE_EVENT_DESC(util_in_port3,		"event=0xff,umask=0x36"),
	INTEL_UNCORE_EVENT_DESC(util_out_port3,		"event=0xff,umask=0x37"),
	{ /* end: all zeroes */ },
};
4079 
/* Free-running counters cannot be enabled/disabled: read-only ops. */
static struct intel_uncore_ops skx_uncore_iio_freerunning_ops = {
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= uncore_freerunning_hw_config,
};

static struct attribute *skx_uncore_iio_freerunning_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	NULL,
};

static const struct attribute_group skx_uncore_iio_freerunning_format_group = {
	.name = "format",
	.attrs = skx_uncore_iio_freerunning_formats_attr,
};
4095 
/* Pseudo-PMU exposing the SKX IIO free-running counters. */
static struct intel_uncore_type skx_uncore_iio_free_running = {
	.name			= "iio_free_running",
	.num_counters		= 17,
	.num_boxes		= 6,
	.num_freerunning_types	= SKX_IIO_FREERUNNING_TYPE_MAX,
	.freerunning		= skx_iio_freerunning,
	.ops			= &skx_uncore_iio_freerunning_ops,
	.event_descs		= skx_uncore_iio_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
4106 
/* Generic SKX format attributes (8-bit threshold). */
static struct attribute *skx_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group skx_uncore_format_group = {
	.name = "format",
	.attrs = skx_uncore_formats_attr,
};
4120 
/* SKX IRP boxes; share the IIO MSR ops. */
static struct intel_uncore_type skx_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= SKX_IRP0_MSR_PMON_CTL0,
	.perf_ctr		= SKX_IRP0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SKX_IRP0_MSR_PMON_BOX_CTL,
	.msr_offset		= SKX_IRP_MSR_OFFSET,
	.ops			= &skx_uncore_iio_ops,
	.format_group		= &skx_uncore_format_group,
};
4134 
/* PCU format attributes: adds occupancy and frequency-band filters. */
static struct attribute *skx_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge_det.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

static struct attribute_group skx_uncore_pcu_format_group = {
	.name = "format",
	.attrs = skx_uncore_pcu_formats_attr,
};
4154 
/* PCU ops: common IVB-EP MSR ops plus HSW-EP filter handling. */
static struct intel_uncore_ops skx_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

/* SKX power control unit PMON box. */
static struct intel_uncore_type skx_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &skx_uncore_pcu_ops,
	.format_group		= &skx_uncore_pcu_format_group,
};
4175 
/* All MSR-based SKX uncore PMU types, NULL-terminated. */
static struct intel_uncore_type *skx_msr_uncores[] = {
	&skx_uncore_ubox,
	&skx_uncore_chabox,
	&skx_uncore_iio,
	&skx_uncore_iio_free_running,
	&skx_uncore_irp,
	&skx_uncore_pcu,
	NULL,
};
4185 
/*
 * To determine the number of CHAs, read bits 27:0 of the CAPID6
 * register, which is located at Device 30, Function 3, Offset 0x9C
 * (PCI device ID 0x2083).
 */
4190 #define SKX_CAPID6		0x9c
4191 #define SKX_CHA_BIT_MASK	GENMASK(27, 0)
4192 
skx_count_chabox(void)4193 static int skx_count_chabox(void)
4194 {
4195 	struct pci_dev *dev = NULL;
4196 	u32 val = 0;
4197 
4198 	dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2083, dev);
4199 	if (!dev)
4200 		goto out;
4201 
4202 	pci_read_config_dword(dev, SKX_CAPID6, &val);
4203 	val &= SKX_CHA_BIT_MASK;
4204 out:
4205 	pci_dev_put(dev);
4206 	return hweight32(val);
4207 }
4208 
/* Register the SKX MSR uncores; CHA count is probed at runtime. */
void skx_uncore_cpu_init(void)
{
	skx_uncore_chabox.num_boxes = skx_count_chabox();
	uncore_msr_uncores = skx_msr_uncores;
}
4214 
/* SKX memory controller channels (PCI PMON). */
static struct intel_uncore_type skx_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 6,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
4231 
/* UPI format attributes; umask_ext covers the extended umask bits. */
static struct attribute *skx_upi_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group skx_upi_uncore_format_group = {
	.name = "format",
	.attrs = skx_upi_uncore_formats_attr,
};
4245 
/* Initialize a UPI PMON box: set UNCORE_BOX_FLAG_CTL_OFFS8 and reset it. */
static void skx_upi_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;

	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
	pci_write_config_dword(pdev, SKX_UPI_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
}

/* PMON register ops for the SKX UPI PCI boxes. */
static struct intel_uncore_ops skx_upi_uncore_pci_ops = {
	.init_box	= skx_upi_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};
4262 
/* Show the UPI mapping attribute only when the link is marked enabled. */
static umode_t
skx_upi_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
{
	struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(kobj_to_dev(kobj));

	return pmu->type->topology[die][pmu->pmu_idx].upi->enabled ? attr->mode : 0;
}
4270 
skx_upi_mapping_show(struct device * dev,struct device_attribute * attr,char * buf)4271 static ssize_t skx_upi_mapping_show(struct device *dev,
4272 				    struct device_attribute *attr, char *buf)
4273 {
4274 	struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(dev);
4275 	struct dev_ext_attribute *ea = to_dev_ext_attribute(attr);
4276 	long die = (long)ea->var;
4277 	struct uncore_upi_topology *upi = pmu->type->topology[die][pmu->pmu_idx].upi;
4278 
4279 	return sysfs_emit(buf, "upi_%d,die_%d\n", upi->pmu_idx_to, upi->die_to);
4280 }
4281 
4282 #define SKX_UPI_REG_DID			0x2058
4283 #define SKX_UPI_REGS_ADDR_DEVICE_LINK0	0x0e
4284 #define SKX_UPI_REGS_ADDR_FUNCTION	0x00
4285 
4286 /*
4287  * UPI Link Parameter 0
4288  * |  Bit  |  Default  |  Description
4289  * | 19:16 |     0h    | base_nodeid - The NodeID of the sending socket.
4290  * | 12:8  |    00h    | sending_port - The processor die port number of the sending port.
4291  */
4292 #define SKX_KTILP0_OFFSET	0x94
4293 
4294 /*
4295  * UPI Pcode Status. This register is used by PCode to store the link training status.
4296  * |  Bit  |  Default  |  Description
4297  * |   4   |     0h    | ll_status_valid — Bit indicates the valid training status
4298  *                       logged from PCode to the BIOS.
4299  */
4300 #define SKX_KTIPCSTS_OFFSET	0x120
4301 
/*
 * Read the UPI link registers of @dev and record the link state in @tp.
 * Returns 0 on success or a negative errno from the PCI config reads.
 */
static int upi_fill_topology(struct pci_dev *dev, struct intel_uncore_topology *tp,
			     int pmu_idx)
{
	int ret;
	u32 upi_conf;
	struct uncore_upi_topology *upi = tp->upi;

	tp->pmu_idx = pmu_idx;
	ret = pci_read_config_dword(dev, SKX_KTIPCSTS_OFFSET, &upi_conf);
	if (ret) {
		ret = pcibios_err_to_errno(ret);
		goto err;
	}
	/* KTIPCSTS bit 4: valid link-training status (see comment above). */
	upi->enabled = (upi_conf >> 4) & 1;
	if (upi->enabled) {
		ret = pci_read_config_dword(dev, SKX_KTILP0_OFFSET,
					    &upi_conf);
		if (ret) {
			ret = pcibios_err_to_errno(ret);
			goto err;
		}
		/* KTILP0 bits 19:16 and 12:8 (see the register layout above). */
		upi->die_to = (upi_conf >> 16) & 0xf;
		upi->pmu_idx_to = (upi_conf >> 8) & 0x1f;
	}
err:
	return ret;
}
4329 
/*
 * Discover the UPI link topology of every UPI box on @die by reading the
 * per-link PCI config registers on the die's UPI bus.  Links whose PCI
 * device is absent are skipped.  Returns 0 on success or the first error
 * from upi_fill_topology().
 */
static int skx_upi_topology_cb(struct intel_uncore_type *type, int segment,
				int die, u64 cpu_bus_msr)
{
	int idx, ret = 0;	/* was uninitialized when no link device exists */
	struct intel_uncore_topology *upi;
	unsigned int devfn;
	struct pci_dev *dev;
	/* The UPI bus number lives in the fourth byte of the MSR. */
	u8 bus = cpu_bus_msr >> (3 * BUS_NUM_STRIDE);

	for (idx = 0; idx < type->num_boxes; idx++) {
		upi = &type->topology[die][idx];
		devfn = PCI_DEVFN(SKX_UPI_REGS_ADDR_DEVICE_LINK0 + idx,
				  SKX_UPI_REGS_ADDR_FUNCTION);
		dev = pci_get_domain_bus_and_slot(segment, bus, devfn);
		if (dev) {
			ret = upi_fill_topology(dev, upi, idx);
			/*
			 * Drop the reference taken by
			 * pci_get_domain_bus_and_slot() for every link, not
			 * just the last one found.
			 */
			pci_dev_put(dev);
			if (ret)
				break;
		}
	}

	return ret;
}
4354 
/* SKX hook: discover UPI topology; refused on stepping 11 (CPX). */
static int skx_upi_get_topology(struct intel_uncore_type *type)
{
	/* CPX case is not supported */
	if (boot_cpu_data.x86_stepping == 11)
		return -EPERM;

	return skx_pmu_get_topology(type, skx_upi_topology_cb);
}
4363 
/* UPI mapping attribute group; .attrs is filled in by pmu_set_mapping(). */
static struct attribute_group skx_upi_mapping_group = {
	.is_visible	= skx_upi_mapping_visible,
};

static const struct attribute_group *skx_upi_attr_update[] = {
	&skx_upi_mapping_group,
	NULL
};
4372 
/* Populate @ag with the UPI topology mapping attributes for sysfs. */
static void
pmu_upi_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
{
	pmu_set_mapping(type, ag, skx_upi_mapping_show, UPI_TOPOLOGY_TYPE);
}
4378 
/* SKX hook: build the UPI mapping attribute group. */
static void skx_upi_set_mapping(struct intel_uncore_type *type)
{
	pmu_upi_set_mapping(type, &skx_upi_mapping_group);
}
4383 
/* SKX hook: free the UPI mapping attributes built by skx_upi_set_mapping(). */
static void skx_upi_cleanup_mapping(struct intel_uncore_type *type)
{
	pmu_cleanup_mapping(type, &skx_upi_mapping_group);
}
4388 
/* SKX UPI (inter-socket link) PMON unit: 3 boxes, 4 x 48-bit counters each. */
static struct intel_uncore_type skx_uncore_upi = {
	.name		= "upi",
	.num_counters   = 4,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SKX_UPI_PCI_PMON_CTR0,
	.event_ctl	= SKX_UPI_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext = SKX_UPI_CTL_UMASK_EXT,
	.box_ctl	= SKX_UPI_PCI_PMON_BOX_CTL,
	.ops		= &skx_upi_uncore_pci_ops,
	.format_group	= &skx_upi_uncore_format_group,
	.attr_update	= skx_upi_attr_update,
	.get_topology	= skx_upi_get_topology,
	.set_mapping	= skx_upi_set_mapping,
	.cleanup_mapping = skx_upi_cleanup_mapping,
};
4406 
/* Reset and arm the M2M PMON box. */
static void skx_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;

	/* Counter control registers on this box are 8 bytes apart. */
	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
	pci_write_config_dword(pdev, SKX_M2M_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
}
4414 
/* M2M box ops: SKX-specific init, SNB-EP style box/event control. */
static struct intel_uncore_ops skx_m2m_uncore_pci_ops = {
	.init_box	= skx_m2m_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};
4423 
/* SKX M2M (mesh-to-memory) PMON unit: 2 boxes, 4 x 48-bit counters each. */
static struct intel_uncore_type skx_uncore_m2m = {
	.name		= "m2m",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SKX_M2M_PCI_PMON_CTR0,
	.event_ctl	= SKX_M2M_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SKX_M2M_PCI_PMON_BOX_CTL,
	.ops		= &skx_m2m_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
4436 
/* M2PCIe event 0x23 may only be scheduled on counters 0-1. */
static struct event_constraint skx_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	EVENT_CONSTRAINT_END
};
4441 
/* SKX M2PCIe PMON unit: 4 boxes, 4 x 48-bit counters each. */
static struct intel_uncore_type skx_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters   = 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.constraints	= skx_uncore_m2pcie_constraints,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
4455 
/* M3UPI counter restrictions: each event id maps to a subset of counters. */
static struct event_constraint skx_uncore_m3upi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x51, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x52, 0x7),
	EVENT_CONSTRAINT_END
};
4467 
/* SKX M3UPI (UPI mesh interface) PMON unit: 3 boxes, 3 x 48-bit counters. */
static struct intel_uncore_type skx_uncore_m3upi = {
	.name		= "m3upi",
	.num_counters   = 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.constraints	= skx_uncore_m3upi_constraints,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
4481 
/* Indices into skx_pci_uncores[], referenced by the PCI id table below. */
enum {
	SKX_PCI_UNCORE_IMC,
	SKX_PCI_UNCORE_M2M,
	SKX_PCI_UNCORE_UPI,
	SKX_PCI_UNCORE_M2PCIE,
	SKX_PCI_UNCORE_M3UPI,
};
4489 
/* All SKX PCI-based uncore PMON unit types, indexed by the enum above. */
static struct intel_uncore_type *skx_pci_uncores[] = {
	[SKX_PCI_UNCORE_IMC]	= &skx_uncore_imc,
	[SKX_PCI_UNCORE_M2M]	= &skx_uncore_m2m,
	[SKX_PCI_UNCORE_UPI]	= &skx_uncore_upi,
	[SKX_PCI_UNCORE_M2PCIE]	= &skx_uncore_m2pcie,
	[SKX_PCI_UNCORE_M3UPI]	= &skx_uncore_m3upi,
	NULL,
};
4498 
/*
 * SKX uncore PCI device table.  driver_data packs (device, function,
 * uncore type index, box index) via UNCORE_PCI_DEV_FULL_DATA.
 */
static const struct pci_device_id skx_uncore_pci_ids[] = {
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 2, SKX_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 6, SKX_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 2, SKX_PCI_UNCORE_IMC, 2),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 2, SKX_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 6, SKX_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 2, SKX_PCI_UNCORE_IMC, 5),
	},
	{ /* M2M0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 0, SKX_PCI_UNCORE_M2M, 0),
	},
	{ /* M2M1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 0, SKX_PCI_UNCORE_M2M, 1),
	},
	{ /* UPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, SKX_PCI_UNCORE_UPI, 0),
	},
	{ /* UPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, SKX_PCI_UNCORE_UPI, 1),
	},
	{ /* UPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, SKX_PCI_UNCORE_UPI, 2),
	},
	{ /* M2PCIe 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 1, SKX_PCI_UNCORE_M2PCIE, 0),
	},
	{ /* M2PCIe 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 1, SKX_PCI_UNCORE_M2PCIE, 1),
	},
	{ /* M2PCIe 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(23, 1, SKX_PCI_UNCORE_M2PCIE, 2),
	},
	{ /* M2PCIe 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3),
	},
	{ /* M3UPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 0),
	},
	{ /* M3UPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204E),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 2, SKX_PCI_UNCORE_M3UPI, 1),
	},
	{ /* M3UPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 5, SKX_PCI_UNCORE_M3UPI, 2),
	},
	{ /* end: all zeroes */ }
};
4574 
4575 
/* Stub driver used only to enumerate SKX uncore PCI devices. */
static struct pci_driver skx_uncore_pci_driver = {
	.name		= "skx_uncore",
	.id_table	= skx_uncore_pci_ids,
};
4580 
/*
 * Register the SKX PCI uncore units.  Returns 0 on success or a negative
 * errno if the bus-to-socket mapping cannot be established.
 */
int skx_uncore_pci_init(void)
{
	/* need to double check pci address */
	int ret = snbep_pci2phy_map_init(0x2014, SKX_CPUNODEID, SKX_GIDNIDMAP, false);

	if (ret)
		return ret;

	uncore_pci_uncores = skx_pci_uncores;
	uncore_pci_driver = &skx_uncore_pci_driver;
	return 0;
}
4593 
4594 /* end of SKX uncore support */
4595 
4596 /* SNR uncore support */
4597 
/* SNR UBox PMON unit: 2 general counters plus a fixed UCLK counter. */
static struct intel_uncore_type snr_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= SNR_U_MSR_PMON_CTR0,
	.event_ctl		= SNR_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= SNR_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= SNR_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_format_group,
};
4612 
/* sysfs "format" attributes for the SNR CHA PMU (incl. TID filter). */
static struct attribute *snr_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext2.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid5.attr,
	NULL,
};
/* Wraps the CHA format attributes into the "format" sysfs group. */
static const struct attribute_group snr_uncore_chabox_format_group = {
	.name = "format",
	.attrs = snr_uncore_cha_formats_attr,
};
4627 
/*
 * Program the per-box CHA TID filter register for an event.
 * The filter MSR of box N is msr_offset * N past FILTER0.
 * Always returns 0.
 */
static int snr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;

	reg1->reg = SNR_C0_MSR_PMON_BOX_FILTER0 +
		    box->pmu->type->msr_offset * box->pmu->pmu_idx;
	reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
	reg1->idx = 0;

	return 0;
}
4639 
/* Enable a CHA event: write the filter MSR first, then the control MSR. */
static void snr_cha_enable_event(struct intel_uncore_box *box,
				   struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	/* Apply the TID filter (if configured) before enabling the counter. */
	if (reg1->idx != EXTRA_REG_NONE)
		wrmsrl(reg1->reg, reg1->config);

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
4651 
/* CHA box ops: IVB-EP style box control with SNR filter handling. */
static struct intel_uncore_ops snr_uncore_chabox_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= snr_cha_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= snr_cha_hw_config,
};
4661 
/* SNR CHA (caching/home agent) PMON unit: 6 boxes, 4 x 48-bit counters. */
static struct intel_uncore_type snr_uncore_chabox = {
	.name			= "cha",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= SNR_CHA_MSR_PMON_CTL0,
	.perf_ctr		= SNR_CHA_MSR_PMON_CTR0,
	.box_ctl		= SNR_CHA_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SNR_CHA_RAW_EVENT_MASK_EXT,
	.ops			= &snr_uncore_chabox_ops,
	.format_group		= &snr_uncore_chabox_format_group,
};
4676 
/* sysfs "format" attributes for the SNR IIO PMU. */
static struct attribute *snr_uncore_iio_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh9.attr,
	&format_attr_ch_mask2.attr,
	&format_attr_fc_mask2.attr,
	NULL,
};
4687 
/* Wraps the IIO format attributes into the "format" sysfs group. */
static const struct attribute_group snr_uncore_iio_format_group = {
	.name = "format",
	.attrs = snr_uncore_iio_formats_attr,
};
4692 
/* Visibility callback for IIO mapping attributes on SNR. */
static umode_t
snr_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
{
	/* Root bus 0x00 is valid only for pmu_idx = 1. */
	return pmu_iio_mapping_visible(kobj, attr, die, 1);
}
4699 
/* sysfs attribute group exposing the SNR IIO die-to-bus mapping. */
static struct attribute_group snr_iio_mapping_group = {
	.is_visible	= snr_iio_mapping_visible,
};
4703 
/* Optional attribute groups attached to the IIO PMU at registration time. */
static const struct attribute_group *snr_iio_attr_update[] = {
	&snr_iio_mapping_group,
	NULL,
};
4708 
/*
 * Build the IIO topology from the SAD_CONTROL_CFG registers of every
 * mesh2IIO device, translating HW stack ids to PMON ids via
 * @sad_pmon_mapping.  Returns 0 on success; -EPERM when no device is
 * found or a die/stack id is out of range; or a negative errno from the
 * config read.
 */
static int sad_cfg_iio_topology(struct intel_uncore_type *type, u8 *sad_pmon_mapping)
{
	u32 sad_cfg;
	int die, stack_id, ret = -EPERM;
	struct pci_dev *dev = NULL;

	/* pci_get_device() drops the previous device's reference each pass. */
	while ((dev = pci_get_device(PCI_VENDOR_ID_INTEL, SNR_ICX_MESH2IIO_MMAP_DID, dev))) {
		ret = pci_read_config_dword(dev, SNR_ICX_SAD_CONTROL_CFG, &sad_cfg);
		if (ret) {
			ret = pcibios_err_to_errno(ret);
			break;
		}

		die = uncore_pcibus_to_dieid(dev->bus);
		stack_id = SAD_CONTROL_STACK_ID(sad_cfg);
		if (die < 0 || stack_id >= type->num_boxes) {
			ret = -EPERM;
			break;
		}

		/* Convert stack id from SAD_CONTROL to PMON notation. */
		stack_id = sad_pmon_mapping[stack_id];

		type->topology[die][stack_id].iio->segment = pci_domain_nr(dev->bus);
		type->topology[die][stack_id].pmu_idx = stack_id;
		type->topology[die][stack_id].iio->pci_bus_no = dev->bus->number;
	}

	/* Only reached with a live reference when the loop broke early. */
	pci_dev_put(dev);

	return ret;
}
4741 
/*
 * SNR has a static mapping of stack IDs from SAD_CONTROL_CFG notation to the
 * PMON notation used by the IIO PMU (see snr_sad_pmon_mapping[]).
 */
/* PMON-side stack ids for the SNR IIO stacks. */
enum {
	SNR_QAT_PMON_ID,
	SNR_CBDMA_DMI_PMON_ID,
	SNR_NIS_PMON_ID,
	SNR_DLB_PMON_ID,
	SNR_PCIE_GEN3_PMON_ID
};
4752 
/* Indexed by SAD_CONTROL_CFG stack id; yields the PMON stack id. */
static u8 snr_sad_pmon_mapping[] = {
	SNR_CBDMA_DMI_PMON_ID,
	SNR_PCIE_GEN3_PMON_ID,
	SNR_DLB_PMON_ID,
	SNR_NIS_PMON_ID,
	SNR_QAT_PMON_ID
};
4760 
/* SNR hook: discover IIO topology using the SNR stack-id mapping. */
static int snr_iio_get_topology(struct intel_uncore_type *type)
{
	return sad_cfg_iio_topology(type, snr_sad_pmon_mapping);
}
4765 
/* SNR hook: build the IIO mapping attribute group. */
static void snr_iio_set_mapping(struct intel_uncore_type *type)
{
	pmu_iio_set_mapping(type, &snr_iio_mapping_group);
}
4770 
/* SNR hook: free the IIO mapping attributes built by snr_iio_set_mapping(). */
static void snr_iio_cleanup_mapping(struct intel_uncore_type *type)
{
	pmu_cleanup_mapping(type, &snr_iio_mapping_group);
}
4775 
/* IIO counter restrictions: each event id maps to a subset of counters. */
static struct event_constraint snr_uncore_iio_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xd5, 0xc),
	EVENT_CONSTRAINT_END
};
4782 
/* SNR IIO PMON unit: 5 boxes (one per stack), 4 x 48-bit counters each. */
static struct intel_uncore_type snr_uncore_iio = {
	.name			= "iio",
	.num_counters		= 4,
	.num_boxes		= 5,
	.perf_ctr_bits		= 48,
	.event_ctl		= SNR_IIO_MSR_PMON_CTL0,
	.perf_ctr		= SNR_IIO_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl		= SNR_IIO_MSR_PMON_BOX_CTL,
	.msr_offset		= SNR_IIO_MSR_OFFSET,
	.constraints		= snr_uncore_iio_constraints,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &snr_uncore_iio_format_group,
	.attr_update		= snr_iio_attr_update,
	.get_topology		= snr_iio_get_topology,
	.set_mapping		= snr_iio_set_mapping,
	.cleanup_mapping	= snr_iio_cleanup_mapping,
};
4802 
/* SNR IRP (IIO ring port) PMON unit: 5 boxes, 2 x 48-bit counters each. */
static struct intel_uncore_type snr_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 5,
	.perf_ctr_bits		= 48,
	.event_ctl		= SNR_IRP0_MSR_PMON_CTL0,
	.perf_ctr		= SNR_IRP0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNR_IRP0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNR_IRP_MSR_OFFSET,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_format_group,
};
4816 
/* SNR M2PCIe PMON unit: 5 boxes, 4 x 48-bit counters each. */
static struct intel_uncore_type snr_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters	= 4,
	.num_boxes	= 5,
	.perf_ctr_bits	= 48,
	.event_ctl	= SNR_M2PCIE_MSR_PMON_CTL0,
	.perf_ctr	= SNR_M2PCIE_MSR_PMON_CTR0,
	.box_ctl	= SNR_M2PCIE_MSR_PMON_BOX_CTL,
	.msr_offset	= SNR_M2PCIE_MSR_OFFSET,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_format_group,
};
4830 
snr_pcu_hw_config(struct intel_uncore_box * box,struct perf_event * event)4831 static int snr_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4832 {
4833 	struct hw_perf_event *hwc = &event->hw;
4834 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
4835 	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
4836 
4837 	if (ev_sel >= 0xb && ev_sel <= 0xe) {
4838 		reg1->reg = SNR_PCU_MSR_PMON_BOX_FILTER;
4839 		reg1->idx = ev_sel - 0xb;
4840 		reg1->config = event->attr.config1 & (0xff << reg1->idx);
4841 	}
4842 	return 0;
4843 }
4844 
/* PCU box ops: common IVB-EP MSR ops plus band-filter constraint handling. */
static struct intel_uncore_ops snr_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snr_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};
4851 
/* SNR PCU (power control unit) PMON: 1 box, 4 x 48-bit counters, 1 shared filter reg. */
static struct intel_uncore_type snr_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNR_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNR_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNR_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snr_uncore_pcu_ops,
	.format_group		= &skx_uncore_pcu_format_group,
};
4865 
/* Free-running counter groups exposed by the SNR IIO PMU. */
enum perf_uncore_snr_iio_freerunning_type_id {
	SNR_IIO_MSR_IOCLK,
	SNR_IIO_MSR_BW_IN,

	SNR_IIO_FREERUNNING_TYPE_MAX,
};
4872 
/* IIO free-running counters: { counter_base, box_offset, counter_offset, num_counters, bits }. */
static struct freerunning_counters snr_iio_freerunning[] = {
	[SNR_IIO_MSR_IOCLK]	= { 0x1eac, 0x1, 0x10, 1, 48 },
	[SNR_IIO_MSR_BW_IN]	= { 0x1f00, 0x1, 0x10, 8, 48 },
};
4877 
/* Named events for the IIO free-running counters (per-port inbound BW in MiB). */
static struct uncore_event_desc snr_uncore_iio_freerunning_events[] = {
	/* Free-Running IIO CLOCKS Counter */
	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
	/* Free-Running IIO BANDWIDTH IN Counters */
	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4,		"event=0xff,umask=0x24"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5,		"event=0xff,umask=0x25"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6,		"event=0xff,umask=0x26"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7,		"event=0xff,umask=0x27"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit,	"MiB"),
	{ /* end: all zeroes */ },
};
4908 
/* Pseudo PMU type for the IIO free-running counters (no event controls). */
static struct intel_uncore_type snr_uncore_iio_free_running = {
	.name			= "iio_free_running",
	.num_counters		= 9,
	.num_boxes		= 5,
	.num_freerunning_types	= SNR_IIO_FREERUNNING_TYPE_MAX,
	.freerunning		= snr_iio_freerunning,
	.ops			= &skx_uncore_iio_freerunning_ops,
	.event_descs		= snr_uncore_iio_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
4919 
/* All SNR MSR-based uncore PMON unit types. */
static struct intel_uncore_type *snr_msr_uncores[] = {
	&snr_uncore_ubox,
	&snr_uncore_chabox,
	&snr_uncore_iio,
	&snr_uncore_irp,
	&snr_uncore_m2pcie,
	&snr_uncore_pcu,
	&snr_uncore_iio_free_running,
	NULL,
};
4930 
/* Register the SNR MSR-based uncore units with the core uncore driver. */
void snr_uncore_cpu_init(void)
{
	uncore_msr_uncores = snr_msr_uncores;
}
4935 
/* Reset and arm the SNR M2M PMON box. */
static void snr_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	/* Counter control registers on this box are 8 bytes apart. */
	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
	pci_write_config_dword(pdev, box_ctl, IVBEP_PMON_BOX_CTL_INT);
}
4944 
/* SNR M2M box ops: SNR-specific init, SNB-EP style box/event control. */
static struct intel_uncore_ops snr_m2m_uncore_pci_ops = {
	.init_box	= snr_m2m_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};
4953 
/* sysfs "format" attributes for the SNR M2M PMU (extended umask). */
static struct attribute *snr_m2m_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext3.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
4962 
/* Wraps the M2M format attributes into the "format" sysfs group. */
static const struct attribute_group snr_m2m_uncore_format_group = {
	.name = "format",
	.attrs = snr_m2m_uncore_formats_attr,
};
4967 
/* SNR M2M (mesh-to-memory) PMON unit: 1 box, 4 x 48-bit counters. */
static struct intel_uncore_type snr_uncore_m2m = {
	.name		= "m2m",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SNR_M2M_PCI_PMON_CTR0,
	.event_ctl	= SNR_M2M_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext	= SNR_M2M_PCI_PMON_UMASK_EXT,
	.box_ctl	= SNR_M2M_PCI_PMON_BOX_CTL,
	.ops		= &snr_m2m_uncore_pci_ops,
	.format_group	= &snr_m2m_uncore_format_group,
};
4981 
/*
 * Enable a PCI PMON event whose config exceeds 32 bits: write the low
 * dword (with the enable bit) and the high dword separately.
 */
static void snr_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, (u32)(hwc->config | SNBEP_PMON_CTL_EN));
	pci_write_config_dword(pdev, hwc->config_base + 4, (u32)(hwc->config >> 32));
}
4990 
/* PCIe3 box ops: M2M-style init plus 64-bit event enable. */
static struct intel_uncore_ops snr_pcie3_uncore_pci_ops = {
	.init_box	= snr_m2m_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snr_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};
4999 
/* SNR PCIe3 root-port PMON unit: 1 box, 4 x 48-bit counters. */
static struct intel_uncore_type snr_uncore_pcie3 = {
	.name		= "pcie3",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SNR_PCIE3_PCI_PMON_CTR0,
	.event_ctl	= SNR_PCIE3_PCI_PMON_CTL0,
	.event_mask	= SKX_IIO_PMON_RAW_EVENT_MASK,
	.event_mask_ext	= SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl	= SNR_PCIE3_PCI_PMON_BOX_CTL,
	.ops		= &snr_pcie3_uncore_pci_ops,
	.format_group	= &skx_uncore_iio_format_group,
};
5013 
/* Indices into snr_pci_uncores[], referenced by the PCI id tables below. */
enum {
	SNR_PCI_UNCORE_M2M,
	SNR_PCI_UNCORE_PCIE3,
};
5018 
/* All SNR PCI-based uncore PMON unit types, indexed by the enum above. */
static struct intel_uncore_type *snr_pci_uncores[] = {
	[SNR_PCI_UNCORE_M2M]		= &snr_uncore_m2m,
	[SNR_PCI_UNCORE_PCIE3]		= &snr_uncore_pcie3,
	NULL,
};
5024 
/* SNR uncore PCI device table (dev, func, type idx, box idx packed in driver_data). */
static const struct pci_device_id snr_uncore_pci_ids[] = {
	{ /* M2M */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, SNR_PCI_UNCORE_M2M, 0),
	},
	{ /* end: all zeroes */ }
};
5032 
/* Stub driver used only to enumerate SNR uncore PCI devices. */
static struct pci_driver snr_uncore_pci_driver = {
	.name		= "snr_uncore",
	.id_table	= snr_uncore_pci_ids,
};
5037 
/* Devices matched by the secondary (sub) driver, e.g. the PCIe3 root port. */
static const struct pci_device_id snr_uncore_pci_sub_ids[] = {
	{ /* PCIe3 RP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x334a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 0, SNR_PCI_UNCORE_PCIE3, 0),
	},
	{ /* end: all zeroes */ }
};
5045 
/* Secondary stub driver for uncore devices behind sub-buses. */
static struct pci_driver snr_uncore_pci_sub_driver = {
	.name		= "snr_uncore_sub",
	.id_table	= snr_uncore_pci_sub_ids,
};
5050 
/*
 * Register the SNR PCI uncore units.  Returns 0 on success or a negative
 * errno if the bus-to-socket mapping cannot be established.
 */
int snr_uncore_pci_init(void)
{
	/* SNR UBOX DID */
	int ret = snbep_pci2phy_map_init(0x3460, SKX_CPUNODEID,
					 SKX_GIDNIDMAP, true);

	if (ret)
		return ret;

	uncore_pci_uncores = snr_pci_uncores;
	uncore_pci_driver = &snr_uncore_pci_driver;
	uncore_pci_sub_driver = &snr_uncore_pci_sub_driver;
	return 0;
}
5065 
5066 #define SNR_MC_DEVICE_ID	0x3451
5067 
snr_uncore_get_mc_dev(unsigned int device,int id)5068 static struct pci_dev *snr_uncore_get_mc_dev(unsigned int device, int id)
5069 {
5070 	struct pci_dev *mc_dev = NULL;
5071 	int pkg;
5072 
5073 	while (1) {
5074 		mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, mc_dev);
5075 		if (!mc_dev)
5076 			break;
5077 		pkg = uncore_pcibus_to_dieid(mc_dev->bus);
5078 		if (pkg == id)
5079 			break;
5080 	}
5081 	return mc_dev;
5082 }
5083 
/*
 * Map the PMON MMIO region of this box's memory controller.
 * The base address is assembled from two config-space registers of the
 * die's MC device, then offset by @box_ctl.  Returns 0 on success,
 * -ENODEV when the MC device is absent, -EINVAL when ioremap fails.
 */
static int snr_uncore_mmio_map(struct intel_uncore_box *box,
			       unsigned int box_ctl, int mem_offset,
			       unsigned int device)
{
	struct pci_dev *pdev = snr_uncore_get_mc_dev(device, box->dieid);
	struct intel_uncore_type *type = box->pmu->type;
	resource_size_t addr;
	u32 pci_dword;

	if (!pdev)
		return -ENODEV;

	/* NOTE(review): config read errors are not checked here — verify intent. */
	pci_read_config_dword(pdev, SNR_IMC_MMIO_BASE_OFFSET, &pci_dword);
	addr = ((resource_size_t)pci_dword & SNR_IMC_MMIO_BASE_MASK) << 23;

	pci_read_config_dword(pdev, mem_offset, &pci_dword);
	addr |= (pci_dword & SNR_IMC_MMIO_MEM0_MASK) << 12;

	addr += box_ctl;

	pci_dev_put(pdev);

	box->io_addr = ioremap(addr, type->mmio_map_size);
	if (!box->io_addr) {
		pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);
		return -EINVAL;
	}

	return 0;
}
5114 
/* Map the box's MMIO region and, on success, reset/arm its PMON control. */
static void __snr_uncore_mmio_init_box(struct intel_uncore_box *box,
				       unsigned int box_ctl, int mem_offset,
				       unsigned int device)
{
	if (!snr_uncore_mmio_map(box, box_ctl, mem_offset, device))
		writel(IVBEP_PMON_BOX_CTL_INT, box->io_addr);
}
5122 
/* init_box hook for SNR MMIO boxes: map via the SNR MC device. */
static void snr_uncore_mmio_init_box(struct intel_uncore_box *box)
{
	__snr_uncore_mmio_init_box(box, uncore_mmio_box_ctl(box),
				   SNR_IMC_MMIO_MEM0_OFFSET,
				   SNR_MC_DEVICE_ID);
}
5129 
/* Freeze all counters in the box by setting the FRZ bit in box control. */
static void snr_uncore_mmio_disable_box(struct intel_uncore_box *box)
{
	u32 config;

	if (!box->io_addr)
		return;

	config = readl(box->io_addr);
	config |= SNBEP_PMON_BOX_CTL_FRZ;
	writel(config, box->io_addr);
}
5141 
/* Unfreeze all counters in the box by clearing the FRZ bit in box control. */
static void snr_uncore_mmio_enable_box(struct intel_uncore_box *box)
{
	u32 config;

	if (!box->io_addr)
		return;

	config = readl(box->io_addr);
	config &= ~SNBEP_PMON_BOX_CTL_FRZ;
	writel(config, box->io_addr);
}
5153 
/* Enable one event by writing its control register with the EN bit set. */
static void snr_uncore_mmio_enable_event(struct intel_uncore_box *box,
					   struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!box->io_addr)
		return;

	/* Guard against a control offset outside the mapped region. */
	if (!uncore_mmio_is_valid_offset(box, hwc->config_base))
		return;

	writel(hwc->config | SNBEP_PMON_CTL_EN,
	       box->io_addr + hwc->config_base);
}
5168 
/* Disable one event by rewriting its control register without the EN bit. */
static void snr_uncore_mmio_disable_event(struct intel_uncore_box *box,
					    struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!box->io_addr)
		return;

	/* Guard against a control offset outside the mapped region. */
	if (!uncore_mmio_is_valid_offset(box, hwc->config_base))
		return;

	writel(hwc->config, box->io_addr + hwc->config_base);
}
5182 
/* Box ops for SNR MMIO-based PMON units (IMC). */
static struct intel_uncore_ops snr_uncore_mmio_ops = {
	.init_box	= snr_uncore_mmio_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.disable_box	= snr_uncore_mmio_disable_box,
	.enable_box	= snr_uncore_mmio_enable_box,
	.disable_event	= snr_uncore_mmio_disable_event,
	.enable_event	= snr_uncore_mmio_enable_event,
	.read_counter	= uncore_mmio_read_counter,
};
5192 
/* Named IMC events; CAS count scales convert transactions to MiB. */
static struct uncore_event_desc snr_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};
5203 
/* SNR IMC PMON unit (MMIO): 2 channels, 4 counters plus a fixed DCLK counter. */
static struct intel_uncore_type snr_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNR_IMC_MMIO_PMON_FIXED_CTR,
	.fixed_ctl	= SNR_IMC_MMIO_PMON_FIXED_CTL,
	.event_descs	= snr_uncore_imc_events,
	.perf_ctr	= SNR_IMC_MMIO_PMON_CTR0,
	.event_ctl	= SNR_IMC_MMIO_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNR_IMC_MMIO_PMON_BOX_CTL,
	.mmio_offset	= SNR_IMC_MMIO_OFFSET,
	.mmio_map_size	= SNR_IMC_MMIO_SIZE,
	.ops		= &snr_uncore_mmio_ops,
	.format_group	= &skx_uncore_format_group,
};
5222 
/* Free-running counter groups exposed by the SNR IMC PMU. */
enum perf_uncore_snr_imc_freerunning_type_id {
	SNR_IMC_DCLK,
	SNR_IMC_DDR,

	SNR_IMC_FREERUNNING_TYPE_MAX,
};
5229 
/* IMC free-running counters: { counter_base, counter_offset, box_offset, num_counters, bits }. */
static struct freerunning_counters snr_imc_freerunning[] = {
	[SNR_IMC_DCLK]	= { 0x22b0, 0x0, 0, 1, 48 },
	[SNR_IMC_DDR]	= { 0x2290, 0x8, 0, 2, 48 },
};
5234 
/* Named events for the IMC free-running counters (DDR read/write BW in MiB). */
static struct uncore_event_desc snr_uncore_imc_freerunning_events[] = {
	INTEL_UNCORE_EVENT_DESC(dclk,		"event=0xff,umask=0x10"),

	INTEL_UNCORE_EVENT_DESC(read,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(read.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(read.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(write,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(write.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(write.unit,	"MiB"),
	{ /* end: all zeroes */ },
};
5246 
/* Ops for IMC free-running counters: read-only, no enable/disable. */
static struct intel_uncore_ops snr_uncore_imc_freerunning_ops = {
	.init_box	= snr_uncore_mmio_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};
5253 
/* Pseudo PMU type for the IMC free-running counters (no event controls). */
static struct intel_uncore_type snr_uncore_imc_free_running = {
	.name			= "imc_free_running",
	.num_counters		= 3,
	.num_boxes		= 1,
	.num_freerunning_types	= SNR_IMC_FREERUNNING_TYPE_MAX,
	.mmio_map_size		= SNR_IMC_MMIO_SIZE,
	.freerunning		= snr_imc_freerunning,
	.ops			= &snr_uncore_imc_freerunning_ops,
	.event_descs		= snr_uncore_imc_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
5265 
5266 static struct intel_uncore_type *snr_mmio_uncores[] = {
5267 	&snr_uncore_imc,
5268 	&snr_uncore_imc_free_running,
5269 	NULL,
5270 };
5271 
snr_uncore_mmio_init(void)5272 void snr_uncore_mmio_init(void)
5273 {
5274 	uncore_mmio_uncores = snr_mmio_uncores;
5275 }
5276 
5277 /* end of SNR uncore support */
5278 
5279 /* ICX uncore support */
5280 
/*
 * Per-CHA MSR offsets from the ICX C34 PMON base registers, indexed by
 * pmu_idx.  Note the offsets are not monotonic: the last six entries
 * wrap around to low offsets (0x0, 0xe, ...).
 */
static unsigned icx_cha_msr_offsets[] = {
	0x2a0, 0x2ae, 0x2bc, 0x2ca, 0x2d8, 0x2e6, 0x2f4, 0x302, 0x310,
	0x31e, 0x32c, 0x33a, 0x348, 0x356, 0x364, 0x372, 0x380, 0x38e,
	0x3aa, 0x3b8, 0x3c6, 0x3d4, 0x3e2, 0x3f0, 0x3fe, 0x40c, 0x41a,
	0x428, 0x436, 0x444, 0x452, 0x460, 0x46e, 0x47c, 0x0,   0xe,
	0x1c,  0x2a,  0x38,  0x46,
};
5288 
icx_cha_hw_config(struct intel_uncore_box * box,struct perf_event * event)5289 static int icx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
5290 {
5291 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
5292 	bool tie_en = !!(event->hw.config & SNBEP_CBO_PMON_CTL_TID_EN);
5293 
5294 	if (tie_en) {
5295 		reg1->reg = ICX_C34_MSR_PMON_BOX_FILTER0 +
5296 			    icx_cha_msr_offsets[box->pmu->pmu_idx];
5297 		reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
5298 		reg1->idx = 0;
5299 	}
5300 
5301 	return 0;
5302 }
5303 
/* ICX CHA box ops: SNB-EP style box control, SNR-style event enable. */
static struct intel_uncore_ops icx_uncore_chabox_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= snr_cha_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= icx_cha_hw_config,
};

/* ICX CHA PMU; num_boxes is filled in at runtime by icx_uncore_cpu_init(). */
static struct intel_uncore_type icx_uncore_chabox = {
	.name			= "cha",
	.num_counters		= 4,
	.perf_ctr_bits		= 48,
	.event_ctl		= ICX_C34_MSR_PMON_CTL0,
	.perf_ctr		= ICX_C34_MSR_PMON_CTR0,
	.box_ctl		= ICX_C34_MSR_PMON_BOX_CTL,
	.msr_offsets		= icx_cha_msr_offsets,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SNR_CHA_RAW_EVENT_MASK_EXT,
	.constraints		= skx_uncore_chabox_constraints,
	.ops			= &icx_uncore_chabox_ops,
	.format_group		= &snr_uncore_chabox_format_group,
};

/* Per-box MSR offsets shared by the ICX IIO, IRP and M2PCIe PMUs. */
static unsigned icx_msr_offsets[] = {
	0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0,
};

/* Counter constraints for ICX IIO events (event code -> allowed counters). */
static struct event_constraint icx_uncore_iio_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x03, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x88, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xd5, 0xc),
	EVENT_CONSTRAINT_END
};
5343 
/* sysfs visibility callback for the ICX IIO die mapping attributes. */
static umode_t
icx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
{
	/* Root bus 0x00 is valid only for pmu_idx = 5. */
	return pmu_iio_mapping_visible(kobj, attr, die, 5);
}

static struct attribute_group icx_iio_mapping_group = {
	.is_visible	= icx_iio_mapping_visible,
};

static const struct attribute_group *icx_iio_attr_update[] = {
	&icx_iio_mapping_group,
	NULL,
};
5359 
5360 /*
5361  * ICX has a static mapping of stack IDs from SAD_CONTROL_CFG notation to PMON
5362  */
5363 enum {
5364 	ICX_PCIE1_PMON_ID,
5365 	ICX_PCIE2_PMON_ID,
5366 	ICX_PCIE3_PMON_ID,
5367 	ICX_PCIE4_PMON_ID,
5368 	ICX_PCIE5_PMON_ID,
5369 	ICX_CBDMA_DMI_PMON_ID
5370 };
5371 
5372 static u8 icx_sad_pmon_mapping[] = {
5373 	ICX_CBDMA_DMI_PMON_ID,
5374 	ICX_PCIE1_PMON_ID,
5375 	ICX_PCIE2_PMON_ID,
5376 	ICX_PCIE3_PMON_ID,
5377 	ICX_PCIE4_PMON_ID,
5378 	ICX_PCIE5_PMON_ID,
5379 };
5380 
icx_iio_get_topology(struct intel_uncore_type * type)5381 static int icx_iio_get_topology(struct intel_uncore_type *type)
5382 {
5383 	return sad_cfg_iio_topology(type, icx_sad_pmon_mapping);
5384 }
5385 
icx_iio_set_mapping(struct intel_uncore_type * type)5386 static void icx_iio_set_mapping(struct intel_uncore_type *type)
5387 {
5388 	/* Detect ICX-D system. This case is not supported */
5389 	if (boot_cpu_data.x86_model == INTEL_FAM6_ICELAKE_D) {
5390 		pmu_clear_mapping_attr(type->attr_update, &icx_iio_mapping_group);
5391 		return;
5392 	}
5393 	pmu_iio_set_mapping(type, &icx_iio_mapping_group);
5394 }
5395 
icx_iio_cleanup_mapping(struct intel_uncore_type * type)5396 static void icx_iio_cleanup_mapping(struct intel_uncore_type *type)
5397 {
5398 	pmu_cleanup_mapping(type, &icx_iio_mapping_group);
5399 }
5400 
5401 static struct intel_uncore_type icx_uncore_iio = {
5402 	.name			= "iio",
5403 	.num_counters		= 4,
5404 	.num_boxes		= 6,
5405 	.perf_ctr_bits		= 48,
5406 	.event_ctl		= ICX_IIO_MSR_PMON_CTL0,
5407 	.perf_ctr		= ICX_IIO_MSR_PMON_CTR0,
5408 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
5409 	.event_mask_ext		= SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
5410 	.box_ctl		= ICX_IIO_MSR_PMON_BOX_CTL,
5411 	.msr_offsets		= icx_msr_offsets,
5412 	.constraints		= icx_uncore_iio_constraints,
5413 	.ops			= &skx_uncore_iio_ops,
5414 	.format_group		= &snr_uncore_iio_format_group,
5415 	.attr_update		= icx_iio_attr_update,
5416 	.get_topology		= icx_iio_get_topology,
5417 	.set_mapping		= icx_iio_set_mapping,
5418 	.cleanup_mapping	= icx_iio_cleanup_mapping,
5419 };
5420 
5421 static struct intel_uncore_type icx_uncore_irp = {
5422 	.name			= "irp",
5423 	.num_counters		= 2,
5424 	.num_boxes		= 6,
5425 	.perf_ctr_bits		= 48,
5426 	.event_ctl		= ICX_IRP0_MSR_PMON_CTL0,
5427 	.perf_ctr		= ICX_IRP0_MSR_PMON_CTR0,
5428 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
5429 	.box_ctl		= ICX_IRP0_MSR_PMON_BOX_CTL,
5430 	.msr_offsets		= icx_msr_offsets,
5431 	.ops			= &ivbep_uncore_msr_ops,
5432 	.format_group		= &ivbep_uncore_format_group,
5433 };
5434 
5435 static struct event_constraint icx_uncore_m2pcie_constraints[] = {
5436 	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
5437 	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
5438 	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
5439 	EVENT_CONSTRAINT_END
5440 };
5441 
5442 static struct intel_uncore_type icx_uncore_m2pcie = {
5443 	.name		= "m2pcie",
5444 	.num_counters	= 4,
5445 	.num_boxes	= 6,
5446 	.perf_ctr_bits	= 48,
5447 	.event_ctl	= ICX_M2PCIE_MSR_PMON_CTL0,
5448 	.perf_ctr	= ICX_M2PCIE_MSR_PMON_CTR0,
5449 	.box_ctl	= ICX_M2PCIE_MSR_PMON_BOX_CTL,
5450 	.msr_offsets	= icx_msr_offsets,
5451 	.constraints	= icx_uncore_m2pcie_constraints,
5452 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
5453 	.ops		= &ivbep_uncore_msr_ops,
5454 	.format_group	= &ivbep_uncore_format_group,
5455 };
5456 
/* Free-running counter groups exposed by the ICX IIO stacks. */
enum perf_uncore_icx_iio_freerunning_type_id {
	ICX_IIO_MSR_IOCLK,
	ICX_IIO_MSR_BW_IN,

	ICX_IIO_FREERUNNING_TYPE_MAX,
};

/* Per-box MSR offsets for the free-running IOCLK counters. */
static unsigned icx_iio_clk_freerunning_box_offsets[] = {
	0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0,
};

/* Per-box MSR offsets for the free-running bandwidth-in counters. */
static unsigned icx_iio_bw_freerunning_box_offsets[] = {
	0x0, 0x10, 0x20, 0x90, 0xa0, 0xb0,
};

static struct freerunning_counters icx_iio_freerunning[] = {
	[ICX_IIO_MSR_IOCLK]	= { 0xa55, 0x1, 0x20, 1, 48, icx_iio_clk_freerunning_box_offsets },
	[ICX_IIO_MSR_BW_IN]	= { 0xaa0, 0x1, 0x10, 8, 48, icx_iio_bw_freerunning_box_offsets },
};

/*
 * Event aliases for the free-running IIO counters; the bw_in scale
 * 3.814697266e-6 is 4/2^20: each count is 4 bytes, reported in MiB.
 */
static struct uncore_event_desc icx_uncore_iio_freerunning_events[] = {
	/* Free-Running IIO CLOCKS Counter */
	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
	/* Free-Running IIO BANDWIDTH IN Counters */
	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4,		"event=0xff,umask=0x24"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5,		"event=0xff,umask=0x25"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6,		"event=0xff,umask=0x26"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7,		"event=0xff,umask=0x27"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit,	"MiB"),
	{ /* end: all zeroes */ },
};

/* 1 IOCLK + 8 bandwidth-in counters = 9 free-running counters per box. */
static struct intel_uncore_type icx_uncore_iio_free_running = {
	.name			= "iio_free_running",
	.num_counters		= 9,
	.num_boxes		= 6,
	.num_freerunning_types	= ICX_IIO_FREERUNNING_TYPE_MAX,
	.freerunning		= icx_iio_freerunning,
	.ops			= &skx_uncore_iio_freerunning_ops,
	.event_descs		= icx_uncore_iio_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};

/* All ICX MSR-based uncore PMUs; NULL-terminated. */
static struct intel_uncore_type *icx_msr_uncores[] = {
	&skx_uncore_ubox,
	&icx_uncore_chabox,
	&icx_uncore_iio,
	&icx_uncore_irp,
	&icx_uncore_m2pcie,
	&skx_uncore_pcu,
	&icx_uncore_iio_free_running,
	NULL,
};
5529 
5530 /*
5531  * To determine the number of CHAs, it should read CAPID6(Low) and CAPID7 (High)
5532  * registers which located at Device 30, Function 3
5533  */
5534 #define ICX_CAPID6		0x9c
5535 #define ICX_CAPID7		0xa0
5536 
icx_count_chabox(void)5537 static u64 icx_count_chabox(void)
5538 {
5539 	struct pci_dev *dev = NULL;
5540 	u64 caps = 0;
5541 
5542 	dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x345b, dev);
5543 	if (!dev)
5544 		goto out;
5545 
5546 	pci_read_config_dword(dev, ICX_CAPID6, (u32 *)&caps);
5547 	pci_read_config_dword(dev, ICX_CAPID7, (u32 *)&caps + 1);
5548 out:
5549 	pci_dev_put(dev);
5550 	return hweight64(caps);
5551 }
5552 
icx_uncore_cpu_init(void)5553 void icx_uncore_cpu_init(void)
5554 {
5555 	u64 num_boxes = icx_count_chabox();
5556 
5557 	if (WARN_ON(num_boxes > ARRAY_SIZE(icx_cha_msr_offsets)))
5558 		return;
5559 	icx_uncore_chabox.num_boxes = num_boxes;
5560 	uncore_msr_uncores = icx_msr_uncores;
5561 }
5562 
/* ICX M2M (mesh-to-memory) PCI PMU; reuses the SNR register layout. */
static struct intel_uncore_type icx_uncore_m2m = {
	.name		= "m2m",
	.num_counters   = 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SNR_M2M_PCI_PMON_CTR0,
	.event_ctl	= SNR_M2M_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext	= SNR_M2M_PCI_PMON_UMASK_EXT,
	.box_ctl	= SNR_M2M_PCI_PMON_BOX_CTL,
	.ops		= &snr_m2m_uncore_pci_ops,
	.format_group	= &snr_m2m_uncore_format_group,
};

/* sysfs format attributes for the ICX UPI PMU. */
static struct attribute *icx_upi_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext4.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group icx_upi_uncore_format_group = {
	.name = "format",
	.attrs = icx_upi_uncore_formats_attr,
};
5590 
/* PCI device/function where the ICX UPI link registers live. */
#define ICX_UPI_REGS_ADDR_DEVICE_LINK0	0x02
#define ICX_UPI_REGS_ADDR_FUNCTION	0x01
5593 
discover_upi_topology(struct intel_uncore_type * type,int ubox_did,int dev_link0)5594 static int discover_upi_topology(struct intel_uncore_type *type, int ubox_did, int dev_link0)
5595 {
5596 	struct pci_dev *ubox = NULL;
5597 	struct pci_dev *dev = NULL;
5598 	u32 nid, gid;
5599 	int i, idx, lgc_pkg, ret = -EPERM;
5600 	struct intel_uncore_topology *upi;
5601 	unsigned int devfn;
5602 
5603 	/* GIDNIDMAP method supports machines which have less than 8 sockets. */
5604 	if (uncore_max_dies() > 8)
5605 		goto err;
5606 
5607 	while ((ubox = pci_get_device(PCI_VENDOR_ID_INTEL, ubox_did, ubox))) {
5608 		ret = upi_nodeid_groupid(ubox, SKX_CPUNODEID, SKX_GIDNIDMAP, &nid, &gid);
5609 		if (ret) {
5610 			ret = pcibios_err_to_errno(ret);
5611 			break;
5612 		}
5613 
5614 		for (i = 0; i < 8; i++) {
5615 			if (nid != GIDNIDMAP(gid, i))
5616 				continue;
5617 			lgc_pkg = topology_phys_to_logical_pkg(i);
5618 			if (lgc_pkg < 0) {
5619 				ret = -EPERM;
5620 				goto err;
5621 			}
5622 			for (idx = 0; idx < type->num_boxes; idx++) {
5623 				upi = &type->topology[lgc_pkg][idx];
5624 				devfn = PCI_DEVFN(dev_link0 + idx, ICX_UPI_REGS_ADDR_FUNCTION);
5625 				dev = pci_get_domain_bus_and_slot(pci_domain_nr(ubox->bus),
5626 								  ubox->bus->number,
5627 								  devfn);
5628 				if (dev) {
5629 					ret = upi_fill_topology(dev, upi, idx);
5630 					if (ret)
5631 						goto err;
5632 				}
5633 			}
5634 			break;
5635 		}
5636 	}
5637 err:
5638 	pci_dev_put(ubox);
5639 	pci_dev_put(dev);
5640 	return ret;
5641 }
5642 
icx_upi_get_topology(struct intel_uncore_type * type)5643 static int icx_upi_get_topology(struct intel_uncore_type *type)
5644 {
5645 	return discover_upi_topology(type, ICX_UBOX_DID, ICX_UPI_REGS_ADDR_DEVICE_LINK0);
5646 }
5647 
5648 static struct attribute_group icx_upi_mapping_group = {
5649 	.is_visible	= skx_upi_mapping_visible,
5650 };
5651 
5652 static const struct attribute_group *icx_upi_attr_update[] = {
5653 	&icx_upi_mapping_group,
5654 	NULL
5655 };
5656 
icx_upi_set_mapping(struct intel_uncore_type * type)5657 static void icx_upi_set_mapping(struct intel_uncore_type *type)
5658 {
5659 	pmu_upi_set_mapping(type, &icx_upi_mapping_group);
5660 }
5661 
icx_upi_cleanup_mapping(struct intel_uncore_type * type)5662 static void icx_upi_cleanup_mapping(struct intel_uncore_type *type)
5663 {
5664 	pmu_cleanup_mapping(type, &icx_upi_mapping_group);
5665 }
5666 
/* ICX UPI (socket interconnect) PCI PMU, one box per link. */
static struct intel_uncore_type icx_uncore_upi = {
	.name		= "upi",
	.num_counters   = 4,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.perf_ctr	= ICX_UPI_PCI_PMON_CTR0,
	.event_ctl	= ICX_UPI_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext = ICX_UPI_CTL_UMASK_EXT,
	.box_ctl	= ICX_UPI_PCI_PMON_BOX_CTL,
	.ops		= &skx_upi_uncore_pci_ops,
	.format_group	= &icx_upi_uncore_format_group,
	.attr_update	= icx_upi_attr_update,
	.get_topology	= icx_upi_get_topology,
	.set_mapping	= icx_upi_set_mapping,
	.cleanup_mapping = icx_upi_cleanup_mapping,
};

static struct event_constraint icx_uncore_m3upi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x1c, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
	EVENT_CONSTRAINT_END
};

/* ICX M3UPI (mesh-to-UPI) PCI PMU, one box per link. */
static struct intel_uncore_type icx_uncore_m3upi = {
	.name		= "m3upi",
	.num_counters   = 4,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.perf_ctr	= ICX_M3UPI_PCI_PMON_CTR0,
	.event_ctl	= ICX_M3UPI_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= ICX_M3UPI_PCI_PMON_BOX_CTL,
	.constraints	= icx_uncore_m3upi_constraints,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};

/* Indices into icx_pci_uncores[], matched by the PCI id table below. */
enum {
	ICX_PCI_UNCORE_M2M,
	ICX_PCI_UNCORE_UPI,
	ICX_PCI_UNCORE_M3UPI,
};

static struct intel_uncore_type *icx_pci_uncores[] = {
	[ICX_PCI_UNCORE_M2M]		= &icx_uncore_m2m,
	[ICX_PCI_UNCORE_UPI]		= &icx_uncore_upi,
	[ICX_PCI_UNCORE_M3UPI]		= &icx_uncore_m3upi,
	NULL,
};
5723 
/*
 * PCI IDs of the ICX uncore PMON devices.  driver_data encodes
 * (PCI device, function, uncore type index, box index).
 */
static const struct pci_device_id icx_uncore_pci_ids[] = {
	{ /* M2M 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, ICX_PCI_UNCORE_M2M, 0),
	},
	{ /* M2M 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 0, ICX_PCI_UNCORE_M2M, 1),
	},
	{ /* M2M 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, ICX_PCI_UNCORE_M2M, 2),
	},
	{ /* M2M 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, ICX_PCI_UNCORE_M2M, 3),
	},
	{ /* UPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(2, 1, ICX_PCI_UNCORE_UPI, 0),
	},
	{ /* UPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(3, 1, ICX_PCI_UNCORE_UPI, 1),
	},
	{ /* UPI Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 1, ICX_PCI_UNCORE_UPI, 2),
	},
	{ /* M3UPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(5, 1, ICX_PCI_UNCORE_M3UPI, 0),
	},
	{ /* M3UPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(6, 1, ICX_PCI_UNCORE_M3UPI, 1),
	},
	{ /* M3UPI Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(7, 1, ICX_PCI_UNCORE_M3UPI, 2),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver icx_uncore_pci_driver = {
	.name		= "icx_uncore",
	.id_table	= icx_uncore_pci_ids,
};
5772 
icx_uncore_pci_init(void)5773 int icx_uncore_pci_init(void)
5774 {
5775 	/* ICX UBOX DID */
5776 	int ret = snbep_pci2phy_map_init(0x3450, SKX_CPUNODEID,
5777 					 SKX_GIDNIDMAP, true);
5778 
5779 	if (ret)
5780 		return ret;
5781 
5782 	uncore_pci_uncores = icx_pci_uncores;
5783 	uncore_pci_driver = &icx_uncore_pci_driver;
5784 	return 0;
5785 }
5786 
icx_uncore_imc_init_box(struct intel_uncore_box * box)5787 static void icx_uncore_imc_init_box(struct intel_uncore_box *box)
5788 {
5789 	unsigned int box_ctl = box->pmu->type->box_ctl +
5790 			       box->pmu->type->mmio_offset * (box->pmu->pmu_idx % ICX_NUMBER_IMC_CHN);
5791 	int mem_offset = (box->pmu->pmu_idx / ICX_NUMBER_IMC_CHN) * ICX_IMC_MEM_STRIDE +
5792 			 SNR_IMC_MMIO_MEM0_OFFSET;
5793 
5794 	__snr_uncore_mmio_init_box(box, box_ctl, mem_offset,
5795 				   SNR_MC_DEVICE_ID);
5796 }
5797 
/* ICX IMC MMIO ops: ICX-specific mapping, SNR-style box/event control. */
static struct intel_uncore_ops icx_uncore_mmio_ops = {
	.init_box	= icx_uncore_imc_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.disable_box	= snr_uncore_mmio_disable_box,
	.enable_box	= snr_uncore_mmio_enable_box,
	.disable_event	= snr_uncore_mmio_disable_event,
	.enable_event	= snr_uncore_mmio_enable_event,
	.read_counter	= uncore_mmio_read_counter,
};

/* ICX IMC PMU: 12 boxes (channels across the memory controllers). */
static struct intel_uncore_type icx_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 12,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNR_IMC_MMIO_PMON_FIXED_CTR,
	.fixed_ctl	= SNR_IMC_MMIO_PMON_FIXED_CTL,
	.event_descs	= snr_uncore_imc_events,
	.perf_ctr	= SNR_IMC_MMIO_PMON_CTR0,
	.event_ctl	= SNR_IMC_MMIO_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNR_IMC_MMIO_PMON_BOX_CTL,
	.mmio_offset	= SNR_IMC_MMIO_OFFSET,
	.mmio_map_size	= SNR_IMC_MMIO_SIZE,
	.ops		= &icx_uncore_mmio_ops,
	.format_group	= &skx_uncore_format_group,
};

/* Free-running counter groups exposed by the ICX IMC. */
enum perf_uncore_icx_imc_freerunning_type_id {
	ICX_IMC_DCLK,
	ICX_IMC_DDR,
	ICX_IMC_DDRT,

	ICX_IMC_FREERUNNING_TYPE_MAX,
};

static struct freerunning_counters icx_imc_freerunning[] = {
	[ICX_IMC_DCLK]	= { 0x22b0, 0x0, 0, 1, 48 },
	[ICX_IMC_DDR]	= { 0x2290, 0x8, 0, 2, 48 },
	[ICX_IMC_DDRT]	= { 0x22a0, 0x8, 0, 2, 48 },
};

/*
 * Event aliases for the IMC free-running counters; the scale
 * 6.103515625e-5 is 64/2^20: 64-byte lines reported in MiB.
 */
static struct uncore_event_desc icx_uncore_imc_freerunning_events[] = {
	INTEL_UNCORE_EVENT_DESC(dclk,			"event=0xff,umask=0x10"),

	INTEL_UNCORE_EVENT_DESC(read,			"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(read.scale,		"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(read.unit,		"MiB"),
	INTEL_UNCORE_EVENT_DESC(write,			"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(write.scale,		"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(write.unit,		"MiB"),

	INTEL_UNCORE_EVENT_DESC(ddrt_read,		"event=0xff,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(ddrt_read.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(ddrt_read.unit,		"MiB"),
	INTEL_UNCORE_EVENT_DESC(ddrt_write,		"event=0xff,umask=0x31"),
	INTEL_UNCORE_EVENT_DESC(ddrt_write.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(ddrt_write.unit,	"MiB"),
	{ /* end: all zeroes */ },
};
5859 
icx_uncore_imc_freerunning_init_box(struct intel_uncore_box * box)5860 static void icx_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
5861 {
5862 	int mem_offset = box->pmu->pmu_idx * ICX_IMC_MEM_STRIDE +
5863 			 SNR_IMC_MMIO_MEM0_OFFSET;
5864 
5865 	snr_uncore_mmio_map(box, uncore_mmio_box_ctl(box),
5866 			    mem_offset, SNR_MC_DEVICE_ID);
5867 }
5868 
/* Free-running counters cannot be started/stopped: no enable/disable ops. */
static struct intel_uncore_ops icx_uncore_imc_freerunning_ops = {
	.init_box	= icx_uncore_imc_freerunning_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};

/* 1 DCLK + 2 DDR + 2 DDRT = 5 free-running counters per box. */
static struct intel_uncore_type icx_uncore_imc_free_running = {
	.name			= "imc_free_running",
	.num_counters		= 5,
	.num_boxes		= 4,
	.num_freerunning_types	= ICX_IMC_FREERUNNING_TYPE_MAX,
	.mmio_map_size		= SNR_IMC_MMIO_SIZE,
	.freerunning		= icx_imc_freerunning,
	.ops			= &icx_uncore_imc_freerunning_ops,
	.event_descs		= icx_uncore_imc_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};

/* All ICX MMIO-based uncore PMUs; NULL-terminated. */
static struct intel_uncore_type *icx_mmio_uncores[] = {
	&icx_uncore_imc,
	&icx_uncore_imc_free_running,
	NULL,
};

/* Register the ICX MMIO uncore PMUs with the core uncore driver. */
void icx_uncore_mmio_init(void)
{
	uncore_mmio_uncores = icx_mmio_uncores;
}
5898 
5899 /* end of ICX uncore support */
5900 
5901 /* SPR uncore support */
5902 
spr_uncore_msr_enable_event(struct intel_uncore_box * box,struct perf_event * event)5903 static void spr_uncore_msr_enable_event(struct intel_uncore_box *box,
5904 					struct perf_event *event)
5905 {
5906 	struct hw_perf_event *hwc = &event->hw;
5907 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
5908 
5909 	if (reg1->idx != EXTRA_REG_NONE)
5910 		wrmsrl(reg1->reg, reg1->config);
5911 
5912 	wrmsrl(hwc->config_base, hwc->config);
5913 }
5914 
spr_uncore_msr_disable_event(struct intel_uncore_box * box,struct perf_event * event)5915 static void spr_uncore_msr_disable_event(struct intel_uncore_box *box,
5916 					 struct perf_event *event)
5917 {
5918 	struct hw_perf_event *hwc = &event->hw;
5919 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
5920 
5921 	if (reg1->idx != EXTRA_REG_NONE)
5922 		wrmsrl(reg1->reg, 0);
5923 
5924 	wrmsrl(hwc->config_base, 0);
5925 }
5926 
spr_cha_hw_config(struct intel_uncore_box * box,struct perf_event * event)5927 static int spr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
5928 {
5929 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
5930 	bool tie_en = !!(event->hw.config & SPR_CHA_PMON_CTL_TID_EN);
5931 	struct intel_uncore_type *type = box->pmu->type;
5932 
5933 	if (tie_en) {
5934 		reg1->reg = SPR_C0_MSR_PMON_BOX_FILTER0 +
5935 			    HSWEP_CBO_MSR_OFFSET * type->box_ids[box->pmu->pmu_idx];
5936 		reg1->config = event->attr.config1 & SPR_CHA_PMON_BOX_FILTER_TID;
5937 		reg1->idx = 0;
5938 	}
5939 
5940 	return 0;
5941 }
5942 
/* SPR CHA ops: generic (discovery-table) box control + CHA filter handling. */
static struct intel_uncore_ops spr_uncore_chabox_ops = {
	.init_box		= intel_generic_uncore_msr_init_box,
	.disable_box		= intel_generic_uncore_msr_disable_box,
	.enable_box		= intel_generic_uncore_msr_enable_box,
	.disable_event		= spr_uncore_msr_disable_event,
	.enable_event		= spr_uncore_msr_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= spr_cha_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};

/* sysfs format attributes for the SPR CHA PMU. */
static struct attribute *spr_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext4.attr,
	&format_attr_tid_en2.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid5.attr,
	NULL,
};
static const struct attribute_group spr_uncore_chabox_format_group = {
	.name = "format",
	.attrs = spr_uncore_cha_formats_attr,
};

/* sysfs "alias" attribute: report the PMU's alias name (uncore_type_N). */
static ssize_t alias_show(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(dev);
	char pmu_name[UNCORE_PMU_NAME_LEN];

	uncore_get_alias_name(pmu_name, pmu);
	return sysfs_emit(buf, "%s\n", pmu_name);
}

static DEVICE_ATTR_RO(alias);

static struct attribute *uncore_alias_attrs[] = {
	&dev_attr_alias.attr,
	NULL
};

ATTRIBUTE_GROUPS(uncore_alias);

/*
 * SPR CHA PMU.  Counter layout comes from the discovery table; only the
 * fields the table cannot provide are set here.
 */
static struct intel_uncore_type spr_uncore_chabox = {
	.name			= "cha",
	.event_mask		= SPR_CHA_PMON_EVENT_MASK,
	.event_mask_ext		= SPR_RAW_EVENT_MASK_EXT,
	.num_shared_regs	= 1,
	.constraints		= skx_uncore_chabox_constraints,
	.ops			= &spr_uncore_chabox_ops,
	.format_group		= &spr_uncore_chabox_format_group,
	.attr_update		= uncore_alias_groups,
};
6000 
/* SPR IIO PMU; counter layout comes from the discovery table. */
static struct intel_uncore_type spr_uncore_iio = {
	.name			= "iio",
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
	.format_group		= &snr_uncore_iio_format_group,
	.attr_update		= uncore_alias_groups,
	.constraints		= icx_uncore_iio_constraints,
};

/* Generic ("raw") format attributes shared by several SPR PMUs. */
static struct attribute *spr_uncore_raw_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext4.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group spr_uncore_raw_format_group = {
	.name			= "format",
	.attrs			= spr_uncore_raw_formats_attr,
};

/* Common initializer fields for SPR PMUs using the raw format group. */
#define SPR_UNCORE_COMMON_FORMAT()				\
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,	\
	.event_mask_ext		= SPR_RAW_EVENT_MASK_EXT,	\
	.format_group		= &spr_uncore_raw_format_group,	\
	.attr_update		= uncore_alias_groups

static struct intel_uncore_type spr_uncore_irp = {
	SPR_UNCORE_COMMON_FORMAT(),
	.name			= "irp",

};

static struct event_constraint spr_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type spr_uncore_m2pcie = {
	SPR_UNCORE_COMMON_FORMAT(),
	.name			= "m2pcie",
	.constraints		= spr_uncore_m2pcie_constraints,
};

/* SPR PCU PMU; everything else comes from the discovery table. */
static struct intel_uncore_type spr_uncore_pcu = {
	.name			= "pcu",
	.attr_update		= uncore_alias_groups,
};
6052 
spr_uncore_mmio_enable_event(struct intel_uncore_box * box,struct perf_event * event)6053 static void spr_uncore_mmio_enable_event(struct intel_uncore_box *box,
6054 					 struct perf_event *event)
6055 {
6056 	struct hw_perf_event *hwc = &event->hw;
6057 
6058 	if (!box->io_addr)
6059 		return;
6060 
6061 	if (uncore_pmc_fixed(hwc->idx))
6062 		writel(SNBEP_PMON_CTL_EN, box->io_addr + hwc->config_base);
6063 	else
6064 		writel(hwc->config, box->io_addr + hwc->config_base);
6065 }
6066 
/* SPR MMIO ops: generic discovery-table handlers plus fixed-counter enable. */
static struct intel_uncore_ops spr_uncore_mmio_ops = {
	.init_box		= intel_generic_uncore_mmio_init_box,
	.exit_box		= uncore_mmio_exit_box,
	.disable_box		= intel_generic_uncore_mmio_disable_box,
	.enable_box		= intel_generic_uncore_mmio_enable_box,
	.disable_event		= intel_generic_uncore_mmio_disable_event,
	.enable_event		= spr_uncore_mmio_enable_event,
	.read_counter		= uncore_mmio_read_counter,
};

/*
 * IMC event aliases; the CAS scale 6.103515625e-5 is 64/2^20:
 * 64-byte lines reported in MiB.
 */
static struct uncore_event_desc spr_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x01,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x05,umask=0xcf"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x05,umask=0xf0"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};

/* SPR IMC PMU; fixed counter uses the SNR MMIO register layout. */
static struct intel_uncore_type spr_uncore_imc = {
	SPR_UNCORE_COMMON_FORMAT(),
	.name			= "imc",
	.fixed_ctr_bits		= 48,
	.fixed_ctr		= SNR_IMC_MMIO_PMON_FIXED_CTR,
	.fixed_ctl		= SNR_IMC_MMIO_PMON_FIXED_CTL,
	.ops			= &spr_uncore_mmio_ops,
	.event_descs		= spr_uncore_imc_events,
};
6097 
spr_uncore_pci_enable_event(struct intel_uncore_box * box,struct perf_event * event)6098 static void spr_uncore_pci_enable_event(struct intel_uncore_box *box,
6099 					struct perf_event *event)
6100 {
6101 	struct pci_dev *pdev = box->pci_dev;
6102 	struct hw_perf_event *hwc = &event->hw;
6103 
6104 	pci_write_config_dword(pdev, hwc->config_base + 4, (u32)(hwc->config >> 32));
6105 	pci_write_config_dword(pdev, hwc->config_base, (u32)hwc->config);
6106 }
6107 
6108 static struct intel_uncore_ops spr_uncore_pci_ops = {
6109 	.init_box		= intel_generic_uncore_pci_init_box,
6110 	.disable_box		= intel_generic_uncore_pci_disable_box,
6111 	.enable_box		= intel_generic_uncore_pci_enable_box,
6112 	.disable_event		= intel_generic_uncore_pci_disable_event,
6113 	.enable_event		= spr_uncore_pci_enable_event,
6114 	.read_counter		= intel_generic_uncore_pci_read_counter,
6115 };
6116 
/* Common fields for SPR PCI-based PMUs: raw format + SPR PCI ops. */
#define SPR_UNCORE_PCI_COMMON_FORMAT()			\
	SPR_UNCORE_COMMON_FORMAT(),			\
	.ops			= &spr_uncore_pci_ops

static struct intel_uncore_type spr_uncore_m2m = {
	SPR_UNCORE_PCI_COMMON_FORMAT(),
	.name			= "m2m",
};

static struct attribute_group spr_upi_mapping_group = {
	.is_visible	= skx_upi_mapping_visible,
};

static const struct attribute_group *spr_upi_attr_update[] = {
	&uncore_alias_group,
	&spr_upi_mapping_group,
	NULL
};
6135 
6136 #define SPR_UPI_REGS_ADDR_DEVICE_LINK0	0x01
6137 
spr_upi_set_mapping(struct intel_uncore_type * type)6138 static void spr_upi_set_mapping(struct intel_uncore_type *type)
6139 {
6140 	pmu_upi_set_mapping(type, &spr_upi_mapping_group);
6141 }
6142 
spr_upi_cleanup_mapping(struct intel_uncore_type * type)6143 static void spr_upi_cleanup_mapping(struct intel_uncore_type *type)
6144 {
6145 	pmu_cleanup_mapping(type, &spr_upi_mapping_group);
6146 }
6147 
/* Discover UPI link topology via the UBOX device's config space. */
static int spr_upi_get_topology(struct intel_uncore_type *type)
{
	return discover_upi_topology(type, SPR_UBOX_DID, SPR_UPI_REGS_ADDR_DEVICE_LINK0);
}
6152 
/* MDF (mesh-to-die fabric) PMON; discovery-table unit. */
static struct intel_uncore_type spr_uncore_mdf = {
	SPR_UNCORE_COMMON_FORMAT(),
	.name			= "mdf",
};

/* Size of spr_uncores[] below; indices are discovery-table type IDs. */
#define UNCORE_SPR_NUM_UNCORE_TYPES		12
#define UNCORE_SPR_CHA				0
#define UNCORE_SPR_IIO				1
#define UNCORE_SPR_IMC				6
#define UNCORE_SPR_UPI				8
#define UNCORE_SPR_M3UPI			9
6164 
/*
 * The uncore units, which are supported by the discovery table,
 * are defined here.  Indexed by discovery-table type ID; NULL slots
 * take the generic (uncustomized) discovery defaults.
 */
static struct intel_uncore_type *spr_uncores[UNCORE_SPR_NUM_UNCORE_TYPES] = {
	&spr_uncore_chabox,	/* 0: CHA */
	&spr_uncore_iio,	/* 1: IIO */
	&spr_uncore_irp,	/* 2: IRP */
	&spr_uncore_m2pcie,	/* 3: M2PCIe */
	&spr_uncore_pcu,	/* 4: PCU */
	NULL,			/* 5 */
	&spr_uncore_imc,	/* 6: IMC */
	&spr_uncore_m2m,	/* 7: M2M */
	NULL,			/* 8: UPI (broken in discovery, see below) */
	NULL,			/* 9: M3UPI (broken in discovery, see below) */
	NULL,			/* 10 */
	&spr_uncore_mdf,	/* 11: MDF */
};
6183 
/*
 * The uncore units, which are not supported by the discovery table,
 * are implemented from here.
 */
#define SPR_UNCORE_UPI_NUM_BOXES	4

/* Per-box config-space stride; shared by the UPI and M3UPI types below. */
static unsigned int spr_upi_pci_offsets[SPR_UNCORE_UPI_NUM_BOXES] = {
	0, 0x8000, 0x10000, 0x18000
};
6193 
/*
 * UPI PMON, defined statically because its discovery-table entry is
 * broken on some SPR variants (see spr_uncore_pci_init()).  Register
 * layout is inherited from ICX; box locations are filled in at load
 * time by spr_update_device_location().
 */
static struct intel_uncore_type spr_uncore_upi = {
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SPR_RAW_EVENT_MASK_EXT,
	.format_group		= &spr_uncore_raw_format_group,
	.ops			= &spr_uncore_pci_ops,
	.name			= "upi",
	.attr_update		= spr_upi_attr_update,
	.get_topology		= spr_upi_get_topology,
	.set_mapping		= spr_upi_set_mapping,
	.cleanup_mapping	= spr_upi_cleanup_mapping,
	.type_id		= UNCORE_SPR_UPI,
	.num_counters		= 4,
	.num_boxes		= SPR_UNCORE_UPI_NUM_BOXES,
	.perf_ctr_bits		= 48,
	.perf_ctr		= ICX_UPI_PCI_PMON_CTR0,
	.event_ctl		= ICX_UPI_PCI_PMON_CTL0,
	.box_ctl		= ICX_UPI_PCI_PMON_BOX_CTL,
	.pci_offsets		= spr_upi_pci_offsets,
};
6213 
/*
 * M3UPI PMON, also defined statically (broken discovery entry); reuses
 * the ICX register layout and the UPI per-box offsets/constraints.
 */
static struct intel_uncore_type spr_uncore_m3upi = {
	SPR_UNCORE_PCI_COMMON_FORMAT(),
	.name			= "m3upi",
	.type_id		= UNCORE_SPR_M3UPI,
	.num_counters		= 4,
	.num_boxes		= SPR_UNCORE_UPI_NUM_BOXES,
	.perf_ctr_bits		= 48,
	.perf_ctr		= ICX_M3UPI_PCI_PMON_CTR0,
	.event_ctl		= ICX_M3UPI_PCI_PMON_CTL0,
	.box_ctl		= ICX_M3UPI_PCI_PMON_BOX_CTL,
	.pci_offsets		= spr_upi_pci_offsets,
	.constraints		= icx_uncore_m3upi_constraints,
};
6227 
/* Free-running IIO counter groups. */
enum perf_uncore_spr_iio_freerunning_type_id {
	SPR_IIO_MSR_IOCLK,
	SPR_IIO_MSR_BW_IN,
	SPR_IIO_MSR_BW_OUT,

	SPR_IIO_FREERUNNING_TYPE_MAX,
};

/*
 * Initializer order presumed to be { counter_base, counter_offset,
 * box_offset, num_counters, bits } per struct freerunning_counters —
 * TODO confirm against uncore.h.  All counters are 48 bits wide.
 */
static struct freerunning_counters spr_iio_freerunning[] = {
	[SPR_IIO_MSR_IOCLK]	= { 0x340e, 0x1, 0x10, 1, 48 },
	[SPR_IIO_MSR_BW_IN]	= { 0x3800, 0x1, 0x10, 8, 48 },
	[SPR_IIO_MSR_BW_OUT]	= { 0x3808, 0x1, 0x10, 8, 48 },
};
6241 
/*
 * Event descriptions for the free-running IIO counters.  The scale
 * 3.814697266e-6 equals 4/2^20, i.e. the counters presumably tick per
 * 4 bytes and are reported in MiB — confirm against the SPR uncore PMON
 * reference.
 */
static struct uncore_event_desc spr_uncore_iio_freerunning_events[] = {
	/* Free-Running IIO CLOCKS Counter */
	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
	/* Free-Running IIO BANDWIDTH IN Counters */
	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4,		"event=0xff,umask=0x24"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5,		"event=0xff,umask=0x25"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6,		"event=0xff,umask=0x26"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7,		"event=0xff,umask=0x27"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit,	"MiB"),
	/* Free-Running IIO BANDWIDTH OUT Counters */
	INTEL_UNCORE_EVENT_DESC(bw_out_port0,		"event=0xff,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1,		"event=0xff,umask=0x31"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2,		"event=0xff,umask=0x32"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3,		"event=0xff,umask=0x33"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port4,		"event=0xff,umask=0x34"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port4.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port4.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port5,		"event=0xff,umask=0x35"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port5.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port5.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port6,		"event=0xff,umask=0x36"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port6.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port6.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port7,		"event=0xff,umask=0x37"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port7.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port7.unit,	"MiB"),
	{ /* end: all zeroes */ },
};
6297 
/*
 * Free-running IIO counter PMU.  num_counters = 17: one ioclk plus
 * eight bw_in and eight bw_out counters (see spr_iio_freerunning[]).
 * num_boxes is filled in at init time by spr_uncore_cpu_init().
 */
static struct intel_uncore_type spr_uncore_iio_free_running = {
	.name			= "iio_free_running",
	.num_counters		= 17,
	.num_freerunning_types	= SPR_IIO_FREERUNNING_TYPE_MAX,
	.freerunning		= spr_iio_freerunning,
	.ops			= &skx_uncore_iio_freerunning_ops,
	.event_descs		= spr_uncore_iio_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
6307 
/* Free-running IMC counter groups: DCLK plus pending-queue cycles. */
enum perf_uncore_spr_imc_freerunning_type_id {
	SPR_IMC_DCLK,
	SPR_IMC_PQ_CYCLES,

	SPR_IMC_FREERUNNING_TYPE_MAX,
};

/* MMIO-based free-running counters; 48 bits each. */
static struct freerunning_counters spr_imc_freerunning[] = {
	[SPR_IMC_DCLK]		= { 0x22b0, 0x0, 0, 1, 48 },
	[SPR_IMC_PQ_CYCLES]	= { 0x2318, 0x8, 0, 2, 48 },
};

static struct uncore_event_desc spr_uncore_imc_freerunning_events[] = {
	INTEL_UNCORE_EVENT_DESC(dclk,			"event=0xff,umask=0x10"),

	INTEL_UNCORE_EVENT_DESC(rpq_cycles,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(wpq_cycles,		"event=0xff,umask=0x21"),
	{ /* end: all zeroes */ },
};
6327 
#define SPR_MC_DEVICE_ID	0x3251

/*
 * Map the MMIO region of this IMC box.  Each PMU index addresses its
 * own memory controller at a fixed stride from the base offset.
 */
static void spr_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
{
	int mem_offset = box->pmu->pmu_idx * ICX_IMC_MEM_STRIDE + SNR_IMC_MMIO_MEM0_OFFSET;

	snr_uncore_mmio_map(box, uncore_mmio_box_ctl(box),
			    mem_offset, SPR_MC_DEVICE_ID);
}
6337 
/* Ops for the MMIO-mapped free-running IMC counters. */
static struct intel_uncore_ops spr_uncore_imc_freerunning_ops = {
	.init_box	= spr_uncore_imc_freerunning_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};

/*
 * Free-running IMC counter PMU.  num_counters = 3: dclk + rpq_cycles +
 * wpq_cycles.  num_boxes is derived at init time in spr_uncore_mmio_init().
 */
static struct intel_uncore_type spr_uncore_imc_free_running = {
	.name			= "imc_free_running",
	.num_counters		= 3,
	.mmio_map_size		= SNR_IMC_MMIO_SIZE,
	.num_freerunning_types	= SPR_IMC_FREERUNNING_TYPE_MAX,
	.freerunning		= spr_imc_freerunning,
	.ops			= &spr_uncore_imc_freerunning_ops,
	.event_descs		= spr_uncore_imc_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
6355 
/* Extra (non-discovery) uncore types appended per access method. */
#define UNCORE_SPR_MSR_EXTRA_UNCORES		1
#define UNCORE_SPR_MMIO_EXTRA_UNCORES		1
#define UNCORE_SPR_PCI_EXTRA_UNCORES		2

static struct intel_uncore_type *spr_msr_uncores[UNCORE_SPR_MSR_EXTRA_UNCORES] = {
	&spr_uncore_iio_free_running,
};

static struct intel_uncore_type *spr_mmio_uncores[UNCORE_SPR_MMIO_EXTRA_UNCORES] = {
	&spr_uncore_imc_free_running,
};

static struct intel_uncore_type *spr_pci_uncores[UNCORE_SPR_PCI_EXTRA_UNCORES] = {
	&spr_uncore_upi,
	&spr_uncore_m3upi
};

/*
 * Discovery-table type IDs to skip: UPI and M3UPI use the static
 * definitions above instead of their (broken) discovery entries.
 */
int spr_uncore_units_ignore[] = {
	UNCORE_SPR_UPI,
	UNCORE_SPR_M3UPI,
	UNCORE_IGNORE_END
};
6378 
uncore_type_customized_copy(struct intel_uncore_type * to_type,struct intel_uncore_type * from_type)6379 static void uncore_type_customized_copy(struct intel_uncore_type *to_type,
6380 					struct intel_uncore_type *from_type)
6381 {
6382 	if (!to_type || !from_type)
6383 		return;
6384 
6385 	if (from_type->name)
6386 		to_type->name = from_type->name;
6387 	if (from_type->fixed_ctr_bits)
6388 		to_type->fixed_ctr_bits = from_type->fixed_ctr_bits;
6389 	if (from_type->event_mask)
6390 		to_type->event_mask = from_type->event_mask;
6391 	if (from_type->event_mask_ext)
6392 		to_type->event_mask_ext = from_type->event_mask_ext;
6393 	if (from_type->fixed_ctr)
6394 		to_type->fixed_ctr = from_type->fixed_ctr;
6395 	if (from_type->fixed_ctl)
6396 		to_type->fixed_ctl = from_type->fixed_ctl;
6397 	if (from_type->fixed_ctr_bits)
6398 		to_type->fixed_ctr_bits = from_type->fixed_ctr_bits;
6399 	if (from_type->num_shared_regs)
6400 		to_type->num_shared_regs = from_type->num_shared_regs;
6401 	if (from_type->constraints)
6402 		to_type->constraints = from_type->constraints;
6403 	if (from_type->ops)
6404 		to_type->ops = from_type->ops;
6405 	if (from_type->event_descs)
6406 		to_type->event_descs = from_type->event_descs;
6407 	if (from_type->format_group)
6408 		to_type->format_group = from_type->format_group;
6409 	if (from_type->attr_update)
6410 		to_type->attr_update = from_type->attr_update;
6411 	if (from_type->set_mapping)
6412 		to_type->set_mapping = from_type->set_mapping;
6413 	if (from_type->get_topology)
6414 		to_type->get_topology = from_type->get_topology;
6415 	if (from_type->cleanup_mapping)
6416 		to_type->cleanup_mapping = from_type->cleanup_mapping;
6417 }
6418 
/*
 * Build the uncore type list for one access method: start from the
 * generic discovery-based list, overlay the SPR customizations, then
 * append @num_extra statically-defined types.
 *
 * NOTE(review): intel_uncore_generic_init_uncores() is dereferenced
 * without a NULL check here — presumably it cannot fail for SPR, or the
 * caller tolerates the resulting list; verify against uncore_discovery.c.
 */
static struct intel_uncore_type **
uncore_get_uncores(enum uncore_access_type type_id, int num_extra,
		    struct intel_uncore_type **extra)
{
	struct intel_uncore_type **types, **start_types;
	int i;

	start_types = types = intel_uncore_generic_init_uncores(type_id, num_extra);

	/* Only copy the customized features */
	for (; *types; types++) {
		if ((*types)->type_id >= UNCORE_SPR_NUM_UNCORE_TYPES)
			continue;
		uncore_type_customized_copy(*types, spr_uncores[(*types)->type_id]);
	}

	/* Append the extra types into the slots reserved by num_extra. */
	for (i = 0; i < num_extra; i++, types++)
		*types = extra[i];

	return start_types;
}
6440 
6441 static struct intel_uncore_type *
uncore_find_type_by_id(struct intel_uncore_type ** types,int type_id)6442 uncore_find_type_by_id(struct intel_uncore_type **types, int type_id)
6443 {
6444 	for (; *types; types++) {
6445 		if (type_id == (*types)->type_id)
6446 			return *types;
6447 	}
6448 
6449 	return NULL;
6450 }
6451 
uncore_type_max_boxes(struct intel_uncore_type ** types,int type_id)6452 static int uncore_type_max_boxes(struct intel_uncore_type **types,
6453 				 int type_id)
6454 {
6455 	struct intel_uncore_type *type;
6456 	int i, max = 0;
6457 
6458 	type = uncore_find_type_by_id(types, type_id);
6459 	if (!type)
6460 		return 0;
6461 
6462 	for (i = 0; i < type->num_boxes; i++) {
6463 		if (type->box_ids[i] > max)
6464 			max = type->box_ids[i];
6465 	}
6466 
6467 	return max + 1;
6468 }
6469 
/* MSR holding the Cbo/CHA count; used to override broken discovery data. */
#define SPR_MSR_UNC_CBO_CONFIG		0x2FFE

/*
 * Register the MSR-based uncore PMON types and fix up box counts that
 * the discovery table gets wrong on some SPR variants.
 */
void spr_uncore_cpu_init(void)
{
	struct intel_uncore_type *type;
	u64 num_cbo;

	uncore_msr_uncores = uncore_get_uncores(UNCORE_ACCESS_MSR,
						UNCORE_SPR_MSR_EXTRA_UNCORES,
						spr_msr_uncores);

	type = uncore_find_type_by_id(uncore_msr_uncores, UNCORE_SPR_CHA);
	if (type) {
		/*
		 * The value from the discovery table (stored in the type->num_boxes
		 * of UNCORE_SPR_CHA) is incorrect on some SPR variants because of a
		 * firmware bug. Using the value from SPR_MSR_UNC_CBO_CONFIG to replace it.
		 */
		rdmsrl(SPR_MSR_UNC_CBO_CONFIG, num_cbo);
		/*
		 * The MSR doesn't work on the EMR XCC, but the firmware bug doesn't impact
		 * the EMR XCC. Don't let the value from the MSR replace the existing value.
		 */
		if (num_cbo)
			type->num_boxes = num_cbo;
	}
	/* Size the free-running IIO PMU to cover every discovered IIO box ID. */
	spr_uncore_iio_free_running.num_boxes = uncore_type_max_boxes(uncore_msr_uncores, UNCORE_SPR_IIO);
}
6498 
#define SPR_UNCORE_UPI_PCIID		0x3241
#define SPR_UNCORE_UPI0_DEVFN		0x9
#define SPR_UNCORE_M3UPI_PCIID		0x3246
#define SPR_UNCORE_M3UPI0_DEVFN		0x29

/*
 * Walk the PCI bus for the UPI/M3UPI PMON devices and record, per die,
 * a packed box-control address (domain/bus/devfn plus the type's
 * box_ctl offset) in type->box_ctls.  On allocation failure the type
 * is disabled by zeroing num_boxes.
 */
static void spr_update_device_location(int type_id)
{
	struct intel_uncore_type *type;
	struct pci_dev *dev = NULL;
	u32 device, devfn;
	u64 *ctls;
	int die;

	if (type_id == UNCORE_SPR_UPI) {
		type = &spr_uncore_upi;
		device = SPR_UNCORE_UPI_PCIID;
		devfn = SPR_UNCORE_UPI0_DEVFN;
	} else if (type_id == UNCORE_SPR_M3UPI) {
		type = &spr_uncore_m3upi;
		device = SPR_UNCORE_M3UPI_PCIID;
		devfn = SPR_UNCORE_M3UPI0_DEVFN;
	} else
		return;

	ctls = kcalloc(__uncore_max_dies, sizeof(u64), GFP_KERNEL);
	if (!ctls) {
		type->num_boxes = 0;
		return;
	}

	/*
	 * pci_get_device() drops the reference on the device passed in as
	 * the search anchor, so 'continue' does not leak a reference.
	 */
	while ((dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, dev)) != NULL) {
		/* Only the first link's function matches; skip the others. */
		if (devfn != dev->devfn)
			continue;

		die = uncore_device_to_die(dev);
		if (die < 0)
			continue;

		ctls[die] = pci_domain_nr(dev->bus) << UNCORE_DISCOVERY_PCI_DOMAIN_OFFSET |
			    dev->bus->number << UNCORE_DISCOVERY_PCI_BUS_OFFSET |
			    devfn << UNCORE_DISCOVERY_PCI_DEVFN_OFFSET |
			    type->box_ctl;
	}

	type->box_ctls = ctls;
}
6545 
/*
 * Register the PCI-based uncore PMON types.  Always returns 0.
 */
int spr_uncore_pci_init(void)
{
	/*
	 * The discovery table of UPI on some SPR variant is broken,
	 * which impacts the detection of both UPI and M3UPI uncore PMON.
	 * Use the pre-defined UPI and M3UPI table to replace.
	 *
	 * The accurate location, e.g., domain and BUS number,
	 * can only be retrieved at load time.
	 * Update the location of UPI and M3UPI.
	 */
	spr_update_device_location(UNCORE_SPR_UPI);
	spr_update_device_location(UNCORE_SPR_M3UPI);
	uncore_pci_uncores = uncore_get_uncores(UNCORE_ACCESS_PCI,
						UNCORE_SPR_PCI_EXTRA_UNCORES,
						spr_pci_uncores);
	return 0;
}
6564 
spr_uncore_mmio_init(void)6565 void spr_uncore_mmio_init(void)
6566 {
6567 	int ret = snbep_pci2phy_map_init(0x3250, SKX_CPUNODEID, SKX_GIDNIDMAP, true);
6568 
6569 	if (ret)
6570 		uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO, 0, NULL);
6571 	else {
6572 		uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO,
6573 							 UNCORE_SPR_MMIO_EXTRA_UNCORES,
6574 							 spr_mmio_uncores);
6575 
6576 		spr_uncore_imc_free_running.num_boxes = uncore_type_max_boxes(uncore_mmio_uncores, UNCORE_SPR_IMC) / 2;
6577 	}
6578 }
6579 
6580 /* end of SPR uncore support */
6581