// SPDX-License-Identifier: GPL-2.0
/* SandyBridge-EP/IvyTown uncore support */
#include "uncore.h"
#include "uncore_discovery.h"

/* SNB-EP pci bus to socket mapping */
#define SNBEP_CPUNODEID			0x40
#define SNBEP_GIDNIDMAP			0x54

/* SNB-EP Box level control */
#define SNBEP_PMON_BOX_CTL_RST_CTRL	(1 << 0)
#define SNBEP_PMON_BOX_CTL_RST_CTRS	(1 << 1)
#define SNBEP_PMON_BOX_CTL_FRZ		(1 << 8)
#define SNBEP_PMON_BOX_CTL_FRZ_EN	(1 << 16)
#define SNBEP_PMON_BOX_CTL_INT		(SNBEP_PMON_BOX_CTL_RST_CTRL | \
					 SNBEP_PMON_BOX_CTL_RST_CTRS | \
					 SNBEP_PMON_BOX_CTL_FRZ_EN)
/* SNB-EP event control */
#define SNBEP_PMON_CTL_EV_SEL_MASK	0x000000ff
#define SNBEP_PMON_CTL_UMASK_MASK	0x0000ff00
#define SNBEP_PMON_CTL_RST		(1 << 17)
#define SNBEP_PMON_CTL_EDGE_DET		(1 << 18)
#define SNBEP_PMON_CTL_EV_SEL_EXT	(1 << 21)
#define SNBEP_PMON_CTL_EN		(1 << 22)
#define SNBEP_PMON_CTL_INVERT		(1 << 23)
#define SNBEP_PMON_CTL_TRESH_MASK	0xff000000
#define SNBEP_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_INVERT | \
					 SNBEP_PMON_CTL_TRESH_MASK)

/* SNB-EP Ubox event control */
#define SNBEP_U_MSR_PMON_CTL_TRESH_MASK		0x1f000000
#define SNBEP_U_MSR_PMON_RAW_EVENT_MASK		\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PMON_CTL_UMASK_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PMON_CTL_INVERT | \
				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)

#define SNBEP_CBO_PMON_CTL_TID_EN		(1 << 19)
#define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK	(SNBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

/* SNB-EP PCU event control */
#define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK	0x0000c000
#define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK	0x1f000000
#define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT	(1 << 30)
#define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET	(1 << 31)
#define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PMON_CTL_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)

#define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_RAW_EVENT_MASK | \
				 SNBEP_PMON_CTL_EV_SEL_EXT)

/* SNB-EP pci control register */
#define SNBEP_PCI_PMON_BOX_CTL			0xf4
#define SNBEP_PCI_PMON_CTL0			0xd8
/* SNB-EP pci counter register */
#define SNBEP_PCI_PMON_CTR0			0xa0

/* SNB-EP home agent register */
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0	0x40
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1	0x44
#define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH	0x48
/* SNB-EP memory controller register */
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL		0xf0
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR		0xd0
/* SNB-EP QPI register */
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0		0x228
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1		0x22c
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK0		0x238
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK1		0x23c

/* SNB-EP Ubox register */
#define SNBEP_U_MSR_PMON_CTR0			0xc16
#define SNBEP_U_MSR_PMON_CTL0			0xc10

#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL		0xc08
#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR		0xc09

/* SNB-EP Cbo register */
#define SNBEP_C0_MSR_PMON_CTR0			0xd16
#define SNBEP_C0_MSR_PMON_CTL0			0xd10
#define SNBEP_C0_MSR_PMON_BOX_CTL		0xd04
#define SNBEP_C0_MSR_PMON_BOX_FILTER		0xd14
#define SNBEP_CBO_MSR_OFFSET			0x20

#define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID	0x1f
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID	0x3fc00
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE	0x7c0000
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC	0xff800000

#define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) {	\
	.event = (e),				\
	.msr = SNBEP_C0_MSR_PMON_BOX_FILTER,	\
	.config_mask = (m),			\
	.idx = (i)				\
}
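
/*
 * Illustrative example: SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4)
 * declares that events whose low 16 config bits equal 0x0334 program the
 * Cbox filter MSR, with idx a bitmask of the filter fields the event may
 * use (see snbep_cbox_filter_mask() below; 0x4 selects the STATE field).
 */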

/* SNB-EP PCU register */
#define SNBEP_PCU_MSR_PMON_CTR0			0xc36
#define SNBEP_PCU_MSR_PMON_CTL0			0xc30
#define SNBEP_PCU_MSR_PMON_BOX_CTL		0xc24
#define SNBEP_PCU_MSR_PMON_BOX_FILTER		0xc34
#define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK	0xffffffff
#define SNBEP_PCU_MSR_CORE_C3_CTR		0x3fc
#define SNBEP_PCU_MSR_CORE_C6_CTR		0x3fd

/* IVBEP event control */
#define IVBEP_PMON_BOX_CTL_INT		(SNBEP_PMON_BOX_CTL_RST_CTRL | \
					 SNBEP_PMON_BOX_CTL_RST_CTRS)
#define IVBEP_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_TRESH_MASK)
/* IVBEP Ubox */
#define IVBEP_U_MSR_PMON_GLOBAL_CTL		0xc00
#define IVBEP_U_PMON_GLOBAL_FRZ_ALL		(1 << 31)
#define IVBEP_U_PMON_GLOBAL_UNFRZ_ALL		(1 << 29)

#define IVBEP_U_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PMON_CTL_UMASK_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
/* IVBEP Cbo */
#define IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK	(IVBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

#define IVBEP_CB0_MSR_PMON_BOX_FILTER_TID	(0x1fULL << 0)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 5)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x3fULL << 17)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NID	(0xffffULL << 32)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC	(0x1ffULL << 52)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_C6	(0x1ULL << 61)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NC	(0x1ULL << 62)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)

/* IVBEP home agent */
#define IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST		(1 << 16)
#define IVBEP_HA_PCI_PMON_RAW_EVENT_MASK	\
				(IVBEP_PMON_RAW_EVENT_MASK | \
				 IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST)
/* IVBEP PCU */
#define IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
/* IVBEP QPI */
#define IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
				(IVBEP_PMON_RAW_EVENT_MASK | \
				 SNBEP_PMON_CTL_EV_SEL_EXT)

#define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
				((1ULL << (n)) - 1)))
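/* e.g. __BITS_VALUE(0x123456, 1, 8) extracts 8-bit field 1, yielding 0x34 */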

/* Haswell-EP Ubox */
#define HSWEP_U_MSR_PMON_CTR0			0x709
#define HSWEP_U_MSR_PMON_CTL0			0x705
#define HSWEP_U_MSR_PMON_FILTER			0x707

#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTL		0x703
#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTR		0x704

#define HSWEP_U_MSR_PMON_BOX_FILTER_TID		(0x1 << 0)
#define HSWEP_U_MSR_PMON_BOX_FILTER_CID		(0x1fULL << 1)
#define HSWEP_U_MSR_PMON_BOX_FILTER_MASK \
					(HSWEP_U_MSR_PMON_BOX_FILTER_TID | \
					 HSWEP_U_MSR_PMON_BOX_FILTER_CID)

/* Haswell-EP CBo */
#define HSWEP_C0_MSR_PMON_CTR0			0xe08
#define HSWEP_C0_MSR_PMON_CTL0			0xe01
#define HSWEP_C0_MSR_PMON_BOX_CTL		0xe00
#define HSWEP_C0_MSR_PMON_BOX_FILTER0		0xe05
#define HSWEP_CBO_MSR_OFFSET			0x10

#define HSWEP_CB0_MSR_PMON_BOX_FILTER_TID	(0x3fULL << 0)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 6)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x7fULL << 17)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NID	(0xffffULL << 32)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC	(0x1ffULL << 52)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_C6	(0x1ULL << 61)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NC	(0x1ULL << 62)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)

/* Haswell-EP Sbox */
#define HSWEP_S0_MSR_PMON_CTR0			0x726
#define HSWEP_S0_MSR_PMON_CTL0			0x721
#define HSWEP_S0_MSR_PMON_BOX_CTL		0x720
#define HSWEP_SBOX_MSR_OFFSET			0xa
#define HSWEP_S_MSR_PMON_RAW_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

/* Haswell-EP PCU */
#define HSWEP_PCU_MSR_PMON_CTR0			0x717
#define HSWEP_PCU_MSR_PMON_CTL0			0x711
#define HSWEP_PCU_MSR_PMON_BOX_CTL		0x710
#define HSWEP_PCU_MSR_PMON_BOX_FILTER		0x715

/* KNL Ubox */
#define KNL_U_MSR_PMON_RAW_EVENT_MASK \
					(SNBEP_U_MSR_PMON_RAW_EVENT_MASK | \
						SNBEP_CBO_PMON_CTL_TID_EN)
/* KNL CHA */
#define KNL_CHA_MSR_OFFSET			0xc
#define KNL_CHA_MSR_PMON_CTL_QOR		(1 << 16)
#define KNL_CHA_MSR_PMON_RAW_EVENT_MASK \
					(SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK | \
					 KNL_CHA_MSR_PMON_CTL_QOR)
#define KNL_CHA_MSR_PMON_BOX_FILTER_TID		0x1ff
#define KNL_CHA_MSR_PMON_BOX_FILTER_STATE	(7 << 18)
#define KNL_CHA_MSR_PMON_BOX_FILTER_OP		(0xfffffe2aULL << 32)
#define KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE	(0x1ULL << 32)
#define KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE	(0x1ULL << 33)
#define KNL_CHA_MSR_PMON_BOX_FILTER_NNC		(0x1ULL << 37)

/* KNL EDC/MC UCLK */
#define KNL_UCLK_MSR_PMON_CTR0_LOW		0x400
#define KNL_UCLK_MSR_PMON_CTL0			0x420
#define KNL_UCLK_MSR_PMON_BOX_CTL		0x430
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW	0x44c
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL	0x454
#define KNL_PMON_FIXED_CTL_EN			0x1

/* KNL EDC */
#define KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW		0xa00
#define KNL_EDC0_ECLK_MSR_PMON_CTL0		0xa20
#define KNL_EDC0_ECLK_MSR_PMON_BOX_CTL		0xa30
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW	0xa3c
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL	0xa44

/* KNL MC */
#define KNL_MC0_CH0_MSR_PMON_CTR0_LOW		0xb00
#define KNL_MC0_CH0_MSR_PMON_CTL0		0xb20
#define KNL_MC0_CH0_MSR_PMON_BOX_CTL		0xb30
#define KNL_MC0_CH0_MSR_PMON_FIXED_LOW		0xb3c
#define KNL_MC0_CH0_MSR_PMON_FIXED_CTL		0xb44

/* KNL IRP */
#define KNL_IRP_PCI_PMON_BOX_CTL		0xf0
#define KNL_IRP_PCI_PMON_RAW_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
						 KNL_CHA_MSR_PMON_CTL_QOR)
/* KNL PCU */
#define KNL_PCU_PMON_CTL_EV_SEL_MASK		0x0000007f
#define KNL_PCU_PMON_CTL_USE_OCC_CTR		(1 << 7)
#define KNL_PCU_MSR_PMON_CTL_TRESH_MASK		0x3f000000
#define KNL_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(KNL_PCU_PMON_CTL_EV_SEL_MASK | \
				 KNL_PCU_PMON_CTL_USE_OCC_CTR | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_CBO_PMON_CTL_TID_EN | \
				 SNBEP_PMON_CTL_INVERT | \
				 KNL_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)

/* SKX pci bus to socket mapping */
#define SKX_CPUNODEID			0xc0
#define SKX_GIDNIDMAP			0xd4

/*
 * The CPU_BUS_NUMBER MSR returns the values of the respective CPUBUSNO CSR
 * that BIOS programmed. MSR has package scope.
 * |  Bit  |  Default  |  Description
 * | [63]  |    00h    | VALID - When set, indicates the CPU bus
 *                       numbers have been initialized. (RO)
 * |[62:48]|    ---    | Reserved
 * |[47:40]|    00h    | BUS_NUM_5 - Return the bus number BIOS assigned
 *                       CPUBUSNO(5). (RO)
 * |[39:32]|    00h    | BUS_NUM_4 - Return the bus number BIOS assigned
 *                       CPUBUSNO(4). (RO)
 * |[31:24]|    00h    | BUS_NUM_3 - Return the bus number BIOS assigned
 *                       CPUBUSNO(3). (RO)
 * |[23:16]|    00h    | BUS_NUM_2 - Return the bus number BIOS assigned
 *                       CPUBUSNO(2). (RO)
 * |[15:8] |    00h    | BUS_NUM_1 - Return the bus number BIOS assigned
 *                       CPUBUSNO(1). (RO)
 * | [7:0] |    00h    | BUS_NUM_0 - Return the bus number BIOS assigned
 *                       CPUBUSNO(0). (RO)
 */
#define SKX_MSR_CPU_BUS_NUMBER		0x300
#define SKX_MSR_CPU_BUS_VALID_BIT	(1ULL << 63)
#define BUS_NUM_STRIDE			8
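
/*
 * Illustrative sketch only (the driver open-codes the same shift where it
 * needs it): per the layout above, the BIOS-assigned bus number for
 * CPUBUSNO(n) can be extracted as
 *
 *	rdmsrl(SKX_MSR_CPU_BUS_NUMBER, msr);
 *	if (msr & SKX_MSR_CPU_BUS_VALID_BIT)
 *		bus = (msr >> (n * BUS_NUM_STRIDE)) & 0xff;
 */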

/* SKX CHA */
#define SKX_CHA_MSR_PMON_BOX_FILTER_TID		(0x1ffULL << 0)
#define SKX_CHA_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 9)
#define SKX_CHA_MSR_PMON_BOX_FILTER_STATE	(0x3ffULL << 17)
#define SKX_CHA_MSR_PMON_BOX_FILTER_REM		(0x1ULL << 32)
#define SKX_CHA_MSR_PMON_BOX_FILTER_LOC		(0x1ULL << 33)
#define SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC	(0x1ULL << 35)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NM		(0x1ULL << 36)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM	(0x1ULL << 37)
#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC0	(0x3ffULL << 41)
#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC1	(0x3ffULL << 51)
#define SKX_CHA_MSR_PMON_BOX_FILTER_C6		(0x1ULL << 61)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NC		(0x1ULL << 62)
#define SKX_CHA_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)

/* SKX IIO */
#define SKX_IIO0_MSR_PMON_CTL0		0xa48
#define SKX_IIO0_MSR_PMON_CTR0		0xa41
#define SKX_IIO0_MSR_PMON_BOX_CTL	0xa40
#define SKX_IIO_MSR_OFFSET		0x20

#define SKX_PMON_CTL_TRESH_MASK		(0xff << 24)
#define SKX_PMON_CTL_TRESH_MASK_EXT	(0xf)
#define SKX_PMON_CTL_CH_MASK		(0xff << 4)
#define SKX_PMON_CTL_FC_MASK		(0x7 << 12)
#define SKX_IIO_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_INVERT | \
					 SKX_PMON_CTL_TRESH_MASK)
#define SKX_IIO_PMON_RAW_EVENT_MASK_EXT	(SKX_PMON_CTL_TRESH_MASK_EXT | \
					 SKX_PMON_CTL_CH_MASK | \
					 SKX_PMON_CTL_FC_MASK)

/* SKX IRP */
#define SKX_IRP0_MSR_PMON_CTL0		0xa5b
#define SKX_IRP0_MSR_PMON_CTR0		0xa59
#define SKX_IRP0_MSR_PMON_BOX_CTL	0xa58
#define SKX_IRP_MSR_OFFSET		0x20

/* SKX UPI */
#define SKX_UPI_PCI_PMON_CTL0		0x350
#define SKX_UPI_PCI_PMON_CTR0		0x318
#define SKX_UPI_PCI_PMON_BOX_CTL	0x378
#define SKX_UPI_CTL_UMASK_EXT		0xffefff

/* SKX M2M */
#define SKX_M2M_PCI_PMON_CTL0		0x228
#define SKX_M2M_PCI_PMON_CTR0		0x200
#define SKX_M2M_PCI_PMON_BOX_CTL	0x258

/* Memory Map registers device ID */
#define SNR_ICX_MESH2IIO_MMAP_DID		0x9a2
#define SNR_ICX_SAD_CONTROL_CFG			0x3f4

/* Getting I/O stack id in SAD_CONTROL_CFG notation */
#define SAD_CONTROL_STACK_ID(data)		(((data) >> 4) & 0x7)
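/* e.g. SAD_CONTROL_STACK_ID(0x35) = (0x35 >> 4) & 0x7 = 3 */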

/* SNR Ubox */
#define SNR_U_MSR_PMON_CTR0			0x1f98
#define SNR_U_MSR_PMON_CTL0			0x1f91
#define SNR_U_MSR_PMON_UCLK_FIXED_CTL		0x1f93
#define SNR_U_MSR_PMON_UCLK_FIXED_CTR		0x1f94

/* SNR CHA */
#define SNR_CHA_RAW_EVENT_MASK_EXT		0x3ffffff
#define SNR_CHA_MSR_PMON_CTL0			0x1c01
#define SNR_CHA_MSR_PMON_CTR0			0x1c08
#define SNR_CHA_MSR_PMON_BOX_CTL		0x1c00
#define SNR_C0_MSR_PMON_BOX_FILTER0		0x1c05

/* SNR IIO */
#define SNR_IIO_MSR_PMON_CTL0			0x1e08
#define SNR_IIO_MSR_PMON_CTR0			0x1e01
#define SNR_IIO_MSR_PMON_BOX_CTL		0x1e00
#define SNR_IIO_MSR_OFFSET			0x10
#define SNR_IIO_PMON_RAW_EVENT_MASK_EXT		0x7ffff

/* SNR IRP */
#define SNR_IRP0_MSR_PMON_CTL0			0x1ea8
#define SNR_IRP0_MSR_PMON_CTR0			0x1ea1
#define SNR_IRP0_MSR_PMON_BOX_CTL		0x1ea0
#define SNR_IRP_MSR_OFFSET			0x10

/* SNR M2PCIE */
#define SNR_M2PCIE_MSR_PMON_CTL0		0x1e58
#define SNR_M2PCIE_MSR_PMON_CTR0		0x1e51
#define SNR_M2PCIE_MSR_PMON_BOX_CTL		0x1e50
#define SNR_M2PCIE_MSR_OFFSET			0x10

/* SNR PCU */
#define SNR_PCU_MSR_PMON_CTL0			0x1ef1
#define SNR_PCU_MSR_PMON_CTR0			0x1ef8
#define SNR_PCU_MSR_PMON_BOX_CTL		0x1ef0
#define SNR_PCU_MSR_PMON_BOX_FILTER		0x1efc

/* SNR M2M */
#define SNR_M2M_PCI_PMON_CTL0			0x468
#define SNR_M2M_PCI_PMON_CTR0			0x440
#define SNR_M2M_PCI_PMON_BOX_CTL		0x438
#define SNR_M2M_PCI_PMON_UMASK_EXT		0xff

/* SNR PCIE3 */
#define SNR_PCIE3_PCI_PMON_CTL0			0x508
#define SNR_PCIE3_PCI_PMON_CTR0			0x4e8
#define SNR_PCIE3_PCI_PMON_BOX_CTL		0x4e0

/* SNR IMC */
#define SNR_IMC_MMIO_PMON_FIXED_CTL		0x54
#define SNR_IMC_MMIO_PMON_FIXED_CTR		0x38
#define SNR_IMC_MMIO_PMON_CTL0			0x40
#define SNR_IMC_MMIO_PMON_CTR0			0x8
#define SNR_IMC_MMIO_PMON_BOX_CTL		0x22800
#define SNR_IMC_MMIO_OFFSET			0x4000
#define SNR_IMC_MMIO_SIZE			0x4000
#define SNR_IMC_MMIO_BASE_OFFSET		0xd0
#define SNR_IMC_MMIO_BASE_MASK			0x1FFFFFFF
#define SNR_IMC_MMIO_MEM0_OFFSET		0xd8
#define SNR_IMC_MMIO_MEM0_MASK			0x7FF

/* ICX CHA */
#define ICX_C34_MSR_PMON_CTR0			0xb68
#define ICX_C34_MSR_PMON_CTL0			0xb61
#define ICX_C34_MSR_PMON_BOX_CTL		0xb60
#define ICX_C34_MSR_PMON_BOX_FILTER0		0xb65

/* ICX IIO */
#define ICX_IIO_MSR_PMON_CTL0			0xa58
#define ICX_IIO_MSR_PMON_CTR0			0xa51
#define ICX_IIO_MSR_PMON_BOX_CTL		0xa50

/* ICX IRP */
#define ICX_IRP0_MSR_PMON_CTL0			0xa4d
#define ICX_IRP0_MSR_PMON_CTR0			0xa4b
#define ICX_IRP0_MSR_PMON_BOX_CTL		0xa4a

/* ICX M2PCIE */
#define ICX_M2PCIE_MSR_PMON_CTL0		0xa46
#define ICX_M2PCIE_MSR_PMON_CTR0		0xa41
#define ICX_M2PCIE_MSR_PMON_BOX_CTL		0xa40

/* ICX UPI */
#define ICX_UPI_PCI_PMON_CTL0			0x350
#define ICX_UPI_PCI_PMON_CTR0			0x320
#define ICX_UPI_PCI_PMON_BOX_CTL		0x318
#define ICX_UPI_CTL_UMASK_EXT			0xffffff
#define ICX_UBOX_DID				0x3450

/* ICX M3UPI */
#define ICX_M3UPI_PCI_PMON_CTL0			0xd8
#define ICX_M3UPI_PCI_PMON_CTR0			0xa8
#define ICX_M3UPI_PCI_PMON_BOX_CTL		0xa0

/* ICX IMC */
#define ICX_NUMBER_IMC_CHN			3
#define ICX_IMC_MEM_STRIDE			0x4

/* SPR */
#define SPR_RAW_EVENT_MASK_EXT			0xffffff
#define SPR_UBOX_DID				0x3250

/* SPR CHA */
#define SPR_CHA_EVENT_MASK_EXT			0xffffffff
#define SPR_CHA_PMON_CTL_TID_EN			(1 << 16)
#define SPR_CHA_PMON_EVENT_MASK			(SNBEP_PMON_RAW_EVENT_MASK | \
						 SPR_CHA_PMON_CTL_TID_EN)
#define SPR_CHA_PMON_BOX_FILTER_TID		0x3ff

#define SPR_C0_MSR_PMON_BOX_FILTER0		0x200e

DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6");
DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-43,45-55");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext2, umask, "config:8-15,32-57");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext3, umask, "config:8-15,32-39");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext4, umask, "config:8-15,32-55");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext5, umask, "config:8-15,32-63");
DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
DEFINE_UNCORE_FORMAT_ATTR(tid_en2, tid_en, "config:16");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(thresh9, thresh, "config:24-35");
DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(thresh6, thresh, "config:24-29");
DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge_det, occ_edge_det, "config:31");
DEFINE_UNCORE_FORMAT_ATTR(ch_mask, ch_mask, "config:36-43");
DEFINE_UNCORE_FORMAT_ATTR(ch_mask2, ch_mask, "config:36-47");
DEFINE_UNCORE_FORMAT_ATTR(fc_mask, fc_mask, "config:44-46");
DEFINE_UNCORE_FORMAT_ATTR(fc_mask2, fc_mask, "config:48-50");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid4, filter_tid, "config1:0-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid5, filter_tid, "config1:0-9");
DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link3, filter_link, "config1:12");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state3, filter_state, "config1:17-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_state4, filter_state, "config1:18-20");
DEFINE_UNCORE_FORMAT_ATTR(filter_state5, filter_state, "config1:17-26");
DEFINE_UNCORE_FORMAT_ATTR(filter_rem, filter_rem, "config1:32");
DEFINE_UNCORE_FORMAT_ATTR(filter_loc, filter_loc, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_nm, filter_nm, "config1:36");
DEFINE_UNCORE_FORMAT_ATTR(filter_not_nm, filter_not_nm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_local, filter_local, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_all_op, filter_all_op, "config1:35");
DEFINE_UNCORE_FORMAT_ATTR(filter_nnm, filter_nnm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc3, filter_opc, "config1:41-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc_0, filter_opc0, "config1:41-50");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc_1, filter_opc1, "config1:51-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_nc, filter_nc, "config1:62");
DEFINE_UNCORE_FORMAT_ATTR(filter_c6, filter_c6, "config1:61");
DEFINE_UNCORE_FORMAT_ATTR(filter_isoc, filter_isoc, "config1:63");
DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");

static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);
	u32 config = 0;

	if (!pci_read_config_dword(pdev, box_ctl, &config)) {
		config |= SNBEP_PMON_BOX_CTL_FRZ;
		pci_write_config_dword(pdev, box_ctl, config);
	}
}

static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);
	u32 config = 0;

	if (!pci_read_config_dword(pdev, box_ctl, &config)) {
		config &= ~SNBEP_PMON_BOX_CTL_FRZ;
		pci_write_config_dword(pdev, box_ctl, config);
	}
}

static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, hwc->config);
}

static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
	pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);

	return count;
}

static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	pci_write_config_dword(pdev, box_ctl, SNBEP_PMON_BOX_CTL_INT);
}

static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	u64 config;
	unsigned msr;

	msr = uncore_msr_box_ctl(box);
	if (msr) {
		rdmsrl(msr, config);
		config |= SNBEP_PMON_BOX_CTL_FRZ;
		wrmsrl(msr, config);
	}
}

static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	u64 config;
	unsigned msr;

	msr = uncore_msr_box_ctl(box);
	if (msr) {
		rdmsrl(msr, config);
		config &= ~SNBEP_PMON_BOX_CTL_FRZ;
		wrmsrl(msr, config);
	}
}

static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE)
		wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
					struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
}

static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);

	if (msr)
		wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
}

static struct attribute *snbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static struct attribute *snbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

static struct attribute *snbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_nid.attr,
	&format_attr_filter_state.attr,
	&format_attr_filter_opc.attr,
	NULL,
};

static struct attribute *snbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

static struct attribute *snbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};

static struct uncore_event_desc snbep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};
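
/*
 * Note on the cas_count_* scale above: each CAS transaction transfers one
 * 64-byte cache line, and 64 / 2^20 = 6.103515625e-5, so the scaled counts
 * are reported in MiB.
 */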

static struct uncore_event_desc snbep_uncore_qpi_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,       "event=0x14"),
	INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
	INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x102,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x103,umask=0x04"),
	{ /* end: all zeroes */ },
};

static const struct attribute_group snbep_uncore_format_group = {
	.name = "format",
	.attrs = snbep_uncore_formats_attr,
};

static const struct attribute_group snbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_ubox_formats_attr,
};

static const struct attribute_group snbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_cbox_formats_attr,
};

static const struct attribute_group snbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = snbep_uncore_pcu_formats_attr,
};

static const struct attribute_group snbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = snbep_uncore_qpi_formats_attr,
};

#define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),			\
	.init_box	= snbep_uncore_msr_init_box

static struct intel_uncore_ops snbep_uncore_msr_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};

#define SNBEP_UNCORE_PCI_OPS_COMMON_INIT()			\
	.init_box	= snbep_uncore_pci_init_box,		\
	.disable_box	= snbep_uncore_pci_disable_box,		\
	.enable_box	= snbep_uncore_pci_enable_box,		\
	.disable_event	= snbep_uncore_pci_disable_event,	\
	.read_counter	= snbep_uncore_pci_read_counter

static struct intel_uncore_ops snbep_uncore_pci_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event	= snbep_uncore_pci_enable_event,
};

static struct event_constraint snbep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0xe),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	EVENT_CONSTRAINT_END
};

static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	EVENT_CONSTRAINT_END
};

static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type snbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &snbep_uncore_msr_ops,
	.format_group	= &snbep_uncore_ubox_format_group,
};

static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
	EVENT_EXTRA_END
};

static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i;

	if (uncore_box_is_fake(box))
		return;

	for (i = 0; i < 5; i++) {
		if (reg1->alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	reg1->alloc = 0;
}

static struct event_constraint *
__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
			    u64 (*cbox_filter_mask)(int fields))
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i, alloc = 0;
	unsigned long flags;
	u64 mask;

	if (reg1->idx == EXTRA_REG_NONE)
		return NULL;

	raw_spin_lock_irqsave(&er->lock, flags);
	for (i = 0; i < 5; i++) {
		if (!(reg1->idx & (0x1 << i)))
			continue;
		if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
			continue;

		mask = cbox_filter_mask(0x1 << i);
		if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
		    !((reg1->config ^ er->config) & mask)) {
			atomic_add(1 << (i * 6), &er->ref);
			er->config &= ~mask;
			er->config |= reg1->config & mask;
			alloc |= (0x1 << i);
		} else {
			break;
		}
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);
	if (i < 5)
		goto fail;

	if (!uncore_box_is_fake(box))
		reg1->alloc |= alloc;

	return NULL;
fail:
	for (; i >= 0; i--) {
		if (alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	return &uncore_constraint_empty;
}

static u64 snbep_cbox_filter_mask(int fields)
{
	u64 mask = 0;

	if (fields & 0x1)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
	if (fields & 0x2)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
	if (fields & 0x4)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
	if (fields & 0x8)
		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;

	return mask;
}

static struct event_constraint *
snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
}

static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}

static struct intel_uncore_ops snbep_uncore_cbox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_cbox_hw_config,
	.get_constraint		= snbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

static struct intel_uncore_type snbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &snbep_uncore_cbox_ops,
	.format_group		= &snbep_uncore_cbox_format_group,
};

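/*
 * The PCU filter MSR packs four 8-bit band thresholds, one per filter index.
 * Retargeting an event from reg1->idx to new_idx therefore shifts its 8-bit
 * filter field by 8 bits per step and bumps the event select by the same
 * index delta (the band events 0xb-0xe map 1:1 to filter indices 0-3; see
 * snbep_pcu_hw_config() below).
 */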
static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	u64 config = reg1->config;

	if (new_idx > reg1->idx)
		config <<= 8 * (new_idx - reg1->idx);
	else
		config >>= 8 * (reg1->idx - new_idx);

	if (modify) {
		hwc->config += new_idx - reg1->idx;
		reg1->config = config;
		reg1->idx = new_idx;
	}
	return config;
}

static struct event_constraint *
snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	unsigned long flags;
	int idx = reg1->idx;
	u64 mask, config1 = reg1->config;
	bool ok = false;

	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;
again:
	mask = 0xffULL << (idx * 8);
	raw_spin_lock_irqsave(&er->lock, flags);
	if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
	    !((config1 ^ er->config) & mask)) {
		atomic_add(1 << (idx * 8), &er->ref);
		er->config &= ~mask;
		er->config |= config1 & mask;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (!ok) {
		idx = (idx + 1) % 4;
		if (idx != reg1->idx) {
			config1 = snbep_pcu_alter_er(event, idx, false);
			goto again;
		}
		return &uncore_constraint_empty;
	}

	if (!uncore_box_is_fake(box)) {
		if (idx != reg1->idx)
			snbep_pcu_alter_er(event, idx, true);
		reg1->alloc = 1;
	}
	return NULL;
}

static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];

	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	atomic_sub(1 << (reg1->idx * 8), &er->ref);
	reg1->alloc = 0;
}

static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;

	if (ev_sel >= 0xb && ev_sel <= 0xe) {
		reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
		reg1->idx = ev_sel - 0xb;
		reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
	}
	return 0;
}

static struct intel_uncore_ops snbep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

static struct intel_uncore_type snbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};

static struct intel_uncore_type *snbep_msr_uncores[] = {
	&snbep_uncore_ubox,
	&snbep_uncore_cbox,
	&snbep_uncore_pcu,
	NULL,
};

void snbep_uncore_cpu_init(void)
{
	if (snbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		snbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	uncore_msr_uncores = snbep_msr_uncores;
}

enum {
	SNBEP_PCI_QPI_PORT0_FILTER,
	SNBEP_PCI_QPI_PORT1_FILTER,
	BDX_PCI_QPI_PORT2_FILTER,
};

static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
		reg1->idx = 0;
		reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
		reg1->config = event->attr.config1;
		reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
		reg2->config = event->attr.config2;
	}
	return 0;
}

static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
		int die = box->dieid;
		struct pci_dev *filter_pdev = uncore_extra_pci_dev[die].dev[idx];

		if (filter_pdev) {
			pci_write_config_dword(filter_pdev, reg1->reg,
						(u32)reg1->config);
			pci_write_config_dword(filter_pdev, reg1->reg + 4,
						(u32)(reg1->config >> 32));
			pci_write_config_dword(filter_pdev, reg2->reg,
						(u32)reg2->config);
			pci_write_config_dword(filter_pdev, reg2->reg + 4,
						(u32)(reg2->config >> 32));
		}
	}

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static struct intel_uncore_ops snbep_uncore_qpi_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event		= snbep_qpi_enable_event,
	.hw_config		= snbep_qpi_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};

#define SNBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &snbep_uncore_pci_ops,		\
	.format_group	= &snbep_uncore_format_group

static struct intel_uncore_type snbep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.event_descs		= snbep_uncore_qpi_events,
	.format_group		= &snbep_uncore_qpi_format_group,
};

static struct intel_uncore_type snbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

enum {
	SNBEP_PCI_UNCORE_HA,
	SNBEP_PCI_UNCORE_IMC,
	SNBEP_PCI_UNCORE_QPI,
	SNBEP_PCI_UNCORE_R2PCIE,
	SNBEP_PCI_UNCORE_R3QPI,
};

static struct intel_uncore_type *snbep_pci_uncores[] = {
	[SNBEP_PCI_UNCORE_HA]		= &snbep_uncore_ha,
	[SNBEP_PCI_UNCORE_IMC]		= &snbep_uncore_imc,
	[SNBEP_PCI_UNCORE_QPI]		= &snbep_uncore_qpi,
	[SNBEP_PCI_UNCORE_R2PCIE]	= &snbep_uncore_r2pcie,
	[SNBEP_PCI_UNCORE_R3QPI]	= &snbep_uncore_r3qpi,
	NULL,
};

static const struct pci_device_id snbep_uncore_pci_ids[] = {
	{ /* Home Agent */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
	},
	{ /* MC Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* QPI Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver snbep_uncore_pci_driver = {
	.name		= "snbep_uncore",
	.id_table	= snbep_uncore_pci_ids,
};

#define NODE_ID_MASK	0x7

/* Each three-bit field in bits 0-23 of the GIDNIDMAP register corresponds to a Node ID. */
#define GIDNIDMAP(config, id)	(((config) >> (3 * (id))) & 0x7)
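/* e.g. if config = 0xfac688 (fields 0..7 hold 0..7), GIDNIDMAP(config, 3) = 3 */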
1381 
upi_nodeid_groupid(struct pci_dev * ubox_dev,int nodeid_loc,int idmap_loc,int * nodeid,int * groupid)1382 static int upi_nodeid_groupid(struct pci_dev *ubox_dev, int nodeid_loc, int idmap_loc,
1383 			      int *nodeid, int *groupid)
1384 {
1385 	int ret;
1386 
1387 	/* get the Node ID of the local register */
1388 	ret = pci_read_config_dword(ubox_dev, nodeid_loc, nodeid);
1389 	if (ret)
1390 		goto err;
1391 
1392 	*nodeid = *nodeid & NODE_ID_MASK;
1393 	/* get the Node ID mapping */
1394 	ret = pci_read_config_dword(ubox_dev, idmap_loc, groupid);
1395 	if (ret)
1396 		goto err;
1397 err:
1398 	return ret;
1399 }
1400 
1401 /*
1402  * build pci bus to socket mapping
1403  */
snbep_pci2phy_map_init(int devid,int nodeid_loc,int idmap_loc,bool reverse)1404 static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool reverse)
1405 {
1406 	struct pci_dev *ubox_dev = NULL;
1407 	int i, bus, nodeid, segment, die_id;
1408 	struct pci2phy_map *map;
1409 	int err = 0;
1410 	u32 config = 0;
1411 
1412 	while (1) {
1413 		/* find the UBOX device */
1414 		ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
1415 		if (!ubox_dev)
1416 			break;
1417 		bus = ubox_dev->bus->number;
1418 		/*
1419 		 * The nodeid and idmap registers only contain enough
1420 		 * information to handle 8 nodes.  On systems with more
1421 		 * than 8 nodes, we need to rely on NUMA information,
1422 		 * filled in from BIOS supplied information, to determine
1423 		 * the topology.
1424 		 */
1425 		if (nr_node_ids <= 8) {
1426 			err = upi_nodeid_groupid(ubox_dev, nodeid_loc, idmap_loc,
1427 						 &nodeid, &config);
1428 			if (err)
1429 				break;
1430 
1431 			segment = pci_domain_nr(ubox_dev->bus);
1432 			raw_spin_lock(&pci2phy_map_lock);
1433 			map = __find_pci2phy_map(segment);
1434 			if (!map) {
1435 				raw_spin_unlock(&pci2phy_map_lock);
1436 				err = -ENOMEM;
1437 				break;
1438 			}
1439 
1440 			/*
1441 			 * every three bits in the Node ID mapping register maps
1442 			 * to a particular node.
1443 			 */
1444 			for (i = 0; i < 8; i++) {
1445 				if (nodeid == GIDNIDMAP(config, i)) {
1446 					if (topology_max_die_per_package() > 1)
1447 						die_id = i;
1448 					else
1449 						die_id = topology_phys_to_logical_pkg(i);
1450 					if (die_id < 0)
1451 						die_id = -ENODEV;
1452 					map->pbus_to_dieid[bus] = die_id;
1453 					break;
1454 				}
1455 			}
1456 			raw_spin_unlock(&pci2phy_map_lock);
1457 		} else {
1458 			segment = pci_domain_nr(ubox_dev->bus);
1459 			raw_spin_lock(&pci2phy_map_lock);
1460 			map = __find_pci2phy_map(segment);
1461 			if (!map) {
1462 				raw_spin_unlock(&pci2phy_map_lock);
1463 				err = -ENOMEM;
1464 				break;
1465 			}
1466 
1467 			map->pbus_to_dieid[bus] = die_id = uncore_device_to_die(ubox_dev);
1468 
1469 			raw_spin_unlock(&pci2phy_map_lock);
1470 
1471 			if (WARN_ON_ONCE(die_id == -1)) {
1472 				err = -EINVAL;
1473 				break;
1474 			}
1475 		}
1476 	}
1477 
1478 	if (!err) {
1479 		/*
1480 		 * For PCI bus with no UBOX device, find the next bus
1481 		 * that has UBOX device and use its mapping.
1482 		 */
1483 		raw_spin_lock(&pci2phy_map_lock);
1484 		list_for_each_entry(map, &pci2phy_map_head, list) {
1485 			i = -1;
1486 			if (reverse) {
1487 				for (bus = 255; bus >= 0; bus--) {
1488 					if (map->pbus_to_dieid[bus] != -1)
1489 						i = map->pbus_to_dieid[bus];
1490 					else
1491 						map->pbus_to_dieid[bus] = i;
1492 				}
1493 			} else {
1494 				for (bus = 0; bus <= 255; bus++) {
1495 					if (map->pbus_to_dieid[bus] != -1)
1496 						i = map->pbus_to_dieid[bus];
1497 					else
1498 						map->pbus_to_dieid[bus] = i;
1499 				}
1500 			}
1501 		}
1502 		raw_spin_unlock(&pci2phy_map_lock);
1503 	}
1504 
1505 	pci_dev_put(ubox_dev);
1506 
1507 	return pcibios_err_to_errno(err);
1508 }
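
/*
 * Illustrative sketch of the gap-filling pass above, under the assumption
 * of a 256-entry bus map initialised to -1: buses that host a UBOX already
 * carry a die id, and every other bus inherits the id of the most recently
 * seen UBOX bus. The "reverse" variant simply walks from bus 255 downwards,
 * so gaps inherit from the next higher UBOX bus instead. Not kernel code.
 */
static void __maybe_unused pbus_fill_example(int pbus_to_dieid[256])
{
	int die = -1, bus;

	for (bus = 0; bus <= 255; bus++) {
		if (pbus_to_dieid[bus] != -1)
			die = pbus_to_dieid[bus];	/* bus with a UBOX */
		else
			pbus_to_dieid[bus] = die;	/* gap: reuse last id */
	}
}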
1509 
1510 int snbep_uncore_pci_init(void)
1511 {
1512 	int ret = snbep_pci2phy_map_init(0x3ce0, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
1513 	if (ret)
1514 		return ret;
1515 	uncore_pci_uncores = snbep_pci_uncores;
1516 	uncore_pci_driver = &snbep_uncore_pci_driver;
1517 	return 0;
1518 }
1519 /* end of Sandy Bridge-EP uncore support */
1520 
1521 /* IvyTown uncore support */
1522 static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box)
1523 {
1524 	unsigned msr = uncore_msr_box_ctl(box);
1525 	if (msr)
1526 		wrmsrl(msr, IVBEP_PMON_BOX_CTL_INT);
1527 }
1528 
1529 static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box)
1530 {
1531 	struct pci_dev *pdev = box->pci_dev;
1532 
1533 	pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
1534 }
1535 
1536 #define IVBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
1537 	.init_box	= ivbep_uncore_msr_init_box,		\
1538 	.disable_box	= snbep_uncore_msr_disable_box,		\
1539 	.enable_box	= snbep_uncore_msr_enable_box,		\
1540 	.disable_event	= snbep_uncore_msr_disable_event,	\
1541 	.enable_event	= snbep_uncore_msr_enable_event,	\
1542 	.read_counter	= uncore_msr_read_counter
1543 
1544 static struct intel_uncore_ops ivbep_uncore_msr_ops = {
1545 	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
1546 };
1547 
1548 static struct intel_uncore_ops ivbep_uncore_pci_ops = {
1549 	.init_box	= ivbep_uncore_pci_init_box,
1550 	.disable_box	= snbep_uncore_pci_disable_box,
1551 	.enable_box	= snbep_uncore_pci_enable_box,
1552 	.disable_event	= snbep_uncore_pci_disable_event,
1553 	.enable_event	= snbep_uncore_pci_enable_event,
1554 	.read_counter	= snbep_uncore_pci_read_counter,
1555 };
1556 
1557 #define IVBEP_UNCORE_PCI_COMMON_INIT()				\
1558 	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
1559 	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
1560 	.event_mask	= IVBEP_PMON_RAW_EVENT_MASK,		\
1561 	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
1562 	.ops		= &ivbep_uncore_pci_ops,			\
1563 	.format_group	= &ivbep_uncore_format_group
1564 
1565 static struct attribute *ivbep_uncore_formats_attr[] = {
1566 	&format_attr_event.attr,
1567 	&format_attr_umask.attr,
1568 	&format_attr_edge.attr,
1569 	&format_attr_inv.attr,
1570 	&format_attr_thresh8.attr,
1571 	NULL,
1572 };
1573 
1574 static struct attribute *ivbep_uncore_ubox_formats_attr[] = {
1575 	&format_attr_event.attr,
1576 	&format_attr_umask.attr,
1577 	&format_attr_edge.attr,
1578 	&format_attr_inv.attr,
1579 	&format_attr_thresh5.attr,
1580 	NULL,
1581 };
1582 
1583 static struct attribute *ivbep_uncore_cbox_formats_attr[] = {
1584 	&format_attr_event.attr,
1585 	&format_attr_umask.attr,
1586 	&format_attr_edge.attr,
1587 	&format_attr_tid_en.attr,
1588 	&format_attr_thresh8.attr,
1589 	&format_attr_filter_tid.attr,
1590 	&format_attr_filter_link.attr,
1591 	&format_attr_filter_state2.attr,
1592 	&format_attr_filter_nid2.attr,
1593 	&format_attr_filter_opc2.attr,
1594 	&format_attr_filter_nc.attr,
1595 	&format_attr_filter_c6.attr,
1596 	&format_attr_filter_isoc.attr,
1597 	NULL,
1598 };
1599 
1600 static struct attribute *ivbep_uncore_pcu_formats_attr[] = {
1601 	&format_attr_event.attr,
1602 	&format_attr_occ_sel.attr,
1603 	&format_attr_edge.attr,
1604 	&format_attr_thresh5.attr,
1605 	&format_attr_occ_invert.attr,
1606 	&format_attr_occ_edge.attr,
1607 	&format_attr_filter_band0.attr,
1608 	&format_attr_filter_band1.attr,
1609 	&format_attr_filter_band2.attr,
1610 	&format_attr_filter_band3.attr,
1611 	NULL,
1612 };
1613 
1614 static struct attribute *ivbep_uncore_qpi_formats_attr[] = {
1615 	&format_attr_event_ext.attr,
1616 	&format_attr_umask.attr,
1617 	&format_attr_edge.attr,
1618 	&format_attr_thresh8.attr,
1619 	&format_attr_match_rds.attr,
1620 	&format_attr_match_rnid30.attr,
1621 	&format_attr_match_rnid4.attr,
1622 	&format_attr_match_dnid.attr,
1623 	&format_attr_match_mc.attr,
1624 	&format_attr_match_opc.attr,
1625 	&format_attr_match_vnw.attr,
1626 	&format_attr_match0.attr,
1627 	&format_attr_match1.attr,
1628 	&format_attr_mask_rds.attr,
1629 	&format_attr_mask_rnid30.attr,
1630 	&format_attr_mask_rnid4.attr,
1631 	&format_attr_mask_dnid.attr,
1632 	&format_attr_mask_mc.attr,
1633 	&format_attr_mask_opc.attr,
1634 	&format_attr_mask_vnw.attr,
1635 	&format_attr_mask0.attr,
1636 	&format_attr_mask1.attr,
1637 	NULL,
1638 };
1639 
1640 static const struct attribute_group ivbep_uncore_format_group = {
1641 	.name = "format",
1642 	.attrs = ivbep_uncore_formats_attr,
1643 };
1644 
1645 static const struct attribute_group ivbep_uncore_ubox_format_group = {
1646 	.name = "format",
1647 	.attrs = ivbep_uncore_ubox_formats_attr,
1648 };
1649 
1650 static const struct attribute_group ivbep_uncore_cbox_format_group = {
1651 	.name = "format",
1652 	.attrs = ivbep_uncore_cbox_formats_attr,
1653 };
1654 
1655 static const struct attribute_group ivbep_uncore_pcu_format_group = {
1656 	.name = "format",
1657 	.attrs = ivbep_uncore_pcu_formats_attr,
1658 };
1659 
1660 static const struct attribute_group ivbep_uncore_qpi_format_group = {
1661 	.name = "format",
1662 	.attrs = ivbep_uncore_qpi_formats_attr,
1663 };
1664 
1665 static struct intel_uncore_type ivbep_uncore_ubox = {
1666 	.name		= "ubox",
1667 	.num_counters   = 2,
1668 	.num_boxes	= 1,
1669 	.perf_ctr_bits	= 44,
1670 	.fixed_ctr_bits	= 48,
1671 	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
1672 	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
1673 	.event_mask	= IVBEP_U_MSR_PMON_RAW_EVENT_MASK,
1674 	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
1675 	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
1676 	.ops		= &ivbep_uncore_msr_ops,
1677 	.format_group	= &ivbep_uncore_ubox_format_group,
1678 };
1679 
1680 static struct extra_reg ivbep_uncore_cbox_extra_regs[] = {
1681 	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
1682 				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
1683 	SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
1684 	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
1685 	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
1686 	SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
1687 	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
1688 	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
1689 	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
1690 	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
1691 	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
1692 	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
1693 	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
1694 	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
1695 	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
1696 	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
1697 	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
1698 	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
1699 	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
1700 	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
1701 	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
1702 	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
1703 	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
1704 	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
1705 	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
1706 	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
1707 	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
1708 	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
1709 	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
1710 	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
1711 	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
1712 	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
1713 	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
1714 	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
1715 	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
1716 	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
1717 	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
1718 	EVENT_EXTRA_END
1719 };
1720 
1721 static u64 ivbep_cbox_filter_mask(int fields)
1722 {
1723 	u64 mask = 0;
1724 
1725 	if (fields & 0x1)
1726 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_TID;
1727 	if (fields & 0x2)
1728 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK;
1729 	if (fields & 0x4)
1730 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
1731 	if (fields & 0x8)
1732 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NID;
1733 	if (fields & 0x10) {
1734 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
1735 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NC;
1736 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_C6;
1737 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
1738 	}
1739 
1740 	return mask;
1741 }
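
/*
 * Worked example for the mask builder above: an event whose config matches
 * the idx 0x1 (tid) and idx 0x4 (state) extra_reg entries ends up with
 * fields == 0x5, so ivbep_cbox_filter_mask(0x5) returns FILTER_TID |
 * FILTER_STATE and any other bits the user put in config1 are discarded.
 */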
1742 
1743 static struct event_constraint *
1744 ivbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
1745 {
1746 	return __snbep_cbox_get_constraint(box, event, ivbep_cbox_filter_mask);
1747 }
1748 
1749 static int ivbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1750 {
1751 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1752 	struct extra_reg *er;
1753 	int idx = 0;
1754 
1755 	for (er = ivbep_uncore_cbox_extra_regs; er->msr; er++) {
1756 		if (er->event != (event->hw.config & er->config_mask))
1757 			continue;
1758 		idx |= er->idx;
1759 	}
1760 
1761 	if (idx) {
1762 		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
1763 			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1764 		reg1->config = event->attr.config1 & ivbep_cbox_filter_mask(idx);
1765 		reg1->idx = idx;
1766 	}
1767 	return 0;
1768 }
1769 
1770 static void ivbep_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1771 {
1772 	struct hw_perf_event *hwc = &event->hw;
1773 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1774 
1775 	if (reg1->idx != EXTRA_REG_NONE) {
1776 		u64 filter = uncore_shared_reg_config(box, 0);
1777 		wrmsrl(reg1->reg, filter & 0xffffffff);
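		/*
		 * The high half goes to the second filter register, which
		 * on IVB-EP appears to live six MSRs above the first one;
		 * compare reg + 1 in the Haswell-EP variant further below.
		 */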
1778 		wrmsrl(reg1->reg + 6, filter >> 32);
1779 	}
1780 
1781 	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
1782 }
1783 
1784 static struct intel_uncore_ops ivbep_uncore_cbox_ops = {
1785 	.init_box		= ivbep_uncore_msr_init_box,
1786 	.disable_box		= snbep_uncore_msr_disable_box,
1787 	.enable_box		= snbep_uncore_msr_enable_box,
1788 	.disable_event		= snbep_uncore_msr_disable_event,
1789 	.enable_event		= ivbep_cbox_enable_event,
1790 	.read_counter		= uncore_msr_read_counter,
1791 	.hw_config		= ivbep_cbox_hw_config,
1792 	.get_constraint		= ivbep_cbox_get_constraint,
1793 	.put_constraint		= snbep_cbox_put_constraint,
1794 };
1795 
1796 static struct intel_uncore_type ivbep_uncore_cbox = {
1797 	.name			= "cbox",
1798 	.num_counters		= 4,
1799 	.num_boxes		= 15,
1800 	.perf_ctr_bits		= 44,
1801 	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
1802 	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
1803 	.event_mask		= IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
1804 	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
1805 	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
1806 	.num_shared_regs	= 1,
1807 	.constraints		= snbep_uncore_cbox_constraints,
1808 	.ops			= &ivbep_uncore_cbox_ops,
1809 	.format_group		= &ivbep_uncore_cbox_format_group,
1810 };
1811 
1812 static struct intel_uncore_ops ivbep_uncore_pcu_ops = {
1813 	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
1814 	.hw_config		= snbep_pcu_hw_config,
1815 	.get_constraint		= snbep_pcu_get_constraint,
1816 	.put_constraint		= snbep_pcu_put_constraint,
1817 };
1818 
1819 static struct intel_uncore_type ivbep_uncore_pcu = {
1820 	.name			= "pcu",
1821 	.num_counters		= 4,
1822 	.num_boxes		= 1,
1823 	.perf_ctr_bits		= 48,
1824 	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
1825 	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
1826 	.event_mask		= IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
1827 	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
1828 	.num_shared_regs	= 1,
1829 	.ops			= &ivbep_uncore_pcu_ops,
1830 	.format_group		= &ivbep_uncore_pcu_format_group,
1831 };
1832 
1833 static struct intel_uncore_type *ivbep_msr_uncores[] = {
1834 	&ivbep_uncore_ubox,
1835 	&ivbep_uncore_cbox,
1836 	&ivbep_uncore_pcu,
1837 	NULL,
1838 };
1839 
1840 void ivbep_uncore_cpu_init(void)
1841 {
1842 	if (ivbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
1843 		ivbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
1844 	uncore_msr_uncores = ivbep_msr_uncores;
1845 }
1846 
1847 static struct intel_uncore_type ivbep_uncore_ha = {
1848 	.name		= "ha",
1849 	.num_counters   = 4,
1850 	.num_boxes	= 2,
1851 	.perf_ctr_bits	= 48,
1852 	IVBEP_UNCORE_PCI_COMMON_INIT(),
1853 };
1854 
1855 static struct intel_uncore_type ivbep_uncore_imc = {
1856 	.name		= "imc",
1857 	.num_counters   = 4,
1858 	.num_boxes	= 8,
1859 	.perf_ctr_bits	= 48,
1860 	.fixed_ctr_bits	= 48,
1861 	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
1862 	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
1863 	.event_descs	= snbep_uncore_imc_events,
1864 	IVBEP_UNCORE_PCI_COMMON_INIT(),
1865 };
1866 
1867 /* registers in IRP boxes are not properly aligned */
1868 static unsigned ivbep_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
1869 static unsigned ivbep_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};
1870 
1871 static void ivbep_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1872 {
1873 	struct pci_dev *pdev = box->pci_dev;
1874 	struct hw_perf_event *hwc = &event->hw;
1875 
1876 	pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx],
1877 			       hwc->config | SNBEP_PMON_CTL_EN);
1878 }
1879 
1880 static void ivbep_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1881 {
1882 	struct pci_dev *pdev = box->pci_dev;
1883 	struct hw_perf_event *hwc = &event->hw;
1884 
1885 	pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], hwc->config);
1886 }
1887 
1888 static u64 ivbep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
1889 {
1890 	struct pci_dev *pdev = box->pci_dev;
1891 	struct hw_perf_event *hwc = &event->hw;
1892 	u64 count = 0;
1893 
1894 	pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
1895 	pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
1896 
1897 	return count;
1898 }
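
/*
 * The two config-space reads above fill the low and high halves of 'count'
 * through a pointer cast, which relies on x86 being little-endian. An
 * equivalent, layout-independent sketch:
 *
 *	u32 lo, hi;
 *
 *	pci_read_config_dword(pdev, reg, &lo);
 *	pci_read_config_dword(pdev, reg + 4, &hi);
 *	count = ((u64)hi << 32) | lo;
 */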
1899 
1900 static struct intel_uncore_ops ivbep_uncore_irp_ops = {
1901 	.init_box	= ivbep_uncore_pci_init_box,
1902 	.disable_box	= snbep_uncore_pci_disable_box,
1903 	.enable_box	= snbep_uncore_pci_enable_box,
1904 	.disable_event	= ivbep_uncore_irp_disable_event,
1905 	.enable_event	= ivbep_uncore_irp_enable_event,
1906 	.read_counter	= ivbep_uncore_irp_read_counter,
1907 };
1908 
1909 static struct intel_uncore_type ivbep_uncore_irp = {
1910 	.name			= "irp",
1911 	.num_counters		= 4,
1912 	.num_boxes		= 1,
1913 	.perf_ctr_bits		= 48,
1914 	.event_mask		= IVBEP_PMON_RAW_EVENT_MASK,
1915 	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
1916 	.ops			= &ivbep_uncore_irp_ops,
1917 	.format_group		= &ivbep_uncore_format_group,
1918 };
1919 
1920 static struct intel_uncore_ops ivbep_uncore_qpi_ops = {
1921 	.init_box	= ivbep_uncore_pci_init_box,
1922 	.disable_box	= snbep_uncore_pci_disable_box,
1923 	.enable_box	= snbep_uncore_pci_enable_box,
1924 	.disable_event	= snbep_uncore_pci_disable_event,
1925 	.enable_event	= snbep_qpi_enable_event,
1926 	.read_counter	= snbep_uncore_pci_read_counter,
1927 	.hw_config	= snbep_qpi_hw_config,
1928 	.get_constraint	= uncore_get_constraint,
1929 	.put_constraint	= uncore_put_constraint,
1930 };
1931 
1932 static struct intel_uncore_type ivbep_uncore_qpi = {
1933 	.name			= "qpi",
1934 	.num_counters		= 4,
1935 	.num_boxes		= 3,
1936 	.perf_ctr_bits		= 48,
1937 	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
1938 	.event_ctl		= SNBEP_PCI_PMON_CTL0,
1939 	.event_mask		= IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
1940 	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
1941 	.num_shared_regs	= 1,
1942 	.ops			= &ivbep_uncore_qpi_ops,
1943 	.format_group		= &ivbep_uncore_qpi_format_group,
1944 };
1945 
1946 static struct intel_uncore_type ivbep_uncore_r2pcie = {
1947 	.name		= "r2pcie",
1948 	.num_counters   = 4,
1949 	.num_boxes	= 1,
1950 	.perf_ctr_bits	= 44,
1951 	.constraints	= snbep_uncore_r2pcie_constraints,
1952 	IVBEP_UNCORE_PCI_COMMON_INIT(),
1953 };
1954 
1955 static struct intel_uncore_type ivbep_uncore_r3qpi = {
1956 	.name		= "r3qpi",
1957 	.num_counters   = 3,
1958 	.num_boxes	= 2,
1959 	.perf_ctr_bits	= 44,
1960 	.constraints	= snbep_uncore_r3qpi_constraints,
1961 	IVBEP_UNCORE_PCI_COMMON_INIT(),
1962 };
1963 
1964 enum {
1965 	IVBEP_PCI_UNCORE_HA,
1966 	IVBEP_PCI_UNCORE_IMC,
1967 	IVBEP_PCI_UNCORE_IRP,
1968 	IVBEP_PCI_UNCORE_QPI,
1969 	IVBEP_PCI_UNCORE_R2PCIE,
1970 	IVBEP_PCI_UNCORE_R3QPI,
1971 };
1972 
1973 static struct intel_uncore_type *ivbep_pci_uncores[] = {
1974 	[IVBEP_PCI_UNCORE_HA]	= &ivbep_uncore_ha,
1975 	[IVBEP_PCI_UNCORE_IMC]	= &ivbep_uncore_imc,
1976 	[IVBEP_PCI_UNCORE_IRP]	= &ivbep_uncore_irp,
1977 	[IVBEP_PCI_UNCORE_QPI]	= &ivbep_uncore_qpi,
1978 	[IVBEP_PCI_UNCORE_R2PCIE]	= &ivbep_uncore_r2pcie,
1979 	[IVBEP_PCI_UNCORE_R3QPI]	= &ivbep_uncore_r3qpi,
1980 	NULL,
1981 };
1982 
1983 static const struct pci_device_id ivbep_uncore_pci_ids[] = {
1984 	{ /* Home Agent 0 */
1985 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
1986 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 0),
1987 	},
1988 	{ /* Home Agent 1 */
1989 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
1990 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 1),
1991 	},
1992 	{ /* MC0 Channel 0 */
1993 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
1994 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 0),
1995 	},
1996 	{ /* MC0 Channel 1 */
1997 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
1998 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 1),
1999 	},
2000 	{ /* MC0 Channel 3 */
2001 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
2002 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 2),
2003 	},
2004 	{ /* MC0 Channel 4 */
2005 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
2006 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 3),
2007 	},
2008 	{ /* MC1 Channel 0 */
2009 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
2010 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 4),
2011 	},
2012 	{ /* MC1 Channel 1 */
2013 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
2014 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 5),
2015 	},
2016 	{ /* MC1 Channel 3 */
2017 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
2018 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 6),
2019 	},
2020 	{ /* MC1 Channel 4 */
2021 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
2022 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 7),
2023 	},
2024 	{ /* IRP */
2025 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
2026 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IRP, 0),
2027 	},
2028 	{ /* QPI0 Port 0 */
2029 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
2030 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 0),
2031 	},
2032 	{ /* QPI0 Port 1 */
2033 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
2034 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 1),
2035 	},
2036 	{ /* QPI1 Port 2 */
2037 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
2038 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 2),
2039 	},
2040 	{ /* R2PCIe */
2041 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
2042 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R2PCIE, 0),
2043 	},
2044 	{ /* R3QPI0 Link 0 */
2045 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
2046 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 0),
2047 	},
2048 	{ /* R3QPI0 Link 1 */
2049 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
2050 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 1),
2051 	},
2052 	{ /* R3QPI1 Link 2 */
2053 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
2054 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 2),
2055 	},
2056 	{ /* QPI Port 0 filter  */
2057 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
2058 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
2059 						   SNBEP_PCI_QPI_PORT0_FILTER),
2060 	},
2061 	{ /* QPI Port 1 filter  */
2062 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
2063 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
2064 						   SNBEP_PCI_QPI_PORT1_FILTER),
2065 	},
2066 	{ /* end: all zeroes */ }
2067 };
2068 
2069 static struct pci_driver ivbep_uncore_pci_driver = {
2070 	.name		= "ivbep_uncore",
2071 	.id_table	= ivbep_uncore_pci_ids,
2072 };
2073 
2074 int ivbep_uncore_pci_init(void)
2075 {
2076 	int ret = snbep_pci2phy_map_init(0x0e1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
2077 	if (ret)
2078 		return ret;
2079 	uncore_pci_uncores = ivbep_pci_uncores;
2080 	uncore_pci_driver = &ivbep_uncore_pci_driver;
2081 	return 0;
2082 }
2083 /* end of IvyTown uncore support */
2084 
2085 /* KNL uncore support */
2086 static struct attribute *knl_uncore_ubox_formats_attr[] = {
2087 	&format_attr_event.attr,
2088 	&format_attr_umask.attr,
2089 	&format_attr_edge.attr,
2090 	&format_attr_tid_en.attr,
2091 	&format_attr_inv.attr,
2092 	&format_attr_thresh5.attr,
2093 	NULL,
2094 };
2095 
2096 static const struct attribute_group knl_uncore_ubox_format_group = {
2097 	.name = "format",
2098 	.attrs = knl_uncore_ubox_formats_attr,
2099 };
2100 
2101 static struct intel_uncore_type knl_uncore_ubox = {
2102 	.name			= "ubox",
2103 	.num_counters		= 2,
2104 	.num_boxes		= 1,
2105 	.perf_ctr_bits		= 48,
2106 	.fixed_ctr_bits		= 48,
2107 	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
2108 	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
2109 	.event_mask		= KNL_U_MSR_PMON_RAW_EVENT_MASK,
2110 	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
2111 	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
2112 	.ops			= &snbep_uncore_msr_ops,
2113 	.format_group		= &knl_uncore_ubox_format_group,
2114 };
2115 
2116 static struct attribute *knl_uncore_cha_formats_attr[] = {
2117 	&format_attr_event.attr,
2118 	&format_attr_umask.attr,
2119 	&format_attr_qor.attr,
2120 	&format_attr_edge.attr,
2121 	&format_attr_tid_en.attr,
2122 	&format_attr_inv.attr,
2123 	&format_attr_thresh8.attr,
2124 	&format_attr_filter_tid4.attr,
2125 	&format_attr_filter_link3.attr,
2126 	&format_attr_filter_state4.attr,
2127 	&format_attr_filter_local.attr,
2128 	&format_attr_filter_all_op.attr,
2129 	&format_attr_filter_nnm.attr,
2130 	&format_attr_filter_opc3.attr,
2131 	&format_attr_filter_nc.attr,
2132 	&format_attr_filter_isoc.attr,
2133 	NULL,
2134 };
2135 
2136 static const struct attribute_group knl_uncore_cha_format_group = {
2137 	.name = "format",
2138 	.attrs = knl_uncore_cha_formats_attr,
2139 };
2140 
2141 static struct event_constraint knl_uncore_cha_constraints[] = {
2142 	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
2143 	UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
2144 	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
2145 	EVENT_CONSTRAINT_END
2146 };
2147 
2148 static struct extra_reg knl_uncore_cha_extra_regs[] = {
2149 	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
2150 				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
2151 	SNBEP_CBO_EVENT_EXTRA_REG(0x3d, 0xff, 0x2),
2152 	SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x4),
2153 	SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x4),
2154 	EVENT_EXTRA_END
2155 };
2156 
2157 static u64 knl_cha_filter_mask(int fields)
2158 {
2159 	u64 mask = 0;
2160 
2161 	if (fields & 0x1)
2162 		mask |= KNL_CHA_MSR_PMON_BOX_FILTER_TID;
2163 	if (fields & 0x2)
2164 		mask |= KNL_CHA_MSR_PMON_BOX_FILTER_STATE;
2165 	if (fields & 0x4)
2166 		mask |= KNL_CHA_MSR_PMON_BOX_FILTER_OP;
2167 	return mask;
2168 }
2169 
2170 static struct event_constraint *
2171 knl_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
2172 {
2173 	return __snbep_cbox_get_constraint(box, event, knl_cha_filter_mask);
2174 }
2175 
2176 static int knl_cha_hw_config(struct intel_uncore_box *box,
2177 			     struct perf_event *event)
2178 {
2179 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2180 	struct extra_reg *er;
2181 	int idx = 0;
2182 
2183 	for (er = knl_uncore_cha_extra_regs; er->msr; er++) {
2184 		if (er->event != (event->hw.config & er->config_mask))
2185 			continue;
2186 		idx |= er->idx;
2187 	}
2188 
2189 	if (idx) {
2190 		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
2191 			    KNL_CHA_MSR_OFFSET * box->pmu->pmu_idx;
2192 		reg1->config = event->attr.config1 & knl_cha_filter_mask(idx);
2193 
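		/*
		 * Unconditionally open the node-match fields so remote-node,
		 * local-node and NNC traffic are all counted by default; the
		 * user-supplied filter only narrows tid/state/opcode.
		 */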
2194 		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE;
2195 		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE;
2196 		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_NNC;
2197 		reg1->idx = idx;
2198 	}
2199 	return 0;
2200 }
2201 
2202 static void hswep_cbox_enable_event(struct intel_uncore_box *box,
2203 				    struct perf_event *event);
2204 
2205 static struct intel_uncore_ops knl_uncore_cha_ops = {
2206 	.init_box		= snbep_uncore_msr_init_box,
2207 	.disable_box		= snbep_uncore_msr_disable_box,
2208 	.enable_box		= snbep_uncore_msr_enable_box,
2209 	.disable_event		= snbep_uncore_msr_disable_event,
2210 	.enable_event		= hswep_cbox_enable_event,
2211 	.read_counter		= uncore_msr_read_counter,
2212 	.hw_config		= knl_cha_hw_config,
2213 	.get_constraint		= knl_cha_get_constraint,
2214 	.put_constraint		= snbep_cbox_put_constraint,
2215 };
2216 
2217 static struct intel_uncore_type knl_uncore_cha = {
2218 	.name			= "cha",
2219 	.num_counters		= 4,
2220 	.num_boxes		= 38,
2221 	.perf_ctr_bits		= 48,
2222 	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
2223 	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
2224 	.event_mask		= KNL_CHA_MSR_PMON_RAW_EVENT_MASK,
2225 	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
2226 	.msr_offset		= KNL_CHA_MSR_OFFSET,
2227 	.num_shared_regs	= 1,
2228 	.constraints		= knl_uncore_cha_constraints,
2229 	.ops			= &knl_uncore_cha_ops,
2230 	.format_group		= &knl_uncore_cha_format_group,
2231 };
2232 
2233 static struct attribute *knl_uncore_pcu_formats_attr[] = {
2234 	&format_attr_event2.attr,
2235 	&format_attr_use_occ_ctr.attr,
2236 	&format_attr_occ_sel.attr,
2237 	&format_attr_edge.attr,
2238 	&format_attr_tid_en.attr,
2239 	&format_attr_inv.attr,
2240 	&format_attr_thresh6.attr,
2241 	&format_attr_occ_invert.attr,
2242 	&format_attr_occ_edge_det.attr,
2243 	NULL,
2244 };
2245 
2246 static const struct attribute_group knl_uncore_pcu_format_group = {
2247 	.name = "format",
2248 	.attrs = knl_uncore_pcu_formats_attr,
2249 };
2250 
2251 static struct intel_uncore_type knl_uncore_pcu = {
2252 	.name			= "pcu",
2253 	.num_counters		= 4,
2254 	.num_boxes		= 1,
2255 	.perf_ctr_bits		= 48,
2256 	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
2257 	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
2258 	.event_mask		= KNL_PCU_MSR_PMON_RAW_EVENT_MASK,
2259 	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
2260 	.ops			= &snbep_uncore_msr_ops,
2261 	.format_group		= &knl_uncore_pcu_format_group,
2262 };
2263 
2264 static struct intel_uncore_type *knl_msr_uncores[] = {
2265 	&knl_uncore_ubox,
2266 	&knl_uncore_cha,
2267 	&knl_uncore_pcu,
2268 	NULL,
2269 };
2270 
2271 void knl_uncore_cpu_init(void)
2272 {
2273 	uncore_msr_uncores = knl_msr_uncores;
2274 }
2275 
2276 static void knl_uncore_imc_enable_box(struct intel_uncore_box *box)
2277 {
2278 	struct pci_dev *pdev = box->pci_dev;
2279 	int box_ctl = uncore_pci_box_ctl(box);
2280 
2281 	pci_write_config_dword(pdev, box_ctl, 0);
2282 }
2283 
2284 static void knl_uncore_imc_enable_event(struct intel_uncore_box *box,
2285 					struct perf_event *event)
2286 {
2287 	struct pci_dev *pdev = box->pci_dev;
2288 	struct hw_perf_event *hwc = &event->hw;
2289 
2290 	if ((event->attr.config & SNBEP_PMON_CTL_EV_SEL_MASK)
2291 							== UNCORE_FIXED_EVENT)
2292 		pci_write_config_dword(pdev, hwc->config_base,
2293 				       hwc->config | KNL_PMON_FIXED_CTL_EN);
2294 	else
2295 		pci_write_config_dword(pdev, hwc->config_base,
2296 				       hwc->config | SNBEP_PMON_CTL_EN);
2297 }
2298 
2299 static struct intel_uncore_ops knl_uncore_imc_ops = {
2300 	.init_box	= snbep_uncore_pci_init_box,
2301 	.disable_box	= snbep_uncore_pci_disable_box,
2302 	.enable_box	= knl_uncore_imc_enable_box,
2303 	.read_counter	= snbep_uncore_pci_read_counter,
2304 	.enable_event	= knl_uncore_imc_enable_event,
2305 	.disable_event	= snbep_uncore_pci_disable_event,
2306 };
2307 
2308 static struct intel_uncore_type knl_uncore_imc_uclk = {
2309 	.name			= "imc_uclk",
2310 	.num_counters		= 4,
2311 	.num_boxes		= 2,
2312 	.perf_ctr_bits		= 48,
2313 	.fixed_ctr_bits		= 48,
2314 	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
2315 	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
2316 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
2317 	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
2318 	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
2319 	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
2320 	.ops			= &knl_uncore_imc_ops,
2321 	.format_group		= &snbep_uncore_format_group,
2322 };
2323 
2324 static struct intel_uncore_type knl_uncore_imc_dclk = {
2325 	.name			= "imc",
2326 	.num_counters		= 4,
2327 	.num_boxes		= 6,
2328 	.perf_ctr_bits		= 48,
2329 	.fixed_ctr_bits		= 48,
2330 	.perf_ctr		= KNL_MC0_CH0_MSR_PMON_CTR0_LOW,
2331 	.event_ctl		= KNL_MC0_CH0_MSR_PMON_CTL0,
2332 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
2333 	.fixed_ctr		= KNL_MC0_CH0_MSR_PMON_FIXED_LOW,
2334 	.fixed_ctl		= KNL_MC0_CH0_MSR_PMON_FIXED_CTL,
2335 	.box_ctl		= KNL_MC0_CH0_MSR_PMON_BOX_CTL,
2336 	.ops			= &knl_uncore_imc_ops,
2337 	.format_group		= &snbep_uncore_format_group,
2338 };
2339 
2340 static struct intel_uncore_type knl_uncore_edc_uclk = {
2341 	.name			= "edc_uclk",
2342 	.num_counters		= 4,
2343 	.num_boxes		= 8,
2344 	.perf_ctr_bits		= 48,
2345 	.fixed_ctr_bits		= 48,
2346 	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
2347 	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
2348 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
2349 	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
2350 	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
2351 	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
2352 	.ops			= &knl_uncore_imc_ops,
2353 	.format_group		= &snbep_uncore_format_group,
2354 };
2355 
2356 static struct intel_uncore_type knl_uncore_edc_eclk = {
2357 	.name			= "edc_eclk",
2358 	.num_counters		= 4,
2359 	.num_boxes		= 8,
2360 	.perf_ctr_bits		= 48,
2361 	.fixed_ctr_bits		= 48,
2362 	.perf_ctr		= KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW,
2363 	.event_ctl		= KNL_EDC0_ECLK_MSR_PMON_CTL0,
2364 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
2365 	.fixed_ctr		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW,
2366 	.fixed_ctl		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL,
2367 	.box_ctl		= KNL_EDC0_ECLK_MSR_PMON_BOX_CTL,
2368 	.ops			= &knl_uncore_imc_ops,
2369 	.format_group		= &snbep_uncore_format_group,
2370 };
2371 
2372 static struct event_constraint knl_uncore_m2pcie_constraints[] = {
2373 	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
2374 	EVENT_CONSTRAINT_END
2375 };
2376 
2377 static struct intel_uncore_type knl_uncore_m2pcie = {
2378 	.name		= "m2pcie",
2379 	.num_counters   = 4,
2380 	.num_boxes	= 1,
2381 	.perf_ctr_bits	= 48,
2382 	.constraints	= knl_uncore_m2pcie_constraints,
2383 	SNBEP_UNCORE_PCI_COMMON_INIT(),
2384 };
2385 
2386 static struct attribute *knl_uncore_irp_formats_attr[] = {
2387 	&format_attr_event.attr,
2388 	&format_attr_umask.attr,
2389 	&format_attr_qor.attr,
2390 	&format_attr_edge.attr,
2391 	&format_attr_inv.attr,
2392 	&format_attr_thresh8.attr,
2393 	NULL,
2394 };
2395 
2396 static const struct attribute_group knl_uncore_irp_format_group = {
2397 	.name = "format",
2398 	.attrs = knl_uncore_irp_formats_attr,
2399 };
2400 
2401 static struct intel_uncore_type knl_uncore_irp = {
2402 	.name			= "irp",
2403 	.num_counters		= 2,
2404 	.num_boxes		= 1,
2405 	.perf_ctr_bits		= 48,
2406 	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
2407 	.event_ctl		= SNBEP_PCI_PMON_CTL0,
2408 	.event_mask		= KNL_IRP_PCI_PMON_RAW_EVENT_MASK,
2409 	.box_ctl		= KNL_IRP_PCI_PMON_BOX_CTL,
2410 	.ops			= &snbep_uncore_pci_ops,
2411 	.format_group		= &knl_uncore_irp_format_group,
2412 };
2413 
2414 enum {
2415 	KNL_PCI_UNCORE_MC_UCLK,
2416 	KNL_PCI_UNCORE_MC_DCLK,
2417 	KNL_PCI_UNCORE_EDC_UCLK,
2418 	KNL_PCI_UNCORE_EDC_ECLK,
2419 	KNL_PCI_UNCORE_M2PCIE,
2420 	KNL_PCI_UNCORE_IRP,
2421 };
2422 
2423 static struct intel_uncore_type *knl_pci_uncores[] = {
2424 	[KNL_PCI_UNCORE_MC_UCLK]	= &knl_uncore_imc_uclk,
2425 	[KNL_PCI_UNCORE_MC_DCLK]	= &knl_uncore_imc_dclk,
2426 	[KNL_PCI_UNCORE_EDC_UCLK]	= &knl_uncore_edc_uclk,
2427 	[KNL_PCI_UNCORE_EDC_ECLK]	= &knl_uncore_edc_eclk,
2428 	[KNL_PCI_UNCORE_M2PCIE]		= &knl_uncore_m2pcie,
2429 	[KNL_PCI_UNCORE_IRP]		= &knl_uncore_irp,
2430 	NULL,
2431 };
2432 
2433 /*
2434  * KNL uses a common PCI device ID for multiple instances of an Uncore PMU
2435  * device type. Prior to KNL, each instance of a PMU device type had a unique
2436  * device ID.
2437  *
2438  *	PCI Device ID	Uncore PMU Devices
2439  *	----------------------------------
2440  *	0x7841		MC0 UClk, MC1 UClk
2441  *	0x7843		MC0 DClk CH 0, MC0 DClk CH 1, MC0 DClk CH 2,
2442  *			MC1 DClk CH 0, MC1 DClk CH 1, MC1 DClk CH 2
2443  *	0x7833		EDC0 UClk, EDC1 UClk, EDC2 UClk, EDC3 UClk,
2444  *			EDC4 UClk, EDC5 UClk, EDC6 UClk, EDC7 UClk
2445  *	0x7835		EDC0 EClk, EDC1 EClk, EDC2 EClk, EDC3 EClk,
2446  *			EDC4 EClk, EDC5 EClk, EDC6 EClk, EDC7 EClk
2447  *	0x7817		M2PCIe
2448  *	0x7814		IRP
2449  */
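
/*
 * Because the device ID alone is ambiguous here, each entry below also
 * encodes the expected PCI device/function via UNCORE_PCI_DEV_FULL_DATA.
 * For example, MC0 UClk and MC1 UClk are both device 0x7841, but sit at
 * devfn (10, 0) and (11, 0) respectively; the devfn part is what tells
 * the two boxes apart at probe time.
 */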
2450 
2451 static const struct pci_device_id knl_uncore_pci_ids[] = {
2452 	{ /* MC0 UClk */
2453 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
2454 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 0, KNL_PCI_UNCORE_MC_UCLK, 0),
2455 	},
2456 	{ /* MC1 UClk */
2457 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
2458 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 0, KNL_PCI_UNCORE_MC_UCLK, 1),
2459 	},
2460 	{ /* MC0 DClk CH 0 */
2461 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2462 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 2, KNL_PCI_UNCORE_MC_DCLK, 0),
2463 	},
2464 	{ /* MC0 DClk CH 1 */
2465 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2466 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 3, KNL_PCI_UNCORE_MC_DCLK, 1),
2467 	},
2468 	{ /* MC0 DClk CH 2 */
2469 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2470 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 4, KNL_PCI_UNCORE_MC_DCLK, 2),
2471 	},
2472 	{ /* MC1 DClk CH 0 */
2473 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2474 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 2, KNL_PCI_UNCORE_MC_DCLK, 3),
2475 	},
2476 	{ /* MC1 DClk CH 1 */
2477 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2478 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 3, KNL_PCI_UNCORE_MC_DCLK, 4),
2479 	},
2480 	{ /* MC1 DClk CH 2 */
2481 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2482 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 4, KNL_PCI_UNCORE_MC_DCLK, 5),
2483 	},
2484 	{ /* EDC0 UClk */
2485 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2486 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, KNL_PCI_UNCORE_EDC_UCLK, 0),
2487 	},
2488 	{ /* EDC1 UClk */
2489 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2490 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, KNL_PCI_UNCORE_EDC_UCLK, 1),
2491 	},
2492 	{ /* EDC2 UClk */
2493 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2494 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(17, 0, KNL_PCI_UNCORE_EDC_UCLK, 2),
2495 	},
2496 	{ /* EDC3 UClk */
2497 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2498 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, KNL_PCI_UNCORE_EDC_UCLK, 3),
2499 	},
2500 	{ /* EDC4 UClk */
2501 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2502 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(19, 0, KNL_PCI_UNCORE_EDC_UCLK, 4),
2503 	},
2504 	{ /* EDC5 UClk */
2505 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2506 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(20, 0, KNL_PCI_UNCORE_EDC_UCLK, 5),
2507 	},
2508 	{ /* EDC6 UClk */
2509 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2510 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 0, KNL_PCI_UNCORE_EDC_UCLK, 6),
2511 	},
2512 	{ /* EDC7 UClk */
2513 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2514 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 0, KNL_PCI_UNCORE_EDC_UCLK, 7),
2515 	},
2516 	{ /* EDC0 EClk */
2517 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2518 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(24, 2, KNL_PCI_UNCORE_EDC_ECLK, 0),
2519 	},
2520 	{ /* EDC1 EClk */
2521 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2522 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(25, 2, KNL_PCI_UNCORE_EDC_ECLK, 1),
2523 	},
2524 	{ /* EDC2 EClk */
2525 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2526 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(26, 2, KNL_PCI_UNCORE_EDC_ECLK, 2),
2527 	},
2528 	{ /* EDC3 EClk */
2529 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2530 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(27, 2, KNL_PCI_UNCORE_EDC_ECLK, 3),
2531 	},
2532 	{ /* EDC4 EClk */
2533 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2534 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(28, 2, KNL_PCI_UNCORE_EDC_ECLK, 4),
2535 	},
2536 	{ /* EDC5 EClk */
2537 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2538 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(29, 2, KNL_PCI_UNCORE_EDC_ECLK, 5),
2539 	},
2540 	{ /* EDC6 EClk */
2541 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2542 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(30, 2, KNL_PCI_UNCORE_EDC_ECLK, 6),
2543 	},
2544 	{ /* EDC7 EClk */
2545 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2546 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(31, 2, KNL_PCI_UNCORE_EDC_ECLK, 7),
2547 	},
2548 	{ /* M2PCIe */
2549 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7817),
2550 		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_M2PCIE, 0),
2551 	},
2552 	{ /* IRP */
2553 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7814),
2554 		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_IRP, 0),
2555 	},
2556 	{ /* end: all zeroes */ }
2557 };
2558 
2559 static struct pci_driver knl_uncore_pci_driver = {
2560 	.name		= "knl_uncore",
2561 	.id_table	= knl_uncore_pci_ids,
2562 };
2563 
2564 int knl_uncore_pci_init(void)
2565 {
2566 	int ret;
2567 
2568 	/* All KNL PCI based PMON units are on the same PCI bus except IRP */
2569 	ret = snb_pci2phy_map_init(0x7814); /* IRP */
2570 	if (ret)
2571 		return ret;
2572 	ret = snb_pci2phy_map_init(0x7817); /* M2PCIe */
2573 	if (ret)
2574 		return ret;
2575 	uncore_pci_uncores = knl_pci_uncores;
2576 	uncore_pci_driver = &knl_uncore_pci_driver;
2577 	return 0;
2578 }
2579 
2580 /* end of KNL uncore support */
2581 
2582 /* Haswell-EP uncore support */
2583 static struct attribute *hswep_uncore_ubox_formats_attr[] = {
2584 	&format_attr_event.attr,
2585 	&format_attr_umask.attr,
2586 	&format_attr_edge.attr,
2587 	&format_attr_inv.attr,
2588 	&format_attr_thresh5.attr,
2589 	&format_attr_filter_tid2.attr,
2590 	&format_attr_filter_cid.attr,
2591 	NULL,
2592 };
2593 
2594 static const struct attribute_group hswep_uncore_ubox_format_group = {
2595 	.name = "format",
2596 	.attrs = hswep_uncore_ubox_formats_attr,
2597 };
2598 
2599 static int hswep_ubox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2600 {
2601 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2602 	reg1->reg = HSWEP_U_MSR_PMON_FILTER;
2603 	reg1->config = event->attr.config1 & HSWEP_U_MSR_PMON_BOX_FILTER_MASK;
2604 	reg1->idx = 0;
2605 	return 0;
2606 }
2607 
2608 static struct intel_uncore_ops hswep_uncore_ubox_ops = {
2609 	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
2610 	.hw_config		= hswep_ubox_hw_config,
2611 	.get_constraint		= uncore_get_constraint,
2612 	.put_constraint		= uncore_put_constraint,
2613 };
2614 
2615 static struct intel_uncore_type hswep_uncore_ubox = {
2616 	.name			= "ubox",
2617 	.num_counters		= 2,
2618 	.num_boxes		= 1,
2619 	.perf_ctr_bits		= 44,
2620 	.fixed_ctr_bits		= 48,
2621 	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
2622 	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
2623 	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
2624 	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
2625 	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
2626 	.num_shared_regs	= 1,
2627 	.ops			= &hswep_uncore_ubox_ops,
2628 	.format_group		= &hswep_uncore_ubox_format_group,
2629 };
2630 
2631 static struct attribute *hswep_uncore_cbox_formats_attr[] = {
2632 	&format_attr_event.attr,
2633 	&format_attr_umask.attr,
2634 	&format_attr_edge.attr,
2635 	&format_attr_tid_en.attr,
2636 	&format_attr_thresh8.attr,
2637 	&format_attr_filter_tid3.attr,
2638 	&format_attr_filter_link2.attr,
2639 	&format_attr_filter_state3.attr,
2640 	&format_attr_filter_nid2.attr,
2641 	&format_attr_filter_opc2.attr,
2642 	&format_attr_filter_nc.attr,
2643 	&format_attr_filter_c6.attr,
2644 	&format_attr_filter_isoc.attr,
2645 	NULL,
2646 };
2647 
2648 static const struct attribute_group hswep_uncore_cbox_format_group = {
2649 	.name = "format",
2650 	.attrs = hswep_uncore_cbox_formats_attr,
2651 };
2652 
2653 static struct event_constraint hswep_uncore_cbox_constraints[] = {
2654 	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
2655 	UNCORE_EVENT_CONSTRAINT(0x09, 0x1),
2656 	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
2657 	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
2658 	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
2659 	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
2660 	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
2661 	EVENT_CONSTRAINT_END
2662 };
2663 
2664 static struct extra_reg hswep_uncore_cbox_extra_regs[] = {
2665 	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
2666 				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
2667 	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
2668 	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
2669 	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
2670 	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
2671 	SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4),
2672 	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x4),
2673 	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
2674 	SNBEP_CBO_EVENT_EXTRA_REG(0x4028, 0x40ff, 0x8),
2675 	SNBEP_CBO_EVENT_EXTRA_REG(0x4032, 0x40ff, 0x8),
2676 	SNBEP_CBO_EVENT_EXTRA_REG(0x4029, 0x40ff, 0x8),
2677 	SNBEP_CBO_EVENT_EXTRA_REG(0x4033, 0x40ff, 0x8),
2678 	SNBEP_CBO_EVENT_EXTRA_REG(0x402A, 0x40ff, 0x8),
2679 	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x12),
2680 	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
2681 	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
2682 	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
2683 	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
2684 	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
2685 	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
2686 	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
2687 	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
2688 	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
2689 	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
2690 	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
2691 	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
2692 	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
2693 	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
2694 	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
2695 	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
2696 	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
2697 	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
2698 	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
2699 	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
2700 	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
2701 	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
2702 	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
2703 	EVENT_EXTRA_END
2704 };
2705 
2706 static u64 hswep_cbox_filter_mask(int fields)
2707 {
2708 	u64 mask = 0;
2709 	if (fields & 0x1)
2710 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_TID;
2711 	if (fields & 0x2)
2712 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK;
2713 	if (fields & 0x4)
2714 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE;
2715 	if (fields & 0x8)
2716 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NID;
2717 	if (fields & 0x10) {
2718 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC;
2719 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NC;
2720 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_C6;
2721 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
2722 	}
2723 	return mask;
2724 }
2725 
2726 static struct event_constraint *
2727 hswep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
2728 {
2729 	return __snbep_cbox_get_constraint(box, event, hswep_cbox_filter_mask);
2730 }
2731 
2732 static int hswep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2733 {
2734 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2735 	struct extra_reg *er;
2736 	int idx = 0;
2737 
2738 	for (er = hswep_uncore_cbox_extra_regs; er->msr; er++) {
2739 		if (er->event != (event->hw.config & er->config_mask))
2740 			continue;
2741 		idx |= er->idx;
2742 	}
2743 
2744 	if (idx) {
2745 		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
2746 			    HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
2747 		reg1->config = event->attr.config1 & hswep_cbox_filter_mask(idx);
2748 		reg1->idx = idx;
2749 	}
2750 	return 0;
2751 }
2752 
2753 static void hswep_cbox_enable_event(struct intel_uncore_box *box,
2754 				  struct perf_event *event)
2755 {
2756 	struct hw_perf_event *hwc = &event->hw;
2757 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2758 
2759 	if (reg1->idx != EXTRA_REG_NONE) {
2760 		u64 filter = uncore_shared_reg_config(box, 0);
2761 		wrmsrl(reg1->reg, filter & 0xffffffff);
2762 		wrmsrl(reg1->reg + 1, filter >> 32);
2763 	}
2764 
2765 	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
2766 }
2767 
2768 static struct intel_uncore_ops hswep_uncore_cbox_ops = {
2769 	.init_box		= snbep_uncore_msr_init_box,
2770 	.disable_box		= snbep_uncore_msr_disable_box,
2771 	.enable_box		= snbep_uncore_msr_enable_box,
2772 	.disable_event		= snbep_uncore_msr_disable_event,
2773 	.enable_event		= hswep_cbox_enable_event,
2774 	.read_counter		= uncore_msr_read_counter,
2775 	.hw_config		= hswep_cbox_hw_config,
2776 	.get_constraint		= hswep_cbox_get_constraint,
2777 	.put_constraint		= snbep_cbox_put_constraint,
2778 };
2779 
2780 static struct intel_uncore_type hswep_uncore_cbox = {
2781 	.name			= "cbox",
2782 	.num_counters		= 4,
2783 	.num_boxes		= 18,
2784 	.perf_ctr_bits		= 48,
2785 	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
2786 	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
2787 	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
2788 	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
2789 	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
2790 	.num_shared_regs	= 1,
2791 	.constraints		= hswep_uncore_cbox_constraints,
2792 	.ops			= &hswep_uncore_cbox_ops,
2793 	.format_group		= &hswep_uncore_cbox_format_group,
2794 };
2795 
2796 /*
2797  * Write SBOX Initialization register bit by bit to avoid spurious #GPs
2798  */
2799 static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box)
2800 {
2801 	unsigned msr = uncore_msr_box_ctl(box);
2802 
2803 	if (msr) {
2804 		u64 init = SNBEP_PMON_BOX_CTL_INT;
2805 		u64 flags = 0;
2806 		int i;
2807 
2808 		for_each_set_bit(i, (unsigned long *)&init, 64) {
2809 			flags |= (1ULL << i);
2810 			wrmsrl(msr, flags);
2811 		}
2812 	}
2813 }
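
/*
 * With SNBEP_PMON_BOX_CTL_INT (bits 0, 1 and 16), the loop above issues
 * the cumulative writes 0x1, 0x3 and then 0x10003, so each write adds a
 * single new bit on top of the value the box has already accepted.
 */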
2814 
2815 static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = {
2816 	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
2817 	.init_box		= hswep_uncore_sbox_msr_init_box
2818 };
2819 
2820 static struct attribute *hswep_uncore_sbox_formats_attr[] = {
2821 	&format_attr_event.attr,
2822 	&format_attr_umask.attr,
2823 	&format_attr_edge.attr,
2824 	&format_attr_tid_en.attr,
2825 	&format_attr_inv.attr,
2826 	&format_attr_thresh8.attr,
2827 	NULL,
2828 };
2829 
2830 static const struct attribute_group hswep_uncore_sbox_format_group = {
2831 	.name = "format",
2832 	.attrs = hswep_uncore_sbox_formats_attr,
2833 };
2834 
2835 static struct intel_uncore_type hswep_uncore_sbox = {
2836 	.name			= "sbox",
2837 	.num_counters		= 4,
2838 	.num_boxes		= 4,
2839 	.perf_ctr_bits		= 44,
2840 	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
2841 	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
2842 	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
2843 	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
2844 	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
2845 	.ops			= &hswep_uncore_sbox_msr_ops,
2846 	.format_group		= &hswep_uncore_sbox_format_group,
2847 };
2848 
2849 static int hswep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2850 {
2851 	struct hw_perf_event *hwc = &event->hw;
2852 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2853 	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
2854 
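	/*
	 * Events 0xb-0xe are the occupancy-band events; they take a band
	 * threshold from config1 (exposed as the filter_band0..filter_band3
	 * format attributes) via the PCU box filter register.
	 */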
2855 	if (ev_sel >= 0xb && ev_sel <= 0xe) {
2856 		reg1->reg = HSWEP_PCU_MSR_PMON_BOX_FILTER;
2857 		reg1->idx = ev_sel - 0xb;
2858 		reg1->config = event->attr.config1 & (0xff << reg1->idx);
2859 	}
2860 	return 0;
2861 }
2862 
2863 static struct intel_uncore_ops hswep_uncore_pcu_ops = {
2864 	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
2865 	.hw_config		= hswep_pcu_hw_config,
2866 	.get_constraint		= snbep_pcu_get_constraint,
2867 	.put_constraint		= snbep_pcu_put_constraint,
2868 };
2869 
2870 static struct intel_uncore_type hswep_uncore_pcu = {
2871 	.name			= "pcu",
2872 	.num_counters		= 4,
2873 	.num_boxes		= 1,
2874 	.perf_ctr_bits		= 48,
2875 	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
2876 	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
2877 	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
2878 	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
2879 	.num_shared_regs	= 1,
2880 	.ops			= &hswep_uncore_pcu_ops,
2881 	.format_group		= &snbep_uncore_pcu_format_group,
2882 };
2883 
2884 static struct intel_uncore_type *hswep_msr_uncores[] = {
2885 	&hswep_uncore_ubox,
2886 	&hswep_uncore_cbox,
2887 	&hswep_uncore_sbox,
2888 	&hswep_uncore_pcu,
2889 	NULL,
2890 };
2891 
2892 #define HSWEP_PCU_DID			0x2fc0
2893 #define HSWEP_PCU_CAPID4_OFFSET		0x94
2894 #define hswep_get_chop(_cap)		(((_cap) >> 6) & 0x3)
2895 
2896 static bool hswep_has_limit_sbox(unsigned int device)
2897 {
2898 	struct pci_dev *dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
2899 	u32 capid4;
2900 
2901 	if (!dev)
2902 		return false;
2903 
2904 	pci_read_config_dword(dev, HSWEP_PCU_CAPID4_OFFSET, &capid4);
2905 	pci_dev_put(dev);
2906 	if (!hswep_get_chop(capid4))
2907 		return true;
2908 
2909 	return false;
2910 }
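
/*
 * hswep_get_chop() extracts the two CAPID4 "chop" bits; a value of zero
 * appears to identify the smaller die variants, hence a zero chop reports
 * the limited (two-SBOX) configuration as present.
 */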
2911 
2912 void hswep_uncore_cpu_init(void)
2913 {
2914 	if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
2915 		hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
2916 
2917 	/* Detect 6-8 core systems with only two SBOXes */
2918 	if (hswep_has_limit_sbox(HSWEP_PCU_DID))
2919 		hswep_uncore_sbox.num_boxes = 2;
2920 
2921 	uncore_msr_uncores = hswep_msr_uncores;
2922 }
2923 
2924 static struct intel_uncore_type hswep_uncore_ha = {
2925 	.name		= "ha",
2926 	.num_counters   = 4,
2927 	.num_boxes	= 2,
2928 	.perf_ctr_bits	= 48,
2929 	SNBEP_UNCORE_PCI_COMMON_INIT(),
2930 };
2931 
2932 static struct uncore_event_desc hswep_uncore_imc_events[] = {
2933 	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
2934 	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
2935 	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
2936 	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
2937 	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
2938 	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
2939 	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
2940 	{ /* end: all zeroes */ },
2941 };
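
/*
 * The .scale above is 64 / 2^20: each CAS command transfers one 64-byte
 * cache line, and dividing by 1048576 converts bytes to the advertised
 * MiB unit (64 / 1048576 = 6.103515625e-5).
 */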
2942 
2943 static struct intel_uncore_type hswep_uncore_imc = {
2944 	.name		= "imc",
2945 	.num_counters   = 4,
2946 	.num_boxes	= 8,
2947 	.perf_ctr_bits	= 48,
2948 	.fixed_ctr_bits	= 48,
2949 	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
2950 	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
2951 	.event_descs	= hswep_uncore_imc_events,
2952 	SNBEP_UNCORE_PCI_COMMON_INIT(),
2953 };
2954 
2955 static unsigned hswep_uncore_irp_ctrs[] = {0xa0, 0xa8, 0xb0, 0xb8};
2956 
2957 static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
2958 {
2959 	struct pci_dev *pdev = box->pci_dev;
2960 	struct hw_perf_event *hwc = &event->hw;
2961 	u64 count = 0;
2962 
2963 	pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
2964 	pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
2965 
2966 	return count;
2967 }
2968 
2969 static struct intel_uncore_ops hswep_uncore_irp_ops = {
2970 	.init_box	= snbep_uncore_pci_init_box,
2971 	.disable_box	= snbep_uncore_pci_disable_box,
2972 	.enable_box	= snbep_uncore_pci_enable_box,
2973 	.disable_event	= ivbep_uncore_irp_disable_event,
2974 	.enable_event	= ivbep_uncore_irp_enable_event,
2975 	.read_counter	= hswep_uncore_irp_read_counter,
2976 };
2977 
2978 static struct intel_uncore_type hswep_uncore_irp = {
2979 	.name			= "irp",
2980 	.num_counters		= 4,
2981 	.num_boxes		= 1,
2982 	.perf_ctr_bits		= 48,
2983 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
2984 	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
2985 	.ops			= &hswep_uncore_irp_ops,
2986 	.format_group		= &snbep_uncore_format_group,
2987 };
2988 
2989 static struct intel_uncore_type hswep_uncore_qpi = {
2990 	.name			= "qpi",
2991 	.num_counters		= 4,
2992 	.num_boxes		= 3,
2993 	.perf_ctr_bits		= 48,
2994 	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
2995 	.event_ctl		= SNBEP_PCI_PMON_CTL0,
2996 	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
2997 	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
2998 	.num_shared_regs	= 1,
2999 	.ops			= &snbep_uncore_qpi_ops,
3000 	.format_group		= &snbep_uncore_qpi_format_group,
3001 };
3002 
3003 static struct event_constraint hswep_uncore_r2pcie_constraints[] = {
3004 	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
3005 	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
3006 	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
3007 	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
3008 	UNCORE_EVENT_CONSTRAINT(0x24, 0x1),
3009 	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
3010 	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
3011 	UNCORE_EVENT_CONSTRAINT(0x27, 0x1),
3012 	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
3013 	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
3014 	UNCORE_EVENT_CONSTRAINT(0x2a, 0x1),
3015 	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
3016 	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
3017 	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
3018 	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
3019 	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
3020 	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
3021 	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
3022 	EVENT_CONSTRAINT_END
3023 };
3024 
3025 static struct intel_uncore_type hswep_uncore_r2pcie = {
3026 	.name		= "r2pcie",
3027 	.num_counters   = 4,
3028 	.num_boxes	= 1,
3029 	.perf_ctr_bits	= 48,
3030 	.constraints	= hswep_uncore_r2pcie_constraints,
3031 	SNBEP_UNCORE_PCI_COMMON_INIT(),
3032 };
3033 
3034 static struct event_constraint hswep_uncore_r3qpi_constraints[] = {
3035 	UNCORE_EVENT_CONSTRAINT(0x01, 0x3),
3036 	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
3037 	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
3038 	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
3039 	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
3040 	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
3041 	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
3042 	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
3043 	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
3044 	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
3045 	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
3046 	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
3047 	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
3048 	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
3049 	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
3050 	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
3051 	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
3052 	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
3053 	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
3054 	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
3055 	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
3056 	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
3057 	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
3058 	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
3059 	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
3060 	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
3061 	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
3062 	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
3063 	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
3064 	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
3065 	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
3066 	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
3067 	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
3068 	EVENT_CONSTRAINT_END
3069 };
3070 
3071 static struct intel_uncore_type hswep_uncore_r3qpi = {
3072 	.name		= "r3qpi",
3073 	.num_counters   = 3,
3074 	.num_boxes	= 3,
3075 	.perf_ctr_bits	= 44,
3076 	.constraints	= hswep_uncore_r3qpi_constraints,
3077 	SNBEP_UNCORE_PCI_COMMON_INIT(),
3078 };
3079 
3080 enum {
3081 	HSWEP_PCI_UNCORE_HA,
3082 	HSWEP_PCI_UNCORE_IMC,
3083 	HSWEP_PCI_UNCORE_IRP,
3084 	HSWEP_PCI_UNCORE_QPI,
3085 	HSWEP_PCI_UNCORE_R2PCIE,
3086 	HSWEP_PCI_UNCORE_R3QPI,
3087 };
3088 
3089 static struct intel_uncore_type *hswep_pci_uncores[] = {
3090 	[HSWEP_PCI_UNCORE_HA]	= &hswep_uncore_ha,
3091 	[HSWEP_PCI_UNCORE_IMC]	= &hswep_uncore_imc,
3092 	[HSWEP_PCI_UNCORE_IRP]	= &hswep_uncore_irp,
3093 	[HSWEP_PCI_UNCORE_QPI]	= &hswep_uncore_qpi,
3094 	[HSWEP_PCI_UNCORE_R2PCIE]	= &hswep_uncore_r2pcie,
3095 	[HSWEP_PCI_UNCORE_R3QPI]	= &hswep_uncore_r3qpi,
3096 	NULL,
3097 };
3098 
3099 static const struct pci_device_id hswep_uncore_pci_ids[] = {
3100 	{ /* Home Agent 0 */
3101 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f30),
3102 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 0),
3103 	},
3104 	{ /* Home Agent 1 */
3105 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f38),
3106 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 1),
3107 	},
3108 	{ /* MC0 Channel 0 */
3109 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb0),
3110 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 0),
3111 	},
3112 	{ /* MC0 Channel 1 */
3113 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb1),
3114 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 1),
3115 	},
3116 	{ /* MC0 Channel 2 */
3117 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb4),
3118 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 2),
3119 	},
3120 	{ /* MC0 Channel 3 */
3121 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb5),
3122 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 3),
3123 	},
3124 	{ /* MC1 Channel 0 */
3125 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd0),
3126 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 4),
3127 	},
3128 	{ /* MC1 Channel 1 */
3129 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd1),
3130 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 5),
3131 	},
3132 	{ /* MC1 Channel 2 */
3133 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd4),
3134 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 6),
3135 	},
3136 	{ /* MC1 Channel 3 */
3137 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd5),
3138 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 7),
3139 	},
3140 	{ /* IRP */
3141 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f39),
3142 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IRP, 0),
3143 	},
3144 	{ /* QPI0 Port 0 */
3145 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f32),
3146 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 0),
3147 	},
3148 	{ /* QPI0 Port 1 */
3149 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f33),
3150 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 1),
3151 	},
3152 	{ /* QPI1 Port 2 */
3153 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3a),
3154 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 2),
3155 	},
3156 	{ /* R2PCIe */
3157 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f34),
3158 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R2PCIE, 0),
3159 	},
3160 	{ /* R3QPI0 Link 0 */
3161 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f36),
3162 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 0),
3163 	},
3164 	{ /* R3QPI0 Link 1 */
3165 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f37),
3166 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 1),
3167 	},
3168 	{ /* R3QPI1 Link 2 */
3169 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3e),
3170 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 2),
3171 	},
3172 	{ /* QPI Port 0 filter  */
3173 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f86),
3174 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3175 						   SNBEP_PCI_QPI_PORT0_FILTER),
3176 	},
3177 	{ /* QPI Port 1 filter  */
3178 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f96),
3179 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3180 						   SNBEP_PCI_QPI_PORT1_FILTER),
3181 	},
3182 	{ /* end: all zeroes */ }
3183 };
3184 
3185 static struct pci_driver hswep_uncore_pci_driver = {
3186 	.name		= "hswep_uncore",
3187 	.id_table	= hswep_uncore_pci_ids,
3188 };
3189 
3190 int hswep_uncore_pci_init(void)
3191 {
3192 	int ret = snbep_pci2phy_map_init(0x2f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
3193 	if (ret)
3194 		return ret;
3195 	uncore_pci_uncores = hswep_pci_uncores;
3196 	uncore_pci_driver = &hswep_uncore_pci_driver;
3197 	return 0;
3198 }
3199 /* end of Haswell-EP uncore support */
3200 
3201 /* BDX uncore support */
3202 
3203 static struct intel_uncore_type bdx_uncore_ubox = {
3204 	.name			= "ubox",
3205 	.num_counters		= 2,
3206 	.num_boxes		= 1,
3207 	.perf_ctr_bits		= 48,
3208 	.fixed_ctr_bits		= 48,
3209 	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
3210 	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
3211 	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
3212 	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
3213 	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
3214 	.num_shared_regs	= 1,
3215 	.ops			= &ivbep_uncore_msr_ops,
3216 	.format_group		= &ivbep_uncore_ubox_format_group,
3217 };
3218 
3219 static struct event_constraint bdx_uncore_cbox_constraints[] = {
3220 	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
3221 	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
3222 	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
3223 	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
3224 	EVENT_CONSTRAINT_END
3225 };
3226 
3227 static struct intel_uncore_type bdx_uncore_cbox = {
3228 	.name			= "cbox",
3229 	.num_counters		= 4,
3230 	.num_boxes		= 24,
3231 	.perf_ctr_bits		= 48,
3232 	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
3233 	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
3234 	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
3235 	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
3236 	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
3237 	.num_shared_regs	= 1,
3238 	.constraints		= bdx_uncore_cbox_constraints,
3239 	.ops			= &hswep_uncore_cbox_ops,
3240 	.format_group		= &hswep_uncore_cbox_format_group,
3241 };
3242 
3243 static struct intel_uncore_type bdx_uncore_sbox = {
3244 	.name			= "sbox",
3245 	.num_counters		= 4,
3246 	.num_boxes		= 4,
3247 	.perf_ctr_bits		= 48,
3248 	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
3249 	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
3250 	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
3251 	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
3252 	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
3253 	.ops			= &hswep_uncore_sbox_msr_ops,
3254 	.format_group		= &hswep_uncore_sbox_format_group,
3255 };
3256 
3257 #define BDX_MSR_UNCORE_SBOX	3
3258 
3259 static struct intel_uncore_type *bdx_msr_uncores[] = {
3260 	&bdx_uncore_ubox,
3261 	&bdx_uncore_cbox,
3262 	&hswep_uncore_pcu,
3263 	&bdx_uncore_sbox,
3264 	NULL,
3265 };
3266 
3267 /* Bit 7 'Use Occupancy' is not available for counter 0 on BDX */
3268 static struct event_constraint bdx_uncore_pcu_constraints[] = {
3269 	EVENT_CONSTRAINT(0x80, 0xe, 0x80),
3270 	EVENT_CONSTRAINT_END
3271 };
3272 
3273 #define BDX_PCU_DID			0x6fc0
3274 
3275 void bdx_uncore_cpu_init(void)
3276 {
3277 	if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
3278 		bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
3279 	uncore_msr_uncores = bdx_msr_uncores;
3280 
3281 	/* Detect systems with no SBOXes */
3282 	if ((boot_cpu_data.x86_model == 86) || hswep_has_limit_sbox(BDX_PCU_DID))
3283 		uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
3284 
3285 	hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints;
3286 }
3287 
3288 static struct intel_uncore_type bdx_uncore_ha = {
3289 	.name		= "ha",
3290 	.num_counters   = 4,
3291 	.num_boxes	= 2,
3292 	.perf_ctr_bits	= 48,
3293 	SNBEP_UNCORE_PCI_COMMON_INIT(),
3294 };
3295 
3296 static struct intel_uncore_type bdx_uncore_imc = {
3297 	.name		= "imc",
3298 	.num_counters   = 4,
3299 	.num_boxes	= 8,
3300 	.perf_ctr_bits	= 48,
3301 	.fixed_ctr_bits	= 48,
3302 	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
3303 	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
3304 	.event_descs	= hswep_uncore_imc_events,
3305 	SNBEP_UNCORE_PCI_COMMON_INIT(),
3306 };
3307 
3308 static struct intel_uncore_type bdx_uncore_irp = {
3309 	.name			= "irp",
3310 	.num_counters		= 4,
3311 	.num_boxes		= 1,
3312 	.perf_ctr_bits		= 48,
3313 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
3314 	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
3315 	.ops			= &hswep_uncore_irp_ops,
3316 	.format_group		= &snbep_uncore_format_group,
3317 };
3318 
3319 static struct intel_uncore_type bdx_uncore_qpi = {
3320 	.name			= "qpi",
3321 	.num_counters		= 4,
3322 	.num_boxes		= 3,
3323 	.perf_ctr_bits		= 48,
3324 	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
3325 	.event_ctl		= SNBEP_PCI_PMON_CTL0,
3326 	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
3327 	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
3328 	.num_shared_regs	= 1,
3329 	.ops			= &snbep_uncore_qpi_ops,
3330 	.format_group		= &snbep_uncore_qpi_format_group,
3331 };
3332 
3333 static struct event_constraint bdx_uncore_r2pcie_constraints[] = {
3334 	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
3335 	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
3336 	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
3337 	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
3338 	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
3339 	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
3340 	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
3341 	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
3342 	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
3343 	EVENT_CONSTRAINT_END
3344 };
3345 
3346 static struct intel_uncore_type bdx_uncore_r2pcie = {
3347 	.name		= "r2pcie",
3348 	.num_counters   = 4,
3349 	.num_boxes	= 1,
3350 	.perf_ctr_bits	= 48,
3351 	.constraints	= bdx_uncore_r2pcie_constraints,
3352 	SNBEP_UNCORE_PCI_COMMON_INIT(),
3353 };
3354 
3355 static struct event_constraint bdx_uncore_r3qpi_constraints[] = {
3356 	UNCORE_EVENT_CONSTRAINT(0x01, 0x7),
3357 	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
3358 	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
3359 	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
3360 	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
3361 	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
3362 	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
3363 	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
3364 	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
3365 	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
3366 	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
3367 	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
3368 	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
3369 	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
3370 	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
3371 	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
3372 	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
3373 	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
3374 	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
3375 	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
3376 	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
3377 	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
3378 	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
3379 	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
3380 	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
3381 	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
3382 	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
3383 	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
3384 	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
3385 	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
3386 	EVENT_CONSTRAINT_END
3387 };
3388 
3389 static struct intel_uncore_type bdx_uncore_r3qpi = {
3390 	.name		= "r3qpi",
3391 	.num_counters   = 3,
3392 	.num_boxes	= 3,
3393 	.perf_ctr_bits	= 48,
3394 	.constraints	= bdx_uncore_r3qpi_constraints,
3395 	SNBEP_UNCORE_PCI_COMMON_INIT(),
3396 };
3397 
3398 enum {
3399 	BDX_PCI_UNCORE_HA,
3400 	BDX_PCI_UNCORE_IMC,
3401 	BDX_PCI_UNCORE_IRP,
3402 	BDX_PCI_UNCORE_QPI,
3403 	BDX_PCI_UNCORE_R2PCIE,
3404 	BDX_PCI_UNCORE_R3QPI,
3405 };
3406 
3407 static struct intel_uncore_type *bdx_pci_uncores[] = {
3408 	[BDX_PCI_UNCORE_HA]	= &bdx_uncore_ha,
3409 	[BDX_PCI_UNCORE_IMC]	= &bdx_uncore_imc,
3410 	[BDX_PCI_UNCORE_IRP]	= &bdx_uncore_irp,
3411 	[BDX_PCI_UNCORE_QPI]	= &bdx_uncore_qpi,
3412 	[BDX_PCI_UNCORE_R2PCIE]	= &bdx_uncore_r2pcie,
3413 	[BDX_PCI_UNCORE_R3QPI]	= &bdx_uncore_r3qpi,
3414 	NULL,
3415 };
3416 
3417 static const struct pci_device_id bdx_uncore_pci_ids[] = {
3418 	{ /* Home Agent 0 */
3419 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f30),
3420 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 0),
3421 	},
3422 	{ /* Home Agent 1 */
3423 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f38),
3424 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 1),
3425 	},
3426 	{ /* MC0 Channel 0 */
3427 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb0),
3428 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 0),
3429 	},
3430 	{ /* MC0 Channel 1 */
3431 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb1),
3432 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 1),
3433 	},
3434 	{ /* MC0 Channel 2 */
3435 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb4),
3436 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 2),
3437 	},
3438 	{ /* MC0 Channel 3 */
3439 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb5),
3440 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 3),
3441 	},
3442 	{ /* MC1 Channel 0 */
3443 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd0),
3444 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 4),
3445 	},
3446 	{ /* MC1 Channel 1 */
3447 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd1),
3448 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 5),
3449 	},
3450 	{ /* MC1 Channel 2 */
3451 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd4),
3452 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 6),
3453 	},
3454 	{ /* MC1 Channel 3 */
3455 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd5),
3456 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 7),
3457 	},
3458 	{ /* IRP */
3459 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f39),
3460 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IRP, 0),
3461 	},
3462 	{ /* QPI0 Port 0 */
3463 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f32),
3464 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 0),
3465 	},
3466 	{ /* QPI0 Port 1 */
3467 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f33),
3468 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 1),
3469 	},
3470 	{ /* QPI1 Port 2 */
3471 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3a),
3472 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 2),
3473 	},
3474 	{ /* R2PCIe */
3475 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f34),
3476 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R2PCIE, 0),
3477 	},
3478 	{ /* R3QPI0 Link 0 */
3479 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f36),
3480 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 0),
3481 	},
3482 	{ /* R3QPI0 Link 1 */
3483 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f37),
3484 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 1),
3485 	},
3486 	{ /* R3QPI1 Link 2 */
3487 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3e),
3488 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 2),
3489 	},
3490 	{ /* QPI Port 0 filter  */
3491 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f86),
3492 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3493 						   SNBEP_PCI_QPI_PORT0_FILTER),
3494 	},
3495 	{ /* QPI Port 1 filter  */
3496 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f96),
3497 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3498 						   SNBEP_PCI_QPI_PORT1_FILTER),
3499 	},
3500 	{ /* QPI Port 2 filter  */
3501 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46),
3502 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3503 						   BDX_PCI_QPI_PORT2_FILTER),
3504 	},
3505 	{ /* end: all zeroes */ }
3506 };
3507 
3508 static struct pci_driver bdx_uncore_pci_driver = {
3509 	.name		= "bdx_uncore",
3510 	.id_table	= bdx_uncore_pci_ids,
3511 };
3512 
3513 int bdx_uncore_pci_init(void)
3514 {
3515 	int ret = snbep_pci2phy_map_init(0x6f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
3516 
3517 	if (ret)
3518 		return ret;
3519 	uncore_pci_uncores = bdx_pci_uncores;
3520 	uncore_pci_driver = &bdx_uncore_pci_driver;
3521 	return 0;
3522 }
3523 
3524 /* end of BDX uncore support */
3525 
3526 /* SKX uncore support */
3527 
3528 static struct intel_uncore_type skx_uncore_ubox = {
3529 	.name			= "ubox",
3530 	.num_counters		= 2,
3531 	.num_boxes		= 1,
3532 	.perf_ctr_bits		= 48,
3533 	.fixed_ctr_bits		= 48,
3534 	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
3535 	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
3536 	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
3537 	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
3538 	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
3539 	.ops			= &ivbep_uncore_msr_ops,
3540 	.format_group		= &ivbep_uncore_ubox_format_group,
3541 };
3542 
3543 static struct attribute *skx_uncore_cha_formats_attr[] = {
3544 	&format_attr_event.attr,
3545 	&format_attr_umask.attr,
3546 	&format_attr_edge.attr,
3547 	&format_attr_tid_en.attr,
3548 	&format_attr_inv.attr,
3549 	&format_attr_thresh8.attr,
3550 	&format_attr_filter_tid4.attr,
3551 	&format_attr_filter_state5.attr,
3552 	&format_attr_filter_rem.attr,
3553 	&format_attr_filter_loc.attr,
3554 	&format_attr_filter_nm.attr,
3555 	&format_attr_filter_all_op.attr,
3556 	&format_attr_filter_not_nm.attr,
3557 	&format_attr_filter_opc_0.attr,
3558 	&format_attr_filter_opc_1.attr,
3559 	&format_attr_filter_nc.attr,
3560 	&format_attr_filter_isoc.attr,
3561 	NULL,
3562 };
3563 
3564 static const struct attribute_group skx_uncore_chabox_format_group = {
3565 	.name = "format",
3566 	.attrs = skx_uncore_cha_formats_attr,
3567 };
3568 
3569 static struct event_constraint skx_uncore_chabox_constraints[] = {
3570 	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
3571 	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
3572 	EVENT_CONSTRAINT_END
3573 };
3574 
3575 static struct extra_reg skx_uncore_cha_extra_regs[] = {
3576 	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
3577 	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
3578 	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
3579 	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
3580 	SNBEP_CBO_EVENT_EXTRA_REG(0x3134, 0xffff, 0x4),
3581 	SNBEP_CBO_EVENT_EXTRA_REG(0x9134, 0xffff, 0x4),
3582 	SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x8),
3583 	SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x8),
3584 	SNBEP_CBO_EVENT_EXTRA_REG(0x38, 0xff, 0x3),
3585 	EVENT_EXTRA_END
3586 };
3587 
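/*
 * Translate a filter-field bitmask (bit 0: TID, bit 1: LINK, bit 2: STATE,
 * bit 3: all remaining locality/opcode filters) into the corresponding
 * CHA filter register bits.
 */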
3588 static u64 skx_cha_filter_mask(int fields)
3589 {
3590 	u64 mask = 0;
3591 
3592 	if (fields & 0x1)
3593 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_TID;
3594 	if (fields & 0x2)
3595 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LINK;
3596 	if (fields & 0x4)
3597 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_STATE;
3598 	if (fields & 0x8) {
3599 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_REM;
3600 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LOC;
3601 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC;
3602 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NM;
3603 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM;
3604 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC0;
3605 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC1;
3606 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NC;
3607 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ISOC;
3608 	}
3609 	return mask;
3610 }
3611 
3612 static struct event_constraint *
3613 skx_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
3614 {
3615 	return __snbep_cbox_get_constraint(box, event, skx_cha_filter_mask);
3616 }
3617 
3618 static int skx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
3619 {
3620 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
3621 	struct extra_reg *er;
3622 	int idx = 0;
3623 	/* Any of the CHA events may be filtered by Thread/Core-ID. */
3624 	if (event->hw.config & SNBEP_CBO_PMON_CTL_TID_EN)
3625 		idx = SKX_CHA_MSR_PMON_BOX_FILTER_TID;
3626 
3627 	for (er = skx_uncore_cha_extra_regs; er->msr; er++) {
3628 		if (er->event != (event->hw.config & er->config_mask))
3629 			continue;
3630 		idx |= er->idx;
3631 	}
3632 
3633 	if (idx) {
3634 		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
3635 			    HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
3636 		reg1->config = event->attr.config1 & skx_cha_filter_mask(idx);
3637 		reg1->idx = idx;
3638 	}
3639 	return 0;
3640 }
3641 
3642 static struct intel_uncore_ops skx_uncore_chabox_ops = {
3643 	/* There is no frz_en for chabox ctl */
3644 	.init_box		= ivbep_uncore_msr_init_box,
3645 	.disable_box		= snbep_uncore_msr_disable_box,
3646 	.enable_box		= snbep_uncore_msr_enable_box,
3647 	.disable_event		= snbep_uncore_msr_disable_event,
3648 	.enable_event		= hswep_cbox_enable_event,
3649 	.read_counter		= uncore_msr_read_counter,
3650 	.hw_config		= skx_cha_hw_config,
3651 	.get_constraint		= skx_cha_get_constraint,
3652 	.put_constraint		= snbep_cbox_put_constraint,
3653 };
3654 
3655 static struct intel_uncore_type skx_uncore_chabox = {
3656 	.name			= "cha",
3657 	.num_counters		= 4,
3658 	.perf_ctr_bits		= 48,
3659 	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
3660 	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
3661 	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
3662 	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
3663 	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
3664 	.num_shared_regs	= 1,
3665 	.constraints		= skx_uncore_chabox_constraints,
3666 	.ops			= &skx_uncore_chabox_ops,
3667 	.format_group		= &skx_uncore_chabox_format_group,
3668 };
3669 
3670 static struct attribute *skx_uncore_iio_formats_attr[] = {
3671 	&format_attr_event.attr,
3672 	&format_attr_umask.attr,
3673 	&format_attr_edge.attr,
3674 	&format_attr_inv.attr,
3675 	&format_attr_thresh9.attr,
3676 	&format_attr_ch_mask.attr,
3677 	&format_attr_fc_mask.attr,
3678 	NULL,
3679 };
3680 
3681 static const struct attribute_group skx_uncore_iio_format_group = {
3682 	.name = "format",
3683 	.attrs = skx_uncore_iio_formats_attr,
3684 };
3685 
3686 static struct event_constraint skx_uncore_iio_constraints[] = {
3687 	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
3688 	UNCORE_EVENT_CONSTRAINT(0x88, 0xc),
3689 	UNCORE_EVENT_CONSTRAINT(0x95, 0xc),
3690 	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
3691 	UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
3692 	UNCORE_EVENT_CONSTRAINT(0xd4, 0xc),
3693 	UNCORE_EVENT_CONSTRAINT(0xd5, 0xc),
3694 	EVENT_CONSTRAINT_END
3695 };
3696 
3697 static void skx_iio_enable_event(struct intel_uncore_box *box,
3698 				 struct perf_event *event)
3699 {
3700 	struct hw_perf_event *hwc = &event->hw;
3701 
3702 	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
3703 }
3704 
3705 static struct intel_uncore_ops skx_uncore_iio_ops = {
3706 	.init_box		= ivbep_uncore_msr_init_box,
3707 	.disable_box		= snbep_uncore_msr_disable_box,
3708 	.enable_box		= snbep_uncore_msr_enable_box,
3709 	.disable_event		= snbep_uncore_msr_disable_event,
3710 	.enable_event		= skx_iio_enable_event,
3711 	.read_counter		= uncore_msr_read_counter,
3712 };
3713 
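/* Find the topology entry of the given die that belongs to this PMU. */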
3714 static struct intel_uncore_topology *pmu_topology(struct intel_uncore_pmu *pmu, int die)
3715 {
3716 	int idx;
3717 
3718 	for (idx = 0; idx < pmu->type->num_boxes; idx++) {
3719 		if (pmu->type->topology[die][idx].pmu_idx == pmu->pmu_idx)
3720 			return &pmu->type->topology[die][idx];
3721 	}
3722 
3723 	return NULL;
3724 }
3725 
3726 static umode_t
3727 pmu_iio_mapping_visible(struct kobject *kobj, struct attribute *attr,
3728 			 int die, int zero_bus_pmu)
3729 {
3730 	struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(kobj_to_dev(kobj));
3731 	struct intel_uncore_topology *pmut = pmu_topology(pmu, die);
3732 
3733 	return (pmut && !pmut->iio->pci_bus_no && pmu->pmu_idx != zero_bus_pmu) ? 0 : attr->mode;
3734 }
3735 
3736 static umode_t
3737 skx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
3738 {
3739 	/* Root bus 0x00 is valid only for pmu_idx = 0. */
3740 	return pmu_iio_mapping_visible(kobj, attr, die, 0);
3741 }
3742 
3743 static ssize_t skx_iio_mapping_show(struct device *dev,
3744 				    struct device_attribute *attr, char *buf)
3745 {
3746 	struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(dev);
3747 	struct dev_ext_attribute *ea = to_dev_ext_attribute(attr);
3748 	long die = (long)ea->var;
3749 	struct intel_uncore_topology *pmut = pmu_topology(pmu, die);
3750 
3751 	return sprintf(buf, "%04x:%02x\n", pmut ? pmut->iio->segment : 0,
3752 					   pmut ? pmut->iio->pci_bus_no : 0);
3753 }
3754 
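/*
 * Read the bus number MSR on @cpu; fail unless the MSR flags its
 * contents as valid.
 */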
3755 static int skx_msr_cpu_bus_read(int cpu, u64 *topology)
3756 {
3757 	u64 msr_value;
3758 
3759 	if (rdmsrl_on_cpu(cpu, SKX_MSR_CPU_BUS_NUMBER, &msr_value) ||
3760 			!(msr_value & SKX_MSR_CPU_BUS_VALID_BIT))
3761 		return -ENXIO;
3762 
3763 	*topology = msr_value;
3764 
3765 	return 0;
3766 }
3767 
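/* Return the first online CPU of the given die, or CPU 0 if none is found. */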
3768 static int die_to_cpu(int die)
3769 {
3770 	int res = 0, cpu, current_die;
3771 	/*
3772 	 * Using cpus_read_lock() to ensure cpu is not going down between
3773 	 * looking at cpu_online_mask.
3774 	 */
3775 	cpus_read_lock();
3776 	for_each_online_cpu(cpu) {
3777 		current_die = topology_logical_die_id(cpu);
3778 		if (current_die == die) {
3779 			res = cpu;
3780 			break;
3781 		}
3782 	}
3783 	cpus_read_unlock();
3784 	return res;
3785 }
3786 
3787 enum {
3788 	IIO_TOPOLOGY_TYPE,
3789 	UPI_TOPOLOGY_TYPE,
3790 	TOPOLOGY_MAX
3791 };
3792 
3793 static const size_t topology_size[TOPOLOGY_MAX] = {
3794 	sizeof(*((struct intel_uncore_topology *)NULL)->iio),
3795 	sizeof(*((struct intel_uncore_topology *)NULL)->upi)
3796 };
3797 
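/*
 * Allocate the per-die, per-box topology table together with the
 * type-specific (IIO or UPI) payload of each entry.
 */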
3798 static int pmu_alloc_topology(struct intel_uncore_type *type, int topology_type)
3799 {
3800 	int die, idx;
3801 	struct intel_uncore_topology **topology;
3802 
3803 	if (!type->num_boxes)
3804 		return -EPERM;
3805 
3806 	topology = kcalloc(uncore_max_dies(), sizeof(*topology), GFP_KERNEL);
3807 	if (!topology)
3808 		goto err;
3809 
3810 	for (die = 0; die < uncore_max_dies(); die++) {
3811 		topology[die] = kcalloc(type->num_boxes, sizeof(**topology), GFP_KERNEL);
3812 		if (!topology[die])
3813 			goto clear;
3814 		for (idx = 0; idx < type->num_boxes; idx++) {
3815 			topology[die][idx].untyped = kcalloc(type->num_boxes,
3816 							     topology_size[topology_type],
3817 							     GFP_KERNEL);
3818 			if (!topology[die][idx].untyped)
3819 				goto clear;
3820 		}
3821 	}
3822 
3823 	type->topology = topology;
3824 
3825 	return 0;
3826 clear:
3827 	for (; die >= 0; die--) {
3828 		for (idx = 0; idx < type->num_boxes; idx++)
3829 			kfree(topology[die][idx].untyped);
3830 		kfree(topology[die]);
3831 	}
3832 	kfree(topology);
3833 err:
3834 	return -ENOMEM;
3835 }
3836 
3837 static void pmu_free_topology(struct intel_uncore_type *type)
3838 {
3839 	int die, idx;
3840 
3841 	if (type->topology) {
3842 		for (die = 0; die < uncore_max_dies(); die++) {
3843 			for (idx = 0; idx < type->num_boxes; idx++)
3844 				kfree(type->topology[die][idx].untyped);
3845 			kfree(type->topology[die]);
3846 		}
3847 		kfree(type->topology);
3848 		type->topology = NULL;
3849 	}
3850 }
3851 
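/*
 * For each die, read the bus number MSR and the PCI segment, then let the
 * callback derive the per-box topology from them.
 */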
3852 static int skx_pmu_get_topology(struct intel_uncore_type *type,
3853 				 int (*topology_cb)(struct intel_uncore_type*, int, int, u64))
3854 {
3855 	int die, ret = -EPERM;
3856 	u64 cpu_bus_msr;
3857 
3858 	for (die = 0; die < uncore_max_dies(); die++) {
3859 		ret = skx_msr_cpu_bus_read(die_to_cpu(die), &cpu_bus_msr);
3860 		if (ret)
3861 			break;
3862 
3863 		ret = uncore_die_to_segment(die);
3864 		if (ret < 0)
3865 			break;
3866 
3867 		ret = topology_cb(type, ret, die, cpu_bus_msr);
3868 		if (ret)
3869 			break;
3870 	}
3871 
3872 	return ret;
3873 }
3874 
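/* Each IIO stack takes BUS_NUM_STRIDE bits of the bus number MSR. */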
3875 static int skx_iio_topology_cb(struct intel_uncore_type *type, int segment,
3876 				int die, u64 cpu_bus_msr)
3877 {
3878 	int idx;
3879 	struct intel_uncore_topology *t;
3880 
3881 	for (idx = 0; idx < type->num_boxes; idx++) {
3882 		t = &type->topology[die][idx];
3883 		t->pmu_idx = idx;
3884 		t->iio->segment = segment;
3885 		t->iio->pci_bus_no = (cpu_bus_msr >> (idx * BUS_NUM_STRIDE)) & 0xff;
3886 	}
3887 
3888 	return 0;
3889 }
3890 
3891 static int skx_iio_get_topology(struct intel_uncore_type *type)
3892 {
3893 	return skx_pmu_get_topology(type, skx_iio_topology_cb);
3894 }
3895 
3896 static struct attribute_group skx_iio_mapping_group = {
3897 	.is_visible	= skx_iio_mapping_visible,
3898 };
3899 
3900 static const struct attribute_group *skx_iio_attr_update[] = {
3901 	&skx_iio_mapping_group,
3902 	NULL,
3903 };
3904 
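/* Drop @ag from the attr_update list if the mapping cannot be exposed. */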
3905 static void pmu_clear_mapping_attr(const struct attribute_group **groups,
3906 				   struct attribute_group *ag)
3907 {
3908 	int i;
3909 
3910 	for (i = 0; groups[i]; i++) {
3911 		if (groups[i] == ag) {
3912 			for (i++; groups[i]; i++)
3913 				groups[i - 1] = groups[i];
3914 			groups[i - 1] = NULL;
3915 			break;
3916 		}
3917 	}
3918 }
3919 
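/*
 * Create one read-only "dieN" sysfs attribute per die from the topology
 * gathered by type->get_topology().  On failure the mapping group is
 * removed from attr_update instead.
 */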
3920 static void
3921 pmu_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag,
3922 		ssize_t (*show)(struct device*, struct device_attribute*, char*),
3923 		int topology_type)
3924 {
3925 	char buf[64];
3926 	int ret;
3927 	long die = -1;
3928 	struct attribute **attrs = NULL;
3929 	struct dev_ext_attribute *eas = NULL;
3930 
3931 	ret = pmu_alloc_topology(type, topology_type);
3932 	if (ret < 0)
3933 		goto clear_attr_update;
3934 
3935 	ret = type->get_topology(type);
3936 	if (ret < 0)
3937 		goto clear_topology;
3938 
3939 	/* One more for NULL. */
3940 	attrs = kcalloc((uncore_max_dies() + 1), sizeof(*attrs), GFP_KERNEL);
3941 	if (!attrs)
3942 		goto clear_topology;
3943 
3944 	eas = kcalloc(uncore_max_dies(), sizeof(*eas), GFP_KERNEL);
3945 	if (!eas)
3946 		goto clear_attrs;
3947 
3948 	for (die = 0; die < uncore_max_dies(); die++) {
3949 		snprintf(buf, sizeof(buf), "die%ld", die);
3950 		sysfs_attr_init(&eas[die].attr.attr);
3951 		eas[die].attr.attr.name = kstrdup(buf, GFP_KERNEL);
3952 		if (!eas[die].attr.attr.name)
3953 			goto err;
3954 		eas[die].attr.attr.mode = 0444;
3955 		eas[die].attr.show = show;
3956 		eas[die].attr.store = NULL;
3957 		eas[die].var = (void *)die;
3958 		attrs[die] = &eas[die].attr.attr;
3959 	}
3960 	ag->attrs = attrs;
3961 
3962 	return;
3963 err:
3964 	for (; die >= 0; die--)
3965 		kfree(eas[die].attr.attr.name);
3966 	kfree(eas);
3967 clear_attrs:
3968 	kfree(attrs);
3969 clear_topology:
3970 	pmu_free_topology(type);
3971 clear_attr_update:
3972 	pmu_clear_mapping_attr(type->attr_update, ag);
3973 }
3974 
3975 static void
3976 pmu_cleanup_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
3977 {
3978 	struct attribute **attr = ag->attrs;
3979 
3980 	if (!attr)
3981 		return;
3982 
3983 	for (; *attr; attr++)
3984 		kfree((*attr)->name);
3985 	kfree(attr_to_ext_attr(*ag->attrs));
3986 	kfree(ag->attrs);
3987 	ag->attrs = NULL;
3988 	pmu_free_topology(type);
3989 }
3990 
3991 static void
3992 pmu_iio_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
3993 {
3994 	pmu_set_mapping(type, ag, skx_iio_mapping_show, IIO_TOPOLOGY_TYPE);
3995 }
3996 
3997 static void skx_iio_set_mapping(struct intel_uncore_type *type)
3998 {
3999 	pmu_iio_set_mapping(type, &skx_iio_mapping_group);
4000 }
4001 
4002 static void skx_iio_cleanup_mapping(struct intel_uncore_type *type)
4003 {
4004 	pmu_cleanup_mapping(type, &skx_iio_mapping_group);
4005 }
4006 
4007 static struct intel_uncore_type skx_uncore_iio = {
4008 	.name			= "iio",
4009 	.num_counters		= 4,
4010 	.num_boxes		= 6,
4011 	.perf_ctr_bits		= 48,
4012 	.event_ctl		= SKX_IIO0_MSR_PMON_CTL0,
4013 	.perf_ctr		= SKX_IIO0_MSR_PMON_CTR0,
4014 	.event_mask		= SKX_IIO_PMON_RAW_EVENT_MASK,
4015 	.event_mask_ext		= SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
4016 	.box_ctl		= SKX_IIO0_MSR_PMON_BOX_CTL,
4017 	.msr_offset		= SKX_IIO_MSR_OFFSET,
4018 	.constraints		= skx_uncore_iio_constraints,
4019 	.ops			= &skx_uncore_iio_ops,
4020 	.format_group		= &skx_uncore_iio_format_group,
4021 	.attr_update		= skx_iio_attr_update,
4022 	.get_topology		= skx_iio_get_topology,
4023 	.set_mapping		= skx_iio_set_mapping,
4024 	.cleanup_mapping	= skx_iio_cleanup_mapping,
4025 };
4026 
4027 enum perf_uncore_iio_freerunning_type_id {
4028 	SKX_IIO_MSR_IOCLK			= 0,
4029 	SKX_IIO_MSR_BW				= 1,
4030 	SKX_IIO_MSR_UTIL			= 2,
4031 
4032 	SKX_IIO_FREERUNNING_TYPE_MAX,
4033 };
4034 
4035 
4036 static struct freerunning_counters skx_iio_freerunning[] = {
4037 	[SKX_IIO_MSR_IOCLK]	= { 0xa45, 0x1, 0x20, 1, 36 },
4038 	[SKX_IIO_MSR_BW]	= { 0xb00, 0x1, 0x10, 8, 36 },
4039 	[SKX_IIO_MSR_UTIL]	= { 0xb08, 0x1, 0x10, 8, 36 },
4040 };
4041 
4042 static struct uncore_event_desc skx_uncore_iio_freerunning_events[] = {
4043 	/* Free-Running IO CLOCKS Counter */
4044 	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
4045 	/* Free-Running IIO BANDWIDTH Counters */
4046 	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
4047 	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
4048 	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
4049 	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
4050 	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
4051 	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
4052 	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
4053 	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
4054 	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
4055 	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
4056 	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
4057 	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
4058 	INTEL_UNCORE_EVENT_DESC(bw_out_port0,		"event=0xff,umask=0x24"),
4059 	INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale,	"3.814697266e-6"),
4060 	INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit,	"MiB"),
4061 	INTEL_UNCORE_EVENT_DESC(bw_out_port1,		"event=0xff,umask=0x25"),
4062 	INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale,	"3.814697266e-6"),
4063 	INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit,	"MiB"),
4064 	INTEL_UNCORE_EVENT_DESC(bw_out_port2,		"event=0xff,umask=0x26"),
4065 	INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale,	"3.814697266e-6"),
4066 	INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit,	"MiB"),
4067 	INTEL_UNCORE_EVENT_DESC(bw_out_port3,		"event=0xff,umask=0x27"),
4068 	INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale,	"3.814697266e-6"),
4069 	INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit,	"MiB"),
4070 	/* Free-running IIO UTILIZATION Counters */
4071 	INTEL_UNCORE_EVENT_DESC(util_in_port0,		"event=0xff,umask=0x30"),
4072 	INTEL_UNCORE_EVENT_DESC(util_out_port0,		"event=0xff,umask=0x31"),
4073 	INTEL_UNCORE_EVENT_DESC(util_in_port1,		"event=0xff,umask=0x32"),
4074 	INTEL_UNCORE_EVENT_DESC(util_out_port1,		"event=0xff,umask=0x33"),
4075 	INTEL_UNCORE_EVENT_DESC(util_in_port2,		"event=0xff,umask=0x34"),
4076 	INTEL_UNCORE_EVENT_DESC(util_out_port2,		"event=0xff,umask=0x35"),
4077 	INTEL_UNCORE_EVENT_DESC(util_in_port3,		"event=0xff,umask=0x36"),
4078 	INTEL_UNCORE_EVENT_DESC(util_out_port3,		"event=0xff,umask=0x37"),
4079 	{ /* end: all zeroes */ },
4080 };
4081 
4082 static struct intel_uncore_ops skx_uncore_iio_freerunning_ops = {
4083 	.read_counter		= uncore_msr_read_counter,
4084 	.hw_config		= uncore_freerunning_hw_config,
4085 };
4086 
4087 static struct attribute *skx_uncore_iio_freerunning_formats_attr[] = {
4088 	&format_attr_event.attr,
4089 	&format_attr_umask.attr,
4090 	NULL,
4091 };
4092 
4093 static const struct attribute_group skx_uncore_iio_freerunning_format_group = {
4094 	.name = "format",
4095 	.attrs = skx_uncore_iio_freerunning_formats_attr,
4096 };
4097 
4098 static struct intel_uncore_type skx_uncore_iio_free_running = {
4099 	.name			= "iio_free_running",
4100 	.num_counters		= 17,
4101 	.num_boxes		= 6,
4102 	.num_freerunning_types	= SKX_IIO_FREERUNNING_TYPE_MAX,
4103 	.freerunning		= skx_iio_freerunning,
4104 	.ops			= &skx_uncore_iio_freerunning_ops,
4105 	.event_descs		= skx_uncore_iio_freerunning_events,
4106 	.format_group		= &skx_uncore_iio_freerunning_format_group,
4107 };
4108 
4109 static struct attribute *skx_uncore_formats_attr[] = {
4110 	&format_attr_event.attr,
4111 	&format_attr_umask.attr,
4112 	&format_attr_edge.attr,
4113 	&format_attr_inv.attr,
4114 	&format_attr_thresh8.attr,
4115 	NULL,
4116 };
4117 
4118 static const struct attribute_group skx_uncore_format_group = {
4119 	.name = "format",
4120 	.attrs = skx_uncore_formats_attr,
4121 };
4122 
4123 static struct intel_uncore_type skx_uncore_irp = {
4124 	.name			= "irp",
4125 	.num_counters		= 2,
4126 	.num_boxes		= 6,
4127 	.perf_ctr_bits		= 48,
4128 	.event_ctl		= SKX_IRP0_MSR_PMON_CTL0,
4129 	.perf_ctr		= SKX_IRP0_MSR_PMON_CTR0,
4130 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
4131 	.box_ctl		= SKX_IRP0_MSR_PMON_BOX_CTL,
4132 	.msr_offset		= SKX_IRP_MSR_OFFSET,
4133 	.ops			= &skx_uncore_iio_ops,
4134 	.format_group		= &skx_uncore_format_group,
4135 };
4136 
4137 static struct attribute *skx_uncore_pcu_formats_attr[] = {
4138 	&format_attr_event.attr,
4139 	&format_attr_umask.attr,
4140 	&format_attr_edge.attr,
4141 	&format_attr_inv.attr,
4142 	&format_attr_thresh8.attr,
4143 	&format_attr_occ_invert.attr,
4144 	&format_attr_occ_edge_det.attr,
4145 	&format_attr_filter_band0.attr,
4146 	&format_attr_filter_band1.attr,
4147 	&format_attr_filter_band2.attr,
4148 	&format_attr_filter_band3.attr,
4149 	NULL,
4150 };
4151 
4152 static struct attribute_group skx_uncore_pcu_format_group = {
4153 	.name = "format",
4154 	.attrs = skx_uncore_pcu_formats_attr,
4155 };
4156 
4157 static struct intel_uncore_ops skx_uncore_pcu_ops = {
4158 	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
4159 	.hw_config		= hswep_pcu_hw_config,
4160 	.get_constraint		= snbep_pcu_get_constraint,
4161 	.put_constraint		= snbep_pcu_put_constraint,
4162 };
4163 
4164 static struct intel_uncore_type skx_uncore_pcu = {
4165 	.name			= "pcu",
4166 	.num_counters		= 4,
4167 	.num_boxes		= 1,
4168 	.perf_ctr_bits		= 48,
4169 	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
4170 	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
4171 	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
4172 	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
4173 	.num_shared_regs	= 1,
4174 	.ops			= &skx_uncore_pcu_ops,
4175 	.format_group		= &skx_uncore_pcu_format_group,
4176 };
4177 
4178 static struct intel_uncore_type *skx_msr_uncores[] = {
4179 	&skx_uncore_ubox,
4180 	&skx_uncore_chabox,
4181 	&skx_uncore_iio,
4182 	&skx_uncore_iio_free_running,
4183 	&skx_uncore_irp,
4184 	&skx_uncore_pcu,
4185 	NULL,
4186 };
4187 
4188 /*
4189  * To determine the number of CHAs, read bits 27:0 of the CAPID6 register,
4190  * which is located at Device 30, Function 3, Offset 0x9C (PCI ID 0x2083).
4191  */
4192 #define SKX_CAPID6		0x9c
4193 #define SKX_CHA_BIT_MASK	GENMASK(27, 0)
4194 
4195 static int skx_count_chabox(void)
4196 {
4197 	struct pci_dev *dev = NULL;
4198 	u32 val = 0;
4199 
4200 	dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2083, dev);
4201 	if (!dev)
4202 		goto out;
4203 
4204 	pci_read_config_dword(dev, SKX_CAPID6, &val);
4205 	val &= SKX_CHA_BIT_MASK;
4206 out:
4207 	pci_dev_put(dev);
4208 	return hweight32(val);
4209 }
4210 
4211 void skx_uncore_cpu_init(void)
4212 {
4213 	skx_uncore_chabox.num_boxes = skx_count_chabox();
4214 	uncore_msr_uncores = skx_msr_uncores;
4215 }
4216 
4217 static struct intel_uncore_type skx_uncore_imc = {
4218 	.name		= "imc",
4219 	.num_counters   = 4,
4220 	.num_boxes	= 6,
4221 	.perf_ctr_bits	= 48,
4222 	.fixed_ctr_bits	= 48,
4223 	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
4224 	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
4225 	.event_descs	= hswep_uncore_imc_events,
4226 	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
4227 	.event_ctl	= SNBEP_PCI_PMON_CTL0,
4228 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
4229 	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
4230 	.ops		= &ivbep_uncore_pci_ops,
4231 	.format_group	= &skx_uncore_format_group,
4232 };
4233 
4234 static struct attribute *skx_upi_uncore_formats_attr[] = {
4235 	&format_attr_event.attr,
4236 	&format_attr_umask_ext.attr,
4237 	&format_attr_edge.attr,
4238 	&format_attr_inv.attr,
4239 	&format_attr_thresh8.attr,
4240 	NULL,
4241 };
4242 
4243 static const struct attribute_group skx_upi_uncore_format_group = {
4244 	.name = "format",
4245 	.attrs = skx_upi_uncore_formats_attr,
4246 };
4247 
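/* The UPI PMON event control registers are 8 bytes apart. */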
4248 static void skx_upi_uncore_pci_init_box(struct intel_uncore_box *box)
4249 {
4250 	struct pci_dev *pdev = box->pci_dev;
4251 
4252 	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
4253 	pci_write_config_dword(pdev, SKX_UPI_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
4254 }
4255 
4256 static struct intel_uncore_ops skx_upi_uncore_pci_ops = {
4257 	.init_box	= skx_upi_uncore_pci_init_box,
4258 	.disable_box	= snbep_uncore_pci_disable_box,
4259 	.enable_box	= snbep_uncore_pci_enable_box,
4260 	.disable_event	= snbep_uncore_pci_disable_event,
4261 	.enable_event	= snbep_uncore_pci_enable_event,
4262 	.read_counter	= snbep_uncore_pci_read_counter,
4263 };
4264 
4265 static umode_t
4266 skx_upi_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
4267 {
4268 	struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(kobj_to_dev(kobj));
4269 
4270 	return pmu->type->topology[die][pmu->pmu_idx].upi->enabled ? attr->mode : 0;
4271 }
4272 
4273 static ssize_t skx_upi_mapping_show(struct device *dev,
4274 				    struct device_attribute *attr, char *buf)
4275 {
4276 	struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(dev);
4277 	struct dev_ext_attribute *ea = to_dev_ext_attribute(attr);
4278 	long die = (long)ea->var;
4279 	struct uncore_upi_topology *upi = pmu->type->topology[die][pmu->pmu_idx].upi;
4280 
4281 	return sysfs_emit(buf, "upi_%d,die_%d\n", upi->pmu_idx_to, upi->die_to);
4282 }
4283 
4284 #define SKX_UPI_REG_DID			0x2058
4285 #define SKX_UPI_REGS_ADDR_DEVICE_LINK0	0x0e
4286 #define SKX_UPI_REGS_ADDR_FUNCTION	0x00
4287 
4288 /*
4289  * UPI Link Parameter 0
4290  * |  Bit  |  Default  |  Description
4291  * | 19:16 |     0h    | base_nodeid - The NodeID of the sending socket.
4292  * | 12:8  |    00h    | sending_port - The processor die port number of the sending port.
4293  */
4294 #define SKX_KTILP0_OFFSET	0x94
4295 
4296 /*
4297  * UPI Pcode Status. This register is used by PCode to store the link training status.
4298  * |  Bit  |  Default  |  Description
4299  * |   4   |     0h    | ll_status_valid — Bit indicates the valid training status
4300  *                       logged from PCode to the BIOS.
4301  */
4302 #define SKX_KTIPCSTS_OFFSET	0x120
4303 
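/*
 * Fill a UPI topology entry: the link training status from KTIPCSTS and,
 * for enabled links, the NodeID and port number logged in KTILP0.
 */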
4304 static int upi_fill_topology(struct pci_dev *dev, struct intel_uncore_topology *tp,
4305 			     int pmu_idx)
4306 {
4307 	int ret;
4308 	u32 upi_conf;
4309 	struct uncore_upi_topology *upi = tp->upi;
4310 
4311 	tp->pmu_idx = pmu_idx;
4312 	ret = pci_read_config_dword(dev, SKX_KTIPCSTS_OFFSET, &upi_conf);
4313 	if (ret) {
4314 		ret = pcibios_err_to_errno(ret);
4315 		goto err;
4316 	}
4317 	upi->enabled = (upi_conf >> 4) & 1;
4318 	if (upi->enabled) {
4319 		ret = pci_read_config_dword(dev, SKX_KTILP0_OFFSET,
4320 					    &upi_conf);
4321 		if (ret) {
4322 			ret = pcibios_err_to_errno(ret);
4323 			goto err;
4324 		}
4325 		upi->die_to = (upi_conf >> 16) & 0xf;
4326 		upi->pmu_idx_to = (upi_conf >> 8) & 0x1f;
4327 	}
4328 err:
4329 	return ret;
4330 }
4331 
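/*
 * The UPI devices sit on the bus encoded in the fourth stride of the bus
 * number MSR, at consecutive device numbers starting from LINK0.
 */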
4332 static int skx_upi_topology_cb(struct intel_uncore_type *type, int segment,
4333 				int die, u64 cpu_bus_msr)
4334 {
4335 	int idx, ret = 0;
4336 	struct intel_uncore_topology *upi;
4337 	unsigned int devfn;
4338 	struct pci_dev *dev = NULL;
4339 	u8 bus = cpu_bus_msr >> (3 * BUS_NUM_STRIDE);
4340 
4341 	for (idx = 0; idx < type->num_boxes; idx++) {
4342 		upi = &type->topology[die][idx];
4343 		devfn = PCI_DEVFN(SKX_UPI_REGS_ADDR_DEVICE_LINK0 + idx,
4344 				  SKX_UPI_REGS_ADDR_FUNCTION);
4345 		dev = pci_get_domain_bus_and_slot(segment, bus, devfn);
4346 		if (dev) {
4347 			ret = upi_fill_topology(dev, upi, idx);
4348 			if (ret)
4349 				break;
4350 		}
4351 	}
4352 
4353 	pci_dev_put(dev);
4354 	return ret;
4355 }
4356 
4357 static int skx_upi_get_topology(struct intel_uncore_type *type)
4358 {
4359 	/* CPX case is not supported */
4360 	if (boot_cpu_data.x86_stepping == 11)
4361 		return -EPERM;
4362 
4363 	return skx_pmu_get_topology(type, skx_upi_topology_cb);
4364 }
4365 
4366 static struct attribute_group skx_upi_mapping_group = {
4367 	.is_visible	= skx_upi_mapping_visible,
4368 };
4369 
4370 static const struct attribute_group *skx_upi_attr_update[] = {
4371 	&skx_upi_mapping_group,
4372 	NULL
4373 };
4374 
4375 static void
4376 pmu_upi_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
4377 {
4378 	pmu_set_mapping(type, ag, skx_upi_mapping_show, UPI_TOPOLOGY_TYPE);
4379 }
4380 
4381 static void skx_upi_set_mapping(struct intel_uncore_type *type)
4382 {
4383 	pmu_upi_set_mapping(type, &skx_upi_mapping_group);
4384 }
4385 
4386 static void skx_upi_cleanup_mapping(struct intel_uncore_type *type)
4387 {
4388 	pmu_cleanup_mapping(type, &skx_upi_mapping_group);
4389 }
4390 
4391 static struct intel_uncore_type skx_uncore_upi = {
4392 	.name		= "upi",
4393 	.num_counters   = 4,
4394 	.num_boxes	= 3,
4395 	.perf_ctr_bits	= 48,
4396 	.perf_ctr	= SKX_UPI_PCI_PMON_CTR0,
4397 	.event_ctl	= SKX_UPI_PCI_PMON_CTL0,
4398 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
4399 	.event_mask_ext = SKX_UPI_CTL_UMASK_EXT,
4400 	.box_ctl	= SKX_UPI_PCI_PMON_BOX_CTL,
4401 	.ops		= &skx_upi_uncore_pci_ops,
4402 	.format_group	= &skx_upi_uncore_format_group,
4403 	.attr_update	= skx_upi_attr_update,
4404 	.get_topology	= skx_upi_get_topology,
4405 	.set_mapping	= skx_upi_set_mapping,
4406 	.cleanup_mapping = skx_upi_cleanup_mapping,
4407 };
4408 
4409 static void skx_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
4410 {
4411 	struct pci_dev *pdev = box->pci_dev;
4412 
4413 	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
4414 	pci_write_config_dword(pdev, SKX_M2M_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
4415 }
4416 
4417 static struct intel_uncore_ops skx_m2m_uncore_pci_ops = {
4418 	.init_box	= skx_m2m_uncore_pci_init_box,
4419 	.disable_box	= snbep_uncore_pci_disable_box,
4420 	.enable_box	= snbep_uncore_pci_enable_box,
4421 	.disable_event	= snbep_uncore_pci_disable_event,
4422 	.enable_event	= snbep_uncore_pci_enable_event,
4423 	.read_counter	= snbep_uncore_pci_read_counter,
4424 };
4425 
4426 static struct intel_uncore_type skx_uncore_m2m = {
4427 	.name		= "m2m",
4428 	.num_counters   = 4,
4429 	.num_boxes	= 2,
4430 	.perf_ctr_bits	= 48,
4431 	.perf_ctr	= SKX_M2M_PCI_PMON_CTR0,
4432 	.event_ctl	= SKX_M2M_PCI_PMON_CTL0,
4433 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
4434 	.box_ctl	= SKX_M2M_PCI_PMON_BOX_CTL,
4435 	.ops		= &skx_m2m_uncore_pci_ops,
4436 	.format_group	= &skx_uncore_format_group,
4437 };
4438 
4439 static struct event_constraint skx_uncore_m2pcie_constraints[] = {
4440 	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
4441 	EVENT_CONSTRAINT_END
4442 };
4443 
4444 static struct intel_uncore_type skx_uncore_m2pcie = {
4445 	.name		= "m2pcie",
4446 	.num_counters   = 4,
4447 	.num_boxes	= 4,
4448 	.perf_ctr_bits	= 48,
4449 	.constraints	= skx_uncore_m2pcie_constraints,
4450 	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
4451 	.event_ctl	= SNBEP_PCI_PMON_CTL0,
4452 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
4453 	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
4454 	.ops		= &ivbep_uncore_pci_ops,
4455 	.format_group	= &skx_uncore_format_group,
4456 };
4457 
4458 static struct event_constraint skx_uncore_m3upi_constraints[] = {
4459 	UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
4460 	UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
4461 	UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
4462 	UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
4463 	UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
4464 	UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
4465 	UNCORE_EVENT_CONSTRAINT(0x51, 0x7),
4466 	UNCORE_EVENT_CONSTRAINT(0x52, 0x7),
4467 	EVENT_CONSTRAINT_END
4468 };
4469 
4470 static struct intel_uncore_type skx_uncore_m3upi = {
4471 	.name		= "m3upi",
4472 	.num_counters   = 3,
4473 	.num_boxes	= 3,
4474 	.perf_ctr_bits	= 48,
4475 	.constraints	= skx_uncore_m3upi_constraints,
4476 	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
4477 	.event_ctl	= SNBEP_PCI_PMON_CTL0,
4478 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
4479 	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
4480 	.ops		= &ivbep_uncore_pci_ops,
4481 	.format_group	= &skx_uncore_format_group,
4482 };
4483 
4484 enum {
4485 	SKX_PCI_UNCORE_IMC,
4486 	SKX_PCI_UNCORE_M2M,
4487 	SKX_PCI_UNCORE_UPI,
4488 	SKX_PCI_UNCORE_M2PCIE,
4489 	SKX_PCI_UNCORE_M3UPI,
4490 };
4491 
4492 static struct intel_uncore_type *skx_pci_uncores[] = {
4493 	[SKX_PCI_UNCORE_IMC]	= &skx_uncore_imc,
4494 	[SKX_PCI_UNCORE_M2M]	= &skx_uncore_m2m,
4495 	[SKX_PCI_UNCORE_UPI]	= &skx_uncore_upi,
4496 	[SKX_PCI_UNCORE_M2PCIE]	= &skx_uncore_m2pcie,
4497 	[SKX_PCI_UNCORE_M3UPI]	= &skx_uncore_m3upi,
4498 	NULL,
4499 };
4500 
4501 static const struct pci_device_id skx_uncore_pci_ids[] = {
4502 	{ /* MC0 Channel 0 */
4503 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
4504 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 2, SKX_PCI_UNCORE_IMC, 0),
4505 	},
4506 	{ /* MC0 Channel 1 */
4507 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
4508 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 6, SKX_PCI_UNCORE_IMC, 1),
4509 	},
4510 	{ /* MC0 Channel 2 */
4511 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
4512 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 2, SKX_PCI_UNCORE_IMC, 2),
4513 	},
4514 	{ /* MC1 Channel 0 */
4515 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
4516 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 2, SKX_PCI_UNCORE_IMC, 3),
4517 	},
4518 	{ /* MC1 Channel 1 */
4519 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
4520 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 6, SKX_PCI_UNCORE_IMC, 4),
4521 	},
4522 	{ /* MC1 Channel 2 */
4523 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
4524 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 2, SKX_PCI_UNCORE_IMC, 5),
4525 	},
4526 	{ /* M2M0 */
4527 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
4528 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 0, SKX_PCI_UNCORE_M2M, 0),
4529 	},
4530 	{ /* M2M1 */
4531 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
4532 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 0, SKX_PCI_UNCORE_M2M, 1),
4533 	},
4534 	{ /* UPI0 Link 0 */
4535 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
4536 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, SKX_PCI_UNCORE_UPI, 0),
4537 	},
4538 	{ /* UPI0 Link 1 */
4539 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
4540 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, SKX_PCI_UNCORE_UPI, 1),
4541 	},
4542 	{ /* UPI1 Link 2 */
4543 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
4544 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, SKX_PCI_UNCORE_UPI, 2),
4545 	},
4546 	{ /* M2PCIe 0 */
4547 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
4548 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 1, SKX_PCI_UNCORE_M2PCIE, 0),
4549 	},
4550 	{ /* M2PCIe 1 */
4551 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
4552 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 1, SKX_PCI_UNCORE_M2PCIE, 1),
4553 	},
4554 	{ /* M2PCIe 2 */
4555 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
4556 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(23, 1, SKX_PCI_UNCORE_M2PCIE, 2),
4557 	},
4558 	{ /* M2PCIe 3 */
4559 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
4560 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3),
4561 	},
4562 	{ /* M3UPI0 Link 0 */
4563 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
4564 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 0),
4565 	},
4566 	{ /* M3UPI0 Link 1 */
4567 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204E),
4568 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 2, SKX_PCI_UNCORE_M3UPI, 1),
4569 	},
4570 	{ /* M3UPI1 Link 2 */
4571 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
4572 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 5, SKX_PCI_UNCORE_M3UPI, 2),
4573 	},
4574 	{ /* end: all zeroes */ }
4575 };
4576 
4577 
4578 static struct pci_driver skx_uncore_pci_driver = {
4579 	.name		= "skx_uncore",
4580 	.id_table	= skx_uncore_pci_ids,
4581 };
4582 
4583 int skx_uncore_pci_init(void)
4584 {
4585 	/* need to double check pci address */
4586 	int ret = snbep_pci2phy_map_init(0x2014, SKX_CPUNODEID, SKX_GIDNIDMAP, false);
4587 
4588 	if (ret)
4589 		return ret;
4590 
4591 	uncore_pci_uncores = skx_pci_uncores;
4592 	uncore_pci_driver = &skx_uncore_pci_driver;
4593 	return 0;
4594 }
4595 
4596 /* end of SKX uncore support */
4597 
4598 /* SNR uncore support */
4599 
4600 static struct intel_uncore_type snr_uncore_ubox = {
4601 	.name			= "ubox",
4602 	.num_counters		= 2,
4603 	.num_boxes		= 1,
4604 	.perf_ctr_bits		= 48,
4605 	.fixed_ctr_bits		= 48,
4606 	.perf_ctr		= SNR_U_MSR_PMON_CTR0,
4607 	.event_ctl		= SNR_U_MSR_PMON_CTL0,
4608 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
4609 	.fixed_ctr		= SNR_U_MSR_PMON_UCLK_FIXED_CTR,
4610 	.fixed_ctl		= SNR_U_MSR_PMON_UCLK_FIXED_CTL,
4611 	.ops			= &ivbep_uncore_msr_ops,
4612 	.format_group		= &ivbep_uncore_format_group,
4613 };
4614 
4615 static struct attribute *snr_uncore_cha_formats_attr[] = {
4616 	&format_attr_event.attr,
4617 	&format_attr_umask_ext2.attr,
4618 	&format_attr_edge.attr,
4619 	&format_attr_tid_en.attr,
4620 	&format_attr_inv.attr,
4621 	&format_attr_thresh8.attr,
4622 	&format_attr_filter_tid5.attr,
4623 	NULL,
4624 };
4625 static const struct attribute_group snr_uncore_chabox_format_group = {
4626 	.name = "format",
4627 	.attrs = snr_uncore_cha_formats_attr,
4628 };
4629 
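/* The SNR CHA TID filter is always programmed; config1 supplies the TID. */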
4630 static int snr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4631 {
4632 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
4633 
4634 	reg1->reg = SNR_C0_MSR_PMON_BOX_FILTER0 +
4635 		    box->pmu->type->msr_offset * box->pmu->pmu_idx;
4636 	reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
4637 	reg1->idx = 0;
4638 
4639 	return 0;
4640 }
4641 
4642 static void snr_cha_enable_event(struct intel_uncore_box *box,
4643 				   struct perf_event *event)
4644 {
4645 	struct hw_perf_event *hwc = &event->hw;
4646 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
4647 
4648 	if (reg1->idx != EXTRA_REG_NONE)
4649 		wrmsrl(reg1->reg, reg1->config);
4650 
4651 	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
4652 }
4653 
4654 static struct intel_uncore_ops snr_uncore_chabox_ops = {
4655 	.init_box		= ivbep_uncore_msr_init_box,
4656 	.disable_box		= snbep_uncore_msr_disable_box,
4657 	.enable_box		= snbep_uncore_msr_enable_box,
4658 	.disable_event		= snbep_uncore_msr_disable_event,
4659 	.enable_event		= snr_cha_enable_event,
4660 	.read_counter		= uncore_msr_read_counter,
4661 	.hw_config		= snr_cha_hw_config,
4662 };
4663 
4664 static struct intel_uncore_type snr_uncore_chabox = {
4665 	.name			= "cha",
4666 	.num_counters		= 4,
4667 	.num_boxes		= 6,
4668 	.perf_ctr_bits		= 48,
4669 	.event_ctl		= SNR_CHA_MSR_PMON_CTL0,
4670 	.perf_ctr		= SNR_CHA_MSR_PMON_CTR0,
4671 	.box_ctl		= SNR_CHA_MSR_PMON_BOX_CTL,
4672 	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
4673 	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
4674 	.event_mask_ext		= SNR_CHA_RAW_EVENT_MASK_EXT,
4675 	.ops			= &snr_uncore_chabox_ops,
4676 	.format_group		= &snr_uncore_chabox_format_group,
4677 };
4678 
4679 static struct attribute *snr_uncore_iio_formats_attr[] = {
4680 	&format_attr_event.attr,
4681 	&format_attr_umask.attr,
4682 	&format_attr_edge.attr,
4683 	&format_attr_inv.attr,
4684 	&format_attr_thresh9.attr,
4685 	&format_attr_ch_mask2.attr,
4686 	&format_attr_fc_mask2.attr,
4687 	NULL,
4688 };
4689 
4690 static const struct attribute_group snr_uncore_iio_format_group = {
4691 	.name = "format",
4692 	.attrs = snr_uncore_iio_formats_attr,
4693 };
4694 
4695 static umode_t
snr_iio_mapping_visible(struct kobject * kobj,struct attribute * attr,int die)4696 snr_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
4697 {
4698 	/* Root bus 0x00 is valid only for pmu_idx = 1. */
4699 	return pmu_iio_mapping_visible(kobj, attr, die, 1);
4700 }
4701 
4702 static struct attribute_group snr_iio_mapping_group = {
4703 	.is_visible	= snr_iio_mapping_visible,
4704 };
4705 
4706 static const struct attribute_group *snr_iio_attr_update[] = {
4707 	&snr_iio_mapping_group,
4708 	NULL,
4709 };
4710 
sad_cfg_iio_topology(struct intel_uncore_type * type,u8 * sad_pmon_mapping)4711 static int sad_cfg_iio_topology(struct intel_uncore_type *type, u8 *sad_pmon_mapping)
4712 {
4713 	u32 sad_cfg;
4714 	int die, stack_id, ret = -EPERM;
4715 	struct pci_dev *dev = NULL;
4716 
4717 	while ((dev = pci_get_device(PCI_VENDOR_ID_INTEL, SNR_ICX_MESH2IIO_MMAP_DID, dev))) {
4718 		ret = pci_read_config_dword(dev, SNR_ICX_SAD_CONTROL_CFG, &sad_cfg);
4719 		if (ret) {
4720 			ret = pcibios_err_to_errno(ret);
4721 			break;
4722 		}
4723 
4724 		die = uncore_pcibus_to_dieid(dev->bus);
4725 		stack_id = SAD_CONTROL_STACK_ID(sad_cfg);
4726 		if (die < 0 || stack_id >= type->num_boxes) {
4727 			ret = -EPERM;
4728 			break;
4729 		}
4730 
4731 		/* Convert stack id from SAD_CONTROL to PMON notation. */
4732 		stack_id = sad_pmon_mapping[stack_id];
4733 
4734 		type->topology[die][stack_id].iio->segment = pci_domain_nr(dev->bus);
4735 		type->topology[die][stack_id].pmu_idx = stack_id;
4736 		type->topology[die][stack_id].iio->pci_bus_no = dev->bus->number;
4737 	}
4738 
4739 	pci_dev_put(dev);
4740 
4741 	return ret;
4742 }
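
/*
 * The walk above visits every Mesh2IIO device in the system, reads
 * SAD_CONTROL_CFG to learn which IIO stack the device fronts, remaps
 * that stack id to PMON numbering through the per-generation table,
 * and records the PCI segment and bus of the stack for the given die.
 */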

/*
 * SNR has a static mapping of stack IDs from SAD_CONTROL_CFG notation to PMON
 */
enum {
	SNR_QAT_PMON_ID,
	SNR_CBDMA_DMI_PMON_ID,
	SNR_NIS_PMON_ID,
	SNR_DLB_PMON_ID,
	SNR_PCIE_GEN3_PMON_ID
};

static u8 snr_sad_pmon_mapping[] = {
	SNR_CBDMA_DMI_PMON_ID,
	SNR_PCIE_GEN3_PMON_ID,
	SNR_DLB_PMON_ID,
	SNR_NIS_PMON_ID,
	SNR_QAT_PMON_ID
};
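
/*
 * The table above is indexed by SAD_CONTROL_CFG stack id; the value is
 * the PMON box number. E.g. SAD stack 0 is the CBDMA/DMI stack, which
 * PMON exposes as box 1.
 */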

static int snr_iio_get_topology(struct intel_uncore_type *type)
{
	return sad_cfg_iio_topology(type, snr_sad_pmon_mapping);
}

static void snr_iio_set_mapping(struct intel_uncore_type *type)
{
	pmu_iio_set_mapping(type, &snr_iio_mapping_group);
}

static void snr_iio_cleanup_mapping(struct intel_uncore_type *type)
{
	pmu_cleanup_mapping(type, &snr_iio_mapping_group);
}

static struct event_constraint snr_uncore_iio_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xd5, 0xc),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type snr_uncore_iio = {
	.name			= "iio",
	.num_counters		= 4,
	.num_boxes		= 5,
	.perf_ctr_bits		= 48,
	.event_ctl		= SNR_IIO_MSR_PMON_CTL0,
	.perf_ctr		= SNR_IIO_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl		= SNR_IIO_MSR_PMON_BOX_CTL,
	.msr_offset		= SNR_IIO_MSR_OFFSET,
	.constraints		= snr_uncore_iio_constraints,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &snr_uncore_iio_format_group,
	.attr_update		= snr_iio_attr_update,
	.get_topology		= snr_iio_get_topology,
	.set_mapping		= snr_iio_set_mapping,
	.cleanup_mapping	= snr_iio_cleanup_mapping,
};

static struct intel_uncore_type snr_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 5,
	.perf_ctr_bits		= 48,
	.event_ctl		= SNR_IRP0_MSR_PMON_CTL0,
	.perf_ctr		= SNR_IRP0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNR_IRP0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNR_IRP_MSR_OFFSET,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_format_group,
};

static struct intel_uncore_type snr_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters	= 4,
	.num_boxes	= 5,
	.perf_ctr_bits	= 48,
	.event_ctl	= SNR_M2PCIE_MSR_PMON_CTL0,
	.perf_ctr	= SNR_M2PCIE_MSR_PMON_CTR0,
	.box_ctl	= SNR_M2PCIE_MSR_PMON_BOX_CTL,
	.msr_offset	= SNR_M2PCIE_MSR_OFFSET,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_format_group,
};

static int snr_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;

	if (ev_sel >= 0xb && ev_sel <= 0xe) {
		reg1->reg = SNR_PCU_MSR_PMON_BOX_FILTER;
		reg1->idx = ev_sel - 0xb;
		reg1->config = event->attr.config1 & (0xff << reg1->idx);
	}
	return 0;
}
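
/*
 * Events 0xb..0xe are the PCU band events; they all share the single
 * PCU filter register, so the type below declares num_shared_regs = 1
 * and snbep_pcu_get/put_constraint() arbitrate concurrent users of it.
 */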

static struct intel_uncore_ops snr_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snr_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

static struct intel_uncore_type snr_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNR_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNR_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNR_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snr_uncore_pcu_ops,
	.format_group		= &skx_uncore_pcu_format_group,
};

enum perf_uncore_snr_iio_freerunning_type_id {
	SNR_IIO_MSR_IOCLK,
	SNR_IIO_MSR_BW_IN,

	SNR_IIO_FREERUNNING_TYPE_MAX,
};

static struct freerunning_counters snr_iio_freerunning[] = {
	[SNR_IIO_MSR_IOCLK]	= { 0x1eac, 0x1, 0x10, 1, 48 },
	[SNR_IIO_MSR_BW_IN]	= { 0x1f00, 0x1, 0x10, 8, 48 },
};
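
/*
 * Initializer order follows struct freerunning_counters in uncore.h:
 * { counter_base, counter_offset, box_offset, num_counters, bits }.
 * So BW_IN is eight 48-bit counters starting at MSR 0x1f00, spaced
 * 0x1 apart, with the next IIO box's set 0x10 away.
 */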

static struct uncore_event_desc snr_uncore_iio_freerunning_events[] = {
	/* Free-Running IIO CLOCKS Counter */
	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
	/* Free-Running IIO BANDWIDTH IN Counters */
	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4,		"event=0xff,umask=0x24"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5,		"event=0xff,umask=0x25"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6,		"event=0xff,umask=0x26"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7,		"event=0xff,umask=0x27"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit,	"MiB"),
	{ /* end: all zeroes */ },
};
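
/*
 * The 3.814697266e-6 scale is 4/2^20: each bandwidth count represents
 * 4 bytes, which perf then reports in MiB.
 */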

static struct intel_uncore_type snr_uncore_iio_free_running = {
	.name			= "iio_free_running",
	.num_counters		= 9,
	.num_boxes		= 5,
	.num_freerunning_types	= SNR_IIO_FREERUNNING_TYPE_MAX,
	.freerunning		= snr_iio_freerunning,
	.ops			= &skx_uncore_iio_freerunning_ops,
	.event_descs		= snr_uncore_iio_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};

static struct intel_uncore_type *snr_msr_uncores[] = {
	&snr_uncore_ubox,
	&snr_uncore_chabox,
	&snr_uncore_iio,
	&snr_uncore_irp,
	&snr_uncore_m2pcie,
	&snr_uncore_pcu,
	&snr_uncore_iio_free_running,
	NULL,
};

void snr_uncore_cpu_init(void)
{
	uncore_msr_uncores = snr_msr_uncores;
}

static void snr_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
	pci_write_config_dword(pdev, box_ctl, IVBEP_PMON_BOX_CTL_INT);
}

static struct intel_uncore_ops snr_m2m_uncore_pci_ops = {
	.init_box	= snr_m2m_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};

static struct attribute *snr_m2m_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext3.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group snr_m2m_uncore_format_group = {
	.name = "format",
	.attrs = snr_m2m_uncore_formats_attr,
};

static struct intel_uncore_type snr_uncore_m2m = {
	.name		= "m2m",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SNR_M2M_PCI_PMON_CTR0,
	.event_ctl	= SNR_M2M_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext	= SNR_M2M_PCI_PMON_UMASK_EXT,
	.box_ctl	= SNR_M2M_PCI_PMON_BOX_CTL,
	.ops		= &snr_m2m_uncore_pci_ops,
	.format_group	= &snr_m2m_uncore_format_group,
};

static void snr_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, (u32)(hwc->config | SNBEP_PMON_CTL_EN));
	pci_write_config_dword(pdev, hwc->config_base + 4, (u32)(hwc->config >> 32));
}

static struct intel_uncore_ops snr_pcie3_uncore_pci_ops = {
	.init_box	= snr_m2m_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snr_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};

static struct intel_uncore_type snr_uncore_pcie3 = {
	.name		= "pcie3",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SNR_PCIE3_PCI_PMON_CTR0,
	.event_ctl	= SNR_PCIE3_PCI_PMON_CTL0,
	.event_mask	= SKX_IIO_PMON_RAW_EVENT_MASK,
	.event_mask_ext	= SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl	= SNR_PCIE3_PCI_PMON_BOX_CTL,
	.ops		= &snr_pcie3_uncore_pci_ops,
	.format_group	= &skx_uncore_iio_format_group,
};

enum {
	SNR_PCI_UNCORE_M2M,
	SNR_PCI_UNCORE_PCIE3,
};

static struct intel_uncore_type *snr_pci_uncores[] = {
	[SNR_PCI_UNCORE_M2M]		= &snr_uncore_m2m,
	[SNR_PCI_UNCORE_PCIE3]		= &snr_uncore_pcie3,
	NULL,
};

static const struct pci_device_id snr_uncore_pci_ids[] = {
	{ /* M2M */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, SNR_PCI_UNCORE_M2M, 0),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver snr_uncore_pci_driver = {
	.name		= "snr_uncore",
	.id_table	= snr_uncore_pci_ids,
};

static const struct pci_device_id snr_uncore_pci_sub_ids[] = {
	{ /* PCIe3 RP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x334a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 0, SNR_PCI_UNCORE_PCIE3, 0),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver snr_uncore_pci_sub_driver = {
	.name		= "snr_uncore_sub",
	.id_table	= snr_uncore_pci_sub_ids,
};

int snr_uncore_pci_init(void)
{
	/* SNR UBOX DID */
	int ret = snbep_pci2phy_map_init(0x3460, SKX_CPUNODEID,
					 SKX_GIDNIDMAP, true);

	if (ret)
		return ret;

	uncore_pci_uncores = snr_pci_uncores;
	uncore_pci_driver = &snr_uncore_pci_driver;
	uncore_pci_sub_driver = &snr_uncore_pci_sub_driver;
	return 0;
}

#define SNR_MC_DEVICE_ID	0x3451

static struct pci_dev *snr_uncore_get_mc_dev(unsigned int device, int id)
{
	struct pci_dev *mc_dev = NULL;
	int pkg;

	while (1) {
		mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, mc_dev);
		if (!mc_dev)
			break;
		pkg = uncore_pcibus_to_dieid(mc_dev->bus);
		if (pkg == id)
			break;
	}
	return mc_dev;
}
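
/*
 * snr_uncore_get_mc_dev() walks every memory-controller device with the
 * given DID and returns the one whose bus belongs to the requested die.
 * pci_get_device() holds a reference on the returned device; the caller
 * is responsible for dropping it with pci_dev_put().
 */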

static int snr_uncore_mmio_map(struct intel_uncore_box *box,
			       unsigned int box_ctl, int mem_offset,
			       unsigned int device)
{
	struct pci_dev *pdev = snr_uncore_get_mc_dev(device, box->dieid);
	struct intel_uncore_type *type = box->pmu->type;
	resource_size_t addr;
	u32 pci_dword;

	if (!pdev)
		return -ENODEV;

	pci_read_config_dword(pdev, SNR_IMC_MMIO_BASE_OFFSET, &pci_dword);
	addr = ((resource_size_t)pci_dword & SNR_IMC_MMIO_BASE_MASK) << 23;

	pci_read_config_dword(pdev, mem_offset, &pci_dword);
	addr |= (pci_dword & SNR_IMC_MMIO_MEM0_MASK) << 12;

	addr += box_ctl;

	pci_dev_put(pdev);

	box->io_addr = ioremap(addr, type->mmio_map_size);
	if (!box->io_addr) {
		pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);
		return -EINVAL;
	}

	return 0;
}
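
/*
 * The counter block's physical address is assembled from two config
 * registers: the MC MMIO base (masked, then shifted left by 23) and
 * the MEMx region offset (masked, then shifted left by 12), plus the
 * PMON box-control offset within that region. Only mmio_map_size
 * bytes are mapped.
 */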

static void __snr_uncore_mmio_init_box(struct intel_uncore_box *box,
				       unsigned int box_ctl, int mem_offset,
				       unsigned int device)
{
	if (!snr_uncore_mmio_map(box, box_ctl, mem_offset, device))
		writel(IVBEP_PMON_BOX_CTL_INT, box->io_addr);
}

static void snr_uncore_mmio_init_box(struct intel_uncore_box *box)
{
	__snr_uncore_mmio_init_box(box, uncore_mmio_box_ctl(box),
				   SNR_IMC_MMIO_MEM0_OFFSET,
				   SNR_MC_DEVICE_ID);
}

static void snr_uncore_mmio_disable_box(struct intel_uncore_box *box)
{
	u32 config;

	if (!box->io_addr)
		return;

	config = readl(box->io_addr);
	config |= SNBEP_PMON_BOX_CTL_FRZ;
	writel(config, box->io_addr);
}

static void snr_uncore_mmio_enable_box(struct intel_uncore_box *box)
{
	u32 config;

	if (!box->io_addr)
		return;

	config = readl(box->io_addr);
	config &= ~SNBEP_PMON_BOX_CTL_FRZ;
	writel(config, box->io_addr);
}

static void snr_uncore_mmio_enable_event(struct intel_uncore_box *box,
					   struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!box->io_addr)
		return;

	if (!uncore_mmio_is_valid_offset(box, hwc->config_base))
		return;

	writel(hwc->config | SNBEP_PMON_CTL_EN,
	       box->io_addr + hwc->config_base);
}

static void snr_uncore_mmio_disable_event(struct intel_uncore_box *box,
					    struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!box->io_addr)
		return;

	if (!uncore_mmio_is_valid_offset(box, hwc->config_base))
		return;

	writel(hwc->config, box->io_addr + hwc->config_base);
}

static struct intel_uncore_ops snr_uncore_mmio_ops = {
	.init_box	= snr_uncore_mmio_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.disable_box	= snr_uncore_mmio_disable_box,
	.enable_box	= snr_uncore_mmio_enable_box,
	.disable_event	= snr_uncore_mmio_disable_event,
	.enable_event	= snr_uncore_mmio_enable_event,
	.read_counter	= uncore_mmio_read_counter,
};

static struct uncore_event_desc snr_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};
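
/*
 * The 6.103515625e-5 scale is 64/2^20: each CAS count is one 64-byte
 * cache line, reported in MiB.
 */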

static struct intel_uncore_type snr_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNR_IMC_MMIO_PMON_FIXED_CTR,
	.fixed_ctl	= SNR_IMC_MMIO_PMON_FIXED_CTL,
	.event_descs	= snr_uncore_imc_events,
	.perf_ctr	= SNR_IMC_MMIO_PMON_CTR0,
	.event_ctl	= SNR_IMC_MMIO_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNR_IMC_MMIO_PMON_BOX_CTL,
	.mmio_offset	= SNR_IMC_MMIO_OFFSET,
	.mmio_map_size	= SNR_IMC_MMIO_SIZE,
	.ops		= &snr_uncore_mmio_ops,
	.format_group	= &skx_uncore_format_group,
};

enum perf_uncore_snr_imc_freerunning_type_id {
	SNR_IMC_DCLK,
	SNR_IMC_DDR,

	SNR_IMC_FREERUNNING_TYPE_MAX,
};

static struct freerunning_counters snr_imc_freerunning[] = {
	[SNR_IMC_DCLK]	= { 0x22b0, 0x0, 0, 1, 48 },
	[SNR_IMC_DDR]	= { 0x2290, 0x8, 0, 2, 48 },
};
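
/*
 * Same field order as the IIO table above ({ counter_base,
 * counter_offset, box_offset, num_counters, bits }), but these are
 * MMIO offsets into the IMC bar rather than MSRs: DDR has two
 * counters, read at 0x2290 and write at 0x2298.
 */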

static struct uncore_event_desc snr_uncore_imc_freerunning_events[] = {
	INTEL_UNCORE_EVENT_DESC(dclk,		"event=0xff,umask=0x10"),

	INTEL_UNCORE_EVENT_DESC(read,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(read.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(read.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(write,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(write.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(write.unit,	"MiB"),
	{ /* end: all zeroes */ },
};

static struct intel_uncore_ops snr_uncore_imc_freerunning_ops = {
	.init_box	= snr_uncore_mmio_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};

static struct intel_uncore_type snr_uncore_imc_free_running = {
	.name			= "imc_free_running",
	.num_counters		= 3,
	.num_boxes		= 1,
	.num_freerunning_types	= SNR_IMC_FREERUNNING_TYPE_MAX,
	.mmio_map_size		= SNR_IMC_MMIO_SIZE,
	.freerunning		= snr_imc_freerunning,
	.ops			= &snr_uncore_imc_freerunning_ops,
	.event_descs		= snr_uncore_imc_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};

static struct intel_uncore_type *snr_mmio_uncores[] = {
	&snr_uncore_imc,
	&snr_uncore_imc_free_running,
	NULL,
};

void snr_uncore_mmio_init(void)
{
	uncore_mmio_uncores = snr_mmio_uncores;
}

/* end of SNR uncore support */

/* ICX uncore support */

static unsigned icx_cha_msr_offsets[] = {
	0x2a0, 0x2ae, 0x2bc, 0x2ca, 0x2d8, 0x2e6, 0x2f4, 0x302, 0x310,
	0x31e, 0x32c, 0x33a, 0x348, 0x356, 0x364, 0x372, 0x380, 0x38e,
	0x3aa, 0x3b8, 0x3c6, 0x3d4, 0x3e2, 0x3f0, 0x3fe, 0x40c, 0x41a,
	0x428, 0x436, 0x444, 0x452, 0x460, 0x46e, 0x47c, 0x0,   0xe,
	0x1c,  0x2a,  0x38,  0x46,
};
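
/*
 * The offsets above are relative to the CHA34 register block
 * (ICX_C34_MSR_PMON_*), which is why boxes 34-39 get the small
 * offsets at the end of the table while boxes 0-33 sit higher up
 * in MSR space.
 */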

static int icx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	bool tie_en = !!(event->hw.config & SNBEP_CBO_PMON_CTL_TID_EN);

	if (tie_en) {
		reg1->reg = ICX_C34_MSR_PMON_BOX_FILTER0 +
			    icx_cha_msr_offsets[box->pmu->pmu_idx];
		reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
		reg1->idx = 0;
	}

	return 0;
}

static struct intel_uncore_ops icx_uncore_chabox_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= snr_cha_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= icx_cha_hw_config,
};

static struct intel_uncore_type icx_uncore_chabox = {
	.name			= "cha",
	.num_counters		= 4,
	.perf_ctr_bits		= 48,
	.event_ctl		= ICX_C34_MSR_PMON_CTL0,
	.perf_ctr		= ICX_C34_MSR_PMON_CTR0,
	.box_ctl		= ICX_C34_MSR_PMON_BOX_CTL,
	.msr_offsets		= icx_cha_msr_offsets,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SNR_CHA_RAW_EVENT_MASK_EXT,
	.constraints		= skx_uncore_chabox_constraints,
	.ops			= &icx_uncore_chabox_ops,
	.format_group		= &snr_uncore_chabox_format_group,
};

static unsigned icx_msr_offsets[] = {
	0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0,
};

static struct event_constraint icx_uncore_iio_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x03, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x88, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xd5, 0xc),
	EVENT_CONSTRAINT_END
};

static umode_t
icx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
{
	/* Root bus 0x00 is valid only for pmu_idx = 5. */
	return pmu_iio_mapping_visible(kobj, attr, die, 5);
}

static struct attribute_group icx_iio_mapping_group = {
	.is_visible	= icx_iio_mapping_visible,
};

static const struct attribute_group *icx_iio_attr_update[] = {
	&icx_iio_mapping_group,
	NULL,
};

/*
 * ICX has a static mapping of stack IDs from SAD_CONTROL_CFG notation to PMON
 */
enum {
	ICX_PCIE1_PMON_ID,
	ICX_PCIE2_PMON_ID,
	ICX_PCIE3_PMON_ID,
	ICX_PCIE4_PMON_ID,
	ICX_PCIE5_PMON_ID,
	ICX_CBDMA_DMI_PMON_ID
};

static u8 icx_sad_pmon_mapping[] = {
	ICX_CBDMA_DMI_PMON_ID,
	ICX_PCIE1_PMON_ID,
	ICX_PCIE2_PMON_ID,
	ICX_PCIE3_PMON_ID,
	ICX_PCIE4_PMON_ID,
	ICX_PCIE5_PMON_ID,
};

static int icx_iio_get_topology(struct intel_uncore_type *type)
{
	return sad_cfg_iio_topology(type, icx_sad_pmon_mapping);
}

static void icx_iio_set_mapping(struct intel_uncore_type *type)
{
	/* Detect ICX-D systems; the IIO mapping is not supported on them. */
	if (boot_cpu_data.x86_model == INTEL_FAM6_ICELAKE_D) {
		pmu_clear_mapping_attr(type->attr_update, &icx_iio_mapping_group);
		return;
	}
	pmu_iio_set_mapping(type, &icx_iio_mapping_group);
}

static void icx_iio_cleanup_mapping(struct intel_uncore_type *type)
{
	pmu_cleanup_mapping(type, &icx_iio_mapping_group);
}

static struct intel_uncore_type icx_uncore_iio = {
	.name			= "iio",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= ICX_IIO_MSR_PMON_CTL0,
	.perf_ctr		= ICX_IIO_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl		= ICX_IIO_MSR_PMON_BOX_CTL,
	.msr_offsets		= icx_msr_offsets,
	.constraints		= icx_uncore_iio_constraints,
	.ops			= &skx_uncore_iio_ops,
	.format_group		= &snr_uncore_iio_format_group,
	.attr_update		= icx_iio_attr_update,
	.get_topology		= icx_iio_get_topology,
	.set_mapping		= icx_iio_set_mapping,
	.cleanup_mapping	= icx_iio_cleanup_mapping,
};

static struct intel_uncore_type icx_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= ICX_IRP0_MSR_PMON_CTL0,
	.perf_ctr		= ICX_IRP0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= ICX_IRP0_MSR_PMON_BOX_CTL,
	.msr_offsets		= icx_msr_offsets,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_format_group,
};

static struct event_constraint icx_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type icx_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters	= 4,
	.num_boxes	= 6,
	.perf_ctr_bits	= 48,
	.event_ctl	= ICX_M2PCIE_MSR_PMON_CTL0,
	.perf_ctr	= ICX_M2PCIE_MSR_PMON_CTR0,
	.box_ctl	= ICX_M2PCIE_MSR_PMON_BOX_CTL,
	.msr_offsets	= icx_msr_offsets,
	.constraints	= icx_uncore_m2pcie_constraints,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_format_group,
};

enum perf_uncore_icx_iio_freerunning_type_id {
	ICX_IIO_MSR_IOCLK,
	ICX_IIO_MSR_BW_IN,

	ICX_IIO_FREERUNNING_TYPE_MAX,
};

static unsigned icx_iio_clk_freerunning_box_offsets[] = {
	0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0,
};

static unsigned icx_iio_bw_freerunning_box_offsets[] = {
	0x0, 0x10, 0x20, 0x90, 0xa0, 0xb0,
};

static struct freerunning_counters icx_iio_freerunning[] = {
	[ICX_IIO_MSR_IOCLK]	= { 0xa55, 0x1, 0x20, 1, 48, icx_iio_clk_freerunning_box_offsets },
	[ICX_IIO_MSR_BW_IN]	= { 0xaa0, 0x1, 0x10, 8, 48, icx_iio_bw_freerunning_box_offsets },
};

static struct uncore_event_desc icx_uncore_iio_freerunning_events[] = {
	/* Free-Running IIO CLOCKS Counter */
	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
	/* Free-Running IIO BANDWIDTH IN Counters */
	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4,		"event=0xff,umask=0x24"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5,		"event=0xff,umask=0x25"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6,		"event=0xff,umask=0x26"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7,		"event=0xff,umask=0x27"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit,	"MiB"),
	{ /* end: all zeroes */ },
};

static struct intel_uncore_type icx_uncore_iio_free_running = {
	.name			= "iio_free_running",
	.num_counters		= 9,
	.num_boxes		= 6,
	.num_freerunning_types	= ICX_IIO_FREERUNNING_TYPE_MAX,
	.freerunning		= icx_iio_freerunning,
	.ops			= &skx_uncore_iio_freerunning_ops,
	.event_descs		= icx_uncore_iio_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};

static struct intel_uncore_type *icx_msr_uncores[] = {
	&skx_uncore_ubox,
	&icx_uncore_chabox,
	&icx_uncore_iio,
	&icx_uncore_irp,
	&icx_uncore_m2pcie,
	&skx_uncore_pcu,
	&icx_uncore_iio_free_running,
	NULL,
};

/*
 * To determine the number of CHAs, read the CAPID6 (low) and CAPID7
 * (high) registers, which are located at Device 30, Function 3.
 */
#define ICX_CAPID6		0x9c
#define ICX_CAPID7		0xa0

static u64 icx_count_chabox(void)
{
	struct pci_dev *dev = NULL;
	u64 caps = 0;

	dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x345b, dev);
	if (!dev)
		goto out;

	pci_read_config_dword(dev, ICX_CAPID6, (u32 *)&caps);
	pci_read_config_dword(dev, ICX_CAPID7, (u32 *)&caps + 1);
out:
	pci_dev_put(dev);
	return hweight64(caps);
}
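
/*
 * CAPID6 and CAPID7 together form a 64-bit mask with one bit per
 * enabled CHA, so the box count is simply the population count.
 */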

void icx_uncore_cpu_init(void)
{
	u64 num_boxes = icx_count_chabox();

	if (WARN_ON(num_boxes > ARRAY_SIZE(icx_cha_msr_offsets)))
		return;
	icx_uncore_chabox.num_boxes = num_boxes;
	uncore_msr_uncores = icx_msr_uncores;
}

static struct intel_uncore_type icx_uncore_m2m = {
	.name		= "m2m",
	.num_counters   = 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SNR_M2M_PCI_PMON_CTR0,
	.event_ctl	= SNR_M2M_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext	= SNR_M2M_PCI_PMON_UMASK_EXT,
	.box_ctl	= SNR_M2M_PCI_PMON_BOX_CTL,
	.ops		= &snr_m2m_uncore_pci_ops,
	.format_group	= &snr_m2m_uncore_format_group,
};

static struct attribute *icx_upi_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext4.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group icx_upi_uncore_format_group = {
	.name = "format",
	.attrs = icx_upi_uncore_formats_attr,
};

#define ICX_UPI_REGS_ADDR_DEVICE_LINK0	0x02
#define ICX_UPI_REGS_ADDR_FUNCTION	0x01

static int discover_upi_topology(struct intel_uncore_type *type, int ubox_did, int dev_link0)
{
	struct pci_dev *ubox = NULL;
	struct pci_dev *dev = NULL;
	u32 nid, gid;
	int i, idx, lgc_pkg, ret = -EPERM;
	struct intel_uncore_topology *upi;
	unsigned int devfn;

	/* The GIDNIDMAP method supports machines with no more than 8 sockets. */
	if (uncore_max_dies() > 8)
		goto err;

	while ((ubox = pci_get_device(PCI_VENDOR_ID_INTEL, ubox_did, ubox))) {
		ret = upi_nodeid_groupid(ubox, SKX_CPUNODEID, SKX_GIDNIDMAP, &nid, &gid);
		if (ret) {
			ret = pcibios_err_to_errno(ret);
			break;
		}

		for (i = 0; i < 8; i++) {
			if (nid != GIDNIDMAP(gid, i))
				continue;
			lgc_pkg = topology_phys_to_logical_pkg(i);
			if (lgc_pkg < 0) {
				ret = -EPERM;
				goto err;
			}
			for (idx = 0; idx < type->num_boxes; idx++) {
				upi = &type->topology[lgc_pkg][idx];
				devfn = PCI_DEVFN(dev_link0 + idx, ICX_UPI_REGS_ADDR_FUNCTION);
				dev = pci_get_domain_bus_and_slot(pci_domain_nr(ubox->bus),
								  ubox->bus->number,
								  devfn);
				if (dev) {
					ret = upi_fill_topology(dev, upi, idx);
					if (ret)
						goto err;
				}
			}
			break;
		}
	}
err:
	pci_dev_put(ubox);
	pci_dev_put(dev);
	return ret;
}
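
/*
 * For each UBOX, discover_upi_topology() translates the node id to a
 * logical package via the GIDNIDMAP, then probes the UPI link config
 * devices (device dev_link0 + idx, function 1) on the same bus to fill
 * in the per-link topology entries.
 */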

static int icx_upi_get_topology(struct intel_uncore_type *type)
{
	return discover_upi_topology(type, ICX_UBOX_DID, ICX_UPI_REGS_ADDR_DEVICE_LINK0);
}

static struct attribute_group icx_upi_mapping_group = {
	.is_visible	= skx_upi_mapping_visible,
};

static const struct attribute_group *icx_upi_attr_update[] = {
	&icx_upi_mapping_group,
	NULL
};

static void icx_upi_set_mapping(struct intel_uncore_type *type)
{
	pmu_upi_set_mapping(type, &icx_upi_mapping_group);
}

static void icx_upi_cleanup_mapping(struct intel_uncore_type *type)
{
	pmu_cleanup_mapping(type, &icx_upi_mapping_group);
}

static struct intel_uncore_type icx_uncore_upi = {
	.name		= "upi",
	.num_counters   = 4,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.perf_ctr	= ICX_UPI_PCI_PMON_CTR0,
	.event_ctl	= ICX_UPI_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext = ICX_UPI_CTL_UMASK_EXT,
	.box_ctl	= ICX_UPI_PCI_PMON_BOX_CTL,
	.ops		= &skx_upi_uncore_pci_ops,
	.format_group	= &icx_upi_uncore_format_group,
	.attr_update	= icx_upi_attr_update,
	.get_topology	= icx_upi_get_topology,
	.set_mapping	= icx_upi_set_mapping,
	.cleanup_mapping = icx_upi_cleanup_mapping,
};

static struct event_constraint icx_uncore_m3upi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x1c, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type icx_uncore_m3upi = {
	.name		= "m3upi",
	.num_counters   = 4,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.perf_ctr	= ICX_M3UPI_PCI_PMON_CTR0,
	.event_ctl	= ICX_M3UPI_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= ICX_M3UPI_PCI_PMON_BOX_CTL,
	.constraints	= icx_uncore_m3upi_constraints,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};

enum {
	ICX_PCI_UNCORE_M2M,
	ICX_PCI_UNCORE_UPI,
	ICX_PCI_UNCORE_M3UPI,
};

static struct intel_uncore_type *icx_pci_uncores[] = {
	[ICX_PCI_UNCORE_M2M]		= &icx_uncore_m2m,
	[ICX_PCI_UNCORE_UPI]		= &icx_uncore_upi,
	[ICX_PCI_UNCORE_M3UPI]		= &icx_uncore_m3upi,
	NULL,
};

static const struct pci_device_id icx_uncore_pci_ids[] = {
	{ /* M2M 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, ICX_PCI_UNCORE_M2M, 0),
	},
	{ /* M2M 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 0, ICX_PCI_UNCORE_M2M, 1),
	},
	{ /* M2M 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, ICX_PCI_UNCORE_M2M, 2),
	},
	{ /* M2M 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, ICX_PCI_UNCORE_M2M, 3),
	},
	{ /* UPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(2, 1, ICX_PCI_UNCORE_UPI, 0),
	},
	{ /* UPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(3, 1, ICX_PCI_UNCORE_UPI, 1),
	},
	{ /* UPI Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 1, ICX_PCI_UNCORE_UPI, 2),
	},
	{ /* M3UPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(5, 1, ICX_PCI_UNCORE_M3UPI, 0),
	},
	{ /* M3UPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(6, 1, ICX_PCI_UNCORE_M3UPI, 1),
	},
	{ /* M3UPI Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(7, 1, ICX_PCI_UNCORE_M3UPI, 2),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver icx_uncore_pci_driver = {
	.name		= "icx_uncore",
	.id_table	= icx_uncore_pci_ids,
};

int icx_uncore_pci_init(void)
{
	/* ICX UBOX DID */
	int ret = snbep_pci2phy_map_init(0x3450, SKX_CPUNODEID,
					 SKX_GIDNIDMAP, true);

	if (ret)
		return ret;

	uncore_pci_uncores = icx_pci_uncores;
	uncore_pci_driver = &icx_uncore_pci_driver;
	return 0;
}

static void icx_uncore_imc_init_box(struct intel_uncore_box *box)
{
	unsigned int box_ctl = box->pmu->type->box_ctl +
			       box->pmu->type->mmio_offset * (box->pmu->pmu_idx % ICX_NUMBER_IMC_CHN);
	int mem_offset = (box->pmu->pmu_idx / ICX_NUMBER_IMC_CHN) * ICX_IMC_MEM_STRIDE +
			 SNR_IMC_MMIO_MEM0_OFFSET;

	__snr_uncore_mmio_init_box(box, box_ctl, mem_offset,
				   SNR_MC_DEVICE_ID);
}
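
/*
 * pmu_idx encodes both the memory controller and the channel:
 * pmu_idx / ICX_NUMBER_IMC_CHN selects the controller (and thus which
 * MEMx base register supplies mem_offset), while the remainder selects
 * the channel's register block via mmio_offset.
 */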

static struct intel_uncore_ops icx_uncore_mmio_ops = {
	.init_box	= icx_uncore_imc_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.disable_box	= snr_uncore_mmio_disable_box,
	.enable_box	= snr_uncore_mmio_enable_box,
	.disable_event	= snr_uncore_mmio_disable_event,
	.enable_event	= snr_uncore_mmio_enable_event,
	.read_counter	= uncore_mmio_read_counter,
};

static struct intel_uncore_type icx_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 12,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNR_IMC_MMIO_PMON_FIXED_CTR,
	.fixed_ctl	= SNR_IMC_MMIO_PMON_FIXED_CTL,
	.event_descs	= snr_uncore_imc_events,
	.perf_ctr	= SNR_IMC_MMIO_PMON_CTR0,
	.event_ctl	= SNR_IMC_MMIO_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNR_IMC_MMIO_PMON_BOX_CTL,
	.mmio_offset	= SNR_IMC_MMIO_OFFSET,
	.mmio_map_size	= SNR_IMC_MMIO_SIZE,
	.ops		= &icx_uncore_mmio_ops,
	.format_group	= &skx_uncore_format_group,
};

enum perf_uncore_icx_imc_freerunning_type_id {
	ICX_IMC_DCLK,
	ICX_IMC_DDR,
	ICX_IMC_DDRT,

	ICX_IMC_FREERUNNING_TYPE_MAX,
};

static struct freerunning_counters icx_imc_freerunning[] = {
	[ICX_IMC_DCLK]	= { 0x22b0, 0x0, 0, 1, 48 },
	[ICX_IMC_DDR]	= { 0x2290, 0x8, 0, 2, 48 },
	[ICX_IMC_DDRT]	= { 0x22a0, 0x8, 0, 2, 48 },
};

static struct uncore_event_desc icx_uncore_imc_freerunning_events[] = {
	INTEL_UNCORE_EVENT_DESC(dclk,			"event=0xff,umask=0x10"),

	INTEL_UNCORE_EVENT_DESC(read,			"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(read.scale,		"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(read.unit,		"MiB"),
	INTEL_UNCORE_EVENT_DESC(write,			"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(write.scale,		"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(write.unit,		"MiB"),

	INTEL_UNCORE_EVENT_DESC(ddrt_read,		"event=0xff,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(ddrt_read.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(ddrt_read.unit,		"MiB"),
	INTEL_UNCORE_EVENT_DESC(ddrt_write,		"event=0xff,umask=0x31"),
	INTEL_UNCORE_EVENT_DESC(ddrt_write.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(ddrt_write.unit,	"MiB"),
	{ /* end: all zeroes */ },
};

static void icx_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
{
	int mem_offset = box->pmu->pmu_idx * ICX_IMC_MEM_STRIDE +
			 SNR_IMC_MMIO_MEM0_OFFSET;

	snr_uncore_mmio_map(box, uncore_mmio_box_ctl(box),
			    mem_offset, SNR_MC_DEVICE_ID);
}

static struct intel_uncore_ops icx_uncore_imc_freerunning_ops = {
	.init_box	= icx_uncore_imc_freerunning_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};

static struct intel_uncore_type icx_uncore_imc_free_running = {
	.name			= "imc_free_running",
	.num_counters		= 5,
	.num_boxes		= 4,
	.num_freerunning_types	= ICX_IMC_FREERUNNING_TYPE_MAX,
	.mmio_map_size		= SNR_IMC_MMIO_SIZE,
	.freerunning		= icx_imc_freerunning,
	.ops			= &icx_uncore_imc_freerunning_ops,
	.event_descs		= icx_uncore_imc_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};

static struct intel_uncore_type *icx_mmio_uncores[] = {
	&icx_uncore_imc,
	&icx_uncore_imc_free_running,
	NULL,
};

void icx_uncore_mmio_init(void)
{
	uncore_mmio_uncores = icx_mmio_uncores;
}

/* end of ICX uncore support */

/* SPR uncore support */

static void spr_uncore_msr_enable_event(struct intel_uncore_box *box,
					struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE)
		wrmsrl(reg1->reg, reg1->config);

	wrmsrl(hwc->config_base, hwc->config);
}

static void spr_uncore_msr_disable_event(struct intel_uncore_box *box,
					 struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE)
		wrmsrl(reg1->reg, 0);

	wrmsrl(hwc->config_base, 0);
}

static int spr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	bool tie_en = !!(event->hw.config & SPR_CHA_PMON_CTL_TID_EN);
	struct intel_uncore_type *type = box->pmu->type;

	if (tie_en) {
		reg1->reg = SPR_C0_MSR_PMON_BOX_FILTER0 +
			    HSWEP_CBO_MSR_OFFSET * type->box_ids[box->pmu->pmu_idx];
		reg1->config = event->attr.config1 & SPR_CHA_PMON_BOX_FILTER_TID;
		reg1->idx = 0;
	}

	return 0;
}
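
/*
 * Unlike ICX, SPR derives the box number from the discovery table
 * (type->box_ids[pmu_idx]) rather than from pmu_idx itself, since the
 * enumerated CHA boxes need not be dense.
 */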

static struct intel_uncore_ops spr_uncore_chabox_ops = {
	.init_box		= intel_generic_uncore_msr_init_box,
	.disable_box		= intel_generic_uncore_msr_disable_box,
	.enable_box		= intel_generic_uncore_msr_enable_box,
	.disable_event		= spr_uncore_msr_disable_event,
	.enable_event		= spr_uncore_msr_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= spr_cha_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};

static struct attribute *spr_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext5.attr,
	&format_attr_tid_en2.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid5.attr,
	NULL,
};
static const struct attribute_group spr_uncore_chabox_format_group = {
	.name = "format",
	.attrs = spr_uncore_cha_formats_attr,
};

static ssize_t alias_show(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(dev);
	char pmu_name[UNCORE_PMU_NAME_LEN];

	uncore_get_alias_name(pmu_name, pmu);
	return sysfs_emit(buf, "%s\n", pmu_name);
}

static DEVICE_ATTR_RO(alias);

static struct attribute *uncore_alias_attrs[] = {
	&dev_attr_alias.attr,
	NULL
};

ATTRIBUTE_GROUPS(uncore_alias);

static struct intel_uncore_type spr_uncore_chabox = {
	.name			= "cha",
	.event_mask		= SPR_CHA_PMON_EVENT_MASK,
	.event_mask_ext		= SPR_CHA_EVENT_MASK_EXT,
	.num_shared_regs	= 1,
	.constraints		= skx_uncore_chabox_constraints,
	.ops			= &spr_uncore_chabox_ops,
	.format_group		= &spr_uncore_chabox_format_group,
	.attr_update		= uncore_alias_groups,
};

static struct intel_uncore_type spr_uncore_iio = {
	.name			= "iio",
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
	.format_group		= &snr_uncore_iio_format_group,
	.attr_update		= uncore_alias_groups,
	.constraints		= icx_uncore_iio_constraints,
};

static struct attribute *spr_uncore_raw_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext4.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group spr_uncore_raw_format_group = {
	.name			= "format",
	.attrs			= spr_uncore_raw_formats_attr,
};

#define SPR_UNCORE_COMMON_FORMAT()				\
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,	\
	.event_mask_ext		= SPR_RAW_EVENT_MASK_EXT,	\
	.format_group		= &spr_uncore_raw_format_group,	\
	.attr_update		= uncore_alias_groups

static struct intel_uncore_type spr_uncore_irp = {
	SPR_UNCORE_COMMON_FORMAT(),
	.name			= "irp",
};

static struct event_constraint spr_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type spr_uncore_m2pcie = {
	SPR_UNCORE_COMMON_FORMAT(),
	.name			= "m2pcie",
	.constraints		= spr_uncore_m2pcie_constraints,
};

static struct intel_uncore_type spr_uncore_pcu = {
	.name			= "pcu",
	.attr_update		= uncore_alias_groups,
};

static void spr_uncore_mmio_enable_event(struct intel_uncore_box *box,
					 struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!box->io_addr)
		return;

	if (uncore_pmc_fixed(hwc->idx))
		writel(SNBEP_PMON_CTL_EN, box->io_addr + hwc->config_base);
	else
		writel(hwc->config, box->io_addr + hwc->config_base);
}
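
/*
 * The IMC fixed counter has no event/umask encoding: its control
 * register only needs the enable bit, hence the uncore_pmc_fixed()
 * special case above.
 */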

static struct intel_uncore_ops spr_uncore_mmio_ops = {
	.init_box		= intel_generic_uncore_mmio_init_box,
	.exit_box		= uncore_mmio_exit_box,
	.disable_box		= intel_generic_uncore_mmio_disable_box,
	.enable_box		= intel_generic_uncore_mmio_enable_box,
	.disable_event		= intel_generic_uncore_mmio_disable_event,
	.enable_event		= spr_uncore_mmio_enable_event,
	.read_counter		= uncore_mmio_read_counter,
};

static struct uncore_event_desc spr_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x01,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x05,umask=0xcf"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x05,umask=0xf0"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};

static struct intel_uncore_type spr_uncore_imc = {
	SPR_UNCORE_COMMON_FORMAT(),
	.name			= "imc",
	.fixed_ctr_bits		= 48,
	.fixed_ctr		= SNR_IMC_MMIO_PMON_FIXED_CTR,
	.fixed_ctl		= SNR_IMC_MMIO_PMON_FIXED_CTL,
	.ops			= &spr_uncore_mmio_ops,
	.event_descs		= spr_uncore_imc_events,
};

static void spr_uncore_pci_enable_event(struct intel_uncore_box *box,
					struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base + 4, (u32)(hwc->config >> 32));
	pci_write_config_dword(pdev, hwc->config_base, (u32)hwc->config);
}

static struct intel_uncore_ops spr_uncore_pci_ops = {
	.init_box		= intel_generic_uncore_pci_init_box,
	.disable_box		= intel_generic_uncore_pci_disable_box,
	.enable_box		= intel_generic_uncore_pci_enable_box,
	.disable_event		= intel_generic_uncore_pci_disable_event,
	.enable_event		= spr_uncore_pci_enable_event,
	.read_counter		= intel_generic_uncore_pci_read_counter,
};

#define SPR_UNCORE_PCI_COMMON_FORMAT()			\
	SPR_UNCORE_COMMON_FORMAT(),			\
	.ops			= &spr_uncore_pci_ops

static struct intel_uncore_type spr_uncore_m2m = {
	SPR_UNCORE_PCI_COMMON_FORMAT(),
	.name			= "m2m",
};

static struct attribute_group spr_upi_mapping_group = {
	.is_visible	= skx_upi_mapping_visible,
};

static const struct attribute_group *spr_upi_attr_update[] = {
	&uncore_alias_group,
	&spr_upi_mapping_group,
	NULL
};

#define SPR_UPI_REGS_ADDR_DEVICE_LINK0	0x01

static void spr_upi_set_mapping(struct intel_uncore_type *type)
{
	pmu_upi_set_mapping(type, &spr_upi_mapping_group);
}

static void spr_upi_cleanup_mapping(struct intel_uncore_type *type)
{
	pmu_cleanup_mapping(type, &spr_upi_mapping_group);
}

static int spr_upi_get_topology(struct intel_uncore_type *type)
{
	return discover_upi_topology(type, SPR_UBOX_DID, SPR_UPI_REGS_ADDR_DEVICE_LINK0);
}

static struct intel_uncore_type spr_uncore_mdf = {
	SPR_UNCORE_COMMON_FORMAT(),
	.name			= "mdf",
};

#define UNCORE_SPR_NUM_UNCORE_TYPES		12
#define UNCORE_SPR_CHA				0
#define UNCORE_SPR_IIO				1
#define UNCORE_SPR_IMC				6
#define UNCORE_SPR_UPI				8
#define UNCORE_SPR_M3UPI			9

/*
 * The uncore units that are supported by the discovery table
 * are defined here.
 */
static struct intel_uncore_type *spr_uncores[UNCORE_SPR_NUM_UNCORE_TYPES] = {
	&spr_uncore_chabox,
	&spr_uncore_iio,
	&spr_uncore_irp,
	&spr_uncore_m2pcie,
	&spr_uncore_pcu,
	NULL,
	&spr_uncore_imc,
	&spr_uncore_m2m,
	NULL,
	NULL,
	NULL,
	&spr_uncore_mdf,
};

/*
 * The uncore units that are not supported by the discovery table
 * are implemented from here.
 */
#define SPR_UNCORE_UPI_NUM_BOXES	4

static unsigned int spr_upi_pci_offsets[SPR_UNCORE_UPI_NUM_BOXES] = {
	0, 0x8000, 0x10000, 0x18000
};
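
/*
 * UPI is absent from the SPR discovery table, so its counter layout is
 * hard-coded below: four boxes whose register blocks sit 0x8000 apart.
 * M3UPI reuses the same per-box offsets.
 */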
6195 
6196 static struct intel_uncore_type spr_uncore_upi = {
6197 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
6198 	.event_mask_ext		= SPR_RAW_EVENT_MASK_EXT,
6199 	.format_group		= &spr_uncore_raw_format_group,
6200 	.ops			= &spr_uncore_pci_ops,
6201 	.name			= "upi",
6202 	.attr_update		= spr_upi_attr_update,
6203 	.get_topology		= spr_upi_get_topology,
6204 	.set_mapping		= spr_upi_set_mapping,
6205 	.cleanup_mapping	= spr_upi_cleanup_mapping,
6206 	.type_id		= UNCORE_SPR_UPI,
6207 	.num_counters		= 4,
6208 	.num_boxes		= SPR_UNCORE_UPI_NUM_BOXES,
6209 	.perf_ctr_bits		= 48,
6210 	.perf_ctr		= ICX_UPI_PCI_PMON_CTR0,
6211 	.event_ctl		= ICX_UPI_PCI_PMON_CTL0,
6212 	.box_ctl		= ICX_UPI_PCI_PMON_BOX_CTL,
6213 	.pci_offsets		= spr_upi_pci_offsets,
6214 };
6215 
6216 static struct intel_uncore_type spr_uncore_m3upi = {
6217 	SPR_UNCORE_PCI_COMMON_FORMAT(),
6218 	.name			= "m3upi",
6219 	.type_id		= UNCORE_SPR_M3UPI,
6220 	.num_counters		= 4,
6221 	.num_boxes		= SPR_UNCORE_UPI_NUM_BOXES,
6222 	.perf_ctr_bits		= 48,
6223 	.perf_ctr		= ICX_M3UPI_PCI_PMON_CTR0,
6224 	.event_ctl		= ICX_M3UPI_PCI_PMON_CTL0,
6225 	.box_ctl		= ICX_M3UPI_PCI_PMON_BOX_CTL,
6226 	.pci_offsets		= spr_upi_pci_offsets,
6227 	.constraints		= icx_uncore_m3upi_constraints,
6228 };
6229 
6230 enum perf_uncore_spr_iio_freerunning_type_id {
6231 	SPR_IIO_MSR_IOCLK,
6232 	SPR_IIO_MSR_BW_IN,
6233 	SPR_IIO_MSR_BW_OUT,
6234 
6235 	SPR_IIO_FREERUNNING_TYPE_MAX,
6236 };
6237 
6238 static struct freerunning_counters spr_iio_freerunning[] = {
6239 	[SPR_IIO_MSR_IOCLK]	= { 0x340e, 0x1, 0x10, 1, 48 },
6240 	[SPR_IIO_MSR_BW_IN]	= { 0x3800, 0x1, 0x10, 8, 48 },
6241 	[SPR_IIO_MSR_BW_OUT]	= { 0x3808, 0x1, 0x10, 8, 48 },
6242 };
6243 
static struct uncore_event_desc spr_uncore_iio_freerunning_events[] = {
	/* Free-Running IIO CLOCKS Counter */
	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
	/* Free-Running IIO BANDWIDTH IN Counters */
	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4,		"event=0xff,umask=0x24"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5,		"event=0xff,umask=0x25"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6,		"event=0xff,umask=0x26"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7,		"event=0xff,umask=0x27"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit,	"MiB"),
	/* Free-Running IIO BANDWIDTH OUT Counters */
	INTEL_UNCORE_EVENT_DESC(bw_out_port0,		"event=0xff,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1,		"event=0xff,umask=0x31"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2,		"event=0xff,umask=0x32"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3,		"event=0xff,umask=0x33"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port4,		"event=0xff,umask=0x34"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port4.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port4.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port5,		"event=0xff,umask=0x35"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port5.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port5.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port6,		"event=0xff,umask=0x36"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port6.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port6.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port7,		"event=0xff,umask=0x37"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port7.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port7.unit,	"MiB"),
	{ /* end: all zeroes */ },
};

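/* One ioclk counter plus eight bw_in and eight bw_out port counters. */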
static struct intel_uncore_type spr_uncore_iio_free_running = {
	.name			= "iio_free_running",
	.num_counters		= 17,
	.num_freerunning_types	= SPR_IIO_FREERUNNING_TYPE_MAX,
	.freerunning		= spr_iio_freerunning,
	.ops			= &skx_uncore_iio_freerunning_ops,
	.event_descs		= spr_uncore_iio_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
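
/*
 * Example (a sketch; the exact PMU name, e.g. uncore_iio_free_running_0,
 * depends on how many boxes the system enumerates):
 *   perf stat -a -e uncore_iio_free_running_0/bw_in_port0/ -- sleep 1
 * The scale/unit attributes above make perf report the result in MiB.
 */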

enum perf_uncore_spr_imc_freerunning_type_id {
	SPR_IMC_DCLK,
	SPR_IMC_PQ_CYCLES,

	SPR_IMC_FREERUNNING_TYPE_MAX,
};

static struct freerunning_counters spr_imc_freerunning[] = {
	[SPR_IMC_DCLK]		= { 0x22b0, 0x0, 0, 1, 48 },
	[SPR_IMC_PQ_CYCLES]	= { 0x2318, 0x8, 0, 2, 48 },
};

static struct uncore_event_desc spr_uncore_imc_freerunning_events[] = {
	INTEL_UNCORE_EVENT_DESC(dclk,			"event=0xff,umask=0x10"),

	INTEL_UNCORE_EVENT_DESC(rpq_cycles,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(wpq_cycles,		"event=0xff,umask=0x21"),
	{ /* end: all zeroes */ },
};

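/*
 * PCI device ID of the SPR memory controller; used to find the MC device
 * and map its PMON MMIO region.
 */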
#define SPR_MC_DEVICE_ID	0x3251

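/*
 * Each IMC instance exposes its free-running counters in a separate MMIO
 * region, ICX_IMC_MEM_STRIDE apart, starting at SNR_IMC_MMIO_MEM0_OFFSET.
 */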
static void spr_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
{
	int mem_offset = box->pmu->pmu_idx * ICX_IMC_MEM_STRIDE + SNR_IMC_MMIO_MEM0_OFFSET;

	snr_uncore_mmio_map(box, uncore_mmio_box_ctl(box),
			    mem_offset, SPR_MC_DEVICE_ID);
}

static struct intel_uncore_ops spr_uncore_imc_freerunning_ops = {
	.init_box	= spr_uncore_imc_freerunning_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};

static struct intel_uncore_type spr_uncore_imc_free_running = {
	.name			= "imc_free_running",
	.num_counters		= 3,
	.mmio_map_size		= SNR_IMC_MMIO_SIZE,
	.num_freerunning_types	= SPR_IMC_FREERUNNING_TYPE_MAX,
	.freerunning		= spr_imc_freerunning,
	.ops			= &spr_uncore_imc_freerunning_ops,
	.event_descs		= spr_uncore_imc_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};

#define UNCORE_SPR_MSR_EXTRA_UNCORES		1
#define UNCORE_SPR_MMIO_EXTRA_UNCORES		1
#define UNCORE_SPR_PCI_EXTRA_UNCORES		2

static struct intel_uncore_type *spr_msr_uncores[UNCORE_SPR_MSR_EXTRA_UNCORES] = {
	&spr_uncore_iio_free_running,
};

static struct intel_uncore_type *spr_mmio_uncores[UNCORE_SPR_MMIO_EXTRA_UNCORES] = {
	&spr_uncore_imc_free_running,
};

static struct intel_uncore_type *spr_pci_uncores[UNCORE_SPR_PCI_EXTRA_UNCORES] = {
	&spr_uncore_upi,
	&spr_uncore_m3upi
};

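/*
 * The UPI and M3UPI entries in the discovery table are unreliable (see
 * spr_uncore_pci_init()); ignore them and use the static tables above.
 */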
int spr_uncore_units_ignore[] = {
	UNCORE_SPR_UPI,
	UNCORE_SPR_M3UPI,
	UNCORE_IGNORE_END
};

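/*
 * Overlay the SPR-specific customizations onto a type generated from the
 * discovery table; only fields that are set in from_type are copied.
 */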
static void uncore_type_customized_copy(struct intel_uncore_type *to_type,
					struct intel_uncore_type *from_type)
{
	if (!to_type || !from_type)
		return;

	if (from_type->name)
		to_type->name = from_type->name;
	if (from_type->fixed_ctr_bits)
		to_type->fixed_ctr_bits = from_type->fixed_ctr_bits;
	if (from_type->event_mask)
		to_type->event_mask = from_type->event_mask;
	if (from_type->event_mask_ext)
		to_type->event_mask_ext = from_type->event_mask_ext;
	if (from_type->fixed_ctr)
		to_type->fixed_ctr = from_type->fixed_ctr;
	if (from_type->fixed_ctl)
		to_type->fixed_ctl = from_type->fixed_ctl;
	if (from_type->num_shared_regs)
		to_type->num_shared_regs = from_type->num_shared_regs;
	if (from_type->constraints)
		to_type->constraints = from_type->constraints;
	if (from_type->ops)
		to_type->ops = from_type->ops;
	if (from_type->event_descs)
		to_type->event_descs = from_type->event_descs;
	if (from_type->format_group)
		to_type->format_group = from_type->format_group;
	if (from_type->attr_update)
		to_type->attr_update = from_type->attr_update;
	if (from_type->set_mapping)
		to_type->set_mapping = from_type->set_mapping;
	if (from_type->get_topology)
		to_type->get_topology = from_type->get_topology;
	if (from_type->cleanup_mapping)
		to_type->cleanup_mapping = from_type->cleanup_mapping;
}

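/*
 * Build the type list from the discovery table, apply the SPR
 * customizations, and append the statically defined extra types.
 */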
static struct intel_uncore_type **
uncore_get_uncores(enum uncore_access_type type_id, int num_extra,
		   struct intel_uncore_type **extra)
{
	struct intel_uncore_type **types, **start_types;
	int i;

	start_types = types = intel_uncore_generic_init_uncores(type_id, num_extra);

	/* Only copy the customized features */
	for (; *types; types++) {
		if ((*types)->type_id >= UNCORE_SPR_NUM_UNCORE_TYPES)
			continue;
		uncore_type_customized_copy(*types, spr_uncores[(*types)->type_id]);
	}

	for (i = 0; i < num_extra; i++, types++)
		*types = extra[i];

	return start_types;
}

static struct intel_uncore_type *
uncore_find_type_by_id(struct intel_uncore_type **types, int type_id)
{
	for (; *types; types++) {
		if (type_id == (*types)->type_id)
			return *types;
	}

	return NULL;
}

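/*
 * Box IDs may be sparse, so size per-type arrays by the highest box ID
 * seen plus one rather than by num_boxes.
 */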
static int uncore_type_max_boxes(struct intel_uncore_type **types,
				 int type_id)
{
	struct intel_uncore_type *type;
	int i, max = 0;

	type = uncore_find_type_by_id(types, type_id);
	if (!type)
		return 0;

	for (i = 0; i < type->num_boxes; i++) {
		if (type->box_ids[i] > max)
			max = type->box_ids[i];
	}

	return max + 1;
}

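/* MSR that reports the actual number of CHA units on the package. */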
#define SPR_MSR_UNC_CBO_CONFIG		0x2FFE

void spr_uncore_cpu_init(void)
{
	struct intel_uncore_type *type;
	u64 num_cbo;

	uncore_msr_uncores = uncore_get_uncores(UNCORE_ACCESS_MSR,
						UNCORE_SPR_MSR_EXTRA_UNCORES,
						spr_msr_uncores);

	type = uncore_find_type_by_id(uncore_msr_uncores, UNCORE_SPR_CHA);
	if (type) {
		/*
		 * The value from the discovery table (stored in the type->num_boxes
		 * of UNCORE_SPR_CHA) is incorrect on some SPR variants because of a
		 * firmware bug. Use the value from SPR_MSR_UNC_CBO_CONFIG instead.
		 */
		rdmsrl(SPR_MSR_UNC_CBO_CONFIG, num_cbo);
		/*
		 * The MSR doesn't work on the EMR XCC, but the firmware bug doesn't
		 * impact the EMR XCC either. Don't let a zero reading from the MSR
		 * replace the existing value.
		 */
		if (num_cbo)
			type->num_boxes = num_cbo;
	}
	spr_uncore_iio_free_running.num_boxes = uncore_type_max_boxes(uncore_msr_uncores, UNCORE_SPR_IIO);
}

#define SPR_UNCORE_UPI_PCIID		0x3241
#define SPR_UNCORE_UPI0_DEVFN		0x9
#define SPR_UNCORE_M3UPI_PCIID		0x3246
#define SPR_UNCORE_M3UPI0_DEVFN		0x29

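/*
 * Walk the PCI bus for the UPI/M3UPI devices and record, per die, the
 * encoded domain/bus/devfn plus the box control offset that would
 * normally come from the discovery table.
 */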
static void spr_update_device_location(int type_id)
{
	struct intel_uncore_type *type;
	struct pci_dev *dev = NULL;
	u32 device, devfn;
	u64 *ctls;
	int die;

	if (type_id == UNCORE_SPR_UPI) {
		type = &spr_uncore_upi;
		device = SPR_UNCORE_UPI_PCIID;
		devfn = SPR_UNCORE_UPI0_DEVFN;
	} else if (type_id == UNCORE_SPR_M3UPI) {
		type = &spr_uncore_m3upi;
		device = SPR_UNCORE_M3UPI_PCIID;
		devfn = SPR_UNCORE_M3UPI0_DEVFN;
	} else
		return;

	ctls = kcalloc(__uncore_max_dies, sizeof(u64), GFP_KERNEL);
	if (!ctls) {
		type->num_boxes = 0;
		return;
	}

	while ((dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, dev)) != NULL) {
		if (devfn != dev->devfn)
			continue;

		die = uncore_device_to_die(dev);
		if (die < 0)
			continue;

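		/* Encode the location in the discovery-table box control format. */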
		ctls[die] = pci_domain_nr(dev->bus) << UNCORE_DISCOVERY_PCI_DOMAIN_OFFSET |
			    dev->bus->number << UNCORE_DISCOVERY_PCI_BUS_OFFSET |
			    devfn << UNCORE_DISCOVERY_PCI_DEVFN_OFFSET |
			    type->box_ctl;
	}

	type->box_ctls = ctls;
}

int spr_uncore_pci_init(void)
{
	/*
	 * The discovery table of UPI on some SPR variants is broken,
	 * which impacts the detection of both the UPI and M3UPI uncore PMON.
	 * Use the pre-defined UPI and M3UPI tables instead.
	 *
	 * The exact location, e.g., the PCI domain and bus number,
	 * can only be retrieved at load time.
	 * Update the location of UPI and M3UPI.
	 */
	spr_update_device_location(UNCORE_SPR_UPI);
	spr_update_device_location(UNCORE_SPR_M3UPI);
	uncore_pci_uncores = uncore_get_uncores(UNCORE_ACCESS_PCI,
						UNCORE_SPR_PCI_EXTRA_UNCORES,
						spr_pci_uncores);
	return 0;
}

void spr_uncore_mmio_init(void)
{
	int ret = snbep_pci2phy_map_init(0x3250, SKX_CPUNODEID, SKX_GIDNIDMAP, true);

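	/*
	 * Expose the free-running IMC counters only when the PCI bus to
	 * package mapping is available; otherwise fall back to the
	 * discovery-table uncores alone.
	 */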
	if (ret)
		uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO, 0, NULL);
	else {
		uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO,
							 UNCORE_SPR_MMIO_EXTRA_UNCORES,
							 spr_mmio_uncores);

		spr_uncore_imc_free_running.num_boxes = uncore_type_max_boxes(uncore_mmio_uncores, UNCORE_SPR_IMC) / 2;
	}
}

/* end of SPR uncore support */