1 // SPDX-License-Identifier: GPL-2.0
2 /* SandyBridge-EP/IvyTown uncore support */
3 #include "uncore.h"
4 
5 /* SNB-EP pci bus to socket mapping */
6 #define SNBEP_CPUNODEID			0x40
7 #define SNBEP_GIDNIDMAP			0x54
8 
9 /* SNB-EP Box level control */
10 #define SNBEP_PMON_BOX_CTL_RST_CTRL	(1 << 0)
11 #define SNBEP_PMON_BOX_CTL_RST_CTRS	(1 << 1)
12 #define SNBEP_PMON_BOX_CTL_FRZ		(1 << 8)
13 #define SNBEP_PMON_BOX_CTL_FRZ_EN	(1 << 16)
14 #define SNBEP_PMON_BOX_CTL_INT		(SNBEP_PMON_BOX_CTL_RST_CTRL | \
15 					 SNBEP_PMON_BOX_CTL_RST_CTRS | \
16 					 SNBEP_PMON_BOX_CTL_FRZ_EN)
17 /* SNB-EP event control */
18 #define SNBEP_PMON_CTL_EV_SEL_MASK	0x000000ff
19 #define SNBEP_PMON_CTL_UMASK_MASK	0x0000ff00
20 #define SNBEP_PMON_CTL_RST		(1 << 17)
21 #define SNBEP_PMON_CTL_EDGE_DET		(1 << 18)
22 #define SNBEP_PMON_CTL_EV_SEL_EXT	(1 << 21)
23 #define SNBEP_PMON_CTL_EN		(1 << 22)
24 #define SNBEP_PMON_CTL_INVERT		(1 << 23)
25 #define SNBEP_PMON_CTL_TRESH_MASK	0xff000000
26 #define SNBEP_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
27 					 SNBEP_PMON_CTL_UMASK_MASK | \
28 					 SNBEP_PMON_CTL_EDGE_DET | \
29 					 SNBEP_PMON_CTL_INVERT | \
30 					 SNBEP_PMON_CTL_TRESH_MASK)
31 
32 /* SNB-EP Ubox event control */
33 #define SNBEP_U_MSR_PMON_CTL_TRESH_MASK		0x1f000000
34 #define SNBEP_U_MSR_PMON_RAW_EVENT_MASK		\
35 				(SNBEP_PMON_CTL_EV_SEL_MASK | \
36 				 SNBEP_PMON_CTL_UMASK_MASK | \
37 				 SNBEP_PMON_CTL_EDGE_DET | \
38 				 SNBEP_PMON_CTL_INVERT | \
39 				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
40 
41 #define SNBEP_CBO_PMON_CTL_TID_EN		(1 << 19)
42 #define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK	(SNBEP_PMON_RAW_EVENT_MASK | \
43 						 SNBEP_CBO_PMON_CTL_TID_EN)
44 
45 /* SNB-EP PCU event control */
46 #define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK	0x0000c000
47 #define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK	0x1f000000
48 #define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT	(1 << 30)
49 #define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET	(1 << 31)
50 #define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
51 				(SNBEP_PMON_CTL_EV_SEL_MASK | \
52 				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
53 				 SNBEP_PMON_CTL_EDGE_DET | \
54 				 SNBEP_PMON_CTL_INVERT | \
55 				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
56 				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
57 				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
58 
59 #define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
60 				(SNBEP_PMON_RAW_EVENT_MASK | \
61 				 SNBEP_PMON_CTL_EV_SEL_EXT)
62 
63 /* SNB-EP pci control register */
64 #define SNBEP_PCI_PMON_BOX_CTL			0xf4
65 #define SNBEP_PCI_PMON_CTL0			0xd8
66 /* SNB-EP pci counter register */
67 #define SNBEP_PCI_PMON_CTR0			0xa0
68 
69 /* SNB-EP home agent register */
70 #define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0	0x40
71 #define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1	0x44
72 #define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH	0x48
73 /* SNB-EP memory controller register */
74 #define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL		0xf0
75 #define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR		0xd0
76 /* SNB-EP QPI register */
77 #define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0		0x228
78 #define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1		0x22c
79 #define SNBEP_Q_Py_PCI_PMON_PKT_MASK0		0x238
80 #define SNBEP_Q_Py_PCI_PMON_PKT_MASK1		0x23c
81 
82 /* SNB-EP Ubox register */
83 #define SNBEP_U_MSR_PMON_CTR0			0xc16
84 #define SNBEP_U_MSR_PMON_CTL0			0xc10
85 
86 #define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL		0xc08
87 #define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR		0xc09
88 
89 /* SNB-EP Cbo register */
90 #define SNBEP_C0_MSR_PMON_CTR0			0xd16
91 #define SNBEP_C0_MSR_PMON_CTL0			0xd10
92 #define SNBEP_C0_MSR_PMON_BOX_CTL		0xd04
93 #define SNBEP_C0_MSR_PMON_BOX_FILTER		0xd14
94 #define SNBEP_CBO_MSR_OFFSET			0x20
95 
96 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID	0x1f
97 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID	0x3fc00
98 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE	0x7c0000
99 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC	0xff800000
100 
101 #define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) {	\
102 	.event = (e),				\
103 	.msr = SNBEP_C0_MSR_PMON_BOX_FILTER,	\
104 	.config_mask = (m),			\
105 	.idx = (i)				\
106 }
107 
108 /* SNB-EP PCU register */
109 #define SNBEP_PCU_MSR_PMON_CTR0			0xc36
110 #define SNBEP_PCU_MSR_PMON_CTL0			0xc30
111 #define SNBEP_PCU_MSR_PMON_BOX_CTL		0xc24
112 #define SNBEP_PCU_MSR_PMON_BOX_FILTER		0xc34
113 #define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK	0xffffffff
114 #define SNBEP_PCU_MSR_CORE_C3_CTR		0x3fc
115 #define SNBEP_PCU_MSR_CORE_C6_CTR		0x3fd
116 
117 /* IVBEP event control */
118 #define IVBEP_PMON_BOX_CTL_INT		(SNBEP_PMON_BOX_CTL_RST_CTRL | \
119 					 SNBEP_PMON_BOX_CTL_RST_CTRS)
120 #define IVBEP_PMON_RAW_EVENT_MASK		(SNBEP_PMON_CTL_EV_SEL_MASK | \
121 					 SNBEP_PMON_CTL_UMASK_MASK | \
122 					 SNBEP_PMON_CTL_EDGE_DET | \
123 					 SNBEP_PMON_CTL_TRESH_MASK)
124 /* IVBEP Ubox */
125 #define IVBEP_U_MSR_PMON_GLOBAL_CTL		0xc00
126 #define IVBEP_U_PMON_GLOBAL_FRZ_ALL		(1 << 31)
127 #define IVBEP_U_PMON_GLOBAL_UNFRZ_ALL		(1 << 29)
128 
129 #define IVBEP_U_MSR_PMON_RAW_EVENT_MASK	\
130 				(SNBEP_PMON_CTL_EV_SEL_MASK | \
131 				 SNBEP_PMON_CTL_UMASK_MASK | \
132 				 SNBEP_PMON_CTL_EDGE_DET | \
133 				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
134 /* IVBEP Cbo */
135 #define IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK		(IVBEP_PMON_RAW_EVENT_MASK | \
136 						 SNBEP_CBO_PMON_CTL_TID_EN)
137 
138 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_TID		(0x1fULL << 0)
139 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 5)
140 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x3fULL << 17)
141 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_NID		(0xffffULL << 32)
142 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC		(0x1ffULL << 52)
143 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_C6		(0x1ULL << 61)
144 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_NC		(0x1ULL << 62)
145 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)
146 
147 /* IVBEP home agent */
148 #define IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST		(1 << 16)
149 #define IVBEP_HA_PCI_PMON_RAW_EVENT_MASK		\
150 				(IVBEP_PMON_RAW_EVENT_MASK | \
151 				 IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST)
152 /* IVBEP PCU */
153 #define IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
154 				(SNBEP_PMON_CTL_EV_SEL_MASK | \
155 				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
156 				 SNBEP_PMON_CTL_EDGE_DET | \
157 				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
158 				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
159 				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
160 /* IVBEP QPI */
161 #define IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
162 				(IVBEP_PMON_RAW_EVENT_MASK | \
163 				 SNBEP_PMON_CTL_EV_SEL_EXT)
164 
165 #define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
166 				((1ULL << (n)) - 1)))
167 
168 /* Haswell-EP Ubox */
169 #define HSWEP_U_MSR_PMON_CTR0			0x709
170 #define HSWEP_U_MSR_PMON_CTL0			0x705
171 #define HSWEP_U_MSR_PMON_FILTER			0x707
172 
173 #define HSWEP_U_MSR_PMON_UCLK_FIXED_CTL		0x703
174 #define HSWEP_U_MSR_PMON_UCLK_FIXED_CTR		0x704
175 
176 #define HSWEP_U_MSR_PMON_BOX_FILTER_TID		(0x1 << 0)
177 #define HSWEP_U_MSR_PMON_BOX_FILTER_CID		(0x1fULL << 1)
178 #define HSWEP_U_MSR_PMON_BOX_FILTER_MASK \
179 					(HSWEP_U_MSR_PMON_BOX_FILTER_TID | \
180 					 HSWEP_U_MSR_PMON_BOX_FILTER_CID)
181 
182 /* Haswell-EP CBo */
183 #define HSWEP_C0_MSR_PMON_CTR0			0xe08
184 #define HSWEP_C0_MSR_PMON_CTL0			0xe01
185 #define HSWEP_C0_MSR_PMON_BOX_CTL			0xe00
186 #define HSWEP_C0_MSR_PMON_BOX_FILTER0		0xe05
187 #define HSWEP_CBO_MSR_OFFSET			0x10
188 
189 
190 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_TID		(0x3fULL << 0)
191 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 6)
192 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x7fULL << 17)
193 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_NID		(0xffffULL << 32)
194 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC		(0x1ffULL << 52)
195 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_C6		(0x1ULL << 61)
196 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_NC		(0x1ULL << 62)
197 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)
198 
199 
200 /* Haswell-EP Sbox */
201 #define HSWEP_S0_MSR_PMON_CTR0			0x726
202 #define HSWEP_S0_MSR_PMON_CTL0			0x721
203 #define HSWEP_S0_MSR_PMON_BOX_CTL			0x720
204 #define HSWEP_SBOX_MSR_OFFSET			0xa
205 #define HSWEP_S_MSR_PMON_RAW_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
206 						 SNBEP_CBO_PMON_CTL_TID_EN)
207 
208 /* Haswell-EP PCU */
209 #define HSWEP_PCU_MSR_PMON_CTR0			0x717
210 #define HSWEP_PCU_MSR_PMON_CTL0			0x711
211 #define HSWEP_PCU_MSR_PMON_BOX_CTL		0x710
212 #define HSWEP_PCU_MSR_PMON_BOX_FILTER		0x715
213 
214 /* KNL Ubox */
215 #define KNL_U_MSR_PMON_RAW_EVENT_MASK \
216 					(SNBEP_U_MSR_PMON_RAW_EVENT_MASK | \
217 						SNBEP_CBO_PMON_CTL_TID_EN)
218 /* KNL CHA */
219 #define KNL_CHA_MSR_OFFSET			0xc
220 #define KNL_CHA_MSR_PMON_CTL_QOR		(1 << 16)
221 #define KNL_CHA_MSR_PMON_RAW_EVENT_MASK \
222 					(SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK | \
223 					 KNL_CHA_MSR_PMON_CTL_QOR)
224 #define KNL_CHA_MSR_PMON_BOX_FILTER_TID		0x1ff
225 #define KNL_CHA_MSR_PMON_BOX_FILTER_STATE	(7 << 18)
226 #define KNL_CHA_MSR_PMON_BOX_FILTER_OP		(0xfffffe2aULL << 32)
227 #define KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE	(0x1ULL << 32)
228 #define KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE	(0x1ULL << 33)
229 #define KNL_CHA_MSR_PMON_BOX_FILTER_NNC		(0x1ULL << 37)
230 
231 /* KNL EDC/MC UCLK */
232 #define KNL_UCLK_MSR_PMON_CTR0_LOW		0x400
233 #define KNL_UCLK_MSR_PMON_CTL0			0x420
234 #define KNL_UCLK_MSR_PMON_BOX_CTL		0x430
235 #define KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW	0x44c
236 #define KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL	0x454
237 #define KNL_PMON_FIXED_CTL_EN			0x1
238 
239 /* KNL EDC */
240 #define KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW		0xa00
241 #define KNL_EDC0_ECLK_MSR_PMON_CTL0		0xa20
242 #define KNL_EDC0_ECLK_MSR_PMON_BOX_CTL		0xa30
243 #define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW	0xa3c
244 #define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL	0xa44
245 
246 /* KNL MC */
247 #define KNL_MC0_CH0_MSR_PMON_CTR0_LOW		0xb00
248 #define KNL_MC0_CH0_MSR_PMON_CTL0		0xb20
249 #define KNL_MC0_CH0_MSR_PMON_BOX_CTL		0xb30
250 #define KNL_MC0_CH0_MSR_PMON_FIXED_LOW		0xb3c
251 #define KNL_MC0_CH0_MSR_PMON_FIXED_CTL		0xb44
252 
253 /* KNL IRP */
254 #define KNL_IRP_PCI_PMON_BOX_CTL		0xf0
255 #define KNL_IRP_PCI_PMON_RAW_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
256 						 KNL_CHA_MSR_PMON_CTL_QOR)
257 /* KNL PCU */
258 #define KNL_PCU_PMON_CTL_EV_SEL_MASK		0x0000007f
259 #define KNL_PCU_PMON_CTL_USE_OCC_CTR		(1 << 7)
260 #define KNL_PCU_MSR_PMON_CTL_TRESH_MASK		0x3f000000
261 #define KNL_PCU_MSR_PMON_RAW_EVENT_MASK	\
262 				(KNL_PCU_PMON_CTL_EV_SEL_MASK | \
263 				 KNL_PCU_PMON_CTL_USE_OCC_CTR | \
264 				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
265 				 SNBEP_PMON_CTL_EDGE_DET | \
266 				 SNBEP_CBO_PMON_CTL_TID_EN | \
267 				 SNBEP_PMON_CTL_INVERT | \
268 				 KNL_PCU_MSR_PMON_CTL_TRESH_MASK | \
269 				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
270 				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
271 
272 /* SKX pci bus to socket mapping */
273 #define SKX_CPUNODEID			0xc0
274 #define SKX_GIDNIDMAP			0xd4
275 
276 /* SKX CHA */
277 #define SKX_CHA_MSR_PMON_BOX_FILTER_TID		(0x1ffULL << 0)
278 #define SKX_CHA_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 9)
279 #define SKX_CHA_MSR_PMON_BOX_FILTER_STATE	(0x3ffULL << 17)
280 #define SKX_CHA_MSR_PMON_BOX_FILTER_REM		(0x1ULL << 32)
281 #define SKX_CHA_MSR_PMON_BOX_FILTER_LOC		(0x1ULL << 33)
282 #define SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC	(0x1ULL << 35)
283 #define SKX_CHA_MSR_PMON_BOX_FILTER_NM		(0x1ULL << 36)
284 #define SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM	(0x1ULL << 37)
285 #define SKX_CHA_MSR_PMON_BOX_FILTER_OPC0	(0x3ffULL << 41)
286 #define SKX_CHA_MSR_PMON_BOX_FILTER_OPC1	(0x3ffULL << 51)
287 #define SKX_CHA_MSR_PMON_BOX_FILTER_C6		(0x1ULL << 61)
288 #define SKX_CHA_MSR_PMON_BOX_FILTER_NC		(0x1ULL << 62)
289 #define SKX_CHA_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)
290 
291 /* SKX IIO */
292 #define SKX_IIO0_MSR_PMON_CTL0		0xa48
293 #define SKX_IIO0_MSR_PMON_CTR0		0xa41
294 #define SKX_IIO0_MSR_PMON_BOX_CTL	0xa40
295 #define SKX_IIO_MSR_OFFSET		0x20
296 
297 #define SKX_PMON_CTL_TRESH_MASK		(0xff << 24)
298 #define SKX_PMON_CTL_TRESH_MASK_EXT	(0xf)
299 #define SKX_PMON_CTL_CH_MASK		(0xff << 4)
300 #define SKX_PMON_CTL_FC_MASK		(0x7 << 12)
301 #define SKX_IIO_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
302 					 SNBEP_PMON_CTL_UMASK_MASK | \
303 					 SNBEP_PMON_CTL_EDGE_DET | \
304 					 SNBEP_PMON_CTL_INVERT | \
305 					 SKX_PMON_CTL_TRESH_MASK)
306 #define SKX_IIO_PMON_RAW_EVENT_MASK_EXT	(SKX_PMON_CTL_TRESH_MASK_EXT | \
307 					 SKX_PMON_CTL_CH_MASK | \
308 					 SKX_PMON_CTL_FC_MASK)
309 
310 /* SKX IRP */
311 #define SKX_IRP0_MSR_PMON_CTL0		0xa5b
312 #define SKX_IRP0_MSR_PMON_CTR0		0xa59
313 #define SKX_IRP0_MSR_PMON_BOX_CTL	0xa58
314 #define SKX_IRP_MSR_OFFSET		0x20
315 
316 /* SKX UPI */
317 #define SKX_UPI_PCI_PMON_CTL0		0x350
318 #define SKX_UPI_PCI_PMON_CTR0		0x318
319 #define SKX_UPI_PCI_PMON_BOX_CTL	0x378
320 #define SKX_UPI_CTL_UMASK_EXT		0xffefff
321 
322 /* SKX M2M */
323 #define SKX_M2M_PCI_PMON_CTL0		0x228
324 #define SKX_M2M_PCI_PMON_CTR0		0x200
325 #define SKX_M2M_PCI_PMON_BOX_CTL	0x258
326 
327 /* SNR Ubox */
328 #define SNR_U_MSR_PMON_CTR0			0x1f98
329 #define SNR_U_MSR_PMON_CTL0			0x1f91
330 #define SNR_U_MSR_PMON_UCLK_FIXED_CTL		0x1f93
331 #define SNR_U_MSR_PMON_UCLK_FIXED_CTR		0x1f94
332 
333 /* SNR CHA */
334 #define SNR_CHA_RAW_EVENT_MASK_EXT		0x3ffffff
335 #define SNR_CHA_MSR_PMON_CTL0			0x1c01
336 #define SNR_CHA_MSR_PMON_CTR0			0x1c08
337 #define SNR_CHA_MSR_PMON_BOX_CTL		0x1c00
338 #define SNR_C0_MSR_PMON_BOX_FILTER0		0x1c05
339 
340 
341 /* SNR IIO */
342 #define SNR_IIO_MSR_PMON_CTL0			0x1e08
343 #define SNR_IIO_MSR_PMON_CTR0			0x1e01
344 #define SNR_IIO_MSR_PMON_BOX_CTL		0x1e00
345 #define SNR_IIO_MSR_OFFSET			0x10
346 #define SNR_IIO_PMON_RAW_EVENT_MASK_EXT		0x7ffff
347 
348 /* SNR IRP */
349 #define SNR_IRP0_MSR_PMON_CTL0			0x1ea8
350 #define SNR_IRP0_MSR_PMON_CTR0			0x1ea1
351 #define SNR_IRP0_MSR_PMON_BOX_CTL		0x1ea0
352 #define SNR_IRP_MSR_OFFSET			0x10
353 
354 /* SNR M2PCIE */
355 #define SNR_M2PCIE_MSR_PMON_CTL0		0x1e58
356 #define SNR_M2PCIE_MSR_PMON_CTR0		0x1e51
357 #define SNR_M2PCIE_MSR_PMON_BOX_CTL		0x1e50
358 #define SNR_M2PCIE_MSR_OFFSET			0x10
359 
360 /* SNR PCU */
361 #define SNR_PCU_MSR_PMON_CTL0			0x1ef1
362 #define SNR_PCU_MSR_PMON_CTR0			0x1ef8
363 #define SNR_PCU_MSR_PMON_BOX_CTL		0x1ef0
364 #define SNR_PCU_MSR_PMON_BOX_FILTER		0x1efc
365 
366 /* SNR M2M */
367 #define SNR_M2M_PCI_PMON_CTL0			0x468
368 #define SNR_M2M_PCI_PMON_CTR0			0x440
369 #define SNR_M2M_PCI_PMON_BOX_CTL		0x438
370 #define SNR_M2M_PCI_PMON_UMASK_EXT		0xff
371 
372 /* SNR IMC */
373 #define SNR_IMC_MMIO_PMON_FIXED_CTL		0x54
374 #define SNR_IMC_MMIO_PMON_FIXED_CTR		0x38
375 #define SNR_IMC_MMIO_PMON_CTL0			0x40
376 #define SNR_IMC_MMIO_PMON_CTR0			0x8
377 #define SNR_IMC_MMIO_PMON_BOX_CTL		0x22800
378 #define SNR_IMC_MMIO_OFFSET			0x4000
379 #define SNR_IMC_MMIO_SIZE			0x4000
380 #define SNR_IMC_MMIO_BASE_OFFSET		0xd0
381 #define SNR_IMC_MMIO_BASE_MASK			0x1FFFFFFF
382 #define SNR_IMC_MMIO_MEM0_OFFSET		0xd8
383 #define SNR_IMC_MMIO_MEM0_MASK			0x7FF
384 
385 /* ICX CHA */
386 #define ICX_C34_MSR_PMON_CTR0			0xb68
387 #define ICX_C34_MSR_PMON_CTL0			0xb61
388 #define ICX_C34_MSR_PMON_BOX_CTL		0xb60
389 #define ICX_C34_MSR_PMON_BOX_FILTER0		0xb65
390 
391 /* ICX IIO */
392 #define ICX_IIO_MSR_PMON_CTL0			0xa58
393 #define ICX_IIO_MSR_PMON_CTR0			0xa51
394 #define ICX_IIO_MSR_PMON_BOX_CTL		0xa50
395 
396 /* ICX IRP */
397 #define ICX_IRP0_MSR_PMON_CTL0			0xa4d
398 #define ICX_IRP0_MSR_PMON_CTR0			0xa4b
399 #define ICX_IRP0_MSR_PMON_BOX_CTL		0xa4a
400 
401 /* ICX M2PCIE */
402 #define ICX_M2PCIE_MSR_PMON_CTL0		0xa46
403 #define ICX_M2PCIE_MSR_PMON_CTR0		0xa41
404 #define ICX_M2PCIE_MSR_PMON_BOX_CTL		0xa40
405 
406 /* ICX UPI */
407 #define ICX_UPI_PCI_PMON_CTL0			0x350
408 #define ICX_UPI_PCI_PMON_CTR0			0x320
409 #define ICX_UPI_PCI_PMON_BOX_CTL		0x318
410 #define ICX_UPI_CTL_UMASK_EXT			0xffffff
411 
412 /* ICX M3UPI*/
413 #define ICX_M3UPI_PCI_PMON_CTL0			0xd8
414 #define ICX_M3UPI_PCI_PMON_CTR0			0xa8
415 #define ICX_M3UPI_PCI_PMON_BOX_CTL		0xa0
416 
417 /* ICX IMC */
418 #define ICX_NUMBER_IMC_CHN			2
419 #define ICX_IMC_MEM_STRIDE			0x4
420 
421 DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
422 DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6");
423 DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
424 DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7");
425 DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
426 DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-43,45-55");
427 DEFINE_UNCORE_FORMAT_ATTR(umask_ext2, umask, "config:8-15,32-57");
428 DEFINE_UNCORE_FORMAT_ATTR(umask_ext3, umask, "config:8-15,32-39");
429 DEFINE_UNCORE_FORMAT_ATTR(umask_ext4, umask, "config:8-15,32-55");
430 DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
431 DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
432 DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
433 DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
434 DEFINE_UNCORE_FORMAT_ATTR(thresh9, thresh, "config:24-35");
435 DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
436 DEFINE_UNCORE_FORMAT_ATTR(thresh6, thresh, "config:24-29");
437 DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
438 DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
439 DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
440 DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
441 DEFINE_UNCORE_FORMAT_ATTR(occ_edge_det, occ_edge_det, "config:31");
442 DEFINE_UNCORE_FORMAT_ATTR(ch_mask, ch_mask, "config:36-43");
443 DEFINE_UNCORE_FORMAT_ATTR(ch_mask2, ch_mask, "config:36-47");
444 DEFINE_UNCORE_FORMAT_ATTR(fc_mask, fc_mask, "config:44-46");
445 DEFINE_UNCORE_FORMAT_ATTR(fc_mask2, fc_mask, "config:48-50");
446 DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
447 DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0");
448 DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5");
449 DEFINE_UNCORE_FORMAT_ATTR(filter_tid4, filter_tid, "config1:0-8");
450 DEFINE_UNCORE_FORMAT_ATTR(filter_tid5, filter_tid, "config1:0-9");
451 DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
452 DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
453 DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
454 DEFINE_UNCORE_FORMAT_ATTR(filter_link3, filter_link, "config1:12");
455 DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
456 DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
457 DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
458 DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
459 DEFINE_UNCORE_FORMAT_ATTR(filter_state3, filter_state, "config1:17-23");
460 DEFINE_UNCORE_FORMAT_ATTR(filter_state4, filter_state, "config1:18-20");
461 DEFINE_UNCORE_FORMAT_ATTR(filter_state5, filter_state, "config1:17-26");
462 DEFINE_UNCORE_FORMAT_ATTR(filter_rem, filter_rem, "config1:32");
463 DEFINE_UNCORE_FORMAT_ATTR(filter_loc, filter_loc, "config1:33");
464 DEFINE_UNCORE_FORMAT_ATTR(filter_nm, filter_nm, "config1:36");
465 DEFINE_UNCORE_FORMAT_ATTR(filter_not_nm, filter_not_nm, "config1:37");
466 DEFINE_UNCORE_FORMAT_ATTR(filter_local, filter_local, "config1:33");
467 DEFINE_UNCORE_FORMAT_ATTR(filter_all_op, filter_all_op, "config1:35");
468 DEFINE_UNCORE_FORMAT_ATTR(filter_nnm, filter_nnm, "config1:37");
469 DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
470 DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
471 DEFINE_UNCORE_FORMAT_ATTR(filter_opc3, filter_opc, "config1:41-60");
472 DEFINE_UNCORE_FORMAT_ATTR(filter_opc_0, filter_opc0, "config1:41-50");
473 DEFINE_UNCORE_FORMAT_ATTR(filter_opc_1, filter_opc1, "config1:51-60");
474 DEFINE_UNCORE_FORMAT_ATTR(filter_nc, filter_nc, "config1:62");
475 DEFINE_UNCORE_FORMAT_ATTR(filter_c6, filter_c6, "config1:61");
476 DEFINE_UNCORE_FORMAT_ATTR(filter_isoc, filter_isoc, "config1:63");
477 DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
478 DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
479 DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
480 DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
481 DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
482 DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
483 DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
484 DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
485 DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
486 DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
487 DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
488 DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
489 DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
490 DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
491 DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
492 DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
493 DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
494 DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
495 DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
496 DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
497 DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
498 DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");
499 
500 static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
501 {
502 	struct pci_dev *pdev = box->pci_dev;
503 	int box_ctl = uncore_pci_box_ctl(box);
504 	u32 config = 0;
505 
506 	if (!pci_read_config_dword(pdev, box_ctl, &config)) {
507 		config |= SNBEP_PMON_BOX_CTL_FRZ;
508 		pci_write_config_dword(pdev, box_ctl, config);
509 	}
510 }
511 
512 static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
513 {
514 	struct pci_dev *pdev = box->pci_dev;
515 	int box_ctl = uncore_pci_box_ctl(box);
516 	u32 config = 0;
517 
518 	if (!pci_read_config_dword(pdev, box_ctl, &config)) {
519 		config &= ~SNBEP_PMON_BOX_CTL_FRZ;
520 		pci_write_config_dword(pdev, box_ctl, config);
521 	}
522 }
523 
524 static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
525 {
526 	struct pci_dev *pdev = box->pci_dev;
527 	struct hw_perf_event *hwc = &event->hw;
528 
529 	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
530 }
531 
532 static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
533 {
534 	struct pci_dev *pdev = box->pci_dev;
535 	struct hw_perf_event *hwc = &event->hw;
536 
537 	pci_write_config_dword(pdev, hwc->config_base, hwc->config);
538 }
539 
540 static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
541 {
542 	struct pci_dev *pdev = box->pci_dev;
543 	struct hw_perf_event *hwc = &event->hw;
544 	u64 count = 0;
545 
546 	pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
547 	pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);
548 
549 	return count;
550 }
551 
552 static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
553 {
554 	struct pci_dev *pdev = box->pci_dev;
555 	int box_ctl = uncore_pci_box_ctl(box);
556 
557 	pci_write_config_dword(pdev, box_ctl, SNBEP_PMON_BOX_CTL_INT);
558 }
559 
560 static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
561 {
562 	u64 config;
563 	unsigned msr;
564 
565 	msr = uncore_msr_box_ctl(box);
566 	if (msr) {
567 		rdmsrl(msr, config);
568 		config |= SNBEP_PMON_BOX_CTL_FRZ;
569 		wrmsrl(msr, config);
570 	}
571 }
572 
573 static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
574 {
575 	u64 config;
576 	unsigned msr;
577 
578 	msr = uncore_msr_box_ctl(box);
579 	if (msr) {
580 		rdmsrl(msr, config);
581 		config &= ~SNBEP_PMON_BOX_CTL_FRZ;
582 		wrmsrl(msr, config);
583 	}
584 }
585 
static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	/*
	 * If the event uses an extra (filter) register, program it
	 * before setting the enable bit, so the filter is already in
	 * effect when counting starts.
	 */
	if (reg1->idx != EXTRA_REG_NONE)
		wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
596 
597 static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
598 					struct perf_event *event)
599 {
600 	struct hw_perf_event *hwc = &event->hw;
601 
602 	wrmsrl(hwc->config_base, hwc->config);
603 }
604 
605 static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
606 {
607 	unsigned msr = uncore_msr_box_ctl(box);
608 
609 	if (msr)
610 		wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
611 }
612 
/* Generic SNB-EP PMON format: event select, umask, edge, invert, 8-bit threshold. */
static struct attribute *snbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
621 
/* Ubox format: same as generic but the threshold field is only 5 bits wide. */
static struct attribute *snbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};
630 
/*
 * Cbox format: adds the tid_en control bit and the box-filter fields
 * (tid/nid/state/opc) that live in config1 and are written to the
 * Cbox filter MSR.
 */
static struct attribute *snbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_nid.attr,
	&format_attr_filter_state.attr,
	&format_attr_filter_opc.attr,
	NULL,
};
644 
/*
 * PCU format: occupancy-select/invert/edge controls instead of a umask,
 * plus the four frequency-band filter fields in config1.
 */
static struct attribute *snbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};
659 
/*
 * QPI format: extended event select (bit 21) plus packet match/mask
 * fields.  The match_* fields map to config1, the mask_* fields to
 * config2.
 */
static struct attribute *snbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};
686 
/*
 * Predefined IMC events.  The CAS count events carry a scale of
 * 64/2^20 (one 64-byte cache line per CAS, reported in MiB).
 */
static struct uncore_event_desc snbep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};
697 
/* Predefined QPI events; 0x1xx event codes use the extended select bit. */
static struct uncore_event_desc snbep_uncore_qpi_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,       "event=0x14"),
	INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
	INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x102,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x103,umask=0x04"),
	{ /* end: all zeroes */ },
};
705 
/*
 * sysfs "format" attribute groups, one per box flavor, each exposing
 * the matching *_formats_attr table above.
 */
static const struct attribute_group snbep_uncore_format_group = {
	.name = "format",
	.attrs = snbep_uncore_formats_attr,
};

static const struct attribute_group snbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_ubox_formats_attr,
};

static const struct attribute_group snbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_cbox_formats_attr,
};

static const struct attribute_group snbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = snbep_uncore_pcu_formats_attr,
};

static const struct attribute_group snbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = snbep_uncore_qpi_formats_attr,
};
730 
/*
 * Common MSR-based PMON ops.  The __ variant omits init_box so callers
 * that need a different init hook can combine it with their own.
 */
#define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),			\
	.init_box	= snbep_uncore_msr_init_box		\

static struct intel_uncore_ops snbep_uncore_msr_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};
745 
/*
 * Common PCI PMON ops.  enable_event is deliberately not part of the
 * macro so that users can supply their own enable hook.
 */
#define SNBEP_UNCORE_PCI_OPS_COMMON_INIT()			\
	.init_box	= snbep_uncore_pci_init_box,		\
	.disable_box	= snbep_uncore_pci_disable_box,		\
	.enable_box	= snbep_uncore_pci_enable_box,		\
	.disable_event	= snbep_uncore_pci_disable_event,	\
	.read_counter	= snbep_uncore_pci_read_counter
752 
753 static struct intel_uncore_ops snbep_uncore_pci_ops = {
754 	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
755 	.enable_event	= snbep_uncore_pci_enable_event,	\
756 };
757 
/*
 * Cbox counter constraints: for each event code, the mask selects
 * which of the box's counters the event may be scheduled on.
 */
static struct event_constraint snbep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0xe),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	EVENT_CONSTRAINT_END
};
787 
/* R2PCIe counter constraints (event code -> allowed counter mask). */
static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	EVENT_CONSTRAINT_END
};
801 
/* R3QPI counter constraints (event code -> allowed counter mask). */
static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};
833 
/*
 * SNB-EP Ubox: a single MSR-programmed box with two 44-bit general
 * counters and one 48-bit fixed UCLK counter.
 */
static struct intel_uncore_type snbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &snbep_uncore_msr_ops,
	.format_group	= &snbep_uncore_ubox_format_group,
};
848 
/*
 * Cbox events that consume the shared box filter register.  Each entry
 * matches an event/umask encoding (event & config_mask) and contributes
 * the filter-field selector bits (idx) that snbep_cbox_hw_config() ORs
 * together and snbep_cbox_filter_mask() translates into a filter mask.
 */
static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
	EVENT_EXTRA_END
};
877 
878 static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
879 {
880 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
881 	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
882 	int i;
883 
884 	if (uncore_box_is_fake(box))
885 		return;
886 
887 	for (i = 0; i < 5; i++) {
888 		if (reg1->alloc & (0x1 << i))
889 			atomic_sub(1 << (i * 6), &er->ref);
890 	}
891 	reg1->alloc = 0;
892 }
893 
/*
 * Try to reserve the Cbox's shared filter register for this event.
 *
 * reg1->idx holds a bitmask of the filter fields the event needs (up to
 * five); er->ref packs a 6-bit use count per field.  A field can be
 * shared when its current value already matches the requested one.
 * Returns NULL on success, or the empty constraint when any required
 * field conflicts.  cbox_filter_mask translates a field selector bit
 * into the platform-specific filter-register mask (SNB-EP vs IvyTown).
 */
static struct event_constraint *
__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
			    u64 (*cbox_filter_mask)(int fields))
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i, alloc = 0;
	unsigned long flags;
	u64 mask;

	if (reg1->idx == EXTRA_REG_NONE)
		return NULL;

	raw_spin_lock_irqsave(&er->lock, flags);
	for (i = 0; i < 5; i++) {
		if (!(reg1->idx & (0x1 << i)))
			continue;
		/* Real boxes skip fields that were already allocated. */
		if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
			continue;

		mask = cbox_filter_mask(0x1 << i);
		/* Free field, or occupied with an identical filter value. */
		if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
		    !((reg1->config ^ er->config) & mask)) {
			atomic_add(1 << (i * 6), &er->ref);
			er->config &= ~mask;
			er->config |= reg1->config & mask;
			alloc |= (0x1 << i);
		} else {
			break;
		}
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);
	/* The loop broke early on a conflicting field: back out. */
	if (i < 5)
		goto fail;

	if (!uncore_box_is_fake(box))
		reg1->alloc |= alloc;

	return NULL;
fail:
	/* Drop only the references taken in this attempt. */
	for (; i >= 0; i--) {
		if (alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	return &uncore_constraint_empty;
}
940 
941 static u64 snbep_cbox_filter_mask(int fields)
942 {
943 	u64 mask = 0;
944 
945 	if (fields & 0x1)
946 		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
947 	if (fields & 0x2)
948 		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
949 	if (fields & 0x4)
950 		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
951 	if (fields & 0x8)
952 		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
953 
954 	return mask;
955 }
956 
/* SNB-EP Cbox: shared-filter constraint check using the SNB-EP field map. */
static struct event_constraint *
snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
}
962 
963 static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
964 {
965 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
966 	struct extra_reg *er;
967 	int idx = 0;
968 
969 	for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
970 		if (er->event != (event->hw.config & er->config_mask))
971 			continue;
972 		idx |= er->idx;
973 	}
974 
975 	if (idx) {
976 		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
977 			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
978 		reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
979 		reg1->idx = idx;
980 	}
981 	return 0;
982 }
983 
/* Cbox ops: common MSR accessors plus the shared-filter constraint hooks. */
static struct intel_uncore_ops snbep_uncore_cbox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_cbox_hw_config,
	.get_constraint		= snbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

/*
 * SNB-EP Cbox (LLC coherency engine): up to 8 boxes, each with four
 * 44-bit counters and one shared filter register (num_shared_regs = 1).
 */
static struct intel_uncore_type snbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &snbep_uncore_cbox_ops,
	.format_group		= &snbep_uncore_cbox_format_group,
};
1006 
1007 static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
1008 {
1009 	struct hw_perf_event *hwc = &event->hw;
1010 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1011 	u64 config = reg1->config;
1012 
1013 	if (new_idx > reg1->idx)
1014 		config <<= 8 * (new_idx - reg1->idx);
1015 	else
1016 		config >>= 8 * (reg1->idx - new_idx);
1017 
1018 	if (modify) {
1019 		hwc->config += new_idx - reg1->idx;
1020 		reg1->config = config;
1021 		reg1->idx = new_idx;
1022 	}
1023 	return config;
1024 }
1025 
/*
 * Reserve one byte lane of the shared PCU filter register.
 *
 * er->ref keeps an 8-bit use count per lane.  A lane is usable when it
 * is free or already programmed with the same filter value.  If the
 * preferred lane conflicts, the other three are tried in turn (the
 * filter value is shifted to the candidate lane via snbep_pcu_alter_er
 * without committing); only a successful, non-fake allocation commits
 * the new index into the event.
 */
static struct event_constraint *
snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	unsigned long flags;
	int idx = reg1->idx;
	u64 mask, config1 = reg1->config;
	bool ok = false;

	/* Nothing to reserve, or this real event already holds its lane. */
	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;
again:
	mask = 0xffULL << (idx * 8);
	raw_spin_lock_irqsave(&er->lock, flags);
	if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
	    !((config1 ^ er->config) & mask)) {
		atomic_add(1 << (idx * 8), &er->ref);
		er->config &= ~mask;
		er->config |= config1 & mask;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (!ok) {
		/* Rotate through the remaining lanes before giving up. */
		idx = (idx + 1) % 4;
		if (idx != reg1->idx) {
			config1 = snbep_pcu_alter_er(event, idx, false);
			goto again;
		}
		return &uncore_constraint_empty;
	}

	if (!uncore_box_is_fake(box)) {
		if (idx != reg1->idx)
			snbep_pcu_alter_er(event, idx, true);
		reg1->alloc = 1;
	}
	return NULL;
}
1067 
1068 static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
1069 {
1070 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1071 	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
1072 
1073 	if (uncore_box_is_fake(box) || !reg1->alloc)
1074 		return;
1075 
1076 	atomic_sub(1 << (reg1->idx * 8), &er->ref);
1077 	reg1->alloc = 0;
1078 }
1079 
1080 static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1081 {
1082 	struct hw_perf_event *hwc = &event->hw;
1083 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1084 	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
1085 
1086 	if (ev_sel >= 0xb && ev_sel <= 0xe) {
1087 		reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
1088 		reg1->idx = ev_sel - 0xb;
1089 		reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
1090 	}
1091 	return 0;
1092 }
1093 
/* PCU ops: common MSR accessors plus band-filter lane management. */
static struct intel_uncore_ops snbep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

/*
 * SNB-EP PCU (power control unit): one box, four 48-bit counters and a
 * shared filter register for the four frequency-band events.
 */
static struct intel_uncore_type snbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};

/* NULL-terminated list of all MSR-based SNB-EP uncore PMU types. */
static struct intel_uncore_type *snbep_msr_uncores[] = {
	&snbep_uncore_ubox,
	&snbep_uncore_cbox,
	&snbep_uncore_pcu,
	NULL,
};
1121 
1122 void snbep_uncore_cpu_init(void)
1123 {
1124 	if (snbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
1125 		snbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
1126 	uncore_msr_uncores = snbep_msr_uncores;
1127 }
1128 
/* Slot indices into uncore_extra_pci_dev[die].dev[] for auxiliary devices. */
enum {
	SNBEP_PCI_QPI_PORT0_FILTER,
	SNBEP_PCI_QPI_PORT1_FILTER,
	BDX_PCI_QPI_PORT2_FILTER,
	HSWEP_PCI_PCU_3,
};
1135 
1136 static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1137 {
1138 	struct hw_perf_event *hwc = &event->hw;
1139 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1140 	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1141 
1142 	if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
1143 		reg1->idx = 0;
1144 		reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
1145 		reg1->config = event->attr.config1;
1146 		reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
1147 		reg2->config = event->attr.config2;
1148 	}
1149 	return 0;
1150 }
1151 
/*
 * Enable a QPI event.  The packet match/mask registers live on a
 * separate "filter" PCI device (one per QPI port), looked up in
 * uncore_extra_pci_dev by die and port index; its 64-bit values are
 * written as two 32-bit config-space dwords before the counter is
 * enabled on the QPI device itself.
 */
static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		/* pmu_idx selects PORT0_FILTER or PORT1_FILTER. */
		int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
		int die = box->dieid;
		struct pci_dev *filter_pdev = uncore_extra_pci_dev[die].dev[idx];

		/* The filter device may be absent; skip filtering then. */
		if (filter_pdev) {
			pci_write_config_dword(filter_pdev, reg1->reg,
						(u32)reg1->config);
			pci_write_config_dword(filter_pdev, reg1->reg + 4,
						(u32)(reg1->config >> 32));
			pci_write_config_dword(filter_pdev, reg2->reg,
						(u32)reg2->config);
			pci_write_config_dword(filter_pdev, reg2->reg + 4,
						(u32)(reg2->config >> 32));
		}
	}

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
1178 
/* QPI ops: common PCI accessors plus match/mask filter handling. */
static struct intel_uncore_ops snbep_uncore_qpi_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event		= snbep_qpi_enable_event,
	.hw_config		= snbep_qpi_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};

/* Shared initializer for the PCI-based SNB-EP uncore types below. */
#define SNBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &snbep_uncore_pci_ops,		\
	.format_group	= &snbep_uncore_format_group
1194 
/* Home Agent: one box, four 48-bit counters. */
static struct intel_uncore_type snbep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* Integrated memory controller: four channels, each with a fixed DCLK counter. */
static struct intel_uncore_type snbep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* QPI link layer: two ports, with packet match/mask filtering support. */
static struct intel_uncore_type snbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.event_descs		= snbep_uncore_qpi_events,
	.format_group		= &snbep_uncore_qpi_format_group,
};


/* R2PCIe ring-to-PCIe interface: one box, 44-bit counters. */
static struct intel_uncore_type snbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* R3QPI ring-to-QPI interface: two links, three 44-bit counters each. */
static struct intel_uncore_type snbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
1248 
/* Index of each PCI uncore type in snbep_pci_uncores[]. */
enum {
	SNBEP_PCI_UNCORE_HA,
	SNBEP_PCI_UNCORE_IMC,
	SNBEP_PCI_UNCORE_QPI,
	SNBEP_PCI_UNCORE_R2PCIE,
	SNBEP_PCI_UNCORE_R3QPI,
};

/* NULL-terminated list of all PCI-based SNB-EP uncore PMU types. */
static struct intel_uncore_type *snbep_pci_uncores[] = {
	[SNBEP_PCI_UNCORE_HA]		= &snbep_uncore_ha,
	[SNBEP_PCI_UNCORE_IMC]		= &snbep_uncore_imc,
	[SNBEP_PCI_UNCORE_QPI]		= &snbep_uncore_qpi,
	[SNBEP_PCI_UNCORE_R2PCIE]	= &snbep_uncore_r2pcie,
	[SNBEP_PCI_UNCORE_R3QPI]	= &snbep_uncore_r3qpi,
	NULL,
};
1265 
/*
 * PCI device IDs of the SNB-EP uncore PMON devices.  driver_data packs
 * the uncore type index and the box (instance) number.
 */
static const struct pci_device_id snbep_uncore_pci_ids[] = {
	{ /* Home Agent */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
	},
	{ /* MC Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* QPI Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};
1319 
/* Probe-less driver stub; the generic uncore code binds the devices. */
static struct pci_driver snbep_uncore_pci_driver = {
	.name		= "snbep_uncore",
	.id_table	= snbep_uncore_pci_ids,
};
1324 
/* Low three bits of the CPUNODEID register hold the local node ID. */
#define NODE_ID_MASK	0x7

/*
 * build pci bus to socket mapping
 *
 * Walk every UBOX device (PCI ID @devid), read its local node ID
 * (@nodeid_loc) and the node-ID mapping register (@idmap_loc), and
 * record which physical socket each PCI bus belongs to.  Buses with no
 * UBOX then inherit the mapping of the nearest mapped bus, scanning
 * downwards when @reverse is set, upwards otherwise.
 */
static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool reverse)
{
	struct pci_dev *ubox_dev = NULL;
	int i, bus, nodeid, segment;
	struct pci2phy_map *map;
	int err = 0;
	u32 config = 0;

	while (1) {
		/* find the UBOX device */
		ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
		if (!ubox_dev)
			break;
		bus = ubox_dev->bus->number;
		/* get the Node ID of the local register */
		err = pci_read_config_dword(ubox_dev, nodeid_loc, &config);
		if (err)
			break;
		nodeid = config & NODE_ID_MASK;
		/* get the Node ID mapping */
		err = pci_read_config_dword(ubox_dev, idmap_loc, &config);
		if (err)
			break;

		segment = pci_domain_nr(ubox_dev->bus);
		raw_spin_lock(&pci2phy_map_lock);
		map = __find_pci2phy_map(segment);
		if (!map) {
			raw_spin_unlock(&pci2phy_map_lock);
			err = -ENOMEM;
			break;
		}

		/*
		 * every three bits in the Node ID mapping register maps
		 * to a particular node.
		 */
		for (i = 0; i < 8; i++) {
			if (nodeid == ((config >> (3 * i)) & 0x7)) {
				map->pbus_to_physid[bus] = i;
				break;
			}
		}
		raw_spin_unlock(&pci2phy_map_lock);
	}

	if (!err) {
		/*
		 * For PCI bus with no UBOX device, find the next bus
		 * that has UBOX device and use its mapping.
		 */
		raw_spin_lock(&pci2phy_map_lock);
		list_for_each_entry(map, &pci2phy_map_head, list) {
			i = -1;
			if (reverse) {
				for (bus = 255; bus >= 0; bus--) {
					if (map->pbus_to_physid[bus] >= 0)
						i = map->pbus_to_physid[bus];
					else
						map->pbus_to_physid[bus] = i;
				}
			} else {
				for (bus = 0; bus <= 255; bus++) {
					if (map->pbus_to_physid[bus] >= 0)
						i = map->pbus_to_physid[bus];
					else
						map->pbus_to_physid[bus] = i;
				}
			}
		}
		raw_spin_unlock(&pci2phy_map_lock);
	}

	/* Drop the reference still held after the last pci_get_device(). */
	pci_dev_put(ubox_dev);

	return err ? pcibios_err_to_errno(err) : 0;
}
1407 
1408 int snbep_uncore_pci_init(void)
1409 {
1410 	int ret = snbep_pci2phy_map_init(0x3ce0, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
1411 	if (ret)
1412 		return ret;
1413 	uncore_pci_uncores = snbep_pci_uncores;
1414 	uncore_pci_driver = &snbep_uncore_pci_driver;
1415 	return 0;
1416 }
1417 /* end of Sandy Bridge-EP uncore support */
1418 
1419 /* IvyTown uncore support */
1420 static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box)
1421 {
1422 	unsigned msr = uncore_msr_box_ctl(box);
1423 	if (msr)
1424 		wrmsrl(msr, IVBEP_PMON_BOX_CTL_INT);
1425 }
1426 
1427 static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box)
1428 {
1429 	struct pci_dev *pdev = box->pci_dev;
1430 
1431 	pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
1432 }
1433 
/* IvyTown MSR ops: SNB-EP accessors with the IVT-specific box reset. */
#define IVBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.init_box	= ivbep_uncore_msr_init_box,		\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

static struct intel_uncore_ops ivbep_uncore_msr_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};

/* IvyTown PCI ops: SNB-EP accessors with the IVT-specific box reset. */
static struct intel_uncore_ops ivbep_uncore_pci_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};

/* Shared initializer for the PCI-based IvyTown uncore types. */
#define IVBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= IVBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &ivbep_uncore_pci_ops,			\
	.format_group	= &ivbep_uncore_format_group
1462 
/* Default IvyTown event format: 8-bit threshold field. */
static struct attribute *ivbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

/* Ubox format: 5-bit threshold field. */
static struct attribute *ivbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

/* Cbox format: tid_en plus the IVT filter fields (config1). */
static struct attribute *ivbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_link.attr,
	&format_attr_filter_state2.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

/* PCU format: occupancy select/edge/invert and four band filters. */
static struct attribute *ivbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

/* QPI format: extended event select plus packet match/mask fields. */
static struct attribute *ivbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};
1537 
/* sysfs "format" groups exposing the attribute lists above. */
static const struct attribute_group ivbep_uncore_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_formats_attr,
};

static const struct attribute_group ivbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_ubox_formats_attr,
};

static const struct attribute_group ivbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_cbox_formats_attr,
};

static const struct attribute_group ivbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_pcu_formats_attr,
};

static const struct attribute_group ivbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_qpi_formats_attr,
};
1562 
/* IvyTown Ubox: same register layout as SNB-EP, wider event mask. */
static struct intel_uncore_type ivbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= IVBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_ubox_format_group,
};
1577 
/*
 * IvyTown Cbox filter usage table: event/umask encoding -> filter-field
 * selector bits consumed by ivbep_cbox_filter_mask().
 */
static struct extra_reg ivbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	EVENT_EXTRA_END
};
1618 
1619 static u64 ivbep_cbox_filter_mask(int fields)
1620 {
1621 	u64 mask = 0;
1622 
1623 	if (fields & 0x1)
1624 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_TID;
1625 	if (fields & 0x2)
1626 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK;
1627 	if (fields & 0x4)
1628 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
1629 	if (fields & 0x8)
1630 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NID;
1631 	if (fields & 0x10) {
1632 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
1633 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NC;
1634 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_C6;
1635 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
1636 	}
1637 
1638 	return mask;
1639 }
1640 
/* IvyTown Cbox: shared-filter constraint check using the IVT field map. */
static struct event_constraint *
ivbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, ivbep_cbox_filter_mask);
}
1646 
1647 static int ivbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1648 {
1649 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1650 	struct extra_reg *er;
1651 	int idx = 0;
1652 
1653 	for (er = ivbep_uncore_cbox_extra_regs; er->msr; er++) {
1654 		if (er->event != (event->hw.config & er->config_mask))
1655 			continue;
1656 		idx |= er->idx;
1657 	}
1658 
1659 	if (idx) {
1660 		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
1661 			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1662 		reg1->config = event->attr.config1 & ivbep_cbox_filter_mask(idx);
1663 		reg1->idx = idx;
1664 	}
1665 	return 0;
1666 }
1667 
/*
 * Enable an IvyTown Cbox event: program the shared filter value (split
 * across two MSRs) before setting the enable bit in the event control.
 */
static void ivbep_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		u64 filter = uncore_shared_reg_config(box, 0);
		wrmsrl(reg1->reg, filter & 0xffffffff);
		/*
		 * NOTE(review): the "+ 6" (rather than + 1) appears to be the
		 * hardware layout -- the high-half filter MSR (FILTER1) sits
		 * 6 MSRs above FILTER on IvyTown -- confirm against the
		 * uncore PMU reference before changing.
		 */
		wrmsrl(reg1->reg + 6, filter >> 32);
	}

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
1681 
/* IvyTown Cbox ops: IVT box reset + split-MSR filter enable path. */
static struct intel_uncore_ops ivbep_uncore_cbox_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= ivbep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= ivbep_cbox_hw_config,
	.get_constraint		= ivbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

/* IvyTown Cbox: up to 15 boxes, SNB-EP register layout, IVT event mask. */
static struct intel_uncore_type ivbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 15,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &ivbep_uncore_cbox_ops,
	.format_group		= &ivbep_uncore_cbox_format_group,
};
1709 
/* IvyTown PCU ops: reuses the SNB-EP band-filter lane management. */
static struct intel_uncore_ops ivbep_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

/* IvyTown PCU: SNB-EP register layout with the IVT event mask. */
static struct intel_uncore_type ivbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_pcu_ops,
	.format_group		= &ivbep_uncore_pcu_format_group,
};

/* NULL-terminated list of all MSR-based IvyTown uncore PMU types. */
static struct intel_uncore_type *ivbep_msr_uncores[] = {
	&ivbep_uncore_ubox,
	&ivbep_uncore_cbox,
	&ivbep_uncore_pcu,
	NULL,
};
1737 
1738 void ivbep_uncore_cpu_init(void)
1739 {
1740 	if (ivbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
1741 		ivbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
1742 	uncore_msr_uncores = ivbep_msr_uncores;
1743 }
1744 
1745 static struct intel_uncore_type ivbep_uncore_ha = {
1746 	.name		= "ha",
1747 	.num_counters   = 4,
1748 	.num_boxes	= 2,
1749 	.perf_ctr_bits	= 48,
1750 	IVBEP_UNCORE_PCI_COMMON_INIT(),
1751 };
1752 
/* Memory controller (IMC) PMON: eight boxes, plus a 48-bit fixed counter per box. */
static struct intel_uncore_type ivbep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};
1764 
/* registers in IRP boxes are not properly aligned */
static unsigned ivbep_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};	/* per-counter control reg offsets */
static unsigned ivbep_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};	/* per-counter counter reg offsets */
1768 
1769 static void ivbep_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1770 {
1771 	struct pci_dev *pdev = box->pci_dev;
1772 	struct hw_perf_event *hwc = &event->hw;
1773 
1774 	pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx],
1775 			       hwc->config | SNBEP_PMON_CTL_EN);
1776 }
1777 
1778 static void ivbep_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1779 {
1780 	struct pci_dev *pdev = box->pci_dev;
1781 	struct hw_perf_event *hwc = &event->hw;
1782 
1783 	pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], hwc->config);
1784 }
1785 
1786 static u64 ivbep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
1787 {
1788 	struct pci_dev *pdev = box->pci_dev;
1789 	struct hw_perf_event *hwc = &event->hw;
1790 	u64 count = 0;
1791 
1792 	pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
1793 	pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
1794 
1795 	return count;
1796 }
1797 
/* IRP box callbacks: the enable/disable/read hooks use the unaligned-register helpers above. */
static struct intel_uncore_ops ivbep_uncore_irp_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= ivbep_uncore_irp_disable_event,
	.enable_event	= ivbep_uncore_irp_enable_event,
	.read_counter	= ivbep_uncore_irp_read_counter,
};

/* IRP PMON: one PCI-based box; no .perf_ctr/.event_ctl since offsets come from the tables above. */
static struct intel_uncore_type ivbep_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= IVBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &ivbep_uncore_irp_ops,
	.format_group		= &ivbep_uncore_format_group,
};
1817 
/* QPI box callbacks: reuse the SNB-EP QPI hw_config/enable helpers and generic shared-reg constraints. */
static struct intel_uncore_ops ivbep_uncore_qpi_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_qpi_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
	.hw_config	= snbep_qpi_hw_config,
	.get_constraint	= uncore_get_constraint,
	.put_constraint	= uncore_put_constraint,
};

/* QPI PMON: three PCI-based boxes, one shared (match/mask filter) register. */
static struct intel_uncore_type ivbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_qpi_ops,
	.format_group		= &ivbep_uncore_qpi_format_group,
};
1843 
/* R2PCIe ring-to-PCIe PMON: one box; reuses the SNB-EP R2PCIe counter constraints. */
static struct intel_uncore_type ivbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

/* R3QPI ring-to-QPI PMON: two boxes, only three counters each. */
static struct intel_uncore_type ivbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};
1861 
/* Indexes into ivbep_pci_uncores[]; also encoded into each PCI id's driver_data below. */
enum {
	IVBEP_PCI_UNCORE_HA,
	IVBEP_PCI_UNCORE_IMC,
	IVBEP_PCI_UNCORE_IRP,
	IVBEP_PCI_UNCORE_QPI,
	IVBEP_PCI_UNCORE_R2PCIE,
	IVBEP_PCI_UNCORE_R3QPI,
};

/* NULL-terminated list of all IvyTown PCI-based uncore PMU types. */
static struct intel_uncore_type *ivbep_pci_uncores[] = {
	[IVBEP_PCI_UNCORE_HA]	= &ivbep_uncore_ha,
	[IVBEP_PCI_UNCORE_IMC]	= &ivbep_uncore_imc,
	[IVBEP_PCI_UNCORE_IRP]	= &ivbep_uncore_irp,
	[IVBEP_PCI_UNCORE_QPI]	= &ivbep_uncore_qpi,
	[IVBEP_PCI_UNCORE_R2PCIE]	= &ivbep_uncore_r2pcie,
	[IVBEP_PCI_UNCORE_R3QPI]	= &ivbep_uncore_r3qpi,
	NULL,
};
1880 
/*
 * PCI device id table for the IvyTown uncore PMON units.  driver_data
 * encodes the uncore type index (from the enum above) and the box instance.
 */
static const struct pci_device_id ivbep_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};
1966 
/* id_table only; probing is handled by the shared uncore PCI framework. */
static struct pci_driver ivbep_uncore_pci_driver = {
	.name		= "ivbep_uncore",
	.id_table	= ivbep_uncore_pci_ids,
};
1971 
1972 int ivbep_uncore_pci_init(void)
1973 {
1974 	int ret = snbep_pci2phy_map_init(0x0e1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
1975 	if (ret)
1976 		return ret;
1977 	uncore_pci_uncores = ivbep_pci_uncores;
1978 	uncore_pci_driver = &ivbep_uncore_pci_driver;
1979 	return 0;
1980 }
1981 /* end of IvyTown uncore support */
1982 
1983 /* KNL uncore support */
/* sysfs "format" attributes for the KNL Ubox (note 5-bit threshold field). */
static struct attribute *knl_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

static const struct attribute_group knl_uncore_ubox_format_group = {
	.name = "format",
	.attrs = knl_uncore_ubox_formats_attr,
};

/* KNL Ubox PMON: two general counters plus a fixed (UCLK) counter; reuses HSW-EP register layout. */
static struct intel_uncore_type knl_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= KNL_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops			= &snbep_uncore_msr_ops,
	.format_group		= &knl_uncore_ubox_format_group,
};
2013 
/* sysfs "format" attributes for the KNL CHA, including its config1 filter fields. */
static struct attribute *knl_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_qor.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid4.attr,
	&format_attr_filter_link3.attr,
	&format_attr_filter_state4.attr,
	&format_attr_filter_local.attr,
	&format_attr_filter_all_op.attr,
	&format_attr_filter_nnm.attr,
	&format_attr_filter_opc3.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static const struct attribute_group knl_uncore_cha_format_group = {
	.name = "format",
	.attrs = knl_uncore_cha_formats_attr,
};

/* Events restricted to specific CHA counters. */
static struct event_constraint knl_uncore_cha_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	EVENT_CONSTRAINT_END
};

/*
 * Events that need the CHA filter register.  The idx bitmask (last arg)
 * selects which filter fields apply — see knl_cha_filter_mask().
 */
static struct extra_reg knl_uncore_cha_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x3d, 0xff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x4),
	EVENT_EXTRA_END
};
2054 
2055 static u64 knl_cha_filter_mask(int fields)
2056 {
2057 	u64 mask = 0;
2058 
2059 	if (fields & 0x1)
2060 		mask |= KNL_CHA_MSR_PMON_BOX_FILTER_TID;
2061 	if (fields & 0x2)
2062 		mask |= KNL_CHA_MSR_PMON_BOX_FILTER_STATE;
2063 	if (fields & 0x4)
2064 		mask |= KNL_CHA_MSR_PMON_BOX_FILTER_OP;
2065 	return mask;
2066 }
2067 
/* Constraint hook: defer to the shared Cbox logic with the KNL filter mask. */
static struct event_constraint *
knl_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, knl_cha_filter_mask);
}
2073 
/*
 * Set up the CHA filter register for events that need one.  The
 * extra_regs table maps event encodings to an idx bitmask describing
 * which filter fields the event uses.
 */
static int knl_cha_hw_config(struct intel_uncore_box *box,
			     struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	/* Accumulate idx bits from every matching extra-reg entry. */
	for (er = knl_uncore_cha_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		/* Filter MSRs are laid out per box at KNL_CHA_MSR_OFFSET stride. */
		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
			    KNL_CHA_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & knl_cha_filter_mask(idx);

		/* These filter bits are set unconditionally for filtered events. */
		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE;
		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE;
		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_NNC;
		reg1->idx = idx;
	}
	return 0;
}
2099 
/* Defined below in the Haswell-EP section; shared by the KNL CHA ops. */
static void hswep_cbox_enable_event(struct intel_uncore_box *box,
				    struct perf_event *event);

/* KNL CHA box callbacks: HSW-EP enable path plus KNL-specific config/constraints. */
static struct intel_uncore_ops knl_uncore_cha_ops = {
	.init_box		= snbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= knl_cha_hw_config,
	.get_constraint		= knl_cha_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
2114 
/* KNL caching/home agent (CHA) PMON: 38 boxes, one shared (filter) register per box. */
static struct intel_uncore_type knl_uncore_cha = {
	.name			= "cha",
	.num_counters		= 4,
	.num_boxes		= 38,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= KNL_CHA_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= KNL_CHA_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= knl_uncore_cha_constraints,
	.ops			= &knl_uncore_cha_ops,
	.format_group		= &knl_uncore_cha_format_group,
};
2130 
/* sysfs "format" attributes for the KNL PCU (occupancy-counter fields, 6-bit threshold). */
static struct attribute *knl_uncore_pcu_formats_attr[] = {
	&format_attr_event2.attr,
	&format_attr_use_occ_ctr.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh6.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge_det.attr,
	NULL,
};

static const struct attribute_group knl_uncore_pcu_format_group = {
	.name = "format",
	.attrs = knl_uncore_pcu_formats_attr,
};

/* KNL power control unit (PCU) PMON: one box; reuses HSW-EP register layout. */
static struct intel_uncore_type knl_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= KNL_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.ops			= &snbep_uncore_msr_ops,
	.format_group		= &knl_uncore_pcu_format_group,
};
2161 
/* NULL-terminated list of all KNL MSR-based uncore PMU types. */
static struct intel_uncore_type *knl_msr_uncores[] = {
	&knl_uncore_ubox,
	&knl_uncore_cha,
	&knl_uncore_pcu,
	NULL,
};

/* Register the KNL MSR-based uncore PMUs. */
void knl_uncore_cpu_init(void)
{
	uncore_msr_uncores = knl_msr_uncores;
}
2173 
2174 static void knl_uncore_imc_enable_box(struct intel_uncore_box *box)
2175 {
2176 	struct pci_dev *pdev = box->pci_dev;
2177 	int box_ctl = uncore_pci_box_ctl(box);
2178 
2179 	pci_write_config_dword(pdev, box_ctl, 0);
2180 }
2181 
2182 static void knl_uncore_imc_enable_event(struct intel_uncore_box *box,
2183 					struct perf_event *event)
2184 {
2185 	struct pci_dev *pdev = box->pci_dev;
2186 	struct hw_perf_event *hwc = &event->hw;
2187 
2188 	if ((event->attr.config & SNBEP_PMON_CTL_EV_SEL_MASK)
2189 							== UNCORE_FIXED_EVENT)
2190 		pci_write_config_dword(pdev, hwc->config_base,
2191 				       hwc->config | KNL_PMON_FIXED_CTL_EN);
2192 	else
2193 		pci_write_config_dword(pdev, hwc->config_base,
2194 				       hwc->config | SNBEP_PMON_CTL_EN);
2195 }
2196 
/* Shared callbacks for the KNL IMC/EDC clock-domain boxes. */
static struct intel_uncore_ops knl_uncore_imc_ops = {
	.init_box	= snbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= knl_uncore_imc_enable_box,
	.read_counter	= snbep_uncore_pci_read_counter,
	.enable_event	= knl_uncore_imc_enable_event,
	.disable_event	= snbep_uncore_pci_disable_event,
};

/* KNL memory controller UCLK PMON: two boxes (MC0/MC1), fixed UCLK counter. */
static struct intel_uncore_type knl_uncore_imc_uclk = {
	.name			= "imc_uclk",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};

/* KNL memory controller DCLK (channel) PMON: six boxes (2 MCs x 3 channels). */
static struct intel_uncore_type knl_uncore_imc_dclk = {
	.name			= "imc",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_MC0_CH0_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_MC0_CH0_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_MC0_CH0_MSR_PMON_FIXED_LOW,
	.fixed_ctl		= KNL_MC0_CH0_MSR_PMON_FIXED_CTL,
	.box_ctl		= KNL_MC0_CH0_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};

/* KNL EDC (MCDRAM) UCLK PMON: eight boxes. */
static struct intel_uncore_type knl_uncore_edc_uclk = {
	.name			= "edc_uclk",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};

/* KNL EDC (MCDRAM) ECLK PMON: eight boxes. */
static struct intel_uncore_type knl_uncore_edc_eclk = {
	.name			= "edc_eclk",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_EDC0_ECLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW,
	.fixed_ctl		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL,
	.box_ctl		= KNL_EDC0_ECLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};
2269 
/* Event 0x23 is restricted to the first two M2PCIe counters. */
static struct event_constraint knl_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	EVENT_CONSTRAINT_END
};

/* KNL M2PCIe PMON: one PCI-based box using the common SNB-EP PCI layout. */
static struct intel_uncore_type knl_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= knl_uncore_m2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* sysfs "format" attributes for the KNL IRP. */
static struct attribute *knl_uncore_irp_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_qor.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group knl_uncore_irp_format_group = {
	.name = "format",
	.attrs = knl_uncore_irp_formats_attr,
};

/* KNL IRP PMON: one PCI-based box with two counters. */
static struct intel_uncore_type knl_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= KNL_IRP_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= KNL_IRP_PCI_PMON_BOX_CTL,
	.ops			= &snbep_uncore_pci_ops,
	.format_group		= &knl_uncore_irp_format_group,
};
2311 
/* Indexes into knl_pci_uncores[]; encoded into each PCI id's driver_data below. */
enum {
	KNL_PCI_UNCORE_MC_UCLK,
	KNL_PCI_UNCORE_MC_DCLK,
	KNL_PCI_UNCORE_EDC_UCLK,
	KNL_PCI_UNCORE_EDC_ECLK,
	KNL_PCI_UNCORE_M2PCIE,
	KNL_PCI_UNCORE_IRP,
};

/* NULL-terminated list of all KNL PCI-based uncore PMU types. */
static struct intel_uncore_type *knl_pci_uncores[] = {
	[KNL_PCI_UNCORE_MC_UCLK]	= &knl_uncore_imc_uclk,
	[KNL_PCI_UNCORE_MC_DCLK]	= &knl_uncore_imc_dclk,
	[KNL_PCI_UNCORE_EDC_UCLK]	= &knl_uncore_edc_uclk,
	[KNL_PCI_UNCORE_EDC_ECLK]	= &knl_uncore_edc_eclk,
	[KNL_PCI_UNCORE_M2PCIE]		= &knl_uncore_m2pcie,
	[KNL_PCI_UNCORE_IRP]		= &knl_uncore_irp,
	NULL,
};
2330 
2331 /*
2332  * KNL uses a common PCI device ID for multiple instances of an Uncore PMU
2333  * device type. prior to KNL, each instance of a PMU device type had a unique
2334  * device ID.
2335  *
2336  *	PCI Device ID	Uncore PMU Devices
2337  *	----------------------------------
2338  *	0x7841		MC0 UClk, MC1 UClk
2339  *	0x7843		MC0 DClk CH 0, MC0 DClk CH 1, MC0 DClk CH 2,
2340  *			MC1 DClk CH 0, MC1 DClk CH 1, MC1 DClk CH 2
2341  *	0x7833		EDC0 UClk, EDC1 UClk, EDC2 UClk, EDC3 UClk,
2342  *			EDC4 UClk, EDC5 UClk, EDC6 UClk, EDC7 UClk
2343  *	0x7835		EDC0 EClk, EDC1 EClk, EDC2 EClk, EDC3 EClk,
2344  *			EDC4 EClk, EDC5 EClk, EDC6 EClk, EDC7 EClk
2345  *	0x7817		M2PCIe
2346  *	0x7814		IRP
2347 */
2348 
/*
 * KNL PCI id table.  Because device IDs are shared, driver_data also
 * carries the expected PCI device/function numbers to disambiguate
 * instances (see UNCORE_PCI_DEV_FULL_DATA).
 */
static const struct pci_device_id knl_uncore_pci_ids[] = {
	{ /* MC0 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 0, KNL_PCI_UNCORE_MC_UCLK, 0),
	},
	{ /* MC1 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 0, KNL_PCI_UNCORE_MC_UCLK, 1),
	},
	{ /* MC0 DClk CH 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 2, KNL_PCI_UNCORE_MC_DCLK, 0),
	},
	{ /* MC0 DClk CH 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 3, KNL_PCI_UNCORE_MC_DCLK, 1),
	},
	{ /* MC0 DClk CH 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 4, KNL_PCI_UNCORE_MC_DCLK, 2),
	},
	{ /* MC1 DClk CH 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 2, KNL_PCI_UNCORE_MC_DCLK, 3),
	},
	{ /* MC1 DClk CH 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 3, KNL_PCI_UNCORE_MC_DCLK, 4),
	},
	{ /* MC1 DClk CH 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 4, KNL_PCI_UNCORE_MC_DCLK, 5),
	},
	{ /* EDC0 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, KNL_PCI_UNCORE_EDC_UCLK, 0),
	},
	{ /* EDC1 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, KNL_PCI_UNCORE_EDC_UCLK, 1),
	},
	{ /* EDC2 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(17, 0, KNL_PCI_UNCORE_EDC_UCLK, 2),
	},
	{ /* EDC3 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, KNL_PCI_UNCORE_EDC_UCLK, 3),
	},
	{ /* EDC4 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(19, 0, KNL_PCI_UNCORE_EDC_UCLK, 4),
	},
	{ /* EDC5 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(20, 0, KNL_PCI_UNCORE_EDC_UCLK, 5),
	},
	{ /* EDC6 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 0, KNL_PCI_UNCORE_EDC_UCLK, 6),
	},
	{ /* EDC7 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 0, KNL_PCI_UNCORE_EDC_UCLK, 7),
	},
	{ /* EDC0 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(24, 2, KNL_PCI_UNCORE_EDC_ECLK, 0),
	},
	{ /* EDC1 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(25, 2, KNL_PCI_UNCORE_EDC_ECLK, 1),
	},
	{ /* EDC2 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(26, 2, KNL_PCI_UNCORE_EDC_ECLK, 2),
	},
	{ /* EDC3 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(27, 2, KNL_PCI_UNCORE_EDC_ECLK, 3),
	},
	{ /* EDC4 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(28, 2, KNL_PCI_UNCORE_EDC_ECLK, 4),
	},
	{ /* EDC5 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(29, 2, KNL_PCI_UNCORE_EDC_ECLK, 5),
	},
	{ /* EDC6 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(30, 2, KNL_PCI_UNCORE_EDC_ECLK, 6),
	},
	{ /* EDC7 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(31, 2, KNL_PCI_UNCORE_EDC_ECLK, 7),
	},
	{ /* M2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7817),
		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_M2PCIE, 0),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7814),
		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_IRP, 0),
	},
	{ /* end: all zeroes */ }
};
2456 
/* id_table only; probing is handled by the shared uncore PCI framework. */
static struct pci_driver knl_uncore_pci_driver = {
	.name		= "knl_uncore",
	.id_table	= knl_uncore_pci_ids,
};
2461 
2462 int knl_uncore_pci_init(void)
2463 {
2464 	int ret;
2465 
2466 	/* All KNL PCI based PMON units are on the same PCI bus except IRP */
2467 	ret = snb_pci2phy_map_init(0x7814); /* IRP */
2468 	if (ret)
2469 		return ret;
2470 	ret = snb_pci2phy_map_init(0x7817); /* M2PCIe */
2471 	if (ret)
2472 		return ret;
2473 	uncore_pci_uncores = knl_pci_uncores;
2474 	uncore_pci_driver = &knl_uncore_pci_driver;
2475 	return 0;
2476 }
2477 
2478 /* end of KNL uncore support */
2479 
2480 /* Haswell-EP uncore support */
/* sysfs "format" attributes for the HSW-EP Ubox, including its config1 filter fields. */
static struct attribute *hswep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_filter_tid2.attr,
	&format_attr_filter_cid.attr,
	NULL,
};

static const struct attribute_group hswep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_ubox_formats_attr,
};
2496 
2497 static int hswep_ubox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2498 {
2499 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2500 	reg1->reg = HSWEP_U_MSR_PMON_FILTER;
2501 	reg1->config = event->attr.config1 & HSWEP_U_MSR_PMON_BOX_FILTER_MASK;
2502 	reg1->idx = 0;
2503 	return 0;
2504 }
2505 
/* Ubox callbacks: common SNB-EP MSR ops plus the filter hw_config and generic shared-reg constraints. */
static struct intel_uncore_ops hswep_uncore_ubox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_ubox_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};

/* HSW-EP Ubox PMON: two 44-bit counters plus a 48-bit fixed (UCLK) counter. */
static struct intel_uncore_type hswep_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 44,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.num_shared_regs	= 1,
	.ops			= &hswep_uncore_ubox_ops,
	.format_group		= &hswep_uncore_ubox_format_group,
};
2528 
/* sysfs "format" attributes for the HSW-EP Cbox, including its config1 filter fields. */
static struct attribute *hswep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid3.attr,
	&format_attr_filter_link2.attr,
	&format_attr_filter_state3.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static const struct attribute_group hswep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_cbox_formats_attr,
};

/* Events restricted to specific Cbox counters. */
static struct event_constraint hswep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
	EVENT_CONSTRAINT_END
};
2561 
/*
 * Events that need the Cbox filter register.  The idx bitmask (last arg)
 * selects which filter fields apply — see hswep_cbox_filter_mask().
 */
static struct extra_reg hswep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4028, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4032, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4029, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4033, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x402A, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x12),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	EVENT_EXTRA_END
};
2603 
2604 static u64 hswep_cbox_filter_mask(int fields)
2605 {
2606 	u64 mask = 0;
2607 	if (fields & 0x1)
2608 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_TID;
2609 	if (fields & 0x2)
2610 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK;
2611 	if (fields & 0x4)
2612 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE;
2613 	if (fields & 0x8)
2614 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NID;
2615 	if (fields & 0x10) {
2616 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC;
2617 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NC;
2618 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_C6;
2619 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
2620 	}
2621 	return mask;
2622 }
2623 
/* Constraint hook: defer to the shared Cbox logic with the HSW-EP filter mask. */
static struct event_constraint *
hswep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, hswep_cbox_filter_mask);
}
2629 
/*
 * Set up the Cbox filter register for events that need one, based on the
 * extra_regs table above.  Always succeeds.
 */
static int hswep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	/* Accumulate idx bits from every matching extra-reg entry. */
	for (er = hswep_uncore_cbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		/* Filter MSRs are laid out per box at HSWEP_CBO_MSR_OFFSET stride. */
		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
			    HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & hswep_cbox_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}
2650 
/*
 * Enable a Cbox event.  If the event uses the shared filter register,
 * write the 64-bit filter value as two 32-bit MSR writes (reg and
 * reg + 1) before setting the enable bit in the event control register.
 */
static void hswep_cbox_enable_event(struct intel_uncore_box *box,
				  struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		u64 filter = uncore_shared_reg_config(box, 0);
		wrmsrl(reg1->reg, filter & 0xffffffff);
		wrmsrl(reg1->reg + 1, filter >> 32);
	}

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
2665 
/* HSW-EP CBox MSR ops: SNB-EP box management plus filter-aware event setup. */
static struct intel_uncore_ops hswep_uncore_cbox_ops = {
	.init_box		= snbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= hswep_cbox_hw_config,
	.get_constraint		= hswep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
2677 
/*
 * HSW-EP CBox (LLC coherence box) PMU type.  num_boxes is the maximum;
 * hswep_uncore_cpu_init() trims it to the actual core count.  The one
 * shared reg is the per-box filter register pair.
 */
static struct intel_uncore_type hswep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 18,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= hswep_uncore_cbox_constraints,
	.ops			= &hswep_uncore_cbox_ops,
	.format_group		= &hswep_uncore_cbox_format_group,
};
2693 
2694 /*
2695  * Write SBOX Initialization register bit by bit to avoid spurious #GPs
2696  */
2697 static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box)
2698 {
2699 	unsigned msr = uncore_msr_box_ctl(box);
2700 
2701 	if (msr) {
2702 		u64 init = SNBEP_PMON_BOX_CTL_INT;
2703 		u64 flags = 0;
2704 		int i;
2705 
2706 		for_each_set_bit(i, (unsigned long *)&init, 64) {
2707 			flags |= (1ULL << i);
2708 			wrmsrl(msr, flags);
2709 		}
2710 	}
2711 }
2712 
/* Standard SNB-EP MSR ops, with the #GP-safe SBOX init replacement. */
static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = {
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.init_box		= hswep_uncore_sbox_msr_init_box
};
2717 
/* sysfs "format" attributes exposed for SBOX events (no filter fields). */
static struct attribute *hswep_uncore_sbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group hswep_uncore_sbox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_sbox_formats_attr,
};
2732 
/*
 * HSW-EP SBOX (QPI agent) PMU type.  num_boxes may be reduced to 2 by
 * hswep_uncore_cpu_init() on 6-8 core parts (CAPID4 probe).
 */
static struct intel_uncore_type hswep_uncore_sbox = {
	.name			= "sbox",
	.num_counters		= 4,
	.num_boxes		= 4,
	.perf_ctr_bits		= 44,
	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
	.ops			= &hswep_uncore_sbox_msr_ops,
	.format_group		= &hswep_uncore_sbox_format_group,
};
2746 
2747 static int hswep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2748 {
2749 	struct hw_perf_event *hwc = &event->hw;
2750 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2751 	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
2752 
2753 	if (ev_sel >= 0xb && ev_sel <= 0xe) {
2754 		reg1->reg = HSWEP_PCU_MSR_PMON_BOX_FILTER;
2755 		reg1->idx = ev_sel - 0xb;
2756 		reg1->config = event->attr.config1 & (0xff << reg1->idx);
2757 	}
2758 	return 0;
2759 }
2760 
/* PCU ops: common SNB-EP MSR handling plus band-filter configuration. */
static struct intel_uncore_ops hswep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};
2767 
/*
 * HSW-EP PCU (power control unit) PMU type.  Shared by BDX, which
 * attaches its own constraints in bdx_uncore_cpu_init().
 */
static struct intel_uncore_type hswep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &hswep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};
2781 
/* NULL-terminated list of HSW-EP MSR-based uncore PMU types. */
static struct intel_uncore_type *hswep_msr_uncores[] = {
	&hswep_uncore_ubox,
	&hswep_uncore_cbox,
	&hswep_uncore_sbox,
	&hswep_uncore_pcu,
	NULL,
};
2789 
/*
 * Register the HSW-EP MSR uncores, trimming CBox count to the real core
 * count and SBOX count on small-core-count parts.
 */
void hswep_uncore_cpu_init(void)
{
	int pkg = boot_cpu_data.logical_proc_id;

	/* One CBox per core; cap the compile-time maximum of 18. */
	if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;

	/* Detect 6-8 core systems with only two SBOXes */
	if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) {
		u32 capid4;

		/*
		 * CAPID4 bits 7:6 encode the SBOX population.
		 * NOTE(review): the read result is not checked — presumably
		 * acceptable since the device was just found; confirm.
		 */
		pci_read_config_dword(uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3],
				      0x94, &capid4);
		if (((capid4 >> 6) & 0x3) == 0)
			hswep_uncore_sbox.num_boxes = 2;
	}

	uncore_msr_uncores = hswep_msr_uncores;
}
2809 
/* HSW-EP Home Agent PMU type (PCI-based, common SNB-EP register layout). */
static struct intel_uncore_type hswep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2817 
/*
 * Predefined IMC events.  The CAS count scale 6.103515625e-5 converts a
 * count of 64-byte cache lines to MiB (64 / 2^20).
 */
static struct uncore_event_desc hswep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};
2828 
/* HSW-EP IMC (memory controller channel) PMU type with fixed DCLK counter. */
static struct intel_uncore_type hswep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2840 
/* PCI config-space offsets of the four IRP counters (8 bytes apart). */
static unsigned hswep_uncore_irp_ctrs[] = {0xa0, 0xa8, 0xb0, 0xb8};
2842 
2843 static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
2844 {
2845 	struct pci_dev *pdev = box->pci_dev;
2846 	struct hw_perf_event *hwc = &event->hw;
2847 	u64 count = 0;
2848 
2849 	pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
2850 	pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
2851 
2852 	return count;
2853 }
2854 
/* IRP ops: SNB-EP PCI box management, IVB-EP event control, split reads. */
static struct intel_uncore_ops hswep_uncore_irp_ops = {
	.init_box	= snbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= ivbep_uncore_irp_disable_event,
	.enable_event	= ivbep_uncore_irp_enable_event,
	.read_counter	= hswep_uncore_irp_read_counter,
};
2863 
/* HSW-EP IRP (IIO ring port) PMU type; counters live at custom offsets. */
static struct intel_uncore_type hswep_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &hswep_uncore_irp_ops,
	.format_group		= &snbep_uncore_format_group,
};
2874 
/* HSW-EP QPI port PMU type; shared reg holds the match/mask filter pair. */
static struct intel_uncore_type hswep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.format_group		= &snbep_uncore_qpi_format_group,
};
2888 
/* Per-event counter restrictions for the HSW-EP R2PCIe box. */
static struct event_constraint hswep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x27, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	EVENT_CONSTRAINT_END
};
2910 
/* HSW-EP R2PCIe (ring-to-PCIe) PMU type. */
static struct intel_uncore_type hswep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= hswep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2919 
/* Per-event counter restrictions for the HSW-EP R3QPI box. */
static struct event_constraint hswep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};
2956 
/* HSW-EP R3QPI (ring-to-QPI) PMU type. */
static struct intel_uncore_type hswep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 44,
	.constraints	= hswep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2965 
/* Indices into hswep_pci_uncores[] (also used by UNCORE_PCI_DEV_DATA). */
enum {
	HSWEP_PCI_UNCORE_HA,
	HSWEP_PCI_UNCORE_IMC,
	HSWEP_PCI_UNCORE_IRP,
	HSWEP_PCI_UNCORE_QPI,
	HSWEP_PCI_UNCORE_R2PCIE,
	HSWEP_PCI_UNCORE_R3QPI,
};
2974 
/* NULL-terminated list of HSW-EP PCI uncore types, indexed by the enum above. */
static struct intel_uncore_type *hswep_pci_uncores[] = {
	[HSWEP_PCI_UNCORE_HA]	= &hswep_uncore_ha,
	[HSWEP_PCI_UNCORE_IMC]	= &hswep_uncore_imc,
	[HSWEP_PCI_UNCORE_IRP]	= &hswep_uncore_irp,
	[HSWEP_PCI_UNCORE_QPI]	= &hswep_uncore_qpi,
	[HSWEP_PCI_UNCORE_R2PCIE]	= &hswep_uncore_r2pcie,
	[HSWEP_PCI_UNCORE_R3QPI]	= &hswep_uncore_r3qpi,
	NULL,
};
2984 
/*
 * HSW-EP uncore PCI device table.  driver_data encodes (type index,
 * box/device index); UNCORE_EXTRA_PCI_DEV entries are probed only for
 * their config space (QPI port filters, PCU.3 CAPID registers).
 */
static const struct pci_device_id hswep_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f30),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f38),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb0),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb1),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb4),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb5),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd0),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd1),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd4),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd5),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f39),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f32),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f33),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3a),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f34),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f36),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f37),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3e),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* PCU.3 (for Capability registers) */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fc0),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   HSWEP_PCI_PCU_3),
	},
	{ /* end: all zeroes */ }
};
3075 
/* PCI driver shell; the uncore core attaches probe/remove callbacks. */
static struct pci_driver hswep_uncore_pci_driver = {
	.name		= "hswep_uncore",
	.id_table	= hswep_uncore_pci_ids,
};
3080 
3081 int hswep_uncore_pci_init(void)
3082 {
3083 	int ret = snbep_pci2phy_map_init(0x2f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
3084 	if (ret)
3085 		return ret;
3086 	uncore_pci_uncores = hswep_pci_uncores;
3087 	uncore_pci_driver = &hswep_uncore_pci_driver;
3088 	return 0;
3089 }
3090 /* end of Haswell-EP uncore support */
3091 
3092 /* BDX uncore support */
3093 
/* BDX UBox PMU type; reuses the HSW-EP register layout and IVB-EP ops. */
static struct intel_uncore_type bdx_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_ubox_format_group,
};
3109 
/* Per-event counter restrictions for the BDX CBox. */
static struct event_constraint bdx_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
	EVENT_CONSTRAINT_END
};
3117 
/*
 * BDX CBox PMU type; same registers and ops as HSW-EP, with BDX
 * constraints and a larger maximum box count (trimmed at init time).
 */
static struct intel_uncore_type bdx_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 24,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= bdx_uncore_cbox_constraints,
	.ops			= &hswep_uncore_cbox_ops,
	.format_group		= &hswep_uncore_cbox_format_group,
};
3133 
/* BDX SBOX PMU type; removed entirely at init time on SBOX-less parts. */
static struct intel_uncore_type bdx_uncore_sbox = {
	.name			= "sbox",
	.num_counters		= 4,
	.num_boxes		= 4,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
	.ops			= &hswep_uncore_sbox_msr_ops,
	.format_group		= &hswep_uncore_sbox_format_group,
};
3147 
/* Index of the SBOX slot in bdx_msr_uncores[]; must track the array below. */
#define BDX_MSR_UNCORE_SBOX	3

/* NULL-terminated list of BDX MSR-based uncore PMU types. */
static struct intel_uncore_type *bdx_msr_uncores[] = {
	&bdx_uncore_ubox,
	&bdx_uncore_cbox,
	&hswep_uncore_pcu,
	&bdx_uncore_sbox,
	NULL,
};
3157 
/* Bit 7 'Use Occupancy' is not available for counter 0 on BDX */
static struct event_constraint bdx_uncore_pcu_constraints[] = {
	EVENT_CONSTRAINT(0x80, 0xe, 0x80),
	EVENT_CONSTRAINT_END
};
3163 
3164 void bdx_uncore_cpu_init(void)
3165 {
3166 	int pkg = topology_phys_to_logical_pkg(boot_cpu_data.phys_proc_id);
3167 
3168 	if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
3169 		bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
3170 	uncore_msr_uncores = bdx_msr_uncores;
3171 
3172 	/* BDX-DE doesn't have SBOX */
3173 	if (boot_cpu_data.x86_model == 86) {
3174 		uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
3175 	/* Detect systems with no SBOXes */
3176 	} else if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) {
3177 		struct pci_dev *pdev;
3178 		u32 capid4;
3179 
3180 		pdev = uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3];
3181 		pci_read_config_dword(pdev, 0x94, &capid4);
3182 		if (((capid4 >> 6) & 0x3) == 0)
3183 			bdx_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
3184 	}
3185 	hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints;
3186 }
3187 
/* BDX Home Agent PMU type (PCI-based, common SNB-EP register layout). */
static struct intel_uncore_type bdx_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3195 
/* BDX IMC PMU type; reuses the HSW-EP event list (same CAS scale/units). */
static struct intel_uncore_type bdx_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3207 
/* BDX IRP PMU type; shares the HSW-EP split-read counter ops. */
static struct intel_uncore_type bdx_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &hswep_uncore_irp_ops,
	.format_group		= &snbep_uncore_format_group,
};
3218 
/* BDX QPI port PMU type; shared reg holds the match/mask filter pair. */
static struct intel_uncore_type bdx_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.format_group		= &snbep_uncore_qpi_format_group,
};
3232 
/* Per-event counter restrictions for the BDX R2PCIe box. */
static struct event_constraint bdx_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	EVENT_CONSTRAINT_END
};
3245 
/* BDX R2PCIe (ring-to-PCIe) PMU type. */
static struct intel_uncore_type bdx_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= bdx_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3254 
/* Per-event counter restrictions for the BDX R3QPI box. */
static struct event_constraint bdx_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};
3288 
/* BDX R3QPI (ring-to-QPI) PMU type. */
static struct intel_uncore_type bdx_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.constraints	= bdx_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3297 
/* Indices into bdx_pci_uncores[] (also used by UNCORE_PCI_DEV_DATA). */
enum {
	BDX_PCI_UNCORE_HA,
	BDX_PCI_UNCORE_IMC,
	BDX_PCI_UNCORE_IRP,
	BDX_PCI_UNCORE_QPI,
	BDX_PCI_UNCORE_R2PCIE,
	BDX_PCI_UNCORE_R3QPI,
};
3306 
/* NULL-terminated list of BDX PCI uncore types, indexed by the enum above. */
static struct intel_uncore_type *bdx_pci_uncores[] = {
	[BDX_PCI_UNCORE_HA]	= &bdx_uncore_ha,
	[BDX_PCI_UNCORE_IMC]	= &bdx_uncore_imc,
	[BDX_PCI_UNCORE_IRP]	= &bdx_uncore_irp,
	[BDX_PCI_UNCORE_QPI]	= &bdx_uncore_qpi,
	[BDX_PCI_UNCORE_R2PCIE]	= &bdx_uncore_r2pcie,
	[BDX_PCI_UNCORE_R3QPI]	= &bdx_uncore_r3qpi,
	NULL,
};
3316 
/*
 * BDX uncore PCI device table.  Mirrors the HSW-EP table with 0x6fxx
 * device IDs; UNCORE_EXTRA_PCI_DEV entries are probed only for their
 * config space (QPI port filters, PCU.3 CAPID registers).
 */
static const struct pci_device_id bdx_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f30),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f38),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb0),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb1),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb4),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb5),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd0),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd1),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd4),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd5),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f39),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f32),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f33),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3a),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f34),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f36),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f37),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3e),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* QPI Port 2 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   BDX_PCI_QPI_PORT2_FILTER),
	},
	{ /* PCU.3 (for Capability registers) */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fc0),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   HSWEP_PCI_PCU_3),
	},
	{ /* end: all zeroes */ }
};
3412 
/* PCI driver shell; the uncore core attaches probe/remove callbacks. */
static struct pci_driver bdx_uncore_pci_driver = {
	.name		= "bdx_uncore",
	.id_table	= bdx_uncore_pci_ids,
};
3417 
/*
 * Register the BDX PCI uncores.  Returns 0 on success or the error from
 * building the bus -> package map via the BDX ubox device (0x6f1e).
 */
int bdx_uncore_pci_init(void)
{
	int ret = snbep_pci2phy_map_init(0x6f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);

	if (ret)
		return ret;
	uncore_pci_uncores = bdx_pci_uncores;
	uncore_pci_driver = &bdx_uncore_pci_driver;
	return 0;
}
3428 
3429 /* end of BDX uncore support */
3430 
3431 /* SKX uncore support */
3432 
/* SKX UBox PMU type; reuses HSW-EP register layout and IVB-EP ops. */
static struct intel_uncore_type skx_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_ubox_format_group,
};
3447 
/* sysfs "format" attributes for CHA events, including its filter fields. */
static struct attribute *skx_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid4.attr,
	&format_attr_filter_state5.attr,
	&format_attr_filter_rem.attr,
	&format_attr_filter_loc.attr,
	&format_attr_filter_nm.attr,
	&format_attr_filter_all_op.attr,
	&format_attr_filter_not_nm.attr,
	&format_attr_filter_opc_0.attr,
	&format_attr_filter_opc_1.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static const struct attribute_group skx_uncore_chabox_format_group = {
	.name = "format",
	.attrs = skx_uncore_cha_formats_attr,
};
3473 
/* Per-event counter restrictions for the SKX CHA box. */
static struct event_constraint skx_uncore_chabox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	EVENT_CONSTRAINT_END
};
3479 
3480 static struct extra_reg skx_uncore_cha_extra_regs[] = {
3481 	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
3482 	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
3483 	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
3484 	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
3485 	SNBEP_CBO_EVENT_EXTRA_REG(0x3134, 0xffff, 0x4),
3486 	SNBEP_CBO_EVENT_EXTRA_REG(0x9134, 0xffff, 0x4),
3487 	SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x8),
3488 	SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x8),
3489 	SNBEP_CBO_EVENT_EXTRA_REG(0x38, 0xff, 0x3),
3490 	EVENT_EXTRA_END
3491 };
3492 
3493 static u64 skx_cha_filter_mask(int fields)
3494 {
3495 	u64 mask = 0;
3496 
3497 	if (fields & 0x1)
3498 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_TID;
3499 	if (fields & 0x2)
3500 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LINK;
3501 	if (fields & 0x4)
3502 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_STATE;
3503 	if (fields & 0x8) {
3504 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_REM;
3505 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LOC;
3506 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC;
3507 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NM;
3508 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM;
3509 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC0;
3510 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC1;
3511 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NC;
3512 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ISOC;
3513 	}
3514 	return mask;
3515 }
3516 
/*
 * Delegate to the common SNB-EP CBox constraint logic, supplying the
 * SKX-specific mapping from filter-field selectors to filter bits.
 */
static struct event_constraint *
skx_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, skx_cha_filter_mask);
}
3522 
/*
 * Set up the extra (filter) register for a CHA event: collect the
 * filter fields required by the event/umask pair and record which
 * config1 bits are meaningful for it.  The filter MSR address reuses
 * the HSW-EP CBox layout constants.
 */
static int skx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	/* OR together the selector bits of every matching extra_reg entry. */
	for (er = skx_uncore_cha_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
			    HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & skx_cha_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}
3543 
/* SKX CHA ops; IVB-EP init is used because the box ctl lacks FRZ_EN. */
static struct intel_uncore_ops skx_uncore_chabox_ops = {
	/* There is no frz_en for chabox ctl */
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= skx_cha_hw_config,
	.get_constraint		= skx_cha_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
3556 
/*
 * SKX CHA PMU type.  num_boxes is left unset here — presumably filled
 * in at init time from the discovered CHA count; verify against the
 * SKX cpu-init code.  The HSW-EP SBOX raw event mask is reused (it
 * includes the tid_en bit).
 */
static struct intel_uncore_type skx_uncore_chabox = {
	.name			= "cha",
	.num_counters		= 4,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= skx_uncore_chabox_constraints,
	.ops			= &skx_uncore_chabox_ops,
	.format_group		= &skx_uncore_chabox_format_group,
};
3571 
/* sysfs "format" attributes for the SKX IIO PMON (adds ch_mask/fc_mask). */
static struct attribute *skx_uncore_iio_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh9.attr,
	&format_attr_ch_mask.attr,
	&format_attr_fc_mask.attr,
	NULL,
};

static const struct attribute_group skx_uncore_iio_format_group = {
	.name = "format",
	.attrs = skx_uncore_iio_formats_attr,
};

/* Events restricted to subsets of the four IIO counters (bitmask = counters). */
static struct event_constraint skx_uncore_iio_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x88, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x95, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xd4, 0xc),
	EVENT_CONSTRAINT_END
};
3597 
3598 static void skx_iio_enable_event(struct intel_uncore_box *box,
3599 				 struct perf_event *event)
3600 {
3601 	struct hw_perf_event *hwc = &event->hw;
3602 
3603 	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
3604 }
3605 
/* SKX IIO box access ops; only enable_event is SKX-specific. */
static struct intel_uncore_ops skx_uncore_iio_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= skx_iio_enable_event,
	.read_counter		= uncore_msr_read_counter,
};

/* SKX IIO (Integrated I/O) PMON type: 6 boxes, 4 counters each. */
static struct intel_uncore_type skx_uncore_iio = {
	.name			= "iio",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= SKX_IIO0_MSR_PMON_CTL0,
	.perf_ctr		= SKX_IIO0_MSR_PMON_CTR0,
	.event_mask		= SKX_IIO_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl		= SKX_IIO0_MSR_PMON_BOX_CTL,
	.msr_offset		= SKX_IIO_MSR_OFFSET,
	.constraints		= skx_uncore_iio_constraints,
	.ops			= &skx_uncore_iio_ops,
	.format_group		= &skx_uncore_iio_format_group,
};
3630 
/* Groups of free-running counters exposed by each SKX IIO box. */
enum perf_uncore_iio_freerunning_type_id {
	SKX_IIO_MSR_IOCLK			= 0,
	SKX_IIO_MSR_BW				= 1,
	SKX_IIO_MSR_UTIL			= 2,

	SKX_IIO_FREERUNNING_TYPE_MAX,
};


/*
 * Initializer order is presumably { counter base MSR, counter offset,
 * box offset, number of counters, counter width in bits } — confirm
 * against struct freerunning_counters in uncore.h.
 */
static struct freerunning_counters skx_iio_freerunning[] = {
	[SKX_IIO_MSR_IOCLK]	= { 0xa45, 0x1, 0x20, 1, 36 },
	[SKX_IIO_MSR_BW]	= { 0xb00, 0x1, 0x10, 8, 36 },
	[SKX_IIO_MSR_UTIL]	= { 0xb08, 0x1, 0x10, 8, 36 },
};
3645 
/*
 * Event aliases for the SKX IIO free-running counters. event=0xff marks a
 * free-running counter; umask encodes type (high nibble) and index.
 * The .scale entries (3.814697266e-6, i.e. 4/2^20) appear to convert
 * 4-byte-granular counts to MiB — confirm against the uncore PMON docs.
 */
static struct uncore_event_desc skx_uncore_iio_freerunning_events[] = {
	/* Free-Running IO CLOCKS Counter */
	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
	/* Free-Running IIO BANDWIDTH Counters */
	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port0,		"event=0xff,umask=0x24"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1,		"event=0xff,umask=0x25"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2,		"event=0xff,umask=0x26"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3,		"event=0xff,umask=0x27"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit,	"MiB"),
	/* Free-running IIO UTILIZATION Counters */
	INTEL_UNCORE_EVENT_DESC(util_in_port0,		"event=0xff,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(util_out_port0,		"event=0xff,umask=0x31"),
	INTEL_UNCORE_EVENT_DESC(util_in_port1,		"event=0xff,umask=0x32"),
	INTEL_UNCORE_EVENT_DESC(util_out_port1,		"event=0xff,umask=0x33"),
	INTEL_UNCORE_EVENT_DESC(util_in_port2,		"event=0xff,umask=0x34"),
	INTEL_UNCORE_EVENT_DESC(util_out_port2,		"event=0xff,umask=0x35"),
	INTEL_UNCORE_EVENT_DESC(util_in_port3,		"event=0xff,umask=0x36"),
	INTEL_UNCORE_EVENT_DESC(util_out_port3,		"event=0xff,umask=0x37"),
	{ /* end: all zeroes */ },
};
3685 
/*
 * Free-running counters cannot be started/stopped, so only read and
 * hw_config callbacks are needed.
 */
static struct intel_uncore_ops skx_uncore_iio_freerunning_ops = {
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= uncore_freerunning_hw_config,
};

static struct attribute *skx_uncore_iio_freerunning_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	NULL,
};

static const struct attribute_group skx_uncore_iio_freerunning_format_group = {
	.name = "format",
	.attrs = skx_uncore_iio_freerunning_formats_attr,
};

/* Pseudo PMON type exposing the IIO free-running counters (1 clk + 8 bw + 8 util). */
static struct intel_uncore_type skx_uncore_iio_free_running = {
	.name			= "iio_free_running",
	.num_counters		= 17,
	.num_boxes		= 6,
	.num_freerunning_types	= SKX_IIO_FREERUNNING_TYPE_MAX,
	.freerunning		= skx_iio_freerunning,
	.ops			= &skx_uncore_iio_freerunning_ops,
	.event_descs		= skx_uncore_iio_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
3712 
/* Generic SKX format attributes shared by IRP, iMC, M2M, M2PCIe and M3UPI. */
static struct attribute *skx_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group skx_uncore_format_group = {
	.name = "format",
	.attrs = skx_uncore_formats_attr,
};

/* SKX IRP (IIO ring port) PMON type; shares the IIO MSR access ops. */
static struct intel_uncore_type skx_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= SKX_IRP0_MSR_PMON_CTL0,
	.perf_ctr		= SKX_IRP0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SKX_IRP0_MSR_PMON_BOX_CTL,
	.msr_offset		= SKX_IRP_MSR_OFFSET,
	.ops			= &skx_uncore_iio_ops,
	.format_group		= &skx_uncore_format_group,
};
3740 
/* PCU format attributes: adds occupancy and frequency-band filter fields. */
static struct attribute *skx_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge_det.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};
3755 
3756 static struct attribute_group skx_uncore_pcu_format_group = {
3757 	.name = "format",
3758 	.attrs = skx_uncore_pcu_formats_attr,
3759 };
3760 
/* SKX PCU ops: common IVB-EP MSR ops plus HSW-EP config and filter sharing. */
static struct intel_uncore_ops skx_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

/* SKX PCU (Power Control Unit) PMON type; single box per package. */
static struct intel_uncore_type skx_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &skx_uncore_pcu_ops,
	.format_group		= &skx_uncore_pcu_format_group,
};

/* All MSR-based SKX uncore PMU types, registered by skx_uncore_cpu_init(). */
static struct intel_uncore_type *skx_msr_uncores[] = {
	&skx_uncore_ubox,
	&skx_uncore_chabox,
	&skx_uncore_iio,
	&skx_uncore_iio_free_running,
	&skx_uncore_irp,
	&skx_uncore_pcu,
	NULL,
};
3791 
3792 /*
3793  * To determine the number of CHAs, it should read bits 27:0 in the CAPID6
3794  * register which located at Device 30, Function 3, Offset 0x9C. PCI ID 0x2083.
3795  */
3796 #define SKX_CAPID6		0x9c
3797 #define SKX_CHA_BIT_MASK	GENMASK(27, 0)
3798 
3799 static int skx_count_chabox(void)
3800 {
3801 	struct pci_dev *dev = NULL;
3802 	u32 val = 0;
3803 
3804 	dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2083, dev);
3805 	if (!dev)
3806 		goto out;
3807 
3808 	pci_read_config_dword(dev, SKX_CAPID6, &val);
3809 	val &= SKX_CHA_BIT_MASK;
3810 out:
3811 	pci_dev_put(dev);
3812 	return hweight32(val);
3813 }
3814 
/* Register the SKX MSR uncore types; CHA count is probed from CAPID6. */
void skx_uncore_cpu_init(void)
{
	skx_uncore_chabox.num_boxes = skx_count_chabox();
	uncore_msr_uncores = skx_msr_uncores;
}
3820 
/* SKX iMC (memory controller) PMON: PCI-based, 6 channels, plus fixed DCLK counter. */
static struct intel_uncore_type skx_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 6,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};

/* UPI format attributes: uses the extended umask field. */
static struct attribute *skx_upi_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group skx_upi_uncore_format_group = {
	.name = "format",
	.attrs = skx_upi_uncore_formats_attr,
};
3851 
3852 static void skx_upi_uncore_pci_init_box(struct intel_uncore_box *box)
3853 {
3854 	struct pci_dev *pdev = box->pci_dev;
3855 
3856 	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
3857 	pci_write_config_dword(pdev, SKX_UPI_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
3858 }
3859 
/* UPI box PCI access ops; only init_box is SKX-UPI specific. */
static struct intel_uncore_ops skx_upi_uncore_pci_ops = {
	.init_box	= skx_upi_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};

/* SKX UPI (inter-socket link) PMON type: one box per link, 3 links. */
static struct intel_uncore_type skx_uncore_upi = {
	.name		= "upi",
	.num_counters   = 4,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SKX_UPI_PCI_PMON_CTR0,
	.event_ctl	= SKX_UPI_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext = SKX_UPI_CTL_UMASK_EXT,
	.box_ctl	= SKX_UPI_PCI_PMON_BOX_CTL,
	.ops		= &skx_upi_uncore_pci_ops,
	.format_group	= &skx_upi_uncore_format_group,
};
3882 
3883 static void skx_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
3884 {
3885 	struct pci_dev *pdev = box->pci_dev;
3886 
3887 	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
3888 	pci_write_config_dword(pdev, SKX_M2M_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
3889 }
3890 
/* M2M box PCI access ops; only init_box is SKX-M2M specific. */
static struct intel_uncore_ops skx_m2m_uncore_pci_ops = {
	.init_box	= skx_m2m_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};

/* SKX M2M (Mesh-to-Memory) PMON type. */
static struct intel_uncore_type skx_uncore_m2m = {
	.name		= "m2m",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SKX_M2M_PCI_PMON_CTR0,
	.event_ctl	= SKX_M2M_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SKX_M2M_PCI_PMON_BOX_CTL,
	.ops		= &skx_m2m_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};

/* Event 0x23 may only run on counters 0-1. */
static struct event_constraint skx_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	EVENT_CONSTRAINT_END
};

/* SKX M2PCIe (Mesh-to-PCIe) PMON type. */
static struct intel_uncore_type skx_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters   = 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.constraints	= skx_uncore_m2pcie_constraints,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
3931 
/* Per-event counter restrictions for the M3UPI box (bitmask = allowed counters). */
static struct event_constraint skx_uncore_m3upi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x51, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x52, 0x7),
	EVENT_CONSTRAINT_END
};

/* SKX M3UPI (mesh/UPI interface) PMON type. */
static struct intel_uncore_type skx_uncore_m3upi = {
	.name		= "m3upi",
	.num_counters   = 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.constraints	= skx_uncore_m3upi_constraints,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};

/* Indices into skx_pci_uncores[], referenced by the PCI ID table below. */
enum {
	SKX_PCI_UNCORE_IMC,
	SKX_PCI_UNCORE_M2M,
	SKX_PCI_UNCORE_UPI,
	SKX_PCI_UNCORE_M2PCIE,
	SKX_PCI_UNCORE_M3UPI,
};

/* All PCI-based SKX uncore PMU types, registered by skx_uncore_pci_init(). */
static struct intel_uncore_type *skx_pci_uncores[] = {
	[SKX_PCI_UNCORE_IMC]	= &skx_uncore_imc,
	[SKX_PCI_UNCORE_M2M]	= &skx_uncore_m2m,
	[SKX_PCI_UNCORE_UPI]	= &skx_uncore_upi,
	[SKX_PCI_UNCORE_M2PCIE]	= &skx_uncore_m2pcie,
	[SKX_PCI_UNCORE_M3UPI]	= &skx_uncore_m3upi,
	NULL,
};
3974 
/*
 * SKX uncore PCI devices: each entry maps a device ID at a fixed
 * device/function location to (type index, box index) via
 * UNCORE_PCI_DEV_FULL_DATA(device, function, type, box).
 */
static const struct pci_device_id skx_uncore_pci_ids[] = {
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 2, SKX_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 6, SKX_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 2, SKX_PCI_UNCORE_IMC, 2),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 2, SKX_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 6, SKX_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 2, SKX_PCI_UNCORE_IMC, 5),
	},
	{ /* M2M0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 0, SKX_PCI_UNCORE_M2M, 0),
	},
	{ /* M2M1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 0, SKX_PCI_UNCORE_M2M, 1),
	},
	{ /* UPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, SKX_PCI_UNCORE_UPI, 0),
	},
	{ /* UPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, SKX_PCI_UNCORE_UPI, 1),
	},
	{ /* UPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, SKX_PCI_UNCORE_UPI, 2),
	},
	{ /* M2PCIe 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 1, SKX_PCI_UNCORE_M2PCIE, 0),
	},
	{ /* M2PCIe 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 1, SKX_PCI_UNCORE_M2PCIE, 1),
	},
	{ /* M2PCIe 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(23, 1, SKX_PCI_UNCORE_M2PCIE, 2),
	},
	{ /* M2PCIe 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3),
	},
	{ /* M3UPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 0),
	},
	{ /* M3UPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204E),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 2, SKX_PCI_UNCORE_M3UPI, 1),
	},
	{ /* M3UPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 5, SKX_PCI_UNCORE_M3UPI, 2),
	},
	{ /* end: all zeroes */ }
};
4050 
4051 
/* ID-table-only driver; the uncore core does the actual probing. */
static struct pci_driver skx_uncore_pci_driver = {
	.name		= "skx_uncore",
	.id_table	= skx_uncore_pci_ids,
};
4056 
4057 int skx_uncore_pci_init(void)
4058 {
4059 	/* need to double check pci address */
4060 	int ret = snbep_pci2phy_map_init(0x2014, SKX_CPUNODEID, SKX_GIDNIDMAP, false);
4061 
4062 	if (ret)
4063 		return ret;
4064 
4065 	uncore_pci_uncores = skx_pci_uncores;
4066 	uncore_pci_driver = &skx_uncore_pci_driver;
4067 	return 0;
4068 }
4069 
4070 /* end of SKX uncore support */
4071 
4072 /* SNR uncore support */
4073 
/* SNR UBOX PMON type: 2 general counters plus a fixed UCLK counter. */
static struct intel_uncore_type snr_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= SNR_U_MSR_PMON_CTR0,
	.event_ctl		= SNR_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= SNR_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= SNR_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_format_group,
};

/* SNR CHA format attributes: extended umask plus 5-bit TID filter. */
static struct attribute *snr_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext2.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid5.attr,
	NULL,
};
static const struct attribute_group snr_uncore_chabox_format_group = {
	.name = "format",
	.attrs = snr_uncore_cha_formats_attr,
};
4103 
4104 static int snr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4105 {
4106 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
4107 
4108 	reg1->reg = SNR_C0_MSR_PMON_BOX_FILTER0 +
4109 		    box->pmu->type->msr_offset * box->pmu->pmu_idx;
4110 	reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
4111 	reg1->idx = 0;
4112 
4113 	return 0;
4114 }
4115 
4116 static void snr_cha_enable_event(struct intel_uncore_box *box,
4117 				   struct perf_event *event)
4118 {
4119 	struct hw_perf_event *hwc = &event->hw;
4120 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
4121 
4122 	if (reg1->idx != EXTRA_REG_NONE)
4123 		wrmsrl(reg1->reg, reg1->config);
4124 
4125 	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
4126 }
4127 
/* SNR CHA box access ops; enable_event also programs the TID filter. */
static struct intel_uncore_ops snr_uncore_chabox_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= snr_cha_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= snr_cha_hw_config,
};

/* SNR CHA PMON type: 6 boxes, 4 counters each. */
static struct intel_uncore_type snr_uncore_chabox = {
	.name			= "cha",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= SNR_CHA_MSR_PMON_CTL0,
	.perf_ctr		= SNR_CHA_MSR_PMON_CTR0,
	.box_ctl		= SNR_CHA_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SNR_CHA_RAW_EVENT_MASK_EXT,
	.ops			= &snr_uncore_chabox_ops,
	.format_group		= &snr_uncore_chabox_format_group,
};
4152 
/* SNR IIO format attributes: SNR variants of the channel/fc mask fields. */
static struct attribute *snr_uncore_iio_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh9.attr,
	&format_attr_ch_mask2.attr,
	&format_attr_fc_mask2.attr,
	NULL,
};

static const struct attribute_group snr_uncore_iio_format_group = {
	.name = "format",
	.attrs = snr_uncore_iio_formats_attr,
};

/* SNR IIO PMON type: 5 boxes, 4 counters each. */
static struct intel_uncore_type snr_uncore_iio = {
	.name			= "iio",
	.num_counters		= 4,
	.num_boxes		= 5,
	.perf_ctr_bits		= 48,
	.event_ctl		= SNR_IIO_MSR_PMON_CTL0,
	.perf_ctr		= SNR_IIO_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl		= SNR_IIO_MSR_PMON_BOX_CTL,
	.msr_offset		= SNR_IIO_MSR_OFFSET,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &snr_uncore_iio_format_group,
};

/* SNR IRP PMON type. */
static struct intel_uncore_type snr_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 5,
	.perf_ctr_bits		= 48,
	.event_ctl		= SNR_IRP0_MSR_PMON_CTL0,
	.perf_ctr		= SNR_IRP0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNR_IRP0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNR_IRP_MSR_OFFSET,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_format_group,
};

/* SNR M2PCIe PMON type. */
static struct intel_uncore_type snr_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters	= 4,
	.num_boxes	= 5,
	.perf_ctr_bits	= 48,
	.event_ctl	= SNR_M2PCIE_MSR_PMON_CTL0,
	.perf_ctr	= SNR_M2PCIE_MSR_PMON_CTR0,
	.box_ctl	= SNR_M2PCIE_MSR_PMON_BOX_CTL,
	.msr_offset	= SNR_M2PCIE_MSR_OFFSET,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_format_group,
};
4211 
/*
 * Set up the SNR PCU band filter for event selects 0xb-0xe; each of those
 * events maps to one filter field taken from config1.
 */
static int snr_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;

	if (ev_sel >= 0xb && ev_sel <= 0xe) {
		reg1->reg = SNR_PCU_MSR_PMON_BOX_FILTER;
		reg1->idx = ev_sel - 0xb;
		/*
		 * NOTE(review): snbep_pcu_hw_config masks config1 with
		 * 0xff << (idx * 8) to select a byte-wide band; here the
		 * shift is by idx alone. Confirm against the SNR PCU
		 * filter-register layout that this is intentional.
		 */
		reg1->config = event->attr.config1 & (0xff << reg1->idx);
	}
	return 0;
}
4225 
/* SNR PCU ops: common IVB-EP MSR ops plus SNR band-filter config. */
static struct intel_uncore_ops snr_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snr_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

/* SNR PCU PMON type; reuses the SKX PCU sysfs format group. */
static struct intel_uncore_type snr_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNR_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNR_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNR_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snr_uncore_pcu_ops,
	.format_group		= &skx_uncore_pcu_format_group,
};

/* Free-running counter groups exposed by each SNR IIO box. */
enum perf_uncore_snr_iio_freerunning_type_id {
	SNR_IIO_MSR_IOCLK,
	SNR_IIO_MSR_BW_IN,

	SNR_IIO_FREERUNNING_TYPE_MAX,
};

/*
 * Initializer order is presumably { counter base MSR, counter offset,
 * box offset, number of counters, counter width in bits } — confirm
 * against struct freerunning_counters in uncore.h.
 */
static struct freerunning_counters snr_iio_freerunning[] = {
	[SNR_IIO_MSR_IOCLK]	= { 0x1eac, 0x1, 0x10, 1, 48 },
	[SNR_IIO_MSR_BW_IN]	= { 0x1f00, 0x1, 0x10, 8, 48 },
};
4258 
/*
 * Event aliases for the SNR IIO free-running counters; event=0xff marks a
 * free-running counter, umask encodes type (high nibble) and port index.
 */
static struct uncore_event_desc snr_uncore_iio_freerunning_events[] = {
	/* Free-Running IIO CLOCKS Counter */
	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
	/* Free-Running IIO BANDWIDTH IN Counters */
	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4,		"event=0xff,umask=0x24"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5,		"event=0xff,umask=0x25"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6,		"event=0xff,umask=0x26"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7,		"event=0xff,umask=0x27"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit,	"MiB"),
	{ /* end: all zeroes */ },
};

/* Pseudo PMON type exposing the SNR IIO free-running counters (1 clk + 8 bw). */
static struct intel_uncore_type snr_uncore_iio_free_running = {
	.name			= "iio_free_running",
	.num_counters		= 9,
	.num_boxes		= 5,
	.num_freerunning_types	= SNR_IIO_FREERUNNING_TYPE_MAX,
	.freerunning		= snr_iio_freerunning,
	.ops			= &skx_uncore_iio_freerunning_ops,
	.event_descs		= snr_uncore_iio_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
4300 
/* All MSR-based SNR uncore PMU types, registered by snr_uncore_cpu_init(). */
static struct intel_uncore_type *snr_msr_uncores[] = {
	&snr_uncore_ubox,
	&snr_uncore_chabox,
	&snr_uncore_iio,
	&snr_uncore_irp,
	&snr_uncore_m2pcie,
	&snr_uncore_pcu,
	&snr_uncore_iio_free_running,
	NULL,
};

/* Register the SNR MSR uncore types with the uncore core. */
void snr_uncore_cpu_init(void)
{
	uncore_msr_uncores = snr_msr_uncores;
}
4316 
4317 static void snr_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
4318 {
4319 	struct pci_dev *pdev = box->pci_dev;
4320 	int box_ctl = uncore_pci_box_ctl(box);
4321 
4322 	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
4323 	pci_write_config_dword(pdev, box_ctl, IVBEP_PMON_BOX_CTL_INT);
4324 }
4325 
/* SNR M2M box PCI access ops; only init_box is SNR-specific. */
static struct intel_uncore_ops snr_m2m_uncore_pci_ops = {
	.init_box	= snr_m2m_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};

/* SNR M2M format attributes: uses the third extended umask variant. */
static struct attribute *snr_m2m_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext3.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group snr_m2m_uncore_format_group = {
	.name = "format",
	.attrs = snr_m2m_uncore_formats_attr,
};

/* SNR M2M (Mesh-to-Memory) PMON type. */
static struct intel_uncore_type snr_uncore_m2m = {
	.name		= "m2m",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SNR_M2M_PCI_PMON_CTR0,
	.event_ctl	= SNR_M2M_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext	= SNR_M2M_PCI_PMON_UMASK_EXT,
	.box_ctl	= SNR_M2M_PCI_PMON_BOX_CTL,
	.ops		= &snr_m2m_uncore_pci_ops,
	.format_group	= &snr_m2m_uncore_format_group,
};
4362 
/* Indices into snr_pci_uncores[], referenced by the PCI ID table below. */
enum {
	SNR_PCI_UNCORE_M2M,
};

/* All PCI-based SNR uncore PMU types, registered by snr_uncore_pci_init(). */
static struct intel_uncore_type *snr_pci_uncores[] = {
	[SNR_PCI_UNCORE_M2M]		= &snr_uncore_m2m,
	NULL,
};

/* Maps the M2M device at dev 12, fn 0 to (type, box) via driver_data. */
static const struct pci_device_id snr_uncore_pci_ids[] = {
	{ /* M2M */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, SNR_PCI_UNCORE_M2M, 0),
	},
	{ /* end: all zeroes */ }
};

/* ID-table-only driver; the uncore core does the actual probing. */
static struct pci_driver snr_uncore_pci_driver = {
	.name		= "snr_uncore",
	.id_table	= snr_uncore_pci_ids,
};
4384 
4385 int snr_uncore_pci_init(void)
4386 {
4387 	/* SNR UBOX DID */
4388 	int ret = snbep_pci2phy_map_init(0x3460, SKX_CPUNODEID,
4389 					 SKX_GIDNIDMAP, true);
4390 
4391 	if (ret)
4392 		return ret;
4393 
4394 	uncore_pci_uncores = snr_pci_uncores;
4395 	uncore_pci_driver = &snr_uncore_pci_driver;
4396 	return 0;
4397 }
4398 
4399 static struct pci_dev *snr_uncore_get_mc_dev(int id)
4400 {
4401 	struct pci_dev *mc_dev = NULL;
4402 	int phys_id, pkg;
4403 
4404 	while (1) {
4405 		mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3451, mc_dev);
4406 		if (!mc_dev)
4407 			break;
4408 		phys_id = uncore_pcibus_to_physid(mc_dev->bus);
4409 		if (phys_id < 0)
4410 			continue;
4411 		pkg = topology_phys_to_logical_pkg(phys_id);
4412 		if (pkg < 0)
4413 			continue;
4414 		else if (pkg == id)
4415 			break;
4416 	}
4417 	return mc_dev;
4418 }
4419 
4420 static void __snr_uncore_mmio_init_box(struct intel_uncore_box *box,
4421 				       unsigned int box_ctl, int mem_offset)
4422 {
4423 	struct pci_dev *pdev = snr_uncore_get_mc_dev(box->dieid);
4424 	resource_size_t addr;
4425 	u32 pci_dword;
4426 
4427 	if (!pdev)
4428 		return;
4429 
4430 	pci_read_config_dword(pdev, SNR_IMC_MMIO_BASE_OFFSET, &pci_dword);
4431 	addr = (pci_dword & SNR_IMC_MMIO_BASE_MASK) << 23;
4432 
4433 	pci_read_config_dword(pdev, mem_offset, &pci_dword);
4434 	addr |= (pci_dword & SNR_IMC_MMIO_MEM0_MASK) << 12;
4435 
4436 	addr += box_ctl;
4437 
4438 	box->io_addr = ioremap(addr, SNR_IMC_MMIO_SIZE);
4439 	if (!box->io_addr)
4440 		return;
4441 
4442 	writel(IVBEP_PMON_BOX_CTL_INT, box->io_addr);
4443 }
4444 
/* init_box callback: map this box's PMON block using the MEM0 offset. */
static void snr_uncore_mmio_init_box(struct intel_uncore_box *box)
{
	__snr_uncore_mmio_init_box(box, uncore_mmio_box_ctl(box),
				   SNR_IMC_MMIO_MEM0_OFFSET);
}
4450 
4451 static void snr_uncore_mmio_disable_box(struct intel_uncore_box *box)
4452 {
4453 	u32 config;
4454 
4455 	if (!box->io_addr)
4456 		return;
4457 
4458 	config = readl(box->io_addr);
4459 	config |= SNBEP_PMON_BOX_CTL_FRZ;
4460 	writel(config, box->io_addr);
4461 }
4462 
4463 static void snr_uncore_mmio_enable_box(struct intel_uncore_box *box)
4464 {
4465 	u32 config;
4466 
4467 	if (!box->io_addr)
4468 		return;
4469 
4470 	config = readl(box->io_addr);
4471 	config &= ~SNBEP_PMON_BOX_CTL_FRZ;
4472 	writel(config, box->io_addr);
4473 }
4474 
4475 static void snr_uncore_mmio_enable_event(struct intel_uncore_box *box,
4476 					   struct perf_event *event)
4477 {
4478 	struct hw_perf_event *hwc = &event->hw;
4479 
4480 	if (!box->io_addr)
4481 		return;
4482 
4483 	writel(hwc->config | SNBEP_PMON_CTL_EN,
4484 	       box->io_addr + hwc->config_base);
4485 }
4486 
4487 static void snr_uncore_mmio_disable_event(struct intel_uncore_box *box,
4488 					    struct perf_event *event)
4489 {
4490 	struct hw_perf_event *hwc = &event->hw;
4491 
4492 	if (!box->io_addr)
4493 		return;
4494 
4495 	writel(hwc->config, box->io_addr + hwc->config_base);
4496 }
4497 
/* MMIO PMON callbacks for the SNR IMC boxes. */
static struct intel_uncore_ops snr_uncore_mmio_ops = {
	.init_box	= snr_uncore_mmio_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.disable_box	= snr_uncore_mmio_disable_box,
	.enable_box	= snr_uncore_mmio_enable_box,
	.disable_event	= snr_uncore_mmio_disable_event,
	.enable_event	= snr_uncore_mmio_enable_event,
	.read_counter	= uncore_mmio_read_counter,
};
4507 
/*
 * Named SNR IMC events.  The CAS count scale 6.103515625e-5 equals
 * 64 / 2^20, i.e. it converts 64-byte-granularity counts to MiB.
 */
static struct uncore_event_desc snr_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};
4518 
/* SNR IMC PMON type: 2 boxes, 4 general + 1 fixed 48-bit counters, MMIO. */
static struct intel_uncore_type snr_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNR_IMC_MMIO_PMON_FIXED_CTR,
	.fixed_ctl	= SNR_IMC_MMIO_PMON_FIXED_CTL,
	.event_descs	= snr_uncore_imc_events,
	.perf_ctr	= SNR_IMC_MMIO_PMON_CTR0,
	.event_ctl	= SNR_IMC_MMIO_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNR_IMC_MMIO_PMON_BOX_CTL,
	.mmio_offset	= SNR_IMC_MMIO_OFFSET,
	.ops		= &snr_uncore_mmio_ops,
	.format_group	= &skx_uncore_format_group,
};
4536 
/* Indexes into snr_imc_freerunning[] for the SNR IMC free-running counters. */
enum perf_uncore_snr_imc_freerunning_type_id {
	SNR_IMC_DCLK,
	SNR_IMC_DDR,

	SNR_IMC_FREERUNNING_TYPE_MAX,
};
4543 
/* SNR IMC free-running counters: { counter_base, counter_offset, box_offset, num_counters, bits }. */
static struct freerunning_counters snr_imc_freerunning[] = {
	[SNR_IMC_DCLK]	= { 0x22b0, 0x0, 0, 1, 48 },
	[SNR_IMC_DDR]	= { 0x2290, 0x8, 0, 2, 48 },
};
4548 
/*
 * Named SNR free-running IMC events.  The scale 3.814697266e-6 equals
 * 4 / 2^20, i.e. it converts 4-byte-granularity counts to MiB.
 */
static struct uncore_event_desc snr_uncore_imc_freerunning_events[] = {
	INTEL_UNCORE_EVENT_DESC(dclk,		"event=0xff,umask=0x10"),

	INTEL_UNCORE_EVENT_DESC(read,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(read.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(read.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(write,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(write.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(write.unit,	"MiB"),
	{ /* end: all zeroes */ },
};
4560 
/* Free-running counters need no enable/disable hooks, only map/read. */
static struct intel_uncore_ops snr_uncore_imc_freerunning_ops = {
	.init_box	= snr_uncore_mmio_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};
4567 
/* SNR free-running IMC PMON type (dclk + 2 DDR bandwidth counters). */
static struct intel_uncore_type snr_uncore_imc_free_running = {
	.name			= "imc_free_running",
	.num_counters		= 3,
	.num_boxes		= 1,
	.num_freerunning_types	= SNR_IMC_FREERUNNING_TYPE_MAX,
	.freerunning		= snr_imc_freerunning,
	.ops			= &snr_uncore_imc_freerunning_ops,
	.event_descs		= snr_uncore_imc_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
4578 
/* NULL-terminated list of SNR MMIO-based uncore PMU types. */
static struct intel_uncore_type *snr_mmio_uncores[] = {
	&snr_uncore_imc,
	&snr_uncore_imc_free_running,
	NULL,
};
4584 
/* Register the SNR MMIO uncore types with the common uncore core. */
void snr_uncore_mmio_init(void)
{
	uncore_mmio_uncores = snr_mmio_uncores;
}
4589 
4590 /* end of SNR uncore support */
4591 
4592 /* ICX uncore support */
4593 
/*
 * Per-CHA MSR offsets, indexed by pmu_idx; added to the ICX_C34_MSR_PMON_*
 * base registers (see icx_cha_hw_config and icx_uncore_chabox).  The
 * offsets are not monotonic in pmu_idx.
 */
static unsigned icx_cha_msr_offsets[] = {
	0x2a0, 0x2ae, 0x2bc, 0x2ca, 0x2d8, 0x2e6, 0x2f4, 0x302, 0x310,
	0x31e, 0x32c, 0x33a, 0x348, 0x356, 0x364, 0x372, 0x380, 0x38e,
	0x3aa, 0x3b8, 0x3c6, 0x3d4, 0x3e2, 0x3f0, 0x3fe, 0x40c, 0x41a,
	0x428, 0x436, 0x444, 0x452, 0x460, 0x46e, 0x47c, 0x0,   0xe,
	0x1c,  0x2a,  0x38,  0x46,
};
4601 
4602 static int icx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4603 {
4604 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
4605 	bool tie_en = !!(event->hw.config & SNBEP_CBO_PMON_CTL_TID_EN);
4606 
4607 	if (tie_en) {
4608 		reg1->reg = ICX_C34_MSR_PMON_BOX_FILTER0 +
4609 			    icx_cha_msr_offsets[box->pmu->pmu_idx];
4610 		reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
4611 		reg1->idx = 0;
4612 	}
4613 
4614 	return 0;
4615 }
4616 
/* MSR PMON callbacks for the ICX CHA boxes; reuses IVB-EP/SNB-EP/SNR helpers. */
static struct intel_uncore_ops icx_uncore_chabox_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= snr_cha_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= icx_cha_hw_config,
};
4626 
/* ICX CHA PMON type; num_boxes is filled in at runtime by icx_uncore_cpu_init(). */
static struct intel_uncore_type icx_uncore_chabox = {
	.name			= "cha",
	.num_counters		= 4,
	.perf_ctr_bits		= 48,
	.event_ctl		= ICX_C34_MSR_PMON_CTL0,
	.perf_ctr		= ICX_C34_MSR_PMON_CTR0,
	.box_ctl		= ICX_C34_MSR_PMON_BOX_CTL,
	.msr_offsets		= icx_cha_msr_offsets,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SNR_CHA_RAW_EVENT_MASK_EXT,
	.constraints		= skx_uncore_chabox_constraints,
	.ops			= &icx_uncore_chabox_ops,
	.format_group		= &snr_uncore_chabox_format_group,
};
4641 
/* Per-box MSR offsets shared by the ICX IIO, IRP and M2PCIe types (6 boxes). */
static unsigned icx_msr_offsets[] = {
	0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0,
};
4645 
/* ICX IIO events restricted to specific counters (event code, counter mask). */
static struct event_constraint icx_uncore_iio_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x03, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
	EVENT_CONSTRAINT_END
};
4654 
/* ICX IIO PMON type: 6 boxes, 4 48-bit counters each, MSR-based. */
static struct intel_uncore_type icx_uncore_iio = {
	.name			= "iio",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= ICX_IIO_MSR_PMON_CTL0,
	.perf_ctr		= ICX_IIO_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl		= ICX_IIO_MSR_PMON_BOX_CTL,
	.msr_offsets		= icx_msr_offsets,
	.constraints		= icx_uncore_iio_constraints,
	.ops			= &skx_uncore_iio_ops,
	.format_group		= &snr_uncore_iio_format_group,
};
4670 
/* ICX IRP PMON type: 6 boxes, 2 48-bit counters each, MSR-based. */
static struct intel_uncore_type icx_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= ICX_IRP0_MSR_PMON_CTL0,
	.perf_ctr		= ICX_IRP0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= ICX_IRP0_MSR_PMON_BOX_CTL,
	.msr_offsets		= icx_msr_offsets,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_format_group,
};
4684 
/* ICX M2PCIe events restricted to counters 0-1 (event code, counter mask). */
static struct event_constraint icx_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	EVENT_CONSTRAINT_END
};
4691 
/* ICX M2PCIe PMON type: 6 boxes, 4 48-bit counters each, MSR-based. */
static struct intel_uncore_type icx_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters	= 4,
	.num_boxes	= 6,
	.perf_ctr_bits	= 48,
	.event_ctl	= ICX_M2PCIE_MSR_PMON_CTL0,
	.perf_ctr	= ICX_M2PCIE_MSR_PMON_CTR0,
	.box_ctl	= ICX_M2PCIE_MSR_PMON_BOX_CTL,
	.msr_offsets	= icx_msr_offsets,
	.constraints	= icx_uncore_m2pcie_constraints,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_format_group,
};
4706 
/* Indexes into icx_iio_freerunning[] for the ICX IIO free-running counters. */
enum perf_uncore_icx_iio_freerunning_type_id {
	ICX_IIO_MSR_IOCLK,
	ICX_IIO_MSR_BW_IN,

	ICX_IIO_FREERUNNING_TYPE_MAX,
};
4713 
/* Per-box MSR offsets for the free-running IIO clock counter. */
static unsigned icx_iio_clk_freerunning_box_offsets[] = {
	0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0,
};
4717 
/* Per-box MSR offsets for the free-running IIO bandwidth-in counters. */
static unsigned icx_iio_bw_freerunning_box_offsets[] = {
	0x0, 0x10, 0x20, 0x90, 0xa0, 0xb0,
};
4721 
/* ICX IIO free-running counters: { base, counter_offset, box_offset, num_counters, bits, box_offsets[] }. */
static struct freerunning_counters icx_iio_freerunning[] = {
	[ICX_IIO_MSR_IOCLK]	= { 0xa55, 0x1, 0x20, 1, 48, icx_iio_clk_freerunning_box_offsets },
	[ICX_IIO_MSR_BW_IN]	= { 0xaa0, 0x1, 0x10, 8, 48, icx_iio_bw_freerunning_box_offsets },
};
4726 
/*
 * Named ICX IIO free-running events: one ioclk counter plus eight per-port
 * inbound bandwidth counters.  The scale 3.814697266e-6 equals 4 / 2^20,
 * i.e. it converts 4-byte-granularity counts to MiB.
 */
static struct uncore_event_desc icx_uncore_iio_freerunning_events[] = {
	/* Free-Running IIO CLOCKS Counter */
	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
	/* Free-Running IIO BANDWIDTH IN Counters */
	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4,		"event=0xff,umask=0x24"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5,		"event=0xff,umask=0x25"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6,		"event=0xff,umask=0x26"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7,		"event=0xff,umask=0x27"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit,	"MiB"),
	{ /* end: all zeroes */ },
};
4757 
/* ICX free-running IIO PMON type (1 ioclk + 8 bw_in counters per box). */
static struct intel_uncore_type icx_uncore_iio_free_running = {
	.name			= "iio_free_running",
	.num_counters		= 9,
	.num_boxes		= 6,
	.num_freerunning_types	= ICX_IIO_FREERUNNING_TYPE_MAX,
	.freerunning		= icx_iio_freerunning,
	.ops			= &skx_uncore_iio_freerunning_ops,
	.event_descs		= icx_uncore_iio_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
4768 
/* NULL-terminated list of ICX MSR-based uncore PMU types. */
static struct intel_uncore_type *icx_msr_uncores[] = {
	&skx_uncore_ubox,
	&icx_uncore_chabox,
	&icx_uncore_iio,
	&icx_uncore_irp,
	&icx_uncore_m2pcie,
	&skx_uncore_pcu,
	&icx_uncore_iio_free_running,
	NULL,
};
4779 
4780 /*
4781  * To determine the number of CHAs, it should read CAPID6(Low) and CAPID7 (High)
4782  * registers which located at Device 30, Function 3
4783  */
4784 #define ICX_CAPID6		0x9c
4785 #define ICX_CAPID7		0xa0
4786 
4787 static u64 icx_count_chabox(void)
4788 {
4789 	struct pci_dev *dev = NULL;
4790 	u64 caps = 0;
4791 
4792 	dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x345b, dev);
4793 	if (!dev)
4794 		goto out;
4795 
4796 	pci_read_config_dword(dev, ICX_CAPID6, (u32 *)&caps);
4797 	pci_read_config_dword(dev, ICX_CAPID7, (u32 *)&caps + 1);
4798 out:
4799 	pci_dev_put(dev);
4800 	return hweight64(caps);
4801 }
4802 
4803 void icx_uncore_cpu_init(void)
4804 {
4805 	u64 num_boxes = icx_count_chabox();
4806 
4807 	if (WARN_ON(num_boxes > ARRAY_SIZE(icx_cha_msr_offsets)))
4808 		return;
4809 	icx_uncore_chabox.num_boxes = num_boxes;
4810 	uncore_msr_uncores = icx_msr_uncores;
4811 }
4812 
/* ICX M2M PMON type: 4 PCI boxes, 4 48-bit counters each; reuses SNR layout. */
static struct intel_uncore_type icx_uncore_m2m = {
	.name		= "m2m",
	.num_counters   = 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SNR_M2M_PCI_PMON_CTR0,
	.event_ctl	= SNR_M2M_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNR_M2M_PCI_PMON_BOX_CTL,
	.ops		= &snr_m2m_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
4825 
/* sysfs format attributes exposed for the ICX UPI PMU. */
static struct attribute *icx_upi_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext4.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
4834 
/* sysfs "format" group wrapping the ICX UPI format attributes. */
static const struct attribute_group icx_upi_uncore_format_group = {
	.name = "format",
	.attrs = icx_upi_uncore_formats_attr,
};
4839 
/* ICX UPI PMON type: 3 PCI link boxes, 4 48-bit counters each. */
static struct intel_uncore_type icx_uncore_upi = {
	.name		= "upi",
	.num_counters   = 4,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.perf_ctr	= ICX_UPI_PCI_PMON_CTR0,
	.event_ctl	= ICX_UPI_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext = ICX_UPI_CTL_UMASK_EXT,
	.box_ctl	= ICX_UPI_PCI_PMON_BOX_CTL,
	.ops		= &skx_upi_uncore_pci_ops,
	.format_group	= &icx_upi_uncore_format_group,
};
4853 
/* ICX M3UPI events restricted to specific counters (event code, counter mask). */
static struct event_constraint icx_uncore_m3upi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x1c, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
	EVENT_CONSTRAINT_END
};
4865 
/* ICX M3UPI PMON type: 3 PCI link boxes, 4 48-bit counters each. */
static struct intel_uncore_type icx_uncore_m3upi = {
	.name		= "m3upi",
	.num_counters   = 4,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.perf_ctr	= ICX_M3UPI_PCI_PMON_CTR0,
	.event_ctl	= ICX_M3UPI_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= ICX_M3UPI_PCI_PMON_BOX_CTL,
	.constraints	= icx_uncore_m3upi_constraints,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
4879 
/* Indexes into icx_pci_uncores[] (referenced by the PCI ID table below). */
enum {
	ICX_PCI_UNCORE_M2M,
	ICX_PCI_UNCORE_UPI,
	ICX_PCI_UNCORE_M3UPI,
};
4885 
/* NULL-terminated list of ICX PCI-based uncore PMU types. */
static struct intel_uncore_type *icx_pci_uncores[] = {
	[ICX_PCI_UNCORE_M2M]		= &icx_uncore_m2m,
	[ICX_PCI_UNCORE_UPI]		= &icx_uncore_upi,
	[ICX_PCI_UNCORE_M3UPI]		= &icx_uncore_m3upi,
	NULL,
};
4892 
/*
 * PCI IDs for the ICX uncore devices.  driver_data encodes
 * (device, function, uncore type index, box index) via
 * UNCORE_PCI_DEV_FULL_DATA.
 */
static const struct pci_device_id icx_uncore_pci_ids[] = {
	{ /* M2M 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, ICX_PCI_UNCORE_M2M, 0),
	},
	{ /* M2M 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 0, ICX_PCI_UNCORE_M2M, 1),
	},
	{ /* M2M 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, ICX_PCI_UNCORE_M2M, 2),
	},
	{ /* M2M 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, ICX_PCI_UNCORE_M2M, 3),
	},
	{ /* UPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(2, 1, ICX_PCI_UNCORE_UPI, 0),
	},
	{ /* UPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(3, 1, ICX_PCI_UNCORE_UPI, 1),
	},
	{ /* UPI Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 1, ICX_PCI_UNCORE_UPI, 2),
	},
	{ /* M3UPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(5, 1, ICX_PCI_UNCORE_M3UPI, 0),
	},
	{ /* M3UPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(6, 1, ICX_PCI_UNCORE_M3UPI, 1),
	},
	{ /* M3UPI Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(7, 1, ICX_PCI_UNCORE_M3UPI, 2),
	},
	{ /* end: all zeroes */ }
};
4936 
/* PCI driver stub binding the ICX uncore PCI ID table; no probe callbacks. */
static struct pci_driver icx_uncore_pci_driver = {
	.name		= "icx_uncore",
	.id_table	= icx_uncore_pci_ids,
};
4941 
4942 int icx_uncore_pci_init(void)
4943 {
4944 	/* ICX UBOX DID */
4945 	int ret = snbep_pci2phy_map_init(0x3450, SKX_CPUNODEID,
4946 					 SKX_GIDNIDMAP, true);
4947 
4948 	if (ret)
4949 		return ret;
4950 
4951 	uncore_pci_uncores = icx_pci_uncores;
4952 	uncore_pci_driver = &icx_uncore_pci_driver;
4953 	return 0;
4954 }
4955 
4956 static void icx_uncore_imc_init_box(struct intel_uncore_box *box)
4957 {
4958 	unsigned int box_ctl = box->pmu->type->box_ctl +
4959 			       box->pmu->type->mmio_offset * (box->pmu->pmu_idx % ICX_NUMBER_IMC_CHN);
4960 	int mem_offset = (box->pmu->pmu_idx / ICX_NUMBER_IMC_CHN) * ICX_IMC_MEM_STRIDE +
4961 			 SNR_IMC_MMIO_MEM0_OFFSET;
4962 
4963 	__snr_uncore_mmio_init_box(box, box_ctl, mem_offset);
4964 }
4965 
/* MMIO PMON callbacks for the ICX IMC boxes; reuses the SNR box/event hooks. */
static struct intel_uncore_ops icx_uncore_mmio_ops = {
	.init_box	= icx_uncore_imc_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.disable_box	= snr_uncore_mmio_disable_box,
	.enable_box	= snr_uncore_mmio_enable_box,
	.disable_event	= snr_uncore_mmio_disable_event,
	.enable_event	= snr_uncore_mmio_enable_event,
	.read_counter	= uncore_mmio_read_counter,
};
4975 
/* ICX IMC PMON type: 8 boxes (channels), 4 general + 1 fixed 48-bit counters. */
static struct intel_uncore_type icx_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNR_IMC_MMIO_PMON_FIXED_CTR,
	.fixed_ctl	= SNR_IMC_MMIO_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	.perf_ctr	= SNR_IMC_MMIO_PMON_CTR0,
	.event_ctl	= SNR_IMC_MMIO_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNR_IMC_MMIO_PMON_BOX_CTL,
	.mmio_offset	= SNR_IMC_MMIO_OFFSET,
	.ops		= &icx_uncore_mmio_ops,
	.format_group	= &skx_uncore_format_group,
};
4993 
/* Indexes into icx_imc_freerunning[] for the ICX IMC free-running counters. */
enum perf_uncore_icx_imc_freerunning_type_id {
	ICX_IMC_DCLK,
	ICX_IMC_DDR,
	ICX_IMC_DDRT,

	ICX_IMC_FREERUNNING_TYPE_MAX,
};
5001 
/* ICX IMC free-running counters: { counter_base, counter_offset, box_offset, num_counters, bits }. */
static struct freerunning_counters icx_imc_freerunning[] = {
	[ICX_IMC_DCLK]	= { 0x22b0, 0x0, 0, 1, 48 },
	[ICX_IMC_DDR]	= { 0x2290, 0x8, 0, 2, 48 },
	[ICX_IMC_DDRT]	= { 0x22a0, 0x8, 0, 2, 48 },
};
5007 
/*
 * Named ICX free-running IMC events: dclk, DDR read/write and DDRT
 * read/write bandwidth.  The scale 3.814697266e-6 equals 4 / 2^20,
 * i.e. it converts 4-byte-granularity counts to MiB.
 */
static struct uncore_event_desc icx_uncore_imc_freerunning_events[] = {
	INTEL_UNCORE_EVENT_DESC(dclk,			"event=0xff,umask=0x10"),

	INTEL_UNCORE_EVENT_DESC(read,			"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(read.scale,		"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(read.unit,		"MiB"),
	INTEL_UNCORE_EVENT_DESC(write,			"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(write.scale,		"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(write.unit,		"MiB"),

	INTEL_UNCORE_EVENT_DESC(ddrt_read,		"event=0xff,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(ddrt_read.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(ddrt_read.unit,		"MiB"),
	INTEL_UNCORE_EVENT_DESC(ddrt_write,		"event=0xff,umask=0x31"),
	INTEL_UNCORE_EVENT_DESC(ddrt_write.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(ddrt_write.unit,	"MiB"),
	{ /* end: all zeroes */ },
};
5026 
5027 static void icx_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
5028 {
5029 	int mem_offset = box->pmu->pmu_idx * ICX_IMC_MEM_STRIDE +
5030 			 SNR_IMC_MMIO_MEM0_OFFSET;
5031 
5032 	__snr_uncore_mmio_init_box(box, uncore_mmio_box_ctl(box), mem_offset);
5033 }
5034 
/* Free-running counters need no enable/disable hooks, only map/read. */
static struct intel_uncore_ops icx_uncore_imc_freerunning_ops = {
	.init_box	= icx_uncore_imc_freerunning_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};
5041 
/* ICX free-running IMC PMON type (dclk + DDR + DDRT counters per box). */
static struct intel_uncore_type icx_uncore_imc_free_running = {
	.name			= "imc_free_running",
	.num_counters		= 5,
	.num_boxes		= 4,
	.num_freerunning_types	= ICX_IMC_FREERUNNING_TYPE_MAX,
	.freerunning		= icx_imc_freerunning,
	.ops			= &icx_uncore_imc_freerunning_ops,
	.event_descs		= icx_uncore_imc_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
5052 
/* NULL-terminated list of ICX MMIO-based uncore PMU types. */
static struct intel_uncore_type *icx_mmio_uncores[] = {
	&icx_uncore_imc,
	&icx_uncore_imc_free_running,
	NULL,
};
5058 
/* Register the ICX MMIO uncore types with the common uncore core. */
void icx_uncore_mmio_init(void)
{
	uncore_mmio_uncores = icx_mmio_uncores;
}
5063 
5064 /* end of ICX uncore support */
5065