1 // SPDX-License-Identifier: GPL-2.0
2 /* SandyBridge-EP/IvyTown uncore support */
3 #include "uncore.h"
4 
/*
 * Register offsets and bit layouts for the SNB-EP uncore PMON blocks.
 * Note: "TRESH" (sic) is the threshold field; the spelling follows the
 * hardware documentation and is kept for grep-ability.
 */
/* SNB-EP pci bus to socket mapping */
#define SNBEP_CPUNODEID			0x40
#define SNBEP_GIDNIDMAP			0x54

/* SNB-EP Box level control */
#define SNBEP_PMON_BOX_CTL_RST_CTRL	(1 << 0)
#define SNBEP_PMON_BOX_CTL_RST_CTRS	(1 << 1)
#define SNBEP_PMON_BOX_CTL_FRZ		(1 << 8)
#define SNBEP_PMON_BOX_CTL_FRZ_EN	(1 << 16)
/* Init value: reset control+counter registers and arm the freeze enable */
#define SNBEP_PMON_BOX_CTL_INT		(SNBEP_PMON_BOX_CTL_RST_CTRL | \
					 SNBEP_PMON_BOX_CTL_RST_CTRS | \
					 SNBEP_PMON_BOX_CTL_FRZ_EN)
/* SNB-EP event control */
#define SNBEP_PMON_CTL_EV_SEL_MASK	0x000000ff
#define SNBEP_PMON_CTL_UMASK_MASK	0x0000ff00
#define SNBEP_PMON_CTL_RST		(1 << 17)
#define SNBEP_PMON_CTL_EDGE_DET		(1 << 18)
#define SNBEP_PMON_CTL_EV_SEL_EXT	(1 << 21)
#define SNBEP_PMON_CTL_EN		(1 << 22)
#define SNBEP_PMON_CTL_INVERT		(1 << 23)
#define SNBEP_PMON_CTL_TRESH_MASK	0xff000000
/* Bits a user-supplied raw event config may legally set */
#define SNBEP_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_INVERT | \
					 SNBEP_PMON_CTL_TRESH_MASK)

/* SNB-EP Ubox event control: narrower (5-bit) threshold field */
#define SNBEP_U_MSR_PMON_CTL_TRESH_MASK		0x1f000000
#define SNBEP_U_MSR_PMON_RAW_EVENT_MASK		\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PMON_CTL_UMASK_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PMON_CTL_INVERT | \
				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)

#define SNBEP_CBO_PMON_CTL_TID_EN		(1 << 19)
#define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK	(SNBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

/* SNB-EP PCU event control */
#define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK	0x0000c000
#define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK	0x1f000000
#define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT	(1 << 30)
#define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET	(1 << 31)
#define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PMON_CTL_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)

#define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_RAW_EVENT_MASK | \
				 SNBEP_PMON_CTL_EV_SEL_EXT)

/* SNB-EP pci control register */
#define SNBEP_PCI_PMON_BOX_CTL			0xf4
#define SNBEP_PCI_PMON_CTL0			0xd8
/* SNB-EP pci counter register */
#define SNBEP_PCI_PMON_CTR0			0xa0

/* SNB-EP home agent register */
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0	0x40
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1	0x44
#define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH	0x48
/* SNB-EP memory controller register */
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL		0xf0
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR		0xd0
/* SNB-EP QPI register */
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0		0x228
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1		0x22c
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK0		0x238
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK1		0x23c

/* SNB-EP Ubox register */
#define SNBEP_U_MSR_PMON_CTR0			0xc16
#define SNBEP_U_MSR_PMON_CTL0			0xc10

#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL		0xc08
#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR		0xc09

/* SNB-EP Cbo register: per-box MSRs are C0 base + box_id * offset */
#define SNBEP_C0_MSR_PMON_CTR0			0xd16
#define SNBEP_C0_MSR_PMON_CTL0			0xd10
#define SNBEP_C0_MSR_PMON_BOX_CTL		0xd04
#define SNBEP_C0_MSR_PMON_BOX_FILTER		0xd14
#define SNBEP_CBO_MSR_OFFSET			0x20

#define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID	0x1f
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID	0x3fc00
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE	0x7c0000
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC	0xff800000

/* Build an extra_reg entry targeting the Cbox filter MSR */
#define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) {	\
	.event = (e),				\
	.msr = SNBEP_C0_MSR_PMON_BOX_FILTER,	\
	.config_mask = (m),			\
	.idx = (i)				\
}

/* SNB-EP PCU register */
#define SNBEP_PCU_MSR_PMON_CTR0			0xc36
#define SNBEP_PCU_MSR_PMON_CTL0			0xc30
#define SNBEP_PCU_MSR_PMON_BOX_CTL		0xc24
#define SNBEP_PCU_MSR_PMON_BOX_FILTER		0xc34
#define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK	0xffffffff
#define SNBEP_PCU_MSR_CORE_C3_CTR		0x3fc
#define SNBEP_PCU_MSR_CORE_C6_CTR		0x3fd
116 
/* IVBEP event control: like SNB-EP but no FRZ_EN bit in box init */
#define IVBEP_PMON_BOX_CTL_INT		(SNBEP_PMON_BOX_CTL_RST_CTRL | \
					 SNBEP_PMON_BOX_CTL_RST_CTRS)
#define IVBEP_PMON_RAW_EVENT_MASK		(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_TRESH_MASK)
/* IVBEP Ubox */
#define IVBEP_U_MSR_PMON_GLOBAL_CTL		0xc00
#define IVBEP_U_PMON_GLOBAL_FRZ_ALL		(1 << 31)
#define IVBEP_U_PMON_GLOBAL_UNFRZ_ALL		(1 << 29)

#define IVBEP_U_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PMON_CTL_UMASK_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
/* IVBEP Cbo */
#define IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK		(IVBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

/* IVBEP Cbox filter MSR field layout (64-bit register) */
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_TID		(0x1fULL << 0)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 5)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x3fULL << 17)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NID		(0xffffULL << 32)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC		(0x1ffULL << 52)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_C6		(0x1ULL << 61)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NC		(0x1ULL << 62)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)

/* IVBEP home agent */
#define IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST		(1 << 16)
#define IVBEP_HA_PCI_PMON_RAW_EVENT_MASK		\
				(IVBEP_PMON_RAW_EVENT_MASK | \
				 IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST)
/* IVBEP PCU */
#define IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(SNBEP_PMON_CTL_EV_SEL_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
/* IVBEP QPI */
#define IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
				(IVBEP_PMON_RAW_EVENT_MASK | \
				 SNBEP_PMON_CTL_EV_SEL_EXT)

/* Extract the i-th n-bit wide field of x, as x's own type */
#define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
				((1ULL << (n)) - 1)))
167 
/* Haswell-EP PMON register layout (MSR-based boxes) */
/* Haswell-EP Ubox */
#define HSWEP_U_MSR_PMON_CTR0			0x709
#define HSWEP_U_MSR_PMON_CTL0			0x705
#define HSWEP_U_MSR_PMON_FILTER			0x707

#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTL		0x703
#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTR		0x704

#define HSWEP_U_MSR_PMON_BOX_FILTER_TID		(0x1 << 0)
#define HSWEP_U_MSR_PMON_BOX_FILTER_CID		(0x1fULL << 1)
#define HSWEP_U_MSR_PMON_BOX_FILTER_MASK \
					(HSWEP_U_MSR_PMON_BOX_FILTER_TID | \
					 HSWEP_U_MSR_PMON_BOX_FILTER_CID)

/* Haswell-EP CBo */
#define HSWEP_C0_MSR_PMON_CTR0			0xe08
#define HSWEP_C0_MSR_PMON_CTL0			0xe01
#define HSWEP_C0_MSR_PMON_BOX_CTL			0xe00
#define HSWEP_C0_MSR_PMON_BOX_FILTER0		0xe05
#define HSWEP_CBO_MSR_OFFSET			0x10


/* Haswell-EP Cbox filter MSR field layout (64-bit register) */
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_TID		(0x3fULL << 0)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 6)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x7fULL << 17)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NID		(0xffffULL << 32)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC		(0x1ffULL << 52)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_C6		(0x1ULL << 61)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NC		(0x1ULL << 62)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)


/* Haswell-EP Sbox */
#define HSWEP_S0_MSR_PMON_CTR0			0x726
#define HSWEP_S0_MSR_PMON_CTL0			0x721
#define HSWEP_S0_MSR_PMON_BOX_CTL			0x720
#define HSWEP_SBOX_MSR_OFFSET			0xa
#define HSWEP_S_MSR_PMON_RAW_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
						 SNBEP_CBO_PMON_CTL_TID_EN)

/* Haswell-EP PCU */
#define HSWEP_PCU_MSR_PMON_CTR0			0x717
#define HSWEP_PCU_MSR_PMON_CTL0			0x711
#define HSWEP_PCU_MSR_PMON_BOX_CTL		0x710
#define HSWEP_PCU_MSR_PMON_BOX_FILTER		0x715
213 
/* Knights Landing PMON register layout */
/* KNL Ubox */
#define KNL_U_MSR_PMON_RAW_EVENT_MASK \
					(SNBEP_U_MSR_PMON_RAW_EVENT_MASK | \
						SNBEP_CBO_PMON_CTL_TID_EN)
/* KNL CHA */
#define KNL_CHA_MSR_OFFSET			0xc
#define KNL_CHA_MSR_PMON_CTL_QOR		(1 << 16)
#define KNL_CHA_MSR_PMON_RAW_EVENT_MASK \
					(SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK | \
					 KNL_CHA_MSR_PMON_CTL_QOR)
#define KNL_CHA_MSR_PMON_BOX_FILTER_TID		0x1ff
#define KNL_CHA_MSR_PMON_BOX_FILTER_STATE	(7 << 18)
#define KNL_CHA_MSR_PMON_BOX_FILTER_OP		(0xfffffe2aULL << 32)
#define KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE	(0x1ULL << 32)
#define KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE	(0x1ULL << 33)
#define KNL_CHA_MSR_PMON_BOX_FILTER_NNC		(0x1ULL << 37)

/* KNL EDC/MC UCLK */
#define KNL_UCLK_MSR_PMON_CTR0_LOW		0x400
#define KNL_UCLK_MSR_PMON_CTL0			0x420
#define KNL_UCLK_MSR_PMON_BOX_CTL		0x430
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW	0x44c
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL	0x454
#define KNL_PMON_FIXED_CTL_EN			0x1

/* KNL EDC */
#define KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW		0xa00
#define KNL_EDC0_ECLK_MSR_PMON_CTL0		0xa20
#define KNL_EDC0_ECLK_MSR_PMON_BOX_CTL		0xa30
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW	0xa3c
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL	0xa44

/* KNL MC */
#define KNL_MC0_CH0_MSR_PMON_CTR0_LOW		0xb00
#define KNL_MC0_CH0_MSR_PMON_CTL0		0xb20
#define KNL_MC0_CH0_MSR_PMON_BOX_CTL		0xb30
#define KNL_MC0_CH0_MSR_PMON_FIXED_LOW		0xb3c
#define KNL_MC0_CH0_MSR_PMON_FIXED_CTL		0xb44

/* KNL IRP */
#define KNL_IRP_PCI_PMON_BOX_CTL		0xf0
#define KNL_IRP_PCI_PMON_RAW_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
						 KNL_CHA_MSR_PMON_CTL_QOR)
/* KNL PCU: 7-bit event select plus occupancy-counter select bit */
#define KNL_PCU_PMON_CTL_EV_SEL_MASK		0x0000007f
#define KNL_PCU_PMON_CTL_USE_OCC_CTR		(1 << 7)
#define KNL_PCU_MSR_PMON_CTL_TRESH_MASK		0x3f000000
#define KNL_PCU_MSR_PMON_RAW_EVENT_MASK	\
				(KNL_PCU_PMON_CTL_EV_SEL_MASK | \
				 KNL_PCU_PMON_CTL_USE_OCC_CTR | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
				 SNBEP_PMON_CTL_EDGE_DET | \
				 SNBEP_CBO_PMON_CTL_TID_EN | \
				 SNBEP_PMON_CTL_INVERT | \
				 KNL_PCU_MSR_PMON_CTL_TRESH_MASK | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
271 
/* Skylake-X PMON register layout */
/* SKX pci bus to socket mapping */
#define SKX_CPUNODEID			0xc0
#define SKX_GIDNIDMAP			0xd4

/* SKX CHA filter MSR field layout (64-bit register) */
#define SKX_CHA_MSR_PMON_BOX_FILTER_TID		(0x1ffULL << 0)
#define SKX_CHA_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 9)
#define SKX_CHA_MSR_PMON_BOX_FILTER_STATE	(0x3ffULL << 17)
#define SKX_CHA_MSR_PMON_BOX_FILTER_REM		(0x1ULL << 32)
#define SKX_CHA_MSR_PMON_BOX_FILTER_LOC		(0x1ULL << 33)
#define SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC	(0x1ULL << 35)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NM		(0x1ULL << 36)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM	(0x1ULL << 37)
#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC0	(0x3ffULL << 41)
#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC1	(0x3ffULL << 51)
#define SKX_CHA_MSR_PMON_BOX_FILTER_C6		(0x1ULL << 61)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NC		(0x1ULL << 62)
#define SKX_CHA_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)

/* SKX IIO */
#define SKX_IIO0_MSR_PMON_CTL0		0xa48
#define SKX_IIO0_MSR_PMON_CTR0		0xa41
#define SKX_IIO0_MSR_PMON_BOX_CTL	0xa40
#define SKX_IIO_MSR_OFFSET		0x20

/* _EXT masks apply to the upper config word (config bits >= 32) */
#define SKX_PMON_CTL_TRESH_MASK		(0xff << 24)
#define SKX_PMON_CTL_TRESH_MASK_EXT	(0xf)
#define SKX_PMON_CTL_CH_MASK		(0xff << 4)
#define SKX_PMON_CTL_FC_MASK		(0x7 << 12)
#define SKX_IIO_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
					 SNBEP_PMON_CTL_UMASK_MASK | \
					 SNBEP_PMON_CTL_EDGE_DET | \
					 SNBEP_PMON_CTL_INVERT | \
					 SKX_PMON_CTL_TRESH_MASK)
#define SKX_IIO_PMON_RAW_EVENT_MASK_EXT	(SKX_PMON_CTL_TRESH_MASK_EXT | \
					 SKX_PMON_CTL_CH_MASK | \
					 SKX_PMON_CTL_FC_MASK)

/* SKX IRP */
#define SKX_IRP0_MSR_PMON_CTL0		0xa5b
#define SKX_IRP0_MSR_PMON_CTR0		0xa59
#define SKX_IRP0_MSR_PMON_BOX_CTL	0xa58
#define SKX_IRP_MSR_OFFSET		0x20

/* SKX UPI */
#define SKX_UPI_PCI_PMON_CTL0		0x350
#define SKX_UPI_PCI_PMON_CTR0		0x318
#define SKX_UPI_PCI_PMON_BOX_CTL	0x378
#define SKX_UPI_CTL_UMASK_EXT		0xffefff

/* SKX M2M */
#define SKX_M2M_PCI_PMON_CTL0		0x228
#define SKX_M2M_PCI_PMON_CTR0		0x200
#define SKX_M2M_PCI_PMON_BOX_CTL	0x258
326 
/* Snow Ridge PMON register layout (MSR, PCI and MMIO based boxes) */
/* SNR Ubox */
#define SNR_U_MSR_PMON_CTR0			0x1f98
#define SNR_U_MSR_PMON_CTL0			0x1f91
#define SNR_U_MSR_PMON_UCLK_FIXED_CTL		0x1f93
#define SNR_U_MSR_PMON_UCLK_FIXED_CTR		0x1f94

/* SNR CHA */
#define SNR_CHA_RAW_EVENT_MASK_EXT		0x3ffffff
#define SNR_CHA_MSR_PMON_CTL0			0x1c01
#define SNR_CHA_MSR_PMON_CTR0			0x1c08
#define SNR_CHA_MSR_PMON_BOX_CTL		0x1c00
#define SNR_C0_MSR_PMON_BOX_FILTER0		0x1c05


/* SNR IIO */
#define SNR_IIO_MSR_PMON_CTL0			0x1e08
#define SNR_IIO_MSR_PMON_CTR0			0x1e01
#define SNR_IIO_MSR_PMON_BOX_CTL		0x1e00
#define SNR_IIO_MSR_OFFSET			0x10
#define SNR_IIO_PMON_RAW_EVENT_MASK_EXT		0x7ffff

/* SNR IRP */
#define SNR_IRP0_MSR_PMON_CTL0			0x1ea8
#define SNR_IRP0_MSR_PMON_CTR0			0x1ea1
#define SNR_IRP0_MSR_PMON_BOX_CTL		0x1ea0
#define SNR_IRP_MSR_OFFSET			0x10

/* SNR M2PCIE */
#define SNR_M2PCIE_MSR_PMON_CTL0		0x1e58
#define SNR_M2PCIE_MSR_PMON_CTR0		0x1e51
#define SNR_M2PCIE_MSR_PMON_BOX_CTL		0x1e50
#define SNR_M2PCIE_MSR_OFFSET			0x10

/* SNR PCU */
#define SNR_PCU_MSR_PMON_CTL0			0x1ef1
#define SNR_PCU_MSR_PMON_CTR0			0x1ef8
#define SNR_PCU_MSR_PMON_BOX_CTL		0x1ef0
#define SNR_PCU_MSR_PMON_BOX_FILTER		0x1efc

/* SNR M2M */
#define SNR_M2M_PCI_PMON_CTL0			0x468
#define SNR_M2M_PCI_PMON_CTR0			0x440
#define SNR_M2M_PCI_PMON_BOX_CTL		0x438
#define SNR_M2M_PCI_PMON_UMASK_EXT		0xff

/* SNR IMC: counters live in an MMIO window discovered via PCI config */
#define SNR_IMC_MMIO_PMON_FIXED_CTL		0x54
#define SNR_IMC_MMIO_PMON_FIXED_CTR		0x38
#define SNR_IMC_MMIO_PMON_CTL0			0x40
#define SNR_IMC_MMIO_PMON_CTR0			0x8
#define SNR_IMC_MMIO_PMON_BOX_CTL		0x22800
#define SNR_IMC_MMIO_OFFSET			0x4000
#define SNR_IMC_MMIO_SIZE			0x4000
#define SNR_IMC_MMIO_BASE_OFFSET		0xd0
#define SNR_IMC_MMIO_BASE_MASK			0x1FFFFFFF
#define SNR_IMC_MMIO_MEM0_OFFSET		0xd8
#define SNR_IMC_MMIO_MEM0_MASK			0x7FF
384 
/*
 * sysfs "format" attributes.  DEFINE_UNCORE_FORMAT_ATTR(var, name, fmt)
 * declares format_attr_<var> exposing <name> with the given config bit
 * range.  Numeric suffixes (event2, filter_tid3, ...) are distinct C
 * identifiers for per-generation variants that surface the SAME sysfs
 * name with a different bit layout.
 */
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6");
DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-43,45-55");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext2, umask, "config:8-15,32-57");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext3, umask, "config:8-15,32-39");
DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(thresh9, thresh, "config:24-35");
DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(thresh6, thresh, "config:24-29");
DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge_det, occ_edge_det, "config:31");
DEFINE_UNCORE_FORMAT_ATTR(ch_mask, ch_mask, "config:36-43");
DEFINE_UNCORE_FORMAT_ATTR(ch_mask2, ch_mask, "config:36-47");
DEFINE_UNCORE_FORMAT_ATTR(fc_mask, fc_mask, "config:44-46");
DEFINE_UNCORE_FORMAT_ATTR(fc_mask2, fc_mask, "config:48-50");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid4, filter_tid, "config1:0-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid5, filter_tid, "config1:0-9");
DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link3, filter_link, "config1:12");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state3, filter_state, "config1:17-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_state4, filter_state, "config1:18-20");
DEFINE_UNCORE_FORMAT_ATTR(filter_state5, filter_state, "config1:17-26");
DEFINE_UNCORE_FORMAT_ATTR(filter_rem, filter_rem, "config1:32");
DEFINE_UNCORE_FORMAT_ATTR(filter_loc, filter_loc, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_nm, filter_nm, "config1:36");
DEFINE_UNCORE_FORMAT_ATTR(filter_not_nm, filter_not_nm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_local, filter_local, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_all_op, filter_all_op, "config1:35");
DEFINE_UNCORE_FORMAT_ATTR(filter_nnm, filter_nnm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc3, filter_opc, "config1:41-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc_0, filter_opc0, "config1:41-50");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc_1, filter_opc1, "config1:51-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_nc, filter_nc, "config1:62");
DEFINE_UNCORE_FORMAT_ATTR(filter_c6, filter_c6, "config1:61");
DEFINE_UNCORE_FORMAT_ATTR(filter_isoc, filter_isoc, "config1:63");
DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");
462 
463 static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
464 {
465 	struct pci_dev *pdev = box->pci_dev;
466 	int box_ctl = uncore_pci_box_ctl(box);
467 	u32 config = 0;
468 
469 	if (!pci_read_config_dword(pdev, box_ctl, &config)) {
470 		config |= SNBEP_PMON_BOX_CTL_FRZ;
471 		pci_write_config_dword(pdev, box_ctl, config);
472 	}
473 }
474 
475 static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
476 {
477 	struct pci_dev *pdev = box->pci_dev;
478 	int box_ctl = uncore_pci_box_ctl(box);
479 	u32 config = 0;
480 
481 	if (!pci_read_config_dword(pdev, box_ctl, &config)) {
482 		config &= ~SNBEP_PMON_BOX_CTL_FRZ;
483 		pci_write_config_dword(pdev, box_ctl, config);
484 	}
485 }
486 
/*
 * Start counting one event on a PCI-based box: write the event's config
 * to its control register with the enable bit set.
 */
static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
494 
/*
 * Stop counting one event on a PCI-based box: rewrite the config
 * without SNBEP_PMON_CTL_EN (hwc->config never carries the enable bit).
 */
static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, hwc->config);
}
502 
/*
 * Read a 64-bit PMON counter through two 32-bit PCI config reads.
 * The low dword lands in the low half of @count and the high dword in
 * the high half - correct because x86 is little-endian.  The two reads
 * are not atomic; the generic uncore code tolerates the small skew.
 * Read errors are ignored, leaving the corresponding half zero.
 */
static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
	pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);

	return count;
}
514 
515 static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
516 {
517 	struct pci_dev *pdev = box->pci_dev;
518 	int box_ctl = uncore_pci_box_ctl(box);
519 
520 	pci_write_config_dword(pdev, box_ctl, SNBEP_PMON_BOX_CTL_INT);
521 }
522 
523 static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
524 {
525 	u64 config;
526 	unsigned msr;
527 
528 	msr = uncore_msr_box_ctl(box);
529 	if (msr) {
530 		rdmsrl(msr, config);
531 		config |= SNBEP_PMON_BOX_CTL_FRZ;
532 		wrmsrl(msr, config);
533 	}
534 }
535 
536 static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
537 {
538 	u64 config;
539 	unsigned msr;
540 
541 	msr = uncore_msr_box_ctl(box);
542 	if (msr) {
543 		rdmsrl(msr, config);
544 		config &= ~SNBEP_PMON_BOX_CTL_FRZ;
545 		wrmsrl(msr, config);
546 	}
547 }
548 
/*
 * Start counting one event on an MSR-based box.  If the event uses an
 * extra (filter) register, program it first from shared_regs[0] so the
 * filter is in place before the counter is enabled.
 */
static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE)
		wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
559 
/*
 * Stop counting one event on an MSR-based box: rewrite the config
 * without SNBEP_PMON_CTL_EN (hwc->config never carries the enable bit).
 */
static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
					struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
}
567 
568 static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
569 {
570 	unsigned msr = uncore_msr_box_ctl(box);
571 
572 	if (msr)
573 		wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
574 }
575 
/* Generic SNB-EP PMON format attributes (8-bit threshold) */
static struct attribute *snbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
584 
/* Ubox format attributes: same as generic but only a 5-bit threshold */
static struct attribute *snbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};
593 
/* Cbox format attributes: adds tid_en and the config1 filter fields */
static struct attribute *snbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_nid.attr,
	&format_attr_filter_state.attr,
	&format_attr_filter_opc.attr,
	NULL,
};
607 
/* PCU format attributes: occupancy-counter controls plus band filters */
static struct attribute *snbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};
622 
/* QPI format attributes: packet match (config1) and mask (config2) fields */
static struct attribute *snbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};
649 
/*
 * Pre-defined IMC events.  The .scale entries convert CAS counts to
 * MiB: one CAS transfers a 64-byte line, 64 / 2^20 = 6.103515625e-5.
 */
static struct uncore_event_desc snbep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};
660 
/* Pre-defined QPI events (drs_data/ncb_data use the extended event bit) */
static struct uncore_event_desc snbep_uncore_qpi_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,       "event=0x14"),
	INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
	INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x102,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x103,umask=0x04"),
	{ /* end: all zeroes */ },
};
668 
/* sysfs "format" group for generic SNB-EP boxes */
static const struct attribute_group snbep_uncore_format_group = {
	.name = "format",
	.attrs = snbep_uncore_formats_attr,
};
673 
/* sysfs "format" group for the Ubox */
static const struct attribute_group snbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_ubox_formats_attr,
};
678 
/* sysfs "format" group for the Cbox */
static const struct attribute_group snbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_cbox_formats_attr,
};
683 
/* sysfs "format" group for the PCU */
static const struct attribute_group snbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = snbep_uncore_pcu_formats_attr,
};
688 
/* sysfs "format" group for the QPI links */
static const struct attribute_group snbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = snbep_uncore_qpi_formats_attr,
};
693 
/*
 * Common MSR-box callback initializers.  The __ variant omits
 * .init_box so later CPU generations can plug in their own init
 * while reusing the remaining callbacks.
 */
#define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),			\
	.init_box	= snbep_uncore_msr_init_box		\
704 
/* Callback table shared by all SNB-EP MSR-based boxes */
static struct intel_uncore_ops snbep_uncore_msr_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};
708 
/*
 * Common PCI-box callback initializers.  .enable_event is left out so
 * boxes with special enable requirements (e.g. QPI match/mask setup)
 * can supply their own.
 */
#define SNBEP_UNCORE_PCI_OPS_COMMON_INIT()			\
	.init_box	= snbep_uncore_pci_init_box,		\
	.disable_box	= snbep_uncore_pci_disable_box,		\
	.enable_box	= snbep_uncore_pci_enable_box,		\
	.disable_event	= snbep_uncore_pci_disable_event,	\
	.read_counter	= snbep_uncore_pci_read_counter
715 
716 static struct intel_uncore_ops snbep_uncore_pci_ops = {
717 	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
718 	.enable_event	= snbep_uncore_pci_enable_event,	\
719 };
720 
/*
 * Cbox counter constraints: for each event code, a bitmask of the
 * counters it may be scheduled on.
 */
static struct event_constraint snbep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0xe),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	EVENT_CONSTRAINT_END
};
750 
/* R2PCIe counter constraints (event code -> allowed-counter bitmask) */
static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	EVENT_CONSTRAINT_END
};
764 
/* R3QPI counter constraints (event code -> allowed-counter bitmask) */
static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};
796 
/* SNB-EP Ubox PMU: one box, two 44-bit counters plus a 48-bit fixed counter */
static struct intel_uncore_type snbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &snbep_uncore_msr_ops,
	.format_group	= &snbep_uncore_ubox_format_group,
};
811 
/*
 * Maps Cbox event encodings (matched as event->hw.config & config_mask ==
 * event) to the set of filter fields they need; the last column is OR-ed
 * into reg1->idx by snbep_cbox_hw_config().
 */
static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
	EVENT_EXTRA_END
};
840 
841 static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
842 {
843 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
844 	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
845 	int i;
846 
847 	if (uncore_box_is_fake(box))
848 		return;
849 
850 	for (i = 0; i < 5; i++) {
851 		if (reg1->alloc & (0x1 << i))
852 			atomic_sub(1 << (i * 6), &er->ref);
853 	}
854 	reg1->alloc = 0;
855 }
856 
/*
 * Try to take a reference on every Cbox filter field this event needs.
 *
 * The box's single filter register is shared by all events; er->ref packs
 * a 6-bit reference count per field.  A field can be shared when the
 * requested value matches what is already programmed.  @cbox_filter_mask
 * translates a field bit into that field's mask in the filter register
 * (the layout differs between SNB-EP and IVT).
 *
 * Returns NULL on success, or the empty constraint when a needed field is
 * held with a conflicting value.
 */
static struct event_constraint *
__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
			    u64 (*cbox_filter_mask)(int fields))
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i, alloc = 0;
	unsigned long flags;
	u64 mask;

	/* No filter fields requested: nothing to arbitrate. */
	if (reg1->idx == EXTRA_REG_NONE)
		return NULL;

	raw_spin_lock_irqsave(&er->lock, flags);
	for (i = 0; i < 5; i++) {
		if (!(reg1->idx & (0x1 << i)))
			continue;
		/* Real boxes keep references across calls; skip fields we already hold. */
		if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
			continue;

		mask = cbox_filter_mask(0x1 << i);
		/* Field is unreferenced, or already programmed with our value. */
		if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
		    !((reg1->config ^ er->config) & mask)) {
			atomic_add(1 << (i * 6), &er->ref);
			er->config &= ~mask;
			er->config |= reg1->config & mask;
			alloc |= (0x1 << i);
		} else {
			break;
		}
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);
	if (i < 5)
		goto fail;

	/* Remember what we took so put_constraint() can release it. */
	if (!uncore_box_is_fake(box))
		reg1->alloc |= alloc;

	return NULL;
fail:
	/* Roll back only the references taken in this call. */
	for (; i >= 0; i--) {
		if (alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	return &uncore_constraint_empty;
}
903 
904 static u64 snbep_cbox_filter_mask(int fields)
905 {
906 	u64 mask = 0;
907 
908 	if (fields & 0x1)
909 		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
910 	if (fields & 0x2)
911 		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
912 	if (fields & 0x4)
913 		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
914 	if (fields & 0x8)
915 		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
916 
917 	return mask;
918 }
919 
/* SNB-EP wrapper: arbitrate the shared filter using the SNB-EP field layout. */
static struct event_constraint *
snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
}
925 
926 static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
927 {
928 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
929 	struct extra_reg *er;
930 	int idx = 0;
931 
932 	for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
933 		if (er->event != (event->hw.config & er->config_mask))
934 			continue;
935 		idx |= er->idx;
936 	}
937 
938 	if (idx) {
939 		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
940 			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
941 		reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
942 		reg1->idx = idx;
943 	}
944 	return 0;
945 }
946 
/* Cbox ops: common MSR ops plus filter-register arbitration hooks. */
static struct intel_uncore_ops snbep_uncore_cbox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_cbox_hw_config,
	.get_constraint		= snbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

/*
 * SNB-EP Cbox: one box per core slice (num_boxes is clamped to the
 * actual core count in snbep_uncore_cpu_init()); one shared reg for
 * the filter MSR.
 */
static struct intel_uncore_type snbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &snbep_uncore_cbox_ops,
	.format_group		= &snbep_uncore_cbox_format_group,
};
969 
970 static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
971 {
972 	struct hw_perf_event *hwc = &event->hw;
973 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
974 	u64 config = reg1->config;
975 
976 	if (new_idx > reg1->idx)
977 		config <<= 8 * (new_idx - reg1->idx);
978 	else
979 		config >>= 8 * (reg1->idx - new_idx);
980 
981 	if (modify) {
982 		hwc->config += new_idx - reg1->idx;
983 		reg1->config = config;
984 		reg1->idx = new_idx;
985 	}
986 	return config;
987 }
988 
/*
 * Claim one 8-bit slot of the shared PCU filter register.  If the wanted
 * slot is held with a different value, rotate through the other three
 * slots, shifting the filter value (and event select) accordingly via
 * snbep_pcu_alter_er().  Returns NULL on success, the empty constraint
 * when all four slots conflict.
 */
static struct event_constraint *
snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	unsigned long flags;
	int idx = reg1->idx;
	u64 mask, config1 = reg1->config;
	bool ok = false;

	/* No filter needed, or this (real) event already holds a slot. */
	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;
again:
	mask = 0xffULL << (idx * 8);
	raw_spin_lock_irqsave(&er->lock, flags);
	/* Slot is free (8-bit refcount == 0) or already holds our value. */
	if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
	    !((config1 ^ er->config) & mask)) {
		atomic_add(1 << (idx * 8), &er->ref);
		er->config &= ~mask;
		er->config |= config1 & mask;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (!ok) {
		/* Try the next slot; give up after a full cycle of four. */
		idx = (idx + 1) % 4;
		if (idx != reg1->idx) {
			config1 = snbep_pcu_alter_er(event, idx, false);
			goto again;
		}
		return &uncore_constraint_empty;
	}

	if (!uncore_box_is_fake(box)) {
		/* Commit the slot move and remember the allocation. */
		if (idx != reg1->idx)
			snbep_pcu_alter_er(event, idx, true);
		reg1->alloc = 1;
	}
	return NULL;
}
1030 
1031 static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
1032 {
1033 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1034 	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
1035 
1036 	if (uncore_box_is_fake(box) || !reg1->alloc)
1037 		return;
1038 
1039 	atomic_sub(1 << (reg1->idx * 8), &er->ref);
1040 	reg1->alloc = 0;
1041 }
1042 
1043 static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1044 {
1045 	struct hw_perf_event *hwc = &event->hw;
1046 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1047 	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
1048 
1049 	if (ev_sel >= 0xb && ev_sel <= 0xe) {
1050 		reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
1051 		reg1->idx = ev_sel - 0xb;
1052 		reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
1053 	}
1054 	return 0;
1055 }
1056 
/* PCU ops: common MSR ops plus filter-slot arbitration hooks. */
static struct intel_uncore_ops snbep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

/* SNB-EP power control unit: one box, shared filter register. */
static struct intel_uncore_type snbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};

/* NULL-terminated list of the SNB-EP MSR-based uncore PMU types. */
static struct intel_uncore_type *snbep_msr_uncores[] = {
	&snbep_uncore_ubox,
	&snbep_uncore_cbox,
	&snbep_uncore_pcu,
	NULL,
};
1084 
/*
 * Register the SNB-EP MSR uncores, clamping the Cbox count to the number
 * of cores actually present.
 */
void snbep_uncore_cpu_init(void)
{
	if (snbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		snbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	uncore_msr_uncores = snbep_msr_uncores;
}
1091 
/* Slot indices into uncore_extra_pci_dev[die].dev[] for auxiliary devices. */
enum {
	SNBEP_PCI_QPI_PORT0_FILTER,
	SNBEP_PCI_QPI_PORT1_FILTER,
	BDX_PCI_QPI_PORT2_FILTER,
	HSWEP_PCI_PCU_3,
};
1098 
1099 static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1100 {
1101 	struct hw_perf_event *hwc = &event->hw;
1102 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1103 	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1104 
1105 	if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
1106 		reg1->idx = 0;
1107 		reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
1108 		reg1->config = event->attr.config1;
1109 		reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
1110 		reg2->config = event->attr.config2;
1111 	}
1112 	return 0;
1113 }
1114 
/*
 * Program the QPI packet match/mask filters (which live in a separate PCI
 * "filter" device, looked up via uncore_extra_pci_dev) and then enable
 * the counter itself.
 */
static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		/* QPI port N uses the PORTn_FILTER slot of this die. */
		int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
		int die = box->dieid;
		struct pci_dev *filter_pdev = uncore_extra_pci_dev[die].dev[idx];

		/* The filter device may be absent; skip filter programming then. */
		if (filter_pdev) {
			/* Each 64-bit match/mask value is written as two dwords. */
			pci_write_config_dword(filter_pdev, reg1->reg,
						(u32)reg1->config);
			pci_write_config_dword(filter_pdev, reg1->reg + 4,
						(u32)(reg1->config >> 32));
			pci_write_config_dword(filter_pdev, reg2->reg,
						(u32)reg2->config);
			pci_write_config_dword(filter_pdev, reg2->reg + 4,
						(u32)(reg2->config >> 32));
		}
	}

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
1141 
/* QPI ops: common PCI ops plus match/mask filter handling. */
static struct intel_uncore_ops snbep_uncore_qpi_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event		= snbep_qpi_enable_event,
	.hw_config		= snbep_qpi_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};

/* Register layout shared by all plain SNB-EP PCI uncore boxes. */
#define SNBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &snbep_uncore_pci_ops,		\
	.format_group	= &snbep_uncore_format_group
1157 
/* Home agent. */
static struct intel_uncore_type snbep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* Integrated memory controller: one box per channel, plus a fixed counter. */
static struct intel_uncore_type snbep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* QPI link layer: one box per port, with its own ops/format for filters. */
static struct intel_uncore_type snbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.event_descs		= snbep_uncore_qpi_events,
	.format_group		= &snbep_uncore_qpi_format_group,
};


/* R2PCIe ring-to-PCIe interface. */
static struct intel_uncore_type snbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* R3QPI ring-to-QPI interface: one box per link. */
static struct intel_uncore_type snbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
1211 
/* Indices into snbep_pci_uncores[], referenced by the PCI id table below. */
enum {
	SNBEP_PCI_UNCORE_HA,
	SNBEP_PCI_UNCORE_IMC,
	SNBEP_PCI_UNCORE_QPI,
	SNBEP_PCI_UNCORE_R2PCIE,
	SNBEP_PCI_UNCORE_R3QPI,
};

/* NULL-terminated list of the SNB-EP PCI-based uncore PMU types. */
static struct intel_uncore_type *snbep_pci_uncores[] = {
	[SNBEP_PCI_UNCORE_HA]		= &snbep_uncore_ha,
	[SNBEP_PCI_UNCORE_IMC]		= &snbep_uncore_imc,
	[SNBEP_PCI_UNCORE_QPI]		= &snbep_uncore_qpi,
	[SNBEP_PCI_UNCORE_R2PCIE]	= &snbep_uncore_r2pcie,
	[SNBEP_PCI_UNCORE_R3QPI]	= &snbep_uncore_r3qpi,
	NULL,
};
1228 
/*
 * PCI ids of the SNB-EP uncore devices; driver_data packs the uncore type
 * index and box (or extra-dev slot) number via UNCORE_PCI_DEV_DATA().
 */
static const struct pci_device_id snbep_uncore_pci_ids[] = {
	{ /* Home Agent */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
	},
	{ /* MC Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* QPI Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};

/* Id table only; the generic uncore PCI code supplies probe/remove. */
static struct pci_driver snbep_uncore_pci_driver = {
	.name		= "snbep_uncore",
	.id_table	= snbep_uncore_pci_ids,
};
1287 
/* Low three bits of the CPUNODEID register hold the local node id. */
#define NODE_ID_MASK	0x7

/*
 * build pci bus to socket mapping
 *
 * Walks every UBOX device with PCI id @devid, reads its node id
 * (@nodeid_loc) and the node-id mapping register (@idmap_loc), and
 * records bus -> physical-node entries in the per-segment pci2phy map.
 * Buses without a UBOX then inherit the mapping of a neighbouring bus,
 * scanned downward when @reverse is set, upward otherwise.
 */
static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool reverse)
{
	struct pci_dev *ubox_dev = NULL;
	int i, bus, nodeid, segment;
	struct pci2phy_map *map;
	int err = 0;
	u32 config = 0;

	while (1) {
		/* find the UBOX device */
		ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
		if (!ubox_dev)
			break;
		bus = ubox_dev->bus->number;
		/* get the Node ID of the local register */
		err = pci_read_config_dword(ubox_dev, nodeid_loc, &config);
		if (err)
			break;
		nodeid = config & NODE_ID_MASK;
		/* get the Node ID mapping */
		err = pci_read_config_dword(ubox_dev, idmap_loc, &config);
		if (err)
			break;

		segment = pci_domain_nr(ubox_dev->bus);
		raw_spin_lock(&pci2phy_map_lock);
		map = __find_pci2phy_map(segment);
		if (!map) {
			raw_spin_unlock(&pci2phy_map_lock);
			err = -ENOMEM;
			break;
		}

		/*
		 * every three bits in the Node ID mapping register maps
		 * to a particular node.
		 */
		for (i = 0; i < 8; i++) {
			if (nodeid == ((config >> (3 * i)) & 0x7)) {
				map->pbus_to_physid[bus] = i;
				break;
			}
		}
		raw_spin_unlock(&pci2phy_map_lock);
	}

	if (!err) {
		/*
		 * For PCI bus with no UBOX device, find the next bus
		 * that has UBOX device and use its mapping.
		 */
		raw_spin_lock(&pci2phy_map_lock);
		list_for_each_entry(map, &pci2phy_map_head, list) {
			i = -1;
			if (reverse) {
				for (bus = 255; bus >= 0; bus--) {
					if (map->pbus_to_physid[bus] >= 0)
						i = map->pbus_to_physid[bus];
					else
						map->pbus_to_physid[bus] = i;
				}
			} else {
				for (bus = 0; bus <= 255; bus++) {
					if (map->pbus_to_physid[bus] >= 0)
						i = map->pbus_to_physid[bus];
					else
						map->pbus_to_physid[bus] = i;
				}
			}
		}
		raw_spin_unlock(&pci2phy_map_lock);
	}

	/* Drop the reference left by the last pci_get_device() iteration. */
	pci_dev_put(ubox_dev);

	return err ? pcibios_err_to_errno(err) : 0;
}
1370 
/*
 * Register the SNB-EP PCI uncores after building the bus-to-socket map
 * from the UBOX device (PCI id 0x3ce0).
 */
int snbep_uncore_pci_init(void)
{
	int ret = snbep_pci2phy_map_init(0x3ce0, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
	if (ret)
		return ret;
	uncore_pci_uncores = snbep_pci_uncores;
	uncore_pci_driver = &snbep_uncore_pci_driver;
	return 0;
}
1380 /* end of Sandy Bridge-EP uncore support */
1381 
1382 /* IvyTown uncore support */
1383 static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box)
1384 {
1385 	unsigned msr = uncore_msr_box_ctl(box);
1386 	if (msr)
1387 		wrmsrl(msr, IVBEP_PMON_BOX_CTL_INT);
1388 }
1389 
/* Reset an IVT PCI box via its box-control config register. */
static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;

	pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
}
1396 
/* IVT MSR boxes reuse the SNB-EP ops except for the init (different reset value). */
#define IVBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.init_box	= ivbep_uncore_msr_init_box,		\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

static struct intel_uncore_ops ivbep_uncore_msr_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};

/* Likewise for PCI boxes: SNB-EP ops with the IVT init. */
static struct intel_uncore_ops ivbep_uncore_pci_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};
1417 
/* Register layout shared by all plain IVT PCI uncore boxes. */
#define IVBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= IVBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &ivbep_uncore_pci_ops,			\
	.format_group	= &ivbep_uncore_format_group
1425 
/* sysfs "format" attributes: generic IVT boxes (8-bit threshold). */
static struct attribute *ivbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

/* Ubox: 5-bit threshold. */
static struct attribute *ivbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

/* Cbox: adds tid_en and the filter-register fields. */
static struct attribute *ivbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_link.attr,
	&format_attr_filter_state2.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

/* PCU: occupancy controls and the four filter bands. */
static struct attribute *ivbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

/* QPI: extended event select plus packet match/mask fields. */
static struct attribute *ivbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};
1500 
/* sysfs "format" groups wrapping the attribute lists above. */
static const struct attribute_group ivbep_uncore_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_formats_attr,
};

static const struct attribute_group ivbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_ubox_formats_attr,
};

static const struct attribute_group ivbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_cbox_formats_attr,
};

static const struct attribute_group ivbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_pcu_formats_attr,
};

static const struct attribute_group ivbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_qpi_formats_attr,
};
1525 
/* IVT Ubox: same register layout as SNB-EP, IVT event mask and ops. */
static struct intel_uncore_type ivbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= IVBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_ubox_format_group,
};
1540 
/*
 * IVT Cbox event -> filter-field map (same scheme as the SNB-EP table,
 * but field bits follow the IVT layout in ivbep_cbox_filter_mask()).
 */
static struct extra_reg ivbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	EVENT_EXTRA_END
};
1581 
1582 static u64 ivbep_cbox_filter_mask(int fields)
1583 {
1584 	u64 mask = 0;
1585 
1586 	if (fields & 0x1)
1587 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_TID;
1588 	if (fields & 0x2)
1589 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK;
1590 	if (fields & 0x4)
1591 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
1592 	if (fields & 0x8)
1593 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NID;
1594 	if (fields & 0x10) {
1595 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
1596 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NC;
1597 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_C6;
1598 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
1599 	}
1600 
1601 	return mask;
1602 }
1603 
/* IVT wrapper: arbitrate the shared filter using the IVT field layout. */
static struct event_constraint *
ivbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, ivbep_cbox_filter_mask);
}
1609 
1610 static int ivbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1611 {
1612 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1613 	struct extra_reg *er;
1614 	int idx = 0;
1615 
1616 	for (er = ivbep_uncore_cbox_extra_regs; er->msr; er++) {
1617 		if (er->event != (event->hw.config & er->config_mask))
1618 			continue;
1619 		idx |= er->idx;
1620 	}
1621 
1622 	if (idx) {
1623 		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
1624 			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1625 		reg1->config = event->attr.config1 & ivbep_cbox_filter_mask(idx);
1626 		reg1->idx = idx;
1627 	}
1628 	return 0;
1629 }
1630 
/*
 * Program the (shared) Cbox filter, then enable the counter itself.
 */
static void ivbep_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		u64 filter = uncore_shared_reg_config(box, 0);
		/* The 64-bit filter value is split across two MSRs. */
		wrmsrl(reg1->reg, filter & 0xffffffff);
		/*
		 * NOTE(review): the upper half is written at reg + 6, not
		 * reg + 1 -- presumably the second filter MSR really is six
		 * addresses above the first; confirm against the IVT uncore
		 * performance monitoring manual.
		 */
		wrmsrl(reg1->reg + 6, filter >> 32);
	}

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
1644 
/* IVT Cbox ops: IVT init/enable plus the shared filter arbitration. */
static struct intel_uncore_ops ivbep_uncore_cbox_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= ivbep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= ivbep_cbox_hw_config,
	.get_constraint		= ivbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

/* IVT Cbox: up to 15 boxes (clamped to core count in ivbep_uncore_cpu_init()). */
static struct intel_uncore_type ivbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 15,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &ivbep_uncore_cbox_ops,
	.format_group		= &ivbep_uncore_cbox_format_group,
};
1672 
/* IVT PCU ops: IVT common MSR ops with the SNB-EP filter-slot hooks. */
static struct intel_uncore_ops ivbep_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

/* IVT power control unit: one box, shared filter register. */
static struct intel_uncore_type ivbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_pcu_ops,
	.format_group		= &ivbep_uncore_pcu_format_group,
};

/* NULL-terminated list of the IVT MSR-based uncore PMU types. */
static struct intel_uncore_type *ivbep_msr_uncores[] = {
	&ivbep_uncore_ubox,
	&ivbep_uncore_cbox,
	&ivbep_uncore_pcu,
	NULL,
};
1700 
/*
 * Register the IVT MSR uncores, clamping the Cbox count to the number
 * of cores actually present.
 */
void ivbep_uncore_cpu_init(void)
{
	if (ivbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		ivbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	uncore_msr_uncores = ivbep_msr_uncores;
}
1707 
/* IVT home agent: two boxes. */
static struct intel_uncore_type ivbep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

/* IVT memory controller: one box per channel, plus a fixed counter. */
static struct intel_uncore_type ivbep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};
1727 
/*
 * registers in IRP boxes are not properly aligned, so the per-counter
 * control and counter offsets are looked up by counter index instead of
 * being computed from a base + stride.
 */
static unsigned ivbep_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
static unsigned ivbep_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};
1731 
1732 static void ivbep_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1733 {
1734 	struct pci_dev *pdev = box->pci_dev;
1735 	struct hw_perf_event *hwc = &event->hw;
1736 
1737 	pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx],
1738 			       hwc->config | SNBEP_PMON_CTL_EN);
1739 }
1740 
1741 static void ivbep_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1742 {
1743 	struct pci_dev *pdev = box->pci_dev;
1744 	struct hw_perf_event *hwc = &event->hw;
1745 
1746 	pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], hwc->config);
1747 }
1748 
1749 static u64 ivbep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
1750 {
1751 	struct pci_dev *pdev = box->pci_dev;
1752 	struct hw_perf_event *hwc = &event->hw;
1753 	u64 count = 0;
1754 
1755 	pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
1756 	pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
1757 
1758 	return count;
1759 }
1760 
/* IRP ops: generic SNB-EP PCI box control, custom per-counter accessors. */
static struct intel_uncore_ops ivbep_uncore_irp_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= ivbep_uncore_irp_disable_event,
	.enable_event	= ivbep_uncore_irp_enable_event,
	.read_counter	= ivbep_uncore_irp_read_counter,
};

/* IVBEP IRP PMON: no perf_ctr/event_ctl base; ops use the offset tables. */
static struct intel_uncore_type ivbep_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= IVBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &ivbep_uncore_irp_ops,
	.format_group		= &ivbep_uncore_format_group,
};
1780 
/*
 * QPI ops: reuses the SNB-EP QPI enable/hw_config paths (which program
 * match/mask registers on the separate filter PCI devices) plus the
 * generic shared-register constraint helpers.
 */
static struct intel_uncore_ops ivbep_uncore_qpi_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_qpi_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
	.hw_config	= snbep_qpi_hw_config,
	.get_constraint	= uncore_get_constraint,
	.put_constraint	= uncore_put_constraint,
};

/* IVBEP QPI PMON: three ports, one shared (filter) register. */
static struct intel_uncore_type ivbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_qpi_ops,
	.format_group		= &ivbep_uncore_qpi_format_group,
};
1806 
/* IVBEP R2PCIe PMON: 44-bit counters, SNB-EP event constraints reused. */
static struct intel_uncore_type ivbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

/* IVBEP R3QPI PMON: two link instances, SNB-EP event constraints reused. */
static struct intel_uncore_type ivbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};
1824 
/* Indices into ivbep_pci_uncores[], referenced by the PCI id table. */
enum {
	IVBEP_PCI_UNCORE_HA,
	IVBEP_PCI_UNCORE_IMC,
	IVBEP_PCI_UNCORE_IRP,
	IVBEP_PCI_UNCORE_QPI,
	IVBEP_PCI_UNCORE_R2PCIE,
	IVBEP_PCI_UNCORE_R3QPI,
};

static struct intel_uncore_type *ivbep_pci_uncores[] = {
	[IVBEP_PCI_UNCORE_HA]	= &ivbep_uncore_ha,
	[IVBEP_PCI_UNCORE_IMC]	= &ivbep_uncore_imc,
	[IVBEP_PCI_UNCORE_IRP]	= &ivbep_uncore_irp,
	[IVBEP_PCI_UNCORE_QPI]	= &ivbep_uncore_qpi,
	[IVBEP_PCI_UNCORE_R2PCIE]	= &ivbep_uncore_r2pcie,
	[IVBEP_PCI_UNCORE_R3QPI]	= &ivbep_uncore_r3qpi,
	NULL,
};
1843 
/*
 * PCI ids of the IvyBridge-EP uncore PMON devices.  The two trailing
 * QPI filter devices are tracked as UNCORE_EXTRA_PCI_DEV entries, not
 * as PMUs in their own right.
 */
static const struct pci_device_id ivbep_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};
1929 
/* Skeleton PCI driver: used only to match ids, never bound as a driver. */
static struct pci_driver ivbep_uncore_pci_driver = {
	.name		= "ivbep_uncore",
	.id_table	= ivbep_uncore_pci_ids,
};
1934 
1935 int ivbep_uncore_pci_init(void)
1936 {
1937 	int ret = snbep_pci2phy_map_init(0x0e1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
1938 	if (ret)
1939 		return ret;
1940 	uncore_pci_uncores = ivbep_pci_uncores;
1941 	uncore_pci_driver = &ivbep_uncore_pci_driver;
1942 	return 0;
1943 }
1944 /* end of IvyTown uncore support */
1945 
1946 /* KNL uncore support */
/* sysfs "format" attributes for the KNL Ubox (5-bit threshold field). */
static struct attribute *knl_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

static const struct attribute_group knl_uncore_ubox_format_group = {
	.name = "format",
	.attrs = knl_uncore_ubox_formats_attr,
};

/* KNL Ubox PMON: reuses the Haswell-EP Ubox MSR layout. */
static struct intel_uncore_type knl_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= KNL_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops			= &snbep_uncore_msr_ops,
	.format_group		= &knl_uncore_ubox_format_group,
};
1976 
/* sysfs "format" attributes for the KNL CHA, including filter fields. */
static struct attribute *knl_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_qor.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid4.attr,
	&format_attr_filter_link3.attr,
	&format_attr_filter_state4.attr,
	&format_attr_filter_local.attr,
	&format_attr_filter_all_op.attr,
	&format_attr_filter_nnm.attr,
	&format_attr_filter_opc3.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static const struct attribute_group knl_uncore_cha_format_group = {
	.name = "format",
	.attrs = knl_uncore_cha_formats_attr,
};

/* Events restricted to specific CHA counters. */
static struct event_constraint knl_uncore_cha_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	EVENT_CONSTRAINT_END
};

/*
 * Events that use the CHA filter register; er->idx encodes which
 * filter fields apply (decoded by knl_cha_filter_mask()).
 */
static struct extra_reg knl_uncore_cha_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x3d, 0xff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x4),
	EVENT_EXTRA_END
};
2017 
2018 static u64 knl_cha_filter_mask(int fields)
2019 {
2020 	u64 mask = 0;
2021 
2022 	if (fields & 0x1)
2023 		mask |= KNL_CHA_MSR_PMON_BOX_FILTER_TID;
2024 	if (fields & 0x2)
2025 		mask |= KNL_CHA_MSR_PMON_BOX_FILTER_STATE;
2026 	if (fields & 0x4)
2027 		mask |= KNL_CHA_MSR_PMON_BOX_FILTER_OP;
2028 	return mask;
2029 }
2030 
/* Resolve shared-filter constraints for a KNL CHA event. */
static struct event_constraint *
knl_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, knl_cha_filter_mask);
}
2036 
/*
 * Set up the filter (extra) register for a KNL CHA event.
 *
 * Collect the filter-field selector bits from every matching entry in
 * knl_uncore_cha_extra_regs[]; if any apply, program this box's filter
 * MSR address and the user-requested filter value from attr.config1,
 * masked to the valid fields.  Always returns 0.
 */
static int knl_cha_hw_config(struct intel_uncore_box *box,
			     struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = knl_uncore_cha_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		/* Filter MSRs are laid out per-box at KNL_CHA_MSR_OFFSET stride. */
		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
			    KNL_CHA_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & knl_cha_filter_mask(idx);

		/* These node-match bits are always forced on. */
		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE;
		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE;
		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_NNC;
		reg1->idx = idx;
	}
	return 0;
}
2062 
/* Defined later in the Haswell-EP section; KNL CHA shares its enable path. */
static void hswep_cbox_enable_event(struct intel_uncore_box *box,
				    struct perf_event *event);

static struct intel_uncore_ops knl_uncore_cha_ops = {
	.init_box		= snbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= knl_cha_hw_config,
	.get_constraint		= knl_cha_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
2077 
/* KNL CHA PMON: 38 instances, one shared filter register per box. */
static struct intel_uncore_type knl_uncore_cha = {
	.name			= "cha",
	.num_counters		= 4,
	.num_boxes		= 38,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= KNL_CHA_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= KNL_CHA_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= knl_uncore_cha_constraints,
	.ops			= &knl_uncore_cha_ops,
	.format_group		= &knl_uncore_cha_format_group,
};
2093 
/* sysfs "format" attributes for the KNL PCU (occupancy-capable events). */
static struct attribute *knl_uncore_pcu_formats_attr[] = {
	&format_attr_event2.attr,
	&format_attr_use_occ_ctr.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh6.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge_det.attr,
	NULL,
};

static const struct attribute_group knl_uncore_pcu_format_group = {
	.name = "format",
	.attrs = knl_uncore_pcu_formats_attr,
};

/* KNL PCU PMON: reuses the Haswell-EP PCU MSR layout. */
static struct intel_uncore_type knl_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= KNL_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.ops			= &snbep_uncore_msr_ops,
	.format_group		= &knl_uncore_pcu_format_group,
};
2124 
/* MSR-based uncore PMU types on KNL; NULL-terminated list. */
static struct intel_uncore_type *knl_msr_uncores[] = {
	&knl_uncore_ubox,
	&knl_uncore_cha,
	&knl_uncore_pcu,
	NULL,
};

/* Register the KNL MSR-based uncore PMUs. */
void knl_uncore_cpu_init(void)
{
	uncore_msr_uncores = knl_msr_uncores;
}
2136 
2137 static void knl_uncore_imc_enable_box(struct intel_uncore_box *box)
2138 {
2139 	struct pci_dev *pdev = box->pci_dev;
2140 	int box_ctl = uncore_pci_box_ctl(box);
2141 
2142 	pci_write_config_dword(pdev, box_ctl, 0);
2143 }
2144 
2145 static void knl_uncore_imc_enable_event(struct intel_uncore_box *box,
2146 					struct perf_event *event)
2147 {
2148 	struct pci_dev *pdev = box->pci_dev;
2149 	struct hw_perf_event *hwc = &event->hw;
2150 
2151 	if ((event->attr.config & SNBEP_PMON_CTL_EV_SEL_MASK)
2152 							== UNCORE_FIXED_EVENT)
2153 		pci_write_config_dword(pdev, hwc->config_base,
2154 				       hwc->config | KNL_PMON_FIXED_CTL_EN);
2155 	else
2156 		pci_write_config_dword(pdev, hwc->config_base,
2157 				       hwc->config | SNBEP_PMON_CTL_EN);
2158 }
2159 
/* Shared ops for all KNL IMC/EDC clock-domain PMON units. */
static struct intel_uncore_ops knl_uncore_imc_ops = {
	.init_box	= snbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= knl_uncore_imc_enable_box,
	.read_counter	= snbep_uncore_pci_read_counter,
	.enable_event	= knl_uncore_imc_enable_event,
	.disable_event	= snbep_uncore_pci_disable_event,
};
2168 
/* KNL IMC UClk PMON: one unit per memory controller (2 boxes). */
static struct intel_uncore_type knl_uncore_imc_uclk = {
	.name			= "imc_uclk",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};

/* KNL IMC DClk PMON: one unit per memory channel (6 boxes). */
static struct intel_uncore_type knl_uncore_imc_dclk = {
	.name			= "imc",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_MC0_CH0_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_MC0_CH0_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_MC0_CH0_MSR_PMON_FIXED_LOW,
	.fixed_ctl		= KNL_MC0_CH0_MSR_PMON_FIXED_CTL,
	.box_ctl		= KNL_MC0_CH0_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};

/* KNL EDC UClk PMON: one unit per EDC (8 boxes). */
static struct intel_uncore_type knl_uncore_edc_uclk = {
	.name			= "edc_uclk",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};

/* KNL EDC EClk PMON: one unit per EDC (8 boxes). */
static struct intel_uncore_type knl_uncore_edc_eclk = {
	.name			= "edc_eclk",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_EDC0_ECLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW,
	.fixed_ctl		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL,
	.box_ctl		= KNL_EDC0_ECLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};
2232 
/* Event 0x23 is restricted to the first two counters. */
static struct event_constraint knl_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	EVENT_CONSTRAINT_END
};

/* KNL M2PCIe PMON: generic SNB-EP PCI layout. */
static struct intel_uncore_type knl_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= knl_uncore_m2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2246 
/* sysfs "format" attributes for the KNL IRP. */
static struct attribute *knl_uncore_irp_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_qor.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group knl_uncore_irp_format_group = {
	.name = "format",
	.attrs = knl_uncore_irp_formats_attr,
};

/* KNL IRP PMON: generic SNB-EP PCI ops with KNL-specific box control. */
static struct intel_uncore_type knl_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= KNL_IRP_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= KNL_IRP_PCI_PMON_BOX_CTL,
	.ops			= &snbep_uncore_pci_ops,
	.format_group		= &knl_uncore_irp_format_group,
};
2274 
/* Indices into knl_pci_uncores[], referenced by the PCI id table. */
enum {
	KNL_PCI_UNCORE_MC_UCLK,
	KNL_PCI_UNCORE_MC_DCLK,
	KNL_PCI_UNCORE_EDC_UCLK,
	KNL_PCI_UNCORE_EDC_ECLK,
	KNL_PCI_UNCORE_M2PCIE,
	KNL_PCI_UNCORE_IRP,
};

static struct intel_uncore_type *knl_pci_uncores[] = {
	[KNL_PCI_UNCORE_MC_UCLK]	= &knl_uncore_imc_uclk,
	[KNL_PCI_UNCORE_MC_DCLK]	= &knl_uncore_imc_dclk,
	[KNL_PCI_UNCORE_EDC_UCLK]	= &knl_uncore_edc_uclk,
	[KNL_PCI_UNCORE_EDC_ECLK]	= &knl_uncore_edc_eclk,
	[KNL_PCI_UNCORE_M2PCIE]		= &knl_uncore_m2pcie,
	[KNL_PCI_UNCORE_IRP]		= &knl_uncore_irp,
	NULL,
};
2293 
2294 /*
2295  * KNL uses a common PCI device ID for multiple instances of an Uncore PMU
2296  * device type. prior to KNL, each instance of a PMU device type had a unique
2297  * device ID.
2298  *
2299  *	PCI Device ID	Uncore PMU Devices
2300  *	----------------------------------
2301  *	0x7841		MC0 UClk, MC1 UClk
2302  *	0x7843		MC0 DClk CH 0, MC0 DClk CH 1, MC0 DClk CH 2,
2303  *			MC1 DClk CH 0, MC1 DClk CH 1, MC1 DClk CH 2
2304  *	0x7833		EDC0 UClk, EDC1 UClk, EDC2 UClk, EDC3 UClk,
2305  *			EDC4 UClk, EDC5 UClk, EDC6 UClk, EDC7 UClk
2306  *	0x7835		EDC0 EClk, EDC1 EClk, EDC2 EClk, EDC3 EClk,
2307  *			EDC4 EClk, EDC5 EClk, EDC6 EClk, EDC7 EClk
2308  *	0x7817		M2PCIe
2309  *	0x7814		IRP
2310 */
2311 
/*
 * Because KNL reuses one device ID per PMU type, each entry also pins
 * the expected PCI device/function via UNCORE_PCI_DEV_FULL_DATA(dev,
 * func, type, idx) so instances can be told apart.
 */
static const struct pci_device_id knl_uncore_pci_ids[] = {
	{ /* MC0 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 0, KNL_PCI_UNCORE_MC_UCLK, 0),
	},
	{ /* MC1 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 0, KNL_PCI_UNCORE_MC_UCLK, 1),
	},
	{ /* MC0 DClk CH 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 2, KNL_PCI_UNCORE_MC_DCLK, 0),
	},
	{ /* MC0 DClk CH 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 3, KNL_PCI_UNCORE_MC_DCLK, 1),
	},
	{ /* MC0 DClk CH 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 4, KNL_PCI_UNCORE_MC_DCLK, 2),
	},
	{ /* MC1 DClk CH 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 2, KNL_PCI_UNCORE_MC_DCLK, 3),
	},
	{ /* MC1 DClk CH 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 3, KNL_PCI_UNCORE_MC_DCLK, 4),
	},
	{ /* MC1 DClk CH 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 4, KNL_PCI_UNCORE_MC_DCLK, 5),
	},
	{ /* EDC0 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, KNL_PCI_UNCORE_EDC_UCLK, 0),
	},
	{ /* EDC1 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, KNL_PCI_UNCORE_EDC_UCLK, 1),
	},
	{ /* EDC2 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(17, 0, KNL_PCI_UNCORE_EDC_UCLK, 2),
	},
	{ /* EDC3 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, KNL_PCI_UNCORE_EDC_UCLK, 3),
	},
	{ /* EDC4 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(19, 0, KNL_PCI_UNCORE_EDC_UCLK, 4),
	},
	{ /* EDC5 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(20, 0, KNL_PCI_UNCORE_EDC_UCLK, 5),
	},
	{ /* EDC6 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 0, KNL_PCI_UNCORE_EDC_UCLK, 6),
	},
	{ /* EDC7 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 0, KNL_PCI_UNCORE_EDC_UCLK, 7),
	},
	{ /* EDC0 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(24, 2, KNL_PCI_UNCORE_EDC_ECLK, 0),
	},
	{ /* EDC1 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(25, 2, KNL_PCI_UNCORE_EDC_ECLK, 1),
	},
	{ /* EDC2 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(26, 2, KNL_PCI_UNCORE_EDC_ECLK, 2),
	},
	{ /* EDC3 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(27, 2, KNL_PCI_UNCORE_EDC_ECLK, 3),
	},
	{ /* EDC4 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(28, 2, KNL_PCI_UNCORE_EDC_ECLK, 4),
	},
	{ /* EDC5 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(29, 2, KNL_PCI_UNCORE_EDC_ECLK, 5),
	},
	{ /* EDC6 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(30, 2, KNL_PCI_UNCORE_EDC_ECLK, 6),
	},
	{ /* EDC7 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(31, 2, KNL_PCI_UNCORE_EDC_ECLK, 7),
	},
	{ /* M2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7817),
		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_M2PCIE, 0),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7814),
		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_IRP, 0),
	},
	{ /* end: all zeroes */ }
};
2419 
/* Skeleton PCI driver: used only to match ids, never bound as a driver. */
static struct pci_driver knl_uncore_pci_driver = {
	.name		= "knl_uncore",
	.id_table	= knl_uncore_pci_ids,
};
2424 
2425 int knl_uncore_pci_init(void)
2426 {
2427 	int ret;
2428 
2429 	/* All KNL PCI based PMON units are on the same PCI bus except IRP */
2430 	ret = snb_pci2phy_map_init(0x7814); /* IRP */
2431 	if (ret)
2432 		return ret;
2433 	ret = snb_pci2phy_map_init(0x7817); /* M2PCIe */
2434 	if (ret)
2435 		return ret;
2436 	uncore_pci_uncores = knl_pci_uncores;
2437 	uncore_pci_driver = &knl_uncore_pci_driver;
2438 	return 0;
2439 }
2440 
2441 /* end of KNL uncore support */
2442 
2443 /* Haswell-EP uncore support */
/* sysfs "format" attributes for the Haswell-EP Ubox. */
static struct attribute *hswep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_filter_tid2.attr,
	&format_attr_filter_cid.attr,
	NULL,
};

static const struct attribute_group hswep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_ubox_formats_attr,
};
2459 
2460 static int hswep_ubox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2461 {
2462 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2463 	reg1->reg = HSWEP_U_MSR_PMON_FILTER;
2464 	reg1->config = event->attr.config1 & HSWEP_U_MSR_PMON_BOX_FILTER_MASK;
2465 	reg1->idx = 0;
2466 	return 0;
2467 }
2468 
/* Ubox ops: common SNB-EP MSR ops plus filter handling via a shared reg. */
static struct intel_uncore_ops hswep_uncore_ubox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_ubox_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};

/* HSW-EP Ubox PMON: 44-bit counters plus a 48-bit fixed UCLK counter. */
static struct intel_uncore_type hswep_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 44,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.num_shared_regs	= 1,
	.ops			= &hswep_uncore_ubox_ops,
	.format_group		= &hswep_uncore_ubox_format_group,
};
2491 
/* sysfs "format" attributes for the Haswell-EP Cbox, including filters. */
static struct attribute *hswep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid3.attr,
	&format_attr_filter_link2.attr,
	&format_attr_filter_state3.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static const struct attribute_group hswep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_cbox_formats_attr,
};

/* Events restricted to specific Cbox counters. */
static struct event_constraint hswep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
	EVENT_CONSTRAINT_END
};
2524 
/*
 * Events that use the Cbox filter register; er->idx encodes which
 * filter fields apply (decoded by hswep_cbox_filter_mask()).
 */
static struct extra_reg hswep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4028, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4032, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4029, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4033, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x402A, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x12),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	EVENT_EXTRA_END
};
2566 
2567 static u64 hswep_cbox_filter_mask(int fields)
2568 {
2569 	u64 mask = 0;
2570 	if (fields & 0x1)
2571 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_TID;
2572 	if (fields & 0x2)
2573 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK;
2574 	if (fields & 0x4)
2575 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE;
2576 	if (fields & 0x8)
2577 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NID;
2578 	if (fields & 0x10) {
2579 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC;
2580 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NC;
2581 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_C6;
2582 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
2583 	}
2584 	return mask;
2585 }
2586 
/* Resolve shared-filter constraints for a Haswell-EP Cbox event. */
static struct event_constraint *
hswep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, hswep_cbox_filter_mask);
}
2592 
/*
 * Set up the filter (extra) register for a Haswell-EP Cbox event.
 *
 * Collect the filter-field selector bits from every matching entry in
 * hswep_uncore_cbox_extra_regs[]; if any apply, program this box's
 * filter MSR address and the user-requested filter value from
 * attr.config1, masked to the valid fields.  Always returns 0.
 */
static int hswep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = hswep_uncore_cbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		/* Filter MSRs are laid out per-box at HSWEP_CBO_MSR_OFFSET stride. */
		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
			    HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & hswep_cbox_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}
2613 
/*
 * Enable a Cbox event.  If the event uses the filter register, write
 * the 64-bit filter value as two consecutive 32-bit MSR writes before
 * setting the enable bit in the event control register.
 */
static void hswep_cbox_enable_event(struct intel_uncore_box *box,
				  struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		u64 filter = uncore_shared_reg_config(box, 0);
		wrmsrl(reg1->reg, filter & 0xffffffff);
		wrmsrl(reg1->reg + 1, filter >> 32);
	}

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
2628 
/* Cbox ops: generic SNB-EP MSR box control plus Cbox filter handling. */
static struct intel_uncore_ops hswep_uncore_cbox_ops = {
	.init_box		= snbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= hswep_cbox_hw_config,
	.get_constraint		= hswep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

/* HSW-EP Cbox PMON: up to 18 instances, one shared filter register each. */
static struct intel_uncore_type hswep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 18,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= hswep_uncore_cbox_constraints,
	.ops			= &hswep_uncore_cbox_ops,
	.format_group		= &hswep_uncore_cbox_format_group,
};
2656 
2657 /*
2658  * Write SBOX Initialization register bit by bit to avoid spurious #GPs
2659  */
2660 static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box)
2661 {
2662 	unsigned msr = uncore_msr_box_ctl(box);
2663 
2664 	if (msr) {
2665 		u64 init = SNBEP_PMON_BOX_CTL_INT;
2666 		u64 flags = 0;
2667 		int i;
2668 
2669 		for_each_set_bit(i, (unsigned long *)&init, 64) {
2670 			flags |= (1ULL << i);
2671 			wrmsrl(msr, flags);
2672 		}
2673 	}
2674 }
2675 
/* Common SNB-EP MSR ops, but with the bit-by-bit SBOX init workaround. */
static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = {
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.init_box		= hswep_uncore_sbox_msr_init_box
};

/* sysfs format attributes exposed under uncore_sbox*/format/ */
static struct attribute *hswep_uncore_sbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group hswep_uncore_sbox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_sbox_formats_attr,
};

/*
 * HSW-EP S-box (ring interconnect) PMU.  num_boxes may be reduced to 2
 * by hswep_uncore_cpu_init() on 6-8 core parts (see CAPID4 probe).
 */
static struct intel_uncore_type hswep_uncore_sbox = {
	.name			= "sbox",
	.num_counters		= 4,
	.num_boxes		= 4,
	.perf_ctr_bits		= 44,
	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
	.ops			= &hswep_uncore_sbox_msr_ops,
	.format_group		= &hswep_uncore_sbox_format_group,
};
2709 
2710 static int hswep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2711 {
2712 	struct hw_perf_event *hwc = &event->hw;
2713 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2714 	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
2715 
2716 	if (ev_sel >= 0xb && ev_sel <= 0xe) {
2717 		reg1->reg = HSWEP_PCU_MSR_PMON_BOX_FILTER;
2718 		reg1->idx = ev_sel - 0xb;
2719 		reg1->config = event->attr.config1 & (0xff << reg1->idx);
2720 	}
2721 	return 0;
2722 }
2723 
/* HSW-EP PCU ops: common MSR ops plus PCU filter handling. */
static struct intel_uncore_ops hswep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

/* HSW-EP power control unit PMU (single box per package). */
static struct intel_uncore_type hswep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,	/* shared reg 0 holds the PCU filter */
	.ops			= &hswep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};

/* NULL-terminated list of all MSR-based HSW-EP uncore PMUs. */
static struct intel_uncore_type *hswep_msr_uncores[] = {
	&hswep_uncore_ubox,
	&hswep_uncore_cbox,
	&hswep_uncore_sbox,
	&hswep_uncore_pcu,
	NULL,
};
2752 
/*
 * Register the HSW-EP MSR uncore PMUs, trimming the C-box count to the
 * real core count and the S-box count on small parts.
 */
void hswep_uncore_cpu_init(void)
{
	/*
	 * NOTE(review): this derives the logical package id directly from
	 * logical_proc_id, while bdx_uncore_cpu_init() below goes through
	 * topology_phys_to_logical_pkg(phys_proc_id) — confirm both index
	 * uncore_extra_pci_dev[] the same way.
	 */
	int pkg = boot_cpu_data.logical_proc_id;

	if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;

	/* Detect 6-8 core systems with only two SBOXes */
	if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) {
		u32 capid4;

		/* CAPID4 bits [7:6] == 0 identifies the two-SBOX parts. */
		pci_read_config_dword(uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3],
				      0x94, &capid4);
		if (((capid4 >> 6) & 0x3) == 0)
			hswep_uncore_sbox.num_boxes = 2;
	}

	uncore_msr_uncores = hswep_msr_uncores;
}
2772 
/* HSW-EP home agent PMU (PCI-based, two agents per package). */
static struct intel_uncore_type hswep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/*
 * IMC event aliases.  The CAS count scale 6.103515625e-5 is 64/2^20:
 * 64 bytes per CAS transaction, reported in MiB.
 */
static struct uncore_event_desc hswep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};

/* HSW-EP integrated memory controller PMU: one box per channel. */
static struct intel_uncore_type hswep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2803 
2804 static unsigned hswep_uncore_irp_ctrs[] = {0xa0, 0xa8, 0xb0, 0xb8};
2805 
2806 static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
2807 {
2808 	struct pci_dev *pdev = box->pci_dev;
2809 	struct hw_perf_event *hwc = &event->hw;
2810 	u64 count = 0;
2811 
2812 	pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
2813 	pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
2814 
2815 	return count;
2816 }
2817 
/* IRP ops: IVB-EP event enable/disable with the HSW-EP split read. */
static struct intel_uncore_ops hswep_uncore_irp_ops = {
	.init_box	= snbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= ivbep_uncore_irp_disable_event,
	.enable_event	= ivbep_uncore_irp_enable_event,
	.read_counter	= hswep_uncore_irp_read_counter,
};

/* HSW-EP IRP (IIO ring port) PMU; counters read via custom op above. */
static struct intel_uncore_type hswep_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &hswep_uncore_irp_ops,
	.format_group		= &snbep_uncore_format_group,
};

/* HSW-EP QPI link PMU; reuses SNB-EP QPI ops and match/mask regs. */
static struct intel_uncore_type hswep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.format_group		= &snbep_uncore_qpi_format_group,
};
2851 
/* Per-event counter restrictions (event code -> usable counter mask). */
static struct event_constraint hswep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x27, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	EVENT_CONSTRAINT_END
};

/* HSW-EP R2PCIe (ring-to-PCIe) PMU. */
static struct intel_uncore_type hswep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= hswep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2882 
/* Per-event counter restrictions (event code -> usable counter mask). */
static struct event_constraint hswep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};

/* HSW-EP R3QPI (ring-to-QPI) PMU: one box per QPI link. */
static struct intel_uncore_type hswep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 44,
	.constraints	= hswep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2928 
/* Indices into hswep_pci_uncores[], encoded in pci_device_id driver_data. */
enum {
	HSWEP_PCI_UNCORE_HA,
	HSWEP_PCI_UNCORE_IMC,
	HSWEP_PCI_UNCORE_IRP,
	HSWEP_PCI_UNCORE_QPI,
	HSWEP_PCI_UNCORE_R2PCIE,
	HSWEP_PCI_UNCORE_R3QPI,
};

/* NULL-terminated list of all PCI-based HSW-EP uncore PMUs. */
static struct intel_uncore_type *hswep_pci_uncores[] = {
	[HSWEP_PCI_UNCORE_HA]	= &hswep_uncore_ha,
	[HSWEP_PCI_UNCORE_IMC]	= &hswep_uncore_imc,
	[HSWEP_PCI_UNCORE_IRP]	= &hswep_uncore_irp,
	[HSWEP_PCI_UNCORE_QPI]	= &hswep_uncore_qpi,
	[HSWEP_PCI_UNCORE_R2PCIE]	= &hswep_uncore_r2pcie,
	[HSWEP_PCI_UNCORE_R3QPI]	= &hswep_uncore_r3qpi,
	NULL,
};
2947 
/*
 * HSW-EP uncore PCI device table.  driver_data packs (uncore type
 * index, box index) via UNCORE_PCI_DEV_DATA; UNCORE_EXTRA_PCI_DEV
 * entries are capability/filter devices, not PMU boxes themselves.
 */
static const struct pci_device_id hswep_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f30),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f38),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb0),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb1),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb4),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb5),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd0),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd1),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd4),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd5),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f39),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f32),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f33),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3a),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f34),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f36),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f37),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3e),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* PCU.3 (for Capability registers) */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fc0),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   HSWEP_PCI_PCU_3),
	},
	{ /* end: all zeroes */ }
};
3038 
/* Stub PCI driver: supplies the device table; the uncore core probes. */
static struct pci_driver hswep_uncore_pci_driver = {
	.name		= "hswep_uncore",
	.id_table	= hswep_uncore_pci_ids,
};
3043 
3044 int hswep_uncore_pci_init(void)
3045 {
3046 	int ret = snbep_pci2phy_map_init(0x2f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
3047 	if (ret)
3048 		return ret;
3049 	uncore_pci_uncores = hswep_pci_uncores;
3050 	uncore_pci_driver = &hswep_uncore_pci_driver;
3051 	return 0;
3052 }
3053 /* end of Haswell-EP uncore support */
3054 
3055 /* BDX uncore support */
3056 
/* BDX Ubox PMU: 2 general counters plus the fixed UCLK counter. */
static struct intel_uncore_type bdx_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_ubox_format_group,
};

/* Per-event counter restrictions for the BDX C-box. */
static struct event_constraint bdx_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
	EVENT_CONSTRAINT_END
};
3080 
/*
 * BDX C-box PMU; reuses the HSW-EP register layout and ops.
 * num_boxes is the architectural maximum, trimmed at boot.
 */
static struct intel_uncore_type bdx_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 24,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= bdx_uncore_cbox_constraints,
	.ops			= &hswep_uncore_cbox_ops,
	.format_group		= &hswep_uncore_cbox_format_group,
};

/* BDX S-box PMU; may be removed entirely at boot (see bdx_uncore_cpu_init). */
static struct intel_uncore_type bdx_uncore_sbox = {
	.name			= "sbox",
	.num_counters		= 4,
	.num_boxes		= 4,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
	.ops			= &hswep_uncore_sbox_msr_ops,
	.format_group		= &hswep_uncore_sbox_format_group,
};

/* Index of the S-box entry in bdx_msr_uncores[]; must match the array below. */
#define BDX_MSR_UNCORE_SBOX	3

/* NULL-terminated list of all MSR-based BDX uncore PMUs. */
static struct intel_uncore_type *bdx_msr_uncores[] = {
	&bdx_uncore_ubox,
	&bdx_uncore_cbox,
	&hswep_uncore_pcu,
	&bdx_uncore_sbox,
	NULL,
};
3120 
/* Bit 7 'Use Occupancy' is not available for counter 0 on BDX */
static struct event_constraint bdx_uncore_pcu_constraints[] = {
	EVENT_CONSTRAINT(0x80, 0xe, 0x80),
	EVENT_CONSTRAINT_END
};

/*
 * Register the BDX MSR uncore PMUs, trimming the C-box count and
 * dropping the S-box on parts that have none.
 */
void bdx_uncore_cpu_init(void)
{
	int pkg = topology_phys_to_logical_pkg(boot_cpu_data.phys_proc_id);

	if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	uncore_msr_uncores = bdx_msr_uncores;

	/* BDX-DE doesn't have SBOX */
	if (boot_cpu_data.x86_model == 86) {
		/*
		 * uncore_msr_uncores already aliases bdx_msr_uncores here,
		 * so this clears the same slot as the branch below.
		 */
		uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
	/* Detect systems with no SBOXes */
	} else if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) {
		struct pci_dev *pdev;
		u32 capid4;

		/* CAPID4 bits [7:6] == 0 identifies the SBOX-less parts. */
		pdev = uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3];
		pci_read_config_dword(pdev, 0x94, &capid4);
		if (((capid4 >> 6) & 0x3) == 0)
			bdx_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
	}
	/* PCU counter 0 cannot use occupancy sub-events on BDX. */
	hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints;
}
3150 
/* BDX home agent PMU (PCI-based). */
static struct intel_uncore_type bdx_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* BDX IMC PMU; shares the HSW-EP event aliases (64 B per CAS, MiB scale). */
static struct intel_uncore_type bdx_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* BDX IRP PMU; reuses the HSW-EP split 32-bit counter reads. */
static struct intel_uncore_type bdx_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &hswep_uncore_irp_ops,
	.format_group		= &snbep_uncore_format_group,
};

/* BDX QPI link PMU; reuses SNB-EP QPI ops and match/mask registers. */
static struct intel_uncore_type bdx_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.format_group		= &snbep_uncore_qpi_format_group,
};
3195 
/* Per-event counter restrictions (event code -> usable counter mask). */
static struct event_constraint bdx_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	EVENT_CONSTRAINT_END
};

/* BDX R2PCIe PMU. */
static struct intel_uncore_type bdx_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= bdx_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* Per-event counter restrictions (event code -> usable counter mask). */
static struct event_constraint bdx_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};

/* BDX R3QPI PMU: one box per QPI link. */
static struct intel_uncore_type bdx_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.constraints	= bdx_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3260 
/* Indices into bdx_pci_uncores[], encoded in pci_device_id driver_data. */
enum {
	BDX_PCI_UNCORE_HA,
	BDX_PCI_UNCORE_IMC,
	BDX_PCI_UNCORE_IRP,
	BDX_PCI_UNCORE_QPI,
	BDX_PCI_UNCORE_R2PCIE,
	BDX_PCI_UNCORE_R3QPI,
};

/* NULL-terminated list of all PCI-based BDX uncore PMUs. */
static struct intel_uncore_type *bdx_pci_uncores[] = {
	[BDX_PCI_UNCORE_HA]	= &bdx_uncore_ha,
	[BDX_PCI_UNCORE_IMC]	= &bdx_uncore_imc,
	[BDX_PCI_UNCORE_IRP]	= &bdx_uncore_irp,
	[BDX_PCI_UNCORE_QPI]	= &bdx_uncore_qpi,
	[BDX_PCI_UNCORE_R2PCIE]	= &bdx_uncore_r2pcie,
	[BDX_PCI_UNCORE_R3QPI]	= &bdx_uncore_r3qpi,
	NULL,
};
3279 
/*
 * BDX uncore PCI device table (0x6fxx device IDs).  driver_data packs
 * (uncore type index, box index); UNCORE_EXTRA_PCI_DEV entries are
 * capability/filter devices, not PMU boxes themselves.
 */
static const struct pci_device_id bdx_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f30),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f38),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb0),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb1),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb4),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb5),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd0),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd1),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd4),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd5),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f39),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f32),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f33),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3a),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f34),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f36),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f37),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3e),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* QPI Port 2 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   BDX_PCI_QPI_PORT2_FILTER),
	},
	{ /* PCU.3 (for Capability registers) */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fc0),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   HSWEP_PCI_PCU_3),
	},
	{ /* end: all zeroes */ }
};
3375 
/* Stub PCI driver: supplies the device table; the uncore core probes. */
static struct pci_driver bdx_uncore_pci_driver = {
	.name		= "bdx_uncore",
	.id_table	= bdx_uncore_pci_ids,
};

/*
 * Build the bus-to-socket map (via Ubox device 0x6f1e) and register
 * the BDX PCI uncore table and driver.
 */
int bdx_uncore_pci_init(void)
{
	int ret = snbep_pci2phy_map_init(0x6f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);

	if (ret)
		return ret;
	uncore_pci_uncores = bdx_pci_uncores;
	uncore_pci_driver = &bdx_uncore_pci_driver;
	return 0;
}
3391 
3392 /* end of BDX uncore support */
3393 
3394 /* SKX uncore support */
3395 
/* SKX Ubox PMU; same register layout as HSW-EP, IVB-EP ops. */
static struct intel_uncore_type skx_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_ubox_format_group,
};
3410 
/* sysfs format attributes for the CHA, including its filter fields. */
static struct attribute *skx_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid4.attr,
	&format_attr_filter_state5.attr,
	&format_attr_filter_rem.attr,
	&format_attr_filter_loc.attr,
	&format_attr_filter_nm.attr,
	&format_attr_filter_all_op.attr,
	&format_attr_filter_not_nm.attr,
	&format_attr_filter_opc_0.attr,
	&format_attr_filter_opc_1.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static const struct attribute_group skx_uncore_chabox_format_group = {
	.name = "format",
	.attrs = skx_uncore_cha_formats_attr,
};

/* Per-event counter restrictions for the CHA. */
static struct event_constraint skx_uncore_chabox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	EVENT_CONSTRAINT_END
};

/*
 * Events that require the CHA filter registers.  er->idx is a bit set
 * of filter field groups, interpreted by skx_cha_filter_mask().
 */
static struct extra_reg skx_uncore_cha_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x3134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x9134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x38, 0xff, 0x3),
	EVENT_EXTRA_END
};
3455 
3456 static u64 skx_cha_filter_mask(int fields)
3457 {
3458 	u64 mask = 0;
3459 
3460 	if (fields & 0x1)
3461 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_TID;
3462 	if (fields & 0x2)
3463 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LINK;
3464 	if (fields & 0x4)
3465 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_STATE;
3466 	if (fields & 0x8) {
3467 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_REM;
3468 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LOC;
3469 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC;
3470 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NM;
3471 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM;
3472 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC0;
3473 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC1;
3474 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NC;
3475 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ISOC;
3476 	}
3477 	return mask;
3478 }
3479 
/* Constraint lookup using the SKX CHA filter-mask translation. */
static struct event_constraint *
skx_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, skx_cha_filter_mask);
}

/*
 * Collect the filter-field groups every matching extra_reg entry
 * requires for this event, and stash the filter MSR address and the
 * masked config1 filter value in the event's extra_reg.
 */
static int skx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = skx_uncore_cha_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		/* Filter MSRs use the same per-box stride as HSW-EP C-boxes. */
		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
			    HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & skx_cha_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}
3506 
static struct intel_uncore_ops skx_uncore_chabox_ops = {
	/* There is no frz_en for chabox ctl */
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= skx_cha_hw_config,
	.get_constraint		= skx_cha_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

/*
 * SKX CHA (caching/home agent) PMU.  num_boxes is intentionally left
 * unset here — presumably filled in at init from the detected CHA
 * count; TODO confirm against the cpu-init path outside this view.
 */
static struct intel_uncore_type skx_uncore_chabox = {
	.name			= "cha",
	.num_counters		= 4,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= skx_uncore_chabox_constraints,
	.ops			= &skx_uncore_chabox_ops,
	.format_group		= &skx_uncore_chabox_format_group,
};
3534 
/* sysfs format attributes for the IIO boxes (channel/FC mask fields). */
static struct attribute *skx_uncore_iio_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh9.attr,
	&format_attr_ch_mask.attr,
	&format_attr_fc_mask.attr,
	NULL,
};

static const struct attribute_group skx_uncore_iio_format_group = {
	.name = "format",
	.attrs = skx_uncore_iio_formats_attr,
};

/* Per-event counter restrictions for the IIO boxes. */
static struct event_constraint skx_uncore_iio_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x88, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x95, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xd4, 0xc),
	EVENT_CONSTRAINT_END
};
3560 
/* Enable an IIO event: no filter registers, just set the EN bit. */
static void skx_iio_enable_event(struct intel_uncore_box *box,
				 struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static struct intel_uncore_ops skx_uncore_iio_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= skx_iio_enable_event,
	.read_counter		= uncore_msr_read_counter,
};
3577 
/* SKX IIO (integrated I/O) PMON type; one box per IIO stack (6 stacks). */
static struct intel_uncore_type skx_uncore_iio = {
	.name			= "iio",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= SKX_IIO0_MSR_PMON_CTL0,
	.perf_ctr		= SKX_IIO0_MSR_PMON_CTR0,
	.event_mask		= SKX_IIO_PMON_RAW_EVENT_MASK,
	/* extended mask covers the ch_mask/fc_mask filter fields */
	.event_mask_ext		= SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl		= SKX_IIO0_MSR_PMON_BOX_CTL,
	.msr_offset		= SKX_IIO_MSR_OFFSET,
	.constraints		= skx_uncore_iio_constraints,
	.ops			= &skx_uncore_iio_ops,
	.format_group		= &skx_uncore_iio_format_group,
};
3593 
/* Free-running counter groups exposed by each SKX IIO stack. */
enum perf_uncore_iio_freerunning_type_id {
	SKX_IIO_MSR_IOCLK			= 0,
	SKX_IIO_MSR_BW				= 1,
	SKX_IIO_MSR_UTIL			= 2,

	SKX_IIO_FREERUNNING_TYPE_MAX,
};


/*
 * Initializer order per struct freerunning_counters (see uncore.h):
 * { counter base MSR, counter stride, box stride, #counters, counter bits }.
 */
static struct freerunning_counters skx_iio_freerunning[] = {
	[SKX_IIO_MSR_IOCLK]	= { 0xa45, 0x1, 0x20, 1, 36 },
	[SKX_IIO_MSR_BW]	= { 0xb00, 0x1, 0x10, 8, 36 },
	[SKX_IIO_MSR_UTIL]	= { 0xb08, 0x1, 0x10, 8, 36 },
};
3608 
/*
 * Event aliases for the SKX IIO free-running counters. The bandwidth
 * events use scale 3.814697266e-6 (= 4/2^20, i.e. counts presumably in
 * 4-byte units reported as MiB -- matches the other uncore drivers).
 */
static struct uncore_event_desc skx_uncore_iio_freerunning_events[] = {
	/* Free-Running IO CLOCKS Counter */
	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
	/* Free-Running IIO BANDWIDTH Counters */
	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port0,		"event=0xff,umask=0x24"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1,		"event=0xff,umask=0x25"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2,		"event=0xff,umask=0x26"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3,		"event=0xff,umask=0x27"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit,	"MiB"),
	/* Free-running IIO UTILIZATION Counters */
	INTEL_UNCORE_EVENT_DESC(util_in_port0,		"event=0xff,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(util_out_port0,		"event=0xff,umask=0x31"),
	INTEL_UNCORE_EVENT_DESC(util_in_port1,		"event=0xff,umask=0x32"),
	INTEL_UNCORE_EVENT_DESC(util_out_port1,		"event=0xff,umask=0x33"),
	INTEL_UNCORE_EVENT_DESC(util_in_port2,		"event=0xff,umask=0x34"),
	INTEL_UNCORE_EVENT_DESC(util_out_port2,		"event=0xff,umask=0x35"),
	INTEL_UNCORE_EVENT_DESC(util_in_port3,		"event=0xff,umask=0x36"),
	INTEL_UNCORE_EVENT_DESC(util_out_port3,		"event=0xff,umask=0x37"),
	{ /* end: all zeroes */ },
};
3648 
/*
 * Free-running counters cannot be started/stopped, so only read and
 * hw_config ops are needed.
 */
static struct intel_uncore_ops skx_uncore_iio_freerunning_ops = {
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= uncore_freerunning_hw_config,
};

/* Only event/umask are meaningful for free-running counter aliases. */
static struct attribute *skx_uncore_iio_freerunning_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	NULL,
};

static const struct attribute_group skx_uncore_iio_freerunning_format_group = {
	.name = "format",
	.attrs = skx_uncore_iio_freerunning_formats_attr,
};
3664 
/* SKX IIO free-running counters: 1 ioclk + 8 bw + 8 util = 17 per box. */
static struct intel_uncore_type skx_uncore_iio_free_running = {
	.name			= "iio_free_running",
	.num_counters		= 17,
	.num_boxes		= 6,
	.num_freerunning_types	= SKX_IIO_FREERUNNING_TYPE_MAX,
	.freerunning		= skx_iio_freerunning,
	.ops			= &skx_uncore_iio_freerunning_ops,
	.event_descs		= skx_uncore_iio_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
3675 
/* Generic SKX format group shared by IRP, M2M, M2PCIe, M3UPI and IMC. */
static struct attribute *skx_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group skx_uncore_format_group = {
	.name = "format",
	.attrs = skx_uncore_formats_attr,
};
3689 
/* SKX IRP (IIO ring port) PMON type; shares the IIO MSR ops. */
static struct intel_uncore_type skx_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= SKX_IRP0_MSR_PMON_CTL0,
	.perf_ctr		= SKX_IRP0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SKX_IRP0_MSR_PMON_BOX_CTL,
	.msr_offset		= SKX_IRP_MSR_OFFSET,
	.ops			= &skx_uncore_iio_ops,
	.format_group		= &skx_uncore_format_group,
};
3703 
/* SKX PCU format attributes, including the occupancy filter band fields. */
static struct attribute *skx_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge_det.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

/* Not const: also referenced by the SNR PCU type further down. */
static struct attribute_group skx_uncore_pcu_format_group = {
	.name = "format",
	.attrs = skx_uncore_pcu_formats_attr,
};
3723 
/* SKX PCU ops: common IVB-EP MSR ops plus HSW-EP/SNB-EP filter handling. */
static struct intel_uncore_ops skx_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};
3730 
/* SKX PCU (power control unit) PMON type. */
static struct intel_uncore_type skx_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,	/* shared occupancy filter register */
	.ops			= &skx_uncore_pcu_ops,
	.format_group		= &skx_uncore_pcu_format_group,
};
3744 
3745 static struct intel_uncore_type *skx_msr_uncores[] = {
3746 	&skx_uncore_ubox,
3747 	&skx_uncore_chabox,
3748 	&skx_uncore_iio,
3749 	&skx_uncore_iio_free_running,
3750 	&skx_uncore_irp,
3751 	&skx_uncore_pcu,
3752 	NULL,
3753 };
3754 
3755 /*
3756  * To determine the number of CHAs, it should read bits 27:0 in the CAPID6
3757  * register which located at Device 30, Function 3, Offset 0x9C. PCI ID 0x2083.
3758  */
3759 #define SKX_CAPID6		0x9c
3760 #define SKX_CHA_BIT_MASK	GENMASK(27, 0)
3761 
3762 static int skx_count_chabox(void)
3763 {
3764 	struct pci_dev *dev = NULL;
3765 	u32 val = 0;
3766 
3767 	dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2083, dev);
3768 	if (!dev)
3769 		goto out;
3770 
3771 	pci_read_config_dword(dev, SKX_CAPID6, &val);
3772 	val &= SKX_CHA_BIT_MASK;
3773 out:
3774 	pci_dev_put(dev);
3775 	return hweight32(val);
3776 }
3777 
3778 void skx_uncore_cpu_init(void)
3779 {
3780 	skx_uncore_chabox.num_boxes = skx_count_chabox();
3781 	uncore_msr_uncores = skx_msr_uncores;
3782 }
3783 
/* SKX IMC (memory controller channel) PMON type, PCI based. */
static struct intel_uncore_type skx_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 6,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
3800 
/* SKX UPI format attributes; umask_ext covers the extended umask bits. */
static struct attribute *skx_upi_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group skx_upi_uncore_format_group = {
	.name = "format",
	.attrs = skx_upi_uncore_formats_attr,
};
3814 
3815 static void skx_upi_uncore_pci_init_box(struct intel_uncore_box *box)
3816 {
3817 	struct pci_dev *pdev = box->pci_dev;
3818 
3819 	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
3820 	pci_write_config_dword(pdev, SKX_UPI_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
3821 }
3822 
/* PCI box ops for SKX UPI; only init_box differs from the SNB-EP set. */
static struct intel_uncore_ops skx_upi_uncore_pci_ops = {
	.init_box	= skx_upi_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};
3831 
/* SKX UPI (inter-socket link) PMON type, one box per link. */
static struct intel_uncore_type skx_uncore_upi = {
	.name		= "upi",
	.num_counters   = 4,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SKX_UPI_PCI_PMON_CTR0,
	.event_ctl	= SKX_UPI_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext = SKX_UPI_CTL_UMASK_EXT,
	.box_ctl	= SKX_UPI_PCI_PMON_BOX_CTL,
	.ops		= &skx_upi_uncore_pci_ops,
	.format_group	= &skx_upi_uncore_format_group,
};
3845 
3846 static void skx_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
3847 {
3848 	struct pci_dev *pdev = box->pci_dev;
3849 
3850 	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
3851 	pci_write_config_dword(pdev, SKX_M2M_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
3852 }
3853 
/* PCI box ops for SKX M2M; only init_box differs from the SNB-EP set. */
static struct intel_uncore_ops skx_m2m_uncore_pci_ops = {
	.init_box	= skx_m2m_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};

/* SKX M2M (mesh-to-memory) PMON type. */
static struct intel_uncore_type skx_uncore_m2m = {
	.name		= "m2m",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SKX_M2M_PCI_PMON_CTR0,
	.event_ctl	= SKX_M2M_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SKX_M2M_PCI_PMON_BOX_CTL,
	.ops		= &skx_m2m_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
3875 
/* SKX M2PCIe counter constraints: (event code, allowed-counter bitmask). */
static struct event_constraint skx_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	EVENT_CONSTRAINT_END
};

/* SKX M2PCIe (mesh-to-PCIe) PMON type. */
static struct intel_uncore_type skx_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters   = 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.constraints	= skx_uncore_m2pcie_constraints,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
3894 
/* SKX M3UPI counter constraints: (event code, allowed-counter bitmask). */
static struct event_constraint skx_uncore_m3upi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x51, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x52, 0x7),
	EVENT_CONSTRAINT_END
};

/* SKX M3UPI (mesh-to-UPI) PMON type, one box per UPI link. */
static struct intel_uncore_type skx_uncore_m3upi = {
	.name		= "m3upi",
	.num_counters   = 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.constraints	= skx_uncore_m3upi_constraints,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
3920 
/* Indices into skx_pci_uncores[], referenced from the PCI ID table below. */
enum {
	SKX_PCI_UNCORE_IMC,
	SKX_PCI_UNCORE_M2M,
	SKX_PCI_UNCORE_UPI,
	SKX_PCI_UNCORE_M2PCIE,
	SKX_PCI_UNCORE_M3UPI,
};

static struct intel_uncore_type *skx_pci_uncores[] = {
	[SKX_PCI_UNCORE_IMC]	= &skx_uncore_imc,
	[SKX_PCI_UNCORE_M2M]	= &skx_uncore_m2m,
	[SKX_PCI_UNCORE_UPI]	= &skx_uncore_upi,
	[SKX_PCI_UNCORE_M2PCIE]	= &skx_uncore_m2pcie,
	[SKX_PCI_UNCORE_M3UPI]	= &skx_uncore_m3upi,
	NULL,
};
3937 
/*
 * PCI IDs of the SKX uncore PMON devices. driver_data packs the fixed
 * (device, function) location plus the uncore type index and box id via
 * UNCORE_PCI_DEV_FULL_DATA().
 */
static const struct pci_device_id skx_uncore_pci_ids[] = {
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 2, SKX_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 6, SKX_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 2, SKX_PCI_UNCORE_IMC, 2),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 2, SKX_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 6, SKX_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 2, SKX_PCI_UNCORE_IMC, 5),
	},
	{ /* M2M0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 0, SKX_PCI_UNCORE_M2M, 0),
	},
	{ /* M2M1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 0, SKX_PCI_UNCORE_M2M, 1),
	},
	{ /* UPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, SKX_PCI_UNCORE_UPI, 0),
	},
	{ /* UPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, SKX_PCI_UNCORE_UPI, 1),
	},
	{ /* UPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, SKX_PCI_UNCORE_UPI, 2),
	},
	{ /* M2PCIe 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 1, SKX_PCI_UNCORE_M2PCIE, 0),
	},
	{ /* M2PCIe 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 1, SKX_PCI_UNCORE_M2PCIE, 1),
	},
	{ /* M2PCIe 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(23, 1, SKX_PCI_UNCORE_M2PCIE, 2),
	},
	{ /* M2PCIe 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3),
	},
	{ /* M3UPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 0),
	},
	{ /* M3UPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204E),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 2, SKX_PCI_UNCORE_M3UPI, 1),
	},
	{ /* M3UPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 5, SKX_PCI_UNCORE_M3UPI, 2),
	},
	{ /* end: all zeroes */ }
};
4013 
4014 
/* PCI driver stub; probing is handled by the generic uncore core. */
static struct pci_driver skx_uncore_pci_driver = {
	.name		= "skx_uncore",
	.id_table	= skx_uncore_pci_ids,
};
4019 
4020 int skx_uncore_pci_init(void)
4021 {
4022 	/* need to double check pci address */
4023 	int ret = snbep_pci2phy_map_init(0x2014, SKX_CPUNODEID, SKX_GIDNIDMAP, false);
4024 
4025 	if (ret)
4026 		return ret;
4027 
4028 	uncore_pci_uncores = skx_pci_uncores;
4029 	uncore_pci_driver = &skx_uncore_pci_driver;
4030 	return 0;
4031 }
4032 
4033 /* end of SKX uncore support */
4034 
4035 /* SNR uncore support */
4036 
/* SNR UBOX (system config controller) PMON type, including a fixed UCLK counter. */
static struct intel_uncore_type snr_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= SNR_U_MSR_PMON_CTR0,
	.event_ctl		= SNR_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= SNR_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= SNR_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_format_group,
};
4051 
/* SNR CHA format attributes, including the TID filter field. */
static struct attribute *snr_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext2.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid5.attr,
	NULL,
};
static const struct attribute_group snr_uncore_chabox_format_group = {
	.name = "format",
	.attrs = snr_uncore_cha_formats_attr,
};
4066 
4067 static int snr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4068 {
4069 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
4070 
4071 	reg1->reg = SNR_C0_MSR_PMON_BOX_FILTER0 +
4072 		    box->pmu->type->msr_offset * box->pmu->pmu_idx;
4073 	reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
4074 	reg1->idx = 0;
4075 
4076 	return 0;
4077 }
4078 
4079 static void snr_cha_enable_event(struct intel_uncore_box *box,
4080 				   struct perf_event *event)
4081 {
4082 	struct hw_perf_event *hwc = &event->hw;
4083 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
4084 
4085 	if (reg1->idx != EXTRA_REG_NONE)
4086 		wrmsrl(reg1->reg, reg1->config);
4087 
4088 	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
4089 }
4090 
/* MSR-based box ops for SNR CHA; custom enable/hw_config for the TID filter. */
static struct intel_uncore_ops snr_uncore_chabox_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= snr_cha_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= snr_cha_hw_config,
};
4100 
/* SNR CHA (caching/home agent) PMON type. */
static struct intel_uncore_type snr_uncore_chabox = {
	.name			= "cha",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= SNR_CHA_MSR_PMON_CTL0,
	.perf_ctr		= SNR_CHA_MSR_PMON_CTR0,
	.box_ctl		= SNR_CHA_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SNR_CHA_RAW_EVENT_MASK_EXT,
	.ops			= &snr_uncore_chabox_ops,
	.format_group		= &snr_uncore_chabox_format_group,
};
4115 
/* SNR IIO format attributes; SNR uses wider ch_mask/fc_mask fields than SKX. */
static struct attribute *snr_uncore_iio_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh9.attr,
	&format_attr_ch_mask2.attr,
	&format_attr_fc_mask2.attr,
	NULL,
};

static const struct attribute_group snr_uncore_iio_format_group = {
	.name = "format",
	.attrs = snr_uncore_iio_formats_attr,
};
4131 
/* SNR IIO PMON type. */
static struct intel_uncore_type snr_uncore_iio = {
	.name			= "iio",
	.num_counters		= 4,
	.num_boxes		= 5,
	.perf_ctr_bits		= 48,
	.event_ctl		= SNR_IIO_MSR_PMON_CTL0,
	.perf_ctr		= SNR_IIO_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl		= SNR_IIO_MSR_PMON_BOX_CTL,
	.msr_offset		= SNR_IIO_MSR_OFFSET,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &snr_uncore_iio_format_group,
};
4146 
/* SNR IRP (IIO ring port) PMON type. */
static struct intel_uncore_type snr_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 5,
	.perf_ctr_bits		= 48,
	.event_ctl		= SNR_IRP0_MSR_PMON_CTL0,
	.perf_ctr		= SNR_IRP0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNR_IRP0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNR_IRP_MSR_OFFSET,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_format_group,
};
4160 
/* SNR M2PCIe PMON type; MSR based on SNR (PCI based on SKX). */
static struct intel_uncore_type snr_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters	= 4,
	.num_boxes	= 5,
	.perf_ctr_bits	= 48,
	.event_ctl	= SNR_M2PCIE_MSR_PMON_CTL0,
	.perf_ctr	= SNR_M2PCIE_MSR_PMON_CTR0,
	.box_ctl	= SNR_M2PCIE_MSR_PMON_BOX_CTL,
	.msr_offset	= SNR_M2PCIE_MSR_OFFSET,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_format_group,
};
4174 
4175 static int snr_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4176 {
4177 	struct hw_perf_event *hwc = &event->hw;
4178 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
4179 	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
4180 
4181 	if (ev_sel >= 0xb && ev_sel <= 0xe) {
4182 		reg1->reg = SNR_PCU_MSR_PMON_BOX_FILTER;
4183 		reg1->idx = ev_sel - 0xb;
4184 		reg1->config = event->attr.config1 & (0xff << reg1->idx);
4185 	}
4186 	return 0;
4187 }
4188 
/* SNR PCU ops: common IVB-EP MSR ops plus SNR filter-band hw_config. */
static struct intel_uncore_ops snr_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snr_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};
4195 
/* SNR PCU (power control unit) PMON type; reuses the SKX format group. */
static struct intel_uncore_type snr_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNR_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNR_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNR_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,	/* shared occupancy filter register */
	.ops			= &snr_uncore_pcu_ops,
	.format_group		= &skx_uncore_pcu_format_group,
};
4209 
/* Free-running counter groups exposed by each SNR IIO stack. */
enum perf_uncore_snr_iio_freerunning_type_id {
	SNR_IIO_MSR_IOCLK,
	SNR_IIO_MSR_BW_IN,

	SNR_IIO_FREERUNNING_TYPE_MAX,
};

/* { counter base MSR, counter stride, box stride, #counters, counter bits } */
static struct freerunning_counters snr_iio_freerunning[] = {
	[SNR_IIO_MSR_IOCLK]	= { 0x1eac, 0x1, 0x10, 1, 48 },
	[SNR_IIO_MSR_BW_IN]	= { 0x1f00, 0x1, 0x10, 8, 48 },
};
4221 
/*
 * Event aliases for the SNR IIO free-running counters; SNR exposes only
 * inbound bandwidth counters (8 ports) plus the I/O clock.
 */
static struct uncore_event_desc snr_uncore_iio_freerunning_events[] = {
	/* Free-Running IIO CLOCKS Counter */
	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
	/* Free-Running IIO BANDWIDTH IN Counters */
	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4,		"event=0xff,umask=0x24"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5,		"event=0xff,umask=0x25"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6,		"event=0xff,umask=0x26"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7,		"event=0xff,umask=0x27"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit,	"MiB"),
	{ /* end: all zeroes */ },
};
4252 
/* SNR IIO free-running counters: 1 ioclk + 8 bw_in = 9 per box. */
static struct intel_uncore_type snr_uncore_iio_free_running = {
	.name			= "iio_free_running",
	.num_counters		= 9,
	.num_boxes		= 5,
	.num_freerunning_types	= SNR_IIO_FREERUNNING_TYPE_MAX,
	.freerunning		= snr_iio_freerunning,
	.ops			= &skx_uncore_iio_freerunning_ops,
	.event_descs		= snr_uncore_iio_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
4263 
/* All MSR-based SNR uncore types, NULL-terminated for registration. */
static struct intel_uncore_type *snr_msr_uncores[] = {
	&snr_uncore_ubox,
	&snr_uncore_chabox,
	&snr_uncore_iio,
	&snr_uncore_irp,
	&snr_uncore_m2pcie,
	&snr_uncore_pcu,
	&snr_uncore_iio_free_running,
	NULL,
};

/* Register the MSR-based SNR uncore types. */
void snr_uncore_cpu_init(void)
{
	uncore_msr_uncores = snr_msr_uncores;
}
4279 
4280 static void snr_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
4281 {
4282 	struct pci_dev *pdev = box->pci_dev;
4283 	int box_ctl = uncore_pci_box_ctl(box);
4284 
4285 	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
4286 	pci_write_config_dword(pdev, box_ctl, IVBEP_PMON_BOX_CTL_INT);
4287 }
4288 
/* PCI box ops for SNR M2M; only init_box differs from the SNB-EP set. */
static struct intel_uncore_ops snr_m2m_uncore_pci_ops = {
	.init_box	= snr_m2m_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};

/* SNR M2M format attributes; umask_ext3 covers the extended umask bits. */
static struct attribute *snr_m2m_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext3.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group snr_m2m_uncore_format_group = {
	.name = "format",
	.attrs = snr_m2m_uncore_formats_attr,
};
4311 
/* SNR M2M (mesh-to-memory) PMON type. */
static struct intel_uncore_type snr_uncore_m2m = {
	.name		= "m2m",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SNR_M2M_PCI_PMON_CTR0,
	.event_ctl	= SNR_M2M_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext	= SNR_M2M_PCI_PMON_UMASK_EXT,
	.box_ctl	= SNR_M2M_PCI_PMON_BOX_CTL,
	.ops		= &snr_m2m_uncore_pci_ops,
	.format_group	= &snr_m2m_uncore_format_group,
};
4325 
/* Indices into snr_pci_uncores[], referenced from the PCI ID table below. */
enum {
	SNR_PCI_UNCORE_M2M,
};

static struct intel_uncore_type *snr_pci_uncores[] = {
	[SNR_PCI_UNCORE_M2M]		= &snr_uncore_m2m,
	NULL,
};

/* PCI IDs of the SNR uncore PMON devices (M2M only). */
static const struct pci_device_id snr_uncore_pci_ids[] = {
	{ /* M2M */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, SNR_PCI_UNCORE_M2M, 0),
	},
	{ /* end: all zeroes */ }
};

/* PCI driver stub; probing is handled by the generic uncore core. */
static struct pci_driver snr_uncore_pci_driver = {
	.name		= "snr_uncore",
	.id_table	= snr_uncore_pci_ids,
};
4347 
4348 int snr_uncore_pci_init(void)
4349 {
4350 	/* SNR UBOX DID */
4351 	int ret = snbep_pci2phy_map_init(0x3460, SKX_CPUNODEID,
4352 					 SKX_GIDNIDMAP, true);
4353 
4354 	if (ret)
4355 		return ret;
4356 
4357 	uncore_pci_uncores = snr_pci_uncores;
4358 	uncore_pci_driver = &snr_uncore_pci_driver;
4359 	return 0;
4360 }
4361 
4362 static struct pci_dev *snr_uncore_get_mc_dev(int id)
4363 {
4364 	struct pci_dev *mc_dev = NULL;
4365 	int phys_id, pkg;
4366 
4367 	while (1) {
4368 		mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3451, mc_dev);
4369 		if (!mc_dev)
4370 			break;
4371 		phys_id = uncore_pcibus_to_physid(mc_dev->bus);
4372 		if (phys_id < 0)
4373 			continue;
4374 		pkg = topology_phys_to_logical_pkg(phys_id);
4375 		if (pkg < 0)
4376 			continue;
4377 		else if (pkg == id)
4378 			break;
4379 	}
4380 	return mc_dev;
4381 }
4382 
/*
 * Map the IMC PMON MMIO region for this box and write the box-control
 * init value. The base address is assembled from two PCI config
 * registers of the per-package memory controller device.
 */
static void snr_uncore_mmio_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = snr_uncore_get_mc_dev(box->dieid);
	unsigned int box_ctl = uncore_mmio_box_ctl(box);
	resource_size_t addr;
	u32 pci_dword;

	/* silently skip when the MC device for this die is absent */
	if (!pdev)
		return;

	/* high part of the base address (bits above the MEM0 window) */
	pci_read_config_dword(pdev, SNR_IMC_MMIO_BASE_OFFSET, &pci_dword);
	addr = (pci_dword & SNR_IMC_MMIO_BASE_MASK) << 23;

	pci_read_config_dword(pdev, SNR_IMC_MMIO_MEM0_OFFSET, &pci_dword);
	addr |= (pci_dword & SNR_IMC_MMIO_MEM0_MASK) << 12;

	addr += box_ctl;

	box->io_addr = ioremap(addr, SNR_IMC_MMIO_SIZE);
	if (!box->io_addr)
		return;

	writel(IVBEP_PMON_BOX_CTL_INT, box->io_addr);
}
4407 
4408 static void snr_uncore_mmio_disable_box(struct intel_uncore_box *box)
4409 {
4410 	u32 config;
4411 
4412 	if (!box->io_addr)
4413 		return;
4414 
4415 	config = readl(box->io_addr);
4416 	config |= SNBEP_PMON_BOX_CTL_FRZ;
4417 	writel(config, box->io_addr);
4418 }
4419 
4420 static void snr_uncore_mmio_enable_box(struct intel_uncore_box *box)
4421 {
4422 	u32 config;
4423 
4424 	if (!box->io_addr)
4425 		return;
4426 
4427 	config = readl(box->io_addr);
4428 	config &= ~SNBEP_PMON_BOX_CTL_FRZ;
4429 	writel(config, box->io_addr);
4430 }
4431 
4432 static void snr_uncore_mmio_enable_event(struct intel_uncore_box *box,
4433 					   struct perf_event *event)
4434 {
4435 	struct hw_perf_event *hwc = &event->hw;
4436 
4437 	if (!box->io_addr)
4438 		return;
4439 
4440 	writel(hwc->config | SNBEP_PMON_CTL_EN,
4441 	       box->io_addr + hwc->config_base);
4442 }
4443 
4444 static void snr_uncore_mmio_disable_event(struct intel_uncore_box *box,
4445 					    struct perf_event *event)
4446 {
4447 	struct hw_perf_event *hwc = &event->hw;
4448 
4449 	if (!box->io_addr)
4450 		return;
4451 
4452 	writel(hwc->config, box->io_addr + hwc->config_base);
4453 }
4454 
/* MMIO-based box ops for the SNR IMC. */
static struct intel_uncore_ops snr_uncore_mmio_ops = {
	.init_box	= snr_uncore_mmio_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.disable_box	= snr_uncore_mmio_disable_box,
	.enable_box	= snr_uncore_mmio_enable_box,
	.disable_event	= snr_uncore_mmio_disable_event,
	.enable_event	= snr_uncore_mmio_enable_event,
	.read_counter	= uncore_mmio_read_counter,
};
4464 
/*
 * SNR IMC event aliases. CAS count scale 6.103515625e-5 = 64/2^20
 * (64-byte cache lines reported as MiB).
 */
static struct uncore_event_desc snr_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};
4475 
/* SNR IMC PMON type, accessed through MMIO rather than PCI config space. */
static struct intel_uncore_type snr_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNR_IMC_MMIO_PMON_FIXED_CTR,
	.fixed_ctl	= SNR_IMC_MMIO_PMON_FIXED_CTL,
	.event_descs	= snr_uncore_imc_events,
	.perf_ctr	= SNR_IMC_MMIO_PMON_CTR0,
	.event_ctl	= SNR_IMC_MMIO_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNR_IMC_MMIO_PMON_BOX_CTL,
	.mmio_offset	= SNR_IMC_MMIO_OFFSET,
	.ops		= &snr_uncore_mmio_ops,
	.format_group	= &skx_uncore_format_group,
};
4493 
/* Indices of the SNR IMC free-running counter groups in snr_imc_freerunning[]. */
enum perf_uncore_snr_imc_freerunning_type_id {
	SNR_IMC_DCLK,	/* dclk counter (one per box) */
	SNR_IMC_DDR,	/* DDR read/write counters (two per box) */

	SNR_IMC_FREERUNNING_TYPE_MAX,
};
4500 
/*
 * MMIO layout of the SNR IMC free-running counters.
 * Initializer order appears to be { counter_base, counter_offset,
 * box_offset, num_counters, bits } per struct freerunning_counters --
 * NOTE(review): confirm against the definition in uncore.h.
 */
static struct freerunning_counters snr_imc_freerunning[] = {
	[SNR_IMC_DCLK]	= { 0x22b0, 0x0, 0, 1, 48 },
	[SNR_IMC_DDR]	= { 0x2290, 0x8, 0, 2, 48 },
};
4505 
4506 static struct uncore_event_desc snr_uncore_imc_freerunning_events[] = {
4507 	INTEL_UNCORE_EVENT_DESC(dclk,		"event=0xff,umask=0x10"),
4508 
4509 	INTEL_UNCORE_EVENT_DESC(read,		"event=0xff,umask=0x20"),
4510 	INTEL_UNCORE_EVENT_DESC(read.scale,	"3.814697266e-6"),
4511 	INTEL_UNCORE_EVENT_DESC(read.unit,	"MiB"),
4512 	INTEL_UNCORE_EVENT_DESC(write,		"event=0xff,umask=0x21"),
4513 	INTEL_UNCORE_EVENT_DESC(write.scale,	"3.814697266e-6"),
4514 	INTEL_UNCORE_EVENT_DESC(write.unit,	"MiB"),
4515 	{ /* end: all zeroes */ },
4516 };
4517 
/*
 * Free-running counters cannot be started, stopped or configured, so
 * only box init/exit, reads and the free-running hw_config validator
 * are wired up; no enable/disable callbacks.
 */
static struct intel_uncore_ops snr_uncore_imc_freerunning_ops = {
	.init_box	= snr_uncore_mmio_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};
4524 
/*
 * Pseudo uncore type wrapping the IMC free-running counters:
 * 3 counters total (1 dclk + 2 DDR) described by snr_imc_freerunning[].
 */
static struct intel_uncore_type snr_uncore_imc_free_running = {
	.name			= "imc_free_running",
	.num_counters		= 3,
	.num_boxes		= 1,
	.num_freerunning_types	= SNR_IMC_FREERUNNING_TYPE_MAX,
	.freerunning		= snr_imc_freerunning,
	.ops			= &snr_uncore_imc_freerunning_ops,
	.event_descs		= snr_uncore_imc_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
4535 
/* NULL-terminated list of all SNR MMIO-based uncore types. */
static struct intel_uncore_type *snr_mmio_uncores[] = {
	&snr_uncore_imc,
	&snr_uncore_imc_free_running,
	NULL,
};
4541 
/* Publish the SNR MMIO uncore types to the generic uncore driver. */
void snr_uncore_mmio_init(void)
{
	uncore_mmio_uncores = snr_mmio_uncores;
}
4546 
4547 /* end of SNR uncore support */
4548