1 // SPDX-License-Identifier: GPL-2.0
2 /* SandyBridge-EP/IvyTown uncore support */
3 #include "uncore.h"
4 
5 /* SNB-EP pci bus to socket mapping */
6 #define SNBEP_CPUNODEID			0x40
7 #define SNBEP_GIDNIDMAP			0x54
8 
9 /* SNB-EP Box level control */
10 #define SNBEP_PMON_BOX_CTL_RST_CTRL	(1 << 0)
11 #define SNBEP_PMON_BOX_CTL_RST_CTRS	(1 << 1)
12 #define SNBEP_PMON_BOX_CTL_FRZ		(1 << 8)
13 #define SNBEP_PMON_BOX_CTL_FRZ_EN	(1 << 16)
14 #define SNBEP_PMON_BOX_CTL_INT		(SNBEP_PMON_BOX_CTL_RST_CTRL | \
15 					 SNBEP_PMON_BOX_CTL_RST_CTRS | \
16 					 SNBEP_PMON_BOX_CTL_FRZ_EN)
17 /* SNB-EP event control */
18 #define SNBEP_PMON_CTL_EV_SEL_MASK	0x000000ff
19 #define SNBEP_PMON_CTL_UMASK_MASK	0x0000ff00
20 #define SNBEP_PMON_CTL_RST		(1 << 17)
21 #define SNBEP_PMON_CTL_EDGE_DET		(1 << 18)
22 #define SNBEP_PMON_CTL_EV_SEL_EXT	(1 << 21)
23 #define SNBEP_PMON_CTL_EN		(1 << 22)
24 #define SNBEP_PMON_CTL_INVERT		(1 << 23)
25 #define SNBEP_PMON_CTL_TRESH_MASK	0xff000000
26 #define SNBEP_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
27 					 SNBEP_PMON_CTL_UMASK_MASK | \
28 					 SNBEP_PMON_CTL_EDGE_DET | \
29 					 SNBEP_PMON_CTL_INVERT | \
30 					 SNBEP_PMON_CTL_TRESH_MASK)
31 
32 /* SNB-EP Ubox event control */
33 #define SNBEP_U_MSR_PMON_CTL_TRESH_MASK		0x1f000000
34 #define SNBEP_U_MSR_PMON_RAW_EVENT_MASK		\
35 				(SNBEP_PMON_CTL_EV_SEL_MASK | \
36 				 SNBEP_PMON_CTL_UMASK_MASK | \
37 				 SNBEP_PMON_CTL_EDGE_DET | \
38 				 SNBEP_PMON_CTL_INVERT | \
39 				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
40 
41 #define SNBEP_CBO_PMON_CTL_TID_EN		(1 << 19)
42 #define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK	(SNBEP_PMON_RAW_EVENT_MASK | \
43 						 SNBEP_CBO_PMON_CTL_TID_EN)
44 
45 /* SNB-EP PCU event control */
46 #define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK	0x0000c000
47 #define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK	0x1f000000
48 #define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT	(1 << 30)
49 #define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET	(1 << 31)
50 #define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
51 				(SNBEP_PMON_CTL_EV_SEL_MASK | \
52 				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
53 				 SNBEP_PMON_CTL_EDGE_DET | \
54 				 SNBEP_PMON_CTL_INVERT | \
55 				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
56 				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
57 				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
58 
59 #define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
60 				(SNBEP_PMON_RAW_EVENT_MASK | \
61 				 SNBEP_PMON_CTL_EV_SEL_EXT)
62 
63 /* SNB-EP pci control register */
64 #define SNBEP_PCI_PMON_BOX_CTL			0xf4
65 #define SNBEP_PCI_PMON_CTL0			0xd8
66 /* SNB-EP pci counter register */
67 #define SNBEP_PCI_PMON_CTR0			0xa0
68 
69 /* SNB-EP home agent register */
70 #define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0	0x40
71 #define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1	0x44
72 #define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH	0x48
73 /* SNB-EP memory controller register */
74 #define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL		0xf0
75 #define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR		0xd0
76 /* SNB-EP QPI register */
77 #define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0		0x228
78 #define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1		0x22c
79 #define SNBEP_Q_Py_PCI_PMON_PKT_MASK0		0x238
80 #define SNBEP_Q_Py_PCI_PMON_PKT_MASK1		0x23c
81 
82 /* SNB-EP Ubox register */
83 #define SNBEP_U_MSR_PMON_CTR0			0xc16
84 #define SNBEP_U_MSR_PMON_CTL0			0xc10
85 
86 #define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL		0xc08
87 #define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR		0xc09
88 
89 /* SNB-EP Cbo register */
90 #define SNBEP_C0_MSR_PMON_CTR0			0xd16
91 #define SNBEP_C0_MSR_PMON_CTL0			0xd10
92 #define SNBEP_C0_MSR_PMON_BOX_CTL		0xd04
93 #define SNBEP_C0_MSR_PMON_BOX_FILTER		0xd14
94 #define SNBEP_CBO_MSR_OFFSET			0x20
95 
96 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID	0x1f
97 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID	0x3fc00
98 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE	0x7c0000
99 #define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC	0xff800000
100 
101 #define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) {	\
102 	.event = (e),				\
103 	.msr = SNBEP_C0_MSR_PMON_BOX_FILTER,	\
104 	.config_mask = (m),			\
105 	.idx = (i)				\
106 }
107 
108 /* SNB-EP PCU register */
109 #define SNBEP_PCU_MSR_PMON_CTR0			0xc36
110 #define SNBEP_PCU_MSR_PMON_CTL0			0xc30
111 #define SNBEP_PCU_MSR_PMON_BOX_CTL		0xc24
112 #define SNBEP_PCU_MSR_PMON_BOX_FILTER		0xc34
113 #define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK	0xffffffff
114 #define SNBEP_PCU_MSR_CORE_C3_CTR		0x3fc
115 #define SNBEP_PCU_MSR_CORE_C6_CTR		0x3fd
116 
117 /* IVBEP event control */
118 #define IVBEP_PMON_BOX_CTL_INT		(SNBEP_PMON_BOX_CTL_RST_CTRL | \
119 					 SNBEP_PMON_BOX_CTL_RST_CTRS)
120 #define IVBEP_PMON_RAW_EVENT_MASK		(SNBEP_PMON_CTL_EV_SEL_MASK | \
121 					 SNBEP_PMON_CTL_UMASK_MASK | \
122 					 SNBEP_PMON_CTL_EDGE_DET | \
123 					 SNBEP_PMON_CTL_TRESH_MASK)
124 /* IVBEP Ubox */
125 #define IVBEP_U_MSR_PMON_GLOBAL_CTL		0xc00
126 #define IVBEP_U_PMON_GLOBAL_FRZ_ALL		(1 << 31)
127 #define IVBEP_U_PMON_GLOBAL_UNFRZ_ALL		(1 << 29)
128 
129 #define IVBEP_U_MSR_PMON_RAW_EVENT_MASK	\
130 				(SNBEP_PMON_CTL_EV_SEL_MASK | \
131 				 SNBEP_PMON_CTL_UMASK_MASK | \
132 				 SNBEP_PMON_CTL_EDGE_DET | \
133 				 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
134 /* IVBEP Cbo */
135 #define IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK		(IVBEP_PMON_RAW_EVENT_MASK | \
136 						 SNBEP_CBO_PMON_CTL_TID_EN)
137 
138 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_TID		(0x1fULL << 0)
139 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 5)
140 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x3fULL << 17)
141 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_NID		(0xffffULL << 32)
142 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC		(0x1ffULL << 52)
143 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_C6		(0x1ULL << 61)
144 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_NC		(0x1ULL << 62)
145 #define IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)
146 
147 /* IVBEP home agent */
148 #define IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST		(1 << 16)
149 #define IVBEP_HA_PCI_PMON_RAW_EVENT_MASK		\
150 				(IVBEP_PMON_RAW_EVENT_MASK | \
151 				 IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST)
152 /* IVBEP PCU */
153 #define IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK	\
154 				(SNBEP_PMON_CTL_EV_SEL_MASK | \
155 				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
156 				 SNBEP_PMON_CTL_EDGE_DET | \
157 				 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
158 				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
159 				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
160 /* IVBEP QPI */
161 #define IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK	\
162 				(IVBEP_PMON_RAW_EVENT_MASK | \
163 				 SNBEP_PMON_CTL_EV_SEL_EXT)
164 
165 #define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
166 				((1ULL << (n)) - 1)))
167 
168 /* Haswell-EP Ubox */
169 #define HSWEP_U_MSR_PMON_CTR0			0x709
170 #define HSWEP_U_MSR_PMON_CTL0			0x705
171 #define HSWEP_U_MSR_PMON_FILTER			0x707
172 
173 #define HSWEP_U_MSR_PMON_UCLK_FIXED_CTL		0x703
174 #define HSWEP_U_MSR_PMON_UCLK_FIXED_CTR		0x704
175 
176 #define HSWEP_U_MSR_PMON_BOX_FILTER_TID		(0x1 << 0)
177 #define HSWEP_U_MSR_PMON_BOX_FILTER_CID		(0x1fULL << 1)
178 #define HSWEP_U_MSR_PMON_BOX_FILTER_MASK \
179 					(HSWEP_U_MSR_PMON_BOX_FILTER_TID | \
180 					 HSWEP_U_MSR_PMON_BOX_FILTER_CID)
181 
182 /* Haswell-EP CBo */
183 #define HSWEP_C0_MSR_PMON_CTR0			0xe08
184 #define HSWEP_C0_MSR_PMON_CTL0			0xe01
185 #define HSWEP_C0_MSR_PMON_BOX_CTL			0xe00
186 #define HSWEP_C0_MSR_PMON_BOX_FILTER0		0xe05
187 #define HSWEP_CBO_MSR_OFFSET			0x10
188 
189 
190 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_TID		(0x3fULL << 0)
191 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 6)
192 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE	(0x7fULL << 17)
193 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_NID		(0xffffULL << 32)
194 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC		(0x1ffULL << 52)
195 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_C6		(0x1ULL << 61)
196 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_NC		(0x1ULL << 62)
197 #define HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)
198 
199 
200 /* Haswell-EP Sbox */
201 #define HSWEP_S0_MSR_PMON_CTR0			0x726
202 #define HSWEP_S0_MSR_PMON_CTL0			0x721
203 #define HSWEP_S0_MSR_PMON_BOX_CTL			0x720
204 #define HSWEP_SBOX_MSR_OFFSET			0xa
205 #define HSWEP_S_MSR_PMON_RAW_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
206 						 SNBEP_CBO_PMON_CTL_TID_EN)
207 
208 /* Haswell-EP PCU */
209 #define HSWEP_PCU_MSR_PMON_CTR0			0x717
210 #define HSWEP_PCU_MSR_PMON_CTL0			0x711
211 #define HSWEP_PCU_MSR_PMON_BOX_CTL		0x710
212 #define HSWEP_PCU_MSR_PMON_BOX_FILTER		0x715
213 
214 /* KNL Ubox */
215 #define KNL_U_MSR_PMON_RAW_EVENT_MASK \
216 					(SNBEP_U_MSR_PMON_RAW_EVENT_MASK | \
217 						SNBEP_CBO_PMON_CTL_TID_EN)
218 /* KNL CHA */
219 #define KNL_CHA_MSR_OFFSET			0xc
220 #define KNL_CHA_MSR_PMON_CTL_QOR		(1 << 16)
221 #define KNL_CHA_MSR_PMON_RAW_EVENT_MASK \
222 					(SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK | \
223 					 KNL_CHA_MSR_PMON_CTL_QOR)
224 #define KNL_CHA_MSR_PMON_BOX_FILTER_TID		0x1ff
225 #define KNL_CHA_MSR_PMON_BOX_FILTER_STATE	(7 << 18)
226 #define KNL_CHA_MSR_PMON_BOX_FILTER_OP		(0xfffffe2aULL << 32)
227 #define KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE	(0x1ULL << 32)
228 #define KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE	(0x1ULL << 33)
229 #define KNL_CHA_MSR_PMON_BOX_FILTER_NNC		(0x1ULL << 37)
230 
231 /* KNL EDC/MC UCLK */
232 #define KNL_UCLK_MSR_PMON_CTR0_LOW		0x400
233 #define KNL_UCLK_MSR_PMON_CTL0			0x420
234 #define KNL_UCLK_MSR_PMON_BOX_CTL		0x430
235 #define KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW	0x44c
236 #define KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL	0x454
237 #define KNL_PMON_FIXED_CTL_EN			0x1
238 
239 /* KNL EDC */
240 #define KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW		0xa00
241 #define KNL_EDC0_ECLK_MSR_PMON_CTL0		0xa20
242 #define KNL_EDC0_ECLK_MSR_PMON_BOX_CTL		0xa30
243 #define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW	0xa3c
244 #define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL	0xa44
245 
246 /* KNL MC */
247 #define KNL_MC0_CH0_MSR_PMON_CTR0_LOW		0xb00
248 #define KNL_MC0_CH0_MSR_PMON_CTL0		0xb20
249 #define KNL_MC0_CH0_MSR_PMON_BOX_CTL		0xb30
250 #define KNL_MC0_CH0_MSR_PMON_FIXED_LOW		0xb3c
251 #define KNL_MC0_CH0_MSR_PMON_FIXED_CTL		0xb44
252 
253 /* KNL IRP */
254 #define KNL_IRP_PCI_PMON_BOX_CTL		0xf0
255 #define KNL_IRP_PCI_PMON_RAW_EVENT_MASK		(SNBEP_PMON_RAW_EVENT_MASK | \
256 						 KNL_CHA_MSR_PMON_CTL_QOR)
257 /* KNL PCU */
258 #define KNL_PCU_PMON_CTL_EV_SEL_MASK		0x0000007f
259 #define KNL_PCU_PMON_CTL_USE_OCC_CTR		(1 << 7)
260 #define KNL_PCU_MSR_PMON_CTL_TRESH_MASK		0x3f000000
261 #define KNL_PCU_MSR_PMON_RAW_EVENT_MASK	\
262 				(KNL_PCU_PMON_CTL_EV_SEL_MASK | \
263 				 KNL_PCU_PMON_CTL_USE_OCC_CTR | \
264 				 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
265 				 SNBEP_PMON_CTL_EDGE_DET | \
266 				 SNBEP_CBO_PMON_CTL_TID_EN | \
267 				 SNBEP_PMON_CTL_INVERT | \
268 				 KNL_PCU_MSR_PMON_CTL_TRESH_MASK | \
269 				 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
270 				 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
271 
272 /* SKX pci bus to socket mapping */
273 #define SKX_CPUNODEID			0xc0
274 #define SKX_GIDNIDMAP			0xd4
275 
276 /*
277  * The CPU_BUS_NUMBER MSR returns the values of the respective CPUBUSNO CSR
278  * that BIOS programmed. MSR has package scope.
279  * |  Bit  |  Default  |  Description
280  * | [63]  |    00h    | VALID - When set, indicates the CPU bus
281  *                       numbers have been initialized. (RO)
282  * |[62:48]|    ---    | Reserved
283  * |[47:40]|    00h    | BUS_NUM_5 - Return the bus number BIOS assigned
284  *                       CPUBUSNO(5). (RO)
285  * |[39:32]|    00h    | BUS_NUM_4 - Return the bus number BIOS assigned
286  *                       CPUBUSNO(4). (RO)
287  * |[31:24]|    00h    | BUS_NUM_3 - Return the bus number BIOS assigned
288  *                       CPUBUSNO(3). (RO)
289  * |[23:16]|    00h    | BUS_NUM_2 - Return the bus number BIOS assigned
290  *                       CPUBUSNO(2). (RO)
291  * |[15:8] |    00h    | BUS_NUM_1 - Return the bus number BIOS assigned
292  *                       CPUBUSNO(1). (RO)
293  * | [7:0] |    00h    | BUS_NUM_0 - Return the bus number BIOS assigned
294  *                       CPUBUSNO(0). (RO)
295  */
296 #define SKX_MSR_CPU_BUS_NUMBER		0x300
297 #define SKX_MSR_CPU_BUS_VALID_BIT	(1ULL << 63)
298 #define BUS_NUM_STRIDE			8
299 
300 /* SKX CHA */
301 #define SKX_CHA_MSR_PMON_BOX_FILTER_TID		(0x1ffULL << 0)
302 #define SKX_CHA_MSR_PMON_BOX_FILTER_LINK	(0xfULL << 9)
303 #define SKX_CHA_MSR_PMON_BOX_FILTER_STATE	(0x3ffULL << 17)
304 #define SKX_CHA_MSR_PMON_BOX_FILTER_REM		(0x1ULL << 32)
305 #define SKX_CHA_MSR_PMON_BOX_FILTER_LOC		(0x1ULL << 33)
306 #define SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC	(0x1ULL << 35)
307 #define SKX_CHA_MSR_PMON_BOX_FILTER_NM		(0x1ULL << 36)
308 #define SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM	(0x1ULL << 37)
309 #define SKX_CHA_MSR_PMON_BOX_FILTER_OPC0	(0x3ffULL << 41)
310 #define SKX_CHA_MSR_PMON_BOX_FILTER_OPC1	(0x3ffULL << 51)
311 #define SKX_CHA_MSR_PMON_BOX_FILTER_C6		(0x1ULL << 61)
312 #define SKX_CHA_MSR_PMON_BOX_FILTER_NC		(0x1ULL << 62)
313 #define SKX_CHA_MSR_PMON_BOX_FILTER_ISOC	(0x1ULL << 63)
314 
315 /* SKX IIO */
316 #define SKX_IIO0_MSR_PMON_CTL0		0xa48
317 #define SKX_IIO0_MSR_PMON_CTR0		0xa41
318 #define SKX_IIO0_MSR_PMON_BOX_CTL	0xa40
319 #define SKX_IIO_MSR_OFFSET		0x20
320 
321 #define SKX_PMON_CTL_TRESH_MASK		(0xff << 24)
322 #define SKX_PMON_CTL_TRESH_MASK_EXT	(0xf)
323 #define SKX_PMON_CTL_CH_MASK		(0xff << 4)
324 #define SKX_PMON_CTL_FC_MASK		(0x7 << 12)
325 #define SKX_IIO_PMON_RAW_EVENT_MASK	(SNBEP_PMON_CTL_EV_SEL_MASK | \
326 					 SNBEP_PMON_CTL_UMASK_MASK | \
327 					 SNBEP_PMON_CTL_EDGE_DET | \
328 					 SNBEP_PMON_CTL_INVERT | \
329 					 SKX_PMON_CTL_TRESH_MASK)
330 #define SKX_IIO_PMON_RAW_EVENT_MASK_EXT	(SKX_PMON_CTL_TRESH_MASK_EXT | \
331 					 SKX_PMON_CTL_CH_MASK | \
332 					 SKX_PMON_CTL_FC_MASK)
333 
334 /* SKX IRP */
335 #define SKX_IRP0_MSR_PMON_CTL0		0xa5b
336 #define SKX_IRP0_MSR_PMON_CTR0		0xa59
337 #define SKX_IRP0_MSR_PMON_BOX_CTL	0xa58
338 #define SKX_IRP_MSR_OFFSET		0x20
339 
340 /* SKX UPI */
341 #define SKX_UPI_PCI_PMON_CTL0		0x350
342 #define SKX_UPI_PCI_PMON_CTR0		0x318
343 #define SKX_UPI_PCI_PMON_BOX_CTL	0x378
344 #define SKX_UPI_CTL_UMASK_EXT		0xffefff
345 
346 /* SKX M2M */
347 #define SKX_M2M_PCI_PMON_CTL0		0x228
348 #define SKX_M2M_PCI_PMON_CTR0		0x200
349 #define SKX_M2M_PCI_PMON_BOX_CTL	0x258
350 
351 /* SNR Ubox */
352 #define SNR_U_MSR_PMON_CTR0			0x1f98
353 #define SNR_U_MSR_PMON_CTL0			0x1f91
354 #define SNR_U_MSR_PMON_UCLK_FIXED_CTL		0x1f93
355 #define SNR_U_MSR_PMON_UCLK_FIXED_CTR		0x1f94
356 
357 /* SNR CHA */
358 #define SNR_CHA_RAW_EVENT_MASK_EXT		0x3ffffff
359 #define SNR_CHA_MSR_PMON_CTL0			0x1c01
360 #define SNR_CHA_MSR_PMON_CTR0			0x1c08
361 #define SNR_CHA_MSR_PMON_BOX_CTL		0x1c00
362 #define SNR_C0_MSR_PMON_BOX_FILTER0		0x1c05
363 
364 
365 /* SNR IIO */
366 #define SNR_IIO_MSR_PMON_CTL0			0x1e08
367 #define SNR_IIO_MSR_PMON_CTR0			0x1e01
368 #define SNR_IIO_MSR_PMON_BOX_CTL		0x1e00
369 #define SNR_IIO_MSR_OFFSET			0x10
370 #define SNR_IIO_PMON_RAW_EVENT_MASK_EXT		0x7ffff
371 
372 /* SNR IRP */
373 #define SNR_IRP0_MSR_PMON_CTL0			0x1ea8
374 #define SNR_IRP0_MSR_PMON_CTR0			0x1ea1
375 #define SNR_IRP0_MSR_PMON_BOX_CTL		0x1ea0
376 #define SNR_IRP_MSR_OFFSET			0x10
377 
378 /* SNR M2PCIE */
379 #define SNR_M2PCIE_MSR_PMON_CTL0		0x1e58
380 #define SNR_M2PCIE_MSR_PMON_CTR0		0x1e51
381 #define SNR_M2PCIE_MSR_PMON_BOX_CTL		0x1e50
382 #define SNR_M2PCIE_MSR_OFFSET			0x10
383 
384 /* SNR PCU */
385 #define SNR_PCU_MSR_PMON_CTL0			0x1ef1
386 #define SNR_PCU_MSR_PMON_CTR0			0x1ef8
387 #define SNR_PCU_MSR_PMON_BOX_CTL		0x1ef0
388 #define SNR_PCU_MSR_PMON_BOX_FILTER		0x1efc
389 
390 /* SNR M2M */
391 #define SNR_M2M_PCI_PMON_CTL0			0x468
392 #define SNR_M2M_PCI_PMON_CTR0			0x440
393 #define SNR_M2M_PCI_PMON_BOX_CTL		0x438
394 #define SNR_M2M_PCI_PMON_UMASK_EXT		0xff
395 
396 /* SNR PCIE3 */
397 #define SNR_PCIE3_PCI_PMON_CTL0			0x508
398 #define SNR_PCIE3_PCI_PMON_CTR0			0x4e8
399 #define SNR_PCIE3_PCI_PMON_BOX_CTL		0x4e0
400 
401 /* SNR IMC */
402 #define SNR_IMC_MMIO_PMON_FIXED_CTL		0x54
403 #define SNR_IMC_MMIO_PMON_FIXED_CTR		0x38
404 #define SNR_IMC_MMIO_PMON_CTL0			0x40
405 #define SNR_IMC_MMIO_PMON_CTR0			0x8
406 #define SNR_IMC_MMIO_PMON_BOX_CTL		0x22800
407 #define SNR_IMC_MMIO_OFFSET			0x4000
408 #define SNR_IMC_MMIO_SIZE			0x4000
409 #define SNR_IMC_MMIO_BASE_OFFSET		0xd0
410 #define SNR_IMC_MMIO_BASE_MASK			0x1FFFFFFF
411 #define SNR_IMC_MMIO_MEM0_OFFSET		0xd8
412 #define SNR_IMC_MMIO_MEM0_MASK			0x7FF
413 
414 /* ICX CHA */
415 #define ICX_C34_MSR_PMON_CTR0			0xb68
416 #define ICX_C34_MSR_PMON_CTL0			0xb61
417 #define ICX_C34_MSR_PMON_BOX_CTL		0xb60
418 #define ICX_C34_MSR_PMON_BOX_FILTER0		0xb65
419 
420 /* ICX IIO */
421 #define ICX_IIO_MSR_PMON_CTL0			0xa58
422 #define ICX_IIO_MSR_PMON_CTR0			0xa51
423 #define ICX_IIO_MSR_PMON_BOX_CTL		0xa50
424 
425 /* ICX IRP */
426 #define ICX_IRP0_MSR_PMON_CTL0			0xa4d
427 #define ICX_IRP0_MSR_PMON_CTR0			0xa4b
428 #define ICX_IRP0_MSR_PMON_BOX_CTL		0xa4a
429 
430 /* ICX M2PCIE */
431 #define ICX_M2PCIE_MSR_PMON_CTL0		0xa46
432 #define ICX_M2PCIE_MSR_PMON_CTR0		0xa41
433 #define ICX_M2PCIE_MSR_PMON_BOX_CTL		0xa40
434 
435 /* ICX UPI */
436 #define ICX_UPI_PCI_PMON_CTL0			0x350
437 #define ICX_UPI_PCI_PMON_CTR0			0x320
438 #define ICX_UPI_PCI_PMON_BOX_CTL		0x318
439 #define ICX_UPI_CTL_UMASK_EXT			0xffffff
440 
441 /* ICX M3UPI*/
442 #define ICX_M3UPI_PCI_PMON_CTL0			0xd8
443 #define ICX_M3UPI_PCI_PMON_CTR0			0xa8
444 #define ICX_M3UPI_PCI_PMON_BOX_CTL		0xa0
445 
446 /* ICX IMC */
447 #define ICX_NUMBER_IMC_CHN			2
448 #define ICX_IMC_MEM_STRIDE			0x4
449 
450 DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
451 DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6");
452 DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
453 DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7");
454 DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
455 DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-43,45-55");
456 DEFINE_UNCORE_FORMAT_ATTR(umask_ext2, umask, "config:8-15,32-57");
457 DEFINE_UNCORE_FORMAT_ATTR(umask_ext3, umask, "config:8-15,32-39");
458 DEFINE_UNCORE_FORMAT_ATTR(umask_ext4, umask, "config:8-15,32-55");
459 DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
460 DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
461 DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
462 DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
463 DEFINE_UNCORE_FORMAT_ATTR(thresh9, thresh, "config:24-35");
464 DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
465 DEFINE_UNCORE_FORMAT_ATTR(thresh6, thresh, "config:24-29");
466 DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
467 DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
468 DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
469 DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
470 DEFINE_UNCORE_FORMAT_ATTR(occ_edge_det, occ_edge_det, "config:31");
471 DEFINE_UNCORE_FORMAT_ATTR(ch_mask, ch_mask, "config:36-43");
472 DEFINE_UNCORE_FORMAT_ATTR(ch_mask2, ch_mask, "config:36-47");
473 DEFINE_UNCORE_FORMAT_ATTR(fc_mask, fc_mask, "config:44-46");
474 DEFINE_UNCORE_FORMAT_ATTR(fc_mask2, fc_mask, "config:48-50");
475 DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
476 DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0");
477 DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5");
478 DEFINE_UNCORE_FORMAT_ATTR(filter_tid4, filter_tid, "config1:0-8");
479 DEFINE_UNCORE_FORMAT_ATTR(filter_tid5, filter_tid, "config1:0-9");
480 DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
481 DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
482 DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
483 DEFINE_UNCORE_FORMAT_ATTR(filter_link3, filter_link, "config1:12");
484 DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
485 DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
486 DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
487 DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
488 DEFINE_UNCORE_FORMAT_ATTR(filter_state3, filter_state, "config1:17-23");
489 DEFINE_UNCORE_FORMAT_ATTR(filter_state4, filter_state, "config1:18-20");
490 DEFINE_UNCORE_FORMAT_ATTR(filter_state5, filter_state, "config1:17-26");
491 DEFINE_UNCORE_FORMAT_ATTR(filter_rem, filter_rem, "config1:32");
492 DEFINE_UNCORE_FORMAT_ATTR(filter_loc, filter_loc, "config1:33");
493 DEFINE_UNCORE_FORMAT_ATTR(filter_nm, filter_nm, "config1:36");
494 DEFINE_UNCORE_FORMAT_ATTR(filter_not_nm, filter_not_nm, "config1:37");
495 DEFINE_UNCORE_FORMAT_ATTR(filter_local, filter_local, "config1:33");
496 DEFINE_UNCORE_FORMAT_ATTR(filter_all_op, filter_all_op, "config1:35");
497 DEFINE_UNCORE_FORMAT_ATTR(filter_nnm, filter_nnm, "config1:37");
498 DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
499 DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
500 DEFINE_UNCORE_FORMAT_ATTR(filter_opc3, filter_opc, "config1:41-60");
501 DEFINE_UNCORE_FORMAT_ATTR(filter_opc_0, filter_opc0, "config1:41-50");
502 DEFINE_UNCORE_FORMAT_ATTR(filter_opc_1, filter_opc1, "config1:51-60");
503 DEFINE_UNCORE_FORMAT_ATTR(filter_nc, filter_nc, "config1:62");
504 DEFINE_UNCORE_FORMAT_ATTR(filter_c6, filter_c6, "config1:61");
505 DEFINE_UNCORE_FORMAT_ATTR(filter_isoc, filter_isoc, "config1:63");
506 DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
507 DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
508 DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
509 DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
510 DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
511 DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
512 DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
513 DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
514 DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
515 DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
516 DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
517 DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
518 DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
519 DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
520 DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
521 DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
522 DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
523 DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
524 DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
525 DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
526 DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
527 DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");
528 
529 static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
530 {
531 	struct pci_dev *pdev = box->pci_dev;
532 	int box_ctl = uncore_pci_box_ctl(box);
533 	u32 config = 0;
534 
535 	if (!pci_read_config_dword(pdev, box_ctl, &config)) {
536 		config |= SNBEP_PMON_BOX_CTL_FRZ;
537 		pci_write_config_dword(pdev, box_ctl, config);
538 	}
539 }
540 
541 static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
542 {
543 	struct pci_dev *pdev = box->pci_dev;
544 	int box_ctl = uncore_pci_box_ctl(box);
545 	u32 config = 0;
546 
547 	if (!pci_read_config_dword(pdev, box_ctl, &config)) {
548 		config &= ~SNBEP_PMON_BOX_CTL_FRZ;
549 		pci_write_config_dword(pdev, box_ctl, config);
550 	}
551 }
552 
553 static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
554 {
555 	struct pci_dev *pdev = box->pci_dev;
556 	struct hw_perf_event *hwc = &event->hw;
557 
558 	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
559 }
560 
561 static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
562 {
563 	struct pci_dev *pdev = box->pci_dev;
564 	struct hw_perf_event *hwc = &event->hw;
565 
566 	pci_write_config_dword(pdev, hwc->config_base, hwc->config);
567 }
568 
569 static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
570 {
571 	struct pci_dev *pdev = box->pci_dev;
572 	struct hw_perf_event *hwc = &event->hw;
573 	u64 count = 0;
574 
575 	pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
576 	pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);
577 
578 	return count;
579 }
580 
581 static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
582 {
583 	struct pci_dev *pdev = box->pci_dev;
584 	int box_ctl = uncore_pci_box_ctl(box);
585 
586 	pci_write_config_dword(pdev, box_ctl, SNBEP_PMON_BOX_CTL_INT);
587 }
588 
589 static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
590 {
591 	u64 config;
592 	unsigned msr;
593 
594 	msr = uncore_msr_box_ctl(box);
595 	if (msr) {
596 		rdmsrl(msr, config);
597 		config |= SNBEP_PMON_BOX_CTL_FRZ;
598 		wrmsrl(msr, config);
599 	}
600 }
601 
602 static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
603 {
604 	u64 config;
605 	unsigned msr;
606 
607 	msr = uncore_msr_box_ctl(box);
608 	if (msr) {
609 		rdmsrl(msr, config);
610 		config &= ~SNBEP_PMON_BOX_CTL_FRZ;
611 		wrmsrl(msr, config);
612 	}
613 }
614 
615 static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
616 {
617 	struct hw_perf_event *hwc = &event->hw;
618 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
619 
620 	if (reg1->idx != EXTRA_REG_NONE)
621 		wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));
622 
623 	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
624 }
625 
626 static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
627 					struct perf_event *event)
628 {
629 	struct hw_perf_event *hwc = &event->hw;
630 
631 	wrmsrl(hwc->config_base, hwc->config);
632 }
633 
634 static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
635 {
636 	unsigned msr = uncore_msr_box_ctl(box);
637 
638 	if (msr)
639 		wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
640 }
641 
642 static struct attribute *snbep_uncore_formats_attr[] = {
643 	&format_attr_event.attr,
644 	&format_attr_umask.attr,
645 	&format_attr_edge.attr,
646 	&format_attr_inv.attr,
647 	&format_attr_thresh8.attr,
648 	NULL,
649 };
650 
651 static struct attribute *snbep_uncore_ubox_formats_attr[] = {
652 	&format_attr_event.attr,
653 	&format_attr_umask.attr,
654 	&format_attr_edge.attr,
655 	&format_attr_inv.attr,
656 	&format_attr_thresh5.attr,
657 	NULL,
658 };
659 
660 static struct attribute *snbep_uncore_cbox_formats_attr[] = {
661 	&format_attr_event.attr,
662 	&format_attr_umask.attr,
663 	&format_attr_edge.attr,
664 	&format_attr_tid_en.attr,
665 	&format_attr_inv.attr,
666 	&format_attr_thresh8.attr,
667 	&format_attr_filter_tid.attr,
668 	&format_attr_filter_nid.attr,
669 	&format_attr_filter_state.attr,
670 	&format_attr_filter_opc.attr,
671 	NULL,
672 };
673 
674 static struct attribute *snbep_uncore_pcu_formats_attr[] = {
675 	&format_attr_event.attr,
676 	&format_attr_occ_sel.attr,
677 	&format_attr_edge.attr,
678 	&format_attr_inv.attr,
679 	&format_attr_thresh5.attr,
680 	&format_attr_occ_invert.attr,
681 	&format_attr_occ_edge.attr,
682 	&format_attr_filter_band0.attr,
683 	&format_attr_filter_band1.attr,
684 	&format_attr_filter_band2.attr,
685 	&format_attr_filter_band3.attr,
686 	NULL,
687 };
688 
689 static struct attribute *snbep_uncore_qpi_formats_attr[] = {
690 	&format_attr_event_ext.attr,
691 	&format_attr_umask.attr,
692 	&format_attr_edge.attr,
693 	&format_attr_inv.attr,
694 	&format_attr_thresh8.attr,
695 	&format_attr_match_rds.attr,
696 	&format_attr_match_rnid30.attr,
697 	&format_attr_match_rnid4.attr,
698 	&format_attr_match_dnid.attr,
699 	&format_attr_match_mc.attr,
700 	&format_attr_match_opc.attr,
701 	&format_attr_match_vnw.attr,
702 	&format_attr_match0.attr,
703 	&format_attr_match1.attr,
704 	&format_attr_mask_rds.attr,
705 	&format_attr_mask_rnid30.attr,
706 	&format_attr_mask_rnid4.attr,
707 	&format_attr_mask_dnid.attr,
708 	&format_attr_mask_mc.attr,
709 	&format_attr_mask_opc.attr,
710 	&format_attr_mask_vnw.attr,
711 	&format_attr_mask0.attr,
712 	&format_attr_mask1.attr,
713 	NULL,
714 };
715 
716 static struct uncore_event_desc snbep_uncore_imc_events[] = {
717 	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0xff,umask=0x00"),
718 	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
719 	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
720 	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
721 	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
722 	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
723 	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
724 	{ /* end: all zeroes */ },
725 };
726 
727 static struct uncore_event_desc snbep_uncore_qpi_events[] = {
728 	INTEL_UNCORE_EVENT_DESC(clockticks,       "event=0x14"),
729 	INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
730 	INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x102,umask=0x08"),
731 	INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x103,umask=0x04"),
732 	{ /* end: all zeroes */ },
733 };
734 
735 static const struct attribute_group snbep_uncore_format_group = {
736 	.name = "format",
737 	.attrs = snbep_uncore_formats_attr,
738 };
739 
740 static const struct attribute_group snbep_uncore_ubox_format_group = {
741 	.name = "format",
742 	.attrs = snbep_uncore_ubox_formats_attr,
743 };
744 
745 static const struct attribute_group snbep_uncore_cbox_format_group = {
746 	.name = "format",
747 	.attrs = snbep_uncore_cbox_formats_attr,
748 };
749 
750 static const struct attribute_group snbep_uncore_pcu_format_group = {
751 	.name = "format",
752 	.attrs = snbep_uncore_pcu_formats_attr,
753 };
754 
755 static const struct attribute_group snbep_uncore_qpi_format_group = {
756 	.name = "format",
757 	.attrs = snbep_uncore_qpi_formats_attr,
758 };
759 
760 #define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
761 	.disable_box	= snbep_uncore_msr_disable_box,		\
762 	.enable_box	= snbep_uncore_msr_enable_box,		\
763 	.disable_event	= snbep_uncore_msr_disable_event,	\
764 	.enable_event	= snbep_uncore_msr_enable_event,	\
765 	.read_counter	= uncore_msr_read_counter
766 
767 #define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
768 	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),			\
769 	.init_box	= snbep_uncore_msr_init_box		\
770 
771 static struct intel_uncore_ops snbep_uncore_msr_ops = {
772 	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
773 };
774 
775 #define SNBEP_UNCORE_PCI_OPS_COMMON_INIT()			\
776 	.init_box	= snbep_uncore_pci_init_box,		\
777 	.disable_box	= snbep_uncore_pci_disable_box,		\
778 	.enable_box	= snbep_uncore_pci_enable_box,		\
779 	.disable_event	= snbep_uncore_pci_disable_event,	\
780 	.read_counter	= snbep_uncore_pci_read_counter
781 
782 static struct intel_uncore_ops snbep_uncore_pci_ops = {
783 	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
784 	.enable_event	= snbep_uncore_pci_enable_event,	\
785 };
786 
787 static struct event_constraint snbep_uncore_cbox_constraints[] = {
788 	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
789 	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
790 	UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
791 	UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
792 	UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
793 	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
794 	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
795 	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
796 	UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
797 	UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
798 	UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
799 	UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
800 	UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
801 	UNCORE_EVENT_CONSTRAINT(0x1f, 0xe),
802 	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
803 	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
804 	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
805 	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
806 	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
807 	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
808 	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
809 	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
810 	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
811 	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
812 	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
813 	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
814 	EVENT_CONSTRAINT_END
815 };
816 
817 static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
818 	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
819 	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
820 	UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
821 	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
822 	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
823 	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
824 	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
825 	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
826 	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
827 	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
828 	EVENT_CONSTRAINT_END
829 };
830 
831 static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
832 	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
833 	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
834 	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
835 	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
836 	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
837 	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
838 	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
839 	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
840 	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
841 	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
842 	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
843 	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
844 	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
845 	UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
846 	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
847 	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
848 	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
849 	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
850 	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
851 	UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
852 	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
853 	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
854 	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
855 	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
856 	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
857 	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
858 	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
859 	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
860 	EVENT_CONSTRAINT_END
861 };
862 
863 static struct intel_uncore_type snbep_uncore_ubox = {
864 	.name		= "ubox",
865 	.num_counters   = 2,
866 	.num_boxes	= 1,
867 	.perf_ctr_bits	= 44,
868 	.fixed_ctr_bits	= 48,
869 	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
870 	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
871 	.event_mask	= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
872 	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
873 	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
874 	.ops		= &snbep_uncore_msr_ops,
875 	.format_group	= &snbep_uncore_ubox_format_group,
876 };
877 
878 static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
879 	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
880 				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
881 	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
882 	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
883 	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
884 	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
885 	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
886 	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
887 	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
888 	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
889 	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
890 	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
891 	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
892 	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
893 	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
894 	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
895 	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
896 	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
897 	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
898 	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
899 	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
900 	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
901 	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
902 	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
903 	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
904 	EVENT_EXTRA_END
905 };
906 
907 static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
908 {
909 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
910 	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
911 	int i;
912 
913 	if (uncore_box_is_fake(box))
914 		return;
915 
916 	for (i = 0; i < 5; i++) {
917 		if (reg1->alloc & (0x1 << i))
918 			atomic_sub(1 << (i * 6), &er->ref);
919 	}
920 	reg1->alloc = 0;
921 }
922 
923 static struct event_constraint *
924 __snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
925 			    u64 (*cbox_filter_mask)(int fields))
926 {
927 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
928 	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
929 	int i, alloc = 0;
930 	unsigned long flags;
931 	u64 mask;
932 
933 	if (reg1->idx == EXTRA_REG_NONE)
934 		return NULL;
935 
936 	raw_spin_lock_irqsave(&er->lock, flags);
937 	for (i = 0; i < 5; i++) {
938 		if (!(reg1->idx & (0x1 << i)))
939 			continue;
940 		if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
941 			continue;
942 
943 		mask = cbox_filter_mask(0x1 << i);
944 		if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
945 		    !((reg1->config ^ er->config) & mask)) {
946 			atomic_add(1 << (i * 6), &er->ref);
947 			er->config &= ~mask;
948 			er->config |= reg1->config & mask;
949 			alloc |= (0x1 << i);
950 		} else {
951 			break;
952 		}
953 	}
954 	raw_spin_unlock_irqrestore(&er->lock, flags);
955 	if (i < 5)
956 		goto fail;
957 
958 	if (!uncore_box_is_fake(box))
959 		reg1->alloc |= alloc;
960 
961 	return NULL;
962 fail:
963 	for (; i >= 0; i--) {
964 		if (alloc & (0x1 << i))
965 			atomic_sub(1 << (i * 6), &er->ref);
966 	}
967 	return &uncore_constraint_empty;
968 }
969 
970 static u64 snbep_cbox_filter_mask(int fields)
971 {
972 	u64 mask = 0;
973 
974 	if (fields & 0x1)
975 		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
976 	if (fields & 0x2)
977 		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
978 	if (fields & 0x4)
979 		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
980 	if (fields & 0x8)
981 		mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
982 
983 	return mask;
984 }
985 
986 static struct event_constraint *
987 snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
988 {
989 	return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
990 }
991 
992 static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
993 {
994 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
995 	struct extra_reg *er;
996 	int idx = 0;
997 
998 	for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
999 		if (er->event != (event->hw.config & er->config_mask))
1000 			continue;
1001 		idx |= er->idx;
1002 	}
1003 
1004 	if (idx) {
1005 		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
1006 			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1007 		reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
1008 		reg1->idx = idx;
1009 	}
1010 	return 0;
1011 }
1012 
1013 static struct intel_uncore_ops snbep_uncore_cbox_ops = {
1014 	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
1015 	.hw_config		= snbep_cbox_hw_config,
1016 	.get_constraint		= snbep_cbox_get_constraint,
1017 	.put_constraint		= snbep_cbox_put_constraint,
1018 };
1019 
1020 static struct intel_uncore_type snbep_uncore_cbox = {
1021 	.name			= "cbox",
1022 	.num_counters		= 4,
1023 	.num_boxes		= 8,
1024 	.perf_ctr_bits		= 44,
1025 	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
1026 	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
1027 	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
1028 	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
1029 	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
1030 	.num_shared_regs	= 1,
1031 	.constraints		= snbep_uncore_cbox_constraints,
1032 	.ops			= &snbep_uncore_cbox_ops,
1033 	.format_group		= &snbep_uncore_cbox_format_group,
1034 };
1035 
1036 static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
1037 {
1038 	struct hw_perf_event *hwc = &event->hw;
1039 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1040 	u64 config = reg1->config;
1041 
1042 	if (new_idx > reg1->idx)
1043 		config <<= 8 * (new_idx - reg1->idx);
1044 	else
1045 		config >>= 8 * (reg1->idx - new_idx);
1046 
1047 	if (modify) {
1048 		hwc->config += new_idx - reg1->idx;
1049 		reg1->config = config;
1050 		reg1->idx = new_idx;
1051 	}
1052 	return config;
1053 }
1054 
1055 static struct event_constraint *
1056 snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
1057 {
1058 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1059 	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
1060 	unsigned long flags;
1061 	int idx = reg1->idx;
1062 	u64 mask, config1 = reg1->config;
1063 	bool ok = false;
1064 
1065 	if (reg1->idx == EXTRA_REG_NONE ||
1066 	    (!uncore_box_is_fake(box) && reg1->alloc))
1067 		return NULL;
1068 again:
1069 	mask = 0xffULL << (idx * 8);
1070 	raw_spin_lock_irqsave(&er->lock, flags);
1071 	if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
1072 	    !((config1 ^ er->config) & mask)) {
1073 		atomic_add(1 << (idx * 8), &er->ref);
1074 		er->config &= ~mask;
1075 		er->config |= config1 & mask;
1076 		ok = true;
1077 	}
1078 	raw_spin_unlock_irqrestore(&er->lock, flags);
1079 
1080 	if (!ok) {
1081 		idx = (idx + 1) % 4;
1082 		if (idx != reg1->idx) {
1083 			config1 = snbep_pcu_alter_er(event, idx, false);
1084 			goto again;
1085 		}
1086 		return &uncore_constraint_empty;
1087 	}
1088 
1089 	if (!uncore_box_is_fake(box)) {
1090 		if (idx != reg1->idx)
1091 			snbep_pcu_alter_er(event, idx, true);
1092 		reg1->alloc = 1;
1093 	}
1094 	return NULL;
1095 }
1096 
1097 static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
1098 {
1099 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1100 	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
1101 
1102 	if (uncore_box_is_fake(box) || !reg1->alloc)
1103 		return;
1104 
1105 	atomic_sub(1 << (reg1->idx * 8), &er->ref);
1106 	reg1->alloc = 0;
1107 }
1108 
1109 static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1110 {
1111 	struct hw_perf_event *hwc = &event->hw;
1112 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1113 	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
1114 
1115 	if (ev_sel >= 0xb && ev_sel <= 0xe) {
1116 		reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
1117 		reg1->idx = ev_sel - 0xb;
1118 		reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
1119 	}
1120 	return 0;
1121 }
1122 
1123 static struct intel_uncore_ops snbep_uncore_pcu_ops = {
1124 	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
1125 	.hw_config		= snbep_pcu_hw_config,
1126 	.get_constraint		= snbep_pcu_get_constraint,
1127 	.put_constraint		= snbep_pcu_put_constraint,
1128 };
1129 
1130 static struct intel_uncore_type snbep_uncore_pcu = {
1131 	.name			= "pcu",
1132 	.num_counters		= 4,
1133 	.num_boxes		= 1,
1134 	.perf_ctr_bits		= 48,
1135 	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
1136 	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
1137 	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
1138 	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
1139 	.num_shared_regs	= 1,
1140 	.ops			= &snbep_uncore_pcu_ops,
1141 	.format_group		= &snbep_uncore_pcu_format_group,
1142 };
1143 
1144 static struct intel_uncore_type *snbep_msr_uncores[] = {
1145 	&snbep_uncore_ubox,
1146 	&snbep_uncore_cbox,
1147 	&snbep_uncore_pcu,
1148 	NULL,
1149 };
1150 
1151 void snbep_uncore_cpu_init(void)
1152 {
1153 	if (snbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
1154 		snbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
1155 	uncore_msr_uncores = snbep_msr_uncores;
1156 }
1157 
1158 enum {
1159 	SNBEP_PCI_QPI_PORT0_FILTER,
1160 	SNBEP_PCI_QPI_PORT1_FILTER,
1161 	BDX_PCI_QPI_PORT2_FILTER,
1162 };
1163 
1164 static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1165 {
1166 	struct hw_perf_event *hwc = &event->hw;
1167 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1168 	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1169 
1170 	if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
1171 		reg1->idx = 0;
1172 		reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
1173 		reg1->config = event->attr.config1;
1174 		reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
1175 		reg2->config = event->attr.config2;
1176 	}
1177 	return 0;
1178 }
1179 
1180 static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1181 {
1182 	struct pci_dev *pdev = box->pci_dev;
1183 	struct hw_perf_event *hwc = &event->hw;
1184 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1185 	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1186 
1187 	if (reg1->idx != EXTRA_REG_NONE) {
1188 		int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
1189 		int die = box->dieid;
1190 		struct pci_dev *filter_pdev = uncore_extra_pci_dev[die].dev[idx];
1191 
1192 		if (filter_pdev) {
1193 			pci_write_config_dword(filter_pdev, reg1->reg,
1194 						(u32)reg1->config);
1195 			pci_write_config_dword(filter_pdev, reg1->reg + 4,
1196 						(u32)(reg1->config >> 32));
1197 			pci_write_config_dword(filter_pdev, reg2->reg,
1198 						(u32)reg2->config);
1199 			pci_write_config_dword(filter_pdev, reg2->reg + 4,
1200 						(u32)(reg2->config >> 32));
1201 		}
1202 	}
1203 
1204 	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
1205 }
1206 
1207 static struct intel_uncore_ops snbep_uncore_qpi_ops = {
1208 	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
1209 	.enable_event		= snbep_qpi_enable_event,
1210 	.hw_config		= snbep_qpi_hw_config,
1211 	.get_constraint		= uncore_get_constraint,
1212 	.put_constraint		= uncore_put_constraint,
1213 };
1214 
1215 #define SNBEP_UNCORE_PCI_COMMON_INIT()				\
1216 	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
1217 	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
1218 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,		\
1219 	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
1220 	.ops		= &snbep_uncore_pci_ops,		\
1221 	.format_group	= &snbep_uncore_format_group
1222 
1223 static struct intel_uncore_type snbep_uncore_ha = {
1224 	.name		= "ha",
1225 	.num_counters   = 4,
1226 	.num_boxes	= 1,
1227 	.perf_ctr_bits	= 48,
1228 	SNBEP_UNCORE_PCI_COMMON_INIT(),
1229 };
1230 
1231 static struct intel_uncore_type snbep_uncore_imc = {
1232 	.name		= "imc",
1233 	.num_counters   = 4,
1234 	.num_boxes	= 4,
1235 	.perf_ctr_bits	= 48,
1236 	.fixed_ctr_bits	= 48,
1237 	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
1238 	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
1239 	.event_descs	= snbep_uncore_imc_events,
1240 	SNBEP_UNCORE_PCI_COMMON_INIT(),
1241 };
1242 
1243 static struct intel_uncore_type snbep_uncore_qpi = {
1244 	.name			= "qpi",
1245 	.num_counters		= 4,
1246 	.num_boxes		= 2,
1247 	.perf_ctr_bits		= 48,
1248 	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
1249 	.event_ctl		= SNBEP_PCI_PMON_CTL0,
1250 	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
1251 	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
1252 	.num_shared_regs	= 1,
1253 	.ops			= &snbep_uncore_qpi_ops,
1254 	.event_descs		= snbep_uncore_qpi_events,
1255 	.format_group		= &snbep_uncore_qpi_format_group,
1256 };
1257 
1258 
1259 static struct intel_uncore_type snbep_uncore_r2pcie = {
1260 	.name		= "r2pcie",
1261 	.num_counters   = 4,
1262 	.num_boxes	= 1,
1263 	.perf_ctr_bits	= 44,
1264 	.constraints	= snbep_uncore_r2pcie_constraints,
1265 	SNBEP_UNCORE_PCI_COMMON_INIT(),
1266 };
1267 
1268 static struct intel_uncore_type snbep_uncore_r3qpi = {
1269 	.name		= "r3qpi",
1270 	.num_counters   = 3,
1271 	.num_boxes	= 2,
1272 	.perf_ctr_bits	= 44,
1273 	.constraints	= snbep_uncore_r3qpi_constraints,
1274 	SNBEP_UNCORE_PCI_COMMON_INIT(),
1275 };
1276 
1277 enum {
1278 	SNBEP_PCI_UNCORE_HA,
1279 	SNBEP_PCI_UNCORE_IMC,
1280 	SNBEP_PCI_UNCORE_QPI,
1281 	SNBEP_PCI_UNCORE_R2PCIE,
1282 	SNBEP_PCI_UNCORE_R3QPI,
1283 };
1284 
1285 static struct intel_uncore_type *snbep_pci_uncores[] = {
1286 	[SNBEP_PCI_UNCORE_HA]		= &snbep_uncore_ha,
1287 	[SNBEP_PCI_UNCORE_IMC]		= &snbep_uncore_imc,
1288 	[SNBEP_PCI_UNCORE_QPI]		= &snbep_uncore_qpi,
1289 	[SNBEP_PCI_UNCORE_R2PCIE]	= &snbep_uncore_r2pcie,
1290 	[SNBEP_PCI_UNCORE_R3QPI]	= &snbep_uncore_r3qpi,
1291 	NULL,
1292 };
1293 
1294 static const struct pci_device_id snbep_uncore_pci_ids[] = {
1295 	{ /* Home Agent */
1296 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
1297 		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
1298 	},
1299 	{ /* MC Channel 0 */
1300 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
1301 		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
1302 	},
1303 	{ /* MC Channel 1 */
1304 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
1305 		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
1306 	},
1307 	{ /* MC Channel 2 */
1308 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
1309 		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
1310 	},
1311 	{ /* MC Channel 3 */
1312 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
1313 		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
1314 	},
1315 	{ /* QPI Port 0 */
1316 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
1317 		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
1318 	},
1319 	{ /* QPI Port 1 */
1320 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
1321 		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
1322 	},
1323 	{ /* R2PCIe */
1324 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
1325 		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
1326 	},
1327 	{ /* R3QPI Link 0 */
1328 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
1329 		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
1330 	},
1331 	{ /* R3QPI Link 1 */
1332 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
1333 		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
1334 	},
1335 	{ /* QPI Port 0 filter  */
1336 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
1337 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
1338 						   SNBEP_PCI_QPI_PORT0_FILTER),
1339 	},
1340 	{ /* QPI Port 0 filter  */
1341 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
1342 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
1343 						   SNBEP_PCI_QPI_PORT1_FILTER),
1344 	},
1345 	{ /* end: all zeroes */ }
1346 };
1347 
1348 static struct pci_driver snbep_uncore_pci_driver = {
1349 	.name		= "snbep_uncore",
1350 	.id_table	= snbep_uncore_pci_ids,
1351 };
1352 
1353 #define NODE_ID_MASK	0x7
1354 
1355 /*
1356  * build pci bus to socket mapping
1357  */
1358 static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool reverse)
1359 {
1360 	struct pci_dev *ubox_dev = NULL;
1361 	int i, bus, nodeid, segment, die_id;
1362 	struct pci2phy_map *map;
1363 	int err = 0;
1364 	u32 config = 0;
1365 
1366 	while (1) {
1367 		/* find the UBOX device */
1368 		ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
1369 		if (!ubox_dev)
1370 			break;
1371 		bus = ubox_dev->bus->number;
1372 		/*
1373 		 * The nodeid and idmap registers only contain enough
1374 		 * information to handle 8 nodes.  On systems with more
1375 		 * than 8 nodes, we need to rely on NUMA information,
1376 		 * filled in from BIOS supplied information, to determine
1377 		 * the topology.
1378 		 */
1379 		if (nr_node_ids <= 8) {
1380 			/* get the Node ID of the local register */
1381 			err = pci_read_config_dword(ubox_dev, nodeid_loc, &config);
1382 			if (err)
1383 				break;
1384 			nodeid = config & NODE_ID_MASK;
1385 			/* get the Node ID mapping */
1386 			err = pci_read_config_dword(ubox_dev, idmap_loc, &config);
1387 			if (err)
1388 				break;
1389 
1390 			segment = pci_domain_nr(ubox_dev->bus);
1391 			raw_spin_lock(&pci2phy_map_lock);
1392 			map = __find_pci2phy_map(segment);
1393 			if (!map) {
1394 				raw_spin_unlock(&pci2phy_map_lock);
1395 				err = -ENOMEM;
1396 				break;
1397 			}
1398 
1399 			/*
1400 			 * every three bits in the Node ID mapping register maps
1401 			 * to a particular node.
1402 			 */
1403 			for (i = 0; i < 8; i++) {
1404 				if (nodeid == ((config >> (3 * i)) & 0x7)) {
1405 					if (topology_max_die_per_package() > 1)
1406 						die_id = i;
1407 					else
1408 						die_id = topology_phys_to_logical_pkg(i);
1409 					map->pbus_to_dieid[bus] = die_id;
1410 					break;
1411 				}
1412 			}
1413 			raw_spin_unlock(&pci2phy_map_lock);
1414 		} else {
1415 			int node = pcibus_to_node(ubox_dev->bus);
1416 			int cpu;
1417 
1418 			segment = pci_domain_nr(ubox_dev->bus);
1419 			raw_spin_lock(&pci2phy_map_lock);
1420 			map = __find_pci2phy_map(segment);
1421 			if (!map) {
1422 				raw_spin_unlock(&pci2phy_map_lock);
1423 				err = -ENOMEM;
1424 				break;
1425 			}
1426 
1427 			die_id = -1;
1428 			for_each_cpu(cpu, cpumask_of_pcibus(ubox_dev->bus)) {
1429 				struct cpuinfo_x86 *c = &cpu_data(cpu);
1430 
1431 				if (c->initialized && cpu_to_node(cpu) == node) {
1432 					map->pbus_to_dieid[bus] = die_id = c->logical_die_id;
1433 					break;
1434 				}
1435 			}
1436 			raw_spin_unlock(&pci2phy_map_lock);
1437 
1438 			if (WARN_ON_ONCE(die_id == -1)) {
1439 				err = -EINVAL;
1440 				break;
1441 			}
1442 		}
1443 	}
1444 
1445 	if (!err) {
1446 		/*
1447 		 * For PCI bus with no UBOX device, find the next bus
1448 		 * that has UBOX device and use its mapping.
1449 		 */
1450 		raw_spin_lock(&pci2phy_map_lock);
1451 		list_for_each_entry(map, &pci2phy_map_head, list) {
1452 			i = -1;
1453 			if (reverse) {
1454 				for (bus = 255; bus >= 0; bus--) {
1455 					if (map->pbus_to_dieid[bus] >= 0)
1456 						i = map->pbus_to_dieid[bus];
1457 					else
1458 						map->pbus_to_dieid[bus] = i;
1459 				}
1460 			} else {
1461 				for (bus = 0; bus <= 255; bus++) {
1462 					if (map->pbus_to_dieid[bus] >= 0)
1463 						i = map->pbus_to_dieid[bus];
1464 					else
1465 						map->pbus_to_dieid[bus] = i;
1466 				}
1467 			}
1468 		}
1469 		raw_spin_unlock(&pci2phy_map_lock);
1470 	}
1471 
1472 	pci_dev_put(ubox_dev);
1473 
1474 	return err ? pcibios_err_to_errno(err) : 0;
1475 }
1476 
1477 int snbep_uncore_pci_init(void)
1478 {
1479 	int ret = snbep_pci2phy_map_init(0x3ce0, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
1480 	if (ret)
1481 		return ret;
1482 	uncore_pci_uncores = snbep_pci_uncores;
1483 	uncore_pci_driver = &snbep_uncore_pci_driver;
1484 	return 0;
1485 }
1486 /* end of Sandy Bridge-EP uncore support */
1487 
1488 /* IvyTown uncore support */
1489 static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box)
1490 {
1491 	unsigned msr = uncore_msr_box_ctl(box);
1492 	if (msr)
1493 		wrmsrl(msr, IVBEP_PMON_BOX_CTL_INT);
1494 }
1495 
1496 static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box)
1497 {
1498 	struct pci_dev *pdev = box->pci_dev;
1499 
1500 	pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
1501 }
1502 
1503 #define IVBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
1504 	.init_box	= ivbep_uncore_msr_init_box,		\
1505 	.disable_box	= snbep_uncore_msr_disable_box,		\
1506 	.enable_box	= snbep_uncore_msr_enable_box,		\
1507 	.disable_event	= snbep_uncore_msr_disable_event,	\
1508 	.enable_event	= snbep_uncore_msr_enable_event,	\
1509 	.read_counter	= uncore_msr_read_counter
1510 
1511 static struct intel_uncore_ops ivbep_uncore_msr_ops = {
1512 	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
1513 };
1514 
1515 static struct intel_uncore_ops ivbep_uncore_pci_ops = {
1516 	.init_box	= ivbep_uncore_pci_init_box,
1517 	.disable_box	= snbep_uncore_pci_disable_box,
1518 	.enable_box	= snbep_uncore_pci_enable_box,
1519 	.disable_event	= snbep_uncore_pci_disable_event,
1520 	.enable_event	= snbep_uncore_pci_enable_event,
1521 	.read_counter	= snbep_uncore_pci_read_counter,
1522 };
1523 
1524 #define IVBEP_UNCORE_PCI_COMMON_INIT()				\
1525 	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
1526 	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
1527 	.event_mask	= IVBEP_PMON_RAW_EVENT_MASK,		\
1528 	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
1529 	.ops		= &ivbep_uncore_pci_ops,			\
1530 	.format_group	= &ivbep_uncore_format_group
1531 
1532 static struct attribute *ivbep_uncore_formats_attr[] = {
1533 	&format_attr_event.attr,
1534 	&format_attr_umask.attr,
1535 	&format_attr_edge.attr,
1536 	&format_attr_inv.attr,
1537 	&format_attr_thresh8.attr,
1538 	NULL,
1539 };
1540 
1541 static struct attribute *ivbep_uncore_ubox_formats_attr[] = {
1542 	&format_attr_event.attr,
1543 	&format_attr_umask.attr,
1544 	&format_attr_edge.attr,
1545 	&format_attr_inv.attr,
1546 	&format_attr_thresh5.attr,
1547 	NULL,
1548 };
1549 
1550 static struct attribute *ivbep_uncore_cbox_formats_attr[] = {
1551 	&format_attr_event.attr,
1552 	&format_attr_umask.attr,
1553 	&format_attr_edge.attr,
1554 	&format_attr_tid_en.attr,
1555 	&format_attr_thresh8.attr,
1556 	&format_attr_filter_tid.attr,
1557 	&format_attr_filter_link.attr,
1558 	&format_attr_filter_state2.attr,
1559 	&format_attr_filter_nid2.attr,
1560 	&format_attr_filter_opc2.attr,
1561 	&format_attr_filter_nc.attr,
1562 	&format_attr_filter_c6.attr,
1563 	&format_attr_filter_isoc.attr,
1564 	NULL,
1565 };
1566 
1567 static struct attribute *ivbep_uncore_pcu_formats_attr[] = {
1568 	&format_attr_event.attr,
1569 	&format_attr_occ_sel.attr,
1570 	&format_attr_edge.attr,
1571 	&format_attr_thresh5.attr,
1572 	&format_attr_occ_invert.attr,
1573 	&format_attr_occ_edge.attr,
1574 	&format_attr_filter_band0.attr,
1575 	&format_attr_filter_band1.attr,
1576 	&format_attr_filter_band2.attr,
1577 	&format_attr_filter_band3.attr,
1578 	NULL,
1579 };
1580 
1581 static struct attribute *ivbep_uncore_qpi_formats_attr[] = {
1582 	&format_attr_event_ext.attr,
1583 	&format_attr_umask.attr,
1584 	&format_attr_edge.attr,
1585 	&format_attr_thresh8.attr,
1586 	&format_attr_match_rds.attr,
1587 	&format_attr_match_rnid30.attr,
1588 	&format_attr_match_rnid4.attr,
1589 	&format_attr_match_dnid.attr,
1590 	&format_attr_match_mc.attr,
1591 	&format_attr_match_opc.attr,
1592 	&format_attr_match_vnw.attr,
1593 	&format_attr_match0.attr,
1594 	&format_attr_match1.attr,
1595 	&format_attr_mask_rds.attr,
1596 	&format_attr_mask_rnid30.attr,
1597 	&format_attr_mask_rnid4.attr,
1598 	&format_attr_mask_dnid.attr,
1599 	&format_attr_mask_mc.attr,
1600 	&format_attr_mask_opc.attr,
1601 	&format_attr_mask_vnw.attr,
1602 	&format_attr_mask0.attr,
1603 	&format_attr_mask1.attr,
1604 	NULL,
1605 };
1606 
1607 static const struct attribute_group ivbep_uncore_format_group = {
1608 	.name = "format",
1609 	.attrs = ivbep_uncore_formats_attr,
1610 };
1611 
1612 static const struct attribute_group ivbep_uncore_ubox_format_group = {
1613 	.name = "format",
1614 	.attrs = ivbep_uncore_ubox_formats_attr,
1615 };
1616 
1617 static const struct attribute_group ivbep_uncore_cbox_format_group = {
1618 	.name = "format",
1619 	.attrs = ivbep_uncore_cbox_formats_attr,
1620 };
1621 
1622 static const struct attribute_group ivbep_uncore_pcu_format_group = {
1623 	.name = "format",
1624 	.attrs = ivbep_uncore_pcu_formats_attr,
1625 };
1626 
1627 static const struct attribute_group ivbep_uncore_qpi_format_group = {
1628 	.name = "format",
1629 	.attrs = ivbep_uncore_qpi_formats_attr,
1630 };
1631 
1632 static struct intel_uncore_type ivbep_uncore_ubox = {
1633 	.name		= "ubox",
1634 	.num_counters   = 2,
1635 	.num_boxes	= 1,
1636 	.perf_ctr_bits	= 44,
1637 	.fixed_ctr_bits	= 48,
1638 	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
1639 	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
1640 	.event_mask	= IVBEP_U_MSR_PMON_RAW_EVENT_MASK,
1641 	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
1642 	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
1643 	.ops		= &ivbep_uncore_msr_ops,
1644 	.format_group	= &ivbep_uncore_ubox_format_group,
1645 };
1646 
1647 static struct extra_reg ivbep_uncore_cbox_extra_regs[] = {
1648 	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
1649 				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
1650 	SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
1651 	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
1652 	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
1653 	SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
1654 	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
1655 	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
1656 	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
1657 	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
1658 	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
1659 	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
1660 	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
1661 	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
1662 	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
1663 	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
1664 	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
1665 	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
1666 	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
1667 	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
1668 	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
1669 	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
1670 	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
1671 	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
1672 	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
1673 	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
1674 	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
1675 	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
1676 	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
1677 	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
1678 	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
1679 	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
1680 	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
1681 	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
1682 	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
1683 	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
1684 	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
1685 	EVENT_EXTRA_END
1686 };
1687 
1688 static u64 ivbep_cbox_filter_mask(int fields)
1689 {
1690 	u64 mask = 0;
1691 
1692 	if (fields & 0x1)
1693 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_TID;
1694 	if (fields & 0x2)
1695 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK;
1696 	if (fields & 0x4)
1697 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
1698 	if (fields & 0x8)
1699 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NID;
1700 	if (fields & 0x10) {
1701 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
1702 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NC;
1703 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_C6;
1704 		mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
1705 	}
1706 
1707 	return mask;
1708 }
1709 
1710 static struct event_constraint *
1711 ivbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
1712 {
1713 	return __snbep_cbox_get_constraint(box, event, ivbep_cbox_filter_mask);
1714 }
1715 
1716 static int ivbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1717 {
1718 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1719 	struct extra_reg *er;
1720 	int idx = 0;
1721 
1722 	for (er = ivbep_uncore_cbox_extra_regs; er->msr; er++) {
1723 		if (er->event != (event->hw.config & er->config_mask))
1724 			continue;
1725 		idx |= er->idx;
1726 	}
1727 
1728 	if (idx) {
1729 		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
1730 			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1731 		reg1->config = event->attr.config1 & ivbep_cbox_filter_mask(idx);
1732 		reg1->idx = idx;
1733 	}
1734 	return 0;
1735 }
1736 
1737 static void ivbep_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1738 {
1739 	struct hw_perf_event *hwc = &event->hw;
1740 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1741 
1742 	if (reg1->idx != EXTRA_REG_NONE) {
1743 		u64 filter = uncore_shared_reg_config(box, 0);
1744 		wrmsrl(reg1->reg, filter & 0xffffffff);
1745 		wrmsrl(reg1->reg + 6, filter >> 32);
1746 	}
1747 
1748 	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
1749 }
1750 
1751 static struct intel_uncore_ops ivbep_uncore_cbox_ops = {
1752 	.init_box		= ivbep_uncore_msr_init_box,
1753 	.disable_box		= snbep_uncore_msr_disable_box,
1754 	.enable_box		= snbep_uncore_msr_enable_box,
1755 	.disable_event		= snbep_uncore_msr_disable_event,
1756 	.enable_event		= ivbep_cbox_enable_event,
1757 	.read_counter		= uncore_msr_read_counter,
1758 	.hw_config		= ivbep_cbox_hw_config,
1759 	.get_constraint		= ivbep_cbox_get_constraint,
1760 	.put_constraint		= snbep_cbox_put_constraint,
1761 };
1762 
1763 static struct intel_uncore_type ivbep_uncore_cbox = {
1764 	.name			= "cbox",
1765 	.num_counters		= 4,
1766 	.num_boxes		= 15,
1767 	.perf_ctr_bits		= 44,
1768 	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
1769 	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
1770 	.event_mask		= IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
1771 	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
1772 	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
1773 	.num_shared_regs	= 1,
1774 	.constraints		= snbep_uncore_cbox_constraints,
1775 	.ops			= &ivbep_uncore_cbox_ops,
1776 	.format_group		= &ivbep_uncore_cbox_format_group,
1777 };
1778 
1779 static struct intel_uncore_ops ivbep_uncore_pcu_ops = {
1780 	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
1781 	.hw_config		= snbep_pcu_hw_config,
1782 	.get_constraint		= snbep_pcu_get_constraint,
1783 	.put_constraint		= snbep_pcu_put_constraint,
1784 };
1785 
1786 static struct intel_uncore_type ivbep_uncore_pcu = {
1787 	.name			= "pcu",
1788 	.num_counters		= 4,
1789 	.num_boxes		= 1,
1790 	.perf_ctr_bits		= 48,
1791 	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
1792 	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
1793 	.event_mask		= IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
1794 	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
1795 	.num_shared_regs	= 1,
1796 	.ops			= &ivbep_uncore_pcu_ops,
1797 	.format_group		= &ivbep_uncore_pcu_format_group,
1798 };
1799 
1800 static struct intel_uncore_type *ivbep_msr_uncores[] = {
1801 	&ivbep_uncore_ubox,
1802 	&ivbep_uncore_cbox,
1803 	&ivbep_uncore_pcu,
1804 	NULL,
1805 };
1806 
1807 void ivbep_uncore_cpu_init(void)
1808 {
1809 	if (ivbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
1810 		ivbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
1811 	uncore_msr_uncores = ivbep_msr_uncores;
1812 }
1813 
1814 static struct intel_uncore_type ivbep_uncore_ha = {
1815 	.name		= "ha",
1816 	.num_counters   = 4,
1817 	.num_boxes	= 2,
1818 	.perf_ctr_bits	= 48,
1819 	IVBEP_UNCORE_PCI_COMMON_INIT(),
1820 };
1821 
1822 static struct intel_uncore_type ivbep_uncore_imc = {
1823 	.name		= "imc",
1824 	.num_counters   = 4,
1825 	.num_boxes	= 8,
1826 	.perf_ctr_bits	= 48,
1827 	.fixed_ctr_bits	= 48,
1828 	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
1829 	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
1830 	.event_descs	= snbep_uncore_imc_events,
1831 	IVBEP_UNCORE_PCI_COMMON_INIT(),
1832 };
1833 
1834 /* registers in IRP boxes are not properly aligned */
1835 static unsigned ivbep_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
1836 static unsigned ivbep_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};
1837 
1838 static void ivbep_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1839 {
1840 	struct pci_dev *pdev = box->pci_dev;
1841 	struct hw_perf_event *hwc = &event->hw;
1842 
1843 	pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx],
1844 			       hwc->config | SNBEP_PMON_CTL_EN);
1845 }
1846 
1847 static void ivbep_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1848 {
1849 	struct pci_dev *pdev = box->pci_dev;
1850 	struct hw_perf_event *hwc = &event->hw;
1851 
1852 	pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], hwc->config);
1853 }
1854 
1855 static u64 ivbep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
1856 {
1857 	struct pci_dev *pdev = box->pci_dev;
1858 	struct hw_perf_event *hwc = &event->hw;
1859 	u64 count = 0;
1860 
1861 	pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
1862 	pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
1863 
1864 	return count;
1865 }
1866 
1867 static struct intel_uncore_ops ivbep_uncore_irp_ops = {
1868 	.init_box	= ivbep_uncore_pci_init_box,
1869 	.disable_box	= snbep_uncore_pci_disable_box,
1870 	.enable_box	= snbep_uncore_pci_enable_box,
1871 	.disable_event	= ivbep_uncore_irp_disable_event,
1872 	.enable_event	= ivbep_uncore_irp_enable_event,
1873 	.read_counter	= ivbep_uncore_irp_read_counter,
1874 };
1875 
1876 static struct intel_uncore_type ivbep_uncore_irp = {
1877 	.name			= "irp",
1878 	.num_counters		= 4,
1879 	.num_boxes		= 1,
1880 	.perf_ctr_bits		= 48,
1881 	.event_mask		= IVBEP_PMON_RAW_EVENT_MASK,
1882 	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
1883 	.ops			= &ivbep_uncore_irp_ops,
1884 	.format_group		= &ivbep_uncore_format_group,
1885 };
1886 
1887 static struct intel_uncore_ops ivbep_uncore_qpi_ops = {
1888 	.init_box	= ivbep_uncore_pci_init_box,
1889 	.disable_box	= snbep_uncore_pci_disable_box,
1890 	.enable_box	= snbep_uncore_pci_enable_box,
1891 	.disable_event	= snbep_uncore_pci_disable_event,
1892 	.enable_event	= snbep_qpi_enable_event,
1893 	.read_counter	= snbep_uncore_pci_read_counter,
1894 	.hw_config	= snbep_qpi_hw_config,
1895 	.get_constraint	= uncore_get_constraint,
1896 	.put_constraint	= uncore_put_constraint,
1897 };
1898 
1899 static struct intel_uncore_type ivbep_uncore_qpi = {
1900 	.name			= "qpi",
1901 	.num_counters		= 4,
1902 	.num_boxes		= 3,
1903 	.perf_ctr_bits		= 48,
1904 	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
1905 	.event_ctl		= SNBEP_PCI_PMON_CTL0,
1906 	.event_mask		= IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
1907 	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
1908 	.num_shared_regs	= 1,
1909 	.ops			= &ivbep_uncore_qpi_ops,
1910 	.format_group		= &ivbep_uncore_qpi_format_group,
1911 };
1912 
1913 static struct intel_uncore_type ivbep_uncore_r2pcie = {
1914 	.name		= "r2pcie",
1915 	.num_counters   = 4,
1916 	.num_boxes	= 1,
1917 	.perf_ctr_bits	= 44,
1918 	.constraints	= snbep_uncore_r2pcie_constraints,
1919 	IVBEP_UNCORE_PCI_COMMON_INIT(),
1920 };
1921 
1922 static struct intel_uncore_type ivbep_uncore_r3qpi = {
1923 	.name		= "r3qpi",
1924 	.num_counters   = 3,
1925 	.num_boxes	= 2,
1926 	.perf_ctr_bits	= 44,
1927 	.constraints	= snbep_uncore_r3qpi_constraints,
1928 	IVBEP_UNCORE_PCI_COMMON_INIT(),
1929 };
1930 
1931 enum {
1932 	IVBEP_PCI_UNCORE_HA,
1933 	IVBEP_PCI_UNCORE_IMC,
1934 	IVBEP_PCI_UNCORE_IRP,
1935 	IVBEP_PCI_UNCORE_QPI,
1936 	IVBEP_PCI_UNCORE_R2PCIE,
1937 	IVBEP_PCI_UNCORE_R3QPI,
1938 };
1939 
1940 static struct intel_uncore_type *ivbep_pci_uncores[] = {
1941 	[IVBEP_PCI_UNCORE_HA]	= &ivbep_uncore_ha,
1942 	[IVBEP_PCI_UNCORE_IMC]	= &ivbep_uncore_imc,
1943 	[IVBEP_PCI_UNCORE_IRP]	= &ivbep_uncore_irp,
1944 	[IVBEP_PCI_UNCORE_QPI]	= &ivbep_uncore_qpi,
1945 	[IVBEP_PCI_UNCORE_R2PCIE]	= &ivbep_uncore_r2pcie,
1946 	[IVBEP_PCI_UNCORE_R3QPI]	= &ivbep_uncore_r3qpi,
1947 	NULL,
1948 };
1949 
1950 static const struct pci_device_id ivbep_uncore_pci_ids[] = {
1951 	{ /* Home Agent 0 */
1952 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
1953 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 0),
1954 	},
1955 	{ /* Home Agent 1 */
1956 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
1957 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 1),
1958 	},
1959 	{ /* MC0 Channel 0 */
1960 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
1961 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 0),
1962 	},
1963 	{ /* MC0 Channel 1 */
1964 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
1965 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 1),
1966 	},
1967 	{ /* MC0 Channel 3 */
1968 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
1969 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 2),
1970 	},
1971 	{ /* MC0 Channel 4 */
1972 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
1973 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 3),
1974 	},
1975 	{ /* MC1 Channel 0 */
1976 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
1977 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 4),
1978 	},
1979 	{ /* MC1 Channel 1 */
1980 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
1981 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 5),
1982 	},
1983 	{ /* MC1 Channel 3 */
1984 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
1985 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 6),
1986 	},
1987 	{ /* MC1 Channel 4 */
1988 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
1989 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 7),
1990 	},
1991 	{ /* IRP */
1992 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
1993 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IRP, 0),
1994 	},
1995 	{ /* QPI0 Port 0 */
1996 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
1997 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 0),
1998 	},
1999 	{ /* QPI0 Port 1 */
2000 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
2001 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 1),
2002 	},
2003 	{ /* QPI1 Port 2 */
2004 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
2005 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 2),
2006 	},
2007 	{ /* R2PCIe */
2008 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
2009 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R2PCIE, 0),
2010 	},
2011 	{ /* R3QPI0 Link 0 */
2012 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
2013 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 0),
2014 	},
2015 	{ /* R3QPI0 Link 1 */
2016 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
2017 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 1),
2018 	},
2019 	{ /* R3QPI1 Link 2 */
2020 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
2021 		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 2),
2022 	},
2023 	{ /* QPI Port 0 filter  */
2024 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
2025 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
2026 						   SNBEP_PCI_QPI_PORT0_FILTER),
2027 	},
2028 	{ /* QPI Port 0 filter  */
2029 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
2030 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
2031 						   SNBEP_PCI_QPI_PORT1_FILTER),
2032 	},
2033 	{ /* end: all zeroes */ }
2034 };
2035 
2036 static struct pci_driver ivbep_uncore_pci_driver = {
2037 	.name		= "ivbep_uncore",
2038 	.id_table	= ivbep_uncore_pci_ids,
2039 };
2040 
2041 int ivbep_uncore_pci_init(void)
2042 {
2043 	int ret = snbep_pci2phy_map_init(0x0e1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
2044 	if (ret)
2045 		return ret;
2046 	uncore_pci_uncores = ivbep_pci_uncores;
2047 	uncore_pci_driver = &ivbep_uncore_pci_driver;
2048 	return 0;
2049 }
2050 /* end of IvyTown uncore support */
2051 
2052 /* KNL uncore support */
2053 static struct attribute *knl_uncore_ubox_formats_attr[] = {
2054 	&format_attr_event.attr,
2055 	&format_attr_umask.attr,
2056 	&format_attr_edge.attr,
2057 	&format_attr_tid_en.attr,
2058 	&format_attr_inv.attr,
2059 	&format_attr_thresh5.attr,
2060 	NULL,
2061 };
2062 
2063 static const struct attribute_group knl_uncore_ubox_format_group = {
2064 	.name = "format",
2065 	.attrs = knl_uncore_ubox_formats_attr,
2066 };
2067 
2068 static struct intel_uncore_type knl_uncore_ubox = {
2069 	.name			= "ubox",
2070 	.num_counters		= 2,
2071 	.num_boxes		= 1,
2072 	.perf_ctr_bits		= 48,
2073 	.fixed_ctr_bits		= 48,
2074 	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
2075 	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
2076 	.event_mask		= KNL_U_MSR_PMON_RAW_EVENT_MASK,
2077 	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
2078 	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
2079 	.ops			= &snbep_uncore_msr_ops,
2080 	.format_group		= &knl_uncore_ubox_format_group,
2081 };
2082 
2083 static struct attribute *knl_uncore_cha_formats_attr[] = {
2084 	&format_attr_event.attr,
2085 	&format_attr_umask.attr,
2086 	&format_attr_qor.attr,
2087 	&format_attr_edge.attr,
2088 	&format_attr_tid_en.attr,
2089 	&format_attr_inv.attr,
2090 	&format_attr_thresh8.attr,
2091 	&format_attr_filter_tid4.attr,
2092 	&format_attr_filter_link3.attr,
2093 	&format_attr_filter_state4.attr,
2094 	&format_attr_filter_local.attr,
2095 	&format_attr_filter_all_op.attr,
2096 	&format_attr_filter_nnm.attr,
2097 	&format_attr_filter_opc3.attr,
2098 	&format_attr_filter_nc.attr,
2099 	&format_attr_filter_isoc.attr,
2100 	NULL,
2101 };
2102 
2103 static const struct attribute_group knl_uncore_cha_format_group = {
2104 	.name = "format",
2105 	.attrs = knl_uncore_cha_formats_attr,
2106 };
2107 
2108 static struct event_constraint knl_uncore_cha_constraints[] = {
2109 	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
2110 	UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
2111 	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
2112 	EVENT_CONSTRAINT_END
2113 };
2114 
2115 static struct extra_reg knl_uncore_cha_extra_regs[] = {
2116 	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
2117 				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
2118 	SNBEP_CBO_EVENT_EXTRA_REG(0x3d, 0xff, 0x2),
2119 	SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x4),
2120 	SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x4),
2121 	EVENT_EXTRA_END
2122 };
2123 
2124 static u64 knl_cha_filter_mask(int fields)
2125 {
2126 	u64 mask = 0;
2127 
2128 	if (fields & 0x1)
2129 		mask |= KNL_CHA_MSR_PMON_BOX_FILTER_TID;
2130 	if (fields & 0x2)
2131 		mask |= KNL_CHA_MSR_PMON_BOX_FILTER_STATE;
2132 	if (fields & 0x4)
2133 		mask |= KNL_CHA_MSR_PMON_BOX_FILTER_OP;
2134 	return mask;
2135 }
2136 
2137 static struct event_constraint *
2138 knl_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
2139 {
2140 	return __snbep_cbox_get_constraint(box, event, knl_cha_filter_mask);
2141 }
2142 
2143 static int knl_cha_hw_config(struct intel_uncore_box *box,
2144 			     struct perf_event *event)
2145 {
2146 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2147 	struct extra_reg *er;
2148 	int idx = 0;
2149 
2150 	for (er = knl_uncore_cha_extra_regs; er->msr; er++) {
2151 		if (er->event != (event->hw.config & er->config_mask))
2152 			continue;
2153 		idx |= er->idx;
2154 	}
2155 
2156 	if (idx) {
2157 		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
2158 			    KNL_CHA_MSR_OFFSET * box->pmu->pmu_idx;
2159 		reg1->config = event->attr.config1 & knl_cha_filter_mask(idx);
2160 
2161 		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE;
2162 		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE;
2163 		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_NNC;
2164 		reg1->idx = idx;
2165 	}
2166 	return 0;
2167 }
2168 
2169 static void hswep_cbox_enable_event(struct intel_uncore_box *box,
2170 				    struct perf_event *event);
2171 
2172 static struct intel_uncore_ops knl_uncore_cha_ops = {
2173 	.init_box		= snbep_uncore_msr_init_box,
2174 	.disable_box		= snbep_uncore_msr_disable_box,
2175 	.enable_box		= snbep_uncore_msr_enable_box,
2176 	.disable_event		= snbep_uncore_msr_disable_event,
2177 	.enable_event		= hswep_cbox_enable_event,
2178 	.read_counter		= uncore_msr_read_counter,
2179 	.hw_config		= knl_cha_hw_config,
2180 	.get_constraint		= knl_cha_get_constraint,
2181 	.put_constraint		= snbep_cbox_put_constraint,
2182 };
2183 
2184 static struct intel_uncore_type knl_uncore_cha = {
2185 	.name			= "cha",
2186 	.num_counters		= 4,
2187 	.num_boxes		= 38,
2188 	.perf_ctr_bits		= 48,
2189 	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
2190 	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
2191 	.event_mask		= KNL_CHA_MSR_PMON_RAW_EVENT_MASK,
2192 	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
2193 	.msr_offset		= KNL_CHA_MSR_OFFSET,
2194 	.num_shared_regs	= 1,
2195 	.constraints		= knl_uncore_cha_constraints,
2196 	.ops			= &knl_uncore_cha_ops,
2197 	.format_group		= &knl_uncore_cha_format_group,
2198 };
2199 
2200 static struct attribute *knl_uncore_pcu_formats_attr[] = {
2201 	&format_attr_event2.attr,
2202 	&format_attr_use_occ_ctr.attr,
2203 	&format_attr_occ_sel.attr,
2204 	&format_attr_edge.attr,
2205 	&format_attr_tid_en.attr,
2206 	&format_attr_inv.attr,
2207 	&format_attr_thresh6.attr,
2208 	&format_attr_occ_invert.attr,
2209 	&format_attr_occ_edge_det.attr,
2210 	NULL,
2211 };
2212 
2213 static const struct attribute_group knl_uncore_pcu_format_group = {
2214 	.name = "format",
2215 	.attrs = knl_uncore_pcu_formats_attr,
2216 };
2217 
2218 static struct intel_uncore_type knl_uncore_pcu = {
2219 	.name			= "pcu",
2220 	.num_counters		= 4,
2221 	.num_boxes		= 1,
2222 	.perf_ctr_bits		= 48,
2223 	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
2224 	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
2225 	.event_mask		= KNL_PCU_MSR_PMON_RAW_EVENT_MASK,
2226 	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
2227 	.ops			= &snbep_uncore_msr_ops,
2228 	.format_group		= &knl_uncore_pcu_format_group,
2229 };
2230 
2231 static struct intel_uncore_type *knl_msr_uncores[] = {
2232 	&knl_uncore_ubox,
2233 	&knl_uncore_cha,
2234 	&knl_uncore_pcu,
2235 	NULL,
2236 };
2237 
2238 void knl_uncore_cpu_init(void)
2239 {
2240 	uncore_msr_uncores = knl_msr_uncores;
2241 }
2242 
2243 static void knl_uncore_imc_enable_box(struct intel_uncore_box *box)
2244 {
2245 	struct pci_dev *pdev = box->pci_dev;
2246 	int box_ctl = uncore_pci_box_ctl(box);
2247 
2248 	pci_write_config_dword(pdev, box_ctl, 0);
2249 }
2250 
2251 static void knl_uncore_imc_enable_event(struct intel_uncore_box *box,
2252 					struct perf_event *event)
2253 {
2254 	struct pci_dev *pdev = box->pci_dev;
2255 	struct hw_perf_event *hwc = &event->hw;
2256 
2257 	if ((event->attr.config & SNBEP_PMON_CTL_EV_SEL_MASK)
2258 							== UNCORE_FIXED_EVENT)
2259 		pci_write_config_dword(pdev, hwc->config_base,
2260 				       hwc->config | KNL_PMON_FIXED_CTL_EN);
2261 	else
2262 		pci_write_config_dword(pdev, hwc->config_base,
2263 				       hwc->config | SNBEP_PMON_CTL_EN);
2264 }
2265 
2266 static struct intel_uncore_ops knl_uncore_imc_ops = {
2267 	.init_box	= snbep_uncore_pci_init_box,
2268 	.disable_box	= snbep_uncore_pci_disable_box,
2269 	.enable_box	= knl_uncore_imc_enable_box,
2270 	.read_counter	= snbep_uncore_pci_read_counter,
2271 	.enable_event	= knl_uncore_imc_enable_event,
2272 	.disable_event	= snbep_uncore_pci_disable_event,
2273 };
2274 
2275 static struct intel_uncore_type knl_uncore_imc_uclk = {
2276 	.name			= "imc_uclk",
2277 	.num_counters		= 4,
2278 	.num_boxes		= 2,
2279 	.perf_ctr_bits		= 48,
2280 	.fixed_ctr_bits		= 48,
2281 	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
2282 	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
2283 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
2284 	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
2285 	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
2286 	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
2287 	.ops			= &knl_uncore_imc_ops,
2288 	.format_group		= &snbep_uncore_format_group,
2289 };
2290 
2291 static struct intel_uncore_type knl_uncore_imc_dclk = {
2292 	.name			= "imc",
2293 	.num_counters		= 4,
2294 	.num_boxes		= 6,
2295 	.perf_ctr_bits		= 48,
2296 	.fixed_ctr_bits		= 48,
2297 	.perf_ctr		= KNL_MC0_CH0_MSR_PMON_CTR0_LOW,
2298 	.event_ctl		= KNL_MC0_CH0_MSR_PMON_CTL0,
2299 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
2300 	.fixed_ctr		= KNL_MC0_CH0_MSR_PMON_FIXED_LOW,
2301 	.fixed_ctl		= KNL_MC0_CH0_MSR_PMON_FIXED_CTL,
2302 	.box_ctl		= KNL_MC0_CH0_MSR_PMON_BOX_CTL,
2303 	.ops			= &knl_uncore_imc_ops,
2304 	.format_group		= &snbep_uncore_format_group,
2305 };
2306 
2307 static struct intel_uncore_type knl_uncore_edc_uclk = {
2308 	.name			= "edc_uclk",
2309 	.num_counters		= 4,
2310 	.num_boxes		= 8,
2311 	.perf_ctr_bits		= 48,
2312 	.fixed_ctr_bits		= 48,
2313 	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
2314 	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
2315 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
2316 	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
2317 	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
2318 	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
2319 	.ops			= &knl_uncore_imc_ops,
2320 	.format_group		= &snbep_uncore_format_group,
2321 };
2322 
2323 static struct intel_uncore_type knl_uncore_edc_eclk = {
2324 	.name			= "edc_eclk",
2325 	.num_counters		= 4,
2326 	.num_boxes		= 8,
2327 	.perf_ctr_bits		= 48,
2328 	.fixed_ctr_bits		= 48,
2329 	.perf_ctr		= KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW,
2330 	.event_ctl		= KNL_EDC0_ECLK_MSR_PMON_CTL0,
2331 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
2332 	.fixed_ctr		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW,
2333 	.fixed_ctl		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL,
2334 	.box_ctl		= KNL_EDC0_ECLK_MSR_PMON_BOX_CTL,
2335 	.ops			= &knl_uncore_imc_ops,
2336 	.format_group		= &snbep_uncore_format_group,
2337 };
2338 
2339 static struct event_constraint knl_uncore_m2pcie_constraints[] = {
2340 	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
2341 	EVENT_CONSTRAINT_END
2342 };
2343 
2344 static struct intel_uncore_type knl_uncore_m2pcie = {
2345 	.name		= "m2pcie",
2346 	.num_counters   = 4,
2347 	.num_boxes	= 1,
2348 	.perf_ctr_bits	= 48,
2349 	.constraints	= knl_uncore_m2pcie_constraints,
2350 	SNBEP_UNCORE_PCI_COMMON_INIT(),
2351 };
2352 
2353 static struct attribute *knl_uncore_irp_formats_attr[] = {
2354 	&format_attr_event.attr,
2355 	&format_attr_umask.attr,
2356 	&format_attr_qor.attr,
2357 	&format_attr_edge.attr,
2358 	&format_attr_inv.attr,
2359 	&format_attr_thresh8.attr,
2360 	NULL,
2361 };
2362 
2363 static const struct attribute_group knl_uncore_irp_format_group = {
2364 	.name = "format",
2365 	.attrs = knl_uncore_irp_formats_attr,
2366 };
2367 
2368 static struct intel_uncore_type knl_uncore_irp = {
2369 	.name			= "irp",
2370 	.num_counters		= 2,
2371 	.num_boxes		= 1,
2372 	.perf_ctr_bits		= 48,
2373 	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
2374 	.event_ctl		= SNBEP_PCI_PMON_CTL0,
2375 	.event_mask		= KNL_IRP_PCI_PMON_RAW_EVENT_MASK,
2376 	.box_ctl		= KNL_IRP_PCI_PMON_BOX_CTL,
2377 	.ops			= &snbep_uncore_pci_ops,
2378 	.format_group		= &knl_uncore_irp_format_group,
2379 };
2380 
2381 enum {
2382 	KNL_PCI_UNCORE_MC_UCLK,
2383 	KNL_PCI_UNCORE_MC_DCLK,
2384 	KNL_PCI_UNCORE_EDC_UCLK,
2385 	KNL_PCI_UNCORE_EDC_ECLK,
2386 	KNL_PCI_UNCORE_M2PCIE,
2387 	KNL_PCI_UNCORE_IRP,
2388 };
2389 
2390 static struct intel_uncore_type *knl_pci_uncores[] = {
2391 	[KNL_PCI_UNCORE_MC_UCLK]	= &knl_uncore_imc_uclk,
2392 	[KNL_PCI_UNCORE_MC_DCLK]	= &knl_uncore_imc_dclk,
2393 	[KNL_PCI_UNCORE_EDC_UCLK]	= &knl_uncore_edc_uclk,
2394 	[KNL_PCI_UNCORE_EDC_ECLK]	= &knl_uncore_edc_eclk,
2395 	[KNL_PCI_UNCORE_M2PCIE]		= &knl_uncore_m2pcie,
2396 	[KNL_PCI_UNCORE_IRP]		= &knl_uncore_irp,
2397 	NULL,
2398 };
2399 
2400 /*
2401  * KNL uses a common PCI device ID for multiple instances of an Uncore PMU
2402  * device type. prior to KNL, each instance of a PMU device type had a unique
2403  * device ID.
2404  *
2405  *	PCI Device ID	Uncore PMU Devices
2406  *	----------------------------------
2407  *	0x7841		MC0 UClk, MC1 UClk
2408  *	0x7843		MC0 DClk CH 0, MC0 DClk CH 1, MC0 DClk CH 2,
2409  *			MC1 DClk CH 0, MC1 DClk CH 1, MC1 DClk CH 2
2410  *	0x7833		EDC0 UClk, EDC1 UClk, EDC2 UClk, EDC3 UClk,
2411  *			EDC4 UClk, EDC5 UClk, EDC6 UClk, EDC7 UClk
2412  *	0x7835		EDC0 EClk, EDC1 EClk, EDC2 EClk, EDC3 EClk,
2413  *			EDC4 EClk, EDC5 EClk, EDC6 EClk, EDC7 EClk
2414  *	0x7817		M2PCIe
2415  *	0x7814		IRP
2416 */
2417 
2418 static const struct pci_device_id knl_uncore_pci_ids[] = {
2419 	{ /* MC0 UClk */
2420 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
2421 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 0, KNL_PCI_UNCORE_MC_UCLK, 0),
2422 	},
2423 	{ /* MC1 UClk */
2424 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
2425 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 0, KNL_PCI_UNCORE_MC_UCLK, 1),
2426 	},
2427 	{ /* MC0 DClk CH 0 */
2428 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2429 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 2, KNL_PCI_UNCORE_MC_DCLK, 0),
2430 	},
2431 	{ /* MC0 DClk CH 1 */
2432 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2433 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 3, KNL_PCI_UNCORE_MC_DCLK, 1),
2434 	},
2435 	{ /* MC0 DClk CH 2 */
2436 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2437 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 4, KNL_PCI_UNCORE_MC_DCLK, 2),
2438 	},
2439 	{ /* MC1 DClk CH 0 */
2440 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2441 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 2, KNL_PCI_UNCORE_MC_DCLK, 3),
2442 	},
2443 	{ /* MC1 DClk CH 1 */
2444 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2445 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 3, KNL_PCI_UNCORE_MC_DCLK, 4),
2446 	},
2447 	{ /* MC1 DClk CH 2 */
2448 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
2449 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 4, KNL_PCI_UNCORE_MC_DCLK, 5),
2450 	},
2451 	{ /* EDC0 UClk */
2452 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2453 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, KNL_PCI_UNCORE_EDC_UCLK, 0),
2454 	},
2455 	{ /* EDC1 UClk */
2456 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2457 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, KNL_PCI_UNCORE_EDC_UCLK, 1),
2458 	},
2459 	{ /* EDC2 UClk */
2460 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2461 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(17, 0, KNL_PCI_UNCORE_EDC_UCLK, 2),
2462 	},
2463 	{ /* EDC3 UClk */
2464 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2465 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, KNL_PCI_UNCORE_EDC_UCLK, 3),
2466 	},
2467 	{ /* EDC4 UClk */
2468 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2469 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(19, 0, KNL_PCI_UNCORE_EDC_UCLK, 4),
2470 	},
2471 	{ /* EDC5 UClk */
2472 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2473 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(20, 0, KNL_PCI_UNCORE_EDC_UCLK, 5),
2474 	},
2475 	{ /* EDC6 UClk */
2476 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2477 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 0, KNL_PCI_UNCORE_EDC_UCLK, 6),
2478 	},
2479 	{ /* EDC7 UClk */
2480 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
2481 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 0, KNL_PCI_UNCORE_EDC_UCLK, 7),
2482 	},
2483 	{ /* EDC0 EClk */
2484 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2485 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(24, 2, KNL_PCI_UNCORE_EDC_ECLK, 0),
2486 	},
2487 	{ /* EDC1 EClk */
2488 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2489 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(25, 2, KNL_PCI_UNCORE_EDC_ECLK, 1),
2490 	},
2491 	{ /* EDC2 EClk */
2492 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2493 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(26, 2, KNL_PCI_UNCORE_EDC_ECLK, 2),
2494 	},
2495 	{ /* EDC3 EClk */
2496 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2497 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(27, 2, KNL_PCI_UNCORE_EDC_ECLK, 3),
2498 	},
2499 	{ /* EDC4 EClk */
2500 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2501 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(28, 2, KNL_PCI_UNCORE_EDC_ECLK, 4),
2502 	},
2503 	{ /* EDC5 EClk */
2504 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2505 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(29, 2, KNL_PCI_UNCORE_EDC_ECLK, 5),
2506 	},
2507 	{ /* EDC6 EClk */
2508 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2509 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(30, 2, KNL_PCI_UNCORE_EDC_ECLK, 6),
2510 	},
2511 	{ /* EDC7 EClk */
2512 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
2513 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(31, 2, KNL_PCI_UNCORE_EDC_ECLK, 7),
2514 	},
2515 	{ /* M2PCIe */
2516 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7817),
2517 		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_M2PCIE, 0),
2518 	},
2519 	{ /* IRP */
2520 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7814),
2521 		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_IRP, 0),
2522 	},
2523 	{ /* end: all zeroes */ }
2524 };
2525 
2526 static struct pci_driver knl_uncore_pci_driver = {
2527 	.name		= "knl_uncore",
2528 	.id_table	= knl_uncore_pci_ids,
2529 };
2530 
2531 int knl_uncore_pci_init(void)
2532 {
2533 	int ret;
2534 
2535 	/* All KNL PCI based PMON units are on the same PCI bus except IRP */
2536 	ret = snb_pci2phy_map_init(0x7814); /* IRP */
2537 	if (ret)
2538 		return ret;
2539 	ret = snb_pci2phy_map_init(0x7817); /* M2PCIe */
2540 	if (ret)
2541 		return ret;
2542 	uncore_pci_uncores = knl_pci_uncores;
2543 	uncore_pci_driver = &knl_uncore_pci_driver;
2544 	return 0;
2545 }
2546 
2547 /* end of KNL uncore support */
2548 
2549 /* Haswell-EP uncore support */
2550 static struct attribute *hswep_uncore_ubox_formats_attr[] = {
2551 	&format_attr_event.attr,
2552 	&format_attr_umask.attr,
2553 	&format_attr_edge.attr,
2554 	&format_attr_inv.attr,
2555 	&format_attr_thresh5.attr,
2556 	&format_attr_filter_tid2.attr,
2557 	&format_attr_filter_cid.attr,
2558 	NULL,
2559 };
2560 
2561 static const struct attribute_group hswep_uncore_ubox_format_group = {
2562 	.name = "format",
2563 	.attrs = hswep_uncore_ubox_formats_attr,
2564 };
2565 
2566 static int hswep_ubox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2567 {
2568 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2569 	reg1->reg = HSWEP_U_MSR_PMON_FILTER;
2570 	reg1->config = event->attr.config1 & HSWEP_U_MSR_PMON_BOX_FILTER_MASK;
2571 	reg1->idx = 0;
2572 	return 0;
2573 }
2574 
2575 static struct intel_uncore_ops hswep_uncore_ubox_ops = {
2576 	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
2577 	.hw_config		= hswep_ubox_hw_config,
2578 	.get_constraint		= uncore_get_constraint,
2579 	.put_constraint		= uncore_put_constraint,
2580 };
2581 
2582 static struct intel_uncore_type hswep_uncore_ubox = {
2583 	.name			= "ubox",
2584 	.num_counters		= 2,
2585 	.num_boxes		= 1,
2586 	.perf_ctr_bits		= 44,
2587 	.fixed_ctr_bits		= 48,
2588 	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
2589 	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
2590 	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
2591 	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
2592 	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
2593 	.num_shared_regs	= 1,
2594 	.ops			= &hswep_uncore_ubox_ops,
2595 	.format_group		= &hswep_uncore_ubox_format_group,
2596 };
2597 
2598 static struct attribute *hswep_uncore_cbox_formats_attr[] = {
2599 	&format_attr_event.attr,
2600 	&format_attr_umask.attr,
2601 	&format_attr_edge.attr,
2602 	&format_attr_tid_en.attr,
2603 	&format_attr_thresh8.attr,
2604 	&format_attr_filter_tid3.attr,
2605 	&format_attr_filter_link2.attr,
2606 	&format_attr_filter_state3.attr,
2607 	&format_attr_filter_nid2.attr,
2608 	&format_attr_filter_opc2.attr,
2609 	&format_attr_filter_nc.attr,
2610 	&format_attr_filter_c6.attr,
2611 	&format_attr_filter_isoc.attr,
2612 	NULL,
2613 };
2614 
2615 static const struct attribute_group hswep_uncore_cbox_format_group = {
2616 	.name = "format",
2617 	.attrs = hswep_uncore_cbox_formats_attr,
2618 };
2619 
2620 static struct event_constraint hswep_uncore_cbox_constraints[] = {
2621 	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
2622 	UNCORE_EVENT_CONSTRAINT(0x09, 0x1),
2623 	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
2624 	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
2625 	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
2626 	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
2627 	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
2628 	EVENT_CONSTRAINT_END
2629 };
2630 
2631 static struct extra_reg hswep_uncore_cbox_extra_regs[] = {
2632 	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
2633 				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
2634 	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
2635 	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
2636 	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
2637 	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
2638 	SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4),
2639 	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x4),
2640 	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
2641 	SNBEP_CBO_EVENT_EXTRA_REG(0x4028, 0x40ff, 0x8),
2642 	SNBEP_CBO_EVENT_EXTRA_REG(0x4032, 0x40ff, 0x8),
2643 	SNBEP_CBO_EVENT_EXTRA_REG(0x4029, 0x40ff, 0x8),
2644 	SNBEP_CBO_EVENT_EXTRA_REG(0x4033, 0x40ff, 0x8),
2645 	SNBEP_CBO_EVENT_EXTRA_REG(0x402A, 0x40ff, 0x8),
2646 	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x12),
2647 	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
2648 	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
2649 	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
2650 	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
2651 	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
2652 	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
2653 	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
2654 	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
2655 	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
2656 	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
2657 	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
2658 	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
2659 	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
2660 	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
2661 	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
2662 	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
2663 	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
2664 	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
2665 	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
2666 	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
2667 	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
2668 	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
2669 	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
2670 	EVENT_EXTRA_END
2671 };
2672 
2673 static u64 hswep_cbox_filter_mask(int fields)
2674 {
2675 	u64 mask = 0;
2676 	if (fields & 0x1)
2677 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_TID;
2678 	if (fields & 0x2)
2679 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK;
2680 	if (fields & 0x4)
2681 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE;
2682 	if (fields & 0x8)
2683 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NID;
2684 	if (fields & 0x10) {
2685 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC;
2686 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NC;
2687 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_C6;
2688 		mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
2689 	}
2690 	return mask;
2691 }
2692 
2693 static struct event_constraint *
2694 hswep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
2695 {
2696 	return __snbep_cbox_get_constraint(box, event, hswep_cbox_filter_mask);
2697 }
2698 
2699 static int hswep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2700 {
2701 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2702 	struct extra_reg *er;
2703 	int idx = 0;
2704 
2705 	for (er = hswep_uncore_cbox_extra_regs; er->msr; er++) {
2706 		if (er->event != (event->hw.config & er->config_mask))
2707 			continue;
2708 		idx |= er->idx;
2709 	}
2710 
2711 	if (idx) {
2712 		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
2713 			    HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
2714 		reg1->config = event->attr.config1 & hswep_cbox_filter_mask(idx);
2715 		reg1->idx = idx;
2716 	}
2717 	return 0;
2718 }
2719 
2720 static void hswep_cbox_enable_event(struct intel_uncore_box *box,
2721 				  struct perf_event *event)
2722 {
2723 	struct hw_perf_event *hwc = &event->hw;
2724 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2725 
2726 	if (reg1->idx != EXTRA_REG_NONE) {
2727 		u64 filter = uncore_shared_reg_config(box, 0);
2728 		wrmsrl(reg1->reg, filter & 0xffffffff);
2729 		wrmsrl(reg1->reg + 1, filter >> 32);
2730 	}
2731 
2732 	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
2733 }
2734 
2735 static struct intel_uncore_ops hswep_uncore_cbox_ops = {
2736 	.init_box		= snbep_uncore_msr_init_box,
2737 	.disable_box		= snbep_uncore_msr_disable_box,
2738 	.enable_box		= snbep_uncore_msr_enable_box,
2739 	.disable_event		= snbep_uncore_msr_disable_event,
2740 	.enable_event		= hswep_cbox_enable_event,
2741 	.read_counter		= uncore_msr_read_counter,
2742 	.hw_config		= hswep_cbox_hw_config,
2743 	.get_constraint		= hswep_cbox_get_constraint,
2744 	.put_constraint		= snbep_cbox_put_constraint,
2745 };
2746 
2747 static struct intel_uncore_type hswep_uncore_cbox = {
2748 	.name			= "cbox",
2749 	.num_counters		= 4,
2750 	.num_boxes		= 18,
2751 	.perf_ctr_bits		= 48,
2752 	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
2753 	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
2754 	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
2755 	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
2756 	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
2757 	.num_shared_regs	= 1,
2758 	.constraints		= hswep_uncore_cbox_constraints,
2759 	.ops			= &hswep_uncore_cbox_ops,
2760 	.format_group		= &hswep_uncore_cbox_format_group,
2761 };
2762 
2763 /*
2764  * Write SBOX Initialization register bit by bit to avoid spurious #GPs
2765  */
2766 static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box)
2767 {
2768 	unsigned msr = uncore_msr_box_ctl(box);
2769 
2770 	if (msr) {
2771 		u64 init = SNBEP_PMON_BOX_CTL_INT;
2772 		u64 flags = 0;
2773 		int i;
2774 
2775 		for_each_set_bit(i, (unsigned long *)&init, 64) {
2776 			flags |= (1ULL << i);
2777 			wrmsrl(msr, flags);
2778 		}
2779 	}
2780 }
2781 
2782 static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = {
2783 	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
2784 	.init_box		= hswep_uncore_sbox_msr_init_box
2785 };
2786 
2787 static struct attribute *hswep_uncore_sbox_formats_attr[] = {
2788 	&format_attr_event.attr,
2789 	&format_attr_umask.attr,
2790 	&format_attr_edge.attr,
2791 	&format_attr_tid_en.attr,
2792 	&format_attr_inv.attr,
2793 	&format_attr_thresh8.attr,
2794 	NULL,
2795 };
2796 
2797 static const struct attribute_group hswep_uncore_sbox_format_group = {
2798 	.name = "format",
2799 	.attrs = hswep_uncore_sbox_formats_attr,
2800 };
2801 
2802 static struct intel_uncore_type hswep_uncore_sbox = {
2803 	.name			= "sbox",
2804 	.num_counters		= 4,
2805 	.num_boxes		= 4,
2806 	.perf_ctr_bits		= 44,
2807 	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
2808 	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
2809 	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
2810 	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
2811 	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
2812 	.ops			= &hswep_uncore_sbox_msr_ops,
2813 	.format_group		= &hswep_uncore_sbox_format_group,
2814 };
2815 
2816 static int hswep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2817 {
2818 	struct hw_perf_event *hwc = &event->hw;
2819 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2820 	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
2821 
2822 	if (ev_sel >= 0xb && ev_sel <= 0xe) {
2823 		reg1->reg = HSWEP_PCU_MSR_PMON_BOX_FILTER;
2824 		reg1->idx = ev_sel - 0xb;
2825 		reg1->config = event->attr.config1 & (0xff << reg1->idx);
2826 	}
2827 	return 0;
2828 }
2829 
2830 static struct intel_uncore_ops hswep_uncore_pcu_ops = {
2831 	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
2832 	.hw_config		= hswep_pcu_hw_config,
2833 	.get_constraint		= snbep_pcu_get_constraint,
2834 	.put_constraint		= snbep_pcu_put_constraint,
2835 };
2836 
2837 static struct intel_uncore_type hswep_uncore_pcu = {
2838 	.name			= "pcu",
2839 	.num_counters		= 4,
2840 	.num_boxes		= 1,
2841 	.perf_ctr_bits		= 48,
2842 	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
2843 	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
2844 	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
2845 	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
2846 	.num_shared_regs	= 1,
2847 	.ops			= &hswep_uncore_pcu_ops,
2848 	.format_group		= &snbep_uncore_pcu_format_group,
2849 };
2850 
2851 static struct intel_uncore_type *hswep_msr_uncores[] = {
2852 	&hswep_uncore_ubox,
2853 	&hswep_uncore_cbox,
2854 	&hswep_uncore_sbox,
2855 	&hswep_uncore_pcu,
2856 	NULL,
2857 };
2858 
2859 #define HSWEP_PCU_DID			0x2fc0
2860 #define HSWEP_PCU_CAPID4_OFFET		0x94
2861 #define hswep_get_chop(_cap)		(((_cap) >> 6) & 0x3)
2862 
2863 static bool hswep_has_limit_sbox(unsigned int device)
2864 {
2865 	struct pci_dev *dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
2866 	u32 capid4;
2867 
2868 	if (!dev)
2869 		return false;
2870 
2871 	pci_read_config_dword(dev, HSWEP_PCU_CAPID4_OFFET, &capid4);
2872 	if (!hswep_get_chop(capid4))
2873 		return true;
2874 
2875 	return false;
2876 }
2877 
2878 void hswep_uncore_cpu_init(void)
2879 {
2880 	if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
2881 		hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
2882 
2883 	/* Detect 6-8 core systems with only two SBOXes */
2884 	if (hswep_has_limit_sbox(HSWEP_PCU_DID))
2885 		hswep_uncore_sbox.num_boxes = 2;
2886 
2887 	uncore_msr_uncores = hswep_msr_uncores;
2888 }
2889 
2890 static struct intel_uncore_type hswep_uncore_ha = {
2891 	.name		= "ha",
2892 	.num_counters   = 4,
2893 	.num_boxes	= 2,
2894 	.perf_ctr_bits	= 48,
2895 	SNBEP_UNCORE_PCI_COMMON_INIT(),
2896 };
2897 
2898 static struct uncore_event_desc hswep_uncore_imc_events[] = {
2899 	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
2900 	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
2901 	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
2902 	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
2903 	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
2904 	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
2905 	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
2906 	{ /* end: all zeroes */ },
2907 };
2908 
2909 static struct intel_uncore_type hswep_uncore_imc = {
2910 	.name		= "imc",
2911 	.num_counters   = 4,
2912 	.num_boxes	= 8,
2913 	.perf_ctr_bits	= 48,
2914 	.fixed_ctr_bits	= 48,
2915 	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
2916 	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
2917 	.event_descs	= hswep_uncore_imc_events,
2918 	SNBEP_UNCORE_PCI_COMMON_INIT(),
2919 };
2920 
2921 static unsigned hswep_uncore_irp_ctrs[] = {0xa0, 0xa8, 0xb0, 0xb8};
2922 
2923 static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
2924 {
2925 	struct pci_dev *pdev = box->pci_dev;
2926 	struct hw_perf_event *hwc = &event->hw;
2927 	u64 count = 0;
2928 
2929 	pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
2930 	pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
2931 
2932 	return count;
2933 }
2934 
2935 static struct intel_uncore_ops hswep_uncore_irp_ops = {
2936 	.init_box	= snbep_uncore_pci_init_box,
2937 	.disable_box	= snbep_uncore_pci_disable_box,
2938 	.enable_box	= snbep_uncore_pci_enable_box,
2939 	.disable_event	= ivbep_uncore_irp_disable_event,
2940 	.enable_event	= ivbep_uncore_irp_enable_event,
2941 	.read_counter	= hswep_uncore_irp_read_counter,
2942 };
2943 
2944 static struct intel_uncore_type hswep_uncore_irp = {
2945 	.name			= "irp",
2946 	.num_counters		= 4,
2947 	.num_boxes		= 1,
2948 	.perf_ctr_bits		= 48,
2949 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
2950 	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
2951 	.ops			= &hswep_uncore_irp_ops,
2952 	.format_group		= &snbep_uncore_format_group,
2953 };
2954 
2955 static struct intel_uncore_type hswep_uncore_qpi = {
2956 	.name			= "qpi",
2957 	.num_counters		= 4,
2958 	.num_boxes		= 3,
2959 	.perf_ctr_bits		= 48,
2960 	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
2961 	.event_ctl		= SNBEP_PCI_PMON_CTL0,
2962 	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
2963 	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
2964 	.num_shared_regs	= 1,
2965 	.ops			= &snbep_uncore_qpi_ops,
2966 	.format_group		= &snbep_uncore_qpi_format_group,
2967 };
2968 
2969 static struct event_constraint hswep_uncore_r2pcie_constraints[] = {
2970 	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
2971 	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
2972 	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
2973 	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
2974 	UNCORE_EVENT_CONSTRAINT(0x24, 0x1),
2975 	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
2976 	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
2977 	UNCORE_EVENT_CONSTRAINT(0x27, 0x1),
2978 	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
2979 	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
2980 	UNCORE_EVENT_CONSTRAINT(0x2a, 0x1),
2981 	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
2982 	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
2983 	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
2984 	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
2985 	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
2986 	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
2987 	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
2988 	EVENT_CONSTRAINT_END
2989 };
2990 
2991 static struct intel_uncore_type hswep_uncore_r2pcie = {
2992 	.name		= "r2pcie",
2993 	.num_counters   = 4,
2994 	.num_boxes	= 1,
2995 	.perf_ctr_bits	= 48,
2996 	.constraints	= hswep_uncore_r2pcie_constraints,
2997 	SNBEP_UNCORE_PCI_COMMON_INIT(),
2998 };
2999 
3000 static struct event_constraint hswep_uncore_r3qpi_constraints[] = {
3001 	UNCORE_EVENT_CONSTRAINT(0x01, 0x3),
3002 	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
3003 	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
3004 	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
3005 	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
3006 	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
3007 	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
3008 	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
3009 	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
3010 	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
3011 	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
3012 	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
3013 	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
3014 	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
3015 	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
3016 	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
3017 	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
3018 	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
3019 	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
3020 	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
3021 	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
3022 	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
3023 	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
3024 	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
3025 	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
3026 	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
3027 	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
3028 	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
3029 	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
3030 	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
3031 	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
3032 	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
3033 	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
3034 	EVENT_CONSTRAINT_END
3035 };
3036 
3037 static struct intel_uncore_type hswep_uncore_r3qpi = {
3038 	.name		= "r3qpi",
3039 	.num_counters   = 3,
3040 	.num_boxes	= 3,
3041 	.perf_ctr_bits	= 44,
3042 	.constraints	= hswep_uncore_r3qpi_constraints,
3043 	SNBEP_UNCORE_PCI_COMMON_INIT(),
3044 };
3045 
3046 enum {
3047 	HSWEP_PCI_UNCORE_HA,
3048 	HSWEP_PCI_UNCORE_IMC,
3049 	HSWEP_PCI_UNCORE_IRP,
3050 	HSWEP_PCI_UNCORE_QPI,
3051 	HSWEP_PCI_UNCORE_R2PCIE,
3052 	HSWEP_PCI_UNCORE_R3QPI,
3053 };
3054 
3055 static struct intel_uncore_type *hswep_pci_uncores[] = {
3056 	[HSWEP_PCI_UNCORE_HA]	= &hswep_uncore_ha,
3057 	[HSWEP_PCI_UNCORE_IMC]	= &hswep_uncore_imc,
3058 	[HSWEP_PCI_UNCORE_IRP]	= &hswep_uncore_irp,
3059 	[HSWEP_PCI_UNCORE_QPI]	= &hswep_uncore_qpi,
3060 	[HSWEP_PCI_UNCORE_R2PCIE]	= &hswep_uncore_r2pcie,
3061 	[HSWEP_PCI_UNCORE_R3QPI]	= &hswep_uncore_r3qpi,
3062 	NULL,
3063 };
3064 
3065 static const struct pci_device_id hswep_uncore_pci_ids[] = {
3066 	{ /* Home Agent 0 */
3067 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f30),
3068 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 0),
3069 	},
3070 	{ /* Home Agent 1 */
3071 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f38),
3072 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 1),
3073 	},
3074 	{ /* MC0 Channel 0 */
3075 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb0),
3076 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 0),
3077 	},
3078 	{ /* MC0 Channel 1 */
3079 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb1),
3080 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 1),
3081 	},
3082 	{ /* MC0 Channel 2 */
3083 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb4),
3084 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 2),
3085 	},
3086 	{ /* MC0 Channel 3 */
3087 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb5),
3088 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 3),
3089 	},
3090 	{ /* MC1 Channel 0 */
3091 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd0),
3092 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 4),
3093 	},
3094 	{ /* MC1 Channel 1 */
3095 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd1),
3096 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 5),
3097 	},
3098 	{ /* MC1 Channel 2 */
3099 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd4),
3100 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 6),
3101 	},
3102 	{ /* MC1 Channel 3 */
3103 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd5),
3104 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 7),
3105 	},
3106 	{ /* IRP */
3107 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f39),
3108 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IRP, 0),
3109 	},
3110 	{ /* QPI0 Port 0 */
3111 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f32),
3112 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 0),
3113 	},
3114 	{ /* QPI0 Port 1 */
3115 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f33),
3116 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 1),
3117 	},
3118 	{ /* QPI1 Port 2 */
3119 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3a),
3120 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 2),
3121 	},
3122 	{ /* R2PCIe */
3123 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f34),
3124 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R2PCIE, 0),
3125 	},
3126 	{ /* R3QPI0 Link 0 */
3127 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f36),
3128 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 0),
3129 	},
3130 	{ /* R3QPI0 Link 1 */
3131 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f37),
3132 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 1),
3133 	},
3134 	{ /* R3QPI1 Link 2 */
3135 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3e),
3136 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 2),
3137 	},
3138 	{ /* QPI Port 0 filter  */
3139 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f86),
3140 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3141 						   SNBEP_PCI_QPI_PORT0_FILTER),
3142 	},
3143 	{ /* QPI Port 1 filter  */
3144 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f96),
3145 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3146 						   SNBEP_PCI_QPI_PORT1_FILTER),
3147 	},
3148 	{ /* end: all zeroes */ }
3149 };
3150 
3151 static struct pci_driver hswep_uncore_pci_driver = {
3152 	.name		= "hswep_uncore",
3153 	.id_table	= hswep_uncore_pci_ids,
3154 };
3155 
3156 int hswep_uncore_pci_init(void)
3157 {
3158 	int ret = snbep_pci2phy_map_init(0x2f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
3159 	if (ret)
3160 		return ret;
3161 	uncore_pci_uncores = hswep_pci_uncores;
3162 	uncore_pci_driver = &hswep_uncore_pci_driver;
3163 	return 0;
3164 }
3165 /* end of Haswell-EP uncore support */
3166 
3167 /* BDX uncore support */
3168 
3169 static struct intel_uncore_type bdx_uncore_ubox = {
3170 	.name			= "ubox",
3171 	.num_counters		= 2,
3172 	.num_boxes		= 1,
3173 	.perf_ctr_bits		= 48,
3174 	.fixed_ctr_bits		= 48,
3175 	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
3176 	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
3177 	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
3178 	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
3179 	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
3180 	.num_shared_regs	= 1,
3181 	.ops			= &ivbep_uncore_msr_ops,
3182 	.format_group		= &ivbep_uncore_ubox_format_group,
3183 };
3184 
3185 static struct event_constraint bdx_uncore_cbox_constraints[] = {
3186 	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
3187 	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
3188 	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
3189 	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
3190 	EVENT_CONSTRAINT_END
3191 };
3192 
3193 static struct intel_uncore_type bdx_uncore_cbox = {
3194 	.name			= "cbox",
3195 	.num_counters		= 4,
3196 	.num_boxes		= 24,
3197 	.perf_ctr_bits		= 48,
3198 	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
3199 	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
3200 	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
3201 	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
3202 	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
3203 	.num_shared_regs	= 1,
3204 	.constraints		= bdx_uncore_cbox_constraints,
3205 	.ops			= &hswep_uncore_cbox_ops,
3206 	.format_group		= &hswep_uncore_cbox_format_group,
3207 };
3208 
3209 static struct intel_uncore_type bdx_uncore_sbox = {
3210 	.name			= "sbox",
3211 	.num_counters		= 4,
3212 	.num_boxes		= 4,
3213 	.perf_ctr_bits		= 48,
3214 	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
3215 	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
3216 	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
3217 	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
3218 	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
3219 	.ops			= &hswep_uncore_sbox_msr_ops,
3220 	.format_group		= &hswep_uncore_sbox_format_group,
3221 };
3222 
3223 #define BDX_MSR_UNCORE_SBOX	3
3224 
3225 static struct intel_uncore_type *bdx_msr_uncores[] = {
3226 	&bdx_uncore_ubox,
3227 	&bdx_uncore_cbox,
3228 	&hswep_uncore_pcu,
3229 	&bdx_uncore_sbox,
3230 	NULL,
3231 };
3232 
3233 /* Bit 7 'Use Occupancy' is not available for counter 0 on BDX */
3234 static struct event_constraint bdx_uncore_pcu_constraints[] = {
3235 	EVENT_CONSTRAINT(0x80, 0xe, 0x80),
3236 	EVENT_CONSTRAINT_END
3237 };
3238 
3239 #define BDX_PCU_DID			0x6fc0
3240 
3241 void bdx_uncore_cpu_init(void)
3242 {
3243 	if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
3244 		bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
3245 	uncore_msr_uncores = bdx_msr_uncores;
3246 
3247 	/* Detect systems with no SBOXes */
3248 	if ((boot_cpu_data.x86_model == 86) || hswep_has_limit_sbox(BDX_PCU_DID))
3249 		uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
3250 
3251 	hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints;
3252 }
3253 
3254 static struct intel_uncore_type bdx_uncore_ha = {
3255 	.name		= "ha",
3256 	.num_counters   = 4,
3257 	.num_boxes	= 2,
3258 	.perf_ctr_bits	= 48,
3259 	SNBEP_UNCORE_PCI_COMMON_INIT(),
3260 };
3261 
3262 static struct intel_uncore_type bdx_uncore_imc = {
3263 	.name		= "imc",
3264 	.num_counters   = 4,
3265 	.num_boxes	= 8,
3266 	.perf_ctr_bits	= 48,
3267 	.fixed_ctr_bits	= 48,
3268 	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
3269 	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
3270 	.event_descs	= hswep_uncore_imc_events,
3271 	SNBEP_UNCORE_PCI_COMMON_INIT(),
3272 };
3273 
3274 static struct intel_uncore_type bdx_uncore_irp = {
3275 	.name			= "irp",
3276 	.num_counters		= 4,
3277 	.num_boxes		= 1,
3278 	.perf_ctr_bits		= 48,
3279 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
3280 	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
3281 	.ops			= &hswep_uncore_irp_ops,
3282 	.format_group		= &snbep_uncore_format_group,
3283 };
3284 
3285 static struct intel_uncore_type bdx_uncore_qpi = {
3286 	.name			= "qpi",
3287 	.num_counters		= 4,
3288 	.num_boxes		= 3,
3289 	.perf_ctr_bits		= 48,
3290 	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
3291 	.event_ctl		= SNBEP_PCI_PMON_CTL0,
3292 	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
3293 	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
3294 	.num_shared_regs	= 1,
3295 	.ops			= &snbep_uncore_qpi_ops,
3296 	.format_group		= &snbep_uncore_qpi_format_group,
3297 };
3298 
3299 static struct event_constraint bdx_uncore_r2pcie_constraints[] = {
3300 	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
3301 	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
3302 	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
3303 	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
3304 	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
3305 	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
3306 	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
3307 	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
3308 	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
3309 	EVENT_CONSTRAINT_END
3310 };
3311 
3312 static struct intel_uncore_type bdx_uncore_r2pcie = {
3313 	.name		= "r2pcie",
3314 	.num_counters   = 4,
3315 	.num_boxes	= 1,
3316 	.perf_ctr_bits	= 48,
3317 	.constraints	= bdx_uncore_r2pcie_constraints,
3318 	SNBEP_UNCORE_PCI_COMMON_INIT(),
3319 };
3320 
3321 static struct event_constraint bdx_uncore_r3qpi_constraints[] = {
3322 	UNCORE_EVENT_CONSTRAINT(0x01, 0x7),
3323 	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
3324 	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
3325 	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
3326 	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
3327 	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
3328 	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
3329 	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
3330 	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
3331 	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
3332 	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
3333 	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
3334 	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
3335 	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
3336 	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
3337 	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
3338 	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
3339 	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
3340 	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
3341 	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
3342 	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
3343 	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
3344 	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
3345 	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
3346 	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
3347 	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
3348 	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
3349 	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
3350 	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
3351 	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
3352 	EVENT_CONSTRAINT_END
3353 };
3354 
3355 static struct intel_uncore_type bdx_uncore_r3qpi = {
3356 	.name		= "r3qpi",
3357 	.num_counters   = 3,
3358 	.num_boxes	= 3,
3359 	.perf_ctr_bits	= 48,
3360 	.constraints	= bdx_uncore_r3qpi_constraints,
3361 	SNBEP_UNCORE_PCI_COMMON_INIT(),
3362 };
3363 
3364 enum {
3365 	BDX_PCI_UNCORE_HA,
3366 	BDX_PCI_UNCORE_IMC,
3367 	BDX_PCI_UNCORE_IRP,
3368 	BDX_PCI_UNCORE_QPI,
3369 	BDX_PCI_UNCORE_R2PCIE,
3370 	BDX_PCI_UNCORE_R3QPI,
3371 };
3372 
3373 static struct intel_uncore_type *bdx_pci_uncores[] = {
3374 	[BDX_PCI_UNCORE_HA]	= &bdx_uncore_ha,
3375 	[BDX_PCI_UNCORE_IMC]	= &bdx_uncore_imc,
3376 	[BDX_PCI_UNCORE_IRP]	= &bdx_uncore_irp,
3377 	[BDX_PCI_UNCORE_QPI]	= &bdx_uncore_qpi,
3378 	[BDX_PCI_UNCORE_R2PCIE]	= &bdx_uncore_r2pcie,
3379 	[BDX_PCI_UNCORE_R3QPI]	= &bdx_uncore_r3qpi,
3380 	NULL,
3381 };
3382 
3383 static const struct pci_device_id bdx_uncore_pci_ids[] = {
3384 	{ /* Home Agent 0 */
3385 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f30),
3386 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 0),
3387 	},
3388 	{ /* Home Agent 1 */
3389 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f38),
3390 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 1),
3391 	},
3392 	{ /* MC0 Channel 0 */
3393 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb0),
3394 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 0),
3395 	},
3396 	{ /* MC0 Channel 1 */
3397 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb1),
3398 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 1),
3399 	},
3400 	{ /* MC0 Channel 2 */
3401 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb4),
3402 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 2),
3403 	},
3404 	{ /* MC0 Channel 3 */
3405 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb5),
3406 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 3),
3407 	},
3408 	{ /* MC1 Channel 0 */
3409 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd0),
3410 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 4),
3411 	},
3412 	{ /* MC1 Channel 1 */
3413 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd1),
3414 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 5),
3415 	},
3416 	{ /* MC1 Channel 2 */
3417 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd4),
3418 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 6),
3419 	},
3420 	{ /* MC1 Channel 3 */
3421 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd5),
3422 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 7),
3423 	},
3424 	{ /* IRP */
3425 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f39),
3426 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IRP, 0),
3427 	},
3428 	{ /* QPI0 Port 0 */
3429 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f32),
3430 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 0),
3431 	},
3432 	{ /* QPI0 Port 1 */
3433 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f33),
3434 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 1),
3435 	},
3436 	{ /* QPI1 Port 2 */
3437 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3a),
3438 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 2),
3439 	},
3440 	{ /* R2PCIe */
3441 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f34),
3442 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R2PCIE, 0),
3443 	},
3444 	{ /* R3QPI0 Link 0 */
3445 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f36),
3446 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 0),
3447 	},
3448 	{ /* R3QPI0 Link 1 */
3449 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f37),
3450 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 1),
3451 	},
3452 	{ /* R3QPI1 Link 2 */
3453 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3e),
3454 		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 2),
3455 	},
3456 	{ /* QPI Port 0 filter  */
3457 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f86),
3458 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3459 						   SNBEP_PCI_QPI_PORT0_FILTER),
3460 	},
3461 	{ /* QPI Port 1 filter  */
3462 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f96),
3463 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3464 						   SNBEP_PCI_QPI_PORT1_FILTER),
3465 	},
3466 	{ /* QPI Port 2 filter  */
3467 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46),
3468 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3469 						   BDX_PCI_QPI_PORT2_FILTER),
3470 	},
3471 	{ /* end: all zeroes */ }
3472 };
3473 
3474 static struct pci_driver bdx_uncore_pci_driver = {
3475 	.name		= "bdx_uncore",
3476 	.id_table	= bdx_uncore_pci_ids,
3477 };
3478 
3479 int bdx_uncore_pci_init(void)
3480 {
3481 	int ret = snbep_pci2phy_map_init(0x6f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
3482 
3483 	if (ret)
3484 		return ret;
3485 	uncore_pci_uncores = bdx_pci_uncores;
3486 	uncore_pci_driver = &bdx_uncore_pci_driver;
3487 	return 0;
3488 }
3489 
3490 /* end of BDX uncore support */
3491 
3492 /* SKX uncore support */
3493 
3494 static struct intel_uncore_type skx_uncore_ubox = {
3495 	.name			= "ubox",
3496 	.num_counters		= 2,
3497 	.num_boxes		= 1,
3498 	.perf_ctr_bits		= 48,
3499 	.fixed_ctr_bits		= 48,
3500 	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
3501 	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
3502 	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
3503 	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
3504 	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
3505 	.ops			= &ivbep_uncore_msr_ops,
3506 	.format_group		= &ivbep_uncore_ubox_format_group,
3507 };
3508 
3509 static struct attribute *skx_uncore_cha_formats_attr[] = {
3510 	&format_attr_event.attr,
3511 	&format_attr_umask.attr,
3512 	&format_attr_edge.attr,
3513 	&format_attr_tid_en.attr,
3514 	&format_attr_inv.attr,
3515 	&format_attr_thresh8.attr,
3516 	&format_attr_filter_tid4.attr,
3517 	&format_attr_filter_state5.attr,
3518 	&format_attr_filter_rem.attr,
3519 	&format_attr_filter_loc.attr,
3520 	&format_attr_filter_nm.attr,
3521 	&format_attr_filter_all_op.attr,
3522 	&format_attr_filter_not_nm.attr,
3523 	&format_attr_filter_opc_0.attr,
3524 	&format_attr_filter_opc_1.attr,
3525 	&format_attr_filter_nc.attr,
3526 	&format_attr_filter_isoc.attr,
3527 	NULL,
3528 };
3529 
3530 static const struct attribute_group skx_uncore_chabox_format_group = {
3531 	.name = "format",
3532 	.attrs = skx_uncore_cha_formats_attr,
3533 };
3534 
3535 static struct event_constraint skx_uncore_chabox_constraints[] = {
3536 	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
3537 	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
3538 	EVENT_CONSTRAINT_END
3539 };
3540 
3541 static struct extra_reg skx_uncore_cha_extra_regs[] = {
3542 	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
3543 	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
3544 	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
3545 	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
3546 	SNBEP_CBO_EVENT_EXTRA_REG(0x3134, 0xffff, 0x4),
3547 	SNBEP_CBO_EVENT_EXTRA_REG(0x9134, 0xffff, 0x4),
3548 	SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x8),
3549 	SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x8),
3550 	SNBEP_CBO_EVENT_EXTRA_REG(0x38, 0xff, 0x3),
3551 	EVENT_EXTRA_END
3552 };
3553 
3554 static u64 skx_cha_filter_mask(int fields)
3555 {
3556 	u64 mask = 0;
3557 
3558 	if (fields & 0x1)
3559 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_TID;
3560 	if (fields & 0x2)
3561 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LINK;
3562 	if (fields & 0x4)
3563 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_STATE;
3564 	if (fields & 0x8) {
3565 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_REM;
3566 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LOC;
3567 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC;
3568 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NM;
3569 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM;
3570 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC0;
3571 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC1;
3572 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NC;
3573 		mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ISOC;
3574 	}
3575 	return mask;
3576 }
3577 
3578 static struct event_constraint *
3579 skx_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
3580 {
3581 	return __snbep_cbox_get_constraint(box, event, skx_cha_filter_mask);
3582 }
3583 
3584 static int skx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
3585 {
3586 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
3587 	struct extra_reg *er;
3588 	int idx = 0;
3589 
3590 	for (er = skx_uncore_cha_extra_regs; er->msr; er++) {
3591 		if (er->event != (event->hw.config & er->config_mask))
3592 			continue;
3593 		idx |= er->idx;
3594 	}
3595 
3596 	if (idx) {
3597 		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
3598 			    HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
3599 		reg1->config = event->attr.config1 & skx_cha_filter_mask(idx);
3600 		reg1->idx = idx;
3601 	}
3602 	return 0;
3603 }
3604 
3605 static struct intel_uncore_ops skx_uncore_chabox_ops = {
3606 	/* There is no frz_en for chabox ctl */
3607 	.init_box		= ivbep_uncore_msr_init_box,
3608 	.disable_box		= snbep_uncore_msr_disable_box,
3609 	.enable_box		= snbep_uncore_msr_enable_box,
3610 	.disable_event		= snbep_uncore_msr_disable_event,
3611 	.enable_event		= hswep_cbox_enable_event,
3612 	.read_counter		= uncore_msr_read_counter,
3613 	.hw_config		= skx_cha_hw_config,
3614 	.get_constraint		= skx_cha_get_constraint,
3615 	.put_constraint		= snbep_cbox_put_constraint,
3616 };
3617 
3618 static struct intel_uncore_type skx_uncore_chabox = {
3619 	.name			= "cha",
3620 	.num_counters		= 4,
3621 	.perf_ctr_bits		= 48,
3622 	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
3623 	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
3624 	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
3625 	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
3626 	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
3627 	.num_shared_regs	= 1,
3628 	.constraints		= skx_uncore_chabox_constraints,
3629 	.ops			= &skx_uncore_chabox_ops,
3630 	.format_group		= &skx_uncore_chabox_format_group,
3631 };
3632 
3633 static struct attribute *skx_uncore_iio_formats_attr[] = {
3634 	&format_attr_event.attr,
3635 	&format_attr_umask.attr,
3636 	&format_attr_edge.attr,
3637 	&format_attr_inv.attr,
3638 	&format_attr_thresh9.attr,
3639 	&format_attr_ch_mask.attr,
3640 	&format_attr_fc_mask.attr,
3641 	NULL,
3642 };
3643 
3644 static const struct attribute_group skx_uncore_iio_format_group = {
3645 	.name = "format",
3646 	.attrs = skx_uncore_iio_formats_attr,
3647 };
3648 
3649 static struct event_constraint skx_uncore_iio_constraints[] = {
3650 	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
3651 	UNCORE_EVENT_CONSTRAINT(0x88, 0xc),
3652 	UNCORE_EVENT_CONSTRAINT(0x95, 0xc),
3653 	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
3654 	UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
3655 	UNCORE_EVENT_CONSTRAINT(0xd4, 0xc),
3656 	EVENT_CONSTRAINT_END
3657 };
3658 
3659 static void skx_iio_enable_event(struct intel_uncore_box *box,
3660 				 struct perf_event *event)
3661 {
3662 	struct hw_perf_event *hwc = &event->hw;
3663 
3664 	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
3665 }
3666 
3667 static struct intel_uncore_ops skx_uncore_iio_ops = {
3668 	.init_box		= ivbep_uncore_msr_init_box,
3669 	.disable_box		= snbep_uncore_msr_disable_box,
3670 	.enable_box		= snbep_uncore_msr_enable_box,
3671 	.disable_event		= snbep_uncore_msr_disable_event,
3672 	.enable_event		= skx_iio_enable_event,
3673 	.read_counter		= uncore_msr_read_counter,
3674 };
3675 
3676 static inline u8 skx_iio_stack(struct intel_uncore_pmu *pmu, int die)
3677 {
3678 	return pmu->type->topology[die].configuration >>
3679 	       (pmu->pmu_idx * BUS_NUM_STRIDE);
3680 }
3681 
3682 static umode_t
3683 skx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
3684 {
3685 	struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(kobj_to_dev(kobj));
3686 
3687 	/* Root bus 0x00 is valid only for die 0 AND pmu_idx = 0. */
3688 	return (!skx_iio_stack(pmu, die) && pmu->pmu_idx) ? 0 : attr->mode;
3689 }
3690 
3691 static ssize_t skx_iio_mapping_show(struct device *dev,
3692 				    struct device_attribute *attr, char *buf)
3693 {
3694 	struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(dev);
3695 	struct dev_ext_attribute *ea = to_dev_ext_attribute(attr);
3696 	long die = (long)ea->var;
3697 
3698 	return sprintf(buf, "%04x:%02x\n", pmu->type->topology[die].segment,
3699 					   skx_iio_stack(pmu, die));
3700 }
3701 
3702 static int skx_msr_cpu_bus_read(int cpu, u64 *topology)
3703 {
3704 	u64 msr_value;
3705 
3706 	if (rdmsrl_on_cpu(cpu, SKX_MSR_CPU_BUS_NUMBER, &msr_value) ||
3707 			!(msr_value & SKX_MSR_CPU_BUS_VALID_BIT))
3708 		return -ENXIO;
3709 
3710 	*topology = msr_value;
3711 
3712 	return 0;
3713 }
3714 
3715 static int die_to_cpu(int die)
3716 {
3717 	int res = 0, cpu, current_die;
3718 	/*
3719 	 * Using cpus_read_lock() to ensure cpu is not going down between
3720 	 * looking at cpu_online_mask.
3721 	 */
3722 	cpus_read_lock();
3723 	for_each_online_cpu(cpu) {
3724 		current_die = topology_logical_die_id(cpu);
3725 		if (current_die == die) {
3726 			res = cpu;
3727 			break;
3728 		}
3729 	}
3730 	cpus_read_unlock();
3731 	return res;
3732 }
3733 
3734 static int skx_iio_get_topology(struct intel_uncore_type *type)
3735 {
3736 	int die, ret = -EPERM;
3737 
3738 	type->topology = kcalloc(uncore_max_dies(), sizeof(*type->topology),
3739 				 GFP_KERNEL);
3740 	if (!type->topology)
3741 		return -ENOMEM;
3742 
3743 	for (die = 0; die < uncore_max_dies(); die++) {
3744 		ret = skx_msr_cpu_bus_read(die_to_cpu(die),
3745 					   &type->topology[die].configuration);
3746 		if (ret)
3747 			break;
3748 
3749 		ret = uncore_die_to_segment(die);
3750 		if (ret < 0)
3751 			break;
3752 
3753 		type->topology[die].segment = ret;
3754 	}
3755 
3756 	if (ret < 0) {
3757 		kfree(type->topology);
3758 		type->topology = NULL;
3759 	}
3760 
3761 	return ret;
3762 }
3763 
3764 static struct attribute_group skx_iio_mapping_group = {
3765 	.is_visible	= skx_iio_mapping_visible,
3766 };
3767 
3768 static const struct attribute_group *skx_iio_attr_update[] = {
3769 	&skx_iio_mapping_group,
3770 	NULL,
3771 };
3772 
3773 static int skx_iio_set_mapping(struct intel_uncore_type *type)
3774 {
3775 	char buf[64];
3776 	int ret;
3777 	long die = -1;
3778 	struct attribute **attrs = NULL;
3779 	struct dev_ext_attribute *eas = NULL;
3780 
3781 	ret = skx_iio_get_topology(type);
3782 	if (ret < 0)
3783 		goto clear_attr_update;
3784 
3785 	ret = -ENOMEM;
3786 
3787 	/* One more for NULL. */
3788 	attrs = kcalloc((uncore_max_dies() + 1), sizeof(*attrs), GFP_KERNEL);
3789 	if (!attrs)
3790 		goto err;
3791 
3792 	eas = kcalloc(uncore_max_dies(), sizeof(*eas), GFP_KERNEL);
3793 	if (!eas)
3794 		goto err;
3795 
3796 	for (die = 0; die < uncore_max_dies(); die++) {
3797 		sprintf(buf, "die%ld", die);
3798 		sysfs_attr_init(&eas[die].attr.attr);
3799 		eas[die].attr.attr.name = kstrdup(buf, GFP_KERNEL);
3800 		if (!eas[die].attr.attr.name)
3801 			goto err;
3802 		eas[die].attr.attr.mode = 0444;
3803 		eas[die].attr.show = skx_iio_mapping_show;
3804 		eas[die].attr.store = NULL;
3805 		eas[die].var = (void *)die;
3806 		attrs[die] = &eas[die].attr.attr;
3807 	}
3808 	skx_iio_mapping_group.attrs = attrs;
3809 
3810 	return 0;
3811 err:
3812 	for (; die >= 0; die--)
3813 		kfree(eas[die].attr.attr.name);
3814 	kfree(eas);
3815 	kfree(attrs);
3816 	kfree(type->topology);
3817 clear_attr_update:
3818 	type->attr_update = NULL;
3819 	return ret;
3820 }
3821 
3822 static void skx_iio_cleanup_mapping(struct intel_uncore_type *type)
3823 {
3824 	struct attribute **attr = skx_iio_mapping_group.attrs;
3825 
3826 	if (!attr)
3827 		return;
3828 
3829 	for (; *attr; attr++)
3830 		kfree((*attr)->name);
3831 	kfree(attr_to_ext_attr(*skx_iio_mapping_group.attrs));
3832 	kfree(skx_iio_mapping_group.attrs);
3833 	skx_iio_mapping_group.attrs = NULL;
3834 	kfree(type->topology);
3835 }
3836 
3837 static struct intel_uncore_type skx_uncore_iio = {
3838 	.name			= "iio",
3839 	.num_counters		= 4,
3840 	.num_boxes		= 6,
3841 	.perf_ctr_bits		= 48,
3842 	.event_ctl		= SKX_IIO0_MSR_PMON_CTL0,
3843 	.perf_ctr		= SKX_IIO0_MSR_PMON_CTR0,
3844 	.event_mask		= SKX_IIO_PMON_RAW_EVENT_MASK,
3845 	.event_mask_ext		= SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
3846 	.box_ctl		= SKX_IIO0_MSR_PMON_BOX_CTL,
3847 	.msr_offset		= SKX_IIO_MSR_OFFSET,
3848 	.constraints		= skx_uncore_iio_constraints,
3849 	.ops			= &skx_uncore_iio_ops,
3850 	.format_group		= &skx_uncore_iio_format_group,
3851 	.attr_update		= skx_iio_attr_update,
3852 	.set_mapping		= skx_iio_set_mapping,
3853 	.cleanup_mapping	= skx_iio_cleanup_mapping,
3854 };
3855 
3856 enum perf_uncore_iio_freerunning_type_id {
3857 	SKX_IIO_MSR_IOCLK			= 0,
3858 	SKX_IIO_MSR_BW				= 1,
3859 	SKX_IIO_MSR_UTIL			= 2,
3860 
3861 	SKX_IIO_FREERUNNING_TYPE_MAX,
3862 };
3863 
3864 
3865 static struct freerunning_counters skx_iio_freerunning[] = {
3866 	[SKX_IIO_MSR_IOCLK]	= { 0xa45, 0x1, 0x20, 1, 36 },
3867 	[SKX_IIO_MSR_BW]	= { 0xb00, 0x1, 0x10, 8, 36 },
3868 	[SKX_IIO_MSR_UTIL]	= { 0xb08, 0x1, 0x10, 8, 36 },
3869 };
3870 
3871 static struct uncore_event_desc skx_uncore_iio_freerunning_events[] = {
3872 	/* Free-Running IO CLOCKS Counter */
3873 	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
3874 	/* Free-Running IIO BANDWIDTH Counters */
3875 	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
3876 	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
3877 	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
3878 	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
3879 	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
3880 	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
3881 	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
3882 	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
3883 	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
3884 	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
3885 	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
3886 	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
3887 	INTEL_UNCORE_EVENT_DESC(bw_out_port0,		"event=0xff,umask=0x24"),
3888 	INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale,	"3.814697266e-6"),
3889 	INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit,	"MiB"),
3890 	INTEL_UNCORE_EVENT_DESC(bw_out_port1,		"event=0xff,umask=0x25"),
3891 	INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale,	"3.814697266e-6"),
3892 	INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit,	"MiB"),
3893 	INTEL_UNCORE_EVENT_DESC(bw_out_port2,		"event=0xff,umask=0x26"),
3894 	INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale,	"3.814697266e-6"),
3895 	INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit,	"MiB"),
3896 	INTEL_UNCORE_EVENT_DESC(bw_out_port3,		"event=0xff,umask=0x27"),
3897 	INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale,	"3.814697266e-6"),
3898 	INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit,	"MiB"),
3899 	/* Free-running IIO UTILIZATION Counters */
3900 	INTEL_UNCORE_EVENT_DESC(util_in_port0,		"event=0xff,umask=0x30"),
3901 	INTEL_UNCORE_EVENT_DESC(util_out_port0,		"event=0xff,umask=0x31"),
3902 	INTEL_UNCORE_EVENT_DESC(util_in_port1,		"event=0xff,umask=0x32"),
3903 	INTEL_UNCORE_EVENT_DESC(util_out_port1,		"event=0xff,umask=0x33"),
3904 	INTEL_UNCORE_EVENT_DESC(util_in_port2,		"event=0xff,umask=0x34"),
3905 	INTEL_UNCORE_EVENT_DESC(util_out_port2,		"event=0xff,umask=0x35"),
3906 	INTEL_UNCORE_EVENT_DESC(util_in_port3,		"event=0xff,umask=0x36"),
3907 	INTEL_UNCORE_EVENT_DESC(util_out_port3,		"event=0xff,umask=0x37"),
3908 	{ /* end: all zeroes */ },
3909 };
3910 
3911 static struct intel_uncore_ops skx_uncore_iio_freerunning_ops = {
3912 	.read_counter		= uncore_msr_read_counter,
3913 	.hw_config		= uncore_freerunning_hw_config,
3914 };
3915 
3916 static struct attribute *skx_uncore_iio_freerunning_formats_attr[] = {
3917 	&format_attr_event.attr,
3918 	&format_attr_umask.attr,
3919 	NULL,
3920 };
3921 
3922 static const struct attribute_group skx_uncore_iio_freerunning_format_group = {
3923 	.name = "format",
3924 	.attrs = skx_uncore_iio_freerunning_formats_attr,
3925 };
3926 
3927 static struct intel_uncore_type skx_uncore_iio_free_running = {
3928 	.name			= "iio_free_running",
3929 	.num_counters		= 17,
3930 	.num_boxes		= 6,
3931 	.num_freerunning_types	= SKX_IIO_FREERUNNING_TYPE_MAX,
3932 	.freerunning		= skx_iio_freerunning,
3933 	.ops			= &skx_uncore_iio_freerunning_ops,
3934 	.event_descs		= skx_uncore_iio_freerunning_events,
3935 	.format_group		= &skx_uncore_iio_freerunning_format_group,
3936 };
3937 
3938 static struct attribute *skx_uncore_formats_attr[] = {
3939 	&format_attr_event.attr,
3940 	&format_attr_umask.attr,
3941 	&format_attr_edge.attr,
3942 	&format_attr_inv.attr,
3943 	&format_attr_thresh8.attr,
3944 	NULL,
3945 };
3946 
3947 static const struct attribute_group skx_uncore_format_group = {
3948 	.name = "format",
3949 	.attrs = skx_uncore_formats_attr,
3950 };
3951 
3952 static struct intel_uncore_type skx_uncore_irp = {
3953 	.name			= "irp",
3954 	.num_counters		= 2,
3955 	.num_boxes		= 6,
3956 	.perf_ctr_bits		= 48,
3957 	.event_ctl		= SKX_IRP0_MSR_PMON_CTL0,
3958 	.perf_ctr		= SKX_IRP0_MSR_PMON_CTR0,
3959 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
3960 	.box_ctl		= SKX_IRP0_MSR_PMON_BOX_CTL,
3961 	.msr_offset		= SKX_IRP_MSR_OFFSET,
3962 	.ops			= &skx_uncore_iio_ops,
3963 	.format_group		= &skx_uncore_format_group,
3964 };
3965 
3966 static struct attribute *skx_uncore_pcu_formats_attr[] = {
3967 	&format_attr_event.attr,
3968 	&format_attr_umask.attr,
3969 	&format_attr_edge.attr,
3970 	&format_attr_inv.attr,
3971 	&format_attr_thresh8.attr,
3972 	&format_attr_occ_invert.attr,
3973 	&format_attr_occ_edge_det.attr,
3974 	&format_attr_filter_band0.attr,
3975 	&format_attr_filter_band1.attr,
3976 	&format_attr_filter_band2.attr,
3977 	&format_attr_filter_band3.attr,
3978 	NULL,
3979 };
3980 
3981 static struct attribute_group skx_uncore_pcu_format_group = {
3982 	.name = "format",
3983 	.attrs = skx_uncore_pcu_formats_attr,
3984 };
3985 
3986 static struct intel_uncore_ops skx_uncore_pcu_ops = {
3987 	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
3988 	.hw_config		= hswep_pcu_hw_config,
3989 	.get_constraint		= snbep_pcu_get_constraint,
3990 	.put_constraint		= snbep_pcu_put_constraint,
3991 };
3992 
3993 static struct intel_uncore_type skx_uncore_pcu = {
3994 	.name			= "pcu",
3995 	.num_counters		= 4,
3996 	.num_boxes		= 1,
3997 	.perf_ctr_bits		= 48,
3998 	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
3999 	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
4000 	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
4001 	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
4002 	.num_shared_regs	= 1,
4003 	.ops			= &skx_uncore_pcu_ops,
4004 	.format_group		= &skx_uncore_pcu_format_group,
4005 };
4006 
4007 static struct intel_uncore_type *skx_msr_uncores[] = {
4008 	&skx_uncore_ubox,
4009 	&skx_uncore_chabox,
4010 	&skx_uncore_iio,
4011 	&skx_uncore_iio_free_running,
4012 	&skx_uncore_irp,
4013 	&skx_uncore_pcu,
4014 	NULL,
4015 };
4016 
4017 /*
4018  * To determine the number of CHAs, it should read bits 27:0 in the CAPID6
4019  * register which located at Device 30, Function 3, Offset 0x9C. PCI ID 0x2083.
4020  */
4021 #define SKX_CAPID6		0x9c
4022 #define SKX_CHA_BIT_MASK	GENMASK(27, 0)
4023 
4024 static int skx_count_chabox(void)
4025 {
4026 	struct pci_dev *dev = NULL;
4027 	u32 val = 0;
4028 
4029 	dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2083, dev);
4030 	if (!dev)
4031 		goto out;
4032 
4033 	pci_read_config_dword(dev, SKX_CAPID6, &val);
4034 	val &= SKX_CHA_BIT_MASK;
4035 out:
4036 	pci_dev_put(dev);
4037 	return hweight32(val);
4038 }
4039 
4040 void skx_uncore_cpu_init(void)
4041 {
4042 	skx_uncore_chabox.num_boxes = skx_count_chabox();
4043 	uncore_msr_uncores = skx_msr_uncores;
4044 }
4045 
4046 static struct intel_uncore_type skx_uncore_imc = {
4047 	.name		= "imc",
4048 	.num_counters   = 4,
4049 	.num_boxes	= 6,
4050 	.perf_ctr_bits	= 48,
4051 	.fixed_ctr_bits	= 48,
4052 	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
4053 	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
4054 	.event_descs	= hswep_uncore_imc_events,
4055 	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
4056 	.event_ctl	= SNBEP_PCI_PMON_CTL0,
4057 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
4058 	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
4059 	.ops		= &ivbep_uncore_pci_ops,
4060 	.format_group	= &skx_uncore_format_group,
4061 };
4062 
4063 static struct attribute *skx_upi_uncore_formats_attr[] = {
4064 	&format_attr_event.attr,
4065 	&format_attr_umask_ext.attr,
4066 	&format_attr_edge.attr,
4067 	&format_attr_inv.attr,
4068 	&format_attr_thresh8.attr,
4069 	NULL,
4070 };
4071 
4072 static const struct attribute_group skx_upi_uncore_format_group = {
4073 	.name = "format",
4074 	.attrs = skx_upi_uncore_formats_attr,
4075 };
4076 
4077 static void skx_upi_uncore_pci_init_box(struct intel_uncore_box *box)
4078 {
4079 	struct pci_dev *pdev = box->pci_dev;
4080 
4081 	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
4082 	pci_write_config_dword(pdev, SKX_UPI_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
4083 }
4084 
4085 static struct intel_uncore_ops skx_upi_uncore_pci_ops = {
4086 	.init_box	= skx_upi_uncore_pci_init_box,
4087 	.disable_box	= snbep_uncore_pci_disable_box,
4088 	.enable_box	= snbep_uncore_pci_enable_box,
4089 	.disable_event	= snbep_uncore_pci_disable_event,
4090 	.enable_event	= snbep_uncore_pci_enable_event,
4091 	.read_counter	= snbep_uncore_pci_read_counter,
4092 };
4093 
4094 static struct intel_uncore_type skx_uncore_upi = {
4095 	.name		= "upi",
4096 	.num_counters   = 4,
4097 	.num_boxes	= 3,
4098 	.perf_ctr_bits	= 48,
4099 	.perf_ctr	= SKX_UPI_PCI_PMON_CTR0,
4100 	.event_ctl	= SKX_UPI_PCI_PMON_CTL0,
4101 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
4102 	.event_mask_ext = SKX_UPI_CTL_UMASK_EXT,
4103 	.box_ctl	= SKX_UPI_PCI_PMON_BOX_CTL,
4104 	.ops		= &skx_upi_uncore_pci_ops,
4105 	.format_group	= &skx_upi_uncore_format_group,
4106 };
4107 
4108 static void skx_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
4109 {
4110 	struct pci_dev *pdev = box->pci_dev;
4111 
4112 	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
4113 	pci_write_config_dword(pdev, SKX_M2M_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
4114 }
4115 
4116 static struct intel_uncore_ops skx_m2m_uncore_pci_ops = {
4117 	.init_box	= skx_m2m_uncore_pci_init_box,
4118 	.disable_box	= snbep_uncore_pci_disable_box,
4119 	.enable_box	= snbep_uncore_pci_enable_box,
4120 	.disable_event	= snbep_uncore_pci_disable_event,
4121 	.enable_event	= snbep_uncore_pci_enable_event,
4122 	.read_counter	= snbep_uncore_pci_read_counter,
4123 };
4124 
4125 static struct intel_uncore_type skx_uncore_m2m = {
4126 	.name		= "m2m",
4127 	.num_counters   = 4,
4128 	.num_boxes	= 2,
4129 	.perf_ctr_bits	= 48,
4130 	.perf_ctr	= SKX_M2M_PCI_PMON_CTR0,
4131 	.event_ctl	= SKX_M2M_PCI_PMON_CTL0,
4132 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
4133 	.box_ctl	= SKX_M2M_PCI_PMON_BOX_CTL,
4134 	.ops		= &skx_m2m_uncore_pci_ops,
4135 	.format_group	= &skx_uncore_format_group,
4136 };
4137 
4138 static struct event_constraint skx_uncore_m2pcie_constraints[] = {
4139 	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
4140 	EVENT_CONSTRAINT_END
4141 };
4142 
4143 static struct intel_uncore_type skx_uncore_m2pcie = {
4144 	.name		= "m2pcie",
4145 	.num_counters   = 4,
4146 	.num_boxes	= 4,
4147 	.perf_ctr_bits	= 48,
4148 	.constraints	= skx_uncore_m2pcie_constraints,
4149 	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
4150 	.event_ctl	= SNBEP_PCI_PMON_CTL0,
4151 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
4152 	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
4153 	.ops		= &ivbep_uncore_pci_ops,
4154 	.format_group	= &skx_uncore_format_group,
4155 };
4156 
4157 static struct event_constraint skx_uncore_m3upi_constraints[] = {
4158 	UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
4159 	UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
4160 	UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
4161 	UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
4162 	UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
4163 	UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
4164 	UNCORE_EVENT_CONSTRAINT(0x51, 0x7),
4165 	UNCORE_EVENT_CONSTRAINT(0x52, 0x7),
4166 	EVENT_CONSTRAINT_END
4167 };
4168 
4169 static struct intel_uncore_type skx_uncore_m3upi = {
4170 	.name		= "m3upi",
4171 	.num_counters   = 3,
4172 	.num_boxes	= 3,
4173 	.perf_ctr_bits	= 48,
4174 	.constraints	= skx_uncore_m3upi_constraints,
4175 	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
4176 	.event_ctl	= SNBEP_PCI_PMON_CTL0,
4177 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
4178 	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
4179 	.ops		= &ivbep_uncore_pci_ops,
4180 	.format_group	= &skx_uncore_format_group,
4181 };
4182 
4183 enum {
4184 	SKX_PCI_UNCORE_IMC,
4185 	SKX_PCI_UNCORE_M2M,
4186 	SKX_PCI_UNCORE_UPI,
4187 	SKX_PCI_UNCORE_M2PCIE,
4188 	SKX_PCI_UNCORE_M3UPI,
4189 };
4190 
4191 static struct intel_uncore_type *skx_pci_uncores[] = {
4192 	[SKX_PCI_UNCORE_IMC]	= &skx_uncore_imc,
4193 	[SKX_PCI_UNCORE_M2M]	= &skx_uncore_m2m,
4194 	[SKX_PCI_UNCORE_UPI]	= &skx_uncore_upi,
4195 	[SKX_PCI_UNCORE_M2PCIE]	= &skx_uncore_m2pcie,
4196 	[SKX_PCI_UNCORE_M3UPI]	= &skx_uncore_m3upi,
4197 	NULL,
4198 };
4199 
4200 static const struct pci_device_id skx_uncore_pci_ids[] = {
4201 	{ /* MC0 Channel 0 */
4202 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
4203 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 2, SKX_PCI_UNCORE_IMC, 0),
4204 	},
4205 	{ /* MC0 Channel 1 */
4206 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
4207 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 6, SKX_PCI_UNCORE_IMC, 1),
4208 	},
4209 	{ /* MC0 Channel 2 */
4210 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
4211 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 2, SKX_PCI_UNCORE_IMC, 2),
4212 	},
4213 	{ /* MC1 Channel 0 */
4214 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
4215 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 2, SKX_PCI_UNCORE_IMC, 3),
4216 	},
4217 	{ /* MC1 Channel 1 */
4218 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
4219 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 6, SKX_PCI_UNCORE_IMC, 4),
4220 	},
4221 	{ /* MC1 Channel 2 */
4222 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
4223 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 2, SKX_PCI_UNCORE_IMC, 5),
4224 	},
4225 	{ /* M2M0 */
4226 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
4227 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 0, SKX_PCI_UNCORE_M2M, 0),
4228 	},
4229 	{ /* M2M1 */
4230 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
4231 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 0, SKX_PCI_UNCORE_M2M, 1),
4232 	},
4233 	{ /* UPI0 Link 0 */
4234 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
4235 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, SKX_PCI_UNCORE_UPI, 0),
4236 	},
4237 	{ /* UPI0 Link 1 */
4238 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
4239 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, SKX_PCI_UNCORE_UPI, 1),
4240 	},
4241 	{ /* UPI1 Link 2 */
4242 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
4243 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, SKX_PCI_UNCORE_UPI, 2),
4244 	},
4245 	{ /* M2PCIe 0 */
4246 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
4247 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 1, SKX_PCI_UNCORE_M2PCIE, 0),
4248 	},
4249 	{ /* M2PCIe 1 */
4250 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
4251 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 1, SKX_PCI_UNCORE_M2PCIE, 1),
4252 	},
4253 	{ /* M2PCIe 2 */
4254 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
4255 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(23, 1, SKX_PCI_UNCORE_M2PCIE, 2),
4256 	},
4257 	{ /* M2PCIe 3 */
4258 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
4259 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3),
4260 	},
4261 	{ /* M3UPI0 Link 0 */
4262 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
4263 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 0),
4264 	},
4265 	{ /* M3UPI0 Link 1 */
4266 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204E),
4267 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 2, SKX_PCI_UNCORE_M3UPI, 1),
4268 	},
4269 	{ /* M3UPI1 Link 2 */
4270 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
4271 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 5, SKX_PCI_UNCORE_M3UPI, 2),
4272 	},
4273 	{ /* end: all zeroes */ }
4274 };
4275 
4276 
4277 static struct pci_driver skx_uncore_pci_driver = {
4278 	.name		= "skx_uncore",
4279 	.id_table	= skx_uncore_pci_ids,
4280 };
4281 
4282 int skx_uncore_pci_init(void)
4283 {
4284 	/* need to double check pci address */
4285 	int ret = snbep_pci2phy_map_init(0x2014, SKX_CPUNODEID, SKX_GIDNIDMAP, false);
4286 
4287 	if (ret)
4288 		return ret;
4289 
4290 	uncore_pci_uncores = skx_pci_uncores;
4291 	uncore_pci_driver = &skx_uncore_pci_driver;
4292 	return 0;
4293 }
4294 
4295 /* end of SKX uncore support */
4296 
4297 /* SNR uncore support */
4298 
4299 static struct intel_uncore_type snr_uncore_ubox = {
4300 	.name			= "ubox",
4301 	.num_counters		= 2,
4302 	.num_boxes		= 1,
4303 	.perf_ctr_bits		= 48,
4304 	.fixed_ctr_bits		= 48,
4305 	.perf_ctr		= SNR_U_MSR_PMON_CTR0,
4306 	.event_ctl		= SNR_U_MSR_PMON_CTL0,
4307 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
4308 	.fixed_ctr		= SNR_U_MSR_PMON_UCLK_FIXED_CTR,
4309 	.fixed_ctl		= SNR_U_MSR_PMON_UCLK_FIXED_CTL,
4310 	.ops			= &ivbep_uncore_msr_ops,
4311 	.format_group		= &ivbep_uncore_format_group,
4312 };
4313 
4314 static struct attribute *snr_uncore_cha_formats_attr[] = {
4315 	&format_attr_event.attr,
4316 	&format_attr_umask_ext2.attr,
4317 	&format_attr_edge.attr,
4318 	&format_attr_tid_en.attr,
4319 	&format_attr_inv.attr,
4320 	&format_attr_thresh8.attr,
4321 	&format_attr_filter_tid5.attr,
4322 	NULL,
4323 };
4324 static const struct attribute_group snr_uncore_chabox_format_group = {
4325 	.name = "format",
4326 	.attrs = snr_uncore_cha_formats_attr,
4327 };
4328 
4329 static int snr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4330 {
4331 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
4332 
4333 	reg1->reg = SNR_C0_MSR_PMON_BOX_FILTER0 +
4334 		    box->pmu->type->msr_offset * box->pmu->pmu_idx;
4335 	reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
4336 	reg1->idx = 0;
4337 
4338 	return 0;
4339 }
4340 
4341 static void snr_cha_enable_event(struct intel_uncore_box *box,
4342 				   struct perf_event *event)
4343 {
4344 	struct hw_perf_event *hwc = &event->hw;
4345 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
4346 
4347 	if (reg1->idx != EXTRA_REG_NONE)
4348 		wrmsrl(reg1->reg, reg1->config);
4349 
4350 	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
4351 }
4352 
4353 static struct intel_uncore_ops snr_uncore_chabox_ops = {
4354 	.init_box		= ivbep_uncore_msr_init_box,
4355 	.disable_box		= snbep_uncore_msr_disable_box,
4356 	.enable_box		= snbep_uncore_msr_enable_box,
4357 	.disable_event		= snbep_uncore_msr_disable_event,
4358 	.enable_event		= snr_cha_enable_event,
4359 	.read_counter		= uncore_msr_read_counter,
4360 	.hw_config		= snr_cha_hw_config,
4361 };
4362 
4363 static struct intel_uncore_type snr_uncore_chabox = {
4364 	.name			= "cha",
4365 	.num_counters		= 4,
4366 	.num_boxes		= 6,
4367 	.perf_ctr_bits		= 48,
4368 	.event_ctl		= SNR_CHA_MSR_PMON_CTL0,
4369 	.perf_ctr		= SNR_CHA_MSR_PMON_CTR0,
4370 	.box_ctl		= SNR_CHA_MSR_PMON_BOX_CTL,
4371 	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
4372 	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
4373 	.event_mask_ext		= SNR_CHA_RAW_EVENT_MASK_EXT,
4374 	.ops			= &snr_uncore_chabox_ops,
4375 	.format_group		= &snr_uncore_chabox_format_group,
4376 };
4377 
4378 static struct attribute *snr_uncore_iio_formats_attr[] = {
4379 	&format_attr_event.attr,
4380 	&format_attr_umask.attr,
4381 	&format_attr_edge.attr,
4382 	&format_attr_inv.attr,
4383 	&format_attr_thresh9.attr,
4384 	&format_attr_ch_mask2.attr,
4385 	&format_attr_fc_mask2.attr,
4386 	NULL,
4387 };
4388 
4389 static const struct attribute_group snr_uncore_iio_format_group = {
4390 	.name = "format",
4391 	.attrs = snr_uncore_iio_formats_attr,
4392 };
4393 
4394 static struct intel_uncore_type snr_uncore_iio = {
4395 	.name			= "iio",
4396 	.num_counters		= 4,
4397 	.num_boxes		= 5,
4398 	.perf_ctr_bits		= 48,
4399 	.event_ctl		= SNR_IIO_MSR_PMON_CTL0,
4400 	.perf_ctr		= SNR_IIO_MSR_PMON_CTR0,
4401 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
4402 	.event_mask_ext		= SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
4403 	.box_ctl		= SNR_IIO_MSR_PMON_BOX_CTL,
4404 	.msr_offset		= SNR_IIO_MSR_OFFSET,
4405 	.ops			= &ivbep_uncore_msr_ops,
4406 	.format_group		= &snr_uncore_iio_format_group,
4407 };
4408 
4409 static struct intel_uncore_type snr_uncore_irp = {
4410 	.name			= "irp",
4411 	.num_counters		= 2,
4412 	.num_boxes		= 5,
4413 	.perf_ctr_bits		= 48,
4414 	.event_ctl		= SNR_IRP0_MSR_PMON_CTL0,
4415 	.perf_ctr		= SNR_IRP0_MSR_PMON_CTR0,
4416 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
4417 	.box_ctl		= SNR_IRP0_MSR_PMON_BOX_CTL,
4418 	.msr_offset		= SNR_IRP_MSR_OFFSET,
4419 	.ops			= &ivbep_uncore_msr_ops,
4420 	.format_group		= &ivbep_uncore_format_group,
4421 };
4422 
4423 static struct intel_uncore_type snr_uncore_m2pcie = {
4424 	.name		= "m2pcie",
4425 	.num_counters	= 4,
4426 	.num_boxes	= 5,
4427 	.perf_ctr_bits	= 48,
4428 	.event_ctl	= SNR_M2PCIE_MSR_PMON_CTL0,
4429 	.perf_ctr	= SNR_M2PCIE_MSR_PMON_CTR0,
4430 	.box_ctl	= SNR_M2PCIE_MSR_PMON_BOX_CTL,
4431 	.msr_offset	= SNR_M2PCIE_MSR_OFFSET,
4432 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
4433 	.ops		= &ivbep_uncore_msr_ops,
4434 	.format_group	= &ivbep_uncore_format_group,
4435 };
4436 
4437 static int snr_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4438 {
4439 	struct hw_perf_event *hwc = &event->hw;
4440 	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
4441 	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
4442 
4443 	if (ev_sel >= 0xb && ev_sel <= 0xe) {
4444 		reg1->reg = SNR_PCU_MSR_PMON_BOX_FILTER;
4445 		reg1->idx = ev_sel - 0xb;
4446 		reg1->config = event->attr.config1 & (0xff << reg1->idx);
4447 	}
4448 	return 0;
4449 }
4450 
4451 static struct intel_uncore_ops snr_uncore_pcu_ops = {
4452 	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
4453 	.hw_config		= snr_pcu_hw_config,
4454 	.get_constraint		= snbep_pcu_get_constraint,
4455 	.put_constraint		= snbep_pcu_put_constraint,
4456 };
4457 
4458 static struct intel_uncore_type snr_uncore_pcu = {
4459 	.name			= "pcu",
4460 	.num_counters		= 4,
4461 	.num_boxes		= 1,
4462 	.perf_ctr_bits		= 48,
4463 	.perf_ctr		= SNR_PCU_MSR_PMON_CTR0,
4464 	.event_ctl		= SNR_PCU_MSR_PMON_CTL0,
4465 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
4466 	.box_ctl		= SNR_PCU_MSR_PMON_BOX_CTL,
4467 	.num_shared_regs	= 1,
4468 	.ops			= &snr_uncore_pcu_ops,
4469 	.format_group		= &skx_uncore_pcu_format_group,
4470 };
4471 
4472 enum perf_uncore_snr_iio_freerunning_type_id {
4473 	SNR_IIO_MSR_IOCLK,
4474 	SNR_IIO_MSR_BW_IN,
4475 
4476 	SNR_IIO_FREERUNNING_TYPE_MAX,
4477 };
4478 
4479 static struct freerunning_counters snr_iio_freerunning[] = {
4480 	[SNR_IIO_MSR_IOCLK]	= { 0x1eac, 0x1, 0x10, 1, 48 },
4481 	[SNR_IIO_MSR_BW_IN]	= { 0x1f00, 0x1, 0x10, 8, 48 },
4482 };
4483 
4484 static struct uncore_event_desc snr_uncore_iio_freerunning_events[] = {
4485 	/* Free-Running IIO CLOCKS Counter */
4486 	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
4487 	/* Free-Running IIO BANDWIDTH IN Counters */
4488 	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
4489 	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
4490 	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
4491 	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
4492 	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
4493 	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
4494 	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
4495 	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
4496 	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
4497 	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
4498 	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
4499 	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
4500 	INTEL_UNCORE_EVENT_DESC(bw_in_port4,		"event=0xff,umask=0x24"),
4501 	INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale,	"3.814697266e-6"),
4502 	INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit,	"MiB"),
4503 	INTEL_UNCORE_EVENT_DESC(bw_in_port5,		"event=0xff,umask=0x25"),
4504 	INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale,	"3.814697266e-6"),
4505 	INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit,	"MiB"),
4506 	INTEL_UNCORE_EVENT_DESC(bw_in_port6,		"event=0xff,umask=0x26"),
4507 	INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale,	"3.814697266e-6"),
4508 	INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit,	"MiB"),
4509 	INTEL_UNCORE_EVENT_DESC(bw_in_port7,		"event=0xff,umask=0x27"),
4510 	INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale,	"3.814697266e-6"),
4511 	INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit,	"MiB"),
4512 	{ /* end: all zeroes */ },
4513 };
4514 
4515 static struct intel_uncore_type snr_uncore_iio_free_running = {
4516 	.name			= "iio_free_running",
4517 	.num_counters		= 9,
4518 	.num_boxes		= 5,
4519 	.num_freerunning_types	= SNR_IIO_FREERUNNING_TYPE_MAX,
4520 	.freerunning		= snr_iio_freerunning,
4521 	.ops			= &skx_uncore_iio_freerunning_ops,
4522 	.event_descs		= snr_uncore_iio_freerunning_events,
4523 	.format_group		= &skx_uncore_iio_freerunning_format_group,
4524 };
4525 
4526 static struct intel_uncore_type *snr_msr_uncores[] = {
4527 	&snr_uncore_ubox,
4528 	&snr_uncore_chabox,
4529 	&snr_uncore_iio,
4530 	&snr_uncore_irp,
4531 	&snr_uncore_m2pcie,
4532 	&snr_uncore_pcu,
4533 	&snr_uncore_iio_free_running,
4534 	NULL,
4535 };
4536 
4537 void snr_uncore_cpu_init(void)
4538 {
4539 	uncore_msr_uncores = snr_msr_uncores;
4540 }
4541 
4542 static void snr_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
4543 {
4544 	struct pci_dev *pdev = box->pci_dev;
4545 	int box_ctl = uncore_pci_box_ctl(box);
4546 
4547 	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
4548 	pci_write_config_dword(pdev, box_ctl, IVBEP_PMON_BOX_CTL_INT);
4549 }
4550 
4551 static struct intel_uncore_ops snr_m2m_uncore_pci_ops = {
4552 	.init_box	= snr_m2m_uncore_pci_init_box,
4553 	.disable_box	= snbep_uncore_pci_disable_box,
4554 	.enable_box	= snbep_uncore_pci_enable_box,
4555 	.disable_event	= snbep_uncore_pci_disable_event,
4556 	.enable_event	= snbep_uncore_pci_enable_event,
4557 	.read_counter	= snbep_uncore_pci_read_counter,
4558 };
4559 
4560 static struct attribute *snr_m2m_uncore_formats_attr[] = {
4561 	&format_attr_event.attr,
4562 	&format_attr_umask_ext3.attr,
4563 	&format_attr_edge.attr,
4564 	&format_attr_inv.attr,
4565 	&format_attr_thresh8.attr,
4566 	NULL,
4567 };
4568 
4569 static const struct attribute_group snr_m2m_uncore_format_group = {
4570 	.name = "format",
4571 	.attrs = snr_m2m_uncore_formats_attr,
4572 };
4573 
4574 static struct intel_uncore_type snr_uncore_m2m = {
4575 	.name		= "m2m",
4576 	.num_counters   = 4,
4577 	.num_boxes	= 1,
4578 	.perf_ctr_bits	= 48,
4579 	.perf_ctr	= SNR_M2M_PCI_PMON_CTR0,
4580 	.event_ctl	= SNR_M2M_PCI_PMON_CTL0,
4581 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
4582 	.event_mask_ext	= SNR_M2M_PCI_PMON_UMASK_EXT,
4583 	.box_ctl	= SNR_M2M_PCI_PMON_BOX_CTL,
4584 	.ops		= &snr_m2m_uncore_pci_ops,
4585 	.format_group	= &snr_m2m_uncore_format_group,
4586 };
4587 
4588 static void snr_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
4589 {
4590 	struct pci_dev *pdev = box->pci_dev;
4591 	struct hw_perf_event *hwc = &event->hw;
4592 
4593 	pci_write_config_dword(pdev, hwc->config_base, (u32)(hwc->config | SNBEP_PMON_CTL_EN));
4594 	pci_write_config_dword(pdev, hwc->config_base + 4, (u32)(hwc->config >> 32));
4595 }
4596 
4597 static struct intel_uncore_ops snr_pcie3_uncore_pci_ops = {
4598 	.init_box	= snr_m2m_uncore_pci_init_box,
4599 	.disable_box	= snbep_uncore_pci_disable_box,
4600 	.enable_box	= snbep_uncore_pci_enable_box,
4601 	.disable_event	= snbep_uncore_pci_disable_event,
4602 	.enable_event	= snr_uncore_pci_enable_event,
4603 	.read_counter	= snbep_uncore_pci_read_counter,
4604 };
4605 
4606 static struct intel_uncore_type snr_uncore_pcie3 = {
4607 	.name		= "pcie3",
4608 	.num_counters	= 4,
4609 	.num_boxes	= 1,
4610 	.perf_ctr_bits	= 48,
4611 	.perf_ctr	= SNR_PCIE3_PCI_PMON_CTR0,
4612 	.event_ctl	= SNR_PCIE3_PCI_PMON_CTL0,
4613 	.event_mask	= SKX_IIO_PMON_RAW_EVENT_MASK,
4614 	.event_mask_ext	= SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
4615 	.box_ctl	= SNR_PCIE3_PCI_PMON_BOX_CTL,
4616 	.ops		= &snr_pcie3_uncore_pci_ops,
4617 	.format_group	= &skx_uncore_iio_format_group,
4618 };
4619 
4620 enum {
4621 	SNR_PCI_UNCORE_M2M,
4622 	SNR_PCI_UNCORE_PCIE3,
4623 };
4624 
4625 static struct intel_uncore_type *snr_pci_uncores[] = {
4626 	[SNR_PCI_UNCORE_M2M]		= &snr_uncore_m2m,
4627 	[SNR_PCI_UNCORE_PCIE3]		= &snr_uncore_pcie3,
4628 	NULL,
4629 };
4630 
4631 static const struct pci_device_id snr_uncore_pci_ids[] = {
4632 	{ /* M2M */
4633 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
4634 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, SNR_PCI_UNCORE_M2M, 0),
4635 	},
4636 	{ /* end: all zeroes */ }
4637 };
4638 
4639 static struct pci_driver snr_uncore_pci_driver = {
4640 	.name		= "snr_uncore",
4641 	.id_table	= snr_uncore_pci_ids,
4642 };
4643 
4644 static const struct pci_device_id snr_uncore_pci_sub_ids[] = {
4645 	{ /* PCIe3 RP */
4646 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x334a),
4647 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 0, SNR_PCI_UNCORE_PCIE3, 0),
4648 	},
4649 	{ /* end: all zeroes */ }
4650 };
4651 
4652 static struct pci_driver snr_uncore_pci_sub_driver = {
4653 	.name		= "snr_uncore_sub",
4654 	.id_table	= snr_uncore_pci_sub_ids,
4655 };
4656 
4657 int snr_uncore_pci_init(void)
4658 {
4659 	/* SNR UBOX DID */
4660 	int ret = snbep_pci2phy_map_init(0x3460, SKX_CPUNODEID,
4661 					 SKX_GIDNIDMAP, true);
4662 
4663 	if (ret)
4664 		return ret;
4665 
4666 	uncore_pci_uncores = snr_pci_uncores;
4667 	uncore_pci_driver = &snr_uncore_pci_driver;
4668 	uncore_pci_sub_driver = &snr_uncore_pci_sub_driver;
4669 	return 0;
4670 }
4671 
4672 static struct pci_dev *snr_uncore_get_mc_dev(int id)
4673 {
4674 	struct pci_dev *mc_dev = NULL;
4675 	int pkg;
4676 
4677 	while (1) {
4678 		mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3451, mc_dev);
4679 		if (!mc_dev)
4680 			break;
4681 		pkg = uncore_pcibus_to_dieid(mc_dev->bus);
4682 		if (pkg == id)
4683 			break;
4684 	}
4685 	return mc_dev;
4686 }
4687 
4688 static void __snr_uncore_mmio_init_box(struct intel_uncore_box *box,
4689 				       unsigned int box_ctl, int mem_offset)
4690 {
4691 	struct pci_dev *pdev = snr_uncore_get_mc_dev(box->dieid);
4692 	struct intel_uncore_type *type = box->pmu->type;
4693 	resource_size_t addr;
4694 	u32 pci_dword;
4695 
4696 	if (!pdev)
4697 		return;
4698 
4699 	pci_read_config_dword(pdev, SNR_IMC_MMIO_BASE_OFFSET, &pci_dword);
4700 	addr = (pci_dword & SNR_IMC_MMIO_BASE_MASK) << 23;
4701 
4702 	pci_read_config_dword(pdev, mem_offset, &pci_dword);
4703 	addr |= (pci_dword & SNR_IMC_MMIO_MEM0_MASK) << 12;
4704 
4705 	addr += box_ctl;
4706 
4707 	box->io_addr = ioremap(addr, type->mmio_map_size);
4708 	if (!box->io_addr) {
4709 		pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);
4710 		return;
4711 	}
4712 
4713 	writel(IVBEP_PMON_BOX_CTL_INT, box->io_addr);
4714 }
4715 
4716 static void snr_uncore_mmio_init_box(struct intel_uncore_box *box)
4717 {
4718 	__snr_uncore_mmio_init_box(box, uncore_mmio_box_ctl(box),
4719 				   SNR_IMC_MMIO_MEM0_OFFSET);
4720 }
4721 
4722 static void snr_uncore_mmio_disable_box(struct intel_uncore_box *box)
4723 {
4724 	u32 config;
4725 
4726 	if (!box->io_addr)
4727 		return;
4728 
4729 	config = readl(box->io_addr);
4730 	config |= SNBEP_PMON_BOX_CTL_FRZ;
4731 	writel(config, box->io_addr);
4732 }
4733 
4734 static void snr_uncore_mmio_enable_box(struct intel_uncore_box *box)
4735 {
4736 	u32 config;
4737 
4738 	if (!box->io_addr)
4739 		return;
4740 
4741 	config = readl(box->io_addr);
4742 	config &= ~SNBEP_PMON_BOX_CTL_FRZ;
4743 	writel(config, box->io_addr);
4744 }
4745 
4746 static void snr_uncore_mmio_enable_event(struct intel_uncore_box *box,
4747 					   struct perf_event *event)
4748 {
4749 	struct hw_perf_event *hwc = &event->hw;
4750 
4751 	if (!box->io_addr)
4752 		return;
4753 
4754 	if (!uncore_mmio_is_valid_offset(box, hwc->config_base))
4755 		return;
4756 
4757 	writel(hwc->config | SNBEP_PMON_CTL_EN,
4758 	       box->io_addr + hwc->config_base);
4759 }
4760 
4761 static void snr_uncore_mmio_disable_event(struct intel_uncore_box *box,
4762 					    struct perf_event *event)
4763 {
4764 	struct hw_perf_event *hwc = &event->hw;
4765 
4766 	if (!box->io_addr)
4767 		return;
4768 
4769 	if (!uncore_mmio_is_valid_offset(box, hwc->config_base))
4770 		return;
4771 
4772 	writel(hwc->config, box->io_addr + hwc->config_base);
4773 }
4774 
4775 static struct intel_uncore_ops snr_uncore_mmio_ops = {
4776 	.init_box	= snr_uncore_mmio_init_box,
4777 	.exit_box	= uncore_mmio_exit_box,
4778 	.disable_box	= snr_uncore_mmio_disable_box,
4779 	.enable_box	= snr_uncore_mmio_enable_box,
4780 	.disable_event	= snr_uncore_mmio_disable_event,
4781 	.enable_event	= snr_uncore_mmio_enable_event,
4782 	.read_counter	= uncore_mmio_read_counter,
4783 };
4784 
4785 static struct uncore_event_desc snr_uncore_imc_events[] = {
4786 	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
4787 	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x0f"),
4788 	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
4789 	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
4790 	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x30"),
4791 	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
4792 	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
4793 	{ /* end: all zeroes */ },
4794 };
4795 
4796 static struct intel_uncore_type snr_uncore_imc = {
4797 	.name		= "imc",
4798 	.num_counters   = 4,
4799 	.num_boxes	= 2,
4800 	.perf_ctr_bits	= 48,
4801 	.fixed_ctr_bits	= 48,
4802 	.fixed_ctr	= SNR_IMC_MMIO_PMON_FIXED_CTR,
4803 	.fixed_ctl	= SNR_IMC_MMIO_PMON_FIXED_CTL,
4804 	.event_descs	= snr_uncore_imc_events,
4805 	.perf_ctr	= SNR_IMC_MMIO_PMON_CTR0,
4806 	.event_ctl	= SNR_IMC_MMIO_PMON_CTL0,
4807 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
4808 	.box_ctl	= SNR_IMC_MMIO_PMON_BOX_CTL,
4809 	.mmio_offset	= SNR_IMC_MMIO_OFFSET,
4810 	.mmio_map_size	= SNR_IMC_MMIO_SIZE,
4811 	.ops		= &snr_uncore_mmio_ops,
4812 	.format_group	= &skx_uncore_format_group,
4813 };
4814 
4815 enum perf_uncore_snr_imc_freerunning_type_id {
4816 	SNR_IMC_DCLK,
4817 	SNR_IMC_DDR,
4818 
4819 	SNR_IMC_FREERUNNING_TYPE_MAX,
4820 };
4821 
4822 static struct freerunning_counters snr_imc_freerunning[] = {
4823 	[SNR_IMC_DCLK]	= { 0x22b0, 0x0, 0, 1, 48 },
4824 	[SNR_IMC_DDR]	= { 0x2290, 0x8, 0, 2, 48 },
4825 };
4826 
4827 static struct uncore_event_desc snr_uncore_imc_freerunning_events[] = {
4828 	INTEL_UNCORE_EVENT_DESC(dclk,		"event=0xff,umask=0x10"),
4829 
4830 	INTEL_UNCORE_EVENT_DESC(read,		"event=0xff,umask=0x20"),
4831 	INTEL_UNCORE_EVENT_DESC(read.scale,	"6.103515625e-5"),
4832 	INTEL_UNCORE_EVENT_DESC(read.unit,	"MiB"),
4833 	INTEL_UNCORE_EVENT_DESC(write,		"event=0xff,umask=0x21"),
4834 	INTEL_UNCORE_EVENT_DESC(write.scale,	"6.103515625e-5"),
4835 	INTEL_UNCORE_EVENT_DESC(write.unit,	"MiB"),
4836 	{ /* end: all zeroes */ },
4837 };
4838 
4839 static struct intel_uncore_ops snr_uncore_imc_freerunning_ops = {
4840 	.init_box	= snr_uncore_mmio_init_box,
4841 	.exit_box	= uncore_mmio_exit_box,
4842 	.read_counter	= uncore_mmio_read_counter,
4843 	.hw_config	= uncore_freerunning_hw_config,
4844 };
4845 
4846 static struct intel_uncore_type snr_uncore_imc_free_running = {
4847 	.name			= "imc_free_running",
4848 	.num_counters		= 3,
4849 	.num_boxes		= 1,
4850 	.num_freerunning_types	= SNR_IMC_FREERUNNING_TYPE_MAX,
4851 	.mmio_map_size		= SNR_IMC_MMIO_SIZE,
4852 	.freerunning		= snr_imc_freerunning,
4853 	.ops			= &snr_uncore_imc_freerunning_ops,
4854 	.event_descs		= snr_uncore_imc_freerunning_events,
4855 	.format_group		= &skx_uncore_iio_freerunning_format_group,
4856 };
4857 
4858 static struct intel_uncore_type *snr_mmio_uncores[] = {
4859 	&snr_uncore_imc,
4860 	&snr_uncore_imc_free_running,
4861 	NULL,
4862 };
4863 
4864 void snr_uncore_mmio_init(void)
4865 {
4866 	uncore_mmio_uncores = snr_mmio_uncores;
4867 }
4868 
4869 /* end of SNR uncore support */
4870 
4871 /* ICX uncore support */
4872 
4873 static unsigned icx_cha_msr_offsets[] = {
4874 	0x2a0, 0x2ae, 0x2bc, 0x2ca, 0x2d8, 0x2e6, 0x2f4, 0x302, 0x310,
4875 	0x31e, 0x32c, 0x33a, 0x348, 0x356, 0x364, 0x372, 0x380, 0x38e,
4876 	0x3aa, 0x3b8, 0x3c6, 0x3d4, 0x3e2, 0x3f0, 0x3fe, 0x40c, 0x41a,
4877 	0x428, 0x436, 0x444, 0x452, 0x460, 0x46e, 0x47c, 0x0,   0xe,
4878 	0x1c,  0x2a,  0x38,  0x46,
4879 };
4880 
4881 static int icx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4882 {
4883 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
4884 	bool tie_en = !!(event->hw.config & SNBEP_CBO_PMON_CTL_TID_EN);
4885 
4886 	if (tie_en) {
4887 		reg1->reg = ICX_C34_MSR_PMON_BOX_FILTER0 +
4888 			    icx_cha_msr_offsets[box->pmu->pmu_idx];
4889 		reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
4890 		reg1->idx = 0;
4891 	}
4892 
4893 	return 0;
4894 }
4895 
4896 static struct intel_uncore_ops icx_uncore_chabox_ops = {
4897 	.init_box		= ivbep_uncore_msr_init_box,
4898 	.disable_box		= snbep_uncore_msr_disable_box,
4899 	.enable_box		= snbep_uncore_msr_enable_box,
4900 	.disable_event		= snbep_uncore_msr_disable_event,
4901 	.enable_event		= snr_cha_enable_event,
4902 	.read_counter		= uncore_msr_read_counter,
4903 	.hw_config		= icx_cha_hw_config,
4904 };
4905 
4906 static struct intel_uncore_type icx_uncore_chabox = {
4907 	.name			= "cha",
4908 	.num_counters		= 4,
4909 	.perf_ctr_bits		= 48,
4910 	.event_ctl		= ICX_C34_MSR_PMON_CTL0,
4911 	.perf_ctr		= ICX_C34_MSR_PMON_CTR0,
4912 	.box_ctl		= ICX_C34_MSR_PMON_BOX_CTL,
4913 	.msr_offsets		= icx_cha_msr_offsets,
4914 	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
4915 	.event_mask_ext		= SNR_CHA_RAW_EVENT_MASK_EXT,
4916 	.constraints		= skx_uncore_chabox_constraints,
4917 	.ops			= &icx_uncore_chabox_ops,
4918 	.format_group		= &snr_uncore_chabox_format_group,
4919 };
4920 
4921 static unsigned icx_msr_offsets[] = {
4922 	0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0,
4923 };
4924 
4925 static struct event_constraint icx_uncore_iio_constraints[] = {
4926 	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
4927 	UNCORE_EVENT_CONSTRAINT(0x03, 0x3),
4928 	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
4929 	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
4930 	UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
4931 	EVENT_CONSTRAINT_END
4932 };
4933 
4934 static struct intel_uncore_type icx_uncore_iio = {
4935 	.name			= "iio",
4936 	.num_counters		= 4,
4937 	.num_boxes		= 6,
4938 	.perf_ctr_bits		= 48,
4939 	.event_ctl		= ICX_IIO_MSR_PMON_CTL0,
4940 	.perf_ctr		= ICX_IIO_MSR_PMON_CTR0,
4941 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
4942 	.event_mask_ext		= SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
4943 	.box_ctl		= ICX_IIO_MSR_PMON_BOX_CTL,
4944 	.msr_offsets		= icx_msr_offsets,
4945 	.constraints		= icx_uncore_iio_constraints,
4946 	.ops			= &skx_uncore_iio_ops,
4947 	.format_group		= &snr_uncore_iio_format_group,
4948 };
4949 
4950 static struct intel_uncore_type icx_uncore_irp = {
4951 	.name			= "irp",
4952 	.num_counters		= 2,
4953 	.num_boxes		= 6,
4954 	.perf_ctr_bits		= 48,
4955 	.event_ctl		= ICX_IRP0_MSR_PMON_CTL0,
4956 	.perf_ctr		= ICX_IRP0_MSR_PMON_CTR0,
4957 	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
4958 	.box_ctl		= ICX_IRP0_MSR_PMON_BOX_CTL,
4959 	.msr_offsets		= icx_msr_offsets,
4960 	.ops			= &ivbep_uncore_msr_ops,
4961 	.format_group		= &ivbep_uncore_format_group,
4962 };
4963 
4964 static struct event_constraint icx_uncore_m2pcie_constraints[] = {
4965 	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
4966 	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
4967 	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
4968 	EVENT_CONSTRAINT_END
4969 };
4970 
4971 static struct intel_uncore_type icx_uncore_m2pcie = {
4972 	.name		= "m2pcie",
4973 	.num_counters	= 4,
4974 	.num_boxes	= 6,
4975 	.perf_ctr_bits	= 48,
4976 	.event_ctl	= ICX_M2PCIE_MSR_PMON_CTL0,
4977 	.perf_ctr	= ICX_M2PCIE_MSR_PMON_CTR0,
4978 	.box_ctl	= ICX_M2PCIE_MSR_PMON_BOX_CTL,
4979 	.msr_offsets	= icx_msr_offsets,
4980 	.constraints	= icx_uncore_m2pcie_constraints,
4981 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
4982 	.ops		= &ivbep_uncore_msr_ops,
4983 	.format_group	= &ivbep_uncore_format_group,
4984 };
4985 
4986 enum perf_uncore_icx_iio_freerunning_type_id {
4987 	ICX_IIO_MSR_IOCLK,
4988 	ICX_IIO_MSR_BW_IN,
4989 
4990 	ICX_IIO_FREERUNNING_TYPE_MAX,
4991 };
4992 
4993 static unsigned icx_iio_clk_freerunning_box_offsets[] = {
4994 	0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0,
4995 };
4996 
4997 static unsigned icx_iio_bw_freerunning_box_offsets[] = {
4998 	0x0, 0x10, 0x20, 0x90, 0xa0, 0xb0,
4999 };
5000 
5001 static struct freerunning_counters icx_iio_freerunning[] = {
5002 	[ICX_IIO_MSR_IOCLK]	= { 0xa55, 0x1, 0x20, 1, 48, icx_iio_clk_freerunning_box_offsets },
5003 	[ICX_IIO_MSR_BW_IN]	= { 0xaa0, 0x1, 0x10, 8, 48, icx_iio_bw_freerunning_box_offsets },
5004 };
5005 
5006 static struct uncore_event_desc icx_uncore_iio_freerunning_events[] = {
5007 	/* Free-Running IIO CLOCKS Counter */
5008 	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
5009 	/* Free-Running IIO BANDWIDTH IN Counters */
5010 	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
5011 	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
5012 	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
5013 	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
5014 	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
5015 	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
5016 	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
5017 	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
5018 	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
5019 	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
5020 	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
5021 	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
5022 	INTEL_UNCORE_EVENT_DESC(bw_in_port4,		"event=0xff,umask=0x24"),
5023 	INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale,	"3.814697266e-6"),
5024 	INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit,	"MiB"),
5025 	INTEL_UNCORE_EVENT_DESC(bw_in_port5,		"event=0xff,umask=0x25"),
5026 	INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale,	"3.814697266e-6"),
5027 	INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit,	"MiB"),
5028 	INTEL_UNCORE_EVENT_DESC(bw_in_port6,		"event=0xff,umask=0x26"),
5029 	INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale,	"3.814697266e-6"),
5030 	INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit,	"MiB"),
5031 	INTEL_UNCORE_EVENT_DESC(bw_in_port7,		"event=0xff,umask=0x27"),
5032 	INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale,	"3.814697266e-6"),
5033 	INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit,	"MiB"),
5034 	{ /* end: all zeroes */ },
5035 };
5036 
5037 static struct intel_uncore_type icx_uncore_iio_free_running = {
5038 	.name			= "iio_free_running",
5039 	.num_counters		= 9,
5040 	.num_boxes		= 6,
5041 	.num_freerunning_types	= ICX_IIO_FREERUNNING_TYPE_MAX,
5042 	.freerunning		= icx_iio_freerunning,
5043 	.ops			= &skx_uncore_iio_freerunning_ops,
5044 	.event_descs		= icx_uncore_iio_freerunning_events,
5045 	.format_group		= &skx_uncore_iio_freerunning_format_group,
5046 };
5047 
5048 static struct intel_uncore_type *icx_msr_uncores[] = {
5049 	&skx_uncore_ubox,
5050 	&icx_uncore_chabox,
5051 	&icx_uncore_iio,
5052 	&icx_uncore_irp,
5053 	&icx_uncore_m2pcie,
5054 	&skx_uncore_pcu,
5055 	&icx_uncore_iio_free_running,
5056 	NULL,
5057 };
5058 
5059 /*
5060  * To determine the number of CHAs, it should read CAPID6(Low) and CAPID7 (High)
5061  * registers which located at Device 30, Function 3
5062  */
5063 #define ICX_CAPID6		0x9c
5064 #define ICX_CAPID7		0xa0
5065 
5066 static u64 icx_count_chabox(void)
5067 {
5068 	struct pci_dev *dev = NULL;
5069 	u64 caps = 0;
5070 
5071 	dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x345b, dev);
5072 	if (!dev)
5073 		goto out;
5074 
5075 	pci_read_config_dword(dev, ICX_CAPID6, (u32 *)&caps);
5076 	pci_read_config_dword(dev, ICX_CAPID7, (u32 *)&caps + 1);
5077 out:
5078 	pci_dev_put(dev);
5079 	return hweight64(caps);
5080 }
5081 
5082 void icx_uncore_cpu_init(void)
5083 {
5084 	u64 num_boxes = icx_count_chabox();
5085 
5086 	if (WARN_ON(num_boxes > ARRAY_SIZE(icx_cha_msr_offsets)))
5087 		return;
5088 	icx_uncore_chabox.num_boxes = num_boxes;
5089 	uncore_msr_uncores = icx_msr_uncores;
5090 }
5091 
5092 static struct intel_uncore_type icx_uncore_m2m = {
5093 	.name		= "m2m",
5094 	.num_counters   = 4,
5095 	.num_boxes	= 4,
5096 	.perf_ctr_bits	= 48,
5097 	.perf_ctr	= SNR_M2M_PCI_PMON_CTR0,
5098 	.event_ctl	= SNR_M2M_PCI_PMON_CTL0,
5099 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
5100 	.box_ctl	= SNR_M2M_PCI_PMON_BOX_CTL,
5101 	.ops		= &snr_m2m_uncore_pci_ops,
5102 	.format_group	= &skx_uncore_format_group,
5103 };
5104 
5105 static struct attribute *icx_upi_uncore_formats_attr[] = {
5106 	&format_attr_event.attr,
5107 	&format_attr_umask_ext4.attr,
5108 	&format_attr_edge.attr,
5109 	&format_attr_inv.attr,
5110 	&format_attr_thresh8.attr,
5111 	NULL,
5112 };
5113 
5114 static const struct attribute_group icx_upi_uncore_format_group = {
5115 	.name = "format",
5116 	.attrs = icx_upi_uncore_formats_attr,
5117 };
5118 
5119 static struct intel_uncore_type icx_uncore_upi = {
5120 	.name		= "upi",
5121 	.num_counters   = 4,
5122 	.num_boxes	= 3,
5123 	.perf_ctr_bits	= 48,
5124 	.perf_ctr	= ICX_UPI_PCI_PMON_CTR0,
5125 	.event_ctl	= ICX_UPI_PCI_PMON_CTL0,
5126 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
5127 	.event_mask_ext = ICX_UPI_CTL_UMASK_EXT,
5128 	.box_ctl	= ICX_UPI_PCI_PMON_BOX_CTL,
5129 	.ops		= &skx_upi_uncore_pci_ops,
5130 	.format_group	= &icx_upi_uncore_format_group,
5131 };
5132 
5133 static struct event_constraint icx_uncore_m3upi_constraints[] = {
5134 	UNCORE_EVENT_CONSTRAINT(0x1c, 0x1),
5135 	UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
5136 	UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
5137 	UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
5138 	UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
5139 	UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
5140 	UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
5141 	UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
5142 	EVENT_CONSTRAINT_END
5143 };
5144 
5145 static struct intel_uncore_type icx_uncore_m3upi = {
5146 	.name		= "m3upi",
5147 	.num_counters   = 4,
5148 	.num_boxes	= 3,
5149 	.perf_ctr_bits	= 48,
5150 	.perf_ctr	= ICX_M3UPI_PCI_PMON_CTR0,
5151 	.event_ctl	= ICX_M3UPI_PCI_PMON_CTL0,
5152 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
5153 	.box_ctl	= ICX_M3UPI_PCI_PMON_BOX_CTL,
5154 	.constraints	= icx_uncore_m3upi_constraints,
5155 	.ops		= &ivbep_uncore_pci_ops,
5156 	.format_group	= &skx_uncore_format_group,
5157 };
5158 
5159 enum {
5160 	ICX_PCI_UNCORE_M2M,
5161 	ICX_PCI_UNCORE_UPI,
5162 	ICX_PCI_UNCORE_M3UPI,
5163 };
5164 
5165 static struct intel_uncore_type *icx_pci_uncores[] = {
5166 	[ICX_PCI_UNCORE_M2M]		= &icx_uncore_m2m,
5167 	[ICX_PCI_UNCORE_UPI]		= &icx_uncore_upi,
5168 	[ICX_PCI_UNCORE_M3UPI]		= &icx_uncore_m3upi,
5169 	NULL,
5170 };
5171 
5172 static const struct pci_device_id icx_uncore_pci_ids[] = {
5173 	{ /* M2M 0 */
5174 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
5175 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, ICX_PCI_UNCORE_M2M, 0),
5176 	},
5177 	{ /* M2M 1 */
5178 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
5179 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 0, ICX_PCI_UNCORE_M2M, 1),
5180 	},
5181 	{ /* M2M 2 */
5182 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
5183 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, ICX_PCI_UNCORE_M2M, 2),
5184 	},
5185 	{ /* M2M 3 */
5186 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
5187 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, ICX_PCI_UNCORE_M2M, 3),
5188 	},
5189 	{ /* UPI Link 0 */
5190 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
5191 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(2, 1, ICX_PCI_UNCORE_UPI, 0),
5192 	},
5193 	{ /* UPI Link 1 */
5194 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
5195 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(3, 1, ICX_PCI_UNCORE_UPI, 1),
5196 	},
5197 	{ /* UPI Link 2 */
5198 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
5199 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 1, ICX_PCI_UNCORE_UPI, 2),
5200 	},
5201 	{ /* M3UPI Link 0 */
5202 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
5203 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(5, 1, ICX_PCI_UNCORE_M3UPI, 0),
5204 	},
5205 	{ /* M3UPI Link 1 */
5206 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
5207 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(6, 1, ICX_PCI_UNCORE_M3UPI, 1),
5208 	},
5209 	{ /* M3UPI Link 2 */
5210 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
5211 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(7, 1, ICX_PCI_UNCORE_M3UPI, 2),
5212 	},
5213 	{ /* end: all zeroes */ }
5214 };
5215 
5216 static struct pci_driver icx_uncore_pci_driver = {
5217 	.name		= "icx_uncore",
5218 	.id_table	= icx_uncore_pci_ids,
5219 };
5220 
5221 int icx_uncore_pci_init(void)
5222 {
5223 	/* ICX UBOX DID */
5224 	int ret = snbep_pci2phy_map_init(0x3450, SKX_CPUNODEID,
5225 					 SKX_GIDNIDMAP, true);
5226 
5227 	if (ret)
5228 		return ret;
5229 
5230 	uncore_pci_uncores = icx_pci_uncores;
5231 	uncore_pci_driver = &icx_uncore_pci_driver;
5232 	return 0;
5233 }
5234 
5235 static void icx_uncore_imc_init_box(struct intel_uncore_box *box)
5236 {
5237 	unsigned int box_ctl = box->pmu->type->box_ctl +
5238 			       box->pmu->type->mmio_offset * (box->pmu->pmu_idx % ICX_NUMBER_IMC_CHN);
5239 	int mem_offset = (box->pmu->pmu_idx / ICX_NUMBER_IMC_CHN) * ICX_IMC_MEM_STRIDE +
5240 			 SNR_IMC_MMIO_MEM0_OFFSET;
5241 
5242 	__snr_uncore_mmio_init_box(box, box_ctl, mem_offset);
5243 }
5244 
5245 static struct intel_uncore_ops icx_uncore_mmio_ops = {
5246 	.init_box	= icx_uncore_imc_init_box,
5247 	.exit_box	= uncore_mmio_exit_box,
5248 	.disable_box	= snr_uncore_mmio_disable_box,
5249 	.enable_box	= snr_uncore_mmio_enable_box,
5250 	.disable_event	= snr_uncore_mmio_disable_event,
5251 	.enable_event	= snr_uncore_mmio_enable_event,
5252 	.read_counter	= uncore_mmio_read_counter,
5253 };
5254 
5255 static struct intel_uncore_type icx_uncore_imc = {
5256 	.name		= "imc",
5257 	.num_counters   = 4,
5258 	.num_boxes	= 8,
5259 	.perf_ctr_bits	= 48,
5260 	.fixed_ctr_bits	= 48,
5261 	.fixed_ctr	= SNR_IMC_MMIO_PMON_FIXED_CTR,
5262 	.fixed_ctl	= SNR_IMC_MMIO_PMON_FIXED_CTL,
5263 	.event_descs	= hswep_uncore_imc_events,
5264 	.perf_ctr	= SNR_IMC_MMIO_PMON_CTR0,
5265 	.event_ctl	= SNR_IMC_MMIO_PMON_CTL0,
5266 	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
5267 	.box_ctl	= SNR_IMC_MMIO_PMON_BOX_CTL,
5268 	.mmio_offset	= SNR_IMC_MMIO_OFFSET,
5269 	.mmio_map_size	= SNR_IMC_MMIO_SIZE,
5270 	.ops		= &icx_uncore_mmio_ops,
5271 	.format_group	= &skx_uncore_format_group,
5272 };
5273 
5274 enum perf_uncore_icx_imc_freerunning_type_id {
5275 	ICX_IMC_DCLK,
5276 	ICX_IMC_DDR,
5277 	ICX_IMC_DDRT,
5278 
5279 	ICX_IMC_FREERUNNING_TYPE_MAX,
5280 };
5281 
5282 static struct freerunning_counters icx_imc_freerunning[] = {
5283 	[ICX_IMC_DCLK]	= { 0x22b0, 0x0, 0, 1, 48 },
5284 	[ICX_IMC_DDR]	= { 0x2290, 0x8, 0, 2, 48 },
5285 	[ICX_IMC_DDRT]	= { 0x22a0, 0x8, 0, 2, 48 },
5286 };
5287 
5288 static struct uncore_event_desc icx_uncore_imc_freerunning_events[] = {
5289 	INTEL_UNCORE_EVENT_DESC(dclk,			"event=0xff,umask=0x10"),
5290 
5291 	INTEL_UNCORE_EVENT_DESC(read,			"event=0xff,umask=0x20"),
5292 	INTEL_UNCORE_EVENT_DESC(read.scale,		"6.103515625e-5"),
5293 	INTEL_UNCORE_EVENT_DESC(read.unit,		"MiB"),
5294 	INTEL_UNCORE_EVENT_DESC(write,			"event=0xff,umask=0x21"),
5295 	INTEL_UNCORE_EVENT_DESC(write.scale,		"6.103515625e-5"),
5296 	INTEL_UNCORE_EVENT_DESC(write.unit,		"MiB"),
5297 
5298 	INTEL_UNCORE_EVENT_DESC(ddrt_read,		"event=0xff,umask=0x30"),
5299 	INTEL_UNCORE_EVENT_DESC(ddrt_read.scale,	"6.103515625e-5"),
5300 	INTEL_UNCORE_EVENT_DESC(ddrt_read.unit,		"MiB"),
5301 	INTEL_UNCORE_EVENT_DESC(ddrt_write,		"event=0xff,umask=0x31"),
5302 	INTEL_UNCORE_EVENT_DESC(ddrt_write.scale,	"6.103515625e-5"),
5303 	INTEL_UNCORE_EVENT_DESC(ddrt_write.unit,	"MiB"),
5304 	{ /* end: all zeroes */ },
5305 };
5306 
5307 static void icx_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
5308 {
5309 	int mem_offset = box->pmu->pmu_idx * ICX_IMC_MEM_STRIDE +
5310 			 SNR_IMC_MMIO_MEM0_OFFSET;
5311 
5312 	__snr_uncore_mmio_init_box(box, uncore_mmio_box_ctl(box), mem_offset);
5313 }
5314 
5315 static struct intel_uncore_ops icx_uncore_imc_freerunning_ops = {
5316 	.init_box	= icx_uncore_imc_freerunning_init_box,
5317 	.exit_box	= uncore_mmio_exit_box,
5318 	.read_counter	= uncore_mmio_read_counter,
5319 	.hw_config	= uncore_freerunning_hw_config,
5320 };
5321 
5322 static struct intel_uncore_type icx_uncore_imc_free_running = {
5323 	.name			= "imc_free_running",
5324 	.num_counters		= 5,
5325 	.num_boxes		= 4,
5326 	.num_freerunning_types	= ICX_IMC_FREERUNNING_TYPE_MAX,
5327 	.mmio_map_size		= SNR_IMC_MMIO_SIZE,
5328 	.freerunning		= icx_imc_freerunning,
5329 	.ops			= &icx_uncore_imc_freerunning_ops,
5330 	.event_descs		= icx_uncore_imc_freerunning_events,
5331 	.format_group		= &skx_uncore_iio_freerunning_format_group,
5332 };
5333 
5334 static struct intel_uncore_type *icx_mmio_uncores[] = {
5335 	&icx_uncore_imc,
5336 	&icx_uncore_imc_free_running,
5337 	NULL,
5338 };
5339 
5340 void icx_uncore_mmio_init(void)
5341 {
5342 	uncore_mmio_uncores = icx_mmio_uncores;
5343 }
5344 
5345 /* end of ICX uncore support */
5346