/* Nehalem-EX/Westmere-EX uncore support */
#include "uncore.h"

/* NHM-EX event control */
#define NHMEX_PMON_CTL_EV_SEL_MASK	0x000000ff
#define NHMEX_PMON_CTL_UMASK_MASK	0x0000ff00
#define NHMEX_PMON_CTL_EN_BIT0		(1 << 0)
#define NHMEX_PMON_CTL_EDGE_DET		(1 << 18)
#define NHMEX_PMON_CTL_PMI_EN		(1 << 20)
#define NHMEX_PMON_CTL_EN_BIT22		(1 << 22)
#define NHMEX_PMON_CTL_INVERT		(1 << 23)
#define NHMEX_PMON_CTL_TRESH_MASK	0xff000000
#define NHMEX_PMON_RAW_EVENT_MASK	(NHMEX_PMON_CTL_EV_SEL_MASK | \
					 NHMEX_PMON_CTL_UMASK_MASK | \
					 NHMEX_PMON_CTL_EDGE_DET | \
					 NHMEX_PMON_CTL_INVERT | \
					 NHMEX_PMON_CTL_TRESH_MASK)

/* NHM-EX Ubox */
#define NHMEX_U_MSR_PMON_GLOBAL_CTL		0xc00
#define NHMEX_U_MSR_PMON_CTR			0xc11
#define NHMEX_U_MSR_PMON_EV_SEL			0xc10

#define NHMEX_U_PMON_GLOBAL_EN			(1 << 0)
#define NHMEX_U_PMON_GLOBAL_PMI_CORE_SEL	0x0000001e
#define NHMEX_U_PMON_GLOBAL_EN_ALL		(1 << 28)
#define NHMEX_U_PMON_GLOBAL_RST_ALL		(1 << 29)
#define NHMEX_U_PMON_GLOBAL_FRZ_ALL		(1 << 31)

#define NHMEX_U_PMON_RAW_EVENT_MASK		\
		(NHMEX_PMON_CTL_EV_SEL_MASK |	\
		 NHMEX_PMON_CTL_EDGE_DET)

/* NHM-EX Cbox */
#define NHMEX_C0_MSR_PMON_GLOBAL_CTL		0xd00
#define NHMEX_C0_MSR_PMON_CTR0			0xd11
#define NHMEX_C0_MSR_PMON_EV_SEL0		0xd10
#define NHMEX_C_MSR_OFFSET			0x20

/* NHM-EX Bbox */
#define NHMEX_B0_MSR_PMON_GLOBAL_CTL		0xc20
#define NHMEX_B0_MSR_PMON_CTR0			0xc31
#define NHMEX_B0_MSR_PMON_CTL0			0xc30
#define NHMEX_B_MSR_OFFSET			0x40
#define NHMEX_B0_MSR_MATCH			0xe45
#define NHMEX_B0_MSR_MASK			0xe46
#define NHMEX_B1_MSR_MATCH			0xe4d
#define NHMEX_B1_MSR_MASK			0xe4e

#define NHMEX_B_PMON_CTL_EN			(1 << 0)
#define NHMEX_B_PMON_CTL_EV_SEL_SHIFT		1
#define NHMEX_B_PMON_CTL_EV_SEL_MASK		\
		(0x1f << NHMEX_B_PMON_CTL_EV_SEL_SHIFT)
#define NHMEX_B_PMON_CTR_SHIFT		6
#define NHMEX_B_PMON_CTR_MASK		\
		(0x3 << NHMEX_B_PMON_CTR_SHIFT)
#define NHMEX_B_PMON_RAW_EVENT_MASK		\
		(NHMEX_B_PMON_CTL_EV_SEL_MASK | \
		 NHMEX_B_PMON_CTR_MASK)

/* NHM-EX Sbox */
#define NHMEX_S0_MSR_PMON_GLOBAL_CTL		0xc40
#define NHMEX_S0_MSR_PMON_CTR0			0xc51
#define NHMEX_S0_MSR_PMON_CTL0			0xc50
#define NHMEX_S_MSR_OFFSET			0x80
#define NHMEX_S0_MSR_MM_CFG			0xe48
#define NHMEX_S0_MSR_MATCH			0xe49
#define NHMEX_S0_MSR_MASK			0xe4a
#define NHMEX_S1_MSR_MM_CFG			0xe58
#define NHMEX_S1_MSR_MATCH			0xe59
#define NHMEX_S1_MSR_MASK			0xe5a

#define NHMEX_S_PMON_MM_CFG_EN			(0x1ULL << 63)
#define NHMEX_S_EVENT_TO_R_PROG_EV		0

/* NHM-EX Mbox */
#define NHMEX_M0_MSR_GLOBAL_CTL			0xca0
#define NHMEX_M0_MSR_PMU_DSP			0xca5
#define NHMEX_M0_MSR_PMU_ISS			0xca6
#define NHMEX_M0_MSR_PMU_MAP			0xca7
#define NHMEX_M0_MSR_PMU_MSC_THR		0xca8
#define NHMEX_M0_MSR_PMU_PGT			0xca9
#define NHMEX_M0_MSR_PMU_PLD			0xcaa
#define NHMEX_M0_MSR_PMU_ZDP_CTL_FVC		0xcab
#define NHMEX_M0_MSR_PMU_CTL0			0xcb0
#define NHMEX_M0_MSR_PMU_CNT0			0xcb1
#define NHMEX_M_MSR_OFFSET			0x40
#define NHMEX_M0_MSR_PMU_MM_CFG			0xe54
#define NHMEX_M1_MSR_PMU_MM_CFG			0xe5c

#define NHMEX_M_PMON_MM_CFG_EN			(1ULL << 63)
#define NHMEX_M_PMON_ADDR_MATCH_MASK		0x3ffffffffULL
#define NHMEX_M_PMON_ADDR_MASK_MASK		0x7ffffffULL
#define NHMEX_M_PMON_ADDR_MASK_SHIFT		34

#define NHMEX_M_PMON_CTL_EN			(1 << 0)
#define NHMEX_M_PMON_CTL_PMI_EN			(1 << 1)
#define NHMEX_M_PMON_CTL_COUNT_MODE_SHIFT	2
#define NHMEX_M_PMON_CTL_COUNT_MODE_MASK	\
	(0x3 << NHMEX_M_PMON_CTL_COUNT_MODE_SHIFT)
#define NHMEX_M_PMON_CTL_STORAGE_MODE_SHIFT	4
#define NHMEX_M_PMON_CTL_STORAGE_MODE_MASK	\
	(0x3 << NHMEX_M_PMON_CTL_STORAGE_MODE_SHIFT)
#define NHMEX_M_PMON_CTL_WRAP_MODE		(1 << 6)
#define NHMEX_M_PMON_CTL_FLAG_MODE		(1 << 7)
#define NHMEX_M_PMON_CTL_INC_SEL_SHIFT		9
#define NHMEX_M_PMON_CTL_INC_SEL_MASK		\
	(0x1f << NHMEX_M_PMON_CTL_INC_SEL_SHIFT)
#define NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT	19
#define NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK	\
	(0x7 << NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT)
#define NHMEX_M_PMON_RAW_EVENT_MASK			\
		(NHMEX_M_PMON_CTL_COUNT_MODE_MASK |	\
		 NHMEX_M_PMON_CTL_STORAGE_MODE_MASK |	\
		 NHMEX_M_PMON_CTL_WRAP_MODE |		\
		 NHMEX_M_PMON_CTL_FLAG_MODE |		\
		 NHMEX_M_PMON_CTL_INC_SEL_MASK |	\
		 NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK)

#define NHMEX_M_PMON_ZDP_CTL_FVC_MASK		(((1 << 11) - 1) | (1 << 23))
#define NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n)	(0x7ULL << (11 + 3 * (n)))

#define WSMEX_M_PMON_ZDP_CTL_FVC_MASK		(((1 << 12) - 1) | (1 << 24))
#define WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n)	(0x7ULL << (12 + 3 * (n)))

/*
 * If the 7th bit is not set, use bits 9~13 to select the event;
 * otherwise use bits 19~21 to select the event.
 */
#define MBOX_INC_SEL(x) ((x) << NHMEX_M_PMON_CTL_INC_SEL_SHIFT)
#define MBOX_SET_FLAG_SEL(x) (((x) << NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT) | \
				NHMEX_M_PMON_CTL_FLAG_MODE)
#define MBOX_INC_SEL_MASK (NHMEX_M_PMON_CTL_INC_SEL_MASK | \
			   NHMEX_M_PMON_CTL_FLAG_MODE)
#define MBOX_SET_FLAG_SEL_MASK (NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK | \
				NHMEX_M_PMON_CTL_FLAG_MODE)
#define MBOX_INC_SEL_EXTAR_REG(c, r) \
		EVENT_EXTRA_REG(MBOX_INC_SEL(c), NHMEX_M0_MSR_PMU_##r, \
				MBOX_INC_SEL_MASK, (u64)-1, NHMEX_M_##r)
#define MBOX_SET_FLAG_SEL_EXTRA_REG(c, r) \
		EVENT_EXTRA_REG(MBOX_SET_FLAG_SEL(c), NHMEX_M0_MSR_PMU_##r, \
				MBOX_SET_FLAG_SEL_MASK, \
				(u64)-1, NHMEX_M_##r)

/* NHM-EX Rbox */
#define NHMEX_R_MSR_GLOBAL_CTL			0xe00
#define NHMEX_R_MSR_PMON_CTL0			0xe10
#define NHMEX_R_MSR_PMON_CNT0			0xe11
#define NHMEX_R_MSR_OFFSET			0x20

#define NHMEX_R_MSR_PORTN_QLX_CFG(n)		\
		((n) < 4 ? (0xe0c + (n)) : (0xe2c + (n) - 4))
#define NHMEX_R_MSR_PORTN_IPERF_CFG0(n)		(0xe04 + (n))
#define NHMEX_R_MSR_PORTN_IPERF_CFG1(n)		(0xe24 + (n))
#define NHMEX_R_MSR_PORTN_XBR_OFFSET(n)		\
		(((n) < 4 ? 0 : 0x10) + (n) * 4)
#define NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n)	\
		(0xe60 + NHMEX_R_MSR_PORTN_XBR_OFFSET(n))
#define NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(n)	\
		(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n) + 1)
#define NHMEX_R_MSR_PORTN_XBR_SET1_MASK(n)	\
		(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n) + 2)
#define NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n)	\
		(0xe70 + NHMEX_R_MSR_PORTN_XBR_OFFSET(n))
#define NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(n)	\
		(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n) + 1)
#define NHMEX_R_MSR_PORTN_XBR_SET2_MASK(n)	\
		(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n) + 2)

#define NHMEX_R_PMON_CTL_EN			(1 << 0)
#define NHMEX_R_PMON_CTL_EV_SEL_SHIFT		1
#define NHMEX_R_PMON_CTL_EV_SEL_MASK		\
		(0x1f << NHMEX_R_PMON_CTL_EV_SEL_SHIFT)
#define NHMEX_R_PMON_CTL_PMI_EN			(1 << 6)
#define NHMEX_R_PMON_RAW_EVENT_MASK		NHMEX_R_PMON_CTL_EV_SEL_MASK

/* NHM-EX Wbox */
#define NHMEX_W_MSR_GLOBAL_CTL			0xc80
#define NHMEX_W_MSR_PMON_CNT0			0xc90
#define NHMEX_W_MSR_PMON_EVT_SEL0		0xc91
#define NHMEX_W_MSR_PMON_FIXED_CTR		0x394
#define NHMEX_W_MSR_PMON_FIXED_CTL		0x395

#define NHMEX_W_PMON_GLOBAL_FIXED_EN		(1ULL << 31)

#define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
				((1ULL << (n)) - 1)))
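/*
 * __BITS_VALUE(x, i, n) extracts the i-th n-bit wide field of x, e.g.
 * __BITS_VALUE(reg1->idx, 1, 8) is bits 8~15 of reg1->idx.  It is used
 * below where several small indices and MSR addresses are packed into
 * one word.
 */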

DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(counter, counter, "config:6-7");
DEFINE_UNCORE_FORMAT_ATTR(match, match, "config1:0-63");
DEFINE_UNCORE_FORMAT_ATTR(mask, mask, "config2:0-63");
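/*
 * The format attributes above are exported via sysfs and tell the perf tool
 * how the named fields map onto perf_event_attr::config/config1/config2.
 * Assuming the usual uncore_<box>_<n> PMU naming, an event string would look
 * like, for example, "uncore_cbox_0/event=0xNN,umask=0xNN/" (the values here
 * are placeholders, not documented events).
 */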

static void nhmex_uncore_msr_init_box(struct intel_uncore_box *box)
{
	wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL, NHMEX_U_PMON_GLOBAL_EN_ALL);
}

static void nhmex_uncore_msr_exit_box(struct intel_uncore_box *box)
{
	wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL, 0);
}

static void nhmex_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);
	u64 config;

	if (msr) {
		rdmsrl(msr, config);
		config &= ~((1ULL << uncore_num_counters(box)) - 1);
		/* WBox has a fixed counter */
		if (uncore_msr_fixed_ctl(box))
			config &= ~NHMEX_W_PMON_GLOBAL_FIXED_EN;
		wrmsrl(msr, config);
	}
}

static void nhmex_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);
	u64 config;

	if (msr) {
		rdmsrl(msr, config);
		config |= (1ULL << uncore_num_counters(box)) - 1;
		/* WBox has a fixed counter */
		if (uncore_msr_fixed_ctl(box))
			config |= NHMEX_W_PMON_GLOBAL_FIXED_EN;
		wrmsrl(msr, config);
	}
}

static void nhmex_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	wrmsrl(event->hw.config_base, 0);
}

static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx >= UNCORE_PMC_IDX_FIXED)
		wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0);
	else if (box->pmu->type->event_mask & NHMEX_PMON_CTL_EN_BIT0)
		wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
	else
		wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
}
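/*
 * Per-counter enable: for the fixed counter only the enable bit is written.
 * Boxes whose event mask already claims bit 0 (i.e. the event select field
 * starts at bit 0) are enabled through bit 22 of the control register,
 * the remaining boxes through bit 0.
 */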

#define NHMEX_UNCORE_OPS_COMMON_INIT()				\
	.init_box	= nhmex_uncore_msr_init_box,		\
	.exit_box	= nhmex_uncore_msr_exit_box,		\
	.disable_box	= nhmex_uncore_msr_disable_box,		\
	.enable_box	= nhmex_uncore_msr_enable_box,		\
	.disable_event	= nhmex_uncore_msr_disable_event,	\
	.read_counter	= uncore_msr_read_counter

static struct intel_uncore_ops nhmex_uncore_ops = {
	NHMEX_UNCORE_OPS_COMMON_INIT(),
	.enable_event	= nhmex_uncore_msr_enable_event,
};

static struct attribute *nhmex_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_edge.attr,
	NULL,
};

static struct attribute_group nhmex_uncore_ubox_format_group = {
	.name		= "format",
	.attrs		= nhmex_uncore_ubox_formats_attr,
};

static struct intel_uncore_type nhmex_uncore_ubox = {
	.name		= "ubox",
	.num_counters	= 1,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.event_ctl	= NHMEX_U_MSR_PMON_EV_SEL,
	.perf_ctr	= NHMEX_U_MSR_PMON_CTR,
	.event_mask	= NHMEX_U_PMON_RAW_EVENT_MASK,
	.box_ctl	= NHMEX_U_MSR_PMON_GLOBAL_CTL,
	.ops		= &nhmex_uncore_ops,
	.format_group	= &nhmex_uncore_ubox_format_group
};

static struct attribute *nhmex_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static struct attribute_group nhmex_uncore_cbox_format_group = {
	.name = "format",
	.attrs = nhmex_uncore_cbox_formats_attr,
};

/* msr offset for each instance of cbox */
static unsigned nhmex_cbox_msr_offsets[] = {
	0x0, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x240, 0x2c0,
};
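/*
 * The n-th Cbox adds nhmex_cbox_msr_offsets[n] to the C0 base addresses
 * above, e.g. the first event select MSR of cbox 2 is
 * NHMEX_C0_MSR_PMON_EV_SEL0 + 0x40 = 0xd50.
 */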

static struct intel_uncore_type nhmex_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 6,
	.num_boxes		= 10,
	.perf_ctr_bits		= 48,
	.event_ctl		= NHMEX_C0_MSR_PMON_EV_SEL0,
	.perf_ctr		= NHMEX_C0_MSR_PMON_CTR0,
	.event_mask		= NHMEX_PMON_RAW_EVENT_MASK,
	.box_ctl		= NHMEX_C0_MSR_PMON_GLOBAL_CTL,
	.msr_offsets		= nhmex_cbox_msr_offsets,
	.pair_ctr_ctl		= 1,
	.ops			= &nhmex_uncore_ops,
	.format_group		= &nhmex_uncore_cbox_format_group
};

static struct uncore_event_desc nhmex_uncore_wbox_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0"),
	{ /* end: all zeroes */ },
};

static struct intel_uncore_type nhmex_uncore_wbox = {
	.name			= "wbox",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_ctl		= NHMEX_W_MSR_PMON_CNT0,
	.perf_ctr		= NHMEX_W_MSR_PMON_EVT_SEL0,
	.fixed_ctr		= NHMEX_W_MSR_PMON_FIXED_CTR,
	.fixed_ctl		= NHMEX_W_MSR_PMON_FIXED_CTL,
	.event_mask		= NHMEX_PMON_RAW_EVENT_MASK,
	.box_ctl		= NHMEX_W_MSR_GLOBAL_CTL,
	.pair_ctr_ctl		= 1,
	.event_descs		= nhmex_uncore_wbox_events,
	.ops			= &nhmex_uncore_ops,
	.format_group		= &nhmex_uncore_cbox_format_group
};

static int nhmex_bbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
	int ctr, ev_sel;

	ctr = (hwc->config & NHMEX_B_PMON_CTR_MASK) >>
		NHMEX_B_PMON_CTR_SHIFT;
	ev_sel = (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK) >>
		  NHMEX_B_PMON_CTL_EV_SEL_SHIFT;

	/* events that do not use the match/mask registers */
	if ((ctr == 0 && ev_sel > 0x3) || (ctr == 1 && ev_sel > 0x6) ||
	    (ctr == 2 && ev_sel != 0x4) || ctr == 3)
		return 0;

	if (box->pmu->pmu_idx == 0)
		reg1->reg = NHMEX_B0_MSR_MATCH;
	else
		reg1->reg = NHMEX_B1_MSR_MATCH;
	reg1->idx = 0;
	reg1->config = event->attr.config1;
	reg2->config = event->attr.config2;
	return 0;
}

static void nhmex_bbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		wrmsrl(reg1->reg, reg1->config);
		wrmsrl(reg1->reg + 1, reg2->config);
	}
	wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
		(hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK));
}
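/*
 * nhmex_bbox_msr_enable_event() writes the match value from config1 into
 * the MATCH MSR and the mask value from config2 into the MSR right above
 * it (cf. NHMEX_B0_MSR_MATCH/NHMEX_B0_MSR_MASK), then programs only the
 * event select bits plus the enable bit into the counter control.
 */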

/*
 * The Bbox has 4 counters, but each counter monitors different events.
 * Use bits 6-7 in the event config to select counter.
 */
static struct event_constraint nhmex_uncore_bbox_constraints[] = {
	EVENT_CONSTRAINT(0 , 1, 0xc0),
	EVENT_CONSTRAINT(0x40, 2, 0xc0),
	EVENT_CONSTRAINT(0x80, 4, 0xc0),
	EVENT_CONSTRAINT(0xc0, 8, 0xc0),
	EVENT_CONSTRAINT_END,
};
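/*
 * Each constraint above pins one value of the counter select field (bits
 * 6-7, event mask 0xc0) to exactly one counter: counter select 0, 1, 2 or 3
 * may only run on counter 0, 1, 2 or 3 respectively.
 */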

static struct attribute *nhmex_uncore_bbox_formats_attr[] = {
	&format_attr_event5.attr,
	&format_attr_counter.attr,
	&format_attr_match.attr,
	&format_attr_mask.attr,
	NULL,
};

static struct attribute_group nhmex_uncore_bbox_format_group = {
	.name = "format",
	.attrs = nhmex_uncore_bbox_formats_attr,
};

static struct intel_uncore_ops nhmex_uncore_bbox_ops = {
	NHMEX_UNCORE_OPS_COMMON_INIT(),
	.enable_event		= nhmex_bbox_msr_enable_event,
	.hw_config		= nhmex_bbox_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};

static struct intel_uncore_type nhmex_uncore_bbox = {
	.name			= "bbox",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.event_ctl		= NHMEX_B0_MSR_PMON_CTL0,
	.perf_ctr		= NHMEX_B0_MSR_PMON_CTR0,
	.event_mask		= NHMEX_B_PMON_RAW_EVENT_MASK,
	.box_ctl		= NHMEX_B0_MSR_PMON_GLOBAL_CTL,
	.msr_offset		= NHMEX_B_MSR_OFFSET,
	.pair_ctr_ctl		= 1,
	.num_shared_regs	= 1,
	.constraints		= nhmex_uncore_bbox_constraints,
	.ops			= &nhmex_uncore_bbox_ops,
	.format_group		= &nhmex_uncore_bbox_format_group
};

static int nhmex_sbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	/* only TO_R_PROG_EV event uses the match/mask register */
	if ((hwc->config & NHMEX_PMON_CTL_EV_SEL_MASK) !=
	    NHMEX_S_EVENT_TO_R_PROG_EV)
		return 0;

	if (box->pmu->pmu_idx == 0)
		reg1->reg = NHMEX_S0_MSR_MM_CFG;
	else
		reg1->reg = NHMEX_S1_MSR_MM_CFG;
	reg1->idx = 0;
	reg1->config = event->attr.config1;
	reg2->config = event->attr.config2;
	return 0;
}

static void nhmex_sbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		wrmsrl(reg1->reg, 0);
		wrmsrl(reg1->reg + 1, reg1->config);
		wrmsrl(reg1->reg + 2, reg2->config);
		wrmsrl(reg1->reg, NHMEX_S_PMON_MM_CFG_EN);
	}
	wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
}
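/*
 * The Sbox match/mask registers sit right above MM_CFG (MATCH = MM_CFG + 1,
 * MASK = MM_CFG + 2).  MM_CFG is cleared first so that matching is disabled
 * while the MATCH/MASK values are rewritten, then re-armed by setting
 * NHMEX_S_PMON_MM_CFG_EN.
 */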

static struct attribute *nhmex_uncore_sbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_match.attr,
	&format_attr_mask.attr,
	NULL,
};

static struct attribute_group nhmex_uncore_sbox_format_group = {
	.name			= "format",
	.attrs			= nhmex_uncore_sbox_formats_attr,
};

static struct intel_uncore_ops nhmex_uncore_sbox_ops = {
	NHMEX_UNCORE_OPS_COMMON_INIT(),
	.enable_event		= nhmex_sbox_msr_enable_event,
	.hw_config		= nhmex_sbox_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};

static struct intel_uncore_type nhmex_uncore_sbox = {
	.name			= "sbox",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.event_ctl		= NHMEX_S0_MSR_PMON_CTL0,
	.perf_ctr		= NHMEX_S0_MSR_PMON_CTR0,
	.event_mask		= NHMEX_PMON_RAW_EVENT_MASK,
	.box_ctl		= NHMEX_S0_MSR_PMON_GLOBAL_CTL,
	.msr_offset		= NHMEX_S_MSR_OFFSET,
	.pair_ctr_ctl		= 1,
	.num_shared_regs	= 1,
	.ops			= &nhmex_uncore_sbox_ops,
	.format_group		= &nhmex_uncore_sbox_format_group
};

enum {
	EXTRA_REG_NHMEX_M_FILTER,
	EXTRA_REG_NHMEX_M_DSP,
	EXTRA_REG_NHMEX_M_ISS,
	EXTRA_REG_NHMEX_M_MAP,
	EXTRA_REG_NHMEX_M_MSC_THR,
	EXTRA_REG_NHMEX_M_PGT,
	EXTRA_REG_NHMEX_M_PLD,
	EXTRA_REG_NHMEX_M_ZDP_CTL_FVC,
};

static struct extra_reg nhmex_uncore_mbox_extra_regs[] = {
	MBOX_INC_SEL_EXTAR_REG(0x0, DSP),
	MBOX_INC_SEL_EXTAR_REG(0x4, MSC_THR),
	MBOX_INC_SEL_EXTAR_REG(0x5, MSC_THR),
	MBOX_INC_SEL_EXTAR_REG(0x9, ISS),
	/* event 0xa uses two extra registers */
	MBOX_INC_SEL_EXTAR_REG(0xa, ISS),
	MBOX_INC_SEL_EXTAR_REG(0xa, PLD),
	MBOX_INC_SEL_EXTAR_REG(0xb, PLD),
	/* events 0xd ~ 0x10 use the same extra register */
	MBOX_INC_SEL_EXTAR_REG(0xd, ZDP_CTL_FVC),
	MBOX_INC_SEL_EXTAR_REG(0xe, ZDP_CTL_FVC),
	MBOX_INC_SEL_EXTAR_REG(0xf, ZDP_CTL_FVC),
	MBOX_INC_SEL_EXTAR_REG(0x10, ZDP_CTL_FVC),
	MBOX_INC_SEL_EXTAR_REG(0x16, PGT),
	MBOX_SET_FLAG_SEL_EXTRA_REG(0x0, DSP),
	MBOX_SET_FLAG_SEL_EXTRA_REG(0x1, ISS),
	MBOX_SET_FLAG_SEL_EXTRA_REG(0x5, PGT),
	MBOX_SET_FLAG_SEL_EXTRA_REG(0x6, MAP),
	EVENT_EXTRA_END
};

/* Nehalem-EX or Westmere-EX ? */
static bool uncore_nhmex;

static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 config)
{
	struct intel_uncore_extra_reg *er;
	unsigned long flags;
	bool ret = false;
	u64 mask;

	if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
		er = &box->shared_regs[idx];
		raw_spin_lock_irqsave(&er->lock, flags);
		if (!atomic_read(&er->ref) || er->config == config) {
			atomic_inc(&er->ref);
			er->config = config;
			ret = true;
		}
		raw_spin_unlock_irqrestore(&er->lock, flags);

		return ret;
	}
	/*
	 * The ZDP_CTL_FVC MSR has 4 fields which are used to control
	 * events 0xd ~ 0x10. Besides these 4 fields, there are additional
	 * fields which are shared.
	 */
	idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
	if (WARN_ON_ONCE(idx >= 4))
		return false;

	/* mask of the shared fields */
	if (uncore_nhmex)
		mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK;
	else
		mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK;
	er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];

	raw_spin_lock_irqsave(&er->lock, flags);
	/* add mask of the non-shared field if it's in use */
	if (__BITS_VALUE(atomic_read(&er->ref), idx, 8)) {
		if (uncore_nhmex)
			mask |= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
		else
			mask |= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
	}

	if (!atomic_read(&er->ref) || !((er->config ^ config) & mask)) {
		atomic_add(1 << (idx * 8), &er->ref);
		if (uncore_nhmex)
			mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK |
				NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
		else
			mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK |
				WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
		er->config &= ~mask;
		er->config |= (config & mask);
		ret = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	return ret;
}

static void nhmex_mbox_put_shared_reg(struct intel_uncore_box *box, int idx)
{
	struct intel_uncore_extra_reg *er;

	if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
		er = &box->shared_regs[idx];
		atomic_dec(&er->ref);
		return;
	}

	idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
	er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
	atomic_sub(1 << (idx * 8), &er->ref);
}
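/*
 * For the first seven shared registers er->ref is a plain reference count.
 * For ZDP_CTL_FVC it is treated as four packed 8-bit counters, one per FVC
 * event field, hence the atomic_add()/atomic_sub() of 1 << (idx * 8) and
 * the __BITS_VALUE() extraction in nhmex_mbox_get_shared_reg().
 */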

static u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	u64 idx, orig_idx = __BITS_VALUE(reg1->idx, 0, 8);
	u64 config = reg1->config;

	/* get the non-shared control bits and shift them */
	idx = orig_idx - EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
	if (uncore_nhmex)
		config &= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
	else
		config &= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
	if (new_idx > orig_idx) {
		idx = new_idx - orig_idx;
		config <<= 3 * idx;
	} else {
		idx = orig_idx - new_idx;
		config >>= 3 * idx;
	}

	/* add the shared control bits back */
	if (uncore_nhmex)
		config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
	else
		config |= WSMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
	if (modify) {
		/* adjust the main event selector */
		if (new_idx > orig_idx)
			hwc->config += idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
		else
			hwc->config -= idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
		reg1->config = config;
		reg1->idx = ~0xff | new_idx;
	}
	return config;
}
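/*
 * The per-event FVC control fields selected by
 * NHMEX/WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK() are 3 bits wide and laid out
 * back to back, so moving an event from field orig_idx to field new_idx
 * shifts its control bits by 3 * |new_idx - orig_idx| and adjusts the
 * inc_sel field of the main event selector by the same delta.
 */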

static struct event_constraint *
nhmex_mbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
	int i, idx[2], alloc = 0;
	u64 config1 = reg1->config;

	idx[0] = __BITS_VALUE(reg1->idx, 0, 8);
	idx[1] = __BITS_VALUE(reg1->idx, 1, 8);
again:
	for (i = 0; i < 2; i++) {
		if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
			idx[i] = 0xff;

		if (idx[i] == 0xff)
			continue;

		if (!nhmex_mbox_get_shared_reg(box, idx[i],
				__BITS_VALUE(config1, i, 32)))
			goto fail;
		alloc |= (0x1 << i);
	}

	/* for the match/mask registers */
	if (reg2->idx != EXTRA_REG_NONE &&
	    (uncore_box_is_fake(box) || !reg2->alloc) &&
	    !nhmex_mbox_get_shared_reg(box, reg2->idx, reg2->config))
		goto fail;

	/*
	 * If it's a fake box -- as per validate_{group,event}() we
	 * shouldn't touch event state and we can avoid doing so
	 * since both will only call get_event_constraints() once
	 * on each event, this avoids the need for reg->alloc.
	 */
	if (!uncore_box_is_fake(box)) {
		if (idx[0] != 0xff && idx[0] != __BITS_VALUE(reg1->idx, 0, 8))
			nhmex_mbox_alter_er(event, idx[0], true);
		reg1->alloc |= alloc;
		if (reg2->idx != EXTRA_REG_NONE)
			reg2->alloc = 1;
	}
	return NULL;
fail:
	if (idx[0] != 0xff && !(alloc & 0x1) &&
	    idx[0] >= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
		/*
		 * events 0xd ~ 0x10 are functionally identical, but are
		 * controlled by different fields in the ZDP_CTL_FVC
		 * register. If we failed to take one field, try the
		 * remaining 3 choices.
		 */
		BUG_ON(__BITS_VALUE(reg1->idx, 1, 8) != 0xff);
		idx[0] -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
		idx[0] = (idx[0] + 1) % 4;
		idx[0] += EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
		if (idx[0] != __BITS_VALUE(reg1->idx, 0, 8)) {
			config1 = nhmex_mbox_alter_er(event, idx[0], false);
			goto again;
		}
	}

	if (alloc & 0x1)
		nhmex_mbox_put_shared_reg(box, idx[0]);
	if (alloc & 0x2)
		nhmex_mbox_put_shared_reg(box, idx[1]);
	return &uncore_constraint_empty;
}

static void nhmex_mbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;

	if (uncore_box_is_fake(box))
		return;

	if (reg1->alloc & 0x1)
		nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 0, 8));
	if (reg1->alloc & 0x2)
		nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 1, 8));
	reg1->alloc = 0;

	if (reg2->alloc) {
		nhmex_mbox_put_shared_reg(box, reg2->idx);
		reg2->alloc = 0;
	}
}

static int nhmex_mbox_extra_reg_idx(struct extra_reg *er)
{
	if (er->idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
		return er->idx;
	return er->idx + (er->event >> NHMEX_M_PMON_CTL_INC_SEL_SHIFT) - 0xd;
}

static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_type *type = box->pmu->type;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
	struct extra_reg *er;
	unsigned msr;
	int reg_idx = 0;
	/*
	 * The mbox events may require 2 extra MSRs at the most. But only
	 * the lower 32 bits in these MSRs are significant, so we can use
	 * config1 to pass two MSRs' config.
	 */
	for (er = nhmex_uncore_mbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		if (event->attr.config1 & ~er->valid_mask)
			return -EINVAL;

		msr = er->msr + type->msr_offset * box->pmu->pmu_idx;
		if (WARN_ON_ONCE(msr >= 0xffff || er->idx >= 0xff))
			return -EINVAL;

		/* always use the 32~63 bits to pass the PLD config */
		if (er->idx == EXTRA_REG_NHMEX_M_PLD)
			reg_idx = 1;
		else if (WARN_ON_ONCE(reg_idx > 0))
			return -EINVAL;

		reg1->idx &= ~(0xff << (reg_idx * 8));
		reg1->reg &= ~(0xffff << (reg_idx * 16));
		reg1->idx |= nhmex_mbox_extra_reg_idx(er) << (reg_idx * 8);
		reg1->reg |= msr << (reg_idx * 16);
		reg1->config = event->attr.config1;
		reg_idx++;
	}
	/*
	 * The mbox only provides the ability to perform address matching
	 * for the PLD events.
	 */
	if (reg_idx == 2) {
		reg2->idx = EXTRA_REG_NHMEX_M_FILTER;
		if (event->attr.config2 & NHMEX_M_PMON_MM_CFG_EN)
			reg2->config = event->attr.config2;
		else
			reg2->config = ~0ULL;
		if (box->pmu->pmu_idx == 0)
			reg2->reg = NHMEX_M0_MSR_PMU_MM_CFG;
		else
			reg2->reg = NHMEX_M1_MSR_PMU_MM_CFG;
	}
	return 0;
}
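/*
 * nhmex_mbox_hw_config() packs up to two extra registers into reg1: the
 * two 8-bit halves of reg1->idx hold the shared register indices, the two
 * 16-bit halves of reg1->reg hold the MSR addresses, and the matching
 * 32-bit halves of config1 carry their contents (PLD always in the upper
 * half).  reg2 is only used for the PLD address match/mask filter.
 */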

static u64 nhmex_mbox_shared_reg_config(struct intel_uncore_box *box, int idx)
{
	struct intel_uncore_extra_reg *er;
	unsigned long flags;
	u64 config;

	if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
		return box->shared_regs[idx].config;

	er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
	raw_spin_lock_irqsave(&er->lock, flags);
	config = er->config;
	raw_spin_unlock_irqrestore(&er->lock, flags);
	return config;
}

static void nhmex_mbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
	int idx;

	idx = __BITS_VALUE(reg1->idx, 0, 8);
	if (idx != 0xff)
		wrmsrl(__BITS_VALUE(reg1->reg, 0, 16),
			nhmex_mbox_shared_reg_config(box, idx));
	idx = __BITS_VALUE(reg1->idx, 1, 8);
	if (idx != 0xff)
		wrmsrl(__BITS_VALUE(reg1->reg, 1, 16),
			nhmex_mbox_shared_reg_config(box, idx));

	if (reg2->idx != EXTRA_REG_NONE) {
		wrmsrl(reg2->reg, 0);
		if (reg2->config != ~0ULL) {
			wrmsrl(reg2->reg + 1,
				reg2->config & NHMEX_M_PMON_ADDR_MATCH_MASK);
			wrmsrl(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK &
				(reg2->config >> NHMEX_M_PMON_ADDR_MASK_SHIFT));
			wrmsrl(reg2->reg, NHMEX_M_PMON_MM_CFG_EN);
		}
	}

	wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
}

DEFINE_UNCORE_FORMAT_ATTR(count_mode,		count_mode,	"config:2-3");
DEFINE_UNCORE_FORMAT_ATTR(storage_mode,		storage_mode,	"config:4-5");
DEFINE_UNCORE_FORMAT_ATTR(wrap_mode,		wrap_mode,	"config:6");
DEFINE_UNCORE_FORMAT_ATTR(flag_mode,		flag_mode,	"config:7");
DEFINE_UNCORE_FORMAT_ATTR(inc_sel,		inc_sel,	"config:9-13");
DEFINE_UNCORE_FORMAT_ATTR(set_flag_sel,		set_flag_sel,	"config:19-21");
DEFINE_UNCORE_FORMAT_ATTR(filter_cfg_en,	filter_cfg_en,	"config2:63");
DEFINE_UNCORE_FORMAT_ATTR(filter_match,		filter_match,	"config2:0-33");
DEFINE_UNCORE_FORMAT_ATTR(filter_mask,		filter_mask,	"config2:34-61");
DEFINE_UNCORE_FORMAT_ATTR(dsp,			dsp,		"config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(thr,			thr,		"config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(fvc,			fvc,		"config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(pgt,			pgt,		"config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(map,			map,		"config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(iss,			iss,		"config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(pld,			pld,		"config1:32-63");

static struct attribute *nhmex_uncore_mbox_formats_attr[] = {
	&format_attr_count_mode.attr,
	&format_attr_storage_mode.attr,
	&format_attr_wrap_mode.attr,
	&format_attr_flag_mode.attr,
	&format_attr_inc_sel.attr,
	&format_attr_set_flag_sel.attr,
	&format_attr_filter_cfg_en.attr,
	&format_attr_filter_match.attr,
	&format_attr_filter_mask.attr,
	&format_attr_dsp.attr,
	&format_attr_thr.attr,
	&format_attr_fvc.attr,
	&format_attr_pgt.attr,
	&format_attr_map.attr,
	&format_attr_iss.attr,
	&format_attr_pld.attr,
	NULL,
};

static struct attribute_group nhmex_uncore_mbox_format_group = {
	.name		= "format",
	.attrs		= nhmex_uncore_mbox_formats_attr,
};

static struct uncore_event_desc nhmex_uncore_mbox_events[] = {
	INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x2800"),
	INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x2820"),
	{ /* end: all zeroes */ },
};

static struct uncore_event_desc wsmex_uncore_mbox_events[] = {
	INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x5000"),
	INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x5040"),
	{ /* end: all zeroes */ },
};
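/*
 * The named events above can also be requested via the raw format fields;
 * e.g. something like "perf stat -a -e uncore_mbox_0/inc_sel=0xd,fvc=0x2800/"
 * (assuming the usual uncore_<box>_<n> PMU instance naming) would count the
 * bbox read commands on Nehalem-EX.
 */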

static struct intel_uncore_ops nhmex_uncore_mbox_ops = {
	NHMEX_UNCORE_OPS_COMMON_INIT(),
	.enable_event	= nhmex_mbox_msr_enable_event,
	.hw_config	= nhmex_mbox_hw_config,
	.get_constraint	= nhmex_mbox_get_constraint,
	.put_constraint	= nhmex_mbox_put_constraint,
};

static struct intel_uncore_type nhmex_uncore_mbox = {
	.name			= "mbox",
	.num_counters		= 6,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.event_ctl		= NHMEX_M0_MSR_PMU_CTL0,
	.perf_ctr		= NHMEX_M0_MSR_PMU_CNT0,
	.event_mask		= NHMEX_M_PMON_RAW_EVENT_MASK,
	.box_ctl		= NHMEX_M0_MSR_GLOBAL_CTL,
	.msr_offset		= NHMEX_M_MSR_OFFSET,
	.pair_ctr_ctl		= 1,
	.num_shared_regs	= 8,
	.event_descs		= nhmex_uncore_mbox_events,
	.ops			= &nhmex_uncore_mbox_ops,
	.format_group		= &nhmex_uncore_mbox_format_group,
};

static void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	/* adjust the main event selector and extra register index */
	if (reg1->idx % 2) {
		reg1->idx--;
		hwc->config -= 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
	} else {
		reg1->idx++;
		hwc->config += 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
	}

	/* adjust extra register config */
	switch (reg1->idx % 6) {
	case 2:
		/* shift the 8~15 bits to the 0~7 bits */
		reg1->config >>= 8;
		break;
	case 3:
		/* shift the 0~7 bits to the 8~15 bits */
		reg1->config <<= 8;
		break;
	}
}
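/*
 * Rbox events come in even/odd pairs that differ only in the low bit of
 * the event select; switching to the partner event toggles the low bit of
 * reg1->idx and moves the event select up or down by one.  The 3rd and 4th
 * events of a set share one QLX_CFG register, using its low and high byte
 * respectively, so their config is shifted between the two byte fields.
 */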

/*
 * Each Rbox has 4 event sets, which monitor QPI ports 0~3 or 4~7.
 * An event set consists of 6 events; the 3rd and 4th events in
 * an event set use the same extra register, so an event set uses
 * 5 extra registers.
 */
static struct event_constraint *
nhmex_rbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
	struct intel_uncore_extra_reg *er;
	unsigned long flags;
	int idx, er_idx;
	u64 config1;
	bool ok = false;

	if (!uncore_box_is_fake(box) && reg1->alloc)
		return NULL;

	idx = reg1->idx % 6;
	config1 = reg1->config;
again:
	er_idx = idx;
	/* the 3rd and 4th events use the same extra register */
	if (er_idx > 2)
		er_idx--;
	er_idx += (reg1->idx / 6) * 5;

	er = &box->shared_regs[er_idx];
	raw_spin_lock_irqsave(&er->lock, flags);
	if (idx < 2) {
		if (!atomic_read(&er->ref) || er->config == reg1->config) {
			atomic_inc(&er->ref);
			er->config = reg1->config;
			ok = true;
		}
	} else if (idx == 2 || idx == 3) {
		/*
		 * these two events use different fields in an extra register,
		 * bits 0~7 and bits 8~15 respectively.
		 */
		u64 mask = 0xff << ((idx - 2) * 8);
		if (!__BITS_VALUE(atomic_read(&er->ref), idx - 2, 8) ||
				!((er->config ^ config1) & mask)) {
			atomic_add(1 << ((idx - 2) * 8), &er->ref);
			er->config &= ~mask;
			er->config |= config1 & mask;
			ok = true;
		}
	} else {
		if (!atomic_read(&er->ref) ||
				(er->config == (hwc->config >> 32) &&
				 er->config1 == reg1->config &&
				 er->config2 == reg2->config)) {
			atomic_inc(&er->ref);
			er->config = (hwc->config >> 32);
			er->config1 = reg1->config;
			er->config2 = reg2->config;
			ok = true;
		}
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (!ok) {
		/*
		 * The Rbox events are always in pairs. The paired
		 * events are functionally identical, but use different
		 * extra registers. If we failed to take an extra
		 * register, try the alternative.
		 */
		idx ^= 1;
		if (idx != reg1->idx % 6) {
			if (idx == 2)
				config1 >>= 8;
			else if (idx == 3)
				config1 <<= 8;
			goto again;
		}
	} else {
		if (!uncore_box_is_fake(box)) {
			if (idx != reg1->idx % 6)
				nhmex_rbox_alter_er(box, event);
			reg1->alloc = 1;
		}
		return NULL;
	}
	return &uncore_constraint_empty;
}

static void nhmex_rbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_extra_reg *er;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	int idx, er_idx;

	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	idx = reg1->idx % 6;
	er_idx = idx;
	if (er_idx > 2)
		er_idx--;
	er_idx += (reg1->idx / 6) * 5;

	er = &box->shared_regs[er_idx];
	if (idx == 2 || idx == 3)
		atomic_sub(1 << ((idx - 2) * 8), &er->ref);
	else
		atomic_dec(&er->ref);

	reg1->alloc = 0;
}

static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
	int idx;

	idx = (event->hw.config & NHMEX_R_PMON_CTL_EV_SEL_MASK) >>
		NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
	if (idx >= 0x18)
		return -EINVAL;

	reg1->idx = idx;
	reg1->config = event->attr.config1;

	switch (idx % 6) {
	case 4:
	case 5:
		hwc->config |= event->attr.config & (~0ULL << 32);
		reg2->config = event->attr.config2;
		break;
	}
	return 0;
}
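/*
 * The 5-bit Rbox event select encodes both the port and the function:
 * idx / 6 picks one of the four ports handled by this box and idx % 6
 * picks the register programmed in nhmex_rbox_msr_enable_event() below
 * (0/1: IPERF_CFG0/1, 2/3: low/high byte of QLX_CFG, 4/5: XBR set 1/2,
 * which additionally consume the upper config bits and config2).
 */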

static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
	int idx, port;

	idx = reg1->idx;
	port = idx / 6 + box->pmu->pmu_idx * 4;

	switch (idx % 6) {
	case 0:
		wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG0(port), reg1->config);
		break;
	case 1:
		wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG1(port), reg1->config);
		break;
	case 2:
	case 3:
		wrmsrl(NHMEX_R_MSR_PORTN_QLX_CFG(port),
			uncore_shared_reg_config(box, 2 + (idx / 6) * 5));
		break;
	case 4:
		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port),
			hwc->config >> 32);
		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(port), reg1->config);
		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MASK(port), reg2->config);
		break;
	case 5:
		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port),
			hwc->config >> 32);
		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(port), reg1->config);
		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MASK(port), reg2->config);
		break;
	}

	wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
		(hwc->config & NHMEX_R_PMON_CTL_EV_SEL_MASK));
}

DEFINE_UNCORE_FORMAT_ATTR(xbr_mm_cfg, xbr_mm_cfg, "config:32-63");
DEFINE_UNCORE_FORMAT_ATTR(xbr_match, xbr_match, "config1:0-63");
DEFINE_UNCORE_FORMAT_ATTR(xbr_mask, xbr_mask, "config2:0-63");
DEFINE_UNCORE_FORMAT_ATTR(qlx_cfg, qlx_cfg, "config1:0-15");
DEFINE_UNCORE_FORMAT_ATTR(iperf_cfg, iperf_cfg, "config1:0-31");

static struct attribute *nhmex_uncore_rbox_formats_attr[] = {
	&format_attr_event5.attr,
	&format_attr_xbr_mm_cfg.attr,
	&format_attr_xbr_match.attr,
	&format_attr_xbr_mask.attr,
	&format_attr_qlx_cfg.attr,
	&format_attr_iperf_cfg.attr,
	NULL,
};

static struct attribute_group nhmex_uncore_rbox_format_group = {
	.name = "format",
	.attrs = nhmex_uncore_rbox_formats_attr,
};

static struct uncore_event_desc nhmex_uncore_rbox_events[] = {
	INTEL_UNCORE_EVENT_DESC(qpi0_flit_send,		"event=0x0,iperf_cfg=0x80000000"),
	INTEL_UNCORE_EVENT_DESC(qpi1_filt_send,		"event=0x6,iperf_cfg=0x80000000"),
	INTEL_UNCORE_EVENT_DESC(qpi0_idle_filt,		"event=0x0,iperf_cfg=0x40000000"),
	INTEL_UNCORE_EVENT_DESC(qpi1_idle_filt,		"event=0x6,iperf_cfg=0x40000000"),
	INTEL_UNCORE_EVENT_DESC(qpi0_date_response,	"event=0x0,iperf_cfg=0xc4"),
	INTEL_UNCORE_EVENT_DESC(qpi1_date_response,	"event=0x6,iperf_cfg=0xc4"),
	{ /* end: all zeroes */ },
};

static struct intel_uncore_ops nhmex_uncore_rbox_ops = {
	NHMEX_UNCORE_OPS_COMMON_INIT(),
	.enable_event		= nhmex_rbox_msr_enable_event,
	.hw_config		= nhmex_rbox_hw_config,
	.get_constraint		= nhmex_rbox_get_constraint,
	.put_constraint		= nhmex_rbox_put_constraint,
};

static struct intel_uncore_type nhmex_uncore_rbox = {
	.name			= "rbox",
	.num_counters		= 8,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.event_ctl		= NHMEX_R_MSR_PMON_CTL0,
	.perf_ctr		= NHMEX_R_MSR_PMON_CNT0,
	.event_mask		= NHMEX_R_PMON_RAW_EVENT_MASK,
	.box_ctl		= NHMEX_R_MSR_GLOBAL_CTL,
	.msr_offset		= NHMEX_R_MSR_OFFSET,
	.pair_ctr_ctl		= 1,
	.num_shared_regs	= 20,
	.event_descs		= nhmex_uncore_rbox_events,
	.ops			= &nhmex_uncore_rbox_ops,
	.format_group		= &nhmex_uncore_rbox_format_group
};

static struct intel_uncore_type *nhmex_msr_uncores[] = {
	&nhmex_uncore_ubox,
	&nhmex_uncore_cbox,
	&nhmex_uncore_bbox,
	&nhmex_uncore_sbox,
	&nhmex_uncore_mbox,
	&nhmex_uncore_rbox,
	&nhmex_uncore_wbox,
	NULL,
};

void nhmex_uncore_cpu_init(void)
{
	if (boot_cpu_data.x86_model == 46)
		uncore_nhmex = true;
	else
		nhmex_uncore_mbox.event_descs = wsmex_uncore_mbox_events;
	if (nhmex_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		nhmex_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	uncore_msr_uncores = nhmex_msr_uncores;
}
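/*
 * x86_model 46 (0x2e) is Nehalem-EX; the only other model routed here,
 * Westmere-EX (0x2f), uses the WSM-EX FVC layout and event descriptions
 * selected above.  The number of exposed Cboxes is clamped to the number
 * of cores in the package.
 */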
/* end of Nehalem-EX uncore support */