// SPDX-License-Identifier: GPL-2.0
/* Nehalem-EX/Westmere-EX uncore support */
#include "uncore.h"

/* NHM-EX event control */
#define NHMEX_PMON_CTL_EV_SEL_MASK	0x000000ff
#define NHMEX_PMON_CTL_UMASK_MASK	0x0000ff00
#define NHMEX_PMON_CTL_EN_BIT0		(1 << 0)
#define NHMEX_PMON_CTL_EDGE_DET		(1 << 18)
#define NHMEX_PMON_CTL_PMI_EN		(1 << 20)
#define NHMEX_PMON_CTL_EN_BIT22		(1 << 22)
#define NHMEX_PMON_CTL_INVERT		(1 << 23)
#define NHMEX_PMON_CTL_TRESH_MASK	0xff000000
#define NHMEX_PMON_RAW_EVENT_MASK	(NHMEX_PMON_CTL_EV_SEL_MASK | \
					 NHMEX_PMON_CTL_UMASK_MASK | \
					 NHMEX_PMON_CTL_EDGE_DET | \
					 NHMEX_PMON_CTL_INVERT | \
					 NHMEX_PMON_CTL_TRESH_MASK)
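
/*
 * With the definitions above, NHMEX_PMON_RAW_EVENT_MASK evaluates to
 * 0xff84ffff: event select in bits 0-7, unit mask in bits 8-15, edge
 * detect in bit 18, invert in bit 23 and the threshold in bits 24-31.
 */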

/* NHM-EX Ubox */
#define NHMEX_U_MSR_PMON_GLOBAL_CTL		0xc00
#define NHMEX_U_MSR_PMON_CTR			0xc11
#define NHMEX_U_MSR_PMON_EV_SEL			0xc10

#define NHMEX_U_PMON_GLOBAL_EN			(1 << 0)
#define NHMEX_U_PMON_GLOBAL_PMI_CORE_SEL	0x0000001e
#define NHMEX_U_PMON_GLOBAL_EN_ALL		(1 << 28)
#define NHMEX_U_PMON_GLOBAL_RST_ALL		(1 << 29)
#define NHMEX_U_PMON_GLOBAL_FRZ_ALL		(1 << 31)

#define NHMEX_U_PMON_RAW_EVENT_MASK		\
		(NHMEX_PMON_CTL_EV_SEL_MASK |	\
		 NHMEX_PMON_CTL_EDGE_DET)

/* NHM-EX Cbox */
#define NHMEX_C0_MSR_PMON_GLOBAL_CTL		0xd00
#define NHMEX_C0_MSR_PMON_CTR0			0xd11
#define NHMEX_C0_MSR_PMON_EV_SEL0		0xd10
#define NHMEX_C_MSR_OFFSET			0x20

/* NHM-EX Bbox */
#define NHMEX_B0_MSR_PMON_GLOBAL_CTL		0xc20
#define NHMEX_B0_MSR_PMON_CTR0			0xc31
#define NHMEX_B0_MSR_PMON_CTL0			0xc30
#define NHMEX_B_MSR_OFFSET			0x40
#define NHMEX_B0_MSR_MATCH			0xe45
#define NHMEX_B0_MSR_MASK			0xe46
#define NHMEX_B1_MSR_MATCH			0xe4d
#define NHMEX_B1_MSR_MASK			0xe4e

#define NHMEX_B_PMON_CTL_EN			(1 << 0)
#define NHMEX_B_PMON_CTL_EV_SEL_SHIFT		1
#define NHMEX_B_PMON_CTL_EV_SEL_MASK		\
		(0x1f << NHMEX_B_PMON_CTL_EV_SEL_SHIFT)
#define NHMEX_B_PMON_CTR_SHIFT		6
#define NHMEX_B_PMON_CTR_MASK		\
		(0x3 << NHMEX_B_PMON_CTR_SHIFT)
#define NHMEX_B_PMON_RAW_EVENT_MASK		\
		(NHMEX_B_PMON_CTL_EV_SEL_MASK | \
		 NHMEX_B_PMON_CTR_MASK)

/* NHM-EX Sbox */
#define NHMEX_S0_MSR_PMON_GLOBAL_CTL		0xc40
#define NHMEX_S0_MSR_PMON_CTR0			0xc51
#define NHMEX_S0_MSR_PMON_CTL0			0xc50
#define NHMEX_S_MSR_OFFSET			0x80
#define NHMEX_S0_MSR_MM_CFG			0xe48
#define NHMEX_S0_MSR_MATCH			0xe49
#define NHMEX_S0_MSR_MASK			0xe4a
#define NHMEX_S1_MSR_MM_CFG			0xe58
#define NHMEX_S1_MSR_MATCH			0xe59
#define NHMEX_S1_MSR_MASK			0xe5a

#define NHMEX_S_PMON_MM_CFG_EN			(0x1ULL << 63)
#define NHMEX_S_EVENT_TO_R_PROG_EV		0

/* NHM-EX Mbox */
#define NHMEX_M0_MSR_GLOBAL_CTL			0xca0
#define NHMEX_M0_MSR_PMU_DSP			0xca5
#define NHMEX_M0_MSR_PMU_ISS			0xca6
#define NHMEX_M0_MSR_PMU_MAP			0xca7
#define NHMEX_M0_MSR_PMU_MSC_THR		0xca8
#define NHMEX_M0_MSR_PMU_PGT			0xca9
#define NHMEX_M0_MSR_PMU_PLD			0xcaa
#define NHMEX_M0_MSR_PMU_ZDP_CTL_FVC		0xcab
#define NHMEX_M0_MSR_PMU_CTL0			0xcb0
#define NHMEX_M0_MSR_PMU_CNT0			0xcb1
#define NHMEX_M_MSR_OFFSET			0x40
#define NHMEX_M0_MSR_PMU_MM_CFG			0xe54
#define NHMEX_M1_MSR_PMU_MM_CFG			0xe5c

#define NHMEX_M_PMON_MM_CFG_EN			(1ULL << 63)
#define NHMEX_M_PMON_ADDR_MATCH_MASK		0x3ffffffffULL
#define NHMEX_M_PMON_ADDR_MASK_MASK		0x7ffffffULL
#define NHMEX_M_PMON_ADDR_MASK_SHIFT		34

#define NHMEX_M_PMON_CTL_EN			(1 << 0)
#define NHMEX_M_PMON_CTL_PMI_EN			(1 << 1)
#define NHMEX_M_PMON_CTL_COUNT_MODE_SHIFT	2
#define NHMEX_M_PMON_CTL_COUNT_MODE_MASK	\
	(0x3 << NHMEX_M_PMON_CTL_COUNT_MODE_SHIFT)
#define NHMEX_M_PMON_CTL_STORAGE_MODE_SHIFT	4
#define NHMEX_M_PMON_CTL_STORAGE_MODE_MASK	\
	(0x3 << NHMEX_M_PMON_CTL_STORAGE_MODE_SHIFT)
#define NHMEX_M_PMON_CTL_WRAP_MODE		(1 << 6)
#define NHMEX_M_PMON_CTL_FLAG_MODE		(1 << 7)
#define NHMEX_M_PMON_CTL_INC_SEL_SHIFT		9
#define NHMEX_M_PMON_CTL_INC_SEL_MASK		\
	(0x1f << NHMEX_M_PMON_CTL_INC_SEL_SHIFT)
#define NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT	19
#define NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK	\
	(0x7 << NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT)
#define NHMEX_M_PMON_RAW_EVENT_MASK			\
		(NHMEX_M_PMON_CTL_COUNT_MODE_MASK |	\
		 NHMEX_M_PMON_CTL_STORAGE_MODE_MASK |	\
		 NHMEX_M_PMON_CTL_WRAP_MODE |		\
		 NHMEX_M_PMON_CTL_FLAG_MODE |		\
		 NHMEX_M_PMON_CTL_INC_SEL_MASK |	\
		 NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK)

#define NHMEX_M_PMON_ZDP_CTL_FVC_MASK		(((1 << 11) - 1) | (1 << 23))
#define NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n)	(0x7ULL << (11 + 3 * (n)))

#define WSMEX_M_PMON_ZDP_CTL_FVC_MASK		(((1 << 12) - 1) | (1 << 24))
#define WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n)	(0x7ULL << (12 + 3 * (n)))

/*
 * If bit 7 (FLAG_MODE) is not set, bits 9~13 select the event;
 * otherwise bits 19~21 select it.
 */
#define MBOX_INC_SEL(x) ((x) << NHMEX_M_PMON_CTL_INC_SEL_SHIFT)
#define MBOX_SET_FLAG_SEL(x) (((x) << NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT) | \
				NHMEX_M_PMON_CTL_FLAG_MODE)
#define MBOX_INC_SEL_MASK (NHMEX_M_PMON_CTL_INC_SEL_MASK | \
			   NHMEX_M_PMON_CTL_FLAG_MODE)
#define MBOX_SET_FLAG_SEL_MASK (NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK | \
				NHMEX_M_PMON_CTL_FLAG_MODE)
#define MBOX_INC_SEL_EXTRA_REG(c, r) \
		EVENT_EXTRA_REG(MBOX_INC_SEL(c), NHMEX_M0_MSR_PMU_##r, \
				MBOX_INC_SEL_MASK, (u64)-1, NHMEX_M_##r)
#define MBOX_SET_FLAG_SEL_EXTRA_REG(c, r) \
		EVENT_EXTRA_REG(MBOX_SET_FLAG_SEL(c), NHMEX_M0_MSR_PMU_##r, \
				MBOX_SET_FLAG_SEL_MASK, \
				(u64)-1, NHMEX_M_##r)
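
/*
 * For example, MBOX_INC_SEL(0xd) evaluates to 0xd << 9 = 0x1a00, and
 * MBOX_SET_FLAG_SEL(0x1) to (0x1 << 19) | (1 << 7) = 0x80080; the
 * FLAG_MODE bit distinguishes which of the two fields selects the event.
 */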

/* NHM-EX Rbox */
#define NHMEX_R_MSR_GLOBAL_CTL			0xe00
#define NHMEX_R_MSR_PMON_CTL0			0xe10
#define NHMEX_R_MSR_PMON_CNT0			0xe11
#define NHMEX_R_MSR_OFFSET			0x20

#define NHMEX_R_MSR_PORTN_QLX_CFG(n)		\
		((n) < 4 ? (0xe0c + (n)) : (0xe2c + (n) - 4))
#define NHMEX_R_MSR_PORTN_IPERF_CFG0(n)		(0xe04 + (n))
#define NHMEX_R_MSR_PORTN_IPERF_CFG1(n)		(0xe24 + (n))
#define NHMEX_R_MSR_PORTN_XBR_OFFSET(n)		\
		(((n) < 4 ? 0 : 0x10) + (n) * 4)
#define NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n)	\
		(0xe60 + NHMEX_R_MSR_PORTN_XBR_OFFSET(n))
#define NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(n)	\
		(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n) + 1)
#define NHMEX_R_MSR_PORTN_XBR_SET1_MASK(n)	\
		(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n) + 2)
#define NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n)	\
		(0xe70 + NHMEX_R_MSR_PORTN_XBR_OFFSET(n))
#define NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(n)	\
		(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n) + 1)
#define NHMEX_R_MSR_PORTN_XBR_SET2_MASK(n)	\
		(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n) + 2)
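
/*
 * Worked example of the XBR MSR layout: port 0 has SET1 MM_CFG/MATCH/MASK
 * at 0xe60/0xe61/0xe62 and SET2 at 0xe70/0xe71/0xe72, while port 5 gets
 * XBR_OFFSET(5) = 0x10 + 0x14 = 0x24, i.e. its SET1 MM_CFG at 0xe84.
 */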

#define NHMEX_R_PMON_CTL_EN			(1 << 0)
#define NHMEX_R_PMON_CTL_EV_SEL_SHIFT		1
#define NHMEX_R_PMON_CTL_EV_SEL_MASK		\
		(0x1f << NHMEX_R_PMON_CTL_EV_SEL_SHIFT)
#define NHMEX_R_PMON_CTL_PMI_EN			(1 << 6)
#define NHMEX_R_PMON_RAW_EVENT_MASK		NHMEX_R_PMON_CTL_EV_SEL_MASK

/* NHM-EX Wbox */
#define NHMEX_W_MSR_GLOBAL_CTL			0xc80
#define NHMEX_W_MSR_PMON_EVT_SEL0		0xc90
#define NHMEX_W_MSR_PMON_CNT0			0xc91
#define NHMEX_W_MSR_PMON_FIXED_CTR		0x394
#define NHMEX_W_MSR_PMON_FIXED_CTL		0x395

#define NHMEX_W_PMON_GLOBAL_FIXED_EN		(1ULL << 31)

#define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
				((1ULL << (n)) - 1)))
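
/*
 * __BITS_VALUE() extracts the i-th n-bit wide field of x, e.g.
 * __BITS_VALUE(0x11223344, 1, 8) == 0x33. It is used below to pack
 * several small indices and MSR addresses into a single integer.
 */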

DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(counter, counter, "config:6-7");
DEFINE_UNCORE_FORMAT_ATTR(match, match, "config1:0-63");
DEFINE_UNCORE_FORMAT_ATTR(mask, mask, "config2:0-63");

static void nhmex_uncore_msr_init_box(struct intel_uncore_box *box)
{
	wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL, NHMEX_U_PMON_GLOBAL_EN_ALL);
}

static void nhmex_uncore_msr_exit_box(struct intel_uncore_box *box)
{
	wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL, 0);
}

static void nhmex_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);
	u64 config;

	if (msr) {
		rdmsrl(msr, config);
		config &= ~((1ULL << uncore_num_counters(box)) - 1);
		/* WBox has a fixed counter */
		if (uncore_msr_fixed_ctl(box))
			config &= ~NHMEX_W_PMON_GLOBAL_FIXED_EN;
		wrmsrl(msr, config);
	}
}

static void nhmex_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);
	u64 config;

	if (msr) {
		rdmsrl(msr, config);
		config |= (1ULL << uncore_num_counters(box)) - 1;
		/* WBox has a fixed counter */
		if (uncore_msr_fixed_ctl(box))
			config |= NHMEX_W_PMON_GLOBAL_FIXED_EN;
		wrmsrl(msr, config);
	}
}

static void nhmex_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	wrmsrl(event->hw.config_base, 0);
}

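/*
 * Boxes whose event_mask covers bit 0 (i.e. bit 0 belongs to the event
 * select field, as in NHMEX_PMON_RAW_EVENT_MASK) are enabled with bit 22;
 * the remaining boxes use bit 0 itself as the per-counter enable.
 */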
static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx == UNCORE_PMC_IDX_FIXED)
		wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0);
	else if (box->pmu->type->event_mask & NHMEX_PMON_CTL_EN_BIT0)
		wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
	else
		wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
}

#define NHMEX_UNCORE_OPS_COMMON_INIT()				\
	.init_box	= nhmex_uncore_msr_init_box,		\
	.exit_box	= nhmex_uncore_msr_exit_box,		\
	.disable_box	= nhmex_uncore_msr_disable_box,		\
	.enable_box	= nhmex_uncore_msr_enable_box,		\
	.disable_event	= nhmex_uncore_msr_disable_event,	\
	.read_counter	= uncore_msr_read_counter

static struct intel_uncore_ops nhmex_uncore_ops = {
	NHMEX_UNCORE_OPS_COMMON_INIT(),
	.enable_event	= nhmex_uncore_msr_enable_event,
};

static struct attribute *nhmex_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_edge.attr,
	NULL,
};

static const struct attribute_group nhmex_uncore_ubox_format_group = {
	.name		= "format",
	.attrs		= nhmex_uncore_ubox_formats_attr,
};

static struct intel_uncore_type nhmex_uncore_ubox = {
	.name		= "ubox",
	.num_counters	= 1,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.event_ctl	= NHMEX_U_MSR_PMON_EV_SEL,
	.perf_ctr	= NHMEX_U_MSR_PMON_CTR,
	.event_mask	= NHMEX_U_PMON_RAW_EVENT_MASK,
	.box_ctl	= NHMEX_U_MSR_PMON_GLOBAL_CTL,
	.ops		= &nhmex_uncore_ops,
	.format_group	= &nhmex_uncore_ubox_format_group
};

static struct attribute *nhmex_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group nhmex_uncore_cbox_format_group = {
	.name = "format",
	.attrs = nhmex_uncore_cbox_formats_attr,
};

/* MSR offset for each instance of the Cbox */
static unsigned nhmex_cbox_msr_offsets[] = {
	0x0, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x240, 0x2c0,
};
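
/*
 * The uncore core code adds msr_offsets[i] to event_ctl/perf_ctr for
 * box instance i, so e.g. cbox 4 has its EV_SEL0 at 0xd10 + 0x20 = 0xd30.
 */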

static struct intel_uncore_type nhmex_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 6,
	.num_boxes		= 10,
	.perf_ctr_bits		= 48,
	.event_ctl		= NHMEX_C0_MSR_PMON_EV_SEL0,
	.perf_ctr		= NHMEX_C0_MSR_PMON_CTR0,
	.event_mask		= NHMEX_PMON_RAW_EVENT_MASK,
	.box_ctl		= NHMEX_C0_MSR_PMON_GLOBAL_CTL,
	.msr_offsets		= nhmex_cbox_msr_offsets,
	.pair_ctr_ctl		= 1,
	.ops			= &nhmex_uncore_ops,
	.format_group		= &nhmex_uncore_cbox_format_group
};

static struct uncore_event_desc nhmex_uncore_wbox_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0"),
	{ /* end: all zeroes */ },
};

static struct intel_uncore_type nhmex_uncore_wbox = {
	.name			= "wbox",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_ctl		= NHMEX_W_MSR_PMON_EVT_SEL0,
	.perf_ctr		= NHMEX_W_MSR_PMON_CNT0,
	.fixed_ctr		= NHMEX_W_MSR_PMON_FIXED_CTR,
	.fixed_ctl		= NHMEX_W_MSR_PMON_FIXED_CTL,
	.event_mask		= NHMEX_PMON_RAW_EVENT_MASK,
	.box_ctl		= NHMEX_W_MSR_GLOBAL_CTL,
	.pair_ctr_ctl		= 1,
	.event_descs		= nhmex_uncore_wbox_events,
	.ops			= &nhmex_uncore_ops,
	.format_group		= &nhmex_uncore_cbox_format_group
};

static int nhmex_bbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
	int ctr, ev_sel;

	ctr = (hwc->config & NHMEX_B_PMON_CTR_MASK) >>
		NHMEX_B_PMON_CTR_SHIFT;
	ev_sel = (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK) >>
		  NHMEX_B_PMON_CTL_EV_SEL_SHIFT;

	/* events that do not use the match/mask registers */
	if ((ctr == 0 && ev_sel > 0x3) || (ctr == 1 && ev_sel > 0x6) ||
	    (ctr == 2 && ev_sel != 0x4) || ctr == 3)
		return 0;

	if (box->pmu->pmu_idx == 0)
		reg1->reg = NHMEX_B0_MSR_MATCH;
	else
		reg1->reg = NHMEX_B1_MSR_MATCH;
	reg1->idx = 0;
	reg1->config = event->attr.config1;
	reg2->config = event->attr.config2;
	return 0;
}

static void nhmex_bbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		wrmsrl(reg1->reg, reg1->config);
		wrmsrl(reg1->reg + 1, reg2->config);
	}
	wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
		(hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK));
}

/*
 * The Bbox has 4 counters, but each counter monitors different events.
 * Use bits 6-7 in the event config to select the counter.
 */
static struct event_constraint nhmex_uncore_bbox_constraints[] = {
	EVENT_CONSTRAINT(0, 1, 0xc0),
	EVENT_CONSTRAINT(0x40, 2, 0xc0),
	EVENT_CONSTRAINT(0x80, 4, 0xc0),
	EVENT_CONSTRAINT(0xc0, 8, 0xc0),
	EVENT_CONSTRAINT_END,
};
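
/*
 * Decoding example for the constraints above: an event with
 * (config & 0xc0) == 0x80 selects counter field 2 and is therefore
 * constrained to counter 2 only (counter bitmask 0x4).
 */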

static struct attribute *nhmex_uncore_bbox_formats_attr[] = {
	&format_attr_event5.attr,
	&format_attr_counter.attr,
	&format_attr_match.attr,
	&format_attr_mask.attr,
	NULL,
};

static const struct attribute_group nhmex_uncore_bbox_format_group = {
	.name = "format",
	.attrs = nhmex_uncore_bbox_formats_attr,
};

static struct intel_uncore_ops nhmex_uncore_bbox_ops = {
	NHMEX_UNCORE_OPS_COMMON_INIT(),
	.enable_event		= nhmex_bbox_msr_enable_event,
	.hw_config		= nhmex_bbox_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};

static struct intel_uncore_type nhmex_uncore_bbox = {
	.name			= "bbox",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.event_ctl		= NHMEX_B0_MSR_PMON_CTL0,
	.perf_ctr		= NHMEX_B0_MSR_PMON_CTR0,
	.event_mask		= NHMEX_B_PMON_RAW_EVENT_MASK,
	.box_ctl		= NHMEX_B0_MSR_PMON_GLOBAL_CTL,
	.msr_offset		= NHMEX_B_MSR_OFFSET,
	.pair_ctr_ctl		= 1,
	.num_shared_regs	= 1,
	.constraints		= nhmex_uncore_bbox_constraints,
	.ops			= &nhmex_uncore_bbox_ops,
	.format_group		= &nhmex_uncore_bbox_format_group
};

static int nhmex_sbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	/* only TO_R_PROG_EV event uses the match/mask register */
	if ((hwc->config & NHMEX_PMON_CTL_EV_SEL_MASK) !=
	    NHMEX_S_EVENT_TO_R_PROG_EV)
		return 0;

	if (box->pmu->pmu_idx == 0)
		reg1->reg = NHMEX_S0_MSR_MM_CFG;
	else
		reg1->reg = NHMEX_S1_MSR_MM_CFG;
	reg1->idx = 0;
	reg1->config = event->attr.config1;
	reg2->config = event->attr.config2;
	return 0;
}

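/*
 * Programming order for the Sbox match/mask registers: clear MM_CFG
 * first so matching is disabled while the MATCH and MASK registers
 * are updated, then set the MM_CFG enable bit (bit 63).
 */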
static void nhmex_sbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		wrmsrl(reg1->reg, 0);
		wrmsrl(reg1->reg + 1, reg1->config);
		wrmsrl(reg1->reg + 2, reg2->config);
		wrmsrl(reg1->reg, NHMEX_S_PMON_MM_CFG_EN);
	}
	wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
}

static struct attribute *nhmex_uncore_sbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_match.attr,
	&format_attr_mask.attr,
	NULL,
};

static const struct attribute_group nhmex_uncore_sbox_format_group = {
	.name			= "format",
	.attrs			= nhmex_uncore_sbox_formats_attr,
};

static struct intel_uncore_ops nhmex_uncore_sbox_ops = {
	NHMEX_UNCORE_OPS_COMMON_INIT(),
	.enable_event		= nhmex_sbox_msr_enable_event,
	.hw_config		= nhmex_sbox_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};

static struct intel_uncore_type nhmex_uncore_sbox = {
	.name			= "sbox",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.event_ctl		= NHMEX_S0_MSR_PMON_CTL0,
	.perf_ctr		= NHMEX_S0_MSR_PMON_CTR0,
	.event_mask		= NHMEX_PMON_RAW_EVENT_MASK,
	.box_ctl		= NHMEX_S0_MSR_PMON_GLOBAL_CTL,
	.msr_offset		= NHMEX_S_MSR_OFFSET,
	.pair_ctr_ctl		= 1,
	.num_shared_regs	= 1,
	.ops			= &nhmex_uncore_sbox_ops,
	.format_group		= &nhmex_uncore_sbox_format_group
};

enum {
	EXTRA_REG_NHMEX_M_FILTER,
	EXTRA_REG_NHMEX_M_DSP,
	EXTRA_REG_NHMEX_M_ISS,
	EXTRA_REG_NHMEX_M_MAP,
	EXTRA_REG_NHMEX_M_MSC_THR,
	EXTRA_REG_NHMEX_M_PGT,
	EXTRA_REG_NHMEX_M_PLD,
	EXTRA_REG_NHMEX_M_ZDP_CTL_FVC,
};
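
/*
 * One shared register is allocated per entry of this enum, which is why
 * nhmex_uncore_mbox.num_shared_regs below is 8. The ZDP_CTL_FVC entry
 * ref-counts each of its four event fields in a separate byte of er->ref.
 */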

static struct extra_reg nhmex_uncore_mbox_extra_regs[] = {
	MBOX_INC_SEL_EXTRA_REG(0x0, DSP),
	MBOX_INC_SEL_EXTRA_REG(0x4, MSC_THR),
	MBOX_INC_SEL_EXTRA_REG(0x5, MSC_THR),
	MBOX_INC_SEL_EXTRA_REG(0x9, ISS),
	/* event 0xa uses two extra registers */
	MBOX_INC_SEL_EXTRA_REG(0xa, ISS),
	MBOX_INC_SEL_EXTRA_REG(0xa, PLD),
	MBOX_INC_SEL_EXTRA_REG(0xb, PLD),
	/* events 0xd ~ 0x10 use the same extra register */
	MBOX_INC_SEL_EXTRA_REG(0xd, ZDP_CTL_FVC),
	MBOX_INC_SEL_EXTRA_REG(0xe, ZDP_CTL_FVC),
	MBOX_INC_SEL_EXTRA_REG(0xf, ZDP_CTL_FVC),
	MBOX_INC_SEL_EXTRA_REG(0x10, ZDP_CTL_FVC),
	MBOX_INC_SEL_EXTRA_REG(0x16, PGT),
	MBOX_SET_FLAG_SEL_EXTRA_REG(0x0, DSP),
	MBOX_SET_FLAG_SEL_EXTRA_REG(0x1, ISS),
	MBOX_SET_FLAG_SEL_EXTRA_REG(0x5, PGT),
	MBOX_SET_FLAG_SEL_EXTRA_REG(0x6, MAP),
	EVENT_EXTRA_END
};

/* Nehalem-EX or Westmere-EX ? */
static bool uncore_nhmex;

static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 config)
{
	struct intel_uncore_extra_reg *er;
	unsigned long flags;
	bool ret = false;
	u64 mask;

	if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
		er = &box->shared_regs[idx];
		raw_spin_lock_irqsave(&er->lock, flags);
		if (!atomic_read(&er->ref) || er->config == config) {
			atomic_inc(&er->ref);
			er->config = config;
			ret = true;
		}
		raw_spin_unlock_irqrestore(&er->lock, flags);

		return ret;
	}
	/*
	 * The ZDP_CTL_FVC MSR has 4 fields which are used to control
	 * events 0xd ~ 0x10. Besides these 4 fields, there are additional
	 * fields which are shared.
	 */
	idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
	if (WARN_ON_ONCE(idx >= 4))
		return false;

	/* mask of the shared fields */
	if (uncore_nhmex)
		mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK;
	else
		mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK;
	er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];

	raw_spin_lock_irqsave(&er->lock, flags);
	/* add mask of the non-shared field if it's in use */
	if (__BITS_VALUE(atomic_read(&er->ref), idx, 8)) {
		if (uncore_nhmex)
			mask |= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
		else
			mask |= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
	}

	if (!atomic_read(&er->ref) || !((er->config ^ config) & mask)) {
		atomic_add(1 << (idx * 8), &er->ref);
		if (uncore_nhmex)
			mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK |
				NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
		else
			mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK |
				WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
		er->config &= ~mask;
		er->config |= (config & mask);
		ret = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	return ret;
}

static void nhmex_mbox_put_shared_reg(struct intel_uncore_box *box, int idx)
{
	struct intel_uncore_extra_reg *er;

	if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
		er = &box->shared_regs[idx];
		atomic_dec(&er->ref);
		return;
	}

	idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
	er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
	atomic_sub(1 << (idx * 8), &er->ref);
}

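/*
 * Events 0xd ~ 0x10 map to ZDP_CTL_FVC fields 0 ~ 3, 3 bits each.
 * Moving an event from field orig_idx to new_idx shifts its non-shared
 * event-select bits by 3 * (new_idx - orig_idx) and adjusts inc_sel by
 * the same distance, e.g. inc_sel 0xd -> 0xe shifts the config left by 3.
 */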
static u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	u64 idx, orig_idx = __BITS_VALUE(reg1->idx, 0, 8);
	u64 config = reg1->config;

	/* get the non-shared control bits and shift them */
	idx = orig_idx - EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
	if (uncore_nhmex)
		config &= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
	else
		config &= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
	if (new_idx > orig_idx) {
		idx = new_idx - orig_idx;
		config <<= 3 * idx;
	} else {
		idx = orig_idx - new_idx;
		config >>= 3 * idx;
	}

	/* add the shared control bits back */
	if (uncore_nhmex)
		config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
	else
		config |= WSMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
	if (modify) {
		/* adjust the main event selector */
		if (new_idx > orig_idx)
			hwc->config += idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
		else
			hwc->config -= idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
		reg1->config = config;
		reg1->idx = ~0xff | new_idx;
	}
	return config;
}

static struct event_constraint *
nhmex_mbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
	int i, idx[2], alloc = 0;
	u64 config1 = reg1->config;

	idx[0] = __BITS_VALUE(reg1->idx, 0, 8);
	idx[1] = __BITS_VALUE(reg1->idx, 1, 8);
again:
	for (i = 0; i < 2; i++) {
		if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
			idx[i] = 0xff;

		if (idx[i] == 0xff)
			continue;

		if (!nhmex_mbox_get_shared_reg(box, idx[i],
				__BITS_VALUE(config1, i, 32)))
			goto fail;
		alloc |= (0x1 << i);
	}

	/* for the match/mask registers */
	if (reg2->idx != EXTRA_REG_NONE &&
	    (uncore_box_is_fake(box) || !reg2->alloc) &&
	    !nhmex_mbox_get_shared_reg(box, reg2->idx, reg2->config))
		goto fail;

	/*
	 * If it's a fake box -- as per validate_{group,event}() we
	 * shouldn't touch event state and we can avoid doing so
	 * since both will only call get_event_constraints() once
	 * on each event, this avoids the need for reg->alloc.
	 */
	if (!uncore_box_is_fake(box)) {
		if (idx[0] != 0xff && idx[0] != __BITS_VALUE(reg1->idx, 0, 8))
			nhmex_mbox_alter_er(event, idx[0], true);
		reg1->alloc |= alloc;
		if (reg2->idx != EXTRA_REG_NONE)
			reg2->alloc = 1;
	}
	return NULL;
fail:
	if (idx[0] != 0xff && !(alloc & 0x1) &&
	    idx[0] >= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
		/*
		 * Events 0xd ~ 0x10 are functionally identical, but are
		 * controlled by different fields in the ZDP_CTL_FVC
		 * register. If we failed to take one field, try the
		 * remaining 3 choices.
		 */
		BUG_ON(__BITS_VALUE(reg1->idx, 1, 8) != 0xff);
		idx[0] -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
		idx[0] = (idx[0] + 1) % 4;
		idx[0] += EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
		if (idx[0] != __BITS_VALUE(reg1->idx, 0, 8)) {
			config1 = nhmex_mbox_alter_er(event, idx[0], false);
			goto again;
		}
	}

	if (alloc & 0x1)
		nhmex_mbox_put_shared_reg(box, idx[0]);
	if (alloc & 0x2)
		nhmex_mbox_put_shared_reg(box, idx[1]);
	return &uncore_constraint_empty;
}

static void nhmex_mbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;

	if (uncore_box_is_fake(box))
		return;

	if (reg1->alloc & 0x1)
		nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 0, 8));
	if (reg1->alloc & 0x2)
		nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 1, 8));
	reg1->alloc = 0;

	if (reg2->alloc) {
		nhmex_mbox_put_shared_reg(box, reg2->idx);
		reg2->alloc = 0;
	}
}

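/*
 * For example, the ZDP_CTL_FVC entry for inc_sel 0xf resolves to
 * EXTRA_REG_NHMEX_M_ZDP_CTL_FVC + (0xf - 0xd), i.e. FVC field 2.
 */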
static int nhmex_mbox_extra_reg_idx(struct extra_reg *er)
{
	if (er->idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
		return er->idx;
	return er->idx + (er->event >> NHMEX_M_PMON_CTL_INC_SEL_SHIFT) - 0xd;
}

static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_type *type = box->pmu->type;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
	struct extra_reg *er;
	unsigned msr;
	int reg_idx = 0;
	/*
	 * The mbox events may require at most 2 extra MSRs. But only
	 * the lower 32 bits in these MSRs are significant, so we can
	 * use config1 to pass the configs of both MSRs.
	 */
	for (er = nhmex_uncore_mbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		if (event->attr.config1 & ~er->valid_mask)
			return -EINVAL;

		msr = er->msr + type->msr_offset * box->pmu->pmu_idx;
		if (WARN_ON_ONCE(msr >= 0xffff || er->idx >= 0xff))
			return -EINVAL;

		/* always use the 32~63 bits to pass the PLD config */
		if (er->idx == EXTRA_REG_NHMEX_M_PLD)
			reg_idx = 1;
		else if (WARN_ON_ONCE(reg_idx > 0))
			return -EINVAL;

		reg1->idx &= ~(0xff << (reg_idx * 8));
		reg1->reg &= ~(0xffff << (reg_idx * 16));
		reg1->idx |= nhmex_mbox_extra_reg_idx(er) << (reg_idx * 8);
		reg1->reg |= msr << (reg_idx * 16);
		reg1->config = event->attr.config1;
		reg_idx++;
	}
	/*
	 * The mbox only provides the ability to perform address matching
	 * for the PLD events.
	 */
	if (reg_idx == 2) {
		reg2->idx = EXTRA_REG_NHMEX_M_FILTER;
		if (event->attr.config2 & NHMEX_M_PMON_MM_CFG_EN)
			reg2->config = event->attr.config2;
		else
			reg2->config = ~0ULL;
		if (box->pmu->pmu_idx == 0)
			reg2->reg = NHMEX_M0_MSR_PMU_MM_CFG;
		else
			reg2->reg = NHMEX_M1_MSR_PMU_MM_CFG;
	}
	return 0;
}

static u64 nhmex_mbox_shared_reg_config(struct intel_uncore_box *box, int idx)
{
	struct intel_uncore_extra_reg *er;
	unsigned long flags;
	u64 config;

	if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
		return box->shared_regs[idx].config;

	er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
	raw_spin_lock_irqsave(&er->lock, flags);
	config = er->config;
	raw_spin_unlock_irqrestore(&er->lock, flags);
	return config;
}

static void nhmex_mbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
	int idx;

	idx = __BITS_VALUE(reg1->idx, 0, 8);
	if (idx != 0xff)
		wrmsrl(__BITS_VALUE(reg1->reg, 0, 16),
			nhmex_mbox_shared_reg_config(box, idx));
	idx = __BITS_VALUE(reg1->idx, 1, 8);
	if (idx != 0xff)
		wrmsrl(__BITS_VALUE(reg1->reg, 1, 16),
			nhmex_mbox_shared_reg_config(box, idx));

	if (reg2->idx != EXTRA_REG_NONE) {
		wrmsrl(reg2->reg, 0);
		if (reg2->config != ~0ULL) {
			wrmsrl(reg2->reg + 1,
				reg2->config & NHMEX_M_PMON_ADDR_MATCH_MASK);
			wrmsrl(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK &
				(reg2->config >> NHMEX_M_PMON_ADDR_MASK_SHIFT));
			wrmsrl(reg2->reg, NHMEX_M_PMON_MM_CFG_EN);
		}
	}

	wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
}

DEFINE_UNCORE_FORMAT_ATTR(count_mode,		count_mode,	"config:2-3");
DEFINE_UNCORE_FORMAT_ATTR(storage_mode,		storage_mode,	"config:4-5");
DEFINE_UNCORE_FORMAT_ATTR(wrap_mode,		wrap_mode,	"config:6");
DEFINE_UNCORE_FORMAT_ATTR(flag_mode,		flag_mode,	"config:7");
DEFINE_UNCORE_FORMAT_ATTR(inc_sel,		inc_sel,	"config:9-13");
DEFINE_UNCORE_FORMAT_ATTR(set_flag_sel,		set_flag_sel,	"config:19-21");
DEFINE_UNCORE_FORMAT_ATTR(filter_cfg_en,	filter_cfg_en,	"config2:63");
DEFINE_UNCORE_FORMAT_ATTR(filter_match,		filter_match,	"config2:0-33");
DEFINE_UNCORE_FORMAT_ATTR(filter_mask,		filter_mask,	"config2:34-61");
DEFINE_UNCORE_FORMAT_ATTR(dsp,			dsp,		"config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(thr,			thr,		"config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(fvc,			fvc,		"config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(pgt,			pgt,		"config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(map,			map,		"config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(iss,			iss,		"config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(pld,			pld,		"config1:32-63");

static struct attribute *nhmex_uncore_mbox_formats_attr[] = {
	&format_attr_count_mode.attr,
	&format_attr_storage_mode.attr,
	&format_attr_wrap_mode.attr,
	&format_attr_flag_mode.attr,
	&format_attr_inc_sel.attr,
	&format_attr_set_flag_sel.attr,
	&format_attr_filter_cfg_en.attr,
	&format_attr_filter_match.attr,
	&format_attr_filter_mask.attr,
	&format_attr_dsp.attr,
	&format_attr_thr.attr,
	&format_attr_fvc.attr,
	&format_attr_pgt.attr,
	&format_attr_map.attr,
	&format_attr_iss.attr,
	&format_attr_pld.attr,
	NULL,
};

static const struct attribute_group nhmex_uncore_mbox_format_group = {
	.name		= "format",
	.attrs		= nhmex_uncore_mbox_formats_attr,
};

static struct uncore_event_desc nhmex_uncore_mbox_events[] = {
	INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x2800"),
	INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x2820"),
	{ /* end: all zeroes */ },
};

static struct uncore_event_desc wsmex_uncore_mbox_events[] = {
	INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x5000"),
	INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x5040"),
	{ /* end: all zeroes */ },
};

static struct intel_uncore_ops nhmex_uncore_mbox_ops = {
	NHMEX_UNCORE_OPS_COMMON_INIT(),
	.enable_event	= nhmex_mbox_msr_enable_event,
	.hw_config	= nhmex_mbox_hw_config,
	.get_constraint	= nhmex_mbox_get_constraint,
	.put_constraint	= nhmex_mbox_put_constraint,
};

static struct intel_uncore_type nhmex_uncore_mbox = {
	.name			= "mbox",
	.num_counters		= 6,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.event_ctl		= NHMEX_M0_MSR_PMU_CTL0,
	.perf_ctr		= NHMEX_M0_MSR_PMU_CNT0,
	.event_mask		= NHMEX_M_PMON_RAW_EVENT_MASK,
	.box_ctl		= NHMEX_M0_MSR_GLOBAL_CTL,
	.msr_offset		= NHMEX_M_MSR_OFFSET,
	.pair_ctr_ctl		= 1,
	.num_shared_regs	= 8,
	.event_descs		= nhmex_uncore_mbox_events,
	.ops			= &nhmex_uncore_mbox_ops,
	.format_group		= &nhmex_uncore_mbox_format_group,
};

static void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	/* adjust the main event selector and extra register index */
	if (reg1->idx % 2) {
		reg1->idx--;
		hwc->config -= 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
	} else {
		reg1->idx++;
		hwc->config += 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
	}

	/* adjust extra register config */
	switch (reg1->idx % 6) {
	case 2:
		/* shift the 8~15 bits to the 0~7 bits */
		reg1->config >>= 8;
		break;
	case 3:
		/* shift the 0~7 bits to the 8~15 bits */
		reg1->config <<= 8;
		break;
	}
}

/*
 * Each Rbox has 4 event sets, which monitor QPI ports 0~3 or 4~7.
 * An event set consists of 6 events; the 3rd and 4th events in an
 * event set use the same extra register, so an event set uses 5
 * extra registers.
 */
static struct event_constraint *
nhmex_rbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
	struct intel_uncore_extra_reg *er;
	unsigned long flags;
	int idx, er_idx;
	u64 config1;
	bool ok = false;

	if (!uncore_box_is_fake(box) && reg1->alloc)
		return NULL;

	idx = reg1->idx % 6;
	config1 = reg1->config;
again:
	er_idx = idx;
	/* the 3rd and 4th events use the same extra register */
	if (er_idx > 2)
		er_idx--;
	er_idx += (reg1->idx / 6) * 5;

	er = &box->shared_regs[er_idx];
	raw_spin_lock_irqsave(&er->lock, flags);
	if (idx < 2) {
		if (!atomic_read(&er->ref) || er->config == reg1->config) {
			atomic_inc(&er->ref);
			er->config = reg1->config;
			ok = true;
		}
	} else if (idx == 2 || idx == 3) {
		/*
		 * These two events use different fields in an extra
		 * register, bits 0~7 and bits 8~15 respectively.
		 */
		u64 mask = 0xff << ((idx - 2) * 8);
		if (!__BITS_VALUE(atomic_read(&er->ref), idx - 2, 8) ||
				!((er->config ^ config1) & mask)) {
			atomic_add(1 << ((idx - 2) * 8), &er->ref);
			er->config &= ~mask;
			er->config |= config1 & mask;
			ok = true;
		}
	} else {
		if (!atomic_read(&er->ref) ||
				(er->config == (hwc->config >> 32) &&
				 er->config1 == reg1->config &&
				 er->config2 == reg2->config)) {
			atomic_inc(&er->ref);
			er->config = (hwc->config >> 32);
			er->config1 = reg1->config;
			er->config2 = reg2->config;
			ok = true;
		}
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (!ok) {
		/*
		 * The Rbox events always come in pairs. The paired
		 * events are functionally identical, but use different
		 * extra registers. If we failed to take an extra
		 * register, try the alternative.
		 */
		idx ^= 1;
		if (idx != reg1->idx % 6) {
			if (idx == 2)
				config1 >>= 8;
			else if (idx == 3)
				config1 <<= 8;
			goto again;
		}
	} else {
		if (!uncore_box_is_fake(box)) {
			if (idx != reg1->idx % 6)
				nhmex_rbox_alter_er(box, event);
			reg1->alloc = 1;
		}
		return NULL;
	}
	return &uncore_constraint_empty;
}

static void nhmex_rbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_extra_reg *er;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	int idx, er_idx;

	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	idx = reg1->idx % 6;
	er_idx = idx;
	if (er_idx > 2)
		er_idx--;
	er_idx += (reg1->idx / 6) * 5;

	er = &box->shared_regs[er_idx];
	if (idx == 2 || idx == 3)
		atomic_sub(1 << ((idx - 2) * 8), &er->ref);
	else
		atomic_dec(&er->ref);

	reg1->alloc = 0;
}

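/*
 * The Rbox event select (bits 1-5) encodes both the event set and the
 * event within the set: idx = set * 6 + event. Four sets of six events
 * give the valid range 0 ~ 0x17.
 */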
static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
	int idx;

	idx = (event->hw.config & NHMEX_R_PMON_CTL_EV_SEL_MASK) >>
		NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
	if (idx >= 0x18)
		return -EINVAL;

	reg1->idx = idx;
	reg1->config = event->attr.config1;

	switch (idx % 6) {
	case 4:
	case 5:
		hwc->config |= event->attr.config & (~0ULL << 32);
		reg2->config = event->attr.config2;
		break;
	}
	return 0;
}

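/*
 * Each Rbox instance drives four ports, so the global port number is
 * idx / 6 + pmu_idx * 4, e.g. idx 13 on the second Rbox selects port 6.
 */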
static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
	int idx, port;

	idx = reg1->idx;
	port = idx / 6 + box->pmu->pmu_idx * 4;

	switch (idx % 6) {
	case 0:
		wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG0(port), reg1->config);
		break;
	case 1:
		wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG1(port), reg1->config);
		break;
	case 2:
	case 3:
		wrmsrl(NHMEX_R_MSR_PORTN_QLX_CFG(port),
			uncore_shared_reg_config(box, 2 + (idx / 6) * 5));
		break;
	case 4:
		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port),
			hwc->config >> 32);
		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(port), reg1->config);
		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MASK(port), reg2->config);
		break;
	case 5:
		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port),
			hwc->config >> 32);
		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(port), reg1->config);
		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MASK(port), reg2->config);
		break;
	}

	wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
		(hwc->config & NHMEX_R_PMON_CTL_EV_SEL_MASK));
}

DEFINE_UNCORE_FORMAT_ATTR(xbr_mm_cfg, xbr_mm_cfg, "config:32-63");
DEFINE_UNCORE_FORMAT_ATTR(xbr_match, xbr_match, "config1:0-63");
DEFINE_UNCORE_FORMAT_ATTR(xbr_mask, xbr_mask, "config2:0-63");
DEFINE_UNCORE_FORMAT_ATTR(qlx_cfg, qlx_cfg, "config1:0-15");
DEFINE_UNCORE_FORMAT_ATTR(iperf_cfg, iperf_cfg, "config1:0-31");

static struct attribute *nhmex_uncore_rbox_formats_attr[] = {
	&format_attr_event5.attr,
	&format_attr_xbr_mm_cfg.attr,
	&format_attr_xbr_match.attr,
	&format_attr_xbr_mask.attr,
	&format_attr_qlx_cfg.attr,
	&format_attr_iperf_cfg.attr,
	NULL,
};

static const struct attribute_group nhmex_uncore_rbox_format_group = {
	.name = "format",
	.attrs = nhmex_uncore_rbox_formats_attr,
};

static struct uncore_event_desc nhmex_uncore_rbox_events[] = {
	INTEL_UNCORE_EVENT_DESC(qpi0_flit_send,		"event=0x0,iperf_cfg=0x80000000"),
	INTEL_UNCORE_EVENT_DESC(qpi1_flit_send,		"event=0x6,iperf_cfg=0x80000000"),
	INTEL_UNCORE_EVENT_DESC(qpi0_idle_filt,		"event=0x0,iperf_cfg=0x40000000"),
	INTEL_UNCORE_EVENT_DESC(qpi1_idle_filt,		"event=0x6,iperf_cfg=0x40000000"),
	INTEL_UNCORE_EVENT_DESC(qpi0_data_response,	"event=0x0,iperf_cfg=0xc4"),
	INTEL_UNCORE_EVENT_DESC(qpi1_data_response,	"event=0x6,iperf_cfg=0xc4"),
	{ /* end: all zeroes */ },
};

static struct intel_uncore_ops nhmex_uncore_rbox_ops = {
	NHMEX_UNCORE_OPS_COMMON_INIT(),
	.enable_event		= nhmex_rbox_msr_enable_event,
	.hw_config		= nhmex_rbox_hw_config,
	.get_constraint		= nhmex_rbox_get_constraint,
	.put_constraint		= nhmex_rbox_put_constraint,
};

static struct intel_uncore_type nhmex_uncore_rbox = {
	.name			= "rbox",
	.num_counters		= 8,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.event_ctl		= NHMEX_R_MSR_PMON_CTL0,
	.perf_ctr		= NHMEX_R_MSR_PMON_CNT0,
	.event_mask		= NHMEX_R_PMON_RAW_EVENT_MASK,
	.box_ctl		= NHMEX_R_MSR_GLOBAL_CTL,
	.msr_offset		= NHMEX_R_MSR_OFFSET,
	.pair_ctr_ctl		= 1,
	.num_shared_regs	= 20,
	.event_descs		= nhmex_uncore_rbox_events,
	.ops			= &nhmex_uncore_rbox_ops,
	.format_group		= &nhmex_uncore_rbox_format_group
};

static struct intel_uncore_type *nhmex_msr_uncores[] = {
	&nhmex_uncore_ubox,
	&nhmex_uncore_cbox,
	&nhmex_uncore_bbox,
	&nhmex_uncore_sbox,
	&nhmex_uncore_mbox,
	&nhmex_uncore_rbox,
	&nhmex_uncore_wbox,
	NULL,
};

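/*
 * Model 46 (0x2e) is Nehalem-EX; this init path is also used for
 * Westmere-EX (model 47), which takes the WSM-EX mbox event encodings.
 */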
void nhmex_uncore_cpu_init(void)
{
	if (boot_cpu_data.x86_model == 46)
		uncore_nhmex = true;
	else
		nhmex_uncore_mbox.event_descs = wsmex_uncore_mbox_events;
	if (nhmex_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		nhmex_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	uncore_msr_uncores = nhmex_msr_uncores;
}
/* end of Nehalem-EX uncore support */