// SPDX-License-Identifier: GPL-2.0
/* Nehalem/Sandy Bridge/Haswell/Broadwell/Skylake uncore support */
#include "uncore.h"

/* Uncore IMC PCI IDs */
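/*
 * Note: on these client parts the IMC is part of the host bridge at PCI
 * 0000:00:00.0; the device ID identifies the SKU and selects the matching
 * pci_driver table further down.
 */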
#define PCI_DEVICE_ID_INTEL_SNB_IMC	0x0100
#define PCI_DEVICE_ID_INTEL_IVB_IMC	0x0154
#define PCI_DEVICE_ID_INTEL_IVB_E3_IMC	0x0150
#define PCI_DEVICE_ID_INTEL_HSW_IMC	0x0c00
#define PCI_DEVICE_ID_INTEL_HSW_U_IMC	0x0a04
#define PCI_DEVICE_ID_INTEL_BDW_IMC	0x1604
#define PCI_DEVICE_ID_INTEL_SKL_U_IMC	0x1904
#define PCI_DEVICE_ID_INTEL_SKL_Y_IMC	0x190c
#define PCI_DEVICE_ID_INTEL_SKL_HD_IMC	0x1900
#define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC	0x1910
#define PCI_DEVICE_ID_INTEL_SKL_SD_IMC	0x190f
#define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC	0x191f

/* SNB event control */
#define SNB_UNC_CTL_EV_SEL_MASK			0x000000ff
#define SNB_UNC_CTL_UMASK_MASK			0x0000ff00
#define SNB_UNC_CTL_EDGE_DET			(1 << 18)
#define SNB_UNC_CTL_EN				(1 << 22)
#define SNB_UNC_CTL_INVERT			(1 << 23)
#define SNB_UNC_CTL_CMASK_MASK			0x1f000000
#define NHM_UNC_CTL_CMASK_MASK			0xff000000
#define NHM_UNC_FIXED_CTR_CTL_EN		(1 << 0)

#define SNB_UNC_RAW_EVENT_MASK			(SNB_UNC_CTL_EV_SEL_MASK | \
						 SNB_UNC_CTL_UMASK_MASK | \
						 SNB_UNC_CTL_EDGE_DET | \
						 SNB_UNC_CTL_INVERT | \
						 SNB_UNC_CTL_CMASK_MASK)

#define NHM_UNC_RAW_EVENT_MASK			(SNB_UNC_CTL_EV_SEL_MASK | \
						 SNB_UNC_CTL_UMASK_MASK | \
						 SNB_UNC_CTL_EDGE_DET | \
						 SNB_UNC_CTL_INVERT | \
						 NHM_UNC_CTL_CMASK_MASK)
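
/*
 * Worked example (illustrative): a raw event with event=0x80, umask=0x01 is
 * encoded as config = 0x80 | (0x01 << 8) = 0x0180; the enable routines below
 * OR in SNB_UNC_CTL_EN (bit 22) when the counter is started.
 */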

/* SNB global control register */
#define SNB_UNC_PERF_GLOBAL_CTL                 0x391
#define SNB_UNC_FIXED_CTR_CTRL                  0x394
#define SNB_UNC_FIXED_CTR                       0x395

/* SNB uncore global control */
#define SNB_UNC_GLOBAL_CTL_CORE_ALL             ((1 << 4) - 1)
#define SNB_UNC_GLOBAL_CTL_EN                   (1 << 29)

/* SNB Cbo register */
#define SNB_UNC_CBO_0_PERFEVTSEL0               0x700
#define SNB_UNC_CBO_0_PER_CTR0                  0x706
#define SNB_UNC_CBO_MSR_OFFSET                  0x10

/* SNB ARB register */
#define SNB_UNC_ARB_PER_CTR0			0x3b0
#define SNB_UNC_ARB_PERFEVTSEL0			0x3b2
#define SNB_UNC_ARB_MSR_OFFSET			0x10

/* NHM global control register */
#define NHM_UNC_PERF_GLOBAL_CTL                 0x391
#define NHM_UNC_FIXED_CTR                       0x394
#define NHM_UNC_FIXED_CTR_CTRL                  0x395

/* NHM uncore global control */
#define NHM_UNC_GLOBAL_CTL_EN_PC_ALL            ((1ULL << 8) - 1)
#define NHM_UNC_GLOBAL_CTL_EN_FC                (1ULL << 32)

/* NHM uncore register */
#define NHM_UNC_PERFEVTSEL0                     0x3c0
#define NHM_UNC_UNCORE_PMC0                     0x3b0

/* SKL uncore global control */
#define SKL_UNC_PERF_GLOBAL_CTL			0xe01
#define SKL_UNC_GLOBAL_CTL_CORE_ALL		((1 << 5) - 1)
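/* Bits 0-4: presumably one enable bit per C-box slice; SKL client exposes up to five. */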

DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");
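/*
 * These attributes surface as sysfs "format" files, e.g.
 * /sys/bus/event_source/devices/uncore_cbox_0/format/event, which is how
 * perf tooling learns to parse strings like "uncore_cbox_0/event=0x80/".
 */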

/* Sandy Bridge uncore support */
static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

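	/*
	 * General-purpose counters carry the full event encoding; the fixed
	 * (clockticks) counter only needs its enable bit set.
	 */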
	if (hwc->idx < UNCORE_PMC_IDX_FIXED)
		wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
	else
		wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
}

static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	wrmsrl(event->hw.config_base, 0);
}

static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0) {
		wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
			SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
	}
}

static void snb_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
		SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
}

static void snb_uncore_msr_exit_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(SNB_UNC_PERF_GLOBAL_CTL, 0);
}

static struct uncore_event_desc snb_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
	{ /* end: all zeroes */ },
};
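
/*
 * Illustrative usage (not part of this file): once registered, the fixed
 * clockticks event can be counted system-wide with e.g.
 *	perf stat -a -e uncore_cbox_0/clockticks/ -- sleep 1
 */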

static struct attribute *snb_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask5.attr,
	NULL,
};

static const struct attribute_group snb_uncore_format_group = {
	.name		= "format",
	.attrs		= snb_uncore_formats_attr,
};

static struct intel_uncore_ops snb_uncore_msr_ops = {
	.init_box	= snb_uncore_msr_init_box,
	.enable_box	= snb_uncore_msr_enable_box,
	.exit_box	= snb_uncore_msr_exit_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

static struct event_constraint snb_uncore_arb_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type snb_uncore_cbox = {
	.name		= "cbox",
	.num_counters   = 2,
	.num_boxes	= 4,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNB_UNC_CBO_0_PER_CTR0,
	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
	.fixed_ctr	= SNB_UNC_FIXED_CTR,
	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
	.ops		= &snb_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
	.event_descs	= snb_uncore_events,
};

static struct intel_uncore_type snb_uncore_arb = {
	.name		= "arb",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.perf_ctr	= SNB_UNC_ARB_PER_CTR0,
	.event_ctl	= SNB_UNC_ARB_PERFEVTSEL0,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_ARB_MSR_OFFSET,
	.constraints	= snb_uncore_arb_constraints,
	.ops		= &snb_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
};

static struct intel_uncore_type *snb_msr_uncores[] = {
	&snb_uncore_cbox,
	&snb_uncore_arb,
	NULL,
};

void snb_uncore_cpu_init(void)
{
	uncore_msr_uncores = snb_msr_uncores;
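	/* There is one C-box per core; never expose more boxes than cores. */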
	if (snb_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		snb_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
}

static void skl_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0) {
		wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
			SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
	}
}

static void skl_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
		SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
}

static void skl_uncore_msr_exit_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, 0);
}

static struct intel_uncore_ops skl_uncore_msr_ops = {
	.init_box	= skl_uncore_msr_init_box,
	.enable_box	= skl_uncore_msr_enable_box,
	.exit_box	= skl_uncore_msr_exit_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

static struct intel_uncore_type skl_uncore_cbox = {
	.name		= "cbox",
	.num_counters   = 4,
	.num_boxes	= 5,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNB_UNC_CBO_0_PER_CTR0,
	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
	.fixed_ctr	= SNB_UNC_FIXED_CTR,
	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
	.ops		= &skl_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
	.event_descs	= snb_uncore_events,
};

static struct intel_uncore_type *skl_msr_uncores[] = {
	&skl_uncore_cbox,
	&snb_uncore_arb,
	NULL,
};

void skl_uncore_cpu_init(void)
{
	uncore_msr_uncores = skl_msr_uncores;
	if (skl_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		skl_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
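	/*
	 * The ARB counter MSRs are unchanged from SNB, but the global enable
	 * moved to SKL_UNC_PERF_GLOBAL_CTL, so the ARB must use the SKL ops.
	 */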
	snb_uncore_arb.ops = &skl_uncore_msr_ops;
}

enum {
	SNB_PCI_UNCORE_IMC,
};

static struct uncore_event_desc snb_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(data_reads,  "event=0x01"),
	INTEL_UNCORE_EVENT_DESC(data_reads.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_reads.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(data_writes, "event=0x02"),
	INTEL_UNCORE_EVENT_DESC(data_writes.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_writes.unit, "MiB"),

	{ /* end: all zeroes */ },
};
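
/*
 * Each IMC counter increment represents one 64-byte cache line, so the
 * scale is 64 / 2^20 = 6.103515625e-5, converting raw counts to MiB.
 */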

#define SNB_UNCORE_PCI_IMC_EVENT_MASK		0xff
#define SNB_UNCORE_PCI_IMC_BAR_OFFSET		0x48

/* page size multiple covering all config regs */
#define SNB_UNCORE_PCI_IMC_MAP_SIZE		0x6000

#define SNB_UNCORE_PCI_IMC_DATA_READS		0x1
#define SNB_UNCORE_PCI_IMC_DATA_READS_BASE	0x5050
#define SNB_UNCORE_PCI_IMC_DATA_WRITES		0x2
#define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE	0x5054
#define SNB_UNCORE_PCI_IMC_CTR_BASE		SNB_UNCORE_PCI_IMC_DATA_READS_BASE

enum perf_snb_uncore_imc_freerunning_types {
	SNB_PCI_UNCORE_IMC_DATA		= 0,
	SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX,
};

static struct freerunning_counters snb_uncore_imc_freerunning[] = {
	[SNB_PCI_UNCORE_IMC_DATA]     = { SNB_UNCORE_PCI_IMC_DATA_READS_BASE, 0x4, 0x0, 2, 32 },
};
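/*
 * The initializer above is { counter base, counter offset, box offset,
 * number of counters, counter width }: two 32-bit free-running counters,
 * reads at MMIO offset 0x5050 and writes 0x4 bytes above it at 0x5054.
 */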

static struct attribute *snb_uncore_imc_formats_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static const struct attribute_group snb_uncore_imc_format_group = {
	.name = "format",
	.attrs = snb_uncore_imc_formats_attr,
};

static void snb_uncore_imc_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int where = SNB_UNCORE_PCI_IMC_BAR_OFFSET;
	resource_size_t addr;
	u32 pci_dword;

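	/*
	 * The free-running counters sit in MMIO space behind a 64-bit BAR at
	 * config offset 0x48 in the host bridge; read it, mask the base down
	 * to a page boundary and map enough to cover all counter registers.
	 */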
	pci_read_config_dword(pdev, where, &pci_dword);
	addr = pci_dword;

#ifdef CONFIG_PHYS_ADDR_T_64BIT
	pci_read_config_dword(pdev, where + 4, &pci_dword);
	addr |= ((resource_size_t)pci_dword << 32);
#endif

	addr &= ~(PAGE_SIZE - 1);

	box->io_addr = ioremap(addr, SNB_UNCORE_PCI_IMC_MAP_SIZE);
	box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL;
}

static void snb_uncore_imc_exit_box(struct intel_uncore_box *box)
{
	iounmap(box->io_addr);
}

static void snb_uncore_imc_enable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_disable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{}

static void snb_uncore_imc_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{}

static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

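	/*
	 * A plain 32-bit MMIO read; the counters are free running and wrap,
	 * which the periodic uncore hrtimer (set up in init_box) absorbs.
	 */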
	return (u64)*(unsigned int *)(box->io_addr + hwc->event_base);
}

/*
 * Keep the custom event_init() function compatible with old event
 * encoding for free running counters.
 */
static int snb_uncore_imc_event_init(struct perf_event *event)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	struct hw_perf_event *hwc = &event->hw;
	u64 cfg = event->attr.config & SNB_UNCORE_PCI_IMC_EVENT_MASK;
	int idx, base;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	pmu = uncore_event_to_pmu(event);
	/* no device found for this pmu */
	if (pmu->func_id < 0)
		return -ENOENT;

	/* Sampling not supported yet */
	if (hwc->sample_period)
		return -EINVAL;

	/* unsupported modes and filters */
	if (event->attr.exclude_user   ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv     ||
	    event->attr.exclude_idle   ||
	    event->attr.exclude_host   ||
	    event->attr.exclude_guest  ||
	    event->attr.sample_period) /* no sampling */
		return -EINVAL;

	/*
	 * Place all uncore events for a particular physical package
	 * onto a single cpu
	 */
	if (event->cpu < 0)
		return -EINVAL;

	/* check only supported bits are set */
	if (event->attr.config & ~SNB_UNCORE_PCI_IMC_EVENT_MASK)
		return -EINVAL;

	box = uncore_pmu_to_box(pmu, event->cpu);
	if (!box || box->cpu < 0)
		return -EINVAL;

	event->cpu = box->cpu;
	event->pmu_private = box;

	event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
	event->hw.extra_reg.idx = EXTRA_REG_NONE;
	event->hw.branch_reg.idx = EXTRA_REG_NONE;
	/*
	 * check event is known (whitelist, determines counter)
	 */
	switch (cfg) {
	case SNB_UNCORE_PCI_IMC_DATA_READS:
		base = SNB_UNCORE_PCI_IMC_DATA_READS_BASE;
		idx = UNCORE_PMC_IDX_FREERUNNING;
		break;
	case SNB_UNCORE_PCI_IMC_DATA_WRITES:
		base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE;
		idx = UNCORE_PMC_IDX_FREERUNNING;
		break;
	default:
		return -EINVAL;
	}

	/* must be done before validate_group */
	event->hw.event_base = base;
	event->hw.config = cfg;
	event->hw.idx = idx;

	/* no group validation needed, we have free running counters */

	return 0;
}

static int snb_uncore_imc_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	return 0;
}

int snb_pci2phy_map_init(int devid)
{
	struct pci_dev *dev = NULL;
	struct pci2phy_map *map;
	int bus, segment;

	dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, dev);
	if (!dev)
		return -ENOTTY;

	bus = dev->bus->number;
	segment = pci_domain_nr(dev->bus);

	raw_spin_lock(&pci2phy_map_lock);
	map = __find_pci2phy_map(segment);
	if (!map) {
		raw_spin_unlock(&pci2phy_map_lock);
		pci_dev_put(dev);
		return -ENOMEM;
	}
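	/* Client parts are single-package, so the bus maps to physical id 0. */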
	map->pbus_to_physid[bus] = 0;
	raw_spin_unlock(&pci2phy_map_lock);

	pci_dev_put(dev);

	return 0;
}

static struct pmu snb_uncore_imc_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= snb_uncore_imc_event_init,
	.add		= uncore_pmu_event_add,
	.del		= uncore_pmu_event_del,
	.start		= uncore_pmu_event_start,
	.stop		= uncore_pmu_event_stop,
	.read		= uncore_pmu_event_read,
};

static struct intel_uncore_ops snb_uncore_imc_ops = {
	.init_box	= snb_uncore_imc_init_box,
	.exit_box	= snb_uncore_imc_exit_box,
	.enable_box	= snb_uncore_imc_enable_box,
	.disable_box	= snb_uncore_imc_disable_box,
	.disable_event	= snb_uncore_imc_disable_event,
	.enable_event	= snb_uncore_imc_enable_event,
	.hw_config	= snb_uncore_imc_hw_config,
	.read_counter	= snb_uncore_imc_read_counter,
};

static struct intel_uncore_type snb_uncore_imc = {
	.name		= "imc",
	.num_counters   = 2,
	.num_boxes	= 1,
	.num_freerunning_types	= SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX,
	.freerunning	= snb_uncore_imc_freerunning,
	.event_descs	= snb_uncore_imc_events,
	.format_group	= &snb_uncore_imc_format_group,
	.ops		= &snb_uncore_imc_ops,
	.pmu		= &snb_uncore_imc_pmu,
};

static struct intel_uncore_type *snb_pci_uncores[] = {
	[SNB_PCI_UNCORE_IMC]	= &snb_uncore_imc,
	NULL,
};

static const struct pci_device_id snb_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SNB_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static const struct pci_device_id ivb_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_E3_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static const struct pci_device_id hsw_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static const struct pci_device_id bdw_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BDW_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static const struct pci_device_id skl_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_Y_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HD_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SD_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static struct pci_driver snb_uncore_pci_driver = {
	.name		= "snb_uncore",
	.id_table	= snb_uncore_pci_ids,
};

static struct pci_driver ivb_uncore_pci_driver = {
	.name		= "ivb_uncore",
	.id_table	= ivb_uncore_pci_ids,
};

static struct pci_driver hsw_uncore_pci_driver = {
	.name		= "hsw_uncore",
	.id_table	= hsw_uncore_pci_ids,
};

static struct pci_driver bdw_uncore_pci_driver = {
	.name		= "bdw_uncore",
	.id_table	= bdw_uncore_pci_ids,
};

static struct pci_driver skl_uncore_pci_driver = {
	.name		= "skl_uncore",
	.id_table	= skl_uncore_pci_ids,
};

struct imc_uncore_pci_dev {
	__u32 pci_id;
	struct pci_driver *driver;
};
#define IMC_DEV(a, d) \
	{ .pci_id = PCI_DEVICE_ID_INTEL_##a, .driver = (d) }

static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
	IMC_DEV(SNB_IMC, &snb_uncore_pci_driver),
	IMC_DEV(IVB_IMC, &ivb_uncore_pci_driver),    /* 3rd Gen Core processor */
	IMC_DEV(IVB_E3_IMC, &ivb_uncore_pci_driver), /* Xeon E3-1200 v2/3rd Gen Core processor */
	IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver),    /* 4th Gen Core Processor */
	IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver),  /* 4th Gen Core ULT Mobile Processor */
	IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver),    /* 5th Gen Core U */
	IMC_DEV(SKL_Y_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core Y */
	IMC_DEV(SKL_U_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core U */
	IMC_DEV(SKL_HD_IMC, &skl_uncore_pci_driver), /* 6th Gen Core H Dual Core */
	IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core H Quad Core */
	IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Dual Core */
	IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Quad Core */
	{  /* end marker */ }
};

#define for_each_imc_pci_id(x, t) \
	for (x = (t); (x)->pci_id; x++)

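/*
 * Probe the ID list in order; the first IMC device actually present in the
 * system determines which pci_driver gets registered.
 */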
static struct pci_driver *imc_uncore_find_dev(void)
{
	const struct imc_uncore_pci_dev *p;
	int ret;

	for_each_imc_pci_id(p, desktop_imc_pci_ids) {
		ret = snb_pci2phy_map_init(p->pci_id);
		if (ret == 0)
			return p->driver;
	}
	return NULL;
}

static int imc_uncore_pci_init(void)
{
	struct pci_driver *imc_drv = imc_uncore_find_dev();

	if (!imc_drv)
		return -ENODEV;

	uncore_pci_uncores = snb_pci_uncores;
	uncore_pci_driver = imc_drv;

	return 0;
}

int snb_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int ivb_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int hsw_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int bdw_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int skl_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

/* end of Sandy Bridge uncore support */

/* Nehalem uncore support */
static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
}

static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
}

static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

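	/*
	 * As on SNB: general-purpose counters carry the event encoding, while
	 * the fixed counter control only needs its own enable bit.
	 */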
	if (hwc->idx < UNCORE_PMC_IDX_FIXED)
		wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
	else
		wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
}

static struct attribute *nhm_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask8.attr,
	NULL,
};

static const struct attribute_group nhm_uncore_format_group = {
	.name = "format",
	.attrs = nhm_uncore_formats_attr,
};

static struct uncore_event_desc nhm_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,                "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any,       "event=0x2f,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any,      "event=0x2c,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads,     "event=0x20,umask=0x01"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes,    "event=0x20,umask=0x02"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads,  "event=0x20,umask=0x04"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads,   "event=0x20,umask=0x10"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes,  "event=0x20,umask=0x20"),
	{ /* end: all zeroes */ },
};

static struct intel_uncore_ops nhm_uncore_msr_ops = {
	.disable_box	= nhm_uncore_msr_disable_box,
	.enable_box	= nhm_uncore_msr_enable_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= nhm_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

static struct intel_uncore_type nhm_uncore = {
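	/* An empty name makes the generic code register this PMU as plain "uncore". */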
	.name		= "",
	.num_counters   = 8,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.event_ctl	= NHM_UNC_PERFEVTSEL0,
	.perf_ctr	= NHM_UNC_UNCORE_PMC0,
	.fixed_ctr	= NHM_UNC_FIXED_CTR,
	.fixed_ctl	= NHM_UNC_FIXED_CTR_CTRL,
	.event_mask	= NHM_UNC_RAW_EVENT_MASK,
	.event_descs	= nhm_uncore_events,
	.ops		= &nhm_uncore_msr_ops,
	.format_group	= &nhm_uncore_format_group,
};

static struct intel_uncore_type *nhm_msr_uncores[] = {
	&nhm_uncore,
	NULL,
};

void nhm_uncore_cpu_init(void)
{
	uncore_msr_uncores = nhm_msr_uncores;
}

/* end of Nehalem uncore support */
763