xref: /openbmc/linux/arch/x86/events/intel/uncore.c (revision f519f0be)
1 // SPDX-License-Identifier: GPL-2.0-only
2 #include <linux/module.h>
3 
4 #include <asm/cpu_device_id.h>
5 #include <asm/intel-family.h>
6 #include "uncore.h"
7 
8 static struct intel_uncore_type *empty_uncore[] = { NULL, };
9 struct intel_uncore_type **uncore_msr_uncores = empty_uncore;
10 struct intel_uncore_type **uncore_pci_uncores = empty_uncore;
11 
12 static bool pcidrv_registered;
13 struct pci_driver *uncore_pci_driver;
14 /* pci bus to socket mapping */
15 DEFINE_RAW_SPINLOCK(pci2phy_map_lock);
16 struct list_head pci2phy_map_head = LIST_HEAD_INIT(pci2phy_map_head);
17 struct pci_extra_dev *uncore_extra_pci_dev;
18 static int max_packages;
19 
20 /* mask of cpus that collect uncore events */
21 static cpumask_t uncore_cpu_mask;
22 
23 /* constraint for the fixed counter */
24 static struct event_constraint uncore_constraint_fixed =
25 	EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
26 struct event_constraint uncore_constraint_empty =
27 	EVENT_CONSTRAINT(0, 0, 0);
28 
29 MODULE_LICENSE("GPL");
30 
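/* Look up the physical socket id for a PCI bus in the pci2phy map; -1 if unknown. */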
31 static int uncore_pcibus_to_physid(struct pci_bus *bus)
32 {
33 	struct pci2phy_map *map;
34 	int phys_id = -1;
35 
36 	raw_spin_lock(&pci2phy_map_lock);
37 	list_for_each_entry(map, &pci2phy_map_head, list) {
38 		if (map->segment == pci_domain_nr(bus)) {
39 			phys_id = map->pbus_to_physid[bus->number];
40 			break;
41 		}
42 	}
43 	raw_spin_unlock(&pci2phy_map_lock);
44 
45 	return phys_id;
46 }
47 
48 static void uncore_free_pcibus_map(void)
49 {
50 	struct pci2phy_map *map, *tmp;
51 
52 	list_for_each_entry_safe(map, tmp, &pci2phy_map_head, list) {
53 		list_del(&map->list);
54 		kfree(map);
55 	}
56 }
57 
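/*
 * Find the pci2phy map for a PCI segment, allocating and inserting a new one
 * if none exists yet. pci2phy_map_lock must be held; it is dropped and
 * re-acquired around the allocation.
 */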
58 struct pci2phy_map *__find_pci2phy_map(int segment)
59 {
60 	struct pci2phy_map *map, *alloc = NULL;
61 	int i;
62 
63 	lockdep_assert_held(&pci2phy_map_lock);
64 
65 lookup:
66 	list_for_each_entry(map, &pci2phy_map_head, list) {
67 		if (map->segment == segment)
68 			goto end;
69 	}
70 
71 	if (!alloc) {
72 		raw_spin_unlock(&pci2phy_map_lock);
73 		alloc = kmalloc(sizeof(struct pci2phy_map), GFP_KERNEL);
74 		raw_spin_lock(&pci2phy_map_lock);
75 
76 		if (!alloc)
77 			return NULL;
78 
79 		goto lookup;
80 	}
81 
82 	map = alloc;
83 	alloc = NULL;
84 	map->segment = segment;
85 	for (i = 0; i < 256; i++)
86 		map->pbus_to_physid[i] = -1;
87 	list_add_tail(&map->list, &pci2phy_map_head);
88 
89 end:
90 	kfree(alloc);
91 	return map;
92 }
93 
94 ssize_t uncore_event_show(struct kobject *kobj,
95 			  struct kobj_attribute *attr, char *buf)
96 {
97 	struct uncore_event_desc *event =
98 		container_of(attr, struct uncore_event_desc, attr);
99 	return sprintf(buf, "%s", event->config);
100 }
101 
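/* Return this pmu's box for the cpu's package, or NULL if the package is not mapped. */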
102 struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
103 {
104 	unsigned int pkgid = topology_logical_package_id(cpu);
105 
106 	/*
107 	 * The unsigned check also catches the '-1' return value for
108 	 * non-existent mappings in the topology map.
109 	 */
110 	return pkgid < max_packages ? pmu->boxes[pkgid] : NULL;
111 }
112 
113 u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
114 {
115 	u64 count;
116 
117 	rdmsrl(event->hw.event_base, count);
118 
119 	return count;
120 }
121 
122 /*
123  * generic get constraint function for shared match/mask registers.
124  */
125 struct event_constraint *
126 uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
127 {
128 	struct intel_uncore_extra_reg *er;
129 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
130 	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
131 	unsigned long flags;
132 	bool ok = false;
133 
134 	/*
135 	 * reg->alloc can be set due to existing state, so for a fake box we
136 	 * need to ignore it; otherwise we might fail to allocate proper
137 	 * fake state for this extra reg constraint.
138 	 */
139 	if (reg1->idx == EXTRA_REG_NONE ||
140 	    (!uncore_box_is_fake(box) && reg1->alloc))
141 		return NULL;
142 
143 	er = &box->shared_regs[reg1->idx];
144 	raw_spin_lock_irqsave(&er->lock, flags);
145 	if (!atomic_read(&er->ref) ||
146 	    (er->config1 == reg1->config && er->config2 == reg2->config)) {
147 		atomic_inc(&er->ref);
148 		er->config1 = reg1->config;
149 		er->config2 = reg2->config;
150 		ok = true;
151 	}
152 	raw_spin_unlock_irqrestore(&er->lock, flags);
153 
154 	if (ok) {
155 		if (!uncore_box_is_fake(box))
156 			reg1->alloc = 1;
157 		return NULL;
158 	}
159 
160 	return &uncore_constraint_empty;
161 }
162 
163 void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
164 {
165 	struct intel_uncore_extra_reg *er;
166 	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
167 
168 	/*
169 	 * Only put the constraint if the extra reg was actually allocated.
170 	 * This also takes care of events which do not use an extra shared reg.
171 	 *
172 	 * Also, if this is a fake box we shouldn't touch any event state
173 	 * (reg->alloc) and we don't care about leaving inconsistent box
174 	 * state either, since it will be thrown out.
175 	 */
176 	if (uncore_box_is_fake(box) || !reg1->alloc)
177 		return;
178 
179 	er = &box->shared_regs[reg1->idx];
180 	atomic_dec(&er->ref);
181 	reg1->alloc = 0;
182 }
183 
184 u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
185 {
186 	struct intel_uncore_extra_reg *er;
187 	unsigned long flags;
188 	u64 config;
189 
190 	er = &box->shared_regs[idx];
191 
192 	raw_spin_lock_irqsave(&er->lock, flags);
193 	config = er->config;
194 	raw_spin_unlock_irqrestore(&er->lock, flags);
195 
196 	return config;
197 }
198 
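/* Point the event's control and counter base registers at the assigned counter @idx. */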
199 static void uncore_assign_hw_event(struct intel_uncore_box *box,
200 				   struct perf_event *event, int idx)
201 {
202 	struct hw_perf_event *hwc = &event->hw;
203 
204 	hwc->idx = idx;
205 	hwc->last_tag = ++box->tags[idx];
206 
207 	if (uncore_pmc_fixed(hwc->idx)) {
208 		hwc->event_base = uncore_fixed_ctr(box);
209 		hwc->config_base = uncore_fixed_ctl(box);
210 		return;
211 	}
212 
213 	hwc->config_base = uncore_event_ctl(box, hwc->idx);
214 	hwc->event_base  = uncore_perf_ctr(box, hwc->idx);
215 }
216 
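/*
 * Fold the current hardware counter value into event->count. The counter is
 * narrower than 64 bit, so both the previous and the new raw value are
 * shifted up by (64 - width) and the difference shifted back down; this
 * discards the unimplemented upper bits so that a counter wrap still yields
 * the correct positive delta.
 */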
217 void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event)
218 {
219 	u64 prev_count, new_count, delta;
220 	int shift;
221 
222 	if (uncore_pmc_freerunning(event->hw.idx))
223 		shift = 64 - uncore_freerunning_bits(box, event);
224 	else if (uncore_pmc_fixed(event->hw.idx))
225 		shift = 64 - uncore_fixed_ctr_bits(box);
226 	else
227 		shift = 64 - uncore_perf_ctr_bits(box);
228 
229 	/* the hrtimer might modify the previous event value */
230 again:
231 	prev_count = local64_read(&event->hw.prev_count);
232 	new_count = uncore_read_counter(box, event);
233 	if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
234 		goto again;
235 
236 	delta = (new_count << shift) - (prev_count << shift);
237 	delta >>= shift;
238 
239 	local64_add(delta, &event->count);
240 }
241 
242 /*
243  * The overflow interrupt is unavailable for SandyBridge-EP and broken
244  * for SandyBridge, so we use an hrtimer to periodically poll the counter
245  * to avoid overflow.
246  */
247 static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
248 {
249 	struct intel_uncore_box *box;
250 	struct perf_event *event;
251 	unsigned long flags;
252 	int bit;
253 
254 	box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
255 	if (!box->n_active || box->cpu != smp_processor_id())
256 		return HRTIMER_NORESTART;
257 	/*
258 	 * Disable local interrupts to prevent uncore_pmu_event_start/stop
259 	 * from interrupting the update process.
260 	 */
261 	local_irq_save(flags);
262 
263 	/*
264 	 * handle boxes with an active event list as opposed to active
265 	 * counters
266 	 */
267 	list_for_each_entry(event, &box->active_list, active_entry) {
268 		uncore_perf_event_update(box, event);
269 	}
270 
271 	for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
272 		uncore_perf_event_update(box, box->events[bit]);
273 
274 	local_irq_restore(flags);
275 
276 	hrtimer_forward_now(hrtimer, ns_to_ktime(box->hrtimer_duration));
277 	return HRTIMER_RESTART;
278 }
279 
280 void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
281 {
282 	hrtimer_start(&box->hrtimer, ns_to_ktime(box->hrtimer_duration),
283 		      HRTIMER_MODE_REL_PINNED);
284 }
285 
286 void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
287 {
288 	hrtimer_cancel(&box->hrtimer);
289 }
290 
291 static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
292 {
293 	hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
294 	box->hrtimer.function = uncore_pmu_hrtimer;
295 }
296 
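/*
 * Allocate a box together with its trailing array of shared extra registers
 * on the given NUMA node and set up its locks, polling hrtimer and defaults.
 */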
297 static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
298 						 int node)
299 {
300 	int i, size, numshared = type->num_shared_regs;
301 	struct intel_uncore_box *box;
302 
303 	size = sizeof(*box) + numshared * sizeof(struct intel_uncore_extra_reg);
304 
305 	box = kzalloc_node(size, GFP_KERNEL, node);
306 	if (!box)
307 		return NULL;
308 
309 	for (i = 0; i < numshared; i++)
310 		raw_spin_lock_init(&box->shared_regs[i].lock);
311 
312 	uncore_pmu_init_hrtimer(box);
313 	box->cpu = -1;
314 	box->pci_phys_id = -1;
315 	box->pkgid = -1;
316 
317 	/* set default hrtimer timeout */
318 	box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL;
319 
320 	INIT_LIST_HEAD(&box->active_list);
321 
322 	return box;
323 }
324 
325 /*
326  * The uncore_pmu_event_init() pmu event_init callback is used
327  * as a detection point for uncore events.
328  */
329 static int uncore_pmu_event_init(struct perf_event *event);
330 
331 static bool is_box_event(struct intel_uncore_box *box, struct perf_event *event)
332 {
333 	return &box->pmu->pmu == event->pmu;
334 }
335 
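/*
 * Collect the leader and, if @dogrp is set, its non-disabled siblings into
 * the box's event_list. Returns the new number of events, or -EINVAL if the
 * box would exceed its counter capacity.
 */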
336 static int
337 uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader,
338 		      bool dogrp)
339 {
340 	struct perf_event *event;
341 	int n, max_count;
342 
343 	max_count = box->pmu->type->num_counters;
344 	if (box->pmu->type->fixed_ctl)
345 		max_count++;
346 
347 	if (box->n_events >= max_count)
348 		return -EINVAL;
349 
350 	n = box->n_events;
351 
352 	if (is_box_event(box, leader)) {
353 		box->event_list[n] = leader;
354 		n++;
355 	}
356 
357 	if (!dogrp)
358 		return n;
359 
360 	for_each_sibling_event(event, leader) {
361 		if (!is_box_event(box, event) ||
362 		    event->state <= PERF_EVENT_STATE_OFF)
363 			continue;
364 
365 		if (n >= max_count)
366 			return -EINVAL;
367 
368 		box->event_list[n] = event;
369 		n++;
370 	}
371 	return n;
372 }
373 
374 static struct event_constraint *
375 uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
376 {
377 	struct intel_uncore_type *type = box->pmu->type;
378 	struct event_constraint *c;
379 
380 	if (type->ops->get_constraint) {
381 		c = type->ops->get_constraint(box, event);
382 		if (c)
383 			return c;
384 	}
385 
386 	if (event->attr.config == UNCORE_FIXED_EVENT)
387 		return &uncore_constraint_fixed;
388 
389 	if (type->constraints) {
390 		for_each_event_constraint(c, type->constraints) {
391 			if ((event->hw.config & c->cmask) == c->code)
392 				return c;
393 		}
394 	}
395 
396 	return &type->unconstrainted;
397 }
398 
399 static void uncore_put_event_constraint(struct intel_uncore_box *box,
400 					struct perf_event *event)
401 {
402 	if (box->pmu->type->ops->put_constraint)
403 		box->pmu->type->ops->put_constraint(box, event);
404 }
405 
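/*
 * Assign hardware counters to the collected events: the fast path keeps each
 * event on its previously used counter if the constraints still allow it,
 * otherwise perf_assign_events() reschedules the whole set.
 */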
406 static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n)
407 {
408 	unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
409 	struct event_constraint *c;
410 	int i, wmin, wmax, ret = 0;
411 	struct hw_perf_event *hwc;
412 
413 	bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);
414 
415 	for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
416 		c = uncore_get_event_constraint(box, box->event_list[i]);
417 		box->event_constraint[i] = c;
418 		wmin = min(wmin, c->weight);
419 		wmax = max(wmax, c->weight);
420 	}
421 
422 	/* fastpath, try to reuse previous register */
423 	for (i = 0; i < n; i++) {
424 		hwc = &box->event_list[i]->hw;
425 		c = box->event_constraint[i];
426 
427 		/* never assigned */
428 		if (hwc->idx == -1)
429 			break;
430 
431 		/* constraint still honored */
432 		if (!test_bit(hwc->idx, c->idxmsk))
433 			break;
434 
435 		/* not already used */
436 		if (test_bit(hwc->idx, used_mask))
437 			break;
438 
439 		__set_bit(hwc->idx, used_mask);
440 		if (assign)
441 			assign[i] = hwc->idx;
442 	}
443 	/* slow path */
444 	if (i != n)
445 		ret = perf_assign_events(box->event_constraint, n,
446 					 wmin, wmax, n, assign);
447 
448 	if (!assign || ret) {
449 		for (i = 0; i < n; i++)
450 			uncore_put_event_constraint(box, box->event_list[i]);
451 	}
452 	return ret ? -EINVAL : 0;
453 }
454 
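/*
 * Start an event: a free running counter only needs its start value
 * snapshotted, a normal event is programmed and enabled; the first active
 * event also enables the box and starts the polling hrtimer.
 */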
455 void uncore_pmu_event_start(struct perf_event *event, int flags)
456 {
457 	struct intel_uncore_box *box = uncore_event_to_box(event);
458 	int idx = event->hw.idx;
459 
460 	if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
461 		return;
462 
463 	/*
464 	 * A free running counter is read-only and always active.
465 	 * Use the current counter value as the start point.
466 	 * There is no overflow interrupt for a free running counter,
467 	 * so use the hrtimer to periodically poll the counter to avoid overflow.
468 	 */
469 	if (uncore_pmc_freerunning(event->hw.idx)) {
470 		list_add_tail(&event->active_entry, &box->active_list);
471 		local64_set(&event->hw.prev_count,
472 			    uncore_read_counter(box, event));
473 		if (box->n_active++ == 0)
474 			uncore_pmu_start_hrtimer(box);
475 		return;
476 	}
477 
478 	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
479 		return;
480 
481 	event->hw.state = 0;
482 	box->events[idx] = event;
483 	box->n_active++;
484 	__set_bit(idx, box->active_mask);
485 
486 	local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
487 	uncore_enable_event(box, event);
488 
489 	if (box->n_active == 1) {
490 		uncore_enable_box(box);
491 		uncore_pmu_start_hrtimer(box);
492 	}
493 }
494 
495 void uncore_pmu_event_stop(struct perf_event *event, int flags)
496 {
497 	struct intel_uncore_box *box = uncore_event_to_box(event);
498 	struct hw_perf_event *hwc = &event->hw;
499 
500 	/* Cannot disable a free running counter, which is read-only */
501 	if (uncore_pmc_freerunning(hwc->idx)) {
502 		list_del(&event->active_entry);
503 		if (--box->n_active == 0)
504 			uncore_pmu_cancel_hrtimer(box);
505 		uncore_perf_event_update(box, event);
506 		return;
507 	}
508 
509 	if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
510 		uncore_disable_event(box, event);
511 		box->n_active--;
512 		box->events[hwc->idx] = NULL;
513 		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
514 		hwc->state |= PERF_HES_STOPPED;
515 
516 		if (box->n_active == 0) {
517 			uncore_disable_box(box);
518 			uncore_pmu_cancel_hrtimer(box);
519 		}
520 	}
521 
522 	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
523 		/*
524 		 * Drain the remaining delta count out of an event
525 		 * that we are disabling:
526 		 */
527 		uncore_perf_event_update(box, event);
528 		hwc->state |= PERF_HES_UPTODATE;
529 	}
530 }
531 
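/*
 * Add an event to the box: collect it into the event_list, compute a counter
 * assignment, stop events that have to move and reprogram them on their new
 * counters.
 */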
532 int uncore_pmu_event_add(struct perf_event *event, int flags)
533 {
534 	struct intel_uncore_box *box = uncore_event_to_box(event);
535 	struct hw_perf_event *hwc = &event->hw;
536 	int assign[UNCORE_PMC_IDX_MAX];
537 	int i, n, ret;
538 
539 	if (!box)
540 		return -ENODEV;
541 
542 	/*
543 	 * The free running counter is assigned in event_init().
544 	 * The free running counter event and the free running counter
545 	 * are 1:1 mapped, so it doesn't need to be tracked in the event_list.
546 	 */
547 	if (uncore_pmc_freerunning(hwc->idx)) {
548 		if (flags & PERF_EF_START)
549 			uncore_pmu_event_start(event, 0);
550 		return 0;
551 	}
552 
553 	ret = n = uncore_collect_events(box, event, false);
554 	if (ret < 0)
555 		return ret;
556 
557 	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
558 	if (!(flags & PERF_EF_START))
559 		hwc->state |= PERF_HES_ARCH;
560 
561 	ret = uncore_assign_events(box, assign, n);
562 	if (ret)
563 		return ret;
564 
565 	/* save events moving to new counters */
566 	for (i = 0; i < box->n_events; i++) {
567 		event = box->event_list[i];
568 		hwc = &event->hw;
569 
570 		if (hwc->idx == assign[i] &&
571 			hwc->last_tag == box->tags[assign[i]])
572 			continue;
573 		/*
574 		 * Ensure we don't accidentally enable a stopped
575 		 * counter simply because we rescheduled.
576 		 */
577 		if (hwc->state & PERF_HES_STOPPED)
578 			hwc->state |= PERF_HES_ARCH;
579 
580 		uncore_pmu_event_stop(event, PERF_EF_UPDATE);
581 	}
582 
583 	/* reprogram moved events into new counters */
584 	for (i = 0; i < n; i++) {
585 		event = box->event_list[i];
586 		hwc = &event->hw;
587 
588 		if (hwc->idx != assign[i] ||
589 			hwc->last_tag != box->tags[assign[i]])
590 			uncore_assign_hw_event(box, event, assign[i]);
591 		else if (i < box->n_events)
592 			continue;
593 
594 		if (hwc->state & PERF_HES_ARCH)
595 			continue;
596 
597 		uncore_pmu_event_start(event, 0);
598 	}
599 	box->n_events = n;
600 
601 	return 0;
602 }
603 
604 void uncore_pmu_event_del(struct perf_event *event, int flags)
605 {
606 	struct intel_uncore_box *box = uncore_event_to_box(event);
607 	int i;
608 
609 	uncore_pmu_event_stop(event, PERF_EF_UPDATE);
610 
611 	/*
612 	 * The event for a free running counter is not tracked by event_list.
613 	 * There is no need to force event->hw.idx = -1 to reassign the counter,
614 	 * because the event and the free running counter are 1:1 mapped.
615 	 */
616 	if (uncore_pmc_freerunning(event->hw.idx))
617 		return;
618 
619 	for (i = 0; i < box->n_events; i++) {
620 		if (event == box->event_list[i]) {
621 			uncore_put_event_constraint(box, event);
622 
623 			for (++i; i < box->n_events; i++)
624 				box->event_list[i - 1] = box->event_list[i];
625 
626 			--box->n_events;
627 			break;
628 		}
629 	}
630 
631 	event->hw.idx = -1;
632 	event->hw.last_tag = ~0ULL;
633 }
634 
635 void uncore_pmu_event_read(struct perf_event *event)
636 {
637 	struct intel_uncore_box *box = uncore_event_to_box(event);
638 	uncore_perf_event_update(box, event);
639 }
640 
641 /*
642  * Validation ensures the group can be loaded onto the
643  * PMU if it were the only group available.
644  */
645 static int uncore_validate_group(struct intel_uncore_pmu *pmu,
646 				struct perf_event *event)
647 {
648 	struct perf_event *leader = event->group_leader;
649 	struct intel_uncore_box *fake_box;
650 	int ret = -EINVAL, n;
651 
652 	/* The free running counter is always active. */
653 	if (uncore_pmc_freerunning(event->hw.idx))
654 		return 0;
655 
656 	fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
657 	if (!fake_box)
658 		return -ENOMEM;
659 
660 	fake_box->pmu = pmu;
661 	/*
662 	 * The event is not yet connected with its
663 	 * siblings, therefore we must first collect
664 	 * the existing siblings, then add the new event
665 	 * before we can simulate the scheduling.
666 	 */
667 	n = uncore_collect_events(fake_box, leader, true);
668 	if (n < 0)
669 		goto out;
670 
671 	fake_box->n_events = n;
672 	n = uncore_collect_events(fake_box, event, false);
673 	if (n < 0)
674 		goto out;
675 
676 	fake_box->n_events = n;
677 
678 	ret = uncore_assign_events(fake_box, NULL, n);
679 out:
680 	kfree(fake_box);
681 	return ret;
682 }
683 
684 static int uncore_pmu_event_init(struct perf_event *event)
685 {
686 	struct intel_uncore_pmu *pmu;
687 	struct intel_uncore_box *box;
688 	struct hw_perf_event *hwc = &event->hw;
689 	int ret;
690 
691 	if (event->attr.type != event->pmu->type)
692 		return -ENOENT;
693 
694 	pmu = uncore_event_to_pmu(event);
695 	/* no device found for this pmu */
696 	if (pmu->func_id < 0)
697 		return -ENOENT;
698 
699 	/* Sampling not supported yet */
700 	if (hwc->sample_period)
701 		return -EINVAL;
702 
703 	/*
704 	 * Place all uncore events for a particular physical package
705 	 * onto a single cpu
706 	 */
707 	if (event->cpu < 0)
708 		return -EINVAL;
709 	box = uncore_pmu_to_box(pmu, event->cpu);
710 	if (!box || box->cpu < 0)
711 		return -EINVAL;
712 	event->cpu = box->cpu;
713 	event->pmu_private = box;
714 
715 	event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;
716 
717 	event->hw.idx = -1;
718 	event->hw.last_tag = ~0ULL;
719 	event->hw.extra_reg.idx = EXTRA_REG_NONE;
720 	event->hw.branch_reg.idx = EXTRA_REG_NONE;
721 
722 	if (event->attr.config == UNCORE_FIXED_EVENT) {
723 		/* no fixed counter */
724 		if (!pmu->type->fixed_ctl)
725 			return -EINVAL;
726 		/*
727 		 * if there is only one fixed counter, only the first pmu
728 		 * can access the fixed counter
729 		 */
730 		if (pmu->type->single_fixed && pmu->pmu_idx > 0)
731 			return -EINVAL;
732 
733 		/* fixed counters have event field hardcoded to zero */
734 		hwc->config = 0ULL;
735 	} else if (is_freerunning_event(event)) {
736 		hwc->config = event->attr.config;
737 		if (!check_valid_freerunning_event(box, event))
738 			return -EINVAL;
739 		event->hw.idx = UNCORE_PMC_IDX_FREERUNNING;
740 		/*
741 		 * The free running counter event and free running counter
742 		 * are always 1:1 mapped.
743 		 * The free running counter is always active.
744 		 * Assign the free running counter here.
745 		 */
746 		event->hw.event_base = uncore_freerunning_counter(box, event);
747 	} else {
748 		hwc->config = event->attr.config &
749 			      (pmu->type->event_mask | ((u64)pmu->type->event_mask_ext << 32));
750 		if (pmu->type->ops->hw_config) {
751 			ret = pmu->type->ops->hw_config(box, event);
752 			if (ret)
753 				return ret;
754 		}
755 	}
756 
757 	if (event->group_leader != event)
758 		ret = uncore_validate_group(pmu, event);
759 	else
760 		ret = 0;
761 
762 	return ret;
763 }
764 
765 static ssize_t uncore_get_attr_cpumask(struct device *dev,
766 				struct device_attribute *attr, char *buf)
767 {
768 	return cpumap_print_to_pagebuf(true, buf, &uncore_cpu_mask);
769 }
770 
771 static DEVICE_ATTR(cpumask, S_IRUGO, uncore_get_attr_cpumask, NULL);
772 
773 static struct attribute *uncore_pmu_attrs[] = {
774 	&dev_attr_cpumask.attr,
775 	NULL,
776 };
777 
778 static const struct attribute_group uncore_pmu_attr_group = {
779 	.attrs = uncore_pmu_attrs,
780 };
781 
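/*
 * Fill in the perf callbacks (or copy the type-specific pmu template), build
 * the pmu name and register it with the perf core.
 */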
782 static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
783 {
784 	int ret;
785 
786 	if (!pmu->type->pmu) {
787 		pmu->pmu = (struct pmu) {
788 			.attr_groups	= pmu->type->attr_groups,
789 			.task_ctx_nr	= perf_invalid_context,
790 			.event_init	= uncore_pmu_event_init,
791 			.add		= uncore_pmu_event_add,
792 			.del		= uncore_pmu_event_del,
793 			.start		= uncore_pmu_event_start,
794 			.stop		= uncore_pmu_event_stop,
795 			.read		= uncore_pmu_event_read,
796 			.module		= THIS_MODULE,
797 			.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
798 		};
799 	} else {
800 		pmu->pmu = *pmu->type->pmu;
801 		pmu->pmu.attr_groups = pmu->type->attr_groups;
802 	}
803 
804 	if (pmu->type->num_boxes == 1) {
805 		if (strlen(pmu->type->name) > 0)
806 			sprintf(pmu->name, "uncore_%s", pmu->type->name);
807 		else
808 			sprintf(pmu->name, "uncore");
809 	} else {
810 		sprintf(pmu->name, "uncore_%s_%d", pmu->type->name,
811 			pmu->pmu_idx);
812 	}
813 
814 	ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
815 	if (!ret)
816 		pmu->registered = true;
817 	return ret;
818 }
819 
820 static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu)
821 {
822 	if (!pmu->registered)
823 		return;
824 	perf_pmu_unregister(&pmu->pmu);
825 	pmu->registered = false;
826 }
827 
828 static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
829 {
830 	int pkg;
831 
832 	for (pkg = 0; pkg < max_packages; pkg++)
833 		kfree(pmu->boxes[pkg]);
834 	kfree(pmu->boxes);
835 }
836 
837 static void uncore_type_exit(struct intel_uncore_type *type)
838 {
839 	struct intel_uncore_pmu *pmu = type->pmus;
840 	int i;
841 
842 	if (pmu) {
843 		for (i = 0; i < type->num_boxes; i++, pmu++) {
844 			uncore_pmu_unregister(pmu);
845 			uncore_free_boxes(pmu);
846 		}
847 		kfree(type->pmus);
848 		type->pmus = NULL;
849 	}
850 	kfree(type->events_group);
851 	type->events_group = NULL;
852 }
853 
854 static void uncore_types_exit(struct intel_uncore_type **types)
855 {
856 	for (; *types; types++)
857 		uncore_type_exit(*types);
858 }
859 
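/*
 * Allocate the per-box pmus and their per-package box pointer arrays for a
 * type, set up its unconstrained event constraint and build the "events"
 * attribute group from the type's event descriptions.
 */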
860 static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
861 {
862 	struct intel_uncore_pmu *pmus;
863 	size_t size;
864 	int i, j;
865 
866 	pmus = kcalloc(type->num_boxes, sizeof(*pmus), GFP_KERNEL);
867 	if (!pmus)
868 		return -ENOMEM;
869 
870 	size = max_packages * sizeof(struct intel_uncore_box *);
871 
872 	for (i = 0; i < type->num_boxes; i++) {
873 		pmus[i].func_id	= setid ? i : -1;
874 		pmus[i].pmu_idx	= i;
875 		pmus[i].type	= type;
876 		pmus[i].boxes	= kzalloc(size, GFP_KERNEL);
877 		if (!pmus[i].boxes)
878 			goto err;
879 	}
880 
881 	type->pmus = pmus;
882 	type->unconstrainted = (struct event_constraint)
883 		__EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
884 				0, type->num_counters, 0, 0);
885 
886 	if (type->event_descs) {
887 		struct {
888 			struct attribute_group group;
889 			struct attribute *attrs[];
890 		} *attr_group;
891 		for (i = 0; type->event_descs[i].attr.attr.name; i++);
892 
893 		attr_group = kzalloc(struct_size(attr_group, attrs, i + 1),
894 								GFP_KERNEL);
895 		if (!attr_group)
896 			goto err;
897 
898 		attr_group->group.name = "events";
899 		attr_group->group.attrs = attr_group->attrs;
900 
901 		for (j = 0; j < i; j++)
902 			attr_group->attrs[j] = &type->event_descs[j].attr.attr;
903 
904 		type->events_group = &attr_group->group;
905 	}
906 
907 	type->pmu_group = &uncore_pmu_attr_group;
908 
909 	return 0;
910 
911 err:
912 	for (i = 0; i < type->num_boxes; i++)
913 		kfree(pmus[i].boxes);
914 	kfree(pmus);
915 
916 	return -ENOMEM;
917 }
918 
919 static int __init
920 uncore_types_init(struct intel_uncore_type **types, bool setid)
921 {
922 	int ret;
923 
924 	for (; *types; types++) {
925 		ret = uncore_type_init(*types, setid);
926 		if (ret)
927 			return ret;
928 	}
929 	return 0;
930 }
931 
932 /*
933  * add a pci uncore device
934  */
935 static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
936 {
937 	struct intel_uncore_type *type;
938 	struct intel_uncore_pmu *pmu = NULL;
939 	struct intel_uncore_box *box;
940 	int phys_id, pkg, ret;
941 
942 	phys_id = uncore_pcibus_to_physid(pdev->bus);
943 	if (phys_id < 0)
944 		return -ENODEV;
945 
946 	pkg = topology_phys_to_logical_pkg(phys_id);
947 	if (pkg < 0)
948 		return -EINVAL;
949 
950 	if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
951 		int idx = UNCORE_PCI_DEV_IDX(id->driver_data);
952 
953 		uncore_extra_pci_dev[pkg].dev[idx] = pdev;
954 		pci_set_drvdata(pdev, NULL);
955 		return 0;
956 	}
957 
958 	type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];
959 
960 	/*
961 	 * Some platforms, e.g. Knights Landing, use a common PCI device ID
962 	 * for multiple instances of an uncore PMU device type. We should check
963 	 * the PCI slot and function to identify the uncore box.
964 	 */
965 	if (id->driver_data & ~0xffff) {
966 		struct pci_driver *pci_drv = pdev->driver;
967 		const struct pci_device_id *ids = pci_drv->id_table;
968 		unsigned int devfn;
969 
970 		while (ids && ids->vendor) {
971 			if ((ids->vendor == pdev->vendor) &&
972 			    (ids->device == pdev->device)) {
973 				devfn = PCI_DEVFN(UNCORE_PCI_DEV_DEV(ids->driver_data),
974 						  UNCORE_PCI_DEV_FUNC(ids->driver_data));
975 				if (devfn == pdev->devfn) {
976 					pmu = &type->pmus[UNCORE_PCI_DEV_IDX(ids->driver_data)];
977 					break;
978 				}
979 			}
980 			ids++;
981 		}
982 		if (pmu == NULL)
983 			return -ENODEV;
984 	} else {
985 		/*
986 		 * For a performance monitoring unit with multiple boxes,
987 		 * each box has a different function id.
988 		 */
989 		pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
990 	}
991 
992 	if (WARN_ON_ONCE(pmu->boxes[pkg] != NULL))
993 		return -EINVAL;
994 
995 	box = uncore_alloc_box(type, NUMA_NO_NODE);
996 	if (!box)
997 		return -ENOMEM;
998 
999 	if (pmu->func_id < 0)
1000 		pmu->func_id = pdev->devfn;
1001 	else
1002 		WARN_ON_ONCE(pmu->func_id != pdev->devfn);
1003 
1004 	atomic_inc(&box->refcnt);
1005 	box->pci_phys_id = phys_id;
1006 	box->pkgid = pkg;
1007 	box->pci_dev = pdev;
1008 	box->pmu = pmu;
1009 	uncore_box_init(box);
1010 	pci_set_drvdata(pdev, box);
1011 
1012 	pmu->boxes[pkg] = box;
1013 	if (atomic_inc_return(&pmu->activeboxes) > 1)
1014 		return 0;
1015 
1016 	/* First active box registers the pmu */
1017 	ret = uncore_pmu_register(pmu);
1018 	if (ret) {
1019 		pci_set_drvdata(pdev, NULL);
1020 		pmu->boxes[pkg] = NULL;
1021 		uncore_box_exit(box);
1022 		kfree(box);
1023 	}
1024 	return ret;
1025 }
1026 
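/*
 * Remove a pci uncore device: either clear its extra-dev slot or tear down
 * the box, unregistering the pmu when its last active box goes away.
 */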
1027 static void uncore_pci_remove(struct pci_dev *pdev)
1028 {
1029 	struct intel_uncore_box *box;
1030 	struct intel_uncore_pmu *pmu;
1031 	int i, phys_id, pkg;
1032 
1033 	phys_id = uncore_pcibus_to_physid(pdev->bus);
1034 
1035 	box = pci_get_drvdata(pdev);
1036 	if (!box) {
1037 		pkg = topology_phys_to_logical_pkg(phys_id);
1038 		for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
1039 			if (uncore_extra_pci_dev[pkg].dev[i] == pdev) {
1040 				uncore_extra_pci_dev[pkg].dev[i] = NULL;
1041 				break;
1042 			}
1043 		}
1044 		WARN_ON_ONCE(i >= UNCORE_EXTRA_PCI_DEV_MAX);
1045 		return;
1046 	}
1047 
1048 	pmu = box->pmu;
1049 	if (WARN_ON_ONCE(phys_id != box->pci_phys_id))
1050 		return;
1051 
1052 	pci_set_drvdata(pdev, NULL);
1053 	pmu->boxes[box->pkgid] = NULL;
1054 	if (atomic_dec_return(&pmu->activeboxes) == 0)
1055 		uncore_pmu_unregister(pmu);
1056 	uncore_box_exit(box);
1057 	kfree(box);
1058 }
1059 
1060 static int __init uncore_pci_init(void)
1061 {
1062 	size_t size;
1063 	int ret;
1064 
1065 	size = max_packages * sizeof(struct pci_extra_dev);
1066 	uncore_extra_pci_dev = kzalloc(size, GFP_KERNEL);
1067 	if (!uncore_extra_pci_dev) {
1068 		ret = -ENOMEM;
1069 		goto err;
1070 	}
1071 
1072 	ret = uncore_types_init(uncore_pci_uncores, false);
1073 	if (ret)
1074 		goto errtype;
1075 
1076 	uncore_pci_driver->probe = uncore_pci_probe;
1077 	uncore_pci_driver->remove = uncore_pci_remove;
1078 
1079 	ret = pci_register_driver(uncore_pci_driver);
1080 	if (ret)
1081 		goto errtype;
1082 
1083 	pcidrv_registered = true;
1084 	return 0;
1085 
1086 errtype:
1087 	uncore_types_exit(uncore_pci_uncores);
1088 	kfree(uncore_extra_pci_dev);
1089 	uncore_extra_pci_dev = NULL;
1090 	uncore_free_pcibus_map();
1091 err:
1092 	uncore_pci_uncores = empty_uncore;
1093 	return ret;
1094 }
1095 
1096 static void uncore_pci_exit(void)
1097 {
1098 	if (pcidrv_registered) {
1099 		pcidrv_registered = false;
1100 		pci_unregister_driver(uncore_pci_driver);
1101 		uncore_types_exit(uncore_pci_uncores);
1102 		kfree(uncore_extra_pci_dev);
1103 		uncore_free_pcibus_map();
1104 	}
1105 }
1106 
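/*
 * Retarget all boxes of one uncore type in the affected package from old_cpu
 * to new_cpu; for a real migration the polling hrtimer is cancelled and the
 * perf context moved to the new cpu.
 */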
1107 static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
1108 				   int new_cpu)
1109 {
1110 	struct intel_uncore_pmu *pmu = type->pmus;
1111 	struct intel_uncore_box *box;
1112 	int i, pkg;
1113 
1114 	pkg = topology_logical_package_id(old_cpu < 0 ? new_cpu : old_cpu);
1115 	for (i = 0; i < type->num_boxes; i++, pmu++) {
1116 		box = pmu->boxes[pkg];
1117 		if (!box)
1118 			continue;
1119 
1120 		if (old_cpu < 0) {
1121 			WARN_ON_ONCE(box->cpu != -1);
1122 			box->cpu = new_cpu;
1123 			continue;
1124 		}
1125 
1126 		WARN_ON_ONCE(box->cpu != old_cpu);
1127 		box->cpu = -1;
1128 		if (new_cpu < 0)
1129 			continue;
1130 
1131 		uncore_pmu_cancel_hrtimer(box);
1132 		perf_pmu_migrate_context(&pmu->pmu, old_cpu, new_cpu);
1133 		box->cpu = new_cpu;
1134 	}
1135 }
1136 
1137 static void uncore_change_context(struct intel_uncore_type **uncores,
1138 				  int old_cpu, int new_cpu)
1139 {
1140 	for (; *uncores; uncores++)
1141 		uncore_change_type_ctx(*uncores, old_cpu, new_cpu);
1142 }
1143 
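/*
 * CPU hotplug teardown: if the outgoing cpu collected the package's uncore
 * events, hand that role to another online cpu in the package, then drop the
 * outgoing cpu's reference on each box.
 */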
1144 static int uncore_event_cpu_offline(unsigned int cpu)
1145 {
1146 	struct intel_uncore_type *type, **types = uncore_msr_uncores;
1147 	struct intel_uncore_pmu *pmu;
1148 	struct intel_uncore_box *box;
1149 	int i, pkg, target;
1150 
1151 	/* Check if the exiting cpu is used for collecting uncore events */
1152 	if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
1153 		goto unref;
1154 	/* Find a new cpu to collect uncore events */
1155 	target = cpumask_any_but(topology_core_cpumask(cpu), cpu);
1156 
1157 	/* Migrate uncore events to the new target */
1158 	if (target < nr_cpu_ids)
1159 		cpumask_set_cpu(target, &uncore_cpu_mask);
1160 	else
1161 		target = -1;
1162 
1163 	uncore_change_context(uncore_msr_uncores, cpu, target);
1164 	uncore_change_context(uncore_pci_uncores, cpu, target);
1165 
1166 unref:
1167 	/* Clear the references */
1168 	pkg = topology_logical_package_id(cpu);
1169 	for (; *types; types++) {
1170 		type = *types;
1171 		pmu = type->pmus;
1172 		for (i = 0; i < type->num_boxes; i++, pmu++) {
1173 			box = pmu->boxes[pkg];
1174 			if (box && atomic_dec_return(&box->refcnt) == 0)
1175 				uncore_box_exit(box);
1176 		}
1177 	}
1178 	return 0;
1179 }
1180 
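/*
 * Allocate any boxes this package is still missing; on allocation failure
 * everything allocated here is freed again.
 */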
1181 static int allocate_boxes(struct intel_uncore_type **types,
1182 			 unsigned int pkg, unsigned int cpu)
1183 {
1184 	struct intel_uncore_box *box, *tmp;
1185 	struct intel_uncore_type *type;
1186 	struct intel_uncore_pmu *pmu;
1187 	LIST_HEAD(allocated);
1188 	int i;
1189 
1190 	/* Try to allocate all required boxes */
1191 	for (; *types; types++) {
1192 		type = *types;
1193 		pmu = type->pmus;
1194 		for (i = 0; i < type->num_boxes; i++, pmu++) {
1195 			if (pmu->boxes[pkg])
1196 				continue;
1197 			box = uncore_alloc_box(type, cpu_to_node(cpu));
1198 			if (!box)
1199 				goto cleanup;
1200 			box->pmu = pmu;
1201 			box->pkgid = pkg;
1202 			list_add(&box->active_list, &allocated);
1203 		}
1204 	}
1205 	/* Install them in the pmus */
1206 	list_for_each_entry_safe(box, tmp, &allocated, active_list) {
1207 		list_del_init(&box->active_list);
1208 		box->pmu->boxes[pkg] = box;
1209 	}
1210 	return 0;
1211 
1212 cleanup:
1213 	list_for_each_entry_safe(box, tmp, &allocated, active_list) {
1214 		list_del_init(&box->active_list);
1215 		kfree(box);
1216 	}
1217 	return -ENOMEM;
1218 }
1219 
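/*
 * CPU hotplug setup: make sure the package has its boxes, take a reference
 * on each of them and, if no cpu in the package collects uncore events yet,
 * make the incoming cpu the collector.
 */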
1220 static int uncore_event_cpu_online(unsigned int cpu)
1221 {
1222 	struct intel_uncore_type *type, **types = uncore_msr_uncores;
1223 	struct intel_uncore_pmu *pmu;
1224 	struct intel_uncore_box *box;
1225 	int i, ret, pkg, target;
1226 
1227 	pkg = topology_logical_package_id(cpu);
1228 	ret = allocate_boxes(types, pkg, cpu);
1229 	if (ret)
1230 		return ret;
1231 
1232 	for (; *types; types++) {
1233 		type = *types;
1234 		pmu = type->pmus;
1235 		for (i = 0; i < type->num_boxes; i++, pmu++) {
1236 			box = pmu->boxes[pkg];
1237 			if (box && atomic_inc_return(&box->refcnt) == 1)
1238 				uncore_box_init(box);
1239 		}
1240 	}
1241 
1242 	/*
1243 	 * Check if there is an online cpu in the package
1244 	 * which collects uncore events already.
1245 	 */
1246 	target = cpumask_any_and(&uncore_cpu_mask, topology_core_cpumask(cpu));
1247 	if (target < nr_cpu_ids)
1248 		return 0;
1249 
1250 	cpumask_set_cpu(cpu, &uncore_cpu_mask);
1251 
1252 	uncore_change_context(uncore_msr_uncores, -1, cpu);
1253 	uncore_change_context(uncore_pci_uncores, -1, cpu);
1254 	return 0;
1255 }
1256 
1257 static int __init type_pmu_register(struct intel_uncore_type *type)
1258 {
1259 	int i, ret;
1260 
1261 	for (i = 0; i < type->num_boxes; i++) {
1262 		ret = uncore_pmu_register(&type->pmus[i]);
1263 		if (ret)
1264 			return ret;
1265 	}
1266 	return 0;
1267 }
1268 
1269 static int __init uncore_msr_pmus_register(void)
1270 {
1271 	struct intel_uncore_type **types = uncore_msr_uncores;
1272 	int ret;
1273 
1274 	for (; *types; types++) {
1275 		ret = type_pmu_register(*types);
1276 		if (ret)
1277 			return ret;
1278 	}
1279 	return 0;
1280 }
1281 
1282 static int __init uncore_cpu_init(void)
1283 {
1284 	int ret;
1285 
1286 	ret = uncore_types_init(uncore_msr_uncores, true);
1287 	if (ret)
1288 		goto err;
1289 
1290 	ret = uncore_msr_pmus_register();
1291 	if (ret)
1292 		goto err;
1293 	return 0;
1294 err:
1295 	uncore_types_exit(uncore_msr_uncores);
1296 	uncore_msr_uncores = empty_uncore;
1297 	return ret;
1298 }
1299 
1300 #define X86_UNCORE_MODEL_MATCH(model, init)	\
1301 	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&init }
1302 
1303 struct intel_uncore_init_fun {
1304 	void	(*cpu_init)(void);
1305 	int	(*pci_init)(void);
1306 };
1307 
1308 static const struct intel_uncore_init_fun nhm_uncore_init __initconst = {
1309 	.cpu_init = nhm_uncore_cpu_init,
1310 };
1311 
1312 static const struct intel_uncore_init_fun snb_uncore_init __initconst = {
1313 	.cpu_init = snb_uncore_cpu_init,
1314 	.pci_init = snb_uncore_pci_init,
1315 };
1316 
1317 static const struct intel_uncore_init_fun ivb_uncore_init __initconst = {
1318 	.cpu_init = snb_uncore_cpu_init,
1319 	.pci_init = ivb_uncore_pci_init,
1320 };
1321 
1322 static const struct intel_uncore_init_fun hsw_uncore_init __initconst = {
1323 	.cpu_init = snb_uncore_cpu_init,
1324 	.pci_init = hsw_uncore_pci_init,
1325 };
1326 
1327 static const struct intel_uncore_init_fun bdw_uncore_init __initconst = {
1328 	.cpu_init = snb_uncore_cpu_init,
1329 	.pci_init = bdw_uncore_pci_init,
1330 };
1331 
1332 static const struct intel_uncore_init_fun snbep_uncore_init __initconst = {
1333 	.cpu_init = snbep_uncore_cpu_init,
1334 	.pci_init = snbep_uncore_pci_init,
1335 };
1336 
1337 static const struct intel_uncore_init_fun nhmex_uncore_init __initconst = {
1338 	.cpu_init = nhmex_uncore_cpu_init,
1339 };
1340 
1341 static const struct intel_uncore_init_fun ivbep_uncore_init __initconst = {
1342 	.cpu_init = ivbep_uncore_cpu_init,
1343 	.pci_init = ivbep_uncore_pci_init,
1344 };
1345 
1346 static const struct intel_uncore_init_fun hswep_uncore_init __initconst = {
1347 	.cpu_init = hswep_uncore_cpu_init,
1348 	.pci_init = hswep_uncore_pci_init,
1349 };
1350 
1351 static const struct intel_uncore_init_fun bdx_uncore_init __initconst = {
1352 	.cpu_init = bdx_uncore_cpu_init,
1353 	.pci_init = bdx_uncore_pci_init,
1354 };
1355 
1356 static const struct intel_uncore_init_fun knl_uncore_init __initconst = {
1357 	.cpu_init = knl_uncore_cpu_init,
1358 	.pci_init = knl_uncore_pci_init,
1359 };
1360 
1361 static const struct intel_uncore_init_fun skl_uncore_init __initconst = {
1362 	.cpu_init = skl_uncore_cpu_init,
1363 	.pci_init = skl_uncore_pci_init,
1364 };
1365 
1366 static const struct intel_uncore_init_fun skx_uncore_init __initconst = {
1367 	.cpu_init = skx_uncore_cpu_init,
1368 	.pci_init = skx_uncore_pci_init,
1369 };
1370 
1371 static const struct intel_uncore_init_fun icl_uncore_init __initconst = {
1372 	.cpu_init = icl_uncore_cpu_init,
1373 	.pci_init = skl_uncore_pci_init,
1374 };
1375 
1376 static const struct x86_cpu_id intel_uncore_match[] __initconst = {
1377 	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM_EP,	  nhm_uncore_init),
1378 	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM,	  nhm_uncore_init),
1379 	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE,	  nhm_uncore_init),
1380 	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE_EP,	  nhm_uncore_init),
1381 	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE,	  snb_uncore_init),
1382 	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE,	  ivb_uncore_init),
1383 	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_CORE,	  hsw_uncore_init),
1384 	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_ULT,	  hsw_uncore_init),
1385 	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_GT3E,	  hsw_uncore_init),
1386 	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_CORE, bdw_uncore_init),
1387 	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_GT3E, bdw_uncore_init),
1388 	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE_X,  snbep_uncore_init),
1389 	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM_EX,	  nhmex_uncore_init),
1390 	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE_EX,	  nhmex_uncore_init),
1391 	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE_X,	  ivbep_uncore_init),
1392 	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_X,	  hswep_uncore_init),
1393 	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_X,	  bdx_uncore_init),
1394 	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, bdx_uncore_init),
1395 	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL,	  knl_uncore_init),
1396 	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNM,	  knl_uncore_init),
1397 	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_DESKTOP, skl_uncore_init),
1398 	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_MOBILE, skl_uncore_init),
1399 	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_X,      skx_uncore_init),
1400 	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_KABYLAKE_MOBILE, skl_uncore_init),
1401 	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_KABYLAKE_DESKTOP, skl_uncore_init),
1402 	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ICELAKE_MOBILE, icl_uncore_init),
1403 	{},
1404 };
1405 
1406 MODULE_DEVICE_TABLE(x86cpu, intel_uncore_match);
1407 
1408 static int __init intel_uncore_init(void)
1409 {
1410 	const struct x86_cpu_id *id;
1411 	struct intel_uncore_init_fun *uncore_init;
1412 	int pret = 0, cret = 0, ret;
1413 
1414 	id = x86_match_cpu(intel_uncore_match);
1415 	if (!id)
1416 		return -ENODEV;
1417 
1418 	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
1419 		return -ENODEV;
1420 
1421 	max_packages = topology_max_packages();
1422 
1423 	uncore_init = (struct intel_uncore_init_fun *)id->driver_data;
1424 	if (uncore_init->pci_init) {
1425 		pret = uncore_init->pci_init();
1426 		if (!pret)
1427 			pret = uncore_pci_init();
1428 	}
1429 
1430 	if (uncore_init->cpu_init) {
1431 		uncore_init->cpu_init();
1432 		cret = uncore_cpu_init();
1433 	}
1434 
1435 	if (cret && pret)
1436 		return -ENODEV;
1437 
1438 	/* Install hotplug callbacks to set up the targets for each package */
1439 	ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
1440 				"perf/x86/intel/uncore:online",
1441 				uncore_event_cpu_online,
1442 				uncore_event_cpu_offline);
1443 	if (ret)
1444 		goto err;
1445 	return 0;
1446 
1447 err:
1448 	uncore_types_exit(uncore_msr_uncores);
1449 	uncore_pci_exit();
1450 	return ret;
1451 }
1452 module_init(intel_uncore_init);
1453 
1454 static void __exit intel_uncore_exit(void)
1455 {
1456 	cpuhp_remove_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
1457 	uncore_types_exit(uncore_msr_uncores);
1458 	uncore_pci_exit();
1459 }
1460 module_exit(intel_uncore_exit);
1461