// SPDX-License-Identifier: GPL-2.0-only
#include <linux/module.h>

#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include "uncore.h"
#include "uncore_discovery.h"

static bool uncore_no_discover;
module_param(uncore_no_discover, bool, 0);
MODULE_PARM_DESC(uncore_no_discover, "Don't enable the Intel uncore PerfMon discovery mechanism "
				     "(default: enable the discovery mechanism).");
struct intel_uncore_type *empty_uncore[] = { NULL, };
struct intel_uncore_type **uncore_msr_uncores = empty_uncore;
struct intel_uncore_type **uncore_pci_uncores = empty_uncore;
struct intel_uncore_type **uncore_mmio_uncores = empty_uncore;

static bool pcidrv_registered;
struct pci_driver *uncore_pci_driver;
/* The PCI driver for the device which the uncore doesn't own. */
struct pci_driver *uncore_pci_sub_driver;
/* pci bus to socket mapping */
DEFINE_RAW_SPINLOCK(pci2phy_map_lock);
struct list_head pci2phy_map_head = LIST_HEAD_INIT(pci2phy_map_head);
struct pci_extra_dev *uncore_extra_pci_dev;
int __uncore_max_dies;

/* mask of cpus that collect uncore events */
static cpumask_t uncore_cpu_mask;

/* constraint for the fixed counter */
static struct event_constraint uncore_constraint_fixed =
	EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
struct event_constraint uncore_constraint_empty =
	EVENT_CONSTRAINT(0, 0, 0);

MODULE_LICENSE("GPL");

int uncore_pcibus_to_dieid(struct pci_bus *bus)
{
	struct pci2phy_map *map;
	int die_id = -1;

	raw_spin_lock(&pci2phy_map_lock);
	list_for_each_entry(map, &pci2phy_map_head, list) {
		if (map->segment == pci_domain_nr(bus)) {
			die_id = map->pbus_to_dieid[bus->number];
			break;
		}
	}
	raw_spin_unlock(&pci2phy_map_lock);

	return die_id;
}

int uncore_die_to_segment(int die)
{
	struct pci_bus *bus = NULL;

	/* Find the first PCI bus that belongs to the specified die. */
	while ((bus = pci_find_next_bus(bus)) &&
	       (die != uncore_pcibus_to_dieid(bus)))
		;

	return bus ? pci_domain_nr(bus) : -EINVAL;
}

int uncore_device_to_die(struct pci_dev *dev)
{
	int node = pcibus_to_node(dev->bus);
	int cpu;

	for_each_cpu(cpu, cpumask_of_pcibus(dev->bus)) {
		struct cpuinfo_x86 *c = &cpu_data(cpu);

		if (c->initialized && cpu_to_node(cpu) == node)
			return c->logical_die_id;
	}

	return -1;
}

static void uncore_free_pcibus_map(void)
{
	struct pci2phy_map *map, *tmp;

	list_for_each_entry_safe(map, tmp, &pci2phy_map_head, list) {
		list_del(&map->list);
		kfree(map);
	}
}

struct pci2phy_map *__find_pci2phy_map(int segment)
{
	struct pci2phy_map *map, *alloc = NULL;
	int i;

	lockdep_assert_held(&pci2phy_map_lock);

lookup:
	list_for_each_entry(map, &pci2phy_map_head, list) {
		if (map->segment == segment)
			goto end;
	}

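	/*
	 * No entry for this segment yet. kmalloc(GFP_KERNEL) may sleep, so
	 * drop the raw spinlock around the allocation and re-scan the list
	 * afterwards in case another CPU added the entry in the meantime.
	 */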
	if (!alloc) {
		raw_spin_unlock(&pci2phy_map_lock);
		alloc = kmalloc(sizeof(struct pci2phy_map), GFP_KERNEL);
		raw_spin_lock(&pci2phy_map_lock);

		if (!alloc)
			return NULL;

		goto lookup;
	}

	map = alloc;
	alloc = NULL;
	map->segment = segment;
	for (i = 0; i < 256; i++)
		map->pbus_to_dieid[i] = -1;
	list_add_tail(&map->list, &pci2phy_map_head);

end:
	kfree(alloc);
	return map;
}

ssize_t uncore_event_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct uncore_event_desc *event =
		container_of(attr, struct uncore_event_desc, attr);
	return sprintf(buf, "%s", event->config);
}

struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
{
	unsigned int dieid = topology_logical_die_id(cpu);

	/*
	 * The unsigned check also catches the '-1' return value for
	 * non-existent mappings in the topology map.
	 */
	return dieid < uncore_max_dies() ? pmu->boxes[dieid] : NULL;
}

u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	u64 count;

	rdmsrl(event->hw.event_base, count);

	return count;
}

void uncore_mmio_exit_box(struct intel_uncore_box *box)
{
	if (box->io_addr)
		iounmap(box->io_addr);
}

u64 uncore_mmio_read_counter(struct intel_uncore_box *box,
			     struct perf_event *event)
{
	if (!box->io_addr)
		return 0;

	if (!uncore_mmio_is_valid_offset(box, event->hw.event_base))
		return 0;

	return readq(box->io_addr + event->hw.event_base);
}

/*
 * generic get constraint function for shared match/mask registers.
 */
struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_extra_reg *er;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
	unsigned long flags;
	bool ok = false;

	/*
	 * reg->alloc can be set due to existing state, so for fake box we
	 * need to ignore this, otherwise we might fail to allocate proper
	 * fake state for this extra reg constraint.
	 */
	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;

	er = &box->shared_regs[reg1->idx];
	raw_spin_lock_irqsave(&er->lock, flags);
	if (!atomic_read(&er->ref) ||
	    (er->config1 == reg1->config && er->config2 == reg2->config)) {
		atomic_inc(&er->ref);
		er->config1 = reg1->config;
		er->config2 = reg2->config;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (ok) {
		if (!uncore_box_is_fake(box))
			reg1->alloc = 1;
		return NULL;
	}

	return &uncore_constraint_empty;
}

void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_extra_reg *er;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;

	/*
	 * Only put the constraint if the extra reg was actually allocated.
	 * This also takes care of events which do not use an extra shared
	 * reg.
	 *
	 * Also, if this is a fake box we shouldn't touch any event state
	 * (reg->alloc) and we don't care about leaving inconsistent box
	 * state either since it will be thrown out.
	 */
	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	er = &box->shared_regs[reg1->idx];
	atomic_dec(&er->ref);
	reg1->alloc = 0;
}

u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
{
	struct intel_uncore_extra_reg *er;
	unsigned long flags;
	u64 config;

	er = &box->shared_regs[idx];

	raw_spin_lock_irqsave(&er->lock, flags);
	config = er->config;
	raw_spin_unlock_irqrestore(&er->lock, flags);

	return config;
}

static void uncore_assign_hw_event(struct intel_uncore_box *box,
				   struct perf_event *event, int idx)
{
	struct hw_perf_event *hwc = &event->hw;

	hwc->idx = idx;
	hwc->last_tag = ++box->tags[idx];

	if (uncore_pmc_fixed(hwc->idx)) {
		hwc->event_base = uncore_fixed_ctr(box);
		hwc->config_base = uncore_fixed_ctl(box);
		return;
	}

	hwc->config_base = uncore_event_ctl(box, hwc->idx);
	hwc->event_base = uncore_perf_ctr(box, hwc->idx);
}

void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event)
{
	u64 prev_count, new_count, delta;
	int shift;

	if (uncore_pmc_freerunning(event->hw.idx))
		shift = 64 - uncore_freerunning_bits(box, event);
	else if (uncore_pmc_fixed(event->hw.idx))
		shift = 64 - uncore_fixed_ctr_bits(box);
	else
		shift = 64 - uncore_perf_ctr_bits(box);

	/* the hrtimer might modify the previous event value */
again:
	prev_count = local64_read(&event->hw.prev_count);
	new_count = uncore_read_counter(box, event);
	if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
		goto again;

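	/*
	 * The counters are narrower than 64 bit. Shift both values up to the
	 * top bits so the subtraction wraps at the real counter width, then
	 * shift the (unsigned) delta back down.
	 */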
	delta = (new_count << shift) - (prev_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
}

/*
 * The overflow interrupt is unavailable for SandyBridge-EP and broken for
 * SandyBridge, so we use a hrtimer to periodically poll the counter and
 * avoid overflow.
 */
static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
{
	struct intel_uncore_box *box;
	struct perf_event *event;
	unsigned long flags;
	int bit;

	box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
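	/*
	 * Let the timer die if no events are active anymore or if the box
	 * has been migrated to another CPU.
	 */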
	if (!box->n_active || box->cpu != smp_processor_id())
		return HRTIMER_NORESTART;
	/*
	 * disable local interrupts to prevent uncore_pmu_event_start/stop
	 * from interrupting the update process
	 */
	local_irq_save(flags);

	/*
	 * handle boxes with an active event list as opposed to active
	 * counters
	 */
	list_for_each_entry(event, &box->active_list, active_entry) {
		uncore_perf_event_update(box, event);
	}

	for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
		uncore_perf_event_update(box, box->events[bit]);

	local_irq_restore(flags);

	hrtimer_forward_now(hrtimer, ns_to_ktime(box->hrtimer_duration));
	return HRTIMER_RESTART;
}

void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_start(&box->hrtimer, ns_to_ktime(box->hrtimer_duration),
		      HRTIMER_MODE_REL_PINNED);
}

void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_cancel(&box->hrtimer);
}

static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	box->hrtimer.function = uncore_pmu_hrtimer;
}

static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
						 int node)
{
	int i, size, numshared = type->num_shared_regs;
	struct intel_uncore_box *box;

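	/* The box and its trailing shared_regs[] array share one allocation. */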
	size = sizeof(*box) + numshared * sizeof(struct intel_uncore_extra_reg);

	box = kzalloc_node(size, GFP_KERNEL, node);
	if (!box)
		return NULL;

	for (i = 0; i < numshared; i++)
		raw_spin_lock_init(&box->shared_regs[i].lock);

	uncore_pmu_init_hrtimer(box);
	box->cpu = -1;
	box->dieid = -1;

	/* set default hrtimer timeout */
	box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL;

	INIT_LIST_HEAD(&box->active_list);

	return box;
}

/*
 * Using uncore_pmu_event_init pmu event_init callback
 * as a detection point for uncore events.
 */
static int uncore_pmu_event_init(struct perf_event *event);

static bool is_box_event(struct intel_uncore_box *box, struct perf_event *event)
{
	return &box->pmu->pmu == event->pmu;
}

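/*
 * Collect the group leader (if it belongs to this box) and, if @dogrp is
 * set, its siblings into box->event_list, bounded by the number of
 * available counters. Returns the new event count or -EINVAL on overflow.
 */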
static int
uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader,
		      bool dogrp)
{
	struct perf_event *event;
	int n, max_count;

	max_count = box->pmu->type->num_counters;
	if (box->pmu->type->fixed_ctl)
		max_count++;

	if (box->n_events >= max_count)
		return -EINVAL;

	n = box->n_events;

	if (is_box_event(box, leader)) {
		box->event_list[n] = leader;
		n++;
	}

	if (!dogrp)
		return n;

	for_each_sibling_event(event, leader) {
		if (!is_box_event(box, event) ||
		    event->state <= PERF_EVENT_STATE_OFF)
			continue;

		if (n >= max_count)
			return -EINVAL;

		box->event_list[n] = event;
		n++;
	}
	return n;
}

static struct event_constraint *
uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_type *type = box->pmu->type;
	struct event_constraint *c;

	if (type->ops->get_constraint) {
		c = type->ops->get_constraint(box, event);
		if (c)
			return c;
	}

	if (event->attr.config == UNCORE_FIXED_EVENT)
		return &uncore_constraint_fixed;

	if (type->constraints) {
		for_each_event_constraint(c, type->constraints) {
			if ((event->hw.config & c->cmask) == c->code)
				return c;
		}
	}

	return &type->unconstrainted;
}

static void uncore_put_event_constraint(struct intel_uncore_box *box,
					struct perf_event *event)
{
	if (box->pmu->type->ops->put_constraint)
		box->pmu->type->ops->put_constraint(box, event);
}

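/*
 * Find a counter assignment for the first @n events in box->event_list
 * that honors each event's constraint. The fast path keeps events on the
 * counters they already occupy; otherwise fall back to the generic
 * perf_assign_events() scheduler.
 */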
static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n)
{
	unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
	struct event_constraint *c;
	int i, wmin, wmax, ret = 0;
	struct hw_perf_event *hwc;

	bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);

	for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
		c = uncore_get_event_constraint(box, box->event_list[i]);
		box->event_constraint[i] = c;
		wmin = min(wmin, c->weight);
		wmax = max(wmax, c->weight);
	}

	/* fastpath, try to reuse previous register */
	for (i = 0; i < n; i++) {
		hwc = &box->event_list[i]->hw;
		c = box->event_constraint[i];

		/* never assigned */
		if (hwc->idx == -1)
			break;

		/* constraint still honored */
		if (!test_bit(hwc->idx, c->idxmsk))
			break;

		/* not already used */
		if (test_bit(hwc->idx, used_mask))
			break;

		__set_bit(hwc->idx, used_mask);
		if (assign)
			assign[i] = hwc->idx;
	}
	/* slow path */
	if (i != n)
		ret = perf_assign_events(box->event_constraint, n,
					 wmin, wmax, n, assign);

	if (!assign || ret) {
		for (i = 0; i < n; i++)
			uncore_put_event_constraint(box, box->event_list[i]);
	}
	return ret ? -EINVAL : 0;
}

void uncore_pmu_event_start(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int idx = event->hw.idx;

	if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
		return;

	/*
	 * Free running counter is read-only and always active.
	 * Use the current counter value as start point.
	 * There is no overflow interrupt for free running counter.
	 * Use hrtimer to periodically poll the counter to avoid overflow.
	 */
	if (uncore_pmc_freerunning(event->hw.idx)) {
		list_add_tail(&event->active_entry, &box->active_list);
		local64_set(&event->hw.prev_count,
			    uncore_read_counter(box, event));
		if (box->n_active++ == 0)
			uncore_pmu_start_hrtimer(box);
		return;
	}

	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	event->hw.state = 0;
	box->events[idx] = event;
	box->n_active++;
	__set_bit(idx, box->active_mask);

	local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
	uncore_enable_event(box, event);

	if (box->n_active == 1)
		uncore_pmu_start_hrtimer(box);
}

void uncore_pmu_event_stop(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;

	/* Cannot disable free running counter which is read-only */
	if (uncore_pmc_freerunning(hwc->idx)) {
		list_del(&event->active_entry);
		if (--box->n_active == 0)
			uncore_pmu_cancel_hrtimer(box);
		uncore_perf_event_update(box, event);
		return;
	}

	if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
		uncore_disable_event(box, event);
		box->n_active--;
		box->events[hwc->idx] = NULL;
		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;

		if (box->n_active == 0)
			uncore_pmu_cancel_hrtimer(box);
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of an event
		 * that we are disabling:
		 */
		uncore_perf_event_update(box, event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

int uncore_pmu_event_add(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;
	int assign[UNCORE_PMC_IDX_MAX];
	int i, n, ret;

	if (!box)
		return -ENODEV;

	/*
	 * The free running counter is assigned in event_init().
	 * The free running counter event and free running counter
	 * are 1:1 mapped. It doesn't need to be tracked in event_list.
	 */
	if (uncore_pmc_freerunning(hwc->idx)) {
		if (flags & PERF_EF_START)
			uncore_pmu_event_start(event, 0);
		return 0;
	}

	ret = n = uncore_collect_events(box, event, false);
	if (ret < 0)
		return ret;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_ARCH;

	ret = uncore_assign_events(box, assign, n);
	if (ret)
		return ret;

	/* save events moving to new counters */
	for (i = 0; i < box->n_events; i++) {
		event = box->event_list[i];
		hwc = &event->hw;

		if (hwc->idx == assign[i] &&
		    hwc->last_tag == box->tags[assign[i]])
			continue;
		/*
		 * Ensure we don't accidentally enable a stopped
		 * counter simply because we rescheduled.
		 */
		if (hwc->state & PERF_HES_STOPPED)
			hwc->state |= PERF_HES_ARCH;

		uncore_pmu_event_stop(event, PERF_EF_UPDATE);
	}

	/* reprogram moved events into new counters */
	for (i = 0; i < n; i++) {
		event = box->event_list[i];
		hwc = &event->hw;

		if (hwc->idx != assign[i] ||
		    hwc->last_tag != box->tags[assign[i]])
			uncore_assign_hw_event(box, event, assign[i]);
		else if (i < box->n_events)
			continue;

		if (hwc->state & PERF_HES_ARCH)
			continue;

		uncore_pmu_event_start(event, 0);
	}
	box->n_events = n;

	return 0;
}

void uncore_pmu_event_del(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int i;

	uncore_pmu_event_stop(event, PERF_EF_UPDATE);

	/*
	 * The event for a free running counter is not tracked by event_list.
	 * There is no need to force event->hw.idx = -1 to reassign the
	 * counter, because the event and the free running counter are 1:1
	 * mapped.
	 */
	if (uncore_pmc_freerunning(event->hw.idx))
		return;

	for (i = 0; i < box->n_events; i++) {
		if (event == box->event_list[i]) {
			uncore_put_event_constraint(box, event);

			for (++i; i < box->n_events; i++)
				box->event_list[i - 1] = box->event_list[i];

			--box->n_events;
			break;
		}
	}

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
}

void uncore_pmu_event_read(struct perf_event *event)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	uncore_perf_event_update(box, event);
}

/*
 * validation ensures the group can be loaded onto the
 * PMU if it was the only group available.
 */
static int uncore_validate_group(struct intel_uncore_pmu *pmu,
				 struct perf_event *event)
{
	struct perf_event *leader = event->group_leader;
	struct intel_uncore_box *fake_box;
	int ret = -EINVAL, n;

	/* The free running counter is always active. */
	if (uncore_pmc_freerunning(event->hw.idx))
		return 0;

	fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
	if (!fake_box)
		return -ENOMEM;

	fake_box->pmu = pmu;
	/*
	 * the event is not yet connected with its
	 * siblings therefore we must first collect
	 * existing siblings, then add the new event
	 * before we can simulate the scheduling
	 */
	n = uncore_collect_events(fake_box, leader, true);
	if (n < 0)
		goto out;

	fake_box->n_events = n;
	n = uncore_collect_events(fake_box, event, false);
	if (n < 0)
		goto out;

	fake_box->n_events = n;

	ret = uncore_assign_events(fake_box, NULL, n);
out:
	kfree(fake_box);
	return ret;
}

static int uncore_pmu_event_init(struct perf_event *event)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	struct hw_perf_event *hwc = &event->hw;
	int ret;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	pmu = uncore_event_to_pmu(event);
	/* no device found for this pmu */
	if (pmu->func_id < 0)
		return -ENOENT;

	/* Sampling not supported yet */
	if (hwc->sample_period)
		return -EINVAL;

	/*
	 * Place all uncore events for a particular physical package
	 * onto a single cpu
	 */
	if (event->cpu < 0)
		return -EINVAL;
	box = uncore_pmu_to_box(pmu, event->cpu);
	if (!box || box->cpu < 0)
		return -EINVAL;
	event->cpu = box->cpu;
	event->pmu_private = box;

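	/*
	 * Uncore counters count per die/package, so the event can be read
	 * from any CPU of the package where it is active.
	 */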
	event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
	event->hw.extra_reg.idx = EXTRA_REG_NONE;
	event->hw.branch_reg.idx = EXTRA_REG_NONE;

	if (event->attr.config == UNCORE_FIXED_EVENT) {
		/* no fixed counter */
		if (!pmu->type->fixed_ctl)
			return -EINVAL;
		/*
		 * if there is only one fixed counter, only the first pmu
		 * can access the fixed counter
		 */
		if (pmu->type->single_fixed && pmu->pmu_idx > 0)
			return -EINVAL;

		/* fixed counters have event field hardcoded to zero */
		hwc->config = 0ULL;
	} else if (is_freerunning_event(event)) {
		hwc->config = event->attr.config;
		if (!check_valid_freerunning_event(box, event))
			return -EINVAL;
		event->hw.idx = UNCORE_PMC_IDX_FREERUNNING;
		/*
		 * The free running counter event and free running counter
		 * are always 1:1 mapped.
		 * The free running counter is always active.
		 * Assign the free running counter here.
		 */
		event->hw.event_base = uncore_freerunning_counter(box, event);
	} else {
		hwc->config = event->attr.config &
			      (pmu->type->event_mask | ((u64)pmu->type->event_mask_ext << 32));
		if (pmu->type->ops->hw_config) {
			ret = pmu->type->ops->hw_config(box, event);
			if (ret)
				return ret;
		}
	}

	if (event->group_leader != event)
		ret = uncore_validate_group(pmu, event);
	else
		ret = 0;

	return ret;
}

static void uncore_pmu_enable(struct pmu *pmu)
{
	struct intel_uncore_pmu *uncore_pmu;
	struct intel_uncore_box *box;

	uncore_pmu = container_of(pmu, struct intel_uncore_pmu, pmu);

	box = uncore_pmu_to_box(uncore_pmu, smp_processor_id());
	if (!box)
		return;

	if (uncore_pmu->type->ops->enable_box)
		uncore_pmu->type->ops->enable_box(box);
}

static void uncore_pmu_disable(struct pmu *pmu)
{
	struct intel_uncore_pmu *uncore_pmu;
	struct intel_uncore_box *box;

	uncore_pmu = container_of(pmu, struct intel_uncore_pmu, pmu);

	box = uncore_pmu_to_box(uncore_pmu, smp_processor_id());
	if (!box)
		return;

	if (uncore_pmu->type->ops->disable_box)
		uncore_pmu->type->ops->disable_box(box);
}

static ssize_t uncore_get_attr_cpumask(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &uncore_cpu_mask);
}

static DEVICE_ATTR(cpumask, S_IRUGO, uncore_get_attr_cpumask, NULL);

static struct attribute *uncore_pmu_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static const struct attribute_group uncore_pmu_attr_group = {
	.attrs = uncore_pmu_attrs,
};

static inline int uncore_get_box_id(struct intel_uncore_type *type,
				    struct intel_uncore_pmu *pmu)
{
	return type->box_ids ? type->box_ids[pmu->pmu_idx] : pmu->pmu_idx;
}

void uncore_get_alias_name(char *pmu_name, struct intel_uncore_pmu *pmu)
{
	struct intel_uncore_type *type = pmu->type;

	if (type->num_boxes == 1)
		sprintf(pmu_name, "uncore_type_%u", type->type_id);
	else {
		sprintf(pmu_name, "uncore_type_%u_%d",
			type->type_id, uncore_get_box_id(type, pmu));
	}
}

static void uncore_get_pmu_name(struct intel_uncore_pmu *pmu)
{
	struct intel_uncore_type *type = pmu->type;

	/*
	 * No uncore block name in the discovery table.
	 * Use uncore_type_<typeid>_<boxid> as the name.
	 */
	if (!type->name) {
		uncore_get_alias_name(pmu->name, pmu);
		return;
	}

	if (type->num_boxes == 1) {
		if (strlen(type->name) > 0)
			sprintf(pmu->name, "uncore_%s", type->name);
		else
			sprintf(pmu->name, "uncore");
	} else {
		/*
		 * Use the box ID from the discovery table if applicable.
		 */
		sprintf(pmu->name, "uncore_%s_%d", type->name,
			uncore_get_box_id(type, pmu));
	}
}

static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
{
	int ret;

	if (!pmu->type->pmu) {
		pmu->pmu = (struct pmu) {
			.attr_groups	= pmu->type->attr_groups,
			.task_ctx_nr	= perf_invalid_context,
			.pmu_enable	= uncore_pmu_enable,
			.pmu_disable	= uncore_pmu_disable,
			.event_init	= uncore_pmu_event_init,
			.add		= uncore_pmu_event_add,
			.del		= uncore_pmu_event_del,
			.start		= uncore_pmu_event_start,
			.stop		= uncore_pmu_event_stop,
			.read		= uncore_pmu_event_read,
			.module		= THIS_MODULE,
			.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
			.attr_update	= pmu->type->attr_update,
		};
	} else {
		pmu->pmu = *pmu->type->pmu;
		pmu->pmu.attr_groups = pmu->type->attr_groups;
		pmu->pmu.attr_update = pmu->type->attr_update;
	}

	uncore_get_pmu_name(pmu);

	ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
	if (!ret)
		pmu->registered = true;
	return ret;
}

static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu)
{
	if (!pmu->registered)
		return;
	perf_pmu_unregister(&pmu->pmu);
	pmu->registered = false;
}

static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
{
	int die;

	for (die = 0; die < uncore_max_dies(); die++)
		kfree(pmu->boxes[die]);
	kfree(pmu->boxes);
}

static void uncore_type_exit(struct intel_uncore_type *type)
{
	struct intel_uncore_pmu *pmu = type->pmus;
	int i;

	if (type->cleanup_mapping)
		type->cleanup_mapping(type);

	if (pmu) {
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			uncore_pmu_unregister(pmu);
			uncore_free_boxes(pmu);
		}
		kfree(type->pmus);
		type->pmus = NULL;
	}
	if (type->box_ids) {
		kfree(type->box_ids);
		type->box_ids = NULL;
	}
	kfree(type->events_group);
	type->events_group = NULL;
}

static void uncore_types_exit(struct intel_uncore_type **types)
{
	for (; *types; types++)
		uncore_type_exit(*types);
}

static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
{
	struct intel_uncore_pmu *pmus;
	size_t size;
	int i, j;

	pmus = kcalloc(type->num_boxes, sizeof(*pmus), GFP_KERNEL);
	if (!pmus)
		return -ENOMEM;

	size = uncore_max_dies() * sizeof(struct intel_uncore_box *);

	for (i = 0; i < type->num_boxes; i++) {
		pmus[i].func_id	= setid ? i : -1;
		pmus[i].pmu_idx	= i;
		pmus[i].type	= type;
		pmus[i].boxes	= kzalloc(size, GFP_KERNEL);
		if (!pmus[i].boxes)
			goto err;
	}

	type->pmus = pmus;
	type->unconstrainted = (struct event_constraint)
		__EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
				   0, type->num_counters, 0, 0);

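	/*
	 * Build the "events" attribute group from the type's event
	 * descriptors. The group header and its NULL-terminated attribute
	 * pointer array share a single allocation.
	 */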
	if (type->event_descs) {
		struct {
			struct attribute_group group;
			struct attribute *attrs[];
		} *attr_group;
		for (i = 0; type->event_descs[i].attr.attr.name; i++);

		attr_group = kzalloc(struct_size(attr_group, attrs, i + 1),
				     GFP_KERNEL);
		if (!attr_group)
			goto err;

		attr_group->group.name = "events";
		attr_group->group.attrs = attr_group->attrs;

		for (j = 0; j < i; j++)
			attr_group->attrs[j] = &type->event_descs[j].attr.attr;

		type->events_group = &attr_group->group;
	}

	type->pmu_group = &uncore_pmu_attr_group;

	if (type->set_mapping)
		type->set_mapping(type);

	return 0;

err:
	for (i = 0; i < type->num_boxes; i++)
		kfree(pmus[i].boxes);
	kfree(pmus);

	return -ENOMEM;
}

static int __init
uncore_types_init(struct intel_uncore_type **types, bool setid)
{
	int ret;

	for (; *types; types++) {
		ret = uncore_type_init(*types, setid);
		if (ret)
			return ret;
	}
	return 0;
}

/*
 * Get the die information of a PCI device.
 * @pdev: The PCI device.
 * @die: The die id which the device maps to.
 */
static int uncore_pci_get_dev_die_info(struct pci_dev *pdev, int *die)
{
	*die = uncore_pcibus_to_dieid(pdev->bus);
	if (*die < 0)
		return -EINVAL;

	return 0;
}

static struct intel_uncore_pmu *
uncore_pci_find_dev_pmu_from_types(struct pci_dev *pdev)
{
	struct intel_uncore_type **types = uncore_pci_uncores;
	struct intel_uncore_type *type;
	u64 box_ctl;
	int i, die;

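	/*
	 * Match the device against the domain/bus/devfn encoded in each box
	 * control address from the discovery table.
	 */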
108342839ef4SKan Liang for (; *types; types++) {
108442839ef4SKan Liang type = *types;
108542839ef4SKan Liang for (die = 0; die < __uncore_max_dies; die++) {
108642839ef4SKan Liang for (i = 0; i < type->num_boxes; i++) {
108742839ef4SKan Liang if (!type->box_ctls[die])
108842839ef4SKan Liang continue;
108942839ef4SKan Liang box_ctl = type->box_ctls[die] + type->pci_offsets[i];
109042839ef4SKan Liang if (pdev->devfn == UNCORE_DISCOVERY_PCI_DEVFN(box_ctl) &&
109142839ef4SKan Liang pdev->bus->number == UNCORE_DISCOVERY_PCI_BUS(box_ctl) &&
109242839ef4SKan Liang pci_domain_nr(pdev->bus) == UNCORE_DISCOVERY_PCI_DOMAIN(box_ctl))
109342839ef4SKan Liang return &type->pmus[i];
109442839ef4SKan Liang }
109542839ef4SKan Liang }
109642839ef4SKan Liang }
109742839ef4SKan Liang
109842839ef4SKan Liang return NULL;
109942839ef4SKan Liang }
110042839ef4SKan Liang
11018ed2ccaaSKan Liang /*
11028ed2ccaaSKan Liang * Find the PMU of a PCI device.
11038ed2ccaaSKan Liang * @pdev: The PCI device.
11048ed2ccaaSKan Liang * @ids: The ID table of the available PCI devices with a PMU.
110542839ef4SKan Liang * If NULL, search the whole uncore_pci_uncores.
11068ed2ccaaSKan Liang */
11078ed2ccaaSKan Liang static struct intel_uncore_pmu *
11088ed2ccaaSKan Liang uncore_pci_find_dev_pmu(struct pci_dev *pdev, const struct pci_device_id *ids)
11098ed2ccaaSKan Liang {
11108ed2ccaaSKan Liang struct intel_uncore_pmu *pmu = NULL;
11118ed2ccaaSKan Liang struct intel_uncore_type *type;
11128ed2ccaaSKan Liang kernel_ulong_t data;
11138ed2ccaaSKan Liang unsigned int devfn;
11148ed2ccaaSKan Liang
111542839ef4SKan Liang if (!ids)
111642839ef4SKan Liang return uncore_pci_find_dev_pmu_from_types(pdev);
111742839ef4SKan Liang
11188ed2ccaaSKan Liang while (ids && ids->vendor) {
11198ed2ccaaSKan Liang if ((ids->vendor == pdev->vendor) &&
11208ed2ccaaSKan Liang (ids->device == pdev->device)) {
11218ed2ccaaSKan Liang data = ids->driver_data;
11228ed2ccaaSKan Liang devfn = PCI_DEVFN(UNCORE_PCI_DEV_DEV(data),
11238ed2ccaaSKan Liang UNCORE_PCI_DEV_FUNC(data));
11248ed2ccaaSKan Liang if (devfn == pdev->devfn) {
11258ed2ccaaSKan Liang type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(data)];
11268ed2ccaaSKan Liang pmu = &type->pmus[UNCORE_PCI_DEV_IDX(data)];
11278ed2ccaaSKan Liang break;
11288ed2ccaaSKan Liang }
11298ed2ccaaSKan Liang }
11308ed2ccaaSKan Liang ids++;
11318ed2ccaaSKan Liang }
11328ed2ccaaSKan Liang return pmu;
11338ed2ccaaSKan Liang }
11348ed2ccaaSKan Liang
1135fe650733SKan Liang /*
113616fa6431SKan Liang * Register the PMU for a PCI device
113716fa6431SKan Liang * @pdev: The PCI device.
113816fa6431SKan Liang * @type: The corresponding PMU type of the device.
113916fa6431SKan Liang * @pmu: The corresponding PMU of the device.
114016fa6431SKan Liang * @die: The die id which the device maps to.
114116fa6431SKan Liang */
114216fa6431SKan Liang static int uncore_pci_pmu_register(struct pci_dev *pdev,
114316fa6431SKan Liang struct intel_uncore_type *type,
114416fa6431SKan Liang struct intel_uncore_pmu *pmu,
1145ba9506beSSteve Wahl int die)
114616fa6431SKan Liang {
114716fa6431SKan Liang struct intel_uncore_box *box;
114816fa6431SKan Liang int ret;
114916fa6431SKan Liang
115016fa6431SKan Liang if (WARN_ON_ONCE(pmu->boxes[die] != NULL))
115116fa6431SKan Liang return -EINVAL;
115216fa6431SKan Liang
115316fa6431SKan Liang box = uncore_alloc_box(type, NUMA_NO_NODE);
115416fa6431SKan Liang if (!box)
115516fa6431SKan Liang return -ENOMEM;
115616fa6431SKan Liang
115716fa6431SKan Liang if (pmu->func_id < 0)
115816fa6431SKan Liang pmu->func_id = pdev->devfn;
115916fa6431SKan Liang else
116016fa6431SKan Liang WARN_ON_ONCE(pmu->func_id != pdev->devfn);
116116fa6431SKan Liang
116216fa6431SKan Liang atomic_inc(&box->refcnt);
116316fa6431SKan Liang box->dieid = die;
116416fa6431SKan Liang box->pci_dev = pdev;
116516fa6431SKan Liang box->pmu = pmu;
116616fa6431SKan Liang uncore_box_init(box);
116716fa6431SKan Liang
116816fa6431SKan Liang pmu->boxes[die] = box;
116916fa6431SKan Liang if (atomic_inc_return(&pmu->activeboxes) > 1)
117016fa6431SKan Liang return 0;
117116fa6431SKan Liang
117216fa6431SKan Liang /* First active box registers the pmu */
117316fa6431SKan Liang ret = uncore_pmu_register(pmu);
117416fa6431SKan Liang if (ret) {
117516fa6431SKan Liang pmu->boxes[die] = NULL;
117616fa6431SKan Liang uncore_box_exit(box);
117716fa6431SKan Liang kfree(box);
117816fa6431SKan Liang }
117916fa6431SKan Liang return ret;
118016fa6431SKan Liang }
118116fa6431SKan Liang
118216fa6431SKan Liang /*
11836bcb2db5SBorislav Petkov  * Add a PCI uncore device.
11846bcb2db5SBorislav Petkov */
11856bcb2db5SBorislav Petkov static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
11866bcb2db5SBorislav Petkov {
1187cf6d445fSThomas Gleixner struct intel_uncore_type *type;
1188a54fa079SKan Liang struct intel_uncore_pmu *pmu = NULL;
1189ba9506beSSteve Wahl int die, ret;
11906bcb2db5SBorislav Petkov
1191ba9506beSSteve Wahl ret = uncore_pci_get_dev_die_info(pdev, &die);
1192fe650733SKan Liang if (ret)
1193fe650733SKan Liang return ret;
1194cf6d445fSThomas Gleixner
11956bcb2db5SBorislav Petkov if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
11966bcb2db5SBorislav Petkov int idx = UNCORE_PCI_DEV_IDX(id->driver_data);
1197cf6d445fSThomas Gleixner
1198b0529b9cSKan Liang uncore_extra_pci_dev[die].dev[idx] = pdev;
11996bcb2db5SBorislav Petkov pci_set_drvdata(pdev, NULL);
12006bcb2db5SBorislav Petkov return 0;
12016bcb2db5SBorislav Petkov }
12026bcb2db5SBorislav Petkov
12036bcb2db5SBorislav Petkov type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];
1204a54fa079SKan Liang
1205a54fa079SKan Liang /*
1206a54fa079SKan Liang * Some platforms, e.g. Knights Landing, use a common PCI device ID
1207a54fa079SKan Liang  * for multiple instances of an uncore PMU device type. Check the PCI
1208a54fa079SKan Liang  * slot and function to identify the uncore box.
1209a54fa079SKan Liang */
1210a54fa079SKan Liang if (id->driver_data & ~0xffff) {
1211ba51521bSUwe Kleine-König struct pci_driver *pci_drv = to_pci_driver(pdev->dev.driver);
1212a54fa079SKan Liang
12138ed2ccaaSKan Liang pmu = uncore_pci_find_dev_pmu(pdev, pci_drv->id_table);
1214a54fa079SKan Liang if (pmu == NULL)
1215a54fa079SKan Liang return -ENODEV;
1216a54fa079SKan Liang } else {
12176bcb2db5SBorislav Petkov /*
12186bcb2db5SBorislav Petkov 	 * For a performance monitoring unit with multiple boxes,
12196bcb2db5SBorislav Petkov 	 * each box has a different function ID.
12206bcb2db5SBorislav Petkov */
12216bcb2db5SBorislav Petkov pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
12221229735bSThomas Gleixner }
12231229735bSThomas Gleixner
1224ba9506beSSteve Wahl ret = uncore_pci_pmu_register(pdev, type, pmu, die);
1225cf6d445fSThomas Gleixner
122616fa6431SKan Liang pci_set_drvdata(pdev, pmu->boxes[die]);
1227cf6d445fSThomas Gleixner
12284f089678SThomas Gleixner return ret;
12296bcb2db5SBorislav Petkov }
12306bcb2db5SBorislav Petkov
1231cdcce92aSKan Liang /*
1232cdcce92aSKan Liang  * Unregister the PMU of a PCI device.
1233cdcce92aSKan Liang  * @pmu: The PMU to be unregistered.
1234cdcce92aSKan Liang * @die: The die id which the device maps to.
1235cdcce92aSKan Liang */
1236ba9506beSSteve Wahl static void uncore_pci_pmu_unregister(struct intel_uncore_pmu *pmu, int die)
1237cdcce92aSKan Liang {
1238cdcce92aSKan Liang struct intel_uncore_box *box = pmu->boxes[die];
1239cdcce92aSKan Liang
1240cdcce92aSKan Liang pmu->boxes[die] = NULL;
1241cdcce92aSKan Liang if (atomic_dec_return(&pmu->activeboxes) == 0)
1242cdcce92aSKan Liang uncore_pmu_unregister(pmu);
1243cdcce92aSKan Liang uncore_box_exit(box);
1244cdcce92aSKan Liang kfree(box);
1245cdcce92aSKan Liang }
1246cdcce92aSKan Liang
12476bcb2db5SBorislav Petkov static void uncore_pci_remove(struct pci_dev *pdev)
12486bcb2db5SBorislav Petkov {
1249281ee056SBjorn Helgaas struct intel_uncore_box *box;
12506bcb2db5SBorislav Petkov struct intel_uncore_pmu *pmu;
1251ba9506beSSteve Wahl int i, die;
12526bcb2db5SBorislav Petkov
1253ba9506beSSteve Wahl if (uncore_pci_get_dev_die_info(pdev, &die))
1254cdcce92aSKan Liang return;
1255cf6d445fSThomas Gleixner
12566bcb2db5SBorislav Petkov box = pci_get_drvdata(pdev);
12576bcb2db5SBorislav Petkov if (!box) {
12586bcb2db5SBorislav Petkov for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
1259b0529b9cSKan Liang if (uncore_extra_pci_dev[die].dev[i] == pdev) {
1260b0529b9cSKan Liang uncore_extra_pci_dev[die].dev[i] = NULL;
12616bcb2db5SBorislav Petkov break;
12626bcb2db5SBorislav Petkov }
12636bcb2db5SBorislav Petkov }
12646bcb2db5SBorislav Petkov WARN_ON_ONCE(i >= UNCORE_EXTRA_PCI_DEV_MAX);
12656bcb2db5SBorislav Petkov return;
12666bcb2db5SBorislav Petkov }
12676bcb2db5SBorislav Petkov
12686bcb2db5SBorislav Petkov pmu = box->pmu;
12696bcb2db5SBorislav Petkov
12706bcb2db5SBorislav Petkov pci_set_drvdata(pdev, NULL);
1271cdcce92aSKan Liang
1272ba9506beSSteve Wahl uncore_pci_pmu_unregister(pmu, die);
12736bcb2db5SBorislav Petkov }
12746bcb2db5SBorislav Petkov
127595a7fc77SKan Liang static int uncore_bus_notify(struct notifier_block *nb,
12766477dc39SKan Liang unsigned long action, void *data,
12776477dc39SKan Liang const struct pci_device_id *ids)
127895a7fc77SKan Liang {
127995a7fc77SKan Liang struct device *dev = data;
128095a7fc77SKan Liang struct pci_dev *pdev = to_pci_dev(dev);
128195a7fc77SKan Liang struct intel_uncore_pmu *pmu;
1282ba9506beSSteve Wahl int die;
128395a7fc77SKan Liang
128495a7fc77SKan Liang /* Unregister the PMU when the device is going to be deleted. */
128595a7fc77SKan Liang if (action != BUS_NOTIFY_DEL_DEVICE)
128695a7fc77SKan Liang return NOTIFY_DONE;
128795a7fc77SKan Liang
12886477dc39SKan Liang pmu = uncore_pci_find_dev_pmu(pdev, ids);
128995a7fc77SKan Liang if (!pmu)
129095a7fc77SKan Liang return NOTIFY_DONE;
129195a7fc77SKan Liang
1292ba9506beSSteve Wahl if (uncore_pci_get_dev_die_info(pdev, &die))
129395a7fc77SKan Liang return NOTIFY_DONE;
129495a7fc77SKan Liang
1295ba9506beSSteve Wahl uncore_pci_pmu_unregister(pmu, die);
129695a7fc77SKan Liang
129795a7fc77SKan Liang return NOTIFY_OK;
129895a7fc77SKan Liang }
129995a7fc77SKan Liang
13006477dc39SKan Liang static int uncore_pci_sub_bus_notify(struct notifier_block *nb,
13016477dc39SKan Liang unsigned long action, void *data)
13026477dc39SKan Liang {
13036477dc39SKan Liang return uncore_bus_notify(nb, action, data,
13046477dc39SKan Liang uncore_pci_sub_driver->id_table);
13056477dc39SKan Liang }
13066477dc39SKan Liang
13076477dc39SKan Liang static struct notifier_block uncore_pci_sub_notifier = {
13086477dc39SKan Liang .notifier_call = uncore_pci_sub_bus_notify,
130995a7fc77SKan Liang };
131095a7fc77SKan Liang
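/*
 * Scan the devices listed in uncore_pci_sub_driver's ID table, register a
 * PMU for every match and, if any PMU was registered, install a bus
 * notifier so those PMUs are unregistered on device removal.
 */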
131195a7fc77SKan Liang static void uncore_pci_sub_driver_init(void)
131295a7fc77SKan Liang {
131395a7fc77SKan Liang const struct pci_device_id *ids = uncore_pci_sub_driver->id_table;
131495a7fc77SKan Liang struct intel_uncore_type *type;
131595a7fc77SKan Liang struct intel_uncore_pmu *pmu;
131695a7fc77SKan Liang struct pci_dev *pci_sub_dev;
131795a7fc77SKan Liang bool notify = false;
131895a7fc77SKan Liang unsigned int devfn;
1319ba9506beSSteve Wahl int die;
132095a7fc77SKan Liang
132195a7fc77SKan Liang while (ids && ids->vendor) {
132295a7fc77SKan Liang pci_sub_dev = NULL;
132395a7fc77SKan Liang type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(ids->driver_data)];
132495a7fc77SKan Liang /*
132595a7fc77SKan Liang 		 * Search for the available devices and register the
132695a7fc77SKan Liang 		 * corresponding PMUs.
132795a7fc77SKan Liang */
132895a7fc77SKan Liang while ((pci_sub_dev = pci_get_device(PCI_VENDOR_ID_INTEL,
132995a7fc77SKan Liang ids->device, pci_sub_dev))) {
133095a7fc77SKan Liang devfn = PCI_DEVFN(UNCORE_PCI_DEV_DEV(ids->driver_data),
133195a7fc77SKan Liang UNCORE_PCI_DEV_FUNC(ids->driver_data));
133295a7fc77SKan Liang if (devfn != pci_sub_dev->devfn)
133395a7fc77SKan Liang continue;
133495a7fc77SKan Liang
133595a7fc77SKan Liang pmu = &type->pmus[UNCORE_PCI_DEV_IDX(ids->driver_data)];
133695a7fc77SKan Liang if (!pmu)
133795a7fc77SKan Liang continue;
133895a7fc77SKan Liang
1339ba9506beSSteve Wahl if (uncore_pci_get_dev_die_info(pci_sub_dev, &die))
134095a7fc77SKan Liang continue;
134195a7fc77SKan Liang
134295a7fc77SKan Liang if (!uncore_pci_pmu_register(pci_sub_dev, type, pmu,
1343ba9506beSSteve Wahl die))
134495a7fc77SKan Liang notify = true;
134595a7fc77SKan Liang }
134695a7fc77SKan Liang ids++;
134795a7fc77SKan Liang }
134895a7fc77SKan Liang
13496477dc39SKan Liang if (notify && bus_register_notifier(&pci_bus_type, &uncore_pci_sub_notifier))
135095a7fc77SKan Liang notify = false;
135195a7fc77SKan Liang
135295a7fc77SKan Liang if (!notify)
135395a7fc77SKan Liang uncore_pci_sub_driver = NULL;
135495a7fc77SKan Liang }
135595a7fc77SKan Liang
135642839ef4SKan Liang static int uncore_pci_bus_notify(struct notifier_block *nb,
135742839ef4SKan Liang unsigned long action, void *data)
135842839ef4SKan Liang {
135942839ef4SKan Liang return uncore_bus_notify(nb, action, data, NULL);
136042839ef4SKan Liang }
136142839ef4SKan Liang
136242839ef4SKan Liang static struct notifier_block uncore_pci_notifier = {
136342839ef4SKan Liang .notifier_call = uncore_pci_bus_notify,
136442839ef4SKan Liang };
136542839ef4SKan Liang
136642839ef4SKan Liang
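/*
 * Used when no dedicated PCI driver is available: look up the backing PCI
 * device for every known box control address, register its PMU directly
 * and install a bus notifier to handle device removal.
 */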
136742839ef4SKan Liang static void uncore_pci_pmus_register(void)
136842839ef4SKan Liang {
136942839ef4SKan Liang struct intel_uncore_type **types = uncore_pci_uncores;
137042839ef4SKan Liang struct intel_uncore_type *type;
137142839ef4SKan Liang struct intel_uncore_pmu *pmu;
137242839ef4SKan Liang struct pci_dev *pdev;
137342839ef4SKan Liang u64 box_ctl;
137442839ef4SKan Liang int i, die;
137542839ef4SKan Liang
137642839ef4SKan Liang for (; *types; types++) {
137742839ef4SKan Liang type = *types;
137842839ef4SKan Liang for (die = 0; die < __uncore_max_dies; die++) {
137942839ef4SKan Liang for (i = 0; i < type->num_boxes; i++) {
138042839ef4SKan Liang if (!type->box_ctls[die])
138142839ef4SKan Liang continue;
138242839ef4SKan Liang box_ctl = type->box_ctls[die] + type->pci_offsets[i];
138342839ef4SKan Liang pdev = pci_get_domain_bus_and_slot(UNCORE_DISCOVERY_PCI_DOMAIN(box_ctl),
138442839ef4SKan Liang UNCORE_DISCOVERY_PCI_BUS(box_ctl),
138542839ef4SKan Liang UNCORE_DISCOVERY_PCI_DEVFN(box_ctl));
138642839ef4SKan Liang if (!pdev)
138742839ef4SKan Liang continue;
138842839ef4SKan Liang pmu = &type->pmus[i];
138942839ef4SKan Liang
139042839ef4SKan Liang uncore_pci_pmu_register(pdev, type, pmu, die);
139142839ef4SKan Liang }
139242839ef4SKan Liang }
139342839ef4SKan Liang }
139442839ef4SKan Liang
139542839ef4SKan Liang bus_register_notifier(&pci_bus_type, &uncore_pci_notifier);
139642839ef4SKan Liang }
139742839ef4SKan Liang
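/*
 * Set up the PCI uncore PMUs: allocate the extra-device table, initialize
 * the PCI uncore types and either register the platform's PCI driver or,
 * if there is none, register the PMUs directly.
 */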
13986bcb2db5SBorislav Petkov static int __init uncore_pci_init(void)
13996bcb2db5SBorislav Petkov {
1400cf6d445fSThomas Gleixner size_t size;
14016bcb2db5SBorislav Petkov int ret;
14026bcb2db5SBorislav Petkov
140336b533bcSRoman Sudarikov size = uncore_max_dies() * sizeof(struct pci_extra_dev);
1404cf6d445fSThomas Gleixner uncore_extra_pci_dev = kzalloc(size, GFP_KERNEL);
1405cf6d445fSThomas Gleixner if (!uncore_extra_pci_dev) {
1406cf6d445fSThomas Gleixner ret = -ENOMEM;
1407ffeda003SThomas Gleixner goto err;
1408cf6d445fSThomas Gleixner }
1409cf6d445fSThomas Gleixner
1410cf6d445fSThomas Gleixner ret = uncore_types_init(uncore_pci_uncores, false);
1411cf6d445fSThomas Gleixner if (ret)
1412cf6d445fSThomas Gleixner goto errtype;
14136bcb2db5SBorislav Petkov
141442839ef4SKan Liang if (uncore_pci_driver) {
14156bcb2db5SBorislav Petkov uncore_pci_driver->probe = uncore_pci_probe;
14166bcb2db5SBorislav Petkov uncore_pci_driver->remove = uncore_pci_remove;
14176bcb2db5SBorislav Petkov
14186bcb2db5SBorislav Petkov ret = pci_register_driver(uncore_pci_driver);
1419ffeda003SThomas Gleixner if (ret)
1420cf6d445fSThomas Gleixner goto errtype;
142142839ef4SKan Liang } else
142242839ef4SKan Liang uncore_pci_pmus_register();
14236bcb2db5SBorislav Petkov
142495a7fc77SKan Liang if (uncore_pci_sub_driver)
142595a7fc77SKan Liang uncore_pci_sub_driver_init();
142695a7fc77SKan Liang
1427ffeda003SThomas Gleixner pcidrv_registered = true;
1428ffeda003SThomas Gleixner return 0;
1429ffeda003SThomas Gleixner
1430cf6d445fSThomas Gleixner errtype:
1431ffeda003SThomas Gleixner uncore_types_exit(uncore_pci_uncores);
1432cf6d445fSThomas Gleixner kfree(uncore_extra_pci_dev);
1433cf6d445fSThomas Gleixner uncore_extra_pci_dev = NULL;
14344f089678SThomas Gleixner uncore_free_pcibus_map();
1435cf6d445fSThomas Gleixner err:
1436cf6d445fSThomas Gleixner uncore_pci_uncores = empty_uncore;
14376bcb2db5SBorislav Petkov return ret;
14386bcb2db5SBorislav Petkov }
14396bcb2db5SBorislav Petkov
1440e633c65aSKan Liang static void uncore_pci_exit(void)
14416bcb2db5SBorislav Petkov {
14426bcb2db5SBorislav Petkov if (pcidrv_registered) {
14436bcb2db5SBorislav Petkov pcidrv_registered = false;
144495a7fc77SKan Liang if (uncore_pci_sub_driver)
14456477dc39SKan Liang bus_unregister_notifier(&pci_bus_type, &uncore_pci_sub_notifier);
144642839ef4SKan Liang if (uncore_pci_driver)
14476bcb2db5SBorislav Petkov pci_unregister_driver(uncore_pci_driver);
144842839ef4SKan Liang else
144942839ef4SKan Liang bus_unregister_notifier(&pci_bus_type, &uncore_pci_notifier);
14506bcb2db5SBorislav Petkov uncore_types_exit(uncore_pci_uncores);
1451cf6d445fSThomas Gleixner kfree(uncore_extra_pci_dev);
14524f089678SThomas Gleixner uncore_free_pcibus_map();
14536bcb2db5SBorislav Petkov }
14546bcb2db5SBorislav Petkov }
14556bcb2db5SBorislav Petkov
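/*
 * Move the uncore event collecting role for all boxes of a type from
 * old_cpu to new_cpu, migrating any active perf context along with it.
 */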
14561229735bSThomas Gleixner static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
14571229735bSThomas Gleixner int new_cpu)
14586bcb2db5SBorislav Petkov {
14591229735bSThomas Gleixner struct intel_uncore_pmu *pmu = type->pmus;
14606bcb2db5SBorislav Petkov struct intel_uncore_box *box;
1461b0529b9cSKan Liang int i, die;
14626bcb2db5SBorislav Petkov
1463b0529b9cSKan Liang die = topology_logical_die_id(old_cpu < 0 ? new_cpu : old_cpu);
14641229735bSThomas Gleixner for (i = 0; i < type->num_boxes; i++, pmu++) {
1465b0529b9cSKan Liang box = pmu->boxes[die];
14666bcb2db5SBorislav Petkov if (!box)
14676bcb2db5SBorislav Petkov continue;
14686bcb2db5SBorislav Petkov
14696bcb2db5SBorislav Petkov if (old_cpu < 0) {
14706bcb2db5SBorislav Petkov WARN_ON_ONCE(box->cpu != -1);
14716bcb2db5SBorislav Petkov box->cpu = new_cpu;
14726bcb2db5SBorislav Petkov continue;
14736bcb2db5SBorislav Petkov }
14746bcb2db5SBorislav Petkov
14756bcb2db5SBorislav Petkov WARN_ON_ONCE(box->cpu != old_cpu);
14766bcb2db5SBorislav Petkov box->cpu = -1;
14771229735bSThomas Gleixner if (new_cpu < 0)
14781229735bSThomas Gleixner continue;
14791229735bSThomas Gleixner
14801229735bSThomas Gleixner uncore_pmu_cancel_hrtimer(box);
14811229735bSThomas Gleixner perf_pmu_migrate_context(&pmu->pmu, old_cpu, new_cpu);
14821229735bSThomas Gleixner box->cpu = new_cpu;
14836bcb2db5SBorislav Petkov }
14846bcb2db5SBorislav Petkov }
14851229735bSThomas Gleixner
14861229735bSThomas Gleixner static void uncore_change_context(struct intel_uncore_type **uncores,
14871229735bSThomas Gleixner int old_cpu, int new_cpu)
14881229735bSThomas Gleixner {
14891229735bSThomas Gleixner for (; *uncores; uncores++)
14901229735bSThomas Gleixner uncore_change_type_ctx(*uncores, old_cpu, new_cpu);
14916bcb2db5SBorislav Petkov }
14926bcb2db5SBorislav Petkov
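/*
 * Drop a reference on every box of the given types for this die; a box is
 * exited when its last reference goes away.
 */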
1493c8872d90SKan Liang static void uncore_box_unref(struct intel_uncore_type **types, int id)
14946bcb2db5SBorislav Petkov {
1495c8872d90SKan Liang struct intel_uncore_type *type;
1496fff4b87eSThomas Gleixner struct intel_uncore_pmu *pmu;
1497fff4b87eSThomas Gleixner struct intel_uncore_box *box;
1498c8872d90SKan Liang int i;
1499c8872d90SKan Liang
1500c8872d90SKan Liang for (; *types; types++) {
1501c8872d90SKan Liang type = *types;
1502c8872d90SKan Liang pmu = type->pmus;
1503c8872d90SKan Liang for (i = 0; i < type->num_boxes; i++, pmu++) {
1504c8872d90SKan Liang box = pmu->boxes[id];
1505c8872d90SKan Liang if (box && atomic_dec_return(&box->refcnt) == 0)
1506c8872d90SKan Liang uncore_box_exit(box);
1507c8872d90SKan Liang }
1508c8872d90SKan Liang }
1509c8872d90SKan Liang }
1510c8872d90SKan Liang
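/*
 * CPU hotplug callback: if the outgoing CPU collected uncore events for
 * its die, hand that role to another online CPU in the die, then drop
 * this CPU's references on the die's MSR and MMIO boxes.
 */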
1511c8872d90SKan Liang static int uncore_event_cpu_offline(unsigned int cpu)
1512c8872d90SKan Liang {
1513c8872d90SKan Liang int die, target;
15146bcb2db5SBorislav Petkov
1515cf6d445fSThomas Gleixner 	/* Check if the exiting CPU is used for collecting uncore events */
15166bcb2db5SBorislav Petkov if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
1517fff4b87eSThomas Gleixner goto unref;
1518cf6d445fSThomas Gleixner /* Find a new cpu to collect uncore events */
15191ff4a47bSKan Liang target = cpumask_any_but(topology_die_cpumask(cpu), cpu);
15206bcb2db5SBorislav Petkov
1521cf6d445fSThomas Gleixner /* Migrate uncore events to the new target */
1522cf6d445fSThomas Gleixner if (target < nr_cpu_ids)
15236bcb2db5SBorislav Petkov cpumask_set_cpu(target, &uncore_cpu_mask);
1524cf6d445fSThomas Gleixner else
1525cf6d445fSThomas Gleixner target = -1;
15266bcb2db5SBorislav Petkov
15276bcb2db5SBorislav Petkov uncore_change_context(uncore_msr_uncores, cpu, target);
15283da04b8aSKan Liang uncore_change_context(uncore_mmio_uncores, cpu, target);
15296bcb2db5SBorislav Petkov uncore_change_context(uncore_pci_uncores, cpu, target);
1530fff4b87eSThomas Gleixner
1531fff4b87eSThomas Gleixner unref:
1532fff4b87eSThomas Gleixner /* Clear the references */
1533b0529b9cSKan Liang die = topology_logical_die_id(cpu);
1534c8872d90SKan Liang uncore_box_unref(uncore_msr_uncores, die);
15353da04b8aSKan Liang uncore_box_unref(uncore_mmio_uncores, die);
15361a246b9fSThomas Gleixner return 0;
15376bcb2db5SBorislav Petkov }
15386bcb2db5SBorislav Petkov
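/*
 * Allocate any boxes still missing for this die. Allocation is
 * all-or-nothing: on failure every box allocated here is freed again.
 */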
1539fff4b87eSThomas Gleixner static int allocate_boxes(struct intel_uncore_type **types,
1540b0529b9cSKan Liang unsigned int die, unsigned int cpu)
1541fff4b87eSThomas Gleixner {
1542fff4b87eSThomas Gleixner struct intel_uncore_box *box, *tmp;
1543fff4b87eSThomas Gleixner struct intel_uncore_type *type;
1544fff4b87eSThomas Gleixner struct intel_uncore_pmu *pmu;
1545fff4b87eSThomas Gleixner LIST_HEAD(allocated);
1546fff4b87eSThomas Gleixner int i;
1547fff4b87eSThomas Gleixner
1548fff4b87eSThomas Gleixner /* Try to allocate all required boxes */
1549fff4b87eSThomas Gleixner for (; *types; types++) {
1550fff4b87eSThomas Gleixner type = *types;
1551fff4b87eSThomas Gleixner pmu = type->pmus;
1552fff4b87eSThomas Gleixner for (i = 0; i < type->num_boxes; i++, pmu++) {
1553b0529b9cSKan Liang if (pmu->boxes[die])
1554fff4b87eSThomas Gleixner continue;
1555fff4b87eSThomas Gleixner box = uncore_alloc_box(type, cpu_to_node(cpu));
1556fff4b87eSThomas Gleixner if (!box)
1557fff4b87eSThomas Gleixner goto cleanup;
1558fff4b87eSThomas Gleixner box->pmu = pmu;
1559b0529b9cSKan Liang box->dieid = die;
1560fff4b87eSThomas Gleixner list_add(&box->active_list, &allocated);
1561fff4b87eSThomas Gleixner }
1562fff4b87eSThomas Gleixner }
1563fff4b87eSThomas Gleixner /* Install them in the pmus */
1564fff4b87eSThomas Gleixner list_for_each_entry_safe(box, tmp, &allocated, active_list) {
1565fff4b87eSThomas Gleixner list_del_init(&box->active_list);
1566b0529b9cSKan Liang box->pmu->boxes[die] = box;
1567fff4b87eSThomas Gleixner }
1568fff4b87eSThomas Gleixner return 0;
1569fff4b87eSThomas Gleixner
1570fff4b87eSThomas Gleixner cleanup:
1571fff4b87eSThomas Gleixner list_for_each_entry_safe(box, tmp, &allocated, active_list) {
1572fff4b87eSThomas Gleixner list_del_init(&box->active_list);
1573fff4b87eSThomas Gleixner kfree(box);
1574fff4b87eSThomas Gleixner }
1575fff4b87eSThomas Gleixner return -ENOMEM;
1576fff4b87eSThomas Gleixner }
1577fff4b87eSThomas Gleixner
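/*
 * Take a reference on every box of the given types for this die,
 * allocating missing boxes first and initializing a box when it gets its
 * first reference.
 */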
1578c8872d90SKan Liang static int uncore_box_ref(struct intel_uncore_type **types,
1579c8872d90SKan Liang int id, unsigned int cpu)
15806bcb2db5SBorislav Petkov {
1581c8872d90SKan Liang struct intel_uncore_type *type;
1582fff4b87eSThomas Gleixner struct intel_uncore_pmu *pmu;
1583fff4b87eSThomas Gleixner struct intel_uncore_box *box;
1584c8872d90SKan Liang int i, ret;
1585fff4b87eSThomas Gleixner
1586c8872d90SKan Liang ret = allocate_boxes(types, id, cpu);
1587fff4b87eSThomas Gleixner if (ret)
1588fff4b87eSThomas Gleixner return ret;
1589fff4b87eSThomas Gleixner
1590fff4b87eSThomas Gleixner for (; *types; types++) {
1591fff4b87eSThomas Gleixner type = *types;
1592fff4b87eSThomas Gleixner pmu = type->pmus;
1593fff4b87eSThomas Gleixner for (i = 0; i < type->num_boxes; i++, pmu++) {
1594c8872d90SKan Liang box = pmu->boxes[id];
159580c65fdbSKan Liang if (box && atomic_inc_return(&box->refcnt) == 1)
1596fff4b87eSThomas Gleixner uncore_box_init(box);
1597fff4b87eSThomas Gleixner }
1598fff4b87eSThomas Gleixner }
1599c8872d90SKan Liang return 0;
1600c8872d90SKan Liang }
1601c8872d90SKan Liang
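/*
 * CPU hotplug callback: take references on the die's MSR and MMIO boxes
 * and, if no other CPU in the die collects uncore events yet, make this
 * CPU the event collecting CPU.
 */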
1602c8872d90SKan Liang static int uncore_event_cpu_online(unsigned int cpu)
1603c8872d90SKan Liang {
16043da04b8aSKan Liang int die, target, msr_ret, mmio_ret;
1605c8872d90SKan Liang
1606c8872d90SKan Liang die = topology_logical_die_id(cpu);
16073da04b8aSKan Liang msr_ret = uncore_box_ref(uncore_msr_uncores, die, cpu);
16083da04b8aSKan Liang mmio_ret = uncore_box_ref(uncore_mmio_uncores, die, cpu);
16093da04b8aSKan Liang if (msr_ret && mmio_ret)
16103da04b8aSKan Liang return -ENOMEM;
16116bcb2db5SBorislav Petkov
1612cf6d445fSThomas Gleixner /*
1613cf6d445fSThomas Gleixner 	 * Check if there is an online CPU in the die
1614cf6d445fSThomas Gleixner 	 * which already collects uncore events.
1615cf6d445fSThomas Gleixner */
16161ff4a47bSKan Liang target = cpumask_any_and(&uncore_cpu_mask, topology_die_cpumask(cpu));
1617cf6d445fSThomas Gleixner if (target < nr_cpu_ids)
16181a246b9fSThomas Gleixner return 0;
16196bcb2db5SBorislav Petkov
16206bcb2db5SBorislav Petkov cpumask_set_cpu(cpu, &uncore_cpu_mask);
16216bcb2db5SBorislav Petkov
16223da04b8aSKan Liang if (!msr_ret)
16236bcb2db5SBorislav Petkov uncore_change_context(uncore_msr_uncores, -1, cpu);
16243da04b8aSKan Liang if (!mmio_ret)
16253da04b8aSKan Liang uncore_change_context(uncore_mmio_uncores, -1, cpu);
16266bcb2db5SBorislav Petkov uncore_change_context(uncore_pci_uncores, -1, cpu);
16271a246b9fSThomas Gleixner return 0;
16286bcb2db5SBorislav Petkov }
16296bcb2db5SBorislav Petkov
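/* Register one PMU per box of an MSR or MMIO based uncore type. */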
16304f089678SThomas Gleixner static int __init type_pmu_register(struct intel_uncore_type *type)
16316bcb2db5SBorislav Petkov {
16324f089678SThomas Gleixner int i, ret;
16334f089678SThomas Gleixner
16344f089678SThomas Gleixner for (i = 0; i < type->num_boxes; i++) {
16354f089678SThomas Gleixner ret = uncore_pmu_register(&type->pmus[i]);
16364f089678SThomas Gleixner if (ret)
16374f089678SThomas Gleixner return ret;
16384f089678SThomas Gleixner }
16394f089678SThomas Gleixner return 0;
16404f089678SThomas Gleixner }
16414f089678SThomas Gleixner
16424f089678SThomas Gleixner static int __init uncore_msr_pmus_register(void)
16434f089678SThomas Gleixner {
16444f089678SThomas Gleixner struct intel_uncore_type **types = uncore_msr_uncores;
16454f089678SThomas Gleixner int ret;
16464f089678SThomas Gleixner
16471229735bSThomas Gleixner for (; *types; types++) {
16481229735bSThomas Gleixner ret = type_pmu_register(*types);
16494f089678SThomas Gleixner if (ret)
16504f089678SThomas Gleixner return ret;
16514f089678SThomas Gleixner }
16524f089678SThomas Gleixner return 0;
16536bcb2db5SBorislav Petkov }
16546bcb2db5SBorislav Petkov
16556bcb2db5SBorislav Petkov static int __init uncore_cpu_init(void)
16566bcb2db5SBorislav Petkov {
16576bcb2db5SBorislav Petkov int ret;
16586bcb2db5SBorislav Petkov
1659cf6d445fSThomas Gleixner ret = uncore_types_init(uncore_msr_uncores, true);
16606bcb2db5SBorislav Petkov if (ret)
1661ffeda003SThomas Gleixner goto err;
16624f089678SThomas Gleixner
16634f089678SThomas Gleixner ret = uncore_msr_pmus_register();
16644f089678SThomas Gleixner if (ret)
16654f089678SThomas Gleixner goto err;
16666bcb2db5SBorislav Petkov return 0;
1667ffeda003SThomas Gleixner err:
1668ffeda003SThomas Gleixner uncore_types_exit(uncore_msr_uncores);
1669ffeda003SThomas Gleixner uncore_msr_uncores = empty_uncore;
1670ffeda003SThomas Gleixner return ret;
16716bcb2db5SBorislav Petkov }
16726bcb2db5SBorislav Petkov
16733da04b8aSKan Liang static int __init uncore_mmio_init(void)
16743da04b8aSKan Liang {
16753da04b8aSKan Liang struct intel_uncore_type **types = uncore_mmio_uncores;
16763da04b8aSKan Liang int ret;
16773da04b8aSKan Liang
16783da04b8aSKan Liang ret = uncore_types_init(types, true);
16793da04b8aSKan Liang if (ret)
16803da04b8aSKan Liang goto err;
16813da04b8aSKan Liang
16823da04b8aSKan Liang for (; *types; types++) {
16833da04b8aSKan Liang ret = type_pmu_register(*types);
16843da04b8aSKan Liang if (ret)
16853da04b8aSKan Liang goto err;
16863da04b8aSKan Liang }
16873da04b8aSKan Liang return 0;
16883da04b8aSKan Liang err:
16893da04b8aSKan Liang uncore_types_exit(uncore_mmio_uncores);
16903da04b8aSKan Liang uncore_mmio_uncores = empty_uncore;
16913da04b8aSKan Liang return ret;
16923da04b8aSKan Liang }
16933da04b8aSKan Liang
1694e633c65aSKan Liang struct intel_uncore_init_fun {
1695e633c65aSKan Liang void (*cpu_init)(void);
1696e633c65aSKan Liang int (*pci_init)(void);
16973da04b8aSKan Liang void (*mmio_init)(void);
1698bd9514a4SKan Liang /* Discovery table is required */
1699c54c53d9SKan Liang bool use_discovery;
1700bd9514a4SKan Liang 	/* Uncore units in the discovery table that should be ignored. */
1701bd9514a4SKan Liang int *uncore_units_ignore;
1702e633c65aSKan Liang };
1703e633c65aSKan Liang
1704e633c65aSKan Liang static const struct intel_uncore_init_fun nhm_uncore_init __initconst = {
1705e633c65aSKan Liang .cpu_init = nhm_uncore_cpu_init,
1706e633c65aSKan Liang };
1707e633c65aSKan Liang
1708e633c65aSKan Liang static const struct intel_uncore_init_fun snb_uncore_init __initconst = {
1709e633c65aSKan Liang .cpu_init = snb_uncore_cpu_init,
1710e633c65aSKan Liang .pci_init = snb_uncore_pci_init,
1711e633c65aSKan Liang };
1712e633c65aSKan Liang
1713e633c65aSKan Liang static const struct intel_uncore_init_fun ivb_uncore_init __initconst = {
1714e633c65aSKan Liang .cpu_init = snb_uncore_cpu_init,
1715e633c65aSKan Liang .pci_init = ivb_uncore_pci_init,
1716e633c65aSKan Liang };
1717e633c65aSKan Liang
1718e633c65aSKan Liang static const struct intel_uncore_init_fun hsw_uncore_init __initconst = {
1719e633c65aSKan Liang .cpu_init = snb_uncore_cpu_init,
1720e633c65aSKan Liang .pci_init = hsw_uncore_pci_init,
1721e633c65aSKan Liang };
1722e633c65aSKan Liang
1723e633c65aSKan Liang static const struct intel_uncore_init_fun bdw_uncore_init __initconst = {
1724e633c65aSKan Liang .cpu_init = snb_uncore_cpu_init,
1725e633c65aSKan Liang .pci_init = bdw_uncore_pci_init,
1726e633c65aSKan Liang };
1727e633c65aSKan Liang
1728e633c65aSKan Liang static const struct intel_uncore_init_fun snbep_uncore_init __initconst = {
1729e633c65aSKan Liang .cpu_init = snbep_uncore_cpu_init,
1730e633c65aSKan Liang .pci_init = snbep_uncore_pci_init,
1731e633c65aSKan Liang };
1732e633c65aSKan Liang
1733e633c65aSKan Liang static const struct intel_uncore_init_fun nhmex_uncore_init __initconst = {
1734e633c65aSKan Liang .cpu_init = nhmex_uncore_cpu_init,
1735e633c65aSKan Liang };
1736e633c65aSKan Liang
1737e633c65aSKan Liang static const struct intel_uncore_init_fun ivbep_uncore_init __initconst = {
1738e633c65aSKan Liang .cpu_init = ivbep_uncore_cpu_init,
1739e633c65aSKan Liang .pci_init = ivbep_uncore_pci_init,
1740e633c65aSKan Liang };
1741e633c65aSKan Liang
1742e633c65aSKan Liang static const struct intel_uncore_init_fun hswep_uncore_init __initconst = {
1743e633c65aSKan Liang .cpu_init = hswep_uncore_cpu_init,
1744e633c65aSKan Liang .pci_init = hswep_uncore_pci_init,
1745e633c65aSKan Liang };
1746e633c65aSKan Liang
1747e633c65aSKan Liang static const struct intel_uncore_init_fun bdx_uncore_init __initconst = {
1748e633c65aSKan Liang .cpu_init = bdx_uncore_cpu_init,
1749e633c65aSKan Liang .pci_init = bdx_uncore_pci_init,
1750e633c65aSKan Liang };
1751e633c65aSKan Liang
1752e633c65aSKan Liang static const struct intel_uncore_init_fun knl_uncore_init __initconst = {
1753e633c65aSKan Liang .cpu_init = knl_uncore_cpu_init,
1754e633c65aSKan Liang .pci_init = knl_uncore_pci_init,
1755e633c65aSKan Liang };
1756e633c65aSKan Liang
1757e633c65aSKan Liang static const struct intel_uncore_init_fun skl_uncore_init __initconst = {
175846866b59SKan Liang .cpu_init = skl_uncore_cpu_init,
1759e633c65aSKan Liang .pci_init = skl_uncore_pci_init,
1760e633c65aSKan Liang };
1761e633c65aSKan Liang
1762cd34cd97SKan Liang static const struct intel_uncore_init_fun skx_uncore_init __initconst = {
1763cd34cd97SKan Liang .cpu_init = skx_uncore_cpu_init,
1764cd34cd97SKan Liang .pci_init = skx_uncore_pci_init,
1765cd34cd97SKan Liang };
1766cd34cd97SKan Liang
17676e394376SKan Liang static const struct intel_uncore_init_fun icl_uncore_init __initconst = {
17686e394376SKan Liang .cpu_init = icl_uncore_cpu_init,
17696e394376SKan Liang .pci_init = skl_uncore_pci_init,
17706e394376SKan Liang };
17716e394376SKan Liang
1772fdb64822SKan Liang static const struct intel_uncore_init_fun tgl_uncore_init __initconst = {
17738abbcfefSKan Liang .cpu_init = tgl_uncore_cpu_init,
1774fdb64822SKan Liang .mmio_init = tgl_uncore_mmio_init,
1775fdb64822SKan Liang };
1776fdb64822SKan Liang
1777fdb64822SKan Liang static const struct intel_uncore_init_fun tgl_l_uncore_init __initconst = {
17788abbcfefSKan Liang .cpu_init = tgl_uncore_cpu_init,
1779fdb64822SKan Liang .mmio_init = tgl_l_uncore_mmio_init,
1780fdb64822SKan Liang };
1781fdb64822SKan Liang
178243bc103aSKan Liang static const struct intel_uncore_init_fun rkl_uncore_init __initconst = {
178343bc103aSKan Liang .cpu_init = tgl_uncore_cpu_init,
178443bc103aSKan Liang .pci_init = skl_uncore_pci_init,
178543bc103aSKan Liang };
178643bc103aSKan Liang
1787772ed05fSKan Liang static const struct intel_uncore_init_fun adl_uncore_init __initconst = {
1788772ed05fSKan Liang .cpu_init = adl_uncore_cpu_init,
17895a4487f9SKan Liang .mmio_init = adl_uncore_mmio_init,
1790772ed05fSKan Liang };
1791772ed05fSKan Liang
1792c828441fSKan Liang static const struct intel_uncore_init_fun mtl_uncore_init __initconst = {
1793c828441fSKan Liang .cpu_init = mtl_uncore_cpu_init,
1794c828441fSKan Liang .mmio_init = adl_uncore_mmio_init,
1795c828441fSKan Liang };
1796c828441fSKan Liang
17972b3b76b5SKan Liang static const struct intel_uncore_init_fun icx_uncore_init __initconst = {
17982b3b76b5SKan Liang .cpu_init = icx_uncore_cpu_init,
17992b3b76b5SKan Liang .pci_init = icx_uncore_pci_init,
18002b3b76b5SKan Liang .mmio_init = icx_uncore_mmio_init,
18012b3b76b5SKan Liang };
18022b3b76b5SKan Liang
1803210cc5f9SKan Liang static const struct intel_uncore_init_fun snr_uncore_init __initconst = {
1804210cc5f9SKan Liang .cpu_init = snr_uncore_cpu_init,
1805210cc5f9SKan Liang .pci_init = snr_uncore_pci_init,
1806ee49532bSKan Liang .mmio_init = snr_uncore_mmio_init,
1807210cc5f9SKan Liang };
1808210cc5f9SKan Liang
1809c54c53d9SKan Liang static const struct intel_uncore_init_fun spr_uncore_init __initconst = {
1810c54c53d9SKan Liang .cpu_init = spr_uncore_cpu_init,
1811c54c53d9SKan Liang .pci_init = spr_uncore_pci_init,
1812c54c53d9SKan Liang .mmio_init = spr_uncore_mmio_init,
1813c54c53d9SKan Liang .use_discovery = true,
181465248a9aSKan Liang .uncore_units_ignore = spr_uncore_units_ignore,
1815c54c53d9SKan Liang };
1816c54c53d9SKan Liang
1817edae1f06SKan Liang static const struct intel_uncore_init_fun generic_uncore_init __initconst = {
1818d6c75413SKan Liang .cpu_init = intel_uncore_generic_uncore_cpu_init,
181942839ef4SKan Liang .pci_init = intel_uncore_generic_uncore_pci_init,
1820c4c55e36SKan Liang .mmio_init = intel_uncore_generic_uncore_mmio_init,
1821edae1f06SKan Liang };
1822edae1f06SKan Liang
1823e633c65aSKan Liang static const struct x86_cpu_id intel_uncore_match[] __initconst = {
1824ef37219aSThomas Gleixner X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EP, &nhm_uncore_init),
1825ef37219aSThomas Gleixner X86_MATCH_INTEL_FAM6_MODEL(NEHALEM, &nhm_uncore_init),
1826ef37219aSThomas Gleixner X86_MATCH_INTEL_FAM6_MODEL(WESTMERE, &nhm_uncore_init),
1827ef37219aSThomas Gleixner X86_MATCH_INTEL_FAM6_MODEL(WESTMERE_EP, &nhm_uncore_init),
1828ef37219aSThomas Gleixner X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE, &snb_uncore_init),
1829ef37219aSThomas Gleixner X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE, &ivb_uncore_init),
1830ef37219aSThomas Gleixner X86_MATCH_INTEL_FAM6_MODEL(HASWELL, &hsw_uncore_init),
1831ef37219aSThomas Gleixner X86_MATCH_INTEL_FAM6_MODEL(HASWELL_L, &hsw_uncore_init),
1832ef37219aSThomas Gleixner X86_MATCH_INTEL_FAM6_MODEL(HASWELL_G, &hsw_uncore_init),
1833ef37219aSThomas Gleixner X86_MATCH_INTEL_FAM6_MODEL(BROADWELL, &bdw_uncore_init),
1834ef37219aSThomas Gleixner X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_G, &bdw_uncore_init),
1835ef37219aSThomas Gleixner X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE_X, &snbep_uncore_init),
1836ef37219aSThomas Gleixner X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EX, &nhmex_uncore_init),
1837ef37219aSThomas Gleixner X86_MATCH_INTEL_FAM6_MODEL(WESTMERE_EX, &nhmex_uncore_init),
1838ef37219aSThomas Gleixner X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X, &ivbep_uncore_init),
1839ef37219aSThomas Gleixner X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, &hswep_uncore_init),
1840ef37219aSThomas Gleixner X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, &bdx_uncore_init),
1841ef37219aSThomas Gleixner X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D, &bdx_uncore_init),
1842ef37219aSThomas Gleixner X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &knl_uncore_init),
1843ef37219aSThomas Gleixner X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &knl_uncore_init),
1844ef37219aSThomas Gleixner X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, &skl_uncore_init),
1845ef37219aSThomas Gleixner X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, &skl_uncore_init),
1846ef37219aSThomas Gleixner X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, &skx_uncore_init),
1847ef37219aSThomas Gleixner X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, &skl_uncore_init),
1848ef37219aSThomas Gleixner X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, &skl_uncore_init),
1849bb85429aSKan Liang X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, &skl_uncore_init),
1850bb85429aSKan Liang X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, &skl_uncore_init),
1851ef37219aSThomas Gleixner X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, &icl_uncore_init),
1852ef37219aSThomas Gleixner X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_NNPI, &icl_uncore_init),
1853ef37219aSThomas Gleixner X86_MATCH_INTEL_FAM6_MODEL(ICELAKE, &icl_uncore_init),
18542b3b76b5SKan Liang X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &icx_uncore_init),
18552b3b76b5SKan Liang X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &icx_uncore_init),
1856629b3df7SIngo Molnar X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, &tgl_l_uncore_init),
1857629b3df7SIngo Molnar X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, &tgl_uncore_init),
185843bc103aSKan Liang X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE, &rkl_uncore_init),
1859772ed05fSKan Liang X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &adl_uncore_init),
1860772ed05fSKan Liang X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &adl_uncore_init),
1861ad4878d4SKan Liang X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, &adl_uncore_init),
1862f758bc5aSKan Liang X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, &adl_uncore_init),
1863e04a1607SKan Liang X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, &adl_uncore_init),
1864c828441fSKan Liang X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE, &mtl_uncore_init),
1865c828441fSKan Liang X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, &mtl_uncore_init),
1866c54c53d9SKan Liang X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &spr_uncore_init),
18675268a284SKan Liang X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &spr_uncore_init),
1868ef37219aSThomas Gleixner X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &snr_uncore_init),
1869*882cdb06SPeter Zijlstra X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, &adl_uncore_init),
1870e633c65aSKan Liang {},
1871e633c65aSKan Liang };
1872e633c65aSKan Liang MODULE_DEVICE_TABLE(x86cpu, intel_uncore_match);
1873e633c65aSKan Liang
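/*
 * Module init: select the per-model init functions (or fall back to the
 * generic PerfMon discovery PMUs), bring up the PCI, MSR and MMIO uncore
 * PMUs and install the CPU hotplug callbacks.
 */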
18746bcb2db5SBorislav Petkov static int __init intel_uncore_init(void)
18756bcb2db5SBorislav Petkov {
1876e633c65aSKan Liang const struct x86_cpu_id *id;
1877e633c65aSKan Liang struct intel_uncore_init_fun *uncore_init;
18783da04b8aSKan Liang int pret = 0, cret = 0, mret = 0, ret;
18796bcb2db5SBorislav Petkov
18800c9f3536SBorislav Petkov if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
18816bcb2db5SBorislav Petkov return -ENODEV;
18826bcb2db5SBorislav Petkov
188336b533bcSRoman Sudarikov __uncore_max_dies =
188436b533bcSRoman Sudarikov topology_max_packages() * topology_max_die_per_package();
1885cf6d445fSThomas Gleixner
1886edae1f06SKan Liang id = x86_match_cpu(intel_uncore_match);
1887edae1f06SKan Liang if (!id) {
1888bd9514a4SKan Liang if (!uncore_no_discover && intel_uncore_has_discovery_tables(NULL))
1889edae1f06SKan Liang uncore_init = (struct intel_uncore_init_fun *)&generic_uncore_init;
1890edae1f06SKan Liang else
1891edae1f06SKan Liang return -ENODEV;
1892c54c53d9SKan Liang } else {
1893e633c65aSKan Liang uncore_init = (struct intel_uncore_init_fun *)id->driver_data;
1894c54c53d9SKan Liang if (uncore_no_discover && uncore_init->use_discovery)
1895c54c53d9SKan Liang return -ENODEV;
1896bd9514a4SKan Liang if (uncore_init->use_discovery &&
1897bd9514a4SKan Liang !intel_uncore_has_discovery_tables(uncore_init->uncore_units_ignore))
1898c54c53d9SKan Liang return -ENODEV;
1899c54c53d9SKan Liang }
1900edae1f06SKan Liang
1901e633c65aSKan Liang if (uncore_init->pci_init) {
1902e633c65aSKan Liang pret = uncore_init->pci_init();
1903e633c65aSKan Liang if (!pret)
19045485592cSThomas Gleixner pret = uncore_pci_init();
1905e633c65aSKan Liang }
1906e633c65aSKan Liang
1907e633c65aSKan Liang if (uncore_init->cpu_init) {
1908e633c65aSKan Liang uncore_init->cpu_init();
19095485592cSThomas Gleixner cret = uncore_cpu_init();
1910e633c65aSKan Liang }
19115485592cSThomas Gleixner
19123da04b8aSKan Liang if (uncore_init->mmio_init) {
19133da04b8aSKan Liang uncore_init->mmio_init();
19143da04b8aSKan Liang mret = uncore_mmio_init();
19153da04b8aSKan Liang }
19163da04b8aSKan Liang
1917edae1f06SKan Liang if (cret && pret && mret) {
1918edae1f06SKan Liang ret = -ENODEV;
1919edae1f06SKan Liang goto free_discovery;
1920edae1f06SKan Liang }
1921cf6d445fSThomas Gleixner
1922fff4b87eSThomas Gleixner /* Install hotplug callbacks to setup the targets for each package */
1923fff4b87eSThomas Gleixner ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
1924fff4b87eSThomas Gleixner "perf/x86/intel/uncore:online",
1925fff4b87eSThomas Gleixner uncore_event_cpu_online,
1926fff4b87eSThomas Gleixner uncore_event_cpu_offline);
19274f089678SThomas Gleixner if (ret)
1928cf6d445fSThomas Gleixner goto err;
19296bcb2db5SBorislav Petkov return 0;
19304f089678SThomas Gleixner
1931cf6d445fSThomas Gleixner err:
19324f089678SThomas Gleixner uncore_types_exit(uncore_msr_uncores);
19333da04b8aSKan Liang uncore_types_exit(uncore_mmio_uncores);
19344f089678SThomas Gleixner uncore_pci_exit();
1935edae1f06SKan Liang free_discovery:
1936edae1f06SKan Liang intel_uncore_clear_discovery_tables();
19376bcb2db5SBorislav Petkov return ret;
19386bcb2db5SBorislav Petkov }
1939e633c65aSKan Liang module_init(intel_uncore_init);
1940e633c65aSKan Liang
1941e633c65aSKan Liang static void __exit intel_uncore_exit(void)
1942e633c65aSKan Liang {
1943fff4b87eSThomas Gleixner cpuhp_remove_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
1944e633c65aSKan Liang uncore_types_exit(uncore_msr_uncores);
19453da04b8aSKan Liang uncore_types_exit(uncore_mmio_uncores);
1946e633c65aSKan Liang uncore_pci_exit();
1947edae1f06SKan Liang intel_uncore_clear_discovery_tables();
1948e633c65aSKan Liang }
1949e633c65aSKan Liang module_exit(intel_uncore_exit);
1950