Lines matching "cci-400"
1 // SPDX-License-Identifier: GPL-2.0
2 // CCI Cache Coherent Interconnect PMU driver
3 // Copyright (C) 2013-2018 Arm Ltd.
6 #include <linux/arm-cci.h>
16 #define DRIVER_NAME "ARM-CCI PMU"
35 #define CCI_PMU_CNTR_SIZE(model) ((model)->cntr_size)
37 #define CCI_PMU_CNTR_MASK ((1ULL << 32) - 1)
38 #define CCI_PMU_CNTR_LAST(cci_pmu) (cci_pmu->num_cntrs - 1)
41 ((model)->num_hw_cntrs + (model)->fixed_hw_cntrs)
77 * @fixed_hw_cntrs - Number of fixed event counters
78 * @num_hw_cntrs - Maximum number of programmable event counters
79 * @cntr_size - Size of an event counter mapping
162 * Instead of an event id to monitor CCI cycles, a dedicated counter is
163 * provided. Use 0xff to represent CCI cycles and hope that no future revisions
174 * CCI PMU event id is an 8-bit value made of two parts - bits 7:5 for one of 8
214 CCI_FORMAT_EXT_ATTR_ENTRY(event, "config:0-4"),
215 CCI_FORMAT_EXT_ATTR_ENTRY(source, "config:5-7"),
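The two format fields above split the 8-bit id: "config:0-4" carries the event code and "config:5-7" selects one of the eight source ports. A minimal sketch of the unpacking, with illustrative macro and helper names (not the driver's own):

#define CCI400_EVENT_CODE_MASK    0x1f	/* config bits 0-4 */
#define CCI400_EVENT_SOURCE_SHIFT 5	/* config bits 5-7 */
#define CCI400_EVENT_SOURCE_MASK  0x7

static void cci400_decode_config(unsigned long config,
				 unsigned int *source, unsigned int *code)
{
	*code = config & CCI400_EVENT_CODE_MASK;
	*source = (config >> CCI400_EVENT_SOURCE_SHIFT) &
		  CCI400_EVENT_SOURCE_MASK;
}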
306 return sysfs_emit(buf, "config=0x%lx\n", (unsigned long)eattr->var); in cci400_pmu_cycle_event_show()
317 if (test_and_set_bit(CCI400_PMU_CYCLE_CNTR_IDX, hw->used_mask)) in cci400_get_event_idx()
318 return -EAGAIN; in cci400_get_event_idx()
324 if (!test_and_set_bit(idx, hw->used_mask)) in cci400_get_event_idx()
328 return -EAGAIN; in cci400_get_event_idx()
338 return -ENOENT; in cci400_validate_hw_event()
359 return -ENOENT; in cci400_validate_hw_event()
362 if (ev_code >= cci_pmu->model->event_ranges[if_type].min && in cci400_validate_hw_event()
363 ev_code <= cci_pmu->model->event_ranges[if_type].max) in cci400_validate_hw_event()
366 return -ENOENT; in cci400_validate_hw_event()
372 rev = readl_relaxed(cci_pmu->ctrl_base + CCI_PID2) & CCI_PID2_REV_MASK; in probe_cci400_revision()
397 * CCI5xx PMU event id is a 9-bit value made of two parts.
398 * bits [8:5] - Source for the event
399 * bits [4:0] - Event code (specific to type of interface)
450 CCI_FORMAT_EXT_ATTR_ENTRY(event, "config:0-4"),
451 CCI_FORMAT_EXT_ATTR_ENTRY(source, "config:5-8"),
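Same idea for the 9-bit CCI-5xx id, where the source field grows to four bits ("config:5-8"); again a sketch with assumed names:

#define CCI5XX_EVENT_CODE_MASK    0x1f	/* config bits 0-4 */
#define CCI5XX_EVENT_SOURCE_SHIFT 5	/* config bits 5-8 */
#define CCI5XX_EVENT_SOURCE_MASK  0xf

static unsigned int cci5xx_event_source(unsigned long config)
{
	return (config >> CCI5XX_EVENT_SOURCE_SHIFT) & CCI5XX_EVENT_SOURCE_MASK;
}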
526 (unsigned long)eattr->var, CCI5xx_PORT_GLOBAL); in cci5xx_pmu_global_event_show()
533 * 0x0-0x6 - Slave interfaces
534 * 0x8-0xD - Master interfaces
535 * 0xf - Global Events
536 * 0x7,0xe - Reserved
546 return -ENOENT; in cci500_validate_hw_event()
570 return -ENOENT; in cci500_validate_hw_event()
573 if (ev_code >= cci_pmu->model->event_ranges[if_type].min && in cci500_validate_hw_event()
574 ev_code <= cci_pmu->model->event_ranges[if_type].max) in cci500_validate_hw_event()
577 return -ENOENT; in cci500_validate_hw_event()
584 * 0x0-0x6 - Slave interfaces
585 * 0x8-0xe - Master interfaces
586 * 0xf - Global Events
587 * 0x7 - Reserved
597 return -ENOENT; in cci550_validate_hw_event()
622 return -ENOENT; in cci550_validate_hw_event()
625 if (ev_code >= cci_pmu->model->event_ranges[if_type].min && in cci550_validate_hw_event()
626 ev_code <= cci_pmu->model->event_ranges[if_type].max) in cci550_validate_hw_event()
629 return -ENOENT; in cci550_validate_hw_event()
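A hedged sketch of the source-port classification the two validate routines above encode: CCI-500 and CCI-550 differ only in that port 0xe is a master interface on CCI-550 but reserved on CCI-500. The enum and helper below are illustrative, not driver code:

enum cci_if_type { CCI_IF_SLAVE, CCI_IF_MASTER, CCI_IF_GLOBAL, CCI_IF_INVALID };

static enum cci_if_type cci5xx_classify_port(unsigned int source, bool is_cci550)
{
	if (source <= 0x6)
		return CCI_IF_SLAVE;
	if (source >= 0x8 && source <= (is_cci550 ? 0xe : 0xd))
		return CCI_IF_MASTER;
	if (source == 0xf)
		return CCI_IF_GLOBAL;
	return CCI_IF_INVALID;	/* 0x7 on both; 0xe too on CCI-500 */
}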
635 * Program the CCI PMU counters which have PERF_HES_ARCH set
642 struct cci_pmu_hw_events *cci_hw = &cci_pmu->hw_events; in cci_pmu_sync_counters()
646 for_each_set_bit(i, cci_pmu->hw_events.used_mask, cci_pmu->num_cntrs) { in cci_pmu_sync_counters()
647 struct perf_event *event = cci_hw->events[i]; in cci_pmu_sync_counters()
653 if (event->hw.state & PERF_HES_STOPPED) in cci_pmu_sync_counters()
655 if (event->hw.state & PERF_HES_ARCH) { in cci_pmu_sync_counters()
657 event->hw.state &= ~PERF_HES_ARCH; in cci_pmu_sync_counters()
664 /* Should be called with cci_pmu->hw_events->pmu_lock held */
670 val = readl_relaxed(cci_pmu->ctrl_base + CCI_PMCR) | CCI_PMCR_CEN; in __cci_pmu_enable_nosync()
671 writel(val, cci_pmu->ctrl_base + CCI_PMCR); in __cci_pmu_enable_nosync()
674 /* Should be called with cci_pmu->hw_events->pmu_lock held */
681 /* Should be called with cci_pmu->hw_events->pmu_lock held */
687 val = readl_relaxed(cci_pmu->ctrl_base + CCI_PMCR) & ~CCI_PMCR_CEN; in __cci_pmu_disable()
688 writel(val, cci_pmu->ctrl_base + CCI_PMCR); in __cci_pmu_disable()
696 return sysfs_emit(buf, "%s\n", (char *)eattr->var); in cci_pmu_format_show()
706 (unsigned long)eattr->var); in cci_pmu_event_show()
716 return readl_relaxed(cci_pmu->base + in pmu_read_register()
717 CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset); in pmu_read_register()
723 writel_relaxed(value, cci_pmu->base + in pmu_write_register()
724 CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset); in pmu_write_register()
749 * For all counters on the CCI-PMU, disable any 'enabled' counters,
756 * cci_pmu->hw_events->pmu_lock).
765 for (i = 0; i < cci_pmu->num_cntrs; i++) { in pmu_save_counters()
782 for_each_set_bit(i, mask, cci_pmu->num_cntrs) in pmu_restore_counters()
788 * by the cci
792 return (readl_relaxed(cci_pmu->ctrl_base + CCI_PMCR) & in pmu_get_max_counters()
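pmu_get_max_counters() reads the number of implemented counters out of a PMCR field; a sketch of the extraction, with the field placement assumed rather than quoted from the register description:

#define CCI_PMCR_NCNT_MASK	0x0000f800	/* assumed field placement */
#define CCI_PMCR_NCNT_SHIFT	11

static u32 pmcr_num_counters(u32 pmcr)
{
	return (pmcr & CCI_PMCR_NCNT_MASK) >> CCI_PMCR_NCNT_SHIFT;
}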
798 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); in pmu_get_event_idx()
799 unsigned long cci_event = event->hw.config_base; in pmu_get_event_idx()
802 if (cci_pmu->model->get_event_idx) in pmu_get_event_idx()
803 return cci_pmu->model->get_event_idx(cci_pmu, hw, cci_event); in pmu_get_event_idx()
807 if (!test_and_set_bit(idx, hw->used_mask)) in pmu_get_event_idx()
811 return -EAGAIN; in pmu_get_event_idx()
816 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); in pmu_map_event()
818 if (event->attr.type < PERF_TYPE_MAX || in pmu_map_event()
819 !cci_pmu->model->validate_hw_event) in pmu_map_event()
820 return -ENOENT; in pmu_map_event()
822 return cci_pmu->model->validate_hw_event(cci_pmu, event->attr.config); in pmu_map_event()
828 struct platform_device *pmu_device = cci_pmu->plat_device; in pmu_request_irq()
831 return -ENODEV; in pmu_request_irq()
833 if (cci_pmu->nr_irqs < 1) { in pmu_request_irq()
834 dev_err(&pmu_device->dev, "no irqs for CCI PMUs defined\n"); in pmu_request_irq()
835 return -ENODEV; in pmu_request_irq()
839 * Register all available CCI PMU interrupts. In the interrupt handler in pmu_request_irq()
843 * This should allow handling of non-unique interrupts for the counters. in pmu_request_irq()
845 for (i = 0; i < cci_pmu->nr_irqs; i++) { in pmu_request_irq()
846 int err = request_irq(cci_pmu->irqs[i], handler, IRQF_SHARED, in pmu_request_irq()
847 "arm-cci-pmu", cci_pmu); in pmu_request_irq()
849 dev_err(&pmu_device->dev, "unable to request IRQ%d for ARM CCI PMU counters\n", in pmu_request_irq()
850 cci_pmu->irqs[i]); in pmu_request_irq()
854 set_bit(i, &cci_pmu->active_irqs); in pmu_request_irq()
864 for (i = 0; i < cci_pmu->nr_irqs; i++) { in pmu_free_irq()
865 if (!test_and_clear_bit(i, &cci_pmu->active_irqs)) in pmu_free_irq()
868 free_irq(cci_pmu->irqs[i], cci_pmu); in pmu_free_irq()
874 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); in pmu_read_counter()
875 struct hw_perf_event *hw_counter = &event->hw; in pmu_read_counter()
876 int idx = hw_counter->idx; in pmu_read_counter()
880 dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); in pmu_read_counter()
896 struct cci_pmu_hw_events *cci_hw = &cci_pmu->hw_events; in __pmu_write_counters()
898 for_each_set_bit(i, mask, cci_pmu->num_cntrs) { in __pmu_write_counters()
899 struct perf_event *event = cci_hw->events[i]; in __pmu_write_counters()
903 pmu_write_counter(cci_pmu, local64_read(&event->hw.prev_count), i); in __pmu_write_counters()
909 if (cci_pmu->model->write_counters) in pmu_write_counters()
910 cci_pmu->model->write_counters(cci_pmu, mask); in pmu_write_counters()
918 * CCI-500/CCI-550 have advanced power saving policies, which could gate the
929 * For each counter to be programmed, repeat steps 3-7:
941 * We choose an event which for CCI-5xx is guaranteed not to count.
951 bitmap_zero(saved_mask, cci_pmu->num_cntrs); in cci5xx_pmu_write_counters()
960 for_each_set_bit(i, mask, cci_pmu->num_cntrs) { in cci5xx_pmu_write_counters()
961 struct perf_event *event = cci_pmu->hw_events.events[i]; in cci5xx_pmu_write_counters()
968 pmu_write_counter(cci_pmu, local64_read(&event->hw.prev_count), i); in cci5xx_pmu_write_counters()
970 pmu_set_event(cci_pmu, i, event->hw.config_base); in cci5xx_pmu_write_counters()
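Condensed, the per-counter body of the loop above follows the steps the comment block describes; in sketch form, with helper names standing in for the driver's own:

	/* PMU globally disabled, live counters already saved and stopped */
	pmu_set_event(cci_pmu, i, CCI5xx_INVALID_EVENT);	/* counts nothing */
	pmu_enable_counter(cci_pmu, i);		/* ungate the counter clock */
	pmu_write_counter(cci_pmu, local64_read(&event->hw.prev_count), i);
	pmu_disable_counter(cci_pmu, i);
	pmu_set_event(cci_pmu, i, event->hw.config_base);	/* restore event */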
982 struct hw_perf_event *hwc = &event->hw; in pmu_event_update()
986 prev_raw_count = local64_read(&hwc->prev_count); in pmu_event_update()
988 } while (local64_cmpxchg(&hwc->prev_count, prev_raw_count, in pmu_event_update()
991 delta = (new_raw_count - prev_raw_count) & CCI_PMU_CNTR_MASK; in pmu_event_update()
993 local64_add(delta, &event->count); in pmu_event_update()
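Because the counters are 32 bits wide, the masked subtraction above yields the correct delta even when the counter wraps between reads. A worked example:

	u64 prev = 0xfffffff0, now = 0x00000010;
	/* (0x10 - 0xfffffff0) & CCI_PMU_CNTR_MASK == 0x20: 32 events elapsed */
	u64 delta = (now - prev) & CCI_PMU_CNTR_MASK;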
1005 struct hw_perf_event *hwc = &event->hw; in pmu_event_set_period()
1007 * The CCI PMU counters have a period of 2^32. To account for the in pmu_event_set_period()
1013 local64_set(&hwc->prev_count, val); in pmu_event_set_period()
1016 * CCI PMU uses PERF_HES_ARCH to keep track of the counters, whose in pmu_event_set_period()
1017 * values need to be synced with the s/w state before the PMU is in pmu_event_set_period()
1021 hwc->state |= PERF_HES_ARCH; in pmu_event_set_period()
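The half-period trick referenced above: starting the counter at 2^31 leaves 2^31 events of headroom before overflow, so even a slow interrupt response loses nothing. Roughly, the body amounts to this (reconstructed, not quoted):

	u64 val = 1ULL << 31;	/* half of the 2^32 period */

	local64_set(&hwc->prev_count, val);
	hwc->state |= PERF_HES_ARCH;	/* hardware write deferred until PMU enable */

The counter register itself is written later, once the PMU is enabled, via the PERF_HES_ARCH synchronization in cci_pmu_sync_counters() shown earlier.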
1027 struct cci_pmu_hw_events *events = &cci_pmu->hw_events; in pmu_handle_irq()
1030 raw_spin_lock(&events->pmu_lock); in pmu_handle_irq()
1036 * This should work regardless of whether we have per-counter overflow in pmu_handle_irq()
1040 struct perf_event *event = events->events[idx]; in pmu_handle_irq()
1060 raw_spin_unlock(&events->pmu_lock); in pmu_handle_irq()
1082 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); in hw_perf_event_destroy()
1083 atomic_t *active_events = &cci_pmu->active_events; in hw_perf_event_destroy()
1084 struct mutex *reserve_mutex = &cci_pmu->reserve_mutex; in hw_perf_event_destroy()
1095 struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events; in cci_pmu_enable()
1096 bool enabled = !bitmap_empty(hw_events->used_mask, cci_pmu->num_cntrs); in cci_pmu_enable()
1102 raw_spin_lock_irqsave(&hw_events->pmu_lock, flags); in cci_pmu_enable()
1104 raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags); in cci_pmu_enable()
1111 struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events; in cci_pmu_disable()
1114 raw_spin_lock_irqsave(&hw_events->pmu_lock, flags); in cci_pmu_disable()
1116 raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags); in cci_pmu_disable()
1120 * Check if the idx represents a non-programmable counter.
1126 return (idx >= 0) && (idx < cci_pmu->model->fixed_hw_cntrs); in pmu_fixed_hw_idx()
1131 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); in cci_pmu_start()
1132 struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events; in cci_pmu_start()
1133 struct hw_perf_event *hwc = &event->hw; in cci_pmu_start()
1134 int idx = hwc->idx; in cci_pmu_start()
1142 WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); in cci_pmu_start()
1144 hwc->state = 0; in cci_pmu_start()
1147 dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); in cci_pmu_start()
1151 raw_spin_lock_irqsave(&hw_events->pmu_lock, flags); in cci_pmu_start()
1155 pmu_set_event(cci_pmu, idx, hwc->config_base); in cci_pmu_start()
1160 raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags); in cci_pmu_start()
1165 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); in cci_pmu_stop()
1166 struct hw_perf_event *hwc = &event->hw; in cci_pmu_stop()
1167 int idx = hwc->idx; in cci_pmu_stop()
1169 if (hwc->state & PERF_HES_STOPPED) in cci_pmu_stop()
1173 dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); in cci_pmu_stop()
1183 hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; in cci_pmu_stop()
1188 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); in cci_pmu_add()
1189 struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events; in cci_pmu_add()
1190 struct hw_perf_event *hwc = &event->hw; in cci_pmu_add()
1198 event->hw.idx = idx; in cci_pmu_add()
1199 hw_events->events[idx] = event; in cci_pmu_add()
1201 hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; in cci_pmu_add()
1213 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); in cci_pmu_del()
1214 struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events; in cci_pmu_del()
1215 struct hw_perf_event *hwc = &event->hw; in cci_pmu_del()
1216 int idx = hwc->idx; in cci_pmu_del()
1219 hw_events->events[idx] = NULL; in cci_pmu_del()
1220 clear_bit(idx, hw_events->used_mask); in cci_pmu_del()
1233 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The in validate_event()
1234 * core perf code won't check that the pmu->ctx == leader->ctx in validate_event()
1235 * until after pmu->event_init(event). in validate_event()
1237 if (event->pmu != cci_pmu) in validate_event()
1240 if (event->state < PERF_EVENT_STATE_OFF) in validate_event()
1243 if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec) in validate_event()
1251 struct perf_event *sibling, *leader = event->group_leader; in validate_group()
1252 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); in validate_group()
1261 bitmap_zero(mask, cci_pmu->num_cntrs); in validate_group()
1263 if (!validate_event(event->pmu, &fake_pmu, leader)) in validate_group()
1264 return -EINVAL; in validate_group()
1267 if (!validate_event(event->pmu, &fake_pmu, sibling)) in validate_group()
1268 return -EINVAL; in validate_group()
1271 if (!validate_event(event->pmu, &fake_pmu, event)) in validate_group()
1272 return -EINVAL; in validate_group()
1279 struct hw_perf_event *hwc = &event->hw; in __hw_perf_event_init()
1285 pr_debug("event %x:%llx not supported\n", event->attr.type, in __hw_perf_event_init()
1286 event->attr.config); in __hw_perf_event_init()
1292 * hardware. Use -1 to signify that we haven't decided where to put it in __hw_perf_event_init()
1295 hwc->idx = -1; in __hw_perf_event_init()
1296 hwc->config_base = 0; in __hw_perf_event_init()
1297 hwc->config = 0; in __hw_perf_event_init()
1298 hwc->event_base = 0; in __hw_perf_event_init()
1303 hwc->config_base |= (unsigned long)mapping; in __hw_perf_event_init()
1305 if (event->group_leader != event) { in __hw_perf_event_init()
1307 return -EINVAL; in __hw_perf_event_init()
1315 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); in cci_pmu_event_init()
1316 atomic_t *active_events = &cci_pmu->active_events; in cci_pmu_event_init()
1319 if (event->attr.type != event->pmu->type) in cci_pmu_event_init()
1320 return -ENOENT; in cci_pmu_event_init()
1323 if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) in cci_pmu_event_init()
1324 return -EOPNOTSUPP; in cci_pmu_event_init()
1329 * handle cpu == -1 and pid == -1 for this case. in cci_pmu_event_init()
1335 if (event->cpu < 0) in cci_pmu_event_init()
1336 return -EINVAL; in cci_pmu_event_init()
1337 event->cpu = cci_pmu->cpu; in cci_pmu_event_init()
1339 event->destroy = hw_perf_event_destroy; in cci_pmu_event_init()
1341 mutex_lock(&cci_pmu->reserve_mutex); in cci_pmu_event_init()
1346 mutex_unlock(&cci_pmu->reserve_mutex); in cci_pmu_event_init()
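These constraints map directly onto userspace: a CCI event must be opened system-wide on a CPU (pid == -1, cpu >= 0) and cannot be a sampling event. A hedged sketch of such an open; the PMU type must really be read from /sys/bus/event_source/devices/<pmu>/type, and the config encoding follows the format attributes shown earlier:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <string.h>
#include <unistd.h>

static int open_cci_event(int pmu_type, unsigned long long config, int cpu)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type = pmu_type;	/* dynamic type from sysfs */
	attr.size = sizeof(attr);
	attr.config = config;	/* source/event fields per the PMU's "format" */
	attr.disabled = 1;

	/* uncore PMU: target a CPU, never a task */
	return syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
}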
1364 return cpumap_print_to_pagebuf(true, buf, cpumask_of(cci_pmu->cpu)); in pmu_cpumask_attr_show()
1398 const struct cci_pmu_model *model = cci_pmu->model; in cci_pmu_init()
1399 char *name = model->name; in cci_pmu_init()
1402 if (WARN_ON(model->num_hw_cntrs > NUM_HW_CNTRS_MAX)) in cci_pmu_init()
1403 return -EINVAL; in cci_pmu_init()
1404 if (WARN_ON(model->fixed_hw_cntrs > FIXED_HW_CNTRS_MAX)) in cci_pmu_init()
1405 return -EINVAL; in cci_pmu_init()
1407 pmu_event_attr_group.attrs = model->event_attrs; in cci_pmu_init()
1408 pmu_format_attr_group.attrs = model->format_attrs; in cci_pmu_init()
1410 cci_pmu->pmu = (struct pmu) { in cci_pmu_init()
1412 .name = cci_pmu->model->name, in cci_pmu_init()
1426 cci_pmu->plat_device = pdev; in cci_pmu_init()
1428 if (num_cntrs > cci_pmu->model->num_hw_cntrs) { in cci_pmu_init()
1429 dev_warn(&pdev->dev, in cci_pmu_init()
1432 num_cntrs, cci_pmu->model->num_hw_cntrs); in cci_pmu_init()
1433 num_cntrs = cci_pmu->model->num_hw_cntrs; in cci_pmu_init()
1435 cci_pmu->num_cntrs = num_cntrs + cci_pmu->model->fixed_hw_cntrs; in cci_pmu_init()
1437 return perf_pmu_register(&cci_pmu->pmu, name, -1); in cci_pmu_init()
1444 if (!g_cci_pmu || cpu != g_cci_pmu->cpu) in cci_pmu_offline_cpu()
1451 perf_pmu_migrate_context(&g_cci_pmu->pmu, cpu, target); in cci_pmu_offline_cpu()
1452 g_cci_pmu->cpu = target; in cci_pmu_offline_cpu()
1554 .compatible = "arm,cci-400-pmu",
1558 .compatible = "arm,cci-400-pmu,r0",
1562 .compatible = "arm,cci-400-pmu,r1",
1568 .compatible = "arm,cci-500-pmu,r0",
1572 .compatible = "arm,cci-550-pmu,r0",
1603 return ERR_PTR(-ENOMEM); in cci_pmu_alloc()
1605 cci_pmu->ctrl_base = *(void __iomem **)dev->platform_data; in cci_pmu_alloc()
1610 "DEPRECATED compatible property, requires secure access to CCI registers"); in cci_pmu_alloc()
1614 dev_warn(dev, "CCI PMU version not supported\n"); in cci_pmu_alloc()
1615 return ERR_PTR(-ENODEV); in cci_pmu_alloc()
1618 cci_pmu->model = model; in cci_pmu_alloc()
1619 cci_pmu->irqs = devm_kcalloc(dev, CCI_PMU_MAX_HW_CNTRS(model), in cci_pmu_alloc()
1620 sizeof(*cci_pmu->irqs), GFP_KERNEL); in cci_pmu_alloc()
1621 if (!cci_pmu->irqs) in cci_pmu_alloc()
1622 return ERR_PTR(-ENOMEM); in cci_pmu_alloc()
1623 cci_pmu->hw_events.events = devm_kcalloc(dev, in cci_pmu_alloc()
1625 sizeof(*cci_pmu->hw_events.events), in cci_pmu_alloc()
1627 if (!cci_pmu->hw_events.events) in cci_pmu_alloc()
1628 return ERR_PTR(-ENOMEM); in cci_pmu_alloc()
1629 cci_pmu->hw_events.used_mask = devm_bitmap_zalloc(dev, in cci_pmu_alloc()
1632 if (!cci_pmu->hw_events.used_mask) in cci_pmu_alloc()
1633 return ERR_PTR(-ENOMEM); in cci_pmu_alloc()
1643 cci_pmu = cci_pmu_alloc(&pdev->dev); in cci_pmu_probe()
1647 cci_pmu->base = devm_platform_ioremap_resource(pdev, 0); in cci_pmu_probe()
1648 if (IS_ERR(cci_pmu->base)) in cci_pmu_probe()
1649 return PTR_ERR(cci_pmu->base); in cci_pmu_probe()
1652 * CCI PMU has one overflow interrupt per counter; but some may be tied in cci_pmu_probe()
1655 cci_pmu->nr_irqs = 0; in cci_pmu_probe()
1656 for (i = 0; i < CCI_PMU_MAX_HW_CNTRS(cci_pmu->model); i++) { in cci_pmu_probe()
1661 if (is_duplicate_irq(irq, cci_pmu->irqs, cci_pmu->nr_irqs)) in cci_pmu_probe()
1664 cci_pmu->irqs[cci_pmu->nr_irqs++] = irq; in cci_pmu_probe()
1671 if (i < CCI_PMU_MAX_HW_CNTRS(cci_pmu->model)) { in cci_pmu_probe()
1672 dev_warn(&pdev->dev, "Incorrect number of interrupts: %d, should be %d\n", in cci_pmu_probe()
1673 i, CCI_PMU_MAX_HW_CNTRS(cci_pmu->model)); in cci_pmu_probe()
1674 return -EINVAL; in cci_pmu_probe()
1677 raw_spin_lock_init(&cci_pmu->hw_events.pmu_lock); in cci_pmu_probe()
1678 mutex_init(&cci_pmu->reserve_mutex); in cci_pmu_probe()
1679 atomic_set(&cci_pmu->active_events, 0); in cci_pmu_probe()
1681 cci_pmu->cpu = raw_smp_processor_id(); in cci_pmu_probe()
1684 "perf/arm/cci:online", NULL, in cci_pmu_probe()
1691 pr_info("ARM %s PMU driver probed\n", cci_pmu->model->name); in cci_pmu_probe()
1706 perf_pmu_unregister(&g_cci_pmu->pmu); in cci_pmu_remove()
1724 MODULE_DESCRIPTION("ARM CCI PMU support");