Lines matching "smmu-v2" (drivers/perf/arm_smmuv3_pmu.c)
1 // SPDX-License-Identifier: GPL-2.0
9 * <phys_addr_page> is the physical page address of the SMMU PMCG wrapped
15 * filter_enable - 0 = no filtering, 1 = filtering enabled
16 * filter_span - 0 = exact match, 1 = pattern match
17 * filter_stream_id - pattern to filter against
19 * To match a partial StreamID where the X most-significant bits must match
20 * but the Y least-significant bits may take any value, STREAMID is programmed
21 * with a value that contains:
22 * STREAMID[Y - 1] == 0.
23 * STREAMID[Y - 2:0] == 1 (where Y > 1).
27 * Example: perf stat -e smmuv3_pmcg_ff88840/transaction,filter_enable=1,
28 * filter_span=1,filter_stream_id=0x42/ -a netperf
31 * information is available in the SMMU documentation.
33 * SMMU events are not attributable to a CPU, so task mode and sampling
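A minimal sketch of the span encoding described in the comment above, as plain C with a hypothetical helper name (not part of this driver): to cover 2^Y consecutive StreamIDs starting at a 2^Y-aligned base, keep bit Y-1 clear and set bits Y-2..0.

/* Hypothetical helper, not in the driver: build a filter_stream_id value
 * for filter_span=1 that matches base_sid in the upper bits and ignores
 * the low span_bits (Y) bits of the event StreamID. */
static inline unsigned int pmcg_span_stream_id(unsigned int base_sid,
					       unsigned int span_bits)
{
	/* STREAMID[Y - 1] == 0, STREAMID[Y - 2:0] == 1 (for Y > 1) */
	unsigned int low = span_bits > 1 ? (1u << (span_bits - 1)) - 1 : 0;

	return (base_sid << span_bits) | low;
}

/* pmcg_span_stream_id(0x42, 4) == 0x427, matching StreamIDs 0x420-0x42f */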
91 /* IMP-DEF ID registers */
146 event->attr._config); \
159 smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL); in smmu_pmu_enable()
160 writel(SMMU_PMCG_CR_ENABLE, smmu_pmu->reg_base + SMMU_PMCG_CR); in smmu_pmu_enable()
171 for_each_set_bit(idx, smmu_pmu->used_counters, smmu_pmu->num_counters) in smmu_pmu_enable_quirk_hip08_09()
172 smmu_pmu_apply_event_filter(smmu_pmu, smmu_pmu->events[idx], idx); in smmu_pmu_enable_quirk_hip08_09()
181 writel(0, smmu_pmu->reg_base + SMMU_PMCG_CR); in smmu_pmu_disable()
182 writel(0, smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL); in smmu_pmu_disable()
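/*
 * hip08/09 quirk (per the driver's comment, elided by the line filter):
 * the global disable above can fail to stop counting, so an invalid
 * event type (0xffff) is also written to every used counter's EVTYPER.
 */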
195 for_each_set_bit(idx, smmu_pmu->used_counters, smmu_pmu->num_counters) in smmu_pmu_disable_quirk_hip08_09()
196 writel(0xffff, smmu_pmu->reg_base + SMMU_PMCG_EVTYPER(idx)); in smmu_pmu_disable_quirk_hip08_09()
204 if (smmu_pmu->counter_mask & BIT(32)) in smmu_pmu_counter_set_value()
205 writeq(value, smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 8)); in smmu_pmu_counter_set_value()
207 writel(value, smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 4)); in smmu_pmu_counter_set_value()
214 if (smmu_pmu->counter_mask & BIT(32)) in smmu_pmu_counter_get_value()
215 value = readq(smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 8)); in smmu_pmu_counter_get_value()
217 value = readl(smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 4)); in smmu_pmu_counter_get_value()
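/*
 * Note: SMMU_PMCG_EVCNTR(idx, stride) addresses counter idx at a 4- or
 * 8-byte stride; counters wider than 32 bits (BIT(32) set in
 * counter_mask) are read and written as one 64-bit register, narrower
 * ones as a 32-bit register.
 */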
224 writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_CNTENSET0); in smmu_pmu_counter_enable()
229 writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_CNTENCLR0); in smmu_pmu_counter_disable()
234 writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_INTENSET0); in smmu_pmu_interrupt_enable()
240 writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_INTENCLR0); in smmu_pmu_interrupt_disable()
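/*
 * Counter enable and interrupt masking use architected SET/CLR register
 * pairs (CNTENSET0/CNTENCLR0, INTENSET0/INTENCLR0): writing BIT(idx) to
 * the SET register enables that one counter and writing it to the CLR
 * register disables it, with no read-modify-write needed.
 */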
246 writel(val, smmu_pmu->reg_base + SMMU_PMCG_EVTYPER(idx)); in smmu_pmu_set_evtyper()
251 writel(val, smmu_pmu->reg_base + SMMU_PMCG_SMR(idx)); in smmu_pmu_set_smr()
256 struct hw_perf_event *hwc = &event->hw; in smmu_pmu_event_update()
257 struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu); in smmu_pmu_event_update()
259 u32 idx = hwc->idx; in smmu_pmu_event_update()
262 prev = local64_read(&hwc->prev_count); in smmu_pmu_event_update()
264 } while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev); in smmu_pmu_event_update()
267 delta = now - prev; in smmu_pmu_event_update()
268 delta &= smmu_pmu->counter_mask; in smmu_pmu_event_update()
270 local64_add(delta, &event->count); in smmu_pmu_event_update()
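The masked subtraction above is what makes the update safe across a counter wrap. A self-contained userspace illustration (plain C, not driver code), assuming a 32-bit counter:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t mask = 0xffffffffULL;	/* 32-bit counter_mask */
	uint64_t prev = 0xfffffff0ULL;	/* previous snapshot */
	uint64_t now  = 0x00000010ULL;	/* hardware counter wrapped */

	/* unsigned subtraction wraps modulo 2^64; truncating to the
	 * counter width recovers the 0x20 events actually counted */
	uint64_t delta = (now - prev) & mask;

	assert(delta == 0x20);
	return 0;
}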
276 u32 idx = hwc->idx; in smmu_pmu_set_period()
279 if (smmu_pmu->options & SMMU_PMCG_EVCNTR_RDONLY) { in smmu_pmu_set_period()
295 new = smmu_pmu->counter_mask >> 1; in smmu_pmu_set_period()
299 local64_set(&hwc->prev_count, new); in smmu_pmu_set_period()
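/*
 * Per the driver's comments (elided by the line filter): with
 * SMMU_PMCG_EVCNTR_RDONLY the counter cannot be rewritten, so counting
 * resumes from the current hardware value; otherwise the counter starts
 * at half its maximum so that even under extreme interrupt latency it
 * should not wrap past the prev_count snapshot before the overflow IRQ
 * is handled.
 */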
305 struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu); in smmu_pmu_set_event_filter()
330 unsigned int cur_idx, num_ctrs = smmu_pmu->num_counters; in smmu_pmu_apply_event_filter()
338 cur_idx = find_first_bit(smmu_pmu->used_counters, num_ctrs); in smmu_pmu_apply_event_filter()
340 * Per-counter filtering, or scheduling the first globally-filtered in smmu_pmu_apply_event_filter()
343 if (!smmu_pmu->global_filter || cur_idx == num_ctrs) { in smmu_pmu_apply_event_filter()
349 if (smmu_pmu_check_global_filter(smmu_pmu->events[cur_idx], event)) { in smmu_pmu_apply_event_filter()
354 return -EAGAIN; in smmu_pmu_apply_event_filter()
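/*
 * With a global filter (CFGR.SID_FILTER_TYPE set) the span/StreamID
 * programmed for the first scheduled event applies to every counter, so
 * a later event either carries matching filter settings and only
 * programs its own EVTYPER, or fails to schedule with -EAGAIN.
 */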
361 unsigned int num_ctrs = smmu_pmu->num_counters; in smmu_pmu_get_event_idx()
363 idx = find_first_zero_bit(smmu_pmu->used_counters, num_ctrs); in smmu_pmu_get_event_idx()
366 return -EAGAIN; in smmu_pmu_get_event_idx()
372 set_bit(idx, smmu_pmu->used_counters); in smmu_pmu_get_event_idx()
380 if (new->pmu != curr->pmu) in smmu_pmu_events_compatible()
383 if (to_smmu_pmu(new->pmu)->global_filter && in smmu_pmu_events_compatible()
397 struct hw_perf_event *hwc = &event->hw; in smmu_pmu_event_init()
398 struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu); in smmu_pmu_event_init()
399 struct device *dev = smmu_pmu->dev; in smmu_pmu_event_init()
404 if (event->attr.type != event->pmu->type) in smmu_pmu_event_init()
405 return -ENOENT; in smmu_pmu_event_init()
407 if (hwc->sample_period) { in smmu_pmu_event_init()
409 return -EOPNOTSUPP; in smmu_pmu_event_init()
412 if (event->cpu < 0) { in smmu_pmu_event_init()
413 dev_dbg(dev, "Per-task mode not supported\n"); in smmu_pmu_event_init()
414 return -EOPNOTSUPP; in smmu_pmu_event_init()
420 (!test_bit(event_id, smmu_pmu->supported_events))) { in smmu_pmu_event_init()
422 return -EINVAL; in smmu_pmu_event_init()
426 if (!is_software_event(event->group_leader)) { in smmu_pmu_event_init()
427 if (!smmu_pmu_events_compatible(event->group_leader, event)) in smmu_pmu_event_init()
428 return -EINVAL; in smmu_pmu_event_init()
430 if (++group_num_events > smmu_pmu->num_counters) in smmu_pmu_event_init()
431 return -EINVAL; in smmu_pmu_event_init()
438 event->cpu = smmu_pmu->on_cpu; in smmu_pmu_event_init()
440 hwc->idx = -1; in smmu_pmu_event_init()
442 if (event->group_leader == event) in smmu_pmu_event_init()
445 for_each_sibling_event(sibling, event->group_leader) { in smmu_pmu_event_init()
450 return -EINVAL; in smmu_pmu_event_init()
452 if (++group_num_events > smmu_pmu->num_counters) in smmu_pmu_event_init()
453 return -EINVAL; in smmu_pmu_event_init()
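/*
 * Group validation: each hardware member of the group must target this
 * PMU with compatible filter settings, and the whole group must fit in
 * num_counters, because perf schedules group members together.
 */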
461 struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu); in smmu_pmu_event_start()
462 struct hw_perf_event *hwc = &event->hw; in smmu_pmu_event_start()
463 int idx = hwc->idx; in smmu_pmu_event_start()
465 hwc->state = 0; in smmu_pmu_event_start()
474 struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu); in smmu_pmu_event_stop()
475 struct hw_perf_event *hwc = &event->hw; in smmu_pmu_event_stop()
476 int idx = hwc->idx; in smmu_pmu_event_stop()
478 if (hwc->state & PERF_HES_STOPPED) in smmu_pmu_event_stop()
484 hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; in smmu_pmu_event_stop()
489 struct hw_perf_event *hwc = &event->hw; in smmu_pmu_event_add()
491 struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu); in smmu_pmu_event_add()
497 hwc->idx = idx; in smmu_pmu_event_add()
498 hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; in smmu_pmu_event_add()
499 smmu_pmu->events[idx] = event; in smmu_pmu_event_add()
500 local64_set(&hwc->prev_count, 0); in smmu_pmu_event_add()
515 struct hw_perf_event *hwc = &event->hw; in smmu_pmu_event_del()
516 struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu); in smmu_pmu_event_del()
517 int idx = hwc->idx; in smmu_pmu_event_del()
521 smmu_pmu->events[idx] = NULL; in smmu_pmu_event_del()
522 clear_bit(idx, smmu_pmu->used_counters); in smmu_pmu_event_del()
540 return cpumap_print_to_pagebuf(true, buf, cpumask_of(smmu_pmu->on_cpu)); in smmu_pmu_cpumask_show()
564 return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id); in smmu_pmu_event_show()
591 if (test_bit(pmu_attr->id, smmu_pmu->supported_events)) in smmu_pmu_event_is_visible()
592 return attr->mode; in smmu_pmu_event_is_visible()
609 return sysfs_emit(page, "0x%08x\n", smmu_pmu->iidr); in smmu_pmu_identifier_attr_show()
619 if (!smmu_pmu->iidr) in smmu_pmu_identifier_attr_visible()
621 return attr->mode; in smmu_pmu_identifier_attr_visible()
638 PMU_FORMAT_ATTR(event, "config:0-15");
639 PMU_FORMAT_ATTR(filter_stream_id, "config1:0-31");
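The remaining two format attributes, filter_span at config1:32 and filter_enable at config1:33, did not match the line filter here. A userspace sketch (not driver code; the PMU type number and sysfs path are runtime-specific) tying the formats together, counting transactions with an exact StreamID match as in the header example:

#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* pmu_type comes from /sys/bus/event_source/devices/smmuv3_pmcg_<addr>/type */
static int open_smmu_transactions(int pmu_type, int cpu, unsigned int sid)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = pmu_type;
	attr.config = 0x1;		/* event=transaction */
	attr.config1 = (1ULL << 33)	/* filter_enable=1 */
		     | (0ULL << 32)	/* filter_span=0: exact match */
		     | sid;		/* filter_stream_id */

	/* uncore event: system-wide (pid == -1) on a single CPU */
	return syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
}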
674 if (cpu != smmu_pmu->on_cpu) in smmu_pmu_offline_cpu()
681 perf_pmu_migrate_context(&smmu_pmu->pmu, cpu, target); in smmu_pmu_offline_cpu()
682 smmu_pmu->on_cpu = target; in smmu_pmu_offline_cpu()
683 WARN_ON(irq_set_affinity(smmu_pmu->irq, cpumask_of(target))); in smmu_pmu_offline_cpu()
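/*
 * Uncore housekeeping: if the CPU owning the PMU context goes offline,
 * the perf context is migrated to another online CPU and the overflow
 * IRQ affinity follows it.
 */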
695 ovsr = readq(smmu_pmu->reloc_base + SMMU_PMCG_OVSSET0); in smmu_pmu_handle_irq()
699 writeq(ovsr, smmu_pmu->reloc_base + SMMU_PMCG_OVSCLR0); in smmu_pmu_handle_irq()
702 for_each_set_bit(idx, ovs, smmu_pmu->num_counters) { in smmu_pmu_handle_irq()
703 struct perf_event *event = smmu_pmu->events[idx]; in smmu_pmu_handle_irq()
710 hwc = &event->hw; in smmu_pmu_handle_irq()
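/*
 * (Elided by the line filter:) the handler then calls
 * smmu_pmu_event_update() for each overflowed counter to fold the
 * wrapped value into event->count, and smmu_pmu_set_period() to re-arm
 * the counter.
 */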
731 doorbell = (((u64)msg->address_hi) << 32) | msg->address_lo; in smmu_pmu_write_msi_msg()
734 writeq_relaxed(doorbell, pmu->reg_base + SMMU_PMCG_IRQ_CFG0); in smmu_pmu_write_msi_msg()
735 writel_relaxed(msg->data, pmu->reg_base + SMMU_PMCG_IRQ_CFG1); in smmu_pmu_write_msi_msg()
737 pmu->reg_base + SMMU_PMCG_IRQ_CFG2); in smmu_pmu_write_msi_msg()
742 struct device *dev = pmu->dev; in smmu_pmu_setup_msi()
746 writeq_relaxed(0, pmu->reg_base + SMMU_PMCG_IRQ_CFG0); in smmu_pmu_setup_msi()
749 if (!(readl(pmu->reg_base + SMMU_PMCG_CFGR) & SMMU_PMCG_CFGR_MSI)) in smmu_pmu_setup_msi()
758 pmu->irq = msi_get_virq(dev, 0); in smmu_pmu_setup_msi()
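/*
 * MSI support is optional: when CFGR.MSI is set, a platform MSI is
 * allocated, its doorbell programmed into IRQ_CFG0-2 above, and the
 * resulting virq used in place of the wired overflow interrupt.
 */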
767 int irq, ret = -ENXIO; in smmu_pmu_setup_irq()
771 irq = pmu->irq; in smmu_pmu_setup_irq()
773 ret = devm_request_irq(pmu->dev, irq, smmu_pmu_handle_irq, in smmu_pmu_setup_irq()
774 flags, "smmuv3-pmu", pmu); in smmu_pmu_setup_irq()
780 u64 counter_present_mask = GENMASK_ULL(smmu_pmu->num_counters - 1, 0); in smmu_pmu_reset()
782 smmu_pmu_disable(&smmu_pmu->pmu); in smmu_pmu_reset()
786 smmu_pmu->reg_base + SMMU_PMCG_CNTENCLR0); in smmu_pmu_reset()
788 smmu_pmu->reg_base + SMMU_PMCG_INTENCLR0); in smmu_pmu_reset()
790 smmu_pmu->reloc_base + SMMU_PMCG_OVSCLR0); in smmu_pmu_reset()
797 model = *(u32 *)dev_get_platdata(smmu_pmu->dev); in smmu_pmu_get_acpi_options()
802 smmu_pmu->options |= SMMU_PMCG_EVCNTR_RDONLY | SMMU_PMCG_HARDEN_DISABLE; in smmu_pmu_get_acpi_options()
805 smmu_pmu->options |= SMMU_PMCG_HARDEN_DISABLE; in smmu_pmu_get_acpi_options()
809 dev_notice(smmu_pmu->dev, "option mask 0x%x\n", smmu_pmu->options); in smmu_pmu_get_acpi_options()
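/*
 * The IORT model field selects quirks: HiSilicon HIP08 (erratum
 * 162001800) needs read-only counter handling plus the hardened
 * disable, HIP09 the hardened disable only; SMMU_PMCG_HARDEN_DISABLE is
 * what later installs the *_quirk_hip08_09 callbacks below.
 */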
814 return of_device_is_compatible(smmu_pmu->dev->of_node, in smmu_pmu_coresight_id_regs()
815 "arm,mmu-600-pmcg"); in smmu_pmu_coresight_id_regs()
820 u32 iidr = readl_relaxed(smmu_pmu->reg_base + SMMU_PMCG_IIDR); in smmu_pmu_get_iidr()
823 u32 pidr0 = readl(smmu_pmu->reg_base + SMMU_PMCG_PIDR0); in smmu_pmu_get_iidr()
824 u32 pidr1 = readl(smmu_pmu->reg_base + SMMU_PMCG_PIDR1); in smmu_pmu_get_iidr()
825 u32 pidr2 = readl(smmu_pmu->reg_base + SMMU_PMCG_PIDR2); in smmu_pmu_get_iidr()
826 u32 pidr3 = readl(smmu_pmu->reg_base + SMMU_PMCG_PIDR3); in smmu_pmu_get_iidr()
827 u32 pidr4 = readl(smmu_pmu->reg_base + SMMU_PMCG_PIDR4); in smmu_pmu_get_iidr()
844 smmu_pmu->iidr = iidr; in smmu_pmu_get_iidr()
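/*
 * Arm MMU-600's PMCG predates the architected IIDR register, so for
 * "arm,mmu-600-pmcg" an IIDR-format value is synthesized from the
 * CoreSight peripheral ID registers (PIDR0-PIDR4) read above.
 */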
855 struct device *dev = &pdev->dev; in smmu_pmu_probe()
859 return -ENOMEM; in smmu_pmu_probe()
861 smmu_pmu->dev = dev; in smmu_pmu_probe()
864 smmu_pmu->pmu = (struct pmu) { in smmu_pmu_probe()
879 smmu_pmu->reg_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res_0); in smmu_pmu_probe()
880 if (IS_ERR(smmu_pmu->reg_base)) in smmu_pmu_probe()
881 return PTR_ERR(smmu_pmu->reg_base); in smmu_pmu_probe()
883 cfgr = readl_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CFGR); in smmu_pmu_probe()
887 smmu_pmu->reloc_base = devm_platform_ioremap_resource(pdev, 1); in smmu_pmu_probe()
888 if (IS_ERR(smmu_pmu->reloc_base)) in smmu_pmu_probe()
889 return PTR_ERR(smmu_pmu->reloc_base); in smmu_pmu_probe()
891 smmu_pmu->reloc_base = smmu_pmu->reg_base; in smmu_pmu_probe()
896 smmu_pmu->irq = irq; in smmu_pmu_probe()
898 ceid_64[0] = readq_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CEID0); in smmu_pmu_probe()
899 ceid_64[1] = readq_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CEID1); in smmu_pmu_probe()
900 bitmap_from_arr32(smmu_pmu->supported_events, (u32 *)ceid_64, in smmu_pmu_probe()
903 smmu_pmu->num_counters = FIELD_GET(SMMU_PMCG_CFGR_NCTR, cfgr) + 1; in smmu_pmu_probe()
905 smmu_pmu->global_filter = !!(cfgr & SMMU_PMCG_CFGR_SID_FILTER_TYPE); in smmu_pmu_probe()
908 smmu_pmu->counter_mask = GENMASK_ULL(reg_size, 0); in smmu_pmu_probe()
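/*
 * reg_size is the CFGR.SIZE field, i.e. counter width minus one:
 * SIZE == 31 yields counter_mask == GENMASK_ULL(31, 0) for 32-bit
 * counters, while anything wider sets BIT(32) in the mask, the exact
 * condition the 64- vs 32-bit counter accessors test earlier.
 */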
914 dev_err(dev, "Setup irq failed, PMU @%pa\n", &res_0->start); in smmu_pmu_probe()
920 name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "smmuv3_pmcg_%llx", in smmu_pmu_probe()
921 (res_0->start) >> SMMU_PMCG_PA_SHIFT); in smmu_pmu_probe()
923 dev_err(dev, "Create name failed, PMU @%pa\n", &res_0->start); in smmu_pmu_probe()
924 return -EINVAL; in smmu_pmu_probe()
927 if (!dev->of_node) in smmu_pmu_probe()
935 if (smmu_pmu->options & SMMU_PMCG_HARDEN_DISABLE) { in smmu_pmu_probe()
936 smmu_pmu->pmu.pmu_enable = smmu_pmu_enable_quirk_hip08_09; in smmu_pmu_probe()
937 smmu_pmu->pmu.pmu_disable = smmu_pmu_disable_quirk_hip08_09; in smmu_pmu_probe()
941 smmu_pmu->on_cpu = raw_smp_processor_id(); in smmu_pmu_probe()
942 WARN_ON(irq_set_affinity(smmu_pmu->irq, cpumask_of(smmu_pmu->on_cpu))); in smmu_pmu_probe()
945 &smmu_pmu->node); in smmu_pmu_probe()
948 err, &res_0->start); in smmu_pmu_probe()
952 err = perf_pmu_register(&smmu_pmu->pmu, name, -1); in smmu_pmu_probe()
955 err, &res_0->start); in smmu_pmu_probe()
960 &res_0->start, smmu_pmu->num_counters, in smmu_pmu_probe()
961 smmu_pmu->global_filter ? "Global(Counter0)" : in smmu_pmu_probe()
967 cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node); in smmu_pmu_probe()
975 perf_pmu_unregister(&smmu_pmu->pmu); in smmu_pmu_remove()
976 cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node); in smmu_pmu_remove()
985 smmu_pmu_disable(&smmu_pmu->pmu); in smmu_pmu_shutdown()
990 { .compatible = "arm,smmu-v3-pmcg" },
998 .name = "arm-smmu-v3-pmcg",
1034 MODULE_ALIAS("platform:arm-smmu-v3-pmcg");
1038 MODULE_LICENSE("GPL v2");