Lines matching refs: iommu_pmu (Intel VT-d IOMMU perfmon driver)

62 static inline struct iommu_pmu *dev_to_iommu_pmu(struct device *dev)  in dev_to_iommu_pmu()
68 return container_of(dev_get_drvdata(dev), struct iommu_pmu, pmu); in dev_to_iommu_pmu()
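
dev_to_iommu_pmu() recovers the wrapping iommu_pmu from the struct pmu pointer kept in the device's driver data. A minimal user-space sketch of the container_of() pattern it relies on, with hypothetical struct layouts:

#include <stddef.h>
#include <stdio.h>

/* container_of(): map a pointer to an embedded member back to the
 * structure that contains it, the same trick dev_to_iommu_pmu() uses
 * on the struct pmu pointer stored in the device's driver data. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct pmu { const char *name; };            /* stand-in for the perf type */
struct iommu_pmu { int num_cntr; struct pmu pmu; };

int main(void)
{
	struct iommu_pmu ipmu = { .num_cntr = 4, .pmu = { .name = "dmar0" } };
	struct pmu *p = &ipmu.pmu;               /* what drvdata would hold */
	struct iommu_pmu *back = container_of(p, struct iommu_pmu, pmu);

	printf("%s: %d counters\n", back->pmu.name, back->num_cntr);
	return 0;
}
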
83 struct iommu_pmu *iommu_pmu = dev_to_iommu_pmu(dev); \
85 if (!iommu_pmu) \
87 return (iommu_pmu->filter & _filter) ? attr->mode : 0; \
120 if ((iommu_pmu->filter & _filter) && iommu_pmu_en_##_name(_econfig)) { \
121 dmar_writel(iommu_pmu->cfg_reg + _idx * IOMMU_PMU_CFG_OFFSET + \
130 if (iommu_pmu->filter & _filter) { \
131 dmar_writel(iommu_pmu->cfg_reg + _idx * IOMMU_PMU_CFG_OFFSET + \
157 struct iommu_pmu *iommu_pmu = dev_to_iommu_pmu(dev); \
159 if (!iommu_pmu) \
161 return (iommu_pmu->evcap[_g_idx] & _event) ? attr->mode : 0; \
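
Both is_visible callbacks above follow the same gate: a sysfs attribute is exposed with its normal mode only when the matching bit is set in iommu_pmu->filter or iommu_pmu->evcap[group]. A compact sketch of that gating, with hypothetical capability values:

#include <stdio.h>

typedef unsigned long long u64;

/* is_visible-style gate: expose the attribute with its normal mode only
 * when the hardware advertises the matching capability bit; returning 0
 * hides it from sysfs entirely. */
static unsigned int attr_mode(u64 capmask, u64 required, unsigned int mode)
{
	return (capmask & required) ? mode : 0;
}

int main(void)
{
	u64 filter = 0x5;                        /* hypothetical filter caps */
	u64 evcap  = 0x3;                        /* hypothetical event caps */

	printf("filter attr: %#o\n", attr_mode(filter, 0x1, 0444)); /* shown */
	printf("event attr:  %#o\n", attr_mode(evcap, 0x8, 0444));  /* hidden */
	return 0;
}
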
234 iommu_event_base(struct iommu_pmu *iommu_pmu, int idx) in iommu_event_base() argument
236 return iommu_pmu->cntr_reg + idx * iommu_pmu->cntr_stride; in iommu_event_base()
240 iommu_config_base(struct iommu_pmu *iommu_pmu, int idx) in iommu_config_base() argument
242 return iommu_pmu->cfg_reg + idx * IOMMU_PMU_CFG_OFFSET; in iommu_config_base()
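
iommu_event_base() and iommu_config_base() compute per-counter register addresses as base + index * stride; the counter stride is read from the capability register at probe time, while the config stride is the fixed IOMMU_PMU_CFG_OFFSET. A sketch of the addressing over a plain buffer, with hypothetical sizes:

#include <stdint.h>
#include <stdio.h>

#define CFG_OFFSET 0x100                     /* stand-in for IOMMU_PMU_CFG_OFFSET */

/* Per-counter register addressing: the counter stride is discovered
 * from the hardware, the config stride is a fixed constant. */
static uint8_t *event_base(uint8_t *cntr_reg, size_t stride, int idx)
{
	return cntr_reg + (size_t)idx * stride;
}

static uint8_t *config_base(uint8_t *cfg_reg, int idx)
{
	return cfg_reg + (size_t)idx * CFG_OFFSET;
}

int main(void)
{
	static uint8_t mmio[0x1000];             /* fake register window */

	printf("cntr 2 at +%#zx, cfg 2 at +%#zx\n",
	       (size_t)(event_base(mmio, 0x20, 2) - mmio),
	       (size_t)(config_base(mmio, 2) - mmio));
	return 0;
}
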
245 static inline struct iommu_pmu *iommu_event_to_pmu(struct perf_event *event) in iommu_event_to_pmu()
247 return container_of(event->pmu, struct iommu_pmu, pmu); in iommu_event_to_pmu()
259 static inline bool is_iommu_pmu_event(struct iommu_pmu *iommu_pmu, in is_iommu_pmu_event() argument
262 return event->pmu == &iommu_pmu->pmu; in is_iommu_pmu_event()
267 struct iommu_pmu *iommu_pmu = iommu_event_to_pmu(event); in iommu_pmu_validate_event() local
270 if (event_group >= iommu_pmu->num_eg) in iommu_pmu_validate_event()
278 struct iommu_pmu *iommu_pmu = iommu_event_to_pmu(event); in iommu_pmu_validate_group() local
287 if (!is_iommu_pmu_event(iommu_pmu, sibling) || in iommu_pmu_validate_group()
291 if (++nr > iommu_pmu->num_cntr) in iommu_pmu_validate_group()
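
iommu_pmu_validate_group() counts the leader and every active sibling bound to this PMU and rejects the group once the count exceeds num_cntr, since such a group could never be co-scheduled. A standalone sketch of the walk, with the sibling list modeled as a plain linked list:

#include <stdio.h>

struct event {
	int pmu_id;                              /* which PMU the event targets */
	struct event *next;                      /* stand-in for the sibling list */
};

/* Walk the leader and its siblings: events from other PMUs are skipped,
 * and the group is rejected once more events target this PMU than there
 * are hardware counters. */
static int group_valid(struct event *leader, int this_pmu, int num_cntr)
{
	int nr = 0;

	for (struct event *e = leader; e; e = e->next) {
		if (e->pmu_id != this_pmu)
			continue;
		if (++nr > num_cntr)
			return 0;                /* -EINVAL in the driver */
	}
	return 1;
}

int main(void)
{
	struct event c = { 1, NULL }, b = { 1, &c }, a = { 1, &b };

	printf("%d\n", group_valid(&a, 1, 4));   /* 1: three events fit */
	printf("%d\n", group_valid(&a, 1, 2));   /* 0: too many for 2 counters */
	return 0;
}
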
322 struct iommu_pmu *iommu_pmu = iommu_event_to_pmu(event); in iommu_pmu_event_update() local
325 int shift = 64 - iommu_pmu->cntr_width; in iommu_pmu_event_update()
329 new_count = dmar_readq(iommu_event_base(iommu_pmu, hwc->idx)); in iommu_pmu_event_update()
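
iommu_pmu_event_update() uses the usual perf idiom for counters narrower than 64 bits: shift = 64 - cntr_width left-aligns both the previous and new raw values so a wrap-around subtracts correctly, then the delta is shifted back down. A standalone sketch with a hypothetical 48-bit counter:

#include <stdint.h>
#include <stdio.h>

/* Delta between two raw readings of a cntr_width-bit counter that may
 * have wrapped once between reads. */
static uint64_t counter_delta(uint64_t prev, uint64_t now, int cntr_width)
{
	int shift = 64 - cntr_width;

	/* Left-aligning both values makes the subtraction wrap naturally
	 * in 64 bits; the logical right shift restores the magnitude. */
	return ((now << shift) - (prev << shift)) >> shift;
}

int main(void)
{
	int width = 48;
	uint64_t prev = (1ULL << width) - 10;    /* near the top of the range */
	uint64_t now  = 5;                       /* wrapped past zero */

	printf("delta = %llu\n",                 /* prints 15 */
	       (unsigned long long)counter_delta(prev, now, width));
	return 0;
}
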
345 struct iommu_pmu *iommu_pmu = iommu_event_to_pmu(event); in iommu_pmu_start() local
346 struct intel_iommu *iommu = iommu_pmu->iommu; in iommu_pmu_start()
362 count = dmar_readq(iommu_event_base(iommu_pmu, hwc->idx)); in iommu_pmu_start()
382 struct iommu_pmu *iommu_pmu = iommu_event_to_pmu(event); in iommu_pmu_stop() local
383 struct intel_iommu *iommu = iommu_pmu->iommu; in iommu_pmu_stop()
396 iommu_pmu_validate_per_cntr_event(struct iommu_pmu *iommu_pmu, in iommu_pmu_validate_per_cntr_event() argument
402 if (!(iommu_pmu->cntr_evcap[idx][event_group] & select)) in iommu_pmu_validate_per_cntr_event()
408 static int iommu_pmu_assign_event(struct iommu_pmu *iommu_pmu, in iommu_pmu_assign_event() argument
418 for (idx = iommu_pmu->num_cntr - 1; idx >= 0; idx--) { in iommu_pmu_assign_event()
419 if (test_and_set_bit(idx, iommu_pmu->used_mask)) in iommu_pmu_assign_event()
422 if (!iommu_pmu_validate_per_cntr_event(iommu_pmu, idx, event)) in iommu_pmu_assign_event()
424 clear_bit(idx, iommu_pmu->used_mask); in iommu_pmu_assign_event()
429 iommu_pmu->event_list[idx] = event; in iommu_pmu_assign_event()
433 dmar_writeq(iommu_config_base(iommu_pmu, idx), hwc->config); in iommu_pmu_assign_event()
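
iommu_pmu_assign_event() claims a counter by scanning used_mask from the top index down: test_and_set_bit() reserves a slot atomically, and clear_bit() releases it again when the per-counter event capability check fails. A single-threaded sketch of that allocate-or-back-out loop; the capability check is a hypothetical stand-in, and the bit helpers here are non-atomic:

#include <stdbool.h>
#include <stdio.h>

#define NUM_CNTR 4

/* Non-atomic stand-ins for the kernel's test_and_set_bit()/clear_bit(). */
static bool test_and_set_bit(int nr, unsigned long *mask)
{
	bool was_set = *mask & (1UL << nr);
	*mask |= 1UL << nr;
	return was_set;
}

static void clear_bit(int nr, unsigned long *mask)
{
	*mask &= ~(1UL << nr);
}

/* Hypothetical per-counter capability: counter idx supports event ev. */
static bool cntr_supports(int idx, int ev)
{
	return !(idx == 3 && ev == 7);           /* pretend counter 3 lacks event 7 */
}

static int assign_counter(unsigned long *used_mask, int ev)
{
	for (int idx = NUM_CNTR - 1; idx >= 0; idx--) {
		if (test_and_set_bit(idx, used_mask))
			continue;                /* already taken */
		if (!cntr_supports(idx, ev)) {
			clear_bit(idx, used_mask); /* back out, keep looking */
			continue;
		}
		return idx;
	}
	return -1;                               /* no usable counter (-EINVAL) */
}

int main(void)
{
	unsigned long used = 0;

	printf("event 7 -> counter %d\n", assign_counter(&used, 7)); /* 2, not 3 */
	return 0;
}
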
456 struct iommu_pmu *iommu_pmu = iommu_event_to_pmu(event); in iommu_pmu_add() local
460 ret = iommu_pmu_assign_event(iommu_pmu, event); in iommu_pmu_add()
474 struct iommu_pmu *iommu_pmu = iommu_event_to_pmu(event); in iommu_pmu_del() local
485 iommu_pmu->event_list[idx] = NULL; in iommu_pmu_del()
487 clear_bit(idx, iommu_pmu->used_mask); in iommu_pmu_del()
494 struct iommu_pmu *iommu_pmu = container_of(pmu, struct iommu_pmu, pmu); in iommu_pmu_enable() local
495 struct intel_iommu *iommu = iommu_pmu->iommu; in iommu_pmu_enable()
502 struct iommu_pmu *iommu_pmu = container_of(pmu, struct iommu_pmu, pmu); in iommu_pmu_disable() local
503 struct intel_iommu *iommu = iommu_pmu->iommu; in iommu_pmu_disable()
508 static void iommu_pmu_counter_overflow(struct iommu_pmu *iommu_pmu) in iommu_pmu_counter_overflow() argument
518 while ((status = dmar_readq(iommu_pmu->overflow))) { in iommu_pmu_counter_overflow()
519 for_each_set_bit(i, (unsigned long *)&status, iommu_pmu->num_cntr) { in iommu_pmu_counter_overflow()
524 event = iommu_pmu->event_list[i]; in iommu_pmu_counter_overflow()
532 dmar_writeq(iommu_pmu->overflow, status); in iommu_pmu_counter_overflow()
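
iommu_pmu_counter_overflow() drains the overflow register in a loop: each set bit in the status names a counter whose event needs updating, writing the value back acknowledges exactly the bits that were handled (write-1-to-clear), and the outer while() catches overflows that arrive mid-service. A sketch with the register modeled as a plain variable:

#include <stdint.h>
#include <stdio.h>

static uint64_t fake_ovf_reg = 0x5;          /* counters 0 and 2 overflowed */

static uint64_t read_ovf(void)    { return fake_ovf_reg; }
static void write_ovf(uint64_t v) { fake_ovf_reg &= ~v; } /* write-1-to-clear */

static void handle_overflow(int num_cntr)
{
	uint64_t status;

	/* Keep draining until no counter reports an overflow. */
	while ((status = read_ovf())) {
		for (int i = 0; i < num_cntr; i++) {
			if (!(status & (1ULL << i)))
				continue;
			printf("counter %d overflowed, updating its event\n", i);
		}
		write_ovf(status);               /* ack exactly what we handled */
	}
}

int main(void)
{
	handle_overflow(8);
	return 0;
}
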
553 struct iommu_pmu *iommu_pmu = iommu->pmu; in __iommu_pmu_register() local
555 iommu_pmu->pmu.name = iommu->name; in __iommu_pmu_register()
556 iommu_pmu->pmu.task_ctx_nr = perf_invalid_context; in __iommu_pmu_register()
557 iommu_pmu->pmu.event_init = iommu_pmu_event_init; in __iommu_pmu_register()
558 iommu_pmu->pmu.pmu_enable = iommu_pmu_enable; in __iommu_pmu_register()
559 iommu_pmu->pmu.pmu_disable = iommu_pmu_disable; in __iommu_pmu_register()
560 iommu_pmu->pmu.add = iommu_pmu_add; in __iommu_pmu_register()
561 iommu_pmu->pmu.del = iommu_pmu_del; in __iommu_pmu_register()
562 iommu_pmu->pmu.start = iommu_pmu_start; in __iommu_pmu_register()
563 iommu_pmu->pmu.stop = iommu_pmu_stop; in __iommu_pmu_register()
564 iommu_pmu->pmu.read = iommu_pmu_event_update; in __iommu_pmu_register()
565 iommu_pmu->pmu.attr_groups = iommu_pmu_attr_groups; in __iommu_pmu_register()
566 iommu_pmu->pmu.attr_update = iommu_pmu_attr_update; in __iommu_pmu_register()
567 iommu_pmu->pmu.capabilities = PERF_PMU_CAP_NO_EXCLUDE; in __iommu_pmu_register()
568 iommu_pmu->pmu.module = THIS_MODULE; in __iommu_pmu_register()
570 return perf_pmu_register(&iommu_pmu->pmu, iommu_pmu->pmu.name, -1); in __iommu_pmu_register()
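
__iommu_pmu_register() is a plain ops-table registration: each perf callback slot in the embedded struct pmu is pointed at the driver's handler before perf_pmu_register() publishes it (the final -1 lets the perf core pick the PMU type id). The shape of the pattern in standalone C, with a hypothetical registry standing in for the perf core:

#include <stdio.h>

struct pmu_ops {
	const char *name;
	int  (*add)(int idx);
	void (*del)(int idx);
	void (*read)(int idx);
};

static int  my_add(int idx)  { printf("add %d\n", idx); return 0; }
static void my_del(int idx)  { printf("del %d\n", idx); }
static void my_read(int idx) { printf("read %d\n", idx); }

/* Hypothetical registry standing in for perf_pmu_register(). */
static int register_pmu(struct pmu_ops *ops)
{
	printf("registered %s\n", ops->name);
	return 0;
}

int main(void)
{
	struct pmu_ops ops = {
		.name = "dmar0",                 /* the IOMMU's name is reused */
		.add  = my_add,
		.del  = my_del,
		.read = my_read,
	};
	return register_pmu(&ops);
}
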
583 struct iommu_pmu *iommu_pmu; in alloc_iommu_pmu() local
612 iommu_pmu = kzalloc(sizeof(*iommu_pmu), GFP_KERNEL); in alloc_iommu_pmu()
613 if (!iommu_pmu) in alloc_iommu_pmu()
616 iommu_pmu->num_cntr = pcap_num_cntr(perfcap); in alloc_iommu_pmu()
617 if (iommu_pmu->num_cntr > IOMMU_PMU_IDX_MAX) { in alloc_iommu_pmu()
619 iommu_pmu->num_cntr, IOMMU_PMU_IDX_MAX); in alloc_iommu_pmu()
620 iommu_pmu->num_cntr = IOMMU_PMU_IDX_MAX; in alloc_iommu_pmu()
623 iommu_pmu->cntr_width = pcap_cntr_width(perfcap); in alloc_iommu_pmu()
624 iommu_pmu->filter = pcap_filters_mask(perfcap); in alloc_iommu_pmu()
625 iommu_pmu->cntr_stride = pcap_cntr_stride(perfcap); in alloc_iommu_pmu()
626 iommu_pmu->num_eg = pcap_num_event_group(perfcap); in alloc_iommu_pmu()
628 iommu_pmu->evcap = kcalloc(iommu_pmu->num_eg, sizeof(u64), GFP_KERNEL); in alloc_iommu_pmu()
629 if (!iommu_pmu->evcap) { in alloc_iommu_pmu()
635 for (i = 0; i < iommu_pmu->num_eg; i++) { in alloc_iommu_pmu()
640 iommu_pmu->evcap[i] = pecap_es(pcap); in alloc_iommu_pmu()
643 iommu_pmu->cntr_evcap = kcalloc(iommu_pmu->num_cntr, sizeof(u32 *), GFP_KERNEL); in alloc_iommu_pmu()
644 if (!iommu_pmu->cntr_evcap) { in alloc_iommu_pmu()
648 for (i = 0; i < iommu_pmu->num_cntr; i++) { in alloc_iommu_pmu()
649 iommu_pmu->cntr_evcap[i] = kcalloc(iommu_pmu->num_eg, sizeof(u32), GFP_KERNEL); in alloc_iommu_pmu()
650 if (!iommu_pmu->cntr_evcap[i]) { in alloc_iommu_pmu()
658 for (j = 0; j < iommu_pmu->num_eg; j++) in alloc_iommu_pmu()
659 iommu_pmu->cntr_evcap[i][j] = (u32)iommu_pmu->evcap[j]; in alloc_iommu_pmu()
662 iommu_pmu->cfg_reg = get_perf_reg_address(iommu, DMAR_PERFCFGOFF_REG); in alloc_iommu_pmu()
663 iommu_pmu->cntr_reg = get_perf_reg_address(iommu, DMAR_PERFCNTROFF_REG); in alloc_iommu_pmu()
664 iommu_pmu->overflow = get_perf_reg_address(iommu, DMAR_PERFOVFOFF_REG); in alloc_iommu_pmu()
671 for (i = 0; i < iommu_pmu->num_cntr; i++) { in alloc_iommu_pmu()
672 cap = dmar_readl(iommu_pmu->cfg_reg + in alloc_iommu_pmu()
683 if ((iommu_cntrcap_cw(cap) != iommu_pmu->cntr_width) || in alloc_iommu_pmu()
685 iommu_pmu->num_cntr = i; in alloc_iommu_pmu()
687 iommu_pmu->num_cntr); in alloc_iommu_pmu()
691 for (j = 0; j < iommu_pmu->num_eg; j++) in alloc_iommu_pmu()
692 iommu_pmu->cntr_evcap[i][j] = 0; in alloc_iommu_pmu()
696 cap = dmar_readl(iommu_pmu->cfg_reg + i * IOMMU_PMU_CFG_OFFSET + in alloc_iommu_pmu()
699 iommu_pmu->cntr_evcap[i][iommu_event_group(cap)] = iommu_event_select(cap); in alloc_iommu_pmu()
704 iommu_pmu->evcap[iommu_event_group(cap)] |= iommu_event_select(cap); in alloc_iommu_pmu()
708 iommu_pmu->iommu = iommu; in alloc_iommu_pmu()
709 iommu->pmu = iommu_pmu; in alloc_iommu_pmu()
714 for (i = 0; i < iommu_pmu->num_cntr; i++) in alloc_iommu_pmu()
715 kfree(iommu_pmu->cntr_evcap[i]); in alloc_iommu_pmu()
716 kfree(iommu_pmu->cntr_evcap); in alloc_iommu_pmu()
718 kfree(iommu_pmu->evcap); in alloc_iommu_pmu()
720 kfree(iommu_pmu); in alloc_iommu_pmu()
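
alloc_iommu_pmu() builds a two-level capability table (one u32 array per counter) and, on any failure, unwinds in reverse so only what was actually allocated gets freed. A user-space sketch of that goto-based unwinding, with hypothetical sizes:

#include <stdlib.h>

struct pmu_caps {
	int num_cntr, num_eg;
	unsigned long long *evcap;               /* [num_eg] */
	unsigned int **cntr_evcap;               /* [num_cntr][num_eg] */
};

static struct pmu_caps *alloc_caps(int num_cntr, int num_eg)
{
	struct pmu_caps *p = calloc(1, sizeof(*p));
	if (!p)
		return NULL;

	p->num_cntr = num_cntr;
	p->num_eg = num_eg;

	p->evcap = calloc(num_eg, sizeof(*p->evcap));
	if (!p->evcap)
		goto free_pmu;

	p->cntr_evcap = calloc(num_cntr, sizeof(*p->cntr_evcap));
	if (!p->cntr_evcap)
		goto free_evcap;

	for (int i = 0; i < num_cntr; i++) {
		p->cntr_evcap[i] = calloc(num_eg, sizeof(**p->cntr_evcap));
		if (!p->cntr_evcap[i])
			goto free_rows;
	}
	return p;

free_rows:
	for (int i = 0; i < num_cntr; i++)
		free(p->cntr_evcap[i]);          /* free(NULL) is a no-op */
	free(p->cntr_evcap);
free_evcap:
	free(p->evcap);
free_pmu:
	free(p);
	return NULL;
}

int main(void)
{
	struct pmu_caps *p = alloc_caps(8, 4);

	if (p) {                                 /* mirror of the unwind order */
		for (int i = 0; i < p->num_cntr; i++)
			free(p->cntr_evcap[i]);
		free(p->cntr_evcap);
		free(p->evcap);
		free(p);
	}
	return 0;
}
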
727 struct iommu_pmu *iommu_pmu = iommu->pmu; in free_iommu_pmu() local
729 if (!iommu_pmu) in free_iommu_pmu()
732 if (iommu_pmu->evcap) { in free_iommu_pmu()
735 for (i = 0; i < iommu_pmu->num_cntr; i++) in free_iommu_pmu()
736 kfree(iommu_pmu->cntr_evcap[i]); in free_iommu_pmu()
737 kfree(iommu_pmu->cntr_evcap); in free_iommu_pmu()
739 kfree(iommu_pmu->evcap); in free_iommu_pmu()
740 kfree(iommu_pmu); in free_iommu_pmu()
746 struct iommu_pmu *iommu_pmu = iommu->pmu; in iommu_pmu_set_interrupt() local
753 snprintf(iommu_pmu->irq_name, sizeof(iommu_pmu->irq_name), "dmar%d-perf", iommu->seq_id); in iommu_pmu_set_interrupt()
757 IRQF_ONESHOT, iommu_pmu->irq_name, iommu); in iommu_pmu_set_interrupt()
778 struct iommu_pmu *iommu_pmu = hlist_entry_safe(node, typeof(*iommu_pmu), cpuhp_node); in iommu_pmu_cpu_online() local
784 iommu_pmu->cpu = cpu; in iommu_pmu_cpu_online()
791 struct iommu_pmu *iommu_pmu = hlist_entry_safe(node, typeof(*iommu_pmu), cpuhp_node); in iommu_pmu_cpu_offline() local
799 if (target < nr_cpu_ids && target != iommu_pmu->cpu) { in iommu_pmu_cpu_offline()
800 perf_pmu_migrate_context(&iommu_pmu->pmu, cpu, target); in iommu_pmu_cpu_offline()
801 iommu_pmu->cpu = target; in iommu_pmu_cpu_offline()
815 perf_pmu_migrate_context(&iommu_pmu->pmu, cpu, target); in iommu_pmu_cpu_offline()
816 iommu_pmu->cpu = target; in iommu_pmu_cpu_offline()
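
The hotplug callbacks keep each PMU's perf context on exactly one live CPU: iommu_pmu_cpu_online() adopts a CPU when none is set, and iommu_pmu_cpu_offline() picks a surviving target and hands the context over with perf_pmu_migrate_context() (the second migrate above covers sibling IOMMU PMUs that shared the departing owner). A sketch of the basic hand-off, with the online set modeled as a bitmask:

#include <stdio.h>

static unsigned long online_cpus = 0xF;      /* CPUs 0-3 online */
static int pmu_cpu = 0;                      /* CPU owning the perf context */

static void cpu_offline(int cpu)
{
	online_cpus &= ~(1UL << cpu);
	if (cpu != pmu_cpu)
		return;                          /* context lives elsewhere */

	/* Find any surviving CPU and hand the context over to it. */
	for (int target = 0; target < 64; target++) {
		if (online_cpus & (1UL << target)) {
			printf("migrating context %d -> %d\n", pmu_cpu, target);
			pmu_cpu = target;
			return;
		}
	}
	pmu_cpu = -1;                            /* no CPUs left */
}

int main(void)
{
	cpu_offline(0);                          /* owner goes away: 0 -> 1 */
	cpu_offline(2);                          /* non-owner: nothing to do */
	return 0;
}
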
824 static int iommu_pmu_cpuhp_setup(struct iommu_pmu *iommu_pmu) in iommu_pmu_cpuhp_setup() argument
838 ret = cpuhp_state_add_instance(iommu_cpuhp_slot, &iommu_pmu->cpuhp_node); in iommu_pmu_cpuhp_setup()
849 static void iommu_pmu_cpuhp_free(struct iommu_pmu *iommu_pmu) in iommu_pmu_cpuhp_free() argument
851 cpuhp_state_remove_instance(iommu_cpuhp_slot, &iommu_pmu->cpuhp_node); in iommu_pmu_cpuhp_free()
861 struct iommu_pmu *iommu_pmu = iommu->pmu; in iommu_pmu_register() local
863 if (!iommu_pmu) in iommu_pmu_register()
869 if (iommu_pmu_cpuhp_setup(iommu_pmu)) in iommu_pmu_register()
879 iommu_pmu_cpuhp_free(iommu_pmu); in iommu_pmu_register()
881 perf_pmu_unregister(&iommu_pmu->pmu); in iommu_pmu_register()
889 struct iommu_pmu *iommu_pmu = iommu->pmu; in iommu_pmu_unregister() local
891 if (!iommu_pmu) in iommu_pmu_unregister()
895 iommu_pmu_cpuhp_free(iommu_pmu); in iommu_pmu_unregister()
896 perf_pmu_unregister(&iommu_pmu->pmu); in iommu_pmu_unregister()