Lines Matching refs: l2cache_pmu

109 struct l2cache_pmu {  struct
138 struct l2cache_pmu *l2cache_pmu; member
150 #define to_l2cache_pmu(p) (container_of(p, struct l2cache_pmu, pmu))
164 struct l2cache_pmu *l2cache_pmu, int cpu) in get_cluster_pmu() argument
166 return *per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu); in get_cluster_pmu()
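
The two accessors above are standard kernel patterns: to_l2cache_pmu() (line 150) recovers the wrapping l2cache_pmu from the embedded struct pmu via container_of(), and get_cluster_pmu() (lines 164-166) dereferences a per-CPU slot holding a pointer to the cluster PMU serving a given CPU. A minimal sketch, with the struct body abbreviated to the members visible in this listing (the full definition starts at line 109):

#include <linux/cpumask.h>
#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>

struct cluster_pmu;                     /* per-cluster state, sketched below */

struct l2cache_pmu {
        struct hlist_node node;         /* CPU-hotplug instance node */
        u32 num_pmus;
        struct pmu pmu;
        int num_counters;
        cpumask_t cpumask;
        struct platform_device *pdev;
        struct cluster_pmu * __percpu *pmu_cluster;  /* one slot per CPU */
        struct list_head clusters;
};

/* Recover the containing l2cache_pmu from an embedded struct pmu. */
#define to_l2cache_pmu(p) (container_of(p, struct l2cache_pmu, pmu))

/* Each CPU's slot points at the cluster PMU that serves it. */
static inline struct cluster_pmu *get_cluster_pmu(
        struct l2cache_pmu *l2cache_pmu, int cpu)
{
        return *per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu);
}
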
344 int num_ctrs = cluster->l2cache_pmu->num_counters - 1; in l2_cache_get_event_idx()
388 int num_counters = cluster->l2cache_pmu->num_counters; in l2_cache_handle_irq()
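
Lines 344 and 388 both key off num_counters because the highest-numbered hardware counter is dedicated to cycle counting, leaving num_counters - 1 general-purpose counters. A sketch of the allocation side, assuming a used_counters bitmap in the per-cluster state (field names here are illustrative, apart from the l2cache_pmu back-pointer from line 138):

#include <linux/bitops.h>

struct cluster_pmu {
        struct list_head next;
        struct l2cache_pmu *l2cache_pmu;   /* back-pointer, line 138 */
        int cluster_id;                    /* assumed field */
        int on_cpu;                        /* owning CPU, -1 if none (assumed) */
        cpumask_t cluster_cpus;            /* assumed field */
        unsigned long used_counters[BITS_TO_LONGS(32)];
};

static int l2_cache_get_event_idx(struct cluster_pmu *cluster)
{
        int num_ctrs = cluster->l2cache_pmu->num_counters - 1;
        int idx;

        idx = find_first_zero_bit(cluster->used_counters, num_ctrs);
        if (idx == num_ctrs)
                return -EAGAIN;         /* all general-purpose counters busy */

        set_bit(idx, cluster->used_counters);
        return idx;
}
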
443 struct l2cache_pmu *l2cache_pmu; in l2_cache_event_init() local
448 l2cache_pmu = to_l2cache_pmu(event->pmu); in l2_cache_event_init()
451 dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, in l2_cache_event_init()
457 dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, in l2_cache_event_init()
465 dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, in l2_cache_event_init()
474 dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, in l2_cache_event_init()
482 dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, in l2_cache_event_init()
488 cluster = get_cluster_pmu(l2cache_pmu, event->cpu); in l2_cache_event_init()
491 dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, in l2_cache_event_init()
499 dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, in l2_cache_event_init()
509 dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, in l2_cache_event_init()
521 dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, in l2_cache_event_init()
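
The dense run of dev_dbg_ratelimited() calls between lines 443 and 521 is the event_init validation ladder: each unsupported configuration is rejected with a rate-limited debug message on the platform device instead of flooding the console. A condensed sketch of the shape (the checks and messages are illustrative, not the driver's exact set):

static int l2_cache_event_init(struct perf_event *event)
{
        struct cluster_pmu *cluster;
        struct l2cache_pmu *l2cache_pmu = to_l2cache_pmu(event->pmu);

        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        if (is_sampling_event(event)) {
                dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
                                    "Sampling not supported\n");
                return -EOPNOTSUPP;
        }

        if (event->cpu < 0) {
                dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
                                    "Per-task mode not supported\n");
                return -EOPNOTSUPP;
        }

        cluster = get_cluster_pmu(l2cache_pmu, event->cpu);
        if (!cluster) {
                /* CPU never came online, or has no L2 cluster PMU */
                dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
                                    "CPU%d not associated with a cluster\n",
                                    event->cpu);
                return -EINVAL;
        }

        /* All events of a cluster run on the cluster's owning CPU. */
        event->cpu = cluster->on_cpu;
        return 0;
}
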
639 struct l2cache_pmu *l2cache_pmu = to_l2cache_pmu(dev_get_drvdata(dev)); in l2_cache_pmu_cpumask_show() local
641 return cpumap_print_to_pagebuf(true, buf, &l2cache_pmu->cpumask); in l2_cache_pmu_cpumask_show()
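
Lines 639-641 implement the conventional perf-PMU cpumask sysfs attribute, which tells userspace which CPU to open events on. The perf core sets the PMU device's drvdata to the struct pmu itself, so dev_get_drvdata() feeds straight into to_l2cache_pmu():

static ssize_t l2_cache_pmu_cpumask_show(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf)
{
        struct l2cache_pmu *l2cache_pmu = to_l2cache_pmu(dev_get_drvdata(dev));

        return cpumap_print_to_pagebuf(true, buf, &l2cache_pmu->cpumask);
}
static DEVICE_ATTR(cpumask, 0444, l2_cache_pmu_cpumask_show, NULL);
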
735 struct l2cache_pmu *l2cache_pmu, int cpu) in l2_cache_associate_cpu_with_cluster() argument
752 list_for_each_entry(cluster, &l2cache_pmu->clusters, next) { in l2_cache_associate_cpu_with_cluster()
756 dev_info(&l2cache_pmu->pdev->dev, in l2_cache_associate_cpu_with_cluster()
760 *per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu) = cluster; in l2_cache_associate_cpu_with_cluster()
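
Lines 735-760 bind a CPU to its cluster PMU: walk the clusters list, match on the physical cluster id, log the association, and cache it in the per-CPU slot so later lookups are O(1). A simplified sketch matching on MPIDR affinity level 1 (a simplification: multi-threaded cores encode the cluster id at a different affinity level):

#include <asm/cputype.h>        /* read_cpuid_mpidr(), MPIDR_AFFINITY_LEVEL() */

static struct cluster_pmu *l2_cache_associate_cpu_with_cluster(
        struct l2cache_pmu *l2cache_pmu, int cpu)
{
        u64 mpidr = read_cpuid_mpidr();         /* reads the current CPU */
        int cpu_cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
        struct cluster_pmu *cluster;

        list_for_each_entry(cluster, &l2cache_pmu->clusters, next) {
                if (cluster->cluster_id != cpu_cluster_id)
                        continue;

                dev_info(&l2cache_pmu->pdev->dev,
                         "CPU%d associated with cluster %d\n",
                         cpu, cluster->cluster_id);
                cpumask_set_cpu(cpu, &cluster->cluster_cpus);
                *per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu) = cluster;
                return cluster;
        }
        return NULL;
}

Because read_cpuid_mpidr() reads the current CPU's register, this only works when called on the CPU being associated, which is why it runs from the hotplug online callback below.
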
770 struct l2cache_pmu *l2cache_pmu; in l2cache_pmu_online_cpu() local
772 l2cache_pmu = hlist_entry_safe(node, struct l2cache_pmu, node); in l2cache_pmu_online_cpu()
773 cluster = get_cluster_pmu(l2cache_pmu, cpu); in l2cache_pmu_online_cpu()
776 cluster = l2_cache_associate_cpu_with_cluster(l2cache_pmu, cpu); in l2cache_pmu_online_cpu()
793 cpumask_set_cpu(cpu, &l2cache_pmu->cpumask); in l2cache_pmu_online_cpu()
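
Lines 770-793 are the CPU-hotplug online callback. hlist_entry_safe() recovers the l2cache_pmu from the hotplug instance node; a CPU seen for the first time is associated with its cluster, and the first online CPU of a cluster becomes the counter owner advertised in the cpumask. Sketched, reusing the assumed on_cpu field:

static int l2cache_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
{
        struct cluster_pmu *cluster;
        struct l2cache_pmu *l2cache_pmu;

        l2cache_pmu = hlist_entry_safe(node, struct l2cache_pmu, node);
        cluster = get_cluster_pmu(l2cache_pmu, cpu);
        if (!cluster) {
                /* First time this CPU has come online. */
                cluster = l2_cache_associate_cpu_with_cluster(l2cache_pmu, cpu);
                if (!cluster)
                        return 0;       /* no cluster PMU for this CPU */
        }

        /* Another CPU in this cluster already manages the counters. */
        if (cluster->on_cpu != -1)
                return 0;

        cluster->on_cpu = cpu;
        cpumask_set_cpu(cpu, &l2cache_pmu->cpumask);
        return 0;
}
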
805 struct l2cache_pmu *l2cache_pmu; in l2cache_pmu_offline_cpu() local
809 l2cache_pmu = hlist_entry_safe(node, struct l2cache_pmu, node); in l2cache_pmu_offline_cpu()
810 cluster = get_cluster_pmu(l2cache_pmu, cpu); in l2cache_pmu_offline_cpu()
819 cpumask_clear_cpu(cpu, &l2cache_pmu->cpumask); in l2cache_pmu_offline_cpu()
831 perf_pmu_migrate_context(&l2cache_pmu->pmu, cpu, target); in l2cache_pmu_offline_cpu()
833 cpumask_set_cpu(target, &l2cache_pmu->cpumask); in l2cache_pmu_offline_cpu()
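
Lines 805-833 are the mirror image. When the owning CPU of a cluster goes offline, the driver clears it from the advertised cpumask, picks another online CPU of the same cluster, and hands over the active events with perf_pmu_migrate_context() so counting continues uninterrupted:

static int l2cache_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
        struct cluster_pmu *cluster;
        struct l2cache_pmu *l2cache_pmu;
        cpumask_t cluster_online_cpus;
        unsigned int target;

        l2cache_pmu = hlist_entry_safe(node, struct l2cache_pmu, node);
        cluster = get_cluster_pmu(l2cache_pmu, cpu);
        if (!cluster || cluster->on_cpu != cpu)
                return 0;               /* this CPU did not own the cluster */

        cpumask_clear_cpu(cpu, &l2cache_pmu->cpumask);
        cluster->on_cpu = -1;

        /* Is any other CPU of this cluster still online? */
        cpumask_and(&cluster_online_cpus, &cluster->cluster_cpus,
                    cpu_online_mask);
        target = cpumask_any_but(&cluster_online_cpus, cpu);
        if (target >= nr_cpu_ids)
                return 0;               /* whole cluster going down */

        perf_pmu_migrate_context(&l2cache_pmu->pmu, cpu, target);
        cluster->on_cpu = target;
        cpumask_set_cpu(target, &l2cache_pmu->cpumask);
        return 0;
}
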
843 struct l2cache_pmu *l2cache_pmu = data; in l2_cache_pmu_probe_cluster() local
867 cluster->l2cache_pmu = l2cache_pmu; in l2_cache_pmu_probe_cluster()
885 list_add(&cluster->next, &l2cache_pmu->clusters); in l2_cache_pmu_probe_cluster()
886 l2cache_pmu->num_pmus++; in l2_cache_pmu_probe_cluster()
894 struct l2cache_pmu *l2cache_pmu; in l2_cache_pmu_probe() local
896 l2cache_pmu = in l2_cache_pmu_probe()
897 devm_kzalloc(&pdev->dev, sizeof(*l2cache_pmu), GFP_KERNEL); in l2_cache_pmu_probe()
898 if (!l2cache_pmu) in l2_cache_pmu_probe()
901 INIT_LIST_HEAD(&l2cache_pmu->clusters); in l2_cache_pmu_probe()
903 platform_set_drvdata(pdev, l2cache_pmu); in l2_cache_pmu_probe()
904 l2cache_pmu->pmu = (struct pmu) { in l2_cache_pmu_probe()
920 l2cache_pmu->num_counters = get_num_counters(); in l2_cache_pmu_probe()
921 l2cache_pmu->pdev = pdev; in l2_cache_pmu_probe()
922 l2cache_pmu->pmu_cluster = devm_alloc_percpu(&pdev->dev, in l2_cache_pmu_probe()
924 if (!l2cache_pmu->pmu_cluster) in l2_cache_pmu_probe()
927 l2_cycle_ctr_idx = l2cache_pmu->num_counters - 1; in l2_cache_pmu_probe()
928 l2_counter_present_mask = GENMASK(l2cache_pmu->num_counters - 2, 0) | in l2_cache_pmu_probe()
931 cpumask_clear(&l2cache_pmu->cpumask); in l2_cache_pmu_probe()
934 err = device_for_each_child(&pdev->dev, l2cache_pmu, in l2_cache_pmu_probe()
939 if (l2cache_pmu->num_pmus == 0) { in l2_cache_pmu_probe()
945 &l2cache_pmu->node); in l2_cache_pmu_probe()
951 err = perf_pmu_register(&l2cache_pmu->pmu, l2cache_pmu->pmu.name, -1); in l2_cache_pmu_probe()
958 l2cache_pmu->num_pmus); in l2_cache_pmu_probe()
964 &l2cache_pmu->node); in l2_cache_pmu_probe()
970 struct l2cache_pmu *l2cache_pmu = in l2_cache_pmu_remove() local
973 perf_pmu_unregister(&l2cache_pmu->pmu); in l2_cache_pmu_remove()
975 &l2cache_pmu->node); in l2_cache_pmu_remove()