// SPDX-License-Identifier: GPL-2.0
/*
 * platform_device probing code for ARM performance counters.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/kconfig.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/percpu.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
#include <linux/printk.h>
#include <linux/smp.h>

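/*
 * probe_current_pmu() walks a pmu_probe_info table and invokes the init
 * callback of the first entry whose (cpuid, mask) pair matches the MIDR of
 * the CPU we are running on. The real tables live in the architecture PMU
 * drivers; a rough sketch of one (the entries and init functions named
 * below are hypothetical, only the ARM_PMU_PROBE() helper is real) is:
 *
 *	static const struct pmu_probe_info example_pmu_probe_table[] = {
 *		ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A9, example_a9_pmu_init),
 *		ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A15, example_a15_pmu_init),
 *		{ },	zero-filled sentinel: .init == NULL ends the walk
 *	};
 */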
static int probe_current_pmu(struct arm_pmu *pmu,
			     const struct pmu_probe_info *info)
{
	int cpu = get_cpu();
	unsigned int cpuid = read_cpuid_id();
	int ret = -ENODEV;

	pr_info("probing PMU on CPU %d\n", cpu);

	for (; info->init != NULL; info++) {
		if ((cpuid & info->mask) != info->cpuid)
			continue;
		ret = info->init(pmu);
		break;
	}

	put_cpu();
	return ret;
}

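/*
 * pmu_parse_percpu_irq() covers the case where the PMU uses a single
 * percpu_devid interrupt (a PPI): the one IRQ number is recorded for every
 * CPU in the partition that the PPI covers. In a device tree this is
 * typically described with a single per-CPU interrupt, along the lines of
 * the illustrative fragment below:
 *
 *	pmu {
 *		compatible = "arm,armv8-pmuv3";
 *		interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
 *	};
 */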
static int pmu_parse_percpu_irq(struct arm_pmu *pmu, int irq)
{
	int cpu, ret;
	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;

	ret = irq_get_percpu_devid_partition(irq, &pmu->supported_cpus);
	if (ret)
		return ret;

	for_each_cpu(cpu, &pmu->supported_cpus)
		per_cpu(hw_events->irq, cpu) = irq;

	return 0;
}

static bool pmu_has_irq_affinity(struct device_node *node)
{
	return !!of_find_property(node, "interrupt-affinity", NULL);
}

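/*
 * pmu_parse_irq_affinity() maps the i-th PMU interrupt to a logical CPU via
 * the optional "interrupt-affinity" property, whose phandles pair up 1:1
 * with the "interrupts" entries. An illustrative (not authoritative)
 * binding with one SPI per CPU might look like:
 *
 *	pmu {
 *		compatible = "arm,cortex-a15-pmu";
 *		interrupts = <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>,
 *			     <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
 *		interrupt-affinity = <&cpu0>, <&cpu1>;
 *	};
 */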
static int pmu_parse_irq_affinity(struct device_node *node, int i)
{
	struct device_node *dn;
	int cpu;

	/*
	 * If we don't have an interrupt-affinity property, we guess irq
	 * affinity matches our logical CPU order, as we used to assume.
	 * This is fragile, so we'll warn in pmu_parse_irqs().
	 */
	if (!pmu_has_irq_affinity(node))
		return i;

	dn = of_parse_phandle(node, "interrupt-affinity", i);
	if (!dn) {
		pr_warn("failed to parse interrupt-affinity[%d] for %s\n",
			i, node->name);
		return -EINVAL;
	}

	cpu = of_cpu_node_to_id(dn);
	if (cpu < 0) {
		pr_warn("failed to find logical CPU for %s\n", dn->name);
		cpu = nr_cpu_ids;
	}

	of_node_put(dn);

	return cpu;
}

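/*
 * pmu_parse_irqs() handles three layouts: no IRQs at all (the PMU can only
 * count, not sample), a single percpu_devid IRQ (one PPI shared by all CPUs
 * in its partition), or one SPI per covered CPU, with the CPU mapping taken
 * from "interrupt-affinity" or guessed from logical CPU order.
 */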
static int pmu_parse_irqs(struct arm_pmu *pmu)
{
	int i = 0, num_irqs;
	struct platform_device *pdev = pmu->plat_device;
	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;

	num_irqs = platform_irq_count(pdev);
	if (num_irqs < 0) {
		pr_err("unable to count PMU IRQs\n");
		return num_irqs;
	}

	/*
	 * If there are no IRQs, we have no idea which CPUs are covered by
	 * the PMU. To match our prior behaviour, we assume all CPUs.
	 */
	if (num_irqs == 0) {
		pr_warn("no irqs for PMU, sampling events not supported\n");
		pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
		cpumask_setall(&pmu->supported_cpus);
		return 0;
	}

	if (num_irqs == 1) {
		int irq = platform_get_irq(pdev, 0);

		/* platform_get_irq() returns a negative errno on failure */
		if (irq > 0 && irq_is_percpu_devid(irq))
			return pmu_parse_percpu_irq(pmu, irq);
	}

	if (!pmu_has_irq_affinity(pdev->dev.of_node)) {
		pr_warn("no interrupt-affinity property for %pOF, guessing.\n",
			pdev->dev.of_node);
	}

	/*
	 * Some platforms have all PMU IRQs OR'd into a single IRQ, with a
	 * special platdata function that attempts to demux them.
	 */
	if (dev_get_platdata(&pdev->dev))
		cpumask_setall(&pmu->supported_cpus);

	for (i = 0; i < num_irqs; i++) {
		int cpu, irq;

		irq = platform_get_irq(pdev, i);
		if (WARN_ON(irq <= 0))
			continue;

		if (irq_is_percpu_devid(irq)) {
			pr_warn("multiple PPIs or mismatched SPI/PPI detected\n");
			return -EINVAL;
		}

		cpu = pmu_parse_irq_affinity(pdev->dev.of_node, i);
		if (cpu < 0)
			return cpu;
		if (cpu >= nr_cpu_ids)
			continue;

		if (per_cpu(hw_events->irq, cpu)) {
			pr_warn("multiple PMU IRQs for the same CPU detected\n");
			return -EINVAL;
		}

		per_cpu(hw_events->irq, cpu) = irq;
		cpumask_set_cpu(cpu, &pmu->supported_cpus);
	}

	return 0;
}

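/*
 * arm_pmu_device_probe() is the common probe helper called by the
 * architecture PMU platform drivers. A caller supplies an of_device_id
 * table whose .data points at an armpmu_init_fn, and/or a pmu_probe_info
 * table for CPUID-based matching. A minimal sketch of a caller (the driver
 * name, tables and init function below are hypothetical):
 *
 *	static const struct of_device_id example_pmu_of_ids[] = {
 *		{ .compatible = "arm,cortex-a15-pmu", .data = example_a15_pmu_init },
 *		{ },
 *	};
 *
 *	static int example_pmu_probe(struct platform_device *pdev)
 *	{
 *		return arm_pmu_device_probe(pdev, example_pmu_of_ids, NULL);
 *	}
 *
 *	static struct platform_driver example_pmu_driver = {
 *		.driver		= {
 *			.name		= "example-armpmu",
 *			.of_match_table	= example_pmu_of_ids,
 *		},
 *		.probe		= example_pmu_probe,
 *	};
 *	builtin_platform_driver(example_pmu_driver);
 */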
int arm_pmu_device_probe(struct platform_device *pdev,
			 const struct of_device_id *of_table,
			 const struct pmu_probe_info *probe_table)
{
	const struct of_device_id *of_id;
	armpmu_init_fn init_fn;
	struct device_node *node = pdev->dev.of_node;
	struct arm_pmu *pmu;
	int ret = -ENODEV;

	pmu = armpmu_alloc();
	if (!pmu)
		return -ENOMEM;

	pmu->plat_device = pdev;

	ret = pmu_parse_irqs(pmu);
	if (ret)
		goto out_free;

	if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) {
		init_fn = of_id->data;

		pmu->secure_access = of_property_read_bool(pdev->dev.of_node,
							   "secure-reg-access");

		/* arm64 systems boot only as non-secure */
		if (IS_ENABLED(CONFIG_ARM64) && pmu->secure_access) {
			pr_warn("ignoring \"secure-reg-access\" property for arm64\n");
			pmu->secure_access = false;
		}

		ret = init_fn(pmu);
	} else if (probe_table) {
		cpumask_setall(&pmu->supported_cpus);
		ret = probe_current_pmu(pmu, probe_table);
	}

	if (ret) {
		pr_info("%pOF: failed to probe PMU!\n", node);
		goto out_free;
	}

	ret = armpmu_request_irqs(pmu);
	if (ret)
		goto out_free_irqs;

	ret = armpmu_register(pmu);
	if (ret)
		goto out_free_irqs;

	return 0;

out_free_irqs:
	armpmu_free_irqs(pmu);
out_free:
	pr_info("%pOF: failed to register PMU devices!\n", node);
	armpmu_free(pmu);
	return ret;
}