xref: /openbmc/linux/drivers/perf/arm_pmu_acpi.c (revision 22d55f02)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * ACPI probing code for ARM performance counters.
 *
 * Copyright (C) 2017 ARM Ltd.
 */

#include <linux/acpi.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/percpu.h>
#include <linux/perf/arm_pmu.h>

#include <asm/cputype.h>

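/*
 * Per-CPU bookkeeping filled in before the PMUs themselves are probed:
 * probed_pmus tracks which arm_pmu instance each CPU has been matched to,
 * and pmu_irqs records the Linux IRQ parsed from that CPU's MADT GICC
 * entry (0 if none was described).
 */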
static DEFINE_PER_CPU(struct arm_pmu *, probed_pmus);
static DEFINE_PER_CPU(int, pmu_irqs);

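/*
 * Map the GICC performance interrupt described for @cpu in the MADT to a
 * Linux IRQ. Returns the IRQ number, 0 if no usable interrupt is
 * described, or a negative error code.
 */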
static int arm_pmu_acpi_register_irq(int cpu)
{
	struct acpi_madt_generic_interrupt *gicc;
	int gsi, trigger;

	gicc = acpi_cpu_get_madt_gicc(cpu);
	if (WARN_ON(!gicc))
		return -EINVAL;

	gsi = gicc->performance_interrupt;

	/*
	 * Per the ACPI spec, the MADT cannot describe a PMU that doesn't
	 * have an interrupt. QEMU advertises this by using a GSI of zero,
	 * which is not known to be valid on any hardware despite being
	 * valid per the spec. Take the pragmatic approach and reject a
	 * GSI of zero for now.
	 */
	if (!gsi)
		return 0;

	if (gicc->flags & ACPI_MADT_PERFORMANCE_IRQ_MODE)
		trigger = ACPI_EDGE_SENSITIVE;
	else
		trigger = ACPI_LEVEL_SENSITIVE;

	/*
	 * Helpfully, the MADT GICC doesn't have a polarity flag for the
	 * "performance interrupt". Luckily, on compliant GICs the polarity is
	 * a fixed value in HW (for both SPIs and PPIs) that we cannot change
	 * from SW.
	 *
	 * Here we pass in ACPI_ACTIVE_HIGH to keep the core code happy. This
	 * may not match the real polarity, but that should not matter.
	 *
	 * Other interrupt controllers are not supported with ACPI.
	 */
	return acpi_register_gsi(NULL, gsi, trigger, ACPI_ACTIVE_HIGH);
}

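/*
 * Undo arm_pmu_acpi_register_irq(): unregister the GSI described for
 * @cpu, if any.
 */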
static void arm_pmu_acpi_unregister_irq(int cpu)
{
	struct acpi_madt_generic_interrupt *gicc;
	int gsi;

	gicc = acpi_cpu_get_madt_gicc(cpu);
	if (!gicc)
		return;

	gsi = gicc->performance_interrupt;
	acpi_unregister_gsi(gsi);
}

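/*
 * Register and request the PMU interrupt for every possible CPU up front,
 * recording it in pmu_irqs so that arm_pmu_acpi_cpu_starting() can hand it
 * to the right PMU later. On failure, tear down everything registered so
 * far.
 */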
static int arm_pmu_acpi_parse_irqs(void)
{
	int irq, cpu, irq_cpu, err;

	for_each_possible_cpu(cpu) {
		irq = arm_pmu_acpi_register_irq(cpu);
		if (irq < 0) {
			err = irq;
			pr_warn("Unable to parse ACPI PMU IRQ for CPU%d: %d\n",
				cpu, err);
			goto out_err;
		} else if (irq == 0) {
			pr_warn("No ACPI PMU IRQ for CPU%d\n", cpu);
		}

		/*
		 * Log and request the IRQ so the core arm_pmu code can manage
		 * it. We'll have to sanity-check IRQs later when we associate
		 * them with their PMUs.
		 */
		per_cpu(pmu_irqs, cpu) = irq;
		armpmu_request_irq(irq, cpu);
	}

	return 0;

out_err:
	for_each_possible_cpu(cpu) {
		irq = per_cpu(pmu_irqs, cpu);
		if (!irq)
			continue;

		arm_pmu_acpi_unregister_irq(cpu);

		/*
		 * Blat all copies of the IRQ so that we only unregister the
		 * corresponding GSI once (e.g. when we have PPIs).
		 */
		for_each_possible_cpu(irq_cpu) {
			if (per_cpu(pmu_irqs, irq_cpu) == irq)
				per_cpu(pmu_irqs, irq_cpu) = 0;
		}
	}

	return err;
}

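/*
 * Return the arm_pmu already allocated for CPUs sharing this CPU's MIDR,
 * or allocate a fresh one. This runs from the CPU starting notifier, so
 * the allocation must be atomic.
 */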
static struct arm_pmu *arm_pmu_acpi_find_alloc_pmu(void)
{
	unsigned long cpuid = read_cpuid_id();
	struct arm_pmu *pmu;
	int cpu;

	for_each_possible_cpu(cpu) {
		pmu = per_cpu(probed_pmus, cpu);
		if (!pmu || pmu->acpi_cpuid != cpuid)
			continue;

		return pmu;
	}

	pmu = armpmu_alloc_atomic();
	if (!pmu) {
		pr_warn("Unable to allocate PMU for CPU%d\n",
			smp_processor_id());
		return NULL;
	}

	pmu->acpi_cpuid = cpuid;

	return pmu;
}

/*
 * Check whether the new IRQ is compatible with those already associated with
 * the PMU (e.g. we don't have mismatched PPIs).
 */
static bool pmu_irq_matches(struct arm_pmu *pmu, int irq)
{
	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
	int cpu;

	if (!irq)
		return true;

	for_each_cpu(cpu, &pmu->supported_cpus) {
		int other_irq = per_cpu(hw_events->irq, cpu);
		if (!other_irq)
			continue;

		if (irq == other_irq)
			continue;
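		/*
		 * Two different IRQ numbers are only a problem when at least
		 * one of them is a per-CPU (PPI) interrupt; distinct SPIs on
		 * different CPUs are fine.
		 */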
		if (!irq_is_percpu_devid(irq) && !irq_is_percpu_devid(other_irq))
			continue;

		pr_warn("mismatched PPIs detected\n");
		return false;
	}

	return true;
}

/*
 * This must run before the common arm_pmu hotplug logic, so that we can
 * associate a CPU and its interrupt before the common code tries to manage the
 * affinity and so on.
 *
 * Note that hotplug events are serialized, so we cannot race with another CPU
 * coming up. The perf core won't open events while a hotplug event is in
 * progress.
 */
static int arm_pmu_acpi_cpu_starting(unsigned int cpu)
{
	struct arm_pmu *pmu;
	struct pmu_hw_events __percpu *hw_events;
	int irq;

	/* If we've already probed this CPU, we have nothing to do */
	if (per_cpu(probed_pmus, cpu))
		return 0;

	irq = per_cpu(pmu_irqs, cpu);

	pmu = arm_pmu_acpi_find_alloc_pmu();
	if (!pmu)
		return -ENOMEM;

	per_cpu(probed_pmus, cpu) = pmu;

	if (pmu_irq_matches(pmu, irq)) {
		hw_events = pmu->hw_events;
		per_cpu(hw_events->irq, cpu) = irq;
	}

	cpumask_set_cpu(cpu, &pmu->supported_cpus);

	/*
	 * Ideally, we'd probe the PMU here when we find the first matching
	 * CPU. We can't do that for several reasons; see the comment in
	 * arm_pmu_acpi_probe().
	 *
	 * So for the time being, we're done.
	 */
	return 0;
}

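/*
 * Probe and register all PMUs discovered so far. The "real" CPU PMU driver
 * (e.g. the arm64 PMUv3 driver) calls this with its CPU-specific @init_fn;
 * each distinct PMU found via arm_pmu_acpi_cpu_starting() is initialised,
 * given a unique name and registered with perf.
 */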
int arm_pmu_acpi_probe(armpmu_init_fn init_fn)
{
	int pmu_idx = 0;
	int cpu, ret;

	/*
	 * Initialise and register the set of PMUs which we know about right
	 * now. Ideally we'd do this in arm_pmu_acpi_cpu_starting() so that we
	 * could handle late hotplug, but this may lead to deadlock since we
	 * might try to register a hotplug notifier instance from within a
	 * hotplug notifier.
	 *
	 * There's also the problem of having access to the right init_fn,
	 * without tying this too deeply into the "real" PMU driver.
	 *
	 * For the moment, as with the platform/DT case, we need at least one
	 * of a PMU's CPUs to be online at probe time.
	 */
	for_each_possible_cpu(cpu) {
		struct arm_pmu *pmu = per_cpu(probed_pmus, cpu);
		char *base_name;

		if (!pmu || pmu->name)
			continue;

		ret = init_fn(pmu);
		if (ret == -ENODEV) {
			/* PMU not handled by this driver, or not present */
			continue;
		} else if (ret) {
			pr_warn("Unable to initialise PMU for CPU%d\n", cpu);
			return ret;
		}

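		/*
		 * Distinguish multiple PMU types by appending an index to the
		 * name chosen by the init function, e.g. <base_name>_0,
		 * <base_name>_1, ...
		 */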
		base_name = pmu->name;
		pmu->name = kasprintf(GFP_KERNEL, "%s_%d", base_name, pmu_idx++);
		if (!pmu->name) {
			pr_warn("Unable to allocate PMU name for CPU%d\n", cpu);
			return -ENOMEM;
		}

		ret = armpmu_register(pmu);
		if (ret) {
			pr_warn("Failed to register PMU for CPU%d\n", cpu);
			kfree(pmu->name);
			return ret;
		}
	}

	return 0;
}

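/*
 * Parse and request the per-CPU PMU interrupts from the MADT, then install
 * the hotplug callback that associates each CPU with a PMU as it comes
 * online.
 */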
static int arm_pmu_acpi_init(void)
{
	int ret;

	if (acpi_disabled)
		return 0;

	ret = arm_pmu_acpi_parse_irqs();
	if (ret)
		return ret;

	ret = cpuhp_setup_state(CPUHP_AP_PERF_ARM_ACPI_STARTING,
				"perf/arm/pmu_acpi:starting",
				arm_pmu_acpi_cpu_starting, NULL);

	return ret;
}
subsys_initcall(arm_pmu_acpi_init)