// SPDX-License-Identifier: GPL-2.0
/*
 * ARM CoreSight Architecture PMU driver.
 *
 * This driver adds support for uncore PMUs based on the ARM CoreSight
 * Performance Monitoring Unit Architecture. The PMU is accessible via MMIO
 * registers and, like other uncore PMUs, it does not support process specific
 * events and cannot be used in sampling mode.
 *
 * This code is based on other uncore PMUs like the ARM DSU PMU. It provides a
 * generic implementation to operate the PMU according to the CoreSight PMU
 * architecture and the ACPI ARM PMU table (APMT) documents below:
 * - ARM CoreSight PMU architecture document number: ARM IHI 0091 A.a-00bet0.
 * - APMT document number: ARM DEN0117.
 *
 * The user should refer to the vendor technical documentation for details
 * about the supported events.
 *
 * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 */

#include <linux/acpi.h>
#include <linux/cacheinfo.h>
#include <linux/ctype.h>
#include <linux/interrupt.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/module.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>

#include "arm_cspmu.h"
#include "nvidia_cspmu.h"

#define PMUNAME "arm_cspmu"
#define DRVNAME "arm-cs-arch-pmu"

#define ARM_CSPMU_CPUMASK_ATTR(_name, _config) \
	ARM_CSPMU_EXT_ATTR(_name, arm_cspmu_cpumask_show, \
			   (unsigned long)_config)

/*
 * CoreSight PMU Arch register offsets.
 */
#define PMEVCNTR_LO 0x0
#define PMEVCNTR_HI 0x4
#define PMEVTYPER 0x400
#define PMCCFILTR 0x47C
#define PMEVFILTR 0xA00
#define PMCNTENSET 0xC00
#define PMCNTENCLR 0xC20
#define PMINTENSET 0xC40
#define PMINTENCLR 0xC60
#define PMOVSCLR 0xC80
#define PMOVSSET 0xCC0
#define PMCFGR 0xE00
#define PMCR 0xE04
#define PMIIDR 0xE08

/* PMCFGR register field */
#define PMCFGR_NCG GENMASK(31, 28)
#define PMCFGR_HDBG BIT(24)
#define PMCFGR_TRO BIT(23)
#define PMCFGR_SS BIT(22)
#define PMCFGR_FZO BIT(21)
#define PMCFGR_MSI BIT(20)
#define PMCFGR_UEN BIT(19)
#define PMCFGR_NA BIT(17)
#define PMCFGR_EX BIT(16)
#define PMCFGR_CCD BIT(15)
#define PMCFGR_CC BIT(14)
#define PMCFGR_SIZE GENMASK(13, 8)
#define PMCFGR_N GENMASK(7, 0)

/* PMCR register field */
#define PMCR_TRO BIT(11)
#define PMCR_HDBG BIT(10)
#define PMCR_FZO BIT(9)
#define PMCR_NA BIT(8)
#define PMCR_DP BIT(5)
#define PMCR_X BIT(4)
#define PMCR_D BIT(3)
#define PMCR_C BIT(2)
#define PMCR_P BIT(1)
#define PMCR_E BIT(0)

/* Each SET/CLR register supports up to 32 counters. */
#define ARM_CSPMU_SET_CLR_COUNTER_SHIFT 5
#define ARM_CSPMU_SET_CLR_COUNTER_NUM \
	(1 << ARM_CSPMU_SET_CLR_COUNTER_SHIFT)

/* Convert counter idx into SET/CLR register number. */
#define COUNTER_TO_SET_CLR_ID(idx) \
	(idx >> ARM_CSPMU_SET_CLR_COUNTER_SHIFT)

/* Convert counter idx into SET/CLR register bit. */
#define COUNTER_TO_SET_CLR_BIT(idx) \
	(idx & (ARM_CSPMU_SET_CLR_COUNTER_NUM - 1))
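/*
 * For example, a hypothetical counter index of 35 maps to SET/CLR
 * register 1 (35 >> 5) and bit 3 (35 & 31) within that register.
 */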

#define ARM_CSPMU_ACTIVE_CPU_MASK 0x0
#define ARM_CSPMU_ASSOCIATED_CPU_MASK 0x1

/* Check and use default if implementer doesn't provide attribute callback */
#define CHECK_DEFAULT_IMPL_OPS(ops, callback) \
	do { \
		if (!ops->callback) \
			ops->callback = arm_cspmu_ ## callback; \
	} while (0)

/*
 * Maximum poll count for reading counter value using high-low-high sequence.
 */
#define HILOHI_MAX_POLL 1000

/* JEDEC-assigned JEP106 identification code */
#define ARM_CSPMU_IMPL_ID_NVIDIA 0x36B

static unsigned long arm_cspmu_cpuhp_state;

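/* The APMT node describing this PMU is passed in via platform data. */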
static struct acpi_apmt_node *arm_cspmu_apmt_node(struct device *dev)
{
	return *(struct acpi_apmt_node **)dev_get_platdata(dev);
}

/*
 * In the CoreSight PMU architecture, all of the MMIO registers are 32-bit
 * except the counter registers. A counter register can be implemented as a
 * 32-bit or 64-bit register depending on the value of the PMCFGR.SIZE field.
 * For 64-bit access, single-copy 64-bit atomic support is implementation
 * defined. The APMT node flag is used to identify if the PMU supports 64-bit
 * single-copy atomic access. If it is not supported, the driver treats the
 * register as a pair of 32-bit registers.
 */

/*
 * Read a 64-bit register as a pair of 32-bit registers using the hi-lo-hi
 * sequence.
 */
static u64 read_reg64_hilohi(const void __iomem *addr, u32 max_poll_count)
{
	u32 val_lo, val_hi;
	u64 val;

	/* Use high-low-high sequence to avoid tearing */
	do {
		if (max_poll_count-- == 0) {
			pr_err("ARM CSPMU: timeout in high-low-high sequence\n");
			return 0;
		}

		val_hi = readl(addr + 4);
		val_lo = readl(addr);
	} while (val_hi != readl(addr + 4));

	val = (((u64)val_hi << 32) | val_lo);

	return val;
}

/* Check if cycle counter is supported. */
static inline bool supports_cycle_counter(const struct arm_cspmu *cspmu)
{
	return (cspmu->pmcfgr & PMCFGR_CC);
}

/* Get counter size, which is (PMCFGR_SIZE + 1). */
static inline u32 counter_size(const struct arm_cspmu *cspmu)
{
	return FIELD_GET(PMCFGR_SIZE, cspmu->pmcfgr) + 1;
}

/* Get counter mask. */
static inline u64 counter_mask(const struct arm_cspmu *cspmu)
{
	return GENMASK_ULL(counter_size(cspmu) - 1, 0);
}

/* Check if counter is implemented as 64-bit register. */
static inline bool use_64b_counter_reg(const struct arm_cspmu *cspmu)
{
	return (counter_size(cspmu) > 32);
}

ssize_t arm_cspmu_sysfs_event_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, typeof(*pmu_attr), attr);
	return sysfs_emit(buf, "event=0x%llx\n", pmu_attr->id);
}
EXPORT_SYMBOL_GPL(arm_cspmu_sysfs_event_show);

/* Default event list. */
static struct attribute *arm_cspmu_event_attrs[] = {
	ARM_CSPMU_EVENT_ATTR(cycles, ARM_CSPMU_EVT_CYCLES_DEFAULT),
	NULL,
};

static struct attribute **
arm_cspmu_get_event_attrs(const struct arm_cspmu *cspmu)
{
	struct attribute **attrs;

	attrs = devm_kmemdup(cspmu->dev, arm_cspmu_event_attrs,
			     sizeof(arm_cspmu_event_attrs), GFP_KERNEL);

	return attrs;
}

static umode_t
arm_cspmu_event_attr_is_visible(struct kobject *kobj,
				struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct arm_cspmu *cspmu = to_arm_cspmu(dev_get_drvdata(dev));
	struct perf_pmu_events_attr *eattr;

	eattr = container_of(attr, typeof(*eattr), attr.attr);

	/* Hide cycle event if not supported */
	if (!supports_cycle_counter(cspmu) &&
	    eattr->id == ARM_CSPMU_EVT_CYCLES_DEFAULT)
		return 0;

	return attr->mode;
}

ssize_t arm_cspmu_sysfs_format_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct dev_ext_attribute *eattr =
		container_of(attr, struct dev_ext_attribute, attr);
	return sysfs_emit(buf, "%s\n", (char *)eattr->var);
}
EXPORT_SYMBOL_GPL(arm_cspmu_sysfs_format_show);

static struct attribute *arm_cspmu_format_attrs[] = {
	ARM_CSPMU_FORMAT_EVENT_ATTR,
	ARM_CSPMU_FORMAT_FILTER_ATTR,
	NULL,
};

static struct attribute **
arm_cspmu_get_format_attrs(const struct arm_cspmu *cspmu)
{
	struct attribute **attrs;

	attrs = devm_kmemdup(cspmu->dev, arm_cspmu_format_attrs,
			     sizeof(arm_cspmu_format_attrs), GFP_KERNEL);

	return attrs;
}

static u32 arm_cspmu_event_type(const struct perf_event *event)
{
	return event->attr.config & ARM_CSPMU_EVENT_MASK;
}

static bool arm_cspmu_is_cycle_counter_event(const struct perf_event *event)
{
	return (event->attr.config == ARM_CSPMU_EVT_CYCLES_DEFAULT);
}

static u32 arm_cspmu_event_filter(const struct perf_event *event)
{
	return event->attr.config1 & ARM_CSPMU_FILTER_MASK;
}

static ssize_t arm_cspmu_identifier_show(struct device *dev,
					 struct device_attribute *attr,
					 char *page)
{
	struct arm_cspmu *cspmu = to_arm_cspmu(dev_get_drvdata(dev));

	return sysfs_emit(page, "%s\n", cspmu->identifier);
}

static struct device_attribute arm_cspmu_identifier_attr =
	__ATTR(identifier, 0444, arm_cspmu_identifier_show, NULL);

static struct attribute *arm_cspmu_identifier_attrs[] = {
	&arm_cspmu_identifier_attr.attr,
	NULL,
};

static struct attribute_group arm_cspmu_identifier_attr_group = {
	.attrs = arm_cspmu_identifier_attrs,
};

static const char *arm_cspmu_get_identifier(const struct arm_cspmu *cspmu)
{
	const char *identifier =
		devm_kasprintf(cspmu->dev, GFP_KERNEL, "%x",
			       cspmu->impl.pmiidr);
	return identifier;
}

static const char *arm_cspmu_type_str[ACPI_APMT_NODE_TYPE_COUNT] = {
	"mc",
	"smmu",
	"pcie",
	"acpi",
	"cache",
};

static const char *arm_cspmu_get_name(const struct arm_cspmu *cspmu)
{
	struct device *dev;
	struct acpi_apmt_node *apmt_node;
	u8 pmu_type;
	char *name;
	char acpi_hid_string[ACPI_ID_LEN] = { 0 };
	static atomic_t pmu_idx[ACPI_APMT_NODE_TYPE_COUNT] = { 0 };

	dev = cspmu->dev;
	apmt_node = arm_cspmu_apmt_node(dev);
	pmu_type = apmt_node->type;

	if (pmu_type >= ACPI_APMT_NODE_TYPE_COUNT) {
		dev_err(dev, "unsupported PMU type-%u\n", pmu_type);
		return NULL;
	}

	if (pmu_type == ACPI_APMT_NODE_TYPE_ACPI) {
		memcpy(acpi_hid_string,
		       &apmt_node->inst_primary,
		       sizeof(apmt_node->inst_primary));
		name = devm_kasprintf(dev, GFP_KERNEL, "%s_%s_%s_%u", PMUNAME,
				      arm_cspmu_type_str[pmu_type],
				      acpi_hid_string,
				      apmt_node->inst_secondary);
	} else {
		name = devm_kasprintf(dev, GFP_KERNEL, "%s_%s_%d", PMUNAME,
				      arm_cspmu_type_str[pmu_type],
				      atomic_fetch_inc(&pmu_idx[pmu_type]));
	}

	return name;
}

static ssize_t arm_cspmu_cpumask_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct pmu *pmu = dev_get_drvdata(dev);
	struct arm_cspmu *cspmu = to_arm_cspmu(pmu);
	struct dev_ext_attribute *eattr =
		container_of(attr, struct dev_ext_attribute, attr);
	unsigned long mask_id = (unsigned long)eattr->var;
	const cpumask_t *cpumask;

	switch (mask_id) {
	case ARM_CSPMU_ACTIVE_CPU_MASK:
		cpumask = &cspmu->active_cpu;
		break;
	case ARM_CSPMU_ASSOCIATED_CPU_MASK:
		cpumask = &cspmu->associated_cpus;
		break;
	default:
		return 0;
	}
	return cpumap_print_to_pagebuf(true, buf, cpumask);
}

static struct attribute *arm_cspmu_cpumask_attrs[] = {
	ARM_CSPMU_CPUMASK_ATTR(cpumask, ARM_CSPMU_ACTIVE_CPU_MASK),
	ARM_CSPMU_CPUMASK_ATTR(associated_cpus, ARM_CSPMU_ASSOCIATED_CPU_MASK),
	NULL,
};

static struct attribute_group arm_cspmu_cpumask_attr_group = {
	.attrs = arm_cspmu_cpumask_attrs,
};

struct impl_match {
	u32 pmiidr;
	u32 mask;
	int (*impl_init_ops)(struct arm_cspmu *cspmu);
};

static const struct impl_match impl_match[] = {
	{
		.pmiidr = ARM_CSPMU_IMPL_ID_NVIDIA,
		.mask = ARM_CSPMU_PMIIDR_IMPLEMENTER,
		.impl_init_ops = nv_cspmu_init_ops
	},
	{}
};

static int arm_cspmu_init_impl_ops(struct arm_cspmu *cspmu)
{
	int ret;
	struct arm_cspmu_impl_ops *impl_ops = &cspmu->impl.ops;
	struct acpi_apmt_node *apmt_node = arm_cspmu_apmt_node(cspmu->dev);
	const struct impl_match *match = impl_match;

	/*
	 * Get the PMU implementer and product id from the APMT node.
	 * If the APMT node doesn't have an implementer/product id, try to
	 * get it from PMIIDR.
	 */
	cspmu->impl.pmiidr =
		(apmt_node->impl_id) ? apmt_node->impl_id :
				       readl(cspmu->base0 + PMIIDR);

	/* Find implementer specific attribute ops. */
	for (; match->pmiidr; match++) {
		const u32 mask = match->mask;

		if ((match->pmiidr & mask) == (cspmu->impl.pmiidr & mask)) {
			ret = match->impl_init_ops(cspmu);
			if (ret)
				return ret;

			break;
		}
	}

	/* Use default callbacks if the implementer doesn't provide one. */
	CHECK_DEFAULT_IMPL_OPS(impl_ops, get_event_attrs);
	CHECK_DEFAULT_IMPL_OPS(impl_ops, get_format_attrs);
	CHECK_DEFAULT_IMPL_OPS(impl_ops, get_identifier);
	CHECK_DEFAULT_IMPL_OPS(impl_ops, get_name);
	CHECK_DEFAULT_IMPL_OPS(impl_ops, is_cycle_counter_event);
	CHECK_DEFAULT_IMPL_OPS(impl_ops, event_type);
	CHECK_DEFAULT_IMPL_OPS(impl_ops, event_filter);
	CHECK_DEFAULT_IMPL_OPS(impl_ops, event_attr_is_visible);

	return 0;
}

static struct attribute_group *
arm_cspmu_alloc_event_attr_group(struct arm_cspmu *cspmu)
{
	struct attribute_group *event_group;
	struct device *dev = cspmu->dev;
	const struct arm_cspmu_impl_ops *impl_ops = &cspmu->impl.ops;

	event_group =
		devm_kzalloc(dev, sizeof(struct attribute_group), GFP_KERNEL);
	if (!event_group)
		return NULL;

	event_group->name = "events";
	event_group->is_visible = impl_ops->event_attr_is_visible;
	event_group->attrs = impl_ops->get_event_attrs(cspmu);

	if (!event_group->attrs)
		return NULL;

	return event_group;
}

static struct attribute_group *
arm_cspmu_alloc_format_attr_group(struct arm_cspmu *cspmu)
{
	struct attribute_group *format_group;
	struct device *dev = cspmu->dev;

	format_group =
		devm_kzalloc(dev, sizeof(struct attribute_group), GFP_KERNEL);
	if (!format_group)
		return NULL;

	format_group->name = "format";
	format_group->attrs = cspmu->impl.ops.get_format_attrs(cspmu);

	if (!format_group->attrs)
		return NULL;

	return format_group;
}

static struct attribute_group **
arm_cspmu_alloc_attr_group(struct arm_cspmu *cspmu)
{
	struct attribute_group **attr_groups = NULL;
	struct device *dev = cspmu->dev;
	const struct arm_cspmu_impl_ops *impl_ops = &cspmu->impl.ops;
	int ret;

	ret = arm_cspmu_init_impl_ops(cspmu);
	if (ret)
		return NULL;

	cspmu->identifier = impl_ops->get_identifier(cspmu);
	cspmu->name = impl_ops->get_name(cspmu);

	if (!cspmu->identifier || !cspmu->name)
		return NULL;

	attr_groups = devm_kcalloc(dev, 5, sizeof(struct attribute_group *),
				   GFP_KERNEL);
	if (!attr_groups)
		return NULL;

	attr_groups[0] = arm_cspmu_alloc_event_attr_group(cspmu);
	attr_groups[1] = arm_cspmu_alloc_format_attr_group(cspmu);
	attr_groups[2] = &arm_cspmu_identifier_attr_group;
	attr_groups[3] = &arm_cspmu_cpumask_attr_group;

	if (!attr_groups[0] || !attr_groups[1])
		return NULL;

	return attr_groups;
}

static inline void arm_cspmu_reset_counters(struct arm_cspmu *cspmu)
{
	u32 pmcr = 0;

	pmcr |= PMCR_P;
	pmcr |= PMCR_C;
	writel(pmcr, cspmu->base0 + PMCR);
}

static inline void arm_cspmu_start_counters(struct arm_cspmu *cspmu)
{
	writel(PMCR_E, cspmu->base0 + PMCR);
}

static inline void arm_cspmu_stop_counters(struct arm_cspmu *cspmu)
{
	writel(0, cspmu->base0 + PMCR);
}

static void arm_cspmu_enable(struct pmu *pmu)
{
	bool disabled;
	struct arm_cspmu *cspmu = to_arm_cspmu(pmu);

	disabled = bitmap_empty(cspmu->hw_events.used_ctrs,
				cspmu->num_logical_ctrs);

	if (disabled)
		return;

	arm_cspmu_start_counters(cspmu);
}

static void arm_cspmu_disable(struct pmu *pmu)
{
	struct arm_cspmu *cspmu = to_arm_cspmu(pmu);

	arm_cspmu_stop_counters(cspmu);
}

static int arm_cspmu_get_event_idx(struct arm_cspmu_hw_events *hw_events,
				   struct perf_event *event)
{
	int idx;
	struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);

	if (supports_cycle_counter(cspmu)) {
		if (cspmu->impl.ops.is_cycle_counter_event(event)) {
			/* Try to claim the cycle counter. */
			if (test_and_set_bit(cspmu->cycle_counter_logical_idx,
					     hw_events->used_ctrs))
				return -EAGAIN;

			return cspmu->cycle_counter_logical_idx;
		}

		/*
		 * Search for a regular counter in the used counter bitmap.
		 * The cycle counter divides the bitmap into two parts. Search
		 * the first then the second half to exclude the cycle counter
		 * bit.
		 */
		idx = find_first_zero_bit(hw_events->used_ctrs,
					  cspmu->cycle_counter_logical_idx);
		if (idx >= cspmu->cycle_counter_logical_idx) {
			idx = find_next_zero_bit(
				hw_events->used_ctrs,
				cspmu->num_logical_ctrs,
				cspmu->cycle_counter_logical_idx + 1);
		}
	} else {
		idx = find_first_zero_bit(hw_events->used_ctrs,
					  cspmu->num_logical_ctrs);
	}

	if (idx >= cspmu->num_logical_ctrs)
		return -EAGAIN;

	set_bit(idx, hw_events->used_ctrs);

	return idx;
}

static bool arm_cspmu_validate_event(struct pmu *pmu,
				     struct arm_cspmu_hw_events *hw_events,
				     struct perf_event *event)
{
	if (is_software_event(event))
		return true;

	/* Reject groups spanning multiple HW PMUs. */
	if (event->pmu != pmu)
		return false;

	return (arm_cspmu_get_event_idx(hw_events, event) >= 0);
}

/*
 * Make sure the group of events can be scheduled at once
 * on the PMU.
 */
static bool arm_cspmu_validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct arm_cspmu_hw_events fake_hw_events;

	if (event->group_leader == event)
		return true;

	memset(&fake_hw_events, 0, sizeof(fake_hw_events));

	if (!arm_cspmu_validate_event(event->pmu, &fake_hw_events, leader))
		return false;

	for_each_sibling_event(sibling, leader) {
		if (!arm_cspmu_validate_event(event->pmu, &fake_hw_events,
					      sibling))
			return false;
	}

	return arm_cspmu_validate_event(event->pmu, &fake_hw_events, event);
}

static int arm_cspmu_event_init(struct perf_event *event)
{
	struct arm_cspmu *cspmu;
	struct hw_perf_event *hwc = &event->hw;

	cspmu = to_arm_cspmu(event->pmu);

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/*
	 * Following other "uncore" PMUs, we do not support sampling mode or
	 * attaching to a task (per-process mode).
	 */
	if (is_sampling_event(event)) {
		dev_dbg(cspmu->pmu.dev,
			"Can't support sampling events\n");
		return -EOPNOTSUPP;
	}

	if (event->cpu < 0 || event->attach_state & PERF_ATTACH_TASK) {
		dev_dbg(cspmu->pmu.dev,
			"Can't support per-task counters\n");
		return -EINVAL;
	}

	/*
	 * Make sure the CPU assignment is on one of the CPUs associated with
	 * this PMU.
	 */
	if (!cpumask_test_cpu(event->cpu, &cspmu->associated_cpus)) {
		dev_dbg(cspmu->pmu.dev,
			"Requested cpu is not associated with the PMU\n");
		return -EINVAL;
	}

	/* Make sure events on this PMU are handled by its current active CPU. */
	event->cpu = cpumask_first(&cspmu->active_cpu);
	if (event->cpu >= nr_cpu_ids)
		return -EINVAL;

	if (!arm_cspmu_validate_group(event))
		return -EINVAL;

	/*
	 * The logical counter id is tracked with hw_perf_event.extra_reg.idx.
	 * The physical counter id is tracked with hw_perf_event.idx.
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet.
	 */
	hwc->idx = -1;
	hwc->extra_reg.idx = -1;
	hwc->config = cspmu->impl.ops.event_type(event);

	return 0;
}

static inline u32 counter_offset(u32 reg_sz, u32 ctr_idx)
{
	return (PMEVCNTR_LO + (reg_sz * ctr_idx));
}

static void arm_cspmu_write_counter(struct perf_event *event, u64 val)
{
	u32 offset;
	struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);

	if (use_64b_counter_reg(cspmu)) {
		offset = counter_offset(sizeof(u64), event->hw.idx);

		writeq(val, cspmu->base1 + offset);
	} else {
		offset = counter_offset(sizeof(u32), event->hw.idx);

		writel(lower_32_bits(val), cspmu->base1 + offset);
	}
}

static u64 arm_cspmu_read_counter(struct perf_event *event)
{
	u32 offset;
	const void __iomem *counter_addr;
	struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);

	if (use_64b_counter_reg(cspmu)) {
		offset = counter_offset(sizeof(u64), event->hw.idx);
		counter_addr = cspmu->base1 + offset;

		return cspmu->has_atomic_dword ?
			readq(counter_addr) :
			read_reg64_hilohi(counter_addr, HILOHI_MAX_POLL);
	}

	offset = counter_offset(sizeof(u32), event->hw.idx);
	return readl(cspmu->base1 + offset);
}

/*
 * arm_cspmu_set_event_period: Set the period for the counter.
 *
 * To handle cases of extreme interrupt latency, we program
 * the counter with half of the max count for the counters.
 */
static void arm_cspmu_set_event_period(struct perf_event *event)
{
	struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);
	u64 val = counter_mask(cspmu) >> 1ULL;

	local64_set(&event->hw.prev_count, val);
	arm_cspmu_write_counter(event, val);
}

static void arm_cspmu_enable_counter(struct arm_cspmu *cspmu, int idx)
{
	u32 reg_id, reg_bit, inten_off, cnten_off;

	reg_id = COUNTER_TO_SET_CLR_ID(idx);
	reg_bit = COUNTER_TO_SET_CLR_BIT(idx);

	inten_off = PMINTENSET + (4 * reg_id);
	cnten_off = PMCNTENSET + (4 * reg_id);

	writel(BIT(reg_bit), cspmu->base0 + inten_off);
	writel(BIT(reg_bit), cspmu->base0 + cnten_off);
}

static void arm_cspmu_disable_counter(struct arm_cspmu *cspmu, int idx)
{
	u32 reg_id, reg_bit, inten_off, cnten_off;

	reg_id = COUNTER_TO_SET_CLR_ID(idx);
	reg_bit = COUNTER_TO_SET_CLR_BIT(idx);

	inten_off = PMINTENCLR + (4 * reg_id);
	cnten_off = PMCNTENCLR + (4 * reg_id);

	writel(BIT(reg_bit), cspmu->base0 + cnten_off);
	writel(BIT(reg_bit), cspmu->base0 + inten_off);
}

static void arm_cspmu_event_update(struct perf_event *event)
{
	struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev, now;

	do {
		prev = local64_read(&hwc->prev_count);
		now = arm_cspmu_read_counter(event);
	} while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev);

	delta = (now - prev) & counter_mask(cspmu);
	local64_add(delta, &event->count);
}

static inline void arm_cspmu_set_event(struct arm_cspmu *cspmu,
				       struct hw_perf_event *hwc)
{
	u32 offset = PMEVTYPER + (4 * hwc->idx);

	writel(hwc->config, cspmu->base0 + offset);
}

static inline void arm_cspmu_set_ev_filter(struct arm_cspmu *cspmu,
					   struct hw_perf_event *hwc,
					   u32 filter)
{
	u32 offset = PMEVFILTR + (4 * hwc->idx);

	writel(filter, cspmu->base0 + offset);
}

static inline void arm_cspmu_set_cc_filter(struct arm_cspmu *cspmu, u32 filter)
{
	u32 offset = PMCCFILTR;

	writel(filter, cspmu->base0 + offset);
}

static void arm_cspmu_start(struct perf_event *event, int pmu_flags)
{
	struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u32 filter;

	/* We always reprogram the counter */
	if (pmu_flags & PERF_EF_RELOAD)
		WARN_ON(!(hwc->state & PERF_HES_UPTODATE));

	arm_cspmu_set_event_period(event);

	filter = cspmu->impl.ops.event_filter(event);

	if (event->hw.extra_reg.idx == cspmu->cycle_counter_logical_idx) {
		arm_cspmu_set_cc_filter(cspmu, filter);
	} else {
		arm_cspmu_set_event(cspmu, hwc);
		arm_cspmu_set_ev_filter(cspmu, hwc, filter);
	}

	hwc->state = 0;

	arm_cspmu_enable_counter(cspmu, hwc->idx);
}

static void arm_cspmu_stop(struct perf_event *event, int pmu_flags)
{
	struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->state & PERF_HES_STOPPED)
		return;

	arm_cspmu_disable_counter(cspmu, hwc->idx);
	arm_cspmu_event_update(event);

	hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}

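/*
 * Map a logical counter index to its physical counter index. The only
 * difference is the cycle counter: logically it may occupy the last counter
 * slot, while physically it always lives at the architected cycle counter
 * index (ARM_CSPMU_CYCLE_CNTR_IDX).
 */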
static inline u32 to_phys_idx(struct arm_cspmu *cspmu, u32 idx)
{
	return (idx == cspmu->cycle_counter_logical_idx) ?
		ARM_CSPMU_CYCLE_CNTR_IDX : idx;
}

static int arm_cspmu_add(struct perf_event *event, int flags)
{
	struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);
	struct arm_cspmu_hw_events *hw_events = &cspmu->hw_events;
	struct hw_perf_event *hwc = &event->hw;
	int idx;

	if (WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(),
					   &cspmu->associated_cpus)))
		return -ENOENT;

	idx = arm_cspmu_get_event_idx(hw_events, event);
	if (idx < 0)
		return idx;

	hw_events->events[idx] = event;
	hwc->idx = to_phys_idx(cspmu, idx);
	hwc->extra_reg.idx = idx;
	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;

	if (flags & PERF_EF_START)
		arm_cspmu_start(event, PERF_EF_RELOAD);

	/* Propagate changes to the userspace mapping. */
	perf_event_update_userpage(event);

	return 0;
}

static void arm_cspmu_del(struct perf_event *event, int flags)
{
	struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);
	struct arm_cspmu_hw_events *hw_events = &cspmu->hw_events;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->extra_reg.idx;

	arm_cspmu_stop(event, PERF_EF_UPDATE);

	hw_events->events[idx] = NULL;

	clear_bit(idx, hw_events->used_ctrs);

	perf_event_update_userpage(event);
}

static void arm_cspmu_read(struct perf_event *event)
{
	arm_cspmu_event_update(event);
}

static struct arm_cspmu *arm_cspmu_alloc(struct platform_device *pdev)
{
	struct acpi_apmt_node *apmt_node;
	struct arm_cspmu *cspmu;
	struct device *dev = &pdev->dev;

	cspmu = devm_kzalloc(dev, sizeof(*cspmu), GFP_KERNEL);
	if (!cspmu)
		return NULL;

	cspmu->dev = dev;
	platform_set_drvdata(pdev, cspmu);

	apmt_node = arm_cspmu_apmt_node(dev);
	cspmu->has_atomic_dword = apmt_node->flags & ACPI_APMT_FLAGS_ATOMIC;

	return cspmu;
}

static int arm_cspmu_init_mmio(struct arm_cspmu *cspmu)
{
	struct device *dev;
	struct platform_device *pdev;

	dev = cspmu->dev;
	pdev = to_platform_device(dev);

	/* Base address for page 0. */
	cspmu->base0 = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(cspmu->base0)) {
		dev_err(dev, "ioremap failed for page-0 resource\n");
		return PTR_ERR(cspmu->base0);
	}

	/* Base address for page 1 if supported. Otherwise point to page 0. */
	cspmu->base1 = cspmu->base0;
	if (platform_get_resource(pdev, IORESOURCE_MEM, 1)) {
		cspmu->base1 = devm_platform_ioremap_resource(pdev, 1);
		if (IS_ERR(cspmu->base1)) {
			dev_err(dev, "ioremap failed for page-1 resource\n");
			return PTR_ERR(cspmu->base1);
		}
	}

	cspmu->pmcfgr = readl(cspmu->base0 + PMCFGR);

	cspmu->num_logical_ctrs = FIELD_GET(PMCFGR_N, cspmu->pmcfgr) + 1;

	cspmu->cycle_counter_logical_idx = ARM_CSPMU_MAX_HW_CNTRS;

	if (supports_cycle_counter(cspmu)) {
		/*
		 * The last logical counter is mapped to the cycle counter if
		 * there is a gap between the regular counters and the cycle
		 * counter. Otherwise, the logical and physical counters have
		 * a 1-to-1 mapping.
		 */
		cspmu->cycle_counter_logical_idx =
			(cspmu->num_logical_ctrs <= ARM_CSPMU_CYCLE_CNTR_IDX) ?
				cspmu->num_logical_ctrs - 1 :
				ARM_CSPMU_CYCLE_CNTR_IDX;
	}

	cspmu->num_set_clr_reg =
		DIV_ROUND_UP(cspmu->num_logical_ctrs,
			     ARM_CSPMU_SET_CLR_COUNTER_NUM);

	cspmu->hw_events.events =
		devm_kcalloc(dev, cspmu->num_logical_ctrs,
			     sizeof(*cspmu->hw_events.events), GFP_KERNEL);

	if (!cspmu->hw_events.events)
		return -ENOMEM;

	return 0;
}

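/*
 * Read the overflow status registers into pmovs[] and clear any set bits by
 * writing them back (the PMOVSCLR registers are write-1-to-clear). Returns
 * nonzero if at least one counter has overflowed.
 */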
static inline int arm_cspmu_get_reset_overflow(struct arm_cspmu *cspmu,
					       u32 *pmovs)
{
	int i;
	u32 pmovclr_offset = PMOVSCLR;
	u32 has_overflowed = 0;

	for (i = 0; i < cspmu->num_set_clr_reg; ++i) {
		pmovs[i] = readl(cspmu->base1 + pmovclr_offset);
		has_overflowed |= pmovs[i];
		writel(pmovs[i], cspmu->base1 + pmovclr_offset);
		pmovclr_offset += sizeof(u32);
	}

	return has_overflowed != 0;
}

static irqreturn_t arm_cspmu_handle_irq(int irq_num, void *dev)
{
	int idx, has_overflowed;
	struct perf_event *event;
	struct arm_cspmu *cspmu = dev;
	DECLARE_BITMAP(pmovs, ARM_CSPMU_MAX_HW_CNTRS);
	bool handled = false;

	arm_cspmu_stop_counters(cspmu);

	has_overflowed = arm_cspmu_get_reset_overflow(cspmu, (u32 *)pmovs);
	if (!has_overflowed)
		goto done;

	for_each_set_bit(idx, cspmu->hw_events.used_ctrs,
			 cspmu->num_logical_ctrs) {
		event = cspmu->hw_events.events[idx];

		if (!event)
			continue;

		if (!test_bit(event->hw.idx, pmovs))
			continue;

		arm_cspmu_event_update(event);
		arm_cspmu_set_event_period(event);

		handled = true;
	}

done:
	arm_cspmu_start_counters(cspmu);
	return IRQ_RETVAL(handled);
}

static int arm_cspmu_request_irq(struct arm_cspmu *cspmu)
{
	int irq, ret;
	struct device *dev;
	struct platform_device *pdev;

	dev = cspmu->dev;
	pdev = to_platform_device(dev);

	/* Skip IRQ request if the PMU does not support overflow interrupt. */
	irq = platform_get_irq_optional(pdev, 0);
	if (irq < 0)
		return irq == -ENXIO ? 0 : irq;

	ret = devm_request_irq(dev, irq, arm_cspmu_handle_irq,
			       IRQF_NOBALANCING | IRQF_NO_THREAD, dev_name(dev),
			       cspmu);
	if (ret) {
		dev_err(dev, "Could not request IRQ %d\n", irq);
		return ret;
	}

	cspmu->irq = irq;

	return 0;
}

#if defined(CONFIG_ACPI) && defined(CONFIG_ARM64)
#include <acpi/processor.h>

static inline int arm_cspmu_find_cpu_container(int cpu, u32 container_uid)
{
	u32 acpi_uid;
	struct device *cpu_dev;
	struct acpi_device *acpi_dev;

	cpu_dev = get_cpu_device(cpu);
	if (!cpu_dev)
		return -ENODEV;

	acpi_dev = ACPI_COMPANION(cpu_dev);
	while (acpi_dev) {
		if (!strcmp(acpi_device_hid(acpi_dev),
			    ACPI_PROCESSOR_CONTAINER_HID) &&
		    !kstrtouint(acpi_device_uid(acpi_dev), 0, &acpi_uid) &&
		    acpi_uid == container_uid)
			return 0;

		acpi_dev = acpi_dev_parent(acpi_dev);
	}

	return -ENODEV;
}

static int arm_cspmu_acpi_get_cpus(struct arm_cspmu *cspmu)
{
	struct acpi_apmt_node *apmt_node;
	int affinity_flag;
	int cpu;

	apmt_node = arm_cspmu_apmt_node(cspmu->dev);
	affinity_flag = apmt_node->flags & ACPI_APMT_FLAGS_AFFINITY;

	if (affinity_flag == ACPI_APMT_FLAGS_AFFINITY_PROC) {
		for_each_possible_cpu(cpu) {
			if (apmt_node->proc_affinity ==
			    get_acpi_id_for_cpu(cpu)) {
				cpumask_set_cpu(cpu, &cspmu->associated_cpus);
				break;
			}
		}
	} else {
		for_each_possible_cpu(cpu) {
			if (arm_cspmu_find_cpu_container(
				    cpu, apmt_node->proc_affinity))
				continue;

			cpumask_set_cpu(cpu, &cspmu->associated_cpus);
		}
	}

	if (cpumask_empty(&cspmu->associated_cpus)) {
		dev_dbg(cspmu->dev, "No cpu associated with the PMU\n");
		return -ENODEV;
	}

	return 0;
}
#else
static int arm_cspmu_acpi_get_cpus(struct arm_cspmu *cspmu)
{
	return -ENODEV;
}
#endif

static int arm_cspmu_get_cpus(struct arm_cspmu *cspmu)
{
	return arm_cspmu_acpi_get_cpus(cspmu);
}

static int arm_cspmu_register_pmu(struct arm_cspmu *cspmu)
{
	int ret, capabilities;
	struct attribute_group **attr_groups;

	attr_groups = arm_cspmu_alloc_attr_group(cspmu);
	if (!attr_groups)
		return -ENOMEM;

	ret = cpuhp_state_add_instance(arm_cspmu_cpuhp_state,
				       &cspmu->cpuhp_node);
	if (ret)
		return ret;

	capabilities = PERF_PMU_CAP_NO_EXCLUDE;
	if (cspmu->irq == 0)
		capabilities |= PERF_PMU_CAP_NO_INTERRUPT;

	cspmu->pmu = (struct pmu){
		.task_ctx_nr = perf_invalid_context,
		.module = THIS_MODULE,
		.pmu_enable = arm_cspmu_enable,
		.pmu_disable = arm_cspmu_disable,
		.event_init = arm_cspmu_event_init,
		.add = arm_cspmu_add,
		.del = arm_cspmu_del,
		.start = arm_cspmu_start,
		.stop = arm_cspmu_stop,
		.read = arm_cspmu_read,
		.attr_groups = (const struct attribute_group **)attr_groups,
		.capabilities = capabilities,
	};

	/* Hardware counter init */
	arm_cspmu_stop_counters(cspmu);
	arm_cspmu_reset_counters(cspmu);

	ret = perf_pmu_register(&cspmu->pmu, cspmu->name, -1);
	if (ret) {
		cpuhp_state_remove_instance(arm_cspmu_cpuhp_state,
					    &cspmu->cpuhp_node);
	}

	return ret;
}

static int arm_cspmu_device_probe(struct platform_device *pdev)
{
	int ret;
	struct arm_cspmu *cspmu;

	cspmu = arm_cspmu_alloc(pdev);
	if (!cspmu)
		return -ENOMEM;

	ret = arm_cspmu_init_mmio(cspmu);
	if (ret)
		return ret;

	ret = arm_cspmu_request_irq(cspmu);
	if (ret)
		return ret;

	ret = arm_cspmu_get_cpus(cspmu);
	if (ret)
		return ret;

	ret = arm_cspmu_register_pmu(cspmu);
	if (ret)
		return ret;

	return 0;
}

static int arm_cspmu_device_remove(struct platform_device *pdev)
{
	struct arm_cspmu *cspmu = platform_get_drvdata(pdev);

	perf_pmu_unregister(&cspmu->pmu);
	cpuhp_state_remove_instance(arm_cspmu_cpuhp_state, &cspmu->cpuhp_node);

	return 0;
}

static const struct platform_device_id arm_cspmu_id[] = {
	{DRVNAME, 0},
	{ },
};
MODULE_DEVICE_TABLE(platform, arm_cspmu_id);

static struct platform_driver arm_cspmu_driver = {
	.driver = {
		.name = DRVNAME,
		.suppress_bind_attrs = true,
	},
	.probe = arm_cspmu_device_probe,
	.remove = arm_cspmu_device_remove,
	.id_table = arm_cspmu_id,
};

static void arm_cspmu_set_active_cpu(int cpu, struct arm_cspmu *cspmu)
{
	cpumask_set_cpu(cpu, &cspmu->active_cpu);
	if (cspmu->irq)
		WARN_ON(irq_set_affinity(cspmu->irq, &cspmu->active_cpu));
}

static int arm_cspmu_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct arm_cspmu *cspmu =
		hlist_entry_safe(node, struct arm_cspmu, cpuhp_node);

	if (!cpumask_test_cpu(cpu, &cspmu->associated_cpus))
		return 0;

	/* If the PMU is already managed, there is nothing to do */
	if (!cpumask_empty(&cspmu->active_cpu))
		return 0;

	/* Use this CPU for event counting */
	arm_cspmu_set_active_cpu(cpu, cspmu);

	return 0;
}

static int arm_cspmu_cpu_teardown(unsigned int cpu, struct hlist_node *node)
{
	int dst;
	struct cpumask online_supported;

	struct arm_cspmu *cspmu =
		hlist_entry_safe(node, struct arm_cspmu, cpuhp_node);

	/* Nothing to do if this CPU doesn't own the PMU */
	if (!cpumask_test_and_clear_cpu(cpu, &cspmu->active_cpu))
		return 0;

	/* Choose a new CPU to migrate ownership of the PMU to */
	cpumask_and(&online_supported, &cspmu->associated_cpus,
		    cpu_online_mask);
	dst = cpumask_any_but(&online_supported, cpu);
	if (dst >= nr_cpu_ids)
		return 0;

	/* Use this CPU for event counting */
	perf_pmu_migrate_context(&cspmu->pmu, cpu, dst);
	arm_cspmu_set_active_cpu(dst, cspmu);

	return 0;
}

static int __init arm_cspmu_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
				      "perf/arm/cspmu:online",
				      arm_cspmu_cpu_online,
				      arm_cspmu_cpu_teardown);
	if (ret < 0)
		return ret;
	arm_cspmu_cpuhp_state = ret;
	return platform_driver_register(&arm_cspmu_driver);
}

static void __exit arm_cspmu_exit(void)
{
	platform_driver_unregister(&arm_cspmu_driver);
	cpuhp_remove_multi_state(arm_cspmu_cpuhp_state);
}

module_init(arm_cspmu_init);
module_exit(arm_cspmu_exit);

MODULE_LICENSE("GPL v2");