// SPDX-License-Identifier: GPL-2.0

/*
 * This driver adds support for perf events to use the Performance
 * Monitor Counter Groups (PMCG) associated with an SMMUv3 node
 * to monitor that node.
 *
 * SMMUv3 PMCG devices are named as smmuv3_pmcg_<phys_addr_page> where
 * <phys_addr_page> is the physical page address of the SMMU PMCG
 * truncated to a 4K boundary. For example, the PMCG at 0xff88840000 is
 * named smmuv3_pmcg_ff88840.
 *
 * Filtering by StreamID is done by specifying filtering parameters
 * with the event. Options are:
 *   filter_enable    - 0 = no filtering, 1 = filtering enabled
 *   filter_span      - 0 = exact match, 1 = pattern match
 *   filter_stream_id - pattern to filter against
 *
 * To match a partial StreamID where the X most-significant bits must match
 * but the Y least-significant bits might differ, STREAMID is programmed
 * with a value that contains:
 *  STREAMID[Y - 1] == 0.
 *  STREAMID[Y - 2:0] == 1 (where Y > 1).
 * The remainder of implemented bits of STREAMID (X bits, from bit Y upwards)
 * contain a value to match from the corresponding bits of event StreamID.
 *
 * Example: perf stat -e smmuv3_pmcg_ff88840/transaction,filter_enable=1,
 *                    filter_span=1,filter_stream_id=0x42/ -a netperf
 * Applies filter pattern 0x42 to transaction events, which means events
 * matching StreamIDs 0x42 and 0x43 are counted. Further filtering
 * information is available in the SMMU documentation.
 *
 * SMMU events are not attributable to a CPU, so task mode and sampling
 * are not supported.
 */
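
/*
 * A minimal sketch (not part of the driver) of the span encoding
 * described above: for 2^y consecutive StreamIDs starting at a
 * 2^y-aligned base, the value to program (y >= 1) would be
 *
 *	static inline u32 smmu_pmcg_span_sid(u32 base, u32 y)
 *	{
 *		return base | (BIT(y - 1) - 1);
 *	}
 *
 * where smmu_pmcg_span_sid is a hypothetical helper. For example,
 * smmu_pmcg_span_sid(0x40, 3) == 0x43, matching 0x40-0x47, and the
 * example above, 0x42, corresponds to base 0x42 with y == 1.
 */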

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/msi.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/smp.h>
#include <linux/sysfs.h>
#include <linux/types.h>

#define SMMU_PMCG_EVCNTR0               0x0
#define SMMU_PMCG_EVCNTR(n, stride)     (SMMU_PMCG_EVCNTR0 + (n) * (stride))
#define SMMU_PMCG_EVTYPER0              0x400
#define SMMU_PMCG_EVTYPER(n)            (SMMU_PMCG_EVTYPER0 + (n) * 4)
#define SMMU_PMCG_SID_SPAN_SHIFT        29
#define SMMU_PMCG_SMR0                  0xA00
#define SMMU_PMCG_SMR(n)                (SMMU_PMCG_SMR0 + (n) * 4)
#define SMMU_PMCG_CNTENSET0             0xC00
#define SMMU_PMCG_CNTENCLR0             0xC20
#define SMMU_PMCG_INTENSET0             0xC40
#define SMMU_PMCG_INTENCLR0             0xC60
#define SMMU_PMCG_OVSCLR0               0xC80
#define SMMU_PMCG_OVSSET0               0xCC0
#define SMMU_PMCG_CFGR                  0xE00
#define SMMU_PMCG_CFGR_SID_FILTER_TYPE  BIT(23)
#define SMMU_PMCG_CFGR_MSI              BIT(21)
#define SMMU_PMCG_CFGR_RELOC_CTRS       BIT(20)
#define SMMU_PMCG_CFGR_SIZE             GENMASK(13, 8)
#define SMMU_PMCG_CFGR_NCTR             GENMASK(5, 0)
#define SMMU_PMCG_CR                    0xE04
#define SMMU_PMCG_CR_ENABLE             BIT(0)
#define SMMU_PMCG_CEID0                 0xE20
#define SMMU_PMCG_CEID1                 0xE28
#define SMMU_PMCG_IRQ_CTRL              0xE50
#define SMMU_PMCG_IRQ_CTRL_IRQEN        BIT(0)
#define SMMU_PMCG_IRQ_CFG0              0xE58
#define SMMU_PMCG_IRQ_CFG1              0xE60
#define SMMU_PMCG_IRQ_CFG2              0xE64

/* MSI config fields */
#define MSI_CFG0_ADDR_MASK              GENMASK_ULL(51, 2)
#define MSI_CFG2_MEMATTR_DEVICE_nGnRE   0x1

#define SMMU_PMCG_DEFAULT_FILTER_SPAN   1
#define SMMU_PMCG_DEFAULT_FILTER_SID    GENMASK(31, 0)

#define SMMU_PMCG_MAX_COUNTERS          64
#define SMMU_PMCG_ARCH_MAX_EVENTS       128

#define SMMU_PMCG_PA_SHIFT              12

#define SMMU_PMCG_EVCNTR_RDONLY         BIT(0)

static int cpuhp_state_num;

struct smmu_pmu {
	struct hlist_node node;
	struct perf_event *events[SMMU_PMCG_MAX_COUNTERS];
	DECLARE_BITMAP(used_counters, SMMU_PMCG_MAX_COUNTERS);
	DECLARE_BITMAP(supported_events, SMMU_PMCG_ARCH_MAX_EVENTS);
	unsigned int irq;
	unsigned int on_cpu;
	struct pmu pmu;
	unsigned int num_counters;
	struct device *dev;
	void __iomem *reg_base;
	void __iomem *reloc_base;
	u64 counter_mask;
	u32 options;
	bool global_filter;
};

#define to_smmu_pmu(p) (container_of(p, struct smmu_pmu, pmu))

#define SMMU_PMU_EVENT_ATTR_EXTRACTOR(_name, _config, _start, _end)        \
	static inline u32 get_##_name(struct perf_event *event)            \
	{                                                                  \
		return FIELD_GET(GENMASK_ULL(_end, _start),                \
				 event->attr._config);                     \
	}                                                                  \

SMMU_PMU_EVENT_ATTR_EXTRACTOR(event, config, 0, 15);
SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_stream_id, config1, 0, 31);
SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_span, config1, 32, 32);
SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_enable, config1, 33, 33);
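
/*
 * These extractors decode the same config/config1 bit layout that the
 * "format" attribute group below advertises to userspace
 * (event=config:0-15, filter_stream_id=config1:0-31, filter_span and
 * filter_enable in config1 bits 32 and 33).
 */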

static inline void smmu_pmu_enable(struct pmu *pmu)
{
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);

	writel(SMMU_PMCG_IRQ_CTRL_IRQEN,
	       smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL);
	writel(SMMU_PMCG_CR_ENABLE, smmu_pmu->reg_base + SMMU_PMCG_CR);
}

static inline void smmu_pmu_disable(struct pmu *pmu)
{
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);

	writel(0, smmu_pmu->reg_base + SMMU_PMCG_CR);
	writel(0, smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL);
}

static inline void smmu_pmu_counter_set_value(struct smmu_pmu *smmu_pmu,
					      u32 idx, u64 value)
{
	if (smmu_pmu->counter_mask & BIT(32))
		writeq(value, smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 8));
	else
		writel(value, smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 4));
}

static inline u64 smmu_pmu_counter_get_value(struct smmu_pmu *smmu_pmu, u32 idx)
{
	u64 value;

	if (smmu_pmu->counter_mask & BIT(32))
		value = readq(smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 8));
	else
		value = readl(smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 4));

	return value;
}
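
/*
 * Note: counters wider than 32 bits (counter_mask has BIT(32) set) are
 * accessed as 64-bit registers at an 8-byte stride, otherwise as 32-bit
 * registers at a 4-byte stride. reloc_base points at the separate
 * counter page when SMMU_PMCG_CFGR_RELOC_CTRS is set and aliases
 * reg_base otherwise (see smmu_pmu_probe()).
 */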

static inline void smmu_pmu_counter_enable(struct smmu_pmu *smmu_pmu, u32 idx)
{
	writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_CNTENSET0);
}

static inline void smmu_pmu_counter_disable(struct smmu_pmu *smmu_pmu, u32 idx)
{
	writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_CNTENCLR0);
}

static inline void smmu_pmu_interrupt_enable(struct smmu_pmu *smmu_pmu, u32 idx)
{
	writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_INTENSET0);
}

static inline void smmu_pmu_interrupt_disable(struct smmu_pmu *smmu_pmu,
					      u32 idx)
{
	writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_INTENCLR0);
}

static inline void smmu_pmu_set_evtyper(struct smmu_pmu *smmu_pmu, u32 idx,
					u32 val)
{
	writel(val, smmu_pmu->reg_base + SMMU_PMCG_EVTYPER(idx));
}

static inline void smmu_pmu_set_smr(struct smmu_pmu *smmu_pmu, u32 idx, u32 val)
{
	writel(val, smmu_pmu->reg_base + SMMU_PMCG_SMR(idx));
}

static void smmu_pmu_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
	u64 delta, prev, now;
	u32 idx = hwc->idx;

	/* Retry until prev_count is stable against a concurrent update */
	do {
		prev = local64_read(&hwc->prev_count);
		now = smmu_pmu_counter_get_value(smmu_pmu, idx);
	} while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev);

	/*
	 * Handle overflow: the masked subtraction yields the correct
	 * delta even if the counter wrapped between the two reads.
	 */
	delta = now - prev;
	delta &= smmu_pmu->counter_mask;

	local64_add(delta, &event->count);
}

static void smmu_pmu_set_period(struct smmu_pmu *smmu_pmu,
				struct hw_perf_event *hwc)
{
	u32 idx = hwc->idx;
	u64 new;

	if (smmu_pmu->options & SMMU_PMCG_EVCNTR_RDONLY) {
		/*
		 * On platforms that require this quirk, if the counter starts
		 * at less than half of the counter range and wraps, the
		 * current overflow handling may not work. Such platforms are
		 * expected to implement the full 64 counter bits so that this
		 * possibility is remote (e.g. HiSilicon HIP08).
		 */
		new = smmu_pmu_counter_get_value(smmu_pmu, idx);
	} else {
		/*
		 * We limit the max period to half of the counter's maximum
		 * value, so that even in the case of extreme interrupt
		 * latency the counter will (hopefully) not wrap past its
		 * initial value.
		 */
		new = smmu_pmu->counter_mask >> 1;
		smmu_pmu_counter_set_value(smmu_pmu, idx, new);
	}

	local64_set(&hwc->prev_count, new);
}
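
/*
 * With a 32-bit counter, for example, the initial value above is
 * 0x7fffffff, leaving roughly 2^31 increments of headroom before the
 * overflow interrupt handled in smmu_pmu_handle_irq() must be serviced.
 */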

static void smmu_pmu_set_event_filter(struct perf_event *event,
				      int idx, u32 span, u32 sid)
{
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
	u32 evtyper;

	evtyper = get_event(event) | span << SMMU_PMCG_SID_SPAN_SHIFT;
	smmu_pmu_set_evtyper(smmu_pmu, idx, evtyper);
	smmu_pmu_set_smr(smmu_pmu, idx, sid);
}

static bool smmu_pmu_check_global_filter(struct perf_event *curr,
					 struct perf_event *new)
{
	if (get_filter_enable(new) != get_filter_enable(curr))
		return false;

	if (!get_filter_enable(new))
		return true;

	return get_filter_span(new) == get_filter_span(curr) &&
	       get_filter_stream_id(new) == get_filter_stream_id(curr);
}

static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
				       struct perf_event *event, int idx)
{
	u32 span, sid;
	unsigned int num_ctrs = smmu_pmu->num_counters;
	bool filter_en = !!get_filter_enable(event);

	span = filter_en ? get_filter_span(event) :
			   SMMU_PMCG_DEFAULT_FILTER_SPAN;
	sid = filter_en ? get_filter_stream_id(event) :
			   SMMU_PMCG_DEFAULT_FILTER_SID;

	/* Support individual filter settings */
	if (!smmu_pmu->global_filter) {
		smmu_pmu_set_event_filter(event, idx, span, sid);
		return 0;
	}

	/* Requested settings must match the current global settings */
	idx = find_first_bit(smmu_pmu->used_counters, num_ctrs);
	if (idx == num_ctrs ||
	    smmu_pmu_check_global_filter(smmu_pmu->events[idx], event)) {
		smmu_pmu_set_event_filter(event, 0, span, sid);
		return 0;
	}

	return -EAGAIN;
}
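
/*
 * In global-filtering mode the filter configuration programmed on
 * counter 0 is shared by all counters, so a new event is only accepted
 * if no counters are in use yet or its filter settings agree with those
 * of the events already scheduled; otherwise it gets -EAGAIN.
 */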

static int smmu_pmu_get_event_idx(struct smmu_pmu *smmu_pmu,
				  struct perf_event *event)
{
	int idx, err;
	unsigned int num_ctrs = smmu_pmu->num_counters;

	idx = find_first_zero_bit(smmu_pmu->used_counters, num_ctrs);
	if (idx == num_ctrs)
		/* The counters are all in use. */
		return -EAGAIN;

	err = smmu_pmu_apply_event_filter(smmu_pmu, event, idx);
	if (err)
		return err;

	set_bit(idx, smmu_pmu->used_counters);

	return idx;
}

static bool smmu_pmu_events_compatible(struct perf_event *curr,
				       struct perf_event *new)
{
	if (new->pmu != curr->pmu)
		return false;

	if (to_smmu_pmu(new->pmu)->global_filter &&
	    !smmu_pmu_check_global_filter(curr, new))
		return false;

	return true;
}

/*
 * Implementation of abstract pmu functionality required by
 * the core perf events code.
 */

static int smmu_pmu_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
	struct device *dev = smmu_pmu->dev;
	struct perf_event *sibling;
	int group_num_events = 1;
	u16 event_id;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	if (hwc->sample_period) {
		dev_dbg(dev, "Sampling not supported\n");
		return -EOPNOTSUPP;
	}

	if (event->cpu < 0) {
		dev_dbg(dev, "Per-task mode not supported\n");
		return -EOPNOTSUPP;
	}

	/* Verify specified event is supported on this PMU */
	event_id = get_event(event);
	if (event_id < SMMU_PMCG_ARCH_MAX_EVENTS &&
	    (!test_bit(event_id, smmu_pmu->supported_events))) {
		dev_dbg(dev, "Invalid event %d for this PMU\n", event_id);
		return -EINVAL;
	}

	/* Don't allow groups with mixed PMUs, except for s/w events */
	if (!is_software_event(event->group_leader)) {
		if (!smmu_pmu_events_compatible(event->group_leader, event))
			return -EINVAL;

		if (++group_num_events > smmu_pmu->num_counters)
			return -EINVAL;
	}

	for_each_sibling_event(sibling, event->group_leader) {
		if (is_software_event(sibling))
			continue;

		if (!smmu_pmu_events_compatible(sibling, event))
			return -EINVAL;

		if (++group_num_events > smmu_pmu->num_counters)
			return -EINVAL;
	}

	hwc->idx = -1;

	/*
	 * Ensure all events are on the same cpu so all events are in the
	 * same cpu context, to avoid races on pmu_enable etc.
	 */
	event->cpu = smmu_pmu->on_cpu;

	return 0;
}

static void smmu_pmu_event_start(struct perf_event *event, int flags)
{
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	hwc->state = 0;

	smmu_pmu_set_period(smmu_pmu, hwc);

	smmu_pmu_counter_enable(smmu_pmu, idx);
}

static void smmu_pmu_event_stop(struct perf_event *event, int flags)
{
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (hwc->state & PERF_HES_STOPPED)
		return;

	smmu_pmu_counter_disable(smmu_pmu, idx);
	/* As the counter gets updated on _start, ignore PERF_EF_UPDATE */
	smmu_pmu_event_update(event);
	hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}

static int smmu_pmu_event_add(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);

	idx = smmu_pmu_get_event_idx(smmu_pmu, event);
	if (idx < 0)
		return idx;

	hwc->idx = idx;
	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	smmu_pmu->events[idx] = event;
	local64_set(&hwc->prev_count, 0);

	smmu_pmu_interrupt_enable(smmu_pmu, idx);

	if (flags & PERF_EF_START)
		smmu_pmu_event_start(event, flags);

	/* Propagate changes to the userspace mapping. */
	perf_event_update_userpage(event);

	return 0;
}

static void smmu_pmu_event_del(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
	int idx = hwc->idx;

	smmu_pmu_event_stop(event, flags | PERF_EF_UPDATE);
	smmu_pmu_interrupt_disable(smmu_pmu, idx);
	smmu_pmu->events[idx] = NULL;
	clear_bit(idx, smmu_pmu->used_counters);

	perf_event_update_userpage(event);
}

static void smmu_pmu_event_read(struct perf_event *event)
{
	smmu_pmu_event_update(event);
}

/* cpumask */

static ssize_t smmu_pmu_cpumask_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));

	return cpumap_print_to_pagebuf(true, buf, cpumask_of(smmu_pmu->on_cpu));
}

static struct device_attribute smmu_pmu_cpumask_attr =
		__ATTR(cpumask, 0444, smmu_pmu_cpumask_show, NULL);

static struct attribute *smmu_pmu_cpumask_attrs[] = {
	&smmu_pmu_cpumask_attr.attr,
	NULL
};

static struct attribute_group smmu_pmu_cpumask_group = {
	.attrs = smmu_pmu_cpumask_attrs,
};

/* Events */

static ssize_t smmu_pmu_event_show(struct device *dev,
				   struct device_attribute *attr, char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);

	return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
}

#define SMMU_EVENT_ATTR(name, config) \
	PMU_EVENT_ATTR(name, smmu_event_attr_##name, \
		       config, smmu_pmu_event_show)
SMMU_EVENT_ATTR(cycles, 0);
SMMU_EVENT_ATTR(transaction, 1);
SMMU_EVENT_ATTR(tlb_miss, 2);
SMMU_EVENT_ATTR(config_cache_miss, 3);
SMMU_EVENT_ATTR(trans_table_walk_access, 4);
SMMU_EVENT_ATTR(config_struct_access, 5);
SMMU_EVENT_ATTR(pcie_ats_trans_rq, 6);
SMMU_EVENT_ATTR(pcie_ats_trans_passed, 7);

static struct attribute *smmu_pmu_events[] = {
	&smmu_event_attr_cycles.attr.attr,
	&smmu_event_attr_transaction.attr.attr,
	&smmu_event_attr_tlb_miss.attr.attr,
	&smmu_event_attr_config_cache_miss.attr.attr,
	&smmu_event_attr_trans_table_walk_access.attr.attr,
	&smmu_event_attr_config_struct_access.attr.attr,
	&smmu_event_attr_pcie_ats_trans_rq.attr.attr,
	&smmu_event_attr_pcie_ats_trans_passed.attr.attr,
	NULL
};

static umode_t smmu_pmu_event_is_visible(struct kobject *kobj,
					 struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr);

	if (test_bit(pmu_attr->id, smmu_pmu->supported_events))
		return attr->mode;

	return 0;
}

static struct attribute_group smmu_pmu_events_group = {
	.name = "events",
	.attrs = smmu_pmu_events,
	.is_visible = smmu_pmu_event_is_visible,
};

/* Formats */
PMU_FORMAT_ATTR(event,		   "config:0-15");
PMU_FORMAT_ATTR(filter_stream_id,  "config1:0-31");
PMU_FORMAT_ATTR(filter_span,	   "config1:32");
PMU_FORMAT_ATTR(filter_enable,	   "config1:33");

static struct attribute *smmu_pmu_formats[] = {
	&format_attr_event.attr,
	&format_attr_filter_stream_id.attr,
	&format_attr_filter_span.attr,
	&format_attr_filter_enable.attr,
	NULL
};

static struct attribute_group smmu_pmu_format_group = {
	.name = "format",
	.attrs = smmu_pmu_formats,
};

static const struct attribute_group *smmu_pmu_attr_grps[] = {
	&smmu_pmu_cpumask_group,
	&smmu_pmu_events_group,
	&smmu_pmu_format_group,
	NULL
};

/*
 * Generic device handlers
 */

static int smmu_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct smmu_pmu *smmu_pmu;
	unsigned int target;

	smmu_pmu = hlist_entry_safe(node, struct smmu_pmu, node);
	if (cpu != smmu_pmu->on_cpu)
		return 0;

	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		return 0;

	perf_pmu_migrate_context(&smmu_pmu->pmu, cpu, target);
	smmu_pmu->on_cpu = target;
	WARN_ON(irq_set_affinity_hint(smmu_pmu->irq, cpumask_of(target)));

	return 0;
}

static irqreturn_t smmu_pmu_handle_irq(int irq_num, void *data)
{
	struct smmu_pmu *smmu_pmu = data;
	u64 ovsr;
	unsigned int idx;

	ovsr = readq(smmu_pmu->reloc_base + SMMU_PMCG_OVSSET0);
	if (!ovsr)
		return IRQ_NONE;

	writeq(ovsr, smmu_pmu->reloc_base + SMMU_PMCG_OVSCLR0);

	for_each_set_bit(idx, (unsigned long *)&ovsr, smmu_pmu->num_counters) {
		struct perf_event *event = smmu_pmu->events[idx];
		struct hw_perf_event *hwc;

		if (WARN_ON_ONCE(!event))
			continue;

		smmu_pmu_event_update(event);
		hwc = &event->hw;

		smmu_pmu_set_period(smmu_pmu, hwc);
	}

	return IRQ_HANDLED;
}
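
/*
 * The handler above snapshots and clears the overflow status before
 * touching the counters: smmu_pmu_event_update() folds the wrapped
 * delta through counter_mask, then smmu_pmu_set_period() rearms each
 * counter (or, on SMMU_PMCG_EVCNTR_RDONLY platforms, resyncs
 * prev_count with the free-running counter value).
 */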

static void smmu_pmu_free_msis(void *data)
{
	struct device *dev = data;

	platform_msi_domain_free_irqs(dev);
}

static void smmu_pmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	phys_addr_t doorbell;
	struct device *dev = msi_desc_to_dev(desc);
	struct smmu_pmu *pmu = dev_get_drvdata(dev);

	doorbell = (((u64)msg->address_hi) << 32) | msg->address_lo;
	doorbell &= MSI_CFG0_ADDR_MASK;

	writeq_relaxed(doorbell, pmu->reg_base + SMMU_PMCG_IRQ_CFG0);
	writel_relaxed(msg->data, pmu->reg_base + SMMU_PMCG_IRQ_CFG1);
	writel_relaxed(MSI_CFG2_MEMATTR_DEVICE_nGnRE,
		       pmu->reg_base + SMMU_PMCG_IRQ_CFG2);
}

static void smmu_pmu_setup_msi(struct smmu_pmu *pmu)
{
	struct msi_desc *desc;
	struct device *dev = pmu->dev;
	int ret;

	/* Clear MSI address reg */
	writeq_relaxed(0, pmu->reg_base + SMMU_PMCG_IRQ_CFG0);

	/* MSI supported or not */
	if (!(readl(pmu->reg_base + SMMU_PMCG_CFGR) & SMMU_PMCG_CFGR_MSI))
		return;

	ret = platform_msi_domain_alloc_irqs(dev, 1, smmu_pmu_write_msi_msg);
	if (ret) {
		dev_warn(dev, "failed to allocate MSIs\n");
		return;
	}

	desc = first_msi_entry(dev);
	if (desc)
		pmu->irq = desc->irq;

	/* Add callback to free MSIs on teardown */
	devm_add_action(dev, smmu_pmu_free_msis, dev);
}

static int smmu_pmu_setup_irq(struct smmu_pmu *pmu)
{
	unsigned long flags = IRQF_NOBALANCING | IRQF_SHARED | IRQF_NO_THREAD;
	int irq, ret = -ENXIO;

	smmu_pmu_setup_msi(pmu);

	irq = pmu->irq;
	if (irq)
		ret = devm_request_irq(pmu->dev, irq, smmu_pmu_handle_irq,
				       flags, "smmuv3-pmu", pmu);
	return ret;
}

static void smmu_pmu_reset(struct smmu_pmu *smmu_pmu)
{
	u64 counter_present_mask = GENMASK_ULL(smmu_pmu->num_counters - 1, 0);

	smmu_pmu_disable(&smmu_pmu->pmu);

	/* Disable counters and interrupts, and clear pending overflows */
	writeq_relaxed(counter_present_mask,
		       smmu_pmu->reg_base + SMMU_PMCG_CNTENCLR0);
	writeq_relaxed(counter_present_mask,
		       smmu_pmu->reg_base + SMMU_PMCG_INTENCLR0);
	writeq_relaxed(counter_present_mask,
		       smmu_pmu->reloc_base + SMMU_PMCG_OVSCLR0);
}

static void smmu_pmu_get_acpi_options(struct smmu_pmu *smmu_pmu)
{
	u32 model;

	model = *(u32 *)dev_get_platdata(smmu_pmu->dev);

	switch (model) {
	case IORT_SMMU_V3_PMCG_HISI_HIP08:
		/* HiSilicon Erratum 162001800 */
		smmu_pmu->options |= SMMU_PMCG_EVCNTR_RDONLY;
		break;
	}

	dev_notice(smmu_pmu->dev, "option mask 0x%x\n", smmu_pmu->options);
}

static int smmu_pmu_probe(struct platform_device *pdev)
{
	struct smmu_pmu *smmu_pmu;
	struct resource *res_0;
	u32 cfgr, reg_size;
	u64 ceid_64[2];
	int irq, err;
	char *name;
	struct device *dev = &pdev->dev;

	smmu_pmu = devm_kzalloc(dev, sizeof(*smmu_pmu), GFP_KERNEL);
	if (!smmu_pmu)
		return -ENOMEM;

	smmu_pmu->dev = dev;
	platform_set_drvdata(pdev, smmu_pmu);

	smmu_pmu->pmu = (struct pmu) {
		.task_ctx_nr    = perf_invalid_context,
		.pmu_enable	= smmu_pmu_enable,
		.pmu_disable	= smmu_pmu_disable,
		.event_init	= smmu_pmu_event_init,
		.add		= smmu_pmu_event_add,
		.del		= smmu_pmu_event_del,
		.start		= smmu_pmu_event_start,
		.stop		= smmu_pmu_event_stop,
		.read		= smmu_pmu_event_read,
		.attr_groups	= smmu_pmu_attr_grps,
		.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
	};

	res_0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	smmu_pmu->reg_base = devm_ioremap_resource(dev, res_0);
	if (IS_ERR(smmu_pmu->reg_base))
		return PTR_ERR(smmu_pmu->reg_base);

	cfgr = readl_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CFGR);

	/* Determine if page 1 is present */
	if (cfgr & SMMU_PMCG_CFGR_RELOC_CTRS) {
		smmu_pmu->reloc_base = devm_platform_ioremap_resource(pdev, 1);
		if (IS_ERR(smmu_pmu->reloc_base))
			return PTR_ERR(smmu_pmu->reloc_base);
	} else {
		smmu_pmu->reloc_base = smmu_pmu->reg_base;
	}

	irq = platform_get_irq_optional(pdev, 0);
	if (irq > 0)
		smmu_pmu->irq = irq;

	ceid_64[0] = readq_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CEID0);
	ceid_64[1] = readq_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CEID1);
	bitmap_from_arr32(smmu_pmu->supported_events, (u32 *)ceid_64,
			  SMMU_PMCG_ARCH_MAX_EVENTS);

	smmu_pmu->num_counters = FIELD_GET(SMMU_PMCG_CFGR_NCTR, cfgr) + 1;

	smmu_pmu->global_filter = !!(cfgr & SMMU_PMCG_CFGR_SID_FILTER_TYPE);

	/* SMMU_PMCG_CFGR.SIZE encodes the counter width minus one */
	reg_size = FIELD_GET(SMMU_PMCG_CFGR_SIZE, cfgr);
	smmu_pmu->counter_mask = GENMASK_ULL(reg_size, 0);

	smmu_pmu_reset(smmu_pmu);

	err = smmu_pmu_setup_irq(smmu_pmu);
	if (err) {
		dev_err(dev, "Setup irq failed, PMU @%pa\n", &res_0->start);
		return err;
	}

	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "smmuv3_pmcg_%llx",
			      (res_0->start) >> SMMU_PMCG_PA_SHIFT);
	if (!name) {
		dev_err(dev, "Create name failed, PMU @%pa\n", &res_0->start);
		return -ENOMEM;
	}

	smmu_pmu_get_acpi_options(smmu_pmu);

	/* Pick one CPU to be the preferred one to use */
	smmu_pmu->on_cpu = raw_smp_processor_id();
	WARN_ON(irq_set_affinity_hint(smmu_pmu->irq,
				      cpumask_of(smmu_pmu->on_cpu)));

	err = cpuhp_state_add_instance_nocalls(cpuhp_state_num,
					       &smmu_pmu->node);
	if (err) {
		dev_err(dev, "Error %d registering hotplug, PMU @%pa\n",
			err, &res_0->start);
		goto out_clear_affinity;
	}

	err = perf_pmu_register(&smmu_pmu->pmu, name, -1);
	if (err) {
		dev_err(dev, "Error %d registering PMU @%pa\n",
			err, &res_0->start);
		goto out_unregister;
	}

	dev_info(dev, "Registered PMU @ %pa using %d counters with %s filter settings\n",
		 &res_0->start, smmu_pmu->num_counters,
		 smmu_pmu->global_filter ? "Global(Counter0)" :
		 "Individual");

	return 0;

out_unregister:
	cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node);
out_clear_affinity:
	irq_set_affinity_hint(smmu_pmu->irq, NULL);
	return err;
}

static int smmu_pmu_remove(struct platform_device *pdev)
{
	struct smmu_pmu *smmu_pmu = platform_get_drvdata(pdev);

	perf_pmu_unregister(&smmu_pmu->pmu);
	cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node);
	irq_set_affinity_hint(smmu_pmu->irq, NULL);

	return 0;
}

static void smmu_pmu_shutdown(struct platform_device *pdev)
{
	struct smmu_pmu *smmu_pmu = platform_get_drvdata(pdev);

	smmu_pmu_disable(&smmu_pmu->pmu);
}

static struct platform_driver smmu_pmu_driver = {
	.driver = {
		.name = "arm-smmu-v3-pmcg",
	},
	.probe = smmu_pmu_probe,
	.remove = smmu_pmu_remove,
	.shutdown = smmu_pmu_shutdown,
};

static int __init arm_smmu_pmu_init(void)
{
	cpuhp_state_num = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
						  "perf/arm/pmcg:online",
						  NULL,
						  smmu_pmu_offline_cpu);
	if (cpuhp_state_num < 0)
		return cpuhp_state_num;

	return platform_driver_register(&smmu_pmu_driver);
}
module_init(arm_smmu_pmu_init);

static void __exit arm_smmu_pmu_exit(void)
{
	platform_driver_unregister(&smmu_pmu_driver);
	cpuhp_remove_multi_state(cpuhp_state_num);
}

module_exit(arm_smmu_pmu_exit);

MODULE_DESCRIPTION("PMU driver for ARM SMMUv3 Performance Monitors Extension");
MODULE_AUTHOR("Neil Leeder <nleeder@codeaurora.org>");
MODULE_AUTHOR("Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>");
MODULE_LICENSE("GPL v2");