// SPDX-License-Identifier: GPL-2.0-only
/*
 * ARM DMC-620 memory controller PMU driver
 *
 * Copyright (C) 2020 Ampere Computing LLC.
 */

#define DMC620_PMUNAME		"arm_dmc620"
#define DMC620_DRVNAME		DMC620_PMUNAME "_pmu"
#define pr_fmt(fmt)		DMC620_DRVNAME ": " fmt

#include <linux/acpi.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/printk.h>
#include <linux/rculist.h>
#include <linux/refcount.h>

#define DMC620_PA_SHIFT			12
#define DMC620_CNT_INIT			0x80000000
#define DMC620_CNT_MAX_PERIOD		0xffffffff
#define DMC620_PMU_CLKDIV2_MAX_COUNTERS	8
#define DMC620_PMU_CLK_MAX_COUNTERS	2
#define DMC620_PMU_MAX_COUNTERS		\
	(DMC620_PMU_CLKDIV2_MAX_COUNTERS + DMC620_PMU_CLK_MAX_COUNTERS)
/*
 * The PMU registers start at 0xA00 in the DMC-620 memory map, and these
 * offsets are relative to that base.
 *
 * Each counter has a group of control/value registers, and the
 * DMC620_PMU_COUNTERn offsets are within a counter group.
 *
 * The counter register groups start at 0xA10.
 */
#define DMC620_PMU_OVERFLOW_STATUS_CLKDIV2	0x8
#define  DMC620_PMU_OVERFLOW_STATUS_CLKDIV2_MASK	\
		GENMASK(DMC620_PMU_CLKDIV2_MAX_COUNTERS - 1, 0)
#define DMC620_PMU_OVERFLOW_STATUS_CLK		0xC
#define  DMC620_PMU_OVERFLOW_STATUS_CLK_MASK	\
		GENMASK(DMC620_PMU_CLK_MAX_COUNTERS - 1, 0)
#define DMC620_PMU_COUNTERS_BASE		0x10
#define  DMC620_PMU_COUNTERn_MASK_31_00		0x0
#define  DMC620_PMU_COUNTERn_MASK_63_32		0x4
#define  DMC620_PMU_COUNTERn_MATCH_31_00	0x8
#define  DMC620_PMU_COUNTERn_MATCH_63_32	0xC
#define  DMC620_PMU_COUNTERn_CONTROL		0x10
#define   DMC620_PMU_COUNTERn_CONTROL_ENABLE	BIT(0)
#define   DMC620_PMU_COUNTERn_CONTROL_INVERT	BIT(1)
#define   DMC620_PMU_COUNTERn_CONTROL_EVENT_MUX	GENMASK(6, 2)
#define   DMC620_PMU_COUNTERn_CONTROL_INCR_MUX	GENMASK(8, 7)
#define  DMC620_PMU_COUNTERn_VALUE		0x20
/* Offset of the registers for a given counter, relative to 0xA00 */
#define DMC620_PMU_COUNTERn_OFFSET(n)	\
	(DMC620_PMU_COUNTERS_BASE + 0x28 * (n))
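
/*
 * For example, counter 0's register group starts at offset 0x10 (0xA10 in
 * the full memory map) and counter 1's at 0x10 + 0x28 = 0x38, since each
 * counter group occupies 0x28 bytes.
 */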

/*
 * dmc620_pmu_irqs_lock: protects dmc620_pmu_irqs list
 * dmc620_pmu_node_lock: protects pmus_node lists in all dmc620_pmu instances
 */
static DEFINE_MUTEX(dmc620_pmu_irqs_lock);
static DEFINE_MUTEX(dmc620_pmu_node_lock);
static LIST_HEAD(dmc620_pmu_irqs);

struct dmc620_pmu_irq {
	struct hlist_node node;
	struct list_head pmus_node;
	struct list_head irqs_node;
	refcount_t refcount;
	unsigned int irq_num;
	unsigned int cpu;
};

struct dmc620_pmu {
	struct pmu pmu;

	void __iomem *base;
	struct dmc620_pmu_irq *irq;
	struct list_head pmus_node;

	/*
	 * The clkdiv2 and clk counters share a single bitmap and events
	 * array: the first DMC620_PMU_CLKDIV2_MAX_COUNTERS bits/slots track
	 * the clkdiv2 counters and the remaining DMC620_PMU_CLK_MAX_COUNTERS
	 * track the clk counters.
	 */
	DECLARE_BITMAP(used_mask, DMC620_PMU_MAX_COUNTERS);
	struct perf_event *events[DMC620_PMU_MAX_COUNTERS];
};

#define to_dmc620_pmu(p) (container_of(p, struct dmc620_pmu, pmu))

static int cpuhp_state_num;

struct dmc620_pmu_event_attr {
	struct device_attribute attr;
	u8 clkdiv2;
	u8 eventid;
};

static ssize_t
dmc620_pmu_event_show(struct device *dev,
		      struct device_attribute *attr, char *page)
{
	struct dmc620_pmu_event_attr *eattr;

	eattr = container_of(attr, typeof(*eattr), attr);

	return sysfs_emit(page, "event=0x%x,clkdiv2=0x%x\n", eattr->eventid, eattr->clkdiv2);
}

#define DMC620_PMU_EVENT_ATTR(_name, _eventid, _clkdiv2)		\
	(&((struct dmc620_pmu_event_attr[]) {{				\
		.attr = __ATTR(_name, 0444, dmc620_pmu_event_show, NULL), \
		.clkdiv2 = _clkdiv2,					\
		.eventid = _eventid,					\
	}})[0].attr.attr)
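
/*
 * For instance, DMC620_PMU_EVENT_ATTR(clk_request, 0x1, 0) below creates
 * the sysfs file events/clk_request, whose contents read back as
 * "event=0x1,clkdiv2=0x0" via dmc620_pmu_event_show().
 */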

static struct attribute *dmc620_pmu_events_attrs[] = {
	/* clkdiv2 events list */
	DMC620_PMU_EVENT_ATTR(clkdiv2_cycle_count, 0x0, 1),
	DMC620_PMU_EVENT_ATTR(clkdiv2_allocate, 0x1, 1),
	DMC620_PMU_EVENT_ATTR(clkdiv2_queue_depth, 0x2, 1),
	DMC620_PMU_EVENT_ATTR(clkdiv2_waiting_for_wr_data, 0x3, 1),
	DMC620_PMU_EVENT_ATTR(clkdiv2_read_backlog, 0x4, 1),
	DMC620_PMU_EVENT_ATTR(clkdiv2_waiting_for_mi, 0x5, 1),
	DMC620_PMU_EVENT_ATTR(clkdiv2_hazard_resolution, 0x6, 1),
	DMC620_PMU_EVENT_ATTR(clkdiv2_enqueue, 0x7, 1),
	DMC620_PMU_EVENT_ATTR(clkdiv2_arbitrate, 0x8, 1),
	DMC620_PMU_EVENT_ATTR(clkdiv2_lrank_turnaround_activate, 0x9, 1),
	DMC620_PMU_EVENT_ATTR(clkdiv2_prank_turnaround_activate, 0xa, 1),
	DMC620_PMU_EVENT_ATTR(clkdiv2_read_depth, 0xb, 1),
	DMC620_PMU_EVENT_ATTR(clkdiv2_write_depth, 0xc, 1),
	DMC620_PMU_EVENT_ATTR(clkdiv2_highigh_qos_depth, 0xd, 1),
	DMC620_PMU_EVENT_ATTR(clkdiv2_high_qos_depth, 0xe, 1),
	DMC620_PMU_EVENT_ATTR(clkdiv2_medium_qos_depth, 0xf, 1),
	DMC620_PMU_EVENT_ATTR(clkdiv2_low_qos_depth, 0x10, 1),
	DMC620_PMU_EVENT_ATTR(clkdiv2_activate, 0x11, 1),
	DMC620_PMU_EVENT_ATTR(clkdiv2_rdwr, 0x12, 1),
	DMC620_PMU_EVENT_ATTR(clkdiv2_refresh, 0x13, 1),
	DMC620_PMU_EVENT_ATTR(clkdiv2_training_request, 0x14, 1),
	DMC620_PMU_EVENT_ATTR(clkdiv2_t_mac_tracker, 0x15, 1),
	DMC620_PMU_EVENT_ATTR(clkdiv2_bk_fsm_tracker, 0x16, 1),
	DMC620_PMU_EVENT_ATTR(clkdiv2_bk_open_tracker, 0x17, 1),
	DMC620_PMU_EVENT_ATTR(clkdiv2_ranks_in_pwr_down, 0x18, 1),
	DMC620_PMU_EVENT_ATTR(clkdiv2_ranks_in_sref, 0x19, 1),

	/* clk events list */
	DMC620_PMU_EVENT_ATTR(clk_cycle_count, 0x0, 0),
	DMC620_PMU_EVENT_ATTR(clk_request, 0x1, 0),
	DMC620_PMU_EVENT_ATTR(clk_upload_stall, 0x2, 0),
	NULL,
};

static const struct attribute_group dmc620_pmu_events_attr_group = {
	.name = "events",
	.attrs = dmc620_pmu_events_attrs,
};

/* User ABI */
#define ATTR_CFG_FLD_mask_CFG		config
#define ATTR_CFG_FLD_mask_LO		0
#define ATTR_CFG_FLD_mask_HI		44
#define ATTR_CFG_FLD_match_CFG		config1
#define ATTR_CFG_FLD_match_LO		0
#define ATTR_CFG_FLD_match_HI		44
#define ATTR_CFG_FLD_invert_CFG		config2
#define ATTR_CFG_FLD_invert_LO		0
#define ATTR_CFG_FLD_invert_HI		0
#define ATTR_CFG_FLD_incr_CFG		config2
#define ATTR_CFG_FLD_incr_LO		1
#define ATTR_CFG_FLD_incr_HI		2
#define ATTR_CFG_FLD_event_CFG		config2
#define ATTR_CFG_FLD_event_LO		3
#define ATTR_CFG_FLD_event_HI		8
#define ATTR_CFG_FLD_clkdiv2_CFG	config2
#define ATTR_CFG_FLD_clkdiv2_LO		9
#define ATTR_CFG_FLD_clkdiv2_HI		9
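
/*
 * A raw event is thus specified through the config fields above, e.g.
 * (hypothetical instance name; see the probe path for how it is built):
 *
 *   perf stat -a -e arm_dmc620_2b700/clkdiv2=1,event=0x12/ sleep 1
 *
 * which perf packs as config2[9] = 1 and config2[8:3] = 0x12.
 */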

#define __GEN_PMU_FORMAT_ATTR(cfg, lo, hi)			\
	(lo) == (hi) ? #cfg ":" #lo "\n" : #cfg ":" #lo "-" #hi

#define _GEN_PMU_FORMAT_ATTR(cfg, lo, hi)			\
	__GEN_PMU_FORMAT_ATTR(cfg, lo, hi)

#define GEN_PMU_FORMAT_ATTR(name)				\
	PMU_FORMAT_ATTR(name,					\
	_GEN_PMU_FORMAT_ATTR(ATTR_CFG_FLD_##name##_CFG,		\
			ATTR_CFG_FLD_##name##_LO,		\
			ATTR_CFG_FLD_##name##_HI))

#define _ATTR_CFG_GET_FLD(attr, cfg, lo, hi)			\
	((((attr)->cfg) >> lo) & GENMASK_ULL(hi - lo, 0))

#define ATTR_CFG_GET_FLD(attr, name)				\
	_ATTR_CFG_GET_FLD(attr,					\
			ATTR_CFG_FLD_##name##_CFG,		\
			ATTR_CFG_FLD_##name##_LO,		\
			ATTR_CFG_FLD_##name##_HI)
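
/*
 * For example, ATTR_CFG_GET_FLD(attr, event) expands to
 * ((attr->config2 >> 3) & GENMASK_ULL(5, 0)), i.e. config2 bits [8:3].
 */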

GEN_PMU_FORMAT_ATTR(mask);
GEN_PMU_FORMAT_ATTR(match);
GEN_PMU_FORMAT_ATTR(invert);
GEN_PMU_FORMAT_ATTR(incr);
GEN_PMU_FORMAT_ATTR(event);
GEN_PMU_FORMAT_ATTR(clkdiv2);

static struct attribute *dmc620_pmu_formats_attrs[] = {
	&format_attr_mask.attr,
	&format_attr_match.attr,
	&format_attr_invert.attr,
	&format_attr_incr.attr,
	&format_attr_event.attr,
	&format_attr_clkdiv2.attr,
	NULL,
};

static const struct attribute_group dmc620_pmu_format_attr_group = {
	.name	= "format",
	.attrs	= dmc620_pmu_formats_attrs,
};

static ssize_t dmc620_pmu_cpumask_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(dev_get_drvdata(dev));

	return cpumap_print_to_pagebuf(true, buf,
				       cpumask_of(dmc620_pmu->irq->cpu));
}

static struct device_attribute dmc620_pmu_cpumask_attr =
	__ATTR(cpumask, 0444, dmc620_pmu_cpumask_show, NULL);

static struct attribute *dmc620_pmu_cpumask_attrs[] = {
	&dmc620_pmu_cpumask_attr.attr,
	NULL,
};

static const struct attribute_group dmc620_pmu_cpumask_attr_group = {
	.attrs = dmc620_pmu_cpumask_attrs,
};

static const struct attribute_group *dmc620_pmu_attr_groups[] = {
	&dmc620_pmu_events_attr_group,
	&dmc620_pmu_format_attr_group,
	&dmc620_pmu_cpumask_attr_group,
	NULL,
};

static inline
u32 dmc620_pmu_creg_read(struct dmc620_pmu *dmc620_pmu,
			 unsigned int idx, unsigned int reg)
{
	return readl(dmc620_pmu->base + DMC620_PMU_COUNTERn_OFFSET(idx) + reg);
}

static inline
void dmc620_pmu_creg_write(struct dmc620_pmu *dmc620_pmu,
			   unsigned int idx, unsigned int reg, u32 val)
{
	writel(val, dmc620_pmu->base + DMC620_PMU_COUNTERn_OFFSET(idx) + reg);
}

static
unsigned int dmc620_event_to_counter_control(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	unsigned int reg = 0;

	reg |= FIELD_PREP(DMC620_PMU_COUNTERn_CONTROL_INVERT,
			ATTR_CFG_GET_FLD(attr, invert));
	reg |= FIELD_PREP(DMC620_PMU_COUNTERn_CONTROL_EVENT_MUX,
			ATTR_CFG_GET_FLD(attr, event));
	reg |= FIELD_PREP(DMC620_PMU_COUNTERn_CONTROL_INCR_MUX,
			ATTR_CFG_GET_FLD(attr, incr));

	return reg;
}

static int dmc620_get_event_idx(struct perf_event *event)
{
	struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
	int idx, start_idx, end_idx;

	if (ATTR_CFG_GET_FLD(&event->attr, clkdiv2)) {
		start_idx = 0;
		end_idx = DMC620_PMU_CLKDIV2_MAX_COUNTERS;
	} else {
		start_idx = DMC620_PMU_CLKDIV2_MAX_COUNTERS;
		end_idx = DMC620_PMU_MAX_COUNTERS;
	}

	for (idx = start_idx; idx < end_idx; ++idx) {
		if (!test_and_set_bit(idx, dmc620_pmu->used_mask))
			return idx;
	}

	/* The counters are all in use. */
	return -EAGAIN;
}

static inline
u64 dmc620_pmu_read_counter(struct perf_event *event)
{
	struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);

	return dmc620_pmu_creg_read(dmc620_pmu,
				    event->hw.idx, DMC620_PMU_COUNTERn_VALUE);
}

static void dmc620_pmu_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_count, new_count;

	do {
		/* We may also be called from the irq handler */
		prev_count = local64_read(&hwc->prev_count);
		new_count = dmc620_pmu_read_counter(event);
	} while (local64_cmpxchg(&hwc->prev_count,
			prev_count, new_count) != prev_count);
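	/*
	 * The counters are 32 bits wide, so the subtraction below is reduced
	 * modulo 2^32: e.g. prev_count = 0xfffffff0 and new_count = 0x10
	 * yield delta = (0x10 - 0xfffffff0) & 0xffffffff = 0x20.
	 */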
	delta = (new_count - prev_count) & DMC620_CNT_MAX_PERIOD;
	local64_add(delta, &event->count);
}

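/*
 * Counters are (re)started from 0x80000000 rather than zero, so an overflow
 * interrupt fires after at most 2^31 increments, well before the 32-bit
 * delta arithmetic in dmc620_pmu_event_update() could become ambiguous.
 */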
static void dmc620_pmu_event_set_period(struct perf_event *event)
{
	struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);

	local64_set(&event->hw.prev_count, DMC620_CNT_INIT);
	dmc620_pmu_creg_write(dmc620_pmu,
			      event->hw.idx, DMC620_PMU_COUNTERn_VALUE, DMC620_CNT_INIT);
}

static void dmc620_pmu_enable_counter(struct perf_event *event)
{
	struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
	u32 reg;

	reg = dmc620_event_to_counter_control(event) | DMC620_PMU_COUNTERn_CONTROL_ENABLE;
	dmc620_pmu_creg_write(dmc620_pmu,
			      event->hw.idx, DMC620_PMU_COUNTERn_CONTROL, reg);
}

static void dmc620_pmu_disable_counter(struct perf_event *event)
{
	struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);

	dmc620_pmu_creg_write(dmc620_pmu,
			      event->hw.idx, DMC620_PMU_COUNTERn_CONTROL, 0);
}

static irqreturn_t dmc620_pmu_handle_irq(int irq_num, void *data)
{
	struct dmc620_pmu_irq *irq = data;
	struct dmc620_pmu *dmc620_pmu;
	irqreturn_t ret = IRQ_NONE;

	rcu_read_lock();
	list_for_each_entry_rcu(dmc620_pmu, &irq->pmus_node, pmus_node) {
		unsigned long status;
		struct perf_event *event;
		unsigned int idx;

		/*
		 * HW doesn't provide a control to atomically disable all
		 * counters. To prevent a race where an overflow occurs
		 * while the status register is being cleared, disable all
		 * events before handling the overflow.
		 */
		for (idx = 0; idx < DMC620_PMU_MAX_COUNTERS; idx++) {
			event = dmc620_pmu->events[idx];
			if (!event)
				continue;
			dmc620_pmu_disable_counter(event);
		}

		status = readl(dmc620_pmu->base + DMC620_PMU_OVERFLOW_STATUS_CLKDIV2);
		status |= (readl(dmc620_pmu->base + DMC620_PMU_OVERFLOW_STATUS_CLK) <<
				DMC620_PMU_CLKDIV2_MAX_COUNTERS);
		if (status) {
			for_each_set_bit(idx, &status,
					DMC620_PMU_MAX_COUNTERS) {
				event = dmc620_pmu->events[idx];
				if (WARN_ON_ONCE(!event))
					continue;
				dmc620_pmu_event_update(event);
				dmc620_pmu_event_set_period(event);
			}

			if (status & DMC620_PMU_OVERFLOW_STATUS_CLKDIV2_MASK)
				writel(0, dmc620_pmu->base + DMC620_PMU_OVERFLOW_STATUS_CLKDIV2);

			if ((status >> DMC620_PMU_CLKDIV2_MAX_COUNTERS) &
					DMC620_PMU_OVERFLOW_STATUS_CLK_MASK)
				writel(0, dmc620_pmu->base + DMC620_PMU_OVERFLOW_STATUS_CLK);
		}

		for (idx = 0; idx < DMC620_PMU_MAX_COUNTERS; idx++) {
			event = dmc620_pmu->events[idx];
			if (!event)
				continue;
			if (!(event->hw.state & PERF_HES_STOPPED))
				dmc620_pmu_enable_counter(event);
		}

		ret = IRQ_HANDLED;
	}
	rcu_read_unlock();

	return ret;
}

static struct dmc620_pmu_irq *__dmc620_pmu_get_irq(int irq_num)
{
	struct dmc620_pmu_irq *irq;
	int ret;

	list_for_each_entry(irq, &dmc620_pmu_irqs, irqs_node)
		if (irq->irq_num == irq_num && refcount_inc_not_zero(&irq->refcount))
			return irq;

	irq = kzalloc(sizeof(*irq), GFP_KERNEL);
	if (!irq)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&irq->pmus_node);

	/* Pick one CPU to be the preferred one to use */
	irq->cpu = raw_smp_processor_id();
	refcount_set(&irq->refcount, 1);

	ret = request_irq(irq_num, dmc620_pmu_handle_irq,
			  IRQF_NOBALANCING | IRQF_NO_THREAD,
			  "dmc620-pmu", irq);
	if (ret)
		goto out_free_aff;

	ret = irq_set_affinity(irq_num, cpumask_of(irq->cpu));
	if (ret)
		goto out_free_irq;

	ret = cpuhp_state_add_instance_nocalls(cpuhp_state_num, &irq->node);
	if (ret)
		goto out_free_irq;

	irq->irq_num = irq_num;
	list_add(&irq->irqs_node, &dmc620_pmu_irqs);

	return irq;

out_free_irq:
	free_irq(irq_num, irq);
out_free_aff:
	kfree(irq);
	return ERR_PTR(ret);
}

static int dmc620_pmu_get_irq(struct dmc620_pmu *dmc620_pmu, int irq_num)
{
	struct dmc620_pmu_irq *irq;

	mutex_lock(&dmc620_pmu_irqs_lock);
	irq = __dmc620_pmu_get_irq(irq_num);
	mutex_unlock(&dmc620_pmu_irqs_lock);

	if (IS_ERR(irq))
		return PTR_ERR(irq);

	dmc620_pmu->irq = irq;
	mutex_lock(&dmc620_pmu_node_lock);
	list_add_rcu(&dmc620_pmu->pmus_node, &irq->pmus_node);
	mutex_unlock(&dmc620_pmu_node_lock);

	return 0;
}

static void dmc620_pmu_put_irq(struct dmc620_pmu *dmc620_pmu)
{
	struct dmc620_pmu_irq *irq = dmc620_pmu->irq;

	mutex_lock(&dmc620_pmu_node_lock);
	list_del_rcu(&dmc620_pmu->pmus_node);
	mutex_unlock(&dmc620_pmu_node_lock);

	mutex_lock(&dmc620_pmu_irqs_lock);
	if (!refcount_dec_and_test(&irq->refcount)) {
		mutex_unlock(&dmc620_pmu_irqs_lock);
		return;
	}

	list_del(&irq->irqs_node);
	mutex_unlock(&dmc620_pmu_irqs_lock);

	free_irq(irq->irq_num, irq);
	cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &irq->node);
	kfree(irq);
}

static int dmc620_pmu_event_init(struct perf_event *event)
{
	struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	struct perf_event *sibling;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/*
	 * DMC 620 PMUs are shared across all CPUs and cannot
	 * support task-bound or sampling events.
	 */
	if (is_sampling_event(event) ||
	    event->attach_state & PERF_ATTACH_TASK) {
		dev_dbg(dmc620_pmu->pmu.dev,
			"Can't support per-task counters\n");
		return -EOPNOTSUPP;
	}

	/*
	 * Many perf core operations (e.g. event rotation) operate on a
	 * single CPU context. This is obvious for CPU PMUs, where one
	 * expects the same sets of events being observed on all CPUs,
	 * but can lead to issues for off-core PMUs, where each
	 * event could be theoretically assigned to a different CPU. To
	 * mitigate this, we enforce CPU assignment to one, selected
	 * processor.
	 */
	event->cpu = dmc620_pmu->irq->cpu;
	if (event->cpu < 0)
		return -EINVAL;

	hwc->idx = -1;

	if (event->group_leader == event)
		return 0;

	/*
	 * We can't atomically disable all HW counters, so only one hardware
	 * event is allowed per group; software events in the same group
	 * are acceptable.
	 */
	if (!is_software_event(event->group_leader))
		return -EINVAL;

	for_each_sibling_event(sibling, event->group_leader) {
		if (sibling != event &&
		    !is_software_event(sibling))
			return -EINVAL;
	}

	return 0;
}

static void dmc620_pmu_read(struct perf_event *event)
{
	dmc620_pmu_event_update(event);
}

static void dmc620_pmu_start(struct perf_event *event, int flags)
{
	event->hw.state = 0;
	dmc620_pmu_event_set_period(event);
	dmc620_pmu_enable_counter(event);
}

static void dmc620_pmu_stop(struct perf_event *event, int flags)
{
	if (event->hw.state & PERF_HES_STOPPED)
		return;

	dmc620_pmu_disable_counter(event);
	dmc620_pmu_event_update(event);
	event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}

static int dmc620_pmu_add(struct perf_event *event, int flags)
{
	struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	u64 reg;

	idx = dmc620_get_event_idx(event);
	if (idx < 0)
		return idx;

	hwc->idx = idx;
	dmc620_pmu->events[idx] = event;
	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;

	reg = ATTR_CFG_GET_FLD(attr, mask);
	dmc620_pmu_creg_write(dmc620_pmu,
			      idx, DMC620_PMU_COUNTERn_MASK_31_00, lower_32_bits(reg));
	dmc620_pmu_creg_write(dmc620_pmu,
			      idx, DMC620_PMU_COUNTERn_MASK_63_32, upper_32_bits(reg));

	reg = ATTR_CFG_GET_FLD(attr, match);
	dmc620_pmu_creg_write(dmc620_pmu,
			      idx, DMC620_PMU_COUNTERn_MATCH_31_00, lower_32_bits(reg));
	dmc620_pmu_creg_write(dmc620_pmu,
			      idx, DMC620_PMU_COUNTERn_MATCH_63_32, upper_32_bits(reg));

	if (flags & PERF_EF_START)
		dmc620_pmu_start(event, PERF_EF_RELOAD);

	perf_event_update_userpage(event);
	return 0;
}

static void dmc620_pmu_del(struct perf_event *event, int flags)
{
	struct dmc620_pmu *dmc620_pmu = to_dmc620_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	dmc620_pmu_stop(event, PERF_EF_UPDATE);
	dmc620_pmu->events[idx] = NULL;
	clear_bit(idx, dmc620_pmu->used_mask);
	perf_event_update_userpage(event);
}

static int dmc620_pmu_cpu_teardown(unsigned int cpu,
				   struct hlist_node *node)
{
	struct dmc620_pmu_irq *irq;
	struct dmc620_pmu *dmc620_pmu;
	unsigned int target;

	irq = hlist_entry_safe(node, struct dmc620_pmu_irq, node);
	if (cpu != irq->cpu)
		return 0;

	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		return 0;

	/* We're only reading, but this isn't the place to be involving RCU */
	mutex_lock(&dmc620_pmu_node_lock);
	list_for_each_entry(dmc620_pmu, &irq->pmus_node, pmus_node)
		perf_pmu_migrate_context(&dmc620_pmu->pmu, irq->cpu, target);
	mutex_unlock(&dmc620_pmu_node_lock);

	WARN_ON(irq_set_affinity(irq->irq_num, cpumask_of(target)));
	irq->cpu = target;

	return 0;
}

static int dmc620_pmu_device_probe(struct platform_device *pdev)
{
	struct dmc620_pmu *dmc620_pmu;
	struct resource *res;
	char *name;
	int irq_num;
	int i, ret;

	dmc620_pmu = devm_kzalloc(&pdev->dev,
			sizeof(struct dmc620_pmu), GFP_KERNEL);
	if (!dmc620_pmu)
		return -ENOMEM;

	platform_set_drvdata(pdev, dmc620_pmu);

	dmc620_pmu->pmu = (struct pmu) {
		.module		= THIS_MODULE,
		.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
		.task_ctx_nr	= perf_invalid_context,
		.event_init	= dmc620_pmu_event_init,
		.add		= dmc620_pmu_add,
		.del		= dmc620_pmu_del,
		.start		= dmc620_pmu_start,
		.stop		= dmc620_pmu_stop,
		.read		= dmc620_pmu_read,
		.attr_groups	= dmc620_pmu_attr_groups,
	};

	dmc620_pmu->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(dmc620_pmu->base))
		return PTR_ERR(dmc620_pmu->base);

	/* Make sure device is reset before enabling interrupt */
	for (i = 0; i < DMC620_PMU_MAX_COUNTERS; i++)
		dmc620_pmu_creg_write(dmc620_pmu, i, DMC620_PMU_COUNTERn_CONTROL, 0);
	writel(0, dmc620_pmu->base + DMC620_PMU_OVERFLOW_STATUS_CLKDIV2);
	writel(0, dmc620_pmu->base + DMC620_PMU_OVERFLOW_STATUS_CLK);

	irq_num = platform_get_irq(pdev, 0);
	if (irq_num < 0)
		return irq_num;

	ret = dmc620_pmu_get_irq(dmc620_pmu, irq_num);
	if (ret)
		return ret;

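	/*
	 * Name the PMU after the physical page of its registers: e.g. an
	 * instance whose registers live at 0x2b700000 (hypothetical) is
	 * registered as arm_dmc620_2b700.
	 */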
	name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
			      "%s_%llx", DMC620_PMUNAME,
			      (u64)(res->start >> DMC620_PA_SHIFT));
	if (!name) {
		dev_err(&pdev->dev,
			"Create name failed, PMU @%pa\n", &res->start);
		ret = -ENOMEM;
		goto out_teardown_dev;
	}

	ret = perf_pmu_register(&dmc620_pmu->pmu, name, -1);
	if (ret)
		goto out_teardown_dev;

	return 0;

out_teardown_dev:
	dmc620_pmu_put_irq(dmc620_pmu);
	synchronize_rcu();
	return ret;
}

static int dmc620_pmu_device_remove(struct platform_device *pdev)
{
	struct dmc620_pmu *dmc620_pmu = platform_get_drvdata(pdev);

	dmc620_pmu_put_irq(dmc620_pmu);

	/* perf will synchronise RCU before devres can free dmc620_pmu */
	perf_pmu_unregister(&dmc620_pmu->pmu);

	return 0;
}

static const struct acpi_device_id dmc620_acpi_match[] = {
	{ "ARMHD620", 0},
	{},
};
MODULE_DEVICE_TABLE(acpi, dmc620_acpi_match);

static struct platform_driver dmc620_pmu_driver = {
	.driver	= {
		.name		= DMC620_DRVNAME,
		.acpi_match_table = dmc620_acpi_match,
		.suppress_bind_attrs = true,
	},
	.probe	= dmc620_pmu_device_probe,
	.remove	= dmc620_pmu_device_remove,
};

static int __init dmc620_pmu_init(void)
{
	int ret;

	cpuhp_state_num = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
						  DMC620_DRVNAME,
						  NULL,
						  dmc620_pmu_cpu_teardown);
	if (cpuhp_state_num < 0)
		return cpuhp_state_num;

	ret = platform_driver_register(&dmc620_pmu_driver);
	if (ret)
		cpuhp_remove_multi_state(cpuhp_state_num);

	return ret;
}

static void __exit dmc620_pmu_exit(void)
{
	platform_driver_unregister(&dmc620_pmu_driver);
	cpuhp_remove_multi_state(cpuhp_state_num);
}

module_init(dmc620_pmu_init);
module_exit(dmc620_pmu_exit);

MODULE_DESCRIPTION("Perf driver for the ARM DMC-620 memory controller");
MODULE_AUTHOR("Tuan Phan <tuanphan@os.amperecomputing.com>");
MODULE_LICENSE("GPL v2");