// SPDX-License-Identifier: GPL-2.0+
//
// Linux performance counter support for ARC CPUs.
// This code is inspired by the perf support of various other architectures.
//
// Copyright (C) 2013-2018 Synopsys, Inc. (www.synopsys.com)

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <asm/arcregs.h>
#include <asm/stacktrace.h>

/* HW holds 8 symbols + one for null terminator */
#define ARCPMU_EVENT_NAME_LEN	9

enum arc_pmu_attr_groups {
	ARCPMU_ATTR_GR_EVENTS,
	ARCPMU_ATTR_GR_FORMATS,
	ARCPMU_NR_ATTR_GR
};

struct arc_pmu_raw_event_entry {
	char name[ARCPMU_EVENT_NAME_LEN];
};

struct arc_pmu {
	struct pmu	pmu;
	unsigned int	irq;
	int		n_counters;
	int		n_events;
	u64		max_period;
	int		ev_hw_idx[PERF_COUNT_ARC_HW_MAX];

	struct arc_pmu_raw_event_entry	*raw_entry;
	struct attribute		**attrs;
	struct perf_pmu_events_attr	*attr;
	const struct attribute_group	*attr_groups[ARCPMU_NR_ATTR_GR + 1];
};

struct arc_pmu_cpu {
	/*
	 * A 1 bit for an index indicates that the counter is being used for
	 * an event. A 0 means that the counter can be used.
	 */
	unsigned long	used_mask[BITS_TO_LONGS(ARC_PERF_MAX_COUNTERS)];

	/*
	 * The events that are active on the PMU for the given index.
	 */
	struct perf_event *act_counter[ARC_PERF_MAX_COUNTERS];
};

struct arc_callchain_trace {
	int depth;
	void *perf_stuff;
};

static int callchain_trace(unsigned int addr, void *data)
{
	struct arc_callchain_trace *ctrl = data;
	struct perf_callchain_entry_ctx *entry = ctrl->perf_stuff;

	perf_callchain_store(entry, addr);

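	/* Stop the unwind once four entries have been recorded */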
	if (ctrl->depth++ < 3)
		return 0;

	return -1;
}

void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
			   struct pt_regs *regs)
{
	struct arc_callchain_trace ctrl = {
		.depth = 0,
		.perf_stuff = entry,
	};

	arc_unwind_core(NULL, regs, callchain_trace, &ctrl);
}

void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
			 struct pt_regs *regs)
{
	/*
	 * The user stack can't be unwound trivially with the kernel dwarf
	 * unwinder, so for now just record the user PC.
	 */
	perf_callchain_store(entry, instruction_pointer(regs));
}

static struct arc_pmu *arc_pmu;
static DEFINE_PER_CPU(struct arc_pmu_cpu, arc_pmu_cpu);

/* read counter #idx; note that counter# != event# on ARC! */
static u64 arc_pmu_read_counter(int idx)
{
	u32 tmp;
	u64 result;

	/*
	 * ARC supports making 'snapshots' of the counters, so we don't
	 * need to care about counters wrapping to 0 underneath our feet
	 */
	write_aux_reg(ARC_REG_PCT_INDEX, idx);
	tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
	write_aux_reg(ARC_REG_PCT_CONTROL, tmp | ARC_REG_PCT_CONTROL_SN);
	result = (u64) (read_aux_reg(ARC_REG_PCT_SNAPH)) << 32;
	result |= read_aux_reg(ARC_REG_PCT_SNAPL);

	return result;
}

static void arc_perf_event_update(struct perf_event *event,
				  struct hw_perf_event *hwc, int idx)
{
	u64 prev_raw_count = local64_read(&hwc->prev_count);
	u64 new_raw_count = arc_pmu_read_counter(idx);
	s64 delta = new_raw_count - prev_raw_count;

	/*
	 * We aren't afraid of hwc->prev_count changing beneath our feet
	 * because there's no way for this function to be re-entered.
	 */
	local64_set(&hwc->prev_count, new_raw_count);
	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);
}

static void arc_pmu_read(struct perf_event *event)
{
	arc_perf_event_update(event, &event->hw, event->hw.idx);
}

static int arc_pmu_cache_event(u64 config)
{
	unsigned int cache_type, cache_op, cache_result;
	int ret;

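	/* config packs the cache type, op and result, one byte each */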
	cache_type	= (config >>  0) & 0xff;
	cache_op	= (config >>  8) & 0xff;
	cache_result	= (config >> 16) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ret = arc_pmu_cache_map[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	pr_debug("init cache event: type/op/result %d/%d/%d with h/w %d \'%s\'\n",
		 cache_type, cache_op, cache_result, ret,
		 arc_pmu_ev_hw_map[ret]);

	return ret;
}

/* initializes hw_perf_event structure if event is supported */
static int arc_pmu_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int ret;

	if (!is_sampling_event(event)) {
		hwc->sample_period = arc_pmu->max_period;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	hwc->config = 0;

	if (is_isa_arcv2()) {
		/* "exclude user" means "count only kernel" */
		if (event->attr.exclude_user)
			hwc->config |= ARC_REG_PCT_CONFIG_KERN;

		/* "exclude kernel" means "count only user" */
		if (event->attr.exclude_kernel)
			hwc->config |= ARC_REG_PCT_CONFIG_USER;
	}

	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		if (event->attr.config >= PERF_COUNT_HW_MAX)
			return -ENOENT;
		if (arc_pmu->ev_hw_idx[event->attr.config] < 0)
			return -ENOENT;
		hwc->config |= arc_pmu->ev_hw_idx[event->attr.config];
		pr_debug("init event %d with h/w %08x \'%s\'\n",
			 (int)event->attr.config, (int)hwc->config,
			 arc_pmu_ev_hw_map[event->attr.config]);
		return 0;

	case PERF_TYPE_HW_CACHE:
		ret = arc_pmu_cache_event(event->attr.config);
		if (ret < 0)
			return ret;
		hwc->config |= arc_pmu->ev_hw_idx[ret];
		pr_debug("init cache event with h/w %08x \'%s\'\n",
			 (int)hwc->config, arc_pmu_ev_hw_map[ret]);
		return 0;

	case PERF_TYPE_RAW:
		if (event->attr.config >= arc_pmu->n_events)
			return -ENOENT;

		hwc->config |= event->attr.config;
		pr_debug("init raw event with idx %lld \'%s\'\n",
			 event->attr.config,
			 arc_pmu->raw_entry[event->attr.config].name);

		return 0;

	default:
		return -ENOENT;
	}
}

/* starts all counters */
static void arc_pmu_enable(struct pmu *pmu)
{
	u32 tmp;

	tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
	write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x1);
}

/* stops all counters */
static void arc_pmu_disable(struct pmu *pmu)
{
	u32 tmp;

	tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
	write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x0);
}

static int arc_pmu_event_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int idx = hwc->idx;
	int overflow = 0;
	u64 value;

	if (unlikely(left <= -period)) {
		/* left underflowed by more than period. */
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	} else if (unlikely(left <= 0)) {
		/* left underflowed by less than period. */
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	}

	if (left > arc_pmu->max_period)
		left = arc_pmu->max_period;

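	/*
	 * Program the counter to (max_period - left) so that it reaches
	 * max_period after counting 'left' more events.
	 */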
	value = arc_pmu->max_period - left;
	local64_set(&hwc->prev_count, value);

	/* Select counter */
	write_aux_reg(ARC_REG_PCT_INDEX, idx);

	/* Write value */
	write_aux_reg(ARC_REG_PCT_COUNTL, lower_32_bits(value));
	write_aux_reg(ARC_REG_PCT_COUNTH, upper_32_bits(value));

	perf_event_update_userpage(event);

	return overflow;
}

/*
 * Assigns hardware counter to hardware condition.
 * Note that there is no separate start/stop mechanism;
 * stopping is achieved by assigning the 'never' condition
 */
static void arc_pmu_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (WARN_ON_ONCE(idx == -1))
		return;

	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;

	arc_pmu_event_set_period(event);

	/* Enable interrupt for this counter */
	if (is_sampling_event(event))
		write_aux_reg(ARC_REG_PCT_INT_CTRL,
			      read_aux_reg(ARC_REG_PCT_INT_CTRL) | BIT(idx));

	/* enable ARC pmu here */
	write_aux_reg(ARC_REG_PCT_INDEX, idx);		/* counter # */
	write_aux_reg(ARC_REG_PCT_CONFIG, hwc->config);	/* condition */
}

static void arc_pmu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	/* Disable interrupt for this counter */
	if (is_sampling_event(event)) {
		/*
		 * Reset the interrupt flag by writing 1 to it; this makes
		 * sure no pending interrupt is left behind.
		 */
		write_aux_reg(ARC_REG_PCT_INT_ACT, BIT(idx));
		write_aux_reg(ARC_REG_PCT_INT_CTRL,
			      read_aux_reg(ARC_REG_PCT_INT_CTRL) & ~BIT(idx));
	}

	if (!(event->hw.state & PERF_HES_STOPPED)) {
		/* stop ARC pmu here */
		write_aux_reg(ARC_REG_PCT_INDEX, idx);

		/* condition code #0 is always "never" */
		write_aux_reg(ARC_REG_PCT_CONFIG, 0);

		event->hw.state |= PERF_HES_STOPPED;
	}

	if ((flags & PERF_EF_UPDATE) &&
	    !(event->hw.state & PERF_HES_UPTODATE)) {
		arc_perf_event_update(event, &event->hw, idx);
		event->hw.state |= PERF_HES_UPTODATE;
	}
}

static void arc_pmu_del(struct perf_event *event, int flags)
{
	struct arc_pmu_cpu *pmu_cpu = this_cpu_ptr(&arc_pmu_cpu);

	arc_pmu_stop(event, PERF_EF_UPDATE);
	__clear_bit(event->hw.idx, pmu_cpu->used_mask);

	pmu_cpu->act_counter[event->hw.idx] = NULL;

	perf_event_update_userpage(event);
}

/* allocate hardware counter and optionally start counting */
static int arc_pmu_add(struct perf_event *event, int flags)
{
	struct arc_pmu_cpu *pmu_cpu = this_cpu_ptr(&arc_pmu_cpu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

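	/*
	 * Find the first free counter. ARC_PERF_MAX_COUNTERS is at most 32,
	 * so a single word of used_mask is enough.
	 */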
	idx = ffz(pmu_cpu->used_mask[0]);
	if (idx == arc_pmu->n_counters)
		return -EAGAIN;

	__set_bit(idx, pmu_cpu->used_mask);
	hwc->idx = idx;

	write_aux_reg(ARC_REG_PCT_INDEX, idx);

	pmu_cpu->act_counter[idx] = event;

	if (is_sampling_event(event)) {
		/* Mimic full counter overflow as other arches do */
		write_aux_reg(ARC_REG_PCT_INT_CNTL,
			      lower_32_bits(arc_pmu->max_period));
		write_aux_reg(ARC_REG_PCT_INT_CNTH,
			      upper_32_bits(arc_pmu->max_period));
	}

	write_aux_reg(ARC_REG_PCT_CONFIG, 0);
	write_aux_reg(ARC_REG_PCT_COUNTL, 0);
	write_aux_reg(ARC_REG_PCT_COUNTH, 0);
	local64_set(&hwc->prev_count, 0);

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (flags & PERF_EF_START)
		arc_pmu_start(event, PERF_EF_RELOAD);

	perf_event_update_userpage(event);

	return 0;
}

#ifdef CONFIG_ISA_ARCV2
static irqreturn_t arc_pmu_intr(int irq, void *dev)
{
	struct perf_sample_data data;
	struct arc_pmu_cpu *pmu_cpu = this_cpu_ptr(&arc_pmu_cpu);
	struct pt_regs *regs;
	unsigned int active_ints;
	int idx;

	arc_pmu_disable(&arc_pmu->pmu);

	active_ints = read_aux_reg(ARC_REG_PCT_INT_ACT);
	if (!active_ints)
		goto done;

	regs = get_irq_regs();

	do {
		struct perf_event *event;
		struct hw_perf_event *hwc;

		idx = __ffs(active_ints);

		/* Reset the interrupt flag by writing 1 to it */
		write_aux_reg(ARC_REG_PCT_INT_ACT, BIT(idx));

		/*
		 * When the "interrupt active" bit is reset, the corresponding
		 * "interrupt enable" bit gets automatically reset as well, so
		 * we need to re-enable the interrupt for this counter.
		 */
		write_aux_reg(ARC_REG_PCT_INT_CTRL,
			read_aux_reg(ARC_REG_PCT_INT_CTRL) | BIT(idx));

		event = pmu_cpu->act_counter[idx];
		hwc = &event->hw;

		WARN_ON_ONCE(hwc->idx != idx);

		arc_perf_event_update(event, &event->hw, event->hw.idx);
		perf_sample_data_init(&data, 0, hwc->last_period);
		if (arc_pmu_event_set_period(event)) {
			if (perf_event_overflow(event, &data, regs))
				arc_pmu_stop(event, 0);
		}

		active_ints &= ~BIT(idx);
	} while (active_ints);

done:
	arc_pmu_enable(&arc_pmu->pmu);

	return IRQ_HANDLED;
}
#else

static irqreturn_t arc_pmu_intr(int irq, void *dev)
{
	return IRQ_NONE;
}

#endif /* CONFIG_ISA_ARCV2 */

static void arc_cpu_pmu_irq_init(void *data)
{
	int irq = *(int *)data;

	enable_percpu_irq(irq, IRQ_TYPE_NONE);

	/* Clear all pending interrupt flags */
	write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff);
}

/* Event field occupies the bottom 15 bits of our config field */
PMU_FORMAT_ATTR(event, "config:0-14");
static struct attribute *arc_pmu_format_attrs[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group arc_pmu_format_attr_gr = {
	.name = "format",
	.attrs = arc_pmu_format_attrs,
};

static ssize_t arc_pmu_events_sysfs_show(struct device *dev,
					 struct device_attribute *attr,
					 char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
	return sprintf(page, "event=0x%04llx\n", pmu_attr->id);
}

/*
 * We don't add attrs here as we don't have a pre-defined list of perf events.
 * We will generate and add attrs dynamically in probe() after we read the HW
 * configuration.
 */
static struct attribute_group arc_pmu_events_attr_gr = {
	.name = "events",
};

static void arc_pmu_add_raw_event_attr(int j, char *str)
{
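	/*
	 * raw_entry was allocated zeroed, so copying at most
	 * ARCPMU_EVENT_NAME_LEN - 1 characters keeps the name NUL-terminated.
	 */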
	memmove(arc_pmu->raw_entry[j].name, str, ARCPMU_EVENT_NAME_LEN - 1);
	arc_pmu->attr[j].attr.attr.name = arc_pmu->raw_entry[j].name;
	arc_pmu->attr[j].attr.attr.mode = VERIFY_OCTAL_PERMISSIONS(0444);
	arc_pmu->attr[j].attr.show = arc_pmu_events_sysfs_show;
	arc_pmu->attr[j].id = j;
	arc_pmu->attrs[j] = &(arc_pmu->attr[j].attr.attr);
}

static int arc_pmu_raw_alloc(struct device *dev)
{
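	/*
	 * Allocate one extra, zeroed slot so that the attribute pointer
	 * array handed to sysfs ends with a NULL terminator.
	 */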
	arc_pmu->attr = devm_kmalloc_array(dev, arc_pmu->n_events + 1,
		sizeof(*arc_pmu->attr), GFP_KERNEL | __GFP_ZERO);
	if (!arc_pmu->attr)
		return -ENOMEM;

	arc_pmu->attrs = devm_kmalloc_array(dev, arc_pmu->n_events + 1,
		sizeof(*arc_pmu->attrs), GFP_KERNEL | __GFP_ZERO);
	if (!arc_pmu->attrs)
		return -ENOMEM;

	arc_pmu->raw_entry = devm_kmalloc_array(dev, arc_pmu->n_events,
		sizeof(*arc_pmu->raw_entry), GFP_KERNEL | __GFP_ZERO);
	if (!arc_pmu->raw_entry)
		return -ENOMEM;

	return 0;
}

static inline bool event_in_hw_event_map(int i, char *name)
{
	if (!arc_pmu_ev_hw_map[i])
		return false;

	if (!strlen(arc_pmu_ev_hw_map[i]))
		return false;

	if (strcmp(arc_pmu_ev_hw_map[i], name))
		return false;

	return true;
}

static void arc_pmu_map_hw_event(int j, char *str)
{
	int i;

	/* See if HW condition has been mapped to a perf event_id */
	for (i = 0; i < ARRAY_SIZE(arc_pmu_ev_hw_map); i++) {
		if (event_in_hw_event_map(i, str)) {
			pr_debug("mapping perf event %2d to h/w event \'%8s\' (idx %d)\n",
				 i, str, j);
			arc_pmu->ev_hw_idx[i] = j;
		}
	}
}

static int arc_pmu_device_probe(struct platform_device *pdev)
{
	struct arc_reg_pct_build pct_bcr;
	struct arc_reg_cc_build cc_bcr;
	int i, has_interrupts;
	int counter_size;	/* in bits */

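	/*
	 * A condition name is read back as two 32-bit words from the
	 * CC_NAME0/1 aux registers; the 'sentinel' byte leaves room for
	 * the NUL terminator.
	 */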
	union cc_name {
		struct {
			u32 word0, word1;
			char sentinel;
		} indiv;
		char str[ARCPMU_EVENT_NAME_LEN];
	} cc_name;

	READ_BCR(ARC_REG_PCT_BUILD, pct_bcr);
	if (!pct_bcr.v) {
		pr_err("This core does not have performance counters!\n");
		return -ENODEV;
	}
	BUILD_BUG_ON(ARC_PERF_MAX_COUNTERS > 32);
	if (WARN_ON(pct_bcr.c > ARC_PERF_MAX_COUNTERS))
		return -EINVAL;

	READ_BCR(ARC_REG_CC_BUILD, cc_bcr);
	if (WARN(!cc_bcr.v, "Counters exist but no countable conditions?"))
		return -EINVAL;

	arc_pmu = devm_kzalloc(&pdev->dev, sizeof(struct arc_pmu), GFP_KERNEL);
	if (!arc_pmu)
		return -ENOMEM;

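	/*
	 * Countable conditions (events) and physical counters are
	 * enumerated by separate build configuration registers.
	 */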
	arc_pmu->n_events = cc_bcr.c;

	if (arc_pmu_raw_alloc(&pdev->dev))
		return -ENOMEM;

	has_interrupts = is_isa_arcv2() ? pct_bcr.i : 0;

	arc_pmu->n_counters = pct_bcr.c;
	counter_size = 32 + (pct_bcr.s << 4);

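	/* Cap the sampling period at half of the full counter range */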
	arc_pmu->max_period = (1ULL << counter_size) / 2 - 1ULL;

	pr_info("ARC perf\t: %d counters (%d bits), %d conditions%s\n",
		arc_pmu->n_counters, counter_size, cc_bcr.c,
		has_interrupts ? ", [overflow IRQ support]" : "");

	cc_name.str[ARCPMU_EVENT_NAME_LEN - 1] = 0;
	for (i = 0; i < PERF_COUNT_ARC_HW_MAX; i++)
		arc_pmu->ev_hw_idx[i] = -1;

	/* loop thru all available h/w condition indexes */
	for (i = 0; i < cc_bcr.c; i++) {
		write_aux_reg(ARC_REG_CC_INDEX, i);
		cc_name.indiv.word0 = read_aux_reg(ARC_REG_CC_NAME0);
		cc_name.indiv.word1 = read_aux_reg(ARC_REG_CC_NAME1);

		arc_pmu_map_hw_event(i, cc_name.str);
		arc_pmu_add_raw_event_attr(i, cc_name.str);
	}

	arc_pmu_events_attr_gr.attrs = arc_pmu->attrs;
	arc_pmu->attr_groups[ARCPMU_ATTR_GR_EVENTS] = &arc_pmu_events_attr_gr;
	arc_pmu->attr_groups[ARCPMU_ATTR_GR_FORMATS] = &arc_pmu_format_attr_gr;

	arc_pmu->pmu = (struct pmu) {
		.pmu_enable	= arc_pmu_enable,
		.pmu_disable	= arc_pmu_disable,
		.event_init	= arc_pmu_event_init,
		.add		= arc_pmu_add,
		.del		= arc_pmu_del,
		.start		= arc_pmu_start,
		.stop		= arc_pmu_stop,
		.read		= arc_pmu_read,
		.attr_groups	= arc_pmu->attr_groups,
	};

	if (has_interrupts) {
		int irq = platform_get_irq(pdev, 0);

		if (irq < 0) {
			pr_err("Cannot get IRQ number for the platform\n");
			return -ENODEV;
		}

		arc_pmu->irq = irq;

		/* intc map function ensures irq_set_percpu_devid() is called */
		request_percpu_irq(irq, arc_pmu_intr, "ARC perf counters",
				   this_cpu_ptr(&arc_pmu_cpu));

		on_each_cpu(arc_cpu_pmu_irq_init, &irq, 1);

	} else {
		arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
	}

	/*
	 * The perf parser doesn't really like the '-' symbol in event names,
	 * so let's use '_' in the arc pct name as it becomes the kernel PMU
	 * event prefix.
	 */
	return perf_pmu_register(&arc_pmu->pmu, "arc_pct", PERF_TYPE_RAW);
}

static const struct of_device_id arc_pmu_match[] = {
	{ .compatible = "snps,arc700-pct" },
	{ .compatible = "snps,archs-pct" },
	{},
};
MODULE_DEVICE_TABLE(of, arc_pmu_match);

static struct platform_driver arc_pmu_driver = {
	.driver	= {
		.name		= "arc-pct",
		.of_match_table = of_match_ptr(arc_pmu_match),
	},
	.probe		= arc_pmu_device_probe,
};

module_platform_driver(arc_pmu_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mischa Jonker <mjonker@synopsys.com>");
MODULE_DESCRIPTION("ARC PMU driver");