xref: /openbmc/linux/arch/arc/kernel/perf_event.c (revision cfbb9be8)
/*
 * Linux performance counter support for ARC700 series
 *
 * Copyright (C) 2013-2015 Synopsys, Inc. (www.synopsys.com)
 *
 * This code is inspired by the perf support of various other architectures.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <asm/arcregs.h>
#include <asm/stacktrace.h>

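/*
 * Global PMU description: number of counters, overflow IRQ line, widest
 * programmable period, and the mapping from generic perf hardware event
 * ids to ARC h/w condition indexes (-1 when the core lacks the condition;
 * filled in at probe time).
 */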
struct arc_pmu {
	struct pmu	pmu;
	unsigned int	irq;
	int		n_counters;
	u64		max_period;
	int		ev_hw_idx[PERF_COUNT_ARC_HW_MAX];
};

struct arc_pmu_cpu {
	/*
	 * A set bit at an index means the corresponding counter is in use
	 * by an event; a clear bit means the counter is free.
	 */
	unsigned long	used_mask[BITS_TO_LONGS(ARC_PERF_MAX_COUNTERS)];

	/*
	 * The events that are active on the PMU for the given index.
	 */
	struct perf_event *act_counter[ARC_PERF_MAX_COUNTERS];
};

struct arc_callchain_trace {
	int depth;
	void *perf_stuff;
};

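/*
 * Unwinder callback: record one return address per frame and stop the
 * walk once four kernel frames (depth 0..3) have been stored.
 */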
static int callchain_trace(unsigned int addr, void *data)
{
	struct arc_callchain_trace *ctrl = data;
	struct perf_callchain_entry_ctx *entry = ctrl->perf_stuff;

	perf_callchain_store(entry, addr);

	if (ctrl->depth++ < 3)
		return 0;

	return -1;
}

void
perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
	struct arc_callchain_trace ctrl = {
		.depth = 0,
		.perf_stuff = entry,
	};

	arc_unwind_core(NULL, regs, callchain_trace, &ctrl);
}

void
perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
	/*
	 * The user stack can't be unwound trivially with the kernel dwarf
	 * unwinder, so for now just record the user PC.
	 */
	perf_callchain_store(entry, instruction_pointer(regs));
}

static struct arc_pmu *arc_pmu;
static DEFINE_PER_CPU(struct arc_pmu_cpu, arc_pmu_cpu);

/* read counter #idx; note that counter# != event# on ARC! */
static uint64_t arc_pmu_read_counter(int idx)
{
	uint32_t tmp;
	uint64_t result;

	/*
	 * ARC supports making 'snapshots' of the counters, so we don't
	 * need to care about counters wrapping to 0 underneath our feet.
	 */
	write_aux_reg(ARC_REG_PCT_INDEX, idx);
	tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
	write_aux_reg(ARC_REG_PCT_CONTROL, tmp | ARC_REG_PCT_CONTROL_SN);
	result = (uint64_t) (read_aux_reg(ARC_REG_PCT_SNAPH)) << 32;
	result |= read_aux_reg(ARC_REG_PCT_SNAPL);

	return result;
}

static void arc_perf_event_update(struct perf_event *event,
				  struct hw_perf_event *hwc, int idx)
{
	uint64_t prev_raw_count = local64_read(&hwc->prev_count);
	uint64_t new_raw_count = arc_pmu_read_counter(idx);
	int64_t delta = new_raw_count - prev_raw_count;

	/*
	 * We aren't afraid of hwc->prev_count changing beneath our feet
	 * because there is no way for this function to be re-entered for
	 * the same event.
	 */
	local64_set(&hwc->prev_count, new_raw_count);
	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);
}

static void arc_pmu_read(struct perf_event *event)
{
	arc_perf_event_update(event, &event->hw, event->hw.idx);
}

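/*
 * Decode a PERF_TYPE_HW_CACHE config: the generic perf ABI packs the
 * cache id in bits 0-7, the op id in bits 8-15 and the result id in
 * bits 16-23 of event->attr.config.
 */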
static int arc_pmu_cache_event(u64 config)
{
	unsigned int cache_type, cache_op, cache_result;
	int ret;

	cache_type	= (config >>  0) & 0xff;
	cache_op	= (config >>  8) & 0xff;
	cache_result	= (config >> 16) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ret = arc_pmu_cache_map[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	pr_debug("init cache event: type/op/result %d/%d/%d with h/w %d \'%s\'\n",
		 cache_type, cache_op, cache_result, ret,
		 arc_pmu_ev_hw_map[ret]);

	return ret;
}

/* Initializes the hw_perf_event structure if the event is supported. */
static int arc_pmu_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int ret;

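	/*
	 * Counting (non-sampling) events get the widest possible period,
	 * so the same reload/update bookkeeping serves both counting and
	 * sampling events.
	 */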
	if (!is_sampling_event(event)) {
		hwc->sample_period = arc_pmu->max_period;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	hwc->config = 0;

	if (is_isa_arcv2()) {
		/* "exclude user" means "count only kernel" */
		if (event->attr.exclude_user)
			hwc->config |= ARC_REG_PCT_CONFIG_KERN;

		/* "exclude kernel" means "count only user" */
		if (event->attr.exclude_kernel)
			hwc->config |= ARC_REG_PCT_CONFIG_USER;
	}

	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		if (event->attr.config >= PERF_COUNT_HW_MAX)
			return -ENOENT;
		if (arc_pmu->ev_hw_idx[event->attr.config] < 0)
			return -ENOENT;
		hwc->config |= arc_pmu->ev_hw_idx[event->attr.config];
		pr_debug("init event %d with h/w %08x \'%s\'\n",
			 (int)event->attr.config, (int)hwc->config,
			 arc_pmu_ev_hw_map[event->attr.config]);
		return 0;

	case PERF_TYPE_HW_CACHE:
		ret = arc_pmu_cache_event(event->attr.config);
		if (ret < 0)
			return ret;
		hwc->config |= arc_pmu->ev_hw_idx[ret];
		pr_debug("init cache event with h/w %08x \'%s\'\n",
			 (int)hwc->config, arc_pmu_ev_hw_map[ret]);
		return 0;
	default:
		return -ENOENT;
	}
}

/* starts all counters */
static void arc_pmu_enable(struct pmu *pmu)
{
	uint32_t tmp;

	tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
	write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x1);
}

/* stops all counters */
static void arc_pmu_disable(struct pmu *pmu)
{
	uint32_t tmp;

	tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
	write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x0);
}

static int arc_pmu_event_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int idx = hwc->idx;
	int overflow = 0;
	u64 value;

	if (unlikely(left <= -period)) {
		/* left underflowed by more than period. */
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	} else if (unlikely(left <= 0)) {
		/* left underflowed by less than period. */
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	}

	if (left > arc_pmu->max_period)
		left = arc_pmu->max_period;

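	/*
	 * The counter counts up: program it with (max_period - left) so
	 * it hits the overflow threshold after 'left' more events.
	 */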
	value = arc_pmu->max_period - left;
	local64_set(&hwc->prev_count, value);

	/* Select counter */
	write_aux_reg(ARC_REG_PCT_INDEX, idx);

	/* Write value */
	write_aux_reg(ARC_REG_PCT_COUNTL, (u32)value);
	write_aux_reg(ARC_REG_PCT_COUNTH, (value >> 32));

	perf_event_update_userpage(event);

	return overflow;
}

/*
 * Assigns hardware counter to hardware condition.
 * Note that there is no separate start/stop mechanism;
 * stopping is achieved by assigning the 'never' condition.
 */
static void arc_pmu_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (WARN_ON_ONCE(idx == -1))
		return;

	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;

	arc_pmu_event_set_period(event);

	/* Enable interrupt for this counter */
	if (is_sampling_event(event))
		write_aux_reg(ARC_REG_PCT_INT_CTRL,
			      read_aux_reg(ARC_REG_PCT_INT_CTRL) | (1 << idx));

	/* enable ARC pmu here */
	write_aux_reg(ARC_REG_PCT_INDEX, idx);		/* counter # */
	write_aux_reg(ARC_REG_PCT_CONFIG, hwc->config);	/* condition */
}

static void arc_pmu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	/* Disable interrupt for this counter */
	if (is_sampling_event(event)) {
		/*
		 * Reset the interrupt flag by writing a 1. This is
		 * required to make sure no pending interrupt is left.
		 */
		write_aux_reg(ARC_REG_PCT_INT_ACT, 1 << idx);
		write_aux_reg(ARC_REG_PCT_INT_CTRL,
			      read_aux_reg(ARC_REG_PCT_INT_CTRL) & ~(1 << idx));
	}

	if (!(event->hw.state & PERF_HES_STOPPED)) {
		/* stop ARC pmu here */
		write_aux_reg(ARC_REG_PCT_INDEX, idx);

		/* condition code #0 is always "never" */
		write_aux_reg(ARC_REG_PCT_CONFIG, 0);

		event->hw.state |= PERF_HES_STOPPED;
	}

	if ((flags & PERF_EF_UPDATE) &&
	    !(event->hw.state & PERF_HES_UPTODATE)) {
		arc_perf_event_update(event, &event->hw, idx);
		event->hw.state |= PERF_HES_UPTODATE;
	}
}

static void arc_pmu_del(struct perf_event *event, int flags)
{
	struct arc_pmu_cpu *pmu_cpu = this_cpu_ptr(&arc_pmu_cpu);

	arc_pmu_stop(event, PERF_EF_UPDATE);
	__clear_bit(event->hw.idx, pmu_cpu->used_mask);

	pmu_cpu->act_counter[event->hw.idx] = NULL;

	perf_event_update_userpage(event);
}

/* allocate hardware counter and optionally start counting */
static int arc_pmu_add(struct perf_event *event, int flags)
{
	struct arc_pmu_cpu *pmu_cpu = this_cpu_ptr(&arc_pmu_cpu);
	struct hw_perf_event *hwc = &event->hw;
	int idx;

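	/* Grab the first free counter on this CPU. */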
	idx = ffz(pmu_cpu->used_mask[0]);
	if (idx == arc_pmu->n_counters)
		return -EAGAIN;

	__set_bit(idx, pmu_cpu->used_mask);
	hwc->idx = idx;

	write_aux_reg(ARC_REG_PCT_INDEX, idx);

	pmu_cpu->act_counter[idx] = event;

	if (is_sampling_event(event)) {
		/* Mimic full counter overflow as other arches do */
		write_aux_reg(ARC_REG_PCT_INT_CNTL, (u32)arc_pmu->max_period);
		write_aux_reg(ARC_REG_PCT_INT_CNTH,
			      (arc_pmu->max_period >> 32));
	}

	write_aux_reg(ARC_REG_PCT_CONFIG, 0);
	write_aux_reg(ARC_REG_PCT_COUNTL, 0);
	write_aux_reg(ARC_REG_PCT_COUNTH, 0);
	local64_set(&hwc->prev_count, 0);

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (flags & PERF_EF_START)
		arc_pmu_start(event, PERF_EF_RELOAD);

	perf_event_update_userpage(event);

	return 0;
}

#ifdef CONFIG_ISA_ARCV2
static irqreturn_t arc_pmu_intr(int irq, void *dev)
{
	struct perf_sample_data data;
	struct arc_pmu_cpu *pmu_cpu = this_cpu_ptr(&arc_pmu_cpu);
	struct pt_regs *regs;
	unsigned int active_ints;
	int idx;

	arc_pmu_disable(&arc_pmu->pmu);

	active_ints = read_aux_reg(ARC_REG_PCT_INT_ACT);
	if (!active_ints)
		goto done;

	regs = get_irq_regs();

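	/*
	 * A single IRQ may signal several overflowed counters; service
	 * every counter whose "interrupt active" bit is set.
	 */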
	do {
		struct perf_event *event;
		struct hw_perf_event *hwc;

		idx = __ffs(active_ints);

		/* Reset the interrupt flag by writing a 1 */
		write_aux_reg(ARC_REG_PCT_INT_ACT, 1 << idx);

		/*
		 * When an "interrupt active" bit is reset, the corresponding
		 * "interrupt enable" bit gets automatically reset as well,
		 * so we need to re-enable the interrupt for this counter.
		 */
		write_aux_reg(ARC_REG_PCT_INT_CTRL,
			read_aux_reg(ARC_REG_PCT_INT_CTRL) | (1 << idx));

		event = pmu_cpu->act_counter[idx];
		hwc = &event->hw;

		WARN_ON_ONCE(hwc->idx != idx);

		arc_perf_event_update(event, &event->hw, event->hw.idx);
		perf_sample_data_init(&data, 0, hwc->last_period);
		if (arc_pmu_event_set_period(event)) {
			if (perf_event_overflow(event, &data, regs))
				arc_pmu_stop(event, 0);
		}

		active_ints &= ~(1U << idx);
	} while (active_ints);

done:
	arc_pmu_enable(&arc_pmu->pmu);

	return IRQ_HANDLED;
}
#else

static irqreturn_t arc_pmu_intr(int irq, void *dev)
{
	return IRQ_NONE;
}

#endif /* CONFIG_ISA_ARCV2 */

static void arc_cpu_pmu_irq_init(void *data)
{
	int irq = *(int *)data;

	enable_percpu_irq(irq, IRQ_TYPE_NONE);

	/* Clear all pending interrupt flags */
	write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff);
}

static int arc_pmu_device_probe(struct platform_device *pdev)
{
	struct arc_reg_pct_build pct_bcr;
	struct arc_reg_cc_build cc_bcr;
	int i, j, has_interrupts;
	int counter_size;	/* in bits */

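	/*
	 * h/w condition names are up to eight characters, read back as two
	 * 32-bit words; the sentinel byte keeps str[] NUL-terminated.
	 */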
	union cc_name {
		struct {
			uint32_t word0, word1;
			char sentinel;
		} indiv;
		char str[9];
	} cc_name;

	READ_BCR(ARC_REG_PCT_BUILD, pct_bcr);
	if (!pct_bcr.v) {
		pr_err("This core does not have performance counters!\n");
		return -ENODEV;
	}
	BUILD_BUG_ON(ARC_PERF_MAX_COUNTERS > 32);
	BUG_ON(pct_bcr.c > ARC_PERF_MAX_COUNTERS);

	READ_BCR(ARC_REG_CC_BUILD, cc_bcr);
	BUG_ON(!cc_bcr.v); /* Counters exist but no countable conditions? */

	arc_pmu = devm_kzalloc(&pdev->dev, sizeof(struct arc_pmu), GFP_KERNEL);
	if (!arc_pmu)
		return -ENOMEM;

	has_interrupts = is_isa_arcv2() ? pct_bcr.i : 0;

	arc_pmu->n_counters = pct_bcr.c;
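	/* BCR field 's' encodes extra 16-bit halves: 32, 48 or 64 bit counters */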
	counter_size = 32 + (pct_bcr.s << 4);

	arc_pmu->max_period = (1ULL << counter_size) / 2 - 1ULL;

	pr_info("ARC perf\t: %d counters (%d bits), %d conditions%s\n",
		arc_pmu->n_counters, counter_size, cc_bcr.c,
		has_interrupts ? ", [overflow IRQ support]" : "");

	cc_name.str[8] = 0;
	for (i = 0; i < PERF_COUNT_ARC_HW_MAX; i++)
		arc_pmu->ev_hw_idx[i] = -1;

	/* loop through all available h/w condition indexes */
	for (j = 0; j < cc_bcr.c; j++) {
		write_aux_reg(ARC_REG_CC_INDEX, j);
		cc_name.indiv.word0 = read_aux_reg(ARC_REG_CC_NAME0);
		cc_name.indiv.word1 = read_aux_reg(ARC_REG_CC_NAME1);

		/* See if it has been mapped to a perf event_id */
		for (i = 0; i < ARRAY_SIZE(arc_pmu_ev_hw_map); i++) {
			if (arc_pmu_ev_hw_map[i] &&
			    !strcmp(arc_pmu_ev_hw_map[i], cc_name.str) &&
			    strlen(arc_pmu_ev_hw_map[i])) {
				pr_debug("mapping perf event %2d to h/w event \'%8s\' (idx %d)\n",
					 i, cc_name.str, j);
				arc_pmu->ev_hw_idx[i] = j;
			}
		}
	}

	arc_pmu->pmu = (struct pmu) {
		.pmu_enable	= arc_pmu_enable,
		.pmu_disable	= arc_pmu_disable,
		.event_init	= arc_pmu_event_init,
		.add		= arc_pmu_add,
		.del		= arc_pmu_del,
		.start		= arc_pmu_start,
		.stop		= arc_pmu_stop,
		.read		= arc_pmu_read,
	};

	if (has_interrupts) {
		int irq = platform_get_irq(pdev, 0);
		int err;

		if (irq < 0) {
			pr_err("Cannot get IRQ number for the platform\n");
			return -ENODEV;
		}

		arc_pmu->irq = irq;

		/* intc map function ensures irq_set_percpu_devid() called */
		err = request_percpu_irq(irq, arc_pmu_intr, "ARC perf counters",
					 this_cpu_ptr(&arc_pmu_cpu));
		if (err)
			return err;

		on_each_cpu(arc_cpu_pmu_irq_init, &irq, 1);
	} else {
		arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
	}

	return perf_pmu_register(&arc_pmu->pmu, pdev->name, PERF_TYPE_RAW);
}

#ifdef CONFIG_OF
static const struct of_device_id arc_pmu_match[] = {
	{ .compatible = "snps,arc700-pct" },
	{ .compatible = "snps,archs-pct" },
	{},
};
MODULE_DEVICE_TABLE(of, arc_pmu_match);
#endif

static struct platform_driver arc_pmu_driver = {
	.driver	= {
		.name		= "arc-pct",
		.of_match_table = of_match_ptr(arc_pmu_match),
	},
	.probe		= arc_pmu_device_probe,
};

module_platform_driver(arc_pmu_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mischa Jonker <mjonker@synopsys.com>");
MODULE_DESCRIPTION("ARC PMU driver");