/*
 * Linux performance counter support for MIPS.
 *
 * Copyright (C) 2010 MIPS Technologies, Inc.
 * Author: Deng-Cheng Zhu
 *
 * This code is based on the implementation for ARM, which is in turn
 * based on the sparc64 perf event code and the x86 code. Performance
 * counter access is based on the MIPS Oprofile code, and the callchain
 * support is based on the code of MIPS stacktrace.c.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>

#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/stacktrace.h>
#include <asm/time.h> /* For perf_irq */

/* These are for 32-bit counters. For 64-bit ones, define them accordingly. */
#define MAX_PERIOD	((1ULL << 32) - 1)
#define VALID_COUNT	0x7fffffff
#define TOTAL_BITS	32
#define HIGHEST_BIT	31
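
/*
 * With these values the hardware is programmed with at most 31 bits of a
 * count (VALID_COUNT); the would-be bit 31 (HIGHEST_BIT) is tracked in
 * software (see cpu_hw_events::msbs below), while sampling periods may
 * still span the full 32-bit range (MAX_PERIOD).
 */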

#define MIPS_MAX_HWEVENTS 4

struct cpu_hw_events {
	/* Array of events on this cpu. */
	struct perf_event	*events[MIPS_MAX_HWEVENTS];

	/*
	 * Set the bit (indexed by the counter number) when the counter
	 * is used for an event.
	 */
	unsigned long		used_mask[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];

	/*
	 * The borrowed MSB for the performance counter. A MIPS performance
	 * counter uses its bit 31 (for 32-bit counters) or bit 63 (for
	 * 64-bit counters) to decide whether a counter overflow should be
	 * signaled. So we borrow a separate MSB for each counter here to
	 * keep the bookkeeping simple.
	 */
	unsigned long		msbs[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];

	/*
	 * Software copy of the control register for each performance counter.
	 * MIPS CPUs vary in their performance counters: different
	 * implementations use this field differently, and some do not use
	 * it at all.
	 */
	unsigned int		saved_ctrl[MIPS_MAX_HWEVENTS];
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
	.saved_ctrl = {0},
};
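
/*
 * Not part of the original file: a minimal sketch (name hypothetical) of
 * the borrowed-MSB reassembly that mipspmu_event_update() performs below.
 * The hardware holds only the low 31 bits of the logical count; the
 * logical bit 31 lives in cpu_hw_events::msbs.
 */
static inline u64 example_reassemble_count(u64 hw_count, int borrowed_msb)
{
	/* Combine the 31 hardware bits with the software-tracked MSB. */
	return (hw_count & VALID_COUNT) | ((u64)borrowed_msb << HIGHEST_BIT);
}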

/* The description of MIPS performance events. */
struct mips_perf_event {
	unsigned int event_id;
	/*
	 * MIPS performance counters are indexed starting from 0.
	 * CNTR_EVEN indicates that only even-numbered counters may be
	 * used for this event; CNTR_ODD, only odd-numbered ones.
	 */
	unsigned int cntr_mask;
	#define CNTR_EVEN	0x55555555
	#define CNTR_ODD	0xaaaaaaaa
#ifdef CONFIG_MIPS_MT_SMP
	enum {
		T  = 0,
		V  = 1,
		P  = 2,
	} range;
#else
	#define T
	#define V
	#define P
#endif
};

static struct mips_perf_event raw_event;
static DEFINE_MUTEX(raw_event_mutex);

#define UNSUPPORTED_PERF_EVENT_ID 0xffffffff
#define C(x) PERF_COUNT_HW_CACHE_##x

struct mips_pmu {
	const char	*name;
	int		irq;
	irqreturn_t	(*handle_irq)(int irq, void *dev);
	int		(*handle_shared_irq)(void);
	void		(*start)(void);
	void		(*stop)(void);
	int		(*alloc_counter)(struct cpu_hw_events *cpuc,
					struct hw_perf_event *hwc);
	u64		(*read_counter)(unsigned int idx);
	void		(*write_counter)(unsigned int idx, u64 val);
	void		(*enable_event)(struct hw_perf_event *evt, int idx);
	void		(*disable_event)(int idx);
	const struct mips_perf_event *(*map_raw_event)(u64 config);
	const struct mips_perf_event (*general_event_map)[PERF_COUNT_HW_MAX];
	const struct mips_perf_event (*cache_event_map)
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
	unsigned int	num_counters;
};

static const struct mips_pmu *mipspmu;
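
/*
 * Not part of the original file: each CPU family's low-level code (e.g.
 * perf_event_mipsxx.c, included near the bottom of this file) fills in
 * one of these operation tables and points mipspmu at it. A purely
 * illustrative shape (names and values hypothetical):
 *
 *	static const struct mips_pmu example_mips_pmu = {
 *		.name		= "mips/example",
 *		.irq		= -1,
 *		.num_counters	= 4,
 *		.read_counter	= example_read_counter,
 *		.write_counter	= example_write_counter,
 *		...
 *	};
 */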

static int
mipspmu_event_set_period(struct perf_event *event,
			struct hw_perf_event *hwc,
			int idx)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;
	u64 uleft;
	unsigned long flags;

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (left > (s64)MAX_PERIOD)
		left = MAX_PERIOD;

	local64_set(&hwc->prev_count, (u64)-left);

	local_irq_save(flags);
	uleft = (u64)(-left) & MAX_PERIOD;
	/*
	 * If the start value does not fit in the 31 hardware bits, keep
	 * the borrowed MSB in cpuc->msbs and program only the low bits.
	 */
	if (uleft > VALID_COUNT)
		set_bit(idx, cpuc->msbs);
	else
		clear_bit(idx, cpuc->msbs);
	mipspmu->write_counter(idx, (u64)(-left) & VALID_COUNT);
	local_irq_restore(flags);

	perf_event_update_userpage(event);

	return ret;
}
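
/*
 * Not part of the original file, a worked example of the arithmetic above:
 * with sample_period = 0x100000, left = 0x100000, so prev_count is set to
 * (u64)-0x100000 and uleft = (-0x100000 & MAX_PERIOD) = 0xfff00000. That
 * exceeds VALID_COUNT, so the borrowed MSB is set and the hardware counter
 * is written with (-0x100000 & VALID_COUNT) = 0x7ff00000. On counters that
 * raise their interrupt when bit 31 becomes set, the interrupt thus fires
 * after exactly 0x100000 (one sample_period of) events.
 */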

static void mipspmu_event_update(struct perf_event *event,
			struct hw_perf_event *hwc,
			int idx)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long flags;
	int shift = 64 - TOTAL_BITS;
	s64 prev_raw_count, new_raw_count;
	u64 delta;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	local_irq_save(flags);
	/*
	 * Reconstruct the logical counter value from the hardware count
	 * and the borrowed MSB. (The u64 casts avoid a signed shift into
	 * bit 31.)
	 */
	new_raw_count = mipspmu->read_counter(idx);
	if (new_raw_count & ((u64)test_bit(idx, cpuc->msbs) << HIGHEST_BIT)) {
		new_raw_count &= VALID_COUNT;
		clear_bit(idx, cpuc->msbs);
	} else
		new_raw_count |= ((u64)test_bit(idx, cpuc->msbs) << HIGHEST_BIT);
	local_irq_restore(flags);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
				new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);
}
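
/*
 * Not part of the original file, continuing the example above: when the
 * interrupt fires, the hardware reads back 0x80000000. The msbs bit is
 * set and hardware bit 31 is set, so the logical count becomes
 * 0x80000000 & VALID_COUNT = 0 and the msbs bit is cleared. delta is then
 * computed modulo 2^32 via the shift pair:
 * (0 - 0xfff00000) & 0xffffffff = 0x100000, i.e. exactly one
 * sample_period of events is accumulated into event->count.
 */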

static void mipspmu_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!mipspmu)
		return;

	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;

	/* Set the period for the event. */
	mipspmu_event_set_period(event, hwc, hwc->idx);

	/* Enable the event. */
	mipspmu->enable_event(hwc, hwc->idx);
}

static void mipspmu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!mipspmu)
		return;

	if (!(hwc->state & PERF_HES_STOPPED)) {
		/* We are working on a local event. */
		mipspmu->disable_event(hwc->idx);
		barrier();
		mipspmu_event_update(event, hwc, hwc->idx);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static int mipspmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	perf_pmu_disable(event->pmu);

	/* Look for a free counter for this event. */
	idx = mipspmu->alloc_counter(cpuc, hwc);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	/*
	 * If there is an event in the counter we are going to use then
	 * make sure it is disabled.
	 */
	event->hw.idx = idx;
	mipspmu->disable_event(idx);
	cpuc->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		mipspmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	perf_pmu_enable(event->pmu);
	return err;
}

static void mipspmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	WARN_ON(idx < 0 || idx >= mipspmu->num_counters);

	mipspmu_stop(event, PERF_EF_UPDATE);
	cpuc->events[idx] = NULL;
	clear_bit(idx, cpuc->used_mask);

	perf_event_update_userpage(event);
}

static void mipspmu_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/* Don't read disabled counters! */
	if (hwc->idx < 0)
		return;

	mipspmu_event_update(event, hwc, hwc->idx);
}

static void mipspmu_enable(struct pmu *pmu)
{
	if (mipspmu)
		mipspmu->start();
}

static void mipspmu_disable(struct pmu *pmu)
{
	if (mipspmu)
		mipspmu->stop();
}

static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmu_reserve_mutex);
static int (*save_perf_irq)(void);

static int mipspmu_get_irq(void)
{
	int err;

	if (mipspmu->irq >= 0) {
		/* Request my own irq handler. */
		err = request_irq(mipspmu->irq, mipspmu->handle_irq,
			IRQF_DISABLED | IRQF_NOBALANCING,
			"mips_perf_pmu", NULL);
		if (err) {
			pr_warning("Unable to request IRQ%d for MIPS performance counters!\n",
				mipspmu->irq);
		}
	} else if (cp0_perfcount_irq < 0) {
		/*
		 * We are sharing the irq number with the timer interrupt.
		 */
		save_perf_irq = perf_irq;
		perf_irq = mipspmu->handle_shared_irq;
		err = 0;
	} else {
		pr_warning("The platform hasn't properly defined its interrupt controller.\n");
		err = -ENOENT;
	}

	return err;
}

static void mipspmu_free_irq(void)
{
	if (mipspmu->irq >= 0)
		free_irq(mipspmu->irq, NULL);
	else if (cp0_perfcount_irq < 0)
		perf_irq = save_perf_irq;
}

/*
 * mipsxx/rm9000/loongson2 have different performance counters; each has
 * its own specific low-level init routine.
 */
static void reset_counters(void *arg);
static int __hw_perf_event_init(struct perf_event *event);

static void hw_perf_event_destroy(struct perf_event *event)
{
	if (atomic_dec_and_mutex_lock(&active_events,
				&pmu_reserve_mutex)) {
		/*
		 * We must not call the destroy function with interrupts
		 * disabled.
		 */
		on_each_cpu(reset_counters,
			(void *)(long)mipspmu->num_counters, 1);
		mipspmu_free_irq();
		mutex_unlock(&pmu_reserve_mutex);
	}
}

static int mipspmu_event_init(struct perf_event *event)
{
	int err = 0;

	switch (event->attr.type) {
	case PERF_TYPE_RAW:
	case PERF_TYPE_HARDWARE:
	case PERF_TYPE_HW_CACHE:
		break;

	default:
		return -ENOENT;
	}

	if (!mipspmu || event->cpu >= nr_cpumask_bits ||
		(event->cpu >= 0 && !cpu_online(event->cpu)))
		return -ENODEV;

	if (!atomic_inc_not_zero(&active_events)) {
		if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
			atomic_dec(&active_events);
			return -ENOSPC;
		}

		mutex_lock(&pmu_reserve_mutex);
		if (atomic_read(&active_events) == 0)
			err = mipspmu_get_irq();

		if (!err)
			atomic_inc(&active_events);
		mutex_unlock(&pmu_reserve_mutex);
	}

	if (err)
		return err;

	err = __hw_perf_event_init(event);
	if (err)
		hw_perf_event_destroy(event);

	return err;
}

static struct pmu pmu = {
	.pmu_enable	= mipspmu_enable,
	.pmu_disable	= mipspmu_disable,
	.event_init	= mipspmu_event_init,
	.add		= mipspmu_add,
	.del		= mipspmu_del,
	.start		= mipspmu_start,
	.stop		= mipspmu_stop,
	.read		= mipspmu_read,
};
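
/*
 * Not part of the original file: the perf core drives these callbacks
 * roughly as follows. event_init() runs once when the event is created;
 * add()/start() run when the event is scheduled onto this cpu and
 * stop()/del() when it is scheduled out; read() folds the current
 * hardware count into event->count; pmu_disable()/pmu_enable() bracket
 * groups of such operations.
 */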

static inline unsigned int
mipspmu_perf_event_encode(const struct mips_perf_event *pev)
{
/*
 * Top 8 bits for range, next 16 bits for cntr_mask, lowest 8 bits for
 * event_id.
 */
#ifdef CONFIG_MIPS_MT_SMP
	return ((unsigned int)pev->range << 24) |
		(pev->cntr_mask & 0xffff00) |
		(pev->event_id & 0xff);
#else
	return (pev->cntr_mask & 0xffff00) |
		(pev->event_id & 0xff);
#endif
}
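
/*
 * Not part of the original file, an illustrative encoding (event number
 * hypothetical): an event with event_id 0x15 restricted to even counters
 * packs as (CNTR_EVEN & 0xffff00) | 0x15 = 0x555515, plus range << 24 on
 * CONFIG_MIPS_MT_SMP kernels. The CPU-specific map_raw_event() and event
 * maps are expected to produce and consume this same layout.
 */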

static const struct mips_perf_event *
mipspmu_map_general_event(int idx)
{
	const struct mips_perf_event *pev;

	pev = &(*mipspmu->general_event_map)[idx];
	if (pev->event_id == UNSUPPORTED_PERF_EVENT_ID)
		return ERR_PTR(-EOPNOTSUPP);

	return pev;
}

static const struct mips_perf_event *
mipspmu_map_cache_event(u64 config)
{
	unsigned int cache_type, cache_op, cache_result;
	const struct mips_perf_event *pev;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return ERR_PTR(-EINVAL);

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return ERR_PTR(-EINVAL);

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return ERR_PTR(-EINVAL);

	pev = &((*mipspmu->cache_event_map)
					[cache_type]
					[cache_op]
					[cache_result]);

	if (pev->event_id == UNSUPPORTED_PERF_EVENT_ID)
		return ERR_PTR(-EOPNOTSUPP);

	return pev;
}
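
/*
 * Not part of the original file: the config layout decoded above matches
 * the generic perf cache-event packing. A minimal sketch (name
 * hypothetical) building the config for an L1-D load-miss event:
 */
static inline u64 example_l1d_read_miss_config(void)
{
	/* cache_type in bits 0-7, cache_op in 8-15, cache_result in 16-23 */
	return (u64)C(L1D) |
		((u64)C(OP_READ) << 8) |
		((u64)C(RESULT_MISS) << 16);
}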

static int validate_event(struct cpu_hw_events *cpuc,
	       struct perf_event *event)
{
	struct hw_perf_event fake_hwc = event->hw;

	/*
	 * Allow events from other PMUs and OFF/disabled events in a mixed
	 * group; return 1 so they pass validation.
	 */
	if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF)
		return 1;

	return mipspmu->alloc_counter(cpuc, &fake_hwc) >= 0;
}

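/*
 * Not part of the original file: validate_group() below is a dry run. It
 * allocates counters from a zeroed, stack-local fake cpu_hw_events to
 * check that the leader, all its siblings, and the new event could be
 * scheduled together, without disturbing the real per-cpu state.
 */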
static int validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct cpu_hw_events fake_cpuc;

	memset(&fake_cpuc, 0, sizeof(fake_cpuc));

	if (!validate_event(&fake_cpuc, leader))
		return -ENOSPC;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(&fake_cpuc, sibling))
			return -ENOSPC;
	}

	if (!validate_event(&fake_cpuc, event))
		return -ENOSPC;

	return 0;
}

/* This is needed by specific irq handlers in perf_event_*.c */
static void
handle_associated_event(struct cpu_hw_events *cpuc,
	int idx, struct perf_sample_data *data, struct pt_regs *regs)
{
	struct perf_event *event = cpuc->events[idx];
	struct hw_perf_event *hwc = &event->hw;

	mipspmu_event_update(event, hwc, idx);
	data->period = event->hw.last_period;
	if (!mipspmu_event_set_period(event, hwc, idx))
		return;

	/* perf_event_overflow() returns nonzero when the event must stop. */
	if (perf_event_overflow(event, data, regs))
		mipspmu->disable_event(idx);
}

#include "perf_event_mipsxx.c"

/* Callchain handling code. */

/*
 * Leave the userspace callchain empty for now. Once we find a way to
 * trace the user stack callchains, we will add it here.
 */
void perf_callchain_user(struct perf_callchain_entry *entry,
		    struct pt_regs *regs)
{
}

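/*
 * Not part of the original file: save_raw_perf_callchain() below is a
 * heuristic fallback. It walks the raw stack words upward from reg29
 * ($sp) and records everything that looks like a kernel text address,
 * which may include stale saved values as well as real return addresses.
 */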
static void save_raw_perf_callchain(struct perf_callchain_entry *entry,
	unsigned long reg29)
{
	unsigned long *sp = (unsigned long *)reg29;
	unsigned long addr;

	while (!kstack_end(sp)) {
		addr = *sp++;
		if (__kernel_text_address(addr)) {
			perf_callchain_store(entry, addr);
			if (entry->nr >= PERF_MAX_STACK_DEPTH)
				break;
		}
	}
}

void perf_callchain_kernel(struct perf_callchain_entry *entry,
		      struct pt_regs *regs)
{
	unsigned long sp = regs->regs[29];	/* $29 is the stack pointer. */
#ifdef CONFIG_KALLSYMS
	unsigned long ra = regs->regs[31];	/* $31 is the return address. */
	unsigned long pc = regs->cp0_epc;

	/* If we cannot unwind properly, fall back to scanning the raw stack. */
	if (raw_show_trace || !__kernel_text_address(pc)) {
		unsigned long stack_page =
			(unsigned long)task_stack_page(current);
		if (stack_page && sp >= stack_page &&
		    sp <= stack_page + THREAD_SIZE - 32)
			save_raw_perf_callchain(entry, sp);
		return;
	}
	do {
		perf_callchain_store(entry, pc);
		if (entry->nr >= PERF_MAX_STACK_DEPTH)
			break;
		pc = unwind_stack(current, &sp, pc, &ra);
	} while (pc);
#else
	save_raw_perf_callchain(entry, sp);
#endif
}
587