1 /*
2  * Linux performance counter support for MIPS.
3  *
4  * Copyright (C) 2010 MIPS Technologies, Inc.
5  * Copyright (C) 2011 Cavium Networks, Inc.
6  * Author: Deng-Cheng Zhu
7  *
8  * This code is based on the implementation for ARM, which is in turn
9  * based on the sparc64 perf event code and the x86 code. Performance
10  * counter access is based on the MIPS Oprofile code. And the callchain
11  * support references the code of MIPS stacktrace.c.
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of the GNU General Public License version 2 as
15  * published by the Free Software Foundation.
16  */
17 
18 #include <linux/cpumask.h>
19 #include <linux/interrupt.h>
20 #include <linux/smp.h>
21 #include <linux/kernel.h>
22 #include <linux/perf_event.h>
23 #include <linux/uaccess.h>
24 
25 #include <asm/irq.h>
26 #include <asm/irq_regs.h>
27 #include <asm/stacktrace.h>
28 #include <asm/time.h> /* For perf_irq */
29 
30 #define MIPS_MAX_HWEVENTS 4
31 
32 struct cpu_hw_events {
33 	/* Array of events on this cpu. */
34 	struct perf_event	*events[MIPS_MAX_HWEVENTS];
35 
36 	/*
37 	 * Set the bit (indexed by the counter number) when the counter
38 	 * is used for an event.
39 	 */
40 	unsigned long		used_mask[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];
41 
42 	/*
43 	 * Software copy of the control register for each performance counter.
	 * The number of performance counters varies between MIPS CPUs, and
	 * different implementations may use this field differently, or not
	 * at all.
46 	 */
47 	unsigned int		saved_ctrl[MIPS_MAX_HWEVENTS];
48 };
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
50 	.saved_ctrl = {0},
51 };
52 
53 /* The description of MIPS performance events. */
54 struct mips_perf_event {
55 	unsigned int event_id;
	/*
	 * A bitmask of the counter indexes (starting from 0) on which this
	 * event can be counted. CNTR_EVEN selects the even-numbered
	 * counters, CNTR_ODD the odd-numbered ones, and CNTR_ALL any
	 * counter.
	 */
61 	unsigned int cntr_mask;
62 	#define CNTR_EVEN	0x55555555
63 	#define CNTR_ODD	0xaaaaaaaa
64 	#define CNTR_ALL	0xffffffff
65 #ifdef CONFIG_MIPS_MT_SMP
66 	enum {
67 		T  = 0,
68 		V  = 1,
69 		P  = 2,
70 	} range;
71 #else
72 	#define T
73 	#define V
74 	#define P
75 #endif
76 };
77 
78 static struct mips_perf_event raw_event;
79 static DEFINE_MUTEX(raw_event_mutex);
80 
81 #define UNSUPPORTED_PERF_EVENT_ID 0xffffffff
82 #define C(x) PERF_COUNT_HW_CACHE_##x
83 
84 struct mips_pmu {
85 	u64		max_period;
86 	u64		valid_count;
87 	u64		overflow;
88 	const char	*name;
89 	int		irq;
90 	u64		(*read_counter)(unsigned int idx);
91 	void		(*write_counter)(unsigned int idx, u64 val);
92 	const struct mips_perf_event *(*map_raw_event)(u64 config);
93 	const struct mips_perf_event (*general_event_map)[PERF_COUNT_HW_MAX];
94 	const struct mips_perf_event (*cache_event_map)
95 				[PERF_COUNT_HW_CACHE_MAX]
96 				[PERF_COUNT_HW_CACHE_OP_MAX]
97 				[PERF_COUNT_HW_CACHE_RESULT_MAX];
98 	unsigned int	num_counters;
99 };
100 
101 static struct mips_pmu mipspmu;
102 
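/* Config1.PC (bit 4): performance counter registers are implemented. */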
103 #define M_CONFIG1_PC	(1 << 4)
104 
105 #define M_PERFCTL_EXL			(1      <<  0)
106 #define M_PERFCTL_KERNEL		(1      <<  1)
107 #define M_PERFCTL_SUPERVISOR		(1      <<  2)
108 #define M_PERFCTL_USER			(1      <<  3)
109 #define M_PERFCTL_INTERRUPT_ENABLE	(1      <<  4)
110 #define M_PERFCTL_EVENT(event)		(((event) & 0x3ff)  << 5)
111 #define M_PERFCTL_VPEID(vpe)		((vpe)    << 16)
112 #define M_PERFCTL_MT_EN(filter)		((filter) << 20)
113 #define    M_TC_EN_ALL			M_PERFCTL_MT_EN(0)
114 #define    M_TC_EN_VPE			M_PERFCTL_MT_EN(1)
115 #define    M_TC_EN_TC			M_PERFCTL_MT_EN(2)
116 #define M_PERFCTL_TCID(tcid)		((tcid)   << 22)
117 #define M_PERFCTL_WIDE			(1      << 30)
118 #define M_PERFCTL_MORE			(1      << 31)
119 
120 #define M_PERFCTL_COUNT_EVENT_WHENEVER	(M_PERFCTL_EXL |		\
121 					M_PERFCTL_KERNEL |		\
122 					M_PERFCTL_USER |		\
123 					M_PERFCTL_SUPERVISOR |		\
124 					M_PERFCTL_INTERRUPT_ENABLE)
125 
126 #ifdef CONFIG_MIPS_MT_SMP
127 #define M_PERFCTL_CONFIG_MASK		0x3fff801f
128 #else
129 #define M_PERFCTL_CONFIG_MASK		0x1f
130 #endif
131 #define M_PERFCTL_EVENT_MASK		0xfe0
132 
133 
134 #ifdef CONFIG_MIPS_MT_SMP
135 static int cpu_has_mipsmt_pertccounters;
136 
137 static DEFINE_RWLOCK(pmuint_rwlock);
138 
139 /*
140  * FIXME: For VSMP, vpe_id() is redefined for Perf-events, because
141  * cpu_data[cpuid].vpe_id reports 0 for _both_ CPUs.
142  */
143 #if defined(CONFIG_HW_PERF_EVENTS)
144 #define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
145 			0 : smp_processor_id())
146 #else
147 #define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
148 			0 : cpu_data[smp_processor_id()].vpe_id)
149 #endif
150 
151 /* Copied from op_model_mipsxx.c */
152 static unsigned int vpe_shift(void)
153 {
154 	if (num_possible_cpus() > 1)
155 		return 1;
156 
157 	return 0;
158 }
159 
160 static unsigned int counters_total_to_per_cpu(unsigned int counters)
161 {
162 	return counters >> vpe_shift();
163 }
164 
165 static unsigned int counters_per_cpu_to_total(unsigned int counters)
166 {
167 	return counters << vpe_shift();
168 }
169 
170 #else /* !CONFIG_MIPS_MT_SMP */
171 #define vpe_id()	0
172 
173 #endif /* CONFIG_MIPS_MT_SMP */
174 
175 static void resume_local_counters(void);
176 static void pause_local_counters(void);
177 static irqreturn_t mipsxx_pmu_handle_irq(int, void *);
178 static int mipsxx_pmu_handle_shared_irq(void);
179 
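/*
 * On VSMP, VPE 0 uses physical counters 0/1 and VPE 1 uses counters 2/3,
 * so rotate the perf-visible counter index onto the physical counter
 * owned by the current VPE.
 */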
180 static unsigned int mipsxx_pmu_swizzle_perf_idx(unsigned int idx)
181 {
182 	if (vpe_id() == 1)
183 		idx = (idx + 2) & 3;
184 	return idx;
185 }
186 
187 static u64 mipsxx_pmu_read_counter(unsigned int idx)
188 {
189 	idx = mipsxx_pmu_swizzle_perf_idx(idx);
190 
191 	switch (idx) {
192 	case 0:
193 		/*
		 * The counters are unsigned; we must cast to truncate
195 		 * off the high bits.
196 		 */
197 		return (u32)read_c0_perfcntr0();
198 	case 1:
199 		return (u32)read_c0_perfcntr1();
200 	case 2:
201 		return (u32)read_c0_perfcntr2();
202 	case 3:
203 		return (u32)read_c0_perfcntr3();
204 	default:
205 		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
206 		return 0;
207 	}
208 }
209 
210 static u64 mipsxx_pmu_read_counter_64(unsigned int idx)
211 {
212 	idx = mipsxx_pmu_swizzle_perf_idx(idx);
213 
214 	switch (idx) {
215 	case 0:
216 		return read_c0_perfcntr0_64();
217 	case 1:
218 		return read_c0_perfcntr1_64();
219 	case 2:
220 		return read_c0_perfcntr2_64();
221 	case 3:
222 		return read_c0_perfcntr3_64();
223 	default:
224 		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
225 		return 0;
226 	}
227 }
228 
229 static void mipsxx_pmu_write_counter(unsigned int idx, u64 val)
230 {
231 	idx = mipsxx_pmu_swizzle_perf_idx(idx);
232 
233 	switch (idx) {
234 	case 0:
235 		write_c0_perfcntr0(val);
236 		return;
237 	case 1:
238 		write_c0_perfcntr1(val);
239 		return;
240 	case 2:
241 		write_c0_perfcntr2(val);
242 		return;
243 	case 3:
244 		write_c0_perfcntr3(val);
245 		return;
246 	}
247 }
248 
249 static void mipsxx_pmu_write_counter_64(unsigned int idx, u64 val)
250 {
251 	idx = mipsxx_pmu_swizzle_perf_idx(idx);
252 
253 	switch (idx) {
254 	case 0:
255 		write_c0_perfcntr0_64(val);
256 		return;
257 	case 1:
258 		write_c0_perfcntr1_64(val);
259 		return;
260 	case 2:
261 		write_c0_perfcntr2_64(val);
262 		return;
263 	case 3:
264 		write_c0_perfcntr3_64(val);
265 		return;
266 	}
267 }
268 
269 static unsigned int mipsxx_pmu_read_control(unsigned int idx)
270 {
271 	idx = mipsxx_pmu_swizzle_perf_idx(idx);
272 
273 	switch (idx) {
274 	case 0:
275 		return read_c0_perfctrl0();
276 	case 1:
277 		return read_c0_perfctrl1();
278 	case 2:
279 		return read_c0_perfctrl2();
280 	case 3:
281 		return read_c0_perfctrl3();
282 	default:
283 		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
284 		return 0;
285 	}
286 }
287 
288 static void mipsxx_pmu_write_control(unsigned int idx, unsigned int val)
289 {
290 	idx = mipsxx_pmu_swizzle_perf_idx(idx);
291 
292 	switch (idx) {
293 	case 0:
294 		write_c0_perfctrl0(val);
295 		return;
296 	case 1:
297 		write_c0_perfctrl1(val);
298 		return;
299 	case 2:
300 		write_c0_perfctrl2(val);
301 		return;
302 	case 3:
303 		write_c0_perfctrl3(val);
304 		return;
305 	}
306 }
307 
308 static int mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc,
309 				    struct hw_perf_event *hwc)
310 {
311 	int i;
312 
313 	/*
	 * We only need to care about the counter mask here. The range has
	 * already been checked when the event was set up.
316 	 */
317 	unsigned long cntr_mask = (hwc->event_base >> 8) & 0xffff;
318 
319 	for (i = mipspmu.num_counters - 1; i >= 0; i--) {
320 		/*
		 * Note that some MIPS perf events can be counted by both
		 * even and odd counters, whereas many others can only be
		 * counted by even _or_ odd counters. This introduces an
		 * issue: when an event of the former kind occupies the
		 * counter that an event of the latter kind needs, counter
		 * allocation for the latter event fails, even though
		 * dynamically swapping the two would satisfy both. We
		 * leave this issue alone for now.
329 		 */
330 		if (test_bit(i, &cntr_mask) &&
331 			!test_and_set_bit(i, cpuc->used_mask))
332 			return i;
333 	}
334 
335 	return -EAGAIN;
336 }
337 
338 static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
339 {
340 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
341 
342 	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);
343 
344 	cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) |
345 		(evt->config_base & M_PERFCTL_CONFIG_MASK) |
		/* Make sure the interrupt is enabled. */
347 		M_PERFCTL_INTERRUPT_ENABLE;
348 	/*
349 	 * We do not actually let the counter run. Leave it until start().
350 	 */
351 }
352 
353 static void mipsxx_pmu_disable_event(int idx)
354 {
355 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
356 	unsigned long flags;
357 
358 	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);
359 
360 	local_irq_save(flags);
361 	cpuc->saved_ctrl[idx] = mipsxx_pmu_read_control(idx) &
362 		~M_PERFCTL_COUNT_EVENT_WHENEVER;
363 	mipsxx_pmu_write_control(idx, cpuc->saved_ctrl[idx]);
364 	local_irq_restore(flags);
365 }
366 
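/*
 * Program the counter so that it overflows after "left" more events:
 * the hardware counter is preloaded with (overflow - left), and the
 * same value is recorded in prev_count so that mipspmu_event_update()
 * can later compute the delta.
 */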
367 static int mipspmu_event_set_period(struct perf_event *event,
368 				    struct hw_perf_event *hwc,
369 				    int idx)
370 {
371 	u64 left = local64_read(&hwc->period_left);
372 	u64 period = hwc->sample_period;
373 	int ret = 0;
374 
375 	if (unlikely((left + period) & (1ULL << 63))) {
376 		/* left underflowed by more than period. */
377 		left = period;
378 		local64_set(&hwc->period_left, left);
379 		hwc->last_period = period;
380 		ret = 1;
381 	} else	if (unlikely((left + period) <= period)) {
382 		/* left underflowed by less than period. */
383 		left += period;
384 		local64_set(&hwc->period_left, left);
385 		hwc->last_period = period;
386 		ret = 1;
387 	}
388 
389 	if (left > mipspmu.max_period) {
390 		left = mipspmu.max_period;
391 		local64_set(&hwc->period_left, left);
392 	}
393 
394 	local64_set(&hwc->prev_count, mipspmu.overflow - left);
395 
396 	mipspmu.write_counter(idx, mipspmu.overflow - left);
397 
398 	perf_event_update_userpage(event);
399 
400 	return ret;
401 }
402 
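/*
 * Fold the hardware counter value into the event count. The cmpxchg
 * loop retries if prev_count was updated concurrently (e.g. by the
 * overflow interrupt) between reading the counter and publishing the
 * new value.
 */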
403 static void mipspmu_event_update(struct perf_event *event,
404 				 struct hw_perf_event *hwc,
405 				 int idx)
406 {
407 	u64 prev_raw_count, new_raw_count;
408 	u64 delta;
409 
410 again:
411 	prev_raw_count = local64_read(&hwc->prev_count);
412 	new_raw_count = mipspmu.read_counter(idx);
413 
414 	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
415 				new_raw_count) != prev_raw_count)
416 		goto again;
417 
418 	delta = new_raw_count - prev_raw_count;
419 
420 	local64_add(delta, &event->count);
421 	local64_sub(delta, &hwc->period_left);
422 }
423 
424 static void mipspmu_start(struct perf_event *event, int flags)
425 {
426 	struct hw_perf_event *hwc = &event->hw;
427 
428 	if (flags & PERF_EF_RELOAD)
429 		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
430 
431 	hwc->state = 0;
432 
433 	/* Set the period for the event. */
434 	mipspmu_event_set_period(event, hwc, hwc->idx);
435 
436 	/* Enable the event. */
437 	mipsxx_pmu_enable_event(hwc, hwc->idx);
438 }
439 
440 static void mipspmu_stop(struct perf_event *event, int flags)
441 {
442 	struct hw_perf_event *hwc = &event->hw;
443 
444 	if (!(hwc->state & PERF_HES_STOPPED)) {
445 		/* We are working on a local event. */
446 		mipsxx_pmu_disable_event(hwc->idx);
447 		barrier();
448 		mipspmu_event_update(event, hwc, hwc->idx);
449 		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
450 	}
451 }
452 
453 static int mipspmu_add(struct perf_event *event, int flags)
454 {
455 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
456 	struct hw_perf_event *hwc = &event->hw;
457 	int idx;
458 	int err = 0;
459 
460 	perf_pmu_disable(event->pmu);
461 
	/* Look for a free counter for this event. */
463 	idx = mipsxx_pmu_alloc_counter(cpuc, hwc);
464 	if (idx < 0) {
465 		err = idx;
466 		goto out;
467 	}
468 
469 	/*
470 	 * If there is an event in the counter we are going to use then
471 	 * make sure it is disabled.
472 	 */
473 	event->hw.idx = idx;
474 	mipsxx_pmu_disable_event(idx);
475 	cpuc->events[idx] = event;
476 
477 	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
478 	if (flags & PERF_EF_START)
479 		mipspmu_start(event, PERF_EF_RELOAD);
480 
481 	/* Propagate our changes to the userspace mapping. */
482 	perf_event_update_userpage(event);
483 
484 out:
485 	perf_pmu_enable(event->pmu);
486 	return err;
487 }
488 
489 static void mipspmu_del(struct perf_event *event, int flags)
490 {
491 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
492 	struct hw_perf_event *hwc = &event->hw;
493 	int idx = hwc->idx;
494 
495 	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);
496 
497 	mipspmu_stop(event, PERF_EF_UPDATE);
498 	cpuc->events[idx] = NULL;
499 	clear_bit(idx, cpuc->used_mask);
500 
501 	perf_event_update_userpage(event);
502 }
503 
504 static void mipspmu_read(struct perf_event *event)
505 {
506 	struct hw_perf_event *hwc = &event->hw;
507 
508 	/* Don't read disabled counters! */
509 	if (hwc->idx < 0)
510 		return;
511 
512 	mipspmu_event_update(event, hwc, hwc->idx);
513 }
514 
515 static void mipspmu_enable(struct pmu *pmu)
516 {
517 #ifdef CONFIG_MIPS_MT_SMP
518 	write_unlock(&pmuint_rwlock);
519 #endif
520 	resume_local_counters();
521 }
522 
523 /*
 * MIPS performance counters can be per-TC, and the control registers
 * cannot be accessed directly across CPUs, so global control requires
 * cross-CPU calls. on_each_cpu() could help, but we cannot guarantee
 * that this function is called with interrupts enabled. So here we
 * pause the local counters, then grab a rwlock and leave the counters
 * on other CPUs alone. If a counter interrupt is raised while we hold
 * the write lock, that CPU simply pauses its local counters and spins
 * in its handler. Note also that we cannot be migrated to another CPU
 * between pausing the local counters and grabbing the lock.
533  */
534 static void mipspmu_disable(struct pmu *pmu)
535 {
536 	pause_local_counters();
537 #ifdef CONFIG_MIPS_MT_SMP
538 	write_lock(&pmuint_rwlock);
539 #endif
540 }
541 
542 static atomic_t active_events = ATOMIC_INIT(0);
543 static DEFINE_MUTEX(pmu_reserve_mutex);
544 static int (*save_perf_irq)(void);
545 
546 static int mipspmu_get_irq(void)
547 {
548 	int err;
549 
550 	if (mipspmu.irq >= 0) {
551 		/* Request my own irq handler. */
552 		err = request_irq(mipspmu.irq, mipsxx_pmu_handle_irq,
553 			IRQF_PERCPU | IRQF_NOBALANCING,
554 			"mips_perf_pmu", NULL);
555 		if (err) {
556 			pr_warning("Unable to request IRQ%d for MIPS "
557 			   "performance counters!\n", mipspmu.irq);
558 		}
559 	} else if (cp0_perfcount_irq < 0) {
560 		/*
561 		 * We are sharing the irq number with the timer interrupt.
562 		 */
563 		save_perf_irq = perf_irq;
564 		perf_irq = mipsxx_pmu_handle_shared_irq;
565 		err = 0;
566 	} else {
567 		pr_warning("The platform hasn't properly defined its "
568 			"interrupt controller.\n");
569 		err = -ENOENT;
570 	}
571 
572 	return err;
573 }
574 
575 static void mipspmu_free_irq(void)
576 {
577 	if (mipspmu.irq >= 0)
578 		free_irq(mipspmu.irq, NULL);
579 	else if (cp0_perfcount_irq < 0)
580 		perf_irq = save_perf_irq;
581 }
582 
583 /*
 * mipsxx/rm9000/loongson2 have different performance counters; each has
 * its own specific low-level init routine.
586  */
587 static void reset_counters(void *arg);
588 static int __hw_perf_event_init(struct perf_event *event);
589 
590 static void hw_perf_event_destroy(struct perf_event *event)
591 {
592 	if (atomic_dec_and_mutex_lock(&active_events,
593 				&pmu_reserve_mutex)) {
594 		/*
595 		 * We must not call the destroy function with interrupts
596 		 * disabled.
597 		 */
598 		on_each_cpu(reset_counters,
599 			(void *)(long)mipspmu.num_counters, 1);
600 		mipspmu_free_irq();
601 		mutex_unlock(&pmu_reserve_mutex);
602 	}
603 }
604 
605 static int mipspmu_event_init(struct perf_event *event)
606 {
607 	int err = 0;
608 
609 	/* does not support taken branch sampling */
610 	if (has_branch_stack(event))
611 		return -EOPNOTSUPP;
612 
613 	switch (event->attr.type) {
614 	case PERF_TYPE_RAW:
615 	case PERF_TYPE_HARDWARE:
616 	case PERF_TYPE_HW_CACHE:
617 		break;
618 
619 	default:
620 		return -ENOENT;
621 	}
622 
623 	if (event->cpu >= nr_cpumask_bits ||
624 	    (event->cpu >= 0 && !cpu_online(event->cpu)))
625 		return -ENODEV;
626 
627 	if (!atomic_inc_not_zero(&active_events)) {
628 		mutex_lock(&pmu_reserve_mutex);
629 		if (atomic_read(&active_events) == 0)
630 			err = mipspmu_get_irq();
631 
632 		if (!err)
633 			atomic_inc(&active_events);
634 		mutex_unlock(&pmu_reserve_mutex);
635 	}
636 
637 	if (err)
638 		return err;
639 
640 	return __hw_perf_event_init(event);
641 }
642 
643 static struct pmu pmu = {
644 	.pmu_enable	= mipspmu_enable,
645 	.pmu_disable	= mipspmu_disable,
646 	.event_init	= mipspmu_event_init,
647 	.add		= mipspmu_add,
648 	.del		= mipspmu_del,
649 	.start		= mipspmu_start,
650 	.stop		= mipspmu_stop,
651 	.read		= mipspmu_read,
652 };
653 
654 static unsigned int mipspmu_perf_event_encode(const struct mips_perf_event *pev)
655 {
656 /*
657  * Top 8 bits for range, next 16 bits for cntr_mask, lowest 8 bits for
658  * event_id.
659  */
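/*
 * For example, on CONFIG_MIPS_MT_SMP a hypothetical event with id 0x02,
 * usable on even counters only (CNTR_EVEN & 0xffff00 == 0x555500) and
 * range T (0), would encode to 0x00555502.
 */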
660 #ifdef CONFIG_MIPS_MT_SMP
661 	return ((unsigned int)pev->range << 24) |
662 		(pev->cntr_mask & 0xffff00) |
663 		(pev->event_id & 0xff);
664 #else
665 	return (pev->cntr_mask & 0xffff00) |
666 		(pev->event_id & 0xff);
667 #endif
668 }
669 
670 static const struct mips_perf_event *mipspmu_map_general_event(int idx)
671 {
672 	const struct mips_perf_event *pev;
673 
674 	pev = ((*mipspmu.general_event_map)[idx].event_id ==
675 		UNSUPPORTED_PERF_EVENT_ID ? ERR_PTR(-EOPNOTSUPP) :
676 		&(*mipspmu.general_event_map)[idx]);
677 
678 	return pev;
679 }
680 
681 static const struct mips_perf_event *mipspmu_map_cache_event(u64 config)
682 {
683 	unsigned int cache_type, cache_op, cache_result;
684 	const struct mips_perf_event *pev;
685 
686 	cache_type = (config >> 0) & 0xff;
687 	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
688 		return ERR_PTR(-EINVAL);
689 
690 	cache_op = (config >> 8) & 0xff;
691 	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
692 		return ERR_PTR(-EINVAL);
693 
694 	cache_result = (config >> 16) & 0xff;
695 	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
696 		return ERR_PTR(-EINVAL);
697 
698 	pev = &((*mipspmu.cache_event_map)
699 					[cache_type]
700 					[cache_op]
701 					[cache_result]);
702 
703 	if (pev->event_id == UNSUPPORTED_PERF_EVENT_ID)
704 		return ERR_PTR(-EOPNOTSUPP);
705 
706 	return pev;
707 
708 }
709 
710 static int validate_group(struct perf_event *event)
711 {
712 	struct perf_event *sibling, *leader = event->group_leader;
713 	struct cpu_hw_events fake_cpuc;
714 
715 	memset(&fake_cpuc, 0, sizeof(fake_cpuc));
716 
717 	if (mipsxx_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0)
718 		return -EINVAL;
719 
720 	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
721 		if (mipsxx_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0)
722 			return -EINVAL;
723 	}
724 
725 	if (mipsxx_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0)
726 		return -EINVAL;
727 
728 	return 0;
729 }
730 
/* This is needed by the CPU-specific irq handlers below. */
732 static void handle_associated_event(struct cpu_hw_events *cpuc,
733 				    int idx, struct perf_sample_data *data,
734 				    struct pt_regs *regs)
735 {
736 	struct perf_event *event = cpuc->events[idx];
737 	struct hw_perf_event *hwc = &event->hw;
738 
739 	mipspmu_event_update(event, hwc, idx);
740 	data->period = event->hw.last_period;
741 	if (!mipspmu_event_set_period(event, hwc, idx))
742 		return;
743 
744 	if (perf_event_overflow(event, data, regs))
745 		mipsxx_pmu_disable_event(idx);
746 }
747 
748 
749 static int __n_counters(void)
750 {
751 	if (!(read_c0_config1() & M_CONFIG1_PC))
752 		return 0;
753 	if (!(read_c0_perfctrl0() & M_PERFCTL_MORE))
754 		return 1;
755 	if (!(read_c0_perfctrl1() & M_PERFCTL_MORE))
756 		return 2;
757 	if (!(read_c0_perfctrl2() & M_PERFCTL_MORE))
758 		return 3;
759 
760 	return 4;
761 }
762 
763 static int n_counters(void)
764 {
765 	int counters;
766 
767 	switch (current_cpu_type()) {
768 	case CPU_R10000:
769 		counters = 2;
770 		break;
771 
772 	case CPU_R12000:
773 	case CPU_R14000:
774 		counters = 4;
775 		break;
776 
777 	default:
778 		counters = __n_counters();
779 	}
780 
781 	return counters;
782 }
783 
784 static void reset_counters(void *arg)
785 {
786 	int counters = (int)(long)arg;
787 	switch (counters) {
788 	case 4:
789 		mipsxx_pmu_write_control(3, 0);
790 		mipspmu.write_counter(3, 0);
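		/* fall through */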
791 	case 3:
792 		mipsxx_pmu_write_control(2, 0);
793 		mipspmu.write_counter(2, 0);
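		/* fall through */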
794 	case 2:
795 		mipsxx_pmu_write_control(1, 0);
796 		mipspmu.write_counter(1, 0);
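		/* fall through */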
797 	case 1:
798 		mipsxx_pmu_write_control(0, 0);
799 		mipspmu.write_counter(0, 0);
800 	}
801 }
802 
803 /* 24K/34K/1004K cores can share the same event map. */
804 static const struct mips_perf_event mipsxxcore_event_map
805 				[PERF_COUNT_HW_MAX] = {
806 	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
807 	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
808 	[PERF_COUNT_HW_CACHE_REFERENCES] = { UNSUPPORTED_PERF_EVENT_ID },
809 	[PERF_COUNT_HW_CACHE_MISSES] = { UNSUPPORTED_PERF_EVENT_ID },
810 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_EVEN, T },
811 	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
812 	[PERF_COUNT_HW_BUS_CYCLES] = { UNSUPPORTED_PERF_EVENT_ID },
813 };
814 
815 /* 74K core has different branch event code. */
816 static const struct mips_perf_event mipsxx74Kcore_event_map
817 				[PERF_COUNT_HW_MAX] = {
818 	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
819 	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
820 	[PERF_COUNT_HW_CACHE_REFERENCES] = { UNSUPPORTED_PERF_EVENT_ID },
821 	[PERF_COUNT_HW_CACHE_MISSES] = { UNSUPPORTED_PERF_EVENT_ID },
822 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x27, CNTR_EVEN, T },
823 	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T },
824 	[PERF_COUNT_HW_BUS_CYCLES] = { UNSUPPORTED_PERF_EVENT_ID },
825 };
826 
827 static const struct mips_perf_event octeon_event_map[PERF_COUNT_HW_MAX] = {
828 	[PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL },
829 	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x03, CNTR_ALL },
830 	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x2b, CNTR_ALL },
831 	[PERF_COUNT_HW_CACHE_MISSES] = { 0x2e, CNTR_ALL  },
832 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x08, CNTR_ALL },
833 	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x09, CNTR_ALL },
834 	[PERF_COUNT_HW_BUS_CYCLES] = { 0x25, CNTR_ALL },
835 };
836 
837 /* 24K/34K/1004K cores can share the same cache event map. */
838 static const struct mips_perf_event mipsxxcore_cache_map
839 				[PERF_COUNT_HW_CACHE_MAX]
840 				[PERF_COUNT_HW_CACHE_OP_MAX]
841 				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
842 [C(L1D)] = {
843 	/*
844 	 * Like some other architectures (e.g. ARM), the performance
845 	 * counters don't differentiate between read and write
846 	 * accesses/misses, so this isn't strictly correct, but it's the
847 	 * best we can do. Writes and reads get combined.
848 	 */
849 	[C(OP_READ)] = {
850 		[C(RESULT_ACCESS)]	= { 0x0a, CNTR_EVEN, T },
851 		[C(RESULT_MISS)]	= { 0x0b, CNTR_EVEN | CNTR_ODD, T },
852 	},
853 	[C(OP_WRITE)] = {
854 		[C(RESULT_ACCESS)]	= { 0x0a, CNTR_EVEN, T },
855 		[C(RESULT_MISS)]	= { 0x0b, CNTR_EVEN | CNTR_ODD, T },
856 	},
857 	[C(OP_PREFETCH)] = {
858 		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
859 		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
860 	},
861 },
862 [C(L1I)] = {
863 	[C(OP_READ)] = {
864 		[C(RESULT_ACCESS)]	= { 0x09, CNTR_EVEN, T },
865 		[C(RESULT_MISS)]	= { 0x09, CNTR_ODD, T },
866 	},
867 	[C(OP_WRITE)] = {
868 		[C(RESULT_ACCESS)]	= { 0x09, CNTR_EVEN, T },
869 		[C(RESULT_MISS)]	= { 0x09, CNTR_ODD, T },
870 	},
871 	[C(OP_PREFETCH)] = {
872 		[C(RESULT_ACCESS)]	= { 0x14, CNTR_EVEN, T },
873 		/*
874 		 * Note that MIPS has only "hit" events countable for
875 		 * the prefetch operation.
876 		 */
877 		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
878 	},
879 },
880 [C(LL)] = {
881 	[C(OP_READ)] = {
882 		[C(RESULT_ACCESS)]	= { 0x15, CNTR_ODD, P },
883 		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN, P },
884 	},
885 	[C(OP_WRITE)] = {
886 		[C(RESULT_ACCESS)]	= { 0x15, CNTR_ODD, P },
887 		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN, P },
888 	},
889 	[C(OP_PREFETCH)] = {
890 		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
891 		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
892 	},
893 },
894 [C(DTLB)] = {
895 	[C(OP_READ)] = {
896 		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
897 		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
898 	},
899 	[C(OP_WRITE)] = {
900 		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
901 		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
902 	},
903 	[C(OP_PREFETCH)] = {
904 		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
905 		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
906 	},
907 },
908 [C(ITLB)] = {
909 	[C(OP_READ)] = {
910 		[C(RESULT_ACCESS)]	= { 0x05, CNTR_EVEN, T },
911 		[C(RESULT_MISS)]	= { 0x05, CNTR_ODD, T },
912 	},
913 	[C(OP_WRITE)] = {
914 		[C(RESULT_ACCESS)]	= { 0x05, CNTR_EVEN, T },
915 		[C(RESULT_MISS)]	= { 0x05, CNTR_ODD, T },
916 	},
917 	[C(OP_PREFETCH)] = {
918 		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
919 		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
920 	},
921 },
922 [C(BPU)] = {
923 	/* Using the same code for *HW_BRANCH* */
924 	[C(OP_READ)] = {
925 		[C(RESULT_ACCESS)]	= { 0x02, CNTR_EVEN, T },
926 		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
927 	},
928 	[C(OP_WRITE)] = {
929 		[C(RESULT_ACCESS)]	= { 0x02, CNTR_EVEN, T },
930 		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
931 	},
932 	[C(OP_PREFETCH)] = {
933 		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
934 		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
935 	},
936 },
937 [C(NODE)] = {
938 	[C(OP_READ)] = {
939 		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
940 		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
941 	},
942 	[C(OP_WRITE)] = {
943 		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
944 		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
945 	},
946 	[C(OP_PREFETCH)] = {
947 		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
948 		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
949 	},
950 },
951 };
952 
953 /* 74K core has completely different cache event map. */
954 static const struct mips_perf_event mipsxx74Kcore_cache_map
955 				[PERF_COUNT_HW_CACHE_MAX]
956 				[PERF_COUNT_HW_CACHE_OP_MAX]
957 				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
958 [C(L1D)] = {
959 	/*
960 	 * Like some other architectures (e.g. ARM), the performance
961 	 * counters don't differentiate between read and write
962 	 * accesses/misses, so this isn't strictly correct, but it's the
963 	 * best we can do. Writes and reads get combined.
964 	 */
965 	[C(OP_READ)] = {
966 		[C(RESULT_ACCESS)]	= { 0x17, CNTR_ODD, T },
967 		[C(RESULT_MISS)]	= { 0x18, CNTR_ODD, T },
968 	},
969 	[C(OP_WRITE)] = {
970 		[C(RESULT_ACCESS)]	= { 0x17, CNTR_ODD, T },
971 		[C(RESULT_MISS)]	= { 0x18, CNTR_ODD, T },
972 	},
973 	[C(OP_PREFETCH)] = {
974 		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
975 		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
976 	},
977 },
978 [C(L1I)] = {
979 	[C(OP_READ)] = {
980 		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
981 		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
982 	},
983 	[C(OP_WRITE)] = {
984 		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
985 		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
986 	},
987 	[C(OP_PREFETCH)] = {
988 		[C(RESULT_ACCESS)]	= { 0x34, CNTR_EVEN, T },
989 		/*
990 		 * Note that MIPS has only "hit" events countable for
991 		 * the prefetch operation.
992 		 */
993 		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
994 	},
995 },
996 [C(LL)] = {
997 	[C(OP_READ)] = {
998 		[C(RESULT_ACCESS)]	= { 0x1c, CNTR_ODD, P },
999 		[C(RESULT_MISS)]	= { 0x1d, CNTR_EVEN | CNTR_ODD, P },
1000 	},
1001 	[C(OP_WRITE)] = {
1002 		[C(RESULT_ACCESS)]	= { 0x1c, CNTR_ODD, P },
1003 		[C(RESULT_MISS)]	= { 0x1d, CNTR_EVEN | CNTR_ODD, P },
1004 	},
1005 	[C(OP_PREFETCH)] = {
1006 		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
1007 		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
1008 	},
1009 },
1010 [C(DTLB)] = {
1011 	/* 74K core does not have specific DTLB events. */
1012 	[C(OP_READ)] = {
1013 		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
1014 		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
1015 	},
1016 	[C(OP_WRITE)] = {
1017 		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
1018 		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
1019 	},
1020 	[C(OP_PREFETCH)] = {
1021 		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
1022 		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
1023 	},
1024 },
1025 [C(ITLB)] = {
1026 	[C(OP_READ)] = {
1027 		[C(RESULT_ACCESS)]	= { 0x04, CNTR_EVEN, T },
1028 		[C(RESULT_MISS)]	= { 0x04, CNTR_ODD, T },
1029 	},
1030 	[C(OP_WRITE)] = {
1031 		[C(RESULT_ACCESS)]	= { 0x04, CNTR_EVEN, T },
1032 		[C(RESULT_MISS)]	= { 0x04, CNTR_ODD, T },
1033 	},
1034 	[C(OP_PREFETCH)] = {
1035 		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
1036 		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
1037 	},
1038 },
1039 [C(BPU)] = {
1040 	/* Using the same code for *HW_BRANCH* */
1041 	[C(OP_READ)] = {
1042 		[C(RESULT_ACCESS)]	= { 0x27, CNTR_EVEN, T },
1043 		[C(RESULT_MISS)]	= { 0x27, CNTR_ODD, T },
1044 	},
1045 	[C(OP_WRITE)] = {
1046 		[C(RESULT_ACCESS)]	= { 0x27, CNTR_EVEN, T },
1047 		[C(RESULT_MISS)]	= { 0x27, CNTR_ODD, T },
1048 	},
1049 	[C(OP_PREFETCH)] = {
1050 		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
1051 		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
1052 	},
1053 },
1054 [C(NODE)] = {
1055 	[C(OP_READ)] = {
1056 		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
1057 		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
1058 	},
1059 	[C(OP_WRITE)] = {
1060 		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
1061 		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
1062 	},
1063 	[C(OP_PREFETCH)] = {
1064 		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
1065 		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
1066 	},
1067 },
1068 };
1069 
1070 
1071 static const struct mips_perf_event octeon_cache_map
1072 				[PERF_COUNT_HW_CACHE_MAX]
1073 				[PERF_COUNT_HW_CACHE_OP_MAX]
1074 				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1075 [C(L1D)] = {
1076 	[C(OP_READ)] = {
1077 		[C(RESULT_ACCESS)]	= { 0x2b, CNTR_ALL },
1078 		[C(RESULT_MISS)]	= { 0x2e, CNTR_ALL },
1079 	},
1080 	[C(OP_WRITE)] = {
1081 		[C(RESULT_ACCESS)]	= { 0x30, CNTR_ALL },
1082 		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
1083 	},
1084 	[C(OP_PREFETCH)] = {
1085 		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
1086 		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
1087 	},
1088 },
1089 [C(L1I)] = {
1090 	[C(OP_READ)] = {
1091 		[C(RESULT_ACCESS)]	= { 0x18, CNTR_ALL },
1092 		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
1093 	},
1094 	[C(OP_WRITE)] = {
1095 		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
1096 		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
1097 	},
1098 	[C(OP_PREFETCH)] = {
1099 		[C(RESULT_ACCESS)]	= { 0x19, CNTR_ALL },
1100 		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
1101 	},
1102 },
1103 [C(LL)] = {
1104 	[C(OP_READ)] = {
1105 		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
1106 		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
1107 	},
1108 	[C(OP_WRITE)] = {
1109 		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
1110 		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
1111 	},
1112 	[C(OP_PREFETCH)] = {
1113 		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
1114 		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
1115 	},
1116 },
1117 [C(DTLB)] = {
1118 	/*
	 * Only general DTLB misses are counted, so use the same event
	 * for both read and write.
1121 	 */
1122 	[C(OP_READ)] = {
1123 		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
1124 		[C(RESULT_MISS)]	= { 0x35, CNTR_ALL },
1125 	},
1126 	[C(OP_WRITE)] = {
1127 		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
1128 		[C(RESULT_MISS)]	= { 0x35, CNTR_ALL },
1129 	},
1130 	[C(OP_PREFETCH)] = {
1131 		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
1132 		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
1133 	},
1134 },
1135 [C(ITLB)] = {
1136 	[C(OP_READ)] = {
1137 		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
1138 		[C(RESULT_MISS)]	= { 0x37, CNTR_ALL },
1139 	},
1140 	[C(OP_WRITE)] = {
1141 		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
1142 		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
1143 	},
1144 	[C(OP_PREFETCH)] = {
1145 		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
1146 		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
1147 	},
1148 },
1149 [C(BPU)] = {
1150 	/* Using the same code for *HW_BRANCH* */
1151 	[C(OP_READ)] = {
1152 		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
1153 		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
1154 	},
1155 	[C(OP_WRITE)] = {
1156 		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
1157 		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
1158 	},
1159 	[C(OP_PREFETCH)] = {
1160 		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
1161 		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
1162 	},
1163 },
1164 };
1165 
1166 #ifdef CONFIG_MIPS_MT_SMP
1167 static void check_and_calc_range(struct perf_event *event,
1168 				 const struct mips_perf_event *pev)
1169 {
1170 	struct hw_perf_event *hwc = &event->hw;
1171 
1172 	if (event->cpu >= 0) {
1173 		if (pev->range > V) {
1174 			/*
1175 			 * The user selected an event that is processor
1176 			 * wide, while expecting it to be VPE wide.
1177 			 */
1178 			hwc->config_base |= M_TC_EN_ALL;
1179 		} else {
1180 			/*
1181 			 * FIXME: cpu_data[event->cpu].vpe_id reports 0
1182 			 * for both CPUs.
1183 			 */
1184 			hwc->config_base |= M_PERFCTL_VPEID(event->cpu);
1185 			hwc->config_base |= M_TC_EN_VPE;
1186 		}
1187 	} else
1188 		hwc->config_base |= M_TC_EN_ALL;
1189 }
1190 #else
1191 static void check_and_calc_range(struct perf_event *event,
1192 				 const struct mips_perf_event *pev)
1193 {
1194 }
1195 #endif
1196 
1197 static int __hw_perf_event_init(struct perf_event *event)
1198 {
1199 	struct perf_event_attr *attr = &event->attr;
1200 	struct hw_perf_event *hwc = &event->hw;
1201 	const struct mips_perf_event *pev;
1202 	int err;
1203 
	/* Return the MIPS event descriptor for this generic perf event. */
1205 	if (PERF_TYPE_HARDWARE == event->attr.type) {
1206 		if (event->attr.config >= PERF_COUNT_HW_MAX)
1207 			return -EINVAL;
1208 		pev = mipspmu_map_general_event(event->attr.config);
1209 	} else if (PERF_TYPE_HW_CACHE == event->attr.type) {
1210 		pev = mipspmu_map_cache_event(event->attr.config);
1211 	} else if (PERF_TYPE_RAW == event->attr.type) {
1212 		/* We are working on the global raw event. */
1213 		mutex_lock(&raw_event_mutex);
1214 		pev = mipspmu.map_raw_event(event->attr.config);
1215 	} else {
1216 		/* The event type is not (yet) supported. */
1217 		return -EOPNOTSUPP;
1218 	}
1219 
1220 	if (IS_ERR(pev)) {
1221 		if (PERF_TYPE_RAW == event->attr.type)
1222 			mutex_unlock(&raw_event_mutex);
1223 		return PTR_ERR(pev);
1224 	}
1225 
1226 	/*
	 * We allow maximum flexibility in how each individual counter
	 * shared by the single CPU operates (the mode exclusion and the range).
1229 	 */
1230 	hwc->config_base = M_PERFCTL_INTERRUPT_ENABLE;
1231 
1232 	/* Calculate range bits and validate it. */
1233 	if (num_possible_cpus() > 1)
1234 		check_and_calc_range(event, pev);
1235 
1236 	hwc->event_base = mipspmu_perf_event_encode(pev);
1237 	if (PERF_TYPE_RAW == event->attr.type)
1238 		mutex_unlock(&raw_event_mutex);
1239 
1240 	if (!attr->exclude_user)
1241 		hwc->config_base |= M_PERFCTL_USER;
1242 	if (!attr->exclude_kernel) {
1243 		hwc->config_base |= M_PERFCTL_KERNEL;
1244 		/* MIPS kernel mode: KSU == 00b || EXL == 1 || ERL == 1 */
1245 		hwc->config_base |= M_PERFCTL_EXL;
1246 	}
1247 	if (!attr->exclude_hv)
1248 		hwc->config_base |= M_PERFCTL_SUPERVISOR;
1249 
1250 	hwc->config_base &= M_PERFCTL_CONFIG_MASK;
1251 	/*
1252 	 * The event can belong to another cpu. We do not assign a local
1253 	 * counter for it for now.
1254 	 */
1255 	hwc->idx = -1;
1256 	hwc->config = 0;
1257 
1258 	if (!hwc->sample_period) {
1259 		hwc->sample_period  = mipspmu.max_period;
1260 		hwc->last_period    = hwc->sample_period;
1261 		local64_set(&hwc->period_left, hwc->sample_period);
1262 	}
1263 
1264 	err = 0;
1265 	if (event->group_leader != event)
1266 		err = validate_group(event);
1267 
1268 	event->destroy = hw_perf_event_destroy;
1269 
1270 	if (err)
1271 		event->destroy(event);
1272 
1273 	return err;
1274 }
1275 
1276 static void pause_local_counters(void)
1277 {
1278 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1279 	int ctr = mipspmu.num_counters;
1280 	unsigned long flags;
1281 
1282 	local_irq_save(flags);
1283 	do {
1284 		ctr--;
1285 		cpuc->saved_ctrl[ctr] = mipsxx_pmu_read_control(ctr);
1286 		mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr] &
1287 					 ~M_PERFCTL_COUNT_EVENT_WHENEVER);
1288 	} while (ctr > 0);
1289 	local_irq_restore(flags);
1290 }
1291 
1292 static void resume_local_counters(void)
1293 {
1294 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1295 	int ctr = mipspmu.num_counters;
1296 
1297 	do {
1298 		ctr--;
1299 		mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr]);
1300 	} while (ctr > 0);
1301 }
1302 
1303 static int mipsxx_pmu_handle_shared_irq(void)
1304 {
1305 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1306 	struct perf_sample_data data;
1307 	unsigned int counters = mipspmu.num_counters;
1308 	u64 counter;
1309 	int handled = IRQ_NONE;
1310 	struct pt_regs *regs;
1311 
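	/* On R2 cores, Cause.PCI (bit 26) flags a pending counter interrupt. */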
1312 	if (cpu_has_mips_r2 && !(read_c0_cause() & (1 << 26)))
1313 		return handled;
1314 	/*
1315 	 * First we pause the local counters, so that when we are locked
1316 	 * here, the counters are all paused. When it gets locked due to
1317 	 * perf_disable(), the timer interrupt handler will be delayed.
1318 	 *
	 * See also mipspmu_start().
1320 	 */
1321 	pause_local_counters();
1322 #ifdef CONFIG_MIPS_MT_SMP
1323 	read_lock(&pmuint_rwlock);
1324 #endif
1325 
1326 	regs = get_irq_regs();
1327 
1328 	perf_sample_data_init(&data, 0);
1329 
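	/*
	 * Each HANDLE_COUNTER() case deliberately falls through to the
	 * next, so starting at "counters" checks every implemented
	 * counter for an overflow.
	 */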
1330 	switch (counters) {
1331 #define HANDLE_COUNTER(n)						\
1332 	case n + 1:							\
1333 		if (test_bit(n, cpuc->used_mask)) {			\
1334 			counter = mipspmu.read_counter(n);		\
1335 			if (counter & mipspmu.overflow) {		\
1336 				handle_associated_event(cpuc, n, &data, regs); \
1337 				handled = IRQ_HANDLED;			\
1338 			}						\
1339 		}
1340 	HANDLE_COUNTER(3)
1341 	HANDLE_COUNTER(2)
1342 	HANDLE_COUNTER(1)
1343 	HANDLE_COUNTER(0)
1344 	}
1345 
1346 	/*
1347 	 * Do all the work for the pending perf events. We can do this
1348 	 * in here because the performance counter interrupt is a regular
1349 	 * interrupt, not NMI.
1350 	 */
1351 	if (handled == IRQ_HANDLED)
1352 		irq_work_run();
1353 
1354 #ifdef CONFIG_MIPS_MT_SMP
1355 	read_unlock(&pmuint_rwlock);
1356 #endif
1357 	resume_local_counters();
1358 	return handled;
1359 }
1360 
1361 static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev)
1362 {
1363 	return mipsxx_pmu_handle_shared_irq();
1364 }
1365 
1366 /* 24K */
1367 #define IS_BOTH_COUNTERS_24K_EVENT(b)					\
1368 	((b) == 0 || (b) == 1 || (b) == 11)
1369 
1370 /* 34K */
1371 #define IS_BOTH_COUNTERS_34K_EVENT(b)					\
1372 	((b) == 0 || (b) == 1 || (b) == 11)
1373 #ifdef CONFIG_MIPS_MT_SMP
1374 #define IS_RANGE_P_34K_EVENT(r, b)					\
1375 	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
1376 	 (b) == 25 || (b) == 39 || (r) == 44 || (r) == 174 ||		\
1377 	 (r) == 176 || ((b) >= 50 && (b) <= 55) ||			\
1378 	 ((b) >= 64 && (b) <= 67))
1379 #define IS_RANGE_V_34K_EVENT(r)	((r) == 47)
1380 #endif
1381 
1382 /* 74K */
1383 #define IS_BOTH_COUNTERS_74K_EVENT(b)					\
1384 	((b) == 0 || (b) == 1)
1385 
1386 /* 1004K */
1387 #define IS_BOTH_COUNTERS_1004K_EVENT(b)					\
1388 	((b) == 0 || (b) == 1 || (b) == 11)
1389 #ifdef CONFIG_MIPS_MT_SMP
1390 #define IS_RANGE_P_1004K_EVENT(r, b)					\
1391 	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
1392 	 (b) == 25 || (b) == 36 || (b) == 39 || (r) == 44 ||		\
1393 	 (r) == 174 || (r) == 176 || ((b) >= 50 && (b) <= 59) ||	\
1394 	 (r) == 188 || (b) == 61 || (b) == 62 ||			\
1395 	 ((b) >= 64 && (b) <= 67))
1396 #define IS_RANGE_V_1004K_EVENT(r)	((r) == 47)
1397 #endif
1398 
1399 /*
 * Users can specify raw events 0-255, where 0-127 select events on the
 * even counters and 128-255 select events on the odd counters; bit 7
 * thus indicates the parity. For example, to count Event Num 15 (from
 * the user manual) on an odd counter, add 128 to 15 and use 143 (0x8F)
 * as the event config.
1406  */
1407 static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
1408 {
1409 	unsigned int raw_id = config & 0xff;
1410 	unsigned int base_id = raw_id & 0x7f;
1411 
1412 	raw_event.event_id = base_id;
1413 
1414 	switch (current_cpu_type()) {
1415 	case CPU_24K:
1416 		if (IS_BOTH_COUNTERS_24K_EVENT(base_id))
1417 			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1418 		else
1419 			raw_event.cntr_mask =
1420 				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1421 #ifdef CONFIG_MIPS_MT_SMP
1422 		/*
		 * This actually does nothing: non-multithreading CPUs
		 * will not check and calculate the range.
1425 		 */
1426 		raw_event.range = P;
1427 #endif
1428 		break;
1429 	case CPU_34K:
1430 		if (IS_BOTH_COUNTERS_34K_EVENT(base_id))
1431 			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1432 		else
1433 			raw_event.cntr_mask =
1434 				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1435 #ifdef CONFIG_MIPS_MT_SMP
1436 		if (IS_RANGE_P_34K_EVENT(raw_id, base_id))
1437 			raw_event.range = P;
1438 		else if (unlikely(IS_RANGE_V_34K_EVENT(raw_id)))
1439 			raw_event.range = V;
1440 		else
1441 			raw_event.range = T;
1442 #endif
1443 		break;
1444 	case CPU_74K:
1445 		if (IS_BOTH_COUNTERS_74K_EVENT(base_id))
1446 			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1447 		else
1448 			raw_event.cntr_mask =
1449 				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1450 #ifdef CONFIG_MIPS_MT_SMP
1451 		raw_event.range = P;
1452 #endif
1453 		break;
1454 	case CPU_1004K:
1455 		if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
1456 			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1457 		else
1458 			raw_event.cntr_mask =
1459 				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1460 #ifdef CONFIG_MIPS_MT_SMP
1461 		if (IS_RANGE_P_1004K_EVENT(raw_id, base_id))
1462 			raw_event.range = P;
1463 		else if (unlikely(IS_RANGE_V_1004K_EVENT(raw_id)))
1464 			raw_event.range = V;
1465 		else
1466 			raw_event.range = T;
1467 #endif
1468 		break;
1469 	}
1470 
1471 	return &raw_event;
1472 }
1473 
1474 static const struct mips_perf_event *octeon_pmu_map_raw_event(u64 config)
1475 {
1476 	unsigned int raw_id = config & 0xff;
1477 	unsigned int base_id = raw_id & 0x7f;
1478 
1479 
1480 	raw_event.cntr_mask = CNTR_ALL;
1481 	raw_event.event_id = base_id;
1482 
1483 	if (current_cpu_type() == CPU_CAVIUM_OCTEON2) {
1484 		if (base_id > 0x42)
1485 			return ERR_PTR(-EOPNOTSUPP);
1486 	} else {
1487 		if (base_id > 0x3a)
1488 			return ERR_PTR(-EOPNOTSUPP);
1489 	}
1490 
1491 	switch (base_id) {
1492 	case 0x00:
1493 	case 0x0f:
1494 	case 0x1e:
1495 	case 0x1f:
1496 	case 0x2f:
1497 	case 0x34:
1498 	case 0x3b ... 0x3f:
1499 		return ERR_PTR(-EOPNOTSUPP);
1500 	default:
1501 		break;
1502 	}
1503 
1504 	return &raw_event;
1505 }
1506 
1507 static int __init
1508 init_hw_perf_events(void)
1509 {
1510 	int counters, irq;
1511 	int counter_bits;
1512 
1513 	pr_info("Performance counters: ");
1514 
1515 	counters = n_counters();
1516 	if (counters == 0) {
1517 		pr_cont("No available PMU.\n");
1518 		return -ENODEV;
1519 	}
1520 
1521 #ifdef CONFIG_MIPS_MT_SMP
1522 	cpu_has_mipsmt_pertccounters = read_c0_config7() & (1<<19);
1523 	if (!cpu_has_mipsmt_pertccounters)
1524 		counters = counters_total_to_per_cpu(counters);
1525 #endif
1526 
1527 #ifdef MSC01E_INT_BASE
1528 	if (cpu_has_veic) {
1529 		/*
1530 		 * Using platform specific interrupt controller defines.
1531 		 */
1532 		irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR;
1533 	} else {
1534 #endif
1535 		if (cp0_perfcount_irq >= 0)
1536 			irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
1537 		else
1538 			irq = -1;
1539 #ifdef MSC01E_INT_BASE
1540 	}
1541 #endif
1542 
1543 	mipspmu.map_raw_event = mipsxx_pmu_map_raw_event;
1544 
1545 	switch (current_cpu_type()) {
1546 	case CPU_24K:
1547 		mipspmu.name = "mips/24K";
1548 		mipspmu.general_event_map = &mipsxxcore_event_map;
1549 		mipspmu.cache_event_map = &mipsxxcore_cache_map;
1550 		break;
1551 	case CPU_34K:
1552 		mipspmu.name = "mips/34K";
1553 		mipspmu.general_event_map = &mipsxxcore_event_map;
1554 		mipspmu.cache_event_map = &mipsxxcore_cache_map;
1555 		break;
1556 	case CPU_74K:
1557 		mipspmu.name = "mips/74K";
1558 		mipspmu.general_event_map = &mipsxx74Kcore_event_map;
1559 		mipspmu.cache_event_map = &mipsxx74Kcore_cache_map;
1560 		break;
1561 	case CPU_1004K:
1562 		mipspmu.name = "mips/1004K";
1563 		mipspmu.general_event_map = &mipsxxcore_event_map;
1564 		mipspmu.cache_event_map = &mipsxxcore_cache_map;
1565 		break;
1566 	case CPU_CAVIUM_OCTEON:
1567 	case CPU_CAVIUM_OCTEON_PLUS:
1568 	case CPU_CAVIUM_OCTEON2:
1569 		mipspmu.name = "octeon";
1570 		mipspmu.general_event_map = &octeon_event_map;
1571 		mipspmu.cache_event_map = &octeon_cache_map;
1572 		mipspmu.map_raw_event = octeon_pmu_map_raw_event;
1573 		break;
1574 	default:
1575 		pr_cont("Either hardware does not support performance "
1576 			"counters, or not yet implemented.\n");
1577 		return -ENODEV;
1578 	}
1579 
1580 	mipspmu.num_counters = counters;
1581 	mipspmu.irq = irq;
1582 
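	/*
	 * If the W bit is set in PerfCtl0 the counters are 64 bits wide;
	 * otherwise treat them as 32-bit counters.
	 */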
1583 	if (read_c0_perfctrl0() & M_PERFCTL_WIDE) {
1584 		mipspmu.max_period = (1ULL << 63) - 1;
1585 		mipspmu.valid_count = (1ULL << 63) - 1;
1586 		mipspmu.overflow = 1ULL << 63;
1587 		mipspmu.read_counter = mipsxx_pmu_read_counter_64;
1588 		mipspmu.write_counter = mipsxx_pmu_write_counter_64;
1589 		counter_bits = 64;
1590 	} else {
1591 		mipspmu.max_period = (1ULL << 31) - 1;
1592 		mipspmu.valid_count = (1ULL << 31) - 1;
1593 		mipspmu.overflow = 1ULL << 31;
1594 		mipspmu.read_counter = mipsxx_pmu_read_counter;
1595 		mipspmu.write_counter = mipsxx_pmu_write_counter;
1596 		counter_bits = 32;
1597 	}
1598 
1599 	on_each_cpu(reset_counters, (void *)(long)counters, 1);
1600 
1601 	pr_cont("%s PMU enabled, %d %d-bit counters available to each "
1602 		"CPU, irq %d%s\n", mipspmu.name, counters, counter_bits, irq,
1603 		irq < 0 ? " (share with timer interrupt)" : "");
1604 
1605 	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
1606 
1607 	return 0;
1608 }
1609 early_initcall(init_hw_perf_events);
1610