1 /*
2  * Linux performance counter support for MIPS.
3  *
4  * Copyright (C) 2010 MIPS Technologies, Inc.
5  * Copyright (C) 2011 Cavium Networks, Inc.
6  * Author: Deng-Cheng Zhu
7  *
8  * This code is based on the implementation for ARM, which is in turn
9  * based on the sparc64 perf event code and the x86 code. Performance
 * counter access is based on the MIPS Oprofile code, and the callchain
 * support references the code of MIPS stacktrace.c.
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of the GNU General Public License version 2 as
15  * published by the Free Software Foundation.
16  */
17 
18 #include <linux/cpumask.h>
19 #include <linux/interrupt.h>
20 #include <linux/smp.h>
21 #include <linux/kernel.h>
22 #include <linux/perf_event.h>
23 #include <linux/uaccess.h>
24 
25 #include <asm/irq.h>
26 #include <asm/irq_regs.h>
27 #include <asm/stacktrace.h>
28 #include <asm/time.h> /* For perf_irq */
29 
30 #define MIPS_MAX_HWEVENTS 4
31 #define MIPS_TCS_PER_COUNTER 2
32 #define MIPS_CPUID_TO_COUNTER_MASK (MIPS_TCS_PER_COUNTER - 1)
33 
34 struct cpu_hw_events {
35 	/* Array of events on this cpu. */
36 	struct perf_event	*events[MIPS_MAX_HWEVENTS];
37 
38 	/*
39 	 * Set the bit (indexed by the counter number) when the counter
40 	 * is used for an event.
41 	 */
42 	unsigned long		used_mask[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];
43 
44 	/*
45 	 * Software copy of the control register for each performance counter.
	 * MIPS CPUs vary in their performance counter implementations;
	 * different CPUs use this field differently, and some may not use
	 * it at all.
48 	 */
49 	unsigned int		saved_ctrl[MIPS_MAX_HWEVENTS];
50 };
51 DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
52 	.saved_ctrl = {0},
53 };
54 
55 /* The description of MIPS performance events. */
56 struct mips_perf_event {
57 	unsigned int event_id;
58 	/*
59 	 * MIPS performance counters are indexed starting from 0.
60 	 * CNTR_EVEN indicates the indexes of the counters to be used are
61 	 * even numbers.
62 	 */
63 	unsigned int cntr_mask;
64 	#define CNTR_EVEN	0x55555555
65 	#define CNTR_ODD	0xaaaaaaaa
66 	#define CNTR_ALL	0xffffffff
67 #ifdef CONFIG_MIPS_MT_SMP
68 	enum {
69 		T  = 0,
70 		V  = 1,
71 		P  = 2,
72 	} range;
73 #else
74 	#define T
75 	#define V
76 	#define P
77 #endif
78 };
79 
80 static struct mips_perf_event raw_event;
81 static DEFINE_MUTEX(raw_event_mutex);
82 
83 #define C(x) PERF_COUNT_HW_CACHE_##x
84 
85 struct mips_pmu {
86 	u64		max_period;
87 	u64		valid_count;
88 	u64		overflow;
89 	const char	*name;
90 	int		irq;
91 	u64		(*read_counter)(unsigned int idx);
92 	void		(*write_counter)(unsigned int idx, u64 val);
93 	const struct mips_perf_event *(*map_raw_event)(u64 config);
94 	const struct mips_perf_event (*general_event_map)[PERF_COUNT_HW_MAX];
95 	const struct mips_perf_event (*cache_event_map)
96 				[PERF_COUNT_HW_CACHE_MAX]
97 				[PERF_COUNT_HW_CACHE_OP_MAX]
98 				[PERF_COUNT_HW_CACHE_RESULT_MAX];
99 	unsigned int	num_counters;
100 };
101 
102 static struct mips_pmu mipspmu;
103 
104 #define M_PERFCTL_EXL			(1	<<  0)
105 #define M_PERFCTL_KERNEL		(1	<<  1)
106 #define M_PERFCTL_SUPERVISOR		(1	<<  2)
107 #define M_PERFCTL_USER			(1	<<  3)
108 #define M_PERFCTL_INTERRUPT_ENABLE	(1	<<  4)
109 #define M_PERFCTL_EVENT(event)		(((event) & 0x3ff)  << 5)
110 #define M_PERFCTL_VPEID(vpe)		((vpe)	  << 16)
111 
112 #ifdef CONFIG_CPU_BMIPS5000
113 #define M_PERFCTL_MT_EN(filter)		0
114 #else /* !CONFIG_CPU_BMIPS5000 */
115 #define M_PERFCTL_MT_EN(filter)		((filter) << 20)
116 #endif /* CONFIG_CPU_BMIPS5000 */
117 
118 #define	   M_TC_EN_ALL			M_PERFCTL_MT_EN(0)
119 #define	   M_TC_EN_VPE			M_PERFCTL_MT_EN(1)
120 #define	   M_TC_EN_TC			M_PERFCTL_MT_EN(2)
121 #define M_PERFCTL_TCID(tcid)		((tcid)	  << 22)
122 #define M_PERFCTL_WIDE			(1	<< 30)
123 #define M_PERFCTL_MORE			(1	<< 31)
124 #define M_PERFCTL_TC			(1	<< 30)
125 
126 #define M_PERFCTL_COUNT_EVENT_WHENEVER	(M_PERFCTL_EXL |		\
127 					M_PERFCTL_KERNEL |		\
128 					M_PERFCTL_USER |		\
129 					M_PERFCTL_SUPERVISOR |		\
130 					M_PERFCTL_INTERRUPT_ENABLE)
131 
132 #ifdef CONFIG_MIPS_MT_SMP
133 #define M_PERFCTL_CONFIG_MASK		0x3fff801f
134 #else
135 #define M_PERFCTL_CONFIG_MASK		0x1f
136 #endif
137 #define M_PERFCTL_EVENT_MASK		0xfe0
138 
140 #ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
141 static int cpu_has_mipsmt_pertccounters;
142 
143 static DEFINE_RWLOCK(pmuint_rwlock);
144 
145 #if defined(CONFIG_CPU_BMIPS5000)
146 #define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
147 			 0 : (smp_processor_id() & MIPS_CPUID_TO_COUNTER_MASK))
148 #else
149 /*
150  * FIXME: For VSMP, vpe_id() is redefined for Perf-events, because
151  * cpu_data[cpuid].vpe_id reports 0 for _both_ CPUs.
152  */
153 #define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
154 			 0 : smp_processor_id())
155 #endif
156 
157 /* Copied from op_model_mipsxx.c */
158 static unsigned int vpe_shift(void)
159 {
160 	if (num_possible_cpus() > 1)
161 		return 1;
162 
163 	return 0;
164 }
165 
166 static unsigned int counters_total_to_per_cpu(unsigned int counters)
167 {
168 	return counters >> vpe_shift();
169 }
170 
171 #else /* !CONFIG_MIPS_PERF_SHARED_TC_COUNTERS */
172 #define vpe_id()	0
173 
174 #endif /* CONFIG_MIPS_PERF_SHARED_TC_COUNTERS */
175 
176 static void resume_local_counters(void);
177 static void pause_local_counters(void);
178 static irqreturn_t mipsxx_pmu_handle_irq(int, void *);
179 static int mipsxx_pmu_handle_shared_irq(void);
180 
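/*
 * When counters are shared between the two VPEs/TCs of a core, the
 * second one uses the upper pair of counters: rotate the index by two
 * so that its logical counters 0/1 map to physical counters 2/3.
 */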
181 static unsigned int mipsxx_pmu_swizzle_perf_idx(unsigned int idx)
182 {
183 	if (vpe_id() == 1)
184 		idx = (idx + 2) & 3;
185 	return idx;
186 }
187 
188 static u64 mipsxx_pmu_read_counter(unsigned int idx)
189 {
190 	idx = mipsxx_pmu_swizzle_perf_idx(idx);
191 
192 	switch (idx) {
193 	case 0:
194 		/*
195 		 * The counters are unsigned, we must cast to truncate
196 		 * off the high bits.
197 		 */
198 		return (u32)read_c0_perfcntr0();
199 	case 1:
200 		return (u32)read_c0_perfcntr1();
201 	case 2:
202 		return (u32)read_c0_perfcntr2();
203 	case 3:
204 		return (u32)read_c0_perfcntr3();
205 	default:
206 		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
207 		return 0;
208 	}
209 }
210 
211 static u64 mipsxx_pmu_read_counter_64(unsigned int idx)
212 {
213 	idx = mipsxx_pmu_swizzle_perf_idx(idx);
214 
215 	switch (idx) {
216 	case 0:
217 		return read_c0_perfcntr0_64();
218 	case 1:
219 		return read_c0_perfcntr1_64();
220 	case 2:
221 		return read_c0_perfcntr2_64();
222 	case 3:
223 		return read_c0_perfcntr3_64();
224 	default:
225 		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
226 		return 0;
227 	}
228 }
229 
230 static void mipsxx_pmu_write_counter(unsigned int idx, u64 val)
231 {
232 	idx = mipsxx_pmu_swizzle_perf_idx(idx);
233 
234 	switch (idx) {
235 	case 0:
236 		write_c0_perfcntr0(val);
237 		return;
238 	case 1:
239 		write_c0_perfcntr1(val);
240 		return;
241 	case 2:
242 		write_c0_perfcntr2(val);
243 		return;
244 	case 3:
245 		write_c0_perfcntr3(val);
246 		return;
247 	}
248 }
249 
250 static void mipsxx_pmu_write_counter_64(unsigned int idx, u64 val)
251 {
252 	idx = mipsxx_pmu_swizzle_perf_idx(idx);
253 
254 	switch (idx) {
255 	case 0:
256 		write_c0_perfcntr0_64(val);
257 		return;
258 	case 1:
259 		write_c0_perfcntr1_64(val);
260 		return;
261 	case 2:
262 		write_c0_perfcntr2_64(val);
263 		return;
264 	case 3:
265 		write_c0_perfcntr3_64(val);
266 		return;
267 	}
268 }
269 
270 static unsigned int mipsxx_pmu_read_control(unsigned int idx)
271 {
272 	idx = mipsxx_pmu_swizzle_perf_idx(idx);
273 
274 	switch (idx) {
275 	case 0:
276 		return read_c0_perfctrl0();
277 	case 1:
278 		return read_c0_perfctrl1();
279 	case 2:
280 		return read_c0_perfctrl2();
281 	case 3:
282 		return read_c0_perfctrl3();
283 	default:
284 		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
285 		return 0;
286 	}
287 }
288 
289 static void mipsxx_pmu_write_control(unsigned int idx, unsigned int val)
290 {
291 	idx = mipsxx_pmu_swizzle_perf_idx(idx);
292 
293 	switch (idx) {
294 	case 0:
295 		write_c0_perfctrl0(val);
296 		return;
297 	case 1:
298 		write_c0_perfctrl1(val);
299 		return;
300 	case 2:
301 		write_c0_perfctrl2(val);
302 		return;
303 	case 3:
304 		write_c0_perfctrl3(val);
305 		return;
306 	}
307 }
308 
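/*
 * Find a free hardware counter for the event, preferring the
 * highest-numbered counter allowed by the event's counter mask, and
 * mark it as used.
 */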
309 static int mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc,
310 				    struct hw_perf_event *hwc)
311 {
312 	int i;
313 
314 	/*
	 * We only need to care about the counter mask. The range has
	 * already been checked.
317 	 */
318 	unsigned long cntr_mask = (hwc->event_base >> 8) & 0xffff;
319 
320 	for (i = mipspmu.num_counters - 1; i >= 0; i--) {
321 		/*
		 * Note that some MIPS perf events can be counted by both
		 * even and odd counters, whereas many others can only be
		 * counted by even _or_ odd counters. This introduces an
		 * issue: when an event of the former kind takes the
		 * counter that an event of the latter kind wants to use,
		 * counter allocation for the latter event will fail. If
		 * the two could be swapped dynamically, both would be
		 * satisfied, but we leave this issue alone for now.
330 		 */
331 		if (test_bit(i, &cntr_mask) &&
332 			!test_and_set_bit(i, cpuc->used_mask))
333 			return i;
334 	}
335 
336 	return -EAGAIN;
337 }
338 
339 static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
340 {
341 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
342 
343 	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);
344 
345 	cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) |
346 		(evt->config_base & M_PERFCTL_CONFIG_MASK) |
		/* Make sure the interrupt is enabled. */
348 		M_PERFCTL_INTERRUPT_ENABLE;
349 	if (IS_ENABLED(CONFIG_CPU_BMIPS5000))
350 		/* enable the counter for the calling thread */
351 		cpuc->saved_ctrl[idx] |=
352 			(1 << (12 + vpe_id())) | M_PERFCTL_TC;
353 
354 	/*
355 	 * We do not actually let the counter run. Leave it until start().
356 	 */
357 }
358 
359 static void mipsxx_pmu_disable_event(int idx)
360 {
361 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
362 	unsigned long flags;
363 
364 	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);
365 
366 	local_irq_save(flags);
367 	cpuc->saved_ctrl[idx] = mipsxx_pmu_read_control(idx) &
368 		~M_PERFCTL_COUNT_EVENT_WHENEVER;
369 	mipsxx_pmu_write_control(idx, cpuc->saved_ctrl[idx]);
370 	local_irq_restore(flags);
371 }
372 
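/*
 * Program the counter so that it overflows once the remaining sample
 * period has elapsed, clamping the period to the maximum value the
 * hardware can count.
 */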
373 static int mipspmu_event_set_period(struct perf_event *event,
374 				    struct hw_perf_event *hwc,
375 				    int idx)
376 {
377 	u64 left = local64_read(&hwc->period_left);
378 	u64 period = hwc->sample_period;
379 	int ret = 0;
380 
381 	if (unlikely((left + period) & (1ULL << 63))) {
382 		/* left underflowed by more than period. */
383 		left = period;
384 		local64_set(&hwc->period_left, left);
385 		hwc->last_period = period;
386 		ret = 1;
387 	} else	if (unlikely((left + period) <= period)) {
388 		/* left underflowed by less than period. */
389 		left += period;
390 		local64_set(&hwc->period_left, left);
391 		hwc->last_period = period;
392 		ret = 1;
393 	}
394 
395 	if (left > mipspmu.max_period) {
396 		left = mipspmu.max_period;
397 		local64_set(&hwc->period_left, left);
398 	}
399 
400 	local64_set(&hwc->prev_count, mipspmu.overflow - left);
401 
402 	mipspmu.write_counter(idx, mipspmu.overflow - left);
403 
404 	perf_event_update_userpage(event);
405 
406 	return ret;
407 }
408 
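/*
 * Read the hardware counter and fold the delta since the last read into
 * the event count and the remaining sample period.
 */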
409 static void mipspmu_event_update(struct perf_event *event,
410 				 struct hw_perf_event *hwc,
411 				 int idx)
412 {
413 	u64 prev_raw_count, new_raw_count;
414 	u64 delta;
415 
416 again:
417 	prev_raw_count = local64_read(&hwc->prev_count);
418 	new_raw_count = mipspmu.read_counter(idx);
419 
420 	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
421 				new_raw_count) != prev_raw_count)
422 		goto again;
423 
424 	delta = new_raw_count - prev_raw_count;
425 
426 	local64_add(delta, &event->count);
427 	local64_sub(delta, &hwc->period_left);
428 }
429 
430 static void mipspmu_start(struct perf_event *event, int flags)
431 {
432 	struct hw_perf_event *hwc = &event->hw;
433 
434 	if (flags & PERF_EF_RELOAD)
435 		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
436 
437 	hwc->state = 0;
438 
439 	/* Set the period for the event. */
440 	mipspmu_event_set_period(event, hwc, hwc->idx);
441 
442 	/* Enable the event. */
443 	mipsxx_pmu_enable_event(hwc, hwc->idx);
444 }
445 
446 static void mipspmu_stop(struct perf_event *event, int flags)
447 {
448 	struct hw_perf_event *hwc = &event->hw;
449 
450 	if (!(hwc->state & PERF_HES_STOPPED)) {
451 		/* We are working on a local event. */
452 		mipsxx_pmu_disable_event(hwc->idx);
453 		barrier();
454 		mipspmu_event_update(event, hwc, hwc->idx);
455 		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
456 	}
457 }
458 
459 static int mipspmu_add(struct perf_event *event, int flags)
460 {
461 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
462 	struct hw_perf_event *hwc = &event->hw;
463 	int idx;
464 	int err = 0;
465 
466 	perf_pmu_disable(event->pmu);
467 
	/* Look for a free counter for this event. */
469 	idx = mipsxx_pmu_alloc_counter(cpuc, hwc);
470 	if (idx < 0) {
471 		err = idx;
472 		goto out;
473 	}
474 
475 	/*
476 	 * If there is an event in the counter we are going to use then
477 	 * make sure it is disabled.
478 	 */
479 	event->hw.idx = idx;
480 	mipsxx_pmu_disable_event(idx);
481 	cpuc->events[idx] = event;
482 
483 	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
484 	if (flags & PERF_EF_START)
485 		mipspmu_start(event, PERF_EF_RELOAD);
486 
487 	/* Propagate our changes to the userspace mapping. */
488 	perf_event_update_userpage(event);
489 
490 out:
491 	perf_pmu_enable(event->pmu);
492 	return err;
493 }
494 
495 static void mipspmu_del(struct perf_event *event, int flags)
496 {
497 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
498 	struct hw_perf_event *hwc = &event->hw;
499 	int idx = hwc->idx;
500 
501 	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);
502 
503 	mipspmu_stop(event, PERF_EF_UPDATE);
504 	cpuc->events[idx] = NULL;
505 	clear_bit(idx, cpuc->used_mask);
506 
507 	perf_event_update_userpage(event);
508 }
509 
510 static void mipspmu_read(struct perf_event *event)
511 {
512 	struct hw_perf_event *hwc = &event->hw;
513 
514 	/* Don't read disabled counters! */
515 	if (hwc->idx < 0)
516 		return;
517 
518 	mipspmu_event_update(event, hwc, hwc->idx);
519 }
520 
521 static void mipspmu_enable(struct pmu *pmu)
522 {
523 #ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
524 	write_unlock(&pmuint_rwlock);
525 #endif
526 	resume_local_counters();
527 }
528 
529 /*
 * MIPS performance counters can be per-TC. The control registers
 * cannot be accessed directly across CPUs, so global control would
 * require cross-CPU calls. on_each_cpu() could help, but we cannot
 * guarantee that this function is called with interrupts enabled.
 * Instead, we pause the local counters, grab an rwlock and leave the
 * counters on other CPUs alone. If a counter interrupt is raised
 * while we hold the write lock, that CPU simply pauses its local
 * counters and spins in the handler. We also know we will not be
 * migrated to another CPU between pausing the local counters and
 * grabbing the lock.
539  */
540 static void mipspmu_disable(struct pmu *pmu)
541 {
542 	pause_local_counters();
543 #ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
544 	write_lock(&pmuint_rwlock);
545 #endif
546 }
547 
548 static atomic_t active_events = ATOMIC_INIT(0);
549 static DEFINE_MUTEX(pmu_reserve_mutex);
550 static int (*save_perf_irq)(void);
551 
552 static int mipspmu_get_irq(void)
553 {
554 	int err;
555 
556 	if (mipspmu.irq >= 0) {
		/* Request our own IRQ handler. */
558 		err = request_irq(mipspmu.irq, mipsxx_pmu_handle_irq,
559 				  IRQF_PERCPU | IRQF_NOBALANCING |
560 				  IRQF_NO_THREAD | IRQF_NO_SUSPEND |
561 				  IRQF_SHARED,
562 				  "mips_perf_pmu", &mipspmu);
563 		if (err) {
564 			pr_warn("Unable to request IRQ%d for MIPS performance counters!\n",
565 				mipspmu.irq);
566 		}
567 	} else if (cp0_perfcount_irq < 0) {
568 		/*
569 		 * We are sharing the irq number with the timer interrupt.
570 		 */
571 		save_perf_irq = perf_irq;
572 		perf_irq = mipsxx_pmu_handle_shared_irq;
573 		err = 0;
574 	} else {
575 		pr_warn("The platform hasn't properly defined its interrupt controller\n");
576 		err = -ENOENT;
577 	}
578 
579 	return err;
580 }
581 
582 static void mipspmu_free_irq(void)
583 {
584 	if (mipspmu.irq >= 0)
585 		free_irq(mipspmu.irq, &mipspmu);
586 	else if (cp0_perfcount_irq < 0)
587 		perf_irq = save_perf_irq;
588 }
589 
590 /*
 * mipsxx/rm9000/loongson2 have different performance counters; each has
 * its own specific low-level init routine.
593  */
594 static void reset_counters(void *arg);
595 static int __hw_perf_event_init(struct perf_event *event);
596 
597 static void hw_perf_event_destroy(struct perf_event *event)
598 {
599 	if (atomic_dec_and_mutex_lock(&active_events,
600 				&pmu_reserve_mutex)) {
601 		/*
602 		 * We must not call the destroy function with interrupts
603 		 * disabled.
604 		 */
605 		on_each_cpu(reset_counters,
606 			(void *)(long)mipspmu.num_counters, 1);
607 		mipspmu_free_irq();
608 		mutex_unlock(&pmu_reserve_mutex);
609 	}
610 }
611 
612 static int mipspmu_event_init(struct perf_event *event)
613 {
614 	int err = 0;
615 
616 	/* does not support taken branch sampling */
617 	if (has_branch_stack(event))
618 		return -EOPNOTSUPP;
619 
620 	switch (event->attr.type) {
621 	case PERF_TYPE_RAW:
622 	case PERF_TYPE_HARDWARE:
623 	case PERF_TYPE_HW_CACHE:
624 		break;
625 
626 	default:
627 		return -ENOENT;
628 	}
629 
630 	if (event->cpu >= nr_cpumask_bits ||
631 	    (event->cpu >= 0 && !cpu_online(event->cpu)))
632 		return -ENODEV;
633 
634 	if (!atomic_inc_not_zero(&active_events)) {
635 		mutex_lock(&pmu_reserve_mutex);
636 		if (atomic_read(&active_events) == 0)
637 			err = mipspmu_get_irq();
638 
639 		if (!err)
640 			atomic_inc(&active_events);
641 		mutex_unlock(&pmu_reserve_mutex);
642 	}
643 
644 	if (err)
645 		return err;
646 
647 	return __hw_perf_event_init(event);
648 }
649 
650 static struct pmu pmu = {
651 	.pmu_enable	= mipspmu_enable,
652 	.pmu_disable	= mipspmu_disable,
653 	.event_init	= mipspmu_event_init,
654 	.add		= mipspmu_add,
655 	.del		= mipspmu_del,
656 	.start		= mipspmu_start,
657 	.stop		= mipspmu_stop,
658 	.read		= mipspmu_read,
659 };
660 
661 static unsigned int mipspmu_perf_event_encode(const struct mips_perf_event *pev)
662 {
663 /*
664  * Top 8 bits for range, next 16 bits for cntr_mask, lowest 8 bits for
665  * event_id.
666  */
667 #ifdef CONFIG_MIPS_MT_SMP
668 	return ((unsigned int)pev->range << 24) |
669 		(pev->cntr_mask & 0xffff00) |
670 		(pev->event_id & 0xff);
671 #else
672 	return (pev->cntr_mask & 0xffff00) |
673 		(pev->event_id & 0xff);
674 #endif
675 }
676 
677 static const struct mips_perf_event *mipspmu_map_general_event(int idx)
678 {
680 	if ((*mipspmu.general_event_map)[idx].cntr_mask == 0)
681 		return ERR_PTR(-EOPNOTSUPP);
682 	return &(*mipspmu.general_event_map)[idx];
683 }
684 
685 static const struct mips_perf_event *mipspmu_map_cache_event(u64 config)
686 {
687 	unsigned int cache_type, cache_op, cache_result;
688 	const struct mips_perf_event *pev;
689 
690 	cache_type = (config >> 0) & 0xff;
691 	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
692 		return ERR_PTR(-EINVAL);
693 
694 	cache_op = (config >> 8) & 0xff;
695 	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
696 		return ERR_PTR(-EINVAL);
697 
698 	cache_result = (config >> 16) & 0xff;
699 	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
700 		return ERR_PTR(-EINVAL);
701 
702 	pev = &((*mipspmu.cache_event_map)
703 					[cache_type]
704 					[cache_op]
705 					[cache_result]);
706 
707 	if (pev->cntr_mask == 0)
708 		return ERR_PTR(-EOPNOTSUPP);
709 
710 	return pev;
712 }
713 
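/*
 * Check that the events in this group can all be scheduled onto the
 * hardware counters at the same time, using a dummy cpu_hw_events so
 * that no real counters are claimed.
 */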
714 static int validate_group(struct perf_event *event)
715 {
716 	struct perf_event *sibling, *leader = event->group_leader;
717 	struct cpu_hw_events fake_cpuc;
718 
719 	memset(&fake_cpuc, 0, sizeof(fake_cpuc));
720 
721 	if (mipsxx_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0)
722 		return -EINVAL;
723 
724 	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
725 		if (mipsxx_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0)
726 			return -EINVAL;
727 	}
728 
729 	if (mipsxx_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0)
730 		return -EINVAL;
731 
732 	return 0;
733 }
734 
735 /* This is needed by specific irq handlers in perf_event_*.c */
736 static void handle_associated_event(struct cpu_hw_events *cpuc,
737 				    int idx, struct perf_sample_data *data,
738 				    struct pt_regs *regs)
739 {
740 	struct perf_event *event = cpuc->events[idx];
741 	struct hw_perf_event *hwc = &event->hw;
742 
743 	mipspmu_event_update(event, hwc, idx);
744 	data->period = event->hw.last_period;
745 	if (!mipspmu_event_set_period(event, hwc, idx))
746 		return;
747 
748 	if (perf_event_overflow(event, data, regs))
749 		mipsxx_pmu_disable_event(idx);
750 }
752 
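/*
 * Probe the number of counters by following the "more counters" (M) bit
 * in each successive control register.
 */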
753 static int __n_counters(void)
754 {
755 	if (!cpu_has_perf)
756 		return 0;
757 	if (!(read_c0_perfctrl0() & M_PERFCTL_MORE))
758 		return 1;
759 	if (!(read_c0_perfctrl1() & M_PERFCTL_MORE))
760 		return 2;
761 	if (!(read_c0_perfctrl2() & M_PERFCTL_MORE))
762 		return 3;
763 
764 	return 4;
765 }
766 
767 static int n_counters(void)
768 {
769 	int counters;
770 
771 	switch (current_cpu_type()) {
772 	case CPU_R10000:
773 		counters = 2;
774 		break;
775 
776 	case CPU_R12000:
777 	case CPU_R14000:
778 	case CPU_R16000:
779 		counters = 4;
780 		break;
781 
782 	default:
783 		counters = __n_counters();
784 	}
785 
786 	return counters;
787 }
788 
789 static void reset_counters(void *arg)
790 {
791 	int counters = (int)(long)arg;
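
	/* Each case falls through to also reset the lower-numbered counters. */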
792 	switch (counters) {
793 	case 4:
794 		mipsxx_pmu_write_control(3, 0);
795 		mipspmu.write_counter(3, 0);
796 	case 3:
797 		mipsxx_pmu_write_control(2, 0);
798 		mipspmu.write_counter(2, 0);
799 	case 2:
800 		mipsxx_pmu_write_control(1, 0);
801 		mipspmu.write_counter(1, 0);
802 	case 1:
803 		mipsxx_pmu_write_control(0, 0);
804 		mipspmu.write_counter(0, 0);
805 	}
806 }
807 
808 /* 24K/34K/1004K/interAptiv/loongson1 cores share the same event map. */
809 static const struct mips_perf_event mipsxxcore_event_map
810 				[PERF_COUNT_HW_MAX] = {
811 	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
812 	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
813 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_EVEN, T },
814 	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
815 };
816 
817 /* 74K/proAptiv core has different branch event code. */
818 static const struct mips_perf_event mipsxxcore_event_map2
819 				[PERF_COUNT_HW_MAX] = {
820 	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
821 	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
822 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x27, CNTR_EVEN, T },
823 	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T },
824 };
825 
826 static const struct mips_perf_event i6400_event_map[PERF_COUNT_HW_MAX] = {
827 	[PERF_COUNT_HW_CPU_CYCLES]          = { 0x00, CNTR_EVEN | CNTR_ODD },
828 	[PERF_COUNT_HW_INSTRUCTIONS]        = { 0x01, CNTR_EVEN | CNTR_ODD },
829 	/* These only count dcache, not icache */
830 	[PERF_COUNT_HW_CACHE_REFERENCES]    = { 0x45, CNTR_EVEN | CNTR_ODD },
831 	[PERF_COUNT_HW_CACHE_MISSES]        = { 0x48, CNTR_EVEN | CNTR_ODD },
832 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x15, CNTR_EVEN | CNTR_ODD },
833 	[PERF_COUNT_HW_BRANCH_MISSES]       = { 0x16, CNTR_EVEN | CNTR_ODD },
834 };
835 
836 static const struct mips_perf_event loongson3_event_map[PERF_COUNT_HW_MAX] = {
837 	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN },
838 	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, CNTR_ODD },
839 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x01, CNTR_EVEN },
840 	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x01, CNTR_ODD },
841 };
842 
843 static const struct mips_perf_event octeon_event_map[PERF_COUNT_HW_MAX] = {
844 	[PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL },
845 	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x03, CNTR_ALL },
846 	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x2b, CNTR_ALL },
847 	[PERF_COUNT_HW_CACHE_MISSES] = { 0x2e, CNTR_ALL	 },
848 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x08, CNTR_ALL },
849 	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x09, CNTR_ALL },
850 	[PERF_COUNT_HW_BUS_CYCLES] = { 0x25, CNTR_ALL },
851 };
852 
853 static const struct mips_perf_event bmips5000_event_map
854 				[PERF_COUNT_HW_MAX] = {
855 	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, T },
856 	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
857 	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
858 };
859 
860 static const struct mips_perf_event xlp_event_map[PERF_COUNT_HW_MAX] = {
861 	[PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL },
862 	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x18, CNTR_ALL }, /* PAPI_TOT_INS */
863 	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x04, CNTR_ALL }, /* PAPI_L1_ICA */
864 	[PERF_COUNT_HW_CACHE_MISSES] = { 0x07, CNTR_ALL }, /* PAPI_L1_ICM */
865 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x1b, CNTR_ALL }, /* PAPI_BR_CN */
866 	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x1c, CNTR_ALL }, /* PAPI_BR_MSP */
867 };
868 
869 /* 24K/34K/1004K/interAptiv/loongson1 cores share the same cache event map. */
870 static const struct mips_perf_event mipsxxcore_cache_map
871 				[PERF_COUNT_HW_CACHE_MAX]
872 				[PERF_COUNT_HW_CACHE_OP_MAX]
873 				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
874 [C(L1D)] = {
875 	/*
876 	 * Like some other architectures (e.g. ARM), the performance
877 	 * counters don't differentiate between read and write
878 	 * accesses/misses, so this isn't strictly correct, but it's the
879 	 * best we can do. Writes and reads get combined.
880 	 */
881 	[C(OP_READ)] = {
882 		[C(RESULT_ACCESS)]	= { 0x0a, CNTR_EVEN, T },
883 		[C(RESULT_MISS)]	= { 0x0b, CNTR_EVEN | CNTR_ODD, T },
884 	},
885 	[C(OP_WRITE)] = {
886 		[C(RESULT_ACCESS)]	= { 0x0a, CNTR_EVEN, T },
887 		[C(RESULT_MISS)]	= { 0x0b, CNTR_EVEN | CNTR_ODD, T },
888 	},
889 },
890 [C(L1I)] = {
891 	[C(OP_READ)] = {
892 		[C(RESULT_ACCESS)]	= { 0x09, CNTR_EVEN, T },
893 		[C(RESULT_MISS)]	= { 0x09, CNTR_ODD, T },
894 	},
895 	[C(OP_WRITE)] = {
896 		[C(RESULT_ACCESS)]	= { 0x09, CNTR_EVEN, T },
897 		[C(RESULT_MISS)]	= { 0x09, CNTR_ODD, T },
898 	},
899 	[C(OP_PREFETCH)] = {
900 		[C(RESULT_ACCESS)]	= { 0x14, CNTR_EVEN, T },
901 		/*
902 		 * Note that MIPS has only "hit" events countable for
903 		 * the prefetch operation.
904 		 */
905 	},
906 },
907 [C(LL)] = {
908 	[C(OP_READ)] = {
909 		[C(RESULT_ACCESS)]	= { 0x15, CNTR_ODD, P },
910 		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN, P },
911 	},
912 	[C(OP_WRITE)] = {
913 		[C(RESULT_ACCESS)]	= { 0x15, CNTR_ODD, P },
914 		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN, P },
915 	},
916 },
917 [C(DTLB)] = {
918 	[C(OP_READ)] = {
919 		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
920 		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
921 	},
922 	[C(OP_WRITE)] = {
923 		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
924 		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
925 	},
926 },
927 [C(ITLB)] = {
928 	[C(OP_READ)] = {
929 		[C(RESULT_ACCESS)]	= { 0x05, CNTR_EVEN, T },
930 		[C(RESULT_MISS)]	= { 0x05, CNTR_ODD, T },
931 	},
932 	[C(OP_WRITE)] = {
933 		[C(RESULT_ACCESS)]	= { 0x05, CNTR_EVEN, T },
934 		[C(RESULT_MISS)]	= { 0x05, CNTR_ODD, T },
935 	},
936 },
937 [C(BPU)] = {
938 	/* Using the same code for *HW_BRANCH* */
939 	[C(OP_READ)] = {
940 		[C(RESULT_ACCESS)]	= { 0x02, CNTR_EVEN, T },
941 		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
942 	},
943 	[C(OP_WRITE)] = {
944 		[C(RESULT_ACCESS)]	= { 0x02, CNTR_EVEN, T },
945 		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
946 	},
947 },
948 };
949 
950 /* 74K/proAptiv core has completely different cache event map. */
951 static const struct mips_perf_event mipsxxcore_cache_map2
952 				[PERF_COUNT_HW_CACHE_MAX]
953 				[PERF_COUNT_HW_CACHE_OP_MAX]
954 				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
955 [C(L1D)] = {
956 	/*
957 	 * Like some other architectures (e.g. ARM), the performance
958 	 * counters don't differentiate between read and write
959 	 * accesses/misses, so this isn't strictly correct, but it's the
960 	 * best we can do. Writes and reads get combined.
961 	 */
962 	[C(OP_READ)] = {
963 		[C(RESULT_ACCESS)]	= { 0x17, CNTR_ODD, T },
964 		[C(RESULT_MISS)]	= { 0x18, CNTR_ODD, T },
965 	},
966 	[C(OP_WRITE)] = {
967 		[C(RESULT_ACCESS)]	= { 0x17, CNTR_ODD, T },
968 		[C(RESULT_MISS)]	= { 0x18, CNTR_ODD, T },
969 	},
970 },
971 [C(L1I)] = {
972 	[C(OP_READ)] = {
973 		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
974 		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
975 	},
976 	[C(OP_WRITE)] = {
977 		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
978 		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
979 	},
980 	[C(OP_PREFETCH)] = {
981 		[C(RESULT_ACCESS)]	= { 0x34, CNTR_EVEN, T },
982 		/*
983 		 * Note that MIPS has only "hit" events countable for
984 		 * the prefetch operation.
985 		 */
986 	},
987 },
988 [C(LL)] = {
989 	[C(OP_READ)] = {
990 		[C(RESULT_ACCESS)]	= { 0x1c, CNTR_ODD, P },
991 		[C(RESULT_MISS)]	= { 0x1d, CNTR_EVEN, P },
992 	},
993 	[C(OP_WRITE)] = {
994 		[C(RESULT_ACCESS)]	= { 0x1c, CNTR_ODD, P },
995 		[C(RESULT_MISS)]	= { 0x1d, CNTR_EVEN, P },
996 	},
997 },
998 /*
999  * 74K core does not have specific DTLB events. proAptiv core has
1000  * "speculative" DTLB events which are numbered 0x63 (even/odd) and
1001  * not included here. One can use raw events if really needed.
1002  */
1003 [C(ITLB)] = {
1004 	[C(OP_READ)] = {
1005 		[C(RESULT_ACCESS)]	= { 0x04, CNTR_EVEN, T },
1006 		[C(RESULT_MISS)]	= { 0x04, CNTR_ODD, T },
1007 	},
1008 	[C(OP_WRITE)] = {
1009 		[C(RESULT_ACCESS)]	= { 0x04, CNTR_EVEN, T },
1010 		[C(RESULT_MISS)]	= { 0x04, CNTR_ODD, T },
1011 	},
1012 },
1013 [C(BPU)] = {
1014 	/* Using the same code for *HW_BRANCH* */
1015 	[C(OP_READ)] = {
1016 		[C(RESULT_ACCESS)]	= { 0x27, CNTR_EVEN, T },
1017 		[C(RESULT_MISS)]	= { 0x27, CNTR_ODD, T },
1018 	},
1019 	[C(OP_WRITE)] = {
1020 		[C(RESULT_ACCESS)]	= { 0x27, CNTR_EVEN, T },
1021 		[C(RESULT_MISS)]	= { 0x27, CNTR_ODD, T },
1022 	},
1023 },
1024 };
1025 
1026 static const struct mips_perf_event i6400_cache_map
1027 				[PERF_COUNT_HW_CACHE_MAX]
1028 				[PERF_COUNT_HW_CACHE_OP_MAX]
1029 				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1030 [C(L1D)] = {
1031 	[C(OP_READ)] = {
1032 		[C(RESULT_ACCESS)]	= { 0x46, CNTR_EVEN | CNTR_ODD },
1033 		[C(RESULT_MISS)]	= { 0x49, CNTR_EVEN | CNTR_ODD },
1034 	},
1035 	[C(OP_WRITE)] = {
1036 		[C(RESULT_ACCESS)]	= { 0x47, CNTR_EVEN | CNTR_ODD },
1037 		[C(RESULT_MISS)]	= { 0x4a, CNTR_EVEN | CNTR_ODD },
1038 	},
1039 },
1040 [C(L1I)] = {
1041 	[C(OP_READ)] = {
1042 		[C(RESULT_ACCESS)]	= { 0x84, CNTR_EVEN | CNTR_ODD },
1043 		[C(RESULT_MISS)]	= { 0x85, CNTR_EVEN | CNTR_ODD },
1044 	},
1045 },
1046 [C(DTLB)] = {
1047 	/* Can't distinguish read & write */
1048 	[C(OP_READ)] = {
1049 		[C(RESULT_ACCESS)]	= { 0x40, CNTR_EVEN | CNTR_ODD },
1050 		[C(RESULT_MISS)]	= { 0x41, CNTR_EVEN | CNTR_ODD },
1051 	},
1052 	[C(OP_WRITE)] = {
1053 		[C(RESULT_ACCESS)]	= { 0x40, CNTR_EVEN | CNTR_ODD },
1054 		[C(RESULT_MISS)]	= { 0x41, CNTR_EVEN | CNTR_ODD },
1055 	},
1056 },
1057 [C(BPU)] = {
1058 	/* Conditional branches / mispredicted */
1059 	[C(OP_READ)] = {
1060 		[C(RESULT_ACCESS)]	= { 0x15, CNTR_EVEN | CNTR_ODD },
1061 		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN | CNTR_ODD },
1062 	},
1063 },
1064 };
1065 
1066 static const struct mips_perf_event loongson3_cache_map
1067 				[PERF_COUNT_HW_CACHE_MAX]
1068 				[PERF_COUNT_HW_CACHE_OP_MAX]
1069 				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1070 [C(L1D)] = {
1071 	/*
1072 	 * Like some other architectures (e.g. ARM), the performance
1073 	 * counters don't differentiate between read and write
1074 	 * accesses/misses, so this isn't strictly correct, but it's the
1075 	 * best we can do. Writes and reads get combined.
1076 	 */
1077 	[C(OP_READ)] = {
1078 		[C(RESULT_MISS)]        = { 0x04, CNTR_ODD },
1079 	},
1080 	[C(OP_WRITE)] = {
1081 		[C(RESULT_MISS)]        = { 0x04, CNTR_ODD },
1082 	},
1083 },
1084 [C(L1I)] = {
1085 	[C(OP_READ)] = {
1086 		[C(RESULT_MISS)]        = { 0x04, CNTR_EVEN },
1087 	},
1088 	[C(OP_WRITE)] = {
1089 		[C(RESULT_MISS)]        = { 0x04, CNTR_EVEN },
1090 	},
1091 },
1092 [C(DTLB)] = {
1093 	[C(OP_READ)] = {
1094 		[C(RESULT_MISS)]        = { 0x09, CNTR_ODD },
1095 	},
1096 	[C(OP_WRITE)] = {
1097 		[C(RESULT_MISS)]        = { 0x09, CNTR_ODD },
1098 	},
1099 },
1100 [C(ITLB)] = {
1101 	[C(OP_READ)] = {
1102 		[C(RESULT_MISS)]        = { 0x0c, CNTR_ODD },
1103 	},
1104 	[C(OP_WRITE)] = {
1105 		[C(RESULT_MISS)]        = { 0x0c, CNTR_ODD },
1106 	},
1107 },
1108 [C(BPU)] = {
1109 	/* Using the same code for *HW_BRANCH* */
1110 	[C(OP_READ)] = {
1111 		[C(RESULT_ACCESS)]      = { 0x02, CNTR_EVEN },
1112 		[C(RESULT_MISS)]        = { 0x02, CNTR_ODD },
1113 	},
1114 	[C(OP_WRITE)] = {
1115 		[C(RESULT_ACCESS)]      = { 0x02, CNTR_EVEN },
1116 		[C(RESULT_MISS)]        = { 0x02, CNTR_ODD },
1117 	},
1118 },
1119 };
1120 
1121 /* BMIPS5000 */
1122 static const struct mips_perf_event bmips5000_cache_map
1123 				[PERF_COUNT_HW_CACHE_MAX]
1124 				[PERF_COUNT_HW_CACHE_OP_MAX]
1125 				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1126 [C(L1D)] = {
1127 	/*
1128 	 * Like some other architectures (e.g. ARM), the performance
1129 	 * counters don't differentiate between read and write
1130 	 * accesses/misses, so this isn't strictly correct, but it's the
1131 	 * best we can do. Writes and reads get combined.
1132 	 */
1133 	[C(OP_READ)] = {
1134 		[C(RESULT_ACCESS)]	= { 12, CNTR_EVEN, T },
1135 		[C(RESULT_MISS)]	= { 12, CNTR_ODD, T },
1136 	},
1137 	[C(OP_WRITE)] = {
1138 		[C(RESULT_ACCESS)]	= { 12, CNTR_EVEN, T },
1139 		[C(RESULT_MISS)]	= { 12, CNTR_ODD, T },
1140 	},
1141 },
1142 [C(L1I)] = {
1143 	[C(OP_READ)] = {
1144 		[C(RESULT_ACCESS)]	= { 10, CNTR_EVEN, T },
1145 		[C(RESULT_MISS)]	= { 10, CNTR_ODD, T },
1146 	},
1147 	[C(OP_WRITE)] = {
1148 		[C(RESULT_ACCESS)]	= { 10, CNTR_EVEN, T },
1149 		[C(RESULT_MISS)]	= { 10, CNTR_ODD, T },
1150 	},
1151 	[C(OP_PREFETCH)] = {
1152 		[C(RESULT_ACCESS)]	= { 23, CNTR_EVEN, T },
1153 		/*
1154 		 * Note that MIPS has only "hit" events countable for
1155 		 * the prefetch operation.
1156 		 */
1157 	},
1158 },
1159 [C(LL)] = {
1160 	[C(OP_READ)] = {
1161 		[C(RESULT_ACCESS)]	= { 28, CNTR_EVEN, P },
1162 		[C(RESULT_MISS)]	= { 28, CNTR_ODD, P },
1163 	},
1164 	[C(OP_WRITE)] = {
1165 		[C(RESULT_ACCESS)]	= { 28, CNTR_EVEN, P },
1166 		[C(RESULT_MISS)]	= { 28, CNTR_ODD, P },
1167 	},
1168 },
1169 [C(BPU)] = {
1170 	/* Using the same code for *HW_BRANCH* */
1171 	[C(OP_READ)] = {
1172 		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
1173 	},
1174 	[C(OP_WRITE)] = {
1175 		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
1176 	},
1177 },
1178 };
1179 
1181 static const struct mips_perf_event octeon_cache_map
1182 				[PERF_COUNT_HW_CACHE_MAX]
1183 				[PERF_COUNT_HW_CACHE_OP_MAX]
1184 				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1185 [C(L1D)] = {
1186 	[C(OP_READ)] = {
1187 		[C(RESULT_ACCESS)]	= { 0x2b, CNTR_ALL },
1188 		[C(RESULT_MISS)]	= { 0x2e, CNTR_ALL },
1189 	},
1190 	[C(OP_WRITE)] = {
1191 		[C(RESULT_ACCESS)]	= { 0x30, CNTR_ALL },
1192 	},
1193 },
1194 [C(L1I)] = {
1195 	[C(OP_READ)] = {
1196 		[C(RESULT_ACCESS)]	= { 0x18, CNTR_ALL },
1197 	},
1198 	[C(OP_PREFETCH)] = {
1199 		[C(RESULT_ACCESS)]	= { 0x19, CNTR_ALL },
1200 	},
1201 },
1202 [C(DTLB)] = {
1203 	/*
	 * Only general DTLB misses are counted, so use the same event
	 * for read and write.
1206 	 */
1207 	[C(OP_READ)] = {
1208 		[C(RESULT_MISS)]	= { 0x35, CNTR_ALL },
1209 	},
1210 	[C(OP_WRITE)] = {
1211 		[C(RESULT_MISS)]	= { 0x35, CNTR_ALL },
1212 	},
1213 },
1214 [C(ITLB)] = {
1215 	[C(OP_READ)] = {
1216 		[C(RESULT_MISS)]	= { 0x37, CNTR_ALL },
1217 	},
1218 },
1219 };
1220 
1221 static const struct mips_perf_event xlp_cache_map
1222 				[PERF_COUNT_HW_CACHE_MAX]
1223 				[PERF_COUNT_HW_CACHE_OP_MAX]
1224 				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1225 [C(L1D)] = {
1226 	[C(OP_READ)] = {
1227 		[C(RESULT_ACCESS)]	= { 0x31, CNTR_ALL }, /* PAPI_L1_DCR */
1228 		[C(RESULT_MISS)]	= { 0x30, CNTR_ALL }, /* PAPI_L1_LDM */
1229 	},
1230 	[C(OP_WRITE)] = {
1231 		[C(RESULT_ACCESS)]	= { 0x2f, CNTR_ALL }, /* PAPI_L1_DCW */
1232 		[C(RESULT_MISS)]	= { 0x2e, CNTR_ALL }, /* PAPI_L1_STM */
1233 	},
1234 },
1235 [C(L1I)] = {
1236 	[C(OP_READ)] = {
1237 		[C(RESULT_ACCESS)]	= { 0x04, CNTR_ALL }, /* PAPI_L1_ICA */
1238 		[C(RESULT_MISS)]	= { 0x07, CNTR_ALL }, /* PAPI_L1_ICM */
1239 	},
1240 },
1241 [C(LL)] = {
1242 	[C(OP_READ)] = {
1243 		[C(RESULT_ACCESS)]	= { 0x35, CNTR_ALL }, /* PAPI_L2_DCR */
1244 		[C(RESULT_MISS)]	= { 0x37, CNTR_ALL }, /* PAPI_L2_LDM */
1245 	},
1246 	[C(OP_WRITE)] = {
1247 		[C(RESULT_ACCESS)]	= { 0x34, CNTR_ALL }, /* PAPI_L2_DCA */
1248 		[C(RESULT_MISS)]	= { 0x36, CNTR_ALL }, /* PAPI_L2_DCM */
1249 	},
1250 },
1251 [C(DTLB)] = {
1252 	/*
	 * Only general DTLB misses are counted, so use the same event
	 * for read and write.
1255 	 */
1256 	[C(OP_READ)] = {
1257 		[C(RESULT_MISS)]	= { 0x2d, CNTR_ALL }, /* PAPI_TLB_DM */
1258 	},
1259 	[C(OP_WRITE)] = {
1260 		[C(RESULT_MISS)]	= { 0x2d, CNTR_ALL }, /* PAPI_TLB_DM */
1261 	},
1262 },
1263 [C(ITLB)] = {
1264 	[C(OP_READ)] = {
1265 		[C(RESULT_MISS)]	= { 0x08, CNTR_ALL }, /* PAPI_TLB_IM */
1266 	},
1267 	[C(OP_WRITE)] = {
1268 		[C(RESULT_MISS)]	= { 0x08, CNTR_ALL }, /* PAPI_TLB_IM */
1269 	},
1270 },
1271 [C(BPU)] = {
1272 	[C(OP_READ)] = {
1273 		[C(RESULT_MISS)]	= { 0x25, CNTR_ALL },
1274 	},
1275 },
1276 };
1277 
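/*
 * On MT cores, decide whether the event should count across all TCs of
 * the processor or only the TCs belonging to the VPE the event is bound
 * to.
 */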
1278 #ifdef CONFIG_MIPS_MT_SMP
1279 static void check_and_calc_range(struct perf_event *event,
1280 				 const struct mips_perf_event *pev)
1281 {
1282 	struct hw_perf_event *hwc = &event->hw;
1283 
1284 	if (event->cpu >= 0) {
1285 		if (pev->range > V) {
1286 			/*
1287 			 * The user selected an event that is processor
1288 			 * wide, while expecting it to be VPE wide.
1289 			 */
1290 			hwc->config_base |= M_TC_EN_ALL;
1291 		} else {
1292 			/*
1293 			 * FIXME: cpu_data[event->cpu].vpe_id reports 0
1294 			 * for both CPUs.
1295 			 */
1296 			hwc->config_base |= M_PERFCTL_VPEID(event->cpu);
1297 			hwc->config_base |= M_TC_EN_VPE;
1298 		}
1299 	} else
1300 		hwc->config_base |= M_TC_EN_ALL;
1301 }
1302 #else
1303 static void check_and_calc_range(struct perf_event *event,
1304 				 const struct mips_perf_event *pev)
1305 {
1306 }
1307 #endif
1308 
1309 static int __hw_perf_event_init(struct perf_event *event)
1310 {
1311 	struct perf_event_attr *attr = &event->attr;
1312 	struct hw_perf_event *hwc = &event->hw;
1313 	const struct mips_perf_event *pev;
1314 	int err;
1315 
	/* Return the MIPS event descriptor for a generic perf event. */
1317 	if (PERF_TYPE_HARDWARE == event->attr.type) {
1318 		if (event->attr.config >= PERF_COUNT_HW_MAX)
1319 			return -EINVAL;
1320 		pev = mipspmu_map_general_event(event->attr.config);
1321 	} else if (PERF_TYPE_HW_CACHE == event->attr.type) {
1322 		pev = mipspmu_map_cache_event(event->attr.config);
1323 	} else if (PERF_TYPE_RAW == event->attr.type) {
1324 		/* We are working on the global raw event. */
1325 		mutex_lock(&raw_event_mutex);
1326 		pev = mipspmu.map_raw_event(event->attr.config);
1327 	} else {
1328 		/* The event type is not (yet) supported. */
1329 		return -EOPNOTSUPP;
1330 	}
1331 
1332 	if (IS_ERR(pev)) {
1333 		if (PERF_TYPE_RAW == event->attr.type)
1334 			mutex_unlock(&raw_event_mutex);
1335 		return PTR_ERR(pev);
1336 	}
1337 
1338 	/*
	 * We allow maximum flexibility in how each individual counter
	 * shared by a single CPU operates (mode exclusion and range).
1341 	 */
1342 	hwc->config_base = M_PERFCTL_INTERRUPT_ENABLE;
1343 
1344 	/* Calculate range bits and validate it. */
1345 	if (num_possible_cpus() > 1)
1346 		check_and_calc_range(event, pev);
1347 
1348 	hwc->event_base = mipspmu_perf_event_encode(pev);
1349 	if (PERF_TYPE_RAW == event->attr.type)
1350 		mutex_unlock(&raw_event_mutex);
1351 
1352 	if (!attr->exclude_user)
1353 		hwc->config_base |= M_PERFCTL_USER;
1354 	if (!attr->exclude_kernel) {
1355 		hwc->config_base |= M_PERFCTL_KERNEL;
1356 		/* MIPS kernel mode: KSU == 00b || EXL == 1 || ERL == 1 */
1357 		hwc->config_base |= M_PERFCTL_EXL;
1358 	}
1359 	if (!attr->exclude_hv)
1360 		hwc->config_base |= M_PERFCTL_SUPERVISOR;
1361 
1362 	hwc->config_base &= M_PERFCTL_CONFIG_MASK;
1363 	/*
1364 	 * The event can belong to another cpu. We do not assign a local
1365 	 * counter for it for now.
1366 	 */
1367 	hwc->idx = -1;
1368 	hwc->config = 0;
1369 
1370 	if (!hwc->sample_period) {
1371 		hwc->sample_period  = mipspmu.max_period;
1372 		hwc->last_period    = hwc->sample_period;
1373 		local64_set(&hwc->period_left, hwc->sample_period);
1374 	}
1375 
1376 	err = 0;
1377 	if (event->group_leader != event)
1378 		err = validate_group(event);
1379 
1380 	event->destroy = hw_perf_event_destroy;
1381 
1382 	if (err)
1383 		event->destroy(event);
1384 
1385 	return err;
1386 }
1387 
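/*
 * Stop all counters on the local CPU, saving their control registers so
 * that resume_local_counters() can restart them later.
 */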
1388 static void pause_local_counters(void)
1389 {
1390 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1391 	int ctr = mipspmu.num_counters;
1392 	unsigned long flags;
1393 
1394 	local_irq_save(flags);
1395 	do {
1396 		ctr--;
1397 		cpuc->saved_ctrl[ctr] = mipsxx_pmu_read_control(ctr);
1398 		mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr] &
1399 					 ~M_PERFCTL_COUNT_EVENT_WHENEVER);
1400 	} while (ctr > 0);
1401 	local_irq_restore(flags);
1402 }
1403 
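/* Restore the control registers saved by pause_local_counters(). */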
1404 static void resume_local_counters(void)
1405 {
1406 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1407 	int ctr = mipspmu.num_counters;
1408 
1409 	do {
1410 		ctr--;
1411 		mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr]);
1412 	} while (ctr > 0);
1413 }
1414 
1415 static int mipsxx_pmu_handle_shared_irq(void)
1416 {
1417 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1418 	struct perf_sample_data data;
1419 	unsigned int counters = mipspmu.num_counters;
1420 	u64 counter;
1421 	int handled = IRQ_NONE;
1422 	struct pt_regs *regs;
1423 
1424 	if (cpu_has_perf_cntr_intr_bit && !(read_c0_cause() & CAUSEF_PCI))
1425 		return handled;
1426 	/*
1427 	 * First we pause the local counters, so that when we are locked
1428 	 * here, the counters are all paused. When it gets locked due to
1429 	 * perf_disable(), the timer interrupt handler will be delayed.
1430 	 *
1431 	 * See also mipsxx_pmu_start().
1432 	 */
1433 	pause_local_counters();
1434 #ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
1435 	read_lock(&pmuint_rwlock);
1436 #endif
1437 
1438 	regs = get_irq_regs();
1439 
1440 	perf_sample_data_init(&data, 0, 0);
1441 
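	/*
	 * Check each implemented counter for overflow, highest index
	 * first; each case falls through to also handle the lower
	 * counters.
	 */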
1442 	switch (counters) {
1443 #define HANDLE_COUNTER(n)						\
1444 	case n + 1:							\
1445 		if (test_bit(n, cpuc->used_mask)) {			\
1446 			counter = mipspmu.read_counter(n);		\
1447 			if (counter & mipspmu.overflow) {		\
1448 				handle_associated_event(cpuc, n, &data, regs); \
1449 				handled = IRQ_HANDLED;			\
1450 			}						\
1451 		}
1452 	HANDLE_COUNTER(3)
1453 	HANDLE_COUNTER(2)
1454 	HANDLE_COUNTER(1)
1455 	HANDLE_COUNTER(0)
1456 	}
1457 
1458 	/*
1459 	 * Do all the work for the pending perf events. We can do this
1460 	 * in here because the performance counter interrupt is a regular
1461 	 * interrupt, not NMI.
1462 	 */
1463 	if (handled == IRQ_HANDLED)
1464 		irq_work_run();
1465 
1466 #ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
1467 	read_unlock(&pmuint_rwlock);
1468 #endif
1469 	resume_local_counters();
1470 	return handled;
1471 }
1472 
1473 static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev)
1474 {
1475 	return mipsxx_pmu_handle_shared_irq();
1476 }
1477 
1478 /* 24K */
1479 #define IS_BOTH_COUNTERS_24K_EVENT(b)					\
1480 	((b) == 0 || (b) == 1 || (b) == 11)
1481 
1482 /* 34K */
1483 #define IS_BOTH_COUNTERS_34K_EVENT(b)					\
1484 	((b) == 0 || (b) == 1 || (b) == 11)
1485 #ifdef CONFIG_MIPS_MT_SMP
1486 #define IS_RANGE_P_34K_EVENT(r, b)					\
1487 	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
1488 	 (b) == 25 || (b) == 39 || (r) == 44 || (r) == 174 ||		\
1489 	 (r) == 176 || ((b) >= 50 && (b) <= 55) ||			\
1490 	 ((b) >= 64 && (b) <= 67))
1491 #define IS_RANGE_V_34K_EVENT(r) ((r) == 47)
1492 #endif
1493 
1494 /* 74K */
1495 #define IS_BOTH_COUNTERS_74K_EVENT(b)					\
1496 	((b) == 0 || (b) == 1)
1497 
1498 /* proAptiv */
1499 #define IS_BOTH_COUNTERS_PROAPTIV_EVENT(b)				\
1500 	((b) == 0 || (b) == 1)
1501 /* P5600 */
1502 #define IS_BOTH_COUNTERS_P5600_EVENT(b)					\
1503 	((b) == 0 || (b) == 1)
1504 
1505 /* 1004K */
1506 #define IS_BOTH_COUNTERS_1004K_EVENT(b)					\
1507 	((b) == 0 || (b) == 1 || (b) == 11)
1508 #ifdef CONFIG_MIPS_MT_SMP
1509 #define IS_RANGE_P_1004K_EVENT(r, b)					\
1510 	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
1511 	 (b) == 25 || (b) == 36 || (b) == 39 || (r) == 44 ||		\
1512 	 (r) == 174 || (r) == 176 || ((b) >= 50 && (b) <= 59) ||	\
1513 	 (r) == 188 || (b) == 61 || (b) == 62 ||			\
1514 	 ((b) >= 64 && (b) <= 67))
1515 #define IS_RANGE_V_1004K_EVENT(r)	((r) == 47)
1516 #endif
1517 
1518 /* interAptiv */
1519 #define IS_BOTH_COUNTERS_INTERAPTIV_EVENT(b)				\
1520 	((b) == 0 || (b) == 1 || (b) == 11)
1521 #ifdef CONFIG_MIPS_MT_SMP
1522 /* The P/V/T info is not provided for "(b) == 38" in SUM, assume P. */
1523 #define IS_RANGE_P_INTERAPTIV_EVENT(r, b)				\
1524 	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
1525 	 (b) == 25 || (b) == 36 || (b) == 38 || (b) == 39 ||		\
1526 	 (r) == 44 || (r) == 174 || (r) == 176 || ((b) >= 50 &&		\
1527 	 (b) <= 59) || (r) == 188 || (b) == 61 || (b) == 62 ||		\
1528 	 ((b) >= 64 && (b) <= 67))
1529 #define IS_RANGE_V_INTERAPTIV_EVENT(r)	((r) == 47 || (r) == 175)
1530 #endif
1531 
1532 /* BMIPS5000 */
1533 #define IS_BOTH_COUNTERS_BMIPS5000_EVENT(b)				\
1534 	((b) == 0 || (b) == 1)
1535 
1537 /*
 * For most cores the user can use raw events 0-255, where 0-127 are for the
 * events of even counters and 128-255 are for odd counters; bit 7 selects the
 * even/odd bank. For example, to count Event Num 15 on an odd counter (as
 * listed in the user manual), the user adds 128 to 15 and passes 143 (0x8f)
 * as the event config.
 *
 * Some newer cores have even more events, in which case the user can use raw
 * events 0-511, where 0-255 are for the events of even counters and 256-511
 * are for odd counters, so bit 8 selects the even/odd bank.
1548  */
1549 static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
1550 {
1551 	/* currently most cores have 7-bit event numbers */
1552 	unsigned int raw_id = config & 0xff;
1553 	unsigned int base_id = raw_id & 0x7f;
1554 
1555 	switch (current_cpu_type()) {
1556 	case CPU_24K:
1557 		if (IS_BOTH_COUNTERS_24K_EVENT(base_id))
1558 			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1559 		else
1560 			raw_event.cntr_mask =
1561 				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1562 #ifdef CONFIG_MIPS_MT_SMP
1563 		/*
1564 		 * This is actually doing nothing. Non-multithreading
1565 		 * CPUs will not check and calculate the range.
1566 		 */
1567 		raw_event.range = P;
1568 #endif
1569 		break;
1570 	case CPU_34K:
1571 		if (IS_BOTH_COUNTERS_34K_EVENT(base_id))
1572 			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1573 		else
1574 			raw_event.cntr_mask =
1575 				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1576 #ifdef CONFIG_MIPS_MT_SMP
1577 		if (IS_RANGE_P_34K_EVENT(raw_id, base_id))
1578 			raw_event.range = P;
1579 		else if (unlikely(IS_RANGE_V_34K_EVENT(raw_id)))
1580 			raw_event.range = V;
1581 		else
1582 			raw_event.range = T;
1583 #endif
1584 		break;
1585 	case CPU_74K:
1586 	case CPU_1074K:
1587 		if (IS_BOTH_COUNTERS_74K_EVENT(base_id))
1588 			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1589 		else
1590 			raw_event.cntr_mask =
1591 				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1592 #ifdef CONFIG_MIPS_MT_SMP
1593 		raw_event.range = P;
1594 #endif
1595 		break;
1596 	case CPU_PROAPTIV:
1597 		if (IS_BOTH_COUNTERS_PROAPTIV_EVENT(base_id))
1598 			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1599 		else
1600 			raw_event.cntr_mask =
1601 				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1602 #ifdef CONFIG_MIPS_MT_SMP
1603 		raw_event.range = P;
1604 #endif
1605 		break;
1606 	case CPU_P5600:
1607 	case CPU_P6600:
1608 	case CPU_I6400:
1609 		/* 8-bit event numbers */
1610 		raw_id = config & 0x1ff;
1611 		base_id = raw_id & 0xff;
1612 		if (IS_BOTH_COUNTERS_P5600_EVENT(base_id))
1613 			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1614 		else
1615 			raw_event.cntr_mask =
1616 				raw_id > 255 ? CNTR_ODD : CNTR_EVEN;
1617 #ifdef CONFIG_MIPS_MT_SMP
1618 		raw_event.range = P;
1619 #endif
1620 		break;
1621 	case CPU_1004K:
1622 		if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
1623 			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1624 		else
1625 			raw_event.cntr_mask =
1626 				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1627 #ifdef CONFIG_MIPS_MT_SMP
1628 		if (IS_RANGE_P_1004K_EVENT(raw_id, base_id))
1629 			raw_event.range = P;
1630 		else if (unlikely(IS_RANGE_V_1004K_EVENT(raw_id)))
1631 			raw_event.range = V;
1632 		else
1633 			raw_event.range = T;
1634 #endif
1635 		break;
1636 	case CPU_INTERAPTIV:
1637 		if (IS_BOTH_COUNTERS_INTERAPTIV_EVENT(base_id))
1638 			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1639 		else
1640 			raw_event.cntr_mask =
1641 				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1642 #ifdef CONFIG_MIPS_MT_SMP
1643 		if (IS_RANGE_P_INTERAPTIV_EVENT(raw_id, base_id))
1644 			raw_event.range = P;
1645 		else if (unlikely(IS_RANGE_V_INTERAPTIV_EVENT(raw_id)))
1646 			raw_event.range = V;
1647 		else
1648 			raw_event.range = T;
1649 #endif
1650 		break;
1651 	case CPU_BMIPS5000:
1652 		if (IS_BOTH_COUNTERS_BMIPS5000_EVENT(base_id))
1653 			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1654 		else
1655 			raw_event.cntr_mask =
1656 				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1657 		break;
1658 	case CPU_LOONGSON3:
1659 		raw_event.cntr_mask = raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
		break;
1661 	}
1662 
1663 	raw_event.event_id = base_id;
1664 
1665 	return &raw_event;
1666 }
1667 
1668 static const struct mips_perf_event *octeon_pmu_map_raw_event(u64 config)
1669 {
1670 	unsigned int raw_id = config & 0xff;
1671 	unsigned int base_id = raw_id & 0x7f;
1672 
1674 	raw_event.cntr_mask = CNTR_ALL;
1675 	raw_event.event_id = base_id;
1676 
1677 	if (current_cpu_type() == CPU_CAVIUM_OCTEON2) {
1678 		if (base_id > 0x42)
1679 			return ERR_PTR(-EOPNOTSUPP);
1680 	} else {
1681 		if (base_id > 0x3a)
1682 			return ERR_PTR(-EOPNOTSUPP);
1683 	}
1684 
1685 	switch (base_id) {
1686 	case 0x00:
1687 	case 0x0f:
1688 	case 0x1e:
1689 	case 0x1f:
1690 	case 0x2f:
1691 	case 0x34:
1692 	case 0x3b ... 0x3f:
1693 		return ERR_PTR(-EOPNOTSUPP);
1694 	default:
1695 		break;
1696 	}
1697 
1698 	return &raw_event;
1699 }
1700 
1701 static const struct mips_perf_event *xlp_pmu_map_raw_event(u64 config)
1702 {
1703 	unsigned int raw_id = config & 0xff;
1704 
1705 	/* Only 1-63 are defined */
1706 	if ((raw_id < 0x01) || (raw_id > 0x3f))
1707 		return ERR_PTR(-EOPNOTSUPP);
1708 
1709 	raw_event.cntr_mask = CNTR_ALL;
1710 	raw_event.event_id = raw_id;
1711 
1712 	return &raw_event;
1713 }
1714 
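/*
 * Probe the PMU at boot: determine the number and width of the counters,
 * select the per-core event maps, and register the "cpu" PMU with the
 * perf core.
 */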
1715 static int __init
1716 init_hw_perf_events(void)
1717 {
1718 	int counters, irq;
1719 	int counter_bits;
1720 
1721 	pr_info("Performance counters: ");
1722 
1723 	counters = n_counters();
1724 	if (counters == 0) {
1725 		pr_cont("No available PMU.\n");
1726 		return -ENODEV;
1727 	}
1728 
1729 #ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
1730 	cpu_has_mipsmt_pertccounters = read_c0_config7() & (1<<19);
1731 	if (!cpu_has_mipsmt_pertccounters)
1732 		counters = counters_total_to_per_cpu(counters);
1733 #endif
1734 
1735 	if (get_c0_perfcount_int)
1736 		irq = get_c0_perfcount_int();
1737 	else if (cp0_perfcount_irq >= 0)
1738 		irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
1739 	else
1740 		irq = -1;
1741 
1742 	mipspmu.map_raw_event = mipsxx_pmu_map_raw_event;
1743 
1744 	switch (current_cpu_type()) {
1745 	case CPU_24K:
1746 		mipspmu.name = "mips/24K";
1747 		mipspmu.general_event_map = &mipsxxcore_event_map;
1748 		mipspmu.cache_event_map = &mipsxxcore_cache_map;
1749 		break;
1750 	case CPU_34K:
1751 		mipspmu.name = "mips/34K";
1752 		mipspmu.general_event_map = &mipsxxcore_event_map;
1753 		mipspmu.cache_event_map = &mipsxxcore_cache_map;
1754 		break;
1755 	case CPU_74K:
1756 		mipspmu.name = "mips/74K";
1757 		mipspmu.general_event_map = &mipsxxcore_event_map2;
1758 		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
1759 		break;
1760 	case CPU_PROAPTIV:
1761 		mipspmu.name = "mips/proAptiv";
1762 		mipspmu.general_event_map = &mipsxxcore_event_map2;
1763 		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
1764 		break;
1765 	case CPU_P5600:
1766 		mipspmu.name = "mips/P5600";
1767 		mipspmu.general_event_map = &mipsxxcore_event_map2;
1768 		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
1769 		break;
1770 	case CPU_P6600:
1771 		mipspmu.name = "mips/P6600";
1772 		mipspmu.general_event_map = &mipsxxcore_event_map2;
1773 		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
1774 		break;
1775 	case CPU_I6400:
1776 		mipspmu.name = "mips/I6400";
1777 		mipspmu.general_event_map = &i6400_event_map;
1778 		mipspmu.cache_event_map = &i6400_cache_map;
1779 		break;
1780 	case CPU_1004K:
1781 		mipspmu.name = "mips/1004K";
1782 		mipspmu.general_event_map = &mipsxxcore_event_map;
1783 		mipspmu.cache_event_map = &mipsxxcore_cache_map;
1784 		break;
1785 	case CPU_1074K:
1786 		mipspmu.name = "mips/1074K";
1787 		mipspmu.general_event_map = &mipsxxcore_event_map;
1788 		mipspmu.cache_event_map = &mipsxxcore_cache_map;
1789 		break;
1790 	case CPU_INTERAPTIV:
1791 		mipspmu.name = "mips/interAptiv";
1792 		mipspmu.general_event_map = &mipsxxcore_event_map;
1793 		mipspmu.cache_event_map = &mipsxxcore_cache_map;
1794 		break;
1795 	case CPU_LOONGSON1:
1796 		mipspmu.name = "mips/loongson1";
1797 		mipspmu.general_event_map = &mipsxxcore_event_map;
1798 		mipspmu.cache_event_map = &mipsxxcore_cache_map;
1799 		break;
1800 	case CPU_LOONGSON3:
1801 		mipspmu.name = "mips/loongson3";
1802 		mipspmu.general_event_map = &loongson3_event_map;
1803 		mipspmu.cache_event_map = &loongson3_cache_map;
1804 		break;
1805 	case CPU_CAVIUM_OCTEON:
1806 	case CPU_CAVIUM_OCTEON_PLUS:
1807 	case CPU_CAVIUM_OCTEON2:
1808 		mipspmu.name = "octeon";
1809 		mipspmu.general_event_map = &octeon_event_map;
1810 		mipspmu.cache_event_map = &octeon_cache_map;
1811 		mipspmu.map_raw_event = octeon_pmu_map_raw_event;
1812 		break;
1813 	case CPU_BMIPS5000:
1814 		mipspmu.name = "BMIPS5000";
1815 		mipspmu.general_event_map = &bmips5000_event_map;
1816 		mipspmu.cache_event_map = &bmips5000_cache_map;
1817 		break;
1818 	case CPU_XLP:
1819 		mipspmu.name = "xlp";
1820 		mipspmu.general_event_map = &xlp_event_map;
1821 		mipspmu.cache_event_map = &xlp_cache_map;
1822 		mipspmu.map_raw_event = xlp_pmu_map_raw_event;
1823 		break;
1824 	default:
		pr_cont("Either hardware does not support performance "
			"counters, or support is not yet implemented.\n");
1827 		return -ENODEV;
1828 	}
1829 
1830 	mipspmu.num_counters = counters;
1831 	mipspmu.irq = irq;
1832 
1833 	if (read_c0_perfctrl0() & M_PERFCTL_WIDE) {
1834 		mipspmu.max_period = (1ULL << 63) - 1;
1835 		mipspmu.valid_count = (1ULL << 63) - 1;
1836 		mipspmu.overflow = 1ULL << 63;
1837 		mipspmu.read_counter = mipsxx_pmu_read_counter_64;
1838 		mipspmu.write_counter = mipsxx_pmu_write_counter_64;
1839 		counter_bits = 64;
1840 	} else {
1841 		mipspmu.max_period = (1ULL << 31) - 1;
1842 		mipspmu.valid_count = (1ULL << 31) - 1;
1843 		mipspmu.overflow = 1ULL << 31;
1844 		mipspmu.read_counter = mipsxx_pmu_read_counter;
1845 		mipspmu.write_counter = mipsxx_pmu_write_counter;
1846 		counter_bits = 32;
1847 	}
1848 
1849 	on_each_cpu(reset_counters, (void *)(long)counters, 1);
1850 
1851 	pr_cont("%s PMU enabled, %d %d-bit counters available to each "
1852 		"CPU, irq %d%s\n", mipspmu.name, counters, counter_bits, irq,
		irq < 0 ? " (shared with timer interrupt)" : "");
1854 
1855 	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
1856 
1857 	return 0;
1858 }
1859 early_initcall(init_hw_perf_events);
1860