1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Linux performance counter support for MIPS.
4  *
5  * Copyright (C) 2010 MIPS Technologies, Inc.
6  * Copyright (C) 2011 Cavium Networks, Inc.
7  * Author: Deng-Cheng Zhu
8  *
9  * This code is based on the implementation for ARM, which is in turn
10  * based on the sparc64 perf event code and the x86 code. Performance
11  * counter access is based on the MIPS Oprofile code. And the callchain
12  * support references the code of MIPS stacktrace.c.
13  */
14 
15 #include <linux/cpumask.h>
16 #include <linux/interrupt.h>
17 #include <linux/smp.h>
18 #include <linux/kernel.h>
19 #include <linux/perf_event.h>
20 #include <linux/uaccess.h>
21 
22 #include <asm/irq.h>
23 #include <asm/irq_regs.h>
24 #include <asm/stacktrace.h>
25 #include <asm/time.h> /* For perf_irq */
26 
27 #define MIPS_MAX_HWEVENTS 4
28 #define MIPS_TCS_PER_COUNTER 2
29 #define MIPS_CPUID_TO_COUNTER_MASK (MIPS_TCS_PER_COUNTER - 1)
30 
31 struct cpu_hw_events {
32 	/* Array of events on this cpu. */
33 	struct perf_event	*events[MIPS_MAX_HWEVENTS];
34 
35 	/*
36 	 * Set the bit (indexed by the counter number) when the counter
37 	 * is used for an event.
38 	 */
39 	unsigned long		used_mask[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];
40 
41 	/*
42 	 * Software copy of the control register for each performance counter.
43 	 * MIPS CPUs vary in their performance counter implementations; they
44 	 * may use this field differently, or may not use it at all.
45 	 */
46 	unsigned int		saved_ctrl[MIPS_MAX_HWEVENTS];
47 };
48 DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
49 	.saved_ctrl = {0},
50 };
51 
52 /* The description of MIPS performance events. */
53 struct mips_perf_event {
54 	unsigned int event_id;
55 	/*
56 	 * MIPS performance counters are indexed starting from 0.
57 	 * CNTR_EVEN indicates the indexes of the counters to be used are
58 	 * even numbers.
59 	 */
60 	unsigned int cntr_mask;
61 	#define CNTR_EVEN	0x55555555
62 	#define CNTR_ODD	0xaaaaaaaa
63 	#define CNTR_ALL	0xffffffff
64 	enum {
65 		T  = 0,
66 		V  = 1,
67 		P  = 2,
68 	} range;
69 };
70 
71 static struct mips_perf_event raw_event;
72 static DEFINE_MUTEX(raw_event_mutex);
73 
74 #define C(x) PERF_COUNT_HW_CACHE_##x
75 
76 struct mips_pmu {
77 	u64		max_period;
78 	u64		valid_count;
79 	u64		overflow;
80 	const char	*name;
81 	int		irq;
82 	u64		(*read_counter)(unsigned int idx);
83 	void		(*write_counter)(unsigned int idx, u64 val);
84 	const struct mips_perf_event *(*map_raw_event)(u64 config);
85 	const struct mips_perf_event (*general_event_map)[PERF_COUNT_HW_MAX];
86 	const struct mips_perf_event (*cache_event_map)
87 				[PERF_COUNT_HW_CACHE_MAX]
88 				[PERF_COUNT_HW_CACHE_OP_MAX]
89 				[PERF_COUNT_HW_CACHE_RESULT_MAX];
90 	unsigned int	num_counters;
91 };
92 
93 static struct mips_pmu mipspmu;
94 
95 #define M_PERFCTL_EVENT(event)		(((event) << MIPS_PERFCTRL_EVENT_S) & \
96 					 MIPS_PERFCTRL_EVENT)
97 #define M_PERFCTL_VPEID(vpe)		((vpe)	  << MIPS_PERFCTRL_VPEID_S)
98 
99 #ifdef CONFIG_CPU_BMIPS5000
100 #define M_PERFCTL_MT_EN(filter)		0
101 #else /* !CONFIG_CPU_BMIPS5000 */
102 #define M_PERFCTL_MT_EN(filter)		(filter)
103 #endif /* CONFIG_CPU_BMIPS5000 */
104 
105 #define	   M_TC_EN_ALL			M_PERFCTL_MT_EN(MIPS_PERFCTRL_MT_EN_ALL)
106 #define	   M_TC_EN_VPE			M_PERFCTL_MT_EN(MIPS_PERFCTRL_MT_EN_VPE)
107 #define	   M_TC_EN_TC			M_PERFCTL_MT_EN(MIPS_PERFCTRL_MT_EN_TC)
108 
109 #define M_PERFCTL_COUNT_EVENT_WHENEVER	(MIPS_PERFCTRL_EXL |		\
110 					 MIPS_PERFCTRL_K |		\
111 					 MIPS_PERFCTRL_U |		\
112 					 MIPS_PERFCTRL_S |		\
113 					 MIPS_PERFCTRL_IE)
114 
115 #ifdef CONFIG_MIPS_MT_SMP
116 #define M_PERFCTL_CONFIG_MASK		0x3fff801f
117 #else
118 #define M_PERFCTL_CONFIG_MASK		0x1f
119 #endif
120 
121 
122 #ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
123 static DEFINE_RWLOCK(pmuint_rwlock);
124 
125 #if defined(CONFIG_CPU_BMIPS5000)
126 #define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
127 			 0 : (smp_processor_id() & MIPS_CPUID_TO_COUNTER_MASK))
128 #else
129 #define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
130 			 0 : cpu_vpe_id(&current_cpu_data))
131 #endif
132 
133 /* Copied from op_model_mipsxx.c */
134 static unsigned int vpe_shift(void)
135 {
136 	if (num_possible_cpus() > 1)
137 		return 1;
138 
139 	return 0;
140 }
141 
142 static unsigned int counters_total_to_per_cpu(unsigned int counters)
143 {
144 	return counters >> vpe_shift();
145 }
146 
147 #else /* !CONFIG_MIPS_PERF_SHARED_TC_COUNTERS */
148 #define vpe_id()	0
149 
150 #endif /* CONFIG_MIPS_PERF_SHARED_TC_COUNTERS */
151 
152 static void resume_local_counters(void);
153 static void pause_local_counters(void);
154 static irqreturn_t mipsxx_pmu_handle_irq(int, void *);
155 static int mipsxx_pmu_handle_shared_irq(void);
156 
157 static unsigned int mipsxx_pmu_swizzle_perf_idx(unsigned int idx)
158 {
159 	if (vpe_id() == 1)
160 		idx = (idx + 2) & 3;
161 	return idx;
162 }
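
/*
 * Illustrative example (not taken from any manual): on a core where two
 * VPEs share the four counters, code running on VPE 1 has vpe_id() == 1,
 * so perf indexes 0, 1, 2, 3 are swizzled to hardware counters 2, 3, 0, 1
 * and each VPE effectively sees "its" pair of counters first.
 */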
163 
164 static u64 mipsxx_pmu_read_counter(unsigned int idx)
165 {
166 	idx = mipsxx_pmu_swizzle_perf_idx(idx);
167 
168 	switch (idx) {
169 	case 0:
170 		/*
171 		 * The counters are unsigned, so we must cast to truncate
172 		 * off the high bits.
173 		 */
174 		return (u32)read_c0_perfcntr0();
175 	case 1:
176 		return (u32)read_c0_perfcntr1();
177 	case 2:
178 		return (u32)read_c0_perfcntr2();
179 	case 3:
180 		return (u32)read_c0_perfcntr3();
181 	default:
182 		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
183 		return 0;
184 	}
185 }
186 
187 static u64 mipsxx_pmu_read_counter_64(unsigned int idx)
188 {
189 	idx = mipsxx_pmu_swizzle_perf_idx(idx);
190 
191 	switch (idx) {
192 	case 0:
193 		return read_c0_perfcntr0_64();
194 	case 1:
195 		return read_c0_perfcntr1_64();
196 	case 2:
197 		return read_c0_perfcntr2_64();
198 	case 3:
199 		return read_c0_perfcntr3_64();
200 	default:
201 		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
202 		return 0;
203 	}
204 }
205 
206 static void mipsxx_pmu_write_counter(unsigned int idx, u64 val)
207 {
208 	idx = mipsxx_pmu_swizzle_perf_idx(idx);
209 
210 	switch (idx) {
211 	case 0:
212 		write_c0_perfcntr0(val);
213 		return;
214 	case 1:
215 		write_c0_perfcntr1(val);
216 		return;
217 	case 2:
218 		write_c0_perfcntr2(val);
219 		return;
220 	case 3:
221 		write_c0_perfcntr3(val);
222 		return;
223 	}
224 }
225 
226 static void mipsxx_pmu_write_counter_64(unsigned int idx, u64 val)
227 {
228 	idx = mipsxx_pmu_swizzle_perf_idx(idx);
229 
230 	switch (idx) {
231 	case 0:
232 		write_c0_perfcntr0_64(val);
233 		return;
234 	case 1:
235 		write_c0_perfcntr1_64(val);
236 		return;
237 	case 2:
238 		write_c0_perfcntr2_64(val);
239 		return;
240 	case 3:
241 		write_c0_perfcntr3_64(val);
242 		return;
243 	}
244 }
245 
246 static unsigned int mipsxx_pmu_read_control(unsigned int idx)
247 {
248 	idx = mipsxx_pmu_swizzle_perf_idx(idx);
249 
250 	switch (idx) {
251 	case 0:
252 		return read_c0_perfctrl0();
253 	case 1:
254 		return read_c0_perfctrl1();
255 	case 2:
256 		return read_c0_perfctrl2();
257 	case 3:
258 		return read_c0_perfctrl3();
259 	default:
260 		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
261 		return 0;
262 	}
263 }
264 
265 static void mipsxx_pmu_write_control(unsigned int idx, unsigned int val)
266 {
267 	idx = mipsxx_pmu_swizzle_perf_idx(idx);
268 
269 	switch (idx) {
270 	case 0:
271 		write_c0_perfctrl0(val);
272 		return;
273 	case 1:
274 		write_c0_perfctrl1(val);
275 		return;
276 	case 2:
277 		write_c0_perfctrl2(val);
278 		return;
279 	case 3:
280 		write_c0_perfctrl3(val);
281 		return;
282 	}
283 }
284 
285 static int mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc,
286 				    struct hw_perf_event *hwc)
287 {
288 	int i;
289 
290 	/*
291 	 * We only need to care about the counter mask. The range has
292 	 * already been checked.
293 	 */
294 	unsigned long cntr_mask = (hwc->event_base >> 8) & 0xffff;
295 
296 	for (i = mipspmu.num_counters - 1; i >= 0; i--) {
297 		/*
298 		 * Note that some MIPS perf events can be counted by both
299 		 * even and odd counters, whereas many others can only be
300 		 * counted by even _or_ odd counters. This introduces an
301 		 * issue: when the former kind of event occupies the counter
302 		 * that the latter kind of event needs, counter allocation
303 		 * for the latter event fails. Dynamically swapping counters
304 		 * between events would avoid this, but we leave that issue
305 		 * alone for now.
306 		 */
307 		if (test_bit(i, &cntr_mask) &&
308 			!test_and_set_bit(i, cpuc->used_mask))
309 			return i;
310 	}
311 
312 	return -EAGAIN;
313 }
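
/*
 * Allocation example (illustrative only): with four counters and an event
 * whose encoded counter mask is 0xaaaa (odd counters only), the loop above
 * tries counter 3 first and then counter 1; a mask of 0x5555 (even counters
 * only) tries counter 2 and then counter 0.
 */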
314 
315 static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
316 {
317 	struct perf_event *event = container_of(evt, struct perf_event, hw);
318 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
319 	unsigned int range = evt->event_base >> 24;
320 
321 	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);
322 
323 	cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) |
324 		(evt->config_base & M_PERFCTL_CONFIG_MASK) |
325 		/* Make sure the interrupt is enabled. */
326 		MIPS_PERFCTRL_IE;
327 
328 	if (IS_ENABLED(CONFIG_CPU_BMIPS5000)) {
329 		/* enable the counter for the calling thread */
330 		cpuc->saved_ctrl[idx] |=
331 			(1 << (12 + vpe_id())) | BRCM_PERFCTRL_TC;
332 	} else if (IS_ENABLED(CONFIG_MIPS_MT_SMP) && range > V) {
333 		/* The counter is processor wide. Set it up to count all TCs. */
334 		pr_debug("Enabling perf counter for all TCs\n");
335 		cpuc->saved_ctrl[idx] |= M_TC_EN_ALL;
336 	} else {
337 		unsigned int cpu, ctrl;
338 
339 		/*
340 		 * Set up the counter for a particular CPU when event->cpu is
341 		 * a valid CPU number. Otherwise set up the counter for the CPU
342 		 * scheduling this thread.
343 		 */
344 		cpu = (event->cpu >= 0) ? event->cpu : smp_processor_id();
345 
346 		ctrl = M_PERFCTL_VPEID(cpu_vpe_id(&cpu_data[cpu]));
347 		ctrl |= M_TC_EN_VPE;
348 		cpuc->saved_ctrl[idx] |= ctrl;
349 		pr_debug("Enabling perf counter for CPU%d\n", cpu);
350 	}
351 	/*
352 	 * We do not actually let the counter run. Leave it until start().
353 	 */
354 }
355 
356 static void mipsxx_pmu_disable_event(int idx)
357 {
358 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
359 	unsigned long flags;
360 
361 	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);
362 
363 	local_irq_save(flags);
364 	cpuc->saved_ctrl[idx] = mipsxx_pmu_read_control(idx) &
365 		~M_PERFCTL_COUNT_EVENT_WHENEVER;
366 	mipsxx_pmu_write_control(idx, cpuc->saved_ctrl[idx]);
367 	local_irq_restore(flags);
368 }
369 
370 static int mipspmu_event_set_period(struct perf_event *event,
371 				    struct hw_perf_event *hwc,
372 				    int idx)
373 {
374 	u64 left = local64_read(&hwc->period_left);
375 	u64 period = hwc->sample_period;
376 	int ret = 0;
377 
378 	if (unlikely((left + period) & (1ULL << 63))) {
379 		/* left underflowed by more than period. */
380 		left = period;
381 		local64_set(&hwc->period_left, left);
382 		hwc->last_period = period;
383 		ret = 1;
384 	} else if (unlikely((left + period) <= period)) {
385 		/* left underflowed by less than period. */
386 		left += period;
387 		local64_set(&hwc->period_left, left);
388 		hwc->last_period = period;
389 		ret = 1;
390 	}
391 
392 	if (left > mipspmu.max_period) {
393 		left = mipspmu.max_period;
394 		local64_set(&hwc->period_left, left);
395 	}
396 
397 	local64_set(&hwc->prev_count, mipspmu.overflow - left);
398 
399 	mipspmu.write_counter(idx, mipspmu.overflow - left);
400 
401 	perf_event_update_userpage(event);
402 
403 	return ret;
404 }
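
/*
 * Worked example with made-up numbers: with 32-bit counters,
 * mipspmu.overflow is 1 << 31, so for a remaining period ("left") of 0x1000
 * the counter is programmed to 0x80000000 - 0x1000 == 0x7ffff000.  After
 * exactly 0x1000 increments the overflow bit becomes set, which is what
 * mipsxx_pmu_handle_shared_irq() tests with (counter & mipspmu.overflow).
 */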
405 
406 static void mipspmu_event_update(struct perf_event *event,
407 				 struct hw_perf_event *hwc,
408 				 int idx)
409 {
410 	u64 prev_raw_count, new_raw_count;
411 	u64 delta;
412 
413 again:
414 	prev_raw_count = local64_read(&hwc->prev_count);
415 	new_raw_count = mipspmu.read_counter(idx);
416 
417 	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
418 				new_raw_count) != prev_raw_count)
419 		goto again;
420 
421 	delta = new_raw_count - prev_raw_count;
422 
423 	local64_add(delta, &event->count);
424 	local64_sub(delta, &hwc->period_left);
425 }
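
/*
 * Example with made-up values: if prev_count was 0x7ffff000 and the counter
 * now reads 0x80000010, delta is 0x1010 events, which is added to the event
 * count and subtracted from the remaining sample period.
 */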
426 
427 static void mipspmu_start(struct perf_event *event, int flags)
428 {
429 	struct hw_perf_event *hwc = &event->hw;
430 
431 	if (flags & PERF_EF_RELOAD)
432 		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
433 
434 	hwc->state = 0;
435 
436 	/* Set the period for the event. */
437 	mipspmu_event_set_period(event, hwc, hwc->idx);
438 
439 	/* Enable the event. */
440 	mipsxx_pmu_enable_event(hwc, hwc->idx);
441 }
442 
443 static void mipspmu_stop(struct perf_event *event, int flags)
444 {
445 	struct hw_perf_event *hwc = &event->hw;
446 
447 	if (!(hwc->state & PERF_HES_STOPPED)) {
448 		/* We are working on a local event. */
449 		mipsxx_pmu_disable_event(hwc->idx);
450 		barrier();
451 		mipspmu_event_update(event, hwc, hwc->idx);
452 		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
453 	}
454 }
455 
456 static int mipspmu_add(struct perf_event *event, int flags)
457 {
458 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
459 	struct hw_perf_event *hwc = &event->hw;
460 	int idx;
461 	int err = 0;
462 
463 	perf_pmu_disable(event->pmu);
464 
465 	/* Look for a free counter for this event. */
466 	idx = mipsxx_pmu_alloc_counter(cpuc, hwc);
467 	if (idx < 0) {
468 		err = idx;
469 		goto out;
470 	}
471 
472 	/*
473 	 * If there is an event in the counter we are going to use then
474 	 * make sure it is disabled.
475 	 */
476 	event->hw.idx = idx;
477 	mipsxx_pmu_disable_event(idx);
478 	cpuc->events[idx] = event;
479 
480 	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
481 	if (flags & PERF_EF_START)
482 		mipspmu_start(event, PERF_EF_RELOAD);
483 
484 	/* Propagate our changes to the userspace mapping. */
485 	perf_event_update_userpage(event);
486 
487 out:
488 	perf_pmu_enable(event->pmu);
489 	return err;
490 }
491 
492 static void mipspmu_del(struct perf_event *event, int flags)
493 {
494 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
495 	struct hw_perf_event *hwc = &event->hw;
496 	int idx = hwc->idx;
497 
498 	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);
499 
500 	mipspmu_stop(event, PERF_EF_UPDATE);
501 	cpuc->events[idx] = NULL;
502 	clear_bit(idx, cpuc->used_mask);
503 
504 	perf_event_update_userpage(event);
505 }
506 
507 static void mipspmu_read(struct perf_event *event)
508 {
509 	struct hw_perf_event *hwc = &event->hw;
510 
511 	/* Don't read disabled counters! */
512 	if (hwc->idx < 0)
513 		return;
514 
515 	mipspmu_event_update(event, hwc, hwc->idx);
516 }
517 
518 static void mipspmu_enable(struct pmu *pmu)
519 {
520 #ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
521 	write_unlock(&pmuint_rwlock);
522 #endif
523 	resume_local_counters();
524 }
525 
526 /*
527  * MIPS performance counters can be per-TC. The control registers
528  * cannot be directly accessed across CPUs. Hence, if we want to do
529  * global control, we need cross-CPU calls. on_each_cpu() could help,
530  * but we cannot be sure it is always called with interrupts enabled.
531  * So here we pause the local counters, then grab a rwlock and leave
532  * the counters on other CPUs alone. If a counter interrupt is raised
533  * while we own the write lock, the handler simply pauses the local
534  * counters on that CPU and spins. Also, we know we won't be migrated
535  * to another CPU after pausing local counters and before grabbing the lock.
536  */
537 static void mipspmu_disable(struct pmu *pmu)
538 {
539 	pause_local_counters();
540 #ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
541 	write_lock(&pmuint_rwlock);
542 #endif
543 }
544 
545 static atomic_t active_events = ATOMIC_INIT(0);
546 static DEFINE_MUTEX(pmu_reserve_mutex);
547 static int (*save_perf_irq)(void);
548 
549 static int mipspmu_get_irq(void)
550 {
551 	int err;
552 
553 	if (mipspmu.irq >= 0) {
554 		/* Request my own irq handler. */
555 		err = request_irq(mipspmu.irq, mipsxx_pmu_handle_irq,
556 				  IRQF_PERCPU | IRQF_NOBALANCING |
557 				  IRQF_NO_THREAD | IRQF_NO_SUSPEND |
558 				  IRQF_SHARED,
559 				  "mips_perf_pmu", &mipspmu);
560 		if (err) {
561 			pr_warn("Unable to request IRQ%d for MIPS performance counters!\n",
562 				mipspmu.irq);
563 		}
564 	} else if (cp0_perfcount_irq < 0) {
565 		/*
566 		 * We are sharing the irq number with the timer interrupt.
567 		 */
568 		save_perf_irq = perf_irq;
569 		perf_irq = mipsxx_pmu_handle_shared_irq;
570 		err = 0;
571 	} else {
572 		pr_warn("The platform hasn't properly defined its interrupt controller\n");
573 		err = -ENOENT;
574 	}
575 
576 	return err;
577 }
578 
579 static void mipspmu_free_irq(void)
580 {
581 	if (mipspmu.irq >= 0)
582 		free_irq(mipspmu.irq, &mipspmu);
583 	else if (cp0_perfcount_irq < 0)
584 		perf_irq = save_perf_irq;
585 }
586 
587 /*
588  * mipsxx/rm9000/loongson2 have different performance counters, so they
589  * have specific low-level init routines.
590  */
591 static void reset_counters(void *arg);
592 static int __hw_perf_event_init(struct perf_event *event);
593 
594 static void hw_perf_event_destroy(struct perf_event *event)
595 {
596 	if (atomic_dec_and_mutex_lock(&active_events,
597 				&pmu_reserve_mutex)) {
598 		/*
599 		 * We must not call the destroy function with interrupts
600 		 * disabled.
601 		 */
602 		on_each_cpu(reset_counters,
603 			(void *)(long)mipspmu.num_counters, 1);
604 		mipspmu_free_irq();
605 		mutex_unlock(&pmu_reserve_mutex);
606 	}
607 }
608 
609 static int mipspmu_event_init(struct perf_event *event)
610 {
611 	int err = 0;
612 
613 	/* does not support taken branch sampling */
614 	if (has_branch_stack(event))
615 		return -EOPNOTSUPP;
616 
617 	switch (event->attr.type) {
618 	case PERF_TYPE_RAW:
619 	case PERF_TYPE_HARDWARE:
620 	case PERF_TYPE_HW_CACHE:
621 		break;
622 
623 	default:
624 		return -ENOENT;
625 	}
626 
627 	if (event->cpu >= 0 && !cpu_online(event->cpu))
628 		return -ENODEV;
629 
630 	if (!atomic_inc_not_zero(&active_events)) {
631 		mutex_lock(&pmu_reserve_mutex);
632 		if (atomic_read(&active_events) == 0)
633 			err = mipspmu_get_irq();
634 
635 		if (!err)
636 			atomic_inc(&active_events);
637 		mutex_unlock(&pmu_reserve_mutex);
638 	}
639 
640 	if (err)
641 		return err;
642 
643 	return __hw_perf_event_init(event);
644 }
645 
646 static struct pmu pmu = {
647 	.pmu_enable	= mipspmu_enable,
648 	.pmu_disable	= mipspmu_disable,
649 	.event_init	= mipspmu_event_init,
650 	.add		= mipspmu_add,
651 	.del		= mipspmu_del,
652 	.start		= mipspmu_start,
653 	.stop		= mipspmu_stop,
654 	.read		= mipspmu_read,
655 };
656 
657 static unsigned int mipspmu_perf_event_encode(const struct mips_perf_event *pev)
658 {
659 /*
660  * Top 8 bits for range, next 16 bits for cntr_mask, lowest 8 bits for
661  * event_id.
662  */
663 #ifdef CONFIG_MIPS_MT_SMP
664 	if (num_possible_cpus() > 1)
665 		return ((unsigned int)pev->range << 24) |
666 			(pev->cntr_mask & 0xffff00) |
667 			(pev->event_id & 0xff);
668 	else
669 #endif /* CONFIG_MIPS_MT_SMP */
670 		return ((pev->cntr_mask & 0xffff00) |
671 			(pev->event_id & 0xff));
672 }
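
/*
 * Encoding example (illustrative): the 24K branch-miss entry
 * { 0x02, CNTR_ODD, T } encodes to
 *   (T << 24) | (CNTR_ODD & 0xffff00) | 0x02 == 0x00aaaa02,
 * and mipsxx_pmu_alloc_counter() later recovers the counter mask with
 * (event_base >> 8) & 0xffff == 0xaaaa.
 */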
673 
674 static const struct mips_perf_event *mipspmu_map_general_event(int idx)
675 {
676 
677 	if ((*mipspmu.general_event_map)[idx].cntr_mask == 0)
678 		return ERR_PTR(-EOPNOTSUPP);
679 	return &(*mipspmu.general_event_map)[idx];
680 }
681 
682 static const struct mips_perf_event *mipspmu_map_cache_event(u64 config)
683 {
684 	unsigned int cache_type, cache_op, cache_result;
685 	const struct mips_perf_event *pev;
686 
687 	cache_type = (config >> 0) & 0xff;
688 	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
689 		return ERR_PTR(-EINVAL);
690 
691 	cache_op = (config >> 8) & 0xff;
692 	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
693 		return ERR_PTR(-EINVAL);
694 
695 	cache_result = (config >> 16) & 0xff;
696 	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
697 		return ERR_PTR(-EINVAL);
698 
699 	pev = &((*mipspmu.cache_event_map)
700 					[cache_type]
701 					[cache_op]
702 					[cache_result]);
703 
704 	if (pev->cntr_mask == 0)
705 		return ERR_PTR(-EOPNOTSUPP);
706 
707 	return pev;
708 
709 }
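
/*
 * Example (standard perf cache-event encoding): an L1D read-miss event has
 * config == PERF_COUNT_HW_CACHE_L1D | (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 * (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) == 0x10000, which selects
 * (*mipspmu.cache_event_map)[C(L1D)][C(OP_READ)][C(RESULT_MISS)] above.
 */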
710 
711 static int validate_group(struct perf_event *event)
712 {
713 	struct perf_event *sibling, *leader = event->group_leader;
714 	struct cpu_hw_events fake_cpuc;
715 
716 	memset(&fake_cpuc, 0, sizeof(fake_cpuc));
717 
718 	if (mipsxx_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0)
719 		return -EINVAL;
720 
721 	for_each_sibling_event(sibling, leader) {
722 		if (mipsxx_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0)
723 			return -EINVAL;
724 	}
725 
726 	if (mipsxx_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0)
727 		return -EINVAL;
728 
729 	return 0;
730 }
731 
732 /* This is needed by specific irq handlers in perf_event_*.c */
733 static void handle_associated_event(struct cpu_hw_events *cpuc,
734 				    int idx, struct perf_sample_data *data,
735 				    struct pt_regs *regs)
736 {
737 	struct perf_event *event = cpuc->events[idx];
738 	struct hw_perf_event *hwc = &event->hw;
739 
740 	mipspmu_event_update(event, hwc, idx);
741 	data->period = event->hw.last_period;
742 	if (!mipspmu_event_set_period(event, hwc, idx))
743 		return;
744 
745 	if (perf_event_overflow(event, data, regs))
746 		mipsxx_pmu_disable_event(idx);
747 }
748 
749 
750 static int __n_counters(void)
751 {
752 	if (!cpu_has_perf)
753 		return 0;
754 	if (!(read_c0_perfctrl0() & MIPS_PERFCTRL_M))
755 		return 1;
756 	if (!(read_c0_perfctrl1() & MIPS_PERFCTRL_M))
757 		return 2;
758 	if (!(read_c0_perfctrl2() & MIPS_PERFCTRL_M))
759 		return 3;
760 
761 	return 4;
762 }
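
/*
 * For example (hypothetical configuration): a core with PerfCtl0.M and
 * PerfCtl1.M set but PerfCtl2.M clear reports three counter/control pairs,
 * since each M bit indicates that another pair is implemented.
 */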
763 
764 static int n_counters(void)
765 {
766 	int counters;
767 
768 	switch (current_cpu_type()) {
769 	case CPU_R10000:
770 		counters = 2;
771 		break;
772 
773 	case CPU_R12000:
774 	case CPU_R14000:
775 	case CPU_R16000:
776 		counters = 4;
777 		break;
778 
779 	default:
780 		counters = __n_counters();
781 	}
782 
783 	return counters;
784 }
785 
786 static void reset_counters(void *arg)
787 {
788 	int counters = (int)(long)arg;
789 	switch (counters) {
790 	case 4:
791 		mipsxx_pmu_write_control(3, 0);
792 		mipspmu.write_counter(3, 0);
793 		/* fall through */
794 	case 3:
795 		mipsxx_pmu_write_control(2, 0);
796 		mipspmu.write_counter(2, 0);
797 		/* fall through */
798 	case 2:
799 		mipsxx_pmu_write_control(1, 0);
800 		mipspmu.write_counter(1, 0);
801 		/* fall through */
802 	case 1:
803 		mipsxx_pmu_write_control(0, 0);
804 		mipspmu.write_counter(0, 0);
805 		break;
806 	}
807 }
808 
809 /* 24K/34K/1004K/interAptiv/loongson1 cores share the same event map. */
810 static const struct mips_perf_event mipsxxcore_event_map
811 				[PERF_COUNT_HW_MAX] = {
812 	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
813 	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
814 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_EVEN, T },
815 	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
816 };
817 
818 /* 74K/proAptiv cores have a different branch event code. */
819 static const struct mips_perf_event mipsxxcore_event_map2
820 				[PERF_COUNT_HW_MAX] = {
821 	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
822 	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
823 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x27, CNTR_EVEN, T },
824 	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T },
825 };
826 
827 static const struct mips_perf_event i6x00_event_map[PERF_COUNT_HW_MAX] = {
828 	[PERF_COUNT_HW_CPU_CYCLES]          = { 0x00, CNTR_EVEN | CNTR_ODD },
829 	[PERF_COUNT_HW_INSTRUCTIONS]        = { 0x01, CNTR_EVEN | CNTR_ODD },
830 	/* These only count dcache, not icache */
831 	[PERF_COUNT_HW_CACHE_REFERENCES]    = { 0x45, CNTR_EVEN | CNTR_ODD },
832 	[PERF_COUNT_HW_CACHE_MISSES]        = { 0x48, CNTR_EVEN | CNTR_ODD },
833 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x15, CNTR_EVEN | CNTR_ODD },
834 	[PERF_COUNT_HW_BRANCH_MISSES]       = { 0x16, CNTR_EVEN | CNTR_ODD },
835 };
836 
837 static const struct mips_perf_event loongson3_event_map[PERF_COUNT_HW_MAX] = {
838 	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN },
839 	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, CNTR_ODD },
840 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x01, CNTR_EVEN },
841 	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x01, CNTR_ODD },
842 };
843 
844 static const struct mips_perf_event octeon_event_map[PERF_COUNT_HW_MAX] = {
845 	[PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL },
846 	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x03, CNTR_ALL },
847 	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x2b, CNTR_ALL },
848 	[PERF_COUNT_HW_CACHE_MISSES] = { 0x2e, CNTR_ALL },
849 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x08, CNTR_ALL },
850 	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x09, CNTR_ALL },
851 	[PERF_COUNT_HW_BUS_CYCLES] = { 0x25, CNTR_ALL },
852 };
853 
854 static const struct mips_perf_event bmips5000_event_map
855 				[PERF_COUNT_HW_MAX] = {
856 	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, T },
857 	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
858 	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
859 };
860 
861 static const struct mips_perf_event xlp_event_map[PERF_COUNT_HW_MAX] = {
862 	[PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL },
863 	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x18, CNTR_ALL }, /* PAPI_TOT_INS */
864 	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x04, CNTR_ALL }, /* PAPI_L1_ICA */
865 	[PERF_COUNT_HW_CACHE_MISSES] = { 0x07, CNTR_ALL }, /* PAPI_L1_ICM */
866 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x1b, CNTR_ALL }, /* PAPI_BR_CN */
867 	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x1c, CNTR_ALL }, /* PAPI_BR_MSP */
868 };
869 
870 /* 24K/34K/1004K/interAptiv/loongson1 cores share the same cache event map. */
871 static const struct mips_perf_event mipsxxcore_cache_map
872 				[PERF_COUNT_HW_CACHE_MAX]
873 				[PERF_COUNT_HW_CACHE_OP_MAX]
874 				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
875 [C(L1D)] = {
876 	/*
877 	 * Like some other architectures (e.g. ARM), the performance
878 	 * counters don't differentiate between read and write
879 	 * accesses/misses, so this isn't strictly correct, but it's the
880 	 * best we can do. Writes and reads get combined.
881 	 */
882 	[C(OP_READ)] = {
883 		[C(RESULT_ACCESS)]	= { 0x0a, CNTR_EVEN, T },
884 		[C(RESULT_MISS)]	= { 0x0b, CNTR_EVEN | CNTR_ODD, T },
885 	},
886 	[C(OP_WRITE)] = {
887 		[C(RESULT_ACCESS)]	= { 0x0a, CNTR_EVEN, T },
888 		[C(RESULT_MISS)]	= { 0x0b, CNTR_EVEN | CNTR_ODD, T },
889 	},
890 },
891 [C(L1I)] = {
892 	[C(OP_READ)] = {
893 		[C(RESULT_ACCESS)]	= { 0x09, CNTR_EVEN, T },
894 		[C(RESULT_MISS)]	= { 0x09, CNTR_ODD, T },
895 	},
896 	[C(OP_WRITE)] = {
897 		[C(RESULT_ACCESS)]	= { 0x09, CNTR_EVEN, T },
898 		[C(RESULT_MISS)]	= { 0x09, CNTR_ODD, T },
899 	},
900 	[C(OP_PREFETCH)] = {
901 		[C(RESULT_ACCESS)]	= { 0x14, CNTR_EVEN, T },
902 		/*
903 		 * Note that MIPS has only "hit" events countable for
904 		 * the prefetch operation.
905 		 */
906 	},
907 },
908 [C(LL)] = {
909 	[C(OP_READ)] = {
910 		[C(RESULT_ACCESS)]	= { 0x15, CNTR_ODD, P },
911 		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN, P },
912 	},
913 	[C(OP_WRITE)] = {
914 		[C(RESULT_ACCESS)]	= { 0x15, CNTR_ODD, P },
915 		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN, P },
916 	},
917 },
918 [C(DTLB)] = {
919 	[C(OP_READ)] = {
920 		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
921 		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
922 	},
923 	[C(OP_WRITE)] = {
924 		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
925 		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
926 	},
927 },
928 [C(ITLB)] = {
929 	[C(OP_READ)] = {
930 		[C(RESULT_ACCESS)]	= { 0x05, CNTR_EVEN, T },
931 		[C(RESULT_MISS)]	= { 0x05, CNTR_ODD, T },
932 	},
933 	[C(OP_WRITE)] = {
934 		[C(RESULT_ACCESS)]	= { 0x05, CNTR_EVEN, T },
935 		[C(RESULT_MISS)]	= { 0x05, CNTR_ODD, T },
936 	},
937 },
938 [C(BPU)] = {
939 	/* Using the same code for *HW_BRANCH* */
940 	[C(OP_READ)] = {
941 		[C(RESULT_ACCESS)]	= { 0x02, CNTR_EVEN, T },
942 		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
943 	},
944 	[C(OP_WRITE)] = {
945 		[C(RESULT_ACCESS)]	= { 0x02, CNTR_EVEN, T },
946 		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
947 	},
948 },
949 };
950 
951 /* 74K/proAptiv cores have a completely different cache event map. */
952 static const struct mips_perf_event mipsxxcore_cache_map2
953 				[PERF_COUNT_HW_CACHE_MAX]
954 				[PERF_COUNT_HW_CACHE_OP_MAX]
955 				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
956 [C(L1D)] = {
957 	/*
958 	 * Like some other architectures (e.g. ARM), the performance
959 	 * counters don't differentiate between read and write
960 	 * accesses/misses, so this isn't strictly correct, but it's the
961 	 * best we can do. Writes and reads get combined.
962 	 */
963 	[C(OP_READ)] = {
964 		[C(RESULT_ACCESS)]	= { 0x17, CNTR_ODD, T },
965 		[C(RESULT_MISS)]	= { 0x18, CNTR_ODD, T },
966 	},
967 	[C(OP_WRITE)] = {
968 		[C(RESULT_ACCESS)]	= { 0x17, CNTR_ODD, T },
969 		[C(RESULT_MISS)]	= { 0x18, CNTR_ODD, T },
970 	},
971 },
972 [C(L1I)] = {
973 	[C(OP_READ)] = {
974 		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
975 		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
976 	},
977 	[C(OP_WRITE)] = {
978 		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
979 		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
980 	},
981 	[C(OP_PREFETCH)] = {
982 		[C(RESULT_ACCESS)]	= { 0x34, CNTR_EVEN, T },
983 		/*
984 		 * Note that MIPS has only "hit" events countable for
985 		 * the prefetch operation.
986 		 */
987 	},
988 },
989 [C(LL)] = {
990 	[C(OP_READ)] = {
991 		[C(RESULT_ACCESS)]	= { 0x1c, CNTR_ODD, P },
992 		[C(RESULT_MISS)]	= { 0x1d, CNTR_EVEN, P },
993 	},
994 	[C(OP_WRITE)] = {
995 		[C(RESULT_ACCESS)]	= { 0x1c, CNTR_ODD, P },
996 		[C(RESULT_MISS)]	= { 0x1d, CNTR_EVEN, P },
997 	},
998 },
999 /*
1000  * 74K core does not have specific DTLB events. proAptiv core has
1001  * "speculative" DTLB events which are numbered 0x63 (even/odd) and
1002  * not included here. One can use raw events if really needed.
1003  */
1004 [C(ITLB)] = {
1005 	[C(OP_READ)] = {
1006 		[C(RESULT_ACCESS)]	= { 0x04, CNTR_EVEN, T },
1007 		[C(RESULT_MISS)]	= { 0x04, CNTR_ODD, T },
1008 	},
1009 	[C(OP_WRITE)] = {
1010 		[C(RESULT_ACCESS)]	= { 0x04, CNTR_EVEN, T },
1011 		[C(RESULT_MISS)]	= { 0x04, CNTR_ODD, T },
1012 	},
1013 },
1014 [C(BPU)] = {
1015 	/* Using the same code for *HW_BRANCH* */
1016 	[C(OP_READ)] = {
1017 		[C(RESULT_ACCESS)]	= { 0x27, CNTR_EVEN, T },
1018 		[C(RESULT_MISS)]	= { 0x27, CNTR_ODD, T },
1019 	},
1020 	[C(OP_WRITE)] = {
1021 		[C(RESULT_ACCESS)]	= { 0x27, CNTR_EVEN, T },
1022 		[C(RESULT_MISS)]	= { 0x27, CNTR_ODD, T },
1023 	},
1024 },
1025 };
1026 
1027 static const struct mips_perf_event i6x00_cache_map
1028 				[PERF_COUNT_HW_CACHE_MAX]
1029 				[PERF_COUNT_HW_CACHE_OP_MAX]
1030 				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1031 [C(L1D)] = {
1032 	[C(OP_READ)] = {
1033 		[C(RESULT_ACCESS)]	= { 0x46, CNTR_EVEN | CNTR_ODD },
1034 		[C(RESULT_MISS)]	= { 0x49, CNTR_EVEN | CNTR_ODD },
1035 	},
1036 	[C(OP_WRITE)] = {
1037 		[C(RESULT_ACCESS)]	= { 0x47, CNTR_EVEN | CNTR_ODD },
1038 		[C(RESULT_MISS)]	= { 0x4a, CNTR_EVEN | CNTR_ODD },
1039 	},
1040 },
1041 [C(L1I)] = {
1042 	[C(OP_READ)] = {
1043 		[C(RESULT_ACCESS)]	= { 0x84, CNTR_EVEN | CNTR_ODD },
1044 		[C(RESULT_MISS)]	= { 0x85, CNTR_EVEN | CNTR_ODD },
1045 	},
1046 },
1047 [C(DTLB)] = {
1048 	/* Can't distinguish read & write */
1049 	[C(OP_READ)] = {
1050 		[C(RESULT_ACCESS)]	= { 0x40, CNTR_EVEN | CNTR_ODD },
1051 		[C(RESULT_MISS)]	= { 0x41, CNTR_EVEN | CNTR_ODD },
1052 	},
1053 	[C(OP_WRITE)] = {
1054 		[C(RESULT_ACCESS)]	= { 0x40, CNTR_EVEN | CNTR_ODD },
1055 		[C(RESULT_MISS)]	= { 0x41, CNTR_EVEN | CNTR_ODD },
1056 	},
1057 },
1058 [C(BPU)] = {
1059 	/* Conditional branches / mispredicted */
1060 	[C(OP_READ)] = {
1061 		[C(RESULT_ACCESS)]	= { 0x15, CNTR_EVEN | CNTR_ODD },
1062 		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN | CNTR_ODD },
1063 	},
1064 },
1065 };
1066 
1067 static const struct mips_perf_event loongson3_cache_map
1068 				[PERF_COUNT_HW_CACHE_MAX]
1069 				[PERF_COUNT_HW_CACHE_OP_MAX]
1070 				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1071 [C(L1D)] = {
1072 	/*
1073 	 * Like some other architectures (e.g. ARM), the performance
1074 	 * counters don't differentiate between read and write
1075 	 * accesses/misses, so this isn't strictly correct, but it's the
1076 	 * best we can do. Writes and reads get combined.
1077 	 */
1078 	[C(OP_READ)] = {
1079 		[C(RESULT_MISS)]        = { 0x04, CNTR_ODD },
1080 	},
1081 	[C(OP_WRITE)] = {
1082 		[C(RESULT_MISS)]        = { 0x04, CNTR_ODD },
1083 	},
1084 },
1085 [C(L1I)] = {
1086 	[C(OP_READ)] = {
1087 		[C(RESULT_MISS)]        = { 0x04, CNTR_EVEN },
1088 	},
1089 	[C(OP_WRITE)] = {
1090 		[C(RESULT_MISS)]        = { 0x04, CNTR_EVEN },
1091 	},
1092 },
1093 [C(DTLB)] = {
1094 	[C(OP_READ)] = {
1095 		[C(RESULT_MISS)]        = { 0x09, CNTR_ODD },
1096 	},
1097 	[C(OP_WRITE)] = {
1098 		[C(RESULT_MISS)]        = { 0x09, CNTR_ODD },
1099 	},
1100 },
1101 [C(ITLB)] = {
1102 	[C(OP_READ)] = {
1103 		[C(RESULT_MISS)]        = { 0x0c, CNTR_ODD },
1104 	},
1105 	[C(OP_WRITE)] = {
1106 		[C(RESULT_MISS)]        = { 0x0c, CNTR_ODD },
1107 	},
1108 },
1109 [C(BPU)] = {
1110 	/* Using the same code for *HW_BRANCH* */
1111 	[C(OP_READ)] = {
1112 		[C(RESULT_ACCESS)]      = { 0x02, CNTR_EVEN },
1113 		[C(RESULT_MISS)]        = { 0x02, CNTR_ODD },
1114 	},
1115 	[C(OP_WRITE)] = {
1116 		[C(RESULT_ACCESS)]      = { 0x02, CNTR_EVEN },
1117 		[C(RESULT_MISS)]        = { 0x02, CNTR_ODD },
1118 	},
1119 },
1120 };
1121 
1122 /* BMIPS5000 */
1123 static const struct mips_perf_event bmips5000_cache_map
1124 				[PERF_COUNT_HW_CACHE_MAX]
1125 				[PERF_COUNT_HW_CACHE_OP_MAX]
1126 				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1127 [C(L1D)] = {
1128 	/*
1129 	 * Like some other architectures (e.g. ARM), the performance
1130 	 * counters don't differentiate between read and write
1131 	 * accesses/misses, so this isn't strictly correct, but it's the
1132 	 * best we can do. Writes and reads get combined.
1133 	 */
1134 	[C(OP_READ)] = {
1135 		[C(RESULT_ACCESS)]	= { 12, CNTR_EVEN, T },
1136 		[C(RESULT_MISS)]	= { 12, CNTR_ODD, T },
1137 	},
1138 	[C(OP_WRITE)] = {
1139 		[C(RESULT_ACCESS)]	= { 12, CNTR_EVEN, T },
1140 		[C(RESULT_MISS)]	= { 12, CNTR_ODD, T },
1141 	},
1142 },
1143 [C(L1I)] = {
1144 	[C(OP_READ)] = {
1145 		[C(RESULT_ACCESS)]	= { 10, CNTR_EVEN, T },
1146 		[C(RESULT_MISS)]	= { 10, CNTR_ODD, T },
1147 	},
1148 	[C(OP_WRITE)] = {
1149 		[C(RESULT_ACCESS)]	= { 10, CNTR_EVEN, T },
1150 		[C(RESULT_MISS)]	= { 10, CNTR_ODD, T },
1151 	},
1152 	[C(OP_PREFETCH)] = {
1153 		[C(RESULT_ACCESS)]	= { 23, CNTR_EVEN, T },
1154 		/*
1155 		 * Note that MIPS has only "hit" events countable for
1156 		 * the prefetch operation.
1157 		 */
1158 	},
1159 },
1160 [C(LL)] = {
1161 	[C(OP_READ)] = {
1162 		[C(RESULT_ACCESS)]	= { 28, CNTR_EVEN, P },
1163 		[C(RESULT_MISS)]	= { 28, CNTR_ODD, P },
1164 	},
1165 	[C(OP_WRITE)] = {
1166 		[C(RESULT_ACCESS)]	= { 28, CNTR_EVEN, P },
1167 		[C(RESULT_MISS)]	= { 28, CNTR_ODD, P },
1168 	},
1169 },
1170 [C(BPU)] = {
1171 	/* Using the same code for *HW_BRANCH* */
1172 	[C(OP_READ)] = {
1173 		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
1174 	},
1175 	[C(OP_WRITE)] = {
1176 		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
1177 	},
1178 },
1179 };
1180 
1181 
1182 static const struct mips_perf_event octeon_cache_map
1183 				[PERF_COUNT_HW_CACHE_MAX]
1184 				[PERF_COUNT_HW_CACHE_OP_MAX]
1185 				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1186 [C(L1D)] = {
1187 	[C(OP_READ)] = {
1188 		[C(RESULT_ACCESS)]	= { 0x2b, CNTR_ALL },
1189 		[C(RESULT_MISS)]	= { 0x2e, CNTR_ALL },
1190 	},
1191 	[C(OP_WRITE)] = {
1192 		[C(RESULT_ACCESS)]	= { 0x30, CNTR_ALL },
1193 	},
1194 },
1195 [C(L1I)] = {
1196 	[C(OP_READ)] = {
1197 		[C(RESULT_ACCESS)]	= { 0x18, CNTR_ALL },
1198 	},
1199 	[C(OP_PREFETCH)] = {
1200 		[C(RESULT_ACCESS)]	= { 0x19, CNTR_ALL },
1201 	},
1202 },
1203 [C(DTLB)] = {
1204 	/*
1205 	 * Only general DTLB misses are counted, so the same event is used
1206 	 * for read and write.
1207 	 */
1208 	[C(OP_READ)] = {
1209 		[C(RESULT_MISS)]	= { 0x35, CNTR_ALL },
1210 	},
1211 	[C(OP_WRITE)] = {
1212 		[C(RESULT_MISS)]	= { 0x35, CNTR_ALL },
1213 	},
1214 },
1215 [C(ITLB)] = {
1216 	[C(OP_READ)] = {
1217 		[C(RESULT_MISS)]	= { 0x37, CNTR_ALL },
1218 	},
1219 },
1220 };
1221 
1222 static const struct mips_perf_event xlp_cache_map
1223 				[PERF_COUNT_HW_CACHE_MAX]
1224 				[PERF_COUNT_HW_CACHE_OP_MAX]
1225 				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1226 [C(L1D)] = {
1227 	[C(OP_READ)] = {
1228 		[C(RESULT_ACCESS)]	= { 0x31, CNTR_ALL }, /* PAPI_L1_DCR */
1229 		[C(RESULT_MISS)]	= { 0x30, CNTR_ALL }, /* PAPI_L1_LDM */
1230 	},
1231 	[C(OP_WRITE)] = {
1232 		[C(RESULT_ACCESS)]	= { 0x2f, CNTR_ALL }, /* PAPI_L1_DCW */
1233 		[C(RESULT_MISS)]	= { 0x2e, CNTR_ALL }, /* PAPI_L1_STM */
1234 	},
1235 },
1236 [C(L1I)] = {
1237 	[C(OP_READ)] = {
1238 		[C(RESULT_ACCESS)]	= { 0x04, CNTR_ALL }, /* PAPI_L1_ICA */
1239 		[C(RESULT_MISS)]	= { 0x07, CNTR_ALL }, /* PAPI_L1_ICM */
1240 	},
1241 },
1242 [C(LL)] = {
1243 	[C(OP_READ)] = {
1244 		[C(RESULT_ACCESS)]	= { 0x35, CNTR_ALL }, /* PAPI_L2_DCR */
1245 		[C(RESULT_MISS)]	= { 0x37, CNTR_ALL }, /* PAPI_L2_LDM */
1246 	},
1247 	[C(OP_WRITE)] = {
1248 		[C(RESULT_ACCESS)]	= { 0x34, CNTR_ALL }, /* PAPI_L2_DCA */
1249 		[C(RESULT_MISS)]	= { 0x36, CNTR_ALL }, /* PAPI_L2_DCM */
1250 	},
1251 },
1252 [C(DTLB)] = {
1253 	/*
1254 	 * Only general DTLB misses are counted, so the same event is used
1255 	 * for read and write.
1256 	 */
1257 	[C(OP_READ)] = {
1258 		[C(RESULT_MISS)]	= { 0x2d, CNTR_ALL }, /* PAPI_TLB_DM */
1259 	},
1260 	[C(OP_WRITE)] = {
1261 		[C(RESULT_MISS)]	= { 0x2d, CNTR_ALL }, /* PAPI_TLB_DM */
1262 	},
1263 },
1264 [C(ITLB)] = {
1265 	[C(OP_READ)] = {
1266 		[C(RESULT_MISS)]	= { 0x08, CNTR_ALL }, /* PAPI_TLB_IM */
1267 	},
1268 	[C(OP_WRITE)] = {
1269 		[C(RESULT_MISS)]	= { 0x08, CNTR_ALL }, /* PAPI_TLB_IM */
1270 	},
1271 },
1272 [C(BPU)] = {
1273 	[C(OP_READ)] = {
1274 		[C(RESULT_MISS)]	= { 0x25, CNTR_ALL },
1275 	},
1276 },
1277 };
1278 
1279 static int __hw_perf_event_init(struct perf_event *event)
1280 {
1281 	struct perf_event_attr *attr = &event->attr;
1282 	struct hw_perf_event *hwc = &event->hw;
1283 	const struct mips_perf_event *pev;
1284 	int err;
1285 
1286 	/* Return the MIPS event descriptor for a generic perf event. */
1287 	if (PERF_TYPE_HARDWARE == event->attr.type) {
1288 		if (event->attr.config >= PERF_COUNT_HW_MAX)
1289 			return -EINVAL;
1290 		pev = mipspmu_map_general_event(event->attr.config);
1291 	} else if (PERF_TYPE_HW_CACHE == event->attr.type) {
1292 		pev = mipspmu_map_cache_event(event->attr.config);
1293 	} else if (PERF_TYPE_RAW == event->attr.type) {
1294 		/* We are working on the global raw event. */
1295 		mutex_lock(&raw_event_mutex);
1296 		pev = mipspmu.map_raw_event(event->attr.config);
1297 	} else {
1298 		/* The event type is not (yet) supported. */
1299 		return -EOPNOTSUPP;
1300 	}
1301 
1302 	if (IS_ERR(pev)) {
1303 		if (PERF_TYPE_RAW == event->attr.type)
1304 			mutex_unlock(&raw_event_mutex);
1305 		return PTR_ERR(pev);
1306 	}
1307 
1308 	/*
1309 	 * We allow max flexibility in how each individual counter shared
1310 	 * by a single CPU operates (the mode exclusion and the range).
1311 	 */
1312 	hwc->config_base = MIPS_PERFCTRL_IE;
1313 
1314 	hwc->event_base = mipspmu_perf_event_encode(pev);
1315 	if (PERF_TYPE_RAW == event->attr.type)
1316 		mutex_unlock(&raw_event_mutex);
1317 
1318 	if (!attr->exclude_user)
1319 		hwc->config_base |= MIPS_PERFCTRL_U;
1320 	if (!attr->exclude_kernel) {
1321 		hwc->config_base |= MIPS_PERFCTRL_K;
1322 		/* MIPS kernel mode: KSU == 00b || EXL == 1 || ERL == 1 */
1323 		hwc->config_base |= MIPS_PERFCTRL_EXL;
1324 	}
1325 	if (!attr->exclude_hv)
1326 		hwc->config_base |= MIPS_PERFCTRL_S;
1327 
1328 	hwc->config_base &= M_PERFCTL_CONFIG_MASK;
1329 	/*
1330 	 * The event can belong to another cpu. We do not assign a local
1331 	 * counter for it for now.
1332 	 */
1333 	hwc->idx = -1;
1334 	hwc->config = 0;
1335 
1336 	if (!hwc->sample_period) {
1337 		hwc->sample_period  = mipspmu.max_period;
1338 		hwc->last_period    = hwc->sample_period;
1339 		local64_set(&hwc->period_left, hwc->sample_period);
1340 	}
1341 
1342 	err = 0;
1343 	if (event->group_leader != event)
1344 		err = validate_group(event);
1345 
1346 	event->destroy = hw_perf_event_destroy;
1347 
1348 	if (err)
1349 		event->destroy(event);
1350 
1351 	return err;
1352 }
1353 
1354 static void pause_local_counters(void)
1355 {
1356 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1357 	int ctr = mipspmu.num_counters;
1358 	unsigned long flags;
1359 
1360 	local_irq_save(flags);
1361 	do {
1362 		ctr--;
1363 		cpuc->saved_ctrl[ctr] = mipsxx_pmu_read_control(ctr);
1364 		mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr] &
1365 					 ~M_PERFCTL_COUNT_EVENT_WHENEVER);
1366 	} while (ctr > 0);
1367 	local_irq_restore(flags);
1368 }
1369 
1370 static void resume_local_counters(void)
1371 {
1372 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1373 	int ctr = mipspmu.num_counters;
1374 
1375 	do {
1376 		ctr--;
1377 		mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr]);
1378 	} while (ctr > 0);
1379 }
1380 
1381 static int mipsxx_pmu_handle_shared_irq(void)
1382 {
1383 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1384 	struct perf_sample_data data;
1385 	unsigned int counters = mipspmu.num_counters;
1386 	u64 counter;
1387 	int n, handled = IRQ_NONE;
1388 	struct pt_regs *regs;
1389 
1390 	if (cpu_has_perf_cntr_intr_bit && !(read_c0_cause() & CAUSEF_PCI))
1391 		return handled;
1392 	/*
1393 	 * First we pause the local counters, so that when we are locked
1394 	 * here, the counters are all paused. When the write lock is taken
1395 	 * in mipspmu_disable(), the timer interrupt handler will be delayed.
1396 	 *
1397 	 * See also mipspmu_start().
1398 	 */
1399 	pause_local_counters();
1400 #ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
1401 	read_lock(&pmuint_rwlock);
1402 #endif
1403 
1404 	regs = get_irq_regs();
1405 
1406 	perf_sample_data_init(&data, 0, 0);
1407 
1408 	for (n = counters - 1; n >= 0; n--) {
1409 		if (!test_bit(n, cpuc->used_mask))
1410 			continue;
1411 
1412 		counter = mipspmu.read_counter(n);
1413 		if (!(counter & mipspmu.overflow))
1414 			continue;
1415 
1416 		handle_associated_event(cpuc, n, &data, regs);
1417 		handled = IRQ_HANDLED;
1418 	}
1419 
1420 #ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
1421 	read_unlock(&pmuint_rwlock);
1422 #endif
1423 	resume_local_counters();
1424 
1425 	/*
1426 	 * Do all the work for the pending perf events. We can do this
1427 	 * in here because the performance counter interrupt is a regular
1428 	 * interrupt, not NMI.
1429 	 */
1430 	if (handled == IRQ_HANDLED)
1431 		irq_work_run();
1432 
1433 	return handled;
1434 }
1435 
1436 static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev)
1437 {
1438 	return mipsxx_pmu_handle_shared_irq();
1439 }
1440 
1441 /* 24K */
1442 #define IS_BOTH_COUNTERS_24K_EVENT(b)					\
1443 	((b) == 0 || (b) == 1 || (b) == 11)
1444 
1445 /* 34K */
1446 #define IS_BOTH_COUNTERS_34K_EVENT(b)					\
1447 	((b) == 0 || (b) == 1 || (b) == 11)
1448 #ifdef CONFIG_MIPS_MT_SMP
1449 #define IS_RANGE_P_34K_EVENT(r, b)					\
1450 	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
1451 	 (b) == 25 || (b) == 39 || (r) == 44 || (r) == 174 ||		\
1452 	 (r) == 176 || ((b) >= 50 && (b) <= 55) ||			\
1453 	 ((b) >= 64 && (b) <= 67))
1454 #define IS_RANGE_V_34K_EVENT(r) ((r) == 47)
1455 #endif
1456 
1457 /* 74K */
1458 #define IS_BOTH_COUNTERS_74K_EVENT(b)					\
1459 	((b) == 0 || (b) == 1)
1460 
1461 /* proAptiv */
1462 #define IS_BOTH_COUNTERS_PROAPTIV_EVENT(b)				\
1463 	((b) == 0 || (b) == 1)
1464 /* P5600 */
1465 #define IS_BOTH_COUNTERS_P5600_EVENT(b)					\
1466 	((b) == 0 || (b) == 1)
1467 
1468 /* 1004K */
1469 #define IS_BOTH_COUNTERS_1004K_EVENT(b)					\
1470 	((b) == 0 || (b) == 1 || (b) == 11)
1471 #ifdef CONFIG_MIPS_MT_SMP
1472 #define IS_RANGE_P_1004K_EVENT(r, b)					\
1473 	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
1474 	 (b) == 25 || (b) == 36 || (b) == 39 || (r) == 44 ||		\
1475 	 (r) == 174 || (r) == 176 || ((b) >= 50 && (b) <= 59) ||	\
1476 	 (r) == 188 || (b) == 61 || (b) == 62 ||			\
1477 	 ((b) >= 64 && (b) <= 67))
1478 #define IS_RANGE_V_1004K_EVENT(r)	((r) == 47)
1479 #endif
1480 
1481 /* interAptiv */
1482 #define IS_BOTH_COUNTERS_INTERAPTIV_EVENT(b)				\
1483 	((b) == 0 || (b) == 1 || (b) == 11)
1484 #ifdef CONFIG_MIPS_MT_SMP
1485 /* The P/V/T info is not provided for "(b) == 38" in SUM, assume P. */
1486 #define IS_RANGE_P_INTERAPTIV_EVENT(r, b)				\
1487 	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
1488 	 (b) == 25 || (b) == 36 || (b) == 38 || (b) == 39 ||		\
1489 	 (r) == 44 || (r) == 174 || (r) == 176 || ((b) >= 50 &&		\
1490 	 (b) <= 59) || (r) == 188 || (b) == 61 || (b) == 62 ||		\
1491 	 ((b) >= 64 && (b) <= 67))
1492 #define IS_RANGE_V_INTERAPTIV_EVENT(r)	((r) == 47 || (r) == 175)
1493 #endif
1494 
1495 /* BMIPS5000 */
1496 #define IS_BOTH_COUNTERS_BMIPS5000_EVENT(b)				\
1497 	((b) == 0 || (b) == 1)
1498 
1499 
1500 /*
1501  * For most cores the user can use raw events 0-255, where 0-127 are for
1502  * the events of even counters, and 128-255 for odd counters. Bit 7 is
1503  * used as the even/odd bank selector. So, for example, to request event
1504  * number 15 (as listed in the core's user manual) on an odd counter,
1505  * 128 must be added to 15, i.e. 143 (0x8F) is passed as the event
1506  * config.
1507  *
1508  * Some newer cores have even more events, in which case the user can use raw
1509  * events 0-511, where 0-255 are for the events of even counters, and 256-511
1510  * are for odd counters, so bit 8 is used to indicate the even/odd bank selector.
1511  */
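/*
 * For example (hypothetical invocations, not verified on every core):
 *   perf stat -e r02 ...   passes raw config 0x02 (event 2,  even bank)
 *   perf stat -e r8f ...   passes raw config 0x8f (event 15, odd bank)
 * and that raw config value is what gets decoded below.
 */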
1512 static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
1513 {
1514 	/* currently most cores have 7-bit event numbers */
1515 	unsigned int raw_id = config & 0xff;
1516 	unsigned int base_id = raw_id & 0x7f;
1517 
1518 	switch (current_cpu_type()) {
1519 	case CPU_24K:
1520 		if (IS_BOTH_COUNTERS_24K_EVENT(base_id))
1521 			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1522 		else
1523 			raw_event.cntr_mask =
1524 				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1525 #ifdef CONFIG_MIPS_MT_SMP
1526 		/*
1527 		 * This is effectively a no-op: non-multithreading
1528 		 * CPUs never check or use the range.
1529 		 */
1530 		raw_event.range = P;
1531 #endif
1532 		break;
1533 	case CPU_34K:
1534 		if (IS_BOTH_COUNTERS_34K_EVENT(base_id))
1535 			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1536 		else
1537 			raw_event.cntr_mask =
1538 				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1539 #ifdef CONFIG_MIPS_MT_SMP
1540 		if (IS_RANGE_P_34K_EVENT(raw_id, base_id))
1541 			raw_event.range = P;
1542 		else if (unlikely(IS_RANGE_V_34K_EVENT(raw_id)))
1543 			raw_event.range = V;
1544 		else
1545 			raw_event.range = T;
1546 #endif
1547 		break;
1548 	case CPU_74K:
1549 	case CPU_1074K:
1550 		if (IS_BOTH_COUNTERS_74K_EVENT(base_id))
1551 			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1552 		else
1553 			raw_event.cntr_mask =
1554 				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1555 #ifdef CONFIG_MIPS_MT_SMP
1556 		raw_event.range = P;
1557 #endif
1558 		break;
1559 	case CPU_PROAPTIV:
1560 		if (IS_BOTH_COUNTERS_PROAPTIV_EVENT(base_id))
1561 			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1562 		else
1563 			raw_event.cntr_mask =
1564 				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1565 #ifdef CONFIG_MIPS_MT_SMP
1566 		raw_event.range = P;
1567 #endif
1568 		break;
1569 	case CPU_P5600:
1570 	case CPU_P6600:
1571 		/* 8-bit event numbers */
1572 		raw_id = config & 0x1ff;
1573 		base_id = raw_id & 0xff;
1574 		if (IS_BOTH_COUNTERS_P5600_EVENT(base_id))
1575 			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1576 		else
1577 			raw_event.cntr_mask =
1578 				raw_id > 255 ? CNTR_ODD : CNTR_EVEN;
1579 #ifdef CONFIG_MIPS_MT_SMP
1580 		raw_event.range = P;
1581 #endif
1582 		break;
1583 	case CPU_I6400:
1584 	case CPU_I6500:
1585 		/* 8-bit event numbers */
1586 		base_id = config & 0xff;
1587 		raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1588 		break;
1589 	case CPU_1004K:
1590 		if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
1591 			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1592 		else
1593 			raw_event.cntr_mask =
1594 				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1595 #ifdef CONFIG_MIPS_MT_SMP
1596 		if (IS_RANGE_P_1004K_EVENT(raw_id, base_id))
1597 			raw_event.range = P;
1598 		else if (unlikely(IS_RANGE_V_1004K_EVENT(raw_id)))
1599 			raw_event.range = V;
1600 		else
1601 			raw_event.range = T;
1602 #endif
1603 		break;
1604 	case CPU_INTERAPTIV:
1605 		if (IS_BOTH_COUNTERS_INTERAPTIV_EVENT(base_id))
1606 			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1607 		else
1608 			raw_event.cntr_mask =
1609 				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1610 #ifdef CONFIG_MIPS_MT_SMP
1611 		if (IS_RANGE_P_INTERAPTIV_EVENT(raw_id, base_id))
1612 			raw_event.range = P;
1613 		else if (unlikely(IS_RANGE_V_INTERAPTIV_EVENT(raw_id)))
1614 			raw_event.range = V;
1615 		else
1616 			raw_event.range = T;
1617 #endif
1618 		break;
1619 	case CPU_BMIPS5000:
1620 		if (IS_BOTH_COUNTERS_BMIPS5000_EVENT(base_id))
1621 			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1622 		else
1623 			raw_event.cntr_mask =
1624 				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1625 		break;
1626 	case CPU_LOONGSON64:
1627 		raw_event.cntr_mask = raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1628 		break;
1629 	}
1630 
1631 	raw_event.event_id = base_id;
1632 
1633 	return &raw_event;
1634 }
1635 
1636 static const struct mips_perf_event *octeon_pmu_map_raw_event(u64 config)
1637 {
1638 	unsigned int raw_id = config & 0xff;
1639 	unsigned int base_id = raw_id & 0x7f;
1640 
1641 
1642 	raw_event.cntr_mask = CNTR_ALL;
1643 	raw_event.event_id = base_id;
1644 
1645 	if (current_cpu_type() == CPU_CAVIUM_OCTEON2) {
1646 		if (base_id > 0x42)
1647 			return ERR_PTR(-EOPNOTSUPP);
1648 	} else {
1649 		if (base_id > 0x3a)
1650 			return ERR_PTR(-EOPNOTSUPP);
1651 	}
1652 
1653 	switch (base_id) {
1654 	case 0x00:
1655 	case 0x0f:
1656 	case 0x1e:
1657 	case 0x1f:
1658 	case 0x2f:
1659 	case 0x34:
1660 	case 0x3b ... 0x3f:
1661 		return ERR_PTR(-EOPNOTSUPP);
1662 	default:
1663 		break;
1664 	}
1665 
1666 	return &raw_event;
1667 }
1668 
1669 static const struct mips_perf_event *xlp_pmu_map_raw_event(u64 config)
1670 {
1671 	unsigned int raw_id = config & 0xff;
1672 
1673 	/* Only 1-63 are defined */
1674 	if ((raw_id < 0x01) || (raw_id > 0x3f))
1675 		return ERR_PTR(-EOPNOTSUPP);
1676 
1677 	raw_event.cntr_mask = CNTR_ALL;
1678 	raw_event.event_id = raw_id;
1679 
1680 	return &raw_event;
1681 }
1682 
1683 static int __init
1684 init_hw_perf_events(void)
1685 {
1686 	int counters, irq;
1687 	int counter_bits;
1688 
1689 	pr_info("Performance counters: ");
1690 
1691 	counters = n_counters();
1692 	if (counters == 0) {
1693 		pr_cont("No available PMU.\n");
1694 		return -ENODEV;
1695 	}
1696 
1697 #ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
1698 	if (!cpu_has_mipsmt_pertccounters)
1699 		counters = counters_total_to_per_cpu(counters);
1700 #endif
1701 
1702 	if (get_c0_perfcount_int)
1703 		irq = get_c0_perfcount_int();
1704 	else if (cp0_perfcount_irq >= 0)
1705 		irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
1706 	else
1707 		irq = -1;
1708 
1709 	mipspmu.map_raw_event = mipsxx_pmu_map_raw_event;
1710 
1711 	switch (current_cpu_type()) {
1712 	case CPU_24K:
1713 		mipspmu.name = "mips/24K";
1714 		mipspmu.general_event_map = &mipsxxcore_event_map;
1715 		mipspmu.cache_event_map = &mipsxxcore_cache_map;
1716 		break;
1717 	case CPU_34K:
1718 		mipspmu.name = "mips/34K";
1719 		mipspmu.general_event_map = &mipsxxcore_event_map;
1720 		mipspmu.cache_event_map = &mipsxxcore_cache_map;
1721 		break;
1722 	case CPU_74K:
1723 		mipspmu.name = "mips/74K";
1724 		mipspmu.general_event_map = &mipsxxcore_event_map2;
1725 		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
1726 		break;
1727 	case CPU_PROAPTIV:
1728 		mipspmu.name = "mips/proAptiv";
1729 		mipspmu.general_event_map = &mipsxxcore_event_map2;
1730 		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
1731 		break;
1732 	case CPU_P5600:
1733 		mipspmu.name = "mips/P5600";
1734 		mipspmu.general_event_map = &mipsxxcore_event_map2;
1735 		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
1736 		break;
1737 	case CPU_P6600:
1738 		mipspmu.name = "mips/P6600";
1739 		mipspmu.general_event_map = &mipsxxcore_event_map2;
1740 		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
1741 		break;
1742 	case CPU_I6400:
1743 		mipspmu.name = "mips/I6400";
1744 		mipspmu.general_event_map = &i6x00_event_map;
1745 		mipspmu.cache_event_map = &i6x00_cache_map;
1746 		break;
1747 	case CPU_I6500:
1748 		mipspmu.name = "mips/I6500";
1749 		mipspmu.general_event_map = &i6x00_event_map;
1750 		mipspmu.cache_event_map = &i6x00_cache_map;
1751 		break;
1752 	case CPU_1004K:
1753 		mipspmu.name = "mips/1004K";
1754 		mipspmu.general_event_map = &mipsxxcore_event_map;
1755 		mipspmu.cache_event_map = &mipsxxcore_cache_map;
1756 		break;
1757 	case CPU_1074K:
1758 		mipspmu.name = "mips/1074K";
1759 		mipspmu.general_event_map = &mipsxxcore_event_map;
1760 		mipspmu.cache_event_map = &mipsxxcore_cache_map;
1761 		break;
1762 	case CPU_INTERAPTIV:
1763 		mipspmu.name = "mips/interAptiv";
1764 		mipspmu.general_event_map = &mipsxxcore_event_map;
1765 		mipspmu.cache_event_map = &mipsxxcore_cache_map;
1766 		break;
1767 	case CPU_LOONGSON32:
1768 		mipspmu.name = "mips/loongson1";
1769 		mipspmu.general_event_map = &mipsxxcore_event_map;
1770 		mipspmu.cache_event_map = &mipsxxcore_cache_map;
1771 		break;
1772 	case CPU_LOONGSON64:
1773 		mipspmu.name = "mips/loongson3";
1774 		mipspmu.general_event_map = &loongson3_event_map;
1775 		mipspmu.cache_event_map = &loongson3_cache_map;
1776 		break;
1777 	case CPU_CAVIUM_OCTEON:
1778 	case CPU_CAVIUM_OCTEON_PLUS:
1779 	case CPU_CAVIUM_OCTEON2:
1780 		mipspmu.name = "octeon";
1781 		mipspmu.general_event_map = &octeon_event_map;
1782 		mipspmu.cache_event_map = &octeon_cache_map;
1783 		mipspmu.map_raw_event = octeon_pmu_map_raw_event;
1784 		break;
1785 	case CPU_BMIPS5000:
1786 		mipspmu.name = "BMIPS5000";
1787 		mipspmu.general_event_map = &bmips5000_event_map;
1788 		mipspmu.cache_event_map = &bmips5000_cache_map;
1789 		break;
1790 	case CPU_XLP:
1791 		mipspmu.name = "xlp";
1792 		mipspmu.general_event_map = &xlp_event_map;
1793 		mipspmu.cache_event_map = &xlp_cache_map;
1794 		mipspmu.map_raw_event = xlp_pmu_map_raw_event;
1795 		break;
1796 	default:
1797 		pr_cont("Either hardware does not support performance "
1798 			"counters, or support is not yet implemented.\n");
1799 		return -ENODEV;
1800 	}
1801 
1802 	mipspmu.num_counters = counters;
1803 	mipspmu.irq = irq;
1804 
1805 	if (read_c0_perfctrl0() & MIPS_PERFCTRL_W) {
1806 		mipspmu.max_period = (1ULL << 63) - 1;
1807 		mipspmu.valid_count = (1ULL << 63) - 1;
1808 		mipspmu.overflow = 1ULL << 63;
1809 		mipspmu.read_counter = mipsxx_pmu_read_counter_64;
1810 		mipspmu.write_counter = mipsxx_pmu_write_counter_64;
1811 		counter_bits = 64;
1812 	} else {
1813 		mipspmu.max_period = (1ULL << 31) - 1;
1814 		mipspmu.valid_count = (1ULL << 31) - 1;
1815 		mipspmu.overflow = 1ULL << 31;
1816 		mipspmu.read_counter = mipsxx_pmu_read_counter;
1817 		mipspmu.write_counter = mipsxx_pmu_write_counter;
1818 		counter_bits = 32;
1819 	}
1820 
1821 	on_each_cpu(reset_counters, (void *)(long)counters, 1);
1822 
1823 	pr_cont("%s PMU enabled, %d %d-bit counters available to each "
1824 		"CPU, irq %d%s\n", mipspmu.name, counters, counter_bits, irq,
1825 		irq < 0 ? " (share with timer interrupt)" : "");
1826 
1827 	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
1828 
1829 	return 0;
1830 }
1831 early_initcall(init_hw_perf_events);
1832