#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) || \
    defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_SB1)

#define M_CONFIG1_PC	(1 << 4)

#define M_PERFCTL_EXL			(1UL      <<  0)
#define M_PERFCTL_KERNEL		(1UL      <<  1)
#define M_PERFCTL_SUPERVISOR		(1UL      <<  2)
#define M_PERFCTL_USER			(1UL      <<  3)
#define M_PERFCTL_INTERRUPT_ENABLE	(1UL      <<  4)
#define M_PERFCTL_EVENT(event)		(((event) & 0x3ff)  << 5)
#define M_PERFCTL_VPEID(vpe)		((vpe)    << 16)
#define M_PERFCTL_MT_EN(filter)		((filter) << 20)
#define    M_TC_EN_ALL			M_PERFCTL_MT_EN(0)
#define    M_TC_EN_VPE			M_PERFCTL_MT_EN(1)
#define    M_TC_EN_TC			M_PERFCTL_MT_EN(2)
#define M_PERFCTL_TCID(tcid)		((tcid)   << 22)
#define M_PERFCTL_WIDE			(1UL      << 30)
#define M_PERFCTL_MORE			(1UL      << 31)

#define M_PERFCTL_COUNT_EVENT_WHENEVER	(M_PERFCTL_EXL |		\
					M_PERFCTL_KERNEL |		\
					M_PERFCTL_USER |		\
					M_PERFCTL_SUPERVISOR |		\
					M_PERFCTL_INTERRUPT_ENABLE)

#ifdef CONFIG_MIPS_MT_SMP
#define M_PERFCTL_CONFIG_MASK		0x3fff801f
#else
#define M_PERFCTL_CONFIG_MASK		0x1f
#endif
#define M_PERFCTL_EVENT_MASK		0xfe0

#define M_COUNTER_OVERFLOW		(1UL      << 31)
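
/*
 * Illustrative example (not used by the code): a control word that
 * counts event number 0x0a in user and kernel mode with the overflow
 * interrupt enabled would be built as
 *
 *	M_PERFCTL_EVENT(0x0a) | M_PERFCTL_USER | M_PERFCTL_KERNEL |
 *		M_PERFCTL_INTERRUPT_ENABLE
 *
 * i.e. (0x0a << 5) | (1 << 3) | (1 << 1) | (1 << 4) == 0x15a.
 */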

#ifdef CONFIG_MIPS_MT_SMP
static int cpu_has_mipsmt_pertccounters;

/*
 * FIXME: For VSMP, vpe_id() is redefined for Perf-events, because
 * cpu_data[cpuid].vpe_id reports 0 for _both_ CPUs.
 */
#if defined(CONFIG_HW_PERF_EVENTS)
#define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
			0 : smp_processor_id())
#else
#define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
			0 : cpu_data[smp_processor_id()].vpe_id)
#endif

/* Copied from op_model_mipsxx.c */
static inline unsigned int vpe_shift(void)
{
	if (num_possible_cpus() > 1)
		return 1;

	return 0;
}
#else /* !CONFIG_MIPS_MT_SMP */
#define vpe_id()	0

static inline unsigned int vpe_shift(void)
{
	return 0;
}
#endif /* CONFIG_MIPS_MT_SMP */

static inline unsigned int
counters_total_to_per_cpu(unsigned int counters)
{
	return counters >> vpe_shift();
}

static inline unsigned int
counters_per_cpu_to_total(unsigned int counters)
{
	return counters << vpe_shift();
}
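
/*
 * Illustrative example: on a dual-VPE core (e.g. a 34K) without per-TC
 * counters, the four hardware counters are split between the two VPEs,
 * so counters_total_to_per_cpu(4) == 2 and counters_per_cpu_to_total(2)
 * == 4.  On a single-CPU system vpe_shift() is 0 and both helpers are
 * identity functions.
 */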

#define __define_perf_accessors(r, n, np)				\
									\
static inline unsigned int r_c0_ ## r ## n(void)			\
{									\
	unsigned int cpu = vpe_id();					\
									\
	switch (cpu) {							\
	case 0:								\
		return read_c0_ ## r ## n();				\
	case 1:								\
		return read_c0_ ## r ## np();				\
	default:							\
		BUG();							\
	}								\
	return 0;							\
}									\
									\
static inline void w_c0_ ## r ## n(unsigned int value)			\
{									\
	unsigned int cpu = vpe_id();					\
									\
	switch (cpu) {							\
	case 0:								\
		write_c0_ ## r ## n(value);				\
		return;							\
	case 1:								\
		write_c0_ ## r ## np(value);				\
		return;							\
	default:							\
		BUG();							\
	}								\
	return;								\
}									\

__define_perf_accessors(perfcntr, 0, 2)
__define_perf_accessors(perfcntr, 1, 3)
__define_perf_accessors(perfcntr, 2, 0)
__define_perf_accessors(perfcntr, 3, 1)

__define_perf_accessors(perfctrl, 0, 2)
__define_perf_accessors(perfctrl, 1, 3)
__define_perf_accessors(perfctrl, 2, 0)
__define_perf_accessors(perfctrl, 3, 1)
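
/*
 * The accessors above give each VPE what looks like a private counter
 * bank: r_c0_perfcntr0(), for example, expands to read_c0_perfcntr0()
 * when vpe_id() is 0 and to read_c0_perfcntr2() when vpe_id() is 1.
 * With the counters split two per VPE, VPE 0 effectively owns hardware
 * counters 0/1 and VPE 1 owns hardware counters 2/3.
 */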

static inline int __n_counters(void)
{
	if (!(read_c0_config1() & M_CONFIG1_PC))
		return 0;
	if (!(read_c0_perfctrl0() & M_PERFCTL_MORE))
		return 1;
	if (!(read_c0_perfctrl1() & M_PERFCTL_MORE))
		return 2;
	if (!(read_c0_perfctrl2() & M_PERFCTL_MORE))
		return 3;

	return 4;
}

static inline int n_counters(void)
{
	int counters;

	switch (current_cpu_type()) {
	case CPU_R10000:
		counters = 2;
		break;

	case CPU_R12000:
	case CPU_R14000:
		counters = 4;
		break;

	default:
		counters = __n_counters();
	}

	return counters;
}

static void reset_counters(void *arg)
{
	int counters = (int)(long)arg;
	switch (counters) {
	case 4:
		w_c0_perfctrl3(0);
		w_c0_perfcntr3(0);
	case 3:
		w_c0_perfctrl2(0);
		w_c0_perfcntr2(0);
	case 2:
		w_c0_perfctrl1(0);
		w_c0_perfcntr1(0);
	case 1:
		w_c0_perfctrl0(0);
		w_c0_perfcntr0(0);
	}
}

static inline u64
mipsxx_pmu_read_counter(unsigned int idx)
{
	switch (idx) {
	case 0:
		return r_c0_perfcntr0();
	case 1:
		return r_c0_perfcntr1();
	case 2:
		return r_c0_perfcntr2();
	case 3:
		return r_c0_perfcntr3();
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return 0;
	}
}

static inline void
mipsxx_pmu_write_counter(unsigned int idx, u64 val)
{
	switch (idx) {
	case 0:
		w_c0_perfcntr0(val);
		return;
	case 1:
		w_c0_perfcntr1(val);
		return;
	case 2:
		w_c0_perfcntr2(val);
		return;
	case 3:
		w_c0_perfcntr3(val);
		return;
	}
}

static inline unsigned int
mipsxx_pmu_read_control(unsigned int idx)
{
	switch (idx) {
	case 0:
		return r_c0_perfctrl0();
	case 1:
		return r_c0_perfctrl1();
	case 2:
		return r_c0_perfctrl2();
	case 3:
		return r_c0_perfctrl3();
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return 0;
	}
}

static inline void
mipsxx_pmu_write_control(unsigned int idx, unsigned int val)
{
	switch (idx) {
	case 0:
		w_c0_perfctrl0(val);
		return;
	case 1:
		w_c0_perfctrl1(val);
		return;
	case 2:
		w_c0_perfctrl2(val);
		return;
	case 3:
		w_c0_perfctrl3(val);
		return;
	}
}

#ifdef CONFIG_MIPS_MT_SMP
static DEFINE_RWLOCK(pmuint_rwlock);
#endif

/* 24K/34K/1004K cores can share the same event map. */
static const struct mips_perf_event mipsxxcore_event_map
				[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { UNSUPPORTED_PERF_EVENT_ID },
	[PERF_COUNT_HW_CACHE_MISSES] = { UNSUPPORTED_PERF_EVENT_ID },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_EVEN, T },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
	[PERF_COUNT_HW_BUS_CYCLES] = { UNSUPPORTED_PERF_EVENT_ID },
};

/* 74K core has different branch event code. */
static const struct mips_perf_event mipsxx74Kcore_event_map
				[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { UNSUPPORTED_PERF_EVENT_ID },
	[PERF_COUNT_HW_CACHE_MISSES] = { UNSUPPORTED_PERF_EVENT_ID },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x27, CNTR_EVEN, T },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T },
	[PERF_COUNT_HW_BUS_CYCLES] = { UNSUPPORTED_PERF_EVENT_ID },
};

/* 24K/34K/1004K cores can share the same cache event map. */
static const struct mips_perf_event mipsxxcore_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	/*
	 * Like some other architectures (e.g. ARM), the performance
	 * counters don't differentiate between read and write
	 * accesses/misses, so this isn't strictly correct, but it's the
	 * best we can do. Writes and reads get combined.
	 */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x0a, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x0b, CNTR_EVEN | CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x0a, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x0b, CNTR_EVEN | CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x09, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x09, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x09, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x09, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { 0x14, CNTR_EVEN, T },
		/*
		 * Note that MIPS has only "hit" events countable for
		 * the prefetch operation.
		 */
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x15, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN, P },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x15, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN, P },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x05, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x05, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x05, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x05, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(BPU)] = {
	/* Using the same code for *HW_BRANCH* */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x02, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x02, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(NODE)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
};

/* 74K core has completely different cache event map. */
static const struct mips_perf_event mipsxx74Kcore_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	/*
	 * Like some other architectures (e.g. ARM), the performance
	 * counters don't differentiate between read and write
	 * accesses/misses, so this isn't strictly correct, but it's the
	 * best we can do. Writes and reads get combined.
	 */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x17, CNTR_ODD, T },
		[C(RESULT_MISS)]	= { 0x18, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x17, CNTR_ODD, T },
		[C(RESULT_MISS)]	= { 0x18, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { 0x34, CNTR_EVEN, T },
		/*
		 * Note that MIPS has only "hit" events countable for
		 * the prefetch operation.
		 */
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x1c, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x1d, CNTR_EVEN | CNTR_ODD, P },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x1c, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x1d, CNTR_EVEN | CNTR_ODD, P },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(DTLB)] = {
	/* 74K core does not have specific DTLB events. */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x04, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x04, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x04, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x04, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(BPU)] = {
	/* Using the same code for *HW_BRANCH* */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x27, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x27, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x27, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x27, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(NODE)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
};
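
/*
 * The third initialiser in the tables above is the event's range on
 * multithreading cores: T, V and P are expected to select TC-, VPE- and
 * processor-wide counting respectively (an assumption based on how
 * check_and_calc_range() below uses them to pick the MT counting mode).
 */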

#ifdef CONFIG_MIPS_MT_SMP
static void
check_and_calc_range(struct perf_event *event,
			const struct mips_perf_event *pev)
{
	struct hw_perf_event *hwc = &event->hw;

	if (event->cpu >= 0) {
		if (pev->range > V) {
			/*
			 * The user selected an event that is processor
			 * wide, while expecting it to be VPE wide.
			 */
			hwc->config_base |= M_TC_EN_ALL;
		} else {
			/*
			 * FIXME: cpu_data[event->cpu].vpe_id reports 0
			 * for both CPUs.
			 */
			hwc->config_base |= M_PERFCTL_VPEID(event->cpu);
			hwc->config_base |= M_TC_EN_VPE;
		}
	} else
		hwc->config_base |= M_TC_EN_ALL;
}
#else
static void
check_and_calc_range(struct perf_event *event,
			const struct mips_perf_event *pev)
{
}
#endif

static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	const struct mips_perf_event *pev;
	int err;

	/* Return the MIPS event descriptor for a generic perf event. */
	if (PERF_TYPE_HARDWARE == event->attr.type) {
		if (event->attr.config >= PERF_COUNT_HW_MAX)
			return -EINVAL;
		pev = mipspmu_map_general_event(event->attr.config);
	} else if (PERF_TYPE_HW_CACHE == event->attr.type) {
		pev = mipspmu_map_cache_event(event->attr.config);
	} else if (PERF_TYPE_RAW == event->attr.type) {
		/* We are working on the global raw event. */
		mutex_lock(&raw_event_mutex);
		pev = mipspmu->map_raw_event(event->attr.config);
	} else {
		/* The event type is not (yet) supported. */
		return -EOPNOTSUPP;
	}

	if (IS_ERR(pev)) {
		if (PERF_TYPE_RAW == event->attr.type)
			mutex_unlock(&raw_event_mutex);
		return PTR_ERR(pev);
	}

	/*
	 * We allow maximum flexibility in how each individual counter
	 * shared by a single CPU operates (the mode exclusion and the
	 * range).
	 */
	hwc->config_base = M_PERFCTL_INTERRUPT_ENABLE;

	/* Calculate range bits and validate it. */
	if (num_possible_cpus() > 1)
		check_and_calc_range(event, pev);

	hwc->event_base = mipspmu_perf_event_encode(pev);
	if (PERF_TYPE_RAW == event->attr.type)
		mutex_unlock(&raw_event_mutex);

	if (!attr->exclude_user)
		hwc->config_base |= M_PERFCTL_USER;
	if (!attr->exclude_kernel) {
		hwc->config_base |= M_PERFCTL_KERNEL;
		/* MIPS kernel mode: KSU == 00b || EXL == 1 || ERL == 1 */
		hwc->config_base |= M_PERFCTL_EXL;
	}
	if (!attr->exclude_hv)
		hwc->config_base |= M_PERFCTL_SUPERVISOR;

	hwc->config_base &= M_PERFCTL_CONFIG_MASK;
	/*
	 * The event may belong to another CPU. We do not assign a local
	 * counter to it for now.
	 */
	hwc->idx = -1;
	hwc->config = 0;

	if (!hwc->sample_period) {
		hwc->sample_period  = MAX_PERIOD;
		hwc->last_period    = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	err = 0;
	if (event->group_leader != event) {
		err = validate_group(event);
		if (err)
			return -EINVAL;
	}

	event->destroy = hw_perf_event_destroy;

	return err;
}

static void pause_local_counters(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int counters = mipspmu->num_counters;
	unsigned long flags;

	local_irq_save(flags);
	switch (counters) {
	case 4:
		cpuc->saved_ctrl[3] = r_c0_perfctrl3();
		w_c0_perfctrl3(cpuc->saved_ctrl[3] &
			~M_PERFCTL_COUNT_EVENT_WHENEVER);
	case 3:
		cpuc->saved_ctrl[2] = r_c0_perfctrl2();
		w_c0_perfctrl2(cpuc->saved_ctrl[2] &
			~M_PERFCTL_COUNT_EVENT_WHENEVER);
	case 2:
		cpuc->saved_ctrl[1] = r_c0_perfctrl1();
		w_c0_perfctrl1(cpuc->saved_ctrl[1] &
			~M_PERFCTL_COUNT_EVENT_WHENEVER);
	case 1:
		cpuc->saved_ctrl[0] = r_c0_perfctrl0();
		w_c0_perfctrl0(cpuc->saved_ctrl[0] &
			~M_PERFCTL_COUNT_EVENT_WHENEVER);
	}
	local_irq_restore(flags);
}

static void resume_local_counters(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int counters = mipspmu->num_counters;
	unsigned long flags;

	local_irq_save(flags);
	switch (counters) {
	case 4:
		w_c0_perfctrl3(cpuc->saved_ctrl[3]);
	case 3:
		w_c0_perfctrl2(cpuc->saved_ctrl[2]);
	case 2:
		w_c0_perfctrl1(cpuc->saved_ctrl[1]);
	case 1:
		w_c0_perfctrl0(cpuc->saved_ctrl[0]);
	}
	local_irq_restore(flags);
}

static int mipsxx_pmu_handle_shared_irq(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct perf_sample_data data;
	unsigned int counters = mipspmu->num_counters;
	unsigned int counter;
	int handled = IRQ_NONE;
	struct pt_regs *regs;

	if (cpu_has_mips_r2 && !(read_c0_cause() & (1 << 26)))
		return handled;

	/*
	 * Pause the local counters first, so that while we spin on the
	 * lock here the counters are all paused. If the lock is held
	 * because of perf_disable(), the timer interrupt handler will
	 * simply be delayed.
	 *
	 * See also mipsxx_pmu_start().
	 */
	pause_local_counters();
#ifdef CONFIG_MIPS_MT_SMP
	read_lock(&pmuint_rwlock);
#endif

	regs = get_irq_regs();

	perf_sample_data_init(&data, 0);

	switch (counters) {
#define HANDLE_COUNTER(n)						\
	case n + 1:							\
		if (test_bit(n, cpuc->used_mask)) {			\
			counter = r_c0_perfcntr ## n();			\
			if (counter & M_COUNTER_OVERFLOW) {		\
				w_c0_perfcntr ## n(counter &		\
						VALID_COUNT);		\
				if (test_and_change_bit(n, cpuc->msbs))	\
					handle_associated_event(cpuc,	\
						n, &data, regs);	\
				handled = IRQ_HANDLED;			\
			}						\
		}
	HANDLE_COUNTER(3)
	HANDLE_COUNTER(2)
	HANDLE_COUNTER(1)
	HANDLE_COUNTER(0)
	}

	/*
	 * Do all the work for the pending perf events. We can do this
	 * in here because the performance counter interrupt is a regular
	 * interrupt, not an NMI.
	 */
	if (handled == IRQ_HANDLED)
		irq_work_run();

#ifdef CONFIG_MIPS_MT_SMP
	read_unlock(&pmuint_rwlock);
#endif
	resume_local_counters();
	return handled;
}

static irqreturn_t
mipsxx_pmu_handle_irq(int irq, void *dev)
{
	return mipsxx_pmu_handle_shared_irq();
}

static void mipsxx_pmu_start(void)
{
#ifdef CONFIG_MIPS_MT_SMP
	write_unlock(&pmuint_rwlock);
#endif
	resume_local_counters();
}

/*
 * MIPS performance counters can be per-TC. The control registers cannot
 * be accessed directly across CPUs, so global control would need cross
 * CPU calls. on_each_cpu() could help, but we cannot make sure this
 * function is called with interrupts enabled. So here we pause the local
 * counters, then grab a rwlock and leave the counters on the other CPUs
 * alone. If a counter interrupt is raised while we hold the write lock,
 * the handler on that CPU simply pauses its local counters and spins.
 * We also know we won't be migrated to another CPU after pausing the
 * local counters and before grabbing the lock.
 */
static void mipsxx_pmu_stop(void)
{
	pause_local_counters();
#ifdef CONFIG_MIPS_MT_SMP
	write_lock(&pmuint_rwlock);
#endif
}

static int
mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc,
			struct hw_perf_event *hwc)
{
	int i;

	/*
	 * We only need to care about the counter mask. The range has
	 * already been checked.
	 */
	unsigned long cntr_mask = (hwc->event_base >> 8) & 0xffff;

	for (i = mipspmu->num_counters - 1; i >= 0; i--) {
		/*
		 * Note that some MIPS perf events can be counted by both
		 * even and odd counters, whereas many others can only be
		 * counted by even _or_ odd counters. This introduces an
		 * issue: when an event of the former kind takes the
		 * counter that an event of the latter kind wants to use,
		 * the "counter allocation" for the latter event fails.
		 * If the counters could be swapped dynamically, both
		 * events could be satisfied, but we leave this issue
		 * alone for now.
		 */
		if (test_bit(i, &cntr_mask) &&
			!test_and_set_bit(i, cpuc->used_mask))
			return i;
	}

	return -EAGAIN;
}

static void
mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long flags;

	WARN_ON(idx < 0 || idx >= mipspmu->num_counters);

	local_irq_save(flags);
	cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) |
		(evt->config_base & M_PERFCTL_CONFIG_MASK) |
		/* Make sure the interrupt is enabled. */
		M_PERFCTL_INTERRUPT_ENABLE;
	/*
	 * We do not actually let the counter run here; that is left
	 * until start().
	 */
	local_irq_restore(flags);
}

static void
mipsxx_pmu_disable_event(int idx)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long flags;

	WARN_ON(idx < 0 || idx >= mipspmu->num_counters);

	local_irq_save(flags);
	cpuc->saved_ctrl[idx] = mipsxx_pmu_read_control(idx) &
		~M_PERFCTL_COUNT_EVENT_WHENEVER;
	mipsxx_pmu_write_control(idx, cpuc->saved_ctrl[idx]);
	local_irq_restore(flags);
}
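
/*
 * In the IS_*_EVENT() macros below, "r" is the full raw event id
 * (config & 0xff, with bit 7 encoding the counter parity) and "b" is
 * the base event number (raw id & 0x7f), matching the decoding done in
 * mipsxx_pmu_map_raw_event().
 */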

/* 24K */
#define IS_UNSUPPORTED_24K_EVENT(r, b)					\
	((b) == 12 || (r) == 151 || (r) == 152 || (b) == 26 ||		\
	 (b) == 27 || (r) == 28 || (r) == 158 || (b) == 31 ||		\
	 (b) == 32 || (b) == 34 || (b) == 36 || (r) == 168 ||		\
	 (r) == 172 || (b) == 47 || ((b) >= 56 && (b) <= 63) ||		\
	 ((b) >= 68 && (b) <= 127))
#define IS_BOTH_COUNTERS_24K_EVENT(b)					\
	((b) == 0 || (b) == 1 || (b) == 11)

/* 34K */
#define IS_UNSUPPORTED_34K_EVENT(r, b)					\
	((b) == 12 || (r) == 27 || (r) == 158 || (b) == 36 ||		\
	 (b) == 38 || (r) == 175 || ((b) >= 56 && (b) <= 63) ||		\
	 ((b) >= 68 && (b) <= 127))
#define IS_BOTH_COUNTERS_34K_EVENT(b)					\
	((b) == 0 || (b) == 1 || (b) == 11)
#ifdef CONFIG_MIPS_MT_SMP
#define IS_RANGE_P_34K_EVENT(r, b)					\
	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
	 (b) == 25 || (b) == 39 || (r) == 44 || (r) == 174 ||		\
	 (r) == 176 || ((b) >= 50 && (b) <= 55) ||			\
	 ((b) >= 64 && (b) <= 67))
#define IS_RANGE_V_34K_EVENT(r)	((r) == 47)
#endif

/* 74K */
#define IS_UNSUPPORTED_74K_EVENT(r, b)					\
	((r) == 5 || ((r) >= 135 && (r) <= 137) ||			\
	 ((b) >= 10 && (b) <= 12) || (b) == 22 || (b) == 27 ||		\
	 (b) == 33 || (b) == 34 || ((b) >= 47 && (b) <= 49) ||		\
	 (r) == 178 || (b) == 55 || (b) == 57 || (b) == 60 ||		\
	 (b) == 61 || (r) == 62 || (r) == 191 ||			\
	 ((b) >= 64 && (b) <= 127))
#define IS_BOTH_COUNTERS_74K_EVENT(b)					\
	((b) == 0 || (b) == 1)

/* 1004K */
#define IS_UNSUPPORTED_1004K_EVENT(r, b)				\
	((b) == 12 || (r) == 27 || (r) == 158 || (b) == 38 ||		\
	 (r) == 175 || (b) == 63 || ((b) >= 68 && (b) <= 127))
#define IS_BOTH_COUNTERS_1004K_EVENT(b)					\
	((b) == 0 || (b) == 1 || (b) == 11)
#ifdef CONFIG_MIPS_MT_SMP
#define IS_RANGE_P_1004K_EVENT(r, b)					\
	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
	 (b) == 25 || (b) == 36 || (b) == 39 || (r) == 44 ||		\
	 (r) == 174 || (r) == 176 || ((b) >= 50 && (b) <= 59) ||	\
	 (r) == 188 || (b) == 61 || (b) == 62 ||			\
	 ((b) >= 64 && (b) <= 67))
#define IS_RANGE_V_1004K_EVENT(r)	((r) == 47)
#endif

/*
 * Users can use raw events 0-255: 0-127 select events on the even
 * counters and 128-255 select events on the odd counters, i.e. bit 7
 * indicates the counter parity. For example, to count event number 15
 * (taken from the CPU's user manual) on an odd counter, add 128 to 15
 * and pass 143 (0x8f) as the event config.
 */
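/*
 * Illustrative usage (assuming the perf tool's raw event syntax): the
 * encoding above maps directly onto "r"-style raw events, so something
 * like
 *
 *	perf stat -e r8f ...
 *
 * would request event number 15 on an odd counter, while "-e r0f" would
 * request the same event number on an even counter.
 */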
static const struct mips_perf_event *
mipsxx_pmu_map_raw_event(u64 config)
{
	unsigned int raw_id = config & 0xff;
	unsigned int base_id = raw_id & 0x7f;

	switch (current_cpu_type()) {
	case CPU_24K:
		if (IS_UNSUPPORTED_24K_EVENT(raw_id, base_id))
			return ERR_PTR(-EOPNOTSUPP);
		raw_event.event_id = base_id;
		if (IS_BOTH_COUNTERS_24K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		/*
		 * This is effectively a no-op: the range is only checked
		 * and used for multithreading CPUs, which the 24K is not.
		 */
		raw_event.range = P;
#endif
		break;
	case CPU_34K:
		if (IS_UNSUPPORTED_34K_EVENT(raw_id, base_id))
			return ERR_PTR(-EOPNOTSUPP);
		raw_event.event_id = base_id;
		if (IS_BOTH_COUNTERS_34K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		if (IS_RANGE_P_34K_EVENT(raw_id, base_id))
			raw_event.range = P;
		else if (unlikely(IS_RANGE_V_34K_EVENT(raw_id)))
			raw_event.range = V;
		else
			raw_event.range = T;
#endif
		break;
	case CPU_74K:
		if (IS_UNSUPPORTED_74K_EVENT(raw_id, base_id))
			return ERR_PTR(-EOPNOTSUPP);
		raw_event.event_id = base_id;
		if (IS_BOTH_COUNTERS_74K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		raw_event.range = P;
#endif
		break;
	case CPU_1004K:
		if (IS_UNSUPPORTED_1004K_EVENT(raw_id, base_id))
			return ERR_PTR(-EOPNOTSUPP);
		raw_event.event_id = base_id;
		if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		if (IS_RANGE_P_1004K_EVENT(raw_id, base_id))
			raw_event.range = P;
		else if (unlikely(IS_RANGE_V_1004K_EVENT(raw_id)))
			raw_event.range = V;
		else
			raw_event.range = T;
#endif
		break;
	}

	return &raw_event;
}

static struct mips_pmu mipsxxcore_pmu = {
	.handle_irq = mipsxx_pmu_handle_irq,
	.handle_shared_irq = mipsxx_pmu_handle_shared_irq,
	.start = mipsxx_pmu_start,
	.stop = mipsxx_pmu_stop,
	.alloc_counter = mipsxx_pmu_alloc_counter,
	.read_counter = mipsxx_pmu_read_counter,
	.write_counter = mipsxx_pmu_write_counter,
	.enable_event = mipsxx_pmu_enable_event,
	.disable_event = mipsxx_pmu_disable_event,
	.map_raw_event = mipsxx_pmu_map_raw_event,
	.general_event_map = &mipsxxcore_event_map,
	.cache_event_map = &mipsxxcore_cache_map,
};

static struct mips_pmu mipsxx74Kcore_pmu = {
	.handle_irq = mipsxx_pmu_handle_irq,
	.handle_shared_irq = mipsxx_pmu_handle_shared_irq,
	.start = mipsxx_pmu_start,
	.stop = mipsxx_pmu_stop,
	.alloc_counter = mipsxx_pmu_alloc_counter,
	.read_counter = mipsxx_pmu_read_counter,
	.write_counter = mipsxx_pmu_write_counter,
	.enable_event = mipsxx_pmu_enable_event,
	.disable_event = mipsxx_pmu_disable_event,
	.map_raw_event = mipsxx_pmu_map_raw_event,
	.general_event_map = &mipsxx74Kcore_event_map,
	.cache_event_map = &mipsxx74Kcore_cache_map,
};

static int __init
init_hw_perf_events(void)
{
	int counters, irq;

	pr_info("Performance counters: ");

	counters = n_counters();
	if (counters == 0) {
		pr_cont("No available PMU.\n");
		return -ENODEV;
	}

#ifdef CONFIG_MIPS_MT_SMP
	cpu_has_mipsmt_pertccounters = read_c0_config7() & (1<<19);
	if (!cpu_has_mipsmt_pertccounters)
		counters = counters_total_to_per_cpu(counters);
#endif

#ifdef MSC01E_INT_BASE
	if (cpu_has_veic) {
		/*
		 * Using platform specific interrupt controller defines.
		 */
		irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR;
	} else {
#endif
		if (cp0_perfcount_irq >= 0)
			irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
		else
			irq = -1;
#ifdef MSC01E_INT_BASE
	}
#endif

	on_each_cpu(reset_counters, (void *)(long)counters, 1);

	switch (current_cpu_type()) {
	case CPU_24K:
		mipsxxcore_pmu.name = "mips/24K";
		mipsxxcore_pmu.num_counters = counters;
		mipsxxcore_pmu.irq = irq;
		mipspmu = &mipsxxcore_pmu;
		break;
	case CPU_34K:
		mipsxxcore_pmu.name = "mips/34K";
		mipsxxcore_pmu.num_counters = counters;
		mipsxxcore_pmu.irq = irq;
		mipspmu = &mipsxxcore_pmu;
		break;
	case CPU_74K:
		mipsxx74Kcore_pmu.name = "mips/74K";
		mipsxx74Kcore_pmu.num_counters = counters;
		mipsxx74Kcore_pmu.irq = irq;
		mipspmu = &mipsxx74Kcore_pmu;
		break;
	case CPU_1004K:
		mipsxxcore_pmu.name = "mips/1004K";
		mipsxxcore_pmu.num_counters = counters;
		mipsxxcore_pmu.irq = irq;
		mipspmu = &mipsxxcore_pmu;
		break;
	default:
		pr_cont("Either hardware does not support performance "
			"counters, or not yet implemented.\n");
		return -ENODEV;
	}

	if (mipspmu)
		pr_cont("%s PMU enabled, %d counters available to each "
			"CPU, irq %d%s\n", mipspmu->name, counters, irq,
			irq < 0 ? " (share with timer interrupt)" : "");

	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);

	return 0;
}
early_initcall(init_hw_perf_events);

#endif /* defined(CONFIG_CPU_MIPS32)... */