xref: /openbmc/linux/arch/arm/kernel/perf_event_v6.c (revision 588b48ca)
/*
 * ARMv6 Performance counter handling code.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 *
 * ARMv6 has 2 configurable performance counters and a single cycle counter.
 * They all share a single reset bit but can be written to zero so we can use
 * that for a reset.
 *
 * The counters can't be individually enabled or disabled, so when we remove
 * one event and replace it with another we could get spurious counts from
 * the wrong event. However, we can take advantage of the fact that the
 * performance counters can export events to the event bus, and the event bus
 * itself can be monitored. This requires that we *don't* export the events to
 * the event bus. The procedure for disabling a configurable counter is:
 *	- change the counter to count the ETMEXTOUT[0] signal (0x20). This
 *	  effectively stops the counter from counting.
 *	- disable the counter's interrupt generation (each counter has its
 *	  own interrupt enable bit).
 * Once stopped, the counter value can be written as 0 to reset it.
 *
 * To enable a counter:
 *	- enable the counter's interrupt generation.
 *	- set the new event type.
 *
 * Note: the dedicated cycle counter only counts cycles and can't be
 * enabled/disabled independently of the others. When we want to disable the
 * cycle counter, we just disable the interrupt reporting and start ignoring
 * that counter. When re-enabling, we have to reset the value and enable the
 * interrupt.
 */

#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
enum armv6_perf_types {
	ARMV6_PERFCTR_ICACHE_MISS	    = 0x0,
	ARMV6_PERFCTR_IBUF_STALL	    = 0x1,
	ARMV6_PERFCTR_DDEP_STALL	    = 0x2,
	ARMV6_PERFCTR_ITLB_MISS		    = 0x3,
	ARMV6_PERFCTR_DTLB_MISS		    = 0x4,
	ARMV6_PERFCTR_BR_EXEC		    = 0x5,
	ARMV6_PERFCTR_BR_MISPREDICT	    = 0x6,
	ARMV6_PERFCTR_INSTR_EXEC	    = 0x7,
	ARMV6_PERFCTR_DCACHE_HIT	    = 0x9,
	ARMV6_PERFCTR_DCACHE_ACCESS	    = 0xA,
	ARMV6_PERFCTR_DCACHE_MISS	    = 0xB,
	ARMV6_PERFCTR_DCACHE_WBACK	    = 0xC,
	ARMV6_PERFCTR_SW_PC_CHANGE	    = 0xD,
	ARMV6_PERFCTR_MAIN_TLB_MISS	    = 0xF,
	ARMV6_PERFCTR_EXPL_D_ACCESS	    = 0x10,
	ARMV6_PERFCTR_LSU_FULL_STALL	    = 0x11,
	ARMV6_PERFCTR_WBUF_DRAINED	    = 0x12,
	ARMV6_PERFCTR_CPU_CYCLES	    = 0xFF,
	ARMV6_PERFCTR_NOP		    = 0x20,
};

enum armv6_counters {
	ARMV6_CYCLE_COUNTER = 0,
	ARMV6_COUNTER0,
	ARMV6_COUNTER1,
};

/*
 * The hardware events that we support. We do support cache operations, but
 * we have Harvard (split) caches and no way to combine instruction and data
 * accesses/misses in hardware.
 */
static const unsigned armv6_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV6_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV6_PERFCTR_INSTR_EXEC,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV6_PERFCTR_BR_EXEC,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV6_PERFCTR_BR_MISPREDICT,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV6_PERFCTR_IBUF_STALL,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= ARMV6_PERFCTR_LSU_FULL_STALL,
};
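
/*
 * For illustration: a request for the generic PERF_COUNT_HW_INSTRUCTIONS
 * event resolves through the map above to ARMV6_PERFCTR_INSTR_EXEC (0x7),
 * which is then programmed into one of the two event-select fields of the
 * PMCR (see the ARMV6_PMCR_EVT_COUNT* definitions below). Generic events
 * left at PERF_MAP_ALL_UNSUPPORTED are refused when the event is created.
 */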

static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					  [PERF_COUNT_HW_CACHE_OP_MAX]
					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	/*
	 * The performance counters don't differentiate between read and write
	 * accesses/misses so this isn't strictly correct, but it's the best we
	 * can do. Writes and reads get combined.
	 */
	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV6_PERFCTR_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV6_PERFCTR_DCACHE_MISS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV6_PERFCTR_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV6_PERFCTR_DCACHE_MISS,

	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV6_PERFCTR_ICACHE_MISS,

	/*
	 * The ARM performance counters can count micro DTLB misses, micro ITLB
	 * misses and main TLB misses. There isn't an event for TLB misses, so
	 * use the micro misses here; if users want the main TLB misses they
	 * can use a raw counter (see the example below this map).
	 */
	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV6_PERFCTR_DTLB_MISS,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV6_PERFCTR_DTLB_MISS,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV6_PERFCTR_ITLB_MISS,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV6_PERFCTR_ITLB_MISS,
};
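
/*
 * Raw-counter example referred to above (illustrative, assuming the perf
 * tool's usual raw-event syntax): armv6_map_event() below masks the raw
 * config with 0xFF, so main TLB misses (ARMV6_PERFCTR_MAIN_TLB_MISS, 0xF)
 * can be requested directly with something like:
 *
 *	perf stat -e r0f -- <workload>
 */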

enum armv6mpcore_perf_types {
	ARMV6MPCORE_PERFCTR_ICACHE_MISS	    = 0x0,
	ARMV6MPCORE_PERFCTR_IBUF_STALL	    = 0x1,
	ARMV6MPCORE_PERFCTR_DDEP_STALL	    = 0x2,
	ARMV6MPCORE_PERFCTR_ITLB_MISS	    = 0x3,
	ARMV6MPCORE_PERFCTR_DTLB_MISS	    = 0x4,
	ARMV6MPCORE_PERFCTR_BR_EXEC	    = 0x5,
	ARMV6MPCORE_PERFCTR_BR_NOTPREDICT   = 0x6,
	ARMV6MPCORE_PERFCTR_BR_MISPREDICT   = 0x7,
	ARMV6MPCORE_PERFCTR_INSTR_EXEC	    = 0x8,
	ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS = 0xA,
	ARMV6MPCORE_PERFCTR_DCACHE_RDMISS   = 0xB,
	ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS = 0xC,
	ARMV6MPCORE_PERFCTR_DCACHE_WRMISS   = 0xD,
	ARMV6MPCORE_PERFCTR_DCACHE_EVICTION = 0xE,
	ARMV6MPCORE_PERFCTR_SW_PC_CHANGE    = 0xF,
	ARMV6MPCORE_PERFCTR_MAIN_TLB_MISS   = 0x10,
	ARMV6MPCORE_PERFCTR_EXPL_MEM_ACCESS = 0x11,
	ARMV6MPCORE_PERFCTR_LSU_FULL_STALL  = 0x12,
	ARMV6MPCORE_PERFCTR_WBUF_DRAINED    = 0x13,
	ARMV6MPCORE_PERFCTR_CPU_CYCLES	    = 0xFF,
};

/*
 * The hardware events that we support. We do support cache operations, but
 * we have Harvard (split) caches and no way to combine instruction and data
 * accesses/misses in hardware.
 */
static const unsigned armv6mpcore_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV6MPCORE_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV6MPCORE_PERFCTR_INSTR_EXEC,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV6MPCORE_PERFCTR_BR_EXEC,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV6MPCORE_PERFCTR_BR_MISPREDICT,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV6MPCORE_PERFCTR_IBUF_STALL,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= ARMV6MPCORE_PERFCTR_LSU_FULL_STALL,
};

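
/*
 * Unlike the plain ARMv6 events above, the 11MPCore counters distinguish
 * data-cache reads from writes, so the L1D entries below map to separate
 * read/write access and miss events.
 */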
static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV6MPCORE_PERFCTR_DCACHE_RDMISS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV6MPCORE_PERFCTR_DCACHE_WRMISS,

	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV6MPCORE_PERFCTR_ICACHE_MISS,

	/*
	 * The ARM performance counters can count micro DTLB misses, micro ITLB
	 * misses and main TLB misses. There isn't an event for TLB misses, so
	 * use the micro misses here and if users want the main TLB misses they
	 * can use a raw counter.
	 */
	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV6MPCORE_PERFCTR_DTLB_MISS,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV6MPCORE_PERFCTR_DTLB_MISS,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV6MPCORE_PERFCTR_ITLB_MISS,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV6MPCORE_PERFCTR_ITLB_MISS,
};

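
/*
 * The ARMv6 performance monitor control register (PMCR) is accessed through
 * CP15 c15, c12, 0; the two helpers below read and write it.
 */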
static inline unsigned long
armv6_pmcr_read(void)
{
	u32 val;
	asm volatile("mrc   p15, 0, %0, c15, c12, 0" : "=r"(val));
	return val;
}

static inline void
armv6_pmcr_write(unsigned long val)
{
	asm volatile("mcr   p15, 0, %0, c15, c12, 0" : : "r"(val));
}

#define ARMV6_PMCR_ENABLE		(1 << 0)
#define ARMV6_PMCR_CTR01_RESET		(1 << 1)
#define ARMV6_PMCR_CCOUNT_RESET		(1 << 2)
#define ARMV6_PMCR_CCOUNT_DIV		(1 << 3)
#define ARMV6_PMCR_COUNT0_IEN		(1 << 4)
#define ARMV6_PMCR_COUNT1_IEN		(1 << 5)
#define ARMV6_PMCR_CCOUNT_IEN		(1 << 6)
#define ARMV6_PMCR_COUNT0_OVERFLOW	(1 << 8)
#define ARMV6_PMCR_COUNT1_OVERFLOW	(1 << 9)
#define ARMV6_PMCR_CCOUNT_OVERFLOW	(1 << 10)
#define ARMV6_PMCR_EVT_COUNT0_SHIFT	20
#define ARMV6_PMCR_EVT_COUNT0_MASK	(0xFF << ARMV6_PMCR_EVT_COUNT0_SHIFT)
#define ARMV6_PMCR_EVT_COUNT1_SHIFT	12
#define ARMV6_PMCR_EVT_COUNT1_MASK	(0xFF << ARMV6_PMCR_EVT_COUNT1_SHIFT)

#define ARMV6_PMCR_OVERFLOWED_MASK \
	(ARMV6_PMCR_COUNT0_OVERFLOW | ARMV6_PMCR_COUNT1_OVERFLOW | \
	 ARMV6_PMCR_CCOUNT_OVERFLOW)
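
/*
 * Illustration only (a minimal sketch, not used by the driver): the
 * counter-disable procedure described at the top of this file, expressed
 * with the PMCR fields above for counter 0. The real implementation is
 * armv6pmu_disable_event(), which also takes the PMU lock; the helper name
 * here is purely illustrative.
 */
static inline void armv6_pmcr_example_stop_counter0(void)
{
	unsigned long val = armv6_pmcr_read();

	/* Clear counter 0's event-select field and its interrupt enable. */
	val &= ~(ARMV6_PMCR_EVT_COUNT0_MASK | ARMV6_PMCR_COUNT0_IEN);
	/* Point counter 0 at ETMEXTOUT[0] (0x20) so it stops counting. */
	val |= ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT0_SHIFT;
	armv6_pmcr_write(val);
}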

static inline int
armv6_pmcr_has_overflowed(unsigned long pmcr)
{
	return pmcr & ARMV6_PMCR_OVERFLOWED_MASK;
}

static inline int
armv6_pmcr_counter_has_overflowed(unsigned long pmcr,
				  enum armv6_counters counter)
{
	int ret = 0;

	if (ARMV6_CYCLE_COUNTER == counter)
		ret = pmcr & ARMV6_PMCR_CCOUNT_OVERFLOW;
	else if (ARMV6_COUNTER0 == counter)
		ret = pmcr & ARMV6_PMCR_COUNT0_OVERFLOW;
	else if (ARMV6_COUNTER1 == counter)
		ret = pmcr & ARMV6_PMCR_COUNT1_OVERFLOW;
	else
		WARN_ONCE(1, "invalid counter number (%d)\n", counter);

	return ret;
}

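
/*
 * The three counters live in CP15 c15, c12: opcode2 1 is the cycle counter,
 * opcode2 2 and 3 are the two event counters. The accessors below select the
 * register from the event's counter index.
 */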
static inline u32 armv6pmu_read_counter(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;
	unsigned long value = 0;

	if (ARMV6_CYCLE_COUNTER == counter)
		asm volatile("mrc   p15, 0, %0, c15, c12, 1" : "=r"(value));
	else if (ARMV6_COUNTER0 == counter)
		asm volatile("mrc   p15, 0, %0, c15, c12, 2" : "=r"(value));
	else if (ARMV6_COUNTER1 == counter)
		asm volatile("mrc   p15, 0, %0, c15, c12, 3" : "=r"(value));
	else
		WARN_ONCE(1, "invalid counter number (%d)\n", counter);

	return value;
}

static inline void armv6pmu_write_counter(struct perf_event *event, u32 value)
{
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	if (ARMV6_CYCLE_COUNTER == counter)
		asm volatile("mcr   p15, 0, %0, c15, c12, 1" : : "r"(value));
	else if (ARMV6_COUNTER0 == counter)
		asm volatile("mcr   p15, 0, %0, c15, c12, 2" : : "r"(value));
	else if (ARMV6_COUNTER1 == counter)
		asm volatile("mcr   p15, 0, %0, c15, c12, 3" : : "r"(value));
	else
		WARN_ONCE(1, "invalid counter number (%d)\n", counter);
}

static void armv6pmu_enable_event(struct perf_event *event)
{
	unsigned long val, mask, evt, flags;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
	int idx = hwc->idx;

	if (ARMV6_CYCLE_COUNTER == idx) {
		mask	= 0;
		evt	= ARMV6_PMCR_CCOUNT_IEN;
	} else if (ARMV6_COUNTER0 == idx) {
		mask	= ARMV6_PMCR_EVT_COUNT0_MASK;
		evt	= (hwc->config_base << ARMV6_PMCR_EVT_COUNT0_SHIFT) |
			  ARMV6_PMCR_COUNT0_IEN;
	} else if (ARMV6_COUNTER1 == idx) {
		mask	= ARMV6_PMCR_EVT_COUNT1_MASK;
		evt	= (hwc->config_base << ARMV6_PMCR_EVT_COUNT1_SHIFT) |
			  ARMV6_PMCR_COUNT1_IEN;
	} else {
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	/*
	 * Mask out the current event and set the counter to count the event
	 * that we're interested in.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = armv6_pmcr_read();
	val &= ~mask;
	val |= evt;
	armv6_pmcr_write(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static irqreturn_t
armv6pmu_handle_irq(int irq_num,
		    void *dev)
{
	unsigned long pmcr = armv6_pmcr_read();
	struct perf_sample_data data;
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
	struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events();
	struct pt_regs *regs;
	int idx;

	if (!armv6_pmcr_has_overflowed(pmcr))
		return IRQ_NONE;

	regs = get_irq_regs();

	/*
	 * The interrupts are cleared by writing the overflow flags back to
	 * the control register. All of the other bits don't have any effect
	 * if they are rewritten, so write the whole value back.
	 */
	armv6_pmcr_write(pmcr);

	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		/* Ignore if we don't have an event. */
		if (!event)
			continue;

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!armv6_pmcr_counter_has_overflowed(pmcr, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event);
		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!armpmu_event_set_period(event))
			continue;

		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(event);
	}

	/*
	 * Handle the pending perf events.
	 *
	 * Note: this call *must* be run with interrupts disabled. For
	 * platforms that can have the PMU interrupts raised as an NMI, this
	 * will not work.
	 */
	irq_work_run();

	return IRQ_HANDLED;
}

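
/*
 * armv6pmu_start() and armv6pmu_stop() toggle the global enable bit in the
 * PMCR; the individual event selections and interrupt enables are left
 * untouched.
 */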
static void armv6pmu_start(struct arm_pmu *cpu_pmu)
{
	unsigned long flags, val;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = armv6_pmcr_read();
	val |= ARMV6_PMCR_ENABLE;
	armv6_pmcr_write(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv6pmu_stop(struct arm_pmu *cpu_pmu)
{
	unsigned long flags, val;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = armv6_pmcr_read();
	val &= ~ARMV6_PMCR_ENABLE;
	armv6_pmcr_write(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static int
armv6pmu_get_event_idx(struct pmu_hw_events *cpuc,
				struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	/* Always place a cycle-counting event into the cycle counter. */
	if (ARMV6_PERFCTR_CPU_CYCLES == hwc->config_base) {
		if (test_and_set_bit(ARMV6_CYCLE_COUNTER, cpuc->used_mask))
			return -EAGAIN;

		return ARMV6_CYCLE_COUNTER;
	} else {
		/*
		 * For anything other than a cycle-counting event, try to use
		 * counter0 and counter1.
		 */
		if (!test_and_set_bit(ARMV6_COUNTER1, cpuc->used_mask))
			return ARMV6_COUNTER1;

		if (!test_and_set_bit(ARMV6_COUNTER0, cpuc->used_mask))
			return ARMV6_COUNTER0;

		/* The counters are all in use. */
		return -EAGAIN;
	}
}

static void armv6pmu_disable_event(struct perf_event *event)
{
	unsigned long val, mask, evt, flags;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
	int idx = hwc->idx;

	if (ARMV6_CYCLE_COUNTER == idx) {
		mask	= ARMV6_PMCR_CCOUNT_IEN;
		evt	= 0;
	} else if (ARMV6_COUNTER0 == idx) {
		mask	= ARMV6_PMCR_COUNT0_IEN | ARMV6_PMCR_EVT_COUNT0_MASK;
		evt	= ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT0_SHIFT;
	} else if (ARMV6_COUNTER1 == idx) {
		mask	= ARMV6_PMCR_COUNT1_IEN | ARMV6_PMCR_EVT_COUNT1_MASK;
		evt	= ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT1_SHIFT;
	} else {
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	/*
	 * Mask out the current event and set the counter to count the number
	 * of ETM bus signal assertion cycles. The external reporting should
	 * be disabled and so this should never increment.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = armv6_pmcr_read();
	val &= ~mask;
	val |= evt;
	armv6_pmcr_write(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv6mpcore_pmu_disable_event(struct perf_event *event)
{
	unsigned long val, mask, flags, evt = 0;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
	int idx = hwc->idx;

	if (ARMV6_CYCLE_COUNTER == idx) {
		mask	= ARMV6_PMCR_CCOUNT_IEN;
	} else if (ARMV6_COUNTER0 == idx) {
		mask	= ARMV6_PMCR_COUNT0_IEN;
	} else if (ARMV6_COUNTER1 == idx) {
		mask	= ARMV6_PMCR_COUNT1_IEN;
	} else {
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	/*
	 * Unlike UP ARMv6, we don't have a way of stopping the counters. We
	 * simply disable the interrupt reporting.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = armv6_pmcr_read();
	val &= ~mask;
	val |= evt;
	armv6_pmcr_write(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static int armv6_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv6_perf_map,
				&armv6_perf_cache_map, 0xFF);
}

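
/*
 * Common setup for the single-core ARMv6 PMUs: three counters in total (the
 * cycle counter plus the two event counters), each 32 bits wide, hence the
 * (1 << 32) - 1 max period.
 */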
static void armv6pmu_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->handle_irq	= armv6pmu_handle_irq;
	cpu_pmu->enable		= armv6pmu_enable_event;
	cpu_pmu->disable	= armv6pmu_disable_event;
	cpu_pmu->read_counter	= armv6pmu_read_counter;
	cpu_pmu->write_counter	= armv6pmu_write_counter;
	cpu_pmu->get_event_idx	= armv6pmu_get_event_idx;
	cpu_pmu->start		= armv6pmu_start;
	cpu_pmu->stop		= armv6pmu_stop;
	cpu_pmu->map_event	= armv6_map_event;
	cpu_pmu->num_events	= 3;
	cpu_pmu->max_period	= (1LLU << 32) - 1;
}

static int armv6_1136_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv6pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv6_1136";
	return 0;
}

static int armv6_1156_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv6pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv6_1156";
	return 0;
}

static int armv6_1176_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv6pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv6_1176";
	return 0;
}

/*
 * ARMv6mpcore is almost identical to single-core ARMv6, except that some of
 * the events have different enumerations and there is no *hack* to stop the
 * programmable counters. To stop the counters we simply disable the
 * interrupt reporting and update the event. When unthrottling we reset the
 * period and enable the interrupt reporting.
 */

static int armv6mpcore_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv6mpcore_perf_map,
				&armv6mpcore_perf_cache_map, 0xFF);
}

static int armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->name		= "armv6_11mpcore";
	cpu_pmu->handle_irq	= armv6pmu_handle_irq;
	cpu_pmu->enable		= armv6pmu_enable_event;
	cpu_pmu->disable	= armv6mpcore_pmu_disable_event;
	cpu_pmu->read_counter	= armv6pmu_read_counter;
	cpu_pmu->write_counter	= armv6pmu_write_counter;
	cpu_pmu->get_event_idx	= armv6pmu_get_event_idx;
	cpu_pmu->start		= armv6pmu_start;
	cpu_pmu->stop		= armv6pmu_stop;
	cpu_pmu->map_event	= armv6mpcore_map_event;
	cpu_pmu->num_events	= 3;
	cpu_pmu->max_period	= (1LLU << 32) - 1;

	return 0;
}
#else
static int armv6_1136_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}

static int armv6_1156_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}

static int armv6_1176_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}

static int armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}
#endif	/* CONFIG_CPU_V6 || CONFIG_CPU_V6K */