xref: /openbmc/linux/arch/arm/kernel/perf_event_v7.c (revision afb46f79)
1 /*
2  * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
3  *
4  * ARMv7 support: Jean Pihet <jpihet@mvista.com>
5  * 2010 (c) MontaVista Software, LLC.
6  *
7  * Copied from ARMv6 code, with the low level code inspired
8  *  by the ARMv7 Oprofile code.
9  *
10  * Cortex-A8 has up to 4 configurable performance counters and
11  *  a single cycle counter.
12  * Cortex-A9 has up to 31 configurable performance counters and
13  *  a single cycle counter.
14  *
15  * All counters can be enabled/disabled and IRQ masked separately. The cycle
16  *  counter and the event counters (as a group) can be reset independently.
17  */
18 
19 #ifdef CONFIG_CPU_V7
20 
21 #include <asm/cp15.h>
22 #include <asm/vfp.h>
23 #include "../vfp/vfpinstr.h"
24 
25 /*
26  * Common ARMv7 event types
27  *
28  * Note: An implementation may not be able to count all of these events
29  * but the encodings are considered to be `reserved' in the case that
30  * they are not available.
31  */
32 enum armv7_perf_types {
33 	ARMV7_PERFCTR_PMNC_SW_INCR			= 0x00,
34 	ARMV7_PERFCTR_L1_ICACHE_REFILL			= 0x01,
35 	ARMV7_PERFCTR_ITLB_REFILL			= 0x02,
36 	ARMV7_PERFCTR_L1_DCACHE_REFILL			= 0x03,
37 	ARMV7_PERFCTR_L1_DCACHE_ACCESS			= 0x04,
38 	ARMV7_PERFCTR_DTLB_REFILL			= 0x05,
39 	ARMV7_PERFCTR_MEM_READ				= 0x06,
40 	ARMV7_PERFCTR_MEM_WRITE				= 0x07,
41 	ARMV7_PERFCTR_INSTR_EXECUTED			= 0x08,
42 	ARMV7_PERFCTR_EXC_TAKEN				= 0x09,
43 	ARMV7_PERFCTR_EXC_EXECUTED			= 0x0A,
44 	ARMV7_PERFCTR_CID_WRITE				= 0x0B,
45 
46 	/*
47 	 * ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
48 	 * It counts:
49 	 *  - all (taken) branch instructions,
50 	 *  - instructions that explicitly write the PC,
51 	 *  - exception generating instructions.
52 	 */
53 	ARMV7_PERFCTR_PC_WRITE				= 0x0C,
54 	ARMV7_PERFCTR_PC_IMM_BRANCH			= 0x0D,
55 	ARMV7_PERFCTR_PC_PROC_RETURN			= 0x0E,
56 	ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS		= 0x0F,
57 	ARMV7_PERFCTR_PC_BRANCH_MIS_PRED		= 0x10,
58 	ARMV7_PERFCTR_CLOCK_CYCLES			= 0x11,
59 	ARMV7_PERFCTR_PC_BRANCH_PRED			= 0x12,
60 
61 	/* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */
62 	ARMV7_PERFCTR_MEM_ACCESS			= 0x13,
63 	ARMV7_PERFCTR_L1_ICACHE_ACCESS			= 0x14,
64 	ARMV7_PERFCTR_L1_DCACHE_WB			= 0x15,
65 	ARMV7_PERFCTR_L2_CACHE_ACCESS			= 0x16,
66 	ARMV7_PERFCTR_L2_CACHE_REFILL			= 0x17,
67 	ARMV7_PERFCTR_L2_CACHE_WB			= 0x18,
68 	ARMV7_PERFCTR_BUS_ACCESS			= 0x19,
69 	ARMV7_PERFCTR_MEM_ERROR				= 0x1A,
70 	ARMV7_PERFCTR_INSTR_SPEC			= 0x1B,
71 	ARMV7_PERFCTR_TTBR_WRITE			= 0x1C,
72 	ARMV7_PERFCTR_BUS_CYCLES			= 0x1D,
73 
74 	ARMV7_PERFCTR_CPU_CYCLES			= 0xFF
75 };
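
/*
 * Note: these encodings are the values written to the EVENT field of
 * PMXEVTYPER via armv7_pmnc_write_evtsel() below. The per-CPU map_event()
 * helpers apply a 0xFF raw-event mask, so the same numbers can also be
 * requested directly as raw perf events; a raw config of 0x03, for
 * example, would select ARMV7_PERFCTR_L1_DCACHE_REFILL.
 */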
76 
77 /* ARMv7 Cortex-A8 specific event types */
78 enum armv7_a8_perf_types {
79 	ARMV7_A8_PERFCTR_L2_CACHE_ACCESS		= 0x43,
80 	ARMV7_A8_PERFCTR_L2_CACHE_REFILL		= 0x44,
81 	ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS		= 0x50,
82 	ARMV7_A8_PERFCTR_STALL_ISIDE			= 0x56,
83 };
84 
85 /* ARMv7 Cortex-A9 specific event types */
86 enum armv7_a9_perf_types {
87 	ARMV7_A9_PERFCTR_INSTR_CORE_RENAME		= 0x68,
88 	ARMV7_A9_PERFCTR_STALL_ICACHE			= 0x60,
89 	ARMV7_A9_PERFCTR_STALL_DISPATCH			= 0x66,
90 };
91 
92 /* ARMv7 Cortex-A5 specific event types */
93 enum armv7_a5_perf_types {
94 	ARMV7_A5_PERFCTR_PREFETCH_LINEFILL		= 0xc2,
95 	ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP		= 0xc3,
96 };
97 
98 /* ARMv7 Cortex-A15 specific event types */
99 enum armv7_a15_perf_types {
100 	ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ		= 0x40,
101 	ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE	= 0x41,
102 	ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ		= 0x42,
103 	ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE	= 0x43,
104 
105 	ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ		= 0x4C,
106 	ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE		= 0x4D,
107 
108 	ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ		= 0x50,
109 	ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE		= 0x51,
110 	ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ		= 0x52,
111 	ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE		= 0x53,
112 
113 	ARMV7_A15_PERFCTR_PC_WRITE_SPEC			= 0x76,
114 };
115 
116 /* ARMv7 Cortex-A12 specific event types */
117 enum armv7_a12_perf_types {
118 	ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ		= 0x40,
119 	ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE	= 0x41,
120 
121 	ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ		= 0x50,
122 	ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE		= 0x51,
123 
124 	ARMV7_A12_PERFCTR_PC_WRITE_SPEC			= 0x76,
125 
126 	ARMV7_A12_PERFCTR_PF_TLB_REFILL			= 0xe7,
127 };
128 
129 /* ARMv7 Krait specific event types */
130 enum krait_perf_types {
131 	KRAIT_PMRESR0_GROUP0				= 0xcc,
132 	KRAIT_PMRESR1_GROUP0				= 0xd0,
133 	KRAIT_PMRESR2_GROUP0				= 0xd4,
134 	KRAIT_VPMRESR0_GROUP0				= 0xd8,
135 
136 	KRAIT_PERFCTR_L1_ICACHE_ACCESS			= 0x10011,
137 	KRAIT_PERFCTR_L1_ICACHE_MISS			= 0x10010,
138 
139 	KRAIT_PERFCTR_L1_ITLB_ACCESS			= 0x12222,
140 	KRAIT_PERFCTR_L1_DTLB_ACCESS			= 0x12210,
141 };
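
/*
 * Unlike the generic ARMv7 encodings above, the KRAIT_PERFCTR_* values use
 * the extended 0xNRCCG scheme described in the PMRESRn comment further
 * down: 0x10011, for instance, is prefix N=1 (Krait CPU), region R=0,
 * event class CC=0x01, group G=1.
 */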
142 
143 /*
144  * Cortex-A8 HW events mapping
145  *
146  * The hardware events that we support. We do support cache operations but
147  * we have Harvard caches and no way to combine instruction and data
148  * accesses/misses in hardware.
149  */
150 static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
151 	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
152 	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
153 	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
154 	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
155 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
156 	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
157 	[PERF_COUNT_HW_BUS_CYCLES]		= HW_OP_UNSUPPORTED,
158 	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV7_A8_PERFCTR_STALL_ISIDE,
159 	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= HW_OP_UNSUPPORTED,
160 };
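
/*
 * Minimal userspace sketch (not part of this driver) of how an event that
 * goes through one of these maps is requested.  Error handling is omitted
 * and the pid/cpu arguments are only illustrative; the snippet is kept
 * under "#if 0" so it never affects the build.
 */
#if 0
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_branch_miss_counter(void)
{
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_HARDWARE,
		.size		= sizeof(attr),
		/* resolved to ARMV7_PERFCTR_PC_BRANCH_MIS_PRED by the map above */
		.config		= PERF_COUNT_HW_BRANCH_MISSES,
		.disabled	= 1,
	};

	/* count for the calling thread, on any CPU */
	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}
#endif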
161 
162 static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
163 					  [PERF_COUNT_HW_CACHE_OP_MAX]
164 					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
165 	[C(L1D)] = {
166 		/*
167 		 * The performance counters don't differentiate between read
168 		 * and write accesses/misses so this isn't strictly correct,
169 		 * but it's the best we can do. Writes and reads get
170 		 * combined.
171 		 */
172 		[C(OP_READ)] = {
173 			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
174 			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
175 		},
176 		[C(OP_WRITE)] = {
177 			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
178 			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
179 		},
180 		[C(OP_PREFETCH)] = {
181 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
182 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
183 		},
184 	},
185 	[C(L1I)] = {
186 		[C(OP_READ)] = {
187 			[C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
188 			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
189 		},
190 		[C(OP_WRITE)] = {
191 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
192 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
193 		},
194 		[C(OP_PREFETCH)] = {
195 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
196 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
197 		},
198 	},
199 	[C(LL)] = {
200 		[C(OP_READ)] = {
201 			[C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
202 			[C(RESULT_MISS)]	= ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
203 		},
204 		[C(OP_WRITE)] = {
205 			[C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
206 			[C(RESULT_MISS)]	= ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
207 		},
208 		[C(OP_PREFETCH)] = {
209 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
210 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
211 		},
212 	},
213 	[C(DTLB)] = {
214 		[C(OP_READ)] = {
215 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
216 			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
217 		},
218 		[C(OP_WRITE)] = {
219 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
220 			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
221 		},
222 		[C(OP_PREFETCH)] = {
223 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
224 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
225 		},
226 	},
227 	[C(ITLB)] = {
228 		[C(OP_READ)] = {
229 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
230 			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
231 		},
232 		[C(OP_WRITE)] = {
233 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
234 			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
235 		},
236 		[C(OP_PREFETCH)] = {
237 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
238 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
239 		},
240 	},
241 	[C(BPU)] = {
242 		[C(OP_READ)] = {
243 			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
244 			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
245 		},
246 		[C(OP_WRITE)] = {
247 			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
248 			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
249 		},
250 		[C(OP_PREFETCH)] = {
251 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
252 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
253 		},
254 	},
255 	[C(NODE)] = {
256 		[C(OP_READ)] = {
257 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
258 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
259 		},
260 		[C(OP_WRITE)] = {
261 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
262 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
263 		},
264 		[C(OP_PREFETCH)] = {
265 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
266 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
267 		},
268 	},
269 };
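
/*
 * As with the other cache maps in this file, armpmu_map_event() resolves a
 * generic PERF_TYPE_HW_CACHE config (cache type, op and result packed into
 * one value) through this table; a (L1D, OP_READ, RESULT_MISS) request,
 * for example, would land on ARMV7_PERFCTR_L1_DCACHE_REFILL here.
 */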
270 
271 /*
272  * Cortex-A9 HW events mapping
273  */
274 static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
275 	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
276 	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_A9_PERFCTR_INSTR_CORE_RENAME,
277 	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
278 	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
279 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
280 	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
281 	[PERF_COUNT_HW_BUS_CYCLES]		= HW_OP_UNSUPPORTED,
282 	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV7_A9_PERFCTR_STALL_ICACHE,
283 	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= ARMV7_A9_PERFCTR_STALL_DISPATCH,
284 };
285 
286 static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
287 					  [PERF_COUNT_HW_CACHE_OP_MAX]
288 					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
289 	[C(L1D)] = {
290 		/*
291 		 * The performance counters don't differentiate between read
292 		 * and write accesses/misses so this isn't strictly correct,
293 		 * but it's the best we can do. Writes and reads get
294 		 * combined.
295 		 */
296 		[C(OP_READ)] = {
297 			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
298 			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
299 		},
300 		[C(OP_WRITE)] = {
301 			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
302 			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
303 		},
304 		[C(OP_PREFETCH)] = {
305 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
306 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
307 		},
308 	},
309 	[C(L1I)] = {
310 		[C(OP_READ)] = {
311 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
312 			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
313 		},
314 		[C(OP_WRITE)] = {
315 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
316 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
317 		},
318 		[C(OP_PREFETCH)] = {
319 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
320 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
321 		},
322 	},
323 	[C(LL)] = {
324 		[C(OP_READ)] = {
325 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
326 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
327 		},
328 		[C(OP_WRITE)] = {
329 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
330 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
331 		},
332 		[C(OP_PREFETCH)] = {
333 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
334 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
335 		},
336 	},
337 	[C(DTLB)] = {
338 		[C(OP_READ)] = {
339 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
340 			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
341 		},
342 		[C(OP_WRITE)] = {
343 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
344 			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
345 		},
346 		[C(OP_PREFETCH)] = {
347 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
348 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
349 		},
350 	},
351 	[C(ITLB)] = {
352 		[C(OP_READ)] = {
353 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
354 			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
355 		},
356 		[C(OP_WRITE)] = {
357 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
358 			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
359 		},
360 		[C(OP_PREFETCH)] = {
361 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
362 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
363 		},
364 	},
365 	[C(BPU)] = {
366 		[C(OP_READ)] = {
367 			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
368 			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
369 		},
370 		[C(OP_WRITE)] = {
371 			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
372 			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
373 		},
374 		[C(OP_PREFETCH)] = {
375 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
376 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
377 		},
378 	},
379 	[C(NODE)] = {
380 		[C(OP_READ)] = {
381 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
382 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
383 		},
384 		[C(OP_WRITE)] = {
385 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
386 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
387 		},
388 		[C(OP_PREFETCH)] = {
389 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
390 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
391 		},
392 	},
393 };
394 
395 /*
396  * Cortex-A5 HW events mapping
397  */
398 static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = {
399 	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
400 	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
401 	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
402 	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
403 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
404 	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
405 	[PERF_COUNT_HW_BUS_CYCLES]		= HW_OP_UNSUPPORTED,
406 	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= HW_OP_UNSUPPORTED,
407 	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= HW_OP_UNSUPPORTED,
408 };
409 
410 static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
411 					[PERF_COUNT_HW_CACHE_OP_MAX]
412 					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
413 	[C(L1D)] = {
414 		[C(OP_READ)] = {
415 			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
416 			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
417 		},
418 		[C(OP_WRITE)] = {
419 			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
420 			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
421 		},
422 		[C(OP_PREFETCH)] = {
423 			[C(RESULT_ACCESS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
424 			[C(RESULT_MISS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
425 		},
426 	},
427 	[C(L1I)] = {
428 		[C(OP_READ)] = {
429 			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
430 			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
431 		},
432 		[C(OP_WRITE)] = {
433 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
434 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
435 		},
436 		/*
437 		 * The prefetch counters don't differentiate between the I
438 		 * side and the D side.
439 		 */
440 		[C(OP_PREFETCH)] = {
441 			[C(RESULT_ACCESS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
442 			[C(RESULT_MISS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
443 		},
444 	},
445 	[C(LL)] = {
446 		[C(OP_READ)] = {
447 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
448 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
449 		},
450 		[C(OP_WRITE)] = {
451 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
452 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
453 		},
454 		[C(OP_PREFETCH)] = {
455 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
456 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
457 		},
458 	},
459 	[C(DTLB)] = {
460 		[C(OP_READ)] = {
461 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
462 			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
463 		},
464 		[C(OP_WRITE)] = {
465 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
466 			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
467 		},
468 		[C(OP_PREFETCH)] = {
469 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
470 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
471 		},
472 	},
473 	[C(ITLB)] = {
474 		[C(OP_READ)] = {
475 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
476 			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
477 		},
478 		[C(OP_WRITE)] = {
479 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
480 			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
481 		},
482 		[C(OP_PREFETCH)] = {
483 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
484 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
485 		},
486 	},
487 	[C(BPU)] = {
488 		[C(OP_READ)] = {
489 			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
490 			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
491 		},
492 		[C(OP_WRITE)] = {
493 			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
494 			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
495 		},
496 		[C(OP_PREFETCH)] = {
497 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
498 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
499 		},
500 	},
501 	[C(NODE)] = {
502 		[C(OP_READ)] = {
503 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
504 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
505 		},
506 		[C(OP_WRITE)] = {
507 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
508 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
509 		},
510 		[C(OP_PREFETCH)] = {
511 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
512 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
513 		},
514 	},
515 };
516 
517 /*
518  * Cortex-A15 HW events mapping
519  */
520 static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = {
521 	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
522 	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
523 	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
524 	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
525 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_A15_PERFCTR_PC_WRITE_SPEC,
526 	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
527 	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
528 	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= HW_OP_UNSUPPORTED,
529 	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= HW_OP_UNSUPPORTED,
530 };
531 
532 static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
533 					[PERF_COUNT_HW_CACHE_OP_MAX]
534 					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
535 	[C(L1D)] = {
536 		[C(OP_READ)] = {
537 			[C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ,
538 			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ,
539 		},
540 		[C(OP_WRITE)] = {
541 			[C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE,
542 			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE,
543 		},
544 		[C(OP_PREFETCH)] = {
545 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
546 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
547 		},
548 	},
549 	[C(L1I)] = {
550 		/*
551 		 * Not all performance counters differentiate between read
552 		 * and write accesses/misses so we're not always strictly
553 		 * correct, but it's the best we can do. Writes and reads get
554 		 * combined in these cases.
555 		 */
556 		[C(OP_READ)] = {
557 			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
558 			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
559 		},
560 		[C(OP_WRITE)] = {
561 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
562 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
563 		},
564 		[C(OP_PREFETCH)] = {
565 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
566 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
567 		},
568 	},
569 	[C(LL)] = {
570 		[C(OP_READ)] = {
571 			[C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ,
572 			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ,
573 		},
574 		[C(OP_WRITE)] = {
575 			[C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE,
576 			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE,
577 		},
578 		[C(OP_PREFETCH)] = {
579 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
580 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
581 		},
582 	},
583 	[C(DTLB)] = {
584 		[C(OP_READ)] = {
585 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
586 			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ,
587 		},
588 		[C(OP_WRITE)] = {
589 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
590 			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE,
591 		},
592 		[C(OP_PREFETCH)] = {
593 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
594 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
595 		},
596 	},
597 	[C(ITLB)] = {
598 		[C(OP_READ)] = {
599 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
600 			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
601 		},
602 		[C(OP_WRITE)] = {
603 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
604 			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
605 		},
606 		[C(OP_PREFETCH)] = {
607 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
608 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
609 		},
610 	},
611 	[C(BPU)] = {
612 		[C(OP_READ)] = {
613 			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
614 			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
615 		},
616 		[C(OP_WRITE)] = {
617 			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
618 			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
619 		},
620 		[C(OP_PREFETCH)] = {
621 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
622 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
623 		},
624 	},
625 	[C(NODE)] = {
626 		[C(OP_READ)] = {
627 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
628 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
629 		},
630 		[C(OP_WRITE)] = {
631 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
632 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
633 		},
634 		[C(OP_PREFETCH)] = {
635 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
636 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
637 		},
638 	},
639 };
640 
641 /*
642  * Cortex-A7 HW events mapping
643  */
644 static const unsigned armv7_a7_perf_map[PERF_COUNT_HW_MAX] = {
645 	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
646 	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
647 	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
648 	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
649 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
650 	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
651 	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
652 	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= HW_OP_UNSUPPORTED,
653 	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= HW_OP_UNSUPPORTED,
654 };
655 
656 static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
657 					[PERF_COUNT_HW_CACHE_OP_MAX]
658 					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
659 	[C(L1D)] = {
660 		/*
661 		 * The performance counters don't differentiate between read
662 		 * and write accesses/misses so this isn't strictly correct,
663 		 * but it's the best we can do. Writes and reads get
664 		 * combined.
665 		 */
666 		[C(OP_READ)] = {
667 			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
668 			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
669 		},
670 		[C(OP_WRITE)] = {
671 			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
672 			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
673 		},
674 		[C(OP_PREFETCH)] = {
675 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
676 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
677 		},
678 	},
679 	[C(L1I)] = {
680 		[C(OP_READ)] = {
681 			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
682 			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
683 		},
684 		[C(OP_WRITE)] = {
685 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
686 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
687 		},
688 		[C(OP_PREFETCH)] = {
689 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
690 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
691 		},
692 	},
693 	[C(LL)] = {
694 		[C(OP_READ)] = {
695 			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_CACHE_ACCESS,
696 			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
697 		},
698 		[C(OP_WRITE)] = {
699 			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_CACHE_ACCESS,
700 			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
701 		},
702 		[C(OP_PREFETCH)] = {
703 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
704 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
705 		},
706 	},
707 	[C(DTLB)] = {
708 		[C(OP_READ)] = {
709 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
710 			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
711 		},
712 		[C(OP_WRITE)] = {
713 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
714 			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
715 		},
716 		[C(OP_PREFETCH)] = {
717 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
718 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
719 		},
720 	},
721 	[C(ITLB)] = {
722 		[C(OP_READ)] = {
723 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
724 			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
725 		},
726 		[C(OP_WRITE)] = {
727 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
728 			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
729 		},
730 		[C(OP_PREFETCH)] = {
731 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
732 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
733 		},
734 	},
735 	[C(BPU)] = {
736 		[C(OP_READ)] = {
737 			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
738 			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
739 		},
740 		[C(OP_WRITE)] = {
741 			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
742 			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
743 		},
744 		[C(OP_PREFETCH)] = {
745 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
746 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
747 		},
748 	},
749 	[C(NODE)] = {
750 		[C(OP_READ)] = {
751 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
752 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
753 		},
754 		[C(OP_WRITE)] = {
755 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
756 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
757 		},
758 		[C(OP_PREFETCH)] = {
759 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
760 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
761 		},
762 	},
763 };
764 
765 /*
766  * Cortex-A12 HW events mapping
767  */
768 static const unsigned armv7_a12_perf_map[PERF_COUNT_HW_MAX] = {
769 	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
770 	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
771 	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
772 	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
773 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_A12_PERFCTR_PC_WRITE_SPEC,
774 	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
775 	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
776 	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= HW_OP_UNSUPPORTED,
777 	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= HW_OP_UNSUPPORTED,
778 };
779 
780 static const unsigned armv7_a12_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
781 					[PERF_COUNT_HW_CACHE_OP_MAX]
782 					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
783 	[C(L1D)] = {
784 		[C(OP_READ)] = {
785 			[C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ,
786 			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
787 		},
788 		[C(OP_WRITE)] = {
789 			[C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE,
790 			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
791 		},
792 		[C(OP_PREFETCH)] = {
793 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
794 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
795 		},
796 	},
797 	[C(L1I)] = {
798 		/*
799 		 * Not all performance counters differentiate between read
800 		 * and write accesses/misses so we're not always strictly
801 		 * correct, but it's the best we can do. Writes and reads get
802 		 * combined in these cases.
803 		 */
804 		[C(OP_READ)] = {
805 			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
806 			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
807 		},
808 		[C(OP_WRITE)] = {
809 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
810 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
811 		},
812 		[C(OP_PREFETCH)] = {
813 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
814 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
815 		},
816 	},
817 	[C(LL)] = {
818 		[C(OP_READ)] = {
819 			[C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ,
820 			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
821 		},
822 		[C(OP_WRITE)] = {
823 			[C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE,
824 			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
825 		},
826 		[C(OP_PREFETCH)] = {
827 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
828 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
829 		},
830 	},
831 	[C(DTLB)] = {
832 		[C(OP_READ)] = {
833 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
834 			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
835 		},
836 		[C(OP_WRITE)] = {
837 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
838 			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
839 		},
840 		[C(OP_PREFETCH)] = {
841 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
842 			[C(RESULT_MISS)]	= ARMV7_A12_PERFCTR_PF_TLB_REFILL,
843 		},
844 	},
845 	[C(ITLB)] = {
846 		[C(OP_READ)] = {
847 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
848 			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
849 		},
850 		[C(OP_WRITE)] = {
851 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
852 			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
853 		},
854 		[C(OP_PREFETCH)] = {
855 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
856 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
857 		},
858 	},
859 	[C(BPU)] = {
860 		[C(OP_READ)] = {
861 			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
862 			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
863 		},
864 		[C(OP_WRITE)] = {
865 			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
866 			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
867 		},
868 		[C(OP_PREFETCH)] = {
869 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
870 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
871 		},
872 	},
873 	[C(NODE)] = {
874 		[C(OP_READ)] = {
875 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
876 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
877 		},
878 		[C(OP_WRITE)] = {
879 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
880 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
881 		},
882 		[C(OP_PREFETCH)] = {
883 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
884 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
885 		},
886 	},
887 };
888 
889 /*
890  * Krait HW events mapping
891  */
892 static const unsigned krait_perf_map[PERF_COUNT_HW_MAX] = {
893 	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV7_PERFCTR_CPU_CYCLES,
894 	[PERF_COUNT_HW_INSTRUCTIONS]	    = ARMV7_PERFCTR_INSTR_EXECUTED,
895 	[PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
896 	[PERF_COUNT_HW_CACHE_MISSES]	    = HW_OP_UNSUPPORTED,
897 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
898 	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
899 	[PERF_COUNT_HW_BUS_CYCLES]	    = ARMV7_PERFCTR_CLOCK_CYCLES,
900 };
901 
902 static const unsigned krait_perf_map_no_branch[PERF_COUNT_HW_MAX] = {
903 	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV7_PERFCTR_CPU_CYCLES,
904 	[PERF_COUNT_HW_INSTRUCTIONS]	    = ARMV7_PERFCTR_INSTR_EXECUTED,
905 	[PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
906 	[PERF_COUNT_HW_CACHE_MISSES]	    = HW_OP_UNSUPPORTED,
907 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = HW_OP_UNSUPPORTED,
908 	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
909 	[PERF_COUNT_HW_BUS_CYCLES]	    = ARMV7_PERFCTR_CLOCK_CYCLES,
910 };
911 
912 static const unsigned krait_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
913 					  [PERF_COUNT_HW_CACHE_OP_MAX]
914 					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
915 	[C(L1D)] = {
916 		/*
917 		 * The performance counters don't differentiate between read
918 		 * and write accesses/misses so this isn't strictly correct,
919 		 * but it's the best we can do. Writes and reads get
920 		 * combined.
921 		 */
922 		[C(OP_READ)] = {
923 			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
924 			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
925 		},
926 		[C(OP_WRITE)] = {
927 			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
928 			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
929 		},
930 		[C(OP_PREFETCH)] = {
931 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
932 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
933 		},
934 	},
935 	[C(L1I)] = {
936 		[C(OP_READ)] = {
937 			[C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_ICACHE_ACCESS,
938 			[C(RESULT_MISS)]	= KRAIT_PERFCTR_L1_ICACHE_MISS,
939 		},
940 		[C(OP_WRITE)] = {
941 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
942 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
943 		},
944 		[C(OP_PREFETCH)] = {
945 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
946 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
947 		},
948 	},
949 	[C(LL)] = {
950 		[C(OP_READ)] = {
951 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
952 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
953 		},
954 		[C(OP_WRITE)] = {
955 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
956 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
957 		},
958 		[C(OP_PREFETCH)] = {
959 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
960 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
961 		},
962 	},
963 	[C(DTLB)] = {
964 		[C(OP_READ)] = {
965 			[C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_DTLB_ACCESS,
966 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
967 		},
968 		[C(OP_WRITE)] = {
969 			[C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_DTLB_ACCESS,
970 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
971 		},
972 		[C(OP_PREFETCH)] = {
973 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
974 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
975 		},
976 	},
977 	[C(ITLB)] = {
978 		[C(OP_READ)] = {
979 			[C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_ITLB_ACCESS,
980 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
981 		},
982 		[C(OP_WRITE)] = {
983 			[C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_ITLB_ACCESS,
984 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
985 		},
986 		[C(OP_PREFETCH)] = {
987 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
988 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
989 		},
990 	},
991 	[C(BPU)] = {
992 		[C(OP_READ)] = {
993 			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
994 			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
995 		},
996 		[C(OP_WRITE)] = {
997 			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
998 			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
999 		},
1000 		[C(OP_PREFETCH)] = {
1001 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
1002 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
1003 		},
1004 	},
1005 	[C(NODE)] = {
1006 		[C(OP_READ)] = {
1007 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
1008 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
1009 		},
1010 		[C(OP_WRITE)] = {
1011 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
1012 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
1013 		},
1014 		[C(OP_PREFETCH)] = {
1015 			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
1016 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
1017 		},
1018 	},
1019 };
1020 
1021 /*
1022  * Perf Events' indices
1023  */
1024 #define	ARMV7_IDX_CYCLE_COUNTER	0
1025 #define	ARMV7_IDX_COUNTER0	1
1026 #define	ARMV7_IDX_COUNTER_LAST(cpu_pmu) \
1027 	(ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
1028 
1029 #define	ARMV7_MAX_COUNTERS	32
1030 #define	ARMV7_COUNTER_MASK	(ARMV7_MAX_COUNTERS - 1)
1031 
1032 /*
1033  * ARMv7 low level PMNC access
1034  */
1035 
1036 /*
1037  * Perf Event to low level counters mapping
1038  */
1039 #define	ARMV7_IDX_TO_COUNTER(x)	\
1040 	(((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK)
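
/*
 * For example, on a PMU advertising four event counters plus the cycle
 * counter (num_events == 5), perf index 0 is the cycle counter and
 * indices 1..4 map onto hardware counters 0..3 via ARMV7_IDX_TO_COUNTER().
 */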
1041 
1042 /*
1043  * Per-CPU PMNC: config reg
1044  */
1045 #define ARMV7_PMNC_E		(1 << 0) /* Enable all counters */
1046 #define ARMV7_PMNC_P		(1 << 1) /* Reset all counters */
1047 #define ARMV7_PMNC_C		(1 << 2) /* Cycle counter reset */
1048 #define ARMV7_PMNC_D		(1 << 3) /* CCNT counts every 64th cpu cycle */
1049 #define ARMV7_PMNC_X		(1 << 4) /* Export to ETM */
1050 #define ARMV7_PMNC_DP		(1 << 5) /* Disable CCNT if non-invasive debug */
1051 #define	ARMV7_PMNC_N_SHIFT	11	 /* Number of counters supported */
1052 #define	ARMV7_PMNC_N_MASK	0x1f
1053 #define	ARMV7_PMNC_MASK		0x3f	 /* Mask for writable bits */
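
/*
 * The number of event counters implemented can be read back from PMNC,
 * e.g. (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK,
 * which is how armv7_read_num_pmnc_events() sizes the PMU below.
 */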
1054 
1055 /*
1056  * FLAG: counters overflow flag status reg
1057  */
1058 #define	ARMV7_FLAG_MASK		0xffffffff	/* Mask for writable bits */
1059 #define	ARMV7_OVERFLOWED_MASK	ARMV7_FLAG_MASK
1060 
1061 /*
1062  * PMXEVTYPER: Event selection reg
1063  */
1064 #define	ARMV7_EVTYPE_MASK	0xc80000ff	/* Mask for writable bits */
1065 #define	ARMV7_EVTYPE_EVENT	0xff		/* Mask for EVENT bits */
1066 
1067 /*
1068  * Event filters for PMUv2
1069  */
1070 #define	ARMV7_EXCLUDE_PL1	(1 << 31)
1071 #define	ARMV7_EXCLUDE_USER	(1 << 30)
1072 #define	ARMV7_INCLUDE_HYP	(1 << 27)
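
/*
 * armv7pmu_set_event_filter() below composes these into hwc->config_base;
 * a request with only attr.exclude_kernel set (and attr.exclude_hv clear),
 * for instance, would yield ARMV7_EXCLUDE_PL1 | ARMV7_INCLUDE_HYP, which
 * is then merged with the event number when the evtsel register is written.
 */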
1073 
1074 static inline u32 armv7_pmnc_read(void)
1075 {
1076 	u32 val;
1077 	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
1078 	return val;
1079 }
1080 
1081 static inline void armv7_pmnc_write(u32 val)
1082 {
1083 	val &= ARMV7_PMNC_MASK;
1084 	isb();
1085 	asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
1086 }
1087 
1088 static inline int armv7_pmnc_has_overflowed(u32 pmnc)
1089 {
1090 	return pmnc & ARMV7_OVERFLOWED_MASK;
1091 }
1092 
1093 static inline int armv7_pmnc_counter_valid(struct arm_pmu *cpu_pmu, int idx)
1094 {
1095 	return idx >= ARMV7_IDX_CYCLE_COUNTER &&
1096 		idx <= ARMV7_IDX_COUNTER_LAST(cpu_pmu);
1097 }
1098 
1099 static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx)
1100 {
1101 	return pmnc & BIT(ARMV7_IDX_TO_COUNTER(idx));
1102 }
1103 
1104 static inline int armv7_pmnc_select_counter(int idx)
1105 {
1106 	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
1107 	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter));
1108 	isb();
1109 
1110 	return idx;
1111 }
1112 
1113 static inline u32 armv7pmu_read_counter(struct perf_event *event)
1114 {
1115 	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1116 	struct hw_perf_event *hwc = &event->hw;
1117 	int idx = hwc->idx;
1118 	u32 value = 0;
1119 
1120 	if (!armv7_pmnc_counter_valid(cpu_pmu, idx))
1121 		pr_err("CPU%u reading wrong counter %d\n",
1122 			smp_processor_id(), idx);
1123 	else if (idx == ARMV7_IDX_CYCLE_COUNTER)
1124 		asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
1125 	else if (armv7_pmnc_select_counter(idx) == idx)
1126 		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (value));
1127 
1128 	return value;
1129 }
1130 
1131 static inline void armv7pmu_write_counter(struct perf_event *event, u32 value)
1132 {
1133 	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1134 	struct hw_perf_event *hwc = &event->hw;
1135 	int idx = hwc->idx;
1136 
1137 	if (!armv7_pmnc_counter_valid(cpu_pmu, idx))
1138 		pr_err("CPU%u writing wrong counter %d\n",
1139 			smp_processor_id(), idx);
1140 	else if (idx == ARMV7_IDX_CYCLE_COUNTER)
1141 		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
1142 	else if (armv7_pmnc_select_counter(idx) == idx)
1143 		asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value));
1144 }
1145 
1146 static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
1147 {
1148 	if (armv7_pmnc_select_counter(idx) == idx) {
1149 		val &= ARMV7_EVTYPE_MASK;
1150 		asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
1151 	}
1152 }
1153 
1154 static inline int armv7_pmnc_enable_counter(int idx)
1155 {
1156 	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
1157 	asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter)));
1158 	return idx;
1159 }
1160 
1161 static inline int armv7_pmnc_disable_counter(int idx)
1162 {
1163 	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
1164 	asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter)));
1165 	return idx;
1166 }
1167 
1168 static inline int armv7_pmnc_enable_intens(int idx)
1169 {
1170 	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
1171 	asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter)));
1172 	return idx;
1173 }
1174 
1175 static inline int armv7_pmnc_disable_intens(int idx)
1176 {
1177 	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
1178 	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
1179 	isb();
1180 	/* Clear the overflow flag in case an interrupt is pending. */
1181 	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter)));
1182 	isb();
1183 
1184 	return idx;
1185 }
1186 
1187 static inline u32 armv7_pmnc_getreset_flags(void)
1188 {
1189 	u32 val;
1190 
1191 	/* Read */
1192 	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
1193 
1194 	/* Write to clear flags */
1195 	val &= ARMV7_FLAG_MASK;
1196 	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));
1197 
1198 	return val;
1199 }
1200 
1201 #ifdef DEBUG
1202 static void armv7_pmnc_dump_regs(struct arm_pmu *cpu_pmu)
1203 {
1204 	u32 val;
1205 	unsigned int cnt;
1206 
1207 	printk(KERN_INFO "PMNC registers dump:\n");
1208 
1209 	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
1210 	printk(KERN_INFO "PMNC  =0x%08x\n", val);
1211 
1212 	asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
1213 	printk(KERN_INFO "CNTENS=0x%08x\n", val);
1214 
1215 	asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
1216 	printk(KERN_INFO "INTENS=0x%08x\n", val);
1217 
1218 	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
1219 	printk(KERN_INFO "FLAGS =0x%08x\n", val);
1220 
1221 	asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
1222 	printk(KERN_INFO "SELECT=0x%08x\n", val);
1223 
1224 	asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
1225 	printk(KERN_INFO "CCNT  =0x%08x\n", val);
1226 
1227 	for (cnt = ARMV7_IDX_COUNTER0;
1228 			cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) {
1229 		armv7_pmnc_select_counter(cnt);
1230 		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
1231 		printk(KERN_INFO "CNT[%d] count =0x%08x\n",
1232 			ARMV7_IDX_TO_COUNTER(cnt), val);
1233 		asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
1234 		printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
1235 			ARMV7_IDX_TO_COUNTER(cnt), val);
1236 	}
1237 }
1238 #endif
1239 
1240 static void armv7pmu_enable_event(struct perf_event *event)
1241 {
1242 	unsigned long flags;
1243 	struct hw_perf_event *hwc = &event->hw;
1244 	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1245 	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
1246 	int idx = hwc->idx;
1247 
1248 	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
1249 		pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
1250 			smp_processor_id(), idx);
1251 		return;
1252 	}
1253 
1254 	/*
1255 	 * Enable counter and interrupt, and set the counter to count
1256 	 * the event that we're interested in.
1257 	 */
1258 	raw_spin_lock_irqsave(&events->pmu_lock, flags);
1259 
1260 	/*
1261 	 * Disable counter
1262 	 */
1263 	armv7_pmnc_disable_counter(idx);
1264 
1265 	/*
1266 	 * Set event (if destined for PMNx counters)
1267 	 * We only need to set the event for the cycle counter if we
1268 	 * have the ability to perform event filtering.
1269 	 */
1270 	if (cpu_pmu->set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
1271 		armv7_pmnc_write_evtsel(idx, hwc->config_base);
1272 
1273 	/*
1274 	 * Enable interrupt for this counter
1275 	 */
1276 	armv7_pmnc_enable_intens(idx);
1277 
1278 	/*
1279 	 * Enable counter
1280 	 */
1281 	armv7_pmnc_enable_counter(idx);
1282 
1283 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1284 }
1285 
1286 static void armv7pmu_disable_event(struct perf_event *event)
1287 {
1288 	unsigned long flags;
1289 	struct hw_perf_event *hwc = &event->hw;
1290 	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1291 	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
1292 	int idx = hwc->idx;
1293 
1294 	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
1295 		pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
1296 			smp_processor_id(), idx);
1297 		return;
1298 	}
1299 
1300 	/*
1301 	 * Disable counter and interrupt
1302 	 */
1303 	raw_spin_lock_irqsave(&events->pmu_lock, flags);
1304 
1305 	/*
1306 	 * Disable counter
1307 	 */
1308 	armv7_pmnc_disable_counter(idx);
1309 
1310 	/*
1311 	 * Disable interrupt for this counter
1312 	 */
1313 	armv7_pmnc_disable_intens(idx);
1314 
1315 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1316 }
1317 
1318 static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
1319 {
1320 	u32 pmnc;
1321 	struct perf_sample_data data;
1322 	struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
1323 	struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events();
1324 	struct pt_regs *regs;
1325 	int idx;
1326 
1327 	/*
1328 	 * Get and reset the IRQ flags
1329 	 */
1330 	pmnc = armv7_pmnc_getreset_flags();
1331 
1332 	/*
1333 	 * Did an overflow occur?
1334 	 */
1335 	if (!armv7_pmnc_has_overflowed(pmnc))
1336 		return IRQ_NONE;
1337 
1338 	/*
1339 	 * Handle the counter(s) overflow(s)
1340 	 */
1341 	regs = get_irq_regs();
1342 
1343 	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
1344 		struct perf_event *event = cpuc->events[idx];
1345 		struct hw_perf_event *hwc;
1346 
1347 		/* Ignore if we don't have an event. */
1348 		if (!event)
1349 			continue;
1350 
1351 		/*
1352 		 * We have a single interrupt for all counters. Check that
1353 		 * each counter has overflowed before we process it.
1354 		 */
1355 		if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
1356 			continue;
1357 
1358 		hwc = &event->hw;
1359 		armpmu_event_update(event);
1360 		perf_sample_data_init(&data, 0, hwc->last_period);
1361 		if (!armpmu_event_set_period(event))
1362 			continue;
1363 
1364 		if (perf_event_overflow(event, &data, regs))
1365 			cpu_pmu->disable(event);
1366 	}
1367 
1368 	/*
1369 	 * Handle the pending perf events.
1370 	 *
1371 	 * Note: this call *must* be run with interrupts disabled. For
1372 	 * platforms that can have the PMU interrupts raised as an NMI, this
1373 	 * will not work.
1374 	 */
1375 	irq_work_run();
1376 
1377 	return IRQ_HANDLED;
1378 }
1379 
1380 static void armv7pmu_start(struct arm_pmu *cpu_pmu)
1381 {
1382 	unsigned long flags;
1383 	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
1384 
1385 	raw_spin_lock_irqsave(&events->pmu_lock, flags);
1386 	/* Enable all counters */
1387 	armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
1388 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1389 }
1390 
1391 static void armv7pmu_stop(struct arm_pmu *cpu_pmu)
1392 {
1393 	unsigned long flags;
1394 	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
1395 
1396 	raw_spin_lock_irqsave(&events->pmu_lock, flags);
1397 	/* Disable all counters */
1398 	armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
1399 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1400 }
1401 
1402 static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
1403 				  struct perf_event *event)
1404 {
1405 	int idx;
1406 	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1407 	struct hw_perf_event *hwc = &event->hw;
1408 	unsigned long evtype = hwc->config_base & ARMV7_EVTYPE_EVENT;
1409 
1410 	/* Always map the CPU cycle event onto the dedicated cycle counter. */
1411 	if (evtype == ARMV7_PERFCTR_CPU_CYCLES) {
1412 		if (test_and_set_bit(ARMV7_IDX_CYCLE_COUNTER, cpuc->used_mask))
1413 			return -EAGAIN;
1414 
1415 		return ARMV7_IDX_CYCLE_COUNTER;
1416 	}
1417 
1418 	/*
1419 	 * For anything other than a cycle counter, try to use
1420 	 * one of the event counters.
1421 	 */
1422 	for (idx = ARMV7_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
1423 		if (!test_and_set_bit(idx, cpuc->used_mask))
1424 			return idx;
1425 	}
1426 
1427 	/* The counters are all in use. */
1428 	return -EAGAIN;
1429 }
1430 
1431 /*
1432  * Add an event filter to a given event. This will only work for PMUv2 PMUs.
1433  */
1434 static int armv7pmu_set_event_filter(struct hw_perf_event *event,
1435 				     struct perf_event_attr *attr)
1436 {
1437 	unsigned long config_base = 0;
1438 
1439 	if (attr->exclude_idle)
1440 		return -EPERM;
1441 	if (attr->exclude_user)
1442 		config_base |= ARMV7_EXCLUDE_USER;
1443 	if (attr->exclude_kernel)
1444 		config_base |= ARMV7_EXCLUDE_PL1;
1445 	if (!attr->exclude_hv)
1446 		config_base |= ARMV7_INCLUDE_HYP;
1447 
1448 	/*
1449 	 * Install the filter into config_base as this is used to
1450 	 * construct the event type.
1451 	 */
1452 	event->config_base = config_base;
1453 
1454 	return 0;
1455 }
1456 
1457 static void armv7pmu_reset(void *info)
1458 {
1459 	struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
1460 	u32 idx, nb_cnt = cpu_pmu->num_events;
1461 
1462 	/* The counter and interrupt enable registers are unknown at reset. */
1463 	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
1464 		armv7_pmnc_disable_counter(idx);
1465 		armv7_pmnc_disable_intens(idx);
1466 	}
1467 
1468 	/* Initialize & Reset PMNC: C and P bits */
1469 	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
1470 }
1471 
1472 static int armv7_a8_map_event(struct perf_event *event)
1473 {
1474 	return armpmu_map_event(event, &armv7_a8_perf_map,
1475 				&armv7_a8_perf_cache_map, 0xFF);
1476 }
1477 
1478 static int armv7_a9_map_event(struct perf_event *event)
1479 {
1480 	return armpmu_map_event(event, &armv7_a9_perf_map,
1481 				&armv7_a9_perf_cache_map, 0xFF);
1482 }
1483 
1484 static int armv7_a5_map_event(struct perf_event *event)
1485 {
1486 	return armpmu_map_event(event, &armv7_a5_perf_map,
1487 				&armv7_a5_perf_cache_map, 0xFF);
1488 }
1489 
1490 static int armv7_a15_map_event(struct perf_event *event)
1491 {
1492 	return armpmu_map_event(event, &armv7_a15_perf_map,
1493 				&armv7_a15_perf_cache_map, 0xFF);
1494 }
1495 
1496 static int armv7_a7_map_event(struct perf_event *event)
1497 {
1498 	return armpmu_map_event(event, &armv7_a7_perf_map,
1499 				&armv7_a7_perf_cache_map, 0xFF);
1500 }
1501 
1502 static int armv7_a12_map_event(struct perf_event *event)
1503 {
1504 	return armpmu_map_event(event, &armv7_a12_perf_map,
1505 				&armv7_a12_perf_cache_map, 0xFF);
1506 }
1507 
1508 static int krait_map_event(struct perf_event *event)
1509 {
1510 	return armpmu_map_event(event, &krait_perf_map,
1511 				&krait_perf_cache_map, 0xFFFFF);
1512 }
1513 
1514 static int krait_map_event_no_branch(struct perf_event *event)
1515 {
1516 	return armpmu_map_event(event, &krait_perf_map_no_branch,
1517 				&krait_perf_cache_map, 0xFFFFF);
1518 }
1519 
1520 static void armv7pmu_init(struct arm_pmu *cpu_pmu)
1521 {
1522 	cpu_pmu->handle_irq	= armv7pmu_handle_irq;
1523 	cpu_pmu->enable		= armv7pmu_enable_event;
1524 	cpu_pmu->disable	= armv7pmu_disable_event;
1525 	cpu_pmu->read_counter	= armv7pmu_read_counter;
1526 	cpu_pmu->write_counter	= armv7pmu_write_counter;
1527 	cpu_pmu->get_event_idx	= armv7pmu_get_event_idx;
1528 	cpu_pmu->start		= armv7pmu_start;
1529 	cpu_pmu->stop		= armv7pmu_stop;
1530 	cpu_pmu->reset		= armv7pmu_reset;
1531 	cpu_pmu->max_period	= (1LLU << 32) - 1;
1532 }
1533 
1534 static u32 armv7_read_num_pmnc_events(void)
1535 {
1536 	u32 nb_cnt;
1537 
1538 	/* Read the number of CNTx counters supported from PMNC */
1539 	nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;
1540 
1541 	/* Add the CPU cycles counter and return */
1542 	return nb_cnt + 1;
1543 }
1544 
1545 static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
1546 {
1547 	armv7pmu_init(cpu_pmu);
1548 	cpu_pmu->name		= "ARMv7 Cortex-A8";
1549 	cpu_pmu->map_event	= armv7_a8_map_event;
1550 	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
1551 	return 0;
1552 }
1553 
1554 static int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
1555 {
1556 	armv7pmu_init(cpu_pmu);
1557 	cpu_pmu->name		= "ARMv7 Cortex-A9";
1558 	cpu_pmu->map_event	= armv7_a9_map_event;
1559 	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
1560 	return 0;
1561 }
1562 
1563 static int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
1564 {
1565 	armv7pmu_init(cpu_pmu);
1566 	cpu_pmu->name		= "ARMv7 Cortex-A5";
1567 	cpu_pmu->map_event	= armv7_a5_map_event;
1568 	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
1569 	return 0;
1570 }
1571 
1572 static int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
1573 {
1574 	armv7pmu_init(cpu_pmu);
1575 	cpu_pmu->name		= "ARMv7 Cortex-A15";
1576 	cpu_pmu->map_event	= armv7_a15_map_event;
1577 	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
1578 	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
1579 	return 0;
1580 }
1581 
1582 static int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
1583 {
1584 	armv7pmu_init(cpu_pmu);
1585 	cpu_pmu->name		= "ARMv7 Cortex-A7";
1586 	cpu_pmu->map_event	= armv7_a7_map_event;
1587 	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
1588 	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
1589 	return 0;
1590 }
1591 
1592 static int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu)
1593 {
1594 	armv7pmu_init(cpu_pmu);
1595 	cpu_pmu->name		= "ARMv7 Cortex-A12";
1596 	cpu_pmu->map_event	= armv7_a12_map_event;
1597 	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
1598 	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
1599 	return 0;
1600 }
1601 
1602 /*
1603  * Krait Performance Monitor Region Event Selection Register (PMRESRn)
1604  *
1605  *            31   30     24     16     8      0
1606  *            +--------------------------------+
1607  *  PMRESR0   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 0
1608  *            +--------------------------------+
1609  *  PMRESR1   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 1
1610  *            +--------------------------------+
1611  *  PMRESR2   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 2
1612  *            +--------------------------------+
1613  *  VPMRESR0  | EN |  CC  |  CC  |  CC  |  CC  |   N = 2, R = ?
1614  *            +--------------------------------+
1615  *              EN | G=3  | G=2  | G=1  | G=0
1616  *
1617  *  Event Encoding:
1618  *
1619  *      hwc->config_base = 0xNRCCG
1620  *
1621  *      N  = prefix, 1 for Krait CPU (PMRESRn), 2 for Venum VFP (VPMRESR)
1622  *      R  = region register
1623  *      CC = class of events the group G is choosing from
1624  *      G  = group or particular event
1625  *
1626  *  Example: 0x12021 is a Krait CPU event in PMRESR2's group 1 with code 2
1627  *
1628  *  A region (R) corresponds to a piece of the CPU (execution unit, instruction
1629  *  unit, etc.) while the event code (CC) corresponds to a particular class of
1630  *  events (interrupts for example). An event code is broken down into
1631  *  groups (G) that can be mapped into the PMU (irq, fiqs, and irq+fiqs for
1632  *  example).
1633  */
1634 
1635 #define KRAIT_EVENT		(1 << 16)
1636 #define VENUM_EVENT		(2 << 16)
1637 #define KRAIT_EVENT_MASK	(KRAIT_EVENT | VENUM_EVENT)
1638 #define PMRESRn_EN		BIT(31)
1639 
1640 static u32 krait_read_pmresrn(int n)
1641 {
1642 	u32 val;
1643 
1644 	switch (n) {
1645 	case 0:
1646 		asm volatile("mrc p15, 1, %0, c9, c15, 0" : "=r" (val));
1647 		break;
1648 	case 1:
1649 		asm volatile("mrc p15, 1, %0, c9, c15, 1" : "=r" (val));
1650 		break;
1651 	case 2:
1652 		asm volatile("mrc p15, 1, %0, c9, c15, 2" : "=r" (val));
1653 		break;
1654 	default:
1655 		BUG(); /* Should be validated in krait_pmu_get_event_idx() */
1656 	}
1657 
1658 	return val;
1659 }
1660 
1661 static void krait_write_pmresrn(int n, u32 val)
1662 {
1663 	switch (n) {
1664 	case 0:
1665 		asm volatile("mcr p15, 1, %0, c9, c15, 0" : : "r" (val));
1666 		break;
1667 	case 1:
1668 		asm volatile("mcr p15, 1, %0, c9, c15, 1" : : "r" (val));
1669 		break;
1670 	case 2:
1671 		asm volatile("mcr p15, 1, %0, c9, c15, 2" : : "r" (val));
1672 		break;
1673 	default:
1674 		BUG(); /* Should be validated in krait_pmu_get_event_idx() */
1675 	}
1676 }
1677 
1678 static u32 krait_read_vpmresr0(void)
1679 {
1680 	u32 val;
1681 	asm volatile("mrc p10, 7, %0, c11, c0, 0" : "=r" (val));
1682 	return val;
1683 }
1684 
1685 static void krait_write_vpmresr0(u32 val)
1686 {
1687 	asm volatile("mcr p10, 7, %0, c11, c0, 0" : : "r" (val));
1688 }
1689 
1690 static void krait_pre_vpmresr0(u32 *venum_orig_val, u32 *fp_orig_val)
1691 {
1692 	u32 venum_new_val;
1693 	u32 fp_new_val;
1694 
1695 	BUG_ON(preemptible());
1696 	/* CPACR Enable CP10 and CP11 access */
1697 	*venum_orig_val = get_copro_access();
1698 	venum_new_val = *venum_orig_val | CPACC_SVC(10) | CPACC_SVC(11);
1699 	set_copro_access(venum_new_val);
1700 
1701 	/* Enable FPEXC */
1702 	*fp_orig_val = fmrx(FPEXC);
1703 	fp_new_val = *fp_orig_val | FPEXC_EN;
1704 	fmxr(FPEXC, fp_new_val);
1705 }
1706 
1707 static void krait_post_vpmresr0(u32 venum_orig_val, u32 fp_orig_val)
1708 {
1709 	BUG_ON(preemptible());
1710 	/* Restore FPEXC */
1711 	fmxr(FPEXC, fp_orig_val);
1712 	isb();
1713 	/* Restore CPACR */
1714 	set_copro_access(venum_orig_val);
1715 }
1716 
1717 static u32 krait_get_pmresrn_event(unsigned int region)
1718 {
1719 	static const u32 pmresrn_table[] = { KRAIT_PMRESR0_GROUP0,
1720 					     KRAIT_PMRESR1_GROUP0,
1721 					     KRAIT_PMRESR2_GROUP0 };
1722 	return pmresrn_table[region];
1723 }
1724 
1725 static void krait_evt_setup(int idx, u32 config_base)
1726 {
1727 	u32 val;
1728 	u32 mask;
1729 	u32 vval, fval;
1730 	unsigned int region;
1731 	unsigned int group;
1732 	unsigned int code;
1733 	unsigned int group_shift;
1734 	bool venum_event;
1735 
1736 	venum_event = !!(config_base & VENUM_EVENT);
1737 	region = (config_base >> 12) & 0xf;
1738 	code   = (config_base >> 4) & 0xff;
1739 	group  = (config_base >> 0)  & 0xf;
1740 
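	/* Each group occupies one byte of its (V)PMRESR register (see diagram above) */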
1741 	group_shift = group * 8;
1742 	mask = 0xff << group_shift;
1743 
1744 	/* Configure evtsel for the region and group */
1745 	if (venum_event)
1746 		val = KRAIT_VPMRESR0_GROUP0;
1747 	else
1748 		val = krait_get_pmresrn_event(region);
1749 	val += group;
1750 	/* Mix in mode-exclusion bits */
1751 	val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1);
1752 	armv7_pmnc_write_evtsel(idx, val);
1753 
1754 	asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
1755 
1756 	if (venum_event) {
1757 		krait_pre_vpmresr0(&vval, &fval);
1758 		val = krait_read_vpmresr0();
1759 		val &= ~mask;
1760 		val |= code << group_shift;
1761 		val |= PMRESRn_EN;
1762 		krait_write_vpmresr0(val);
1763 		krait_post_vpmresr0(vval, fval);
1764 	} else {
1765 		val = krait_read_pmresrn(region);
1766 		val &= ~mask;
1767 		val |= code << group_shift;
1768 		val |= PMRESRn_EN;
1769 		krait_write_pmresrn(region, val);
1770 	}
1771 }
1772 
1773 static u32 krait_clear_pmresrn_group(u32 val, int group)
1774 {
1775 	u32 mask;
1776 	int group_shift;
1777 
1778 	group_shift = group * 8;
1779 	mask = 0xff << group_shift;
1780 	val &= ~mask;
1781 
1782 	/* Keep the enable bit set if any other group in the region is still in use */
1783 	if (val & ~PMRESRn_EN)
1784 		return val | PMRESRn_EN;
1785 
1786 	return 0;
1787 }
1788 
1789 static void krait_clearpmu(u32 config_base)
1790 {
1791 	u32 val;
1792 	u32 vval, fval;
1793 	unsigned int region;
1794 	unsigned int group;
1795 	bool venum_event;
1796 
1797 	venum_event = !!(config_base & VENUM_EVENT);
1798 	region = (config_base >> 12) & 0xf;
1799 	group  = (config_base >> 0)  & 0xf;
1800 
1801 	if (venum_event) {
1802 		krait_pre_vpmresr0(&vval, &fval);
1803 		val = krait_read_vpmresr0();
1804 		val = krait_clear_pmresrn_group(val, group);
1805 		krait_write_vpmresr0(val);
1806 		krait_post_vpmresr0(vval, fval);
1807 	} else {
1808 		val = krait_read_pmresrn(region);
1809 		val = krait_clear_pmresrn_group(val, group);
1810 		krait_write_pmresrn(region, val);
1811 	}
1812 }
1813 
1814 static void krait_pmu_disable_event(struct perf_event *event)
1815 {
1816 	unsigned long flags;
1817 	struct hw_perf_event *hwc = &event->hw;
1818 	int idx = hwc->idx;
1819 	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
1820 
1821 	/* Disable counter and interrupt */
1822 	raw_spin_lock_irqsave(&events->pmu_lock, flags);
1823 
1824 	/* Disable counter */
1825 	armv7_pmnc_disable_counter(idx);
1826 
1827 	/*
1828 	 * Clear pmresr code (if destined for PMNx counters)
1829 	 */
1830 	if (hwc->config_base & KRAIT_EVENT_MASK)
1831 		krait_clearpmu(hwc->config_base);
1832 
1833 	/* Disable interrupt for this counter */
1834 	armv7_pmnc_disable_intens(idx);
1835 
1836 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1837 }
1838 
1839 static void krait_pmu_enable_event(struct perf_event *event)
1840 {
1841 	unsigned long flags;
1842 	struct hw_perf_event *hwc = &event->hw;
1843 	int idx = hwc->idx;
1844 	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
1845 
1846 	/*
1847 	 * Enable counter and interrupt, and set the counter to count
1848 	 * the event that we're interested in.
1849 	 */
1850 	raw_spin_lock_irqsave(&events->pmu_lock, flags);
1851 
1852 	/* Disable counter */
1853 	armv7_pmnc_disable_counter(idx);
1854 
1855 	/*
1856 	 * Set event (if destined for PMNx counters).
1857 	 * We program the event selector even for the cycle counter because
1858 	 * we have the ability to perform event filtering.
1859 	 */
1860 	if (hwc->config_base & KRAIT_EVENT_MASK)
1861 		krait_evt_setup(idx, hwc->config_base);
1862 	else
1863 		armv7_pmnc_write_evtsel(idx, hwc->config_base);
1864 
1865 	/* Enable interrupt for this counter */
1866 	armv7_pmnc_enable_intens(idx);
1867 
1868 	/* Enable counter */
1869 	armv7_pmnc_enable_counter(idx);
1870 
1871 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1872 }
1873 
1874 static void krait_pmu_reset(void *info)
1875 {
1876 	u32 vval, fval;
1877 
1878 	armv7pmu_reset(info);
1879 
1880 	/* Clear all pmresrs */
1881 	krait_write_pmresrn(0, 0);
1882 	krait_write_pmresrn(1, 0);
1883 	krait_write_pmresrn(2, 0);
1884 
1885 	krait_pre_vpmresr0(&vval, &fval);
1886 	krait_write_vpmresr0(0);
1887 	krait_post_vpmresr0(vval, fval);
1888 }
1889 
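/*
 * Map a PMRESR (region, group) pair to a bit in cpuc->used_mask, placed
 * above the bits used for the hardware counters themselves.
 */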
1890 static int krait_event_to_bit(struct perf_event *event, unsigned int region,
1891 			      unsigned int group)
1892 {
1893 	int bit;
1894 	struct hw_perf_event *hwc = &event->hw;
1895 	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1896 
1897 	if (hwc->config_base & VENUM_EVENT)
1898 		bit = KRAIT_VPMRESR0_GROUP0;
1899 	else
1900 		bit = krait_get_pmresrn_event(region);
1901 	bit -= krait_get_pmresrn_event(0);
1902 	bit += group;
1903 	/*
1904 	 * Lower bits are reserved for use by the counters (see
1905 	 * armv7pmu_get_event_idx() for more info)
1906 	 */
1907 	bit += ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1;
1908 
1909 	return bit;
1910 }
1911 
1912 /*
1913  * We check for column exclusion constraints here.
1914  * Two events can't use the same group within a pmresr register.
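 * For example, two events that both select group 1 of PMRESR2 map to the
 * same used_mask bit in krait_event_to_bit(), so scheduling the second one
 * fails with -EAGAIN.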
1915  */
1916 static int krait_pmu_get_event_idx(struct pmu_hw_events *cpuc,
1917 				   struct perf_event *event)
1918 {
1919 	int idx;
1920 	int bit;
1921 	unsigned int prefix;
1922 	unsigned int region;
1923 	unsigned int code;
1924 	unsigned int group;
1925 	bool krait_event;
1926 	struct hw_perf_event *hwc = &event->hw;
1927 
1928 	region = (hwc->config_base >> 12) & 0xf;
1929 	code   = (hwc->config_base >> 4) & 0xff;
1930 	group  = (hwc->config_base >> 0) & 0xf;
1931 	krait_event = !!(hwc->config_base & KRAIT_EVENT_MASK);
1932 
1933 	if (krait_event) {
1934 		/* Ignore invalid events */
1935 		if (group > 3 || region > 2)
1936 			return -EINVAL;
1937 		prefix = hwc->config_base & KRAIT_EVENT_MASK;
1938 		if (prefix != KRAIT_EVENT && prefix != VENUM_EVENT)
1939 			return -EINVAL;
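		/* Venum event codes above 0x1f are rejected */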
1940 		if (prefix == VENUM_EVENT && (code & 0xe0))
1941 			return -EINVAL;
1942 
1943 		bit = krait_event_to_bit(event, region, group);
1944 		if (test_and_set_bit(bit, cpuc->used_mask))
1945 			return -EAGAIN;
1946 	}
1947 
1948 	idx = armv7pmu_get_event_idx(cpuc, event);
1949 	if (idx < 0 && krait_event)
1950 		clear_bit(bit, cpuc->used_mask);
1951 
1952 	return idx;
1953 }
1954 
1955 static void krait_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
1956 				      struct perf_event *event)
1957 {
1958 	int bit;
1959 	struct hw_perf_event *hwc = &event->hw;
1960 	unsigned int region;
1961 	unsigned int group;
1962 	bool krait_event;
1963 
1964 	region = (hwc->config_base >> 12) & 0xf;
1965 	group  = (hwc->config_base >> 0) & 0xf;
1966 	krait_event = !!(hwc->config_base & KRAIT_EVENT_MASK);
1967 
1968 	if (krait_event) {
1969 		bit = krait_event_to_bit(event, region, group);
1970 		clear_bit(bit, cpuc->used_mask);
1971 	}
1972 }
1973 
1974 static int krait_pmu_init(struct arm_pmu *cpu_pmu)
1975 {
1976 	armv7pmu_init(cpu_pmu);
1977 	cpu_pmu->name		= "ARMv7 Krait";
1978 	/* Some early versions of Krait don't support PC write events */
1979 	if (of_property_read_bool(cpu_pmu->plat_device->dev.of_node,
1980 				  "qcom,no-pc-write"))
1981 		cpu_pmu->map_event = krait_map_event_no_branch;
1982 	else
1983 		cpu_pmu->map_event = krait_map_event;
1984 	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
1985 	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
1986 	cpu_pmu->reset		= krait_pmu_reset;
1987 	cpu_pmu->enable		= krait_pmu_enable_event;
1988 	cpu_pmu->disable	= krait_pmu_disable_event;
1989 	cpu_pmu->get_event_idx	= krait_pmu_get_event_idx;
1990 	cpu_pmu->clear_event_idx = krait_pmu_clear_event_idx;
1991 	return 0;
1992 }
1993 #else
1994 static inline int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
1995 {
1996 	return -ENODEV;
1997 }
1998 
1999 static inline int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
2000 {
2001 	return -ENODEV;
2002 }
2003 
2004 static inline int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
2005 {
2006 	return -ENODEV;
2007 }
2008 
2009 static inline int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
2010 {
2011 	return -ENODEV;
2012 }
2013 
2014 static inline int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
2015 {
2016 	return -ENODEV;
2017 }
2018 
2019 static inline int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu)
2020 {
2021 	return -ENODEV;
2022 }
2023 
2024 static inline int krait_pmu_init(struct arm_pmu *cpu_pmu)
2025 {
2026 	return -ENODEV;
2027 }
2028 #endif	/* CONFIG_CPU_V7 */
2029