xref: /openbmc/linux/arch/arm/kernel/perf_event_v7.c (revision a8fe58ce)
/*
 * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
 *
 * ARMv7 support: Jean Pihet <jpihet@mvista.com>
 * 2010 (c) MontaVista Software, LLC.
 *
 * Copied from ARMv6 code, with the low level code inspired
 *  by the ARMv7 Oprofile code.
 *
 * Cortex-A8 has up to 4 configurable performance counters and
 *  a single cycle counter.
 * Cortex-A9 has up to 31 configurable performance counters and
 *  a single cycle counter.
 *
 * All counters can be enabled/disabled and IRQ masked separately. The cycle
 *  counter and all 4 performance counters together can be reset separately.
 */

#ifdef CONFIG_CPU_V7

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/irq_regs.h>
#include <asm/vfp.h>
#include "../vfp/vfpinstr.h"

#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>

/*
 * Common ARMv7 event types
 *
 * Note: An implementation may not be able to count all of these events
 * but the encodings are considered to be `reserved' in the case that
 * they are not available.
 */
#define ARMV7_PERFCTR_PMNC_SW_INCR			0x00
#define ARMV7_PERFCTR_L1_ICACHE_REFILL			0x01
#define ARMV7_PERFCTR_ITLB_REFILL			0x02
#define ARMV7_PERFCTR_L1_DCACHE_REFILL			0x03
#define ARMV7_PERFCTR_L1_DCACHE_ACCESS			0x04
#define ARMV7_PERFCTR_DTLB_REFILL			0x05
#define ARMV7_PERFCTR_MEM_READ				0x06
#define ARMV7_PERFCTR_MEM_WRITE				0x07
#define ARMV7_PERFCTR_INSTR_EXECUTED			0x08
#define ARMV7_PERFCTR_EXC_TAKEN				0x09
#define ARMV7_PERFCTR_EXC_EXECUTED			0x0A
#define ARMV7_PERFCTR_CID_WRITE				0x0B

/*
 * ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
 * It counts:
 *  - all (taken) branch instructions,
 *  - instructions that explicitly write the PC,
 *  - exception generating instructions.
 */
#define ARMV7_PERFCTR_PC_WRITE				0x0C
#define ARMV7_PERFCTR_PC_IMM_BRANCH			0x0D
#define ARMV7_PERFCTR_PC_PROC_RETURN			0x0E
#define ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS		0x0F
#define ARMV7_PERFCTR_PC_BRANCH_MIS_PRED		0x10
#define ARMV7_PERFCTR_CLOCK_CYCLES			0x11
#define ARMV7_PERFCTR_PC_BRANCH_PRED			0x12

/* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */
#define ARMV7_PERFCTR_MEM_ACCESS			0x13
#define ARMV7_PERFCTR_L1_ICACHE_ACCESS			0x14
#define ARMV7_PERFCTR_L1_DCACHE_WB			0x15
#define ARMV7_PERFCTR_L2_CACHE_ACCESS			0x16
#define ARMV7_PERFCTR_L2_CACHE_REFILL			0x17
#define ARMV7_PERFCTR_L2_CACHE_WB			0x18
#define ARMV7_PERFCTR_BUS_ACCESS			0x19
#define ARMV7_PERFCTR_MEM_ERROR				0x1A
#define ARMV7_PERFCTR_INSTR_SPEC			0x1B
#define ARMV7_PERFCTR_TTBR_WRITE			0x1C
#define ARMV7_PERFCTR_BUS_CYCLES			0x1D

#define ARMV7_PERFCTR_CPU_CYCLES			0xFF

/* ARMv7 Cortex-A8 specific event types */
#define ARMV7_A8_PERFCTR_L2_CACHE_ACCESS		0x43
#define ARMV7_A8_PERFCTR_L2_CACHE_REFILL		0x44
#define ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS		0x50
#define ARMV7_A8_PERFCTR_STALL_ISIDE			0x56

/* ARMv7 Cortex-A9 specific event types */
#define ARMV7_A9_PERFCTR_INSTR_CORE_RENAME		0x68
#define ARMV7_A9_PERFCTR_STALL_ICACHE			0x60
#define ARMV7_A9_PERFCTR_STALL_DISPATCH			0x66

/* ARMv7 Cortex-A5 specific event types */
#define ARMV7_A5_PERFCTR_PREFETCH_LINEFILL		0xc2
#define ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP		0xc3

/* ARMv7 Cortex-A15 specific event types */
#define ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ		0x40
#define ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE	0x41
#define ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ		0x42
#define ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE	0x43

#define ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ		0x4C
#define ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE		0x4D

#define ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ		0x50
#define ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE		0x51
#define ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ		0x52
#define ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE		0x53

#define ARMV7_A15_PERFCTR_PC_WRITE_SPEC			0x76

/* ARMv7 Cortex-A12 specific event types */
#define ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ		0x40
#define ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE	0x41

#define ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ		0x50
#define ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE		0x51

#define ARMV7_A12_PERFCTR_PC_WRITE_SPEC			0x76

#define ARMV7_A12_PERFCTR_PF_TLB_REFILL			0xe7

/* ARMv7 Krait specific event types */
#define KRAIT_PMRESR0_GROUP0				0xcc
#define KRAIT_PMRESR1_GROUP0				0xd0
#define KRAIT_PMRESR2_GROUP0				0xd4
#define KRAIT_VPMRESR0_GROUP0				0xd8

#define KRAIT_PERFCTR_L1_ICACHE_ACCESS			0x10011
#define KRAIT_PERFCTR_L1_ICACHE_MISS			0x10010

#define KRAIT_PERFCTR_L1_ITLB_ACCESS			0x12222
#define KRAIT_PERFCTR_L1_DTLB_ACCESS			0x12210

/* ARMv7 Scorpion specific event types */
#define SCORPION_LPM0_GROUP0				0x4c
#define SCORPION_LPM1_GROUP0				0x50
#define SCORPION_LPM2_GROUP0				0x54
#define SCORPION_L2LPM_GROUP0				0x58
#define SCORPION_VLPM_GROUP0				0x5c

#define SCORPION_ICACHE_ACCESS				0x10053
#define SCORPION_ICACHE_MISS				0x10052

#define SCORPION_DTLB_ACCESS				0x12013
#define SCORPION_DTLB_MISS				0x12012

#define SCORPION_ITLB_MISS				0x12021

/*
 * Cortex-A8 HW events mapping
 *
 * The hardware events that we support. We do support cache operations but
 * we have Harvard caches and no way to combine instruction and data
 * accesses/misses in hardware.
 */
static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV7_A8_PERFCTR_STALL_ISIDE,
};

static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					  [PERF_COUNT_HW_CACHE_OP_MAX]
					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	/*
	 * The performance counters don't differentiate between read and write
	 * accesses/misses so this isn't strictly correct, but it's the best we
	 * can do. Writes and reads get combined.
	 */
	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,

	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,

	[C(LL)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
	[C(LL)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
	[C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
	[C(LL)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_A8_PERFCTR_L2_CACHE_REFILL,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};

/*
 * Cortex-A9 HW events mapping
 */
static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_A9_PERFCTR_INSTR_CORE_RENAME,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV7_A9_PERFCTR_STALL_ICACHE,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= ARMV7_A9_PERFCTR_STALL_DISPATCH,
};

static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					  [PERF_COUNT_HW_CACHE_OP_MAX]
					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	/*
	 * The performance counters don't differentiate between read and write
	 * accesses/misses so this isn't strictly correct, but it's the best we
	 * can do. Writes and reads get combined.
	 */
	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,

	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};

/*
 * Cortex-A5 HW events mapping
 */
static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};

static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_PREFETCH)][C(RESULT_ACCESS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
	[C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,

	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
	/*
	 * The prefetch counters don't differentiate between the I side and the
	 * D side.
	 */
	[C(L1I)][C(OP_PREFETCH)][C(RESULT_ACCESS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
	[C(L1I)][C(OP_PREFETCH)][C(RESULT_MISS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};

/*
 * Cortex-A15 HW events mapping
 */
static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_A15_PERFCTR_PC_WRITE_SPEC,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
};

static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE,

	/*
	 * Not all performance counters differentiate between read and write
	 * accesses/misses so we're not always strictly correct, but it's the
	 * best we can do. Writes and reads get combined in these cases.
	 */
	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,

	[C(LL)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ,
	[C(LL)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ,
	[C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE,
	[C(LL)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};

/*
 * Cortex-A7 HW events mapping
 */
static const unsigned armv7_a7_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
};

static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	/*
	 * The performance counters don't differentiate between read and write
	 * accesses/misses so this isn't strictly correct, but it's the best we
	 * can do. Writes and reads get combined.
	 */
	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,

	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,

	[C(LL)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_CACHE_ACCESS,
	[C(LL)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
	[C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_CACHE_ACCESS,
	[C(LL)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};

/*
 * Cortex-A12 HW events mapping
 */
static const unsigned armv7_a12_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_A12_PERFCTR_PC_WRITE_SPEC,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
};

static const unsigned armv7_a12_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,

	/*
	 * Not all performance counters differentiate between read and write
	 * accesses/misses so we're not always strictly correct, but it's the
	 * best we can do. Writes and reads get combined in these cases.
	 */
	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,

	[C(LL)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ,
	[C(LL)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
	[C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE,
	[C(LL)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
	[C(DTLB)][C(OP_PREFETCH)][C(RESULT_MISS)]	= ARMV7_A12_PERFCTR_PF_TLB_REFILL,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};

/*
 * Krait HW events mapping
 */
static const unsigned krait_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]	    = ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]	    = ARMV7_PERFCTR_CLOCK_CYCLES,
};

static const unsigned krait_perf_map_no_branch[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]	    = ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]	    = ARMV7_PERFCTR_CLOCK_CYCLES,
};

static const unsigned krait_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					  [PERF_COUNT_HW_CACHE_OP_MAX]
					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	/*
	 * The performance counters don't differentiate between read and write
	 * accesses/misses so this isn't strictly correct, but it's the best we
	 * can do. Writes and reads get combined.
	 */
	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,

	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_ICACHE_ACCESS,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= KRAIT_PERFCTR_L1_ICACHE_MISS,

	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_DTLB_ACCESS,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_DTLB_ACCESS,

	[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_ITLB_ACCESS,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_ITLB_ACCESS,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};

/*
 * Scorpion HW events mapping
 */
static const unsigned scorpion_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]	    = ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]	    = ARMV7_PERFCTR_CLOCK_CYCLES,
};

static const unsigned scorpion_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					    [PERF_COUNT_HW_CACHE_OP_MAX]
					    [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,
	/*
	 * The performance counters don't differentiate between read and write
	 * accesses/misses so this isn't strictly correct, but it's the best we
	 * can do. Writes and reads get combined.
	 */
	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = SCORPION_ICACHE_ACCESS,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_ICACHE_MISS,
	/*
	 * Only ITLB misses and DTLB refills are supported.  If users want the
	 * DTLB refill misses, a raw counter must be used.
	 */
	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = SCORPION_DTLB_ACCESS,
	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_DTLB_MISS,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = SCORPION_DTLB_ACCESS,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = SCORPION_DTLB_MISS,
	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_ITLB_MISS,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = SCORPION_ITLB_MISS,
	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};

PMU_FORMAT_ATTR(event, "config:0-7");

static struct attribute *armv7_pmu_format_attrs[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group armv7_pmu_format_attr_group = {
	.name = "format",
	.attrs = armv7_pmu_format_attrs,
};
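
/*
 * Usage sketch (illustrative): the "format" group above publishes the event
 * field layout through sysfs, typically as
 * /sys/bus/event_source/devices/<pmu>/format/event = "config:0-7", so raw
 * hardware events can be requested symbolically from userspace, e.g.
 *
 *	perf stat -e armv7_cortex_a9/event=0x68/ -- <workload>
 *
 * which programs event 0x68 (INSTR_CORE_RENAME on a Cortex-A9) into a
 * counter via the low 8 bits of perf_event_attr.config.
 */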

#define ARMV7_EVENT_ATTR_RESOLVE(m) #m
#define ARMV7_EVENT_ATTR(name, config) \
	PMU_EVENT_ATTR_STRING(name, armv7_event_attr_##name, \
			      "event=" ARMV7_EVENT_ATTR_RESOLVE(config))

ARMV7_EVENT_ATTR(sw_incr, ARMV7_PERFCTR_PMNC_SW_INCR);
ARMV7_EVENT_ATTR(l1i_cache_refill, ARMV7_PERFCTR_L1_ICACHE_REFILL);
ARMV7_EVENT_ATTR(l1i_tlb_refill, ARMV7_PERFCTR_ITLB_REFILL);
ARMV7_EVENT_ATTR(l1d_cache_refill, ARMV7_PERFCTR_L1_DCACHE_REFILL);
ARMV7_EVENT_ATTR(l1d_cache, ARMV7_PERFCTR_L1_DCACHE_ACCESS);
ARMV7_EVENT_ATTR(l1d_tlb_refill, ARMV7_PERFCTR_DTLB_REFILL);
ARMV7_EVENT_ATTR(ld_retired, ARMV7_PERFCTR_MEM_READ);
ARMV7_EVENT_ATTR(st_retired, ARMV7_PERFCTR_MEM_WRITE);
ARMV7_EVENT_ATTR(inst_retired, ARMV7_PERFCTR_INSTR_EXECUTED);
ARMV7_EVENT_ATTR(exc_taken, ARMV7_PERFCTR_EXC_TAKEN);
ARMV7_EVENT_ATTR(exc_return, ARMV7_PERFCTR_EXC_EXECUTED);
ARMV7_EVENT_ATTR(cid_write_retired, ARMV7_PERFCTR_CID_WRITE);
ARMV7_EVENT_ATTR(pc_write_retired, ARMV7_PERFCTR_PC_WRITE);
ARMV7_EVENT_ATTR(br_immed_retired, ARMV7_PERFCTR_PC_IMM_BRANCH);
ARMV7_EVENT_ATTR(br_return_retired, ARMV7_PERFCTR_PC_PROC_RETURN);
ARMV7_EVENT_ATTR(unaligned_ldst_retired, ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS);
ARMV7_EVENT_ATTR(br_mis_pred, ARMV7_PERFCTR_PC_BRANCH_MIS_PRED);
ARMV7_EVENT_ATTR(cpu_cycles, ARMV7_PERFCTR_CLOCK_CYCLES);
ARMV7_EVENT_ATTR(br_pred, ARMV7_PERFCTR_PC_BRANCH_PRED);

static struct attribute *armv7_pmuv1_event_attrs[] = {
	&armv7_event_attr_sw_incr.attr.attr,
	&armv7_event_attr_l1i_cache_refill.attr.attr,
	&armv7_event_attr_l1i_tlb_refill.attr.attr,
	&armv7_event_attr_l1d_cache_refill.attr.attr,
	&armv7_event_attr_l1d_cache.attr.attr,
	&armv7_event_attr_l1d_tlb_refill.attr.attr,
	&armv7_event_attr_ld_retired.attr.attr,
	&armv7_event_attr_st_retired.attr.attr,
	&armv7_event_attr_inst_retired.attr.attr,
	&armv7_event_attr_exc_taken.attr.attr,
	&armv7_event_attr_exc_return.attr.attr,
	&armv7_event_attr_cid_write_retired.attr.attr,
	&armv7_event_attr_pc_write_retired.attr.attr,
	&armv7_event_attr_br_immed_retired.attr.attr,
	&armv7_event_attr_br_return_retired.attr.attr,
	&armv7_event_attr_unaligned_ldst_retired.attr.attr,
	&armv7_event_attr_br_mis_pred.attr.attr,
	&armv7_event_attr_cpu_cycles.attr.attr,
	&armv7_event_attr_br_pred.attr.attr,
	NULL,
};

static struct attribute_group armv7_pmuv1_events_attr_group = {
	.name = "events",
	.attrs = armv7_pmuv1_event_attrs,
};

static const struct attribute_group *armv7_pmuv1_attr_groups[] = {
	&armv7_pmuv1_events_attr_group,
	&armv7_pmu_format_attr_group,
	NULL,
};

ARMV7_EVENT_ATTR(mem_access, ARMV7_PERFCTR_MEM_ACCESS);
ARMV7_EVENT_ATTR(l1i_cache, ARMV7_PERFCTR_L1_ICACHE_ACCESS);
ARMV7_EVENT_ATTR(l1d_cache_wb, ARMV7_PERFCTR_L1_DCACHE_WB);
ARMV7_EVENT_ATTR(l2d_cache, ARMV7_PERFCTR_L2_CACHE_ACCESS);
ARMV7_EVENT_ATTR(l2d_cache_refill, ARMV7_PERFCTR_L2_CACHE_REFILL);
ARMV7_EVENT_ATTR(l2d_cache_wb, ARMV7_PERFCTR_L2_CACHE_WB);
ARMV7_EVENT_ATTR(bus_access, ARMV7_PERFCTR_BUS_ACCESS);
ARMV7_EVENT_ATTR(memory_error, ARMV7_PERFCTR_MEM_ERROR);
ARMV7_EVENT_ATTR(inst_spec, ARMV7_PERFCTR_INSTR_SPEC);
ARMV7_EVENT_ATTR(ttbr_write_retired, ARMV7_PERFCTR_TTBR_WRITE);
ARMV7_EVENT_ATTR(bus_cycles, ARMV7_PERFCTR_BUS_CYCLES);

static struct attribute *armv7_pmuv2_event_attrs[] = {
	&armv7_event_attr_sw_incr.attr.attr,
	&armv7_event_attr_l1i_cache_refill.attr.attr,
	&armv7_event_attr_l1i_tlb_refill.attr.attr,
	&armv7_event_attr_l1d_cache_refill.attr.attr,
	&armv7_event_attr_l1d_cache.attr.attr,
	&armv7_event_attr_l1d_tlb_refill.attr.attr,
	&armv7_event_attr_ld_retired.attr.attr,
	&armv7_event_attr_st_retired.attr.attr,
	&armv7_event_attr_inst_retired.attr.attr,
	&armv7_event_attr_exc_taken.attr.attr,
	&armv7_event_attr_exc_return.attr.attr,
	&armv7_event_attr_cid_write_retired.attr.attr,
	&armv7_event_attr_pc_write_retired.attr.attr,
	&armv7_event_attr_br_immed_retired.attr.attr,
	&armv7_event_attr_br_return_retired.attr.attr,
	&armv7_event_attr_unaligned_ldst_retired.attr.attr,
	&armv7_event_attr_br_mis_pred.attr.attr,
	&armv7_event_attr_cpu_cycles.attr.attr,
	&armv7_event_attr_br_pred.attr.attr,
	&armv7_event_attr_mem_access.attr.attr,
	&armv7_event_attr_l1i_cache.attr.attr,
	&armv7_event_attr_l1d_cache_wb.attr.attr,
	&armv7_event_attr_l2d_cache.attr.attr,
	&armv7_event_attr_l2d_cache_refill.attr.attr,
	&armv7_event_attr_l2d_cache_wb.attr.attr,
	&armv7_event_attr_bus_access.attr.attr,
	&armv7_event_attr_memory_error.attr.attr,
	&armv7_event_attr_inst_spec.attr.attr,
	&armv7_event_attr_ttbr_write_retired.attr.attr,
	&armv7_event_attr_bus_cycles.attr.attr,
	NULL,
};

static struct attribute_group armv7_pmuv2_events_attr_group = {
	.name = "events",
	.attrs = armv7_pmuv2_event_attrs,
};

static const struct attribute_group *armv7_pmuv2_attr_groups[] = {
	&armv7_pmuv2_events_attr_group,
	&armv7_pmu_format_attr_group,
	NULL,
};

/*
 * Perf Events' indices
 */
#define	ARMV7_IDX_CYCLE_COUNTER	0
#define	ARMV7_IDX_COUNTER0	1
#define	ARMV7_IDX_COUNTER_LAST(cpu_pmu) \
	(ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)

#define	ARMV7_MAX_COUNTERS	32
#define	ARMV7_COUNTER_MASK	(ARMV7_MAX_COUNTERS - 1)

/*
 * ARMv7 low level PMNC access
 */

/*
 * Perf Event to low level counters mapping
 */
#define	ARMV7_IDX_TO_COUNTER(x)	\
	(((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK)
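
/*
 * A minimal sanity sketch (hypothetical helper, for illustration only;
 * assumes BUILD_BUG_ON() is visible via the existing includes): perf index 0
 * is the dedicated cycle counter and event counters start at index 1, so the
 * hardware counter number is simply the perf index minus one, modulo 32.
 */
static inline void armv7_idx_mapping_example(void)
{
	/* Index 1 (first event counter) maps to hardware counter PMN0. */
	BUILD_BUG_ON(ARMV7_IDX_TO_COUNTER(ARMV7_IDX_COUNTER0) != 0);
	/* Index 4 maps to hardware counter PMN3. */
	BUILD_BUG_ON(ARMV7_IDX_TO_COUNTER(ARMV7_IDX_COUNTER0 + 3) != 3);
}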

/*
 * Per-CPU PMNC: config reg
 */
#define ARMV7_PMNC_E		(1 << 0) /* Enable all counters */
#define ARMV7_PMNC_P		(1 << 1) /* Reset all counters */
#define ARMV7_PMNC_C		(1 << 2) /* Cycle counter reset */
#define ARMV7_PMNC_D		(1 << 3) /* CCNT counts every 64th cpu cycle */
#define ARMV7_PMNC_X		(1 << 4) /* Export to ETM */
#define ARMV7_PMNC_DP		(1 << 5) /* Disable CCNT if non-invasive debug */
#define	ARMV7_PMNC_N_SHIFT	11	 /* Number of counters supported */
#define	ARMV7_PMNC_N_MASK	0x1f
#define	ARMV7_PMNC_MASK		0x3f	 /* Mask for writable bits */

/*
 * FLAG: counters overflow flag status reg
 */
#define	ARMV7_FLAG_MASK		0xffffffff	/* Mask for writable bits */
#define	ARMV7_OVERFLOWED_MASK	ARMV7_FLAG_MASK

/*
 * PMXEVTYPER: Event selection reg
 */
#define	ARMV7_EVTYPE_MASK	0xc80000ff	/* Mask for writable bits */
#define	ARMV7_EVTYPE_EVENT	0xff		/* Mask for EVENT bits */

/*
 * Event filters for PMUv2
 */
#define	ARMV7_EXCLUDE_PL1	(1 << 31)
#define	ARMV7_EXCLUDE_USER	(1 << 30)
#define	ARMV7_INCLUDE_HYP	(1 << 27)
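
/*
 * Worked example (illustrative): to count user space only, perf sets
 * attr.exclude_kernel = 1 (and typically attr.exclude_hv = 1), which
 * armv7pmu_set_event_filter() below turns into
 *
 *	config_base = ARMV7_EXCLUDE_PL1;
 *
 * i.e. bit 31 set in PMXEVTYPER, so PL1 (kernel) activity is filtered out
 * while PL0 (user) activity is counted.
 */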

static inline u32 armv7_pmnc_read(void)
{
	u32 val;
	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
	return val;
}

static inline void armv7_pmnc_write(u32 val)
{
	val &= ARMV7_PMNC_MASK;
	isb();
	asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
}

static inline int armv7_pmnc_has_overflowed(u32 pmnc)
{
	return pmnc & ARMV7_OVERFLOWED_MASK;
}

static inline int armv7_pmnc_counter_valid(struct arm_pmu *cpu_pmu, int idx)
{
	return idx >= ARMV7_IDX_CYCLE_COUNTER &&
		idx <= ARMV7_IDX_COUNTER_LAST(cpu_pmu);
}

static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx)
{
	return pmnc & BIT(ARMV7_IDX_TO_COUNTER(idx));
}

static inline void armv7_pmnc_select_counter(int idx)
{
	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter));
	isb();
}

static inline u32 armv7pmu_read_counter(struct perf_event *event)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	u32 value = 0;

	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
		pr_err("CPU%u reading wrong counter %d\n",
			smp_processor_id(), idx);
	} else if (idx == ARMV7_IDX_CYCLE_COUNTER) {
		asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
	} else {
		armv7_pmnc_select_counter(idx);
		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (value));
	}

	return value;
}

static inline void armv7pmu_write_counter(struct perf_event *event, u32 value)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
		pr_err("CPU%u writing wrong counter %d\n",
			smp_processor_id(), idx);
	} else if (idx == ARMV7_IDX_CYCLE_COUNTER) {
		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
	} else {
		armv7_pmnc_select_counter(idx);
		asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value));
	}
}

static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
{
	armv7_pmnc_select_counter(idx);
	val &= ARMV7_EVTYPE_MASK;
	asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
}

static inline void armv7_pmnc_enable_counter(int idx)
{
	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter)));
}

static inline void armv7_pmnc_disable_counter(int idx)
{
	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter)));
}

static inline void armv7_pmnc_enable_intens(int idx)
{
	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter)));
}

static inline void armv7_pmnc_disable_intens(int idx)
{
	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
	isb();
	/* Clear the overflow flag in case an interrupt is pending. */
	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter)));
	isb();
}

static inline u32 armv7_pmnc_getreset_flags(void)
{
	u32 val;

	/* Read */
	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));

	/* Write to clear flags */
	val &= ARMV7_FLAG_MASK;
	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));

	return val;
}

#ifdef DEBUG
static void armv7_pmnc_dump_regs(struct arm_pmu *cpu_pmu)
{
	u32 val;
	unsigned int cnt;

	pr_info("PMNC registers dump:\n");

	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
	pr_info("PMNC  =0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
	pr_info("CNTENS=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
	pr_info("INTENS=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
	pr_info("FLAGS =0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
	pr_info("SELECT=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
	pr_info("CCNT  =0x%08x\n", val);

	for (cnt = ARMV7_IDX_COUNTER0;
			cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) {
		armv7_pmnc_select_counter(cnt);
		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
		pr_info("CNT[%d] count =0x%08x\n",
			ARMV7_IDX_TO_COUNTER(cnt), val);
		asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
		pr_info("CNT[%d] evtsel=0x%08x\n",
			ARMV7_IDX_TO_COUNTER(cnt), val);
	}
}
#endif

static void armv7pmu_enable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
	int idx = hwc->idx;

	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
		pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
			smp_processor_id(), idx);
		return;
	}

	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Set event (if destined for PMNx counters)
	 * We only need to set the event for the cycle counter if we
	 * have the ability to perform event filtering.
	 */
	if (cpu_pmu->set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
		armv7_pmnc_write_evtsel(idx, hwc->config_base);

	/*
	 * Enable interrupt for this counter
	 */
	armv7_pmnc_enable_intens(idx);

	/*
	 * Enable counter
	 */
	armv7_pmnc_enable_counter(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv7pmu_disable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
	int idx = hwc->idx;

	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
		pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
			smp_processor_id(), idx);
		return;
	}

	/*
	 * Disable counter and interrupt
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Disable interrupt for this counter
	 */
	armv7_pmnc_disable_intens(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
{
	u32 pmnc;
	struct perf_sample_data data;
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
	struct pt_regs *regs;
	int idx;

	/*
	 * Get and reset the IRQ flags
	 */
	pmnc = armv7_pmnc_getreset_flags();

	/*
	 * Did an overflow occur?
	 */
	if (!armv7_pmnc_has_overflowed(pmnc))
		return IRQ_NONE;

	/*
	 * Handle the counter(s) overflow(s)
	 */
	regs = get_irq_regs();

	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		/* Ignore if we don't have an event. */
		if (!event)
			continue;

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event);
		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!armpmu_event_set_period(event))
			continue;

		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(event);
	}

	/*
	 * Handle the pending perf events.
	 *
	 * Note: this call *must* be run with interrupts disabled. For
	 * platforms that can have the PMU interrupts raised as an NMI, this
	 * will not work.
	 */
	irq_work_run();

	return IRQ_HANDLED;
}

static void armv7pmu_start(struct arm_pmu *cpu_pmu)
{
	unsigned long flags;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/* Enable all counters */
	armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv7pmu_stop(struct arm_pmu *cpu_pmu)
{
	unsigned long flags;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/* Disable all counters */
	armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
				  struct perf_event *event)
{
	int idx;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	unsigned long evtype = hwc->config_base & ARMV7_EVTYPE_EVENT;

	/* Always place a cycle-counter event into the cycle counter. */
	if (evtype == ARMV7_PERFCTR_CPU_CYCLES) {
		if (test_and_set_bit(ARMV7_IDX_CYCLE_COUNTER, cpuc->used_mask))
			return -EAGAIN;

		return ARMV7_IDX_CYCLE_COUNTER;
	}

	/*
	 * For anything other than a cycle counter, try to use
	 * the event counters
	 */
	for (idx = ARMV7_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
		if (!test_and_set_bit(idx, cpuc->used_mask))
			return idx;
	}

	/* The counters are all in use. */
	return -EAGAIN;
}

/*
 * Add an event filter to a given event. This will only work for PMUv2 PMUs.
 */
static int armv7pmu_set_event_filter(struct hw_perf_event *event,
				     struct perf_event_attr *attr)
{
	unsigned long config_base = 0;

	if (attr->exclude_idle)
		return -EPERM;
	if (attr->exclude_user)
		config_base |= ARMV7_EXCLUDE_USER;
	if (attr->exclude_kernel)
		config_base |= ARMV7_EXCLUDE_PL1;
	if (!attr->exclude_hv)
		config_base |= ARMV7_INCLUDE_HYP;

	/*
	 * Install the filter into config_base as this is used to
	 * construct the event type.
	 */
	event->config_base = config_base;

	return 0;
}

static void armv7pmu_reset(void *info)
{
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
	u32 idx, nb_cnt = cpu_pmu->num_events;

	/* The counter and interrupt enable registers are unknown at reset. */
	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
		armv7_pmnc_disable_counter(idx);
		armv7_pmnc_disable_intens(idx);
	}

	/* Initialize & Reset PMNC: C and P bits */
	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
}

static int armv7_a8_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a8_perf_map,
				&armv7_a8_perf_cache_map, 0xFF);
}

static int armv7_a9_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a9_perf_map,
				&armv7_a9_perf_cache_map, 0xFF);
}

static int armv7_a5_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a5_perf_map,
				&armv7_a5_perf_cache_map, 0xFF);
}

static int armv7_a15_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a15_perf_map,
				&armv7_a15_perf_cache_map, 0xFF);
}

static int armv7_a7_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a7_perf_map,
				&armv7_a7_perf_cache_map, 0xFF);
}

static int armv7_a12_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a12_perf_map,
				&armv7_a12_perf_cache_map, 0xFF);
}

static int krait_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &krait_perf_map,
				&krait_perf_cache_map, 0xFFFFF);
}

static int krait_map_event_no_branch(struct perf_event *event)
{
	return armpmu_map_event(event, &krait_perf_map_no_branch,
				&krait_perf_cache_map, 0xFFFFF);
}

static int scorpion_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &scorpion_perf_map,
				&scorpion_perf_cache_map, 0xFFFFF);
}

static void armv7pmu_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->handle_irq	= armv7pmu_handle_irq;
	cpu_pmu->enable		= armv7pmu_enable_event;
	cpu_pmu->disable	= armv7pmu_disable_event;
	cpu_pmu->read_counter	= armv7pmu_read_counter;
	cpu_pmu->write_counter	= armv7pmu_write_counter;
	cpu_pmu->get_event_idx	= armv7pmu_get_event_idx;
	cpu_pmu->start		= armv7pmu_start;
	cpu_pmu->stop		= armv7pmu_stop;
	cpu_pmu->reset		= armv7pmu_reset;
	cpu_pmu->max_period	= (1LLU << 32) - 1;
}

static void armv7_read_num_pmnc_events(void *info)
{
	int *nb_cnt = info;

	/* Read the nb of CNTx counters supported from PMNC */
	*nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;

	/* Add the CPU cycles counter */
	*nb_cnt += 1;
}

static int armv7_probe_num_events(struct arm_pmu *arm_pmu)
{
	return smp_call_function_any(&arm_pmu->supported_cpus,
				     armv7_read_num_pmnc_events,
				     &arm_pmu->num_events, 1);
}
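
/*
 * Worked example (illustrative): an implementation with 6 event counters
 * reports N = 6 in PMNC[15:11], so armv7_read_num_pmnc_events() above yields
 * num_events = 6 + 1 = 7 -- the event counters plus the dedicated cycle
 * counter, matching the index space defined by ARMV7_IDX_CYCLE_COUNTER and
 * ARMV7_IDX_COUNTER0.
 */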

static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv7_cortex_a8";
	cpu_pmu->map_event	= armv7_a8_map_event;
	cpu_pmu->pmu.attr_groups = armv7_pmuv1_attr_groups;
	return armv7_probe_num_events(cpu_pmu);
}

static int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv7_cortex_a9";
	cpu_pmu->map_event	= armv7_a9_map_event;
	cpu_pmu->pmu.attr_groups = armv7_pmuv1_attr_groups;
	return armv7_probe_num_events(cpu_pmu);
}

static int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv7_cortex_a5";
	cpu_pmu->map_event	= armv7_a5_map_event;
	cpu_pmu->pmu.attr_groups = armv7_pmuv1_attr_groups;
	return armv7_probe_num_events(cpu_pmu);
}

static int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv7_cortex_a15";
	cpu_pmu->map_event	= armv7_a15_map_event;
	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
	cpu_pmu->pmu.attr_groups = armv7_pmuv2_attr_groups;
	return armv7_probe_num_events(cpu_pmu);
}

static int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv7_cortex_a7";
	cpu_pmu->map_event	= armv7_a7_map_event;
	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
	cpu_pmu->pmu.attr_groups = armv7_pmuv2_attr_groups;
	return armv7_probe_num_events(cpu_pmu);
}

static int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv7_cortex_a12";
	cpu_pmu->map_event	= armv7_a12_map_event;
	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
	cpu_pmu->pmu.attr_groups = armv7_pmuv2_attr_groups;
	return armv7_probe_num_events(cpu_pmu);
}

static int armv7_a17_pmu_init(struct arm_pmu *cpu_pmu)
{
	int ret = armv7_a12_pmu_init(cpu_pmu);
	cpu_pmu->name = "armv7_cortex_a17";
	cpu_pmu->pmu.attr_groups = armv7_pmuv2_attr_groups;
	return ret;
}

/*
 * Krait Performance Monitor Region Event Selection Register (PMRESRn)
 *
 *            31   30     24     16     8      0
 *            +--------------------------------+
 *  PMRESR0   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 0
 *            +--------------------------------+
 *  PMRESR1   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 1
 *            +--------------------------------+
 *  PMRESR2   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 2
 *            +--------------------------------+
 *  VPMRESR0  | EN |  CC  |  CC  |  CC  |  CC  |   N = 2, R = ?
 *            +--------------------------------+
 *              EN | G=3  | G=2  | G=1  | G=0
 *
 *  Event Encoding:
 *
 *      hwc->config_base = 0xNRCCG
 *
 *      N  = prefix, 1 for Krait CPU (PMRESRn), 2 for Venum VFP (VPMRESR)
 *      R  = region register
 *      CC = class of events the group G is choosing from
 *      G  = group or particular event
 *
 *  Example: 0x12021 is a Krait CPU event in PMRESR2's group 1 with code 2
 *
 *  A region (R) corresponds to a piece of the CPU (execution unit, instruction
 *  unit, etc.) while the event code (CC) corresponds to a particular class of
 *  events (interrupts for example). An event code is broken down into
 *  groups (G) that can be mapped into the PMU (irq, fiqs, and irq+fiqs for
 *  example).
 */

#define KRAIT_EVENT		(1 << 16)
#define VENUM_EVENT		(2 << 16)
#define KRAIT_EVENT_MASK	(KRAIT_EVENT | VENUM_EVENT)
#define PMRESRn_EN		BIT(31)

#define EVENT_REGION(event)	(((event) >> 12) & 0xf)		/* R */
#define EVENT_GROUP(event)	((event) & 0xf)			/* G */
#define EVENT_CODE(event)	(((event) >> 4) & 0xff)		/* CC */
#define EVENT_VENUM(event)	(!!(event & VENUM_EVENT))	/* N=2 */
#define EVENT_CPU(event)	(!!(event & KRAIT_EVENT))	/* N=1 */
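
/*
 * Worked decode (illustrative) of the example from the comment above,
 * hwc->config_base = 0x12021:
 *
 *	EVENT_CPU(0x12021)    = 1	(N = 1, a Krait CPU event)
 *	EVENT_REGION(0x12021) = 2	(R = 2, so PMRESR2)
 *	EVENT_CODE(0x12021)   = 0x02	(CC = 2)
 *	EVENT_GROUP(0x12021)  = 1	(G = 1)
 */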

static u32 krait_read_pmresrn(int n)
{
	u32 val;

	switch (n) {
	case 0:
		asm volatile("mrc p15, 1, %0, c9, c15, 0" : "=r" (val));
		break;
	case 1:
		asm volatile("mrc p15, 1, %0, c9, c15, 1" : "=r" (val));
		break;
	case 2:
		asm volatile("mrc p15, 1, %0, c9, c15, 2" : "=r" (val));
		break;
	default:
		BUG(); /* Should be validated in krait_pmu_get_event_idx() */
	}

	return val;
}

static void krait_write_pmresrn(int n, u32 val)
{
	switch (n) {
	case 0:
		asm volatile("mcr p15, 1, %0, c9, c15, 0" : : "r" (val));
		break;
	case 1:
		asm volatile("mcr p15, 1, %0, c9, c15, 1" : : "r" (val));
		break;
	case 2:
		asm volatile("mcr p15, 1, %0, c9, c15, 2" : : "r" (val));
		break;
	default:
		BUG(); /* Should be validated in krait_pmu_get_event_idx() */
	}
}

static u32 venum_read_pmresr(void)
{
	u32 val;
	asm volatile("mrc p10, 7, %0, c11, c0, 0" : "=r" (val));
	return val;
}

static void venum_write_pmresr(u32 val)
{
	asm volatile("mcr p10, 7, %0, c11, c0, 0" : : "r" (val));
}

static void venum_pre_pmresr(u32 *venum_orig_val, u32 *fp_orig_val)
{
	u32 venum_new_val;
	u32 fp_new_val;

	BUG_ON(preemptible());
	/* CPACR Enable CP10 and CP11 access */
	*venum_orig_val = get_copro_access();
	venum_new_val = *venum_orig_val | CPACC_SVC(10) | CPACC_SVC(11);
	set_copro_access(venum_new_val);

	/* Enable FPEXC */
	*fp_orig_val = fmrx(FPEXC);
	fp_new_val = *fp_orig_val | FPEXC_EN;
	fmxr(FPEXC, fp_new_val);
}

static void venum_post_pmresr(u32 venum_orig_val, u32 fp_orig_val)
{
	BUG_ON(preemptible());
	/* Restore FPEXC */
	fmxr(FPEXC, fp_orig_val);
	isb();
	/* Restore CPACR */
	set_copro_access(venum_orig_val);
}

static u32 krait_get_pmresrn_event(unsigned int region)
{
	static const u32 pmresrn_table[] = { KRAIT_PMRESR0_GROUP0,
					     KRAIT_PMRESR1_GROUP0,
					     KRAIT_PMRESR2_GROUP0 };
	return pmresrn_table[region];
}

static void krait_evt_setup(int idx, u32 config_base)
{
	u32 val;
	u32 mask;
	u32 vval, fval;
	unsigned int region = EVENT_REGION(config_base);
	unsigned int group = EVENT_GROUP(config_base);
	unsigned int code = EVENT_CODE(config_base);
	unsigned int group_shift;
	bool venum_event = EVENT_VENUM(config_base);

	group_shift = group * 8;
	mask = 0xff << group_shift;

	/* Configure evtsel for the region and group */
	if (venum_event)
		val = KRAIT_VPMRESR0_GROUP0;
	else
		val = krait_get_pmresrn_event(region);
	val += group;
	/* Mix in mode-exclusion bits */
	val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1);
	armv7_pmnc_write_evtsel(idx, val);

	if (venum_event) {
		venum_pre_pmresr(&vval, &fval);
		val = venum_read_pmresr();
		val &= ~mask;
		val |= code << group_shift;
		val |= PMRESRn_EN;
		venum_write_pmresr(val);
		venum_post_pmresr(vval, fval);
	} else {
		val = krait_read_pmresrn(region);
		val &= ~mask;
		val |= code << group_shift;
		val |= PMRESRn_EN;
		krait_write_pmresrn(region, val);
	}
}

static u32 clear_pmresrn_group(u32 val, int group)
{
	u32 mask;
	int group_shift;

	group_shift = group * 8;
	mask = 0xff << group_shift;
	val &= ~mask;

	/* Don't clear enable bit if entire region isn't disabled */
	if (val & ~PMRESRn_EN)
		return val |= PMRESRn_EN;

	return 0;
}

static void krait_clearpmu(u32 config_base)
{
	u32 val;
	u32 vval, fval;
	unsigned int region = EVENT_REGION(config_base);
	unsigned int group = EVENT_GROUP(config_base);
	bool venum_event = EVENT_VENUM(config_base);

	if (venum_event) {
		venum_pre_pmresr(&vval, &fval);
		val = venum_read_pmresr();
		val = clear_pmresrn_group(val, group);
		venum_write_pmresr(val);
		venum_post_pmresr(vval, fval);
	} else {
		val = krait_read_pmresrn(region);
		val = clear_pmresrn_group(val, group);
		krait_write_pmresrn(region, val);
	}
}

static void krait_pmu_disable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	/* Disable counter and interrupt */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/* Disable counter */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Clear pmresr code (if destined for PMNx counters)
	 */
	if (hwc->config_base & KRAIT_EVENT_MASK)
		krait_clearpmu(hwc->config_base);

	/* Disable interrupt for this counter */
	armv7_pmnc_disable_intens(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void krait_pmu_enable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/* Disable counter */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Set event (if destined for PMNx counters)
	 * We set the event for the cycle counter because we
	 * have the ability to perform event filtering.
	 */
	if (hwc->config_base & KRAIT_EVENT_MASK)
		krait_evt_setup(idx, hwc->config_base);
	else
		armv7_pmnc_write_evtsel(idx, hwc->config_base);

	/* Enable interrupt for this counter */
	armv7_pmnc_enable_intens(idx);

	/* Enable counter */
	armv7_pmnc_enable_counter(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
1528 
1529 static void krait_pmu_reset(void *info)
1530 {
1531 	u32 vval, fval;
1532 	struct arm_pmu *cpu_pmu = info;
1533 	u32 idx, nb_cnt = cpu_pmu->num_events;
1534 
1535 	armv7pmu_reset(info);
1536 
1537 	/* Clear all pmresrs */
1538 	krait_write_pmresrn(0, 0);
1539 	krait_write_pmresrn(1, 0);
1540 	krait_write_pmresrn(2, 0);
1541 
1542 	venum_pre_pmresr(&vval, &fval);
1543 	venum_write_pmresr(0);
1544 	venum_post_pmresr(vval, fval);
1545 
1546 	/* Reset PMxEVNCTCR to a sane default */
1547 	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
1548 		armv7_pmnc_select_counter(idx);
1549 		asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
1550 	}
1551 }
1553 
1554 static int krait_event_to_bit(struct perf_event *event, unsigned int region,
1555 			      unsigned int group)
1556 {
1557 	int bit;
1558 	struct hw_perf_event *hwc = &event->hw;
1559 	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1560 
1561 	if (hwc->config_base & VENUM_EVENT)
1562 		bit = KRAIT_VPMRESR0_GROUP0;
1563 	else
1564 		bit = krait_get_pmresrn_event(region);
1565 	bit -= krait_get_pmresrn_event(0);
1566 	bit += group;
1567 	/*
1568 	 * Lower bits are reserved for use by the counters (see
1569 	 * armv7pmu_get_event_idx() for more info)
1570 	 */
1571 	bit += ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1;
1572 
1573 	return bit;
1574 }
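
/*
 * Example (using only the helpers above): with the hardware counters
 * occupying used_mask bits 0..ARMV7_IDX_COUNTER_LAST(cpu_pmu), an
 * event in PMRESR1 group 2 claims bit
 *
 *	ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1 +
 *		(krait_get_pmresrn_event(1) - krait_get_pmresrn_event(0)) + 2
 *
 * so each (region, group) pair owns exactly one bit above the counters.
 */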
1575 
1576 /*
1577  * We check for column exclusion constraints here.
1578  * Two events can't use the same group within a pmresr register.
1579  */
1580 static int krait_pmu_get_event_idx(struct pmu_hw_events *cpuc,
1581 				   struct perf_event *event)
1582 {
1583 	int idx;
1584 	int bit = -1;
1585 	struct hw_perf_event *hwc = &event->hw;
1586 	unsigned int region = EVENT_REGION(hwc->config_base);
1587 	unsigned int code = EVENT_CODE(hwc->config_base);
1588 	unsigned int group = EVENT_GROUP(hwc->config_base);
1589 	bool venum_event = EVENT_VENUM(hwc->config_base);
1590 	bool krait_event = EVENT_CPU(hwc->config_base);
1591 
1592 	if (venum_event || krait_event) {
1593 		/* Ignore invalid events */
1594 		if (group > 3 || region > 2)
1595 			return -EINVAL;
1596 		if (venum_event && (code & 0xe0))
1597 			return -EINVAL;
1598 
1599 		bit = krait_event_to_bit(event, region, group);
1600 		if (test_and_set_bit(bit, cpuc->used_mask))
1601 			return -EAGAIN;
1602 	}
1603 
1604 	idx = armv7pmu_get_event_idx(cpuc, event);
1605 	if (idx < 0 && bit >= 0)
1606 		clear_bit(bit, cpuc->used_mask);
1607 
1608 	return idx;
1609 }
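
/*
 * Column-exclusion example: two events that decode to the same
 * (region, group) - say both PMRESR0 group 1 - map to the same
 * used_mask bit, so the second test_and_set_bit() above fails with
 * -EAGAIN, while PMRESR0 group 1 and PMRESR0 group 2 can coexist.
 */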
1610 
1611 static void krait_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
1612 				      struct perf_event *event)
1613 {
1614 	int bit;
1615 	struct hw_perf_event *hwc = &event->hw;
1616 	unsigned int region = EVENT_REGION(hwc->config_base);
1617 	unsigned int group = EVENT_GROUP(hwc->config_base);
1618 	bool venum_event = EVENT_VENUM(hwc->config_base);
1619 	bool krait_event = EVENT_CPU(hwc->config_base);
1620 
1621 	if (venum_event || krait_event) {
1622 		bit = krait_event_to_bit(event, region, group);
1623 		clear_bit(bit, cpuc->used_mask);
1624 	}
1625 }
1626 
1627 static int krait_pmu_init(struct arm_pmu *cpu_pmu)
1628 {
1629 	armv7pmu_init(cpu_pmu);
1630 	cpu_pmu->name		= "armv7_krait";
1631 	/* Some early versions of Krait don't support PC write events */
1632 	if (of_property_read_bool(cpu_pmu->plat_device->dev.of_node,
1633 				  "qcom,no-pc-write"))
1634 		cpu_pmu->map_event = krait_map_event_no_branch;
1635 	else
1636 		cpu_pmu->map_event = krait_map_event;
1637 	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
1638 	cpu_pmu->reset		= krait_pmu_reset;
1639 	cpu_pmu->enable		= krait_pmu_enable_event;
1640 	cpu_pmu->disable	= krait_pmu_disable_event;
1641 	cpu_pmu->get_event_idx	= krait_pmu_get_event_idx;
1642 	cpu_pmu->clear_event_idx = krait_pmu_clear_event_idx;
1643 	return armv7_probe_num_events(cpu_pmu);
1644 }
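
/*
 * Hypothetical devicetree node matched by this driver; the interrupt
 * specifier is a placeholder, not taken from a real board file:
 *
 *	pmu {
 *		compatible = "qcom,krait-pmu";
 *		interrupts = <1 7 0xf04>;
 *		qcom,no-pc-write;
 *	};
 */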
1645 
1646 /*
1647  * Scorpion Local Performance Monitor Register (LPMn)
1648  *
1649  *            31   30     24     16     8      0
1650  *            +--------------------------------+
1651  *  LPM0      | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 0
1652  *            +--------------------------------+
1653  *  LPM1      | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 1
1654  *            +--------------------------------+
1655  *  LPM2      | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 2
1656  *            +--------------------------------+
1657  *  L2LPM     | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 3
1658  *            +--------------------------------+
1659  *  VLPM      | EN |  CC  |  CC  |  CC  |  CC  |   N = 2, R = ?
1660  *            +--------------------------------+
1661  *              EN | G=3  | G=2  | G=1  | G=0
1662  *
1663  *
1664  *  Event Encoding:
1665  *
1666  *      hwc->config_base = 0xNRCCG
1667  *
1668  *      N  = prefix, 1 for Scorpion CPU (LPMn/L2LPM), 2 for Venum VFP (VLPM)
1669  *      R  = region register
1670  *      CC = class of events the group G is choosing from
1671  *      G  = group or particular event
1672  *
1673  *  Example: 0x12021 is a Scorpion CPU event in LPM2's group 1 with code 2
1674  *
1675  *  A region (R) corresponds to a piece of the CPU (execution unit, instruction
1676  *  unit, etc.) while the event code (CC) corresponds to a particular class of
1677  *  events (interrupts for example). An event code is broken down into
1678  *  groups (G) that can be mapped into the PMU (irq, fiqs, and irq+fiqs for
1679  *  example).
1680  */
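
/*
 * Worked decode of the example above, straight from the 0xNRCCG
 * layout (the EVENT_* macros used below are assumed to implement
 * exactly this nibble arithmetic):
 *
 *	config_base = 0x12021:
 *	  N  = (0x12021 >> 16) & 0xf  = 1    -> Scorpion CPU event
 *	  R  = (0x12021 >> 12) & 0xf  = 2    -> LPM2
 *	  CC = (0x12021 >>  4) & 0xff = 0x02 -> event class 2
 *	  G  =  0x12021        & 0xf  = 1    -> group 1
 */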
1681 
1682 static u32 scorpion_read_pmresrn(int n)
1683 {
1684 	u32 val;
1685 
1686 	switch (n) {
1687 	case 0:
1688 		asm volatile("mrc p15, 0, %0, c15, c0, 0" : "=r" (val));
1689 		break;
1690 	case 1:
1691 		asm volatile("mrc p15, 1, %0, c15, c0, 0" : "=r" (val));
1692 		break;
1693 	case 2:
1694 		asm volatile("mrc p15, 2, %0, c15, c0, 0" : "=r" (val));
1695 		break;
1696 	case 3:
1697 		asm volatile("mrc p15, 3, %0, c15, c2, 0" : "=r" (val));
1698 		break;
1699 	default:
1700 		BUG(); /* Should be validated in scorpion_pmu_get_event_idx() */
1701 	}
1702 
1703 	return val;
1704 }
1705 
1706 static void scorpion_write_pmresrn(int n, u32 val)
1707 {
1708 	switch (n) {
1709 	case 0:
1710 		asm volatile("mcr p15, 0, %0, c15, c0, 0" : : "r" (val));
1711 		break;
1712 	case 1:
1713 		asm volatile("mcr p15, 1, %0, c15, c0, 0" : : "r" (val));
1714 		break;
1715 	case 2:
1716 		asm volatile("mcr p15, 2, %0, c15, c0, 0" : : "r" (val));
1717 		break;
1718 	case 3:
1719 		asm volatile("mcr p15, 3, %0, c15, c2, 0" : : "r" (val));
1720 		break;
1721 	default:
1722 		BUG(); /* Should be validated in scorpion_pmu_get_event_idx() */
1723 	}
1724 }
1725 
1726 static u32 scorpion_get_pmresrn_event(unsigned int region)
1727 {
1728 	static const u32 pmresrn_table[] = { SCORPION_LPM0_GROUP0,
1729 					     SCORPION_LPM1_GROUP0,
1730 					     SCORPION_LPM2_GROUP0,
1731 					     SCORPION_L2LPM_GROUP0 };
1732 	return pmresrn_table[region];
1733 }
1734 
1735 static void scorpion_evt_setup(int idx, u32 config_base)
1736 {
1737 	u32 val;
1738 	u32 mask;
1739 	u32 vval, fval;
1740 	unsigned int region = EVENT_REGION(config_base);
1741 	unsigned int group = EVENT_GROUP(config_base);
1742 	unsigned int code = EVENT_CODE(config_base);
1743 	unsigned int group_shift;
1744 	bool venum_event = EVENT_VENUM(config_base);
1745 
1746 	group_shift = group * 8;
1747 	mask = 0xff << group_shift;
1748 
1749 	/* Configure evtsel for the region and group */
1750 	if (venum_event)
1751 		val = SCORPION_VLPM_GROUP0;
1752 	else
1753 		val = scorpion_get_pmresrn_event(region);
1754 	val += group;
1755 	/* Mix in mode-exclusion bits */
1756 	val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1);
1757 	armv7_pmnc_write_evtsel(idx, val);
1758 
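	/*
	 * Reset PMxEVNCTCR for this counter to a sane default
	 * (the same write scorpion_pmu_reset() performs below).
	 */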
1759 	asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
1760 
1761 	if (venum_event) {
1762 		venum_pre_pmresr(&vval, &fval);
1763 		val = venum_read_pmresr();
1764 		val &= ~mask;
1765 		val |= code << group_shift;
1766 		val |= PMRESRn_EN;
1767 		venum_write_pmresr(val);
1768 		venum_post_pmresr(vval, fval);
1769 	} else {
1770 		val = scorpion_read_pmresrn(region);
1771 		val &= ~mask;
1772 		val |= code << group_shift;
1773 		val |= PMRESRn_EN;
1774 		scorpion_write_pmresrn(region, val);
1775 	}
1776 }
1777 
1778 static void scorpion_clearpmu(u32 config_base)
1779 {
1780 	u32 val;
1781 	u32 vval, fval;
1782 	unsigned int region = EVENT_REGION(config_base);
1783 	unsigned int group = EVENT_GROUP(config_base);
1784 	bool venum_event = EVENT_VENUM(config_base);
1785 
1786 	if (venum_event) {
1787 		venum_pre_pmresr(&vval, &fval);
1788 		val = venum_read_pmresr();
1789 		val = clear_pmresrn_group(val, group);
1790 		venum_write_pmresr(val);
1791 		venum_post_pmresr(vval, fval);
1792 	} else {
1793 		val = scorpion_read_pmresrn(region);
1794 		val = clear_pmresrn_group(val, group);
1795 		scorpion_write_pmresrn(region, val);
1796 	}
1797 }
1798 
1799 static void scorpion_pmu_disable_event(struct perf_event *event)
1800 {
1801 	unsigned long flags;
1802 	struct hw_perf_event *hwc = &event->hw;
1803 	int idx = hwc->idx;
1804 	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1805 	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
1806 
1807 	/* Disable counter and interrupt */
1808 	raw_spin_lock_irqsave(&events->pmu_lock, flags);
1809 
1810 	/* Disable counter */
1811 	armv7_pmnc_disable_counter(idx);
1812 
1813 	/*
1814 	 * Clear pmresr code (if destined for PMNx counters)
1815 	 */
1816 	if (hwc->config_base & KRAIT_EVENT_MASK)
1817 		scorpion_clearpmu(hwc->config_base);
1818 
1819 	/* Disable interrupt for this counter */
1820 	armv7_pmnc_disable_intens(idx);
1821 
1822 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1823 }
1824 
1825 static void scorpion_pmu_enable_event(struct perf_event *event)
1826 {
1827 	unsigned long flags;
1828 	struct hw_perf_event *hwc = &event->hw;
1829 	int idx = hwc->idx;
1830 	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1831 	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
1832 
1833 	/*
1834 	 * Enable counter and interrupt, and set the counter to count
1835 	 * the event that we're interested in.
1836 	 */
1837 	raw_spin_lock_irqsave(&events->pmu_lock, flags);
1838 
1839 	/* Disable counter */
1840 	armv7_pmnc_disable_counter(idx);
1841 
1842 	/*
1843 	 * Set event (if destined for PMNx counters)
1844 	 * We don't set the event for the cycle counter because we
1845 	 * don't have the ability to perform event filtering.
1846 	 */
1847 	if (hwc->config_base & KRAIT_EVENT_MASK)
1848 		scorpion_evt_setup(idx, hwc->config_base);
1849 	else if (idx != ARMV7_IDX_CYCLE_COUNTER)
1850 		armv7_pmnc_write_evtsel(idx, hwc->config_base);
1851 
1852 	/* Enable interrupt for this counter */
1853 	armv7_pmnc_enable_intens(idx);
1854 
1855 	/* Enable counter */
1856 	armv7_pmnc_enable_counter(idx);
1857 
1858 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1859 }
1860 
1861 static void scorpion_pmu_reset(void *info)
1862 {
1863 	u32 vval, fval;
1864 	struct arm_pmu *cpu_pmu = info;
1865 	u32 idx, nb_cnt = cpu_pmu->num_events;
1866 
1867 	armv7pmu_reset(info);
1868 
1869 	/* Clear all pmresrs */
1870 	scorpion_write_pmresrn(0, 0);
1871 	scorpion_write_pmresrn(1, 0);
1872 	scorpion_write_pmresrn(2, 0);
1873 	scorpion_write_pmresrn(3, 0);
1874 
1875 	venum_pre_pmresr(&vval, &fval);
1876 	venum_write_pmresr(0);
1877 	venum_post_pmresr(vval, fval);
1878 
1879 	/* Reset PMxEVNCTCR to a sane default */
1880 	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
1881 		armv7_pmnc_select_counter(idx);
1882 		asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
1883 	}
1884 }
1885 
1886 static int scorpion_event_to_bit(struct perf_event *event, unsigned int region,
1887 			      unsigned int group)
1888 {
1889 	int bit;
1890 	struct hw_perf_event *hwc = &event->hw;
1891 	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1892 
1893 	if (hwc->config_base & VENUM_EVENT)
1894 		bit = SCORPION_VLPM_GROUP0;
1895 	else
1896 		bit = scorpion_get_pmresrn_event(region);
1897 	bit -= scorpion_get_pmresrn_event(0);
1898 	bit += group;
1899 	/*
1900 	 * Lower bits are reserved for use by the counters (see
1901 	 * armv7pmu_get_event_idx() for more info)
1902 	 */
1903 	bit += ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1;
1904 
1905 	return bit;
1906 }
1907 
1908 /*
1909  * We check for column exclusion constraints here.
1910  * Two events can't use the same group within a pmresr register.
1911  */
1912 static int scorpion_pmu_get_event_idx(struct pmu_hw_events *cpuc,
1913 				   struct perf_event *event)
1914 {
1915 	int idx;
1916 	int bit = -1;
1917 	struct hw_perf_event *hwc = &event->hw;
1918 	unsigned int region = EVENT_REGION(hwc->config_base);
1919 	unsigned int group = EVENT_GROUP(hwc->config_base);
1920 	bool venum_event = EVENT_VENUM(hwc->config_base);
1921 	bool scorpion_event = EVENT_CPU(hwc->config_base);
1922 
1923 	if (venum_event || scorpion_event) {
1924 		/* Ignore invalid events */
1925 		if (group > 3 || region > 3)
1926 			return -EINVAL;
1927 
1928 		bit = scorpion_event_to_bit(event, region, group);
1929 		if (test_and_set_bit(bit, cpuc->used_mask))
1930 			return -EAGAIN;
1931 	}
1932 
1933 	idx = armv7pmu_get_event_idx(cpuc, event);
1934 	if (idx < 0 && bit >= 0)
1935 		clear_bit(bit, cpuc->used_mask);
1936 
1937 	return idx;
1938 }
1939 
1940 static void scorpion_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
1941 				      struct perf_event *event)
1942 {
1943 	int bit;
1944 	struct hw_perf_event *hwc = &event->hw;
1945 	unsigned int region = EVENT_REGION(hwc->config_base);
1946 	unsigned int group = EVENT_GROUP(hwc->config_base);
1947 	bool venum_event = EVENT_VENUM(hwc->config_base);
1948 	bool scorpion_event = EVENT_CPU(hwc->config_base);
1949 
1950 	if (venum_event || scorpion_event) {
1951 		bit = scorpion_event_to_bit(event, region, group);
1952 		clear_bit(bit, cpuc->used_mask);
1953 	}
1954 }
1955 
1956 static int scorpion_pmu_init(struct arm_pmu *cpu_pmu)
1957 {
1958 	armv7pmu_init(cpu_pmu);
1959 	cpu_pmu->name		= "armv7_scorpion";
1960 	cpu_pmu->map_event	= scorpion_map_event;
1961 	cpu_pmu->reset		= scorpion_pmu_reset;
1962 	cpu_pmu->enable		= scorpion_pmu_enable_event;
1963 	cpu_pmu->disable	= scorpion_pmu_disable_event;
1964 	cpu_pmu->get_event_idx	= scorpion_pmu_get_event_idx;
1965 	cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx;
1966 	return armv7_probe_num_events(cpu_pmu);
1967 }
1968 
1969 static int scorpion_mp_pmu_init(struct arm_pmu *cpu_pmu)
1970 {
1971 	armv7pmu_init(cpu_pmu);
1972 	cpu_pmu->name		= "armv7_scorpion_mp";
1973 	cpu_pmu->map_event	= scorpion_map_event;
1974 	cpu_pmu->reset		= scorpion_pmu_reset;
1975 	cpu_pmu->enable		= scorpion_pmu_enable_event;
1976 	cpu_pmu->disable	= scorpion_pmu_disable_event;
1977 	cpu_pmu->get_event_idx	= scorpion_pmu_get_event_idx;
1978 	cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx;
1979 	return armv7_probe_num_events(cpu_pmu);
1980 }
1981 
1982 static const struct of_device_id armv7_pmu_of_device_ids[] = {
1983 	{.compatible = "arm,cortex-a17-pmu",	.data = armv7_a17_pmu_init},
1984 	{.compatible = "arm,cortex-a15-pmu",	.data = armv7_a15_pmu_init},
1985 	{.compatible = "arm,cortex-a12-pmu",	.data = armv7_a12_pmu_init},
1986 	{.compatible = "arm,cortex-a9-pmu",	.data = armv7_a9_pmu_init},
1987 	{.compatible = "arm,cortex-a8-pmu",	.data = armv7_a8_pmu_init},
1988 	{.compatible = "arm,cortex-a7-pmu",	.data = armv7_a7_pmu_init},
1989 	{.compatible = "arm,cortex-a5-pmu",	.data = armv7_a5_pmu_init},
1990 	{.compatible = "qcom,krait-pmu",	.data = krait_pmu_init},
1991 	{.compatible = "qcom,scorpion-pmu",	.data = scorpion_pmu_init},
1992 	{.compatible = "qcom,scorpion-mp-pmu",	.data = scorpion_mp_pmu_init},
1993 	{},
1994 };
1995 
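/* Fallback CPUID-based probing for CPUs without a devicetree PMU node */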
1996 static const struct pmu_probe_info armv7_pmu_probe_table[] = {
1997 	ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A8, armv7_a8_pmu_init),
1998 	ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A9, armv7_a9_pmu_init),
1999 	{ /* sentinel value */ }
2000 };
2001 
2002 
2003 static int armv7_pmu_device_probe(struct platform_device *pdev)
2004 {
2005 	return arm_pmu_device_probe(pdev, armv7_pmu_of_device_ids,
2006 				    armv7_pmu_probe_table);
2007 }
2008 
2009 static struct platform_driver armv7_pmu_driver = {
2010 	.driver		= {
2011 		.name	= "armv7-pmu",
2012 		.of_match_table = armv7_pmu_of_device_ids,
2013 	},
2014 	.probe		= armv7_pmu_device_probe,
2015 };
2016 
2017 static int __init register_armv7_pmu_driver(void)
2018 {
2019 	return platform_driver_register(&armv7_pmu_driver);
2020 }
2021 device_initcall(register_armv7_pmu_driver);
2022 #endif	/* CONFIG_CPU_V7 */
2023