xref: /openbmc/linux/arch/arm/kernel/perf_event_v7.c (revision 762f99f4f3cb41a775b5157dd761217beba65873)
// SPDX-License-Identifier: GPL-2.0
/*
 * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
 *
 * ARMv7 support: Jean Pihet <jpihet@mvista.com>
 * 2010 (c) MontaVista Software, LLC.
 *
 * Copied from ARMv6 code, with the low level code inspired
 *  by the ARMv7 Oprofile code.
 *
 * Cortex-A8 has up to 4 configurable performance counters and
 *  a single cycle counter.
 * Cortex-A9 has up to 31 configurable performance counters and
 *  a single cycle counter.
 *
 * All counters can be enabled/disabled and IRQ masked separately. The cycle
 *  counter and all 4 performance counters together can be reset separately.
 */
1943eab878SWill Deacon 
2043eab878SWill Deacon #ifdef CONFIG_CPU_V7
21a505addcSWill Deacon 
22b7aafe99SStephen Boyd #include <asm/cp15.h>
2329ba0f37SMark Rutland #include <asm/cputype.h>
2429ba0f37SMark Rutland #include <asm/irq_regs.h>
25b7aafe99SStephen Boyd #include <asm/vfp.h>
26b7aafe99SStephen Boyd #include "../vfp/vfpinstr.h"
27b7aafe99SStephen Boyd 
2829ba0f37SMark Rutland #include <linux/of.h>
29fa8ad788SMark Rutland #include <linux/perf/arm_pmu.h>
3029ba0f37SMark Rutland #include <linux/platform_device.h>
3129ba0f37SMark Rutland 
/*
 * Common ARMv7 event types
 *
 * Note: An implementation may not be able to count all of these events
 * but the encodings are considered to be `reserved' in the case that
 * they are not available.
 */
#define ARMV7_PERFCTR_PMNC_SW_INCR			0x00
#define ARMV7_PERFCTR_L1_ICACHE_REFILL			0x01
#define ARMV7_PERFCTR_ITLB_REFILL			0x02
#define ARMV7_PERFCTR_L1_DCACHE_REFILL			0x03
#define ARMV7_PERFCTR_L1_DCACHE_ACCESS			0x04
#define ARMV7_PERFCTR_DTLB_REFILL			0x05
#define ARMV7_PERFCTR_MEM_READ				0x06
#define ARMV7_PERFCTR_MEM_WRITE				0x07
#define ARMV7_PERFCTR_INSTR_EXECUTED			0x08
#define ARMV7_PERFCTR_EXC_TAKEN				0x09
#define ARMV7_PERFCTR_EXC_EXECUTED			0x0A
#define ARMV7_PERFCTR_CID_WRITE				0x0B

/*
 * ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
 * It counts:
 *  - all (taken) branch instructions,
 *  - instructions that explicitly write the PC,
 *  - exception generating instructions.
 */
#define ARMV7_PERFCTR_PC_WRITE				0x0C
#define ARMV7_PERFCTR_PC_IMM_BRANCH			0x0D
#define ARMV7_PERFCTR_PC_PROC_RETURN			0x0E
#define ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS		0x0F
#define ARMV7_PERFCTR_PC_BRANCH_MIS_PRED		0x10
#define ARMV7_PERFCTR_CLOCK_CYCLES			0x11
#define ARMV7_PERFCTR_PC_BRANCH_PRED			0x12

/* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */
#define ARMV7_PERFCTR_MEM_ACCESS			0x13
#define ARMV7_PERFCTR_L1_ICACHE_ACCESS			0x14
#define ARMV7_PERFCTR_L1_DCACHE_WB			0x15
#define ARMV7_PERFCTR_L2_CACHE_ACCESS			0x16
#define ARMV7_PERFCTR_L2_CACHE_REFILL			0x17
#define ARMV7_PERFCTR_L2_CACHE_WB			0x18
#define ARMV7_PERFCTR_BUS_ACCESS			0x19
#define ARMV7_PERFCTR_MEM_ERROR				0x1A
#define ARMV7_PERFCTR_INSTR_SPEC			0x1B
#define ARMV7_PERFCTR_TTBR_WRITE			0x1C
#define ARMV7_PERFCTR_BUS_CYCLES			0x1D

/* NOTE(review): 0xFF looks like a driver-internal marker for the dedicated
 * cycle counter rather than an architected event number — confirm against
 * the event-map users elsewhere in this file. */
#define ARMV7_PERFCTR_CPU_CYCLES			0xFF
8143eab878SWill Deacon 
/* ARMv7 Cortex-A8 specific event types */
#define ARMV7_A8_PERFCTR_L2_CACHE_ACCESS		0x43
#define ARMV7_A8_PERFCTR_L2_CACHE_REFILL		0x44
#define ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS		0x50
#define ARMV7_A8_PERFCTR_STALL_ISIDE			0x56

/* ARMv7 Cortex-A9 specific event types */
#define ARMV7_A9_PERFCTR_INSTR_CORE_RENAME		0x68
#define ARMV7_A9_PERFCTR_STALL_ICACHE			0x60
#define ARMV7_A9_PERFCTR_STALL_DISPATCH			0x66

/* ARMv7 Cortex-A5 specific event types */
#define ARMV7_A5_PERFCTR_PREFETCH_LINEFILL		0xc2
#define ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP		0xc3

/* ARMv7 Cortex-A15 specific event types */
#define ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ		0x40
#define ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE	0x41
#define ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ		0x42
#define ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE	0x43

#define ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ		0x4C
#define ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE		0x4D

#define ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ		0x50
#define ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE		0x51
#define ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ		0x52
#define ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE		0x53

#define ARMV7_A15_PERFCTR_PC_WRITE_SPEC			0x76

/* ARMv7 Cortex-A12 specific event types */
#define ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ		0x40
#define ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE	0x41

#define ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ		0x50
#define ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE		0x51

#define ARMV7_A12_PERFCTR_PC_WRITE_SPEC			0x76

#define ARMV7_A12_PERFCTR_PF_TLB_REFILL			0xe7

/* ARMv7 Krait specific event types */
#define KRAIT_PMRESR0_GROUP0				0xcc
#define KRAIT_PMRESR1_GROUP0				0xd0
#define KRAIT_PMRESR2_GROUP0				0xd4
#define KRAIT_VPMRESR0_GROUP0				0xd8

#define KRAIT_PERFCTR_L1_ICACHE_ACCESS			0x10011
#define KRAIT_PERFCTR_L1_ICACHE_MISS			0x10010

#define KRAIT_PERFCTR_L1_ITLB_ACCESS			0x12222
#define KRAIT_PERFCTR_L1_DTLB_ACCESS			0x12210

/* ARMv7 Scorpion specific event types */
#define SCORPION_LPM0_GROUP0				0x4c
#define SCORPION_LPM1_GROUP0				0x50
#define SCORPION_LPM2_GROUP0				0x54
#define SCORPION_L2LPM_GROUP0				0x58
#define SCORPION_VLPM_GROUP0				0x5c

#define SCORPION_ICACHE_ACCESS				0x10053
#define SCORPION_ICACHE_MISS				0x10052

#define SCORPION_DTLB_ACCESS				0x12013
#define SCORPION_DTLB_MISS				0x12012

#define SCORPION_ITLB_MISS				0x12021
150341e42c4SStephen Boyd 
15143eab878SWill Deacon /*
15243eab878SWill Deacon  * Cortex-A8 HW events mapping
15343eab878SWill Deacon  *
15443eab878SWill Deacon  * The hardware events that we support. We do support cache operations but
15543eab878SWill Deacon  * we have harvard caches and no way to combine instruction and data
15643eab878SWill Deacon  * accesses/misses in hardware.
15743eab878SWill Deacon  */
15843eab878SWill Deacon static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
1596b7658ecSMark Rutland 	PERF_MAP_ALL_UNSUPPORTED,
16043eab878SWill Deacon 	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
16143eab878SWill Deacon 	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
1624d301512SWill Deacon 	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
1634d301512SWill Deacon 	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
16443eab878SWill Deacon 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
16543eab878SWill Deacon 	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
1660445e7a5SWill Deacon 	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV7_A8_PERFCTR_STALL_ISIDE,
16743eab878SWill Deacon };
16843eab878SWill Deacon 
16943eab878SWill Deacon static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
17043eab878SWill Deacon 					  [PERF_COUNT_HW_CACHE_OP_MAX]
17143eab878SWill Deacon 					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1726b7658ecSMark Rutland 	PERF_CACHE_MAP_ALL_UNSUPPORTED,
1736b7658ecSMark Rutland 
17443eab878SWill Deacon 	/*
1756b7658ecSMark Rutland 	 * The performance counters don't differentiate between read and write
1766b7658ecSMark Rutland 	 * accesses/misses so this isn't strictly correct, but it's the best we
1776b7658ecSMark Rutland 	 * can do. Writes and reads get combined.
17843eab878SWill Deacon 	 */
1796b7658ecSMark Rutland 	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
1806b7658ecSMark Rutland 	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
1816b7658ecSMark Rutland 	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
1826b7658ecSMark Rutland 	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
1836b7658ecSMark Rutland 
1846b7658ecSMark Rutland 	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
1856b7658ecSMark Rutland 	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
1866b7658ecSMark Rutland 
1876b7658ecSMark Rutland 	[C(LL)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
1886b7658ecSMark Rutland 	[C(LL)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
1896b7658ecSMark Rutland 	[C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
1906b7658ecSMark Rutland 	[C(LL)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
1916b7658ecSMark Rutland 
1926b7658ecSMark Rutland 	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
1936b7658ecSMark Rutland 	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
1946b7658ecSMark Rutland 
1956b7658ecSMark Rutland 	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
1966b7658ecSMark Rutland 	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
1976b7658ecSMark Rutland 
1986b7658ecSMark Rutland 	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
1996b7658ecSMark Rutland 	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
2006b7658ecSMark Rutland 	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
2016b7658ecSMark Rutland 	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
20243eab878SWill Deacon };
20343eab878SWill Deacon 
20443eab878SWill Deacon /*
20543eab878SWill Deacon  * Cortex-A9 HW events mapping
20643eab878SWill Deacon  */
20743eab878SWill Deacon static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
2086b7658ecSMark Rutland 	PERF_MAP_ALL_UNSUPPORTED,
20943eab878SWill Deacon 	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
2104d301512SWill Deacon 	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_A9_PERFCTR_INSTR_CORE_RENAME,
2114d301512SWill Deacon 	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
2124d301512SWill Deacon 	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
21343eab878SWill Deacon 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
21443eab878SWill Deacon 	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
2150445e7a5SWill Deacon 	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV7_A9_PERFCTR_STALL_ICACHE,
2160445e7a5SWill Deacon 	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= ARMV7_A9_PERFCTR_STALL_DISPATCH,
21743eab878SWill Deacon };
21843eab878SWill Deacon 
21943eab878SWill Deacon static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
22043eab878SWill Deacon 					  [PERF_COUNT_HW_CACHE_OP_MAX]
22143eab878SWill Deacon 					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
2226b7658ecSMark Rutland 	PERF_CACHE_MAP_ALL_UNSUPPORTED,
2236b7658ecSMark Rutland 
22443eab878SWill Deacon 	/*
2256b7658ecSMark Rutland 	 * The performance counters don't differentiate between read and write
2266b7658ecSMark Rutland 	 * accesses/misses so this isn't strictly correct, but it's the best we
2276b7658ecSMark Rutland 	 * can do. Writes and reads get combined.
22843eab878SWill Deacon 	 */
2296b7658ecSMark Rutland 	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
2306b7658ecSMark Rutland 	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
2316b7658ecSMark Rutland 	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
2326b7658ecSMark Rutland 	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
2336b7658ecSMark Rutland 
2346b7658ecSMark Rutland 	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
2356b7658ecSMark Rutland 
2366b7658ecSMark Rutland 	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
2376b7658ecSMark Rutland 	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
2386b7658ecSMark Rutland 
2396b7658ecSMark Rutland 	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
2406b7658ecSMark Rutland 	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
2416b7658ecSMark Rutland 
2426b7658ecSMark Rutland 	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
2436b7658ecSMark Rutland 	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
2446b7658ecSMark Rutland 	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
2456b7658ecSMark Rutland 	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
24643eab878SWill Deacon };
24743eab878SWill Deacon 
24843eab878SWill Deacon /*
2490c205cbeSWill Deacon  * Cortex-A5 HW events mapping
2500c205cbeSWill Deacon  */
2510c205cbeSWill Deacon static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = {
2526b7658ecSMark Rutland 	PERF_MAP_ALL_UNSUPPORTED,
2530c205cbeSWill Deacon 	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
2540c205cbeSWill Deacon 	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
2554d301512SWill Deacon 	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
2564d301512SWill Deacon 	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
2570c205cbeSWill Deacon 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
2580c205cbeSWill Deacon 	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
2590c205cbeSWill Deacon };
2600c205cbeSWill Deacon 
2610c205cbeSWill Deacon static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
2620c205cbeSWill Deacon 					[PERF_COUNT_HW_CACHE_OP_MAX]
2630c205cbeSWill Deacon 					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
2646b7658ecSMark Rutland 	PERF_CACHE_MAP_ALL_UNSUPPORTED,
2656b7658ecSMark Rutland 
2666b7658ecSMark Rutland 	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
2676b7658ecSMark Rutland 	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
2686b7658ecSMark Rutland 	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
2696b7658ecSMark Rutland 	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
2706b7658ecSMark Rutland 	[C(L1D)][C(OP_PREFETCH)][C(RESULT_ACCESS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
2716b7658ecSMark Rutland 	[C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
2726b7658ecSMark Rutland 
2736b7658ecSMark Rutland 	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
2746b7658ecSMark Rutland 	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
2750c205cbeSWill Deacon 	/*
2766b7658ecSMark Rutland 	 * The prefetch counters don't differentiate between the I side and the
2776b7658ecSMark Rutland 	 * D side.
2780c205cbeSWill Deacon 	 */
2796b7658ecSMark Rutland 	[C(L1I)][C(OP_PREFETCH)][C(RESULT_ACCESS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
2806b7658ecSMark Rutland 	[C(L1I)][C(OP_PREFETCH)][C(RESULT_MISS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
2816b7658ecSMark Rutland 
2826b7658ecSMark Rutland 	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
2836b7658ecSMark Rutland 	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
2846b7658ecSMark Rutland 
2856b7658ecSMark Rutland 	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
2866b7658ecSMark Rutland 	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
2876b7658ecSMark Rutland 
2886b7658ecSMark Rutland 	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
2896b7658ecSMark Rutland 	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
2906b7658ecSMark Rutland 	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
2916b7658ecSMark Rutland 	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
2920c205cbeSWill Deacon };
2930c205cbeSWill Deacon 
2940c205cbeSWill Deacon /*
29514abd038SWill Deacon  * Cortex-A15 HW events mapping
29614abd038SWill Deacon  */
29714abd038SWill Deacon static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = {
2986b7658ecSMark Rutland 	PERF_MAP_ALL_UNSUPPORTED,
29914abd038SWill Deacon 	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
30014abd038SWill Deacon 	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
3014d301512SWill Deacon 	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
3024d301512SWill Deacon 	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
3034d301512SWill Deacon 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_A15_PERFCTR_PC_WRITE_SPEC,
30414abd038SWill Deacon 	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
30514abd038SWill Deacon 	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
30614abd038SWill Deacon };
30714abd038SWill Deacon 
30814abd038SWill Deacon static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
30914abd038SWill Deacon 					[PERF_COUNT_HW_CACHE_OP_MAX]
31014abd038SWill Deacon 					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
3116b7658ecSMark Rutland 	PERF_CACHE_MAP_ALL_UNSUPPORTED,
3126b7658ecSMark Rutland 
3136b7658ecSMark Rutland 	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ,
3146b7658ecSMark Rutland 	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ,
3156b7658ecSMark Rutland 	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE,
3166b7658ecSMark Rutland 	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE,
3176b7658ecSMark Rutland 
31814abd038SWill Deacon 	/*
3196b7658ecSMark Rutland 	 * Not all performance counters differentiate between read and write
3206b7658ecSMark Rutland 	 * accesses/misses so we're not always strictly correct, but it's the
3216b7658ecSMark Rutland 	 * best we can do. Writes and reads get combined in these cases.
32214abd038SWill Deacon 	 */
3236b7658ecSMark Rutland 	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
3246b7658ecSMark Rutland 	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
3256b7658ecSMark Rutland 
3266b7658ecSMark Rutland 	[C(LL)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ,
3276b7658ecSMark Rutland 	[C(LL)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ,
3286b7658ecSMark Rutland 	[C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE,
3296b7658ecSMark Rutland 	[C(LL)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE,
3306b7658ecSMark Rutland 
3316b7658ecSMark Rutland 	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ,
3326b7658ecSMark Rutland 	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE,
3336b7658ecSMark Rutland 
3346b7658ecSMark Rutland 	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
3356b7658ecSMark Rutland 	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
3366b7658ecSMark Rutland 
3376b7658ecSMark Rutland 	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
3386b7658ecSMark Rutland 	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
3396b7658ecSMark Rutland 	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
3406b7658ecSMark Rutland 	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
34114abd038SWill Deacon };
34214abd038SWill Deacon 
34314abd038SWill Deacon /*
344d33c88c6SWill Deacon  * Cortex-A7 HW events mapping
345d33c88c6SWill Deacon  */
346d33c88c6SWill Deacon static const unsigned armv7_a7_perf_map[PERF_COUNT_HW_MAX] = {
3476b7658ecSMark Rutland 	PERF_MAP_ALL_UNSUPPORTED,
348d33c88c6SWill Deacon 	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
349d33c88c6SWill Deacon 	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
350d33c88c6SWill Deacon 	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
351d33c88c6SWill Deacon 	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
352d33c88c6SWill Deacon 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
353d33c88c6SWill Deacon 	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
354d33c88c6SWill Deacon 	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
355d33c88c6SWill Deacon };
356d33c88c6SWill Deacon 
357d33c88c6SWill Deacon static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
358d33c88c6SWill Deacon 					[PERF_COUNT_HW_CACHE_OP_MAX]
359d33c88c6SWill Deacon 					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
3606b7658ecSMark Rutland 	PERF_CACHE_MAP_ALL_UNSUPPORTED,
3616b7658ecSMark Rutland 
362d33c88c6SWill Deacon 	/*
3636b7658ecSMark Rutland 	 * The performance counters don't differentiate between read and write
3646b7658ecSMark Rutland 	 * accesses/misses so this isn't strictly correct, but it's the best we
3656b7658ecSMark Rutland 	 * can do. Writes and reads get combined.
366d33c88c6SWill Deacon 	 */
3676b7658ecSMark Rutland 	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
3686b7658ecSMark Rutland 	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
3696b7658ecSMark Rutland 	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
3706b7658ecSMark Rutland 	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
3716b7658ecSMark Rutland 
3726b7658ecSMark Rutland 	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
3736b7658ecSMark Rutland 	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
3746b7658ecSMark Rutland 
3756b7658ecSMark Rutland 	[C(LL)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_CACHE_ACCESS,
3766b7658ecSMark Rutland 	[C(LL)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
3776b7658ecSMark Rutland 	[C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_CACHE_ACCESS,
3786b7658ecSMark Rutland 	[C(LL)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
3796b7658ecSMark Rutland 
3806b7658ecSMark Rutland 	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
3816b7658ecSMark Rutland 	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
3826b7658ecSMark Rutland 
3836b7658ecSMark Rutland 	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
3846b7658ecSMark Rutland 	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
3856b7658ecSMark Rutland 
3866b7658ecSMark Rutland 	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
3876b7658ecSMark Rutland 	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
3886b7658ecSMark Rutland 	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
3896b7658ecSMark Rutland 	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
390d33c88c6SWill Deacon };
391d33c88c6SWill Deacon 
392d33c88c6SWill Deacon /*
3938e781f65SAlbin Tonnerre  * Cortex-A12 HW events mapping
3948e781f65SAlbin Tonnerre  */
3958e781f65SAlbin Tonnerre static const unsigned armv7_a12_perf_map[PERF_COUNT_HW_MAX] = {
3966b7658ecSMark Rutland 	PERF_MAP_ALL_UNSUPPORTED,
3978e781f65SAlbin Tonnerre 	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
3988e781f65SAlbin Tonnerre 	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
3998e781f65SAlbin Tonnerre 	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
4008e781f65SAlbin Tonnerre 	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
4018e781f65SAlbin Tonnerre 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_A12_PERFCTR_PC_WRITE_SPEC,
4028e781f65SAlbin Tonnerre 	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
4038e781f65SAlbin Tonnerre 	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
4048e781f65SAlbin Tonnerre };
4058e781f65SAlbin Tonnerre 
4068e781f65SAlbin Tonnerre static const unsigned armv7_a12_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
4078e781f65SAlbin Tonnerre 					[PERF_COUNT_HW_CACHE_OP_MAX]
4088e781f65SAlbin Tonnerre 					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
4096b7658ecSMark Rutland 	PERF_CACHE_MAP_ALL_UNSUPPORTED,
4106b7658ecSMark Rutland 
4116b7658ecSMark Rutland 	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ,
4126b7658ecSMark Rutland 	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
4136b7658ecSMark Rutland 	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE,
4146b7658ecSMark Rutland 	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
4156b7658ecSMark Rutland 
4168e781f65SAlbin Tonnerre 	/*
4176b7658ecSMark Rutland 	 * Not all performance counters differentiate between read and write
4186b7658ecSMark Rutland 	 * accesses/misses so we're not always strictly correct, but it's the
4196b7658ecSMark Rutland 	 * best we can do. Writes and reads get combined in these cases.
4208e781f65SAlbin Tonnerre 	 */
4216b7658ecSMark Rutland 	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
4226b7658ecSMark Rutland 	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
4236b7658ecSMark Rutland 
4246b7658ecSMark Rutland 	[C(LL)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ,
4256b7658ecSMark Rutland 	[C(LL)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
4266b7658ecSMark Rutland 	[C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE,
4276b7658ecSMark Rutland 	[C(LL)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
4286b7658ecSMark Rutland 
4296b7658ecSMark Rutland 	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
4306b7658ecSMark Rutland 	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
4316b7658ecSMark Rutland 	[C(DTLB)][C(OP_PREFETCH)][C(RESULT_MISS)]	= ARMV7_A12_PERFCTR_PF_TLB_REFILL,
4326b7658ecSMark Rutland 
4336b7658ecSMark Rutland 	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
4346b7658ecSMark Rutland 	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
4356b7658ecSMark Rutland 
4366b7658ecSMark Rutland 	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
4376b7658ecSMark Rutland 	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
4386b7658ecSMark Rutland 	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
4396b7658ecSMark Rutland 	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
4408e781f65SAlbin Tonnerre };
4418e781f65SAlbin Tonnerre 
4428e781f65SAlbin Tonnerre /*
4432a3391cdSStephen Boyd  * Krait HW events mapping
4442a3391cdSStephen Boyd  */
4452a3391cdSStephen Boyd static const unsigned krait_perf_map[PERF_COUNT_HW_MAX] = {
4466b7658ecSMark Rutland 	PERF_MAP_ALL_UNSUPPORTED,
4472a3391cdSStephen Boyd 	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV7_PERFCTR_CPU_CYCLES,
4482a3391cdSStephen Boyd 	[PERF_COUNT_HW_INSTRUCTIONS]	    = ARMV7_PERFCTR_INSTR_EXECUTED,
4492a3391cdSStephen Boyd 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
4502a3391cdSStephen Boyd 	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
4512a3391cdSStephen Boyd 	[PERF_COUNT_HW_BUS_CYCLES]	    = ARMV7_PERFCTR_CLOCK_CYCLES,
4522a3391cdSStephen Boyd };
4532a3391cdSStephen Boyd 
4542a3391cdSStephen Boyd static const unsigned krait_perf_map_no_branch[PERF_COUNT_HW_MAX] = {
4556b7658ecSMark Rutland 	PERF_MAP_ALL_UNSUPPORTED,
4562a3391cdSStephen Boyd 	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV7_PERFCTR_CPU_CYCLES,
4572a3391cdSStephen Boyd 	[PERF_COUNT_HW_INSTRUCTIONS]	    = ARMV7_PERFCTR_INSTR_EXECUTED,
4582a3391cdSStephen Boyd 	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
4592a3391cdSStephen Boyd 	[PERF_COUNT_HW_BUS_CYCLES]	    = ARMV7_PERFCTR_CLOCK_CYCLES,
4602a3391cdSStephen Boyd };
4612a3391cdSStephen Boyd 
/*
 * Krait HW cache events mapping.  Entries not listed below are marked
 * unsupported by PERF_CACHE_MAP_ALL_UNSUPPORTED.  L1D and BPU use the
 * common ARMv7 encodings; L1I and the TLBs use Krait-specific events
 * (KRAIT_ prefix).
 */
static const unsigned krait_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					  [PERF_COUNT_HW_CACHE_OP_MAX]
					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	/*
	 * The performance counters don't differentiate between read and write
	 * accesses/misses so this isn't strictly correct, but it's the best we
	 * can do. Writes and reads get combined.
	 */
	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,

	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_ICACHE_ACCESS,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= KRAIT_PERFCTR_L1_ICACHE_MISS,

	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_DTLB_ACCESS,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_DTLB_ACCESS,

	[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_ITLB_ACCESS,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_ITLB_ACCESS,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};
4912a3391cdSStephen Boyd 
/*
 * Scorpion HW events mapping: generic perf events that can be served
 * by the common ARMv7 event encodings.  Everything else is unsupported.
 */
static const unsigned scorpion_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]	    = ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]	    = ARMV7_PERFCTR_CLOCK_CYCLES,
};
503341e42c4SStephen Boyd 
/*
 * Scorpion HW cache events mapping.  Entries not listed below are
 * marked unsupported by PERF_CACHE_MAP_ALL_UNSUPPORTED.
 */
static const unsigned scorpion_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					    [PERF_COUNT_HW_CACHE_OP_MAX]
					    [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,
	/*
	 * The performance counters don't differentiate between read and write
	 * accesses/misses so this isn't strictly correct, but it's the best we
	 * can do. Writes and reads get combined.
	 */
	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = SCORPION_ICACHE_ACCESS,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_ICACHE_MISS,
	/*
	 * Only ITLB misses are wired up for the ITLB (no access event
	 * below); users wanting ITLB access counts must use a raw counter.
	 */
	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = SCORPION_DTLB_ACCESS,
	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_DTLB_MISS,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = SCORPION_DTLB_ACCESS,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = SCORPION_DTLB_MISS,
	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_ITLB_MISS,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = SCORPION_ITLB_MISS,
	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};
534341e42c4SStephen Boyd 
/*
 * Single "event" config field in bits 0-7 of perf_event_attr::config,
 * matching the ARMV7_EVTYPE_EVENT mask below.
 */
PMU_FORMAT_ATTR(event, "config:0-7");

static struct attribute *armv7_pmu_format_attrs[] = {
	&format_attr_event.attr,
	NULL,
};

/* Exposed to userspace as the <pmu>/format/ sysfs directory. */
static struct attribute_group armv7_pmu_format_attr_group = {
	.name = "format",
	.attrs = armv7_pmu_format_attrs,
};
546abff083cSWill Deacon 
/*
 * Stringify @m *after* macro expansion: the extra level of indirection
 * makes the ARMV7_PERFCTR_* constant expand to its numeric value before
 * the '#' operator is applied, so the sysfs string contains the event
 * number rather than the constant's name.
 */
#define ARMV7_EVENT_ATTR_RESOLVE(m) #m
/*
 * Define a sysfs event attribute called @name whose contents are
 * "event=<config>", for the <pmu>/events/ directory.
 */
#define ARMV7_EVENT_ATTR(name, config) \
	PMU_EVENT_ATTR_STRING(name, armv7_event_attr_##name, \
			      "event=" ARMV7_EVENT_ATTR_RESOLVE(config))

ARMV7_EVENT_ATTR(sw_incr, ARMV7_PERFCTR_PMNC_SW_INCR);
ARMV7_EVENT_ATTR(l1i_cache_refill, ARMV7_PERFCTR_L1_ICACHE_REFILL);
ARMV7_EVENT_ATTR(l1i_tlb_refill, ARMV7_PERFCTR_ITLB_REFILL);
ARMV7_EVENT_ATTR(l1d_cache_refill, ARMV7_PERFCTR_L1_DCACHE_REFILL);
ARMV7_EVENT_ATTR(l1d_cache, ARMV7_PERFCTR_L1_DCACHE_ACCESS);
ARMV7_EVENT_ATTR(l1d_tlb_refill, ARMV7_PERFCTR_DTLB_REFILL);
ARMV7_EVENT_ATTR(ld_retired, ARMV7_PERFCTR_MEM_READ);
ARMV7_EVENT_ATTR(st_retired, ARMV7_PERFCTR_MEM_WRITE);
ARMV7_EVENT_ATTR(inst_retired, ARMV7_PERFCTR_INSTR_EXECUTED);
ARMV7_EVENT_ATTR(exc_taken, ARMV7_PERFCTR_EXC_TAKEN);
ARMV7_EVENT_ATTR(exc_return, ARMV7_PERFCTR_EXC_EXECUTED);
ARMV7_EVENT_ATTR(cid_write_retired, ARMV7_PERFCTR_CID_WRITE);
ARMV7_EVENT_ATTR(pc_write_retired, ARMV7_PERFCTR_PC_WRITE);
ARMV7_EVENT_ATTR(br_immed_retired, ARMV7_PERFCTR_PC_IMM_BRANCH);
ARMV7_EVENT_ATTR(br_return_retired, ARMV7_PERFCTR_PC_PROC_RETURN);
ARMV7_EVENT_ATTR(unaligned_ldst_retired, ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS);
ARMV7_EVENT_ATTR(br_mis_pred, ARMV7_PERFCTR_PC_BRANCH_MIS_PRED);
ARMV7_EVENT_ATTR(cpu_cycles, ARMV7_PERFCTR_CLOCK_CYCLES);
ARMV7_EVENT_ATTR(br_pred, ARMV7_PERFCTR_PC_BRANCH_PRED);

/* Named events exported for PMUv1 implementations. */
static struct attribute *armv7_pmuv1_event_attrs[] = {
	&armv7_event_attr_sw_incr.attr.attr,
	&armv7_event_attr_l1i_cache_refill.attr.attr,
	&armv7_event_attr_l1i_tlb_refill.attr.attr,
	&armv7_event_attr_l1d_cache_refill.attr.attr,
	&armv7_event_attr_l1d_cache.attr.attr,
	&armv7_event_attr_l1d_tlb_refill.attr.attr,
	&armv7_event_attr_ld_retired.attr.attr,
	&armv7_event_attr_st_retired.attr.attr,
	&armv7_event_attr_inst_retired.attr.attr,
	&armv7_event_attr_exc_taken.attr.attr,
	&armv7_event_attr_exc_return.attr.attr,
	&armv7_event_attr_cid_write_retired.attr.attr,
	&armv7_event_attr_pc_write_retired.attr.attr,
	&armv7_event_attr_br_immed_retired.attr.attr,
	&armv7_event_attr_br_return_retired.attr.attr,
	&armv7_event_attr_unaligned_ldst_retired.attr.attr,
	&armv7_event_attr_br_mis_pred.attr.attr,
	&armv7_event_attr_cpu_cycles.attr.attr,
	&armv7_event_attr_br_pred.attr.attr,
	NULL,
};

static struct attribute_group armv7_pmuv1_events_attr_group = {
	.name = "events",
	.attrs = armv7_pmuv1_event_attrs,
};
5993fbac6ccSDrew Richardson 
/* Additional events available on PMUv2, on top of the PMUv1 set. */
ARMV7_EVENT_ATTR(mem_access, ARMV7_PERFCTR_MEM_ACCESS);
ARMV7_EVENT_ATTR(l1i_cache, ARMV7_PERFCTR_L1_ICACHE_ACCESS);
ARMV7_EVENT_ATTR(l1d_cache_wb, ARMV7_PERFCTR_L1_DCACHE_WB);
ARMV7_EVENT_ATTR(l2d_cache, ARMV7_PERFCTR_L2_CACHE_ACCESS);
ARMV7_EVENT_ATTR(l2d_cache_refill, ARMV7_PERFCTR_L2_CACHE_REFILL);
ARMV7_EVENT_ATTR(l2d_cache_wb, ARMV7_PERFCTR_L2_CACHE_WB);
ARMV7_EVENT_ATTR(bus_access, ARMV7_PERFCTR_BUS_ACCESS);
ARMV7_EVENT_ATTR(memory_error, ARMV7_PERFCTR_MEM_ERROR);
ARMV7_EVENT_ATTR(inst_spec, ARMV7_PERFCTR_INSTR_SPEC);
ARMV7_EVENT_ATTR(ttbr_write_retired, ARMV7_PERFCTR_TTBR_WRITE);
ARMV7_EVENT_ATTR(bus_cycles, ARMV7_PERFCTR_BUS_CYCLES);

/* Named events exported for PMUv2: the full PMUv1 list plus the above. */
static struct attribute *armv7_pmuv2_event_attrs[] = {
	&armv7_event_attr_sw_incr.attr.attr,
	&armv7_event_attr_l1i_cache_refill.attr.attr,
	&armv7_event_attr_l1i_tlb_refill.attr.attr,
	&armv7_event_attr_l1d_cache_refill.attr.attr,
	&armv7_event_attr_l1d_cache.attr.attr,
	&armv7_event_attr_l1d_tlb_refill.attr.attr,
	&armv7_event_attr_ld_retired.attr.attr,
	&armv7_event_attr_st_retired.attr.attr,
	&armv7_event_attr_inst_retired.attr.attr,
	&armv7_event_attr_exc_taken.attr.attr,
	&armv7_event_attr_exc_return.attr.attr,
	&armv7_event_attr_cid_write_retired.attr.attr,
	&armv7_event_attr_pc_write_retired.attr.attr,
	&armv7_event_attr_br_immed_retired.attr.attr,
	&armv7_event_attr_br_return_retired.attr.attr,
	&armv7_event_attr_unaligned_ldst_retired.attr.attr,
	&armv7_event_attr_br_mis_pred.attr.attr,
	&armv7_event_attr_cpu_cycles.attr.attr,
	&armv7_event_attr_br_pred.attr.attr,
	&armv7_event_attr_mem_access.attr.attr,
	&armv7_event_attr_l1i_cache.attr.attr,
	&armv7_event_attr_l1d_cache_wb.attr.attr,
	&armv7_event_attr_l2d_cache.attr.attr,
	&armv7_event_attr_l2d_cache_refill.attr.attr,
	&armv7_event_attr_l2d_cache_wb.attr.attr,
	&armv7_event_attr_bus_access.attr.attr,
	&armv7_event_attr_memory_error.attr.attr,
	&armv7_event_attr_inst_spec.attr.attr,
	&armv7_event_attr_ttbr_write_retired.attr.attr,
	&armv7_event_attr_bus_cycles.attr.attr,
	NULL,
};

static struct attribute_group armv7_pmuv2_events_attr_group = {
	.name = "events",
	.attrs = armv7_pmuv2_event_attrs,
};
6503fbac6ccSDrew Richardson 
/*
 * Perf Events' indices
 *
 * The dedicated cycle counter occupies index 0; the programmable event
 * counters follow from index 1 (ARMV7_IDX_COUNTER0) up to
 * ARMV7_IDX_COUNTER_LAST(), which depends on the number of counters
 * the PMU implementation reports.
 */
#define	ARMV7_IDX_CYCLE_COUNTER	0
#define	ARMV7_IDX_COUNTER0	1
#define	ARMV7_IDX_COUNTER_LAST(cpu_pmu) \
	(ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)

#define	ARMV7_MAX_COUNTERS	32
#define	ARMV7_COUNTER_MASK	(ARMV7_MAX_COUNTERS - 1)

/*
 * ARMv7 low level PMNC access
 */

/*
 * Perf Event to low level counters mapping: subtract ARMV7_IDX_COUNTER0
 * so that perf index 1 maps to hardware event counter 0, then mask to
 * the 5-bit counter number.
 */
#define	ARMV7_IDX_TO_COUNTER(x)	\
	(((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK)

/*
 * Per-CPU PMNC: config reg
 */
#define ARMV7_PMNC_E		(1 << 0) /* Enable all counters */
#define ARMV7_PMNC_P		(1 << 1) /* Reset all counters */
#define ARMV7_PMNC_C		(1 << 2) /* Cycle counter reset */
#define ARMV7_PMNC_D		(1 << 3) /* CCNT counts every 64th cpu cycle */
#define ARMV7_PMNC_X		(1 << 4) /* Export to ETM */
#define ARMV7_PMNC_DP		(1 << 5) /* Disable CCNT if non-invasive debug*/
#define	ARMV7_PMNC_N_SHIFT	11	 /* Number of counters supported */
#define	ARMV7_PMNC_N_MASK	0x1f
#define	ARMV7_PMNC_MASK		0x3f	 /* Mask for writable bits */

/*
 * FLAG: counters overflow flag status reg
 */
#define	ARMV7_FLAG_MASK		0xffffffff	/* Mask for writable bits */
#define	ARMV7_OVERFLOWED_MASK	ARMV7_FLAG_MASK

/*
 * PMXEVTYPER: Event selection reg
 */
#define	ARMV7_EVTYPE_MASK	0xc80000ff	/* Mask for writable bits */
#define	ARMV7_EVTYPE_EVENT	0xff		/* Mask for EVENT bits */

/*
 * Event filters for PMUv2
 */
#define	ARMV7_EXCLUDE_PL1	BIT(31)
#define	ARMV7_EXCLUDE_USER	BIT(30)
#define	ARMV7_INCLUDE_HYP	BIT(27)

/*
 * Secure debug enable reg
 */
#define ARMV7_SDER_SUNIDEN	BIT(1) /* Permit non-invasive debug */
7088d1a0ae7SMartin Fuzzey 
/* Read the PMNC (performance monitor control) register. */
static inline u32 armv7_pmnc_read(void)
{
	u32 val;
	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
	return val;
}
71543eab878SWill Deacon 
/*
 * Write the PMNC register.  Only the architecturally writable bits are
 * kept; the isb() synchronises the pipeline before the control register
 * is updated.
 */
static inline void armv7_pmnc_write(u32 val)
{
	val &= ARMV7_PMNC_MASK;
	isb();
	asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
}
72243eab878SWill Deacon 
/* Non-zero iff any counter overflow flag is set in @pmnc (FLAG reg value). */
static inline int armv7_pmnc_has_overflowed(u32 pmnc)
{
	return pmnc & ARMV7_OVERFLOWED_MASK;
}
72743eab878SWill Deacon 
armv7_pmnc_counter_valid(struct arm_pmu * cpu_pmu,int idx)7287279adbdSSudeep KarkadaNagesha static inline int armv7_pmnc_counter_valid(struct arm_pmu *cpu_pmu, int idx)
729c691bb62SWill Deacon {
7307279adbdSSudeep KarkadaNagesha 	return idx >= ARMV7_IDX_CYCLE_COUNTER &&
7317279adbdSSudeep KarkadaNagesha 		idx <= ARMV7_IDX_COUNTER_LAST(cpu_pmu);
732c691bb62SWill Deacon }
733c691bb62SWill Deacon 
/*
 * Test the overflow flag for counter @idx in @pmnc (a FLAG register
 * snapshot); flag bit positions follow the hardware counter numbering.
 */
static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx)
{
	u32 ovsr_bit = BIT(ARMV7_IDX_TO_COUNTER(idx));

	return pmnc & ovsr_bit;
}
73843eab878SWill Deacon 
/*
 * Select event counter @idx via the SELECT register (c9, c12, 5) so
 * that subsequent PMXEVCNTR/PMXEVTYPER accesses target it.  The isb()
 * ensures the selection takes effect before those accesses.
 */
static inline void armv7_pmnc_select_counter(int idx)
{
	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter));
	isb();
}
74543eab878SWill Deacon 
/*
 * Read the hardware counter backing @event: CCNT for the cycle counter
 * index, otherwise the selected programmable event counter.  Returns 0
 * (and logs an error) for an out-of-range index.
 */
static inline u64 armv7pmu_read_counter(struct perf_event *event)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	u32 value = 0;

	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
		pr_err("CPU%u reading wrong counter %d\n",
			smp_processor_id(), idx);
	} else if (idx == ARMV7_IDX_CYCLE_COUNTER) {
		/* CCNT */
		asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
	} else {
		/* PMXEVCNTR for the counter selected above */
		armv7_pmnc_select_counter(idx);
		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (value));
	}

	return value;
}
76543eab878SWill Deacon 
/*
 * Write the hardware counter backing @event.  Only the low 32 bits of
 * @value are written (note the (u32) casts); an out-of-range index is
 * logged and ignored.
 */
static inline void armv7pmu_write_counter(struct perf_event *event, u64 value)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
		pr_err("CPU%u writing wrong counter %d\n",
			smp_processor_id(), idx);
	} else if (idx == ARMV7_IDX_CYCLE_COUNTER) {
		/* CCNT */
		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" ((u32)value));
	} else {
		/* PMXEVCNTR for the counter selected above */
		armv7_pmnc_select_counter(idx);
		asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" ((u32)value));
	}
}
78243eab878SWill Deacon 
/*
 * Program the event type for counter @idx: select the counter, mask
 * @val to the writable PMXEVTYPER bits and write the evtsel register.
 */
static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
{
	armv7_pmnc_select_counter(idx);
	val &= ARMV7_EVTYPE_MASK;
	asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
}
78943eab878SWill Deacon 
/* Start counter @idx by setting its bit in the count-enable set register. */
static inline void armv7_pmnc_enable_counter(int idx)
{
	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter)));
}
79543eab878SWill Deacon 
/* Stop counter @idx by setting its bit in the count-enable clear register. */
static inline void armv7_pmnc_disable_counter(int idx)
{
	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter)));
}
80143eab878SWill Deacon 
/* Enable the overflow interrupt for counter @idx (interrupt-enable set). */
static inline void armv7_pmnc_enable_intens(int idx)
{
	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter)));
}
80743eab878SWill Deacon 
/*
 * Disable the overflow interrupt for counter @idx (interrupt-enable
 * clear), then clear its overflow flag so a pending interrupt is not
 * taken after the source has been masked.  The isb()s order the two
 * register writes against surrounding instructions.
 */
static inline void armv7_pmnc_disable_intens(int idx)
{
	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
	isb();
	/* Clear the overflow flag in case an interrupt is pending. */
	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter)));
	isb();
}
81743eab878SWill Deacon 
/*
 * Read the overflow FLAG register and clear every flag that was set
 * (write-one-to-clear), returning the flags as read.
 */
static inline u32 armv7_pmnc_getreset_flags(void)
{
	u32 val;

	/* Read */
	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));

	/* Write to clear flags */
	val &= ARMV7_FLAG_MASK;
	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));

	return val;
}
83143eab878SWill Deacon 
#ifdef DEBUG
/*
 * Dump the PMU register state (PMNC, enable/interrupt sets, overflow
 * flags, counter select, CCNT and each event counter/evtsel pair) to
 * the kernel log.  Debug aid only; compiled out unless DEBUG is set.
 */
static void armv7_pmnc_dump_regs(struct arm_pmu *cpu_pmu)
{
	u32 val;
	unsigned int cnt;

	pr_info("PMNC registers dump:\n");

	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
	pr_info("PMNC  =0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
	pr_info("CNTENS=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
	pr_info("INTENS=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
	pr_info("FLAGS =0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
	pr_info("SELECT=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
	pr_info("CCNT  =0x%08x\n", val);

	for (cnt = ARMV7_IDX_COUNTER0;
			cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) {
		armv7_pmnc_select_counter(cnt);
		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
		pr_info("CNT[%d] count =0x%08x\n",
			ARMV7_IDX_TO_COUNTER(cnt), val);
		asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
		pr_info("CNT[%d] evtsel=0x%08x\n",
			ARMV7_IDX_TO_COUNTER(cnt), val);
	}
}
#endif
87043eab878SWill Deacon 
/*
 * Program and start the hardware counter assigned to @event: write the
 * event type (except for the cycle counter on PMUs without event
 * filtering), unmask its overflow interrupt and enable the counter.
 * The per-cpu pmu_lock serialises the select/program sequence against
 * other PMU state updates.
 */
static void armv7pmu_enable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
	int idx = hwc->idx;

	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
		pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
			smp_processor_id(), idx);
		return;
	}

	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Set event (if destined for PMNx counters)
	 * We only need to set the event for the cycle counter if we
	 * have the ability to perform event filtering.
	 */
	if (cpu_pmu->set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
		armv7_pmnc_write_evtsel(idx, hwc->config_base);

	/*
	 * Enable interrupt for this counter
	 */
	armv7_pmnc_enable_intens(idx);

	/*
	 * Enable counter
	 */
	armv7_pmnc_enable_counter(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
91643eab878SWill Deacon 
/*
 * Stop the hardware counter assigned to @event and mask its overflow
 * interrupt, under the per-cpu pmu_lock.  An out-of-range index is
 * logged and ignored.
 */
static void armv7pmu_disable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
	int idx = hwc->idx;

	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
		pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
			smp_processor_id(), idx);
		return;
	}

	/*
	 * Disable counter and interrupt
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Disable interrupt for this counter
	 */
	armv7_pmnc_disable_intens(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
94843eab878SWill Deacon 
/*
 * PMU overflow interrupt handler.  Reads and clears the overflow flags
 * in one pass, then, for every counter that overflowed and has an event
 * attached, folds the hardware count into the event and re-programs the
 * sampling period.  Returns IRQ_NONE when no flag was set (shared IRQ).
 */
static irqreturn_t armv7pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
	u32 pmnc;
	struct perf_sample_data data;
	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
	struct pt_regs *regs;
	int idx;

	/*
	 * Get and reset the IRQ flags
	 */
	pmnc = armv7_pmnc_getreset_flags();

	/*
	 * Did an overflow occur?
	 */
	if (!armv7_pmnc_has_overflowed(pmnc))
		return IRQ_NONE;

	/*
	 * Handle the counter(s) overflow(s)
	 */
	regs = get_irq_regs();

	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		/* Ignore if we don't have an event. */
		if (!event)
			continue;

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event);
		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!armpmu_event_set_period(event))
			continue;

		/* Throttling: the core may ask us to stop the event. */
		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(event);
	}

	/*
	 * Handle the pending perf events.
	 *
	 * Note: this call *must* be run with interrupts disabled. For
	 * platforms that can have the PMU interrupts raised as an NMI, this
	 * will not work.
	 */
	irq_work_run();

	return IRQ_HANDLED;
}
100943eab878SWill Deacon 
/* Globally enable the PMU by setting the E bit in PMNC, under pmu_lock. */
static void armv7pmu_start(struct arm_pmu *cpu_pmu)
{
	unsigned long flags;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/* Enable all counters */
	armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
102043eab878SWill Deacon 
/* Globally disable the PMU by clearing the E bit in PMNC, under pmu_lock. */
static void armv7pmu_stop(struct arm_pmu *cpu_pmu)
{
	unsigned long flags;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/* Disable all counters */
	armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
103143eab878SWill Deacon 
armv7pmu_get_event_idx(struct pmu_hw_events * cpuc,struct perf_event * event)10328be3f9a2SMark Rutland static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
1033ed6f2a52SSudeep KarkadaNagesha 				  struct perf_event *event)
103443eab878SWill Deacon {
103543eab878SWill Deacon 	int idx;
1036ed6f2a52SSudeep KarkadaNagesha 	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1037ed6f2a52SSudeep KarkadaNagesha 	struct hw_perf_event *hwc = &event->hw;
1038ed6f2a52SSudeep KarkadaNagesha 	unsigned long evtype = hwc->config_base & ARMV7_EVTYPE_EVENT;
103943eab878SWill Deacon 
104043eab878SWill Deacon 	/* Always place a cycle counter into the cycle counter. */
1041a505addcSWill Deacon 	if (evtype == ARMV7_PERFCTR_CPU_CYCLES) {
1042c691bb62SWill Deacon 		if (test_and_set_bit(ARMV7_IDX_CYCLE_COUNTER, cpuc->used_mask))
104343eab878SWill Deacon 			return -EAGAIN;
104443eab878SWill Deacon 
1045c691bb62SWill Deacon 		return ARMV7_IDX_CYCLE_COUNTER;
1046c691bb62SWill Deacon 	}
1047c691bb62SWill Deacon 
104843eab878SWill Deacon 	/*
104943eab878SWill Deacon 	 * For anything other than a cycle counter, try and use
105043eab878SWill Deacon 	 * the events counters
105143eab878SWill Deacon 	 */
10528be3f9a2SMark Rutland 	for (idx = ARMV7_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
105343eab878SWill Deacon 		if (!test_and_set_bit(idx, cpuc->used_mask))
105443eab878SWill Deacon 			return idx;
105543eab878SWill Deacon 	}
105643eab878SWill Deacon 
105743eab878SWill Deacon 	/* The counters are all in use. */
105843eab878SWill Deacon 	return -EAGAIN;
105943eab878SWill Deacon }
106043eab878SWill Deacon 
armv7pmu_clear_event_idx(struct pmu_hw_events * cpuc,struct perf_event * event)10617dfc8db1SSuzuki K Poulose static void armv7pmu_clear_event_idx(struct pmu_hw_events *cpuc,
10627dfc8db1SSuzuki K Poulose 				     struct perf_event *event)
10637dfc8db1SSuzuki K Poulose {
10647dfc8db1SSuzuki K Poulose 	clear_bit(event->hw.idx, cpuc->used_mask);
10657dfc8db1SSuzuki K Poulose }
10667dfc8db1SSuzuki K Poulose 
1067a505addcSWill Deacon /*
1068a505addcSWill Deacon  * Add an event filter to a given event. This will only work for PMUv2 PMUs.
1069a505addcSWill Deacon  */
armv7pmu_set_event_filter(struct hw_perf_event * event,struct perf_event_attr * attr)1070a505addcSWill Deacon static int armv7pmu_set_event_filter(struct hw_perf_event *event,
1071a505addcSWill Deacon 				     struct perf_event_attr *attr)
1072a505addcSWill Deacon {
1073a505addcSWill Deacon 	unsigned long config_base = 0;
1074a505addcSWill Deacon 
1075a505addcSWill Deacon 	if (attr->exclude_idle)
1076a505addcSWill Deacon 		return -EPERM;
1077a505addcSWill Deacon 	if (attr->exclude_user)
1078a505addcSWill Deacon 		config_base |= ARMV7_EXCLUDE_USER;
1079a505addcSWill Deacon 	if (attr->exclude_kernel)
1080a505addcSWill Deacon 		config_base |= ARMV7_EXCLUDE_PL1;
1081a505addcSWill Deacon 	if (!attr->exclude_hv)
1082a505addcSWill Deacon 		config_base |= ARMV7_INCLUDE_HYP;
1083a505addcSWill Deacon 
1084a505addcSWill Deacon 	/*
1085a505addcSWill Deacon 	 * Install the filter into config_base as this is used to
1086a505addcSWill Deacon 	 * construct the event type.
1087a505addcSWill Deacon 	 */
1088a505addcSWill Deacon 	event->config_base = config_base;
1089a505addcSWill Deacon 
1090a505addcSWill Deacon 	return 0;
109143eab878SWill Deacon }
109243eab878SWill Deacon 
/*
 * Reset this CPU's PMU to a sane state (run on each CPU via cross-call):
 * optionally open up secure-side counting, mask every counter and its
 * interrupt, then reset the cycle and event counters through PMNC.
 */
static void armv7pmu_reset(void *info)
{
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
	u32 idx, nb_cnt = cpu_pmu->num_events, val;

	if (cpu_pmu->secure_access) {
		/* Set SDER.SUNIDEN so counting is permitted in secure state. */
		asm volatile("mrc p15, 0, %0, c1, c1, 1" : "=r" (val));
		val |= ARMV7_SDER_SUNIDEN;
		asm volatile("mcr p15, 0, %0, c1, c1, 1" : : "r" (val));
	}

	/* The counter and interrupt enable registers are unknown at reset. */
	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
		armv7_pmnc_disable_counter(idx);
		armv7_pmnc_disable_intens(idx);
	}

	/* Initialize & Reset PMNC: C and P bits */
	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
}
1113574b69cbSWill Deacon 
/* Map generic perf event IDs onto Cortex-A8 hardware event encodings. */
static int armv7_a8_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a8_perf_map,
				&armv7_a8_perf_cache_map, 0xFF);
}
1119e1f431b5SMark Rutland 
/* Map generic perf event IDs onto Cortex-A9 hardware event encodings. */
static int armv7_a9_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a9_perf_map,
				&armv7_a9_perf_cache_map, 0xFF);
}
1125e1f431b5SMark Rutland 
/* Map generic perf event IDs onto Cortex-A5 hardware event encodings. */
static int armv7_a5_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a5_perf_map,
				&armv7_a5_perf_cache_map, 0xFF);
}
1131e1f431b5SMark Rutland 
/* Map generic perf event IDs onto Cortex-A15 hardware event encodings. */
static int armv7_a15_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a15_perf_map,
				&armv7_a15_perf_cache_map, 0xFF);
}
1137e1f431b5SMark Rutland 
/* Map generic perf event IDs onto Cortex-A7 hardware event encodings. */
static int armv7_a7_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a7_perf_map,
				&armv7_a7_perf_cache_map, 0xFF);
}
1143d33c88c6SWill Deacon 
/* Map generic perf event IDs onto Cortex-A12 hardware event encodings. */
static int armv7_a12_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a12_perf_map,
				&armv7_a12_perf_cache_map, 0xFF);
}
11498e781f65SAlbin Tonnerre 
/*
 * Map generic perf event IDs onto Krait encodings; the wider 0xFFFFF
 * mask keeps the region/group bits of Krait extended events.
 */
static int krait_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &krait_perf_map,
				&krait_perf_cache_map, 0xFFFFF);
}
11552a3391cdSStephen Boyd 
/* As krait_map_event(), but using the map without branch events. */
static int krait_map_event_no_branch(struct perf_event *event)
{
	return armpmu_map_event(event, &krait_perf_map_no_branch,
				&krait_perf_cache_map, 0xFFFFF);
}
11612a3391cdSStephen Boyd 
/* Map generic perf event IDs onto Scorpion hardware event encodings. */
static int scorpion_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &scorpion_perf_map,
				&scorpion_perf_cache_map, 0xFFFFF);
}
1167341e42c4SStephen Boyd 
armv7pmu_init(struct arm_pmu * cpu_pmu)1168513c99ceSSudeep KarkadaNagesha static void armv7pmu_init(struct arm_pmu *cpu_pmu)
1169513c99ceSSudeep KarkadaNagesha {
1170513c99ceSSudeep KarkadaNagesha 	cpu_pmu->handle_irq	= armv7pmu_handle_irq;
1171513c99ceSSudeep KarkadaNagesha 	cpu_pmu->enable		= armv7pmu_enable_event;
1172513c99ceSSudeep KarkadaNagesha 	cpu_pmu->disable	= armv7pmu_disable_event;
1173513c99ceSSudeep KarkadaNagesha 	cpu_pmu->read_counter	= armv7pmu_read_counter;
1174513c99ceSSudeep KarkadaNagesha 	cpu_pmu->write_counter	= armv7pmu_write_counter;
1175513c99ceSSudeep KarkadaNagesha 	cpu_pmu->get_event_idx	= armv7pmu_get_event_idx;
11767dfc8db1SSuzuki K Poulose 	cpu_pmu->clear_event_idx = armv7pmu_clear_event_idx;
1177513c99ceSSudeep KarkadaNagesha 	cpu_pmu->start		= armv7pmu_start;
1178513c99ceSSudeep KarkadaNagesha 	cpu_pmu->stop		= armv7pmu_stop;
1179513c99ceSSudeep KarkadaNagesha 	cpu_pmu->reset		= armv7pmu_reset;
118043eab878SWill Deacon };
118143eab878SWill Deacon 
armv7_read_num_pmnc_events(void * info)11820e3038d1SMark Rutland static void armv7_read_num_pmnc_events(void *info)
118343eab878SWill Deacon {
11840e3038d1SMark Rutland 	int *nb_cnt = info;
118543eab878SWill Deacon 
118643eab878SWill Deacon 	/* Read the nb of CNTx counters supported from PMNC */
11870e3038d1SMark Rutland 	*nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;
118843eab878SWill Deacon 
11890e3038d1SMark Rutland 	/* Add the CPU cycles counter */
11900e3038d1SMark Rutland 	*nb_cnt += 1;
11910e3038d1SMark Rutland }
11920e3038d1SMark Rutland 
/*
 * Probe the counter count on one of the PMU's supported CPUs and store
 * it in arm_pmu::num_events.  Returns 0 on success or the error from
 * smp_call_function_any().
 */
static int armv7_probe_num_events(struct arm_pmu *arm_pmu)
{
	return smp_call_function_any(&arm_pmu->supported_cpus,
				     armv7_read_num_pmnc_events,
				     &arm_pmu->num_events, 1);
}
119943eab878SWill Deacon 
/* Probe-time setup for the Cortex-A8 PMU (ARMv7, PMUv1 sysfs events). */
static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv7_cortex_a8";
	cpu_pmu->map_event	= armv7_a8_map_event;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
		&armv7_pmuv1_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
		&armv7_pmu_format_attr_group;
	return armv7_probe_num_events(cpu_pmu);
}
121143eab878SWill Deacon 
/* Probe-time setup for the Cortex-A9 PMU (ARMv7, PMUv1 sysfs events). */
static int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv7_cortex_a9";
	cpu_pmu->map_event	= armv7_a9_map_event;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
		&armv7_pmuv1_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
		&armv7_pmu_format_attr_group;
	return armv7_probe_num_events(cpu_pmu);
}
12230c205cbeSWill Deacon 
/* Probe-time setup for the Cortex-A5 PMU (ARMv7, PMUv1 sysfs events). */
static int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv7_cortex_a5";
	cpu_pmu->map_event	= armv7_a5_map_event;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
		&armv7_pmuv1_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
		&armv7_pmu_format_attr_group;
	return armv7_probe_num_events(cpu_pmu);
}
123514abd038SWill Deacon 
/*
 * Probe-time setup for the Cortex-A15 PMU.  PMUv2, so mode-exclusion
 * filtering via set_event_filter is available.
 */
static int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv7_cortex_a15";
	cpu_pmu->map_event	= armv7_a15_map_event;
	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
		&armv7_pmuv2_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
		&armv7_pmu_format_attr_group;
	return armv7_probe_num_events(cpu_pmu);
}
1248d33c88c6SWill Deacon 
/*
 * Probe-time setup for the Cortex-A7 PMU.  PMUv2, so mode-exclusion
 * filtering via set_event_filter is available.
 */
static int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv7_cortex_a7";
	cpu_pmu->map_event	= armv7_a7_map_event;
	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
		&armv7_pmuv2_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
		&armv7_pmu_format_attr_group;
	return armv7_probe_num_events(cpu_pmu);
}
12612a3391cdSStephen Boyd 
/*
 * Probe-time setup for the Cortex-A12 PMU.  PMUv2, so mode-exclusion
 * filtering via set_event_filter is available.
 */
static int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv7_cortex_a12";
	cpu_pmu->map_event	= armv7_a12_map_event;
	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
		&armv7_pmuv2_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
		&armv7_pmu_format_attr_group;
	return armv7_probe_num_events(cpu_pmu);
}
12748e781f65SAlbin Tonnerre 
armv7_a17_pmu_init(struct arm_pmu * cpu_pmu)127503eff46cSWill Deacon static int armv7_a17_pmu_init(struct arm_pmu *cpu_pmu)
127603eff46cSWill Deacon {
12770e3038d1SMark Rutland 	int ret = armv7_a12_pmu_init(cpu_pmu);
12783d1ff755SMark Rutland 	cpu_pmu->name = "armv7_cortex_a17";
12799268c5daSMark Rutland 	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
12809268c5daSMark Rutland 		&armv7_pmuv2_events_attr_group;
12819268c5daSMark Rutland 	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
12829268c5daSMark Rutland 		&armv7_pmu_format_attr_group;
12830e3038d1SMark Rutland 	return ret;
128403eff46cSWill Deacon }
128503eff46cSWill Deacon 
1286b7aafe99SStephen Boyd /*
1287b7aafe99SStephen Boyd  * Krait Performance Monitor Region Event Selection Register (PMRESRn)
1288b7aafe99SStephen Boyd  *
1289b7aafe99SStephen Boyd  *            31   30     24     16     8      0
1290b7aafe99SStephen Boyd  *            +--------------------------------+
1291b7aafe99SStephen Boyd  *  PMRESR0   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 0
1292b7aafe99SStephen Boyd  *            +--------------------------------+
1293b7aafe99SStephen Boyd  *  PMRESR1   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 1
1294b7aafe99SStephen Boyd  *            +--------------------------------+
1295b7aafe99SStephen Boyd  *  PMRESR2   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 2
1296b7aafe99SStephen Boyd  *            +--------------------------------+
1297b7aafe99SStephen Boyd  *  VPMRESR0  | EN |  CC  |  CC  |  CC  |  CC  |   N = 2, R = ?
1298b7aafe99SStephen Boyd  *            +--------------------------------+
1299b7aafe99SStephen Boyd  *              EN | G=3  | G=2  | G=1  | G=0
1300b7aafe99SStephen Boyd  *
1301b7aafe99SStephen Boyd  *  Event Encoding:
1302b7aafe99SStephen Boyd  *
1303b7aafe99SStephen Boyd  *      hwc->config_base = 0xNRCCG
1304b7aafe99SStephen Boyd  *
1305b7aafe99SStephen Boyd  *      N  = prefix, 1 for Krait CPU (PMRESRn), 2 for Venum VFP (VPMRESR)
1306b7aafe99SStephen Boyd  *      R  = region register
1307b7aafe99SStephen Boyd  *      CC = class of events the group G is choosing from
1308b7aafe99SStephen Boyd  *      G  = group or particular event
1309b7aafe99SStephen Boyd  *
1310b7aafe99SStephen Boyd  *  Example: 0x12021 is a Krait CPU event in PMRESR2's group 1 with code 2
1311b7aafe99SStephen Boyd  *
1312b7aafe99SStephen Boyd  *  A region (R) corresponds to a piece of the CPU (execution unit, instruction
1313b7aafe99SStephen Boyd  *  unit, etc.) while the event code (CC) corresponds to a particular class of
1314b7aafe99SStephen Boyd  *  events (interrupts for example). An event code is broken down into
1315b7aafe99SStephen Boyd  *  groups (G) that can be mapped into the PMU (irq, fiqs, and irq+fiqs for
1316b7aafe99SStephen Boyd  *  example).
1317b7aafe99SStephen Boyd  */
1318b7aafe99SStephen Boyd 
1319b7aafe99SStephen Boyd #define KRAIT_EVENT		(1 << 16)
1320b7aafe99SStephen Boyd #define VENUM_EVENT		(2 << 16)
1321b7aafe99SStephen Boyd #define KRAIT_EVENT_MASK	(KRAIT_EVENT | VENUM_EVENT)
1322b7aafe99SStephen Boyd #define PMRESRn_EN		BIT(31)
1323b7aafe99SStephen Boyd 
132465bab451SStephen Boyd #define EVENT_REGION(event)	(((event) >> 12) & 0xf)		/* R */
132565bab451SStephen Boyd #define EVENT_GROUP(event)	((event) & 0xf)			/* G */
132665bab451SStephen Boyd #define EVENT_CODE(event)	(((event) >> 4) & 0xff)		/* CC */
132765bab451SStephen Boyd #define EVENT_VENUM(event)	(!!(event & VENUM_EVENT))	/* N=2 */
132865bab451SStephen Boyd #define EVENT_CPU(event)	(!!(event & KRAIT_EVENT))	/* N=1 */
132965bab451SStephen Boyd 
/*
 * Read Krait region event selection register PMRESRn (n = 0..2).
 * Each register has its own CP15 encoding, hence the switch; the caller
 * guarantees @n is in range.
 */
static u32 krait_read_pmresrn(int n)
{
	u32 val;

	switch (n) {
	case 0:
		asm volatile("mrc p15, 1, %0, c9, c15, 0" : "=r" (val));
		break;
	case 1:
		asm volatile("mrc p15, 1, %0, c9, c15, 1" : "=r" (val));
		break;
	case 2:
		asm volatile("mrc p15, 1, %0, c9, c15, 2" : "=r" (val));
		break;
	default:
		BUG(); /* Should be validated in krait_pmu_get_event_idx() */
	}

	return val;
}
1350b7aafe99SStephen Boyd 
/*
 * Write @val to Krait region event selection register PMRESRn
 * (n = 0..2).  Counterpart of krait_read_pmresrn().
 */
static void krait_write_pmresrn(int n, u32 val)
{
	switch (n) {
	case 0:
		asm volatile("mcr p15, 1, %0, c9, c15, 0" : : "r" (val));
		break;
	case 1:
		asm volatile("mcr p15, 1, %0, c9, c15, 1" : : "r" (val));
		break;
	case 2:
		asm volatile("mcr p15, 1, %0, c9, c15, 2" : : "r" (val));
		break;
	default:
		BUG(); /* Should be validated in krait_pmu_get_event_idx() */
	}
}
1367b7aafe99SStephen Boyd 
/*
 * Read the Venum VFP region register (VPMRESR0) via CP10.  Callers must
 * bracket this with venum_pre_pmresr()/venum_post_pmresr() so CP10/CP11
 * access is enabled.
 */
static u32 venum_read_pmresr(void)
{
	u32 val;
	asm volatile("mrc p10, 7, %0, c11, c0, 0" : "=r" (val));
	return val;
}
1374b7aafe99SStephen Boyd 
/*
 * Write the Venum VFP region register (VPMRESR0) via CP10.  Same
 * venum_pre_pmresr()/venum_post_pmresr() bracketing requirement as
 * venum_read_pmresr().
 */
static void venum_write_pmresr(u32 val)
{
	asm volatile("mcr p10, 7, %0, c11, c0, 0" : : "r" (val));
}
1379b7aafe99SStephen Boyd 
/*
 * Prepare for a VPMRESR0 access: enable CP10/CP11 in CPACR and set
 * FPEXC.EN, saving the previous values into @venum_orig_val and
 * @fp_orig_val for venum_post_pmresr() to restore.  Must run
 * non-preemptibly so the saved state belongs to this CPU.
 */
static void venum_pre_pmresr(u32 *venum_orig_val, u32 *fp_orig_val)
{
	u32 venum_new_val;
	u32 fp_new_val;

	BUG_ON(preemptible());
	/* CPACR Enable CP10 and CP11 access */
	*venum_orig_val = get_copro_access();
	venum_new_val = *venum_orig_val | CPACC_SVC(10) | CPACC_SVC(11);
	set_copro_access(venum_new_val);

	/* Enable FPEXC */
	*fp_orig_val = fmrx(FPEXC);
	fp_new_val = *fp_orig_val | FPEXC_EN;
	fmxr(FPEXC, fp_new_val);
}
1396b7aafe99SStephen Boyd 
/*
 * Undo venum_pre_pmresr(): restore FPEXC first (with an isb barrier),
 * then CPACR, in the reverse order of how they were changed.
 */
static void venum_post_pmresr(u32 venum_orig_val, u32 fp_orig_val)
{
	BUG_ON(preemptible());
	/* Restore FPEXC */
	fmxr(FPEXC, fp_orig_val);
	isb();
	/* Restore CPACR */
	set_copro_access(venum_orig_val);
}
1406b7aafe99SStephen Boyd 
/*
 * Return the group-0 base event code for PMRESRn of @region (0-2).
 * @region must already be validated by the caller — an out-of-range
 * value would index past the table.
 */
static u32 krait_get_pmresrn_event(unsigned int region)
{
	static const u32 pmresrn_table[] = { KRAIT_PMRESR0_GROUP0,
					     KRAIT_PMRESR1_GROUP0,
					     KRAIT_PMRESR2_GROUP0 };
	return pmresrn_table[region];
}
1414b7aafe99SStephen Boyd 
/*
 * Program counter @idx for a Krait region event described by
 * @config_base (0xNRCCG layout, see the comment block above): point the
 * ARMv7 event selector at the region/group, then install the event code
 * into the matching 8-bit group field of PMRESRn (or VPMRESR0 for Venum
 * events) and set the region enable bit.
 */
static void krait_evt_setup(int idx, u32 config_base)
{
	u32 val;
	u32 mask;
	u32 vval, fval;
	unsigned int region = EVENT_REGION(config_base);
	unsigned int group = EVENT_GROUP(config_base);
	unsigned int code = EVENT_CODE(config_base);
	unsigned int group_shift;
	bool venum_event = EVENT_VENUM(config_base);

	group_shift = group * 8;
	mask = 0xff << group_shift;

	/* Configure evtsel for the region and group */
	if (venum_event)
		val = KRAIT_VPMRESR0_GROUP0;
	else
		val = krait_get_pmresrn_event(region);
	val += group;
	/* Mix in mode-exclusion bits */
	val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1);
	armv7_pmnc_write_evtsel(idx, val);

	if (venum_event) {
		/* VFP/Neon access must be enabled around VPMRESR0 accesses. */
		venum_pre_pmresr(&vval, &fval);
		val = venum_read_pmresr();
		val &= ~mask;
		val |= code << group_shift;
		val |= PMRESRn_EN;
		venum_write_pmresr(val);
		venum_post_pmresr(vval, fval);
	} else {
		val = krait_read_pmresrn(region);
		val &= ~mask;
		val |= code << group_shift;
		val |= PMRESRn_EN;
		krait_write_pmresrn(region, val);
	}
}
1455b7aafe99SStephen Boyd 
/*
 * Clear the 8-bit field of group @group in PMRESRn value @val.  The
 * enable bit is kept as long as any other group is still programmed;
 * once the register holds nothing but the enable bit, the whole value
 * collapses to zero.
 */
static u32 clear_pmresrn_group(u32 val, int group)
{
	u32 group_mask = 0xff << (group * 8);

	val &= ~group_mask;

	/* Don't clear enable bit if entire region isn't disabled */
	if (val & ~PMRESRn_EN)
		return val | PMRESRn_EN;

	return 0;
}
1471b7aafe99SStephen Boyd 
/*
 * Undo krait_evt_setup(): clear the event's group field in the relevant
 * PMRESRn (or VPMRESR0) register, dropping the region enable bit when
 * no other group remains in use.
 */
static void krait_clearpmu(u32 config_base)
{
	u32 val;
	u32 vval, fval;
	unsigned int region = EVENT_REGION(config_base);
	unsigned int group = EVENT_GROUP(config_base);
	bool venum_event = EVENT_VENUM(config_base);

	if (venum_event) {
		/* VFP/Neon access must be enabled around VPMRESR0 accesses. */
		venum_pre_pmresr(&vval, &fval);
		val = venum_read_pmresr();
		val = clear_pmresrn_group(val, group);
		venum_write_pmresr(val);
		venum_post_pmresr(vval, fval);
	} else {
		val = krait_read_pmresrn(region);
		val = clear_pmresrn_group(val, group);
		krait_write_pmresrn(region, val);
	}
}
1492b7aafe99SStephen Boyd 
/*
 * Stop counting @event on this CPU: mask the counter and its interrupt
 * and, for Krait region events, release the PMRESR group field the
 * event was occupying.
 */
static void krait_pmu_disable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	/* Disable counter and interrupt */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/* Disable counter */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Clear pmresr code (if destined for PMNx counters)
	 */
	if (hwc->config_base & KRAIT_EVENT_MASK)
		krait_clearpmu(hwc->config_base);

	/* Disable interrupt for this counter */
	armv7_pmnc_disable_intens(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
1518b7aafe99SStephen Boyd 
/*
 * Start counting @event on this CPU: the counter is first disabled,
 * then its event selection (plain ARMv7 or Krait region event) is
 * programmed, and finally interrupt and counter are enabled.
 */
static void krait_pmu_enable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/* Disable counter */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Set event (if destined for PMNx counters)
	 * We set the event for the cycle counter because we
	 * have the ability to perform event filtering.
	 */
	if (hwc->config_base & KRAIT_EVENT_MASK)
		krait_evt_setup(idx, hwc->config_base);
	else
		armv7_pmnc_write_evtsel(idx, hwc->config_base);

	/* Enable interrupt for this counter */
	armv7_pmnc_enable_intens(idx);

	/* Enable counter */
	armv7_pmnc_enable_counter(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
1554b7aafe99SStephen Boyd 
/*
 * Krait reset callback: perform the common ARMv7 reset, then clear the
 * Krait-specific region registers (PMRESR0-2, VPMRESR0) and the
 * per-counter PMxEVNCTCR control registers, none of which are touched
 * by armv7pmu_reset().
 */
static void krait_pmu_reset(void *info)
{
	u32 vval, fval;
	struct arm_pmu *cpu_pmu = info;
	u32 idx, nb_cnt = cpu_pmu->num_events;

	armv7pmu_reset(info);

	/* Clear all pmresrs */
	krait_write_pmresrn(0, 0);
	krait_write_pmresrn(1, 0);
	krait_write_pmresrn(2, 0);

	/* VFP/Neon access must be enabled around the VPMRESR0 write. */
	venum_pre_pmresr(&vval, &fval);
	venum_write_pmresr(0);
	venum_post_pmresr(vval, fval);

	/* Reset PMxEVNCTCR to sane default */
	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
		armv7_pmnc_select_counter(idx);
		asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
	}

}
1579b7aafe99SStephen Boyd 
krait_event_to_bit(struct perf_event * event,unsigned int region,unsigned int group)1580b7aafe99SStephen Boyd static int krait_event_to_bit(struct perf_event *event, unsigned int region,
1581b7aafe99SStephen Boyd 			      unsigned int group)
1582b7aafe99SStephen Boyd {
1583b7aafe99SStephen Boyd 	int bit;
1584b7aafe99SStephen Boyd 	struct hw_perf_event *hwc = &event->hw;
1585b7aafe99SStephen Boyd 	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1586b7aafe99SStephen Boyd 
1587b7aafe99SStephen Boyd 	if (hwc->config_base & VENUM_EVENT)
1588b7aafe99SStephen Boyd 		bit = KRAIT_VPMRESR0_GROUP0;
1589b7aafe99SStephen Boyd 	else
1590b7aafe99SStephen Boyd 		bit = krait_get_pmresrn_event(region);
1591b7aafe99SStephen Boyd 	bit -= krait_get_pmresrn_event(0);
1592b7aafe99SStephen Boyd 	bit += group;
1593b7aafe99SStephen Boyd 	/*
1594b7aafe99SStephen Boyd 	 * Lower bits are reserved for use by the counters (see
1595b7aafe99SStephen Boyd 	 * armv7pmu_get_event_idx() for more info)
1596b7aafe99SStephen Boyd 	 */
1597b7aafe99SStephen Boyd 	bit += ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1;
1598b7aafe99SStephen Boyd 
1599b7aafe99SStephen Boyd 	return bit;
1600b7aafe99SStephen Boyd }
1601b7aafe99SStephen Boyd 
1602b7aafe99SStephen Boyd /*
1603b7aafe99SStephen Boyd  * We check for column exclusion constraints here.
1604b7aafe99SStephen Boyd  * Two events cant use the same group within a pmresr register.
1605b7aafe99SStephen Boyd  */
krait_pmu_get_event_idx(struct pmu_hw_events * cpuc,struct perf_event * event)1606b7aafe99SStephen Boyd static int krait_pmu_get_event_idx(struct pmu_hw_events *cpuc,
1607b7aafe99SStephen Boyd 				   struct perf_event *event)
1608b7aafe99SStephen Boyd {
1609b7aafe99SStephen Boyd 	int idx;
16106a78371aSRussell King 	int bit = -1;
1611b7aafe99SStephen Boyd 	struct hw_perf_event *hwc = &event->hw;
161265bab451SStephen Boyd 	unsigned int region = EVENT_REGION(hwc->config_base);
161365bab451SStephen Boyd 	unsigned int code = EVENT_CODE(hwc->config_base);
161465bab451SStephen Boyd 	unsigned int group = EVENT_GROUP(hwc->config_base);
161565bab451SStephen Boyd 	bool venum_event = EVENT_VENUM(hwc->config_base);
161665bab451SStephen Boyd 	bool krait_event = EVENT_CPU(hwc->config_base);
1617b7aafe99SStephen Boyd 
161865bab451SStephen Boyd 	if (venum_event || krait_event) {
1619b7aafe99SStephen Boyd 		/* Ignore invalid events */
1620b7aafe99SStephen Boyd 		if (group > 3 || region > 2)
1621b7aafe99SStephen Boyd 			return -EINVAL;
162265bab451SStephen Boyd 		if (venum_event && (code & 0xe0))
1623b7aafe99SStephen Boyd 			return -EINVAL;
1624b7aafe99SStephen Boyd 
1625b7aafe99SStephen Boyd 		bit = krait_event_to_bit(event, region, group);
1626b7aafe99SStephen Boyd 		if (test_and_set_bit(bit, cpuc->used_mask))
1627b7aafe99SStephen Boyd 			return -EAGAIN;
1628b7aafe99SStephen Boyd 	}
1629b7aafe99SStephen Boyd 
1630b7aafe99SStephen Boyd 	idx = armv7pmu_get_event_idx(cpuc, event);
16316a78371aSRussell King 	if (idx < 0 && bit >= 0)
1632b7aafe99SStephen Boyd 		clear_bit(bit, cpuc->used_mask);
1633b7aafe99SStephen Boyd 
1634b7aafe99SStephen Boyd 	return idx;
1635b7aafe99SStephen Boyd }
1636b7aafe99SStephen Boyd 
krait_pmu_clear_event_idx(struct pmu_hw_events * cpuc,struct perf_event * event)1637b7aafe99SStephen Boyd static void krait_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
1638b7aafe99SStephen Boyd 				      struct perf_event *event)
1639b7aafe99SStephen Boyd {
1640b7aafe99SStephen Boyd 	int bit;
1641b7aafe99SStephen Boyd 	struct hw_perf_event *hwc = &event->hw;
164265bab451SStephen Boyd 	unsigned int region = EVENT_REGION(hwc->config_base);
164365bab451SStephen Boyd 	unsigned int group = EVENT_GROUP(hwc->config_base);
164465bab451SStephen Boyd 	bool venum_event = EVENT_VENUM(hwc->config_base);
164565bab451SStephen Boyd 	bool krait_event = EVENT_CPU(hwc->config_base);
1646b7aafe99SStephen Boyd 
16477dfc8db1SSuzuki K Poulose 	armv7pmu_clear_event_idx(cpuc, event);
164865bab451SStephen Boyd 	if (venum_event || krait_event) {
1649b7aafe99SStephen Boyd 		bit = krait_event_to_bit(event, region, group);
1650b7aafe99SStephen Boyd 		clear_bit(bit, cpuc->used_mask);
1651b7aafe99SStephen Boyd 	}
1652b7aafe99SStephen Boyd }
1653b7aafe99SStephen Boyd 
krait_pmu_init(struct arm_pmu * cpu_pmu)16542a3391cdSStephen Boyd static int krait_pmu_init(struct arm_pmu *cpu_pmu)
16552a3391cdSStephen Boyd {
16562a3391cdSStephen Boyd 	armv7pmu_init(cpu_pmu);
16573d1ff755SMark Rutland 	cpu_pmu->name		= "armv7_krait";
16582a3391cdSStephen Boyd 	/* Some early versions of Krait don't support PC write events */
16592a3391cdSStephen Boyd 	if (of_property_read_bool(cpu_pmu->plat_device->dev.of_node,
16602a3391cdSStephen Boyd 				  "qcom,no-pc-write"))
16612a3391cdSStephen Boyd 		cpu_pmu->map_event = krait_map_event_no_branch;
16622a3391cdSStephen Boyd 	else
16632a3391cdSStephen Boyd 		cpu_pmu->map_event = krait_map_event;
16642a3391cdSStephen Boyd 	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
1665b7aafe99SStephen Boyd 	cpu_pmu->reset		= krait_pmu_reset;
1666b7aafe99SStephen Boyd 	cpu_pmu->enable		= krait_pmu_enable_event;
1667b7aafe99SStephen Boyd 	cpu_pmu->disable	= krait_pmu_disable_event;
1668b7aafe99SStephen Boyd 	cpu_pmu->get_event_idx	= krait_pmu_get_event_idx;
1669b7aafe99SStephen Boyd 	cpu_pmu->clear_event_idx = krait_pmu_clear_event_idx;
16700e3038d1SMark Rutland 	return armv7_probe_num_events(cpu_pmu);
16712a3391cdSStephen Boyd }
1672341e42c4SStephen Boyd 
1673341e42c4SStephen Boyd /*
1674341e42c4SStephen Boyd  * Scorpion Local Performance Monitor Register (LPMn)
1675341e42c4SStephen Boyd  *
1676341e42c4SStephen Boyd  *            31   30     24     16     8      0
1677341e42c4SStephen Boyd  *            +--------------------------------+
1678341e42c4SStephen Boyd  *  LPM0      | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 0
1679341e42c4SStephen Boyd  *            +--------------------------------+
1680341e42c4SStephen Boyd  *  LPM1      | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 1
1681341e42c4SStephen Boyd  *            +--------------------------------+
1682341e42c4SStephen Boyd  *  LPM2      | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 2
1683341e42c4SStephen Boyd  *            +--------------------------------+
1684341e42c4SStephen Boyd  *  L2LPM     | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 3
1685341e42c4SStephen Boyd  *            +--------------------------------+
1686341e42c4SStephen Boyd  *  VLPM      | EN |  CC  |  CC  |  CC  |  CC  |   N = 2, R = ?
1687341e42c4SStephen Boyd  *            +--------------------------------+
1688341e42c4SStephen Boyd  *              EN | G=3  | G=2  | G=1  | G=0
1689341e42c4SStephen Boyd  *
1690341e42c4SStephen Boyd  *
1691341e42c4SStephen Boyd  *  Event Encoding:
1692341e42c4SStephen Boyd  *
1693341e42c4SStephen Boyd  *      hwc->config_base = 0xNRCCG
1694341e42c4SStephen Boyd  *
1695341e42c4SStephen Boyd  *      N  = prefix, 1 for Scorpion CPU (LPMn/L2LPM), 2 for Venum VFP (VLPM)
1696341e42c4SStephen Boyd  *      R  = region register
1697341e42c4SStephen Boyd  *      CC = class of events the group G is choosing from
1698341e42c4SStephen Boyd  *      G  = group or particular event
1699341e42c4SStephen Boyd  *
1700341e42c4SStephen Boyd  *  Example: 0x12021 is a Scorpion CPU event in LPM2's group 1 with code 2
1701341e42c4SStephen Boyd  *
1702341e42c4SStephen Boyd  *  A region (R) corresponds to a piece of the CPU (execution unit, instruction
1703341e42c4SStephen Boyd  *  unit, etc.) while the event code (CC) corresponds to a particular class of
1704341e42c4SStephen Boyd  *  events (interrupts for example). An event code is broken down into
1705341e42c4SStephen Boyd  *  groups (G) that can be mapped into the PMU (irq, fiqs, and irq+fiqs for
1706341e42c4SStephen Boyd  *  example).
1707341e42c4SStephen Boyd  */
1708341e42c4SStephen Boyd 
/*
 * Read the Scorpion resource register for region @n:
 * LPM0/LPM1/LPM2 for n = 0..2, L2LPM for n = 3 (note the different
 * CRm for the L2 register). Any other value is a programming error;
 * callers validate region <= 3 in scorpion_pmu_get_event_idx().
 */
static u32 scorpion_read_pmresrn(int n)
{
	u32 val;

	switch (n) {
	case 0:
		asm volatile("mrc p15, 0, %0, c15, c0, 0" : "=r" (val));
		break;
	case 1:
		asm volatile("mrc p15, 1, %0, c15, c0, 0" : "=r" (val));
		break;
	case 2:
		asm volatile("mrc p15, 2, %0, c15, c0, 0" : "=r" (val));
		break;
	case 3:
		asm volatile("mrc p15, 3, %0, c15, c2, 0" : "=r" (val));
		break;
	default:
		BUG(); /* Should be validated in scorpion_pmu_get_event_idx() */
	}

	return val;
}
1732341e42c4SStephen Boyd 
/*
 * Write @val to the Scorpion resource register for region @n.
 * Mirrors scorpion_read_pmresrn(): n = 0..2 selects LPM0/LPM1/LPM2,
 * n = 3 selects L2LPM. Callers validate the region before use.
 */
static void scorpion_write_pmresrn(int n, u32 val)
{
	switch (n) {
	case 0:
		asm volatile("mcr p15, 0, %0, c15, c0, 0" : : "r" (val));
		break;
	case 1:
		asm volatile("mcr p15, 1, %0, c15, c0, 0" : : "r" (val));
		break;
	case 2:
		asm volatile("mcr p15, 2, %0, c15, c0, 0" : : "r" (val));
		break;
	case 3:
		asm volatile("mcr p15, 3, %0, c15, c2, 0" : : "r" (val));
		break;
	default:
		BUG(); /* Should be validated in scorpion_pmu_get_event_idx() */
	}
}
1752341e42c4SStephen Boyd 
scorpion_get_pmresrn_event(unsigned int region)1753341e42c4SStephen Boyd static u32 scorpion_get_pmresrn_event(unsigned int region)
1754341e42c4SStephen Boyd {
1755341e42c4SStephen Boyd 	static const u32 pmresrn_table[] = { SCORPION_LPM0_GROUP0,
1756341e42c4SStephen Boyd 					     SCORPION_LPM1_GROUP0,
1757341e42c4SStephen Boyd 					     SCORPION_LPM2_GROUP0,
1758341e42c4SStephen Boyd 					     SCORPION_L2LPM_GROUP0 };
1759341e42c4SStephen Boyd 	return pmresrn_table[region];
1760341e42c4SStephen Boyd }
1761341e42c4SStephen Boyd 
/*
 * Program counter @idx to count the region/group/code event encoded in
 * @config_base (see the LPMn layout comment above): select the group-0
 * encoding for the region in the counter's evtsel, then install the
 * event code into the matching 8-bit group field of the pmresr and set
 * its enable bit.
 */
static void scorpion_evt_setup(int idx, u32 config_base)
{
	u32 val;
	u32 mask;
	u32 vval, fval;
	unsigned int region = EVENT_REGION(config_base);
	unsigned int group = EVENT_GROUP(config_base);
	unsigned int code = EVENT_CODE(config_base);
	unsigned int group_shift;
	bool venum_event = EVENT_VENUM(config_base);

	/* Each group occupies one byte of the pmresr (see layout above) */
	group_shift = group * 8;
	mask = 0xff << group_shift;

	/* Configure evtsel for the region and group */
	if (venum_event)
		val = SCORPION_VLPM_GROUP0;
	else
		val = scorpion_get_pmresrn_event(region);
	val += group;
	/* Mix in mode-exclusion bits */
	val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1);
	armv7_pmnc_write_evtsel(idx, val);

	/*
	 * Zero PMxEVNCTCR, as done for every counter in
	 * scorpion_pmu_reset(). NOTE(review): presumably this acts on the
	 * counter last selected via the evtsel write above — confirm
	 * against the Scorpion implementation-defined register spec.
	 */
	asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));

	if (venum_event) {
		/* venum_pre/post save and restore state around the access */
		venum_pre_pmresr(&vval, &fval);
		val = venum_read_pmresr();
		val &= ~mask;
		val |= code << group_shift;
		val |= PMRESRn_EN;
		venum_write_pmresr(val);
		venum_post_pmresr(vval, fval);
	} else {
		val = scorpion_read_pmresrn(region);
		val &= ~mask;
		val |= code << group_shift;
		val |= PMRESRn_EN;
		scorpion_write_pmresrn(region, val);
	}
}
1804341e42c4SStephen Boyd 
/*
 * Clear the pmresr group field used by the event encoded in
 * @config_base, undoing what scorpion_evt_setup() programmed.
 */
static void scorpion_clearpmu(u32 config_base)
{
	unsigned int region = EVENT_REGION(config_base);
	unsigned int group = EVENT_GROUP(config_base);
	u32 vval, fval;
	u32 resr;

	if (EVENT_VENUM(config_base)) {
		/* venum_pre/post save and restore state around the access */
		venum_pre_pmresr(&vval, &fval);
		resr = venum_read_pmresr();
		venum_write_pmresr(clear_pmresrn_group(resr, group));
		venum_post_pmresr(vval, fval);
	} else {
		resr = scorpion_read_pmresrn(region);
		scorpion_write_pmresrn(region, clear_pmresrn_group(resr, group));
	}
}
1825341e42c4SStephen Boyd 
/*
 * arm_pmu ->disable() hook: stop counter hwc->idx, tear down its pmresr
 * programming (for region/group events), and mask its interrupt.
 * Runs under the per-CPU pmu_lock with interrupts disabled.
 */
static void scorpion_pmu_disable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	/* Disable counter and interrupt */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/* Disable counter */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Clear pmresr code (if destined for PMNx counters).
	 * KRAIT_EVENT_MASK covers both the CPU and Venum event flag bits;
	 * the same encoding is shared with the Krait code above.
	 */
	if (hwc->config_base & KRAIT_EVENT_MASK)
		scorpion_clearpmu(hwc->config_base);

	/* Disable interrupt for this counter */
	armv7_pmnc_disable_intens(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
1851341e42c4SStephen Boyd 
/*
 * arm_pmu ->enable() hook: program counter hwc->idx for the event and
 * start it. The counter is disabled while being reprogrammed, and the
 * whole sequence runs under the per-CPU pmu_lock with interrupts off.
 */
static void scorpion_pmu_enable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/* Disable counter */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Set event (if destined for PMNx counters)
	 * We don't set the event for the cycle counter because we
	 * don't have the ability to perform event filtering.
	 * KRAIT_EVENT_MASK is the shared CPU/Venum flag encoding.
	 */
	if (hwc->config_base & KRAIT_EVENT_MASK)
		scorpion_evt_setup(idx, hwc->config_base);
	else if (idx != ARMV7_IDX_CYCLE_COUNTER)
		armv7_pmnc_write_evtsel(idx, hwc->config_base);

	/* Enable interrupt for this counter */
	armv7_pmnc_enable_intens(idx);

	/* Enable counter */
	armv7_pmnc_enable_counter(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
1887341e42c4SStephen Boyd 
scorpion_pmu_reset(void * info)1888341e42c4SStephen Boyd static void scorpion_pmu_reset(void *info)
1889341e42c4SStephen Boyd {
1890341e42c4SStephen Boyd 	u32 vval, fval;
1891341e42c4SStephen Boyd 	struct arm_pmu *cpu_pmu = info;
1892341e42c4SStephen Boyd 	u32 idx, nb_cnt = cpu_pmu->num_events;
1893341e42c4SStephen Boyd 
1894341e42c4SStephen Boyd 	armv7pmu_reset(info);
1895341e42c4SStephen Boyd 
1896341e42c4SStephen Boyd 	/* Clear all pmresrs */
1897341e42c4SStephen Boyd 	scorpion_write_pmresrn(0, 0);
1898341e42c4SStephen Boyd 	scorpion_write_pmresrn(1, 0);
1899341e42c4SStephen Boyd 	scorpion_write_pmresrn(2, 0);
1900341e42c4SStephen Boyd 	scorpion_write_pmresrn(3, 0);
1901341e42c4SStephen Boyd 
1902341e42c4SStephen Boyd 	venum_pre_pmresr(&vval, &fval);
1903341e42c4SStephen Boyd 	venum_write_pmresr(0);
1904341e42c4SStephen Boyd 	venum_post_pmresr(vval, fval);
1905341e42c4SStephen Boyd 
1906341e42c4SStephen Boyd 	/* Reset PMxEVNCTCR to sane default */
1907341e42c4SStephen Boyd 	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
1908341e42c4SStephen Boyd 		armv7_pmnc_select_counter(idx);
1909341e42c4SStephen Boyd 		asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
1910341e42c4SStephen Boyd 	}
1911341e42c4SStephen Boyd }
1912341e42c4SStephen Boyd 
scorpion_event_to_bit(struct perf_event * event,unsigned int region,unsigned int group)1913341e42c4SStephen Boyd static int scorpion_event_to_bit(struct perf_event *event, unsigned int region,
1914341e42c4SStephen Boyd 			      unsigned int group)
1915341e42c4SStephen Boyd {
1916341e42c4SStephen Boyd 	int bit;
1917341e42c4SStephen Boyd 	struct hw_perf_event *hwc = &event->hw;
1918341e42c4SStephen Boyd 	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1919341e42c4SStephen Boyd 
1920341e42c4SStephen Boyd 	if (hwc->config_base & VENUM_EVENT)
1921341e42c4SStephen Boyd 		bit = SCORPION_VLPM_GROUP0;
1922341e42c4SStephen Boyd 	else
1923341e42c4SStephen Boyd 		bit = scorpion_get_pmresrn_event(region);
1924341e42c4SStephen Boyd 	bit -= scorpion_get_pmresrn_event(0);
1925341e42c4SStephen Boyd 	bit += group;
1926341e42c4SStephen Boyd 	/*
1927341e42c4SStephen Boyd 	 * Lower bits are reserved for use by the counters (see
1928341e42c4SStephen Boyd 	 * armv7pmu_get_event_idx() for more info)
1929341e42c4SStephen Boyd 	 */
1930341e42c4SStephen Boyd 	bit += ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1;
1931341e42c4SStephen Boyd 
1932341e42c4SStephen Boyd 	return bit;
1933341e42c4SStephen Boyd }
1934341e42c4SStephen Boyd 
1935341e42c4SStephen Boyd /*
1936341e42c4SStephen Boyd  * We check for column exclusion constraints here.
1937341e42c4SStephen Boyd  * Two events cant use the same group within a pmresr register.
1938341e42c4SStephen Boyd  */
scorpion_pmu_get_event_idx(struct pmu_hw_events * cpuc,struct perf_event * event)1939341e42c4SStephen Boyd static int scorpion_pmu_get_event_idx(struct pmu_hw_events *cpuc,
1940341e42c4SStephen Boyd 				   struct perf_event *event)
1941341e42c4SStephen Boyd {
1942341e42c4SStephen Boyd 	int idx;
1943341e42c4SStephen Boyd 	int bit = -1;
1944341e42c4SStephen Boyd 	struct hw_perf_event *hwc = &event->hw;
1945341e42c4SStephen Boyd 	unsigned int region = EVENT_REGION(hwc->config_base);
1946341e42c4SStephen Boyd 	unsigned int group = EVENT_GROUP(hwc->config_base);
1947341e42c4SStephen Boyd 	bool venum_event = EVENT_VENUM(hwc->config_base);
1948341e42c4SStephen Boyd 	bool scorpion_event = EVENT_CPU(hwc->config_base);
1949341e42c4SStephen Boyd 
1950341e42c4SStephen Boyd 	if (venum_event || scorpion_event) {
1951341e42c4SStephen Boyd 		/* Ignore invalid events */
1952341e42c4SStephen Boyd 		if (group > 3 || region > 3)
1953341e42c4SStephen Boyd 			return -EINVAL;
1954341e42c4SStephen Boyd 
1955341e42c4SStephen Boyd 		bit = scorpion_event_to_bit(event, region, group);
1956341e42c4SStephen Boyd 		if (test_and_set_bit(bit, cpuc->used_mask))
1957341e42c4SStephen Boyd 			return -EAGAIN;
1958341e42c4SStephen Boyd 	}
1959341e42c4SStephen Boyd 
1960341e42c4SStephen Boyd 	idx = armv7pmu_get_event_idx(cpuc, event);
1961341e42c4SStephen Boyd 	if (idx < 0 && bit >= 0)
1962341e42c4SStephen Boyd 		clear_bit(bit, cpuc->used_mask);
1963341e42c4SStephen Boyd 
1964341e42c4SStephen Boyd 	return idx;
1965341e42c4SStephen Boyd }
1966341e42c4SStephen Boyd 
scorpion_pmu_clear_event_idx(struct pmu_hw_events * cpuc,struct perf_event * event)1967341e42c4SStephen Boyd static void scorpion_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
1968341e42c4SStephen Boyd 				      struct perf_event *event)
1969341e42c4SStephen Boyd {
1970341e42c4SStephen Boyd 	int bit;
1971341e42c4SStephen Boyd 	struct hw_perf_event *hwc = &event->hw;
1972341e42c4SStephen Boyd 	unsigned int region = EVENT_REGION(hwc->config_base);
1973341e42c4SStephen Boyd 	unsigned int group = EVENT_GROUP(hwc->config_base);
1974341e42c4SStephen Boyd 	bool venum_event = EVENT_VENUM(hwc->config_base);
1975341e42c4SStephen Boyd 	bool scorpion_event = EVENT_CPU(hwc->config_base);
1976341e42c4SStephen Boyd 
19777dfc8db1SSuzuki K Poulose 	armv7pmu_clear_event_idx(cpuc, event);
1978341e42c4SStephen Boyd 	if (venum_event || scorpion_event) {
1979341e42c4SStephen Boyd 		bit = scorpion_event_to_bit(event, region, group);
1980341e42c4SStephen Boyd 		clear_bit(bit, cpuc->used_mask);
1981341e42c4SStephen Boyd 	}
1982341e42c4SStephen Boyd }
1983341e42c4SStephen Boyd 
scorpion_pmu_init(struct arm_pmu * cpu_pmu)1984341e42c4SStephen Boyd static int scorpion_pmu_init(struct arm_pmu *cpu_pmu)
1985341e42c4SStephen Boyd {
1986341e42c4SStephen Boyd 	armv7pmu_init(cpu_pmu);
1987341e42c4SStephen Boyd 	cpu_pmu->name		= "armv7_scorpion";
1988341e42c4SStephen Boyd 	cpu_pmu->map_event	= scorpion_map_event;
1989341e42c4SStephen Boyd 	cpu_pmu->reset		= scorpion_pmu_reset;
1990341e42c4SStephen Boyd 	cpu_pmu->enable		= scorpion_pmu_enable_event;
1991341e42c4SStephen Boyd 	cpu_pmu->disable	= scorpion_pmu_disable_event;
1992341e42c4SStephen Boyd 	cpu_pmu->get_event_idx	= scorpion_pmu_get_event_idx;
1993341e42c4SStephen Boyd 	cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx;
19940e3038d1SMark Rutland 	return armv7_probe_num_events(cpu_pmu);
1995341e42c4SStephen Boyd }
1996341e42c4SStephen Boyd 
scorpion_mp_pmu_init(struct arm_pmu * cpu_pmu)1997341e42c4SStephen Boyd static int scorpion_mp_pmu_init(struct arm_pmu *cpu_pmu)
1998341e42c4SStephen Boyd {
1999341e42c4SStephen Boyd 	armv7pmu_init(cpu_pmu);
2000341e42c4SStephen Boyd 	cpu_pmu->name		= "armv7_scorpion_mp";
2001341e42c4SStephen Boyd 	cpu_pmu->map_event	= scorpion_map_event;
2002341e42c4SStephen Boyd 	cpu_pmu->reset		= scorpion_pmu_reset;
2003341e42c4SStephen Boyd 	cpu_pmu->enable		= scorpion_pmu_enable_event;
2004341e42c4SStephen Boyd 	cpu_pmu->disable	= scorpion_pmu_disable_event;
2005341e42c4SStephen Boyd 	cpu_pmu->get_event_idx	= scorpion_pmu_get_event_idx;
2006341e42c4SStephen Boyd 	cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx;
20070e3038d1SMark Rutland 	return armv7_probe_num_events(cpu_pmu);
2008341e42c4SStephen Boyd }
200929ba0f37SMark Rutland 
/*
 * Device-tree match table: maps each supported ARMv7 PMU compatible
 * string to its init function (consumed by arm_pmu_device_probe()).
 */
static const struct of_device_id armv7_pmu_of_device_ids[] = {
	{.compatible = "arm,cortex-a17-pmu",	.data = armv7_a17_pmu_init},
	{.compatible = "arm,cortex-a15-pmu",	.data = armv7_a15_pmu_init},
	{.compatible = "arm,cortex-a12-pmu",	.data = armv7_a12_pmu_init},
	{.compatible = "arm,cortex-a9-pmu",	.data = armv7_a9_pmu_init},
	{.compatible = "arm,cortex-a8-pmu",	.data = armv7_a8_pmu_init},
	{.compatible = "arm,cortex-a7-pmu",	.data = armv7_a7_pmu_init},
	{.compatible = "arm,cortex-a5-pmu",	.data = armv7_a5_pmu_init},
	{.compatible = "qcom,krait-pmu",	.data = krait_pmu_init},
	{.compatible = "qcom,scorpion-pmu",	.data = scorpion_pmu_init},
	{.compatible = "qcom,scorpion-mp-pmu",	.data = scorpion_mp_pmu_init},
	{},
};
202329ba0f37SMark Rutland 
/*
 * Fallback CPUID-based probe table for platforms without a device-tree
 * PMU node; only the A8 and A9 are matched this way.
 */
static const struct pmu_probe_info armv7_pmu_probe_table[] = {
	ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A8, armv7_a8_pmu_init),
	ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A9, armv7_a9_pmu_init),
	{ /* sentinel value */ }
};
202929ba0f37SMark Rutland 
203029ba0f37SMark Rutland 
/*
 * Platform-device probe: defer to the common arm_pmu probing code with
 * our DT match table and CPUID fallback table.
 */
static int armv7_pmu_device_probe(struct platform_device *pdev)
{
	return arm_pmu_device_probe(pdev, armv7_pmu_of_device_ids,
				    armv7_pmu_probe_table);
}
203643eab878SWill Deacon 
/*
 * Built-in platform driver for ARMv7 PMUs. Bind attributes are
 * suppressed so userspace cannot unbind the PMU at runtime.
 */
static struct platform_driver armv7_pmu_driver = {
	.driver		= {
		.name	= "armv7-pmu",
		.of_match_table = armv7_pmu_of_device_ids,
		.suppress_bind_attrs = true,
	},
	.probe		= armv7_pmu_device_probe,
};

builtin_platform_driver(armv7_pmu_driver);
204743eab878SWill Deacon #endif	/* CONFIG_CPU_V7 */
2048