xref: /openbmc/linux/arch/arc/include/asm/perf_event.h (revision add48ba4)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * Linux performance counter support for ARC
4  *
5  * Copyright (C) 2014-2015 Synopsys, Inc. (www.synopsys.com)
6  * Copyright (C) 2011-2013 Synopsys, Inc. (www.synopsys.com)
7  */
8 
9 #ifndef __ASM_PERF_EVENT_H
10 #define __ASM_PERF_EVENT_H
11 
12 /* Max number of counters that PCT block may ever have */
13 #define ARC_PERF_MAX_COUNTERS	32
14 
/*
 * Countable Conditions (CC) aux registers: build config register plus the
 * INDEX/NAME0/NAME1 pair used to enumerate the h/w condition names at probe
 * (see the mapping comment above arc_pmu_ev_hw_map below).
 */
15 #define ARC_REG_CC_BUILD	0xF6
16 #define ARC_REG_CC_INDEX	0x240
17 #define ARC_REG_CC_NAME0	0x241
18 #define ARC_REG_CC_NAME1	0x242
19 
/*
 * Performance Counter (PCT) aux registers.
 * COUNTL/COUNTH and SNAPL/SNAPH are presumably the low/high halves of the
 * currently indexed counter and its snapshot — confirm against the ARC PRM.
 */
20 #define ARC_REG_PCT_BUILD	0xF5
21 #define ARC_REG_PCT_COUNTL	0x250
22 #define ARC_REG_PCT_COUNTH	0x251
23 #define ARC_REG_PCT_SNAPL	0x252
24 #define ARC_REG_PCT_SNAPH	0x253
25 #define ARC_REG_PCT_CONFIG	0x254
26 #define ARC_REG_PCT_CONTROL	0x255
27 #define ARC_REG_PCT_INDEX	0x256
28 #define ARC_REG_PCT_INT_CNTL	0x25C
29 #define ARC_REG_PCT_INT_CNTH	0x25D
30 #define ARC_REG_PCT_INT_CTRL	0x25E
31 #define ARC_REG_PCT_INT_ACT	0x25F
32 
/* Bits in ARC_REG_PCT_CONFIG: which privilege modes the counter counts in */
33 #define ARC_REG_PCT_CONFIG_USER	(1 << 18)	/* count in user mode */
34 #define ARC_REG_PCT_CONFIG_KERN	(1 << 19)	/* count in kernel mode */
35 
/* Bits in ARC_REG_PCT_CONTROL */
36 #define ARC_REG_PCT_CONTROL_CC	(1 << 16)	/* clear counts */
37 #define ARC_REG_PCT_CONTROL_SN	(1 << 17)	/* snapshot */
38 
/*
 * Layout of the PCT build config register (ARC_REG_PCT_BUILD, 0xF5).
 * Per the usual ARC BCR convention, 'v' is presumably the block version
 * (0 => PCT absent) and 'c' the number of implemented counters; 'r' looks
 * reserved. The meanings of 'i', 's' and 'm' are not visible from this
 * header — NOTE(review): confirm against the ARC Programmer's Reference.
 * Field order is flipped for big-endian so absolute bit positions match.
 */
39 struct arc_reg_pct_build {
40 #ifdef CONFIG_CPU_BIG_ENDIAN
41 	unsigned int m:8, c:8, r:5, i:1, s:2, v:8;
42 #else
43 	unsigned int v:8, s:2, i:1, r:5, c:8, m:8;
44 #endif
45 };
46 
/*
 * Layout of the Countable Conditions build config register
 * (ARC_REG_CC_BUILD, 0xF6). 'v' is presumably the block version and 'c'
 * the number of available h/w conditions, with 'r' reserved — NOTE(review):
 * confirm against the ARC Programmer's Reference.
 * Field order is flipped for big-endian so absolute bit positions match.
 */
47 struct arc_reg_cc_build {
48 #ifdef CONFIG_CPU_BIG_ENDIAN
49 	unsigned int c:16, r:8, v:8;
50 #else
51 	unsigned int v:8, r:8, c:16;
52 #endif
53 };
54 
/*
 * ARC-specific event ids, carved out just past the generic PERF_COUNT_HW_*
 * space. Their h/w condition names are listed in arc_pmu_ev_hw_map below.
 */
55 #define PERF_COUNT_ARC_DCLM	(PERF_COUNT_HW_MAX + 0)	/* D-cache Load Miss */
56 #define PERF_COUNT_ARC_DCSM	(PERF_COUNT_HW_MAX + 1)	/* D-cache Store Miss */
57 #define PERF_COUNT_ARC_ICM	(PERF_COUNT_HW_MAX + 2)	/* I-cache Miss */
58 #define PERF_COUNT_ARC_BPOK	(PERF_COUNT_HW_MAX + 3)	/* branch predicted ok */
59 #define PERF_COUNT_ARC_EDTLB	(PERF_COUNT_HW_MAX + 4)	/* D-TLB Miss */
60 #define PERF_COUNT_ARC_EITLB	(PERF_COUNT_HW_MAX + 5)	/* I-TLB Miss */
61 #define PERF_COUNT_ARC_LDC	(PERF_COUNT_HW_MAX + 6)	/* mem read, cached */
62 #define PERF_COUNT_ARC_STC	(PERF_COUNT_HW_MAX + 7)	/* mem write, cached */
63 
/* One past the last valid event id (generic + ARC-specific) */
64 #define PERF_COUNT_ARC_HW_MAX	(PERF_COUNT_HW_MAX + 8)
65 
66 /*
67  * Some ARC pct quirks:
68  *
69  * PERF_COUNT_HW_STALLED_CYCLES_BACKEND
70  * PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
71  *	The ARC 700 can either measure stalls per pipeline stage, or all stalls
72  *	combined; for now we assign all stalls to STALLED_CYCLES_BACKEND
73  *	and all pipeline flushes (e.g. caused by mispredicts, etc.) to
74  *	STALLED_CYCLES_FRONTEND.
75  *
76  *	We could start multiple performance counters and combine everything
77  *	afterwards, but that makes it complicated.
78  *
79  *	Note that I$ cache misses aren't counted by either of the two!
80  */
81 
82 /*
83  * ARC PCT has hardware conditions with fixed "names" but variable "indexes"
84  * (based on a specific RTL build)
85  * Below is the static map between perf generic/arc specific event_id and
86  * h/w condition names.
87  * At the time of probe, we loop through each index and find its name to
88  * complete the mapping of perf event_id to h/w index, as the latter is
89  * needed to actually program the counter
90  */
91 static const char * const arc_pmu_ev_hw_map[] = {
	/* count cycles */
	/* one h/w condition covers all three cycle flavors; "crun" is
	 * presumably "core running" cycles — confirm against the ARC PRM */
	[PERF_COUNT_HW_CPU_CYCLES] = "crun",
	[PERF_COUNT_HW_REF_CPU_CYCLES] = "crun",
	[PERF_COUNT_HW_BUS_CYCLES] = "crun",

	/* see the "ARC pct quirks" note above for this flush/stall split */
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = "bflush",
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = "bstall",

	/* counts condition */
	[PERF_COUNT_HW_INSTRUCTIONS] = "iall",
	/* All jump instructions that are taken */
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmptak",
#ifdef CONFIG_ISA_ARCV2
	[PERF_COUNT_HW_BRANCH_MISSES] = "bpmp",
#else
	[PERF_COUNT_ARC_BPOK]         = "bpok",	  /* NP-NT, PT-T, PNT-NT */
	[PERF_COUNT_HW_BRANCH_MISSES] = "bpfail", /* NP-T, PT-NT, PNT-T */
#endif
	[PERF_COUNT_ARC_LDC] = "imemrdc",	/* Instr: mem read cached */
	[PERF_COUNT_ARC_STC] = "imemwrc",	/* Instr: mem write cached */

	[PERF_COUNT_ARC_DCLM] = "dclm",		/* D-cache Load Miss */
	[PERF_COUNT_ARC_DCSM] = "dcsm",		/* D-cache Store Miss */
	[PERF_COUNT_ARC_ICM] = "icm",		/* I-cache Miss */
	[PERF_COUNT_ARC_EDTLB] = "edtlb",	/* D-TLB Miss */
	[PERF_COUNT_ARC_EITLB] = "eitlb",	/* I-TLB Miss */

	/* generic cache events reuse the same conditions as LDC/DCLM above */
	[PERF_COUNT_HW_CACHE_REFERENCES] = "imemrdc",	/* Instr: mem read cached */
	[PERF_COUNT_HW_CACHE_MISSES] = "dclm",		/* D-cache Load Miss */
};
122 
/* Shorthand for the generic PERF_COUNT_HW_CACHE_* namespace */
123 #define C(_x)			PERF_COUNT_HW_CACHE_##_x
/* Sentinel for (cache, op, result) combos the h/w cannot count */
124 #define CACHE_OP_UNSUPPORTED	0xffff
125 
/*
 * Map of generic (cache, op, result) triples to the event ids defined
 * above; slots the h/w cannot count hold CACHE_OP_UNSUPPORTED.
 */
126 static const unsigned arc_pmu_cache_map[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= PERF_COUNT_ARC_LDC,
			[C(RESULT_MISS)]	= PERF_COUNT_ARC_DCLM,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= PERF_COUNT_ARC_STC,
			[C(RESULT_MISS)]	= PERF_COUNT_ARC_DCSM,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			/* no dedicated I$ access condition in the map above;
			 * instruction count serves as the access proxy */
			[C(RESULT_ACCESS)]	= PERF_COUNT_HW_INSTRUCTIONS,
			[C(RESULT_MISS)]	= PERF_COUNT_ARC_ICM,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= PERF_COUNT_ARC_LDC,
			[C(RESULT_MISS)]	= PERF_COUNT_ARC_EDTLB,
		},
			/* DTLB LD/ST Miss not segregated by h/w */
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= PERF_COUNT_ARC_EITLB,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = PERF_COUNT_HW_BRANCH_INSTRUCTIONS,
			[C(RESULT_MISS)]	= PERF_COUNT_HW_BRANCH_MISSES,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
};
227 
228 #endif /* __ASM_PERF_EVENT_H */
229