xref: /openbmc/linux/arch/powerpc/perf/power8-pmu.c (revision 7bcae826)
1 /*
2  * Performance counter support for POWER8 processors.
3  *
4  * Copyright 2009 Paul Mackerras, IBM Corporation.
5  * Copyright 2013 Michael Ellerman, IBM Corporation.
6  *
7  * This program is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU General Public License
9  * as published by the Free Software Foundation; either version
10  * 2 of the License, or (at your option) any later version.
11  */
12 
13 #define pr_fmt(fmt)	"power8-pmu: " fmt
14 
15 #include "isa207-common.h"
16 
17 /*
18  * Some power8 event codes.
19  */
/*
 * Expand each entry of power8-events-list.h into an enumerator, so every
 * PM_* event name below carries its raw event code as its value.
 */
#define EVENT(_name, _code)	_name = _code,

enum {
#include "power8-events-list.h"
};

#undef EVENT

/* MMCRA IFM bits - POWER8 */
/* IFM1 is programmed below for the BHRB "any call" filter */
#define	POWER8_MMCRA_IFM1		0x0000000040000000UL
#define	POWER8_MMCRA_IFM2		0x0000000080000000UL
#define	POWER8_MMCRA_IFM3		0x00000000C0000000UL

/* PowerISA v2.07 format attribute structure (shared, defined elsewhere) */
extern struct attribute_group isa207_pmu_format_group;
35 
/*
 * Table of alternatives, sorted by column 0.
 *
 * Each row lists event codes that count the same thing; unused trailing
 * slots are zero.  find_alternative() relies on the column-0 ordering to
 * terminate its scan early, so keep rows sorted when adding entries.
 */
static const unsigned int event_alternatives[][MAX_ALT] = {
	{ PM_MRK_ST_CMPL,		PM_MRK_ST_CMPL_ALT },
	{ PM_BR_MRK_2PATH,		PM_BR_MRK_2PATH_ALT },
	{ PM_L3_CO_MEPF,		PM_L3_CO_MEPF_ALT },
	{ PM_MRK_DATA_FROM_L2MISS,	PM_MRK_DATA_FROM_L2MISS_ALT },
	{ PM_CMPLU_STALL_ALT,		PM_CMPLU_STALL },
	{ PM_BR_2PATH,			PM_BR_2PATH_ALT },
	{ PM_INST_DISP,			PM_INST_DISP_ALT },
	{ PM_RUN_CYC_ALT,		PM_RUN_CYC },
	{ PM_MRK_FILT_MATCH,		PM_MRK_FILT_MATCH_ALT },
	{ PM_LD_MISS_L1,		PM_LD_MISS_L1_ALT },
	{ PM_RUN_INST_CMPL_ALT,		PM_RUN_INST_CMPL },
};
50 
51 /*
52  * Scan the alternatives table for a match and return the
53  * index into the alternatives table if found, else -1.
54  */
55 static int find_alternative(u64 event)
56 {
57 	int i, j;
58 
59 	for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) {
60 		if (event < event_alternatives[i][0])
61 			break;
62 
63 		for (j = 0; j < MAX_ALT && event_alternatives[i][j]; ++j)
64 			if (event == event_alternatives[i][j])
65 				return i;
66 	}
67 
68 	return -1;
69 }
70 
71 static int power8_get_alternatives(u64 event, unsigned int flags, u64 alt[])
72 {
73 	int i, j, num_alt = 0;
74 	u64 alt_event;
75 
76 	alt[num_alt++] = event;
77 
78 	i = find_alternative(event);
79 	if (i >= 0) {
80 		/* Filter out the original event, it's already in alt[0] */
81 		for (j = 0; j < MAX_ALT; ++j) {
82 			alt_event = event_alternatives[i][j];
83 			if (alt_event && alt_event != event)
84 				alt[num_alt++] = alt_event;
85 		}
86 	}
87 
88 	if (flags & PPMU_ONLY_COUNT_RUN) {
89 		/*
90 		 * We're only counting in RUN state, so PM_CYC is equivalent to
91 		 * PM_RUN_CYC and PM_INST_CMPL === PM_RUN_INST_CMPL.
92 		 */
93 		j = num_alt;
94 		for (i = 0; i < num_alt; ++i) {
95 			switch (alt[i]) {
96 			case PM_CYC:
97 				alt[j++] = PM_RUN_CYC;
98 				break;
99 			case PM_RUN_CYC:
100 				alt[j++] = PM_CYC;
101 				break;
102 			case PM_INST_CMPL:
103 				alt[j++] = PM_RUN_INST_CMPL;
104 				break;
105 			case PM_RUN_INST_CMPL:
106 				alt[j++] = PM_INST_CMPL;
107 				break;
108 			}
109 		}
110 		num_alt = j;
111 	}
112 
113 	return num_alt;
114 }
115 
/* Generic perf event names exported via sysfs, mapped to raw PM_* codes */
GENERIC_EVENT_ATTR(cpu-cycles,			PM_CYC);
GENERIC_EVENT_ATTR(stalled-cycles-frontend,	PM_GCT_NOSLOT_CYC);
GENERIC_EVENT_ATTR(stalled-cycles-backend,	PM_CMPLU_STALL);
GENERIC_EVENT_ATTR(instructions,		PM_INST_CMPL);
GENERIC_EVENT_ATTR(branch-instructions,		PM_BRU_FIN);
GENERIC_EVENT_ATTR(branch-misses,		PM_BR_MPRED_CMPL);
GENERIC_EVENT_ATTR(cache-references,		PM_LD_REF_L1);
GENERIC_EVENT_ATTR(cache-misses,		PM_LD_MISS_L1);

/* Generalized cache event names, likewise exported via sysfs */
CACHE_EVENT_ATTR(L1-dcache-load-misses,		PM_LD_MISS_L1);
CACHE_EVENT_ATTR(L1-dcache-loads,		PM_LD_REF_L1);

CACHE_EVENT_ATTR(L1-dcache-prefetches,		PM_L1_PREF);
CACHE_EVENT_ATTR(L1-dcache-store-misses,	PM_ST_MISS_L1);
CACHE_EVENT_ATTR(L1-icache-load-misses,		PM_L1_ICACHE_MISS);
CACHE_EVENT_ATTR(L1-icache-loads,		PM_INST_FROM_L1);
CACHE_EVENT_ATTR(L1-icache-prefetches,		PM_IC_PREF_WRITE);

CACHE_EVENT_ATTR(LLC-load-misses,		PM_DATA_FROM_L3MISS);
CACHE_EVENT_ATTR(LLC-loads,			PM_DATA_FROM_L3);
CACHE_EVENT_ATTR(LLC-prefetches,		PM_L3_PREF_ALL);
CACHE_EVENT_ATTR(LLC-store-misses,		PM_L2_ST_MISS);
CACHE_EVENT_ATTR(LLC-stores,			PM_L2_ST);

CACHE_EVENT_ATTR(branch-load-misses,		PM_BR_MPRED_CMPL);
CACHE_EVENT_ATTR(branch-loads,			PM_BRU_FIN);
CACHE_EVENT_ATTR(dTLB-load-misses,		PM_DTLB_MISS);
CACHE_EVENT_ATTR(iTLB-load-misses,		PM_ITLB_MISS);
144 
/*
 * All event attributes published in the "events" sysfs group.  Must stay
 * in sync with the GENERIC_EVENT_ATTR/CACHE_EVENT_ATTR definitions above;
 * the array is NULL-terminated as required by the attribute-group API.
 */
static struct attribute *power8_events_attr[] = {
	GENERIC_EVENT_PTR(PM_CYC),
	GENERIC_EVENT_PTR(PM_GCT_NOSLOT_CYC),
	GENERIC_EVENT_PTR(PM_CMPLU_STALL),
	GENERIC_EVENT_PTR(PM_INST_CMPL),
	GENERIC_EVENT_PTR(PM_BRU_FIN),
	GENERIC_EVENT_PTR(PM_BR_MPRED_CMPL),
	GENERIC_EVENT_PTR(PM_LD_REF_L1),
	GENERIC_EVENT_PTR(PM_LD_MISS_L1),

	CACHE_EVENT_PTR(PM_LD_MISS_L1),
	CACHE_EVENT_PTR(PM_LD_REF_L1),
	CACHE_EVENT_PTR(PM_L1_PREF),
	CACHE_EVENT_PTR(PM_ST_MISS_L1),
	CACHE_EVENT_PTR(PM_L1_ICACHE_MISS),
	CACHE_EVENT_PTR(PM_INST_FROM_L1),
	CACHE_EVENT_PTR(PM_IC_PREF_WRITE),
	CACHE_EVENT_PTR(PM_DATA_FROM_L3MISS),
	CACHE_EVENT_PTR(PM_DATA_FROM_L3),
	CACHE_EVENT_PTR(PM_L3_PREF_ALL),
	CACHE_EVENT_PTR(PM_L2_ST_MISS),
	CACHE_EVENT_PTR(PM_L2_ST),

	CACHE_EVENT_PTR(PM_BR_MPRED_CMPL),
	CACHE_EVENT_PTR(PM_BRU_FIN),

	CACHE_EVENT_PTR(PM_DTLB_MISS),
	CACHE_EVENT_PTR(PM_ITLB_MISS),
	NULL
};
175 
/* sysfs "events" directory for this PMU */
static struct attribute_group power8_pmu_events_group = {
	.name = "events",
	.attrs = power8_events_attr,
};
180 
/* All sysfs attribute groups registered for this PMU (NULL-terminated) */
static const struct attribute_group *power8_pmu_attr_groups[] = {
	&isa207_pmu_format_group,
	&power8_pmu_events_group,
	NULL,
};
186 
/* Map generic perf hardware event types to POWER8 raw event codes */
static int power8_generic_events[] = {
	[PERF_COUNT_HW_CPU_CYCLES] =			PM_CYC,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =	PM_GCT_NOSLOT_CYC,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =	PM_CMPLU_STALL,
	[PERF_COUNT_HW_INSTRUCTIONS] =			PM_INST_CMPL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] =		PM_BRU_FIN,
	[PERF_COUNT_HW_BRANCH_MISSES] =			PM_BR_MPRED_CMPL,
	[PERF_COUNT_HW_CACHE_REFERENCES] =		PM_LD_REF_L1,
	[PERF_COUNT_HW_CACHE_MISSES] =			PM_LD_MISS_L1,
};
197 
198 static u64 power8_bhrb_filter_map(u64 branch_sample_type)
199 {
200 	u64 pmu_bhrb_filter = 0;
201 
202 	/* BHRB and regular PMU events share the same privilege state
203 	 * filter configuration. BHRB is always recorded along with a
204 	 * regular PMU event. As the privilege state filter is handled
205 	 * in the basic PMC configuration of the accompanying regular
206 	 * PMU event, we ignore any separate BHRB specific request.
207 	 */
208 
209 	/* No branch filter requested */
210 	if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY)
211 		return pmu_bhrb_filter;
212 
213 	/* Invalid branch filter options - HW does not support */
214 	if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
215 		return -1;
216 
217 	if (branch_sample_type & PERF_SAMPLE_BRANCH_IND_CALL)
218 		return -1;
219 
220 	if (branch_sample_type & PERF_SAMPLE_BRANCH_CALL)
221 		return -1;
222 
223 	if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_CALL) {
224 		pmu_bhrb_filter |= POWER8_MMCRA_IFM1;
225 		return pmu_bhrb_filter;
226 	}
227 
228 	/* Every thing else is unsupported */
229 	return -1;
230 }
231 
232 static void power8_config_bhrb(u64 pmu_bhrb_filter)
233 {
234 	/* Enable BHRB filter in PMU */
235 	mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter));
236 }
237 
238 #define C(x)	PERF_COUNT_HW_CACHE_##x
239 
/*
 * Table of generalized cache-related events, indexed as
 * [cache][operation][result].
 * 0 means not supported, -1 means nonsensical, other values
 * are event codes.
 */
static int power8_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
	[ C(L1D) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = PM_LD_REF_L1,
			[ C(RESULT_MISS)   ] = PM_LD_MISS_L1,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = PM_ST_MISS_L1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = PM_L1_PREF,
			[ C(RESULT_MISS)   ] = 0,
		},
	},
	[ C(L1I) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = PM_INST_FROM_L1,
			[ C(RESULT_MISS)   ] = PM_L1_ICACHE_MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = PM_L1_DEMAND_WRITE,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = PM_IC_PREF_WRITE,
			[ C(RESULT_MISS)   ] = 0,
		},
	},
	/* Last-level cache: LLC reads map to L3, LLC writes to L2 events */
	[ C(LL) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = PM_DATA_FROM_L3,
			[ C(RESULT_MISS)   ] = PM_DATA_FROM_L3MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = PM_L2_ST,
			[ C(RESULT_MISS)   ] = PM_L2_ST_MISS,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = PM_L3_PREF_ALL,
			[ C(RESULT_MISS)   ] = 0,
		},
	},
	[ C(DTLB) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = PM_DTLB_MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
	},
	[ C(ITLB) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = PM_ITLB_MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
	},
	[ C(BPU) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = PM_BRU_FIN,
			[ C(RESULT_MISS)   ] = PM_BR_MPRED_CMPL,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
	},
	/* No node-level events are provided on POWER8 */
	[ C(NODE) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
	},
};
345 
346 #undef C
347 
/* POWER8 PMU description registered with the core powerpc perf code */
static struct power_pmu power8_pmu = {
	.name			= "POWER8",
	.n_counter		= MAX_PMU_COUNTERS,
	/* Each event may expand to itself plus up to MAX_ALT alternatives */
	.max_alternatives	= MAX_ALT + 1,
	.add_fields		= ISA207_ADD_FIELDS,
	.test_adder		= ISA207_TEST_ADDER,
	.compute_mmcr		= isa207_compute_mmcr,
	.config_bhrb		= power8_config_bhrb,
	.bhrb_filter_map	= power8_bhrb_filter_map,
	.get_constraint		= isa207_get_constraint,
	.get_alternatives	= power8_get_alternatives,
	.disable_pmc		= isa207_disable_pmc,
	.flags			= PPMU_HAS_SIER | PPMU_ARCH_207S,
	.n_generic		= ARRAY_SIZE(power8_generic_events),
	.generic_events		= power8_generic_events,
	.cache_events		= &power8_cache_events,
	.attr_groups		= power8_pmu_attr_groups,
	/* BHRB holds 32 entries on POWER8 */
	.bhrb_nr		= 32,
};
367 
368 static int __init init_power8_pmu(void)
369 {
370 	int rc;
371 
372 	if (!cur_cpu_spec->oprofile_cpu_type ||
373 	    strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power8"))
374 		return -ENODEV;
375 
376 	rc = register_power_pmu(&power8_pmu);
377 	if (rc)
378 		return rc;
379 
380 	/* Tell userspace that EBB is supported */
381 	cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_EBB;
382 
383 	if (cpu_has_feature(CPU_FTR_PMAO_BUG))
384 		pr_info("PMAO restore workaround active.\n");
385 
386 	return 0;
387 }
388 early_initcall(init_power8_pmu);
389