xref: /openbmc/linux/arch/powerpc/perf/e500-pmu.c (revision ec3eb9d9)
12874c5fdSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later
2f2699491SMichael Ellerman /*
3f2699491SMichael Ellerman  * Performance counter support for e500 family processors.
4f2699491SMichael Ellerman  *
5f2699491SMichael Ellerman  * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
6f2699491SMichael Ellerman  * Copyright 2010 Freescale Semiconductor, Inc.
7f2699491SMichael Ellerman  */
8f2699491SMichael Ellerman #include <linux/string.h>
9f2699491SMichael Ellerman #include <linux/perf_event.h>
10f2699491SMichael Ellerman #include <asm/reg.h>
11f2699491SMichael Ellerman #include <asm/cputable.h>
12f2699491SMichael Ellerman 
/*
 * Map of generic hardware event types to hardware events
 * Zero if unsupported
 */
static int e500_generic_events[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = 1,
	[PERF_COUNT_HW_INSTRUCTIONS] = 2,
	[PERF_COUNT_HW_CACHE_MISSES] = 41, /* Data L1 cache reloads */
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 12,	/* also BPU OP_READ access below */
	[PERF_COUNT_HW_BRANCH_MISSES] = 15,		/* also BPU OP_READ miss below */
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 18,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 19,
};
26f2699491SMichael Ellerman 
/* Shorthand for the PERF_COUNT_HW_CACHE_* index constants used below */
#define C(x)	PERF_COUNT_HW_CACHE_##x

/*
 * Table of generalized cache-related events.
 * 0 means not supported, -1 means nonsensical, other values
 * are event codes.
 */
static int e500_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
	/*
	 * D-cache misses are not split into read/write/prefetch;
	 * use raw event 41.
	 */
	[C(L1D)] = {		/* 	RESULT_ACCESS	RESULT_MISS */
		[C(OP_READ)] = {	27,		0	},
		[C(OP_WRITE)] = {	28,		0	},
		[C(OP_PREFETCH)] = {	29,		0	},
	},
	[C(L1I)] = {		/* 	RESULT_ACCESS	RESULT_MISS */
		[C(OP_READ)] = {	2,		60	},
		[C(OP_WRITE)] = {	-1,		-1	},
		[C(OP_PREFETCH)] = {	0,		0	},
	},
	/*
	 * Assuming LL means L2, it's not a good match for this model.
	 * It allocates only on L1 castout or explicit prefetch, and
	 * does not have separate read/write events (but it does have
	 * separate instruction/data events).
	 */
	[C(LL)] = {		/* 	RESULT_ACCESS	RESULT_MISS */
		[C(OP_READ)] = {	0,		0	},
		[C(OP_WRITE)] = {	0,		0	},
		[C(OP_PREFETCH)] = {	0,		0	},
	},
	/*
	 * There are data/instruction MMU misses, but that's a miss on
	 * the chip's internal level-one TLB which is probably not
	 * what the user wants.  Instead, unified level-two TLB misses
	 * are reported here.
	 */
	[C(DTLB)] = {		/* 	RESULT_ACCESS	RESULT_MISS */
		[C(OP_READ)] = {	26,		66	},
		[C(OP_WRITE)] = {	-1,		-1	},
		[C(OP_PREFETCH)] = {	-1,		-1	},
	},
	/* Same codes as the generic branch-instructions/branch-misses events */
	[C(BPU)] = {		/* 	RESULT_ACCESS	RESULT_MISS */
		[C(OP_READ)] = {	12,		15 	},
		[C(OP_WRITE)] = {	-1,		-1	},
		[C(OP_PREFETCH)] = {	-1,		-1	},
	},
	/* No NUMA-node events on this uniprocessor-node core */
	[C(NODE)] = {		/* 	RESULT_ACCESS	RESULT_MISS */
		[C(OP_READ)] = {	-1,		-1 	},
		[C(OP_WRITE)] = {	-1,		-1	},
		[C(OP_PREFETCH)] = {	-1,		-1	},
	},
};
82f2699491SMichael Ellerman 
83f2699491SMichael Ellerman static int num_events = 128;
84f2699491SMichael Ellerman 
85f2699491SMichael Ellerman /* Upper half of event id is PMLCb, for threshold events */
/*
 * Validate a raw event id and translate it into the FSL_EMB_EVENT_*
 * flag word used by the shared fsl_emb PMU code.
 *
 * Upper half of event id is PMLCb, for threshold events.  Returns 0
 * for an invalid event id (out of range, or threshold bits set on an
 * event that does not support thresholding).
 */
static u64 e500_xlate_event(u64 event_id)
{
	u32 event_low = (u32)event_id;
	u64 thresh_bits = event_id &
			  (FSL_EMB_EVENT_THRESHMUL | FSL_EMB_EVENT_THRESH);

	if (event_low >= num_events)
		return 0;

	/* Events 76..81 are the restricted, threshold-capable events */
	if (event_low >= 76 && event_low <= 81)
		return FSL_EMB_EVENT_VALID | FSL_EMB_EVENT_RESTRICTED |
		       thresh_bits;

	/* Threshold requested on non-threshold event */
	if (thresh_bits)
		return 0;

	return FSL_EMB_EVENT_VALID;
}
108f2699491SMichael Ellerman 
/* Chip-specific PMU description handed to the shared fsl_emb driver */
static struct fsl_emb_pmu e500_pmu = {
	.name			= "e500 family",
	.n_counter		= 4,	/* total performance counters */
	.n_restricted		= 2,	/* counters usable by restricted events */
	.xlate_event		= e500_xlate_event,
	.n_generic		= ARRAY_SIZE(e500_generic_events),
	.generic_events		= e500_generic_events,
	.cache_events		= &e500_cache_events,
};
118f2699491SMichael Ellerman 
init_e500_pmu(void)119f2699491SMichael Ellerman static int init_e500_pmu(void)
120f2699491SMichael Ellerman {
121*ec3eb9d9SRashmica Gupta 	unsigned int pvr = mfspr(SPRN_PVR);
122f2699491SMichael Ellerman 
123*ec3eb9d9SRashmica Gupta 	/* ec500mc */
124*ec3eb9d9SRashmica Gupta 	if (PVR_VER(pvr) == PVR_VER_E500MC || PVR_VER(pvr) == PVR_VER_E5500)
125f2699491SMichael Ellerman 		num_events = 256;
126*ec3eb9d9SRashmica Gupta 	/* e500 */
127*ec3eb9d9SRashmica Gupta 	else if (PVR_VER(pvr) != PVR_VER_E500V1 && PVR_VER(pvr) != PVR_VER_E500V2)
128f2699491SMichael Ellerman 		return -ENODEV;
129f2699491SMichael Ellerman 
130f2699491SMichael Ellerman 	return register_fsl_emb_pmu(&e500_pmu);
131f2699491SMichael Ellerman }
132f2699491SMichael Ellerman 
133f2699491SMichael Ellerman early_initcall(init_e500_pmu);
134