// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Performance counter support for MPC7450-family processors.
 *
 * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
 */
#include <linux/string.h>
#include <linux/perf_event.h>
#include <asm/reg.h>
#include <asm/cputable.h>

#define N_COUNTER	6	/* Number of hardware counters */
#define MAX_ALT		3	/* Maximum number of event alternative codes */

/*
 * Bits in event code for MPC7450 family
 */
#define PM_THRMULT_MSKS	0x40000
#define PM_THRESH_SH	12
#define PM_THRESH_MSK	0x3f
#define PM_PMC_SH	8
#define PM_PMC_MSK	7
#define PM_PMCSEL_MSK	0x7f
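/*
 * Illustrative decode of a raw event code using the fields above:
 * 0x217 (PM_L1_DCACHE_MISS, see event_alternatives[] below) has
 * PMC field (0x217 >> PM_PMC_SH) & PM_PMC_MSK = 2 and PMCSEL
 * 0x217 & PM_PMCSEL_MSK = 0x17, i.e. it selects event 0x17 and
 * must be counted on PMC2.
 */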

/*
 * Classify events according to how specific their PMC requirements are.
 * Result is:
 *	0: can go on any PMC
 *	1: can go on PMCs 1-4
 *	2: can go on PMCs 1,2,4
 *	3: can go on PMCs 1 or 2
 *	4: can only go on one PMC
 *	-1: event code is invalid
 */
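/*
 * For example, raw event 2 (used below as the generic
 * PERF_COUNT_HW_INSTRUCTIONS event) has no PMC field and a PMCSEL
 * value <= 7, so it is class 1 and may be scheduled on PMCs 1-4.
 */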
#define N_CLASSES	5

static int mpc7450_classify_event(u32 event)
{
	int pmc;

	pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
	if (pmc) {
		if (pmc > N_COUNTER)
			return -1;
		return 4;
	}
	event &= PM_PMCSEL_MSK;
	if (event <= 1)
		return 0;
	if (event <= 7)
		return 1;
	if (event <= 13)
		return 2;
	if (event <= 22)
		return 3;
	return -1;
}

/*
 * Events using threshold and possible threshold scale:
 *	code	scale?	name
 *	11e	N	PM_INSTQ_EXCEED_CYC
 *	11f	N	PM_ALTV_IQ_EXCEED_CYC
 *	128	Y	PM_DTLB_SEARCH_EXCEED_CYC
 *	12b	Y	PM_LD_MISS_EXCEED_L1_CYC
 *	220	N	PM_CQ_EXCEED_CYC
 *	30c	N	PM_GPR_RB_EXCEED_CYC
 *	30d	?	PM_FPR_IQ_EXCEED_CYC ?
 *	311	Y	PM_ITLB_SEARCH_EXCEED
 *	410	N	PM_GPR_IQ_EXCEED_CYC
 */

/*
 * Return use of threshold and threshold scale bits:
 * 0 = uses neither, 1 = uses threshold, 2 = uses both
 */
static int mpc7450_threshold_use(u32 event)
{
	int pmc, sel;

	pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
	sel = event & PM_PMCSEL_MSK;
	switch (pmc) {
	case 1:
		if (sel == 0x1e || sel == 0x1f)
			return 1;
		if (sel == 0x28 || sel == 0x2b)
			return 2;
		break;
	case 2:
		if (sel == 0x20)
			return 1;
		break;
	case 3:
		if (sel == 0xc || sel == 0xd)
			return 1;
		if (sel == 0x11)
			return 2;
		break;
	case 4:
		if (sel == 0x10)
			return 1;
		break;
	}
	return 0;
}

/*
 * Layout of constraint bits:
 * 33222222222211111111110000000000
 * 10987654321098765432109876543210
 *  |<    ><  > < > < ><><><><><><>
 *  TS TV   G4   G3  G2P6P5P4P3P2P1
 *
 * P1 - P6
 *	0 - 11: Count of events needing PMC1 .. PMC6
 *
 * G2
 *	12 - 14: Count of events needing PMC1 or PMC2
 *
 * G3
 *	16 - 18: Count of events needing PMC1, PMC2 or PMC4
 *
 * G4
 *	20 - 23: Count of events needing PMC1, PMC2, PMC3 or PMC4
 *
 * TV
 *	24 - 29: Threshold value requested
 *
 * TS
 *	30: Threshold scale value requested
 */
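/*
 * Illustrative example: a class-4 event that must run on PMC3 gets
 * mask 0x00800020 and value 0x00100010 from pmcbits[] below, i.e. it
 * bumps the PMC3 count (P3) and the PMC1-4 group count (G4) by one.
 * The generic powerpc perf code sums these value fields over all
 * requested events, using the add_fields/test_adder values in
 * struct power_pmu below to detect when any field overflows the
 * number of PMCs available to that group.
 */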

static u32 pmcbits[N_COUNTER][2] = {
	{ 0x00844002, 0x00111001 },	/* PMC1 mask, value: P1,G2,G3,G4 */
	{ 0x00844008, 0x00111004 },	/* PMC2: P2,G2,G3,G4 */
	{ 0x00800020, 0x00100010 },	/* PMC3: P3,G4 */
	{ 0x00840080, 0x00110040 },	/* PMC4: P4,G3,G4 */
	{ 0x00000200, 0x00000100 },	/* PMC5: P5 */
	{ 0x00000800, 0x00000400 }	/* PMC6: P6 */
};

static u32 classbits[N_CLASSES - 1][2] = {
	{ 0x00000000, 0x00000000 },	/* class 0: no constraint */
	{ 0x00800000, 0x00100000 },	/* class 1: G4 */
	{ 0x00040000, 0x00010000 },	/* class 2: G3 */
	{ 0x00004000, 0x00001000 },	/* class 3: G2 */
};

static int mpc7450_get_constraint(u64 event, unsigned long *maskp,
				  unsigned long *valp, u64 event_config1 __maybe_unused)
{
	int pmc, class;
	u32 mask, value;
	int thresh, tuse;

	class = mpc7450_classify_event(event);
	if (class < 0)
		return -1;
	if (class == 4) {
		pmc = ((unsigned int)event >> PM_PMC_SH) & PM_PMC_MSK;
		mask  = pmcbits[pmc - 1][0];
		value = pmcbits[pmc - 1][1];
	} else {
		mask  = classbits[class][0];
		value = classbits[class][1];
	}

	tuse = mpc7450_threshold_use(event);
	if (tuse) {
		thresh = ((unsigned int)event >> PM_THRESH_SH) & PM_THRESH_MSK;
		mask  |= 0x3f << 24;
		value |= thresh << 24;
		if (tuse == 2) {
			mask |= 0x40000000;
			if ((unsigned int)event & PM_THRMULT_MSKS)
				value |= 0x40000000;
		}
	}

	*maskp = mask;
	*valp = value;
	return 0;
}

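/*
 * Table of event alternatives; rows are sorted by column 0 so that
 * find_alternative() below can stop scanning early.
 */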
static const unsigned int event_alternatives[][MAX_ALT] = {
	{ 0x217, 0x317 },		/* PM_L1_DCACHE_MISS */
	{ 0x418, 0x50f, 0x60f },	/* PM_SNOOP_RETRY */
	{ 0x502, 0x602 },		/* PM_L2_HIT */
	{ 0x503, 0x603 },		/* PM_L3_HIT */
	{ 0x504, 0x604 },		/* PM_L2_ICACHE_MISS */
	{ 0x505, 0x605 },		/* PM_L3_ICACHE_MISS */
	{ 0x506, 0x606 },		/* PM_L2_DCACHE_MISS */
	{ 0x507, 0x607 },		/* PM_L3_DCACHE_MISS */
	{ 0x50a, 0x623 },		/* PM_LD_HIT_L3 */
	{ 0x50b, 0x624 },		/* PM_ST_HIT_L3 */
	{ 0x50d, 0x60d },		/* PM_L2_TOUCH_HIT */
	{ 0x50e, 0x60e },		/* PM_L3_TOUCH_HIT */
	{ 0x512, 0x612 },		/* PM_INT_LOCAL */
	{ 0x513, 0x61d },		/* PM_L2_MISS */
	{ 0x514, 0x61e },		/* PM_L3_MISS */
};

/*
 * Scan the alternatives table for a match and return the row index
 * if found, else -1.
 */
static int find_alternative(u32 event)
{
	int i, j;

	for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) {
		if (event < event_alternatives[i][0])
			break;
		for (j = 0; j < MAX_ALT && event_alternatives[i][j]; ++j)
			if (event == event_alternatives[i][j])
				return i;
	}
	return -1;
}

static int mpc7450_get_alternatives(u64 event, unsigned int flags, u64 alt[])
{
	int i, j, nalt = 1;
	u32 ae;

	alt[0] = event;
	i = find_alternative((u32)event);
	if (i >= 0) {
		for (j = 0; j < MAX_ALT; ++j) {
			ae = event_alternatives[i][j];
			if (ae && ae != (u32)event)
				alt[nalt++] = ae;
		}
	}
	return nalt;
}

/*
 * Bitmaps of which PMCs each class can use for classes 0 - 3.
 * Bit i is set if PMC i+1 is usable.
 * (Class 4 events name their PMC explicitly, so the last entry
 * is unused.)
 */
static const u8 classmap[N_CLASSES] = {
	0x3f, 0x0f, 0x0b, 0x03, 0
};

/* Bit position and width of each PMCSEL field */
static const int pmcsel_shift[N_COUNTER] = {
	6,	0,	27,	22,	17,	11
};
static const u32 pmcsel_mask[N_COUNTER] = {
	0x7f,	0x3f,	0x1f,	0x1f,	0x1f,	0x3f
};
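/*
 * The PMC1 and PMC2 selectors are fields of MMCR0, while the PMC3-6
 * selectors are fields of MMCR1; mpc7450_compute_mmcr() below applies
 * each PMC's shift/mask to whichever register holds its selector.
 */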

/*
 * Compute MMCR0/1/2 values for a set of events.
 */
static int mpc7450_compute_mmcr(u64 event[], int n_ev, unsigned int hwc[],
				struct mmcr_regs *mmcr,
				struct perf_event *pevents[],
				u32 flags __maybe_unused)
{
	u8 event_index[N_CLASSES][N_COUNTER];
	int n_classevent[N_CLASSES];
	int i, j, class, tuse;
	u32 pmc_inuse = 0, pmc_avail;
	u32 mmcr0 = 0, mmcr1 = 0, mmcr2 = 0;
	u32 ev, pmc, thresh;

	if (n_ev > N_COUNTER)
		return -1;

	/* First pass: count usage in each class */
	for (i = 0; i < N_CLASSES; ++i)
		n_classevent[i] = 0;
	for (i = 0; i < n_ev; ++i) {
		class = mpc7450_classify_event(event[i]);
		if (class < 0)
			return -1;
		j = n_classevent[class]++;
		event_index[class][j] = i;
	}

	/* Second pass: allocate PMCs from most specific event to least */
	for (class = N_CLASSES - 1; class >= 0; --class) {
		for (i = 0; i < n_classevent[class]; ++i) {
			ev = event[event_index[class][i]];
			if (class == 4) {
				pmc = (ev >> PM_PMC_SH) & PM_PMC_MSK;
				if (pmc_inuse & (1 << (pmc - 1)))
					return -1;
			} else {
				/* Find a suitable PMC */
				pmc_avail = classmap[class] & ~pmc_inuse;
				if (!pmc_avail)
					return -1;
				pmc = ffs(pmc_avail);
			}
			pmc_inuse |= 1 << (pmc - 1);

			tuse = mpc7450_threshold_use(ev);
			if (tuse) {
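				/*
				 * The threshold value occupies a 6-bit
				 * field at bit 16 of MMCR0; a threshold
				 * scale request sets the top bit of
				 * MMCR2.
				 */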
				thresh = (ev >> PM_THRESH_SH) & PM_THRESH_MSK;
				mmcr0 |= thresh << 16;
				if (tuse == 2 && (ev & PM_THRMULT_MSKS))
					mmcr2 = 0x80000000;
			}
			ev &= pmcsel_mask[pmc - 1];
			ev <<= pmcsel_shift[pmc - 1];
			if (pmc <= 2)
				mmcr0 |= ev;
			else
				mmcr1 |= ev;
			hwc[event_index[class][i]] = pmc - 1;
		}
	}

	if (pmc_inuse & 1)
		mmcr0 |= MMCR0_PMC1CE;
	if (pmc_inuse & 0x3e)
		mmcr0 |= MMCR0_PMCnCE;

	/* Return MMCRx values */
	mmcr->mmcr0 = mmcr0;
	mmcr->mmcr1 = mmcr1;
	mmcr->mmcr2 = mmcr2;
	/*
	 * 32-bit doesn't have an MMCRA and defines SPRN_MMCRA as
	 * SPRN_MMCR2, so also store the mmcr2 value in the mmcra
	 * field to ensure that any write through SPRN_MMCRA uses
	 * the mmcr2 value.
	 */
	mmcr->mmcra = mmcr2;
	return 0;
}

/*
 * Disable counting by a PMC.
 * Note that the pmc argument is 0-based here, not 1-based.
 */
static void mpc7450_disable_pmc(unsigned int pmc, struct mmcr_regs *mmcr)
{
	if (pmc <= 1)
		mmcr->mmcr0 &= ~(pmcsel_mask[pmc] << pmcsel_shift[pmc]);
	else
		mmcr->mmcr1 &= ~(pmcsel_mask[pmc] << pmcsel_shift[pmc]);
}

static int mpc7450_generic_events[] = {
	[PERF_COUNT_HW_CPU_CYCLES]		= 1,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 2,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x217, /* PM_L1_DCACHE_MISS */
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x122, /* PM_BR_CMPL */
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x41c, /* PM_BR_MPRED */
};
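/*
 * The raw codes above (and in the cache events table below) are what
 * userspace passes as the raw event config; e.g. with the perf tool,
 * "perf stat -e r217" would request PM_L1_DCACHE_MISS directly
 * (illustrative usage, not specific to this file).
 */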

#define C(x)	PERF_COUNT_HW_CACHE_##x

/*
 * Table of generalized cache-related events.
 * 0 means not supported, -1 means nonsensical, other values
 * are event codes.
 */
static u64 mpc7450_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
	[C(L1D)] = {		/*	RESULT_ACCESS	RESULT_MISS */
		[C(OP_READ)] = {	0,		0x225	},
		[C(OP_WRITE)] = {	0,		0x227	},
		[C(OP_PREFETCH)] = {	0,		0	},
	},
	[C(L1I)] = {		/*	RESULT_ACCESS	RESULT_MISS */
		[C(OP_READ)] = {	0x129,		0x115	},
		[C(OP_WRITE)] = {	-1,		-1	},
		[C(OP_PREFETCH)] = {	0x634,		0	},
	},
	[C(LL)] = {		/*	RESULT_ACCESS	RESULT_MISS */
		[C(OP_READ)] = {	0,		0	},
		[C(OP_WRITE)] = {	0,		0	},
		[C(OP_PREFETCH)] = {	0,		0	},
	},
	[C(DTLB)] = {		/*	RESULT_ACCESS	RESULT_MISS */
		[C(OP_READ)] = {	0,		0x312	},
		[C(OP_WRITE)] = {	-1,		-1	},
		[C(OP_PREFETCH)] = {	-1,		-1	},
	},
	[C(ITLB)] = {		/*	RESULT_ACCESS	RESULT_MISS */
		[C(OP_READ)] = {	0,		0x223	},
		[C(OP_WRITE)] = {	-1,		-1	},
		[C(OP_PREFETCH)] = {	-1,		-1	},
	},
	[C(BPU)] = {		/*	RESULT_ACCESS	RESULT_MISS */
		[C(OP_READ)] = {	0x122,		0x41c	},
		[C(OP_WRITE)] = {	-1,		-1	},
		[C(OP_PREFETCH)] = {	-1,		-1	},
	},
	[C(NODE)] = {		/*	RESULT_ACCESS	RESULT_MISS */
		[C(OP_READ)] = {	-1,		-1	},
		[C(OP_WRITE)] = {	-1,		-1	},
		[C(OP_PREFETCH)] = {	-1,		-1	},
	},
};

struct power_pmu mpc7450_pmu = {
	.name			= "MPC7450 family",
	.n_counter		= N_COUNTER,
	.max_alternatives	= MAX_ALT,
	.add_fields		= 0x00111555ul,
	.test_adder		= 0x00301000ul,
	.compute_mmcr		= mpc7450_compute_mmcr,
	.get_constraint		= mpc7450_get_constraint,
	.get_alternatives	= mpc7450_get_alternatives,
	.disable_pmc		= mpc7450_disable_pmc,
	.n_generic		= ARRAY_SIZE(mpc7450_generic_events),
	.generic_events		= mpc7450_generic_events,
	.cache_events		= &mpc7450_cache_events,
};

static int __init init_mpc7450_pmu(void)
{
	if (!pvr_version_is(PVR_VER_7450) && !pvr_version_is(PVR_VER_7455) &&
	    !pvr_version_is(PVR_VER_7447) && !pvr_version_is(PVR_VER_7447A) &&
	    !pvr_version_is(PVR_VER_7448))
		return -ENODEV;

	return register_power_pmu(&mpc7450_pmu);
}

early_initcall(init_mpc7450_pmu);