// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Common Performance counter support functions for PowerISA v2.07 processors.
 *
 * Copyright 2009 Paul Mackerras, IBM Corporation.
 * Copyright 2013 Michael Ellerman, IBM Corporation.
 * Copyright 2016 Madhavan Srinivasan, IBM Corporation.
 */
#include "isa207-common.h"

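/*
 * Format attributes, exported to userspace via sysfs (e.g.
 * /sys/bus/event_source/devices/<pmu>/format/), describing how the raw
 * perf_event_attr::config word is split into hardware event fields.
 */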
PMU_FORMAT_ATTR(event,		"config:0-49");
PMU_FORMAT_ATTR(pmcxsel,	"config:0-7");
PMU_FORMAT_ATTR(mark,		"config:8");
PMU_FORMAT_ATTR(combine,	"config:11");
PMU_FORMAT_ATTR(unit,		"config:12-15");
PMU_FORMAT_ATTR(pmc,		"config:16-19");
PMU_FORMAT_ATTR(cache_sel,	"config:20-23");
PMU_FORMAT_ATTR(sample_mode,	"config:24-28");
PMU_FORMAT_ATTR(thresh_sel,	"config:29-31");
PMU_FORMAT_ATTR(thresh_stop,	"config:32-35");
PMU_FORMAT_ATTR(thresh_start,	"config:36-39");
PMU_FORMAT_ATTR(thresh_cmp,	"config:40-49");

struct attribute *isa207_pmu_format_attr[] = {
	&format_attr_event.attr,
	&format_attr_pmcxsel.attr,
	&format_attr_mark.attr,
	&format_attr_combine.attr,
	&format_attr_unit.attr,
	&format_attr_pmc.attr,
	&format_attr_cache_sel.attr,
	&format_attr_sample_mode.attr,
	&format_attr_thresh_sel.attr,
	&format_attr_thresh_stop.attr,
	&format_attr_thresh_start.attr,
	&format_attr_thresh_cmp.attr,
	NULL,
};

struct attribute_group isa207_pmu_format_group = {
	.name = "format",
	.attrs = isa207_pmu_format_attr,
};

static inline bool event_is_fab_match(u64 event)
{
	/* Only check pmc, unit and pmcxsel, ignore the edge bit (0) */
	event &= 0xff0fe;

	/* PM_MRK_FAB_RSP_MATCH & PM_MRK_FAB_RSP_MATCH_CYC */
	return (event == 0x30056 || event == 0x4f052);
}

static bool is_event_valid(u64 event)
{
	u64 valid_mask = EVENT_VALID_MASK;

	if (cpu_has_feature(CPU_FTR_ARCH_300))
		valid_mask = p9_EVENT_VALID_MASK;

	return !(event & ~valid_mask);
}

static inline bool is_event_marked(u64 event)
{
	if (event & EVENT_IS_MARKED)
		return true;

	return false;
}

static void mmcra_sdar_mode(u64 event, unsigned long *mmcra)
{
	/*
	 * MMCRA[SDAR_MODE] specifies how the SDAR should be updated in
	 * continuous sampling mode.
	 *
	 * In case of Power8:
	 * MMCRA[SDAR_MODE] will be programmed as "0b01" for continuous
	 * sampling mode and will be unchanged when setting MMCRA[63]
	 * (marked events).
	 *
	 * In case of Power9:
	 * Marked event: MMCRA[SDAR_MODE] will be set to 0b00 ('No Updates'),
	 *               likewise if the group already has any marked events.
	 * For the rest:
	 *	MMCRA[SDAR_MODE] will be set from the event code.
	 *	If sdar_mode from the event is zero, default to 0b01. Hardware
	 *	requires that we set a non-zero value.
	 */
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		if (is_event_marked(event) || (*mmcra & MMCRA_SAMPLE_ENABLE))
			*mmcra &= MMCRA_SDAR_MODE_NO_UPDATES;
		else if (p9_SDAR_MODE(event))
			*mmcra |= p9_SDAR_MODE(event) << MMCRA_SDAR_MODE_SHIFT;
		else
			*mmcra |= MMCRA_SDAR_MODE_DCACHE;
	} else
		*mmcra |= MMCRA_SDAR_MODE_TLB;
}

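/*
 * The threshold compare value sits at a different offset in MMCRA on
 * ISA v3.00 (Power9) CPUs, hence the feature-dependent shift below.
 */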
static u64 thresh_cmp_val(u64 value)
{
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		return value << p9_MMCRA_THR_CMP_SHIFT;

	return value << MMCRA_THR_CMP_SHIFT;
}

static unsigned long combine_from_event(u64 event)
{
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		return p9_EVENT_COMBINE(event);

	return EVENT_COMBINE(event);
}

static unsigned long combine_shift(unsigned long pmc)
{
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		return p9_MMCR1_COMBINE_SHIFT(pmc);

	return MMCR1_COMBINE_SHIFT(pmc);
}

static inline bool event_is_threshold(u64 event)
{
	return (event >> EVENT_THR_SEL_SHIFT) & EVENT_THR_SEL_MASK;
}

static bool is_thresh_cmp_valid(u64 event)
{
	unsigned int cmp, exp;

	/*
	 * Check the mantissa upper two bits are not zero, unless the
	 * exponent is also zero. See the THRESH_CMP_MANTISSA doc.
	 */
	cmp = (event >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK;
	exp = cmp >> 7;

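	/*
	 * e.g. cmp = 0x80 (exp = 1, mantissa = 0) is rejected below, while
	 * cmp = 0xe0 (exp = 1, mantissa top bits = 0b11) is accepted.
	 */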
	if (exp && (cmp & 0x60) == 0)
		return false;

	return true;
}

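/*
 * Extract the D-cache/I-cache qualifier bits from the event's cache_sel
 * field; these are programmed into MMCR1 at MMCR1_DC_IC_QUAL_SHIFT.
 */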
static unsigned int dc_ic_rld_quad_l1_sel(u64 event)
{
	unsigned int cache;

	cache = (event >> EVENT_CACHE_SEL_SHIFT) & MMCR1_DC_IC_QUAL_MASK;
	return cache;
}

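/*
 * Translate the SIER[LDST] index plus data-source sub-index into the
 * generic perf_mem_data_src encoding: a memory hierarchy level,
 * optionally combined with a snoop result.
 */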
static inline u64 isa207_find_source(u64 idx, u32 sub_idx)
{
	u64 ret = PERF_MEM_NA;

	switch (idx) {
	case 0:
		/* Nothing to do */
		break;
	case 1:
		ret = PH(LVL, L1);
		break;
	case 2:
		ret = PH(LVL, L2);
		break;
	case 3:
		ret = PH(LVL, L3);
		break;
	case 4:
		if (sub_idx <= 1)
			ret = PH(LVL, LOC_RAM);
		else if (sub_idx > 1 && sub_idx <= 2)
			ret = PH(LVL, REM_RAM1);
		else
			ret = PH(LVL, REM_RAM2);
		ret |= P(SNOOP, HIT);
		break;
	case 5:
		ret = PH(LVL, REM_CCE1);
		if ((sub_idx == 0) || (sub_idx == 2) || (sub_idx == 4))
			ret |= P(SNOOP, HIT);
		else if ((sub_idx == 1) || (sub_idx == 3) || (sub_idx == 5))
			ret |= P(SNOOP, HITM);
		break;
	case 6:
		ret = PH(LVL, REM_CCE2);
		if ((sub_idx == 0) || (sub_idx == 2))
			ret |= P(SNOOP, HIT);
		else if ((sub_idx == 1) || (sub_idx == 3))
			ret |= P(SNOOP, HITM);
		break;
	case 7:
		ret = PM(LVL, L1);
		break;
	}

	return ret;
}

void isa207_get_mem_data_src(union perf_mem_data_src *dsrc, u32 flags,
							struct pt_regs *regs)
{
	u64 idx;
	u32 sub_idx;
	u64 sier;
	u64 val;

	/* Skip if no SIER support */
	if (!(flags & PPMU_HAS_SIER)) {
		dsrc->val = 0;
		return;
	}

	sier = mfspr(SPRN_SIER);
	val = (sier & ISA207_SIER_TYPE_MASK) >> ISA207_SIER_TYPE_SHIFT;
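	/* SIER[TYPE]: 1 indicates a sampled load, 2 a sampled store */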
	if (val == 1 || val == 2) {
		idx = (sier & ISA207_SIER_LDST_MASK) >> ISA207_SIER_LDST_SHIFT;
		sub_idx = (sier & ISA207_SIER_DATA_SRC_MASK) >> ISA207_SIER_DATA_SRC_SHIFT;

		dsrc->val = isa207_find_source(idx, sub_idx);
		dsrc->val |= (val == 1) ? P(OP, LOAD) : P(OP, STORE);
	}
}

void isa207_get_mem_weight(u64 *weight)
{
	u64 mmcra = mfspr(SPRN_MMCRA);
	u64 exp = MMCRA_THR_CTR_EXP(mmcra);
	u64 mantissa = MMCRA_THR_CTR_MANT(mmcra);
	u64 sier = mfspr(SPRN_SIER);
	u64 val = (sier & ISA207_SIER_TYPE_MASK) >> ISA207_SIER_TYPE_SHIFT;

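	/*
	 * SIER[TYPE] values 0b000 and 0b111 do not identify a sampled
	 * load or store, so no meaningful latency is available. Otherwise
	 * the threshold counter decodes as mantissa << (2 * exp), e.g.
	 * mantissa = 5 with exp = 2 yields a weight of 5 << 4 = 80.
	 */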
	if (val == 0 || val == 7)
		*weight = 0;
	else
		*weight = mantissa << (2 * exp);
}

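/*
 * Build the constraint mask/value pair for one event. The core perf code
 * compares these across group members to decide whether the events can
 * be scheduled on the PMU simultaneously (PMC, cache, threshold and EBB
 * conflicts all surface here).
 */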
int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
{
	unsigned int unit, pmc, cache, ebb;
	unsigned long mask, value;

	mask = value = 0;

	if (!is_event_valid(event))
		return -1;

	pmc   = (event >> EVENT_PMC_SHIFT)        & EVENT_PMC_MASK;
	unit  = (event >> EVENT_UNIT_SHIFT)       & EVENT_UNIT_MASK;
	cache = (event >> EVENT_CACHE_SEL_SHIFT)  & EVENT_CACHE_SEL_MASK;
	ebb   = (event >> EVENT_EBB_SHIFT)        & EVENT_EBB_MASK;

	if (pmc) {
		u64 base_event;

		if (pmc > 6)
			return -1;

		/* Ignore Linux defined bits when checking event below */
		base_event = event & ~EVENT_LINUX_MASK;

		if (pmc >= 5 && base_event != 0x500fa &&
				base_event != 0x600f4)
			return -1;

		mask  |= CNST_PMC_MASK(pmc);
		value |= CNST_PMC_VAL(pmc);
	}

	if (pmc <= 4) {
		/*
		 * Add to number of counters in use. Note this includes events with
		 * a PMC of 0 - they still need a PMC, it's just assigned later.
		 * Don't count events on PMC 5 & 6, there is only one valid event
		 * on each of those counters, and they are handled above.
		 */
		mask  |= CNST_NC_MASK;
		value |= CNST_NC_VAL;
	}

	if (unit >= 6 && unit <= 9) {
		if (cpu_has_feature(CPU_FTR_ARCH_300)) {
			mask  |= CNST_CACHE_GROUP_MASK;
			value |= CNST_CACHE_GROUP_VAL(event & 0xff);

			mask |= CNST_CACHE_PMC4_MASK;
			if (pmc == 4)
				value |= CNST_CACHE_PMC4_VAL;
		} else if (cache & 0x7) {
			/*
			 * L2/L3 events contain a cache selector field, which is
			 * supposed to be programmed into MMCRC. However MMCRC is only
			 * HV writable, and there is no API for guest kernels to modify
			 * it. The solution is for the hypervisor to initialise the
			 * field to zeroes, and for us to only ever allow events that
			 * have a cache selector of zero. The bank selector (bit 3) is
			 * irrelevant, as long as the rest of the value is 0.
			 */
			return -1;
		}

	} else if (cpu_has_feature(CPU_FTR_ARCH_300) || (event & EVENT_IS_L1)) {
		mask  |= CNST_L1_QUAL_MASK;
		value |= CNST_L1_QUAL_VAL(cache);
	}

	if (is_event_marked(event)) {
		mask  |= CNST_SAMPLE_MASK;
		value |= CNST_SAMPLE_VAL(event >> EVENT_SAMPLE_SHIFT);
	}

	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		if (event_is_threshold(event) && is_thresh_cmp_valid(event)) {
			mask  |= CNST_THRESH_MASK;
			value |= CNST_THRESH_VAL(event >> EVENT_THRESH_SHIFT);
		}
	} else {
		/*
		 * Special case for PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC,
		 * the threshold control bits are used for the match value.
		 */
		if (event_is_fab_match(event)) {
			mask  |= CNST_FAB_MATCH_MASK;
			value |= CNST_FAB_MATCH_VAL(event >> EVENT_THR_CTL_SHIFT);
		} else {
			if (!is_thresh_cmp_valid(event))
				return -1;

			mask  |= CNST_THRESH_MASK;
			value |= CNST_THRESH_VAL(event >> EVENT_THRESH_SHIFT);
		}
	}

	if (!pmc && ebb)
		/* EBB events must specify the PMC */
		return -1;

	if (event & EVENT_WANTS_BHRB) {
		if (!ebb)
			/* Only EBB events can request BHRB */
			return -1;

		mask  |= CNST_IFM_MASK;
		value |= CNST_IFM_VAL(event >> EVENT_IFM_SHIFT);
	}

	/*
	 * All events must agree on EBB, either all request it or none.
	 * EBB events are pinned & exclusive, so this should never actually
	 * hit, but we leave it as a fallback in case.
	 */
	mask  |= CNST_EBB_VAL(ebb);
	value |= CNST_EBB_MASK;

	*maskp = mask;
	*valp = value;

	return 0;
}

int isa207_compute_mmcr(u64 event[], int n_ev,
			       unsigned int hwc[], unsigned long mmcr[],
			       struct perf_event *pevents[])
{
	unsigned long mmcra, mmcr1, mmcr2, unit, combine, psel, cache, val;
	unsigned int pmc, pmc_inuse;
	int i;

	pmc_inuse = 0;

	/* First pass to count resource use */
	for (i = 0; i < n_ev; ++i) {
		pmc = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
		if (pmc)
			pmc_inuse |= 1 << pmc;
	}

	mmcra = mmcr1 = mmcr2 = 0;

	/* Second pass: assign PMCs, set all MMCR1 fields */
	for (i = 0; i < n_ev; ++i) {
		pmc     = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
		unit    = (event[i] >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK;
		combine = combine_from_event(event[i]);
		psel    = event[i] & EVENT_PSEL_MASK;

		if (!pmc) {
			for (pmc = 1; pmc <= 4; ++pmc) {
				if (!(pmc_inuse & (1 << pmc)))
					break;
			}

			pmc_inuse |= 1 << pmc;
		}

		if (pmc <= 4) {
			mmcr1 |= unit << MMCR1_UNIT_SHIFT(pmc);
			mmcr1 |= combine << combine_shift(pmc);
			mmcr1 |= psel << MMCR1_PMCSEL_SHIFT(pmc);
		}

		/* In continuous sampling mode, update SDAR on TLB miss */
		mmcra_sdar_mode(event[i], &mmcra);

		if (cpu_has_feature(CPU_FTR_ARCH_300)) {
			cache = dc_ic_rld_quad_l1_sel(event[i]);
			mmcr1 |= (cache) << MMCR1_DC_IC_QUAL_SHIFT;
		} else {
			if (event[i] & EVENT_IS_L1) {
				cache = dc_ic_rld_quad_l1_sel(event[i]);
				mmcr1 |= (cache) << MMCR1_DC_IC_QUAL_SHIFT;
			}
		}

		if (is_event_marked(event[i])) {
			mmcra |= MMCRA_SAMPLE_ENABLE;

			val = (event[i] >> EVENT_SAMPLE_SHIFT) & EVENT_SAMPLE_MASK;
			if (val) {
				mmcra |= (val & 3) << MMCRA_SAMP_MODE_SHIFT;
				mmcra |= (val >> 2) << MMCRA_SAMP_ELIG_SHIFT;
			}
		}

		/*
		 * For PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC,
		 * the threshold bits are used for the match value.
		 */
		if (!cpu_has_feature(CPU_FTR_ARCH_300) && event_is_fab_match(event[i])) {
			mmcr1 |= ((event[i] >> EVENT_THR_CTL_SHIFT) &
				  EVENT_THR_CTL_MASK) << MMCR1_FAB_SHIFT;
		} else {
			val = (event[i] >> EVENT_THR_CTL_SHIFT) & EVENT_THR_CTL_MASK;
			mmcra |= val << MMCRA_THR_CTL_SHIFT;
			val = (event[i] >> EVENT_THR_SEL_SHIFT) & EVENT_THR_SEL_MASK;
			mmcra |= val << MMCRA_THR_SEL_SHIFT;
			val = (event[i] >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK;
			mmcra |= thresh_cmp_val(val);
		}

		if (event[i] & EVENT_WANTS_BHRB) {
			val = (event[i] >> EVENT_IFM_SHIFT) & EVENT_IFM_MASK;
			mmcra |= val << MMCRA_IFM_SHIFT;
		}

		if (pevents[i]->attr.exclude_user)
			mmcr2 |= MMCR2_FCP(pmc);

		if (pevents[i]->attr.exclude_hv)
			mmcr2 |= MMCR2_FCH(pmc);

		if (pevents[i]->attr.exclude_kernel) {
			if (cpu_has_feature(CPU_FTR_HVMODE))
				mmcr2 |= MMCR2_FCH(pmc);
			else
				mmcr2 |= MMCR2_FCS(pmc);
		}

		hwc[i] = pmc - 1;
	}

	/* Return MMCRx values: mmcr[0] = MMCR0, [1] = MMCR1, [2] = MMCRA, [3] = MMCR2 */
	mmcr[0] = 0;

	/* pmc_inuse is 1-based */
	if (pmc_inuse & 2)
		mmcr[0] = MMCR0_PMC1CE;

	if (pmc_inuse & 0x7c)
		mmcr[0] |= MMCR0_PMCjCE;

	/* If we're not using PMC 5 or 6, freeze them */
	if (!(pmc_inuse & 0x60))
		mmcr[0] |= MMCR0_FC56;

	mmcr[1] = mmcr1;
	mmcr[2] = mmcra;
	mmcr[3] = mmcr2;

	return 0;
}

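/*
 * @pmc here is zero-based; clearing the PMCSEL field in MMCR1 leaves the
 * counter with no event selected. Only PMC1-4 have PMCSEL fields, so
 * PMC5/6 (which count fixed events) are left alone.
 */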
void isa207_disable_pmc(unsigned int pmc, unsigned long mmcr[])
{
	if (pmc <= 3)
		mmcr[1] &= ~(0xffUL << MMCR1_PMCSEL_SHIFT(pmc + 1));
}

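/*
 * Find the row of the alternatives table containing @event. The rows are
 * assumed to be sorted by their first entry, which is also the smallest
 * event code in the row; that is what allows the early break below.
 */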
static int find_alternative(u64 event, const unsigned int ev_alt[][MAX_ALT], int size)
{
	int i, j;

	for (i = 0; i < size; ++i) {
		if (event < ev_alt[i][0])
			break;

		for (j = 0; j < MAX_ALT && ev_alt[i][j]; ++j)
			if (event == ev_alt[i][j])
				return i;
	}

	return -1;
}

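/*
 * Fill @alt with @event and any alternative event codes for it, returning
 * the number of entries written. With PPMU_ONLY_COUNT_RUN, the run-latch
 * variants of cycles/instructions are also treated as alternatives.
 */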
int isa207_get_alternatives(u64 event, u64 alt[], int size, unsigned int flags,
					const unsigned int ev_alt[][MAX_ALT])
{
	int i, j, num_alt = 0;
	u64 alt_event;

	alt[num_alt++] = event;
	i = find_alternative(event, ev_alt, size);
	if (i >= 0) {
		/* Filter out the original event, it's already in alt[0] */
		for (j = 0; j < MAX_ALT; ++j) {
			alt_event = ev_alt[i][j];
			if (alt_event && alt_event != event)
				alt[num_alt++] = alt_event;
		}
	}

	if (flags & PPMU_ONLY_COUNT_RUN) {
		/*
		 * We're only counting in RUN state, so PM_CYC is equivalent
		 * to PM_RUN_CYC and PM_INST_CMPL is equivalent to
		 * PM_RUN_INST_CMPL.
		 */
		j = num_alt;
		for (i = 0; i < num_alt; ++i) {
			switch (alt[i]) {
			case 0x1e:			/* PM_CYC */
				alt[j++] = 0x600f4;	/* PM_RUN_CYC */
				break;
			case 0x600f4:
				alt[j++] = 0x1e;
				break;
			case 0x2:			/* PM_INST_CMPL */
				alt[j++] = 0x500fa;	/* PM_RUN_INST_CMPL */
				break;
			case 0x500fa:
				alt[j++] = 0x2;
				break;
			}
		}
		num_alt = j;
	}

	return num_alt;
}