// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Common Performance counter support functions for PowerISA v2.07 processors.
 *
 * Copyright 2009 Paul Mackerras, IBM Corporation.
 * Copyright 2013 Michael Ellerman, IBM Corporation.
 * Copyright 2016 Madhavan Srinivasan, IBM Corporation.
 */
#include "isa207-common.h"

PMU_FORMAT_ATTR(event, "config:0-49");
PMU_FORMAT_ATTR(pmcxsel, "config:0-7");
PMU_FORMAT_ATTR(mark, "config:8");
PMU_FORMAT_ATTR(combine, "config:11");
PMU_FORMAT_ATTR(unit, "config:12-15");
PMU_FORMAT_ATTR(pmc, "config:16-19");
PMU_FORMAT_ATTR(cache_sel, "config:20-23");
PMU_FORMAT_ATTR(sample_mode, "config:24-28");
PMU_FORMAT_ATTR(thresh_sel, "config:29-31");
PMU_FORMAT_ATTR(thresh_stop, "config:32-35");
PMU_FORMAT_ATTR(thresh_start, "config:36-39");
PMU_FORMAT_ATTR(thresh_cmp, "config:40-49");

static struct attribute *isa207_pmu_format_attr[] = {
	&format_attr_event.attr,
	&format_attr_pmcxsel.attr,
	&format_attr_mark.attr,
	&format_attr_combine.attr,
	&format_attr_unit.attr,
	&format_attr_pmc.attr,
	&format_attr_cache_sel.attr,
	&format_attr_sample_mode.attr,
	&format_attr_thresh_sel.attr,
	&format_attr_thresh_stop.attr,
	&format_attr_thresh_start.attr,
	&format_attr_thresh_cmp.attr,
	NULL,
};

const struct attribute_group isa207_pmu_format_group = {
	.name = "format",
	.attrs = isa207_pmu_format_attr,
};
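
/*
 * These format fields are exported via sysfs under
 * /sys/bus/event_source/devices/<pmu>/format/ and let userspace encode raw
 * events field by field, e.g. (illustrative event code):
 *
 *   perf stat -e cpu/event=0x100f2/ -- <workload>
 */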

static inline bool event_is_fab_match(u64 event)
{
	/* Only check pmc, unit and pmcxsel, ignore the edge bit (0) */
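	/*
	 * Per the format fields above, 0xff0fe keeps bits 1-7 (pmcxsel minus
	 * the edge bit), 12-15 (unit) and 16-19 (pmc).
	 */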
	event &= 0xff0fe;

	/* PM_MRK_FAB_RSP_MATCH & PM_MRK_FAB_RSP_MATCH_CYC */
	return (event == 0x30056 || event == 0x4f052);
}

static bool is_event_valid(u64 event)
{
	u64 valid_mask = EVENT_VALID_MASK;

	if (cpu_has_feature(CPU_FTR_ARCH_31))
		valid_mask = p10_EVENT_VALID_MASK;
	else if (cpu_has_feature(CPU_FTR_ARCH_300))
		valid_mask = p9_EVENT_VALID_MASK;

	return !(event & ~valid_mask);
}

static inline bool is_event_marked(u64 event)
{
	if (event & EVENT_IS_MARKED)
		return true;

	return false;
}

static unsigned long sdar_mod_val(u64 event)
{
	if (cpu_has_feature(CPU_FTR_ARCH_31))
		return p10_SDAR_MODE(event);

	return p9_SDAR_MODE(event);
}

static void mmcra_sdar_mode(u64 event, unsigned long *mmcra)
{
	/*
	 * MMCRA[SDAR_MODE] specifies how the SDAR should be updated in
	 * continuous sampling mode.
	 *
	 * In case of Power8:
	 * MMCRA[SDAR_MODE] will be programmed as "0b01" for continuous sampling
	 * mode and will be unchanged when setting MMCRA[63] (Marked events).
	 *
	 * In case of Power9/Power10:
	 * For marked events, or if the group already has any marked events,
	 * MMCRA[SDAR_MODE] will be set to 0b00 ('No Updates').
	 * For the rest, MMCRA[SDAR_MODE] will be set from the event code.
	 * If the sdar_mode from the event is zero, default to 0b01. Hardware
	 * requires that we set a non-zero value.
	 */
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		if (is_event_marked(event) || (*mmcra & MMCRA_SAMPLE_ENABLE))
			*mmcra &= MMCRA_SDAR_MODE_NO_UPDATES;
		else if (sdar_mod_val(event))
			*mmcra |= sdar_mod_val(event) << MMCRA_SDAR_MODE_SHIFT;
		else
			*mmcra |= MMCRA_SDAR_MODE_DCACHE;
	} else
		*mmcra |= MMCRA_SDAR_MODE_TLB;
}

static int p10_thresh_cmp_val(u64 value)
{
	int exp = 0;
	u64 result = value;

	if (!value)
		return value;

	/*
	 * In case of P10, the thresh_cmp value is not part of the raw event
	 * code and is provided via the attr.config1 parameter. To program
	 * the threshold in MMCRA, take an 18-bit number N, then shift it
	 * right 2 places and increment the exponent E by 1 until the upper
	 * 10 bits of N are zero. Write E to the threshold exponent and
	 * write the lower 8 bits of N to the threshold mantissa.
	 * The max threshold that can be written is 261120.
	 */
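	/*
	 * Worked example (illustrative): N = 1000 needs one shift
	 * (exp = 1, N = 250 = 0xFA), encoding as (1 << 8) | 0xFA = 0x1FA.
	 * The maximum, 261120 = 0xFF << 10, encodes as exp = 5,
	 * mantissa = 0xFF, i.e. 0x5FF.
	 */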
	if (cpu_has_feature(CPU_FTR_ARCH_31)) {
		if (value > 261120)
			value = 261120;
		while ((64 - __builtin_clzl(value)) > 8) {
			exp++;
			value >>= 2;
		}

		/*
		 * Note that it is invalid to write a mantissa with the
		 * upper 2 bits of mantissa being zero, unless the
		 * exponent is also zero.
		 */
		if (!(value & 0xC0) && exp)
			result = -1;
		else
			result = (exp << 8) | value;
	}
	return result;
}

static u64 thresh_cmp_val(u64 value)
{
	if (cpu_has_feature(CPU_FTR_ARCH_31))
		value = p10_thresh_cmp_val(value);

	/*
	 * Since the location of the threshold compare bits in MMCRA
	 * is different on p8, use a different shift value.
	 */
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		return value << p9_MMCRA_THR_CMP_SHIFT;
	else
		return value << MMCRA_THR_CMP_SHIFT;
}

static unsigned long combine_from_event(u64 event)
{
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		return p9_EVENT_COMBINE(event);

	return EVENT_COMBINE(event);
}

static unsigned long combine_shift(unsigned long pmc)
{
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		return p9_MMCR1_COMBINE_SHIFT(pmc);

	return MMCR1_COMBINE_SHIFT(pmc);
}

static inline bool event_is_threshold(u64 event)
{
	return (event >> EVENT_THR_SEL_SHIFT) & EVENT_THR_SEL_MASK;
}

static bool is_thresh_cmp_valid(u64 event)
{
	unsigned int cmp, exp;

	if (cpu_has_feature(CPU_FTR_ARCH_31))
		return p10_thresh_cmp_val(event) >= 0;

	/*
	 * Check that the mantissa's upper two bits are not zero, unless the
	 * exponent is also zero. See the THRESH_CMP_MANTISSA doc.
	 */
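	/*
	 * Worked example (illustrative): cmp = 0x21f (exp = 4,
	 * mantissa = 0x1f) has both upper mantissa bits (0x60) clear and
	 * is rejected; cmp = 0x260 (exp = 4, mantissa = 0x60) is valid.
	 */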

	cmp = (event >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK;
	exp = cmp >> 7;

	if (exp && (cmp & 0x60) == 0)
		return false;

	return true;
}

static unsigned int dc_ic_rld_quad_l1_sel(u64 event)
{
	unsigned int cache;

	cache = (event >> EVENT_CACHE_SEL_SHIFT) & MMCR1_DC_IC_QUAL_MASK;
	return cache;
}

static inline u64 isa207_find_source(u64 idx, u32 sub_idx)
{
	u64 ret = PERF_MEM_NA;

	switch (idx) {
	case 0:
		/* Nothing to do */
		break;
	case 1:
		ret = PH(LVL, L1) | LEVEL(L1) | P(SNOOP, HIT);
		break;
	case 2:
		ret = PH(LVL, L2) | LEVEL(L2) | P(SNOOP, HIT);
		break;
	case 3:
		ret = PH(LVL, L3) | LEVEL(L3) | P(SNOOP, HIT);
		break;
	case 4:
		if (cpu_has_feature(CPU_FTR_ARCH_31)) {
			ret = P(SNOOP, HIT);

			if (sub_idx == 1)
				ret |= PH(LVL, LOC_RAM) | LEVEL(RAM);
			else if (sub_idx == 2 || sub_idx == 3)
				ret |= P(LVL, HIT) | LEVEL(PMEM);
			else if (sub_idx == 4)
				ret |= PH(LVL, REM_RAM1) | REM | LEVEL(RAM) | P(HOPS, 2);
			else if (sub_idx == 5 || sub_idx == 7)
				ret |= P(LVL, HIT) | LEVEL(PMEM) | REM;
			else if (sub_idx == 6)
				ret |= PH(LVL, REM_RAM2) | REM | LEVEL(RAM) | P(HOPS, 3);
		} else {
			if (sub_idx <= 1)
				ret = PH(LVL, LOC_RAM);
			else if (sub_idx > 1 && sub_idx <= 2)
				ret = PH(LVL, REM_RAM1);
			else
				ret = PH(LVL, REM_RAM2);
			ret |= P(SNOOP, HIT);
		}
		break;
	case 5:
		if (cpu_has_feature(CPU_FTR_ARCH_31)) {
			ret = REM | P(HOPS, 0);

			if (sub_idx == 0 || sub_idx == 4)
				ret |= PH(LVL, L2) | LEVEL(L2) | P(SNOOP, HIT);
			else if (sub_idx == 1 || sub_idx == 5)
				ret |= PH(LVL, L2) | LEVEL(L2) | P(SNOOP, HITM);
			else if (sub_idx == 2 || sub_idx == 6)
				ret |= PH(LVL, L3) | LEVEL(L3) | P(SNOOP, HIT);
			else if (sub_idx == 3 || sub_idx == 7)
				ret |= PH(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM);
		} else {
			if (sub_idx == 0)
				ret = PH(LVL, L2) | LEVEL(L2) | REM | P(SNOOP, HIT) | P(HOPS, 0);
			else if (sub_idx == 1)
				ret = PH(LVL, L2) | LEVEL(L2) | REM | P(SNOOP, HITM) | P(HOPS, 0);
			else if (sub_idx == 2 || sub_idx == 4)
				ret = PH(LVL, L3) | LEVEL(L3) | REM | P(SNOOP, HIT) | P(HOPS, 0);
			else if (sub_idx == 3 || sub_idx == 5)
				ret = PH(LVL, L3) | LEVEL(L3) | REM | P(SNOOP, HITM) | P(HOPS, 0);
		}
		break;
	case 6:
		if (cpu_has_feature(CPU_FTR_ARCH_31)) {
			if (sub_idx == 0)
				ret = PH(LVL, REM_CCE1) | LEVEL(ANY_CACHE) | REM |
					P(SNOOP, HIT) | P(HOPS, 2);
			else if (sub_idx == 1)
				ret = PH(LVL, REM_CCE1) | LEVEL(ANY_CACHE) | REM |
					P(SNOOP, HITM) | P(HOPS, 2);
			else if (sub_idx == 2)
				ret = PH(LVL, REM_CCE2) | LEVEL(ANY_CACHE) | REM |
					P(SNOOP, HIT) | P(HOPS, 3);
			else if (sub_idx == 3)
				ret = PH(LVL, REM_CCE2) | LEVEL(ANY_CACHE) | REM |
					P(SNOOP, HITM) | P(HOPS, 3);
		} else {
			ret = PH(LVL, REM_CCE2);
			if (sub_idx == 0 || sub_idx == 2)
				ret |= P(SNOOP, HIT);
			else if (sub_idx == 1 || sub_idx == 3)
				ret |= P(SNOOP, HITM);
		}
		break;
	case 7:
		ret = PM(LVL, L1);
		break;
	}

	return ret;
}

void isa207_get_mem_data_src(union perf_mem_data_src *dsrc, u32 flags,
			     struct pt_regs *regs)
{
	u64 idx;
	u32 sub_idx;
	u64 sier;
	u64 val;

	/* Skip if no SIER support */
	if (!(flags & PPMU_HAS_SIER)) {
		dsrc->val = 0;
		return;
	}

	sier = mfspr(SPRN_SIER);
	val = (sier & ISA207_SIER_TYPE_MASK) >> ISA207_SIER_TYPE_SHIFT;
	if (val != 1 && val != 2 && !(val == 7 && cpu_has_feature(CPU_FTR_ARCH_31))) {
		dsrc->val = 0;
		return;
	}

	idx = (sier & ISA207_SIER_LDST_MASK) >> ISA207_SIER_LDST_SHIFT;
	sub_idx = (sier & ISA207_SIER_DATA_SRC_MASK) >> ISA207_SIER_DATA_SRC_SHIFT;

	dsrc->val = isa207_find_source(idx, sub_idx);
	if (val == 7) {
		u64 mmcra;
		u32 op_type;

		/*
		 * Type 0b111 denotes either a larx or a stcx instruction. Use
		 * the MMCRA sampling bits [57:59] along with the type value
		 * to determine the exact instruction type. If the sampling
		 * criterion is neither load nor store, set the type to NA
		 * by default.
		 */
		mmcra = mfspr(SPRN_MMCRA);

		op_type = (mmcra >> MMCRA_SAMP_ELIG_SHIFT) & MMCRA_SAMP_ELIG_MASK;
		switch (op_type) {
		case 5:
			dsrc->val |= P(OP, LOAD);
			break;
		case 7:
			dsrc->val |= P(OP, STORE);
			break;
		default:
			dsrc->val |= P(OP, NA);
			break;
		}
	} else {
		dsrc->val |= (val == 1) ? P(OP, LOAD) : P(OP, STORE);
	}
}

void isa207_get_mem_weight(u64 *weight, u64 type)
{
	union perf_sample_weight *weight_fields;
	u64 weight_lat;
	u64 mmcra = mfspr(SPRN_MMCRA);
	u64 exp = MMCRA_THR_CTR_EXP(mmcra);
	u64 mantissa = MMCRA_THR_CTR_MANT(mmcra);
	u64 sier = mfspr(SPRN_SIER);
	u64 val = (sier & ISA207_SIER_TYPE_MASK) >> ISA207_SIER_TYPE_SHIFT;

	if (cpu_has_feature(CPU_FTR_ARCH_31))
		mantissa = P10_MMCRA_THR_CTR_MANT(mmcra);

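	/*
	 * The threshold event counter is decoded as mantissa << (2 * exp)
	 * cycles; e.g. (illustrative) exp = 5 and mantissa = 0xFF give
	 * 261120, the cap enforced in p10_thresh_cmp_val() above.
	 */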
	if (val == 0 || (val == 7 && !cpu_has_feature(CPU_FTR_ARCH_31)))
		weight_lat = 0;
	else
		weight_lat = mantissa << (2 * exp);

	/*
	 * Use the 64-bit weight field (full) if the sample type is
	 * WEIGHT.
	 *
	 * If the sample type is WEIGHT_STRUCT:
	 * - store the memory latency in the lower 32 bits.
	 * - For ISA v3.1, use the remaining two 16-bit fields of
	 *   perf_sample_weight to store the cycle counter values
	 *   from SIER2.
	 */
	weight_fields = (union perf_sample_weight *)weight;
	if (type & PERF_SAMPLE_WEIGHT)
		weight_fields->full = weight_lat;
	else {
		weight_fields->var1_dw = (u32)weight_lat;
		if (cpu_has_feature(CPU_FTR_ARCH_31)) {
			weight_fields->var2_w = P10_SIER2_FINISH_CYC(mfspr(SPRN_SIER2));
			weight_fields->var3_w = P10_SIER2_DISPATCH_CYC(mfspr(SPRN_SIER2));
		}
	}
}

int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp, u64 event_config1)
{
	unsigned int unit, pmc, cache, ebb;
	unsigned long mask, value;

	mask = value = 0;

	if (!is_event_valid(event))
		return -1;

	pmc = (event >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
	unit = (event >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK;
	if (cpu_has_feature(CPU_FTR_ARCH_31))
		cache = (event >> EVENT_CACHE_SEL_SHIFT) &
			p10_EVENT_CACHE_SEL_MASK;
	else
		cache = (event >> EVENT_CACHE_SEL_SHIFT) &
			EVENT_CACHE_SEL_MASK;
	ebb = (event >> EVENT_EBB_SHIFT) & EVENT_EBB_MASK;

	if (pmc) {
		u64 base_event;

		if (pmc > 6)
			return -1;

		/* Ignore Linux defined bits when checking event below */
		base_event = event & ~EVENT_LINUX_MASK;

		if (pmc >= 5 && base_event != 0x500fa &&
		    base_event != 0x600f4)
			return -1;

		mask |= CNST_PMC_MASK(pmc);
		value |= CNST_PMC_VAL(pmc);

		/*
		 * PMC5 and PMC6 are used to count cycles and instructions and
		 * they do not support most of the constraint bits. Add a check
		 * to exclude PMC5/6 from most of the constraints except for
		 * EBB/BHRB.
		 */
		if (pmc >= 5)
			goto ebb_bhrb;
	}

	if (pmc <= 4) {
		/*
		 * Add to the number of counters in use. Note this includes
		 * events with a PMC of 0 - they still need a PMC, it's just
		 * assigned later. Don't count events on PMC 5 & 6, there is
		 * only one valid event on each of those counters, and they
		 * are handled above.
		 */
		mask |= CNST_NC_MASK;
		value |= CNST_NC_VAL;
	}

	if (unit >= 6 && unit <= 9) {
		if (cpu_has_feature(CPU_FTR_ARCH_31)) {
			if (unit == 6) {
				mask |= CNST_L2L3_GROUP_MASK;
				value |= CNST_L2L3_GROUP_VAL(event >> p10_L2L3_EVENT_SHIFT);
			}
		} else if (cpu_has_feature(CPU_FTR_ARCH_300)) {
			mask |= CNST_CACHE_GROUP_MASK;
			value |= CNST_CACHE_GROUP_VAL(event & 0xff);

			mask |= CNST_CACHE_PMC4_MASK;
			if (pmc == 4)
				value |= CNST_CACHE_PMC4_VAL;
		} else if (cache & 0x7) {
			/*
			 * L2/L3 events contain a cache selector field, which is
			 * supposed to be programmed into MMCRC. However MMCRC is only
			 * HV writable, and there is no API for guest kernels to modify
			 * it. The solution is for the hypervisor to initialise the
			 * field to zeroes, and for us to only ever allow events that
			 * have a cache selector of zero. The bank selector (bit 3) is
			 * irrelevant, as long as the rest of the value is 0.
			 */
			return -1;
		}

	} else if (cpu_has_feature(CPU_FTR_ARCH_300) || (event & EVENT_IS_L1)) {
		mask |= CNST_L1_QUAL_MASK;
		value |= CNST_L1_QUAL_VAL(cache);
	}

	if (cpu_has_feature(CPU_FTR_ARCH_31)) {
		mask |= CNST_RADIX_SCOPE_GROUP_MASK;
		value |= CNST_RADIX_SCOPE_GROUP_VAL(event >> p10_EVENT_RADIX_SCOPE_QUAL_SHIFT);
	}

	if (is_event_marked(event)) {
		mask |= CNST_SAMPLE_MASK;
		value |= CNST_SAMPLE_VAL(event >> EVENT_SAMPLE_SHIFT);
	}

	if (cpu_has_feature(CPU_FTR_ARCH_31)) {
		if (event_is_threshold(event) && is_thresh_cmp_valid(event_config1)) {
			mask |= CNST_THRESH_CTL_SEL_MASK;
			value |= CNST_THRESH_CTL_SEL_VAL(event >> EVENT_THRESH_SHIFT);
			mask |= p10_CNST_THRESH_CMP_MASK;
			value |= p10_CNST_THRESH_CMP_VAL(p10_thresh_cmp_val(event_config1));
		} else if (event_is_threshold(event))
			return -1;
	} else if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		if (event_is_threshold(event) && is_thresh_cmp_valid(event)) {
			mask |= CNST_THRESH_MASK;
			value |= CNST_THRESH_VAL(event >> EVENT_THRESH_SHIFT);
		} else if (event_is_threshold(event))
			return -1;
	} else {
		/*
		 * Special case for PM_MRK_FAB_RSP_MATCH and
		 * PM_MRK_FAB_RSP_MATCH_CYC: the threshold control bits are
		 * used for the match value.
		 */
		if (event_is_fab_match(event)) {
			mask |= CNST_FAB_MATCH_MASK;
			value |= CNST_FAB_MATCH_VAL(event >> EVENT_THR_CTL_SHIFT);
		} else {
			if (!is_thresh_cmp_valid(event))
				return -1;

			mask |= CNST_THRESH_MASK;
			value |= CNST_THRESH_VAL(event >> EVENT_THRESH_SHIFT);
		}
	}


ebb_bhrb:
	if (!pmc && ebb)
		/* EBB events must specify the PMC */
		return -1;

	if (event & EVENT_WANTS_BHRB) {
		if (!ebb)
			/* Only EBB events can request BHRB */
			return -1;

		mask |= CNST_IFM_MASK;
		value |= CNST_IFM_VAL(event >> EVENT_IFM_SHIFT);
	}

	/*
	 * All events must agree on EBB, either all request it or none.
	 * EBB events are pinned & exclusive, so this should never actually
	 * hit, but we leave it as a fallback in case.
	 */
	mask |= CNST_EBB_MASK;
	value |= CNST_EBB_VAL(ebb);

	*maskp = mask;
	*valp = value;

	return 0;
}

int isa207_compute_mmcr(u64 event[], int n_ev,
			unsigned int hwc[], struct mmcr_regs *mmcr,
			struct perf_event *pevents[], u32 flags)
{
	unsigned long mmcra, mmcr1, mmcr2, unit, combine, psel, cache, val;
	unsigned long mmcr3;
	unsigned int pmc, pmc_inuse;
	int i;

	pmc_inuse = 0;

	/* First pass to count resource use */
	for (i = 0; i < n_ev; ++i) {
		pmc = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
		if (pmc)
			pmc_inuse |= 1 << pmc;
	}
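
	/*
	 * pmc_inuse is a 1-based bitmap; e.g. (illustrative) events pinned
	 * to PMC1 and PMC4 give pmc_inuse = 0b10010.
	 */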

	mmcra = mmcr1 = mmcr2 = mmcr3 = 0;

	/*
	 * Disable BHRB by default by setting the MMCRA[BHRBRD] bit; it is
	 * re-enabled below if BHRB is explicitly requested.
	 */
	if (cpu_has_feature(CPU_FTR_ARCH_31))
		mmcra |= MMCRA_BHRB_DISABLE;

	/* Second pass: assign PMCs, set all MMCR1 fields */
	for (i = 0; i < n_ev; ++i) {
		pmc = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
		unit = (event[i] >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK;
		combine = combine_from_event(event[i]);
		psel = event[i] & EVENT_PSEL_MASK;

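		/* No PMC was specified: grab the lowest free one of PMC1-4 */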
		if (!pmc) {
			for (pmc = 1; pmc <= 4; ++pmc) {
				if (!(pmc_inuse & (1 << pmc)))
					break;
			}

			pmc_inuse |= 1 << pmc;
		}

		if (pmc <= 4) {
			mmcr1 |= unit << MMCR1_UNIT_SHIFT(pmc);
			mmcr1 |= combine << combine_shift(pmc);
			mmcr1 |= psel << MMCR1_PMCSEL_SHIFT(pmc);
		}

		/* In continuous sampling mode, update SDAR on TLB miss */
		mmcra_sdar_mode(event[i], &mmcra);

		if (cpu_has_feature(CPU_FTR_ARCH_300)) {
			cache = dc_ic_rld_quad_l1_sel(event[i]);
			mmcr1 |= (cache) << MMCR1_DC_IC_QUAL_SHIFT;
		} else {
			if (event[i] & EVENT_IS_L1) {
				cache = dc_ic_rld_quad_l1_sel(event[i]);
				mmcr1 |= (cache) << MMCR1_DC_IC_QUAL_SHIFT;
			}
		}

		/* Set RADIX_SCOPE_QUAL bit */
		if (cpu_has_feature(CPU_FTR_ARCH_31)) {
			val = (event[i] >> p10_EVENT_RADIX_SCOPE_QUAL_SHIFT) &
				p10_EVENT_RADIX_SCOPE_QUAL_MASK;
			mmcr1 |= val << p10_MMCR1_RADIX_SCOPE_QUAL_SHIFT;
		}

		if (is_event_marked(event[i])) {
			mmcra |= MMCRA_SAMPLE_ENABLE;

			val = (event[i] >> EVENT_SAMPLE_SHIFT) & EVENT_SAMPLE_MASK;
			if (val) {
				mmcra |= (val & 3) << MMCRA_SAMP_MODE_SHIFT;
				mmcra |= (val >> 2) << MMCRA_SAMP_ELIG_SHIFT;
			}
		}

		/*
		 * For PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC,
		 * the threshold bits are used for the match value.
		 */
		if (!cpu_has_feature(CPU_FTR_ARCH_300) && event_is_fab_match(event[i])) {
			mmcr1 |= ((event[i] >> EVENT_THR_CTL_SHIFT) &
				  EVENT_THR_CTL_MASK) << MMCR1_FAB_SHIFT;
		} else {
			val = (event[i] >> EVENT_THR_CTL_SHIFT) & EVENT_THR_CTL_MASK;
			mmcra |= val << MMCRA_THR_CTL_SHIFT;
			val = (event[i] >> EVENT_THR_SEL_SHIFT) & EVENT_THR_SEL_MASK;
			mmcra |= val << MMCRA_THR_SEL_SHIFT;
			if (!cpu_has_feature(CPU_FTR_ARCH_31)) {
				val = (event[i] >> EVENT_THR_CMP_SHIFT) &
					EVENT_THR_CMP_MASK;
				mmcra |= thresh_cmp_val(val);
			} else if (flags & PPMU_HAS_ATTR_CONFIG1) {
				val = (pevents[i]->attr.config1 >> p10_EVENT_THR_CMP_SHIFT) &
					p10_EVENT_THR_CMP_MASK;
				mmcra |= thresh_cmp_val(val);
			}
		}

		if (cpu_has_feature(CPU_FTR_ARCH_31) && (unit == 6)) {
			val = (event[i] >> p10_L2L3_EVENT_SHIFT) &
				p10_EVENT_L2L3_SEL_MASK;
			mmcr2 |= val << p10_L2L3_SEL_SHIFT;
		}

		if (event[i] & EVENT_WANTS_BHRB) {
			val = (event[i] >> EVENT_IFM_SHIFT) & EVENT_IFM_MASK;
			mmcra |= val << MMCRA_IFM_SHIFT;
		}

		/* Clear MMCRA[BHRBRD] if there is a user request for BHRB */
		if (cpu_has_feature(CPU_FTR_ARCH_31) &&
		    (has_branch_stack(pevents[i]) || (event[i] & EVENT_WANTS_BHRB)))
			mmcra &= ~MMCRA_BHRB_DISABLE;

		if (pevents[i]->attr.exclude_user)
			mmcr2 |= MMCR2_FCP(pmc);

		if (pevents[i]->attr.exclude_hv)
			mmcr2 |= MMCR2_FCH(pmc);

		if (pevents[i]->attr.exclude_kernel) {
			if (cpu_has_feature(CPU_FTR_HVMODE))
				mmcr2 |= MMCR2_FCH(pmc);
			else
				mmcr2 |= MMCR2_FCS(pmc);
		}

		if (pevents[i]->attr.exclude_idle)
			mmcr2 |= MMCR2_FCWAIT(pmc);

		if (cpu_has_feature(CPU_FTR_ARCH_31)) {
			if (pmc <= 4) {
				val = (event[i] >> p10_EVENT_MMCR3_SHIFT) &
					p10_EVENT_MMCR3_MASK;
				mmcr3 |= val << MMCR3_SHIFT(pmc);
			}
		}

		hwc[i] = pmc - 1;
	}

	/* Return MMCRx values */
	mmcr->mmcr0 = 0;

	/* pmc_inuse is 1-based */
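	/* e.g. 0x2 is PMC1, 0x7c covers PMC2-6 and 0x60 covers PMC5/6 */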
	if (pmc_inuse & 2)
		mmcr->mmcr0 = MMCR0_PMC1CE;

	if (pmc_inuse & 0x7c)
		mmcr->mmcr0 |= MMCR0_PMCjCE;

	/* If we're not using PMC 5 or 6, freeze them */
	if (!(pmc_inuse & 0x60))
		mmcr->mmcr0 |= MMCR0_FC56;

	/*
	 * Set MMCR0[PMCCEXT] for p10, which restricts access to group B
	 * registers when MMCR0[PMCC] = 0b00.
	 */
	if (cpu_has_feature(CPU_FTR_ARCH_31))
		mmcr->mmcr0 |= MMCR0_PMCCEXT;

	mmcr->mmcr1 = mmcr1;
	mmcr->mmcra = mmcra;
	mmcr->mmcr2 = mmcr2;
	mmcr->mmcr3 = mmcr3;

	return 0;
}

void isa207_disable_pmc(unsigned int pmc, struct mmcr_regs *mmcr)
{
	if (pmc <= 3)
		mmcr->mmcr1 &= ~(0xffUL << MMCR1_PMCSEL_SHIFT(pmc + 1));
}

static int find_alternative(u64 event, const unsigned int ev_alt[][MAX_ALT], int size)
{
	int i, j;

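	/*
	 * The alternative tables are assumed to be sorted by their first
	 * (base) event, which is what allows the early break below.
	 */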
	for (i = 0; i < size; ++i) {
		if (event < ev_alt[i][0])
			break;

		for (j = 0; j < MAX_ALT && ev_alt[i][j]; ++j)
			if (event == ev_alt[i][j])
				return i;
	}

	return -1;
}

int isa207_get_alternatives(u64 event, u64 alt[], int size, unsigned int flags,
			const unsigned int ev_alt[][MAX_ALT])
{
	int i, j, num_alt = 0;
	u64 alt_event;

	alt[num_alt++] = event;
	i = find_alternative(event, ev_alt, size);
	if (i >= 0) {
		/* Filter out the original event, it's already in alt[0] */
		for (j = 0; j < MAX_ALT; ++j) {
			alt_event = ev_alt[i][j];
			if (alt_event && alt_event != event)
				alt[num_alt++] = alt_event;
		}
	}

	if (flags & PPMU_ONLY_COUNT_RUN) {
		/*
		 * We're only counting in RUN state, so PM_CYC is equivalent
		 * to PM_RUN_CYC and PM_INST_CMPL to PM_RUN_INST_CMPL.
		 */
		j = num_alt;
		for (i = 0; i < num_alt; ++i) {
			switch (alt[i]) {
			case 0x1e: /* PM_CYC */
				alt[j++] = 0x600f4; /* PM_RUN_CYC */
				break;
			case 0x600f4:
				alt[j++] = 0x1e;
				break;
			case 0x2: /* PM_INST_CMPL */
				alt[j++] = 0x500fa; /* PM_RUN_INST_CMPL */
				break;
			case 0x500fa:
				alt[j++] = 0x2;
				break;
			}
		}
		num_alt = j;
	}

	return num_alt;
}

int isa3XX_check_attr_config(struct perf_event *ev)
{
	u64 val, sample_mode;
	u64 event = ev->attr.config;

	val = (event >> EVENT_SAMPLE_SHIFT) & EVENT_SAMPLE_MASK;
	sample_mode = val & 0x3;

	/*
	 * MMCRA[61:62] is Random Sampling Mode (SM).
	 * The value 0b11 is reserved.
	 */
	if (sample_mode == 0x3)
		return -EINVAL;

	/*
	 * Check for all reserved values.
	 * Source: Performance Monitoring Unit User Guide
	 */
	switch (val) {
	case 0x5:
	case 0x9:
	case 0xD:
	case 0x19:
	case 0x1D:
	case 0x1A:
	case 0x1E:
		return -EINVAL;
	}

	/*
	 * MMCRA[48:51]/[52:55] is Threshold Start/Stop
	 * Events Selection.
	 * 0b11110000/0b00001111 is reserved.
	 */
	val = (event >> EVENT_THR_CTL_SHIFT) & EVENT_THR_CTL_MASK;
	if (((val & 0xF0) == 0xF0) || ((val & 0xF) == 0xF))
		return -EINVAL;

	return 0;
}