1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Performance counter support for POWER10 processors. 4 * 5 * Copyright 2020 Madhavan Srinivasan, IBM Corporation. 6 * Copyright 2020 Athira Rajeev, IBM Corporation. 7 */ 8 9 #define pr_fmt(fmt) "power10-pmu: " fmt 10 11 #include "isa207-common.h" 12 13 /* 14 * Raw event encoding for Power10: 15 * 16 * 60 56 52 48 44 40 36 32 17 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | 18 * | | [ ] [ src_match ] [ src_mask ] | [ ] [ l2l3_sel ] [ thresh_ctl ] 19 * | | | | | | 20 * | | *- IFM (Linux) | | thresh start/stop -* 21 * | *- BHRB (Linux) | src_sel 22 * *- EBB (Linux) *invert_bit 23 * 24 * 28 24 20 16 12 8 4 0 25 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | 26 * [ ] [ sample ] [ ] [ ] [ pmc ] [unit ] [ ] m [ pmcxsel ] 27 * | | | | | | 28 * | | | | | *- mark 29 * | | | *- L1/L2/L3 cache_sel | 30 * | | sdar_mode | 31 * | *- sampling mode for marked events *- combine 32 * | 33 * *- thresh_sel 34 * 35 * Below uses IBM bit numbering. 
36 * 37 * MMCR1[x:y] = unit (PMCxUNIT) 38 * MMCR1[24] = pmc1combine[0] 39 * MMCR1[25] = pmc1combine[1] 40 * MMCR1[26] = pmc2combine[0] 41 * MMCR1[27] = pmc2combine[1] 42 * MMCR1[28] = pmc3combine[0] 43 * MMCR1[29] = pmc3combine[1] 44 * MMCR1[30] = pmc4combine[0] 45 * MMCR1[31] = pmc4combine[1] 46 * 47 * if pmc == 3 and unit == 0 and pmcxsel[0:6] == 0b0101011 48 * MMCR1[20:27] = thresh_ctl 49 * else if pmc == 4 and unit == 0xf and pmcxsel[0:6] == 0b0101001 50 * MMCR1[20:27] = thresh_ctl 51 * else 52 * MMCRA[48:55] = thresh_ctl (THRESH START/END) 53 * 54 * if thresh_sel: 55 * MMCRA[45:47] = thresh_sel 56 * 57 * if l2l3_sel: 58 * MMCR2[56:60] = l2l3_sel[0:4] 59 * 60 * MMCR1[16] = cache_sel[0] 61 * MMCR1[17] = cache_sel[1] 62 * 63 * if mark: 64 * MMCRA[63] = 1 (SAMPLE_ENABLE) 65 * MMCRA[57:59] = sample[0:2] (RAND_SAMP_ELIG) 66 * MMCRA[61:62] = sample[3:4] (RAND_SAMP_MODE) 67 * 68 * if EBB and BHRB: 69 * MMCRA[32:33] = IFM 70 * 71 * MMCRA[SDAR_MODE] = sdar_mode[0:1] 72 */ 73 74 /* 75 * Some power10 event codes. 
76 */ 77 #define EVENT(_name, _code) enum{_name = _code} 78 79 #include "power10-events-list.h" 80 81 #undef EVENT 82 83 /* MMCRA IFM bits - POWER10 */ 84 #define POWER10_MMCRA_IFM1 0x0000000040000000UL 85 #define POWER10_MMCRA_IFM2 0x0000000080000000UL 86 #define POWER10_MMCRA_IFM3 0x00000000C0000000UL 87 #define POWER10_MMCRA_BHRB_MASK 0x00000000C0000000UL 88 89 extern u64 PERF_REG_EXTENDED_MASK; 90 91 /* Table of alternatives, sorted by column 0 */ 92 static const unsigned int power10_event_alternatives[][MAX_ALT] = { 93 { PM_RUN_CYC_ALT, PM_RUN_CYC }, 94 { PM_RUN_INST_CMPL_ALT, PM_RUN_INST_CMPL }, 95 }; 96 97 static int power10_get_alternatives(u64 event, unsigned int flags, u64 alt[]) 98 { 99 int num_alt = 0; 100 101 num_alt = isa207_get_alternatives(event, alt, 102 ARRAY_SIZE(power10_event_alternatives), flags, 103 power10_event_alternatives); 104 105 return num_alt; 106 } 107 108 GENERIC_EVENT_ATTR(cpu-cycles, PM_RUN_CYC); 109 GENERIC_EVENT_ATTR(instructions, PM_RUN_INST_CMPL); 110 GENERIC_EVENT_ATTR(branch-instructions, PM_BR_CMPL); 111 GENERIC_EVENT_ATTR(branch-misses, PM_BR_MPRED_CMPL); 112 GENERIC_EVENT_ATTR(cache-references, PM_LD_REF_L1); 113 GENERIC_EVENT_ATTR(cache-misses, PM_LD_MISS_L1); 114 GENERIC_EVENT_ATTR(mem-loads, MEM_LOADS); 115 GENERIC_EVENT_ATTR(mem-stores, MEM_STORES); 116 117 CACHE_EVENT_ATTR(L1-dcache-load-misses, PM_LD_MISS_L1); 118 CACHE_EVENT_ATTR(L1-dcache-loads, PM_LD_REF_L1); 119 CACHE_EVENT_ATTR(L1-dcache-prefetches, PM_LD_PREFETCH_CACHE_LINE_MISS); 120 CACHE_EVENT_ATTR(L1-dcache-store-misses, PM_ST_MISS_L1); 121 CACHE_EVENT_ATTR(L1-icache-load-misses, PM_L1_ICACHE_MISS); 122 CACHE_EVENT_ATTR(L1-icache-loads, PM_INST_FROM_L1); 123 CACHE_EVENT_ATTR(L1-icache-prefetches, PM_IC_PREF_REQ); 124 CACHE_EVENT_ATTR(LLC-load-misses, PM_DATA_FROM_L3MISS); 125 CACHE_EVENT_ATTR(LLC-loads, PM_DATA_FROM_L3); 126 CACHE_EVENT_ATTR(branch-load-misses, PM_BR_MPRED_CMPL); 127 CACHE_EVENT_ATTR(branch-loads, PM_BR_CMPL); 128 
CACHE_EVENT_ATTR(dTLB-load-misses, PM_DTLB_MISS); 129 CACHE_EVENT_ATTR(iTLB-load-misses, PM_ITLB_MISS); 130 131 static struct attribute *power10_events_attr[] = { 132 GENERIC_EVENT_PTR(PM_RUN_CYC), 133 GENERIC_EVENT_PTR(PM_RUN_INST_CMPL), 134 GENERIC_EVENT_PTR(PM_BR_CMPL), 135 GENERIC_EVENT_PTR(PM_BR_MPRED_CMPL), 136 GENERIC_EVENT_PTR(PM_LD_REF_L1), 137 GENERIC_EVENT_PTR(PM_LD_MISS_L1), 138 GENERIC_EVENT_PTR(MEM_LOADS), 139 GENERIC_EVENT_PTR(MEM_STORES), 140 CACHE_EVENT_PTR(PM_LD_MISS_L1), 141 CACHE_EVENT_PTR(PM_LD_REF_L1), 142 CACHE_EVENT_PTR(PM_LD_PREFETCH_CACHE_LINE_MISS), 143 CACHE_EVENT_PTR(PM_ST_MISS_L1), 144 CACHE_EVENT_PTR(PM_L1_ICACHE_MISS), 145 CACHE_EVENT_PTR(PM_INST_FROM_L1), 146 CACHE_EVENT_PTR(PM_IC_PREF_REQ), 147 CACHE_EVENT_PTR(PM_DATA_FROM_L3MISS), 148 CACHE_EVENT_PTR(PM_DATA_FROM_L3), 149 CACHE_EVENT_PTR(PM_BR_MPRED_CMPL), 150 CACHE_EVENT_PTR(PM_BR_CMPL), 151 CACHE_EVENT_PTR(PM_DTLB_MISS), 152 CACHE_EVENT_PTR(PM_ITLB_MISS), 153 NULL 154 }; 155 156 static struct attribute_group power10_pmu_events_group = { 157 .name = "events", 158 .attrs = power10_events_attr, 159 }; 160 161 PMU_FORMAT_ATTR(event, "config:0-59"); 162 PMU_FORMAT_ATTR(pmcxsel, "config:0-7"); 163 PMU_FORMAT_ATTR(mark, "config:8"); 164 PMU_FORMAT_ATTR(combine, "config:10-11"); 165 PMU_FORMAT_ATTR(unit, "config:12-15"); 166 PMU_FORMAT_ATTR(pmc, "config:16-19"); 167 PMU_FORMAT_ATTR(cache_sel, "config:20-21"); 168 PMU_FORMAT_ATTR(sdar_mode, "config:22-23"); 169 PMU_FORMAT_ATTR(sample_mode, "config:24-28"); 170 PMU_FORMAT_ATTR(thresh_sel, "config:29-31"); 171 PMU_FORMAT_ATTR(thresh_stop, "config:32-35"); 172 PMU_FORMAT_ATTR(thresh_start, "config:36-39"); 173 PMU_FORMAT_ATTR(l2l3_sel, "config:40-44"); 174 PMU_FORMAT_ATTR(src_sel, "config:45-46"); 175 PMU_FORMAT_ATTR(invert_bit, "config:47"); 176 PMU_FORMAT_ATTR(src_mask, "config:48-53"); 177 PMU_FORMAT_ATTR(src_match, "config:54-59"); 178 179 static struct attribute *power10_pmu_format_attr[] = { 180 &format_attr_event.attr, 181 
&format_attr_pmcxsel.attr, 182 &format_attr_mark.attr, 183 &format_attr_combine.attr, 184 &format_attr_unit.attr, 185 &format_attr_pmc.attr, 186 &format_attr_cache_sel.attr, 187 &format_attr_sdar_mode.attr, 188 &format_attr_sample_mode.attr, 189 &format_attr_thresh_sel.attr, 190 &format_attr_thresh_stop.attr, 191 &format_attr_thresh_start.attr, 192 &format_attr_l2l3_sel.attr, 193 &format_attr_src_sel.attr, 194 &format_attr_invert_bit.attr, 195 &format_attr_src_mask.attr, 196 &format_attr_src_match.attr, 197 NULL, 198 }; 199 200 static struct attribute_group power10_pmu_format_group = { 201 .name = "format", 202 .attrs = power10_pmu_format_attr, 203 }; 204 205 static const struct attribute_group *power10_pmu_attr_groups[] = { 206 &power10_pmu_format_group, 207 &power10_pmu_events_group, 208 NULL, 209 }; 210 211 static int power10_generic_events[] = { 212 [PERF_COUNT_HW_CPU_CYCLES] = PM_RUN_CYC, 213 [PERF_COUNT_HW_INSTRUCTIONS] = PM_RUN_INST_CMPL, 214 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PM_BR_CMPL, 215 [PERF_COUNT_HW_BRANCH_MISSES] = PM_BR_MPRED_CMPL, 216 [PERF_COUNT_HW_CACHE_REFERENCES] = PM_LD_REF_L1, 217 [PERF_COUNT_HW_CACHE_MISSES] = PM_LD_MISS_L1, 218 }; 219 220 static u64 power10_bhrb_filter_map(u64 branch_sample_type) 221 { 222 u64 pmu_bhrb_filter = 0; 223 224 /* BHRB and regular PMU events share the same privilege state 225 * filter configuration. BHRB is always recorded along with a 226 * regular PMU event. As the privilege state filter is handled 227 * in the basic PMC configuration of the accompanying regular 228 * PMU event, we ignore any separate BHRB specific request. 
229 */ 230 231 /* No branch filter requested */ 232 if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY) 233 return pmu_bhrb_filter; 234 235 /* Invalid branch filter options - HW does not support */ 236 if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_RETURN) 237 return -1; 238 239 if (branch_sample_type & PERF_SAMPLE_BRANCH_IND_CALL) { 240 pmu_bhrb_filter |= POWER10_MMCRA_IFM2; 241 return pmu_bhrb_filter; 242 } 243 244 if (branch_sample_type & PERF_SAMPLE_BRANCH_COND) { 245 pmu_bhrb_filter |= POWER10_MMCRA_IFM3; 246 return pmu_bhrb_filter; 247 } 248 249 if (branch_sample_type & PERF_SAMPLE_BRANCH_CALL) 250 return -1; 251 252 if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_CALL) { 253 pmu_bhrb_filter |= POWER10_MMCRA_IFM1; 254 return pmu_bhrb_filter; 255 } 256 257 /* Every thing else is unsupported */ 258 return -1; 259 } 260 261 static void power10_config_bhrb(u64 pmu_bhrb_filter) 262 { 263 pmu_bhrb_filter &= POWER10_MMCRA_BHRB_MASK; 264 265 /* Enable BHRB filter in PMU */ 266 mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter)); 267 } 268 269 #define C(x) PERF_COUNT_HW_CACHE_##x 270 271 /* 272 * Table of generalized cache-related events. 273 * 0 means not supported, -1 means nonsensical, other values 274 * are event codes. 
 */
/* Indexed [cache][operation][access-vs-miss]; see C() above */
static u64 power10_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = PM_LD_REF_L1,
			[C(RESULT_MISS)] = PM_LD_MISS_L1,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = 0,
			[C(RESULT_MISS)] = PM_ST_MISS_L1,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = PM_LD_PREFETCH_CACHE_LINE_MISS,
			[C(RESULT_MISS)] = 0,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = PM_INST_FROM_L1,
			[C(RESULT_MISS)] = PM_L1_ICACHE_MISS,
		},
		[C(OP_WRITE)] = {
			/*
			 * NOTE(review): "write access" mapped to an L1-miss
			 * event looks surprising - confirm against the
			 * POWER10 event list before relying on it.
			 */
			[C(RESULT_ACCESS)] = PM_INST_FROM_L1MISS,
			[C(RESULT_MISS)] = -1,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = PM_IC_PREF_REQ,
			[C(RESULT_MISS)] = 0,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = PM_DATA_FROM_L3,
			[C(RESULT_MISS)] = PM_DATA_FROM_L3MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)] = -1,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)] = 0,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = 0,
			[C(RESULT_MISS)] = PM_DTLB_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)] = -1,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)] = -1,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = 0,
			[C(RESULT_MISS)] = PM_ITLB_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)] = -1,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)] = -1,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = PM_BR_CMPL,
			[C(RESULT_MISS)] = PM_BR_MPRED_CMPL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)] = -1,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)] = -1,
		},
	},
	/* No NUMA-node events are exposed for this PMU */
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)] = -1,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)] = -1,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)] = -1,
		},
	},
};

#undef C

/*
 * POWER10 PMU description handed to register_power_pmu(). Generic
 * constraint/MMCR handling is shared with other ISA v2.07+ CPUs via
 * the isa207_* callbacks; only the event tables and BHRB hooks are
 * POWER10-specific.
 */
static struct power_pmu power10_pmu = {
	.name			= "POWER10",
	.n_counter		= MAX_PMU_COUNTERS,
	.add_fields		= ISA207_ADD_FIELDS,
	.test_adder		= ISA207_TEST_ADDER,
	.group_constraint_mask	= CNST_CACHE_PMC4_MASK,
	.group_constraint_val	= CNST_CACHE_PMC4_VAL,
	.compute_mmcr		= isa207_compute_mmcr,
	.config_bhrb		= power10_config_bhrb,
	.bhrb_filter_map	= power10_bhrb_filter_map,
	.get_constraint		= isa207_get_constraint,
	.get_alternatives	= power10_get_alternatives,
	.get_mem_data_src	= isa207_get_mem_data_src,
	.get_mem_weight		= isa207_get_mem_weight,
	.disable_pmc		= isa207_disable_pmc,
	.flags			= PPMU_HAS_SIER | PPMU_ARCH_207S |
				  PPMU_ARCH_31,
	.n_generic		= ARRAY_SIZE(power10_generic_events),
	.generic_events		= power10_generic_events,
	.cache_events		= &power10_cache_events,
	.attr_groups		= power10_pmu_attr_groups,
	.bhrb_nr		= 32,	/* BHRB entries on POWER10 */
	.capabilities		= PERF_PMU_CAP_EXTENDED_REGS,
};

/*
 * Register the POWER10 PMU with the core powerpc perf code.
 * Returns 0 on success, -ENODEV when not running on POWER10, or the
 * error from register_power_pmu().
 */
int init_power10_pmu(void)
{
	int rc;

	/* Comes from cpu_specs[] */
	if (!cur_cpu_spec->oprofile_cpu_type ||
	    strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power10"))
		return -ENODEV;

	/* Set the PERF_REG_EXTENDED_MASK here */
	PERF_REG_EXTENDED_MASK = PERF_REG_PMU_MASK_31;

	rc = register_power_pmu(&power10_pmu);
	if (rc)
		return rc;

	/* Tell userspace that EBB is supported */
	cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_EBB;

	return 0;
}