// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Performance counter support for POWER8 processors.
 *
 * Copyright 2009 Paul Mackerras, IBM Corporation.
 * Copyright 2013 Michael Ellerman, IBM Corporation.
 */

#define pr_fmt(fmt)	"power8-pmu: " fmt

#include "isa207-common.h"

/*
 * Some power8 event codes.
 *
 * Each EVENT(name, code) line in power8-events-list.h is expanded into an
 * enumerator here, giving every raw event code a symbolic name used by the
 * tables and attribute macros below.
 */
#define EVENT(_name, _code)	_name = _code,

enum {
#include "power8-events-list.h"
};

#undef EVENT

/*
 * MMCRA IFM bits - POWER8.
 *
 * Instruction Filtering Mode for the BHRB, written into MMCRA by
 * power8_config_bhrb(). Only IFM1 is selected in this file (for
 * PERF_SAMPLE_BRANCH_ANY_CALL); IFM2/IFM3 are defined for completeness.
 */
#define	POWER8_MMCRA_IFM1		0x0000000040000000UL
#define	POWER8_MMCRA_IFM2		0x0000000080000000UL
#define	POWER8_MMCRA_IFM3		0x00000000C0000000UL

/*
 * Raw event encoding for PowerISA v2.07 (Power8):
 *
 *        60        56        52        48        44        40        36        32
 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
 *   | |                                 [      thresh_cmp     ]   [  thresh_ctl   ]
 *   | |                                                                   |
 *   | |                 *- IFM (Linux)                 thresh start/stop OR FAB match -*
 *   | *- BHRB (Linux)
 *   *- EBB (Linux)
 *
 *        28        24        20        16        12         8         4         0
 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
 *   [   ] [  sample ]   [cache]   [ pmc ]   [unit ]   c     m   [    pmcxsel    ]
 *     |        |           |                          |     |
 *     |        |           |                          |     *- mark
 *     |        |           *- L1/L2/L3 cache_sel      |
 *     |        |                                      |
 *     |        *- sampling mode for marked events     *- combine
 *     |
 *     *- thresh_sel
 *
 * Below uses IBM bit numbering.
 *
 * MMCR1[x:y]    = unit    (PMCxUNIT)
 * MMCR1[x]      = combine (PMCxCOMB)
 *
 * if pmc == 3 and unit == 0 and pmcxsel[0:6] == 0b0101011
 *	# PM_MRK_FAB_RSP_MATCH
 *	MMCR1[20:27] = thresh_ctl   (FAB_CRESP_MATCH / FAB_TYPE_MATCH)
 * else if pmc == 4 and unit == 0xf and pmcxsel[0:6] == 0b0101001
 *	# PM_MRK_FAB_RSP_MATCH_CYC
 *	MMCR1[20:27] = thresh_ctl   (FAB_CRESP_MATCH / FAB_TYPE_MATCH)
 * else
 *	MMCRA[48:55] = thresh_ctl   (THRESH START/END)
 *
 * if thresh_sel:
 *	MMCRA[45:47] = thresh_sel
 *
 * if thresh_cmp:
 *	MMCRA[22:24] = thresh_cmp[0:2]
 *	MMCRA[25:31] = thresh_cmp[3:9]
 *
 * if unit == 6 or unit == 7
 *	MMCRC[53:55] = cache_sel[1:3]	(L2EVENT_SEL)
 * else if unit == 8 or unit == 9:
 *	if cache_sel[0] == 0: # L3 bank
 *		MMCRC[47:49] = cache_sel[1:3]	(L3EVENT_SEL0)
 *	else if cache_sel[0] == 1:
 *		MMCRC[50:51] = cache_sel[2:3]	(L3EVENT_SEL1)
 * else if cache_sel[1]: # L1 event
 *	MMCR1[16] = cache_sel[2]
 *	MMCR1[17] = cache_sel[3]
 *
 * if mark:
 *	MMCRA[63]    = 1		(SAMPLE_ENABLE)
 *	MMCRA[57:59] = sample[0:2]	(RAND_SAMP_ELIG)
 *	MMCRA[61:62] = sample[3:4]	(RAND_SAMP_MODE)
 *
 * if EBB and BHRB:
 *	MMCRA[32:33] = IFM
 *
 */

/* PowerISA v2.07 format attribute structure, shared with other ISA 2.07 PMUs */
extern struct attribute_group isa207_pmu_format_group;

/*
 * Table of alternatives, sorted by column 0.
 *
 * Each row lists event codes that count the same thing on different PMCs,
 * letting the core scheduler substitute one for another when placing events
 * on counters. isa207_get_alternatives() binary-searches this by the first
 * column, so the sort order must be maintained.
 */
static const unsigned int event_alternatives[][MAX_ALT] = {
	{ PM_MRK_ST_CMPL,		PM_MRK_ST_CMPL_ALT },
	{ PM_BR_MRK_2PATH,		PM_BR_MRK_2PATH_ALT },
	{ PM_L3_CO_MEPF,		PM_L3_CO_MEPF_ALT },
	{ PM_MRK_DATA_FROM_L2MISS,	PM_MRK_DATA_FROM_L2MISS_ALT },
	{ PM_CMPLU_STALL_ALT,		PM_CMPLU_STALL },
	{ PM_BR_2PATH,			PM_BR_2PATH_ALT },
	{ PM_INST_DISP,			PM_INST_DISP_ALT },
	{ PM_RUN_CYC_ALT,		PM_RUN_CYC },
	{ PM_MRK_FILT_MATCH,		PM_MRK_FILT_MATCH_ALT },
	{ PM_LD_MISS_L1,		PM_LD_MISS_L1_ALT },
	{ PM_RUN_INST_CMPL_ALT,		PM_RUN_INST_CMPL },
};

/*
 * power8_get_alternatives - fill alt[] with alternative encodings for event.
 *
 * Thin wrapper that points the common ISA 2.07 alternatives lookup at the
 * POWER8 table above. Returns the number of alternatives written to alt[].
 */
static int power8_get_alternatives(u64 event, unsigned int flags, u64 alt[])
{
	int num_alt = 0;

	num_alt = isa207_get_alternatives(event, alt,
					  ARRAY_SIZE(event_alternatives), flags,
					  event_alternatives);

	return num_alt;
}

/* Named sysfs event attributes for the generic perf hardware events. */
GENERIC_EVENT_ATTR(cpu-cycles,			PM_CYC);
GENERIC_EVENT_ATTR(stalled-cycles-frontend,	PM_GCT_NOSLOT_CYC);
GENERIC_EVENT_ATTR(stalled-cycles-backend,	PM_CMPLU_STALL);
GENERIC_EVENT_ATTR(instructions,		PM_INST_CMPL);
GENERIC_EVENT_ATTR(branch-instructions,		PM_BRU_FIN);
GENERIC_EVENT_ATTR(branch-misses,		PM_BR_MPRED_CMPL);
GENERIC_EVENT_ATTR(cache-references,		PM_LD_REF_L1);
GENERIC_EVENT_ATTR(cache-misses,		PM_LD_MISS_L1);
GENERIC_EVENT_ATTR(mem_access,			MEM_ACCESS);

/* Named sysfs event attributes for the generalized cache events. */
CACHE_EVENT_ATTR(L1-dcache-load-misses,		PM_LD_MISS_L1);
CACHE_EVENT_ATTR(L1-dcache-loads,		PM_LD_REF_L1);

CACHE_EVENT_ATTR(L1-dcache-prefetches,		PM_L1_PREF);
CACHE_EVENT_ATTR(L1-dcache-store-misses,	PM_ST_MISS_L1);
CACHE_EVENT_ATTR(L1-icache-load-misses,		PM_L1_ICACHE_MISS);
CACHE_EVENT_ATTR(L1-icache-loads,		PM_INST_FROM_L1);
CACHE_EVENT_ATTR(L1-icache-prefetches,		PM_IC_PREF_WRITE);

CACHE_EVENT_ATTR(LLC-load-misses,		PM_DATA_FROM_L3MISS);
CACHE_EVENT_ATTR(LLC-loads,			PM_DATA_FROM_L3);
CACHE_EVENT_ATTR(LLC-prefetches,		PM_L3_PREF_ALL);
CACHE_EVENT_ATTR(LLC-store-misses,		PM_L2_ST_MISS);
CACHE_EVENT_ATTR(LLC-stores,			PM_L2_ST);

CACHE_EVENT_ATTR(branch-load-misses,		PM_BR_MPRED_CMPL);
CACHE_EVENT_ATTR(branch-loads,			PM_BRU_FIN);
CACHE_EVENT_ATTR(dTLB-load-misses,		PM_DTLB_MISS);
CACHE_EVENT_ATTR(iTLB-load-misses,		PM_ITLB_MISS);

/* All event attributes declared above, exported via the "events" group. */
static struct attribute *power8_events_attr[] = {
	GENERIC_EVENT_PTR(PM_CYC),
	GENERIC_EVENT_PTR(PM_GCT_NOSLOT_CYC),
	GENERIC_EVENT_PTR(PM_CMPLU_STALL),
	GENERIC_EVENT_PTR(PM_INST_CMPL),
	GENERIC_EVENT_PTR(PM_BRU_FIN),
	GENERIC_EVENT_PTR(PM_BR_MPRED_CMPL),
	GENERIC_EVENT_PTR(PM_LD_REF_L1),
	GENERIC_EVENT_PTR(PM_LD_MISS_L1),
	GENERIC_EVENT_PTR(MEM_ACCESS),

	CACHE_EVENT_PTR(PM_LD_MISS_L1),
	CACHE_EVENT_PTR(PM_LD_REF_L1),
	CACHE_EVENT_PTR(PM_L1_PREF),
	CACHE_EVENT_PTR(PM_ST_MISS_L1),
	CACHE_EVENT_PTR(PM_L1_ICACHE_MISS),
	CACHE_EVENT_PTR(PM_INST_FROM_L1),
	CACHE_EVENT_PTR(PM_IC_PREF_WRITE),
	CACHE_EVENT_PTR(PM_DATA_FROM_L3MISS),
	CACHE_EVENT_PTR(PM_DATA_FROM_L3),
	CACHE_EVENT_PTR(PM_L3_PREF_ALL),
	CACHE_EVENT_PTR(PM_L2_ST_MISS),
	CACHE_EVENT_PTR(PM_L2_ST),

	CACHE_EVENT_PTR(PM_BR_MPRED_CMPL),
	CACHE_EVENT_PTR(PM_BRU_FIN),

	CACHE_EVENT_PTR(PM_DTLB_MISS),
	CACHE_EVENT_PTR(PM_ITLB_MISS),
	NULL
};

static struct attribute_group power8_pmu_events_group = {
	.name = "events",
	.attrs = power8_events_attr,
};

/* sysfs groups exposed under /sys/bus/event_source/devices/cpu/ */
static const struct attribute_group *power8_pmu_attr_groups[] = {
	&isa207_pmu_format_group,
	&power8_pmu_events_group,
	NULL,
};

/* Map of the generic perf hardware events to POWER8 raw event codes. */
static int power8_generic_events[] = {
	[PERF_COUNT_HW_CPU_CYCLES] =			PM_CYC,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =	PM_GCT_NOSLOT_CYC,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =	PM_CMPLU_STALL,
	[PERF_COUNT_HW_INSTRUCTIONS] =			PM_INST_CMPL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] =		PM_BRU_FIN,
	[PERF_COUNT_HW_BRANCH_MISSES] =			PM_BR_MPRED_CMPL,
	[PERF_COUNT_HW_CACHE_REFERENCES] =		PM_LD_REF_L1,
	[PERF_COUNT_HW_CACHE_MISSES] =			PM_LD_MISS_L1,
};

/*
 * power8_bhrb_filter_map - translate perf branch-sample flags to MMCRA IFM bits.
 *
 * Returns the MMCRA IFM value for the requested PERF_SAMPLE_BRANCH_* filter,
 * 0 for "record all branches", or -1 (as u64) when the hardware cannot
 * implement the requested filter and the core must reject the event.
 */
static u64 power8_bhrb_filter_map(u64 branch_sample_type)
{
	u64 pmu_bhrb_filter = 0;

	/* BHRB and regular PMU events share the same privilege state
	 * filter configuration. BHRB is always recorded along with a
	 * regular PMU event. As the privilege state filter is handled
	 * in the basic PMC configuration of the accompanying regular
	 * PMU event, we ignore any separate BHRB specific request.
	 */

	/* No branch filter requested */
	if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY)
		return pmu_bhrb_filter;

	/* Invalid branch filter options - HW does not support */
	if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
		return -1;

	if (branch_sample_type & PERF_SAMPLE_BRANCH_IND_CALL)
		return -1;

	if (branch_sample_type & PERF_SAMPLE_BRANCH_CALL)
		return -1;

	/* The only filter POWER8 can do in hardware: any call (IFM1). */
	if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_CALL) {
		pmu_bhrb_filter |= POWER8_MMCRA_IFM1;
		return pmu_bhrb_filter;
	}

	/* Every thing else is unsupported */
	return -1;
}

/*
 * power8_config_bhrb - program the BHRB filter computed above into MMCRA.
 *
 * OR-only update: relies on the caller having (re)written the MMCRA base
 * value beforehand, since stale IFM bits are not cleared here.
 */
static void power8_config_bhrb(u64 pmu_bhrb_filter)
{
	/* Enable BHRB filter in PMU */
	mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter));
}

#define C(x)	PERF_COUNT_HW_CACHE_##x

/*
 * Table of generalized cache-related events.
 * 0 means not supported, -1 means nonsensical, other values
 * are event codes.
 */
static int power8_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
	[ C(L1D) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = PM_LD_REF_L1,
			[ C(RESULT_MISS)   ] = PM_LD_MISS_L1,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = PM_ST_MISS_L1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = PM_L1_PREF,
			[ C(RESULT_MISS)   ] = 0,
		},
	},
	[ C(L1I) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = PM_INST_FROM_L1,
			[ C(RESULT_MISS)   ] = PM_L1_ICACHE_MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = PM_L1_DEMAND_WRITE,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = PM_IC_PREF_WRITE,
			[ C(RESULT_MISS)   ] = 0,
		},
	},
	[ C(LL) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = PM_DATA_FROM_L3,
			[ C(RESULT_MISS)   ] = PM_DATA_FROM_L3MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = PM_L2_ST,
			[ C(RESULT_MISS)   ] = PM_L2_ST_MISS,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = PM_L3_PREF_ALL,
			[ C(RESULT_MISS)   ] = 0,
		},
	},
	[ C(DTLB) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = PM_DTLB_MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
	},
	[ C(ITLB) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = PM_ITLB_MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
	},
	[ C(BPU) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = PM_BRU_FIN,
			[ C(RESULT_MISS)   ] = PM_BR_MPRED_CMPL,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
	},
	[ C(NODE) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
	},
};

#undef C

/*
 * POWER8 PMU description handed to the powerpc perf core. Most operations
 * are the common PowerISA v2.07 implementations from isa207-common; only
 * the alternatives lookup and BHRB handling are POWER8-specific.
 */
static struct power_pmu power8_pmu = {
	.name			= "POWER8",
	.n_counter		= MAX_PMU_COUNTERS,
	.max_alternatives	= MAX_ALT + 1,
	.add_fields		= ISA207_ADD_FIELDS,
	.test_adder		= ISA207_TEST_ADDER,
	.compute_mmcr		= isa207_compute_mmcr,
	.config_bhrb		= power8_config_bhrb,
	.bhrb_filter_map	= power8_bhrb_filter_map,
	.get_constraint		= isa207_get_constraint,
	.get_alternatives	= power8_get_alternatives,
	.get_mem_data_src	= isa207_get_mem_data_src,
	.get_mem_weight		= isa207_get_mem_weight,
	.disable_pmc		= isa207_disable_pmc,
	.flags			= PPMU_HAS_SIER | PPMU_ARCH_207S,
	.n_generic		= ARRAY_SIZE(power8_generic_events),
	.generic_events		= power8_generic_events,
	.cache_events		= &power8_cache_events,
	.attr_groups		= power8_pmu_attr_groups,
	.bhrb_nr		= 32,	/* BHRB depth on POWER8 */
};

/*
 * init_power8_pmu - register the POWER8 PMU with the perf core.
 *
 * Bails with -ENODEV when not running on a POWER8 (matched via the
 * oprofile_cpu_type string). On success, advertises EBB support to
 * userspace and notes whether the PMAO-restore erratum workaround is
 * active. Returns 0 on success or a negative errno.
 */
int init_power8_pmu(void)
{
	int rc;

	if (!cur_cpu_spec->oprofile_cpu_type ||
	    strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power8"))
		return -ENODEV;

	rc = register_power_pmu(&power8_pmu);
	if (rc)
		return rc;

	/* Tell userspace that EBB is supported */
	cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_EBB;

	if (cpu_has_feature(CPU_FTR_PMAO_BUG))
		pr_info("PMAO restore workaround active.\n");

	return 0;
}