/*
 * Performance counter support for POWER8 processors.
 *
 * Copyright 2009 Paul Mackerras, IBM Corporation.
 * Copyright 2013 Michael Ellerman, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt)	"power8-pmu: " fmt

#include "isa207-common.h"

/*
 * Some power8 event codes.
 */
#define EVENT(_name, _code)	_name = _code,

enum {
#include "power8-events-list.h"
};

#undef EVENT

/* MMCRA IFM bits - POWER8 */
#define	POWER8_MMCRA_IFM1		0x0000000040000000UL
#define	POWER8_MMCRA_IFM2		0x0000000080000000UL
#define	POWER8_MMCRA_IFM3		0x00000000C0000000UL

/*
 * Raw event encoding for PowerISA v2.07 (Power8):
 *
 *        60        56        52        48        44        40        36        32
 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
 *   | | [ ]                           [      thresh_cmp     ]   [  thresh_ctl   ]
 *   | |  |                                                              |
 *   | |  *- IFM (Linux)                 thresh start/stop OR FAB match -*
 *   | *- BHRB (Linux)
 *   *- EBB (Linux)
 *
 *        28        24        20        16        12         8         4         0
 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
 *   [   ] [  sample ]   [cache]   [ pmc ]   [unit ]   c     m   [    pmcxsel    ]
 *     |        |           |                           |     |
 *     |        |           |                           |     *- mark
 *     |        |           *- L1/L2/L3 cache_sel       |
 *     |        |                                       |
 *     |        *- sampling mode for marked events      *- combine
 *     |
 *     *- thresh_sel
 *
 * Below uses IBM bit numbering.
 *
 * MMCR1[x:y] = unit    (PMCxUNIT)
 * MMCR1[x]   = combine (PMCxCOMB)
 *
 * if pmc == 3 and unit == 0 and pmcxsel[0:6] == 0b0101011
 *	# PM_MRK_FAB_RSP_MATCH
 *	MMCR1[20:27] = thresh_ctl (FAB_CRESP_MATCH / FAB_TYPE_MATCH)
 * else if pmc == 4 and unit == 0xf and pmcxsel[0:6] == 0b0101001
 *	# PM_MRK_FAB_RSP_MATCH_CYC
 *	MMCR1[20:27] = thresh_ctl (FAB_CRESP_MATCH / FAB_TYPE_MATCH)
 * else
 *	MMCRA[48:55] = thresh_ctl (THRESH START/END)
 *
 * if thresh_sel:
 *	MMCRA[45:47] = thresh_sel
 *
 * if thresh_cmp:
 *	MMCRA[22:24] = thresh_cmp[0:2]
 *	MMCRA[25:31] = thresh_cmp[3:9]
 *
 * if unit == 6 or unit == 7
 *	MMCRC[53:55] = cache_sel[1:3] (L2EVENT_SEL)
 * else if unit == 8 or unit == 9:
 *	if cache_sel[0] == 0: # L3 bank
 *		MMCRC[47:49] = cache_sel[1:3] (L3EVENT_SEL0)
 *	else if cache_sel[0] == 1:
 *		MMCRC[50:51] = cache_sel[2:3] (L3EVENT_SEL1)
 * else if cache_sel[1]: # L1 event
 *	MMCR1[16] = cache_sel[2]
 *	MMCR1[17] = cache_sel[3]
 *
 * if mark:
 *	MMCRA[63]    = 1		(SAMPLE_ENABLE)
 *	MMCRA[57:59] = sample[0:2]	(RAND_SAMP_ELIG)
 *	MMCRA[61:62] = sample[3:4]	(RAND_SAMP_MODE)
 *
 * if EBB and BHRB:
 *	MMCRA[32:33] = IFM
 *
 */
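
/*
 * Purely illustrative helper, not used by the driver: it pulls the three
 * fields most readers care about out of a raw event code, using the bit
 * positions shown in the diagram above (pmcxsel in bits 0-7, unit in bits
 * 12-15, pmc in bits 16-19). The canonical shift/mask definitions live in
 * isa207-common.h; treat this as a sketch of the layout, not the decoder
 * the core code actually uses.
 */
static inline void power8_decode_event_sketch(u64 event, unsigned int *pmc,
					      unsigned int *unit,
					      unsigned int *pmcxsel)
{
	*pmcxsel = event & 0xff;		/* PMCxSEL, bits 0-7 */
	*unit	 = (event >> 12) & 0xf;		/* event unit (TTMx), bits 12-15 */
	*pmc	 = (event >> 16) & 0xf;		/* PMC number, bits 16-19 */
}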

/* PowerISA v2.07 format attribute structure */
extern struct attribute_group isa207_pmu_format_group;

/* Table of alternatives, sorted by column 0 */
static const unsigned int event_alternatives[][MAX_ALT] = {
	{ PM_MRK_ST_CMPL,		PM_MRK_ST_CMPL_ALT },
	{ PM_BR_MRK_2PATH,		PM_BR_MRK_2PATH_ALT },
	{ PM_L3_CO_MEPF,		PM_L3_CO_MEPF_ALT },
	{ PM_MRK_DATA_FROM_L2MISS,	PM_MRK_DATA_FROM_L2MISS_ALT },
	{ PM_CMPLU_STALL_ALT,		PM_CMPLU_STALL },
	{ PM_BR_2PATH,			PM_BR_2PATH_ALT },
	{ PM_INST_DISP,			PM_INST_DISP_ALT },
	{ PM_RUN_CYC_ALT,		PM_RUN_CYC },
	{ PM_MRK_FILT_MATCH,		PM_MRK_FILT_MATCH_ALT },
	{ PM_LD_MISS_L1,		PM_LD_MISS_L1_ALT },
	{ PM_RUN_INST_CMPL_ALT,		PM_RUN_INST_CMPL },
};

static int power8_get_alternatives(u64 event, unsigned int flags, u64 alt[])
{
	int num_alt = 0;

	num_alt = isa207_get_alternatives(event, alt,
					  ARRAY_SIZE(event_alternatives), flags,
					  event_alternatives);

	return num_alt;
}

GENERIC_EVENT_ATTR(cpu-cycles,			PM_CYC);
GENERIC_EVENT_ATTR(stalled-cycles-frontend,	PM_GCT_NOSLOT_CYC);
GENERIC_EVENT_ATTR(stalled-cycles-backend,	PM_CMPLU_STALL);
GENERIC_EVENT_ATTR(instructions,		PM_INST_CMPL);
GENERIC_EVENT_ATTR(branch-instructions,		PM_BRU_FIN);
GENERIC_EVENT_ATTR(branch-misses,		PM_BR_MPRED_CMPL);
GENERIC_EVENT_ATTR(cache-references,		PM_LD_REF_L1);
GENERIC_EVENT_ATTR(cache-misses,		PM_LD_MISS_L1);
GENERIC_EVENT_ATTR(mem_access,			MEM_ACCESS);

CACHE_EVENT_ATTR(L1-dcache-load-misses,		PM_LD_MISS_L1);
CACHE_EVENT_ATTR(L1-dcache-loads,		PM_LD_REF_L1);

CACHE_EVENT_ATTR(L1-dcache-prefetches,		PM_L1_PREF);
CACHE_EVENT_ATTR(L1-dcache-store-misses,	PM_ST_MISS_L1);
CACHE_EVENT_ATTR(L1-icache-load-misses,		PM_L1_ICACHE_MISS);
CACHE_EVENT_ATTR(L1-icache-loads,		PM_INST_FROM_L1);
CACHE_EVENT_ATTR(L1-icache-prefetches,		PM_IC_PREF_WRITE);

CACHE_EVENT_ATTR(LLC-load-misses,		PM_DATA_FROM_L3MISS);
CACHE_EVENT_ATTR(LLC-loads,			PM_DATA_FROM_L3);
CACHE_EVENT_ATTR(LLC-prefetches,		PM_L3_PREF_ALL);
CACHE_EVENT_ATTR(LLC-store-misses,		PM_L2_ST_MISS);
CACHE_EVENT_ATTR(LLC-stores,			PM_L2_ST);

CACHE_EVENT_ATTR(branch-load-misses,		PM_BR_MPRED_CMPL);
CACHE_EVENT_ATTR(branch-loads,			PM_BRU_FIN);
CACHE_EVENT_ATTR(dTLB-load-misses,		PM_DTLB_MISS);
CACHE_EVENT_ATTR(iTLB-load-misses,		PM_ITLB_MISS);

static struct attribute *power8_events_attr[] = {
	GENERIC_EVENT_PTR(PM_CYC),
	GENERIC_EVENT_PTR(PM_GCT_NOSLOT_CYC),
	GENERIC_EVENT_PTR(PM_CMPLU_STALL),
	GENERIC_EVENT_PTR(PM_INST_CMPL),
	GENERIC_EVENT_PTR(PM_BRU_FIN),
	GENERIC_EVENT_PTR(PM_BR_MPRED_CMPL),
	GENERIC_EVENT_PTR(PM_LD_REF_L1),
	GENERIC_EVENT_PTR(PM_LD_MISS_L1),
	GENERIC_EVENT_PTR(MEM_ACCESS),

	CACHE_EVENT_PTR(PM_LD_MISS_L1),
	CACHE_EVENT_PTR(PM_LD_REF_L1),
	CACHE_EVENT_PTR(PM_L1_PREF),
	CACHE_EVENT_PTR(PM_ST_MISS_L1),
	CACHE_EVENT_PTR(PM_L1_ICACHE_MISS),
	CACHE_EVENT_PTR(PM_INST_FROM_L1),
	CACHE_EVENT_PTR(PM_IC_PREF_WRITE),
	CACHE_EVENT_PTR(PM_DATA_FROM_L3MISS),
	CACHE_EVENT_PTR(PM_DATA_FROM_L3),
	CACHE_EVENT_PTR(PM_L3_PREF_ALL),
	CACHE_EVENT_PTR(PM_L2_ST_MISS),
	CACHE_EVENT_PTR(PM_L2_ST),

	CACHE_EVENT_PTR(PM_BR_MPRED_CMPL),
	CACHE_EVENT_PTR(PM_BRU_FIN),

	CACHE_EVENT_PTR(PM_DTLB_MISS),
	CACHE_EVENT_PTR(PM_ITLB_MISS),
	NULL
};

static struct attribute_group power8_pmu_events_group = {
	.name = "events",
	.attrs = power8_events_attr,
};

static const struct attribute_group *power8_pmu_attr_groups[] = {
	&isa207_pmu_format_group,
	&power8_pmu_events_group,
	NULL,
};

static int power8_generic_events[] = {
	[PERF_COUNT_HW_CPU_CYCLES] =			PM_CYC,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =	PM_GCT_NOSLOT_CYC,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =	PM_CMPLU_STALL,
	[PERF_COUNT_HW_INSTRUCTIONS] =			PM_INST_CMPL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] =		PM_BRU_FIN,
	[PERF_COUNT_HW_BRANCH_MISSES] =			PM_BR_MPRED_CMPL,
	[PERF_COUNT_HW_CACHE_REFERENCES] =		PM_LD_REF_L1,
	[PERF_COUNT_HW_CACHE_MISSES] =			PM_LD_MISS_L1,
};
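
/*
 * The tables above are what generic tooling resolves against: the
 * GENERIC_EVENT_ATTR()/CACHE_EVENT_ATTR() strings are exported under
 * /sys/bus/event_source/devices/cpu/events, and power8_generic_events[]
 * backs the PERF_TYPE_HARDWARE ids. A raw code from power8-events-list.h
 * can also be passed directly; a minimal userspace sketch, assuming the
 * usual perf_event_open() syscall invocation:
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_RAW,
 *		.size	= sizeof(attr),
 *		.config	= PM_CYC,	// any code from power8-events-list.h
 *	};
 *	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 */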

static u64 power8_bhrb_filter_map(u64 branch_sample_type)
{
	u64 pmu_bhrb_filter = 0;

	/* BHRB and regular PMU events share the same privilege state
	 * filter configuration. BHRB is always recorded along with a
	 * regular PMU event. As the privilege state filter is handled
	 * in the basic PMC configuration of the accompanying regular
	 * PMU event, we ignore any separate BHRB specific request.
	 */

	/* No branch filter requested */
	if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY)
		return pmu_bhrb_filter;

	/* Invalid branch filter options - HW does not support */
	if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
		return -1;

	if (branch_sample_type & PERF_SAMPLE_BRANCH_IND_CALL)
		return -1;

	if (branch_sample_type & PERF_SAMPLE_BRANCH_CALL)
		return -1;

	if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_CALL) {
		pmu_bhrb_filter |= POWER8_MMCRA_IFM1;
		return pmu_bhrb_filter;
	}

	/* Everything else is unsupported */
	return -1;
}

static void power8_config_bhrb(u64 pmu_bhrb_filter)
{
	/* Enable BHRB filter in PMU */
	mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter));
}

#define C(x)	PERF_COUNT_HW_CACHE_##x

/*
 * Table of generalized cache-related events.
 * 0 means not supported, -1 means nonsensical, other values
 * are event codes.
 */
static int power8_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
	[ C(L1D) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = PM_LD_REF_L1,
			[ C(RESULT_MISS) ] = PM_LD_MISS_L1,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS) ] = PM_ST_MISS_L1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = PM_L1_PREF,
			[ C(RESULT_MISS) ] = 0,
		},
	},
	[ C(L1I) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = PM_INST_FROM_L1,
			[ C(RESULT_MISS) ] = PM_L1_ICACHE_MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = PM_L1_DEMAND_WRITE,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = PM_IC_PREF_WRITE,
			[ C(RESULT_MISS) ] = 0,
		},
	},
	[ C(LL) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = PM_DATA_FROM_L3,
			[ C(RESULT_MISS) ] = PM_DATA_FROM_L3MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = PM_L2_ST,
			[ C(RESULT_MISS) ] = PM_L2_ST_MISS,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = PM_L3_PREF_ALL,
			[ C(RESULT_MISS) ] = 0,
		},
	},
	[ C(DTLB) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS) ] = PM_DTLB_MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
	},
	[ C(ITLB) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS) ] = PM_ITLB_MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
	},
	[ C(BPU) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = PM_BRU_FIN,
			[ C(RESULT_MISS) ] = PM_BR_MPRED_CMPL,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
	},
	[ C(NODE) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
	},
};

#undef C
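
/*
 * Illustration of how the table above is reached: for PERF_TYPE_HW_CACHE
 * events the generic code uses the usual encoding,
 *
 *	config = cache_id | (op_id << 8) | (result_id << 16);
 *
 * so, for example, "L1-dcache-load-misses" selects
 * [L1D][OP_READ][RESULT_MISS] and therefore counts PM_LD_MISS_L1.
 */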

static struct power_pmu power8_pmu = {
	.name			= "POWER8",
	.n_counter		= MAX_PMU_COUNTERS,
	.max_alternatives	= MAX_ALT + 1,
	.add_fields		= ISA207_ADD_FIELDS,
	.test_adder		= ISA207_TEST_ADDER,
	.compute_mmcr		= isa207_compute_mmcr,
	.config_bhrb		= power8_config_bhrb,
	.bhrb_filter_map	= power8_bhrb_filter_map,
	.get_constraint		= isa207_get_constraint,
	.get_alternatives	= power8_get_alternatives,
	.get_mem_data_src	= isa207_get_mem_data_src,
	.get_mem_weight		= isa207_get_mem_weight,
	.disable_pmc		= isa207_disable_pmc,
	.flags			= PPMU_HAS_SIER | PPMU_ARCH_207S,
	.n_generic		= ARRAY_SIZE(power8_generic_events),
	.generic_events		= power8_generic_events,
	.cache_events		= &power8_cache_events,
	.attr_groups		= power8_pmu_attr_groups,
	.bhrb_nr		= 32,
};

int init_power8_pmu(void)
{
	int rc;

	if (!cur_cpu_spec->oprofile_cpu_type ||
	    strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power8"))
		return -ENODEV;

	rc = register_power_pmu(&power8_pmu);
	if (rc)
		return rc;

	/* Tell userspace that EBB is supported */
	cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_EBB;

	if (cpu_has_feature(CPU_FTR_PMAO_BUG))
		pr_info("PMAO restore workaround active.\n");

	return 0;
}
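
/*
 * The EBB bit set above ends up in the auxiliary vector, so userspace can
 * probe for the facility before trying to use it; a minimal sketch
 * (userspace side, assuming glibc's getauxval()):
 *
 *	#include <sys/auxv.h>
 *
 *	if (getauxval(AT_HWCAP2) & PPC_FEATURE2_EBB)
 *		... event-based branch facility is available ...
 */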