// SPDX-License-Identifier: GPL-2.0
/*
 * RISC-V performance counter support.
 *
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 *
 * This code is based on ARM perf event code which is in turn based on
 * sparc64 and x86 code.
 */

#define pr_fmt(fmt) "riscv-pmu-sbi: " fmt

#include <linux/mod_devicetable.h>
#include <linux/perf/riscv_pmu.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of_irq.h>
#include <linux/of.h>
#include <linux/cpu_pm.h>
#include <linux/sched/clock.h>

#include <asm/errata_list.h>
#include <asm/sbi.h>
#include <asm/hwcap.h>

/*
 * Layout of perf_event_attr::config as exposed through sysfs:
 * bits 0-47 carry the event code, bit 63 selects a firmware event.
 */
PMU_FORMAT_ATTR(event, "config:0-47");
PMU_FORMAT_ATTR(firmware, "config:63");

static struct attribute *riscv_arch_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_firmware.attr,
	NULL,
};

static struct attribute_group riscv_pmu_format_group = {
	.name = "format",
	.attrs = riscv_arch_formats_attr,
};

static const struct attribute_group *riscv_pmu_attr_groups[] = {
	&riscv_pmu_format_group,
	NULL,
};

/*
 * RISC-V doesn't have heterogeneous harts yet. This needs to be made
 * per-CPU if harts with different PMU counters are ever supported.
 */
static union sbi_pmu_ctr_info *pmu_ctr_list;
/* True when a per-CPU overflow interrupt is available (Sscofpmf or errata) */
static bool riscv_pmu_use_irq;
/* Hart-local interrupt cause number used for PMU overflow */
static unsigned int riscv_pmu_irq_num;
/* Linux IRQ number mapped from riscv_pmu_irq_num */
static unsigned int riscv_pmu_irq;

/* Cache the available counters in a bitmask */
static unsigned long cmask;

/*
 * SBI PMU event index encoding. The two bitfield views and the raw
 * event_idx alias the same 32-bit value handed to the SBI firmware.
 */
struct sbi_pmu_event_data {
	union {
		union {
			/* Generic hardware event (SBI_PMU_EVENT_TYPE_HW) */
			struct hw_gen_event {
				uint32_t event_code:16;
				uint32_t event_type:4;
				uint32_t reserved:12;
			} hw_gen_event;
			/* Hardware cache event (SBI_PMU_EVENT_TYPE_CACHE) */
			struct hw_cache_event {
				uint32_t result_id:1;
				uint32_t op_id:2;
				uint32_t cache_id:13;
				uint32_t event_type:4;
				uint32_t reserved:12;
			} hw_cache_event;
		};
		/* Raw 32-bit SBI event index view of the encodings above */
		uint32_t event_idx;
	};
};

/* Map generic perf hardware event ids to SBI PMU event encodings */
static const struct sbi_pmu_event_data pmu_hw_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES]		= {.hw_gen_event = {
							SBI_PMU_HW_CPU_CYCLES,
							SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_INSTRUCTIONS]		= {.hw_gen_event = {
							SBI_PMU_HW_INSTRUCTIONS,
							SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_CACHE_REFERENCES]	= {.hw_gen_event = {
							SBI_PMU_HW_CACHE_REFERENCES,
							SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_CACHE_MISSES]		= {.hw_gen_event = {
							SBI_PMU_HW_CACHE_MISSES,
							SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= {.hw_gen_event = {
							SBI_PMU_HW_BRANCH_INSTRUCTIONS,
							SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_BRANCH_MISSES]		= {.hw_gen_event = {
							SBI_PMU_HW_BRANCH_MISSES,
							SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_BUS_CYCLES]		= {.hw_gen_event = {
							SBI_PMU_HW_BUS_CYCLES,
							SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= {.hw_gen_event = {
							SBI_PMU_HW_STALLED_CYCLES_FRONTEND,
							SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= {.hw_gen_event = {
							SBI_PMU_HW_STALLED_CYCLES_BACKEND,
							SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_REF_CPU_CYCLES]		= {.hw_gen_event = {
							SBI_PMU_HW_REF_CPU_CYCLES,
							SBI_PMU_EVENT_TYPE_HW, 0}},
};

#define C(x) PERF_COUNT_HW_CACHE_##x 112 static const struct sbi_pmu_event_data pmu_cache_event_map[PERF_COUNT_HW_CACHE_MAX] 113 [PERF_COUNT_HW_CACHE_OP_MAX] 114 [PERF_COUNT_HW_CACHE_RESULT_MAX] = { 115 [C(L1D)] = { 116 [C(OP_READ)] = { 117 [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), 118 C(OP_READ), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}}, 119 [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), 120 C(OP_READ), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}}, 121 }, 122 [C(OP_WRITE)] = { 123 [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), 124 C(OP_WRITE), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}}, 125 [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), 126 C(OP_WRITE), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}}, 127 }, 128 [C(OP_PREFETCH)] = { 129 [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), 130 C(OP_PREFETCH), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}}, 131 [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), 132 C(OP_PREFETCH), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}}, 133 }, 134 }, 135 [C(L1I)] = { 136 [C(OP_READ)] = { 137 [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), 138 C(OP_READ), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}}, 139 [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), C(OP_READ), 140 C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}}, 141 }, 142 [C(OP_WRITE)] = { 143 [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), 144 C(OP_WRITE), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}}, 145 [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), 146 C(OP_WRITE), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}}, 147 }, 148 [C(OP_PREFETCH)] = { 149 [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), 150 C(OP_PREFETCH), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}}, 151 [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), 152 C(OP_PREFETCH), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}}, 153 }, 154 }, 155 [C(LL)] = { 156 [C(OP_READ)] = { 157 [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), 158 C(OP_READ), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}}, 159 
[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), 160 C(OP_READ), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}}, 161 }, 162 [C(OP_WRITE)] = { 163 [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), 164 C(OP_WRITE), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}}, 165 [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), 166 C(OP_WRITE), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}}, 167 }, 168 [C(OP_PREFETCH)] = { 169 [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), 170 C(OP_PREFETCH), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}}, 171 [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), 172 C(OP_PREFETCH), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}}, 173 }, 174 }, 175 [C(DTLB)] = { 176 [C(OP_READ)] = { 177 [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), 178 C(OP_READ), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}}, 179 [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), 180 C(OP_READ), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}}, 181 }, 182 [C(OP_WRITE)] = { 183 [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), 184 C(OP_WRITE), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}}, 185 [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), 186 C(OP_WRITE), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}}, 187 }, 188 [C(OP_PREFETCH)] = { 189 [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), 190 C(OP_PREFETCH), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}}, 191 [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), 192 C(OP_PREFETCH), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}}, 193 }, 194 }, 195 [C(ITLB)] = { 196 [C(OP_READ)] = { 197 [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), 198 C(OP_READ), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}}, 199 [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), 200 C(OP_READ), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}}, 201 }, 202 [C(OP_WRITE)] = { 203 [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), 204 C(OP_WRITE), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}}, 205 [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), 206 C(OP_WRITE), C(ITLB), 
SBI_PMU_EVENT_TYPE_CACHE, 0}}, 207 }, 208 [C(OP_PREFETCH)] = { 209 [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), 210 C(OP_PREFETCH), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}}, 211 [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), 212 C(OP_PREFETCH), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}}, 213 }, 214 }, 215 [C(BPU)] = { 216 [C(OP_READ)] = { 217 [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), 218 C(OP_READ), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}}, 219 [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), 220 C(OP_READ), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}}, 221 }, 222 [C(OP_WRITE)] = { 223 [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), 224 C(OP_WRITE), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}}, 225 [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), 226 C(OP_WRITE), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}}, 227 }, 228 [C(OP_PREFETCH)] = { 229 [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), 230 C(OP_PREFETCH), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}}, 231 [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), 232 C(OP_PREFETCH), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}}, 233 }, 234 }, 235 [C(NODE)] = { 236 [C(OP_READ)] = { 237 [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), 238 C(OP_READ), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}}, 239 [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), 240 C(OP_READ), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}}, 241 }, 242 [C(OP_WRITE)] = { 243 [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), 244 C(OP_WRITE), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}}, 245 [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), 246 C(OP_WRITE), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}}, 247 }, 248 [C(OP_PREFETCH)] = { 249 [C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS), 250 C(OP_PREFETCH), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}}, 251 [C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS), 252 C(OP_PREFETCH), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}}, 253 }, 254 }, 255 }; 256 257 static int 
pmu_sbi_ctr_get_width(int idx) 258 { 259 return pmu_ctr_list[idx].width; 260 } 261 262 static bool pmu_sbi_ctr_is_fw(int cidx) 263 { 264 union sbi_pmu_ctr_info *info; 265 266 info = &pmu_ctr_list[cidx]; 267 if (!info) 268 return false; 269 270 return (info->type == SBI_PMU_CTR_TYPE_FW) ? true : false; 271 } 272 273 /* 274 * Returns the counter width of a programmable counter and number of hardware 275 * counters. As we don't support heterogeneous CPUs yet, it is okay to just 276 * return the counter width of the first programmable counter. 277 */ 278 int riscv_pmu_get_hpm_info(u32 *hw_ctr_width, u32 *num_hw_ctr) 279 { 280 int i; 281 union sbi_pmu_ctr_info *info; 282 u32 hpm_width = 0, hpm_count = 0; 283 284 if (!cmask) 285 return -EINVAL; 286 287 for_each_set_bit(i, &cmask, RISCV_MAX_COUNTERS) { 288 info = &pmu_ctr_list[i]; 289 if (!info) 290 continue; 291 if (!hpm_width && info->csr != CSR_CYCLE && info->csr != CSR_INSTRET) 292 hpm_width = info->width; 293 if (info->type == SBI_PMU_CTR_TYPE_HW) 294 hpm_count++; 295 } 296 297 *hw_ctr_width = hpm_width; 298 *num_hw_ctr = hpm_count; 299 300 return 0; 301 } 302 EXPORT_SYMBOL_GPL(riscv_pmu_get_hpm_info); 303 304 static unsigned long pmu_sbi_get_filter_flags(struct perf_event *event) 305 { 306 unsigned long cflags = 0; 307 bool guest_events = false; 308 309 if (event->attr.config1 & RISCV_PMU_CONFIG1_GUEST_EVENTS) 310 guest_events = true; 311 if (event->attr.exclude_kernel) 312 cflags |= guest_events ? SBI_PMU_CFG_FLAG_SET_VSINH : SBI_PMU_CFG_FLAG_SET_SINH; 313 if (event->attr.exclude_user) 314 cflags |= guest_events ? 
SBI_PMU_CFG_FLAG_SET_VUINH : SBI_PMU_CFG_FLAG_SET_UINH; 315 if (guest_events && event->attr.exclude_hv) 316 cflags |= SBI_PMU_CFG_FLAG_SET_SINH; 317 if (event->attr.exclude_host) 318 cflags |= SBI_PMU_CFG_FLAG_SET_UINH | SBI_PMU_CFG_FLAG_SET_SINH; 319 if (event->attr.exclude_guest) 320 cflags |= SBI_PMU_CFG_FLAG_SET_VSINH | SBI_PMU_CFG_FLAG_SET_VUINH; 321 322 return cflags; 323 } 324 325 static int pmu_sbi_ctr_get_idx(struct perf_event *event) 326 { 327 struct hw_perf_event *hwc = &event->hw; 328 struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu); 329 struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events); 330 struct sbiret ret; 331 int idx; 332 uint64_t cbase = 0; 333 unsigned long cflags = 0; 334 335 cflags = pmu_sbi_get_filter_flags(event); 336 /* retrieve the available counter index */ 337 #if defined(CONFIG_32BIT) 338 ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase, 339 rvpmu->cmask, cflags, hwc->event_base, hwc->config, 340 hwc->config >> 32); 341 #else 342 ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase, 343 rvpmu->cmask, cflags, hwc->event_base, hwc->config, 0); 344 #endif 345 if (ret.error) { 346 pr_debug("Not able to find a counter for event %lx config %llx\n", 347 hwc->event_base, hwc->config); 348 return sbi_err_map_linux_errno(ret.error); 349 } 350 351 idx = ret.value; 352 if (!test_bit(idx, &rvpmu->cmask) || !pmu_ctr_list[idx].value) 353 return -ENOENT; 354 355 /* Additional sanity check for the counter id */ 356 if (pmu_sbi_ctr_is_fw(idx)) { 357 if (!test_and_set_bit(idx, cpuc->used_fw_ctrs)) 358 return idx; 359 } else { 360 if (!test_and_set_bit(idx, cpuc->used_hw_ctrs)) 361 return idx; 362 } 363 364 return -ENOENT; 365 } 366 367 static void pmu_sbi_ctr_clear_idx(struct perf_event *event) 368 { 369 370 struct hw_perf_event *hwc = &event->hw; 371 struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu); 372 struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events); 373 int idx = hwc->idx; 374 375 if 
(pmu_sbi_ctr_is_fw(idx)) 376 clear_bit(idx, cpuc->used_fw_ctrs); 377 else 378 clear_bit(idx, cpuc->used_hw_ctrs); 379 } 380 381 static int pmu_event_find_cache(u64 config) 382 { 383 unsigned int cache_type, cache_op, cache_result, ret; 384 385 cache_type = (config >> 0) & 0xff; 386 if (cache_type >= PERF_COUNT_HW_CACHE_MAX) 387 return -EINVAL; 388 389 cache_op = (config >> 8) & 0xff; 390 if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) 391 return -EINVAL; 392 393 cache_result = (config >> 16) & 0xff; 394 if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) 395 return -EINVAL; 396 397 ret = pmu_cache_event_map[cache_type][cache_op][cache_result].event_idx; 398 399 return ret; 400 } 401 402 static bool pmu_sbi_is_fw_event(struct perf_event *event) 403 { 404 u32 type = event->attr.type; 405 u64 config = event->attr.config; 406 407 if ((type == PERF_TYPE_RAW) && ((config >> 63) == 1)) 408 return true; 409 else 410 return false; 411 } 412 413 static int pmu_sbi_event_map(struct perf_event *event, u64 *econfig) 414 { 415 u32 type = event->attr.type; 416 u64 config = event->attr.config; 417 int bSoftware; 418 u64 raw_config_val; 419 int ret; 420 421 switch (type) { 422 case PERF_TYPE_HARDWARE: 423 if (config >= PERF_COUNT_HW_MAX) 424 return -EINVAL; 425 ret = pmu_hw_event_map[event->attr.config].event_idx; 426 break; 427 case PERF_TYPE_HW_CACHE: 428 ret = pmu_event_find_cache(config); 429 break; 430 case PERF_TYPE_RAW: 431 /* 432 * As per SBI specification, the upper 16 bits must be unused for 433 * a raw event. Use the MSB (63b) to distinguish between hardware 434 * raw event and firmware events. 
435 */ 436 bSoftware = config >> 63; 437 raw_config_val = config & RISCV_PMU_RAW_EVENT_MASK; 438 if (bSoftware) { 439 if (raw_config_val < SBI_PMU_FW_MAX) 440 ret = (raw_config_val & 0xFFFF) | 441 (SBI_PMU_EVENT_TYPE_FW << 16); 442 else 443 return -EINVAL; 444 } else { 445 ret = RISCV_PMU_RAW_EVENT_IDX; 446 *econfig = raw_config_val; 447 } 448 break; 449 default: 450 ret = -EINVAL; 451 break; 452 } 453 454 return ret; 455 } 456 457 static u64 pmu_sbi_ctr_read(struct perf_event *event) 458 { 459 struct hw_perf_event *hwc = &event->hw; 460 int idx = hwc->idx; 461 struct sbiret ret; 462 union sbi_pmu_ctr_info info; 463 u64 val = 0; 464 465 if (pmu_sbi_is_fw_event(event)) { 466 ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_FW_READ, 467 hwc->idx, 0, 0, 0, 0, 0); 468 if (!ret.error) 469 val = ret.value; 470 } else { 471 info = pmu_ctr_list[idx]; 472 val = riscv_pmu_ctr_read_csr(info.csr); 473 if (IS_ENABLED(CONFIG_32BIT)) 474 val = ((u64)riscv_pmu_ctr_read_csr(info.csr + 0x80)) << 31 | val; 475 } 476 477 return val; 478 } 479 480 static void pmu_sbi_ctr_start(struct perf_event *event, u64 ival) 481 { 482 struct sbiret ret; 483 struct hw_perf_event *hwc = &event->hw; 484 unsigned long flag = SBI_PMU_START_FLAG_SET_INIT_VALUE; 485 486 #if defined(CONFIG_32BIT) 487 ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, hwc->idx, 488 1, flag, ival, ival >> 32, 0); 489 #else 490 ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, hwc->idx, 491 1, flag, ival, 0, 0); 492 #endif 493 if (ret.error && (ret.error != SBI_ERR_ALREADY_STARTED)) 494 pr_err("Starting counter idx %d failed with error %d\n", 495 hwc->idx, sbi_err_map_linux_errno(ret.error)); 496 } 497 498 static void pmu_sbi_ctr_stop(struct perf_event *event, unsigned long flag) 499 { 500 struct sbiret ret; 501 struct hw_perf_event *hwc = &event->hw; 502 503 ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, hwc->idx, 1, flag, 0, 0, 0); 504 if (ret.error && (ret.error != SBI_ERR_ALREADY_STOPPED) && 505 flag != 
SBI_PMU_STOP_FLAG_RESET) 506 pr_err("Stopping counter idx %d failed with error %d\n", 507 hwc->idx, sbi_err_map_linux_errno(ret.error)); 508 } 509 510 static int pmu_sbi_find_num_ctrs(void) 511 { 512 struct sbiret ret; 513 514 ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_NUM_COUNTERS, 0, 0, 0, 0, 0, 0); 515 if (!ret.error) 516 return ret.value; 517 else 518 return sbi_err_map_linux_errno(ret.error); 519 } 520 521 static int pmu_sbi_get_ctrinfo(int nctr, unsigned long *mask) 522 { 523 struct sbiret ret; 524 int i, num_hw_ctr = 0, num_fw_ctr = 0; 525 union sbi_pmu_ctr_info cinfo; 526 527 pmu_ctr_list = kcalloc(nctr, sizeof(*pmu_ctr_list), GFP_KERNEL); 528 if (!pmu_ctr_list) 529 return -ENOMEM; 530 531 for (i = 0; i < nctr; i++) { 532 ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_GET_INFO, i, 0, 0, 0, 0, 0); 533 if (ret.error) 534 /* The logical counter ids are not expected to be contiguous */ 535 continue; 536 537 *mask |= BIT(i); 538 539 cinfo.value = ret.value; 540 if (cinfo.type == SBI_PMU_CTR_TYPE_FW) 541 num_fw_ctr++; 542 else 543 num_hw_ctr++; 544 pmu_ctr_list[i].value = cinfo.value; 545 } 546 547 pr_info("%d firmware and %d hardware counters\n", num_fw_ctr, num_hw_ctr); 548 549 return 0; 550 } 551 552 static inline void pmu_sbi_stop_all(struct riscv_pmu *pmu) 553 { 554 /* 555 * No need to check the error because we are disabling all the counters 556 * which may include counters that are not enabled yet. 557 */ 558 sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, 559 0, pmu->cmask, 0, 0, 0, 0); 560 } 561 562 static inline void pmu_sbi_stop_hw_ctrs(struct riscv_pmu *pmu) 563 { 564 struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events); 565 566 /* No need to check the error here as we can't do anything about the error */ 567 sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, 0, 568 cpu_hw_evt->used_hw_ctrs[0], 0, 0, 0, 0); 569 } 570 571 /* 572 * This function starts all the used counters in two step approach. 
573 * Any counter that did not overflow can be start in a single step 574 * while the overflowed counters need to be started with updated initialization 575 * value. 576 */ 577 static inline void pmu_sbi_start_overflow_mask(struct riscv_pmu *pmu, 578 unsigned long ctr_ovf_mask) 579 { 580 int idx = 0; 581 struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events); 582 struct perf_event *event; 583 unsigned long flag = SBI_PMU_START_FLAG_SET_INIT_VALUE; 584 unsigned long ctr_start_mask = 0; 585 uint64_t max_period; 586 struct hw_perf_event *hwc; 587 u64 init_val = 0; 588 589 ctr_start_mask = cpu_hw_evt->used_hw_ctrs[0] & ~ctr_ovf_mask; 590 591 /* Start all the counters that did not overflow in a single shot */ 592 sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, 0, ctr_start_mask, 593 0, 0, 0, 0); 594 595 /* Reinitialize and start all the counter that overflowed */ 596 while (ctr_ovf_mask) { 597 if (ctr_ovf_mask & 0x01) { 598 event = cpu_hw_evt->events[idx]; 599 hwc = &event->hw; 600 max_period = riscv_pmu_ctr_get_width_mask(event); 601 init_val = local64_read(&hwc->prev_count) & max_period; 602 #if defined(CONFIG_32BIT) 603 sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, idx, 1, 604 flag, init_val, init_val >> 32, 0); 605 #else 606 sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, idx, 1, 607 flag, init_val, 0, 0); 608 #endif 609 perf_event_update_userpage(event); 610 } 611 ctr_ovf_mask = ctr_ovf_mask >> 1; 612 idx++; 613 } 614 } 615 616 static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev) 617 { 618 struct perf_sample_data data; 619 struct pt_regs *regs; 620 struct hw_perf_event *hw_evt; 621 union sbi_pmu_ctr_info *info; 622 int lidx, hidx, fidx; 623 struct riscv_pmu *pmu; 624 struct perf_event *event; 625 unsigned long overflow; 626 unsigned long overflowed_ctrs = 0; 627 struct cpu_hw_events *cpu_hw_evt = dev; 628 u64 start_clock = sched_clock(); 629 630 if (WARN_ON_ONCE(!cpu_hw_evt)) 631 return IRQ_NONE; 632 633 /* Firmware counter don't support 
overflow yet */ 634 fidx = find_first_bit(cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS); 635 event = cpu_hw_evt->events[fidx]; 636 if (!event) { 637 csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num)); 638 return IRQ_NONE; 639 } 640 641 pmu = to_riscv_pmu(event->pmu); 642 pmu_sbi_stop_hw_ctrs(pmu); 643 644 /* Overflow status register should only be read after counter are stopped */ 645 ALT_SBI_PMU_OVERFLOW(overflow); 646 647 /* 648 * Overflow interrupt pending bit should only be cleared after stopping 649 * all the counters to avoid any race condition. 650 */ 651 csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num)); 652 653 /* No overflow bit is set */ 654 if (!overflow) 655 return IRQ_NONE; 656 657 regs = get_irq_regs(); 658 659 for_each_set_bit(lidx, cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS) { 660 struct perf_event *event = cpu_hw_evt->events[lidx]; 661 662 /* Skip if invalid event or user did not request a sampling */ 663 if (!event || !is_sampling_event(event)) 664 continue; 665 666 info = &pmu_ctr_list[lidx]; 667 /* Do a sanity check */ 668 if (!info || info->type != SBI_PMU_CTR_TYPE_HW) 669 continue; 670 671 /* compute hardware counter index */ 672 hidx = info->csr - CSR_CYCLE; 673 /* check if the corresponding bit is set in sscountovf */ 674 if (!(overflow & (1 << hidx))) 675 continue; 676 677 /* 678 * Keep a track of overflowed counters so that they can be started 679 * with updated initial value. 680 */ 681 overflowed_ctrs |= 1 << lidx; 682 hw_evt = &event->hw; 683 riscv_pmu_event_update(event); 684 perf_sample_data_init(&data, 0, hw_evt->last_period); 685 if (riscv_pmu_event_set_period(event)) { 686 /* 687 * Unlike other ISAs, RISC-V don't have to disable interrupts 688 * to avoid throttling here. As per the specification, the 689 * interrupt remains disabled until the OF bit is set. 690 * Interrupts are enabled again only during the start. 691 * TODO: We will need to stop the guest counters once 692 * virtualization support is added. 
693 */ 694 perf_event_overflow(event, &data, regs); 695 } 696 } 697 698 pmu_sbi_start_overflow_mask(pmu, overflowed_ctrs); 699 perf_sample_event_took(sched_clock() - start_clock); 700 701 return IRQ_HANDLED; 702 } 703 704 static int pmu_sbi_starting_cpu(unsigned int cpu, struct hlist_node *node) 705 { 706 struct riscv_pmu *pmu = hlist_entry_safe(node, struct riscv_pmu, node); 707 struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events); 708 709 /* 710 * Enable the access for CYCLE, TIME, and INSTRET CSRs from userspace, 711 * as is necessary to maintain uABI compatibility. 712 */ 713 csr_write(CSR_SCOUNTEREN, 0x7); 714 715 /* Stop all the counters so that they can be enabled from perf */ 716 pmu_sbi_stop_all(pmu); 717 718 if (riscv_pmu_use_irq) { 719 cpu_hw_evt->irq = riscv_pmu_irq; 720 csr_clear(CSR_IP, BIT(riscv_pmu_irq_num)); 721 csr_set(CSR_IE, BIT(riscv_pmu_irq_num)); 722 enable_percpu_irq(riscv_pmu_irq, IRQ_TYPE_NONE); 723 } 724 725 return 0; 726 } 727 728 static int pmu_sbi_dying_cpu(unsigned int cpu, struct hlist_node *node) 729 { 730 if (riscv_pmu_use_irq) { 731 disable_percpu_irq(riscv_pmu_irq); 732 csr_clear(CSR_IE, BIT(riscv_pmu_irq_num)); 733 } 734 735 /* Disable all counters access for user mode now */ 736 csr_write(CSR_SCOUNTEREN, 0x0); 737 738 return 0; 739 } 740 741 static int pmu_sbi_setup_irqs(struct riscv_pmu *pmu, struct platform_device *pdev) 742 { 743 int ret; 744 struct cpu_hw_events __percpu *hw_events = pmu->hw_events; 745 struct device_node *cpu, *child; 746 struct irq_domain *domain = NULL; 747 748 if (riscv_isa_extension_available(NULL, SSCOFPMF)) { 749 riscv_pmu_irq_num = RV_IRQ_PMU; 750 riscv_pmu_use_irq = true; 751 } else if (IS_ENABLED(CONFIG_ERRATA_THEAD_PMU) && 752 riscv_cached_mvendorid(0) == THEAD_VENDOR_ID && 753 riscv_cached_marchid(0) == 0 && 754 riscv_cached_mimpid(0) == 0) { 755 riscv_pmu_irq_num = THEAD_C9XX_RV_IRQ_PMU; 756 riscv_pmu_use_irq = true; 757 } 758 759 if (!riscv_pmu_use_irq) 760 return -EOPNOTSUPP; 761 
762 for_each_of_cpu_node(cpu) { 763 child = of_get_compatible_child(cpu, "riscv,cpu-intc"); 764 if (!child) { 765 pr_err("Failed to find INTC node\n"); 766 of_node_put(cpu); 767 return -ENODEV; 768 } 769 domain = irq_find_host(child); 770 of_node_put(child); 771 if (domain) { 772 of_node_put(cpu); 773 break; 774 } 775 } 776 if (!domain) { 777 pr_err("Failed to find INTC IRQ root domain\n"); 778 return -ENODEV; 779 } 780 781 riscv_pmu_irq = irq_create_mapping(domain, riscv_pmu_irq_num); 782 if (!riscv_pmu_irq) { 783 pr_err("Failed to map PMU interrupt for node\n"); 784 return -ENODEV; 785 } 786 787 ret = request_percpu_irq(riscv_pmu_irq, pmu_sbi_ovf_handler, "riscv-pmu", hw_events); 788 if (ret) { 789 pr_err("registering percpu irq failed [%d]\n", ret); 790 return ret; 791 } 792 793 return 0; 794 } 795 796 #ifdef CONFIG_CPU_PM 797 static int riscv_pm_pmu_notify(struct notifier_block *b, unsigned long cmd, 798 void *v) 799 { 800 struct riscv_pmu *rvpmu = container_of(b, struct riscv_pmu, riscv_pm_nb); 801 struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events); 802 int enabled = bitmap_weight(cpuc->used_hw_ctrs, RISCV_MAX_COUNTERS); 803 struct perf_event *event; 804 int idx; 805 806 if (!enabled) 807 return NOTIFY_OK; 808 809 for (idx = 0; idx < RISCV_MAX_COUNTERS; idx++) { 810 event = cpuc->events[idx]; 811 if (!event) 812 continue; 813 814 switch (cmd) { 815 case CPU_PM_ENTER: 816 /* 817 * Stop and update the counter 818 */ 819 riscv_pmu_stop(event, PERF_EF_UPDATE); 820 break; 821 case CPU_PM_EXIT: 822 case CPU_PM_ENTER_FAILED: 823 /* 824 * Restore and enable the counter. 
825 */ 826 riscv_pmu_start(event, PERF_EF_RELOAD); 827 break; 828 default: 829 break; 830 } 831 } 832 833 return NOTIFY_OK; 834 } 835 836 static int riscv_pm_pmu_register(struct riscv_pmu *pmu) 837 { 838 pmu->riscv_pm_nb.notifier_call = riscv_pm_pmu_notify; 839 return cpu_pm_register_notifier(&pmu->riscv_pm_nb); 840 } 841 842 static void riscv_pm_pmu_unregister(struct riscv_pmu *pmu) 843 { 844 cpu_pm_unregister_notifier(&pmu->riscv_pm_nb); 845 } 846 #else 847 static inline int riscv_pm_pmu_register(struct riscv_pmu *pmu) { return 0; } 848 static inline void riscv_pm_pmu_unregister(struct riscv_pmu *pmu) { } 849 #endif 850 851 static void riscv_pmu_destroy(struct riscv_pmu *pmu) 852 { 853 riscv_pm_pmu_unregister(pmu); 854 cpuhp_state_remove_instance(CPUHP_AP_PERF_RISCV_STARTING, &pmu->node); 855 } 856 857 static int pmu_sbi_device_probe(struct platform_device *pdev) 858 { 859 struct riscv_pmu *pmu = NULL; 860 int ret = -ENODEV; 861 int num_counters; 862 863 pr_info("SBI PMU extension is available\n"); 864 pmu = riscv_pmu_alloc(); 865 if (!pmu) 866 return -ENOMEM; 867 868 num_counters = pmu_sbi_find_num_ctrs(); 869 if (num_counters < 0) { 870 pr_err("SBI PMU extension doesn't provide any counters\n"); 871 goto out_free; 872 } 873 874 /* cache all the information about counters now */ 875 if (pmu_sbi_get_ctrinfo(num_counters, &cmask)) 876 goto out_free; 877 878 ret = pmu_sbi_setup_irqs(pmu, pdev); 879 if (ret < 0) { 880 pr_info("Perf sampling/filtering is not supported as sscof extension is not available\n"); 881 pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; 882 pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE; 883 } 884 885 pmu->pmu.attr_groups = riscv_pmu_attr_groups; 886 pmu->cmask = cmask; 887 pmu->ctr_start = pmu_sbi_ctr_start; 888 pmu->ctr_stop = pmu_sbi_ctr_stop; 889 pmu->event_map = pmu_sbi_event_map; 890 pmu->ctr_get_idx = pmu_sbi_ctr_get_idx; 891 pmu->ctr_get_width = pmu_sbi_ctr_get_width; 892 pmu->ctr_clear_idx = pmu_sbi_ctr_clear_idx; 893 
pmu->ctr_read = pmu_sbi_ctr_read; 894 895 ret = cpuhp_state_add_instance(CPUHP_AP_PERF_RISCV_STARTING, &pmu->node); 896 if (ret) 897 return ret; 898 899 ret = riscv_pm_pmu_register(pmu); 900 if (ret) 901 goto out_unregister; 902 903 ret = perf_pmu_register(&pmu->pmu, "cpu", PERF_TYPE_RAW); 904 if (ret) 905 goto out_unregister; 906 907 return 0; 908 909 out_unregister: 910 riscv_pmu_destroy(pmu); 911 912 out_free: 913 kfree(pmu); 914 return ret; 915 } 916 917 static struct platform_driver pmu_sbi_driver = { 918 .probe = pmu_sbi_device_probe, 919 .driver = { 920 .name = RISCV_PMU_PDEV_NAME, 921 }, 922 }; 923 924 static int __init pmu_sbi_devinit(void) 925 { 926 int ret; 927 struct platform_device *pdev; 928 929 if (sbi_spec_version < sbi_mk_version(0, 3) || 930 sbi_probe_extension(SBI_EXT_PMU) <= 0) { 931 return 0; 932 } 933 934 ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_RISCV_STARTING, 935 "perf/riscv/pmu:starting", 936 pmu_sbi_starting_cpu, pmu_sbi_dying_cpu); 937 if (ret) { 938 pr_err("CPU hotplug notifier could not be registered: %d\n", 939 ret); 940 return ret; 941 } 942 943 ret = platform_driver_register(&pmu_sbi_driver); 944 if (ret) 945 return ret; 946 947 pdev = platform_device_register_simple(RISCV_PMU_PDEV_NAME, -1, NULL, 0); 948 if (IS_ERR(pdev)) { 949 platform_driver_unregister(&pmu_sbi_driver); 950 return PTR_ERR(pdev); 951 } 952 953 /* Notify legacy implementation that SBI pmu is available*/ 954 riscv_pmu_legacy_skip_init(); 955 956 return ret; 957 } 958 device_initcall(pmu_sbi_devinit) 959