Lines Matching +full:riscv +full:- +full:sbi

1 // SPDX-License-Identifier: GPL-2.0
3 * RISC-V performance counter support.
11 #define pr_fmt(fmt) "riscv-pmu-sbi: " fmt
24 #include <asm/sbi.h>
35 PMU_FORMAT_ATTR(event, "config:0-47");
58 * RISC-V doesn't have heterogeneous harts yet. This needs to become per-CPU state once harts with different PMU counters appear.
281 return info->type == SBI_PMU_CTR_TYPE_FW; in pmu_sbi_ctr_is_fw()
296 return -EINVAL; in riscv_pmu_get_hpm_info()
302 if (!hpm_width && info->csr != CSR_CYCLE && info->csr != CSR_INSTRET) in riscv_pmu_get_hpm_info()
303 hpm_width = info->width; in riscv_pmu_get_hpm_info()
304 if (info->type == SBI_PMU_CTR_TYPE_HW) in riscv_pmu_get_hpm_info()
317 return pmu_ctr_list[event->hw.idx].csr - CSR_CYCLE; in pmu_sbi_csr_index()
325 if (event->attr.config1 & RISCV_PMU_CONFIG1_GUEST_EVENTS) in pmu_sbi_get_filter_flags()
327 if (event->attr.exclude_kernel) in pmu_sbi_get_filter_flags()
329 if (event->attr.exclude_user) in pmu_sbi_get_filter_flags()
331 if (guest_events && event->attr.exclude_hv) in pmu_sbi_get_filter_flags()
333 if (event->attr.exclude_host) in pmu_sbi_get_filter_flags()
335 if (event->attr.exclude_guest) in pmu_sbi_get_filter_flags()
343 struct hw_perf_event *hwc = &event->hw; in pmu_sbi_ctr_get_idx()
344 struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu); in pmu_sbi_ctr_get_idx()
345 struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events); in pmu_sbi_ctr_get_idx()
348 uint64_t cbase = 0, cmask = rvpmu->cmask; in pmu_sbi_ctr_get_idx()
358 if ((hwc->flags & PERF_EVENT_FLAG_LEGACY) && (event->attr.type == PERF_TYPE_HARDWARE)) { in pmu_sbi_ctr_get_idx()
359 if (event->attr.config == PERF_COUNT_HW_CPU_CYCLES) { in pmu_sbi_ctr_get_idx()
362 } else if (event->attr.config == PERF_COUNT_HW_INSTRUCTIONS) { in pmu_sbi_ctr_get_idx()
364 cmask = 1UL << (CSR_INSTRET - CSR_CYCLE); in pmu_sbi_ctr_get_idx()
371 cmask, cflags, hwc->event_base, hwc->config, in pmu_sbi_ctr_get_idx()
372 hwc->config >> 32); in pmu_sbi_ctr_get_idx()
375 cmask, cflags, hwc->event_base, hwc->config, 0); in pmu_sbi_ctr_get_idx()
379 hwc->event_base, hwc->config); in pmu_sbi_ctr_get_idx()
384 if (!test_bit(idx, &rvpmu->cmask) || !pmu_ctr_list[idx].value) in pmu_sbi_ctr_get_idx()
385 return -ENOENT; in pmu_sbi_ctr_get_idx()
389 if (!test_and_set_bit(idx, cpuc->used_fw_ctrs)) in pmu_sbi_ctr_get_idx()
392 if (!test_and_set_bit(idx, cpuc->used_hw_ctrs)) in pmu_sbi_ctr_get_idx()
396 return -ENOENT; in pmu_sbi_ctr_get_idx()
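
The allocation path above reduces to a single firmware call. A minimal sketch, assuming the SBI PMU counter-config-match call and the sbi_ecall()/sbiret interface from <asm/sbi.h>; firmware picks a free counter out of the window described by cbase/cmask and returns its index in sbiret.value (the real driver uses test_and_set_bit() for the bookkeeping, simplified here):

	struct sbiret ret;
	int idx;

	/* Ask the SBI implementation to match and configure a counter. */
	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase,
			cmask, cflags, hwc->event_base, hwc->config, 0);
	if (ret.error)
		return sbi_err_map_linux_errno(ret.error);

	/* Reserve the returned counter in the per-cpu bookkeeping. */
	idx = ret.value;
	if (pmu_sbi_ctr_is_fw(idx))
		set_bit(idx, cpuc->used_fw_ctrs);
	else
		set_bit(idx, cpuc->used_hw_ctrs);
	return idx;
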
402 struct hw_perf_event *hwc = &event->hw; in pmu_sbi_ctr_clear_idx()
403 struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu); in pmu_sbi_ctr_clear_idx()
404 struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events); in pmu_sbi_ctr_clear_idx()
405 int idx = hwc->idx; in pmu_sbi_ctr_clear_idx()
408 clear_bit(idx, cpuc->used_fw_ctrs); in pmu_sbi_ctr_clear_idx()
410 clear_bit(idx, cpuc->used_hw_ctrs); in pmu_sbi_ctr_clear_idx()
419 return -EINVAL; in pmu_event_find_cache()
423 return -EINVAL; in pmu_event_find_cache()
427 return -EINVAL; in pmu_event_find_cache()
436 u32 type = event->attr.type; in pmu_sbi_is_fw_event()
437 u64 config = event->attr.config; in pmu_sbi_is_fw_event()
447 u32 type = event->attr.type; in pmu_sbi_event_map()
448 u64 config = event->attr.config; in pmu_sbi_event_map()
456 return -EINVAL; in pmu_sbi_event_map()
457 ret = pmu_hw_event_map[event->attr.config].event_idx; in pmu_sbi_event_map()
464 * As per the SBI specification, the upper 16 bits must be unused for a raw event. in pmu_sbi_event_map()
479 ret = -EINVAL; in pmu_sbi_event_map()
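
Tying the comment above to the "config:0-47" format attribute: a raw event carries its event data in the low 48 bits of perf's config word. A hedged sketch of that masking (GENMASK_ULL() is from <linux/bits.h>; the mask name here is illustrative, not the driver's):

	/* Keep config[47:0]; per the SBI spec the upper 16 bits are unused. */
	#define PMU_SBI_RAW_EVENT_MASK	GENMASK_ULL(47, 0)

	u64 econfig = event->attr.config & PMU_SBI_RAW_EVENT_MASK;
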
488 struct hw_perf_event *hwc = &event->hw; in pmu_sbi_ctr_read()
489 int idx = hwc->idx; in pmu_sbi_ctr_read()
496 hwc->idx, 0, 0, 0, 0, 0); in pmu_sbi_ctr_read()
513 if (event->hw.idx != -1) in pmu_sbi_set_scounteren()
522 if (event->hw.idx != -1) in pmu_sbi_reset_scounteren()
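
These two helpers gate direct user-space reads of a delegated counter. A sketch of the "set" side, assuming csr_read()/csr_write() from <asm/csr.h> and the pmu_sbi_csr_index() translation shown earlier; the bit position in scounteren matches the counter's CSR offset from CSR_CYCLE:

	/* Let user space read this event's counter: set its scounteren bit. */
	csr_write(CSR_SCOUNTEREN,
		  csr_read(CSR_SCOUNTEREN) | BIT(pmu_sbi_csr_index(event)));
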
530 struct hw_perf_event *hwc = &event->hw; in pmu_sbi_ctr_start()
534 ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, hwc->idx, in pmu_sbi_ctr_start()
537 ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, hwc->idx, in pmu_sbi_ctr_start()
542 hwc->idx, sbi_err_map_linux_errno(ret.error)); in pmu_sbi_ctr_start()
544 if ((hwc->flags & PERF_EVENT_FLAG_USER_ACCESS) && in pmu_sbi_ctr_start()
545 (hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT)) in pmu_sbi_ctr_start()
552 struct hw_perf_event *hwc = &event->hw; in pmu_sbi_ctr_stop()
554 if ((hwc->flags & PERF_EVENT_FLAG_USER_ACCESS) && in pmu_sbi_ctr_stop()
555 (hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT)) in pmu_sbi_ctr_stop()
558 ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, hwc->idx, 1, flag, 0, 0, 0); in pmu_sbi_ctr_stop()
562 hwc->idx, sbi_err_map_linux_errno(ret.error)); in pmu_sbi_ctr_stop()
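
Condensed, the start/stop fragments pair up as below. A sketch assuming SBI_PMU_START_FLAG_SET_INIT_VALUE from <asm/sbi.h> and an ival variable holding the initial count; both calls address a single counter (base hwc->idx, mask 1):

	/* Start one counter, seeding it with an initial value. */
	sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, hwc->idx, 1,
		  SBI_PMU_START_FLAG_SET_INIT_VALUE, ival, 0, 0);

	/* ... and later stop the same counter (no stop flags). */
	sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, hwc->idx, 1, 0, 0, 0, 0);
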
584 return -ENOMEM; in pmu_sbi_get_ctrinfo()
614 0, pmu->cmask, SBI_PMU_STOP_FLAG_RESET, 0, 0, 0); in pmu_sbi_stop_all()
619 struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events); in pmu_sbi_stop_hw_ctrs()
623 cpu_hw_evt->used_hw_ctrs[0], 0, 0, 0, 0); in pmu_sbi_stop_hw_ctrs()
636 struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events); in pmu_sbi_start_overflow_mask()
644 ctr_start_mask = cpu_hw_evt->used_hw_ctrs[0] & ~ctr_ovf_mask; in pmu_sbi_start_overflow_mask()
653 event = cpu_hw_evt->events[idx]; in pmu_sbi_start_overflow_mask()
654 hwc = &event->hw; in pmu_sbi_start_overflow_mask()
656 init_val = local64_read(&hwc->prev_count) & max_period; in pmu_sbi_start_overflow_mask()
689 fidx = find_first_bit(cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS); in pmu_sbi_ovf_handler()
695 event = cpu_hw_evt->events[fidx]; in pmu_sbi_ovf_handler()
701 pmu = to_riscv_pmu(event->pmu); in pmu_sbi_ovf_handler()
719 for_each_set_bit(lidx, cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS) { in pmu_sbi_ovf_handler()
720 struct perf_event *event = cpu_hw_evt->events[lidx]; in pmu_sbi_ovf_handler()
728 if (!info || info->type != SBI_PMU_CTR_TYPE_HW) in pmu_sbi_ovf_handler()
732 hidx = info->csr - CSR_CYCLE; in pmu_sbi_ovf_handler()
742 hw_evt = &event->hw; in pmu_sbi_ovf_handler()
744 perf_sample_data_init(&data, 0, hw_evt->last_period); in pmu_sbi_ovf_handler()
747 * Unlike other ISAs, RISC-V doesn't have to disable interrupts in pmu_sbi_ovf_handler()
759 perf_sample_event_took(sched_clock() - start_clock); in pmu_sbi_ovf_handler()
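
The handler cross-references the per-cpu used-counter bitmap with the hardware overflow bitmap. A hedged sketch of the overflow read, assuming the standard Sscofpmf sscountovf CSR (hardware with errata may need a different accessor):

	/* Bit N set means counter CSR_CYCLE + N overflowed, matching the
	 * hidx = info->csr - CSR_CYCLE translation above. */
	unsigned long overflowed = csr_read(CSR_SSCOUNTOVF);
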
767 struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events); in pmu_sbi_starting_cpu()
782 cpu_hw_evt->irq = riscv_pmu_irq; in pmu_sbi_starting_cpu()
807 struct cpu_hw_events __percpu *hw_events = pmu->hw_events; in pmu_sbi_setup_irqs()
822 return -EOPNOTSUPP; in pmu_sbi_setup_irqs()
828 return -ENODEV; in pmu_sbi_setup_irqs()
834 return -ENODEV; in pmu_sbi_setup_irqs()
837 ret = request_percpu_irq(riscv_pmu_irq, pmu_sbi_ovf_handler, "riscv-pmu", hw_events); in pmu_sbi_setup_irqs()
851 struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events); in riscv_pm_pmu_notify()
852 int enabled = bitmap_weight(cpuc->used_hw_ctrs, RISCV_MAX_COUNTERS); in riscv_pm_pmu_notify()
860 event = cpuc->events[idx]; in riscv_pm_pmu_notify()
888 pmu->riscv_pm_nb.notifier_call = riscv_pm_pmu_notify; in riscv_pm_pmu_register()
889 return cpu_pm_register_notifier(&pmu->riscv_pm_nb); in riscv_pm_pmu_register()
894 cpu_pm_unregister_notifier(&pmu->riscv_pm_nb); in riscv_pm_pmu_unregister()
904 cpuhp_state_remove_instance(CPUHP_AP_PERF_RISCV_STARTING, &pmu->node); in riscv_pmu_destroy()
914 event->hw.flags |= PERF_EVENT_FLAG_NO_USER_ACCESS; in pmu_sbi_event_init()
916 event->hw.flags |= PERF_EVENT_FLAG_USER_ACCESS; in pmu_sbi_event_init()
918 event->hw.flags |= PERF_EVENT_FLAG_LEGACY; in pmu_sbi_event_init()
923 if (event->hw.flags & PERF_EVENT_FLAG_NO_USER_ACCESS) in pmu_sbi_event_mapped()
926 if (event->hw.flags & PERF_EVENT_FLAG_LEGACY) { in pmu_sbi_event_mapped()
927 if (event->attr.config != PERF_COUNT_HW_CPU_CYCLES && in pmu_sbi_event_mapped()
928 event->attr.config != PERF_COUNT_HW_INSTRUCTIONS) { in pmu_sbi_event_mapped()
942 event->hw.flags |= PERF_EVENT_FLAG_USER_READ_CNT; in pmu_sbi_event_mapped()
951 if (event->hw.flags & PERF_EVENT_FLAG_USER_ACCESS) in pmu_sbi_event_mapped()
958 if (event->hw.flags & PERF_EVENT_FLAG_NO_USER_ACCESS) in pmu_sbi_event_unmapped()
961 if (event->hw.flags & PERF_EVENT_FLAG_LEGACY) { in pmu_sbi_event_unmapped()
962 if (event->attr.config != PERF_COUNT_HW_CPU_CYCLES && in pmu_sbi_event_unmapped()
963 event->attr.config != PERF_COUNT_HW_INSTRUCTIONS) { in pmu_sbi_event_unmapped()
974 event->hw.flags &= ~PERF_EVENT_FLAG_USER_READ_CNT; in pmu_sbi_event_unmapped()
976 if (event->hw.flags & PERF_EVENT_FLAG_USER_ACCESS) in pmu_sbi_event_unmapped()
1025 int ret = -ENODEV; in pmu_sbi_device_probe()
1028 pr_info("SBI PMU extension is available\n"); in pmu_sbi_device_probe()
1031 return -ENOMEM; in pmu_sbi_device_probe()
1035 pr_err("SBI PMU extension doesn't provide any counters\n"); in pmu_sbi_device_probe()
1039 /* SBI may report more than the maximum number of counters */ in pmu_sbi_device_probe()
1042 pr_info("SBI returned more than maximum number of counters. Limiting the number of counters to %d\n", num_counters); in pmu_sbi_device_probe()
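
A sketch of the clamp this message accompanies, assuming num_counters holds the SBI-reported counter count:

	/* SBI may report more counters than the driver can track. */
	if (num_counters > RISCV_MAX_COUNTERS)
		num_counters = RISCV_MAX_COUNTERS;
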
1052 pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; in pmu_sbi_device_probe()
1053 pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE; in pmu_sbi_device_probe()
1056 pmu->pmu.attr_groups = riscv_pmu_attr_groups; in pmu_sbi_device_probe()
1057 pmu->cmask = cmask; in pmu_sbi_device_probe()
1058 pmu->ctr_start = pmu_sbi_ctr_start; in pmu_sbi_device_probe()
1059 pmu->ctr_stop = pmu_sbi_ctr_stop; in pmu_sbi_device_probe()
1060 pmu->event_map = pmu_sbi_event_map; in pmu_sbi_device_probe()
1061 pmu->ctr_get_idx = pmu_sbi_ctr_get_idx; in pmu_sbi_device_probe()
1062 pmu->ctr_get_width = pmu_sbi_ctr_get_width; in pmu_sbi_device_probe()
1063 pmu->ctr_clear_idx = pmu_sbi_ctr_clear_idx; in pmu_sbi_device_probe()
1064 pmu->ctr_read = pmu_sbi_ctr_read; in pmu_sbi_device_probe()
1065 pmu->event_init = pmu_sbi_event_init; in pmu_sbi_device_probe()
1066 pmu->event_mapped = pmu_sbi_event_mapped; in pmu_sbi_device_probe()
1067 pmu->event_unmapped = pmu_sbi_event_unmapped; in pmu_sbi_device_probe()
1068 pmu->csr_index = pmu_sbi_csr_index; in pmu_sbi_device_probe()
1070 ret = cpuhp_state_add_instance(CPUHP_AP_PERF_RISCV_STARTING, &pmu->node); in pmu_sbi_device_probe()
1078 ret = perf_pmu_register(&pmu->pmu, "cpu", PERF_TYPE_RAW); in pmu_sbi_device_probe()
1112 "perf/riscv/pmu:starting", in pmu_sbi_devinit()
1124 pdev = platform_device_register_simple(RISCV_PMU_SBI_PDEV_NAME, -1, NULL, 0); in pmu_sbi_devinit()
1130 /* Notify legacy implementation that SBI PMU is available */ in pmu_sbi_devinit()