// SPDX-License-Identifier: GPL-2.0
/*
 * RISC-V performance counter support.
 *
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 *
 * This implementation is based on old RISC-V perf and ARM perf event code
 * which are in turn based on sparc64 and x86 code.
 */

#include <linux/mod_devicetable.h>
#include <linux/perf/riscv_pmu.h>
#include <linux/platform_device.h>

#define RISCV_PMU_LEGACY_CYCLE          0
#define RISCV_PMU_LEGACY_INSTRET        2

static bool pmu_init_done;
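/*
 * Only the two fixed hardware events are supported: CPU cycles map to the
 * CYCLE counter and retired instructions map to the INSTRET counter.
 */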
static int pmu_legacy_ctr_get_idx(struct perf_event *event)
{
        struct perf_event_attr *attr = &event->attr;

        if (event->attr.type != PERF_TYPE_HARDWARE)
                return -EOPNOTSUPP;
        if (attr->config == PERF_COUNT_HW_CPU_CYCLES)
                return RISCV_PMU_LEGACY_CYCLE;
        else if (attr->config == PERF_COUNT_HW_INSTRUCTIONS)
                return RISCV_PMU_LEGACY_INSTRET;
        else
                return -EOPNOTSUPP;
}

/* For the legacy driver, the event config and the counter index are the same */
static int pmu_legacy_event_map(struct perf_event *event, u64 *config)
{
        return pmu_legacy_ctr_get_idx(event);
}

/* cycle & instret are always 64 bit; the width is reported as one bit less, matching the SBI spec */
static int pmu_legacy_ctr_get_width(int idx)
{
        return 63;
}
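/*
 * Read the raw 64-bit counter value. On RV32 the counter is split across a
 * low and a high CSR, so the two halves are combined here.
 */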
static u64 pmu_legacy_read_ctr(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;
        u64 val;

        if (idx == RISCV_PMU_LEGACY_CYCLE) {
                val = riscv_pmu_ctr_read_csr(CSR_CYCLE);
                if (IS_ENABLED(CONFIG_32BIT))
                        val = (u64)riscv_pmu_ctr_read_csr(CSR_CYCLEH) << 32 | val;
        } else if (idx == RISCV_PMU_LEGACY_INSTRET) {
                val = riscv_pmu_ctr_read_csr(CSR_INSTRET);
                if (IS_ENABLED(CONFIG_32BIT))
                        val = ((u64)riscv_pmu_ctr_read_csr(CSR_INSTRETH)) << 32 | val;
        } else
                return 0;

        return val;
}
static void pmu_legacy_ctr_start(struct perf_event *event, u64 ival)
{
        struct hw_perf_event *hwc = &event->hw;
        u64 initial_val = pmu_legacy_read_ctr(event);

        /*
         * The legacy method doesn't really have a start/stop method.
         * It also cannot update the counter with an initial value.
         * But we still need to set the prev_count so that read() can compute
         * the delta. Just use the current counter value to set the prev_count.
         */
        local64_set(&hwc->prev_count, initial_val);
}
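/*
 * For the legacy counters the hardware counter index already matches the CSR
 * offset from CSR_CYCLE, so it can be handed back directly for user-space reads.
 */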
static uint8_t pmu_legacy_csr_index(struct perf_event *event)
{
        return event->hw.idx;
}
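/*
 * Mark cycle/instret events as directly readable from user space while the
 * event is mmapped; the flag is cleared again when the event is unmapped.
 */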
static void pmu_legacy_event_mapped(struct perf_event *event, struct mm_struct *mm)
{
        if (event->attr.config != PERF_COUNT_HW_CPU_CYCLES &&
            event->attr.config != PERF_COUNT_HW_INSTRUCTIONS)
                return;

        event->hw.flags |= PERF_EVENT_FLAG_USER_READ_CNT;
}

static void pmu_legacy_event_unmapped(struct perf_event *event, struct mm_struct *mm)
{
        if (event->attr.config != PERF_COUNT_HW_CPU_CYCLES &&
            event->attr.config != PERF_COUNT_HW_INSTRUCTIONS)
                return;

        event->hw.flags &= ~PERF_EVENT_FLAG_USER_READ_CNT;
}
/*
 * This is just a simple implementation to make legacy implementations
 * compatible with the new RISC-V PMU driver framework.
 * This driver only allows reading two counters, i.e. CYCLE and INSTRET.
 * However, it can not start or stop the counter. Thus, it is not very useful
 * and will be removed in the future.
 */
static void pmu_legacy_init(struct riscv_pmu *pmu)
{
        pr_info("Legacy PMU implementation is available\n");

        pmu->cmask = BIT(RISCV_PMU_LEGACY_CYCLE) |
                     BIT(RISCV_PMU_LEGACY_INSTRET);
        pmu->ctr_start = pmu_legacy_ctr_start;
        pmu->ctr_stop = NULL;
        pmu->event_map = pmu_legacy_event_map;
        pmu->ctr_get_idx = pmu_legacy_ctr_get_idx;
        pmu->ctr_get_width = pmu_legacy_ctr_get_width;
        pmu->ctr_clear_idx = NULL;
        pmu->ctr_read = pmu_legacy_read_ctr;
        pmu->event_mapped = pmu_legacy_event_mapped;
        pmu->event_unmapped = pmu_legacy_event_unmapped;
        pmu->csr_index = pmu_legacy_csr_index;
        pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
        pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE;

        perf_pmu_register(&pmu->pmu, "cpu", PERF_TYPE_RAW);
}
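/* Allocate a riscv_pmu instance and set it up as the legacy PMU */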
static int pmu_legacy_device_probe(struct platform_device *pdev)
{
        struct riscv_pmu *pmu = NULL;

        pmu = riscv_pmu_alloc();
        if (!pmu)
                return -ENOMEM;
        pmu_legacy_init(pmu);

        return 0;
}

static struct platform_driver pmu_legacy_driver = {
        .probe          = pmu_legacy_device_probe,
        .driver         = {
                .name   = RISCV_PMU_LEGACY_PDEV_NAME,
        },
};
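/*
 * Register the driver and create a matching platform device by hand, since
 * there is no firmware node that would trigger probing of the legacy PMU.
 */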
static int __init riscv_pmu_legacy_devinit(void)
{
        int ret;
        struct platform_device *pdev;

        if (likely(pmu_init_done))
                return 0;

        ret = platform_driver_register(&pmu_legacy_driver);
        if (ret)
                return ret;

        pdev = platform_device_register_simple(RISCV_PMU_LEGACY_PDEV_NAME, -1, NULL, 0);
        if (IS_ERR(pdev)) {
                platform_driver_unregister(&pmu_legacy_driver);
                return PTR_ERR(pdev);
        }

        return ret;
}
late_initcall(riscv_pmu_legacy_devinit);
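/*
 * Called by a full-featured PMU driver (such as the SBI PMU driver) before
 * the late initcall runs, so that the legacy PMU is not registered as well.
 */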
void riscv_pmu_legacy_skip_init(void)
{
        pmu_init_done = true;
}