// SPDX-License-Identifier: GPL-2.0
/*
 * RISC-V performance counter support.
 *
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 *
 * This implementation is based on old RISC-V perf and ARM perf event code
 * which are in turn based on sparc64 and x86 code.
 */

#include <linux/mod_devicetable.h>
#include <linux/perf/riscv_pmu.h>
#include <linux/platform_device.h>

#define RISCV_PMU_LEGACY_CYCLE		0
#define RISCV_PMU_LEGACY_INSTRET	2

static bool pmu_init_done;

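/*
 * Map a hardware perf event to its fixed legacy counter index. Only CPU
 * cycles and retired instructions are supported; anything else is
 * rejected with -EOPNOTSUPP.
 */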
static int pmu_legacy_ctr_get_idx(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;

	if (event->attr.type != PERF_TYPE_HARDWARE)
		return -EOPNOTSUPP;
	if (attr->config == PERF_COUNT_HW_CPU_CYCLES)
		return RISCV_PMU_LEGACY_CYCLE;
	else if (attr->config == PERF_COUNT_HW_INSTRUCTIONS)
		return RISCV_PMU_LEGACY_INSTRET;
	else
		return -EOPNOTSUPP;
}

/* For the legacy driver, the event config and the counter index are the same */
static int pmu_legacy_event_map(struct perf_event *event, u64 *config)
{
	return pmu_legacy_ctr_get_idx(event);
}

/* cycle & instret are always 64 bit; report one bit less (63), per the SBI spec convention */
static int pmu_legacy_ctr_get_width(int idx)
{
	return 63;
}

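/*
 * Read the raw counter value from the cycle or instret CSR. On 32-bit
 * kernels the high and low halves are read separately and combined into
 * a 64-bit value.
 */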
static u64 pmu_legacy_read_ctr(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	u64 val;

	if (idx == RISCV_PMU_LEGACY_CYCLE) {
		val = riscv_pmu_ctr_read_csr(CSR_CYCLE);
		if (IS_ENABLED(CONFIG_32BIT))
			val = (u64)riscv_pmu_ctr_read_csr(CSR_CYCLEH) << 32 | val;
	} else if (idx == RISCV_PMU_LEGACY_INSTRET) {
		val = riscv_pmu_ctr_read_csr(CSR_INSTRET);
		if (IS_ENABLED(CONFIG_32BIT))
			val = ((u64)riscv_pmu_ctr_read_csr(CSR_INSTRETH)) << 32 | val;
	} else
		return 0;

	return val;
}

static void pmu_legacy_ctr_start(struct perf_event *event, u64 ival)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 initial_val = pmu_legacy_read_ctr(event);

	/*
	 * The legacy method doesn't really have a start/stop method.
	 * It also can not update the counter with an initial value.
	 * But we still need to set the prev_count so that read() can compute
	 * the delta. Just use the current counter value to set the prev_count.
	 */
	local64_set(&hwc->prev_count, initial_val);
}

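/*
 * For the legacy driver the counter index matches the CSR offset
 * (0 for cycle, 2 for instret), so it can be handed back directly for
 * user-space counter reads.
 */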
static uint8_t pmu_legacy_csr_index(struct perf_event *event)
{
	return event->hw.idx;
}

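/*
 * Mark the event as directly readable from user space (only cycle and
 * instret qualify) when it gets mmapped.
 */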
static void pmu_legacy_event_mapped(struct perf_event *event, struct mm_struct *mm)
{
	if (event->attr.config != PERF_COUNT_HW_CPU_CYCLES &&
	    event->attr.config != PERF_COUNT_HW_INSTRUCTIONS)
		return;

	event->hw.flags |= PERF_EVENT_FLAG_USER_READ_CNT;
}

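/* Revoke direct user-space readability when the event is unmapped. */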
static void pmu_legacy_event_unmapped(struct perf_event *event, struct mm_struct *mm)
{
	if (event->attr.config != PERF_COUNT_HW_CPU_CYCLES &&
	    event->attr.config != PERF_COUNT_HW_INSTRUCTIONS)
		return;

	event->hw.flags &= ~PERF_EVENT_FLAG_USER_READ_CNT;
}

/*
 * This is just a simple implementation to keep legacy implementations
 * compatible with the new RISC-V PMU driver framework.
 * This driver only allows reading two counters, i.e. CYCLE & INSTRET.
 * However, it cannot start or stop the counters. Thus, it is not very
 * useful and will be removed in the future.
 */
static void pmu_legacy_init(struct riscv_pmu *pmu)
{
	pr_info("Legacy PMU implementation is available\n");

	pmu->cmask = BIT(RISCV_PMU_LEGACY_CYCLE) |
		BIT(RISCV_PMU_LEGACY_INSTRET);
	pmu->ctr_start = pmu_legacy_ctr_start;
	pmu->ctr_stop = NULL;
	pmu->event_map = pmu_legacy_event_map;
	pmu->ctr_get_idx = pmu_legacy_ctr_get_idx;
	pmu->ctr_get_width = pmu_legacy_ctr_get_width;
	pmu->ctr_clear_idx = NULL;
	pmu->ctr_read = pmu_legacy_read_ctr;
	pmu->event_mapped = pmu_legacy_event_mapped;
	pmu->event_unmapped = pmu_legacy_event_unmapped;
	pmu->csr_index = pmu_legacy_csr_index;
	pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
	pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE;

	perf_pmu_register(&pmu->pmu, "cpu", PERF_TYPE_RAW);
}

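/*
 * Platform driver probe: allocate a riscv_pmu instance and register it
 * with the perf core using the legacy callbacks above.
 */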
static int pmu_legacy_device_probe(struct platform_device *pdev)
{
	struct riscv_pmu *pmu = NULL;

	pmu = riscv_pmu_alloc();
	if (!pmu)
		return -ENOMEM;
	pmu_legacy_init(pmu);

	return 0;
}

static struct platform_driver pmu_legacy_driver = {
	.probe		= pmu_legacy_device_probe,
	.driver		= {
		.name	= RISCV_PMU_LEGACY_PDEV_NAME,
	},
};

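/*
 * Register the legacy platform driver and a matching platform device so
 * that the probe above runs. This is skipped when another PMU driver has
 * already marked initialization as done via riscv_pmu_legacy_skip_init().
 */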
static int __init riscv_pmu_legacy_devinit(void)
{
	int ret;
	struct platform_device *pdev;

	if (likely(pmu_init_done))
		return 0;

	ret = platform_driver_register(&pmu_legacy_driver);
	if (ret)
		return ret;

	pdev = platform_device_register_simple(RISCV_PMU_LEGACY_PDEV_NAME, -1, NULL, 0);
	if (IS_ERR(pdev)) {
		platform_driver_unregister(&pmu_legacy_driver);
		return PTR_ERR(pdev);
	}

	return ret;
}
late_initcall(riscv_pmu_legacy_devinit);

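/*
 * Allow a more capable PMU driver to suppress the legacy fallback by
 * marking initialization as already done.
 */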
void riscv_pmu_legacy_skip_init(void)
{
	pmu_init_done = true;
}