// SPDX-License-Identifier: GPL-2.0
/*
 * RISC-V performance counter support.
 *
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 *
 * This code is based on ARM perf event code which is in turn based on
 * sparc64 and x86 code.
 */

#define pr_fmt(fmt) "riscv-pmu-sbi: " fmt

#include <linux/mod_devicetable.h>
#include <linux/perf/riscv_pmu.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of_irq.h>
#include <linux/of.h>
#include <linux/cpu_pm.h>
#include <linux/sched/clock.h>

#include <asm/errata_list.h>
#include <asm/sbi.h>
#include <asm/hwcap.h>

#define SYSCTL_NO_USER_ACCESS	0
#define SYSCTL_USER_ACCESS	1
#define SYSCTL_LEGACY		2

#define PERF_EVENT_FLAG_NO_USER_ACCESS	BIT(SYSCTL_NO_USER_ACCESS)
#define PERF_EVENT_FLAG_USER_ACCESS	BIT(SYSCTL_USER_ACCESS)
#define PERF_EVENT_FLAG_LEGACY		BIT(SYSCTL_LEGACY)

PMU_FORMAT_ATTR(event, "config:0-47");
PMU_FORMAT_ATTR(firmware, "config:63");

static struct attribute *riscv_arch_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_firmware.attr,
	NULL,
};

static struct attribute_group riscv_pmu_format_group = {
	.name = "format",
	.attrs = riscv_arch_formats_attr,
};

static const struct attribute_group *riscv_pmu_attr_groups[] = {
	&riscv_pmu_format_group,
	NULL,
};

/* Allow user mode access by default */
static int sysctl_perf_user_access __read_mostly = SYSCTL_USER_ACCESS;

/*
 * RISC-V doesn't have heterogeneous harts yet. This needs to become per-CPU
 * once harts with different PMU counters are supported.
 */
static union sbi_pmu_ctr_info *pmu_ctr_list;
static bool riscv_pmu_use_irq;
static unsigned int riscv_pmu_irq_num;
static unsigned int riscv_pmu_irq;

/* Cache the available counters in a bitmask */
static unsigned long cmask;

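/*
 * Layout of the SBI event index: the generic hardware and cache event views
 * below are bitfield overlays of the same 32-bit event_idx value that is
 * handed to the SBI firmware when a counter is configured.
 */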
struct sbi_pmu_event_data {
	union {
		union {
			struct hw_gen_event {
				uint32_t event_code:16;
				uint32_t event_type:4;
				uint32_t reserved:12;
			} hw_gen_event;
			struct hw_cache_event {
				uint32_t result_id:1;
				uint32_t op_id:2;
				uint32_t cache_id:13;
				uint32_t event_type:4;
				uint32_t reserved:12;
			} hw_cache_event;
		};
		uint32_t event_idx;
	};
};

static const struct sbi_pmu_event_data pmu_hw_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES]		= {.hw_gen_event = {
							SBI_PMU_HW_CPU_CYCLES,
							SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_INSTRUCTIONS]		= {.hw_gen_event = {
							SBI_PMU_HW_INSTRUCTIONS,
							SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_CACHE_REFERENCES]	= {.hw_gen_event = {
							SBI_PMU_HW_CACHE_REFERENCES,
							SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_CACHE_MISSES]		= {.hw_gen_event = {
							SBI_PMU_HW_CACHE_MISSES,
							SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= {.hw_gen_event = {
							SBI_PMU_HW_BRANCH_INSTRUCTIONS,
							SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_BRANCH_MISSES]		= {.hw_gen_event = {
							SBI_PMU_HW_BRANCH_MISSES,
							SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_BUS_CYCLES]		= {.hw_gen_event = {
							SBI_PMU_HW_BUS_CYCLES,
							SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= {.hw_gen_event = {
							SBI_PMU_HW_STALLED_CYCLES_FRONTEND,
							SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= {.hw_gen_event = {
							SBI_PMU_HW_STALLED_CYCLES_BACKEND,
							SBI_PMU_EVENT_TYPE_HW, 0}},
	[PERF_COUNT_HW_REF_CPU_CYCLES]		= {.hw_gen_event = {
							SBI_PMU_HW_REF_CPU_CYCLES,
							SBI_PMU_EVENT_TYPE_HW, 0}},
};

#define C(x) PERF_COUNT_HW_CACHE_##x
static const struct sbi_pmu_event_data pmu_cache_event_map[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_READ), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_READ), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_WRITE), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_WRITE), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_PREFETCH), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_PREFETCH), C(L1D), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_READ), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_READ), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_WRITE), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_WRITE), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_PREFETCH), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_PREFETCH), C(L1I), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_READ), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_READ), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_WRITE), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_WRITE), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_PREFETCH), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_PREFETCH), C(LL), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_READ), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_READ), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_WRITE), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_WRITE), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_PREFETCH), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_PREFETCH), C(DTLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_READ), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_READ), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_WRITE), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_WRITE), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_PREFETCH), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_PREFETCH), C(ITLB), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_READ), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_READ), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_WRITE), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_WRITE), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_PREFETCH), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_PREFETCH), C(BPU), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_READ), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_READ), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_WRITE), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_WRITE), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = {.hw_cache_event = {C(RESULT_ACCESS),
					C(OP_PREFETCH), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}},
			[C(RESULT_MISS)] = {.hw_cache_event = {C(RESULT_MISS),
					C(OP_PREFETCH), C(NODE), SBI_PMU_EVENT_TYPE_CACHE, 0}},
		},
	},
};

static int pmu_sbi_ctr_get_width(int idx)
{
	return pmu_ctr_list[idx].width;
}

static bool pmu_sbi_ctr_is_fw(int cidx)
{
	union sbi_pmu_ctr_info *info;

	info = &pmu_ctr_list[cidx];
	if (!info)
		return false;

	return (info->type == SBI_PMU_CTR_TYPE_FW) ? true : false;
}

/*
 * Returns the counter width of a programmable counter and number of hardware
 * counters. As we don't support heterogeneous CPUs yet, it is okay to just
 * return the counter width of the first programmable counter.
 */
int riscv_pmu_get_hpm_info(u32 *hw_ctr_width, u32 *num_hw_ctr)
{
	int i;
	union sbi_pmu_ctr_info *info;
	u32 hpm_width = 0, hpm_count = 0;

	if (!cmask)
		return -EINVAL;

	for_each_set_bit(i, &cmask, RISCV_MAX_COUNTERS) {
		info = &pmu_ctr_list[i];
		if (!info)
			continue;
		if (!hpm_width && info->csr != CSR_CYCLE && info->csr != CSR_INSTRET)
			hpm_width = info->width;
		if (info->type == SBI_PMU_CTR_TYPE_HW)
			hpm_count++;
	}

	*hw_ctr_width = hpm_width;
	*num_hw_ctr = hpm_count;

	return 0;
}
EXPORT_SYMBOL_GPL(riscv_pmu_get_hpm_info);

static uint8_t pmu_sbi_csr_index(struct perf_event *event)
{
	return pmu_ctr_list[event->hw.idx].csr - CSR_CYCLE;
}

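/*
 * Translate the perf exclude_* attributes into the SBI config-match inhibit
 * flags for the corresponding privilege modes (host S/U and guest VS/VU).
 */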
static unsigned long pmu_sbi_get_filter_flags(struct perf_event *event)
{
	unsigned long cflags = 0;
	bool guest_events = false;

	if (event->attr.config1 & RISCV_PMU_CONFIG1_GUEST_EVENTS)
		guest_events = true;
	if (event->attr.exclude_kernel)
		cflags |= guest_events ? SBI_PMU_CFG_FLAG_SET_VSINH : SBI_PMU_CFG_FLAG_SET_SINH;
	if (event->attr.exclude_user)
		cflags |= guest_events ? SBI_PMU_CFG_FLAG_SET_VUINH : SBI_PMU_CFG_FLAG_SET_UINH;
	if (guest_events && event->attr.exclude_hv)
		cflags |= SBI_PMU_CFG_FLAG_SET_SINH;
	if (event->attr.exclude_host)
		cflags |= SBI_PMU_CFG_FLAG_SET_UINH | SBI_PMU_CFG_FLAG_SET_SINH;
	if (event->attr.exclude_guest)
		cflags |= SBI_PMU_CFG_FLAG_SET_VSINH | SBI_PMU_CFG_FLAG_SET_VUINH;

	return cflags;
}

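/*
 * Ask the SBI firmware to pick a counter that can monitor this event (the
 * config-match call does the actual selection) and mark the returned index
 * as used in the per-CPU firmware/hardware counter bitmaps.
 */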
static int pmu_sbi_ctr_get_idx(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
	struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
	struct sbiret ret;
	int idx;
	uint64_t cbase = 0, cmask = rvpmu->cmask;
	unsigned long cflags = 0;

	cflags = pmu_sbi_get_filter_flags(event);

	/*
	 * In legacy mode, we have to force the fixed counters for those events,
	 * but not in user access mode where we want to use the other counters
	 * that support sampling/filtering.
	 */
	if ((hwc->flags & PERF_EVENT_FLAG_LEGACY) && (event->attr.type == PERF_TYPE_HARDWARE)) {
		if (event->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
			cflags |= SBI_PMU_CFG_FLAG_SKIP_MATCH;
			cmask = 1;
		} else if (event->attr.config == PERF_COUNT_HW_INSTRUCTIONS) {
			cflags |= SBI_PMU_CFG_FLAG_SKIP_MATCH;
			cmask = 1UL << (CSR_INSTRET - CSR_CYCLE);
		}
	}

	/* retrieve the available counter index */
#if defined(CONFIG_32BIT)
	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase,
			cmask, cflags, hwc->event_base, hwc->config,
			hwc->config >> 32);
#else
	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase,
			cmask, cflags, hwc->event_base, hwc->config, 0);
#endif
	if (ret.error) {
		pr_debug("Not able to find a counter for event %lx config %llx\n",
			 hwc->event_base, hwc->config);
		return sbi_err_map_linux_errno(ret.error);
	}

	idx = ret.value;
	if (!test_bit(idx, &rvpmu->cmask) || !pmu_ctr_list[idx].value)
		return -ENOENT;

	/* Additional sanity check for the counter id */
	if (pmu_sbi_ctr_is_fw(idx)) {
		if (!test_and_set_bit(idx, cpuc->used_fw_ctrs))
			return idx;
	} else {
		if (!test_and_set_bit(idx, cpuc->used_hw_ctrs))
			return idx;
	}

	return -ENOENT;
}

static void pmu_sbi_ctr_clear_idx(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
	struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
	int idx = hwc->idx;

	if (pmu_sbi_ctr_is_fw(idx))
		clear_bit(idx, cpuc->used_fw_ctrs);
	else
		clear_bit(idx, cpuc->used_hw_ctrs);
}

static int pmu_event_find_cache(u64 config)
{
	unsigned int cache_type, cache_op, cache_result, ret;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ret = pmu_cache_event_map[cache_type][cache_op][cache_result].event_idx;

	return ret;
}

static bool pmu_sbi_is_fw_event(struct perf_event *event)
{
	u32 type = event->attr.type;
	u64 config = event->attr.config;

	if ((type == PERF_TYPE_RAW) && ((config >> 63) == 1))
		return true;
	else
		return false;
}

static int pmu_sbi_event_map(struct perf_event *event, u64 *econfig)
{
	u32 type = event->attr.type;
	u64 config = event->attr.config;
	int bSoftware;
	u64 raw_config_val;
	int ret;

	switch (type) {
	case PERF_TYPE_HARDWARE:
		if (config >= PERF_COUNT_HW_MAX)
			return -EINVAL;
		ret = pmu_hw_event_map[event->attr.config].event_idx;
		break;
	case PERF_TYPE_HW_CACHE:
		ret = pmu_event_find_cache(config);
		break;
	case PERF_TYPE_RAW:
		/*
		 * As per the SBI specification, the upper 16 bits must be unused
		 * for a raw event. Use the MSB (bit 63) to distinguish between a
		 * hardware raw event and firmware events.
		 */
		bSoftware = config >> 63;
		raw_config_val = config & RISCV_PMU_RAW_EVENT_MASK;
		if (bSoftware) {
			ret = (raw_config_val & 0xFFFF) |
				(SBI_PMU_EVENT_TYPE_FW << 16);
		} else {
			ret = RISCV_PMU_RAW_EVENT_IDX;
			*econfig = raw_config_val;
		}
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

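/*
 * Firmware counters can only be read through the SBI firmware-read call;
 * hardware counters are read directly from their CSR, with the upper half
 * coming from the corresponding high CSR on 32-bit.
 */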
static u64 pmu_sbi_ctr_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	struct sbiret ret;
	union sbi_pmu_ctr_info info;
	u64 val = 0;

	if (pmu_sbi_is_fw_event(event)) {
		ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_FW_READ,
				hwc->idx, 0, 0, 0, 0, 0);
		if (!ret.error)
			val = ret.value;
	} else {
		info = pmu_ctr_list[idx];
		val = riscv_pmu_ctr_read_csr(info.csr);
		if (IS_ENABLED(CONFIG_32BIT))
			val = ((u64)riscv_pmu_ctr_read_csr(info.csr + 0x80)) << 32 | val;
	}

	return val;
}

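/*
 * The scounteren CSR gates user-space access to the counter CSRs: these two
 * helpers set/clear the bit for this event's counter on the local CPU.
 */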
static void pmu_sbi_set_scounteren(void *arg)
{
	struct perf_event *event = (struct perf_event *)arg;

	if (event->hw.idx != -1)
		csr_write(CSR_SCOUNTEREN,
			  csr_read(CSR_SCOUNTEREN) | BIT(pmu_sbi_csr_index(event)));
}

static void pmu_sbi_reset_scounteren(void *arg)
{
	struct perf_event *event = (struct perf_event *)arg;

	if (event->hw.idx != -1)
		csr_write(CSR_SCOUNTEREN,
			  csr_read(CSR_SCOUNTEREN) & ~BIT(pmu_sbi_csr_index(event)));
}

static void pmu_sbi_ctr_start(struct perf_event *event, u64 ival)
{
	struct sbiret ret;
	struct hw_perf_event *hwc = &event->hw;
	unsigned long flag = SBI_PMU_START_FLAG_SET_INIT_VALUE;

#if defined(CONFIG_32BIT)
	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, hwc->idx,
			1, flag, ival, ival >> 32, 0);
#else
	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, hwc->idx,
			1, flag, ival, 0, 0);
#endif
	if (ret.error && (ret.error != SBI_ERR_ALREADY_STARTED))
		pr_err("Starting counter idx %d failed with error %d\n",
		       hwc->idx, sbi_err_map_linux_errno(ret.error));

	if ((hwc->flags & PERF_EVENT_FLAG_USER_ACCESS) &&
	    (hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT))
		pmu_sbi_set_scounteren((void *)event);
}

static void pmu_sbi_ctr_stop(struct perf_event *event, unsigned long flag)
{
	struct sbiret ret;
	struct hw_perf_event *hwc = &event->hw;

	if ((hwc->flags & PERF_EVENT_FLAG_USER_ACCESS) &&
	    (hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT))
		pmu_sbi_reset_scounteren((void *)event);

	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, hwc->idx, 1, flag, 0, 0, 0);
	if (ret.error && (ret.error != SBI_ERR_ALREADY_STOPPED) &&
	    flag != SBI_PMU_STOP_FLAG_RESET)
		pr_err("Stopping counter idx %d failed with error %d\n",
		       hwc->idx, sbi_err_map_linux_errno(ret.error));
}

static int pmu_sbi_find_num_ctrs(void)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_NUM_COUNTERS, 0, 0, 0, 0, 0, 0);
	if (!ret.error)
		return ret.value;
	else
		return sbi_err_map_linux_errno(ret.error);
}

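/*
 * Query the SBI firmware for the description of each counter and record the
 * valid ones in both pmu_ctr_list and the counter bitmask.
 */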
static int pmu_sbi_get_ctrinfo(int nctr, unsigned long *mask)
{
	struct sbiret ret;
	int i, num_hw_ctr = 0, num_fw_ctr = 0;
	union sbi_pmu_ctr_info cinfo;

	pmu_ctr_list = kcalloc(nctr, sizeof(*pmu_ctr_list), GFP_KERNEL);
	if (!pmu_ctr_list)
		return -ENOMEM;

	for (i = 0; i < nctr; i++) {
		ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_GET_INFO, i, 0, 0, 0, 0, 0);
		if (ret.error)
			/* The logical counter ids are not expected to be contiguous */
			continue;

		*mask |= BIT(i);

		cinfo.value = ret.value;
		if (cinfo.type == SBI_PMU_CTR_TYPE_FW)
			num_fw_ctr++;
		else
			num_hw_ctr++;
		pmu_ctr_list[i].value = cinfo.value;
	}

	pr_info("%d firmware and %d hardware counters\n", num_fw_ctr, num_hw_ctr);

	return 0;
}

static inline void pmu_sbi_stop_all(struct riscv_pmu *pmu)
{
	/*
	 * No need to check the error because we are disabling all the counters
	 * which may include counters that are not enabled yet.
	 */
	sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP,
		  0, pmu->cmask, SBI_PMU_STOP_FLAG_RESET, 0, 0, 0);
}

static inline void pmu_sbi_stop_hw_ctrs(struct riscv_pmu *pmu)
{
	struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events);

	/* No need to check the error here as we can't do anything about the error */
	sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, 0,
		  cpu_hw_evt->used_hw_ctrs[0], 0, 0, 0, 0);
}

/*
 * This function starts all the used counters in a two-step approach.
 * Any counter that did not overflow can be started in a single step,
 * while the overflowed counters need to be started with an updated
 * initialization value.
 */
static inline void pmu_sbi_start_overflow_mask(struct riscv_pmu *pmu,
					       unsigned long ctr_ovf_mask)
{
	int idx = 0;
	struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events);
	struct perf_event *event;
	unsigned long flag = SBI_PMU_START_FLAG_SET_INIT_VALUE;
	unsigned long ctr_start_mask = 0;
	uint64_t max_period;
	struct hw_perf_event *hwc;
	u64 init_val = 0;

	ctr_start_mask = cpu_hw_evt->used_hw_ctrs[0] & ~ctr_ovf_mask;

	/* Start all the counters that did not overflow in a single shot */
	sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, 0, ctr_start_mask,
		  0, 0, 0, 0);

	/* Reinitialize and start all the counters that overflowed */
	while (ctr_ovf_mask) {
		if (ctr_ovf_mask & 0x01) {
			event = cpu_hw_evt->events[idx];
			hwc = &event->hw;
			max_period = riscv_pmu_ctr_get_width_mask(event);
			init_val = local64_read(&hwc->prev_count) & max_period;
#if defined(CONFIG_32BIT)
			sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, idx, 1,
				  flag, init_val, init_val >> 32, 0);
#else
			sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, idx, 1,
				  flag, init_val, 0, 0);
#endif
			perf_event_update_userpage(event);
		}
		ctr_ovf_mask = ctr_ovf_mask >> 1;
		idx++;
	}
}

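/*
 * Overflow interrupt handler: stop the hardware counters, read and clear the
 * overflow status, update/sample every overflowed event, then restart all
 * used counters.
 */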
static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
{
	struct perf_sample_data data;
	struct pt_regs *regs;
	struct hw_perf_event *hw_evt;
	union sbi_pmu_ctr_info *info;
	int lidx, hidx, fidx;
	struct riscv_pmu *pmu;
	struct perf_event *event;
	unsigned long overflow;
	unsigned long overflowed_ctrs = 0;
	struct cpu_hw_events *cpu_hw_evt = dev;
	u64 start_clock = sched_clock();

	if (WARN_ON_ONCE(!cpu_hw_evt))
		return IRQ_NONE;

	/* Firmware counters don't support overflow yet */
	fidx = find_first_bit(cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS);
	if (fidx == RISCV_MAX_COUNTERS) {
		csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
		return IRQ_NONE;
	}

	event = cpu_hw_evt->events[fidx];
	if (!event) {
		csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
		return IRQ_NONE;
	}

	pmu = to_riscv_pmu(event->pmu);
	pmu_sbi_stop_hw_ctrs(pmu);

	/* The overflow status register should only be read after the counters are stopped */
	ALT_SBI_PMU_OVERFLOW(overflow);

	/*
	 * The overflow interrupt pending bit should only be cleared after stopping
	 * all the counters to avoid any race condition.
	 */
	csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));

	/* No overflow bit is set */
	if (!overflow)
		return IRQ_NONE;

	regs = get_irq_regs();

	for_each_set_bit(lidx, cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS) {
		struct perf_event *event = cpu_hw_evt->events[lidx];

		/* Skip if the event is invalid or the user did not request sampling */
		if (!event || !is_sampling_event(event))
			continue;

		info = &pmu_ctr_list[lidx];
		/* Do a sanity check */
		if (!info || info->type != SBI_PMU_CTR_TYPE_HW)
			continue;

		/* compute hardware counter index */
		hidx = info->csr - CSR_CYCLE;
		/* check if the corresponding bit is set in scountovf */
		if (!(overflow & BIT(hidx)))
			continue;

		/*
		 * Keep track of overflowed counters so that they can be started
		 * with an updated initial value.
		 */
		overflowed_ctrs |= BIT(lidx);
		hw_evt = &event->hw;
		riscv_pmu_event_update(event);
		perf_sample_data_init(&data, 0, hw_evt->last_period);
		if (riscv_pmu_event_set_period(event)) {
			/*
			 * Unlike other ISAs, RISC-V doesn't have to disable interrupts
			 * to avoid throttling here. As per the specification, the
			 * interrupt remains disabled until the OF bit is set.
			 * Interrupts are enabled again only during the start.
			 * TODO: We will need to stop the guest counters once
			 * virtualization support is added.
			 */
			perf_event_overflow(event, &data, regs);
		}
	}

	pmu_sbi_start_overflow_mask(pmu, overflowed_ctrs);
	perf_sample_event_took(sched_clock() - start_clock);

	return IRQ_HANDLED;
}

static int pmu_sbi_starting_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct riscv_pmu *pmu = hlist_entry_safe(node, struct riscv_pmu, node);
	struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events);

	/*
	 * We keep enabling userspace access to CYCLE, TIME and INSTRET via the
	 * legacy option but that will be removed in the future.
	 */
	if (sysctl_perf_user_access == SYSCTL_LEGACY)
		csr_write(CSR_SCOUNTEREN, 0x7);
	else
		csr_write(CSR_SCOUNTEREN, 0x2);

	/* Stop all the counters so that they can be enabled from perf */
	pmu_sbi_stop_all(pmu);

	if (riscv_pmu_use_irq) {
		cpu_hw_evt->irq = riscv_pmu_irq;
		csr_clear(CSR_IP, BIT(riscv_pmu_irq_num));
		csr_set(CSR_IE, BIT(riscv_pmu_irq_num));
		enable_percpu_irq(riscv_pmu_irq, IRQ_TYPE_NONE);
	}

	return 0;
}

static int pmu_sbi_dying_cpu(unsigned int cpu, struct hlist_node *node)
{
	if (riscv_pmu_use_irq) {
		disable_percpu_irq(riscv_pmu_irq);
		csr_clear(CSR_IE, BIT(riscv_pmu_irq_num));
	}

	/* Disable all counter access for user mode now */
	csr_write(CSR_SCOUNTEREN, 0x0);

	return 0;
}

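/*
 * The counter overflow interrupt is either the standard Sscofpmf local
 * interrupt or, on T-Head C9xx cores, the vendor-specific interrupt handled
 * by the errata; set up the per-CPU IRQ accordingly.
 */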
static int pmu_sbi_setup_irqs(struct riscv_pmu *pmu, struct platform_device *pdev)
{
	int ret;
	struct cpu_hw_events __percpu *hw_events = pmu->hw_events;
	struct irq_domain *domain = NULL;

	if (riscv_isa_extension_available(NULL, SSCOFPMF)) {
		riscv_pmu_irq_num = RV_IRQ_PMU;
		riscv_pmu_use_irq = true;
	} else if (IS_ENABLED(CONFIG_ERRATA_THEAD_PMU) &&
		   riscv_cached_mvendorid(0) == THEAD_VENDOR_ID &&
		   riscv_cached_marchid(0) == 0 &&
		   riscv_cached_mimpid(0) == 0) {
		riscv_pmu_irq_num = THEAD_C9XX_RV_IRQ_PMU;
		riscv_pmu_use_irq = true;
	}

	if (!riscv_pmu_use_irq)
		return -EOPNOTSUPP;

	domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(),
					  DOMAIN_BUS_ANY);
	if (!domain) {
		pr_err("Failed to find INTC IRQ root domain\n");
		return -ENODEV;
	}

	riscv_pmu_irq = irq_create_mapping(domain, riscv_pmu_irq_num);
	if (!riscv_pmu_irq) {
		pr_err("Failed to map PMU interrupt for node\n");
		return -ENODEV;
	}

	ret = request_percpu_irq(riscv_pmu_irq, pmu_sbi_ovf_handler, "riscv-pmu", hw_events);
	if (ret) {
		pr_err("registering percpu irq failed [%d]\n", ret);
		return ret;
	}

	return 0;
}

#ifdef CONFIG_CPU_PM
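/*
 * CPU power-management notifier: stop and update the active counters before
 * entering a low-power state and restart them on exit.
 */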
static int riscv_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
			       void *v)
{
	struct riscv_pmu *rvpmu = container_of(b, struct riscv_pmu, riscv_pm_nb);
	struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
	int enabled = bitmap_weight(cpuc->used_hw_ctrs, RISCV_MAX_COUNTERS);
	struct perf_event *event;
	int idx;

	if (!enabled)
		return NOTIFY_OK;

	for (idx = 0; idx < RISCV_MAX_COUNTERS; idx++) {
		event = cpuc->events[idx];
		if (!event)
			continue;

		switch (cmd) {
		case CPU_PM_ENTER:
			/*
			 * Stop and update the counter
			 */
			riscv_pmu_stop(event, PERF_EF_UPDATE);
			break;
		case CPU_PM_EXIT:
		case CPU_PM_ENTER_FAILED:
			/*
			 * Restore and enable the counter.
			 */
			riscv_pmu_start(event, PERF_EF_RELOAD);
			break;
		default:
			break;
		}
	}

	return NOTIFY_OK;
}

static int riscv_pm_pmu_register(struct riscv_pmu *pmu)
{
	pmu->riscv_pm_nb.notifier_call = riscv_pm_pmu_notify;
	return cpu_pm_register_notifier(&pmu->riscv_pm_nb);
}

static void riscv_pm_pmu_unregister(struct riscv_pmu *pmu)
{
	cpu_pm_unregister_notifier(&pmu->riscv_pm_nb);
}
#else
static inline int riscv_pm_pmu_register(struct riscv_pmu *pmu) { return 0; }
static inline void riscv_pm_pmu_unregister(struct riscv_pmu *pmu) { }
#endif

static void riscv_pmu_destroy(struct riscv_pmu *pmu)
{
	riscv_pm_pmu_unregister(pmu);
	cpuhp_state_remove_instance(CPUHP_AP_PERF_RISCV_STARTING, &pmu->node);
}

static void pmu_sbi_event_init(struct perf_event *event)
{
	/*
	 * The permissions are set at event_init so that we do not depend
	 * on the sysctl value that can change.
	 */
	if (sysctl_perf_user_access == SYSCTL_NO_USER_ACCESS)
		event->hw.flags |= PERF_EVENT_FLAG_NO_USER_ACCESS;
	else if (sysctl_perf_user_access == SYSCTL_USER_ACCESS)
		event->hw.flags |= PERF_EVENT_FLAG_USER_ACCESS;
	else
		event->hw.flags |= PERF_EVENT_FLAG_LEGACY;
}

static void pmu_sbi_event_mapped(struct perf_event *event, struct mm_struct *mm)
{
	if (event->hw.flags & PERF_EVENT_FLAG_NO_USER_ACCESS)
		return;

	if (event->hw.flags & PERF_EVENT_FLAG_LEGACY) {
		if (event->attr.config != PERF_COUNT_HW_CPU_CYCLES &&
		    event->attr.config != PERF_COUNT_HW_INSTRUCTIONS) {
			return;
		}
	}

	/*
	 * The user mmapped the event to directly access it: this is where
	 * we determine, based on sysctl_perf_user_access, if we grant userspace
	 * direct access to this event. That means that within the same task,
	 * some events may be directly accessible and some others may not, if
	 * the user changes the value of sysctl_perf_user_access in the
	 * meantime.
	 */

	event->hw.flags |= PERF_EVENT_FLAG_USER_READ_CNT;

	/*
	 * We must enable userspace access *before* advertising in the user page
	 * that it is possible to do so to avoid any race.
	 * And we must notify all cpus here because threads that currently run
	 * on other cpus will try to directly access the counter too without
	 * calling pmu_sbi_ctr_start.
	 */
	if (event->hw.flags & PERF_EVENT_FLAG_USER_ACCESS)
		on_each_cpu_mask(mm_cpumask(mm),
				 pmu_sbi_set_scounteren, (void *)event, 1);
}

static void pmu_sbi_event_unmapped(struct perf_event *event, struct mm_struct *mm)
{
	if (event->hw.flags & PERF_EVENT_FLAG_NO_USER_ACCESS)
		return;

	if (event->hw.flags & PERF_EVENT_FLAG_LEGACY) {
		if (event->attr.config != PERF_COUNT_HW_CPU_CYCLES &&
		    event->attr.config != PERF_COUNT_HW_INSTRUCTIONS) {
			return;
		}
	}

	/*
	 * Here we can directly remove user access since the user does not have
	 * access to the user page anymore, so we avoid the racy window where
	 * the user could have read cap_user_rdpmc as true right before we
	 * disable it.
	 */
	event->hw.flags &= ~PERF_EVENT_FLAG_USER_READ_CNT;

	if (event->hw.flags & PERF_EVENT_FLAG_USER_ACCESS)
		on_each_cpu_mask(mm_cpumask(mm),
				 pmu_sbi_reset_scounteren, (void *)event, 1);
}

static void riscv_pmu_update_counter_access(void *info)
{
	if (sysctl_perf_user_access == SYSCTL_LEGACY)
		csr_write(CSR_SCOUNTEREN, 0x7);
	else
		csr_write(CSR_SCOUNTEREN, 0x2);
}

static int riscv_pmu_proc_user_access_handler(struct ctl_table *table,
					      int write, void *buffer,
					      size_t *lenp, loff_t *ppos)
{
	int prev = sysctl_perf_user_access;
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	/*
	 * Test against the previous value since we clear SCOUNTEREN when
	 * sysctl_perf_user_access is set to SYSCTL_USER_ACCESS, but we should
	 * not do that if that was already the case.
	 */
	if (ret || !write || prev == sysctl_perf_user_access)
		return ret;

	on_each_cpu(riscv_pmu_update_counter_access, NULL, 1);

	return 0;
}

static struct ctl_table sbi_pmu_sysctl_table[] = {
	{
		.procname	= "perf_user_access",
		.data		= &sysctl_perf_user_access,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= riscv_pmu_proc_user_access_handler,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_TWO,
	},
	{ }
};

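/*
 * Probe: discover the counters provided by the SBI firmware, wire up the
 * overflow interrupt when available, and register the PMU with perf along
 * with its PM notifier and the user-access sysctl knob.
 */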
static int pmu_sbi_device_probe(struct platform_device *pdev)
{
	struct riscv_pmu *pmu = NULL;
	int ret = -ENODEV;
	int num_counters;

	pr_info("SBI PMU extension is available\n");
	pmu = riscv_pmu_alloc();
	if (!pmu)
		return -ENOMEM;

	num_counters = pmu_sbi_find_num_ctrs();
	if (num_counters < 0) {
		pr_err("SBI PMU extension doesn't provide any counters\n");
		goto out_free;
	}

	/* It is possible to get more counters from SBI than the maximum supported */
	if (num_counters > RISCV_MAX_COUNTERS) {
		num_counters = RISCV_MAX_COUNTERS;
		pr_info("SBI returned more than maximum number of counters. Limiting the number of counters to %d\n", num_counters);
	}

	/* cache all the information about counters now */
	if (pmu_sbi_get_ctrinfo(num_counters, &cmask))
		goto out_free;

	ret = pmu_sbi_setup_irqs(pmu, pdev);
	if (ret < 0) {
		pr_info("Perf sampling/filtering is not supported as sscof extension is not available\n");
		pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
		pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE;
	}

	pmu->pmu.attr_groups = riscv_pmu_attr_groups;
	pmu->cmask = cmask;
	pmu->ctr_start = pmu_sbi_ctr_start;
	pmu->ctr_stop = pmu_sbi_ctr_stop;
	pmu->event_map = pmu_sbi_event_map;
	pmu->ctr_get_idx = pmu_sbi_ctr_get_idx;
	pmu->ctr_get_width = pmu_sbi_ctr_get_width;
	pmu->ctr_clear_idx = pmu_sbi_ctr_clear_idx;
	pmu->ctr_read = pmu_sbi_ctr_read;
	pmu->event_init = pmu_sbi_event_init;
	pmu->event_mapped = pmu_sbi_event_mapped;
	pmu->event_unmapped = pmu_sbi_event_unmapped;
	pmu->csr_index = pmu_sbi_csr_index;

	ret = cpuhp_state_add_instance(CPUHP_AP_PERF_RISCV_STARTING, &pmu->node);
	if (ret)
		return ret;

	ret = riscv_pm_pmu_register(pmu);
	if (ret)
		goto out_unregister;

	ret = perf_pmu_register(&pmu->pmu, "cpu", PERF_TYPE_RAW);
	if (ret)
		goto out_unregister;

	register_sysctl("kernel", sbi_pmu_sysctl_table);

	return 0;

out_unregister:
	riscv_pmu_destroy(pmu);

out_free:
	kfree(pmu);
	return ret;
}

static struct platform_driver pmu_sbi_driver = {
	.probe		= pmu_sbi_device_probe,
	.driver		= {
		.name	= RISCV_PMU_SBI_PDEV_NAME,
	},
};

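/*
 * Early init: the driver requires at least SBI v0.3 and the PMU extension.
 * Register the CPU hotplug callbacks, the platform driver and a matching
 * platform device, and tell the legacy PMU implementation to skip its init.
 */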
static int __init pmu_sbi_devinit(void)
{
	int ret;
	struct platform_device *pdev;

	if (sbi_spec_version < sbi_mk_version(0, 3) ||
	    !sbi_probe_extension(SBI_EXT_PMU)) {
		return 0;
	}

	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_RISCV_STARTING,
				      "perf/riscv/pmu:starting",
				      pmu_sbi_starting_cpu, pmu_sbi_dying_cpu);
	if (ret) {
		pr_err("CPU hotplug notifier could not be registered: %d\n",
		       ret);
		return ret;
	}

	ret = platform_driver_register(&pmu_sbi_driver);
	if (ret)
		return ret;

	pdev = platform_device_register_simple(RISCV_PMU_SBI_PDEV_NAME, -1, NULL, 0);
	if (IS_ERR(pdev)) {
		platform_driver_unregister(&pmu_sbi_driver);
		return PTR_ERR(pdev);
	}

	/* Notify the legacy implementation that the SBI PMU is available */
	riscv_pmu_legacy_skip_init();

	return ret;
}
device_initcall(pmu_sbi_devinit)