Lines matching "+full:event +-" in the RISC-V PMU core driver (drivers/perf/riscv_pmu.c):

// SPDX-License-Identifier: GPL-2.0
/*
 * RISC-V performance counter support.
 * This implementation is based on old RISC-V perf and ARM perf event code
 * which are in turn based on sparc64 and x86 code.
 */
static bool riscv_perf_user_access(struct perf_event *event)
{
        return ((event->attr.type == PERF_TYPE_HARDWARE) ||
                (event->attr.type == PERF_TYPE_HW_CACHE) ||
                (event->attr.type == PERF_TYPE_RAW)) &&
               !!(event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT) &&
               (event->hw.idx != -1);
}
void arch_perf_update_userpage(struct perf_event *event,
                               struct perf_event_mmap_page *userpg, u64 now)
{
        struct clock_read_data *rd;
        unsigned int seq;
        u64 ns;

        userpg->cap_user_time = 0;
        userpg->cap_user_time_zero = 0;
        userpg->cap_user_time_short = 0;
        userpg->cap_user_rdpmc = riscv_perf_user_access(event);

        /*
         * The counters are 64-bit but the priv spec doesn't mandate all the
         * bits to be implemented, so the counter width can vary per vendor.
         */
        if (userpg->cap_user_rdpmc)
                userpg->pmc_width = to_riscv_pmu(event->pmu)->ctr_get_width(event->hw.idx) + 1;
        do {
                rd = sched_clock_read_begin(&seq);
                userpg->time_mult = rd->mult;
                userpg->time_shift = rd->shift;
                userpg->time_zero = rd->epoch_ns;
                userpg->time_cycles = rd->epoch_cyc;
                userpg->time_mask = rd->sched_clock_mask;
                /* Subtract the cycle base so readers unaware of cap_user_time_short still work. */
                ns = mul_u64_u32_shr(rd->epoch_cyc, rd->mult, rd->shift);
                userpg->time_zero -= ns;
        } while (sched_clock_read_retry(seq));

        userpg->time_offset = userpg->time_zero - now;

        /*
         * time_shift is not expected to be greater than 31: the published
         * conversion algorithm shifts a 32-bit value (now specifies a
         * 64-bit value) - refer to perf_event_mmap_page in perf_event.h.
         */
        if (userpg->time_shift == 32) {
                userpg->time_shift = 31;
                userpg->time_mult >>= 1;
        }

        userpg->cap_user_time = 1;
        userpg->cap_user_time_zero = 1;
        userpg->cap_user_time_short = 1;
}
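The time_* fields published above follow the generic perf_event_mmap_page contract, so a user-space reader can turn a raw cycle value into sched_clock nanoseconds without entering the kernel. A minimal sketch, assuming the event fd has already been mmap()ed and the caller samples the fields under the lock/seq protocol documented in include/uapi/linux/perf_event.h; cyc_to_ns() is a name introduced here for illustration, not an exported API:

#include <linux/perf_event.h>
#include <stdint.h>

/* Hypothetical helper: convert a raw counter/cycle value to nanoseconds
 * using the time_* fields exported in the mmap()ed perf_event_mmap_page. */
static uint64_t cyc_to_ns(const struct perf_event_mmap_page *pc, uint64_t cyc)
{
        uint64_t quot, rem;

        /* With a clock narrower than 64 bits, fold the value into the epoch window first. */
        if (pc->cap_user_time_short)
                cyc = pc->time_cycles + ((cyc - pc->time_cycles) & pc->time_mask);

        quot = cyc >> pc->time_shift;
        rem  = cyc & (((uint64_t)1 << pc->time_shift) - 1);

        /* time_zero already had the epoch cycles subtracted by the kernel above. */
        return pc->time_zero + quot * pc->time_mult +
               ((rem * pc->time_mult) >> pc->time_shift);
}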
/* in riscv_pmu_ctr_read_csr(): an out-of-range performance counter CSR is rejected */
                return -EINVAL;
u64 riscv_pmu_ctr_get_width_mask(struct perf_event *event)
{
        int cwidth;
        struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;

        if (hwc->idx == -1)
                /* Handle init case where idx is not initialized yet */
                cwidth = rvpmu->ctr_get_width(0);
        else
                cwidth = rvpmu->ctr_get_width(hwc->idx);

        return GENMASK_ULL(cwidth, 0);
}
u64 riscv_pmu_event_update(struct perf_event *event)
{
        struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        u64 prev_raw_count, new_raw_count, oldval, delta, cmask;

        if (!rvpmu->ctr_read)
                return 0;

        cmask = riscv_pmu_ctr_get_width_mask(event);
        do {
                prev_raw_count = local64_read(&hwc->prev_count);
                new_raw_count = rvpmu->ctr_read(event);
                oldval = local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                                         new_raw_count);
        } while (oldval != prev_raw_count);

        delta = (new_raw_count - prev_raw_count) & cmask;
        local64_add(delta, &event->count);
        local64_sub(delta, &hwc->period_left);
        return delta;
}
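Because the delta is masked to the counter width, the update stays correct even when a counter narrower than 64 bits wraps between two reads. A small stand-alone demonstration (not kernel code; the 48-bit width is hypothetical and chosen only for illustration):

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
        uint64_t cmask = (1ULL << 48) - 1;     /* width mask of a hypothetical 48-bit counter */
        uint64_t prev  = cmask - 5;            /* previous raw value, 6 counts below the wrap */
        uint64_t cur   = 10;                   /* raw value read after the counter wrapped */
        uint64_t delta = (cur - prev) & cmask; /* same masking as riscv_pmu_event_update() */

        printf("delta = %" PRIu64 "\n", delta); /* prints 16: 6 counts to the wrap + 10 after */
        return 0;
}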
void riscv_pmu_stop(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;
        struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);

        WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);

        if (!(hwc->state & PERF_HES_STOPPED)) {
                if (rvpmu->ctr_stop) {
                        rvpmu->ctr_stop(event, 0);
                        hwc->state |= PERF_HES_STOPPED;
                }
                riscv_pmu_event_update(event);
                hwc->state |= PERF_HES_UPTODATE;
        }
}
int riscv_pmu_event_set_period(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        s64 left = local64_read(&hwc->period_left);
        s64 period = hwc->sample_period;
        int overflow = 0;
        uint64_t max_period = riscv_pmu_ctr_get_width_mask(event);

        if (unlikely(left <= -period)) {
                left = period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                overflow = 1;
        }
        if (unlikely(left <= 0)) {
                left += period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                overflow = 1;
        }
        /* Cap the period so the running counter cannot overtake the value we program. */
        if (left > (max_period >> 1))
                left = (max_period >> 1);

        local64_set(&hwc->prev_count, (u64)-left);
        perf_event_update_userpage(event);
        return overflow;
}
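The counter is programmed with the two's complement of the remaining period, so it overflows after exactly that many increments. A stand-alone illustration (not kernel code; again a hypothetical 48-bit counter width):

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
        uint64_t max_period = (1ULL << 48) - 1;  /* width mask, as riscv_pmu_ctr_get_width_mask() would return */
        int64_t  left = 10000;                   /* events remaining until the next sample */
        uint64_t prev = (uint64_t)-left;         /* riscv_pmu_event_set_period(): hwc->prev_count */
        uint64_t init = prev & max_period;       /* riscv_pmu_start(): value programmed into the counter */

        printf("init = %#" PRIx64 ", increments until overflow = %" PRIu64 "\n",
               init, max_period + 1 - init);     /* overflows after exactly 10000 increments */
        return 0;
}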
void riscv_pmu_start(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;
        struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
        uint64_t max_period = riscv_pmu_ctr_get_width_mask(event);
        u64 init_val;

        if (flags & PERF_EF_RELOAD)
                WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));

        hwc->state = 0;
        riscv_pmu_event_set_period(event);
        init_val = local64_read(&hwc->prev_count) & max_period;
        rvpmu->ctr_start(event, init_val);
        perf_event_update_userpage(event);
}
static int riscv_pmu_add(struct perf_event *event, int flags)
{
        struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
        struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
        struct hw_perf_event *hwc = &event->hw;
        int idx;

        idx = rvpmu->ctr_get_idx(event);
        if (idx < 0)
                return idx;

        hwc->idx = idx;
        cpuc->events[idx] = event;
        cpuc->n_events++;
        hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
        if (flags & PERF_EF_START)
                riscv_pmu_start(event, PERF_EF_RELOAD);

        /* Propagate our changes to the userspace mapping. */
        perf_event_update_userpage(event);
        return 0;
}
static void riscv_pmu_del(struct perf_event *event, int flags)
{
        struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
        struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
        struct hw_perf_event *hwc = &event->hw;

        riscv_pmu_stop(event, PERF_EF_UPDATE);
        cpuc->events[hwc->idx] = NULL;
        /* The firmware needs to reset the counter mapping */
        if (rvpmu->ctr_stop)
                rvpmu->ctr_stop(event, RISCV_PMU_STOP_FLAG_RESET);
        cpuc->n_events--;
        if (rvpmu->ctr_clear_idx)
                rvpmu->ctr_clear_idx(event);
        perf_event_update_userpage(event);
        hwc->idx = -1;
}
static void riscv_pmu_read(struct perf_event *event)
{
        riscv_pmu_event_update(event);
}
static int riscv_pmu_event_init(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
        int mapped_event;
        u64 event_config = 0;
        u64 cmask;

        /* The driver does not support branch stack sampling. */
        if (has_branch_stack(event))
                return -EOPNOTSUPP;

        hwc->flags = 0;
        mapped_event = rvpmu->event_map(event, &event_config);
        if (mapped_event < 0) {
                pr_debug("event %x:%llx not supported\n", event->attr.type,
                         event->attr.config);
                return mapped_event;
        }

        /*
         * idx is set to -1 because the index of a general event should not be
         * decided until binding to some counter in pmu->add().
         */
        hwc->config = event_config;
        hwc->idx = -1;
        hwc->event_base = mapped_event;

        if (rvpmu->event_init)
                rvpmu->event_init(event);

        if (!is_sampling_event(event)) {
                /*
                 * For non-sampling runs, limit the sample_period to half
                 * of the counter width so that the new counter value is
                 * unlikely to overtake the previous one.
                 */
                cmask = riscv_pmu_ctr_get_width_mask(event);
                hwc->sample_period = cmask >> 1;
                hwc->last_period = hwc->sample_period;
                local64_set(&hwc->period_left, hwc->sample_period);
        }
        return 0;
}
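Everything above is driven by the generic perf core: an ordinary perf_event_open() call lands in riscv_pmu_event_init() with no RISC-V specific setup on the user side. A minimal user-space sketch counting CPU cycles (error handling omitted; the syscall/ioctl/read flow is the generic perf API, not something defined by this driver, and the comments map each step to the callbacks above):

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
        struct perf_event_attr attr;
        uint64_t count;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;            /* mapped by rvpmu->event_map() in event_init */
        attr.config = PERF_COUNT_HW_CPU_CYCLES;
        attr.disabled = 1;
        attr.exclude_kernel = 1;

        fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0); /* this task, any CPU */

        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);       /* scheduled in via riscv_pmu_add()/riscv_pmu_start() */
        /* ... workload to measure ... */
        ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);      /* riscv_pmu_del()/riscv_pmu_stop() */
        read(fd, &count, sizeof(count));           /* riscv_pmu_read() -> riscv_pmu_event_update() */

        printf("cycles: %" PRIu64 "\n", count);
        close(fd);
        return 0;
}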
static int riscv_pmu_event_idx(struct perf_event *event)
{
        struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);

        if (!(event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT))
                return 0;

        if (rvpmu->csr_index)
                return rvpmu->csr_index(event) + 1;

        return 0;
}
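The +1 follows the perf_event_mmap_page convention: userpg->index == 0 means the counter is not user-readable, otherwise index - 1 selects the counter CSR. A user-space sketch of the documented read sequence, assuming the event was mmap()ed, cap_user_rdpmc is set, and the event happens to sit on the cycle counter (index == 1); other hpmcounter CSRs would need different inline assembly, and read_self()/read_cycle_csr() are illustrative names, not an exported API:

#include <linux/perf_event.h>
#include <stdint.h>

static uint64_t read_cycle_csr(void)
{
        uint64_t val;

        __asm__ volatile ("rdcycle %0" : "=r" (val));
        return val;
}

static uint64_t read_self(volatile struct perf_event_mmap_page *pc)
{
        uint64_t count, pmc;
        uint32_t seq, idx, width;

        do {
                seq = pc->lock;
                __asm__ volatile ("" ::: "memory");

                idx = pc->index;           /* riscv_pmu_event_idx(): csr_index(event) + 1 */
                count = pc->offset;        /* kernel-accumulated part of the count */
                if (pc->cap_user_rdpmc && idx) {
                        width = pc->pmc_width;                    /* ctr_get_width(idx) + 1 */
                        pmc = read_cycle_csr() << (64 - width);
                        count += (uint64_t)((int64_t)pmc >> (64 - width)); /* sign-extend narrow counters */
                }

                __asm__ volatile ("" ::: "memory");
        } while (pc->lock != seq);         /* retry if the kernel updated the page meanwhile */

        return count;
}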
static void riscv_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
{
        struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);

        if (rvpmu->event_mapped) {
                rvpmu->event_mapped(event, mm);
                perf_event_update_userpage(event);
        }
}
static void riscv_pmu_event_unmapped(struct perf_event *event, struct mm_struct *mm)
{
        struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);

        if (rvpmu->event_unmapped) {
                rvpmu->event_unmapped(event, mm);
                perf_event_update_userpage(event);
        }
}
        /* in riscv_pmu_alloc(): */
        pmu->hw_events = alloc_percpu_gfp(struct cpu_hw_events, GFP_KERNEL);
        if (!pmu->hw_events) {
                pr_info("failed to allocate per-cpu PMU data.\n");
                goto out_free_pmu;
        }

        for_each_possible_cpu(cpuid) {
                cpuc = per_cpu_ptr(pmu->hw_events, cpuid);
                cpuc->n_events = 0;
                for (i = 0; i < RISCV_MAX_COUNTERS; i++)
                        cpuc->events[i] = NULL;
        }
        pmu->pmu = (struct pmu) {
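                /*
                 * The initializer is cut off in this listing; presumably it
                 * wires up the callbacks defined above, roughly as follows
                 * (a reconstructed sketch, not a verbatim quote):
                 */
                .event_init     = riscv_pmu_event_init,
                .event_mapped   = riscv_pmu_event_mapped,
                .event_unmapped = riscv_pmu_event_unmapped,
                .event_idx      = riscv_pmu_event_idx,
                .add            = riscv_pmu_add,
                .del            = riscv_pmu_del,
                .start          = riscv_pmu_start,
                .stop           = riscv_pmu_stop,
                .read           = riscv_pmu_read,
        };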