Lines matching full:pmu
125 struct pmu pmu; member
135 #define to_cn10k_ddr_pmu(p) container_of(p, struct cn10k_ddr_pmu, pmu)
233 struct cn10k_ddr_pmu *pmu = dev_get_drvdata(dev); in cn10k_ddr_perf_cpumask_show() local
235 return cpumap_print_to_pagebuf(true, buf, cpumask_of(pmu->cpu)); in cn10k_ddr_perf_cpumask_show()
289 static int cn10k_ddr_perf_alloc_counter(struct cn10k_ddr_pmu *pmu, in cn10k_ddr_perf_alloc_counter() argument
297 pmu->events[DDRC_PERF_READ_COUNTER_IDX] = event; in cn10k_ddr_perf_alloc_counter()
303 pmu->events[DDRC_PERF_WRITE_COUNTER_IDX] = event; in cn10k_ddr_perf_alloc_counter()
309 if (pmu->events[i] == NULL) { in cn10k_ddr_perf_alloc_counter()
310 pmu->events[i] = event; in cn10k_ddr_perf_alloc_counter()
318 static void cn10k_ddr_perf_free_counter(struct cn10k_ddr_pmu *pmu, int counter) in cn10k_ddr_perf_free_counter() argument
320 pmu->events[counter] = NULL; in cn10k_ddr_perf_free_counter()
325 struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu); in cn10k_ddr_perf_event_init() local
328 if (event->attr.type != event->pmu->type) in cn10k_ddr_perf_event_init()
332 dev_info(pmu->dev, "Sampling not supported!\n"); in cn10k_ddr_perf_event_init()
337 dev_warn(pmu->dev, "Can't provide per-task data!\n"); in cn10k_ddr_perf_event_init()
342 if (event->group_leader->pmu != event->pmu && in cn10k_ddr_perf_event_init()
349 event->cpu = pmu->cpu; in cn10k_ddr_perf_event_init()
354 static void cn10k_ddr_perf_counter_enable(struct cn10k_ddr_pmu *pmu, in cn10k_ddr_perf_counter_enable() argument
367 val = readq_relaxed(pmu->base + reg); in cn10k_ddr_perf_counter_enable()
374 writeq_relaxed(val, pmu->base + reg); in cn10k_ddr_perf_counter_enable()
376 val = readq_relaxed(pmu->base + DDRC_PERF_CNT_FREERUN_EN); in cn10k_ddr_perf_counter_enable()
388 writeq_relaxed(val, pmu->base + DDRC_PERF_CNT_FREERUN_EN); in cn10k_ddr_perf_counter_enable()
392 static u64 cn10k_ddr_perf_read_counter(struct cn10k_ddr_pmu *pmu, int counter) in cn10k_ddr_perf_read_counter() argument
397 return readq_relaxed(pmu->base + DDRC_PERF_CNT_VALUE_RD_OP); in cn10k_ddr_perf_read_counter()
400 return readq_relaxed(pmu->base + DDRC_PERF_CNT_VALUE_WR_OP); in cn10k_ddr_perf_read_counter()
402 val = readq_relaxed(pmu->base + DDRC_PERF_CNT_VALUE(counter)); in cn10k_ddr_perf_read_counter()
408 struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu); in cn10k_ddr_perf_event_update() local
414 new_count = cn10k_ddr_perf_read_counter(pmu, hwc->idx); in cn10k_ddr_perf_event_update()
424 struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu); in cn10k_ddr_perf_event_start() local
430 cn10k_ddr_perf_counter_enable(pmu, counter, true); in cn10k_ddr_perf_event_start()
437 struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu); in cn10k_ddr_perf_event_add() local
444 counter = cn10k_ddr_perf_alloc_counter(pmu, event); in cn10k_ddr_perf_event_add()
448 pmu->active_events++; in cn10k_ddr_perf_event_add()
451 if (pmu->active_events == 1) in cn10k_ddr_perf_event_add()
452 hrtimer_start(&pmu->hrtimer, cn10k_ddr_pmu_timer_period(), in cn10k_ddr_perf_event_add()
462 writeq_relaxed(val, pmu->base + reg_offset); in cn10k_ddr_perf_event_add()
470 writeq_relaxed(val, pmu->base + DDRC_PERF_CNT_FREERUN_CTRL); in cn10k_ddr_perf_event_add()
483 struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu); in cn10k_ddr_perf_event_stop() local
487 cn10k_ddr_perf_counter_enable(pmu, counter, false); in cn10k_ddr_perf_event_stop()
497 struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu); in cn10k_ddr_perf_event_del() local
503 cn10k_ddr_perf_free_counter(pmu, counter); in cn10k_ddr_perf_event_del()
504 pmu->active_events--; in cn10k_ddr_perf_event_del()
508 if (pmu->active_events == 0) in cn10k_ddr_perf_event_del()
509 hrtimer_cancel(&pmu->hrtimer); in cn10k_ddr_perf_event_del()
512 static void cn10k_ddr_perf_pmu_enable(struct pmu *pmu) in cn10k_ddr_perf_pmu_enable() argument
514 struct cn10k_ddr_pmu *ddr_pmu = to_cn10k_ddr_pmu(pmu); in cn10k_ddr_perf_pmu_enable()
520 static void cn10k_ddr_perf_pmu_disable(struct pmu *pmu) in cn10k_ddr_perf_pmu_disable() argument
522 struct cn10k_ddr_pmu *ddr_pmu = to_cn10k_ddr_pmu(pmu); in cn10k_ddr_perf_pmu_disable()
528 static void cn10k_ddr_perf_event_update_all(struct cn10k_ddr_pmu *pmu) in cn10k_ddr_perf_event_update_all() argument
534 if (pmu->events[i] == NULL) in cn10k_ddr_perf_event_update_all()
537 cn10k_ddr_perf_event_update(pmu->events[i]); in cn10k_ddr_perf_event_update_all()
542 if (pmu->events[i] == NULL) in cn10k_ddr_perf_event_update_all()
545 hwc = &pmu->events[i]->hw; in cn10k_ddr_perf_event_update_all()
550 static irqreturn_t cn10k_ddr_pmu_overflow_handler(struct cn10k_ddr_pmu *pmu) in cn10k_ddr_pmu_overflow_handler() argument
558 event = pmu->events[DDRC_PERF_READ_COUNTER_IDX]; in cn10k_ddr_pmu_overflow_handler()
562 new_count = cn10k_ddr_perf_read_counter(pmu, hwc->idx); in cn10k_ddr_pmu_overflow_handler()
571 event = pmu->events[DDRC_PERF_WRITE_COUNTER_IDX]; in cn10k_ddr_pmu_overflow_handler()
575 new_count = cn10k_ddr_perf_read_counter(pmu, hwc->idx); in cn10k_ddr_pmu_overflow_handler()
585 if (pmu->events[i] == NULL) in cn10k_ddr_pmu_overflow_handler()
588 value = cn10k_ddr_perf_read_counter(pmu, i); in cn10k_ddr_pmu_overflow_handler()
591 cn10k_ddr_perf_event_update_all(pmu); in cn10k_ddr_pmu_overflow_handler()
592 cn10k_ddr_perf_pmu_disable(&pmu->pmu); in cn10k_ddr_pmu_overflow_handler()
593 cn10k_ddr_perf_pmu_enable(&pmu->pmu); in cn10k_ddr_pmu_overflow_handler()
602 struct cn10k_ddr_pmu *pmu = container_of(hrtimer, struct cn10k_ddr_pmu, in cn10k_ddr_pmu_timer_handler() local
607 cn10k_ddr_pmu_overflow_handler(pmu); in cn10k_ddr_pmu_timer_handler()
616 struct cn10k_ddr_pmu *pmu = hlist_entry_safe(node, struct cn10k_ddr_pmu, in cn10k_ddr_pmu_offline_cpu() local
620 if (cpu != pmu->cpu) in cn10k_ddr_pmu_offline_cpu()
627 perf_pmu_migrate_context(&pmu->pmu, cpu, target); in cn10k_ddr_pmu_offline_cpu()
628 pmu->cpu = target; in cn10k_ddr_pmu_offline_cpu()
653 /* Setup the PMU counter to work in manual mode */ in cn10k_ddr_perf_probe()
657 ddr_pmu->pmu = (struct pmu) { in cn10k_ddr_perf_probe()
687 ret = perf_pmu_register(&ddr_pmu->pmu, name, -1); in cn10k_ddr_perf_probe()
691 pr_info("CN10K DDR PMU Driver for ddrc@%llx\n", res->start); in cn10k_ddr_perf_probe()
708 perf_pmu_unregister(&ddr_pmu->pmu); in cn10k_ddr_perf_remove()
714 { .compatible = "marvell,cn10k-ddr-pmu", },
730 .name = "cn10k-ddr-pmu",
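
The to_cn10k_ddr_pmu() macro visible at line 135 recovers the driver's private state from the struct pmu that the perf core passes to every callback, relying on the struct pmu being embedded in struct cn10k_ddr_pmu (line 125). A minimal user-space sketch of that container_of idiom, with the kernel types reduced to stubs and container_of re-derived from offsetof() purely for illustration:

#include <stddef.h>
#include <stdio.h>

/* Stand-ins for the kernel types: only the embedding relationship matters here. */
struct pmu {
	const char *name;
};

struct cn10k_ddr_pmu {
	struct pmu pmu;		/* embedded, as at line 125 */
	int cpu;
	int active_events;
};

/* Same shape as the kernel's container_of(): subtract the member's offset
 * from the member pointer to get back to the enclosing structure. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define to_cn10k_ddr_pmu(p) container_of(p, struct cn10k_ddr_pmu, pmu)

int main(void)
{
	struct cn10k_ddr_pmu ddr_pmu = { .pmu = { .name = "cn10k_ddr" }, .cpu = 3 };
	struct pmu *p = &ddr_pmu.pmu;	/* what a perf callback would receive */

	/* Recover the driver state, as cn10k_ddr_perf_pmu_enable() and friends do. */
	struct cn10k_ddr_pmu *back = to_cn10k_ddr_pmu(p);

	printf("%s on cpu %d (%p == %p)\n", back->pmu.name, back->cpu,
	       (void *)back, (void *)&ddr_pmu);
	return 0;
}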
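
The allocation hits at lines 289-320 suggest a simple counter scheme: the read and write events own fixed slots in pmu->events[] (DDRC_PERF_READ_COUNTER_IDX and DDRC_PERF_WRITE_COUNTER_IDX), any other event takes the first free generic slot, and freeing a counter just clears its slot. A hedged user-space sketch of that scheme, using made-up index values and a made-up MAX_COUNTERS since the real DDRC_PERF_* constants are not part of this listing:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative values only; the real constants live in the driver. */
#define MAX_COUNTERS		4
#define READ_COUNTER_IDX	2	/* hypothetical stand-in for DDRC_PERF_READ_COUNTER_IDX */
#define WRITE_COUNTER_IDX	3	/* hypothetical stand-in for DDRC_PERF_WRITE_COUNTER_IDX */

struct event { const char *name; bool is_read; bool is_write; };

static struct event *events[MAX_COUNTERS];

/* Fixed slots for the two special events, first free slot for everything else;
 * -1 means no counter is available. */
static int alloc_counter(struct event *ev)
{
	if (ev->is_read) {
		events[READ_COUNTER_IDX] = ev;
		return READ_COUNTER_IDX;
	}
	if (ev->is_write) {
		events[WRITE_COUNTER_IDX] = ev;
		return WRITE_COUNTER_IDX;
	}
	for (int i = 0; i < READ_COUNTER_IDX; i++) {
		if (!events[i]) {
			events[i] = ev;
			return i;
		}
	}
	return -1;
}

static void free_counter(int counter)
{
	events[counter] = NULL;
}

int main(void)
{
	struct event cas = { "generic_event", false, false };
	struct event rd  = { "read_ops", true, false };

	printf("generic -> %d, read -> %d\n", alloc_counter(&cas), alloc_counter(&rd));
	free_counter(0);
	return 0;
}

Note also that the listing points at a polled design: an hrtimer is started when the first event is added (lines 451-452) and cancelled when the last one is deleted (lines 508-509), with the timer handler driving cn10k_ddr_pmu_overflow_handler() (line 607), suggesting the counters are read back periodically rather than on a hardware overflow interrupt.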