Lines Matching refs:pmc
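
These are symbol-reference hits for pmc; judging by the function names and the SBI_PMU_* constants, they come from the RISC-V KVM vPMU implementation (arch/riscv/kvm/vcpu_pmu.c in the Linux tree). Each group of hits below is followed by a short C reconstruction of the surrounding function: only the matched lines are verbatim, and any control flow, declarations, or error handling between them is an assumption filled in from context.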
36 static u64 kvm_pmu_get_sample_period(struct kvm_pmc *pmc) in kvm_pmu_get_sample_period() argument
38 u64 counter_val_mask = GENMASK(pmc->cinfo.width, 0); in kvm_pmu_get_sample_period()
41 if (!pmc->counter_val) in kvm_pmu_get_sample_period()
44 sample_period = (-pmc->counter_val) & counter_val_mask; in kvm_pmu_get_sample_period()
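
The hits above are the sample-period computation: perf counts sample_period events down to overflow, so starting the guest counter at counter_val means programming the period to the two's-complement of that value, masked to the counter's width. A sketch of the full function (the zero-value branch and the return are filled in as assumptions):

    static u64 kvm_pmu_get_sample_period(struct kvm_pmc *pmc)
    {
        u64 counter_val_mask = GENMASK(pmc->cinfo.width, 0);
        u64 sample_period;

        /* Assumed: a zero start value means a full wrap of the counter. */
        if (!pmc->counter_val)
            sample_period = counter_val_mask + 1;
        else
            sample_period = (-pmc->counter_val) & counter_val_mask;

        return sample_period;
    }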
77 static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc) in kvm_pmu_release_perf_event() argument
79 if (pmc->perf_event) { in kvm_pmu_release_perf_event()
80 perf_event_disable(pmc->perf_event); in kvm_pmu_release_perf_event()
81 perf_event_release_kernel(pmc->perf_event); in kvm_pmu_release_perf_event()
82 pmc->perf_event = NULL; in kvm_pmu_release_perf_event()
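
kvm_pmu_release_perf_event() is captured almost in full by the hits; only the signature line and braces frame it. The event is disabled before it is released, mirroring the create path, which enables the event only after pmc->perf_event is set:

    static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
    {
        if (pmc->perf_event) {
            /* Stop the counter before dropping the kernel counter object. */
            perf_event_disable(pmc->perf_event);
            perf_event_release_kernel(pmc->perf_event);
            pmc->perf_event = NULL;
        }
    }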
203 struct kvm_pmc *pmc; in pmu_ctr_read() local
207 pmc = &kvpmu->pmc[cidx]; in pmu_ctr_read()
209 if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW) { in pmu_ctr_read()
210 fevent_code = get_event_code(pmc->event_idx); in pmu_ctr_read()
211 pmc->counter_val = kvpmu->fw_event[fevent_code].value; in pmu_ctr_read()
212 } else if (pmc->perf_event) { in pmu_ctr_read()
213 pmc->counter_val += perf_event_read_value(pmc->perf_event, &enabled, &running); in pmu_ctr_read()
217 *out_val = pmc->counter_val; in pmu_ctr_read()
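
pmu_ctr_read() has two sources of truth: firmware counters live in the kvpmu->fw_event[] software array and are copied into counter_val, while hardware counters accumulate whatever perf reads back. A sketch, with the signature, locals, and return value assumed from how the hits use them:

    static int pmu_ctr_read(struct kvm_vcpu *vcpu, unsigned long cidx,
                            unsigned long *out_val)
    {
        struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;
        u64 enabled, running;
        int fevent_code;

        pmc = &kvpmu->pmc[cidx];

        if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW) {
            /* Firmware counters are plain software counters. */
            fevent_code = get_event_code(pmc->event_idx);
            pmc->counter_val = kvpmu->fw_event[fevent_code].value;
        } else if (pmc->perf_event) {
            /* Fold the perf reading into the running guest value. */
            pmc->counter_val += perf_event_read_value(pmc->perf_event,
                                                      &enabled, &running);
        }
        *out_val = pmc->counter_val;

        return 0;
    }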
232 static int kvm_pmu_create_perf_event(struct kvm_pmc *pmc, struct perf_event_attr *attr, in kvm_pmu_create_perf_event() argument
237 kvm_pmu_release_perf_event(pmc); in kvm_pmu_create_perf_event()
241 pmc->counter_val = 0; in kvm_pmu_create_perf_event()
248 attr->sample_period = kvm_pmu_get_sample_period(pmc); in kvm_pmu_create_perf_event()
250 event = perf_event_create_kernel_counter(attr, -1, current, NULL, pmc); in kvm_pmu_create_perf_event()
256 pmc->perf_event = event; in kvm_pmu_create_perf_event()
258 perf_event_enable(pmc->perf_event); in kvm_pmu_create_perf_event()
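
The create path first tears down any previous backing event (hit at the top), optionally clears the guest-visible count, programs the initial sample period, and enables the event only when asked to. A sketch; the SBI_PMU_CFG_FLAG_* guards and the error handling are assumptions, since the hits show only the unguarded lines:

    static int kvm_pmu_create_perf_event(struct kvm_pmc *pmc, struct perf_event_attr *attr,
                                         unsigned long flags, unsigned long eidx,
                                         unsigned long evtdata)
    {
        struct perf_event *event;

        /* Drop any event left over from a previous configuration. */
        kvm_pmu_release_perf_event(pmc);

        if (flags & SBI_PMU_CFG_FLAG_CLEAR_VALUE)
            pmc->counter_val = 0;

        /* Initial period; ctr_start reprograms it via perf_event_period(). */
        attr->sample_period = kvm_pmu_get_sample_period(pmc);

        event = perf_event_create_kernel_counter(attr, -1, current, NULL, pmc);
        if (IS_ERR(event))
            return PTR_ERR(event);

        pmc->perf_event = event;
        if (flags & SBI_PMU_CFG_FLAG_AUTO_START)
            perf_event_enable(pmc->perf_event);

        return 0;
    }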
333 retdata->out_val = kvpmu->pmc[cidx].cinfo.value; in kvm_riscv_vcpu_pmu_ctr_info()
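
The single ctr_info hit works because cinfo is the SBI counter-info union: the csr/width/type bitfields set at init time are handed back to the guest as one packed word through .value. Roughly (the guard is an assumption):

    int kvm_riscv_vcpu_pmu_ctr_info(struct kvm_vcpu *vcpu, unsigned long cidx,
                                    struct kvm_vcpu_sbi_return *retdata)
    {
        struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);

        /* Assumed guard: reject out-of-range indices (and TIME at index 1). */
        if (cidx >= kvm_pmu_num_counters(kvpmu) || cidx == 1) {
            retdata->err_val = SBI_ERR_INVALID_PARAM;
            return 0;
        }

        retdata->out_val = kvpmu->pmc[cidx].cinfo.value;

        return 0;
    }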
344 struct kvm_pmc *pmc; in kvm_riscv_vcpu_pmu_ctr_start() local
357 pmc = &kvpmu->pmc[pmc_index]; in kvm_riscv_vcpu_pmu_ctr_start()
359 pmc->counter_val = ival; in kvm_riscv_vcpu_pmu_ctr_start()
360 if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW) { in kvm_riscv_vcpu_pmu_ctr_start()
361 fevent_code = get_event_code(pmc->event_idx); in kvm_riscv_vcpu_pmu_ctr_start()
374 kvpmu->fw_event[fevent_code].value = pmc->counter_val; in kvm_riscv_vcpu_pmu_ctr_start()
375 } else if (pmc->perf_event) { in kvm_riscv_vcpu_pmu_ctr_start()
376 if (unlikely(pmc->started)) { in kvm_riscv_vcpu_pmu_ctr_start()
380 perf_event_period(pmc->perf_event, kvm_pmu_get_sample_period(pmc)); in kvm_riscv_vcpu_pmu_ctr_start()
381 perf_event_enable(pmc->perf_event); in kvm_riscv_vcpu_pmu_ctr_start()
382 pmc->started = true; in kvm_riscv_vcpu_pmu_ctr_start()
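
ctr_start iterates over the requested counter mask; per counter it (re)loads the initial value, then either arms the software firmware counter or reprograms and enables the perf event, flagging double starts. A sketch of the per-counter body (the surrounding loop, the flag guard, and the firmware-counter validation are assumptions or abbreviations):

    /* Inside the loop over ctr_base/ctr_mask in kvm_riscv_vcpu_pmu_ctr_start(): */
    pmc = &kvpmu->pmc[pmc_index];
    if (flags & SBI_PMU_START_FLAG_SET_INIT_VALUE)    /* guard assumed */
        pmc->counter_val = ival;
    if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW) {
        fevent_code = get_event_code(pmc->event_idx);
        /* ... validate fevent_code and reject double starts ... */
        kvpmu->fw_event[fevent_code].value = pmc->counter_val;
    } else if (pmc->perf_event) {
        if (unlikely(pmc->started)) {
            sbiret = SBI_ERR_ALREADY_STARTED;
            continue;
        }
        /* Convert the guest start value into a perf sampling period. */
        perf_event_period(pmc->perf_event, kvm_pmu_get_sample_period(pmc));
        perf_event_enable(pmc->perf_event);
        pmc->started = true;
    }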
401 struct kvm_pmc *pmc; in kvm_riscv_vcpu_pmu_ctr_stop() local
414 pmc = &kvpmu->pmc[pmc_index]; in kvm_riscv_vcpu_pmu_ctr_stop()
415 if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW) { in kvm_riscv_vcpu_pmu_ctr_stop()
416 fevent_code = get_event_code(pmc->event_idx); in kvm_riscv_vcpu_pmu_ctr_stop()
426 } else if (pmc->perf_event) { in kvm_riscv_vcpu_pmu_ctr_stop()
427 if (pmc->started) { in kvm_riscv_vcpu_pmu_ctr_stop()
429 perf_event_disable(pmc->perf_event); in kvm_riscv_vcpu_pmu_ctr_stop()
430 pmc->started = false; in kvm_riscv_vcpu_pmu_ctr_stop()
437 pmc->counter_val += perf_event_read_value(pmc->perf_event, in kvm_riscv_vcpu_pmu_ctr_stop()
439 kvm_pmu_release_perf_event(pmc); in kvm_riscv_vcpu_pmu_ctr_stop()
445 pmc->event_idx = SBI_PMU_EVENT_IDX_INVALID; in kvm_riscv_vcpu_pmu_ctr_stop()
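
ctr_stop is the mirror image: disable the perf event (or mark the firmware counter stopped), and on a reset request fold the final reading into counter_val, release the event, and invalidate the counter's event binding. Per-counter sketch; the loop and the firmware branch are abbreviated:

    /* Inside the loop over ctr_base/ctr_mask in kvm_riscv_vcpu_pmu_ctr_stop(): */
    pmc = &kvpmu->pmc[pmc_index];
    if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW) {
        fevent_code = get_event_code(pmc->event_idx);
        /* ... mark kvpmu->fw_event[fevent_code] stopped ... */
    } else if (pmc->perf_event) {
        if (pmc->started) {
            perf_event_disable(pmc->perf_event);
            pmc->started = false;
        } else {
            sbiret = SBI_ERR_ALREADY_STOPPED;
        }
        if (flags & SBI_PMU_STOP_FLAG_RESET) {
            /* Capture the final count before dropping the event. */
            pmc->counter_val += perf_event_read_value(pmc->perf_event,
                                                      &enabled, &running);
            kvm_pmu_release_perf_event(pmc);
        }
    }
    if (flags & SBI_PMU_STOP_FLAG_RESET)
        pmc->event_idx = SBI_PMU_EVENT_IDX_INVALID;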
466 struct kvm_pmc *pmc = NULL; in kvm_riscv_vcpu_pmu_ctr_cfg_match() local
512 pmc = &kvpmu->pmc[ctr_idx]; in kvm_riscv_vcpu_pmu_ctr_cfg_match()
513 pmc->idx = ctr_idx; in kvm_riscv_vcpu_pmu_ctr_cfg_match()
519 ret = kvm_pmu_create_perf_event(pmc, &attr, flags, eidx, evtdata); in kvm_riscv_vcpu_pmu_ctr_cfg_match()
525 pmc->event_idx = eidx; in kvm_riscv_vcpu_pmu_ctr_cfg_match()
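
cfg_match binds an SBI event to a counter: once ctr_idx has been chosen from the base/mask pair (the selection logic produced no hits and is elided), the counter gets a backing perf event and remembers eidx so later start/stop/read calls can tell firmware and hardware events apart. Sketch; is_fw_event is a hypothetical stand-in for the firmware-event test:

    /* In kvm_riscv_vcpu_pmu_ctr_cfg_match(), after ctr_idx is selected: */
    pmc = &kvpmu->pmc[ctr_idx];
    pmc->idx = ctr_idx;

    if (!is_fw_event) {    /* hypothetical flag; FW events need no perf event */
        ret = kvm_pmu_create_perf_event(pmc, &attr, flags, eidx, evtdata);
        if (ret)
            return ret;
    }

    pmc->event_idx = eidx;
    retdata->out_val = ctr_idx;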
549 struct kvm_pmc *pmc; in kvm_riscv_vcpu_pmu_init() local
585 pmc = &kvpmu->pmc[i]; in kvm_riscv_vcpu_pmu_init()
586 pmc->idx = i; in kvm_riscv_vcpu_pmu_init()
587 pmc->event_idx = SBI_PMU_EVENT_IDX_INVALID; in kvm_riscv_vcpu_pmu_init()
589 pmc->cinfo.type = SBI_PMU_CTR_TYPE_HW; in kvm_riscv_vcpu_pmu_init()
592 pmc->cinfo.width = 63; in kvm_riscv_vcpu_pmu_init()
594 pmc->cinfo.width = hpm_width; in kvm_riscv_vcpu_pmu_init()
601 pmc->cinfo.csr = CSR_CYCLE + i; in kvm_riscv_vcpu_pmu_init()
603 pmc->cinfo.type = SBI_PMU_CTR_TYPE_FW; in kvm_riscv_vcpu_pmu_init()
604 pmc->cinfo.width = BITS_PER_LONG - 1; in kvm_riscv_vcpu_pmu_init()
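
Init publishes the counter geometry the guest later queries via ctr_info: the leading hardware counters are the fixed cycle/instret pair (width 63, i.e. 64 bits), the remaining programmable ones use the probed hpmcounter width, and firmware counters are long-sized software counters. The CSR number is derived positionally from CSR_CYCLE. Loop-body sketch; the i < 3 fixed-counter cutoff is an assumption:

    /* Per-counter setup in kvm_riscv_vcpu_pmu_init(): */
    pmc = &kvpmu->pmc[i];
    pmc->idx = i;
    pmc->event_idx = SBI_PMU_EVENT_IDX_INVALID;
    if (i < kvpmu->num_hw_ctrs) {
        pmc->cinfo.type = SBI_PMU_CTR_TYPE_HW;
        if (i < 3)
            /* CY and IR are fixed 64-bit counters (assumed cutoff). */
            pmc->cinfo.width = 63;
        else
            pmc->cinfo.width = hpm_width;
        pmc->cinfo.csr = CSR_CYCLE + i;
    } else {
        pmc->cinfo.type = SBI_PMU_CTR_TYPE_FW;
        pmc->cinfo.width = BITS_PER_LONG - 1;
    }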
614 struct kvm_pmc *pmc; in kvm_riscv_vcpu_pmu_deinit() local
621 pmc = &kvpmu->pmc[i]; in kvm_riscv_vcpu_pmu_deinit()
622 pmc->counter_val = 0; in kvm_riscv_vcpu_pmu_deinit()
623 kvm_pmu_release_perf_event(pmc); in kvm_riscv_vcpu_pmu_deinit()
624 pmc->event_idx = SBI_PMU_EVENT_IDX_INVALID; in kvm_riscv_vcpu_pmu_deinit()
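
Deinit undoes all of the above per counter: zero the guest-visible value, release the backing perf event via the helper from the top of the file, and invalidate the event binding, leaving the slot as init left it. Sketch of the teardown loop; the in-use bitmap iteration is an assumption:

    /* Teardown in kvm_riscv_vcpu_pmu_deinit(), bitmap names assumed: */
    for_each_set_bit(i, kvpmu->pmc_in_use, RISCV_MAX_COUNTERS) {
        pmc = &kvpmu->pmc[i];
        pmc->counter_val = 0;
        kvm_pmu_release_perf_event(pmc);
        pmc->event_idx = SBI_PMU_EVENT_IDX_INVALID;
    }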