18f0153ecSAtish Patra /* SPDX-License-Identifier: GPL-2.0-only */
28f0153ecSAtish Patra /*
38f0153ecSAtish Patra * Copyright (c) 2023 Rivos Inc
48f0153ecSAtish Patra *
58f0153ecSAtish Patra * Authors:
68f0153ecSAtish Patra * Atish Patra <atishp@rivosinc.com>
78f0153ecSAtish Patra */
88f0153ecSAtish Patra
98f0153ecSAtish Patra #ifndef __KVM_VCPU_RISCV_PMU_H
108f0153ecSAtish Patra #define __KVM_VCPU_RISCV_PMU_H
118f0153ecSAtish Patra
128f0153ecSAtish Patra #include <linux/perf/riscv_pmu.h>
133c39f253SAtish Patra #include <asm/kvm_vcpu_insn.h>
148f0153ecSAtish Patra #include <asm/sbi.h>
158f0153ecSAtish Patra
168f0153ecSAtish Patra #ifdef CONFIG_RISCV_PMU_SBI
178f0153ecSAtish Patra #define RISCV_KVM_MAX_FW_CTRS 32
188f0153ecSAtish Patra #define RISCV_KVM_MAX_HW_CTRS 32
198f0153ecSAtish Patra #define RISCV_KVM_MAX_COUNTERS (RISCV_KVM_MAX_HW_CTRS + RISCV_KVM_MAX_FW_CTRS)
208f0153ecSAtish Patra static_assert(RISCV_KVM_MAX_COUNTERS <= 64);
218f0153ecSAtish Patra
/* Per-VCPU state of one SBI PMU firmware event */
struct kvm_fw_event {
	/* Current value of the event */
	unsigned long value;

	/* Event monitoring status: true while the guest has the event started */
	bool started;
};
29badc3868SAtish Patra
308f0153ecSAtish Patra /* Per virtual pmu counter data */
/* Per virtual pmu counter data */
struct kvm_pmc {
	/* Index of this counter in the vPMU counter array */
	u8 idx;
	/* Backing host perf event, if one has been created for this counter */
	struct perf_event *perf_event;
	/* Guest-visible counter value */
	u64 counter_val;
	/* SBI PMU counter description reported to the guest */
	union sbi_pmu_ctr_info cinfo;
	/* Event monitoring status */
	bool started;
	/* Monitoring event ID */
	unsigned long event_idx;
};
418f0153ecSAtish Patra
428f0153ecSAtish Patra /* PMU data structure per vcpu */
/* PMU data structure per vcpu */
struct kvm_pmu {
	/* Counter state, hardware and firmware counters combined */
	struct kvm_pmc pmc[RISCV_KVM_MAX_COUNTERS];
	/* Firmware event state, indexed by firmware event ID */
	struct kvm_fw_event fw_event[RISCV_KVM_MAX_FW_CTRS];
	/* Number of the virtual firmware counters available */
	int num_fw_ctrs;
	/* Number of the virtual hardware counters available */
	int num_hw_ctrs;
	/* A flag to indicate that pmu initialization is done */
	bool init_done;
	/* Bit map of all the virtual counters in use */
	DECLARE_BITMAP(pmc_in_use, RISCV_KVM_MAX_COUNTERS);
};
558f0153ecSAtish Patra
568f0153ecSAtish Patra #define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu_context)
578f0153ecSAtish Patra #define pmu_to_vcpu(pmu) (container_of((pmu), struct kvm_vcpu, arch.pmu_context))
588f0153ecSAtish Patra
59a9ac6c37SAtish Patra #if defined(CONFIG_32BIT)
60a9ac6c37SAtish Patra #define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
61*032ca566SAtish Patra {.base = CSR_CYCLEH, .count = 32, .func = kvm_riscv_vcpu_pmu_read_hpm }, \
62*032ca566SAtish Patra {.base = CSR_CYCLE, .count = 32, .func = kvm_riscv_vcpu_pmu_read_hpm },
63a9ac6c37SAtish Patra #else
64a9ac6c37SAtish Patra #define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
65*032ca566SAtish Patra {.base = CSR_CYCLE, .count = 32, .func = kvm_riscv_vcpu_pmu_read_hpm },
66a9ac6c37SAtish Patra #endif
67a9ac6c37SAtish Patra
68badc3868SAtish Patra int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid);
69a9ac6c37SAtish Patra int kvm_riscv_vcpu_pmu_read_hpm(struct kvm_vcpu *vcpu, unsigned int csr_num,
70a9ac6c37SAtish Patra unsigned long *val, unsigned long new_val,
71a9ac6c37SAtish Patra unsigned long wr_mask);
72a9ac6c37SAtish Patra
738f0153ecSAtish Patra int kvm_riscv_vcpu_pmu_num_ctrs(struct kvm_vcpu *vcpu, struct kvm_vcpu_sbi_return *retdata);
748f0153ecSAtish Patra int kvm_riscv_vcpu_pmu_ctr_info(struct kvm_vcpu *vcpu, unsigned long cidx,
758f0153ecSAtish Patra struct kvm_vcpu_sbi_return *retdata);
768f0153ecSAtish Patra int kvm_riscv_vcpu_pmu_ctr_start(struct kvm_vcpu *vcpu, unsigned long ctr_base,
778f0153ecSAtish Patra unsigned long ctr_mask, unsigned long flags, u64 ival,
788f0153ecSAtish Patra struct kvm_vcpu_sbi_return *retdata);
798f0153ecSAtish Patra int kvm_riscv_vcpu_pmu_ctr_stop(struct kvm_vcpu *vcpu, unsigned long ctr_base,
808f0153ecSAtish Patra unsigned long ctr_mask, unsigned long flags,
818f0153ecSAtish Patra struct kvm_vcpu_sbi_return *retdata);
828f0153ecSAtish Patra int kvm_riscv_vcpu_pmu_ctr_cfg_match(struct kvm_vcpu *vcpu, unsigned long ctr_base,
838f0153ecSAtish Patra unsigned long ctr_mask, unsigned long flags,
848f0153ecSAtish Patra unsigned long eidx, u64 evtdata,
858f0153ecSAtish Patra struct kvm_vcpu_sbi_return *retdata);
868f0153ecSAtish Patra int kvm_riscv_vcpu_pmu_ctr_read(struct kvm_vcpu *vcpu, unsigned long cidx,
878f0153ecSAtish Patra struct kvm_vcpu_sbi_return *retdata);
888f0153ecSAtish Patra void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu);
898f0153ecSAtish Patra void kvm_riscv_vcpu_pmu_deinit(struct kvm_vcpu *vcpu);
908f0153ecSAtish Patra void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu);
918f0153ecSAtish Patra
928f0153ecSAtish Patra #else
/* Empty placeholder so the vcpu arch struct keeps the same layout name */
struct kvm_pmu {
};
958f0153ecSAtish Patra
kvm_riscv_vcpu_pmu_read_legacy(struct kvm_vcpu * vcpu,unsigned int csr_num,unsigned long * val,unsigned long new_val,unsigned long wr_mask)963c39f253SAtish Patra static inline int kvm_riscv_vcpu_pmu_read_legacy(struct kvm_vcpu *vcpu, unsigned int csr_num,
973c39f253SAtish Patra unsigned long *val, unsigned long new_val,
983c39f253SAtish Patra unsigned long wr_mask)
993c39f253SAtish Patra {
1003c39f253SAtish Patra if (csr_num == CSR_CYCLE || csr_num == CSR_INSTRET) {
1013c39f253SAtish Patra *val = 0;
1023c39f253SAtish Patra return KVM_INSN_CONTINUE_NEXT_SEPC;
1033c39f253SAtish Patra } else {
1043c39f253SAtish Patra return KVM_INSN_ILLEGAL_TRAP;
1053c39f253SAtish Patra }
1063c39f253SAtish Patra }
1073c39f253SAtish Patra
108a9ac6c37SAtish Patra #define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
1093c39f253SAtish Patra {.base = CSR_CYCLE, .count = 3, .func = kvm_riscv_vcpu_pmu_read_legacy },
110a9ac6c37SAtish Patra
/* PMU init is a no-op when SBI PMU support is disabled */
static inline void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu) {}
/* Firmware-event increment is a no-op without SBI PMU support; report success */
static inline int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid)
{
	return 0;
}
116badc3868SAtish Patra
/* PMU teardown is a no-op when SBI PMU support is disabled */
static inline void kvm_riscv_vcpu_pmu_deinit(struct kvm_vcpu *vcpu) {}
/* PMU reset is a no-op when SBI PMU support is disabled */
static inline void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu) {}
1198f0153ecSAtish Patra #endif /* CONFIG_RISCV_PMU_SBI */
1208f0153ecSAtish Patra #endif /* !__KVM_VCPU_RISCV_PMU_H */
121