/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2023 Rivos Inc
 *
 * Authors:
 *     Atish Patra <atishp@rivosinc.com>
 */

#ifndef __KVM_VCPU_RISCV_PMU_H
#define __KVM_VCPU_RISCV_PMU_H

#include <linux/perf/riscv_pmu.h>
#include <asm/kvm_vcpu_insn.h>
#include <asm/sbi.h>

#ifdef CONFIG_RISCV_PMU_SBI
#define RISCV_KVM_MAX_FW_CTRS	32
#define RISCV_KVM_MAX_HW_CTRS	32
#define RISCV_KVM_MAX_COUNTERS	(RISCV_KVM_MAX_HW_CTRS + RISCV_KVM_MAX_FW_CTRS)
static_assert(RISCV_KVM_MAX_COUNTERS <= 64);

struct kvm_fw_event {
	/* Current value of the event */
	unsigned long value;

	/* Event monitoring status */
	bool started;
};
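
/*
 * Illustrative sketch (not part of this header; helper name hypothetical):
 * a firmware event only accumulates while it has been started, so the
 * firmware-counter increment path is expected to reduce to roughly:
 *
 *	static void kvm_fw_event_incr(struct kvm_fw_event *fevent)
 *	{
 *		if (fevent->started)
 *			fevent->value++;
 *	}
 */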

/* Per virtual pmu counter data */
struct kvm_pmc {
	u8 idx;
	struct perf_event *perf_event;
	u64 counter_val;
	union sbi_pmu_ctr_info cinfo;
	/* Event monitoring status */
	bool started;
	/* Monitoring event ID */
	unsigned long event_idx;
};

/* PMU data structure per vcpu */
struct kvm_pmu {
	struct kvm_pmc pmc[RISCV_KVM_MAX_COUNTERS];
	struct kvm_fw_event fw_event[RISCV_KVM_MAX_FW_CTRS];
	/* Number of virtual firmware counters available */
	int num_fw_ctrs;
	/* Number of virtual hardware counters available */
	int num_hw_ctrs;
	/* A flag to indicate that pmu initialization is done */
	bool init_done;
	/* Bitmap of all the virtual counters in use */
	DECLARE_BITMAP(pmc_in_use, RISCV_KVM_MAX_COUNTERS);
};
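
/*
 * Illustrative sketch (assumption; helper name hypothetical): pmc_in_use
 * tracks which pmc[] slots currently back a guest counter, so a counter
 * index handed in by the guest can be validated roughly as follows:
 *
 *	static bool pmu_ctr_idx_in_use(struct kvm_pmu *kvpmu, unsigned long cidx)
 *	{
 *		if (cidx >= kvpmu->num_hw_ctrs + kvpmu->num_fw_ctrs)
 *			return false;
 *		return test_bit(cidx, kvpmu->pmc_in_use);
 *	}
 */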

#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu_context)
#define pmu_to_vcpu(pmu) (container_of((pmu), struct kvm_vcpu, arch.pmu_context))
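
/*
 * For example, code that is handed only the PMU context can recover the
 * owning vcpu (and vice versa) with the accessors above:
 *
 *	struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
 *	struct kvm_vcpu *owner = pmu_to_vcpu(kvpmu);
 */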

#if defined(CONFIG_32BIT)
#define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
	{.base = CSR_CYCLEH, .count = 32, .func = kvm_riscv_vcpu_pmu_read_hpm }, \
	{.base = CSR_CYCLE, .count = 32, .func = kvm_riscv_vcpu_pmu_read_hpm },
#else
#define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
	{.base = CSR_CYCLE, .count = 32, .func = kvm_riscv_vcpu_pmu_read_hpm },
#endif
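
/*
 * KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS is meant to be expanded inside a CSR
 * emulation dispatch table: each entry routes 'count' consecutive CSR numbers
 * starting at 'base' to the named read handler. A minimal sketch of such a
 * table (struct layout assumed here; it is defined by the user of the macro):
 *
 *	static const struct csr_func csr_funcs[] = {
 *		KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS
 *	};
 */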

int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid);
int kvm_riscv_vcpu_pmu_read_hpm(struct kvm_vcpu *vcpu, unsigned int csr_num,
				unsigned long *val, unsigned long new_val,
				unsigned long wr_mask);

int kvm_riscv_vcpu_pmu_num_ctrs(struct kvm_vcpu *vcpu, struct kvm_vcpu_sbi_return *retdata);
int kvm_riscv_vcpu_pmu_ctr_info(struct kvm_vcpu *vcpu, unsigned long cidx,
				struct kvm_vcpu_sbi_return *retdata);
int kvm_riscv_vcpu_pmu_ctr_start(struct kvm_vcpu *vcpu, unsigned long ctr_base,
				 unsigned long ctr_mask, unsigned long flags, u64 ival,
				 struct kvm_vcpu_sbi_return *retdata);
int kvm_riscv_vcpu_pmu_ctr_stop(struct kvm_vcpu *vcpu, unsigned long ctr_base,
				unsigned long ctr_mask, unsigned long flags,
				struct kvm_vcpu_sbi_return *retdata);
int kvm_riscv_vcpu_pmu_ctr_cfg_match(struct kvm_vcpu *vcpu, unsigned long ctr_base,
				     unsigned long ctr_mask, unsigned long flags,
				     unsigned long eidx, u64 evtdata,
				     struct kvm_vcpu_sbi_return *retdata);
int kvm_riscv_vcpu_pmu_ctr_read(struct kvm_vcpu *vcpu, unsigned long cidx,
				struct kvm_vcpu_sbi_return *retdata);
void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_pmu_deinit(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu);
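
/*
 * Minimal sketch (assumption): the SBI PMU extension handler is expected to
 * dispatch guest SBI calls to the helpers above, passing the guest's call
 * arguments through largely unchanged (function IDs from <asm/sbi.h>):
 *
 *	switch (funcid) {
 *	case SBI_EXT_PMU_NUM_COUNTERS:
 *		ret = kvm_riscv_vcpu_pmu_num_ctrs(vcpu, retdata);
 *		break;
 *	case SBI_EXT_PMU_COUNTER_START:
 *		ret = kvm_riscv_vcpu_pmu_ctr_start(vcpu, cbase, cmask, flags,
 *						   ival, retdata);
 *		break;
 *	case SBI_EXT_PMU_COUNTER_FW_READ:
 *		ret = kvm_riscv_vcpu_pmu_ctr_read(vcpu, cidx, retdata);
 *		break;
 *	...
 *	}
 */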

#else
struct kvm_pmu {
};

static inline int kvm_riscv_vcpu_pmu_read_legacy(struct kvm_vcpu *vcpu, unsigned int csr_num,
						 unsigned long *val, unsigned long new_val,
						 unsigned long wr_mask)
{
	if (csr_num == CSR_CYCLE || csr_num == CSR_INSTRET) {
		*val = 0;
		return KVM_INSN_CONTINUE_NEXT_SEPC;
	} else {
		return KVM_INSN_ILLEGAL_TRAP;
	}
}

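/*
 * With CONFIG_RISCV_PMU_SBI disabled, a single legacy entry covers the three
 * CSR numbers starting at CSR_CYCLE; the stub handler above reports zero for
 * cycle/instret reads and an illegal-instruction trap for anything else.
 */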
#define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
	{.base = CSR_CYCLE, .count = 3, .func = kvm_riscv_vcpu_pmu_read_legacy },

static inline void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu) {}
static inline int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid)
{
	return 0;
}

static inline void kvm_riscv_vcpu_pmu_deinit(struct kvm_vcpu *vcpu) {}
static inline void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu) {}
#endif /* CONFIG_RISCV_PMU_SBI */
#endif /* !__KVM_VCPU_RISCV_PMU_H */