/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_PMU_H
#define __KVM_X86_PMU_H

#include <linux/nospec.h>

#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu)
#define pmu_to_vcpu(pmu)  (container_of((pmu), struct kvm_vcpu, arch.pmu))
#define pmc_to_pmu(pmc)   (&(pmc)->vcpu->arch.pmu)
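
/*
 * Usage sketch (illustrative only): the three macros above let code hop
 * between a vCPU, its PMU, and an individual counter without storing
 * extra back-pointers:
 *
 *	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 *	struct kvm_pmc *pmc = &pmu->gp_counters[0];
 *
 *	WARN_ON(pmu_to_vcpu(pmu) != vcpu);
 *	WARN_ON(pmc_to_pmu(pmc) != pmu);
 */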

#define MSR_IA32_MISC_ENABLE_PMU_RO_MASK (MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL |	\
					  MSR_IA32_MISC_ENABLE_BTS_UNAVAIL)

/* retrieve the 4 bits for EN and PMI out of IA32_FIXED_CTR_CTRL */
#define fixed_ctrl_field(ctrl_reg, idx) (((ctrl_reg) >> ((idx)*4)) & 0xf)
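
/*
 * Example (illustrative): IA32_FIXED_CTR_CTRL packs one 4-bit control
 * field per fixed counter. With ctrl_reg = 0x0b0, fixed counter 1 is
 * governed by bits 7:4 = 0xb, i.e. enabled for ring 0 and ring 3
 * (bits 1:0 = 0x3) with a PMI on overflow (bit 3):
 *
 *	fixed_ctrl_field(0x0b0, 1) == 0xb
 */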

#define VMWARE_BACKDOOR_PMC_HOST_TSC		0x10000
#define VMWARE_BACKDOOR_PMC_REAL_TIME		0x10001
#define VMWARE_BACKDOOR_PMC_APPARENT_TIME	0x10002

struct kvm_event_hw_type_mapping {
	u8 eventsel;
	u8 unit_mask;
	unsigned event_type;
};
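
/*
 * Example entry (a sketch modeled on the Intel architectural events
 * table): UnHalted Core Cycles, eventsel 0x3c with unit mask 0x00,
 * maps to the generic perf event PERF_COUNT_HW_CPU_CYCLES:
 *
 *	{ .eventsel = 0x3c, .unit_mask = 0x00,
 *	  .event_type = PERF_COUNT_HW_CPU_CYCLES }
 */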

struct kvm_pmu_ops {
	bool (*hw_event_available)(struct kvm_pmc *pmc);
	bool (*pmc_is_enabled)(struct kvm_pmc *pmc);
	struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
	struct kvm_pmc *(*rdpmc_ecx_to_pmc)(struct kvm_vcpu *vcpu,
		unsigned int idx, u64 *mask);
	struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, u32 msr);
	bool (*is_valid_rdpmc_ecx)(struct kvm_vcpu *vcpu, unsigned int idx);
	bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
	int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
	void (*refresh)(struct kvm_vcpu *vcpu);
	void (*init)(struct kvm_vcpu *vcpu);
	void (*reset)(struct kvm_vcpu *vcpu);
	void (*deliver_pmi)(struct kvm_vcpu *vcpu);
	void (*cleanup)(struct kvm_vcpu *vcpu);

	const u64 EVENTSEL_EVENT;
};
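
/*
 * Each vendor module provides one of these tables and KVM snapshots it
 * via kvm_pmu_ops_update() at load time. Minimal sketch (callback names
 * are stand-ins and most fields are elided):
 *
 *	struct kvm_pmu_ops intel_pmu_ops __initdata = {
 *		.hw_event_available = intel_hw_event_available,
 *		.pmc_is_enabled = intel_pmc_is_enabled,
 *		...
 *		.EVENTSEL_EVENT = ARCH_PERFMON_EVENTSEL_EVENT,
 *	};
 */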

void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops);

static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	return pmu->counter_bitmask[pmc->type];
}
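
/*
 * Example (illustrative): for a PMU advertising 48-bit wide GP counters,
 * counter_bitmask[KVM_PMC_GP] is (1ULL << 48) - 1, so callers masking
 * with pmc_bitmask() wrap exactly where the hardware counter would:
 *
 *	((1ULL << 48) & pmc_bitmask(pmc)) == 0
 */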

static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
{
	u64 counter, enabled, running;

	counter = pmc->counter;
	if (pmc->perf_event && !pmc->is_paused)
		counter += perf_event_read_value(pmc->perf_event,
						 &enabled, &running);
	/* FIXME: Scaling needed? */
	return counter & pmc_bitmask(pmc);
}
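
/*
 * Note that pmc_read_counter() folds the live perf_event delta into the
 * snapshot held in pmc->counter. Sketch of a caller (illustrative, not a
 * real read path):
 *
 *	msr_info->data = pmc_read_counter(pmc);	// snapshot + live delta
 */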

static inline void pmc_release_perf_event(struct kvm_pmc *pmc)
{
	if (pmc->perf_event) {
		perf_event_release_kernel(pmc->perf_event);
		pmc->perf_event = NULL;
		pmc->current_config = 0;
		pmc_to_pmu(pmc)->event_count--;
	}
}

static inline void pmc_stop_counter(struct kvm_pmc *pmc)
{
	if (pmc->perf_event) {
		pmc->counter = pmc_read_counter(pmc);
		pmc_release_perf_event(pmc);
	}
}

static inline bool pmc_is_gp(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_GP;
}

static inline bool pmc_is_fixed(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_FIXED;
}

static inline bool kvm_valid_perf_global_ctrl(struct kvm_pmu *pmu,
						 u64 data)
{
	return !(pmu->global_ctrl_mask & data);
}
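
/*
 * Example (illustrative): with two GP counters and one fixed counter,
 * global_ctrl_mask has bits 0, 1 and 32 clear and all other bits set,
 * so:
 *
 *	kvm_valid_perf_global_ctrl(pmu, 0x100000003ULL)	-> true
 *	kvm_valid_perf_global_ctrl(pmu, 0x4)		-> false (reserved)
 */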

/*
 * Returns the general purpose PMC backing the specified MSR. Note that
 * the same helper serves both the PERFCTRn and EVNTSELn ranges; the
 * @base parameter is what tells them apart.
 */
static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
					 u32 base)
{
	if (msr >= base && msr < base + pmu->nr_arch_gp_counters) {
		u32 index = array_index_nospec(msr - base,
					       pmu->nr_arch_gp_counters);

		return &pmu->gp_counters[index];
	}

	return NULL;
}
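
/*
 * Usage sketch (illustrative): the same helper resolves both halves of a
 * counter pair, only the base differs:
 *
 *	pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0);	// counter MSRs
 *	if (!pmc)
 *		pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0);	// eventsels
 *
 * get_fixed_pmc() below plays the same game for the fixed counter range.
 */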

/* Returns the fixed PMC backing the specified MSR. */
static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
{
	int base = MSR_CORE_PERF_FIXED_CTR0;

	if (msr >= base && msr < base + pmu->nr_arch_fixed_counters) {
		u32 index = array_index_nospec(msr - base,
					       pmu->nr_arch_fixed_counters);

		return &pmu->fixed_counters[index];
	}

	return NULL;
}

static inline u64 get_sample_period(struct kvm_pmc *pmc, u64 counter_value)
{
	u64 sample_period = (-counter_value) & pmc_bitmask(pmc);

	if (!sample_period)
		sample_period = pmc_bitmask(pmc) + 1;
	return sample_period;
}
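
/*
 * Worked example (illustrative): for a 32-bit counter programmed to
 * 0xffffff00, the period is (-0xffffff00) & 0xffffffff = 0x100, i.e.
 * perf fires after 256 more events, exactly when the guest's counter
 * would overflow. A counter value of 0 degenerates to a full period of
 * pmc_bitmask(pmc) + 1.
 */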

static inline void pmc_update_sample_period(struct kvm_pmc *pmc)
{
	if (!pmc->perf_event || pmc->is_paused ||
	    !is_sampling_event(pmc->perf_event))
		return;

	perf_event_period(pmc->perf_event,
			  get_sample_period(pmc, pmc->counter));
}

static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (pmc_is_fixed(pmc))
		return fixed_ctrl_field(pmu->fixed_ctr_ctrl,
					pmc->idx - INTEL_PMC_IDX_FIXED) & 0x3;

	return pmc->eventsel & ARCH_PERFMON_EVENTSEL_ENABLE;
}
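
/*
 * Example (illustrative): immediately after the guest executes
 *
 *	WRMSR(MSR_P6_EVNTSEL0, event | ARCH_PERFMON_EVENTSEL_ENABLE);
 *
 * pmc_speculative_in_use() already reports true, even though the
 * deferred KVM_REQ_PMU reprogramming may not have created the backing
 * perf event yet.
 */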

extern struct x86_pmu_capability kvm_pmu_cap;

static inline void kvm_init_pmu_capability(void)
{
	bool is_intel = boot_cpu_data.x86_vendor == X86_VENDOR_INTEL;

	perf_get_x86_pmu_capability(&kvm_pmu_cap);

	/*
	 * Disable the guest PMU if the host PMU has no general purpose
	 * counters or, for Intel, if the host lacks an architectural
	 * PMU (version == 0).
	 */
	if ((is_intel && !kvm_pmu_cap.version) || !kvm_pmu_cap.num_counters_gp)
		enable_pmu = false;

	if (!enable_pmu) {
		memset(&kvm_pmu_cap, 0, sizeof(kvm_pmu_cap));
		return;
	}

	kvm_pmu_cap.version = min(kvm_pmu_cap.version, 2);
	kvm_pmu_cap.num_counters_fixed = min(kvm_pmu_cap.num_counters_fixed,
					     KVM_PMC_MAX_FIXED);
}

static inline void kvm_pmu_request_counter_reprogram(struct kvm_pmc *pmc)
{
	set_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi);
	kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
}
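
/*
 * Usage sketch (illustrative): a vendor set_msr() hook marks the PMC
 * dirty instead of reprogramming it synchronously:
 *
 *	pmc->eventsel = data;
 *	kvm_pmu_request_counter_reprogram(pmc);
 *
 * The actual reprogramming then happens from kvm_pmu_handle_event()
 * when the KVM_REQ_PMU request is serviced.
 */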

void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
bool kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx);
bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr);
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
void kvm_pmu_refresh(struct kvm_vcpu *vcpu);
void kvm_pmu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_cleanup(struct kvm_vcpu *vcpu);
void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp);
void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id);

bool is_vmware_backdoor_pmc(u32 pmc_idx);

extern struct kvm_pmu_ops intel_pmu_ops;
extern struct kvm_pmu_ops amd_pmu_ops;
#endif /* __KVM_X86_PMU_H */