/* xref: /openbmc/linux/arch/x86/kvm/svm/pmu.c (revision d87c25e8) */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM PMU support for AMD
 *
 * Copyright 2015, Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *   Wei Huang <wei@redhat.com>
 *
 * Implementation is based on the pmu_intel.c file
 */
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"
#include "svm.h"

enum pmu_type {
	PMU_TYPE_COUNTER = 0,
	PMU_TYPE_EVNTSEL,
};

enum index {
	INDEX_ZERO = 0,
	INDEX_ONE,
	INDEX_TWO,
	INDEX_THREE,
	INDEX_FOUR,
	INDEX_FIVE,
	INDEX_ERROR,
};
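
/*
 * The six indices cover the AMD64_NUM_COUNTERS_CORE (6) counters exposed
 * when the guest has X86_FEATURE_PERFCTR_CORE; legacy guests use only the
 * first AMD64_NUM_COUNTERS (4) of them.
 */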

/* Duplicated from amd_perfmon_event_map; K7 and above should work. */
static struct kvm_event_hw_type_mapping amd_event_mapping[] = {
	[0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
	[2] = { 0x7d, 0x07, PERF_COUNT_HW_CACHE_REFERENCES },
	[3] = { 0x7e, 0x07, PERF_COUNT_HW_CACHE_MISSES },
	[4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
	[5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
	[6] = { 0xd0, 0x00, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
	[7] = { 0xd1, 0x00, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
};
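
/*
 * Each entry maps an AMD event select / unit mask pair to a generic perf
 * hardware event, e.g. event 0x76 ("CPU clocks not halted") with unit
 * mask 0x00 becomes PERF_COUNT_HW_CPU_CYCLES. Note that only the low
 * 8 bits of the event select are represented here; see
 * amd_pmc_perf_hw_id() below.
 */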

static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type)
{
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
		if (type == PMU_TYPE_COUNTER)
			return MSR_F15H_PERF_CTR;
		else
			return MSR_F15H_PERF_CTL;
	} else {
		if (type == PMU_TYPE_COUNTER)
			return MSR_K7_PERFCTR0;
		else
			return MSR_K7_EVNTSEL0;
	}
}
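
/*
 * The two MSR layouts differ: the F15H (PERFCTR_CORE) registers are
 * interleaved in CTL/CTR pairs starting at MSR_F15H_PERF_CTL
 * (0xc0010200), so CTLn lives at base + 2n and CTRn at base + 2n + 1,
 * whereas the legacy K7 registers form two contiguous banks,
 * MSR_K7_EVNTSEL0 (0xc0010000) through EVNTSEL3 and MSR_K7_PERFCTR0
 * (0xc0010004) through PERFCTR3. This is why amd_pmc_idx_to_pmc() below
 * doubles the counter index in the PERFCTR_CORE case.
 */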

static enum index msr_to_index(u32 msr)
{
	switch (msr) {
	case MSR_F15H_PERF_CTL0:
	case MSR_F15H_PERF_CTR0:
	case MSR_K7_EVNTSEL0:
	case MSR_K7_PERFCTR0:
		return INDEX_ZERO;
	case MSR_F15H_PERF_CTL1:
	case MSR_F15H_PERF_CTR1:
	case MSR_K7_EVNTSEL1:
	case MSR_K7_PERFCTR1:
		return INDEX_ONE;
	case MSR_F15H_PERF_CTL2:
	case MSR_F15H_PERF_CTR2:
	case MSR_K7_EVNTSEL2:
	case MSR_K7_PERFCTR2:
		return INDEX_TWO;
	case MSR_F15H_PERF_CTL3:
	case MSR_F15H_PERF_CTR3:
	case MSR_K7_EVNTSEL3:
	case MSR_K7_PERFCTR3:
		return INDEX_THREE;
	case MSR_F15H_PERF_CTL4:
	case MSR_F15H_PERF_CTR4:
		return INDEX_FOUR;
	case MSR_F15H_PERF_CTL5:
	case MSR_F15H_PERF_CTR5:
		return INDEX_FIVE;
	default:
		return INDEX_ERROR;
	}
}

static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
					     enum pmu_type type)
{
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	if (!enable_pmu)
		return NULL;

	switch (msr) {
	case MSR_F15H_PERF_CTL0:
	case MSR_F15H_PERF_CTL1:
	case MSR_F15H_PERF_CTL2:
	case MSR_F15H_PERF_CTL3:
	case MSR_F15H_PERF_CTL4:
	case MSR_F15H_PERF_CTL5:
		if (!guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
			return NULL;
		fallthrough;
	case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
		if (type != PMU_TYPE_EVNTSEL)
			return NULL;
		break;
	case MSR_F15H_PERF_CTR0:
	case MSR_F15H_PERF_CTR1:
	case MSR_F15H_PERF_CTR2:
	case MSR_F15H_PERF_CTR3:
	case MSR_F15H_PERF_CTR4:
	case MSR_F15H_PERF_CTR5:
		if (!guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
			return NULL;
		fallthrough;
	case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
		if (type != PMU_TYPE_COUNTER)
			return NULL;
		break;
	default:
		return NULL;
	}

	return &pmu->gp_counters[msr_to_index(msr)];
}
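
/*
 * For example, MSR_K7_PERFCTR2 probed with PMU_TYPE_COUNTER resolves to
 * INDEX_TWO and thus &pmu->gp_counters[2], while the same MSR probed
 * with PMU_TYPE_EVNTSEL yields NULL; the get/set helpers below rely on
 * this to tell counter accesses apart from event-select accesses.
 */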

static unsigned int amd_pmc_perf_hw_id(struct kvm_pmc *pmc)
{
	u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
	u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
	int i;

	/* return PERF_COUNT_HW_MAX as AMD doesn't have fixed events */
	if (WARN_ON(pmc_is_fixed(pmc)))
		return PERF_COUNT_HW_MAX;

	for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++)
		if (amd_event_mapping[i].eventsel == event_select
		    && amd_event_mapping[i].unit_mask == unit_mask)
			break;

	if (i == ARRAY_SIZE(amd_event_mapping))
		return PERF_COUNT_HW_MAX;

	return amd_event_mapping[i].event_type;
}
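
/*
 * E.g. an event select of 0xc0 with unit mask 0x00 is reported as
 * PERF_COUNT_HW_INSTRUCTIONS. A select/mask pair with no entry in
 * amd_event_mapping[] returns PERF_COUNT_HW_MAX, which the common PMU
 * code takes as "no generic equivalent", leaving the event to be
 * programmed as a raw perf event instead.
 */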

/*
 * Check if a PMC is enabled by comparing it against global_ctrl bits.
 * Because AMD CPUs don't have a global_ctrl MSR, all PMCs are always
 * enabled (return true).
 */
static bool amd_pmc_is_enabled(struct kvm_pmc *pmc)
{
	return true;
}

static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
	unsigned int base = get_msr_base(pmu, PMU_TYPE_COUNTER);
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
		/*
		 * The idx is contiguous. The MSRs are not. The counter MSRs
		 * are interleaved with the event select MSRs.
		 */
		pmc_idx *= 2;
	}

	return get_gp_pmc_amd(pmu, base + pmc_idx, PMU_TYPE_COUNTER);
}
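
/*
 * Worked example: with PERFCTR_CORE, pmc_idx 1 maps to
 * MSR_F15H_PERF_CTR (0xc0010201) + 2 = MSR_F15H_PERF_CTR1 (0xc0010203);
 * without it, pmc_idx 1 is simply MSR_K7_PERFCTR0 + 1 = MSR_K7_PERFCTR1.
 */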

static bool amd_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	/* Mask off the flag bits (31:30) that only have meaning for Intel. */
	idx &= ~(3u << 30);

	return idx < pmu->nr_arch_gp_counters;
}

/* idx is the ECX register of the RDPMC instruction */
static struct kvm_pmc *amd_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
	unsigned int idx, u64 *mask)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *counters;

	idx &= ~(3u << 30);
	if (idx >= pmu->nr_arch_gp_counters)
		return NULL;
	counters = pmu->gp_counters;

	return &counters[idx];
}

static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	/* All MSRs refer to exactly one PMC, so msr_idx_to_pmc is enough. */
	return false;
}

static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;

	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	pmc = pmc ? pmc : get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);

	return pmc;
}

static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;

	/* MSR_PERFCTRn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	if (pmc) {
		msr_info->data = pmc_read_counter(pmc);
		return 0;
	}
	/* MSR_EVNTSELn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
	if (pmc) {
		msr_info->data = pmc->eventsel;
		return 0;
	}

	return 1;
}

static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;
	u64 data = msr_info->data;

	/* MSR_PERFCTRn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	if (pmc) {
		/*
		 * Adjust the stored value so that a subsequent read returns
		 * 'data' without disturbing the running perf event.
		 */
		pmc->counter += data - pmc_read_counter(pmc);
		return 0;
	}
	/* MSR_EVNTSELn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
	if (pmc) {
		if (data == pmc->eventsel)
			return 0;
		if (!(data & pmu->reserved_bits)) {
			reprogram_gp_counter(pmc, data);
			return 0;
		}
	}

	return 1;
}

static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS_CORE;
	else
		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;

	/* 48-bit counters */
	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
	/*
	 * Reserved PerfEvtSel bits: 19 and 21, plus bits 63:36 above the
	 * extended event-select field (35:32).
	 */
	pmu->reserved_bits = 0xfffffff000280000ull;
	pmu->version = 1;
	/* Not applicable to AMD, but clear them to prevent any fallout. */
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
	pmu->nr_arch_fixed_counters = 0;
	pmu->global_status = 0;
	bitmap_set(pmu->all_valid_pmc_idx, 0, pmu->nr_arch_gp_counters);
}

static void amd_pmu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

	BUILD_BUG_ON(AMD64_NUM_COUNTERS_CORE > INTEL_PMC_MAX_GENERIC);

	for (i = 0; i < AMD64_NUM_COUNTERS_CORE; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
		pmu->gp_counters[i].current_config = 0;
	}
}
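
/*
 * Note that all AMD64_NUM_COUNTERS_CORE (6) counters are initialized
 * here regardless of guest CPUID; amd_pmu_refresh() limits how many of
 * them are actually visible through pmu->nr_arch_gp_counters.
 */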

static void amd_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

	for (i = 0; i < AMD64_NUM_COUNTERS_CORE; i++) {
		struct kvm_pmc *pmc = &pmu->gp_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = pmc->eventsel = 0;
	}
}

struct kvm_pmu_ops amd_pmu_ops = {
	.pmc_perf_hw_id = amd_pmc_perf_hw_id,
	.pmc_is_enabled = amd_pmc_is_enabled,
	.pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
	.rdpmc_ecx_to_pmc = amd_rdpmc_ecx_to_pmc,
	.msr_idx_to_pmc = amd_msr_idx_to_pmc,
	.is_valid_rdpmc_ecx = amd_is_valid_rdpmc_ecx,
	.is_valid_msr = amd_is_valid_msr,
	.get_msr = amd_pmu_get_msr,
	.set_msr = amd_pmu_set_msr,
	.refresh = amd_pmu_refresh,
	.init = amd_pmu_init,
	.reset = amd_pmu_reset,
};