xref: /openbmc/linux/arch/x86/kvm/vmx/pmu_intel.c (revision 9a6b55ac)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM PMU support for Intel CPUs
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@redhat.com>
 *   Gleb Natapov <gleb@redhat.com>
 */
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <asm/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "nested.h"
#include "pmu.h"

static struct kvm_event_hw_type_mapping intel_arch_events[] = {
	/* Index must match CPUID 0x0A.EBX bit vector */
	[0] = { 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES },
	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
	[2] = { 0x3c, 0x01, PERF_COUNT_HW_BUS_CYCLES  },
	[3] = { 0x2e, 0x4f, PERF_COUNT_HW_CACHE_REFERENCES },
	[4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
	[5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
	[6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
	[7] = { 0x00, 0x30, PERF_COUNT_HW_REF_CPU_CYCLES },
};

/* mapping between fixed pmc index and intel_arch_events array */
static int fixed_pmc_events[] = {1, 0, 7};

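/*
 * Reprogram any fixed counter whose 4-bit control field in the new
 * IA32_FIXED_CTR_CTRL value differs from the currently cached value.
 */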
static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
{
	int i;

	for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
		u8 new_ctrl = fixed_ctrl_field(data, i);
		u8 old_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, i);
		struct kvm_pmc *pmc;

		pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);

		if (old_ctrl == new_ctrl)
			continue;

		__set_bit(INTEL_PMC_IDX_FIXED + i, pmu->pmc_in_use);
		reprogram_fixed_counter(pmc, new_ctrl, i);
	}

	pmu->fixed_ctr_ctrl = data;
}

/*
 * Called when MSR_CORE_PERF_GLOBAL_CTRL has been updated: reprogram
 * every counter whose enable bit changed.
 */
static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
{
	int bit;
	u64 diff = pmu->global_ctrl ^ data;

	pmu->global_ctrl = data;

	for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
		reprogram_counter(pmu, bit);
}

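/*
 * Map an event_select/unit_mask pair to a generic perf event type,
 * provided the matching architectural event is not marked unavailable
 * in the guest's CPUID 0xA.EBX bit vector.
 */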
static unsigned intel_find_arch_event(struct kvm_pmu *pmu,
				      u8 event_select,
				      u8 unit_mask)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++)
		if (intel_arch_events[i].eventsel == event_select
		    && intel_arch_events[i].unit_mask == unit_mask
		    && (pmu->available_event_types & (1 << i)))
			break;

	if (i == ARRAY_SIZE(intel_arch_events))
		return PERF_COUNT_HW_MAX;

	return intel_arch_events[i].event_type;
}

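/* Map a fixed counter index to the generic perf event type it counts. */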
static unsigned intel_find_fixed_event(int idx)
{
	if (idx >= ARRAY_SIZE(fixed_pmc_events))
		return PERF_COUNT_HW_MAX;

	return intel_arch_events[fixed_pmc_events[idx]].event_type;
}

/* Check whether a PMC is enabled by testing its bit in global_ctrl. */
static bool intel_pmc_is_enabled(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
}

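/* Translate a global PMC index into the corresponding GP or fixed PMC. */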
static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
	if (pmc_idx < INTEL_PMC_IDX_FIXED)
		return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + pmc_idx,
				  MSR_P6_EVNTSEL0);
	else {
		u32 idx = pmc_idx - INTEL_PMC_IDX_FIXED;

		return get_fixed_pmc(pmu, idx + MSR_CORE_PERF_FIXED_CTR0);
	}
}

/* Returns 0 if the counter selected by idx exists; otherwise returns 1. */
static int intel_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	bool fixed = idx & (1u << 30);

	idx &= ~(3u << 30);

	return (!fixed && idx >= pmu->nr_arch_gp_counters) ||
		(fixed && idx >= pmu->nr_arch_fixed_counters);
}

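/*
 * Translate a guest RDPMC index (bit 30 selects the fixed counters)
 * into a PMC, and narrow *mask to the width of the selected counter type.
 */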
static struct kvm_pmc *intel_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
					    unsigned int idx, u64 *mask)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	bool fixed = idx & (1u << 30);
	struct kvm_pmc *counters;

	idx &= ~(3u << 30);
	if (!fixed && idx >= pmu->nr_arch_gp_counters)
		return NULL;
	if (fixed && idx >= pmu->nr_arch_fixed_counters)
		return NULL;
	counters = fixed ? pmu->fixed_counters : pmu->gp_counters;
	*mask &= pmu->counter_bitmask[fixed ? KVM_PMC_FIXED : KVM_PMC_GP];

	return &counters[idx];
}

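/*
 * Check whether @msr is a PMU MSR the guest may access: the global
 * control/status MSRs require PMU version > 1, anything else must map
 * to an existing GP counter, GP event selector or fixed counter.
 */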
static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int ret;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
	case MSR_CORE_PERF_GLOBAL_STATUS:
	case MSR_CORE_PERF_GLOBAL_CTRL:
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		ret = pmu->version > 1;
		break;
	default:
		ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
			get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
			get_fixed_pmc(pmu, msr);
		break;
	}

	return ret;
}

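/*
 * Translate a counter-related MSR into its PMC, checking fixed counters,
 * GP event selectors and GP counters in turn.
 */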
static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;

	pmc = get_fixed_pmc(pmu, msr);
	pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0);
	pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0);

	return pmc;
}

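/* Read a guest PMU MSR; returns 0 on success, 1 if the MSR is not handled. */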
static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		*data = pmu->fixed_ctr_ctrl;
		return 0;
	case MSR_CORE_PERF_GLOBAL_STATUS:
		*data = pmu->global_status;
		return 0;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		*data = pmu->global_ctrl;
		return 0;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		*data = pmu->global_ovf_ctrl;
		return 0;
	default:
		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0))) {
			u64 val = pmc_read_counter(pmc);
			*data = val & pmu->counter_bitmask[KVM_PMC_GP];
			return 0;
		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
			u64 val = pmc_read_counter(pmc);
			*data = val & pmu->counter_bitmask[KVM_PMC_FIXED];
			return 0;
		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
			*data = pmc->eventsel;
			return 0;
		}
	}

	return 1;
}

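/*
 * Write a guest PMU MSR, rejecting values that set reserved bits;
 * returns 0 on success, 1 if the write is refused.
 */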
static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;
	u64 data = msr_info->data;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		if (pmu->fixed_ctr_ctrl == data)
			return 0;
		/* bits 63:12 and the per-counter AnyThread bits are reserved */
		if (!(data & 0xfffffffffffff444ull)) {
			reprogram_fixed_counters(pmu, data);
			return 0;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_STATUS:
		if (msr_info->host_initiated) {
			pmu->global_status = data;
			return 0;
		}
		break; /* RO MSR */
	case MSR_CORE_PERF_GLOBAL_CTRL:
		if (pmu->global_ctrl == data)
			return 0;
		if (kvm_valid_perf_global_ctrl(pmu, data)) {
			global_ctrl_changed(pmu, data);
			return 0;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		if (!(data & pmu->global_ovf_ctrl_mask)) {
			if (!msr_info->host_initiated)
				pmu->global_status &= ~data;
			pmu->global_ovf_ctrl = data;
			return 0;
		}
		break;
	default:
		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0))) {
			if (msr_info->host_initiated)
				pmc->counter = data;
			else
				pmc->counter = (s32)data;
			return 0;
		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
			pmc->counter = data;
			return 0;
		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
			if (data == pmc->eventsel)
				return 0;
			if (!(data & pmu->reserved_bits)) {
				reprogram_gp_counter(pmc, data);
				return 0;
			}
		}
	}

	return 1;
}

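/*
 * Refresh the vCPU's PMU configuration from the guest's CPUID 0xA leaf,
 * clamped to the counter limits reported by the host perf subsystem.
 */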
static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct x86_pmu_capability x86_pmu;
	struct kvm_cpuid_entry2 *entry;
	union cpuid10_eax eax;
	union cpuid10_edx edx;

	pmu->nr_arch_gp_counters = 0;
	pmu->nr_arch_fixed_counters = 0;
	pmu->counter_bitmask[KVM_PMC_GP] = 0;
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
	pmu->version = 0;
	pmu->reserved_bits = 0xffffffff00200000ull;

	entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
	if (!entry)
		return;
	eax.full = entry->eax;
	edx.full = entry->edx;

	pmu->version = eax.split.version_id;
	if (!pmu->version)
		return;

	perf_get_x86_pmu_capability(&x86_pmu);

	pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
					 x86_pmu.num_counters_gp);
	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
	pmu->available_event_types = ~entry->ebx &
					((1ull << eax.split.mask_length) - 1);

	if (pmu->version == 1) {
		pmu->nr_arch_fixed_counters = 0;
	} else {
		pmu->nr_arch_fixed_counters =
			min_t(int, edx.split.num_counters_fixed,
			      x86_pmu.num_counters_fixed);
		pmu->counter_bitmask[KVM_PMC_FIXED] =
			((u64)1 << edx.split.bit_width_fixed) - 1;
	}

	pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) |
		(((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
	pmu->global_ctrl_mask = ~pmu->global_ctrl;
	pmu->global_ovf_ctrl_mask = pmu->global_ctrl_mask
			& ~(MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF |
			    MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD);
	if (kvm_x86_ops->pt_supported())
		pmu->global_ovf_ctrl_mask &=
				~MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI;

	entry = kvm_find_cpuid_entry(vcpu, 7, 0);
	if (entry &&
	    (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
	    (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM)))
		pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;

	bitmap_set(pmu->all_valid_pmc_idx,
		0, pmu->nr_arch_gp_counters);
	bitmap_set(pmu->all_valid_pmc_idx,
		INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters);

	nested_vmx_pmu_entry_exit_ctls_update(vcpu);
}

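/* Set the type, owning vCPU and index of every GP and fixed PMC. */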
static void intel_pmu_init(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
		pmu->gp_counters[i].current_config = 0;
	}

	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
		pmu->fixed_counters[i].type = KVM_PMC_FIXED;
		pmu->fixed_counters[i].vcpu = vcpu;
		pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
		pmu->fixed_counters[i].current_config = 0;
	}
}

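/* Stop every counter and clear all counter values and PMU control state. */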
static void intel_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc = NULL;
	int i;

	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
		pmc = &pmu->gp_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = pmc->eventsel = 0;
	}

	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
		pmc = &pmu->fixed_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = 0;
	}

	pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
		pmu->global_ovf_ctrl = 0;
}

struct kvm_pmu_ops intel_pmu_ops = {
	.find_arch_event = intel_find_arch_event,
	.find_fixed_event = intel_find_fixed_event,
	.pmc_is_enabled = intel_pmc_is_enabled,
	.pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
	.rdpmc_ecx_to_pmc = intel_rdpmc_ecx_to_pmc,
	.msr_idx_to_pmc = intel_msr_idx_to_pmc,
	.is_valid_rdpmc_ecx = intel_is_valid_rdpmc_ecx,
	.is_valid_msr = intel_is_valid_msr,
	.get_msr = intel_pmu_get_msr,
	.set_msr = intel_pmu_set_msr,
	.refresh = intel_pmu_refresh,
	.init = intel_pmu_init,
	.reset = intel_pmu_reset,
};