xref: /openbmc/linux/arch/x86/kvm/vmx/pmu_intel.c (revision 9d4fa1a1)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM PMU support for Intel CPUs
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@redhat.com>
 *   Gleb Natapov <gleb@redhat.com>
 */
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <asm/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "nested.h"
#include "pmu.h"

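/*
 * Each entry maps an architectural event (the event select code and unit
 * mask a guest programs into its eventsel MSRs) to the generic perf event
 * KVM uses to back the guest counter on the host.
 */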
static struct kvm_event_hw_type_mapping intel_arch_events[] = {
	/* Index must match CPUID 0x0A.EBX bit vector */
	[0] = { 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES },
	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
	[2] = { 0x3c, 0x01, PERF_COUNT_HW_BUS_CYCLES  },
	[3] = { 0x2e, 0x4f, PERF_COUNT_HW_CACHE_REFERENCES },
	[4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
	[5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
	[6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
	[7] = { 0x00, 0x30, PERF_COUNT_HW_REF_CPU_CYCLES },
};

/*
 * Mapping between fixed PMC index and the intel_arch_events array:
 * fixed counter 0 counts instructions retired, counter 1 counts core
 * cycles and counter 2 counts reference cycles.
 */
static int fixed_pmc_events[] = {1, 0, 7};

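/*
 * MSR_CORE_PERF_FIXED_CTR_CTRL assigns each fixed counter a 4-bit field:
 * bits 0-1 enable counting in ring 0 and/or ring 3, bit 2 is AnyThread
 * and bit 3 enables a PMI on overflow.  A counter is reprogrammed only
 * when its field actually changes.
 */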
static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
{
	int i;

	for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
		u8 new_ctrl = fixed_ctrl_field(data, i);
		u8 old_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, i);
		struct kvm_pmc *pmc;

		pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);

		if (old_ctrl == new_ctrl)
			continue;

		__set_bit(INTEL_PMC_IDX_FIXED + i, pmu->pmc_in_use);
		reprogram_fixed_counter(pmc, new_ctrl, i);
	}

	pmu->fixed_ctr_ctrl = data;
}

/*
 * Called when the IA32_PERF_GLOBAL_CTRL MSR has been written; only the
 * counters whose enable bit changed (the XOR diff) are reprogrammed.
 */
static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
{
	int bit;
	u64 diff = pmu->global_ctrl ^ data;

	pmu->global_ctrl = data;

	for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
		reprogram_counter(pmu, bit);
}

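/*
 * Map a guest eventsel/unit_mask pair to a generic perf event.  An
 * architectural event may only be used if the guest's CPUID.0AH:EBX bit
 * vector reports it as available (a set EBX bit means the event is *not*
 * available, hence the inverted available_event_types mask built in
 * intel_pmu_refresh()).
 */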
static unsigned intel_find_arch_event(struct kvm_pmu *pmu,
				      u8 event_select,
				      u8 unit_mask)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++)
		if (intel_arch_events[i].eventsel == event_select
		    && intel_arch_events[i].unit_mask == unit_mask
		    && (pmu->available_event_types & (1 << i)))
			break;

	if (i == ARRAY_SIZE(intel_arch_events))
		return PERF_COUNT_HW_MAX;

	return intel_arch_events[i].event_type;
}

static unsigned intel_find_fixed_event(int idx)
{
	u32 event;
	size_t size = ARRAY_SIZE(fixed_pmc_events);

	if (idx >= size)
		return PERF_COUNT_HW_MAX;

	event = fixed_pmc_events[array_index_nospec(idx, size)];
	return intel_arch_events[event].event_type;
}

/* Check if a PMC is enabled by comparing it with global_ctrl bits. */
static bool intel_pmc_is_enabled(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
}

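/*
 * In the global counter index space, general-purpose counters occupy
 * indices 0 .. INTEL_PMC_IDX_FIXED - 1 and fixed counters start at
 * INTEL_PMC_IDX_FIXED, mirroring the bit layout of IA32_PERF_GLOBAL_CTRL.
 */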
static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
	if (pmc_idx < INTEL_PMC_IDX_FIXED)
		return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + pmc_idx,
				  MSR_P6_EVNTSEL0);
	else {
		u32 idx = pmc_idx - INTEL_PMC_IDX_FIXED;

		return get_fixed_pmc(pmu, idx + MSR_CORE_PERF_FIXED_CTR0);
	}
}

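/*
 * For RDPMC, ECX bit 30 selects the fixed-counter namespace and the low
 * bits give the counter index within that namespace; bits 31:30 are
 * masked off before the index is range-checked.  intel_rdpmc_ecx_to_pmc()
 * additionally narrows *mask to the width of the selected counter so
 * RDPMC returns only implemented bits.
 */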
/* returns 0 if idx's corresponding MSR exists; otherwise returns 1. */
static int intel_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	bool fixed = idx & (1u << 30);

	idx &= ~(3u << 30);

	return (!fixed && idx >= pmu->nr_arch_gp_counters) ||
		(fixed && idx >= pmu->nr_arch_fixed_counters);
}

static struct kvm_pmc *intel_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
					    unsigned int idx, u64 *mask)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	bool fixed = idx & (1u << 30);
	struct kvm_pmc *counters;
	unsigned int num_counters;

	idx &= ~(3u << 30);
	if (fixed) {
		counters = pmu->fixed_counters;
		num_counters = pmu->nr_arch_fixed_counters;
	} else {
		counters = pmu->gp_counters;
		num_counters = pmu->nr_arch_gp_counters;
	}
	if (idx >= num_counters)
		return NULL;
	*mask &= pmu->counter_bitmask[fixed ? KVM_PMC_FIXED : KVM_PMC_GP];
	return &counters[array_index_nospec(idx, num_counters)];
}

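/*
 * The fixed-counter control and the global control/status/overflow MSRs
 * only exist when CPUID reports an architectural PMU version greater
 * than 1.
 */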
static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int ret;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
	case MSR_CORE_PERF_GLOBAL_STATUS:
	case MSR_CORE_PERF_GLOBAL_CTRL:
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		ret = pmu->version > 1;
		break;
	default:
		ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
			get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
			get_fixed_pmc(pmu, msr);
		break;
	}

	return ret;
}

static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;

	pmc = get_fixed_pmc(pmu, msr);
	pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0);
	pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0);

	return pmc;
}

static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		*data = pmu->fixed_ctr_ctrl;
		return 0;
	case MSR_CORE_PERF_GLOBAL_STATUS:
		*data = pmu->global_status;
		return 0;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		*data = pmu->global_ctrl;
		return 0;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		*data = pmu->global_ovf_ctrl;
		return 0;
	default:
		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0))) {
			u64 val = pmc_read_counter(pmc);
			*data = val & pmu->counter_bitmask[KVM_PMC_GP];
			return 0;
		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
			u64 val = pmc_read_counter(pmc);
			*data = val & pmu->counter_bitmask[KVM_PMC_FIXED];
			return 0;
		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
			*data = pmc->eventsel;
			return 0;
		}
	}

	return 1;
}

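/*
 * Notes on the write side: a write to MSR_CORE_PERF_FIXED_CTR_CTRL is
 * rejected if it touches a reserved bit (0xfffffffffffff444 keeps each
 * field's AnyThread bit and everything beyond the three supported fixed
 * counters reserved), and guest-initiated writes to the GP counter MSRs
 * are sign-extended from 32 bits, mirroring how hardware implements
 * writes through the legacy IA32_PERFCTRx interface.
 */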
static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;
	u64 data = msr_info->data;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		if (pmu->fixed_ctr_ctrl == data)
			return 0;
		if (!(data & 0xfffffffffffff444ull)) {
			reprogram_fixed_counters(pmu, data);
			return 0;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_STATUS:
		if (msr_info->host_initiated) {
			pmu->global_status = data;
			return 0;
		}
		break; /* RO MSR */
	case MSR_CORE_PERF_GLOBAL_CTRL:
		if (pmu->global_ctrl == data)
			return 0;
		if (kvm_valid_perf_global_ctrl(pmu, data)) {
			global_ctrl_changed(pmu, data);
			return 0;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		if (!(data & pmu->global_ovf_ctrl_mask)) {
			if (!msr_info->host_initiated)
				pmu->global_status &= ~data;
			pmu->global_ovf_ctrl = data;
			return 0;
		}
		break;
	default:
		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0))) {
			if (!msr_info->host_initiated)
				data = (s64)(s32)data;
			pmc->counter += data - pmc_read_counter(pmc);
			if (pmc->perf_event)
				perf_event_period(pmc->perf_event,
						  get_sample_period(pmc, data));
			return 0;
		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
			pmc->counter += data - pmc_read_counter(pmc);
			if (pmc->perf_event)
				perf_event_period(pmc->perf_event,
						  get_sample_period(pmc, data));
			return 0;
		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
			if (data == pmc->eventsel)
				return 0;
			if (!(data & pmu->reserved_bits)) {
				reprogram_gp_counter(pmc, data);
				return 0;
			}
		}
	}

	return 1;
}

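/*
 * Size the vPMU from the guest's CPUID.0AH leaf, clamped by what the
 * host perf subsystem exposes: EAX holds the PMU version, the number
 * and width of GP counters and the event-mask length; EBX flags the
 * architectural events that are unavailable; EDX holds the number and
 * width of fixed counters (version 2 and later).
 */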
static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct x86_pmu_capability x86_pmu;
	struct kvm_cpuid_entry2 *entry;
	union cpuid10_eax eax;
	union cpuid10_edx edx;

	pmu->nr_arch_gp_counters = 0;
	pmu->nr_arch_fixed_counters = 0;
	pmu->counter_bitmask[KVM_PMC_GP] = 0;
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
	pmu->version = 0;
	pmu->reserved_bits = 0xffffffff00200000ull;

	entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
	if (!entry)
		return;
	eax.full = entry->eax;
	edx.full = entry->edx;

	pmu->version = eax.split.version_id;
	if (!pmu->version)
		return;

	perf_get_x86_pmu_capability(&x86_pmu);

	pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
					 x86_pmu.num_counters_gp);
	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
	pmu->available_event_types = ~entry->ebx &
					((1ull << eax.split.mask_length) - 1);

	if (pmu->version == 1) {
		pmu->nr_arch_fixed_counters = 0;
	} else {
		pmu->nr_arch_fixed_counters =
			min_t(int, edx.split.num_counters_fixed,
			      x86_pmu.num_counters_fixed);
		pmu->counter_bitmask[KVM_PMC_FIXED] =
			((u64)1 << edx.split.bit_width_fixed) - 1;
	}

	pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) |
		(((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
	pmu->global_ctrl_mask = ~pmu->global_ctrl;
	pmu->global_ovf_ctrl_mask = pmu->global_ctrl_mask
			& ~(MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF |
			    MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD);
	if (vmx_pt_mode_is_host_guest())
		pmu->global_ovf_ctrl_mask &=
				~MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI;

	entry = kvm_find_cpuid_entry(vcpu, 7, 0);
	if (entry &&
	    (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
	    (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM)))
		pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;

	bitmap_set(pmu->all_valid_pmc_idx,
		0, pmu->nr_arch_gp_counters);
	bitmap_set(pmu->all_valid_pmc_idx,
		INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters);

	nested_vmx_pmu_entry_exit_ctls_update(vcpu);
}

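/*
 * Wire up every possible counter once at vCPU creation; how many of them
 * are actually exposed to the guest is decided later by
 * intel_pmu_refresh() from guest CPUID.
 */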
static void intel_pmu_init(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
		pmu->gp_counters[i].current_config = 0;
	}

	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
		pmu->fixed_counters[i].type = KVM_PMC_FIXED;
		pmu->fixed_counters[i].vcpu = vcpu;
		pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
		pmu->fixed_counters[i].current_config = 0;
	}
}

static void intel_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc = NULL;
	int i;

	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
		pmc = &pmu->gp_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = pmc->eventsel = 0;
	}

	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
		pmc = &pmu->fixed_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = 0;
	}

	pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
		pmu->global_ovf_ctrl = 0;
}

struct kvm_pmu_ops intel_pmu_ops = {
	.find_arch_event = intel_find_arch_event,
	.find_fixed_event = intel_find_fixed_event,
	.pmc_is_enabled = intel_pmc_is_enabled,
	.pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
	.rdpmc_ecx_to_pmc = intel_rdpmc_ecx_to_pmc,
	.msr_idx_to_pmc = intel_msr_idx_to_pmc,
	.is_valid_rdpmc_ecx = intel_is_valid_rdpmc_ecx,
	.is_valid_msr = intel_is_valid_msr,
	.get_msr = intel_pmu_get_msr,
	.set_msr = intel_pmu_set_msr,
	.refresh = intel_pmu_refresh,
	.init = intel_pmu_init,
	.reset = intel_pmu_reset,
};