xref: /openbmc/linux/arch/x86/kvm/vmx/pmu_intel.c (revision a6377d90)
/*
 * KVM PMU support for Intel CPUs
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@redhat.com>
 *   Gleb Natapov <gleb@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <asm/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"

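/*
 * Each entry maps an Intel architectural event, identified by the
 * {event_select, unit_mask} pair a guest programs into an eventsel MSR,
 * to the generic perf event id KVM uses to request that event from the
 * host perf subsystem.
 */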
static struct kvm_event_hw_type_mapping intel_arch_events[] = {
	/* Index must match CPUID 0x0A.EBX bit vector */
	[0] = { 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES },
	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
	[2] = { 0x3c, 0x01, PERF_COUNT_HW_BUS_CYCLES },
	[3] = { 0x2e, 0x4f, PERF_COUNT_HW_CACHE_REFERENCES },
	[4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
	[5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
	[6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
	[7] = { 0x00, 0x30, PERF_COUNT_HW_REF_CPU_CYCLES },
};

/* mapping between fixed pmc index and intel_arch_events array */
static int fixed_pmc_events[] = {1, 0, 7};

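/*
 * IA32_FIXED_CTR_CTRL packs one 4-bit control field per fixed counter:
 *
 *   bit 0 - enable counting in ring 0
 *   bit 1 - enable counting in rings 1-3
 *   bit 2 - AnyThread (not supported by KVM, treated as reserved)
 *   bit 3 - raise a PMI on overflow
 *
 * Walk every field in the new value and reprogram only those fixed
 * counters whose control field actually changed.
 */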
static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
{
	int i;

	for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
		u8 new_ctrl = fixed_ctrl_field(data, i);
		u8 old_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, i);
		struct kvm_pmc *pmc;

		pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);

		if (old_ctrl == new_ctrl)
			continue;

		reprogram_fixed_counter(pmc, new_ctrl, i);
	}

	pmu->fixed_ctr_ctrl = data;
}

/*
 * Called when the guest writes the global control MSR: reprogram each
 * counter whose enable bit changed.
 */
static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
{
	int bit;
	u64 diff = pmu->global_ctrl ^ data;

	pmu->global_ctrl = data;

	for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
		reprogram_counter(pmu, bit);
}

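/*
 * Map a guest-programmed {event_select, unit_mask} pair to a generic
 * perf event id.  An event is only usable if the guest's CPUID 0x0A
 * availability mask (cached in pmu->available_event_types) reports it;
 * anything unknown or masked off falls back to PERF_COUNT_HW_MAX.
 */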
static unsigned intel_find_arch_event(struct kvm_pmu *pmu,
				      u8 event_select,
				      u8 unit_mask)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++)
		if (intel_arch_events[i].eventsel == event_select
		    && intel_arch_events[i].unit_mask == unit_mask
		    && (pmu->available_event_types & (1 << i)))
			break;

	if (i == ARRAY_SIZE(intel_arch_events))
		return PERF_COUNT_HW_MAX;

	return intel_arch_events[i].event_type;
}

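/*
 * Look up the generic perf event for a fixed counter: per the
 * fixed_pmc_events[] mapping above, fixed counter 0 counts retired
 * instructions, 1 counts core cycles and 2 counts reference cycles.
 */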
static unsigned intel_find_fixed_event(int idx)
{
	if (idx >= ARRAY_SIZE(fixed_pmc_events))
		return PERF_COUNT_HW_MAX;

	return intel_arch_events[fixed_pmc_events[idx]].event_type;
}

/* Check if a PMC is enabled by comparing it with global_ctrl bits. */
static bool intel_pmc_is_enabled(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
}

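/*
 * Translate a global counter index (a bit position in global_ctrl) back
 * to its kvm_pmc: indices below INTEL_PMC_IDX_FIXED are GP counters,
 * the rest are fixed counters.
 */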
static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
	if (pmc_idx < INTEL_PMC_IDX_FIXED) {
		return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + pmc_idx,
				  MSR_P6_EVNTSEL0);
	} else {
		u32 idx = pmc_idx - INTEL_PMC_IDX_FIXED;

		return get_fixed_pmc(pmu, idx + MSR_CORE_PERF_FIXED_CTR0);
	}
}

/* Returns 0 if idx's corresponding MSR exists; otherwise returns 1. */
static int intel_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	bool fixed = idx & (1u << 30);

	idx &= ~(3u << 30);

	return (!fixed && idx >= pmu->nr_arch_gp_counters) ||
		(fixed && idx >= pmu->nr_arch_fixed_counters);
}

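/*
 * Resolve an RDPMC index to a counter.  The index uses the architectural
 * RDPMC encoding: bit 30 selects the fixed-counter space, the low bits
 * select the counter within it.  *mask is narrowed to the counter's bit
 * width so the caller returns only implemented counter bits.
 */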
static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu,
					    unsigned idx, u64 *mask)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	bool fixed = idx & (1u << 30);
	struct kvm_pmc *counters;

	idx &= ~(3u << 30);
	if (!fixed && idx >= pmu->nr_arch_gp_counters)
		return NULL;
	if (fixed && idx >= pmu->nr_arch_fixed_counters)
		return NULL;
	counters = fixed ? pmu->fixed_counters : pmu->gp_counters;
	*mask &= pmu->counter_bitmask[fixed ? KVM_PMC_FIXED : KVM_PMC_GP];

	return &counters[idx];
}

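/*
 * The fixed-control and global status/control MSRs only exist from
 * architectural PMU version 2 onwards; every other PMU MSR must resolve
 * to an implemented GP counter, eventsel or fixed counter.
 */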
static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int ret;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
	case MSR_CORE_PERF_GLOBAL_STATUS:
	case MSR_CORE_PERF_GLOBAL_CTRL:
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		ret = pmu->version > 1;
		break;
	default:
		ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
			get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
			get_fixed_pmc(pmu, msr);
		break;
	}

	return ret;
}

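/*
 * Read a PMU MSR on the guest's behalf.  Counter reads are masked to
 * the bit width advertised in CPUID 0x0A, so the guest never sees bits
 * beyond its architectural counter width.
 */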
static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		*data = pmu->fixed_ctr_ctrl;
		return 0;
	case MSR_CORE_PERF_GLOBAL_STATUS:
		*data = pmu->global_status;
		return 0;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		*data = pmu->global_ctrl;
		return 0;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		*data = pmu->global_ovf_ctrl;
		return 0;
	default:
		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0))) {
			u64 val = pmc_read_counter(pmc);
			*data = val & pmu->counter_bitmask[KVM_PMC_GP];
			return 0;
		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
			u64 val = pmc_read_counter(pmc);
			*data = val & pmu->counter_bitmask[KVM_PMC_FIXED];
			return 0;
		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
			*data = pmc->eventsel;
			return 0;
		}
	}

	return 1;
}

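/*
 * Emulate a PMU MSR write.  Writes that touch reserved bits fall through
 * and return 1, signalling the caller to fault the access.
 */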
static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;
	u64 data = msr_info->data;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		if (pmu->fixed_ctr_ctrl == data)
			return 0;
		/* Bit 2 (AnyThread) of each 4-bit field and bits 63:12 are reserved. */
		if (!(data & 0xfffffffffffff444ull)) {
			reprogram_fixed_counters(pmu, data);
			return 0;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_STATUS:
		if (msr_info->host_initiated) {
			pmu->global_status = data;
			return 0;
		}
		break; /* RO MSR */
	case MSR_CORE_PERF_GLOBAL_CTRL:
		if (pmu->global_ctrl == data)
			return 0;
		if (!(data & pmu->global_ctrl_mask)) {
			global_ctrl_changed(pmu, data);
			return 0;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		if (!(data & pmu->global_ovf_ctrl_mask)) {
			if (!msr_info->host_initiated)
				pmu->global_status &= ~data;
			pmu->global_ovf_ctrl = data;
			return 0;
		}
		break;
	default:
		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0))) {
			if (msr_info->host_initiated)
				pmc->counter = data;
			else
				/* Legacy WRMSR writes bits 31:0 and sign-extends bit 31. */
				pmc->counter = (s32)data;
			return 0;
		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
			pmc->counter = data;
			return 0;
		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
			if (data == pmc->eventsel)
				return 0;
			if (!(data & pmu->reserved_bits)) {
				reprogram_gp_counter(pmc, data);
				return 0;
			}
		}
	}

	return 1;
}

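/*
 * (Re)build the vCPU's PMU model from its CPUID: leaf 0x0A supplies the
 * PMU version, counter counts and bit widths, from which the valid
 * global_ctrl and overflow-control bits are derived.  A missing or
 * version-0 leaf leaves the PMU disabled.
 */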
static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_cpuid_entry2 *entry;
	union cpuid10_eax eax;
	union cpuid10_edx edx;

	pmu->nr_arch_gp_counters = 0;
	pmu->nr_arch_fixed_counters = 0;
	pmu->counter_bitmask[KVM_PMC_GP] = 0;
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
	pmu->version = 0;
	pmu->reserved_bits = 0xffffffff00200000ull;

	entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
	if (!entry)
		return;
	eax.full = entry->eax;
	edx.full = entry->edx;

	pmu->version = eax.split.version_id;
	if (!pmu->version)
		return;

	pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
					 INTEL_PMC_MAX_GENERIC);
	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
	pmu->available_event_types = ~entry->ebx &
					((1ull << eax.split.mask_length) - 1);

	if (pmu->version == 1) {
		pmu->nr_arch_fixed_counters = 0;
	} else {
		pmu->nr_arch_fixed_counters =
			min_t(int, edx.split.num_counters_fixed,
			      INTEL_PMC_MAX_FIXED);
		pmu->counter_bitmask[KVM_PMC_FIXED] =
			((u64)1 << edx.split.bit_width_fixed) - 1;
	}

	pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) |
		(((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
	pmu->global_ctrl_mask = ~pmu->global_ctrl;
	pmu->global_ovf_ctrl_mask = pmu->global_ctrl_mask
			& ~(MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF |
			    MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD);
	if (kvm_x86_ops->pt_supported())
		pmu->global_ovf_ctrl_mask &=
				~MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI;

	/*
	 * Allow the HSW_IN_TX and HSW_IN_TX_CHECKPOINTED eventsel bits if
	 * both host and guest advertise TSX.  HLE is CPUID.07H:EBX bit 4
	 * and RTM is bit 11; bit() reduces the X86_FEATURE_* numbers
	 * modulo 32 to those EBX bit positions (the old code ORed the raw
	 * feature numbers together, which tested the wrong bits).
	 */
	entry = kvm_find_cpuid_entry(vcpu, 7, 0);
	if (entry &&
	    (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
	    (entry->ebx & (bit(X86_FEATURE_HLE) | bit(X86_FEATURE_RTM))))
		pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;
}

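/*
 * One-time initialisation: give every possible GP and fixed counter its
 * type, owning vCPU and global counter index (fixed counters live at
 * INTEL_PMC_IDX_FIXED and up).
 */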
static void intel_pmu_init(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
	}

	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
		pmu->fixed_counters[i].type = KVM_PMC_FIXED;
		pmu->fixed_counters[i].vcpu = vcpu;
		pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
	}
}

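/* Stop every counter and clear all counter and control state on vCPU reset. */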
static void intel_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
		struct kvm_pmc *pmc = &pmu->gp_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = pmc->eventsel = 0;
	}

	/* Clear the fixed counters' values too, as for the GP counters. */
	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
		struct kvm_pmc *pmc = &pmu->fixed_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = 0;
	}

	pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
		pmu->global_ovf_ctrl = 0;
}

struct kvm_pmu_ops intel_pmu_ops = {
	.find_arch_event = intel_find_arch_event,
	.find_fixed_event = intel_find_fixed_event,
	.pmc_is_enabled = intel_pmc_is_enabled,
	.pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
	.msr_idx_to_pmc = intel_msr_idx_to_pmc,
	.is_valid_msr_idx = intel_is_valid_msr_idx,
	.is_valid_msr = intel_is_valid_msr,
	.get_msr = intel_pmu_get_msr,
	.set_msr = intel_pmu_set_msr,
	.refresh = intel_pmu_refresh,
	.init = intel_pmu_init,
	.reset = intel_pmu_reset,
};