xref: /openbmc/linux/arch/x86/kvm/vmx/pmu_intel.c (revision 359745d7)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM PMU support for Intel CPUs
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@redhat.com>
 *   Gleb Natapov <gleb@redhat.com>
 */
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <asm/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "nested.h"
#include "pmu.h"

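/*
 * MSR_IA32_PMC0 (0x4c1) aliases MSR_IA32_PERFCTR0 (0xc1) at a fixed
 * offset, so a single bit of the MSR index distinguishes full-width
 * counter accesses from the legacy, sign-extended ones.
 */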
#define MSR_PMC_FULL_WIDTH_BIT      (MSR_IA32_PMC0 - MSR_IA32_PERFCTR0)

static struct kvm_event_hw_type_mapping intel_arch_events[] = {
	/* Index must match CPUID 0x0A.EBX bit vector */
	[0] = { 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES },
	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
	[2] = { 0x3c, 0x01, PERF_COUNT_HW_BUS_CYCLES  },
	[3] = { 0x2e, 0x4f, PERF_COUNT_HW_CACHE_REFERENCES },
	[4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
	[5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
	[6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
	[7] = { 0x00, 0x03, PERF_COUNT_HW_REF_CPU_CYCLES },
};

/*
 * Mapping between fixed pmc index and intel_arch_events array, i.e.
 * fixed counter 0 counts instructions retired, fixed counter 1 counts
 * unhalted core cycles and fixed counter 2 counts unhalted reference
 * cycles.
 */
static int fixed_pmc_events[] = {1, 0, 7};

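/*
 * Each fixed counter owns a 4-bit control field in IA32_FIXED_CTR_CTRL:
 * bit 0 enables counting in ring 0, bit 1 enables counting in rings
 * 1-3, bit 2 is the AnyThread bit and bit 3 raises a PMI on overflow.
 */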
static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
{
	int i;

	for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
		u8 new_ctrl = fixed_ctrl_field(data, i);
		u8 old_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, i);
		struct kvm_pmc *pmc;

		pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);

		if (old_ctrl == new_ctrl)
			continue;

		__set_bit(INTEL_PMC_IDX_FIXED + i, pmu->pmc_in_use);
		reprogram_fixed_counter(pmc, new_ctrl, i);
	}

	pmu->fixed_ctr_ctrl = data;
}

/*
 * Called when the IA32_PERF_GLOBAL_CTRL MSR has been updated; only the
 * counters whose enable bit actually toggled are reprogrammed.
 */
static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
{
	int bit;
	u64 diff = pmu->global_ctrl ^ data;

	pmu->global_ctrl = data;

	for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
		reprogram_counter(pmu, bit);
}

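/*
 * Map a guest event selector and unit mask to a generic perf hardware
 * event id. Fixed counters are always considered available; for GP
 * counters the guest's CPUID 0x0A.EBX availability mask is honored.
 */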
static unsigned int intel_pmc_perf_hw_id(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
	u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
	u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
	int i;

	for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++)
		if (intel_arch_events[i].eventsel == event_select &&
		    intel_arch_events[i].unit_mask == unit_mask &&
		    (pmc_is_fixed(pmc) || pmu->available_event_types & (1 << i)))
			break;

	if (i == ARRAY_SIZE(intel_arch_events))
		return PERF_COUNT_HW_MAX;

	return intel_arch_events[i].event_type;
}

/* Check if a PMC is enabled by comparing it with global_ctrl bits. */
static bool intel_pmc_is_enabled(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
}

static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
	if (pmc_idx < INTEL_PMC_IDX_FIXED)
		return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + pmc_idx,
				  MSR_P6_EVNTSEL0);
	else {
		u32 idx = pmc_idx - INTEL_PMC_IDX_FIXED;

		return get_fixed_pmc(pmu, idx + MSR_CORE_PERF_FIXED_CTR0);
	}
}

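/*
 * For RDPMC, bit 30 of the guest's ECX selects the fixed-counter set;
 * the top two bits are masked off before the remaining index is
 * range-checked against the number of counters of that type.
 */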
static bool intel_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	bool fixed = idx & (1u << 30);

	idx &= ~(3u << 30);

	return fixed ? idx < pmu->nr_arch_fixed_counters
		     : idx < pmu->nr_arch_gp_counters;
}

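/*
 * Translate a validated RDPMC index to a kvm_pmc; array_index_nospec()
 * clamps the index under speculation (Spectre v1 hardening) before the
 * counter array is dereferenced.
 */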
static struct kvm_pmc *intel_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
					    unsigned int idx, u64 *mask)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	bool fixed = idx & (1u << 30);
	struct kvm_pmc *counters;
	unsigned int num_counters;

	idx &= ~(3u << 30);
	if (fixed) {
		counters = pmu->fixed_counters;
		num_counters = pmu->nr_arch_fixed_counters;
	} else {
		counters = pmu->gp_counters;
		num_counters = pmu->nr_arch_gp_counters;
	}
	if (idx >= num_counters)
		return NULL;
	*mask &= pmu->counter_bitmask[fixed ? KVM_PMC_FIXED : KVM_PMC_GP];
	return &counters[array_index_nospec(idx, num_counters)];
}

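/*
 * IA32_PERF_CAPABILITIES is only exposed when the guest's CPUID
 * enumerates PDCM (CPUID.1:ECX[15]); without it, report zero
 * capabilities.
 */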
static inline u64 vcpu_get_perf_capabilities(struct kvm_vcpu *vcpu)
{
	if (!guest_cpuid_has(vcpu, X86_FEATURE_PDCM))
		return 0;

	return vcpu->arch.perf_capabilities;
}

static inline bool fw_writes_is_enabled(struct kvm_vcpu *vcpu)
{
	return (vcpu_get_perf_capabilities(vcpu) & PMU_CAP_FW_WRITES) != 0;
}

static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr)
{
	if (!fw_writes_is_enabled(pmu_to_vcpu(pmu)))
		return NULL;

	return get_gp_pmc(pmu, msr, MSR_IA32_PMC0);
}

bool intel_pmu_lbr_is_compatible(struct kvm_vcpu *vcpu)
{
	/*
	 * As a first step, a guest can only enable the LBR feature if its
	 * CPU model is the same as the host's, because the LBR registers
	 * are passed through to the guest and they are model specific.
	 */
	return boot_cpu_data.x86_model == guest_cpuid_model(vcpu);
}

bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu)
{
	struct x86_pmu_lbr *lbr = vcpu_to_lbr_records(vcpu);

	return lbr->nr && (vcpu_get_perf_capabilities(vcpu) & PMU_CAP_LBR_FMT);
}

static bool intel_pmu_is_valid_lbr_msr(struct kvm_vcpu *vcpu, u32 index)
{
	struct x86_pmu_lbr *records = vcpu_to_lbr_records(vcpu);
	bool ret = false;

	if (!intel_pmu_lbr_is_enabled(vcpu))
		return ret;

	ret = (index == MSR_LBR_SELECT) || (index == MSR_LBR_TOS) ||
		(index >= records->from && index < records->from + records->nr) ||
		(index >= records->to && index < records->to + records->nr);

	if (!ret && records->info)
		ret = (index >= records->info && index < records->info + records->nr);

	return ret;
}

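/*
 * The global status/control/overflow-control MSRs only exist from
 * architectural PMU version 2 onwards, hence the version check below.
 */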
static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int ret;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
	case MSR_CORE_PERF_GLOBAL_STATUS:
	case MSR_CORE_PERF_GLOBAL_CTRL:
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		ret = pmu->version > 1;
		break;
	default:
		ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
			get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
			get_fixed_pmc(pmu, msr) || get_fw_gp_pmc(pmu, msr) ||
			intel_pmu_is_valid_lbr_msr(vcpu, msr);
		break;
	}

	return ret;
}

static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;

	pmc = get_fixed_pmc(pmu, msr);
	pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0);
	pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0);

	return pmc;
}

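/*
 * The guest LBR event participates in pmu->event_count bookkeeping, so
 * creation and release must keep that counter balanced.
 */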
static inline void intel_pmu_release_guest_lbr_event(struct kvm_vcpu *vcpu)
{
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);

	if (lbr_desc->event) {
		perf_event_release_kernel(lbr_desc->event);
		lbr_desc->event = NULL;
		vcpu_to_pmu(vcpu)->event_count--;
	}
}

int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu)
{
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct perf_event *event;

	/*
	 * The perf_event_attr is constructed in the minimal efficient way:
	 * - set 'pinned = true' to make it task pinned so that if another
	 *   cpu pinned event reclaims LBR, the event->oncpu will be set to -1;
	 * - set '.exclude_host = true' to record guest branches behavior;
	 *
	 * - set '.config = INTEL_FIXED_VLBR_EVENT' to indicate that host perf
	 *   should schedule the event without a real HW counter but a fake one;
	 *   check is_guest_lbr_event() and __intel_get_event_constraints();
	 *
	 * - set 'sample_type = PERF_SAMPLE_BRANCH_STACK' and
	 *   'branch_sample_type = PERF_SAMPLE_BRANCH_CALL_STACK |
	 *   PERF_SAMPLE_BRANCH_USER' to configure it as an LBR callstack
	 *   event, which helps KVM to save/restore guest LBR records
	 *   during host context switches and reduces the overhead quite
	 *   a lot; check branch_user_callstack() and intel_pmu_lbr_sched_task();
	 */
	struct perf_event_attr attr = {
		.type = PERF_TYPE_RAW,
		.size = sizeof(attr),
		.config = INTEL_FIXED_VLBR_EVENT,
		.sample_type = PERF_SAMPLE_BRANCH_STACK,
		.pinned = true,
		.exclude_host = true,
		.branch_sample_type = PERF_SAMPLE_BRANCH_CALL_STACK |
					PERF_SAMPLE_BRANCH_USER,
	};

	if (unlikely(lbr_desc->event)) {
		__set_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
		return 0;
	}

	event = perf_event_create_kernel_counter(&attr, -1,
						current, NULL, NULL);
	if (IS_ERR(event)) {
		pr_debug_ratelimited("%s: failed %ld\n",
					__func__, PTR_ERR(event));
		return PTR_ERR(event);
	}
	lbr_desc->event = event;
	pmu->event_count++;
	__set_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
	return 0;
}

/*
 * It's safe to access LBR MSRs from the guest when they have not been
 * passed through, since the host will restore or reset the LBR MSR
 * records when the guest LBR event is scheduled in.
 */
static bool intel_pmu_handle_lbr_msrs_access(struct kvm_vcpu *vcpu,
				     struct msr_data *msr_info, bool read)
{
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
	u32 index = msr_info->index;

	if (!intel_pmu_is_valid_lbr_msr(vcpu, index))
		return false;

	if (!lbr_desc->event && intel_pmu_create_guest_lbr_event(vcpu) < 0)
		goto dummy;

	/*
	 * Disable IRQs to ensure the LBR feature doesn't get reclaimed by
	 * the host at the time the value is read from the MSR; this avoids
	 * leaking a host LBR value to the guest. If LBR has been reclaimed,
	 * return 0 on guest reads.
	 */
	local_irq_disable();
	if (lbr_desc->event->state == PERF_EVENT_STATE_ACTIVE) {
		if (read)
			rdmsrl(index, msr_info->data);
		else
			wrmsrl(index, msr_info->data);
		__set_bit(INTEL_PMC_IDX_FIXED_VLBR, vcpu_to_pmu(vcpu)->pmc_in_use);
		local_irq_enable();
		return true;
	}
	clear_bit(INTEL_PMC_IDX_FIXED_VLBR, vcpu_to_pmu(vcpu)->pmc_in_use);
	local_irq_enable();

dummy:
	if (read)
		msr_info->data = 0;
	return true;
}

static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		msr_info->data = pmu->fixed_ctr_ctrl;
		return 0;
	case MSR_CORE_PERF_GLOBAL_STATUS:
		msr_info->data = pmu->global_status;
		return 0;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		msr_info->data = pmu->global_ctrl;
		return 0;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		msr_info->data = 0;
		return 0;
	default:
		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
		    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
			u64 val = pmc_read_counter(pmc);
			msr_info->data =
				val & pmu->counter_bitmask[KVM_PMC_GP];
			return 0;
		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
			u64 val = pmc_read_counter(pmc);
			msr_info->data =
				val & pmu->counter_bitmask[KVM_PMC_FIXED];
			return 0;
		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
			msr_info->data = pmc->eventsel;
			return 0;
		} else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, true))
			return 0;
	}

	return 1;
}

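/*
 * Counter writes are emulated by adjusting pmc->counter by the delta
 * between the written value and the current count; if a perf event is
 * active, its sample period is moved so that the overflow interrupt
 * still fires at the architecturally correct count. Legacy (non
 * full-width) guest writes are sign-extended from bit 31.
 */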
static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;
	u64 data = msr_info->data;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		if (pmu->fixed_ctr_ctrl == data)
			return 0;
		if (!(data & 0xfffffffffffff444ull)) {
			reprogram_fixed_counters(pmu, data);
			return 0;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_STATUS:
		if (msr_info->host_initiated) {
			pmu->global_status = data;
			return 0;
		}
		break; /* RO MSR */
	case MSR_CORE_PERF_GLOBAL_CTRL:
		if (pmu->global_ctrl == data)
			return 0;
		if (kvm_valid_perf_global_ctrl(pmu, data)) {
			global_ctrl_changed(pmu, data);
			return 0;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		if (!(data & pmu->global_ovf_ctrl_mask)) {
			if (!msr_info->host_initiated)
				pmu->global_status &= ~data;
			return 0;
		}
		break;
	default:
		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
		    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
			if ((msr & MSR_PMC_FULL_WIDTH_BIT) &&
			    (data & ~pmu->counter_bitmask[KVM_PMC_GP]))
				return 1;
			if (!msr_info->host_initiated &&
			    !(msr & MSR_PMC_FULL_WIDTH_BIT))
				data = (s64)(s32)data;
			pmc->counter += data - pmc_read_counter(pmc);
			if (pmc->perf_event && !pmc->is_paused)
				perf_event_period(pmc->perf_event,
						  get_sample_period(pmc, data));
			return 0;
		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
			pmc->counter += data - pmc_read_counter(pmc);
			if (pmc->perf_event && !pmc->is_paused)
				perf_event_period(pmc->perf_event,
						  get_sample_period(pmc, data));
			return 0;
		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
			if (data == pmc->eventsel)
				return 0;
			if (!(data & pmu->reserved_bits)) {
				reprogram_gp_counter(pmc, data);
				return 0;
			}
		} else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, false))
			return 0;
	}

	return 1;
}

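/*
 * ARCH_PERFMON event select MSRs encode the unit mask in bits 15:8 and
 * the event select in bits 7:0; synthesize eventsel values in that
 * format for the fixed counters so common code can treat them uniformly.
 */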
static void setup_fixed_pmc_eventsel(struct kvm_pmu *pmu)
{
	size_t size = ARRAY_SIZE(fixed_pmc_events);
	struct kvm_pmc *pmc;
	u32 event;
	int i;

	for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
		pmc = &pmu->fixed_counters[i];
		event = fixed_pmc_events[array_index_nospec(i, size)];
		pmc->eventsel = (intel_arch_events[event].unit_mask << 8) |
			intel_arch_events[event].eventsel;
	}
}

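/*
 * CPUID.0AH:EAX packs the PMU version (bits 7:0), the number of GP
 * counters (15:8), their bit width (23:16) and the event-mask length
 * (31:24); EDX packs the fixed-counter count and bit width. Each guest
 * value is capped by the corresponding host PMU capability.
 */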
static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);

	struct x86_pmu_capability x86_pmu;
	struct kvm_cpuid_entry2 *entry;
	union cpuid10_eax eax;
	union cpuid10_edx edx;

	pmu->nr_arch_gp_counters = 0;
	pmu->nr_arch_fixed_counters = 0;
	pmu->counter_bitmask[KVM_PMC_GP] = 0;
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
	pmu->version = 0;
	pmu->reserved_bits = 0xffffffff00200000ull;

	entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
	if (!entry)
		return;
	eax.full = entry->eax;
	edx.full = entry->edx;

	pmu->version = eax.split.version_id;
	if (!pmu->version)
		return;

	perf_get_x86_pmu_capability(&x86_pmu);

	pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
					 x86_pmu.num_counters_gp);
	eax.split.bit_width = min_t(int, eax.split.bit_width, x86_pmu.bit_width_gp);
	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
	eax.split.mask_length = min_t(int, eax.split.mask_length, x86_pmu.events_mask_len);
	pmu->available_event_types = ~entry->ebx &
					((1ull << eax.split.mask_length) - 1);

	if (pmu->version == 1) {
		pmu->nr_arch_fixed_counters = 0;
	} else {
		pmu->nr_arch_fixed_counters =
			min3(ARRAY_SIZE(fixed_pmc_events),
			     (size_t) edx.split.num_counters_fixed,
			     (size_t) x86_pmu.num_counters_fixed);
		edx.split.bit_width_fixed = min_t(int,
			edx.split.bit_width_fixed, x86_pmu.bit_width_fixed);
		pmu->counter_bitmask[KVM_PMC_FIXED] =
			((u64)1 << edx.split.bit_width_fixed) - 1;
		setup_fixed_pmc_eventsel(pmu);
	}

	pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) |
		(((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
	pmu->global_ctrl_mask = ~pmu->global_ctrl;
	pmu->global_ovf_ctrl_mask = pmu->global_ctrl_mask
			& ~(MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF |
			    MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD);
	if (vmx_pt_mode_is_host_guest())
		pmu->global_ovf_ctrl_mask &=
				~MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI;

	entry = kvm_find_cpuid_entry(vcpu, 7, 0);
	if (entry &&
	    (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
	    (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM)))
		pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;

	bitmap_set(pmu->all_valid_pmc_idx,
		0, pmu->nr_arch_gp_counters);
	bitmap_set(pmu->all_valid_pmc_idx,
		INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters);

	nested_vmx_pmu_entry_exit_ctls_update(vcpu);

	if (intel_pmu_lbr_is_compatible(vcpu))
		x86_perf_get_lbr(&lbr_desc->records);
	else
		lbr_desc->records.nr = 0;

	if (lbr_desc->records.nr)
		bitmap_set(pmu->all_valid_pmc_idx, INTEL_PMC_IDX_FIXED_VLBR, 1);
}

static void intel_pmu_init(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);

	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
		pmu->gp_counters[i].current_config = 0;
	}

	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
		pmu->fixed_counters[i].type = KVM_PMC_FIXED;
		pmu->fixed_counters[i].vcpu = vcpu;
		pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
		pmu->fixed_counters[i].current_config = 0;
	}

	vcpu->arch.perf_capabilities = vmx_get_perf_capabilities();
	lbr_desc->records.nr = 0;
	lbr_desc->event = NULL;
	lbr_desc->msr_passthrough = false;
}

static void intel_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc = NULL;
	int i;

	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
		pmc = &pmu->gp_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = pmc->eventsel = 0;
	}

	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
		pmc = &pmu->fixed_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = 0;
	}

	pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status = 0;

	intel_pmu_release_guest_lbr_event(vcpu);
}

/*
 * Emulate LBR_On_PMI behavior for 1 < pmu.version < 4.
 *
 * If Freeze_LBR_On_PMI = 1, the LBRs are frozen on PMI and KVM emulates
 * this by clearing the LBR bit (bit 0) in IA32_DEBUGCTL.
 *
 * The guest needs to re-enable LBR to resume recording branches.
 */
static void intel_pmu_legacy_freezing_lbrs_on_pmi(struct kvm_vcpu *vcpu)
{
	u64 data = vmcs_read64(GUEST_IA32_DEBUGCTL);

	if (data & DEBUGCTLMSR_FREEZE_LBRS_ON_PMI) {
		data &= ~DEBUGCTLMSR_LBR;
		vmcs_write64(GUEST_IA32_DEBUGCTL, data);
	}
}

static void intel_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
{
	u8 version = vcpu_to_pmu(vcpu)->version;

	if (!intel_pmu_lbr_is_enabled(vcpu))
		return;

	if (version > 1 && version < 4)
		intel_pmu_legacy_freezing_lbrs_on_pmi(vcpu);
}

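/*
 * Toggle MSR interception for the whole LBR stack: the from/to/info
 * register arrays plus MSR_LBR_SELECT and MSR_LBR_TOS.
 */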
static void vmx_update_intercept_for_lbr_msrs(struct kvm_vcpu *vcpu, bool set)
{
	struct x86_pmu_lbr *lbr = vcpu_to_lbr_records(vcpu);
	int i;

	for (i = 0; i < lbr->nr; i++) {
		vmx_set_intercept_for_msr(vcpu, lbr->from + i, MSR_TYPE_RW, set);
		vmx_set_intercept_for_msr(vcpu, lbr->to + i, MSR_TYPE_RW, set);
		if (lbr->info)
			vmx_set_intercept_for_msr(vcpu, lbr->info + i, MSR_TYPE_RW, set);
	}

	vmx_set_intercept_for_msr(vcpu, MSR_LBR_SELECT, MSR_TYPE_RW, set);
	vmx_set_intercept_for_msr(vcpu, MSR_LBR_TOS, MSR_TYPE_RW, set);
}

static inline void vmx_disable_lbr_msrs_passthrough(struct kvm_vcpu *vcpu)
{
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);

	if (!lbr_desc->msr_passthrough)
		return;

	vmx_update_intercept_for_lbr_msrs(vcpu, true);
	lbr_desc->msr_passthrough = false;
}

static inline void vmx_enable_lbr_msrs_passthrough(struct kvm_vcpu *vcpu)
{
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);

	if (lbr_desc->msr_passthrough)
		return;

	vmx_update_intercept_for_lbr_msrs(vcpu, false);
	lbr_desc->msr_passthrough = true;
}

/*
 * Higher-priority host perf events (e.g. CPU-pinned) can reclaim PMU
 * resources (e.g. LBR) that were assigned to the guest. This is usually
 * done via IPIs (see perf_install_in_context() for more details).
 *
 * Before entering non-root mode (with IRQs disabled here), double-check
 * that the PMU features enabled for the guest have not been reclaimed by
 * higher-priority host events. Otherwise, disallow the vCPU's access to
 * the reclaimed features.
 */
void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);

	if (!lbr_desc->event) {
		vmx_disable_lbr_msrs_passthrough(vcpu);
		if (vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR)
			goto warn;
		if (test_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use))
			goto warn;
		return;
	}

	if (lbr_desc->event->state < PERF_EVENT_STATE_ACTIVE) {
		vmx_disable_lbr_msrs_passthrough(vcpu);
		__clear_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
		goto warn;
	} else
		vmx_enable_lbr_msrs_passthrough(vcpu);

	return;

warn:
	pr_warn_ratelimited("kvm: vcpu-%d: fail to passthrough LBR.\n",
		vcpu->vcpu_id);
}

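/*
 * Release the guest LBR event once the guest has cleared DEBUGCTL.LBR,
 * so the host can reclaim the LBR facility.
 */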
static void intel_pmu_cleanup(struct kvm_vcpu *vcpu)
{
	if (!(vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR))
		intel_pmu_release_guest_lbr_event(vcpu);
}

struct kvm_pmu_ops intel_pmu_ops = {
	.pmc_perf_hw_id = intel_pmc_perf_hw_id,
	.pmc_is_enabled = intel_pmc_is_enabled,
	.pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
	.rdpmc_ecx_to_pmc = intel_rdpmc_ecx_to_pmc,
	.msr_idx_to_pmc = intel_msr_idx_to_pmc,
	.is_valid_rdpmc_ecx = intel_is_valid_rdpmc_ecx,
	.is_valid_msr = intel_is_valid_msr,
	.get_msr = intel_pmu_get_msr,
	.set_msr = intel_pmu_set_msr,
	.refresh = intel_pmu_refresh,
	.init = intel_pmu_init,
	.reset = intel_pmu_reset,
	.deliver_pmi = intel_pmu_deliver_pmi,
	.cleanup = intel_pmu_cleanup,
};