xref: /openbmc/linux/arch/x86/kvm/pmu.c (revision aaa746ad)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Kernel-based Virtual Machine -- Performance Monitoring Unit support
4  *
5  * Copyright 2015 Red Hat, Inc. and/or its affiliates.
6  *
7  * Authors:
8  *   Avi Kivity   <avi@redhat.com>
9  *   Gleb Natapov <gleb@redhat.com>
10  *   Wei Huang    <wei@redhat.com>
11  */
12 
13 #include <linux/types.h>
14 #include <linux/kvm_host.h>
15 #include <linux/perf_event.h>
16 #include <linux/bsearch.h>
17 #include <linux/sort.h>
18 #include <asm/perf_event.h>
19 #include <asm/cpu_device_id.h>
20 #include "x86.h"
21 #include "cpuid.h"
22 #include "lapic.h"
23 #include "pmu.h"
24 
25 /* This is enough to filter the vast majority of currently defined events. */
26 #define KVM_PMU_EVENT_FILTER_MAX_EVENTS 300
27 
28 struct x86_pmu_capability __read_mostly kvm_pmu_cap;
29 EXPORT_SYMBOL_GPL(kvm_pmu_cap);
30 
31 static const struct x86_cpu_id vmx_icl_pebs_cpu[] = {
32 	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, NULL),
33 	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, NULL),
34 	{}
35 };
36 
37 /* NOTE:
38  * - Each perf counter is defined as "struct kvm_pmc";
39  * - There are two types of perf counters: general purpose (gp) and fixed.
40  *   gp counters are stored in gp_counters[] and fixed counters are stored
41  *   in fixed_counters[] respectively. Both of them are part of "struct
42  *   kvm_pmu";
43  * - pmu.c understands the difference between gp counters and fixed counters.
44  *   however, AMD doesn't support fixed counters;
45  * - There are three types of index to access perf counters (PMC):
46  *     1. MSR (named msr): For example Intel has MSR_IA32_PERFCTRn and AMD
47  *        has MSR_K7_PERFCTRn and, for families 15H and later,
48  *        MSR_F15H_PERF_CTRn, where MSR_F15H_PERF_CTR[0-3] are
49  *        aliased to MSR_K7_PERFCTRn.
50  *     2. MSR Index (named idx): This is normally used by the RDPMC
51  *        instruction. For instance, the AMD RDPMC instruction uses
52  *        0000_0003h in ECX to access C001_0007h (MSR_K7_PERFCTR3). Intel
53  *        has a similar mechanism, except that it also supports fixed
54  *        counters. idx can be used as an index into the gp and fixed counters.
55  *     3. Global PMC Index (named pmc): pmc is an index specific to PMU
56  *        code. Each pmc, stored in kvm_pmc.idx field, is unique across
57  *        all perf counters (both gp and fixed). The mapping relationship
58  *        between pmc and perf counters is as the following:
59  *        * Intel: [0 .. KVM_INTEL_PMC_MAX_GENERIC-1] <=> gp counters
60  *                 [INTEL_PMC_IDX_FIXED .. INTEL_PMC_IDX_FIXED + 2] <=> fixed
61  *        * AMD:   [0 .. AMD64_NUM_COUNTERS-1] and, for families 15H
62  *          and later, [0 .. AMD64_NUM_COUNTERS_CORE-1] <=> gp counters
63  */
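/*
 * As a concrete illustration of the mapping above (a sketch assuming an
 * Intel guest with three fixed counters): a guest RDPMC with ECX = 0 reads
 * gp counter 0 and corresponds to kvm_pmc.idx == 0, whereas RDPMC with
 * ECX = 4000_0001h (bit 30 selects the fixed-counter space) reads fixed
 * counter 1 and corresponds to kvm_pmc.idx == INTEL_PMC_IDX_FIXED + 1.
 */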
64 
65 static struct kvm_pmu_ops kvm_pmu_ops __read_mostly;
66 
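/*
 * Declare a static call for every PMU op listed in asm/kvm-x86-pmu-ops.h.
 * The calls start out NULL and are wired up to the vendor (Intel or AMD)
 * implementation by kvm_pmu_ops_update() below, which also WARNs if a
 * mandatory op is left unimplemented.
 */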
67 #define KVM_X86_PMU_OP(func)					     \
68 	DEFINE_STATIC_CALL_NULL(kvm_x86_pmu_##func,			     \
69 				*(((struct kvm_pmu_ops *)0)->func));
70 #define KVM_X86_PMU_OP_OPTIONAL KVM_X86_PMU_OP
71 #include <asm/kvm-x86-pmu-ops.h>
72 
73 void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops)
74 {
75 	memcpy(&kvm_pmu_ops, pmu_ops, sizeof(kvm_pmu_ops));
76 
77 #define __KVM_X86_PMU_OP(func) \
78 	static_call_update(kvm_x86_pmu_##func, kvm_pmu_ops.func);
79 #define KVM_X86_PMU_OP(func) \
80 	WARN_ON(!kvm_pmu_ops.func); __KVM_X86_PMU_OP(func)
81 #define KVM_X86_PMU_OP_OPTIONAL __KVM_X86_PMU_OP
82 #include <asm/kvm-x86-pmu-ops.h>
83 #undef __KVM_X86_PMU_OP
84 }
85 
86 static inline bool pmc_is_enabled(struct kvm_pmc *pmc)
87 {
88 	return static_call(kvm_x86_pmu_pmc_is_enabled)(pmc);
89 }
90 
91 static void kvm_pmi_trigger_fn(struct irq_work *irq_work)
92 {
93 	struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu, irq_work);
94 	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
95 
96 	kvm_pmu_deliver_pmi(vcpu);
97 }
98 
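/*
 * Record a counter overflow in the guest's global status and, unless the
 * overflow is suppressed for a PEBS counter, raise a PMI: either directly
 * via KVM_REQ_PMI, or via irq_work when running in NMI context where the
 * vCPU cannot be safely woken.
 */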
99 static inline void __kvm_perf_overflow(struct kvm_pmc *pmc, bool in_pmi)
100 {
101 	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
102 	bool skip_pmi = false;
103 
104 	if (pmc->perf_event && pmc->perf_event->attr.precise_ip) {
105 		if (!in_pmi) {
106 			/*
107 			 * TODO: KVM currently _chooses_ not to generate records for
108 			 * emulated instructions, which avoids a BUFFER_OVF PMI when
109 			 * there are no records. Strictly speaking, records should also
110 			 * be generated for emulated instructions to improve sampling accuracy.
111 			 */
112 			skip_pmi = true;
113 		} else {
114 			/* Indicate PEBS overflow PMI to guest. */
115 			skip_pmi = __test_and_set_bit(GLOBAL_STATUS_BUFFER_OVF_BIT,
116 						      (unsigned long *)&pmu->global_status);
117 		}
118 	} else {
119 		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
120 	}
121 
122 	if (!pmc->intr || skip_pmi)
123 		return;
124 
125 	/*
126 	 * Inject PMI. If the vCPU was in guest mode when the NMI arrived, the
127 	 * PMI can be injected on the next guest-mode re-entry. Otherwise we
128 	 * can't be sure that the vCPU wasn't executing a HLT instruction at
129 	 * the time of vmexit and isn't going to re-enter guest mode until it
130 	 * is woken up. So we should wake it, but that is impossible from NMI
131 	 * context. Do it from irq work instead.
132 	 */
133 	if (in_pmi && !kvm_handling_nmi_from_guest(pmc->vcpu))
134 		irq_work_queue(&pmc_to_pmu(pmc)->irq_work);
135 	else
136 		kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
137 }
138 
139 static void kvm_perf_overflow(struct perf_event *perf_event,
140 			      struct perf_sample_data *data,
141 			      struct pt_regs *regs)
142 {
143 	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
144 
145 	/*
146 	 * Ignore overflow events for counters that are scheduled to be
147 	 * reprogrammed, e.g. if a PMI for the previous event races with KVM's
148 	 * handling of a related guest WRMSR.
149 	 */
150 	if (test_and_set_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi))
151 		return;
152 
153 	__kvm_perf_overflow(pmc, true);
154 
155 	kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
156 }
157 
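/*
 * Back a guest PMC with a host perf_event: build a perf_event_attr from the
 * guest's configuration (requesting a PEBS precision level if the counter is
 * enabled in the guest's PEBS_ENABLE), create a pinned kernel counter that
 * is excluded from host-mode counting, and install kvm_perf_overflow() as
 * the overflow handler.
 */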
158 static int pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type, u64 config,
159 				 bool exclude_user, bool exclude_kernel,
160 				 bool intr)
161 {
162 	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
163 	struct perf_event *event;
164 	struct perf_event_attr attr = {
165 		.type = type,
166 		.size = sizeof(attr),
167 		.pinned = true,
168 		.exclude_idle = true,
169 		.exclude_host = 1,
170 		.exclude_user = exclude_user,
171 		.exclude_kernel = exclude_kernel,
172 		.config = config,
173 	};
174 	bool pebs = test_bit(pmc->idx, (unsigned long *)&pmu->pebs_enable);
175 
176 	attr.sample_period = get_sample_period(pmc, pmc->counter);
177 
178 	if ((attr.config & HSW_IN_TX_CHECKPOINTED) &&
179 	    guest_cpuid_is_intel(pmc->vcpu)) {
180 		/*
181 		 * HSW_IN_TX_CHECKPOINTED is not supported with nonzero
182 		 * period. Just clear the sample period so at least
183 		 * allocating the counter doesn't fail.
184 		 */
185 		attr.sample_period = 0;
186 	}
187 	if (pebs) {
188 		/*
189 		 * A non-zero precision level turns the ordinary guest event into
190 		 * a guest PEBS event, which triggers the host PEBS PMI handler
191 		 * to determine whether the PEBS overflow PMI comes from the host
192 		 * counters or the guest.
193 		 *
194 		 * For most PEBS hardware events, the difference in the software
195 		 * precision levels of guest and host PEBS events will not affect
196 		 * the accuracy of the PEBS profiling result, because the "event IP"
197 		 * in the PEBS record is calibrated on the guest side.
198 		 *
199 		 * On Icelake everything is fine. Other hardware (GLC+, TNT+) that
200 		 * could possibly care here is unsupported and needs changes.
201 		 */
202 		attr.precise_ip = 1;
203 		if (x86_match_cpu(vmx_icl_pebs_cpu) && pmc->idx == 32)
204 			attr.precise_ip = 3;
205 	}
206 
207 	event = perf_event_create_kernel_counter(&attr, -1, current,
208 						 kvm_perf_overflow, pmc);
209 	if (IS_ERR(event)) {
210 		pr_debug_ratelimited("kvm_pmu: event creation failed %ld for pmc->idx = %d\n",
211 			    PTR_ERR(event), pmc->idx);
212 		return PTR_ERR(event);
213 	}
214 
215 	pmc->perf_event = event;
216 	pmc_to_pmu(pmc)->event_count++;
217 	pmc->is_paused = false;
218 	pmc->intr = intr || pebs;
219 	return 0;
220 }
221 
222 static void pmc_pause_counter(struct kvm_pmc *pmc)
223 {
224 	u64 counter = pmc->counter;
225 
226 	if (!pmc->perf_event || pmc->is_paused)
227 		return;
228 
229 	/* update counter, reset event value to avoid redundant accumulation */
230 	counter += perf_event_pause(pmc->perf_event, true);
231 	pmc->counter = counter & pmc_bitmask(pmc);
232 	pmc->is_paused = true;
233 }
234 
235 static bool pmc_resume_counter(struct kvm_pmc *pmc)
236 {
237 	if (!pmc->perf_event)
238 		return false;
239 
240 	/* recalibrate sample period and check if it's accepted by perf core */
241 	if (perf_event_period(pmc->perf_event,
242 			      get_sample_period(pmc, pmc->counter)))
243 		return false;
244 
245 	if (test_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->pebs_enable) !=
246 	    (!!pmc->perf_event->attr.precise_ip))
247 		return false;
248 
249 	/* Reuse the perf_event, as pmc_reprogram_counter() would do. */
250 	perf_event_enable(pmc->perf_event);
251 	pmc->is_paused = false;
252 
253 	return true;
254 }
255 
256 static int cmp_u64(const void *pa, const void *pb)
257 {
258 	u64 a = *(u64 *)pa;
259 	u64 b = *(u64 *)pb;
260 
261 	return (a > b) - (a < b);
262 }
263 
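/*
 * Check the guest event against the userspace-provided PMU event filter, if
 * one is installed.  GP counters are matched by event select and unit mask
 * (binary search of the sorted filter->events list); fixed counters are
 * matched against fixed_counter_bitmap.  Events the vendor PMU reports as
 * unavailable are always rejected.  Returns true if programming is allowed.
 */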
264 static bool check_pmu_event_filter(struct kvm_pmc *pmc)
265 {
266 	struct kvm_pmu_event_filter *filter;
267 	struct kvm *kvm = pmc->vcpu->kvm;
268 	bool allow_event = true;
269 	__u64 key;
270 	int idx;
271 
272 	if (!static_call(kvm_x86_pmu_hw_event_available)(pmc))
273 		return false;
274 
275 	filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
276 	if (!filter)
277 		goto out;
278 
279 	if (pmc_is_gp(pmc)) {
280 		key = pmc->eventsel & AMD64_RAW_EVENT_MASK_NB;
281 		if (bsearch(&key, filter->events, filter->nevents,
282 			    sizeof(__u64), cmp_u64))
283 			allow_event = filter->action == KVM_PMU_EVENT_ALLOW;
284 		else
285 			allow_event = filter->action == KVM_PMU_EVENT_DENY;
286 	} else {
287 		idx = pmc->idx - INTEL_PMC_IDX_FIXED;
288 		if (filter->action == KVM_PMU_EVENT_DENY &&
289 		    test_bit(idx, (ulong *)&filter->fixed_counter_bitmap))
290 			allow_event = false;
291 		if (filter->action == KVM_PMU_EVENT_ALLOW &&
292 		    !test_bit(idx, (ulong *)&filter->fixed_counter_bitmap))
293 			allow_event = false;
294 	}
295 
296 out:
297 	return allow_event;
298 }
299 
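/*
 * (Re)program the host perf_event backing a guest PMC: pause the current
 * event, bail if the counter is disabled or rejected by the event filter,
 * emulate an overflow if an emulated increment wrapped the counter,
 * translate fixed-counter control bits into eventsel-style flags, and then
 * either resume the existing perf_event (if the config is unchanged) or
 * release it and create a new one.
 */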
300 static void reprogram_counter(struct kvm_pmc *pmc)
301 {
302 	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
303 	u64 eventsel = pmc->eventsel;
304 	u64 new_config = eventsel;
305 	u8 fixed_ctr_ctrl;
306 
307 	pmc_pause_counter(pmc);
308 
309 	if (!pmc_speculative_in_use(pmc) || !pmc_is_enabled(pmc))
310 		goto reprogram_complete;
311 
312 	if (!check_pmu_event_filter(pmc))
313 		goto reprogram_complete;
314 
315 	if (pmc->counter < pmc->prev_counter)
316 		__kvm_perf_overflow(pmc, false);
317 
318 	if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
319 		printk_once("kvm pmu: pin control bit is ignored\n");
320 
321 	if (pmc_is_fixed(pmc)) {
322 		fixed_ctr_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl,
323 						  pmc->idx - INTEL_PMC_IDX_FIXED);
324 		if (fixed_ctr_ctrl & 0x1)
325 			eventsel |= ARCH_PERFMON_EVENTSEL_OS;
326 		if (fixed_ctr_ctrl & 0x2)
327 			eventsel |= ARCH_PERFMON_EVENTSEL_USR;
328 		if (fixed_ctr_ctrl & 0x8)
329 			eventsel |= ARCH_PERFMON_EVENTSEL_INT;
330 		new_config = (u64)fixed_ctr_ctrl;
331 	}
332 
333 	if (pmc->current_config == new_config && pmc_resume_counter(pmc))
334 		goto reprogram_complete;
335 
336 	pmc_release_perf_event(pmc);
337 
338 	pmc->current_config = new_config;
339 
340 	/*
341 	 * If reprogramming fails, e.g. due to contention, leave the counter's
342 	 * reprogram bit set, i.e. opportunistically try again on the next PMU
343 	 * refresh.  Don't make a new request as doing so can stall the guest
344 	 * if reprogramming repeatedly fails.
345 	 */
346 	if (pmc_reprogram_counter(pmc, PERF_TYPE_RAW,
347 				  (eventsel & pmu->raw_event_mask),
348 				  !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
349 				  !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
350 				  eventsel & ARCH_PERFMON_EVENTSEL_INT))
351 		return;
352 
353 reprogram_complete:
354 	clear_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->reprogram_pmi);
355 	pmc->prev_counter = 0;
356 }
357 
358 void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
359 {
360 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
361 	int bit;
362 
363 	for_each_set_bit(bit, pmu->reprogram_pmi, X86_PMC_IDX_MAX) {
364 		struct kvm_pmc *pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, bit);
365 
366 		if (unlikely(!pmc)) {
367 			clear_bit(bit, pmu->reprogram_pmi);
368 			continue;
369 		}
370 
371 		reprogram_counter(pmc);
372 	}
373 
374 	/*
375 	 * Unused perf_events are only released if the corresponding MSRs
376 	 * weren't accessed during the last vCPU time slice. kvm_arch_sched_in
377 	 * triggers KVM_REQ_PMU if cleanup is needed.
378 	 */
379 	if (unlikely(pmu->need_cleanup))
380 		kvm_pmu_cleanup(vcpu);
381 }
382 
383 /* check if idx is a valid index to access PMU */
384 bool kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
385 {
386 	return static_call(kvm_x86_pmu_is_valid_rdpmc_ecx)(vcpu, idx);
387 }
388 
389 bool is_vmware_backdoor_pmc(u32 pmc_idx)
390 {
391 	switch (pmc_idx) {
392 	case VMWARE_BACKDOOR_PMC_HOST_TSC:
393 	case VMWARE_BACKDOOR_PMC_REAL_TIME:
394 	case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
395 		return true;
396 	}
397 	return false;
398 }
399 
400 static int kvm_pmu_rdpmc_vmware(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
401 {
402 	u64 ctr_val;
403 
404 	switch (idx) {
405 	case VMWARE_BACKDOOR_PMC_HOST_TSC:
406 		ctr_val = rdtsc();
407 		break;
408 	case VMWARE_BACKDOOR_PMC_REAL_TIME:
409 		ctr_val = ktime_get_boottime_ns();
410 		break;
411 	case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
412 		ctr_val = ktime_get_boottime_ns() +
413 			vcpu->kvm->arch.kvmclock_offset;
414 		break;
415 	default:
416 		return 1;
417 	}
418 
419 	*data = ctr_val;
420 	return 0;
421 }
422 
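/*
 * Emulate guest RDPMC.  Bit 31 of the guest's ECX requests a "fast" read
 * that returns only the low 32 bits of the counter; the vendor code maps the
 * remaining index bits to a PMC.  The read is rejected (the caller injects
 * #GP) if the vPMU is disabled, the index is invalid, or an unprivileged
 * guest reads in protected mode with CR4.PCE clear.
 */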
423 int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
424 {
425 	bool fast_mode = idx & (1u << 31);
426 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
427 	struct kvm_pmc *pmc;
428 	u64 mask = fast_mode ? ~0u : ~0ull;
429 
430 	if (!pmu->version)
431 		return 1;
432 
433 	if (is_vmware_backdoor_pmc(idx))
434 		return kvm_pmu_rdpmc_vmware(vcpu, idx, data);
435 
436 	pmc = static_call(kvm_x86_pmu_rdpmc_ecx_to_pmc)(vcpu, idx, &mask);
437 	if (!pmc)
438 		return 1;
439 
440 	if (!(kvm_read_cr4(vcpu) & X86_CR4_PCE) &&
441 	    (static_call(kvm_x86_get_cpl)(vcpu) != 0) &&
442 	    (kvm_read_cr0(vcpu) & X86_CR0_PE))
443 		return 1;
444 
445 	*data = pmc_read_counter(pmc) & mask;
446 	return 0;
447 }
448 
449 void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
450 {
451 	if (lapic_in_kernel(vcpu)) {
452 		static_call_cond(kvm_x86_pmu_deliver_pmi)(vcpu);
453 		kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
454 	}
455 }
456 
457 bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
458 {
459 	return static_call(kvm_x86_pmu_msr_idx_to_pmc)(vcpu, msr) ||
460 		static_call(kvm_x86_pmu_is_valid_msr)(vcpu, msr);
461 }
462 
463 static void kvm_pmu_mark_pmc_in_use(struct kvm_vcpu *vcpu, u32 msr)
464 {
465 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
466 	struct kvm_pmc *pmc = static_call(kvm_x86_pmu_msr_idx_to_pmc)(vcpu, msr);
467 
468 	if (pmc)
469 		__set_bit(pmc->idx, pmu->pmc_in_use);
470 }
471 
472 int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
473 {
474 	return static_call(kvm_x86_pmu_get_msr)(vcpu, msr_info);
475 }
476 
477 int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
478 {
479 	kvm_pmu_mark_pmc_in_use(vcpu, msr_info->index);
480 	return static_call(kvm_x86_pmu_set_msr)(vcpu, msr_info);
481 }
482 
483 /* Refresh the PMU settings. This function is generally called when the
484  * underlying settings change (such as a change to the guest's PMU-related
485  * CPUID), which should rarely happen.
486  */
487 void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
488 {
489 	static_call(kvm_x86_pmu_refresh)(vcpu);
490 }
491 
492 void kvm_pmu_reset(struct kvm_vcpu *vcpu)
493 {
494 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
495 
496 	irq_work_sync(&pmu->irq_work);
497 	static_call(kvm_x86_pmu_reset)(vcpu);
498 }
499 
500 void kvm_pmu_init(struct kvm_vcpu *vcpu)
501 {
502 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
503 
504 	memset(pmu, 0, sizeof(*pmu));
505 	static_call(kvm_x86_pmu_init)(vcpu);
506 	init_irq_work(&pmu->irq_work, kvm_pmi_trigger_fn);
507 	pmu->event_count = 0;
508 	pmu->need_cleanup = false;
509 	kvm_pmu_refresh(vcpu);
510 }
511 
512 /* Release perf_events for vPMCs that have been unused for a full time slice.  */
513 void kvm_pmu_cleanup(struct kvm_vcpu *vcpu)
514 {
515 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
516 	struct kvm_pmc *pmc = NULL;
517 	DECLARE_BITMAP(bitmask, X86_PMC_IDX_MAX);
518 	int i;
519 
520 	pmu->need_cleanup = false;
521 
522 	bitmap_andnot(bitmask, pmu->all_valid_pmc_idx,
523 		      pmu->pmc_in_use, X86_PMC_IDX_MAX);
524 
525 	for_each_set_bit(i, bitmask, X86_PMC_IDX_MAX) {
526 		pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, i);
527 
528 		if (pmc && pmc->perf_event && !pmc_speculative_in_use(pmc))
529 			pmc_stop_counter(pmc);
530 	}
531 
532 	static_call_cond(kvm_x86_pmu_cleanup)(vcpu);
533 
534 	bitmap_zero(pmu->pmc_in_use, X86_PMC_IDX_MAX);
535 }
536 
537 void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
538 {
539 	kvm_pmu_reset(vcpu);
540 }
541 
542 static void kvm_pmu_incr_counter(struct kvm_pmc *pmc)
543 {
544 	pmc->prev_counter = pmc->counter;
545 	pmc->counter = (pmc->counter + 1) & pmc_bitmask(pmc);
546 	kvm_pmu_request_counter_reprogam(pmc);
547 }
548 
549 static inline bool eventsel_match_perf_hw_id(struct kvm_pmc *pmc,
550 	unsigned int perf_hw_id)
551 {
552 	return !((pmc->eventsel ^ perf_get_hw_event_config(perf_hw_id)) &
553 		AMD64_RAW_EVENT_MASK_NB);
554 }
555 
556 static inline bool cpl_is_matched(struct kvm_pmc *pmc)
557 {
558 	bool select_os, select_user;
559 	u64 config;
560 
561 	if (pmc_is_gp(pmc)) {
562 		config = pmc->eventsel;
563 		select_os = config & ARCH_PERFMON_EVENTSEL_OS;
564 		select_user = config & ARCH_PERFMON_EVENTSEL_USR;
565 	} else {
566 		config = fixed_ctrl_field(pmc_to_pmu(pmc)->fixed_ctr_ctrl,
567 					  pmc->idx - INTEL_PMC_IDX_FIXED);
568 		select_os = config & 0x1;
569 		select_user = config & 0x2;
570 	}
571 
572 	return (static_call(kvm_x86_get_cpl)(pmc->vcpu) == 0) ? select_os : select_user;
573 }
574 
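/*
 * Account an event that KVM itself emulated, e.g. an instruction or branch
 * retired by the emulator, on every enabled, in-use counter whose event
 * selector matches the given generic perf hardware event and whose OS/USER
 * filtering matches the current guest CPL.
 */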
575 void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id)
576 {
577 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
578 	struct kvm_pmc *pmc;
579 	int i;
580 
581 	for_each_set_bit(i, pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX) {
582 		pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, i);
583 
584 		if (!pmc || !pmc_is_enabled(pmc) || !pmc_speculative_in_use(pmc))
585 			continue;
586 
587 		/* Ignore checks for edge detect, pin control, invert and CMASK bits */
588 		if (eventsel_match_perf_hw_id(pmc, perf_hw_id) && cpl_is_matched(pmc))
589 			kvm_pmu_incr_counter(pmc);
590 	}
591 }
592 EXPORT_SYMBOL_GPL(kvm_pmu_trigger_event);
593 
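/*
 * VM ioctl handler for KVM_SET_PMU_EVENT_FILTER.  A minimal userspace sketch
 * of installing an allow-list filter (hypothetical vm_fd and event value,
 * assuming <linux/kvm.h>, <sys/ioctl.h> and the usual libc headers):
 *
 *	size_t sz = sizeof(struct kvm_pmu_event_filter) + sizeof(__u64);
 *	struct kvm_pmu_event_filter *f = calloc(1, sz);
 *
 *	f->action = KVM_PMU_EVENT_ALLOW;
 *	f->nevents = 1;
 *	f->fixed_counter_bitmap = 0x7;	// keep fixed counters usable
 *	f->events[0] = 0xc0;		// e.g. arch "instructions retired"
 *	if (ioctl(vm_fd, KVM_SET_PMU_EVENT_FILTER, f))
 *		perror("KVM_SET_PMU_EVENT_FILTER");
 *	free(f);
 */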
594 int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
595 {
596 	struct kvm_pmu_event_filter tmp, *filter;
597 	struct kvm_vcpu *vcpu;
598 	unsigned long i;
599 	size_t size;
600 	int r;
601 
602 	if (copy_from_user(&tmp, argp, sizeof(tmp)))
603 		return -EFAULT;
604 
605 	if (tmp.action != KVM_PMU_EVENT_ALLOW &&
606 	    tmp.action != KVM_PMU_EVENT_DENY)
607 		return -EINVAL;
608 
609 	if (tmp.flags != 0)
610 		return -EINVAL;
611 
612 	if (tmp.nevents > KVM_PMU_EVENT_FILTER_MAX_EVENTS)
613 		return -E2BIG;
614 
615 	size = struct_size(filter, events, tmp.nevents);
616 	filter = kmalloc(size, GFP_KERNEL_ACCOUNT);
617 	if (!filter)
618 		return -ENOMEM;
619 
620 	r = -EFAULT;
621 	if (copy_from_user(filter, argp, size))
622 		goto cleanup;
623 
624 	/* Ensure nevents can't be changed between the user copies. */
625 	*filter = tmp;
626 
627 	/*
628 	 * Sort the in-kernel list so that we can search it with bsearch.
629 	 */
630 	sort(&filter->events, filter->nevents, sizeof(__u64), cmp_u64, NULL);
631 
632 	mutex_lock(&kvm->lock);
633 	filter = rcu_replace_pointer(kvm->arch.pmu_event_filter, filter,
634 				     mutex_is_locked(&kvm->lock));
635 	synchronize_srcu_expedited(&kvm->srcu);
636 
637 	BUILD_BUG_ON(sizeof(((struct kvm_pmu *)0)->reprogram_pmi) >
638 		     sizeof(((struct kvm_pmu *)0)->__reprogram_pmi));
639 
640 	kvm_for_each_vcpu(i, vcpu, kvm)
641 		atomic64_set(&vcpu_to_pmu(vcpu)->__reprogram_pmi, -1ull);
642 
643 	kvm_make_all_cpus_request(kvm, KVM_REQ_PMU);
644 
645 	mutex_unlock(&kvm->lock);
646 
647 	r = 0;
648 cleanup:
649 	kfree(filter);
650 	return r;
651 }
652