xref: /openbmc/linux/arch/arm64/kvm/pmu-emul.c (revision 130abaa1)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/perf_event.h>
#include <linux/perf/arm_pmu.h>
#include <linux/uaccess.h>
#include <asm/kvm_emulate.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_vgic.h>
#include <asm/arm_pmuv3.h>

#define PERF_ATTR_CFG1_COUNTER_64BIT	BIT(0)

DEFINE_STATIC_KEY_FALSE(kvm_arm_pmu_available);

static LIST_HEAD(arm_pmus);
static DEFINE_MUTEX(arm_pmus_lock);

static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc);
static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc);

static struct kvm_vcpu *kvm_pmc_to_vcpu(const struct kvm_pmc *pmc)
{
	return container_of(pmc, struct kvm_vcpu, arch.pmu.pmc[pmc->idx]);
}

static struct kvm_pmc *kvm_vcpu_idx_to_pmc(struct kvm_vcpu *vcpu, int cnt_idx)
{
	return &vcpu->arch.pmu.pmc[cnt_idx];
}

static u32 __kvm_pmu_event_mask(unsigned int pmuver)
{
	switch (pmuver) {
	case ID_AA64DFR0_EL1_PMUVer_IMP:
		return GENMASK(9, 0);
	case ID_AA64DFR0_EL1_PMUVer_V3P1:
	case ID_AA64DFR0_EL1_PMUVer_V3P4:
	case ID_AA64DFR0_EL1_PMUVer_V3P5:
	case ID_AA64DFR0_EL1_PMUVer_V3P7:
		return GENMASK(15, 0);
	default:		/* Shouldn't be here, just for sanity */
		WARN_ONCE(1, "Unknown PMU version %d\n", pmuver);
		return 0;
	}
}
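
/*
 * Worked example: a baseline PMUv3 implementation
 * (ID_AA64DFR0_EL1.PMUVer == IMP) has 10-bit event numbers, so the
 * mask is GENMASK(9, 0) == 0x3ff and valid events are 0x000-0x3ff.
 * From PMUv3p1 onwards the event space is 16 bits wide, giving
 * GENMASK(15, 0) == 0xffff and events 0x0000-0xffff.
 */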

static u32 kvm_pmu_event_mask(struct kvm *kvm)
{
	u64 dfr0 = IDREG(kvm, SYS_ID_AA64DFR0_EL1);
	u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, dfr0);

	return __kvm_pmu_event_mask(pmuver);
}

/**
 * kvm_pmc_is_64bit - determine if counter is 64bit
 * @pmc: counter context
 */
static bool kvm_pmc_is_64bit(struct kvm_pmc *pmc)
{
	return (pmc->idx == ARMV8_PMU_CYCLE_IDX ||
		kvm_pmu_is_3p5(kvm_pmc_to_vcpu(pmc)));
}

static bool kvm_pmc_has_64bit_overflow(struct kvm_pmc *pmc)
{
	u64 val = __vcpu_sys_reg(kvm_pmc_to_vcpu(pmc), PMCR_EL0);

	return (pmc->idx < ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LP)) ||
	       (pmc->idx == ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LC));
}

static bool kvm_pmu_counter_can_chain(struct kvm_pmc *pmc)
{
	return (!(pmc->idx & 1) && (pmc->idx + 1) < ARMV8_PMU_CYCLE_IDX &&
		!kvm_pmc_has_64bit_overflow(pmc));
}
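
/*
 * Note on chaining: an even-numbered counter <n> can be chained with
 * its odd neighbour <n+1> programmed with the CHAIN event. When
 * counter <n> wraps at 32 bits, kvm_pmu_counter_increment() feeds an
 * ARMV8_PMUV3_PERFCTR_CHAIN increment into counter <n+1>, emulating a
 * single 64-bit counter. Chaining is pointless (and therefore treated
 * as impossible) when the counter already overflows at 64 bits.
 */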

static u32 counter_index_to_reg(u64 idx)
{
	return (idx == ARMV8_PMU_CYCLE_IDX) ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + idx;
}

static u32 counter_index_to_evtreg(u64 idx)
{
	return (idx == ARMV8_PMU_CYCLE_IDX) ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + idx;
}
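
/*
 * Event counters 0..30 map to PMEVCNTR<n>_EL0 / PMEVTYPER<n>_EL0,
 * while the cycle counter (ARMV8_PMU_CYCLE_IDX, i.e. 31) maps to the
 * dedicated PMCCNTR_EL0 / PMCCFILTR_EL0 registers.
 */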

static u64 kvm_pmu_get_pmc_value(struct kvm_pmc *pmc)
{
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	u64 counter, reg, enabled, running;

	reg = counter_index_to_reg(pmc->idx);
	counter = __vcpu_sys_reg(vcpu, reg);

	/*
	 * The real counter value is equal to the value of the counter
	 * register plus whatever the attached perf event has counted.
	 */
	if (pmc->perf_event)
		counter += perf_event_read_value(pmc->perf_event, &enabled,
						 &running);

	if (!kvm_pmc_is_64bit(pmc))
		counter = lower_32_bits(counter);

	return counter;
}

/**
 * kvm_pmu_get_counter_value - get PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
{
	if (!kvm_vcpu_has_pmu(vcpu))
		return 0;

	return kvm_pmu_get_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, select_idx));
}

static void kvm_pmu_set_pmc_value(struct kvm_pmc *pmc, u64 val, bool force)
{
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	u64 reg;

	kvm_pmu_release_perf_event(pmc);

	reg = counter_index_to_reg(pmc->idx);

	if (vcpu_mode_is_32bit(vcpu) && pmc->idx != ARMV8_PMU_CYCLE_IDX &&
	    !force) {
		/*
		 * Even with PMUv3p5, AArch32 cannot write to the top
		 * 32bit of the counters. The only possible course of
		 * action is to use PMCR.P, which will reset them to
		 * 0 (the only use of the 'force' parameter).
		 */
		val = (__vcpu_sys_reg(vcpu, reg) & GENMASK(63, 32)) |
		      lower_32_bits(val);
	}

	__vcpu_sys_reg(vcpu, reg) = val;

	/* Recreate the perf event to reflect the updated sample_period */
	kvm_pmu_create_perf_event(pmc);
}
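
/*
 * Worked example: an AArch32 guest writes 0xdead to event counter 2
 * whose current value is 0xabcd0000_12345678. The top half is
 * preserved and only the bottom 32 bits take the written value,
 * yielding 0xabcd0000_0000dead. A PMCR.P-driven reset (force == true)
 * skips the narrowing and zeroes all 64 bits.
 */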

/**
 * kvm_pmu_set_counter_value - set PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 * @val: The counter value
 */
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
{
	if (!kvm_vcpu_has_pmu(vcpu))
		return;

	kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, select_idx), val, false);
}

/**
 * kvm_pmu_release_perf_event - remove the perf event
 * @pmc: The PMU counter pointer
 */
static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
{
	if (pmc->perf_event) {
		perf_event_disable(pmc->perf_event);
		perf_event_release_kernel(pmc->perf_event);
		pmc->perf_event = NULL;
	}
}

/**
 * kvm_pmu_stop_counter - stop PMU counter
 * @pmc: The PMU counter pointer
 *
 * If this counter has been configured to monitor some event, release it here.
 */
static void kvm_pmu_stop_counter(struct kvm_pmc *pmc)
{
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	u64 reg, val;

	if (!pmc->perf_event)
		return;

	val = kvm_pmu_get_pmc_value(pmc);

	reg = counter_index_to_reg(pmc->idx);

	__vcpu_sys_reg(vcpu, reg) = val;

	kvm_pmu_release_perf_event(pmc);
}

/**
 * kvm_pmu_vcpu_init - assign pmu counter idx for cpu
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
		pmu->pmc[i].idx = i;
}

/**
 * kvm_pmu_vcpu_reset - reset pmu state for cpu
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
{
	unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
	int i;

	for_each_set_bit(i, &mask, 32)
		kvm_pmu_stop_counter(kvm_vcpu_idx_to_pmc(vcpu, i));
}

/**
 * kvm_pmu_vcpu_destroy - free perf event of PMU for cpu
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	int i;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
		kvm_pmu_release_perf_event(kvm_vcpu_idx_to_pmc(vcpu, i));
	irq_work_sync(&vcpu->arch.pmu.overflow_work);
}

u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
	u64 val = __vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT;

	val &= ARMV8_PMU_PMCR_N_MASK;
	if (val == 0)
		return BIT(ARMV8_PMU_CYCLE_IDX);
	else
		return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
}
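
/*
 * Worked example: with PMCR_EL0.N == 6 the mask is
 * GENMASK(5, 0) | BIT(31) == 0x8000003f, i.e. event counters 0-5 plus
 * the cycle counter. With N == 0 only the cycle counter is valid.
 */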

/**
 * kvm_pmu_enable_counter_mask - enable selected PMU counters
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCNTENSET register
 *
 * Call perf_event_enable to start counting the perf event
 */
void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
{
	int i;

	if (!kvm_vcpu_has_pmu(vcpu))
		return;

	if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
		return;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		struct kvm_pmc *pmc;

		if (!(val & BIT(i)))
			continue;

		pmc = kvm_vcpu_idx_to_pmc(vcpu, i);

		if (!pmc->perf_event) {
			kvm_pmu_create_perf_event(pmc);
		} else {
			perf_event_enable(pmc->perf_event);
			if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
				kvm_debug("fail to enable perf event\n");
		}
	}
}

/**
 * kvm_pmu_disable_counter_mask - disable selected PMU counters
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCNTENCLR register
 *
 * Call perf_event_disable to stop counting the perf event
 */
void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
{
	int i;

	if (!kvm_vcpu_has_pmu(vcpu) || !val)
		return;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		struct kvm_pmc *pmc;

		if (!(val & BIT(i)))
			continue;

		pmc = kvm_vcpu_idx_to_pmc(vcpu, i);

		if (pmc->perf_event)
			perf_event_disable(pmc->perf_event);
	}
}

static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
{
	u64 reg = 0;

	if ((__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
		reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
		reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
	}

	return reg;
}
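
/*
 * Worked example: with PMCR_EL0.E set, PMOVSSET_EL0 == 0x5 (counters
 * 0 and 2 overflowed) and PMINTENSET_EL1 == 0x4 (only counter 2 may
 * interrupt), the overflow status is 0x5 & 0x4 == 0x4, so the PMU
 * interrupt line is asserted.
 */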

static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	bool overflow;

	if (!kvm_vcpu_has_pmu(vcpu))
		return;

	overflow = !!kvm_pmu_overflow_status(vcpu);
	if (pmu->irq_level == overflow)
		return;

	pmu->irq_level = overflow;

	if (likely(irqchip_in_kernel(vcpu->kvm))) {
		int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
					      pmu->irq_num, overflow, pmu);
		WARN_ON(ret);
	}
}

bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
	bool run_level = sregs->device_irq_level & KVM_ARM_DEV_PMU;

	if (likely(irqchip_in_kernel(vcpu->kvm)))
		return false;

	return pmu->irq_level != run_level;
}

/*
 * Reflect the PMU overflow interrupt output level into the kvm_run structure
 */
void kvm_pmu_update_run(struct kvm_vcpu *vcpu)
{
	struct kvm_sync_regs *regs = &vcpu->run->s.regs;

	/* Populate the PMU bit of the device IRQ bitmap for user space */
	regs->device_irq_level &= ~KVM_ARM_DEV_PMU;
	if (vcpu->arch.pmu.irq_level)
		regs->device_irq_level |= KVM_ARM_DEV_PMU;
}

/**
 * kvm_pmu_flush_hwstate - flush pmu state to cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the host, and inject
 * an interrupt if that was the case.
 */
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu)
{
	kvm_pmu_update_state(vcpu);
}

/**
 * kvm_pmu_sync_hwstate - sync pmu state from cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the guest, and
 * inject an interrupt if that was the case.
 */
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
{
	kvm_pmu_update_state(vcpu);
}

/*
 * When the perf interrupt is an NMI, we cannot safely notify the vcpu
 * corresponding to the event. This is why we need a callback to do it
 * once outside of the NMI context.
 */
static void kvm_pmu_perf_overflow_notify_vcpu(struct irq_work *work)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(work, struct kvm_vcpu, arch.pmu.overflow_work);
	kvm_vcpu_kick(vcpu);
}

/*
 * Perform an increment on any of the counters described in @mask,
 * generating the overflow if required, and propagate it as a chained
 * event if possible.
 */
static void kvm_pmu_counter_increment(struct kvm_vcpu *vcpu,
				      unsigned long mask, u32 event)
{
	int i;

	if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
		return;

	/* Weed out disabled counters */
	mask &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);

	for_each_set_bit(i, &mask, ARMV8_PMU_CYCLE_IDX) {
		struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
		u64 type, reg;

		/* Filter on event type */
		type = __vcpu_sys_reg(vcpu, counter_index_to_evtreg(i));
		type &= kvm_pmu_event_mask(vcpu->kvm);
		if (type != event)
			continue;

		/* Increment this counter */
		reg = __vcpu_sys_reg(vcpu, counter_index_to_reg(i)) + 1;
		if (!kvm_pmc_is_64bit(pmc))
			reg = lower_32_bits(reg);
		__vcpu_sys_reg(vcpu, counter_index_to_reg(i)) = reg;

		/* No overflow? move on */
		if (kvm_pmc_has_64bit_overflow(pmc) ? reg : lower_32_bits(reg))
			continue;

		/* Mark overflow */
		__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);

		if (kvm_pmu_counter_can_chain(pmc))
			kvm_pmu_counter_increment(vcpu, BIT(i + 1),
						  ARMV8_PMUV3_PERFCTR_CHAIN);
	}
}

/* Compute the sample period for a given counter value */
static u64 compute_period(struct kvm_pmc *pmc, u64 counter)
{
	u64 val;

	if (kvm_pmc_is_64bit(pmc) && kvm_pmc_has_64bit_overflow(pmc))
		val = (-counter) & GENMASK(63, 0);
	else
		val = (-counter) & GENMASK(31, 0);

	return val;
}
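
/*
 * Worked example: for a counter overflowing at 32 bits and currently
 * holding 0xfffffff0, the sample period is
 * (-0xfffffff0) & GENMASK(31, 0) == 0x10: the backing perf event
 * fires after 16 more increments, exactly when the emulated counter
 * wraps.
 */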

/*
 * When the perf event overflows, set the overflow status and inform the vcpu.
 */
static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
				  struct perf_sample_data *data,
				  struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	struct arm_pmu *cpu_pmu = to_arm_pmu(perf_event->pmu);
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	int idx = pmc->idx;
	u64 period;

	cpu_pmu->pmu.stop(perf_event, PERF_EF_UPDATE);

	/*
	 * Reset the sample period to the architectural limit,
	 * i.e. the point where the counter overflows.
	 */
	period = compute_period(pmc, local64_read(&perf_event->count));

	local64_set(&perf_event->hw.period_left, 0);
	perf_event->attr.sample_period = period;
	perf_event->hw.sample_period = period;

	__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);

	if (kvm_pmu_counter_can_chain(pmc))
		kvm_pmu_counter_increment(vcpu, BIT(idx + 1),
					  ARMV8_PMUV3_PERFCTR_CHAIN);

	if (kvm_pmu_overflow_status(vcpu)) {
		kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);

		if (!in_nmi())
			kvm_vcpu_kick(vcpu);
		else
			irq_work_queue(&vcpu->arch.pmu.overflow_work);
	}

	cpu_pmu->pmu.start(perf_event, PERF_EF_RELOAD);
}
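
/*
 * Note: the stop(PERF_EF_UPDATE)/start(PERF_EF_RELOAD) bracket above
 * is what makes rewriting sample_period safe here; the event must be
 * stopped (with its count updated) before the period is changed, and
 * PERF_EF_RELOAD asks the PMU driver to reprogram the hardware with
 * the new period on restart.
 */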

/**
 * kvm_pmu_software_increment - do software increment
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMSWINC register
 */
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
{
	kvm_pmu_counter_increment(vcpu, val, ARMV8_PMUV3_PERFCTR_SW_INCR);
}

/**
 * kvm_pmu_handle_pmcr - handle PMCR register
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCR register
 */
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
{
	int i;

	if (!kvm_vcpu_has_pmu(vcpu))
		return;

	/* Fixup PMCR_EL0 to reconcile the PMU version and the LP bit */
	if (!kvm_pmu_is_3p5(vcpu))
		val &= ~ARMV8_PMU_PMCR_LP;

	/* The reset bits don't indicate any state, and shouldn't be saved. */
	__vcpu_sys_reg(vcpu, PMCR_EL0) = val & ~(ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_P);

	if (val & ARMV8_PMU_PMCR_E) {
		kvm_pmu_enable_counter_mask(vcpu,
		       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
	} else {
		kvm_pmu_disable_counter_mask(vcpu,
		       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
	}

	if (val & ARMV8_PMU_PMCR_C)
		kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);

	if (val & ARMV8_PMU_PMCR_P) {
		unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);

		mask &= ~BIT(ARMV8_PMU_CYCLE_IDX);
		for_each_set_bit(i, &mask, 32)
			kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, i), 0, true);
	}
	kvm_vcpu_pmu_restore_guest(vcpu);
}
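
/*
 * Note: per PMUv3, PMCR_EL0.P resets only the event counters (the
 * loop above masks out the cycle counter), while PMCR_EL0.C resets
 * only the cycle counter. Both bits read as zero, which is why they
 * are stripped before the register value is saved.
 */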

static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc)
{
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);

	return (__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) &&
	       (__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(pmc->idx));
}

/**
 * kvm_pmu_create_perf_event - create a perf event for a counter
 * @pmc: Counter context
 */
static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc)
{
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	struct arm_pmu *arm_pmu = vcpu->kvm->arch.arm_pmu;
	struct perf_event *event;
	struct perf_event_attr attr;
	u64 eventsel, reg, data;

	reg = counter_index_to_evtreg(pmc->idx);
	data = __vcpu_sys_reg(vcpu, reg);

	kvm_pmu_stop_counter(pmc);
	if (pmc->idx == ARMV8_PMU_CYCLE_IDX)
		eventsel = ARMV8_PMUV3_PERFCTR_CPU_CYCLES;
	else
		eventsel = data & kvm_pmu_event_mask(vcpu->kvm);

	/*
	 * Neither SW increment nor chained events need to be backed
	 * by a perf event.
	 */
	if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR ||
	    eventsel == ARMV8_PMUV3_PERFCTR_CHAIN)
		return;

	/*
	 * If we have a filter in place and the event isn't allowed, do
	 * not install a perf event either.
	 */
	if (vcpu->kvm->arch.pmu_filter &&
	    !test_bit(eventsel, vcpu->kvm->arch.pmu_filter))
		return;

	memset(&attr, 0, sizeof(struct perf_event_attr));
	attr.type = arm_pmu->pmu.type;
	attr.size = sizeof(attr);
	attr.pinned = 1;
	attr.disabled = !kvm_pmu_counter_is_enabled(pmc);
	attr.exclude_user = data & ARMV8_PMU_EXCLUDE_EL0 ? 1 : 0;
	attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0;
	attr.exclude_hv = 1; /* Don't count EL2 events */
	attr.exclude_host = 1; /* Don't count host events */
	attr.config = eventsel;

	/*
	 * If counting with a 64bit counter, advertise it to the perf
	 * code, carefully dealing with the initial sample period
	 * which also depends on the overflow.
	 */
	if (kvm_pmc_is_64bit(pmc))
		attr.config1 |= PERF_ATTR_CFG1_COUNTER_64BIT;

	attr.sample_period = compute_period(pmc, kvm_pmu_get_pmc_value(pmc));

	event = perf_event_create_kernel_counter(&attr, -1, current,
						 kvm_pmu_perf_overflow, pmc);

	if (IS_ERR(event)) {
		pr_err_once("kvm: pmu event creation failed %ld\n",
			    PTR_ERR(event));
		return;
	}

	pmc->perf_event = event;
}
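
/*
 * Illustrative sketch (not executed here): the attr built above for
 * the cycle counter of an enabled, unfiltered guest PMU would look
 * roughly like this:
 *
 *	struct perf_event_attr attr = {
 *		.type		= arm_pmu->pmu.type,	// host PMU type
 *		.size		= sizeof(attr),
 *		.pinned		= 1,			// always on-CPU
 *		.exclude_hv	= 1,			// no EL2 counting
 *		.exclude_host	= 1,			// guest-only
 *		.config		= ARMV8_PMUV3_PERFCTR_CPU_CYCLES,
 *	};
 */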

/**
 * kvm_pmu_set_counter_event_type - set selected counter to monitor some event
 * @vcpu: The vcpu pointer
 * @data: The data guest writes to PMXEVTYPER_EL0
 * @select_idx: The number of selected counter
 *
 * When the guest OS accesses PMXEVTYPER_EL0, it wants to set a PMC to count an
 * event with the given hardware event number. Here we call the perf_event API
 * to emulate this action and create a kernel perf event for it.
 */
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
				    u64 select_idx)
{
	struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, select_idx);
	u64 reg, mask;

	if (!kvm_vcpu_has_pmu(vcpu))
		return;

	mask  =  ARMV8_PMU_EVTYPE_MASK;
	mask &= ~ARMV8_PMU_EVTYPE_EVENT;
	mask |= kvm_pmu_event_mask(vcpu->kvm);

	reg = counter_index_to_evtreg(pmc->idx);

	__vcpu_sys_reg(vcpu, reg) = data & mask;

	kvm_pmu_create_perf_event(pmc);
}

void kvm_host_pmu_init(struct arm_pmu *pmu)
{
	struct arm_pmu_entry *entry;

	/*
	 * Check the sanitised PMU version for the system, as KVM does not
	 * support implementations where PMUv3 exists on a subset of CPUs.
	 */
	if (!pmuv3_implemented(kvm_arm_pmu_get_pmuver_limit()))
		return;

	mutex_lock(&arm_pmus_lock);

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		goto out_unlock;

	entry->arm_pmu = pmu;
	list_add_tail(&entry->entry, &arm_pmus);

	if (list_is_singular(&arm_pmus))
		static_branch_enable(&kvm_arm_pmu_available);

out_unlock:
	mutex_unlock(&arm_pmus_lock);
}

static struct arm_pmu *kvm_pmu_probe_armpmu(void)
{
	struct arm_pmu *tmp, *pmu = NULL;
	struct arm_pmu_entry *entry;
	int cpu;

	mutex_lock(&arm_pmus_lock);

	/*
	 * It is safe to use a stale cpu to iterate the list of PMUs so long as
	 * the same value is used for the entirety of the loop. Given this, and
	 * the fact that no percpu data is used for the lookup there is no need
	 * to disable preemption.
	 *
	 * It is still necessary to get a valid cpu, though, to probe for the
	 * default PMU instance as userspace is not required to specify a PMU
	 * type. In order to uphold the preexisting behavior KVM selects the
	 * PMU instance for the core where the first call to the
	 * KVM_ARM_VCPU_PMU_V3_CTRL attribute group occurs. A dependent use case
	 * would be a user with disdain of all things big.LITTLE that affines
	 * the VMM to a particular cluster of cores.
	 *
	 * In any case, userspace should just do the sane thing and use the UAPI
	 * to select a PMU type directly. But, be wary of the baggage being
	 * carried here.
	 */
	cpu = raw_smp_processor_id();
	list_for_each_entry(entry, &arm_pmus, entry) {
		tmp = entry->arm_pmu;

		if (cpumask_test_cpu(cpu, &tmp->supported_cpus)) {
			pmu = tmp;
			break;
		}
	}

	mutex_unlock(&arm_pmus_lock);

	return pmu;
}

u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
{
	unsigned long *bmap = vcpu->kvm->arch.pmu_filter;
	u64 val, mask = 0;
	int base, i, nr_events;

	if (!kvm_vcpu_has_pmu(vcpu))
		return 0;

	if (!pmceid1) {
		val = read_sysreg(pmceid0_el0);
		/* always support CHAIN */
		val |= BIT(ARMV8_PMUV3_PERFCTR_CHAIN);
		base = 0;
	} else {
		val = read_sysreg(pmceid1_el0);
		/*
		 * Don't advertise STALL_SLOT*, as PMMIR_EL0 is handled
		 * as RAZ
		 */
		val &= ~(BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32) |
			 BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT_FRONTEND - 32) |
			 BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT_BACKEND - 32));
		base = 32;
	}

	if (!bmap)
		return val;

	nr_events = kvm_pmu_event_mask(vcpu->kvm) + 1;

	for (i = 0; i < 32; i += 8) {
		u64 byte;

		byte = bitmap_get_value8(bmap, base + i);
		mask |= byte << i;
		if (nr_events >= (0x4000 + base + 32)) {
			byte = bitmap_get_value8(bmap, 0x4000 + base + i);
			mask |= byte << (32 + i);
		}
	}

	return val & mask;
}
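
/*
 * Note: PMCEID{0,1}_EL0 are feature bitmaps, not counters. Bit n of
 * the low half of PMCEID0_EL0 advertises common event n (PMCEID1_EL0
 * covers events 32-63), while the high halves advertise the extended
 * event range starting at 0x4000 - hence the 0x4000 + base offset
 * when folding the userspace filter into the mask above.
 */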

int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
	if (!kvm_vcpu_has_pmu(vcpu))
		return 0;

	if (!vcpu->arch.pmu.created)
		return -EINVAL;

	/*
	 * A valid interrupt configuration for the PMU is either to have a
	 * properly configured interrupt number and using an in-kernel
	 * irqchip, or to not have an in-kernel GIC and not set an IRQ.
	 */
	if (irqchip_in_kernel(vcpu->kvm)) {
		int irq = vcpu->arch.pmu.irq_num;
		/*
		 * If we are using an in-kernel vgic, at this point we know
		 * the vgic will be initialized, so we can check the PMU irq
		 * number against the dimensions of the vgic and make sure
		 * it's valid.
		 */
		if (!irq_is_ppi(irq) && !vgic_valid_spi(vcpu->kvm, irq))
			return -EINVAL;
	} else if (kvm_arm_pmu_irq_initialized(vcpu)) {
		return -EINVAL;
	}

	/* One-off reload of the PMU on first run */
	kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);

	return 0;
}

static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
{
	if (irqchip_in_kernel(vcpu->kvm)) {
		int ret;

		/*
		 * If using the PMU with an in-kernel virtual GIC
		 * implementation, we require the GIC to be already
		 * initialized when initializing the PMU.
		 */
		if (!vgic_initialized(vcpu->kvm))
			return -ENODEV;

		if (!kvm_arm_pmu_irq_initialized(vcpu))
			return -ENXIO;

		ret = kvm_vgic_set_owner(vcpu, vcpu->arch.pmu.irq_num,
					 &vcpu->arch.pmu);
		if (ret)
			return ret;
	}

	init_irq_work(&vcpu->arch.pmu.overflow_work,
		      kvm_pmu_perf_overflow_notify_vcpu);

	vcpu->arch.pmu.created = true;
	return 0;
}

/*
 * For one VM the interrupt type must be the same for each vcpu.
 * As a PPI, the interrupt number is the same for all vcpus,
 * while as an SPI it must be a separate number per vcpu.
 */
static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!kvm_arm_pmu_irq_initialized(vcpu))
			continue;

		if (irq_is_ppi(irq)) {
			if (vcpu->arch.pmu.irq_num != irq)
				return false;
		} else {
			if (vcpu->arch.pmu.irq_num == irq)
				return false;
		}
	}

	return true;
}
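
/*
 * Worked example: if vcpu0 registered PPI 23, every other vcpu must
 * also use PPI 23 (a per-CPU interrupt, one number for all). If vcpu0
 * registered SPI 40 instead, no other vcpu may reuse 40; each needs a
 * distinct SPI.
 */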

static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id)
{
	struct kvm *kvm = vcpu->kvm;
	struct arm_pmu_entry *entry;
	struct arm_pmu *arm_pmu;
	int ret = -ENXIO;

	lockdep_assert_held(&kvm->arch.config_lock);
	mutex_lock(&arm_pmus_lock);

	list_for_each_entry(entry, &arm_pmus, entry) {
		arm_pmu = entry->arm_pmu;
		if (arm_pmu->pmu.type == pmu_id) {
			if (kvm_vm_has_ran_once(kvm) ||
			    (kvm->arch.pmu_filter && kvm->arch.arm_pmu != arm_pmu)) {
				ret = -EBUSY;
				break;
			}

			kvm->arch.arm_pmu = arm_pmu;
			cpumask_copy(kvm->arch.supported_cpus, &arm_pmu->supported_cpus);
			ret = 0;
			break;
		}
	}

	mutex_unlock(&arm_pmus_lock);
	return ret;
}

int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	struct kvm *kvm = vcpu->kvm;

	lockdep_assert_held(&kvm->arch.config_lock);

	if (!kvm_vcpu_has_pmu(vcpu))
		return -ENODEV;

	if (vcpu->arch.pmu.created)
		return -EBUSY;

	if (!kvm->arch.arm_pmu) {
		/*
		 * No PMU set, get the default one.
		 *
		 * The observant among you will notice that the supported_cpus
		 * mask does not get updated for the default PMU even though it
		 * is quite possible the selected instance supports only a
		 * subset of cores in the system. This is intentional, and
		 * upholds the preexisting behavior on heterogeneous systems
		 * where vCPUs can be scheduled on any core but the guest
		 * counters could stop working.
		 */
		kvm->arch.arm_pmu = kvm_pmu_probe_armpmu();
		if (!kvm->arch.arm_pmu)
			return -ENODEV;
	}

	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ: {
		int __user *uaddr = (int __user *)(long)attr->addr;
		int irq;

		if (!irqchip_in_kernel(kvm))
			return -EINVAL;

		if (get_user(irq, uaddr))
			return -EFAULT;

		/* The PMU overflow interrupt can be a PPI or a valid SPI. */
		if (!(irq_is_ppi(irq) || irq_is_spi(irq)))
			return -EINVAL;

		if (!pmu_irq_is_valid(kvm, irq))
			return -EINVAL;

		if (kvm_arm_pmu_irq_initialized(vcpu))
			return -EBUSY;

		kvm_debug("Set kvm ARM PMU irq: %d\n", irq);
		vcpu->arch.pmu.irq_num = irq;
		return 0;
	}
	case KVM_ARM_VCPU_PMU_V3_FILTER: {
		u8 pmuver = kvm_arm_pmu_get_pmuver_limit();
		struct kvm_pmu_event_filter __user *uaddr;
		struct kvm_pmu_event_filter filter;
		int nr_events;

		/*
		 * Allow userspace to specify an event filter for the entire
		 * event range supported by the PMUVer of the hardware, rather
		 * than the guest's PMUVer, for KVM backward compatibility.
		 */
		nr_events = __kvm_pmu_event_mask(pmuver) + 1;

		uaddr = (struct kvm_pmu_event_filter __user *)(long)attr->addr;

		if (copy_from_user(&filter, uaddr, sizeof(filter)))
			return -EFAULT;

		if (((u32)filter.base_event + filter.nevents) > nr_events ||
		    (filter.action != KVM_PMU_EVENT_ALLOW &&
		     filter.action != KVM_PMU_EVENT_DENY))
			return -EINVAL;

		if (kvm_vm_has_ran_once(kvm))
			return -EBUSY;

		if (!kvm->arch.pmu_filter) {
			kvm->arch.pmu_filter = bitmap_alloc(nr_events, GFP_KERNEL_ACCOUNT);
			if (!kvm->arch.pmu_filter)
				return -ENOMEM;

			/*
			 * The default depends on the first applied filter.
			 * If it allows events, the default is to deny.
			 * Conversely, if the first filter denies a set of
			 * events, the default is to allow.
			 */
			if (filter.action == KVM_PMU_EVENT_ALLOW)
				bitmap_zero(kvm->arch.pmu_filter, nr_events);
			else
				bitmap_fill(kvm->arch.pmu_filter, nr_events);
		}

		if (filter.action == KVM_PMU_EVENT_ALLOW)
			bitmap_set(kvm->arch.pmu_filter, filter.base_event, filter.nevents);
		else
			bitmap_clear(kvm->arch.pmu_filter, filter.base_event, filter.nevents);

		return 0;
	}
	case KVM_ARM_VCPU_PMU_V3_SET_PMU: {
		int __user *uaddr = (int __user *)(long)attr->addr;
		int pmu_id;

		if (get_user(pmu_id, uaddr))
			return -EFAULT;

		return kvm_arm_pmu_v3_set_pmu(vcpu, pmu_id);
	}
	case KVM_ARM_VCPU_PMU_V3_INIT:
		return kvm_arm_pmu_v3_init(vcpu);
	}

	return -ENXIO;
}

int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ: {
		int __user *uaddr = (int __user *)(long)attr->addr;
		int irq;

		if (!irqchip_in_kernel(vcpu->kvm))
			return -EINVAL;

		if (!kvm_vcpu_has_pmu(vcpu))
			return -ENODEV;

		if (!kvm_arm_pmu_irq_initialized(vcpu))
			return -ENXIO;

		irq = vcpu->arch.pmu.irq_num;
		return put_user(irq, uaddr);
	}
	}

	return -ENXIO;
}

int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ:
	case KVM_ARM_VCPU_PMU_V3_INIT:
	case KVM_ARM_VCPU_PMU_V3_FILTER:
	case KVM_ARM_VCPU_PMU_V3_SET_PMU:
		if (kvm_vcpu_has_pmu(vcpu))
			return 0;
	}

	return -ENXIO;
}

u8 kvm_arm_pmu_get_pmuver_limit(void)
{
	u64 tmp;

	tmp = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
	tmp = cpuid_feature_cap_perfmon_field(tmp,
					      ID_AA64DFR0_EL1_PMUVer_SHIFT,
					      ID_AA64DFR0_EL1_PMUVer_V3P5);
	return FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), tmp);
}
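
/*
 * Note: this caps the PMU version KVM exposes to guests at PMUv3p5,
 * even when the hardware reports something newer; as far as we can
 * tell, cpuid_feature_cap_perfmon_field() also treats an
 * IMPLEMENTATION DEFINED PMU (PMUVer == 0xf) as not implemented.
 */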
1074