// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/perf_event.h>
#include <linux/perf/arm_pmu.h>
#include <linux/uaccess.h>
#include <asm/kvm_emulate.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_vgic.h>
#include <asm/arm_pmuv3.h>

#define PERF_ATTR_CFG1_COUNTER_64BIT	BIT(0)

DEFINE_STATIC_KEY_FALSE(kvm_arm_pmu_available);

static LIST_HEAD(arm_pmus);
static DEFINE_MUTEX(arm_pmus_lock);

static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc);
static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc);

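/*
 * The counters are embedded in the vcpu's kvm_pmu; recover the owning vcpu
 * from a counter pointer using the counter's index within that array.
 */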
static struct kvm_vcpu *kvm_pmc_to_vcpu(const struct kvm_pmc *pmc)
{
	return container_of(pmc, struct kvm_vcpu, arch.pmu.pmc[pmc->idx]);
}

static struct kvm_pmc *kvm_vcpu_idx_to_pmc(struct kvm_vcpu *vcpu, int cnt_idx)
{
	return &vcpu->arch.pmu.pmc[cnt_idx];
}

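/*
 * PMUv3 as introduced in ARMv8.0 only has 10-bit event numbers; PMUv3.1 and
 * later widen the event number field to 16 bits.
 */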
static u32 __kvm_pmu_event_mask(unsigned int pmuver)
{
	switch (pmuver) {
	case ID_AA64DFR0_EL1_PMUVer_IMP:
		return GENMASK(9, 0);
	case ID_AA64DFR0_EL1_PMUVer_V3P1:
	case ID_AA64DFR0_EL1_PMUVer_V3P4:
	case ID_AA64DFR0_EL1_PMUVer_V3P5:
	case ID_AA64DFR0_EL1_PMUVer_V3P7:
		return GENMASK(15, 0);
	default:	/* Shouldn't be here, just for sanity */
		WARN_ONCE(1, "Unknown PMU version %d\n", pmuver);
		return 0;
	}
}

static u32 kvm_pmu_event_mask(struct kvm *kvm)
{
	u64 dfr0 = IDREG(kvm, SYS_ID_AA64DFR0_EL1);
	u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, dfr0);

	return __kvm_pmu_event_mask(pmuver);
}

/**
 * kvm_pmc_is_64bit - determine if counter is 64bit
 * @pmc: counter context
 */
static bool kvm_pmc_is_64bit(struct kvm_pmc *pmc)
{
	return (pmc->idx == ARMV8_PMU_CYCLE_IDX ||
		kvm_pmu_is_3p5(kvm_pmc_to_vcpu(pmc)));
}

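/*
 * A counter only overflows at 64 bits when the relevant "long counter" bit is
 * set in PMCR_EL0: LP for the event counters, LC for the cycle counter.
 */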
static bool kvm_pmc_has_64bit_overflow(struct kvm_pmc *pmc)
{
	u64 val = __vcpu_sys_reg(kvm_pmc_to_vcpu(pmc), PMCR_EL0);

	return (pmc->idx < ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LP)) ||
	       (pmc->idx == ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LC));
}

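/*
 * A counter can feed a CHAIN event when it is an even-numbered event counter,
 * the odd counter above it exists (i.e. it isn't the cycle counter), and the
 * counter itself overflows at 32 bits.
 */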
static bool kvm_pmu_counter_can_chain(struct kvm_pmc *pmc)
{
	return (!(pmc->idx & 1) && (pmc->idx + 1) < ARMV8_PMU_CYCLE_IDX &&
		!kvm_pmc_has_64bit_overflow(pmc));
}

static u32 counter_index_to_reg(u64 idx)
{
	return (idx == ARMV8_PMU_CYCLE_IDX) ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + idx;
}

static u32 counter_index_to_evtreg(u64 idx)
{
	return (idx == ARMV8_PMU_CYCLE_IDX) ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + idx;
}

static u64 kvm_pmu_get_pmc_value(struct kvm_pmc *pmc)
{
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	u64 counter, reg, enabled, running;

	reg = counter_index_to_reg(pmc->idx);
	counter = __vcpu_sys_reg(vcpu, reg);

	/*
	 * The real counter value is the value of the counter register plus
	 * whatever the backing perf event has counted so far.
	 */
	if (pmc->perf_event)
		counter += perf_event_read_value(pmc->perf_event, &enabled,
						 &running);

	if (!kvm_pmc_is_64bit(pmc))
		counter = lower_32_bits(counter);

	return counter;
}

/**
 * kvm_pmu_get_counter_value - get PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
{
	if (!kvm_vcpu_has_pmu(vcpu))
		return 0;

	return kvm_pmu_get_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, select_idx));
}

static void kvm_pmu_set_pmc_value(struct kvm_pmc *pmc, u64 val, bool force)
{
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	u64 reg;

	kvm_pmu_release_perf_event(pmc);

	reg = counter_index_to_reg(pmc->idx);

	if (vcpu_mode_is_32bit(vcpu) && pmc->idx != ARMV8_PMU_CYCLE_IDX &&
	    !force) {
		/*
		 * Even with PMUv3p5, AArch32 cannot write to the top
		 * 32bit of the counters. The only possible course of
		 * action is to use PMCR.P, which will reset them to
		 * 0 (the only use of the 'force' parameter).
		 */
		val = (__vcpu_sys_reg(vcpu, reg) & GENMASK(63, 32)) |
		      lower_32_bits(val);
	}

	__vcpu_sys_reg(vcpu, reg) = val;

	/* Recreate the perf event to reflect the updated sample_period */
	kvm_pmu_create_perf_event(pmc);
}

/**
 * kvm_pmu_set_counter_value - set PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 * @val: The counter value
 */
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
{
	if (!kvm_vcpu_has_pmu(vcpu))
		return;

	kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, select_idx), val, false);
}

/**
 * kvm_pmu_release_perf_event - remove the perf event
 * @pmc: The PMU counter pointer
 */
static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
{
	if (pmc->perf_event) {
		perf_event_disable(pmc->perf_event);
		perf_event_release_kernel(pmc->perf_event);
		pmc->perf_event = NULL;
	}
}

/**
 * kvm_pmu_stop_counter - stop PMU counter
 * @pmc: The PMU counter pointer
 *
 * If this counter has been configured to monitor some event, release it here.
 */
static void kvm_pmu_stop_counter(struct kvm_pmc *pmc)
{
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	u64 reg, val;

	if (!pmc->perf_event)
		return;

	val = kvm_pmu_get_pmc_value(pmc);

	reg = counter_index_to_reg(pmc->idx);

	__vcpu_sys_reg(vcpu, reg) = val;

	kvm_pmu_release_perf_event(pmc);
}

/**
 * kvm_pmu_vcpu_init - assign pmu counter indices for the vcpu
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
		pmu->pmc[i].idx = i;
}

/**
 * kvm_pmu_vcpu_reset - reset pmu state for the vcpu
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
{
	unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
	int i;

	for_each_set_bit(i, &mask, 32)
		kvm_pmu_stop_counter(kvm_vcpu_idx_to_pmc(vcpu, i));
}

/**
 * kvm_pmu_vcpu_destroy - free the perf events of the vcpu's PMU
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	int i;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
		kvm_pmu_release_perf_event(kvm_vcpu_idx_to_pmc(vcpu, i));
	irq_work_sync(&vcpu->arch.pmu.overflow_work);
}

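/*
 * PMCR_EL0.N holds the number of implemented event counters; the valid mask
 * covers those counters plus the cycle counter, which always exists.
 */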
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
	u64 val = __vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT;

	val &= ARMV8_PMU_PMCR_N_MASK;
	if (val == 0)
		return BIT(ARMV8_PMU_CYCLE_IDX);
	else
		return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
}

/**
 * kvm_pmu_enable_counter_mask - enable selected PMU counters
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCNTENSET register
 *
 * Call perf_event_enable to start counting the perf event
 */
void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
{
	int i;

	if (!kvm_vcpu_has_pmu(vcpu))
		return;

	if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
		return;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		struct kvm_pmc *pmc;

		if (!(val & BIT(i)))
			continue;

		pmc = kvm_vcpu_idx_to_pmc(vcpu, i);

		if (!pmc->perf_event) {
			kvm_pmu_create_perf_event(pmc);
		} else {
			perf_event_enable(pmc->perf_event);
			if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
				kvm_debug("failed to enable perf event\n");
		}
	}
}

/**
 * kvm_pmu_disable_counter_mask - disable selected PMU counters
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCNTENCLR register
 *
 * Call perf_event_disable to stop counting the perf event
 */
void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
{
	int i;

	if (!kvm_vcpu_has_pmu(vcpu) || !val)
		return;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		struct kvm_pmc *pmc;

		if (!(val & BIT(i)))
			continue;

		pmc = kvm_vcpu_idx_to_pmc(vcpu, i);

		if (pmc->perf_event)
			perf_event_disable(pmc->perf_event);
	}
}

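/*
 * A counter only contributes to the overflow interrupt when the PMU is
 * globally enabled (PMCR_EL0.E) and the counter is both enabled
 * (PMCNTENSET_EL0) and has its interrupt enabled (PMINTENSET_EL1).
 */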
static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
{
	u64 reg = 0;

	if ((__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
		reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
		reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
		reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
	}

	return reg;
}

static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	bool overflow;

	if (!kvm_vcpu_has_pmu(vcpu))
		return;

	overflow = !!kvm_pmu_overflow_status(vcpu);
	if (pmu->irq_level == overflow)
		return;

	pmu->irq_level = overflow;

	if (likely(irqchip_in_kernel(vcpu->kvm))) {
		int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
					      pmu->irq_num, overflow, pmu);
		WARN_ON(ret);
	}
}

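/*
 * With a userspace irqchip, the PMU interrupt line is emulated in userspace;
 * tell the caller whether the emulated level differs from what was last
 * reported to userspace via the kvm_run sync regs.
 */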
bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
	bool run_level = sregs->device_irq_level & KVM_ARM_DEV_PMU;

	if (likely(irqchip_in_kernel(vcpu->kvm)))
		return false;

	return pmu->irq_level != run_level;
}

/*
 * Reflect the PMU overflow interrupt output level into the kvm_run structure
 */
void kvm_pmu_update_run(struct kvm_vcpu *vcpu)
{
	struct kvm_sync_regs *regs = &vcpu->run->s.regs;

	/* Populate the PMU bit of the device IRQ bitmap for user space */
	regs->device_irq_level &= ~KVM_ARM_DEV_PMU;
	if (vcpu->arch.pmu.irq_level)
		regs->device_irq_level |= KVM_ARM_DEV_PMU;
}

/**
 * kvm_pmu_flush_hwstate - flush pmu state to cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the host, and inject
 * an interrupt if that was the case.
 */
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu)
{
	kvm_pmu_update_state(vcpu);
}

/**
 * kvm_pmu_sync_hwstate - sync pmu state from cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the guest, and
 * inject an interrupt if that was the case.
 */
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
{
	kvm_pmu_update_state(vcpu);
}

/*
 * When the perf interrupt is an NMI, we cannot safely notify the vcpu
 * corresponding to the event directly. This callback does it for us once we
 * are back outside of NMI context.
 */
static void kvm_pmu_perf_overflow_notify_vcpu(struct irq_work *work)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(work, struct kvm_vcpu, arch.pmu.overflow_work);
	kvm_vcpu_kick(vcpu);
}

/*
 * Perform an increment on any of the counters described in @mask,
 * generating the overflow if required, and propagate it as a chained
 * event if possible.
 */
static void kvm_pmu_counter_increment(struct kvm_vcpu *vcpu,
				      unsigned long mask, u32 event)
{
	int i;

	if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
		return;

	/* Weed out disabled counters */
	mask &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);

	for_each_set_bit(i, &mask, ARMV8_PMU_CYCLE_IDX) {
		struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
		u64 type, reg;

		/* Filter on event type */
		type = __vcpu_sys_reg(vcpu, counter_index_to_evtreg(i));
		type &= kvm_pmu_event_mask(vcpu->kvm);
		if (type != event)
			continue;

		/* Increment this counter */
		reg = __vcpu_sys_reg(vcpu, counter_index_to_reg(i)) + 1;
		if (!kvm_pmc_is_64bit(pmc))
			reg = lower_32_bits(reg);
		__vcpu_sys_reg(vcpu, counter_index_to_reg(i)) = reg;

		/* No overflow? move on */
		if (kvm_pmc_has_64bit_overflow(pmc) ? reg : lower_32_bits(reg))
			continue;

		/* Mark overflow */
		__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);

		if (kvm_pmu_counter_can_chain(pmc))
			kvm_pmu_counter_increment(vcpu, BIT(i + 1),
						  ARMV8_PMUV3_PERFCTR_CHAIN);
	}
}

/* Compute the sample period for a given counter value */
static u64 compute_period(struct kvm_pmc *pmc, u64 counter)
{
	u64 val;

	if (kvm_pmc_is_64bit(pmc) && kvm_pmc_has_64bit_overflow(pmc))
		val = (-counter) & GENMASK(63, 0);
	else
		val = (-counter) & GENMASK(31, 0);

	return val;
}

/*
 * When the perf event overflows, set the overflow status and inform the vcpu.
 */
static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
				  struct perf_sample_data *data,
				  struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	struct arm_pmu *cpu_pmu = to_arm_pmu(perf_event->pmu);
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	int idx = pmc->idx;
	u64 period;

	cpu_pmu->pmu.stop(perf_event, PERF_EF_UPDATE);

	/*
	 * Reset the sample period to the architectural limit,
	 * i.e. the point where the counter overflows.
	 */
	period = compute_period(pmc, local64_read(&perf_event->count));

	local64_set(&perf_event->hw.period_left, 0);
	perf_event->attr.sample_period = period;
	perf_event->hw.sample_period = period;

	__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);

	if (kvm_pmu_counter_can_chain(pmc))
		kvm_pmu_counter_increment(vcpu, BIT(idx + 1),
					  ARMV8_PMUV3_PERFCTR_CHAIN);

	if (kvm_pmu_overflow_status(vcpu)) {
		kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);

		if (!in_nmi())
			kvm_vcpu_kick(vcpu);
		else
			irq_work_queue(&vcpu->arch.pmu.overflow_work);
	}

	cpu_pmu->pmu.start(perf_event, PERF_EF_RELOAD);
}

/**
 * kvm_pmu_software_increment - do software increment
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMSWINC register
 */
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
{
	kvm_pmu_counter_increment(vcpu, val, ARMV8_PMUV3_PERFCTR_SW_INCR);
}

/**
 * kvm_pmu_handle_pmcr - handle PMCR register
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCR register
 */
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
{
	int i;

	if (!kvm_vcpu_has_pmu(vcpu))
		return;

	/* Fixup PMCR_EL0 to reconcile the PMU version and the LP bit */
	if (!kvm_pmu_is_3p5(vcpu))
		val &= ~ARMV8_PMU_PMCR_LP;

	/* The reset bits don't indicate any state, and shouldn't be saved. */
	__vcpu_sys_reg(vcpu, PMCR_EL0) = val & ~(ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_P);

	if (val & ARMV8_PMU_PMCR_E) {
		kvm_pmu_enable_counter_mask(vcpu,
					    __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
	} else {
		kvm_pmu_disable_counter_mask(vcpu,
					     __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
	}

	if (val & ARMV8_PMU_PMCR_C)
		kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);

	if (val & ARMV8_PMU_PMCR_P) {
		unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);

		mask &= ~BIT(ARMV8_PMU_CYCLE_IDX);
		for_each_set_bit(i, &mask, 32)
			kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, i), 0, true);
	}
	kvm_vcpu_pmu_restore_guest(vcpu);
}

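/*
 * A counter actually counts only when the PMU is globally enabled via
 * PMCR_EL0.E and the counter's own bit is set in PMCNTENSET_EL0.
 */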
static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc)
{
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);

	return (__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) &&
	       (__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(pmc->idx));
}

/**
 * kvm_pmu_create_perf_event - create a perf event for a counter
 * @pmc: Counter context
 */
static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc)
{
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	struct arm_pmu *arm_pmu = vcpu->kvm->arch.arm_pmu;
	struct perf_event *event;
	struct perf_event_attr attr;
	u64 eventsel, reg, data;

	reg = counter_index_to_evtreg(pmc->idx);
	data = __vcpu_sys_reg(vcpu, reg);

	kvm_pmu_stop_counter(pmc);
	if (pmc->idx == ARMV8_PMU_CYCLE_IDX)
		eventsel = ARMV8_PMUV3_PERFCTR_CPU_CYCLES;
	else
		eventsel = data & kvm_pmu_event_mask(vcpu->kvm);

	/*
	 * Neither SW increment nor chained events need to be backed
	 * by a perf event.
	 */
	if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR ||
	    eventsel == ARMV8_PMUV3_PERFCTR_CHAIN)
		return;

	/*
	 * If we have a filter in place and the event isn't allowed, do
	 * not install a perf event either.
	 */
	if (vcpu->kvm->arch.pmu_filter &&
	    !test_bit(eventsel, vcpu->kvm->arch.pmu_filter))
		return;

	memset(&attr, 0, sizeof(struct perf_event_attr));
	attr.type = arm_pmu->pmu.type;
	attr.size = sizeof(attr);
	attr.pinned = 1;
	attr.disabled = !kvm_pmu_counter_is_enabled(pmc);
	attr.exclude_user = data & ARMV8_PMU_EXCLUDE_EL0 ? 1 : 0;
	attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0;
	attr.exclude_hv = 1; /* Don't count EL2 events */
	attr.exclude_host = 1; /* Don't count host events */
	attr.config = eventsel;

	/*
	 * If counting with a 64bit counter, advertise it to the perf
	 * code, carefully dealing with the initial sample period
	 * which also depends on the overflow.
	 */
	if (kvm_pmc_is_64bit(pmc))
		attr.config1 |= PERF_ATTR_CFG1_COUNTER_64BIT;

	attr.sample_period = compute_period(pmc, kvm_pmu_get_pmc_value(pmc));

	event = perf_event_create_kernel_counter(&attr, -1, current,
						 kvm_pmu_perf_overflow, pmc);

	if (IS_ERR(event)) {
		pr_err_once("kvm: pmu event creation failed %ld\n",
			    PTR_ERR(event));
		return;
	}

	pmc->perf_event = event;
}

/**
 * kvm_pmu_set_counter_event_type - set selected counter to monitor some event
 * @vcpu: The vcpu pointer
 * @data: The data guest writes to PMXEVTYPER_EL0
 * @select_idx: The number of selected counter
 *
 * When the guest OS accesses PMXEVTYPER_EL0, it wants to set a PMC to count an
 * event with the given hardware event number. Here we call the perf_event API
 * to emulate this action and create a kernel perf event for it.
 */
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
				    u64 select_idx)
{
	struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, select_idx);
	u64 reg, mask;

	if (!kvm_vcpu_has_pmu(vcpu))
		return;

	mask  = ARMV8_PMU_EVTYPE_MASK;
	mask &= ~ARMV8_PMU_EVTYPE_EVENT;
	mask |= kvm_pmu_event_mask(vcpu->kvm);

	reg = counter_index_to_evtreg(pmc->idx);

	__vcpu_sys_reg(vcpu, reg) = data & mask;

	kvm_pmu_create_perf_event(pmc);
}

void kvm_host_pmu_init(struct arm_pmu *pmu)
{
	struct arm_pmu_entry *entry;

	/*
	 * Check the sanitised PMU version for the system, as KVM does not
	 * support implementations where PMUv3 exists on a subset of CPUs.
	 */
	if (!pmuv3_implemented(kvm_arm_pmu_get_pmuver_limit()))
		return;

	mutex_lock(&arm_pmus_lock);

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		goto out_unlock;

	entry->arm_pmu = pmu;
	list_add_tail(&entry->entry, &arm_pmus);

	if (list_is_singular(&arm_pmus))
		static_branch_enable(&kvm_arm_pmu_available);

out_unlock:
	mutex_unlock(&arm_pmus_lock);
}

static struct arm_pmu *kvm_pmu_probe_armpmu(void)
{
	struct arm_pmu *tmp, *pmu = NULL;
	struct arm_pmu_entry *entry;
	int cpu;

	mutex_lock(&arm_pmus_lock);

	/*
	 * It is safe to use a stale cpu to iterate the list of PMUs so long as
	 * the same value is used for the entirety of the loop. Given this, and
	 * the fact that no percpu data is used for the lookup there is no need
	 * to disable preemption.
	 *
	 * It is still necessary to get a valid cpu, though, to probe for the
	 * default PMU instance as userspace is not required to specify a PMU
	 * type. In order to uphold the preexisting behavior KVM selects the
	 * PMU instance for the core where the first call to the
	 * KVM_ARM_VCPU_PMU_V3_CTRL attribute group occurs. A dependent use case
	 * would be a user with disdain of all things big.LITTLE that affines
	 * the VMM to a particular cluster of cores.
	 *
	 * In any case, userspace should just do the sane thing and use the UAPI
	 * to select a PMU type directly. But, be wary of the baggage being
	 * carried here.
	 */
	cpu = raw_smp_processor_id();
	list_for_each_entry(entry, &arm_pmus, entry) {
		tmp = entry->arm_pmu;

		if (cpumask_test_cpu(cpu, &tmp->supported_cpus)) {
			pmu = tmp;
			break;
		}
	}

	mutex_unlock(&arm_pmus_lock);

	return pmu;
}

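/*
 * Build the guest's view of PMCEID0_EL0 (pmceid1 == false) or PMCEID1_EL0
 * (pmceid1 == true) from the host registers, masking out any event that the
 * userspace event filter does not allow.
 */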
u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
{
	unsigned long *bmap = vcpu->kvm->arch.pmu_filter;
	u64 val, mask = 0;
	int base, i, nr_events;

	if (!kvm_vcpu_has_pmu(vcpu))
		return 0;

	if (!pmceid1) {
		val = read_sysreg(pmceid0_el0);
		/* always support CHAIN */
		val |= BIT(ARMV8_PMUV3_PERFCTR_CHAIN);
		base = 0;
	} else {
		val = read_sysreg(pmceid1_el0);
		/*
		 * Don't advertise STALL_SLOT*, as PMMIR_EL0 is handled
		 * as RAZ
		 */
		val &= ~(BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32) |
			 BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT_FRONTEND - 32) |
			 BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT_BACKEND - 32));
		base = 32;
	}

	if (!bmap)
		return val;

	nr_events = kvm_pmu_event_mask(vcpu->kvm) + 1;

	for (i = 0; i < 32; i += 8) {
		u64 byte;

		byte = bitmap_get_value8(bmap, base + i);
		mask |= byte << i;
		if (nr_events >= (0x4000 + base + 32)) {
			byte = bitmap_get_value8(bmap, 0x4000 + base + i);
			mask |= byte << (32 + i);
		}
	}

	return val & mask;
}

int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
	if (!kvm_vcpu_has_pmu(vcpu))
		return 0;

	if (!vcpu->arch.pmu.created)
		return -EINVAL;

	/*
	 * A valid interrupt configuration for the PMU is either to have a
	 * properly configured interrupt number and using an in-kernel
	 * irqchip, or to not have an in-kernel GIC and not set an IRQ.
	 */
	if (irqchip_in_kernel(vcpu->kvm)) {
		int irq = vcpu->arch.pmu.irq_num;
		/*
		 * If we are using an in-kernel vgic, at this point we know
		 * the vgic will be initialized, so we can check the PMU irq
		 * number against the dimensions of the vgic and make sure
		 * it's valid.
		 */
		if (!irq_is_ppi(irq) && !vgic_valid_spi(vcpu->kvm, irq))
			return -EINVAL;
	} else if (kvm_arm_pmu_irq_initialized(vcpu)) {
		return -EINVAL;
	}

	/* One-off reload of the PMU on first run */
	kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);

	return 0;
}

static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
{
	if (irqchip_in_kernel(vcpu->kvm)) {
		int ret;

		/*
		 * If using the PMU with an in-kernel virtual GIC
		 * implementation, we require the GIC to be already
		 * initialized when initializing the PMU.
		 */
		if (!vgic_initialized(vcpu->kvm))
			return -ENODEV;

		if (!kvm_arm_pmu_irq_initialized(vcpu))
			return -ENXIO;

		ret = kvm_vgic_set_owner(vcpu, vcpu->arch.pmu.irq_num,
					 &vcpu->arch.pmu);
		if (ret)
			return ret;
	}

	init_irq_work(&vcpu->arch.pmu.overflow_work,
		      kvm_pmu_perf_overflow_notify_vcpu);

	vcpu->arch.pmu.created = true;
	return 0;
}

/*
 * For one VM the interrupt type must be the same for each vcpu.
 * As a PPI, the interrupt number is the same for all vcpus,
 * while as an SPI it must be a separate number per vcpu.
 */
static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!kvm_arm_pmu_irq_initialized(vcpu))
			continue;

		if (irq_is_ppi(irq)) {
			if (vcpu->arch.pmu.irq_num != irq)
				return false;
		} else {
			if (vcpu->arch.pmu.irq_num == irq)
				return false;
		}
	}

	return true;
}

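/*
 * Bind the VM to a specific host PMU, identified by the perf type id that
 * userspace passed in. This is refused once the VM has run, or if an event
 * filter was already set up against a different PMU instance.
 */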
static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id)
{
	struct kvm *kvm = vcpu->kvm;
	struct arm_pmu_entry *entry;
	struct arm_pmu *arm_pmu;
	int ret = -ENXIO;

	lockdep_assert_held(&kvm->arch.config_lock);
	mutex_lock(&arm_pmus_lock);

	list_for_each_entry(entry, &arm_pmus, entry) {
		arm_pmu = entry->arm_pmu;
		if (arm_pmu->pmu.type == pmu_id) {
			if (kvm_vm_has_ran_once(kvm) ||
			    (kvm->arch.pmu_filter && kvm->arch.arm_pmu != arm_pmu)) {
				ret = -EBUSY;
				break;
			}

			kvm->arch.arm_pmu = arm_pmu;
			cpumask_copy(kvm->arch.supported_cpus, &arm_pmu->supported_cpus);
			ret = 0;
			break;
		}
	}

	mutex_unlock(&arm_pmus_lock);
	return ret;
}

int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	struct kvm *kvm = vcpu->kvm;

	lockdep_assert_held(&kvm->arch.config_lock);

	if (!kvm_vcpu_has_pmu(vcpu))
		return -ENODEV;

	if (vcpu->arch.pmu.created)
		return -EBUSY;

	if (!kvm->arch.arm_pmu) {
		/*
		 * No PMU set, get the default one.
		 *
		 * The observant among you will notice that the supported_cpus
		 * mask does not get updated for the default PMU even though it
		 * is quite possible the selected instance supports only a
		 * subset of cores in the system. This is intentional, and
		 * upholds the preexisting behavior on heterogeneous systems
		 * where vCPUs can be scheduled on any core but the guest
		 * counters could stop working.
		 */
		kvm->arch.arm_pmu = kvm_pmu_probe_armpmu();
		if (!kvm->arch.arm_pmu)
			return -ENODEV;
	}

	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ: {
		int __user *uaddr = (int __user *)(long)attr->addr;
		int irq;

		if (!irqchip_in_kernel(kvm))
			return -EINVAL;

		if (get_user(irq, uaddr))
			return -EFAULT;

		/* The PMU overflow interrupt can be a PPI or a valid SPI. */
		if (!(irq_is_ppi(irq) || irq_is_spi(irq)))
			return -EINVAL;

		if (!pmu_irq_is_valid(kvm, irq))
			return -EINVAL;

		if (kvm_arm_pmu_irq_initialized(vcpu))
			return -EBUSY;

		kvm_debug("Set kvm ARM PMU irq: %d\n", irq);
		vcpu->arch.pmu.irq_num = irq;
		return 0;
	}
	case KVM_ARM_VCPU_PMU_V3_FILTER: {
		u8 pmuver = kvm_arm_pmu_get_pmuver_limit();
		struct kvm_pmu_event_filter __user *uaddr;
		struct kvm_pmu_event_filter filter;
		int nr_events;

		/*
		 * Allow userspace to specify an event filter covering the
		 * entire event range supported by the hardware's PMUVer,
		 * rather than the guest's, for KVM backward compatibility.
		 */
		nr_events = __kvm_pmu_event_mask(pmuver) + 1;

		uaddr = (struct kvm_pmu_event_filter __user *)(long)attr->addr;

		if (copy_from_user(&filter, uaddr, sizeof(filter)))
			return -EFAULT;

		if (((u32)filter.base_event + filter.nevents) > nr_events ||
		    (filter.action != KVM_PMU_EVENT_ALLOW &&
		     filter.action != KVM_PMU_EVENT_DENY))
			return -EINVAL;

		if (kvm_vm_has_ran_once(kvm))
			return -EBUSY;

		if (!kvm->arch.pmu_filter) {
			kvm->arch.pmu_filter = bitmap_alloc(nr_events, GFP_KERNEL_ACCOUNT);
			if (!kvm->arch.pmu_filter)
				return -ENOMEM;

			/*
			 * The default depends on the first applied filter.
			 * If it allows events, the default is to deny.
			 * Conversely, if the first filter denies a set of
			 * events, the default is to allow.
			 */
			if (filter.action == KVM_PMU_EVENT_ALLOW)
				bitmap_zero(kvm->arch.pmu_filter, nr_events);
			else
				bitmap_fill(kvm->arch.pmu_filter, nr_events);
		}

		if (filter.action == KVM_PMU_EVENT_ALLOW)
			bitmap_set(kvm->arch.pmu_filter, filter.base_event, filter.nevents);
		else
			bitmap_clear(kvm->arch.pmu_filter, filter.base_event, filter.nevents);

		return 0;
	}
	case KVM_ARM_VCPU_PMU_V3_SET_PMU: {
		int __user *uaddr = (int __user *)(long)attr->addr;
		int pmu_id;

		if (get_user(pmu_id, uaddr))
			return -EFAULT;

		return kvm_arm_pmu_v3_set_pmu(vcpu, pmu_id);
	}
	case KVM_ARM_VCPU_PMU_V3_INIT:
		return kvm_arm_pmu_v3_init(vcpu);
	}

	return -ENXIO;
}

int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ: {
		int __user *uaddr = (int __user *)(long)attr->addr;
		int irq;

		if (!irqchip_in_kernel(vcpu->kvm))
			return -EINVAL;

		if (!kvm_vcpu_has_pmu(vcpu))
			return -ENODEV;

		if (!kvm_arm_pmu_irq_initialized(vcpu))
			return -ENXIO;

		irq = vcpu->arch.pmu.irq_num;
		return put_user(irq, uaddr);
	}
	}

	return -ENXIO;
}

int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ:
	case KVM_ARM_VCPU_PMU_V3_INIT:
	case KVM_ARM_VCPU_PMU_V3_FILTER:
	case KVM_ARM_VCPU_PMU_V3_SET_PMU:
		if (kvm_vcpu_has_pmu(vcpu))
			return 0;
	}

	return -ENXIO;
}

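/*
 * Return the sanitised PMU version of the host, capped at PMUv3p5, which is
 * the highest version KVM currently exposes to guests.
 */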
u8 kvm_arm_pmu_get_pmuver_limit(void)
{
	u64 tmp;

	tmp = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
	tmp = cpuid_feature_cap_perfmon_field(tmp,
					      ID_AA64DFR0_EL1_PMUVer_SHIFT,
					      ID_AA64DFR0_EL1_PMUVer_V3P5);
	return FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), tmp);
}