xref: /openbmc/linux/arch/arm64/kvm/pmu-emul.c (revision 5421db1b)
19ed24f4bSMarc Zyngier // SPDX-License-Identifier: GPL-2.0-only
29ed24f4bSMarc Zyngier /*
39ed24f4bSMarc Zyngier  * Copyright (C) 2015 Linaro Ltd.
49ed24f4bSMarc Zyngier  * Author: Shannon Zhao <shannon.zhao@linaro.org>
59ed24f4bSMarc Zyngier  */
69ed24f4bSMarc Zyngier 
79ed24f4bSMarc Zyngier #include <linux/cpu.h>
89ed24f4bSMarc Zyngier #include <linux/kvm.h>
99ed24f4bSMarc Zyngier #include <linux/kvm_host.h>
109ed24f4bSMarc Zyngier #include <linux/perf_event.h>
119ed24f4bSMarc Zyngier #include <linux/perf/arm_pmu.h>
129ed24f4bSMarc Zyngier #include <linux/uaccess.h>
139ed24f4bSMarc Zyngier #include <asm/kvm_emulate.h>
149ed24f4bSMarc Zyngier #include <kvm/arm_pmu.h>
159ed24f4bSMarc Zyngier #include <kvm/arm_vgic.h>
169ed24f4bSMarc Zyngier 
179ed24f4bSMarc Zyngier static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx);
189ed24f4bSMarc Zyngier static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx);
199ed24f4bSMarc Zyngier static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc);
209ed24f4bSMarc Zyngier 
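/*
 * Editor's note (not part of the original file): at this revision the
 * arm64 host PMU driver interprets perf_event_attr::config1 bit 0 as a
 * request for a 64-bit (chained) hardware counter, which is what the
 * flag below asks for when KVM emulates a chained pair.
 */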
219ed24f4bSMarc Zyngier #define PERF_ATTR_CFG1_KVM_PMU_CHAINED 0x1
229ed24f4bSMarc Zyngier 
23fd65a3b5SMarc Zyngier static u32 kvm_pmu_event_mask(struct kvm *kvm)
24fd65a3b5SMarc Zyngier {
25fd65a3b5SMarc Zyngier 	switch (kvm->arch.pmuver) {
268e26d11fSMarc Zyngier 	case ID_AA64DFR0_PMUVER_8_0:
27fd65a3b5SMarc Zyngier 		return GENMASK(9, 0);
288e26d11fSMarc Zyngier 	case ID_AA64DFR0_PMUVER_8_1:
298e26d11fSMarc Zyngier 	case ID_AA64DFR0_PMUVER_8_4:
308e26d11fSMarc Zyngier 	case ID_AA64DFR0_PMUVER_8_5:
31fd65a3b5SMarc Zyngier 		return GENMASK(15, 0);
32fd65a3b5SMarc Zyngier 	default:		/* Shouldn't be here, just for sanity */
33fd65a3b5SMarc Zyngier 		WARN_ONCE(1, "Unknown PMU version %d\n", kvm->arch.pmuver);
34fd65a3b5SMarc Zyngier 		return 0;
35fd65a3b5SMarc Zyngier 	}
36fd65a3b5SMarc Zyngier }
37fd65a3b5SMarc Zyngier 
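/*
 * Editor's illustration (not part of the original file): on an ARMv8.1+
 * PMU the event field is 16 bits wide, so a guest write of 0x80000011 to
 * PMEVTYPER<n>_EL0 keeps eventsel 0x0011 (CPU_CYCLES) after masking:
 * 0x80000011 & GENMASK(15, 0) == 0x0011. On an ARMv8.0 PMU the mask is
 * GENMASK(9, 0), so only event numbers 0..0x3ff can be expressed.
 */
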
389ed24f4bSMarc Zyngier /**
399ed24f4bSMarc Zyngier  * kvm_pmu_idx_is_64bit - determine if select_idx is a 64-bit counter
409ed24f4bSMarc Zyngier  * @vcpu: The vcpu pointer
419ed24f4bSMarc Zyngier  * @select_idx: The counter index
429ed24f4bSMarc Zyngier  */
439ed24f4bSMarc Zyngier static bool kvm_pmu_idx_is_64bit(struct kvm_vcpu *vcpu, u64 select_idx)
449ed24f4bSMarc Zyngier {
459ed24f4bSMarc Zyngier 	return (select_idx == ARMV8_PMU_CYCLE_IDX &&
469ed24f4bSMarc Zyngier 		__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_LC);
479ed24f4bSMarc Zyngier }
489ed24f4bSMarc Zyngier 
499ed24f4bSMarc Zyngier static struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
509ed24f4bSMarc Zyngier {
519ed24f4bSMarc Zyngier 	struct kvm_pmu *pmu;
529ed24f4bSMarc Zyngier 	struct kvm_vcpu_arch *vcpu_arch;
539ed24f4bSMarc Zyngier 
549ed24f4bSMarc Zyngier 	pmc -= pmc->idx;
559ed24f4bSMarc Zyngier 	pmu = container_of(pmc, struct kvm_pmu, pmc[0]);
569ed24f4bSMarc Zyngier 	vcpu_arch = container_of(pmu, struct kvm_vcpu_arch, pmu);
579ed24f4bSMarc Zyngier 	return container_of(vcpu_arch, struct kvm_vcpu, arch);
589ed24f4bSMarc Zyngier }
599ed24f4bSMarc Zyngier 
609ed24f4bSMarc Zyngier /**
619ed24f4bSMarc Zyngier  * kvm_pmu_pmc_is_chained - determine if the pmc is chained
629ed24f4bSMarc Zyngier  * @pmc: The PMU counter pointer
639ed24f4bSMarc Zyngier  */
649ed24f4bSMarc Zyngier static bool kvm_pmu_pmc_is_chained(struct kvm_pmc *pmc)
659ed24f4bSMarc Zyngier {
669ed24f4bSMarc Zyngier 	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
679ed24f4bSMarc Zyngier 
689ed24f4bSMarc Zyngier 	return test_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
699ed24f4bSMarc Zyngier }
709ed24f4bSMarc Zyngier 
719ed24f4bSMarc Zyngier /**
729ed24f4bSMarc Zyngier  * kvm_pmu_idx_is_high_counter - determine if select_idx is the high (odd) counter of a pair
739ed24f4bSMarc Zyngier  * @select_idx: The counter index
749ed24f4bSMarc Zyngier  */
759ed24f4bSMarc Zyngier static bool kvm_pmu_idx_is_high_counter(u64 select_idx)
769ed24f4bSMarc Zyngier {
779ed24f4bSMarc Zyngier 	return select_idx & 0x1;
789ed24f4bSMarc Zyngier }
799ed24f4bSMarc Zyngier 
809ed24f4bSMarc Zyngier /**
819ed24f4bSMarc Zyngier  * kvm_pmu_get_canonical_pmc - obtain the canonical pmc
829ed24f4bSMarc Zyngier  * @pmc: The PMU counter pointer
839ed24f4bSMarc Zyngier  *
849ed24f4bSMarc Zyngier  * When a pair of PMCs are chained together we use the low counter (canonical)
859ed24f4bSMarc Zyngier  * to hold the underlying perf event.
869ed24f4bSMarc Zyngier  */
879ed24f4bSMarc Zyngier static struct kvm_pmc *kvm_pmu_get_canonical_pmc(struct kvm_pmc *pmc)
889ed24f4bSMarc Zyngier {
899ed24f4bSMarc Zyngier 	if (kvm_pmu_pmc_is_chained(pmc) &&
909ed24f4bSMarc Zyngier 	    kvm_pmu_idx_is_high_counter(pmc->idx))
919ed24f4bSMarc Zyngier 		return pmc - 1;
929ed24f4bSMarc Zyngier 
939ed24f4bSMarc Zyngier 	return pmc;
949ed24f4bSMarc Zyngier }
959ed24f4bSMarc Zyngier static struct kvm_pmc *kvm_pmu_get_alternate_pmc(struct kvm_pmc *pmc)
969ed24f4bSMarc Zyngier {
979ed24f4bSMarc Zyngier 	if (kvm_pmu_idx_is_high_counter(pmc->idx))
989ed24f4bSMarc Zyngier 		return pmc - 1;
999ed24f4bSMarc Zyngier 	else
1009ed24f4bSMarc Zyngier 		return pmc + 1;
1019ed24f4bSMarc Zyngier }
1029ed24f4bSMarc Zyngier 
1039ed24f4bSMarc Zyngier /**
1049ed24f4bSMarc Zyngier  * kvm_pmu_idx_has_chain_evtype - determine if the counter pair is programmed with the CHAIN event
1059ed24f4bSMarc Zyngier  * @vcpu: The vcpu pointer
1069ed24f4bSMarc Zyngier  * @select_idx: The counter index
1079ed24f4bSMarc Zyngier  */
1089ed24f4bSMarc Zyngier static bool kvm_pmu_idx_has_chain_evtype(struct kvm_vcpu *vcpu, u64 select_idx)
1099ed24f4bSMarc Zyngier {
1109ed24f4bSMarc Zyngier 	u64 eventsel, reg;
1119ed24f4bSMarc Zyngier 
1129ed24f4bSMarc Zyngier 	select_idx |= 0x1;
1139ed24f4bSMarc Zyngier 
1149ed24f4bSMarc Zyngier 	if (select_idx == ARMV8_PMU_CYCLE_IDX)
1159ed24f4bSMarc Zyngier 		return false;
1169ed24f4bSMarc Zyngier 
1179ed24f4bSMarc Zyngier 	reg = PMEVTYPER0_EL0 + select_idx;
118fd65a3b5SMarc Zyngier 	eventsel = __vcpu_sys_reg(vcpu, reg) & kvm_pmu_event_mask(vcpu->kvm);
1199ed24f4bSMarc Zyngier 
1209ed24f4bSMarc Zyngier 	return eventsel == ARMV8_PMUV3_PERFCTR_CHAIN;
1219ed24f4bSMarc Zyngier }
1229ed24f4bSMarc Zyngier 
1239ed24f4bSMarc Zyngier /**
1249ed24f4bSMarc Zyngier  * kvm_pmu_get_pair_counter_value - get the full value of a (possibly chained) counter pair
1259ed24f4bSMarc Zyngier  * @vcpu: The vcpu pointer
1269ed24f4bSMarc Zyngier  * @pmc: The PMU counter pointer
1279ed24f4bSMarc Zyngier  */
1289ed24f4bSMarc Zyngier static u64 kvm_pmu_get_pair_counter_value(struct kvm_vcpu *vcpu,
1299ed24f4bSMarc Zyngier 					  struct kvm_pmc *pmc)
1309ed24f4bSMarc Zyngier {
1319ed24f4bSMarc Zyngier 	u64 counter, counter_high, reg, enabled, running;
1329ed24f4bSMarc Zyngier 
1339ed24f4bSMarc Zyngier 	if (kvm_pmu_pmc_is_chained(pmc)) {
1349ed24f4bSMarc Zyngier 		pmc = kvm_pmu_get_canonical_pmc(pmc);
1359ed24f4bSMarc Zyngier 		reg = PMEVCNTR0_EL0 + pmc->idx;
1369ed24f4bSMarc Zyngier 
1379ed24f4bSMarc Zyngier 		counter = __vcpu_sys_reg(vcpu, reg);
1389ed24f4bSMarc Zyngier 		counter_high = __vcpu_sys_reg(vcpu, reg + 1);
1399ed24f4bSMarc Zyngier 
1409ed24f4bSMarc Zyngier 		counter = lower_32_bits(counter) | (counter_high << 32);
1419ed24f4bSMarc Zyngier 	} else {
1429ed24f4bSMarc Zyngier 		reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
1439ed24f4bSMarc Zyngier 		      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
1449ed24f4bSMarc Zyngier 		counter = __vcpu_sys_reg(vcpu, reg);
1459ed24f4bSMarc Zyngier 	}
1469ed24f4bSMarc Zyngier 
1479ed24f4bSMarc Zyngier 	/*
1489ed24f4bSMarc Zyngier 	 * The real counter value is equal to the value of the counter register
1499ed24f4bSMarc Zyngier 	 * plus the value that the perf event has counted.
1509ed24f4bSMarc Zyngier 	 */
1519ed24f4bSMarc Zyngier 	if (pmc->perf_event)
1529ed24f4bSMarc Zyngier 		counter += perf_event_read_value(pmc->perf_event, &enabled,
1539ed24f4bSMarc Zyngier 						 &running);
1549ed24f4bSMarc Zyngier 
1559ed24f4bSMarc Zyngier 	return counter;
1569ed24f4bSMarc Zyngier }
1579ed24f4bSMarc Zyngier 
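/*
 * Editor's illustration (not part of the original file): for a chained
 * pair where the even counter register holds 0x00000010 and the odd one
 * holds 0x00000002, the combined value above is
 * (0x2ULL << 32) | 0x10 == 0x0000000200000010, before the delta counted
 * by the live perf event is added.
 */
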
1589ed24f4bSMarc Zyngier /**
1599ed24f4bSMarc Zyngier  * kvm_pmu_get_counter_value - get PMU counter value
1609ed24f4bSMarc Zyngier  * @vcpu: The vcpu pointer
1619ed24f4bSMarc Zyngier  * @select_idx: The counter index
1629ed24f4bSMarc Zyngier  */
1639ed24f4bSMarc Zyngier u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
1649ed24f4bSMarc Zyngier {
1659ed24f4bSMarc Zyngier 	u64 counter;
1669ed24f4bSMarc Zyngier 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
1679ed24f4bSMarc Zyngier 	struct kvm_pmc *pmc = &pmu->pmc[select_idx];
1689ed24f4bSMarc Zyngier 
1699ed24f4bSMarc Zyngier 	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
1709ed24f4bSMarc Zyngier 
1719ed24f4bSMarc Zyngier 	if (kvm_pmu_pmc_is_chained(pmc) &&
1729ed24f4bSMarc Zyngier 	    kvm_pmu_idx_is_high_counter(select_idx))
1739ed24f4bSMarc Zyngier 		counter = upper_32_bits(counter);
1749ed24f4bSMarc Zyngier 	else if (select_idx != ARMV8_PMU_CYCLE_IDX)
1759ed24f4bSMarc Zyngier 		counter = lower_32_bits(counter);
1769ed24f4bSMarc Zyngier 
1779ed24f4bSMarc Zyngier 	return counter;
1789ed24f4bSMarc Zyngier }
1799ed24f4bSMarc Zyngier 
1809ed24f4bSMarc Zyngier /**
1819ed24f4bSMarc Zyngier  * kvm_pmu_set_counter_value - set PMU counter value
1829ed24f4bSMarc Zyngier  * @vcpu: The vcpu pointer
1839ed24f4bSMarc Zyngier  * @select_idx: The counter index
1849ed24f4bSMarc Zyngier  * @val: The counter value
1859ed24f4bSMarc Zyngier  */
1869ed24f4bSMarc Zyngier void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
1879ed24f4bSMarc Zyngier {
1889ed24f4bSMarc Zyngier 	u64 reg;
1899ed24f4bSMarc Zyngier 
1909ed24f4bSMarc Zyngier 	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
1919ed24f4bSMarc Zyngier 	      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
1929ed24f4bSMarc Zyngier 	__vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx);
1939ed24f4bSMarc Zyngier 
1949ed24f4bSMarc Zyngier 	/* Recreate the perf event to reflect the updated sample_period */
1959ed24f4bSMarc Zyngier 	kvm_pmu_create_perf_event(vcpu, select_idx);
1969ed24f4bSMarc Zyngier }
1979ed24f4bSMarc Zyngier 
1989ed24f4bSMarc Zyngier /**
1999ed24f4bSMarc Zyngier  * kvm_pmu_release_perf_event - remove the perf event
2009ed24f4bSMarc Zyngier  * @pmc: The PMU counter pointer
2019ed24f4bSMarc Zyngier  */
2029ed24f4bSMarc Zyngier static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
2039ed24f4bSMarc Zyngier {
2049ed24f4bSMarc Zyngier 	pmc = kvm_pmu_get_canonical_pmc(pmc);
2059ed24f4bSMarc Zyngier 	if (pmc->perf_event) {
2069ed24f4bSMarc Zyngier 		perf_event_disable(pmc->perf_event);
2079ed24f4bSMarc Zyngier 		perf_event_release_kernel(pmc->perf_event);
2089ed24f4bSMarc Zyngier 		pmc->perf_event = NULL;
2099ed24f4bSMarc Zyngier 	}
2109ed24f4bSMarc Zyngier }
2119ed24f4bSMarc Zyngier 
2129ed24f4bSMarc Zyngier /**
2139ed24f4bSMarc Zyngier  * kvm_pmu_stop_counter - stop PMU counter
2149ed24f4bSMarc Zyngier  * @pmc: The PMU counter pointer
2159ed24f4bSMarc Zyngier  *
2169ed24f4bSMarc Zyngier  * If this counter has been configured to monitor some event, release it here.
2179ed24f4bSMarc Zyngier  */
2189ed24f4bSMarc Zyngier static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
2199ed24f4bSMarc Zyngier {
2209ed24f4bSMarc Zyngier 	u64 counter, reg, val;
2219ed24f4bSMarc Zyngier 
2229ed24f4bSMarc Zyngier 	pmc = kvm_pmu_get_canonical_pmc(pmc);
2239ed24f4bSMarc Zyngier 	if (!pmc->perf_event)
2249ed24f4bSMarc Zyngier 		return;
2259ed24f4bSMarc Zyngier 
2269ed24f4bSMarc Zyngier 	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
2279ed24f4bSMarc Zyngier 
2289ed24f4bSMarc Zyngier 	if (pmc->idx == ARMV8_PMU_CYCLE_IDX) {
2299ed24f4bSMarc Zyngier 		reg = PMCCNTR_EL0;
2309ed24f4bSMarc Zyngier 		val = counter;
2319ed24f4bSMarc Zyngier 	} else {
2329ed24f4bSMarc Zyngier 		reg = PMEVCNTR0_EL0 + pmc->idx;
2339ed24f4bSMarc Zyngier 		val = lower_32_bits(counter);
2349ed24f4bSMarc Zyngier 	}
2359ed24f4bSMarc Zyngier 
2369ed24f4bSMarc Zyngier 	__vcpu_sys_reg(vcpu, reg) = val;
2379ed24f4bSMarc Zyngier 
2389ed24f4bSMarc Zyngier 	if (kvm_pmu_pmc_is_chained(pmc))
2399ed24f4bSMarc Zyngier 		__vcpu_sys_reg(vcpu, reg + 1) = upper_32_bits(counter);
2409ed24f4bSMarc Zyngier 
2419ed24f4bSMarc Zyngier 	kvm_pmu_release_perf_event(pmc);
2429ed24f4bSMarc Zyngier }
2439ed24f4bSMarc Zyngier 
2449ed24f4bSMarc Zyngier /**
2459ed24f4bSMarc Zyngier  * kvm_pmu_vcpu_init - assign pmu counter indices for this vcpu
2469ed24f4bSMarc Zyngier  * @vcpu: The vcpu pointer
2479ed24f4bSMarc Zyngier  *
2489ed24f4bSMarc Zyngier  */
2499ed24f4bSMarc Zyngier void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu)
2509ed24f4bSMarc Zyngier {
2519ed24f4bSMarc Zyngier 	int i;
2529ed24f4bSMarc Zyngier 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
2539ed24f4bSMarc Zyngier 
2549ed24f4bSMarc Zyngier 	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
2559ed24f4bSMarc Zyngier 		pmu->pmc[i].idx = i;
2569ed24f4bSMarc Zyngier }
2579ed24f4bSMarc Zyngier 
2589ed24f4bSMarc Zyngier /**
2599ed24f4bSMarc Zyngier  * kvm_pmu_vcpu_reset - reset pmu state for this vcpu
2609ed24f4bSMarc Zyngier  * @vcpu: The vcpu pointer
2619ed24f4bSMarc Zyngier  *
2629ed24f4bSMarc Zyngier  */
2639ed24f4bSMarc Zyngier void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
2649ed24f4bSMarc Zyngier {
2659ed24f4bSMarc Zyngier 	unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
2669ed24f4bSMarc Zyngier 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
2679ed24f4bSMarc Zyngier 	int i;
2689ed24f4bSMarc Zyngier 
2699ed24f4bSMarc Zyngier 	for_each_set_bit(i, &mask, 32)
2709ed24f4bSMarc Zyngier 		kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]);
2719ed24f4bSMarc Zyngier 
2729ed24f4bSMarc Zyngier 	bitmap_zero(vcpu->arch.pmu.chained, ARMV8_PMU_MAX_COUNTER_PAIRS);
2739ed24f4bSMarc Zyngier }
2749ed24f4bSMarc Zyngier 
2759ed24f4bSMarc Zyngier /**
2769ed24f4bSMarc Zyngier  * kvm_pmu_vcpu_destroy - free the perf events of the PMU for this vcpu
2779ed24f4bSMarc Zyngier  * @vcpu: The vcpu pointer
2789ed24f4bSMarc Zyngier  *
2799ed24f4bSMarc Zyngier  */
2809ed24f4bSMarc Zyngier void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
2819ed24f4bSMarc Zyngier {
2829ed24f4bSMarc Zyngier 	int i;
2839ed24f4bSMarc Zyngier 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
2849ed24f4bSMarc Zyngier 
2859ed24f4bSMarc Zyngier 	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
2869ed24f4bSMarc Zyngier 		kvm_pmu_release_perf_event(&pmu->pmc[i]);
28795e92e45SJulien Thierry 	irq_work_sync(&vcpu->arch.pmu.overflow_work);
2889ed24f4bSMarc Zyngier }
2899ed24f4bSMarc Zyngier 
2909ed24f4bSMarc Zyngier u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
2919ed24f4bSMarc Zyngier {
2929ed24f4bSMarc Zyngier 	u64 val = __vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT;
2939ed24f4bSMarc Zyngier 
2949ed24f4bSMarc Zyngier 	val &= ARMV8_PMU_PMCR_N_MASK;
2959ed24f4bSMarc Zyngier 	if (val == 0)
2969ed24f4bSMarc Zyngier 		return BIT(ARMV8_PMU_CYCLE_IDX);
2979ed24f4bSMarc Zyngier 	else
2989ed24f4bSMarc Zyngier 		return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
2999ed24f4bSMarc Zyngier }
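/*
 * Editor's illustration (not part of the original file): with
 * PMCR_EL0.N == 6, the mask is GENMASK(5, 0) | BIT(31) == 0x8000003f,
 * i.e. six event counters plus the cycle counter
 * (ARMV8_PMU_CYCLE_IDX == 31).
 */
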
3009ed24f4bSMarc Zyngier 
3019ed24f4bSMarc Zyngier /**
3029ed24f4bSMarc Zyngier  * kvm_pmu_enable_counter_mask - enable selected PMU counters
3039ed24f4bSMarc Zyngier  * @vcpu: The vcpu pointer
3049ed24f4bSMarc Zyngier  * @val: the value the guest writes to the PMCNTENSET register
3059ed24f4bSMarc Zyngier  *
3069ed24f4bSMarc Zyngier  * Call perf_event_enable to start counting the perf event
3079ed24f4bSMarc Zyngier  */
3089ed24f4bSMarc Zyngier void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
3099ed24f4bSMarc Zyngier {
3109ed24f4bSMarc Zyngier 	int i;
3119ed24f4bSMarc Zyngier 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
3129ed24f4bSMarc Zyngier 	struct kvm_pmc *pmc;
3139ed24f4bSMarc Zyngier 
3149ed24f4bSMarc Zyngier 	if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
3159ed24f4bSMarc Zyngier 		return;
3169ed24f4bSMarc Zyngier 
3179ed24f4bSMarc Zyngier 	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
3189ed24f4bSMarc Zyngier 		if (!(val & BIT(i)))
3199ed24f4bSMarc Zyngier 			continue;
3209ed24f4bSMarc Zyngier 
3219ed24f4bSMarc Zyngier 		pmc = &pmu->pmc[i];
3229ed24f4bSMarc Zyngier 
3239ed24f4bSMarc Zyngier 		/* A change in the enable state may affect the chain state */
3249ed24f4bSMarc Zyngier 		kvm_pmu_update_pmc_chained(vcpu, i);
3259ed24f4bSMarc Zyngier 		kvm_pmu_create_perf_event(vcpu, i);
3269ed24f4bSMarc Zyngier 
3279ed24f4bSMarc Zyngier 		/* At this point, pmc must be the canonical */
3289ed24f4bSMarc Zyngier 		if (pmc->perf_event) {
3299ed24f4bSMarc Zyngier 			perf_event_enable(pmc->perf_event);
3309ed24f4bSMarc Zyngier 			if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
3319ed24f4bSMarc Zyngier 				kvm_debug("fail to enable perf event\n");
3329ed24f4bSMarc Zyngier 		}
3339ed24f4bSMarc Zyngier 	}
3349ed24f4bSMarc Zyngier }
3359ed24f4bSMarc Zyngier 
3369ed24f4bSMarc Zyngier /**
3379ed24f4bSMarc Zyngier  * kvm_pmu_disable_counter_mask - disable selected PMU counters
3389ed24f4bSMarc Zyngier  * @vcpu: The vcpu pointer
3399ed24f4bSMarc Zyngier  * @val: the value the guest writes to the PMCNTENCLR register
3409ed24f4bSMarc Zyngier  *
3419ed24f4bSMarc Zyngier  * Call perf_event_disable to stop counting the perf event
3429ed24f4bSMarc Zyngier  */
3439ed24f4bSMarc Zyngier void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
3449ed24f4bSMarc Zyngier {
3459ed24f4bSMarc Zyngier 	int i;
3469ed24f4bSMarc Zyngier 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
3479ed24f4bSMarc Zyngier 	struct kvm_pmc *pmc;
3489ed24f4bSMarc Zyngier 
3499ed24f4bSMarc Zyngier 	if (!val)
3509ed24f4bSMarc Zyngier 		return;
3519ed24f4bSMarc Zyngier 
3529ed24f4bSMarc Zyngier 	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
3539ed24f4bSMarc Zyngier 		if (!(val & BIT(i)))
3549ed24f4bSMarc Zyngier 			continue;
3559ed24f4bSMarc Zyngier 
3569ed24f4bSMarc Zyngier 		pmc = &pmu->pmc[i];
3579ed24f4bSMarc Zyngier 
3589ed24f4bSMarc Zyngier 		/* A change in the enable state may affect the chain state */
3599ed24f4bSMarc Zyngier 		kvm_pmu_update_pmc_chained(vcpu, i);
3609ed24f4bSMarc Zyngier 		kvm_pmu_create_perf_event(vcpu, i);
3619ed24f4bSMarc Zyngier 
3629ed24f4bSMarc Zyngier 		/* At this point, pmc must be the canonical */
3639ed24f4bSMarc Zyngier 		if (pmc->perf_event)
3649ed24f4bSMarc Zyngier 			perf_event_disable(pmc->perf_event);
3659ed24f4bSMarc Zyngier 	}
3669ed24f4bSMarc Zyngier }
3679ed24f4bSMarc Zyngier 
3689ed24f4bSMarc Zyngier static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
3699ed24f4bSMarc Zyngier {
3709ed24f4bSMarc Zyngier 	u64 reg = 0;
3719ed24f4bSMarc Zyngier 
3729ed24f4bSMarc Zyngier 	if ((__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
3739ed24f4bSMarc Zyngier 		reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
3749ed24f4bSMarc Zyngier 		reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
3759ed24f4bSMarc Zyngier 		reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
3769ed24f4bSMarc Zyngier 		reg &= kvm_pmu_valid_counter_mask(vcpu);
3779ed24f4bSMarc Zyngier 	}
3789ed24f4bSMarc Zyngier 
3799ed24f4bSMarc Zyngier 	return reg;
3809ed24f4bSMarc Zyngier }
3819ed24f4bSMarc Zyngier 
3829ed24f4bSMarc Zyngier static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
3839ed24f4bSMarc Zyngier {
3849ed24f4bSMarc Zyngier 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
3859ed24f4bSMarc Zyngier 	bool overflow;
3869ed24f4bSMarc Zyngier 
38746acf89dSMarc Zyngier 	if (!kvm_vcpu_has_pmu(vcpu))
3889ed24f4bSMarc Zyngier 		return;
3899ed24f4bSMarc Zyngier 
3909ed24f4bSMarc Zyngier 	overflow = !!kvm_pmu_overflow_status(vcpu);
3919ed24f4bSMarc Zyngier 	if (pmu->irq_level == overflow)
3929ed24f4bSMarc Zyngier 		return;
3939ed24f4bSMarc Zyngier 
3949ed24f4bSMarc Zyngier 	pmu->irq_level = overflow;
3959ed24f4bSMarc Zyngier 
3969ed24f4bSMarc Zyngier 	if (likely(irqchip_in_kernel(vcpu->kvm))) {
3979ed24f4bSMarc Zyngier 		int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
3989ed24f4bSMarc Zyngier 					      pmu->irq_num, overflow, pmu);
3999ed24f4bSMarc Zyngier 		WARN_ON(ret);
4009ed24f4bSMarc Zyngier 	}
4019ed24f4bSMarc Zyngier }
4029ed24f4bSMarc Zyngier 
4039ed24f4bSMarc Zyngier bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
4049ed24f4bSMarc Zyngier {
4059ed24f4bSMarc Zyngier 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
4069ed24f4bSMarc Zyngier 	struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
4079ed24f4bSMarc Zyngier 	bool run_level = sregs->device_irq_level & KVM_ARM_DEV_PMU;
4089ed24f4bSMarc Zyngier 
4099ed24f4bSMarc Zyngier 	if (likely(irqchip_in_kernel(vcpu->kvm)))
4109ed24f4bSMarc Zyngier 		return false;
4119ed24f4bSMarc Zyngier 
4129ed24f4bSMarc Zyngier 	return pmu->irq_level != run_level;
4139ed24f4bSMarc Zyngier }
4149ed24f4bSMarc Zyngier 
4159ed24f4bSMarc Zyngier /*
4169ed24f4bSMarc Zyngier  * Reflect the PMU overflow interrupt output level into the kvm_run structure
4179ed24f4bSMarc Zyngier  */
4189ed24f4bSMarc Zyngier void kvm_pmu_update_run(struct kvm_vcpu *vcpu)
4199ed24f4bSMarc Zyngier {
4209ed24f4bSMarc Zyngier 	struct kvm_sync_regs *regs = &vcpu->run->s.regs;
4219ed24f4bSMarc Zyngier 
4229ed24f4bSMarc Zyngier 	/* Populate the PMU overflow interrupt level for user space */
4239ed24f4bSMarc Zyngier 	regs->device_irq_level &= ~KVM_ARM_DEV_PMU;
4249ed24f4bSMarc Zyngier 	if (vcpu->arch.pmu.irq_level)
4259ed24f4bSMarc Zyngier 		regs->device_irq_level |= KVM_ARM_DEV_PMU;
4269ed24f4bSMarc Zyngier }
4279ed24f4bSMarc Zyngier 
4289ed24f4bSMarc Zyngier /**
4299ed24f4bSMarc Zyngier  * kvm_pmu_flush_hwstate - flush pmu state to cpu
4309ed24f4bSMarc Zyngier  * @vcpu: The vcpu pointer
4319ed24f4bSMarc Zyngier  *
4329ed24f4bSMarc Zyngier  * Check if the PMU has overflowed while we were running in the host, and inject
4339ed24f4bSMarc Zyngier  * an interrupt if that was the case.
4349ed24f4bSMarc Zyngier  */
4359ed24f4bSMarc Zyngier void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu)
4369ed24f4bSMarc Zyngier {
4379ed24f4bSMarc Zyngier 	kvm_pmu_update_state(vcpu);
4389ed24f4bSMarc Zyngier }
4399ed24f4bSMarc Zyngier 
4409ed24f4bSMarc Zyngier /**
4419ed24f4bSMarc Zyngier  * kvm_pmu_sync_hwstate - sync pmu state from cpu
4429ed24f4bSMarc Zyngier  * @vcpu: The vcpu pointer
4439ed24f4bSMarc Zyngier  *
4449ed24f4bSMarc Zyngier  * Check if the PMU has overflowed while we were running in the guest, and
4459ed24f4bSMarc Zyngier  * inject an interrupt if that was the case.
4469ed24f4bSMarc Zyngier  */
4479ed24f4bSMarc Zyngier void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
4489ed24f4bSMarc Zyngier {
4499ed24f4bSMarc Zyngier 	kvm_pmu_update_state(vcpu);
4509ed24f4bSMarc Zyngier }
4519ed24f4bSMarc Zyngier 
4529ed24f4bSMarc Zyngier /*
45395e92e45SJulien Thierry  * When the perf interrupt is an NMI, we cannot safely notify the vcpu corresponding
45495e92e45SJulien Thierry  * to the event.
45595e92e45SJulien Thierry  * This is why we need a callback to do it once outside of the NMI context.
45695e92e45SJulien Thierry  */
45795e92e45SJulien Thierry static void kvm_pmu_perf_overflow_notify_vcpu(struct irq_work *work)
45895e92e45SJulien Thierry {
45995e92e45SJulien Thierry 	struct kvm_vcpu *vcpu;
46095e92e45SJulien Thierry 	struct kvm_pmu *pmu;
46195e92e45SJulien Thierry 
46295e92e45SJulien Thierry 	pmu = container_of(work, struct kvm_pmu, overflow_work);
46395e92e45SJulien Thierry 	vcpu = kvm_pmc_to_vcpu(pmu->pmc);
46495e92e45SJulien Thierry 
46595e92e45SJulien Thierry 	kvm_vcpu_kick(vcpu);
46695e92e45SJulien Thierry }
46795e92e45SJulien Thierry 
46895e92e45SJulien Thierry /*
4699ed24f4bSMarc Zyngier  * When the perf event overflows, set the overflow status and inform the vcpu.
4709ed24f4bSMarc Zyngier  */
4719ed24f4bSMarc Zyngier static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
4729ed24f4bSMarc Zyngier 				  struct perf_sample_data *data,
4739ed24f4bSMarc Zyngier 				  struct pt_regs *regs)
4749ed24f4bSMarc Zyngier {
4759ed24f4bSMarc Zyngier 	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
4769ed24f4bSMarc Zyngier 	struct arm_pmu *cpu_pmu = to_arm_pmu(perf_event->pmu);
4779ed24f4bSMarc Zyngier 	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
4789ed24f4bSMarc Zyngier 	int idx = pmc->idx;
4799ed24f4bSMarc Zyngier 	u64 period;
4809ed24f4bSMarc Zyngier 
4819ed24f4bSMarc Zyngier 	cpu_pmu->pmu.stop(perf_event, PERF_EF_UPDATE);
4829ed24f4bSMarc Zyngier 
4839ed24f4bSMarc Zyngier 	/*
4849ed24f4bSMarc Zyngier 	 * Reset the sample period to the architectural limit,
4859ed24f4bSMarc Zyngier 	 * i.e. the point where the counter overflows.
4869ed24f4bSMarc Zyngier 	 */
4879ed24f4bSMarc Zyngier 	period = -(local64_read(&perf_event->count));
4889ed24f4bSMarc Zyngier 
4899ed24f4bSMarc Zyngier 	if (!kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
4909ed24f4bSMarc Zyngier 		period &= GENMASK(31, 0);
4919ed24f4bSMarc Zyngier 
4929ed24f4bSMarc Zyngier 	local64_set(&perf_event->hw.period_left, 0);
4939ed24f4bSMarc Zyngier 	perf_event->attr.sample_period = period;
4949ed24f4bSMarc Zyngier 	perf_event->hw.sample_period = period;
4959ed24f4bSMarc Zyngier 
4969ed24f4bSMarc Zyngier 	__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);
4979ed24f4bSMarc Zyngier 
4989ed24f4bSMarc Zyngier 	if (kvm_pmu_overflow_status(vcpu)) {
4999ed24f4bSMarc Zyngier 		kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
50095e92e45SJulien Thierry 
50195e92e45SJulien Thierry 		if (!in_nmi())
5029ed24f4bSMarc Zyngier 			kvm_vcpu_kick(vcpu);
50395e92e45SJulien Thierry 		else
50495e92e45SJulien Thierry 			irq_work_queue(&vcpu->arch.pmu.overflow_work);
5059ed24f4bSMarc Zyngier 	}
5069ed24f4bSMarc Zyngier 
5079ed24f4bSMarc Zyngier 	cpu_pmu->pmu.start(perf_event, PERF_EF_RELOAD);
5089ed24f4bSMarc Zyngier }
5099ed24f4bSMarc Zyngier 
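/*
 * Editor's illustration (not part of the original file): sample_period is
 * the number of increments left until the next overflow. If the perf
 * event count reads 0xfffffff0 when a 32-bit counter overflow is handled,
 * the new period is (-0xfffffff0) & GENMASK(31, 0) == 0x10, i.e. sixteen
 * more events.
 */
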
5109ed24f4bSMarc Zyngier /**
5119ed24f4bSMarc Zyngier  * kvm_pmu_software_increment - do software increment
5129ed24f4bSMarc Zyngier  * @vcpu: The vcpu pointer
5139ed24f4bSMarc Zyngier  * @val: the value the guest writes to the PMSWINC register
5149ed24f4bSMarc Zyngier  */
5159ed24f4bSMarc Zyngier void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
5169ed24f4bSMarc Zyngier {
5179ed24f4bSMarc Zyngier 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
5189ed24f4bSMarc Zyngier 	int i;
5199ed24f4bSMarc Zyngier 
5209ed24f4bSMarc Zyngier 	if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
5219ed24f4bSMarc Zyngier 		return;
5229ed24f4bSMarc Zyngier 
5239ed24f4bSMarc Zyngier 	/* Weed out disabled counters */
5249ed24f4bSMarc Zyngier 	val &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
5259ed24f4bSMarc Zyngier 
5269ed24f4bSMarc Zyngier 	for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++) {
5279ed24f4bSMarc Zyngier 		u64 type, reg;
5289ed24f4bSMarc Zyngier 
5299ed24f4bSMarc Zyngier 		if (!(val & BIT(i)))
5309ed24f4bSMarc Zyngier 			continue;
5319ed24f4bSMarc Zyngier 
5329ed24f4bSMarc Zyngier 		/* PMSWINC only applies to ... SW_INC! */
5339ed24f4bSMarc Zyngier 		type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i);
534fd65a3b5SMarc Zyngier 		type &= kvm_pmu_event_mask(vcpu->kvm);
5359ed24f4bSMarc Zyngier 		if (type != ARMV8_PMUV3_PERFCTR_SW_INCR)
5369ed24f4bSMarc Zyngier 			continue;
5379ed24f4bSMarc Zyngier 
5389ed24f4bSMarc Zyngier 		/* increment this SW_INC counter */
5399ed24f4bSMarc Zyngier 		reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
5409ed24f4bSMarc Zyngier 		reg = lower_32_bits(reg);
5419ed24f4bSMarc Zyngier 		__vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
5429ed24f4bSMarc Zyngier 
5439ed24f4bSMarc Zyngier 		if (reg) /* no overflow on the low part */
5449ed24f4bSMarc Zyngier 			continue;
5459ed24f4bSMarc Zyngier 
5469ed24f4bSMarc Zyngier 		if (kvm_pmu_pmc_is_chained(&pmu->pmc[i])) {
5479ed24f4bSMarc Zyngier 			/* increment the high counter */
5489ed24f4bSMarc Zyngier 			reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) + 1;
5499ed24f4bSMarc Zyngier 			reg = lower_32_bits(reg);
5509ed24f4bSMarc Zyngier 			__vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) = reg;
5519ed24f4bSMarc Zyngier 			if (!reg) /* mark overflow on the high counter */
5529ed24f4bSMarc Zyngier 				__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i + 1);
5539ed24f4bSMarc Zyngier 		} else {
5549ed24f4bSMarc Zyngier 			/* mark overflow on low counter */
5559ed24f4bSMarc Zyngier 			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
5569ed24f4bSMarc Zyngier 		}
5579ed24f4bSMarc Zyngier 	}
5589ed24f4bSMarc Zyngier }
5599ed24f4bSMarc Zyngier 
5609ed24f4bSMarc Zyngier /**
5619ed24f4bSMarc Zyngier  * kvm_pmu_handle_pmcr - handle PMCR register
5629ed24f4bSMarc Zyngier  * @vcpu: The vcpu pointer
5639ed24f4bSMarc Zyngier  * @val: the value the guest writes to the PMCR register
5649ed24f4bSMarc Zyngier  */
5659ed24f4bSMarc Zyngier void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
5669ed24f4bSMarc Zyngier {
5679ed24f4bSMarc Zyngier 	unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
5689ed24f4bSMarc Zyngier 	int i;
5699ed24f4bSMarc Zyngier 
5709ed24f4bSMarc Zyngier 	if (val & ARMV8_PMU_PMCR_E) {
5719ed24f4bSMarc Zyngier 		kvm_pmu_enable_counter_mask(vcpu,
5729ed24f4bSMarc Zyngier 		       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask);
5739ed24f4bSMarc Zyngier 	} else {
5749ed24f4bSMarc Zyngier 		kvm_pmu_disable_counter_mask(vcpu, mask);
5759ed24f4bSMarc Zyngier 	}
5769ed24f4bSMarc Zyngier 
5779ed24f4bSMarc Zyngier 	if (val & ARMV8_PMU_PMCR_C)
5789ed24f4bSMarc Zyngier 		kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);
5799ed24f4bSMarc Zyngier 
5809ed24f4bSMarc Zyngier 	if (val & ARMV8_PMU_PMCR_P) {
5819ed24f4bSMarc Zyngier 		for_each_set_bit(i, &mask, 32)
5829ed24f4bSMarc Zyngier 			kvm_pmu_set_counter_value(vcpu, i, 0);
5839ed24f4bSMarc Zyngier 	}
5849ed24f4bSMarc Zyngier }
5859ed24f4bSMarc Zyngier 
5869ed24f4bSMarc Zyngier static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx)
5879ed24f4bSMarc Zyngier {
5889ed24f4bSMarc Zyngier 	return (__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) &&
5899ed24f4bSMarc Zyngier 	       (__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(select_idx));
5909ed24f4bSMarc Zyngier }
5919ed24f4bSMarc Zyngier 
5929ed24f4bSMarc Zyngier /**
5939ed24f4bSMarc Zyngier  * kvm_pmu_create_perf_event - create a perf event for a counter
5949ed24f4bSMarc Zyngier  * @vcpu: The vcpu pointer
5959ed24f4bSMarc Zyngier  * @select_idx: The index of the selected counter
5969ed24f4bSMarc Zyngier  */
5979ed24f4bSMarc Zyngier static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
5989ed24f4bSMarc Zyngier {
5999ed24f4bSMarc Zyngier 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
6009ed24f4bSMarc Zyngier 	struct kvm_pmc *pmc;
6019ed24f4bSMarc Zyngier 	struct perf_event *event;
6029ed24f4bSMarc Zyngier 	struct perf_event_attr attr;
6039ed24f4bSMarc Zyngier 	u64 eventsel, counter, reg, data;
6049ed24f4bSMarc Zyngier 
6059ed24f4bSMarc Zyngier 	/*
6069ed24f4bSMarc Zyngier 	 * For chained counters the event type and filtering attributes are
6079ed24f4bSMarc Zyngier 	 * obtained from the low/even counter. We also use this counter to
6089ed24f4bSMarc Zyngier 	 * determine if the event is enabled/disabled.
6099ed24f4bSMarc Zyngier 	 */
6109ed24f4bSMarc Zyngier 	pmc = kvm_pmu_get_canonical_pmc(&pmu->pmc[select_idx]);
6119ed24f4bSMarc Zyngier 
6129ed24f4bSMarc Zyngier 	reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
6139ed24f4bSMarc Zyngier 	      ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + pmc->idx;
6149ed24f4bSMarc Zyngier 	data = __vcpu_sys_reg(vcpu, reg);
6159ed24f4bSMarc Zyngier 
6169ed24f4bSMarc Zyngier 	kvm_pmu_stop_counter(vcpu, pmc);
617d7eec236SMarc Zyngier 	if (pmc->idx == ARMV8_PMU_CYCLE_IDX)
618d7eec236SMarc Zyngier 		eventsel = ARMV8_PMUV3_PERFCTR_CPU_CYCLES;
619d7eec236SMarc Zyngier 	else
620d7eec236SMarc Zyngier 		eventsel = data & kvm_pmu_event_mask(vcpu->kvm);
6219ed24f4bSMarc Zyngier 
622d7eec236SMarc Zyngier 	/* Software increment event doesn't need to be backed by a perf event */
623d7eec236SMarc Zyngier 	if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR)
624d7eec236SMarc Zyngier 		return;
625d7eec236SMarc Zyngier 
626d7eec236SMarc Zyngier 	/*
627d7eec236SMarc Zyngier 	 * If we have a filter in place and the event isn't allowed, do
628d7eec236SMarc Zyngier 	 * not install a perf event either.
629d7eec236SMarc Zyngier 	 */
630d7eec236SMarc Zyngier 	if (vcpu->kvm->arch.pmu_filter &&
631d7eec236SMarc Zyngier 	    !test_bit(eventsel, vcpu->kvm->arch.pmu_filter))
6329ed24f4bSMarc Zyngier 		return;
6339ed24f4bSMarc Zyngier 
6349ed24f4bSMarc Zyngier 	memset(&attr, 0, sizeof(struct perf_event_attr));
6359ed24f4bSMarc Zyngier 	attr.type = PERF_TYPE_RAW;
6369ed24f4bSMarc Zyngier 	attr.size = sizeof(attr);
6379ed24f4bSMarc Zyngier 	attr.pinned = 1;
6389ed24f4bSMarc Zyngier 	attr.disabled = !kvm_pmu_counter_is_enabled(vcpu, pmc->idx);
6399ed24f4bSMarc Zyngier 	attr.exclude_user = data & ARMV8_PMU_EXCLUDE_EL0 ? 1 : 0;
6409ed24f4bSMarc Zyngier 	attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0;
6419ed24f4bSMarc Zyngier 	attr.exclude_hv = 1; /* Don't count EL2 events */
6429ed24f4bSMarc Zyngier 	attr.exclude_host = 1; /* Don't count host events */
643d7eec236SMarc Zyngier 	attr.config = eventsel;
6449ed24f4bSMarc Zyngier 
6459ed24f4bSMarc Zyngier 	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
6469ed24f4bSMarc Zyngier 
6479ed24f4bSMarc Zyngier 	if (kvm_pmu_pmc_is_chained(pmc)) {
6489ed24f4bSMarc Zyngier 		/*
6499ed24f4bSMarc Zyngier 		 * The initial sample period (overflow count) of an event. For
6509ed24f4bSMarc Zyngier 		 * chained counters we only support overflow interrupts on the
6519ed24f4bSMarc Zyngier 		 * high counter.
6529ed24f4bSMarc Zyngier 		 */
6539ed24f4bSMarc Zyngier 		attr.sample_period = (-counter) & GENMASK(63, 0);
6549ed24f4bSMarc Zyngier 		attr.config1 |= PERF_ATTR_CFG1_KVM_PMU_CHAINED;
6559ed24f4bSMarc Zyngier 
6569ed24f4bSMarc Zyngier 		event = perf_event_create_kernel_counter(&attr, -1, current,
6579ed24f4bSMarc Zyngier 							 kvm_pmu_perf_overflow,
6589ed24f4bSMarc Zyngier 							 pmc + 1);
6599ed24f4bSMarc Zyngier 	} else {
6609ed24f4bSMarc Zyngier 		/* The initial sample period (overflow count) of an event. */
6619ed24f4bSMarc Zyngier 		if (kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
6629ed24f4bSMarc Zyngier 			attr.sample_period = (-counter) & GENMASK(63, 0);
6639ed24f4bSMarc Zyngier 		else
6649ed24f4bSMarc Zyngier 			attr.sample_period = (-counter) & GENMASK(31, 0);
6659ed24f4bSMarc Zyngier 
6669ed24f4bSMarc Zyngier 		event = perf_event_create_kernel_counter(&attr, -1, current,
6679ed24f4bSMarc Zyngier 						 kvm_pmu_perf_overflow, pmc);
6689ed24f4bSMarc Zyngier 	}
6699ed24f4bSMarc Zyngier 
6709ed24f4bSMarc Zyngier 	if (IS_ERR(event)) {
6719ed24f4bSMarc Zyngier 		pr_err_once("kvm: pmu event creation failed %ld\n",
6729ed24f4bSMarc Zyngier 			    PTR_ERR(event));
6739ed24f4bSMarc Zyngier 		return;
6749ed24f4bSMarc Zyngier 	}
6759ed24f4bSMarc Zyngier 
6769ed24f4bSMarc Zyngier 	pmc->perf_event = event;
6779ed24f4bSMarc Zyngier }
6789ed24f4bSMarc Zyngier 
6799ed24f4bSMarc Zyngier /**
6809ed24f4bSMarc Zyngier  * kvm_pmu_update_pmc_chained - update chained bitmap
6819ed24f4bSMarc Zyngier  * @vcpu: The vcpu pointer
6829ed24f4bSMarc Zyngier  * @select_idx: The index of the selected counter
6839ed24f4bSMarc Zyngier  *
6849ed24f4bSMarc Zyngier  * Update the chained bitmap based on the event type written in the
6859ed24f4bSMarc Zyngier  * typer register and the enable state of the odd register.
6869ed24f4bSMarc Zyngier  */
6879ed24f4bSMarc Zyngier static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx)
6889ed24f4bSMarc Zyngier {
6899ed24f4bSMarc Zyngier 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
6909ed24f4bSMarc Zyngier 	struct kvm_pmc *pmc = &pmu->pmc[select_idx], *canonical_pmc;
6919ed24f4bSMarc Zyngier 	bool new_state, old_state;
6929ed24f4bSMarc Zyngier 
6939ed24f4bSMarc Zyngier 	old_state = kvm_pmu_pmc_is_chained(pmc);
6949ed24f4bSMarc Zyngier 	new_state = kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx) &&
6959ed24f4bSMarc Zyngier 		    kvm_pmu_counter_is_enabled(vcpu, pmc->idx | 0x1);
6969ed24f4bSMarc Zyngier 
6979ed24f4bSMarc Zyngier 	if (old_state == new_state)
6989ed24f4bSMarc Zyngier 		return;
6999ed24f4bSMarc Zyngier 
7009ed24f4bSMarc Zyngier 	canonical_pmc = kvm_pmu_get_canonical_pmc(pmc);
7019ed24f4bSMarc Zyngier 	kvm_pmu_stop_counter(vcpu, canonical_pmc);
7029ed24f4bSMarc Zyngier 	if (new_state) {
7039ed24f4bSMarc Zyngier 		/*
7049ed24f4bSMarc Zyngier 		 * During promotion from !chained to chained we must ensure
7059ed24f4bSMarc Zyngier 		 * the adjacent counter is stopped and its event destroyed
7069ed24f4bSMarc Zyngier 		 */
7079ed24f4bSMarc Zyngier 		kvm_pmu_stop_counter(vcpu, kvm_pmu_get_alternate_pmc(pmc));
7089ed24f4bSMarc Zyngier 		set_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
7099ed24f4bSMarc Zyngier 		return;
7109ed24f4bSMarc Zyngier 	}
7119ed24f4bSMarc Zyngier 	clear_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
7129ed24f4bSMarc Zyngier }
7139ed24f4bSMarc Zyngier 
7149ed24f4bSMarc Zyngier /**
7159ed24f4bSMarc Zyngier  * kvm_pmu_set_counter_event_type - set selected counter to monitor some event
7169ed24f4bSMarc Zyngier  * @vcpu: The vcpu pointer
7179ed24f4bSMarc Zyngier  * @data: The data guest writes to PMXEVTYPER_EL0
7189ed24f4bSMarc Zyngier  * @select_idx: The index of the selected counter
7199ed24f4bSMarc Zyngier  *
7209ed24f4bSMarc Zyngier  * When the guest OS accesses PMXEVTYPER_EL0, it wants to set a PMC to count an
7219ed24f4bSMarc Zyngier  * event with the given hardware event number. Here we call the perf_event API to
7229ed24f4bSMarc Zyngier  * emulate this action and create a kernel perf event for it.
7239ed24f4bSMarc Zyngier  */
7249ed24f4bSMarc Zyngier void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
7259ed24f4bSMarc Zyngier 				    u64 select_idx)
7269ed24f4bSMarc Zyngier {
727fd65a3b5SMarc Zyngier 	u64 reg, mask;
728fd65a3b5SMarc Zyngier 
729fd65a3b5SMarc Zyngier 	mask  =  ARMV8_PMU_EVTYPE_MASK;
730fd65a3b5SMarc Zyngier 	mask &= ~ARMV8_PMU_EVTYPE_EVENT;
731fd65a3b5SMarc Zyngier 	mask |= kvm_pmu_event_mask(vcpu->kvm);
7329ed24f4bSMarc Zyngier 
7339ed24f4bSMarc Zyngier 	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
7349ed24f4bSMarc Zyngier 	      ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + select_idx;
7359ed24f4bSMarc Zyngier 
736fd65a3b5SMarc Zyngier 	__vcpu_sys_reg(vcpu, reg) = data & mask;
7379ed24f4bSMarc Zyngier 
7389ed24f4bSMarc Zyngier 	kvm_pmu_update_pmc_chained(vcpu, select_idx);
7399ed24f4bSMarc Zyngier 	kvm_pmu_create_perf_event(vcpu, select_idx);
7409ed24f4bSMarc Zyngier }
7419ed24f4bSMarc Zyngier 
742*5421db1bSMarc Zyngier int kvm_pmu_probe_pmuver(void)
743fd65a3b5SMarc Zyngier {
744fd65a3b5SMarc Zyngier 	struct perf_event_attr attr = { };
745fd65a3b5SMarc Zyngier 	struct perf_event *event;
746fd65a3b5SMarc Zyngier 	struct arm_pmu *pmu;
747fd65a3b5SMarc Zyngier 	int pmuver = 0xf;
748fd65a3b5SMarc Zyngier 
749fd65a3b5SMarc Zyngier 	/*
750fd65a3b5SMarc Zyngier 	 * Create a dummy event that only counts user cycles. As we'll never
751fd65a3b5SMarc Zyngier 	 * leave this function with the event being live, it will never
752fd65a3b5SMarc Zyngier 	 * count anything. But it allows us to probe some of the PMU
753fd65a3b5SMarc Zyngier 	 * details. Yes, this is terrible.
754fd65a3b5SMarc Zyngier 	 */
755fd65a3b5SMarc Zyngier 	attr.type = PERF_TYPE_RAW;
756fd65a3b5SMarc Zyngier 	attr.size = sizeof(attr);
757fd65a3b5SMarc Zyngier 	attr.pinned = 1;
758fd65a3b5SMarc Zyngier 	attr.disabled = 0;
759fd65a3b5SMarc Zyngier 	attr.exclude_user = 0;
760fd65a3b5SMarc Zyngier 	attr.exclude_kernel = 1;
761fd65a3b5SMarc Zyngier 	attr.exclude_hv = 1;
762fd65a3b5SMarc Zyngier 	attr.exclude_host = 1;
763fd65a3b5SMarc Zyngier 	attr.config = ARMV8_PMUV3_PERFCTR_CPU_CYCLES;
764fd65a3b5SMarc Zyngier 	attr.sample_period = GENMASK(63, 0);
765fd65a3b5SMarc Zyngier 
766fd65a3b5SMarc Zyngier 	event = perf_event_create_kernel_counter(&attr, -1, current,
767fd65a3b5SMarc Zyngier 						 kvm_pmu_perf_overflow, &attr);
768fd65a3b5SMarc Zyngier 
769fd65a3b5SMarc Zyngier 	if (IS_ERR(event)) {
770fd65a3b5SMarc Zyngier 		pr_err_once("kvm: pmu event creation failed %ld\n",
771fd65a3b5SMarc Zyngier 			    PTR_ERR(event));
772fd65a3b5SMarc Zyngier 		return 0xf;
773fd65a3b5SMarc Zyngier 	}
774fd65a3b5SMarc Zyngier 
775fd65a3b5SMarc Zyngier 	if (event->pmu) {
776fd65a3b5SMarc Zyngier 		pmu = to_arm_pmu(event->pmu);
777fd65a3b5SMarc Zyngier 		if (pmu->pmuver)
778fd65a3b5SMarc Zyngier 			pmuver = pmu->pmuver;
779fd65a3b5SMarc Zyngier 	}
780fd65a3b5SMarc Zyngier 
781fd65a3b5SMarc Zyngier 	perf_event_disable(event);
782fd65a3b5SMarc Zyngier 	perf_event_release_kernel(event);
783fd65a3b5SMarc Zyngier 
784fd65a3b5SMarc Zyngier 	return pmuver;
785fd65a3b5SMarc Zyngier }
786fd65a3b5SMarc Zyngier 
78788865becSMarc Zyngier u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
78888865becSMarc Zyngier {
78988865becSMarc Zyngier 	unsigned long *bmap = vcpu->kvm->arch.pmu_filter;
79088865becSMarc Zyngier 	u64 val, mask = 0;
7919529aaa0SMarc Zyngier 	int base, i, nr_events;
79288865becSMarc Zyngier 
79388865becSMarc Zyngier 	if (!pmceid1) {
79488865becSMarc Zyngier 		val = read_sysreg(pmceid0_el0);
79588865becSMarc Zyngier 		base = 0;
79688865becSMarc Zyngier 	} else {
79788865becSMarc Zyngier 		val = read_sysreg(pmceid1_el0);
79846081078SMarc Zyngier 		/*
79946081078SMarc Zyngier 		 * Don't advertise STALL_SLOT, as PMMIR_EL0 is handled
80046081078SMarc Zyngier 		 * as RAZ
80146081078SMarc Zyngier 		 */
80246081078SMarc Zyngier 		if (vcpu->kvm->arch.pmuver >= ID_AA64DFR0_PMUVER_8_4)
80346081078SMarc Zyngier 			val &= ~BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32);
80488865becSMarc Zyngier 		base = 32;
80588865becSMarc Zyngier 	}
80688865becSMarc Zyngier 
80788865becSMarc Zyngier 	if (!bmap)
80888865becSMarc Zyngier 		return val;
80988865becSMarc Zyngier 
8109529aaa0SMarc Zyngier 	nr_events = kvm_pmu_event_mask(vcpu->kvm) + 1;
8119529aaa0SMarc Zyngier 
81288865becSMarc Zyngier 	for (i = 0; i < 32; i += 8) {
81388865becSMarc Zyngier 		u64 byte;
81488865becSMarc Zyngier 
81588865becSMarc Zyngier 		byte = bitmap_get_value8(bmap, base + i);
81688865becSMarc Zyngier 		mask |= byte << i;
8179529aaa0SMarc Zyngier 		if (nr_events >= (0x4000 + base + 32)) {
81888865becSMarc Zyngier 			byte = bitmap_get_value8(bmap, 0x4000 + base + i);
81988865becSMarc Zyngier 			mask |= byte << (32 + i);
82088865becSMarc Zyngier 		}
8219529aaa0SMarc Zyngier 	}
82288865becSMarc Zyngier 
82388865becSMarc Zyngier 	return val & mask;
82488865becSMarc Zyngier }
82588865becSMarc Zyngier 
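/*
 * Editor's illustration (not part of the original file): the event filter
 * also trims what is advertised to the guest. If a filter denies
 * ARMV8_PMUV3_PERFCTR_CPU_CYCLES (0x0011), bit 17 of the PMCEID0_EL0
 * value computed above is cleared, matching kvm_pmu_create_perf_event(),
 * which refuses to back filtered-out events with a perf event.
 */
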
8269ed24f4bSMarc Zyngier int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
8279ed24f4bSMarc Zyngier {
8289bbfa4b5SAlexandru Elisei 	if (!kvm_vcpu_has_pmu(vcpu))
8299ed24f4bSMarc Zyngier 		return 0;
8309ed24f4bSMarc Zyngier 
8319bbfa4b5SAlexandru Elisei 	if (!vcpu->arch.pmu.created)
8329bbfa4b5SAlexandru Elisei 		return -EINVAL;
8339bbfa4b5SAlexandru Elisei 
8349ed24f4bSMarc Zyngier 	/*
8359ed24f4bSMarc Zyngier 	 * A valid interrupt configuration for the PMU is either to have a
8369ed24f4bSMarc Zyngier 	 * properly configured interrupt number together with an in-kernel
8379ed24f4bSMarc Zyngier 	 * irqchip, or to have neither an in-kernel GIC nor an IRQ set.
8389ed24f4bSMarc Zyngier 	 */
8399ed24f4bSMarc Zyngier 	if (irqchip_in_kernel(vcpu->kvm)) {
8409ed24f4bSMarc Zyngier 		int irq = vcpu->arch.pmu.irq_num;
8419ed24f4bSMarc Zyngier 		/*
8429ed24f4bSMarc Zyngier 		 * If we are using an in-kernel vgic, at this point we know
8439ed24f4bSMarc Zyngier 		 * the vgic will be initialized, so we can check the PMU irq
8449ed24f4bSMarc Zyngier 		 * number against the dimensions of the vgic and make sure
8459ed24f4bSMarc Zyngier 		 * it's valid.
8469ed24f4bSMarc Zyngier 		 */
8479ed24f4bSMarc Zyngier 		if (!irq_is_ppi(irq) && !vgic_valid_spi(vcpu->kvm, irq))
8489ed24f4bSMarc Zyngier 			return -EINVAL;
8499ed24f4bSMarc Zyngier 	} else if (kvm_arm_pmu_irq_initialized(vcpu)) {
8509ed24f4bSMarc Zyngier 		   return -EINVAL;
8519ed24f4bSMarc Zyngier 	}
8529ed24f4bSMarc Zyngier 
8539ed24f4bSMarc Zyngier 	return 0;
8549ed24f4bSMarc Zyngier }
8559ed24f4bSMarc Zyngier 
8569ed24f4bSMarc Zyngier static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
8579ed24f4bSMarc Zyngier {
8589ed24f4bSMarc Zyngier 	if (irqchip_in_kernel(vcpu->kvm)) {
8599ed24f4bSMarc Zyngier 		int ret;
8609ed24f4bSMarc Zyngier 
8619ed24f4bSMarc Zyngier 		/*
8629ed24f4bSMarc Zyngier 		 * If using the PMU with an in-kernel virtual GIC
8639ed24f4bSMarc Zyngier 		 * implementation, we require the GIC to be already
8649ed24f4bSMarc Zyngier 		 * initialized when initializing the PMU.
8659ed24f4bSMarc Zyngier 		 */
8669ed24f4bSMarc Zyngier 		if (!vgic_initialized(vcpu->kvm))
8679ed24f4bSMarc Zyngier 			return -ENODEV;
8689ed24f4bSMarc Zyngier 
8699ed24f4bSMarc Zyngier 		if (!kvm_arm_pmu_irq_initialized(vcpu))
8709ed24f4bSMarc Zyngier 			return -ENXIO;
8719ed24f4bSMarc Zyngier 
8729ed24f4bSMarc Zyngier 		ret = kvm_vgic_set_owner(vcpu, vcpu->arch.pmu.irq_num,
8739ed24f4bSMarc Zyngier 					 &vcpu->arch.pmu);
8749ed24f4bSMarc Zyngier 		if (ret)
8759ed24f4bSMarc Zyngier 			return ret;
8769ed24f4bSMarc Zyngier 	}
8779ed24f4bSMarc Zyngier 
87895e92e45SJulien Thierry 	init_irq_work(&vcpu->arch.pmu.overflow_work,
87995e92e45SJulien Thierry 		      kvm_pmu_perf_overflow_notify_vcpu);
88095e92e45SJulien Thierry 
8819ed24f4bSMarc Zyngier 	vcpu->arch.pmu.created = true;
8829ed24f4bSMarc Zyngier 	return 0;
8839ed24f4bSMarc Zyngier }
8849ed24f4bSMarc Zyngier 
8859ed24f4bSMarc Zyngier /*
8869ed24f4bSMarc Zyngier  * For one VM the interrupt type must be the same for each vcpu.
8879ed24f4bSMarc Zyngier  * As a PPI, the interrupt number is the same for all vcpus,
8889ed24f4bSMarc Zyngier  * while as an SPI it must be a separate number per vcpu.
8899ed24f4bSMarc Zyngier  */
8909ed24f4bSMarc Zyngier static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
8919ed24f4bSMarc Zyngier {
8929ed24f4bSMarc Zyngier 	int i;
8939ed24f4bSMarc Zyngier 	struct kvm_vcpu *vcpu;
8949ed24f4bSMarc Zyngier 
8959ed24f4bSMarc Zyngier 	kvm_for_each_vcpu(i, vcpu, kvm) {
8969ed24f4bSMarc Zyngier 		if (!kvm_arm_pmu_irq_initialized(vcpu))
8979ed24f4bSMarc Zyngier 			continue;
8989ed24f4bSMarc Zyngier 
8999ed24f4bSMarc Zyngier 		if (irq_is_ppi(irq)) {
9009ed24f4bSMarc Zyngier 			if (vcpu->arch.pmu.irq_num != irq)
9019ed24f4bSMarc Zyngier 				return false;
9029ed24f4bSMarc Zyngier 		} else {
9039ed24f4bSMarc Zyngier 			if (vcpu->arch.pmu.irq_num == irq)
9049ed24f4bSMarc Zyngier 				return false;
9059ed24f4bSMarc Zyngier 		}
9069ed24f4bSMarc Zyngier 	}
9079ed24f4bSMarc Zyngier 
9089ed24f4bSMarc Zyngier 	return true;
9099ed24f4bSMarc Zyngier }
9109ed24f4bSMarc Zyngier 
9119ed24f4bSMarc Zyngier int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
9129ed24f4bSMarc Zyngier {
91377da4303SMarc Zyngier 	if (!kvm_vcpu_has_pmu(vcpu))
91442223fb1SMarc Zyngier 		return -ENODEV;
91542223fb1SMarc Zyngier 
91642223fb1SMarc Zyngier 	if (vcpu->arch.pmu.created)
91742223fb1SMarc Zyngier 		return -EBUSY;
91842223fb1SMarc Zyngier 
919fd65a3b5SMarc Zyngier 	if (!vcpu->kvm->arch.pmuver)
920fd65a3b5SMarc Zyngier 		vcpu->kvm->arch.pmuver = kvm_pmu_probe_pmuver();
921fd65a3b5SMarc Zyngier 
922fd65a3b5SMarc Zyngier 	if (vcpu->kvm->arch.pmuver == 0xf)
923fd65a3b5SMarc Zyngier 		return -ENODEV;
924fd65a3b5SMarc Zyngier 
9259ed24f4bSMarc Zyngier 	switch (attr->attr) {
9269ed24f4bSMarc Zyngier 	case KVM_ARM_VCPU_PMU_V3_IRQ: {
9279ed24f4bSMarc Zyngier 		int __user *uaddr = (int __user *)(long)attr->addr;
9289ed24f4bSMarc Zyngier 		int irq;
9299ed24f4bSMarc Zyngier 
9309ed24f4bSMarc Zyngier 		if (!irqchip_in_kernel(vcpu->kvm))
9319ed24f4bSMarc Zyngier 			return -EINVAL;
9329ed24f4bSMarc Zyngier 
9339ed24f4bSMarc Zyngier 		if (get_user(irq, uaddr))
9349ed24f4bSMarc Zyngier 			return -EFAULT;
9359ed24f4bSMarc Zyngier 
9369ed24f4bSMarc Zyngier 		/* The PMU overflow interrupt can be a PPI or a valid SPI. */
9379ed24f4bSMarc Zyngier 		if (!(irq_is_ppi(irq) || irq_is_spi(irq)))
9389ed24f4bSMarc Zyngier 			return -EINVAL;
9399ed24f4bSMarc Zyngier 
9409ed24f4bSMarc Zyngier 		if (!pmu_irq_is_valid(vcpu->kvm, irq))
9419ed24f4bSMarc Zyngier 			return -EINVAL;
9429ed24f4bSMarc Zyngier 
9439ed24f4bSMarc Zyngier 		if (kvm_arm_pmu_irq_initialized(vcpu))
9449ed24f4bSMarc Zyngier 			return -EBUSY;
9459ed24f4bSMarc Zyngier 
9469ed24f4bSMarc Zyngier 		kvm_debug("Set kvm ARM PMU irq: %d\n", irq);
9479ed24f4bSMarc Zyngier 		vcpu->arch.pmu.irq_num = irq;
9489ed24f4bSMarc Zyngier 		return 0;
9499ed24f4bSMarc Zyngier 	}
950d7eec236SMarc Zyngier 	case KVM_ARM_VCPU_PMU_V3_FILTER: {
951d7eec236SMarc Zyngier 		struct kvm_pmu_event_filter __user *uaddr;
952d7eec236SMarc Zyngier 		struct kvm_pmu_event_filter filter;
953d7eec236SMarc Zyngier 		int nr_events;
954d7eec236SMarc Zyngier 
955d7eec236SMarc Zyngier 		nr_events = kvm_pmu_event_mask(vcpu->kvm) + 1;
956d7eec236SMarc Zyngier 
957d7eec236SMarc Zyngier 		uaddr = (struct kvm_pmu_event_filter __user *)(long)attr->addr;
958d7eec236SMarc Zyngier 
959d7eec236SMarc Zyngier 		if (copy_from_user(&filter, uaddr, sizeof(filter)))
960d7eec236SMarc Zyngier 			return -EFAULT;
961d7eec236SMarc Zyngier 
962d7eec236SMarc Zyngier 		if (((u32)filter.base_event + filter.nevents) > nr_events ||
963d7eec236SMarc Zyngier 		    (filter.action != KVM_PMU_EVENT_ALLOW &&
964d7eec236SMarc Zyngier 		     filter.action != KVM_PMU_EVENT_DENY))
965d7eec236SMarc Zyngier 			return -EINVAL;
966d7eec236SMarc Zyngier 
967d7eec236SMarc Zyngier 		mutex_lock(&vcpu->kvm->lock);
968d7eec236SMarc Zyngier 
969d7eec236SMarc Zyngier 		if (!vcpu->kvm->arch.pmu_filter) {
970d7eec236SMarc Zyngier 			vcpu->kvm->arch.pmu_filter = bitmap_alloc(nr_events, GFP_KERNEL);
971d7eec236SMarc Zyngier 			if (!vcpu->kvm->arch.pmu_filter) {
972d7eec236SMarc Zyngier 				mutex_unlock(&vcpu->kvm->lock);
973d7eec236SMarc Zyngier 				return -ENOMEM;
974d7eec236SMarc Zyngier 			}
975d7eec236SMarc Zyngier 
976d7eec236SMarc Zyngier 			/*
977d7eec236SMarc Zyngier 			 * The default depends on the first applied filter.
978d7eec236SMarc Zyngier 			 * If it allows events, the default is to deny.
979d7eec236SMarc Zyngier 			 * Conversely, if the first filter denies a set of
980d7eec236SMarc Zyngier 			 * events, the default is to allow.
981d7eec236SMarc Zyngier 			 */
982d7eec236SMarc Zyngier 			if (filter.action == KVM_PMU_EVENT_ALLOW)
983d7eec236SMarc Zyngier 				bitmap_zero(vcpu->kvm->arch.pmu_filter, nr_events);
984d7eec236SMarc Zyngier 			else
985d7eec236SMarc Zyngier 				bitmap_fill(vcpu->kvm->arch.pmu_filter, nr_events);
986d7eec236SMarc Zyngier 		}
987d7eec236SMarc Zyngier 
988d7eec236SMarc Zyngier 		if (filter.action == KVM_PMU_EVENT_ALLOW)
989d7eec236SMarc Zyngier 			bitmap_set(vcpu->kvm->arch.pmu_filter, filter.base_event, filter.nevents);
990d7eec236SMarc Zyngier 		else
991d7eec236SMarc Zyngier 			bitmap_clear(vcpu->kvm->arch.pmu_filter, filter.base_event, filter.nevents);
992d7eec236SMarc Zyngier 
993d7eec236SMarc Zyngier 		mutex_unlock(&vcpu->kvm->lock);
994d7eec236SMarc Zyngier 
995d7eec236SMarc Zyngier 		return 0;
996d7eec236SMarc Zyngier 	}
9979ed24f4bSMarc Zyngier 	case KVM_ARM_VCPU_PMU_V3_INIT:
9989ed24f4bSMarc Zyngier 		return kvm_arm_pmu_v3_init(vcpu);
9999ed24f4bSMarc Zyngier 	}
10009ed24f4bSMarc Zyngier 
10019ed24f4bSMarc Zyngier 	return -ENXIO;
10029ed24f4bSMarc Zyngier }
10039ed24f4bSMarc Zyngier 
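/*
 * Editor's illustration (not part of the original file): a minimal
 * userspace sketch of how a VMM could drive the vCPU attributes handled
 * by kvm_arm_pmu_v3_set_attr() above. "vcpu_fd" is assumed to be an open
 * KVM vCPU file descriptor, "irq" a PPI/SPI number, and the in-kernel
 * GIC is assumed to be initialized before the INIT step; error handling
 * is reduced to returning -1.
 */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int example_vcpu_pmu_setup(int vcpu_fd, int irq)
{
	struct kvm_device_attr attr = {
		.group = KVM_ARM_VCPU_PMU_V3_CTRL,
		.attr  = KVM_ARM_VCPU_PMU_V3_IRQ,
		.addr  = (__u64)(unsigned long)&irq,
	};

	/* Tell KVM which interrupt line the virtual PMU raises */
	if (ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr))
		return -1;

	/* Finalize the per-vCPU PMU once the in-kernel GIC is initialized */
	attr.attr = KVM_ARM_VCPU_PMU_V3_INIT;
	attr.addr = 0;
	return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
}
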
10049ed24f4bSMarc Zyngier int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
10059ed24f4bSMarc Zyngier {
10069ed24f4bSMarc Zyngier 	switch (attr->attr) {
10079ed24f4bSMarc Zyngier 	case KVM_ARM_VCPU_PMU_V3_IRQ: {
10089ed24f4bSMarc Zyngier 		int __user *uaddr = (int __user *)(long)attr->addr;
10099ed24f4bSMarc Zyngier 		int irq;
10109ed24f4bSMarc Zyngier 
10119ed24f4bSMarc Zyngier 		if (!irqchip_in_kernel(vcpu->kvm))
10129ed24f4bSMarc Zyngier 			return -EINVAL;
10139ed24f4bSMarc Zyngier 
101414bda7a9SMarc Zyngier 		if (!kvm_vcpu_has_pmu(vcpu))
10159ed24f4bSMarc Zyngier 			return -ENODEV;
10169ed24f4bSMarc Zyngier 
10179ed24f4bSMarc Zyngier 		if (!kvm_arm_pmu_irq_initialized(vcpu))
10189ed24f4bSMarc Zyngier 			return -ENXIO;
10199ed24f4bSMarc Zyngier 
10209ed24f4bSMarc Zyngier 		irq = vcpu->arch.pmu.irq_num;
10219ed24f4bSMarc Zyngier 		return put_user(irq, uaddr);
10229ed24f4bSMarc Zyngier 	}
10239ed24f4bSMarc Zyngier 	}
10249ed24f4bSMarc Zyngier 
10259ed24f4bSMarc Zyngier 	return -ENXIO;
10269ed24f4bSMarc Zyngier }
10279ed24f4bSMarc Zyngier 
10289ed24f4bSMarc Zyngier int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
10299ed24f4bSMarc Zyngier {
10309ed24f4bSMarc Zyngier 	switch (attr->attr) {
10319ed24f4bSMarc Zyngier 	case KVM_ARM_VCPU_PMU_V3_IRQ:
10329ed24f4bSMarc Zyngier 	case KVM_ARM_VCPU_PMU_V3_INIT:
1033d7eec236SMarc Zyngier 	case KVM_ARM_VCPU_PMU_V3_FILTER:
103477da4303SMarc Zyngier 		if (kvm_vcpu_has_pmu(vcpu))
10359ed24f4bSMarc Zyngier 			return 0;
10369ed24f4bSMarc Zyngier 	}
10379ed24f4bSMarc Zyngier 
10389ed24f4bSMarc Zyngier 	return -ENXIO;
10399ed24f4bSMarc Zyngier }
1040