// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/perf_event.h>
#include <linux/perf/arm_pmu.h>
#include <linux/uaccess.h>
#include <asm/kvm_emulate.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_vgic.h>

#define PERF_ATTR_CFG1_COUNTER_64BIT	BIT(0)

DEFINE_STATIC_KEY_FALSE(kvm_arm_pmu_available);

static LIST_HEAD(arm_pmus);
static DEFINE_MUTEX(arm_pmus_lock);

static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc);
static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc);

static struct kvm_vcpu *kvm_pmc_to_vcpu(const struct kvm_pmc *pmc)
{
	return container_of(pmc, struct kvm_vcpu, arch.pmu.pmc[pmc->idx]);
}

static struct kvm_pmc *kvm_vcpu_idx_to_pmc(struct kvm_vcpu *vcpu, int cnt_idx)
{
	return &vcpu->arch.pmu.pmc[cnt_idx];
}

static u32 kvm_pmu_event_mask(struct kvm *kvm)
{
	unsigned int pmuver;

	pmuver = kvm->arch.arm_pmu->pmuver;

	switch (pmuver) {
	case ID_AA64DFR0_EL1_PMUVer_IMP:
		return GENMASK(9, 0);
	case ID_AA64DFR0_EL1_PMUVer_V3P1:
	case ID_AA64DFR0_EL1_PMUVer_V3P4:
	case ID_AA64DFR0_EL1_PMUVer_V3P5:
	case ID_AA64DFR0_EL1_PMUVer_V3P7:
		return GENMASK(15, 0);
	default:		/* Shouldn't be here, just for sanity */
		WARN_ONCE(1, "Unknown PMU version %d\n", pmuver);
		return 0;
	}
}
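
/*
 * Example: an ARMv8.0 PMU (PMUVer == IMP) has a 10-bit event space, so
 * kvm_pmu_event_mask() returns GENMASK(9, 0) == 0x3ff; PMUv3.1 and later
 * widen the event number to 16 bits (0xffff).
 */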

/**
 * kvm_pmc_is_64bit - determine if counter is 64bit
 * @pmc: counter context
 */
static bool kvm_pmc_is_64bit(struct kvm_pmc *pmc)
{
	return (pmc->idx == ARMV8_PMU_CYCLE_IDX ||
		kvm_pmu_is_3p5(kvm_pmc_to_vcpu(pmc)));
}

static bool kvm_pmc_has_64bit_overflow(struct kvm_pmc *pmc)
{
	u64 val = __vcpu_sys_reg(kvm_pmc_to_vcpu(pmc), PMCR_EL0);

	return (pmc->idx < ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LP)) ||
	       (pmc->idx == ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LC));
}

static bool kvm_pmu_counter_can_chain(struct kvm_pmc *pmc)
{
	return (!(pmc->idx & 1) && (pmc->idx + 1) < ARMV8_PMU_CYCLE_IDX &&
		!kvm_pmc_has_64bit_overflow(pmc));
}
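
/*
 * Chaining pairs an even-numbered counter with its odd-numbered
 * neighbour: when the even counter wraps, ARMV8_PMUV3_PERFCTR_CHAIN on
 * the odd counter counts the overflow. This only makes sense while the
 * even counter overflows at 32 bits, hence the
 * !kvm_pmc_has_64bit_overflow() check above.
 */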

static u32 counter_index_to_reg(u64 idx)
{
	return (idx == ARMV8_PMU_CYCLE_IDX) ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + idx;
}

static u32 counter_index_to_evtreg(u64 idx)
{
	return (idx == ARMV8_PMU_CYCLE_IDX) ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + idx;
}
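
/*
 * The arithmetic above relies on PMEVCNTR0_EL0..PMEVCNTR30_EL0 (and
 * likewise the PMEVTYPER registers) occupying consecutive slots in the
 * vcpu's shadow sys_regs file, so a counter index maps to its register
 * by a simple offset.
 */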

static u64 kvm_pmu_get_pmc_value(struct kvm_pmc *pmc)
{
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	u64 counter, reg, enabled, running;

	reg = counter_index_to_reg(pmc->idx);
	counter = __vcpu_sys_reg(vcpu, reg);

	/*
	 * The real counter value is the shadow register value plus
	 * whatever the attached perf event has counted.
	 */
	if (pmc->perf_event)
		counter += perf_event_read_value(pmc->perf_event, &enabled,
						 &running);

	if (!kvm_pmc_is_64bit(pmc))
		counter = lower_32_bits(counter);

	return counter;
}

/**
 * kvm_pmu_get_counter_value - get PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
{
	if (!kvm_vcpu_has_pmu(vcpu))
		return 0;

	return kvm_pmu_get_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, select_idx));
}

static void kvm_pmu_set_pmc_value(struct kvm_pmc *pmc, u64 val, bool force)
{
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	u64 reg;

	kvm_pmu_release_perf_event(pmc);

	reg = counter_index_to_reg(pmc->idx);

	if (vcpu_mode_is_32bit(vcpu) && pmc->idx != ARMV8_PMU_CYCLE_IDX &&
	    !force) {
		/*
		 * Even with PMUv3p5, AArch32 cannot write to the top
		 * 32bit of the counters. The only possible course of
		 * action is to use PMCR.P, which will reset them to
		 * 0 (the only use of the 'force' parameter).
		 */
		val = (__vcpu_sys_reg(vcpu, reg) & GENMASK(63, 32)) |
		      lower_32_bits(val);
	}

	__vcpu_sys_reg(vcpu, reg) = val;

	/* Recreate the perf event to reflect the updated sample_period */
	kvm_pmu_create_perf_event(pmc);
}

/**
 * kvm_pmu_set_counter_value - set PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 * @val: The counter value
 */
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
{
	if (!kvm_vcpu_has_pmu(vcpu))
		return;

	kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, select_idx), val, false);
}

/**
 * kvm_pmu_release_perf_event - remove the perf event
 * @pmc: The PMU counter pointer
 */
static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
{
	if (pmc->perf_event) {
		perf_event_disable(pmc->perf_event);
		perf_event_release_kernel(pmc->perf_event);
		pmc->perf_event = NULL;
	}
}

/**
 * kvm_pmu_stop_counter - stop PMU counter
 * @pmc: The PMU counter pointer
 *
 * If this counter has been configured to monitor some event, release it here.
 */
static void kvm_pmu_stop_counter(struct kvm_pmc *pmc)
{
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	u64 reg, val;

	if (!pmc->perf_event)
		return;

	val = kvm_pmu_get_pmc_value(pmc);

	reg = counter_index_to_reg(pmc->idx);

	__vcpu_sys_reg(vcpu, reg) = val;

	kvm_pmu_release_perf_event(pmc);
}

/**
 * kvm_pmu_vcpu_init - assign PMU counter indices for this vcpu
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
		pmu->pmc[i].idx = i;
}

/**
 * kvm_pmu_vcpu_reset - reset PMU state for this vcpu
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
{
	unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
	int i;

	for_each_set_bit(i, &mask, 32)
		kvm_pmu_stop_counter(kvm_vcpu_idx_to_pmc(vcpu, i));
}

/**
 * kvm_pmu_vcpu_destroy - free the perf events of the PMU for this vcpu
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	int i;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
		kvm_pmu_release_perf_event(kvm_vcpu_idx_to_pmc(vcpu, i));
	irq_work_sync(&vcpu->arch.pmu.overflow_work);
}

u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
	u64 val = __vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT;

	val &= ARMV8_PMU_PMCR_N_MASK;
	if (val == 0)
		return BIT(ARMV8_PMU_CYCLE_IDX);
	else
		return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
}
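
/*
 * Example: with PMCR_EL0.N == 6, the valid counters are PMEVCNTR<0..5>
 * plus the cycle counter (index 31), so the mask above is
 * GENMASK(5, 0) | BIT(31) == 0x8000003f.
 */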

/**
 * kvm_pmu_enable_counter_mask - enable selected PMU counters
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCNTENSET register
 *
 * Call perf_event_enable to start counting the perf event
 */
void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
{
	int i;

	if (!kvm_vcpu_has_pmu(vcpu))
		return;

	if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
		return;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		struct kvm_pmc *pmc;

		if (!(val & BIT(i)))
			continue;

		pmc = kvm_vcpu_idx_to_pmc(vcpu, i);

		if (!pmc->perf_event) {
			kvm_pmu_create_perf_event(pmc);
		} else {
			perf_event_enable(pmc->perf_event);
			if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
				kvm_debug("failed to enable perf event\n");
		}
	}
}

/**
 * kvm_pmu_disable_counter_mask - disable selected PMU counters
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCNTENCLR register
 *
 * Call perf_event_disable to stop counting the perf event
 */
void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
{
	int i;

	if (!kvm_vcpu_has_pmu(vcpu) || !val)
		return;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		struct kvm_pmc *pmc;

		if (!(val & BIT(i)))
			continue;

		pmc = kvm_vcpu_idx_to_pmc(vcpu, i);

		if (pmc->perf_event)
			perf_event_disable(pmc->perf_event);
	}
}

static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
{
	u64 reg = 0;

	if ((__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
		reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
		reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
		reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
	}

	return reg;
}
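
/*
 * A counter asserts the overflow interrupt only when three enables line
 * up: the global PMCR_EL0.E, the per-counter PMCNTENSET_EL0 bit and the
 * per-counter PMINTENSET_EL1 bit, hence the three-way AND above.
 */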

static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	bool overflow;

	if (!kvm_vcpu_has_pmu(vcpu))
		return;

	overflow = !!kvm_pmu_overflow_status(vcpu);
	if (pmu->irq_level == overflow)
		return;

	pmu->irq_level = overflow;

	if (likely(irqchip_in_kernel(vcpu->kvm))) {
		int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
					      pmu->irq_num, overflow, pmu);
		WARN_ON(ret);
	}
}

bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
	bool run_level = sregs->device_irq_level & KVM_ARM_DEV_PMU;

	if (likely(irqchip_in_kernel(vcpu->kvm)))
		return false;

	return pmu->irq_level != run_level;
}

/*
 * Reflect the PMU overflow interrupt output level into the kvm_run structure
 */
void kvm_pmu_update_run(struct kvm_vcpu *vcpu)
{
	struct kvm_sync_regs *regs = &vcpu->run->s.regs;

	/* Populate the PMU bit of the device IRQ bitmap for user space */
	regs->device_irq_level &= ~KVM_ARM_DEV_PMU;
	if (vcpu->arch.pmu.irq_level)
		regs->device_irq_level |= KVM_ARM_DEV_PMU;
}

/**
 * kvm_pmu_flush_hwstate - flush pmu state to cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the host, and inject
 * an interrupt if that was the case.
 */
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu)
{
	kvm_pmu_update_state(vcpu);
}

/**
 * kvm_pmu_sync_hwstate - sync pmu state from cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the guest, and
 * inject an interrupt if that was the case.
 */
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
{
	kvm_pmu_update_state(vcpu);
}

/*
 * When the perf interrupt is an NMI, we cannot safely notify the vcpu
 * corresponding to the event from NMI context. This callback lets us do
 * it once we are back outside of the NMI.
 */
static void kvm_pmu_perf_overflow_notify_vcpu(struct irq_work *work)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(work, struct kvm_vcpu, arch.pmu.overflow_work);
	kvm_vcpu_kick(vcpu);
}

/*
 * Perform an increment on any of the counters described in @mask,
 * generating the overflow if required, and propagate it as a chained
 * event if possible.
 */
static void kvm_pmu_counter_increment(struct kvm_vcpu *vcpu,
				      unsigned long mask, u32 event)
{
	int i;

	if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
		return;

	/* Weed out disabled counters */
	mask &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);

	for_each_set_bit(i, &mask, ARMV8_PMU_CYCLE_IDX) {
		struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
		u64 type, reg;

		/* Filter on event type */
		type = __vcpu_sys_reg(vcpu, counter_index_to_evtreg(i));
		type &= kvm_pmu_event_mask(vcpu->kvm);
		if (type != event)
			continue;

		/* Increment this counter */
		reg = __vcpu_sys_reg(vcpu, counter_index_to_reg(i)) + 1;
		if (!kvm_pmc_is_64bit(pmc))
			reg = lower_32_bits(reg);
		__vcpu_sys_reg(vcpu, counter_index_to_reg(i)) = reg;

		/* No overflow? move on */
		if (kvm_pmc_has_64bit_overflow(pmc) ? reg : lower_32_bits(reg))
			continue;

		/* Mark overflow */
		__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);

		if (kvm_pmu_counter_can_chain(pmc))
			kvm_pmu_counter_increment(vcpu, BIT(i + 1),
						  ARMV8_PMUV3_PERFCTR_CHAIN);
	}
}
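
/*
 * Example of the chained path: if 32-bit counter 0 wraps above, BIT(0)
 * is set in PMOVSSET_EL0 and, if counter 1 is programmed with the CHAIN
 * event, the recursive call bumps counter 1 by one, emulating a 64-bit
 * even/odd pair.
 */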

/* Compute the sample period for a given counter value */
static u64 compute_period(struct kvm_pmc *pmc, u64 counter)
{
	u64 val;

	if (kvm_pmc_is_64bit(pmc) && kvm_pmc_has_64bit_overflow(pmc))
		val = (-counter) & GENMASK(63, 0);
	else
		val = (-counter) & GENMASK(31, 0);

	return val;
}
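
/*
 * Example: a counter that overflows at 32 bits and currently reads
 * 0xffff0000 has 0x10000 increments left before wrapping, and indeed
 * (-0xffff0000) & GENMASK(31, 0) == 0x10000.
 */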

/*
 * When the perf event overflows, set the overflow status and inform the vcpu.
 */
static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
				  struct perf_sample_data *data,
				  struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	struct arm_pmu *cpu_pmu = to_arm_pmu(perf_event->pmu);
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	int idx = pmc->idx;
	u64 period;

	cpu_pmu->pmu.stop(perf_event, PERF_EF_UPDATE);

	/*
	 * Reset the sample period to the architectural limit,
	 * i.e. the point where the counter overflows.
	 */
	period = compute_period(pmc, local64_read(&perf_event->count));

	local64_set(&perf_event->hw.period_left, 0);
	perf_event->attr.sample_period = period;
	perf_event->hw.sample_period = period;

	__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);

	if (kvm_pmu_counter_can_chain(pmc))
		kvm_pmu_counter_increment(vcpu, BIT(idx + 1),
					  ARMV8_PMUV3_PERFCTR_CHAIN);

	if (kvm_pmu_overflow_status(vcpu)) {
		kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);

		if (!in_nmi())
			kvm_vcpu_kick(vcpu);
		else
			irq_work_queue(&vcpu->arch.pmu.overflow_work);
	}

	cpu_pmu->pmu.start(perf_event, PERF_EF_RELOAD);
}

/**
 * kvm_pmu_software_increment - do software increment
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMSWINC register
 */
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
{
	kvm_pmu_counter_increment(vcpu, val, ARMV8_PMUV3_PERFCTR_SW_INCR);
}
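
/*
 * Example: a guest write of 0x3 to PMSWINC_EL0 asks for counters 0 and
 * 1 to be incremented; kvm_pmu_counter_increment() then drops any that
 * are disabled or not programmed with the SW_INCR event.
 */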

/**
 * kvm_pmu_handle_pmcr - handle PMCR register
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCR register
 */
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
{
	int i;

	if (!kvm_vcpu_has_pmu(vcpu))
		return;

	/* Fixup PMCR_EL0 to reconcile the PMU version and the LP bit */
	if (!kvm_pmu_is_3p5(vcpu))
		val &= ~ARMV8_PMU_PMCR_LP;

	/* The reset bits don't indicate any state, and shouldn't be saved. */
	__vcpu_sys_reg(vcpu, PMCR_EL0) = val & ~(ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_P);

	if (val & ARMV8_PMU_PMCR_E) {
		kvm_pmu_enable_counter_mask(vcpu,
		       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
	} else {
		kvm_pmu_disable_counter_mask(vcpu,
		       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
	}

	if (val & ARMV8_PMU_PMCR_C)
		kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);

	if (val & ARMV8_PMU_PMCR_P) {
		unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);

		mask &= ~BIT(ARMV8_PMU_CYCLE_IDX);
		for_each_set_bit(i, &mask, 32)
			kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, i), 0, true);
	}
	kvm_vcpu_pmu_restore_guest(vcpu);
}
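
/*
 * Note that the PMCR_EL0.P handling above deliberately masks out the
 * cycle counter: architecturally, the P bit resets only the event
 * counters, while the C bit (handled separately) resets PMCCNTR_EL0.
 */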

static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc)
{
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);

	return (__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) &&
	       (__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(pmc->idx));
}

/**
 * kvm_pmu_create_perf_event - create a perf event for a counter
 * @pmc: Counter context
 */
static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc)
{
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	struct arm_pmu *arm_pmu = vcpu->kvm->arch.arm_pmu;
	struct perf_event *event;
	struct perf_event_attr attr;
	u64 eventsel, reg, data;

	reg = counter_index_to_evtreg(pmc->idx);
	data = __vcpu_sys_reg(vcpu, reg);

	kvm_pmu_stop_counter(pmc);
	if (pmc->idx == ARMV8_PMU_CYCLE_IDX)
		eventsel = ARMV8_PMUV3_PERFCTR_CPU_CYCLES;
	else
		eventsel = data & kvm_pmu_event_mask(vcpu->kvm);

	/*
	 * Neither SW increment nor chained events need to be backed
	 * by a perf event.
	 */
	if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR ||
	    eventsel == ARMV8_PMUV3_PERFCTR_CHAIN)
		return;

	/*
	 * If we have a filter in place and the event isn't allowed, do
	 * not install a perf event either.
	 */
	if (vcpu->kvm->arch.pmu_filter &&
	    !test_bit(eventsel, vcpu->kvm->arch.pmu_filter))
		return;

	memset(&attr, 0, sizeof(struct perf_event_attr));
	attr.type = arm_pmu->pmu.type;
	attr.size = sizeof(attr);
	attr.pinned = 1;
	attr.disabled = !kvm_pmu_counter_is_enabled(pmc);
	attr.exclude_user = data & ARMV8_PMU_EXCLUDE_EL0 ? 1 : 0;
	attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0;
	attr.exclude_hv = 1; /* Don't count EL2 events */
	attr.exclude_host = 1; /* Don't count host events */
	attr.config = eventsel;

	/*
	 * If counting with a 64bit counter, advertise it to the perf
	 * code, carefully dealing with the initial sample period
	 * which also depends on the overflow.
	 */
	if (kvm_pmc_is_64bit(pmc))
		attr.config1 |= PERF_ATTR_CFG1_COUNTER_64BIT;

	attr.sample_period = compute_period(pmc, kvm_pmu_get_pmc_value(pmc));

	event = perf_event_create_kernel_counter(&attr, -1, current,
						 kvm_pmu_perf_overflow, pmc);

	if (IS_ERR(event)) {
		pr_err_once("kvm: pmu event creation failed %ld\n",
			    PTR_ERR(event));
		return;
	}

	pmc->perf_event = event;
}

/**
 * kvm_pmu_set_counter_event_type - set selected counter to monitor some event
 * @vcpu: The vcpu pointer
 * @data: The data guest writes to PMXEVTYPER_EL0
 * @select_idx: The number of selected counter
 *
 * When the OS accesses PMXEVTYPER_EL0, it wants to set a PMC to count an
 * event with the given hardware event number. Here we call the perf_event API
 * to emulate this action and create a kernel perf event for it.
 */
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
				    u64 select_idx)
{
	struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, select_idx);
	u64 reg, mask;

	if (!kvm_vcpu_has_pmu(vcpu))
		return;

	mask  =  ARMV8_PMU_EVTYPE_MASK;
	mask &= ~ARMV8_PMU_EVTYPE_EVENT;
	mask |= kvm_pmu_event_mask(vcpu->kvm);

	reg = counter_index_to_evtreg(pmc->idx);

	__vcpu_sys_reg(vcpu, reg) = data & mask;

	kvm_pmu_create_perf_event(pmc);
}
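
/*
 * Called by the arm_pmu driver when a host PMU is registered: record it
 * on the arm_pmus list and, on the first usable (non-IMP_DEF) PMU, flip
 * the static key that advertises PMU support to the rest of KVM.
 */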
void kvm_host_pmu_init(struct arm_pmu *pmu)
{
	struct arm_pmu_entry *entry;

	if (pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_NI ||
	    pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
		return;

	mutex_lock(&arm_pmus_lock);

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		goto out_unlock;

	entry->arm_pmu = pmu;
	list_add_tail(&entry->entry, &arm_pmus);

	if (list_is_singular(&arm_pmus))
		static_branch_enable(&kvm_arm_pmu_available);

out_unlock:
	mutex_unlock(&arm_pmus_lock);
}

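/*
 * Pick a default PMU for the VM: the first registered PMU that supports
 * the CPU this ioctl happens to run on.
 */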
static struct arm_pmu *kvm_pmu_probe_armpmu(void)
{
	struct arm_pmu *tmp, *pmu = NULL;
	struct arm_pmu_entry *entry;
	int cpu;

	mutex_lock(&arm_pmus_lock);

	cpu = smp_processor_id();
	list_for_each_entry(entry, &arm_pmus, entry) {
		tmp = entry->arm_pmu;

		if (cpumask_test_cpu(cpu, &tmp->supported_cpus)) {
			pmu = tmp;
			break;
		}
	}

	mutex_unlock(&arm_pmus_lock);

	return pmu;
}

u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
{
	unsigned long *bmap = vcpu->kvm->arch.pmu_filter;
	u64 val, mask = 0;
	int base, i, nr_events;

	if (!kvm_vcpu_has_pmu(vcpu))
		return 0;

	if (!pmceid1) {
		val = read_sysreg(pmceid0_el0);
		/* always support CHAIN */
		val |= BIT(ARMV8_PMUV3_PERFCTR_CHAIN);
		base = 0;
	} else {
		val = read_sysreg(pmceid1_el0);
		/*
		 * Don't advertise STALL_SLOT, as PMMIR_EL0 is handled
		 * as RAZ
		 */
		if (vcpu->kvm->arch.arm_pmu->pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P4)
			val &= ~BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32);
		base = 32;
	}

	if (!bmap)
		return val;

	nr_events = kvm_pmu_event_mask(vcpu->kvm) + 1;

	for (i = 0; i < 32; i += 8) {
		u64 byte;

		byte = bitmap_get_value8(bmap, base + i);
		mask |= byte << i;
		if (nr_events >= (0x4000 + base + 32)) {
			byte = bitmap_get_value8(bmap, 0x4000 + base + i);
			mask |= byte << (32 + i);
		}
	}

	return val & mask;
}
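
/*
 * Example: in PMCEID1_EL0 (pmceid1 == true, base == 32), bit n of the
 * low half reports common event 0x20 + n and bit n of the high half
 * reports extended event 0x4020 + n, which is why the loop above
 * samples the filter bitmap at both 'base + i' and '0x4000 + base + i'.
 */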

int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
	if (!kvm_vcpu_has_pmu(vcpu))
		return 0;

	if (!vcpu->arch.pmu.created)
		return -EINVAL;

	/*
	 * A valid interrupt configuration for the PMU is either to have a
	 * properly configured interrupt number and using an in-kernel
	 * irqchip, or to not have an in-kernel GIC and not set an IRQ.
	 */
	if (irqchip_in_kernel(vcpu->kvm)) {
		int irq = vcpu->arch.pmu.irq_num;
		/*
		 * If we are using an in-kernel vgic, at this point we know
		 * the vgic will be initialized, so we can check the PMU irq
		 * number against the dimensions of the vgic and make sure
		 * it's valid.
		 */
		if (!irq_is_ppi(irq) && !vgic_valid_spi(vcpu->kvm, irq))
			return -EINVAL;
	} else if (kvm_arm_pmu_irq_initialized(vcpu)) {
		return -EINVAL;
	}

	/* One-off reload of the PMU on first run */
	kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);

	return 0;
}

static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
{
	if (irqchip_in_kernel(vcpu->kvm)) {
		int ret;

		/*
		 * If using the PMU with an in-kernel virtual GIC
		 * implementation, we require the GIC to be already
		 * initialized when initializing the PMU.
		 */
		if (!vgic_initialized(vcpu->kvm))
			return -ENODEV;

		if (!kvm_arm_pmu_irq_initialized(vcpu))
			return -ENXIO;

		ret = kvm_vgic_set_owner(vcpu, vcpu->arch.pmu.irq_num,
					 &vcpu->arch.pmu);
		if (ret)
			return ret;
	}

	init_irq_work(&vcpu->arch.pmu.overflow_work,
		      kvm_pmu_perf_overflow_notify_vcpu);

	vcpu->arch.pmu.created = true;
	return 0;
}

/*
 * For one VM the interrupt type must be the same for each vcpu.
 * As a PPI, the interrupt number is the same for all vcpus,
 * while as an SPI it must be a separate number per vcpu.
 */
static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!kvm_arm_pmu_irq_initialized(vcpu))
			continue;

		if (irq_is_ppi(irq)) {
			if (vcpu->arch.pmu.irq_num != irq)
				return false;
		} else {
			if (vcpu->arch.pmu.irq_num == irq)
				return false;
		}
	}

	return true;
}

static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id)
{
	struct kvm *kvm = vcpu->kvm;
	struct arm_pmu_entry *entry;
	struct arm_pmu *arm_pmu;
	int ret = -ENXIO;

	lockdep_assert_held(&kvm->arch.config_lock);
	mutex_lock(&arm_pmus_lock);

	list_for_each_entry(entry, &arm_pmus, entry) {
		arm_pmu = entry->arm_pmu;
		if (arm_pmu->pmu.type == pmu_id) {
			if (kvm_vm_has_ran_once(kvm) ||
			    (kvm->arch.pmu_filter && kvm->arch.arm_pmu != arm_pmu)) {
				ret = -EBUSY;
				break;
			}

			kvm->arch.arm_pmu = arm_pmu;
			cpumask_copy(kvm->arch.supported_cpus, &arm_pmu->supported_cpus);
			ret = 0;
			break;
		}
	}

	mutex_unlock(&arm_pmus_lock);
	return ret;
}

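/*
 * A minimal userspace sketch of driving this attribute interface
 * (assuming 'vcpu_fd' comes from KVM_CREATE_VCPU, the VM has an
 * in-kernel GIC, and PPI number 23 is illustrative): set the overflow
 * interrupt, then finalize the PMU with the INIT attribute.
 *
 *	int irq = 23;
 *	struct kvm_device_attr attr = {
 *		.group	= KVM_ARM_VCPU_PMU_V3_CTRL,
 *		.attr	= KVM_ARM_VCPU_PMU_V3_IRQ,
 *		.addr	= (__u64)(unsigned long)&irq,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);	// pick the IRQ
 *	attr.attr = KVM_ARM_VCPU_PMU_V3_INIT;
 *	attr.addr = 0;
 *	ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);	// finalize the PMU
 */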
int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	struct kvm *kvm = vcpu->kvm;

	lockdep_assert_held(&kvm->arch.config_lock);

	if (!kvm_vcpu_has_pmu(vcpu))
		return -ENODEV;

	if (vcpu->arch.pmu.created)
		return -EBUSY;

	if (!kvm->arch.arm_pmu) {
		/*
		 * No PMU set, get the default one.
		 *
		 * The observant among you will notice that the supported_cpus
		 * mask does not get updated for the default PMU even though it
		 * is quite possible the selected instance supports only a
		 * subset of cores in the system. This is intentional, and
		 * upholds the preexisting behavior on heterogeneous systems
		 * where vCPUs can be scheduled on any core but the guest
		 * counters could stop working.
		 */
		kvm->arch.arm_pmu = kvm_pmu_probe_armpmu();
		if (!kvm->arch.arm_pmu)
			return -ENODEV;
	}

	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ: {
		int __user *uaddr = (int __user *)(long)attr->addr;
		int irq;

		if (!irqchip_in_kernel(kvm))
			return -EINVAL;

		if (get_user(irq, uaddr))
			return -EFAULT;

		/* The PMU overflow interrupt can be a PPI or a valid SPI. */
		if (!(irq_is_ppi(irq) || irq_is_spi(irq)))
			return -EINVAL;

		if (!pmu_irq_is_valid(kvm, irq))
			return -EINVAL;

		if (kvm_arm_pmu_irq_initialized(vcpu))
			return -EBUSY;

		kvm_debug("Set kvm ARM PMU irq: %d\n", irq);
		vcpu->arch.pmu.irq_num = irq;
		return 0;
	}
	case KVM_ARM_VCPU_PMU_V3_FILTER: {
		struct kvm_pmu_event_filter __user *uaddr;
		struct kvm_pmu_event_filter filter;
		int nr_events;

		nr_events = kvm_pmu_event_mask(kvm) + 1;

		uaddr = (struct kvm_pmu_event_filter __user *)(long)attr->addr;

		if (copy_from_user(&filter, uaddr, sizeof(filter)))
			return -EFAULT;

		if (((u32)filter.base_event + filter.nevents) > nr_events ||
		    (filter.action != KVM_PMU_EVENT_ALLOW &&
		     filter.action != KVM_PMU_EVENT_DENY))
			return -EINVAL;

		if (kvm_vm_has_ran_once(kvm))
			return -EBUSY;

		if (!kvm->arch.pmu_filter) {
			kvm->arch.pmu_filter = bitmap_alloc(nr_events, GFP_KERNEL_ACCOUNT);
			if (!kvm->arch.pmu_filter)
				return -ENOMEM;

			/*
			 * The default depends on the first applied filter.
			 * If it allows events, the default is to deny.
			 * Conversely, if the first filter denies a set of
			 * events, the default is to allow.
			 */
			if (filter.action == KVM_PMU_EVENT_ALLOW)
				bitmap_zero(kvm->arch.pmu_filter, nr_events);
			else
				bitmap_fill(kvm->arch.pmu_filter, nr_events);
		}
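
		/*
		 * Example: after an initial ALLOW filter covering events
		 * 0x0-0x3f, a second call with { .base_event = 0x11,
		 * .nevents = 1, .action = KVM_PMU_EVENT_DENY } clears just
		 * the CPU_CYCLES bit, leaving the rest of the range allowed.
		 */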

		if (filter.action == KVM_PMU_EVENT_ALLOW)
			bitmap_set(kvm->arch.pmu_filter, filter.base_event, filter.nevents);
		else
			bitmap_clear(kvm->arch.pmu_filter, filter.base_event, filter.nevents);

		return 0;
	}
	case KVM_ARM_VCPU_PMU_V3_SET_PMU: {
		int __user *uaddr = (int __user *)(long)attr->addr;
		int pmu_id;

		if (get_user(pmu_id, uaddr))
			return -EFAULT;

		return kvm_arm_pmu_v3_set_pmu(vcpu, pmu_id);
	}
	case KVM_ARM_VCPU_PMU_V3_INIT:
		return kvm_arm_pmu_v3_init(vcpu);
	}

	return -ENXIO;
}

int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ: {
		int __user *uaddr = (int __user *)(long)attr->addr;
		int irq;

		if (!irqchip_in_kernel(vcpu->kvm))
			return -EINVAL;

		if (!kvm_vcpu_has_pmu(vcpu))
			return -ENODEV;

		if (!kvm_arm_pmu_irq_initialized(vcpu))
			return -ENXIO;

		irq = vcpu->arch.pmu.irq_num;
		return put_user(irq, uaddr);
	}
	}

	return -ENXIO;
}

int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ:
	case KVM_ARM_VCPU_PMU_V3_INIT:
	case KVM_ARM_VCPU_PMU_V3_FILTER:
	case KVM_ARM_VCPU_PMU_V3_SET_PMU:
		if (kvm_vcpu_has_pmu(vcpu))
			return 0;
	}

	return -ENXIO;
}

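/*
 * Return the most recent PMU version KVM is willing to expose to a
 * guest: the sanitised host value, capped at PMUv3p5, the latest
 * version the emulation above understands.
 */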
u8 kvm_arm_pmu_get_pmuver_limit(void)
{
	u64 tmp;

	tmp = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
	tmp = cpuid_feature_cap_perfmon_field(tmp,
					      ID_AA64DFR0_EL1_PMUVer_SHIFT,
					      ID_AA64DFR0_EL1_PMUVer_V3P5);
	return FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), tmp);
}