// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/perf_event.h>
#include <linux/perf/arm_pmu.h>
#include <linux/uaccess.h>
#include <asm/kvm_emulate.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_vgic.h>
#include <asm/arm_pmuv3.h>

#define PERF_ATTR_CFG1_COUNTER_64BIT	BIT(0)

DEFINE_STATIC_KEY_FALSE(kvm_arm_pmu_available);

static LIST_HEAD(arm_pmus);
static DEFINE_MUTEX(arm_pmus_lock);

static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc);
static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc);

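/*
 * A kvm_pmc lives in the vcpu's kvm_pmu at the slot matching its counter
 * index, so the owning vcpu can be recovered with container_of(), and a
 * counter index maps straight back to its kvm_pmc.
 */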
static struct kvm_vcpu *kvm_pmc_to_vcpu(const struct kvm_pmc *pmc)
{
	return container_of(pmc, struct kvm_vcpu, arch.pmu.pmc[pmc->idx]);
}

static struct kvm_pmc *kvm_vcpu_idx_to_pmc(struct kvm_vcpu *vcpu, int cnt_idx)
{
	return &vcpu->arch.pmu.pmc[cnt_idx];
}

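/*
 * Width of the event number field in PMEVTYPERn_EL0.evtCount: 10 bits for
 * the original PMUv3, 16 bits from PMUv3p1 onwards.
 */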
static u32 __kvm_pmu_event_mask(unsigned int pmuver)
{
	switch (pmuver) {
	case ID_AA64DFR0_EL1_PMUVer_IMP:
		return GENMASK(9, 0);
	case ID_AA64DFR0_EL1_PMUVer_V3P1:
	case ID_AA64DFR0_EL1_PMUVer_V3P4:
	case ID_AA64DFR0_EL1_PMUVer_V3P5:
	case ID_AA64DFR0_EL1_PMUVer_V3P7:
		return GENMASK(15, 0);
	default:		/* Shouldn't be here, just for sanity */
		WARN_ONCE(1, "Unknown PMU version %d\n", pmuver);
		return 0;
	}
}

static u32 kvm_pmu_event_mask(struct kvm *kvm)
{
	u64 dfr0 = IDREG(kvm, SYS_ID_AA64DFR0_EL1);
	u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, dfr0);

	return __kvm_pmu_event_mask(pmuver);
}

/**
 * kvm_pmc_is_64bit - determine if counter is 64bit
 * @pmc: counter context
 */
static bool kvm_pmc_is_64bit(struct kvm_pmc *pmc)
{
	return (pmc->idx == ARMV8_PMU_CYCLE_IDX ||
		kvm_pmu_is_3p5(kvm_pmc_to_vcpu(pmc)));
}

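/*
 * A counter overflows at 64 bits only when the relevant PMCR_EL0 bit is set:
 * PMCR_EL0.LP for the event counters, PMCR_EL0.LC for the cycle counter.
 * Otherwise the overflow happens at 32 bits, even for a 64bit counter.
 */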
static bool kvm_pmc_has_64bit_overflow(struct kvm_pmc *pmc)
{
	u64 val = __vcpu_sys_reg(kvm_pmc_to_vcpu(pmc), PMCR_EL0);

	return (pmc->idx < ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LP)) ||
	       (pmc->idx == ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LC));
}

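/*
 * Only an even-numbered event counter can be chained: its odd-numbered
 * neighbour counts CHAIN events for it, and chaining is pointless if the
 * counter already overflows at 64 bits.
 */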
static bool kvm_pmu_counter_can_chain(struct kvm_pmc *pmc)
{
	return (!(pmc->idx & 1) && (pmc->idx + 1) < ARMV8_PMU_CYCLE_IDX &&
		!kvm_pmc_has_64bit_overflow(pmc));
}

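/*
 * Map a counter index to the vcpu sysreg holding its value or its event
 * type; the cycle counter uses PMCCNTR_EL0/PMCCFILTR_EL0 instead of the
 * PMEVCNTRn_EL0/PMEVTYPERn_EL0 arrays.
 */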
static u32 counter_index_to_reg(u64 idx)
{
	return (idx == ARMV8_PMU_CYCLE_IDX) ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + idx;
}

static u32 counter_index_to_evtreg(u64 idx)
{
	return (idx == ARMV8_PMU_CYCLE_IDX) ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + idx;
}

static u64 kvm_pmu_get_pmc_value(struct kvm_pmc *pmc)
{
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	u64 counter, reg, enabled, running;

	reg = counter_index_to_reg(pmc->idx);
	counter = __vcpu_sys_reg(vcpu, reg);

	/*
	 * The real counter value is equal to the value of counter register plus
	 * the value perf event counts.
	 */
	if (pmc->perf_event)
		counter += perf_event_read_value(pmc->perf_event, &enabled,
						 &running);

	if (!kvm_pmc_is_64bit(pmc))
		counter = lower_32_bits(counter);

	return counter;
}

/**
 * kvm_pmu_get_counter_value - get PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
{
	if (!kvm_vcpu_has_pmu(vcpu))
		return 0;

	return kvm_pmu_get_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, select_idx));
}

static void kvm_pmu_set_pmc_value(struct kvm_pmc *pmc, u64 val, bool force)
{
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	u64 reg;

	kvm_pmu_release_perf_event(pmc);

	reg = counter_index_to_reg(pmc->idx);

	if (vcpu_mode_is_32bit(vcpu) && pmc->idx != ARMV8_PMU_CYCLE_IDX &&
	    !force) {
		/*
		 * Even with PMUv3p5, AArch32 cannot write to the top
		 * 32bit of the counters. The only possible course of
		 * action is to use PMCR.P, which will reset them to
		 * 0 (the only use of the 'force' parameter).
		 */
		val  = lower_32_bits(val);
		val |= __vcpu_sys_reg(vcpu, reg) & GENMASK(63, 32);
	}

	__vcpu_sys_reg(vcpu, reg) = val;

	/* Recreate the perf event to reflect the updated sample_period */
	kvm_pmu_create_perf_event(pmc);
}

/**
 * kvm_pmu_set_counter_value - set PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 * @val: The counter value
 */
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
{
	if (!kvm_vcpu_has_pmu(vcpu))
		return;

	kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, select_idx), val, false);
}

/**
 * kvm_pmu_release_perf_event - remove the perf event
 * @pmc: The PMU counter pointer
 */
static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
{
	if (pmc->perf_event) {
		perf_event_disable(pmc->perf_event);
		perf_event_release_kernel(pmc->perf_event);
		pmc->perf_event = NULL;
	}
}

/**
 * kvm_pmu_stop_counter - stop PMU counter
 * @pmc: The PMU counter pointer
 *
 * If this counter has been configured to monitor some event, release it here.
 */
static void kvm_pmu_stop_counter(struct kvm_pmc *pmc)
{
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	u64 reg, val;

	if (!pmc->perf_event)
		return;

	val = kvm_pmu_get_pmc_value(pmc);

	reg = counter_index_to_reg(pmc->idx);

	__vcpu_sys_reg(vcpu, reg) = val;

	kvm_pmu_release_perf_event(pmc);
}

/**
 * kvm_pmu_vcpu_init - assign pmu counter idx for cpu
 * @vcpu: The vcpu pointer
 *
 */
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
		pmu->pmc[i].idx = i;
}

/**
 * kvm_pmu_vcpu_reset - reset pmu state for cpu
 * @vcpu: The vcpu pointer
 *
 */
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
{
	unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
	int i;

	for_each_set_bit(i, &mask, 32)
		kvm_pmu_stop_counter(kvm_vcpu_idx_to_pmc(vcpu, i));
}

/**
 * kvm_pmu_vcpu_destroy - free perf event of PMU for cpu
 * @vcpu: The vcpu pointer
 *
 */
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	int i;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
		kvm_pmu_release_perf_event(kvm_vcpu_idx_to_pmc(vcpu, i));
	irq_work_sync(&vcpu->arch.pmu.overflow_work);
}

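/*
 * PMCR_EL0.N holds the number of implemented event counters; the cycle
 * counter (bit 31) is always part of the valid mask.
 */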
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
	u64 val = __vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT;

	val &= ARMV8_PMU_PMCR_N_MASK;
	if (val == 0)
		return BIT(ARMV8_PMU_CYCLE_IDX);
	else
		return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
}

/**
 * kvm_pmu_enable_counter_mask - enable selected PMU counters
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCNTENSET register
 *
 * Call perf_event_enable to start counting the perf event
 */
void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
{
	int i;

	if (!kvm_vcpu_has_pmu(vcpu))
		return;

	if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
		return;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		struct kvm_pmc *pmc;

		if (!(val & BIT(i)))
			continue;

		pmc = kvm_vcpu_idx_to_pmc(vcpu, i);

		if (!pmc->perf_event) {
			kvm_pmu_create_perf_event(pmc);
		} else {
			perf_event_enable(pmc->perf_event);
			if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
				kvm_debug("fail to enable perf event\n");
		}
	}
}

/**
 * kvm_pmu_disable_counter_mask - disable selected PMU counters
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCNTENCLR register
 *
 * Call perf_event_disable to stop counting the perf event
 */
void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
{
	int i;

	if (!kvm_vcpu_has_pmu(vcpu) || !val)
		return;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		struct kvm_pmc *pmc;

		if (!(val & BIT(i)))
			continue;

		pmc = kvm_vcpu_idx_to_pmc(vcpu, i);

		if (pmc->perf_event)
			perf_event_disable(pmc->perf_event);
	}
}

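/*
 * Compute the set of counters that must raise the overflow interrupt:
 * those with an overflow recorded (PMOVSSET_EL0) that are both enabled
 * (PMCNTENSET_EL0) and have their interrupt enabled (PMINTENSET_EL1),
 * provided the PMU is globally enabled (PMCR_EL0.E).
 */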
static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
{
	u64 reg = 0;

	if ((__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
		reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
		reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
		reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
	}

	return reg;
}

static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	bool overflow;

	if (!kvm_vcpu_has_pmu(vcpu))
		return;

	overflow = !!kvm_pmu_overflow_status(vcpu);
	if (pmu->irq_level == overflow)
		return;

	pmu->irq_level = overflow;

	if (likely(irqchip_in_kernel(vcpu->kvm))) {
		int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
					      pmu->irq_num, overflow, pmu);
		WARN_ON(ret);
	}
}

bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
	bool run_level = sregs->device_irq_level & KVM_ARM_DEV_PMU;

	if (likely(irqchip_in_kernel(vcpu->kvm)))
		return false;

	return pmu->irq_level != run_level;
}

/*
 * Reflect the PMU overflow interrupt output level into the kvm_run structure
 */
void kvm_pmu_update_run(struct kvm_vcpu *vcpu)
{
	struct kvm_sync_regs *regs = &vcpu->run->s.regs;

	/* Populate the timer bitmap for user space */
	regs->device_irq_level &= ~KVM_ARM_DEV_PMU;
	if (vcpu->arch.pmu.irq_level)
		regs->device_irq_level |= KVM_ARM_DEV_PMU;
}

/**
 * kvm_pmu_flush_hwstate - flush pmu state to cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the host, and inject
 * an interrupt if that was the case.
 */
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu)
{
	kvm_pmu_update_state(vcpu);
}

/**
 * kvm_pmu_sync_hwstate - sync pmu state from cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the guest, and
 * inject an interrupt if that was the case.
 */
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
{
	kvm_pmu_update_state(vcpu);
}

/**
 * When perf interrupt is an NMI, we cannot safely notify the vcpu corresponding
 * to the event.
 * This is why we need a callback to do it once outside of the NMI context.
 */
static void kvm_pmu_perf_overflow_notify_vcpu(struct irq_work *work)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(work, struct kvm_vcpu, arch.pmu.overflow_work);
	kvm_vcpu_kick(vcpu);
}

/*
 * Perform an increment on any of the counters described in @mask,
 * generating the overflow if required, and propagate it as a chained
 * event if possible.
 */
static void kvm_pmu_counter_increment(struct kvm_vcpu *vcpu,
				      unsigned long mask, u32 event)
{
	int i;

	if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
		return;

	/* Weed out disabled counters */
	mask &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);

	for_each_set_bit(i, &mask, ARMV8_PMU_CYCLE_IDX) {
		struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
		u64 type, reg;

		/* Filter on event type */
		type = __vcpu_sys_reg(vcpu, counter_index_to_evtreg(i));
		type &= kvm_pmu_event_mask(vcpu->kvm);
		if (type != event)
			continue;

		/* Increment this counter */
		reg = __vcpu_sys_reg(vcpu, counter_index_to_reg(i)) + 1;
		if (!kvm_pmc_is_64bit(pmc))
			reg = lower_32_bits(reg);
		__vcpu_sys_reg(vcpu, counter_index_to_reg(i)) = reg;

		/* No overflow? move on */
		if (kvm_pmc_has_64bit_overflow(pmc) ? reg : lower_32_bits(reg))
			continue;

		/* Mark overflow */
		__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);

		if (kvm_pmu_counter_can_chain(pmc))
			kvm_pmu_counter_increment(vcpu, BIT(i + 1),
						  ARMV8_PMUV3_PERFCTR_CHAIN);
	}
}

/* Compute the sample period for a given counter value */
static u64 compute_period(struct kvm_pmc *pmc, u64 counter)
{
	u64 val;

	if (kvm_pmc_is_64bit(pmc) && kvm_pmc_has_64bit_overflow(pmc))
		val = (-counter) & GENMASK(63, 0);
	else
		val = (-counter) & GENMASK(31, 0);

	return val;
}

/**
 * When the perf event overflows, set the overflow status and inform the vcpu.
 */
static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
				  struct perf_sample_data *data,
				  struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	struct arm_pmu *cpu_pmu = to_arm_pmu(perf_event->pmu);
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	int idx = pmc->idx;
	u64 period;

	cpu_pmu->pmu.stop(perf_event, PERF_EF_UPDATE);

	/*
	 * Reset the sample period to the architectural limit,
	 * i.e. the point where the counter overflows.
	 */
	period = compute_period(pmc, local64_read(&perf_event->count));

	local64_set(&perf_event->hw.period_left, 0);
	perf_event->attr.sample_period = period;
	perf_event->hw.sample_period = period;

	__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);

	if (kvm_pmu_counter_can_chain(pmc))
		kvm_pmu_counter_increment(vcpu, BIT(idx + 1),
					  ARMV8_PMUV3_PERFCTR_CHAIN);

	if (kvm_pmu_overflow_status(vcpu)) {
		kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);

		if (!in_nmi())
			kvm_vcpu_kick(vcpu);
		else
			irq_work_queue(&vcpu->arch.pmu.overflow_work);
	}

	cpu_pmu->pmu.start(perf_event, PERF_EF_RELOAD);
}

/**
 * kvm_pmu_software_increment - do software increment
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMSWINC register
 */
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
{
	kvm_pmu_counter_increment(vcpu, val, ARMV8_PMUV3_PERFCTR_SW_INCR);
}

/**
 * kvm_pmu_handle_pmcr - handle PMCR register
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCR register
 */
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
{
	int i;

	if (!kvm_vcpu_has_pmu(vcpu))
		return;

	/* Fixup PMCR_EL0 to reconcile the PMU version and the LP bit */
	if (!kvm_pmu_is_3p5(vcpu))
		val &= ~ARMV8_PMU_PMCR_LP;

	/* The reset bits don't indicate any state, and shouldn't be saved. */
	__vcpu_sys_reg(vcpu, PMCR_EL0) = val & ~(ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_P);

	if (val & ARMV8_PMU_PMCR_E) {
		kvm_pmu_enable_counter_mask(vcpu,
		       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
	} else {
		kvm_pmu_disable_counter_mask(vcpu,
		       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
	}

	if (val & ARMV8_PMU_PMCR_C)
		kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);

	if (val & ARMV8_PMU_PMCR_P) {
		unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
		mask &= ~BIT(ARMV8_PMU_CYCLE_IDX);
		for_each_set_bit(i, &mask, 32)
			kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, i), 0, true);
	}
	kvm_vcpu_pmu_restore_guest(vcpu);
}

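/*
 * A counter only counts when the PMU is globally enabled (PMCR_EL0.E) and
 * its own bit is set in PMCNTENSET_EL0.
 */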
static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc)
{
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	return (__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) &&
	       (__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(pmc->idx));
}

/**
 * kvm_pmu_create_perf_event - create a perf event for a counter
 * @pmc: Counter context
 */
static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc)
{
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	struct arm_pmu *arm_pmu = vcpu->kvm->arch.arm_pmu;
	struct perf_event *event;
	struct perf_event_attr attr;
	u64 eventsel, reg, data;

	reg = counter_index_to_evtreg(pmc->idx);
	data = __vcpu_sys_reg(vcpu, reg);

	kvm_pmu_stop_counter(pmc);
	if (pmc->idx == ARMV8_PMU_CYCLE_IDX)
		eventsel = ARMV8_PMUV3_PERFCTR_CPU_CYCLES;
	else
		eventsel = data & kvm_pmu_event_mask(vcpu->kvm);

	/*
	 * Neither SW increment nor chained events need to be backed
	 * by a perf event.
	 */
	if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR ||
	    eventsel == ARMV8_PMUV3_PERFCTR_CHAIN)
		return;

	/*
	 * If we have a filter in place and that the event isn't allowed, do
	 * not install a perf event either.
	 */
	if (vcpu->kvm->arch.pmu_filter &&
	    !test_bit(eventsel, vcpu->kvm->arch.pmu_filter))
		return;

	memset(&attr, 0, sizeof(struct perf_event_attr));
	attr.type = arm_pmu->pmu.type;
	attr.size = sizeof(attr);
	attr.pinned = 1;
	attr.disabled = !kvm_pmu_counter_is_enabled(pmc);
	attr.exclude_user = data & ARMV8_PMU_EXCLUDE_EL0 ? 1 : 0;
	attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0;
	attr.exclude_hv = 1; /* Don't count EL2 events */
	attr.exclude_host = 1; /* Don't count host events */
	attr.config = eventsel;

	/*
	 * If counting with a 64bit counter, advertise it to the perf
	 * code, carefully dealing with the initial sample period
	 * which also depends on the overflow.
	 */
	if (kvm_pmc_is_64bit(pmc))
		attr.config1 |= PERF_ATTR_CFG1_COUNTER_64BIT;

	attr.sample_period = compute_period(pmc, kvm_pmu_get_pmc_value(pmc));

	event = perf_event_create_kernel_counter(&attr, -1, current,
						 kvm_pmu_perf_overflow, pmc);

	if (IS_ERR(event)) {
		pr_err_once("kvm: pmu event creation failed %ld\n",
			    PTR_ERR(event));
		return;
	}

	pmc->perf_event = event;
}

/**
 * kvm_pmu_set_counter_event_type - set selected counter to monitor some event
 * @vcpu: The vcpu pointer
 * @data: The data guest writes to PMXEVTYPER_EL0
 * @select_idx: The number of selected counter
 *
 * When OS accesses PMXEVTYPER_EL0, that means it wants to set a PMC to count an
 * event with given hardware event number. Here we call perf_event API to
 * emulate this action and create a kernel perf event for it.
 */
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
				    u64 select_idx)
{
	struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, select_idx);
	u64 reg, mask;

	if (!kvm_vcpu_has_pmu(vcpu))
		return;

	mask  =  ARMV8_PMU_EVTYPE_MASK;
	mask &= ~ARMV8_PMU_EVTYPE_EVENT;
	mask |= kvm_pmu_event_mask(vcpu->kvm);

	reg = counter_index_to_evtreg(pmc->idx);

	__vcpu_sys_reg(vcpu, reg) = data & mask;

	kvm_pmu_create_perf_event(pmc);
}

void kvm_host_pmu_init(struct arm_pmu *pmu)
{
	struct arm_pmu_entry *entry;

	/*
	 * Check the sanitised PMU version for the system, as KVM does not
	 * support implementations where PMUv3 exists on a subset of CPUs.
	 */
	if (!pmuv3_implemented(kvm_arm_pmu_get_pmuver_limit()))
		return;

	mutex_lock(&arm_pmus_lock);

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		goto out_unlock;

	entry->arm_pmu = pmu;
	list_add_tail(&entry->entry, &arm_pmus);

	if (list_is_singular(&arm_pmus))
		static_branch_enable(&kvm_arm_pmu_available);

out_unlock:
	mutex_unlock(&arm_pmus_lock);
}

static struct arm_pmu *kvm_pmu_probe_armpmu(void)
{
	struct arm_pmu *tmp, *pmu = NULL;
	struct arm_pmu_entry *entry;
	int cpu;

	mutex_lock(&arm_pmus_lock);

	/*
	 * It is safe to use a stale cpu to iterate the list of PMUs so long as
	 * the same value is used for the entirety of the loop. Given this, and
	 * the fact that no percpu data is used for the lookup there is no need
	 * to disable preemption.
	 *
	 * It is still necessary to get a valid cpu, though, to probe for the
	 * default PMU instance as userspace is not required to specify a PMU
	 * type. In order to uphold the preexisting behavior KVM selects the
	 * PMU instance for the core where the first call to the
	 * KVM_ARM_VCPU_PMU_V3_CTRL attribute group occurs. A dependent use case
	 * would be a user with disdain of all things big.LITTLE that affines
	 * the VMM to a particular cluster of cores.
	 *
	 * In any case, userspace should just do the sane thing and use the UAPI
	 * to select a PMU type directly. But, be wary of the baggage being
	 * carried here.
	 */
	cpu = raw_smp_processor_id();
	list_for_each_entry(entry, &arm_pmus, entry) {
		tmp = entry->arm_pmu;

		if (cpumask_test_cpu(cpu, &tmp->supported_cpus)) {
			pmu = tmp;
			break;
		}
	}

	mutex_unlock(&arm_pmus_lock);

	return pmu;
}

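/*
 * Build the guest view of PMCEID0_EL0 (pmceid1 == false) or PMCEID1_EL0
 * (pmceid1 == true) from the host registers, masking out any events that
 * the userspace event filter does not allow.
 */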
u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
{
	unsigned long *bmap = vcpu->kvm->arch.pmu_filter;
	u64 val, mask = 0;
	int base, i, nr_events;

	if (!kvm_vcpu_has_pmu(vcpu))
		return 0;

	if (!pmceid1) {
		val = read_sysreg(pmceid0_el0);
		/* always support CHAIN */
		val |= BIT(ARMV8_PMUV3_PERFCTR_CHAIN);
		base = 0;
	} else {
		val = read_sysreg(pmceid1_el0);
		/*
		 * Don't advertise STALL_SLOT*, as PMMIR_EL0 is handled
		 * as RAZ
		 */
		val &= ~(BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32) |
			 BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT_FRONTEND - 32) |
			 BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT_BACKEND - 32));
		base = 32;
	}

	if (!bmap)
		return val;

	nr_events = kvm_pmu_event_mask(vcpu->kvm) + 1;

	for (i = 0; i < 32; i += 8) {
		u64 byte;

		byte = bitmap_get_value8(bmap, base + i);
		mask |= byte << i;
		if (nr_events >= (0x4000 + base + 32)) {
			byte = bitmap_get_value8(bmap, 0x4000 + base + i);
			mask |= byte << (32 + i);
		}
	}

	return val & mask;
}

int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
	if (!kvm_vcpu_has_pmu(vcpu))
		return 0;

	if (!vcpu->arch.pmu.created)
		return -EINVAL;

	/*
	 * A valid interrupt configuration for the PMU is either to have a
	 * properly configured interrupt number and using an in-kernel
	 * irqchip, or to not have an in-kernel GIC and not set an IRQ.
	 */
	if (irqchip_in_kernel(vcpu->kvm)) {
		int irq = vcpu->arch.pmu.irq_num;
		/*
		 * If we are using an in-kernel vgic, at this point we know
		 * the vgic will be initialized, so we can check the PMU irq
		 * number against the dimensions of the vgic and make sure
		 * it's valid.
		 */
		if (!irq_is_ppi(irq) && !vgic_valid_spi(vcpu->kvm, irq))
			return -EINVAL;
	} else if (kvm_arm_pmu_irq_initialized(vcpu)) {
		   return -EINVAL;
	}

	/* One-off reload of the PMU on first run */
	kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);

	return 0;
}

static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
{
	if (irqchip_in_kernel(vcpu->kvm)) {
		int ret;

		/*
		 * If using the PMU with an in-kernel virtual GIC
		 * implementation, we require the GIC to be already
		 * initialized when initializing the PMU.
		 */
		if (!vgic_initialized(vcpu->kvm))
			return -ENODEV;

		if (!kvm_arm_pmu_irq_initialized(vcpu))
			return -ENXIO;

		ret = kvm_vgic_set_owner(vcpu, vcpu->arch.pmu.irq_num,
					 &vcpu->arch.pmu);
		if (ret)
			return ret;
	}

	init_irq_work(&vcpu->arch.pmu.overflow_work,
		      kvm_pmu_perf_overflow_notify_vcpu);

	vcpu->arch.pmu.created = true;
	return 0;
}

/*
 * For one VM the interrupt type must be same for each vcpu.
 * As a PPI, the interrupt number is the same for all vcpus,
 * while as an SPI it must be a separate number per vcpu.
 */
static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!kvm_arm_pmu_irq_initialized(vcpu))
			continue;

		if (irq_is_ppi(irq)) {
			if (vcpu->arch.pmu.irq_num != irq)
				return false;
		} else {
			if (vcpu->arch.pmu.irq_num == irq)
				return false;
		}
	}

	return true;
}

static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id)
{
	struct kvm *kvm = vcpu->kvm;
	struct arm_pmu_entry *entry;
	struct arm_pmu *arm_pmu;
	int ret = -ENXIO;

	lockdep_assert_held(&kvm->arch.config_lock);
	mutex_lock(&arm_pmus_lock);

	list_for_each_entry(entry, &arm_pmus, entry) {
		arm_pmu = entry->arm_pmu;
		if (arm_pmu->pmu.type == pmu_id) {
			if (kvm_vm_has_ran_once(kvm) ||
			    (kvm->arch.pmu_filter && kvm->arch.arm_pmu != arm_pmu)) {
				ret = -EBUSY;
				break;
			}

			kvm->arch.arm_pmu = arm_pmu;
			cpumask_copy(kvm->arch.supported_cpus, &arm_pmu->supported_cpus);
			ret = 0;
			break;
		}
	}

	mutex_unlock(&arm_pmus_lock);
	return ret;
}

int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	struct kvm *kvm = vcpu->kvm;

	lockdep_assert_held(&kvm->arch.config_lock);

	if (!kvm_vcpu_has_pmu(vcpu))
		return -ENODEV;

	if (vcpu->arch.pmu.created)
		return -EBUSY;

	if (!kvm->arch.arm_pmu) {
		/*
		 * No PMU set, get the default one.
		 *
		 * The observant among you will notice that the supported_cpus
		 * mask does not get updated for the default PMU even though it
		 * is quite possible the selected instance supports only a
		 * subset of cores in the system. This is intentional, and
		 * upholds the preexisting behavior on heterogeneous systems
		 * where vCPUs can be scheduled on any core but the guest
		 * counters could stop working.
		 */
		kvm->arch.arm_pmu = kvm_pmu_probe_armpmu();
		if (!kvm->arch.arm_pmu)
			return -ENODEV;
	}

	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ: {
		int __user *uaddr = (int __user *)(long)attr->addr;
		int irq;

		if (!irqchip_in_kernel(kvm))
			return -EINVAL;

		if (get_user(irq, uaddr))
			return -EFAULT;

		/* The PMU overflow interrupt can be a PPI or a valid SPI. */
		if (!(irq_is_ppi(irq) || irq_is_spi(irq)))
			return -EINVAL;

		if (!pmu_irq_is_valid(kvm, irq))
			return -EINVAL;

		if (kvm_arm_pmu_irq_initialized(vcpu))
			return -EBUSY;

		kvm_debug("Set kvm ARM PMU irq: %d\n", irq);
		vcpu->arch.pmu.irq_num = irq;
		return 0;
	}
	case KVM_ARM_VCPU_PMU_V3_FILTER: {
		u8 pmuver = kvm_arm_pmu_get_pmuver_limit();
		struct kvm_pmu_event_filter __user *uaddr;
		struct kvm_pmu_event_filter filter;
		int nr_events;

		/*
		 * Allow userspace to specify an event filter for the entire
		 * event range supported by PMUVer of the hardware, rather
		 * than the guest's PMUVer for KVM backward compatibility.
		 */
		nr_events = __kvm_pmu_event_mask(pmuver) + 1;

		uaddr = (struct kvm_pmu_event_filter __user *)(long)attr->addr;

		if (copy_from_user(&filter, uaddr, sizeof(filter)))
			return -EFAULT;

		if (((u32)filter.base_event + filter.nevents) > nr_events ||
		    (filter.action != KVM_PMU_EVENT_ALLOW &&
		     filter.action != KVM_PMU_EVENT_DENY))
			return -EINVAL;

		if (kvm_vm_has_ran_once(kvm))
			return -EBUSY;

		if (!kvm->arch.pmu_filter) {
			kvm->arch.pmu_filter = bitmap_alloc(nr_events, GFP_KERNEL_ACCOUNT);
			if (!kvm->arch.pmu_filter)
				return -ENOMEM;

			/*
			 * The default depends on the first applied filter.
			 * If it allows events, the default is to deny.
			 * Conversely, if the first filter denies a set of
			 * events, the default is to allow.
			 */
			if (filter.action == KVM_PMU_EVENT_ALLOW)
				bitmap_zero(kvm->arch.pmu_filter, nr_events);
			else
				bitmap_fill(kvm->arch.pmu_filter, nr_events);
		}

		if (filter.action == KVM_PMU_EVENT_ALLOW)
			bitmap_set(kvm->arch.pmu_filter, filter.base_event, filter.nevents);
		else
			bitmap_clear(kvm->arch.pmu_filter, filter.base_event, filter.nevents);

		return 0;
	}
	case KVM_ARM_VCPU_PMU_V3_SET_PMU: {
		int __user *uaddr = (int __user *)(long)attr->addr;
		int pmu_id;

		if (get_user(pmu_id, uaddr))
			return -EFAULT;

		return kvm_arm_pmu_v3_set_pmu(vcpu, pmu_id);
	}
	case KVM_ARM_VCPU_PMU_V3_INIT:
		return kvm_arm_pmu_v3_init(vcpu);
	}

	return -ENXIO;
}

int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ: {
		int __user *uaddr = (int __user *)(long)attr->addr;
		int irq;

		if (!irqchip_in_kernel(vcpu->kvm))
			return -EINVAL;

		if (!kvm_vcpu_has_pmu(vcpu))
			return -ENODEV;

		if (!kvm_arm_pmu_irq_initialized(vcpu))
			return -ENXIO;

		irq = vcpu->arch.pmu.irq_num;
		return put_user(irq, uaddr);
	}
	}

	return -ENXIO;
}

int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ:
	case KVM_ARM_VCPU_PMU_V3_INIT:
	case KVM_ARM_VCPU_PMU_V3_FILTER:
	case KVM_ARM_VCPU_PMU_V3_SET_PMU:
		if (kvm_vcpu_has_pmu(vcpu))
			return 0;
	}

	return -ENXIO;
}

10643d0dba57SMarc Zyngier 
kvm_arm_pmu_get_pmuver_limit(void)10653d0dba57SMarc Zyngier u8 kvm_arm_pmu_get_pmuver_limit(void)
10663d0dba57SMarc Zyngier {
10673d0dba57SMarc Zyngier 	u64 tmp;
10683d0dba57SMarc Zyngier 
10693d0dba57SMarc Zyngier 	tmp = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
10703d0dba57SMarc Zyngier 	tmp = cpuid_feature_cap_perfmon_field(tmp,
10713d0dba57SMarc Zyngier 					      ID_AA64DFR0_EL1_PMUVer_SHIFT,
10721f7c9782SMarc Zyngier 					      ID_AA64DFR0_EL1_PMUVer_V3P5);
10733d0dba57SMarc Zyngier 	return FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), tmp);
10743d0dba57SMarc Zyngier }