xref: /openbmc/linux/arch/arm64/kvm/pmu-emul.c (revision fcf37b38)
19ed24f4bSMarc Zyngier // SPDX-License-Identifier: GPL-2.0-only
29ed24f4bSMarc Zyngier /*
39ed24f4bSMarc Zyngier  * Copyright (C) 2015 Linaro Ltd.
49ed24f4bSMarc Zyngier  * Author: Shannon Zhao <shannon.zhao@linaro.org>
59ed24f4bSMarc Zyngier  */
69ed24f4bSMarc Zyngier 
79ed24f4bSMarc Zyngier #include <linux/cpu.h>
89ed24f4bSMarc Zyngier #include <linux/kvm.h>
99ed24f4bSMarc Zyngier #include <linux/kvm_host.h>
10db858060SAlexandru Elisei #include <linux/list.h>
119ed24f4bSMarc Zyngier #include <linux/perf_event.h>
129ed24f4bSMarc Zyngier #include <linux/perf/arm_pmu.h>
139ed24f4bSMarc Zyngier #include <linux/uaccess.h>
149ed24f4bSMarc Zyngier #include <asm/kvm_emulate.h>
159ed24f4bSMarc Zyngier #include <kvm/arm_pmu.h>
169ed24f4bSMarc Zyngier #include <kvm/arm_vgic.h>
179ed24f4bSMarc Zyngier 
18be399d82SSean Christopherson DEFINE_STATIC_KEY_FALSE(kvm_arm_pmu_available);
19be399d82SSean Christopherson 
20db858060SAlexandru Elisei static LIST_HEAD(arm_pmus);
21db858060SAlexandru Elisei static DEFINE_MUTEX(arm_pmus_lock);
22db858060SAlexandru Elisei 
239ed24f4bSMarc Zyngier static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx);
249ed24f4bSMarc Zyngier static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx);
259ed24f4bSMarc Zyngier static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc);
269ed24f4bSMarc Zyngier 
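/*
 * Flag set in perf_event_attr.config1 when the guest counters are chained,
 * so that the backing host perf event covers the full 64-bit counter pair.
 */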
279ed24f4bSMarc Zyngier #define PERF_ATTR_CFG1_KVM_PMU_CHAINED 0x1
289ed24f4bSMarc Zyngier 
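/*
 * Width of the event number field exposed to the guest: PMUv3 for ARMv8.0
 * defines 10-bit event numbers, while ARMv8.1 and later extend the field
 * to 16 bits.
 */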
29fd65a3b5SMarc Zyngier static u32 kvm_pmu_event_mask(struct kvm *kvm)
30fd65a3b5SMarc Zyngier {
3146b18782SMarc Zyngier 	unsigned int pmuver;
3246b18782SMarc Zyngier 
3346b18782SMarc Zyngier 	pmuver = kvm->arch.arm_pmu->pmuver;
3446b18782SMarc Zyngier 
3546b18782SMarc Zyngier 	switch (pmuver) {
36*fcf37b38SMark Brown 	case ID_AA64DFR0_EL1_PMUVer_8_0:
37fd65a3b5SMarc Zyngier 		return GENMASK(9, 0);
38*fcf37b38SMark Brown 	case ID_AA64DFR0_EL1_PMUVer_8_1:
39*fcf37b38SMark Brown 	case ID_AA64DFR0_EL1_PMUVer_8_4:
40*fcf37b38SMark Brown 	case ID_AA64DFR0_EL1_PMUVer_8_5:
41*fcf37b38SMark Brown 	case ID_AA64DFR0_EL1_PMUVer_8_7:
42fd65a3b5SMarc Zyngier 		return GENMASK(15, 0);
43fd65a3b5SMarc Zyngier 	default:		/* Shouldn't be here, just for sanity */
4446b18782SMarc Zyngier 		WARN_ONCE(1, "Unknown PMU version %d\n", pmuver);
45fd65a3b5SMarc Zyngier 		return 0;
46fd65a3b5SMarc Zyngier 	}
47fd65a3b5SMarc Zyngier }
48fd65a3b5SMarc Zyngier 
499ed24f4bSMarc Zyngier /**
509ed24f4bSMarc Zyngier  * kvm_pmu_idx_is_64bit - determine if select_idx is a 64bit counter
519ed24f4bSMarc Zyngier  * @vcpu: The vcpu pointer
529ed24f4bSMarc Zyngier  * @select_idx: The counter index
539ed24f4bSMarc Zyngier  */
549ed24f4bSMarc Zyngier static bool kvm_pmu_idx_is_64bit(struct kvm_vcpu *vcpu, u64 select_idx)
559ed24f4bSMarc Zyngier {
569ed24f4bSMarc Zyngier 	return (select_idx == ARMV8_PMU_CYCLE_IDX &&
579ed24f4bSMarc Zyngier 		__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_LC);
589ed24f4bSMarc Zyngier }
599ed24f4bSMarc Zyngier 
609ed24f4bSMarc Zyngier static struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
619ed24f4bSMarc Zyngier {
629ed24f4bSMarc Zyngier 	struct kvm_pmu *pmu;
639ed24f4bSMarc Zyngier 	struct kvm_vcpu_arch *vcpu_arch;
649ed24f4bSMarc Zyngier 
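	/*
	 * pmc->idx is also the offset of this counter within the enclosing
	 * kvm_pmu's pmc[] array, so stepping back by idx entries yields
	 * pmc[0], from which the kvm_pmu, vcpu_arch and finally the vcpu
	 * can be recovered via container_of().
	 */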
659ed24f4bSMarc Zyngier 	pmc -= pmc->idx;
669ed24f4bSMarc Zyngier 	pmu = container_of(pmc, struct kvm_pmu, pmc[0]);
679ed24f4bSMarc Zyngier 	vcpu_arch = container_of(pmu, struct kvm_vcpu_arch, pmu);
689ed24f4bSMarc Zyngier 	return container_of(vcpu_arch, struct kvm_vcpu, arch);
699ed24f4bSMarc Zyngier }
709ed24f4bSMarc Zyngier 
719ed24f4bSMarc Zyngier /**
729ed24f4bSMarc Zyngier  * kvm_pmu_pmc_is_chained - determine if the pmc is chained
739ed24f4bSMarc Zyngier  * @pmc: The PMU counter pointer
749ed24f4bSMarc Zyngier  */
759ed24f4bSMarc Zyngier static bool kvm_pmu_pmc_is_chained(struct kvm_pmc *pmc)
769ed24f4bSMarc Zyngier {
779ed24f4bSMarc Zyngier 	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
789ed24f4bSMarc Zyngier 
799ed24f4bSMarc Zyngier 	return test_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
809ed24f4bSMarc Zyngier }
819ed24f4bSMarc Zyngier 
829ed24f4bSMarc Zyngier /**
839ed24f4bSMarc Zyngier  * kvm_pmu_idx_is_high_counter - determine if select_idx is the high counter of a pair
849ed24f4bSMarc Zyngier  * @select_idx: The counter index
859ed24f4bSMarc Zyngier  */
869ed24f4bSMarc Zyngier static bool kvm_pmu_idx_is_high_counter(u64 select_idx)
879ed24f4bSMarc Zyngier {
889ed24f4bSMarc Zyngier 	return select_idx & 0x1;
899ed24f4bSMarc Zyngier }
909ed24f4bSMarc Zyngier 
919ed24f4bSMarc Zyngier /**
929ed24f4bSMarc Zyngier  * kvm_pmu_get_canonical_pmc - obtain the canonical pmc
939ed24f4bSMarc Zyngier  * @pmc: The PMU counter pointer
949ed24f4bSMarc Zyngier  *
959ed24f4bSMarc Zyngier  * When a pair of PMCs are chained together we use the low counter (canonical)
969ed24f4bSMarc Zyngier  * to hold the underlying perf event.
979ed24f4bSMarc Zyngier  */
989ed24f4bSMarc Zyngier static struct kvm_pmc *kvm_pmu_get_canonical_pmc(struct kvm_pmc *pmc)
999ed24f4bSMarc Zyngier {
1009ed24f4bSMarc Zyngier 	if (kvm_pmu_pmc_is_chained(pmc) &&
1019ed24f4bSMarc Zyngier 	    kvm_pmu_idx_is_high_counter(pmc->idx))
1029ed24f4bSMarc Zyngier 		return pmc - 1;
1039ed24f4bSMarc Zyngier 
1049ed24f4bSMarc Zyngier 	return pmc;
1059ed24f4bSMarc Zyngier }
1069ed24f4bSMarc Zyngier static struct kvm_pmc *kvm_pmu_get_alternate_pmc(struct kvm_pmc *pmc)
1079ed24f4bSMarc Zyngier {
1089ed24f4bSMarc Zyngier 	if (kvm_pmu_idx_is_high_counter(pmc->idx))
1099ed24f4bSMarc Zyngier 		return pmc - 1;
1109ed24f4bSMarc Zyngier 	else
1119ed24f4bSMarc Zyngier 		return pmc + 1;
1129ed24f4bSMarc Zyngier }
1139ed24f4bSMarc Zyngier 
1149ed24f4bSMarc Zyngier /**
1159ed24f4bSMarc Zyngier  * kvm_pmu_idx_has_chain_evtype - determine if the event type is chain
1169ed24f4bSMarc Zyngier  * @vcpu: The vcpu pointer
1179ed24f4bSMarc Zyngier  * @select_idx: The counter index
1189ed24f4bSMarc Zyngier  */
1199ed24f4bSMarc Zyngier static bool kvm_pmu_idx_has_chain_evtype(struct kvm_vcpu *vcpu, u64 select_idx)
1209ed24f4bSMarc Zyngier {
1219ed24f4bSMarc Zyngier 	u64 eventsel, reg;
1229ed24f4bSMarc Zyngier 
1239ed24f4bSMarc Zyngier 	select_idx |= 0x1;
1249ed24f4bSMarc Zyngier 
1259ed24f4bSMarc Zyngier 	if (select_idx == ARMV8_PMU_CYCLE_IDX)
1269ed24f4bSMarc Zyngier 		return false;
1279ed24f4bSMarc Zyngier 
1289ed24f4bSMarc Zyngier 	reg = PMEVTYPER0_EL0 + select_idx;
129fd65a3b5SMarc Zyngier 	eventsel = __vcpu_sys_reg(vcpu, reg) & kvm_pmu_event_mask(vcpu->kvm);
1309ed24f4bSMarc Zyngier 
1319ed24f4bSMarc Zyngier 	return eventsel == ARMV8_PMUV3_PERFCTR_CHAIN;
1329ed24f4bSMarc Zyngier }
1339ed24f4bSMarc Zyngier 
1349ed24f4bSMarc Zyngier /**
1359ed24f4bSMarc Zyngier  * kvm_pmu_get_pair_counter_value - get PMU counter value
1369ed24f4bSMarc Zyngier  * @vcpu: The vcpu pointer
1379ed24f4bSMarc Zyngier  * @pmc: The PMU counter pointer
1389ed24f4bSMarc Zyngier  */
1399ed24f4bSMarc Zyngier static u64 kvm_pmu_get_pair_counter_value(struct kvm_vcpu *vcpu,
1409ed24f4bSMarc Zyngier 					  struct kvm_pmc *pmc)
1419ed24f4bSMarc Zyngier {
1429ed24f4bSMarc Zyngier 	u64 counter, counter_high, reg, enabled, running;
1439ed24f4bSMarc Zyngier 
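	/*
	 * For a chained pair, the even (canonical) counter holds bits [31:0]
	 * and the following odd counter holds bits [63:32] of the combined
	 * 64-bit value.
	 */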
1449ed24f4bSMarc Zyngier 	if (kvm_pmu_pmc_is_chained(pmc)) {
1459ed24f4bSMarc Zyngier 		pmc = kvm_pmu_get_canonical_pmc(pmc);
1469ed24f4bSMarc Zyngier 		reg = PMEVCNTR0_EL0 + pmc->idx;
1479ed24f4bSMarc Zyngier 
1489ed24f4bSMarc Zyngier 		counter = __vcpu_sys_reg(vcpu, reg);
1499ed24f4bSMarc Zyngier 		counter_high = __vcpu_sys_reg(vcpu, reg + 1);
1509ed24f4bSMarc Zyngier 
1519ed24f4bSMarc Zyngier 		counter = lower_32_bits(counter) | (counter_high << 32);
1529ed24f4bSMarc Zyngier 	} else {
1539ed24f4bSMarc Zyngier 		reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
1549ed24f4bSMarc Zyngier 		      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
1559ed24f4bSMarc Zyngier 		counter = __vcpu_sys_reg(vcpu, reg);
1569ed24f4bSMarc Zyngier 	}
1579ed24f4bSMarc Zyngier 
1589ed24f4bSMarc Zyngier 	/*
1599ed24f4bSMarc Zyngier 	 * The real counter value is equal to the value of the counter register
1609ed24f4bSMarc Zyngier 	 * plus the value that the perf event has counted.
1619ed24f4bSMarc Zyngier 	 */
1629ed24f4bSMarc Zyngier 	if (pmc->perf_event)
1639ed24f4bSMarc Zyngier 		counter += perf_event_read_value(pmc->perf_event, &enabled,
1649ed24f4bSMarc Zyngier 						 &running);
1659ed24f4bSMarc Zyngier 
1669ed24f4bSMarc Zyngier 	return counter;
1679ed24f4bSMarc Zyngier }
1689ed24f4bSMarc Zyngier 
1699ed24f4bSMarc Zyngier /**
1709ed24f4bSMarc Zyngier  * kvm_pmu_get_counter_value - get PMU counter value
1719ed24f4bSMarc Zyngier  * @vcpu: The vcpu pointer
1729ed24f4bSMarc Zyngier  * @select_idx: The counter index
1739ed24f4bSMarc Zyngier  */
1749ed24f4bSMarc Zyngier u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
1759ed24f4bSMarc Zyngier {
1769ed24f4bSMarc Zyngier 	u64 counter;
1779ed24f4bSMarc Zyngier 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
1789ed24f4bSMarc Zyngier 	struct kvm_pmc *pmc = &pmu->pmc[select_idx];
1799ed24f4bSMarc Zyngier 
1808f6379e2SAlexandru Elisei 	if (!kvm_vcpu_has_pmu(vcpu))
1818f6379e2SAlexandru Elisei 		return 0;
1828f6379e2SAlexandru Elisei 
1839ed24f4bSMarc Zyngier 	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
1849ed24f4bSMarc Zyngier 
1859ed24f4bSMarc Zyngier 	if (kvm_pmu_pmc_is_chained(pmc) &&
1869ed24f4bSMarc Zyngier 	    kvm_pmu_idx_is_high_counter(select_idx))
1879ed24f4bSMarc Zyngier 		counter = upper_32_bits(counter);
1889ed24f4bSMarc Zyngier 	else if (select_idx != ARMV8_PMU_CYCLE_IDX)
1899ed24f4bSMarc Zyngier 		counter = lower_32_bits(counter);
1909ed24f4bSMarc Zyngier 
1919ed24f4bSMarc Zyngier 	return counter;
1929ed24f4bSMarc Zyngier }
1939ed24f4bSMarc Zyngier 
1949ed24f4bSMarc Zyngier /**
1959ed24f4bSMarc Zyngier  * kvm_pmu_set_counter_value - set PMU counter value
1969ed24f4bSMarc Zyngier  * @vcpu: The vcpu pointer
1979ed24f4bSMarc Zyngier  * @select_idx: The counter index
1989ed24f4bSMarc Zyngier  * @val: The counter value
1999ed24f4bSMarc Zyngier  */
2009ed24f4bSMarc Zyngier void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
2019ed24f4bSMarc Zyngier {
2029ed24f4bSMarc Zyngier 	u64 reg;
2039ed24f4bSMarc Zyngier 
2048f6379e2SAlexandru Elisei 	if (!kvm_vcpu_has_pmu(vcpu))
2058f6379e2SAlexandru Elisei 		return;
2068f6379e2SAlexandru Elisei 
2079ed24f4bSMarc Zyngier 	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
2089ed24f4bSMarc Zyngier 	      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
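	/*
	 * Store the difference from the current (perf-accumulated) value so
	 * that the next read of this counter returns exactly @val.
	 */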
2099ed24f4bSMarc Zyngier 	__vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx);
2109ed24f4bSMarc Zyngier 
2119ed24f4bSMarc Zyngier 	/* Recreate the perf event to reflect the updated sample_period */
2129ed24f4bSMarc Zyngier 	kvm_pmu_create_perf_event(vcpu, select_idx);
2139ed24f4bSMarc Zyngier }
2149ed24f4bSMarc Zyngier 
2159ed24f4bSMarc Zyngier /**
2169ed24f4bSMarc Zyngier  * kvm_pmu_release_perf_event - remove the perf event
2179ed24f4bSMarc Zyngier  * @pmc: The PMU counter pointer
2189ed24f4bSMarc Zyngier  */
2199ed24f4bSMarc Zyngier static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
2209ed24f4bSMarc Zyngier {
2219ed24f4bSMarc Zyngier 	pmc = kvm_pmu_get_canonical_pmc(pmc);
2229ed24f4bSMarc Zyngier 	if (pmc->perf_event) {
2239ed24f4bSMarc Zyngier 		perf_event_disable(pmc->perf_event);
2249ed24f4bSMarc Zyngier 		perf_event_release_kernel(pmc->perf_event);
2259ed24f4bSMarc Zyngier 		pmc->perf_event = NULL;
2269ed24f4bSMarc Zyngier 	}
2279ed24f4bSMarc Zyngier }
2289ed24f4bSMarc Zyngier 
2299ed24f4bSMarc Zyngier /**
2309ed24f4bSMarc Zyngier  * kvm_pmu_stop_counter - stop PMU counter
2319ed24f4bSMarc Zyngier  * @pmc: The PMU counter pointer
2329ed24f4bSMarc Zyngier  *
2339ed24f4bSMarc Zyngier  * If this counter has been configured to monitor some event, release it here.
2349ed24f4bSMarc Zyngier  */
2359ed24f4bSMarc Zyngier static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
2369ed24f4bSMarc Zyngier {
2379ed24f4bSMarc Zyngier 	u64 counter, reg, val;
2389ed24f4bSMarc Zyngier 
2399ed24f4bSMarc Zyngier 	pmc = kvm_pmu_get_canonical_pmc(pmc);
2409ed24f4bSMarc Zyngier 	if (!pmc->perf_event)
2419ed24f4bSMarc Zyngier 		return;
2429ed24f4bSMarc Zyngier 
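	/*
	 * Snapshot the up-to-date counter value (register + perf delta) back
	 * into the guest's sysreg file before the perf event is torn down.
	 */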
2439ed24f4bSMarc Zyngier 	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
2449ed24f4bSMarc Zyngier 
2459ed24f4bSMarc Zyngier 	if (pmc->idx == ARMV8_PMU_CYCLE_IDX) {
2469ed24f4bSMarc Zyngier 		reg = PMCCNTR_EL0;
2479ed24f4bSMarc Zyngier 		val = counter;
2489ed24f4bSMarc Zyngier 	} else {
2499ed24f4bSMarc Zyngier 		reg = PMEVCNTR0_EL0 + pmc->idx;
2509ed24f4bSMarc Zyngier 		val = lower_32_bits(counter);
2519ed24f4bSMarc Zyngier 	}
2529ed24f4bSMarc Zyngier 
2539ed24f4bSMarc Zyngier 	__vcpu_sys_reg(vcpu, reg) = val;
2549ed24f4bSMarc Zyngier 
2559ed24f4bSMarc Zyngier 	if (kvm_pmu_pmc_is_chained(pmc))
2569ed24f4bSMarc Zyngier 		__vcpu_sys_reg(vcpu, reg + 1) = upper_32_bits(counter);
2579ed24f4bSMarc Zyngier 
2589ed24f4bSMarc Zyngier 	kvm_pmu_release_perf_event(pmc);
2599ed24f4bSMarc Zyngier }
2609ed24f4bSMarc Zyngier 
2619ed24f4bSMarc Zyngier /**
2629ed24f4bSMarc Zyngier  * kvm_pmu_vcpu_init - assign pmu counter idx for cpu
2639ed24f4bSMarc Zyngier  * @vcpu: The vcpu pointer
2649ed24f4bSMarc Zyngier  *
2659ed24f4bSMarc Zyngier  */
2669ed24f4bSMarc Zyngier void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu)
2679ed24f4bSMarc Zyngier {
2689ed24f4bSMarc Zyngier 	int i;
2699ed24f4bSMarc Zyngier 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
2709ed24f4bSMarc Zyngier 
2719ed24f4bSMarc Zyngier 	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
2729ed24f4bSMarc Zyngier 		pmu->pmc[i].idx = i;
2739ed24f4bSMarc Zyngier }
2749ed24f4bSMarc Zyngier 
2759ed24f4bSMarc Zyngier /**
2769ed24f4bSMarc Zyngier  * kvm_pmu_vcpu_reset - reset pmu state for cpu
2779ed24f4bSMarc Zyngier  * @vcpu: The vcpu pointer
2789ed24f4bSMarc Zyngier  *
2799ed24f4bSMarc Zyngier  */
2809ed24f4bSMarc Zyngier void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
2819ed24f4bSMarc Zyngier {
2829ed24f4bSMarc Zyngier 	unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
2839ed24f4bSMarc Zyngier 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
2849ed24f4bSMarc Zyngier 	int i;
2859ed24f4bSMarc Zyngier 
2869ed24f4bSMarc Zyngier 	for_each_set_bit(i, &mask, 32)
2879ed24f4bSMarc Zyngier 		kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]);
2889ed24f4bSMarc Zyngier 
2899ed24f4bSMarc Zyngier 	bitmap_zero(vcpu->arch.pmu.chained, ARMV8_PMU_MAX_COUNTER_PAIRS);
2909ed24f4bSMarc Zyngier }
2919ed24f4bSMarc Zyngier 
2929ed24f4bSMarc Zyngier /**
2939ed24f4bSMarc Zyngier  * kvm_pmu_vcpu_destroy - free perf event of PMU for cpu
2949ed24f4bSMarc Zyngier  * @vcpu: The vcpu pointer
2959ed24f4bSMarc Zyngier  *
2969ed24f4bSMarc Zyngier  */
2979ed24f4bSMarc Zyngier void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
2989ed24f4bSMarc Zyngier {
2999ed24f4bSMarc Zyngier 	int i;
3009ed24f4bSMarc Zyngier 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
3019ed24f4bSMarc Zyngier 
3029ed24f4bSMarc Zyngier 	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
3039ed24f4bSMarc Zyngier 		kvm_pmu_release_perf_event(&pmu->pmc[i]);
30495e92e45SJulien Thierry 	irq_work_sync(&vcpu->arch.pmu.overflow_work);
3059ed24f4bSMarc Zyngier }
3069ed24f4bSMarc Zyngier 
3079ed24f4bSMarc Zyngier u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
3089ed24f4bSMarc Zyngier {
3099ed24f4bSMarc Zyngier 	u64 val = __vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT;
3109ed24f4bSMarc Zyngier 
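	/*
	 * PMCR_EL0.N advertises the number of implemented event counters;
	 * the cycle counter (ARMV8_PMU_CYCLE_IDX) is always present.
	 */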
3119ed24f4bSMarc Zyngier 	val &= ARMV8_PMU_PMCR_N_MASK;
3129ed24f4bSMarc Zyngier 	if (val == 0)
3139ed24f4bSMarc Zyngier 		return BIT(ARMV8_PMU_CYCLE_IDX);
3149ed24f4bSMarc Zyngier 	else
3159ed24f4bSMarc Zyngier 		return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
3169ed24f4bSMarc Zyngier }
3179ed24f4bSMarc Zyngier 
3189ed24f4bSMarc Zyngier /**
3199ed24f4bSMarc Zyngier  * kvm_pmu_enable_counter_mask - enable selected PMU counters
3209ed24f4bSMarc Zyngier  * @vcpu: The vcpu pointer
3219ed24f4bSMarc Zyngier  * @val: the value guest writes to PMCNTENSET register
3229ed24f4bSMarc Zyngier  *
3239ed24f4bSMarc Zyngier  * Call perf_event_enable to start counting the perf event
3249ed24f4bSMarc Zyngier  */
3259ed24f4bSMarc Zyngier void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
3269ed24f4bSMarc Zyngier {
3279ed24f4bSMarc Zyngier 	int i;
3289ed24f4bSMarc Zyngier 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
3299ed24f4bSMarc Zyngier 	struct kvm_pmc *pmc;
3309ed24f4bSMarc Zyngier 
3318f6379e2SAlexandru Elisei 	if (!kvm_vcpu_has_pmu(vcpu))
3328f6379e2SAlexandru Elisei 		return;
3338f6379e2SAlexandru Elisei 
3349ed24f4bSMarc Zyngier 	if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
3359ed24f4bSMarc Zyngier 		return;
3369ed24f4bSMarc Zyngier 
3379ed24f4bSMarc Zyngier 	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
3389ed24f4bSMarc Zyngier 		if (!(val & BIT(i)))
3399ed24f4bSMarc Zyngier 			continue;
3409ed24f4bSMarc Zyngier 
3419ed24f4bSMarc Zyngier 		pmc = &pmu->pmc[i];
3429ed24f4bSMarc Zyngier 
3439ed24f4bSMarc Zyngier 		/* A change in the enable state may affect the chain state */
3449ed24f4bSMarc Zyngier 		kvm_pmu_update_pmc_chained(vcpu, i);
3459ed24f4bSMarc Zyngier 		kvm_pmu_create_perf_event(vcpu, i);
3469ed24f4bSMarc Zyngier 
3479ed24f4bSMarc Zyngier 		/* At this point, pmc must be the canonical */
3489ed24f4bSMarc Zyngier 		if (pmc->perf_event) {
3499ed24f4bSMarc Zyngier 			perf_event_enable(pmc->perf_event);
3509ed24f4bSMarc Zyngier 			if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
3519ed24f4bSMarc Zyngier 				kvm_debug("fail to enable perf event\n");
3529ed24f4bSMarc Zyngier 		}
3539ed24f4bSMarc Zyngier 	}
3549ed24f4bSMarc Zyngier }
3559ed24f4bSMarc Zyngier 
3569ed24f4bSMarc Zyngier /**
3579ed24f4bSMarc Zyngier  * kvm_pmu_disable_counter_mask - disable selected PMU counters
3589ed24f4bSMarc Zyngier  * @vcpu: The vcpu pointer
3599ed24f4bSMarc Zyngier  * @val: the value guest writes to PMCNTENCLR register
3609ed24f4bSMarc Zyngier  *
3619ed24f4bSMarc Zyngier  * Call perf_event_disable to stop counting the perf event
3629ed24f4bSMarc Zyngier  */
3639ed24f4bSMarc Zyngier void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
3649ed24f4bSMarc Zyngier {
3659ed24f4bSMarc Zyngier 	int i;
3669ed24f4bSMarc Zyngier 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
3679ed24f4bSMarc Zyngier 	struct kvm_pmc *pmc;
3689ed24f4bSMarc Zyngier 
3698f6379e2SAlexandru Elisei 	if (!kvm_vcpu_has_pmu(vcpu) || !val)
3709ed24f4bSMarc Zyngier 		return;
3719ed24f4bSMarc Zyngier 
3729ed24f4bSMarc Zyngier 	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
3739ed24f4bSMarc Zyngier 		if (!(val & BIT(i)))
3749ed24f4bSMarc Zyngier 			continue;
3759ed24f4bSMarc Zyngier 
3769ed24f4bSMarc Zyngier 		pmc = &pmu->pmc[i];
3779ed24f4bSMarc Zyngier 
3789ed24f4bSMarc Zyngier 		/* A change in the enable state may affect the chain state */
3799ed24f4bSMarc Zyngier 		kvm_pmu_update_pmc_chained(vcpu, i);
3809ed24f4bSMarc Zyngier 		kvm_pmu_create_perf_event(vcpu, i);
3819ed24f4bSMarc Zyngier 
3829ed24f4bSMarc Zyngier 		/* At this point, pmc must be the canonical */
3839ed24f4bSMarc Zyngier 		if (pmc->perf_event)
3849ed24f4bSMarc Zyngier 			perf_event_disable(pmc->perf_event);
3859ed24f4bSMarc Zyngier 	}
3869ed24f4bSMarc Zyngier }
3879ed24f4bSMarc Zyngier 
3889ed24f4bSMarc Zyngier static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
3899ed24f4bSMarc Zyngier {
3909ed24f4bSMarc Zyngier 	u64 reg = 0;
3919ed24f4bSMarc Zyngier 
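	/*
	 * Overflow is only reported while PMCR_EL0.E is set, and only for
	 * counters that have their overflow flag set, are enabled in
	 * PMCNTENSET and have their interrupt enabled in PMINTENSET.
	 */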
3929ed24f4bSMarc Zyngier 	if ((__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
3939ed24f4bSMarc Zyngier 		reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
3949ed24f4bSMarc Zyngier 		reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
3959ed24f4bSMarc Zyngier 		reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
3969ed24f4bSMarc Zyngier 	}
3979ed24f4bSMarc Zyngier 
3989ed24f4bSMarc Zyngier 	return reg;
3999ed24f4bSMarc Zyngier }
4009ed24f4bSMarc Zyngier 
4019ed24f4bSMarc Zyngier static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
4029ed24f4bSMarc Zyngier {
4039ed24f4bSMarc Zyngier 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
4049ed24f4bSMarc Zyngier 	bool overflow;
4059ed24f4bSMarc Zyngier 
40646acf89dSMarc Zyngier 	if (!kvm_vcpu_has_pmu(vcpu))
4079ed24f4bSMarc Zyngier 		return;
4089ed24f4bSMarc Zyngier 
4099ed24f4bSMarc Zyngier 	overflow = !!kvm_pmu_overflow_status(vcpu);
4109ed24f4bSMarc Zyngier 	if (pmu->irq_level == overflow)
4119ed24f4bSMarc Zyngier 		return;
4129ed24f4bSMarc Zyngier 
4139ed24f4bSMarc Zyngier 	pmu->irq_level = overflow;
4149ed24f4bSMarc Zyngier 
4159ed24f4bSMarc Zyngier 	if (likely(irqchip_in_kernel(vcpu->kvm))) {
4169ed24f4bSMarc Zyngier 		int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
4179ed24f4bSMarc Zyngier 					      pmu->irq_num, overflow, pmu);
4189ed24f4bSMarc Zyngier 		WARN_ON(ret);
4199ed24f4bSMarc Zyngier 	}
4209ed24f4bSMarc Zyngier }
4219ed24f4bSMarc Zyngier 
4229ed24f4bSMarc Zyngier bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
4239ed24f4bSMarc Zyngier {
4249ed24f4bSMarc Zyngier 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
4259ed24f4bSMarc Zyngier 	struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
4269ed24f4bSMarc Zyngier 	bool run_level = sregs->device_irq_level & KVM_ARM_DEV_PMU;
4279ed24f4bSMarc Zyngier 
4289ed24f4bSMarc Zyngier 	if (likely(irqchip_in_kernel(vcpu->kvm)))
4299ed24f4bSMarc Zyngier 		return false;
4309ed24f4bSMarc Zyngier 
4319ed24f4bSMarc Zyngier 	return pmu->irq_level != run_level;
4329ed24f4bSMarc Zyngier }
4339ed24f4bSMarc Zyngier 
4349ed24f4bSMarc Zyngier /*
4359ed24f4bSMarc Zyngier  * Reflect the PMU overflow interrupt output level into the kvm_run structure
4369ed24f4bSMarc Zyngier  */
4379ed24f4bSMarc Zyngier void kvm_pmu_update_run(struct kvm_vcpu *vcpu)
4389ed24f4bSMarc Zyngier {
4399ed24f4bSMarc Zyngier 	struct kvm_sync_regs *regs = &vcpu->run->s.regs;
4409ed24f4bSMarc Zyngier 
4419ed24f4bSMarc Zyngier 	/* Populate the timer bitmap for user space */
4429ed24f4bSMarc Zyngier 	regs->device_irq_level &= ~KVM_ARM_DEV_PMU;
4439ed24f4bSMarc Zyngier 	if (vcpu->arch.pmu.irq_level)
4449ed24f4bSMarc Zyngier 		regs->device_irq_level |= KVM_ARM_DEV_PMU;
4459ed24f4bSMarc Zyngier }
4469ed24f4bSMarc Zyngier 
4479ed24f4bSMarc Zyngier /**
4489ed24f4bSMarc Zyngier  * kvm_pmu_flush_hwstate - flush pmu state to cpu
4499ed24f4bSMarc Zyngier  * @vcpu: The vcpu pointer
4509ed24f4bSMarc Zyngier  *
4519ed24f4bSMarc Zyngier  * Check if the PMU has overflowed while we were running in the host, and inject
4529ed24f4bSMarc Zyngier  * an interrupt if that was the case.
4539ed24f4bSMarc Zyngier  */
4549ed24f4bSMarc Zyngier void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu)
4559ed24f4bSMarc Zyngier {
4569ed24f4bSMarc Zyngier 	kvm_pmu_update_state(vcpu);
4579ed24f4bSMarc Zyngier }
4589ed24f4bSMarc Zyngier 
4599ed24f4bSMarc Zyngier /**
4609ed24f4bSMarc Zyngier  * kvm_pmu_sync_hwstate - sync pmu state from cpu
4619ed24f4bSMarc Zyngier  * @vcpu: The vcpu pointer
4629ed24f4bSMarc Zyngier  *
4639ed24f4bSMarc Zyngier  * Check if the PMU has overflowed while we were running in the guest, and
4649ed24f4bSMarc Zyngier  * inject an interrupt if that was the case.
4659ed24f4bSMarc Zyngier  */
4669ed24f4bSMarc Zyngier void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
4679ed24f4bSMarc Zyngier {
4689ed24f4bSMarc Zyngier 	kvm_pmu_update_state(vcpu);
4699ed24f4bSMarc Zyngier }
4709ed24f4bSMarc Zyngier 
4719ed24f4bSMarc Zyngier /**
47295e92e45SJulien Thierry  * When the perf interrupt is an NMI, we cannot safely notify the vcpu corresponding
47395e92e45SJulien Thierry  * to the event.
47495e92e45SJulien Thierry  * This is why we need a callback to do it once outside of the NMI context.
47595e92e45SJulien Thierry  */
47695e92e45SJulien Thierry static void kvm_pmu_perf_overflow_notify_vcpu(struct irq_work *work)
47795e92e45SJulien Thierry {
47895e92e45SJulien Thierry 	struct kvm_vcpu *vcpu;
47995e92e45SJulien Thierry 	struct kvm_pmu *pmu;
48095e92e45SJulien Thierry 
48195e92e45SJulien Thierry 	pmu = container_of(work, struct kvm_pmu, overflow_work);
48295e92e45SJulien Thierry 	vcpu = kvm_pmc_to_vcpu(pmu->pmc);
48395e92e45SJulien Thierry 
48495e92e45SJulien Thierry 	kvm_vcpu_kick(vcpu);
48595e92e45SJulien Thierry }
48695e92e45SJulien Thierry 
48795e92e45SJulien Thierry /**
4889ed24f4bSMarc Zyngier  * When the perf event overflows, set the overflow status and inform the vcpu.
4899ed24f4bSMarc Zyngier  */
4909ed24f4bSMarc Zyngier static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
4919ed24f4bSMarc Zyngier 				  struct perf_sample_data *data,
4929ed24f4bSMarc Zyngier 				  struct pt_regs *regs)
4939ed24f4bSMarc Zyngier {
4949ed24f4bSMarc Zyngier 	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
4959ed24f4bSMarc Zyngier 	struct arm_pmu *cpu_pmu = to_arm_pmu(perf_event->pmu);
4969ed24f4bSMarc Zyngier 	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
4979ed24f4bSMarc Zyngier 	int idx = pmc->idx;
4989ed24f4bSMarc Zyngier 	u64 period;
4999ed24f4bSMarc Zyngier 
5009ed24f4bSMarc Zyngier 	cpu_pmu->pmu.stop(perf_event, PERF_EF_UPDATE);
5019ed24f4bSMarc Zyngier 
5029ed24f4bSMarc Zyngier 	/*
5039ed24f4bSMarc Zyngier 	 * Reset the sample period to the architectural limit,
5049ed24f4bSMarc Zyngier 	 * i.e. the point where the counter overflows.
5059ed24f4bSMarc Zyngier 	 */
5069ed24f4bSMarc Zyngier 	period = -(local64_read(&perf_event->count));
5079ed24f4bSMarc Zyngier 
5089ed24f4bSMarc Zyngier 	if (!kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
5099ed24f4bSMarc Zyngier 		period &= GENMASK(31, 0);
5109ed24f4bSMarc Zyngier 
5119ed24f4bSMarc Zyngier 	local64_set(&perf_event->hw.period_left, 0);
5129ed24f4bSMarc Zyngier 	perf_event->attr.sample_period = period;
5139ed24f4bSMarc Zyngier 	perf_event->hw.sample_period = period;
5149ed24f4bSMarc Zyngier 
5159ed24f4bSMarc Zyngier 	__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);
5169ed24f4bSMarc Zyngier 
5179ed24f4bSMarc Zyngier 	if (kvm_pmu_overflow_status(vcpu)) {
5189ed24f4bSMarc Zyngier 		kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
51995e92e45SJulien Thierry 
52095e92e45SJulien Thierry 		if (!in_nmi())
5219ed24f4bSMarc Zyngier 			kvm_vcpu_kick(vcpu);
52295e92e45SJulien Thierry 		else
52395e92e45SJulien Thierry 			irq_work_queue(&vcpu->arch.pmu.overflow_work);
5249ed24f4bSMarc Zyngier 	}
5259ed24f4bSMarc Zyngier 
5269ed24f4bSMarc Zyngier 	cpu_pmu->pmu.start(perf_event, PERF_EF_RELOAD);
5279ed24f4bSMarc Zyngier }
5289ed24f4bSMarc Zyngier 
5299ed24f4bSMarc Zyngier /**
5309ed24f4bSMarc Zyngier  * kvm_pmu_software_increment - do software increment
5319ed24f4bSMarc Zyngier  * @vcpu: The vcpu pointer
5329ed24f4bSMarc Zyngier  * @val: the value guest writes to PMSWINC register
5339ed24f4bSMarc Zyngier  */
5349ed24f4bSMarc Zyngier void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
5359ed24f4bSMarc Zyngier {
5369ed24f4bSMarc Zyngier 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
5379ed24f4bSMarc Zyngier 	int i;
5389ed24f4bSMarc Zyngier 
5398f6379e2SAlexandru Elisei 	if (!kvm_vcpu_has_pmu(vcpu))
5408f6379e2SAlexandru Elisei 		return;
5418f6379e2SAlexandru Elisei 
5429ed24f4bSMarc Zyngier 	if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
5439ed24f4bSMarc Zyngier 		return;
5449ed24f4bSMarc Zyngier 
5459ed24f4bSMarc Zyngier 	/* Weed out disabled counters */
5469ed24f4bSMarc Zyngier 	val &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
5479ed24f4bSMarc Zyngier 
5489ed24f4bSMarc Zyngier 	for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++) {
5499ed24f4bSMarc Zyngier 		u64 type, reg;
5509ed24f4bSMarc Zyngier 
5519ed24f4bSMarc Zyngier 		if (!(val & BIT(i)))
5529ed24f4bSMarc Zyngier 			continue;
5539ed24f4bSMarc Zyngier 
5549ed24f4bSMarc Zyngier 		/* PMSWINC only applies to ... SW_INC! */
5559ed24f4bSMarc Zyngier 		type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i);
556fd65a3b5SMarc Zyngier 		type &= kvm_pmu_event_mask(vcpu->kvm);
5579ed24f4bSMarc Zyngier 		if (type != ARMV8_PMUV3_PERFCTR_SW_INCR)
5589ed24f4bSMarc Zyngier 			continue;
5599ed24f4bSMarc Zyngier 
5609ed24f4bSMarc Zyngier 		/* increment this SW_INC counter */
5619ed24f4bSMarc Zyngier 		reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
5629ed24f4bSMarc Zyngier 		reg = lower_32_bits(reg);
5639ed24f4bSMarc Zyngier 		__vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
5649ed24f4bSMarc Zyngier 
5659ed24f4bSMarc Zyngier 		if (reg) /* no overflow on the low part */
5669ed24f4bSMarc Zyngier 			continue;
5679ed24f4bSMarc Zyngier 
5689ed24f4bSMarc Zyngier 		if (kvm_pmu_pmc_is_chained(&pmu->pmc[i])) {
5699ed24f4bSMarc Zyngier 			/* increment the high counter */
5709ed24f4bSMarc Zyngier 			reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) + 1;
5719ed24f4bSMarc Zyngier 			reg = lower_32_bits(reg);
5729ed24f4bSMarc Zyngier 			__vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) = reg;
5739ed24f4bSMarc Zyngier 			if (!reg) /* mark overflow on the high counter */
5749ed24f4bSMarc Zyngier 				__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i + 1);
5759ed24f4bSMarc Zyngier 		} else {
5769ed24f4bSMarc Zyngier 			/* mark overflow on low counter */
5779ed24f4bSMarc Zyngier 			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
5789ed24f4bSMarc Zyngier 		}
5799ed24f4bSMarc Zyngier 	}
5809ed24f4bSMarc Zyngier }
5819ed24f4bSMarc Zyngier 
5829ed24f4bSMarc Zyngier /**
5839ed24f4bSMarc Zyngier  * kvm_pmu_handle_pmcr - handle PMCR register
5849ed24f4bSMarc Zyngier  * @vcpu: The vcpu pointer
5859ed24f4bSMarc Zyngier  * @val: the value guest writes to PMCR register
5869ed24f4bSMarc Zyngier  */
5879ed24f4bSMarc Zyngier void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
5889ed24f4bSMarc Zyngier {
5899ed24f4bSMarc Zyngier 	int i;
5909ed24f4bSMarc Zyngier 
5918f6379e2SAlexandru Elisei 	if (!kvm_vcpu_has_pmu(vcpu))
5928f6379e2SAlexandru Elisei 		return;
5938f6379e2SAlexandru Elisei 
5949ed24f4bSMarc Zyngier 	if (val & ARMV8_PMU_PMCR_E) {
5959ed24f4bSMarc Zyngier 		kvm_pmu_enable_counter_mask(vcpu,
596f5eff400SMarc Zyngier 		       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
5979ed24f4bSMarc Zyngier 	} else {
598ca4f202dSAlexandre Chartre 		kvm_pmu_disable_counter_mask(vcpu,
599ca4f202dSAlexandre Chartre 		       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
6009ed24f4bSMarc Zyngier 	}
6019ed24f4bSMarc Zyngier 
6029ed24f4bSMarc Zyngier 	if (val & ARMV8_PMU_PMCR_C)
6039ed24f4bSMarc Zyngier 		kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);
6049ed24f4bSMarc Zyngier 
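	/*
	 * PMCR_EL0.P resets all event counters, but not the cycle counter,
	 * which is only reset by PMCR_EL0.C (handled above).
	 */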
6059ed24f4bSMarc Zyngier 	if (val & ARMV8_PMU_PMCR_P) {
606ca4f202dSAlexandre Chartre 		unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
6072a71fabfSAlexandru Elisei 		mask &= ~BIT(ARMV8_PMU_CYCLE_IDX);
6089ed24f4bSMarc Zyngier 		for_each_set_bit(i, &mask, 32)
6099ed24f4bSMarc Zyngier 			kvm_pmu_set_counter_value(vcpu, i, 0);
6109ed24f4bSMarc Zyngier 	}
6119ed24f4bSMarc Zyngier }
6129ed24f4bSMarc Zyngier 
6139ed24f4bSMarc Zyngier static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx)
6149ed24f4bSMarc Zyngier {
6159ed24f4bSMarc Zyngier 	return (__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) &&
6169ed24f4bSMarc Zyngier 	       (__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(select_idx));
6179ed24f4bSMarc Zyngier }
6189ed24f4bSMarc Zyngier 
6199ed24f4bSMarc Zyngier /**
6209ed24f4bSMarc Zyngier  * kvm_pmu_create_perf_event - create a perf event for a counter
6219ed24f4bSMarc Zyngier  * @vcpu: The vcpu pointer
6229ed24f4bSMarc Zyngier  * @select_idx: The number of selected counter
6239ed24f4bSMarc Zyngier  */
6249ed24f4bSMarc Zyngier static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
6259ed24f4bSMarc Zyngier {
62646b18782SMarc Zyngier 	struct arm_pmu *arm_pmu = vcpu->kvm->arch.arm_pmu;
6279ed24f4bSMarc Zyngier 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
6289ed24f4bSMarc Zyngier 	struct kvm_pmc *pmc;
6299ed24f4bSMarc Zyngier 	struct perf_event *event;
6309ed24f4bSMarc Zyngier 	struct perf_event_attr attr;
6319ed24f4bSMarc Zyngier 	u64 eventsel, counter, reg, data;
6329ed24f4bSMarc Zyngier 
6339ed24f4bSMarc Zyngier 	/*
6349ed24f4bSMarc Zyngier 	 * For chained counters the event type and filtering attributes are
6359ed24f4bSMarc Zyngier 	 * obtained from the low/even counter. We also use this counter to
6369ed24f4bSMarc Zyngier 	 * determine if the event is enabled/disabled.
6379ed24f4bSMarc Zyngier 	 */
6389ed24f4bSMarc Zyngier 	pmc = kvm_pmu_get_canonical_pmc(&pmu->pmc[select_idx]);
6399ed24f4bSMarc Zyngier 
6409ed24f4bSMarc Zyngier 	reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
6419ed24f4bSMarc Zyngier 	      ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + pmc->idx;
6429ed24f4bSMarc Zyngier 	data = __vcpu_sys_reg(vcpu, reg);
6439ed24f4bSMarc Zyngier 
6449ed24f4bSMarc Zyngier 	kvm_pmu_stop_counter(vcpu, pmc);
645d7eec236SMarc Zyngier 	if (pmc->idx == ARMV8_PMU_CYCLE_IDX)
646d7eec236SMarc Zyngier 		eventsel = ARMV8_PMUV3_PERFCTR_CPU_CYCLES;
647d7eec236SMarc Zyngier 	else
648d7eec236SMarc Zyngier 		eventsel = data & kvm_pmu_event_mask(vcpu->kvm);
6499ed24f4bSMarc Zyngier 
650d7eec236SMarc Zyngier 	/* Software increment event doesn't need to be backed by a perf event */
651d7eec236SMarc Zyngier 	if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR)
652d7eec236SMarc Zyngier 		return;
653d7eec236SMarc Zyngier 
654d7eec236SMarc Zyngier 	/*
655d7eec236SMarc Zyngier 	 * If we have a filter in place and the event isn't allowed, do
656d7eec236SMarc Zyngier 	 * not install a perf event either.
657d7eec236SMarc Zyngier 	 */
658d7eec236SMarc Zyngier 	if (vcpu->kvm->arch.pmu_filter &&
659d7eec236SMarc Zyngier 	    !test_bit(eventsel, vcpu->kvm->arch.pmu_filter))
6609ed24f4bSMarc Zyngier 		return;
6619ed24f4bSMarc Zyngier 
6629ed24f4bSMarc Zyngier 	memset(&attr, 0, sizeof(struct perf_event_attr));
66346b18782SMarc Zyngier 	attr.type = arm_pmu->pmu.type;
6649ed24f4bSMarc Zyngier 	attr.size = sizeof(attr);
6659ed24f4bSMarc Zyngier 	attr.pinned = 1;
6669ed24f4bSMarc Zyngier 	attr.disabled = !kvm_pmu_counter_is_enabled(vcpu, pmc->idx);
6679ed24f4bSMarc Zyngier 	attr.exclude_user = data & ARMV8_PMU_EXCLUDE_EL0 ? 1 : 0;
6689ed24f4bSMarc Zyngier 	attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0;
6699ed24f4bSMarc Zyngier 	attr.exclude_hv = 1; /* Don't count EL2 events */
6709ed24f4bSMarc Zyngier 	attr.exclude_host = 1; /* Don't count host events */
671d7eec236SMarc Zyngier 	attr.config = eventsel;
6729ed24f4bSMarc Zyngier 
6739ed24f4bSMarc Zyngier 	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
6749ed24f4bSMarc Zyngier 
6759ed24f4bSMarc Zyngier 	if (kvm_pmu_pmc_is_chained(pmc)) {
6769ed24f4bSMarc Zyngier 		/**
6779ed24f4bSMarc Zyngier 		 * The initial sample period (overflow count) of an event. For
6789ed24f4bSMarc Zyngier 		 * chained counters we only support overflow interrupts on the
6799ed24f4bSMarc Zyngier 		 * high counter.
6809ed24f4bSMarc Zyngier 		 */
6819ed24f4bSMarc Zyngier 		attr.sample_period = (-counter) & GENMASK(63, 0);
6829ed24f4bSMarc Zyngier 		attr.config1 |= PERF_ATTR_CFG1_KVM_PMU_CHAINED;
6839ed24f4bSMarc Zyngier 
6849ed24f4bSMarc Zyngier 		event = perf_event_create_kernel_counter(&attr, -1, current,
6859ed24f4bSMarc Zyngier 							 kvm_pmu_perf_overflow,
6869ed24f4bSMarc Zyngier 							 pmc + 1);
6879ed24f4bSMarc Zyngier 	} else {
6889ed24f4bSMarc Zyngier 		/* The initial sample period (overflow count) of an event. */
6899ed24f4bSMarc Zyngier 		if (kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
6909ed24f4bSMarc Zyngier 			attr.sample_period = (-counter) & GENMASK(63, 0);
6919ed24f4bSMarc Zyngier 		else
6929ed24f4bSMarc Zyngier 			attr.sample_period = (-counter) & GENMASK(31, 0);
6939ed24f4bSMarc Zyngier 
6949ed24f4bSMarc Zyngier 		event = perf_event_create_kernel_counter(&attr, -1, current,
6959ed24f4bSMarc Zyngier 						 kvm_pmu_perf_overflow, pmc);
6969ed24f4bSMarc Zyngier 	}
6979ed24f4bSMarc Zyngier 
6989ed24f4bSMarc Zyngier 	if (IS_ERR(event)) {
6999ed24f4bSMarc Zyngier 		pr_err_once("kvm: pmu event creation failed %ld\n",
7009ed24f4bSMarc Zyngier 			    PTR_ERR(event));
7019ed24f4bSMarc Zyngier 		return;
7029ed24f4bSMarc Zyngier 	}
7039ed24f4bSMarc Zyngier 
7049ed24f4bSMarc Zyngier 	pmc->perf_event = event;
7059ed24f4bSMarc Zyngier }
7069ed24f4bSMarc Zyngier 
7079ed24f4bSMarc Zyngier /**
7089ed24f4bSMarc Zyngier  * kvm_pmu_update_pmc_chained - update chained bitmap
7099ed24f4bSMarc Zyngier  * @vcpu: The vcpu pointer
7109ed24f4bSMarc Zyngier  * @select_idx: The number of selected counter
7119ed24f4bSMarc Zyngier  *
7129ed24f4bSMarc Zyngier  * Update the chained bitmap based on the event type written in the
7139ed24f4bSMarc Zyngier  * typer register and the enable state of the odd register.
7149ed24f4bSMarc Zyngier  */
7159ed24f4bSMarc Zyngier static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx)
7169ed24f4bSMarc Zyngier {
7179ed24f4bSMarc Zyngier 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
7189ed24f4bSMarc Zyngier 	struct kvm_pmc *pmc = &pmu->pmc[select_idx], *canonical_pmc;
7199ed24f4bSMarc Zyngier 	bool new_state, old_state;
7209ed24f4bSMarc Zyngier 
7219ed24f4bSMarc Zyngier 	old_state = kvm_pmu_pmc_is_chained(pmc);
7229ed24f4bSMarc Zyngier 	new_state = kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx) &&
7239ed24f4bSMarc Zyngier 		    kvm_pmu_counter_is_enabled(vcpu, pmc->idx | 0x1);
7249ed24f4bSMarc Zyngier 
7259ed24f4bSMarc Zyngier 	if (old_state == new_state)
7269ed24f4bSMarc Zyngier 		return;
7279ed24f4bSMarc Zyngier 
7289ed24f4bSMarc Zyngier 	canonical_pmc = kvm_pmu_get_canonical_pmc(pmc);
7299ed24f4bSMarc Zyngier 	kvm_pmu_stop_counter(vcpu, canonical_pmc);
7309ed24f4bSMarc Zyngier 	if (new_state) {
7319ed24f4bSMarc Zyngier 		/*
7329ed24f4bSMarc Zyngier 		 * During promotion from !chained to chained we must ensure
7339ed24f4bSMarc Zyngier 		 * the adjacent counter is stopped and its event destroyed
7349ed24f4bSMarc Zyngier 		 */
7359ed24f4bSMarc Zyngier 		kvm_pmu_stop_counter(vcpu, kvm_pmu_get_alternate_pmc(pmc));
7369ed24f4bSMarc Zyngier 		set_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
7379ed24f4bSMarc Zyngier 		return;
7389ed24f4bSMarc Zyngier 	}
7399ed24f4bSMarc Zyngier 	clear_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
7409ed24f4bSMarc Zyngier }
7419ed24f4bSMarc Zyngier 
7429ed24f4bSMarc Zyngier /**
7439ed24f4bSMarc Zyngier  * kvm_pmu_set_counter_event_type - set selected counter to monitor some event
7449ed24f4bSMarc Zyngier  * @vcpu: The vcpu pointer
7459ed24f4bSMarc Zyngier  * @data: The data guest writes to PMXEVTYPER_EL0
7469ed24f4bSMarc Zyngier  * @select_idx: The number of selected counter
7479ed24f4bSMarc Zyngier  *
7489ed24f4bSMarc Zyngier  * When the guest OS accesses PMXEVTYPER_EL0, it wants to set a PMC to count an
7499ed24f4bSMarc Zyngier  * event with the given hardware event number. Here we call the perf_event API to
7509ed24f4bSMarc Zyngier  * emulate this action and create a kernel perf event for it.
7519ed24f4bSMarc Zyngier  */
7529ed24f4bSMarc Zyngier void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
7539ed24f4bSMarc Zyngier 				    u64 select_idx)
7549ed24f4bSMarc Zyngier {
755fd65a3b5SMarc Zyngier 	u64 reg, mask;
756fd65a3b5SMarc Zyngier 
7578f6379e2SAlexandru Elisei 	if (!kvm_vcpu_has_pmu(vcpu))
7588f6379e2SAlexandru Elisei 		return;
7598f6379e2SAlexandru Elisei 
760fd65a3b5SMarc Zyngier 	mask  =  ARMV8_PMU_EVTYPE_MASK;
761fd65a3b5SMarc Zyngier 	mask &= ~ARMV8_PMU_EVTYPE_EVENT;
762fd65a3b5SMarc Zyngier 	mask |= kvm_pmu_event_mask(vcpu->kvm);
7639ed24f4bSMarc Zyngier 
7649ed24f4bSMarc Zyngier 	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
7659ed24f4bSMarc Zyngier 	      ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + select_idx;
7669ed24f4bSMarc Zyngier 
767fd65a3b5SMarc Zyngier 	__vcpu_sys_reg(vcpu, reg) = data & mask;
7689ed24f4bSMarc Zyngier 
7699ed24f4bSMarc Zyngier 	kvm_pmu_update_pmc_chained(vcpu, select_idx);
7709ed24f4bSMarc Zyngier 	kvm_pmu_create_perf_event(vcpu, select_idx);
7719ed24f4bSMarc Zyngier }
7729ed24f4bSMarc Zyngier 
773e840f42aSMarc Zyngier void kvm_host_pmu_init(struct arm_pmu *pmu)
774e840f42aSMarc Zyngier {
775db858060SAlexandru Elisei 	struct arm_pmu_entry *entry;
776db858060SAlexandru Elisei 
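	/* Only PMUs advertising an architected PMUv3 version can back a guest PMU. */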
777*fcf37b38SMark Brown 	if (pmu->pmuver == 0 || pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
778db858060SAlexandru Elisei 		return;
779db858060SAlexandru Elisei 
780db858060SAlexandru Elisei 	mutex_lock(&arm_pmus_lock);
781db858060SAlexandru Elisei 
782db858060SAlexandru Elisei 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
783db858060SAlexandru Elisei 	if (!entry)
784db858060SAlexandru Elisei 		goto out_unlock;
785db858060SAlexandru Elisei 
786db858060SAlexandru Elisei 	entry->arm_pmu = pmu;
787db858060SAlexandru Elisei 	list_add_tail(&entry->entry, &arm_pmus);
788db858060SAlexandru Elisei 
789db858060SAlexandru Elisei 	if (list_is_singular(&arm_pmus))
790e840f42aSMarc Zyngier 		static_branch_enable(&kvm_arm_pmu_available);
791db858060SAlexandru Elisei 
792db858060SAlexandru Elisei out_unlock:
793db858060SAlexandru Elisei 	mutex_unlock(&arm_pmus_lock);
794e840f42aSMarc Zyngier }
795e840f42aSMarc Zyngier 
79646b18782SMarc Zyngier static struct arm_pmu *kvm_pmu_probe_armpmu(void)
797fd65a3b5SMarc Zyngier {
798fd65a3b5SMarc Zyngier 	struct perf_event_attr attr = { };
799fd65a3b5SMarc Zyngier 	struct perf_event *event;
80046b18782SMarc Zyngier 	struct arm_pmu *pmu = NULL;
801fd65a3b5SMarc Zyngier 
802fd65a3b5SMarc Zyngier 	/*
803fd65a3b5SMarc Zyngier 	 * Create a dummy event that only counts user cycles. As we'll never
804fd65a3b5SMarc Zyngier 	 * leave this function with the event being live, it will never
805fd65a3b5SMarc Zyngier 	 * count anything. But it allows us to probe some of the PMU
806fd65a3b5SMarc Zyngier 	 * details. Yes, this is terrible.
807fd65a3b5SMarc Zyngier 	 */
808fd65a3b5SMarc Zyngier 	attr.type = PERF_TYPE_RAW;
809fd65a3b5SMarc Zyngier 	attr.size = sizeof(attr);
810fd65a3b5SMarc Zyngier 	attr.pinned = 1;
811fd65a3b5SMarc Zyngier 	attr.disabled = 0;
812fd65a3b5SMarc Zyngier 	attr.exclude_user = 0;
813fd65a3b5SMarc Zyngier 	attr.exclude_kernel = 1;
814fd65a3b5SMarc Zyngier 	attr.exclude_hv = 1;
815fd65a3b5SMarc Zyngier 	attr.exclude_host = 1;
816fd65a3b5SMarc Zyngier 	attr.config = ARMV8_PMUV3_PERFCTR_CPU_CYCLES;
817fd65a3b5SMarc Zyngier 	attr.sample_period = GENMASK(63, 0);
818fd65a3b5SMarc Zyngier 
819fd65a3b5SMarc Zyngier 	event = perf_event_create_kernel_counter(&attr, -1, current,
820fd65a3b5SMarc Zyngier 						 kvm_pmu_perf_overflow, &attr);
821fd65a3b5SMarc Zyngier 
822fd65a3b5SMarc Zyngier 	if (IS_ERR(event)) {
823fd65a3b5SMarc Zyngier 		pr_err_once("kvm: pmu event creation failed %ld\n",
824fd65a3b5SMarc Zyngier 			    PTR_ERR(event));
82546b18782SMarc Zyngier 		return NULL;
826fd65a3b5SMarc Zyngier 	}
827fd65a3b5SMarc Zyngier 
828fd65a3b5SMarc Zyngier 	if (event->pmu) {
829fd65a3b5SMarc Zyngier 		pmu = to_arm_pmu(event->pmu);
83046b18782SMarc Zyngier 		if (pmu->pmuver == 0 ||
831*fcf37b38SMark Brown 		    pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
83246b18782SMarc Zyngier 			pmu = NULL;
833fd65a3b5SMarc Zyngier 	}
834fd65a3b5SMarc Zyngier 
835fd65a3b5SMarc Zyngier 	perf_event_disable(event);
836fd65a3b5SMarc Zyngier 	perf_event_release_kernel(event);
837fd65a3b5SMarc Zyngier 
83846b18782SMarc Zyngier 	return pmu;
839fd65a3b5SMarc Zyngier }
840fd65a3b5SMarc Zyngier 
84188865becSMarc Zyngier u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
84288865becSMarc Zyngier {
84388865becSMarc Zyngier 	unsigned long *bmap = vcpu->kvm->arch.pmu_filter;
84488865becSMarc Zyngier 	u64 val, mask = 0;
8459529aaa0SMarc Zyngier 	int base, i, nr_events;
84688865becSMarc Zyngier 
8478f6379e2SAlexandru Elisei 	if (!kvm_vcpu_has_pmu(vcpu))
8488f6379e2SAlexandru Elisei 		return 0;
8498f6379e2SAlexandru Elisei 
85088865becSMarc Zyngier 	if (!pmceid1) {
85188865becSMarc Zyngier 		val = read_sysreg(pmceid0_el0);
85288865becSMarc Zyngier 		base = 0;
85388865becSMarc Zyngier 	} else {
85488865becSMarc Zyngier 		val = read_sysreg(pmceid1_el0);
85546081078SMarc Zyngier 		/*
85646081078SMarc Zyngier 		 * Don't advertise STALL_SLOT, as PMMIR_EL0 is handled
85746081078SMarc Zyngier 		 * as RAZ
85846081078SMarc Zyngier 		 */
859*fcf37b38SMark Brown 		if (vcpu->kvm->arch.arm_pmu->pmuver >= ID_AA64DFR0_EL1_PMUVer_8_4)
86046081078SMarc Zyngier 			val &= ~BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32);
86188865becSMarc Zyngier 		base = 32;
86288865becSMarc Zyngier 	}
86388865becSMarc Zyngier 
86488865becSMarc Zyngier 	if (!bmap)
86588865becSMarc Zyngier 		return val;
86688865becSMarc Zyngier 
8679529aaa0SMarc Zyngier 	nr_events = kvm_pmu_event_mask(vcpu->kvm) + 1;
8689529aaa0SMarc Zyngier 
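	/*
	 * Apply the event filter one byte of the bitmap at a time: the low
	 * 32 bits of PMCEID{0,1}_EL0 describe the base common events, the
	 * high 32 bits the extended range starting at event 0x4000, which is
	 * only considered if the guest's PMU supports that many events.
	 */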
86988865becSMarc Zyngier 	for (i = 0; i < 32; i += 8) {
87088865becSMarc Zyngier 		u64 byte;
87188865becSMarc Zyngier 
87288865becSMarc Zyngier 		byte = bitmap_get_value8(bmap, base + i);
87388865becSMarc Zyngier 		mask |= byte << i;
8749529aaa0SMarc Zyngier 		if (nr_events >= (0x4000 + base + 32)) {
87588865becSMarc Zyngier 			byte = bitmap_get_value8(bmap, 0x4000 + base + i);
87688865becSMarc Zyngier 			mask |= byte << (32 + i);
87788865becSMarc Zyngier 		}
8789529aaa0SMarc Zyngier 	}
87988865becSMarc Zyngier 
88088865becSMarc Zyngier 	return val & mask;
88188865becSMarc Zyngier }
88288865becSMarc Zyngier 
8839ed24f4bSMarc Zyngier int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
8849ed24f4bSMarc Zyngier {
8859bbfa4b5SAlexandru Elisei 	if (!kvm_vcpu_has_pmu(vcpu))
8869ed24f4bSMarc Zyngier 		return 0;
8879ed24f4bSMarc Zyngier 
8889bbfa4b5SAlexandru Elisei 	if (!vcpu->arch.pmu.created)
8899bbfa4b5SAlexandru Elisei 		return -EINVAL;
8909bbfa4b5SAlexandru Elisei 
8919ed24f4bSMarc Zyngier 	/*
8929ed24f4bSMarc Zyngier 	 * A valid interrupt configuration for the PMU is either to have a
8939ed24f4bSMarc Zyngier 	 * properly configured interrupt number and using an in-kernel
8949ed24f4bSMarc Zyngier 	 * irqchip, or to not have an in-kernel GIC and not set an IRQ.
8959ed24f4bSMarc Zyngier 	 */
8969ed24f4bSMarc Zyngier 	if (irqchip_in_kernel(vcpu->kvm)) {
8979ed24f4bSMarc Zyngier 		int irq = vcpu->arch.pmu.irq_num;
8989ed24f4bSMarc Zyngier 		/*
8999ed24f4bSMarc Zyngier 		 * If we are using an in-kernel vgic, at this point we know
9009ed24f4bSMarc Zyngier 		 * the vgic will be initialized, so we can check the PMU irq
9019ed24f4bSMarc Zyngier 		 * number against the dimensions of the vgic and make sure
9029ed24f4bSMarc Zyngier 		 * it's valid.
9039ed24f4bSMarc Zyngier 		 */
9049ed24f4bSMarc Zyngier 		if (!irq_is_ppi(irq) && !vgic_valid_spi(vcpu->kvm, irq))
9059ed24f4bSMarc Zyngier 			return -EINVAL;
9069ed24f4bSMarc Zyngier 	} else if (kvm_arm_pmu_irq_initialized(vcpu)) {
9079ed24f4bSMarc Zyngier 		   return -EINVAL;
9089ed24f4bSMarc Zyngier 	}
9099ed24f4bSMarc Zyngier 
910d0c94c49SMarc Zyngier 	/* One-off reload of the PMU on first run */
911d0c94c49SMarc Zyngier 	kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
912d0c94c49SMarc Zyngier 
9139ed24f4bSMarc Zyngier 	return 0;
9149ed24f4bSMarc Zyngier }
9159ed24f4bSMarc Zyngier 
9169ed24f4bSMarc Zyngier static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
9179ed24f4bSMarc Zyngier {
9189ed24f4bSMarc Zyngier 	if (irqchip_in_kernel(vcpu->kvm)) {
9199ed24f4bSMarc Zyngier 		int ret;
9209ed24f4bSMarc Zyngier 
9219ed24f4bSMarc Zyngier 		/*
9229ed24f4bSMarc Zyngier 		 * If using the PMU with an in-kernel virtual GIC
9239ed24f4bSMarc Zyngier 		 * implementation, we require the GIC to be already
9249ed24f4bSMarc Zyngier 		 * initialized when initializing the PMU.
9259ed24f4bSMarc Zyngier 		 */
9269ed24f4bSMarc Zyngier 		if (!vgic_initialized(vcpu->kvm))
9279ed24f4bSMarc Zyngier 			return -ENODEV;
9289ed24f4bSMarc Zyngier 
9299ed24f4bSMarc Zyngier 		if (!kvm_arm_pmu_irq_initialized(vcpu))
9309ed24f4bSMarc Zyngier 			return -ENXIO;
9319ed24f4bSMarc Zyngier 
9329ed24f4bSMarc Zyngier 		ret = kvm_vgic_set_owner(vcpu, vcpu->arch.pmu.irq_num,
9339ed24f4bSMarc Zyngier 					 &vcpu->arch.pmu);
9349ed24f4bSMarc Zyngier 		if (ret)
9359ed24f4bSMarc Zyngier 			return ret;
9369ed24f4bSMarc Zyngier 	}
9379ed24f4bSMarc Zyngier 
93895e92e45SJulien Thierry 	init_irq_work(&vcpu->arch.pmu.overflow_work,
93995e92e45SJulien Thierry 		      kvm_pmu_perf_overflow_notify_vcpu);
94095e92e45SJulien Thierry 
9419ed24f4bSMarc Zyngier 	vcpu->arch.pmu.created = true;
9429ed24f4bSMarc Zyngier 	return 0;
9439ed24f4bSMarc Zyngier }
9449ed24f4bSMarc Zyngier 
9459ed24f4bSMarc Zyngier /*
9469ed24f4bSMarc Zyngier  * For one VM the interrupt type must be the same for each vcpu.
9479ed24f4bSMarc Zyngier  * As a PPI, the interrupt number is the same for all vcpus,
9489ed24f4bSMarc Zyngier  * while as an SPI it must be a separate number per vcpu.
9499ed24f4bSMarc Zyngier  */
9509ed24f4bSMarc Zyngier static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
9519ed24f4bSMarc Zyngier {
95246808a4cSMarc Zyngier 	unsigned long i;
9539ed24f4bSMarc Zyngier 	struct kvm_vcpu *vcpu;
9549ed24f4bSMarc Zyngier 
9559ed24f4bSMarc Zyngier 	kvm_for_each_vcpu(i, vcpu, kvm) {
9569ed24f4bSMarc Zyngier 		if (!kvm_arm_pmu_irq_initialized(vcpu))
9579ed24f4bSMarc Zyngier 			continue;
9589ed24f4bSMarc Zyngier 
9599ed24f4bSMarc Zyngier 		if (irq_is_ppi(irq)) {
9609ed24f4bSMarc Zyngier 			if (vcpu->arch.pmu.irq_num != irq)
9619ed24f4bSMarc Zyngier 				return false;
9629ed24f4bSMarc Zyngier 		} else {
9639ed24f4bSMarc Zyngier 			if (vcpu->arch.pmu.irq_num == irq)
9649ed24f4bSMarc Zyngier 				return false;
9659ed24f4bSMarc Zyngier 		}
9669ed24f4bSMarc Zyngier 	}
9679ed24f4bSMarc Zyngier 
9689ed24f4bSMarc Zyngier 	return true;
9699ed24f4bSMarc Zyngier }
9709ed24f4bSMarc Zyngier 
9716ee7fca2SAlexandru Elisei static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id)
9726ee7fca2SAlexandru Elisei {
9736ee7fca2SAlexandru Elisei 	struct kvm *kvm = vcpu->kvm;
9746ee7fca2SAlexandru Elisei 	struct arm_pmu_entry *entry;
9756ee7fca2SAlexandru Elisei 	struct arm_pmu *arm_pmu;
9766ee7fca2SAlexandru Elisei 	int ret = -ENXIO;
9776ee7fca2SAlexandru Elisei 
9786ee7fca2SAlexandru Elisei 	mutex_lock(&kvm->lock);
9796ee7fca2SAlexandru Elisei 	mutex_lock(&arm_pmus_lock);
9806ee7fca2SAlexandru Elisei 
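	/*
	 * The PMU cannot be changed once the VM has run, nor once an event
	 * filter has been installed against a different PMU.
	 */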
9816ee7fca2SAlexandru Elisei 	list_for_each_entry(entry, &arm_pmus, entry) {
9826ee7fca2SAlexandru Elisei 		arm_pmu = entry->arm_pmu;
9836ee7fca2SAlexandru Elisei 		if (arm_pmu->pmu.type == pmu_id) {
98406394531SMarc Zyngier 			if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags) ||
9856ee7fca2SAlexandru Elisei 			    (kvm->arch.pmu_filter && kvm->arch.arm_pmu != arm_pmu)) {
9866ee7fca2SAlexandru Elisei 				ret = -EBUSY;
9876ee7fca2SAlexandru Elisei 				break;
9886ee7fca2SAlexandru Elisei 			}
9896ee7fca2SAlexandru Elisei 
9906ee7fca2SAlexandru Elisei 			kvm->arch.arm_pmu = arm_pmu;
991583cda1bSAlexandru Elisei 			cpumask_copy(kvm->arch.supported_cpus, &arm_pmu->supported_cpus);
9926ee7fca2SAlexandru Elisei 			ret = 0;
9936ee7fca2SAlexandru Elisei 			break;
9946ee7fca2SAlexandru Elisei 		}
9956ee7fca2SAlexandru Elisei 	}
9966ee7fca2SAlexandru Elisei 
9976ee7fca2SAlexandru Elisei 	mutex_unlock(&arm_pmus_lock);
9986ee7fca2SAlexandru Elisei 	mutex_unlock(&kvm->lock);
9996ee7fca2SAlexandru Elisei 	return ret;
10006ee7fca2SAlexandru Elisei }
10016ee7fca2SAlexandru Elisei 
10029ed24f4bSMarc Zyngier int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
10039ed24f4bSMarc Zyngier {
10045177fe91SMarc Zyngier 	struct kvm *kvm = vcpu->kvm;
10055177fe91SMarc Zyngier 
100677da4303SMarc Zyngier 	if (!kvm_vcpu_has_pmu(vcpu))
100742223fb1SMarc Zyngier 		return -ENODEV;
100842223fb1SMarc Zyngier 
100942223fb1SMarc Zyngier 	if (vcpu->arch.pmu.created)
101042223fb1SMarc Zyngier 		return -EBUSY;
101142223fb1SMarc Zyngier 
101246b18782SMarc Zyngier 	mutex_lock(&kvm->lock);
101346b18782SMarc Zyngier 	if (!kvm->arch.arm_pmu) {
101446b18782SMarc Zyngier 		/* No PMU set, get the default one */
101546b18782SMarc Zyngier 		kvm->arch.arm_pmu = kvm_pmu_probe_armpmu();
101646b18782SMarc Zyngier 		if (!kvm->arch.arm_pmu) {
101746b18782SMarc Zyngier 			mutex_unlock(&kvm->lock);
1018fd65a3b5SMarc Zyngier 			return -ENODEV;
101946b18782SMarc Zyngier 		}
102046b18782SMarc Zyngier 	}
102146b18782SMarc Zyngier 	mutex_unlock(&kvm->lock);
1022fd65a3b5SMarc Zyngier 
10239ed24f4bSMarc Zyngier 	switch (attr->attr) {
10249ed24f4bSMarc Zyngier 	case KVM_ARM_VCPU_PMU_V3_IRQ: {
10259ed24f4bSMarc Zyngier 		int __user *uaddr = (int __user *)(long)attr->addr;
10269ed24f4bSMarc Zyngier 		int irq;
10279ed24f4bSMarc Zyngier 
10285177fe91SMarc Zyngier 		if (!irqchip_in_kernel(kvm))
10299ed24f4bSMarc Zyngier 			return -EINVAL;
10309ed24f4bSMarc Zyngier 
10319ed24f4bSMarc Zyngier 		if (get_user(irq, uaddr))
10329ed24f4bSMarc Zyngier 			return -EFAULT;
10339ed24f4bSMarc Zyngier 
10349ed24f4bSMarc Zyngier 		/* The PMU overflow interrupt can be a PPI or a valid SPI. */
10359ed24f4bSMarc Zyngier 		if (!(irq_is_ppi(irq) || irq_is_spi(irq)))
10369ed24f4bSMarc Zyngier 			return -EINVAL;
10379ed24f4bSMarc Zyngier 
10385177fe91SMarc Zyngier 		if (!pmu_irq_is_valid(kvm, irq))
10399ed24f4bSMarc Zyngier 			return -EINVAL;
10409ed24f4bSMarc Zyngier 
10419ed24f4bSMarc Zyngier 		if (kvm_arm_pmu_irq_initialized(vcpu))
10429ed24f4bSMarc Zyngier 			return -EBUSY;
10439ed24f4bSMarc Zyngier 
10449ed24f4bSMarc Zyngier 		kvm_debug("Set kvm ARM PMU irq: %d\n", irq);
10459ed24f4bSMarc Zyngier 		vcpu->arch.pmu.irq_num = irq;
10469ed24f4bSMarc Zyngier 		return 0;
10479ed24f4bSMarc Zyngier 	}
1048d7eec236SMarc Zyngier 	case KVM_ARM_VCPU_PMU_V3_FILTER: {
1049d7eec236SMarc Zyngier 		struct kvm_pmu_event_filter __user *uaddr;
1050d7eec236SMarc Zyngier 		struct kvm_pmu_event_filter filter;
1051d7eec236SMarc Zyngier 		int nr_events;
1052d7eec236SMarc Zyngier 
10535177fe91SMarc Zyngier 		nr_events = kvm_pmu_event_mask(kvm) + 1;
1054d7eec236SMarc Zyngier 
1055d7eec236SMarc Zyngier 		uaddr = (struct kvm_pmu_event_filter __user *)(long)attr->addr;
1056d7eec236SMarc Zyngier 
1057d7eec236SMarc Zyngier 		if (copy_from_user(&filter, uaddr, sizeof(filter)))
1058d7eec236SMarc Zyngier 			return -EFAULT;
1059d7eec236SMarc Zyngier 
1060d7eec236SMarc Zyngier 		if (((u32)filter.base_event + filter.nevents) > nr_events ||
1061d7eec236SMarc Zyngier 		    (filter.action != KVM_PMU_EVENT_ALLOW &&
1062d7eec236SMarc Zyngier 		     filter.action != KVM_PMU_EVENT_DENY))
1063d7eec236SMarc Zyngier 			return -EINVAL;
1064d7eec236SMarc Zyngier 
10655177fe91SMarc Zyngier 		mutex_lock(&kvm->lock);
1066d7eec236SMarc Zyngier 
106706394531SMarc Zyngier 		if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags)) {
10685177fe91SMarc Zyngier 			mutex_unlock(&kvm->lock);
10695177fe91SMarc Zyngier 			return -EBUSY;
10705177fe91SMarc Zyngier 		}
10715177fe91SMarc Zyngier 
10725177fe91SMarc Zyngier 		if (!kvm->arch.pmu_filter) {
10735177fe91SMarc Zyngier 			kvm->arch.pmu_filter = bitmap_alloc(nr_events, GFP_KERNEL_ACCOUNT);
10745177fe91SMarc Zyngier 			if (!kvm->arch.pmu_filter) {
10755177fe91SMarc Zyngier 				mutex_unlock(&kvm->lock);
1076d7eec236SMarc Zyngier 				return -ENOMEM;
1077d7eec236SMarc Zyngier 			}
1078d7eec236SMarc Zyngier 
1079d7eec236SMarc Zyngier 			/*
1080d7eec236SMarc Zyngier 			 * The default depends on the first applied filter.
1081d7eec236SMarc Zyngier 			 * If it allows events, the default is to deny.
1082d7eec236SMarc Zyngier 			 * Conversely, if the first filter denies a set of
1083d7eec236SMarc Zyngier 			 * events, the default is to allow.
1084d7eec236SMarc Zyngier 			 */
1085d7eec236SMarc Zyngier 			if (filter.action == KVM_PMU_EVENT_ALLOW)
10865177fe91SMarc Zyngier 				bitmap_zero(kvm->arch.pmu_filter, nr_events);
1087d7eec236SMarc Zyngier 			else
10885177fe91SMarc Zyngier 				bitmap_fill(kvm->arch.pmu_filter, nr_events);
1089d7eec236SMarc Zyngier 		}
1090d7eec236SMarc Zyngier 
1091d7eec236SMarc Zyngier 		if (filter.action == KVM_PMU_EVENT_ALLOW)
10925177fe91SMarc Zyngier 			bitmap_set(kvm->arch.pmu_filter, filter.base_event, filter.nevents);
1093d7eec236SMarc Zyngier 		else
10945177fe91SMarc Zyngier 			bitmap_clear(kvm->arch.pmu_filter, filter.base_event, filter.nevents);
1095d7eec236SMarc Zyngier 
10965177fe91SMarc Zyngier 		mutex_unlock(&kvm->lock);
1097d7eec236SMarc Zyngier 
1098d7eec236SMarc Zyngier 		return 0;
1099d7eec236SMarc Zyngier 	}
11006ee7fca2SAlexandru Elisei 	case KVM_ARM_VCPU_PMU_V3_SET_PMU: {
11016ee7fca2SAlexandru Elisei 		int __user *uaddr = (int __user *)(long)attr->addr;
11026ee7fca2SAlexandru Elisei 		int pmu_id;
11036ee7fca2SAlexandru Elisei 
11046ee7fca2SAlexandru Elisei 		if (get_user(pmu_id, uaddr))
11056ee7fca2SAlexandru Elisei 			return -EFAULT;
11066ee7fca2SAlexandru Elisei 
11076ee7fca2SAlexandru Elisei 		return kvm_arm_pmu_v3_set_pmu(vcpu, pmu_id);
11086ee7fca2SAlexandru Elisei 	}
11099ed24f4bSMarc Zyngier 	case KVM_ARM_VCPU_PMU_V3_INIT:
11109ed24f4bSMarc Zyngier 		return kvm_arm_pmu_v3_init(vcpu);
11119ed24f4bSMarc Zyngier 	}
11129ed24f4bSMarc Zyngier 
11139ed24f4bSMarc Zyngier 	return -ENXIO;
11149ed24f4bSMarc Zyngier }
11159ed24f4bSMarc Zyngier 
11169ed24f4bSMarc Zyngier int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
11179ed24f4bSMarc Zyngier {
11189ed24f4bSMarc Zyngier 	switch (attr->attr) {
11199ed24f4bSMarc Zyngier 	case KVM_ARM_VCPU_PMU_V3_IRQ: {
11209ed24f4bSMarc Zyngier 		int __user *uaddr = (int __user *)(long)attr->addr;
11219ed24f4bSMarc Zyngier 		int irq;
11229ed24f4bSMarc Zyngier 
11239ed24f4bSMarc Zyngier 		if (!irqchip_in_kernel(vcpu->kvm))
11249ed24f4bSMarc Zyngier 			return -EINVAL;
11259ed24f4bSMarc Zyngier 
112614bda7a9SMarc Zyngier 		if (!kvm_vcpu_has_pmu(vcpu))
11279ed24f4bSMarc Zyngier 			return -ENODEV;
11289ed24f4bSMarc Zyngier 
11299ed24f4bSMarc Zyngier 		if (!kvm_arm_pmu_irq_initialized(vcpu))
11309ed24f4bSMarc Zyngier 			return -ENXIO;
11319ed24f4bSMarc Zyngier 
11329ed24f4bSMarc Zyngier 		irq = vcpu->arch.pmu.irq_num;
11339ed24f4bSMarc Zyngier 		return put_user(irq, uaddr);
11349ed24f4bSMarc Zyngier 	}
11359ed24f4bSMarc Zyngier 	}
11369ed24f4bSMarc Zyngier 
11379ed24f4bSMarc Zyngier 	return -ENXIO;
11389ed24f4bSMarc Zyngier }
11399ed24f4bSMarc Zyngier 
11409ed24f4bSMarc Zyngier int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
11419ed24f4bSMarc Zyngier {
11429ed24f4bSMarc Zyngier 	switch (attr->attr) {
11439ed24f4bSMarc Zyngier 	case KVM_ARM_VCPU_PMU_V3_IRQ:
11449ed24f4bSMarc Zyngier 	case KVM_ARM_VCPU_PMU_V3_INIT:
1145d7eec236SMarc Zyngier 	case KVM_ARM_VCPU_PMU_V3_FILTER:
11466ee7fca2SAlexandru Elisei 	case KVM_ARM_VCPU_PMU_V3_SET_PMU:
114777da4303SMarc Zyngier 		if (kvm_vcpu_has_pmu(vcpu))
11489ed24f4bSMarc Zyngier 			return 0;
11499ed24f4bSMarc Zyngier 	}
11509ed24f4bSMarc Zyngier 
11519ed24f4bSMarc Zyngier 	return -ENXIO;
11529ed24f4bSMarc Zyngier }
1153