xref: /openbmc/linux/arch/arm64/kvm/pmu-emul.c (revision 00e228b3)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <linux/perf/arm_pmu.h>
#include <linux/uaccess.h>
#include <asm/kvm_emulate.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_vgic.h>

static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx);
static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx);
static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc);

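/*
 * Passed to the host PMU driver via attr.config1: bit 0 requests a
 * 64-bit (chained) event (assumption: this mirrors the config1
 * convention the arm_pmu driver uses for 64-bit events).
 */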
#define PERF_ATTR_CFG1_KVM_PMU_CHAINED 0x1

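/*
 * Width of the event number field: PMUv3 for ARMv8.0 defines 10-bit
 * event numbers; ARMv8.1 and later widen the field to 16 bits.
 */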
static u32 kvm_pmu_event_mask(struct kvm *kvm)
{
        switch (kvm->arch.pmuver) {
        case ID_AA64DFR0_PMUVER_8_0:
                return GENMASK(9, 0);
        case ID_AA64DFR0_PMUVER_8_1:
        case ID_AA64DFR0_PMUVER_8_4:
        case ID_AA64DFR0_PMUVER_8_5:
        case ID_AA64DFR0_PMUVER_8_7:
                return GENMASK(15, 0);
        default:                /* Shouldn't be here, just for sanity */
                WARN_ONCE(1, "Unknown PMU version %d\n", kvm->arch.pmuver);
                return 0;
        }
}

/**
 * kvm_pmu_idx_is_64bit - determine if select_idx is a 64-bit counter
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
static bool kvm_pmu_idx_is_64bit(struct kvm_vcpu *vcpu, u64 select_idx)
{
        return (select_idx == ARMV8_PMU_CYCLE_IDX &&
                __vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_LC);
}

static struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
{
        struct kvm_pmu *pmu;
        struct kvm_vcpu_arch *vcpu_arch;

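        /*
         * The pmc array is embedded in struct kvm_pmu: rewind to pmc[0],
         * then walk the container_of() chain back up to the vcpu.
         */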
        pmc -= pmc->idx;
        pmu = container_of(pmc, struct kvm_pmu, pmc[0]);
        vcpu_arch = container_of(pmu, struct kvm_vcpu_arch, pmu);
        return container_of(vcpu_arch, struct kvm_vcpu, arch);
}

/**
 * kvm_pmu_pmc_is_chained - determine if the pmc is chained
 * @pmc: The PMU counter pointer
 */
static bool kvm_pmu_pmc_is_chained(struct kvm_pmc *pmc)
{
        struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);

        return test_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
}

/**
 * kvm_pmu_idx_is_high_counter - determine if select_idx is the high counter of a pair
 * @select_idx: The counter index
 */
static bool kvm_pmu_idx_is_high_counter(u64 select_idx)
{
        return select_idx & 0x1;
}

/**
 * kvm_pmu_get_canonical_pmc - obtain the canonical pmc
 * @pmc: The PMU counter pointer
 *
 * When a pair of PMCs are chained together we use the low counter (canonical)
 * to hold the underlying perf event.
 */
static struct kvm_pmc *kvm_pmu_get_canonical_pmc(struct kvm_pmc *pmc)
{
        if (kvm_pmu_pmc_is_chained(pmc) &&
            kvm_pmu_idx_is_high_counter(pmc->idx))
                return pmc - 1;

        return pmc;
}

static struct kvm_pmc *kvm_pmu_get_alternate_pmc(struct kvm_pmc *pmc)
{
        if (kvm_pmu_idx_is_high_counter(pmc->idx))
                return pmc - 1;
        else
                return pmc + 1;
}

/**
 * kvm_pmu_idx_has_chain_evtype - determine if the event type is the CHAIN event
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
static bool kvm_pmu_idx_has_chain_evtype(struct kvm_vcpu *vcpu, u64 select_idx)
{
        u64 eventsel, reg;

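        /* The CHAIN event is programmed on the odd (high) counter of the pair. */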
        select_idx |= 0x1;

        if (select_idx == ARMV8_PMU_CYCLE_IDX)
                return false;

        reg = PMEVTYPER0_EL0 + select_idx;
        eventsel = __vcpu_sys_reg(vcpu, reg) & kvm_pmu_event_mask(vcpu->kvm);

        return eventsel == ARMV8_PMUV3_PERFCTR_CHAIN;
}

/**
 * kvm_pmu_get_pair_counter_value - get PMU counter value
 * @vcpu: The vcpu pointer
 * @pmc: The PMU counter pointer
 */
static u64 kvm_pmu_get_pair_counter_value(struct kvm_vcpu *vcpu,
                                          struct kvm_pmc *pmc)
{
        u64 counter, counter_high, reg, enabled, running;

        if (kvm_pmu_pmc_is_chained(pmc)) {
                pmc = kvm_pmu_get_canonical_pmc(pmc);
                reg = PMEVCNTR0_EL0 + pmc->idx;

                counter = __vcpu_sys_reg(vcpu, reg);
                counter_high = __vcpu_sys_reg(vcpu, reg + 1);

                counter = lower_32_bits(counter) | (counter_high << 32);
        } else {
                reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
                      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
                counter = __vcpu_sys_reg(vcpu, reg);
        }

        /*
         * The real counter value is equal to the value of the counter
         * register plus whatever the perf event has counted so far.
         */
        if (pmc->perf_event)
                counter += perf_event_read_value(pmc->perf_event, &enabled,
                                                 &running);

        return counter;
}

/**
 * kvm_pmu_get_counter_value - get PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
{
        u64 counter;
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_pmc *pmc = &pmu->pmc[select_idx];

        counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);

        if (kvm_pmu_pmc_is_chained(pmc) &&
            kvm_pmu_idx_is_high_counter(select_idx))
                counter = upper_32_bits(counter);
        else if (select_idx != ARMV8_PMU_CYCLE_IDX)
                counter = lower_32_bits(counter);

        return counter;
}

/**
 * kvm_pmu_set_counter_value - set PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 * @val: The counter value
 */
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
{
        u64 reg;

        reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
              ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
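        /* Adjust the saved register so that saved value + perf count == val. */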
        __vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx);

        /* Recreate the perf event to reflect the updated sample_period */
        kvm_pmu_create_perf_event(vcpu, select_idx);
}

/**
 * kvm_pmu_release_perf_event - remove the perf event
 * @pmc: The PMU counter pointer
 */
static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
{
        pmc = kvm_pmu_get_canonical_pmc(pmc);
        if (pmc->perf_event) {
                perf_event_disable(pmc->perf_event);
                perf_event_release_kernel(pmc->perf_event);
                pmc->perf_event = NULL;
        }
}

/**
 * kvm_pmu_stop_counter - stop PMU counter
 * @vcpu: The vcpu pointer
 * @pmc: The PMU counter pointer
 *
 * If this counter has been configured to monitor some event, release it here.
 */
static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
{
        u64 counter, reg, val;

        pmc = kvm_pmu_get_canonical_pmc(pmc);
        if (!pmc->perf_event)
                return;

        counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);

        if (pmc->idx == ARMV8_PMU_CYCLE_IDX) {
                reg = PMCCNTR_EL0;
                val = counter;
        } else {
                reg = PMEVCNTR0_EL0 + pmc->idx;
                val = lower_32_bits(counter);
        }

        __vcpu_sys_reg(vcpu, reg) = val;

        if (kvm_pmu_pmc_is_chained(pmc))
                __vcpu_sys_reg(vcpu, reg + 1) = upper_32_bits(counter);

        kvm_pmu_release_perf_event(pmc);
}

/**
 * kvm_pmu_vcpu_init - assign pmu counter indices for the vcpu
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu)
{
        int i;
        struct kvm_pmu *pmu = &vcpu->arch.pmu;

        for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
                pmu->pmc[i].idx = i;
}

/**
 * kvm_pmu_vcpu_reset - reset pmu state for the vcpu
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
{
        unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        int i;

        for_each_set_bit(i, &mask, 32)
                kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]);

        bitmap_zero(vcpu->arch.pmu.chained, ARMV8_PMU_MAX_COUNTER_PAIRS);
}

/**
 * kvm_pmu_vcpu_destroy - free the perf events of the PMU for the vcpu
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        int i;
        struct kvm_pmu *pmu = &vcpu->arch.pmu;

        for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
                kvm_pmu_release_perf_event(&pmu->pmc[i]);

        irq_work_sync(&vcpu->arch.pmu.overflow_work);
}

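/*
 * PMCR_EL0.N advertises the number of event counters; the cycle counter
 * (index 31) is always valid on top of those.
 */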
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
        u64 val = __vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT;

        val &= ARMV8_PMU_PMCR_N_MASK;
        if (val == 0)
                return BIT(ARMV8_PMU_CYCLE_IDX);
        else
                return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
}

/**
 * kvm_pmu_enable_counter_mask - enable selected PMU counters
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCNTENSET register
 *
 * Call perf_event_enable to start counting the perf event
 */
void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
{
        int i;
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_pmc *pmc;

        if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
                return;

        for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
                if (!(val & BIT(i)))
                        continue;

                pmc = &pmu->pmc[i];

                /* A change in the enable state may affect the chain state */
                kvm_pmu_update_pmc_chained(vcpu, i);
                kvm_pmu_create_perf_event(vcpu, i);

                /* At this point, pmc must be the canonical counter */
                if (pmc->perf_event) {
                        perf_event_enable(pmc->perf_event);
                        if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
                                kvm_debug("failed to enable perf event\n");
                }
        }
}

/**
 * kvm_pmu_disable_counter_mask - disable selected PMU counters
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCNTENCLR register
 *
 * Call perf_event_disable to stop counting the perf event
 */
void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
{
        int i;
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_pmc *pmc;

        if (!val)
                return;

        for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
                if (!(val & BIT(i)))
                        continue;

                pmc = &pmu->pmc[i];

                /* A change in the enable state may affect the chain state */
                kvm_pmu_update_pmc_chained(vcpu, i);
                kvm_pmu_create_perf_event(vcpu, i);

                /* At this point, pmc must be the canonical counter */
                if (pmc->perf_event)
                        perf_event_disable(pmc->perf_event);
        }
}

static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
{
        u64 reg = 0;

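        /*
         * A counter raises the overflow interrupt line only if it has
         * overflowed (PMOVSSET), is enabled (PMCNTENSET), has its interrupt
         * enabled (PMINTENSET), and the PMU as a whole is enabled (PMCR.E).
         */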
        if ((__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
                reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
                reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
                reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
        }

        return reg;
}

static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        bool overflow;

        if (!kvm_vcpu_has_pmu(vcpu))
                return;

        overflow = !!kvm_pmu_overflow_status(vcpu);
        if (pmu->irq_level == overflow)
                return;

        pmu->irq_level = overflow;

        if (likely(irqchip_in_kernel(vcpu->kvm))) {
                int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
                                              pmu->irq_num, overflow, pmu);
                WARN_ON(ret);
        }
}

bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
        bool run_level = sregs->device_irq_level & KVM_ARM_DEV_PMU;

        if (likely(irqchip_in_kernel(vcpu->kvm)))
                return false;

        return pmu->irq_level != run_level;
}

/*
 * Reflect the PMU overflow interrupt output level into the kvm_run structure
 */
void kvm_pmu_update_run(struct kvm_vcpu *vcpu)
{
        struct kvm_sync_regs *regs = &vcpu->run->s.regs;

        /* Populate the PMU overflow interrupt bit for user space */
        regs->device_irq_level &= ~KVM_ARM_DEV_PMU;
        if (vcpu->arch.pmu.irq_level)
                regs->device_irq_level |= KVM_ARM_DEV_PMU;
}

/**
 * kvm_pmu_flush_hwstate - flush pmu state to cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the host, and inject
 * an interrupt if that was the case.
 */
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu)
{
        kvm_pmu_update_state(vcpu);
}

/**
 * kvm_pmu_sync_hwstate - sync pmu state from cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the guest, and
 * inject an interrupt if that was the case.
 */
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
{
        kvm_pmu_update_state(vcpu);
}

/*
 * When the perf interrupt is an NMI, we cannot safely notify the vcpu
 * corresponding to the event. This is why we need a callback to do it
 * once outside of the NMI context.
 */
static void kvm_pmu_perf_overflow_notify_vcpu(struct irq_work *work)
{
        struct kvm_vcpu *vcpu;
        struct kvm_pmu *pmu;

        pmu = container_of(work, struct kvm_pmu, overflow_work);
        vcpu = kvm_pmc_to_vcpu(pmu->pmc);

        kvm_vcpu_kick(vcpu);
}

/*
 * When the perf event overflows, set the overflow status and inform the vcpu.
 */
static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
                                  struct perf_sample_data *data,
                                  struct pt_regs *regs)
{
        struct kvm_pmc *pmc = perf_event->overflow_handler_context;
        struct arm_pmu *cpu_pmu = to_arm_pmu(perf_event->pmu);
        struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
        int idx = pmc->idx;
        u64 period;

        cpu_pmu->pmu.stop(perf_event, PERF_EF_UPDATE);

        /*
         * Reset the sample period to the architectural limit,
         * i.e. the point where the counter overflows.
         */
        period = -(local64_read(&perf_event->count));

        if (!kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
                period &= GENMASK(31, 0);

        local64_set(&perf_event->hw.period_left, 0);
        perf_event->attr.sample_period = period;
        perf_event->hw.sample_period = period;

        __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);

        if (kvm_pmu_overflow_status(vcpu)) {
                kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);

                if (!in_nmi())
                        kvm_vcpu_kick(vcpu);
                else
                        irq_work_queue(&vcpu->arch.pmu.overflow_work);
        }

        cpu_pmu->pmu.start(perf_event, PERF_EF_RELOAD);
}

/**
 * kvm_pmu_software_increment - do software increment
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMSWINC register
 */
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
{
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        int i;

        if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
                return;

        /* Weed out disabled counters */
        val &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);

        for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++) {
                u64 type, reg;

                if (!(val & BIT(i)))
                        continue;

                /* PMSWINC only applies to ... SW_INC! */
                type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i);
                type &= kvm_pmu_event_mask(vcpu->kvm);
                if (type != ARMV8_PMUV3_PERFCTR_SW_INCR)
                        continue;

                /* Increment this counter, which counts SW_INC events */
                reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
                reg = lower_32_bits(reg);
                __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;

                if (reg) /* no overflow on the low part */
                        continue;

                if (kvm_pmu_pmc_is_chained(&pmu->pmc[i])) {
                        /* increment the high counter */
                        reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) + 1;
                        reg = lower_32_bits(reg);
                        __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) = reg;
                        if (!reg) /* mark overflow on the high counter */
                                __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i + 1);
                } else {
                        /* mark overflow on the low counter */
                        __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
                }
        }
}

/**
 * kvm_pmu_handle_pmcr - handle PMCR register
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCR register
 */
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
{
        int i;

        if (val & ARMV8_PMU_PMCR_E) {
                kvm_pmu_enable_counter_mask(vcpu,
                       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
        } else {
                kvm_pmu_disable_counter_mask(vcpu,
                       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
        }

        if (val & ARMV8_PMU_PMCR_C)
                kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);

        if (val & ARMV8_PMU_PMCR_P) {
                unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
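                /* PMCR_EL0.P resets the event counters only, not the cycle counter */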
                mask &= ~BIT(ARMV8_PMU_CYCLE_IDX);
                for_each_set_bit(i, &mask, 32)
                        kvm_pmu_set_counter_value(vcpu, i, 0);
        }
}

static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx)
{
        return (__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) &&
               (__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(select_idx));
}

/**
 * kvm_pmu_create_perf_event - create a perf event for a counter
 * @vcpu: The vcpu pointer
 * @select_idx: The index of the selected counter
 */
static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
{
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_pmc *pmc;
        struct perf_event *event;
        struct perf_event_attr attr;
        u64 eventsel, counter, reg, data;

        /*
         * For chained counters the event type and filtering attributes are
         * obtained from the low/even counter. We also use this counter to
         * determine if the event is enabled/disabled.
         */
        pmc = kvm_pmu_get_canonical_pmc(&pmu->pmc[select_idx]);

        reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
              ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + pmc->idx;
        data = __vcpu_sys_reg(vcpu, reg);

        kvm_pmu_stop_counter(vcpu, pmc);
        if (pmc->idx == ARMV8_PMU_CYCLE_IDX)
                eventsel = ARMV8_PMUV3_PERFCTR_CPU_CYCLES;
        else
                eventsel = data & kvm_pmu_event_mask(vcpu->kvm);

        /* Software increment event doesn't need to be backed by a perf event */
        if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR)
                return;

        /*
         * If we have a filter in place and the event isn't allowed, do
         * not install a perf event either.
         */
        if (vcpu->kvm->arch.pmu_filter &&
            !test_bit(eventsel, vcpu->kvm->arch.pmu_filter))
                return;

        memset(&attr, 0, sizeof(struct perf_event_attr));
        attr.type = PERF_TYPE_RAW;
        attr.size = sizeof(attr);
        attr.pinned = 1;
        attr.disabled = !kvm_pmu_counter_is_enabled(vcpu, pmc->idx);
        attr.exclude_user = data & ARMV8_PMU_EXCLUDE_EL0 ? 1 : 0;
        attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0;
        attr.exclude_hv = 1; /* Don't count EL2 events */
        attr.exclude_host = 1; /* Don't count host events */
        attr.config = eventsel;

        counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);

        if (kvm_pmu_pmc_is_chained(pmc)) {
                /*
                 * The initial sample period (overflow count) of an event. For
                 * chained counters we only support overflow interrupts on the
                 * high counter.
                 */
                attr.sample_period = (-counter) & GENMASK(63, 0);
                attr.config1 |= PERF_ATTR_CFG1_KVM_PMU_CHAINED;

                event = perf_event_create_kernel_counter(&attr, -1, current,
                                                         kvm_pmu_perf_overflow,
                                                         pmc + 1);
        } else {
                /* The initial sample period (overflow count) of an event. */
                if (kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
                        attr.sample_period = (-counter) & GENMASK(63, 0);
                else
                        attr.sample_period = (-counter) & GENMASK(31, 0);

                event = perf_event_create_kernel_counter(&attr, -1, current,
                                                 kvm_pmu_perf_overflow, pmc);
        }

        if (IS_ERR(event)) {
                pr_err_once("kvm: pmu event creation failed %ld\n",
                            PTR_ERR(event));
                return;
        }

        pmc->perf_event = event;
}

/**
 * kvm_pmu_update_pmc_chained - update chained bitmap
 * @vcpu: The vcpu pointer
 * @select_idx: The index of the selected counter
 *
 * Update the chained bitmap based on the event type written in the
 * typer register and the enable state of the odd register.
 */
static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx)
{
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_pmc *pmc = &pmu->pmc[select_idx], *canonical_pmc;
        bool new_state, old_state;

        old_state = kvm_pmu_pmc_is_chained(pmc);
        new_state = kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx) &&
                    kvm_pmu_counter_is_enabled(vcpu, pmc->idx | 0x1);

        if (old_state == new_state)
                return;

        canonical_pmc = kvm_pmu_get_canonical_pmc(pmc);
        kvm_pmu_stop_counter(vcpu, canonical_pmc);
        if (new_state) {
                /*
                 * During promotion from !chained to chained we must ensure
                 * the adjacent counter is stopped and its event destroyed
                 */
                kvm_pmu_stop_counter(vcpu, kvm_pmu_get_alternate_pmc(pmc));
                set_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
                return;
        }
        clear_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
}

/**
 * kvm_pmu_set_counter_event_type - set selected counter to monitor some event
 * @vcpu: The vcpu pointer
 * @data: The data guest writes to PMXEVTYPER_EL0
 * @select_idx: The index of the selected counter
 *
 * When the guest OS accesses PMXEVTYPER_EL0, it wants to set a PMC to count an
 * event with the given hardware event number. Here we call the perf_event API
 * to emulate this action and create a kernel perf event for it.
 */
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
                                    u64 select_idx)
{
        u64 reg, mask;

        mask  =  ARMV8_PMU_EVTYPE_MASK;
        mask &= ~ARMV8_PMU_EVTYPE_EVENT;
        mask |= kvm_pmu_event_mask(vcpu->kvm);

        reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
              ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + select_idx;

        __vcpu_sys_reg(vcpu, reg) = data & mask;

        kvm_pmu_update_pmc_chained(vcpu, select_idx);
        kvm_pmu_create_perf_event(vcpu, select_idx);
}

void kvm_host_pmu_init(struct arm_pmu *pmu)
{
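	/*
	 * Advertise PMU support on the first fully architected (non-IMP-DEF)
	 * host PMU, unless the static key is already set or protected KVM
	 * is enabled.
	 */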
        if (pmu->pmuver != 0 && pmu->pmuver != ID_AA64DFR0_PMUVER_IMP_DEF &&
            !kvm_arm_support_pmu_v3() && !is_protected_kvm_enabled())
                static_branch_enable(&kvm_arm_pmu_available);
}

static int kvm_pmu_probe_pmuver(void)
{
        struct perf_event_attr attr = { };
        struct perf_event *event;
        struct arm_pmu *pmu;
        int pmuver = ID_AA64DFR0_PMUVER_IMP_DEF;

        /*
         * Create a dummy event that only counts user cycles. As we'll never
         * leave this function with the event being live, it will never
         * count anything. But it allows us to probe some of the PMU
         * details. Yes, this is terrible.
         */
        attr.type = PERF_TYPE_RAW;
        attr.size = sizeof(attr);
        attr.pinned = 1;
        attr.disabled = 0;
        attr.exclude_user = 0;
        attr.exclude_kernel = 1;
        attr.exclude_hv = 1;
        attr.exclude_host = 1;
        attr.config = ARMV8_PMUV3_PERFCTR_CPU_CYCLES;
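        /* Maximum period, so the dummy event can never reach its overflow point */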
        attr.sample_period = GENMASK(63, 0);

        event = perf_event_create_kernel_counter(&attr, -1, current,
                                                 kvm_pmu_perf_overflow, &attr);

        if (IS_ERR(event)) {
                pr_err_once("kvm: pmu event creation failed %ld\n",
                            PTR_ERR(event));
                return ID_AA64DFR0_PMUVER_IMP_DEF;
        }

        if (event->pmu) {
                pmu = to_arm_pmu(event->pmu);
                if (pmu->pmuver)
                        pmuver = pmu->pmuver;
        }

        perf_event_disable(event);
        perf_event_release_kernel(event);

        return pmuver;
}

u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
{
        unsigned long *bmap = vcpu->kvm->arch.pmu_filter;
        u64 val, mask = 0;
        int base, i, nr_events;

        if (!pmceid1) {
                val = read_sysreg(pmceid0_el0);
                base = 0;
        } else {
                val = read_sysreg(pmceid1_el0);
                /*
                 * Don't advertise STALL_SLOT, as PMMIR_EL0 is handled
                 * as RAZ
                 */
                if (vcpu->kvm->arch.pmuver >= ID_AA64DFR0_PMUVER_8_4)
                        val &= ~BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32);
                base = 32;
        }

        if (!bmap)
                return val;

        nr_events = kvm_pmu_event_mask(vcpu->kvm) + 1;

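        /*
         * The top 32 bits of each PMCEID register describe the extended
         * (0x4000-based) event range; only fold the filter in for those
         * if the guest's event number width actually reaches that range.
         */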
        for (i = 0; i < 32; i += 8) {
                u64 byte;

                byte = bitmap_get_value8(bmap, base + i);
                mask |= byte << i;
                if (nr_events >= (0x4000 + base + 32)) {
                        byte = bitmap_get_value8(bmap, 0x4000 + base + i);
                        mask |= byte << (32 + i);
                }
        }

        return val & mask;
}
83488865becSMarc Zyngier 
8359ed24f4bSMarc Zyngier int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
8369ed24f4bSMarc Zyngier {
8379bbfa4b5SAlexandru Elisei 	if (!kvm_vcpu_has_pmu(vcpu))
8389ed24f4bSMarc Zyngier 		return 0;
8399ed24f4bSMarc Zyngier 
8409bbfa4b5SAlexandru Elisei 	if (!vcpu->arch.pmu.created)
8419bbfa4b5SAlexandru Elisei 		return -EINVAL;
8429bbfa4b5SAlexandru Elisei 
8439ed24f4bSMarc Zyngier 	/*
8449ed24f4bSMarc Zyngier 	 * A valid interrupt configuration for the PMU is either to have a
8459ed24f4bSMarc Zyngier 	 * properly configured interrupt number and using an in-kernel
8469ed24f4bSMarc Zyngier 	 * irqchip, or to not have an in-kernel GIC and not set an IRQ.
8479ed24f4bSMarc Zyngier 	 */
8489ed24f4bSMarc Zyngier 	if (irqchip_in_kernel(vcpu->kvm)) {
8499ed24f4bSMarc Zyngier 		int irq = vcpu->arch.pmu.irq_num;
8509ed24f4bSMarc Zyngier 		/*
8519ed24f4bSMarc Zyngier 		 * If we are using an in-kernel vgic, at this point we know
8529ed24f4bSMarc Zyngier 		 * the vgic will be initialized, so we can check the PMU irq
8539ed24f4bSMarc Zyngier 		 * number against the dimensions of the vgic and make sure
8549ed24f4bSMarc Zyngier 		 * it's valid.
8559ed24f4bSMarc Zyngier 		 */
8569ed24f4bSMarc Zyngier 		if (!irq_is_ppi(irq) && !vgic_valid_spi(vcpu->kvm, irq))
8579ed24f4bSMarc Zyngier 			return -EINVAL;
8589ed24f4bSMarc Zyngier 	} else if (kvm_arm_pmu_irq_initialized(vcpu)) {
8599ed24f4bSMarc Zyngier 		   return -EINVAL;
8609ed24f4bSMarc Zyngier 	}
8619ed24f4bSMarc Zyngier 
862d0c94c49SMarc Zyngier 	/* One-off reload of the PMU on first run */
863d0c94c49SMarc Zyngier 	kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
864d0c94c49SMarc Zyngier 
8659ed24f4bSMarc Zyngier 	return 0;
8669ed24f4bSMarc Zyngier }

static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
{
        if (irqchip_in_kernel(vcpu->kvm)) {
                int ret;

                /*
                 * If using the PMU with an in-kernel virtual GIC
                 * implementation, we require the GIC to be already
                 * initialized when initializing the PMU.
                 */
                if (!vgic_initialized(vcpu->kvm))
                        return -ENODEV;

                if (!kvm_arm_pmu_irq_initialized(vcpu))
                        return -ENXIO;

                ret = kvm_vgic_set_owner(vcpu, vcpu->arch.pmu.irq_num,
                                         &vcpu->arch.pmu);
                if (ret)
                        return ret;
        }

        init_irq_work(&vcpu->arch.pmu.overflow_work,
                      kvm_pmu_perf_overflow_notify_vcpu);

        vcpu->arch.pmu.created = true;
        return 0;
}

/*
 * For one VM the interrupt type must be the same for each vcpu.
 * As a PPI, the interrupt number is the same for all vcpus,
 * while as an SPI it must be a separate number per vcpu.
 */
static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
{
        int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (!kvm_arm_pmu_irq_initialized(vcpu))
                        continue;

                if (irq_is_ppi(irq)) {
                        if (vcpu->arch.pmu.irq_num != irq)
                                return false;
                } else {
                        if (vcpu->arch.pmu.irq_num == irq)
                                return false;
                }
        }

        return true;
}

int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
        if (!kvm_vcpu_has_pmu(vcpu))
                return -ENODEV;

        if (vcpu->arch.pmu.created)
                return -EBUSY;

        if (!vcpu->kvm->arch.pmuver)
                vcpu->kvm->arch.pmuver = kvm_pmu_probe_pmuver();

        if (vcpu->kvm->arch.pmuver == ID_AA64DFR0_PMUVER_IMP_DEF)
                return -ENODEV;

        switch (attr->attr) {
        case KVM_ARM_VCPU_PMU_V3_IRQ: {
                int __user *uaddr = (int __user *)(long)attr->addr;
                int irq;

                if (!irqchip_in_kernel(vcpu->kvm))
                        return -EINVAL;

                if (get_user(irq, uaddr))
                        return -EFAULT;

                /* The PMU overflow interrupt can be a PPI or a valid SPI. */
                if (!(irq_is_ppi(irq) || irq_is_spi(irq)))
                        return -EINVAL;

                if (!pmu_irq_is_valid(vcpu->kvm, irq))
                        return -EINVAL;

                if (kvm_arm_pmu_irq_initialized(vcpu))
                        return -EBUSY;

                kvm_debug("Set kvm ARM PMU irq: %d\n", irq);
                vcpu->arch.pmu.irq_num = irq;
                return 0;
        }
        case KVM_ARM_VCPU_PMU_V3_FILTER: {
                struct kvm_pmu_event_filter __user *uaddr;
                struct kvm_pmu_event_filter filter;
                int nr_events;

                nr_events = kvm_pmu_event_mask(vcpu->kvm) + 1;

                uaddr = (struct kvm_pmu_event_filter __user *)(long)attr->addr;

                if (copy_from_user(&filter, uaddr, sizeof(filter)))
                        return -EFAULT;

                if (((u32)filter.base_event + filter.nevents) > nr_events ||
                    (filter.action != KVM_PMU_EVENT_ALLOW &&
                     filter.action != KVM_PMU_EVENT_DENY))
                        return -EINVAL;

                mutex_lock(&vcpu->kvm->lock);

                if (!vcpu->kvm->arch.pmu_filter) {
                        vcpu->kvm->arch.pmu_filter = bitmap_alloc(nr_events, GFP_KERNEL_ACCOUNT);
                        if (!vcpu->kvm->arch.pmu_filter) {
                                mutex_unlock(&vcpu->kvm->lock);
                                return -ENOMEM;
                        }

                        /*
                         * The default depends on the first applied filter.
                         * If it allows events, the default is to deny.
                         * Conversely, if the first filter denies a set of
                         * events, the default is to allow.
                         */
                        if (filter.action == KVM_PMU_EVENT_ALLOW)
                                bitmap_zero(vcpu->kvm->arch.pmu_filter, nr_events);
                        else
                                bitmap_fill(vcpu->kvm->arch.pmu_filter, nr_events);
                }

                if (filter.action == KVM_PMU_EVENT_ALLOW)
                        bitmap_set(vcpu->kvm->arch.pmu_filter, filter.base_event, filter.nevents);
                else
                        bitmap_clear(vcpu->kvm->arch.pmu_filter, filter.base_event, filter.nevents);

                mutex_unlock(&vcpu->kvm->lock);

                return 0;
        }
        case KVM_ARM_VCPU_PMU_V3_INIT:
                return kvm_arm_pmu_v3_init(vcpu);
        }

        return -ENXIO;
}

int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
        switch (attr->attr) {
        case KVM_ARM_VCPU_PMU_V3_IRQ: {
                int __user *uaddr = (int __user *)(long)attr->addr;
                int irq;

                if (!irqchip_in_kernel(vcpu->kvm))
                        return -EINVAL;

                if (!kvm_vcpu_has_pmu(vcpu))
                        return -ENODEV;

                if (!kvm_arm_pmu_irq_initialized(vcpu))
                        return -ENXIO;

                irq = vcpu->arch.pmu.irq_num;
                return put_user(irq, uaddr);
        }
        }

        return -ENXIO;
}

int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
        switch (attr->attr) {
        case KVM_ARM_VCPU_PMU_V3_IRQ:
        case KVM_ARM_VCPU_PMU_V3_INIT:
        case KVM_ARM_VCPU_PMU_V3_FILTER:
                if (kvm_vcpu_has_pmu(vcpu))
                        return 0;
        }

        return -ENXIO;
}