xref: /openbmc/linux/arch/arm64/kvm/pmu-emul.c (revision 06394531)
19ed24f4bSMarc Zyngier // SPDX-License-Identifier: GPL-2.0-only
29ed24f4bSMarc Zyngier /*
39ed24f4bSMarc Zyngier  * Copyright (C) 2015 Linaro Ltd.
49ed24f4bSMarc Zyngier  * Author: Shannon Zhao <shannon.zhao@linaro.org>
59ed24f4bSMarc Zyngier  */
69ed24f4bSMarc Zyngier 
79ed24f4bSMarc Zyngier #include <linux/cpu.h>
89ed24f4bSMarc Zyngier #include <linux/kvm.h>
99ed24f4bSMarc Zyngier #include <linux/kvm_host.h>
10db858060SAlexandru Elisei #include <linux/list.h>
119ed24f4bSMarc Zyngier #include <linux/perf_event.h>
129ed24f4bSMarc Zyngier #include <linux/perf/arm_pmu.h>
139ed24f4bSMarc Zyngier #include <linux/uaccess.h>
149ed24f4bSMarc Zyngier #include <asm/kvm_emulate.h>
159ed24f4bSMarc Zyngier #include <kvm/arm_pmu.h>
169ed24f4bSMarc Zyngier #include <kvm/arm_vgic.h>
179ed24f4bSMarc Zyngier 
18be399d82SSean Christopherson DEFINE_STATIC_KEY_FALSE(kvm_arm_pmu_available);
19be399d82SSean Christopherson 
20db858060SAlexandru Elisei static LIST_HEAD(arm_pmus);
21db858060SAlexandru Elisei static DEFINE_MUTEX(arm_pmus_lock);
22db858060SAlexandru Elisei 
239ed24f4bSMarc Zyngier static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx);
249ed24f4bSMarc Zyngier static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx);
259ed24f4bSMarc Zyngier static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc);
269ed24f4bSMarc Zyngier 
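/*
 * Set in perf_event_attr.config1 so that the host arm_pmu driver treats
 * the event as a single 64-bit (chained) hardware event; this matches
 * the driver's ARMPMU_EVT_64BIT flag.
 */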
279ed24f4bSMarc Zyngier #define PERF_ATTR_CFG1_KVM_PMU_CHAINED 0x1
289ed24f4bSMarc Zyngier 
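/*
 * Width of the event number field the guest's PMU implements: ARMv8.0
 * PMUs have a 10-bit event space (0x000-0x3FF), while ARMv8.1 and later
 * widen it to 16 bits (0x0000-0xFFFF).
 */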
29fd65a3b5SMarc Zyngier static u32 kvm_pmu_event_mask(struct kvm *kvm)
30fd65a3b5SMarc Zyngier {
3146b18782SMarc Zyngier 	unsigned int pmuver;
3246b18782SMarc Zyngier 
3346b18782SMarc Zyngier 	pmuver = kvm->arch.arm_pmu->pmuver;
3446b18782SMarc Zyngier 
3546b18782SMarc Zyngier 	switch (pmuver) {
368e26d11fSMarc Zyngier 	case ID_AA64DFR0_PMUVER_8_0:
37fd65a3b5SMarc Zyngier 		return GENMASK(9, 0);
388e26d11fSMarc Zyngier 	case ID_AA64DFR0_PMUVER_8_1:
398e26d11fSMarc Zyngier 	case ID_AA64DFR0_PMUVER_8_4:
408e26d11fSMarc Zyngier 	case ID_AA64DFR0_PMUVER_8_5:
4100e228b3SMarc Zyngier 	case ID_AA64DFR0_PMUVER_8_7:
42fd65a3b5SMarc Zyngier 		return GENMASK(15, 0);
43fd65a3b5SMarc Zyngier 	default:		/* Shouldn't be here, just for sanity */
4446b18782SMarc Zyngier 		WARN_ONCE(1, "Unknown PMU version %d\n", pmuver);
45fd65a3b5SMarc Zyngier 		return 0;
46fd65a3b5SMarc Zyngier 	}
47fd65a3b5SMarc Zyngier }
48fd65a3b5SMarc Zyngier 
499ed24f4bSMarc Zyngier /**
509ed24f4bSMarc Zyngier  * kvm_pmu_idx_is_64bit - determine if select_idx is a 64-bit counter
519ed24f4bSMarc Zyngier  * @vcpu: The vcpu pointer
529ed24f4bSMarc Zyngier  * @select_idx: The counter index
539ed24f4bSMarc Zyngier  */
549ed24f4bSMarc Zyngier static bool kvm_pmu_idx_is_64bit(struct kvm_vcpu *vcpu, u64 select_idx)
559ed24f4bSMarc Zyngier {
569ed24f4bSMarc Zyngier 	return (select_idx == ARMV8_PMU_CYCLE_IDX &&
579ed24f4bSMarc Zyngier 		__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_LC);
589ed24f4bSMarc Zyngier }
599ed24f4bSMarc Zyngier 
609ed24f4bSMarc Zyngier static struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
619ed24f4bSMarc Zyngier {
629ed24f4bSMarc Zyngier 	struct kvm_pmu *pmu;
639ed24f4bSMarc Zyngier 	struct kvm_vcpu_arch *vcpu_arch;
649ed24f4bSMarc Zyngier 
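	/*
	 * pmc->idx matches the counter's position in pmu->pmc[], so
	 * stepping back idx entries lands on pmc[0]; from there, two
	 * container_of() hops recover the enclosing vcpu.
	 */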
659ed24f4bSMarc Zyngier 	pmc -= pmc->idx;
669ed24f4bSMarc Zyngier 	pmu = container_of(pmc, struct kvm_pmu, pmc[0]);
679ed24f4bSMarc Zyngier 	vcpu_arch = container_of(pmu, struct kvm_vcpu_arch, pmu);
689ed24f4bSMarc Zyngier 	return container_of(vcpu_arch, struct kvm_vcpu, arch);
699ed24f4bSMarc Zyngier }
709ed24f4bSMarc Zyngier 
719ed24f4bSMarc Zyngier /**
729ed24f4bSMarc Zyngier  * kvm_pmu_pmc_is_chained - determine if the pmc is chained
739ed24f4bSMarc Zyngier  * @pmc: The PMU counter pointer
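 *
 * The chained bitmap holds one bit per counter pair, hence the
 * pmc->idx >> 1 lookup.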
749ed24f4bSMarc Zyngier  */
759ed24f4bSMarc Zyngier static bool kvm_pmu_pmc_is_chained(struct kvm_pmc *pmc)
769ed24f4bSMarc Zyngier {
779ed24f4bSMarc Zyngier 	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
789ed24f4bSMarc Zyngier 
799ed24f4bSMarc Zyngier 	return test_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
809ed24f4bSMarc Zyngier }
819ed24f4bSMarc Zyngier 
829ed24f4bSMarc Zyngier /**
839ed24f4bSMarc Zyngier  * kvm_pmu_idx_is_high_counter - determine if select_idx is the high counter of a pair
849ed24f4bSMarc Zyngier  * @select_idx: The counter index
859ed24f4bSMarc Zyngier  */
869ed24f4bSMarc Zyngier static bool kvm_pmu_idx_is_high_counter(u64 select_idx)
879ed24f4bSMarc Zyngier {
889ed24f4bSMarc Zyngier 	return select_idx & 0x1;
899ed24f4bSMarc Zyngier }
909ed24f4bSMarc Zyngier 
919ed24f4bSMarc Zyngier /**
929ed24f4bSMarc Zyngier  * kvm_pmu_get_canonical_pmc - obtain the canonical pmc
939ed24f4bSMarc Zyngier  * @pmc: The PMU counter pointer
949ed24f4bSMarc Zyngier  *
959ed24f4bSMarc Zyngier  * When a pair of PMCs are chained together we use the low counter (canonical)
969ed24f4bSMarc Zyngier  * to hold the underlying perf event.
979ed24f4bSMarc Zyngier  */
989ed24f4bSMarc Zyngier static struct kvm_pmc *kvm_pmu_get_canonical_pmc(struct kvm_pmc *pmc)
999ed24f4bSMarc Zyngier {
1009ed24f4bSMarc Zyngier 	if (kvm_pmu_pmc_is_chained(pmc) &&
1019ed24f4bSMarc Zyngier 	    kvm_pmu_idx_is_high_counter(pmc->idx))
1029ed24f4bSMarc Zyngier 		return pmc - 1;
1039ed24f4bSMarc Zyngier 
1049ed24f4bSMarc Zyngier 	return pmc;
1059ed24f4bSMarc Zyngier }
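
/**
 * kvm_pmu_get_alternate_pmc - obtain the other half of a counter pair
 * @pmc: The PMU counter pointer
 */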
1069ed24f4bSMarc Zyngier static struct kvm_pmc *kvm_pmu_get_alternate_pmc(struct kvm_pmc *pmc)
1079ed24f4bSMarc Zyngier {
1089ed24f4bSMarc Zyngier 	if (kvm_pmu_idx_is_high_counter(pmc->idx))
1099ed24f4bSMarc Zyngier 		return pmc - 1;
1109ed24f4bSMarc Zyngier 	else
1119ed24f4bSMarc Zyngier 		return pmc + 1;
1129ed24f4bSMarc Zyngier }
1139ed24f4bSMarc Zyngier 
1149ed24f4bSMarc Zyngier /**
1159ed24f4bSMarc Zyngier  * kvm_pmu_idx_has_chain_evtype - determine if the event type is CHAIN
1169ed24f4bSMarc Zyngier  * @vcpu: The vcpu pointer
1179ed24f4bSMarc Zyngier  * @select_idx: The counter index
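 *
 * The CHAIN event is always programmed on the odd (high) counter of a
 * pair, which is why the check is made on select_idx | 0x1.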
1189ed24f4bSMarc Zyngier  */
1199ed24f4bSMarc Zyngier static bool kvm_pmu_idx_has_chain_evtype(struct kvm_vcpu *vcpu, u64 select_idx)
1209ed24f4bSMarc Zyngier {
1219ed24f4bSMarc Zyngier 	u64 eventsel, reg;
1229ed24f4bSMarc Zyngier 
1239ed24f4bSMarc Zyngier 	select_idx |= 0x1;
1249ed24f4bSMarc Zyngier 
1259ed24f4bSMarc Zyngier 	if (select_idx == ARMV8_PMU_CYCLE_IDX)
1269ed24f4bSMarc Zyngier 		return false;
1279ed24f4bSMarc Zyngier 
1289ed24f4bSMarc Zyngier 	reg = PMEVTYPER0_EL0 + select_idx;
129fd65a3b5SMarc Zyngier 	eventsel = __vcpu_sys_reg(vcpu, reg) & kvm_pmu_event_mask(vcpu->kvm);
1309ed24f4bSMarc Zyngier 
1319ed24f4bSMarc Zyngier 	return eventsel == ARMV8_PMUV3_PERFCTR_CHAIN;
1329ed24f4bSMarc Zyngier }
1339ed24f4bSMarc Zyngier 
1349ed24f4bSMarc Zyngier /**
1359ed24f4bSMarc Zyngier  * kvm_pmu_get_pair_counter_value - get PMU counter value
1369ed24f4bSMarc Zyngier  * @vcpu: The vcpu pointer
1379ed24f4bSMarc Zyngier  * @pmc: The PMU counter pointer
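 *
 * For a chained pair, the two 32-bit register halves are reassembled
 * into one 64-bit value before the pending perf event delta is added.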
1389ed24f4bSMarc Zyngier  */
1399ed24f4bSMarc Zyngier static u64 kvm_pmu_get_pair_counter_value(struct kvm_vcpu *vcpu,
1409ed24f4bSMarc Zyngier 					  struct kvm_pmc *pmc)
1419ed24f4bSMarc Zyngier {
1429ed24f4bSMarc Zyngier 	u64 counter, counter_high, reg, enabled, running;
1439ed24f4bSMarc Zyngier 
1449ed24f4bSMarc Zyngier 	if (kvm_pmu_pmc_is_chained(pmc)) {
1459ed24f4bSMarc Zyngier 		pmc = kvm_pmu_get_canonical_pmc(pmc);
1469ed24f4bSMarc Zyngier 		reg = PMEVCNTR0_EL0 + pmc->idx;
1479ed24f4bSMarc Zyngier 
1489ed24f4bSMarc Zyngier 		counter = __vcpu_sys_reg(vcpu, reg);
1499ed24f4bSMarc Zyngier 		counter_high = __vcpu_sys_reg(vcpu, reg + 1);
1509ed24f4bSMarc Zyngier 
1519ed24f4bSMarc Zyngier 		counter = lower_32_bits(counter) | (counter_high << 32);
1529ed24f4bSMarc Zyngier 	} else {
1539ed24f4bSMarc Zyngier 		reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
1549ed24f4bSMarc Zyngier 		      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
1559ed24f4bSMarc Zyngier 		counter = __vcpu_sys_reg(vcpu, reg);
1569ed24f4bSMarc Zyngier 	}
1579ed24f4bSMarc Zyngier 
1589ed24f4bSMarc Zyngier 	/*
1599ed24f4bSMarc Zyngier 	 * The real counter value is equal to the value of the counter
1609ed24f4bSMarc Zyngier 	 * register plus the value the perf event has counted.
1619ed24f4bSMarc Zyngier 	 */
1629ed24f4bSMarc Zyngier 	if (pmc->perf_event)
1639ed24f4bSMarc Zyngier 		counter += perf_event_read_value(pmc->perf_event, &enabled,
1649ed24f4bSMarc Zyngier 						 &running);
1659ed24f4bSMarc Zyngier 
1669ed24f4bSMarc Zyngier 	return counter;
1679ed24f4bSMarc Zyngier }
1689ed24f4bSMarc Zyngier 
1699ed24f4bSMarc Zyngier /**
1709ed24f4bSMarc Zyngier  * kvm_pmu_get_counter_value - get PMU counter value
1719ed24f4bSMarc Zyngier  * @vcpu: The vcpu pointer
1729ed24f4bSMarc Zyngier  * @select_idx: The counter index
1739ed24f4bSMarc Zyngier  */
1749ed24f4bSMarc Zyngier u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
1759ed24f4bSMarc Zyngier {
1769ed24f4bSMarc Zyngier 	u64 counter;
1779ed24f4bSMarc Zyngier 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
1789ed24f4bSMarc Zyngier 	struct kvm_pmc *pmc = &pmu->pmc[select_idx];
1799ed24f4bSMarc Zyngier 
1809ed24f4bSMarc Zyngier 	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
1819ed24f4bSMarc Zyngier 
1829ed24f4bSMarc Zyngier 	if (kvm_pmu_pmc_is_chained(pmc) &&
1839ed24f4bSMarc Zyngier 	    kvm_pmu_idx_is_high_counter(select_idx))
1849ed24f4bSMarc Zyngier 		counter = upper_32_bits(counter);
1859ed24f4bSMarc Zyngier 	else if (select_idx != ARMV8_PMU_CYCLE_IDX)
1869ed24f4bSMarc Zyngier 		counter = lower_32_bits(counter);
1879ed24f4bSMarc Zyngier 
1889ed24f4bSMarc Zyngier 	return counter;
1899ed24f4bSMarc Zyngier }
1909ed24f4bSMarc Zyngier 
1919ed24f4bSMarc Zyngier /**
1929ed24f4bSMarc Zyngier  * kvm_pmu_set_counter_value - set PMU counter value
1939ed24f4bSMarc Zyngier  * @vcpu: The vcpu pointer
1949ed24f4bSMarc Zyngier  * @select_idx: The counter index
1959ed24f4bSMarc Zyngier  * @val: The counter value
1969ed24f4bSMarc Zyngier  */
1979ed24f4bSMarc Zyngier void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
1989ed24f4bSMarc Zyngier {
1999ed24f4bSMarc Zyngier 	u64 reg;
2009ed24f4bSMarc Zyngier 
2019ed24f4bSMarc Zyngier 	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
2029ed24f4bSMarc Zyngier 	      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
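	/*
	 * Adjust only the register-backed part of the counter: adding
	 * (val - current total) makes the summed value read back as val
	 * without disturbing the perf event's accumulated count.
	 */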
2039ed24f4bSMarc Zyngier 	__vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx);
2049ed24f4bSMarc Zyngier 
2059ed24f4bSMarc Zyngier 	/* Recreate the perf event to reflect the updated sample_period */
2069ed24f4bSMarc Zyngier 	kvm_pmu_create_perf_event(vcpu, select_idx);
2079ed24f4bSMarc Zyngier }
2089ed24f4bSMarc Zyngier 
2099ed24f4bSMarc Zyngier /**
2109ed24f4bSMarc Zyngier  * kvm_pmu_release_perf_event - remove the perf event
2119ed24f4bSMarc Zyngier  * @pmc: The PMU counter pointer
2129ed24f4bSMarc Zyngier  */
2139ed24f4bSMarc Zyngier static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
2149ed24f4bSMarc Zyngier {
2159ed24f4bSMarc Zyngier 	pmc = kvm_pmu_get_canonical_pmc(pmc);
2169ed24f4bSMarc Zyngier 	if (pmc->perf_event) {
2179ed24f4bSMarc Zyngier 		perf_event_disable(pmc->perf_event);
2189ed24f4bSMarc Zyngier 		perf_event_release_kernel(pmc->perf_event);
2199ed24f4bSMarc Zyngier 		pmc->perf_event = NULL;
2209ed24f4bSMarc Zyngier 	}
2219ed24f4bSMarc Zyngier }
2229ed24f4bSMarc Zyngier 
2239ed24f4bSMarc Zyngier /**
2249ed24f4bSMarc Zyngier  * kvm_pmu_stop_counter - stop PMU counter
2259ed24f4bSMarc Zyngier  * @pmc: The PMU counter pointer
2269ed24f4bSMarc Zyngier  *
2279ed24f4bSMarc Zyngier  * If this counter has been configured to monitor some event, release it here.
2289ed24f4bSMarc Zyngier  */
2299ed24f4bSMarc Zyngier static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
2309ed24f4bSMarc Zyngier {
2319ed24f4bSMarc Zyngier 	u64 counter, reg, val;
2329ed24f4bSMarc Zyngier 
2339ed24f4bSMarc Zyngier 	pmc = kvm_pmu_get_canonical_pmc(pmc);
2349ed24f4bSMarc Zyngier 	if (!pmc->perf_event)
2359ed24f4bSMarc Zyngier 		return;
2369ed24f4bSMarc Zyngier 
2379ed24f4bSMarc Zyngier 	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
2389ed24f4bSMarc Zyngier 
2399ed24f4bSMarc Zyngier 	if (pmc->idx == ARMV8_PMU_CYCLE_IDX) {
2409ed24f4bSMarc Zyngier 		reg = PMCCNTR_EL0;
2419ed24f4bSMarc Zyngier 		val = counter;
2429ed24f4bSMarc Zyngier 	} else {
2439ed24f4bSMarc Zyngier 		reg = PMEVCNTR0_EL0 + pmc->idx;
2449ed24f4bSMarc Zyngier 		val = lower_32_bits(counter);
2459ed24f4bSMarc Zyngier 	}
2469ed24f4bSMarc Zyngier 
2479ed24f4bSMarc Zyngier 	__vcpu_sys_reg(vcpu, reg) = val;
2489ed24f4bSMarc Zyngier 
2499ed24f4bSMarc Zyngier 	if (kvm_pmu_pmc_is_chained(pmc))
2509ed24f4bSMarc Zyngier 		__vcpu_sys_reg(vcpu, reg + 1) = upper_32_bits(counter);
2519ed24f4bSMarc Zyngier 
2529ed24f4bSMarc Zyngier 	kvm_pmu_release_perf_event(pmc);
2539ed24f4bSMarc Zyngier }
2549ed24f4bSMarc Zyngier 
2559ed24f4bSMarc Zyngier /**
2569ed24f4bSMarc Zyngier  * kvm_pmu_vcpu_init - assign pmu counter indices for the vcpu
2579ed24f4bSMarc Zyngier  * @vcpu: The vcpu pointer
2589ed24f4bSMarc Zyngier  *
2599ed24f4bSMarc Zyngier  */
2609ed24f4bSMarc Zyngier void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu)
2619ed24f4bSMarc Zyngier {
2629ed24f4bSMarc Zyngier 	int i;
2639ed24f4bSMarc Zyngier 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
2649ed24f4bSMarc Zyngier 
2659ed24f4bSMarc Zyngier 	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
2669ed24f4bSMarc Zyngier 		pmu->pmc[i].idx = i;
2679ed24f4bSMarc Zyngier }
2689ed24f4bSMarc Zyngier 
2699ed24f4bSMarc Zyngier /**
2709ed24f4bSMarc Zyngier  * kvm_pmu_vcpu_reset - reset pmu state for the vcpu
2719ed24f4bSMarc Zyngier  * @vcpu: The vcpu pointer
2729ed24f4bSMarc Zyngier  *
2739ed24f4bSMarc Zyngier  */
2749ed24f4bSMarc Zyngier void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
2759ed24f4bSMarc Zyngier {
2769ed24f4bSMarc Zyngier 	unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
2779ed24f4bSMarc Zyngier 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
2789ed24f4bSMarc Zyngier 	int i;
2799ed24f4bSMarc Zyngier 
2809ed24f4bSMarc Zyngier 	for_each_set_bit(i, &mask, 32)
2819ed24f4bSMarc Zyngier 		kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]);
2829ed24f4bSMarc Zyngier 
2839ed24f4bSMarc Zyngier 	bitmap_zero(vcpu->arch.pmu.chained, ARMV8_PMU_MAX_COUNTER_PAIRS);
2849ed24f4bSMarc Zyngier }
2859ed24f4bSMarc Zyngier 
2869ed24f4bSMarc Zyngier /**
2879ed24f4bSMarc Zyngier  * kvm_pmu_vcpu_destroy - free the perf events of the PMU for the vcpu
2889ed24f4bSMarc Zyngier  * @vcpu: The vcpu pointer
2899ed24f4bSMarc Zyngier  *
2909ed24f4bSMarc Zyngier  */
2919ed24f4bSMarc Zyngier void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
2929ed24f4bSMarc Zyngier {
2939ed24f4bSMarc Zyngier 	int i;
2949ed24f4bSMarc Zyngier 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
2959ed24f4bSMarc Zyngier 
2969ed24f4bSMarc Zyngier 	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
2979ed24f4bSMarc Zyngier 		kvm_pmu_release_perf_event(&pmu->pmc[i]);
29895e92e45SJulien Thierry 	irq_work_sync(&vcpu->arch.pmu.overflow_work);
2999ed24f4bSMarc Zyngier }
3009ed24f4bSMarc Zyngier 
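/*
 * PMCR_EL0.N is the number of implemented event counters; e.g. N == 6
 * yields GENMASK(5, 0) plus the cycle counter bit (bit 31).
 */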
3019ed24f4bSMarc Zyngier u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
3029ed24f4bSMarc Zyngier {
3039ed24f4bSMarc Zyngier 	u64 val = __vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT;
3049ed24f4bSMarc Zyngier 
3059ed24f4bSMarc Zyngier 	val &= ARMV8_PMU_PMCR_N_MASK;
3069ed24f4bSMarc Zyngier 	if (val == 0)
3079ed24f4bSMarc Zyngier 		return BIT(ARMV8_PMU_CYCLE_IDX);
3089ed24f4bSMarc Zyngier 	else
3099ed24f4bSMarc Zyngier 		return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
3109ed24f4bSMarc Zyngier }
3119ed24f4bSMarc Zyngier 
3129ed24f4bSMarc Zyngier /**
3139ed24f4bSMarc Zyngier  * kvm_pmu_enable_counter_mask - enable selected PMU counters
3149ed24f4bSMarc Zyngier  * @vcpu: The vcpu pointer
3159ed24f4bSMarc Zyngier  * @val: the value guest writes to PMCNTENSET register
3169ed24f4bSMarc Zyngier  *
3179ed24f4bSMarc Zyngier  * Call perf_event_enable to start counting the perf event
3189ed24f4bSMarc Zyngier  */
3199ed24f4bSMarc Zyngier void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
3209ed24f4bSMarc Zyngier {
3219ed24f4bSMarc Zyngier 	int i;
3229ed24f4bSMarc Zyngier 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
3239ed24f4bSMarc Zyngier 	struct kvm_pmc *pmc;
3249ed24f4bSMarc Zyngier 
3259ed24f4bSMarc Zyngier 	if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
3269ed24f4bSMarc Zyngier 		return;
3279ed24f4bSMarc Zyngier 
3289ed24f4bSMarc Zyngier 	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
3299ed24f4bSMarc Zyngier 		if (!(val & BIT(i)))
3309ed24f4bSMarc Zyngier 			continue;
3319ed24f4bSMarc Zyngier 
3329ed24f4bSMarc Zyngier 		pmc = &pmu->pmc[i];
3339ed24f4bSMarc Zyngier 
3349ed24f4bSMarc Zyngier 		/* A change in the enable state may affect the chain state */
3359ed24f4bSMarc Zyngier 		kvm_pmu_update_pmc_chained(vcpu, i);
3369ed24f4bSMarc Zyngier 		kvm_pmu_create_perf_event(vcpu, i);
3379ed24f4bSMarc Zyngier 
3389ed24f4bSMarc Zyngier 		/* At this point, pmc must be the canonical */
3399ed24f4bSMarc Zyngier 		if (pmc->perf_event) {
3409ed24f4bSMarc Zyngier 			perf_event_enable(pmc->perf_event);
3419ed24f4bSMarc Zyngier 			if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
3429ed24f4bSMarc Zyngier 				kvm_debug("failed to enable perf event\n");
3439ed24f4bSMarc Zyngier 		}
3449ed24f4bSMarc Zyngier 	}
3459ed24f4bSMarc Zyngier }
3469ed24f4bSMarc Zyngier 
3479ed24f4bSMarc Zyngier /**
3489ed24f4bSMarc Zyngier  * kvm_pmu_disable_counter_mask - disable selected PMU counters
3499ed24f4bSMarc Zyngier  * @vcpu: The vcpu pointer
3509ed24f4bSMarc Zyngier  * @val: the value guest writes to PMCNTENCLR register
3519ed24f4bSMarc Zyngier  *
3529ed24f4bSMarc Zyngier  * Call perf_event_disable to stop counting the perf event
3539ed24f4bSMarc Zyngier  */
3549ed24f4bSMarc Zyngier void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
3559ed24f4bSMarc Zyngier {
3569ed24f4bSMarc Zyngier 	int i;
3579ed24f4bSMarc Zyngier 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
3589ed24f4bSMarc Zyngier 	struct kvm_pmc *pmc;
3599ed24f4bSMarc Zyngier 
3609ed24f4bSMarc Zyngier 	if (!val)
3619ed24f4bSMarc Zyngier 		return;
3629ed24f4bSMarc Zyngier 
3639ed24f4bSMarc Zyngier 	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
3649ed24f4bSMarc Zyngier 		if (!(val & BIT(i)))
3659ed24f4bSMarc Zyngier 			continue;
3669ed24f4bSMarc Zyngier 
3679ed24f4bSMarc Zyngier 		pmc = &pmu->pmc[i];
3689ed24f4bSMarc Zyngier 
3699ed24f4bSMarc Zyngier 		/* A change in the enable state may affect the chain state */
3709ed24f4bSMarc Zyngier 		kvm_pmu_update_pmc_chained(vcpu, i);
3719ed24f4bSMarc Zyngier 		kvm_pmu_create_perf_event(vcpu, i);
3729ed24f4bSMarc Zyngier 
3739ed24f4bSMarc Zyngier 		/* At this point, pmc must be the canonical */
3749ed24f4bSMarc Zyngier 		if (pmc->perf_event)
3759ed24f4bSMarc Zyngier 			perf_event_disable(pmc->perf_event);
3769ed24f4bSMarc Zyngier 	}
3779ed24f4bSMarc Zyngier }
3789ed24f4bSMarc Zyngier 
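/*
 * A counter contributes to the overflow interrupt line only while the
 * PMU is globally enabled (PMCR_EL0.E) and the counter is both enabled
 * (PMCNTENSET_EL0) and allowed to interrupt (PMINTENSET_EL1).
 */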
3799ed24f4bSMarc Zyngier static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
3809ed24f4bSMarc Zyngier {
3819ed24f4bSMarc Zyngier 	u64 reg = 0;
3829ed24f4bSMarc Zyngier 
3839ed24f4bSMarc Zyngier 	if ((__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
3849ed24f4bSMarc Zyngier 		reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
3859ed24f4bSMarc Zyngier 		reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
3869ed24f4bSMarc Zyngier 		reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
3879ed24f4bSMarc Zyngier 	}
3889ed24f4bSMarc Zyngier 
3899ed24f4bSMarc Zyngier 	return reg;
3909ed24f4bSMarc Zyngier }
3919ed24f4bSMarc Zyngier 
3929ed24f4bSMarc Zyngier static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
3939ed24f4bSMarc Zyngier {
3949ed24f4bSMarc Zyngier 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
3959ed24f4bSMarc Zyngier 	bool overflow;
3969ed24f4bSMarc Zyngier 
39746acf89dSMarc Zyngier 	if (!kvm_vcpu_has_pmu(vcpu))
3989ed24f4bSMarc Zyngier 		return;
3999ed24f4bSMarc Zyngier 
4009ed24f4bSMarc Zyngier 	overflow = !!kvm_pmu_overflow_status(vcpu);
4019ed24f4bSMarc Zyngier 	if (pmu->irq_level == overflow)
4029ed24f4bSMarc Zyngier 		return;
4039ed24f4bSMarc Zyngier 
4049ed24f4bSMarc Zyngier 	pmu->irq_level = overflow;
4059ed24f4bSMarc Zyngier 
4069ed24f4bSMarc Zyngier 	if (likely(irqchip_in_kernel(vcpu->kvm))) {
4079ed24f4bSMarc Zyngier 		int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
4089ed24f4bSMarc Zyngier 					      pmu->irq_num, overflow, pmu);
4099ed24f4bSMarc Zyngier 		WARN_ON(ret);
4109ed24f4bSMarc Zyngier 	}
4119ed24f4bSMarc Zyngier }
4129ed24f4bSMarc Zyngier 
4139ed24f4bSMarc Zyngier bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
4149ed24f4bSMarc Zyngier {
4159ed24f4bSMarc Zyngier 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
4169ed24f4bSMarc Zyngier 	struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
4179ed24f4bSMarc Zyngier 	bool run_level = sregs->device_irq_level & KVM_ARM_DEV_PMU;
4189ed24f4bSMarc Zyngier 
4199ed24f4bSMarc Zyngier 	if (likely(irqchip_in_kernel(vcpu->kvm)))
4209ed24f4bSMarc Zyngier 		return false;
4219ed24f4bSMarc Zyngier 
4229ed24f4bSMarc Zyngier 	return pmu->irq_level != run_level;
4239ed24f4bSMarc Zyngier }
4249ed24f4bSMarc Zyngier 
4259ed24f4bSMarc Zyngier /*
4269ed24f4bSMarc Zyngier  * Reflect the PMU overflow interrupt output level into the kvm_run structure
4279ed24f4bSMarc Zyngier  */
4289ed24f4bSMarc Zyngier void kvm_pmu_update_run(struct kvm_vcpu *vcpu)
4299ed24f4bSMarc Zyngier {
4309ed24f4bSMarc Zyngier 	struct kvm_sync_regs *regs = &vcpu->run->s.regs;
4319ed24f4bSMarc Zyngier 
4329ed24f4bSMarc Zyngier 	/* Populate the PMU bit of the device IRQ bitmap for user space */
4339ed24f4bSMarc Zyngier 	regs->device_irq_level &= ~KVM_ARM_DEV_PMU;
4349ed24f4bSMarc Zyngier 	if (vcpu->arch.pmu.irq_level)
4359ed24f4bSMarc Zyngier 		regs->device_irq_level |= KVM_ARM_DEV_PMU;
4369ed24f4bSMarc Zyngier }
4379ed24f4bSMarc Zyngier 
4389ed24f4bSMarc Zyngier /**
4399ed24f4bSMarc Zyngier  * kvm_pmu_flush_hwstate - flush pmu state to cpu
4409ed24f4bSMarc Zyngier  * @vcpu: The vcpu pointer
4419ed24f4bSMarc Zyngier  *
4429ed24f4bSMarc Zyngier  * Check if the PMU has overflowed while we were running in the host, and inject
4439ed24f4bSMarc Zyngier  * an interrupt if that was the case.
4449ed24f4bSMarc Zyngier  */
4459ed24f4bSMarc Zyngier void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu)
4469ed24f4bSMarc Zyngier {
4479ed24f4bSMarc Zyngier 	kvm_pmu_update_state(vcpu);
4489ed24f4bSMarc Zyngier }
4499ed24f4bSMarc Zyngier 
4509ed24f4bSMarc Zyngier /**
4519ed24f4bSMarc Zyngier  * kvm_pmu_sync_hwstate - sync pmu state from cpu
4529ed24f4bSMarc Zyngier  * @vcpu: The vcpu pointer
4539ed24f4bSMarc Zyngier  *
4549ed24f4bSMarc Zyngier  * Check if the PMU has overflowed while we were running in the guest, and
4559ed24f4bSMarc Zyngier  * inject an interrupt if that was the case.
4569ed24f4bSMarc Zyngier  */
4579ed24f4bSMarc Zyngier void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
4589ed24f4bSMarc Zyngier {
4599ed24f4bSMarc Zyngier 	kvm_pmu_update_state(vcpu);
4609ed24f4bSMarc Zyngier }
4619ed24f4bSMarc Zyngier 
4629ed24f4bSMarc Zyngier /*
46395e92e45SJulien Thierry  * When the perf interrupt is an NMI, we cannot safely notify the vcpu
46495e92e45SJulien Thierry  * corresponding to the event.
46595e92e45SJulien Thierry  * This is why we need a callback to do it once outside of the NMI context.
46695e92e45SJulien Thierry  */
46795e92e45SJulien Thierry static void kvm_pmu_perf_overflow_notify_vcpu(struct irq_work *work)
46895e92e45SJulien Thierry {
46995e92e45SJulien Thierry 	struct kvm_vcpu *vcpu;
47095e92e45SJulien Thierry 	struct kvm_pmu *pmu;
47195e92e45SJulien Thierry 
47295e92e45SJulien Thierry 	pmu = container_of(work, struct kvm_pmu, overflow_work);
47395e92e45SJulien Thierry 	vcpu = kvm_pmc_to_vcpu(pmu->pmc);
47495e92e45SJulien Thierry 
47595e92e45SJulien Thierry 	kvm_vcpu_kick(vcpu);
47695e92e45SJulien Thierry }
47795e92e45SJulien Thierry 
47895e92e45SJulien Thierry /*
4799ed24f4bSMarc Zyngier  * When the perf event overflows, set the overflow status and inform the vcpu.
4809ed24f4bSMarc Zyngier  */
4819ed24f4bSMarc Zyngier static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
4829ed24f4bSMarc Zyngier 				  struct perf_sample_data *data,
4839ed24f4bSMarc Zyngier 				  struct pt_regs *regs)
4849ed24f4bSMarc Zyngier {
4859ed24f4bSMarc Zyngier 	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
4869ed24f4bSMarc Zyngier 	struct arm_pmu *cpu_pmu = to_arm_pmu(perf_event->pmu);
4879ed24f4bSMarc Zyngier 	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
4889ed24f4bSMarc Zyngier 	int idx = pmc->idx;
4899ed24f4bSMarc Zyngier 	u64 period;
4909ed24f4bSMarc Zyngier 
4919ed24f4bSMarc Zyngier 	cpu_pmu->pmu.stop(perf_event, PERF_EF_UPDATE);
4929ed24f4bSMarc Zyngier 
4939ed24f4bSMarc Zyngier 	/*
4949ed24f4bSMarc Zyngier 	 * Reset the sample period to the architectural limit,
4959ed24f4bSMarc Zyngier 	 * i.e. the point where the counter overflows.
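	 * The negated count, truncated below to the counter's width, is
	 * exactly the number of events left until the next wrap.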
4969ed24f4bSMarc Zyngier 	 */
4979ed24f4bSMarc Zyngier 	period = -(local64_read(&perf_event->count));
4989ed24f4bSMarc Zyngier 
4999ed24f4bSMarc Zyngier 	if (!kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
5009ed24f4bSMarc Zyngier 		period &= GENMASK(31, 0);
5019ed24f4bSMarc Zyngier 
5029ed24f4bSMarc Zyngier 	local64_set(&perf_event->hw.period_left, 0);
5039ed24f4bSMarc Zyngier 	perf_event->attr.sample_period = period;
5049ed24f4bSMarc Zyngier 	perf_event->hw.sample_period = period;
5059ed24f4bSMarc Zyngier 
5069ed24f4bSMarc Zyngier 	__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);
5079ed24f4bSMarc Zyngier 
5089ed24f4bSMarc Zyngier 	if (kvm_pmu_overflow_status(vcpu)) {
5099ed24f4bSMarc Zyngier 		kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
51095e92e45SJulien Thierry 
51195e92e45SJulien Thierry 		if (!in_nmi())
5129ed24f4bSMarc Zyngier 			kvm_vcpu_kick(vcpu);
51395e92e45SJulien Thierry 		else
51495e92e45SJulien Thierry 			irq_work_queue(&vcpu->arch.pmu.overflow_work);
5159ed24f4bSMarc Zyngier 	}
5169ed24f4bSMarc Zyngier 
5179ed24f4bSMarc Zyngier 	cpu_pmu->pmu.start(perf_event, PERF_EF_RELOAD);
5189ed24f4bSMarc Zyngier }
5199ed24f4bSMarc Zyngier 
5209ed24f4bSMarc Zyngier /**
5219ed24f4bSMarc Zyngier  * kvm_pmu_software_increment - do software increment
5229ed24f4bSMarc Zyngier  * @vcpu: The vcpu pointer
5239ed24f4bSMarc Zyngier  * @val: the value guest writes to PMSWINC register
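 *
 * Only counters that are enabled in PMCNTENSET_EL0 and programmed with
 * the SW_INCR event are incremented; a wrap of the low 32 bits either
 * carries into a chained high counter or sets the overflow flag.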
5249ed24f4bSMarc Zyngier  */
5259ed24f4bSMarc Zyngier void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
5269ed24f4bSMarc Zyngier {
5279ed24f4bSMarc Zyngier 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
5289ed24f4bSMarc Zyngier 	int i;
5299ed24f4bSMarc Zyngier 
5309ed24f4bSMarc Zyngier 	if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
5319ed24f4bSMarc Zyngier 		return;
5329ed24f4bSMarc Zyngier 
5339ed24f4bSMarc Zyngier 	/* Weed out disabled counters */
5349ed24f4bSMarc Zyngier 	val &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
5359ed24f4bSMarc Zyngier 
5369ed24f4bSMarc Zyngier 	for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++) {
5379ed24f4bSMarc Zyngier 		u64 type, reg;
5389ed24f4bSMarc Zyngier 
5399ed24f4bSMarc Zyngier 		if (!(val & BIT(i)))
5409ed24f4bSMarc Zyngier 			continue;
5419ed24f4bSMarc Zyngier 
5429ed24f4bSMarc Zyngier 		/* PMSWINC only applies to ... SW_INC! */
5439ed24f4bSMarc Zyngier 		type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i);
544fd65a3b5SMarc Zyngier 		type &= kvm_pmu_event_mask(vcpu->kvm);
5459ed24f4bSMarc Zyngier 		if (type != ARMV8_PMUV3_PERFCTR_SW_INCR)
5469ed24f4bSMarc Zyngier 			continue;
5479ed24f4bSMarc Zyngier 
5489ed24f4bSMarc Zyngier 		/* increment this SW_INCR counter */
5499ed24f4bSMarc Zyngier 		reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
5509ed24f4bSMarc Zyngier 		reg = lower_32_bits(reg);
5519ed24f4bSMarc Zyngier 		__vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
5529ed24f4bSMarc Zyngier 
5539ed24f4bSMarc Zyngier 		if (reg) /* no overflow on the low part */
5549ed24f4bSMarc Zyngier 			continue;
5559ed24f4bSMarc Zyngier 
5569ed24f4bSMarc Zyngier 		if (kvm_pmu_pmc_is_chained(&pmu->pmc[i])) {
5579ed24f4bSMarc Zyngier 			/* increment the high counter */
5589ed24f4bSMarc Zyngier 			reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) + 1;
5599ed24f4bSMarc Zyngier 			reg = lower_32_bits(reg);
5609ed24f4bSMarc Zyngier 			__vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) = reg;
5619ed24f4bSMarc Zyngier 			if (!reg) /* mark overflow on the high counter */
5629ed24f4bSMarc Zyngier 				__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i + 1);
5639ed24f4bSMarc Zyngier 		} else {
5649ed24f4bSMarc Zyngier 			/* mark overflow on low counter */
5659ed24f4bSMarc Zyngier 			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
5669ed24f4bSMarc Zyngier 		}
5679ed24f4bSMarc Zyngier 	}
5689ed24f4bSMarc Zyngier }
5699ed24f4bSMarc Zyngier 
5709ed24f4bSMarc Zyngier /**
5719ed24f4bSMarc Zyngier  * kvm_pmu_handle_pmcr - handle PMCR register
5729ed24f4bSMarc Zyngier  * @vcpu: The vcpu pointer
5739ed24f4bSMarc Zyngier  * @val: the value guest writes to PMCR register
5749ed24f4bSMarc Zyngier  */
5759ed24f4bSMarc Zyngier void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
5769ed24f4bSMarc Zyngier {
5779ed24f4bSMarc Zyngier 	int i;
5789ed24f4bSMarc Zyngier 
5799ed24f4bSMarc Zyngier 	if (val & ARMV8_PMU_PMCR_E) {
5809ed24f4bSMarc Zyngier 		kvm_pmu_enable_counter_mask(vcpu,
581f5eff400SMarc Zyngier 		       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
5829ed24f4bSMarc Zyngier 	} else {
583ca4f202dSAlexandre Chartre 		kvm_pmu_disable_counter_mask(vcpu,
584ca4f202dSAlexandre Chartre 		       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
5859ed24f4bSMarc Zyngier 	}
5869ed24f4bSMarc Zyngier 
5879ed24f4bSMarc Zyngier 	if (val & ARMV8_PMU_PMCR_C)
5889ed24f4bSMarc Zyngier 		kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);
5899ed24f4bSMarc Zyngier 
5909ed24f4bSMarc Zyngier 	if (val & ARMV8_PMU_PMCR_P) {
591ca4f202dSAlexandre Chartre 		unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
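		/*
		 * PMCR_EL0.P resets the event counters only; the cycle
		 * counter is cleared solely by PMCR_EL0.C above.
		 */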
5922a71fabfSAlexandru Elisei 		mask &= ~BIT(ARMV8_PMU_CYCLE_IDX);
5939ed24f4bSMarc Zyngier 		for_each_set_bit(i, &mask, 32)
5949ed24f4bSMarc Zyngier 			kvm_pmu_set_counter_value(vcpu, i, 0);
5959ed24f4bSMarc Zyngier 	}
5969ed24f4bSMarc Zyngier }
5979ed24f4bSMarc Zyngier 
5989ed24f4bSMarc Zyngier static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx)
5999ed24f4bSMarc Zyngier {
6009ed24f4bSMarc Zyngier 	return (__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) &&
6019ed24f4bSMarc Zyngier 	       (__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(select_idx));
6029ed24f4bSMarc Zyngier }
6039ed24f4bSMarc Zyngier 
6049ed24f4bSMarc Zyngier /**
6059ed24f4bSMarc Zyngier  * kvm_pmu_create_perf_event - create a perf event for a counter
6069ed24f4bSMarc Zyngier  * @vcpu: The vcpu pointer
6079ed24f4bSMarc Zyngier  * @select_idx: The index of the selected counter
6089ed24f4bSMarc Zyngier  */
6099ed24f4bSMarc Zyngier static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
6109ed24f4bSMarc Zyngier {
61146b18782SMarc Zyngier 	struct arm_pmu *arm_pmu = vcpu->kvm->arch.arm_pmu;
6129ed24f4bSMarc Zyngier 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
6139ed24f4bSMarc Zyngier 	struct kvm_pmc *pmc;
6149ed24f4bSMarc Zyngier 	struct perf_event *event;
6159ed24f4bSMarc Zyngier 	struct perf_event_attr attr;
6169ed24f4bSMarc Zyngier 	u64 eventsel, counter, reg, data;
6179ed24f4bSMarc Zyngier 
6189ed24f4bSMarc Zyngier 	/*
6199ed24f4bSMarc Zyngier 	 * For chained counters the event type and filtering attributes are
6209ed24f4bSMarc Zyngier 	 * obtained from the low/even counter. We also use this counter to
6219ed24f4bSMarc Zyngier 	 * determine if the event is enabled/disabled.
6229ed24f4bSMarc Zyngier 	 */
6239ed24f4bSMarc Zyngier 	pmc = kvm_pmu_get_canonical_pmc(&pmu->pmc[select_idx]);
6249ed24f4bSMarc Zyngier 
6259ed24f4bSMarc Zyngier 	reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
6269ed24f4bSMarc Zyngier 	      ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + pmc->idx;
6279ed24f4bSMarc Zyngier 	data = __vcpu_sys_reg(vcpu, reg);
6289ed24f4bSMarc Zyngier 
6299ed24f4bSMarc Zyngier 	kvm_pmu_stop_counter(vcpu, pmc);
630d7eec236SMarc Zyngier 	if (pmc->idx == ARMV8_PMU_CYCLE_IDX)
631d7eec236SMarc Zyngier 		eventsel = ARMV8_PMUV3_PERFCTR_CPU_CYCLES;
632d7eec236SMarc Zyngier 	else
633d7eec236SMarc Zyngier 		eventsel = data & kvm_pmu_event_mask(vcpu->kvm);
6349ed24f4bSMarc Zyngier 
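	/*
	 * The cycle counter is modelled as an ordinary event counting
	 * CPU_CYCLES, so a single creation path covers both counter kinds.
	 */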
635d7eec236SMarc Zyngier 	/* Software increment event doesn't need to be backed by a perf event */
636d7eec236SMarc Zyngier 	if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR)
637d7eec236SMarc Zyngier 		return;
638d7eec236SMarc Zyngier 
639d7eec236SMarc Zyngier 	/*
640d7eec236SMarc Zyngier 	 * If we have a filter in place and the event isn't allowed, do
641d7eec236SMarc Zyngier 	 * not install a perf event either.
642d7eec236SMarc Zyngier 	 */
643d7eec236SMarc Zyngier 	if (vcpu->kvm->arch.pmu_filter &&
644d7eec236SMarc Zyngier 	    !test_bit(eventsel, vcpu->kvm->arch.pmu_filter))
6459ed24f4bSMarc Zyngier 		return;
6469ed24f4bSMarc Zyngier 
6479ed24f4bSMarc Zyngier 	memset(&attr, 0, sizeof(struct perf_event_attr));
64846b18782SMarc Zyngier 	attr.type = arm_pmu->pmu.type;
6499ed24f4bSMarc Zyngier 	attr.size = sizeof(attr);
6509ed24f4bSMarc Zyngier 	attr.pinned = 1;
6519ed24f4bSMarc Zyngier 	attr.disabled = !kvm_pmu_counter_is_enabled(vcpu, pmc->idx);
6529ed24f4bSMarc Zyngier 	attr.exclude_user = data & ARMV8_PMU_EXCLUDE_EL0 ? 1 : 0;
6539ed24f4bSMarc Zyngier 	attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0;
6549ed24f4bSMarc Zyngier 	attr.exclude_hv = 1; /* Don't count EL2 events */
6559ed24f4bSMarc Zyngier 	attr.exclude_host = 1; /* Don't count host events */
656d7eec236SMarc Zyngier 	attr.config = eventsel;
6579ed24f4bSMarc Zyngier 
6589ed24f4bSMarc Zyngier 	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
6599ed24f4bSMarc Zyngier 
6609ed24f4bSMarc Zyngier 	if (kvm_pmu_pmc_is_chained(pmc)) {
6619ed24f4bSMarc Zyngier 		/*
6629ed24f4bSMarc Zyngier 		 * The initial sample period (overflow count) of an event. For
6639ed24f4bSMarc Zyngier 		 * chained counters we only support overflow interrupts on the
6649ed24f4bSMarc Zyngier 		 * high counter.
6659ed24f4bSMarc Zyngier 		 */
6669ed24f4bSMarc Zyngier 		attr.sample_period = (-counter) & GENMASK(63, 0);
6679ed24f4bSMarc Zyngier 		attr.config1 |= PERF_ATTR_CFG1_KVM_PMU_CHAINED;
6689ed24f4bSMarc Zyngier 
6699ed24f4bSMarc Zyngier 		event = perf_event_create_kernel_counter(&attr, -1, current,
6709ed24f4bSMarc Zyngier 							 kvm_pmu_perf_overflow,
6719ed24f4bSMarc Zyngier 							 pmc + 1);
6729ed24f4bSMarc Zyngier 	} else {
6739ed24f4bSMarc Zyngier 		/* The initial sample period (overflow count) of an event. */
6749ed24f4bSMarc Zyngier 		if (kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
6759ed24f4bSMarc Zyngier 			attr.sample_period = (-counter) & GENMASK(63, 0);
6769ed24f4bSMarc Zyngier 		else
6779ed24f4bSMarc Zyngier 			attr.sample_period = (-counter) & GENMASK(31, 0);
6789ed24f4bSMarc Zyngier 
6799ed24f4bSMarc Zyngier 		event = perf_event_create_kernel_counter(&attr, -1, current,
6809ed24f4bSMarc Zyngier 						 kvm_pmu_perf_overflow, pmc);
6819ed24f4bSMarc Zyngier 	}
6829ed24f4bSMarc Zyngier 
6839ed24f4bSMarc Zyngier 	if (IS_ERR(event)) {
6849ed24f4bSMarc Zyngier 		pr_err_once("kvm: pmu event creation failed %ld\n",
6859ed24f4bSMarc Zyngier 			    PTR_ERR(event));
6869ed24f4bSMarc Zyngier 		return;
6879ed24f4bSMarc Zyngier 	}
6889ed24f4bSMarc Zyngier 
6899ed24f4bSMarc Zyngier 	pmc->perf_event = event;
6909ed24f4bSMarc Zyngier }
6919ed24f4bSMarc Zyngier 
6929ed24f4bSMarc Zyngier /**
6939ed24f4bSMarc Zyngier  * kvm_pmu_update_pmc_chained - update chained bitmap
6949ed24f4bSMarc Zyngier  * @vcpu: The vcpu pointer
6959ed24f4bSMarc Zyngier  * @select_idx: The index of the selected counter
6969ed24f4bSMarc Zyngier  *
6979ed24f4bSMarc Zyngier  * Update the chained bitmap based on the event type written in the
6989ed24f4bSMarc Zyngier  * typer register and the enable state of the odd register.
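 *
 * A pair only counts as chained while its odd counter is both enabled
 * and programmed with the CHAIN event; any transition tears down the
 * existing perf event so that it can be rebuilt in the new shape.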
6999ed24f4bSMarc Zyngier  */
7009ed24f4bSMarc Zyngier static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx)
7019ed24f4bSMarc Zyngier {
7029ed24f4bSMarc Zyngier 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
7039ed24f4bSMarc Zyngier 	struct kvm_pmc *pmc = &pmu->pmc[select_idx], *canonical_pmc;
7049ed24f4bSMarc Zyngier 	bool new_state, old_state;
7059ed24f4bSMarc Zyngier 
7069ed24f4bSMarc Zyngier 	old_state = kvm_pmu_pmc_is_chained(pmc);
7079ed24f4bSMarc Zyngier 	new_state = kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx) &&
7089ed24f4bSMarc Zyngier 		    kvm_pmu_counter_is_enabled(vcpu, pmc->idx | 0x1);
7099ed24f4bSMarc Zyngier 
7109ed24f4bSMarc Zyngier 	if (old_state == new_state)
7119ed24f4bSMarc Zyngier 		return;
7129ed24f4bSMarc Zyngier 
7139ed24f4bSMarc Zyngier 	canonical_pmc = kvm_pmu_get_canonical_pmc(pmc);
7149ed24f4bSMarc Zyngier 	kvm_pmu_stop_counter(vcpu, canonical_pmc);
7159ed24f4bSMarc Zyngier 	if (new_state) {
7169ed24f4bSMarc Zyngier 		/*
7179ed24f4bSMarc Zyngier 		 * During promotion from !chained to chained we must ensure
7189ed24f4bSMarc Zyngier 		 * the adjacent counter is stopped and its event destroyed
7199ed24f4bSMarc Zyngier 		 */
7209ed24f4bSMarc Zyngier 		kvm_pmu_stop_counter(vcpu, kvm_pmu_get_alternate_pmc(pmc));
7219ed24f4bSMarc Zyngier 		set_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
7229ed24f4bSMarc Zyngier 		return;
7239ed24f4bSMarc Zyngier 	}
7249ed24f4bSMarc Zyngier 	clear_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
7259ed24f4bSMarc Zyngier }
7269ed24f4bSMarc Zyngier 
7279ed24f4bSMarc Zyngier /**
7289ed24f4bSMarc Zyngier  * kvm_pmu_set_counter_event_type - set selected counter to monitor some event
7299ed24f4bSMarc Zyngier  * @vcpu: The vcpu pointer
7309ed24f4bSMarc Zyngier  * @data: The data guest writes to PMXEVTYPER_EL0
7319ed24f4bSMarc Zyngier  * @select_idx: The index of the selected counter
7329ed24f4bSMarc Zyngier  *
7339ed24f4bSMarc Zyngier  * When the OS accesses PMXEVTYPER_EL0, it wants to set a PMC to count an
7349ed24f4bSMarc Zyngier  * event with the given hardware event number. Here we call the perf_event
7359ed24f4bSMarc Zyngier  * API to emulate this action and create a kernel perf event for it.
7369ed24f4bSMarc Zyngier  */
7379ed24f4bSMarc Zyngier void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
7389ed24f4bSMarc Zyngier 				    u64 select_idx)
7399ed24f4bSMarc Zyngier {
740fd65a3b5SMarc Zyngier 	u64 reg, mask;
741fd65a3b5SMarc Zyngier 
742fd65a3b5SMarc Zyngier 	mask  =  ARMV8_PMU_EVTYPE_MASK;
743fd65a3b5SMarc Zyngier 	mask &= ~ARMV8_PMU_EVTYPE_EVENT;
744fd65a3b5SMarc Zyngier 	mask |= kvm_pmu_event_mask(vcpu->kvm);
7459ed24f4bSMarc Zyngier 
7469ed24f4bSMarc Zyngier 	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
7479ed24f4bSMarc Zyngier 	      ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + select_idx;
7489ed24f4bSMarc Zyngier 
749fd65a3b5SMarc Zyngier 	__vcpu_sys_reg(vcpu, reg) = data & mask;
7509ed24f4bSMarc Zyngier 
7519ed24f4bSMarc Zyngier 	kvm_pmu_update_pmc_chained(vcpu, select_idx);
7529ed24f4bSMarc Zyngier 	kvm_pmu_create_perf_event(vcpu, select_idx);
7539ed24f4bSMarc Zyngier }
7549ed24f4bSMarc Zyngier 
755e840f42aSMarc Zyngier void kvm_host_pmu_init(struct arm_pmu *pmu)
756e840f42aSMarc Zyngier {
757db858060SAlexandru Elisei 	struct arm_pmu_entry *entry;
758db858060SAlexandru Elisei 
759db858060SAlexandru Elisei 	if (pmu->pmuver == 0 || pmu->pmuver == ID_AA64DFR0_PMUVER_IMP_DEF ||
760db858060SAlexandru Elisei 	    is_protected_kvm_enabled())
761db858060SAlexandru Elisei 		return;
762db858060SAlexandru Elisei 
763db858060SAlexandru Elisei 	mutex_lock(&arm_pmus_lock);
764db858060SAlexandru Elisei 
765db858060SAlexandru Elisei 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
766db858060SAlexandru Elisei 	if (!entry)
767db858060SAlexandru Elisei 		goto out_unlock;
768db858060SAlexandru Elisei 
769db858060SAlexandru Elisei 	entry->arm_pmu = pmu;
770db858060SAlexandru Elisei 	list_add_tail(&entry->entry, &arm_pmus);
771db858060SAlexandru Elisei 
772db858060SAlexandru Elisei 	if (list_is_singular(&arm_pmus))
773e840f42aSMarc Zyngier 		static_branch_enable(&kvm_arm_pmu_available);
774db858060SAlexandru Elisei 
775db858060SAlexandru Elisei out_unlock:
776db858060SAlexandru Elisei 	mutex_unlock(&arm_pmus_lock);
777e840f42aSMarc Zyngier }
778e840f42aSMarc Zyngier 
77946b18782SMarc Zyngier static struct arm_pmu *kvm_pmu_probe_armpmu(void)
780fd65a3b5SMarc Zyngier {
781fd65a3b5SMarc Zyngier 	struct perf_event_attr attr = { };
782fd65a3b5SMarc Zyngier 	struct perf_event *event;
78346b18782SMarc Zyngier 	struct arm_pmu *pmu = NULL;
784fd65a3b5SMarc Zyngier 
785fd65a3b5SMarc Zyngier 	/*
786fd65a3b5SMarc Zyngier 	 * Create a dummy event that only counts user cycles. As we'll never
787fd65a3b5SMarc Zyngier 	 * leave this function with the event being live, it will never
788fd65a3b5SMarc Zyngier 	 * count anything. But it allows us to probe some of the PMU
789fd65a3b5SMarc Zyngier 	 * details. Yes, this is terrible.
790fd65a3b5SMarc Zyngier 	 */
791fd65a3b5SMarc Zyngier 	attr.type = PERF_TYPE_RAW;
792fd65a3b5SMarc Zyngier 	attr.size = sizeof(attr);
793fd65a3b5SMarc Zyngier 	attr.pinned = 1;
794fd65a3b5SMarc Zyngier 	attr.disabled = 0;
795fd65a3b5SMarc Zyngier 	attr.exclude_user = 0;
796fd65a3b5SMarc Zyngier 	attr.exclude_kernel = 1;
797fd65a3b5SMarc Zyngier 	attr.exclude_hv = 1;
798fd65a3b5SMarc Zyngier 	attr.exclude_host = 1;
799fd65a3b5SMarc Zyngier 	attr.config = ARMV8_PMUV3_PERFCTR_CPU_CYCLES;
800fd65a3b5SMarc Zyngier 	attr.sample_period = GENMASK(63, 0);
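	/* A maximal sample period ensures the dummy event never overflows. */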
801fd65a3b5SMarc Zyngier 
802fd65a3b5SMarc Zyngier 	event = perf_event_create_kernel_counter(&attr, -1, current,
803fd65a3b5SMarc Zyngier 						 kvm_pmu_perf_overflow, &attr);
804fd65a3b5SMarc Zyngier 
805fd65a3b5SMarc Zyngier 	if (IS_ERR(event)) {
806fd65a3b5SMarc Zyngier 		pr_err_once("kvm: pmu event creation failed %ld\n",
807fd65a3b5SMarc Zyngier 			    PTR_ERR(event));
80846b18782SMarc Zyngier 		return NULL;
809fd65a3b5SMarc Zyngier 	}
810fd65a3b5SMarc Zyngier 
811fd65a3b5SMarc Zyngier 	if (event->pmu) {
812fd65a3b5SMarc Zyngier 		pmu = to_arm_pmu(event->pmu);
81346b18782SMarc Zyngier 		if (pmu->pmuver == 0 ||
81446b18782SMarc Zyngier 		    pmu->pmuver == ID_AA64DFR0_PMUVER_IMP_DEF)
81546b18782SMarc Zyngier 			pmu = NULL;
816fd65a3b5SMarc Zyngier 	}
817fd65a3b5SMarc Zyngier 
818fd65a3b5SMarc Zyngier 	perf_event_disable(event);
819fd65a3b5SMarc Zyngier 	perf_event_release_kernel(event);
820fd65a3b5SMarc Zyngier 
82146b18782SMarc Zyngier 	return pmu;
822fd65a3b5SMarc Zyngier }
823fd65a3b5SMarc Zyngier 
82488865becSMarc Zyngier u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
82588865becSMarc Zyngier {
82688865becSMarc Zyngier 	unsigned long *bmap = vcpu->kvm->arch.pmu_filter;
82788865becSMarc Zyngier 	u64 val, mask = 0;
8289529aaa0SMarc Zyngier 	int base, i, nr_events;
82988865becSMarc Zyngier 
83088865becSMarc Zyngier 	if (!pmceid1) {
83188865becSMarc Zyngier 		val = read_sysreg(pmceid0_el0);
83288865becSMarc Zyngier 		base = 0;
83388865becSMarc Zyngier 	} else {
83488865becSMarc Zyngier 		val = read_sysreg(pmceid1_el0);
83546081078SMarc Zyngier 		/*
83646081078SMarc Zyngier 		 * Don't advertise STALL_SLOT, as PMMIR_EL0 is handled
83746081078SMarc Zyngier 		 * as RAZ
83846081078SMarc Zyngier 		 */
83946b18782SMarc Zyngier 		if (vcpu->kvm->arch.arm_pmu->pmuver >= ID_AA64DFR0_PMUVER_8_4)
84046081078SMarc Zyngier 			val &= ~BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32);
84188865becSMarc Zyngier 		base = 32;
84288865becSMarc Zyngier 	}
84388865becSMarc Zyngier 
84488865becSMarc Zyngier 	if (!bmap)
84588865becSMarc Zyngier 		return val;
84688865becSMarc Zyngier 
8479529aaa0SMarc Zyngier 	nr_events = kvm_pmu_event_mask(vcpu->kvm) + 1;
8489529aaa0SMarc Zyngier 
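	/*
	 * The upper halves of PMCEID{0,1}_EL0 describe the extended event
	 * space starting at 0x4000; only expose those bits when the
	 * guest's event mask is wide enough to reach that range.
	 */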
84988865becSMarc Zyngier 	for (i = 0; i < 32; i += 8) {
85088865becSMarc Zyngier 		u64 byte;
85188865becSMarc Zyngier 
85288865becSMarc Zyngier 		byte = bitmap_get_value8(bmap, base + i);
85388865becSMarc Zyngier 		mask |= byte << i;
8549529aaa0SMarc Zyngier 		if (nr_events >= (0x4000 + base + 32)) {
85588865becSMarc Zyngier 			byte = bitmap_get_value8(bmap, 0x4000 + base + i);
85688865becSMarc Zyngier 			mask |= byte << (32 + i);
85788865becSMarc Zyngier 		}
8589529aaa0SMarc Zyngier 	}
85988865becSMarc Zyngier 
86088865becSMarc Zyngier 	return val & mask;
86188865becSMarc Zyngier }
86288865becSMarc Zyngier 
8639ed24f4bSMarc Zyngier int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
8649ed24f4bSMarc Zyngier {
8659bbfa4b5SAlexandru Elisei 	if (!kvm_vcpu_has_pmu(vcpu))
8669ed24f4bSMarc Zyngier 		return 0;
8679ed24f4bSMarc Zyngier 
8689bbfa4b5SAlexandru Elisei 	if (!vcpu->arch.pmu.created)
8699bbfa4b5SAlexandru Elisei 		return -EINVAL;
8709bbfa4b5SAlexandru Elisei 
8719ed24f4bSMarc Zyngier 	/*
8729ed24f4bSMarc Zyngier 	 * A valid interrupt configuration for the PMU is either to have a
8739ed24f4bSMarc Zyngier 	 * properly configured interrupt number and to use an in-kernel
8749ed24f4bSMarc Zyngier 	 * irqchip, or to not have an in-kernel GIC and not set an IRQ.
8759ed24f4bSMarc Zyngier 	 */
8769ed24f4bSMarc Zyngier 	if (irqchip_in_kernel(vcpu->kvm)) {
8779ed24f4bSMarc Zyngier 		int irq = vcpu->arch.pmu.irq_num;
8789ed24f4bSMarc Zyngier 		/*
8799ed24f4bSMarc Zyngier 		 * If we are using an in-kernel vgic, at this point we know
8809ed24f4bSMarc Zyngier 		 * the vgic will be initialized, so we can check the PMU irq
8819ed24f4bSMarc Zyngier 		 * number against the dimensions of the vgic and make sure
8829ed24f4bSMarc Zyngier 		 * it's valid.
8839ed24f4bSMarc Zyngier 		 */
8849ed24f4bSMarc Zyngier 		if (!irq_is_ppi(irq) && !vgic_valid_spi(vcpu->kvm, irq))
8859ed24f4bSMarc Zyngier 			return -EINVAL;
8869ed24f4bSMarc Zyngier 	} else if (kvm_arm_pmu_irq_initialized(vcpu)) {
8879ed24f4bSMarc Zyngier 		return -EINVAL;
8889ed24f4bSMarc Zyngier 	}
8899ed24f4bSMarc Zyngier 
890d0c94c49SMarc Zyngier 	/* One-off reload of the PMU on first run */
891d0c94c49SMarc Zyngier 	kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
892d0c94c49SMarc Zyngier 
8939ed24f4bSMarc Zyngier 	return 0;
8949ed24f4bSMarc Zyngier }
8959ed24f4bSMarc Zyngier 
8969ed24f4bSMarc Zyngier static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
8979ed24f4bSMarc Zyngier {
8989ed24f4bSMarc Zyngier 	if (irqchip_in_kernel(vcpu->kvm)) {
8999ed24f4bSMarc Zyngier 		int ret;
9009ed24f4bSMarc Zyngier 
9019ed24f4bSMarc Zyngier 		/*
9029ed24f4bSMarc Zyngier 		 * If using the PMU with an in-kernel virtual GIC
9039ed24f4bSMarc Zyngier 		 * implementation, we require the GIC to be already
9049ed24f4bSMarc Zyngier 		 * initialized when initializing the PMU.
9059ed24f4bSMarc Zyngier 		 */
9069ed24f4bSMarc Zyngier 		if (!vgic_initialized(vcpu->kvm))
9079ed24f4bSMarc Zyngier 			return -ENODEV;
9089ed24f4bSMarc Zyngier 
9099ed24f4bSMarc Zyngier 		if (!kvm_arm_pmu_irq_initialized(vcpu))
9109ed24f4bSMarc Zyngier 			return -ENXIO;
9119ed24f4bSMarc Zyngier 
9129ed24f4bSMarc Zyngier 		ret = kvm_vgic_set_owner(vcpu, vcpu->arch.pmu.irq_num,
9139ed24f4bSMarc Zyngier 					 &vcpu->arch.pmu);
9149ed24f4bSMarc Zyngier 		if (ret)
9159ed24f4bSMarc Zyngier 			return ret;
9169ed24f4bSMarc Zyngier 	}
9179ed24f4bSMarc Zyngier 
91895e92e45SJulien Thierry 	init_irq_work(&vcpu->arch.pmu.overflow_work,
91995e92e45SJulien Thierry 		      kvm_pmu_perf_overflow_notify_vcpu);
92095e92e45SJulien Thierry 
9219ed24f4bSMarc Zyngier 	vcpu->arch.pmu.created = true;
9229ed24f4bSMarc Zyngier 	return 0;
9239ed24f4bSMarc Zyngier }
9249ed24f4bSMarc Zyngier 
9259ed24f4bSMarc Zyngier /*
9269ed24f4bSMarc Zyngier  * For one VM the interrupt type must be the same for each vcpu.
9279ed24f4bSMarc Zyngier  * As a PPI, the interrupt number is the same for all vcpus,
9289ed24f4bSMarc Zyngier  * while as an SPI it must be a separate number per vcpu.
9299ed24f4bSMarc Zyngier  */
9309ed24f4bSMarc Zyngier static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
9319ed24f4bSMarc Zyngier {
93246808a4cSMarc Zyngier 	unsigned long i;
9339ed24f4bSMarc Zyngier 	struct kvm_vcpu *vcpu;
9349ed24f4bSMarc Zyngier 
9359ed24f4bSMarc Zyngier 	kvm_for_each_vcpu(i, vcpu, kvm) {
9369ed24f4bSMarc Zyngier 		if (!kvm_arm_pmu_irq_initialized(vcpu))
9379ed24f4bSMarc Zyngier 			continue;
9389ed24f4bSMarc Zyngier 
9399ed24f4bSMarc Zyngier 		if (irq_is_ppi(irq)) {
9409ed24f4bSMarc Zyngier 			if (vcpu->arch.pmu.irq_num != irq)
9419ed24f4bSMarc Zyngier 				return false;
9429ed24f4bSMarc Zyngier 		} else {
9439ed24f4bSMarc Zyngier 			if (vcpu->arch.pmu.irq_num == irq)
9449ed24f4bSMarc Zyngier 				return false;
9459ed24f4bSMarc Zyngier 		}
9469ed24f4bSMarc Zyngier 	}
9479ed24f4bSMarc Zyngier 
9489ed24f4bSMarc Zyngier 	return true;
9499ed24f4bSMarc Zyngier }
9509ed24f4bSMarc Zyngier 
9516ee7fca2SAlexandru Elisei static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id)
9526ee7fca2SAlexandru Elisei {
9536ee7fca2SAlexandru Elisei 	struct kvm *kvm = vcpu->kvm;
9546ee7fca2SAlexandru Elisei 	struct arm_pmu_entry *entry;
9556ee7fca2SAlexandru Elisei 	struct arm_pmu *arm_pmu;
9566ee7fca2SAlexandru Elisei 	int ret = -ENXIO;
9576ee7fca2SAlexandru Elisei 
9586ee7fca2SAlexandru Elisei 	mutex_lock(&kvm->lock);
9596ee7fca2SAlexandru Elisei 	mutex_lock(&arm_pmus_lock);
9606ee7fca2SAlexandru Elisei 
9616ee7fca2SAlexandru Elisei 	list_for_each_entry(entry, &arm_pmus, entry) {
9626ee7fca2SAlexandru Elisei 		arm_pmu = entry->arm_pmu;
9636ee7fca2SAlexandru Elisei 		if (arm_pmu->pmu.type == pmu_id) {
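			/*
			 * The PMU cannot be changed once the VM has run, nor
			 * once an event filter has been installed against a
			 * different PMU's event space.
			 */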
964*06394531SMarc Zyngier 			if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags) ||
9656ee7fca2SAlexandru Elisei 			    (kvm->arch.pmu_filter && kvm->arch.arm_pmu != arm_pmu)) {
9666ee7fca2SAlexandru Elisei 				ret = -EBUSY;
9676ee7fca2SAlexandru Elisei 				break;
9686ee7fca2SAlexandru Elisei 			}
9696ee7fca2SAlexandru Elisei 
9706ee7fca2SAlexandru Elisei 			kvm->arch.arm_pmu = arm_pmu;
971583cda1bSAlexandru Elisei 			cpumask_copy(kvm->arch.supported_cpus, &arm_pmu->supported_cpus);
9726ee7fca2SAlexandru Elisei 			ret = 0;
9736ee7fca2SAlexandru Elisei 			break;
9746ee7fca2SAlexandru Elisei 		}
9756ee7fca2SAlexandru Elisei 	}
9766ee7fca2SAlexandru Elisei 
9776ee7fca2SAlexandru Elisei 	mutex_unlock(&arm_pmus_lock);
9786ee7fca2SAlexandru Elisei 	mutex_unlock(&kvm->lock);
9796ee7fca2SAlexandru Elisei 	return ret;
9806ee7fca2SAlexandru Elisei }
9816ee7fca2SAlexandru Elisei 
9829ed24f4bSMarc Zyngier int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
9839ed24f4bSMarc Zyngier {
9845177fe91SMarc Zyngier 	struct kvm *kvm = vcpu->kvm;
9855177fe91SMarc Zyngier 
98677da4303SMarc Zyngier 	if (!kvm_vcpu_has_pmu(vcpu))
98742223fb1SMarc Zyngier 		return -ENODEV;
98842223fb1SMarc Zyngier 
98942223fb1SMarc Zyngier 	if (vcpu->arch.pmu.created)
99042223fb1SMarc Zyngier 		return -EBUSY;
99142223fb1SMarc Zyngier 
99246b18782SMarc Zyngier 	mutex_lock(&kvm->lock);
99346b18782SMarc Zyngier 	if (!kvm->arch.arm_pmu) {
99446b18782SMarc Zyngier 		/* No PMU set, get the default one */
99546b18782SMarc Zyngier 		kvm->arch.arm_pmu = kvm_pmu_probe_armpmu();
99646b18782SMarc Zyngier 		if (!kvm->arch.arm_pmu) {
99746b18782SMarc Zyngier 			mutex_unlock(&kvm->lock);
998fd65a3b5SMarc Zyngier 			return -ENODEV;
99946b18782SMarc Zyngier 		}
100046b18782SMarc Zyngier 	}
100146b18782SMarc Zyngier 	mutex_unlock(&kvm->lock);
1002fd65a3b5SMarc Zyngier 
10039ed24f4bSMarc Zyngier 	switch (attr->attr) {
10049ed24f4bSMarc Zyngier 	case KVM_ARM_VCPU_PMU_V3_IRQ: {
10059ed24f4bSMarc Zyngier 		int __user *uaddr = (int __user *)(long)attr->addr;
10069ed24f4bSMarc Zyngier 		int irq;
10079ed24f4bSMarc Zyngier 
10085177fe91SMarc Zyngier 		if (!irqchip_in_kernel(kvm))
10099ed24f4bSMarc Zyngier 			return -EINVAL;
10109ed24f4bSMarc Zyngier 
10119ed24f4bSMarc Zyngier 		if (get_user(irq, uaddr))
10129ed24f4bSMarc Zyngier 			return -EFAULT;
10139ed24f4bSMarc Zyngier 
10149ed24f4bSMarc Zyngier 		/* The PMU overflow interrupt can be a PPI or a valid SPI. */
10159ed24f4bSMarc Zyngier 		if (!(irq_is_ppi(irq) || irq_is_spi(irq)))
10169ed24f4bSMarc Zyngier 			return -EINVAL;
10179ed24f4bSMarc Zyngier 
10185177fe91SMarc Zyngier 		if (!pmu_irq_is_valid(kvm, irq))
10199ed24f4bSMarc Zyngier 			return -EINVAL;
10209ed24f4bSMarc Zyngier 
10219ed24f4bSMarc Zyngier 		if (kvm_arm_pmu_irq_initialized(vcpu))
10229ed24f4bSMarc Zyngier 			return -EBUSY;
10239ed24f4bSMarc Zyngier 
10249ed24f4bSMarc Zyngier 		kvm_debug("Set kvm ARM PMU irq: %d\n", irq);
10259ed24f4bSMarc Zyngier 		vcpu->arch.pmu.irq_num = irq;
10269ed24f4bSMarc Zyngier 		return 0;
10279ed24f4bSMarc Zyngier 	}
1028d7eec236SMarc Zyngier 	case KVM_ARM_VCPU_PMU_V3_FILTER: {
1029d7eec236SMarc Zyngier 		struct kvm_pmu_event_filter __user *uaddr;
1030d7eec236SMarc Zyngier 		struct kvm_pmu_event_filter filter;
1031d7eec236SMarc Zyngier 		int nr_events;
1032d7eec236SMarc Zyngier 
10335177fe91SMarc Zyngier 		nr_events = kvm_pmu_event_mask(kvm) + 1;
1034d7eec236SMarc Zyngier 
1035d7eec236SMarc Zyngier 		uaddr = (struct kvm_pmu_event_filter __user *)(long)attr->addr;
1036d7eec236SMarc Zyngier 
1037d7eec236SMarc Zyngier 		if (copy_from_user(&filter, uaddr, sizeof(filter)))
1038d7eec236SMarc Zyngier 			return -EFAULT;
1039d7eec236SMarc Zyngier 
1040d7eec236SMarc Zyngier 		if (((u32)filter.base_event + filter.nevents) > nr_events ||
1041d7eec236SMarc Zyngier 		    (filter.action != KVM_PMU_EVENT_ALLOW &&
1042d7eec236SMarc Zyngier 		     filter.action != KVM_PMU_EVENT_DENY))
1043d7eec236SMarc Zyngier 			return -EINVAL;
1044d7eec236SMarc Zyngier 
10455177fe91SMarc Zyngier 		mutex_lock(&kvm->lock);
1046d7eec236SMarc Zyngier 
1047*06394531SMarc Zyngier 		if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags)) {
10485177fe91SMarc Zyngier 			mutex_unlock(&kvm->lock);
10495177fe91SMarc Zyngier 			return -EBUSY;
10505177fe91SMarc Zyngier 		}
10515177fe91SMarc Zyngier 
10525177fe91SMarc Zyngier 		if (!kvm->arch.pmu_filter) {
10535177fe91SMarc Zyngier 			kvm->arch.pmu_filter = bitmap_alloc(nr_events, GFP_KERNEL_ACCOUNT);
10545177fe91SMarc Zyngier 			if (!kvm->arch.pmu_filter) {
10555177fe91SMarc Zyngier 				mutex_unlock(&kvm->lock);
1056d7eec236SMarc Zyngier 				return -ENOMEM;
1057d7eec236SMarc Zyngier 			}
1058d7eec236SMarc Zyngier 
1059d7eec236SMarc Zyngier 			/*
1060d7eec236SMarc Zyngier 			 * The default depends on the first applied filter.
1061d7eec236SMarc Zyngier 			 * If it allows events, the default is to deny.
1062d7eec236SMarc Zyngier 			 * Conversely, if the first filter denies a set of
1063d7eec236SMarc Zyngier 			 * events, the default is to allow.
1064d7eec236SMarc Zyngier 			 */
1065d7eec236SMarc Zyngier 			if (filter.action == KVM_PMU_EVENT_ALLOW)
10665177fe91SMarc Zyngier 				bitmap_zero(kvm->arch.pmu_filter, nr_events);
1067d7eec236SMarc Zyngier 			else
10685177fe91SMarc Zyngier 				bitmap_fill(kvm->arch.pmu_filter, nr_events);
1069d7eec236SMarc Zyngier 		}
1070d7eec236SMarc Zyngier 
1071d7eec236SMarc Zyngier 		if (filter.action == KVM_PMU_EVENT_ALLOW)
10725177fe91SMarc Zyngier 			bitmap_set(kvm->arch.pmu_filter, filter.base_event, filter.nevents);
1073d7eec236SMarc Zyngier 		else
10745177fe91SMarc Zyngier 			bitmap_clear(kvm->arch.pmu_filter, filter.base_event, filter.nevents);
1075d7eec236SMarc Zyngier 
10765177fe91SMarc Zyngier 		mutex_unlock(&kvm->lock);
1077d7eec236SMarc Zyngier 
1078d7eec236SMarc Zyngier 		return 0;
1079d7eec236SMarc Zyngier 	}
10806ee7fca2SAlexandru Elisei 	case KVM_ARM_VCPU_PMU_V3_SET_PMU: {
10816ee7fca2SAlexandru Elisei 		int __user *uaddr = (int __user *)(long)attr->addr;
10826ee7fca2SAlexandru Elisei 		int pmu_id;
10836ee7fca2SAlexandru Elisei 
10846ee7fca2SAlexandru Elisei 		if (get_user(pmu_id, uaddr))
10856ee7fca2SAlexandru Elisei 			return -EFAULT;
10866ee7fca2SAlexandru Elisei 
10876ee7fca2SAlexandru Elisei 		return kvm_arm_pmu_v3_set_pmu(vcpu, pmu_id);
10886ee7fca2SAlexandru Elisei 	}
10899ed24f4bSMarc Zyngier 	case KVM_ARM_VCPU_PMU_V3_INIT:
10909ed24f4bSMarc Zyngier 		return kvm_arm_pmu_v3_init(vcpu);
10919ed24f4bSMarc Zyngier 	}
10929ed24f4bSMarc Zyngier 
10939ed24f4bSMarc Zyngier 	return -ENXIO;
10949ed24f4bSMarc Zyngier }
10959ed24f4bSMarc Zyngier 
10969ed24f4bSMarc Zyngier int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
10979ed24f4bSMarc Zyngier {
10989ed24f4bSMarc Zyngier 	switch (attr->attr) {
10999ed24f4bSMarc Zyngier 	case KVM_ARM_VCPU_PMU_V3_IRQ: {
11009ed24f4bSMarc Zyngier 		int __user *uaddr = (int __user *)(long)attr->addr;
11019ed24f4bSMarc Zyngier 		int irq;
11029ed24f4bSMarc Zyngier 
11039ed24f4bSMarc Zyngier 		if (!irqchip_in_kernel(vcpu->kvm))
11049ed24f4bSMarc Zyngier 			return -EINVAL;
11059ed24f4bSMarc Zyngier 
110614bda7a9SMarc Zyngier 		if (!kvm_vcpu_has_pmu(vcpu))
11079ed24f4bSMarc Zyngier 			return -ENODEV;
11089ed24f4bSMarc Zyngier 
11099ed24f4bSMarc Zyngier 		if (!kvm_arm_pmu_irq_initialized(vcpu))
11109ed24f4bSMarc Zyngier 			return -ENXIO;
11119ed24f4bSMarc Zyngier 
11129ed24f4bSMarc Zyngier 		irq = vcpu->arch.pmu.irq_num;
11139ed24f4bSMarc Zyngier 		return put_user(irq, uaddr);
11149ed24f4bSMarc Zyngier 	}
11159ed24f4bSMarc Zyngier 	}
11169ed24f4bSMarc Zyngier 
11179ed24f4bSMarc Zyngier 	return -ENXIO;
11189ed24f4bSMarc Zyngier }
11199ed24f4bSMarc Zyngier 
11209ed24f4bSMarc Zyngier int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
11219ed24f4bSMarc Zyngier {
11229ed24f4bSMarc Zyngier 	switch (attr->attr) {
11239ed24f4bSMarc Zyngier 	case KVM_ARM_VCPU_PMU_V3_IRQ:
11249ed24f4bSMarc Zyngier 	case KVM_ARM_VCPU_PMU_V3_INIT:
1125d7eec236SMarc Zyngier 	case KVM_ARM_VCPU_PMU_V3_FILTER:
11266ee7fca2SAlexandru Elisei 	case KVM_ARM_VCPU_PMU_V3_SET_PMU:
112777da4303SMarc Zyngier 		if (kvm_vcpu_has_pmu(vcpu))
11289ed24f4bSMarc Zyngier 			return 0;
11299ed24f4bSMarc Zyngier 	}
11309ed24f4bSMarc Zyngier 
11319ed24f4bSMarc Zyngier 	return -ENXIO;
11329ed24f4bSMarc Zyngier }