xref: /openbmc/linux/arch/arm64/kvm/pmu-emul.c (revision 0cb9c3c8)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/perf_event.h>
#include <linux/perf/arm_pmu.h>
#include <linux/uaccess.h>
#include <asm/kvm_emulate.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_vgic.h>

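/* Advertise a 64bit counter to the perf core via perf_event_attr.config1 */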
#define PERF_ATTR_CFG1_COUNTER_64BIT	BIT(0)

DEFINE_STATIC_KEY_FALSE(kvm_arm_pmu_available);

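/*
 * Host PMUs usable by guests, registered by kvm_host_pmu_init() and
 * protected by arm_pmus_lock.
 */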
static LIST_HEAD(arm_pmus);
static DEFINE_MUTEX(arm_pmus_lock);

static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx);

static u32 kvm_pmu_event_mask(struct kvm *kvm)
{
	unsigned int pmuver;

	pmuver = kvm->arch.arm_pmu->pmuver;

	switch (pmuver) {
	case ID_AA64DFR0_EL1_PMUVer_IMP:
		return GENMASK(9, 0);
	case ID_AA64DFR0_EL1_PMUVer_V3P1:
	case ID_AA64DFR0_EL1_PMUVer_V3P4:
	case ID_AA64DFR0_EL1_PMUVer_V3P5:
	case ID_AA64DFR0_EL1_PMUVer_V3P7:
		return GENMASK(15, 0);
	default:		/* Shouldn't be here, just for sanity */
		WARN_ONCE(1, "Unknown PMU version %d\n", pmuver);
		return 0;
	}
}

/**
 * kvm_pmu_idx_is_64bit - determine if select_idx is a 64bit counter
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
static bool kvm_pmu_idx_is_64bit(struct kvm_vcpu *vcpu, u64 select_idx)
{
	return (select_idx == ARMV8_PMU_CYCLE_IDX);
}

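/*
 * Only the cycle counter can overflow at 64 bits, and only when PMCR_EL0.LC
 * is set; every other configuration overflows at 32 bits.
 */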
static bool kvm_pmu_idx_has_64bit_overflow(struct kvm_vcpu *vcpu, u64 select_idx)
{
	return (select_idx == ARMV8_PMU_CYCLE_IDX &&
		__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_LC);
}

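/*
 * A counter can take part in chaining if it is an even-numbered event
 * counter whose odd sibling is not the cycle counter, and if it overflows
 * at 32 bits.
 */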
static bool kvm_pmu_counter_can_chain(struct kvm_vcpu *vcpu, u64 idx)
{
	return (!(idx & 1) && (idx + 1) < ARMV8_PMU_CYCLE_IDX &&
		!kvm_pmu_idx_has_64bit_overflow(vcpu, idx));
}

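/*
 * Walk back from a counter to its vcpu: pmc->idx is the counter's position
 * in the kvm_pmu pmc[] array, so rewinding the pointer by idx yields pmc[0],
 * from which container_of() recovers the enclosing pmu and then the vcpu.
 */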
static struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu;
	struct kvm_vcpu_arch *vcpu_arch;

	pmc -= pmc->idx;
	pmu = container_of(pmc, struct kvm_pmu, pmc[0]);
	vcpu_arch = container_of(pmu, struct kvm_vcpu_arch, pmu);
	return container_of(vcpu_arch, struct kvm_vcpu, arch);
}

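/*
 * Map a counter index to its shadow sysreg: the cycle counter is backed by
 * PMCCNTR_EL0/PMCCFILTR_EL0, the event counters by PMEVCNTRn_EL0/PMEVTYPERn_EL0.
 */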
static u32 counter_index_to_reg(u64 idx)
{
	return (idx == ARMV8_PMU_CYCLE_IDX) ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + idx;
}

static u32 counter_index_to_evtreg(u64 idx)
{
	return (idx == ARMV8_PMU_CYCLE_IDX) ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + idx;
}

/**
 * kvm_pmu_get_counter_value - get PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
{
	u64 counter, reg, enabled, running;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc = &pmu->pmc[select_idx];

	if (!kvm_vcpu_has_pmu(vcpu))
		return 0;

	reg = counter_index_to_reg(select_idx);
	counter = __vcpu_sys_reg(vcpu, reg);

	/*
	 * The real counter value is equal to the value of the counter
	 * register plus the value that the perf event counts.
	 */
	if (pmc->perf_event)
		counter += perf_event_read_value(pmc->perf_event, &enabled,
						 &running);

	if (!kvm_pmu_idx_is_64bit(vcpu, select_idx))
		counter = lower_32_bits(counter);

	return counter;
}

/**
 * kvm_pmu_set_counter_value - set PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 * @val: The counter value
 */
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
{
	u64 reg;

	if (!kvm_vcpu_has_pmu(vcpu))
		return;

	reg = counter_index_to_reg(select_idx);
	__vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx);

	/* Recreate the perf event to reflect the updated sample_period */
	kvm_pmu_create_perf_event(vcpu, select_idx);
}

/**
 * kvm_pmu_release_perf_event - remove the perf event
 * @pmc: The PMU counter pointer
 */
static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
{
	if (pmc->perf_event) {
		perf_event_disable(pmc->perf_event);
		perf_event_release_kernel(pmc->perf_event);
		pmc->perf_event = NULL;
	}
}

/**
 * kvm_pmu_stop_counter - stop PMU counter
 * @vcpu: The vcpu pointer
 * @pmc: The PMU counter pointer
 *
 * If this counter has been configured to monitor some event, release it here.
 */
static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
{
	u64 reg, val;

	if (!pmc->perf_event)
		return;

	val = kvm_pmu_get_counter_value(vcpu, pmc->idx);

	reg = counter_index_to_reg(pmc->idx);

	__vcpu_sys_reg(vcpu, reg) = val;

	kvm_pmu_release_perf_event(pmc);
}

/**
 * kvm_pmu_vcpu_init - assign pmu counter idx for cpu
 * @vcpu: The vcpu pointer
 *
 */
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
		pmu->pmc[i].idx = i;
}

/**
 * kvm_pmu_vcpu_reset - reset pmu state for cpu
 * @vcpu: The vcpu pointer
 *
 */
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
{
	unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	int i;

	for_each_set_bit(i, &mask, 32)
		kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]);
}

/**
 * kvm_pmu_vcpu_destroy - free perf event of PMU for cpu
 * @vcpu: The vcpu pointer
 *
 */
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
		kvm_pmu_release_perf_event(&pmu->pmc[i]);
	irq_work_sync(&vcpu->arch.pmu.overflow_work);
}

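/*
 * Mask of counters implemented for this guest: the PMCR_EL0.N event
 * counters (bits [N-1:0]) plus the cycle counter.
 */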
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
	u64 val = __vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT;

	val &= ARMV8_PMU_PMCR_N_MASK;
	if (val == 0)
		return BIT(ARMV8_PMU_CYCLE_IDX);
	else
		return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
}

/**
 * kvm_pmu_enable_counter_mask - enable selected PMU counters
 * @vcpu: The vcpu pointer
 * @val: the value the guest writes to the PMCNTENSET register
 *
 * Call perf_event_enable to start counting the perf event
 */
void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;

	if (!kvm_vcpu_has_pmu(vcpu))
		return;

	if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
		return;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		if (!(val & BIT(i)))
			continue;

		pmc = &pmu->pmc[i];

		if (!pmc->perf_event) {
			kvm_pmu_create_perf_event(vcpu, i);
		} else {
			perf_event_enable(pmc->perf_event);
			if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
				kvm_debug("failed to enable perf event\n");
		}
	}
}

/**
 * kvm_pmu_disable_counter_mask - disable selected PMU counters
 * @vcpu: The vcpu pointer
 * @val: the value the guest writes to the PMCNTENCLR register
 *
 * Call perf_event_disable to stop counting the perf event
 */
void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;

	if (!kvm_vcpu_has_pmu(vcpu) || !val)
		return;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		if (!(val & BIT(i)))
			continue;

		pmc = &pmu->pmc[i];

		if (pmc->perf_event)
			perf_event_disable(pmc->perf_event);
	}
}

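/*
 * Compute the set of counters that have overflowed, are enabled, and have
 * their overflow interrupt enabled, provided the PMU as a whole is enabled.
 */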
static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
{
	u64 reg = 0;

	if ((__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
		reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
		reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
		reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
	}

	return reg;
}

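/*
 * Recompute the PMU overflow interrupt level and, when using an in-kernel
 * irqchip, propagate any change to the vgic.
 */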
static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	bool overflow;

	if (!kvm_vcpu_has_pmu(vcpu))
		return;

	overflow = !!kvm_pmu_overflow_status(vcpu);
	if (pmu->irq_level == overflow)
		return;

	pmu->irq_level = overflow;

	if (likely(irqchip_in_kernel(vcpu->kvm))) {
		int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
					      pmu->irq_num, overflow, pmu);
		WARN_ON(ret);
	}
}

bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
	bool run_level = sregs->device_irq_level & KVM_ARM_DEV_PMU;

	if (likely(irqchip_in_kernel(vcpu->kvm)))
		return false;

	return pmu->irq_level != run_level;
}

/*
 * Reflect the PMU overflow interrupt output level into the kvm_run structure
 */
void kvm_pmu_update_run(struct kvm_vcpu *vcpu)
{
	struct kvm_sync_regs *regs = &vcpu->run->s.regs;

	/* Populate the PMU bit of the device IRQ bitmap for user space */
	regs->device_irq_level &= ~KVM_ARM_DEV_PMU;
	if (vcpu->arch.pmu.irq_level)
		regs->device_irq_level |= KVM_ARM_DEV_PMU;
}

/**
 * kvm_pmu_flush_hwstate - flush pmu state to cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the host, and inject
 * an interrupt if that was the case.
 */
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu)
{
	kvm_pmu_update_state(vcpu);
}

/**
 * kvm_pmu_sync_hwstate - sync pmu state from cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the guest, and
 * inject an interrupt if that was the case.
 */
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
{
	kvm_pmu_update_state(vcpu);
}

3769ed24f4bSMarc Zyngier /**
37795e92e45SJulien Thierry  * When perf interrupt is an NMI, we cannot safely notify the vcpu corresponding
37895e92e45SJulien Thierry  * to the event.
37995e92e45SJulien Thierry  * This is why we need a callback to do it once outside of the NMI context.
38095e92e45SJulien Thierry  */
static void kvm_pmu_perf_overflow_notify_vcpu(struct irq_work *work)
{
	struct kvm_vcpu *vcpu;
	struct kvm_pmu *pmu;

	pmu = container_of(work, struct kvm_pmu, overflow_work);
	vcpu = kvm_pmc_to_vcpu(pmu->pmc);

	kvm_vcpu_kick(vcpu);
}

/*
 * Perform an increment on any of the counters described in @mask,
 * generating the overflow if required, and propagating it as a chained
 * event if possible.
 */
static void kvm_pmu_counter_increment(struct kvm_vcpu *vcpu,
				      unsigned long mask, u32 event)
{
	int i;

	if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
		return;

	/* Weed out disabled counters */
	mask &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);

	for_each_set_bit(i, &mask, ARMV8_PMU_CYCLE_IDX) {
		u64 type, reg;

		/* Filter on event type */
		type = __vcpu_sys_reg(vcpu, counter_index_to_evtreg(i));
		type &= kvm_pmu_event_mask(vcpu->kvm);
		if (type != event)
			continue;

		/* Increment this counter */
		reg = __vcpu_sys_reg(vcpu, counter_index_to_reg(i)) + 1;
		if (!kvm_pmu_idx_is_64bit(vcpu, i))
			reg = lower_32_bits(reg);
		__vcpu_sys_reg(vcpu, counter_index_to_reg(i)) = reg;

		/* No overflow? move on */
		if (kvm_pmu_idx_has_64bit_overflow(vcpu, i) ? reg : lower_32_bits(reg))
			continue;

		/* Mark overflow */
		__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);

		if (kvm_pmu_counter_can_chain(vcpu, i))
			kvm_pmu_counter_increment(vcpu, BIT(i + 1),
						  ARMV8_PMUV3_PERFCTR_CHAIN);
	}
}

/* Compute the sample period for a given counter value */
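/*
 * The period is the distance from the current value to the point where the
 * counter next overflows, which sits at 32 or 64 bits depending on the
 * counter configuration (see kvm_pmu_idx_has_64bit_overflow()).
 */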
static u64 compute_period(struct kvm_vcpu *vcpu, u64 select_idx, u64 counter)
{
	u64 val;

	if (kvm_pmu_idx_is_64bit(vcpu, select_idx)) {
		if (!kvm_pmu_idx_has_64bit_overflow(vcpu, select_idx))
			val = -(counter & GENMASK(31, 0));
		else
			val = (-counter) & GENMASK(63, 0);
	} else {
		val = (-counter) & GENMASK(31, 0);
	}

	return val;
}

/*
 * When the perf event overflows, set the overflow status and inform the vcpu.
 */
static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
				  struct perf_sample_data *data,
				  struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	struct arm_pmu *cpu_pmu = to_arm_pmu(perf_event->pmu);
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	int idx = pmc->idx;
	u64 period;

	cpu_pmu->pmu.stop(perf_event, PERF_EF_UPDATE);

	/*
	 * Reset the sample period to the architectural limit,
	 * i.e. the point where the counter overflows.
	 */
	period = compute_period(vcpu, idx, local64_read(&perf_event->count));

	local64_set(&perf_event->hw.period_left, 0);
	perf_event->attr.sample_period = period;
	perf_event->hw.sample_period = period;

	__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);

	if (kvm_pmu_counter_can_chain(vcpu, idx))
		kvm_pmu_counter_increment(vcpu, BIT(idx + 1),
					  ARMV8_PMUV3_PERFCTR_CHAIN);

	if (kvm_pmu_overflow_status(vcpu)) {
		kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);

		if (!in_nmi())
			kvm_vcpu_kick(vcpu);
		else
			irq_work_queue(&vcpu->arch.pmu.overflow_work);
	}

	cpu_pmu->pmu.start(perf_event, PERF_EF_RELOAD);
}

/**
 * kvm_pmu_software_increment - do software increment
 * @vcpu: The vcpu pointer
 * @val: the value the guest writes to the PMSWINC register
 */
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
{
	kvm_pmu_counter_increment(vcpu, val, ARMV8_PMUV3_PERFCTR_SW_INCR);
}

/**
 * kvm_pmu_handle_pmcr - handle PMCR register
 * @vcpu: The vcpu pointer
 * @val: the value the guest writes to the PMCR register
 */
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
{
	int i;

	if (!kvm_vcpu_has_pmu(vcpu))
		return;

	if (val & ARMV8_PMU_PMCR_E) {
		kvm_pmu_enable_counter_mask(vcpu,
		       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
	} else {
		kvm_pmu_disable_counter_mask(vcpu,
		       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
	}

	if (val & ARMV8_PMU_PMCR_C)
		kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);

	if (val & ARMV8_PMU_PMCR_P) {
		unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
		mask &= ~BIT(ARMV8_PMU_CYCLE_IDX);
		for_each_set_bit(i, &mask, 32)
			kvm_pmu_set_counter_value(vcpu, i, 0);
	}
}

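/*
 * A counter counts only when the PMU is globally enabled (PMCR_EL0.E) and
 * the counter's bit is set in PMCNTENSET_EL0.
 */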
static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx)
{
	return (__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) &&
	       (__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(select_idx));
}

/**
 * kvm_pmu_create_perf_event - create a perf event for a counter
 * @vcpu: The vcpu pointer
 * @select_idx: The number of selected counter
 */
static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
{
	struct arm_pmu *arm_pmu = vcpu->kvm->arch.arm_pmu;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc = &pmu->pmc[select_idx];
	struct perf_event *event;
	struct perf_event_attr attr;
	u64 eventsel, counter, reg, data;

	reg = counter_index_to_evtreg(select_idx);
	data = __vcpu_sys_reg(vcpu, reg);

	kvm_pmu_stop_counter(vcpu, pmc);
	if (pmc->idx == ARMV8_PMU_CYCLE_IDX)
		eventsel = ARMV8_PMUV3_PERFCTR_CPU_CYCLES;
	else
		eventsel = data & kvm_pmu_event_mask(vcpu->kvm);

	/*
	 * Neither SW increment nor chained events need to be backed
	 * by a perf event.
	 */
	if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR ||
	    eventsel == ARMV8_PMUV3_PERFCTR_CHAIN)
		return;

	/*
	 * If we have a filter in place and the event isn't allowed, do
	 * not install a perf event either.
	 */
	if (vcpu->kvm->arch.pmu_filter &&
	    !test_bit(eventsel, vcpu->kvm->arch.pmu_filter))
		return;

	memset(&attr, 0, sizeof(struct perf_event_attr));
	attr.type = arm_pmu->pmu.type;
	attr.size = sizeof(attr);
	attr.pinned = 1;
	attr.disabled = !kvm_pmu_counter_is_enabled(vcpu, pmc->idx);
	attr.exclude_user = data & ARMV8_PMU_EXCLUDE_EL0 ? 1 : 0;
	attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0;
	attr.exclude_hv = 1; /* Don't count EL2 events */
	attr.exclude_host = 1; /* Don't count host events */
	attr.config = eventsel;

	counter = kvm_pmu_get_counter_value(vcpu, select_idx);

	/*
	 * If counting with a 64bit counter, advertise it to the perf
	 * code, carefully dealing with the initial sample period
	 * which also depends on the overflow.
	 */
	if (kvm_pmu_idx_is_64bit(vcpu, select_idx))
		attr.config1 |= PERF_ATTR_CFG1_COUNTER_64BIT;

	attr.sample_period = compute_period(vcpu, select_idx, counter);

	event = perf_event_create_kernel_counter(&attr, -1, current,
						 kvm_pmu_perf_overflow, pmc);

	if (IS_ERR(event)) {
		pr_err_once("kvm: pmu event creation failed %ld\n",
			    PTR_ERR(event));
		return;
	}

	pmc->perf_event = event;
}

/**
 * kvm_pmu_set_counter_event_type - set selected counter to monitor some event
 * @vcpu: The vcpu pointer
 * @data: The data the guest writes to PMXEVTYPER_EL0
 * @select_idx: The number of selected counter
 *
 * When the guest accesses PMXEVTYPER_EL0, it wants to set a PMC to count an
 * event with the given hardware event number. Here we call the perf_event API
 * to emulate this action and create a kernel perf event for it.
 */
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
				    u64 select_idx)
{
	u64 reg, mask;

	if (!kvm_vcpu_has_pmu(vcpu))
		return;

	mask  =  ARMV8_PMU_EVTYPE_MASK;
	mask &= ~ARMV8_PMU_EVTYPE_EVENT;
	mask |= kvm_pmu_event_mask(vcpu->kvm);

	reg = counter_index_to_evtreg(select_idx);

	__vcpu_sys_reg(vcpu, reg) = data & mask;

	kvm_pmu_create_perf_event(vcpu, select_idx);
}

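/*
 * Track a freshly probed host PMU in arm_pmus, and enable the
 * kvm_arm_pmu_available static key when the first usable PMU is registered.
 */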
void kvm_host_pmu_init(struct arm_pmu *pmu)
{
	struct arm_pmu_entry *entry;

	if (pmu->pmuver == 0 || pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
		return;

	mutex_lock(&arm_pmus_lock);

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		goto out_unlock;

	entry->arm_pmu = pmu;
	list_add_tail(&entry->entry, &arm_pmus);

	if (list_is_singular(&arm_pmus))
		static_branch_enable(&kvm_arm_pmu_available);

out_unlock:
	mutex_unlock(&arm_pmus_lock);
}

static struct arm_pmu *kvm_pmu_probe_armpmu(void)
{
	struct perf_event_attr attr = { };
	struct perf_event *event;
	struct arm_pmu *pmu = NULL;

	/*
	 * Create a dummy event that only counts user cycles. As we'll never
	 * leave this function with the event being live, it will never
	 * count anything. But it allows us to probe some of the PMU
	 * details. Yes, this is terrible.
	 */
	attr.type = PERF_TYPE_RAW;
	attr.size = sizeof(attr);
	attr.pinned = 1;
	attr.disabled = 0;
	attr.exclude_user = 0;
	attr.exclude_kernel = 1;
	attr.exclude_hv = 1;
	attr.exclude_host = 1;
	attr.config = ARMV8_PMUV3_PERFCTR_CPU_CYCLES;
	attr.sample_period = GENMASK(63, 0);

	event = perf_event_create_kernel_counter(&attr, -1, current,
						 kvm_pmu_perf_overflow, &attr);

	if (IS_ERR(event)) {
		pr_err_once("kvm: pmu event creation failed %ld\n",
			    PTR_ERR(event));
		return NULL;
	}

	if (event->pmu) {
		pmu = to_arm_pmu(event->pmu);
		if (pmu->pmuver == 0 ||
		    pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
			pmu = NULL;
	}

	perf_event_disable(event);
	perf_event_release_kernel(event);

	return pmu;
}

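/*
 * Build the guest's view of PMCEID0_EL0/PMCEID1_EL0 from the host's common
 * event identification registers, masking out anything that the event
 * filter hides.
 */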
u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
{
	unsigned long *bmap = vcpu->kvm->arch.pmu_filter;
	u64 val, mask = 0;
	int base, i, nr_events;

	if (!kvm_vcpu_has_pmu(vcpu))
		return 0;

	if (!pmceid1) {
		val = read_sysreg(pmceid0_el0);
		/* always support CHAIN */
		val |= BIT(ARMV8_PMUV3_PERFCTR_CHAIN);
		base = 0;
	} else {
		val = read_sysreg(pmceid1_el0);
		/*
		 * Don't advertise STALL_SLOT, as PMMIR_EL0 is handled
		 * as RAZ
		 */
		if (vcpu->kvm->arch.arm_pmu->pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P4)
			val &= ~BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32);
		base = 32;
	}

	if (!bmap)
		return val;

	nr_events = kvm_pmu_event_mask(vcpu->kvm) + 1;

	for (i = 0; i < 32; i += 8) {
		u64 byte;

		byte = bitmap_get_value8(bmap, base + i);
		mask |= byte << i;
		if (nr_events >= (0x4000 + base + 32)) {
			byte = bitmap_get_value8(bmap, 0x4000 + base + i);
			mask |= byte << (32 + i);
		}
	}

	return val & mask;
}

int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
	if (!kvm_vcpu_has_pmu(vcpu))
		return 0;

	if (!vcpu->arch.pmu.created)
		return -EINVAL;

	/*
	 * A valid interrupt configuration for the PMU is either to have a
	 * properly configured interrupt number and using an in-kernel
	 * irqchip, or to not have an in-kernel GIC and not set an IRQ.
	 */
	if (irqchip_in_kernel(vcpu->kvm)) {
		int irq = vcpu->arch.pmu.irq_num;
		/*
		 * If we are using an in-kernel vgic, at this point we know
		 * the vgic will be initialized, so we can check the PMU irq
		 * number against the dimensions of the vgic and make sure
		 * it's valid.
		 */
		if (!irq_is_ppi(irq) && !vgic_valid_spi(vcpu->kvm, irq))
			return -EINVAL;
	} else if (kvm_arm_pmu_irq_initialized(vcpu)) {
		return -EINVAL;
	}

	/* One-off reload of the PMU on first run */
	kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);

	return 0;
}

static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
{
	if (irqchip_in_kernel(vcpu->kvm)) {
		int ret;

		/*
		 * If using the PMU with an in-kernel virtual GIC
		 * implementation, we require the GIC to be already
		 * initialized when initializing the PMU.
		 */
		if (!vgic_initialized(vcpu->kvm))
			return -ENODEV;

		if (!kvm_arm_pmu_irq_initialized(vcpu))
			return -ENXIO;

		ret = kvm_vgic_set_owner(vcpu, vcpu->arch.pmu.irq_num,
					 &vcpu->arch.pmu);
		if (ret)
			return ret;
	}

	init_irq_work(&vcpu->arch.pmu.overflow_work,
		      kvm_pmu_perf_overflow_notify_vcpu);

	vcpu->arch.pmu.created = true;
	return 0;
}

/*
 * For one VM the interrupt type must be the same for each vcpu.
 * As a PPI, the interrupt number is the same for all vcpus,
 * while as an SPI it must be a separate number per vcpu.
 */
static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!kvm_arm_pmu_irq_initialized(vcpu))
			continue;

		if (irq_is_ppi(irq)) {
			if (vcpu->arch.pmu.irq_num != irq)
				return false;
		} else {
			if (vcpu->arch.pmu.irq_num == irq)
				return false;
		}
	}

	return true;
}

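/*
 * Bind the VM to a specific host PMU. This fails once the VM has already
 * run, or if an event filter was installed while a different PMU was in
 * use; -ENXIO is returned when no registered PMU matches @pmu_id.
 */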
static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id)
{
	struct kvm *kvm = vcpu->kvm;
	struct arm_pmu_entry *entry;
	struct arm_pmu *arm_pmu;
	int ret = -ENXIO;

	mutex_lock(&kvm->lock);
	mutex_lock(&arm_pmus_lock);

	list_for_each_entry(entry, &arm_pmus, entry) {
		arm_pmu = entry->arm_pmu;
		if (arm_pmu->pmu.type == pmu_id) {
			if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags) ||
			    (kvm->arch.pmu_filter && kvm->arch.arm_pmu != arm_pmu)) {
				ret = -EBUSY;
				break;
			}

			kvm->arch.arm_pmu = arm_pmu;
			cpumask_copy(kvm->arch.supported_cpus, &arm_pmu->supported_cpus);
			ret = 0;
			break;
		}
	}

	mutex_unlock(&arm_pmus_lock);
	mutex_unlock(&kvm->lock);
	return ret;
}

int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	struct kvm *kvm = vcpu->kvm;

	if (!kvm_vcpu_has_pmu(vcpu))
		return -ENODEV;

	if (vcpu->arch.pmu.created)
		return -EBUSY;

	mutex_lock(&kvm->lock);
	if (!kvm->arch.arm_pmu) {
		/* No PMU set, get the default one */
		kvm->arch.arm_pmu = kvm_pmu_probe_armpmu();
		if (!kvm->arch.arm_pmu) {
			mutex_unlock(&kvm->lock);
			return -ENODEV;
		}
	}
	mutex_unlock(&kvm->lock);

	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ: {
		int __user *uaddr = (int __user *)(long)attr->addr;
		int irq;

		if (!irqchip_in_kernel(kvm))
			return -EINVAL;

		if (get_user(irq, uaddr))
			return -EFAULT;

		/* The PMU overflow interrupt can be a PPI or a valid SPI. */
		if (!(irq_is_ppi(irq) || irq_is_spi(irq)))
			return -EINVAL;

		if (!pmu_irq_is_valid(kvm, irq))
			return -EINVAL;

		if (kvm_arm_pmu_irq_initialized(vcpu))
			return -EBUSY;

		kvm_debug("Set kvm ARM PMU irq: %d\n", irq);
		vcpu->arch.pmu.irq_num = irq;
		return 0;
	}
	case KVM_ARM_VCPU_PMU_V3_FILTER: {
		struct kvm_pmu_event_filter __user *uaddr;
		struct kvm_pmu_event_filter filter;
		int nr_events;

		nr_events = kvm_pmu_event_mask(kvm) + 1;

		uaddr = (struct kvm_pmu_event_filter __user *)(long)attr->addr;

		if (copy_from_user(&filter, uaddr, sizeof(filter)))
			return -EFAULT;

		if (((u32)filter.base_event + filter.nevents) > nr_events ||
		    (filter.action != KVM_PMU_EVENT_ALLOW &&
		     filter.action != KVM_PMU_EVENT_DENY))
			return -EINVAL;

		mutex_lock(&kvm->lock);

		if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags)) {
			mutex_unlock(&kvm->lock);
			return -EBUSY;
		}

		if (!kvm->arch.pmu_filter) {
			kvm->arch.pmu_filter = bitmap_alloc(nr_events, GFP_KERNEL_ACCOUNT);
			if (!kvm->arch.pmu_filter) {
				mutex_unlock(&kvm->lock);
				return -ENOMEM;
			}

			/*
			 * The default depends on the first applied filter.
			 * If it allows events, the default is to deny.
			 * Conversely, if the first filter denies a set of
			 * events, the default is to allow.
			 */
			if (filter.action == KVM_PMU_EVENT_ALLOW)
				bitmap_zero(kvm->arch.pmu_filter, nr_events);
			else
				bitmap_fill(kvm->arch.pmu_filter, nr_events);
		}

		if (filter.action == KVM_PMU_EVENT_ALLOW)
			bitmap_set(kvm->arch.pmu_filter, filter.base_event, filter.nevents);
		else
			bitmap_clear(kvm->arch.pmu_filter, filter.base_event, filter.nevents);

		mutex_unlock(&kvm->lock);

		return 0;
	}
	case KVM_ARM_VCPU_PMU_V3_SET_PMU: {
		int __user *uaddr = (int __user *)(long)attr->addr;
		int pmu_id;

		if (get_user(pmu_id, uaddr))
			return -EFAULT;

		return kvm_arm_pmu_v3_set_pmu(vcpu, pmu_id);
	}
	case KVM_ARM_VCPU_PMU_V3_INIT:
		return kvm_arm_pmu_v3_init(vcpu);
	}

	return -ENXIO;
}

int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ: {
		int __user *uaddr = (int __user *)(long)attr->addr;
		int irq;

		if (!irqchip_in_kernel(vcpu->kvm))
			return -EINVAL;

		if (!kvm_vcpu_has_pmu(vcpu))
			return -ENODEV;

		if (!kvm_arm_pmu_irq_initialized(vcpu))
			return -ENXIO;

		irq = vcpu->arch.pmu.irq_num;
		return put_user(irq, uaddr);
	}
	}

	return -ENXIO;
}

int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ:
	case KVM_ARM_VCPU_PMU_V3_INIT:
	case KVM_ARM_VCPU_PMU_V3_FILTER:
	case KVM_ARM_VCPU_PMU_V3_SET_PMU:
		if (kvm_vcpu_has_pmu(vcpu))
			return 0;
	}

	return -ENXIO;
}