// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2023 Rivos Inc
 *
 * Authors:
 *     Atish Patra <atishp@rivosinc.com>
 */

#define pr_fmt(fmt)	"riscv-kvm-pmu: " fmt
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/perf/riscv_pmu.h>
#include <asm/csr.h>
#include <asm/kvm_vcpu_sbi.h>
#include <asm/kvm_vcpu_pmu.h>
#include <linux/bitops.h>

#define kvm_pmu_num_counters(pmu) ((pmu)->num_hw_ctrs + (pmu)->num_fw_ctrs)
#define get_event_type(x) (((x) & SBI_PMU_EVENT_IDX_TYPE_MASK) >> 16)
#define get_event_code(x) ((x) & SBI_PMU_EVENT_IDX_CODE_MASK)

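/*
 * Map the SBI PMU generic hardware event codes onto the perf subsystem's
 * generic hardware event IDs. The two enumerations happen to list the same
 * events in the same order, but an explicit table keeps the translation
 * obvious.
 */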
static enum perf_hw_id hw_event_perf_map[SBI_PMU_HW_GENERAL_MAX] = {
	[SBI_PMU_HW_CPU_CYCLES] = PERF_COUNT_HW_CPU_CYCLES,
	[SBI_PMU_HW_INSTRUCTIONS] = PERF_COUNT_HW_INSTRUCTIONS,
	[SBI_PMU_HW_CACHE_REFERENCES] = PERF_COUNT_HW_CACHE_REFERENCES,
	[SBI_PMU_HW_CACHE_MISSES] = PERF_COUNT_HW_CACHE_MISSES,
	[SBI_PMU_HW_BRANCH_INSTRUCTIONS] = PERF_COUNT_HW_BRANCH_INSTRUCTIONS,
	[SBI_PMU_HW_BRANCH_MISSES] = PERF_COUNT_HW_BRANCH_MISSES,
	[SBI_PMU_HW_BUS_CYCLES] = PERF_COUNT_HW_BUS_CYCLES,
	[SBI_PMU_HW_STALLED_CYCLES_FRONTEND] = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND,
	[SBI_PMU_HW_STALLED_CYCLES_BACKEND] = PERF_COUNT_HW_STALLED_CYCLES_BACKEND,
	[SBI_PMU_HW_REF_CPU_CYCLES] = PERF_COUNT_HW_REF_CPU_CYCLES,
};

static u64 kvm_pmu_get_sample_period(struct kvm_pmc *pmc)
{
	u64 counter_val_mask = GENMASK(pmc->cinfo.width, 0);
	u64 sample_period;

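	/*
	 * The sample period is the distance from the current counter value
	 * to the point where the (width + 1)-bit guest counter wraps, so
	 * the perf event overflows exactly when the guest counter would.
	 * For example, assuming a 48-bit counter (width == 47), a
	 * counter_val of 0xffffffffff00 yields a period of 0x100.
	 */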
	if (!pmc->counter_val)
		sample_period = counter_val_mask;
	else
		sample_period = (-pmc->counter_val) & counter_val_mask;

	return sample_period;
}

static u32 kvm_pmu_get_perf_event_type(unsigned long eidx)
{
	enum sbi_pmu_event_type etype = get_event_type(eidx);
	u32 type = PERF_TYPE_MAX;

	switch (etype) {
	case SBI_PMU_EVENT_TYPE_HW:
		type = PERF_TYPE_HARDWARE;
		break;
	case SBI_PMU_EVENT_TYPE_CACHE:
		type = PERF_TYPE_HW_CACHE;
		break;
	case SBI_PMU_EVENT_TYPE_RAW:
	case SBI_PMU_EVENT_TYPE_FW:
		type = PERF_TYPE_RAW;
		break;
	default:
		break;
	}

	return type;
}

static bool kvm_pmu_is_fw_event(unsigned long eidx)
{
	return get_event_type(eidx) == SBI_PMU_EVENT_TYPE_FW;
}

static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
{
	if (pmc->perf_event) {
		perf_event_disable(pmc->perf_event);
		perf_event_release_kernel(pmc->perf_event);
		pmc->perf_event = NULL;
	}
}

static u64 kvm_pmu_get_perf_event_hw_config(u32 sbi_event_code)
{
	return hw_event_perf_map[sbi_event_code];
}

static u64 kvm_pmu_get_perf_event_cache_config(u32 sbi_event_code)
{
	u64 config = U64_MAX;
	unsigned int cache_type, cache_op, cache_result;

	/* All the cache event ID fields fit within 0xFF, so no further masking is necessary */
	cache_type = (sbi_event_code & SBI_PMU_EVENT_CACHE_ID_CODE_MASK) >>
		      SBI_PMU_EVENT_CACHE_ID_SHIFT;
	cache_op = (sbi_event_code & SBI_PMU_EVENT_CACHE_OP_ID_CODE_MASK) >>
		    SBI_PMU_EVENT_CACHE_OP_SHIFT;
	cache_result = sbi_event_code & SBI_PMU_EVENT_CACHE_RESULT_ID_CODE_MASK;

	if (cache_type >= PERF_COUNT_HW_CACHE_MAX ||
	    cache_op >= PERF_COUNT_HW_CACHE_OP_MAX ||
	    cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return config;

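	/*
	 * Compose the config in perf's PERF_TYPE_HW_CACHE encoding:
	 * cache ID in bits 0-7, operation in bits 8-15, result in bits 16-23.
	 */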
	config = cache_type | (cache_op << 8) | (cache_result << 16);

	return config;
}

static u64 kvm_pmu_get_perf_event_config(unsigned long eidx, u64 evt_data)
{
	enum sbi_pmu_event_type etype = get_event_type(eidx);
	u32 ecode = get_event_code(eidx);
	u64 config = U64_MAX;

	switch (etype) {
	case SBI_PMU_EVENT_TYPE_HW:
		if (ecode < SBI_PMU_HW_GENERAL_MAX)
			config = kvm_pmu_get_perf_event_hw_config(ecode);
		break;
	case SBI_PMU_EVENT_TYPE_CACHE:
		config = kvm_pmu_get_perf_event_cache_config(ecode);
		break;
	case SBI_PMU_EVENT_TYPE_RAW:
		config = evt_data & RISCV_PMU_RAW_EVENT_MASK;
		break;
	case SBI_PMU_EVENT_TYPE_FW:
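		/* Set bit 63 so a firmware event can be told apart from a hardware raw event */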
		if (ecode < SBI_PMU_FW_MAX)
			config = (1ULL << 63) | ecode;
		break;
	default:
		break;
	}

	return config;
}

static int kvm_pmu_get_fixed_pmc_index(unsigned long eidx)
{
	u32 etype = kvm_pmu_get_perf_event_type(eidx);
	u32 ecode = get_event_code(eidx);

	if (etype != PERF_TYPE_HARDWARE)
		return -EINVAL;

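	/*
	 * Mirror the counter CSR layout: slot 0 is cycle, slot 1 is time
	 * (not a PMU counter), and slot 2 is instret.
	 */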
	if (ecode == SBI_PMU_HW_CPU_CYCLES)
		return 0;
	else if (ecode == SBI_PMU_HW_INSTRUCTIONS)
		return 2;
	else
		return -EINVAL;
}

static int kvm_pmu_get_programmable_pmc_index(struct kvm_pmu *kvpmu, unsigned long eidx,
					      unsigned long cbase, unsigned long cmask)
{
	int ctr_idx = -1;
	int i, pmc_idx;
	int min, max;

	if (kvm_pmu_is_fw_event(eidx)) {
		/* Firmware counters are mapped 1:1 starting from num_hw_ctrs for simplicity */
		min = kvpmu->num_hw_ctrs;
		max = min + kvpmu->num_fw_ctrs;
	} else {
		/* The first three counters are reserved for the fixed counters (cycle, time, instret) */
		min = 3;
		max = kvpmu->num_hw_ctrs;
	}

	for_each_set_bit(i, &cmask, BITS_PER_LONG) {
		pmc_idx = i + cbase;
		if ((pmc_idx >= min && pmc_idx < max) &&
		    !test_bit(pmc_idx, kvpmu->pmc_in_use)) {
			ctr_idx = pmc_idx;
			break;
		}
	}

	return ctr_idx;
}

static int pmu_get_pmc_index(struct kvm_pmu *pmu, unsigned long eidx,
			     unsigned long cbase, unsigned long cmask)
{
	int ret;

	/* Fixed counters need a fixed mapping as they have a different width */
	ret = kvm_pmu_get_fixed_pmc_index(eidx);
	if (ret >= 0)
		return ret;

	return kvm_pmu_get_programmable_pmc_index(pmu, eidx, cbase, cmask);
}

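/*
 * Read the current value of a virtual counter: firmware counters are
 * backed by kvpmu->fw_event, while hardware counters accumulate the
 * delta reported by their perf event into counter_val.
 */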
static int pmu_ctr_read(struct kvm_vcpu *vcpu, unsigned long cidx,
			unsigned long *out_val)
{
	struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u64 enabled, running;
	int fevent_code;

	pmc = &kvpmu->pmc[cidx];

	if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW) {
		fevent_code = get_event_code(pmc->event_idx);
		pmc->counter_val = kvpmu->fw_event[fevent_code].value;
	} else if (pmc->perf_event) {
		pmc->counter_val += perf_event_read_value(pmc->perf_event, &enabled, &running);
	} else {
		return -EINVAL;
	}
	*out_val = pmc->counter_val;

	return 0;
}

static int kvm_pmu_validate_counter_mask(struct kvm_pmu *kvpmu, unsigned long ctr_base,
					 unsigned long ctr_mask)
{
	/* Make sure the caller requested a valid counter mask: the highest selected counter must exist */
	if (!ctr_mask || (ctr_base + __fls(ctr_mask) >= kvm_pmu_num_counters(kvpmu)))
		return -EINVAL;

	return 0;
}

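/*
 * (Re)create the perf event backing a hardware counter. Any previous
 * event is released first, and the new event is bound to the current
 * task so it only counts while this vCPU thread runs.
 */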
static int kvm_pmu_create_perf_event(struct kvm_pmc *pmc, struct perf_event_attr *attr,
				     unsigned long flags, unsigned long eidx, unsigned long evtdata)
{
	struct perf_event *event;

	kvm_pmu_release_perf_event(pmc);
	attr->config = kvm_pmu_get_perf_event_config(eidx, evtdata);
	if (flags & SBI_PMU_CFG_FLAG_CLEAR_VALUE) {
		/* TODO: Do we really want to clear the value in the hardware counter? */
		pmc->counter_val = 0;
	}

	/*
	 * Set the default sample_period for now. The guest specified value
	 * will be updated in the start call.
	 */
	attr->sample_period = kvm_pmu_get_sample_period(pmc);

	event = perf_event_create_kernel_counter(attr, -1, current, NULL, pmc);
	if (IS_ERR(event)) {
		pr_err("kvm pmu event creation failed for eidx %lx: %ld\n", eidx, PTR_ERR(event));
		return PTR_ERR(event);
	}

	pmc->perf_event = event;
	if (flags & SBI_PMU_CFG_FLAG_AUTO_START)
		perf_event_enable(pmc->perf_event);

	return 0;
}

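/*
 * Bump a firmware event counter. Callers such as KVM's SBI handlers use
 * this when they emulate an event the SBI PMU spec defines as a firmware
 * event, provided the guest has started the corresponding counter.
 */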
int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid)
{
	struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
	struct kvm_fw_event *fevent;

	if (!kvpmu || fid >= SBI_PMU_FW_MAX)
		return -EINVAL;

	fevent = &kvpmu->fw_event[fid];
	if (fevent->started)
		fevent->value++;

	return 0;
}

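/*
 * Emulate guest reads of the counter CSRs (cycle, instret, hpmcounters).
 * Returns a KVM_INSN_* action telling the trap handler how to proceed.
 */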
int kvm_riscv_vcpu_pmu_read_hpm(struct kvm_vcpu *vcpu, unsigned int csr_num,
				unsigned long *val, unsigned long new_val,
				unsigned long wr_mask)
{
	struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
	int cidx, ret = KVM_INSN_CONTINUE_NEXT_SEPC;

	if (!kvpmu || !kvpmu->init_done) {
		/*
		 * In the absence of sscofpmf on the platform, the guest OS may
		 * use the legacy PMU driver to read cycle/instret. In that
		 * case, just return 0 to avoid an illegal trap. However, any
		 * other hpmcounter access should result in an illegal trap,
		 * as those counters must be accessed through the SBI PMU
		 * interface only.
		 */
		if (csr_num == CSR_CYCLE || csr_num == CSR_INSTRET) {
			*val = 0;
			return ret;
		} else {
			return KVM_INSN_ILLEGAL_TRAP;
		}
	}

	/* The counter CSRs are read-only, so any write attempt results in an illegal trap */
	if (wr_mask)
		return KVM_INSN_ILLEGAL_TRAP;

	cidx = csr_num - CSR_CYCLE;

	if (pmu_ctr_read(vcpu, cidx, val) < 0)
		return KVM_INSN_ILLEGAL_TRAP;

	return ret;
}

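/* SBI num_counters handler: report the total virtual counter count (hardware + firmware) */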
int kvm_riscv_vcpu_pmu_num_ctrs(struct kvm_vcpu *vcpu,
				struct kvm_vcpu_sbi_return *retdata)
{
	struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);

	retdata->out_val = kvm_pmu_num_counters(kvpmu);

	return 0;
}

int kvm_riscv_vcpu_pmu_ctr_info(struct kvm_vcpu *vcpu, unsigned long cidx,
				struct kvm_vcpu_sbi_return *retdata)
{
	struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);

	/* Reject out-of-range indices and counter 1, which shadows the TIME CSR */
	if (cidx >= RISCV_KVM_MAX_COUNTERS || cidx == 1) {
		retdata->err_val = SBI_ERR_INVALID_PARAM;
		return 0;
	}

	retdata->out_val = kvpmu->pmc[cidx].cinfo.value;

	return 0;
}

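/*
 * SBI counter_start handler: start each in-use counter in the requested
 * range, optionally loading ival as the new initial value first.
 */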
int kvm_riscv_vcpu_pmu_ctr_start(struct kvm_vcpu *vcpu, unsigned long ctr_base,
				 unsigned long ctr_mask, unsigned long flags, u64 ival,
				 struct kvm_vcpu_sbi_return *retdata)
{
	struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
	int i, pmc_index, sbiret = 0;
	struct kvm_pmc *pmc;
	int fevent_code;

	if (kvm_pmu_validate_counter_mask(kvpmu, ctr_base, ctr_mask) < 0) {
		sbiret = SBI_ERR_INVALID_PARAM;
		goto out;
	}

	/* Start the counters that have been configured and requested by the guest */
	for_each_set_bit(i, &ctr_mask, RISCV_MAX_COUNTERS) {
		pmc_index = i + ctr_base;
		if (!test_bit(pmc_index, kvpmu->pmc_in_use))
			continue;
		pmc = &kvpmu->pmc[pmc_index];
		if (flags & SBI_PMU_START_FLAG_SET_INIT_VALUE)
			pmc->counter_val = ival;
		if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW) {
			fevent_code = get_event_code(pmc->event_idx);
			if (fevent_code >= SBI_PMU_FW_MAX) {
				sbiret = SBI_ERR_INVALID_PARAM;
				goto out;
			}

			/* Check if the counter was already started for some reason */
			if (kvpmu->fw_event[fevent_code].started) {
				sbiret = SBI_ERR_ALREADY_STARTED;
				continue;
			}

			kvpmu->fw_event[fevent_code].started = true;
			kvpmu->fw_event[fevent_code].value = pmc->counter_val;
		} else if (pmc->perf_event) {
			if (unlikely(pmc->started)) {
				sbiret = SBI_ERR_ALREADY_STARTED;
				continue;
			}
			perf_event_period(pmc->perf_event, kvm_pmu_get_sample_period(pmc));
			perf_event_enable(pmc->perf_event);
			pmc->started = true;
		} else {
			sbiret = SBI_ERR_INVALID_PARAM;
		}
	}

out:
	retdata->err_val = sbiret;

	return 0;
}

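/*
 * SBI counter_stop handler: stop each in-use counter in the requested
 * range; with the RESET flag, also free its event and mark it unused.
 */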
int kvm_riscv_vcpu_pmu_ctr_stop(struct kvm_vcpu *vcpu, unsigned long ctr_base,
				unsigned long ctr_mask, unsigned long flags,
				struct kvm_vcpu_sbi_return *retdata)
{
	struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
	int i, pmc_index, sbiret = 0;
	u64 enabled, running;
	struct kvm_pmc *pmc;
	int fevent_code;

	if (kvm_pmu_validate_counter_mask(kvpmu, ctr_base, ctr_mask) < 0) {
		sbiret = SBI_ERR_INVALID_PARAM;
		goto out;
	}

	/* Stop the counters that have been configured and requested by the guest */
	for_each_set_bit(i, &ctr_mask, RISCV_MAX_COUNTERS) {
		pmc_index = i + ctr_base;
		if (!test_bit(pmc_index, kvpmu->pmc_in_use))
			continue;
		pmc = &kvpmu->pmc[pmc_index];
		if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW) {
			fevent_code = get_event_code(pmc->event_idx);
			if (fevent_code >= SBI_PMU_FW_MAX) {
				sbiret = SBI_ERR_INVALID_PARAM;
				goto out;
			}

			if (!kvpmu->fw_event[fevent_code].started)
				sbiret = SBI_ERR_ALREADY_STOPPED;

			kvpmu->fw_event[fevent_code].started = false;
		} else if (pmc->perf_event) {
			if (pmc->started) {
				/* Stop counting the counter */
				perf_event_disable(pmc->perf_event);
				pmc->started = false;
			} else {
				sbiret = SBI_ERR_ALREADY_STOPPED;
			}

			if (flags & SBI_PMU_STOP_FLAG_RESET) {
				/* Release the counter if this is a reset request */
				pmc->counter_val += perf_event_read_value(pmc->perf_event,
									  &enabled, &running);
				kvm_pmu_release_perf_event(pmc);
			}
		} else {
			sbiret = SBI_ERR_INVALID_PARAM;
		}
		if (flags & SBI_PMU_STOP_FLAG_RESET) {
			pmc->event_idx = SBI_PMU_EVENT_IDX_INVALID;
			clear_bit(pmc_index, kvpmu->pmc_in_use);
		}
	}

out:
	retdata->err_val = sbiret;

	return 0;
}

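/*
 * SBI counter_config_matching handler: pick a counter for the requested
 * event, create the backing perf event for hardware events, and
 * optionally auto-start it.
 */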
int kvm_riscv_vcpu_pmu_ctr_cfg_match(struct kvm_vcpu *vcpu, unsigned long ctr_base,
				     unsigned long ctr_mask, unsigned long flags,
				     unsigned long eidx, u64 evtdata,
				     struct kvm_vcpu_sbi_return *retdata)
{
	int ctr_idx, ret, sbiret = 0;
	bool is_fevent;
	unsigned long event_code;
	u32 etype = kvm_pmu_get_perf_event_type(eidx);
	struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc = NULL;
	struct perf_event_attr attr = {
		.type = etype,
		.size = sizeof(struct perf_event_attr),
		.pinned = true,
		/*
		 * We should never reach here if the platform doesn't support
		 * the sscofpmf extension, as mode filtering won't work
		 * without it.
		 */
		.exclude_host = true,
		.exclude_hv = true,
		.exclude_user = !!(flags & SBI_PMU_CFG_FLAG_SET_UINH),
		.exclude_kernel = !!(flags & SBI_PMU_CFG_FLAG_SET_SINH),
		.config1 = RISCV_PMU_CONFIG1_GUEST_EVENTS,
	};

	if (kvm_pmu_validate_counter_mask(kvpmu, ctr_base, ctr_mask) < 0) {
		sbiret = SBI_ERR_INVALID_PARAM;
		goto out;
	}

	event_code = get_event_code(eidx);
	is_fevent = kvm_pmu_is_fw_event(eidx);
	if (is_fevent && event_code >= SBI_PMU_FW_MAX) {
		sbiret = SBI_ERR_NOT_SUPPORTED;
		goto out;
	}

	/*
	 * The SKIP_MATCH flag indicates the caller already knows the counter
	 * assigned to this event. Just do a sanity check that it is marked
	 * as used.
	 */
	if (flags & SBI_PMU_CFG_FLAG_SKIP_MATCH) {
		if (!test_bit(ctr_base + __ffs(ctr_mask), kvpmu->pmc_in_use)) {
			sbiret = SBI_ERR_FAILURE;
			goto out;
		}
		ctr_idx = ctr_base + __ffs(ctr_mask);
	} else {
		ctr_idx = pmu_get_pmc_index(kvpmu, eidx, ctr_base, ctr_mask);
		if (ctr_idx < 0) {
			sbiret = SBI_ERR_NOT_SUPPORTED;
			goto out;
		}
	}

	pmc = &kvpmu->pmc[ctr_idx];
	pmc->idx = ctr_idx;

	if (is_fevent) {
		if (flags & SBI_PMU_CFG_FLAG_AUTO_START)
			kvpmu->fw_event[event_code].started = true;
	} else {
		ret = kvm_pmu_create_perf_event(pmc, &attr, flags, eidx, evtdata);
		if (ret)
			return ret;
	}

	set_bit(ctr_idx, kvpmu->pmc_in_use);
	pmc->event_idx = eidx;
	retdata->out_val = ctr_idx;
out:
	retdata->err_val = sbiret;

	return 0;
}

int kvm_riscv_vcpu_pmu_ctr_read(struct kvm_vcpu *vcpu, unsigned long cidx,
				struct kvm_vcpu_sbi_return *retdata)
{
	int ret;

	ret = pmu_ctr_read(vcpu, cidx, &retdata->out_val);
	if (ret == -EINVAL)
		retdata->err_val = SBI_ERR_INVALID_PARAM;

	return 0;
}

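/*
 * Set up the virtual PMU for a vCPU: size the counter space from the
 * host PMU and describe each counter for SBI counter info queries.
 */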
void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu)
{
	int i = 0, ret, num_hw_ctrs = 0, hpm_width = 0;
	struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;

	/*
	 * PMU functionality should only be available to guests if privilege
	 * mode filtering is available in the host. Otherwise, the guest will
	 * always count events while execution is in hypervisor mode.
	 */
	if (!riscv_isa_extension_available(NULL, SSCOFPMF))
		return;

	ret = riscv_pmu_get_hpm_info(&hpm_width, &num_hw_ctrs);
	if (ret < 0 || !hpm_width || !num_hw_ctrs)
		return;

	/*
	 * Increase the number of hardware counters by one to account for
	 * the TIME counter, which occupies virtual counter index 1.
	 */
	kvpmu->num_hw_ctrs = num_hw_ctrs + 1;
	kvpmu->num_fw_ctrs = SBI_PMU_FW_MAX;
	memset(&kvpmu->fw_event, 0, SBI_PMU_FW_MAX * sizeof(struct kvm_fw_event));

	if (kvpmu->num_hw_ctrs > RISCV_KVM_MAX_HW_CTRS) {
		pr_warn_once("Limiting the hardware counters to 32 as specified by the ISA\n");
		kvpmu->num_hw_ctrs = RISCV_KVM_MAX_HW_CTRS;
	}

	/*
	 * There is no correlation between the logical hardware counters and
	 * the virtual counters. However, we need to encode an hpmcounter CSR
	 * in the counter info field so that KVM can trap and emulate the
	 * read. This works well in the migration use case as KVM doesn't
	 * care whether the actual hpmcounter is available in the hardware
	 * or not.
	 */
	for (i = 0; i < kvm_pmu_num_counters(kvpmu); i++) {
		/* The TIME CSR shouldn't be read through the perf interface */
		if (i == 1)
			continue;
		pmc = &kvpmu->pmc[i];
		pmc->idx = i;
		pmc->event_idx = SBI_PMU_EVENT_IDX_INVALID;
		if (i < kvpmu->num_hw_ctrs) {
			pmc->cinfo.type = SBI_PMU_CTR_TYPE_HW;
			if (i < 3)
				/* CY, IR counters */
				pmc->cinfo.width = 63;
			else
				pmc->cinfo.width = hpm_width;
			/*
			 * The CSR number doesn't have any relation with the
			 * logical hardware counters. The CSR numbers are
			 * encoded sequentially to avoid maintaining a map
			 * between the virtual counter and CSR number.
			 */
			pmc->cinfo.csr = CSR_CYCLE + i;
		} else {
			pmc->cinfo.type = SBI_PMU_CTR_TYPE_FW;
			pmc->cinfo.width = BITS_PER_LONG - 1;
		}
	}

	kvpmu->init_done = true;
}

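/* Tear down the virtual PMU: release all perf events and reset all counter state */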
void kvm_riscv_vcpu_pmu_deinit(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	int i;

	if (!kvpmu)
		return;

	for_each_set_bit(i, kvpmu->pmc_in_use, RISCV_MAX_COUNTERS) {
		pmc = &kvpmu->pmc[i];
		pmc->counter_val = 0;
		kvm_pmu_release_perf_event(pmc);
		pmc->event_idx = SBI_PMU_EVENT_IDX_INVALID;
	}
	bitmap_zero(kvpmu->pmc_in_use, RISCV_MAX_COUNTERS);
	memset(&kvpmu->fw_event, 0, SBI_PMU_FW_MAX * sizeof(struct kvm_fw_event));
}

void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu)
{
	kvm_riscv_vcpu_pmu_deinit(vcpu);
}