/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 */

#ifndef __ASM_ARM_KVM_PMU_H
#define __ASM_ARM_KVM_PMU_H

#include <linux/perf_event.h>
#include <linux/perf/arm_pmuv3.h>

#define ARMV8_PMU_CYCLE_IDX		(ARMV8_PMU_MAX_COUNTERS - 1)

#if IS_ENABLED(CONFIG_HW_PERF_EVENTS) && IS_ENABLED(CONFIG_KVM)

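/*
 * One emulated counter: its index in the guest-visible counter space
 * (ARMV8_PMU_CYCLE_IDX for the cycle counter) and the host perf event
 * that backs it, NULL while the counter is idle.
 */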
struct kvm_pmc {
	u8 idx;	/* index into the pmu->pmc array */
	struct perf_event *perf_event;
};

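/*
 * Bitmasks of the events to count while running the host and the guest
 * respectively; snapshotted into the vcpu by kvm_pmu_update_vcpu_events()
 * so the non-VHE world switch can honour perf's host/guest exclusion.
 */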
struct kvm_pmu_events {
	u32 events_host;
	u32 events_guest;
};

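/*
 * Per-vcpu PMU emulation state. irq_num is the PPI used to signal
 * counter overflow to the guest and irq_level mirrors the current state
 * of that line; overflow_work lets the perf overflow callback (which may
 * run in NMI context) defer kicking the vcpu. 'created' is set once
 * userspace has finished initialising the PMU device.
 */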
struct kvm_pmu {
	struct irq_work overflow_work;
	struct kvm_pmu_events events;
	struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS];
	int irq_num;
	bool created;
	bool irq_level;
};

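/*
 * List node wrapping a host arm_pmu instance; KVM keeps one of these per
 * hardware PMU so userspace can choose which physical PMU backs the
 * guest counters.
 */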
struct arm_pmu_entry {
	struct list_head entry;
	struct arm_pmu *arm_pmu;
};

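/*
 * Enabled by the host PMU driver once a hardware PMU is registered,
 * making kvm_arm_support_pmu_v3() a cheap static branch on hot paths.
 */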
DECLARE_STATIC_KEY_FALSE(kvm_arm_pmu_available);

static __always_inline bool kvm_arm_support_pmu_v3(void)
{
	return static_branch_likely(&kvm_arm_pmu_available);
}

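/*
 * The overflow interrupt must be a PPI, so any valid number is at least
 * VGIC_NR_SGIS (SGIs occupy INTIDs 0-15); anything below that means
 * userspace has not configured the interrupt yet.
 */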
#define kvm_arm_pmu_irq_initialized(v)	((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu);
u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1);
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu);
void kvm_pmu_update_run(struct kvm_vcpu *vcpu);
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
				    u64 select_idx);
int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);

struct kvm_pmu_events *kvm_get_pmu_events(void);
void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_resync_el0(void);

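/* True once userspace has requested the PMUv3 feature for this vcpu. */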
#define kvm_vcpu_has_pmu(vcpu)					\
	(test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))

/*
 * Updates the vcpu's view of the pmu events for this cpu.
 * Must be called before every vcpu run after disabling interrupts, to ensure
 * that an interrupt cannot fire and update the structure.
 */
#define kvm_pmu_update_vcpu_events(vcpu)				\
	do {								\
		if (!has_vhe() && kvm_arm_support_pmu_v3())		\
			vcpu->arch.pmu.events = *kvm_get_pmu_events();	\
	} while (0)
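
/*
 * A minimal usage sketch (not a verbatim copy of the run loop): take the
 * snapshot only once interrupts are masked, e.g.
 *
 *	local_irq_disable();
 *	kvm_pmu_update_vcpu_events(vcpu);
 *	... enter the guest ...
 *	local_irq_enable();
 */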

/*
 * Evaluates as true when emulating PMUv3p5 or later, and false
 * otherwise.
 */
#define kvm_pmu_is_3p5(vcpu) ({						\
	u64 val = IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1);		\
	u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, val);	\
									\
	pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P5;				\
})

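/*
 * Highest PMUVer value KVM will expose to a guest; a best-effort summary
 * here, the authoritative clamping lives in the KVM/arm64 PMU emulation
 * code.
 */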
u8 kvm_arm_pmu_get_pmuver_limit(void);

#else
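/*
 * Stubs for builds without perf events or KVM, so that callers don't
 * need #ifdefs.
 */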
struct kvm_pmu {
};

static inline bool kvm_arm_support_pmu_v3(void)
{
	return false;
}

#define kvm_arm_pmu_irq_initialized(v)	(false)
static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
					    u64 select_idx)
{
	return 0;
}
static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu,
					     u64 select_idx, u64 val) {}
static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
static inline bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
	return false;
}
static inline void kvm_pmu_update_run(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu,
						  u64 data, u64 select_idx) {}
static inline int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
{
	return 0;
}

#define kvm_vcpu_has_pmu(vcpu)		({ false; })
#define kvm_pmu_is_3p5(vcpu)		({ false; })
static inline void kvm_pmu_update_vcpu_events(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}
static inline u8 kvm_arm_pmu_get_pmuver_limit(void)
{
	return 0;
}
static inline void kvm_vcpu_pmu_resync_el0(void) {}

#endif

#endif