/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_PMU_H
#define __KVM_X86_PMU_H

#include <linux/nospec.h>

#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu)
#define pmu_to_vcpu(pmu)  (container_of((pmu), struct kvm_vcpu, arch.pmu))
#define pmc_to_pmu(pmc)   (&(pmc)->vcpu->arch.pmu)

#define MSR_IA32_MISC_ENABLE_PMU_RO_MASK (MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL |	\
					  MSR_IA32_MISC_ENABLE_BTS_UNAVAIL)

/* retrieve the 4 bits for EN and PMI out of IA32_FIXED_CTR_CTRL */
#define fixed_ctrl_field(ctrl_reg, idx) (((ctrl_reg) >> ((idx)*4)) & 0xf)

#define VMWARE_BACKDOOR_PMC_HOST_TSC		0x10000
#define VMWARE_BACKDOOR_PMC_REAL_TIME		0x10001
#define VMWARE_BACKDOOR_PMC_APPARENT_TIME	0x10002

struct kvm_pmu_ops {
	bool (*hw_event_available)(struct kvm_pmc *pmc);
	bool (*pmc_is_enabled)(struct kvm_pmc *pmc);
	struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
	struct kvm_pmc *(*rdpmc_ecx_to_pmc)(struct kvm_vcpu *vcpu,
					    unsigned int idx, u64 *mask);
	struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, u32 msr);
	bool (*is_valid_rdpmc_ecx)(struct kvm_vcpu *vcpu, unsigned int idx);
	bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
	int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
	void (*refresh)(struct kvm_vcpu *vcpu);
	void (*init)(struct kvm_vcpu *vcpu);
	void (*reset)(struct kvm_vcpu *vcpu);
	void (*deliver_pmi)(struct kvm_vcpu *vcpu);
	void (*cleanup)(struct kvm_vcpu *vcpu);

	const u64 EVENTSEL_EVENT;
	const int MAX_NR_GP_COUNTERS;
};

void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops);

static inline bool kvm_pmu_has_perf_global_ctrl(struct kvm_pmu *pmu)
{
	/*
	 * Architecturally, Intel's SDM states that IA32_PERF_GLOBAL_CTRL is
	 * supported if "CPUID.0AH: EAX[7:0] > 0", i.e. if the PMU version is
	 * greater than zero.  However, KVM only exposes and emulates the MSR
	 * to/for the guest if the guest PMU supports at least "Architectural
	 * Performance Monitoring Version 2".
	 *
	 * AMD's version of PERF_GLOBAL_CTRL conveniently shows up with v2.
	 */
	return pmu->version > 1;
}
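/*
 * Usage sketch (illustrative, not code from this header): MSR intercept
 * handlers typically gate guest accesses to IA32_PERF_GLOBAL_CTRL and
 * the other v2 global MSRs on this helper, e.g.:
 *
 *	case MSR_CORE_PERF_GLOBAL_CTRL:
 *		if (!kvm_pmu_has_perf_global_ctrl(pmu))
 *			return 1;	non-zero => access rejected
 */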
static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	return pmu->counter_bitmask[pmc->type];
}

static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
{
	u64 counter, enabled, running;

	counter = pmc->counter;
	if (pmc->perf_event && !pmc->is_paused)
		counter += perf_event_read_value(pmc->perf_event,
						 &enabled, &running);
	/* FIXME: Scaling needed? */
	return counter & pmc_bitmask(pmc);
}

static inline void pmc_release_perf_event(struct kvm_pmc *pmc)
{
	if (pmc->perf_event) {
		perf_event_release_kernel(pmc->perf_event);
		pmc->perf_event = NULL;
		pmc->current_config = 0;
		pmc_to_pmu(pmc)->event_count--;
	}
}

static inline void pmc_stop_counter(struct kvm_pmc *pmc)
{
	if (pmc->perf_event) {
		pmc->counter = pmc_read_counter(pmc);
		pmc_release_perf_event(pmc);
	}
}

static inline bool pmc_is_gp(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_GP;
}

static inline bool pmc_is_fixed(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_FIXED;
}

static inline bool kvm_valid_perf_global_ctrl(struct kvm_pmu *pmu,
					      u64 data)
{
	return !(pmu->global_ctrl_mask & data);
}

/* Returns the general purpose PMC with the specified MSR.  Note that it can
 * be used for both PERFCTRn and EVNTSELn; that is why it accepts base as a
 * parameter to tell them apart.
 */
static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
					 u32 base)
{
	if (msr >= base && msr < base + pmu->nr_arch_gp_counters) {
		u32 index = array_index_nospec(msr - base,
					       pmu->nr_arch_gp_counters);

		return &pmu->gp_counters[index];
	}

	return NULL;
}

/* Returns the fixed PMC with the specified MSR. */
static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
{
	int base = MSR_CORE_PERF_FIXED_CTR0;

	if (msr >= base && msr < base + pmu->nr_arch_fixed_counters) {
		u32 index = array_index_nospec(msr - base,
					       pmu->nr_arch_fixed_counters);

		return &pmu->fixed_counters[index];
	}

	return NULL;
}

static inline u64 get_sample_period(struct kvm_pmc *pmc, u64 counter_value)
{
	u64 sample_period = (-counter_value) & pmc_bitmask(pmc);

	if (!sample_period)
		sample_period = pmc_bitmask(pmc) + 1;
	return sample_period;
}
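/*
 * Worked example for get_sample_period() (counter width assumed for
 * illustration): with a 48-bit counter, pmc_bitmask() is 2^48 - 1.  A
 * guest that programs the counter to 0xfffffffffff0 expects an overflow
 * after 0x10 increments, and (-0xfffffffffff0ULL) & (2^48 - 1) == 0x10.
 * A counter value of 0 yields a full period of 2^48 via the fixup above.
 */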
static inline void pmc_update_sample_period(struct kvm_pmc *pmc)
{
	if (!pmc->perf_event || pmc->is_paused ||
	    !is_sampling_event(pmc->perf_event))
		return;

	perf_event_period(pmc->perf_event,
			  get_sample_period(pmc, pmc->counter));
}

static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (pmc_is_fixed(pmc))
		return fixed_ctrl_field(pmu->fixed_ctr_ctrl,
					pmc->idx - INTEL_PMC_IDX_FIXED) & 0x3;

	return pmc->eventsel & ARCH_PERFMON_EVENTSEL_ENABLE;
}

extern struct x86_pmu_capability kvm_pmu_cap;

static inline void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops)
{
	bool is_intel = boot_cpu_data.x86_vendor == X86_VENDOR_INTEL;

	/*
	 * Hybrid PMUs don't play nice with virtualization without careful
	 * configuration by userspace, and KVM's APIs for reporting supported
	 * vPMU features do not account for hybrid PMUs.  Disable vPMU support
	 * for hybrid PMUs until KVM gains a way to let userspace opt-in.
	 */
	if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
		enable_pmu = false;

	if (enable_pmu) {
		perf_get_x86_pmu_capability(&kvm_pmu_cap);

		/*
		 * For Intel, only support a guest architectural PMU on a
		 * host that itself has an architectural PMU.
		 */
		if ((is_intel && !kvm_pmu_cap.version) ||
		    !kvm_pmu_cap.num_counters_gp)
			enable_pmu = false;
	}

	if (!enable_pmu) {
		memset(&kvm_pmu_cap, 0, sizeof(kvm_pmu_cap));
		return;
	}

	kvm_pmu_cap.version = min(kvm_pmu_cap.version, 2);
	kvm_pmu_cap.num_counters_gp = min(kvm_pmu_cap.num_counters_gp,
					  pmu_ops->MAX_NR_GP_COUNTERS);
	kvm_pmu_cap.num_counters_fixed = min(kvm_pmu_cap.num_counters_fixed,
					     KVM_PMC_MAX_FIXED);
}

static inline void kvm_pmu_request_counter_reprogram(struct kvm_pmc *pmc)
{
	set_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi);
	kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
}

static inline void reprogram_counters(struct kvm_pmu *pmu, u64 diff)
{
	int bit;

	if (!diff)
		return;

	for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
		set_bit(bit, pmu->reprogram_pmi);
	kvm_make_request(KVM_REQ_PMU, pmu_to_vcpu(pmu));
}
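/*
 * Typical call pattern for reprogram_counters() (sketch based on how a
 * write to a global control MSR is handled; "data" is the new MSR
 * value): XORing the old and new enable bitmaps reprograms only the
 * counters whose enable bit actually toggled:
 *
 *	diff = pmu->global_ctrl ^ data;
 *	pmu->global_ctrl = data;
 *	reprogram_counters(pmu, diff);
 */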
void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
bool kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx);
bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr);
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
void kvm_pmu_refresh(struct kvm_vcpu *vcpu);
void kvm_pmu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_cleanup(struct kvm_vcpu *vcpu);
void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp);
void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id);

bool is_vmware_backdoor_pmc(u32 pmc_idx);

extern struct kvm_pmu_ops intel_pmu_ops;
extern struct kvm_pmu_ops amd_pmu_ops;
#endif /* __KVM_X86_PMU_H */