/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_PMU_H
#define __KVM_X86_PMU_H

#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu)
#define pmu_to_vcpu(pmu)  (container_of((pmu), struct kvm_vcpu, arch.pmu))
#define pmc_to_pmu(pmc)   (&(pmc)->vcpu->arch.pmu)

/* retrieve the 4 bits for EN and PMI out of IA32_FIXED_CTR_CTRL */
#define fixed_ctrl_field(ctrl_reg, idx) (((ctrl_reg) >> ((idx)*4)) & 0xf)

#define VMWARE_BACKDOOR_PMC_HOST_TSC		0x10000
#define VMWARE_BACKDOOR_PMC_REAL_TIME		0x10001
#define VMWARE_BACKDOOR_PMC_APPARENT_TIME	0x10002

struct kvm_event_hw_type_mapping {
	u8 eventsel;
	u8 unit_mask;
	unsigned event_type;
};

struct kvm_pmu_ops {
	unsigned (*find_arch_event)(struct kvm_pmu *pmu, u8 event_select,
				    u8 unit_mask);
	unsigned (*find_fixed_event)(int idx);
	bool (*pmc_is_enabled)(struct kvm_pmc *pmc);
	struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
	struct kvm_pmc *(*rdpmc_ecx_to_pmc)(struct kvm_vcpu *vcpu,
					    unsigned int idx, u64 *mask);
	struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, u32 msr);
	int (*is_valid_rdpmc_ecx)(struct kvm_vcpu *vcpu, unsigned int idx);
	bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
	void (*refresh)(struct kvm_vcpu *vcpu);
	void (*init)(struct kvm_vcpu *vcpu);
	void (*reset)(struct kvm_vcpu *vcpu);
};

static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	return pmu->counter_bitmask[pmc->type];
}

static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
{
	u64 counter, enabled, running;

	counter = pmc->counter;
	if (pmc->perf_event)
		counter += perf_event_read_value(pmc->perf_event,
						 &enabled, &running);
	/* FIXME: Scaling needed? */
	return counter & pmc_bitmask(pmc);
}

static inline void pmc_release_perf_event(struct kvm_pmc *pmc)
{
	if (pmc->perf_event) {
		perf_event_release_kernel(pmc->perf_event);
		pmc->perf_event = NULL;
		pmc->current_config = 0;
		pmc_to_pmu(pmc)->event_count--;
	}
}

static inline void pmc_stop_counter(struct kvm_pmc *pmc)
{
	if (pmc->perf_event) {
		pmc->counter = pmc_read_counter(pmc);
		pmc_release_perf_event(pmc);
	}
}

static inline bool pmc_is_gp(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_GP;
}

static inline bool pmc_is_fixed(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_FIXED;
}

static inline bool pmc_is_enabled(struct kvm_pmc *pmc)
{
	return kvm_x86_ops->pmu_ops->pmc_is_enabled(pmc);
}

static inline bool kvm_valid_perf_global_ctrl(struct kvm_pmu *pmu,
					      u64 data)
{
	return !(pmu->global_ctrl_mask & data);
}

/* returns general purpose PMC with the specified MSR. Note that it can be
 * used for both PERFCTRn and EVNTSELn; that is why it accepts base as a
 * parameter to tell them apart.
 */
static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
					 u32 base)
{
	if (msr >= base && msr < base + pmu->nr_arch_gp_counters)
		return &pmu->gp_counters[msr - base];

	return NULL;
}

/* returns fixed PMC with the specified MSR */
static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
{
	int base = MSR_CORE_PERF_FIXED_CTR0;

	if (msr >= base && msr < base + pmu->nr_arch_fixed_counters)
		return &pmu->fixed_counters[msr - base];

	return NULL;
}

void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel);
void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int fixed_idx);
void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx);

void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
int kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx);
bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr);
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
void kvm_pmu_refresh(struct kvm_vcpu *vcpu);
void kvm_pmu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_cleanup(struct kvm_vcpu *vcpu);
void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp);

bool is_vmware_backdoor_pmc(u32 pmc_idx);

extern struct kvm_pmu_ops intel_pmu_ops;
extern struct kvm_pmu_ops amd_pmu_ops;
#endif /* __KVM_X86_PMU_H */
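
/*
 * Usage sketch (illustrative only, not part of this header's interface):
 * a vendor implementation behind kvm_pmu_ops typically resolves a guest
 * PMU MSR to its kvm_pmc with the helpers above by probing each MSR range
 * in turn. Roughly, for the Intel case (MSR_IA32_PERFCTR0 and
 * MSR_P6_EVNTSEL0 are the usual architectural bases defined elsewhere in
 * the kernel, not in this file):
 *
 *	struct kvm_pmc *pmc;
 *
 *	pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0);       // counter MSRs
 *	if (!pmc)
 *		pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0);  // event selects
 *	if (!pmc)
 *		pmc = get_fixed_pmc(pmu, msr);                // fixed counters
 *
 * An AMD implementation would pass its own bases (e.g. MSR_K7_PERFCTR0 and
 * MSR_K7_EVNTSEL0) to the same helpers.
 */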