/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_PMU_H
#define __KVM_X86_PMU_H

#include <linux/nospec.h>

#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu)
#define pmu_to_vcpu(pmu)  (container_of((pmu), struct kvm_vcpu, arch.pmu))
#define pmc_to_pmu(pmc)   (&(pmc)->vcpu->arch.pmu)

/* retrieve the 4 bits for EN and PMI out of IA32_FIXED_CTR_CTRL */
#define fixed_ctrl_field(ctrl_reg, idx) (((ctrl_reg) >> ((idx)*4)) & 0xf)

#define VMWARE_BACKDOOR_PMC_HOST_TSC		0x10000
#define VMWARE_BACKDOOR_PMC_REAL_TIME		0x10001
#define VMWARE_BACKDOOR_PMC_APPARENT_TIME	0x10002

#define MAX_FIXED_COUNTERS	3

struct kvm_event_hw_type_mapping {
	u8 eventsel;
	u8 unit_mask;
	unsigned event_type;
};

struct kvm_pmu_ops {
	unsigned (*find_arch_event)(struct kvm_pmu *pmu, u8 event_select,
				    u8 unit_mask);
	unsigned (*find_fixed_event)(int idx);
	bool (*pmc_is_enabled)(struct kvm_pmc *pmc);
	struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
	struct kvm_pmc *(*rdpmc_ecx_to_pmc)(struct kvm_vcpu *vcpu,
					    unsigned int idx, u64 *mask);
	struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, u32 msr);
	int (*is_valid_rdpmc_ecx)(struct kvm_vcpu *vcpu, unsigned int idx);
	bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
	int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
	void (*refresh)(struct kvm_vcpu *vcpu);
	void (*init)(struct kvm_vcpu *vcpu);
	void (*reset)(struct kvm_vcpu *vcpu);
	void (*deliver_pmi)(struct kvm_vcpu *vcpu);
	void (*cleanup)(struct kvm_vcpu *vcpu);
};

static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	return pmu->counter_bitmask[pmc->type];
}

static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
{
	u64 counter, enabled, running;

	counter = pmc->counter;
	if (pmc->perf_event)
		counter += perf_event_read_value(pmc->perf_event,
						 &enabled, &running);
	/* FIXME: Scaling needed? */
	return counter & pmc_bitmask(pmc);
}

static inline void pmc_release_perf_event(struct kvm_pmc *pmc)
{
	if (pmc->perf_event) {
		perf_event_release_kernel(pmc->perf_event);
		pmc->perf_event = NULL;
		pmc->current_config = 0;
		pmc_to_pmu(pmc)->event_count--;
	}
}

static inline void pmc_stop_counter(struct kvm_pmc *pmc)
{
	if (pmc->perf_event) {
		pmc->counter = pmc_read_counter(pmc);
		pmc_release_perf_event(pmc);
	}
}

static inline bool pmc_is_gp(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_GP;
}

static inline bool pmc_is_fixed(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_FIXED;
}

static inline bool pmc_is_enabled(struct kvm_pmc *pmc)
{
	return kvm_x86_ops.pmu_ops->pmc_is_enabled(pmc);
}

static inline bool kvm_valid_perf_global_ctrl(struct kvm_pmu *pmu,
					      u64 data)
{
	return !(pmu->global_ctrl_mask & data);
}
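
/*
 * Worked example (illustrative, not upstream text): with a 48-bit
 * general-purpose counter, counter_bitmask[KVM_PMC_GP] is
 * 0xffffffffffff, so pmc_read_counter() wraps the accumulated count
 * modulo 2^48, matching hardware overflow behaviour.  Similarly, if a
 * vCPU exposes 4 GP counters and 3 fixed counters, global_ctrl_mask is
 * ~(0xfULL | (0x7ULL << 32)), i.e. it has 1s at every reserved bit of
 * IA32_PERF_GLOBAL_CTRL, and kvm_valid_perf_global_ctrl() rejects any
 * guest write that sets one of those bits.  The actual widths and
 * counter counts are taken from guest CPUID during kvm_pmu_refresh();
 * the numbers above are examples only.
 */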

/*
 * Returns the general-purpose PMC with the specified MSR.  Note that it can
 * be used for both PERFCTRn and EVNTSELn; that is why it accepts base as a
 * parameter to tell them apart.
 */
static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
					 u32 base)
{
	if (msr >= base && msr < base + pmu->nr_arch_gp_counters) {
		u32 index = array_index_nospec(msr - base,
					       pmu->nr_arch_gp_counters);

		return &pmu->gp_counters[index];
	}

	return NULL;
}

/* returns fixed PMC with the specified MSR */
static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
{
	int base = MSR_CORE_PERF_FIXED_CTR0;

	if (msr >= base && msr < base + pmu->nr_arch_fixed_counters) {
		u32 index = array_index_nospec(msr - base,
					       pmu->nr_arch_fixed_counters);

		return &pmu->fixed_counters[index];
	}

	return NULL;
}

static inline u64 get_sample_period(struct kvm_pmc *pmc, u64 counter_value)
{
	u64 sample_period = (-counter_value) & pmc_bitmask(pmc);

	if (!sample_period)
		sample_period = pmc_bitmask(pmc) + 1;
	return sample_period;
}

void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel);
void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int fixed_idx);
void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx);

void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
int kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx);
bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr);
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
void kvm_pmu_refresh(struct kvm_vcpu *vcpu);
void kvm_pmu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_cleanup(struct kvm_vcpu *vcpu);
void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp);

bool is_vmware_backdoor_pmc(u32 pmc_idx);

extern struct kvm_pmu_ops intel_pmu_ops;
extern struct kvm_pmu_ops amd_pmu_ops;
#endif /* __KVM_X86_PMU_H */
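
/*
 * Usage sketch (illustrative only; example_get_msr is a hypothetical
 * name, not upstream code): how a vendor get_msr hook typically resolves
 * an MSR index to a counter and reads it.  MSR_IA32_PERFCTR0 is the
 * Intel GP-counter base; an AMD implementation would pass
 * MSR_K7_PERFCTR0 instead.
 *
 *	static int example_get_msr(struct kvm_vcpu *vcpu,
 *				   struct msr_data *msr_info)
 *	{
 *		struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 *		struct kvm_pmc *pmc;
 *
 *		// get_gp_pmc() bounds-checks and Spectre-hardens the index
 *		pmc = get_gp_pmc(pmu, msr_info->index, MSR_IA32_PERFCTR0);
 *		if (pmc) {
 *			// pmc_read_counter() already masks to counter width
 *			msr_info->data = pmc_read_counter(pmc);
 *			return 0;
 *		}
 *		return 1;	// not one of ours
 *	}
 *
 * On the write side, get_sample_period() converts a guest counter value
 * into the distance to overflow for perf: for a 48-bit counter
 * programmed to 0xfffffffffffe, (-0xfffffffffffe) & 0xffffffffffff == 2,
 * so perf is asked to interrupt after two events.
 */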