--- pmu.h (63f21f326fc9e068d04c2c1d0a722e8db65588ba)
+++ pmu.h (968635abd5f5986f3cb6f15602d365cf1b551c5d)
 /* SPDX-License-Identifier: GPL-2.0 */
 #ifndef __KVM_X86_PMU_H
 #define __KVM_X86_PMU_H
 
 #include <linux/nospec.h>
 
 #include <asm/cpu_device_id.h>
 
 [... 149 unchanged lines hidden ...]
 
 	if (pmc_is_fixed(pmc))
 		return fixed_ctrl_field(pmu->fixed_ctr_ctrl,
 					pmc->idx - INTEL_PMC_IDX_FIXED) & 0x3;
 
 	return pmc->eventsel & ARCH_PERFMON_EVENTSEL_ENABLE;
 }
 
+extern struct x86_pmu_capability kvm_pmu_cap;
+
+static inline void kvm_init_pmu_capability(void)
+{
+	perf_get_x86_pmu_capability(&kvm_pmu_cap);
+
+	/*
+	 * Only support guest architectural pmu on
+	 * a host with architectural pmu.
+	 */
+	if (!kvm_pmu_cap.version)
+		memset(&kvm_pmu_cap, 0, sizeof(kvm_pmu_cap));
+
+	kvm_pmu_cap.version = min(kvm_pmu_cap.version, 2);
+	kvm_pmu_cap.num_counters_fixed = min(kvm_pmu_cap.num_counters_fixed,
+					     KVM_PMC_MAX_FIXED);
+}
+
 void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel);
 void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int fixed_idx);
 void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx);
 
 void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
 void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
 int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
 bool kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx);
 bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr);
 int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
 int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
 void kvm_pmu_refresh(struct kvm_vcpu *vcpu);
 void kvm_pmu_reset(struct kvm_vcpu *vcpu);
 void kvm_pmu_init(struct kvm_vcpu *vcpu);
 void kvm_pmu_cleanup(struct kvm_vcpu *vcpu);
 void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
 int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp);
 void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id);
+void kvm_init_pmu_capability(void);
 
 bool is_vmware_backdoor_pmc(u32 pmc_idx);
 
 extern struct kvm_pmu_ops intel_pmu_ops;
 extern struct kvm_pmu_ops amd_pmu_ops;
 #endif /* __KVM_X86_PMU_H */
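
The added kvm_init_pmu_capability() takes a one-time snapshot of the host PMU capabilities in the global kvm_pmu_cap, zeroes the snapshot when the host reports no architectural PMU (version 0), and clamps the rest to what KVM's vPMU emulates: architectural PMU version at most 2 and at most KVM_PMC_MAX_FIXED fixed counters. Below is a minimal, stand-alone user-space sketch of that snapshot-and-clamp pattern; the struct, the host query, and min_int() are mocked stand-ins for the kernel's x86_pmu_capability, perf_get_x86_pmu_capability(), and min() macro, and KVM_PMC_MAX_FIXED is assumed to be 3 (its value in kernels of this era).

/*
 * Hypothetical mock of the kvm_init_pmu_capability() pattern added above.
 * Everything here is a stand-in so the snippet compiles and runs anywhere.
 */
#include <stdio.h>
#include <string.h>

#define KVM_PMC_MAX_FIXED 3	/* assumed kernel value */

struct x86_pmu_capability {	/* trimmed to the fields used here */
	int version;
	int num_counters_gp;
	int num_counters_fixed;
};

static struct x86_pmu_capability kvm_pmu_cap;

/* Fake host query; the real helper reads the boot CPU's perf limits. */
static void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
{
	cap->version = 5;		/* pretend the host PMU is v5 */
	cap->num_counters_gp = 8;
	cap->num_counters_fixed = 4;
}

static int min_int(int a, int b)
{
	return a < b ? a : b;
}

static void kvm_init_pmu_capability(void)
{
	/* Snapshot the host capabilities once. */
	perf_get_x86_pmu_capability(&kvm_pmu_cap);

	/* No architectural PMU on the host: advertise nothing. */
	if (!kvm_pmu_cap.version)
		memset(&kvm_pmu_cap, 0, sizeof(kvm_pmu_cap));

	/* Clamp to the emulated limits: arch PMU v2, 3 fixed counters. */
	kvm_pmu_cap.version = min_int(kvm_pmu_cap.version, 2);
	kvm_pmu_cap.num_counters_fixed =
		min_int(kvm_pmu_cap.num_counters_fixed, KVM_PMC_MAX_FIXED);
}

int main(void)
{
	kvm_init_pmu_capability();
	printf("version=%d gp=%d fixed=%d\n", kvm_pmu_cap.version,
	       kvm_pmu_cap.num_counters_gp, kvm_pmu_cap.num_counters_fixed);
	return 0;
}

On the mocked host values this prints version=2 gp=8 fixed=3: the GP counter count passes through unchanged, while the version and fixed-counter count are clamped to the emulated limits.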