// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM PMU support for AMD
 *
 * Copyright 2015, Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *   Wei Huang <wei@redhat.com>
 *
 * Implementation is based on the pmu_intel.c file
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"
#include "svm.h"

enum pmu_type {
	PMU_TYPE_COUNTER = 0,
	PMU_TYPE_EVNTSEL,
};

static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
	unsigned int num_counters = pmu->nr_arch_gp_counters;

	if (pmc_idx >= num_counters)
		return NULL;

	return &pmu->gp_counters[array_index_nospec(pmc_idx, num_counters)];
}

static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
					     enum pmu_type type)
{
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
	unsigned int idx;

	if (!vcpu->kvm->arch.enable_pmu)
		return NULL;

	switch (msr) {
	case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
		if (!guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
			return NULL;
		/*
		 * Each PMU counter has a pair of CTL and CTR MSRs. CTLn
		 * MSRs (accessed via EVNTSEL) are even, CTRn MSRs are odd.
		 */
		idx = (unsigned int)((msr - MSR_F15H_PERF_CTL0) / 2);
		if (!(msr & 0x1) != (type == PMU_TYPE_EVNTSEL))
			return NULL;
		break;
	case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
		if (type != PMU_TYPE_EVNTSEL)
			return NULL;
		idx = msr - MSR_K7_EVNTSEL0;
		break;
	case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
		if (type != PMU_TYPE_COUNTER)
			return NULL;
		idx = msr - MSR_K7_PERFCTR0;
		break;
	default:
		return NULL;
	}

	return amd_pmc_idx_to_pmc(pmu, idx);
}

static bool amd_hw_event_available(struct kvm_pmc *pmc)
{
	return true;
}

/*
 * Check if a PMC is enabled by comparing it against global_ctrl bits.
 * Because AMD CPUs don't have a global_ctrl MSR, all PMCs are enabled
 * (return TRUE).
 */
static bool amd_pmc_is_enabled(struct kvm_pmc *pmc)
{
	return true;
}

static bool amd_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	idx &= ~(3u << 30);

	return idx < pmu->nr_arch_gp_counters;
}

/* idx is the ECX register of RDPMC instruction */
static struct kvm_pmc *amd_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
	unsigned int idx, u64 *mask)
{
	return amd_pmc_idx_to_pmc(vcpu_to_pmu(vcpu), idx & ~(3u << 30));
}

static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	/* All MSRs refer to exactly one PMC, so msr_idx_to_pmc is enough. */
	return false;
}

static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;

	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	pmc = pmc ? pmc : get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);

	return pmc;
}

static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;

	/* MSR_PERFCTRn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	if (pmc) {
		msr_info->data = pmc_read_counter(pmc);
		return 0;
	}
	/* MSR_EVNTSELn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
	if (pmc) {
		msr_info->data = pmc->eventsel;
		return 0;
	}

	return 1;
}

static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;
	u64 data = msr_info->data;

	/* MSR_PERFCTRn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	if (pmc) {
		pmc->counter += data - pmc_read_counter(pmc);
		pmc_update_sample_period(pmc);
		return 0;
	}
	/* MSR_EVNTSELn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
	if (pmc) {
		data &= ~pmu->reserved_bits;
		if (data != pmc->eventsel) {
			pmc->eventsel = data;
			kvm_pmu_request_counter_reprogam(pmc);
		}
		return 0;
	}

	return 1;
}

static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS_CORE;
	else
		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;

	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
	pmu->reserved_bits = 0xfffffff000280000ull;
	pmu->raw_event_mask = AMD64_RAW_EVENT_MASK;
	pmu->version = 1;
	/* Not applicable to AMD; but clear them to prevent any fallout. */
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
	pmu->nr_arch_fixed_counters = 0;
	pmu->global_status = 0;
	bitmap_set(pmu->all_valid_pmc_idx, 0, pmu->nr_arch_gp_counters);
}

static void amd_pmu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

	BUILD_BUG_ON(KVM_AMD_PMC_MAX_GENERIC > AMD64_NUM_COUNTERS_CORE);
	BUILD_BUG_ON(KVM_AMD_PMC_MAX_GENERIC > INTEL_PMC_MAX_GENERIC);

	for (i = 0; i < KVM_AMD_PMC_MAX_GENERIC; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
		pmu->gp_counters[i].current_config = 0;
	}
}

static void amd_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

	for (i = 0; i < KVM_AMD_PMC_MAX_GENERIC; i++) {
		struct kvm_pmc *pmc = &pmu->gp_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = pmc->prev_counter = pmc->eventsel = 0;
	}
}

struct kvm_pmu_ops amd_pmu_ops __initdata = {
	.hw_event_available = amd_hw_event_available,
	.pmc_is_enabled = amd_pmc_is_enabled,
	.pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
	.rdpmc_ecx_to_pmc = amd_rdpmc_ecx_to_pmc,
	.msr_idx_to_pmc = amd_msr_idx_to_pmc,
	.is_valid_rdpmc_ecx = amd_is_valid_rdpmc_ecx,
	.is_valid_msr = amd_is_valid_msr,
	.get_msr = amd_pmu_get_msr,
	.set_msr = amd_pmu_set_msr,
	.refresh = amd_pmu_refresh,
	.init = amd_pmu_init,
	.reset = amd_pmu_reset,
	.EVENTSEL_EVENT = AMD64_EVENTSEL_EVENT,
};