// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM PMU support for AMD
 *
 * Copyright 2015, Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *   Wei Huang <wei@redhat.com>
 *
 * Implementation is based on the pmu_intel.c file.
 */
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"
#include "svm.h"

enum pmu_type {
	PMU_TYPE_COUNTER = 0,
	PMU_TYPE_EVNTSEL,
};

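/*
 * Translate a PMC index into a gp_counters[] entry.  array_index_nospec()
 * clamps the index under speculation so a mispredicted bounds check can't
 * be abused to read out of bounds (Spectre v1 hardening).
 */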
static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
	unsigned int num_counters = pmu->nr_arch_gp_counters;

	if (pmc_idx >= num_counters)
		return NULL;

	return &pmu->gp_counters[array_index_nospec(pmc_idx, num_counters)];
}

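/*
 * Map an MSR index to its general-purpose PMC.  The core extensions
 * (PERFCTR_CORE) expose six interleaved CTL/CTR pairs starting at
 * MSR_F15H_PERF_CTL0, while the legacy K7 layout provides four counters
 * in separate EVNTSEL and PERFCTR ranges.
 */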
static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
					     enum pmu_type type)
{
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
	unsigned int idx;

	if (!vcpu->kvm->arch.enable_pmu)
		return NULL;

	switch (msr) {
	case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
		if (!guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
			return NULL;
		/*
		 * Each PMU counter has a pair of CTL and CTR MSRs.  CTLn
		 * MSRs (accessed via EVNTSEL) are even, CTRn MSRs are odd,
		 * so the low bit of the MSR index must match the requested
		 * type; reject a counter lookup on a CTL MSR and vice versa.
		 */
		idx = (unsigned int)((msr - MSR_F15H_PERF_CTL0) / 2);
		if (!(msr & 0x1) != (type == PMU_TYPE_EVNTSEL))
			return NULL;
		break;
	case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
		if (type != PMU_TYPE_EVNTSEL)
			return NULL;
		idx = msr - MSR_K7_EVNTSEL0;
		break;
	case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
		if (type != PMU_TYPE_COUNTER)
			return NULL;
		idx = msr - MSR_K7_PERFCTR0;
		break;
	default:
		return NULL;
	}

	return amd_pmc_idx_to_pmc(pmu, idx);
}

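/*
 * AMD has no notion of architecturally unavailable events, so every event
 * the guest programs is treated as available.
 */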
static bool amd_hw_event_available(struct kvm_pmc *pmc)
{
	return true;
}

/*
 * Check if a PMC is enabled by comparing it against global_ctrl bits.
 * Because AMD CPUs don't have a global_ctrl MSR, all PMCs are always
 * enabled (return TRUE).
 */
static bool amd_pmc_is_enabled(struct kvm_pmc *pmc)
{
	return true;
}

static bool amd_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

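	/* Strip the type/flag bits (31:30); AMD has only GP counters. */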
	idx &= ~(3u << 30);

	return idx < pmu->nr_arch_gp_counters;
}

/* idx is the ECX value of the RDPMC instruction */
static struct kvm_pmc *amd_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
	unsigned int idx, u64 *mask)
{
	return amd_pmc_idx_to_pmc(vcpu_to_pmu(vcpu), idx & ~(3u << 30));
}

static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	/* All MSRs refer to exactly one PMC, so msr_idx_to_pmc is enough. */
	return false;
}

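/*
 * Resolve an MSR to its PMC no matter which half of the pair it names:
 * try the counter range first, then fall back to the event selectors.
 */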
static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;

	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	pmc = pmc ? pmc : get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);

	return pmc;
}

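/* Read a PMU MSR: counters are read through perf, selectors are cached. */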
static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;

	/* MSR_PERFCTRn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	if (pmc) {
		msr_info->data = pmc_read_counter(pmc);
		return 0;
	}
	/* MSR_EVNTSELn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
	if (pmc) {
		msr_info->data = pmc->eventsel;
		return 0;
	}

	return 1;
}

static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;
	u64 data = msr_info->data;

	/* MSR_PERFCTRn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	if (pmc) {
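		/*
		 * Fold the guest's write into the counter as a delta against
		 * the current reading, so the backing perf event can keep
		 * running and only the sample period needs refreshing.
		 */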
		pmc->counter += data - pmc_read_counter(pmc);
		pmc_update_sample_period(pmc);
		return 0;
	}
	/* MSR_EVNTSELn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
	if (pmc) {
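		/* Writes to reserved bits are masked off, not faulted on. */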
		data &= ~pmu->reserved_bits;
		if (data != pmc->eventsel) {
			pmc->eventsel = data;
			reprogram_counter(pmc);
		}
		return 0;
	}

	return 1;
}

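/*
 * Model the PMU from guest CPUID: the core extensions advertise six
 * counters, the legacy architecture four.  Counters are 48 bits wide.
 */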
static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS_CORE;
	else
		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;

	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
	pmu->reserved_bits = 0xfffffff000280000ull;
	pmu->raw_event_mask = AMD64_RAW_EVENT_MASK;
	pmu->version = 1;
	/* Not applicable to AMD, but clear them to prevent any fallout. */
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
	pmu->nr_arch_fixed_counters = 0;
	pmu->global_status = 0;
	bitmap_set(pmu->all_valid_pmc_idx, 0, pmu->nr_arch_gp_counters);
}

static void amd_pmu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

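	/* gp_counters[] is shared with Intel and sized to its maximum. */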
	BUILD_BUG_ON(AMD64_NUM_COUNTERS_CORE > INTEL_PMC_MAX_GENERIC);

	for (i = 0; i < AMD64_NUM_COUNTERS_CORE; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
		pmu->gp_counters[i].current_config = 0;
	}
}

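/* Stop all counters and clear their state, e.g. on vCPU reset. */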
static void amd_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

	for (i = 0; i < AMD64_NUM_COUNTERS_CORE; i++) {
		struct kvm_pmc *pmc = &pmu->gp_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = pmc->eventsel = 0;
	}
}

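/* Copied into the global kvm_pmu_ops during init, hence __initdata. */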
struct kvm_pmu_ops amd_pmu_ops __initdata = {
	.hw_event_available = amd_hw_event_available,
	.pmc_is_enabled = amd_pmc_is_enabled,
	.pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
	.rdpmc_ecx_to_pmc = amd_rdpmc_ecx_to_pmc,
	.msr_idx_to_pmc = amd_msr_idx_to_pmc,
	.is_valid_rdpmc_ecx = amd_is_valid_rdpmc_ecx,
	.is_valid_msr = amd_is_valid_msr,
	.get_msr = amd_pmu_get_msr,
	.set_msr = amd_pmu_set_msr,
	.refresh = amd_pmu_refresh,
	.init = amd_pmu_init,
	.reset = amd_pmu_reset,
};