// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM PMU support for AMD
 *
 * Copyright 2015, Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *   Wei Huang <wei@redhat.com>
 *
 * Implementation is based on pmu_intel.c file
 */
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"

enum pmu_type {
	PMU_TYPE_COUNTER = 0,
	PMU_TYPE_EVNTSEL,
};

enum index {
	INDEX_ZERO = 0,
	INDEX_ONE,
	INDEX_TWO,
	INDEX_THREE,
	INDEX_FOUR,
	INDEX_FIVE,
	INDEX_ERROR,
};

/* duplicated from amd_perfmon_event_map; K7 and above should work */
static struct kvm_event_hw_type_mapping amd_event_mapping[] = {
	[0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
	[2] = { 0x7d, 0x07, PERF_COUNT_HW_CACHE_REFERENCES },
	[3] = { 0x7e, 0x07, PERF_COUNT_HW_CACHE_MISSES },
	[4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
	[5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
	[6] = { 0xd0, 0x00, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
	[7] = { 0xd1, 0x00, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
};

static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type)
{
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
		if (type == PMU_TYPE_COUNTER)
			return MSR_F15H_PERF_CTR;
		else
			return MSR_F15H_PERF_CTL;
	} else {
		if (type == PMU_TYPE_COUNTER)
			return MSR_K7_PERFCTR0;
		else
			return MSR_K7_EVNTSEL0;
	}
}

static enum index msr_to_index(u32 msr)
{
	switch (msr) {
	case MSR_F15H_PERF_CTL0:
	case MSR_F15H_PERF_CTR0:
	case MSR_K7_EVNTSEL0:
	case MSR_K7_PERFCTR0:
		return INDEX_ZERO;
	case MSR_F15H_PERF_CTL1:
	case MSR_F15H_PERF_CTR1:
	case MSR_K7_EVNTSEL1:
	case MSR_K7_PERFCTR1:
		return INDEX_ONE;
	case MSR_F15H_PERF_CTL2:
	case MSR_F15H_PERF_CTR2:
	case MSR_K7_EVNTSEL2:
	case MSR_K7_PERFCTR2:
		return INDEX_TWO;
	case MSR_F15H_PERF_CTL3:
	case MSR_F15H_PERF_CTR3:
	case MSR_K7_EVNTSEL3:
	case MSR_K7_PERFCTR3:
		return INDEX_THREE;
	case MSR_F15H_PERF_CTL4:
	case MSR_F15H_PERF_CTR4:
		return INDEX_FOUR;
	case MSR_F15H_PERF_CTL5:
	case MSR_F15H_PERF_CTR5:
		return INDEX_FIVE;
	default:
		return INDEX_ERROR;
	}
}

static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
					     enum pmu_type type)
{
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	switch (msr) {
	case MSR_F15H_PERF_CTL0:
	case MSR_F15H_PERF_CTL1:
	case MSR_F15H_PERF_CTL2:
	case MSR_F15H_PERF_CTL3:
	case MSR_F15H_PERF_CTL4:
	case MSR_F15H_PERF_CTL5:
		if (!guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
			return NULL;
		fallthrough;
	case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
		if (type != PMU_TYPE_EVNTSEL)
			return NULL;
		break;
	case MSR_F15H_PERF_CTR0:
	case MSR_F15H_PERF_CTR1:
	case MSR_F15H_PERF_CTR2:
	case MSR_F15H_PERF_CTR3:
	case MSR_F15H_PERF_CTR4:
	case MSR_F15H_PERF_CTR5:
		if (!guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
			return NULL;
		fallthrough;
	case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
		if (type != PMU_TYPE_COUNTER)
			return NULL;
		break;
	default:
		return NULL;
	}

	return &pmu->gp_counters[msr_to_index(msr)];
}
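/*
 * Worked example (added for illustration; the raw MSR values are assumptions
 * taken from msr-index.h / the AMD manuals, not from the original file): on a
 * guest with X86_FEATURE_PERFCTR_CORE, a PMU_TYPE_EVNTSEL lookup of
 * MSR_F15H_PERF_CTL3 (0xc0010206) passes the type check above, and
 * msr_to_index() maps it to INDEX_THREE, i.e. &pmu->gp_counters[3]. A legacy
 * guest reaches the very same slot through the aliased MSR_K7_EVNTSEL3
 * (0xc0010003), which is why both MSR ranges share one index space.
 */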
static unsigned amd_find_arch_event(struct kvm_pmu *pmu,
				    u8 event_select,
				    u8 unit_mask)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++)
		if (amd_event_mapping[i].eventsel == event_select
		    && amd_event_mapping[i].unit_mask == unit_mask)
			break;

	if (i == ARRAY_SIZE(amd_event_mapping))
		return PERF_COUNT_HW_MAX;

	return amd_event_mapping[i].event_type;
}

/* return PERF_COUNT_HW_MAX as AMD doesn't have fixed events */
static unsigned amd_find_fixed_event(int idx)
{
	return PERF_COUNT_HW_MAX;
}

/*
 * Check if a PMC is enabled by comparing it against global_ctrl bits. Because
 * AMD CPUs don't have a global_ctrl MSR, all PMCs are enabled (return true).
 */
static bool amd_pmc_is_enabled(struct kvm_pmc *pmc)
{
	return true;
}

static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
	unsigned int base = get_msr_base(pmu, PMU_TYPE_COUNTER);
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
		/*
		 * The idx is contiguous. The MSRs are not. The counter MSRs
		 * are interleaved with the event select MSRs.
		 */
		pmc_idx *= 2;
	}

	return get_gp_pmc_amd(pmu, base + pmc_idx, PMU_TYPE_COUNTER);
}

static bool amd_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	idx &= ~(3u << 30);

	return idx < pmu->nr_arch_gp_counters;
}

/* idx is the ECX register of the RDPMC instruction */
static struct kvm_pmc *amd_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
					    unsigned int idx, u64 *mask)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *counters;

	idx &= ~(3u << 30);
	if (idx >= pmu->nr_arch_gp_counters)
		return NULL;
	counters = pmu->gp_counters;

	return &counters[idx];
}
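/*
 * Note (added for clarity; the flag-bit semantics are an assumption based on
 * the SDM/APM rather than something stated in this file): in RDPMC's ECX,
 * bits 30 and 31 are selector flags rather than part of the counter index --
 * on Intel, for instance, bit 30 selects the fixed-function counters. AMD
 * has no fixed counters, so the two helpers above simply strip both flag
 * bits with ~(3u << 30) before range-checking the remaining index.
 */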
static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	/* All MSRs refer to exactly one PMC, so msr_idx_to_pmc is enough. */
	return false;
}

static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;

	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	pmc = pmc ? pmc : get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);

	return pmc;
}

static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;

	/* MSR_PERFCTRn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	if (pmc) {
		msr_info->data = pmc_read_counter(pmc);
		return 0;
	}
	/* MSR_EVNTSELn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
	if (pmc) {
		msr_info->data = pmc->eventsel;
		return 0;
	}

	return 1;
}

static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;
	u64 data = msr_info->data;

	/* MSR_PERFCTRn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	if (pmc) {
		pmc->counter += data - pmc_read_counter(pmc);
		return 0;
	}
	/* MSR_EVNTSELn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
	if (pmc) {
		if (data == pmc->eventsel)
			return 0;
		if (!(data & pmu->reserved_bits)) {
			reprogram_gp_counter(pmc, data);
			return 0;
		}
	}

	return 1;
}

static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS_CORE;
	else
		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;

	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
	pmu->reserved_bits = 0xfffffff000280000ull;
	pmu->version = 1;
	/* not applicable to AMD; clear them to prevent any fallout */
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
	pmu->nr_arch_fixed_counters = 0;
	pmu->global_status = 0;
	bitmap_set(pmu->all_valid_pmc_idx, 0, pmu->nr_arch_gp_counters);
}

static void amd_pmu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

	BUILD_BUG_ON(AMD64_NUM_COUNTERS_CORE > INTEL_PMC_MAX_GENERIC);

	for (i = 0; i < AMD64_NUM_COUNTERS_CORE; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
		pmu->gp_counters[i].current_config = 0;
	}
}

static void amd_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

	for (i = 0; i < AMD64_NUM_COUNTERS_CORE; i++) {
		struct kvm_pmc *pmc = &pmu->gp_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = pmc->eventsel = 0;
	}
}

struct kvm_pmu_ops amd_pmu_ops = {
	.find_arch_event = amd_find_arch_event,
	.find_fixed_event = amd_find_fixed_event,
	.pmc_is_enabled = amd_pmc_is_enabled,
	.pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
	.rdpmc_ecx_to_pmc = amd_rdpmc_ecx_to_pmc,
	.msr_idx_to_pmc = amd_msr_idx_to_pmc,
	.is_valid_rdpmc_ecx = amd_is_valid_rdpmc_ecx,
	.is_valid_msr = amd_is_valid_msr,
	.get_msr = amd_pmu_get_msr,
	.set_msr = amd_pmu_set_msr,
	.refresh = amd_pmu_refresh,
	.init = amd_pmu_init,
	.reset = amd_pmu_reset,
};
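/*
 * Illustrative end-to-end flow (a sketch added for clarity, not part of the
 * original file; the 0x4300c0 encoding -- event 0xc0 "retired instructions"
 * with the USR, OS and EN bits set -- is an assumption based on the AMD
 * PerfEvtSel layout). A PERFCTR_CORE guest that executes:
 *
 *	wrmsrl(MSR_F15H_PERF_CTR0, 0);		// zero counter 0
 *	wrmsrl(MSR_F15H_PERF_CTL0, 0x4300c0);	// program and enable it
 *
 * traps into amd_pmu_set_msr() twice: the first write lands in the
 * PMU_TYPE_COUNTER branch, while the second passes the reserved-bits check
 * (0x4300c0 & 0xfffffff000280000 == 0) and reaches reprogram_gp_counter(),
 * which creates or updates the perf_event backing the counter on the host.
 */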