1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * KVM PMU support for Intel CPUs 4 * 5 * Copyright 2011 Red Hat, Inc. and/or its affiliates. 6 * 7 * Authors: 8 * Avi Kivity <avi@redhat.com> 9 * Gleb Natapov <gleb@redhat.com> 10 */ 11 #include <linux/types.h> 12 #include <linux/kvm_host.h> 13 #include <linux/perf_event.h> 14 #include <asm/perf_event.h> 15 #include "x86.h" 16 #include "cpuid.h" 17 #include "lapic.h" 18 #include "nested.h" 19 #include "pmu.h" 20 21 #define MSR_PMC_FULL_WIDTH_BIT (MSR_IA32_PMC0 - MSR_IA32_PERFCTR0) 22 23 static struct kvm_event_hw_type_mapping intel_arch_events[] = { 24 /* Index must match CPUID 0x0A.EBX bit vector */ 25 [0] = { 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES }, 26 [1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS }, 27 [2] = { 0x3c, 0x01, PERF_COUNT_HW_BUS_CYCLES }, 28 [3] = { 0x2e, 0x4f, PERF_COUNT_HW_CACHE_REFERENCES }, 29 [4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES }, 30 [5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS }, 31 [6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES }, 32 [7] = { 0x00, 0x03, PERF_COUNT_HW_REF_CPU_CYCLES }, 33 }; 34 35 /* mapping between fixed pmc index and intel_arch_events array */ 36 static int fixed_pmc_events[] = {1, 0, 7}; 37 38 static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data) 39 { 40 int i; 41 42 for (i = 0; i < pmu->nr_arch_fixed_counters; i++) { 43 u8 new_ctrl = fixed_ctrl_field(data, i); 44 u8 old_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, i); 45 struct kvm_pmc *pmc; 46 47 pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i); 48 49 if (old_ctrl == new_ctrl) 50 continue; 51 52 __set_bit(INTEL_PMC_IDX_FIXED + i, pmu->pmc_in_use); 53 reprogram_fixed_counter(pmc, new_ctrl, i); 54 } 55 56 pmu->fixed_ctr_ctrl = data; 57 } 58 59 /* function is called when global control register has been updated. */ 60 static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data) 61 { 62 int bit; 63 u64 diff = pmu->global_ctrl ^ data; 64 65 pmu->global_ctrl = data; 66 67 for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX) 68 reprogram_counter(pmu, bit); 69 } 70 71 static unsigned intel_find_arch_event(struct kvm_pmu *pmu, 72 u8 event_select, 73 u8 unit_mask) 74 { 75 int i; 76 77 for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++) 78 if (intel_arch_events[i].eventsel == event_select 79 && intel_arch_events[i].unit_mask == unit_mask 80 && (pmu->available_event_types & (1 << i))) 81 break; 82 83 if (i == ARRAY_SIZE(intel_arch_events)) 84 return PERF_COUNT_HW_MAX; 85 86 return intel_arch_events[i].event_type; 87 } 88 89 static unsigned intel_find_fixed_event(int idx) 90 { 91 u32 event; 92 size_t size = ARRAY_SIZE(fixed_pmc_events); 93 94 if (idx >= size) 95 return PERF_COUNT_HW_MAX; 96 97 event = fixed_pmc_events[array_index_nospec(idx, size)]; 98 return intel_arch_events[event].event_type; 99 } 100 101 /* check if a PMC is enabled by comparing it with globl_ctrl bits. */ 102 static bool intel_pmc_is_enabled(struct kvm_pmc *pmc) 103 { 104 struct kvm_pmu *pmu = pmc_to_pmu(pmc); 105 106 return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl); 107 } 108 109 static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx) 110 { 111 if (pmc_idx < INTEL_PMC_IDX_FIXED) 112 return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + pmc_idx, 113 MSR_P6_EVNTSEL0); 114 else { 115 u32 idx = pmc_idx - INTEL_PMC_IDX_FIXED; 116 117 return get_fixed_pmc(pmu, idx + MSR_CORE_PERF_FIXED_CTR0); 118 } 119 } 120 121 static bool intel_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx) 122 { 123 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); 124 bool fixed = idx & (1u << 30); 125 126 idx &= ~(3u << 30); 127 128 return fixed ? idx < pmu->nr_arch_fixed_counters 129 : idx < pmu->nr_arch_gp_counters; 130 } 131 132 static struct kvm_pmc *intel_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu, 133 unsigned int idx, u64 *mask) 134 { 135 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); 136 bool fixed = idx & (1u << 30); 137 struct kvm_pmc *counters; 138 unsigned int num_counters; 139 140 idx &= ~(3u << 30); 141 if (fixed) { 142 counters = pmu->fixed_counters; 143 num_counters = pmu->nr_arch_fixed_counters; 144 } else { 145 counters = pmu->gp_counters; 146 num_counters = pmu->nr_arch_gp_counters; 147 } 148 if (idx >= num_counters) 149 return NULL; 150 *mask &= pmu->counter_bitmask[fixed ? KVM_PMC_FIXED : KVM_PMC_GP]; 151 return &counters[array_index_nospec(idx, num_counters)]; 152 } 153 154 static inline u64 vcpu_get_perf_capabilities(struct kvm_vcpu *vcpu) 155 { 156 if (!guest_cpuid_has(vcpu, X86_FEATURE_PDCM)) 157 return 0; 158 159 return vcpu->arch.perf_capabilities; 160 } 161 162 static inline bool fw_writes_is_enabled(struct kvm_vcpu *vcpu) 163 { 164 return (vcpu_get_perf_capabilities(vcpu) & PMU_CAP_FW_WRITES) != 0; 165 } 166 167 static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr) 168 { 169 if (!fw_writes_is_enabled(pmu_to_vcpu(pmu))) 170 return NULL; 171 172 return get_gp_pmc(pmu, msr, MSR_IA32_PMC0); 173 } 174 175 bool intel_pmu_lbr_is_compatible(struct kvm_vcpu *vcpu) 176 { 177 /* 178 * As a first step, a guest could only enable LBR feature if its 179 * cpu model is the same as the host because the LBR registers 180 * would be pass-through to the guest and they're model specific. 181 */ 182 return boot_cpu_data.x86_model == guest_cpuid_model(vcpu); 183 } 184 185 bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu) 186 { 187 struct x86_pmu_lbr *lbr = vcpu_to_lbr_records(vcpu); 188 189 return lbr->nr && (vcpu_get_perf_capabilities(vcpu) & PMU_CAP_LBR_FMT); 190 } 191 192 static bool intel_pmu_is_valid_lbr_msr(struct kvm_vcpu *vcpu, u32 index) 193 { 194 struct x86_pmu_lbr *records = vcpu_to_lbr_records(vcpu); 195 bool ret = false; 196 197 if (!intel_pmu_lbr_is_enabled(vcpu)) 198 return ret; 199 200 ret = (index == MSR_LBR_SELECT) || (index == MSR_LBR_TOS) || 201 (index >= records->from && index < records->from + records->nr) || 202 (index >= records->to && index < records->to + records->nr); 203 204 if (!ret && records->info) 205 ret = (index >= records->info && index < records->info + records->nr); 206 207 return ret; 208 } 209 210 static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr) 211 { 212 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); 213 int ret; 214 215 switch (msr) { 216 case MSR_CORE_PERF_FIXED_CTR_CTRL: 217 case MSR_CORE_PERF_GLOBAL_STATUS: 218 case MSR_CORE_PERF_GLOBAL_CTRL: 219 case MSR_CORE_PERF_GLOBAL_OVF_CTRL: 220 ret = pmu->version > 1; 221 break; 222 default: 223 ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) || 224 get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) || 225 get_fixed_pmc(pmu, msr) || get_fw_gp_pmc(pmu, msr) || 226 intel_pmu_is_valid_lbr_msr(vcpu, msr); 227 break; 228 } 229 230 return ret; 231 } 232 233 static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr) 234 { 235 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); 236 struct kvm_pmc *pmc; 237 238 pmc = get_fixed_pmc(pmu, msr); 239 pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0); 240 pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0); 241 242 return pmc; 243 } 244 245 static inline void intel_pmu_release_guest_lbr_event(struct kvm_vcpu *vcpu) 246 { 247 struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu); 248 249 if (lbr_desc->event) { 250 perf_event_release_kernel(lbr_desc->event); 251 lbr_desc->event = NULL; 252 vcpu_to_pmu(vcpu)->event_count--; 253 } 254 } 255 256 int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu) 257 { 258 struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu); 259 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); 260 struct perf_event *event; 261 262 /* 263 * The perf_event_attr is constructed in the minimum efficient way: 264 * - set 'pinned = true' to make it task pinned so that if another 265 * cpu pinned event reclaims LBR, the event->oncpu will be set to -1; 266 * - set '.exclude_host = true' to record guest branches behavior; 267 * 268 * - set '.config = INTEL_FIXED_VLBR_EVENT' to indicates host perf 269 * schedule the event without a real HW counter but a fake one; 270 * check is_guest_lbr_event() and __intel_get_event_constraints(); 271 * 272 * - set 'sample_type = PERF_SAMPLE_BRANCH_STACK' and 273 * 'branch_sample_type = PERF_SAMPLE_BRANCH_CALL_STACK | 274 * PERF_SAMPLE_BRANCH_USER' to configure it as a LBR callstack 275 * event, which helps KVM to save/restore guest LBR records 276 * during host context switches and reduces quite a lot overhead, 277 * check branch_user_callstack() and intel_pmu_lbr_sched_task(); 278 */ 279 struct perf_event_attr attr = { 280 .type = PERF_TYPE_RAW, 281 .size = sizeof(attr), 282 .config = INTEL_FIXED_VLBR_EVENT, 283 .sample_type = PERF_SAMPLE_BRANCH_STACK, 284 .pinned = true, 285 .exclude_host = true, 286 .branch_sample_type = PERF_SAMPLE_BRANCH_CALL_STACK | 287 PERF_SAMPLE_BRANCH_USER, 288 }; 289 290 if (unlikely(lbr_desc->event)) { 291 __set_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use); 292 return 0; 293 } 294 295 event = perf_event_create_kernel_counter(&attr, -1, 296 current, NULL, NULL); 297 if (IS_ERR(event)) { 298 pr_debug_ratelimited("%s: failed %ld\n", 299 __func__, PTR_ERR(event)); 300 return PTR_ERR(event); 301 } 302 lbr_desc->event = event; 303 pmu->event_count++; 304 __set_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use); 305 return 0; 306 } 307 308 /* 309 * It's safe to access LBR msrs from guest when they have not 310 * been passthrough since the host would help restore or reset 311 * the LBR msrs records when the guest LBR event is scheduled in. 312 */ 313 static bool intel_pmu_handle_lbr_msrs_access(struct kvm_vcpu *vcpu, 314 struct msr_data *msr_info, bool read) 315 { 316 struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu); 317 u32 index = msr_info->index; 318 319 if (!intel_pmu_is_valid_lbr_msr(vcpu, index)) 320 return false; 321 322 if (!lbr_desc->event && intel_pmu_create_guest_lbr_event(vcpu) < 0) 323 goto dummy; 324 325 /* 326 * Disable irq to ensure the LBR feature doesn't get reclaimed by the 327 * host at the time the value is read from the msr, and this avoids the 328 * host LBR value to be leaked to the guest. If LBR has been reclaimed, 329 * return 0 on guest reads. 330 */ 331 local_irq_disable(); 332 if (lbr_desc->event->state == PERF_EVENT_STATE_ACTIVE) { 333 if (read) 334 rdmsrl(index, msr_info->data); 335 else 336 wrmsrl(index, msr_info->data); 337 __set_bit(INTEL_PMC_IDX_FIXED_VLBR, vcpu_to_pmu(vcpu)->pmc_in_use); 338 local_irq_enable(); 339 return true; 340 } 341 clear_bit(INTEL_PMC_IDX_FIXED_VLBR, vcpu_to_pmu(vcpu)->pmc_in_use); 342 local_irq_enable(); 343 344 dummy: 345 if (read) 346 msr_info->data = 0; 347 return true; 348 } 349 350 static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) 351 { 352 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); 353 struct kvm_pmc *pmc; 354 u32 msr = msr_info->index; 355 356 switch (msr) { 357 case MSR_CORE_PERF_FIXED_CTR_CTRL: 358 msr_info->data = pmu->fixed_ctr_ctrl; 359 return 0; 360 case MSR_CORE_PERF_GLOBAL_STATUS: 361 msr_info->data = pmu->global_status; 362 return 0; 363 case MSR_CORE_PERF_GLOBAL_CTRL: 364 msr_info->data = pmu->global_ctrl; 365 return 0; 366 case MSR_CORE_PERF_GLOBAL_OVF_CTRL: 367 msr_info->data = 0; 368 return 0; 369 default: 370 if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) || 371 (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) { 372 u64 val = pmc_read_counter(pmc); 373 msr_info->data = 374 val & pmu->counter_bitmask[KVM_PMC_GP]; 375 return 0; 376 } else if ((pmc = get_fixed_pmc(pmu, msr))) { 377 u64 val = pmc_read_counter(pmc); 378 msr_info->data = 379 val & pmu->counter_bitmask[KVM_PMC_FIXED]; 380 return 0; 381 } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) { 382 msr_info->data = pmc->eventsel; 383 return 0; 384 } else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, true)) 385 return 0; 386 } 387 388 return 1; 389 } 390 391 static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) 392 { 393 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); 394 struct kvm_pmc *pmc; 395 u32 msr = msr_info->index; 396 u64 data = msr_info->data; 397 398 switch (msr) { 399 case MSR_CORE_PERF_FIXED_CTR_CTRL: 400 if (pmu->fixed_ctr_ctrl == data) 401 return 0; 402 if (!(data & 0xfffffffffffff444ull)) { 403 reprogram_fixed_counters(pmu, data); 404 return 0; 405 } 406 break; 407 case MSR_CORE_PERF_GLOBAL_STATUS: 408 if (msr_info->host_initiated) { 409 pmu->global_status = data; 410 return 0; 411 } 412 break; /* RO MSR */ 413 case MSR_CORE_PERF_GLOBAL_CTRL: 414 if (pmu->global_ctrl == data) 415 return 0; 416 if (kvm_valid_perf_global_ctrl(pmu, data)) { 417 global_ctrl_changed(pmu, data); 418 return 0; 419 } 420 break; 421 case MSR_CORE_PERF_GLOBAL_OVF_CTRL: 422 if (!(data & pmu->global_ovf_ctrl_mask)) { 423 if (!msr_info->host_initiated) 424 pmu->global_status &= ~data; 425 return 0; 426 } 427 break; 428 default: 429 if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) || 430 (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) { 431 if ((msr & MSR_PMC_FULL_WIDTH_BIT) && 432 (data & ~pmu->counter_bitmask[KVM_PMC_GP])) 433 return 1; 434 if (!msr_info->host_initiated && 435 !(msr & MSR_PMC_FULL_WIDTH_BIT)) 436 data = (s64)(s32)data; 437 pmc->counter += data - pmc_read_counter(pmc); 438 if (pmc->perf_event && !pmc->is_paused) 439 perf_event_period(pmc->perf_event, 440 get_sample_period(pmc, data)); 441 return 0; 442 } else if ((pmc = get_fixed_pmc(pmu, msr))) { 443 pmc->counter += data - pmc_read_counter(pmc); 444 if (pmc->perf_event && !pmc->is_paused) 445 perf_event_period(pmc->perf_event, 446 get_sample_period(pmc, data)); 447 return 0; 448 } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) { 449 if (data == pmc->eventsel) 450 return 0; 451 if (!(data & pmu->reserved_bits)) { 452 reprogram_gp_counter(pmc, data); 453 return 0; 454 } 455 } else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, false)) 456 return 0; 457 } 458 459 return 1; 460 } 461 462 static void intel_pmu_refresh(struct kvm_vcpu *vcpu) 463 { 464 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); 465 struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu); 466 467 struct x86_pmu_capability x86_pmu; 468 struct kvm_cpuid_entry2 *entry; 469 union cpuid10_eax eax; 470 union cpuid10_edx edx; 471 472 pmu->nr_arch_gp_counters = 0; 473 pmu->nr_arch_fixed_counters = 0; 474 pmu->counter_bitmask[KVM_PMC_GP] = 0; 475 pmu->counter_bitmask[KVM_PMC_FIXED] = 0; 476 pmu->version = 0; 477 pmu->reserved_bits = 0xffffffff00200000ull; 478 479 entry = kvm_find_cpuid_entry(vcpu, 0xa, 0); 480 if (!entry) 481 return; 482 eax.full = entry->eax; 483 edx.full = entry->edx; 484 485 pmu->version = eax.split.version_id; 486 if (!pmu->version) 487 return; 488 489 perf_get_x86_pmu_capability(&x86_pmu); 490 491 pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters, 492 x86_pmu.num_counters_gp); 493 eax.split.bit_width = min_t(int, eax.split.bit_width, x86_pmu.bit_width_gp); 494 pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1; 495 eax.split.mask_length = min_t(int, eax.split.mask_length, x86_pmu.events_mask_len); 496 pmu->available_event_types = ~entry->ebx & 497 ((1ull << eax.split.mask_length) - 1); 498 499 if (pmu->version == 1) { 500 pmu->nr_arch_fixed_counters = 0; 501 } else { 502 pmu->nr_arch_fixed_counters = 503 min_t(int, edx.split.num_counters_fixed, 504 x86_pmu.num_counters_fixed); 505 edx.split.bit_width_fixed = min_t(int, 506 edx.split.bit_width_fixed, x86_pmu.bit_width_fixed); 507 pmu->counter_bitmask[KVM_PMC_FIXED] = 508 ((u64)1 << edx.split.bit_width_fixed) - 1; 509 } 510 511 pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) | 512 (((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED); 513 pmu->global_ctrl_mask = ~pmu->global_ctrl; 514 pmu->global_ovf_ctrl_mask = pmu->global_ctrl_mask 515 & ~(MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF | 516 MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD); 517 if (vmx_pt_mode_is_host_guest()) 518 pmu->global_ovf_ctrl_mask &= 519 ~MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI; 520 521 entry = kvm_find_cpuid_entry(vcpu, 7, 0); 522 if (entry && 523 (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) && 524 (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM))) 525 pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED; 526 527 bitmap_set(pmu->all_valid_pmc_idx, 528 0, pmu->nr_arch_gp_counters); 529 bitmap_set(pmu->all_valid_pmc_idx, 530 INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters); 531 532 nested_vmx_pmu_entry_exit_ctls_update(vcpu); 533 534 if (intel_pmu_lbr_is_compatible(vcpu)) 535 x86_perf_get_lbr(&lbr_desc->records); 536 else 537 lbr_desc->records.nr = 0; 538 539 if (lbr_desc->records.nr) 540 bitmap_set(pmu->all_valid_pmc_idx, INTEL_PMC_IDX_FIXED_VLBR, 1); 541 } 542 543 static void intel_pmu_init(struct kvm_vcpu *vcpu) 544 { 545 int i; 546 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); 547 struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu); 548 549 for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) { 550 pmu->gp_counters[i].type = KVM_PMC_GP; 551 pmu->gp_counters[i].vcpu = vcpu; 552 pmu->gp_counters[i].idx = i; 553 pmu->gp_counters[i].current_config = 0; 554 } 555 556 for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) { 557 pmu->fixed_counters[i].type = KVM_PMC_FIXED; 558 pmu->fixed_counters[i].vcpu = vcpu; 559 pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED; 560 pmu->fixed_counters[i].current_config = 0; 561 } 562 563 vcpu->arch.perf_capabilities = vmx_get_perf_capabilities(); 564 lbr_desc->records.nr = 0; 565 lbr_desc->event = NULL; 566 lbr_desc->msr_passthrough = false; 567 } 568 569 static void intel_pmu_reset(struct kvm_vcpu *vcpu) 570 { 571 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); 572 struct kvm_pmc *pmc = NULL; 573 int i; 574 575 for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) { 576 pmc = &pmu->gp_counters[i]; 577 578 pmc_stop_counter(pmc); 579 pmc->counter = pmc->eventsel = 0; 580 } 581 582 for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) { 583 pmc = &pmu->fixed_counters[i]; 584 585 pmc_stop_counter(pmc); 586 pmc->counter = 0; 587 } 588 589 pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status = 0; 590 591 intel_pmu_release_guest_lbr_event(vcpu); 592 } 593 594 /* 595 * Emulate LBR_On_PMI behavior for 1 < pmu.version < 4. 596 * 597 * If Freeze_LBR_On_PMI = 1, the LBR is frozen on PMI and 598 * the KVM emulates to clear the LBR bit (bit 0) in IA32_DEBUGCTL. 599 * 600 * Guest needs to re-enable LBR to resume branches recording. 601 */ 602 static void intel_pmu_legacy_freezing_lbrs_on_pmi(struct kvm_vcpu *vcpu) 603 { 604 u64 data = vmcs_read64(GUEST_IA32_DEBUGCTL); 605 606 if (data & DEBUGCTLMSR_FREEZE_LBRS_ON_PMI) { 607 data &= ~DEBUGCTLMSR_LBR; 608 vmcs_write64(GUEST_IA32_DEBUGCTL, data); 609 } 610 } 611 612 static void intel_pmu_deliver_pmi(struct kvm_vcpu *vcpu) 613 { 614 u8 version = vcpu_to_pmu(vcpu)->version; 615 616 if (!intel_pmu_lbr_is_enabled(vcpu)) 617 return; 618 619 if (version > 1 && version < 4) 620 intel_pmu_legacy_freezing_lbrs_on_pmi(vcpu); 621 } 622 623 static void vmx_update_intercept_for_lbr_msrs(struct kvm_vcpu *vcpu, bool set) 624 { 625 struct x86_pmu_lbr *lbr = vcpu_to_lbr_records(vcpu); 626 int i; 627 628 for (i = 0; i < lbr->nr; i++) { 629 vmx_set_intercept_for_msr(vcpu, lbr->from + i, MSR_TYPE_RW, set); 630 vmx_set_intercept_for_msr(vcpu, lbr->to + i, MSR_TYPE_RW, set); 631 if (lbr->info) 632 vmx_set_intercept_for_msr(vcpu, lbr->info + i, MSR_TYPE_RW, set); 633 } 634 635 vmx_set_intercept_for_msr(vcpu, MSR_LBR_SELECT, MSR_TYPE_RW, set); 636 vmx_set_intercept_for_msr(vcpu, MSR_LBR_TOS, MSR_TYPE_RW, set); 637 } 638 639 static inline void vmx_disable_lbr_msrs_passthrough(struct kvm_vcpu *vcpu) 640 { 641 struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu); 642 643 if (!lbr_desc->msr_passthrough) 644 return; 645 646 vmx_update_intercept_for_lbr_msrs(vcpu, true); 647 lbr_desc->msr_passthrough = false; 648 } 649 650 static inline void vmx_enable_lbr_msrs_passthrough(struct kvm_vcpu *vcpu) 651 { 652 struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu); 653 654 if (lbr_desc->msr_passthrough) 655 return; 656 657 vmx_update_intercept_for_lbr_msrs(vcpu, false); 658 lbr_desc->msr_passthrough = true; 659 } 660 661 /* 662 * Higher priority host perf events (e.g. cpu pinned) could reclaim the 663 * pmu resources (e.g. LBR) that were assigned to the guest. This is 664 * usually done via ipi calls (more details in perf_install_in_context). 665 * 666 * Before entering the non-root mode (with irq disabled here), double 667 * confirm that the pmu features enabled to the guest are not reclaimed 668 * by higher priority host events. Otherwise, disallow vcpu's access to 669 * the reclaimed features. 670 */ 671 void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu) 672 { 673 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); 674 struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu); 675 676 if (!lbr_desc->event) { 677 vmx_disable_lbr_msrs_passthrough(vcpu); 678 if (vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR) 679 goto warn; 680 if (test_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use)) 681 goto warn; 682 return; 683 } 684 685 if (lbr_desc->event->state < PERF_EVENT_STATE_ACTIVE) { 686 vmx_disable_lbr_msrs_passthrough(vcpu); 687 __clear_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use); 688 goto warn; 689 } else 690 vmx_enable_lbr_msrs_passthrough(vcpu); 691 692 return; 693 694 warn: 695 pr_warn_ratelimited("kvm: vcpu-%d: fail to passthrough LBR.\n", 696 vcpu->vcpu_id); 697 } 698 699 static void intel_pmu_cleanup(struct kvm_vcpu *vcpu) 700 { 701 if (!(vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR)) 702 intel_pmu_release_guest_lbr_event(vcpu); 703 } 704 705 struct kvm_pmu_ops intel_pmu_ops = { 706 .find_arch_event = intel_find_arch_event, 707 .find_fixed_event = intel_find_fixed_event, 708 .pmc_is_enabled = intel_pmc_is_enabled, 709 .pmc_idx_to_pmc = intel_pmc_idx_to_pmc, 710 .rdpmc_ecx_to_pmc = intel_rdpmc_ecx_to_pmc, 711 .msr_idx_to_pmc = intel_msr_idx_to_pmc, 712 .is_valid_rdpmc_ecx = intel_is_valid_rdpmc_ecx, 713 .is_valid_msr = intel_is_valid_msr, 714 .get_msr = intel_pmu_get_msr, 715 .set_msr = intel_pmu_set_msr, 716 .refresh = intel_pmu_refresh, 717 .init = intel_pmu_init, 718 .reset = intel_pmu_reset, 719 .deliver_pmi = intel_pmu_deliver_pmi, 720 .cleanup = intel_pmu_cleanup, 721 }; 722