Lines Matching refs:pmu (arch/x86/kvm/vmx/pmu_intel.c, KVM's Intel vPMU emulation)

71 static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)  in reprogram_fixed_counters()  argument
74 u64 old_fixed_ctr_ctrl = pmu->fixed_ctr_ctrl; in reprogram_fixed_counters()
77 pmu->fixed_ctr_ctrl = data; in reprogram_fixed_counters()
78 for (i = 0; i < pmu->nr_arch_fixed_counters; i++) { in reprogram_fixed_counters()
85 pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i); in reprogram_fixed_counters()
87 __set_bit(INTEL_PMC_IDX_FIXED + i, pmu->pmc_in_use); in reprogram_fixed_counters()
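reprogram_fixed_counters() only touches counters whose 4-bit control field in IA32_FIXED_CTR_CTRL actually changed between old_fixed_ctr_ctrl and data. A minimal standalone sketch of that change detection, assuming fixed_ctrl_field() mirrors the kernel helper of the same name; the MSR images are made up for illustration:

#include <stdint.h>
#include <stdio.h>

/* Extract the 4-bit control field (enable ring levels, PMI bit) for
 * fixed counter @idx from an IA32_FIXED_CTR_CTRL image. */
static uint64_t fixed_ctrl_field(uint64_t ctrl, int idx)
{
	return (ctrl >> (idx * 4)) & 0xf;
}

int main(void)
{
	uint64_t old_ctrl = 0x0b0;	/* hypothetical: counter 1 enabled */
	uint64_t new_ctrl = 0x0bb;	/* hypothetical: counter 0 now enabled too */

	for (int i = 0; i < 3; i++) {
		if (fixed_ctrl_field(old_ctrl, i) != fixed_ctrl_field(new_ctrl, i))
			printf("fixed counter %d changed: %#llx -> %#llx\n", i,
			       (unsigned long long)fixed_ctrl_field(old_ctrl, i),
			       (unsigned long long)fixed_ctrl_field(new_ctrl, i));
	}
	return 0;
}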
92 static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx) in intel_pmc_idx_to_pmc() argument
95 return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + pmc_idx, in intel_pmc_idx_to_pmc()
100 return get_fixed_pmc(pmu, idx + MSR_CORE_PERF_FIXED_CTR0); in intel_pmc_idx_to_pmc()
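intel_pmc_idx_to_pmc() splits the global counter index space: indices below INTEL_PMC_IDX_FIXED (32) address GP counters via MSR_P6_EVNTSEL0 + idx, and everything at or above maps onto the fixed counters at MSR_CORE_PERF_FIXED_CTR0. A sketch of the same split; the MSR constants are the architectural values:

#include <stdint.h>
#include <stdio.h>

#define INTEL_PMC_IDX_FIXED		32	/* first fixed-counter index */
#define MSR_P6_EVNTSEL0			0x186
#define MSR_CORE_PERF_FIXED_CTR0	0x309

/* Translate a global PMC index into the MSR that addresses it,
 * mirroring the GP/fixed split in intel_pmc_idx_to_pmc(). */
static uint32_t pmc_idx_to_msr(int pmc_idx)
{
	if (pmc_idx < INTEL_PMC_IDX_FIXED)
		return MSR_P6_EVNTSEL0 + pmc_idx;
	return MSR_CORE_PERF_FIXED_CTR0 + (pmc_idx - INTEL_PMC_IDX_FIXED);
}

int main(void)
{
	printf("GP counter 1    -> MSR %#x\n", pmc_idx_to_msr(1));	/* 0x187 */
	printf("fixed counter 0 -> MSR %#x\n", pmc_idx_to_msr(32));	/* 0x309 */
	return 0;
}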
106 struct kvm_pmu *pmu = pmc_to_pmu(pmc); in intel_hw_event_available() local
122 return pmu->available_event_types & BIT(i); in intel_hw_event_available()
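intel_hw_event_available() keys off pmu->available_event_types, which intel_pmu_refresh() derives from CPUID.0AH:EBX (line 524 below): a set EBX bit means the architectural event is *not* available, hence the inversion. A small sketch of that test; the mask width is hypothetical (KVM takes it from the CPUID mask length):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* CPUID.0AH:EBX: a set bit means the event is unavailable. Bit 0 is
	 * core cycles, bit 1 instructions retired, bit 2 reference cycles,
	 * ... (SDM architectural event ordering). */
	uint32_t ebx = 1u << 2;			/* hypothetical: ref cycles missing */
	uint32_t mask = (1u << 8) - 1;		/* assume 8 enumerated events */
	uint32_t available = ~ebx & mask;	/* as in intel_pmu_refresh() */

	for (int i = 0; i < 8; i++)
		printf("event %d available: %d\n", i, !!(available & (1u << i)));
	return 0;
}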
130 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in intel_is_valid_rdpmc_ecx() local
135 return fixed ? idx < pmu->nr_arch_fixed_counters in intel_is_valid_rdpmc_ecx()
136 : idx < pmu->nr_arch_gp_counters; in intel_is_valid_rdpmc_ecx()
142 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in intel_rdpmc_ecx_to_pmc() local
149 counters = pmu->fixed_counters; in intel_rdpmc_ecx_to_pmc()
150 num_counters = pmu->nr_arch_fixed_counters; in intel_rdpmc_ecx_to_pmc()
152 counters = pmu->gp_counters; in intel_rdpmc_ecx_to_pmc()
153 num_counters = pmu->nr_arch_gp_counters; in intel_rdpmc_ecx_to_pmc()
157 *mask &= pmu->counter_bitmask[fixed ? KVM_PMC_FIXED : KVM_PMC_GP]; in intel_rdpmc_ecx_to_pmc()
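For RDPMC, bit 30 of the guest's ECX selects the fixed-counter class and the low bits index into that class; the value handed back is then truncated to the counter's emulated width via counter_bitmask. A standalone sketch of the decode and truncation, with counter counts, widths, and the raw value all hypothetical:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ecx = (1u << 30) | 1;		/* hypothetical: fixed counter 1 */
	int fixed = !!(ecx & (1u << 30));
	uint32_t idx = ecx & ~(3u << 30);	/* strip type/fast bits, as KVM does */

	int nr_gp = 8, nr_fixed = 3;		/* hypothetical CPUID-derived counts */
	int width = 48;				/* hypothetical counter bit width */
	uint64_t bitmask = (1ull << width) - 1;	/* counter_bitmask equivalent */

	if (idx >= (uint32_t)(fixed ? nr_fixed : nr_gp)) {
		puts("invalid RDPMC index -> #GP");
		return 1;
	}
	uint64_t raw = 0x1234567890abcdefull;	/* pretend counter value */
	printf("%s counter %u reads %#llx\n", fixed ? "fixed" : "gp", idx,
	       (unsigned long long)(raw & bitmask));
	return 0;
}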
174 static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr) in get_fw_gp_pmc() argument
176 if (!fw_writes_is_enabled(pmu_to_vcpu(pmu))) in get_fw_gp_pmc()
179 return get_gp_pmc(pmu, msr, MSR_IA32_PMC0); in get_fw_gp_pmc()
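get_fw_gp_pmc() handles the full-width counter aliases: when full-width writes are available (IA32_PERF_CAPABILITIES.FW_WRITE, bit 13), GP counter n is also addressable through MSR_IA32_PMC0 + n (0x4c1-based), while the legacy MSR_IA32_PERFCTR0 + n (0xc1-based) alias only takes sign-extended 32-bit writes. A sketch resolving either alias to a counter number; the MSR bases are architectural, the validation is simplified:

#include <stdint.h>
#include <stdio.h>

#define MSR_IA32_PERFCTR0	0x0c1	/* legacy alias, 32-bit writes */
#define MSR_IA32_PMC0		0x4c1	/* full-width alias */

/* Return the GP counter number addressed by @msr, or -1. In KVM,
 * @nr_gp and @fw_writes come from guest CPUID/PERF_CAPABILITIES. */
static int msr_to_gp_counter(uint32_t msr, int nr_gp, int fw_writes)
{
	if (msr >= MSR_IA32_PMC0 && msr < MSR_IA32_PMC0 + nr_gp)
		return fw_writes ? (int)(msr - MSR_IA32_PMC0) : -1;
	if (msr >= MSR_IA32_PERFCTR0 && msr < MSR_IA32_PERFCTR0 + nr_gp)
		return (int)(msr - MSR_IA32_PERFCTR0);
	return -1;
}

int main(void)
{
	printf("%d\n", msr_to_gp_counter(0x4c2, 8, 1));	/* 1 */
	printf("%d\n", msr_to_gp_counter(0x4c2, 8, 0));	/* -1: no FW_WRITE */
	return 0;
}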
202 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in intel_is_valid_msr() local
208 return kvm_pmu_has_perf_global_ctrl(pmu); in intel_is_valid_msr()
221 ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) || in intel_is_valid_msr()
222 get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) || in intel_is_valid_msr()
223 get_fixed_pmc(pmu, msr) || get_fw_gp_pmc(pmu, msr) || in intel_is_valid_msr()
233 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in intel_msr_idx_to_pmc() local
236 pmc = get_fixed_pmc(pmu, msr); in intel_msr_idx_to_pmc()
237 pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0); in intel_msr_idx_to_pmc()
238 pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0); in intel_msr_idx_to_pmc()
257 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in intel_pmu_create_guest_lbr_event() local
289 __set_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use); in intel_pmu_create_guest_lbr_event()
301 pmu->event_count++; in intel_pmu_create_guest_lbr_event()
302 __set_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use); in intel_pmu_create_guest_lbr_event()
350 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in intel_pmu_get_msr() local
356 msr_info->data = pmu->fixed_ctr_ctrl; in intel_pmu_get_msr()
359 msr_info->data = pmu->pebs_enable; in intel_pmu_get_msr()
362 msr_info->data = pmu->ds_area; in intel_pmu_get_msr()
365 msr_info->data = pmu->pebs_data_cfg; in intel_pmu_get_msr()
368 if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) || in intel_pmu_get_msr()
369 (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) { in intel_pmu_get_msr()
372 val & pmu->counter_bitmask[KVM_PMC_GP]; in intel_pmu_get_msr()
374 } else if ((pmc = get_fixed_pmc(pmu, msr))) { in intel_pmu_get_msr()
377 val & pmu->counter_bitmask[KVM_PMC_FIXED]; in intel_pmu_get_msr()
379 } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) { in intel_pmu_get_msr()
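intel_pmu_get_msr() is a dispatcher: PMU-wide MSRs (FIXED_CTR_CTRL, PEBS_ENABLE, DS_AREA, PEBS_DATA_CFG) are served straight from struct kvm_pmu fields, and everything else falls through to the per-counter lookups, with counter reads masked by counter_bitmask[]. A skeletal sketch of that shape; the MSR indices are architectural, and the pmu struct is trimmed to just the fields used here:

#include <stdint.h>
#include <stdio.h>

#define MSR_CORE_PERF_FIXED_CTR_CTRL	0x38d
#define MSR_IA32_PEBS_ENABLE		0x3f1
#define MSR_IA32_DS_AREA		0x600
#define MSR_PEBS_DATA_CFG		0x3f2

struct pmu {	/* trimmed stand-in for struct kvm_pmu */
	uint64_t fixed_ctr_ctrl, pebs_enable, ds_area, pebs_data_cfg;
};

static int pmu_get_msr(struct pmu *pmu, uint32_t msr, uint64_t *data)
{
	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL: *data = pmu->fixed_ctr_ctrl;  return 0;
	case MSR_IA32_PEBS_ENABLE:	   *data = pmu->pebs_enable;	  return 0;
	case MSR_IA32_DS_AREA:		   *data = pmu->ds_area;	  return 0;
	case MSR_PEBS_DATA_CFG:		   *data = pmu->pebs_data_cfg;	  return 0;
	default:	/* per-counter MSRs would be resolved here */
		return -1;
	}
}

int main(void)
{
	struct pmu pmu = { .pebs_enable = 0xf };
	uint64_t v;

	if (!pmu_get_msr(&pmu, MSR_IA32_PEBS_ENABLE, &v))
		printf("PEBS_ENABLE = %#llx\n", (unsigned long long)v);
	return 0;
}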
393 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in intel_pmu_set_msr() local
401 if (data & pmu->fixed_ctr_ctrl_mask) in intel_pmu_set_msr()
404 if (pmu->fixed_ctr_ctrl != data) in intel_pmu_set_msr()
405 reprogram_fixed_counters(pmu, data); in intel_pmu_set_msr()
408 if (data & pmu->pebs_enable_mask) in intel_pmu_set_msr()
411 if (pmu->pebs_enable != data) { in intel_pmu_set_msr()
412 diff = pmu->pebs_enable ^ data; in intel_pmu_set_msr()
413 pmu->pebs_enable = data; in intel_pmu_set_msr()
414 reprogram_counters(pmu, diff); in intel_pmu_set_msr()
421 pmu->ds_area = data; in intel_pmu_set_msr()
424 if (data & pmu->pebs_data_cfg_mask) in intel_pmu_set_msr()
427 pmu->pebs_data_cfg = data; in intel_pmu_set_msr()
430 if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) || in intel_pmu_set_msr()
431 (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) { in intel_pmu_set_msr()
433 (data & ~pmu->counter_bitmask[KVM_PMC_GP])) in intel_pmu_set_msr()
442 } else if ((pmc = get_fixed_pmc(pmu, msr))) { in intel_pmu_set_msr()
446 } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) { in intel_pmu_set_msr()
447 reserved_bits = pmu->reserved_bits; in intel_pmu_set_msr()
449 (pmu->raw_event_mask & HSW_IN_TX_CHECKPOINTED)) in intel_pmu_set_msr()
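The write path guards every MSR with its reserved-bit mask (fixed_ctr_ctrl_mask, pebs_enable_mask, pebs_data_cfg_mask) before accepting the value, and reprograms counters only on an actual change, using the XOR diff to find the affected ones. A sketch of that guard-then-diff pattern; the mask here is a placeholder for what intel_pmu_refresh() computes:

#include <stdint.h>
#include <stdio.h>

/* Accept a write to a PEBS_ENABLE-style MSR: reject reserved bits,
 * then report which counter bits changed (the set to reprogram). */
static int set_guarded_msr(uint64_t *reg, uint64_t data, uint64_t rsvd_mask,
			   uint64_t *diff)
{
	if (data & rsvd_mask)
		return -1;		/* KVM would inject #GP here */
	*diff = *reg ^ data;		/* changed bits -> counters to reprogram */
	*reg = data;
	return 0;
}

int main(void)
{
	uint64_t pebs_enable = 0x1, diff;
	uint64_t rsvd = ~0xfull;	/* hypothetical: 4 PEBS-capable counters */

	if (!set_guarded_msr(&pebs_enable, 0x6, rsvd, &diff))
		printf("ok, diff = %#llx\n", (unsigned long long)diff);	/* 0x7 */
	if (set_guarded_msr(&pebs_enable, 0x100, rsvd, &diff))
		puts("reserved bit set -> #GP");
	return 0;
}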
469 static void setup_fixed_pmc_eventsel(struct kvm_pmu *pmu) in setup_fixed_pmc_eventsel() argument
475 for (i = 0; i < pmu->nr_arch_fixed_counters; i++) { in setup_fixed_pmc_eventsel()
477 struct kvm_pmc *pmc = &pmu->fixed_counters[index]; in setup_fixed_pmc_eventsel()
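setup_fixed_pmc_eventsel() gives each fixed counter the eventsel/umask encoding of the architectural event it hardwires, so later code can treat fixed and GP counters uniformly. The pairings are SDM-defined and can be tabulated as below; the struct is a simplification of the kernel's event table:

#include <stdint.h>
#include <stdio.h>

/* Architectural events hardwired to the fixed counters (Intel SDM):
 * fixed 0 = instructions retired, fixed 1 = unhalted core cycles,
 * fixed 2 = unhalted reference cycles. */
static const struct { uint8_t eventsel, umask; const char *name; } fixed_events[] = {
	{ 0xc0, 0x00, "instructions retired" },
	{ 0x3c, 0x00, "unhalted core cycles" },
	{ 0x3c, 0x01, "unhalted reference cycles" },
};

int main(void)
{
	for (int i = 0; i < 3; i++)
		printf("fixed %d: eventsel=%#x umask=%#x (%s)\n", i,
		       fixed_events[i].eventsel, fixed_events[i].umask,
		       fixed_events[i].name);
	return 0;
}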
487 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in intel_pmu_refresh() local
513 pmu->version = eax.split.version_id; in intel_pmu_refresh()
514 if (!pmu->version) in intel_pmu_refresh()
517 pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters, in intel_pmu_refresh()
521 pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1; in intel_pmu_refresh()
524 pmu->available_event_types = ~entry->ebx & in intel_pmu_refresh()
527 if (pmu->version == 1) { in intel_pmu_refresh()
528 pmu->nr_arch_fixed_counters = 0; in intel_pmu_refresh()
530 pmu->nr_arch_fixed_counters = min_t(int, edx.split.num_counters_fixed, in intel_pmu_refresh()
534 pmu->counter_bitmask[KVM_PMC_FIXED] = in intel_pmu_refresh()
536 setup_fixed_pmc_eventsel(pmu); in intel_pmu_refresh()
539 for (i = 0; i < pmu->nr_arch_fixed_counters; i++) in intel_pmu_refresh()
540 pmu->fixed_ctr_ctrl_mask &= ~(0xbull << (i * 4)); in intel_pmu_refresh()
541 counter_mask = ~(((1ull << pmu->nr_arch_gp_counters) - 1) | in intel_pmu_refresh()
542 (((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED)); in intel_pmu_refresh()
543 pmu->global_ctrl_mask = counter_mask; in intel_pmu_refresh()
550 pmu->global_status_mask = pmu->global_ctrl_mask in intel_pmu_refresh()
554 pmu->global_status_mask &= in intel_pmu_refresh()
561 pmu->reserved_bits ^= HSW_IN_TX; in intel_pmu_refresh()
562 pmu->raw_event_mask |= (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED); in intel_pmu_refresh()
565 bitmap_set(pmu->all_valid_pmc_idx, in intel_pmu_refresh()
566 0, pmu->nr_arch_gp_counters); in intel_pmu_refresh()
567 bitmap_set(pmu->all_valid_pmc_idx, in intel_pmu_refresh()
568 INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters); in intel_pmu_refresh()
578 bitmap_set(pmu->all_valid_pmc_idx, INTEL_PMC_IDX_FIXED_VLBR, 1); in intel_pmu_refresh()
582 pmu->pebs_enable_mask = counter_mask; in intel_pmu_refresh()
583 pmu->reserved_bits &= ~ICL_EVENTSEL_ADAPTIVE; in intel_pmu_refresh()
584 for (i = 0; i < pmu->nr_arch_fixed_counters; i++) { in intel_pmu_refresh()
585 pmu->fixed_ctr_ctrl_mask &= in intel_pmu_refresh()
588 pmu->pebs_data_cfg_mask = ~0xff00000full; in intel_pmu_refresh()
590 pmu->pebs_enable_mask = in intel_pmu_refresh()
591 ~((1ull << pmu->nr_arch_gp_counters) - 1); in intel_pmu_refresh()
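Lines 541-543 build the reserved-bit mask for IA32_PERF_GLOBAL_CTRL from the two counter counts: the valid bits are the low nr_arch_gp_counters bits plus nr_arch_fixed_counters bits starting at INTEL_PMC_IDX_FIXED (bit 32); everything else is reserved. A worked sketch of the mask arithmetic with example counts:

#include <stdint.h>
#include <stdio.h>

#define INTEL_PMC_IDX_FIXED 32

int main(void)
{
	int nr_gp = 8, nr_fixed = 3;	/* example CPUID.0AH counts */

	uint64_t valid = ((1ull << nr_gp) - 1) |
			 (((1ull << nr_fixed) - 1) << INTEL_PMC_IDX_FIXED);
	uint64_t global_ctrl_mask = ~valid;	/* reserved bits, as at line 541 */

	printf("valid GLOBAL_CTRL bits: %#018llx\n", (unsigned long long)valid);
	printf("global_ctrl_mask:       %#018llx\n",
	       (unsigned long long)global_ctrl_mask);
	return 0;	/* prints 0x00000007000000ff / 0xfffffff8ffffff00 */
}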
599 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in intel_pmu_init() local
603 pmu->gp_counters[i].type = KVM_PMC_GP; in intel_pmu_init()
604 pmu->gp_counters[i].vcpu = vcpu; in intel_pmu_init()
605 pmu->gp_counters[i].idx = i; in intel_pmu_init()
606 pmu->gp_counters[i].current_config = 0; in intel_pmu_init()
610 pmu->fixed_counters[i].type = KVM_PMC_FIXED; in intel_pmu_init()
611 pmu->fixed_counters[i].vcpu = vcpu; in intel_pmu_init()
612 pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED; in intel_pmu_init()
613 pmu->fixed_counters[i].current_config = 0; in intel_pmu_init()
705 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in vmx_passthrough_lbr_msrs() local
712 if (test_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use)) in vmx_passthrough_lbr_msrs()
719 __clear_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use); in vmx_passthrough_lbr_msrs()
736 void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu) in intel_pmu_cross_mapped_check() argument
741 for_each_set_bit(bit, (unsigned long *)&pmu->global_ctrl, in intel_pmu_cross_mapped_check()
743 pmc = intel_pmc_idx_to_pmc(pmu, bit); in intel_pmu_cross_mapped_check()
755 pmu->host_cross_mapped_mask |= BIT_ULL(hw_idx); in intel_pmu_cross_mapped_check()
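intel_pmu_cross_mapped_check() walks the counters enabled in guest GLOBAL_CTRL and asks host perf which hardware counter actually backs each one (pmc->perf_event->hw.idx in the kernel); when host and guest indices disagree, the hardware index is recorded in host_cross_mapped_mask so GLOBAL_STATUS emulation can remap overflow bits. A toy model of that comparison, with the host index supplied by a stub instead of perf:

#include <stdint.h>
#include <stdio.h>

/* Stub for the hardware counter index host perf assigned; in KVM this
 * comes from the backing perf_event. */
static int host_hw_idx(int guest_idx)
{
	return guest_idx == 1 ? 3 : guest_idx;	/* pretend counter 1 migrated */
}

int main(void)
{
	uint64_t global_ctrl = 0x7;		/* guest enabled counters 0-2 */
	uint64_t cross_mapped = 0;

	for (int bit = 0; bit < 64; bit++) {
		if (!(global_ctrl & (1ull << bit)))
			continue;
		int hw = host_hw_idx(bit);
		if (hw >= 0 && hw != bit)	/* backed by a different hw counter */
			cross_mapped |= 1ull << hw;
	}
	printf("host_cross_mapped_mask = %#llx\n",
	       (unsigned long long)cross_mapped);	/* 0x8 */
	return 0;
}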