Lines Matching full:pmc
55 * - There are three types of index used to access perf counters (PMC):
65 * 3. Global PMC Index (named pmc): pmc is an index specific to PMU
66 * code. Each pmc, stored in the kvm_pmc.idx field, is unique across
68 * between pmc and perf counters is as follows:
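
The comment above describes the third kind of index: a single global pmc index (kvm_pmc.idx) that spans both general-purpose and fixed counters. Below is a minimal, self-contained sketch of that mapping, assuming the Intel convention visible elsewhere in this listing where fixed counters start at INTEL_PMC_IDX_FIXED (32); to_global_pmc_idx() is a hypothetical helper for illustration, not a kernel function.

/* Illustrative model of the global PMC index described above (not kernel code).
 * Assumes the Intel layout: gp counters occupy [0 .. nr_gp - 1] and fixed
 * counters occupy [INTEL_PMC_IDX_FIXED .. INTEL_PMC_IDX_FIXED + nr_fixed - 1].
 */
#include <stdbool.h>
#include <stdio.h>

#define INTEL_PMC_IDX_FIXED 32

/* hypothetical helper: translate (is_fixed, hw index) into a global pmc index */
static unsigned int to_global_pmc_idx(bool is_fixed, unsigned int hw_idx)
{
        return is_fixed ? INTEL_PMC_IDX_FIXED + hw_idx : hw_idx;
}

int main(void)
{
        printf("gp counter 2    -> pmc idx %u\n", to_global_pmc_idx(false, 2));
        printf("fixed counter 1 -> pmc idx %u\n", to_global_pmc_idx(true, 1));
        return 0;
}
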
96 static inline void __kvm_perf_overflow(struct kvm_pmc *pmc, bool in_pmi) in __kvm_perf_overflow() argument
98 struct kvm_pmu *pmu = pmc_to_pmu(pmc); in __kvm_perf_overflow()
101 if (pmc->perf_event && pmc->perf_event->attr.precise_ip) { in __kvm_perf_overflow()
116 __set_bit(pmc->idx, (unsigned long *)&pmu->global_status); in __kvm_perf_overflow()
119 if (pmc->intr && !skip_pmi) in __kvm_perf_overflow()
120 kvm_make_request(KVM_REQ_PMI, pmc->vcpu); in __kvm_perf_overflow()
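
The matched lines of __kvm_perf_overflow() show the two effects of an overflow: the counter's global index is latched into the PMU's global_status bitmap, and a PMI is requested only when the counter has interrupts enabled and the PMI is not being skipped (the precise_ip check above suggests the skip covers the PEBS case). The structures and helpers below are toy stand-ins for the kernel objects, runnable in user space:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* toy stand-ins for the kernel-side state (illustrative only) */
struct toy_pmu { uint64_t global_status; };
struct toy_pmc { unsigned int idx; bool intr; struct toy_pmu *pmu; };

static void request_pmi(void) { puts("PMI requested for vCPU"); }

static void toy_overflow(struct toy_pmc *pmc, bool skip_pmi)
{
        /* latch the overflow in the emulated GLOBAL_STATUS bitmap */
        pmc->pmu->global_status |= 1ULL << pmc->idx;

        /* only inject a PMI if the counter has interrupts enabled and the
         * caller is not deliberately skipping it */
        if (pmc->intr && !skip_pmi)
                request_pmi();
}

int main(void)
{
        struct toy_pmu pmu = { 0 };
        struct toy_pmc pmc = { .idx = 32, .intr = true, .pmu = &pmu };

        toy_overflow(&pmc, false);
        printf("global_status = %#llx\n", (unsigned long long)pmu.global_status);
        return 0;
}
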
127 struct kvm_pmc *pmc = perf_event->overflow_handler_context; in kvm_perf_overflow() local
134 if (test_and_set_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi)) in kvm_perf_overflow()
137 __kvm_perf_overflow(pmc, true); in kvm_perf_overflow()
139 kvm_make_request(KVM_REQ_PMU, pmc->vcpu); in kvm_perf_overflow()
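
kvm_perf_overflow() runs from the host perf callback, so it only marks the counter in reprogram_pmi and raises KVM_REQ_PMU; the test_and_set_bit() both defers the heavy work to the vCPU and coalesces a second overflow that arrives before the first one is handled. A minimal sketch of that coalescing pattern using C11 atomics in place of the kernel bitops (mark_needs_reprogram() is an illustrative name):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_ulong reprogram_pending;   /* stand-in for pmu->reprogram_pmi */

/* returns true only for the first caller that sets the bit */
static bool mark_needs_reprogram(unsigned int idx)
{
        unsigned long bit = 1UL << idx;
        unsigned long old = atomic_fetch_or(&reprogram_pending, bit);

        return (old & bit) == 0;
}

int main(void)
{
        /* the first overflow wins; the duplicate is coalesced */
        printf("first:  %d\n", mark_needs_reprogram(3));
        printf("second: %d\n", mark_needs_reprogram(3));
        return 0;
}
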
142 static u64 pmc_get_pebs_precise_level(struct kvm_pmc *pmc) in pmc_get_pebs_precise_level() argument
151 if ((pmc->idx == 0 && x86_match_cpu(vmx_pebs_pdist_cpu)) || in pmc_get_pebs_precise_level()
152 (pmc->idx == 32 && x86_match_cpu(vmx_pebs_pdir_cpu))) in pmc_get_pebs_precise_level()
164 static int pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type, u64 config, in pmc_reprogram_counter() argument
168 struct kvm_pmu *pmu = pmc_to_pmu(pmc); in pmc_reprogram_counter()
180 bool pebs = test_bit(pmc->idx, (unsigned long *)&pmu->pebs_enable); in pmc_reprogram_counter()
182 attr.sample_period = get_sample_period(pmc, pmc->counter); in pmc_reprogram_counter()
185 guest_cpuid_is_intel(pmc->vcpu)) { in pmc_reprogram_counter()
200 attr.precise_ip = pmc_get_pebs_precise_level(pmc); in pmc_reprogram_counter()
204 kvm_perf_overflow, pmc); in pmc_reprogram_counter()
206 pr_debug_ratelimited("kvm_pmu: event creation failed %ld for pmc->idx = %d\n", in pmc_reprogram_counter()
207 PTR_ERR(event), pmc->idx); in pmc_reprogram_counter()
211 pmc->perf_event = event; in pmc_reprogram_counter()
212 pmc_to_pmu(pmc)->event_count++; in pmc_reprogram_counter()
213 pmc->is_paused = false; in pmc_reprogram_counter()
214 pmc->intr = intr || pebs; in pmc_reprogram_counter()
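
pmc_reprogram_counter() arms the host event with attr.sample_period = get_sample_period(pmc, pmc->counter). A small runnable sketch of the usual relationship, assuming the conventional definition that the period is the distance from the current guest counter value to its wrap point (the kernel helper's exact behavior may differ):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* width-limited counter mask, e.g. 48-bit gp counters => 0xffffffffffff */
static uint64_t pmc_mask(unsigned int width)
{
        return (width >= 64) ? ~0ULL : (1ULL << width) - 1;
}

/* assumed model: events remaining until the counter wraps to zero */
static uint64_t sample_period(uint64_t counter, unsigned int width)
{
        uint64_t mask = pmc_mask(width);
        uint64_t period = (-counter) & mask;

        return period ? period : mask + 1;   /* counter == 0 means a full period */
}

int main(void)
{
        /* a guest programs the counter close to overflow to get a PMI soon */
        printf("period = %" PRIu64 "\n", sample_period(0xfffffffffff0ULL, 48));
        printf("period = %" PRIu64 "\n", sample_period(0, 48));
        return 0;
}
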
218 static void pmc_pause_counter(struct kvm_pmc *pmc) in pmc_pause_counter() argument
220 u64 counter = pmc->counter; in pmc_pause_counter()
222 if (!pmc->perf_event || pmc->is_paused) in pmc_pause_counter()
226 counter += perf_event_pause(pmc->perf_event, true); in pmc_pause_counter()
227 pmc->counter = counter & pmc_bitmask(pmc); in pmc_pause_counter()
228 pmc->is_paused = true; in pmc_pause_counter()
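
pmc_pause_counter() folds whatever the host event has counted since it was last programmed into the guest-visible counter, truncated to the counter's width (perf_event_pause() with reset returns that delta). A toy model of the accumulate-and-wrap step; the types and helpers are illustrative:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct toy_pmc {
        uint64_t counter;      /* guest-visible value */
        uint64_t mask;         /* pmc_bitmask(), e.g. (1 << 48) - 1 */
        int      paused;
};

/* stand-in for perf_event_pause(event, true): return and clear the hw delta */
static uint64_t pause_hw_event(uint64_t *hw_count)
{
        uint64_t delta = *hw_count;

        *hw_count = 0;
        return delta;
}

static void toy_pause_counter(struct toy_pmc *pmc, uint64_t *hw_count)
{
        if (pmc->paused)
                return;
        pmc->counter = (pmc->counter + pause_hw_event(hw_count)) & pmc->mask;
        pmc->paused = 1;
}

int main(void)
{
        uint64_t hw = 0x25;                      /* events counted by the host */
        struct toy_pmc pmc = { .counter = (1ULL << 48) - 0x10,
                               .mask = (1ULL << 48) - 1 };

        toy_pause_counter(&pmc, &hw);
        printf("counter after pause = %#" PRIx64 "\n", pmc.counter); /* wrapped */
        return 0;
}
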
231 static bool pmc_resume_counter(struct kvm_pmc *pmc) in pmc_resume_counter() argument
233 if (!pmc->perf_event) in pmc_resume_counter()
237 if (is_sampling_event(pmc->perf_event) && in pmc_resume_counter()
238 perf_event_period(pmc->perf_event, in pmc_resume_counter()
239 get_sample_period(pmc, pmc->counter))) in pmc_resume_counter()
242 if (test_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->pebs_enable) != in pmc_resume_counter()
243 (!!pmc->perf_event->attr.precise_ip)) in pmc_resume_counter()
247 perf_event_enable(pmc->perf_event); in pmc_resume_counter()
248 pmc->is_paused = false; in pmc_resume_counter()
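
pmc_resume_counter() re-enables the existing host event only if it still fits the new configuration: for a sampling event the period must be updatable, and the event's precise_ip (PEBS) setting must agree with the guest's pebs_enable bit for this counter. A compact model of that reuse decision, with assumed field names:

#include <stdbool.h>
#include <stdio.h>

struct toy_event {
        bool exists;
        bool sampling;
        bool precise_ip;       /* created as a PEBS event */
        bool period_update_ok; /* result of updating the sample period */
};

/* can the already-created host event be reused for the current guest config? */
static bool can_resume(const struct toy_event *ev, bool guest_pebs_enabled)
{
        if (!ev->exists)
                return false;
        if (ev->sampling && !ev->period_update_ok)
                return false;
        /* a PEBS <-> non-PEBS change requires recreating the event */
        if (ev->precise_ip != guest_pebs_enabled)
                return false;
        return true;
}

int main(void)
{
        struct toy_event ev = { .exists = true, .sampling = true,
                                .precise_ip = false, .period_update_ok = true };

        printf("reuse (no PEBS change): %d\n", can_resume(&ev, false));
        printf("reuse (PEBS toggled):   %d\n", can_resume(&ev, true));
        return 0;
}
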
253 static void pmc_release_perf_event(struct kvm_pmc *pmc) in pmc_release_perf_event() argument
255 if (pmc->perf_event) { in pmc_release_perf_event()
256 perf_event_release_kernel(pmc->perf_event); in pmc_release_perf_event()
257 pmc->perf_event = NULL; in pmc_release_perf_event()
258 pmc->current_config = 0; in pmc_release_perf_event()
259 pmc_to_pmu(pmc)->event_count--; in pmc_release_perf_event()
263 static void pmc_stop_counter(struct kvm_pmc *pmc) in pmc_stop_counter() argument
265 if (pmc->perf_event) { in pmc_stop_counter()
266 pmc->counter = pmc_read_counter(pmc); in pmc_stop_counter()
267 pmc_release_perf_event(pmc); in pmc_stop_counter()
377 static bool check_pmu_event_filter(struct kvm_pmc *pmc) in check_pmu_event_filter() argument
380 struct kvm *kvm = pmc->vcpu->kvm; in check_pmu_event_filter()
386 if (pmc_is_gp(pmc)) in check_pmu_event_filter()
387 return is_gp_event_allowed(filter, pmc->eventsel); in check_pmu_event_filter()
389 return is_fixed_event_allowed(filter, pmc->idx); in check_pmu_event_filter()
392 static bool pmc_event_is_allowed(struct kvm_pmc *pmc) in pmc_event_is_allowed() argument
394 return pmc_is_globally_enabled(pmc) && pmc_speculative_in_use(pmc) && in pmc_event_is_allowed()
395 static_call(kvm_x86_pmu_hw_event_available)(pmc) && in pmc_event_is_allowed()
396 check_pmu_event_filter(pmc); in pmc_event_is_allowed()
399 static void reprogram_counter(struct kvm_pmc *pmc) in reprogram_counter() argument
401 struct kvm_pmu *pmu = pmc_to_pmu(pmc); in reprogram_counter()
402 u64 eventsel = pmc->eventsel; in reprogram_counter()
406 pmc_pause_counter(pmc); in reprogram_counter()
408 if (!pmc_event_is_allowed(pmc)) in reprogram_counter()
411 if (pmc->counter < pmc->prev_counter) in reprogram_counter()
412 __kvm_perf_overflow(pmc, false); in reprogram_counter()
417 if (pmc_is_fixed(pmc)) { in reprogram_counter()
419 pmc->idx - INTEL_PMC_IDX_FIXED); in reprogram_counter()
429 if (pmc->current_config == new_config && pmc_resume_counter(pmc)) in reprogram_counter()
432 pmc_release_perf_event(pmc); in reprogram_counter()
434 pmc->current_config = new_config; in reprogram_counter()
442 if (pmc_reprogram_counter(pmc, PERF_TYPE_RAW, in reprogram_counter()
450 clear_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->reprogram_pmi); in reprogram_counter()
451 pmc->prev_counter = 0; in reprogram_counter()
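
Read together, the matched lines of reprogram_counter() outline the whole path: pause the counter, stop if the event is not allowed, emulate an overflow if the counter wrapped below its previous value, then either resume the existing host event when the config is unchanged or release it and create a new one, and finally clear the pending reprogram bit. A condensed, runnable control-flow sketch (the helpers are stand-ins, and the fixed-counter config path is omitted):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* illustrative state and helpers; names mirror the listing but are stand-ins */
struct toy_pmc {
        uint64_t counter, prev_counter, eventsel, current_config;
        bool has_event, allowed, reprogram_pending;
};

static void pause_counter(struct toy_pmc *p)    { (void)p; puts("pause"); }
static void emulate_overflow(struct toy_pmc *p) { (void)p; puts("emulated overflow"); }
static bool resume_counter(struct toy_pmc *p)   { return p->has_event; }
static void release_event(struct toy_pmc *p)    { p->has_event = false; }
static void create_event(struct toy_pmc *p, uint64_t cfg)
{
        p->has_event = true;
        printf("created host event for config %#lx\n", (unsigned long)cfg);
}

static void toy_reprogram(struct toy_pmc *pmc)
{
        uint64_t new_config = pmc->eventsel;     /* gp counter case only */

        pause_counter(pmc);

        if (!pmc->allowed)
                goto out;

        /* the counter wrapped below its previous value: deliver an overflow */
        if (pmc->counter < pmc->prev_counter)
                emulate_overflow(pmc);

        /* unchanged config: just resume the already-created host event */
        if (pmc->current_config == new_config && resume_counter(pmc))
                goto out;

        release_event(pmc);
        pmc->current_config = new_config;
        create_event(pmc, new_config);
out:
        pmc->reprogram_pending = false;
        pmc->prev_counter = 0;
}

int main(void)
{
        struct toy_pmc pmc = { .counter = 1, .prev_counter = 5, .eventsel = 0xc0,
                               .allowed = true, .has_event = false,
                               .reprogram_pending = true };

        toy_reprogram(&pmc);
        return 0;
}
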
460 struct kvm_pmc *pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, bit); in kvm_pmu_handle_event() local
462 if (unlikely(!pmc)) { in kvm_pmu_handle_event()
467 reprogram_counter(pmc); in kvm_pmu_handle_event()
523 struct kvm_pmc *pmc; in kvm_pmu_rdpmc() local
532 pmc = static_call(kvm_x86_pmu_rdpmc_ecx_to_pmc)(vcpu, idx, &mask); in kvm_pmu_rdpmc()
533 if (!pmc) in kvm_pmu_rdpmc()
541 *data = pmc_read_counter(pmc) & mask; in kvm_pmu_rdpmc()
570 struct kvm_pmc *pmc = static_call(kvm_x86_pmu_msr_idx_to_pmc)(vcpu, msr); in kvm_pmu_mark_pmc_in_use() local
572 if (pmc) in kvm_pmu_mark_pmc_in_use()
573 __set_bit(pmc->idx, pmu->pmc_in_use); in kvm_pmu_mark_pmc_in_use()
663 struct kvm_pmc *pmc; in kvm_pmu_reset() local
671 pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, i); in kvm_pmu_reset()
672 if (!pmc) in kvm_pmu_reset()
675 pmc_stop_counter(pmc); in kvm_pmu_reset()
676 pmc->counter = 0; in kvm_pmu_reset()
678 if (pmc_is_gp(pmc)) in kvm_pmu_reset()
679 pmc->eventsel = 0; in kvm_pmu_reset()
750 struct kvm_pmc *pmc = NULL; in kvm_pmu_cleanup() local
760 pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, i); in kvm_pmu_cleanup()
762 if (pmc && pmc->perf_event && !pmc_speculative_in_use(pmc)) in kvm_pmu_cleanup()
763 pmc_stop_counter(pmc); in kvm_pmu_cleanup()
776 static void kvm_pmu_incr_counter(struct kvm_pmc *pmc) in kvm_pmu_incr_counter() argument
778 pmc->prev_counter = pmc->counter; in kvm_pmu_incr_counter()
779 pmc->counter = (pmc->counter + 1) & pmc_bitmask(pmc); in kvm_pmu_incr_counter()
780 kvm_pmu_request_counter_reprogram(pmc); in kvm_pmu_incr_counter()
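
kvm_pmu_incr_counter() is how KVM counts an emulated event in software: snapshot prev_counter, bump the counter modulo its width, and queue a reprogram; the later reprogram_counter() pass treats counter < prev_counter as proof that the increment wrapped and emits the overflow. A runnable illustration of that wrap detection:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_pmc { uint64_t counter, prev_counter, mask; };

/* emulate one occurrence of the event (cf. kvm_pmu_incr_counter) */
static void incr(struct toy_pmc *p)
{
        p->prev_counter = p->counter;
        p->counter = (p->counter + 1) & p->mask;
}

/* later, the reprogram path checks whether the increment wrapped */
static bool overflowed(const struct toy_pmc *p)
{
        return p->counter < p->prev_counter;
}

int main(void)
{
        struct toy_pmc p = { .counter = (1ULL << 48) - 1,
                             .mask = (1ULL << 48) - 1 };

        incr(&p);
        printf("counter=%llu overflowed=%d\n",
               (unsigned long long)p.counter, overflowed(&p));
        return 0;
}
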
783 static inline bool eventsel_match_perf_hw_id(struct kvm_pmc *pmc, in eventsel_match_perf_hw_id() argument
786 return !((pmc->eventsel ^ perf_get_hw_event_config(perf_hw_id)) & in eventsel_match_perf_hw_id()
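
eventsel_match_perf_hw_id() is a masked equality test: the guest's event selector matches a generic perf event if they agree in every bit the architectural event mask cares about. A one-line demonstration; TOY_EVENT_MASK is an assumed placeholder, not the kernel's constant:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* assumed example mask covering the event-select and unit-mask bits only */
#define TOY_EVENT_MASK 0xFFFFULL   /* placeholder, not the kernel constant */

static bool eventsel_matches(uint64_t guest_eventsel, uint64_t hw_config)
{
        /* equal in every bit the mask cares about */
        return !((guest_eventsel ^ hw_config) & TOY_EVENT_MASK);
}

int main(void)
{
        /* same event/umask bits, different control bits above the mask */
        printf("%d\n", eventsel_matches(0x4300c0, 0x0000c0));
        return 0;
}
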
790 static inline bool cpl_is_matched(struct kvm_pmc *pmc) in cpl_is_matched() argument
795 if (pmc_is_gp(pmc)) { in cpl_is_matched()
796 config = pmc->eventsel; in cpl_is_matched()
800 config = fixed_ctrl_field(pmc_to_pmu(pmc)->fixed_ctr_ctrl, in cpl_is_matched()
801 pmc->idx - INTEL_PMC_IDX_FIXED); in cpl_is_matched()
806 return (static_call(kvm_x86_get_cpl)(pmc->vcpu) == 0) ? select_os : select_user; in cpl_is_matched()
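
cpl_is_matched() decides whether an emulated event should count at the vCPU's current privilege level: gp counters take the OS/USR bits from the event selector, fixed counters take them from their fixed_ctr_ctrl field, and the OS flavor is chosen when the guest runs at CPL 0. A simplified model with assumed bit layouts:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* assumed bit positions, mirroring the architectural EVENTSEL OS/USR bits */
#define EVENTSEL_OS  (1ULL << 17)
#define EVENTSEL_USR (1ULL << 16)

struct toy_pmc {
        bool     is_gp;
        uint64_t eventsel;        /* gp counters */
        uint64_t fixed_ctrl;      /* 4-bit fixed_ctr_ctrl field for this counter */
};

static bool cpl_is_matched(const struct toy_pmc *pmc, unsigned int cpl)
{
        bool select_os, select_user;

        if (pmc->is_gp) {
                select_os   = pmc->eventsel & EVENTSEL_OS;
                select_user = pmc->eventsel & EVENTSEL_USR;
        } else {
                /* fixed counters: bit 0 = count in ring 0, bit 1 = count in ring 3 */
                select_os   = pmc->fixed_ctrl & 0x1;
                select_user = pmc->fixed_ctrl & 0x2;
        }

        return (cpl == 0) ? select_os : select_user;
}

int main(void)
{
        struct toy_pmc gp = { .is_gp = true, .eventsel = EVENTSEL_USR };

        printf("counts at CPL0: %d\n", cpl_is_matched(&gp, 0));
        printf("counts at CPL3: %d\n", cpl_is_matched(&gp, 3));
        return 0;
}
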
812 struct kvm_pmc *pmc; in kvm_pmu_trigger_event() local
816 pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, i); in kvm_pmu_trigger_event()
818 if (!pmc || !pmc_event_is_allowed(pmc)) in kvm_pmu_trigger_event()
822 if (eventsel_match_perf_hw_id(pmc, perf_hw_id) && cpl_is_matched(pmc)) in kvm_pmu_trigger_event()
823 kvm_pmu_incr_counter(pmc); in kvm_pmu_trigger_event()