Lines matching refs: pmu
(Each entry shows the source line number, the matching line, and its context: a struct member, a local variable, or the enclosing function.)

88 struct pmu *pmu; /* for custom pmu ops */ member
122 struct pmu pmu; member
151 struct intel_uncore_pmu *pmu; member
220 return container_of(dev_get_drvdata(dev), struct intel_uncore_pmu, pmu); in dev_to_uncore_pmu()
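dev_to_uncore_pmu() above (and uncore_event_to_pmu() near the end of the listing) recovers the wrapping intel_uncore_pmu from its embedded "struct pmu pmu" member via container_of(). A minimal standalone sketch of that pattern; the _sketch names and values are illustrative, not the kernel's:

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins; the real structs live in the kernel headers. */
struct pmu { const char *name; };

struct intel_uncore_pmu_sketch {
	int pmu_idx;
	struct pmu pmu;		/* embedded by value, as in the listing */
};

/* container_of(): walk back from a member pointer to its enclosing struct. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct intel_uncore_pmu_sketch u = { .pmu_idx = 3, .pmu = { "uncore" } };
	struct pmu *inner = &u.pmu;	/* the pointer perf hands around */
	struct intel_uncore_pmu_sketch *outer =
		container_of(inner, struct intel_uncore_pmu_sketch, pmu);
	printf("%d\n", outer->pmu_idx);	/* prints 3 */
	return 0;
}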
260 if (offset < box->pmu->type->mmio_map_size) in uncore_mmio_is_valid_offset()
264 offset, box->pmu->type->name); in uncore_mmio_is_valid_offset()
272 return box->pmu->type->box_ctl + in uncore_mmio_box_ctl()
273 box->pmu->type->mmio_offset * box->pmu->pmu_idx; in uncore_mmio_box_ctl()
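uncore_mmio_box_ctl() strides box_ctl by mmio_offset per PMU instance, and uncore_mmio_is_valid_offset() bounds-checks register offsets against mmio_map_size. A standalone sketch of that arithmetic; the struct name and numeric values are illustrative only:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative values only; real ones come from the uncore type tables. */
struct type_sketch {
	unsigned box_ctl;	/* base of the first box's control block */
	unsigned mmio_offset;	/* distance between consecutive boxes    */
	unsigned mmio_map_size;	/* size of the mapped register window    */
};

static unsigned mmio_box_ctl(const struct type_sketch *t, int pmu_idx)
{
	return t->box_ctl + t->mmio_offset * pmu_idx;
}

static bool mmio_offset_ok(const struct type_sketch *t, unsigned offset)
{
	return offset < t->mmio_map_size;	/* reject reads past the mapping */
}

int main(void)
{
	struct type_sketch t = { .box_ctl = 0x2900, .mmio_offset = 0x1000,
				 .mmio_map_size = 0x4000 };
	printf("box 2 ctl = %#x\n", mmio_box_ctl(&t, 2));	/* 0x4900 */
	printf("offset ok = %d\n", mmio_offset_ok(&t, 0x48));	/* 1      */
	return 0;
}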
278 return box->pmu->type->box_ctl; in uncore_pci_box_ctl()
283 return box->pmu->type->fixed_ctl; in uncore_pci_fixed_ctl()
288 return box->pmu->type->fixed_ctr; in uncore_pci_fixed_ctr()
295 return idx * 8 + box->pmu->type->event_ctl; in uncore_pci_event_ctl()
297 return idx * 4 + box->pmu->type->event_ctl; in uncore_pci_event_ctl()
303 return idx * 8 + box->pmu->type->perf_ctr; in uncore_pci_perf_ctr()
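uncore_pci_event_ctl() shows two return paths, an 8-byte and a 4-byte register stride; the condition selecting between them is not among the matched lines. A sketch with a hypothetical wide_ctl flag standing in for that elided guard:

#include <stdbool.h>
#include <stdio.h>

/* wide_ctl is a hypothetical stand-in for the condition elided above. */
static unsigned pci_event_ctl(unsigned event_ctl_base, int idx, bool wide_ctl)
{
	if (wide_ctl)
		return idx * 8 + event_ctl_base;	/* 8-byte register stride */
	return idx * 4 + event_ctl_base;		/* 4-byte register stride */
}

int main(void)
{
	printf("%#x\n", pci_event_ctl(0xd8, 2, false));	/* 0xd8 + 8  = 0xe0 */
	printf("%#x\n", pci_event_ctl(0xd8, 2, true));	/* 0xd8 + 16 = 0xe8 */
	return 0;
}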
308 struct intel_uncore_pmu *pmu = box->pmu; in uncore_msr_box_offset() local
309 return pmu->type->msr_offsets ? in uncore_msr_box_offset()
310 pmu->type->msr_offsets[pmu->pmu_idx] : in uncore_msr_box_offset()
311 pmu->type->msr_offset * pmu->pmu_idx; in uncore_msr_box_offset()
316 if (!box->pmu->type->box_ctl) in uncore_msr_box_ctl()
318 return box->pmu->type->box_ctl + uncore_msr_box_offset(box); in uncore_msr_box_ctl()
323 if (!box->pmu->type->fixed_ctl) in uncore_msr_fixed_ctl()
325 return box->pmu->type->fixed_ctl + uncore_msr_box_offset(box); in uncore_msr_fixed_ctl()
330 return box->pmu->type->fixed_ctr + uncore_msr_box_offset(box); in uncore_msr_fixed_ctr()
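uncore_msr_box_offset() picks the per-box offset from an explicit per-index table (msr_offsets) when one exists, else from a uniform stride (msr_offset * pmu_idx); the ctl helpers then return 0 when the type lacks that register. A standalone sketch of the selection, with illustrative values:

#include <stdio.h>

struct msr_type_sketch {
	const unsigned *msr_offsets;	/* optional per-box table, or NULL */
	unsigned msr_offset;		/* uniform stride used as fallback */
	unsigned box_ctl;		/* 0 means "this type has no ctl"  */
};

static unsigned msr_box_offset(const struct msr_type_sketch *t, int pmu_idx)
{
	return t->msr_offsets ? t->msr_offsets[pmu_idx]
			      : t->msr_offset * pmu_idx;
}

static unsigned msr_box_ctl(const struct msr_type_sketch *t, int pmu_idx)
{
	if (!t->box_ctl)
		return 0;			/* no control register */
	return t->box_ctl + msr_box_offset(t, pmu_idx);
}

int main(void)
{
	static const unsigned irregular[] = { 0x0, 0xa, 0x24 };
	struct msr_type_sketch uniform = { NULL, 0x10, 0x700 };
	struct msr_type_sketch tabled  = { irregular, 0, 0x700 };

	printf("%#x\n", msr_box_ctl(&uniform, 2));	/* 0x700 + 0x20 = 0x720 */
	printf("%#x\n", msr_box_ctl(&tabled, 2));	/* 0x700 + 0x24 = 0x724 */
	return 0;
}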
380 struct intel_uncore_pmu *pmu = box->pmu; in uncore_freerunning_counter() local
382 return pmu->type->freerunning[type].counter_base + in uncore_freerunning_counter()
383 pmu->type->freerunning[type].counter_offset * idx + in uncore_freerunning_counter()
384 (pmu->type->freerunning[type].box_offsets ? in uncore_freerunning_counter()
385 pmu->type->freerunning[type].box_offsets[pmu->pmu_idx] : in uncore_freerunning_counter()
386 pmu->type->freerunning[type].box_offset * pmu->pmu_idx); in uncore_freerunning_counter()
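uncore_freerunning_counter() composes three terms: a counter base, a per-counter stride, and a per-box offset that again comes from either a table or a uniform stride. The same formula as a standalone sketch, with illustrative numbers:

#include <stdio.h>

struct freerunning_sketch {
	unsigned counter_base;		/* first counter of this type       */
	unsigned counter_offset;	/* stride between counters in a box */
	const unsigned *box_offsets;	/* optional per-box table, or NULL  */
	unsigned box_offset;		/* uniform per-box stride fallback  */
};

static unsigned freerunning_counter(const struct freerunning_sketch *f,
				    int idx, int pmu_idx)
{
	return f->counter_base +
	       f->counter_offset * idx +
	       (f->box_offsets ? f->box_offsets[pmu_idx]
			       : f->box_offset * pmu_idx);
}

int main(void)
{
	struct freerunning_sketch f = { .counter_base = 0x8040,
					.counter_offset = 8,
					.box_offsets = NULL,
					.box_offset = 0x100 };
	/* counter 1 in box 2: 0x8040 + 8 + 0x200 = 0x8248 */
	printf("%#x\n", freerunning_counter(&f, 1, 2));
	return 0;
}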
394 (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx); in uncore_msr_event_ctl()
396 return box->pmu->type->event_ctl + in uncore_msr_event_ctl()
397 (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) + in uncore_msr_event_ctl()
407 (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx); in uncore_msr_perf_ctr()
409 return box->pmu->type->perf_ctr + in uncore_msr_perf_ctr()
410 (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) + in uncore_msr_perf_ctr()
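In uncore_msr_event_ctl() and uncore_msr_perf_ctr(), a set pair_ctr_ctl doubles the per-index stride (2 * idx instead of idx), consistent with control and counter registers laid out in adjacent pairs; that interpretation is an inference from the arithmetic, not stated in the listing. A sketch with illustrative values:

#include <stdbool.h>
#include <stdio.h>

/* With pair_ctr_ctl set, the per-index stride doubles: counter idx's
 * registers start 2*idx slots from the base instead of idx slots. */
static unsigned msr_event_ctl(unsigned event_ctl_base, int idx,
			      bool pair_ctr_ctl, unsigned box_offset)
{
	return event_ctl_base + (pair_ctr_ctl ? 2 * idx : idx) + box_offset;
}

int main(void)
{
	printf("%#x\n", msr_event_ctl(0xd8, 3, false, 0x10));	/* 0xd8+3+0x10 = 0xeb */
	printf("%#x\n", msr_event_ctl(0xd8, 3, true, 0x10));	/* 0xd8+6+0x10 = 0xee */
	return 0;
}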
453 return box->pmu->type->perf_ctr_bits; in uncore_perf_ctr_bits()
458 return box->pmu->type->fixed_ctr_bits; in uncore_fixed_ctr_bits()
467 return box->pmu->type->freerunning[type].bits; in uncore_freerunning_bits()
475 return box->pmu->type->freerunning[type].num_counters; in uncore_num_freerunning()
481 return box->pmu->type->num_freerunning_types; in uncore_num_freerunning_types()
496 return box->pmu->type->num_counters; in uncore_num_counters()
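The *_bits helpers above report counter widths (perf_ctr_bits, fixed_ctr_bits, freerunning bits). One typical use of such a width, shown here as an assumed example rather than code from this file, is masking the difference of two raw reads so a wraparound between them still yields the correct delta:

#include <stdint.h>
#include <stdio.h>

/* Delta between two raw reads of a ctr_bits-wide counter, masked so a
 * wraparound between the reads still produces the right result. */
static uint64_t counter_delta(uint64_t prev, uint64_t now, unsigned ctr_bits)
{
	uint64_t mask = (1ULL << ctr_bits) - 1;
	return (now - prev) & mask;
}

int main(void)
{
	unsigned bits = 48;			/* illustrative width */
	uint64_t prev = (1ULL << 48) - 5;	/* just before wrap   */
	uint64_t now  = 10;			/* after wrap         */
	printf("%llu\n", (unsigned long long)counter_delta(prev, now, bits));
	/* prints 15 */
	return 0;
}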
520 box->pmu->type->ops->disable_event(box, event); in uncore_disable_event()
526 box->pmu->type->ops->enable_event(box, event); in uncore_enable_event()
532 return box->pmu->type->ops->read_counter(box, event); in uncore_read_counter()
538 if (box->pmu->type->ops->init_box) in uncore_box_init()
539 box->pmu->type->ops->init_box(box); in uncore_box_init()
546 if (box->pmu->type->ops->exit_box) in uncore_box_exit()
547 box->pmu->type->ops->exit_box(box); in uncore_box_exit()
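uncore_box_init() and uncore_box_exit() NULL-check their callbacks before calling them, while enable_event/disable_event/read_counter are dispatched unconditionally: the init/exit hooks are optional per type, the others effectively required. A standalone sketch of that optional-ops pattern (names are illustrative):

#include <stdio.h>

struct box_sketch;

struct ops_sketch {
	void (*init_box)(struct box_sketch *box);	/* optional  */
	void (*enable_event)(struct box_sketch *box);	/* required  */
};

struct box_sketch { const struct ops_sketch *ops; };

static void box_init(struct box_sketch *box)
{
	if (box->ops->init_box)		/* optional hook: NULL-checked */
		box->ops->init_box(box);
}

static void enable_event(struct box_sketch *box)
{
	box->ops->enable_event(box);	/* mandatory: called directly */
}

static void my_enable(struct box_sketch *box) { (void)box; puts("enable"); }

int main(void)
{
	const struct ops_sketch ops = { .init_box = NULL,
					.enable_event = my_enable };
	struct box_sketch box = { .ops = &ops };
	box_init(&box);		/* safe no-op: init_box is NULL */
	enable_event(&box);	/* prints "enable"              */
	return 0;
}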
558 return container_of(event->pmu, struct intel_uncore_pmu, pmu); in uncore_event_to_pmu()
566 struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu);
583 void uncore_get_alias_name(char *pmu_name, struct intel_uncore_pmu *pmu);