Lines matching refs: box (uses of struct intel_uncore_box in the Intel uncore PMU driver, arch/x86/events/intel/uncore.c)

148 u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)  in uncore_msr_read_counter()  argument
157 void uncore_mmio_exit_box(struct intel_uncore_box *box) in uncore_mmio_exit_box() argument
159 if (box->io_addr) in uncore_mmio_exit_box()
160 iounmap(box->io_addr); in uncore_mmio_exit_box()
163 u64 uncore_mmio_read_counter(struct intel_uncore_box *box, in uncore_mmio_read_counter() argument
166 if (!box->io_addr) in uncore_mmio_read_counter()
169 if (!uncore_mmio_is_valid_offset(box, event->hw.event_base)) in uncore_mmio_read_counter()
172 return readq(box->io_addr + event->hw.event_base); in uncore_mmio_read_counter()
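
The MMIO read path listed above only touches the counter once the box has a mapping and the register offset has been validated. A minimal kernel-style sketch of that pattern, reusing the field and helper names from the listing (the function name is made up, everything else follows the lines above):

/*
 * Sketch of the MMIO counter-read pattern: bail out if the box was
 * never mapped, reject offsets outside the mapped window, then do a
 * single 64-bit read of the counter register.
 */
static u64 sketch_mmio_read_counter(struct intel_uncore_box *box,
				    struct perf_event *event)
{
	if (!box->io_addr)
		return 0;

	if (!uncore_mmio_is_valid_offset(box, event->hw.event_base))
		return 0;

	return readq(box->io_addr + event->hw.event_base);
}
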
179 uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event) in uncore_get_constraint() argument
193 (!uncore_box_is_fake(box) && reg1->alloc)) in uncore_get_constraint()
196 er = &box->shared_regs[reg1->idx]; in uncore_get_constraint()
208 if (!uncore_box_is_fake(box)) in uncore_get_constraint()
216 void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event) in uncore_put_constraint() argument
229 if (uncore_box_is_fake(box) || !reg1->alloc) in uncore_put_constraint()
232 er = &box->shared_regs[reg1->idx]; in uncore_put_constraint()
237 u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx) in uncore_shared_reg_config() argument
243 er = &box->shared_regs[idx]; in uncore_shared_reg_config()
252 static void uncore_assign_hw_event(struct intel_uncore_box *box, in uncore_assign_hw_event() argument
258 hwc->last_tag = ++box->tags[idx]; in uncore_assign_hw_event()
261 hwc->event_base = uncore_fixed_ctr(box); in uncore_assign_hw_event()
262 hwc->config_base = uncore_fixed_ctl(box); in uncore_assign_hw_event()
266 hwc->config_base = uncore_event_ctl(box, hwc->idx); in uncore_assign_hw_event()
267 hwc->event_base = uncore_perf_ctr(box, hwc->idx); in uncore_assign_hw_event()
270 void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event) in uncore_perf_event_update() argument
276 shift = 64 - uncore_freerunning_bits(box, event); in uncore_perf_event_update()
278 shift = 64 - uncore_fixed_ctr_bits(box); in uncore_perf_event_update()
280 shift = 64 - uncore_perf_ctr_bits(box); in uncore_perf_event_update()
285 new_count = uncore_read_counter(box, event); in uncore_perf_event_update()
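
uncore_perf_event_update() derives a shift from the counter width (free-running, fixed, or general-purpose) and uses it to compute a wrap-safe delta between two reads. A small standalone illustration of that arithmetic; counter_delta() and the 48-bit width are hypothetical, chosen only for the example:

#include <stdint.h>
#include <stdio.h>

/*
 * Width handling as in uncore_perf_event_update(): shift both samples
 * up so that a counter narrower than 64 bits wraps in the top bits,
 * then shift the difference back down to get the true delta.
 */
static uint64_t counter_delta(uint64_t prev, uint64_t now, int ctr_bits)
{
	int shift = 64 - ctr_bits;
	uint64_t delta = (now << shift) - (prev << shift);

	return delta >> shift;
}

int main(void)
{
	/* A 48-bit counter that wrapped between the two reads. */
	uint64_t prev = 0xFFFFFFFFFFF0ULL;
	uint64_t now  = 0x10ULL;

	/* Prints 32: 16 ticks up to the wrap plus 16 after it. */
	printf("delta = %llu\n",
	       (unsigned long long)counter_delta(prev, now, 48));
	return 0;
}
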
302 struct intel_uncore_box *box; in uncore_pmu_hrtimer() local
307 box = container_of(hrtimer, struct intel_uncore_box, hrtimer); in uncore_pmu_hrtimer()
308 if (!box->n_active || box->cpu != smp_processor_id()) in uncore_pmu_hrtimer()
320 list_for_each_entry(event, &box->active_list, active_entry) { in uncore_pmu_hrtimer()
321 uncore_perf_event_update(box, event); in uncore_pmu_hrtimer()
324 for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX) in uncore_pmu_hrtimer()
325 uncore_perf_event_update(box, box->events[bit]); in uncore_pmu_hrtimer()
329 hrtimer_forward_now(hrtimer, ns_to_ktime(box->hrtimer_duration)); in uncore_pmu_hrtimer()
333 void uncore_pmu_start_hrtimer(struct intel_uncore_box *box) in uncore_pmu_start_hrtimer() argument
335 hrtimer_start(&box->hrtimer, ns_to_ktime(box->hrtimer_duration), in uncore_pmu_start_hrtimer()
339 void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box) in uncore_pmu_cancel_hrtimer() argument
341 hrtimer_cancel(&box->hrtimer); in uncore_pmu_cancel_hrtimer()
344 static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box) in uncore_pmu_init_hrtimer() argument
346 hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in uncore_pmu_init_hrtimer()
347 box->hrtimer.function = uncore_pmu_hrtimer; in uncore_pmu_init_hrtimer()
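
The hrtimer lines above implement periodic polling: the callback resolves its box with container_of(), updates every active event, and re-arms itself for hrtimer_duration nanoseconds. A simplified kernel-style sketch of that loop; only the free-running active_list walk is shown, and the active_mask walk and interrupt masking of the real function are omitted:

/*
 * Sketch of the per-box polling timer: fold the hardware counters into
 * the perf events often enough that a narrow uncore counter cannot
 * wrap unnoticed, then re-arm the timer.
 */
static enum hrtimer_restart sketch_uncore_hrtimer(struct hrtimer *hrtimer)
{
	struct intel_uncore_box *box;
	struct perf_event *event;

	box = container_of(hrtimer, struct intel_uncore_box, hrtimer);

	/* Stop polling once the box is idle or has moved CPUs. */
	if (!box->n_active || box->cpu != smp_processor_id())
		return HRTIMER_NORESTART;

	list_for_each_entry(event, &box->active_list, active_entry)
		uncore_perf_event_update(box, event);

	hrtimer_forward_now(hrtimer, ns_to_ktime(box->hrtimer_duration));
	return HRTIMER_RESTART;
}
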
354 struct intel_uncore_box *box; in uncore_alloc_box() local
356 size = sizeof(*box) + numshared * sizeof(struct intel_uncore_extra_reg); in uncore_alloc_box()
358 box = kzalloc_node(size, GFP_KERNEL, node); in uncore_alloc_box()
359 if (!box) in uncore_alloc_box()
363 raw_spin_lock_init(&box->shared_regs[i].lock); in uncore_alloc_box()
365 uncore_pmu_init_hrtimer(box); in uncore_alloc_box()
366 box->cpu = -1; in uncore_alloc_box()
367 box->dieid = -1; in uncore_alloc_box()
370 box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL; in uncore_alloc_box()
372 INIT_LIST_HEAD(&box->active_list); in uncore_alloc_box()
374 return box; in uncore_alloc_box()
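
uncore_alloc_box() carves the box and its trailing array of shared registers out of a single node-local allocation, then initialises the polling timer and bookkeeping fields. A condensed sketch with the num_shared_regs lookup and error handling simplified; sketch_alloc_box() is an illustrative name:

/*
 * Sketch of the allocation pattern in uncore_alloc_box(): one zeroed,
 * NUMA-local allocation holds the box plus numshared extra registers.
 */
static struct intel_uncore_box *sketch_alloc_box(int numshared, int node)
{
	struct intel_uncore_box *box;
	int i, size;

	size = sizeof(*box) + numshared * sizeof(struct intel_uncore_extra_reg);

	box = kzalloc_node(size, GFP_KERNEL, node);
	if (!box)
		return NULL;

	for (i = 0; i < numshared; i++)
		raw_spin_lock_init(&box->shared_regs[i].lock);

	uncore_pmu_init_hrtimer(box);
	box->cpu = -1;		/* no owning CPU yet */
	box->dieid = -1;	/* set when the box is registered */
	box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL;
	INIT_LIST_HEAD(&box->active_list);

	return box;
}
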
383 static bool is_box_event(struct intel_uncore_box *box, struct perf_event *event) in is_box_event() argument
385 return &box->pmu->pmu == event->pmu; in is_box_event()
389 uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, in uncore_collect_events() argument
395 max_count = box->pmu->type->num_counters; in uncore_collect_events()
396 if (box->pmu->type->fixed_ctl) in uncore_collect_events()
399 if (box->n_events >= max_count) in uncore_collect_events()
402 n = box->n_events; in uncore_collect_events()
404 if (is_box_event(box, leader)) { in uncore_collect_events()
405 box->event_list[n] = leader; in uncore_collect_events()
413 if (!is_box_event(box, event) || in uncore_collect_events()
420 box->event_list[n] = event; in uncore_collect_events()
427 uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event) in uncore_get_event_constraint() argument
429 struct intel_uncore_type *type = box->pmu->type; in uncore_get_event_constraint()
433 c = type->ops->get_constraint(box, event); in uncore_get_event_constraint()
451 static void uncore_put_event_constraint(struct intel_uncore_box *box, in uncore_put_event_constraint() argument
454 if (box->pmu->type->ops->put_constraint) in uncore_put_event_constraint()
455 box->pmu->type->ops->put_constraint(box, event); in uncore_put_event_constraint()
458 static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n) in uncore_assign_events() argument
468 c = uncore_get_event_constraint(box, box->event_list[i]); in uncore_assign_events()
469 box->event_constraint[i] = c; in uncore_assign_events()
476 hwc = &box->event_list[i]->hw; in uncore_assign_events()
477 c = box->event_constraint[i]; in uncore_assign_events()
497 ret = perf_assign_events(box->event_constraint, n, in uncore_assign_events()
502 uncore_put_event_constraint(box, box->event_list[i]); in uncore_assign_events()
509 struct intel_uncore_box *box = uncore_event_to_box(event); in uncore_pmu_event_start() local
522 list_add_tail(&event->active_entry, &box->active_list); in uncore_pmu_event_start()
524 uncore_read_counter(box, event)); in uncore_pmu_event_start()
525 if (box->n_active++ == 0) in uncore_pmu_event_start()
526 uncore_pmu_start_hrtimer(box); in uncore_pmu_event_start()
534 box->events[idx] = event; in uncore_pmu_event_start()
535 box->n_active++; in uncore_pmu_event_start()
536 __set_bit(idx, box->active_mask); in uncore_pmu_event_start()
538 local64_set(&event->hw.prev_count, uncore_read_counter(box, event)); in uncore_pmu_event_start()
539 uncore_enable_event(box, event); in uncore_pmu_event_start()
541 if (box->n_active == 1) in uncore_pmu_event_start()
542 uncore_pmu_start_hrtimer(box); in uncore_pmu_event_start()
547 struct intel_uncore_box *box = uncore_event_to_box(event); in uncore_pmu_event_stop() local
553 if (--box->n_active == 0) in uncore_pmu_event_stop()
554 uncore_pmu_cancel_hrtimer(box); in uncore_pmu_event_stop()
555 uncore_perf_event_update(box, event); in uncore_pmu_event_stop()
559 if (__test_and_clear_bit(hwc->idx, box->active_mask)) { in uncore_pmu_event_stop()
560 uncore_disable_event(box, event); in uncore_pmu_event_stop()
561 box->n_active--; in uncore_pmu_event_stop()
562 box->events[hwc->idx] = NULL; in uncore_pmu_event_stop()
566 if (box->n_active == 0) in uncore_pmu_event_stop()
567 uncore_pmu_cancel_hrtimer(box); in uncore_pmu_event_stop()
575 uncore_perf_event_update(box, event); in uncore_pmu_event_stop()
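
uncore_pmu_event_start() and uncore_pmu_event_stop() above keep the polling timer alive only while the box has active events: the first start arms the hrtimer, the last stop cancels it, and a final counter fold happens on stop. A reduced kernel-style sketch of that bookkeeping; flag handling and the free-running special case are left out, and the function names are illustrative:

/*
 * Sketch of the start/stop bookkeeping: n_active gates the polling
 * hrtimer, and prev_count is snapshotted so the first delta is exact.
 */
static void sketch_event_start(struct intel_uncore_box *box,
			       struct perf_event *event)
{
	local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
	uncore_enable_event(box, event);

	if (++box->n_active == 1)
		uncore_pmu_start_hrtimer(box);
}

static void sketch_event_stop(struct intel_uncore_box *box,
			      struct perf_event *event)
{
	uncore_disable_event(box, event);

	if (--box->n_active == 0)
		uncore_pmu_cancel_hrtimer(box);

	/* Fold in whatever accumulated since the last timer tick. */
	uncore_perf_event_update(box, event);
}
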
582 struct intel_uncore_box *box = uncore_event_to_box(event); in uncore_pmu_event_add() local
587 if (!box) in uncore_pmu_event_add()
601 ret = n = uncore_collect_events(box, event, false); in uncore_pmu_event_add()
609 ret = uncore_assign_events(box, assign, n); in uncore_pmu_event_add()
614 for (i = 0; i < box->n_events; i++) { in uncore_pmu_event_add()
615 event = box->event_list[i]; in uncore_pmu_event_add()
619 hwc->last_tag == box->tags[assign[i]]) in uncore_pmu_event_add()
633 event = box->event_list[i]; in uncore_pmu_event_add()
637 hwc->last_tag != box->tags[assign[i]]) in uncore_pmu_event_add()
638 uncore_assign_hw_event(box, event, assign[i]); in uncore_pmu_event_add()
639 else if (i < box->n_events) in uncore_pmu_event_add()
647 box->n_events = n; in uncore_pmu_event_add()
654 struct intel_uncore_box *box = uncore_event_to_box(event); in uncore_pmu_event_del() local
667 for (i = 0; i < box->n_events; i++) { in uncore_pmu_event_del()
668 if (event == box->event_list[i]) { in uncore_pmu_event_del()
669 uncore_put_event_constraint(box, event); in uncore_pmu_event_del()
671 for (++i; i < box->n_events; i++) in uncore_pmu_event_del()
672 box->event_list[i - 1] = box->event_list[i]; in uncore_pmu_event_del()
674 --box->n_events; in uncore_pmu_event_del()
685 struct intel_uncore_box *box = uncore_event_to_box(event); in uncore_pmu_event_read() local
686 uncore_perf_event_update(box, event); in uncore_pmu_event_read()
735 struct intel_uncore_box *box; in uncore_pmu_event_init() local
757 box = uncore_pmu_to_box(pmu, event->cpu); in uncore_pmu_event_init()
758 if (!box || box->cpu < 0) in uncore_pmu_event_init()
760 event->cpu = box->cpu; in uncore_pmu_event_init()
761 event->pmu_private = box; in uncore_pmu_event_init()
785 if (!check_valid_freerunning_event(box, event)) in uncore_pmu_event_init()
794 event->hw.event_base = uncore_freerunning_counter(box, event); in uncore_pmu_event_init()
799 ret = pmu->type->ops->hw_config(box, event); in uncore_pmu_event_init()
816 struct intel_uncore_box *box; in uncore_pmu_enable() local
820 box = uncore_pmu_to_box(uncore_pmu, smp_processor_id()); in uncore_pmu_enable()
821 if (!box) in uncore_pmu_enable()
825 uncore_pmu->type->ops->enable_box(box); in uncore_pmu_enable()
831 struct intel_uncore_box *box; in uncore_pmu_disable() local
835 box = uncore_pmu_to_box(uncore_pmu, smp_processor_id()); in uncore_pmu_disable()
836 if (!box) in uncore_pmu_disable()
840 uncore_pmu->type->ops->disable_box(box); in uncore_pmu_disable()
1147 struct intel_uncore_box *box; in uncore_pci_pmu_register() local
1153 box = uncore_alloc_box(type, NUMA_NO_NODE); in uncore_pci_pmu_register()
1154 if (!box) in uncore_pci_pmu_register()
1162 atomic_inc(&box->refcnt); in uncore_pci_pmu_register()
1163 box->dieid = die; in uncore_pci_pmu_register()
1164 box->pci_dev = pdev; in uncore_pci_pmu_register()
1165 box->pmu = pmu; in uncore_pci_pmu_register()
1166 uncore_box_init(box); in uncore_pci_pmu_register()
1168 pmu->boxes[die] = box; in uncore_pci_pmu_register()
1176 uncore_box_exit(box); in uncore_pci_pmu_register()
1177 kfree(box); in uncore_pci_pmu_register()
1238 struct intel_uncore_box *box = pmu->boxes[die]; in uncore_pci_pmu_unregister() local
1243 uncore_box_exit(box); in uncore_pci_pmu_unregister()
1244 kfree(box); in uncore_pci_pmu_unregister()
1249 struct intel_uncore_box *box; in uncore_pci_remove() local
1256 box = pci_get_drvdata(pdev); in uncore_pci_remove()
1257 if (!box) { in uncore_pci_remove()
1268 pmu = box->pmu; in uncore_pci_remove()
1460 struct intel_uncore_box *box; in uncore_change_type_ctx() local
1465 box = pmu->boxes[die]; in uncore_change_type_ctx()
1466 if (!box) in uncore_change_type_ctx()
1470 WARN_ON_ONCE(box->cpu != -1); in uncore_change_type_ctx()
1471 box->cpu = new_cpu; in uncore_change_type_ctx()
1475 WARN_ON_ONCE(box->cpu != old_cpu); in uncore_change_type_ctx()
1476 box->cpu = -1; in uncore_change_type_ctx()
1480 uncore_pmu_cancel_hrtimer(box); in uncore_change_type_ctx()
1482 box->cpu = new_cpu; in uncore_change_type_ctx()
1497 struct intel_uncore_box *box; in uncore_box_unref() local
1504 box = pmu->boxes[id]; in uncore_box_unref()
1505 if (box && atomic_dec_return(&box->refcnt) == 0) in uncore_box_unref()
1506 uncore_box_exit(box); in uncore_box_unref()
1542 struct intel_uncore_box *box, *tmp; in allocate_boxes() local
1555 box = uncore_alloc_box(type, cpu_to_node(cpu)); in allocate_boxes()
1556 if (!box) in allocate_boxes()
1558 box->pmu = pmu; in allocate_boxes()
1559 box->dieid = die; in allocate_boxes()
1560 list_add(&box->active_list, &allocated); in allocate_boxes()
1564 list_for_each_entry_safe(box, tmp, &allocated, active_list) { in allocate_boxes()
1565 list_del_init(&box->active_list); in allocate_boxes()
1566 box->pmu->boxes[die] = box; in allocate_boxes()
1571 list_for_each_entry_safe(box, tmp, &allocated, active_list) { in allocate_boxes()
1572 list_del_init(&box->active_list); in allocate_boxes()
1573 kfree(box); in allocate_boxes()
1583 struct intel_uncore_box *box; in uncore_box_ref() local
1594 box = pmu->boxes[id]; in uncore_box_ref()
1595 if (box && atomic_inc_return(&box->refcnt) == 1) in uncore_box_ref()
1596 uncore_box_init(box); in uncore_box_ref()
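
uncore_box_ref() and uncore_box_unref() close the loop: the first reference on a die initialises the box hardware and the last one tears it down. A minimal sketch of that refcount pattern, with illustrative function names:

/*
 * Sketch of the per-box reference counting: init on the first user,
 * exit on the last.
 */
static void sketch_box_get(struct intel_uncore_box *box)
{
	if (atomic_inc_return(&box->refcnt) == 1)
		uncore_box_init(box);
}

static void sketch_box_put(struct intel_uncore_box *box)
{
	if (atomic_dec_return(&box->refcnt) == 0)
		uncore_box_exit(box);
}
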