Lines Matching refs:pmu

142 static bool pmu_needs_timer(struct i915_pmu *pmu) in pmu_needs_timer() argument
144 struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu); in pmu_needs_timer()
152 enable = pmu->enable; in pmu_needs_timer()
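pmu_needs_timer() decides whether the sampling hrtimer has any work to do, deriving the answer from the pmu->enable bitmask of active events. A minimal sketch of the shape of the function, reconstructed from the fragments above; the mask arithmetic that strips out events which can be read without periodic sampling is not part of this listing and is only indicated by a comment:

	static bool pmu_needs_timer(struct i915_pmu *pmu)
	{
		struct drm_i915_private *i915 =
			container_of(pmu, typeof(*i915), pmu);
		u32 enable;

		/* Start from the raw bitmask of enabled events. */
		enable = pmu->enable;

		/*
		 * Mask off events whose counters do not need the timer
		 * (the exact mask arithmetic is not in this listing).
		 */

		return enable;
	}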
194 static u64 read_sample(struct i915_pmu *pmu, unsigned int gt_id, int sample) in read_sample() argument
196 return pmu->sample[gt_id][sample].cur; in read_sample()
200 store_sample(struct i915_pmu *pmu, unsigned int gt_id, int sample, u64 val) in store_sample() argument
202 pmu->sample[gt_id][sample].cur = val; in store_sample()
206 add_sample_mult(struct i915_pmu *pmu, unsigned int gt_id, int sample, u32 val, u32 mul) in add_sample_mult() argument
208 pmu->sample[gt_id][sample].cur += mul_u32_u32(val, mul); in add_sample_mult()
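read_sample(), store_sample() and add_sample_mult() are thin accessors over a per-GT, per-sample array of u64 counters. add_sample_mult() accumulates a value scaled by a multiplier through the overflow-safe mul_u32_u32() helper, which is what lets the frequency counters integrate MHz over microseconds further down. Assembled directly from the fragments above, with only the layout assumed:

	static u64 read_sample(struct i915_pmu *pmu, unsigned int gt_id, int sample)
	{
		return pmu->sample[gt_id][sample].cur;
	}

	static void
	store_sample(struct i915_pmu *pmu, unsigned int gt_id, int sample, u64 val)
	{
		pmu->sample[gt_id][sample].cur = val;
	}

	static void
	add_sample_mult(struct i915_pmu *pmu, unsigned int gt_id, int sample,
			u32 val, u32 mul)
	{
		/* mul_u32_u32() widens to a 64-bit product, avoiding u32 overflow. */
		pmu->sample[gt_id][sample].cur += mul_u32_u32(val, mul);
	}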
215 struct i915_pmu *pmu = &i915->pmu; in get_rc6() local
226 spin_lock_irqsave(&pmu->lock, flags); in get_rc6()
229 store_sample(pmu, gt_id, __I915_SAMPLE_RC6, val); in get_rc6()
238 val = ktime_since_raw(pmu->sleep_last[gt_id]); in get_rc6()
239 val += read_sample(pmu, gt_id, __I915_SAMPLE_RC6); in get_rc6()
242 if (val < read_sample(pmu, gt_id, __I915_SAMPLE_RC6_LAST_REPORTED)) in get_rc6()
243 val = read_sample(pmu, gt_id, __I915_SAMPLE_RC6_LAST_REPORTED); in get_rc6()
245 store_sample(pmu, gt_id, __I915_SAMPLE_RC6_LAST_REPORTED, val); in get_rc6()
247 spin_unlock_irqrestore(&pmu->lock, flags); in get_rc6()
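get_rc6() combines two techniques: while the GT is parked, RC6 residency is estimated as the last stored counter plus the wall time spent asleep (ktime_since_raw() of sleep_last), and the result is clamped against __I915_SAMPLE_RC6_LAST_REPORTED so userspace never sees the counter go backwards if the estimate lags hardware. A sketch of the clamp under pmu->lock, with the unparked read path elided:

	spin_lock_irqsave(&pmu->lock, flags);

	/* Parked: estimate residency as last snapshot + time asleep. */
	val = ktime_since_raw(pmu->sleep_last[gt_id]);
	val += read_sample(pmu, gt_id, __I915_SAMPLE_RC6);

	/* Clamp so the reported counter is monotonic. */
	if (val < read_sample(pmu, gt_id, __I915_SAMPLE_RC6_LAST_REPORTED))
		val = read_sample(pmu, gt_id, __I915_SAMPLE_RC6_LAST_REPORTED);

	store_sample(pmu, gt_id, __I915_SAMPLE_RC6_LAST_REPORTED, val);

	spin_unlock_irqrestore(&pmu->lock, flags);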
252 static void init_rc6(struct i915_pmu *pmu) in init_rc6() argument
254 struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu); in init_rc6()
264 store_sample(pmu, i, __I915_SAMPLE_RC6, val); in init_rc6()
265 store_sample(pmu, i, __I915_SAMPLE_RC6_LAST_REPORTED, in init_rc6()
267 pmu->sleep_last[i] = ktime_get_raw(); in init_rc6()
274 struct i915_pmu *pmu = &gt->i915->pmu; in park_rc6() local
276 store_sample(pmu, gt->info.id, __I915_SAMPLE_RC6, __get_rc6(gt)); in park_rc6()
277 pmu->sleep_last[gt->info.id] = ktime_get_raw(); in park_rc6()
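init_rc6() and park_rc6() maintain the invariant the estimate above relies on: whenever sampling is (re)seeded or a GT goes idle, the current hardware RC6 value is snapshotted into __I915_SAMPLE_RC6 and sleep_last[] is stamped with ktime_get_raw(), giving get_rc6() a consistent (value, timestamp) pair to extrapolate from. park_rc6(), reconstructed from the fragments above (that __get_rc6() reads the hardware counter is inferred from its use here):

	static void park_rc6(struct intel_gt *gt)
	{
		struct i915_pmu *pmu = &gt->i915->pmu;

		/* Snapshot the hw counter and the moment this GT went idle. */
		store_sample(pmu, gt->info.id, __I915_SAMPLE_RC6, __get_rc6(gt));
		pmu->sleep_last[gt->info.id] = ktime_get_raw();
	}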
280 static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu) in __i915_pmu_maybe_start_timer() argument
282 if (!pmu->timer_enabled && pmu_needs_timer(pmu)) { in __i915_pmu_maybe_start_timer()
283 pmu->timer_enabled = true; in __i915_pmu_maybe_start_timer()
284 pmu->timer_last = ktime_get(); in __i915_pmu_maybe_start_timer()
285 hrtimer_start_range_ns(&pmu->timer, in __i915_pmu_maybe_start_timer()
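__i915_pmu_maybe_start_timer() arms the sampling hrtimer only on the false-to-true transition of timer_enabled, stamping timer_last so the first callback can measure a real elapsed period. The listing truncates the hrtimer_start_range_ns() arguments; in the sketch below the PERIOD constant, the zero slack and the pinned relative mode are assumptions:

	static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu)
	{
		if (!pmu->timer_enabled && pmu_needs_timer(pmu)) {
			pmu->timer_enabled = true;
			pmu->timer_last = ktime_get();
			hrtimer_start_range_ns(&pmu->timer,
					       ns_to_ktime(PERIOD), 0,
					       HRTIMER_MODE_REL_PINNED);
		}
	}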
293 struct i915_pmu *pmu = &gt->i915->pmu; in i915_pmu_gt_parked() local
295 if (!pmu->base.event_init) in i915_pmu_gt_parked()
298 spin_lock_irq(&pmu->lock); in i915_pmu_gt_parked()
306 pmu->unparked &= ~BIT(gt->info.id); in i915_pmu_gt_parked()
307 if (pmu->unparked == 0) in i915_pmu_gt_parked()
308 pmu->timer_enabled = false; in i915_pmu_gt_parked()
310 spin_unlock_irq(&pmu->lock); in i915_pmu_gt_parked()
315 struct i915_pmu *pmu = &gt->i915->pmu; in i915_pmu_gt_unparked() local
317 if (!pmu->base.event_init) in i915_pmu_gt_unparked()
320 spin_lock_irq(&pmu->lock); in i915_pmu_gt_unparked()
325 if (pmu->unparked == 0) in i915_pmu_gt_unparked()
326 __i915_pmu_maybe_start_timer(pmu); in i915_pmu_gt_unparked()
328 pmu->unparked |= BIT(gt->info.id); in i915_pmu_gt_unparked()
330 spin_unlock_irq(&pmu->lock); in i915_pmu_gt_unparked()
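i915_pmu_gt_parked() and i915_pmu_gt_unparked() are symmetric around the pmu->unparked bitmask, one bit per GT: parking clears the GT's bit and disables the timer once the mask reaches zero, while unparking starts the timer only on the zero-to-nonzero transition, before setting the bit. Both critical sections run under spin_lock_irq(&pmu->lock). The two cores, taken from the fragments above:

	/* i915_pmu_gt_parked() */
	pmu->unparked &= ~BIT(gt->info.id);
	if (pmu->unparked == 0)
		pmu->timer_enabled = false;	/* last GT went idle */

	/* i915_pmu_gt_unparked() */
	if (pmu->unparked == 0)
		__i915_pmu_maybe_start_timer(pmu);	/* first GT waking up */
	pmu->unparked |= BIT(gt->info.id);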
351 struct intel_engine_pmu *pmu = &engine->pmu; in engine_sample() local
360 add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns); in engine_sample()
362 add_sample(&pmu->sample[I915_SAMPLE_SEMA], period_ns); in engine_sample()
381 add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns); in engine_sample()
392 if ((i915->pmu.enable & ENGINE_SAMPLE_MASK) == 0) in engines_sample()
399 if (!engine->pmu.enable) in engines_sample()
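engine_sample() turns elapsed time into busyness counters by adding period_ns into the WAIT, SEMA or BUSY sample according to the engine's state, and engines_sample() skips the pass entirely unless engine sampling is enabled both globally (i915->pmu.enable & ENGINE_SAMPLE_MASK) and per engine (engine->pmu.enable). A sketch of the accumulation; the signature and the wait/sema/busy flags derived from hardware state reads are assumptions, since the listing shows only the add_sample() calls:

	static void engine_sample(struct intel_engine_cs *engine,
				  unsigned int period_ns)
	{
		struct intel_engine_pmu *pmu = &engine->pmu;
		bool wait = false, sema = false, busy = false;

		/* ... hardware state reads setting wait/sema/busy elided ... */

		if (wait)
			add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns);
		if (sema)
			add_sample(&pmu->sample[I915_SAMPLE_SEMA], period_ns);
		if (busy)
			add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns);
	}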
418 frequency_sampling_enabled(struct i915_pmu *pmu, unsigned int gt) in frequency_sampling_enabled() argument
420 return pmu->enable & in frequency_sampling_enabled()
430 struct i915_pmu *pmu = &i915->pmu; in frequency_sample() local
433 if (!frequency_sampling_enabled(pmu, gt_id)) in frequency_sample()
440 if (pmu->enable & config_mask(__I915_PMU_ACTUAL_FREQUENCY(gt_id))) { in frequency_sample()
456 add_sample_mult(pmu, gt_id, __I915_SAMPLE_FREQ_ACT, in frequency_sample()
460 if (pmu->enable & config_mask(__I915_PMU_REQUESTED_FREQUENCY(gt_id))) { in frequency_sample()
461 add_sample_mult(pmu, gt_id, __I915_SAMPLE_FREQ_REQ, in frequency_sample()
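The frequency counters are time-integrals rather than instantaneous reads: on every tick frequency_sample() adds frequency x period into __I915_SAMPLE_FREQ_ACT / __I915_SAMPLE_FREQ_REQ via add_sample_mult(), each gated by config_mask() on its event actually being enabled; the read side later scales the sum back down. One branch sketched below, where the MHz read and the period_ns / 1000 scaling are assumptions, as the listing truncates both:

	if (pmu->enable & config_mask(__I915_PMU_ACTUAL_FREQUENCY(gt_id))) {
		u32 val = 0;	/* current actual frequency in MHz; hw read elided */

		add_sample_mult(pmu, gt_id, __I915_SAMPLE_FREQ_ACT,
				val, period_ns / 1000);
	}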
472 container_of(hrtimer, struct drm_i915_private, pmu.timer); in i915_sample()
473 struct i915_pmu *pmu = &i915->pmu; in i915_sample() local
479 if (!READ_ONCE(pmu->timer_enabled)) in i915_sample()
483 period_ns = ktime_to_ns(ktime_sub(now, pmu->timer_last)); in i915_sample()
484 pmu->timer_last = now; in i915_sample()
494 if (!(pmu->unparked & BIT(i))) in i915_sample()
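i915_sample() is the hrtimer callback. It bails out through READ_ONCE(pmu->timer_enabled) when sampling has been lazily switched off, measures the real elapsed period from timer_last instead of trusting the nominal interval, and samples only GTs whose bit is set in pmu->unparked. A sketch of the control flow; PERIOD and the hrtimer_forward() re-arm are assumptions, the rest mirrors the fragments above:

	static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
	{
		struct drm_i915_private *i915 =
			container_of(hrtimer, struct drm_i915_private, pmu.timer);
		struct i915_pmu *pmu = &i915->pmu;
		unsigned int period_ns;
		ktime_t now;

		if (!READ_ONCE(pmu->timer_enabled))
			return HRTIMER_NORESTART;

		now = ktime_get();
		/* Use measured elapsed time, not the nominal period. */
		period_ns = ktime_to_ns(ktime_sub(now, pmu->timer_last));
		pmu->timer_last = now;

		/* ... per-GT sampling, skipping GTs without their unparked bit ... */

		hrtimer_forward(hrtimer, now, ns_to_ktime(PERIOD));
		return HRTIMER_RESTART;
	}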
509 container_of(event->pmu, typeof(*i915), pmu.base); in i915_pmu_event_destroy()
576 container_of(event->pmu, typeof(*i915), pmu.base); in engine_event_init()
590 container_of(event->pmu, typeof(*i915), pmu.base); in i915_pmu_event_init()
591 struct i915_pmu *pmu = &i915->pmu; in i915_pmu_event_init() local
594 if (pmu->closed) in i915_pmu_event_init()
597 if (event->attr.type != event->pmu->type) in i915_pmu_event_init()
632 container_of(event->pmu, typeof(*i915), pmu.base); in __i915_pmu_event_read()
633 struct i915_pmu *pmu = &i915->pmu; in __i915_pmu_event_read() local
653 val = engine->pmu.sample[sample].cur; in __i915_pmu_event_read()
662 div_u64(read_sample(pmu, gt_id, in __i915_pmu_event_read()
668 div_u64(read_sample(pmu, gt_id, in __i915_pmu_event_read()
673 val = READ_ONCE(pmu->irq_count); in __i915_pmu_event_read()
690 container_of(event->pmu, typeof(*i915), pmu.base); in i915_pmu_event_read()
692 struct i915_pmu *pmu = &i915->pmu; in i915_pmu_event_read() local
695 if (pmu->closed) { in i915_pmu_event_read()
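Two patterns recur through all the event callbacks above: the driver is recovered from a perf event with container_of(event->pmu, typeof(*i915), pmu.base), since struct pmu is embedded in drm_i915_private as pmu.base, and pmu->closed is tested on entry so callbacks degrade to safe no-ops once the device is being torn down. __i915_pmu_event_read() also converts the integrated frequency sums back into counter form; a sketch of that conversion, where the USEC_PER_SEC divisor is an assumption (the listing truncates the div_u64() arguments):

	/* Scale the time-integral so its rate of change reads as MHz. */
	val = div_u64(read_sample(pmu, gt_id, __I915_SAMPLE_FREQ_ACT),
		      USEC_PER_SEC);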
712 container_of(event->pmu, typeof(*i915), pmu.base); in i915_pmu_enable()
714 struct i915_pmu *pmu = &i915->pmu; in i915_pmu_enable() local
720 spin_lock_irqsave(&pmu->lock, flags); in i915_pmu_enable()
726 BUILD_BUG_ON(ARRAY_SIZE(pmu->enable_count) != I915_PMU_MASK_BITS); in i915_pmu_enable()
727 GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count)); in i915_pmu_enable()
728 GEM_BUG_ON(pmu->enable_count[bit] == ~0); in i915_pmu_enable()
730 pmu->enable |= BIT(bit); in i915_pmu_enable()
731 pmu->enable_count[bit]++; in i915_pmu_enable()
736 __i915_pmu_maybe_start_timer(pmu); in i915_pmu_enable()
750 BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.enable_count) != in i915_pmu_enable()
752 BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.sample) != in i915_pmu_enable()
754 GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count)); in i915_pmu_enable()
755 GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample)); in i915_pmu_enable()
756 GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0); in i915_pmu_enable()
758 engine->pmu.enable |= BIT(sample); in i915_pmu_enable()
759 engine->pmu.enable_count[sample]++; in i915_pmu_enable()
762 spin_unlock_irqrestore(&pmu->lock, flags); in i915_pmu_enable()
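i915_pmu_enable() keeps a reference count per event bit alongside the pmu->enable bitmask, all under pmu->lock: the bit is set unconditionally and its count incremented, the timer is kicked in case this event is the first that needs it, and engine events additionally refcount a per-engine, per-sample mask. A sketch of the flow; the is_engine_event() helper and the derivation of bit/sample are assumptions, the BUG_ON bookkeeping comes from the fragments above:

	spin_lock_irqsave(&pmu->lock, flags);

	BUILD_BUG_ON(ARRAY_SIZE(pmu->enable_count) != I915_PMU_MASK_BITS);
	GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
	GEM_BUG_ON(pmu->enable_count[bit] == ~0);

	pmu->enable |= BIT(bit);
	pmu->enable_count[bit]++;

	/* Start the sampling timer if this event is the first to need it. */
	__i915_pmu_maybe_start_timer(pmu);

	if (is_engine_event(event)) {
		/* Second level of refcounting, per engine and per sample. */
		engine->pmu.enable |= BIT(sample);
		engine->pmu.enable_count[sample]++;
	}

	spin_unlock_irqrestore(&pmu->lock, flags);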
776 container_of(event->pmu, typeof(*i915), pmu.base); in i915_pmu_disable()
778 struct i915_pmu *pmu = &i915->pmu; in i915_pmu_disable() local
784 spin_lock_irqsave(&pmu->lock, flags); in i915_pmu_disable()
794 GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count)); in i915_pmu_disable()
795 GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample)); in i915_pmu_disable()
796 GEM_BUG_ON(engine->pmu.enable_count[sample] == 0); in i915_pmu_disable()
802 if (--engine->pmu.enable_count[sample] == 0) in i915_pmu_disable()
803 engine->pmu.enable &= ~BIT(sample); in i915_pmu_disable()
806 GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count)); in i915_pmu_disable()
807 GEM_BUG_ON(pmu->enable_count[bit] == 0); in i915_pmu_disable()
812 if (--pmu->enable_count[bit] == 0) { in i915_pmu_disable()
813 pmu->enable &= ~BIT(bit); in i915_pmu_disable()
814 pmu->timer_enabled &= pmu_needs_timer(pmu); in i915_pmu_disable()
817 spin_unlock_irqrestore(&pmu->lock, flags); in i915_pmu_disable()
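i915_pmu_disable() is the exact mirror: decrement each reference count and clear the corresponding bit only when it drops to zero. The timer is stopped lazily; timer_enabled &= pmu_needs_timer(pmu) merely clears the flag, and the running hrtimer notices via READ_ONCE() in i915_sample() and declines to re-arm. Sketch, with the same is_engine_event() assumption as above:

	spin_lock_irqsave(&pmu->lock, flags);

	if (is_engine_event(event)) {
		GEM_BUG_ON(engine->pmu.enable_count[sample] == 0);
		if (--engine->pmu.enable_count[sample] == 0)
			engine->pmu.enable &= ~BIT(sample);
	}

	GEM_BUG_ON(pmu->enable_count[bit] == 0);
	if (--pmu->enable_count[bit] == 0) {
		pmu->enable &= ~BIT(bit);
		/* Lazy stop: the hrtimer callback sees this and bails. */
		pmu->timer_enabled &= pmu_needs_timer(pmu);
	}

	spin_unlock_irqrestore(&pmu->lock, flags);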
823 container_of(event->pmu, typeof(*i915), pmu.base); in i915_pmu_event_start()
824 struct i915_pmu *pmu = &i915->pmu; in i915_pmu_event_start() local
826 if (pmu->closed) in i915_pmu_event_start()
836 container_of(event->pmu, typeof(*i915), pmu.base); in i915_pmu_event_stop()
837 struct i915_pmu *pmu = &i915->pmu; in i915_pmu_event_stop() local
839 if (pmu->closed) in i915_pmu_event_stop()
853 container_of(event->pmu, typeof(*i915), pmu.base); in i915_pmu_event_add()
854 struct i915_pmu *pmu = &i915->pmu; in i915_pmu_event_add() local
856 if (pmu->closed) in i915_pmu_event_add()
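The start/stop/add callbacks all open with the same pmu->closed guard before delegating to the enable/disable refcounting. i915_pmu_event_start() as a representative sketch, where everything past the guard is an assumption following the usual perf start pattern:

	static void i915_pmu_event_start(struct perf_event *event, int flags)
	{
		struct drm_i915_private *i915 =
			container_of(event->pmu, typeof(*i915), pmu.base);
		struct i915_pmu *pmu = &i915->pmu;

		/* After unbind, pmu->closed turns every callback into a no-op. */
		if (pmu->closed)
			return;

		i915_pmu_enable(event);
		event->hw.state = 0;
	}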
984 create_event_attributes(struct i915_pmu *pmu) in create_event_attributes() argument
986 struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu); in create_event_attributes()
1119 pmu->i915_attr = i915_attr; in create_event_attributes()
1120 pmu->pmu_attr = pmu_attr; in create_event_attributes()
1136 static void free_event_attributes(struct i915_pmu *pmu) in free_event_attributes() argument
1138 struct attribute **attr_iter = pmu->events_attr_group.attrs; in free_event_attributes()
1143 kfree(pmu->events_attr_group.attrs); in free_event_attributes()
1144 kfree(pmu->i915_attr); in free_event_attributes()
1145 kfree(pmu->pmu_attr); in free_event_attributes()
1147 pmu->events_attr_group.attrs = NULL; in free_event_attributes()
1148 pmu->i915_attr = NULL; in free_event_attributes()
1149 pmu->pmu_attr = NULL; in free_event_attributes()
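The event attributes are built dynamically per device, so create_event_attributes() hands ownership of several allocations to the pmu (events_attr_group.attrs, i915_attr, pmu_attr) and free_event_attributes() releases them and NULLs the pointers so a later unregister cannot double-free. A reconstructed sketch; the loop body freeing each attribute name is an assumption:

	static void free_event_attributes(struct i915_pmu *pmu)
	{
		struct attribute **attr_iter = pmu->events_attr_group.attrs;

		for (; *attr_iter; attr_iter++)
			kfree((*attr_iter)->name);	/* dynamically built names */

		kfree(pmu->events_attr_group.attrs);
		kfree(pmu->i915_attr);
		kfree(pmu->pmu_attr);

		pmu->events_attr_group.attrs = NULL;
		pmu->i915_attr = NULL;
		pmu->pmu_attr = NULL;
	}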
1154 struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node); in i915_pmu_cpu_online() local
1156 GEM_BUG_ON(!pmu->base.event_init); in i915_pmu_cpu_online()
1167 struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node); in i915_pmu_cpu_offline() local
1170 GEM_BUG_ON(!pmu->base.event_init); in i915_pmu_cpu_offline()
1176 if (pmu->closed) in i915_pmu_cpu_offline()
1189 if (target < nr_cpu_ids && target != pmu->cpuhp.cpu) { in i915_pmu_cpu_offline()
1190 perf_pmu_migrate_context(&pmu->base, cpu, target); in i915_pmu_cpu_offline()
1191 pmu->cpuhp.cpu = target; in i915_pmu_cpu_offline()
1222 static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu) in i915_pmu_register_cpuhp_state() argument
1227 return cpuhp_state_add_instance(cpuhp_slot, &pmu->cpuhp.node); in i915_pmu_register_cpuhp_state()
1230 static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu) in i915_pmu_unregister_cpuhp_state() argument
1232 cpuhp_state_remove_instance(cpuhp_slot, &pmu->cpuhp.node); in i915_pmu_unregister_cpuhp_state()
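Uncore-style PMUs bind their events to a single housekeeping CPU, and the CPU-hotplug callbacks keep that binding alive: when the tracked CPU (pmu->cpuhp.cpu) is offlined, i915_pmu_cpu_offline() picks a surviving target and migrates every event there. The migration step, from the fragments above (target selection elided):

	/* target is some other online CPU, or >= nr_cpu_ids if none remain */
	if (target < nr_cpu_ids && target != pmu->cpuhp.cpu) {
		perf_pmu_migrate_context(&pmu->base, cpu, target);
		pmu->cpuhp.cpu = target;
	}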
1248 struct i915_pmu *pmu = &i915->pmu; in i915_pmu_register() local
1251 &pmu->events_attr_group, in i915_pmu_register()
1263 spin_lock_init(&pmu->lock); in i915_pmu_register()
1264 hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in i915_pmu_register()
1265 pmu->timer.function = i915_sample; in i915_pmu_register()
1266 pmu->cpuhp.cpu = -1; in i915_pmu_register()
1267 init_rc6(pmu); in i915_pmu_register()
1270 pmu->name = kasprintf(GFP_KERNEL, in i915_pmu_register()
1273 if (pmu->name) { in i915_pmu_register()
1275 strreplace((char *)pmu->name, ':', '_'); in i915_pmu_register()
1278 pmu->name = "i915"; in i915_pmu_register()
1280 if (!pmu->name) in i915_pmu_register()
1283 pmu->events_attr_group.name = "events"; in i915_pmu_register()
1284 pmu->events_attr_group.attrs = create_event_attributes(pmu); in i915_pmu_register()
1285 if (!pmu->events_attr_group.attrs) in i915_pmu_register()
1288 pmu->base.attr_groups = kmemdup(attr_groups, sizeof(attr_groups), in i915_pmu_register()
1290 if (!pmu->base.attr_groups) in i915_pmu_register()
1293 pmu->base.module = THIS_MODULE; in i915_pmu_register()
1294 pmu->base.task_ctx_nr = perf_invalid_context; in i915_pmu_register()
1295 pmu->base.event_init = i915_pmu_event_init; in i915_pmu_register()
1296 pmu->base.add = i915_pmu_event_add; in i915_pmu_register()
1297 pmu->base.del = i915_pmu_event_del; in i915_pmu_register()
1298 pmu->base.start = i915_pmu_event_start; in i915_pmu_register()
1299 pmu->base.stop = i915_pmu_event_stop; in i915_pmu_register()
1300 pmu->base.read = i915_pmu_event_read; in i915_pmu_register()
1301 pmu->base.event_idx = i915_pmu_event_event_idx; in i915_pmu_register()
1303 ret = perf_pmu_register(&pmu->base, pmu->name, -1); in i915_pmu_register()
1307 ret = i915_pmu_register_cpuhp_state(pmu); in i915_pmu_register()
1314 perf_pmu_unregister(&pmu->base); in i915_pmu_register()
1316 kfree(pmu->base.attr_groups); in i915_pmu_register()
1318 pmu->base.event_init = NULL; in i915_pmu_register()
1319 free_event_attributes(pmu); in i915_pmu_register()
1322 kfree(pmu->name); in i915_pmu_register()
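i915_pmu_register() wires everything up in dependency order: lock and hrtimer init, RC6 seeding, a perf-friendly name (kasprintf() plus strreplace() swapping ':' for '_', keeping the name usable by perf tooling, with a static "i915" fallback), the dynamically built events attribute group, the struct pmu callbacks, then perf_pmu_register() followed by the cpuhp instance. The error path unwinds in reverse; a sketch of its tail, where the goto label names are assumptions:

	ret = perf_pmu_register(&pmu->base, pmu->name, -1);
	if (ret)
		goto err_groups;

	ret = i915_pmu_register_cpuhp_state(pmu);
	if (ret)
		goto err_unreg;

	return;

err_unreg:
	perf_pmu_unregister(&pmu->base);
err_groups:
	kfree(pmu->base.attr_groups);
	pmu->base.event_init = NULL;	/* marks the PMU as never registered */
	free_event_attributes(pmu);
	kfree(pmu->name);	/* only when kasprintf'd; the guard is elided here */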
1329 struct i915_pmu *pmu = &i915->pmu; in i915_pmu_unregister() local
1331 if (!pmu->base.event_init) in i915_pmu_unregister()
1339 pmu->closed = true; in i915_pmu_unregister()
1342 hrtimer_cancel(&pmu->timer); in i915_pmu_unregister()
1344 i915_pmu_unregister_cpuhp_state(pmu); in i915_pmu_unregister()
1346 perf_pmu_unregister(&pmu->base); in i915_pmu_unregister()
1347 pmu->base.event_init = NULL; in i915_pmu_unregister()
1348 kfree(pmu->base.attr_groups); in i915_pmu_unregister()
1350 kfree(pmu->name); in i915_pmu_unregister()
1351 free_event_attributes(pmu); in i915_pmu_unregister()
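i915_pmu_unregister() tears down in an order chosen to quiesce before freeing: pmu->closed is set first so every concurrent or future perf callback bails, the sampling hrtimer is cancelled, the cpuhp instance is removed, and only then is the PMU unregistered and its allocations released, with event_init = NULL doubling as the "registered" flag that the early-out at the top tests. Assembled from the fragments above:

	void i915_pmu_unregister(struct drm_i915_private *i915)
	{
		struct i915_pmu *pmu = &i915->pmu;

		if (!pmu->base.event_init)
			return;

		/* Make all perf callbacks no-ops before tearing anything down. */
		pmu->closed = true;

		hrtimer_cancel(&pmu->timer);

		i915_pmu_unregister_cpuhp_state(pmu);

		perf_pmu_unregister(&pmu->base);
		pmu->base.event_init = NULL;
		kfree(pmu->base.attr_groups);
		kfree(pmu->name);	/* only when kasprintf'd; guard elided */
		free_event_attributes(pmu);
	}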