1b46a33e2STvrtko Ursulin /* 2058a9b43SMichal Wajdeczko * SPDX-License-Identifier: MIT 3b46a33e2STvrtko Ursulin * 4058a9b43SMichal Wajdeczko * Copyright © 2017-2018 Intel Corporation 5b46a33e2STvrtko Ursulin */ 6b46a33e2STvrtko Ursulin 7447ae316SNicolai Stange #include <linux/irq.h> 83b4ed2e2SVincent Guittot #include <linux/pm_runtime.h> 9112ed2d3SChris Wilson 10112ed2d3SChris Wilson #include "gt/intel_engine.h" 1151fbd8deSChris Wilson #include "gt/intel_engine_pm.h" 12750e76b4SChris Wilson #include "gt/intel_engine_user.h" 1351fbd8deSChris Wilson #include "gt/intel_gt_pm.h" 14c1132367SAndi Shyti #include "gt/intel_rc6.h" 153e7abf81SAndi Shyti #include "gt/intel_rps.h" 16112ed2d3SChris Wilson 17058a9b43SMichal Wajdeczko #include "i915_drv.h" 18ecbb5fb7SJani Nikula #include "i915_pmu.h" 19ecbb5fb7SJani Nikula #include "intel_pm.h" 20b46a33e2STvrtko Ursulin 21b46a33e2STvrtko Ursulin /* Frequency for the sampling timer for events which need it. */ 22b46a33e2STvrtko Ursulin #define FREQUENCY 200 23b46a33e2STvrtko Ursulin #define PERIOD max_t(u64, 10000, NSEC_PER_SEC / FREQUENCY) 24b46a33e2STvrtko Ursulin 25b46a33e2STvrtko Ursulin #define ENGINE_SAMPLE_MASK \ 26b46a33e2STvrtko Ursulin (BIT(I915_SAMPLE_BUSY) | \ 27b46a33e2STvrtko Ursulin BIT(I915_SAMPLE_WAIT) | \ 28b46a33e2STvrtko Ursulin BIT(I915_SAMPLE_SEMA)) 29b46a33e2STvrtko Ursulin 30b46a33e2STvrtko Ursulin #define ENGINE_SAMPLE_BITS (1 << I915_PMU_SAMPLE_BITS) 31b46a33e2STvrtko Ursulin 32141a0895SChris Wilson static cpumask_t i915_pmu_cpumask; 33*537f9c84STvrtko Ursulin static unsigned int i915_pmu_target_cpu = -1; 34b46a33e2STvrtko Ursulin 35b46a33e2STvrtko Ursulin static u8 engine_config_sample(u64 config) 36b46a33e2STvrtko Ursulin { 37b46a33e2STvrtko Ursulin return config & I915_PMU_SAMPLE_MASK; 38b46a33e2STvrtko Ursulin } 39b46a33e2STvrtko Ursulin 40b46a33e2STvrtko Ursulin static u8 engine_event_sample(struct perf_event *event) 41b46a33e2STvrtko Ursulin { 42b46a33e2STvrtko Ursulin return engine_config_sample(event->attr.config); 43b46a33e2STvrtko Ursulin } 44b46a33e2STvrtko Ursulin 45b46a33e2STvrtko Ursulin static u8 engine_event_class(struct perf_event *event) 46b46a33e2STvrtko Ursulin { 47b46a33e2STvrtko Ursulin return (event->attr.config >> I915_PMU_CLASS_SHIFT) & 0xff; 48b46a33e2STvrtko Ursulin } 49b46a33e2STvrtko Ursulin 50b46a33e2STvrtko Ursulin static u8 engine_event_instance(struct perf_event *event) 51b46a33e2STvrtko Ursulin { 52b46a33e2STvrtko Ursulin return (event->attr.config >> I915_PMU_SAMPLE_BITS) & 0xff; 53b46a33e2STvrtko Ursulin } 54b46a33e2STvrtko Ursulin 55b46a33e2STvrtko Ursulin static bool is_engine_config(u64 config) 56b46a33e2STvrtko Ursulin { 57b46a33e2STvrtko Ursulin return config < __I915_PMU_OTHER(0); 58b46a33e2STvrtko Ursulin } 59b46a33e2STvrtko Ursulin 60b46a33e2STvrtko Ursulin static unsigned int config_enabled_bit(u64 config) 61b46a33e2STvrtko Ursulin { 62b46a33e2STvrtko Ursulin if (is_engine_config(config)) 63b46a33e2STvrtko Ursulin return engine_config_sample(config); 64b46a33e2STvrtko Ursulin else 65b46a33e2STvrtko Ursulin return ENGINE_SAMPLE_BITS + (config - __I915_PMU_OTHER(0)); 66b46a33e2STvrtko Ursulin } 67b46a33e2STvrtko Ursulin 68b46a33e2STvrtko Ursulin static u64 config_enabled_mask(u64 config) 69b46a33e2STvrtko Ursulin { 70b46a33e2STvrtko Ursulin return BIT_ULL(config_enabled_bit(config)); 71b46a33e2STvrtko Ursulin } 72b46a33e2STvrtko Ursulin 73b46a33e2STvrtko Ursulin static bool is_engine_event(struct perf_event *event) 74b46a33e2STvrtko Ursulin { 75b46a33e2STvrtko Ursulin return 
is_engine_config(event->attr.config);
76b46a33e2STvrtko Ursulin }
77b46a33e2STvrtko Ursulin
78b46a33e2STvrtko Ursulin static unsigned int event_enabled_bit(struct perf_event *event)
79b46a33e2STvrtko Ursulin {
80b46a33e2STvrtko Ursulin return config_enabled_bit(event->attr.config);
81b46a33e2STvrtko Ursulin }
82b46a33e2STvrtko Ursulin
83908091c8STvrtko Ursulin static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active)
84feff0dc6STvrtko Ursulin {
85908091c8STvrtko Ursulin struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
86feff0dc6STvrtko Ursulin u64 enable;
87feff0dc6STvrtko Ursulin
88feff0dc6STvrtko Ursulin /*
89feff0dc6STvrtko Ursulin * Only some counters need the sampling timer.
90feff0dc6STvrtko Ursulin *
91feff0dc6STvrtko Ursulin * We start with a bitmask of all currently enabled events.
92feff0dc6STvrtko Ursulin */
93908091c8STvrtko Ursulin enable = pmu->enable;
94feff0dc6STvrtko Ursulin
95feff0dc6STvrtko Ursulin /*
96feff0dc6STvrtko Ursulin * Mask out all the ones which do not need the timer, or in
97feff0dc6STvrtko Ursulin * other words keep all the ones that could need the timer.
98feff0dc6STvrtko Ursulin */
99feff0dc6STvrtko Ursulin enable &= config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY) |
100feff0dc6STvrtko Ursulin config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY) |
101feff0dc6STvrtko Ursulin ENGINE_SAMPLE_MASK;
102feff0dc6STvrtko Ursulin
103feff0dc6STvrtko Ursulin /*
104feff0dc6STvrtko Ursulin * When the GPU is idle per-engine counters do not need to be
105feff0dc6STvrtko Ursulin * running so clear those bits out.
106feff0dc6STvrtko Ursulin */
107feff0dc6STvrtko Ursulin if (!gpu_active)
108feff0dc6STvrtko Ursulin enable &= ~ENGINE_SAMPLE_MASK;
109b3add01eSTvrtko Ursulin /*
110b3add01eSTvrtko Ursulin * Also, when there is software busyness tracking available we do not
111b3add01eSTvrtko Ursulin * need the timer for the I915_SAMPLE_BUSY counter.
112b3add01eSTvrtko Ursulin */
113bf73fc0fSChris Wilson else if (i915->caps.scheduler & I915_SCHEDULER_CAP_ENGINE_BUSY_STATS)
114b3add01eSTvrtko Ursulin enable &= ~BIT(I915_SAMPLE_BUSY);
115feff0dc6STvrtko Ursulin
116feff0dc6STvrtko Ursulin /*
117feff0dc6STvrtko Ursulin * If some bits remain it means we need the sampling timer running.
118feff0dc6STvrtko Ursulin */
119feff0dc6STvrtko Ursulin return enable;
120feff0dc6STvrtko Ursulin }
121feff0dc6STvrtko Ursulin
122c1132367SAndi Shyti static u64 __get_rc6(struct intel_gt *gt)
12316ffe73cSChris Wilson {
12416ffe73cSChris Wilson struct drm_i915_private *i915 = gt->i915;
12516ffe73cSChris Wilson u64 val;
12616ffe73cSChris Wilson
127c1132367SAndi Shyti val = intel_rc6_residency_ns(&gt->rc6,
12816ffe73cSChris Wilson IS_VALLEYVIEW(i915) ?
12916ffe73cSChris Wilson VLV_GT_RENDER_RC6 :
13016ffe73cSChris Wilson GEN6_GT_GFX_RC6);
13116ffe73cSChris Wilson
13216ffe73cSChris Wilson if (HAS_RC6p(i915))
133c1132367SAndi Shyti val += intel_rc6_residency_ns(&gt->rc6, GEN6_GT_GFX_RC6p);
13416ffe73cSChris Wilson
13516ffe73cSChris Wilson if (HAS_RC6pp(i915))
136c1132367SAndi Shyti val += intel_rc6_residency_ns(&gt->rc6, GEN6_GT_GFX_RC6pp);
13716ffe73cSChris Wilson
13816ffe73cSChris Wilson return val;
13916ffe73cSChris Wilson }
14016ffe73cSChris Wilson
14116ffe73cSChris Wilson #if IS_ENABLED(CONFIG_PM)
14216ffe73cSChris Wilson
14316ffe73cSChris Wilson static inline s64 ktime_since(const ktime_t kt)
14416ffe73cSChris Wilson {
14516ffe73cSChris Wilson return ktime_to_ns(ktime_sub(ktime_get(), kt));
14616ffe73cSChris Wilson }
14716ffe73cSChris Wilson
148df6a4205STvrtko Ursulin static u64 get_rc6(struct intel_gt *gt)
14916ffe73cSChris Wilson {
150df6a4205STvrtko Ursulin struct drm_i915_private *i915 = gt->i915;
151df6a4205STvrtko Ursulin struct i915_pmu *pmu = &i915->pmu;
152df6a4205STvrtko Ursulin unsigned long flags;
153df6a4205STvrtko Ursulin bool awake = false;
15416ffe73cSChris Wilson u64 val;
15516ffe73cSChris Wilson
156df6a4205STvrtko Ursulin if (intel_gt_pm_get_if_awake(gt)) {
157df6a4205STvrtko Ursulin val = __get_rc6(gt);
158df6a4205STvrtko Ursulin intel_gt_pm_put_async(gt);
159df6a4205STvrtko Ursulin awake = true;
160df6a4205STvrtko Ursulin }
161df6a4205STvrtko Ursulin
162df6a4205STvrtko Ursulin spin_lock_irqsave(&pmu->lock, flags);
163df6a4205STvrtko Ursulin
164df6a4205STvrtko Ursulin if (awake) {
165df6a4205STvrtko Ursulin pmu->sample[__I915_SAMPLE_RC6].cur = val;
166df6a4205STvrtko Ursulin } else {
16716ffe73cSChris Wilson /*
16816ffe73cSChris Wilson * We think we are runtime suspended.
16916ffe73cSChris Wilson *
17016ffe73cSChris Wilson * Report the delta from when the device was suspended to now,
17116ffe73cSChris Wilson * on top of the last known real value, as the approximated RC6
17216ffe73cSChris Wilson * counter value.
17316ffe73cSChris Wilson */ 17416ffe73cSChris Wilson val = ktime_since(pmu->sleep_last); 17516ffe73cSChris Wilson val += pmu->sample[__I915_SAMPLE_RC6].cur; 17616ffe73cSChris Wilson } 17716ffe73cSChris Wilson 178df6a4205STvrtko Ursulin if (val < pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur) 179df6a4205STvrtko Ursulin val = pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur; 18016ffe73cSChris Wilson else 181df6a4205STvrtko Ursulin pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = val; 18216ffe73cSChris Wilson 18316ffe73cSChris Wilson spin_unlock_irqrestore(&pmu->lock, flags); 18416ffe73cSChris Wilson 18516ffe73cSChris Wilson return val; 18616ffe73cSChris Wilson } 18716ffe73cSChris Wilson 18816ffe73cSChris Wilson static void park_rc6(struct drm_i915_private *i915) 189feff0dc6STvrtko Ursulin { 190908091c8STvrtko Ursulin struct i915_pmu *pmu = &i915->pmu; 191908091c8STvrtko Ursulin 19216ffe73cSChris Wilson if (pmu->enable & config_enabled_mask(I915_PMU_RC6_RESIDENCY)) 193df6a4205STvrtko Ursulin pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt); 194feff0dc6STvrtko Ursulin 19516ffe73cSChris Wilson pmu->sleep_last = ktime_get(); 196feff0dc6STvrtko Ursulin } 197feff0dc6STvrtko Ursulin 19816ffe73cSChris Wilson #else 19916ffe73cSChris Wilson 20016ffe73cSChris Wilson static u64 get_rc6(struct intel_gt *gt) 20116ffe73cSChris Wilson { 20216ffe73cSChris Wilson return __get_rc6(gt); 20316ffe73cSChris Wilson } 20416ffe73cSChris Wilson 20516ffe73cSChris Wilson static void park_rc6(struct drm_i915_private *i915) {} 20616ffe73cSChris Wilson 20716ffe73cSChris Wilson #endif 20816ffe73cSChris Wilson 209908091c8STvrtko Ursulin static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu) 210feff0dc6STvrtko Ursulin { 211908091c8STvrtko Ursulin if (!pmu->timer_enabled && pmu_needs_timer(pmu, true)) { 212908091c8STvrtko Ursulin pmu->timer_enabled = true; 213908091c8STvrtko Ursulin pmu->timer_last = ktime_get(); 214908091c8STvrtko Ursulin hrtimer_start_range_ns(&pmu->timer, 215feff0dc6STvrtko Ursulin ns_to_ktime(PERIOD), 0, 216feff0dc6STvrtko Ursulin HRTIMER_MODE_REL_PINNED); 217feff0dc6STvrtko Ursulin } 218feff0dc6STvrtko Ursulin } 219feff0dc6STvrtko Ursulin 22016ffe73cSChris Wilson void i915_pmu_gt_parked(struct drm_i915_private *i915) 22116ffe73cSChris Wilson { 22216ffe73cSChris Wilson struct i915_pmu *pmu = &i915->pmu; 22316ffe73cSChris Wilson 22416ffe73cSChris Wilson if (!pmu->base.event_init) 22516ffe73cSChris Wilson return; 22616ffe73cSChris Wilson 22716ffe73cSChris Wilson spin_lock_irq(&pmu->lock); 22816ffe73cSChris Wilson 22916ffe73cSChris Wilson park_rc6(i915); 23016ffe73cSChris Wilson 23116ffe73cSChris Wilson /* 23216ffe73cSChris Wilson * Signal sampling timer to stop if only engine events are enabled and 23316ffe73cSChris Wilson * GPU went idle. 23416ffe73cSChris Wilson */ 23516ffe73cSChris Wilson pmu->timer_enabled = pmu_needs_timer(pmu, false); 23616ffe73cSChris Wilson 23716ffe73cSChris Wilson spin_unlock_irq(&pmu->lock); 23816ffe73cSChris Wilson } 23916ffe73cSChris Wilson 240feff0dc6STvrtko Ursulin void i915_pmu_gt_unparked(struct drm_i915_private *i915) 241feff0dc6STvrtko Ursulin { 242908091c8STvrtko Ursulin struct i915_pmu *pmu = &i915->pmu; 243908091c8STvrtko Ursulin 244908091c8STvrtko Ursulin if (!pmu->base.event_init) 245feff0dc6STvrtko Ursulin return; 246feff0dc6STvrtko Ursulin 247908091c8STvrtko Ursulin spin_lock_irq(&pmu->lock); 24816ffe73cSChris Wilson 249feff0dc6STvrtko Ursulin /* 250feff0dc6STvrtko Ursulin * Re-enable sampling timer when GPU goes active. 
251feff0dc6STvrtko Ursulin */ 252908091c8STvrtko Ursulin __i915_pmu_maybe_start_timer(pmu); 25316ffe73cSChris Wilson 254908091c8STvrtko Ursulin spin_unlock_irq(&pmu->lock); 255feff0dc6STvrtko Ursulin } 256feff0dc6STvrtko Ursulin 257b46a33e2STvrtko Ursulin static void 2589f473ecfSTvrtko Ursulin add_sample(struct i915_pmu_sample *sample, u32 val) 259b46a33e2STvrtko Ursulin { 2609f473ecfSTvrtko Ursulin sample->cur += val; 261b46a33e2STvrtko Ursulin } 262b46a33e2STvrtko Ursulin 263d79e1bd6SChris Wilson static bool exclusive_mmio_access(const struct drm_i915_private *i915) 264d79e1bd6SChris Wilson { 265d79e1bd6SChris Wilson /* 266d79e1bd6SChris Wilson * We have to avoid concurrent mmio cache line access on gen7 or 267d79e1bd6SChris Wilson * risk a machine hang. For a fun history lesson dig out the old 268d79e1bd6SChris Wilson * userspace intel_gpu_top and run it on Ivybridge or Haswell! 269d79e1bd6SChris Wilson */ 270d79e1bd6SChris Wilson return IS_GEN(i915, 7); 271d79e1bd6SChris Wilson } 272d79e1bd6SChris Wilson 2736ec81b82SArnd Bergmann static void engine_sample(struct intel_engine_cs *engine, unsigned int period_ns) 274b46a33e2STvrtko Ursulin { 275d0aa694bSChris Wilson struct intel_engine_pmu *pmu = &engine->pmu; 276d0aa694bSChris Wilson bool busy; 277b46a33e2STvrtko Ursulin u32 val; 278b46a33e2STvrtko Ursulin 27928fba096STvrtko Ursulin val = ENGINE_READ_FW(engine, RING_CTL); 280d0aa694bSChris Wilson if (val == 0) /* powerwell off => engine idle */ 2816ec81b82SArnd Bergmann return; 282b46a33e2STvrtko Ursulin 2839f473ecfSTvrtko Ursulin if (val & RING_WAIT) 284d0aa694bSChris Wilson add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns); 2859f473ecfSTvrtko Ursulin if (val & RING_WAIT_SEMAPHORE) 286d0aa694bSChris Wilson add_sample(&pmu->sample[I915_SAMPLE_SEMA], period_ns); 287b46a33e2STvrtko Ursulin 28854fc577dSTvrtko Ursulin /* No need to sample when busy stats are supported. */ 28954fc577dSTvrtko Ursulin if (intel_engine_supports_stats(engine)) 2906ec81b82SArnd Bergmann return; 29154fc577dSTvrtko Ursulin 292d0aa694bSChris Wilson /* 293d0aa694bSChris Wilson * While waiting on a semaphore or event, MI_MODE reports the 294d0aa694bSChris Wilson * ring as idle. However, previously using the seqno, and with 295d0aa694bSChris Wilson * execlists sampling, we account for the ring waiting as the 296d0aa694bSChris Wilson * engine being busy. Therefore, we record the sample as being 297d0aa694bSChris Wilson * busy if either waiting or !idle. 
298d0aa694bSChris Wilson */
299d0aa694bSChris Wilson busy = val & (RING_WAIT_SEMAPHORE | RING_WAIT);
300d0aa694bSChris Wilson if (!busy) {
30128fba096STvrtko Ursulin val = ENGINE_READ_FW(engine, RING_MI_MODE);
302d0aa694bSChris Wilson busy = !(val & MODE_IDLE);
303d0aa694bSChris Wilson }
304d0aa694bSChris Wilson if (busy)
305d0aa694bSChris Wilson add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns);
3066ec81b82SArnd Bergmann }
307b46a33e2STvrtko Ursulin
3086ec81b82SArnd Bergmann static void
3096ec81b82SArnd Bergmann engines_sample(struct intel_gt *gt, unsigned int period_ns)
3106ec81b82SArnd Bergmann {
3116ec81b82SArnd Bergmann struct drm_i915_private *i915 = gt->i915;
3126ec81b82SArnd Bergmann struct intel_engine_cs *engine;
3136ec81b82SArnd Bergmann enum intel_engine_id id;
3146ec81b82SArnd Bergmann unsigned long flags;
3156ec81b82SArnd Bergmann
3166ec81b82SArnd Bergmann if ((i915->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
3176ec81b82SArnd Bergmann return;
3186ec81b82SArnd Bergmann
3196ec81b82SArnd Bergmann if (!intel_gt_pm_is_awake(gt))
3206ec81b82SArnd Bergmann return;
3216ec81b82SArnd Bergmann
3226ec81b82SArnd Bergmann for_each_engine(engine, gt, id) {
3236ec81b82SArnd Bergmann if (!intel_engine_pm_get_if_awake(engine))
3246ec81b82SArnd Bergmann continue;
3256ec81b82SArnd Bergmann
3266ec81b82SArnd Bergmann if (exclusive_mmio_access(i915)) {
3276ec81b82SArnd Bergmann spin_lock_irqsave(&engine->uncore->lock, flags);
3286ec81b82SArnd Bergmann engine_sample(engine, period_ns);
3296ec81b82SArnd Bergmann spin_unlock_irqrestore(&engine->uncore->lock, flags);
3306ec81b82SArnd Bergmann } else {
3316ec81b82SArnd Bergmann engine_sample(engine, period_ns);
3326ec81b82SArnd Bergmann }
3336ec81b82SArnd Bergmann
33407779a76SChris Wilson intel_engine_pm_put_async(engine);
33551fbd8deSChris Wilson }
336b46a33e2STvrtko Ursulin }
337b46a33e2STvrtko Ursulin
3389f473ecfSTvrtko Ursulin static void
3399f473ecfSTvrtko Ursulin add_sample_mult(struct i915_pmu_sample *sample, u32 val, u32 mul)
3409f473ecfSTvrtko Ursulin {
3419f473ecfSTvrtko Ursulin sample->cur += mul_u32_u32(val, mul);
3429f473ecfSTvrtko Ursulin }
3439f473ecfSTvrtko Ursulin
344b66ecd04STvrtko Ursulin static bool frequency_sampling_enabled(struct i915_pmu *pmu)
345b66ecd04STvrtko Ursulin {
346b66ecd04STvrtko Ursulin return pmu->enable &
347b66ecd04STvrtko Ursulin (config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY) |
348b66ecd04STvrtko Ursulin config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY));
349b66ecd04STvrtko Ursulin }
350b66ecd04STvrtko Ursulin
3519f473ecfSTvrtko Ursulin static void
35208ce5c64STvrtko Ursulin frequency_sample(struct intel_gt *gt, unsigned int period_ns)
353b46a33e2STvrtko Ursulin {
35408ce5c64STvrtko Ursulin struct drm_i915_private *i915 = gt->i915;
35508ce5c64STvrtko Ursulin struct intel_uncore *uncore = gt->uncore;
35608ce5c64STvrtko Ursulin struct i915_pmu *pmu = &i915->pmu;
3573e7abf81SAndi Shyti struct intel_rps *rps = &gt->rps;
35808ce5c64STvrtko Ursulin
359b66ecd04STvrtko Ursulin if (!frequency_sampling_enabled(pmu))
360b66ecd04STvrtko Ursulin return;
361b66ecd04STvrtko Ursulin
362b66ecd04STvrtko Ursulin /* Report 0/0 (actual/requested) frequency while parked.
*/ 363b66ecd04STvrtko Ursulin if (!intel_gt_pm_get_if_awake(gt)) 364b66ecd04STvrtko Ursulin return; 365b66ecd04STvrtko Ursulin 36608ce5c64STvrtko Ursulin if (pmu->enable & config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) { 367b46a33e2STvrtko Ursulin u32 val; 368b46a33e2STvrtko Ursulin 369c1c82d26SChris Wilson /* 370c1c82d26SChris Wilson * We take a quick peek here without using forcewake 371c1c82d26SChris Wilson * so that we don't perturb the system under observation 372c1c82d26SChris Wilson * (forcewake => !rc6 => increased power use). We expect 373c1c82d26SChris Wilson * that if the read fails because it is outside of the 374c1c82d26SChris Wilson * mmio power well, then it will return 0 -- in which 375c1c82d26SChris Wilson * case we assume the system is running at the intended 376c1c82d26SChris Wilson * frequency. Fortunately, the read should rarely fail! 377c1c82d26SChris Wilson */ 378b66ecd04STvrtko Ursulin val = intel_uncore_read_fw(uncore, GEN6_RPSTAT1); 379b66ecd04STvrtko Ursulin if (val) 380e03512edSAndi Shyti val = intel_rps_get_cagf(rps, val); 381b66ecd04STvrtko Ursulin else 382b66ecd04STvrtko Ursulin val = rps->cur_freq; 383b46a33e2STvrtko Ursulin 38408ce5c64STvrtko Ursulin add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_ACT], 385b66ecd04STvrtko Ursulin intel_gpu_freq(rps, val), period_ns / 1000); 386b46a33e2STvrtko Ursulin } 387b46a33e2STvrtko Ursulin 38808ce5c64STvrtko Ursulin if (pmu->enable & config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) { 38908ce5c64STvrtko Ursulin add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_REQ], 3903e7abf81SAndi Shyti intel_gpu_freq(rps, rps->cur_freq), 3919f473ecfSTvrtko Ursulin period_ns / 1000); 392b46a33e2STvrtko Ursulin } 393b66ecd04STvrtko Ursulin 394b66ecd04STvrtko Ursulin intel_gt_pm_put_async(gt); 395b46a33e2STvrtko Ursulin } 396b46a33e2STvrtko Ursulin 397b46a33e2STvrtko Ursulin static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer) 398b46a33e2STvrtko Ursulin { 399b46a33e2STvrtko Ursulin struct drm_i915_private *i915 = 400b46a33e2STvrtko Ursulin container_of(hrtimer, struct drm_i915_private, pmu.timer); 401908091c8STvrtko Ursulin struct i915_pmu *pmu = &i915->pmu; 40208ce5c64STvrtko Ursulin struct intel_gt *gt = &i915->gt; 4039f473ecfSTvrtko Ursulin unsigned int period_ns; 4049f473ecfSTvrtko Ursulin ktime_t now; 405b46a33e2STvrtko Ursulin 406908091c8STvrtko Ursulin if (!READ_ONCE(pmu->timer_enabled)) 407b46a33e2STvrtko Ursulin return HRTIMER_NORESTART; 408b46a33e2STvrtko Ursulin 4099f473ecfSTvrtko Ursulin now = ktime_get(); 410908091c8STvrtko Ursulin period_ns = ktime_to_ns(ktime_sub(now, pmu->timer_last)); 411908091c8STvrtko Ursulin pmu->timer_last = now; 412b46a33e2STvrtko Ursulin 4139f473ecfSTvrtko Ursulin /* 4149f473ecfSTvrtko Ursulin * Strictly speaking the passed in period may not be 100% accurate for 4159f473ecfSTvrtko Ursulin * all internal calculation, since some amount of time can be spent on 4169f473ecfSTvrtko Ursulin * grabbing the forcewake. However the potential error from timer call- 4179f473ecfSTvrtko Ursulin * back delay greatly dominates this so we keep it simple. 
4189f473ecfSTvrtko Ursulin */ 41908ce5c64STvrtko Ursulin engines_sample(gt, period_ns); 42008ce5c64STvrtko Ursulin frequency_sample(gt, period_ns); 4219f473ecfSTvrtko Ursulin 4229f473ecfSTvrtko Ursulin hrtimer_forward(hrtimer, now, ns_to_ktime(PERIOD)); 4239f473ecfSTvrtko Ursulin 424b46a33e2STvrtko Ursulin return HRTIMER_RESTART; 425b46a33e2STvrtko Ursulin } 426b46a33e2STvrtko Ursulin 4270cd4684dSTvrtko Ursulin static u64 count_interrupts(struct drm_i915_private *i915) 4280cd4684dSTvrtko Ursulin { 4290cd4684dSTvrtko Ursulin /* open-coded kstat_irqs() */ 4300cd4684dSTvrtko Ursulin struct irq_desc *desc = irq_to_desc(i915->drm.pdev->irq); 4310cd4684dSTvrtko Ursulin u64 sum = 0; 4320cd4684dSTvrtko Ursulin int cpu; 4330cd4684dSTvrtko Ursulin 4340cd4684dSTvrtko Ursulin if (!desc || !desc->kstat_irqs) 4350cd4684dSTvrtko Ursulin return 0; 4360cd4684dSTvrtko Ursulin 4370cd4684dSTvrtko Ursulin for_each_possible_cpu(cpu) 4380cd4684dSTvrtko Ursulin sum += *per_cpu_ptr(desc->kstat_irqs, cpu); 4390cd4684dSTvrtko Ursulin 4400cd4684dSTvrtko Ursulin return sum; 4410cd4684dSTvrtko Ursulin } 4420cd4684dSTvrtko Ursulin 443b46a33e2STvrtko Ursulin static void i915_pmu_event_destroy(struct perf_event *event) 444b46a33e2STvrtko Ursulin { 445bf07f6ebSPankaj Bharadiya struct drm_i915_private *i915 = 446bf07f6ebSPankaj Bharadiya container_of(event->pmu, typeof(*i915), pmu.base); 447bf07f6ebSPankaj Bharadiya 448bf07f6ebSPankaj Bharadiya drm_WARN_ON(&i915->drm, event->parent); 449b00bccb3STvrtko Ursulin 450b00bccb3STvrtko Ursulin drm_dev_put(&i915->drm); 451b46a33e2STvrtko Ursulin } 452b46a33e2STvrtko Ursulin 453109ec558STvrtko Ursulin static int 454109ec558STvrtko Ursulin engine_event_status(struct intel_engine_cs *engine, 455109ec558STvrtko Ursulin enum drm_i915_pmu_engine_sample sample) 456b46a33e2STvrtko Ursulin { 457109ec558STvrtko Ursulin switch (sample) { 458b46a33e2STvrtko Ursulin case I915_SAMPLE_BUSY: 459b46a33e2STvrtko Ursulin case I915_SAMPLE_WAIT: 460b46a33e2STvrtko Ursulin break; 461b46a33e2STvrtko Ursulin case I915_SAMPLE_SEMA: 462109ec558STvrtko Ursulin if (INTEL_GEN(engine->i915) < 6) 463b46a33e2STvrtko Ursulin return -ENODEV; 464b46a33e2STvrtko Ursulin break; 465b46a33e2STvrtko Ursulin default: 466b46a33e2STvrtko Ursulin return -ENOENT; 467b46a33e2STvrtko Ursulin } 468b46a33e2STvrtko Ursulin 469b46a33e2STvrtko Ursulin return 0; 470b46a33e2STvrtko Ursulin } 471b46a33e2STvrtko Ursulin 472109ec558STvrtko Ursulin static int 473109ec558STvrtko Ursulin config_status(struct drm_i915_private *i915, u64 config) 474109ec558STvrtko Ursulin { 475109ec558STvrtko Ursulin switch (config) { 476109ec558STvrtko Ursulin case I915_PMU_ACTUAL_FREQUENCY: 477109ec558STvrtko Ursulin if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) 478109ec558STvrtko Ursulin /* Requires a mutex for sampling! */ 479109ec558STvrtko Ursulin return -ENODEV; 480109ec558STvrtko Ursulin /* Fall-through. 
*/ 481109ec558STvrtko Ursulin case I915_PMU_REQUESTED_FREQUENCY: 482109ec558STvrtko Ursulin if (INTEL_GEN(i915) < 6) 483109ec558STvrtko Ursulin return -ENODEV; 484109ec558STvrtko Ursulin break; 485109ec558STvrtko Ursulin case I915_PMU_INTERRUPTS: 486109ec558STvrtko Ursulin break; 487109ec558STvrtko Ursulin case I915_PMU_RC6_RESIDENCY: 488109ec558STvrtko Ursulin if (!HAS_RC6(i915)) 489109ec558STvrtko Ursulin return -ENODEV; 490109ec558STvrtko Ursulin break; 491109ec558STvrtko Ursulin default: 492109ec558STvrtko Ursulin return -ENOENT; 493109ec558STvrtko Ursulin } 494109ec558STvrtko Ursulin 495109ec558STvrtko Ursulin return 0; 496109ec558STvrtko Ursulin } 497109ec558STvrtko Ursulin 498109ec558STvrtko Ursulin static int engine_event_init(struct perf_event *event) 499109ec558STvrtko Ursulin { 500109ec558STvrtko Ursulin struct drm_i915_private *i915 = 501109ec558STvrtko Ursulin container_of(event->pmu, typeof(*i915), pmu.base); 502109ec558STvrtko Ursulin struct intel_engine_cs *engine; 503109ec558STvrtko Ursulin 504109ec558STvrtko Ursulin engine = intel_engine_lookup_user(i915, engine_event_class(event), 505109ec558STvrtko Ursulin engine_event_instance(event)); 506109ec558STvrtko Ursulin if (!engine) 507109ec558STvrtko Ursulin return -ENODEV; 508109ec558STvrtko Ursulin 509426d0073SChris Wilson return engine_event_status(engine, engine_event_sample(event)); 510109ec558STvrtko Ursulin } 511109ec558STvrtko Ursulin 512b46a33e2STvrtko Ursulin static int i915_pmu_event_init(struct perf_event *event) 513b46a33e2STvrtko Ursulin { 514b46a33e2STvrtko Ursulin struct drm_i915_private *i915 = 515b46a33e2STvrtko Ursulin container_of(event->pmu, typeof(*i915), pmu.base); 516b00bccb3STvrtko Ursulin struct i915_pmu *pmu = &i915->pmu; 5170426c046STvrtko Ursulin int ret; 518b46a33e2STvrtko Ursulin 519b00bccb3STvrtko Ursulin if (pmu->closed) 520b00bccb3STvrtko Ursulin return -ENODEV; 521b00bccb3STvrtko Ursulin 522b46a33e2STvrtko Ursulin if (event->attr.type != event->pmu->type) 523b46a33e2STvrtko Ursulin return -ENOENT; 524b46a33e2STvrtko Ursulin 525b46a33e2STvrtko Ursulin /* unsupported modes and filters */ 526b46a33e2STvrtko Ursulin if (event->attr.sample_period) /* no sampling */ 527b46a33e2STvrtko Ursulin return -EINVAL; 528b46a33e2STvrtko Ursulin 529b46a33e2STvrtko Ursulin if (has_branch_stack(event)) 530b46a33e2STvrtko Ursulin return -EOPNOTSUPP; 531b46a33e2STvrtko Ursulin 532b46a33e2STvrtko Ursulin if (event->cpu < 0) 533b46a33e2STvrtko Ursulin return -EINVAL; 534b46a33e2STvrtko Ursulin 5350426c046STvrtko Ursulin /* only allow running on one cpu at a time */ 5360426c046STvrtko Ursulin if (!cpumask_test_cpu(event->cpu, &i915_pmu_cpumask)) 53700a79722STvrtko Ursulin return -EINVAL; 538b46a33e2STvrtko Ursulin 539109ec558STvrtko Ursulin if (is_engine_event(event)) 540b46a33e2STvrtko Ursulin ret = engine_event_init(event); 541109ec558STvrtko Ursulin else 542109ec558STvrtko Ursulin ret = config_status(i915, event->attr.config); 543b46a33e2STvrtko Ursulin if (ret) 544b46a33e2STvrtko Ursulin return ret; 545b46a33e2STvrtko Ursulin 546b00bccb3STvrtko Ursulin if (!event->parent) { 547b00bccb3STvrtko Ursulin drm_dev_get(&i915->drm); 548b46a33e2STvrtko Ursulin event->destroy = i915_pmu_event_destroy; 549b00bccb3STvrtko Ursulin } 550b46a33e2STvrtko Ursulin 551b46a33e2STvrtko Ursulin return 0; 552b46a33e2STvrtko Ursulin } 553b46a33e2STvrtko Ursulin 554ad055fb8STvrtko Ursulin static u64 __i915_pmu_event_read(struct perf_event *event) 555b46a33e2STvrtko Ursulin { 556b46a33e2STvrtko Ursulin struct drm_i915_private *i915 = 
557b46a33e2STvrtko Ursulin container_of(event->pmu, typeof(*i915), pmu.base); 558908091c8STvrtko Ursulin struct i915_pmu *pmu = &i915->pmu; 559b46a33e2STvrtko Ursulin u64 val = 0; 560b46a33e2STvrtko Ursulin 561b46a33e2STvrtko Ursulin if (is_engine_event(event)) { 562b46a33e2STvrtko Ursulin u8 sample = engine_event_sample(event); 563b46a33e2STvrtko Ursulin struct intel_engine_cs *engine; 564b46a33e2STvrtko Ursulin 565b46a33e2STvrtko Ursulin engine = intel_engine_lookup_user(i915, 566b46a33e2STvrtko Ursulin engine_event_class(event), 567b46a33e2STvrtko Ursulin engine_event_instance(event)); 568b46a33e2STvrtko Ursulin 56948a1b8d4SPankaj Bharadiya if (drm_WARN_ON_ONCE(&i915->drm, !engine)) { 570b46a33e2STvrtko Ursulin /* Do nothing */ 571b3add01eSTvrtko Ursulin } else if (sample == I915_SAMPLE_BUSY && 572b2f78cdaSTvrtko Ursulin intel_engine_supports_stats(engine)) { 573810b7ee3SChris Wilson ktime_t unused; 574810b7ee3SChris Wilson 575810b7ee3SChris Wilson val = ktime_to_ns(intel_engine_get_busy_time(engine, 576810b7ee3SChris Wilson &unused)); 577b46a33e2STvrtko Ursulin } else { 578b46a33e2STvrtko Ursulin val = engine->pmu.sample[sample].cur; 579b46a33e2STvrtko Ursulin } 580b46a33e2STvrtko Ursulin } else { 581b46a33e2STvrtko Ursulin switch (event->attr.config) { 582b46a33e2STvrtko Ursulin case I915_PMU_ACTUAL_FREQUENCY: 583b46a33e2STvrtko Ursulin val = 584908091c8STvrtko Ursulin div_u64(pmu->sample[__I915_SAMPLE_FREQ_ACT].cur, 5859f473ecfSTvrtko Ursulin USEC_PER_SEC /* to MHz */); 586b46a33e2STvrtko Ursulin break; 587b46a33e2STvrtko Ursulin case I915_PMU_REQUESTED_FREQUENCY: 588b46a33e2STvrtko Ursulin val = 589908091c8STvrtko Ursulin div_u64(pmu->sample[__I915_SAMPLE_FREQ_REQ].cur, 5909f473ecfSTvrtko Ursulin USEC_PER_SEC /* to MHz */); 591b46a33e2STvrtko Ursulin break; 5920cd4684dSTvrtko Ursulin case I915_PMU_INTERRUPTS: 5930cd4684dSTvrtko Ursulin val = count_interrupts(i915); 5940cd4684dSTvrtko Ursulin break; 5956060b6aeSTvrtko Ursulin case I915_PMU_RC6_RESIDENCY: 596518ea582STvrtko Ursulin val = get_rc6(&i915->gt); 5976060b6aeSTvrtko Ursulin break; 598b46a33e2STvrtko Ursulin } 599b46a33e2STvrtko Ursulin } 600b46a33e2STvrtko Ursulin 601b46a33e2STvrtko Ursulin return val; 602b46a33e2STvrtko Ursulin } 603b46a33e2STvrtko Ursulin 604b46a33e2STvrtko Ursulin static void i915_pmu_event_read(struct perf_event *event) 605b46a33e2STvrtko Ursulin { 606b00bccb3STvrtko Ursulin struct drm_i915_private *i915 = 607b00bccb3STvrtko Ursulin container_of(event->pmu, typeof(*i915), pmu.base); 608b46a33e2STvrtko Ursulin struct hw_perf_event *hwc = &event->hw; 609b00bccb3STvrtko Ursulin struct i915_pmu *pmu = &i915->pmu; 610b46a33e2STvrtko Ursulin u64 prev, new; 611b46a33e2STvrtko Ursulin 612b00bccb3STvrtko Ursulin if (pmu->closed) { 613b00bccb3STvrtko Ursulin event->hw.state = PERF_HES_STOPPED; 614b00bccb3STvrtko Ursulin return; 615b00bccb3STvrtko Ursulin } 616b46a33e2STvrtko Ursulin again: 617b46a33e2STvrtko Ursulin prev = local64_read(&hwc->prev_count); 618ad055fb8STvrtko Ursulin new = __i915_pmu_event_read(event); 619b46a33e2STvrtko Ursulin 620b46a33e2STvrtko Ursulin if (local64_cmpxchg(&hwc->prev_count, prev, new) != prev) 621b46a33e2STvrtko Ursulin goto again; 622b46a33e2STvrtko Ursulin 623b46a33e2STvrtko Ursulin local64_add(new - prev, &event->count); 624b46a33e2STvrtko Ursulin } 625b46a33e2STvrtko Ursulin 626b46a33e2STvrtko Ursulin static void i915_pmu_enable(struct perf_event *event) 627b46a33e2STvrtko Ursulin { 628b46a33e2STvrtko Ursulin struct drm_i915_private *i915 = 629b46a33e2STvrtko Ursulin 
container_of(event->pmu, typeof(*i915), pmu.base); 630b46a33e2STvrtko Ursulin unsigned int bit = event_enabled_bit(event); 631908091c8STvrtko Ursulin struct i915_pmu *pmu = &i915->pmu; 632f4e9894bSChris Wilson intel_wakeref_t wakeref; 633b46a33e2STvrtko Ursulin unsigned long flags; 634b46a33e2STvrtko Ursulin 635f4e9894bSChris Wilson wakeref = intel_runtime_pm_get(&i915->runtime_pm); 636908091c8STvrtko Ursulin spin_lock_irqsave(&pmu->lock, flags); 637b46a33e2STvrtko Ursulin 638b46a33e2STvrtko Ursulin /* 639b46a33e2STvrtko Ursulin * Update the bitmask of enabled events and increment 640b46a33e2STvrtko Ursulin * the event reference counter. 641b46a33e2STvrtko Ursulin */ 642908091c8STvrtko Ursulin BUILD_BUG_ON(ARRAY_SIZE(pmu->enable_count) != I915_PMU_MASK_BITS); 643908091c8STvrtko Ursulin GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count)); 644908091c8STvrtko Ursulin GEM_BUG_ON(pmu->enable_count[bit] == ~0); 645f4e9894bSChris Wilson 646f4e9894bSChris Wilson if (pmu->enable_count[bit] == 0 && 647f4e9894bSChris Wilson config_enabled_mask(I915_PMU_RC6_RESIDENCY) & BIT_ULL(bit)) { 648f4e9894bSChris Wilson pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = 0; 649f4e9894bSChris Wilson pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt); 650f4e9894bSChris Wilson pmu->sleep_last = ktime_get(); 651f4e9894bSChris Wilson } 652f4e9894bSChris Wilson 653908091c8STvrtko Ursulin pmu->enable |= BIT_ULL(bit); 654908091c8STvrtko Ursulin pmu->enable_count[bit]++; 655b46a33e2STvrtko Ursulin 656b46a33e2STvrtko Ursulin /* 657feff0dc6STvrtko Ursulin * Start the sampling timer if needed and not already enabled. 658feff0dc6STvrtko Ursulin */ 659908091c8STvrtko Ursulin __i915_pmu_maybe_start_timer(pmu); 660feff0dc6STvrtko Ursulin 661feff0dc6STvrtko Ursulin /* 662b46a33e2STvrtko Ursulin * For per-engine events the bitmask and reference counting 663b46a33e2STvrtko Ursulin * is stored per engine. 664b46a33e2STvrtko Ursulin */ 665b46a33e2STvrtko Ursulin if (is_engine_event(event)) { 666b46a33e2STvrtko Ursulin u8 sample = engine_event_sample(event); 667b46a33e2STvrtko Ursulin struct intel_engine_cs *engine; 668b46a33e2STvrtko Ursulin 669b46a33e2STvrtko Ursulin engine = intel_engine_lookup_user(i915, 670b46a33e2STvrtko Ursulin engine_event_class(event), 671b46a33e2STvrtko Ursulin engine_event_instance(event)); 672b46a33e2STvrtko Ursulin 67326a11deeSTvrtko Ursulin BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.enable_count) != 67426a11deeSTvrtko Ursulin I915_ENGINE_SAMPLE_COUNT); 67526a11deeSTvrtko Ursulin BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.sample) != 67626a11deeSTvrtko Ursulin I915_ENGINE_SAMPLE_COUNT); 67726a11deeSTvrtko Ursulin GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count)); 67826a11deeSTvrtko Ursulin GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample)); 679b46a33e2STvrtko Ursulin GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0); 68026a11deeSTvrtko Ursulin 68126a11deeSTvrtko Ursulin engine->pmu.enable |= BIT(sample); 682b2f78cdaSTvrtko Ursulin engine->pmu.enable_count[sample]++; 683b46a33e2STvrtko Ursulin } 684b46a33e2STvrtko Ursulin 685908091c8STvrtko Ursulin spin_unlock_irqrestore(&pmu->lock, flags); 686ad055fb8STvrtko Ursulin 687b46a33e2STvrtko Ursulin /* 688b46a33e2STvrtko Ursulin * Store the current counter value so we can report the correct delta 689b46a33e2STvrtko Ursulin * for all listeners. Even when the event was already enabled and has 690b46a33e2STvrtko Ursulin * an existing non-zero value. 
691b46a33e2STvrtko Ursulin */ 692ad055fb8STvrtko Ursulin local64_set(&event->hw.prev_count, __i915_pmu_event_read(event)); 693f4e9894bSChris Wilson 694f4e9894bSChris Wilson intel_runtime_pm_put(&i915->runtime_pm, wakeref); 695b46a33e2STvrtko Ursulin } 696b46a33e2STvrtko Ursulin 697b46a33e2STvrtko Ursulin static void i915_pmu_disable(struct perf_event *event) 698b46a33e2STvrtko Ursulin { 699b46a33e2STvrtko Ursulin struct drm_i915_private *i915 = 700b46a33e2STvrtko Ursulin container_of(event->pmu, typeof(*i915), pmu.base); 701b46a33e2STvrtko Ursulin unsigned int bit = event_enabled_bit(event); 702908091c8STvrtko Ursulin struct i915_pmu *pmu = &i915->pmu; 703b46a33e2STvrtko Ursulin unsigned long flags; 704b46a33e2STvrtko Ursulin 705908091c8STvrtko Ursulin spin_lock_irqsave(&pmu->lock, flags); 706b46a33e2STvrtko Ursulin 707b46a33e2STvrtko Ursulin if (is_engine_event(event)) { 708b46a33e2STvrtko Ursulin u8 sample = engine_event_sample(event); 709b46a33e2STvrtko Ursulin struct intel_engine_cs *engine; 710b46a33e2STvrtko Ursulin 711b46a33e2STvrtko Ursulin engine = intel_engine_lookup_user(i915, 712b46a33e2STvrtko Ursulin engine_event_class(event), 713b46a33e2STvrtko Ursulin engine_event_instance(event)); 71426a11deeSTvrtko Ursulin 71526a11deeSTvrtko Ursulin GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count)); 71626a11deeSTvrtko Ursulin GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample)); 717b46a33e2STvrtko Ursulin GEM_BUG_ON(engine->pmu.enable_count[sample] == 0); 71826a11deeSTvrtko Ursulin 719b46a33e2STvrtko Ursulin /* 720b46a33e2STvrtko Ursulin * Decrement the reference count and clear the enabled 721b46a33e2STvrtko Ursulin * bitmask when the last listener on an event goes away. 722b46a33e2STvrtko Ursulin */ 723b2f78cdaSTvrtko Ursulin if (--engine->pmu.enable_count[sample] == 0) 724b46a33e2STvrtko Ursulin engine->pmu.enable &= ~BIT(sample); 725b46a33e2STvrtko Ursulin } 726b46a33e2STvrtko Ursulin 727908091c8STvrtko Ursulin GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count)); 728908091c8STvrtko Ursulin GEM_BUG_ON(pmu->enable_count[bit] == 0); 729b46a33e2STvrtko Ursulin /* 730b46a33e2STvrtko Ursulin * Decrement the reference count and clear the enabled 731b46a33e2STvrtko Ursulin * bitmask when the last listener on an event goes away. 
732b46a33e2STvrtko Ursulin */ 733908091c8STvrtko Ursulin if (--pmu->enable_count[bit] == 0) { 734908091c8STvrtko Ursulin pmu->enable &= ~BIT_ULL(bit); 735908091c8STvrtko Ursulin pmu->timer_enabled &= pmu_needs_timer(pmu, true); 736feff0dc6STvrtko Ursulin } 737b46a33e2STvrtko Ursulin 738908091c8STvrtko Ursulin spin_unlock_irqrestore(&pmu->lock, flags); 739b46a33e2STvrtko Ursulin } 740b46a33e2STvrtko Ursulin 741b46a33e2STvrtko Ursulin static void i915_pmu_event_start(struct perf_event *event, int flags) 742b46a33e2STvrtko Ursulin { 743b00bccb3STvrtko Ursulin struct drm_i915_private *i915 = 744b00bccb3STvrtko Ursulin container_of(event->pmu, typeof(*i915), pmu.base); 745b00bccb3STvrtko Ursulin struct i915_pmu *pmu = &i915->pmu; 746b00bccb3STvrtko Ursulin 747b00bccb3STvrtko Ursulin if (pmu->closed) 748b00bccb3STvrtko Ursulin return; 749b00bccb3STvrtko Ursulin 750b46a33e2STvrtko Ursulin i915_pmu_enable(event); 751b46a33e2STvrtko Ursulin event->hw.state = 0; 752b46a33e2STvrtko Ursulin } 753b46a33e2STvrtko Ursulin 754b46a33e2STvrtko Ursulin static void i915_pmu_event_stop(struct perf_event *event, int flags) 755b46a33e2STvrtko Ursulin { 756b46a33e2STvrtko Ursulin if (flags & PERF_EF_UPDATE) 757b46a33e2STvrtko Ursulin i915_pmu_event_read(event); 758b46a33e2STvrtko Ursulin i915_pmu_disable(event); 759b46a33e2STvrtko Ursulin event->hw.state = PERF_HES_STOPPED; 760b46a33e2STvrtko Ursulin } 761b46a33e2STvrtko Ursulin 762b46a33e2STvrtko Ursulin static int i915_pmu_event_add(struct perf_event *event, int flags) 763b46a33e2STvrtko Ursulin { 764b00bccb3STvrtko Ursulin struct drm_i915_private *i915 = 765b00bccb3STvrtko Ursulin container_of(event->pmu, typeof(*i915), pmu.base); 766b00bccb3STvrtko Ursulin struct i915_pmu *pmu = &i915->pmu; 767b00bccb3STvrtko Ursulin 768b00bccb3STvrtko Ursulin if (pmu->closed) 769b00bccb3STvrtko Ursulin return -ENODEV; 770b00bccb3STvrtko Ursulin 771b46a33e2STvrtko Ursulin if (flags & PERF_EF_START) 772b46a33e2STvrtko Ursulin i915_pmu_event_start(event, flags); 773b46a33e2STvrtko Ursulin 774b46a33e2STvrtko Ursulin return 0; 775b46a33e2STvrtko Ursulin } 776b46a33e2STvrtko Ursulin 777b46a33e2STvrtko Ursulin static void i915_pmu_event_del(struct perf_event *event, int flags) 778b46a33e2STvrtko Ursulin { 779b46a33e2STvrtko Ursulin i915_pmu_event_stop(event, PERF_EF_UPDATE); 780b46a33e2STvrtko Ursulin } 781b46a33e2STvrtko Ursulin 782b46a33e2STvrtko Ursulin static int i915_pmu_event_event_idx(struct perf_event *event) 783b46a33e2STvrtko Ursulin { 784b46a33e2STvrtko Ursulin return 0; 785b46a33e2STvrtko Ursulin } 786b46a33e2STvrtko Ursulin 787b7d3aabfSChris Wilson struct i915_str_attribute { 788b7d3aabfSChris Wilson struct device_attribute attr; 789b7d3aabfSChris Wilson const char *str; 790b7d3aabfSChris Wilson }; 791b7d3aabfSChris Wilson 792b46a33e2STvrtko Ursulin static ssize_t i915_pmu_format_show(struct device *dev, 793b46a33e2STvrtko Ursulin struct device_attribute *attr, char *buf) 794b46a33e2STvrtko Ursulin { 795b7d3aabfSChris Wilson struct i915_str_attribute *eattr; 796b46a33e2STvrtko Ursulin 797b7d3aabfSChris Wilson eattr = container_of(attr, struct i915_str_attribute, attr); 798b7d3aabfSChris Wilson return sprintf(buf, "%s\n", eattr->str); 799b46a33e2STvrtko Ursulin } 800b46a33e2STvrtko Ursulin 801b46a33e2STvrtko Ursulin #define I915_PMU_FORMAT_ATTR(_name, _config) \ 802b7d3aabfSChris Wilson (&((struct i915_str_attribute[]) { \ 803b46a33e2STvrtko Ursulin { .attr = __ATTR(_name, 0444, i915_pmu_format_show, NULL), \ 804b7d3aabfSChris Wilson .str = _config, } \ 
805b46a33e2STvrtko Ursulin })[0].attr.attr) 806b46a33e2STvrtko Ursulin 807b46a33e2STvrtko Ursulin static struct attribute *i915_pmu_format_attrs[] = { 808b46a33e2STvrtko Ursulin I915_PMU_FORMAT_ATTR(i915_eventid, "config:0-20"), 809b46a33e2STvrtko Ursulin NULL, 810b46a33e2STvrtko Ursulin }; 811b46a33e2STvrtko Ursulin 812b46a33e2STvrtko Ursulin static const struct attribute_group i915_pmu_format_attr_group = { 813b46a33e2STvrtko Ursulin .name = "format", 814b46a33e2STvrtko Ursulin .attrs = i915_pmu_format_attrs, 815b46a33e2STvrtko Ursulin }; 816b46a33e2STvrtko Ursulin 817b7d3aabfSChris Wilson struct i915_ext_attribute { 818b7d3aabfSChris Wilson struct device_attribute attr; 819b7d3aabfSChris Wilson unsigned long val; 820b7d3aabfSChris Wilson }; 821b7d3aabfSChris Wilson 822b46a33e2STvrtko Ursulin static ssize_t i915_pmu_event_show(struct device *dev, 823b46a33e2STvrtko Ursulin struct device_attribute *attr, char *buf) 824b46a33e2STvrtko Ursulin { 825b7d3aabfSChris Wilson struct i915_ext_attribute *eattr; 826b46a33e2STvrtko Ursulin 827b7d3aabfSChris Wilson eattr = container_of(attr, struct i915_ext_attribute, attr); 828b7d3aabfSChris Wilson return sprintf(buf, "config=0x%lx\n", eattr->val); 829b46a33e2STvrtko Ursulin } 830b46a33e2STvrtko Ursulin 831b46a33e2STvrtko Ursulin static ssize_t 832b46a33e2STvrtko Ursulin i915_pmu_get_attr_cpumask(struct device *dev, 833b46a33e2STvrtko Ursulin struct device_attribute *attr, 834b46a33e2STvrtko Ursulin char *buf) 835b46a33e2STvrtko Ursulin { 836b46a33e2STvrtko Ursulin return cpumap_print_to_pagebuf(true, buf, &i915_pmu_cpumask); 837b46a33e2STvrtko Ursulin } 838b46a33e2STvrtko Ursulin 839b46a33e2STvrtko Ursulin static DEVICE_ATTR(cpumask, 0444, i915_pmu_get_attr_cpumask, NULL); 840b46a33e2STvrtko Ursulin 841b46a33e2STvrtko Ursulin static struct attribute *i915_cpumask_attrs[] = { 842b46a33e2STvrtko Ursulin &dev_attr_cpumask.attr, 843b46a33e2STvrtko Ursulin NULL, 844b46a33e2STvrtko Ursulin }; 845b46a33e2STvrtko Ursulin 846109ec558STvrtko Ursulin static const struct attribute_group i915_pmu_cpumask_attr_group = { 847b46a33e2STvrtko Ursulin .attrs = i915_cpumask_attrs, 848b46a33e2STvrtko Ursulin }; 849b46a33e2STvrtko Ursulin 850109ec558STvrtko Ursulin #define __event(__config, __name, __unit) \ 851109ec558STvrtko Ursulin { \ 852109ec558STvrtko Ursulin .config = (__config), \ 853109ec558STvrtko Ursulin .name = (__name), \ 854109ec558STvrtko Ursulin .unit = (__unit), \ 855109ec558STvrtko Ursulin } 856109ec558STvrtko Ursulin 857109ec558STvrtko Ursulin #define __engine_event(__sample, __name) \ 858109ec558STvrtko Ursulin { \ 859109ec558STvrtko Ursulin .sample = (__sample), \ 860109ec558STvrtko Ursulin .name = (__name), \ 861109ec558STvrtko Ursulin } 862109ec558STvrtko Ursulin 863109ec558STvrtko Ursulin static struct i915_ext_attribute * 864109ec558STvrtko Ursulin add_i915_attr(struct i915_ext_attribute *attr, const char *name, u64 config) 865109ec558STvrtko Ursulin { 8662bbba4e9SChris Wilson sysfs_attr_init(&attr->attr.attr); 867109ec558STvrtko Ursulin attr->attr.attr.name = name; 868109ec558STvrtko Ursulin attr->attr.attr.mode = 0444; 869109ec558STvrtko Ursulin attr->attr.show = i915_pmu_event_show; 870109ec558STvrtko Ursulin attr->val = config; 871109ec558STvrtko Ursulin 872109ec558STvrtko Ursulin return ++attr; 873109ec558STvrtko Ursulin } 874109ec558STvrtko Ursulin 875109ec558STvrtko Ursulin static struct perf_pmu_events_attr * 876109ec558STvrtko Ursulin add_pmu_attr(struct perf_pmu_events_attr *attr, const char *name, 877109ec558STvrtko Ursulin const char 
*str) 878109ec558STvrtko Ursulin { 8792bbba4e9SChris Wilson sysfs_attr_init(&attr->attr.attr); 880109ec558STvrtko Ursulin attr->attr.attr.name = name; 881109ec558STvrtko Ursulin attr->attr.attr.mode = 0444; 882109ec558STvrtko Ursulin attr->attr.show = perf_event_sysfs_show; 883109ec558STvrtko Ursulin attr->event_str = str; 884109ec558STvrtko Ursulin 885109ec558STvrtko Ursulin return ++attr; 886109ec558STvrtko Ursulin } 887109ec558STvrtko Ursulin 888109ec558STvrtko Ursulin static struct attribute ** 889908091c8STvrtko Ursulin create_event_attributes(struct i915_pmu *pmu) 890109ec558STvrtko Ursulin { 891908091c8STvrtko Ursulin struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu); 892109ec558STvrtko Ursulin static const struct { 893109ec558STvrtko Ursulin u64 config; 894109ec558STvrtko Ursulin const char *name; 895109ec558STvrtko Ursulin const char *unit; 896109ec558STvrtko Ursulin } events[] = { 897e88866efSChris Wilson __event(I915_PMU_ACTUAL_FREQUENCY, "actual-frequency", "M"), 898e88866efSChris Wilson __event(I915_PMU_REQUESTED_FREQUENCY, "requested-frequency", "M"), 899109ec558STvrtko Ursulin __event(I915_PMU_INTERRUPTS, "interrupts", NULL), 900109ec558STvrtko Ursulin __event(I915_PMU_RC6_RESIDENCY, "rc6-residency", "ns"), 901109ec558STvrtko Ursulin }; 902109ec558STvrtko Ursulin static const struct { 903109ec558STvrtko Ursulin enum drm_i915_pmu_engine_sample sample; 904109ec558STvrtko Ursulin char *name; 905109ec558STvrtko Ursulin } engine_events[] = { 906109ec558STvrtko Ursulin __engine_event(I915_SAMPLE_BUSY, "busy"), 907109ec558STvrtko Ursulin __engine_event(I915_SAMPLE_SEMA, "sema"), 908109ec558STvrtko Ursulin __engine_event(I915_SAMPLE_WAIT, "wait"), 909109ec558STvrtko Ursulin }; 910109ec558STvrtko Ursulin unsigned int count = 0; 911109ec558STvrtko Ursulin struct perf_pmu_events_attr *pmu_attr = NULL, *pmu_iter; 912109ec558STvrtko Ursulin struct i915_ext_attribute *i915_attr = NULL, *i915_iter; 913109ec558STvrtko Ursulin struct attribute **attr = NULL, **attr_iter; 914109ec558STvrtko Ursulin struct intel_engine_cs *engine; 915109ec558STvrtko Ursulin unsigned int i; 916109ec558STvrtko Ursulin 917109ec558STvrtko Ursulin /* Count how many counters we will be exposing. */ 918109ec558STvrtko Ursulin for (i = 0; i < ARRAY_SIZE(events); i++) { 919109ec558STvrtko Ursulin if (!config_status(i915, events[i].config)) 920109ec558STvrtko Ursulin count++; 921109ec558STvrtko Ursulin } 922109ec558STvrtko Ursulin 923750e76b4SChris Wilson for_each_uabi_engine(engine, i915) { 924109ec558STvrtko Ursulin for (i = 0; i < ARRAY_SIZE(engine_events); i++) { 925109ec558STvrtko Ursulin if (!engine_event_status(engine, 926109ec558STvrtko Ursulin engine_events[i].sample)) 927109ec558STvrtko Ursulin count++; 928109ec558STvrtko Ursulin } 929109ec558STvrtko Ursulin } 930109ec558STvrtko Ursulin 931109ec558STvrtko Ursulin /* Allocate attribute objects and table. */ 932dd5fec87STvrtko Ursulin i915_attr = kcalloc(count, sizeof(*i915_attr), GFP_KERNEL); 933109ec558STvrtko Ursulin if (!i915_attr) 934109ec558STvrtko Ursulin goto err_alloc; 935109ec558STvrtko Ursulin 936dd5fec87STvrtko Ursulin pmu_attr = kcalloc(count, sizeof(*pmu_attr), GFP_KERNEL); 937109ec558STvrtko Ursulin if (!pmu_attr) 938109ec558STvrtko Ursulin goto err_alloc; 939109ec558STvrtko Ursulin 940109ec558STvrtko Ursulin /* Max one pointer of each attribute type plus a termination entry. 
*/ 941dd5fec87STvrtko Ursulin attr = kcalloc(count * 2 + 1, sizeof(*attr), GFP_KERNEL); 942109ec558STvrtko Ursulin if (!attr) 943109ec558STvrtko Ursulin goto err_alloc; 944109ec558STvrtko Ursulin 945109ec558STvrtko Ursulin i915_iter = i915_attr; 946109ec558STvrtko Ursulin pmu_iter = pmu_attr; 947109ec558STvrtko Ursulin attr_iter = attr; 948109ec558STvrtko Ursulin 949109ec558STvrtko Ursulin /* Initialize supported non-engine counters. */ 950109ec558STvrtko Ursulin for (i = 0; i < ARRAY_SIZE(events); i++) { 951109ec558STvrtko Ursulin char *str; 952109ec558STvrtko Ursulin 953109ec558STvrtko Ursulin if (config_status(i915, events[i].config)) 954109ec558STvrtko Ursulin continue; 955109ec558STvrtko Ursulin 956109ec558STvrtko Ursulin str = kstrdup(events[i].name, GFP_KERNEL); 957109ec558STvrtko Ursulin if (!str) 958109ec558STvrtko Ursulin goto err; 959109ec558STvrtko Ursulin 960109ec558STvrtko Ursulin *attr_iter++ = &i915_iter->attr.attr; 961109ec558STvrtko Ursulin i915_iter = add_i915_attr(i915_iter, str, events[i].config); 962109ec558STvrtko Ursulin 963109ec558STvrtko Ursulin if (events[i].unit) { 964109ec558STvrtko Ursulin str = kasprintf(GFP_KERNEL, "%s.unit", events[i].name); 965109ec558STvrtko Ursulin if (!str) 966109ec558STvrtko Ursulin goto err; 967109ec558STvrtko Ursulin 968109ec558STvrtko Ursulin *attr_iter++ = &pmu_iter->attr.attr; 969109ec558STvrtko Ursulin pmu_iter = add_pmu_attr(pmu_iter, str, events[i].unit); 970109ec558STvrtko Ursulin } 971109ec558STvrtko Ursulin } 972109ec558STvrtko Ursulin 973109ec558STvrtko Ursulin /* Initialize supported engine counters. */ 974750e76b4SChris Wilson for_each_uabi_engine(engine, i915) { 975109ec558STvrtko Ursulin for (i = 0; i < ARRAY_SIZE(engine_events); i++) { 976109ec558STvrtko Ursulin char *str; 977109ec558STvrtko Ursulin 978109ec558STvrtko Ursulin if (engine_event_status(engine, 979109ec558STvrtko Ursulin engine_events[i].sample)) 980109ec558STvrtko Ursulin continue; 981109ec558STvrtko Ursulin 982109ec558STvrtko Ursulin str = kasprintf(GFP_KERNEL, "%s-%s", 983109ec558STvrtko Ursulin engine->name, engine_events[i].name); 984109ec558STvrtko Ursulin if (!str) 985109ec558STvrtko Ursulin goto err; 986109ec558STvrtko Ursulin 987109ec558STvrtko Ursulin *attr_iter++ = &i915_iter->attr.attr; 988109ec558STvrtko Ursulin i915_iter = 989109ec558STvrtko Ursulin add_i915_attr(i915_iter, str, 9908810bc56STvrtko Ursulin __I915_PMU_ENGINE(engine->uabi_class, 991750e76b4SChris Wilson engine->uabi_instance, 992109ec558STvrtko Ursulin engine_events[i].sample)); 993109ec558STvrtko Ursulin 994109ec558STvrtko Ursulin str = kasprintf(GFP_KERNEL, "%s-%s.unit", 995109ec558STvrtko Ursulin engine->name, engine_events[i].name); 996109ec558STvrtko Ursulin if (!str) 997109ec558STvrtko Ursulin goto err; 998109ec558STvrtko Ursulin 999109ec558STvrtko Ursulin *attr_iter++ = &pmu_iter->attr.attr; 1000109ec558STvrtko Ursulin pmu_iter = add_pmu_attr(pmu_iter, str, "ns"); 1001109ec558STvrtko Ursulin } 1002109ec558STvrtko Ursulin } 1003109ec558STvrtko Ursulin 1004908091c8STvrtko Ursulin pmu->i915_attr = i915_attr; 1005908091c8STvrtko Ursulin pmu->pmu_attr = pmu_attr; 1006109ec558STvrtko Ursulin 1007109ec558STvrtko Ursulin return attr; 1008109ec558STvrtko Ursulin 1009109ec558STvrtko Ursulin err:; 1010109ec558STvrtko Ursulin for (attr_iter = attr; *attr_iter; attr_iter++) 1011109ec558STvrtko Ursulin kfree((*attr_iter)->name); 1012109ec558STvrtko Ursulin 1013109ec558STvrtko Ursulin err_alloc: 1014109ec558STvrtko Ursulin kfree(attr); 1015109ec558STvrtko Ursulin kfree(i915_attr); 
1016109ec558STvrtko Ursulin kfree(pmu_attr); 1017109ec558STvrtko Ursulin 1018109ec558STvrtko Ursulin return NULL; 1019109ec558STvrtko Ursulin } 1020109ec558STvrtko Ursulin 1021908091c8STvrtko Ursulin static void free_event_attributes(struct i915_pmu *pmu) 1022109ec558STvrtko Ursulin { 102346129dc1SMichał Winiarski struct attribute **attr_iter = pmu->events_attr_group.attrs; 1024109ec558STvrtko Ursulin 1025109ec558STvrtko Ursulin for (; *attr_iter; attr_iter++) 1026109ec558STvrtko Ursulin kfree((*attr_iter)->name); 1027109ec558STvrtko Ursulin 102846129dc1SMichał Winiarski kfree(pmu->events_attr_group.attrs); 1029908091c8STvrtko Ursulin kfree(pmu->i915_attr); 1030908091c8STvrtko Ursulin kfree(pmu->pmu_attr); 1031109ec558STvrtko Ursulin 103246129dc1SMichał Winiarski pmu->events_attr_group.attrs = NULL; 1033908091c8STvrtko Ursulin pmu->i915_attr = NULL; 1034908091c8STvrtko Ursulin pmu->pmu_attr = NULL; 1035109ec558STvrtko Ursulin } 1036109ec558STvrtko Ursulin 1037b46a33e2STvrtko Ursulin static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node) 1038b46a33e2STvrtko Ursulin { 1039f5a179d4SMichał Winiarski struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node); 1040b46a33e2STvrtko Ursulin 1041b46a33e2STvrtko Ursulin GEM_BUG_ON(!pmu->base.event_init); 1042b46a33e2STvrtko Ursulin 1043b46a33e2STvrtko Ursulin /* Select the first online CPU as a designated reader. */ 10440426c046STvrtko Ursulin if (!cpumask_weight(&i915_pmu_cpumask)) 1045b46a33e2STvrtko Ursulin cpumask_set_cpu(cpu, &i915_pmu_cpumask); 1046b46a33e2STvrtko Ursulin 1047b46a33e2STvrtko Ursulin return 0; 1048b46a33e2STvrtko Ursulin } 1049b46a33e2STvrtko Ursulin 1050b46a33e2STvrtko Ursulin static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node) 1051b46a33e2STvrtko Ursulin { 1052f5a179d4SMichał Winiarski struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node); 1053*537f9c84STvrtko Ursulin unsigned int target = i915_pmu_target_cpu; 1054b46a33e2STvrtko Ursulin 1055b46a33e2STvrtko Ursulin GEM_BUG_ON(!pmu->base.event_init); 1056b46a33e2STvrtko Ursulin 1057*537f9c84STvrtko Ursulin /* 1058*537f9c84STvrtko Ursulin * Unregistering an instance generates a CPU offline event which we must 1059*537f9c84STvrtko Ursulin * ignore to avoid incorrectly modifying the shared i915_pmu_cpumask. 
1060*537f9c84STvrtko Ursulin */ 1061*537f9c84STvrtko Ursulin if (pmu->closed) 1062*537f9c84STvrtko Ursulin return 0; 1063*537f9c84STvrtko Ursulin 1064b46a33e2STvrtko Ursulin if (cpumask_test_and_clear_cpu(cpu, &i915_pmu_cpumask)) { 1065b46a33e2STvrtko Ursulin target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu); 1066*537f9c84STvrtko Ursulin 1067b46a33e2STvrtko Ursulin /* Migrate events if there is a valid target */ 1068b46a33e2STvrtko Ursulin if (target < nr_cpu_ids) { 1069b46a33e2STvrtko Ursulin cpumask_set_cpu(target, &i915_pmu_cpumask); 1070*537f9c84STvrtko Ursulin i915_pmu_target_cpu = target; 1071b46a33e2STvrtko Ursulin } 1072b46a33e2STvrtko Ursulin } 1073b46a33e2STvrtko Ursulin 1074*537f9c84STvrtko Ursulin if (target < nr_cpu_ids && target != pmu->cpuhp.cpu) { 1075*537f9c84STvrtko Ursulin perf_pmu_migrate_context(&pmu->base, cpu, target); 1076*537f9c84STvrtko Ursulin pmu->cpuhp.cpu = target; 1077*537f9c84STvrtko Ursulin } 1078*537f9c84STvrtko Ursulin 1079b46a33e2STvrtko Ursulin return 0; 1080b46a33e2STvrtko Ursulin } 1081b46a33e2STvrtko Ursulin 1082*537f9c84STvrtko Ursulin static enum cpuhp_state cpuhp_slot = CPUHP_INVALID; 1083*537f9c84STvrtko Ursulin 1084*537f9c84STvrtko Ursulin void i915_pmu_init(void) 1085b46a33e2STvrtko Ursulin { 1086b46a33e2STvrtko Ursulin int ret; 1087b46a33e2STvrtko Ursulin 1088b46a33e2STvrtko Ursulin ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, 1089b46a33e2STvrtko Ursulin "perf/x86/intel/i915:online", 1090b46a33e2STvrtko Ursulin i915_pmu_cpu_online, 1091b46a33e2STvrtko Ursulin i915_pmu_cpu_offline); 1092b46a33e2STvrtko Ursulin if (ret < 0) 1093*537f9c84STvrtko Ursulin pr_notice("Failed to setup cpuhp state for i915 PMU! (%d)\n", 1094*537f9c84STvrtko Ursulin ret); 1095*537f9c84STvrtko Ursulin else 1096*537f9c84STvrtko Ursulin cpuhp_slot = ret; 1097b46a33e2STvrtko Ursulin } 1098b46a33e2STvrtko Ursulin 1099*537f9c84STvrtko Ursulin void i915_pmu_exit(void) 1100*537f9c84STvrtko Ursulin { 1101*537f9c84STvrtko Ursulin if (cpuhp_slot != CPUHP_INVALID) 1102*537f9c84STvrtko Ursulin cpuhp_remove_multi_state(cpuhp_slot); 1103*537f9c84STvrtko Ursulin } 1104*537f9c84STvrtko Ursulin 1105*537f9c84STvrtko Ursulin static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu) 1106*537f9c84STvrtko Ursulin { 1107*537f9c84STvrtko Ursulin if (cpuhp_slot == CPUHP_INVALID) 1108*537f9c84STvrtko Ursulin return -EINVAL; 1109*537f9c84STvrtko Ursulin 1110*537f9c84STvrtko Ursulin return cpuhp_state_add_instance(cpuhp_slot, &pmu->cpuhp.node); 1111b46a33e2STvrtko Ursulin } 1112b46a33e2STvrtko Ursulin 1113908091c8STvrtko Ursulin static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu) 1114b46a33e2STvrtko Ursulin { 1115*537f9c84STvrtko Ursulin cpuhp_state_remove_instance(cpuhp_slot, &pmu->cpuhp.node); 1116b46a33e2STvrtko Ursulin } 1117b46a33e2STvrtko Ursulin 111805488673STvrtko Ursulin static bool is_igp(struct drm_i915_private *i915) 111905488673STvrtko Ursulin { 112005488673STvrtko Ursulin struct pci_dev *pdev = i915->drm.pdev; 112105488673STvrtko Ursulin 112205488673STvrtko Ursulin /* IGP is 0000:00:02.0 */ 112305488673STvrtko Ursulin return pci_domain_nr(pdev->bus) == 0 && 112405488673STvrtko Ursulin pdev->bus->number == 0 && 112505488673STvrtko Ursulin PCI_SLOT(pdev->devfn) == 2 && 112605488673STvrtko Ursulin PCI_FUNC(pdev->devfn) == 0; 112705488673STvrtko Ursulin } 112805488673STvrtko Ursulin 1129b46a33e2STvrtko Ursulin void i915_pmu_register(struct drm_i915_private *i915) 1130b46a33e2STvrtko Ursulin { 1131908091c8STvrtko Ursulin struct i915_pmu *pmu = 
&i915->pmu; 113246129dc1SMichał Winiarski const struct attribute_group *attr_groups[] = { 113346129dc1SMichał Winiarski &i915_pmu_format_attr_group, 113446129dc1SMichał Winiarski &pmu->events_attr_group, 113546129dc1SMichał Winiarski &i915_pmu_cpumask_attr_group, 113646129dc1SMichał Winiarski NULL 113746129dc1SMichał Winiarski }; 113846129dc1SMichał Winiarski 1139fb26eee0STvrtko Ursulin int ret = -ENOMEM; 1140b46a33e2STvrtko Ursulin 1141b46a33e2STvrtko Ursulin if (INTEL_GEN(i915) <= 2) { 11421900aba5SJani Nikula drm_info(&i915->drm, "PMU not supported for this GPU."); 1143b46a33e2STvrtko Ursulin return; 1144b46a33e2STvrtko Ursulin } 1145b46a33e2STvrtko Ursulin 1146908091c8STvrtko Ursulin spin_lock_init(&pmu->lock); 1147908091c8STvrtko Ursulin hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 1148908091c8STvrtko Ursulin pmu->timer.function = i915_sample; 1149*537f9c84STvrtko Ursulin pmu->cpuhp.cpu = -1; 1150b46a33e2STvrtko Ursulin 1151aebf3b52STvrtko Ursulin if (!is_igp(i915)) { 115205488673STvrtko Ursulin pmu->name = kasprintf(GFP_KERNEL, 1153aebf3b52STvrtko Ursulin "i915_%s", 115405488673STvrtko Ursulin dev_name(i915->drm.dev)); 1155aebf3b52STvrtko Ursulin if (pmu->name) { 1156aebf3b52STvrtko Ursulin /* tools/perf reserves colons as special. */ 1157aebf3b52STvrtko Ursulin strreplace((char *)pmu->name, ':', '_'); 1158aebf3b52STvrtko Ursulin } 1159aebf3b52STvrtko Ursulin } else { 116005488673STvrtko Ursulin pmu->name = "i915"; 1161aebf3b52STvrtko Ursulin } 116205488673STvrtko Ursulin if (!pmu->name) 1163b46a33e2STvrtko Ursulin goto err; 1164b46a33e2STvrtko Ursulin 116546129dc1SMichał Winiarski pmu->events_attr_group.name = "events"; 116646129dc1SMichał Winiarski pmu->events_attr_group.attrs = create_event_attributes(pmu); 116746129dc1SMichał Winiarski if (!pmu->events_attr_group.attrs) 1168c442292aSChris Wilson goto err_name; 1169c442292aSChris Wilson 117046129dc1SMichał Winiarski pmu->base.attr_groups = kmemdup(attr_groups, sizeof(attr_groups), 117146129dc1SMichał Winiarski GFP_KERNEL); 117246129dc1SMichał Winiarski if (!pmu->base.attr_groups) 117346129dc1SMichał Winiarski goto err_attr; 117446129dc1SMichał Winiarski 1175df3ab3cbSChris Wilson pmu->base.module = THIS_MODULE; 1176c442292aSChris Wilson pmu->base.task_ctx_nr = perf_invalid_context; 1177c442292aSChris Wilson pmu->base.event_init = i915_pmu_event_init; 1178c442292aSChris Wilson pmu->base.add = i915_pmu_event_add; 1179c442292aSChris Wilson pmu->base.del = i915_pmu_event_del; 1180c442292aSChris Wilson pmu->base.start = i915_pmu_event_start; 1181c442292aSChris Wilson pmu->base.stop = i915_pmu_event_stop; 1182c442292aSChris Wilson pmu->base.read = i915_pmu_event_read; 1183c442292aSChris Wilson pmu->base.event_idx = i915_pmu_event_event_idx; 1184c442292aSChris Wilson 118505488673STvrtko Ursulin ret = perf_pmu_register(&pmu->base, pmu->name, -1); 118605488673STvrtko Ursulin if (ret) 118746129dc1SMichał Winiarski goto err_groups; 118805488673STvrtko Ursulin 1189908091c8STvrtko Ursulin ret = i915_pmu_register_cpuhp_state(pmu); 1190b46a33e2STvrtko Ursulin if (ret) 1191b46a33e2STvrtko Ursulin goto err_unreg; 1192b46a33e2STvrtko Ursulin 1193b46a33e2STvrtko Ursulin return; 1194b46a33e2STvrtko Ursulin 1195b46a33e2STvrtko Ursulin err_unreg: 1196908091c8STvrtko Ursulin perf_pmu_unregister(&pmu->base); 119746129dc1SMichał Winiarski err_groups: 119846129dc1SMichał Winiarski kfree(pmu->base.attr_groups); 1199c442292aSChris Wilson err_attr: 1200c442292aSChris Wilson pmu->base.event_init = NULL; 1201c442292aSChris Wilson 
free_event_attributes(pmu); 120205488673STvrtko Ursulin err_name: 120305488673STvrtko Ursulin if (!is_igp(i915)) 120405488673STvrtko Ursulin kfree(pmu->name); 1205b46a33e2STvrtko Ursulin err: 12061900aba5SJani Nikula drm_notice(&i915->drm, "Failed to register PMU!\n"); 1207b46a33e2STvrtko Ursulin } 1208b46a33e2STvrtko Ursulin 1209b46a33e2STvrtko Ursulin void i915_pmu_unregister(struct drm_i915_private *i915) 1210b46a33e2STvrtko Ursulin { 1211908091c8STvrtko Ursulin struct i915_pmu *pmu = &i915->pmu; 1212908091c8STvrtko Ursulin 1213908091c8STvrtko Ursulin if (!pmu->base.event_init) 1214b46a33e2STvrtko Ursulin return; 1215b46a33e2STvrtko Ursulin 1216b00bccb3STvrtko Ursulin /* 1217b00bccb3STvrtko Ursulin * "Disconnect" the PMU callbacks - since all are atomic synchronize_rcu 1218b00bccb3STvrtko Ursulin * ensures all currently executing ones will have exited before we 1219b00bccb3STvrtko Ursulin * proceed with unregistration. 1220b00bccb3STvrtko Ursulin */ 1221b00bccb3STvrtko Ursulin pmu->closed = true; 1222b00bccb3STvrtko Ursulin synchronize_rcu(); 1223b46a33e2STvrtko Ursulin 1224908091c8STvrtko Ursulin hrtimer_cancel(&pmu->timer); 1225b46a33e2STvrtko Ursulin 1226908091c8STvrtko Ursulin i915_pmu_unregister_cpuhp_state(pmu); 1227b46a33e2STvrtko Ursulin 1228908091c8STvrtko Ursulin perf_pmu_unregister(&pmu->base); 1229908091c8STvrtko Ursulin pmu->base.event_init = NULL; 123046129dc1SMichał Winiarski kfree(pmu->base.attr_groups); 123105488673STvrtko Ursulin if (!is_igp(i915)) 123205488673STvrtko Ursulin kfree(pmu->name); 1233908091c8STvrtko Ursulin free_event_attributes(pmu); 1234b46a33e2STvrtko Ursulin } 1235
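/*
 * Illustrative userspace sketch (not part of the driver): one way a
 * monitoring tool might read the counters this PMU exposes through the
 * perf_event_open(2) interface. The sysfs path below assumes an integrated
 * GPU, where the PMU registers under the name "i915" (discrete devices get
 * an "i915_<pci-address>" name, see is_igp() above), and CPU 0 is assumed to
 * be listed in the exported "cpumask" attribute. The config value comes from
 * the uapi header include/uapi/drm/i915_drm.h.
 *
 *	#include <linux/perf_event.h>
 *	#include <drm/i915_drm.h>
 *	#include <sys/syscall.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	// Discover the dynamically assigned PMU type id from sysfs.
 *	static unsigned int i915_pmu_type(void)
 *	{
 *		unsigned int type = 0;
 *		FILE *f = fopen("/sys/bus/event_source/devices/i915/type", "r");
 *
 *		if (f) {
 *			if (fscanf(f, "%u", &type) != 1)
 *				type = 0;
 *			fclose(f);
 *		}
 *		return type;
 *	}
 *
 *	int main(void)
 *	{
 *		struct perf_event_attr attr;
 *		uint64_t rc6_ns = 0;
 *		int fd;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.size = sizeof(attr);
 *		attr.type = i915_pmu_type();
 *		attr.config = I915_PMU_RC6_RESIDENCY;
 *
 *		// Uncore-style PMU: counting only and system-wide, so pid is -1
 *		// and the cpu argument must be a CPU from the cpumask file.
 *		fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
 *		if (fd < 0)
 *			return 1;
 *
 *		if (read(fd, &rc6_ns, sizeof(rc6_ns)) != sizeof(rc6_ns))
 *			rc6_ns = 0;
 *		printf("rc6 residency: %llu ns\n", (unsigned long long)rc6_ns);
 *		close(fd);
 *		return 0;
 *	}
 */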