/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2017-2018 Intel Corporation
 */

#include <linux/irq.h>
#include <linux/pm_runtime.h>

#include "gt/intel_engine.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_rc6.h"
#include "gt/intel_rps.h"

#include "i915_drv.h"
#include "i915_pmu.h"
#include "intel_pm.h"

/* Frequency for the sampling timer for events which need it. */
#define FREQUENCY 200
#define PERIOD max_t(u64, 10000, NSEC_PER_SEC / FREQUENCY)

#define ENGINE_SAMPLE_MASK \
	(BIT(I915_SAMPLE_BUSY) | \
	 BIT(I915_SAMPLE_WAIT) | \
	 BIT(I915_SAMPLE_SEMA))

static cpumask_t i915_pmu_cpumask;
static unsigned int i915_pmu_target_cpu = -1;

static u8 engine_config_sample(u64 config)
{
	return config & I915_PMU_SAMPLE_MASK;
}

static u8 engine_event_sample(struct perf_event *event)
{
	return engine_config_sample(event->attr.config);
}

static u8 engine_event_class(struct perf_event *event)
{
	return (event->attr.config >> I915_PMU_CLASS_SHIFT) & 0xff;
}

static u8 engine_event_instance(struct perf_event *event)
{
	return (event->attr.config >> I915_PMU_SAMPLE_BITS) & 0xff;
}

static bool is_engine_config(u64 config)
{
	return config < __I915_PMU_OTHER(0);
}

static unsigned int other_bit(const u64 config)
{
	unsigned int val;

	switch (config) {
	case I915_PMU_ACTUAL_FREQUENCY:
		val = __I915_PMU_ACTUAL_FREQUENCY_ENABLED;
		break;
	case I915_PMU_REQUESTED_FREQUENCY:
		val = __I915_PMU_REQUESTED_FREQUENCY_ENABLED;
		break;
	case I915_PMU_RC6_RESIDENCY:
		val = __I915_PMU_RC6_RESIDENCY_ENABLED;
		break;
	default:
		/*
		 * Events that do not require sampling, or tracking state
		 * transitions between enabled and disabled can be ignored.
		 */
		return -1;
	}

	return I915_ENGINE_SAMPLE_COUNT + val;
}

static unsigned int config_bit(const u64 config)
{
	if (is_engine_config(config))
		return engine_config_sample(config);
	else
		return other_bit(config);
}

static u64 config_mask(u64 config)
{
	return BIT_ULL(config_bit(config));
}

static bool is_engine_event(struct perf_event *event)
{
	return is_engine_config(event->attr.config);
}

static unsigned int event_bit(struct perf_event *event)
{
	return config_bit(event->attr.config);
}

static bool event_read_needs_wakeref(const struct perf_event *event)
{
	return event->attr.config == I915_PMU_RC6_RESIDENCY;
}

static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active)
{
	struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
	u32 enable;

	/*
	 * Only some counters need the sampling timer.
	 *
	 * We start with a bitmask of all currently enabled events.
	 */
	enable = pmu->enable;

	/*
	 * Mask out all the ones which do not need the timer, or in
	 * other words keep all the ones that could need the timer.
	 */
	enable &= config_mask(I915_PMU_ACTUAL_FREQUENCY) |
		  config_mask(I915_PMU_REQUESTED_FREQUENCY) |
		  ENGINE_SAMPLE_MASK;

	/*
	 * When the GPU is idle per-engine counters do not need to be
	 * running so clear those bits out.
	 */
	if (!gpu_active)
		enable &= ~ENGINE_SAMPLE_MASK;
	/*
	 * Also, when software busyness tracking is available we do not
	 * need the timer for the I915_SAMPLE_BUSY counter.
	 */
	else if (i915->caps.scheduler & I915_SCHEDULER_CAP_ENGINE_BUSY_STATS)
		enable &= ~BIT(I915_SAMPLE_BUSY);

	/*
	 * If some bits remain it means we need the sampling timer running.
	 */
	return enable;
}
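/*
 * RC6 residency is reported as the sum of all RC6 power states supported by
 * the hardware (RC6, and where present RC6p and RC6pp).
 */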
static u64 __get_rc6(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	u64 val;

	val = intel_rc6_residency_ns(&gt->rc6,
				     IS_VALLEYVIEW(i915) ?
				     VLV_GT_RENDER_RC6 :
				     GEN6_GT_GFX_RC6);

	if (HAS_RC6p(i915))
		val += intel_rc6_residency_ns(&gt->rc6, GEN6_GT_GFX_RC6p);

	if (HAS_RC6pp(i915))
		val += intel_rc6_residency_ns(&gt->rc6, GEN6_GT_GFX_RC6pp);

	return val;
}

#if IS_ENABLED(CONFIG_PM)

static inline s64 ktime_since(const ktime_t kt)
{
	return ktime_to_ns(ktime_sub(ktime_get(), kt));
}

static u64 get_rc6(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct i915_pmu *pmu = &i915->pmu;
	unsigned long flags;
	bool awake = false;
	u64 val;

	if (intel_gt_pm_get_if_awake(gt)) {
		val = __get_rc6(gt);
		intel_gt_pm_put_async(gt);
		awake = true;
	}

	spin_lock_irqsave(&pmu->lock, flags);

	if (awake) {
		pmu->sample[__I915_SAMPLE_RC6].cur = val;
	} else {
		/*
		 * We think we are runtime suspended.
		 *
		 * Report the delta from when the device was suspended to now,
		 * on top of the last known real value, as the approximated RC6
		 * counter value.
		 */
		val = ktime_since(pmu->sleep_last);
		val += pmu->sample[__I915_SAMPLE_RC6].cur;
	}

	if (val < pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur)
		val = pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur;
	else
		pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = val;

	spin_unlock_irqrestore(&pmu->lock, flags);

	return val;
}

static void park_rc6(struct drm_i915_private *i915)
{
	struct i915_pmu *pmu = &i915->pmu;

	if (pmu->enable & config_mask(I915_PMU_RC6_RESIDENCY))
		pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);

	pmu->sleep_last = ktime_get();
}

#else

static u64 get_rc6(struct intel_gt *gt)
{
	return __get_rc6(gt);
}

static void park_rc6(struct drm_i915_private *i915) {}

#endif

static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu)
{
	if (!pmu->timer_enabled && pmu_needs_timer(pmu, true)) {
		pmu->timer_enabled = true;
		pmu->timer_last = ktime_get();
		hrtimer_start_range_ns(&pmu->timer,
				       ns_to_ktime(PERIOD), 0,
				       HRTIMER_MODE_REL_PINNED);
	}
}

void i915_pmu_gt_parked(struct drm_i915_private *i915)
{
	struct i915_pmu *pmu = &i915->pmu;

	if (!pmu->base.event_init)
		return;

	spin_lock_irq(&pmu->lock);

	park_rc6(i915);

	/*
	 * Signal sampling timer to stop if only engine events are enabled and
	 * GPU went idle.
	 */
	pmu->timer_enabled = pmu_needs_timer(pmu, false);

	spin_unlock_irq(&pmu->lock);
}

void i915_pmu_gt_unparked(struct drm_i915_private *i915)
{
	struct i915_pmu *pmu = &i915->pmu;

	if (!pmu->base.event_init)
		return;

	spin_lock_irq(&pmu->lock);

	/*
	 * Re-enable sampling timer when GPU goes active.
	 */
	__i915_pmu_maybe_start_timer(pmu);

	spin_unlock_irq(&pmu->lock);
}

static void
add_sample(struct i915_pmu_sample *sample, u32 val)
{
	sample->cur += val;
}

static bool exclusive_mmio_access(const struct drm_i915_private *i915)
{
	/*
	 * We have to avoid concurrent mmio cache line access on gen7 or
	 * risk a machine hang. For a fun history lesson dig out the old
	 * userspace intel_gpu_top and run it on Ivybridge or Haswell!
	 */
	return IS_GEN(i915, 7);
}

static void engine_sample(struct intel_engine_cs *engine, unsigned int period_ns)
{
	struct intel_engine_pmu *pmu = &engine->pmu;
	bool busy;
	u32 val;

	val = ENGINE_READ_FW(engine, RING_CTL);
	if (val == 0) /* powerwell off => engine idle */
		return;

	if (val & RING_WAIT)
		add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns);
	if (val & RING_WAIT_SEMAPHORE)
		add_sample(&pmu->sample[I915_SAMPLE_SEMA], period_ns);

	/* No need to sample when busy stats are supported. */
	if (intel_engine_supports_stats(engine))
		return;

	/*
	 * While waiting on a semaphore or event, MI_MODE reports the
	 * ring as idle. However, previously using the seqno, and with
	 * execlists sampling, we account for the ring waiting as the
	 * engine being busy. Therefore, we record the sample as being
	 * busy if either waiting or !idle.
	 */
	busy = val & (RING_WAIT_SEMAPHORE | RING_WAIT);
	if (!busy) {
		val = ENGINE_READ_FW(engine, RING_MI_MODE);
		busy = !(val & MODE_IDLE);
	}
	if (busy)
		add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns);
}

static void
engines_sample(struct intel_gt *gt, unsigned int period_ns)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned long flags;

	if ((i915->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
		return;

	if (!intel_gt_pm_is_awake(gt))
		return;

	for_each_engine(engine, gt, id) {
		if (!intel_engine_pm_get_if_awake(engine))
			continue;

		if (exclusive_mmio_access(i915)) {
			spin_lock_irqsave(&engine->uncore->lock, flags);
			engine_sample(engine, period_ns);
			spin_unlock_irqrestore(&engine->uncore->lock, flags);
		} else {
			engine_sample(engine, period_ns);
		}

		intel_engine_pm_put_async(engine);
	}
}

static void
add_sample_mult(struct i915_pmu_sample *sample, u32 val, u32 mul)
{
	sample->cur += mul_u32_u32(val, mul);
}

static bool frequency_sampling_enabled(struct i915_pmu *pmu)
{
	return pmu->enable &
	       (config_mask(I915_PMU_ACTUAL_FREQUENCY) |
		config_mask(I915_PMU_REQUESTED_FREQUENCY));
}
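/*
 * Frequency samples accumulate frequency in MHz scaled by the sampling
 * period in usec (period_ns / 1000); the read side later divides by
 * USEC_PER_SEC, so the exported counter effectively integrates MHz over time.
 */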
static void
frequency_sample(struct intel_gt *gt, unsigned int period_ns)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	struct i915_pmu *pmu = &i915->pmu;
	struct intel_rps *rps = &gt->rps;

	if (!frequency_sampling_enabled(pmu))
		return;

	/* Report 0/0 (actual/requested) frequency while parked. */
	if (!intel_gt_pm_get_if_awake(gt))
		return;

	if (pmu->enable & config_mask(I915_PMU_ACTUAL_FREQUENCY)) {
		u32 val;

		/*
		 * We take a quick peek here without using forcewake
		 * so that we don't perturb the system under observation
		 * (forcewake => !rc6 => increased power use). We expect
		 * that if the read fails because it is outside of the
		 * mmio power well, then it will return 0 -- in which
		 * case we assume the system is running at the intended
		 * frequency. Fortunately, the read should rarely fail!
		 */
		val = intel_uncore_read_fw(uncore, GEN6_RPSTAT1);
		if (val)
			val = intel_rps_get_cagf(rps, val);
		else
			val = rps->cur_freq;

		add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_ACT],
				intel_gpu_freq(rps, val), period_ns / 1000);
	}

	if (pmu->enable & config_mask(I915_PMU_REQUESTED_FREQUENCY)) {
		add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_REQ],
				intel_gpu_freq(rps, rps->cur_freq),
				period_ns / 1000);
	}

	intel_gt_pm_put_async(gt);
}

static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
{
	struct drm_i915_private *i915 =
		container_of(hrtimer, struct drm_i915_private, pmu.timer);
	struct i915_pmu *pmu = &i915->pmu;
	struct intel_gt *gt = &i915->gt;
	unsigned int period_ns;
	ktime_t now;

	if (!READ_ONCE(pmu->timer_enabled))
		return HRTIMER_NORESTART;

	now = ktime_get();
	period_ns = ktime_to_ns(ktime_sub(now, pmu->timer_last));
	pmu->timer_last = now;

	/*
	 * Strictly speaking the passed in period may not be 100% accurate for
	 * all internal calculation, since some amount of time can be spent on
	 * grabbing the forcewake. However the potential error from timer call-
	 * back delay greatly dominates this so we keep it simple.
	 */
	engines_sample(gt, period_ns);
	frequency_sample(gt, period_ns);

	hrtimer_forward(hrtimer, now, ns_to_ktime(PERIOD));

	return HRTIMER_RESTART;
}

static u64 count_interrupts(struct drm_i915_private *i915)
{
	/* open-coded kstat_irqs() */
	struct irq_desc *desc = irq_to_desc(i915->drm.pdev->irq);
	u64 sum = 0;
	int cpu;

	if (!desc || !desc->kstat_irqs)
		return 0;

	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);

	return sum;
}

static void i915_pmu_event_destroy(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);

	drm_WARN_ON(&i915->drm, event->parent);

	drm_dev_put(&i915->drm);
}

static int
engine_event_status(struct intel_engine_cs *engine,
		    enum drm_i915_pmu_engine_sample sample)
{
	switch (sample) {
	case I915_SAMPLE_BUSY:
	case I915_SAMPLE_WAIT:
		break;
	case I915_SAMPLE_SEMA:
		if (INTEL_GEN(engine->i915) < 6)
			return -ENODEV;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}

static int
config_status(struct drm_i915_private *i915, u64 config)
{
	switch (config) {
	case I915_PMU_ACTUAL_FREQUENCY:
		if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
			/* Requires a mutex for sampling! */
			return -ENODEV;
		fallthrough;
	case I915_PMU_REQUESTED_FREQUENCY:
		if (INTEL_GEN(i915) < 6)
			return -ENODEV;
		break;
	case I915_PMU_INTERRUPTS:
		break;
	case I915_PMU_RC6_RESIDENCY:
		if (!HAS_RC6(i915))
			return -ENODEV;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}

static int engine_event_init(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct intel_engine_cs *engine;

	engine = intel_engine_lookup_user(i915, engine_event_class(event),
					  engine_event_instance(event));
	if (!engine)
		return -ENODEV;

	return engine_event_status(engine, engine_event_sample(event));
}

static int i915_pmu_event_init(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct i915_pmu *pmu = &i915->pmu;
	int ret;

	if (pmu->closed)
		return -ENODEV;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* unsupported modes and filters */
	if (event->attr.sample_period) /* no sampling */
		return -EINVAL;

	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	if (event->cpu < 0)
		return -EINVAL;

	/* only allow running on one cpu at a time */
	if (!cpumask_test_cpu(event->cpu, &i915_pmu_cpumask))
		return -EINVAL;

	if (is_engine_event(event))
		ret = engine_event_init(event);
	else
		ret = config_status(i915, event->attr.config);
	if (ret)
		return ret;

	if (!event->parent) {
		drm_dev_get(&i915->drm);
		event->destroy = i915_pmu_event_destroy;
	}

	return 0;
}
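/*
 * __i915_pmu_event_read() returns the current aggregate value of a counter;
 * i915_pmu_event_read() below turns consecutive readings into the delta
 * accumulated into event->count for perf.
 */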
static u64 __i915_pmu_event_read(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct i915_pmu *pmu = &i915->pmu;
	u64 val = 0;

	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));

		if (drm_WARN_ON_ONCE(&i915->drm, !engine)) {
			/* Do nothing */
		} else if (sample == I915_SAMPLE_BUSY &&
			   intel_engine_supports_stats(engine)) {
			ktime_t unused;

			val = ktime_to_ns(intel_engine_get_busy_time(engine,
								     &unused));
		} else {
			val = engine->pmu.sample[sample].cur;
		}
	} else {
		switch (event->attr.config) {
		case I915_PMU_ACTUAL_FREQUENCY:
			val =
			   div_u64(pmu->sample[__I915_SAMPLE_FREQ_ACT].cur,
				   USEC_PER_SEC /* to MHz */);
			break;
		case I915_PMU_REQUESTED_FREQUENCY:
			val =
			   div_u64(pmu->sample[__I915_SAMPLE_FREQ_REQ].cur,
				   USEC_PER_SEC /* to MHz */);
			break;
		case I915_PMU_INTERRUPTS:
			val = count_interrupts(i915);
			break;
		case I915_PMU_RC6_RESIDENCY:
			val = get_rc6(&i915->gt);
			break;
		}
	}

	return val;
}

static void i915_pmu_event_read(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct hw_perf_event *hwc = &event->hw;
	struct i915_pmu *pmu = &i915->pmu;
	u64 prev, new;

	if (pmu->closed) {
		event->hw.state = PERF_HES_STOPPED;
		return;
	}
again:
	prev = local64_read(&hwc->prev_count);
	new = __i915_pmu_event_read(event);

	if (local64_cmpxchg(&hwc->prev_count, prev, new) != prev)
		goto again;

	local64_add(new - prev, &event->count);
}
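/*
 * Enable/disable maintain a global bitmask of enabled events plus per-bit
 * reference counts (and per-engine equivalents), all under pmu->lock, and
 * start the sampling timer when a sampled event first becomes active.
 */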
static void i915_pmu_enable(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	bool need_wakeref = event_read_needs_wakeref(event);
	struct i915_pmu *pmu = &i915->pmu;
	intel_wakeref_t wakeref = 0;
	unsigned long flags;
	unsigned int bit;

	if (need_wakeref)
		wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	bit = event_bit(event);
	if (bit == -1)
		goto update;

	spin_lock_irqsave(&pmu->lock, flags);

	/*
	 * Update the bitmask of enabled events and increment
	 * the event reference counter.
	 */
	BUILD_BUG_ON(ARRAY_SIZE(pmu->enable_count) != I915_PMU_MASK_BITS);
	GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
	GEM_BUG_ON(pmu->enable_count[bit] == ~0);

	if (pmu->enable_count[bit] == 0 &&
	    config_mask(I915_PMU_RC6_RESIDENCY) & BIT_ULL(bit)) {
		pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = 0;
		pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
		pmu->sleep_last = ktime_get();
	}

	pmu->enable |= BIT_ULL(bit);
	pmu->enable_count[bit]++;

	/*
	 * Start the sampling timer if needed and not already enabled.
	 */
	__i915_pmu_maybe_start_timer(pmu);

	/*
	 * For per-engine events the bitmask and reference counting
	 * is stored per engine.
	 */
	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));

		BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.enable_count) !=
			     I915_ENGINE_SAMPLE_COUNT);
		BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.sample) !=
			     I915_ENGINE_SAMPLE_COUNT);
		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count));
		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample));
		GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0);

		engine->pmu.enable |= BIT(sample);
		engine->pmu.enable_count[sample]++;
	}

	spin_unlock_irqrestore(&pmu->lock, flags);

update:
	/*
	 * Store the current counter value so we can report the correct delta
	 * for all listeners. Even when the event was already enabled and has
	 * an existing non-zero value.
	 */
	local64_set(&event->hw.prev_count, __i915_pmu_event_read(event));

	if (wakeref)
		intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}

static void i915_pmu_disable(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	unsigned int bit = event_bit(event);
	struct i915_pmu *pmu = &i915->pmu;
	unsigned long flags;

	if (bit == -1)
		return;

	spin_lock_irqsave(&pmu->lock, flags);

	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));

		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count));
		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample));
		GEM_BUG_ON(engine->pmu.enable_count[sample] == 0);

		/*
		 * Decrement the reference count and clear the enabled
		 * bitmask when the last listener on an event goes away.
		 */
		if (--engine->pmu.enable_count[sample] == 0)
			engine->pmu.enable &= ~BIT(sample);
	}

	GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
	GEM_BUG_ON(pmu->enable_count[bit] == 0);
	/*
	 * Decrement the reference count and clear the enabled
	 * bitmask when the last listener on an event goes away.
	 */
	if (--pmu->enable_count[bit] == 0) {
		pmu->enable &= ~BIT_ULL(bit);
		pmu->timer_enabled &= pmu_needs_timer(pmu, true);
	}

	spin_unlock_irqrestore(&pmu->lock, flags);
}

static void i915_pmu_event_start(struct perf_event *event, int flags)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct i915_pmu *pmu = &i915->pmu;

	if (pmu->closed)
		return;

	i915_pmu_enable(event);
	event->hw.state = 0;
}

static void i915_pmu_event_stop(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_UPDATE)
		i915_pmu_event_read(event);
	i915_pmu_disable(event);
	event->hw.state = PERF_HES_STOPPED;
}

static int i915_pmu_event_add(struct perf_event *event, int flags)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct i915_pmu *pmu = &i915->pmu;

	if (pmu->closed)
		return -ENODEV;

	if (flags & PERF_EF_START)
		i915_pmu_event_start(event, flags);

	return 0;
}

static void i915_pmu_event_del(struct perf_event *event, int flags)
{
	i915_pmu_event_stop(event, PERF_EF_UPDATE);
}

static int i915_pmu_event_event_idx(struct perf_event *event)
{
	return 0;
}
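/*
 * What follows is the sysfs plumbing: the "format", "events" and "cpumask"
 * attribute groups which describe the available counters to userspace
 * (e.g. tools/perf).
 */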
struct i915_str_attribute {
	struct device_attribute attr;
	const char *str;
};

static ssize_t i915_pmu_format_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct i915_str_attribute *eattr;

	eattr = container_of(attr, struct i915_str_attribute, attr);
	return sprintf(buf, "%s\n", eattr->str);
}

#define I915_PMU_FORMAT_ATTR(_name, _config) \
	(&((struct i915_str_attribute[]) { \
		{ .attr = __ATTR(_name, 0444, i915_pmu_format_show, NULL), \
		  .str = _config, } \
	})[0].attr.attr)

static struct attribute *i915_pmu_format_attrs[] = {
	I915_PMU_FORMAT_ATTR(i915_eventid, "config:0-20"),
	NULL,
};

static const struct attribute_group i915_pmu_format_attr_group = {
	.name = "format",
	.attrs = i915_pmu_format_attrs,
};

struct i915_ext_attribute {
	struct device_attribute attr;
	unsigned long val;
};

static ssize_t i915_pmu_event_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct i915_ext_attribute *eattr;

	eattr = container_of(attr, struct i915_ext_attribute, attr);
	return sprintf(buf, "config=0x%lx\n", eattr->val);
}

static ssize_t
i915_pmu_get_attr_cpumask(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &i915_pmu_cpumask);
}

static DEVICE_ATTR(cpumask, 0444, i915_pmu_get_attr_cpumask, NULL);

static struct attribute *i915_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static const struct attribute_group i915_pmu_cpumask_attr_group = {
	.attrs = i915_cpumask_attrs,
};

#define __event(__config, __name, __unit) \
{ \
	.config = (__config), \
	.name = (__name), \
	.unit = (__unit), \
}

#define __engine_event(__sample, __name) \
{ \
	.sample = (__sample), \
	.name = (__name), \
}

static struct i915_ext_attribute *
add_i915_attr(struct i915_ext_attribute *attr, const char *name, u64 config)
{
	sysfs_attr_init(&attr->attr.attr);
	attr->attr.attr.name = name;
	attr->attr.attr.mode = 0444;
	attr->attr.show = i915_pmu_event_show;
	attr->val = config;

	return ++attr;
}

static struct perf_pmu_events_attr *
add_pmu_attr(struct perf_pmu_events_attr *attr, const char *name,
	     const char *str)
{
	sysfs_attr_init(&attr->attr.attr);
	attr->attr.attr.name = name;
	attr->attr.attr.mode = 0444;
	attr->attr.show = perf_event_sysfs_show;
	attr->event_str = str;

	return ++attr;
}

static struct attribute **
create_event_attributes(struct i915_pmu *pmu)
{
	struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
	static const struct {
		u64 config;
		const char *name;
		const char *unit;
	} events[] = {
		__event(I915_PMU_ACTUAL_FREQUENCY, "actual-frequency", "M"),
		__event(I915_PMU_REQUESTED_FREQUENCY, "requested-frequency", "M"),
		__event(I915_PMU_INTERRUPTS, "interrupts", NULL),
		__event(I915_PMU_RC6_RESIDENCY, "rc6-residency", "ns"),
	};
	static const struct {
		enum drm_i915_pmu_engine_sample sample;
		char *name;
	} engine_events[] = {
		__engine_event(I915_SAMPLE_BUSY, "busy"),
		__engine_event(I915_SAMPLE_SEMA, "sema"),
		__engine_event(I915_SAMPLE_WAIT, "wait"),
	};
	unsigned int count = 0;
	struct perf_pmu_events_attr *pmu_attr = NULL, *pmu_iter;
	struct i915_ext_attribute *i915_attr = NULL, *i915_iter;
	struct attribute **attr = NULL, **attr_iter;
	struct intel_engine_cs *engine;
	unsigned int i;

	/* Count how many counters we will be exposing. */
	for (i = 0; i < ARRAY_SIZE(events); i++) {
		if (!config_status(i915, events[i].config))
			count++;
	}

	for_each_uabi_engine(engine, i915) {
		for (i = 0; i < ARRAY_SIZE(engine_events); i++) {
			if (!engine_event_status(engine,
						 engine_events[i].sample))
				count++;
		}
	}

	/* Allocate attribute objects and table. */
	i915_attr = kcalloc(count, sizeof(*i915_attr), GFP_KERNEL);
	if (!i915_attr)
		goto err_alloc;

	pmu_attr = kcalloc(count, sizeof(*pmu_attr), GFP_KERNEL);
	if (!pmu_attr)
		goto err_alloc;

	/* Max one pointer of each attribute type plus a termination entry. */
	attr = kcalloc(count * 2 + 1, sizeof(*attr), GFP_KERNEL);
	if (!attr)
		goto err_alloc;

	i915_iter = i915_attr;
	pmu_iter = pmu_attr;
	attr_iter = attr;

	/* Initialize supported non-engine counters. */
	for (i = 0; i < ARRAY_SIZE(events); i++) {
		char *str;

		if (config_status(i915, events[i].config))
			continue;

		str = kstrdup(events[i].name, GFP_KERNEL);
		if (!str)
			goto err;

		*attr_iter++ = &i915_iter->attr.attr;
		i915_iter = add_i915_attr(i915_iter, str, events[i].config);

		if (events[i].unit) {
			str = kasprintf(GFP_KERNEL, "%s.unit", events[i].name);
			if (!str)
				goto err;

			*attr_iter++ = &pmu_iter->attr.attr;
			pmu_iter = add_pmu_attr(pmu_iter, str, events[i].unit);
		}
	}

	/* Initialize supported engine counters. */
	for_each_uabi_engine(engine, i915) {
		for (i = 0; i < ARRAY_SIZE(engine_events); i++) {
			char *str;

			if (engine_event_status(engine,
						engine_events[i].sample))
				continue;

			str = kasprintf(GFP_KERNEL, "%s-%s",
					engine->name, engine_events[i].name);
			if (!str)
				goto err;

			*attr_iter++ = &i915_iter->attr.attr;
			i915_iter =
				add_i915_attr(i915_iter, str,
					      __I915_PMU_ENGINE(engine->uabi_class,
								engine->uabi_instance,
								engine_events[i].sample));

			str = kasprintf(GFP_KERNEL, "%s-%s.unit",
					engine->name, engine_events[i].name);
			if (!str)
				goto err;

			*attr_iter++ = &pmu_iter->attr.attr;
			pmu_iter = add_pmu_attr(pmu_iter, str, "ns");
		}
	}

	pmu->i915_attr = i915_attr;
	pmu->pmu_attr = pmu_attr;

	return attr;

err:;
	for (attr_iter = attr; *attr_iter; attr_iter++)
		kfree((*attr_iter)->name);

err_alloc:
	kfree(attr);
	kfree(i915_attr);
	kfree(pmu_attr);

	return NULL;
}

static void free_event_attributes(struct i915_pmu *pmu)
{
	struct attribute **attr_iter = pmu->events_attr_group.attrs;

	for (; *attr_iter; attr_iter++)
		kfree((*attr_iter)->name);

	kfree(pmu->events_attr_group.attrs);
	kfree(pmu->i915_attr);
	kfree(pmu->pmu_attr);

	pmu->events_attr_group.attrs = NULL;
	pmu->i915_attr = NULL;
	pmu->pmu_attr = NULL;
}
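/*
 * CPU hotplug callbacks: events are exposed on a single designated CPU
 * (reported via the "cpumask" attribute) and the perf context is migrated
 * to another online CPU if the designated one goes away.
 */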
static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node);

	GEM_BUG_ON(!pmu->base.event_init);

	/* Select the first online CPU as a designated reader. */
	if (!cpumask_weight(&i915_pmu_cpumask))
		cpumask_set_cpu(cpu, &i915_pmu_cpumask);

	return 0;
}

static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
	struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node);
	unsigned int target = i915_pmu_target_cpu;

	GEM_BUG_ON(!pmu->base.event_init);

	/*
	 * Unregistering an instance generates a CPU offline event which we must
	 * ignore to avoid incorrectly modifying the shared i915_pmu_cpumask.
	 */
	if (pmu->closed)
		return 0;

	if (cpumask_test_and_clear_cpu(cpu, &i915_pmu_cpumask)) {
		target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);

		/* Migrate events if there is a valid target */
		if (target < nr_cpu_ids) {
			cpumask_set_cpu(target, &i915_pmu_cpumask);
			i915_pmu_target_cpu = target;
		}
	}

	if (target < nr_cpu_ids && target != pmu->cpuhp.cpu) {
		perf_pmu_migrate_context(&pmu->base, cpu, target);
		pmu->cpuhp.cpu = target;
	}

	return 0;
}

static enum cpuhp_state cpuhp_slot = CPUHP_INVALID;

void i915_pmu_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
				      "perf/x86/intel/i915:online",
				      i915_pmu_cpu_online,
				      i915_pmu_cpu_offline);
	if (ret < 0)
		pr_notice("Failed to setup cpuhp state for i915 PMU! (%d)\n",
			  ret);
	else
		cpuhp_slot = ret;
}

void i915_pmu_exit(void)
{
	if (cpuhp_slot != CPUHP_INVALID)
		cpuhp_remove_multi_state(cpuhp_slot);
}

static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu)
{
	if (cpuhp_slot == CPUHP_INVALID)
		return -EINVAL;

	return cpuhp_state_add_instance(cpuhp_slot, &pmu->cpuhp.node);
}

static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu)
{
	cpuhp_state_remove_instance(cpuhp_slot, &pmu->cpuhp.node);
}

static bool is_igp(struct drm_i915_private *i915)
{
	struct pci_dev *pdev = i915->drm.pdev;

	/* IGP is 0000:00:02.0 */
	return pci_domain_nr(pdev->bus) == 0 &&
	       pdev->bus->number == 0 &&
	       PCI_SLOT(pdev->devfn) == 2 &&
	       PCI_FUNC(pdev->devfn) == 0;
}

void i915_pmu_register(struct drm_i915_private *i915)
{
	struct i915_pmu *pmu = &i915->pmu;
	const struct attribute_group *attr_groups[] = {
		&i915_pmu_format_attr_group,
		&pmu->events_attr_group,
		&i915_pmu_cpumask_attr_group,
		NULL
	};

	int ret = -ENOMEM;

	if (INTEL_GEN(i915) <= 2) {
		drm_info(&i915->drm, "PMU not supported for this GPU.");
		return;
	}

	spin_lock_init(&pmu->lock);
	hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	pmu->timer.function = i915_sample;
	pmu->cpuhp.cpu = -1;

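	/*
	 * The integrated GPU keeps the historical "i915" PMU name; other
	 * devices are named "i915_<device name>", with colons replaced below
	 * because tools/perf treats them as special.
	 */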
	if (!is_igp(i915)) {
		pmu->name = kasprintf(GFP_KERNEL,
				      "i915_%s",
				      dev_name(i915->drm.dev));
		if (pmu->name) {
			/* tools/perf reserves colons as special. */
			strreplace((char *)pmu->name, ':', '_');
		}
	} else {
		pmu->name = "i915";
	}
	if (!pmu->name)
		goto err;

	pmu->events_attr_group.name = "events";
	pmu->events_attr_group.attrs = create_event_attributes(pmu);
	if (!pmu->events_attr_group.attrs)
		goto err_name;

	pmu->base.attr_groups = kmemdup(attr_groups, sizeof(attr_groups),
					GFP_KERNEL);
	if (!pmu->base.attr_groups)
		goto err_attr;

	pmu->base.module = THIS_MODULE;
	pmu->base.task_ctx_nr = perf_invalid_context;
	pmu->base.event_init = i915_pmu_event_init;
	pmu->base.add = i915_pmu_event_add;
	pmu->base.del = i915_pmu_event_del;
	pmu->base.start = i915_pmu_event_start;
	pmu->base.stop = i915_pmu_event_stop;
	pmu->base.read = i915_pmu_event_read;
	pmu->base.event_idx = i915_pmu_event_event_idx;

	ret = perf_pmu_register(&pmu->base, pmu->name, -1);
	if (ret)
		goto err_groups;

	ret = i915_pmu_register_cpuhp_state(pmu);
	if (ret)
		goto err_unreg;

	return;

err_unreg:
	perf_pmu_unregister(&pmu->base);
err_groups:
	kfree(pmu->base.attr_groups);
err_attr:
	pmu->base.event_init = NULL;
	free_event_attributes(pmu);
err_name:
	if (!is_igp(i915))
		kfree(pmu->name);
err:
	drm_notice(&i915->drm, "Failed to register PMU!\n");
}

void i915_pmu_unregister(struct drm_i915_private *i915)
{
	struct i915_pmu *pmu = &i915->pmu;

	if (!pmu->base.event_init)
		return;

	/*
	 * "Disconnect" the PMU callbacks - since all are atomic, synchronize_rcu()
	 * ensures that all currently executing ones will have exited before we
	 * proceed with unregistration.
	 */
	pmu->closed = true;
	synchronize_rcu();

	hrtimer_cancel(&pmu->timer);

	i915_pmu_unregister_cpuhp_state(pmu);

	perf_pmu_unregister(&pmu->base);
	pmu->base.event_init = NULL;
	kfree(pmu->base.attr_groups);
	if (!is_igp(i915))
		kfree(pmu->name);
	free_event_attributes(pmu);
}