/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2017-2018 Intel Corporation
 */

#include <linux/pm_runtime.h>

#include "gt/intel_engine.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_rc6.h"
#include "gt/intel_rps.h"

#include "i915_drv.h"
#include "i915_pmu.h"
#include "intel_pm.h"

/* Frequency for the sampling timer for events which need it. */
#define FREQUENCY 200
#define PERIOD max_t(u64, 10000, NSEC_PER_SEC / FREQUENCY)

#define ENGINE_SAMPLE_MASK \
	(BIT(I915_SAMPLE_BUSY) | \
	 BIT(I915_SAMPLE_WAIT) | \
	 BIT(I915_SAMPLE_SEMA))

static cpumask_t i915_pmu_cpumask;
static unsigned int i915_pmu_target_cpu = -1;

static u8 engine_config_sample(u64 config)
{
	return config & I915_PMU_SAMPLE_MASK;
}

static u8 engine_event_sample(struct perf_event *event)
{
	return engine_config_sample(event->attr.config);
}

static u8 engine_event_class(struct perf_event *event)
{
	return (event->attr.config >> I915_PMU_CLASS_SHIFT) & 0xff;
}

static u8 engine_event_instance(struct perf_event *event)
{
	return (event->attr.config >> I915_PMU_SAMPLE_BITS) & 0xff;
}

static bool is_engine_config(u64 config)
{
	return config < __I915_PMU_OTHER(0);
}
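/*
 * Layout of the pmu->enable bitmask: the low I915_ENGINE_SAMPLE_COUNT bits
 * mirror the engine sample types, and global events are tracked above them.
 * For example, other_bit() below maps I915_PMU_RC6_RESIDENCY to bit
 * I915_ENGINE_SAMPLE_COUNT + __I915_PMU_RC6_RESIDENCY_ENABLED.
 */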
static unsigned int other_bit(const u64 config)
{
	unsigned int val;

	switch (config) {
	case I915_PMU_ACTUAL_FREQUENCY:
		val = __I915_PMU_ACTUAL_FREQUENCY_ENABLED;
		break;
	case I915_PMU_REQUESTED_FREQUENCY:
		val = __I915_PMU_REQUESTED_FREQUENCY_ENABLED;
		break;
	case I915_PMU_RC6_RESIDENCY:
		val = __I915_PMU_RC6_RESIDENCY_ENABLED;
		break;
	default:
		/*
		 * Events that do not require sampling, or tracking state
		 * transitions between enabled and disabled, can be ignored.
		 */
		return -1;
	}

	return I915_ENGINE_SAMPLE_COUNT + val;
}

static unsigned int config_bit(const u64 config)
{
	if (is_engine_config(config))
		return engine_config_sample(config);
	else
		return other_bit(config);
}

static u64 config_mask(u64 config)
{
	return BIT_ULL(config_bit(config));
}

static bool is_engine_event(struct perf_event *event)
{
	return is_engine_config(event->attr.config);
}

static unsigned int event_bit(struct perf_event *event)
{
	return config_bit(event->attr.config);
}

static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active)
{
	struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
	u32 enable;

	/*
	 * Only some counters need the sampling timer.
	 *
	 * We start with a bitmask of all currently enabled events.
	 */
	enable = pmu->enable;

	/*
	 * Mask out all the ones which do not need the timer, or in
	 * other words keep all the ones that could need the timer.
	 */
	enable &= config_mask(I915_PMU_ACTUAL_FREQUENCY) |
		  config_mask(I915_PMU_REQUESTED_FREQUENCY) |
		  ENGINE_SAMPLE_MASK;

	/*
	 * When the GPU is idle per-engine counters do not need to be
	 * running so clear those bits out.
	 */
	if (!gpu_active)
		enable &= ~ENGINE_SAMPLE_MASK;
	/*
	 * Also, when software busyness tracking is available we do not
	 * need the timer for the I915_SAMPLE_BUSY counter.
	 */
	else if (i915->caps.scheduler & I915_SCHEDULER_CAP_ENGINE_BUSY_STATS)
		enable &= ~BIT(I915_SAMPLE_BUSY);

	/*
	 * If some bits remain it means we need the sampling timer running.
	 */
	return enable;
}
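/*
 * In other words: the frequency counters always need the timer, while the
 * engine counters need it only when the GT is unparked, and even then
 * I915_SAMPLE_BUSY can be skipped when the backend provides software
 * busyness stats.
 */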
static u64 __get_rc6(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	u64 val;

	val = intel_rc6_residency_ns(&gt->rc6,
				     IS_VALLEYVIEW(i915) ?
				     VLV_GT_RENDER_RC6 :
				     GEN6_GT_GFX_RC6);

	if (HAS_RC6p(i915))
		val += intel_rc6_residency_ns(&gt->rc6, GEN6_GT_GFX_RC6p);

	if (HAS_RC6pp(i915))
		val += intel_rc6_residency_ns(&gt->rc6, GEN6_GT_GFX_RC6pp);

	return val;
}

static inline s64 ktime_since_raw(const ktime_t kt)
{
	return ktime_to_ns(ktime_sub(ktime_get_raw(), kt));
}

static u64 get_rc6(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct i915_pmu *pmu = &i915->pmu;
	unsigned long flags;
	bool awake = false;
	u64 val;

	if (intel_gt_pm_get_if_awake(gt)) {
		val = __get_rc6(gt);
		intel_gt_pm_put_async(gt);
		awake = true;
	}

	spin_lock_irqsave(&pmu->lock, flags);

	if (awake) {
		pmu->sample[__I915_SAMPLE_RC6].cur = val;
	} else {
		/*
		 * We think we are runtime suspended.
		 *
		 * Report the delta from when the device was suspended to now,
		 * on top of the last known real value, as the approximated RC6
		 * counter value.
		 */
		val = ktime_since_raw(pmu->sleep_last);
		val += pmu->sample[__I915_SAMPLE_RC6].cur;
	}

	if (val < pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur)
		val = pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur;
	else
		pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = val;

	spin_unlock_irqrestore(&pmu->lock, flags);

	return val;
}
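/*
 * Clamping against __I915_SAMPLE_RC6_LAST_REPORTED above keeps the exported
 * counter monotonic for userspace even if the estimate taken while runtime
 * suspended overshoots the real residency read back on the next wakeup.
 */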
static void init_rc6(struct i915_pmu *pmu)
{
	struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
	intel_wakeref_t wakeref;

	with_intel_runtime_pm(to_gt(i915)->uncore->rpm, wakeref) {
		pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(to_gt(i915));
		pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur =
					pmu->sample[__I915_SAMPLE_RC6].cur;
		pmu->sleep_last = ktime_get_raw();
	}
}

static void park_rc6(struct drm_i915_private *i915)
{
	struct i915_pmu *pmu = &i915->pmu;

	pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(to_gt(i915));
	pmu->sleep_last = ktime_get_raw();
}

static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu)
{
	if (!pmu->timer_enabled && pmu_needs_timer(pmu, true)) {
		pmu->timer_enabled = true;
		pmu->timer_last = ktime_get();
		hrtimer_start_range_ns(&pmu->timer,
				       ns_to_ktime(PERIOD), 0,
				       HRTIMER_MODE_REL_PINNED);
	}
}

void i915_pmu_gt_parked(struct drm_i915_private *i915)
{
	struct i915_pmu *pmu = &i915->pmu;

	if (!pmu->base.event_init)
		return;

	spin_lock_irq(&pmu->lock);

	park_rc6(i915);

	/*
	 * Signal sampling timer to stop if only engine events are enabled and
	 * GPU went idle.
	 */
	pmu->timer_enabled = pmu_needs_timer(pmu, false);

	spin_unlock_irq(&pmu->lock);
}
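/*
 * park_rc6() above snapshots the real residency and a raw timestamp on every
 * park, which is what allows get_rc6() to extrapolate the counter while the
 * device sleeps without having to wake it up.
 */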
void i915_pmu_gt_unparked(struct drm_i915_private *i915)
{
	struct i915_pmu *pmu = &i915->pmu;

	if (!pmu->base.event_init)
		return;

	spin_lock_irq(&pmu->lock);

	/*
	 * Re-enable sampling timer when GPU goes active.
	 */
	__i915_pmu_maybe_start_timer(pmu);

	spin_unlock_irq(&pmu->lock);
}

static void
add_sample(struct i915_pmu_sample *sample, u32 val)
{
	sample->cur += val;
}

static bool exclusive_mmio_access(const struct drm_i915_private *i915)
{
	/*
	 * We have to avoid concurrent mmio cache line access on gen7 or
	 * risk a machine hang. For a fun history lesson dig out the old
	 * userspace intel_gpu_top and run it on Ivybridge or Haswell!
	 */
	return GRAPHICS_VER(i915) == 7;
}

static void engine_sample(struct intel_engine_cs *engine, unsigned int period_ns)
{
	struct intel_engine_pmu *pmu = &engine->pmu;
	bool busy;
	u32 val;

	val = ENGINE_READ_FW(engine, RING_CTL);
	if (val == 0) /* powerwell off => engine idle */
		return;

	if (val & RING_WAIT)
		add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns);
	if (val & RING_WAIT_SEMAPHORE)
		add_sample(&pmu->sample[I915_SAMPLE_SEMA], period_ns);

	/* No need to sample when busy stats are supported. */
	if (intel_engine_supports_stats(engine))
		return;

	/*
	 * While waiting on a semaphore or event, MI_MODE reports the
	 * ring as idle. However, previously using the seqno, and with
	 * execlists sampling, we account for the ring waiting as the
	 * engine being busy. Therefore, we record the sample as being
	 * busy if either waiting or !idle.
	 */
	busy = val & (RING_WAIT_SEMAPHORE | RING_WAIT);
	if (!busy) {
		val = ENGINE_READ_FW(engine, RING_MI_MODE);
		busy = !(val & MODE_IDLE);
	}
	if (busy)
		add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns);
}
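/*
 * The sampling above attributes the whole elapsed period_ns to whichever
 * states the instantaneous RING_CTL/MI_MODE snapshot reports, so the
 * busy/wait/sema counters are time integrals approximated at the timer
 * resolution (PERIOD, nominally 5ms) rather than exact measurements.
 */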
static void
engines_sample(struct intel_gt *gt, unsigned int period_ns)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned long flags;

	if ((i915->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
		return;

	if (!intel_gt_pm_is_awake(gt))
		return;

	for_each_engine(engine, gt, id) {
		if (!intel_engine_pm_get_if_awake(engine))
			continue;

		if (exclusive_mmio_access(i915)) {
			spin_lock_irqsave(&engine->uncore->lock, flags);
			engine_sample(engine, period_ns);
			spin_unlock_irqrestore(&engine->uncore->lock, flags);
		} else {
			engine_sample(engine, period_ns);
		}

		intel_engine_pm_put_async(engine);
	}
}

static void
add_sample_mult(struct i915_pmu_sample *sample, u32 val, u32 mul)
{
	sample->cur += mul_u32_u32(val, mul);
}

static bool frequency_sampling_enabled(struct i915_pmu *pmu)
{
	return pmu->enable &
	       (config_mask(I915_PMU_ACTUAL_FREQUENCY) |
		config_mask(I915_PMU_REQUESTED_FREQUENCY));
}
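/*
 * The frequency samples below accumulate frequency in MHz multiplied by the
 * sampling period in microseconds (period_ns / 1000). On read,
 * __i915_pmu_event_read() divides the sum by USEC_PER_SEC, so the counter
 * advances by the average frequency in MHz for each second of sampled wall
 * time.
 */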
static void
frequency_sample(struct intel_gt *gt, unsigned int period_ns)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	struct i915_pmu *pmu = &i915->pmu;
	struct intel_rps *rps = &gt->rps;

	if (!frequency_sampling_enabled(pmu))
		return;

	/* Report 0/0 (actual/requested) frequency while parked. */
	if (!intel_gt_pm_get_if_awake(gt))
		return;

	if (pmu->enable & config_mask(I915_PMU_ACTUAL_FREQUENCY)) {
		u32 val;

		/*
		 * We take a quick peek here without using forcewake
		 * so that we don't perturb the system under observation
		 * (forcewake => !rc6 => increased power use). We expect
		 * that if the read fails because it is outside of the
		 * mmio power well, then it will return 0 -- in which
		 * case we assume the system is running at the intended
		 * frequency. Fortunately, the read should rarely fail!
		 */
		val = intel_uncore_read_fw(uncore, GEN6_RPSTAT1);
		if (val)
			val = intel_rps_get_cagf(rps, val);
		else
			val = rps->cur_freq;

		add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_ACT],
				intel_gpu_freq(rps, val), period_ns / 1000);
	}

	if (pmu->enable & config_mask(I915_PMU_REQUESTED_FREQUENCY)) {
		add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_REQ],
				intel_rps_get_requested_frequency(rps),
				period_ns / 1000);
	}

	intel_gt_pm_put_async(gt);
}

static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
{
	struct drm_i915_private *i915 =
		container_of(hrtimer, struct drm_i915_private, pmu.timer);
	struct i915_pmu *pmu = &i915->pmu;
	struct intel_gt *gt = to_gt(i915);
	unsigned int period_ns;
	ktime_t now;

	if (!READ_ONCE(pmu->timer_enabled))
		return HRTIMER_NORESTART;

	now = ktime_get();
	period_ns = ktime_to_ns(ktime_sub(now, pmu->timer_last));
	pmu->timer_last = now;

	/*
	 * Strictly speaking the passed in period may not be 100% accurate for
	 * all internal calculation, since some amount of time can be spent on
	 * grabbing the forcewake. However the potential error from timer
	 * callback delay greatly dominates this so we keep it simple.
	 */
	engines_sample(gt, period_ns);
	frequency_sample(gt, period_ns);

	hrtimer_forward(hrtimer, now, ns_to_ktime(PERIOD));

	return HRTIMER_RESTART;
}
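/*
 * Note that i915_sample() measures the real time elapsed since the previous
 * callback (via pmu->timer_last) instead of assuming the nominal PERIOD, so
 * the accumulated samples stay accurate even when the hrtimer fires late.
 */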
static void i915_pmu_event_destroy(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);

	drm_WARN_ON(&i915->drm, event->parent);

	drm_dev_put(&i915->drm);
}

static int
engine_event_status(struct intel_engine_cs *engine,
		    enum drm_i915_pmu_engine_sample sample)
{
	switch (sample) {
	case I915_SAMPLE_BUSY:
	case I915_SAMPLE_WAIT:
		break;
	case I915_SAMPLE_SEMA:
		if (GRAPHICS_VER(engine->i915) < 6)
			return -ENODEV;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}

static int
config_status(struct drm_i915_private *i915, u64 config)
{
	struct intel_gt *gt = to_gt(i915);

	switch (config) {
	case I915_PMU_ACTUAL_FREQUENCY:
		if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
			/* Requires a mutex for sampling! */
			return -ENODEV;
		fallthrough;
	case I915_PMU_REQUESTED_FREQUENCY:
		if (GRAPHICS_VER(i915) < 6)
			return -ENODEV;
		break;
	case I915_PMU_INTERRUPTS:
		break;
	case I915_PMU_RC6_RESIDENCY:
		if (!gt->rc6.supported)
			return -ENODEV;
		break;
	case I915_PMU_SOFTWARE_GT_AWAKE_TIME:
		break;
	default:
		return -ENOENT;
	}

	return 0;
}
static int engine_event_init(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct intel_engine_cs *engine;

	engine = intel_engine_lookup_user(i915, engine_event_class(event),
					  engine_event_instance(event));
	if (!engine)
		return -ENODEV;

	return engine_event_status(engine, engine_event_sample(event));
}

static int i915_pmu_event_init(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct i915_pmu *pmu = &i915->pmu;
	int ret;

	if (pmu->closed)
		return -ENODEV;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* unsupported modes and filters */
	if (event->attr.sample_period) /* no sampling */
		return -EINVAL;

	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	if (event->cpu < 0)
		return -EINVAL;

	/* only allow running on one cpu at a time */
	if (!cpumask_test_cpu(event->cpu, &i915_pmu_cpumask))
		return -EINVAL;

	if (is_engine_event(event))
		ret = engine_event_init(event);
	else
		ret = config_status(i915, event->attr.config);
	if (ret)
		return ret;

	if (!event->parent) {
		drm_dev_get(&i915->drm);
		event->destroy = i915_pmu_event_destroy;
	}

	return 0;
}
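/*
 * Like other system ("uncore") PMUs, i915 counts device-wide activity:
 * events cannot sample, cannot follow a task, and must be opened on the
 * single designated CPU in i915_pmu_cpumask, as enforced above.
 */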
static u64 __i915_pmu_event_read(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct i915_pmu *pmu = &i915->pmu;
	u64 val = 0;

	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));

		if (drm_WARN_ON_ONCE(&i915->drm, !engine)) {
			/* Do nothing */
		} else if (sample == I915_SAMPLE_BUSY &&
			   intel_engine_supports_stats(engine)) {
			ktime_t unused;

			val = ktime_to_ns(intel_engine_get_busy_time(engine,
								     &unused));
		} else {
			val = engine->pmu.sample[sample].cur;
		}
	} else {
		switch (event->attr.config) {
		case I915_PMU_ACTUAL_FREQUENCY:
			val =
			   div_u64(pmu->sample[__I915_SAMPLE_FREQ_ACT].cur,
				   USEC_PER_SEC /* to MHz */);
			break;
		case I915_PMU_REQUESTED_FREQUENCY:
			val =
			   div_u64(pmu->sample[__I915_SAMPLE_FREQ_REQ].cur,
				   USEC_PER_SEC /* to MHz */);
			break;
		case I915_PMU_INTERRUPTS:
			val = READ_ONCE(pmu->irq_count);
			break;
		case I915_PMU_RC6_RESIDENCY:
			val = get_rc6(to_gt(i915));
			break;
		case I915_PMU_SOFTWARE_GT_AWAKE_TIME:
			val = ktime_to_ns(intel_gt_get_awake_time(to_gt(i915)));
			break;
		}
	}

	return val;
}

static void i915_pmu_event_read(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct hw_perf_event *hwc = &event->hw;
	struct i915_pmu *pmu = &i915->pmu;
	u64 prev, new;

	if (pmu->closed) {
		event->hw.state = PERF_HES_STOPPED;
		return;
	}
again:
	prev = local64_read(&hwc->prev_count);
	new = __i915_pmu_event_read(event);

	if (local64_cmpxchg(&hwc->prev_count, prev, new) != prev)
		goto again;

	local64_add(new - prev, &event->count);
}
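/*
 * The cmpxchg loop above lets concurrent readers race to publish the latest
 * counter snapshot without a lock: only the winner of the exchange adds its
 * delta, so each increment is accounted exactly once.
 */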
static void i915_pmu_enable(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct i915_pmu *pmu = &i915->pmu;
	unsigned long flags;
	unsigned int bit;

	bit = event_bit(event);
	if (bit == -1)
		goto update;

	spin_lock_irqsave(&pmu->lock, flags);

	/*
	 * Update the bitmask of enabled events and increment
	 * the event reference counter.
	 */
	BUILD_BUG_ON(ARRAY_SIZE(pmu->enable_count) != I915_PMU_MASK_BITS);
	GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
	GEM_BUG_ON(pmu->enable_count[bit] == ~0);

	pmu->enable |= BIT_ULL(bit);
	pmu->enable_count[bit]++;

	/*
	 * Start the sampling timer if needed and not already enabled.
	 */
	__i915_pmu_maybe_start_timer(pmu);

	/*
	 * For per-engine events the bitmask and reference counting
	 * is stored per engine.
	 */
	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));

		BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.enable_count) !=
			     I915_ENGINE_SAMPLE_COUNT);
		BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.sample) !=
			     I915_ENGINE_SAMPLE_COUNT);
		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count));
		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample));
		GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0);

		engine->pmu.enable |= BIT(sample);
		engine->pmu.enable_count[sample]++;
	}

	spin_unlock_irqrestore(&pmu->lock, flags);

update:
	/*
	 * Store the current counter value so we can report the correct delta
	 * for all listeners. Even when the event was already enabled and has
	 * an existing non-zero value.
	 */
	local64_set(&event->hw.prev_count, __i915_pmu_event_read(event));
}
static void i915_pmu_disable(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	unsigned int bit = event_bit(event);
	struct i915_pmu *pmu = &i915->pmu;
	unsigned long flags;

	if (bit == -1)
		return;

	spin_lock_irqsave(&pmu->lock, flags);

	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));

		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count));
		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample));
		GEM_BUG_ON(engine->pmu.enable_count[sample] == 0);

		/*
		 * Decrement the reference count and clear the enabled
		 * bitmask when the last listener on an event goes away.
		 */
		if (--engine->pmu.enable_count[sample] == 0)
			engine->pmu.enable &= ~BIT(sample);
	}

	GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
	GEM_BUG_ON(pmu->enable_count[bit] == 0);
	/*
	 * Decrement the reference count and clear the enabled
	 * bitmask when the last listener on an event goes away.
	 */
	if (--pmu->enable_count[bit] == 0) {
		pmu->enable &= ~BIT_ULL(bit);
		pmu->timer_enabled &= pmu_needs_timer(pmu, true);
	}

	spin_unlock_irqrestore(&pmu->lock, flags);
}
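/*
 * Events with no enable state to track (event_bit() == -1, e.g.
 * I915_PMU_INTERRUPTS and I915_PMU_SOFTWARE_GT_AWAKE_TIME) bypass the
 * reference counting above entirely; they are simply read on demand.
 */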
static void i915_pmu_event_start(struct perf_event *event, int flags)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct i915_pmu *pmu = &i915->pmu;

	if (pmu->closed)
		return;

	i915_pmu_enable(event);
	event->hw.state = 0;
}

static void i915_pmu_event_stop(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_UPDATE)
		i915_pmu_event_read(event);
	i915_pmu_disable(event);
	event->hw.state = PERF_HES_STOPPED;
}

static int i915_pmu_event_add(struct perf_event *event, int flags)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct i915_pmu *pmu = &i915->pmu;

	if (pmu->closed)
		return -ENODEV;

	if (flags & PERF_EF_START)
		i915_pmu_event_start(event, flags);

	return 0;
}

static void i915_pmu_event_del(struct perf_event *event, int flags)
{
	i915_pmu_event_stop(event, PERF_EF_UPDATE);
}

static int i915_pmu_event_event_idx(struct perf_event *event)
{
	return 0;
}
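/*
 * The attribute groups below populate the sysfs nodes perf uses for event
 * discovery (/sys/bus/event_source/devices/i915/). With them in place the
 * counters can be read with stock tooling, for example (engine event names
 * depend on the engines present):
 *
 *   perf stat -e i915/rc6-residency/,i915/rcs0-busy/ -a sleep 1
 */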
struct i915_str_attribute {
	struct device_attribute attr;
	const char *str;
};

static ssize_t i915_pmu_format_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct i915_str_attribute *eattr;

	eattr = container_of(attr, struct i915_str_attribute, attr);
	return sprintf(buf, "%s\n", eattr->str);
}

#define I915_PMU_FORMAT_ATTR(_name, _config) \
	(&((struct i915_str_attribute[]) { \
		{ .attr = __ATTR(_name, 0444, i915_pmu_format_show, NULL), \
		  .str = _config, } \
	})[0].attr.attr)

static struct attribute *i915_pmu_format_attrs[] = {
	I915_PMU_FORMAT_ATTR(i915_eventid, "config:0-20"),
	NULL,
};

static const struct attribute_group i915_pmu_format_attr_group = {
	.name = "format",
	.attrs = i915_pmu_format_attrs,
};

struct i915_ext_attribute {
	struct device_attribute attr;
	unsigned long val;
};

static ssize_t i915_pmu_event_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct i915_ext_attribute *eattr;

	eattr = container_of(attr, struct i915_ext_attribute, attr);
	return sprintf(buf, "config=0x%lx\n", eattr->val);
}

static ssize_t cpumask_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &i915_pmu_cpumask);
}

static DEVICE_ATTR_RO(cpumask);

static struct attribute *i915_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static const struct attribute_group i915_pmu_cpumask_attr_group = {
	.attrs = i915_cpumask_attrs,
};
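/*
 * Event attributes are generated at runtime by create_event_attributes()
 * below: one "config=0x..." attribute per supported event (rendered by
 * i915_pmu_event_show()), plus an optional "<event>.unit" attribute
 * (rendered by perf_event_sysfs_show()) carrying the unit string.
 */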
#define __event(__config, __name, __unit) \
{ \
	.config = (__config), \
	.name = (__name), \
	.unit = (__unit), \
}

#define __engine_event(__sample, __name) \
{ \
	.sample = (__sample), \
	.name = (__name), \
}

static struct i915_ext_attribute *
add_i915_attr(struct i915_ext_attribute *attr, const char *name, u64 config)
{
	sysfs_attr_init(&attr->attr.attr);
	attr->attr.attr.name = name;
	attr->attr.attr.mode = 0444;
	attr->attr.show = i915_pmu_event_show;
	attr->val = config;

	return ++attr;
}

static struct perf_pmu_events_attr *
add_pmu_attr(struct perf_pmu_events_attr *attr, const char *name,
	     const char *str)
{
	sysfs_attr_init(&attr->attr.attr);
	attr->attr.attr.name = name;
	attr->attr.attr.mode = 0444;
	attr->attr.show = perf_event_sysfs_show;
	attr->event_str = str;

	return ++attr;
}
static struct attribute **
create_event_attributes(struct i915_pmu *pmu)
{
	struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
	static const struct {
		u64 config;
		const char *name;
		const char *unit;
	} events[] = {
		__event(I915_PMU_ACTUAL_FREQUENCY, "actual-frequency", "M"),
		__event(I915_PMU_REQUESTED_FREQUENCY, "requested-frequency", "M"),
		__event(I915_PMU_INTERRUPTS, "interrupts", NULL),
		__event(I915_PMU_RC6_RESIDENCY, "rc6-residency", "ns"),
		__event(I915_PMU_SOFTWARE_GT_AWAKE_TIME, "software-gt-awake-time", "ns"),
	};
	static const struct {
		enum drm_i915_pmu_engine_sample sample;
		char *name;
	} engine_events[] = {
		__engine_event(I915_SAMPLE_BUSY, "busy"),
		__engine_event(I915_SAMPLE_SEMA, "sema"),
		__engine_event(I915_SAMPLE_WAIT, "wait"),
	};
	unsigned int count = 0;
	struct perf_pmu_events_attr *pmu_attr = NULL, *pmu_iter;
	struct i915_ext_attribute *i915_attr = NULL, *i915_iter;
	struct attribute **attr = NULL, **attr_iter;
	struct intel_engine_cs *engine;
	unsigned int i;

	/* Count how many counters we will be exposing. */
	for (i = 0; i < ARRAY_SIZE(events); i++) {
		if (!config_status(i915, events[i].config))
			count++;
	}

	for_each_uabi_engine(engine, i915) {
		for (i = 0; i < ARRAY_SIZE(engine_events); i++) {
			if (!engine_event_status(engine,
						 engine_events[i].sample))
				count++;
		}
	}

	/* Allocate attribute objects and table. */
	i915_attr = kcalloc(count, sizeof(*i915_attr), GFP_KERNEL);
	if (!i915_attr)
		goto err_alloc;

	pmu_attr = kcalloc(count, sizeof(*pmu_attr), GFP_KERNEL);
	if (!pmu_attr)
		goto err_alloc;

	/* Max one pointer of each attribute type plus a termination entry. */
	attr = kcalloc(count * 2 + 1, sizeof(*attr), GFP_KERNEL);
	if (!attr)
		goto err_alloc;

	i915_iter = i915_attr;
	pmu_iter = pmu_attr;
	attr_iter = attr;

	/* Initialize supported non-engine counters. */
	for (i = 0; i < ARRAY_SIZE(events); i++) {
		char *str;

		if (config_status(i915, events[i].config))
			continue;

		str = kstrdup(events[i].name, GFP_KERNEL);
		if (!str)
			goto err;

		*attr_iter++ = &i915_iter->attr.attr;
		i915_iter = add_i915_attr(i915_iter, str, events[i].config);

		if (events[i].unit) {
			str = kasprintf(GFP_KERNEL, "%s.unit", events[i].name);
			if (!str)
				goto err;

			*attr_iter++ = &pmu_iter->attr.attr;
			pmu_iter = add_pmu_attr(pmu_iter, str, events[i].unit);
		}
	}

	/* Initialize supported engine counters. */
	for_each_uabi_engine(engine, i915) {
		for (i = 0; i < ARRAY_SIZE(engine_events); i++) {
			char *str;

			if (engine_event_status(engine,
						engine_events[i].sample))
				continue;

			str = kasprintf(GFP_KERNEL, "%s-%s",
					engine->name, engine_events[i].name);
			if (!str)
				goto err;

			*attr_iter++ = &i915_iter->attr.attr;
			i915_iter =
				add_i915_attr(i915_iter, str,
					      __I915_PMU_ENGINE(engine->uabi_class,
								engine->uabi_instance,
								engine_events[i].sample));

			str = kasprintf(GFP_KERNEL, "%s-%s.unit",
					engine->name, engine_events[i].name);
			if (!str)
				goto err;

			*attr_iter++ = &pmu_iter->attr.attr;
			pmu_iter = add_pmu_attr(pmu_iter, str, "ns");
		}
	}

	pmu->i915_attr = i915_attr;
	pmu->pmu_attr = pmu_attr;

	return attr;

err:;
	for (attr_iter = attr; *attr_iter; attr_iter++)
		kfree((*attr_iter)->name);

err_alloc:
	kfree(attr);
	kfree(i915_attr);
	kfree(pmu_attr);

	return NULL;
}
static void free_event_attributes(struct i915_pmu *pmu)
{
	struct attribute **attr_iter = pmu->events_attr_group.attrs;

	for (; *attr_iter; attr_iter++)
		kfree((*attr_iter)->name);

	kfree(pmu->events_attr_group.attrs);
	kfree(pmu->i915_attr);
	kfree(pmu->pmu_attr);

	pmu->events_attr_group.attrs = NULL;
	pmu->i915_attr = NULL;
	pmu->pmu_attr = NULL;
}

static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node);

	GEM_BUG_ON(!pmu->base.event_init);

	/* Select the first online CPU as a designated reader. */
	if (cpumask_empty(&i915_pmu_cpumask))
		cpumask_set_cpu(cpu, &i915_pmu_cpumask);

	return 0;
}
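/*
 * When the designated reader CPU goes offline, i915_pmu_cpu_offline() below
 * promotes another CPU from its topology sibling mask and hands over any
 * active events with perf_pmu_migrate_context().
 */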
static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
	struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node);
	unsigned int target = i915_pmu_target_cpu;

	GEM_BUG_ON(!pmu->base.event_init);

	/*
	 * Unregistering an instance generates a CPU offline event which we must
	 * ignore to avoid incorrectly modifying the shared i915_pmu_cpumask.
	 */
	if (pmu->closed)
		return 0;

	if (cpumask_test_and_clear_cpu(cpu, &i915_pmu_cpumask)) {
		target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);

		/* Migrate events if there is a valid target */
		if (target < nr_cpu_ids) {
			cpumask_set_cpu(target, &i915_pmu_cpumask);
			i915_pmu_target_cpu = target;
		}
	}

	if (target < nr_cpu_ids && target != pmu->cpuhp.cpu) {
		perf_pmu_migrate_context(&pmu->base, cpu, target);
		pmu->cpuhp.cpu = target;
	}

	return 0;
}

static enum cpuhp_state cpuhp_slot = CPUHP_INVALID;

int i915_pmu_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
				      "perf/x86/intel/i915:online",
				      i915_pmu_cpu_online,
				      i915_pmu_cpu_offline);
	if (ret < 0)
		pr_notice("Failed to setup cpuhp state for i915 PMU! (%d)\n",
			  ret);
	else
		cpuhp_slot = ret;

	return 0;
}

void i915_pmu_exit(void)
{
	if (cpuhp_slot != CPUHP_INVALID)
		cpuhp_remove_multi_state(cpuhp_slot);
}

static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu)
{
	if (cpuhp_slot == CPUHP_INVALID)
		return -EINVAL;

	return cpuhp_state_add_instance(cpuhp_slot, &pmu->cpuhp.node);
}

static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu)
{
	cpuhp_state_remove_instance(cpuhp_slot, &pmu->cpuhp.node);
}

static bool is_igp(struct drm_i915_private *i915)
{
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);

	/* IGP is 0000:00:02.0 */
	return pci_domain_nr(pdev->bus) == 0 &&
	       pdev->bus->number == 0 &&
	       PCI_SLOT(pdev->devfn) == 2 &&
	       PCI_FUNC(pdev->devfn) == 0;
}
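/*
 * The integrated GPU keeps the historical PMU name "i915"; any other device
 * gets "i915_" plus its PCI address with the colons perf cannot parse
 * replaced, e.g. (hypothetical address) "i915_0000_03_00.0".
 */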
void i915_pmu_register(struct drm_i915_private *i915)
{
	struct i915_pmu *pmu = &i915->pmu;
	const struct attribute_group *attr_groups[] = {
		&i915_pmu_format_attr_group,
		&pmu->events_attr_group,
		&i915_pmu_cpumask_attr_group,
		NULL
	};

	int ret = -ENOMEM;

	if (GRAPHICS_VER(i915) <= 2) {
		drm_info(&i915->drm, "PMU not supported for this GPU.");
		return;
	}

	spin_lock_init(&pmu->lock);
	hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	pmu->timer.function = i915_sample;
	pmu->cpuhp.cpu = -1;
	init_rc6(pmu);

	if (!is_igp(i915)) {
		pmu->name = kasprintf(GFP_KERNEL,
				      "i915_%s",
				      dev_name(i915->drm.dev));
		if (pmu->name) {
			/* tools/perf reserves colons as special. */
			strreplace((char *)pmu->name, ':', '_');
		}
	} else {
		pmu->name = "i915";
	}
	if (!pmu->name)
		goto err;

	pmu->events_attr_group.name = "events";
	pmu->events_attr_group.attrs = create_event_attributes(pmu);
	if (!pmu->events_attr_group.attrs)
		goto err_name;

	pmu->base.attr_groups = kmemdup(attr_groups, sizeof(attr_groups),
					GFP_KERNEL);
	if (!pmu->base.attr_groups)
		goto err_attr;

	pmu->base.module = THIS_MODULE;
	pmu->base.task_ctx_nr = perf_invalid_context;
	pmu->base.event_init = i915_pmu_event_init;
	pmu->base.add = i915_pmu_event_add;
	pmu->base.del = i915_pmu_event_del;
	pmu->base.start = i915_pmu_event_start;
	pmu->base.stop = i915_pmu_event_stop;
	pmu->base.read = i915_pmu_event_read;
	pmu->base.event_idx = i915_pmu_event_event_idx;

	ret = perf_pmu_register(&pmu->base, pmu->name, -1);
	if (ret)
		goto err_groups;

	ret = i915_pmu_register_cpuhp_state(pmu);
	if (ret)
		goto err_unreg;

	return;

err_unreg:
	perf_pmu_unregister(&pmu->base);
err_groups:
	kfree(pmu->base.attr_groups);
err_attr:
	pmu->base.event_init = NULL;
	free_event_attributes(pmu);
err_name:
	if (!is_igp(i915))
		kfree(pmu->name);
err:
	drm_notice(&i915->drm, "Failed to register PMU!\n");
}

void i915_pmu_unregister(struct drm_i915_private *i915)
{
	struct i915_pmu *pmu = &i915->pmu;

	if (!pmu->base.event_init)
		return;

	/*
	 * "Disconnect" the PMU callbacks - since all are atomic synchronize_rcu
	 * ensures all currently executing ones will have exited before we
	 * proceed with unregistration.
	 */
	pmu->closed = true;
	synchronize_rcu();

	hrtimer_cancel(&pmu->timer);

	i915_pmu_unregister_cpuhp_state(pmu);

	perf_pmu_unregister(&pmu->base);
	pmu->base.event_init = NULL;
	kfree(pmu->base.attr_groups);
	if (!is_igp(i915))
		kfree(pmu->name);
	free_event_attributes(pmu);
}