/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
22*b46a33e2STvrtko Ursulin * 23*b46a33e2STvrtko Ursulin */ 24*b46a33e2STvrtko Ursulin 25*b46a33e2STvrtko Ursulin #include <linux/perf_event.h> 26*b46a33e2STvrtko Ursulin #include <linux/pm_runtime.h> 27*b46a33e2STvrtko Ursulin 28*b46a33e2STvrtko Ursulin #include "i915_drv.h" 29*b46a33e2STvrtko Ursulin #include "i915_pmu.h" 30*b46a33e2STvrtko Ursulin #include "intel_ringbuffer.h" 31*b46a33e2STvrtko Ursulin 32*b46a33e2STvrtko Ursulin /* Frequency for the sampling timer for events which need it. */ 33*b46a33e2STvrtko Ursulin #define FREQUENCY 200 34*b46a33e2STvrtko Ursulin #define PERIOD max_t(u64, 10000, NSEC_PER_SEC / FREQUENCY) 35*b46a33e2STvrtko Ursulin 36*b46a33e2STvrtko Ursulin #define ENGINE_SAMPLE_MASK \ 37*b46a33e2STvrtko Ursulin (BIT(I915_SAMPLE_BUSY) | \ 38*b46a33e2STvrtko Ursulin BIT(I915_SAMPLE_WAIT) | \ 39*b46a33e2STvrtko Ursulin BIT(I915_SAMPLE_SEMA)) 40*b46a33e2STvrtko Ursulin 41*b46a33e2STvrtko Ursulin #define ENGINE_SAMPLE_BITS (1 << I915_PMU_SAMPLE_BITS) 42*b46a33e2STvrtko Ursulin 43*b46a33e2STvrtko Ursulin static cpumask_t i915_pmu_cpumask = CPU_MASK_NONE; 44*b46a33e2STvrtko Ursulin 45*b46a33e2STvrtko Ursulin static u8 engine_config_sample(u64 config) 46*b46a33e2STvrtko Ursulin { 47*b46a33e2STvrtko Ursulin return config & I915_PMU_SAMPLE_MASK; 48*b46a33e2STvrtko Ursulin } 49*b46a33e2STvrtko Ursulin 50*b46a33e2STvrtko Ursulin static u8 engine_event_sample(struct perf_event *event) 51*b46a33e2STvrtko Ursulin { 52*b46a33e2STvrtko Ursulin return engine_config_sample(event->attr.config); 53*b46a33e2STvrtko Ursulin } 54*b46a33e2STvrtko Ursulin 55*b46a33e2STvrtko Ursulin static u8 engine_event_class(struct perf_event *event) 56*b46a33e2STvrtko Ursulin { 57*b46a33e2STvrtko Ursulin return (event->attr.config >> I915_PMU_CLASS_SHIFT) & 0xff; 58*b46a33e2STvrtko Ursulin } 59*b46a33e2STvrtko Ursulin 60*b46a33e2STvrtko Ursulin static u8 engine_event_instance(struct perf_event *event) 61*b46a33e2STvrtko Ursulin { 62*b46a33e2STvrtko Ursulin return 
(event->attr.config >> I915_PMU_SAMPLE_BITS) & 0xff; 63*b46a33e2STvrtko Ursulin } 64*b46a33e2STvrtko Ursulin 65*b46a33e2STvrtko Ursulin static bool is_engine_config(u64 config) 66*b46a33e2STvrtko Ursulin { 67*b46a33e2STvrtko Ursulin return config < __I915_PMU_OTHER(0); 68*b46a33e2STvrtko Ursulin } 69*b46a33e2STvrtko Ursulin 70*b46a33e2STvrtko Ursulin static unsigned int config_enabled_bit(u64 config) 71*b46a33e2STvrtko Ursulin { 72*b46a33e2STvrtko Ursulin if (is_engine_config(config)) 73*b46a33e2STvrtko Ursulin return engine_config_sample(config); 74*b46a33e2STvrtko Ursulin else 75*b46a33e2STvrtko Ursulin return ENGINE_SAMPLE_BITS + (config - __I915_PMU_OTHER(0)); 76*b46a33e2STvrtko Ursulin } 77*b46a33e2STvrtko Ursulin 78*b46a33e2STvrtko Ursulin static u64 config_enabled_mask(u64 config) 79*b46a33e2STvrtko Ursulin { 80*b46a33e2STvrtko Ursulin return BIT_ULL(config_enabled_bit(config)); 81*b46a33e2STvrtko Ursulin } 82*b46a33e2STvrtko Ursulin 83*b46a33e2STvrtko Ursulin static bool is_engine_event(struct perf_event *event) 84*b46a33e2STvrtko Ursulin { 85*b46a33e2STvrtko Ursulin return is_engine_config(event->attr.config); 86*b46a33e2STvrtko Ursulin } 87*b46a33e2STvrtko Ursulin 88*b46a33e2STvrtko Ursulin static unsigned int event_enabled_bit(struct perf_event *event) 89*b46a33e2STvrtko Ursulin { 90*b46a33e2STvrtko Ursulin return config_enabled_bit(event->attr.config); 91*b46a33e2STvrtko Ursulin } 92*b46a33e2STvrtko Ursulin 93*b46a33e2STvrtko Ursulin static bool grab_forcewake(struct drm_i915_private *i915, bool fw) 94*b46a33e2STvrtko Ursulin { 95*b46a33e2STvrtko Ursulin if (!fw) 96*b46a33e2STvrtko Ursulin intel_uncore_forcewake_get(i915, FORCEWAKE_ALL); 97*b46a33e2STvrtko Ursulin 98*b46a33e2STvrtko Ursulin return true; 99*b46a33e2STvrtko Ursulin } 100*b46a33e2STvrtko Ursulin 101*b46a33e2STvrtko Ursulin static void 102*b46a33e2STvrtko Ursulin update_sample(struct i915_pmu_sample *sample, u32 unit, u32 val) 103*b46a33e2STvrtko Ursulin { 104*b46a33e2STvrtko Ursulin /* 
105*b46a33e2STvrtko Ursulin * Since we are doing stochastic sampling for these counters, 106*b46a33e2STvrtko Ursulin * average the delta with the previous value for better accuracy. 107*b46a33e2STvrtko Ursulin */ 108*b46a33e2STvrtko Ursulin sample->cur += div_u64(mul_u32_u32(sample->prev + val, unit), 2); 109*b46a33e2STvrtko Ursulin sample->prev = val; 110*b46a33e2STvrtko Ursulin } 111*b46a33e2STvrtko Ursulin 112*b46a33e2STvrtko Ursulin static void engines_sample(struct drm_i915_private *dev_priv) 113*b46a33e2STvrtko Ursulin { 114*b46a33e2STvrtko Ursulin struct intel_engine_cs *engine; 115*b46a33e2STvrtko Ursulin enum intel_engine_id id; 116*b46a33e2STvrtko Ursulin bool fw = false; 117*b46a33e2STvrtko Ursulin 118*b46a33e2STvrtko Ursulin if ((dev_priv->pmu.enable & ENGINE_SAMPLE_MASK) == 0) 119*b46a33e2STvrtko Ursulin return; 120*b46a33e2STvrtko Ursulin 121*b46a33e2STvrtko Ursulin if (!dev_priv->gt.awake) 122*b46a33e2STvrtko Ursulin return; 123*b46a33e2STvrtko Ursulin 124*b46a33e2STvrtko Ursulin if (!intel_runtime_pm_get_if_in_use(dev_priv)) 125*b46a33e2STvrtko Ursulin return; 126*b46a33e2STvrtko Ursulin 127*b46a33e2STvrtko Ursulin for_each_engine(engine, dev_priv, id) { 128*b46a33e2STvrtko Ursulin u32 current_seqno = intel_engine_get_seqno(engine); 129*b46a33e2STvrtko Ursulin u32 last_seqno = intel_engine_last_submit(engine); 130*b46a33e2STvrtko Ursulin u32 val; 131*b46a33e2STvrtko Ursulin 132*b46a33e2STvrtko Ursulin val = !i915_seqno_passed(current_seqno, last_seqno); 133*b46a33e2STvrtko Ursulin 134*b46a33e2STvrtko Ursulin update_sample(&engine->pmu.sample[I915_SAMPLE_BUSY], 135*b46a33e2STvrtko Ursulin PERIOD, val); 136*b46a33e2STvrtko Ursulin 137*b46a33e2STvrtko Ursulin if (val && (engine->pmu.enable & 138*b46a33e2STvrtko Ursulin (BIT(I915_SAMPLE_WAIT) | BIT(I915_SAMPLE_SEMA)))) { 139*b46a33e2STvrtko Ursulin fw = grab_forcewake(dev_priv, fw); 140*b46a33e2STvrtko Ursulin 141*b46a33e2STvrtko Ursulin val = I915_READ_FW(RING_CTL(engine->mmio_base)); 
142*b46a33e2STvrtko Ursulin } else { 143*b46a33e2STvrtko Ursulin val = 0; 144*b46a33e2STvrtko Ursulin } 145*b46a33e2STvrtko Ursulin 146*b46a33e2STvrtko Ursulin update_sample(&engine->pmu.sample[I915_SAMPLE_WAIT], 147*b46a33e2STvrtko Ursulin PERIOD, !!(val & RING_WAIT)); 148*b46a33e2STvrtko Ursulin 149*b46a33e2STvrtko Ursulin update_sample(&engine->pmu.sample[I915_SAMPLE_SEMA], 150*b46a33e2STvrtko Ursulin PERIOD, !!(val & RING_WAIT_SEMAPHORE)); 151*b46a33e2STvrtko Ursulin } 152*b46a33e2STvrtko Ursulin 153*b46a33e2STvrtko Ursulin if (fw) 154*b46a33e2STvrtko Ursulin intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 155*b46a33e2STvrtko Ursulin 156*b46a33e2STvrtko Ursulin intel_runtime_pm_put(dev_priv); 157*b46a33e2STvrtko Ursulin } 158*b46a33e2STvrtko Ursulin 159*b46a33e2STvrtko Ursulin static void frequency_sample(struct drm_i915_private *dev_priv) 160*b46a33e2STvrtko Ursulin { 161*b46a33e2STvrtko Ursulin if (dev_priv->pmu.enable & 162*b46a33e2STvrtko Ursulin config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) { 163*b46a33e2STvrtko Ursulin u32 val; 164*b46a33e2STvrtko Ursulin 165*b46a33e2STvrtko Ursulin val = dev_priv->gt_pm.rps.cur_freq; 166*b46a33e2STvrtko Ursulin if (dev_priv->gt.awake && 167*b46a33e2STvrtko Ursulin intel_runtime_pm_get_if_in_use(dev_priv)) { 168*b46a33e2STvrtko Ursulin val = intel_get_cagf(dev_priv, 169*b46a33e2STvrtko Ursulin I915_READ_NOTRACE(GEN6_RPSTAT1)); 170*b46a33e2STvrtko Ursulin intel_runtime_pm_put(dev_priv); 171*b46a33e2STvrtko Ursulin } 172*b46a33e2STvrtko Ursulin 173*b46a33e2STvrtko Ursulin update_sample(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT], 174*b46a33e2STvrtko Ursulin 1, intel_gpu_freq(dev_priv, val)); 175*b46a33e2STvrtko Ursulin } 176*b46a33e2STvrtko Ursulin 177*b46a33e2STvrtko Ursulin if (dev_priv->pmu.enable & 178*b46a33e2STvrtko Ursulin config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) { 179*b46a33e2STvrtko Ursulin update_sample(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_REQ], 1, 180*b46a33e2STvrtko Ursulin 
intel_gpu_freq(dev_priv, 181*b46a33e2STvrtko Ursulin dev_priv->gt_pm.rps.cur_freq)); 182*b46a33e2STvrtko Ursulin } 183*b46a33e2STvrtko Ursulin } 184*b46a33e2STvrtko Ursulin 185*b46a33e2STvrtko Ursulin static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer) 186*b46a33e2STvrtko Ursulin { 187*b46a33e2STvrtko Ursulin struct drm_i915_private *i915 = 188*b46a33e2STvrtko Ursulin container_of(hrtimer, struct drm_i915_private, pmu.timer); 189*b46a33e2STvrtko Ursulin 190*b46a33e2STvrtko Ursulin if (i915->pmu.enable == 0) 191*b46a33e2STvrtko Ursulin return HRTIMER_NORESTART; 192*b46a33e2STvrtko Ursulin 193*b46a33e2STvrtko Ursulin engines_sample(i915); 194*b46a33e2STvrtko Ursulin frequency_sample(i915); 195*b46a33e2STvrtko Ursulin 196*b46a33e2STvrtko Ursulin hrtimer_forward_now(hrtimer, ns_to_ktime(PERIOD)); 197*b46a33e2STvrtko Ursulin return HRTIMER_RESTART; 198*b46a33e2STvrtko Ursulin } 199*b46a33e2STvrtko Ursulin 200*b46a33e2STvrtko Ursulin static void i915_pmu_event_destroy(struct perf_event *event) 201*b46a33e2STvrtko Ursulin { 202*b46a33e2STvrtko Ursulin WARN_ON(event->parent); 203*b46a33e2STvrtko Ursulin } 204*b46a33e2STvrtko Ursulin 205*b46a33e2STvrtko Ursulin static int engine_event_init(struct perf_event *event) 206*b46a33e2STvrtko Ursulin { 207*b46a33e2STvrtko Ursulin struct drm_i915_private *i915 = 208*b46a33e2STvrtko Ursulin container_of(event->pmu, typeof(*i915), pmu.base); 209*b46a33e2STvrtko Ursulin 210*b46a33e2STvrtko Ursulin if (!intel_engine_lookup_user(i915, engine_event_class(event), 211*b46a33e2STvrtko Ursulin engine_event_instance(event))) 212*b46a33e2STvrtko Ursulin return -ENODEV; 213*b46a33e2STvrtko Ursulin 214*b46a33e2STvrtko Ursulin switch (engine_event_sample(event)) { 215*b46a33e2STvrtko Ursulin case I915_SAMPLE_BUSY: 216*b46a33e2STvrtko Ursulin case I915_SAMPLE_WAIT: 217*b46a33e2STvrtko Ursulin break; 218*b46a33e2STvrtko Ursulin case I915_SAMPLE_SEMA: 219*b46a33e2STvrtko Ursulin if (INTEL_GEN(i915) < 6) 220*b46a33e2STvrtko Ursulin return 
-ENODEV; 221*b46a33e2STvrtko Ursulin break; 222*b46a33e2STvrtko Ursulin default: 223*b46a33e2STvrtko Ursulin return -ENOENT; 224*b46a33e2STvrtko Ursulin } 225*b46a33e2STvrtko Ursulin 226*b46a33e2STvrtko Ursulin return 0; 227*b46a33e2STvrtko Ursulin } 228*b46a33e2STvrtko Ursulin 229*b46a33e2STvrtko Ursulin static int i915_pmu_event_init(struct perf_event *event) 230*b46a33e2STvrtko Ursulin { 231*b46a33e2STvrtko Ursulin struct drm_i915_private *i915 = 232*b46a33e2STvrtko Ursulin container_of(event->pmu, typeof(*i915), pmu.base); 233*b46a33e2STvrtko Ursulin int cpu, ret; 234*b46a33e2STvrtko Ursulin 235*b46a33e2STvrtko Ursulin if (event->attr.type != event->pmu->type) 236*b46a33e2STvrtko Ursulin return -ENOENT; 237*b46a33e2STvrtko Ursulin 238*b46a33e2STvrtko Ursulin /* unsupported modes and filters */ 239*b46a33e2STvrtko Ursulin if (event->attr.sample_period) /* no sampling */ 240*b46a33e2STvrtko Ursulin return -EINVAL; 241*b46a33e2STvrtko Ursulin 242*b46a33e2STvrtko Ursulin if (has_branch_stack(event)) 243*b46a33e2STvrtko Ursulin return -EOPNOTSUPP; 244*b46a33e2STvrtko Ursulin 245*b46a33e2STvrtko Ursulin if (event->cpu < 0) 246*b46a33e2STvrtko Ursulin return -EINVAL; 247*b46a33e2STvrtko Ursulin 248*b46a33e2STvrtko Ursulin cpu = cpumask_any_and(&i915_pmu_cpumask, 249*b46a33e2STvrtko Ursulin topology_sibling_cpumask(event->cpu)); 250*b46a33e2STvrtko Ursulin if (cpu >= nr_cpu_ids) 251*b46a33e2STvrtko Ursulin return -ENODEV; 252*b46a33e2STvrtko Ursulin 253*b46a33e2STvrtko Ursulin if (is_engine_event(event)) { 254*b46a33e2STvrtko Ursulin ret = engine_event_init(event); 255*b46a33e2STvrtko Ursulin } else { 256*b46a33e2STvrtko Ursulin ret = 0; 257*b46a33e2STvrtko Ursulin switch (event->attr.config) { 258*b46a33e2STvrtko Ursulin case I915_PMU_ACTUAL_FREQUENCY: 259*b46a33e2STvrtko Ursulin if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) 260*b46a33e2STvrtko Ursulin /* Requires a mutex for sampling! 
*/ 261*b46a33e2STvrtko Ursulin ret = -ENODEV; 262*b46a33e2STvrtko Ursulin case I915_PMU_REQUESTED_FREQUENCY: 263*b46a33e2STvrtko Ursulin if (INTEL_GEN(i915) < 6) 264*b46a33e2STvrtko Ursulin ret = -ENODEV; 265*b46a33e2STvrtko Ursulin break; 266*b46a33e2STvrtko Ursulin default: 267*b46a33e2STvrtko Ursulin ret = -ENOENT; 268*b46a33e2STvrtko Ursulin break; 269*b46a33e2STvrtko Ursulin } 270*b46a33e2STvrtko Ursulin } 271*b46a33e2STvrtko Ursulin if (ret) 272*b46a33e2STvrtko Ursulin return ret; 273*b46a33e2STvrtko Ursulin 274*b46a33e2STvrtko Ursulin event->cpu = cpu; 275*b46a33e2STvrtko Ursulin if (!event->parent) 276*b46a33e2STvrtko Ursulin event->destroy = i915_pmu_event_destroy; 277*b46a33e2STvrtko Ursulin 278*b46a33e2STvrtko Ursulin return 0; 279*b46a33e2STvrtko Ursulin } 280*b46a33e2STvrtko Ursulin 281*b46a33e2STvrtko Ursulin static u64 __i915_pmu_event_read(struct perf_event *event) 282*b46a33e2STvrtko Ursulin { 283*b46a33e2STvrtko Ursulin struct drm_i915_private *i915 = 284*b46a33e2STvrtko Ursulin container_of(event->pmu, typeof(*i915), pmu.base); 285*b46a33e2STvrtko Ursulin u64 val = 0; 286*b46a33e2STvrtko Ursulin 287*b46a33e2STvrtko Ursulin if (is_engine_event(event)) { 288*b46a33e2STvrtko Ursulin u8 sample = engine_event_sample(event); 289*b46a33e2STvrtko Ursulin struct intel_engine_cs *engine; 290*b46a33e2STvrtko Ursulin 291*b46a33e2STvrtko Ursulin engine = intel_engine_lookup_user(i915, 292*b46a33e2STvrtko Ursulin engine_event_class(event), 293*b46a33e2STvrtko Ursulin engine_event_instance(event)); 294*b46a33e2STvrtko Ursulin 295*b46a33e2STvrtko Ursulin if (WARN_ON_ONCE(!engine)) { 296*b46a33e2STvrtko Ursulin /* Do nothing */ 297*b46a33e2STvrtko Ursulin } else { 298*b46a33e2STvrtko Ursulin val = engine->pmu.sample[sample].cur; 299*b46a33e2STvrtko Ursulin } 300*b46a33e2STvrtko Ursulin } else { 301*b46a33e2STvrtko Ursulin switch (event->attr.config) { 302*b46a33e2STvrtko Ursulin case I915_PMU_ACTUAL_FREQUENCY: 303*b46a33e2STvrtko Ursulin val = 304*b46a33e2STvrtko 
Ursulin div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_ACT].cur, 305*b46a33e2STvrtko Ursulin FREQUENCY); 306*b46a33e2STvrtko Ursulin break; 307*b46a33e2STvrtko Ursulin case I915_PMU_REQUESTED_FREQUENCY: 308*b46a33e2STvrtko Ursulin val = 309*b46a33e2STvrtko Ursulin div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_REQ].cur, 310*b46a33e2STvrtko Ursulin FREQUENCY); 311*b46a33e2STvrtko Ursulin break; 312*b46a33e2STvrtko Ursulin } 313*b46a33e2STvrtko Ursulin } 314*b46a33e2STvrtko Ursulin 315*b46a33e2STvrtko Ursulin return val; 316*b46a33e2STvrtko Ursulin } 317*b46a33e2STvrtko Ursulin 318*b46a33e2STvrtko Ursulin static void i915_pmu_event_read(struct perf_event *event) 319*b46a33e2STvrtko Ursulin { 320*b46a33e2STvrtko Ursulin struct hw_perf_event *hwc = &event->hw; 321*b46a33e2STvrtko Ursulin u64 prev, new; 322*b46a33e2STvrtko Ursulin 323*b46a33e2STvrtko Ursulin again: 324*b46a33e2STvrtko Ursulin prev = local64_read(&hwc->prev_count); 325*b46a33e2STvrtko Ursulin new = __i915_pmu_event_read(event); 326*b46a33e2STvrtko Ursulin 327*b46a33e2STvrtko Ursulin if (local64_cmpxchg(&hwc->prev_count, prev, new) != prev) 328*b46a33e2STvrtko Ursulin goto again; 329*b46a33e2STvrtko Ursulin 330*b46a33e2STvrtko Ursulin local64_add(new - prev, &event->count); 331*b46a33e2STvrtko Ursulin } 332*b46a33e2STvrtko Ursulin 333*b46a33e2STvrtko Ursulin static void i915_pmu_enable(struct perf_event *event) 334*b46a33e2STvrtko Ursulin { 335*b46a33e2STvrtko Ursulin struct drm_i915_private *i915 = 336*b46a33e2STvrtko Ursulin container_of(event->pmu, typeof(*i915), pmu.base); 337*b46a33e2STvrtko Ursulin unsigned int bit = event_enabled_bit(event); 338*b46a33e2STvrtko Ursulin unsigned long flags; 339*b46a33e2STvrtko Ursulin 340*b46a33e2STvrtko Ursulin spin_lock_irqsave(&i915->pmu.lock, flags); 341*b46a33e2STvrtko Ursulin 342*b46a33e2STvrtko Ursulin /* 343*b46a33e2STvrtko Ursulin * Start the sampling timer when enabling the first event. 
344*b46a33e2STvrtko Ursulin */ 345*b46a33e2STvrtko Ursulin if (i915->pmu.enable == 0) 346*b46a33e2STvrtko Ursulin hrtimer_start_range_ns(&i915->pmu.timer, 347*b46a33e2STvrtko Ursulin ns_to_ktime(PERIOD), 0, 348*b46a33e2STvrtko Ursulin HRTIMER_MODE_REL_PINNED); 349*b46a33e2STvrtko Ursulin 350*b46a33e2STvrtko Ursulin /* 351*b46a33e2STvrtko Ursulin * Update the bitmask of enabled events and increment 352*b46a33e2STvrtko Ursulin * the event reference counter. 353*b46a33e2STvrtko Ursulin */ 354*b46a33e2STvrtko Ursulin GEM_BUG_ON(bit >= I915_PMU_MASK_BITS); 355*b46a33e2STvrtko Ursulin GEM_BUG_ON(i915->pmu.enable_count[bit] == ~0); 356*b46a33e2STvrtko Ursulin i915->pmu.enable |= BIT_ULL(bit); 357*b46a33e2STvrtko Ursulin i915->pmu.enable_count[bit]++; 358*b46a33e2STvrtko Ursulin 359*b46a33e2STvrtko Ursulin /* 360*b46a33e2STvrtko Ursulin * For per-engine events the bitmask and reference counting 361*b46a33e2STvrtko Ursulin * is stored per engine. 362*b46a33e2STvrtko Ursulin */ 363*b46a33e2STvrtko Ursulin if (is_engine_event(event)) { 364*b46a33e2STvrtko Ursulin u8 sample = engine_event_sample(event); 365*b46a33e2STvrtko Ursulin struct intel_engine_cs *engine; 366*b46a33e2STvrtko Ursulin 367*b46a33e2STvrtko Ursulin engine = intel_engine_lookup_user(i915, 368*b46a33e2STvrtko Ursulin engine_event_class(event), 369*b46a33e2STvrtko Ursulin engine_event_instance(event)); 370*b46a33e2STvrtko Ursulin GEM_BUG_ON(!engine); 371*b46a33e2STvrtko Ursulin engine->pmu.enable |= BIT(sample); 372*b46a33e2STvrtko Ursulin 373*b46a33e2STvrtko Ursulin GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS); 374*b46a33e2STvrtko Ursulin GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0); 375*b46a33e2STvrtko Ursulin engine->pmu.enable_count[sample]++; 376*b46a33e2STvrtko Ursulin } 377*b46a33e2STvrtko Ursulin 378*b46a33e2STvrtko Ursulin /* 379*b46a33e2STvrtko Ursulin * Store the current counter value so we can report the correct delta 380*b46a33e2STvrtko Ursulin * for all listeners. 
Even when the event was already enabled and has 381*b46a33e2STvrtko Ursulin * an existing non-zero value. 382*b46a33e2STvrtko Ursulin */ 383*b46a33e2STvrtko Ursulin local64_set(&event->hw.prev_count, __i915_pmu_event_read(event)); 384*b46a33e2STvrtko Ursulin 385*b46a33e2STvrtko Ursulin spin_unlock_irqrestore(&i915->pmu.lock, flags); 386*b46a33e2STvrtko Ursulin } 387*b46a33e2STvrtko Ursulin 388*b46a33e2STvrtko Ursulin static void i915_pmu_disable(struct perf_event *event) 389*b46a33e2STvrtko Ursulin { 390*b46a33e2STvrtko Ursulin struct drm_i915_private *i915 = 391*b46a33e2STvrtko Ursulin container_of(event->pmu, typeof(*i915), pmu.base); 392*b46a33e2STvrtko Ursulin unsigned int bit = event_enabled_bit(event); 393*b46a33e2STvrtko Ursulin unsigned long flags; 394*b46a33e2STvrtko Ursulin 395*b46a33e2STvrtko Ursulin spin_lock_irqsave(&i915->pmu.lock, flags); 396*b46a33e2STvrtko Ursulin 397*b46a33e2STvrtko Ursulin if (is_engine_event(event)) { 398*b46a33e2STvrtko Ursulin u8 sample = engine_event_sample(event); 399*b46a33e2STvrtko Ursulin struct intel_engine_cs *engine; 400*b46a33e2STvrtko Ursulin 401*b46a33e2STvrtko Ursulin engine = intel_engine_lookup_user(i915, 402*b46a33e2STvrtko Ursulin engine_event_class(event), 403*b46a33e2STvrtko Ursulin engine_event_instance(event)); 404*b46a33e2STvrtko Ursulin GEM_BUG_ON(!engine); 405*b46a33e2STvrtko Ursulin GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS); 406*b46a33e2STvrtko Ursulin GEM_BUG_ON(engine->pmu.enable_count[sample] == 0); 407*b46a33e2STvrtko Ursulin /* 408*b46a33e2STvrtko Ursulin * Decrement the reference count and clear the enabled 409*b46a33e2STvrtko Ursulin * bitmask when the last listener on an event goes away. 
410*b46a33e2STvrtko Ursulin */ 411*b46a33e2STvrtko Ursulin if (--engine->pmu.enable_count[sample] == 0) 412*b46a33e2STvrtko Ursulin engine->pmu.enable &= ~BIT(sample); 413*b46a33e2STvrtko Ursulin } 414*b46a33e2STvrtko Ursulin 415*b46a33e2STvrtko Ursulin GEM_BUG_ON(bit >= I915_PMU_MASK_BITS); 416*b46a33e2STvrtko Ursulin GEM_BUG_ON(i915->pmu.enable_count[bit] == 0); 417*b46a33e2STvrtko Ursulin /* 418*b46a33e2STvrtko Ursulin * Decrement the reference count and clear the enabled 419*b46a33e2STvrtko Ursulin * bitmask when the last listener on an event goes away. 420*b46a33e2STvrtko Ursulin */ 421*b46a33e2STvrtko Ursulin if (--i915->pmu.enable_count[bit] == 0) 422*b46a33e2STvrtko Ursulin i915->pmu.enable &= ~BIT_ULL(bit); 423*b46a33e2STvrtko Ursulin 424*b46a33e2STvrtko Ursulin spin_unlock_irqrestore(&i915->pmu.lock, flags); 425*b46a33e2STvrtko Ursulin } 426*b46a33e2STvrtko Ursulin 427*b46a33e2STvrtko Ursulin static void i915_pmu_event_start(struct perf_event *event, int flags) 428*b46a33e2STvrtko Ursulin { 429*b46a33e2STvrtko Ursulin i915_pmu_enable(event); 430*b46a33e2STvrtko Ursulin event->hw.state = 0; 431*b46a33e2STvrtko Ursulin } 432*b46a33e2STvrtko Ursulin 433*b46a33e2STvrtko Ursulin static void i915_pmu_event_stop(struct perf_event *event, int flags) 434*b46a33e2STvrtko Ursulin { 435*b46a33e2STvrtko Ursulin if (flags & PERF_EF_UPDATE) 436*b46a33e2STvrtko Ursulin i915_pmu_event_read(event); 437*b46a33e2STvrtko Ursulin i915_pmu_disable(event); 438*b46a33e2STvrtko Ursulin event->hw.state = PERF_HES_STOPPED; 439*b46a33e2STvrtko Ursulin } 440*b46a33e2STvrtko Ursulin 441*b46a33e2STvrtko Ursulin static int i915_pmu_event_add(struct perf_event *event, int flags) 442*b46a33e2STvrtko Ursulin { 443*b46a33e2STvrtko Ursulin if (flags & PERF_EF_START) 444*b46a33e2STvrtko Ursulin i915_pmu_event_start(event, flags); 445*b46a33e2STvrtko Ursulin 446*b46a33e2STvrtko Ursulin return 0; 447*b46a33e2STvrtko Ursulin } 448*b46a33e2STvrtko Ursulin 449*b46a33e2STvrtko Ursulin static void 
i915_pmu_event_del(struct perf_event *event, int flags) 450*b46a33e2STvrtko Ursulin { 451*b46a33e2STvrtko Ursulin i915_pmu_event_stop(event, PERF_EF_UPDATE); 452*b46a33e2STvrtko Ursulin } 453*b46a33e2STvrtko Ursulin 454*b46a33e2STvrtko Ursulin static int i915_pmu_event_event_idx(struct perf_event *event) 455*b46a33e2STvrtko Ursulin { 456*b46a33e2STvrtko Ursulin return 0; 457*b46a33e2STvrtko Ursulin } 458*b46a33e2STvrtko Ursulin 459*b46a33e2STvrtko Ursulin static ssize_t i915_pmu_format_show(struct device *dev, 460*b46a33e2STvrtko Ursulin struct device_attribute *attr, char *buf) 461*b46a33e2STvrtko Ursulin { 462*b46a33e2STvrtko Ursulin struct dev_ext_attribute *eattr; 463*b46a33e2STvrtko Ursulin 464*b46a33e2STvrtko Ursulin eattr = container_of(attr, struct dev_ext_attribute, attr); 465*b46a33e2STvrtko Ursulin return sprintf(buf, "%s\n", (char *)eattr->var); 466*b46a33e2STvrtko Ursulin } 467*b46a33e2STvrtko Ursulin 468*b46a33e2STvrtko Ursulin #define I915_PMU_FORMAT_ATTR(_name, _config) \ 469*b46a33e2STvrtko Ursulin (&((struct dev_ext_attribute[]) { \ 470*b46a33e2STvrtko Ursulin { .attr = __ATTR(_name, 0444, i915_pmu_format_show, NULL), \ 471*b46a33e2STvrtko Ursulin .var = (void *)_config, } \ 472*b46a33e2STvrtko Ursulin })[0].attr.attr) 473*b46a33e2STvrtko Ursulin 474*b46a33e2STvrtko Ursulin static struct attribute *i915_pmu_format_attrs[] = { 475*b46a33e2STvrtko Ursulin I915_PMU_FORMAT_ATTR(i915_eventid, "config:0-20"), 476*b46a33e2STvrtko Ursulin NULL, 477*b46a33e2STvrtko Ursulin }; 478*b46a33e2STvrtko Ursulin 479*b46a33e2STvrtko Ursulin static const struct attribute_group i915_pmu_format_attr_group = { 480*b46a33e2STvrtko Ursulin .name = "format", 481*b46a33e2STvrtko Ursulin .attrs = i915_pmu_format_attrs, 482*b46a33e2STvrtko Ursulin }; 483*b46a33e2STvrtko Ursulin 484*b46a33e2STvrtko Ursulin static ssize_t i915_pmu_event_show(struct device *dev, 485*b46a33e2STvrtko Ursulin struct device_attribute *attr, char *buf) 486*b46a33e2STvrtko Ursulin { 
487*b46a33e2STvrtko Ursulin struct dev_ext_attribute *eattr; 488*b46a33e2STvrtko Ursulin 489*b46a33e2STvrtko Ursulin eattr = container_of(attr, struct dev_ext_attribute, attr); 490*b46a33e2STvrtko Ursulin return sprintf(buf, "config=0x%lx\n", (unsigned long)eattr->var); 491*b46a33e2STvrtko Ursulin } 492*b46a33e2STvrtko Ursulin 493*b46a33e2STvrtko Ursulin #define I915_EVENT_ATTR(_name, _config) \ 494*b46a33e2STvrtko Ursulin (&((struct dev_ext_attribute[]) { \ 495*b46a33e2STvrtko Ursulin { .attr = __ATTR(_name, 0444, i915_pmu_event_show, NULL), \ 496*b46a33e2STvrtko Ursulin .var = (void *)_config, } \ 497*b46a33e2STvrtko Ursulin })[0].attr.attr) 498*b46a33e2STvrtko Ursulin 499*b46a33e2STvrtko Ursulin #define I915_EVENT_STR(_name, _str) \ 500*b46a33e2STvrtko Ursulin (&((struct perf_pmu_events_attr[]) { \ 501*b46a33e2STvrtko Ursulin { .attr = __ATTR(_name, 0444, perf_event_sysfs_show, NULL), \ 502*b46a33e2STvrtko Ursulin .id = 0, \ 503*b46a33e2STvrtko Ursulin .event_str = _str, } \ 504*b46a33e2STvrtko Ursulin })[0].attr.attr) 505*b46a33e2STvrtko Ursulin 506*b46a33e2STvrtko Ursulin #define I915_EVENT(_name, _config, _unit) \ 507*b46a33e2STvrtko Ursulin I915_EVENT_ATTR(_name, _config), \ 508*b46a33e2STvrtko Ursulin I915_EVENT_STR(_name.unit, _unit) 509*b46a33e2STvrtko Ursulin 510*b46a33e2STvrtko Ursulin #define I915_ENGINE_EVENT(_name, _class, _instance, _sample) \ 511*b46a33e2STvrtko Ursulin I915_EVENT_ATTR(_name, __I915_PMU_ENGINE(_class, _instance, _sample)), \ 512*b46a33e2STvrtko Ursulin I915_EVENT_STR(_name.unit, "ns") 513*b46a33e2STvrtko Ursulin 514*b46a33e2STvrtko Ursulin #define I915_ENGINE_EVENTS(_name, _class, _instance) \ 515*b46a33e2STvrtko Ursulin I915_ENGINE_EVENT(_name##_instance-busy, _class, _instance, I915_SAMPLE_BUSY), \ 516*b46a33e2STvrtko Ursulin I915_ENGINE_EVENT(_name##_instance-sema, _class, _instance, I915_SAMPLE_SEMA), \ 517*b46a33e2STvrtko Ursulin I915_ENGINE_EVENT(_name##_instance-wait, _class, _instance, I915_SAMPLE_WAIT) 518*b46a33e2STvrtko 
Ursulin 519*b46a33e2STvrtko Ursulin static struct attribute *i915_pmu_events_attrs[] = { 520*b46a33e2STvrtko Ursulin I915_ENGINE_EVENTS(rcs, I915_ENGINE_CLASS_RENDER, 0), 521*b46a33e2STvrtko Ursulin I915_ENGINE_EVENTS(bcs, I915_ENGINE_CLASS_COPY, 0), 522*b46a33e2STvrtko Ursulin I915_ENGINE_EVENTS(vcs, I915_ENGINE_CLASS_VIDEO, 0), 523*b46a33e2STvrtko Ursulin I915_ENGINE_EVENTS(vcs, I915_ENGINE_CLASS_VIDEO, 1), 524*b46a33e2STvrtko Ursulin I915_ENGINE_EVENTS(vecs, I915_ENGINE_CLASS_VIDEO_ENHANCE, 0), 525*b46a33e2STvrtko Ursulin 526*b46a33e2STvrtko Ursulin I915_EVENT(actual-frequency, I915_PMU_ACTUAL_FREQUENCY, "MHz"), 527*b46a33e2STvrtko Ursulin I915_EVENT(requested-frequency, I915_PMU_REQUESTED_FREQUENCY, "MHz"), 528*b46a33e2STvrtko Ursulin 529*b46a33e2STvrtko Ursulin NULL, 530*b46a33e2STvrtko Ursulin }; 531*b46a33e2STvrtko Ursulin 532*b46a33e2STvrtko Ursulin static const struct attribute_group i915_pmu_events_attr_group = { 533*b46a33e2STvrtko Ursulin .name = "events", 534*b46a33e2STvrtko Ursulin .attrs = i915_pmu_events_attrs, 535*b46a33e2STvrtko Ursulin }; 536*b46a33e2STvrtko Ursulin 537*b46a33e2STvrtko Ursulin static ssize_t 538*b46a33e2STvrtko Ursulin i915_pmu_get_attr_cpumask(struct device *dev, 539*b46a33e2STvrtko Ursulin struct device_attribute *attr, 540*b46a33e2STvrtko Ursulin char *buf) 541*b46a33e2STvrtko Ursulin { 542*b46a33e2STvrtko Ursulin return cpumap_print_to_pagebuf(true, buf, &i915_pmu_cpumask); 543*b46a33e2STvrtko Ursulin } 544*b46a33e2STvrtko Ursulin 545*b46a33e2STvrtko Ursulin static DEVICE_ATTR(cpumask, 0444, i915_pmu_get_attr_cpumask, NULL); 546*b46a33e2STvrtko Ursulin 547*b46a33e2STvrtko Ursulin static struct attribute *i915_cpumask_attrs[] = { 548*b46a33e2STvrtko Ursulin &dev_attr_cpumask.attr, 549*b46a33e2STvrtko Ursulin NULL, 550*b46a33e2STvrtko Ursulin }; 551*b46a33e2STvrtko Ursulin 552*b46a33e2STvrtko Ursulin static struct attribute_group i915_pmu_cpumask_attr_group = { 553*b46a33e2STvrtko Ursulin .attrs = i915_cpumask_attrs, 
554*b46a33e2STvrtko Ursulin }; 555*b46a33e2STvrtko Ursulin 556*b46a33e2STvrtko Ursulin static const struct attribute_group *i915_pmu_attr_groups[] = { 557*b46a33e2STvrtko Ursulin &i915_pmu_format_attr_group, 558*b46a33e2STvrtko Ursulin &i915_pmu_events_attr_group, 559*b46a33e2STvrtko Ursulin &i915_pmu_cpumask_attr_group, 560*b46a33e2STvrtko Ursulin NULL 561*b46a33e2STvrtko Ursulin }; 562*b46a33e2STvrtko Ursulin 563*b46a33e2STvrtko Ursulin #ifdef CONFIG_HOTPLUG_CPU 564*b46a33e2STvrtko Ursulin static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node) 565*b46a33e2STvrtko Ursulin { 566*b46a33e2STvrtko Ursulin struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node); 567*b46a33e2STvrtko Ursulin unsigned int target; 568*b46a33e2STvrtko Ursulin 569*b46a33e2STvrtko Ursulin GEM_BUG_ON(!pmu->base.event_init); 570*b46a33e2STvrtko Ursulin 571*b46a33e2STvrtko Ursulin target = cpumask_any_and(&i915_pmu_cpumask, &i915_pmu_cpumask); 572*b46a33e2STvrtko Ursulin /* Select the first online CPU as a designated reader. 
 */
	if (target >= nr_cpu_ids)
		cpumask_set_cpu(cpu, &i915_pmu_cpumask);

	return 0;
}

/*
 * CPU hotplug teardown callback: if the CPU going offline is the one
 * currently designated in i915_pmu_cpumask to service i915 PMU events,
 * hand that role — and any active perf events — over to one of its
 * topology siblings, provided one is still online.
 */
static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
	struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node);
	unsigned int target;

	/* The PMU must be registered: event_init is set on successful register. */
	GEM_BUG_ON(!pmu->base.event_init);

	/* Only act if @cpu was the designated event-servicing CPU. */
	if (cpumask_test_and_clear_cpu(cpu, &i915_pmu_cpumask)) {
		target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
		/* Migrate events if there is a valid target */
		if (target < nr_cpu_ids) {
			cpumask_set_cpu(target, &i915_pmu_cpumask);
			perf_pmu_migrate_context(&pmu->base, cpu, target);
		}
	}

	return 0;
}

/* Dynamically allocated cpuhp state slot; stays CPUHP_INVALID until registered. */
static enum cpuhp_state cpuhp_slot = CPUHP_INVALID;
#endif /* presumably closes CONFIG_HOTPLUG_CPU opened above this chunk — confirm */

/*
 * Register the CPU hotplug online/offline callbacks above and attach this
 * device instance to them.
 *
 * Returns 0 on success — or unconditionally when CONFIG_HOTPLUG_CPU is
 * disabled, in which case this is a no-op — and a negative error code
 * otherwise.
 */
static int i915_pmu_register_cpuhp_state(struct drm_i915_private *i915)
{
#ifdef CONFIG_HOTPLUG_CPU
	enum cpuhp_state slot;
	int ret;

	/*
	 * CPUHP_AP_ONLINE_DYN asks the cpuhp core to pick a free dynamic
	 * slot; on success the chosen slot number is returned in ret.
	 */
	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
				      "perf/x86/intel/i915:online",
				      i915_pmu_cpu_online,
				      i915_pmu_cpu_offline);
	if (ret < 0)
		return ret;

	slot = ret;
	ret = cpuhp_state_add_instance(slot, &i915->pmu.node);
	if (ret) {
		/* Roll back the state setup if attaching our instance failed. */
		cpuhp_remove_multi_state(slot);
		return ret;
	}

	/* Remember the slot so i915_pmu_unregister_cpuhp_state() can undo it. */
	cpuhp_slot = slot;
#endif
	return 0;
}

/* Detach this device from the hotplug callbacks and release the state slot. */
static void i915_pmu_unregister_cpuhp_state(struct drm_i915_private *i915)
{
#ifdef CONFIG_HOTPLUG_CPU
	/* Registration must have succeeded for unregister to be reached. */
	WARN_ON(cpuhp_slot == CPUHP_INVALID);
	WARN_ON(cpuhp_state_remove_instance(cpuhp_slot, &i915->pmu.node));
	cpuhp_remove_multi_state(cpuhp_slot);
#endif
}

/*
 * i915_pmu_register - expose this device as a perf PMU named "i915".
 *
 * Fills in the perf_pmu callbacks, initializes the sampling lock and
 * hrtimer, then registers with the perf core and the CPU hotplug
 * machinery. On any failure the PMU is unwound again and event_init is
 * cleared — event_init doubles as the "PMU is registered" flag checked
 * elsewhere in this file (see i915_pmu_unregister()).
 */
void i915_pmu_register(struct drm_i915_private *i915)
{
	int ret;

	if (INTEL_GEN(i915) <= 2) {
		/* NOTE(review): log message lacks a trailing '\n'. */
		DRM_INFO("PMU not supported for this GPU.");
		return;
	}

	i915->pmu.base.attr_groups = i915_pmu_attr_groups;
	/* Uncore-style PMU: events are not tied to any task context. */
	i915->pmu.base.task_ctx_nr = perf_invalid_context;
	i915->pmu.base.event_init = i915_pmu_event_init;
	i915->pmu.base.add = i915_pmu_event_add;
	i915->pmu.base.del = i915_pmu_event_del;
	i915->pmu.base.start = i915_pmu_event_start;
	i915->pmu.base.stop = i915_pmu_event_stop;
	i915->pmu.base.read = i915_pmu_event_read;
	i915->pmu.base.event_idx = i915_pmu_event_event_idx;

	spin_lock_init(&i915->pmu.lock);
	hrtimer_init(&i915->pmu.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	i915->pmu.timer.function = i915_sample;

	ret = perf_pmu_register(&i915->pmu.base, "i915", -1);
	if (ret)
		goto err;

	ret = i915_pmu_register_cpuhp_state(i915);
	if (ret)
		goto err_unreg;

	return;

err_unreg:
	perf_pmu_unregister(&i915->pmu.base);
err:
	/* Clearing event_init marks the PMU as not registered. */
	i915->pmu.base.event_init = NULL;
	DRM_NOTE("Failed to register PMU! (err=%d)\n", ret);
}

/*
 * i915_pmu_unregister - tear down everything i915_pmu_register() set up.
 *
 * Safe to call when registration never happened or failed: event_init
 * being NULL short-circuits the teardown.
 */
void i915_pmu_unregister(struct drm_i915_private *i915)
{
	if (!i915->pmu.base.event_init)
		return;

	/* All events should have been disabled before unregistering. */
	WARN_ON(i915->pmu.enable);

	hrtimer_cancel(&i915->pmu.timer);

	i915_pmu_unregister_cpuhp_state(i915);

	perf_pmu_unregister(&i915->pmu.base);
	i915->pmu.base.event_init = NULL;
}