xref: /openbmc/linux/drivers/gpu/drm/i915/i915_pmu.c (revision 6060b6aec03c76f9ce0977b70c27429d39d2956e)
/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/perf_event.h>
#include <linux/pm_runtime.h>

#include "i915_drv.h"
#include "i915_pmu.h"
#include "intel_ringbuffer.h"

/* Frequency for the sampling timer for events which need it. */
#define FREQUENCY 200
#define PERIOD max_t(u64, 10000, NSEC_PER_SEC / FREQUENCY)
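/*
 * At the default FREQUENCY of 200 this evaluates to 5,000,000 ns, i.e. a
 * 5 ms sampling period; the 10,000 ns floor only matters if FREQUENCY is
 * raised high enough for NSEC_PER_SEC / FREQUENCY to drop below 10 us.
 */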

#define ENGINE_SAMPLE_MASK \
	(BIT(I915_SAMPLE_BUSY) | \
	 BIT(I915_SAMPLE_WAIT) | \
	 BIT(I915_SAMPLE_SEMA))

#define ENGINE_SAMPLE_BITS (1 << I915_PMU_SAMPLE_BITS)
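/*
 * Engine events occupy the low ENGINE_SAMPLE_BITS of the enable bitmask,
 * one bit per sample type, while the remaining "other" events (frequency,
 * interrupts, RC6) are tracked in the bits above them - see
 * config_enabled_bit() below.
 */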

static cpumask_t i915_pmu_cpumask = CPU_MASK_NONE;

static u8 engine_config_sample(u64 config)
{
	return config & I915_PMU_SAMPLE_MASK;
}

static u8 engine_event_sample(struct perf_event *event)
{
	return engine_config_sample(event->attr.config);
}

static u8 engine_event_class(struct perf_event *event)
{
	return (event->attr.config >> I915_PMU_CLASS_SHIFT) & 0xff;
}

static u8 engine_event_instance(struct perf_event *event)
{
	return (event->attr.config >> I915_PMU_SAMPLE_BITS) & 0xff;
}
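/*
 * Engine event configs are thus laid out as class (8 bits, at
 * I915_PMU_CLASS_SHIFT), then instance (8 bits, at I915_PMU_SAMPLE_BITS),
 * then the sample type in the low bits - the mirror image of the
 * __I915_PMU_ENGINE() encoding used in the uapi header.
 */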

static bool is_engine_config(u64 config)
{
	return config < __I915_PMU_OTHER(0);
}

static unsigned int config_enabled_bit(u64 config)
{
	if (is_engine_config(config))
		return engine_config_sample(config);
	else
		return ENGINE_SAMPLE_BITS + (config - __I915_PMU_OTHER(0));
}

static u64 config_enabled_mask(u64 config)
{
	return BIT_ULL(config_enabled_bit(config));
}

static bool is_engine_event(struct perf_event *event)
{
	return is_engine_config(event->attr.config);
}

static unsigned int event_enabled_bit(struct perf_event *event)
{
	return config_enabled_bit(event->attr.config);
}

static bool supports_busy_stats(struct drm_i915_private *i915)
{
	return INTEL_GEN(i915) >= 8;
}
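/*
 * Software busy stats (intel_engine_get_busy_time()) rely on the engine
 * statistics machinery, which is only wired up for the execlists
 * submission path - hence, presumably, the Gen8+ check above.
 */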

static bool pmu_needs_timer(struct drm_i915_private *i915, bool gpu_active)
{
	u64 enable;

	/*
	 * Only some counters need the sampling timer.
	 *
	 * We start with a bitmask of all currently enabled events.
	 */
	enable = i915->pmu.enable;

	/*
	 * Mask out all the ones which do not need the timer, or in
	 * other words keep all the ones that could need the timer.
	 */
	enable &= config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY) |
		  config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY) |
		  ENGINE_SAMPLE_MASK;

	/*
	 * When the GPU is idle, per-engine counters do not need to be
	 * running, so clear those bits out.
	 */
	if (!gpu_active)
		enable &= ~ENGINE_SAMPLE_MASK;
	/*
	 * Also, when software busyness tracking is available we do not
	 * need the timer for the I915_SAMPLE_BUSY counter.
	 */
	else if (supports_busy_stats(i915))
		enable &= ~BIT(I915_SAMPLE_BUSY);

	/*
	 * If some bits remain it means we need the sampling timer running.
	 */
	return enable;
}

void i915_pmu_gt_parked(struct drm_i915_private *i915)
{
	if (!i915->pmu.base.event_init)
		return;

	spin_lock_irq(&i915->pmu.lock);
	/*
	 * Signal sampling timer to stop if only engine events are enabled and
	 * GPU went idle.
	 */
	i915->pmu.timer_enabled = pmu_needs_timer(i915, false);
	spin_unlock_irq(&i915->pmu.lock);
}

static void __i915_pmu_maybe_start_timer(struct drm_i915_private *i915)
{
	if (!i915->pmu.timer_enabled && pmu_needs_timer(i915, true)) {
		i915->pmu.timer_enabled = true;
		hrtimer_start_range_ns(&i915->pmu.timer,
				       ns_to_ktime(PERIOD), 0,
				       HRTIMER_MODE_REL_PINNED);
	}
}

void i915_pmu_gt_unparked(struct drm_i915_private *i915)
{
	if (!i915->pmu.base.event_init)
		return;

	spin_lock_irq(&i915->pmu.lock);
	/*
	 * Re-enable sampling timer when GPU goes active.
	 */
	__i915_pmu_maybe_start_timer(i915);
	spin_unlock_irq(&i915->pmu.lock);
}

static bool grab_forcewake(struct drm_i915_private *i915, bool fw)
{
	if (!fw)
		intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);

	return true;
}

static void
update_sample(struct i915_pmu_sample *sample, u32 unit, u32 val)
{
	/*
	 * Since we are doing stochastic sampling for these counters,
	 * average the delta with the previous value for better accuracy.
	 */
	sample->cur += div_u64(mul_u32_u32(sample->prev + val, unit), 2);
	sample->prev = val;
}
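/*
 * For the engine counters, unit is PERIOD and val is a 0/1 busy indication,
 * so each tick adds (prev + val) * PERIOD / 2 nanoseconds - a trapezoidal
 * average of the previous and current sample. For example prev == 1 and
 * val == 0 credits half a period of busy time to the counter.
 */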

static void engines_sample(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	bool fw = false;

	if ((dev_priv->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
		return;

	if (!dev_priv->gt.awake)
		return;

	if (!intel_runtime_pm_get_if_in_use(dev_priv))
		return;

	for_each_engine(engine, dev_priv, id) {
		u32 current_seqno = intel_engine_get_seqno(engine);
		u32 last_seqno = intel_engine_last_submit(engine);
		u32 val;

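		/*
		 * The engine is considered busy while the most recently
		 * submitted seqno is still ahead of the last completed one,
		 * i.e. at least one request is in flight.
		 */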
		val = !i915_seqno_passed(current_seqno, last_seqno);

		update_sample(&engine->pmu.sample[I915_SAMPLE_BUSY],
			      PERIOD, val);

		if (val && (engine->pmu.enable &
		    (BIT(I915_SAMPLE_WAIT) | BIT(I915_SAMPLE_SEMA)))) {
			fw = grab_forcewake(dev_priv, fw);

			val = I915_READ_FW(RING_CTL(engine->mmio_base));
		} else {
			val = 0;
		}

		update_sample(&engine->pmu.sample[I915_SAMPLE_WAIT],
			      PERIOD, !!(val & RING_WAIT));

		update_sample(&engine->pmu.sample[I915_SAMPLE_SEMA],
			      PERIOD, !!(val & RING_WAIT_SEMAPHORE));
	}

	if (fw)
		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	intel_runtime_pm_put(dev_priv);
}

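/*
 * Sample the actual (CAGF) and requested GPU frequencies. Reading the
 * actual frequency needs the device awake, so while it is runtime
 * suspended we fall back to the last requested frequency (cur_freq),
 * presumably as the best available estimate for a sleeping GPU.
 */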
static void frequency_sample(struct drm_i915_private *dev_priv)
{
	if (dev_priv->pmu.enable &
	    config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
		u32 val;

		val = dev_priv->gt_pm.rps.cur_freq;
		if (dev_priv->gt.awake &&
		    intel_runtime_pm_get_if_in_use(dev_priv)) {
			val = intel_get_cagf(dev_priv,
					     I915_READ_NOTRACE(GEN6_RPSTAT1));
			intel_runtime_pm_put(dev_priv);
		}

		update_sample(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT],
			      1, intel_gpu_freq(dev_priv, val));
	}

	if (dev_priv->pmu.enable &
	    config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) {
		update_sample(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_REQ], 1,
			      intel_gpu_freq(dev_priv,
					     dev_priv->gt_pm.rps.cur_freq));
	}
}

static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
{
	struct drm_i915_private *i915 =
		container_of(hrtimer, struct drm_i915_private, pmu.timer);

	if (!READ_ONCE(i915->pmu.timer_enabled))
		return HRTIMER_NORESTART;

	engines_sample(i915);
	frequency_sample(i915);

	hrtimer_forward_now(hrtimer, ns_to_ktime(PERIOD));
	return HRTIMER_RESTART;
}

static u64 count_interrupts(struct drm_i915_private *i915)
{
	/* open-coded kstat_irqs() */
	struct irq_desc *desc = irq_to_desc(i915->drm.pdev->irq);
	u64 sum = 0;
	int cpu;

	if (!desc || !desc->kstat_irqs)
		return 0;

	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);

	return sum;
}

static void i915_pmu_event_destroy(struct perf_event *event)
{
	WARN_ON(event->parent);
}

static int engine_event_init(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);

	if (!intel_engine_lookup_user(i915, engine_event_class(event),
				      engine_event_instance(event)))
		return -ENODEV;

	switch (engine_event_sample(event)) {
	case I915_SAMPLE_BUSY:
	case I915_SAMPLE_WAIT:
		break;
	case I915_SAMPLE_SEMA:
		if (INTEL_GEN(i915) < 6)
			return -ENODEV;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}

static int i915_pmu_event_init(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	int cpu, ret;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* unsupported modes and filters */
	if (event->attr.sample_period) /* no sampling */
		return -EINVAL;

	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	if (event->cpu < 0)
		return -EINVAL;

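	/*
	 * As with other uncore-style PMUs, events are counted on a single
	 * designated reader CPU, so map the requested CPU onto one from the
	 * exported cpumask (event->cpu is overwritten further below).
	 */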
	cpu = cpumask_any_and(&i915_pmu_cpumask,
			      topology_sibling_cpumask(event->cpu));
	if (cpu >= nr_cpu_ids)
		return -ENODEV;

	if (is_engine_event(event)) {
		ret = engine_event_init(event);
	} else {
		ret = 0;
		switch (event->attr.config) {
		case I915_PMU_ACTUAL_FREQUENCY:
			if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
				/* Requires a mutex for sampling! */
				ret = -ENODEV;
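			/* Fall-through. */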
		case I915_PMU_REQUESTED_FREQUENCY:
			if (INTEL_GEN(i915) < 6)
				ret = -ENODEV;
			break;
		case I915_PMU_INTERRUPTS:
			break;
		case I915_PMU_RC6_RESIDENCY:
			if (!HAS_RC6(i915))
				ret = -ENODEV;
			break;
		case I915_PMU_RC6p_RESIDENCY:
		case I915_PMU_RC6pp_RESIDENCY:
			if (!HAS_RC6p(i915))
				ret = -ENODEV;
			break;
		default:
			ret = -ENOENT;
			break;
		}
	}
	if (ret)
		return ret;

	event->cpu = cpu;
	if (!event->parent)
		event->destroy = i915_pmu_event_destroy;

	return 0;
}

static u64 __i915_pmu_event_read(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	u64 val = 0;

	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));

		if (WARN_ON_ONCE(!engine)) {
			/* Do nothing */
		} else if (sample == I915_SAMPLE_BUSY &&
			   engine->pmu.busy_stats) {
			val = ktime_to_ns(intel_engine_get_busy_time(engine));
		} else {
			val = engine->pmu.sample[sample].cur;
		}
	} else {
		switch (event->attr.config) {
		case I915_PMU_ACTUAL_FREQUENCY:
			val =
			   div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_ACT].cur,
				   FREQUENCY);
			break;
		case I915_PMU_REQUESTED_FREQUENCY:
			val =
			   div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_REQ].cur,
				   FREQUENCY);
			break;
		case I915_PMU_INTERRUPTS:
			val = count_interrupts(i915);
			break;
		case I915_PMU_RC6_RESIDENCY:
			intel_runtime_pm_get(i915);
			val = intel_rc6_residency_ns(i915,
						     IS_VALLEYVIEW(i915) ?
						     VLV_GT_RENDER_RC6 :
						     GEN6_GT_GFX_RC6);
			intel_runtime_pm_put(i915);
			break;
		case I915_PMU_RC6p_RESIDENCY:
			intel_runtime_pm_get(i915);
			val = intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6p);
			intel_runtime_pm_put(i915);
			break;
		case I915_PMU_RC6pp_RESIDENCY:
			intel_runtime_pm_get(i915);
			val = intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6pp);
			intel_runtime_pm_put(i915);
			break;
		}
	}

	return val;
}

static void i915_pmu_event_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev, new;

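	/*
	 * Lockless update: re-read the current counter value until we
	 * manage to atomically swap out the previous snapshot, then
	 * publish the delta. This keeps concurrent readers consistent
	 * without taking a lock in the read path.
	 */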
again:
	prev = local64_read(&hwc->prev_count);
	new = __i915_pmu_event_read(event);

	if (local64_cmpxchg(&hwc->prev_count, prev, new) != prev)
		goto again;

	local64_add(new - prev, &event->count);
}

static bool engine_needs_busy_stats(struct intel_engine_cs *engine)
{
	return supports_busy_stats(engine->i915) &&
	       (engine->pmu.enable & BIT(I915_SAMPLE_BUSY));
}

static void i915_pmu_enable(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	unsigned int bit = event_enabled_bit(event);
	unsigned long flags;

	spin_lock_irqsave(&i915->pmu.lock, flags);

	/*
	 * Update the bitmask of enabled events and increment
	 * the event reference counter.
	 */
	GEM_BUG_ON(bit >= I915_PMU_MASK_BITS);
	GEM_BUG_ON(i915->pmu.enable_count[bit] == ~0);
	i915->pmu.enable |= BIT_ULL(bit);
	i915->pmu.enable_count[bit]++;

	/*
	 * Start the sampling timer if needed and not already enabled.
	 */
	__i915_pmu_maybe_start_timer(i915);

	/*
	 * For per-engine events the bitmask and reference counting
	 * are stored per engine.
	 */
	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));
		GEM_BUG_ON(!engine);
		engine->pmu.enable |= BIT(sample);

		GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS);
		GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0);
		if (engine->pmu.enable_count[sample]++ == 0) {
			/*
			 * Enable engine busy stats tracking if needed, or
			 * alternatively cancel the scheduled disable.
			 *
			 * If a delayed disable was pending, cancel it and do
			 * not enable the stats again since they are already
			 * enabled.
			 */
			if (engine_needs_busy_stats(engine) &&
			    !engine->pmu.busy_stats) {
				engine->pmu.busy_stats = true;
				if (!cancel_delayed_work(&engine->pmu.disable_busy_stats))
					intel_enable_engine_stats(engine);
			}
		}
	}

	/*
	 * Store the current counter value so we can report the correct delta
	 * for all listeners. Even when the event was already enabled and has
	 * an existing non-zero value.
	 */
	local64_set(&event->hw.prev_count, __i915_pmu_event_read(event));

	spin_unlock_irqrestore(&i915->pmu.lock, flags);
}

static void __disable_busy_stats(struct work_struct *work)
{
	struct intel_engine_cs *engine =
	       container_of(work, typeof(*engine), pmu.disable_busy_stats.work);

	intel_disable_engine_stats(engine);
}

static void i915_pmu_disable(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	unsigned int bit = event_enabled_bit(event);
	unsigned long flags;

	spin_lock_irqsave(&i915->pmu.lock, flags);

	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));
		GEM_BUG_ON(!engine);
		GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS);
		GEM_BUG_ON(engine->pmu.enable_count[sample] == 0);
		/*
		 * Decrement the reference count and clear the enabled
		 * bitmask when the last listener on an event goes away.
		 */
		if (--engine->pmu.enable_count[sample] == 0) {
			engine->pmu.enable &= ~BIT(sample);
			if (!engine_needs_busy_stats(engine) &&
			    engine->pmu.busy_stats) {
				engine->pmu.busy_stats = false;
				/*
				 * We request a delayed disable to handle, in
				 * a nicer way, the rapid on/off cycles which
				 * tools like perf stat can trigger when they
				 * start.
				 *
				 * In addition, this also helps with busy stats
				 * accuracy with background CPU offline/online
				 * migration events.
				 */
				queue_delayed_work(system_wq,
						   &engine->pmu.disable_busy_stats,
						   round_jiffies_up_relative(HZ));
			}
		}
	}

	GEM_BUG_ON(bit >= I915_PMU_MASK_BITS);
	GEM_BUG_ON(i915->pmu.enable_count[bit] == 0);
	/*
	 * Decrement the reference count and clear the enabled
	 * bitmask when the last listener on an event goes away.
	 */
	if (--i915->pmu.enable_count[bit] == 0) {
		i915->pmu.enable &= ~BIT_ULL(bit);
		i915->pmu.timer_enabled &= pmu_needs_timer(i915, true);
	}

	spin_unlock_irqrestore(&i915->pmu.lock, flags);
}

static void i915_pmu_event_start(struct perf_event *event, int flags)
{
	i915_pmu_enable(event);
	event->hw.state = 0;
}

static void i915_pmu_event_stop(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_UPDATE)
		i915_pmu_event_read(event);
	i915_pmu_disable(event);
	event->hw.state = PERF_HES_STOPPED;
}

static int i915_pmu_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		i915_pmu_event_start(event, flags);

	return 0;
}

static void i915_pmu_event_del(struct perf_event *event, int flags)
{
	i915_pmu_event_stop(event, PERF_EF_UPDATE);
}

static int i915_pmu_event_event_idx(struct perf_event *event)
{
	return 0;
}

static ssize_t i915_pmu_format_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct dev_ext_attribute *eattr;

	eattr = container_of(attr, struct dev_ext_attribute, attr);
	return sprintf(buf, "%s\n", (char *)eattr->var);
}

#define I915_PMU_FORMAT_ATTR(_name, _config) \
	(&((struct dev_ext_attribute[]) { \
		{ .attr = __ATTR(_name, 0444, i915_pmu_format_show, NULL), \
		  .var = (void *)_config, } \
	})[0].attr.attr)
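/*
 * The macro above (and the similar event attribute macros below) uses a
 * compound literal to define an anonymous dev_ext_attribute and evaluates
 * to a pointer to its embedded struct attribute, so the attribute arrays
 * can be built inline without named static variables.
 */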

static struct attribute *i915_pmu_format_attrs[] = {
	I915_PMU_FORMAT_ATTR(i915_eventid, "config:0-20"),
	NULL,
};

static const struct attribute_group i915_pmu_format_attr_group = {
	.name = "format",
	.attrs = i915_pmu_format_attrs,
};

static ssize_t i915_pmu_event_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct dev_ext_attribute *eattr;

	eattr = container_of(attr, struct dev_ext_attribute, attr);
	return sprintf(buf, "config=0x%lx\n", (unsigned long)eattr->var);
}

#define I915_EVENT_ATTR(_name, _config) \
	(&((struct dev_ext_attribute[]) { \
		{ .attr = __ATTR(_name, 0444, i915_pmu_event_show, NULL), \
		  .var = (void *)_config, } \
	})[0].attr.attr)

#define I915_EVENT_STR(_name, _str) \
	(&((struct perf_pmu_events_attr[]) { \
		{ .attr	     = __ATTR(_name, 0444, perf_event_sysfs_show, NULL), \
		  .id	     = 0, \
		  .event_str = _str, } \
	})[0].attr.attr)

#define I915_EVENT(_name, _config, _unit) \
	I915_EVENT_ATTR(_name, _config), \
	I915_EVENT_STR(_name.unit, _unit)

#define I915_ENGINE_EVENT(_name, _class, _instance, _sample) \
	I915_EVENT_ATTR(_name, __I915_PMU_ENGINE(_class, _instance, _sample)), \
	I915_EVENT_STR(_name.unit, "ns")

#define I915_ENGINE_EVENTS(_name, _class, _instance) \
	I915_ENGINE_EVENT(_name##_instance-busy, _class, _instance, I915_SAMPLE_BUSY), \
	I915_ENGINE_EVENT(_name##_instance-sema, _class, _instance, I915_SAMPLE_SEMA), \
	I915_ENGINE_EVENT(_name##_instance-wait, _class, _instance, I915_SAMPLE_WAIT)
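/*
 * Token pasting in I915_ENGINE_EVENTS() yields sysfs names such as
 * "rcs0-busy" (with a matching "rcs0-busy.unit" reading "ns"), which
 * tools then see as e.g. the perf event "i915/rcs0-busy/".
 */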

static struct attribute *i915_pmu_events_attrs[] = {
	I915_ENGINE_EVENTS(rcs, I915_ENGINE_CLASS_RENDER, 0),
	I915_ENGINE_EVENTS(bcs, I915_ENGINE_CLASS_COPY, 0),
	I915_ENGINE_EVENTS(vcs, I915_ENGINE_CLASS_VIDEO, 0),
	I915_ENGINE_EVENTS(vcs, I915_ENGINE_CLASS_VIDEO, 1),
	I915_ENGINE_EVENTS(vecs, I915_ENGINE_CLASS_VIDEO_ENHANCE, 0),

	I915_EVENT(actual-frequency,    I915_PMU_ACTUAL_FREQUENCY,    "MHz"),
	I915_EVENT(requested-frequency, I915_PMU_REQUESTED_FREQUENCY, "MHz"),

	I915_EVENT_ATTR(interrupts, I915_PMU_INTERRUPTS),

	I915_EVENT(rc6-residency,   I915_PMU_RC6_RESIDENCY,   "ns"),
	I915_EVENT(rc6p-residency,  I915_PMU_RC6p_RESIDENCY,  "ns"),
	I915_EVENT(rc6pp-residency, I915_PMU_RC6pp_RESIDENCY, "ns"),

	NULL,
};

static const struct attribute_group i915_pmu_events_attr_group = {
	.name = "events",
	.attrs = i915_pmu_events_attrs,
};

static ssize_t
i915_pmu_get_attr_cpumask(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &i915_pmu_cpumask);
}

static DEVICE_ATTR(cpumask, 0444, i915_pmu_get_attr_cpumask, NULL);

static struct attribute *i915_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group i915_pmu_cpumask_attr_group = {
	.attrs = i915_cpumask_attrs,
};

static const struct attribute_group *i915_pmu_attr_groups[] = {
	&i915_pmu_format_attr_group,
	&i915_pmu_events_attr_group,
	&i915_pmu_cpumask_attr_group,
	NULL
};

#ifdef CONFIG_HOTPLUG_CPU
static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node);
	unsigned int target;

	GEM_BUG_ON(!pmu->base.event_init);

	target = cpumask_any_and(&i915_pmu_cpumask, &i915_pmu_cpumask);
	/* Select the first online CPU as a designated reader. */
	if (target >= nr_cpu_ids)
		cpumask_set_cpu(cpu, &i915_pmu_cpumask);

	return 0;
}

static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
	struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node);
	unsigned int target;

	GEM_BUG_ON(!pmu->base.event_init);

	if (cpumask_test_and_clear_cpu(cpu, &i915_pmu_cpumask)) {
		target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
		/* Migrate events if there is a valid target */
		if (target < nr_cpu_ids) {
			cpumask_set_cpu(target, &i915_pmu_cpumask);
			perf_pmu_migrate_context(&pmu->base, cpu, target);
		}
	}

	return 0;
}

static enum cpuhp_state cpuhp_slot = CPUHP_INVALID;
#endif

static int i915_pmu_register_cpuhp_state(struct drm_i915_private *i915)
{
#ifdef CONFIG_HOTPLUG_CPU
	enum cpuhp_state slot;
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
				      "perf/x86/intel/i915:online",
				      i915_pmu_cpu_online,
				      i915_pmu_cpu_offline);
	if (ret < 0)
		return ret;

	slot = ret;
	ret = cpuhp_state_add_instance(slot, &i915->pmu.node);
	if (ret) {
		cpuhp_remove_multi_state(slot);
		return ret;
	}

	cpuhp_slot = slot;
#endif
	return 0;
}

static void i915_pmu_unregister_cpuhp_state(struct drm_i915_private *i915)
{
#ifdef CONFIG_HOTPLUG_CPU
	WARN_ON(cpuhp_slot == CPUHP_INVALID);
	WARN_ON(cpuhp_state_remove_instance(cpuhp_slot, &i915->pmu.node));
	cpuhp_remove_multi_state(cpuhp_slot);
#endif
}

void i915_pmu_register(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int ret;

	if (INTEL_GEN(i915) <= 2) {
		DRM_INFO("PMU not supported for this GPU.");
		return;
	}

	i915->pmu.base.attr_groups	= i915_pmu_attr_groups;
	i915->pmu.base.task_ctx_nr	= perf_invalid_context;
	i915->pmu.base.event_init	= i915_pmu_event_init;
	i915->pmu.base.add		= i915_pmu_event_add;
	i915->pmu.base.del		= i915_pmu_event_del;
	i915->pmu.base.start		= i915_pmu_event_start;
	i915->pmu.base.stop		= i915_pmu_event_stop;
	i915->pmu.base.read		= i915_pmu_event_read;
	i915->pmu.base.event_idx	= i915_pmu_event_event_idx;

	spin_lock_init(&i915->pmu.lock);
	hrtimer_init(&i915->pmu.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	i915->pmu.timer.function = i915_sample;

	for_each_engine(engine, i915, id)
		INIT_DELAYED_WORK(&engine->pmu.disable_busy_stats,
				  __disable_busy_stats);

	ret = perf_pmu_register(&i915->pmu.base, "i915", -1);
	if (ret)
		goto err;

	ret = i915_pmu_register_cpuhp_state(i915);
	if (ret)
		goto err_unreg;

	return;

err_unreg:
	perf_pmu_unregister(&i915->pmu.base);
err:
	i915->pmu.base.event_init = NULL;
	DRM_NOTE("Failed to register PMU! (err=%d)\n", ret);
}

void i915_pmu_unregister(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	if (!i915->pmu.base.event_init)
		return;

	WARN_ON(i915->pmu.enable);

	hrtimer_cancel(&i915->pmu.timer);

	for_each_engine(engine, i915, id) {
		GEM_BUG_ON(engine->pmu.busy_stats);
		flush_delayed_work(&engine->pmu.disable_busy_stats);
	}

	i915_pmu_unregister_cpuhp_state(i915);

	perf_pmu_unregister(&i915->pmu.base);
	i915->pmu.base.event_init = NULL;
}
885