/*
 * Copyright © 2015-2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *   Robert Bragg <robert@sixbynine.org>
 */


/**
 * DOC: i915 Perf Overview
 *
 * Gen graphics supports a large number of performance counters that can help
 * driver and application developers understand and optimize their use of the
 * GPU.
 *
 * This i915 perf interface enables userspace to configure and open a file
 * descriptor representing a stream of GPU metrics which can then be read() as
 * a stream of sample records.
 *
 * The interface is particularly suited to exposing buffered metrics that are
 * captured by DMA from the GPU, unsynchronized with and unrelated to the CPU.
 *
 * Streams representing a single context are accessible to applications with a
 * corresponding drm file descriptor, such that OpenGL can use the interface
 * without special privileges. Access to system-wide metrics requires root
 * privileges by default, unless changed via the dev.i915.perf_stream_paranoid
 * sysctl option.
 *
 */
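
/*
 * For illustration only (a userspace sketch, not driver code): opening an OA
 * stream and reading it via the uapi in include/uapi/drm/i915_drm.h. The
 * metrics set ID (1), OA format and exponent below are placeholder choices,
 * and num_properties counts (key, value) pairs:
 *
 *	uint64_t properties[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, 1,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *		.num_properties = 4,
 *		.properties_ptr = (uintptr_t)properties,
 *	};
 *	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 *
 * read(stream_fd, ...) then returns a stream of records as described for
 * append_oa_sample() below.
 */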

/**
 * DOC: i915 Perf History and Comparison with Core Perf
 *
 * The interface was initially inspired by the core Perf infrastructure but
 * some notable differences are:
 *
 * i915 perf file descriptors represent a "stream" instead of an "event"; a
 * perf event primarily corresponds to a single 64bit value, while a stream
 * might sample sets of tightly-coupled counters, depending on the
 * configuration.  For example the Gen OA unit isn't designed to support
 * orthogonal configurations of individual counters; it's configured for a set
 * of related counters. Samples for an i915 perf stream capturing OA metrics
 * will include a set of counter values packed in a compact HW specific format.
 * The OA unit supports a number of different packing formats which can be
 * selected by the user opening the stream. Perf has support for grouping
 * events, but each event in the group is configured, validated and
 * authenticated individually with separate system calls.
 *
 * i915 perf stream configurations are provided as an array of u64 (key, value)
 * pairs, instead of a fixed struct with multiple miscellaneous config members,
 * interleaved with event-type specific members.
 *
 * i915 perf doesn't support exposing metrics via an mmap'd circular buffer.
 * The supported metrics are being written to memory by the GPU unsynchronized
 * with the CPU, using HW specific packing formats for counter sets. Sometimes
 * the constraints on HW configuration require reports to be filtered before it
 * would be acceptable to expose them to unprivileged applications - to hide
 * the metrics of other processes/contexts. For these use cases a read() based
 * interface is a good fit, and provides an opportunity to filter data as it
 * gets copied from the GPU mapped buffers to userspace buffers.
 *
 *
 * Issues hit with first prototype based on Core Perf
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * The first prototype of this driver was based on the core perf
 * infrastructure, and while we did make that mostly work, with some changes to
 * perf, we found we were breaking or working around too many assumptions baked
 * into perf's current cpu-centric design.
 *
 * In the end we didn't see a clear benefit to making perf's implementation and
 * interface more complex by changing design assumptions while we knew we still
 * wouldn't be able to use any existing perf based userspace tools.
 *
 * Also considering the Gen specific nature of the Observability hardware and
 * how userspace will sometimes need to combine i915 perf OA metrics with
 * side-band OA data captured via MI_REPORT_PERF_COUNT commands; we're
 * expecting the interface to be used by a platform specific userspace such as
 * OpenGL or tools. This is to say, we aren't inherently missing out on having
 * a standard vendor/architecture agnostic interface by not using perf.
 *
 *
 * For posterity, in case we might revisit trying to adapt core perf to be
 * better suited to exposing i915 metrics, these were the main pain points we
 * hit:
 *
 * - The perf based OA PMU driver broke some significant design assumptions:
 *
 *   Existing perf pmus are used for profiling work on a cpu and we were
 *   introducing the idea of _IS_DEVICE pmus with different security
 *   implications, the need to fake cpu-related data (such as user/kernel
 *   registers) to fit with perf's current design, and adding _DEVICE records
 *   as a way to forward device-specific status records.
 *
 *   The OA unit writes reports of counters into a circular buffer, without
 *   involvement from the CPU, making our PMU driver the first of its kind.
 *
 *   Given the way we were periodically forwarding data from the GPU-mapped OA
 *   buffer to perf's buffer, those bursts of sample writes looked to perf like
 *   we were sampling too fast and so we had to subvert its throttling checks.
 *
 *   Perf supports groups of counters and allows those to be read via
 *   transactions internally but transactions currently seem designed to be
 *   explicitly initiated from the cpu (say in response to a userspace read())
 *   and while we could pull a report out of the OA buffer we can't
 *   trigger a report from the cpu on demand.
 *
 *   Related to being report based: the OA counters are configured in HW as a
 *   set while perf generally expects counter configurations to be orthogonal.
 *   Although counters can be associated with a group leader as they are
 *   opened, there's no clear precedent for being able to provide group-wide
 *   configuration attributes (for example we want to let userspace choose the
 *   OA unit report format used to capture all counters in a set, or specify a
 *   GPU context to filter metrics on). We avoided using perf's grouping
 *   feature and forwarded OA reports to userspace via perf's 'raw' sample
 *   field. This suited our userspace well considering how coupled the counters
 *   are when dealing with normalizing. It would be inconvenient to split
 *   counters up into separate events, only to require userspace to recombine
 *   them. For Mesa it's also convenient to be forwarded raw, periodic reports
 *   for combining with the side-band raw reports it captures using
 *   MI_REPORT_PERF_COUNT commands.
 *
 *   - As a side note on perf's grouping feature: there was also some concern
 *     that using PERF_FORMAT_GROUP as a way to pack together counter values
 *     would quite drastically inflate our sample sizes, which would likely
 *     lower the effective sampling resolutions we could use when the available
 *     memory bandwidth is limited.
 *
 *     With the OA unit's report formats, counters are packed together as 32
 *     or 40bit values, with the largest report size being 256 bytes.
 *
 *     PERF_FORMAT_GROUP values are 64bit, but there doesn't appear to be a
 *     documented ordering to the values, implying PERF_FORMAT_ID must also be
 *     used to add a 64bit ID before each value; giving 16 bytes per counter.
 *
 *   Related to counter orthogonality: we can't time share the OA unit, while
 *   event scheduling is a central design idea within perf for allowing
 *   userspace to open + enable more events than can be configured in HW at any
 *   one time.  The OA unit is not designed to allow re-configuration while in
 *   use. We can't reconfigure the OA unit without losing internal OA unit
 *   state which we can't access explicitly to save and restore. Reconfiguring
 *   the OA unit is also relatively slow, involving ~100 register writes. From
 *   userspace Mesa also depends on a stable OA configuration when emitting
 *   MI_REPORT_PERF_COUNT commands and importantly the OA unit can't be
 *   disabled while there are outstanding MI_RPC commands lest we hang the
 *   command streamer.
 *
 *   The contents of sample records aren't extensible by device drivers (i.e.
 *   the sample_type bits). As an example, Sourab Gupta had been looking to
 *   attach GPU timestamps to our OA samples. We were shoehorning OA reports
 *   into sample records by using the 'raw' field, but it's tricky to pack more
 *   than one thing into this field because events/core.c currently only lets a
 *   pmu give a single raw data pointer plus len which will be copied into the
 *   ring buffer. To include more than the OA report we'd have to copy the
 *   report into an intermediate larger buffer. I'd been considering allowing a
 *   vector of data+len values to be specified for copying the raw data, but
 *   it felt like a kludge to be using the raw field for this purpose.
 *
 * - It felt like our perf based PMU was making some technical compromises
 *   just for the sake of using perf:
 *
 *   perf_event_open() requires events to either relate to a pid or a specific
 *   cpu core, while our device pmu related to neither.  Events opened with a
 *   pid will be automatically enabled/disabled according to the scheduling of
 *   that process - so not appropriate for us. When an event is related to a
 *   cpu id, perf ensures pmu methods will be invoked via an inter-processor
 *   interrupt on that core. To avoid invasive changes our userspace opened OA
 *   perf events for a specific cpu. This was workable but it meant the
 *   majority of the OA driver ran in atomic context, including all OA report
 *   forwarding, which wasn't really necessary in our case and made our
 *   locking requirements somewhat complex as we handled the interaction
 *   with the rest of the i915 driver.
 */

#include <linux/anon_inodes.h>
#include <linux/sizes.h>
#include <linux/uuid.h>

#include "i915_drv.h"
#include "i915_oa_hsw.h"
#include "i915_oa_bdw.h"
#include "i915_oa_chv.h"
#include "i915_oa_sklgt2.h"
#include "i915_oa_sklgt3.h"
#include "i915_oa_sklgt4.h"
#include "i915_oa_bxt.h"
#include "i915_oa_kblgt2.h"
#include "i915_oa_kblgt3.h"
#include "i915_oa_glk.h"
#include "i915_oa_cflgt2.h"

/* HW requires this to be a power of two, between 128k and 16M, though driver
 * is currently generally designed assuming the largest 16M size is used such
 * that the overflow cases are unlikely in normal operation.
 */
#define OA_BUFFER_SIZE		SZ_16M

#define OA_TAKEN(tail, head)	((tail - head) & (OA_BUFFER_SIZE - 1))
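
/* For example: if the tail has wrapped around to 64 while the head is at
 * OA_BUFFER_SIZE - 64, then OA_TAKEN(64, OA_BUFFER_SIZE - 64) masks the
 * negative difference back into range, giving 128 bytes available.
 */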

/**
 * DOC: OA Tail Pointer Race
 *
 * There's a HW race condition between OA unit tail pointer register updates and
 * writes to memory whereby the tail pointer can sometimes get ahead of what's
 * been written out to the OA buffer so far (in terms of what's visible to the
 * CPU).
 *
 * Although this can be observed explicitly while copying reports to userspace
 * by checking for a zeroed report-id field in tail reports, we want to account
 * for this earlier, as part of the oa_buffer_check to avoid lots of redundant
 * read() attempts.
 *
 * In effect we define a tail pointer for reading that lags the real tail
 * pointer by at least %OA_TAIL_MARGIN_NSEC nanoseconds, which gives enough
 * time for the corresponding reports to become visible to the CPU.
 *
 * To manage this we actually track two tail pointers:
 *  1) An 'aging' tail with an associated timestamp that is tracked until we
 *     can trust the corresponding data is visible to the CPU; at which point
 *     it is considered 'aged'.
 *  2) An 'aged' tail that can be used for read()ing.
 *
 * The two separate pointers let us decouple read()s from tail pointer aging.
 *
 * The tail pointers are checked and updated at a limited rate within a hrtimer
 * callback (the same callback that is used for delivering POLLIN events).
 *
 * Initially the tails are marked invalid with %INVALID_TAIL_PTR which
 * indicates that an updated tail pointer is needed.
 *
 * Most of the implementation details for this workaround are in
 * oa_buffer_check_unlocked() and _append_oa_reports().
 *
 * Note for posterity: previously the driver used to define an effective tail
 * pointer that lagged the real pointer by a 'tail margin' measured in bytes
 * derived from %OA_TAIL_MARGIN_NSEC and the configured sampling frequency.
 * This was flawed considering that the OA unit may also automatically generate
 * non-periodic reports (such as on context switch) or the OA unit may be
 * enabled without any periodic sampling.
 */
#define OA_TAIL_MARGIN_NSEC	100000ULL
#define INVALID_TAIL_PTR	0xffffffff
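
/* An aging illustration (a sketch; the absolute times are examples): at t0
 * the hrtimer callback samples hw_tail and starts aging it in
 * tails[!aged_idx]; at some t1 >= t0 + OA_TAIL_MARGIN_NSEC a later callback
 * promotes it to the aged tail that read()s may consume, and a new hw_tail
 * can then begin aging.
 */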

/* frequency in Hz for checking whether the OA unit has written new reports to
 * the circular OA buffer (i.e. 200Hz means the tail pointers are checked and
 * aged every 5ms)...
 */
#define POLL_FREQUENCY 200
#define POLL_PERIOD (NSEC_PER_SEC / POLL_FREQUENCY)

/* for sysctl proc_dointvec_minmax of dev.i915.perf_stream_paranoid */
static int zero;
static int one = 1;
static u32 i915_perf_stream_paranoid = true;

/* The maximum exponent the hardware accepts is 63 (essentially it selects one
 * of the 64bit timestamp bits to trigger reports from) but there's currently
 * no known use case for sampling as infrequently as once per 47 thousand years.
 *
 * Since the timestamps included in OA reports are only 32bits it seems
 * reasonable to limit the OA exponent where it's still possible to account for
 * overflow in OA report timestamps.
 */
#define OA_EXPONENT_MAX 31
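
/* For example (a sketch, assuming the driver derives the period as
 * (2 << exponent) timestamp ticks): with Haswell's 12.5MHz timestamp
 * frequency (80ns per tick), exponent 0 gives the minimum 160ns period
 * mentioned below, while OA_EXPONENT_MAX = 31 gives (2 << 31) * 80ns,
 * roughly 343 seconds.
 */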

#define INVALID_CTX_ID 0xffffffff

/* On Gen8+ automatically triggered OA reports include a 'reason' field... */
#define OAREPORT_REASON_MASK           0x3f
#define OAREPORT_REASON_SHIFT          19
#define OAREPORT_REASON_TIMER          (1<<0)
#define OAREPORT_REASON_CTX_SWITCH     (1<<3)
#define OAREPORT_REASON_CLK_RATIO      (1<<5)


/* For sysctl proc_dointvec_minmax of i915_oa_max_sample_rate
 *
 * The highest sampling frequency we can theoretically program the OA unit
 * with is always half the timestamp frequency: e.g. 6.25MHz for Haswell.
 *
 * Initialized just before we register the sysctl parameter.
 */
static int oa_sample_rate_hard_limit;

/* Theoretically we can program the OA unit to sample every 160ns but don't
 * allow that by default unless root...
 *
 * The default threshold of 100000Hz is based on perf's similar
 * kernel.perf_event_max_sample_rate sysctl parameter.
 */
static u32 i915_oa_max_sample_rate = 100000;

/* XXX: beware if future OA HW adds new report formats: the current code
 * assumes all reports have a power-of-two size and that ~(size - 1) can
 * be used as a mask to align the OA tail pointer.
 */
static struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = {
	[I915_OA_FORMAT_A13]	    = { 0, 64 },
	[I915_OA_FORMAT_A29]	    = { 1, 128 },
	[I915_OA_FORMAT_A13_B8_C8]  = { 2, 128 },
	/* A29_B8_C8 Disallowed as 192 bytes doesn't factor into buffer size */
	[I915_OA_FORMAT_B4_C8]	    = { 4, 64 },
	[I915_OA_FORMAT_A45_B8_C8]  = { 5, 256 },
	[I915_OA_FORMAT_B4_C8_A16]  = { 6, 128 },
	[I915_OA_FORMAT_C4_B8]	    = { 7, 64 },
};

static struct i915_oa_format gen8_plus_oa_formats[I915_OA_FORMAT_MAX] = {
	[I915_OA_FORMAT_A12]		    = { 0, 64 },
	[I915_OA_FORMAT_A12_B8_C8]	    = { 2, 128 },
	[I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
	[I915_OA_FORMAT_C4_B8]		    = { 7, 64 },
};
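
/* Note for illustration: every report size above divides the 16M buffer
 * exactly (e.g. 16M / 256 = 65536 A45_B8_C8 reports per buffer), which is
 * why the append loops below can assume a report never wraps past the end
 * of the OA buffer.
 */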

#define SAMPLE_OA_REPORT      (1<<0)

/**
 * struct perf_open_properties - for validated properties given to open a stream
 * @sample_flags: `DRM_I915_PERF_PROP_SAMPLE_*` properties are tracked as flags
 * @single_context: Whether a single or all gpu contexts should be monitored
 * @ctx_handle: A gem ctx handle for use with @single_context
 * @metrics_set: An ID for an OA unit metric set advertised via sysfs
 * @oa_format: An OA unit HW report format
 * @oa_periodic: Whether to enable periodic OA unit sampling
 * @oa_period_exponent: The OA unit sampling period is derived from this
 *
 * As read_properties_unlocked() enumerates and validates the properties given
 * to open a stream of metrics, the configuration is built up in this
 * structure, which starts out zero initialized.
 */
struct perf_open_properties {
	u32 sample_flags;

	u64 single_context:1;
	u64 ctx_handle;

	/* OA sampling state */
	int metrics_set;
	int oa_format;
	bool oa_periodic;
	int oa_period_exponent;
};

static void free_oa_config(struct drm_i915_private *dev_priv,
			   struct i915_oa_config *oa_config)
{
	/* The register lists may hold ERR_PTR() values from a failed
	 * allocation, which must not be passed to kfree().
	 */
	if (!IS_ERR(oa_config->flex_regs))
		kfree(oa_config->flex_regs);
	if (!IS_ERR(oa_config->b_counter_regs))
		kfree(oa_config->b_counter_regs);
	if (!IS_ERR(oa_config->mux_regs))
		kfree(oa_config->mux_regs);
	kfree(oa_config);
}

static void put_oa_config(struct drm_i915_private *dev_priv,
			  struct i915_oa_config *oa_config)
{
	if (!atomic_dec_and_test(&oa_config->ref_count))
		return;

	free_oa_config(dev_priv, oa_config);
}

static int get_oa_config(struct drm_i915_private *dev_priv,
			 int metrics_set,
			 struct i915_oa_config **out_config)
{
	int ret;

	if (metrics_set == 1) {
		*out_config = &dev_priv->perf.oa.test_config;
		atomic_inc(&dev_priv->perf.oa.test_config.ref_count);
		return 0;
	}

	ret = mutex_lock_interruptible(&dev_priv->perf.metrics_lock);
	if (ret)
		return ret;

	*out_config = idr_find(&dev_priv->perf.metrics_idr, metrics_set);
	if (!*out_config)
		ret = -EINVAL;
	else
		atomic_inc(&(*out_config)->ref_count);

	mutex_unlock(&dev_priv->perf.metrics_lock);

	return ret;
}
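
/* A typical pairing (sketch): callers hold a reference for as long as they
 * use the config, e.g. for the lifetime of a stream:
 *
 *	struct i915_oa_config *oa_config;
 *	int err = get_oa_config(dev_priv, props->metrics_set, &oa_config);
 *	if (err)
 *		return err;
 *	...
 *	put_oa_config(dev_priv, oa_config);
 */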

static u32 gen8_oa_hw_tail_read(struct drm_i915_private *dev_priv)
{
	return I915_READ(GEN8_OATAILPTR) & GEN8_OATAILPTR_MASK;
}

static u32 gen7_oa_hw_tail_read(struct drm_i915_private *dev_priv)
{
	u32 oastatus1 = I915_READ(GEN7_OASTATUS1);

	return oastatus1 & GEN7_OASTATUS1_TAIL_MASK;
}

/**
 * oa_buffer_check_unlocked - check for data and update tail ptr state
 * @dev_priv: i915 device instance
 *
 * This is either called via fops (for blocking reads in user ctx) or the poll
 * check hrtimer (atomic ctx) to check the OA buffer tail pointer and whether
 * there is data available for userspace to read.
 *
 * This function is central to providing a workaround for the OA unit tail
 * pointer having a race with respect to what data is visible to the CPU.
 * It is responsible for reading tail pointers from the hardware and giving
 * the pointers time to 'age' before they are made available for reading.
 * (See description of OA_TAIL_MARGIN_NSEC above for further details.)
 *
 * Besides returning true when there is data available to read() this function
 * also has the side effect of updating the oa_buffer.tails[], .aging_timestamp
 * and .aged_tail_idx state used for reading.
 *
 * Note: It's safe to read OA config state here unlocked, assuming that this is
 * only called while the stream is enabled, while the global OA configuration
 * can't be modified.
 *
 * Returns: %true if the OA buffer contains data, else %false
 */
static bool oa_buffer_check_unlocked(struct drm_i915_private *dev_priv)
{
	int report_size = dev_priv->perf.oa.oa_buffer.format_size;
	unsigned long flags;
	unsigned int aged_idx;
	u32 head, hw_tail, aged_tail, aging_tail;
	u64 now;

	/* We have to consider the (unlikely) possibility that read() errors
	 * could result in an OA buffer reset which might reset the head,
	 * tails[] and aged_tail state.
	 */
	spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	/* NB: The head we observe here might effectively be a little out of
	 * date (between head and tails[aged_idx].offset) if there is currently
	 * a read() in progress.
	 */
	head = dev_priv->perf.oa.oa_buffer.head;

	aged_idx = dev_priv->perf.oa.oa_buffer.aged_tail_idx;
	aged_tail = dev_priv->perf.oa.oa_buffer.tails[aged_idx].offset;
	aging_tail = dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset;

	hw_tail = dev_priv->perf.oa.ops.oa_hw_tail_read(dev_priv);

	/* The tail pointer increases in 64 byte increments,
	 * not in report_size steps...
	 */
	hw_tail &= ~(report_size - 1);

	now = ktime_get_mono_fast_ns();

	/* Update the aged tail
	 *
	 * Flip the tail pointer available for read()s once the aging tail is
	 * old enough to trust that the corresponding data will be visible to
	 * the CPU...
	 *
	 * Do this before updating the aging pointer in case we may be able to
	 * immediately start aging a new pointer too (if new data has become
	 * available) without needing to wait for a later hrtimer callback.
	 */
	if (aging_tail != INVALID_TAIL_PTR &&
	    ((now - dev_priv->perf.oa.oa_buffer.aging_timestamp) >
	     OA_TAIL_MARGIN_NSEC)) {

		aged_idx ^= 1;
		dev_priv->perf.oa.oa_buffer.aged_tail_idx = aged_idx;

		aged_tail = aging_tail;

		/* Mark that we need a new pointer to start aging... */
		dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset = INVALID_TAIL_PTR;
		aging_tail = INVALID_TAIL_PTR;
	}

	/* Update the aging tail
	 *
	 * We throttle aging tail updates until we have a new tail that
	 * represents >= one report more data than is already available for
	 * reading. This ensures there will be enough data for a successful
	 * read once this new pointer has aged and ensures we will give the new
	 * pointer time to age.
	 */
	if (aging_tail == INVALID_TAIL_PTR &&
	    (aged_tail == INVALID_TAIL_PTR ||
	     OA_TAKEN(hw_tail, aged_tail) >= report_size)) {
		struct i915_vma *vma = dev_priv->perf.oa.oa_buffer.vma;
		u32 gtt_offset = i915_ggtt_offset(vma);

		/* Be paranoid and do a bounds check on the pointer read back
		 * from hardware, just in case some spurious hardware condition
		 * could put the tail out of bounds...
		 */
		if (hw_tail >= gtt_offset &&
		    hw_tail < (gtt_offset + OA_BUFFER_SIZE)) {
			dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset =
				aging_tail = hw_tail;
			dev_priv->perf.oa.oa_buffer.aging_timestamp = now;
		} else {
			DRM_ERROR("Ignoring spurious out of range OA buffer tail pointer = %u\n",
				  hw_tail);
		}
	}

	spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	return aged_tail == INVALID_TAIL_PTR ?
		false : OA_TAKEN(aged_tail, head) >= report_size;
}

/**
 * append_oa_status - Appends a status record to a userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @type: The kind of status to report to userspace
 *
 * Writes a status record (such as `DRM_I915_PERF_RECORD_OA_REPORT_LOST`)
 * into the userspace read() buffer.
 *
 * The @buf @offset will only be updated on success.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int append_oa_status(struct i915_perf_stream *stream,
			    char __user *buf,
			    size_t count,
			    size_t *offset,
			    enum drm_i915_perf_record_type type)
{
	struct drm_i915_perf_record_header header = { type, 0, sizeof(header) };

	if ((count - *offset) < header.size)
		return -ENOSPC;

	if (copy_to_user(buf + *offset, &header, sizeof(header)))
		return -EFAULT;

	(*offset) += header.size;

	return 0;
}

/**
 * append_oa_sample - Copies a single OA report into userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @report: A single OA report to (optionally) include as part of the sample
 *
 * The contents of a sample are configured through `DRM_I915_PERF_PROP_SAMPLE_*`
 * properties when opening a stream, tracked as `stream->sample_flags`. This
 * function copies the requested components of a single sample to the given
 * read() @buf.
 *
 * The @buf @offset will only be updated on success.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int append_oa_sample(struct i915_perf_stream *stream,
			    char __user *buf,
			    size_t count,
			    size_t *offset,
			    const u8 *report)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;
	int report_size = dev_priv->perf.oa.oa_buffer.format_size;
	struct drm_i915_perf_record_header header;
	u32 sample_flags = stream->sample_flags;

	header.type = DRM_I915_PERF_RECORD_SAMPLE;
	header.pad = 0;
	header.size = stream->sample_size;

	if ((count - *offset) < header.size)
		return -ENOSPC;

	buf += *offset;
	if (copy_to_user(buf, &header, sizeof(header)))
		return -EFAULT;
	buf += sizeof(header);

	if (sample_flags & SAMPLE_OA_REPORT) {
		if (copy_to_user(buf, report, report_size))
			return -EFAULT;
	}

	(*offset) += header.size;

	return 0;
}
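
/*
 * For illustration only (a userspace sketch, not driver code): walking the
 * records returned by a read() of the stream fd; process_oa_report() is a
 * hypothetical helper:
 *
 *	uint8_t buf[4096];
 *	ssize_t len = read(stream_fd, buf, sizeof(buf));
 *	size_t i = 0;
 *	while (len > 0 && i + sizeof(struct drm_i915_perf_record_header) <= (size_t)len) {
 *		struct drm_i915_perf_record_header *h = (void *)(buf + i);
 *
 *		if (h->type == DRM_I915_PERF_RECORD_SAMPLE)
 *			process_oa_report((uint8_t *)(h + 1));
 *		else if (h->type == DRM_I915_PERF_RECORD_OA_BUFFER_LOST)
 *			fprintf(stderr, "OA buffer overflowed\n");
 *		i += h->size;
 *	}
 */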

/**
 * gen8_append_oa_reports - Copies all buffered OA reports into
 *			    userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Notably any error condition resulting in a short read (-%ENOSPC or
 * -%EFAULT) will be returned even though one or more records may
 * have been successfully copied. In this case it's up to the caller
 * to decide if the error should be squashed before returning to
 * userspace.
 *
 * Note: reports are consumed from the head, and appended to the
 * tail, so the tail chases the head?... If you think that's mad
 * and back-to-front you're not alone, but this follows the
 * Gen PRM naming convention.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int gen8_append_oa_reports(struct i915_perf_stream *stream,
				  char __user *buf,
				  size_t count,
				  size_t *offset)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;
	int report_size = dev_priv->perf.oa.oa_buffer.format_size;
	u8 *oa_buf_base = dev_priv->perf.oa.oa_buffer.vaddr;
	u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
	u32 mask = (OA_BUFFER_SIZE - 1);
	size_t start_offset = *offset;
	unsigned long flags;
	unsigned int aged_tail_idx;
	u32 head, tail;
	u32 taken;
	int ret = 0;

	if (WARN_ON(!stream->enabled))
		return -EIO;

	spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	head = dev_priv->perf.oa.oa_buffer.head;
	aged_tail_idx = dev_priv->perf.oa.oa_buffer.aged_tail_idx;
	tail = dev_priv->perf.oa.oa_buffer.tails[aged_tail_idx].offset;

	spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	/*
	 * An invalid tail pointer here means we're still waiting for the poll
	 * hrtimer callback to give us a pointer
	 */
	if (tail == INVALID_TAIL_PTR)
		return -EAGAIN;

	/*
	 * NB: oa_buffer.head/tail include the gtt_offset which we don't want
	 * while indexing relative to oa_buf_base.
	 */
	head -= gtt_offset;
	tail -= gtt_offset;

	/*
	 * An out of bounds or misaligned head or tail pointer implies a driver
	 * bug since we validate + align the tail pointers we read from the
	 * hardware and we are in full control of the head pointer which should
	 * only be incremented by multiples of the report size (notably also
	 * all a power of two).
	 */
	if (WARN_ONCE(head > OA_BUFFER_SIZE || head % report_size ||
		      tail > OA_BUFFER_SIZE || tail % report_size,
		      "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
		      head, tail))
		return -EIO;


	for (/* none */;
	     (taken = OA_TAKEN(tail, head));
	     head = (head + report_size) & mask) {
		u8 *report = oa_buf_base + head;
		u32 *report32 = (void *)report;
		u32 ctx_id;
		u32 reason;

		/*
		 * All the report sizes factor neatly into the buffer
		 * size so we never expect to see a report split
		 * between the beginning and end of the buffer.
		 *
		 * Given the initial alignment check a misalignment
		 * here would imply a driver bug that would result
		 * in an overrun.
		 */
		if (WARN_ON((OA_BUFFER_SIZE - head) < report_size)) {
			DRM_ERROR("Spurious OA head ptr: non-integral report offset\n");
			break;
		}

		/*
		 * The reason field includes flags identifying what
		 * triggered this specific report (mostly timer
		 * triggered or e.g. due to a context switch).
		 *
		 * This field is never expected to be zero so we can
		 * check that the report isn't invalid before copying
		 * it to userspace...
		 */
		reason = ((report32[0] >> OAREPORT_REASON_SHIFT) &
			  OAREPORT_REASON_MASK);
		if (reason == 0) {
			if (__ratelimit(&dev_priv->perf.oa.spurious_report_rs))
				DRM_NOTE("Skipping spurious, invalid OA report\n");
			continue;
		}

		/*
		 * XXX: Just keep the lower 21 bits for now since I'm not
		 * entirely sure if the HW touches any of the higher bits in
		 * this field
		 */
		ctx_id = report32[2] & 0x1fffff;

		/*
		 * Squash whatever is in the CTX_ID field if it's marked as
		 * invalid to be sure we avoid false-positive, single-context
		 * filtering below...
		 *
		 * Note: we don't clear the valid_ctx_bit so userspace can
		 * understand that the ID has been squashed by the kernel.
		 */
		if (!(report32[0] & dev_priv->perf.oa.gen8_valid_ctx_bit))
			ctx_id = report32[2] = INVALID_CTX_ID;

		/*
		 * NB: For Gen 8 the OA unit no longer supports clock gating
		 * off for a specific context and the kernel can't securely
		 * stop the counters from updating as system-wide / global
		 * values.
		 *
		 * Automatic reports now include a context ID so reports can be
		 * filtered on the cpu but it's not worth trying to
		 * automatically subtract/hide counter progress for other
		 * contexts while filtering since we can't stop userspace
		 * issuing MI_REPORT_PERF_COUNT commands which would still
		 * provide a side-band view of the real values.
		 *
		 * To allow userspace (such as Mesa/GL_INTEL_performance_query)
		 * to normalize counters for a single filtered context then it
		 * needs to be forwarded bookend context-switch reports so that
		 * it can track switches in between MI_REPORT_PERF_COUNT
		 * commands and can itself subtract/ignore the progress of
		 * counters associated with other contexts. Note that the
		 * hardware automatically triggers reports when switching to a
		 * new context which are tagged with the ID of the newly active
		 * context. To avoid the complexity (and likely fragility) of
		 * reading ahead while parsing reports to try and minimize
		 * forwarding redundant context switch reports (i.e. between
		 * other, unrelated contexts) we simply elect to forward them
		 * all.
		 *
		 * We don't rely solely on the reason field to identify context
		 * switches since it's not uncommon for periodic samples to
		 * identify a switch before any 'context switch' report.
		 */
		if (!dev_priv->perf.oa.exclusive_stream->ctx ||
		    dev_priv->perf.oa.specific_ctx_id == ctx_id ||
		    (dev_priv->perf.oa.oa_buffer.last_ctx_id ==
		     dev_priv->perf.oa.specific_ctx_id) ||
		    reason & OAREPORT_REASON_CTX_SWITCH) {

			/*
			 * While filtering for a single context we avoid
			 * leaking the IDs of other contexts.
			 */
			if (dev_priv->perf.oa.exclusive_stream->ctx &&
			    dev_priv->perf.oa.specific_ctx_id != ctx_id) {
				report32[2] = INVALID_CTX_ID;
			}

			ret = append_oa_sample(stream, buf, count, offset,
					       report);
			if (ret)
				break;

			dev_priv->perf.oa.oa_buffer.last_ctx_id = ctx_id;
		}

		/*
		 * The above reason field sanity check is based on
		 * the assumption that the OA buffer is initially
		 * zeroed and we reset the field after copying so the
		 * check is still meaningful once old reports start
		 * being overwritten.
		 */
		report32[0] = 0;
	}

	if (start_offset != *offset) {
		spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

		/*
		 * We removed the gtt_offset for the copy loop above, indexing
		 * relative to oa_buf_base so put back here...
		 */
		head += gtt_offset;

		I915_WRITE(GEN8_OAHEADPTR, head & GEN8_OAHEADPTR_MASK);
		dev_priv->perf.oa.oa_buffer.head = head;

		spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
	}

	return ret;
}

/**
 * gen8_oa_read - copy status records then buffered OA reports
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Checks OA unit status registers and if necessary appends corresponding
 * status records for userspace (such as for a buffer full condition) and then
 * initiates appending any buffered OA reports.
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * NB: some data may be successfully copied to the userspace buffer
 * even if an error is returned, and this is reflected in the
 * updated @offset.
 *
 * Returns: zero on success or a negative error code
 */
static int gen8_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;
	u32 oastatus;
	int ret;

	if (WARN_ON(!dev_priv->perf.oa.oa_buffer.vaddr))
		return -EIO;

	oastatus = I915_READ(GEN8_OASTATUS);

	/*
	 * We treat OABUFFER_OVERFLOW as a significant error:
	 *
	 * Although theoretically we could handle this more gracefully
	 * sometimes, some Gens don't correctly suppress certain
	 * automatically triggered reports in this condition and so we
	 * have to assume that old reports are now being trampled
	 * over.
	 *
	 * Considering how we don't currently give userspace control
	 * over the OA buffer size and always configure a large 16MB
	 * buffer, a buffer overflow most likely indicates that
	 * something has gone quite badly wrong.
	 */
	if (oastatus & GEN8_OASTATUS_OABUFFER_OVERFLOW) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
		if (ret)
			return ret;

		DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
			  dev_priv->perf.oa.period_exponent);

		dev_priv->perf.oa.ops.oa_disable(dev_priv);
		dev_priv->perf.oa.ops.oa_enable(dev_priv);

		/*
		 * Note: .oa_enable() is expected to re-init the oabuffer and
		 * reset GEN8_OASTATUS for us
		 */
		oastatus = I915_READ(GEN8_OASTATUS);
	}

	if (oastatus & GEN8_OASTATUS_REPORT_LOST) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
		if (ret)
			return ret;
		I915_WRITE(GEN8_OASTATUS,
			   oastatus & ~GEN8_OASTATUS_REPORT_LOST);
	}

	return gen8_append_oa_reports(stream, buf, count, offset);
}

/**
 * gen7_append_oa_reports - Copies all buffered OA reports into
 *			    userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Notably any error condition resulting in a short read (-%ENOSPC or
 * -%EFAULT) will be returned even though one or more records may
 * have been successfully copied. In this case it's up to the caller
 * to decide if the error should be squashed before returning to
 * userspace.
 *
 * Note: reports are consumed from the head, and appended to the
 * tail, so the tail chases the head?... If you think that's mad
 * and back-to-front you're not alone, but this follows the
 * Gen PRM naming convention.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int gen7_append_oa_reports(struct i915_perf_stream *stream,
				  char __user *buf,
				  size_t count,
				  size_t *offset)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;
	int report_size = dev_priv->perf.oa.oa_buffer.format_size;
	u8 *oa_buf_base = dev_priv->perf.oa.oa_buffer.vaddr;
	u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
	u32 mask = (OA_BUFFER_SIZE - 1);
	size_t start_offset = *offset;
	unsigned long flags;
	unsigned int aged_tail_idx;
	u32 head, tail;
	u32 taken;
	int ret = 0;

	if (WARN_ON(!stream->enabled))
		return -EIO;

	spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	head = dev_priv->perf.oa.oa_buffer.head;
	aged_tail_idx = dev_priv->perf.oa.oa_buffer.aged_tail_idx;
	tail = dev_priv->perf.oa.oa_buffer.tails[aged_tail_idx].offset;

	spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	/* An invalid tail pointer here means we're still waiting for the poll
	 * hrtimer callback to give us a pointer
	 */
	if (tail == INVALID_TAIL_PTR)
		return -EAGAIN;

	/* NB: oa_buffer.head/tail include the gtt_offset which we don't want
	 * while indexing relative to oa_buf_base.
	 */
	head -= gtt_offset;
	tail -= gtt_offset;

	/* An out of bounds or misaligned head or tail pointer implies a driver
	 * bug since we validate + align the tail pointers we read from the
	 * hardware and we are in full control of the head pointer which should
	 * only be incremented by multiples of the report size (notably also
	 * all a power of two).
	 */
	if (WARN_ONCE(head > OA_BUFFER_SIZE || head % report_size ||
		      tail > OA_BUFFER_SIZE || tail % report_size,
		      "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
		      head, tail))
		return -EIO;


	for (/* none */;
	     (taken = OA_TAKEN(tail, head));
	     head = (head + report_size) & mask) {
		u8 *report = oa_buf_base + head;
		u32 *report32 = (void *)report;

		/* All the report sizes factor neatly into the buffer
		 * size so we never expect to see a report split
		 * between the beginning and end of the buffer.
		 *
		 * Given the initial alignment check a misalignment
		 * here would imply a driver bug that would result
		 * in an overrun.
		 */
		if (WARN_ON((OA_BUFFER_SIZE - head) < report_size)) {
			DRM_ERROR("Spurious OA head ptr: non-integral report offset\n");
			break;
		}

		/* The report-ID field for periodic samples includes
		 * some undocumented flags related to what triggered
		 * the report and is never expected to be zero so we
		 * can check that the report isn't invalid before
		 * copying it to userspace...
		 */
		if (report32[0] == 0) {
			if (__ratelimit(&dev_priv->perf.oa.spurious_report_rs))
				DRM_NOTE("Skipping spurious, invalid OA report\n");
			continue;
		}

		ret = append_oa_sample(stream, buf, count, offset, report);
		if (ret)
			break;

		/* The above report-id field sanity check is based on
		 * the assumption that the OA buffer is initially
		 * zeroed and we reset the field after copying so the
		 * check is still meaningful once old reports start
		 * being overwritten.
		 */
		report32[0] = 0;
	}

	if (start_offset != *offset) {
		spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

		/* We removed the gtt_offset for the copy loop above, indexing
		 * relative to oa_buf_base so put back here...
		 */
		head += gtt_offset;

		I915_WRITE(GEN7_OASTATUS2,
			   ((head & GEN7_OASTATUS2_HEAD_MASK) |
			    OA_MEM_SELECT_GGTT));
		dev_priv->perf.oa.oa_buffer.head = head;

		spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
	}

	return ret;
}

/**
 * gen7_oa_read - copy status records then buffered OA reports
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Checks Gen 7 specific OA unit status registers and if necessary appends
 * corresponding status records for userspace (such as for a buffer full
 * condition) and then initiates appending any buffered OA reports.
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * Returns: zero on success or a negative error code
 */
static int gen7_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;
	u32 oastatus1;
	int ret;

	if (WARN_ON(!dev_priv->perf.oa.oa_buffer.vaddr))
		return -EIO;

	oastatus1 = I915_READ(GEN7_OASTATUS1);

	/* XXX: On Haswell we don't have a safe way to clear oastatus1
	 * bits while the OA unit is enabled (while the tail pointer
	 * may be updated asynchronously) so we ignore status bits
	 * that have already been reported to userspace.
	 */
	oastatus1 &= ~dev_priv->perf.oa.gen7_latched_oastatus1;

	/* We treat OABUFFER_OVERFLOW as a significant error:
	 *
	 * - The status can be interpreted to mean that the buffer is
	 *   currently full (with a higher precedence than OA_TAKEN()
	 *   which will start to report a near-empty buffer after an
	 *   overflow) but it's awkward that we can't clear the status
	 *   on Haswell, so without a reset we won't be able to catch
	 *   the state again.
	 *
	 * - Since it also implies the HW has started overwriting old
	 *   reports it may also affect our sanity checks for invalid
	 *   reports when copying to userspace that assume new reports
	 *   are being written to cleared memory.
	 *
	 * - In the future we may want to introduce a flight recorder
	 *   mode where the driver will automatically maintain a safe
	 *   guard band between head/tail, avoiding this overflow
	 *   condition, but we avoid the added driver complexity for
	 *   now.
	 */
	if (unlikely(oastatus1 & GEN7_OASTATUS1_OABUFFER_OVERFLOW)) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
		if (ret)
			return ret;

		DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
			  dev_priv->perf.oa.period_exponent);

		dev_priv->perf.oa.ops.oa_disable(dev_priv);
		dev_priv->perf.oa.ops.oa_enable(dev_priv);

		oastatus1 = I915_READ(GEN7_OASTATUS1);
	}

	if (unlikely(oastatus1 & GEN7_OASTATUS1_REPORT_LOST)) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
		if (ret)
			return ret;
		dev_priv->perf.oa.gen7_latched_oastatus1 |=
			GEN7_OASTATUS1_REPORT_LOST;
	}

	return gen7_append_oa_reports(stream, buf, count, offset);
}

/**
 * i915_oa_wait_unlocked - handles blocking IO until OA data available
 * @stream: An i915-perf stream opened for OA metrics
 *
 * Called when userspace tries to read() from a blocking stream FD opened
 * for OA metrics. It waits until the hrtimer callback finds a non-empty
 * OA buffer and wakes us.
 *
 * Note: it's acceptable to have this return with some false positives
 * since any subsequent read handling will return -EAGAIN if there isn't
 * really data ready for userspace yet.
 *
 * Returns: zero on success or a negative error code
 */
static int i915_oa_wait_unlocked(struct i915_perf_stream *stream)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;

	/* We would wait indefinitely if periodic sampling is not enabled */
	if (!dev_priv->perf.oa.periodic)
		return -EIO;

	return wait_event_interruptible(dev_priv->perf.oa.poll_wq,
					oa_buffer_check_unlocked(dev_priv));
}

/**
 * i915_oa_poll_wait - call poll_wait() for an OA stream poll()
 * @stream: An i915-perf stream opened for OA metrics
 * @file: An i915 perf stream file
 * @wait: poll() state table
 *
 * For handling userspace polling on an i915 perf stream opened for OA metrics,
 * this starts a poll_wait with the wait queue that our hrtimer callback wakes
 * when it sees data ready to read in the circular OA buffer.
 */
static void i915_oa_poll_wait(struct i915_perf_stream *stream,
			      struct file *file,
			      poll_table *wait)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;

	poll_wait(file, &dev_priv->perf.oa.poll_wq, wait);
}

/**
 * i915_oa_read - just calls through to &i915_oa_ops->read
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * Returns: zero on success or a negative error code
 */
static int i915_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;

	return dev_priv->perf.oa.ops.read(stream, buf, count, offset);
}

/**
 * oa_get_render_ctx_id - determine and hold ctx hw id
 * @stream: An i915-perf stream opened for OA metrics
 *
 * Determine the render context hw id, and ensure it remains fixed for the
 * lifetime of the stream. This ensures that we don't have to worry about
 * updating the context ID in OACONTROL on the fly.
 *
 * Returns: zero on success or a negative error code
 */
static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;

	if (i915_modparams.enable_execlists) {
		dev_priv->perf.oa.specific_ctx_id = stream->ctx->hw_id;
	} else {
		struct intel_engine_cs *engine = dev_priv->engine[RCS];
		struct intel_ring *ring;
		int ret;

		ret = i915_mutex_lock_interruptible(&dev_priv->drm);
		if (ret)
			return ret;

		/*
		 * As the ID is the gtt offset of the context's vma we
		 * pin the vma to ensure the ID remains fixed.
		 *
		 * NB: implied RCS engine...
		 */
		ring = engine->context_pin(engine, stream->ctx);
		mutex_unlock(&dev_priv->drm.struct_mutex);
		if (IS_ERR(ring))
			return PTR_ERR(ring);

		/*
		 * Explicitly track the ID (instead of calling
		 * i915_ggtt_offset() on the fly) considering the difference
		 * with gen8+ and execlists
		 */
		dev_priv->perf.oa.specific_ctx_id =
			i915_ggtt_offset(stream->ctx->engine[engine->id].state);
	}

	return 0;
}

/**
 * oa_put_render_ctx_id - counterpart to oa_get_render_ctx_id releases hold
 * @stream: An i915-perf stream opened for OA metrics
 *
 * In case anything needed doing to ensure the context HW ID would remain valid
 * for the lifetime of the stream, then that can be undone here.
 */
static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;

	if (i915_modparams.enable_execlists) {
		dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
	} else {
		struct intel_engine_cs *engine = dev_priv->engine[RCS];

		mutex_lock(&dev_priv->drm.struct_mutex);

		dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
		engine->context_unpin(engine, stream->ctx);

		mutex_unlock(&dev_priv->drm.struct_mutex);
	}
}

static void
free_oa_buffer(struct drm_i915_private *i915)
{
	mutex_lock(&i915->drm.struct_mutex);

	i915_gem_object_unpin_map(i915->perf.oa.oa_buffer.vma->obj);
	i915_vma_unpin(i915->perf.oa.oa_buffer.vma);
	i915_gem_object_put(i915->perf.oa.oa_buffer.vma->obj);

	i915->perf.oa.oa_buffer.vma = NULL;
	i915->perf.oa.oa_buffer.vaddr = NULL;

	mutex_unlock(&i915->drm.struct_mutex);
}

static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
{
	struct drm_i915_private *dev_priv = stream->dev_priv;

	BUG_ON(stream != dev_priv->perf.oa.exclusive_stream);

	/*
	 * Unset exclusive_stream first, it will be checked while disabling
	 * the metric set on gen8+.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);
	dev_priv->perf.oa.exclusive_stream = NULL;
	mutex_unlock(&dev_priv->drm.struct_mutex);

	dev_priv->perf.oa.ops.disable_metric_set(dev_priv);

	free_oa_buffer(dev_priv);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	intel_runtime_pm_put(dev_priv);

	if (stream->ctx)
		oa_put_render_ctx_id(stream);

	put_oa_config(dev_priv, stream->oa_config);

	if (dev_priv->perf.oa.spurious_report_rs.missed) {
		DRM_NOTE("%d spurious OA report notices suppressed due to ratelimiting\n",
			 dev_priv->perf.oa.spurious_report_rs.missed);
	}
}

static void gen7_init_oa_buffer(struct drm_i915_private *dev_priv)
{
	u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	/* Pre-DevBDW: OABUFFER must be set with counters off,
	 * before OASTATUS1, but after OASTATUS2
	 */
	I915_WRITE(GEN7_OASTATUS2, gtt_offset | OA_MEM_SELECT_GGTT); /* head */
	dev_priv->perf.oa.oa_buffer.head = gtt_offset;

	I915_WRITE(GEN7_OABUFFER, gtt_offset);

	I915_WRITE(GEN7_OASTATUS1, gtt_offset | OABUFFER_SIZE_16M); /* tail */

	/* Mark that we need updated tail pointers to read from... */
	dev_priv->perf.oa.oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
	dev_priv->perf.oa.oa_buffer.tails[1].offset = INVALID_TAIL_PTR;

	spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	/* On Haswell we have to track which OASTATUS1 flags we've
	 * already seen since they can't be cleared while periodic
	 * sampling is enabled.
	 */
	dev_priv->perf.oa.gen7_latched_oastatus1 = 0;

	/* NB: although the OA buffer will initially be allocated
	 * zeroed via shmfs (and so this memset is redundant when
	 * first allocating), we may re-init the OA buffer, either
	 * when re-enabling a stream or in error/reset paths.
	 *
	 * The reason we clear the buffer for each re-init is for the
	 * sanity check in gen7_append_oa_reports() that looks at the
	 * report-id field to make sure it's non-zero which relies on
	 * the assumption that new reports are being written to zeroed
	 * memory...
	 */
	memset(dev_priv->perf.oa.oa_buffer.vaddr, 0, OA_BUFFER_SIZE);

	/* Maybe make ->pollin per-stream state if we support multiple
	 * concurrent streams in the future.
	 */
	dev_priv->perf.oa.pollin = false;
}

static void gen8_init_oa_buffer(struct drm_i915_private *dev_priv)
{
	u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	I915_WRITE(GEN8_OASTATUS, 0);
	I915_WRITE(GEN8_OAHEADPTR, gtt_offset);
	dev_priv->perf.oa.oa_buffer.head = gtt_offset;

	I915_WRITE(GEN8_OABUFFER_UDW, 0);

	/*
	 * PRM says:
	 *
	 *  "This MMIO must be set before the OATAILPTR
	 *  register and after the OAHEADPTR register. This is
	 *  to enable proper functionality of the overflow
	 *  bit."
	 */
	I915_WRITE(GEN8_OABUFFER, gtt_offset |
		   OABUFFER_SIZE_16M | OA_MEM_SELECT_GGTT);
	I915_WRITE(GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK);

	/* Mark that we need updated tail pointers to read from... */
	dev_priv->perf.oa.oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
	dev_priv->perf.oa.oa_buffer.tails[1].offset = INVALID_TAIL_PTR;

	/*
	 * Reset state used to recognise context switches, affecting which
	 * reports we will forward to userspace while filtering for a single
	 * context.
	 */
	dev_priv->perf.oa.oa_buffer.last_ctx_id = INVALID_CTX_ID;

	spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

	/*
	 * NB: although the OA buffer will initially be allocated
	 * zeroed via shmfs (and so this memset is redundant when
	 * first allocating), we may re-init the OA buffer, either
	 * when re-enabling a stream or in error/reset paths.
	 *
	 * The reason we clear the buffer for each re-init is for the
	 * sanity check in gen8_append_oa_reports() that looks at the
	 * reason field to make sure it's non-zero which relies on
	 * the assumption that new reports are being written to zeroed
	 * memory...
	 */
	memset(dev_priv->perf.oa.oa_buffer.vaddr, 0, OA_BUFFER_SIZE);

	/*
	 * Maybe make ->pollin per-stream state if we support multiple
	 * concurrent streams in the future.
	 */
	dev_priv->perf.oa.pollin = false;
}
1430 
1431 static int alloc_oa_buffer(struct drm_i915_private *dev_priv)
1432 {
1433 	struct drm_i915_gem_object *bo;
1434 	struct i915_vma *vma;
1435 	int ret;
1436 
1437 	if (WARN_ON(dev_priv->perf.oa.oa_buffer.vma))
1438 		return -ENODEV;
1439 
1440 	ret = i915_mutex_lock_interruptible(&dev_priv->drm);
1441 	if (ret)
1442 		return ret;
1443 
1444 	BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE);
1445 	BUILD_BUG_ON(OA_BUFFER_SIZE < SZ_128K || OA_BUFFER_SIZE > SZ_16M);
1446 
1447 	bo = i915_gem_object_create(dev_priv, OA_BUFFER_SIZE);
1448 	if (IS_ERR(bo)) {
1449 		DRM_ERROR("Failed to allocate OA buffer\n");
1450 		ret = PTR_ERR(bo);
1451 		goto unlock;
1452 	}
1453 
1454 	ret = i915_gem_object_set_cache_level(bo, I915_CACHE_LLC);
1455 	if (ret)
1456 		goto err_unref;
1457 
1458 	/* PreHSW required 512K alignment, HSW requires 16M */
1459 	vma = i915_gem_object_ggtt_pin(bo, NULL, 0, SZ_16M, 0);
1460 	if (IS_ERR(vma)) {
1461 		ret = PTR_ERR(vma);
1462 		goto err_unref;
1463 	}
1464 	dev_priv->perf.oa.oa_buffer.vma = vma;
1465 
1466 	dev_priv->perf.oa.oa_buffer.vaddr =
1467 		i915_gem_object_pin_map(bo, I915_MAP_WB);
1468 	if (IS_ERR(dev_priv->perf.oa.oa_buffer.vaddr)) {
1469 		ret = PTR_ERR(dev_priv->perf.oa.oa_buffer.vaddr);
1470 		goto err_unpin;
1471 	}
1472 
1473 	dev_priv->perf.oa.ops.init_oa_buffer(dev_priv);
1474 
1475 	DRM_DEBUG_DRIVER("OA Buffer initialized, gtt offset = 0x%x, vaddr = %p\n",
1476 			 i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma),
1477 			 dev_priv->perf.oa.oa_buffer.vaddr);
1478 
1479 	goto unlock;
1480 
1481 err_unpin:
1482 	__i915_vma_unpin(vma);
1483 
1484 err_unref:
1485 	i915_gem_object_put(bo);
1486 
1487 	dev_priv->perf.oa.oa_buffer.vaddr = NULL;
1488 	dev_priv->perf.oa.oa_buffer.vma = NULL;
1489 
1490 unlock:
1491 	mutex_unlock(&dev_priv->drm.struct_mutex);
1492 	return ret;
1493 }
1494 
1495 static void config_oa_regs(struct drm_i915_private *dev_priv,
1496 			   const struct i915_oa_reg *regs,
1497 			   u32 n_regs)
1498 {
1499 	u32 i;
1500 
1501 	for (i = 0; i < n_regs; i++) {
1502 		const struct i915_oa_reg *reg = regs + i;
1503 
1504 		I915_WRITE(reg->addr, reg->value);
1505 	}
1506 }
1507 
1508 static int hsw_enable_metric_set(struct drm_i915_private *dev_priv,
1509 				 const struct i915_oa_config *oa_config)
1510 {
1511 	/* PRM:
1512 	 *
1513 	 * OA unit is using “crclk” for its functionality. When trunk
1514 	 * level clock gating takes place, OA clock would be gated,
1515 	 * unable to count the events from non-render clock domain.
1516 	 * Render clock gating must be disabled when OA is enabled to
1517 	 * count the events from non-render domain. Unit level clock
1518 	 * gating for RCS should also be disabled.
1519 	 */
1520 	I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
1521 				    ~GEN7_DOP_CLOCK_GATE_ENABLE));
1522 	I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) |
1523 				  GEN6_CSUNIT_CLOCK_GATE_DISABLE));
1524 
1525 	config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len);
1526 
1527 	/* It apparently takes a fairly long time for a new MUX
1528 	 * configuration to be be applied after these register writes.
1529 	 * This delay duration was derived empirically based on the
1530 	 * render_basic config but hopefully it covers the maximum
1531 	 * configuration latency.
1532 	 *
1533 	 * As a fallback, the checks in _append_oa_reports() to skip
1534 	 * invalid OA reports do also seem to work to discard reports
1535 	 * generated before this config has completed - albeit not
1536 	 * silently.
1537 	 *
1538 	 * Unfortunately this is essentially a magic number, since we
1539 	 * don't currently know of a reliable mechanism for predicting
1540 	 * how long the MUX config will take to apply and besides
1541 	 * seeing invalid reports we don't know of a reliable way to
1542 	 * explicitly check that the MUX config has landed.
1543 	 *
1544 	 * It's even possible we've miss characterized the underlying
1545 	 * problem - it just seems like the simplest explanation why
1546 	 * a delay at this location would mitigate any invalid reports.
1547 	 */
1548 	usleep_range(15000, 20000);
1549 
1550 	config_oa_regs(dev_priv, oa_config->b_counter_regs,
1551 		       oa_config->b_counter_regs_len);
1552 
1553 	return 0;
1554 }
1555 
1556 static void hsw_disable_metric_set(struct drm_i915_private *dev_priv)
1557 {
1558 	I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) &
1559 				  ~GEN6_CSUNIT_CLOCK_GATE_DISABLE));
1560 	I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) |
1561 				    GEN7_DOP_CLOCK_GATE_ENABLE));
1562 
1563 	I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) &
1564 				      ~GT_NOA_ENABLE));
1565 }
1566 
1567 /*
1568  * NB: It must always remain pointer safe to run this even if the OA unit
1569  * has been disabled.
1570  *
1571  * It's fine to put out-of-date values into these per-context registers
1572  * in the case that the OA unit has been disabled.
1573  */
1574 static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx,
1575 					   u32 *reg_state,
1576 					   const struct i915_oa_config *oa_config)
1577 {
1578 	struct drm_i915_private *dev_priv = ctx->i915;
1579 	u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset;
1580 	u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset;
1581 	/* The MMIO offsets for Flex EU registers aren't contiguous */
1582 	u32 flex_mmio[] = {
1583 		i915_mmio_reg_offset(EU_PERF_CNTL0),
1584 		i915_mmio_reg_offset(EU_PERF_CNTL1),
1585 		i915_mmio_reg_offset(EU_PERF_CNTL2),
1586 		i915_mmio_reg_offset(EU_PERF_CNTL3),
1587 		i915_mmio_reg_offset(EU_PERF_CNTL4),
1588 		i915_mmio_reg_offset(EU_PERF_CNTL5),
1589 		i915_mmio_reg_offset(EU_PERF_CNTL6),
1590 	};
1591 	int i;
1592 
1593 	reg_state[ctx_oactxctrl] = i915_mmio_reg_offset(GEN8_OACTXCONTROL);
1594 	reg_state[ctx_oactxctrl+1] = (dev_priv->perf.oa.period_exponent <<
1595 				      GEN8_OA_TIMER_PERIOD_SHIFT) |
1596 				     (dev_priv->perf.oa.periodic ?
1597 				      GEN8_OA_TIMER_ENABLE : 0) |
1598 				     GEN8_OA_COUNTER_RESUME;
1599 
1600 	for (i = 0; i < ARRAY_SIZE(flex_mmio); i++) {
1601 		u32 state_offset = ctx_flexeu0 + i * 2;
1602 		u32 mmio = flex_mmio[i];
1603 
1604 		/*
1605 		 * This arbitrary default will select the 'EU FPU0 Pipeline
1606 		 * Active' event. In the future it's anticipated that there
1607 		 * will be an explicit 'No Event' we can select, but not yet...
1608 		 */
1609 		u32 value = 0;
1610 
1611 		if (oa_config) {
1612 			u32 j;
1613 
1614 			for (j = 0; j < oa_config->flex_regs_len; j++) {
1615 				if (i915_mmio_reg_offset(oa_config->flex_regs[j].addr) == mmio) {
1616 					value = oa_config->flex_regs[j].value;
1617 					break;
1618 				}
1619 			}
1620 		}
1621 
1622 		reg_state[state_offset] = mmio;
1623 		reg_state[state_offset+1] = value;
1624 	}
1625 }
1626 
1627 /*
1628  * Same as gen8_update_reg_state_unlocked only through the batchbuffer. This
1629  * is only used by the kernel context.
1630  */
1631 static int gen8_emit_oa_config(struct drm_i915_gem_request *req,
1632 			       const struct i915_oa_config *oa_config)
1633 {
1634 	struct drm_i915_private *dev_priv = req->i915;
1635 	/* The MMIO offsets for Flex EU registers aren't contiguous */
1636 	u32 flex_mmio[] = {
1637 		i915_mmio_reg_offset(EU_PERF_CNTL0),
1638 		i915_mmio_reg_offset(EU_PERF_CNTL1),
1639 		i915_mmio_reg_offset(EU_PERF_CNTL2),
1640 		i915_mmio_reg_offset(EU_PERF_CNTL3),
1641 		i915_mmio_reg_offset(EU_PERF_CNTL4),
1642 		i915_mmio_reg_offset(EU_PERF_CNTL5),
1643 		i915_mmio_reg_offset(EU_PERF_CNTL6),
1644 	};
1645 	u32 *cs;
1646 	int i;
1647 
1648 	cs = intel_ring_begin(req, ARRAY_SIZE(flex_mmio) * 2 + 4);
1649 	if (IS_ERR(cs))
1650 		return PTR_ERR(cs);
1651 
1652 	*cs++ = MI_LOAD_REGISTER_IMM(ARRAY_SIZE(flex_mmio) + 1);
1653 
1654 	*cs++ = i915_mmio_reg_offset(GEN8_OACTXCONTROL);
1655 	*cs++ = (dev_priv->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
1656 		(dev_priv->perf.oa.periodic ? GEN8_OA_TIMER_ENABLE : 0) |
1657 		GEN8_OA_COUNTER_RESUME;
1658 
1659 	for (i = 0; i < ARRAY_SIZE(flex_mmio); i++) {
1660 		u32 mmio = flex_mmio[i];
1661 
1662 		/*
1663 		 * This arbitrary default will select the 'EU FPU0 Pipeline
1664 		 * Active' event. In the future it's anticipated that there
1665 		 * will be an explicit 'No Event' we can select, but not
1666 		 * yet...
1667 		 */
1668 		u32 value = 0;
1669 
1670 		if (oa_config) {
1671 			u32 j;
1672 
1673 			for (j = 0; j < oa_config->flex_regs_len; j++) {
1674 				if (i915_mmio_reg_offset(oa_config->flex_regs[j].addr) == mmio) {
1675 					value = oa_config->flex_regs[j].value;
1676 					break;
1677 				}
1678 			}
1679 		}
1680 
1681 		*cs++ = mmio;
1682 		*cs++ = value;
1683 	}
1684 
1685 	*cs++ = MI_NOOP;
1686 	intel_ring_advance(req, cs);
1687 
1688 	return 0;
1689 }
1690 
1691 static int gen8_switch_to_updated_kernel_context(struct drm_i915_private *dev_priv,
1692 						 const struct i915_oa_config *oa_config)
1693 {
1694 	struct intel_engine_cs *engine = dev_priv->engine[RCS];
1695 	struct i915_gem_timeline *timeline;
1696 	struct drm_i915_gem_request *req;
1697 	int ret;
1698 
1699 	lockdep_assert_held(&dev_priv->drm.struct_mutex);
1700 
1701 	i915_gem_retire_requests(dev_priv);
1702 
1703 	req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
1704 	if (IS_ERR(req))
1705 		return PTR_ERR(req);
1706 
1707 	ret = gen8_emit_oa_config(req, oa_config);
1708 	if (ret) {
1709 		i915_add_request(req);
1710 		return ret;
1711 	}
1712 
1713 	/* Queue this switch after all other activity */
1714 	list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
1715 		struct drm_i915_gem_request *prev;
1716 		struct intel_timeline *tl;
1717 
1718 		tl = &timeline->engine[engine->id];
1719 		prev = i915_gem_active_raw(&tl->last_request,
1720 					   &dev_priv->drm.struct_mutex);
1721 		if (prev)
1722 			i915_sw_fence_await_sw_fence_gfp(&req->submit,
1723 							 &prev->submit,
1724 							 GFP_KERNEL);
1725 	}
1726 
1727 	ret = i915_switch_context(req);
1728 	i915_add_request(req);
1729 
1730 	return ret;
1731 }
1732 
1733 /*
1734  * Manages updating the per-context aspects of the OA stream
1735  * configuration across all contexts.
1736  *
1737  * The awkward consideration here is that OACTXCONTROL controls the
1738  * exponent for periodic sampling which is primarily used for system
1739  * wide profiling where we'd like a consistent sampling period even in
1740  * the face of context switches.
1741  *
1742  * Our approach of updating the register state context (as opposed to
1743  * say using a workaround batch buffer) ensures that the hardware
1744  * won't automatically reload an out-of-date timer exponent even
1745  * transiently before a WA BB could be parsed.
1746  *
1747  * This function needs to:
1748  * - Ensure the currently running context's per-context OA state is
1749  *   updated
1750  * - Ensure that all existing contexts will have the correct per-context
1751  *   OA state if they are scheduled for use.
1752  * - Ensure any new contexts will be initialized with the correct
1753  *   per-context OA state.
1754  *
1755  * Note: it's only the RCS/Render context that has any OA state.
1756  */
1757 static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
1758 				       const struct i915_oa_config *oa_config,
1759 				       bool interruptible)
1760 {
1761 	struct i915_gem_context *ctx;
1762 	int ret;
1763 	unsigned int wait_flags = I915_WAIT_LOCKED;
1764 
1765 	if (interruptible) {
1766 		ret = i915_mutex_lock_interruptible(&dev_priv->drm);
1767 		if (ret)
1768 			return ret;
1769 
1770 		wait_flags |= I915_WAIT_INTERRUPTIBLE;
1771 	} else {
1772 		mutex_lock(&dev_priv->drm.struct_mutex);
1773 	}
1774 
1775 	/* Switch away from any user context. */
1776 	ret = gen8_switch_to_updated_kernel_context(dev_priv, oa_config);
1777 	if (ret)
1778 		goto out;
1779 
1780 	/*
1781 	 * The OA register config is setup through the context image. This image
1782 	 * might be written to by the GPU on context switch (in particular on
1783 	 * lite-restore). This means we can't safely update a context's image,
1784 	 * if this context is scheduled/submitted to run on the GPU.
1785 	 *
1786 	 * We could emit the OA register config through the batch buffer but
1787 	 * this might leave small interval of time where the OA unit is
1788 	 * configured at an invalid sampling period.
1789 	 *
1790 	 * So far the best way to work around this issue seems to be draining
1791 	 * the GPU from any submitted work.
1792 	 */
1793 	ret = i915_gem_wait_for_idle(dev_priv, wait_flags);
1794 	if (ret)
1795 		goto out;
1796 
1797 	/* Update all contexts now that we've stalled the submission. */
1798 	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
1799 		struct intel_context *ce = &ctx->engine[RCS];
1800 		u32 *regs;
1801 
1802 		/* OA settings will be set upon first use */
1803 		if (!ce->state)
1804 			continue;
1805 
1806 		regs = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
1807 		if (IS_ERR(regs)) {
1808 			ret = PTR_ERR(regs);
1809 			goto out;
1810 		}
1811 
1812 		ce->state->obj->mm.dirty = true;
1813 		regs += LRC_STATE_PN * PAGE_SIZE / sizeof(*regs);
1814 
1815 		gen8_update_reg_state_unlocked(ctx, regs, oa_config);
1816 
1817 		i915_gem_object_unpin_map(ce->state->obj);
1818 	}
1819 
1820  out:
1821 	mutex_unlock(&dev_priv->drm.struct_mutex);
1822 
1823 	return ret;
1824 }
1825 
1826 static int gen8_enable_metric_set(struct drm_i915_private *dev_priv,
1827 				  const struct i915_oa_config *oa_config)
1828 {
1829 	int ret;
1830 
1831 	/*
1832 	 * We disable slice/unslice clock ratio change reports on SKL since
1833 	 * they are too noisy. The HW generates a lot of redundant reports
1834 	 * where the ratio hasn't really changed causing a lot of redundant
1835 	 * work to processes and increasing the chances we'll hit buffer
1836 	 * overruns.
1837 	 *
1838 	 * Although we don't currently use the 'disable overrun' OABUFFER
1839 	 * feature it's worth noting that clock ratio reports have to be
1840 	 * disabled before considering to use that feature since the HW doesn't
1841 	 * correctly block these reports.
1842 	 *
1843 	 * Currently none of the high-level metrics we have depend on knowing
1844 	 * this ratio to normalize.
1845 	 *
1846 	 * Note: This register is not power context saved and restored, but
1847 	 * that's OK considering that we disable RC6 while the OA unit is
1848 	 * enabled.
1849 	 *
1850 	 * The _INCLUDE_CLK_RATIO bit allows the slice/unslice frequency to
1851 	 * be read back from automatically triggered reports, as part of the
1852 	 * RPT_ID field.
1853 	 */
1854 	if (IS_GEN9(dev_priv)) {
1855 		I915_WRITE(GEN8_OA_DEBUG,
1856 			   _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
1857 					      GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
1858 	}
1859 
1860 	/*
1861 	 * Update all contexts prior writing the mux configurations as we need
1862 	 * to make sure all slices/subslices are ON before writing to NOA
1863 	 * registers.
1864 	 */
1865 	ret = gen8_configure_all_contexts(dev_priv, oa_config, true);
1866 	if (ret)
1867 		return ret;
1868 
1869 	config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len);
1870 
1871 	config_oa_regs(dev_priv, oa_config->b_counter_regs,
1872 		       oa_config->b_counter_regs_len);
1873 
1874 	return 0;
1875 }
1876 
1877 static void gen8_disable_metric_set(struct drm_i915_private *dev_priv)
1878 {
1879 	/* Reset all contexts' slices/subslices configurations. */
1880 	gen8_configure_all_contexts(dev_priv, NULL, false);
1881 
1882 	I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) &
1883 				      ~GT_NOA_ENABLE));
1884 
1885 }
1886 
1887 static void gen7_oa_enable(struct drm_i915_private *dev_priv)
1888 {
1889 	/*
1890 	 * Reset buf pointers so we don't forward reports from before now.
1891 	 *
1892 	 * Think carefully if considering trying to avoid this, since it
1893 	 * also ensures status flags and the buffer itself are cleared
1894 	 * in error paths, and we have checks for invalid reports based
1895 	 * on the assumption that certain fields are written to zeroed
1896 	 * memory which this helps maintains.
1897 	 */
1898 	gen7_init_oa_buffer(dev_priv);
1899 
1900 	if (dev_priv->perf.oa.exclusive_stream->enabled) {
1901 		struct i915_gem_context *ctx =
1902 			dev_priv->perf.oa.exclusive_stream->ctx;
1903 		u32 ctx_id = dev_priv->perf.oa.specific_ctx_id;
1904 
1905 		bool periodic = dev_priv->perf.oa.periodic;
1906 		u32 period_exponent = dev_priv->perf.oa.period_exponent;
1907 		u32 report_format = dev_priv->perf.oa.oa_buffer.format;
1908 
1909 		I915_WRITE(GEN7_OACONTROL,
1910 			   (ctx_id & GEN7_OACONTROL_CTX_MASK) |
1911 			   (period_exponent <<
1912 			    GEN7_OACONTROL_TIMER_PERIOD_SHIFT) |
1913 			   (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) |
1914 			   (report_format << GEN7_OACONTROL_FORMAT_SHIFT) |
1915 			   (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) |
1916 			   GEN7_OACONTROL_ENABLE);
1917 	} else
1918 		I915_WRITE(GEN7_OACONTROL, 0);
1919 }
1920 
1921 static void gen8_oa_enable(struct drm_i915_private *dev_priv)
1922 {
1923 	u32 report_format = dev_priv->perf.oa.oa_buffer.format;
1924 
1925 	/*
1926 	 * Reset buf pointers so we don't forward reports from before now.
1927 	 *
1928 	 * Think carefully if considering trying to avoid this, since it
1929 	 * also ensures status flags and the buffer itself are cleared
1930 	 * in error paths, and we have checks for invalid reports based
1931 	 * on the assumption that certain fields are written to zeroed
1932 	 * memory which this helps maintains.
1933 	 */
1934 	gen8_init_oa_buffer(dev_priv);
1935 
1936 	/*
1937 	 * Note: we don't rely on the hardware to perform single context
1938 	 * filtering and instead filter on the cpu based on the context-id
1939 	 * field of reports
1940 	 */
1941 	I915_WRITE(GEN8_OACONTROL, (report_format <<
1942 				    GEN8_OA_REPORT_FORMAT_SHIFT) |
1943 				   GEN8_OA_COUNTER_ENABLE);
1944 }
1945 
1946 /**
1947  * i915_oa_stream_enable - handle `I915_PERF_IOCTL_ENABLE` for OA stream
1948  * @stream: An i915 perf stream opened for OA metrics
1949  *
1950  * [Re]enables hardware periodic sampling according to the period configured
1951  * when opening the stream. This also starts a hrtimer that will periodically
1952  * check for data in the circular OA buffer for notifying userspace (e.g.
1953  * during a read() or poll()).
1954  */
1955 static void i915_oa_stream_enable(struct i915_perf_stream *stream)
1956 {
1957 	struct drm_i915_private *dev_priv = stream->dev_priv;
1958 
1959 	dev_priv->perf.oa.ops.oa_enable(dev_priv);
1960 
1961 	if (dev_priv->perf.oa.periodic)
1962 		hrtimer_start(&dev_priv->perf.oa.poll_check_timer,
1963 			      ns_to_ktime(POLL_PERIOD),
1964 			      HRTIMER_MODE_REL_PINNED);
1965 }
1966 
1967 static void gen7_oa_disable(struct drm_i915_private *dev_priv)
1968 {
1969 	I915_WRITE(GEN7_OACONTROL, 0);
1970 }
1971 
1972 static void gen8_oa_disable(struct drm_i915_private *dev_priv)
1973 {
1974 	I915_WRITE(GEN8_OACONTROL, 0);
1975 }
1976 
1977 /**
1978  * i915_oa_stream_disable - handle `I915_PERF_IOCTL_DISABLE` for OA stream
1979  * @stream: An i915 perf stream opened for OA metrics
1980  *
1981  * Stops the OA unit from periodically writing counter reports into the
1982  * circular OA buffer. This also stops the hrtimer that periodically checks for
1983  * data in the circular OA buffer, for notifying userspace.
1984  */
1985 static void i915_oa_stream_disable(struct i915_perf_stream *stream)
1986 {
1987 	struct drm_i915_private *dev_priv = stream->dev_priv;
1988 
1989 	dev_priv->perf.oa.ops.oa_disable(dev_priv);
1990 
1991 	if (dev_priv->perf.oa.periodic)
1992 		hrtimer_cancel(&dev_priv->perf.oa.poll_check_timer);
1993 }
1994 
1995 static const struct i915_perf_stream_ops i915_oa_stream_ops = {
1996 	.destroy = i915_oa_stream_destroy,
1997 	.enable = i915_oa_stream_enable,
1998 	.disable = i915_oa_stream_disable,
1999 	.wait_unlocked = i915_oa_wait_unlocked,
2000 	.poll_wait = i915_oa_poll_wait,
2001 	.read = i915_oa_read,
2002 };
2003 
2004 /**
2005  * i915_oa_stream_init - validate combined props for OA stream and init
2006  * @stream: An i915 perf stream
2007  * @param: The open parameters passed to `DRM_I915_PERF_OPEN`
2008  * @props: The property state that configures stream (individually validated)
2009  *
2010  * While read_properties_unlocked() validates properties in isolation it
2011  * doesn't ensure that the combination necessarily makes sense.
2012  *
2013  * At this point it has been determined that userspace wants a stream of
2014  * OA metrics, but still we need to further validate the combined
2015  * properties are OK.
2016  *
2017  * If the configuration makes sense then we can allocate memory for
2018  * a circular OA buffer and apply the requested metric set configuration.
2019  *
2020  * Returns: zero on success or a negative error code.
2021  */
2022 static int i915_oa_stream_init(struct i915_perf_stream *stream,
2023 			       struct drm_i915_perf_open_param *param,
2024 			       struct perf_open_properties *props)
2025 {
2026 	struct drm_i915_private *dev_priv = stream->dev_priv;
2027 	int format_size;
2028 	int ret;
2029 
2030 	/* If the sysfs metrics/ directory wasn't registered for some
2031 	 * reason then don't let userspace try their luck with config
2032 	 * IDs
2033 	 */
2034 	if (!dev_priv->perf.metrics_kobj) {
2035 		DRM_DEBUG("OA metrics weren't advertised via sysfs\n");
2036 		return -EINVAL;
2037 	}
2038 
2039 	if (!(props->sample_flags & SAMPLE_OA_REPORT)) {
2040 		DRM_DEBUG("Only OA report sampling supported\n");
2041 		return -EINVAL;
2042 	}
2043 
2044 	if (!dev_priv->perf.oa.ops.init_oa_buffer) {
2045 		DRM_DEBUG("OA unit not supported\n");
2046 		return -ENODEV;
2047 	}
2048 
2049 	/* To avoid the complexity of having to accurately filter
2050 	 * counter reports and marshal to the appropriate client
2051 	 * we currently only allow exclusive access
2052 	 */
2053 	if (dev_priv->perf.oa.exclusive_stream) {
2054 		DRM_DEBUG("OA unit already in use\n");
2055 		return -EBUSY;
2056 	}
2057 
2058 	if (!props->oa_format) {
2059 		DRM_DEBUG("OA report format not specified\n");
2060 		return -EINVAL;
2061 	}
2062 
2063 	/* We set up some ratelimit state to potentially throttle any _NOTES
2064 	 * about spurious, invalid OA reports which we don't forward to
2065 	 * userspace.
2066 	 *
2067 	 * The initialization is associated with opening the stream (not driver
2068 	 * init) considering we print a _NOTE about any throttling when closing
2069 	 * the stream instead of waiting until driver _fini which no one would
2070 	 * ever see.
2071 	 *
2072 	 * Using the same limiting factors as printk_ratelimit()
2073 	 */
2074 	ratelimit_state_init(&dev_priv->perf.oa.spurious_report_rs,
2075 			     5 * HZ, 10);
2076 	/* Since we use a DRM_NOTE for spurious reports it would be
2077 	 * inconsistent to let __ratelimit() automatically print a warning for
2078 	 * throttling.
2079 	 */
2080 	ratelimit_set_flags(&dev_priv->perf.oa.spurious_report_rs,
2081 			    RATELIMIT_MSG_ON_RELEASE);
2082 
2083 	stream->sample_size = sizeof(struct drm_i915_perf_record_header);
2084 
2085 	format_size = dev_priv->perf.oa.oa_formats[props->oa_format].size;
2086 
2087 	stream->sample_flags |= SAMPLE_OA_REPORT;
2088 	stream->sample_size += format_size;
2089 
2090 	dev_priv->perf.oa.oa_buffer.format_size = format_size;
2091 	if (WARN_ON(dev_priv->perf.oa.oa_buffer.format_size == 0))
2092 		return -EINVAL;
2093 
2094 	dev_priv->perf.oa.oa_buffer.format =
2095 		dev_priv->perf.oa.oa_formats[props->oa_format].format;
2096 
2097 	dev_priv->perf.oa.periodic = props->oa_periodic;
2098 	if (dev_priv->perf.oa.periodic)
2099 		dev_priv->perf.oa.period_exponent = props->oa_period_exponent;
2100 
2101 	if (stream->ctx) {
2102 		ret = oa_get_render_ctx_id(stream);
2103 		if (ret)
2104 			return ret;
2105 	}
2106 
2107 	ret = get_oa_config(dev_priv, props->metrics_set, &stream->oa_config);
2108 	if (ret)
2109 		goto err_config;
2110 
2111 	/* PRM - observability performance counters:
2112 	 *
2113 	 *   OACONTROL, performance counter enable, note:
2114 	 *
2115 	 *   "When this bit is set, in order to have coherent counts,
2116 	 *   RC6 power state and trunk clock gating must be disabled.
2117 	 *   This can be achieved by programming MMIO registers as
2118 	 *   0xA094=0 and 0xA090[31]=1"
2119 	 *
2120 	 *   In our case we are expecting that taking pm + FORCEWAKE
2121 	 *   references will effectively disable RC6.
2122 	 */
2123 	intel_runtime_pm_get(dev_priv);
2124 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
2125 
2126 	ret = alloc_oa_buffer(dev_priv);
2127 	if (ret)
2128 		goto err_oa_buf_alloc;
2129 
2130 	ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv,
2131 						      stream->oa_config);
2132 	if (ret)
2133 		goto err_enable;
2134 
2135 	stream->ops = &i915_oa_stream_ops;
2136 
2137 	/* Lock device for exclusive_stream access late because
2138 	 * enable_metric_set() might lock as well on gen8+.
2139 	 */
2140 	ret = i915_mutex_lock_interruptible(&dev_priv->drm);
2141 	if (ret)
2142 		goto err_lock;
2143 
2144 	dev_priv->perf.oa.exclusive_stream = stream;
2145 
2146 	mutex_unlock(&dev_priv->drm.struct_mutex);
2147 
2148 	return 0;
2149 
2150 err_lock:
2151 	dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
2152 
2153 err_enable:
2154 	free_oa_buffer(dev_priv);
2155 
2156 err_oa_buf_alloc:
2157 	put_oa_config(dev_priv, stream->oa_config);
2158 
2159 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
2160 	intel_runtime_pm_put(dev_priv);
2161 
2162 err_config:
2163 	if (stream->ctx)
2164 		oa_put_render_ctx_id(stream);
2165 
2166 	return ret;
2167 }
2168 
2169 void i915_oa_init_reg_state(struct intel_engine_cs *engine,
2170 			    struct i915_gem_context *ctx,
2171 			    u32 *reg_state)
2172 {
2173 	struct i915_perf_stream *stream;
2174 
2175 	if (engine->id != RCS)
2176 		return;
2177 
2178 	stream = engine->i915->perf.oa.exclusive_stream;
2179 	if (stream)
2180 		gen8_update_reg_state_unlocked(ctx, reg_state, stream->oa_config);
2181 }
2182 
2183 /**
2184  * i915_perf_read_locked - &i915_perf_stream_ops->read with error normalisation
2185  * @stream: An i915 perf stream
2186  * @file: An i915 perf stream file
2187  * @buf: destination buffer given by userspace
2188  * @count: the number of bytes userspace wants to read
2189  * @ppos: (inout) file seek position (unused)
2190  *
2191  * Besides wrapping &i915_perf_stream_ops->read this provides a common place to
2192  * ensure that if we've successfully copied any data then reporting that takes
2193  * precedence over any internal error status, so the data isn't lost.
2194  *
2195  * For example ret will be -ENOSPC whenever there is more buffered data than
2196  * can be copied to userspace, but that's only interesting if we weren't able
2197  * to copy some data because it implies the userspace buffer is too small to
2198  * receive a single record (and we never split records).
2199  *
2200  * Another case with ret == -EFAULT is more of a grey area since it would seem
2201  * like bad form for userspace to ask us to overrun its buffer, but the user
2202  * knows best:
2203  *
2204  *   http://yarchive.net/comp/linux/partial_reads_writes.html
2205  *
2206  * Returns: The number of bytes copied or a negative error code on failure.
2207  */
2208 static ssize_t i915_perf_read_locked(struct i915_perf_stream *stream,
2209 				     struct file *file,
2210 				     char __user *buf,
2211 				     size_t count,
2212 				     loff_t *ppos)
2213 {
2214 	/* Note we keep the offset (aka bytes read) separate from any
2215 	 * error status so that the final check for whether we return
2216 	 * the bytes read with a higher precedence than any error (see
2217 	 * comment below) doesn't need to be handled/duplicated in
2218 	 * stream->ops->read() implementations.
2219 	 */
2220 	size_t offset = 0;
2221 	int ret = stream->ops->read(stream, buf, count, &offset);
2222 
2223 	return offset ?: (ret ?: -EAGAIN);
2224 }
2225 
2226 /**
2227  * i915_perf_read - handles read() FOP for i915 perf stream FDs
2228  * @file: An i915 perf stream file
2229  * @buf: destination buffer given by userspace
2230  * @count: the number of bytes userspace wants to read
2231  * @ppos: (inout) file seek position (unused)
2232  *
2233  * The entry point for handling a read() on a stream file descriptor from
2234  * userspace. Most of the work is left to the i915_perf_read_locked() and
2235  * &i915_perf_stream_ops->read but to save having stream implementations (of
2236  * which we might have multiple later) we handle blocking read here.
2237  *
2238  * We can also consistently treat trying to read from a disabled stream
2239  * as an IO error so implementations can assume the stream is enabled
2240  * while reading.
2241  *
2242  * Returns: The number of bytes copied or a negative error code on failure.
2243  */
2244 static ssize_t i915_perf_read(struct file *file,
2245 			      char __user *buf,
2246 			      size_t count,
2247 			      loff_t *ppos)
2248 {
2249 	struct i915_perf_stream *stream = file->private_data;
2250 	struct drm_i915_private *dev_priv = stream->dev_priv;
2251 	ssize_t ret;
2252 
2253 	/* To ensure it's handled consistently we simply treat all reads of a
2254 	 * disabled stream as an error. In particular it might otherwise lead
2255 	 * to a deadlock for blocking file descriptors...
2256 	 */
2257 	if (!stream->enabled)
2258 		return -EIO;
2259 
2260 	if (!(file->f_flags & O_NONBLOCK)) {
2261 		/* There's the small chance of false positives from
2262 		 * stream->ops->wait_unlocked.
2263 		 *
2264 		 * E.g. with single context filtering since we only wait until
2265 		 * oabuffer has >= 1 report we don't immediately know whether
2266 		 * any reports really belong to the current context
2267 		 */
2268 		do {
2269 			ret = stream->ops->wait_unlocked(stream);
2270 			if (ret)
2271 				return ret;
2272 
2273 			mutex_lock(&dev_priv->perf.lock);
2274 			ret = i915_perf_read_locked(stream, file,
2275 						    buf, count, ppos);
2276 			mutex_unlock(&dev_priv->perf.lock);
2277 		} while (ret == -EAGAIN);
2278 	} else {
2279 		mutex_lock(&dev_priv->perf.lock);
2280 		ret = i915_perf_read_locked(stream, file, buf, count, ppos);
2281 		mutex_unlock(&dev_priv->perf.lock);
2282 	}
2283 
2284 	/* We allow the poll checking to sometimes report false positive POLLIN
2285 	 * events where we might actually report EAGAIN on read() if there's
2286 	 * not really any data available. In this situation though we don't
2287 	 * want to enter a busy loop between poll() reporting a POLLIN event
2288 	 * and read() returning -EAGAIN. Clearing the oa.pollin state here
2289 	 * effectively ensures we back off until the next hrtimer callback
2290 	 * before reporting another POLLIN event.
2291 	 */
2292 	if (ret >= 0 || ret == -EAGAIN) {
2293 		/* Maybe make ->pollin per-stream state if we support multiple
2294 		 * concurrent streams in the future.
2295 		 */
2296 		dev_priv->perf.oa.pollin = false;
2297 	}
2298 
2299 	return ret;
2300 }
2301 
2302 static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
2303 {
2304 	struct drm_i915_private *dev_priv =
2305 		container_of(hrtimer, typeof(*dev_priv),
2306 			     perf.oa.poll_check_timer);
2307 
2308 	if (oa_buffer_check_unlocked(dev_priv)) {
2309 		dev_priv->perf.oa.pollin = true;
2310 		wake_up(&dev_priv->perf.oa.poll_wq);
2311 	}
2312 
2313 	hrtimer_forward_now(hrtimer, ns_to_ktime(POLL_PERIOD));
2314 
2315 	return HRTIMER_RESTART;
2316 }
2317 
2318 /**
2319  * i915_perf_poll_locked - poll_wait() with a suitable wait queue for stream
2320  * @dev_priv: i915 device instance
2321  * @stream: An i915 perf stream
2322  * @file: An i915 perf stream file
2323  * @wait: poll() state table
2324  *
2325  * For handling userspace polling on an i915 perf stream, this calls through to
2326  * &i915_perf_stream_ops->poll_wait to call poll_wait() with a wait queue that
2327  * will be woken for new stream data.
2328  *
2329  * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize
2330  * with any non-file-operation driver hooks.
2331  *
2332  * Returns: any poll events that are ready without sleeping
2333  */
2334 static unsigned int i915_perf_poll_locked(struct drm_i915_private *dev_priv,
2335 					  struct i915_perf_stream *stream,
2336 					  struct file *file,
2337 					  poll_table *wait)
2338 {
2339 	unsigned int events = 0;
2340 
2341 	stream->ops->poll_wait(stream, file, wait);
2342 
2343 	/* Note: we don't explicitly check whether there's something to read
2344 	 * here since this path may be very hot depending on what else
2345 	 * userspace is polling, or on the timeout in use. We rely solely on
2346 	 * the hrtimer/oa_poll_check_timer_cb to notify us when there are
2347 	 * samples to read.
2348 	 */
2349 	if (dev_priv->perf.oa.pollin)
2350 		events |= POLLIN;
2351 
2352 	return events;
2353 }
2354 
2355 /**
2356  * i915_perf_poll - call poll_wait() with a suitable wait queue for stream
2357  * @file: An i915 perf stream file
2358  * @wait: poll() state table
2359  *
2360  * For handling userspace polling on an i915 perf stream, this ensures
2361  * poll_wait() gets called with a wait queue that will be woken for new stream
2362  * data.
2363  *
2364  * Note: Implementation deferred to i915_perf_poll_locked()
2365  *
2366  * Returns: any poll events that are ready without sleeping
2367  */
2368 static unsigned int i915_perf_poll(struct file *file, poll_table *wait)
2369 {
2370 	struct i915_perf_stream *stream = file->private_data;
2371 	struct drm_i915_private *dev_priv = stream->dev_priv;
2372 	int ret;
2373 
2374 	mutex_lock(&dev_priv->perf.lock);
2375 	ret = i915_perf_poll_locked(dev_priv, stream, file, wait);
2376 	mutex_unlock(&dev_priv->perf.lock);
2377 
2378 	return ret;
2379 }
2380 
2381 /**
2382  * i915_perf_enable_locked - handle `I915_PERF_IOCTL_ENABLE` ioctl
2383  * @stream: A disabled i915 perf stream
2384  *
2385  * [Re]enables the associated capture of data for this stream.
2386  *
2387  * If a stream was previously enabled then there's currently no intention
2388  * to provide userspace any guarantee about the preservation of previously
2389  * buffered data.
2390  */
2391 static void i915_perf_enable_locked(struct i915_perf_stream *stream)
2392 {
2393 	if (stream->enabled)
2394 		return;
2395 
2396 	/* Allow stream->ops->enable() to refer to this */
2397 	stream->enabled = true;
2398 
2399 	if (stream->ops->enable)
2400 		stream->ops->enable(stream);
2401 }
2402 
2403 /**
2404  * i915_perf_disable_locked - handle `I915_PERF_IOCTL_DISABLE` ioctl
2405  * @stream: An enabled i915 perf stream
2406  *
2407  * Disables the associated capture of data for this stream.
2408  *
2409  * The intention is that disabling an re-enabling a stream will ideally be
2410  * cheaper than destroying and re-opening a stream with the same configuration,
2411  * though there are no formal guarantees about what state or buffered data
2412  * must be retained between disabling and re-enabling a stream.
2413  *
2414  * Note: while a stream is disabled it's considered an error for userspace
2415  * to attempt to read from the stream (-EIO).
2416  */
2417 static void i915_perf_disable_locked(struct i915_perf_stream *stream)
2418 {
2419 	if (!stream->enabled)
2420 		return;
2421 
2422 	/* Allow stream->ops->disable() to refer to this */
2423 	stream->enabled = false;
2424 
2425 	if (stream->ops->disable)
2426 		stream->ops->disable(stream);
2427 }
2428 
2429 /**
2430  * i915_perf_ioctl - support ioctl() usage with i915 perf stream FDs
2431  * @stream: An i915 perf stream
2432  * @cmd: the ioctl request
2433  * @arg: the ioctl data
2434  *
2435  * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize
2436  * with any non-file-operation driver hooks.
2437  *
2438  * Returns: zero on success or a negative error code. Returns -EINVAL for
2439  * an unknown ioctl request.
2440  */
2441 static long i915_perf_ioctl_locked(struct i915_perf_stream *stream,
2442 				   unsigned int cmd,
2443 				   unsigned long arg)
2444 {
2445 	switch (cmd) {
2446 	case I915_PERF_IOCTL_ENABLE:
2447 		i915_perf_enable_locked(stream);
2448 		return 0;
2449 	case I915_PERF_IOCTL_DISABLE:
2450 		i915_perf_disable_locked(stream);
2451 		return 0;
2452 	}
2453 
2454 	return -EINVAL;
2455 }
2456 
2457 /**
2458  * i915_perf_ioctl - support ioctl() usage with i915 perf stream FDs
2459  * @file: An i915 perf stream file
2460  * @cmd: the ioctl request
2461  * @arg: the ioctl data
2462  *
2463  * Implementation deferred to i915_perf_ioctl_locked().
2464  *
2465  * Returns: zero on success or a negative error code. Returns -EINVAL for
2466  * an unknown ioctl request.
2467  */
2468 static long i915_perf_ioctl(struct file *file,
2469 			    unsigned int cmd,
2470 			    unsigned long arg)
2471 {
2472 	struct i915_perf_stream *stream = file->private_data;
2473 	struct drm_i915_private *dev_priv = stream->dev_priv;
2474 	long ret;
2475 
2476 	mutex_lock(&dev_priv->perf.lock);
2477 	ret = i915_perf_ioctl_locked(stream, cmd, arg);
2478 	mutex_unlock(&dev_priv->perf.lock);
2479 
2480 	return ret;
2481 }
2482 
2483 /**
2484  * i915_perf_destroy_locked - destroy an i915 perf stream
2485  * @stream: An i915 perf stream
2486  *
2487  * Frees all resources associated with the given i915 perf @stream, disabling
2488  * any associated data capture in the process.
2489  *
2490  * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize
2491  * with any non-file-operation driver hooks.
2492  */
2493 static void i915_perf_destroy_locked(struct i915_perf_stream *stream)
2494 {
2495 	if (stream->enabled)
2496 		i915_perf_disable_locked(stream);
2497 
2498 	if (stream->ops->destroy)
2499 		stream->ops->destroy(stream);
2500 
2501 	list_del(&stream->link);
2502 
2503 	if (stream->ctx)
2504 		i915_gem_context_put(stream->ctx);
2505 
2506 	kfree(stream);
2507 }
2508 
2509 /**
2510  * i915_perf_release - handles userspace close() of a stream file
2511  * @inode: anonymous inode associated with file
2512  * @file: An i915 perf stream file
2513  *
2514  * Cleans up any resources associated with an open i915 perf stream file.
2515  *
2516  * NB: close() can't really fail from the userspace point of view.
2517  *
2518  * Returns: zero on success or a negative error code.
2519  */
2520 static int i915_perf_release(struct inode *inode, struct file *file)
2521 {
2522 	struct i915_perf_stream *stream = file->private_data;
2523 	struct drm_i915_private *dev_priv = stream->dev_priv;
2524 
2525 	mutex_lock(&dev_priv->perf.lock);
2526 	i915_perf_destroy_locked(stream);
2527 	mutex_unlock(&dev_priv->perf.lock);
2528 
2529 	return 0;
2530 }
2531 
2532 
2533 static const struct file_operations fops = {
2534 	.owner		= THIS_MODULE,
2535 	.llseek		= no_llseek,
2536 	.release	= i915_perf_release,
2537 	.poll		= i915_perf_poll,
2538 	.read		= i915_perf_read,
2539 	.unlocked_ioctl	= i915_perf_ioctl,
2540 	/* Our ioctl have no arguments, so it's safe to use the same function
2541 	 * to handle 32bits compatibility.
2542 	 */
2543 	.compat_ioctl   = i915_perf_ioctl,
2544 };
2545 
2546 
2547 /**
2548  * i915_perf_open_ioctl_locked - DRM ioctl() for userspace to open a stream FD
2549  * @dev_priv: i915 device instance
2550  * @param: The open parameters passed to 'DRM_I915_PERF_OPEN`
2551  * @props: individually validated u64 property value pairs
2552  * @file: drm file
2553  *
2554  * See i915_perf_ioctl_open() for interface details.
2555  *
2556  * Implements further stream config validation and stream initialization on
2557  * behalf of i915_perf_open_ioctl() with the &drm_i915_private->perf.lock mutex
2558  * taken to serialize with any non-file-operation driver hooks.
2559  *
2560  * Note: at this point the @props have only been validated in isolation and
2561  * it's still necessary to validate that the combination of properties makes
2562  * sense.
2563  *
2564  * In the case where userspace is interested in OA unit metrics then further
2565  * config validation and stream initialization details will be handled by
2566  * i915_oa_stream_init(). The code here should only validate config state that
2567  * will be relevant to all stream types / backends.
2568  *
2569  * Returns: zero on success or a negative error code.
2570  */
2571 static int
2572 i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
2573 			    struct drm_i915_perf_open_param *param,
2574 			    struct perf_open_properties *props,
2575 			    struct drm_file *file)
2576 {
2577 	struct i915_gem_context *specific_ctx = NULL;
2578 	struct i915_perf_stream *stream = NULL;
2579 	unsigned long f_flags = 0;
2580 	bool privileged_op = true;
2581 	int stream_fd;
2582 	int ret;
2583 
2584 	if (props->single_context) {
2585 		u32 ctx_handle = props->ctx_handle;
2586 		struct drm_i915_file_private *file_priv = file->driver_priv;
2587 
2588 		specific_ctx = i915_gem_context_lookup(file_priv, ctx_handle);
2589 		if (!specific_ctx) {
2590 			DRM_DEBUG("Failed to look up context with ID %u for opening perf stream\n",
2591 				  ctx_handle);
2592 			ret = -ENOENT;
2593 			goto err;
2594 		}
2595 	}
2596 
2597 	/*
2598 	 * On Haswell the OA unit supports clock gating off for a specific
2599 	 * context and in this mode there's no visibility of metrics for the
2600 	 * rest of the system, which we consider acceptable for a
2601 	 * non-privileged client.
2602 	 *
2603 	 * For Gen8+ the OA unit no longer supports clock gating off for a
2604 	 * specific context and the kernel can't securely stop the counters
2605 	 * from updating as system-wide / global values. Even though we can
2606 	 * filter reports based on the included context ID we can't block
2607 	 * clients from seeing the raw / global counter values via
2608 	 * MI_REPORT_PERF_COUNT commands and so consider it a privileged op to
2609 	 * enable the OA unit by default.
2610 	 */
2611 	if (IS_HASWELL(dev_priv) && specific_ctx)
2612 		privileged_op = false;
2613 
2614 	/* Similar to perf's kernel.perf_paranoid_cpu sysctl option
2615 	 * we check a dev.i915.perf_stream_paranoid sysctl option
2616 	 * to determine if it's ok to access system wide OA counters
2617 	 * without CAP_SYS_ADMIN privileges.
2618 	 */
2619 	if (privileged_op &&
2620 	    i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
2621 		DRM_DEBUG("Insufficient privileges to open system-wide i915 perf stream\n");
2622 		ret = -EACCES;
2623 		goto err_ctx;
2624 	}
2625 
2626 	stream = kzalloc(sizeof(*stream), GFP_KERNEL);
2627 	if (!stream) {
2628 		ret = -ENOMEM;
2629 		goto err_ctx;
2630 	}
2631 
2632 	stream->dev_priv = dev_priv;
2633 	stream->ctx = specific_ctx;
2634 
2635 	ret = i915_oa_stream_init(stream, param, props);
2636 	if (ret)
2637 		goto err_alloc;
2638 
2639 	/* we avoid simply assigning stream->sample_flags = props->sample_flags
2640 	 * to have _stream_init check the combination of sample flags more
2641 	 * thoroughly, but still this is the expected result at this point.
2642 	 */
2643 	if (WARN_ON(stream->sample_flags != props->sample_flags)) {
2644 		ret = -ENODEV;
2645 		goto err_flags;
2646 	}
2647 
2648 	list_add(&stream->link, &dev_priv->perf.streams);
2649 
2650 	if (param->flags & I915_PERF_FLAG_FD_CLOEXEC)
2651 		f_flags |= O_CLOEXEC;
2652 	if (param->flags & I915_PERF_FLAG_FD_NONBLOCK)
2653 		f_flags |= O_NONBLOCK;
2654 
2655 	stream_fd = anon_inode_getfd("[i915_perf]", &fops, stream, f_flags);
2656 	if (stream_fd < 0) {
2657 		ret = stream_fd;
2658 		goto err_open;
2659 	}
2660 
2661 	if (!(param->flags & I915_PERF_FLAG_DISABLED))
2662 		i915_perf_enable_locked(stream);
2663 
2664 	return stream_fd;
2665 
2666 err_open:
2667 	list_del(&stream->link);
2668 err_flags:
2669 	if (stream->ops->destroy)
2670 		stream->ops->destroy(stream);
2671 err_alloc:
2672 	kfree(stream);
2673 err_ctx:
2674 	if (specific_ctx)
2675 		i915_gem_context_put(specific_ctx);
2676 err:
2677 	return ret;
2678 }
2679 
2680 static u64 oa_exponent_to_ns(struct drm_i915_private *dev_priv, int exponent)
2681 {
2682 	return div_u64(1000000000ULL * (2ULL << exponent),
2683 		       dev_priv->perf.oa.timestamp_frequency);
2684 }
2685 
2686 /**
2687  * read_properties_unlocked - validate + copy userspace stream open properties
2688  * @dev_priv: i915 device instance
2689  * @uprops: The array of u64 key value pairs given by userspace
2690  * @n_props: The number of key value pairs expected in @uprops
2691  * @props: The stream configuration built up while validating properties
2692  *
2693  * Note this function only validates properties in isolation it doesn't
2694  * validate that the combination of properties makes sense or that all
2695  * properties necessary for a particular kind of stream have been set.
2696  *
2697  * Note that there currently aren't any ordering requirements for properties so
2698  * we shouldn't validate or assume anything about ordering here. This doesn't
2699  * rule out defining new properties with ordering requirements in the future.
2700  */
2701 static int read_properties_unlocked(struct drm_i915_private *dev_priv,
2702 				    u64 __user *uprops,
2703 				    u32 n_props,
2704 				    struct perf_open_properties *props)
2705 {
2706 	u64 __user *uprop = uprops;
2707 	u32 i;
2708 
2709 	memset(props, 0, sizeof(struct perf_open_properties));
2710 
2711 	if (!n_props) {
2712 		DRM_DEBUG("No i915 perf properties given\n");
2713 		return -EINVAL;
2714 	}
2715 
2716 	/* Considering that ID = 0 is reserved and assuming that we don't
2717 	 * (currently) expect any configurations to ever specify duplicate
2718 	 * values for a particular property ID then the last _PROP_MAX value is
2719 	 * one greater than the maximum number of properties we expect to get
2720 	 * from userspace.
2721 	 */
2722 	if (n_props >= DRM_I915_PERF_PROP_MAX) {
2723 		DRM_DEBUG("More i915 perf properties specified than exist\n");
2724 		return -EINVAL;
2725 	}
2726 
2727 	for (i = 0; i < n_props; i++) {
2728 		u64 oa_period, oa_freq_hz;
2729 		u64 id, value;
2730 		int ret;
2731 
2732 		ret = get_user(id, uprop);
2733 		if (ret)
2734 			return ret;
2735 
2736 		ret = get_user(value, uprop + 1);
2737 		if (ret)
2738 			return ret;
2739 
2740 		if (id == 0 || id >= DRM_I915_PERF_PROP_MAX) {
2741 			DRM_DEBUG("Unknown i915 perf property ID\n");
2742 			return -EINVAL;
2743 		}
2744 
2745 		switch ((enum drm_i915_perf_property_id)id) {
2746 		case DRM_I915_PERF_PROP_CTX_HANDLE:
2747 			props->single_context = 1;
2748 			props->ctx_handle = value;
2749 			break;
2750 		case DRM_I915_PERF_PROP_SAMPLE_OA:
2751 			props->sample_flags |= SAMPLE_OA_REPORT;
2752 			break;
2753 		case DRM_I915_PERF_PROP_OA_METRICS_SET:
2754 			if (value == 0) {
2755 				DRM_DEBUG("Unknown OA metric set ID\n");
2756 				return -EINVAL;
2757 			}
2758 			props->metrics_set = value;
2759 			break;
2760 		case DRM_I915_PERF_PROP_OA_FORMAT:
2761 			if (value == 0 || value >= I915_OA_FORMAT_MAX) {
2762 				DRM_DEBUG("Out-of-range OA report format %llu\n",
2763 					  value);
2764 				return -EINVAL;
2765 			}
2766 			if (!dev_priv->perf.oa.oa_formats[value].size) {
2767 				DRM_DEBUG("Unsupported OA report format %llu\n",
2768 					  value);
2769 				return -EINVAL;
2770 			}
2771 			props->oa_format = value;
2772 			break;
2773 		case DRM_I915_PERF_PROP_OA_EXPONENT:
2774 			if (value > OA_EXPONENT_MAX) {
2775 				DRM_DEBUG("OA timer exponent too high (> %u)\n",
2776 					 OA_EXPONENT_MAX);
2777 				return -EINVAL;
2778 			}
2779 
2780 			/* Theoretically we can program the OA unit to sample
2781 			 * e.g. every 160ns for HSW, 167ns for BDW/SKL or 104ns
2782 			 * for BXT. We don't allow such high sampling
2783 			 * frequencies by default unless root.
2784 			 */
2785 
2786 			BUILD_BUG_ON(sizeof(oa_period) != 8);
2787 			oa_period = oa_exponent_to_ns(dev_priv, value);
2788 
2789 			/* This check is primarily to ensure that oa_period <=
2790 			 * UINT32_MAX (before passing to do_div which only
2791 			 * accepts a u32 denominator), but we can also skip
2792 			 * checking anything < 1Hz which implicitly can't be
2793 			 * limited via an integer oa_max_sample_rate.
2794 			 */
2795 			if (oa_period <= NSEC_PER_SEC) {
2796 				u64 tmp = NSEC_PER_SEC;
2797 				do_div(tmp, oa_period);
2798 				oa_freq_hz = tmp;
2799 			} else
2800 				oa_freq_hz = 0;
2801 
2802 			if (oa_freq_hz > i915_oa_max_sample_rate &&
2803 			    !capable(CAP_SYS_ADMIN)) {
2804 				DRM_DEBUG("OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without root privileges\n",
2805 					  i915_oa_max_sample_rate);
2806 				return -EACCES;
2807 			}
2808 
2809 			props->oa_periodic = true;
2810 			props->oa_period_exponent = value;
2811 			break;
2812 		case DRM_I915_PERF_PROP_MAX:
2813 			MISSING_CASE(id);
2814 			return -EINVAL;
2815 		}
2816 
2817 		uprop += 2;
2818 	}
2819 
2820 	return 0;
2821 }
2822 
2823 /**
2824  * i915_perf_open_ioctl - DRM ioctl() for userspace to open a stream FD
2825  * @dev: drm device
2826  * @data: ioctl data copied from userspace (unvalidated)
2827  * @file: drm file
2828  *
2829  * Validates the stream open parameters given by userspace including flags
2830  * and an array of u64 key, value pair properties.
2831  *
2832  * Very little is assumed up front about the nature of the stream being
2833  * opened (for instance we don't assume it's for periodic OA unit metrics). An
2834  * i915-perf stream is expected to be a suitable interface for other forms of
2835  * buffered data written by the GPU besides periodic OA metrics.
2836  *
2837  * Note we copy the properties from userspace outside of the i915 perf
2838  * mutex to avoid an awkward lockdep with mmap_sem.
2839  *
2840  * Most of the implementation details are handled by
2841  * i915_perf_open_ioctl_locked() after taking the &drm_i915_private->perf.lock
2842  * mutex for serializing with any non-file-operation driver hooks.
2843  *
2844  * Return: A newly opened i915 Perf stream file descriptor or negative
2845  * error code on failure.
2846  */
2847 int i915_perf_open_ioctl(struct drm_device *dev, void *data,
2848 			 struct drm_file *file)
2849 {
2850 	struct drm_i915_private *dev_priv = dev->dev_private;
2851 	struct drm_i915_perf_open_param *param = data;
2852 	struct perf_open_properties props;
2853 	u32 known_open_flags;
2854 	int ret;
2855 
2856 	if (!dev_priv->perf.initialized) {
2857 		DRM_DEBUG("i915 perf interface not available for this system\n");
2858 		return -ENOTSUPP;
2859 	}
2860 
2861 	known_open_flags = I915_PERF_FLAG_FD_CLOEXEC |
2862 			   I915_PERF_FLAG_FD_NONBLOCK |
2863 			   I915_PERF_FLAG_DISABLED;
2864 	if (param->flags & ~known_open_flags) {
2865 		DRM_DEBUG("Unknown drm_i915_perf_open_param flag\n");
2866 		return -EINVAL;
2867 	}
2868 
2869 	ret = read_properties_unlocked(dev_priv,
2870 				       u64_to_user_ptr(param->properties_ptr),
2871 				       param->num_properties,
2872 				       &props);
2873 	if (ret)
2874 		return ret;
2875 
2876 	mutex_lock(&dev_priv->perf.lock);
2877 	ret = i915_perf_open_ioctl_locked(dev_priv, param, &props, file);
2878 	mutex_unlock(&dev_priv->perf.lock);
2879 
2880 	return ret;
2881 }
2882 
2883 /**
2884  * i915_perf_register - exposes i915-perf to userspace
2885  * @dev_priv: i915 device instance
2886  *
2887  * In particular OA metric sets are advertised under a sysfs metrics/
2888  * directory allowing userspace to enumerate valid IDs that can be
2889  * used to open an i915-perf stream.
2890  */
2891 void i915_perf_register(struct drm_i915_private *dev_priv)
2892 {
2893 	int ret;
2894 
2895 	if (!dev_priv->perf.initialized)
2896 		return;
2897 
2898 	/* To be sure we're synchronized with an attempted
2899 	 * i915_perf_open_ioctl(); considering that we register after
2900 	 * being exposed to userspace.
2901 	 */
2902 	mutex_lock(&dev_priv->perf.lock);
2903 
2904 	dev_priv->perf.metrics_kobj =
2905 		kobject_create_and_add("metrics",
2906 				       &dev_priv->drm.primary->kdev->kobj);
2907 	if (!dev_priv->perf.metrics_kobj)
2908 		goto exit;
2909 
2910 	sysfs_attr_init(&dev_priv->perf.oa.test_config.sysfs_metric_id.attr);
2911 
2912 	if (IS_HASWELL(dev_priv)) {
2913 		i915_perf_load_test_config_hsw(dev_priv);
2914 	} else if (IS_BROADWELL(dev_priv)) {
2915 		i915_perf_load_test_config_bdw(dev_priv);
2916 	} else if (IS_CHERRYVIEW(dev_priv)) {
2917 		i915_perf_load_test_config_chv(dev_priv);
2918 	} else if (IS_SKYLAKE(dev_priv)) {
2919 		if (IS_SKL_GT2(dev_priv))
2920 			i915_perf_load_test_config_sklgt2(dev_priv);
2921 		else if (IS_SKL_GT3(dev_priv))
2922 			i915_perf_load_test_config_sklgt3(dev_priv);
2923 		else if (IS_SKL_GT4(dev_priv))
2924 			i915_perf_load_test_config_sklgt4(dev_priv);
2925 	} else if (IS_BROXTON(dev_priv)) {
2926 		i915_perf_load_test_config_bxt(dev_priv);
2927 	} else if (IS_KABYLAKE(dev_priv)) {
2928 		if (IS_KBL_GT2(dev_priv))
2929 			i915_perf_load_test_config_kblgt2(dev_priv);
2930 		else if (IS_KBL_GT3(dev_priv))
2931 			i915_perf_load_test_config_kblgt3(dev_priv);
2932 	} else if (IS_GEMINILAKE(dev_priv)) {
2933 		i915_perf_load_test_config_glk(dev_priv);
2934 	} else if (IS_COFFEELAKE(dev_priv)) {
2935 		if (IS_CFL_GT2(dev_priv))
2936 			i915_perf_load_test_config_cflgt2(dev_priv);
2937 	}
2938 
2939 	if (dev_priv->perf.oa.test_config.id == 0)
2940 		goto sysfs_error;
2941 
2942 	ret = sysfs_create_group(dev_priv->perf.metrics_kobj,
2943 				 &dev_priv->perf.oa.test_config.sysfs_metric);
2944 	if (ret)
2945 		goto sysfs_error;
2946 
2947 	atomic_set(&dev_priv->perf.oa.test_config.ref_count, 1);
2948 
2949 	goto exit;
2950 
2951 sysfs_error:
2952 	kobject_put(dev_priv->perf.metrics_kobj);
2953 	dev_priv->perf.metrics_kobj = NULL;
2954 
2955 exit:
2956 	mutex_unlock(&dev_priv->perf.lock);
2957 }
2958 
2959 /**
2960  * i915_perf_unregister - hide i915-perf from userspace
2961  * @dev_priv: i915 device instance
2962  *
2963  * i915-perf state cleanup is split up into an 'unregister' and
2964  * 'deinit' phase where the interface is first hidden from
2965  * userspace by i915_perf_unregister() before cleaning up
2966  * remaining state in i915_perf_fini().
2967  */
2968 void i915_perf_unregister(struct drm_i915_private *dev_priv)
2969 {
2970 	if (!dev_priv->perf.metrics_kobj)
2971 		return;
2972 
2973 	sysfs_remove_group(dev_priv->perf.metrics_kobj,
2974 			   &dev_priv->perf.oa.test_config.sysfs_metric);
2975 
2976 	kobject_put(dev_priv->perf.metrics_kobj);
2977 	dev_priv->perf.metrics_kobj = NULL;
2978 }
2979 
2980 static bool gen8_is_valid_flex_addr(struct drm_i915_private *dev_priv, u32 addr)
2981 {
2982 	static const i915_reg_t flex_eu_regs[] = {
2983 		EU_PERF_CNTL0,
2984 		EU_PERF_CNTL1,
2985 		EU_PERF_CNTL2,
2986 		EU_PERF_CNTL3,
2987 		EU_PERF_CNTL4,
2988 		EU_PERF_CNTL5,
2989 		EU_PERF_CNTL6,
2990 	};
2991 	int i;
2992 
2993 	for (i = 0; i < ARRAY_SIZE(flex_eu_regs); i++) {
2994 		if (flex_eu_regs[i].reg == addr)
2995 			return true;
2996 	}
2997 	return false;
2998 }
2999 
3000 static bool gen7_is_valid_b_counter_addr(struct drm_i915_private *dev_priv, u32 addr)
3001 {
3002 	return (addr >= OASTARTTRIG1.reg && addr <= OASTARTTRIG8.reg) ||
3003 		(addr >= OAREPORTTRIG1.reg && addr <= OAREPORTTRIG8.reg) ||
3004 		(addr >= OACEC0_0.reg && addr <= OACEC7_1.reg);
3005 }
3006 
3007 static bool gen7_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
3008 {
3009 	return addr == HALF_SLICE_CHICKEN2.reg ||
3010 		(addr >= MICRO_BP0_0.reg && addr <= NOA_WRITE.reg) ||
3011 		(addr >= OA_PERFCNT1_LO.reg && addr <= OA_PERFCNT2_HI.reg) ||
3012 		(addr >= OA_PERFMATRIX_LO.reg && addr <= OA_PERFMATRIX_HI.reg);
3013 }
3014 
3015 static bool gen8_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
3016 {
3017 	return gen7_is_valid_mux_addr(dev_priv, addr) ||
3018 		addr == WAIT_FOR_RC6_EXIT.reg ||
3019 		(addr >= RPM_CONFIG0.reg && addr <= NOA_CONFIG(8).reg);
3020 }
3021 
3022 static bool hsw_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
3023 {
3024 	return gen7_is_valid_mux_addr(dev_priv, addr) ||
3025 		(addr >= 0x25100 && addr <= 0x2FF90) ||
3026 		addr == 0x9ec0;
3027 }
3028 
3029 static bool chv_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
3030 {
3031 	return gen7_is_valid_mux_addr(dev_priv, addr) ||
3032 		(addr >= 0x182300 && addr <= 0x1823A4);
3033 }
3034 
3035 static uint32_t mask_reg_value(u32 reg, u32 val)
3036 {
3037 	/* HALF_SLICE_CHICKEN2 is programmed with the
3038 	 * WaDisableSTUnitPowerOptimization workaround. Make sure the value
3039 	 * programmed by userspace doesn't change this.
3040 	 */
3041 	if (HALF_SLICE_CHICKEN2.reg == reg)
3042 		val = val & ~_MASKED_BIT_ENABLE(GEN8_ST_PO_DISABLE);
3043 
3044 	/* WAIT_FOR_RC6_EXIT has only one bit fulfilling the function
3045 	 * indicated by its name; the remaining bits are selection fields
3046 	 * used by OA configs.
3047 	 */
3048 	if (WAIT_FOR_RC6_EXIT.reg == reg)
3049 		val = val & ~_MASKED_BIT_ENABLE(HSW_WAIT_FOR_RC6_EXIT_ENABLE);
3050 
3051 	return val;
3052 }
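
/*
 * Worked example (illustrative, not part of the driver): masked registers
 * keep a write-enable shadow of each bit in their upper 16 bits, and
 * _MASKED_BIT_ENABLE(bit) expands to ((bit) << 16 | (bit)). Clearing a
 * userspace value with ~_MASKED_BIT_ENABLE(bit) therefore strips both the
 * payload bit and the bit that would tell the HW to latch it:
 *
 *	u32 val = 0xffffffff;
 *
 *	val &= ~_MASKED_BIT_ENABLE(GEN8_ST_PO_DISABLE);
 *	// GEN8_ST_PO_DISABLE and its write-enable shadow are now both zero,
 *	// so this write can no longer toggle the workaround bit.
 */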
3053 
3054 static struct i915_oa_reg *alloc_oa_regs(struct drm_i915_private *dev_priv,
3055 					 bool (*is_valid)(struct drm_i915_private *dev_priv, u32 addr),
3056 					 u32 __user *regs,
3057 					 u32 n_regs)
3058 {
3059 	struct i915_oa_reg *oa_regs;
3060 	int err;
3061 	u32 i;
3062 
3063 	if (!n_regs)
3064 		return NULL;
3065 
3066 	if (!access_ok(VERIFY_READ, regs, n_regs * sizeof(u32) * 2))
3067 		return ERR_PTR(-EFAULT);
3068 
3069 	/* No is_valid function means we're not allowing any register to be programmed. */
3070 	GEM_BUG_ON(!is_valid);
3071 	if (!is_valid)
3072 		return ERR_PTR(-EINVAL);
3073 
3074 	oa_regs = kmalloc_array(n_regs, sizeof(*oa_regs), GFP_KERNEL);
3075 	if (!oa_regs)
3076 		return ERR_PTR(-ENOMEM);
3077 
3078 	for (i = 0; i < n_regs; i++) {
3079 		u32 addr, value;
3080 
3081 		err = get_user(addr, regs);
3082 		if (err)
3083 			goto addr_err;
3084 
3085 		if (!is_valid(dev_priv, addr)) {
3086 			DRM_DEBUG("Invalid oa_reg address: %X\n", addr);
3087 			err = -EINVAL;
3088 			goto addr_err;
3089 		}
3090 
3091 		err = get_user(value, regs + 1);
3092 		if (err)
3093 			goto addr_err;
3094 
3095 		oa_regs[i].addr = _MMIO(addr);
3096 		oa_regs[i].value = mask_reg_value(addr, value);
3097 
3098 		regs += 2;
3099 	}
3100 
3101 	return oa_regs;
3102 
3103 addr_err:
3104 	kfree(oa_regs);
3105 	return ERR_PTR(err);
3106 }
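
/*
 * For reference, a sketch of the userspace side of alloc_oa_regs()
 * (the address/value pairs below are hypothetical; the kernel reads
 * consecutive (addr, value) u32 pairs, and n_regs counts pairs, not u32s):
 *
 *	uint32_t mux_regs[] = {
 *		0x9888, 0x15740000,	// pair 0: addr, value
 *		0x9888, 0x00000000,	// pair 1
 *	};
 *
 *	args.mux_regs_ptr = (uintptr_t)mux_regs;
 *	args.n_mux_regs = 2;
 */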
3107 
3108 static ssize_t show_dynamic_id(struct device *dev,
3109 			       struct device_attribute *attr,
3110 			       char *buf)
3111 {
3112 	struct i915_oa_config *oa_config =
3113 		container_of(attr, typeof(*oa_config), sysfs_metric_id);
3114 
3115 	return sprintf(buf, "%d\n", oa_config->id);
3116 }
3117 
3118 static int create_dynamic_oa_sysfs_entry(struct drm_i915_private *dev_priv,
3119 					 struct i915_oa_config *oa_config)
3120 {
3121 	sysfs_attr_init(&oa_config->sysfs_metric_id.attr);
3122 	oa_config->sysfs_metric_id.attr.name = "id";
3123 	oa_config->sysfs_metric_id.attr.mode = S_IRUGO;
3124 	oa_config->sysfs_metric_id.show = show_dynamic_id;
3125 	oa_config->sysfs_metric_id.store = NULL;
3126 
3127 	oa_config->attrs[0] = &oa_config->sysfs_metric_id.attr;
3128 	oa_config->attrs[1] = NULL;
3129 
3130 	oa_config->sysfs_metric.name = oa_config->uuid;
3131 	oa_config->sysfs_metric.attrs = oa_config->attrs;
3132 
3133 	return sysfs_create_group(dev_priv->perf.metrics_kobj,
3134 				  &oa_config->sysfs_metric);
3135 }
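
/*
 * Each registered config thus shows up under the metrics kobject as a
 * directory named after its uuid, holding a single read-only "id" file.
 * As a sketch (the card0 path is illustrative):
 *
 *	/sys/class/drm/card0/metrics/<uuid>/id
 */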
3136 
3137 /**
3138  * i915_perf_add_config_ioctl - DRM ioctl() for userspace to add a new OA config
3139  * @dev: drm device
3140  * @data: ioctl data (pointer to struct drm_i915_perf_oa_config) copied from
3141  *        userspace (unvalidated)
3142  * @file: drm file
3143  *
3144  * Validates the submitted OA registers to be saved into a new OA config that
3145  * can then be used for programming the OA unit and its NOA network.
3146  *
3147  * Returns: A newly allocated config number to be used with the perf open ioctl
3148  * or a negative error code on failure.
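 *
 * A minimal userspace sketch (illustrative only: error handling is omitted,
 * drm_fd is assumed to be an already-open drm file descriptor, and the mux
 * register address/value pair is hypothetical):
 *
 *	struct drm_i915_perf_oa_config config = { 0 };
 *	uint32_t mux[] = { 0x9888, 0x15740000 };
 *	int config_id;
 *
 *	memcpy(config.uuid, "01234567-89ab-cdef-0123-456789abcdef", 36);
 *	config.n_mux_regs = 1;
 *	config.mux_regs_ptr = (uintptr_t)mux;
 *	config_id = ioctl(drm_fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &config);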
3149  */
3150 int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
3151 			       struct drm_file *file)
3152 {
3153 	struct drm_i915_private *dev_priv = dev->dev_private;
3154 	struct drm_i915_perf_oa_config *args = data;
3155 	struct i915_oa_config *oa_config, *tmp;
3156 	int err, id;
3157 
3158 	if (!dev_priv->perf.initialized) {
3159 		DRM_DEBUG("i915 perf interface not available for this system\n");
3160 		return -ENOTSUPP;
3161 	}
3162 
3163 	if (!dev_priv->perf.metrics_kobj) {
3164 		DRM_DEBUG("OA metrics weren't advertised via sysfs\n");
3165 		return -EINVAL;
3166 	}
3167 
3168 	if (i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
3169 		DRM_DEBUG("Insufficient privileges to add i915 OA config\n");
3170 		return -EACCES;
3171 	}
3172 
3173 	if ((!args->mux_regs_ptr || !args->n_mux_regs) &&
3174 	    (!args->boolean_regs_ptr || !args->n_boolean_regs) &&
3175 	    (!args->flex_regs_ptr || !args->n_flex_regs)) {
3176 		DRM_DEBUG("No OA registers given\n");
3177 		return -EINVAL;
3178 	}
3179 
3180 	oa_config = kzalloc(sizeof(*oa_config), GFP_KERNEL);
3181 	if (!oa_config) {
3182 		DRM_DEBUG("Failed to allocate memory for the OA config\n");
3183 		return -ENOMEM;
3184 	}
3185 
3186 	atomic_set(&oa_config->ref_count, 1);
3187 
3188 	if (!uuid_is_valid(args->uuid)) {
3189 		DRM_DEBUG("Invalid uuid format for OA config\n");
3190 		err = -EINVAL;
3191 		goto reg_err;
3192 	}
3193 
3194 	/* The last character in oa_config->uuid will be 0 because oa_config
3195 	 * was allocated with kzalloc.
3196 	 */
3197 	memcpy(oa_config->uuid, args->uuid, sizeof(args->uuid));
3198 
3199 	oa_config->mux_regs_len = args->n_mux_regs;
3200 	oa_config->mux_regs =
3201 		alloc_oa_regs(dev_priv,
3202 			      dev_priv->perf.oa.ops.is_valid_mux_reg,
3203 			      u64_to_user_ptr(args->mux_regs_ptr),
3204 			      args->n_mux_regs);
3205 
3206 	if (IS_ERR(oa_config->mux_regs)) {
3207 		DRM_DEBUG("Failed to create OA config for mux_regs\n");
3208 		err = PTR_ERR(oa_config->mux_regs);
3209 		goto reg_err;
3210 	}
3211 
3212 	oa_config->b_counter_regs_len = args->n_boolean_regs;
3213 	oa_config->b_counter_regs =
3214 		alloc_oa_regs(dev_priv,
3215 			      dev_priv->perf.oa.ops.is_valid_b_counter_reg,
3216 			      u64_to_user_ptr(args->boolean_regs_ptr),
3217 			      args->n_boolean_regs);
3218 
3219 	if (IS_ERR(oa_config->b_counter_regs)) {
3220 		DRM_DEBUG("Failed to create OA config for b_counter_regs\n");
3221 		err = PTR_ERR(oa_config->b_counter_regs);
3222 		goto reg_err;
3223 	}
3224 
3225 	if (INTEL_GEN(dev_priv) < 8) {
3226 		if (args->n_flex_regs != 0) {
3227 			err = -EINVAL;
3228 			goto reg_err;
3229 		}
3230 	} else {
3231 		oa_config->flex_regs_len = args->n_flex_regs;
3232 		oa_config->flex_regs =
3233 			alloc_oa_regs(dev_priv,
3234 				      dev_priv->perf.oa.ops.is_valid_flex_reg,
3235 				      u64_to_user_ptr(args->flex_regs_ptr),
3236 				      args->n_flex_regs);
3237 
3238 		if (IS_ERR(oa_config->flex_regs)) {
3239 			DRM_DEBUG("Failed to create OA config for flex_regs\n");
3240 			err = PTR_ERR(oa_config->flex_regs);
3241 			goto reg_err;
3242 		}
3243 	}
3244 
3245 	err = mutex_lock_interruptible(&dev_priv->perf.metrics_lock);
3246 	if (err)
3247 		goto reg_err;
3248 
3249 	/* We shouldn't have too many configs, so this iteration shouldn't be
3250 	 * too costly.
3251 	 */
3252 	idr_for_each_entry(&dev_priv->perf.metrics_idr, tmp, id) {
3253 		if (!strcmp(tmp->uuid, oa_config->uuid)) {
3254 			DRM_DEBUG("OA config already exists with this uuid\n");
3255 			err = -EADDRINUSE;
3256 			goto sysfs_err;
3257 		}
3258 	}
3259 
3260 	err = create_dynamic_oa_sysfs_entry(dev_priv, oa_config);
3261 	if (err) {
3262 		DRM_DEBUG("Failed to create sysfs entry for OA config\n");
3263 		goto sysfs_err;
3264 	}
3265 
3266 	/* Config id 0 is invalid; id 1 is reserved for the kernel's test config. */
3267 	oa_config->id = idr_alloc(&dev_priv->perf.metrics_idr,
3268 				  oa_config, 2,
3269 				  0, GFP_KERNEL);
3270 	if (oa_config->id < 0) {
3271 		DRM_DEBUG("Failed to create sysfs entry for OA config\n");
3272 		err = oa_config->id;
3273 		goto sysfs_err;
3274 	}
3275 
3276 	mutex_unlock(&dev_priv->perf.metrics_lock);
3277 
3278 	return oa_config->id;
3279 
3280 sysfs_err:
3281 	mutex_unlock(&dev_priv->perf.metrics_lock);
3282 reg_err:
3283 	put_oa_config(dev_priv, oa_config);
3284 	DRM_DEBUG("Failed to add new OA config\n");
3285 	return err;
3286 }
3287 
3288 /**
3289  * i915_perf_remove_config_ioctl - DRM ioctl() for userspace to remove an OA config
3290  * @dev: drm device
3291  * @data: ioctl data (pointer to u64 integer) copied from userspace
3292  * @file: drm file
3293  *
3294  * Configs can be removed while being used; they will stop appearing in sysfs
3295  * and their content will be freed when the stream using the config is closed.
3296  *
3297  * Returns: 0 on success or a negative error code on failure.
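 *
 * A matching userspace sketch (illustrative; config_id stands for a value
 * previously returned by DRM_IOCTL_I915_PERF_ADD_CONFIG):
 *
 *	__u64 config_id = ...;
 *	ioctl(drm_fd, DRM_IOCTL_I915_PERF_REMOVE_CONFIG, &config_id);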
3298  */
3299 int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
3300 				  struct drm_file *file)
3301 {
3302 	struct drm_i915_private *dev_priv = dev->dev_private;
3303 	u64 *arg = data;
3304 	struct i915_oa_config *oa_config;
3305 	int ret;
3306 
3307 	if (!dev_priv->perf.initialized) {
3308 		DRM_DEBUG("i915 perf interface not available for this system\n");
3309 		return -ENOTSUPP;
3310 	}
3311 
3312 	if (i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
3313 		DRM_DEBUG("Insufficient privileges to remove i915 OA config\n");
3314 		return -EACCES;
3315 	}
3316 
3317 	ret = mutex_lock_interruptible(&dev_priv->perf.metrics_lock);
3318 	if (ret)
3319 		goto lock_err;
3320 
3321 	oa_config = idr_find(&dev_priv->perf.metrics_idr, *arg);
3322 	if (!oa_config) {
3323 		DRM_DEBUG("Failed to remove unknown OA config\n");
3324 		ret = -ENOENT;
3325 		goto config_err;
3326 	}
3327 
3328 	GEM_BUG_ON(*arg != oa_config->id);
3329 
3330 	sysfs_remove_group(dev_priv->perf.metrics_kobj,
3331 			   &oa_config->sysfs_metric);
3332 
3333 	idr_remove(&dev_priv->perf.metrics_idr, *arg);
3334 	put_oa_config(dev_priv, oa_config);
3335 
3336 config_err:
3337 	mutex_unlock(&dev_priv->perf.metrics_lock);
3338 lock_err:
3339 	return ret;
3340 }
3341 
3342 static struct ctl_table oa_table[] = {
3343 	{
3344 	 .procname = "perf_stream_paranoid",
3345 	 .data = &i915_perf_stream_paranoid,
3346 	 .maxlen = sizeof(i915_perf_stream_paranoid),
3347 	 .mode = 0644,
3348 	 .proc_handler = proc_dointvec_minmax,
3349 	 .extra1 = &zero,
3350 	 .extra2 = &one,
3351 	 },
3352 	{
3353 	 .procname = "oa_max_sample_rate",
3354 	 .data = &i915_oa_max_sample_rate,
3355 	 .maxlen = sizeof(i915_oa_max_sample_rate),
3356 	 .mode = 0644,
3357 	 .proc_handler = proc_dointvec_minmax,
3358 	 .extra1 = &zero,
3359 	 .extra2 = &oa_sample_rate_hard_limit,
3360 	 },
3361 	{}
3362 };
3363 
3364 static struct ctl_table i915_root[] = {
3365 	{
3366 	 .procname = "i915",
3367 	 .maxlen = 0,
3368 	 .mode = 0555,
3369 	 .child = oa_table,
3370 	 },
3371 	{}
3372 };
3373 
3374 static struct ctl_table dev_root[] = {
3375 	{
3376 	 .procname = "dev",
3377 	 .maxlen = 0,
3378 	 .mode = 0555,
3379 	 .child = i915_root,
3380 	 },
3381 	{}
3382 };
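
/*
 * Once dev_root is registered (see i915_perf_init() below), these tunables
 * appear under procfs roughly as:
 *
 *	/proc/sys/dev/i915/perf_stream_paranoid
 *	/proc/sys/dev/i915/oa_max_sample_rate
 */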
3383 
3384 /**
3385  * i915_perf_init - initialize i915-perf state on module load
3386  * @dev_priv: i915 device instance
3387  *
3388  * Initializes i915-perf state without exposing anything to userspace.
3389  *
3390  * Note: i915-perf initialization is split into an 'init' and 'register'
3391  * phase with the i915_perf_register() exposing state to userspace.
3392  */
3393 void i915_perf_init(struct drm_i915_private *dev_priv)
3394 {
3395 	dev_priv->perf.oa.timestamp_frequency = 0;
3396 
3397 	if (IS_HASWELL(dev_priv)) {
3398 		dev_priv->perf.oa.ops.is_valid_b_counter_reg =
3399 			gen7_is_valid_b_counter_addr;
3400 		dev_priv->perf.oa.ops.is_valid_mux_reg =
3401 			hsw_is_valid_mux_addr;
3402 		dev_priv->perf.oa.ops.is_valid_flex_reg = NULL;
3403 		dev_priv->perf.oa.ops.init_oa_buffer = gen7_init_oa_buffer;
3404 		dev_priv->perf.oa.ops.enable_metric_set = hsw_enable_metric_set;
3405 		dev_priv->perf.oa.ops.disable_metric_set = hsw_disable_metric_set;
3406 		dev_priv->perf.oa.ops.oa_enable = gen7_oa_enable;
3407 		dev_priv->perf.oa.ops.oa_disable = gen7_oa_disable;
3408 		dev_priv->perf.oa.ops.read = gen7_oa_read;
3409 		dev_priv->perf.oa.ops.oa_hw_tail_read =
3410 			gen7_oa_hw_tail_read;
3411 
3412 		dev_priv->perf.oa.timestamp_frequency = 12500000;
3413 
3414 		dev_priv->perf.oa.oa_formats = hsw_oa_formats;
3415 	} else if (i915_modparams.enable_execlists) {
3416 		/* Note: although we could theoretically also support the
3417 		 * legacy ringbuffer mode on BDW (and earlier iterations of
3418 		 * this driver did, before upstreaming), it didn't seem
3419 		 * worth the complexity to maintain now that BDW+ enables
3420 		 * execlist mode by default.
3421 		 */
3422 		dev_priv->perf.oa.ops.is_valid_b_counter_reg =
3423 			gen7_is_valid_b_counter_addr;
3424 		dev_priv->perf.oa.ops.is_valid_mux_reg =
3425 			gen8_is_valid_mux_addr;
3426 		dev_priv->perf.oa.ops.is_valid_flex_reg =
3427 			gen8_is_valid_flex_addr;
3428 
3429 		dev_priv->perf.oa.ops.init_oa_buffer = gen8_init_oa_buffer;
3430 		dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set;
3431 		dev_priv->perf.oa.ops.disable_metric_set = gen8_disable_metric_set;
3432 		dev_priv->perf.oa.ops.oa_enable = gen8_oa_enable;
3433 		dev_priv->perf.oa.ops.oa_disable = gen8_oa_disable;
3434 		dev_priv->perf.oa.ops.read = gen8_oa_read;
3435 		dev_priv->perf.oa.ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
3436 
3437 		dev_priv->perf.oa.oa_formats = gen8_plus_oa_formats;
3438 
3439 		if (IS_GEN8(dev_priv)) {
3440 			dev_priv->perf.oa.ctx_oactxctrl_offset = 0x120;
3441 			dev_priv->perf.oa.ctx_flexeu0_offset = 0x2ce;
3442 
3443 			dev_priv->perf.oa.timestamp_frequency = 12500000;
3444 
3445 			dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<25);
3446 			if (IS_CHERRYVIEW(dev_priv)) {
3447 				dev_priv->perf.oa.ops.is_valid_mux_reg =
3448 					chv_is_valid_mux_addr;
3449 			}
3450 		} else if (IS_GEN9(dev_priv)) {
3451 			dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128;
3452 			dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de;
3453 
3454 			dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16);
3455 
3456 			switch (dev_priv->info.platform) {
3457 			case INTEL_BROXTON:
3458 			case INTEL_GEMINILAKE:
3459 				dev_priv->perf.oa.timestamp_frequency = 19200000;
3460 				break;
3461 			case INTEL_SKYLAKE:
3462 			case INTEL_KABYLAKE:
3463 			case INTEL_COFFEELAKE:
3464 				dev_priv->perf.oa.timestamp_frequency = 12000000;
3465 				break;
3466 			default:
3467 				/* Leave timestamp_frequency at 0 so we can
3468 				 * detect unsupported platforms.
3469 				 */
3470 				break;
3471 			}
3472 		}
3473 	}
3474 
3475 	if (dev_priv->perf.oa.timestamp_frequency) {
3476 		hrtimer_init(&dev_priv->perf.oa.poll_check_timer,
3477 				CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3478 		dev_priv->perf.oa.poll_check_timer.function = oa_poll_check_timer_cb;
3479 		init_waitqueue_head(&dev_priv->perf.oa.poll_wq);
3480 
3481 		INIT_LIST_HEAD(&dev_priv->perf.streams);
3482 		mutex_init(&dev_priv->perf.lock);
3483 		spin_lock_init(&dev_priv->perf.oa.oa_buffer.ptr_lock);
3484 
3485 		oa_sample_rate_hard_limit =
3486 			dev_priv->perf.oa.timestamp_frequency / 2;
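		/*
		 * Worked example: with the 12.5MHz OA timestamp frequency on
		 * HSW/BDW this caps the sample rate at 6.25MHz, i.e. at most
		 * one OA report every 160ns.
		 */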
3487 		dev_priv->perf.sysctl_header = register_sysctl_table(dev_root);
3488 
3489 		mutex_init(&dev_priv->perf.metrics_lock);
3490 		idr_init(&dev_priv->perf.metrics_idr);
3491 
3492 		dev_priv->perf.initialized = true;
3493 	}
3494 }
3495 
3496 static int destroy_config(int id, void *p, void *data)
3497 {
3498 	struct drm_i915_private *dev_priv = data;
3499 	struct i915_oa_config *oa_config = p;
3500 
3501 	put_oa_config(dev_priv, oa_config);
3502 
3503 	return 0;
3504 }
3505 
3506 /**
3507  * i915_perf_fini - Counterpart to i915_perf_init()
3508  * @dev_priv: i915 device instance
3509  */
3510 void i915_perf_fini(struct drm_i915_private *dev_priv)
3511 {
3512 	if (!dev_priv->perf.initialized)
3513 		return;
3514 
3515 	idr_for_each(&dev_priv->perf.metrics_idr, destroy_config, dev_priv);
3516 	idr_destroy(&dev_priv->perf.metrics_idr);
3517 
3518 	unregister_sysctl_table(dev_priv->perf.sysctl_header);
3519 
3520 	memset(&dev_priv->perf.oa.ops, 0, sizeof(dev_priv->perf.oa.ops));
3521 
3522 	dev_priv->perf.initialized = false;
3523 }
3524