1 /*
2  * Copyright © 2015-2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *   Robert Bragg <robert@sixbynine.org>
25  */
26 
27 
28 /**
29  * DOC: i915 Perf Overview
30  *
31  * Gen graphics supports a large number of performance counters that can help
32  * driver and application developers understand and optimize their use of the
33  * GPU.
34  *
35  * This i915 perf interface enables userspace to configure and open a file
36  * descriptor representing a stream of GPU metrics which can then be read() as
37  * a stream of sample records.
38  *
39  * The interface is particularly suited to exposing buffered metrics that are
40  * captured by DMA from the GPU, unsynchronized with and unrelated to the CPU.
41  *
42  * Streams representing a single context are accessible to applications with a
43  * corresponding drm file descriptor, such that OpenGL can use the interface
44  * without special privileges. Access to system-wide metrics requires root
45  * privileges by default, unless changed via the dev.i915.perf_stream_paranoid
46  * sysctl option.
47  *
48  */
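
/*
 * A minimal userspace sketch of opening an OA stream with the interface
 * described above (illustrative only, not driver code): "drm_fd" and
 * "metrics_set_id" are assumed to exist, the latter normally read from one
 * of the metric sets advertised under /sys/class/drm/card0/metrics/, and
 * the period exponent of 16 is an arbitrary choice:
 *
 *	uint64_t properties[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *		.num_properties = sizeof(properties) / sizeof(properties[0]),
 *		.properties_ptr = (uintptr_t)properties,
 *	};
 *	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 */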
49 
50 /**
51  * DOC: i915 Perf History and Comparison with Core Perf
52  *
53  * The interface was initially inspired by the core Perf infrastructure but
54  * some notable differences are:
55  *
56  * i915 perf file descriptors represent a "stream" instead of an "event"; a
57  * perf event primarily corresponds to a single 64bit value, while a stream
58  * might sample sets of tightly-coupled counters, depending on the
59  * configuration.  For example the Gen OA unit isn't designed to support
60  * orthogonal configurations of individual counters; it's configured for a set
61  * of related counters. Samples for an i915 perf stream capturing OA metrics
62  * will include a set of counter values packed in a compact HW specific format.
63  * The OA unit supports a number of different packing formats which can be
64  * selected by the user opening the stream. Perf has support for grouping
65  * events, but each event in the group is configured, validated and
66  * authenticated individually with separate system calls.
67  *
68  * i915 perf stream configurations are provided as an array of u64 (key,value)
69  * pairs, instead of a fixed struct with multiple miscellaneous config members,
70  * interleaved with event-type specific members.
71  *
72  * i915 perf doesn't support exposing metrics via an mmap'd circular buffer.
73  * The supported metrics are being written to memory by the GPU unsynchronized
74  * with the CPU, using HW specific packing formats for counter sets. Sometimes
75  * the constraints on HW configuration require reports to be filtered before it
76  * would be acceptable to expose them to unprivileged applications - to hide
77  * the metrics of other processes/contexts. For these use cases a read() based
78  * interface is a good fit, and provides an opportunity to filter data as it
79  * gets copied from the GPU mapped buffers to userspace buffers.
80  *
81  *
82  * Issues hit with first prototype based on Core Perf
83  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
84  *
85  * The first prototype of this driver was based on the core perf
86  * infrastructure, and while we did make that mostly work, with some changes to
87  * perf, we found we were breaking or working around too many assumptions baked
88  * into perf's current cpu-centric design.
89  *
90  * In the end we didn't see a clear benefit to making perf's implementation and
91  * interface more complex by changing design assumptions while we knew we still
92  * wouldn't be able to use any existing perf based userspace tools.
93  *
94  * Also considering the Gen specific nature of the Observability hardware and
95  * how userspace will sometimes need to combine i915 perf OA metrics with
96  * side-band OA data captured via MI_REPORT_PERF_COUNT commands; we're
97  * expecting the interface to be used by a platform specific userspace such as
98  * OpenGL or tools. This is to say: we aren't inherently missing out on having
99  * a standard vendor/architecture agnostic interface by not using perf.
100  *
101  *
102  * For posterity, in case we might re-visit trying to adapt core perf to be
103  * better suited to exposing i915 metrics these were the main pain points we
104  * hit:
105  *
106  * - The perf based OA PMU driver broke some significant design assumptions:
107  *
108  *   Existing perf pmus are used for profiling work on a cpu and we were
109  *   introducing the idea of _IS_DEVICE pmus with different security
110  *   implications, the need to fake cpu-related data (such as user/kernel
111  *   registers) to fit with perf's current design, and adding _DEVICE records
112  *   as a way to forward device-specific status records.
113  *
114  *   The OA unit writes reports of counters into a circular buffer, without
115  *   involvement from the CPU, making our PMU driver the first of its kind.
116  *
117  *   Given the way we were periodically forwarding data from the GPU-mapped OA
118  *   buffer to perf's buffer, those bursts of sample writes looked to perf like
119  *   we were sampling too fast and so we had to subvert its throttling checks.
120  *
121  *   Perf supports groups of counters and allows those to be read via
122  *   transactions internally but transactions currently seem designed to be
123  *   explicitly initiated from the cpu (say in response to a userspace read())
124  *   and while we could pull a report out of the OA buffer we can't
125  *   trigger a report from the cpu on demand.
126  *
127  *   Related to being report-based: the OA counters are configured in HW as a
128  *   set while perf generally expects counter configurations to be orthogonal.
129  *   Although counters can be associated with a group leader as they are
130  *   opened, there's no clear precedent for being able to provide group-wide
131  *   configuration attributes (for example we want to let userspace choose the
132  *   OA unit report format used to capture all counters in a set, or specify a
133  *   GPU context to filter metrics on). We avoided using perf's grouping
134  *   feature and forwarded OA reports to userspace via perf's 'raw' sample
135  *   field. This suited our userspace well considering how coupled the counters
136  *   are when dealing with normalization. It would be inconvenient to split
137  *   counters up into separate events, only to require userspace to recombine
138  *   them. For Mesa it's also convenient to be forwarded raw, periodic reports
139  *   for combining with the side-band raw reports it captures using
140  *   MI_REPORT_PERF_COUNT commands.
141  *
142  *   - As a side note on perf's grouping feature; there was also some concern
143  *     that using PERF_FORMAT_GROUP as a way to pack together counter values
144  *     would quite drastically inflate our sample sizes, which would likely
145  *     lower the effective sampling resolutions we could use when the available
146  *     memory bandwidth is limited.
147  *
148  *     With the OA unit's report formats, counters are packed together as 32
149  *     or 40bit values, with the largest report size being 256 bytes.
150  *
151  *     PERF_FORMAT_GROUP values are 64bit, but there doesn't appear to be a
152  *     documented ordering to the values, implying PERF_FORMAT_ID must also be
153  *     used to add a 64bit ID before each value; giving 16 bytes per counter.
154  *
155  *   Related to counter orthogonality: we can't time-share the OA unit, while
156  *   event scheduling is a central design idea within perf for allowing
157  *   userspace to open + enable more events than can be configured in HW at any
158  *   one time.  The OA unit is not designed to allow re-configuration while in
159  *   use. We can't reconfigure the OA unit without losing internal OA unit
160  *   state which we can't access explicitly to save and restore. Reconfiguring
161  *   the OA unit is also relatively slow, involving ~100 register writes. From
162  *   userspace Mesa also depends on a stable OA configuration when emitting
163  *   MI_REPORT_PERF_COUNT commands and importantly the OA unit can't be
164  *   disabled while there are outstanding MI_RPC commands lest we hang the
165  *   command streamer.
166  *
167  *   The contents of sample records aren't extensible by device drivers (i.e.
168  *   the sample_type bits). As an example; Sourab Gupta had been looking to
169  *   attach GPU timestamps to our OA samples. We were shoehorning OA reports
170  *   into sample records by using the 'raw' field, but it's tricky to pack more
171  *   than one thing into this field because events/core.c currently only lets a
172  *   pmu give a single raw data pointer plus len which will be copied into the
173  *   ring buffer. To include more than the OA report we'd have to copy the
174  *   report into an intermediate larger buffer. I'd been considering allowing a
175  *   vector of data+len values to be specified for copying the raw data, but
176  *   it felt like a kludge to be using the raw field for this purpose.
177  *
178  * - It felt like our perf based PMU was making some technical compromises
179  *   just for the sake of using perf:
180  *
181  *   perf_event_open() requires events to either relate to a pid or a specific
182  *   cpu core, while our device pmu related to neither.  Events opened with a
183  *   pid will be automatically enabled/disabled according to the scheduling of
184  *   that process - so not appropriate for us. When an event is related to a
185  *   cpu id, perf ensures pmu methods will be invoked via an inter-processor
186  *   interrupt on that core. To avoid invasive changes our userspace opened OA
187  *   perf events for a specific cpu. This was workable but it meant the
188  *   majority of the OA driver ran in atomic context, including all OA report
189  *   forwarding, which wasn't really necessary in our case and made
190  *   our locking requirements somewhat complex as we handled the interaction
191  *   with the rest of the i915 driver.
192  */
193 
194 #include <linux/anon_inodes.h>
195 #include <linux/sizes.h>
196 #include <linux/uuid.h>
197 
198 #include "gem/i915_gem_context.h"
199 #include "gem/i915_gem_internal.h"
200 #include "gt/intel_engine_pm.h"
201 #include "gt/intel_engine_regs.h"
202 #include "gt/intel_engine_user.h"
203 #include "gt/intel_execlists_submission.h"
204 #include "gt/intel_gpu_commands.h"
205 #include "gt/intel_gt.h"
206 #include "gt/intel_gt_clock_utils.h"
207 #include "gt/intel_gt_mcr.h"
208 #include "gt/intel_gt_regs.h"
209 #include "gt/intel_lrc.h"
210 #include "gt/intel_lrc_reg.h"
211 #include "gt/intel_ring.h"
212 #include "gt/uc/intel_guc_slpc.h"
213 
214 #include "i915_drv.h"
215 #include "i915_file_private.h"
216 #include "i915_perf.h"
217 #include "i915_perf_oa_regs.h"
218 #include "i915_reg.h"
219 
220 /* HW requires this to be a power of two, between 128k and 16M, though the
221  * driver is currently designed assuming the largest 16M size is used, such
222  * that overflow cases are unlikely in normal operation.
223  */
224 #define OA_BUFFER_SIZE		SZ_16M
225 
226 #define OA_TAKEN(tail, head)	((tail - head) & (OA_BUFFER_SIZE - 1))
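
/*
 * E.g. with OA_BUFFER_SIZE = 16M, a wrapped tail = 0x80 and head = 0xffff80
 * give OA_TAKEN(0x80, 0xffff80) = (0x80 - 0xffff80) & 0xffffff = 0x100,
 * i.e. 256 bytes of reports available across the wrap point.
 */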
227 
228 /**
229  * DOC: OA Tail Pointer Race
230  *
231  * There's a HW race condition between OA unit tail pointer register updates and
232  * writes to memory whereby the tail pointer can sometimes get ahead of what's
233  * been written out to the OA buffer so far (in terms of what's visible to the
234  * CPU).
235  *
236  * Although this can be observed explicitly while copying reports to userspace
237  * by checking for a zeroed report-id field in tail reports, we want to account
238  * for this earlier, as part of oa_buffer_check_unlocked(), to avoid lots of
239  * redundant read() attempts.
240  *
241  * We work around this issue in oa_buffer_check_unlocked() by reading the reports
242  * in the OA buffer, starting from the tail reported by the HW until we find a
243  * report with its first 2 dwords not 0, meaning its previous report is
244  * completely in memory and ready to be read. Those dwords are also set to 0
245  * once read and the whole buffer is cleared upon OA buffer initialization. The
246  * first dword is the reason for this report while the second is the timestamp,
247  * making it fairly unlikely that both fields will be 0. A more
248  * detailed explanation is available in oa_buffer_check_unlocked().
249  *
250  * Most of the implementation details for this workaround are in
251  * oa_buffer_check_unlocked() and _append_oa_reports().
252  *
253  * Note for posterity: previously the driver used to define an effective tail
254  * pointer that lagged the real pointer by a 'tail margin' measured in bytes
255  * derived from %OA_TAIL_MARGIN_NSEC and the configured sampling frequency.
256  * This was flawed considering that the OA unit may also automatically generate
257  * non-periodic reports (such as on context switch) or the OA unit may be
258  * enabled without any periodic sampling.
259  */
260 #define OA_TAIL_MARGIN_NSEC	100000ULL
261 #define INVALID_TAIL_PTR	0xffffffff
262 
263 /* The default frequency for checking whether the OA unit has written new
264  * reports to the circular OA buffer...
265  */
266 #define DEFAULT_POLL_FREQUENCY_HZ 200
267 #define DEFAULT_POLL_PERIOD_NS (NSEC_PER_SEC / DEFAULT_POLL_FREQUENCY_HZ)
268 
269 /* for sysctl proc_dointvec_minmax of dev.i915.perf_stream_paranoid */
270 static u32 i915_perf_stream_paranoid = true;
271 
272 /* The maximum exponent the hardware accepts is 63 (essentially it selects one
273  * of the 64bit timestamp bits to trigger reports from) but there's currently
274  * no known use case for sampling as infrequently as once per 47 thousand years.
275  *
276  * Since the timestamps included in OA reports are only 32bits it seems
277  * reasonable to limit the OA exponent where it's still possible to account for
278  * overflow in OA report timestamps.
279  */
280 #define OA_EXPONENT_MAX 31
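
/*
 * As a worked example of this limit (a sketch assuming Haswell's 12.5MHz OA
 * timestamp frequency and a period of 2^(exponent + 1) timestamp ticks):
 * exponent 0 gives 2 / 12.5MHz = 160ns between reports, while
 * OA_EXPONENT_MAX = 31 gives 2^32 / 12.5MHz, roughly 343 seconds, which is
 * also about how long the 32bit report timestamps take to wrap.
 */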
281 
282 #define INVALID_CTX_ID 0xffffffff
283 
284 /* On Gen8+ automatically triggered OA reports include a 'reason' field... */
285 #define OAREPORT_REASON_MASK           0x3f
286 #define OAREPORT_REASON_MASK_EXTENDED  0x7f
287 #define OAREPORT_REASON_SHIFT          19
288 #define OAREPORT_REASON_TIMER          (1<<0)
289 #define OAREPORT_REASON_CTX_SWITCH     (1<<3)
290 #define OAREPORT_REASON_CLK_RATIO      (1<<5)
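
/*
 * E.g. a timer-triggered (periodic) report can be recognized with something
 * like the decode used in gen8_append_oa_reports() below,
 * handle_periodic_report() being a hypothetical consumer:
 *
 *	reason = (report32[0] >> OAREPORT_REASON_SHIFT) & OAREPORT_REASON_MASK;
 *	if (reason & OAREPORT_REASON_TIMER)
 *		handle_periodic_report(report32);
 */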
291 
292 #define HAS_MI_SET_PREDICATE(i915) (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
293 
294 /* For sysctl proc_dointvec_minmax of i915_oa_max_sample_rate
295  *
296  * The highest sampling frequency we can theoretically program the OA unit
297  * with is always half the timestamp frequency: e.g. 6.25MHz for Haswell.
298  *
299  * Initialized just before we register the sysctl parameter.
300  */
301 static int oa_sample_rate_hard_limit;
302 
303 /* Theoretically we can program the OA unit to sample every 160ns but we don't
304  * allow that by default unless root...
305  *
306  * The default threshold of 100000Hz is based on perf's similar
307  * kernel.perf_event_max_sample_rate sysctl parameter.
308  */
309 static u32 i915_oa_max_sample_rate = 100000;
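
/*
 * E.g. assuming this is registered as the dev.i915.oa_max_sample_rate
 * sysctl, a privileged user could raise the limit at runtime with:
 *
 *	sysctl -w dev.i915.oa_max_sample_rate=200000
 */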
310 
311 /* XXX: beware if future OA HW adds new report formats: the current code
312  * assumes all reports have a power-of-two size and that ~(size - 1) can
313  * be used as a mask to align the OA tail pointer.
314  */
315 static const struct i915_oa_format oa_formats[I915_OA_FORMAT_MAX] = {
316 	[I915_OA_FORMAT_A13]	    = { 0, 64 },
317 	[I915_OA_FORMAT_A29]	    = { 1, 128 },
318 	[I915_OA_FORMAT_A13_B8_C8]  = { 2, 128 },
319 	/* A29_B8_C8 Disallowed as 192 bytes doesn't factor into buffer size */
320 	[I915_OA_FORMAT_B4_C8]	    = { 4, 64 },
321 	[I915_OA_FORMAT_A45_B8_C8]  = { 5, 256 },
322 	[I915_OA_FORMAT_B4_C8_A16]  = { 6, 128 },
323 	[I915_OA_FORMAT_C4_B8]	    = { 7, 64 },
324 	[I915_OA_FORMAT_A12]		    = { 0, 64 },
325 	[I915_OA_FORMAT_A12_B8_C8]	    = { 2, 128 },
326 	[I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
327 	[I915_OAR_FORMAT_A32u40_A4u32_B8_C8]    = { 5, 256 },
328 	[I915_OA_FORMAT_A24u40_A14u32_B8_C8]    = { 5, 256 },
329 };
330 
331 #define SAMPLE_OA_REPORT      (1<<0)
332 
333 /**
334  * struct perf_open_properties - for validated properties given to open a stream
335  * @sample_flags: `DRM_I915_PERF_PROP_SAMPLE_*` properties are tracked as flags
336  * @single_context: Whether a single or all gpu contexts should be monitored
337  * @hold_preemption: Whether the preemption is disabled for the filtered
338  *                   context
339  * @ctx_handle: A gem ctx handle for use with @single_context
340  * @metrics_set: An ID for an OA unit metric set advertised via sysfs
341  * @oa_format: An OA unit HW report format
342  * @oa_periodic: Whether to enable periodic OA unit sampling
343  * @oa_period_exponent: The OA unit sampling period is derived from this
344  * @engine: The engine (typically rcs0) being monitored by the OA unit
345  * @has_sseu: Whether @sseu was specified by userspace
346  * @sseu: internal SSEU configuration computed either from the userspace
347  *        specified configuration in the opening parameters or a default value
348  *        (see get_default_sseu_config())
349  * @poll_oa_period: The period in nanoseconds at which the CPU will check for OA
350  * data availability
351  *
352  * As read_properties_unlocked() enumerates and validates the properties given
353  * to open a stream of metrics, the configuration is built up in the structure,
354  * which starts out zero-initialized.
355  */
356 struct perf_open_properties {
357 	u32 sample_flags;
358 
359 	u64 single_context:1;
360 	u64 hold_preemption:1;
361 	u64 ctx_handle;
362 
363 	/* OA sampling state */
364 	int metrics_set;
365 	int oa_format;
366 	bool oa_periodic;
367 	int oa_period_exponent;
368 
369 	struct intel_engine_cs *engine;
370 
371 	bool has_sseu;
372 	struct intel_sseu sseu;
373 
374 	u64 poll_oa_period;
375 };
376 
377 struct i915_oa_config_bo {
378 	struct llist_node node;
379 
380 	struct i915_oa_config *oa_config;
381 	struct i915_vma *vma;
382 };
383 
384 static struct ctl_table_header *sysctl_header;
385 
386 static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer);
387 
388 void i915_oa_config_release(struct kref *ref)
389 {
390 	struct i915_oa_config *oa_config =
391 		container_of(ref, typeof(*oa_config), ref);
392 
393 	kfree(oa_config->flex_regs);
394 	kfree(oa_config->b_counter_regs);
395 	kfree(oa_config->mux_regs);
396 
397 	kfree_rcu(oa_config, rcu);
398 }
399 
400 struct i915_oa_config *
401 i915_perf_get_oa_config(struct i915_perf *perf, int metrics_set)
402 {
403 	struct i915_oa_config *oa_config;
404 
405 	rcu_read_lock();
406 	oa_config = idr_find(&perf->metrics_idr, metrics_set);
407 	if (oa_config)
408 		oa_config = i915_oa_config_get(oa_config);
409 	rcu_read_unlock();
410 
411 	return oa_config;
412 }
413 
414 static void free_oa_config_bo(struct i915_oa_config_bo *oa_bo)
415 {
416 	i915_oa_config_put(oa_bo->oa_config);
417 	i915_vma_put(oa_bo->vma);
418 	kfree(oa_bo);
419 }
420 
421 static u32 gen12_oa_hw_tail_read(struct i915_perf_stream *stream)
422 {
423 	struct intel_uncore *uncore = stream->uncore;
424 
425 	return intel_uncore_read(uncore, GEN12_OAG_OATAILPTR) &
426 	       GEN12_OAG_OATAILPTR_MASK;
427 }
428 
429 static u32 gen8_oa_hw_tail_read(struct i915_perf_stream *stream)
430 {
431 	struct intel_uncore *uncore = stream->uncore;
432 
433 	return intel_uncore_read(uncore, GEN8_OATAILPTR) & GEN8_OATAILPTR_MASK;
434 }
435 
436 static u32 gen7_oa_hw_tail_read(struct i915_perf_stream *stream)
437 {
438 	struct intel_uncore *uncore = stream->uncore;
439 	u32 oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);
440 
441 	return oastatus1 & GEN7_OASTATUS1_TAIL_MASK;
442 }
443 
444 /**
445  * oa_buffer_check_unlocked - check for data and update tail ptr state
446  * @stream: i915 stream instance
447  *
448  * This is either called via fops (for blocking reads in user ctx) or the poll
449  * check hrtimer (atomic ctx) to read the OA buffer tail pointer and determine
450  * whether there is data available for userspace to read.
451  *
452  * This function is central to providing a workaround for the OA unit tail
453  * pointer having a race with respect to what data is visible to the CPU.
454  * It is responsible for reading tail pointers from the hardware and giving
455  * the pointers time to 'age' before they are made available for reading.
456  * (See description of OA_TAIL_MARGIN_NSEC above for further details.)
457  *
458  * Besides returning true when there is data available to read() this function
459  * also updates the tail, aging_tail and aging_timestamp in the oa_buffer
460  * object.
461  *
462  * Note: It's safe to read OA config state here unlocked, assuming that this is
463  * only called while the stream is enabled, during which time the global OA
464  * configuration can't be modified.
465  *
466  * Returns: %true if the OA buffer contains data, else %false
467  */
468 static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream)
469 {
470 	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
471 	int report_size = stream->oa_buffer.format->size;
472 	unsigned long flags;
473 	bool pollin;
474 	u32 hw_tail;
475 	u64 now;
476 
477 	/* We have to consider the (unlikely) possibility that read() errors
478 	 * could result in an OA buffer reset which might reset the head and
479 	 * tail state.
480 	 */
481 	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
482 
483 	hw_tail = stream->perf->ops.oa_hw_tail_read(stream);
484 
485 	/* The tail pointer increases in 64 byte increments,
486 	 * not in report_size steps...
487 	 */
488 	hw_tail &= ~(report_size - 1);
489 
490 	now = ktime_get_mono_fast_ns();
491 
492 	if (hw_tail == stream->oa_buffer.aging_tail &&
493 	    (now - stream->oa_buffer.aging_timestamp) > OA_TAIL_MARGIN_NSEC) {
494 		/* If the HW tail hasn't moved since the last check and the HW
495 		 * tail has been aging for long enough, declare it the new
496 		 * tail.
497 		 */
498 		stream->oa_buffer.tail = stream->oa_buffer.aging_tail;
499 	} else {
500 		u32 head, tail, aged_tail;
501 
502 		/* NB: The head we observe here might effectively be a little
503 		 * out of date. If a read() is in progress, the head could be
504 		 * anywhere between this head and stream->oa_buffer.tail.
505 		 */
506 		head = stream->oa_buffer.head - gtt_offset;
507 		aged_tail = stream->oa_buffer.tail - gtt_offset;
508 
509 		hw_tail -= gtt_offset;
510 		tail = hw_tail;
511 
512 		/* Walk the stream backward until we find a report with dword 0
513 		 * & 1 not at 0. Since the circular buffer pointers progress by
514 		 * increments of 64 bytes and reports can be up to 256
515 		 * bytes long, we can't tell whether a report has fully landed
516 		 * in memory before the first 2 dwords of the following report
517 		 * have effectively landed.
518 		 *
519 		 * This is assuming that the writes of the OA unit land in
520 		 * memory in the order they were written to.
521 		 * If not : (╯°□°)╯︵ ┻━┻
522 		 */
523 		while (OA_TAKEN(tail, aged_tail) >= report_size) {
524 			u32 *report32 = (void *)(stream->oa_buffer.vaddr + tail);
525 
526 			if (report32[0] != 0 || report32[1] != 0)
527 				break;
528 
529 			tail = (tail - report_size) & (OA_BUFFER_SIZE - 1);
530 		}
531 
532 		if (OA_TAKEN(hw_tail, tail) > report_size &&
533 		    __ratelimit(&stream->perf->tail_pointer_race))
534 			drm_notice(&stream->uncore->i915->drm,
535 				   "unlanded report(s) head=0x%x tail=0x%x hw_tail=0x%x\n",
536 				   head, tail, hw_tail);
537 
538 		stream->oa_buffer.tail = gtt_offset + tail;
539 		stream->oa_buffer.aging_tail = gtt_offset + hw_tail;
540 		stream->oa_buffer.aging_timestamp = now;
541 	}
542 
543 	pollin = OA_TAKEN(stream->oa_buffer.tail - gtt_offset,
544 			  stream->oa_buffer.head - gtt_offset) >= report_size;
545 
546 	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
547 
548 	return pollin;
549 }
550 
551 /**
552  * append_oa_status - Appends a status record to a userspace read() buffer.
553  * @stream: An i915-perf stream opened for OA metrics
554  * @buf: destination buffer given by userspace
555  * @count: the number of bytes userspace wants to read
556  * @offset: (inout): the current position for writing into @buf
557  * @type: The kind of status to report to userspace
558  *
559  * Writes a status record (such as `DRM_I915_PERF_RECORD_OA_REPORT_LOST`)
560  * into the userspace read() buffer.
561  *
562  * The @buf @offset will only be updated on success.
563  *
564  * Returns: 0 on success, negative error code on failure.
565  */
566 static int append_oa_status(struct i915_perf_stream *stream,
567 			    char __user *buf,
568 			    size_t count,
569 			    size_t *offset,
570 			    enum drm_i915_perf_record_type type)
571 {
572 	struct drm_i915_perf_record_header header = { type, 0, sizeof(header) };
573 
574 	if ((count - *offset) < header.size)
575 		return -ENOSPC;
576 
577 	if (copy_to_user(buf + *offset, &header, sizeof(header)))
578 		return -EFAULT;
579 
580 	(*offset) += header.size;
581 
582 	return 0;
583 }
584 
585 /**
586  * append_oa_sample - Copies single OA report into userspace read() buffer.
587  * @stream: An i915-perf stream opened for OA metrics
588  * @buf: destination buffer given by userspace
589  * @count: the number of bytes userspace wants to read
590  * @offset: (inout): the current position for writing into @buf
591  * @report: A single OA report to (optionally) include as part of the sample
592  *
593  * The contents of a sample are configured through `DRM_I915_PERF_PROP_SAMPLE_*`
594  * properties when opening a stream, tracked as `stream->sample_flags`. This
595  * function copies the requested components of a single sample to the given
596  * read() @buf.
597  *
598  * The @buf @offset will only be updated on success.
599  *
600  * Returns: 0 on success, negative error code on failure.
601  */
602 static int append_oa_sample(struct i915_perf_stream *stream,
603 			    char __user *buf,
604 			    size_t count,
605 			    size_t *offset,
606 			    const u8 *report)
607 {
608 	int report_size = stream->oa_buffer.format->size;
609 	struct drm_i915_perf_record_header header;
610 
611 	header.type = DRM_I915_PERF_RECORD_SAMPLE;
612 	header.pad = 0;
613 	header.size = stream->sample_size;
614 
615 	if ((count - *offset) < header.size)
616 		return -ENOSPC;
617 
618 	buf += *offset;
619 	if (copy_to_user(buf, &header, sizeof(header)))
620 		return -EFAULT;
621 	buf += sizeof(header);
622 
623 	if (copy_to_user(buf, report, report_size))
624 		return -EFAULT;
625 
626 	(*offset) += header.size;
627 
628 	return 0;
629 }
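
/*
 * A matching userspace sketch (illustrative only, not driver code) of
 * walking the records produced by append_oa_status() and append_oa_sample();
 * "stream_fd" is a previously opened i915 perf stream and
 * process_oa_report() a hypothetical consumer of the raw OA report that
 * follows each sample header:
 *
 *	char buf[4096];
 *	ssize_t len = read(stream_fd, buf, sizeof(buf));
 *	size_t i = 0;
 *
 *	while (len > 0 && i + sizeof(struct drm_i915_perf_record_header) <= (size_t)len) {
 *		const struct drm_i915_perf_record_header *hdr =
 *			(const void *)(buf + i);
 *
 *		if (hdr->type == DRM_I915_PERF_RECORD_SAMPLE)
 *			process_oa_report((const uint8_t *)(hdr + 1));
 *		i += hdr->size;
 *	}
 */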
630 
631 /**
632  * gen8_append_oa_reports - Copies all buffered OA reports into
633  *			    userspace read() buffer.
634  * @stream: An i915-perf stream opened for OA metrics
635  * @buf: destination buffer given by userspace
636  * @count: the number of bytes userspace wants to read
637  * @offset: (inout): the current position for writing into @buf
638  *
639  * Notably any error condition resulting in a short read (-%ENOSPC or
640  * -%EFAULT) will be returned even though one or more records may
641  * have been successfully copied. In this case it's up to the caller
642  * to decide if the error should be squashed before returning to
643  * userspace.
644  *
645  * Note: reports are consumed from the head, and appended to the
646  * tail, so the tail chases the head?... If you think that's mad
647  * and back-to-front you're not alone, but this follows the
648  * Gen PRM naming convention.
649  *
650  * Returns: 0 on success, negative error code on failure.
651  */
652 static int gen8_append_oa_reports(struct i915_perf_stream *stream,
653 				  char __user *buf,
654 				  size_t count,
655 				  size_t *offset)
656 {
657 	struct intel_uncore *uncore = stream->uncore;
658 	int report_size = stream->oa_buffer.format->size;
659 	u8 *oa_buf_base = stream->oa_buffer.vaddr;
660 	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
661 	u32 mask = (OA_BUFFER_SIZE - 1);
662 	size_t start_offset = *offset;
663 	unsigned long flags;
664 	u32 head, tail;
665 	int ret = 0;
666 
667 	if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled))
668 		return -EIO;
669 
670 	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
671 
672 	head = stream->oa_buffer.head;
673 	tail = stream->oa_buffer.tail;
674 
675 	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
676 
677 	/*
678 	 * NB: oa_buffer.head/tail include the gtt_offset which we don't want
679 	 * while indexing relative to oa_buf_base.
680 	 */
681 	head -= gtt_offset;
682 	tail -= gtt_offset;
683 
684 	/*
685 	 * An out of bounds or misaligned head or tail pointer implies a driver
686 	 * bug since we validate + align the tail pointers we read from the
687 	 * hardware and we are in full control of the head pointer which should
688 	 * only be incremented by multiples of the report size (notably also
689 	 * all a power of two).
690 	 */
691 	if (drm_WARN_ONCE(&uncore->i915->drm,
692 			  head > OA_BUFFER_SIZE || head % report_size ||
693 			  tail > OA_BUFFER_SIZE || tail % report_size,
694 			  "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
695 			  head, tail))
696 		return -EIO;
697 
698 
699 	for (/* none */;
700 	     OA_TAKEN(tail, head);
701 	     head = (head + report_size) & mask) {
702 		u8 *report = oa_buf_base + head;
703 		u32 *report32 = (void *)report;
704 		u32 ctx_id;
705 		u32 reason;
706 
707 		/*
708 		 * All the report sizes factor neatly into the buffer
709 		 * size so we never expect to see a report split
710 		 * between the beginning and end of the buffer.
711 		 *
712 		 * Given the initial alignment check a misalignment
713 		 * here would imply a driver bug that would result
714 		 * in an overrun.
715 		 */
716 		if (drm_WARN_ON(&uncore->i915->drm,
717 				(OA_BUFFER_SIZE - head) < report_size)) {
718 			drm_err(&uncore->i915->drm,
719 				"Spurious OA head ptr: non-integral report offset\n");
720 			break;
721 		}
722 
723 		/*
724 		 * The reason field includes flags identifying what
725 		 * triggered this specific report (mostly timer
726 		 * triggered or e.g. due to a context switch).
727 		 *
728 		 * This field is never expected to be zero so we can
729 		 * check that the report isn't invalid before copying
730 		 * it to userspace...
731 		 */
732 		reason = ((report32[0] >> OAREPORT_REASON_SHIFT) &
733 			  (GRAPHICS_VER(stream->perf->i915) == 12 ?
734 			   OAREPORT_REASON_MASK_EXTENDED :
735 			   OAREPORT_REASON_MASK));
736 
737 		ctx_id = report32[2] & stream->specific_ctx_id_mask;
738 
739 		/*
740 		 * Squash whatever is in the CTX_ID field if it's marked as
741 		 * invalid to be sure we avoid false-positive, single-context
742 		 * filtering below...
743 		 *
744 		 * Note that we don't clear the valid_ctx_bit so userspace can
745 		 * understand that the ID has been squashed by the kernel.
746 		 */
747 		if (!(report32[0] & stream->perf->gen8_valid_ctx_bit) &&
748 		    GRAPHICS_VER(stream->perf->i915) <= 11)
749 			ctx_id = report32[2] = INVALID_CTX_ID;
750 
751 		/*
752 		 * NB: For Gen 8 the OA unit no longer supports clock gating
753 		 * off for a specific context and the kernel can't securely
754 		 * stop the counters from updating as system-wide / global
755 		 * values.
756 		 *
757 		 * Automatic reports now include a context ID so reports can be
758 		 * filtered on the cpu but it's not worth trying to
759 		 * automatically subtract/hide counter progress for other
760 		 * contexts while filtering since we can't stop userspace
761 		 * issuing MI_REPORT_PERF_COUNT commands which would still
762 		 * provide a side-band view of the real values.
763 		 *
764 		 * To allow userspace (such as Mesa/GL_INTEL_performance_query)
765 		 * to normalize counters for a single filtered context then it
766 		 * needs to be forwarded bookend context-switch reports so that it
767 		 * can track switches in between MI_REPORT_PERF_COUNT commands
768 		 * and can itself subtract/ignore the progress of counters
769 		 * associated with other contexts. Note that the hardware
770 		 * automatically triggers reports when switching to a new
771 		 * context which are tagged with the ID of the newly active
772 		 * context. To avoid the complexity (and likely fragility) of
773 		 * reading ahead while parsing reports to try and minimize
774 		 * forwarding redundant context switch reports (i.e. between
775 		 * other, unrelated contexts) we simply elect to forward them
776 		 * all.
777 		 *
778 		 * We don't rely solely on the reason field to identify context
779 		 * switches since it's not uncommon for periodic samples to
780 		 * identify a switch before any 'context switch' report.
781 		 */
782 		if (!stream->ctx ||
783 		    stream->specific_ctx_id == ctx_id ||
784 		    stream->oa_buffer.last_ctx_id == stream->specific_ctx_id ||
785 		    reason & OAREPORT_REASON_CTX_SWITCH) {
786 
787 			/*
788 			 * While filtering for a single context we avoid
789 			 * leaking the IDs of other contexts.
790 			 */
791 			if (stream->ctx &&
792 			    stream->specific_ctx_id != ctx_id) {
793 				report32[2] = INVALID_CTX_ID;
794 			}
795 
796 			ret = append_oa_sample(stream, buf, count, offset,
797 					       report);
798 			if (ret)
799 				break;
800 
801 			stream->oa_buffer.last_ctx_id = ctx_id;
802 		}
803 
804 		/*
805 		 * Clear out the first 2 dwords as a means to detect unlanded
806 		 * reports.
807 		 */
808 		report32[0] = 0;
809 		report32[1] = 0;
810 	}
811 
812 	if (start_offset != *offset) {
813 		i915_reg_t oaheadptr;
814 
815 		oaheadptr = GRAPHICS_VER(stream->perf->i915) == 12 ?
816 			    GEN12_OAG_OAHEADPTR : GEN8_OAHEADPTR;
817 
818 		spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
819 
820 		/*
821 		 * We removed the gtt_offset for the copy loop above, indexing
822 		 * relative to oa_buf_base so put back here...
823 		 */
824 		head += gtt_offset;
825 		intel_uncore_write(uncore, oaheadptr,
826 				   head & GEN12_OAG_OAHEADPTR_MASK);
827 		stream->oa_buffer.head = head;
828 
829 		spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
830 	}
831 
832 	return ret;
833 }
834 
835 /**
836  * gen8_oa_read - copy status records then buffered OA reports
837  * @stream: An i915-perf stream opened for OA metrics
838  * @buf: destination buffer given by userspace
839  * @count: the number of bytes userspace wants to read
840  * @offset: (inout): the current position for writing into @buf
841  *
842  * Checks OA unit status registers and if necessary appends corresponding
843  * status records for userspace (such as for a buffer full condition) and then
844  * initiates appending any buffered OA reports.
845  *
846  * Updates @offset according to the number of bytes successfully copied into
847  * the userspace buffer.
848  *
849  * NB: some data may be successfully copied to the userspace buffer
850  * even if an error is returned, and this is reflected in the
851  * updated @offset.
852  *
853  * Returns: zero on success or a negative error code
854  */
855 static int gen8_oa_read(struct i915_perf_stream *stream,
856 			char __user *buf,
857 			size_t count,
858 			size_t *offset)
859 {
860 	struct intel_uncore *uncore = stream->uncore;
861 	u32 oastatus;
862 	i915_reg_t oastatus_reg;
863 	int ret;
864 
865 	if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
866 		return -EIO;
867 
868 	oastatus_reg = GRAPHICS_VER(stream->perf->i915) == 12 ?
869 		       GEN12_OAG_OASTATUS : GEN8_OASTATUS;
870 
871 	oastatus = intel_uncore_read(uncore, oastatus_reg);
872 
873 	/*
874 	 * We treat OABUFFER_OVERFLOW as a significant error:
875 	 *
876 	 * Although theoretically we could sometimes handle this more
877 	 * gracefully, some Gens don't correctly suppress certain
878 	 * automatically triggered reports in this condition and so we
879 	 * have to assume that old reports are now being trampled
880 	 * over.
881 	 *
882 	 * Considering how we don't currently give userspace control
883 	 * over the OA buffer size and always configure a large 16MB
884 	 * buffer, then a buffer overflow likely indicates that
885 	 * something has gone quite badly wrong.
886 	 */
887 	if (oastatus & GEN8_OASTATUS_OABUFFER_OVERFLOW) {
888 		ret = append_oa_status(stream, buf, count, offset,
889 				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
890 		if (ret)
891 			return ret;
892 
893 		drm_dbg(&stream->perf->i915->drm,
894 			"OA buffer overflow (exponent = %d): force restart\n",
895 			stream->period_exponent);
896 
897 		stream->perf->ops.oa_disable(stream);
898 		stream->perf->ops.oa_enable(stream);
899 
900 		/*
901 		 * Note: .oa_enable() is expected to re-init the oabuffer and
902 		 * reset GEN8_OASTATUS for us
903 		 */
904 		oastatus = intel_uncore_read(uncore, oastatus_reg);
905 	}
906 
907 	if (oastatus & GEN8_OASTATUS_REPORT_LOST) {
908 		ret = append_oa_status(stream, buf, count, offset,
909 				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
910 		if (ret)
911 			return ret;
912 
913 		intel_uncore_rmw(uncore, oastatus_reg,
914 				 GEN8_OASTATUS_COUNTER_OVERFLOW |
915 				 GEN8_OASTATUS_REPORT_LOST,
916 				 IS_GRAPHICS_VER(uncore->i915, 8, 11) ?
917 				 (GEN8_OASTATUS_HEAD_POINTER_WRAP |
918 				  GEN8_OASTATUS_TAIL_POINTER_WRAP) : 0);
919 	}
920 
921 	return gen8_append_oa_reports(stream, buf, count, offset);
922 }
923 
924 /**
925  * gen7_append_oa_reports - Copies all buffered OA reports into
926  *			    userspace read() buffer.
927  * @stream: An i915-perf stream opened for OA metrics
928  * @buf: destination buffer given by userspace
929  * @count: the number of bytes userspace wants to read
930  * @offset: (inout): the current position for writing into @buf
931  *
932  * Notably any error condition resulting in a short read (-%ENOSPC or
933  * -%EFAULT) will be returned even though one or more records may
934  * have been successfully copied. In this case it's up to the caller
935  * to decide if the error should be squashed before returning to
936  * userspace.
937  *
938  * Note: reports are consumed from the head, and appended to the
939  * tail, so the tail chases the head?... If you think that's mad
940  * and back-to-front you're not alone, but this follows the
941  * Gen PRM naming convention.
942  *
943  * Returns: 0 on success, negative error code on failure.
944  */
945 static int gen7_append_oa_reports(struct i915_perf_stream *stream,
946 				  char __user *buf,
947 				  size_t count,
948 				  size_t *offset)
949 {
950 	struct intel_uncore *uncore = stream->uncore;
951 	int report_size = stream->oa_buffer.format->size;
952 	u8 *oa_buf_base = stream->oa_buffer.vaddr;
953 	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
954 	u32 mask = (OA_BUFFER_SIZE - 1);
955 	size_t start_offset = *offset;
956 	unsigned long flags;
957 	u32 head, tail;
958 	int ret = 0;
959 
960 	if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled))
961 		return -EIO;
962 
963 	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
964 
965 	head = stream->oa_buffer.head;
966 	tail = stream->oa_buffer.tail;
967 
968 	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
969 
970 	/* NB: oa_buffer.head/tail include the gtt_offset which we don't want
971 	 * while indexing relative to oa_buf_base.
972 	 */
973 	head -= gtt_offset;
974 	tail -= gtt_offset;
975 
976 	/* An out of bounds or misaligned head or tail pointer implies a driver
977 	 * bug since we validate + align the tail pointers we read from the
978 	 * hardware and we are in full control of the head pointer which should
979 	 * only be incremented by multiples of the report size (notably also
980 	 * all a power of two).
981 	 */
982 	if (drm_WARN_ONCE(&uncore->i915->drm,
983 			  head > OA_BUFFER_SIZE || head % report_size ||
984 			  tail > OA_BUFFER_SIZE || tail % report_size,
985 			  "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
986 			  head, tail))
987 		return -EIO;
988 
989 
990 	for (/* none */;
991 	     OA_TAKEN(tail, head);
992 	     head = (head + report_size) & mask) {
993 		u8 *report = oa_buf_base + head;
994 		u32 *report32 = (void *)report;
995 
996 		/* All the report sizes factor neatly into the buffer
997 		 * size so we never expect to see a report split
998 		 * between the beginning and end of the buffer.
999 		 *
1000 		 * Given the initial alignment check a misalignment
1001 		 * here would imply a driver bug that would result
1002 		 * in an overrun.
1003 		 */
1004 		if (drm_WARN_ON(&uncore->i915->drm,
1005 				(OA_BUFFER_SIZE - head) < report_size)) {
1006 			drm_err(&uncore->i915->drm,
1007 				"Spurious OA head ptr: non-integral report offset\n");
1008 			break;
1009 		}
1010 
1011 		/* The report-ID field for periodic samples includes
1012 		 * some undocumented flags related to what triggered
1013 		 * the report and is never expected to be zero so we
1014 		 * can check that the report isn't invalid before
1015 		 * copying it to userspace...
1016 		 */
1017 		if (report32[0] == 0) {
1018 			if (__ratelimit(&stream->perf->spurious_report_rs))
1019 				drm_notice(&uncore->i915->drm,
1020 					   "Skipping spurious, invalid OA report\n");
1021 			continue;
1022 		}
1023 
1024 		ret = append_oa_sample(stream, buf, count, offset, report);
1025 		if (ret)
1026 			break;
1027 
1028 		/* Clear out the first 2 dwords as a means to detect unlanded
1029 		 * reports.
1030 		 */
1031 		report32[0] = 0;
1032 		report32[1] = 0;
1033 	}
1034 
1035 	if (start_offset != *offset) {
1036 		spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
1037 
1038 		/* We removed the gtt_offset for the copy loop above, indexing
1039 		 * relative to oa_buf_base so put back here...
1040 		 */
1041 		head += gtt_offset;
1042 
1043 		intel_uncore_write(uncore, GEN7_OASTATUS2,
1044 				   (head & GEN7_OASTATUS2_HEAD_MASK) |
1045 				   GEN7_OASTATUS2_MEM_SELECT_GGTT);
1046 		stream->oa_buffer.head = head;
1047 
1048 		spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
1049 	}
1050 
1051 	return ret;
1052 }
1053 
1054 /**
1055  * gen7_oa_read - copy status records then buffered OA reports
1056  * @stream: An i915-perf stream opened for OA metrics
1057  * @buf: destination buffer given by userspace
1058  * @count: the number of bytes userspace wants to read
1059  * @offset: (inout): the current position for writing into @buf
1060  *
1061  * Checks Gen 7 specific OA unit status registers and if necessary appends
1062  * corresponding status records for userspace (such as for a buffer full
1063  * condition) and then initiates appending any buffered OA reports.
1064  *
1065  * Updates @offset according to the number of bytes successfully copied into
1066  * the userspace buffer.
1067  *
1068  * Returns: zero on success or a negative error code
1069  */
1070 static int gen7_oa_read(struct i915_perf_stream *stream,
1071 			char __user *buf,
1072 			size_t count,
1073 			size_t *offset)
1074 {
1075 	struct intel_uncore *uncore = stream->uncore;
1076 	u32 oastatus1;
1077 	int ret;
1078 
1079 	if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
1080 		return -EIO;
1081 
1082 	oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);
1083 
1084 	/* XXX: On Haswell we don't have a safe way to clear oastatus1
1085 	 * bits while the OA unit is enabled (while the tail pointer
1086 	 * may be updated asynchronously) so we ignore status bits
1087 	 * that have already been reported to userspace.
1088 	 */
1089 	oastatus1 &= ~stream->perf->gen7_latched_oastatus1;
1090 
1091 	/* We treat OABUFFER_OVERFLOW as a significant error:
1092 	 *
1093 	 * - The status can be interpreted to mean that the buffer is
1094 	 *   currently full (with a higher precedence than OA_TAKEN()
1095 	 *   which will start to report a near-empty buffer after an
1096 	 *   overflow) but it's awkward that we can't clear the status
1097 	 *   on Haswell, so without a reset we won't be able to catch
1098 	 *   the state again.
1099 	 *
1100 	 * - Since it also implies the HW has started overwriting old
1101 	 *   reports it may also affect our sanity checks for invalid
1102 	 *   reports when copying to userspace that assume new reports
1103 	 *   are being written to cleared memory.
1104 	 *
1105 	 * - In the future we may want to introduce a flight recorder
1106 	 *   mode where the driver will automatically maintain a safe
1107 	 *   guard band between head/tail, avoiding this overflow
1108 	 *   condition, but we avoid the added driver complexity for
1109 	 *   now.
1110 	 */
1111 	if (unlikely(oastatus1 & GEN7_OASTATUS1_OABUFFER_OVERFLOW)) {
1112 		ret = append_oa_status(stream, buf, count, offset,
1113 				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
1114 		if (ret)
1115 			return ret;
1116 
1117 		drm_dbg(&stream->perf->i915->drm,
1118 			"OA buffer overflow (exponent = %d): force restart\n",
1119 			stream->period_exponent);
1120 
1121 		stream->perf->ops.oa_disable(stream);
1122 		stream->perf->ops.oa_enable(stream);
1123 
1124 		oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);
1125 	}
1126 
1127 	if (unlikely(oastatus1 & GEN7_OASTATUS1_REPORT_LOST)) {
1128 		ret = append_oa_status(stream, buf, count, offset,
1129 				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
1130 		if (ret)
1131 			return ret;
1132 		stream->perf->gen7_latched_oastatus1 |=
1133 			GEN7_OASTATUS1_REPORT_LOST;
1134 	}
1135 
1136 	return gen7_append_oa_reports(stream, buf, count, offset);
1137 }
1138 
1139 /**
1140  * i915_oa_wait_unlocked - handles blocking IO until OA data available
1141  * @stream: An i915-perf stream opened for OA metrics
1142  *
1143  * Called when userspace tries to read() from a blocking stream FD opened
1144  * for OA metrics. It waits until the hrtimer callback finds a non-empty
1145  * OA buffer and wakes us.
1146  *
1147  * Note: it's acceptable to have this return with some false positives
1148  * since any subsequent read handling will return -EAGAIN if there isn't
1149  * really data ready for userspace yet.
1150  *
1151  * Returns: zero on success or a negative error code
1152  */
1153 static int i915_oa_wait_unlocked(struct i915_perf_stream *stream)
1154 {
1155 	/* We would wait indefinitely if periodic sampling is not enabled */
1156 	if (!stream->periodic)
1157 		return -EIO;
1158 
1159 	return wait_event_interruptible(stream->poll_wq,
1160 					oa_buffer_check_unlocked(stream));
1161 }
1162 
1163 /**
1164  * i915_oa_poll_wait - call poll_wait() for an OA stream poll()
1165  * @stream: An i915-perf stream opened for OA metrics
1166  * @file: An i915 perf stream file
1167  * @wait: poll() state table
1168  *
1169  * For handling userspace polling on an i915 perf stream opened for OA metrics,
1170  * this starts a poll_wait with the wait queue that our hrtimer callback wakes
1171  * when it sees data ready to read in the circular OA buffer.
1172  */
1173 static void i915_oa_poll_wait(struct i915_perf_stream *stream,
1174 			      struct file *file,
1175 			      poll_table *wait)
1176 {
1177 	poll_wait(file, &stream->poll_wq, wait);
1178 }
1179 
1180 /**
1181  * i915_oa_read - just calls through to &i915_oa_ops->read
1182  * @stream: An i915-perf stream opened for OA metrics
1183  * @buf: destination buffer given by userspace
1184  * @count: the number of bytes userspace wants to read
1185  * @offset: (inout): the current position for writing into @buf
1186  *
1187  * Updates @offset according to the number of bytes successfully copied into
1188  * the userspace buffer.
1189  *
1190  * Returns: zero on success or a negative error code
1191  */
1192 static int i915_oa_read(struct i915_perf_stream *stream,
1193 			char __user *buf,
1194 			size_t count,
1195 			size_t *offset)
1196 {
1197 	return stream->perf->ops.read(stream, buf, count, offset);
1198 }
1199 
1200 static struct intel_context *oa_pin_context(struct i915_perf_stream *stream)
1201 {
1202 	struct i915_gem_engines_iter it;
1203 	struct i915_gem_context *ctx = stream->ctx;
1204 	struct intel_context *ce;
1205 	struct i915_gem_ww_ctx ww;
1206 	int err = -ENODEV;
1207 
1208 	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
1209 		if (ce->engine != stream->engine) /* first match! */
1210 			continue;
1211 
1212 		err = 0;
1213 		break;
1214 	}
1215 	i915_gem_context_unlock_engines(ctx);
1216 
1217 	if (err)
1218 		return ERR_PTR(err);
1219 
1220 	i915_gem_ww_ctx_init(&ww, true);
1221 retry:
1222 	/*
1223 	 * As the ID is the gtt offset of the context's vma we
1224 	 * pin the vma to ensure the ID remains fixed.
1225 	 */
1226 	err = intel_context_pin_ww(ce, &ww);
1227 	if (err == -EDEADLK) {
1228 		err = i915_gem_ww_ctx_backoff(&ww);
1229 		if (!err)
1230 			goto retry;
1231 	}
1232 	i915_gem_ww_ctx_fini(&ww);
1233 
1234 	if (err)
1235 		return ERR_PTR(err);
1236 
1237 	stream->pinned_ctx = ce;
1238 	return stream->pinned_ctx;
1239 }
1240 
1241 static int
1242 __store_reg_to_mem(struct i915_request *rq, i915_reg_t reg, u32 ggtt_offset)
1243 {
1244 	u32 *cs, cmd;
1245 
1246 	cmd = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
1247 	if (GRAPHICS_VER(rq->engine->i915) >= 8)
1248 		cmd++;
1249 
1250 	cs = intel_ring_begin(rq, 4);
1251 	if (IS_ERR(cs))
1252 		return PTR_ERR(cs);
1253 
1254 	*cs++ = cmd;
1255 	*cs++ = i915_mmio_reg_offset(reg);
1256 	*cs++ = ggtt_offset;
1257 	*cs++ = 0;
1258 
1259 	intel_ring_advance(rq, cs);
1260 
1261 	return 0;
1262 }
1263 
1264 static int
1265 __read_reg(struct intel_context *ce, i915_reg_t reg, u32 ggtt_offset)
1266 {
1267 	struct i915_request *rq;
1268 	int err;
1269 
1270 	rq = i915_request_create(ce);
1271 	if (IS_ERR(rq))
1272 		return PTR_ERR(rq);
1273 
1274 	i915_request_get(rq);
1275 
1276 	err = __store_reg_to_mem(rq, reg, ggtt_offset);
1277 
1278 	i915_request_add(rq);
1279 	if (!err && i915_request_wait(rq, 0, HZ / 2) < 0)
1280 		err = -ETIME;
1281 
1282 	i915_request_put(rq);
1283 
1284 	return err;
1285 }
1286 
1287 static int
1288 gen12_guc_sw_ctx_id(struct intel_context *ce, u32 *ctx_id)
1289 {
1290 	struct i915_vma *scratch;
1291 	u32 *val;
1292 	int err;
1293 
1294 	scratch = __vm_create_scratch_for_read_pinned(&ce->engine->gt->ggtt->vm, 4);
1295 	if (IS_ERR(scratch))
1296 		return PTR_ERR(scratch);
1297 
1298 	err = i915_vma_sync(scratch);
1299 	if (err)
1300 		goto err_scratch;
1301 
1302 	err = __read_reg(ce, RING_EXECLIST_STATUS_HI(ce->engine->mmio_base),
1303 			 i915_ggtt_offset(scratch));
1304 	if (err)
1305 		goto err_scratch;
1306 
1307 	val = i915_gem_object_pin_map_unlocked(scratch->obj, I915_MAP_WB);
1308 	if (IS_ERR(val)) {
1309 		err = PTR_ERR(val);
1310 		goto err_scratch;
1311 	}
1312 
1313 	*ctx_id = *val;
1314 	i915_gem_object_unpin_map(scratch->obj);
1315 
1316 err_scratch:
1317 	i915_vma_unpin_and_release(&scratch, 0);
1318 	return err;
1319 }
1320 
1321 /*
1322  * For execlist mode of submission, pick an unused context id:
1323  * 0 to (NUM_CONTEXT_TAG - 1) are used by other contexts and
1324  * XXX_MAX_CONTEXT_HW_ID is used by the idle context.
1325  *
1326  * For GuC mode of submission, read the context id from the upper dword of the
1327  * EXECLIST_STATUS register. Note that we read this value only once and expect
1328  * it to stay fixed for the entire OA use case. There are cases where the
1329  * GuC KMD implementation may deregister a context to reuse its context id, but
1330  * we prevent that from happening to the OA context by pinning it.
1331  */
1332 static int gen12_get_render_context_id(struct i915_perf_stream *stream)
1333 {
1334 	u32 ctx_id, mask;
1335 	int ret;
1336 
1337 	if (intel_engine_uses_guc(stream->engine)) {
1338 		ret = gen12_guc_sw_ctx_id(stream->pinned_ctx, &ctx_id);
1339 		if (ret)
1340 			return ret;
1341 
1342 		mask = ((1U << GEN12_GUC_SW_CTX_ID_WIDTH) - 1) <<
1343 			(GEN12_GUC_SW_CTX_ID_SHIFT - 32);
1344 	} else if (GRAPHICS_VER_FULL(stream->engine->i915) >= IP_VER(12, 50)) {
1345 		ctx_id = (XEHP_MAX_CONTEXT_HW_ID - 1) <<
1346 			(XEHP_SW_CTX_ID_SHIFT - 32);
1347 
1348 		mask = ((1U << XEHP_SW_CTX_ID_WIDTH) - 1) <<
1349 			(XEHP_SW_CTX_ID_SHIFT - 32);
1350 	} else {
1351 		ctx_id = (GEN12_MAX_CONTEXT_HW_ID - 1) <<
1352 			 (GEN11_SW_CTX_ID_SHIFT - 32);
1353 
1354 		mask = ((1U << GEN11_SW_CTX_ID_WIDTH) - 1) <<
1355 			(GEN11_SW_CTX_ID_SHIFT - 32);
1356 	}
1357 	stream->specific_ctx_id = ctx_id & mask;
1358 	stream->specific_ctx_id_mask = mask;
1359 
1360 	return 0;
1361 }
1362 
1363 static bool oa_find_reg_in_lri(u32 *state, u32 reg, u32 *offset, u32 end)
1364 {
1365 	u32 idx = *offset;
1366 	u32 len = min(MI_LRI_LEN(state[idx]) + idx, end);
1367 	bool found = false;
1368 
1369 	idx++;
1370 	for (; idx < len; idx += 2) {
1371 		if (state[idx] == reg) {
1372 			found = true;
1373 			break;
1374 		}
1375 	}
1376 
1377 	*offset = idx;
1378 	return found;
1379 }
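
/*
 * The LRI payload being scanned above is laid out as follows (a sketch for
 * n register/value pairs, so MI_LRI_LEN() == 2 * n):
 *
 *	state[i + 0]: MI_LOAD_REGISTER_IMM(n) header
 *	state[i + 1]: register offset 0
 *	state[i + 2]: value 0
 *	...
 *	state[i + 2n - 1]: register offset n - 1
 *	state[i + 2n]: value n - 1
 */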
1380 
1381 static u32 oa_context_image_offset(struct intel_context *ce, u32 reg)
1382 {
1383 	u32 offset, len = (ce->engine->context_size - PAGE_SIZE) / 4;
1384 	u32 *state = ce->lrc_reg_state;
1385 
1386 	for (offset = 0; offset < len; ) {
1387 		if (IS_MI_LRI_CMD(state[offset])) {
1388 			/*
1389 			 * We expect reg-value pairs in MI_LRI command, so
1390 			 * MI_LRI_LEN() should be even, if not, issue a warning.
1391 			 * MI_LRI_LEN() should be even; if not, issue a warning.
1392 			drm_WARN_ON(&ce->engine->i915->drm,
1393 				    MI_LRI_LEN(state[offset]) & 0x1);
1394 
1395 			if (oa_find_reg_in_lri(state, reg, &offset, len))
1396 				break;
1397 		} else {
1398 			offset++;
1399 		}
1400 	}
1401 
1402 	return offset < len ? offset : U32_MAX;
1403 }
1404 
1405 static int set_oa_ctx_ctrl_offset(struct intel_context *ce)
1406 {
1407 	i915_reg_t reg = GEN12_OACTXCONTROL(ce->engine->mmio_base);
1408 	struct i915_perf *perf = &ce->engine->i915->perf;
1409 	u32 offset = perf->ctx_oactxctrl_offset;
1410 
1411 	/* Do this only once. Failure is stored as an offset of U32_MAX */
1412 	if (offset)
1413 		goto exit;
1414 
1415 	offset = oa_context_image_offset(ce, i915_mmio_reg_offset(reg));
1416 	perf->ctx_oactxctrl_offset = offset;
1417 
1418 	drm_dbg(&ce->engine->i915->drm,
1419 		"%s oa ctx control at 0x%08x dword offset\n",
1420 		ce->engine->name, offset);
1421 
1422 exit:
1423 	return offset && offset != U32_MAX ? 0 : -ENODEV;
1424 }
1425 
1426 static bool engine_supports_mi_query(struct intel_engine_cs *engine)
1427 {
1428 	return engine->class == RENDER_CLASS;
1429 }
1430 
1431 /**
1432  * oa_get_render_ctx_id - determine and hold ctx hw id
1433  * @stream: An i915-perf stream opened for OA metrics
1434  *
1435  * Determine the render context hw id, and ensure it remains fixed for the
1436  * lifetime of the stream. This ensures that we don't have to worry about
1437  * updating the context ID in OACONTROL on the fly.
1438  *
1439  * Returns: zero on success or a negative error code
1440  */
1441 static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
1442 {
1443 	struct intel_context *ce;
1444 	int ret = 0;
1445 
1446 	ce = oa_pin_context(stream);
1447 	if (IS_ERR(ce))
1448 		return PTR_ERR(ce);
1449 
1450 	if (engine_supports_mi_query(stream->engine)) {
1451 		/*
1452 		 * We are enabling perf query here. If we don't find the context
1453 		 * offset, just return an error.
1454 		 */
1455 		ret = set_oa_ctx_ctrl_offset(ce);
1456 		if (ret) {
1457 			intel_context_unpin(ce);
1458 			drm_err(&stream->perf->i915->drm,
1459 				"Enabling perf query failed for %s\n",
1460 				stream->engine->name);
1461 			return ret;
1462 		}
1463 	}
1464 
1465 	switch (GRAPHICS_VER(ce->engine->i915)) {
1466 	case 7: {
1467 		/*
1468 		 * On Haswell we don't do any post processing of the reports
1469 		 * and don't need to use the mask.
1470 		 */
1471 		stream->specific_ctx_id = i915_ggtt_offset(ce->state);
1472 		stream->specific_ctx_id_mask = 0;
1473 		break;
1474 	}
1475 
1476 	case 8:
1477 	case 9:
1478 		if (intel_engine_uses_guc(ce->engine)) {
1479 			/*
1480 			 * When using GuC, the context descriptor we write in
1481 			 * i915 is read by GuC and rewritten before it's
1482 			 * actually written into the hardware. The LRCA is
1483 			 * what is put into the context id field of the
1484 			 * context descriptor by GuC. Because it's aligned to
1485 			 * a page, the lower 12 bits are always zero and are
1486 			 * dropped by GuC. They won't be part of the context
1487 			 * ID in the OA reports, so squash those lower bits.
1488 			 */
1489 			stream->specific_ctx_id = ce->lrc.lrca >> 12;
1490 
1491 			/*
1492 			 * GuC uses the top bit to signal proxy submission, so
1493 			 * ignore that bit.
1494 			 */
1495 			stream->specific_ctx_id_mask =
1496 				(1U << (GEN8_CTX_ID_WIDTH - 1)) - 1;
1497 		} else {
1498 			stream->specific_ctx_id_mask =
1499 				(1U << GEN8_CTX_ID_WIDTH) - 1;
1500 			stream->specific_ctx_id = stream->specific_ctx_id_mask;
1501 		}
1502 		break;
1503 
1504 	case 11:
1505 	case 12:
1506 		ret = gen12_get_render_context_id(stream);
1507 		break;
1508 
1509 	default:
1510 		MISSING_CASE(GRAPHICS_VER(ce->engine->i915));
1511 	}
1512 
1513 	ce->tag = stream->specific_ctx_id;
1514 
1515 	drm_dbg(&stream->perf->i915->drm,
1516 		"filtering on ctx_id=0x%x ctx_id_mask=0x%x\n",
1517 		stream->specific_ctx_id,
1518 		stream->specific_ctx_id_mask);
1519 
1520 	return ret;
1521 }
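
/*
 * For illustration: the (id, mask) pair computed above is later used when
 * filtering OA reports on the CPU, conceptually
 *
 *   if ((report_ctx_id & stream->specific_ctx_id_mask) ==
 *       stream->specific_ctx_id)
 *           forward the report to userspace;
 *
 * where report_ctx_id stands for the context-id field parsed out of an OA
 * report elsewhere in this file.
 */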
1522 
1523 /**
1524  * oa_put_render_ctx_id - counterpart to oa_get_render_ctx_id that releases the hold
1525  * @stream: An i915-perf stream opened for OA metrics
1526  *
1527  * Undoes anything oa_get_render_ctx_id() needed to do to keep the context HW
1528  * ID valid for the lifetime of the stream.
1529  */
1530 static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
1531 {
1532 	struct intel_context *ce;
1533 
1534 	ce = fetch_and_zero(&stream->pinned_ctx);
1535 	if (ce) {
1536 		ce->tag = 0; /* recomputed on next submission after parking */
1537 		intel_context_unpin(ce);
1538 	}
1539 
1540 	stream->specific_ctx_id = INVALID_CTX_ID;
1541 	stream->specific_ctx_id_mask = 0;
1542 }
1543 
1544 static void
1545 free_oa_buffer(struct i915_perf_stream *stream)
1546 {
1547 	i915_vma_unpin_and_release(&stream->oa_buffer.vma,
1548 				   I915_VMA_RELEASE_MAP);
1549 
1550 	stream->oa_buffer.vaddr = NULL;
1551 }
1552 
1553 static void
1554 free_oa_configs(struct i915_perf_stream *stream)
1555 {
1556 	struct i915_oa_config_bo *oa_bo, *tmp;
1557 
1558 	i915_oa_config_put(stream->oa_config);
1559 	llist_for_each_entry_safe(oa_bo, tmp, stream->oa_config_bos.first, node)
1560 		free_oa_config_bo(oa_bo);
1561 }
1562 
1563 static void
1564 free_noa_wait(struct i915_perf_stream *stream)
1565 {
1566 	i915_vma_unpin_and_release(&stream->noa_wait, 0);
1567 }
1568 
1569 static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
1570 {
1571 	struct i915_perf *perf = stream->perf;
1572 	struct intel_gt *gt = stream->engine->gt;
1573 
1574 	if (WARN_ON(stream != gt->perf.exclusive_stream))
1575 		return;
1576 
1577 	/*
1578 	 * Unset exclusive_stream first, it will be checked while disabling
1579 	 * the metric set on gen8+.
1580 	 *
1581 	 * See i915_oa_init_reg_state() and lrc_configure_all_contexts()
1582 	 */
1583 	WRITE_ONCE(gt->perf.exclusive_stream, NULL);
1584 	perf->ops.disable_metric_set(stream);
1585 
1586 	free_oa_buffer(stream);
1587 
1588 	/*
1589 	 * Wa_16011777198:dg2: Unset the override of GUCRC mode to enable rc6.
1590 	 */
1591 	if (intel_uc_uses_guc_rc(&gt->uc) &&
1592 	    (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_C0) ||
1593 	     IS_DG2_GRAPHICS_STEP(gt->i915, G11, STEP_A0, STEP_B0)))
1594 		drm_WARN_ON(&gt->i915->drm,
1595 			    intel_guc_slpc_unset_gucrc_mode(&gt->uc.guc.slpc));
1596 
1597 	intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL);
1598 	intel_engine_pm_put(stream->engine);
1599 
1600 	if (stream->ctx)
1601 		oa_put_render_ctx_id(stream);
1602 
1603 	free_oa_configs(stream);
1604 	free_noa_wait(stream);
1605 
1606 	if (perf->spurious_report_rs.missed) {
1607 		drm_notice(&gt->i915->drm,
1608 			   "%d spurious OA report notices suppressed due to ratelimiting\n",
1609 			   perf->spurious_report_rs.missed);
1610 	}
1611 }
1612 
1613 static void gen7_init_oa_buffer(struct i915_perf_stream *stream)
1614 {
1615 	struct intel_uncore *uncore = stream->uncore;
1616 	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
1617 	unsigned long flags;
1618 
1619 	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
1620 
1621 	/* Pre-DevBDW: OABUFFER must be set with counters off,
1622 	 * before OASTATUS1, but after OASTATUS2
1623 	 */
1624 	intel_uncore_write(uncore, GEN7_OASTATUS2, /* head */
1625 			   gtt_offset | GEN7_OASTATUS2_MEM_SELECT_GGTT);
1626 	stream->oa_buffer.head = gtt_offset;
1627 
1628 	intel_uncore_write(uncore, GEN7_OABUFFER, gtt_offset);
1629 
1630 	intel_uncore_write(uncore, GEN7_OASTATUS1, /* tail */
1631 			   gtt_offset | OABUFFER_SIZE_16M);
1632 
1633 	/* Mark that we need updated tail pointers to read from... */
1634 	stream->oa_buffer.aging_tail = INVALID_TAIL_PTR;
1635 	stream->oa_buffer.tail = gtt_offset;
1636 
1637 	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
1638 
1639 	/* On Haswell we have to track which OASTATUS1 flags we've
1640 	 * already seen since they can't be cleared while periodic
1641 	 * sampling is enabled.
1642 	 */
1643 	stream->perf->gen7_latched_oastatus1 = 0;
1644 
1645 	/* NB: although the OA buffer will initially be allocated
1646 	 * zeroed via shmfs (and so this memset is redundant when
1647 	 * first allocating), we may re-init the OA buffer, either
1648 	 * when re-enabling a stream or in error/reset paths.
1649 	 *
1650 	 * The reason we clear the buffer for each re-init is for the
1651 	 * sanity check in gen7_append_oa_reports() that looks at the
1652 	 * report-id field to make sure it's non-zero, which relies on
1653 	 * the assumption that new reports are being written to zeroed
1654 	 * memory...
1655 	 */
1656 	memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
1657 }
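
/*
 * Sketch of the gen7 OA buffer registers as programmed above, with head
 * and tail both starting at the buffer base (i.e. an empty buffer):
 *
 *   OASTATUS2: head pointer (| GGTT memory select)
 *   OABUFFER:  buffer base address
 *   OASTATUS1: tail pointer (| 16M buffer size)
 *
 * The write order (OASTATUS2, then OABUFFER, then OASTATUS1) follows the
 * pre-Broadwell restriction quoted in the comment above.
 */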
1658 
1659 static void gen8_init_oa_buffer(struct i915_perf_stream *stream)
1660 {
1661 	struct intel_uncore *uncore = stream->uncore;
1662 	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
1663 	unsigned long flags;
1664 
1665 	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
1666 
1667 	intel_uncore_write(uncore, GEN8_OASTATUS, 0);
1668 	intel_uncore_write(uncore, GEN8_OAHEADPTR, gtt_offset);
1669 	stream->oa_buffer.head = gtt_offset;
1670 
1671 	intel_uncore_write(uncore, GEN8_OABUFFER_UDW, 0);
1672 
1673 	/*
1674 	 * PRM says:
1675 	 *
1676 	 *  "This MMIO must be set before the OATAILPTR
1677 	 *  register and after the OAHEADPTR register. This is
1678 	 *  to enable proper functionality of the overflow
1679 	 *  bit."
1680 	 */
1681 	intel_uncore_write(uncore, GEN8_OABUFFER, gtt_offset |
1682 		   OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT);
1683 	intel_uncore_write(uncore, GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK);
1684 
1685 	/* Mark that we need updated tail pointers to read from... */
1686 	stream->oa_buffer.aging_tail = INVALID_TAIL_PTR;
1687 	stream->oa_buffer.tail = gtt_offset;
1688 
1689 	/*
1690 	 * Reset state used to recognise context switches, affecting which
1691 	 * reports we will forward to userspace while filtering for a single
1692 	 * context.
1693 	 */
1694 	stream->oa_buffer.last_ctx_id = INVALID_CTX_ID;
1695 
1696 	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
1697 
1698 	/*
1699 	 * NB: although the OA buffer will initially be allocated
1700 	 * zeroed via shmfs (and so this memset is redundant when
1701 	 * first allocating), we may re-init the OA buffer, either
1702 	 * when re-enabling a stream or in error/reset paths.
1703 	 *
1704 	 * The reason we clear the buffer for each re-init is for the
1705 	 * sanity check in gen8_append_oa_reports() that looks at the
1706 	 * reason field to make sure it's non-zero, which relies on
1707 	 * the assumption that new reports are being written to zeroed
1708 	 * memory...
1709 	 */
1710 	memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
1711 }
1712 
1713 static void gen12_init_oa_buffer(struct i915_perf_stream *stream)
1714 {
1715 	struct intel_uncore *uncore = stream->uncore;
1716 	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
1717 	unsigned long flags;
1718 
1719 	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
1720 
1721 	intel_uncore_write(uncore, GEN12_OAG_OASTATUS, 0);
1722 	intel_uncore_write(uncore, GEN12_OAG_OAHEADPTR,
1723 			   gtt_offset & GEN12_OAG_OAHEADPTR_MASK);
1724 	stream->oa_buffer.head = gtt_offset;
1725 
1726 	/*
1727 	 * PRM says:
1728 	 *
1729 	 *  "This MMIO must be set before the OATAILPTR
1730 	 *  register and after the OAHEADPTR register. This is
1731 	 *  to enable proper functionality of the overflow
1732 	 *  bit."
1733 	 */
1734 	intel_uncore_write(uncore, GEN12_OAG_OABUFFER, gtt_offset |
1735 			   OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT);
1736 	intel_uncore_write(uncore, GEN12_OAG_OATAILPTR,
1737 			   gtt_offset & GEN12_OAG_OATAILPTR_MASK);
1738 
1739 	/* Mark that we need updated tail pointers to read from... */
1740 	stream->oa_buffer.aging_tail = INVALID_TAIL_PTR;
1741 	stream->oa_buffer.tail = gtt_offset;
1742 
1743 	/*
1744 	 * Reset state used to recognise context switches, affecting which
1745 	 * reports we will forward to userspace while filtering for a single
1746 	 * context.
1747 	 */
1748 	stream->oa_buffer.last_ctx_id = INVALID_CTX_ID;
1749 
1750 	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
1751 
1752 	/*
1753 	 * NB: although the OA buffer will initially be allocated
1754 	 * zeroed via shmfs (and so this memset is redundant when
1755 	 * first allocating), we may re-init the OA buffer, either
1756 	 * when re-enabling a stream or in error/reset paths.
1757 	 *
1758 	 * The reason we clear the buffer for each re-init is for the
1759 	 * sanity check in gen8_append_oa_reports() that looks at the
1760 	 * reason field to make sure it's non-zero, which relies on
1761 	 * the assumption that new reports are being written to zeroed
1762 	 * memory...
1763 	 */
1764 	memset(stream->oa_buffer.vaddr, 0,
1765 	       stream->oa_buffer.vma->size);
1766 }
1767 
1768 static int alloc_oa_buffer(struct i915_perf_stream *stream)
1769 {
1770 	struct drm_i915_private *i915 = stream->perf->i915;
1771 	struct intel_gt *gt = stream->engine->gt;
1772 	struct drm_i915_gem_object *bo;
1773 	struct i915_vma *vma;
1774 	int ret;
1775 
1776 	if (drm_WARN_ON(&i915->drm, stream->oa_buffer.vma))
1777 		return -ENODEV;
1778 
1779 	BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE);
1780 	BUILD_BUG_ON(OA_BUFFER_SIZE < SZ_128K || OA_BUFFER_SIZE > SZ_16M);
1781 
1782 	bo = i915_gem_object_create_shmem(stream->perf->i915, OA_BUFFER_SIZE);
1783 	if (IS_ERR(bo)) {
1784 		drm_err(&i915->drm, "Failed to allocate OA buffer\n");
1785 		return PTR_ERR(bo);
1786 	}
1787 
1788 	i915_gem_object_set_cache_coherency(bo, I915_CACHE_LLC);
1789 
1791 	vma = i915_vma_instance(bo, &gt->ggtt->vm, NULL);
1792 	if (IS_ERR(vma)) {
1793 		ret = PTR_ERR(vma);
1794 		goto err_unref;
1795 	}
1796 
1797 	/*
1798 	 * PreHSW required 512K alignment.
1799 	 * HSW and onwards, align to the requested size of the OA buffer.
1800 	 */
1801 	ret = i915_vma_pin(vma, 0, SZ_16M, PIN_GLOBAL | PIN_HIGH);
1802 	if (ret) {
1803 		drm_err(&gt->i915->drm, "Failed to pin OA buffer %d\n", ret);
1804 		goto err_unref;
1805 	}
1806 
1807 	stream->oa_buffer.vma = vma;
1808 
1809 	stream->oa_buffer.vaddr =
1810 		i915_gem_object_pin_map_unlocked(bo, I915_MAP_WB);
1811 	if (IS_ERR(stream->oa_buffer.vaddr)) {
1812 		ret = PTR_ERR(stream->oa_buffer.vaddr);
1813 		goto err_unpin;
1814 	}
1815 
1816 	return 0;
1817 
1818 err_unpin:
1819 	__i915_vma_unpin(vma);
1820 
1821 err_unref:
1822 	i915_gem_object_put(bo);
1823 
1824 	stream->oa_buffer.vaddr = NULL;
1825 	stream->oa_buffer.vma = NULL;
1826 
1827 	return ret;
1828 }
1829 
1830 static u32 *save_restore_register(struct i915_perf_stream *stream, u32 *cs,
1831 				  bool save, i915_reg_t reg, u32 offset,
1832 				  u32 dword_count)
1833 {
1834 	u32 cmd;
1835 	u32 d;
1836 
1837 	cmd = save ? MI_STORE_REGISTER_MEM : MI_LOAD_REGISTER_MEM;
1838 	cmd |= MI_SRM_LRM_GLOBAL_GTT;
1839 	if (GRAPHICS_VER(stream->perf->i915) >= 8)
1840 		cmd++;
1841 
1842 	for (d = 0; d < dword_count; d++) {
1843 		*cs++ = cmd;
1844 		*cs++ = i915_mmio_reg_offset(reg) + 4 * d;
1845 		*cs++ = intel_gt_scratch_offset(stream->engine->gt,
1846 						offset) + 4 * d;
1847 		*cs++ = 0;
1848 	}
1849 
1850 	return cs;
1851 }
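
/*
 * For illustration: each iteration above emits one 4-dword SRM (save) or
 * LRM (restore) packet,
 *
 *   dw0: MI_STORE_REGISTER_MEM or MI_LOAD_REGISTER_MEM | GLOBAL_GTT
 *   dw1: MMIO offset of the register dword (reg + 4 * d)
 *   dw2: GGTT scratch address (scratch + 4 * d)
 *   dw3: 0 - the upper address dword on gen8+ (hence the cmd++ for the
 *        longer command form), effectively an MI_NOOP on older gens
 */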
1852 
1853 static int alloc_noa_wait(struct i915_perf_stream *stream)
1854 {
1855 	struct drm_i915_private *i915 = stream->perf->i915;
1856 	struct intel_gt *gt = stream->engine->gt;
1857 	struct drm_i915_gem_object *bo;
1858 	struct i915_vma *vma;
1859 	const u64 delay_ticks = 0xffffffffffffffff -
1860 		intel_gt_ns_to_clock_interval(to_gt(stream->perf->i915),
1861 		atomic64_read(&stream->perf->noa_programming_delay));
1862 	const u32 base = stream->engine->mmio_base;
1863 #define CS_GPR(x) GEN8_RING_CS_GPR(base, x)
1864 	u32 *batch, *ts0, *cs, *jump;
1865 	struct i915_gem_ww_ctx ww;
1866 	int ret, i;
1867 	enum {
1868 		START_TS,
1869 		NOW_TS,
1870 		DELTA_TS,
1871 		JUMP_PREDICATE,
1872 		DELTA_TARGET,
1873 		N_CS_GPR
1874 	};
1875 	i915_reg_t mi_predicate_result = HAS_MI_SET_PREDICATE(i915) ?
1876 					  MI_PREDICATE_RESULT_2_ENGINE(base) :
1877 					  MI_PREDICATE_RESULT_1(RENDER_RING_BASE);
1878 
1879 	bo = i915_gem_object_create_internal(i915, 4096);
1880 	if (IS_ERR(bo)) {
1881 		drm_err(&i915->drm,
1882 			"Failed to allocate NOA wait batchbuffer\n");
1883 		return PTR_ERR(bo);
1884 	}
1885 
1886 	i915_gem_ww_ctx_init(&ww, true);
1887 retry:
1888 	ret = i915_gem_object_lock(bo, &ww);
1889 	if (ret)
1890 		goto out_ww;
1891 
1892 	/*
1893 	 * We pin in GGTT because multiple OA config BOs will have a jump
1894 	 * to this address, and that address needs to remain fixed for the
1895 	 * lifetime of the i915/perf stream.
1896 	 */
1897 	vma = i915_vma_instance(bo, &gt->ggtt->vm, NULL);
1898 	if (IS_ERR(vma)) {
1899 		ret = PTR_ERR(vma);
1900 		goto out_ww;
1901 	}
1902 
1903 	ret = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_GLOBAL | PIN_HIGH);
1904 	if (ret)
1905 		goto out_ww;
1906 
1907 	batch = cs = i915_gem_object_pin_map(bo, I915_MAP_WB);
1908 	if (IS_ERR(batch)) {
1909 		ret = PTR_ERR(batch);
1910 		goto err_unpin;
1911 	}
1912 
1913 	/* Save registers. */
1914 	for (i = 0; i < N_CS_GPR; i++)
1915 		cs = save_restore_register(
1916 			stream, cs, true /* save */, CS_GPR(i),
1917 			INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR + 8 * i, 2);
1918 	cs = save_restore_register(
1919 		stream, cs, true /* save */, mi_predicate_result,
1920 		INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1, 1);
1921 
1922 	/* First timestamp snapshot location. */
1923 	ts0 = cs;
1924 
1925 	/*
1926 	 * Initial snapshot of the timestamp register to implement the wait.
1927 	 * We work with 32-bit values, so clear out the top 32 bits of the
1928 	 * register because the ALU operates on 64-bit values.
1929 	 */
1930 	*cs++ = MI_LOAD_REGISTER_IMM(1);
1931 	*cs++ = i915_mmio_reg_offset(CS_GPR(START_TS)) + 4;
1932 	*cs++ = 0;
1933 	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
1934 	*cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base));
1935 	*cs++ = i915_mmio_reg_offset(CS_GPR(START_TS));
1936 
1937 	/*
1938 	 * This is the location we're going to jump back into until the
1939 	 * required amount of time has passed.
1940 	 */
1941 	jump = cs;
1942 
1943 	/*
1944 	 * Take another snapshot of the timestamp register. Take care to
1945 	 * clear out the top 32 bits of CS_GPR(NOW_TS) as it's used in other
1946 	 * operations below.
1947 	 */
1948 	*cs++ = MI_LOAD_REGISTER_IMM(1);
1949 	*cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS)) + 4;
1950 	*cs++ = 0;
1951 	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
1952 	*cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base));
1953 	*cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS));
1954 
1955 	/*
1956 	 * Diff the two timestamps, storing the result in CS_GPR(DELTA_TS)
1957 	 * and the carry flag in CS_GPR(JUMP_PREDICATE).
1958 	 */
1959 	*cs++ = MI_MATH(5);
1960 	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(NOW_TS));
1961 	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(START_TS));
1962 	*cs++ = MI_MATH_SUB;
1963 	*cs++ = MI_MATH_STORE(MI_MATH_REG(DELTA_TS), MI_MATH_REG_ACCU);
1964 	*cs++ = MI_MATH_STORE(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF);
1965 
1966 	/*
1967 	 * Transfer the carry flag (set to 1 if ts1 < ts0, meaning the
1968 	 * timestamp has rolled over its 32 bits) into the predicate register
1969 	 * to be used for the predicated jump.
1970 	 */
1971 	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
1972 	*cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE));
1973 	*cs++ = i915_mmio_reg_offset(mi_predicate_result);
1974 
1975 	if (HAS_MI_SET_PREDICATE(i915))
1976 		*cs++ = MI_SET_PREDICATE | 1;
1977 
1978 	/* Restart from the beginning if we had timestamps roll over. */
1979 	*cs++ = (GRAPHICS_VER(i915) < 8 ?
1980 		 MI_BATCH_BUFFER_START :
1981 		 MI_BATCH_BUFFER_START_GEN8) |
1982 		MI_BATCH_PREDICATE;
1983 	*cs++ = i915_ggtt_offset(vma) + (ts0 - batch) * 4;
1984 	*cs++ = 0;
1985 
1986 	if (HAS_MI_SET_PREDICATE(i915))
1987 		*cs++ = MI_SET_PREDICATE;
1988 
1989 	/*
1990 	 * Now add the diff between the two previous timestamps to:
1991 	 *      ((2^64 - 1) - delay in ticks)
1992 	 *
1993 	 * When the Carry Flag contains 1 this means the elapsed time is
1994 	 * longer than the expected delay, and we can exit the wait loop.
1995 	 */
1996 	*cs++ = MI_LOAD_REGISTER_IMM(2);
1997 	*cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET));
1998 	*cs++ = lower_32_bits(delay_ticks);
1999 	*cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET)) + 4;
2000 	*cs++ = upper_32_bits(delay_ticks);
2001 
2002 	*cs++ = MI_MATH(4);
2003 	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(DELTA_TS));
2004 	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(DELTA_TARGET));
2005 	*cs++ = MI_MATH_ADD;
2006 	*cs++ = MI_MATH_STOREINV(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF);
2007 
2008 	*cs++ = MI_ARB_CHECK;
2009 
2010 	/*
2011 	 * Transfer the result into the predicate register to be used for the
2012 	 * predicated jump.
2013 	 */
2014 	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
2015 	*cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE));
2016 	*cs++ = i915_mmio_reg_offset(mi_predicate_result);
2017 
2018 	if (HAS_MI_SET_PREDICATE(i915))
2019 		*cs++ = MI_SET_PREDICATE | 1;
2020 
2021 	/* Predicate the jump.  */
2022 	*cs++ = (GRAPHICS_VER(i915) < 8 ?
2023 		 MI_BATCH_BUFFER_START :
2024 		 MI_BATCH_BUFFER_START_GEN8) |
2025 		MI_BATCH_PREDICATE;
2026 	*cs++ = i915_ggtt_offset(vma) + (jump - batch) * 4;
2027 	*cs++ = 0;
2028 
2029 	if (HAS_MI_SET_PREDICATE(i915))
2030 		*cs++ = MI_SET_PREDICATE;
2031 
2032 	/* Restore registers. */
2033 	for (i = 0; i < N_CS_GPR; i++)
2034 		cs = save_restore_register(
2035 			stream, cs, false /* restore */, CS_GPR(i),
2036 			INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR + 8 * i, 2);
2037 	cs = save_restore_register(
2038 		stream, cs, false /* restore */, mi_predicate_result,
2039 		INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1, 1);
2040 
2041 	/* And return to the ring. */
2042 	*cs++ = MI_BATCH_BUFFER_END;
2043 
2044 	GEM_BUG_ON(cs - batch > PAGE_SIZE / sizeof(*batch));
2045 
2046 	i915_gem_object_flush_map(bo);
2047 	__i915_gem_object_release_map(bo);
2048 
2049 	stream->noa_wait = vma;
2050 	goto out_ww;
2051 
2052 err_unpin:
2053 	i915_vma_unpin_and_release(&vma, 0);
2054 out_ww:
2055 	if (ret == -EDEADLK) {
2056 		ret = i915_gem_ww_ctx_backoff(&ww);
2057 		if (!ret)
2058 			goto retry;
2059 	}
2060 	i915_gem_ww_ctx_fini(&ww);
2061 	if (ret)
2062 		i915_gem_object_put(bo);
2063 	return ret;
2064 }
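
/*
 * Rough pseudocode of the batch built above (a summary, not additional
 * behaviour):
 *
 *   save CS_GPR[0..N_CS_GPR) and the predicate register to scratch
 * restart:
 *   START_TS = RING_TIMESTAMP          (upper 32 bits cleared first)
 * loop:
 *   NOW_TS = RING_TIMESTAMP            (upper 32 bits cleared first)
 *   DELTA_TS = NOW_TS - START_TS; JUMP_PREDICATE = carry
 *   if (JUMP_PREDICATE)                (timestamp wrapped its 32 bits)
 *           goto restart               (predicated MI_BATCH_BUFFER_START)
 *   DELTA_TARGET = delay_ticks         ((2^64 - 1) - requested delay)
 *   JUMP_PREDICATE = !carry(DELTA_TS + DELTA_TARGET)
 *   if (JUMP_PREDICATE)                (elapsed < requested delay)
 *           goto loop                  (predicated MI_BATCH_BUFFER_START)
 *   restore the saved registers
 *   MI_BATCH_BUFFER_END
 */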
2065 
2066 static u32 *write_cs_mi_lri(u32 *cs,
2067 			    const struct i915_oa_reg *reg_data,
2068 			    u32 n_regs)
2069 {
2070 	u32 i;
2071 
2072 	for (i = 0; i < n_regs; i++) {
2073 		if ((i % MI_LOAD_REGISTER_IMM_MAX_REGS) == 0) {
2074 			u32 n_lri = min_t(u32,
2075 					  n_regs - i,
2076 					  MI_LOAD_REGISTER_IMM_MAX_REGS);
2077 
2078 			*cs++ = MI_LOAD_REGISTER_IMM(n_lri);
2079 		}
2080 		*cs++ = i915_mmio_reg_offset(reg_data[i].addr);
2081 		*cs++ = reg_data[i].value;
2082 	}
2083 
2084 	return cs;
2085 }
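
/*
 * For illustration: three registers, say, are emitted above as
 *
 *   MI_LOAD_REGISTER_IMM(3)
 *   offset #0, value #0
 *   offset #1, value #1
 *   offset #2, value #2
 *
 * with a fresh MI_LRI header started every MI_LOAD_REGISTER_IMM_MAX_REGS
 * registers.
 */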
2086 
2087 static int num_lri_dwords(int num_regs)
2088 {
2089 	int count = 0;
2090 
2091 	if (num_regs > 0) {
2092 		count += DIV_ROUND_UP(num_regs, MI_LOAD_REGISTER_IMM_MAX_REGS);
2093 		count += num_regs * 2;
2094 	}
2095 
2096 	return count;
2097 }
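
/*
 * Worked example (assuming MI_LOAD_REGISTER_IMM_MAX_REGS is 126): 130
 * registers need DIV_ROUND_UP(130, 126) = 2 MI_LRI headers plus
 * 130 * 2 = 260 payload dwords, i.e. 262 dwords in total.
 */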
2098 
2099 static struct i915_oa_config_bo *
2100 alloc_oa_config_buffer(struct i915_perf_stream *stream,
2101 		       struct i915_oa_config *oa_config)
2102 {
2103 	struct drm_i915_gem_object *obj;
2104 	struct i915_oa_config_bo *oa_bo;
2105 	struct i915_gem_ww_ctx ww;
2106 	size_t config_length = 0;
2107 	u32 *cs;
2108 	int err;
2109 
2110 	oa_bo = kzalloc(sizeof(*oa_bo), GFP_KERNEL);
2111 	if (!oa_bo)
2112 		return ERR_PTR(-ENOMEM);
2113 
2114 	config_length += num_lri_dwords(oa_config->mux_regs_len);
2115 	config_length += num_lri_dwords(oa_config->b_counter_regs_len);
2116 	config_length += num_lri_dwords(oa_config->flex_regs_len);
2117 	config_length += 3; /* MI_BATCH_BUFFER_START */
2118 	config_length = ALIGN(sizeof(u32) * config_length, I915_GTT_PAGE_SIZE);
2119 
2120 	obj = i915_gem_object_create_shmem(stream->perf->i915, config_length);
2121 	if (IS_ERR(obj)) {
2122 		err = PTR_ERR(obj);
2123 		goto err_free;
2124 	}
2125 
2126 	i915_gem_ww_ctx_init(&ww, true);
2127 retry:
2128 	err = i915_gem_object_lock(obj, &ww);
2129 	if (err)
2130 		goto out_ww;
2131 
2132 	cs = i915_gem_object_pin_map(obj, I915_MAP_WB);
2133 	if (IS_ERR(cs)) {
2134 		err = PTR_ERR(cs);
2135 		goto out_ww;
2136 	}
2137 
2138 	cs = write_cs_mi_lri(cs,
2139 			     oa_config->mux_regs,
2140 			     oa_config->mux_regs_len);
2141 	cs = write_cs_mi_lri(cs,
2142 			     oa_config->b_counter_regs,
2143 			     oa_config->b_counter_regs_len);
2144 	cs = write_cs_mi_lri(cs,
2145 			     oa_config->flex_regs,
2146 			     oa_config->flex_regs_len);
2147 
2148 	/* Jump into the active wait. */
2149 	*cs++ = (GRAPHICS_VER(stream->perf->i915) < 8 ?
2150 		 MI_BATCH_BUFFER_START :
2151 		 MI_BATCH_BUFFER_START_GEN8);
2152 	*cs++ = i915_ggtt_offset(stream->noa_wait);
2153 	*cs++ = 0;
2154 
2155 	i915_gem_object_flush_map(obj);
2156 	__i915_gem_object_release_map(obj);
2157 
2158 	oa_bo->vma = i915_vma_instance(obj,
2159 				       &stream->engine->gt->ggtt->vm,
2160 				       NULL);
2161 	if (IS_ERR(oa_bo->vma)) {
2162 		err = PTR_ERR(oa_bo->vma);
2163 		goto out_ww;
2164 	}
2165 
2166 	oa_bo->oa_config = i915_oa_config_get(oa_config);
2167 	llist_add(&oa_bo->node, &stream->oa_config_bos);
2168 
2169 out_ww:
2170 	if (err == -EDEADLK) {
2171 		err = i915_gem_ww_ctx_backoff(&ww);
2172 		if (!err)
2173 			goto retry;
2174 	}
2175 	i915_gem_ww_ctx_fini(&ww);
2176 
2177 	if (err)
2178 		i915_gem_object_put(obj);
2179 err_free:
2180 	if (err) {
2181 		kfree(oa_bo);
2182 		return ERR_PTR(err);
2183 	}
2184 	return oa_bo;
2185 }
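
/*
 * Sketch of the resulting config BO layout:
 *
 *   [ MI_LRI: mux registers             ]
 *   [ MI_LRI: boolean counter registers ]
 *   [ MI_LRI: flex EU registers         ]
 *   [ MI_BATCH_BUFFER_START -> stream->noa_wait ]
 *
 * so executing a config batch chains straight into the NOA wait loop.
 */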
2186 
2187 static struct i915_vma *
2188 get_oa_vma(struct i915_perf_stream *stream, struct i915_oa_config *oa_config)
2189 {
2190 	struct i915_oa_config_bo *oa_bo;
2191 
2192 	/*
2193 	 * Look for the buffer in the already allocated BOs attached
2194 	 * to the stream.
2195 	 */
2196 	llist_for_each_entry(oa_bo, stream->oa_config_bos.first, node) {
2197 		if (oa_bo->oa_config == oa_config &&
2198 		    memcmp(oa_bo->oa_config->uuid,
2199 			   oa_config->uuid,
2200 			   sizeof(oa_config->uuid)) == 0)
2201 			goto out;
2202 	}
2203 
2204 	oa_bo = alloc_oa_config_buffer(stream, oa_config);
2205 	if (IS_ERR(oa_bo))
2206 		return ERR_CAST(oa_bo);
2207 
2208 out:
2209 	return i915_vma_get(oa_bo->vma);
2210 }
2211 
2212 static int
2213 emit_oa_config(struct i915_perf_stream *stream,
2214 	       struct i915_oa_config *oa_config,
2215 	       struct intel_context *ce,
2216 	       struct i915_active *active)
2217 {
2218 	struct i915_request *rq;
2219 	struct i915_vma *vma;
2220 	struct i915_gem_ww_ctx ww;
2221 	int err;
2222 
2223 	vma = get_oa_vma(stream, oa_config);
2224 	if (IS_ERR(vma))
2225 		return PTR_ERR(vma);
2226 
2227 	i915_gem_ww_ctx_init(&ww, true);
2228 retry:
2229 	err = i915_gem_object_lock(vma->obj, &ww);
2230 	if (err)
2231 		goto err;
2232 
2233 	err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_GLOBAL | PIN_HIGH);
2234 	if (err)
2235 		goto err;
2236 
2237 	intel_engine_pm_get(ce->engine);
2238 	rq = i915_request_create(ce);
2239 	intel_engine_pm_put(ce->engine);
2240 	if (IS_ERR(rq)) {
2241 		err = PTR_ERR(rq);
2242 		goto err_vma_unpin;
2243 	}
2244 
2245 	if (!IS_ERR_OR_NULL(active)) {
2246 		/* After all individual context modifications */
2247 		err = i915_request_await_active(rq, active,
2248 						I915_ACTIVE_AWAIT_ACTIVE);
2249 		if (err)
2250 			goto err_add_request;
2251 
2252 		err = i915_active_add_request(active, rq);
2253 		if (err)
2254 			goto err_add_request;
2255 	}
2256 
2257 	err = i915_vma_move_to_active(vma, rq, 0);
2258 	if (err)
2259 		goto err_add_request;
2260 
2261 	err = rq->engine->emit_bb_start(rq,
2262 					vma->node.start, 0,
2263 					I915_DISPATCH_SECURE);
2264 	if (err)
2265 		goto err_add_request;
2266 
2267 err_add_request:
2268 	i915_request_add(rq);
2269 err_vma_unpin:
2270 	i915_vma_unpin(vma);
2271 err:
2272 	if (err == -EDEADLK) {
2273 		err = i915_gem_ww_ctx_backoff(&ww);
2274 		if (!err)
2275 			goto retry;
2276 	}
2277 
2278 	i915_gem_ww_ctx_fini(&ww);
2279 	i915_vma_put(vma);
2280 	return err;
2281 }
2282 
2283 static struct intel_context *oa_context(struct i915_perf_stream *stream)
2284 {
2285 	return stream->pinned_ctx ?: stream->engine->kernel_context;
2286 }
2287 
2288 static int
2289 hsw_enable_metric_set(struct i915_perf_stream *stream,
2290 		      struct i915_active *active)
2291 {
2292 	struct intel_uncore *uncore = stream->uncore;
2293 
2294 	/*
2295 	 * PRM:
2296 	 *
2297 	 * OA unit is using “crclk” for its functionality. When trunk
2298 	 * level clock gating takes place, OA clock would be gated,
2299 	 * unable to count the events from non-render clock domain.
2300 	 * Render clock gating must be disabled when OA is enabled to
2301 	 * count the events from non-render domain. Unit level clock
2302 	 * gating for RCS should also be disabled.
2303 	 */
2304 	intel_uncore_rmw(uncore, GEN7_MISCCPCTL,
2305 			 GEN7_DOP_CLOCK_GATE_ENABLE, 0);
2306 	intel_uncore_rmw(uncore, GEN6_UCGCTL1,
2307 			 0, GEN6_CSUNIT_CLOCK_GATE_DISABLE);
2308 
2309 	return emit_oa_config(stream,
2310 			      stream->oa_config, oa_context(stream),
2311 			      active);
2312 }
2313 
2314 static void hsw_disable_metric_set(struct i915_perf_stream *stream)
2315 {
2316 	struct intel_uncore *uncore = stream->uncore;
2317 
2318 	intel_uncore_rmw(uncore, GEN6_UCGCTL1,
2319 			 GEN6_CSUNIT_CLOCK_GATE_DISABLE, 0);
2320 	intel_uncore_rmw(uncore, GEN7_MISCCPCTL,
2321 			 0, GEN7_DOP_CLOCK_GATE_ENABLE);
2322 
2323 	intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0);
2324 }
2325 
2326 static u32 oa_config_flex_reg(const struct i915_oa_config *oa_config,
2327 			      i915_reg_t reg)
2328 {
2329 	u32 mmio = i915_mmio_reg_offset(reg);
2330 	int i;
2331 
2332 	/*
2333 	 * This arbitrary default will select the 'EU FPU0 Pipeline
2334 	 * Active' event. In the future it's anticipated that there
2335 	 * will be an explicit 'No Event' we can select, but not yet...
2336 	 */
2337 	if (!oa_config)
2338 		return 0;
2339 
2340 	for (i = 0; i < oa_config->flex_regs_len; i++) {
2341 		if (i915_mmio_reg_offset(oa_config->flex_regs[i].addr) == mmio)
2342 			return oa_config->flex_regs[i].value;
2343 	}
2344 
2345 	return 0;
2346 }
2347 /*
2348  * NB: It must always remain pointer safe to run this even if the OA unit
2349  * has been disabled.
2350  *
2351  * It's fine to put out-of-date values into these per-context registers
2352  * in the case that the OA unit has been disabled.
2353  */
2354 static void
2355 gen8_update_reg_state_unlocked(const struct intel_context *ce,
2356 			       const struct i915_perf_stream *stream)
2357 {
2358 	u32 ctx_oactxctrl = stream->perf->ctx_oactxctrl_offset;
2359 	u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset;
2360 	/* The MMIO offsets for Flex EU registers aren't contiguous */
2361 	static const i915_reg_t flex_regs[] = {
2362 		EU_PERF_CNTL0,
2363 		EU_PERF_CNTL1,
2364 		EU_PERF_CNTL2,
2365 		EU_PERF_CNTL3,
2366 		EU_PERF_CNTL4,
2367 		EU_PERF_CNTL5,
2368 		EU_PERF_CNTL6,
2369 	};
2370 	u32 *reg_state = ce->lrc_reg_state;
2371 	int i;
2372 
2373 	reg_state[ctx_oactxctrl + 1] =
2374 		(stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
2375 		(stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
2376 		GEN8_OA_COUNTER_RESUME;
2377 
2378 	for (i = 0; i < ARRAY_SIZE(flex_regs); i++)
2379 		reg_state[ctx_flexeu0 + i * 2 + 1] =
2380 			oa_config_flex_reg(stream->oa_config, flex_regs[i]);
2381 }
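
/*
 * NB: the logical ring context image stores registers as (offset, value)
 * dword pairs, which is why the writes above target "+ 1" slots -
 * conceptually:
 *
 *   reg_state[ctx_oactxctrl + 0]: MMIO offset of GEN8_OACTXCONTROL
 *   reg_state[ctx_oactxctrl + 1]: value loaded on context restore
 */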
2382 
2383 struct flex {
2384 	i915_reg_t reg;
2385 	u32 offset;
2386 	u32 value;
2387 };
2388 
2389 static int
2390 gen8_store_flex(struct i915_request *rq,
2391 		struct intel_context *ce,
2392 		const struct flex *flex, unsigned int count)
2393 {
2394 	u32 offset;
2395 	u32 *cs;
2396 
2397 	cs = intel_ring_begin(rq, 4 * count);
2398 	if (IS_ERR(cs))
2399 		return PTR_ERR(cs);
2400 
2401 	offset = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET;
2402 	do {
2403 		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
2404 		*cs++ = offset + flex->offset * sizeof(u32);
2405 		*cs++ = 0;
2406 		*cs++ = flex->value;
2407 	} while (flex++, --count);
2408 
2409 	intel_ring_advance(rq, cs);
2410 
2411 	return 0;
2412 }
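
/*
 * For illustration: the 4-dword packet emitted per flex register above is
 *
 *   dw0: MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT
 *   dw1: GGTT address low (context image + 4 * flex->offset)
 *   dw2: GGTT address high (0)
 *   dw3: immediate value
 *
 * i.e. the value is patched directly into the saved context image rather
 * than loaded into the register itself.
 */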
2413 
2414 static int
2415 gen8_load_flex(struct i915_request *rq,
2416 	       struct intel_context *ce,
2417 	       const struct flex *flex, unsigned int count)
2418 {
2419 	u32 *cs;
2420 
2421 	GEM_BUG_ON(!count || count > 63);
2422 
2423 	cs = intel_ring_begin(rq, 2 * count + 2);
2424 	if (IS_ERR(cs))
2425 		return PTR_ERR(cs);
2426 
2427 	*cs++ = MI_LOAD_REGISTER_IMM(count);
2428 	do {
2429 		*cs++ = i915_mmio_reg_offset(flex->reg);
2430 		*cs++ = flex->value;
2431 	} while (flex++, --count);
2432 	*cs++ = MI_NOOP;
2433 
2434 	intel_ring_advance(rq, cs);
2435 
2436 	return 0;
2437 }
2438 
2439 static int gen8_modify_context(struct intel_context *ce,
2440 			       const struct flex *flex, unsigned int count)
2441 {
2442 	struct i915_request *rq;
2443 	int err;
2444 
2445 	rq = intel_engine_create_kernel_request(ce->engine);
2446 	if (IS_ERR(rq))
2447 		return PTR_ERR(rq);
2448 
2449 	/* Serialise with the remote context */
2450 	err = intel_context_prepare_remote_request(ce, rq);
2451 	if (err == 0)
2452 		err = gen8_store_flex(rq, ce, flex, count);
2453 
2454 	i915_request_add(rq);
2455 	return err;
2456 }
2457 
2458 static int
2459 gen8_modify_self(struct intel_context *ce,
2460 		 const struct flex *flex, unsigned int count,
2461 		 struct i915_active *active)
2462 {
2463 	struct i915_request *rq;
2464 	int err;
2465 
2466 	intel_engine_pm_get(ce->engine);
2467 	rq = i915_request_create(ce);
2468 	intel_engine_pm_put(ce->engine);
2469 	if (IS_ERR(rq))
2470 		return PTR_ERR(rq);
2471 
2472 	if (!IS_ERR_OR_NULL(active)) {
2473 		err = i915_active_add_request(active, rq);
2474 		if (err)
2475 			goto err_add_request;
2476 	}
2477 
2478 	err = gen8_load_flex(rq, ce, flex, count);
2479 	if (err)
2480 		goto err_add_request;
2481 
2482 err_add_request:
2483 	i915_request_add(rq);
2484 	return err;
2485 }
2486 
2487 static int gen8_configure_context(struct i915_gem_context *ctx,
2488 				  struct flex *flex, unsigned int count)
2489 {
2490 	struct i915_gem_engines_iter it;
2491 	struct intel_context *ce;
2492 	int err = 0;
2493 
2494 	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
2495 		GEM_BUG_ON(ce == ce->engine->kernel_context);
2496 
2497 		if (ce->engine->class != RENDER_CLASS)
2498 			continue;
2499 
2500 		/* Otherwise OA settings will be set upon first use */
2501 		if (!intel_context_pin_if_active(ce))
2502 			continue;
2503 
2504 		flex->value = intel_sseu_make_rpcs(ce->engine->gt, &ce->sseu);
2505 		err = gen8_modify_context(ce, flex, count);
2506 
2507 		intel_context_unpin(ce);
2508 		if (err)
2509 			break;
2510 	}
2511 	i915_gem_context_unlock_engines(ctx);
2512 
2513 	return err;
2514 }
2515 
2516 static int gen12_configure_oar_context(struct i915_perf_stream *stream,
2517 				       struct i915_active *active)
2518 {
2519 	int err;
2520 	struct intel_context *ce = stream->pinned_ctx;
2521 	u32 format = stream->oa_buffer.format->format;
2522 	u32 offset = stream->perf->ctx_oactxctrl_offset;
2523 	struct flex regs_context[] = {
2524 		{
2525 			GEN8_OACTXCONTROL,
2526 			offset + 1,
2527 			active ? GEN8_OA_COUNTER_RESUME : 0,
2528 		},
2529 	};
2530 	/* Offsets in regs_lri are not used since this configuration is only
2531 	 * applied using LRI. Initialize the correct offsets for posterity.
2532 	 */
2533 #define GEN12_OAR_OACONTROL_OFFSET 0x5B0
2534 	struct flex regs_lri[] = {
2535 		{
2536 			GEN12_OAR_OACONTROL,
2537 			GEN12_OAR_OACONTROL_OFFSET + 1,
2538 			(format << GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT) |
2539 			(active ? GEN12_OAR_OACONTROL_COUNTER_ENABLE : 0)
2540 		},
2541 		{
2542 			RING_CONTEXT_CONTROL(ce->engine->mmio_base),
2543 			CTX_CONTEXT_CONTROL,
2544 			_MASKED_FIELD(GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE,
2545 				      active ?
2546 				      GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE :
2547 				      0)
2548 		},
2549 	};
2550 
2551 	/* Modify the context image of pinned context with regs_context */
2552 	err = intel_context_lock_pinned(ce);
2553 	if (err)
2554 		return err;
2555 
2556 	err = gen8_modify_context(ce, regs_context,
2557 				  ARRAY_SIZE(regs_context));
2558 	intel_context_unlock_pinned(ce);
2559 	if (err)
2560 		return err;
2561 
2562 	/* Apply regs_lri using LRI with pinned context */
2563 	return gen8_modify_self(ce, regs_lri, ARRAY_SIZE(regs_lri), active);
2564 }
2565 
2566 /*
2567  * Manages updating the per-context aspects of the OA stream
2568  * configuration across all contexts.
2569  *
2570  * The awkward consideration here is that OACTXCONTROL controls the
2571  * exponent for periodic sampling, which is primarily used for system
2572  * wide profiling, where we'd like a consistent sampling period even in
2573  * the face of context switches.
2574  *
2575  * Our approach of updating the register state context (as opposed to
2576  * say using a workaround batch buffer) ensures that the hardware
2577  * won't automatically reload an out-of-date timer exponent even
2578  * transiently before a WA BB could be parsed.
2579  *
2580  * This function needs to:
2581  * - Ensure the currently running context's per-context OA state is
2582  *   updated
2583  * - Ensure that all existing contexts will have the correct per-context
2584  *   OA state if they are scheduled for use.
2585  * - Ensure any new contexts will be initialized with the correct
2586  *   per-context OA state.
2587  *
2588  * Note: it's only the RCS/Render context that has any OA state.
2589  * Note: the first flex register passed must always be R_PWR_CLK_STATE
2590  */
2591 static int
2592 oa_configure_all_contexts(struct i915_perf_stream *stream,
2593 			  struct flex *regs,
2594 			  size_t num_regs,
2595 			  struct i915_active *active)
2596 {
2597 	struct drm_i915_private *i915 = stream->perf->i915;
2598 	struct intel_engine_cs *engine;
2599 	struct intel_gt *gt = stream->engine->gt;
2600 	struct i915_gem_context *ctx, *cn;
2601 	int err;
2602 
2603 	lockdep_assert_held(&gt->perf.lock);
2604 
2605 	/*
2606 	 * The OA register config is set up through the context image. This image
2607 	 * might be written to by the GPU on context switch (in particular on
2608 	 * lite-restore). This means we can't safely update a context's image,
2609 	 * if this context is scheduled/submitted to run on the GPU.
2610 	 *
2611 	 * We could emit the OA register config through the batch buffer but
2612 	 * this might leave a small interval of time where the OA unit is
2613 	 * configured at an invalid sampling period.
2614 	 *
2615 	 * Note that since we emit all requests from a single ring, there
2616 	 * is still an implicit global barrier here that may cause a high
2617 	 * priority context to wait for an otherwise independent low priority
2618 	 * context. Contexts idle at the time of reconfiguration are not
2619 	 * trapped behind the barrier.
2620 	 */
2621 	spin_lock(&i915->gem.contexts.lock);
2622 	list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
2623 		if (!kref_get_unless_zero(&ctx->ref))
2624 			continue;
2625 
2626 		spin_unlock(&i915->gem.contexts.lock);
2627 
2628 		err = gen8_configure_context(ctx, regs, num_regs);
2629 		if (err) {
2630 			i915_gem_context_put(ctx);
2631 			return err;
2632 		}
2633 
2634 		spin_lock(&i915->gem.contexts.lock);
2635 		list_safe_reset_next(ctx, cn, link);
2636 		i915_gem_context_put(ctx);
2637 	}
2638 	spin_unlock(&i915->gem.contexts.lock);
2639 
2640 	/*
2641 	 * After updating all other contexts, we need to modify ourselves.
2642 	 * If we don't modify the kernel_context, we do not get events while
2643 	 * idle.
2644 	 */
2645 	for_each_uabi_engine(engine, i915) {
2646 		struct intel_context *ce = engine->kernel_context;
2647 
2648 		if (engine->class != RENDER_CLASS)
2649 			continue;
2650 
2651 		regs[0].value = intel_sseu_make_rpcs(engine->gt, &ce->sseu);
2652 
2653 		err = gen8_modify_self(ce, regs, num_regs, active);
2654 		if (err)
2655 			return err;
2656 	}
2657 
2658 	return 0;
2659 }
2660 
2661 static int
2662 gen12_configure_all_contexts(struct i915_perf_stream *stream,
2663 			     const struct i915_oa_config *oa_config,
2664 			     struct i915_active *active)
2665 {
2666 	struct flex regs[] = {
2667 		{
2668 			GEN8_R_PWR_CLK_STATE(RENDER_RING_BASE),
2669 			CTX_R_PWR_CLK_STATE,
2670 		},
2671 	};
2672 
2673 	return oa_configure_all_contexts(stream,
2674 					 regs, ARRAY_SIZE(regs),
2675 					 active);
2676 }
2677 
2678 static int
2679 lrc_configure_all_contexts(struct i915_perf_stream *stream,
2680 			   const struct i915_oa_config *oa_config,
2681 			   struct i915_active *active)
2682 {
2683 	u32 ctx_oactxctrl = stream->perf->ctx_oactxctrl_offset;
2684 	/* The MMIO offsets for Flex EU registers aren't contiguous */
2685 	const u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset;
2686 #define ctx_flexeuN(N) (ctx_flexeu0 + 2 * (N) + 1)
2687 	struct flex regs[] = {
2688 		{
2689 			GEN8_R_PWR_CLK_STATE(RENDER_RING_BASE),
2690 			CTX_R_PWR_CLK_STATE,
2691 		},
2692 		{
2693 			GEN8_OACTXCONTROL,
2694 			ctx_oactxctrl + 1,
2695 		},
2696 		{ EU_PERF_CNTL0, ctx_flexeuN(0) },
2697 		{ EU_PERF_CNTL1, ctx_flexeuN(1) },
2698 		{ EU_PERF_CNTL2, ctx_flexeuN(2) },
2699 		{ EU_PERF_CNTL3, ctx_flexeuN(3) },
2700 		{ EU_PERF_CNTL4, ctx_flexeuN(4) },
2701 		{ EU_PERF_CNTL5, ctx_flexeuN(5) },
2702 		{ EU_PERF_CNTL6, ctx_flexeuN(6) },
2703 	};
2704 #undef ctx_flexeuN
2705 	int i;
2706 
2707 	regs[1].value =
2708 		(stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
2709 		(stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
2710 		GEN8_OA_COUNTER_RESUME;
2711 
2712 	for (i = 2; i < ARRAY_SIZE(regs); i++)
2713 		regs[i].value = oa_config_flex_reg(oa_config, regs[i].reg);
2714 
2715 	return oa_configure_all_contexts(stream,
2716 					 regs, ARRAY_SIZE(regs),
2717 					 active);
2718 }
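
/*
 * Worked example of ctx_flexeuN() above: the flex EU registers occupy
 * consecutive (offset, value) pairs in the context image, so the value
 * slot for EU_PERF_CNTL2 is
 *
 *   ctx_flexeuN(2) == ctx_flexeu0 + 2 * 2 + 1 == ctx_flexeu0 + 5
 */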
2719 
2720 static int
2721 gen8_enable_metric_set(struct i915_perf_stream *stream,
2722 		       struct i915_active *active)
2723 {
2724 	struct intel_uncore *uncore = stream->uncore;
2725 	struct i915_oa_config *oa_config = stream->oa_config;
2726 	int ret;
2727 
2728 	/*
2729 	 * We disable slice/unslice clock ratio change reports on SKL since
2730 	 * they are too noisy. The HW generates a lot of redundant reports
2731 	 * where the ratio hasn't really changed, causing a lot of redundant
2732 	 * work for userspace and increasing the chances we'll hit buffer
2733 	 * overruns.
2734 	 *
2735 	 * Although we don't currently use the 'disable overrun' OABUFFER
2736 	 * feature, it's worth noting that clock ratio reports have to be
2737 	 * disabled before using that feature, since the HW doesn't
2738 	 * correctly block these reports.
2739 	 *
2740 	 * Currently none of the high-level metrics we have depend on knowing
2741 	 * this ratio to normalize.
2742 	 *
2743 	 * Note: This register is not power context saved and restored, but
2744 	 * that's OK considering that we disable RC6 while the OA unit is
2745 	 * enabled.
2746 	 *
2747 	 * The _INCLUDE_CLK_RATIO bit allows the slice/unslice frequency to
2748 	 * be read back from automatically triggered reports, as part of the
2749 	 * RPT_ID field.
2750 	 */
2751 	if (IS_GRAPHICS_VER(stream->perf->i915, 9, 11)) {
2752 		intel_uncore_write(uncore, GEN8_OA_DEBUG,
2753 				   _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
2754 						      GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
2755 	}
2756 
2757 	/*
2758 	 * Update all contexts prior to writing the mux configurations as we need
2759 	 * to make sure all slices/subslices are ON before writing to NOA
2760 	 * registers.
2761 	 */
2762 	ret = lrc_configure_all_contexts(stream, oa_config, active);
2763 	if (ret)
2764 		return ret;
2765 
2766 	return emit_oa_config(stream,
2767 			      stream->oa_config, oa_context(stream),
2768 			      active);
2769 }
2770 
2771 static u32 oag_report_ctx_switches(const struct i915_perf_stream *stream)
2772 {
2773 	return _MASKED_FIELD(GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS,
2774 			     (stream->sample_flags & SAMPLE_OA_REPORT) ?
2775 			     0 : GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS);
2776 }
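
/*
 * NB: _MASKED_FIELD() pairs the mask in the upper 16 bits of the register
 * with the value in the lower 16 bits, so the helper above always arms
 * the DISABLE_CTX_SWITCH_REPORTS mask bit and then either sets the bit
 * (no OA report sampling requested) or clears it (sampling enabled).
 */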
2777 
2778 static int
2779 gen12_enable_metric_set(struct i915_perf_stream *stream,
2780 			struct i915_active *active)
2781 {
2782 	struct drm_i915_private *i915 = stream->perf->i915;
2783 	struct intel_uncore *uncore = stream->uncore;
2784 	struct i915_oa_config *oa_config = stream->oa_config;
2785 	bool periodic = stream->periodic;
2786 	u32 period_exponent = stream->period_exponent;
2787 	u32 sqcnt1;
2788 	int ret;
2789 
2790 	/*
2791 	 * Wa_1508761755:xehpsdv, dg2
2792 	 * EU NOA signals behave incorrectly if EU clock gating is enabled.
2793 	 * Disable thread stall DOP gating and EU DOP gating.
2794 	 */
2795 	if (IS_XEHPSDV(i915) || IS_DG2(i915)) {
2796 		intel_gt_mcr_multicast_write(uncore->gt, GEN8_ROW_CHICKEN,
2797 					     _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
2798 		intel_uncore_write(uncore, GEN7_ROW_CHICKEN2,
2799 				   _MASKED_BIT_ENABLE(GEN12_DISABLE_DOP_GATING));
2800 	}
2801 
2802 	intel_uncore_write(uncore, GEN12_OAG_OA_DEBUG,
2803 			   /* Disable clk ratio reports, like previous Gens. */
2804 			   _MASKED_BIT_ENABLE(GEN12_OAG_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
2805 					      GEN12_OAG_OA_DEBUG_INCLUDE_CLK_RATIO) |
2806 			   /*
2807 			    * If the user didn't require OA reports, instruct
2808 			    * the hardware not to emit ctx switch reports.
2809 			    */
2810 			   oag_report_ctx_switches(stream));
2811 
2812 	intel_uncore_write(uncore, GEN12_OAG_OAGLBCTXCTRL, periodic ?
2813 			   (GEN12_OAG_OAGLBCTXCTRL_COUNTER_RESUME |
2814 			    GEN12_OAG_OAGLBCTXCTRL_TIMER_ENABLE |
2815 			    (period_exponent << GEN12_OAG_OAGLBCTXCTRL_TIMER_PERIOD_SHIFT))
2816 			    : 0);
2817 
2818 	/*
2819 	 * Initialize the Super Queue Internal Count Register.
2820 	 * Set PMON Enable in order to collect valid metrics.
2821 	 * Enable bytes per clock reporting in OA for XEHPSDV onward.
2822 	 */
2823 	sqcnt1 = GEN12_SQCNT1_PMON_ENABLE |
2824 		 (HAS_OA_BPC_REPORTING(i915) ? GEN12_SQCNT1_OABPC : 0);
2825 
2826 	intel_uncore_rmw(uncore, GEN12_SQCNT1, 0, sqcnt1);
2827 
2828 	/*
2829 	 * Update all contexts prior to writing the mux configurations as we need
2830 	 * to make sure all slices/subslices are ON before writing to NOA
2831 	 * registers.
2832 	 */
2833 	ret = gen12_configure_all_contexts(stream, oa_config, active);
2834 	if (ret)
2835 		return ret;
2836 
2837 	/*
2838 	 * For Gen12, performance counters are context
2839 	 * saved/restored. Only enable them for the context that
2840 	 * requested this.
2841 	 */
2842 	if (stream->ctx) {
2843 		ret = gen12_configure_oar_context(stream, active);
2844 		if (ret)
2845 			return ret;
2846 	}
2847 
2848 	return emit_oa_config(stream,
2849 			      stream->oa_config, oa_context(stream),
2850 			      active);
2851 }
2852 
2853 static void gen8_disable_metric_set(struct i915_perf_stream *stream)
2854 {
2855 	struct intel_uncore *uncore = stream->uncore;
2856 
2857 	/* Reset all contexts' slices/subslices configurations. */
2858 	lrc_configure_all_contexts(stream, NULL, NULL);
2859 
2860 	intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0);
2861 }
2862 
2863 static void gen11_disable_metric_set(struct i915_perf_stream *stream)
2864 {
2865 	struct intel_uncore *uncore = stream->uncore;
2866 
2867 	/* Reset all contexts' slices/subslices configurations. */
2868 	lrc_configure_all_contexts(stream, NULL, NULL);
2869 
2870 	/* Make sure we disable noa to save power. */
2871 	intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
2872 }
2873 
2874 static void gen12_disable_metric_set(struct i915_perf_stream *stream)
2875 {
2876 	struct intel_uncore *uncore = stream->uncore;
2877 	struct drm_i915_private *i915 = stream->perf->i915;
2878 	u32 sqcnt1;
2879 
2880 	/*
2881 	 * Wa_1508761755:xehpsdv, dg2
2882 	 * Enable thread stall DOP gating and EU DOP gating.
2883 	 */
2884 	if (IS_XEHPSDV(i915) || IS_DG2(i915)) {
2885 		intel_gt_mcr_multicast_write(uncore->gt, GEN8_ROW_CHICKEN,
2886 					     _MASKED_BIT_DISABLE(STALL_DOP_GATING_DISABLE));
2887 		intel_uncore_write(uncore, GEN7_ROW_CHICKEN2,
2888 				   _MASKED_BIT_DISABLE(GEN12_DISABLE_DOP_GATING));
2889 	}
2890 
2891 	/* Reset all contexts' slices/subslices configurations. */
2892 	gen12_configure_all_contexts(stream, NULL, NULL);
2893 
2894 	/* Disable the context save/restore of OAR counters */
2895 	if (stream->ctx)
2896 		gen12_configure_oar_context(stream, NULL);
2897 
2898 	/* Make sure we disable noa to save power. */
2899 	intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
2900 
2901 	sqcnt1 = GEN12_SQCNT1_PMON_ENABLE |
2902 		 (HAS_OA_BPC_REPORTING(i915) ? GEN12_SQCNT1_OABPC : 0);
2903 
2904 	/* Reset PMON Enable to save power. */
2905 	intel_uncore_rmw(uncore, GEN12_SQCNT1, sqcnt1, 0);
2906 }
2907 
2908 static void gen7_oa_enable(struct i915_perf_stream *stream)
2909 {
2910 	struct intel_uncore *uncore = stream->uncore;
2911 	struct i915_gem_context *ctx = stream->ctx;
2912 	u32 ctx_id = stream->specific_ctx_id;
2913 	bool periodic = stream->periodic;
2914 	u32 period_exponent = stream->period_exponent;
2915 	u32 report_format = stream->oa_buffer.format->format;
2916 
2917 	/*
2918 	 * Reset buf pointers so we don't forward reports from before now.
2919 	 *
2920 	 * Think carefully if considering trying to avoid this, since it
2921 	 * also ensures status flags and the buffer itself are cleared
2922 	 * in error paths, and we have checks for invalid reports based
2923 	 * on the assumption that certain fields are written to zeroed
2924 	 * memory, which this helps maintain.
2925 	 */
2926 	gen7_init_oa_buffer(stream);
2927 
2928 	intel_uncore_write(uncore, GEN7_OACONTROL,
2929 			   (ctx_id & GEN7_OACONTROL_CTX_MASK) |
2930 			   (period_exponent <<
2931 			    GEN7_OACONTROL_TIMER_PERIOD_SHIFT) |
2932 			   (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) |
2933 			   (report_format << GEN7_OACONTROL_FORMAT_SHIFT) |
2934 			   (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) |
2935 			   GEN7_OACONTROL_ENABLE);
2936 }
2937 
2938 static void gen8_oa_enable(struct i915_perf_stream *stream)
2939 {
2940 	struct intel_uncore *uncore = stream->uncore;
2941 	u32 report_format = stream->oa_buffer.format->format;
2942 
2943 	/*
2944 	 * Reset buf pointers so we don't forward reports from before now.
2945 	 *
2946 	 * Think carefully if considering trying to avoid this, since it
2947 	 * also ensures status flags and the buffer itself are cleared
2948 	 * in error paths, and we have checks for invalid reports based
2949 	 * on the assumption that certain fields are written to zeroed
2950 	 * memory, which this helps maintain.
2951 	 */
2952 	gen8_init_oa_buffer(stream);
2953 
2954 	/*
2955 	 * Note: we don't rely on the hardware to perform single context
2956 	 * filtering and instead filter on the CPU based on the context-id
2957 	 * field of reports.
2958 	 */
2959 	intel_uncore_write(uncore, GEN8_OACONTROL,
2960 			   (report_format << GEN8_OA_REPORT_FORMAT_SHIFT) |
2961 			   GEN8_OA_COUNTER_ENABLE);
2962 }
2963 
2964 static void gen12_oa_enable(struct i915_perf_stream *stream)
2965 {
2966 	struct intel_uncore *uncore = stream->uncore;
2967 	u32 report_format = stream->oa_buffer.format->format;
2968 
2969 	/*
2970 	 * If we don't want OA reports from the OA buffer, then we don't even
2971 	 * need to program the OAG unit.
2972 	 */
2973 	if (!(stream->sample_flags & SAMPLE_OA_REPORT))
2974 		return;
2975 
2976 	gen12_init_oa_buffer(stream);
2977 
2978 	intel_uncore_write(uncore, GEN12_OAG_OACONTROL,
2979 			   (report_format << GEN12_OAG_OACONTROL_OA_COUNTER_FORMAT_SHIFT) |
2980 			   GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE);
2981 }
2982 
2983 /**
2984  * i915_oa_stream_enable - handle `I915_PERF_IOCTL_ENABLE` for OA stream
2985  * @stream: An i915 perf stream opened for OA metrics
2986  *
2987  * [Re]enables hardware periodic sampling according to the period configured
2988  * when opening the stream. This also starts a hrtimer that will periodically
2989  * check for data in the circular OA buffer for notifying userspace (e.g.
2990  * during a read() or poll()).
2991  */
2992 static void i915_oa_stream_enable(struct i915_perf_stream *stream)
2993 {
2994 	stream->pollin = false;
2995 
2996 	stream->perf->ops.oa_enable(stream);
2997 
2998 	if (stream->sample_flags & SAMPLE_OA_REPORT)
2999 		hrtimer_start(&stream->poll_check_timer,
3000 			      ns_to_ktime(stream->poll_oa_period),
3001 			      HRTIMER_MODE_REL_PINNED);
3002 }
3003 
3004 static void gen7_oa_disable(struct i915_perf_stream *stream)
3005 {
3006 	struct intel_uncore *uncore = stream->uncore;
3007 
3008 	intel_uncore_write(uncore, GEN7_OACONTROL, 0);
3009 	if (intel_wait_for_register(uncore,
3010 				    GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0,
3011 				    50))
3012 		drm_err(&stream->perf->i915->drm,
3013 			"wait for OA to be disabled timed out\n");
3014 }
3015 
3016 static void gen8_oa_disable(struct i915_perf_stream *stream)
3017 {
3018 	struct intel_uncore *uncore = stream->uncore;
3019 
3020 	intel_uncore_write(uncore, GEN8_OACONTROL, 0);
3021 	if (intel_wait_for_register(uncore,
3022 				    GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0,
3023 				    50))
3024 		drm_err(&stream->perf->i915->drm,
3025 			"wait for OA to be disabled timed out\n");
3026 }
3027 
3028 static void gen12_oa_disable(struct i915_perf_stream *stream)
3029 {
3030 	struct intel_uncore *uncore = stream->uncore;
3031 
3032 	intel_uncore_write(uncore, GEN12_OAG_OACONTROL, 0);
3033 	if (intel_wait_for_register(uncore,
3034 				    GEN12_OAG_OACONTROL,
3035 				    GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE, 0,
3036 				    50))
3037 		drm_err(&stream->perf->i915->drm,
3038 			"wait for OA to be disabled timed out\n");
3039 
3040 	intel_uncore_write(uncore, GEN12_OA_TLB_INV_CR, 1);
3041 	if (intel_wait_for_register(uncore,
3042 				    GEN12_OA_TLB_INV_CR,
3043 				    1, 0,
3044 				    50))
3045 		drm_err(&stream->perf->i915->drm,
3046 			"wait for OA tlb invalidate timed out\n");
3047 }
3048 
3049 /**
3050  * i915_oa_stream_disable - handle `I915_PERF_IOCTL_DISABLE` for OA stream
3051  * @stream: An i915 perf stream opened for OA metrics
3052  *
3053  * Stops the OA unit from periodically writing counter reports into the
3054  * circular OA buffer. This also stops the hrtimer that periodically checks for
3055  * data in the circular OA buffer, for notifying userspace.
3056  */
3057 static void i915_oa_stream_disable(struct i915_perf_stream *stream)
3058 {
3059 	stream->perf->ops.oa_disable(stream);
3060 
3061 	if (stream->sample_flags & SAMPLE_OA_REPORT)
3062 		hrtimer_cancel(&stream->poll_check_timer);
3063 }
3064 
3065 static const struct i915_perf_stream_ops i915_oa_stream_ops = {
3066 	.destroy = i915_oa_stream_destroy,
3067 	.enable = i915_oa_stream_enable,
3068 	.disable = i915_oa_stream_disable,
3069 	.wait_unlocked = i915_oa_wait_unlocked,
3070 	.poll_wait = i915_oa_poll_wait,
3071 	.read = i915_oa_read,
3072 };
3073 
3074 static int i915_perf_stream_enable_sync(struct i915_perf_stream *stream)
3075 {
3076 	struct i915_active *active;
3077 	int err;
3078 
3079 	active = i915_active_create();
3080 	if (!active)
3081 		return -ENOMEM;
3082 
3083 	err = stream->perf->ops.enable_metric_set(stream, active);
3084 	if (err == 0)
3085 		__i915_active_wait(active, TASK_UNINTERRUPTIBLE);
3086 
3087 	i915_active_put(active);
3088 	return err;
3089 }
3090 
3091 static void
3092 get_default_sseu_config(struct intel_sseu *out_sseu,
3093 			struct intel_engine_cs *engine)
3094 {
3095 	const struct sseu_dev_info *devinfo_sseu = &engine->gt->info.sseu;
3096 
3097 	*out_sseu = intel_sseu_from_device_info(devinfo_sseu);
3098 
3099 	if (GRAPHICS_VER(engine->i915) == 11) {
3100 		/*
3101 		 * We only need the subslice count, so it doesn't matter which
3102 		 * ones we select - just set the low bits covering half of all
3103 		 * available subslices per slice.
3104 		 */
3105 		out_sseu->subslice_mask =
3106 			~(~0 << (hweight8(out_sseu->subslice_mask) / 2));
3107 		out_sseu->slice_mask = 0x1;
3108 	}
3109 }
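
/*
 * Worked example of the mask math above: with 8 available subslices,
 * hweight8() is 8, half of that is 4, and
 *
 *   ~(~0 << 4) == 0xf
 *
 * i.e. the four lowest subslices are selected.
 */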
3110 
3111 static int
3112 get_sseu_config(struct intel_sseu *out_sseu,
3113 		struct intel_engine_cs *engine,
3114 		const struct drm_i915_gem_context_param_sseu *drm_sseu)
3115 {
3116 	if (drm_sseu->engine.engine_class != engine->uabi_class ||
3117 	    drm_sseu->engine.engine_instance != engine->uabi_instance)
3118 		return -EINVAL;
3119 
3120 	return i915_gem_user_to_context_sseu(engine->gt, drm_sseu, out_sseu);
3121 }
3122 
3123 /*
3124  * OA timestamp frequency = CS timestamp frequency on most platforms. On some
3125  * platforms the OA unit ignores the CTC_SHIFT and the two timestamps differ.
3126  * In such cases, return the adjusted CS timestamp frequency to the user.
3127  */
3128 u32 i915_perf_oa_timestamp_frequency(struct drm_i915_private *i915)
3129 {
3130 	/* Wa_18013179988:dg2 */
3131 	if (IS_DG2(i915)) {
3132 		intel_wakeref_t wakeref;
3133 		u32 reg, shift;
3134 
3135 		with_intel_runtime_pm(to_gt(i915)->uncore->rpm, wakeref)
3136 			reg = intel_uncore_read(to_gt(i915)->uncore, RPM_CONFIG0);
3137 
3138 		shift = REG_FIELD_GET(GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK,
3139 				      reg);
3140 
3141 		return to_gt(i915)->clock_frequency << (3 - shift);
3142 	}
3143 
3144 	return to_gt(i915)->clock_frequency;
3145 }
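
/*
 * Worked example (hypothetical values): with a CS clock of 19200000 Hz
 * and a CTC_SHIFT field of 1, the frequency reported to userspace is
 *
 *   19200000 << (3 - 1) == 76800000 Hz
 */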
3146 
3147 /**
3148  * i915_oa_stream_init - validate combined props for OA stream and init
3149  * @stream: An i915 perf stream
3150  * @param: The open parameters passed to `DRM_I915_PERF_OPEN`
3151  * @props: The property state that configures stream (individually validated)
3152  *
3153  * While read_properties_unlocked() validates properties in isolation it
3154  * doesn't ensure that the combination necessarily makes sense.
3155  *
3156  * At this point it has been determined that userspace wants a stream of
3157  * OA metrics, but still we need to further validate the combined
3158  * properties are OK.
3159  *
3160  * If the configuration makes sense then we can allocate memory for
3161  * a circular OA buffer and apply the requested metric set configuration.
3162  *
3163  * Returns: zero on success or a negative error code.
3164  */
3165 static int i915_oa_stream_init(struct i915_perf_stream *stream,
3166 			       struct drm_i915_perf_open_param *param,
3167 			       struct perf_open_properties *props)
3168 {
3169 	struct drm_i915_private *i915 = stream->perf->i915;
3170 	struct i915_perf *perf = stream->perf;
3171 	struct intel_gt *gt;
3172 	int ret;
3173 
3174 	if (!props->engine) {
3175 		drm_dbg(&stream->perf->i915->drm,
3176 			"OA engine not specified\n");
3177 		return -EINVAL;
3178 	}
3179 	gt = props->engine->gt;
3180 
3181 	/*
3182 	 * If the sysfs metrics/ directory wasn't registered for some
3183 	 * reason then don't let userspace try their luck with config
3184 	 * IDs
3185 	 */
3186 	if (!perf->metrics_kobj) {
3187 		drm_dbg(&stream->perf->i915->drm,
3188 			"OA metrics weren't advertised via sysfs\n");
3189 		return -EINVAL;
3190 	}
3191 
3192 	if (!(props->sample_flags & SAMPLE_OA_REPORT) &&
3193 	    (GRAPHICS_VER(perf->i915) < 12 || !stream->ctx)) {
3194 		drm_dbg(&stream->perf->i915->drm,
3195 			"Only OA report sampling supported\n");
3196 		return -EINVAL;
3197 	}
3198 
3199 	if (!perf->ops.enable_metric_set) {
3200 		drm_dbg(&stream->perf->i915->drm,
3201 			"OA unit not supported\n");
3202 		return -ENODEV;
3203 	}
3204 
3205 	/*
3206 	 * To avoid the complexity of having to accurately filter
3207 	 * counter reports and marshal to the appropriate client
3208 	 * we currently only allow exclusive access
3209 	 */
3210 	if (gt->perf.exclusive_stream) {
3211 		drm_dbg(&stream->perf->i915->drm,
3212 			"OA unit already in use\n");
3213 		return -EBUSY;
3214 	}
3215 
3216 	if (!props->oa_format) {
3217 		drm_dbg(&stream->perf->i915->drm,
3218 			"OA report format not specified\n");
3219 		return -EINVAL;
3220 	}
3221 
3222 	stream->engine = props->engine;
3223 	stream->uncore = stream->engine->gt->uncore;
3224 
3225 	stream->sample_size = sizeof(struct drm_i915_perf_record_header);
3226 
3227 	stream->oa_buffer.format = &perf->oa_formats[props->oa_format];
3228 	if (drm_WARN_ON(&i915->drm, stream->oa_buffer.format->size == 0))
3229 		return -EINVAL;
3230 
3231 	stream->sample_flags = props->sample_flags;
3232 	stream->sample_size += stream->oa_buffer.format->size;
3233 
3234 	stream->hold_preemption = props->hold_preemption;
3235 
3236 	stream->periodic = props->oa_periodic;
3237 	if (stream->periodic)
3238 		stream->period_exponent = props->oa_period_exponent;
3239 
3240 	if (stream->ctx) {
3241 		ret = oa_get_render_ctx_id(stream);
3242 		if (ret) {
3243 			drm_dbg(&stream->perf->i915->drm,
3244 				"Invalid context id to filter with\n");
3245 			return ret;
3246 		}
3247 	}
3248 
3249 	ret = alloc_noa_wait(stream);
3250 	if (ret) {
3251 		drm_dbg(&stream->perf->i915->drm,
3252 			"Unable to allocate NOA wait batch buffer\n");
3253 		goto err_noa_wait_alloc;
3254 	}
3255 
3256 	stream->oa_config = i915_perf_get_oa_config(perf, props->metrics_set);
3257 	if (!stream->oa_config) {
3258 		drm_dbg(&stream->perf->i915->drm,
3259 			"Invalid OA config id=%i\n", props->metrics_set);
3260 		ret = -EINVAL;
3261 		goto err_config;
3262 	}
3263 
3264 	/* PRM - observability performance counters:
3265 	 *
3266 	 *   OACONTROL, performance counter enable, note:
3267 	 *
3268 	 *   "When this bit is set, in order to have coherent counts,
3269 	 *   RC6 power state and trunk clock gating must be disabled.
3270 	 *   This can be achieved by programming MMIO registers as
3271 	 *   0xA094=0 and 0xA090[31]=1"
3272 	 *
3273 	 *   In our case we are expecting that taking pm + FORCEWAKE
3274 	 *   references will effectively disable RC6.
3275 	 */
3276 	intel_engine_pm_get(stream->engine);
3277 	intel_uncore_forcewake_get(stream->uncore, FORCEWAKE_ALL);
3278 
3279 	/*
3280 	 * Wa_16011777198:dg2: GuC resets render as part of the Wa. This causes
3281 	 * OA to lose the configuration state. Prevent this by overriding GUCRC
3282 	 * mode.
3283 	 */
3284 	if (intel_uc_uses_guc_rc(&gt->uc) &&
3285 	    (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_C0) ||
3286 	     IS_DG2_GRAPHICS_STEP(gt->i915, G11, STEP_A0, STEP_B0))) {
3287 		ret = intel_guc_slpc_override_gucrc_mode(&gt->uc.guc.slpc,
3288 							 SLPC_GUCRC_MODE_GUCRC_NO_RC6);
3289 		if (ret) {
3290 			drm_dbg(&stream->perf->i915->drm,
3291 				"Unable to override gucrc mode\n");
3292 			goto err_config;
3293 		}
3294 	}
3295 
3296 	ret = alloc_oa_buffer(stream);
3297 	if (ret)
3298 		goto err_oa_buf_alloc;
3299 
3300 	stream->ops = &i915_oa_stream_ops;
3301 
3302 	stream->engine->gt->perf.sseu = props->sseu;
3303 	WRITE_ONCE(gt->perf.exclusive_stream, stream);
3304 
3305 	ret = i915_perf_stream_enable_sync(stream);
3306 	if (ret) {
3307 		drm_dbg(&stream->perf->i915->drm,
3308 			"Unable to enable metric set\n");
3309 		goto err_enable;
3310 	}
3311 
3312 	drm_dbg(&stream->perf->i915->drm,
3313 		"opening stream oa config uuid=%s\n",
3314 		  stream->oa_config->uuid);
3315 
3316 	hrtimer_init(&stream->poll_check_timer,
3317 		     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3318 	stream->poll_check_timer.function = oa_poll_check_timer_cb;
3319 	init_waitqueue_head(&stream->poll_wq);
3320 	spin_lock_init(&stream->oa_buffer.ptr_lock);
3321 	mutex_init(&stream->lock);
3322 
3323 	return 0;
3324 
3325 err_enable:
3326 	WRITE_ONCE(gt->perf.exclusive_stream, NULL);
3327 	perf->ops.disable_metric_set(stream);
3328 
3329 	free_oa_buffer(stream);
3330 
3331 err_oa_buf_alloc:
3332 	free_oa_configs(stream);
3333 
3334 	intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL);
3335 	intel_engine_pm_put(stream->engine);
3336 
3337 err_config:
3338 	free_noa_wait(stream);
3339 
3340 err_noa_wait_alloc:
3341 	if (stream->ctx)
3342 		oa_put_render_ctx_id(stream);
3343 
3344 	return ret;
3345 }
3346 
3347 void i915_oa_init_reg_state(const struct intel_context *ce,
3348 			    const struct intel_engine_cs *engine)
3349 {
3350 	struct i915_perf_stream *stream;
3351 
3352 	if (engine->class != RENDER_CLASS)
3353 		return;
3354 
3355 	/* perf.exclusive_stream serialised by lrc_configure_all_contexts() */
3356 	stream = READ_ONCE(engine->gt->perf.exclusive_stream);
3357 	if (stream && GRAPHICS_VER(stream->perf->i915) < 12)
3358 		gen8_update_reg_state_unlocked(ce, stream);
3359 }
3360 
3361 /**
3362  * i915_perf_read - handles read() FOP for i915 perf stream FDs
3363  * @file: An i915 perf stream file
3364  * @buf: destination buffer given by userspace
3365  * @count: the number of bytes userspace wants to read
3366  * @ppos: (inout) file seek position (unused)
3367  *
3368  * The entry point for handling a read() on a stream file descriptor from
3369  * userspace. Most of the work is left to i915_perf_read_locked() and
3370  * &i915_perf_stream_ops->read, but to spare stream implementations (of
3371  * which we might have multiple later) the complexity, we handle blocking reads here.
3372  *
3373  * We can also consistently treat trying to read from a disabled stream
3374  * as an IO error so implementations can assume the stream is enabled
3375  * while reading.
3376  *
3377  * Returns: The number of bytes copied or a negative error code on failure.
3378  */
3379 static ssize_t i915_perf_read(struct file *file,
3380 			      char __user *buf,
3381 			      size_t count,
3382 			      loff_t *ppos)
3383 {
3384 	struct i915_perf_stream *stream = file->private_data;
3385 	size_t offset = 0;
3386 	int ret;
3387 
3388 	/* To ensure it's handled consistently we simply treat all reads of a
3389 	 * disabled stream as an error. In particular it might otherwise lead
3390 	 * to a deadlock for blocking file descriptors...
3391 	 */
3392 	if (!stream->enabled || !(stream->sample_flags & SAMPLE_OA_REPORT))
3393 		return -EIO;
3394 
3395 	if (!(file->f_flags & O_NONBLOCK)) {
3396 		/* There's the small chance of false positives from
3397 		 * stream->ops->wait_unlocked.
3398 		 *
3399 		 * E.g. with single context filtering, since we only wait until
3400 		 * the OA buffer has >= 1 report, we don't immediately know
3401 		 * whether any reports really belong to the current context.
3402 		 */
3403 		do {
3404 			ret = stream->ops->wait_unlocked(stream);
3405 			if (ret)
3406 				return ret;
3407 
3408 			mutex_lock(&stream->lock);
3409 			ret = stream->ops->read(stream, buf, count, &offset);
3410 			mutex_unlock(&stream->lock);
3411 		} while (!offset && !ret);
3412 	} else {
3413 		mutex_lock(&stream->lock);
3414 		ret = stream->ops->read(stream, buf, count, &offset);
3415 		mutex_unlock(&stream->lock);
3416 	}
3417 
3418 	/* We allow the poll checking to sometimes report false positive EPOLLIN
3419 	 * events where we might actually report EAGAIN on read() if there's
3420 	 * not really any data available. In this situation though we don't
3421 	 * want to enter a busy loop between poll() reporting an EPOLLIN event
3422 	 * and read() returning -EAGAIN. Clearing the oa.pollin state here
3423 	 * effectively ensures we back off until the next hrtimer callback
3424 	 * before reporting another EPOLLIN event.
3425 	 * The exception to this is if ops->read() returned -ENOSPC which means
3426 	 * that more OA data is available than could fit in the user provided
3427 	 * buffer. In this case we want the next poll() call to not block.
3428 	 */
3429 	if (ret != -ENOSPC)
3430 		stream->pollin = false;
3431 
3432 	/* Possible values for ret are 0, -EFAULT, -ENOSPC, -EIO, ... */
3433 	return offset ?: (ret ?: -EAGAIN);
3434 }
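
/*
 * A minimal userspace sketch of consuming a stream (illustrative only:
 * error handling is trimmed and process_oa_report() is a hypothetical
 * helper; the report layout depends on the OA format chosen at open time).
 * Records are framed with struct drm_i915_perf_record_header:
 *
 *	uint8_t buf[16 * 1024];
 *	ssize_t len = read(stream_fd, buf, sizeof(buf));
 *	size_t off = 0;
 *
 *	while (len > 0 && off + sizeof(struct drm_i915_perf_record_header) <= (size_t)len) {
 *		const struct drm_i915_perf_record_header *hdr = (void *)(buf + off);
 *
 *		if (hdr->type == DRM_I915_PERF_RECORD_SAMPLE)
 *			process_oa_report(hdr + 1);
 *		off += hdr->size;
 *	}
 */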
3435 
3436 static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
3437 {
3438 	struct i915_perf_stream *stream =
3439 		container_of(hrtimer, typeof(*stream), poll_check_timer);
3440 
3441 	if (oa_buffer_check_unlocked(stream)) {
3442 		stream->pollin = true;
3443 		wake_up(&stream->poll_wq);
3444 	}
3445 
3446 	hrtimer_forward_now(hrtimer,
3447 			    ns_to_ktime(stream->poll_oa_period));
3448 
3449 	return HRTIMER_RESTART;
3450 }
3451 
3452 /**
3453  * i915_perf_poll_locked - poll_wait() with a suitable wait queue for stream
3454  * @stream: An i915 perf stream
3455  * @file: An i915 perf stream file
3456  * @wait: poll() state table
3457  *
3458  * For handling userspace polling on an i915 perf stream, this calls through to
3459  * &i915_perf_stream_ops->poll_wait to call poll_wait() with a wait queue that
3460  * will be woken for new stream data.
3461  *
3462  * Returns: any poll events that are ready without sleeping
3463  */
3464 static __poll_t i915_perf_poll_locked(struct i915_perf_stream *stream,
3465 				      struct file *file,
3466 				      poll_table *wait)
3467 {
3468 	__poll_t events = 0;
3469 
3470 	stream->ops->poll_wait(stream, file, wait);
3471 
3472 	/* Note: we don't explicitly check whether there's something to read
3473 	 * here since this path may be very hot depending on what else
3474 	 * userspace is polling, or on the timeout in use. We rely solely on
3475 	 * the hrtimer/oa_poll_check_timer_cb to notify us when there are
3476 	 * samples to read.
3477 	 */
3478 	if (stream->pollin)
3479 		events |= EPOLLIN;
3480 
3481 	return events;
3482 }
3483 
3484 /**
3485  * i915_perf_poll - call poll_wait() with a suitable wait queue for stream
3486  * @file: An i915 perf stream file
3487  * @wait: poll() state table
3488  *
3489  * For handling userspace polling on an i915 perf stream, this ensures
3490  * poll_wait() gets called with a wait queue that will be woken for new stream
3491  * data.
3492  *
3493  * Note: Implementation deferred to i915_perf_poll_locked()
3494  *
3495  * Returns: any poll events that are ready without sleeping
3496  */
3497 static __poll_t i915_perf_poll(struct file *file, poll_table *wait)
3498 {
3499 	struct i915_perf_stream *stream = file->private_data;
3500 	__poll_t ret;
3501 
3502 	mutex_lock(&stream->lock);
3503 	ret = i915_perf_poll_locked(stream, file, wait);
3504 	mutex_unlock(&stream->lock);
3505 
3506 	return ret;
3507 }
3508 
3509 /**
3510  * i915_perf_enable_locked - handle `I915_PERF_IOCTL_ENABLE` ioctl
3511  * @stream: A disabled i915 perf stream
3512  *
3513  * [Re]enables the associated capture of data for this stream.
3514  *
3515  * If a stream was previously enabled then there's currently no intention
3516  * to provide userspace any guarantee about the preservation of previously
3517  * buffered data.
3518  */
3519 static void i915_perf_enable_locked(struct i915_perf_stream *stream)
3520 {
3521 	if (stream->enabled)
3522 		return;
3523 
3524 	/* Allow stream->ops->enable() to refer to this */
3525 	stream->enabled = true;
3526 
3527 	if (stream->ops->enable)
3528 		stream->ops->enable(stream);
3529 
3530 	if (stream->hold_preemption)
3531 		intel_context_set_nopreempt(stream->pinned_ctx);
3532 }
3533 
3534 /**
3535  * i915_perf_disable_locked - handle `I915_PERF_IOCTL_DISABLE` ioctl
3536  * @stream: An enabled i915 perf stream
3537  *
3538  * Disables the associated capture of data for this stream.
3539  *
3540  * The intention is that disabling and re-enabling a stream will ideally be
3541  * cheaper than destroying and re-opening a stream with the same configuration,
3542  * though there are no formal guarantees about what state or buffered data
3543  * must be retained between disabling and re-enabling a stream.
3544  *
3545  * Note: while a stream is disabled it's considered an error for userspace
3546  * to attempt to read from the stream (-EIO).
3547  */
3548 static void i915_perf_disable_locked(struct i915_perf_stream *stream)
3549 {
3550 	if (!stream->enabled)
3551 		return;
3552 
3553 	/* Allow stream->ops->disable() to refer to this */
3554 	stream->enabled = false;
3555 
3556 	if (stream->hold_preemption)
3557 		intel_context_clear_nopreempt(stream->pinned_ctx);
3558 
3559 	if (stream->ops->disable)
3560 		stream->ops->disable(stream);
3561 }
3562 
3563 static long i915_perf_config_locked(struct i915_perf_stream *stream,
3564 				    unsigned long metrics_set)
3565 {
3566 	struct i915_oa_config *config;
3567 	long ret = stream->oa_config->id;
3568 
3569 	config = i915_perf_get_oa_config(stream->perf, metrics_set);
3570 	if (!config)
3571 		return -EINVAL;
3572 
3573 	if (config != stream->oa_config) {
3574 		int err;
3575 
3576 		/*
3577 		 * If OA is bound to a specific context, emit the
3578 		 * reconfiguration inline from that context. The update
3579 		 * will then be ordered with respect to submission on that
3580 		 * context.
3581 		 *
3582 		 * When set globally, we use a low priority kernel context,
3583 		 * so it will effectively take effect when idle.
3584 		 */
3585 		err = emit_oa_config(stream, config, oa_context(stream), NULL);
3586 		if (!err)
3587 			config = xchg(&stream->oa_config, config);
3588 		else
3589 			ret = err;
3590 	}
3591 
3592 	i915_oa_config_put(config);
3593 
3594 	return ret;
3595 }
3596 
3597 /**
3598  * i915_perf_ioctl_locked - support ioctl() usage with i915 perf stream FDs
3599  * @stream: An i915 perf stream
3600  * @cmd: the ioctl request
3601  * @arg: the ioctl data
3602  *
3603  * Returns: zero on success or a negative error code. Returns -EINVAL for
3604  * an unknown ioctl request.
3605  */
3606 static long i915_perf_ioctl_locked(struct i915_perf_stream *stream,
3607 				   unsigned int cmd,
3608 				   unsigned long arg)
3609 {
3610 	switch (cmd) {
3611 	case I915_PERF_IOCTL_ENABLE:
3612 		i915_perf_enable_locked(stream);
3613 		return 0;
3614 	case I915_PERF_IOCTL_DISABLE:
3615 		i915_perf_disable_locked(stream);
3616 		return 0;
3617 	case I915_PERF_IOCTL_CONFIG:
3618 		return i915_perf_config_locked(stream, arg);
3619 	}
3620 
3621 	return -EINVAL;
3622 }
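
/*
 * For illustration, none of these ioctls carry a struct payload from
 * userspace; I915_PERF_IOCTL_CONFIG passes the metrics set ID as the ioctl
 * argument itself and returns the ID of the config that was active when the
 * call was made (sketch, error handling trimmed):
 *
 *	ioctl(stream_fd, I915_PERF_IOCTL_ENABLE, 0);
 *	...
 *	long prev_id = ioctl(stream_fd, I915_PERF_IOCTL_CONFIG, new_config_id);
 *	...
 *	ioctl(stream_fd, I915_PERF_IOCTL_DISABLE, 0);
 */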
3623 
3624 /**
3625  * i915_perf_ioctl - support ioctl() usage with i915 perf stream FDs
3626  * @file: An i915 perf stream file
3627  * @cmd: the ioctl request
3628  * @arg: the ioctl data
3629  *
3630  * Implementation deferred to i915_perf_ioctl_locked().
3631  *
3632  * Returns: zero on success or a negative error code. Returns -EINVAL for
3633  * an unknown ioctl request.
3634  */
3635 static long i915_perf_ioctl(struct file *file,
3636 			    unsigned int cmd,
3637 			    unsigned long arg)
3638 {
3639 	struct i915_perf_stream *stream = file->private_data;
3640 	long ret;
3641 
3642 	mutex_lock(&stream->lock);
3643 	ret = i915_perf_ioctl_locked(stream, cmd, arg);
3644 	mutex_unlock(&stream->lock);
3645 
3646 	return ret;
3647 }
3648 
3649 /**
3650  * i915_perf_destroy_locked - destroy an i915 perf stream
3651  * @stream: An i915 perf stream
3652  *
3653  * Frees all resources associated with the given i915 perf @stream, disabling
3654  * any associated data capture in the process.
3655  *
3656  * Note: The &gt->perf.lock mutex has been taken to serialize
3657  * with any non-file-operation driver hooks.
3658  */
3659 static void i915_perf_destroy_locked(struct i915_perf_stream *stream)
3660 {
3661 	if (stream->enabled)
3662 		i915_perf_disable_locked(stream);
3663 
3664 	if (stream->ops->destroy)
3665 		stream->ops->destroy(stream);
3666 
3667 	if (stream->ctx)
3668 		i915_gem_context_put(stream->ctx);
3669 
3670 	kfree(stream);
3671 }
3672 
3673 /**
3674  * i915_perf_release - handles userspace close() of a stream file
3675  * @inode: anonymous inode associated with file
3676  * @file: An i915 perf stream file
3677  *
3678  * Cleans up any resources associated with an open i915 perf stream file.
3679  *
3680  * NB: close() can't really fail from the userspace point of view.
3681  *
3682  * Returns: zero on success or a negative error code.
3683  */
3684 static int i915_perf_release(struct inode *inode, struct file *file)
3685 {
3686 	struct i915_perf_stream *stream = file->private_data;
3687 	struct i915_perf *perf = stream->perf;
3688 	struct intel_gt *gt = stream->engine->gt;
3689 
3690 	/*
3691 	 * Within this call, we know that the fd is being closed and we have no
3692 	 * other user of stream->lock. Use the perf lock to destroy the stream
3693 	 * here.
3694 	 */
3695 	mutex_lock(&gt->perf.lock);
3696 	i915_perf_destroy_locked(stream);
3697 	mutex_unlock(&gt->perf.lock);
3698 
3699 	/* Release the reference the perf stream kept on the driver. */
3700 	drm_dev_put(&perf->i915->drm);
3701 
3702 	return 0;
3703 }
3704 
3705 
3706 static const struct file_operations fops = {
3707 	.owner		= THIS_MODULE,
3708 	.llseek		= no_llseek,
3709 	.release	= i915_perf_release,
3710 	.poll		= i915_perf_poll,
3711 	.read		= i915_perf_read,
3712 	.unlocked_ioctl	= i915_perf_ioctl,
3713 	/* Our ioctls have no arguments, so it's safe to use the same function
3714 	 * to handle 32-bit compatibility.
3715 	 */
3716 	.compat_ioctl   = i915_perf_ioctl,
3717 };
3718 
3719 
3720 /**
3721  * i915_perf_open_ioctl_locked - DRM ioctl() for userspace to open a stream FD
3722  * @perf: i915 perf instance
3723  * @param: The open parameters passed to `DRM_I915_PERF_OPEN`
3724  * @props: individually validated u64 property value pairs
3725  * @file: drm file
3726  *
3727  * See i915_perf_ioctl_open() for interface details.
3728  *
3729  * Implements further stream config validation and stream initialization on
3730  * behalf of i915_perf_open_ioctl() with the &gt->perf.lock mutex
3731  * taken to serialize with any non-file-operation driver hooks.
3732  *
3733  * Note: at this point the @props have only been validated in isolation and
3734  * it's still necessary to validate that the combination of properties makes
3735  * sense.
3736  *
3737  * In the case where userspace is interested in OA unit metrics then further
3738  * config validation and stream initialization details will be handled by
3739  * i915_oa_stream_init(). The code here should only validate config state that
3740  * will be relevant to all stream types / backends.
3741  *
3742  * Returns: zero on success or a negative error code.
3743  */
3744 static int
3745 i915_perf_open_ioctl_locked(struct i915_perf *perf,
3746 			    struct drm_i915_perf_open_param *param,
3747 			    struct perf_open_properties *props,
3748 			    struct drm_file *file)
3749 {
3750 	struct i915_gem_context *specific_ctx = NULL;
3751 	struct i915_perf_stream *stream = NULL;
3752 	unsigned long f_flags = 0;
3753 	bool privileged_op = true;
3754 	int stream_fd;
3755 	int ret;
3756 
3757 	if (props->single_context) {
3758 		u32 ctx_handle = props->ctx_handle;
3759 		struct drm_i915_file_private *file_priv = file->driver_priv;
3760 
3761 		specific_ctx = i915_gem_context_lookup(file_priv, ctx_handle);
3762 		if (IS_ERR(specific_ctx)) {
3763 			drm_dbg(&perf->i915->drm,
3764 				"Failed to look up context with ID %u for opening perf stream\n",
3765 				  ctx_handle);
3766 			ret = PTR_ERR(specific_ctx);
3767 			goto err;
3768 		}
3769 	}
3770 
3771 	/*
3772 	 * On Haswell the OA unit supports clock gating off for a specific
3773 	 * context and in this mode there's no visibility of metrics for the
3774 	 * rest of the system, which we consider acceptable for a
3775 	 * non-privileged client.
3776 	 *
3777 	 * For Gen8->11 the OA unit no longer supports clock gating off for a
3778 	 * specific context and the kernel can't securely stop the counters
3779 	 * from updating as system-wide / global values. Even though we can
3780 	 * filter reports based on the included context ID we can't block
3781 	 * clients from seeing the raw / global counter values via
3782 	 * MI_REPORT_PERF_COUNT commands and so consider it a privileged op to
3783 	 * enable the OA unit by default.
3784 	 *
3785 	 * For Gen12+ we gain a new OAR unit that only monitors the RCS on a
3786 	 * per context basis. So we can relax requirements there if the user
3787 	 * doesn't request global stream access (i.e. query based sampling
3788 	 * using MI_REPORT_PERF_COUNT).
3789 	 */
3790 	if (IS_HASWELL(perf->i915) && specific_ctx)
3791 		privileged_op = false;
3792 	else if (GRAPHICS_VER(perf->i915) == 12 && specific_ctx &&
3793 		 (props->sample_flags & SAMPLE_OA_REPORT) == 0)
3794 		privileged_op = false;
3795 
3796 	if (props->hold_preemption) {
3797 		if (!props->single_context) {
3798 			drm_dbg(&perf->i915->drm,
3799 				"preemption disable with no context\n");
3800 			ret = -EINVAL;
3801 			goto err;
3802 		}
3803 		privileged_op = true;
3804 	}
3805 
3806 	/*
3807 	 * Asking for SSEU configuration is a privileged operation.
3808 	 */
3809 	if (props->has_sseu)
3810 		privileged_op = true;
3811 	else
3812 		get_default_sseu_config(&props->sseu, props->engine);
3813 
3814 	/* Similar to perf's kernel.perf_event_paranoid sysctl option
3815 	 * we check a dev.i915.perf_stream_paranoid sysctl option
3816 	 * to determine if it's ok to access system wide OA counters
3817 	 * without CAP_PERFMON or CAP_SYS_ADMIN privileges.
3818 	 */
3819 	if (privileged_op &&
3820 	    i915_perf_stream_paranoid && !perfmon_capable()) {
3821 		drm_dbg(&perf->i915->drm,
3822 			"Insufficient privileges to open i915 perf stream\n");
3823 		ret = -EACCES;
3824 		goto err_ctx;
3825 	}
3826 
3827 	stream = kzalloc(sizeof(*stream), GFP_KERNEL);
3828 	if (!stream) {
3829 		ret = -ENOMEM;
3830 		goto err_ctx;
3831 	}
3832 
3833 	stream->perf = perf;
3834 	stream->ctx = specific_ctx;
3835 	stream->poll_oa_period = props->poll_oa_period;
3836 
3837 	ret = i915_oa_stream_init(stream, param, props);
3838 	if (ret)
3839 		goto err_alloc;
3840 
3841 	/* We avoid simply assigning stream->sample_flags = props->sample_flags
3842 	 * so that _stream_init can check the combination of sample flags more
3843 	 * thoroughly, but they are still expected to match at this point.
3844 	 */
3845 	if (WARN_ON(stream->sample_flags != props->sample_flags)) {
3846 		ret = -ENODEV;
3847 		goto err_flags;
3848 	}
3849 
3850 	if (param->flags & I915_PERF_FLAG_FD_CLOEXEC)
3851 		f_flags |= O_CLOEXEC;
3852 	if (param->flags & I915_PERF_FLAG_FD_NONBLOCK)
3853 		f_flags |= O_NONBLOCK;
3854 
3855 	stream_fd = anon_inode_getfd("[i915_perf]", &fops, stream, f_flags);
3856 	if (stream_fd < 0) {
3857 		ret = stream_fd;
3858 		goto err_flags;
3859 	}
3860 
3861 	if (!(param->flags & I915_PERF_FLAG_DISABLED))
3862 		i915_perf_enable_locked(stream);
3863 
3864 	/* Take a reference on the driver that will be kept with stream_fd
3865 	 * until its release.
3866 	 */
3867 	drm_dev_get(&perf->i915->drm);
3868 
3869 	return stream_fd;
3870 
3871 err_flags:
3872 	if (stream->ops->destroy)
3873 		stream->ops->destroy(stream);
3874 err_alloc:
3875 	kfree(stream);
3876 err_ctx:
3877 	if (specific_ctx)
3878 		i915_gem_context_put(specific_ctx);
3879 err:
3880 	return ret;
3881 }
3882 
3883 static u64 oa_exponent_to_ns(struct i915_perf *perf, int exponent)
3884 {
3885 	u64 nom = (2ULL << exponent) * NSEC_PER_SEC;
3886 	u32 den = i915_perf_oa_timestamp_frequency(perf->i915);
3887 
3888 	return div_u64(nom + den - 1, den);
3889 }
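
/*
 * A worked example (frequency assumed for illustration): with a 19.2 MHz OA
 * timestamp frequency, exponent == 5 gives
 * nom = (2 << 5) * NSEC_PER_SEC = 64e9 and a rounded-up period of
 * 64e9 / 19200000 = 3334 ns, i.e. the OA unit produces a report every
 * 2^(exponent + 1) timestamp ticks.
 */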
3890 
3891 static __always_inline bool
3892 oa_format_valid(struct i915_perf *perf, enum drm_i915_oa_format format)
3893 {
3894 	return test_bit(format, perf->format_mask);
3895 }
3896 
3897 static __always_inline void
3898 oa_format_add(struct i915_perf *perf, enum drm_i915_oa_format format)
3899 {
3900 	__set_bit(format, perf->format_mask);
3901 }
3902 
3903 /**
3904  * read_properties_unlocked - validate + copy userspace stream open properties
3905  * @perf: i915 perf instance
3906  * @uprops: The array of u64 key value pairs given by userspace
3907  * @n_props: The number of key value pairs expected in @uprops
3908  * @props: The stream configuration built up while validating properties
3909  *
3910  * Note this function only validates properties in isolation it doesn't
3911  * validate that the combination of properties makes sense or that all
3912  * properties necessary for a particular kind of stream have been set.
3913  *
3914  * Note that there currently aren't any ordering requirements for properties so
3915  * we shouldn't validate or assume anything about ordering here. This doesn't
3916  * rule out defining new properties with ordering requirements in the future.
3917  */
3918 static int read_properties_unlocked(struct i915_perf *perf,
3919 				    u64 __user *uprops,
3920 				    u32 n_props,
3921 				    struct perf_open_properties *props)
3922 {
3923 	u64 __user *uprop = uprops;
3924 	u32 i;
3925 	int ret;
3926 
3927 	memset(props, 0, sizeof(struct perf_open_properties));
3928 	props->poll_oa_period = DEFAULT_POLL_PERIOD_NS;
3929 
3930 	if (!n_props) {
3931 		drm_dbg(&perf->i915->drm,
3932 			"No i915 perf properties given\n");
3933 		return -EINVAL;
3934 	}
3935 
3936 	/* At the moment we only support using i915-perf on the RCS. */
3937 	props->engine = intel_engine_lookup_user(perf->i915,
3938 						 I915_ENGINE_CLASS_RENDER,
3939 						 0);
3940 	if (!props->engine) {
3941 		drm_dbg(&perf->i915->drm,
3942 			"No RENDER-capable engines\n");
3943 		return -EINVAL;
3944 	}
3945 
3946 	/* Considering that ID = 0 is reserved and assuming we don't (currently)
3947 	 * expect any configurations to specify duplicate values for a particular
3948 	 * property ID, the last _PROP_MAX value is one greater than the maximum
3949 	 * number of properties we expect to get from userspace.
3951 	 */
3952 	if (n_props >= DRM_I915_PERF_PROP_MAX) {
3953 		drm_dbg(&perf->i915->drm,
3954 			"More i915 perf properties specified than exist\n");
3955 		return -EINVAL;
3956 	}
3957 
3958 	for (i = 0; i < n_props; i++) {
3959 		u64 oa_period, oa_freq_hz;
3960 		u64 id, value;
3961 
3962 		ret = get_user(id, uprop);
3963 		if (ret)
3964 			return ret;
3965 
3966 		ret = get_user(value, uprop + 1);
3967 		if (ret)
3968 			return ret;
3969 
3970 		if (id == 0 || id >= DRM_I915_PERF_PROP_MAX) {
3971 			drm_dbg(&perf->i915->drm,
3972 				"Unknown i915 perf property ID\n");
3973 			return -EINVAL;
3974 		}
3975 
3976 		switch ((enum drm_i915_perf_property_id)id) {
3977 		case DRM_I915_PERF_PROP_CTX_HANDLE:
3978 			props->single_context = 1;
3979 			props->ctx_handle = value;
3980 			break;
3981 		case DRM_I915_PERF_PROP_SAMPLE_OA:
3982 			if (value)
3983 				props->sample_flags |= SAMPLE_OA_REPORT;
3984 			break;
3985 		case DRM_I915_PERF_PROP_OA_METRICS_SET:
3986 			if (value == 0) {
3987 				drm_dbg(&perf->i915->drm,
3988 					"Unknown OA metric set ID\n");
3989 				return -EINVAL;
3990 			}
3991 			props->metrics_set = value;
3992 			break;
3993 		case DRM_I915_PERF_PROP_OA_FORMAT:
3994 			if (value == 0 || value >= I915_OA_FORMAT_MAX) {
3995 				drm_dbg(&perf->i915->drm,
3996 					"Out-of-range OA report format %llu\n",
3997 					  value);
3998 				return -EINVAL;
3999 			}
4000 			if (!oa_format_valid(perf, value)) {
4001 				drm_dbg(&perf->i915->drm,
4002 					"Unsupported OA report format %llu\n",
4003 					  value);
4004 				return -EINVAL;
4005 			}
4006 			props->oa_format = value;
4007 			break;
4008 		case DRM_I915_PERF_PROP_OA_EXPONENT:
4009 			if (value > OA_EXPONENT_MAX) {
4010 				drm_dbg(&perf->i915->drm,
4011 					"OA timer exponent too high (> %u)\n",
4012 					 OA_EXPONENT_MAX);
4013 				return -EINVAL;
4014 			}
4015 
4016 			/* Theoretically we can program the OA unit to sample
4017 			 * e.g. every 160ns for HSW, 167ns for BDW/SKL or 104ns
4018 			 * for BXT. We don't allow such high sampling
4019 			 * frequencies by default without CAP_PERFMON.
4020 			 */
4021 
4022 			BUILD_BUG_ON(sizeof(oa_period) != 8);
4023 			oa_period = oa_exponent_to_ns(perf, value);
4024 
4025 			/* This check is primarily to ensure that oa_period <=
4026 			 * UINT32_MAX (before passing to do_div which only
4027 			 * accepts a u32 denominator), but we can also skip
4028 			 * checking anything < 1Hz which implicitly can't be
4029 			 * limited via an integer oa_max_sample_rate.
4030 			 */
4031 			if (oa_period <= NSEC_PER_SEC) {
4032 				u64 tmp = NSEC_PER_SEC;
4033 				do_div(tmp, oa_period);
4034 				oa_freq_hz = tmp;
4035 			} else
4036 				oa_freq_hz = 0;
4037 
4038 			if (oa_freq_hz > i915_oa_max_sample_rate && !perfmon_capable()) {
4039 				drm_dbg(&perf->i915->drm,
4040 					"OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without CAP_PERFMON or CAP_SYS_ADMIN privileges\n",
4041 					  i915_oa_max_sample_rate);
4042 				return -EACCES;
4043 			}
4044 
4045 			props->oa_periodic = true;
4046 			props->oa_period_exponent = value;
4047 			break;
4048 		case DRM_I915_PERF_PROP_HOLD_PREEMPTION:
4049 			props->hold_preemption = !!value;
4050 			break;
4051 		case DRM_I915_PERF_PROP_GLOBAL_SSEU: {
4052 			struct drm_i915_gem_context_param_sseu user_sseu;
4053 
4054 			if (GRAPHICS_VER_FULL(perf->i915) >= IP_VER(12, 50)) {
4055 				drm_dbg(&perf->i915->drm,
4056 					"SSEU config not supported on gfx %x\n",
4057 					GRAPHICS_VER_FULL(perf->i915));
4058 				return -ENODEV;
4059 			}
4060 
4061 			if (copy_from_user(&user_sseu,
4062 					   u64_to_user_ptr(value),
4063 					   sizeof(user_sseu))) {
4064 				drm_dbg(&perf->i915->drm,
4065 					"Unable to copy global sseu parameter\n");
4066 				return -EFAULT;
4067 			}
4068 
4069 			ret = get_sseu_config(&props->sseu, props->engine, &user_sseu);
4070 			if (ret) {
4071 				drm_dbg(&perf->i915->drm,
4072 					"Invalid SSEU configuration\n");
4073 				return ret;
4074 			}
4075 			props->has_sseu = true;
4076 			break;
4077 		}
4078 		case DRM_I915_PERF_PROP_POLL_OA_PERIOD:
4079 			if (value < 100000 /* 100us */) {
4080 				drm_dbg(&perf->i915->drm,
4081 					"OA availability timer too small (%lluns < 100us)\n",
4082 					  value);
4083 				return -EINVAL;
4084 			}
4085 			props->poll_oa_period = value;
4086 			break;
4087 		case DRM_I915_PERF_PROP_MAX:
4088 			MISSING_CASE(id);
4089 			return -EINVAL;
4090 		}
4091 
4092 		uprop += 2;
4093 	}
4094 
4095 	return 0;
4096 }
4097 
4098 /**
4099  * i915_perf_open_ioctl - DRM ioctl() for userspace to open a stream FD
4100  * @dev: drm device
4101  * @data: ioctl data copied from userspace (unvalidated)
4102  * @file: drm file
4103  *
4104  * Validates the stream open parameters given by userspace including flags
4105  * and an array of u64 key, value pair properties.
4106  *
4107  * Very little is assumed up front about the nature of the stream being
4108  * opened (for instance we don't assume it's for periodic OA unit metrics). An
4109  * i915-perf stream is expected to be a suitable interface for other forms of
4110  * buffered data written by the GPU besides periodic OA metrics.
4111  *
4112  * Note we copy the properties from userspace outside of the i915 perf
4113  * mutex to avoid an awkward lockdep with mmap_lock.
4114  *
4115  * Most of the implementation details are handled by
4116  * i915_perf_open_ioctl_locked() after taking the &gt->perf.lock
4117  * mutex for serializing with any non-file-operation driver hooks.
4118  *
4119  * Return: A newly opened i915 Perf stream file descriptor or negative
4120  * error code on failure.
4121  */
4122 int i915_perf_open_ioctl(struct drm_device *dev, void *data,
4123 			 struct drm_file *file)
4124 {
4125 	struct i915_perf *perf = &to_i915(dev)->perf;
4126 	struct drm_i915_perf_open_param *param = data;
4127 	struct intel_gt *gt;
4128 	struct perf_open_properties props;
4129 	u32 known_open_flags;
4130 	int ret;
4131 
4132 	if (!perf->i915) {
4133 		drm_dbg(&to_i915(dev)->drm,
4134 			"i915 perf interface not available for this system\n");
4135 		return -ENOTSUPP;
4136 	}
4137 
4138 	known_open_flags = I915_PERF_FLAG_FD_CLOEXEC |
4139 			   I915_PERF_FLAG_FD_NONBLOCK |
4140 			   I915_PERF_FLAG_DISABLED;
4141 	if (param->flags & ~known_open_flags) {
4142 		drm_dbg(&perf->i915->drm,
4143 			"Unknown drm_i915_perf_open_param flag\n");
4144 		return -EINVAL;
4145 	}
4146 
4147 	ret = read_properties_unlocked(perf,
4148 				       u64_to_user_ptr(param->properties_ptr),
4149 				       param->num_properties,
4150 				       &props);
4151 	if (ret)
4152 		return ret;
4153 
4154 	gt = props.engine->gt;
4155 
4156 	mutex_lock(&gt->perf.lock);
4157 	ret = i915_perf_open_ioctl_locked(perf, param, &props, file);
4158 	mutex_unlock(&gt->perf.lock);
4159 
4160 	return ret;
4161 }
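
/*
 * A minimal userspace sketch of opening a periodic OA stream (illustrative
 * only: metrics_set_id must name an existing config advertised under the
 * sysfs metrics/ directory, drm_fd is an open i915 DRM fd, and error
 * handling is trimmed):
 *
 *	uint64_t properties[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *		.num_properties = sizeof(properties) / (2 * sizeof(uint64_t)),
 *		.properties_ptr = (uintptr_t)properties,
 *	};
 *	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 */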
4162 
4163 /**
4164  * i915_perf_register - exposes i915-perf to userspace
4165  * @i915: i915 device instance
4166  *
4167  * In particular OA metric sets are advertised under a sysfs metrics/
4168  * directory allowing userspace to enumerate valid IDs that can be
4169  * used to open an i915-perf stream.
4170  */
4171 void i915_perf_register(struct drm_i915_private *i915)
4172 {
4173 	struct i915_perf *perf = &i915->perf;
4174 	struct intel_gt *gt = to_gt(i915);
4175 
4176 	if (!perf->i915)
4177 		return;
4178 
4179 	/* To be sure we're synchronized with an attempted
4180 	 * i915_perf_open_ioctl(); considering that we register after
4181 	 * being exposed to userspace.
4182 	 */
4183 	mutex_lock(&gt->perf.lock);
4184 
4185 	perf->metrics_kobj =
4186 		kobject_create_and_add("metrics",
4187 				       &i915->drm.primary->kdev->kobj);
4188 
4189 	mutex_unlock(&gt->perf.lock);
4190 }
4191 
4192 /**
4193  * i915_perf_unregister - hide i915-perf from userspace
4194  * @i915: i915 device instance
4195  *
4196  * i915-perf state cleanup is split up into an 'unregister' and
4197  * 'deinit' phase where the interface is first hidden from
4198  * userspace by i915_perf_unregister() before cleaning up
4199  * remaining state in i915_perf_fini().
4200  */
4201 void i915_perf_unregister(struct drm_i915_private *i915)
4202 {
4203 	struct i915_perf *perf = &i915->perf;
4204 
4205 	if (!perf->metrics_kobj)
4206 		return;
4207 
4208 	kobject_put(perf->metrics_kobj);
4209 	perf->metrics_kobj = NULL;
4210 }
4211 
4212 static bool gen8_is_valid_flex_addr(struct i915_perf *perf, u32 addr)
4213 {
4214 	static const i915_reg_t flex_eu_regs[] = {
4215 		EU_PERF_CNTL0,
4216 		EU_PERF_CNTL1,
4217 		EU_PERF_CNTL2,
4218 		EU_PERF_CNTL3,
4219 		EU_PERF_CNTL4,
4220 		EU_PERF_CNTL5,
4221 		EU_PERF_CNTL6,
4222 	};
4223 	int i;
4224 
4225 	for (i = 0; i < ARRAY_SIZE(flex_eu_regs); i++) {
4226 		if (i915_mmio_reg_offset(flex_eu_regs[i]) == addr)
4227 			return true;
4228 	}
4229 	return false;
4230 }
4231 
4232 static bool reg_in_range_table(u32 addr, const struct i915_range *table)
4233 {
4234 	while (table->start || table->end) {
4235 		if (addr >= table->start && addr <= table->end)
4236 			return true;
4237 
4238 		table++;
4239 	}
4240 
4241 	return false;
4242 }
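
/*
 * Note the tables below must be terminated with an empty entry, since the
 * loop above treats { .start = 0, .end = 0 } as the sentinel. For
 * illustration (example_regs is hypothetical):
 *
 *	static const struct i915_range example_regs[] = {
 *		{ .start = 0x2710, .end = 0x272c },
 *		{}
 *	};
 */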
4243 
4244 #define REG_EQUAL(addr, mmio) \
4245 	((addr) == i915_mmio_reg_offset(mmio))
4246 
4247 static const struct i915_range gen7_oa_b_counters[] = {
4248 	{ .start = 0x2710, .end = 0x272c },	/* OASTARTTRIG[1-8] */
4249 	{ .start = 0x2740, .end = 0x275c },	/* OAREPORTTRIG[1-8] */
4250 	{ .start = 0x2770, .end = 0x27ac },	/* OACEC[0-7][0-1] */
4251 	{}
4252 };
4253 
4254 static const struct i915_range gen12_oa_b_counters[] = {
4255 	{ .start = 0x2b2c, .end = 0x2b2c },	/* GEN12_OAG_OA_PESS */
4256 	{ .start = 0xd900, .end = 0xd91c },	/* GEN12_OAG_OASTARTTRIG[1-8] */
4257 	{ .start = 0xd920, .end = 0xd93c },	/* GEN12_OAG_OAREPORTTRIG1[1-8] */
4258 	{ .start = 0xd940, .end = 0xd97c },	/* GEN12_OAG_CEC[0-7][0-1] */
4259 	{ .start = 0xdc00, .end = 0xdc3c },	/* GEN12_OAG_SCEC[0-7][0-1] */
4260 	{ .start = 0xdc40, .end = 0xdc40 },	/* GEN12_OAG_SPCTR_CNF */
4261 	{ .start = 0xdc44, .end = 0xdc44 },	/* GEN12_OAA_DBG_REG */
4262 	{}
4263 };
4264 
4265 static const struct i915_range xehp_oa_b_counters[] = {
4266 	{ .start = 0xdc48, .end = 0xdc48 },	/* OAA_ENABLE_REG */
4267 	{ .start = 0xdd00, .end = 0xdd48 },	/* OAG_LCE0_0 - OAA_LENABLE_REG */
4268 };
4269 
4270 static const struct i915_range gen7_oa_mux_regs[] = {
4271 	{ .start = 0x91b8, .end = 0x91cc },	/* OA_PERFCNT[1-2], OA_PERFMATRIX */
4272 	{ .start = 0x9800, .end = 0x9888 },	/* MICRO_BP0_0 - NOA_WRITE */
4273 	{ .start = 0xe180, .end = 0xe180 },	/* HALF_SLICE_CHICKEN2 */
4274 	{}
4275 };
4276 
4277 static const struct i915_range hsw_oa_mux_regs[] = {
4278 	{ .start = 0x09e80, .end = 0x09ea4 }, /* HSW_MBVID2_NOA[0-9] */
4279 	{ .start = 0x09ec0, .end = 0x09ec0 }, /* HSW_MBVID2_MISR0 */
4280 	{ .start = 0x25100, .end = 0x2ff90 },
4281 	{}
4282 };
4283 
4284 static const struct i915_range chv_oa_mux_regs[] = {
4285 	{ .start = 0x182300, .end = 0x1823a4 },
4286 	{}
4287 };
4288 
4289 static const struct i915_range gen8_oa_mux_regs[] = {
4290 	{ .start = 0x0d00, .end = 0x0d2c },	/* RPM_CONFIG[0-1], NOA_CONFIG[0-8] */
4291 	{ .start = 0x20cc, .end = 0x20cc },	/* WAIT_FOR_RC6_EXIT */
4292 	{}
4293 };
4294 
4295 static const struct i915_range gen11_oa_mux_regs[] = {
4296 	{ .start = 0x91c8, .end = 0x91dc },	/* OA_PERFCNT[3-4] */
4297 	{}
4298 };
4299 
4300 static const struct i915_range gen12_oa_mux_regs[] = {
4301 	{ .start = 0x0d00, .end = 0x0d04 },     /* RPM_CONFIG[0-1] */
4302 	{ .start = 0x0d0c, .end = 0x0d2c },     /* NOA_CONFIG[0-8] */
4303 	{ .start = 0x9840, .end = 0x9840 },	/* GDT_CHICKEN_BITS */
4304 	{ .start = 0x9884, .end = 0x9888 },	/* NOA_WRITE */
4305 	{ .start = 0x20cc, .end = 0x20cc },	/* WAIT_FOR_RC6_EXIT */
4306 	{}
4307 };
4308 
4309 static bool gen7_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
4310 {
4311 	return reg_in_range_table(addr, gen7_oa_b_counters);
4312 }
4313 
4314 static bool gen8_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
4315 {
4316 	return reg_in_range_table(addr, gen7_oa_mux_regs) ||
4317 		reg_in_range_table(addr, gen8_oa_mux_regs);
4318 }
4319 
4320 static bool gen11_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
4321 {
4322 	return reg_in_range_table(addr, gen7_oa_mux_regs) ||
4323 		reg_in_range_table(addr, gen8_oa_mux_regs) ||
4324 		reg_in_range_table(addr, gen11_oa_mux_regs);
4325 }
4326 
4327 static bool hsw_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
4328 {
4329 	return reg_in_range_table(addr, gen7_oa_mux_regs) ||
4330 		reg_in_range_table(addr, hsw_oa_mux_regs);
4331 }
4332 
4333 static bool chv_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
4334 {
4335 	return reg_in_range_table(addr, gen7_oa_mux_regs) ||
4336 		reg_in_range_table(addr, chv_oa_mux_regs);
4337 }
4338 
4339 static bool gen12_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
4340 {
4341 	return reg_in_range_table(addr, gen12_oa_b_counters);
4342 }
4343 
4344 static bool xehp_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
4345 {
4346 	return reg_in_range_table(addr, xehp_oa_b_counters) ||
4347 		reg_in_range_table(addr, gen12_oa_b_counters);
4348 }
4349 
4350 static bool gen12_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
4351 {
4352 	return reg_in_range_table(addr, gen12_oa_mux_regs);
4353 }
4354 
4355 static u32 mask_reg_value(u32 reg, u32 val)
4356 {
4357 	/* HALF_SLICE_CHICKEN2 is programmed by the
4358 	 * WaDisableSTUnitPowerOptimization workaround. Make sure the value
4359 	 * programmed by userspace doesn't change this.
4360 	 */
4361 	if (REG_EQUAL(reg, HALF_SLICE_CHICKEN2))
4362 		val = val & ~_MASKED_BIT_ENABLE(GEN8_ST_PO_DISABLE);
4363 
4364 	/* WAIT_FOR_RC6_EXIT has only one bit fulfilling the function
4365 	 * indicated by its name and a bunch of selection fields used by OA
4366 	 * configs.
4367 	 */
4368 	if (REG_EQUAL(reg, WAIT_FOR_RC6_EXIT))
4369 		val = val & ~_MASKED_BIT_ENABLE(HSW_WAIT_FOR_RC6_EXIT_ENABLE);
4370 
4371 	return val;
4372 }
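
/*
 * For illustration: both registers above are "masked" registers, where the
 * top 16 bits of a write select which of the bottom 16 bits are actually
 * updated. _MASKED_BIT_ENABLE(bit) expands to (bit << 16 | bit), so
 * clearing those bits from the userspace-supplied value turns the write
 * into a no-op for that particular bit instead of toggling it.
 */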
4373 
4374 static struct i915_oa_reg *alloc_oa_regs(struct i915_perf *perf,
4375 					 bool (*is_valid)(struct i915_perf *perf, u32 addr),
4376 					 u32 __user *regs,
4377 					 u32 n_regs)
4378 {
4379 	struct i915_oa_reg *oa_regs;
4380 	int err;
4381 	u32 i;
4382 
4383 	if (!n_regs)
4384 		return NULL;
4385 
4386 	/* No is_valid function means we're not allowing any register to be programmed. */
4387 	GEM_BUG_ON(!is_valid);
4388 	if (!is_valid)
4389 		return ERR_PTR(-EINVAL);
4390 
4391 	oa_regs = kmalloc_array(n_regs, sizeof(*oa_regs), GFP_KERNEL);
4392 	if (!oa_regs)
4393 		return ERR_PTR(-ENOMEM);
4394 
4395 	for (i = 0; i < n_regs; i++) {
4396 		u32 addr, value;
4397 
4398 		err = get_user(addr, regs);
4399 		if (err)
4400 			goto addr_err;
4401 
4402 		if (!is_valid(perf, addr)) {
4403 			drm_dbg(&perf->i915->drm,
4404 				"Invalid oa_reg address: %X\n", addr);
4405 			err = -EINVAL;
4406 			goto addr_err;
4407 		}
4408 
4409 		err = get_user(value, regs + 1);
4410 		if (err)
4411 			goto addr_err;
4412 
4413 		oa_regs[i].addr = _MMIO(addr);
4414 		oa_regs[i].value = mask_reg_value(addr, value);
4415 
4416 		regs += 2;
4417 	}
4418 
4419 	return oa_regs;
4420 
4421 addr_err:
4422 	kfree(oa_regs);
4423 	return ERR_PTR(err);
4424 }
4425 
4426 static ssize_t show_dynamic_id(struct kobject *kobj,
4427 			       struct kobj_attribute *attr,
4428 			       char *buf)
4429 {
4430 	struct i915_oa_config *oa_config =
4431 		container_of(attr, typeof(*oa_config), sysfs_metric_id);
4432 
4433 	return sprintf(buf, "%d\n", oa_config->id);
4434 }
4435 
4436 static int create_dynamic_oa_sysfs_entry(struct i915_perf *perf,
4437 					 struct i915_oa_config *oa_config)
4438 {
4439 	sysfs_attr_init(&oa_config->sysfs_metric_id.attr);
4440 	oa_config->sysfs_metric_id.attr.name = "id";
4441 	oa_config->sysfs_metric_id.attr.mode = S_IRUGO;
4442 	oa_config->sysfs_metric_id.show = show_dynamic_id;
4443 	oa_config->sysfs_metric_id.store = NULL;
4444 
4445 	oa_config->attrs[0] = &oa_config->sysfs_metric_id.attr;
4446 	oa_config->attrs[1] = NULL;
4447 
4448 	oa_config->sysfs_metric.name = oa_config->uuid;
4449 	oa_config->sysfs_metric.attrs = oa_config->attrs;
4450 
4451 	return sysfs_create_group(perf->metrics_kobj,
4452 				  &oa_config->sysfs_metric);
4453 }
4454 
4455 /**
4456  * i915_perf_add_config_ioctl - DRM ioctl() for userspace to add a new OA config
4457  * @dev: drm device
4458  * @data: ioctl data (pointer to struct drm_i915_perf_oa_config) copied from
4459  *        userspace (unvalidated)
4460  * @file: drm file
4461  *
4462  * Validates the submitted OA register to be saved into a new OA config that
4463  * can then be used for programming the OA unit and its NOA network.
4464  *
4465  * Returns: A new allocated config number to be used with the perf open ioctl
4466  * or a negative error code on failure.
4467  */
4468 int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
4469 			       struct drm_file *file)
4470 {
4471 	struct i915_perf *perf = &to_i915(dev)->perf;
4472 	struct drm_i915_perf_oa_config *args = data;
4473 	struct i915_oa_config *oa_config, *tmp;
4474 	struct i915_oa_reg *regs;
4475 	int err, id;
4476 
4477 	if (!perf->i915) {
4478 		drm_dbg(&to_i915(dev)->drm,
4479 			"i915 perf interface not available for this system\n");
4480 		return -ENOTSUPP;
4481 	}
4482 
4483 	if (!perf->metrics_kobj) {
4484 		drm_dbg(&perf->i915->drm,
4485 			"OA metrics weren't advertised via sysfs\n");
4486 		return -EINVAL;
4487 	}
4488 
4489 	if (i915_perf_stream_paranoid && !perfmon_capable()) {
4490 		drm_dbg(&perf->i915->drm,
4491 			"Insufficient privileges to add i915 OA config\n");
4492 		return -EACCES;
4493 	}
4494 
4495 	if ((!args->mux_regs_ptr || !args->n_mux_regs) &&
4496 	    (!args->boolean_regs_ptr || !args->n_boolean_regs) &&
4497 	    (!args->flex_regs_ptr || !args->n_flex_regs)) {
4498 		drm_dbg(&perf->i915->drm,
4499 			"No OA registers given\n");
4500 		return -EINVAL;
4501 	}
4502 
4503 	oa_config = kzalloc(sizeof(*oa_config), GFP_KERNEL);
4504 	if (!oa_config) {
4505 		drm_dbg(&perf->i915->drm,
4506 			"Failed to allocate memory for the OA config\n");
4507 		return -ENOMEM;
4508 	}
4509 
4510 	oa_config->perf = perf;
4511 	kref_init(&oa_config->ref);
4512 
4513 	if (!uuid_is_valid(args->uuid)) {
4514 		drm_dbg(&perf->i915->drm,
4515 			"Invalid uuid format for OA config\n");
4516 		err = -EINVAL;
4517 		goto reg_err;
4518 	}
4519 
4520 	/* Last character in oa_config->uuid will be 0 because oa_config was
4521 	 * allocated with kzalloc.
4522 	 */
4523 	memcpy(oa_config->uuid, args->uuid, sizeof(args->uuid));
4524 
4525 	oa_config->mux_regs_len = args->n_mux_regs;
4526 	regs = alloc_oa_regs(perf,
4527 			     perf->ops.is_valid_mux_reg,
4528 			     u64_to_user_ptr(args->mux_regs_ptr),
4529 			     args->n_mux_regs);
4530 
4531 	if (IS_ERR(regs)) {
4532 		drm_dbg(&perf->i915->drm,
4533 			"Failed to create OA config for mux_regs\n");
4534 		err = PTR_ERR(regs);
4535 		goto reg_err;
4536 	}
4537 	oa_config->mux_regs = regs;
4538 
4539 	oa_config->b_counter_regs_len = args->n_boolean_regs;
4540 	regs = alloc_oa_regs(perf,
4541 			     perf->ops.is_valid_b_counter_reg,
4542 			     u64_to_user_ptr(args->boolean_regs_ptr),
4543 			     args->n_boolean_regs);
4544 
4545 	if (IS_ERR(regs)) {
4546 		drm_dbg(&perf->i915->drm,
4547 			"Failed to create OA config for b_counter_regs\n");
4548 		err = PTR_ERR(regs);
4549 		goto reg_err;
4550 	}
4551 	oa_config->b_counter_regs = regs;
4552 
4553 	if (GRAPHICS_VER(perf->i915) < 8) {
4554 		if (args->n_flex_regs != 0) {
4555 			err = -EINVAL;
4556 			goto reg_err;
4557 		}
4558 	} else {
4559 		oa_config->flex_regs_len = args->n_flex_regs;
4560 		regs = alloc_oa_regs(perf,
4561 				     perf->ops.is_valid_flex_reg,
4562 				     u64_to_user_ptr(args->flex_regs_ptr),
4563 				     args->n_flex_regs);
4564 
4565 		if (IS_ERR(regs)) {
4566 			drm_dbg(&perf->i915->drm,
4567 				"Failed to create OA config for flex_regs\n");
4568 			err = PTR_ERR(regs);
4569 			goto reg_err;
4570 		}
4571 		oa_config->flex_regs = regs;
4572 	}
4573 
4574 	err = mutex_lock_interruptible(&perf->metrics_lock);
4575 	if (err)
4576 		goto reg_err;
4577 
4578 	/* We shouldn't have too many configs, so this iteration shouldn't be
4579 	 * too costly.
4580 	 */
4581 	idr_for_each_entry(&perf->metrics_idr, tmp, id) {
4582 		if (!strcmp(tmp->uuid, oa_config->uuid)) {
4583 			drm_dbg(&perf->i915->drm,
4584 				"OA config already exists with this uuid\n");
4585 			err = -EADDRINUSE;
4586 			goto sysfs_err;
4587 		}
4588 	}
4589 
4590 	err = create_dynamic_oa_sysfs_entry(perf, oa_config);
4591 	if (err) {
4592 		drm_dbg(&perf->i915->drm,
4593 			"Failed to create sysfs entry for OA config\n");
4594 		goto sysfs_err;
4595 	}
4596 
4597 	/* Config id 0 is invalid, id 1 for kernel stored test config. */
4598 	oa_config->id = idr_alloc(&perf->metrics_idr,
4599 				  oa_config, 2,
4600 				  0, GFP_KERNEL);
4601 	if (oa_config->id < 0) {
4602 		drm_dbg(&perf->i915->drm,
4603 			"Failed to create sysfs entry for OA config\n");
4604 		err = oa_config->id;
4605 		goto sysfs_err;
4606 	}
4607 
4608 	mutex_unlock(&perf->metrics_lock);
4609 
4610 	drm_dbg(&perf->i915->drm,
4611 		"Added config %s id=%i\n", oa_config->uuid, oa_config->id);
4612 
4613 	return oa_config->id;
4614 
4615 sysfs_err:
4616 	mutex_unlock(&perf->metrics_lock);
4617 reg_err:
4618 	i915_oa_config_put(oa_config);
4619 	drm_dbg(&perf->i915->drm,
4620 		"Failed to add new OA config\n");
4621 	return err;
4622 }
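
/*
 * A minimal userspace sketch of registering a config (illustrative only:
 * the uuid and register list are placeholders, the addresses must pass the
 * per-platform is_valid checks above, and error handling is trimmed):
 *
 *	uint32_t mux_regs[] = { 0x9888, 0x00000000 };	// one (addr, value) pair
 *	struct drm_i915_perf_oa_config config = {
 *		.uuid = "01234567-0123-0123-0123-0123456789ab",
 *		.n_mux_regs = 1,
 *		.mux_regs_ptr = (uintptr_t)mux_regs,
 *	};
 *	long config_id = ioctl(drm_fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &config);
 */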
4623 
4624 /**
4625  * i915_perf_remove_config_ioctl - DRM ioctl() for userspace to remove an OA config
4626  * @dev: drm device
4627  * @data: ioctl data (pointer to u64 integer) copied from userspace
4628  * @file: drm file
4629  *
4630  * Configs can be removed while being used; they will stop appearing in sysfs
4631  * and their content will be freed when the stream using the config is closed.
4632  *
4633  * Returns: 0 on success or a negative error code on failure.
4634  */
4635 int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
4636 				  struct drm_file *file)
4637 {
4638 	struct i915_perf *perf = &to_i915(dev)->perf;
4639 	u64 *arg = data;
4640 	struct i915_oa_config *oa_config;
4641 	int ret;
4642 
4643 	if (!perf->i915) {
4644 		drm_dbg(&to_i915(dev)->drm,
4645 			"i915 perf interface not available for this system\n");
4646 		return -ENOTSUPP;
4647 	}
4648 
4649 	if (i915_perf_stream_paranoid && !perfmon_capable()) {
4650 		drm_dbg(&perf->i915->drm,
4651 			"Insufficient privileges to remove i915 OA config\n");
4652 		return -EACCES;
4653 	}
4654 
4655 	ret = mutex_lock_interruptible(&perf->metrics_lock);
4656 	if (ret)
4657 		return ret;
4658 
4659 	oa_config = idr_find(&perf->metrics_idr, *arg);
4660 	if (!oa_config) {
4661 		drm_dbg(&perf->i915->drm,
4662 			"Failed to remove unknown OA config\n");
4663 		ret = -ENOENT;
4664 		goto err_unlock;
4665 	}
4666 
4667 	GEM_BUG_ON(*arg != oa_config->id);
4668 
4669 	sysfs_remove_group(perf->metrics_kobj, &oa_config->sysfs_metric);
4670 
4671 	idr_remove(&perf->metrics_idr, *arg);
4672 
4673 	mutex_unlock(&perf->metrics_lock);
4674 
4675 	drm_dbg(&perf->i915->drm,
4676 		"Removed config %s id=%i\n", oa_config->uuid, oa_config->id);
4677 
4678 	i915_oa_config_put(oa_config);
4679 
4680 	return 0;
4681 
4682 err_unlock:
4683 	mutex_unlock(&perf->metrics_lock);
4684 	return ret;
4685 }
4686 
4687 static struct ctl_table oa_table[] = {
4688 	{
4689 	 .procname = "perf_stream_paranoid",
4690 	 .data = &i915_perf_stream_paranoid,
4691 	 .maxlen = sizeof(i915_perf_stream_paranoid),
4692 	 .mode = 0644,
4693 	 .proc_handler = proc_dointvec_minmax,
4694 	 .extra1 = SYSCTL_ZERO,
4695 	 .extra2 = SYSCTL_ONE,
4696 	 },
4697 	{
4698 	 .procname = "oa_max_sample_rate",
4699 	 .data = &i915_oa_max_sample_rate,
4700 	 .maxlen = sizeof(i915_oa_max_sample_rate),
4701 	 .mode = 0644,
4702 	 .proc_handler = proc_dointvec_minmax,
4703 	 .extra1 = SYSCTL_ZERO,
4704 	 .extra2 = &oa_sample_rate_hard_limit,
4705 	 },
4706 	{}
4707 };
4708 
4709 static void oa_init_supported_formats(struct i915_perf *perf)
4710 {
4711 	struct drm_i915_private *i915 = perf->i915;
4712 	enum intel_platform platform = INTEL_INFO(i915)->platform;
4713 
4714 	switch (platform) {
4715 	case INTEL_HASWELL:
4716 		oa_format_add(perf, I915_OA_FORMAT_A13);
4718 		oa_format_add(perf, I915_OA_FORMAT_A29);
4719 		oa_format_add(perf, I915_OA_FORMAT_A13_B8_C8);
4720 		oa_format_add(perf, I915_OA_FORMAT_B4_C8);
4721 		oa_format_add(perf, I915_OA_FORMAT_A45_B8_C8);
4722 		oa_format_add(perf, I915_OA_FORMAT_B4_C8_A16);
4723 		oa_format_add(perf, I915_OA_FORMAT_C4_B8);
4724 		break;
4725 
4726 	case INTEL_BROADWELL:
4727 	case INTEL_CHERRYVIEW:
4728 	case INTEL_SKYLAKE:
4729 	case INTEL_BROXTON:
4730 	case INTEL_KABYLAKE:
4731 	case INTEL_GEMINILAKE:
4732 	case INTEL_COFFEELAKE:
4733 	case INTEL_COMETLAKE:
4734 	case INTEL_ICELAKE:
4735 	case INTEL_ELKHARTLAKE:
4736 	case INTEL_JASPERLAKE:
4737 	case INTEL_TIGERLAKE:
4738 	case INTEL_ROCKETLAKE:
4739 	case INTEL_DG1:
4740 	case INTEL_ALDERLAKE_S:
4741 	case INTEL_ALDERLAKE_P:
4742 		oa_format_add(perf, I915_OA_FORMAT_A12);
4743 		oa_format_add(perf, I915_OA_FORMAT_A12_B8_C8);
4744 		oa_format_add(perf, I915_OA_FORMAT_A32u40_A4u32_B8_C8);
4745 		oa_format_add(perf, I915_OA_FORMAT_C4_B8);
4746 		break;
4747 
4748 	case INTEL_DG2:
4749 		oa_format_add(perf, I915_OAR_FORMAT_A32u40_A4u32_B8_C8);
4750 		oa_format_add(perf, I915_OA_FORMAT_A24u40_A14u32_B8_C8);
4751 		break;
4752 
4753 	default:
4754 		MISSING_CASE(platform);
4755 	}
4756 }
4757 
4758 static void i915_perf_init_info(struct drm_i915_private *i915)
4759 {
4760 	struct i915_perf *perf = &i915->perf;
4761 
4762 	switch (GRAPHICS_VER(i915)) {
4763 	case 8:
4764 		perf->ctx_oactxctrl_offset = 0x120;
4765 		perf->ctx_flexeu0_offset = 0x2ce;
4766 		perf->gen8_valid_ctx_bit = BIT(25);
4767 		break;
4768 	case 9:
4769 		perf->ctx_oactxctrl_offset = 0x128;
4770 		perf->ctx_flexeu0_offset = 0x3de;
4771 		perf->gen8_valid_ctx_bit = BIT(16);
4772 		break;
4773 	case 11:
4774 		perf->ctx_oactxctrl_offset = 0x124;
4775 		perf->ctx_flexeu0_offset = 0x78e;
4776 		perf->gen8_valid_ctx_bit = BIT(16);
4777 		break;
4778 	case 12:
4779 		/*
4780 		 * Calculate offset at runtime in oa_pin_context for gen12 and
4781 		 * cache the value in perf->ctx_oactxctrl_offset.
4782 		 */
4783 		break;
4784 	default:
4785 		MISSING_CASE(GRAPHICS_VER(i915));
4786 	}
4787 }
4788 
4789 /**
4790  * i915_perf_init - initialize i915-perf state on module bind
4791  * @i915: i915 device instance
4792  *
4793  * Initializes i915-perf state without exposing anything to userspace.
4794  *
4795  * Note: i915-perf initialization is split into an 'init' and 'register'
4796  * phase with the i915_perf_register() exposing state to userspace.
4797  */
4798 void i915_perf_init(struct drm_i915_private *i915)
4799 {
4800 	struct i915_perf *perf = &i915->perf;
4801 
4802 	perf->oa_formats = oa_formats;
4803 	if (IS_HASWELL(i915)) {
4804 		perf->ops.is_valid_b_counter_reg = gen7_is_valid_b_counter_addr;
4805 		perf->ops.is_valid_mux_reg = hsw_is_valid_mux_addr;
4806 		perf->ops.is_valid_flex_reg = NULL;
4807 		perf->ops.enable_metric_set = hsw_enable_metric_set;
4808 		perf->ops.disable_metric_set = hsw_disable_metric_set;
4809 		perf->ops.oa_enable = gen7_oa_enable;
4810 		perf->ops.oa_disable = gen7_oa_disable;
4811 		perf->ops.read = gen7_oa_read;
4812 		perf->ops.oa_hw_tail_read = gen7_oa_hw_tail_read;
4813 	} else if (HAS_LOGICAL_RING_CONTEXTS(i915)) {
4814 		/* Note that although we could theoretically also support the
4815 		 * legacy ringbuffer mode on BDW (and earlier iterations of
4816 		 * this driver, before upstreaming did this) it didn't seem
4817 		 * worth the complexity to maintain now that BDW+ enable
4818 		 * execlist mode by default.
4819 		 */
4820 		perf->ops.read = gen8_oa_read;
4821 		i915_perf_init_info(i915);
4822 
4823 		if (IS_GRAPHICS_VER(i915, 8, 9)) {
4824 			perf->ops.is_valid_b_counter_reg =
4825 				gen7_is_valid_b_counter_addr;
4826 			perf->ops.is_valid_mux_reg =
4827 				gen8_is_valid_mux_addr;
4828 			perf->ops.is_valid_flex_reg =
4829 				gen8_is_valid_flex_addr;
4830 
4831 			if (IS_CHERRYVIEW(i915)) {
4832 				perf->ops.is_valid_mux_reg =
4833 					chv_is_valid_mux_addr;
4834 			}
4835 
4836 			perf->ops.oa_enable = gen8_oa_enable;
4837 			perf->ops.oa_disable = gen8_oa_disable;
4838 			perf->ops.enable_metric_set = gen8_enable_metric_set;
4839 			perf->ops.disable_metric_set = gen8_disable_metric_set;
4840 			perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
4841 		} else if (GRAPHICS_VER(i915) == 11) {
4842 			perf->ops.is_valid_b_counter_reg =
4843 				gen7_is_valid_b_counter_addr;
4844 			perf->ops.is_valid_mux_reg =
4845 				gen11_is_valid_mux_addr;
4846 			perf->ops.is_valid_flex_reg =
4847 				gen8_is_valid_flex_addr;
4848 
4849 			perf->ops.oa_enable = gen8_oa_enable;
4850 			perf->ops.oa_disable = gen8_oa_disable;
4851 			perf->ops.enable_metric_set = gen8_enable_metric_set;
4852 			perf->ops.disable_metric_set = gen11_disable_metric_set;
4853 			perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
4854 		} else if (GRAPHICS_VER(i915) == 12) {
4855 			perf->ops.is_valid_b_counter_reg =
4856 				HAS_OA_SLICE_CONTRIB_LIMITS(i915) ?
4857 				xehp_is_valid_b_counter_addr :
4858 				gen12_is_valid_b_counter_addr;
4859 			perf->ops.is_valid_mux_reg =
4860 				gen12_is_valid_mux_addr;
4861 			perf->ops.is_valid_flex_reg =
4862 				gen8_is_valid_flex_addr;
4863 
4864 			perf->ops.oa_enable = gen12_oa_enable;
4865 			perf->ops.oa_disable = gen12_oa_disable;
4866 			perf->ops.enable_metric_set = gen12_enable_metric_set;
4867 			perf->ops.disable_metric_set = gen12_disable_metric_set;
4868 			perf->ops.oa_hw_tail_read = gen12_oa_hw_tail_read;
4869 		}
4870 	}
4871 
4872 	if (perf->ops.enable_metric_set) {
4873 		struct intel_gt *gt;
4874 		int i;
4875 
4876 		for_each_gt(gt, i915, i)
4877 			mutex_init(&gt->perf.lock);
4878 
4879 		/* Choose a representative limit */
4880 		oa_sample_rate_hard_limit = to_gt(i915)->clock_frequency / 2;
4881 
4882 		mutex_init(&perf->metrics_lock);
4883 		idr_init_base(&perf->metrics_idr, 1);
4884 
4885 		/* We set up some ratelimit state to potentially throttle any
4886 		 * _NOTES about spurious, invalid OA reports which we don't
4887 		 * forward to userspace.
4888 		 *
4889 		 * We print a _NOTE about any throttling when closing the
4890 		 * stream instead of waiting until driver _fini which no one
4891 		 * would ever see.
4892 		 *
4893 		 * Using the same limiting factors as printk_ratelimit()
4894 		 */
4895 		ratelimit_state_init(&perf->spurious_report_rs, 5 * HZ, 10);
4896 		/* Since we use a DRM_NOTE for spurious reports it would be
4897 		 * inconsistent to let __ratelimit() automatically print a
4898 		 * warning for throttling.
4899 		 */
4900 		ratelimit_set_flags(&perf->spurious_report_rs,
4901 				    RATELIMIT_MSG_ON_RELEASE);
4902 
4903 		ratelimit_state_init(&perf->tail_pointer_race,
4904 				     5 * HZ, 10);
4905 		ratelimit_set_flags(&perf->tail_pointer_race,
4906 				    RATELIMIT_MSG_ON_RELEASE);
4907 
4908 		atomic64_set(&perf->noa_programming_delay,
4909 			     500 * 1000 /* 500us */);
4910 
4911 		perf->i915 = i915;
4912 
4913 		oa_init_supported_formats(perf);
4914 	}
4915 }
4916 
4917 static int destroy_config(int id, void *p, void *data)
4918 {
4919 	i915_oa_config_put(p);
4920 	return 0;
4921 }
4922 
4923 int i915_perf_sysctl_register(void)
4924 {
4925 	sysctl_header = register_sysctl("dev/i915", oa_table);
4926 	return 0;
4927 }
4928 
4929 void i915_perf_sysctl_unregister(void)
4930 {
4931 	unregister_sysctl_table(sysctl_header);
4932 }
4933 
4934 /**
4935  * i915_perf_fini - Counterpart to i915_perf_init()
4936  * @i915: i915 device instance
4937  */
4938 void i915_perf_fini(struct drm_i915_private *i915)
4939 {
4940 	struct i915_perf *perf = &i915->perf;
4941 
4942 	if (!perf->i915)
4943 		return;
4944 
4945 	idr_for_each(&perf->metrics_idr, destroy_config, perf);
4946 	idr_destroy(&perf->metrics_idr);
4947 
4948 	memset(&perf->ops, 0, sizeof(perf->ops));
4949 	perf->i915 = NULL;
4950 }
4951 
4952 /**
4953  * i915_perf_ioctl_version - Version of the i915-perf subsystem
4954  *
4955  * This version number is used by userspace to detect available features.
4956  */
4957 int i915_perf_ioctl_version(void)
4958 {
4959 	/*
4960 	 * 1: Initial version
4961 	 *   I915_PERF_IOCTL_ENABLE
4962 	 *   I915_PERF_IOCTL_DISABLE
4963 	 *
4964 	 * 2: Added runtime modification of OA config.
4965 	 *   I915_PERF_IOCTL_CONFIG
4966 	 *
4967 	 * 3: Add DRM_I915_PERF_PROP_HOLD_PREEMPTION parameter to hold
4968 	 *    preemption on a particular context so that performance data is
4969 	 *    accessible from a delta of MI_RPC reports without looking at the
4970 	 *    OA buffer.
4971 	 *
4972 	 * 4: Add DRM_I915_PERF_PROP_ALLOWED_SSEU to limit what contexts can
4973 	 *    be run for the duration of the performance recording based on
4974 	 *    their SSEU configuration.
4975 	 *
4976 	 * 5: Add DRM_I915_PERF_PROP_POLL_OA_PERIOD parameter that controls the
4977 	 *    interval for the hrtimer used to check for OA data.
4978 	 */
4979 	return 5;
4980 }
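
/*
 * A sketch of how userspace can query this at runtime (assuming the
 * I915_PARAM_PERF_REVISION getparam; error handling trimmed):
 *
 *	int revision = 0;
 *	struct drm_i915_getparam gp = {
 *		.param = I915_PARAM_PERF_REVISION,
 *		.value = &revision,
 *	};
 *	ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp);
 */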
4981 
4982 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
4983 #include "selftests/i915_perf.c"
4984 #endif
4985