/*
 * Copyright © 2015-2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *   Robert Bragg <robert@sixbynine.org>
 */


/**
 * DOC: i915 Perf Overview
 *
 * Gen graphics supports a large number of performance counters that can help
 * driver and application developers understand and optimize their use of the
 * GPU.
 *
 * This i915 perf interface enables userspace to configure and open a file
 * descriptor representing a stream of GPU metrics which can then be read() as
 * a stream of sample records.
 *
 * The interface is particularly suited to exposing buffered metrics that are
 * captured by DMA from the GPU, unsynchronized with and unrelated to the CPU.
 *
 * Streams representing a single context are accessible to applications with a
 * corresponding drm file descriptor, such that OpenGL can use the interface
 * without special privileges. Access to system-wide metrics requires root
 * privileges by default, unless changed via the dev.i915.perf_stream_paranoid
 * sysctl option.
 *
 */
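
/*
 * For illustration, opening a periodic OA stream from userspace boils down to
 * passing an array of (property, value) pairs to DRM_IOCTL_I915_PERF_OPEN
 * (uapi in include/uapi/drm/i915_drm.h). A minimal sketch, where drm_fd and
 * metrics_set_id are assumed to have been obtained elsewhere and the exponent
 * is an arbitrary choice for the example:
 *
 *	uint64_t properties[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *		.num_properties = sizeof(properties) / (2 * sizeof(uint64_t)),
 *		.properties_ptr = (uintptr_t)properties,
 *	};
 *	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 *
 * The returned fd can then be read() for the sample records described above.
 */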

/**
 * DOC: i915 Perf History and Comparison with Core Perf
 *
 * The interface was initially inspired by the core Perf infrastructure but
 * some notable differences are:
 *
 * i915 perf file descriptors represent a "stream" instead of an "event"; where
 * a perf event primarily corresponds to a single 64bit value, while a stream
 * might sample sets of tightly-coupled counters, depending on the
 * configuration.  For example the Gen OA unit isn't designed to support
 * orthogonal configurations of individual counters; it's configured for a set
 * of related counters. Samples for an i915 perf stream capturing OA metrics
 * will include a set of counter values packed in a compact HW specific format.
 * The OA unit supports a number of different packing formats which can be
 * selected by the user opening the stream. Perf has support for grouping
 * events, but each event in the group is configured, validated and
 * authenticated individually with separate system calls.
 *
 * i915 perf stream configurations are provided as an array of u64 (key,value)
 * pairs, instead of a fixed struct with multiple miscellaneous config members,
 * interleaved with event-type specific members.
 *
 * i915 perf doesn't support exposing metrics via an mmap'd circular buffer.
 * The supported metrics are being written to memory by the GPU unsynchronized
 * with the CPU, using HW specific packing formats for counter sets. Sometimes
 * the constraints on HW configuration require reports to be filtered before it
 * would be acceptable to expose them to unprivileged applications - to hide
 * the metrics of other processes/contexts. For these use cases a read() based
 * interface is a good fit, and provides an opportunity to filter data as it
 * gets copied from the GPU mapped buffers to userspace buffers.
 *
 *
 * Issues hit with first prototype based on Core Perf
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * The first prototype of this driver was based on the core perf
 * infrastructure, and while we did make that mostly work, with some changes to
 * perf, we found we were breaking or working around too many assumptions baked
 * into perf's current CPU-centric design.
 *
 * In the end we didn't see a clear benefit to making perf's implementation and
 * interface more complex by changing design assumptions while we knew we still
 * wouldn't be able to use any existing perf based userspace tools.
 *
 * Also considering the Gen specific nature of the Observability hardware and
 * how userspace will sometimes need to combine i915 perf OA metrics with
 * side-band OA data captured via MI_REPORT_PERF_COUNT commands; we're
 * expecting the interface to be used by a platform specific userspace such as
 * OpenGL or tools. That is to say, we aren't inherently missing out on having
 * a standard vendor/architecture agnostic interface by not using perf.
 *
 *
 * For posterity, in case we might re-visit trying to adapt core perf to be
 * better suited to exposing i915 metrics these were the main pain points we
 * hit:
 *
 * - The perf based OA PMU driver broke some significant design assumptions:
 *
 *   Existing perf pmus are used for profiling work on a cpu and we were
 *   introducing the idea of _IS_DEVICE pmus with different security
 *   implications, the need to fake cpu-related data (such as user/kernel
 *   registers) to fit with perf's current design, and adding _DEVICE records
 *   as a way to forward device-specific status records.
 *
 *   The OA unit writes reports of counters into a circular buffer, without
 *   involvement from the CPU, making our PMU driver the first of its kind.
 *
 *   Given the way we periodically forward data from the GPU-mapped, OA
 *   buffer to perf's buffer, those bursts of sample writes looked to perf like
 *   we were sampling too fast and so we had to subvert its throttling checks.
 *
 *   Perf supports groups of counters and allows those to be read via
 *   transactions internally but transactions currently seem designed to be
 *   explicitly initiated from the cpu (say in response to a userspace read())
 *   and while we could pull a report out of the OA buffer we can't
 *   trigger a report from the cpu on demand.
 *
 *   Related to being report based; the OA counters are configured in HW as a
 *   set while perf generally expects counter configurations to be orthogonal.
 *   Although counters can be associated with a group leader as they are
 *   opened, there's no clear precedent for being able to provide group-wide
 *   configuration attributes (for example we want to let userspace choose the
 *   OA unit report format used to capture all counters in a set, or specify a
 *   GPU context to filter metrics on). We avoided using perf's grouping
 *   feature and forwarded OA reports to userspace via perf's 'raw' sample
 *   field. This suited our userspace well considering how coupled the counters
 *   are when dealing with normalizing. It would be inconvenient to split
 *   counters up into separate events, only to require userspace to recombine
 *   them. For Mesa it's also convenient to be forwarded raw, periodic reports
 *   for combining with the side-band raw reports it captures using
 *   MI_REPORT_PERF_COUNT commands.
 *
 *   - As a side note on perf's grouping feature; there was also some concern
 *     that using PERF_FORMAT_GROUP as a way to pack together counter values
 *     would quite drastically inflate our sample sizes, which would likely
 *     lower the effective sampling resolutions we could use when the available
 *     memory bandwidth is limited.
 *
 *     With the OA unit's report formats, counters are packed together as 32
 *     or 40bit values, with the largest report size being 256 bytes.
 *
 *     PERF_FORMAT_GROUP values are 64bit, but there doesn't appear to be a
 *     documented ordering to the values, implying PERF_FORMAT_ID must also be
 *     used to add a 64bit ID before each value; giving 16 bytes per counter.
 *
 *   Related to counter orthogonality; we can't time share the OA unit, while
 *   event scheduling is a central design idea within perf for allowing
 *   userspace to open + enable more events than can be configured in HW at any
 *   one time.  The OA unit is not designed to allow re-configuration while in
 *   use. We can't reconfigure the OA unit without losing internal OA unit
 *   state which we can't access explicitly to save and restore. Reconfiguring
 *   the OA unit is also relatively slow, involving ~100 register writes. From
 *   userspace Mesa also depends on a stable OA configuration when emitting
 *   MI_REPORT_PERF_COUNT commands and importantly the OA unit can't be
 *   disabled while there are outstanding MI_RPC commands lest we hang the
 *   command streamer.
 *
 *   The contents of sample records aren't extensible by device drivers (i.e.
 *   the sample_type bits). As an example; Sourab Gupta had been looking to
 *   attach GPU timestamps to our OA samples. We were shoehorning OA reports
 *   into sample records by using the 'raw' field, but it's tricky to pack more
 *   than one thing into this field because events/core.c currently only lets a
 *   pmu give a single raw data pointer plus len which will be copied into the
 *   ring buffer. To include more than the OA report we'd have to copy the
 *   report into an intermediate larger buffer. I'd been considering allowing a
 *   vector of data+len values to be specified for copying the raw data, but
 *   it felt like a kludge to be using the raw field for this purpose.
 *
 * - It felt like our perf based PMU was making some technical compromises
 *   just for the sake of using perf:
 *
 *   perf_event_open() requires events to either relate to a pid or a specific
 *   cpu core, while our device pmu related to neither.  Events opened with a
 *   pid will be automatically enabled/disabled according to the scheduling of
 *   that process - so not appropriate for us. When an event is related to a
 *   cpu id, perf ensures pmu methods will be invoked via an inter-processor
 *   interrupt on that core. To avoid invasive changes our userspace opened OA
 *   perf events for a specific cpu. This was workable but it meant the
 *   majority of the OA driver ran in atomic context, including all OA report
 *   forwarding, which wasn't really necessary in our case and made our
 *   locking requirements somewhat complex as we handled the interaction
 *   with the rest of the i915 driver.
 */

#include <linux/anon_inodes.h>
#include <linux/sizes.h>
#include <linux/uuid.h>

#include "gem/i915_gem_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_execlists_submission.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_clock_utils.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_lrc.h"
#include "gt/intel_ring.h"

#include "i915_drv.h"
#include "i915_perf.h"
#include "i915_perf_oa_regs.h"

/* HW requires this to be a power of two, between 128k and 16M, though the
 * driver is currently generally designed assuming the largest 16M size is
 * used such that the overflow cases are unlikely in normal operation.
 */
#define OA_BUFFER_SIZE		SZ_16M

#define OA_TAKEN(tail, head)	((tail - head) & (OA_BUFFER_SIZE - 1))
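
/*
 * For example, with a tail of 0x40 and a head of 0xffffc0 in the 16M buffer,
 * the subtraction wraps: (0x40 - 0xffffc0) & 0xffffff = 0x80, i.e. 128 bytes
 * are available to read across the end of the buffer.
 */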

/**
 * DOC: OA Tail Pointer Race
 *
 * There's a HW race condition between OA unit tail pointer register updates and
 * writes to memory whereby the tail pointer can sometimes get ahead of what's
 * been written out to the OA buffer so far (in terms of what's visible to the
 * CPU).
 *
 * Although this can be observed explicitly while copying reports to userspace
 * by checking for a zeroed report-id field in tail reports, we want to account
 * for this earlier, as part of the oa_buffer_check_unlocked() code, to avoid
 * lots of redundant read() attempts.
 *
 * We workaround this issue in oa_buffer_check_unlocked() by reading the reports
 * in the OA buffer, starting from the tail reported by the HW, until we find a
 * report with its first 2 dwords not 0, meaning its previous report is
 * completely in memory and ready to be read. Those dwords are also set to 0
 * once read and the whole buffer is cleared upon OA buffer initialization. The
 * first dword is the reason for this report while the second is the timestamp,
 * making the chances of having those 2 fields at 0 fairly unlikely. A more
 * detailed explanation is available in oa_buffer_check_unlocked().
 *
 * Most of the implementation details for this workaround are in
 * oa_buffer_check_unlocked() and _append_oa_reports().
 *
 * Note for posterity: previously the driver used to define an effective tail
 * pointer that lagged the real pointer by a 'tail margin' measured in bytes
 * derived from %OA_TAIL_MARGIN_NSEC and the configured sampling frequency.
 * This was flawed considering that the OA unit may also automatically generate
 * non-periodic reports (such as on context switch) or the OA unit may be
 * enabled without any periodic sampling.
 */
#define OA_TAIL_MARGIN_NSEC	100000ULL
#define INVALID_TAIL_PTR	0xffffffff

/* The default frequency for checking whether the OA unit has written new
 * reports to the circular OA buffer...
 */
#define DEFAULT_POLL_FREQUENCY_HZ 200
#define DEFAULT_POLL_PERIOD_NS (NSEC_PER_SEC / DEFAULT_POLL_FREQUENCY_HZ)

/* for sysctl proc_dointvec_minmax of dev.i915.perf_stream_paranoid */
static u32 i915_perf_stream_paranoid = true;

/* The maximum exponent the hardware accepts is 63 (essentially it selects one
 * of the 64bit timestamp bits to trigger reports from) but there's currently
 * no known use case for sampling as infrequently as once per 47 thousand years.
 *
 * Since the timestamps included in OA reports are only 32bits it seems
 * reasonable to limit the OA exponent where it's still possible to account for
 * overflow in OA report timestamps.
 */
#define OA_EXPONENT_MAX 31
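
/*
 * The period for a given exponent works out as 2^(exponent + 1) timestamp
 * ticks, i.e. period_ns = (2ULL << exponent) * NSEC_PER_SEC / timestamp_hz.
 * E.g. on Haswell, with its 12.5MHz timestamp frequency, exponent 0 gives
 * 2 / 12.5MHz = 160ns between reports, while OA_EXPONENT_MAX of 31 gives
 * 2^32 / 12.5MHz, roughly 344 seconds.
 */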

#define INVALID_CTX_ID 0xffffffff

/* On Gen8+ automatically triggered OA reports include a 'reason' field... */
#define OAREPORT_REASON_MASK           0x3f
#define OAREPORT_REASON_MASK_EXTENDED  0x7f
#define OAREPORT_REASON_SHIFT          19
#define OAREPORT_REASON_TIMER          (1<<0)
#define OAREPORT_REASON_CTX_SWITCH     (1<<3)
#define OAREPORT_REASON_CLK_RATIO      (1<<5)


/* For sysctl proc_dointvec_minmax of i915_oa_max_sample_rate
 *
 * The highest sampling frequency we can theoretically program the OA unit
 * with is always half the timestamp frequency: e.g. 6.25MHz for Haswell.
 *
 * Initialized just before we register the sysctl parameter.
 */
static int oa_sample_rate_hard_limit;

/* Theoretically we can program the OA unit to sample every 160ns but don't
 * allow that by default unless root...
 *
 * The default threshold of 100000Hz is based on perf's similar
 * kernel.perf_event_max_sample_rate sysctl parameter.
 */
static u32 i915_oa_max_sample_rate = 100000;

/* XXX: beware if future OA HW adds new report formats: the current code
 * assumes all reports have a power-of-two size and that ~(size - 1) can
 * be used as a mask to align the OA tail pointer.
 */
static const struct i915_oa_format oa_formats[I915_OA_FORMAT_MAX] = {
	[I915_OA_FORMAT_A13]	    = { 0, 64 },
	[I915_OA_FORMAT_A29]	    = { 1, 128 },
	[I915_OA_FORMAT_A13_B8_C8]  = { 2, 128 },
	/* A29_B8_C8 Disallowed as 192 bytes doesn't factor into buffer size */
	[I915_OA_FORMAT_B4_C8]	    = { 4, 64 },
	[I915_OA_FORMAT_A45_B8_C8]  = { 5, 256 },
	[I915_OA_FORMAT_B4_C8_A16]  = { 6, 128 },
	[I915_OA_FORMAT_C4_B8]	    = { 7, 64 },
	[I915_OA_FORMAT_A12]		    = { 0, 64 },
	[I915_OA_FORMAT_A12_B8_C8]	    = { 2, 128 },
	[I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
};
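
/*
 * For example, looking up I915_OA_FORMAT_A45_B8_C8 above gives a 256 byte
 * report, and the power-of-two size assumption allows a raw tail pointer to
 * be aligned down to a report boundary with a simple mask:
 * 0x12345 & ~(256 - 1) = 0x12300.
 */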

#define SAMPLE_OA_REPORT      (1<<0)

/**
 * struct perf_open_properties - for validated properties given to open a stream
 * @sample_flags: `DRM_I915_PERF_PROP_SAMPLE_*` properties are tracked as flags
 * @single_context: Whether a single or all gpu contexts should be monitored
 * @hold_preemption: Whether the preemption is disabled for the filtered
 *                   context
 * @ctx_handle: A gem ctx handle for use with @single_context
 * @metrics_set: An ID for an OA unit metric set advertised via sysfs
 * @oa_format: An OA unit HW report format
 * @oa_periodic: Whether to enable periodic OA unit sampling
 * @oa_period_exponent: The OA unit sampling period is derived from this
 * @engine: The engine (typically rcs0) being monitored by the OA unit
 * @has_sseu: Whether @sseu was specified by userspace
 * @sseu: internal SSEU configuration computed either from the userspace
 *        specified configuration in the opening parameters or a default value
 *        (see get_default_sseu_config())
 * @poll_oa_period: The period in nanoseconds at which the CPU will check for OA
 *                  data availability
 *
 * As read_properties_unlocked() enumerates and validates the properties given
 * to open a stream of metrics the configuration is built up in the structure
 * which starts out zero initialized.
 */
struct perf_open_properties {
	u32 sample_flags;

	u64 single_context:1;
	u64 hold_preemption:1;
	u64 ctx_handle;

	/* OA sampling state */
	int metrics_set;
	int oa_format;
	bool oa_periodic;
	int oa_period_exponent;

	struct intel_engine_cs *engine;

	bool has_sseu;
	struct intel_sseu sseu;

	u64 poll_oa_period;
};

struct i915_oa_config_bo {
	struct llist_node node;

	struct i915_oa_config *oa_config;
	struct i915_vma *vma;
};

static struct ctl_table_header *sysctl_header;

static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer);

void i915_oa_config_release(struct kref *ref)
{
	struct i915_oa_config *oa_config =
		container_of(ref, typeof(*oa_config), ref);

	kfree(oa_config->flex_regs);
	kfree(oa_config->b_counter_regs);
	kfree(oa_config->mux_regs);

	kfree_rcu(oa_config, rcu);
}

struct i915_oa_config *
i915_perf_get_oa_config(struct i915_perf *perf, int metrics_set)
{
	struct i915_oa_config *oa_config;

	rcu_read_lock();
	oa_config = idr_find(&perf->metrics_idr, metrics_set);
	if (oa_config)
		oa_config = i915_oa_config_get(oa_config);
	rcu_read_unlock();

	return oa_config;
}
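
/*
 * A successful lookup returns with an extra reference held, so a typical
 * caller pattern (sketched here for illustration, with a hypothetical
 * metrics_set) must balance it with i915_oa_config_put():
 *
 *	struct i915_oa_config *oa_config;
 *
 *	oa_config = i915_perf_get_oa_config(perf, metrics_set);
 *	if (!oa_config)
 *		return -ENODEV;
 *	... read oa_config->mux_regs etc ...
 *	i915_oa_config_put(oa_config);
 */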

static void free_oa_config_bo(struct i915_oa_config_bo *oa_bo)
{
	i915_oa_config_put(oa_bo->oa_config);
	i915_vma_put(oa_bo->vma);
	kfree(oa_bo);
}

static u32 gen12_oa_hw_tail_read(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	return intel_uncore_read(uncore, GEN12_OAG_OATAILPTR) &
	       GEN12_OAG_OATAILPTR_MASK;
}

static u32 gen8_oa_hw_tail_read(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	return intel_uncore_read(uncore, GEN8_OATAILPTR) & GEN8_OATAILPTR_MASK;
}

static u32 gen7_oa_hw_tail_read(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);

	return oastatus1 & GEN7_OASTATUS1_TAIL_MASK;
}

/**
 * oa_buffer_check_unlocked - check for data and update tail ptr state
 * @stream: i915 stream instance
 *
 * This is either called via fops (for blocking reads in user ctx) or the poll
 * check hrtimer (atomic ctx) to read the OA buffer tail pointer and determine
 * if there is data available for userspace to read.
 *
 * This function is central to providing a workaround for the OA unit tail
 * pointer having a race with respect to what data is visible to the CPU.
 * It is responsible for reading tail pointers from the hardware and giving
 * the pointers time to 'age' before they are made available for reading.
 * (See description of OA_TAIL_MARGIN_NSEC above for further details.)
 *
 * Besides returning true when there is data available to read() this function
 * also updates the tail, aging_tail and aging_timestamp in the oa_buffer
 * object.
 *
 * Note: It's safe to read OA config state here unlocked, assuming that this is
 * only called while the stream is enabled, while the global OA configuration
 * can't be modified.
 *
 * Returns: %true if the OA buffer contains data, else %false
 */
static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream)
{
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	int report_size = stream->oa_buffer.format_size;
	unsigned long flags;
	bool pollin;
	u32 hw_tail;
	u64 now;

	/* We have to consider the (unlikely) possibility that read() errors
	 * could result in an OA buffer reset which might reset the head and
	 * tail state.
	 */
	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	hw_tail = stream->perf->ops.oa_hw_tail_read(stream);

	/* The tail pointer increases in 64 byte increments,
	 * not in report_size steps...
	 */
	hw_tail &= ~(report_size - 1);

	now = ktime_get_mono_fast_ns();

	if (hw_tail == stream->oa_buffer.aging_tail &&
	    (now - stream->oa_buffer.aging_timestamp) > OA_TAIL_MARGIN_NSEC) {
		/* If the HW tail hasn't moved since the last check and the HW
		 * tail has been aging for long enough, declare it the new
		 * tail.
		 */
		stream->oa_buffer.tail = stream->oa_buffer.aging_tail;
	} else {
		u32 head, tail, aged_tail;

		/* NB: The head we observe here might effectively be a little
		 * out of date. If a read() is in progress, the head could be
		 * anywhere between this head and stream->oa_buffer.tail.
		 */
		head = stream->oa_buffer.head - gtt_offset;
		aged_tail = stream->oa_buffer.tail - gtt_offset;

		hw_tail -= gtt_offset;
		tail = hw_tail;

		/* Walk the stream backward until we find a report with dword 0
		 * & 1 not at 0. Since the circular buffer pointers progress by
		 * increments of 64 bytes and that reports can be up to 256
		 * bytes long, we can't tell whether a report has fully landed
		 * in memory before the first 2 dwords of the following report
		 * have effectively landed.
		 *
		 * This is assuming that the writes of the OA unit land in
		 * memory in the order they were written to.
		 * If not : (╯°□°)╯︵ ┻━┻
		 */
		while (OA_TAKEN(tail, aged_tail) >= report_size) {
			u32 *report32 = (void *)(stream->oa_buffer.vaddr + tail);

			if (report32[0] != 0 || report32[1] != 0)
				break;

			tail = (tail - report_size) & (OA_BUFFER_SIZE - 1);
		}

		if (OA_TAKEN(hw_tail, tail) > report_size &&
		    __ratelimit(&stream->perf->tail_pointer_race))
			DRM_NOTE("unlanded report(s) head=0x%x "
				 "tail=0x%x hw_tail=0x%x\n",
				 head, tail, hw_tail);

		stream->oa_buffer.tail = gtt_offset + tail;
		stream->oa_buffer.aging_tail = gtt_offset + hw_tail;
		stream->oa_buffer.aging_timestamp = now;
	}

	pollin = OA_TAKEN(stream->oa_buffer.tail - gtt_offset,
			  stream->oa_buffer.head - gtt_offset) >= report_size;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	return pollin;
}

/**
 * append_oa_status - Appends a status record to a userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @type: The kind of status to report to userspace
 *
 * Writes a status record (such as `DRM_I915_PERF_RECORD_OA_REPORT_LOST`)
 * into the userspace read() buffer.
 *
 * The @buf @offset will only be updated on success.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int append_oa_status(struct i915_perf_stream *stream,
			    char __user *buf,
			    size_t count,
			    size_t *offset,
			    enum drm_i915_perf_record_type type)
{
	struct drm_i915_perf_record_header header = { type, 0, sizeof(header) };

	if ((count - *offset) < header.size)
		return -ENOSPC;

	if (copy_to_user(buf + *offset, &header, sizeof(header)))
		return -EFAULT;

	(*offset) += header.size;

	return 0;
}

/**
 * append_oa_sample - Copies single OA report into userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @report: A single OA report to (optionally) include as part of the sample
 *
 * The contents of a sample are configured through `DRM_I915_PERF_PROP_SAMPLE_*`
 * properties when opening a stream, tracked as `stream->sample_flags`. This
 * function copies the requested components of a single sample to the given
 * read() @buf.
 *
 * The @buf @offset will only be updated on success.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int append_oa_sample(struct i915_perf_stream *stream,
			    char __user *buf,
			    size_t count,
			    size_t *offset,
			    const u8 *report)
{
	int report_size = stream->oa_buffer.format_size;
	struct drm_i915_perf_record_header header;

	header.type = DRM_I915_PERF_RECORD_SAMPLE;
	header.pad = 0;
	header.size = stream->sample_size;

	if ((count - *offset) < header.size)
		return -ENOSPC;

	buf += *offset;
	if (copy_to_user(buf, &header, sizeof(header)))
		return -EFAULT;
	buf += sizeof(header);

	if (copy_to_user(buf, report, report_size))
		return -EFAULT;

	(*offset) += header.size;

	return 0;
}
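
/*
 * For illustration, on the userspace side the bytes returned by a read() of
 * the stream fd form a sequence of variable-size records which can be walked
 * using the header's size field (buf, len and process_oa_report() are
 * assumptions of this sketch):
 *
 *	const struct drm_i915_perf_record_header *header;
 *	size_t offset;
 *
 *	for (offset = 0; offset < len; offset += header->size) {
 *		header = (const void *)(buf + offset);
 *		if (header->type == DRM_I915_PERF_RECORD_SAMPLE)
 *			process_oa_report((const uint8_t *)(header + 1));
 *	}
 */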

/**
 * gen8_append_oa_reports - Copies all buffered OA reports into
 *			    userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Notably any error condition resulting in a short read (-%ENOSPC or
 * -%EFAULT) will be returned even though one or more records may
 * have been successfully copied. In this case it's up to the caller
 * to decide if the error should be squashed before returning to
 * userspace.
 *
 * Note: reports are consumed from the head, and appended to the
 * tail, so the tail chases the head?... If you think that's mad
 * and back-to-front you're not alone, but this follows the
 * Gen PRM naming convention.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int gen8_append_oa_reports(struct i915_perf_stream *stream,
				  char __user *buf,
				  size_t count,
				  size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	int report_size = stream->oa_buffer.format_size;
	u8 *oa_buf_base = stream->oa_buffer.vaddr;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	u32 mask = (OA_BUFFER_SIZE - 1);
	size_t start_offset = *offset;
	unsigned long flags;
	u32 head, tail;
	u32 taken;
	int ret = 0;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled))
		return -EIO;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	head = stream->oa_buffer.head;
	tail = stream->oa_buffer.tail;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/*
	 * NB: oa_buffer.head/tail include the gtt_offset which we don't want
	 * while indexing relative to oa_buf_base.
	 */
	head -= gtt_offset;
	tail -= gtt_offset;

	/*
	 * An out of bounds or misaligned head or tail pointer implies a driver
	 * bug since we validate + align the tail pointers we read from the
	 * hardware and we are in full control of the head pointer which should
	 * only be incremented by multiples of the report size (notably also
	 * all a power of two).
	 */
	if (drm_WARN_ONCE(&uncore->i915->drm,
			  head > OA_BUFFER_SIZE || head % report_size ||
			  tail > OA_BUFFER_SIZE || tail % report_size,
			  "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
			  head, tail))
		return -EIO;


	for (/* none */;
	     (taken = OA_TAKEN(tail, head));
	     head = (head + report_size) & mask) {
		u8 *report = oa_buf_base + head;
		u32 *report32 = (void *)report;
		u32 ctx_id;
		u32 reason;

		/*
		 * All the report sizes factor neatly into the buffer
		 * size so we never expect to see a report split
		 * between the beginning and end of the buffer.
		 *
		 * Given the initial alignment check a misalignment
		 * here would imply a driver bug that would result
		 * in an overrun.
		 */
		if (drm_WARN_ON(&uncore->i915->drm,
				(OA_BUFFER_SIZE - head) < report_size)) {
			drm_err(&uncore->i915->drm,
				"Spurious OA head ptr: non-integral report offset\n");
			break;
		}

		/*
		 * The reason field includes flags identifying what
		 * triggered this specific report (mostly timer
		 * triggered or e.g. due to a context switch).
		 *
		 * This field is never expected to be zero so we can
		 * check that the report isn't invalid before copying
		 * it to userspace...
		 */
		reason = ((report32[0] >> OAREPORT_REASON_SHIFT) &
			  (GRAPHICS_VER(stream->perf->i915) == 12 ?
			   OAREPORT_REASON_MASK_EXTENDED :
			   OAREPORT_REASON_MASK));

		ctx_id = report32[2] & stream->specific_ctx_id_mask;

		/*
		 * Squash whatever is in the CTX_ID field if it's marked as
		 * invalid to be sure we avoid false-positive, single-context
		 * filtering below...
		 *
		 * Note that we don't clear the valid_ctx_bit so userspace can
		 * understand that the ID has been squashed by the kernel.
		 */
		if (!(report32[0] & stream->perf->gen8_valid_ctx_bit) &&
		    GRAPHICS_VER(stream->perf->i915) <= 11)
			ctx_id = report32[2] = INVALID_CTX_ID;

		/*
		 * NB: For Gen 8 the OA unit no longer supports clock gating
		 * off for a specific context and the kernel can't securely
		 * stop the counters from updating as system-wide / global
		 * values.
		 *
		 * Automatic reports now include a context ID so reports can be
		 * filtered on the cpu but it's not worth trying to
		 * automatically subtract/hide counter progress for other
		 * contexts while filtering since we can't stop userspace
		 * issuing MI_REPORT_PERF_COUNT commands which would still
		 * provide a side-band view of the real values.
		 *
		 * To allow userspace (such as Mesa/GL_INTEL_performance_query)
		 * to normalize counters for a single filtered context then it
		 * needs to be forwarded bookend context-switch reports so that
		 * it can track switches in between MI_REPORT_PERF_COUNT
		 * commands and can itself subtract/ignore the progress of
		 * counters associated with other contexts. Note that the
		 * hardware automatically triggers reports when switching to a
		 * new context which are tagged with the ID of the newly active
		 * context. To avoid the complexity (and likely fragility) of
		 * reading ahead while parsing reports to try and minimize
		 * forwarding redundant context switch reports (i.e. between
		 * other, unrelated contexts) we simply elect to forward them
		 * all.
		 *
		 * We don't rely solely on the reason field to identify context
		 * switches since it's not uncommon for periodic samples to
		 * identify a switch before any 'context switch' report.
		 */
		if (!stream->perf->exclusive_stream->ctx ||
		    stream->specific_ctx_id == ctx_id ||
		    stream->oa_buffer.last_ctx_id == stream->specific_ctx_id ||
		    reason & OAREPORT_REASON_CTX_SWITCH) {

			/*
			 * While filtering for a single context we avoid
			 * leaking the IDs of other contexts.
			 */
			if (stream->perf->exclusive_stream->ctx &&
			    stream->specific_ctx_id != ctx_id) {
				report32[2] = INVALID_CTX_ID;
			}

			ret = append_oa_sample(stream, buf, count, offset,
					       report);
			if (ret)
				break;

			stream->oa_buffer.last_ctx_id = ctx_id;
		}

		/*
		 * Clear out the first 2 dwords as a means to detect unlanded
		 * reports.
		 */
		report32[0] = 0;
		report32[1] = 0;
	}

	if (start_offset != *offset) {
		i915_reg_t oaheadptr;

		oaheadptr = GRAPHICS_VER(stream->perf->i915) == 12 ?
			    GEN12_OAG_OAHEADPTR : GEN8_OAHEADPTR;

		spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

		/*
		 * We removed the gtt_offset for the copy loop above, indexing
		 * relative to oa_buf_base so put back here...
		 */
		head += gtt_offset;
		intel_uncore_write(uncore, oaheadptr,
				   head & GEN12_OAG_OAHEADPTR_MASK);
		stream->oa_buffer.head = head;

		spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
	}

	return ret;
}

/**
 * gen8_oa_read - copy status records then buffered OA reports
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Checks OA unit status registers and if necessary appends corresponding
 * status records for userspace (such as for a buffer full condition) and then
 * initiates appending any buffered OA reports.
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * NB: some data may be successfully copied to the userspace buffer
 * even if an error is returned, and this is reflected in the
 * updated @offset.
 *
 * Returns: zero on success or a negative error code
 */
static int gen8_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 oastatus;
	i915_reg_t oastatus_reg;
	int ret;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
		return -EIO;

	oastatus_reg = GRAPHICS_VER(stream->perf->i915) == 12 ?
		       GEN12_OAG_OASTATUS : GEN8_OASTATUS;

	oastatus = intel_uncore_read(uncore, oastatus_reg);

	/*
	 * We treat OABUFFER_OVERFLOW as a significant error:
	 *
	 * Although theoretically we could handle this more gracefully
	 * sometimes, some Gens don't correctly suppress certain
	 * automatically triggered reports in this condition and so we
	 * have to assume that old reports are now being trampled
	 * over.
	 *
	 * Considering how we don't currently give userspace control
	 * over the OA buffer size and always configure a large 16MB
	 * buffer, then a buffer overflow does anyway likely indicate
	 * that something has gone quite badly wrong.
	 */
	if (oastatus & GEN8_OASTATUS_OABUFFER_OVERFLOW) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
		if (ret)
			return ret;

		DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
			  stream->period_exponent);

		stream->perf->ops.oa_disable(stream);
		stream->perf->ops.oa_enable(stream);

		/*
		 * Note: .oa_enable() is expected to re-init the oabuffer and
		 * reset GEN8_OASTATUS for us
		 */
		oastatus = intel_uncore_read(uncore, oastatus_reg);
	}

	if (oastatus & GEN8_OASTATUS_REPORT_LOST) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
		if (ret)
			return ret;

		intel_uncore_rmw(uncore, oastatus_reg,
				 GEN8_OASTATUS_COUNTER_OVERFLOW |
				 GEN8_OASTATUS_REPORT_LOST,
				 IS_GRAPHICS_VER(uncore->i915, 8, 11) ?
				 (GEN8_OASTATUS_HEAD_POINTER_WRAP |
				  GEN8_OASTATUS_TAIL_POINTER_WRAP) : 0);
	}

	return gen8_append_oa_reports(stream, buf, count, offset);
}

/**
 * gen7_append_oa_reports - Copies all buffered OA reports into
 *			    userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Notably any error condition resulting in a short read (-%ENOSPC or
 * -%EFAULT) will be returned even though one or more records may
 * have been successfully copied. In this case it's up to the caller
 * to decide if the error should be squashed before returning to
 * userspace.
 *
 * Note: reports are consumed from the head, and appended to the
 * tail, so the tail chases the head?... If you think that's mad
 * and back-to-front you're not alone, but this follows the
 * Gen PRM naming convention.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int gen7_append_oa_reports(struct i915_perf_stream *stream,
				  char __user *buf,
				  size_t count,
				  size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	int report_size = stream->oa_buffer.format_size;
	u8 *oa_buf_base = stream->oa_buffer.vaddr;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	u32 mask = (OA_BUFFER_SIZE - 1);
	size_t start_offset = *offset;
	unsigned long flags;
	u32 head, tail;
	u32 taken;
	int ret = 0;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled))
		return -EIO;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	head = stream->oa_buffer.head;
	tail = stream->oa_buffer.tail;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/* NB: oa_buffer.head/tail include the gtt_offset which we don't want
	 * while indexing relative to oa_buf_base.
	 */
	head -= gtt_offset;
	tail -= gtt_offset;

	/* An out of bounds or misaligned head or tail pointer implies a driver
	 * bug since we validate + align the tail pointers we read from the
	 * hardware and we are in full control of the head pointer which should
	 * only be incremented by multiples of the report size (notably also
	 * all a power of two).
	 */
	if (drm_WARN_ONCE(&uncore->i915->drm,
			  head > OA_BUFFER_SIZE || head % report_size ||
			  tail > OA_BUFFER_SIZE || tail % report_size,
			  "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
			  head, tail))
		return -EIO;


	for (/* none */;
	     (taken = OA_TAKEN(tail, head));
	     head = (head + report_size) & mask) {
		u8 *report = oa_buf_base + head;
		u32 *report32 = (void *)report;

		/* All the report sizes factor neatly into the buffer
		 * size so we never expect to see a report split
		 * between the beginning and end of the buffer.
		 *
		 * Given the initial alignment check a misalignment
		 * here would imply a driver bug that would result
		 * in an overrun.
		 */
		if (drm_WARN_ON(&uncore->i915->drm,
				(OA_BUFFER_SIZE - head) < report_size)) {
			drm_err(&uncore->i915->drm,
				"Spurious OA head ptr: non-integral report offset\n");
			break;
		}

		/* The report-ID field for periodic samples includes
		 * some undocumented flags related to what triggered
		 * the report and is never expected to be zero so we
		 * can check that the report isn't invalid before
		 * copying it to userspace...
		 */
		if (report32[0] == 0) {
			if (__ratelimit(&stream->perf->spurious_report_rs))
				DRM_NOTE("Skipping spurious, invalid OA report\n");
			continue;
		}

		ret = append_oa_sample(stream, buf, count, offset, report);
		if (ret)
			break;

		/* Clear out the first 2 dwords as a means to detect unlanded
		 * reports.
		 */
		report32[0] = 0;
		report32[1] = 0;
	}

	if (start_offset != *offset) {
		spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

		/* We removed the gtt_offset for the copy loop above, indexing
		 * relative to oa_buf_base so put back here...
		 */
		head += gtt_offset;

		intel_uncore_write(uncore, GEN7_OASTATUS2,
				   (head & GEN7_OASTATUS2_HEAD_MASK) |
				   GEN7_OASTATUS2_MEM_SELECT_GGTT);
		stream->oa_buffer.head = head;

		spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
	}

	return ret;
}

/**
 * gen7_oa_read - copy status records then buffered OA reports
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Checks Gen 7 specific OA unit status registers and if necessary appends
 * corresponding status records for userspace (such as for a buffer full
 * condition) and then initiates appending any buffered OA reports.
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * Returns: zero on success or a negative error code
 */
static int gen7_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 oastatus1;
	int ret;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
		return -EIO;

	oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);

	/* XXX: On Haswell we don't have a safe way to clear oastatus1
	 * bits while the OA unit is enabled (while the tail pointer
	 * may be updated asynchronously) so we ignore status bits
	 * that have already been reported to userspace.
	 */
	oastatus1 &= ~stream->perf->gen7_latched_oastatus1;

	/* We treat OABUFFER_OVERFLOW as a significant error:
	 *
	 * - The status can be interpreted to mean that the buffer is
	 *   currently full (with a higher precedence than OA_TAKEN()
	 *   which will start to report a near-empty buffer after an
	 *   overflow) but it's awkward that we can't clear the status
	 *   on Haswell, so without a reset we won't be able to catch
	 *   the state again.
	 *
	 * - Since it also implies the HW has started overwriting old
	 *   reports it may also affect our sanity checks for invalid
	 *   reports when copying to userspace that assume new reports
	 *   are being written to cleared memory.
	 *
	 * - In the future we may want to introduce a flight recorder
	 *   mode where the driver will automatically maintain a safe
	 *   guard band between head/tail, avoiding this overflow
	 *   condition, but we avoid the added driver complexity for
	 *   now.
	 */
	if (unlikely(oastatus1 & GEN7_OASTATUS1_OABUFFER_OVERFLOW)) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
		if (ret)
			return ret;

		DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
			  stream->period_exponent);

		stream->perf->ops.oa_disable(stream);
		stream->perf->ops.oa_enable(stream);

		oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);
	}

	if (unlikely(oastatus1 & GEN7_OASTATUS1_REPORT_LOST)) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
		if (ret)
			return ret;
		stream->perf->gen7_latched_oastatus1 |=
			GEN7_OASTATUS1_REPORT_LOST;
	}

	return gen7_append_oa_reports(stream, buf, count, offset);
}

/**
 * i915_oa_wait_unlocked - handles blocking IO until OA data available
 * @stream: An i915-perf stream opened for OA metrics
 *
 * Called when userspace tries to read() from a blocking stream FD opened
 * for OA metrics. It waits until the hrtimer callback finds a non-empty
 * OA buffer and wakes us.
 *
 * Note: it's acceptable to have this return with some false positives
 * since any subsequent read handling will return -EAGAIN if there isn't
 * really data ready for userspace yet.
 *
 * Returns: zero on success or a negative error code
 */
static int i915_oa_wait_unlocked(struct i915_perf_stream *stream)
{
	/* We would wait indefinitely if periodic sampling is not enabled */
	if (!stream->periodic)
		return -EIO;

	return wait_event_interruptible(stream->poll_wq,
					oa_buffer_check_unlocked(stream));
}

/**
 * i915_oa_poll_wait - call poll_wait() for an OA stream poll()
 * @stream: An i915-perf stream opened for OA metrics
 * @file: An i915 perf stream file
 * @wait: poll() state table
 *
 * For handling userspace polling on an i915 perf stream opened for OA metrics,
 * this starts a poll_wait with the wait queue that our hrtimer callback wakes
 * when it sees data ready to read in the circular OA buffer.
 */
static void i915_oa_poll_wait(struct i915_perf_stream *stream,
			      struct file *file,
			      poll_table *wait)
{
	poll_wait(file, &stream->poll_wq, wait);
}
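
/*
 * For illustration, the corresponding userspace side can sleep in poll()
 * until the hrtimer callback signals data and only then issue a read()
 * (stream_fd, buf and parse_records() are assumptions of this sketch):
 *
 *	struct pollfd pfd = { .fd = stream_fd, .events = POLLIN };
 *
 *	while (poll(&pfd, 1, -1) >= 0 && (pfd.revents & POLLIN)) {
 *		ssize_t n = read(stream_fd, buf, sizeof(buf));
 *
 *		if (n > 0)
 *			parse_records(buf, n);
 *	}
 */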

/**
 * i915_oa_read - just calls through to &i915_oa_ops->read
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * Returns: zero on success or a negative error code
 */
static int i915_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	return stream->perf->ops.read(stream, buf, count, offset);
}

static struct intel_context *oa_pin_context(struct i915_perf_stream *stream)
{
	struct i915_gem_engines_iter it;
	struct i915_gem_context *ctx = stream->ctx;
	struct intel_context *ce;
	struct i915_gem_ww_ctx ww;
	int err = -ENODEV;

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		if (ce->engine != stream->engine) /* first match! */
			continue;

		err = 0;
		break;
	}
	i915_gem_context_unlock_engines(ctx);

	if (err)
		return ERR_PTR(err);

	i915_gem_ww_ctx_init(&ww, true);
retry:
	/*
	 * As the ID is the gtt offset of the context's vma we
	 * pin the vma to ensure the ID remains fixed.
	 */
	err = intel_context_pin_ww(ce, &ww);
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);

	if (err)
		return ERR_PTR(err);

	stream->pinned_ctx = ce;
	return stream->pinned_ctx;
}

/**
 * oa_get_render_ctx_id - determine and hold ctx hw id
 * @stream: An i915-perf stream opened for OA metrics
 *
 * Determine the render context hw id, and ensure it remains fixed for the
 * lifetime of the stream. This ensures that we don't have to worry about
 * updating the context ID in OACONTROL on the fly.
 *
 * Returns: zero on success or a negative error code
 */
static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
{
	struct intel_context *ce;

	ce = oa_pin_context(stream);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	switch (GRAPHICS_VER(ce->engine->i915)) {
	case 7: {
		/*
		 * On Haswell we don't do any post processing of the reports
		 * and don't need to use the mask.
		 */
		stream->specific_ctx_id = i915_ggtt_offset(ce->state);
		stream->specific_ctx_id_mask = 0;
		break;
	}

	case 8:
	case 9:
		if (intel_engine_uses_guc(ce->engine)) {
			/*
			 * When using GuC, the context descriptor we write in
			 * i915 is read by GuC and rewritten before it's
			 * actually written into the hardware. The LRCA is
			 * what is put into the context id field of the
			 * context descriptor by GuC. Because it's aligned to
			 * a page, the lower 12bits are always at 0 and
			 * dropped by GuC. They won't be part of the context
			 * ID in the OA reports, so squash those lower bits.
			 */
			stream->specific_ctx_id = ce->lrc.lrca >> 12;

			/*
			 * GuC uses the top bit to signal proxy submission, so
			 * ignore that bit.
			 */
			stream->specific_ctx_id_mask =
				(1U << (GEN8_CTX_ID_WIDTH - 1)) - 1;
		} else {
			stream->specific_ctx_id_mask =
				(1U << GEN8_CTX_ID_WIDTH) - 1;
			stream->specific_ctx_id = stream->specific_ctx_id_mask;
		}
		break;

	case 11:
	case 12:
		if (GRAPHICS_VER_FULL(ce->engine->i915) >= IP_VER(12, 50)) {
			stream->specific_ctx_id_mask =
				((1U << XEHP_SW_CTX_ID_WIDTH) - 1) <<
				(XEHP_SW_CTX_ID_SHIFT - 32);
			stream->specific_ctx_id =
				(XEHP_MAX_CONTEXT_HW_ID - 1) <<
				(XEHP_SW_CTX_ID_SHIFT - 32);
		} else {
			stream->specific_ctx_id_mask =
				((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32);
			/*
			 * Pick an unused context id
			 * 0 - BITS_PER_LONG are used by other contexts
			 * GEN12_MAX_CONTEXT_HW_ID (0x7ff) is used by idle context
			 */
			stream->specific_ctx_id =
				(GEN12_MAX_CONTEXT_HW_ID - 1) << (GEN11_SW_CTX_ID_SHIFT - 32);
		}
		break;

	default:
		MISSING_CASE(GRAPHICS_VER(ce->engine->i915));
	}

	ce->tag = stream->specific_ctx_id;

	drm_dbg(&stream->perf->i915->drm,
		"filtering on ctx_id=0x%x ctx_id_mask=0x%x\n",
		stream->specific_ctx_id,
		stream->specific_ctx_id_mask);

	return 0;
}
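
/*
 * Worked example, assuming Gen11's GEN11_SW_CTX_ID_SHIFT of 37 and
 * GEN11_SW_CTX_ID_WIDTH of 11: the mask above becomes
 * ((1 << 11) - 1) << (37 - 32) = 0x7ff << 5 = 0xffe0, so a report is
 * attributed to the filtered context when (report32[2] & 0xffe0) equals
 * stream->specific_ctx_id.
 */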

/**
 * oa_put_render_ctx_id - counterpart to oa_get_render_ctx_id releases hold
 * @stream: An i915-perf stream opened for OA metrics
 *
 * In case anything needed doing to ensure the context HW ID would remain valid
 * for the lifetime of the stream, then that can be undone here.
 */
static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
{
	struct intel_context *ce;

	ce = fetch_and_zero(&stream->pinned_ctx);
	if (ce) {
		ce->tag = 0; /* recomputed on next submission after parking */
		intel_context_unpin(ce);
	}

	stream->specific_ctx_id = INVALID_CTX_ID;
	stream->specific_ctx_id_mask = 0;
}

static void
free_oa_buffer(struct i915_perf_stream *stream)
{
	i915_vma_unpin_and_release(&stream->oa_buffer.vma,
				   I915_VMA_RELEASE_MAP);

	stream->oa_buffer.vaddr = NULL;
}

static void
free_oa_configs(struct i915_perf_stream *stream)
{
	struct i915_oa_config_bo *oa_bo, *tmp;

	i915_oa_config_put(stream->oa_config);
	llist_for_each_entry_safe(oa_bo, tmp, stream->oa_config_bos.first, node)
		free_oa_config_bo(oa_bo);
}

static void
free_noa_wait(struct i915_perf_stream *stream)
{
	i915_vma_unpin_and_release(&stream->noa_wait, 0);
}

static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
{
	struct i915_perf *perf = stream->perf;

	BUG_ON(stream != perf->exclusive_stream);

	/*
	 * Unset exclusive_stream first, it will be checked while disabling
	 * the metric set on gen8+.
	 *
	 * See i915_oa_init_reg_state() and lrc_configure_all_contexts()
	 */
	WRITE_ONCE(perf->exclusive_stream, NULL);
	perf->ops.disable_metric_set(stream);

	free_oa_buffer(stream);

	intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL);
	intel_engine_pm_put(stream->engine);

	if (stream->ctx)
		oa_put_render_ctx_id(stream);

	free_oa_configs(stream);
	free_noa_wait(stream);

	if (perf->spurious_report_rs.missed) {
		DRM_NOTE("%d spurious OA report notices suppressed due to ratelimiting\n",
			 perf->spurious_report_rs.missed);
	}
}

static void gen7_init_oa_buffer(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	unsigned long flags;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	/* Pre-DevBDW: OABUFFER must be set with counters off,
	 * before OASTATUS1, but after OASTATUS2
	 */
	intel_uncore_write(uncore, GEN7_OASTATUS2, /* head */
			   gtt_offset | GEN7_OASTATUS2_MEM_SELECT_GGTT);
	stream->oa_buffer.head = gtt_offset;

	intel_uncore_write(uncore, GEN7_OABUFFER, gtt_offset);

	intel_uncore_write(uncore, GEN7_OASTATUS1, /* tail */
			   gtt_offset | OABUFFER_SIZE_16M);

	/* Mark that we need updated tail pointers to read from... */
	stream->oa_buffer.aging_tail = INVALID_TAIL_PTR;
	stream->oa_buffer.tail = gtt_offset;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/* On Haswell we have to track which OASTATUS1 flags we've
	 * already seen since they can't be cleared while periodic
	 * sampling is enabled.
	 */
	stream->perf->gen7_latched_oastatus1 = 0;

	/* NB: although the OA buffer will initially be allocated
	 * zeroed via shmfs (and so this memset is redundant when
	 * first allocating), we may re-init the OA buffer, either
	 * when re-enabling a stream or in error/reset paths.
	 *
	 * The reason we clear the buffer for each re-init is for the
	 * sanity check in gen7_append_oa_reports() that looks at the
	 * report-id field to make sure it's non-zero which relies on
	 * the assumption that new reports are being written to zeroed
	 * memory...
	 */
	memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
}
1447 
1448 static void gen8_init_oa_buffer(struct i915_perf_stream *stream)
1449 {
1450 	struct intel_uncore *uncore = stream->uncore;
1451 	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
1452 	unsigned long flags;
1453 
1454 	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
1455 
1456 	intel_uncore_write(uncore, GEN8_OASTATUS, 0);
1457 	intel_uncore_write(uncore, GEN8_OAHEADPTR, gtt_offset);
1458 	stream->oa_buffer.head = gtt_offset;
1459 
1460 	intel_uncore_write(uncore, GEN8_OABUFFER_UDW, 0);
1461 
1462 	/*
1463 	 * PRM says:
1464 	 *
1465 	 *  "This MMIO must be set before the OATAILPTR
1466 	 *  register and after the OAHEADPTR register. This is
1467 	 *  to enable proper functionality of the overflow
1468 	 *  bit."
1469 	 */
1470 	intel_uncore_write(uncore, GEN8_OABUFFER, gtt_offset |
1471 		   OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT);
1472 	intel_uncore_write(uncore, GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK);
1473 
1474 	/* Mark that we need updated tail pointers to read from... */
1475 	stream->oa_buffer.aging_tail = INVALID_TAIL_PTR;
1476 	stream->oa_buffer.tail = gtt_offset;
1477 
1478 	/*
1479 	 * Reset state used to recognise context switches, affecting which
1480 	 * reports we will forward to userspace while filtering for a single
1481 	 * context.
1482 	 */
1483 	stream->oa_buffer.last_ctx_id = INVALID_CTX_ID;
1484 
1485 	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
1486 
1487 	/*
1488 	 * NB: although the OA buffer will initially be allocated
1489 	 * zeroed via shmfs (and so this memset is redundant when
1490 	 * first allocating), we may re-init the OA buffer, either
1491 	 * when re-enabling a stream or in error/reset paths.
1492 	 *
1493 	 * The reason we clear the buffer for each re-init is for the
1494 	 * sanity check in gen8_append_oa_reports() that looks at the
1495 	 * reason field to make sure it's non-zero which relies on
1496 	 * the assumption that new reports are being written to zeroed
1497 	 * memory...
1498 	 */
1499 	memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
1500 }
1501 
1502 static void gen12_init_oa_buffer(struct i915_perf_stream *stream)
1503 {
1504 	struct intel_uncore *uncore = stream->uncore;
1505 	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
1506 	unsigned long flags;
1507 
1508 	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
1509 
1510 	intel_uncore_write(uncore, GEN12_OAG_OASTATUS, 0);
1511 	intel_uncore_write(uncore, GEN12_OAG_OAHEADPTR,
1512 			   gtt_offset & GEN12_OAG_OAHEADPTR_MASK);
1513 	stream->oa_buffer.head = gtt_offset;
1514 
1515 	/*
1516 	 * PRM says:
1517 	 *
1518 	 *  "This MMIO must be set before the OATAILPTR
1519 	 *  register and after the OAHEADPTR register. This is
1520 	 *  to enable proper functionality of the overflow
1521 	 *  bit."
1522 	 */
1523 	intel_uncore_write(uncore, GEN12_OAG_OABUFFER, gtt_offset |
1524 			   OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT);
1525 	intel_uncore_write(uncore, GEN12_OAG_OATAILPTR,
1526 			   gtt_offset & GEN12_OAG_OATAILPTR_MASK);
1527 
1528 	/* Mark that we need updated tail pointers to read from... */
1529 	stream->oa_buffer.aging_tail = INVALID_TAIL_PTR;
1530 	stream->oa_buffer.tail = gtt_offset;
1531 
1532 	/*
1533 	 * Reset state used to recognise context switches, affecting which
1534 	 * reports we will forward to userspace while filtering for a single
1535 	 * context.
1536 	 */
1537 	stream->oa_buffer.last_ctx_id = INVALID_CTX_ID;
1538 
1539 	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
1540 
1541 	/*
1542 	 * NB: although the OA buffer will initially be allocated
1543 	 * zeroed via shmfs (and so this memset is redundant when
1544 	 * first allocating), we may re-init the OA buffer, either
1545 	 * when re-enabling a stream or in error/reset paths.
1546 	 *
1547 	 * The reason we clear the buffer for each re-init is for the
1548 	 * sanity check in gen8_append_oa_reports() that looks at the
1549 	 * reason field to make sure it's non-zero which relies on
1550 	 * the assumption that new reports are being written to zeroed
1551 	 * memory...
1552 	 */
1553 	memset(stream->oa_buffer.vaddr, 0,
1554 	       stream->oa_buffer.vma->size);
1555 }
1556 
1557 static int alloc_oa_buffer(struct i915_perf_stream *stream)
1558 {
1559 	struct drm_i915_private *i915 = stream->perf->i915;
1560 	struct drm_i915_gem_object *bo;
1561 	struct i915_vma *vma;
1562 	int ret;
1563 
1564 	if (drm_WARN_ON(&i915->drm, stream->oa_buffer.vma))
1565 		return -ENODEV;
1566 
1567 	BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE);
1568 	BUILD_BUG_ON(OA_BUFFER_SIZE < SZ_128K || OA_BUFFER_SIZE > SZ_16M);
1569 
1570 	bo = i915_gem_object_create_shmem(stream->perf->i915, OA_BUFFER_SIZE);
1571 	if (IS_ERR(bo)) {
1572 		drm_err(&i915->drm, "Failed to allocate OA buffer\n");
1573 		return PTR_ERR(bo);
1574 	}
1575 
1576 	i915_gem_object_set_cache_coherency(bo, I915_CACHE_LLC);
1577 
1578 	/* PreHSW required 512K alignment, HSW requires 16M */
1579 	vma = i915_gem_object_ggtt_pin(bo, NULL, 0, SZ_16M, 0);
1580 	if (IS_ERR(vma)) {
1581 		ret = PTR_ERR(vma);
1582 		goto err_unref;
1583 	}
1584 	stream->oa_buffer.vma = vma;
1585 
1586 	stream->oa_buffer.vaddr =
1587 		i915_gem_object_pin_map_unlocked(bo, I915_MAP_WB);
1588 	if (IS_ERR(stream->oa_buffer.vaddr)) {
1589 		ret = PTR_ERR(stream->oa_buffer.vaddr);
1590 		goto err_unpin;
1591 	}
1592 
1593 	return 0;
1594 
1595 err_unpin:
1596 	__i915_vma_unpin(vma);
1597 
1598 err_unref:
1599 	i915_gem_object_put(bo);
1600 
1601 	stream->oa_buffer.vaddr = NULL;
1602 	stream->oa_buffer.vma = NULL;
1603 
1604 	return ret;
1605 }
1606 
1607 static u32 *save_restore_register(struct i915_perf_stream *stream, u32 *cs,
1608 				  bool save, i915_reg_t reg, u32 offset,
1609 				  u32 dword_count)
1610 {
1611 	u32 cmd;
1612 	u32 d;
1613 
1614 	cmd = save ? MI_STORE_REGISTER_MEM : MI_LOAD_REGISTER_MEM;
1615 	cmd |= MI_SRM_LRM_GLOBAL_GTT;
1616 	if (GRAPHICS_VER(stream->perf->i915) >= 8)
1617 		cmd++;
1618 
1619 	for (d = 0; d < dword_count; d++) {
1620 		*cs++ = cmd;
1621 		*cs++ = i915_mmio_reg_offset(reg) + 4 * d;
1622 		*cs++ = intel_gt_scratch_offset(stream->engine->gt,
1623 						offset) + 4 * d;
1624 		*cs++ = 0;
1625 	}
1626 
1627 	return cs;
1628 }
1629 
1630 static int alloc_noa_wait(struct i915_perf_stream *stream)
1631 {
1632 	struct drm_i915_private *i915 = stream->perf->i915;
1633 	struct drm_i915_gem_object *bo;
1634 	struct i915_vma *vma;
1635 	const u64 delay_ticks = 0xffffffffffffffff -
1636 		intel_gt_ns_to_clock_interval(stream->perf->i915->ggtt.vm.gt,
1637 					      atomic64_read(&stream->perf->noa_programming_delay));
1638 	const u32 base = stream->engine->mmio_base;
1639 #define CS_GPR(x) GEN8_RING_CS_GPR(base, x)
1640 	u32 *batch, *ts0, *cs, *jump;
1641 	struct i915_gem_ww_ctx ww;
1642 	int ret, i;
1643 	enum {
1644 		START_TS,
1645 		NOW_TS,
1646 		DELTA_TS,
1647 		JUMP_PREDICATE,
1648 		DELTA_TARGET,
1649 		N_CS_GPR
1650 	};
1651 
1652 	bo = i915_gem_object_create_internal(i915, 4096);
1653 	if (IS_ERR(bo)) {
1654 		drm_err(&i915->drm,
1655 			"Failed to allocate NOA wait batchbuffer\n");
1656 		return PTR_ERR(bo);
1657 	}
1658 
1659 	i915_gem_ww_ctx_init(&ww, true);
1660 retry:
1661 	ret = i915_gem_object_lock(bo, &ww);
1662 	if (ret)
1663 		goto out_ww;
1664 
1665 	/*
1666 	 * We pin this buffer in the GGTT because multiple OA config BOs
1667 	 * will contain a jump to this address, and it therefore needs to
1668 	 * stay fixed for the lifetime of the i915/perf stream.
1669 	 */
1670 	vma = i915_gem_object_ggtt_pin_ww(bo, &ww, NULL, 0, 0, PIN_HIGH);
1671 	if (IS_ERR(vma)) {
1672 		ret = PTR_ERR(vma);
1673 		goto out_ww;
1674 	}
1675 
1676 	batch = cs = i915_gem_object_pin_map(bo, I915_MAP_WB);
1677 	if (IS_ERR(batch)) {
1678 		ret = PTR_ERR(batch);
1679 		goto err_unpin;
1680 	}
1681 
1682 	/* Save registers. */
1683 	for (i = 0; i < N_CS_GPR; i++)
1684 		cs = save_restore_register(
1685 			stream, cs, true /* save */, CS_GPR(i),
1686 			INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR + 8 * i, 2);
1687 	cs = save_restore_register(
1688 		stream, cs, true /* save */, MI_PREDICATE_RESULT_1(RENDER_RING_BASE),
1689 		INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1, 1);
1690 
1691 	/* First timestamp snapshot location. */
1692 	ts0 = cs;
1693 
1694 	/*
1695 	 * Initial snapshot of the timestamp register to implement the wait.
1696 	 * We work with 32bit values, so clear out the top 32 bits of the
1697 	 * register because the ALU operates on 64 bits.
1698 	 */
1699 	*cs++ = MI_LOAD_REGISTER_IMM(1);
1700 	*cs++ = i915_mmio_reg_offset(CS_GPR(START_TS)) + 4;
1701 	*cs++ = 0;
1702 	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
1703 	*cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base));
1704 	*cs++ = i915_mmio_reg_offset(CS_GPR(START_TS));
1705 
1706 	/*
1707 	 * This is the location we're going to jump back into until the
1708 	 * required amount of time has passed.
1709 	 */
1710 	jump = cs;
1711 
1712 	/*
1713 	 * Take another snapshot of the timestamp register. Take care to
1714 	 * clear the top 32 bits of CS_GPR(NOW_TS) as we're using it for
1715 	 * other operations below.
1716 	 */
1717 	*cs++ = MI_LOAD_REGISTER_IMM(1);
1718 	*cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS)) + 4;
1719 	*cs++ = 0;
1720 	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
1721 	*cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base));
1722 	*cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS));
1723 
1724 	/*
1725 	 * Do a diff between the 2 timestamps and store the result back into
1726 	 * CS_GPR(1).
1727 	 */
1728 	*cs++ = MI_MATH(5);
1729 	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(NOW_TS));
1730 	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(START_TS));
1731 	*cs++ = MI_MATH_SUB;
1732 	*cs++ = MI_MATH_STORE(MI_MATH_REG(DELTA_TS), MI_MATH_REG_ACCU);
1733 	*cs++ = MI_MATH_STORE(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF);
1734 
1735 	/*
1736 	 * Transfer the carry flag (set to 1 if ts1 < ts0, meaning the
1737 	 * timestamp has rolled over the 32 bits) into the predicate
1738 	 * register to be used for the predicated jump.
1739 	 */
1740 	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
1741 	*cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE));
1742 	*cs++ = i915_mmio_reg_offset(MI_PREDICATE_RESULT_1(RENDER_RING_BASE));
1743 
1744 	/* Restart from the beginning if we had timestamps roll over. */
1745 	*cs++ = (GRAPHICS_VER(i915) < 8 ?
1746 		 MI_BATCH_BUFFER_START :
1747 		 MI_BATCH_BUFFER_START_GEN8) |
1748 		MI_BATCH_PREDICATE;
1749 	*cs++ = i915_ggtt_offset(vma) + (ts0 - batch) * 4;
1750 	*cs++ = 0;
1751 
1752 	/*
1753 	 * Now add the diff between the two previous timestamps to:
1754 	 *      ((1 << 64) - 1) - delay (in CS timestamp ticks)
1755 	 *
1756 	 * When the Carry Flag contains 1 this means the elapsed time is
1757 	 * longer than the expected delay, and we can exit the wait loop.
1758 	 */
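	/*
	 * Worked example (illustrative): delay_ticks == ((1 << 64) - 1) - delay,
	 * so DELTA_TS + delay_ticks carries out of 64 bits exactly when
	 * DELTA_TS > delay. E.g. with delay == 2000 ticks:
	 *
	 *   DELTA_TS == 1500: no carry, MI_MATH_STOREINV sets the predicate
	 *                     and the jump below loops back to 'jump'.
	 *   DELTA_TS == 2001: carry set, the predicate is cleared and we
	 *                     fall out of the wait loop.
	 */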
1759 	*cs++ = MI_LOAD_REGISTER_IMM(2);
1760 	*cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET));
1761 	*cs++ = lower_32_bits(delay_ticks);
1762 	*cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET)) + 4;
1763 	*cs++ = upper_32_bits(delay_ticks);
1764 
1765 	*cs++ = MI_MATH(4);
1766 	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(DELTA_TS));
1767 	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(DELTA_TARGET));
1768 	*cs++ = MI_MATH_ADD;
1769 	*cs++ = MI_MATH_STOREINV(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF);
1770 
1771 	*cs++ = MI_ARB_CHECK;
1772 
1773 	/*
1774 	 * Transfer the result into the predicate register to be used for the
1775 	 * predicated jump.
1776 	 */
1777 	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
1778 	*cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE));
1779 	*cs++ = i915_mmio_reg_offset(MI_PREDICATE_RESULT_1(RENDER_RING_BASE));
1780 
1781 	/* Predicate the jump. */
1782 	*cs++ = (GRAPHICS_VER(i915) < 8 ?
1783 		 MI_BATCH_BUFFER_START :
1784 		 MI_BATCH_BUFFER_START_GEN8) |
1785 		MI_BATCH_PREDICATE;
1786 	*cs++ = i915_ggtt_offset(vma) + (jump - batch) * 4;
1787 	*cs++ = 0;
1788 
1789 	/* Restore registers. */
1790 	for (i = 0; i < N_CS_GPR; i++)
1791 		cs = save_restore_register(
1792 			stream, cs, false /* restore */, CS_GPR(i),
1793 			INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR + 8 * i, 2);
1794 	cs = save_restore_register(
1795 		stream, cs, false /* restore */, MI_PREDICATE_RESULT_1(RENDER_RING_BASE),
1796 		INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1, 1);
1797 
1798 	/* And return to the ring. */
1799 	*cs++ = MI_BATCH_BUFFER_END;
1800 
1801 	GEM_BUG_ON(cs - batch > PAGE_SIZE / sizeof(*batch));
1802 
1803 	i915_gem_object_flush_map(bo);
1804 	__i915_gem_object_release_map(bo);
1805 
1806 	stream->noa_wait = vma;
1807 	goto out_ww;
1808 
1809 err_unpin:
1810 	i915_vma_unpin_and_release(&vma, 0);
1811 out_ww:
1812 	if (ret == -EDEADLK) {
1813 		ret = i915_gem_ww_ctx_backoff(&ww);
1814 		if (!ret)
1815 			goto retry;
1816 	}
1817 	i915_gem_ww_ctx_fini(&ww);
1818 	if (ret)
1819 		i915_gem_object_put(bo);
1820 	return ret;
1821 }
1822 
1823 static u32 *write_cs_mi_lri(u32 *cs,
1824 			    const struct i915_oa_reg *reg_data,
1825 			    u32 n_regs)
1826 {
1827 	u32 i;
1828 
1829 	for (i = 0; i < n_regs; i++) {
1830 		if ((i % MI_LOAD_REGISTER_IMM_MAX_REGS) == 0) {
1831 			u32 n_lri = min_t(u32,
1832 					  n_regs - i,
1833 					  MI_LOAD_REGISTER_IMM_MAX_REGS);
1834 
1835 			*cs++ = MI_LOAD_REGISTER_IMM(n_lri);
1836 		}
1837 		*cs++ = i915_mmio_reg_offset(reg_data[i].addr);
1838 		*cs++ = reg_data[i].value;
1839 	}
1840 
1841 	return cs;
1842 }
1843 
1844 static int num_lri_dwords(int num_regs)
1845 {
1846 	int count = 0;
1847 
1848 	if (num_regs > 0) {
1849 		count += DIV_ROUND_UP(num_regs, MI_LOAD_REGISTER_IMM_MAX_REGS);
1850 		count += num_regs * 2;
1851 	}
1852 
1853 	return count;
1854 }
1855 
1856 static struct i915_oa_config_bo *
1857 alloc_oa_config_buffer(struct i915_perf_stream *stream,
1858 		       struct i915_oa_config *oa_config)
1859 {
1860 	struct drm_i915_gem_object *obj;
1861 	struct i915_oa_config_bo *oa_bo;
1862 	struct i915_gem_ww_ctx ww;
1863 	size_t config_length = 0;
1864 	u32 *cs;
1865 	int err;
1866 
1867 	oa_bo = kzalloc(sizeof(*oa_bo), GFP_KERNEL);
1868 	if (!oa_bo)
1869 		return ERR_PTR(-ENOMEM);
1870 
1871 	config_length += num_lri_dwords(oa_config->mux_regs_len);
1872 	config_length += num_lri_dwords(oa_config->b_counter_regs_len);
1873 	config_length += num_lri_dwords(oa_config->flex_regs_len);
1874 	config_length += 3; /* MI_BATCH_BUFFER_START */
1875 	config_length = ALIGN(sizeof(u32) * config_length, I915_GTT_PAGE_SIZE);
1876 
1877 	obj = i915_gem_object_create_shmem(stream->perf->i915, config_length);
1878 	if (IS_ERR(obj)) {
1879 		err = PTR_ERR(obj);
1880 		goto err_free;
1881 	}
1882 
1883 	i915_gem_ww_ctx_init(&ww, true);
1884 retry:
1885 	err = i915_gem_object_lock(obj, &ww);
1886 	if (err)
1887 		goto out_ww;
1888 
1889 	cs = i915_gem_object_pin_map(obj, I915_MAP_WB);
1890 	if (IS_ERR(cs)) {
1891 		err = PTR_ERR(cs);
1892 		goto out_ww;
1893 	}
1894 
1895 	cs = write_cs_mi_lri(cs,
1896 			     oa_config->mux_regs,
1897 			     oa_config->mux_regs_len);
1898 	cs = write_cs_mi_lri(cs,
1899 			     oa_config->b_counter_regs,
1900 			     oa_config->b_counter_regs_len);
1901 	cs = write_cs_mi_lri(cs,
1902 			     oa_config->flex_regs,
1903 			     oa_config->flex_regs_len);
1904 
1905 	/* Jump into the active wait. */
1906 	*cs++ = (GRAPHICS_VER(stream->perf->i915) < 8 ?
1907 		 MI_BATCH_BUFFER_START :
1908 		 MI_BATCH_BUFFER_START_GEN8);
1909 	*cs++ = i915_ggtt_offset(stream->noa_wait);
1910 	*cs++ = 0;
1911 
1912 	i915_gem_object_flush_map(obj);
1913 	__i915_gem_object_release_map(obj);
1914 
1915 	oa_bo->vma = i915_vma_instance(obj,
1916 				       &stream->engine->gt->ggtt->vm,
1917 				       NULL);
1918 	if (IS_ERR(oa_bo->vma)) {
1919 		err = PTR_ERR(oa_bo->vma);
1920 		goto out_ww;
1921 	}
1922 
1923 	oa_bo->oa_config = i915_oa_config_get(oa_config);
1924 	llist_add(&oa_bo->node, &stream->oa_config_bos);
1925 
1926 out_ww:
1927 	if (err == -EDEADLK) {
1928 		err = i915_gem_ww_ctx_backoff(&ww);
1929 		if (!err)
1930 			goto retry;
1931 	}
1932 	i915_gem_ww_ctx_fini(&ww);
1933 
1934 	if (err)
1935 		i915_gem_object_put(obj);
1936 err_free:
1937 	if (err) {
1938 		kfree(oa_bo);
1939 		return ERR_PTR(err);
1940 	}
1941 	return oa_bo;
1942 }
1943 
1944 static struct i915_vma *
1945 get_oa_vma(struct i915_perf_stream *stream, struct i915_oa_config *oa_config)
1946 {
1947 	struct i915_oa_config_bo *oa_bo;
1948 
1949 	/*
1950 	 * Look for the buffer in the already allocated BOs attached
1951 	 * to the stream.
1952 	 */
1953 	llist_for_each_entry(oa_bo, stream->oa_config_bos.first, node) {
1954 		if (oa_bo->oa_config == oa_config &&
1955 		    memcmp(oa_bo->oa_config->uuid,
1956 			   oa_config->uuid,
1957 			   sizeof(oa_config->uuid)) == 0)
1958 			goto out;
1959 	}
1960 
1961 	oa_bo = alloc_oa_config_buffer(stream, oa_config);
1962 	if (IS_ERR(oa_bo))
1963 		return ERR_CAST(oa_bo);
1964 
1965 out:
1966 	return i915_vma_get(oa_bo->vma);
1967 }
1968 
1969 static int
1970 emit_oa_config(struct i915_perf_stream *stream,
1971 	       struct i915_oa_config *oa_config,
1972 	       struct intel_context *ce,
1973 	       struct i915_active *active)
1974 {
1975 	struct i915_request *rq;
1976 	struct i915_vma *vma;
1977 	struct i915_gem_ww_ctx ww;
1978 	int err;
1979 
1980 	vma = get_oa_vma(stream, oa_config);
1981 	if (IS_ERR(vma))
1982 		return PTR_ERR(vma);
1983 
1984 	i915_gem_ww_ctx_init(&ww, true);
1985 retry:
1986 	err = i915_gem_object_lock(vma->obj, &ww);
1987 	if (err)
1988 		goto err;
1989 
1990 	err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_GLOBAL | PIN_HIGH);
1991 	if (err)
1992 		goto err;
1993 
1994 	intel_engine_pm_get(ce->engine);
1995 	rq = i915_request_create(ce);
1996 	intel_engine_pm_put(ce->engine);
1997 	if (IS_ERR(rq)) {
1998 		err = PTR_ERR(rq);
1999 		goto err_vma_unpin;
2000 	}
2001 
2002 	if (!IS_ERR_OR_NULL(active)) {
2003 		/* After all individual context modifications */
2004 		err = i915_request_await_active(rq, active,
2005 						I915_ACTIVE_AWAIT_ACTIVE);
2006 		if (err)
2007 			goto err_add_request;
2008 
2009 		err = i915_active_add_request(active, rq);
2010 		if (err)
2011 			goto err_add_request;
2012 	}
2013 
2014 	err = i915_request_await_object(rq, vma->obj, 0);
2015 	if (!err)
2016 		err = i915_vma_move_to_active(vma, rq, 0);
2017 	if (err)
2018 		goto err_add_request;
2019 
2020 	err = rq->engine->emit_bb_start(rq,
2021 					vma->node.start, 0,
2022 					I915_DISPATCH_SECURE);
2023 	if (err)
2024 		goto err_add_request;
2025 
2026 err_add_request:
2027 	i915_request_add(rq);
2028 err_vma_unpin:
2029 	i915_vma_unpin(vma);
2030 err:
2031 	if (err == -EDEADLK) {
2032 		err = i915_gem_ww_ctx_backoff(&ww);
2033 		if (!err)
2034 			goto retry;
2035 	}
2036 
2037 	i915_gem_ww_ctx_fini(&ww);
2038 	i915_vma_put(vma);
2039 	return err;
2040 }
2041 
2042 static struct intel_context *oa_context(struct i915_perf_stream *stream)
2043 {
2044 	return stream->pinned_ctx ?: stream->engine->kernel_context;
2045 }
2046 
2047 static int
2048 hsw_enable_metric_set(struct i915_perf_stream *stream,
2049 		      struct i915_active *active)
2050 {
2051 	struct intel_uncore *uncore = stream->uncore;
2052 
2053 	/*
2054 	 * PRM:
2055 	 *
2056 	 * OA unit is using “crclk” for its functionality. When trunk
2057 	 * level clock gating takes place, OA clock would be gated,
2058 	 * unable to count the events from non-render clock domain.
2059 	 * Render clock gating must be disabled when OA is enabled to
2060 	 * count the events from non-render domain. Unit level clock
2061 	 * gating for RCS should also be disabled.
2062 	 */
2063 	intel_uncore_rmw(uncore, GEN7_MISCCPCTL,
2064 			 GEN7_DOP_CLOCK_GATE_ENABLE, 0);
2065 	intel_uncore_rmw(uncore, GEN6_UCGCTL1,
2066 			 0, GEN6_CSUNIT_CLOCK_GATE_DISABLE);
2067 
2068 	return emit_oa_config(stream,
2069 			      stream->oa_config, oa_context(stream),
2070 			      active);
2071 }
2072 
2073 static void hsw_disable_metric_set(struct i915_perf_stream *stream)
2074 {
2075 	struct intel_uncore *uncore = stream->uncore;
2076 
2077 	intel_uncore_rmw(uncore, GEN6_UCGCTL1,
2078 			 GEN6_CSUNIT_CLOCK_GATE_DISABLE, 0);
2079 	intel_uncore_rmw(uncore, GEN7_MISCCPCTL,
2080 			 0, GEN7_DOP_CLOCK_GATE_ENABLE);
2081 
2082 	intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0);
2083 }
2084 
2085 static u32 oa_config_flex_reg(const struct i915_oa_config *oa_config,
2086 			      i915_reg_t reg)
2087 {
2088 	u32 mmio = i915_mmio_reg_offset(reg);
2089 	int i;
2090 
2091 	/*
2092 	 * This arbitrary default will select the 'EU FPU0 Pipeline
2093 	 * Active' event. In the future it's anticipated that there
2094 	 * will be an explicit 'No Event' we can select, but not yet...
2095 	 */
2096 	if (!oa_config)
2097 		return 0;
2098 
2099 	for (i = 0; i < oa_config->flex_regs_len; i++) {
2100 		if (i915_mmio_reg_offset(oa_config->flex_regs[i].addr) == mmio)
2101 			return oa_config->flex_regs[i].value;
2102 	}
2103 
2104 	return 0;
2105 }
2106 /*
2107  * NB: It must always remain pointer safe to run this even if the OA unit
2108  * has been disabled.
2109  *
2110  * It's fine to put out-of-date values into these per-context registers
2111  * in the case that the OA unit has been disabled.
2112  */
2113 static void
2114 gen8_update_reg_state_unlocked(const struct intel_context *ce,
2115 			       const struct i915_perf_stream *stream)
2116 {
2117 	u32 ctx_oactxctrl = stream->perf->ctx_oactxctrl_offset;
2118 	u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset;
2119 	/* The MMIO offsets for Flex EU registers aren't contiguous */
2120 	i915_reg_t flex_regs[] = {
2121 		EU_PERF_CNTL0,
2122 		EU_PERF_CNTL1,
2123 		EU_PERF_CNTL2,
2124 		EU_PERF_CNTL3,
2125 		EU_PERF_CNTL4,
2126 		EU_PERF_CNTL5,
2127 		EU_PERF_CNTL6,
2128 	};
2129 	u32 *reg_state = ce->lrc_reg_state;
2130 	int i;
2131 
2132 	reg_state[ctx_oactxctrl + 1] =
2133 		(stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
2134 		(stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
2135 		GEN8_OA_COUNTER_RESUME;
2136 
2137 	for (i = 0; i < ARRAY_SIZE(flex_regs); i++)
2138 		reg_state[ctx_flexeu0 + i * 2 + 1] =
2139 			oa_config_flex_reg(stream->oa_config, flex_regs[i]);
2140 }
2141 
2142 struct flex {
2143 	i915_reg_t reg;
2144 	u32 offset;
2145 	u32 value;
2146 };
2147 
2148 static int
2149 gen8_store_flex(struct i915_request *rq,
2150 		struct intel_context *ce,
2151 		const struct flex *flex, unsigned int count)
2152 {
2153 	u32 offset;
2154 	u32 *cs;
2155 
2156 	cs = intel_ring_begin(rq, 4 * count);
2157 	if (IS_ERR(cs))
2158 		return PTR_ERR(cs);
2159 
2160 	offset = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET;
2161 	do {
2162 		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
2163 		*cs++ = offset + flex->offset * sizeof(u32);
2164 		*cs++ = 0;
2165 		*cs++ = flex->value;
2166 	} while (flex++, --count);
2167 
2168 	intel_ring_advance(rq, cs);
2169 
2170 	return 0;
2171 }
2172 
2173 static int
2174 gen8_load_flex(struct i915_request *rq,
2175 	       struct intel_context *ce,
2176 	       const struct flex *flex, unsigned int count)
2177 {
2178 	u32 *cs;
2179 
2180 	GEM_BUG_ON(!count || count > 63);
2181 
2182 	cs = intel_ring_begin(rq, 2 * count + 2);
2183 	if (IS_ERR(cs))
2184 		return PTR_ERR(cs);
2185 
2186 	*cs++ = MI_LOAD_REGISTER_IMM(count);
2187 	do {
2188 		*cs++ = i915_mmio_reg_offset(flex->reg);
2189 		*cs++ = flex->value;
2190 	} while (flex++, --count);
2191 	*cs++ = MI_NOOP;
2192 
2193 	intel_ring_advance(rq, cs);
2194 
2195 	return 0;
2196 }
2197 
2198 static int gen8_modify_context(struct intel_context *ce,
2199 			       const struct flex *flex, unsigned int count)
2200 {
2201 	struct i915_request *rq;
2202 	int err;
2203 
2204 	rq = intel_engine_create_kernel_request(ce->engine);
2205 	if (IS_ERR(rq))
2206 		return PTR_ERR(rq);
2207 
2208 	/* Serialise with the remote context */
2209 	err = intel_context_prepare_remote_request(ce, rq);
2210 	if (err == 0)
2211 		err = gen8_store_flex(rq, ce, flex, count);
2212 
2213 	i915_request_add(rq);
2214 	return err;
2215 }
2216 
2217 static int
2218 gen8_modify_self(struct intel_context *ce,
2219 		 const struct flex *flex, unsigned int count,
2220 		 struct i915_active *active)
2221 {
2222 	struct i915_request *rq;
2223 	int err;
2224 
2225 	intel_engine_pm_get(ce->engine);
2226 	rq = i915_request_create(ce);
2227 	intel_engine_pm_put(ce->engine);
2228 	if (IS_ERR(rq))
2229 		return PTR_ERR(rq);
2230 
2231 	if (!IS_ERR_OR_NULL(active)) {
2232 		err = i915_active_add_request(active, rq);
2233 		if (err)
2234 			goto err_add_request;
2235 	}
2236 
2237 	err = gen8_load_flex(rq, ce, flex, count);
2238 	if (err)
2239 		goto err_add_request;
2240 
2241 err_add_request:
2242 	i915_request_add(rq);
2243 	return err;
2244 }
2245 
2246 static int gen8_configure_context(struct i915_gem_context *ctx,
2247 				  struct flex *flex, unsigned int count)
2248 {
2249 	struct i915_gem_engines_iter it;
2250 	struct intel_context *ce;
2251 	int err = 0;
2252 
2253 	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
2254 		GEM_BUG_ON(ce == ce->engine->kernel_context);
2255 
2256 		if (ce->engine->class != RENDER_CLASS)
2257 			continue;
2258 
2259 		/* Otherwise OA settings will be set upon first use */
2260 		if (!intel_context_pin_if_active(ce))
2261 			continue;
2262 
2263 		flex->value = intel_sseu_make_rpcs(ce->engine->gt, &ce->sseu);
2264 		err = gen8_modify_context(ce, flex, count);
2265 
2266 		intel_context_unpin(ce);
2267 		if (err)
2268 			break;
2269 	}
2270 	i915_gem_context_unlock_engines(ctx);
2271 
2272 	return err;
2273 }
2274 
2275 static int gen12_configure_oar_context(struct i915_perf_stream *stream,
2276 				       struct i915_active *active)
2277 {
2278 	int err;
2279 	struct intel_context *ce = stream->pinned_ctx;
2280 	u32 format = stream->oa_buffer.format;
2281 	struct flex regs_context[] = {
2282 		{
2283 			GEN8_OACTXCONTROL,
2284 			stream->perf->ctx_oactxctrl_offset + 1,
2285 			active ? GEN8_OA_COUNTER_RESUME : 0,
2286 		},
2287 	};
2288 	/* Offsets in regs_lri are not used since this configuration is only
2289 	 * applied using LRI. Initialize the correct offsets for posterity.
2290 	 */
2291 #define GEN12_OAR_OACONTROL_OFFSET 0x5B0
2292 	struct flex regs_lri[] = {
2293 		{
2294 			GEN12_OAR_OACONTROL,
2295 			GEN12_OAR_OACONTROL_OFFSET + 1,
2296 			(format << GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT) |
2297 			(active ? GEN12_OAR_OACONTROL_COUNTER_ENABLE : 0)
2298 		},
2299 		{
2300 			RING_CONTEXT_CONTROL(ce->engine->mmio_base),
2301 			CTX_CONTEXT_CONTROL,
2302 			_MASKED_FIELD(GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE,
2303 				      active ?
2304 				      GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE :
2305 				      0)
2306 		},
2307 	};
2308 
2309 	/* Modify the context image of the pinned context with regs_context */
2310 	err = intel_context_lock_pinned(ce);
2311 	if (err)
2312 		return err;
2313 
2314 	err = gen8_modify_context(ce, regs_context, ARRAY_SIZE(regs_context));
2315 	intel_context_unlock_pinned(ce);
2316 	if (err)
2317 		return err;
2318 
2319 	/* Apply regs_lri using LRI with pinned context */
2320 	return gen8_modify_self(ce, regs_lri, ARRAY_SIZE(regs_lri), active);
2321 }
2322 
2323 /*
2324  * Manages updating the per-context aspects of the OA stream
2325  * configuration across all contexts.
2326  *
2327  * The awkward consideration here is that OACTXCONTROL controls the
2328  * exponent for periodic sampling which is primarily used for system
2329  * wide profiling where we'd like a consistent sampling period even in
2330  * the face of context switches.
2331  *
2332  * Our approach of updating the register state context (as opposed to
2333  * say using a workaround batch buffer) ensures that the hardware
2334  * won't automatically reload an out-of-date timer exponent even
2335  * transiently before a WA BB could be parsed.
2336  *
2337  * This function needs to:
2338  * - Ensure the currently running context's per-context OA state is
2339  *   updated
2340  * - Ensure that all existing contexts will have the correct per-context
2341  *   OA state if they are scheduled for use.
2342  * - Ensure any new contexts will be initialized with the correct
2343  *   per-context OA state.
2344  *
2345  * Note: it's only the RCS/Render context that has any OA state.
2346  * Note: the first flex register passed must always be R_PWR_CLK_STATE
2347  */
2348 static int
2349 oa_configure_all_contexts(struct i915_perf_stream *stream,
2350 			  struct flex *regs,
2351 			  size_t num_regs,
2352 			  struct i915_active *active)
2353 {
2354 	struct drm_i915_private *i915 = stream->perf->i915;
2355 	struct intel_engine_cs *engine;
2356 	struct i915_gem_context *ctx, *cn;
2357 	int err;
2358 
2359 	lockdep_assert_held(&stream->perf->lock);
2360 
2361 	/*
2362 	 * The OA register config is setup through the context image. This image
2363 	 * might be written to by the GPU on context switch (in particular on
2364 	 * lite-restore). This means we can't safely update a context's image,
2365 	 * if this context is scheduled/submitted to run on the GPU.
2366 	 *
2367 	 * We could emit the OA register config through the batch buffer but
2368 	 * this might leave a small interval of time where the OA unit is
2369 	 * configured at an invalid sampling period.
2370 	 *
2371 	 * Note that since we emit all requests from a single ring, there
2372 	 * is still an implicit global barrier here that may cause a high
2373 	 * priority context to wait for an otherwise independent low priority
2374 	 * context. Contexts idle at the time of reconfiguration are not
2375 	 * trapped behind the barrier.
2376 	 */
2377 	spin_lock(&i915->gem.contexts.lock);
2378 	list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
2379 		if (!kref_get_unless_zero(&ctx->ref))
2380 			continue;
2381 
2382 		spin_unlock(&i915->gem.contexts.lock);
2383 
2384 		err = gen8_configure_context(ctx, regs, num_regs);
2385 		if (err) {
2386 			i915_gem_context_put(ctx);
2387 			return err;
2388 		}
2389 
2390 		spin_lock(&i915->gem.contexts.lock);
2391 		list_safe_reset_next(ctx, cn, link);
2392 		i915_gem_context_put(ctx);
2393 	}
2394 	spin_unlock(&i915->gem.contexts.lock);
2395 
2396 	/*
2397 	 * After updating all other contexts, we need to modify ourselves.
2398 	 * If we don't modify the kernel_context, we do not get events while
2399 	 * idle.
2400 	 */
2401 	for_each_uabi_engine(engine, i915) {
2402 		struct intel_context *ce = engine->kernel_context;
2403 
2404 		if (engine->class != RENDER_CLASS)
2405 			continue;
2406 
2407 		regs[0].value = intel_sseu_make_rpcs(engine->gt, &ce->sseu);
2408 
2409 		err = gen8_modify_self(ce, regs, num_regs, active);
2410 		if (err)
2411 			return err;
2412 	}
2413 
2414 	return 0;
2415 }
2416 
2417 static int
2418 gen12_configure_all_contexts(struct i915_perf_stream *stream,
2419 			     const struct i915_oa_config *oa_config,
2420 			     struct i915_active *active)
2421 {
2422 	struct flex regs[] = {
2423 		{
2424 			GEN8_R_PWR_CLK_STATE(RENDER_RING_BASE),
2425 			CTX_R_PWR_CLK_STATE,
2426 		},
2427 	};
2428 
2429 	return oa_configure_all_contexts(stream,
2430 					 regs, ARRAY_SIZE(regs),
2431 					 active);
2432 }
2433 
2434 static int
2435 lrc_configure_all_contexts(struct i915_perf_stream *stream,
2436 			   const struct i915_oa_config *oa_config,
2437 			   struct i915_active *active)
2438 {
2439 	/* The MMIO offsets for Flex EU registers aren't contiguous */
2440 	const u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset;
2441 #define ctx_flexeuN(N) (ctx_flexeu0 + 2 * (N) + 1)
2442 	struct flex regs[] = {
2443 		{
2444 			GEN8_R_PWR_CLK_STATE(RENDER_RING_BASE),
2445 			CTX_R_PWR_CLK_STATE,
2446 		},
2447 		{
2448 			GEN8_OACTXCONTROL,
2449 			stream->perf->ctx_oactxctrl_offset + 1,
2450 		},
2451 		{ EU_PERF_CNTL0, ctx_flexeuN(0) },
2452 		{ EU_PERF_CNTL1, ctx_flexeuN(1) },
2453 		{ EU_PERF_CNTL2, ctx_flexeuN(2) },
2454 		{ EU_PERF_CNTL3, ctx_flexeuN(3) },
2455 		{ EU_PERF_CNTL4, ctx_flexeuN(4) },
2456 		{ EU_PERF_CNTL5, ctx_flexeuN(5) },
2457 		{ EU_PERF_CNTL6, ctx_flexeuN(6) },
2458 	};
2459 #undef ctx_flexeuN
2460 	int i;
2461 
2462 	regs[1].value =
2463 		(stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
2464 		(stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
2465 		GEN8_OA_COUNTER_RESUME;
2466 
2467 	for (i = 2; i < ARRAY_SIZE(regs); i++)
2468 		regs[i].value = oa_config_flex_reg(oa_config, regs[i].reg);
2469 
2470 	return oa_configure_all_contexts(stream,
2471 					 regs, ARRAY_SIZE(regs),
2472 					 active);
2473 }
2474 
2475 static int
2476 gen8_enable_metric_set(struct i915_perf_stream *stream,
2477 		       struct i915_active *active)
2478 {
2479 	struct intel_uncore *uncore = stream->uncore;
2480 	struct i915_oa_config *oa_config = stream->oa_config;
2481 	int ret;
2482 
2483 	/*
2484 	 * We disable slice/unslice clock ratio change reports on SKL since
2485 	 * they are too noisy. The HW generates a lot of redundant reports
2486 	 * where the ratio hasn't really changed causing a lot of redundant
2487 	 * where the ratio hasn't really changed, causing a lot of redundant
2488 	 * work for userspace and increasing the chances we'll hit buffer
2489 	 *
2490 	 * Although we don't currently use the 'disable overrun' OABUFFER
2491 	 * feature it's worth noting that clock ratio reports have to be
2492 	 * disabled before considering to use that feature since the HW doesn't
2493 	 * correctly block these reports.
2494 	 *
2495 	 * Currently none of the high-level metrics we have depend on knowing
2496 	 * this ratio to normalize.
2497 	 *
2498 	 * Note: This register is not power context saved and restored, but
2499 	 * that's OK considering that we disable RC6 while the OA unit is
2500 	 * enabled.
2501 	 *
2502 	 * The _INCLUDE_CLK_RATIO bit allows the slice/unslice frequency to
2503 	 * be read back from automatically triggered reports, as part of the
2504 	 * RPT_ID field.
2505 	 */
2506 	if (IS_GRAPHICS_VER(stream->perf->i915, 9, 11)) {
2507 		intel_uncore_write(uncore, GEN8_OA_DEBUG,
2508 				   _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
2509 						      GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
2510 	}
2511 
2512 	/*
2513 	 * Update all contexts prior to writing the mux configurations, as we
2514 	 * need to make sure all slices/subslices are ON before writing to
2515 	 * NOA registers.
2516 	 */
2517 	ret = lrc_configure_all_contexts(stream, oa_config, active);
2518 	if (ret)
2519 		return ret;
2520 
2521 	return emit_oa_config(stream,
2522 			      stream->oa_config, oa_context(stream),
2523 			      active);
2524 }
2525 
2526 static u32 oag_report_ctx_switches(const struct i915_perf_stream *stream)
2527 {
2528 	return _MASKED_FIELD(GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS,
2529 			     (stream->sample_flags & SAMPLE_OA_REPORT) ?
2530 			     0 : GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS);
2531 }
2532 
2533 static int
2534 gen12_enable_metric_set(struct i915_perf_stream *stream,
2535 			struct i915_active *active)
2536 {
2537 	struct intel_uncore *uncore = stream->uncore;
2538 	struct i915_oa_config *oa_config = stream->oa_config;
2539 	bool periodic = stream->periodic;
2540 	u32 period_exponent = stream->period_exponent;
2541 	int ret;
2542 
2543 	intel_uncore_write(uncore, GEN12_OAG_OA_DEBUG,
2544 			   /* Disable clk ratio reports, like previous Gens. */
2545 			   _MASKED_BIT_ENABLE(GEN12_OAG_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
2546 					      GEN12_OAG_OA_DEBUG_INCLUDE_CLK_RATIO) |
2547 			   /*
2548 			    * If the user didn't require OA reports, instruct
2549 			    * the hardware not to emit ctx switch reports.
2550 			    */
2551 			   oag_report_ctx_switches(stream));
2552 
2553 	intel_uncore_write(uncore, GEN12_OAG_OAGLBCTXCTRL, periodic ?
2554 			   (GEN12_OAG_OAGLBCTXCTRL_COUNTER_RESUME |
2555 			    GEN12_OAG_OAGLBCTXCTRL_TIMER_ENABLE |
2556 			    (period_exponent << GEN12_OAG_OAGLBCTXCTRL_TIMER_PERIOD_SHIFT))
2557 			    : 0);
2558 
2559 	/*
2560 	 * Update all contexts prior to writing the mux configurations, as we
2561 	 * need to make sure all slices/subslices are ON before writing to
2562 	 * NOA registers.
2563 	 */
2564 	ret = gen12_configure_all_contexts(stream, oa_config, active);
2565 	if (ret)
2566 		return ret;
2567 
2568 	/*
2569 	 * For Gen12, performance counters are context
2570 	 * saved/restored. Only enable it for the context that
2571 	 * requested this.
2572 	 */
2573 	if (stream->ctx) {
2574 		ret = gen12_configure_oar_context(stream, active);
2575 		if (ret)
2576 			return ret;
2577 	}
2578 
2579 	return emit_oa_config(stream,
2580 			      stream->oa_config, oa_context(stream),
2581 			      active);
2582 }
2583 
2584 static void gen8_disable_metric_set(struct i915_perf_stream *stream)
2585 {
2586 	struct intel_uncore *uncore = stream->uncore;
2587 
2588 	/* Reset all contexts' slices/subslices configurations. */
2589 	lrc_configure_all_contexts(stream, NULL, NULL);
2590 
2591 	intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0);
2592 }
2593 
2594 static void gen11_disable_metric_set(struct i915_perf_stream *stream)
2595 {
2596 	struct intel_uncore *uncore = stream->uncore;
2597 
2598 	/* Reset all contexts' slices/subslices configurations. */
2599 	lrc_configure_all_contexts(stream, NULL, NULL);
2600 
2601 	/* Make sure we disable noa to save power. */
2602 	intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
2603 }
2604 
2605 static void gen12_disable_metric_set(struct i915_perf_stream *stream)
2606 {
2607 	struct intel_uncore *uncore = stream->uncore;
2608 
2609 	/* Reset all contexts' slices/subslices configurations. */
2610 	gen12_configure_all_contexts(stream, NULL, NULL);
2611 
2612 	/* disable the context save/restore or OAR counters */
2613 	if (stream->ctx)
2614 		gen12_configure_oar_context(stream, NULL);
2615 
2616 	/* Make sure we disable noa to save power. */
2617 	intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
2618 }
2619 
2620 static void gen7_oa_enable(struct i915_perf_stream *stream)
2621 {
2622 	struct intel_uncore *uncore = stream->uncore;
2623 	struct i915_gem_context *ctx = stream->ctx;
2624 	u32 ctx_id = stream->specific_ctx_id;
2625 	bool periodic = stream->periodic;
2626 	u32 period_exponent = stream->period_exponent;
2627 	u32 report_format = stream->oa_buffer.format;
2628 
2629 	/*
2630 	 * Reset buf pointers so we don't forward reports from before now.
2631 	 *
2632 	 * Think carefully if considering trying to avoid this, since it
2633 	 * also ensures status flags and the buffer itself are cleared
2634 	 * in error paths, and we have checks for invalid reports based
2635 	 * on the assumption that certain fields are written to zeroed
2636 	 * memory, which this helps maintain.
2637 	 */
2638 	gen7_init_oa_buffer(stream);
2639 
2640 	intel_uncore_write(uncore, GEN7_OACONTROL,
2641 			   (ctx_id & GEN7_OACONTROL_CTX_MASK) |
2642 			   (period_exponent <<
2643 			    GEN7_OACONTROL_TIMER_PERIOD_SHIFT) |
2644 			   (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) |
2645 			   (report_format << GEN7_OACONTROL_FORMAT_SHIFT) |
2646 			   (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) |
2647 			   GEN7_OACONTROL_ENABLE);
2648 }
2649 
2650 static void gen8_oa_enable(struct i915_perf_stream *stream)
2651 {
2652 	struct intel_uncore *uncore = stream->uncore;
2653 	u32 report_format = stream->oa_buffer.format;
2654 
2655 	/*
2656 	 * Reset buf pointers so we don't forward reports from before now.
2657 	 *
2658 	 * Think carefully if considering trying to avoid this, since it
2659 	 * also ensures status flags and the buffer itself are cleared
2660 	 * in error paths, and we have checks for invalid reports based
2661 	 * on the assumption that certain fields are written to zeroed
2662 	 * memory, which this helps maintain.
2663 	 */
2664 	gen8_init_oa_buffer(stream);
2665 
2666 	/*
2667 	 * Note: we don't rely on the hardware to perform single context
2668 	 * filtering and instead filter on the CPU based on the context-id
2669 	 * field of reports.
2670 	 */
2671 	intel_uncore_write(uncore, GEN8_OACONTROL,
2672 			   (report_format << GEN8_OA_REPORT_FORMAT_SHIFT) |
2673 			   GEN8_OA_COUNTER_ENABLE);
2674 }
2675 
2676 static void gen12_oa_enable(struct i915_perf_stream *stream)
2677 {
2678 	struct intel_uncore *uncore = stream->uncore;
2679 	u32 report_format = stream->oa_buffer.format;
2680 
2681 	/*
2682 	 * If we don't want OA reports from the OA buffer, then we don't even
2683 	 * need to program the OAG unit.
2684 	 */
2685 	if (!(stream->sample_flags & SAMPLE_OA_REPORT))
2686 		return;
2687 
2688 	gen12_init_oa_buffer(stream);
2689 
2690 	intel_uncore_write(uncore, GEN12_OAG_OACONTROL,
2691 			   (report_format << GEN12_OAG_OACONTROL_OA_COUNTER_FORMAT_SHIFT) |
2692 			   GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE);
2693 }
2694 
2695 /**
2696  * i915_oa_stream_enable - handle `I915_PERF_IOCTL_ENABLE` for OA stream
2697  * @stream: An i915 perf stream opened for OA metrics
2698  *
2699  * [Re]enables hardware periodic sampling according to the period configured
2700  * when opening the stream. This also starts a hrtimer that will periodically
2701  * check for data in the circular OA buffer for notifying userspace (e.g.
2702  * during a read() or poll()).
2703  */
2704 static void i915_oa_stream_enable(struct i915_perf_stream *stream)
2705 {
2706 	stream->pollin = false;
2707 
2708 	stream->perf->ops.oa_enable(stream);
2709 
2710 	if (stream->sample_flags & SAMPLE_OA_REPORT)
2711 		hrtimer_start(&stream->poll_check_timer,
2712 			      ns_to_ktime(stream->poll_oa_period),
2713 			      HRTIMER_MODE_REL_PINNED);
2714 }
2715 
2716 static void gen7_oa_disable(struct i915_perf_stream *stream)
2717 {
2718 	struct intel_uncore *uncore = stream->uncore;
2719 
2720 	intel_uncore_write(uncore, GEN7_OACONTROL, 0);
2721 	if (intel_wait_for_register(uncore,
2722 				    GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0,
2723 				    50))
2724 		drm_err(&stream->perf->i915->drm,
2725 			"wait for OA to be disabled timed out\n");
2726 }
2727 
2728 static void gen8_oa_disable(struct i915_perf_stream *stream)
2729 {
2730 	struct intel_uncore *uncore = stream->uncore;
2731 
2732 	intel_uncore_write(uncore, GEN8_OACONTROL, 0);
2733 	if (intel_wait_for_register(uncore,
2734 				    GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0,
2735 				    50))
2736 		drm_err(&stream->perf->i915->drm,
2737 			"wait for OA to be disabled timed out\n");
2738 }
2739 
2740 static void gen12_oa_disable(struct i915_perf_stream *stream)
2741 {
2742 	struct intel_uncore *uncore = stream->uncore;
2743 
2744 	intel_uncore_write(uncore, GEN12_OAG_OACONTROL, 0);
2745 	if (intel_wait_for_register(uncore,
2746 				    GEN12_OAG_OACONTROL,
2747 				    GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE, 0,
2748 				    50))
2749 		drm_err(&stream->perf->i915->drm,
2750 			"wait for OA to be disabled timed out\n");
2751 
2752 	intel_uncore_write(uncore, GEN12_OA_TLB_INV_CR, 1);
2753 	if (intel_wait_for_register(uncore,
2754 				    GEN12_OA_TLB_INV_CR,
2755 				    1, 0,
2756 				    50))
2757 		drm_err(&stream->perf->i915->drm,
2758 			"wait for OA tlb invalidate timed out\n");
2759 }
2760 
2761 /**
2762  * i915_oa_stream_disable - handle `I915_PERF_IOCTL_DISABLE` for OA stream
2763  * @stream: An i915 perf stream opened for OA metrics
2764  *
2765  * Stops the OA unit from periodically writing counter reports into the
2766  * circular OA buffer. This also stops the hrtimer that periodically checks for
2767  * data in the circular OA buffer, for notifying userspace.
2768  */
2769 static void i915_oa_stream_disable(struct i915_perf_stream *stream)
2770 {
2771 	stream->perf->ops.oa_disable(stream);
2772 
2773 	if (stream->sample_flags & SAMPLE_OA_REPORT)
2774 		hrtimer_cancel(&stream->poll_check_timer);
2775 }
2776 
2777 static const struct i915_perf_stream_ops i915_oa_stream_ops = {
2778 	.destroy = i915_oa_stream_destroy,
2779 	.enable = i915_oa_stream_enable,
2780 	.disable = i915_oa_stream_disable,
2781 	.wait_unlocked = i915_oa_wait_unlocked,
2782 	.poll_wait = i915_oa_poll_wait,
2783 	.read = i915_oa_read,
2784 };
2785 
2786 static int i915_perf_stream_enable_sync(struct i915_perf_stream *stream)
2787 {
2788 	struct i915_active *active;
2789 	int err;
2790 
2791 	active = i915_active_create();
2792 	if (!active)
2793 		return -ENOMEM;
2794 
2795 	err = stream->perf->ops.enable_metric_set(stream, active);
2796 	if (err == 0)
2797 		__i915_active_wait(active, TASK_UNINTERRUPTIBLE);
2798 
2799 	i915_active_put(active);
2800 	return err;
2801 }
2802 
2803 static void
2804 get_default_sseu_config(struct intel_sseu *out_sseu,
2805 			struct intel_engine_cs *engine)
2806 {
2807 	const struct sseu_dev_info *devinfo_sseu = &engine->gt->info.sseu;
2808 
2809 	*out_sseu = intel_sseu_from_device_info(devinfo_sseu);
2810 
2811 	if (GRAPHICS_VER(engine->i915) == 11) {
2812 		/*
2813 		 * We only need subslice count so it doesn't matter which ones
2814 		 * we select - just turn off low bits in the amount of half of
2815 		 * all available subslices per slice.
2816 		 */
2817 		out_sseu->subslice_mask =
2818 			~(~0 << (hweight8(out_sseu->subslice_mask) / 2));
2819 		out_sseu->slice_mask = 0x1;
2820 	}
2821 }
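
/*
 * Worked example (illustrative): on a gen11 part with 8 subslices in the
 * mask, hweight8(mask) / 2 == 4 and ~(~0 << 4) == 0xf, i.e. we keep the
 * four lowest subslices of a single slice.
 */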
2822 
2823 static int
2824 get_sseu_config(struct intel_sseu *out_sseu,
2825 		struct intel_engine_cs *engine,
2826 		const struct drm_i915_gem_context_param_sseu *drm_sseu)
2827 {
2828 	if (drm_sseu->engine.engine_class != engine->uabi_class ||
2829 	    drm_sseu->engine.engine_instance != engine->uabi_instance)
2830 		return -EINVAL;
2831 
2832 	return i915_gem_user_to_context_sseu(engine->gt, drm_sseu, out_sseu);
2833 }
2834 
2835 /**
2836  * i915_oa_stream_init - validate combined props for OA stream and init
2837  * @stream: An i915 perf stream
2838  * @param: The open parameters passed to `DRM_I915_PERF_OPEN`
2839  * @props: The property state that configures stream (individually validated)
2840  *
2841  * While read_properties_unlocked() validates properties in isolation it
2842  * doesn't ensure that the combination necessarily makes sense.
2843  *
2844  * At this point it has been determined that userspace wants a stream of
2845  * OA metrics, but still we need to further validate the combined
2846  * properties are OK.
2847  *
2848  * If the configuration makes sense then we can allocate memory for
2849  * a circular OA buffer and apply the requested metric set configuration.
2850  *
2851  * Returns: zero on success or a negative error code.
2852  */
2853 static int i915_oa_stream_init(struct i915_perf_stream *stream,
2854 			       struct drm_i915_perf_open_param *param,
2855 			       struct perf_open_properties *props)
2856 {
2857 	struct drm_i915_private *i915 = stream->perf->i915;
2858 	struct i915_perf *perf = stream->perf;
2859 	int format_size;
2860 	int ret;
2861 
2862 	if (!props->engine) {
2863 		DRM_DEBUG("OA engine not specified\n");
2864 		return -EINVAL;
2865 	}
2866 
2867 	/*
2868 	 * If the sysfs metrics/ directory wasn't registered for some
2869 	 * reason then don't let userspace try their luck with config
2870 	 * IDs
2871 	 */
2872 	if (!perf->metrics_kobj) {
2873 		DRM_DEBUG("OA metrics weren't advertised via sysfs\n");
2874 		return -EINVAL;
2875 	}
2876 
2877 	if (!(props->sample_flags & SAMPLE_OA_REPORT) &&
2878 	    (GRAPHICS_VER(perf->i915) < 12 || !stream->ctx)) {
2879 		DRM_DEBUG("Only OA report sampling supported\n");
2880 		return -EINVAL;
2881 	}
2882 
2883 	if (!perf->ops.enable_metric_set) {
2884 		DRM_DEBUG("OA unit not supported\n");
2885 		return -ENODEV;
2886 	}
2887 
2888 	/*
2889 	 * To avoid the complexity of having to accurately filter
2890 	 * counter reports and marshal to the appropriate client
2891 	 * we currently only allow exclusive access.
2892 	 */
2893 	if (perf->exclusive_stream) {
2894 		DRM_DEBUG("OA unit already in use\n");
2895 		return -EBUSY;
2896 	}
2897 
2898 	if (!props->oa_format) {
2899 		DRM_DEBUG("OA report format not specified\n");
2900 		return -EINVAL;
2901 	}
2902 
2903 	stream->engine = props->engine;
2904 	stream->uncore = stream->engine->gt->uncore;
2905 
2906 	stream->sample_size = sizeof(struct drm_i915_perf_record_header);
2907 
2908 	format_size = perf->oa_formats[props->oa_format].size;
2909 
2910 	stream->sample_flags = props->sample_flags;
2911 	stream->sample_size += format_size;
2912 
2913 	stream->oa_buffer.format_size = format_size;
2914 	if (drm_WARN_ON(&i915->drm, stream->oa_buffer.format_size == 0))
2915 		return -EINVAL;
2916 
2917 	stream->hold_preemption = props->hold_preemption;
2918 
2919 	stream->oa_buffer.format =
2920 		perf->oa_formats[props->oa_format].format;
2921 
2922 	stream->periodic = props->oa_periodic;
2923 	if (stream->periodic)
2924 		stream->period_exponent = props->oa_period_exponent;
2925 
2926 	if (stream->ctx) {
2927 		ret = oa_get_render_ctx_id(stream);
2928 		if (ret) {
2929 			DRM_DEBUG("Invalid context id to filter with\n");
2930 			return ret;
2931 		}
2932 	}
2933 
2934 	ret = alloc_noa_wait(stream);
2935 	if (ret) {
2936 		DRM_DEBUG("Unable to allocate NOA wait batch buffer\n");
2937 		goto err_noa_wait_alloc;
2938 	}
2939 
2940 	stream->oa_config = i915_perf_get_oa_config(perf, props->metrics_set);
2941 	if (!stream->oa_config) {
2942 		DRM_DEBUG("Invalid OA config id=%i\n", props->metrics_set);
2943 		ret = -EINVAL;
2944 		goto err_config;
2945 	}
2946 
2947 	/* PRM - observability performance counters:
2948 	 *
2949 	 *   OACONTROL, performance counter enable, note:
2950 	 *
2951 	 *   "When this bit is set, in order to have coherent counts,
2952 	 *   RC6 power state and trunk clock gating must be disabled.
2953 	 *   This can be achieved by programming MMIO registers as
2954 	 *   0xA094=0 and 0xA090[31]=1"
2955 	 *
2956 	 *   In our case we are expecting that taking pm + FORCEWAKE
2957 	 *   references will effectively disable RC6.
2958 	 */
2959 	intel_engine_pm_get(stream->engine);
2960 	intel_uncore_forcewake_get(stream->uncore, FORCEWAKE_ALL);
2961 
2962 	ret = alloc_oa_buffer(stream);
2963 	if (ret)
2964 		goto err_oa_buf_alloc;
2965 
2966 	stream->ops = &i915_oa_stream_ops;
2967 
2968 	perf->sseu = props->sseu;
2969 	WRITE_ONCE(perf->exclusive_stream, stream);
2970 
2971 	ret = i915_perf_stream_enable_sync(stream);
2972 	if (ret) {
2973 		DRM_DEBUG("Unable to enable metric set\n");
2974 		goto err_enable;
2975 	}
2976 
2977 	DRM_DEBUG("opening stream oa config uuid=%s\n",
2978 		  stream->oa_config->uuid);
2979 
2980 	hrtimer_init(&stream->poll_check_timer,
2981 		     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2982 	stream->poll_check_timer.function = oa_poll_check_timer_cb;
2983 	init_waitqueue_head(&stream->poll_wq);
2984 	spin_lock_init(&stream->oa_buffer.ptr_lock);
2985 
2986 	return 0;
2987 
2988 err_enable:
2989 	WRITE_ONCE(perf->exclusive_stream, NULL);
2990 	perf->ops.disable_metric_set(stream);
2991 
2992 	free_oa_buffer(stream);
2993 
2994 err_oa_buf_alloc:
2995 	free_oa_configs(stream);
2996 
2997 	intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL);
2998 	intel_engine_pm_put(stream->engine);
2999 
3000 err_config:
3001 	free_noa_wait(stream);
3002 
3003 err_noa_wait_alloc:
3004 	if (stream->ctx)
3005 		oa_put_render_ctx_id(stream);
3006 
3007 	return ret;
3008 }
3009 
3010 void i915_oa_init_reg_state(const struct intel_context *ce,
3011 			    const struct intel_engine_cs *engine)
3012 {
3013 	struct i915_perf_stream *stream;
3014 
3015 	if (engine->class != RENDER_CLASS)
3016 		return;
3017 
3018 	/* perf.exclusive_stream serialised by lrc_configure_all_contexts() */
3019 	stream = READ_ONCE(engine->i915->perf.exclusive_stream);
3020 	if (stream && GRAPHICS_VER(stream->perf->i915) < 12)
3021 		gen8_update_reg_state_unlocked(ce, stream);
3022 }
3023 
3024 /**
3025  * i915_perf_read - handles read() FOP for i915 perf stream FDs
3026  * @file: An i915 perf stream file
3027  * @buf: destination buffer given by userspace
3028  * @count: the number of bytes userspace wants to read
3029  * @ppos: (inout) file seek position (unused)
3030  *
3031  * The entry point for handling a read() on a stream file descriptor from
3032  * userspace. Most of the work is left to the i915_perf_read_locked() and
3033  * &i915_perf_stream_ops->read, but to spare stream implementations (of
3034  * which we might have multiple later) the complexity, we handle blocking reads here.
3035  *
3036  * We can also consistently treat trying to read from a disabled stream
3037  * as an IO error so implementations can assume the stream is enabled
3038  * while reading.
3039  *
3040  * Returns: The number of bytes copied or a negative error code on failure.
3041  */
3042 static ssize_t i915_perf_read(struct file *file,
3043 			      char __user *buf,
3044 			      size_t count,
3045 			      loff_t *ppos)
3046 {
3047 	struct i915_perf_stream *stream = file->private_data;
3048 	struct i915_perf *perf = stream->perf;
3049 	size_t offset = 0;
3050 	int ret;
3051 
3052 	/* To ensure it's handled consistently we simply treat all reads of a
3053 	 * disabled stream as an error. In particular it might otherwise lead
3054 	 * to a deadlock for blocking file descriptors...
3055 	 */
3056 	if (!stream->enabled || !(stream->sample_flags & SAMPLE_OA_REPORT))
3057 		return -EIO;
3058 
3059 	if (!(file->f_flags & O_NONBLOCK)) {
3060 		/* There's the small chance of false positives from
3061 		 * stream->ops->wait_unlocked.
3062 		 *
3063 		 * E.g. with single context filtering, since we only wait until
3064 		 * the OA buffer has >= 1 report, we don't immediately know
3065 		 * whether any reports really belong to the current context.
3066 		 */
3067 		do {
3068 			ret = stream->ops->wait_unlocked(stream);
3069 			if (ret)
3070 				return ret;
3071 
3072 			mutex_lock(&perf->lock);
3073 			ret = stream->ops->read(stream, buf, count, &offset);
3074 			mutex_unlock(&perf->lock);
3075 		} while (!offset && !ret);
3076 	} else {
3077 		mutex_lock(&perf->lock);
3078 		ret = stream->ops->read(stream, buf, count, &offset);
3079 		mutex_unlock(&perf->lock);
3080 	}
3081 
3082 	/* We allow the poll checking to sometimes report false positive EPOLLIN
3083 	 * events where we might actually report EAGAIN on read() if there's
3084 	 * not really any data available. In this situation though we don't
3085 	 * want to enter a busy loop between poll() reporting a EPOLLIN event
3086 	 * and read() returning -EAGAIN. Clearing stream->pollin here
3087 	 * effectively ensures we back off until the next hrtimer callback
3088 	 * before reporting another EPOLLIN event.
3089 	 * The exception to this is if ops->read() returned -ENOSPC which means
3090 	 * that more OA data is available than could fit in the user provided
3091 	 * buffer. In this case we want the next poll() call to not block.
3092 	 */
3093 	if (ret != -ENOSPC)
3094 		stream->pollin = false;
3095 
3096 	/* Possible values for ret are 0, -EFAULT, -ENOSPC, -EIO, ... */
3097 	return offset ?: (ret ?: -EAGAIN);
3098 }
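
/*
 * Illustrative examples of the return selection above: if 384 bytes were
 * copied before ops->read() hit -ENOSPC, read() returns 384 (and pollin
 * is deliberately left set); if nothing was copied and no error occurred
 * on a non-blocking fd, read() returns -EAGAIN.
 */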
3099 
3100 static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
3101 {
3102 	struct i915_perf_stream *stream =
3103 		container_of(hrtimer, typeof(*stream), poll_check_timer);
3104 
3105 	if (oa_buffer_check_unlocked(stream)) {
3106 		stream->pollin = true;
3107 		wake_up(&stream->poll_wq);
3108 	}
3109 
3110 	hrtimer_forward_now(hrtimer,
3111 			    ns_to_ktime(stream->poll_oa_period));
3112 
3113 	return HRTIMER_RESTART;
3114 }
3115 
3116 /**
3117  * i915_perf_poll_locked - poll_wait() with a suitable wait queue for stream
3118  * @stream: An i915 perf stream
3119  * @file: An i915 perf stream file
3120  * @wait: poll() state table
3121  *
3122  * For handling userspace polling on an i915 perf stream, this calls through to
3123  * &i915_perf_stream_ops->poll_wait to call poll_wait() with a wait queue that
3124  * will be woken for new stream data.
3125  *
3126  * Note: The &perf->lock mutex has been taken to serialize
3127  * with any non-file-operation driver hooks.
3128  *
3129  * Returns: any poll events that are ready without sleeping
3130  */
3131 static __poll_t i915_perf_poll_locked(struct i915_perf_stream *stream,
3132 				      struct file *file,
3133 				      poll_table *wait)
3134 {
3135 	__poll_t events = 0;
3136 
3137 	stream->ops->poll_wait(stream, file, wait);
3138 
3139 	/* Note: we don't explicitly check whether there's something to read
3140 	 * here since this path may be very hot depending on what else
3141 	 * userspace is polling, or on the timeout in use. We rely solely on
3142 	 * the hrtimer/oa_poll_check_timer_cb to notify us when there are
3143 	 * samples to read.
3144 	 */
3145 	if (stream->pollin)
3146 		events |= EPOLLIN;
3147 
3148 	return events;
3149 }
3150 
3151 /**
3152  * i915_perf_poll - call poll_wait() with a suitable wait queue for stream
3153  * @file: An i915 perf stream file
3154  * @wait: poll() state table
3155  *
3156  * For handling userspace polling on an i915 perf stream, this ensures
3157  * poll_wait() gets called with a wait queue that will be woken for new stream
3158  * data.
3159  *
3160  * Note: Implementation deferred to i915_perf_poll_locked()
3161  *
3162  * Returns: any poll events that are ready without sleeping
3163  */
3164 static __poll_t i915_perf_poll(struct file *file, poll_table *wait)
3165 {
3166 	struct i915_perf_stream *stream = file->private_data;
3167 	struct i915_perf *perf = stream->perf;
3168 	__poll_t ret;
3169 
3170 	mutex_lock(&perf->lock);
3171 	ret = i915_perf_poll_locked(stream, file, wait);
3172 	mutex_unlock(&perf->lock);
3173 
3174 	return ret;
3175 }
3176 
3177 /**
3178  * i915_perf_enable_locked - handle `I915_PERF_IOCTL_ENABLE` ioctl
3179  * @stream: A disabled i915 perf stream
3180  *
3181  * [Re]enables the associated capture of data for this stream.
3182  *
3183  * If a stream was previously enabled then there's currently no intention
3184  * to provide userspace any guarantee about the preservation of previously
3185  * buffered data.
3186  */
3187 static void i915_perf_enable_locked(struct i915_perf_stream *stream)
3188 {
3189 	if (stream->enabled)
3190 		return;
3191 
3192 	/* Allow stream->ops->enable() to refer to this */
3193 	stream->enabled = true;
3194 
3195 	if (stream->ops->enable)
3196 		stream->ops->enable(stream);
3197 
3198 	if (stream->hold_preemption)
3199 		intel_context_set_nopreempt(stream->pinned_ctx);
3200 }
3201 
3202 /**
3203  * i915_perf_disable_locked - handle `I915_PERF_IOCTL_DISABLE` ioctl
3204  * @stream: An enabled i915 perf stream
3205  *
3206  * Disables the associated capture of data for this stream.
3207  *
3208  * The intention is that disabling and re-enabling a stream will ideally be
3209  * cheaper than destroying and re-opening a stream with the same configuration,
3210  * though there are no formal guarantees about what state or buffered data
3211  * must be retained between disabling and re-enabling a stream.
3212  *
3213  * Note: while a stream is disabled it's considered an error for userspace
3214  * to attempt to read from the stream (-EIO).
3215  */
3216 static void i915_perf_disable_locked(struct i915_perf_stream *stream)
3217 {
3218 	if (!stream->enabled)
3219 		return;
3220 
3221 	/* Allow stream->ops->disable() to refer to this */
3222 	stream->enabled = false;
3223 
3224 	if (stream->hold_preemption)
3225 		intel_context_clear_nopreempt(stream->pinned_ctx);
3226 
3227 	if (stream->ops->disable)
3228 		stream->ops->disable(stream);
3229 }
3230 
3231 static long i915_perf_config_locked(struct i915_perf_stream *stream,
3232 				    unsigned long metrics_set)
3233 {
3234 	struct i915_oa_config *config;
3235 	long ret = stream->oa_config->id;
3236 
3237 	config = i915_perf_get_oa_config(stream->perf, metrics_set);
3238 	if (!config)
3239 		return -EINVAL;
3240 
3241 	if (config != stream->oa_config) {
3242 		int err;
3243 
3244 		/*
3245 		 * If OA is bound to a specific context, emit the
3246 		 * reconfiguration inline from that context. The update
3247 		 * will then be ordered with respect to submission on that
3248 		 * context.
3249 		 *
3250 		 * When set globally, we use a low priority kernel context,
3251 		 * so it will effectively take effect when idle.
3252 		 */
3253 		err = emit_oa_config(stream, config, oa_context(stream), NULL);
3254 		if (!err)
3255 			config = xchg(&stream->oa_config, config);
3256 		else
3257 			ret = err;
3258 	}
3259 
3260 	i915_oa_config_put(config);
3261 
3262 	return ret;
3263 }
3264 
3265 /**
3266  * i915_perf_ioctl_locked - support ioctl() usage with i915 perf stream FDs
3267  * @stream: An i915 perf stream
3268  * @cmd: the ioctl request
3269  * @arg: the ioctl data
3270  *
3271  * Note: The &perf->lock mutex has been taken to serialize
3272  * with any non-file-operation driver hooks.
3273  *
3274  * Returns: zero on success or a negative error code. Returns -EINVAL for
3275  * an unknown ioctl request.
3276  */
3277 static long i915_perf_ioctl_locked(struct i915_perf_stream *stream,
3278 				   unsigned int cmd,
3279 				   unsigned long arg)
3280 {
3281 	switch (cmd) {
3282 	case I915_PERF_IOCTL_ENABLE:
3283 		i915_perf_enable_locked(stream);
3284 		return 0;
3285 	case I915_PERF_IOCTL_DISABLE:
3286 		i915_perf_disable_locked(stream);
3287 		return 0;
3288 	case I915_PERF_IOCTL_CONFIG:
3289 		return i915_perf_config_locked(stream, arg);
3290 	}
3291 
3292 	return -EINVAL;
3293 }
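
/*
 * Illustrative sketch (not part of the driver): driving these ioctls from
 * userspace on a stream fd returned by DRM_IOCTL_I915_PERF_OPEN. The
 * request macros come from uapi/drm/i915_drm.h; stream_fd and config_id
 * are assumptions (config_id being a metric set ID found under the sysfs
 * metrics/ directory or returned by DRM_IOCTL_I915_PERF_ADD_CONFIG):
 *
 *	#include <sys/ioctl.h>
 *	#include <drm/i915_drm.h>
 *
 *	ioctl(stream_fd, I915_PERF_IOCTL_DISABLE, 0);
 *	long old_id = ioctl(stream_fd, I915_PERF_IOCTL_CONFIG, config_id);
 *	ioctl(stream_fd, I915_PERF_IOCTL_ENABLE, 0);
 *
 * On success I915_PERF_IOCTL_CONFIG returns the ID of the config that was
 * active before the call, which the caller can use to restore it later.
 */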
3294 
3295 /**
3296  * i915_perf_ioctl - support ioctl() usage with i915 perf stream FDs
3297  * @file: An i915 perf stream file
3298  * @cmd: the ioctl request
3299  * @arg: the ioctl data
3300  *
3301  * Implementation deferred to i915_perf_ioctl_locked().
3302  *
3303  * Returns: zero on success or a negative error code. Returns -EINVAL for
3304  * an unknown ioctl request.
3305  */
3306 static long i915_perf_ioctl(struct file *file,
3307 			    unsigned int cmd,
3308 			    unsigned long arg)
3309 {
3310 	struct i915_perf_stream *stream = file->private_data;
3311 	struct i915_perf *perf = stream->perf;
3312 	long ret;
3313 
3314 	mutex_lock(&perf->lock);
3315 	ret = i915_perf_ioctl_locked(stream, cmd, arg);
3316 	mutex_unlock(&perf->lock);
3317 
3318 	return ret;
3319 }
3320 
3321 /**
3322  * i915_perf_destroy_locked - destroy an i915 perf stream
3323  * @stream: An i915 perf stream
3324  *
3325  * Frees all resources associated with the given i915 perf @stream, disabling
3326  * any associated data capture in the process.
3327  *
3328  * Note: The &perf->lock mutex has been taken to serialize
3329  * with any non-file-operation driver hooks.
3330  */
3331 static void i915_perf_destroy_locked(struct i915_perf_stream *stream)
3332 {
3333 	if (stream->enabled)
3334 		i915_perf_disable_locked(stream);
3335 
3336 	if (stream->ops->destroy)
3337 		stream->ops->destroy(stream);
3338 
3339 	if (stream->ctx)
3340 		i915_gem_context_put(stream->ctx);
3341 
3342 	kfree(stream);
3343 }
3344 
3345 /**
3346  * i915_perf_release - handles userspace close() of a stream file
3347  * @inode: anonymous inode associated with file
3348  * @file: An i915 perf stream file
3349  *
3350  * Cleans up any resources associated with an open i915 perf stream file.
3351  *
3352  * NB: close() can't really fail from the userspace point of view.
3353  *
3354  * Returns: zero on success or a negative error code.
3355  */
3356 static int i915_perf_release(struct inode *inode, struct file *file)
3357 {
3358 	struct i915_perf_stream *stream = file->private_data;
3359 	struct i915_perf *perf = stream->perf;
3360 
3361 	mutex_lock(&perf->lock);
3362 	i915_perf_destroy_locked(stream);
3363 	mutex_unlock(&perf->lock);
3364 
3365 	/* Release the reference the perf stream kept on the driver. */
3366 	drm_dev_put(&perf->i915->drm);
3367 
3368 	return 0;
3369 }
3370 
3371 
3372 static const struct file_operations fops = {
3373 	.owner		= THIS_MODULE,
3374 	.llseek		= no_llseek,
3375 	.release	= i915_perf_release,
3376 	.poll		= i915_perf_poll,
3377 	.read		= i915_perf_read,
3378 	.unlocked_ioctl	= i915_perf_ioctl,
3379 	/* Our ioctls don't take pointer arguments, so it's safe to use the
3380 	 * same function to handle 32-bit compatibility.
3381 	 */
3382 	.compat_ioctl   = i915_perf_ioctl,
3383 };
3384 
3385 
3386 /**
3387  * i915_perf_open_ioctl_locked - DRM ioctl() for userspace to open a stream FD
3388  * @perf: i915 perf instance
3389  * @param: The open parameters passed to `DRM_I915_PERF_OPEN`
3390  * @props: individually validated u64 property value pairs
3391  * @file: drm file
3392  *
3393  * See i915_perf_ioctl_open() for interface details.
3394  *
3395  * Implements further stream config validation and stream initialization on
3396  * behalf of i915_perf_open_ioctl() with the &perf->lock mutex
3397  * taken to serialize with any non-file-operation driver hooks.
3398  *
3399  * Note: at this point the @props have only been validated in isolation and
3400  * it's still necessary to validate that the combination of properties makes
3401  * sense.
3402  *
3403  * In the case where userspace is interested in OA unit metrics then further
3404  * config validation and stream initialization details will be handled by
3405  * i915_oa_stream_init(). The code here should only validate config state that
3406  * will be relevant to all stream types / backends.
3407  *
3408  * Returns: zero on success or a negative error code.
3409  */
3410 static int
3411 i915_perf_open_ioctl_locked(struct i915_perf *perf,
3412 			    struct drm_i915_perf_open_param *param,
3413 			    struct perf_open_properties *props,
3414 			    struct drm_file *file)
3415 {
3416 	struct i915_gem_context *specific_ctx = NULL;
3417 	struct i915_perf_stream *stream = NULL;
3418 	unsigned long f_flags = 0;
3419 	bool privileged_op = true;
3420 	int stream_fd;
3421 	int ret;
3422 
3423 	if (props->single_context) {
3424 		u32 ctx_handle = props->ctx_handle;
3425 		struct drm_i915_file_private *file_priv = file->driver_priv;
3426 
3427 		specific_ctx = i915_gem_context_lookup(file_priv, ctx_handle);
3428 		if (IS_ERR(specific_ctx)) {
3429 			DRM_DEBUG("Failed to look up context with ID %u for opening perf stream\n",
3430 				  ctx_handle);
3431 			ret = PTR_ERR(specific_ctx);
3432 			goto err;
3433 		}
3434 	}
3435 
3436 	/*
3437 	 * On Haswell the OA unit supports clock gating off for a specific
3438 	 * context and in this mode there's no visibility of metrics for the
3439 	 * rest of the system, which we consider acceptable for a
3440 	 * non-privileged client.
3441 	 *
3442 	 * For Gen8->11 the OA unit no longer supports clock gating off for a
3443 	 * specific context and the kernel can't securely stop the counters
3444 	 * from updating as system-wide / global values. Even though we can
3445 	 * filter reports based on the included context ID we can't block
3446 	 * clients from seeing the raw / global counter values via
3447 	 * MI_REPORT_PERF_COUNT commands and so consider it a privileged op to
3448 	 * enable the OA unit by default.
3449 	 *
3450 	 * For Gen12+ we gain a new OAR unit that only monitors the RCS on a
3451 	 * per context basis. So we can relax requirements there if the user
3452 	 * doesn't request global stream access (i.e. query based sampling
3453 	 * using MI_REPORT_PERF_COUNT).
3454 	 */
3455 	if (IS_HASWELL(perf->i915) && specific_ctx)
3456 		privileged_op = false;
3457 	else if (GRAPHICS_VER(perf->i915) == 12 && specific_ctx &&
3458 		 (props->sample_flags & SAMPLE_OA_REPORT) == 0)
3459 		privileged_op = false;
3460 
3461 	if (props->hold_preemption) {
3462 		if (!props->single_context) {
3463 			DRM_DEBUG("preemption disable with no context\n");
3464 			ret = -EINVAL;
3465 			goto err;
3466 		}
3467 		privileged_op = true;
3468 	}
3469 
3470 	/*
3471 	 * Asking for SSEU configuration is a privileged operation.
3472 	 */
3473 	if (props->has_sseu)
3474 		privileged_op = true;
3475 	else
3476 		get_default_sseu_config(&props->sseu, props->engine);
3477 
3478 	/* Similar to perf's kernel.perf_event_paranoid sysctl option
3479 	 * we check a dev.i915.perf_stream_paranoid sysctl option
3480 	 * to determine if it's ok to access system wide OA counters
3481 	 * without CAP_PERFMON or CAP_SYS_ADMIN privileges.
3482 	 */
3483 	if (privileged_op &&
3484 	    i915_perf_stream_paranoid && !perfmon_capable()) {
3485 		DRM_DEBUG("Insufficient privileges to open i915 perf stream\n");
3486 		ret = -EACCES;
3487 		goto err_ctx;
3488 	}
3489 
3490 	stream = kzalloc(sizeof(*stream), GFP_KERNEL);
3491 	if (!stream) {
3492 		ret = -ENOMEM;
3493 		goto err_ctx;
3494 	}
3495 
3496 	stream->perf = perf;
3497 	stream->ctx = specific_ctx;
3498 	stream->poll_oa_period = props->poll_oa_period;
3499 
3500 	ret = i915_oa_stream_init(stream, param, props);
3501 	if (ret)
3502 		goto err_alloc;
3503 
3504 	/* We avoid simply assigning stream->sample_flags = props->sample_flags
3505 	 * so that _stream_init can check the combination of sample flags more
3506 	 * thoroughly; still, this is the expected result at this point.
3507 	 */
3508 	if (WARN_ON(stream->sample_flags != props->sample_flags)) {
3509 		ret = -ENODEV;
3510 		goto err_flags;
3511 	}
3512 
3513 	if (param->flags & I915_PERF_FLAG_FD_CLOEXEC)
3514 		f_flags |= O_CLOEXEC;
3515 	if (param->flags & I915_PERF_FLAG_FD_NONBLOCK)
3516 		f_flags |= O_NONBLOCK;
3517 
3518 	stream_fd = anon_inode_getfd("[i915_perf]", &fops, stream, f_flags);
3519 	if (stream_fd < 0) {
3520 		ret = stream_fd;
3521 		goto err_flags;
3522 	}
3523 
3524 	if (!(param->flags & I915_PERF_FLAG_DISABLED))
3525 		i915_perf_enable_locked(stream);
3526 
3527 	/* Take a reference on the driver that will be kept with stream_fd
3528 	 * until its release.
3529 	 */
3530 	drm_dev_get(&perf->i915->drm);
3531 
3532 	return stream_fd;
3533 
3534 err_flags:
3535 	if (stream->ops->destroy)
3536 		stream->ops->destroy(stream);
3537 err_alloc:
3538 	kfree(stream);
3539 err_ctx:
3540 	if (specific_ctx)
3541 		i915_gem_context_put(specific_ctx);
3542 err:
3543 	return ret;
3544 }
3545 
3546 static u64 oa_exponent_to_ns(struct i915_perf *perf, int exponent)
3547 {
3548 	return intel_gt_clock_interval_to_ns(perf->i915->ggtt.vm.gt,
3549 					     2ULL << exponent);
3550 }
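
/*
 * A worked example of the exponent math above: the period for exponent E is
 * (2 << E) timestamp ticks, so it doubles with each increment. Assuming
 * HSW's 80ns OA timestamp tick (matching the 160ns minimum mentioned below),
 * E = 0 gives 2 ticks = 160ns, E = 10 gives 2048 ticks ~= 164us, and
 * E = 31 gives 2^32 ticks ~= 5.7 minutes.
 */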
3551 
3552 static __always_inline bool
3553 oa_format_valid(struct i915_perf *perf, enum drm_i915_oa_format format)
3554 {
3555 	return test_bit(format, perf->format_mask);
3556 }
3557 
3558 static __always_inline void
3559 oa_format_add(struct i915_perf *perf, enum drm_i915_oa_format format)
3560 {
3561 	__set_bit(format, perf->format_mask);
3562 }
3563 
3564 /**
3565  * read_properties_unlocked - validate + copy userspace stream open properties
3566  * @perf: i915 perf instance
3567  * @uprops: The array of u64 key value pairs given by userspace
3568  * @n_props: The number of key value pairs expected in @uprops
3569  * @props: The stream configuration built up while validating properties
3570  *
3571  * Note this function only validates properties in isolation; it doesn't
3572  * validate that the combination of properties makes sense or that all
3573  * properties necessary for a particular kind of stream have been set.
3574  *
3575  * Note that there currently aren't any ordering requirements for properties so
3576  * we shouldn't validate or assume anything about ordering here. This doesn't
3577  * rule out defining new properties with ordering requirements in the future.
3578  */
3579 static int read_properties_unlocked(struct i915_perf *perf,
3580 				    u64 __user *uprops,
3581 				    u32 n_props,
3582 				    struct perf_open_properties *props)
3583 {
3584 	u64 __user *uprop = uprops;
3585 	u32 i;
3586 	int ret;
3587 
3588 	memset(props, 0, sizeof(struct perf_open_properties));
3589 	props->poll_oa_period = DEFAULT_POLL_PERIOD_NS;
3590 
3591 	if (!n_props) {
3592 		DRM_DEBUG("No i915 perf properties given\n");
3593 		return -EINVAL;
3594 	}
3595 
3596 	/* At the moment we only support using i915-perf on the RCS. */
3597 	props->engine = intel_engine_lookup_user(perf->i915,
3598 						 I915_ENGINE_CLASS_RENDER,
3599 						 0);
3600 	if (!props->engine) {
3601 		DRM_DEBUG("No RENDER-capable engines\n");
3602 		return -EINVAL;
3603 	}
3604 
3605 	/* Considering that ID = 0 is reserved, and assuming that we don't
3606 	 * (currently) expect any configuration to specify duplicate values
3607 	 * for a particular property ID, the last _PROP_MAX value is one
3608 	 * greater than the maximum number of properties we expect to get
3609 	 * from userspace.
3610 	 */
3611 	if (n_props >= DRM_I915_PERF_PROP_MAX) {
3612 		DRM_DEBUG("More i915 perf properties specified than exist\n");
3613 		return -EINVAL;
3614 	}
3615 
3616 	for (i = 0; i < n_props; i++) {
3617 		u64 oa_period, oa_freq_hz;
3618 		u64 id, value;
3619 
3620 		ret = get_user(id, uprop);
3621 		if (ret)
3622 			return ret;
3623 
3624 		ret = get_user(value, uprop + 1);
3625 		if (ret)
3626 			return ret;
3627 
3628 		if (id == 0 || id >= DRM_I915_PERF_PROP_MAX) {
3629 			DRM_DEBUG("Unknown i915 perf property ID\n");
3630 			return -EINVAL;
3631 		}
3632 
3633 		switch ((enum drm_i915_perf_property_id)id) {
3634 		case DRM_I915_PERF_PROP_CTX_HANDLE:
3635 			props->single_context = 1;
3636 			props->ctx_handle = value;
3637 			break;
3638 		case DRM_I915_PERF_PROP_SAMPLE_OA:
3639 			if (value)
3640 				props->sample_flags |= SAMPLE_OA_REPORT;
3641 			break;
3642 		case DRM_I915_PERF_PROP_OA_METRICS_SET:
3643 			if (value == 0) {
3644 				DRM_DEBUG("Unknown OA metric set ID\n");
3645 				return -EINVAL;
3646 			}
3647 			props->metrics_set = value;
3648 			break;
3649 		case DRM_I915_PERF_PROP_OA_FORMAT:
3650 			if (value == 0 || value >= I915_OA_FORMAT_MAX) {
3651 				DRM_DEBUG("Out-of-range OA report format %llu\n",
3652 					  value);
3653 				return -EINVAL;
3654 			}
3655 			if (!oa_format_valid(perf, value)) {
3656 				DRM_DEBUG("Unsupported OA report format %llu\n",
3657 					  value);
3658 				return -EINVAL;
3659 			}
3660 			props->oa_format = value;
3661 			break;
3662 		case DRM_I915_PERF_PROP_OA_EXPONENT:
3663 			if (value > OA_EXPONENT_MAX) {
3664 				DRM_DEBUG("OA timer exponent too high (> %u)\n",
3665 					 OA_EXPONENT_MAX);
3666 				return -EINVAL;
3667 			}
3668 
3669 			/* Theoretically we can program the OA unit to sample
3670 			 * e.g. every 160ns for HSW, 167ns for BDW/SKL or 104ns
3671 			 * for BXT. We don't allow such high sampling frequencies
3672 			 * by default without CAP_PERFMON or CAP_SYS_ADMIN.
3673 			 */
3674 
3675 			BUILD_BUG_ON(sizeof(oa_period) != 8);
3676 			oa_period = oa_exponent_to_ns(perf, value);
3677 
3678 			/* This check is primarily to ensure that oa_period <=
3679 			 * UINT32_MAX (before passing to do_div which only
3680 			 * accepts a u32 denominator), but we can also skip
3681 			 * checking anything < 1Hz which implicitly can't be
3682 			 * limited via an integer oa_max_sample_rate.
3683 			 */
3684 			if (oa_period <= NSEC_PER_SEC) {
3685 				u64 tmp = NSEC_PER_SEC;
3686 				do_div(tmp, oa_period);
3687 				oa_freq_hz = tmp;
3688 			} else
3689 				oa_freq_hz = 0;
3690 
3691 			if (oa_freq_hz > i915_oa_max_sample_rate && !perfmon_capable()) {
3692 				DRM_DEBUG("OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without CAP_PERFMON or CAP_SYS_ADMIN privileges\n",
3693 					  i915_oa_max_sample_rate);
3694 				return -EACCES;
3695 			}
3696 
3697 			props->oa_periodic = true;
3698 			props->oa_period_exponent = value;
3699 			break;
3700 		case DRM_I915_PERF_PROP_HOLD_PREEMPTION:
3701 			props->hold_preemption = !!value;
3702 			break;
3703 		case DRM_I915_PERF_PROP_GLOBAL_SSEU: {
3704 			struct drm_i915_gem_context_param_sseu user_sseu;
3705 
3706 			if (copy_from_user(&user_sseu,
3707 					   u64_to_user_ptr(value),
3708 					   sizeof(user_sseu))) {
3709 				DRM_DEBUG("Unable to copy global sseu parameter\n");
3710 				return -EFAULT;
3711 			}
3712 
3713 			ret = get_sseu_config(&props->sseu, props->engine, &user_sseu);
3714 			if (ret) {
3715 				DRM_DEBUG("Invalid SSEU configuration\n");
3716 				return ret;
3717 			}
3718 			props->has_sseu = true;
3719 			break;
3720 		}
3721 		case DRM_I915_PERF_PROP_POLL_OA_PERIOD:
3722 			if (value < 100000 /* 100us */) {
3723 				DRM_DEBUG("OA availability timer too small (%lluns < 100us)\n",
3724 					  value);
3725 				return -EINVAL;
3726 			}
3727 			props->poll_oa_period = value;
3728 			break;
3729 		case DRM_I915_PERF_PROP_MAX:
3730 			MISSING_CASE(id);
3731 			return -EINVAL;
3732 		}
3733 
3734 		uprop += 2;
3735 	}
3736 
3737 	return 0;
3738 }
3739 
3740 /**
3741  * i915_perf_open_ioctl - DRM ioctl() for userspace to open a stream FD
3742  * @dev: drm device
3743  * @data: ioctl data copied from userspace (unvalidated)
3744  * @file: drm file
3745  *
3746  * Validates the stream open parameters given by userspace including flags
3747  * and an array of u64 key, value pair properties.
3748  *
3749  * Very little is assumed up front about the nature of the stream being
3750  * opened (for instance we don't assume it's for periodic OA unit metrics). An
3751  * i915-perf stream is expected to be a suitable interface for other forms of
3752  * buffered data written by the GPU besides periodic OA metrics.
3753  *
3754  * Note we copy the properties from userspace outside of the i915 perf
3755  * mutex to avoid an awkward lockdep with mmap_lock.
3756  *
3757  * Most of the implementation details are handled by
3758  * i915_perf_open_ioctl_locked() after taking the &perf->lock
3759  * mutex for serializing with any non-file-operation driver hooks.
3760  *
3761  * Return: A newly opened i915 Perf stream file descriptor or negative
3762  * error code on failure.
3763  */
3764 int i915_perf_open_ioctl(struct drm_device *dev, void *data,
3765 			 struct drm_file *file)
3766 {
3767 	struct i915_perf *perf = &to_i915(dev)->perf;
3768 	struct drm_i915_perf_open_param *param = data;
3769 	struct perf_open_properties props;
3770 	u32 known_open_flags;
3771 	int ret;
3772 
3773 	if (!perf->i915) {
3774 		DRM_DEBUG("i915 perf interface not available for this system\n");
3775 		return -ENOTSUPP;
3776 	}
3777 
3778 	known_open_flags = I915_PERF_FLAG_FD_CLOEXEC |
3779 			   I915_PERF_FLAG_FD_NONBLOCK |
3780 			   I915_PERF_FLAG_DISABLED;
3781 	if (param->flags & ~known_open_flags) {
3782 		DRM_DEBUG("Unknown drm_i915_perf_open_param flag\n");
3783 		return -EINVAL;
3784 	}
3785 
3786 	ret = read_properties_unlocked(perf,
3787 				       u64_to_user_ptr(param->properties_ptr),
3788 				       param->num_properties,
3789 				       &props);
3790 	if (ret)
3791 		return ret;
3792 
3793 	mutex_lock(&perf->lock);
3794 	ret = i915_perf_open_ioctl_locked(perf, param, &props, file);
3795 	mutex_unlock(&perf->lock);
3796 
3797 	return ret;
3798 }
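
/*
 * Illustrative sketch (not built here) of a minimal open request using the
 * u64 (key, value) property pairs parsed above. All macros and structs come
 * from uapi/drm/i915_drm.h; drm_fd and metrics_set_id are assumptions:
 *
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *	#include <drm/i915_drm.h>
 *
 *	uint64_t props[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA,		1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET,	metrics_set_id,
 *		DRM_I915_PERF_PROP_OA_FORMAT,
 *			I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT,		16,
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC |
 *			 I915_PERF_FLAG_FD_NONBLOCK,
 *		.num_properties = sizeof(props) / (2 * sizeof(uint64_t)),
 *		.properties_ptr = (uintptr_t)props,
 *	};
 *	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 */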
3799 
3800 /**
3801  * i915_perf_register - exposes i915-perf to userspace
3802  * @i915: i915 device instance
3803  *
3804  * In particular OA metric sets are advertised under a sysfs metrics/
3805  * directory allowing userspace to enumerate valid IDs that can be
3806  * used to open an i915-perf stream.
3807  */
3808 void i915_perf_register(struct drm_i915_private *i915)
3809 {
3810 	struct i915_perf *perf = &i915->perf;
3811 
3812 	if (!perf->i915)
3813 		return;
3814 
3815 	/* Take perf->lock to be sure we're synchronized with any attempted
3816 	 * i915_perf_open_ioctl(), considering that we register after being
3817 	 * exposed to userspace.
3818 	 */
3819 	mutex_lock(&perf->lock);
3820 
3821 	perf->metrics_kobj =
3822 		kobject_create_and_add("metrics",
3823 				       &i915->drm.primary->kdev->kobj);
3824 
3825 	mutex_unlock(&perf->lock);
3826 }
3827 
3828 /**
3829  * i915_perf_unregister - hide i915-perf from userspace
3830  * @i915: i915 device instance
3831  *
3832  * i915-perf state cleanup is split up into an 'unregister' and
3833  * 'deinit' phase where the interface is first hidden from
3834  * userspace by i915_perf_unregister() before cleaning up
3835  * remaining state in i915_perf_fini().
3836  */
3837 void i915_perf_unregister(struct drm_i915_private *i915)
3838 {
3839 	struct i915_perf *perf = &i915->perf;
3840 
3841 	if (!perf->metrics_kobj)
3842 		return;
3843 
3844 	kobject_put(perf->metrics_kobj);
3845 	perf->metrics_kobj = NULL;
3846 }
3847 
3848 static bool gen8_is_valid_flex_addr(struct i915_perf *perf, u32 addr)
3849 {
3850 	static const i915_reg_t flex_eu_regs[] = {
3851 		EU_PERF_CNTL0,
3852 		EU_PERF_CNTL1,
3853 		EU_PERF_CNTL2,
3854 		EU_PERF_CNTL3,
3855 		EU_PERF_CNTL4,
3856 		EU_PERF_CNTL5,
3857 		EU_PERF_CNTL6,
3858 	};
3859 	int i;
3860 
3861 	for (i = 0; i < ARRAY_SIZE(flex_eu_regs); i++) {
3862 		if (i915_mmio_reg_offset(flex_eu_regs[i]) == addr)
3863 			return true;
3864 	}
3865 	return false;
3866 }
3867 
3868 static bool reg_in_range_table(u32 addr, const struct i915_range *table)
3869 {
3870 	while (table->start || table->end) {
3871 		if (addr >= table->start && addr <= table->end)
3872 			return true;
3873 
3874 		table++;
3875 	}
3876 
3877 	return false;
3878 }
3879 
3880 #define REG_EQUAL(addr, mmio) \
3881 	((addr) == i915_mmio_reg_offset(mmio))
3882 
3883 static const struct i915_range gen7_oa_b_counters[] = {
3884 	{ .start = 0x2710, .end = 0x272c },	/* OASTARTTRIG[1-8] */
3885 	{ .start = 0x2740, .end = 0x275c },	/* OAREPORTTRIG[1-8] */
3886 	{ .start = 0x2770, .end = 0x27ac },	/* OACEC[0-7][0-1] */
3887 	{}
3888 };
3889 
3890 static const struct i915_range gen12_oa_b_counters[] = {
3891 	{ .start = 0x2b2c, .end = 0x2b2c },	/* GEN12_OAG_OA_PESS */
3892 	{ .start = 0xd900, .end = 0xd91c },	/* GEN12_OAG_OASTARTTRIG[1-8] */
3893 	{ .start = 0xd920, .end = 0xd93c },	/* GEN12_OAG_OAREPORTTRIG[1-8] */
3894 	{ .start = 0xd940, .end = 0xd97c },	/* GEN12_OAG_CEC[0-7][0-1] */
3895 	{ .start = 0xdc00, .end = 0xdc3c },	/* GEN12_OAG_SCEC[0-7][0-1] */
3896 	{ .start = 0xdc40, .end = 0xdc40 },	/* GEN12_OAG_SPCTR_CNF */
3897 	{ .start = 0xdc44, .end = 0xdc44 },	/* GEN12_OAA_DBG_REG */
3898 	{}
3899 };
3900 
3901 static const struct i915_range gen7_oa_mux_regs[] = {
3902 	{ .start = 0x91b8, .end = 0x91cc },	/* OA_PERFCNT[1-2], OA_PERFMATRIX */
3903 	{ .start = 0x9800, .end = 0x9888 },	/* MICRO_BP0_0 - NOA_WRITE */
3904 	{ .start = 0xe180, .end = 0xe180 },	/* HALF_SLICE_CHICKEN2 */
3905 	{}
3906 };
3907 
3908 static const struct i915_range hsw_oa_mux_regs[] = {
3909 	{ .start = 0x09e80, .end = 0x09ea4 }, /* HSW_MBVID2_NOA[0-9] */
3910 	{ .start = 0x09ec0, .end = 0x09ec0 }, /* HSW_MBVID2_MISR0 */
3911 	{ .start = 0x25100, .end = 0x2ff90 },
3912 	{}
3913 };
3914 
3915 static const struct i915_range chv_oa_mux_regs[] = {
3916 	{ .start = 0x182300, .end = 0x1823a4 },
3917 	{}
3918 };
3919 
3920 static const struct i915_range gen8_oa_mux_regs[] = {
3921 	{ .start = 0x0d00, .end = 0x0d2c },	/* RPM_CONFIG[0-1], NOA_CONFIG[0-8] */
3922 	{ .start = 0x20cc, .end = 0x20cc },	/* WAIT_FOR_RC6_EXIT */
3923 	{}
3924 };
3925 
3926 static const struct i915_range gen11_oa_mux_regs[] = {
3927 	{ .start = 0x91c8, .end = 0x91dc },	/* OA_PERFCNT[3-4] */
3928 	{}
3929 };
3930 
3931 static const struct i915_range gen12_oa_mux_regs[] = {
3932 	{ .start = 0x0d00, .end = 0x0d04 },     /* RPM_CONFIG[0-1] */
3933 	{ .start = 0x0d0c, .end = 0x0d2c },     /* NOA_CONFIG[0-8] */
3934 	{ .start = 0x9840, .end = 0x9840 },	/* GDT_CHICKEN_BITS */
3935 	{ .start = 0x9884, .end = 0x9888 },	/* NOA_WRITE */
3936 	{ .start = 0x20cc, .end = 0x20cc },	/* WAIT_FOR_RC6_EXIT */
3937 	{}
3938 };
3939 
3940 static bool gen7_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
3941 {
3942 	return reg_in_range_table(addr, gen7_oa_b_counters);
3943 }
3944 
3945 static bool gen8_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
3946 {
3947 	return reg_in_range_table(addr, gen7_oa_mux_regs) ||
3948 		reg_in_range_table(addr, gen8_oa_mux_regs);
3949 }
3950 
3951 static bool gen11_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
3952 {
3953 	return reg_in_range_table(addr, gen7_oa_mux_regs) ||
3954 		reg_in_range_table(addr, gen8_oa_mux_regs) ||
3955 		reg_in_range_table(addr, gen11_oa_mux_regs);
3956 }
3957 
3958 static bool hsw_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
3959 {
3960 	return reg_in_range_table(addr, gen7_oa_mux_regs) ||
3961 		reg_in_range_table(addr, hsw_oa_mux_regs);
3962 }
3963 
3964 static bool chv_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
3965 {
3966 	return reg_in_range_table(addr, gen7_oa_mux_regs) ||
3967 		reg_in_range_table(addr, chv_oa_mux_regs);
3968 }
3969 
3970 static bool gen12_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
3971 {
3972 	return reg_in_range_table(addr, gen12_oa_b_counters);
3973 }
3974 
3975 static bool gen12_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
3976 {
3977 	return reg_in_range_table(addr, gen12_oa_mux_regs);
3978 }
3979 
3980 static u32 mask_reg_value(u32 reg, u32 val)
3981 {
3982 	/* HALF_SLICE_CHICKEN2 is programmed with the
3983 	 * WaDisableSTUnitPowerOptimization workaround. Make sure the value
3984 	 * programmed by userspace doesn't change this.
3985 	 */
3986 	if (REG_EQUAL(reg, HALF_SLICE_CHICKEN2))
3987 		val = val & ~_MASKED_BIT_ENABLE(GEN8_ST_PO_DISABLE);
3988 
3989 	/* WAIT_FOR_RC6_EXIT has only one bit fulfilling the function
3990 	 * indicated by its name and a bunch of selection fields used by OA
3991 	 * configs.
3992 	 */
3993 	if (REG_EQUAL(reg, WAIT_FOR_RC6_EXIT))
3994 		val = val & ~_MASKED_BIT_ENABLE(HSW_WAIT_FOR_RC6_EXIT_ENABLE);
3995 
3996 	return val;
3997 }
3998 
3999 static struct i915_oa_reg *alloc_oa_regs(struct i915_perf *perf,
4000 					 bool (*is_valid)(struct i915_perf *perf, u32 addr),
4001 					 u32 __user *regs,
4002 					 u32 n_regs)
4003 {
4004 	struct i915_oa_reg *oa_regs;
4005 	int err;
4006 	u32 i;
4007 
4008 	if (!n_regs)
4009 		return NULL;
4010 
4011 	/* No is_valid function means we're not allowing any register to be programmed. */
4012 	GEM_BUG_ON(!is_valid);
4013 	if (!is_valid)
4014 		return ERR_PTR(-EINVAL);
4015 
4016 	oa_regs = kmalloc_array(n_regs, sizeof(*oa_regs), GFP_KERNEL);
4017 	if (!oa_regs)
4018 		return ERR_PTR(-ENOMEM);
4019 
4020 	for (i = 0; i < n_regs; i++) {
4021 		u32 addr, value;
4022 
4023 		err = get_user(addr, regs);
4024 		if (err)
4025 			goto addr_err;
4026 
4027 		if (!is_valid(perf, addr)) {
4028 			DRM_DEBUG("Invalid oa_reg address: %X\n", addr);
4029 			err = -EINVAL;
4030 			goto addr_err;
4031 		}
4032 
4033 		err = get_user(value, regs + 1);
4034 		if (err)
4035 			goto addr_err;
4036 
4037 		oa_regs[i].addr = _MMIO(addr);
4038 		oa_regs[i].value = mask_reg_value(addr, value);
4039 
4040 		regs += 2;
4041 	}
4042 
4043 	return oa_regs;
4044 
4045 addr_err:
4046 	kfree(oa_regs);
4047 	return ERR_PTR(err);
4048 }
4049 
4050 static ssize_t show_dynamic_id(struct device *dev,
4051 			       struct device_attribute *attr,
4052 			       char *buf)
4053 {
4054 	struct i915_oa_config *oa_config =
4055 		container_of(attr, typeof(*oa_config), sysfs_metric_id);
4056 
4057 	return sprintf(buf, "%d\n", oa_config->id);
4058 }
4059 
4060 static int create_dynamic_oa_sysfs_entry(struct i915_perf *perf,
4061 					 struct i915_oa_config *oa_config)
4062 {
4063 	sysfs_attr_init(&oa_config->sysfs_metric_id.attr);
4064 	oa_config->sysfs_metric_id.attr.name = "id";
4065 	oa_config->sysfs_metric_id.attr.mode = S_IRUGO;
4066 	oa_config->sysfs_metric_id.show = show_dynamic_id;
4067 	oa_config->sysfs_metric_id.store = NULL;
4068 
4069 	oa_config->attrs[0] = &oa_config->sysfs_metric_id.attr;
4070 	oa_config->attrs[1] = NULL;
4071 
4072 	oa_config->sysfs_metric.name = oa_config->uuid;
4073 	oa_config->sysfs_metric.attrs = oa_config->attrs;
4074 
4075 	return sysfs_create_group(perf->metrics_kobj,
4076 				  &oa_config->sysfs_metric);
4077 }
4078 
4079 /**
4080  * i915_perf_add_config_ioctl - DRM ioctl() for userspace to add a new OA config
4081  * @dev: drm device
4082  * @data: ioctl data (pointer to struct drm_i915_perf_oa_config) copied from
4083  *        userspace (unvalidated)
4084  * @file: drm file
4085  *
4086  * Validates the submitted OA registers to be saved into a new OA config that
4087  * can then be used for programming the OA unit and its NOA network.
4088  *
4089  * Returns: A newly allocated config number to be used with the perf open ioctl
4090  * or a negative error code on failure.
4091  */
4092 int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
4093 			       struct drm_file *file)
4094 {
4095 	struct i915_perf *perf = &to_i915(dev)->perf;
4096 	struct drm_i915_perf_oa_config *args = data;
4097 	struct i915_oa_config *oa_config, *tmp;
4098 	struct i915_oa_reg *regs;
4099 	int err, id;
4100 
4101 	if (!perf->i915) {
4102 		DRM_DEBUG("i915 perf interface not available for this system\n");
4103 		return -ENOTSUPP;
4104 	}
4105 
4106 	if (!perf->metrics_kobj) {
4107 		DRM_DEBUG("OA metrics weren't advertised via sysfs\n");
4108 		return -EINVAL;
4109 	}
4110 
4111 	if (i915_perf_stream_paranoid && !perfmon_capable()) {
4112 		DRM_DEBUG("Insufficient privileges to add i915 OA config\n");
4113 		return -EACCES;
4114 	}
4115 
4116 	if ((!args->mux_regs_ptr || !args->n_mux_regs) &&
4117 	    (!args->boolean_regs_ptr || !args->n_boolean_regs) &&
4118 	    (!args->flex_regs_ptr || !args->n_flex_regs)) {
4119 		DRM_DEBUG("No OA registers given\n");
4120 		return -EINVAL;
4121 	}
4122 
4123 	oa_config = kzalloc(sizeof(*oa_config), GFP_KERNEL);
4124 	if (!oa_config) {
4125 		DRM_DEBUG("Failed to allocate memory for the OA config\n");
4126 		return -ENOMEM;
4127 	}
4128 
4129 	oa_config->perf = perf;
4130 	kref_init(&oa_config->ref);
4131 
4132 	if (!uuid_is_valid(args->uuid)) {
4133 		DRM_DEBUG("Invalid uuid format for OA config\n");
4134 		err = -EINVAL;
4135 		goto reg_err;
4136 	}
4137 
4138 	/* Last character in oa_config->uuid will be 0 because oa_config was
4139 	 * allocated with kzalloc.
4140 	 */
4141 	memcpy(oa_config->uuid, args->uuid, sizeof(args->uuid));
4142 
4143 	oa_config->mux_regs_len = args->n_mux_regs;
4144 	regs = alloc_oa_regs(perf,
4145 			     perf->ops.is_valid_mux_reg,
4146 			     u64_to_user_ptr(args->mux_regs_ptr),
4147 			     args->n_mux_regs);
4148 
4149 	if (IS_ERR(regs)) {
4150 		DRM_DEBUG("Failed to create OA config for mux_regs\n");
4151 		err = PTR_ERR(regs);
4152 		goto reg_err;
4153 	}
4154 	oa_config->mux_regs = regs;
4155 
4156 	oa_config->b_counter_regs_len = args->n_boolean_regs;
4157 	regs = alloc_oa_regs(perf,
4158 			     perf->ops.is_valid_b_counter_reg,
4159 			     u64_to_user_ptr(args->boolean_regs_ptr),
4160 			     args->n_boolean_regs);
4161 
4162 	if (IS_ERR(regs)) {
4163 		DRM_DEBUG("Failed to create OA config for b_counter_regs\n");
4164 		err = PTR_ERR(regs);
4165 		goto reg_err;
4166 	}
4167 	oa_config->b_counter_regs = regs;
4168 
4169 	if (GRAPHICS_VER(perf->i915) < 8) {
4170 		if (args->n_flex_regs != 0) {
4171 			err = -EINVAL;
4172 			goto reg_err;
4173 		}
4174 	} else {
4175 		oa_config->flex_regs_len = args->n_flex_regs;
4176 		regs = alloc_oa_regs(perf,
4177 				     perf->ops.is_valid_flex_reg,
4178 				     u64_to_user_ptr(args->flex_regs_ptr),
4179 				     args->n_flex_regs);
4180 
4181 		if (IS_ERR(regs)) {
4182 			DRM_DEBUG("Failed to create OA config for flex_regs\n");
4183 			err = PTR_ERR(regs);
4184 			goto reg_err;
4185 		}
4186 		oa_config->flex_regs = regs;
4187 	}
4188 
4189 	err = mutex_lock_interruptible(&perf->metrics_lock);
4190 	if (err)
4191 		goto reg_err;
4192 
4193 	/* We shouldn't have too many configs, so this iteration shouldn't be
4194 	 * too costly.
4195 	 */
4196 	idr_for_each_entry(&perf->metrics_idr, tmp, id) {
4197 		if (!strcmp(tmp->uuid, oa_config->uuid)) {
4198 			DRM_DEBUG("OA config already exists with this uuid\n");
4199 			err = -EADDRINUSE;
4200 			goto sysfs_err;
4201 		}
4202 	}
4203 
4204 	err = create_dynamic_oa_sysfs_entry(perf, oa_config);
4205 	if (err) {
4206 		DRM_DEBUG("Failed to create sysfs entry for OA config\n");
4207 		goto sysfs_err;
4208 	}
4209 
4210 	/* Config id 0 is invalid, id 1 is reserved for the kernel's test config. */
4211 	oa_config->id = idr_alloc(&perf->metrics_idr,
4212 				  oa_config, 2,
4213 				  0, GFP_KERNEL);
4214 	if (oa_config->id < 0) {
4215 		DRM_DEBUG("Failed to allocate an ID for OA config\n");
4216 		err = oa_config->id;
4217 		goto sysfs_err;
4218 	}
4219 
4220 	mutex_unlock(&perf->metrics_lock);
4221 
4222 	DRM_DEBUG("Added config %s id=%i\n", oa_config->uuid, oa_config->id);
4223 
4224 	return oa_config->id;
4225 
4226 sysfs_err:
4227 	mutex_unlock(&perf->metrics_lock);
4228 reg_err:
4229 	i915_oa_config_put(oa_config);
4230 	DRM_DEBUG("Failed to add new OA config\n");
4231 	return err;
4232 }
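
/*
 * Illustrative sketch of registering a dynamic config from userspace. The
 * register lists are flat (addr, value) u32 pairs and every address must
 * pass the per-platform is_valid_* checks; the 0x9888/0x10800000 mux write
 * is a made-up example (0x9888 is NOA_WRITE, within the allowed range):
 *
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *	#include <drm/i915_drm.h>
 *
 *	uint32_t mux[] = { 0x9888, 0x10800000 };
 *	struct drm_i915_perf_oa_config cfg = {
 *		.uuid = "01234567-0123-0123-0123-0123456789ab",
 *		.n_mux_regs = 1,
 *		.mux_regs_ptr = (uintptr_t)mux,
 *	};
 *	int config_id = ioctl(drm_fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &cfg);
 */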
4233 
4234 /**
4235  * i915_perf_remove_config_ioctl - DRM ioctl() for userspace to remove an OA config
4236  * @dev: drm device
4237  * @data: ioctl data (pointer to u64 integer) copied from userspace
4238  * @file: drm file
4239  *
4240  * Configs can be removed while being used; they will stop appearing in sysfs
4241  * and their content will be freed when the stream using the config is closed.
4242  *
4243  * Returns: 0 on success or a negative error code on failure.
4244  */
4245 int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
4246 				  struct drm_file *file)
4247 {
4248 	struct i915_perf *perf = &to_i915(dev)->perf;
4249 	u64 *arg = data;
4250 	struct i915_oa_config *oa_config;
4251 	int ret;
4252 
4253 	if (!perf->i915) {
4254 		DRM_DEBUG("i915 perf interface not available for this system\n");
4255 		return -ENOTSUPP;
4256 	}
4257 
4258 	if (i915_perf_stream_paranoid && !perfmon_capable()) {
4259 		DRM_DEBUG("Insufficient privileges to remove i915 OA config\n");
4260 		return -EACCES;
4261 	}
4262 
4263 	ret = mutex_lock_interruptible(&perf->metrics_lock);
4264 	if (ret)
4265 		return ret;
4266 
4267 	oa_config = idr_find(&perf->metrics_idr, *arg);
4268 	if (!oa_config) {
4269 		DRM_DEBUG("Failed to remove unknown OA config\n");
4270 		ret = -ENOENT;
4271 		goto err_unlock;
4272 	}
4273 
4274 	GEM_BUG_ON(*arg != oa_config->id);
4275 
4276 	sysfs_remove_group(perf->metrics_kobj, &oa_config->sysfs_metric);
4277 
4278 	idr_remove(&perf->metrics_idr, *arg);
4279 
4280 	mutex_unlock(&perf->metrics_lock);
4281 
4282 	DRM_DEBUG("Removed config %s id=%i\n", oa_config->uuid, oa_config->id);
4283 
4284 	i915_oa_config_put(oa_config);
4285 
4286 	return 0;
4287 
4288 err_unlock:
4289 	mutex_unlock(&perf->metrics_lock);
4290 	return ret;
4291 }
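
/*
 * The inverse operation takes just a u64 config ID (a sketch, reusing
 * config_id from the add example above):
 *
 *	uint64_t id = config_id;
 *
 *	ioctl(drm_fd, DRM_IOCTL_I915_PERF_REMOVE_CONFIG, &id);
 */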
4292 
4293 static struct ctl_table oa_table[] = {
4294 	{
4295 	 .procname = "perf_stream_paranoid",
4296 	 .data = &i915_perf_stream_paranoid,
4297 	 .maxlen = sizeof(i915_perf_stream_paranoid),
4298 	 .mode = 0644,
4299 	 .proc_handler = proc_dointvec_minmax,
4300 	 .extra1 = SYSCTL_ZERO,
4301 	 .extra2 = SYSCTL_ONE,
4302 	 },
4303 	{
4304 	 .procname = "oa_max_sample_rate",
4305 	 .data = &i915_oa_max_sample_rate,
4306 	 .maxlen = sizeof(i915_oa_max_sample_rate),
4307 	 .mode = 0644,
4308 	 .proc_handler = proc_dointvec_minmax,
4309 	 .extra1 = SYSCTL_ZERO,
4310 	 .extra2 = &oa_sample_rate_hard_limit,
4311 	 },
4312 	{}
4313 };
4314 
4315 static void oa_init_supported_formats(struct i915_perf *perf)
4316 {
4317 	struct drm_i915_private *i915 = perf->i915;
4318 	enum intel_platform platform = INTEL_INFO(i915)->platform;
4319 
4320 	switch (platform) {
4321 	case INTEL_HASWELL:
4322 		oa_format_add(perf, I915_OA_FORMAT_A13);
4324 		oa_format_add(perf, I915_OA_FORMAT_A29);
4325 		oa_format_add(perf, I915_OA_FORMAT_A13_B8_C8);
4326 		oa_format_add(perf, I915_OA_FORMAT_B4_C8);
4327 		oa_format_add(perf, I915_OA_FORMAT_A45_B8_C8);
4328 		oa_format_add(perf, I915_OA_FORMAT_B4_C8_A16);
4329 		oa_format_add(perf, I915_OA_FORMAT_C4_B8);
4330 		break;
4331 
4332 	case INTEL_BROADWELL:
4333 	case INTEL_CHERRYVIEW:
4334 	case INTEL_SKYLAKE:
4335 	case INTEL_BROXTON:
4336 	case INTEL_KABYLAKE:
4337 	case INTEL_GEMINILAKE:
4338 	case INTEL_COFFEELAKE:
4339 	case INTEL_COMETLAKE:
4340 	case INTEL_ICELAKE:
4341 	case INTEL_ELKHARTLAKE:
4342 	case INTEL_JASPERLAKE:
4343 	case INTEL_TIGERLAKE:
4344 	case INTEL_ROCKETLAKE:
4345 	case INTEL_DG1:
4346 	case INTEL_ALDERLAKE_S:
4347 	case INTEL_ALDERLAKE_P:
4348 		oa_format_add(perf, I915_OA_FORMAT_A12);
4349 		oa_format_add(perf, I915_OA_FORMAT_A12_B8_C8);
4350 		oa_format_add(perf, I915_OA_FORMAT_A32u40_A4u32_B8_C8);
4351 		oa_format_add(perf, I915_OA_FORMAT_C4_B8);
4352 		break;
4353 
4354 	default:
4355 		MISSING_CASE(platform);
4356 	}
4357 }
4358 
4359 /**
4360  * i915_perf_init - initialize i915-perf state on module bind
4361  * @i915: i915 device instance
4362  *
4363  * Initializes i915-perf state without exposing anything to userspace.
4364  *
4365  * Note: i915-perf initialization is split into an 'init' and 'register'
4366  * phase with the i915_perf_register() exposing state to userspace.
4367  */
4368 void i915_perf_init(struct drm_i915_private *i915)
4369 {
4370 	struct i915_perf *perf = &i915->perf;
4371 
4372 	/* XXX const struct i915_perf_ops! */
4373 
4374 	perf->oa_formats = oa_formats;
4375 	if (IS_HASWELL(i915)) {
4376 		perf->ops.is_valid_b_counter_reg = gen7_is_valid_b_counter_addr;
4377 		perf->ops.is_valid_mux_reg = hsw_is_valid_mux_addr;
4378 		perf->ops.is_valid_flex_reg = NULL;
4379 		perf->ops.enable_metric_set = hsw_enable_metric_set;
4380 		perf->ops.disable_metric_set = hsw_disable_metric_set;
4381 		perf->ops.oa_enable = gen7_oa_enable;
4382 		perf->ops.oa_disable = gen7_oa_disable;
4383 		perf->ops.read = gen7_oa_read;
4384 		perf->ops.oa_hw_tail_read = gen7_oa_hw_tail_read;
4385 	} else if (HAS_LOGICAL_RING_CONTEXTS(i915)) {
4386 		/* Note: although we could theoretically also support the
4387 		 * legacy ringbuffer mode on BDW (and earlier iterations of
4388 		 * this driver did, before upstreaming), it didn't seem
4389 		 * worth the complexity to maintain now that BDW+ enable
4390 		 * execlist mode by default.
4391 		 */
4392 		perf->ops.read = gen8_oa_read;
4393 
4394 		if (IS_GRAPHICS_VER(i915, 8, 9)) {
4395 			perf->ops.is_valid_b_counter_reg =
4396 				gen7_is_valid_b_counter_addr;
4397 			perf->ops.is_valid_mux_reg =
4398 				gen8_is_valid_mux_addr;
4399 			perf->ops.is_valid_flex_reg =
4400 				gen8_is_valid_flex_addr;
4401 
4402 			if (IS_CHERRYVIEW(i915)) {
4403 				perf->ops.is_valid_mux_reg =
4404 					chv_is_valid_mux_addr;
4405 			}
4406 
4407 			perf->ops.oa_enable = gen8_oa_enable;
4408 			perf->ops.oa_disable = gen8_oa_disable;
4409 			perf->ops.enable_metric_set = gen8_enable_metric_set;
4410 			perf->ops.disable_metric_set = gen8_disable_metric_set;
4411 			perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
4412 
4413 			if (GRAPHICS_VER(i915) == 8) {
4414 				perf->ctx_oactxctrl_offset = 0x120;
4415 				perf->ctx_flexeu0_offset = 0x2ce;
4416 
4417 				perf->gen8_valid_ctx_bit = BIT(25);
4418 			} else {
4419 				perf->ctx_oactxctrl_offset = 0x128;
4420 				perf->ctx_flexeu0_offset = 0x3de;
4421 
4422 				perf->gen8_valid_ctx_bit = BIT(16);
4423 			}
4424 		} else if (GRAPHICS_VER(i915) == 11) {
4425 			perf->ops.is_valid_b_counter_reg =
4426 				gen7_is_valid_b_counter_addr;
4427 			perf->ops.is_valid_mux_reg =
4428 				gen11_is_valid_mux_addr;
4429 			perf->ops.is_valid_flex_reg =
4430 				gen8_is_valid_flex_addr;
4431 
4432 			perf->ops.oa_enable = gen8_oa_enable;
4433 			perf->ops.oa_disable = gen8_oa_disable;
4434 			perf->ops.enable_metric_set = gen8_enable_metric_set;
4435 			perf->ops.disable_metric_set = gen11_disable_metric_set;
4436 			perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
4437 
4438 			perf->ctx_oactxctrl_offset = 0x124;
4439 			perf->ctx_flexeu0_offset = 0x78e;
4440 
4441 			perf->gen8_valid_ctx_bit = BIT(16);
4442 		} else if (GRAPHICS_VER(i915) == 12) {
4443 			perf->ops.is_valid_b_counter_reg =
4444 				gen12_is_valid_b_counter_addr;
4445 			perf->ops.is_valid_mux_reg =
4446 				gen12_is_valid_mux_addr;
4447 			perf->ops.is_valid_flex_reg =
4448 				gen8_is_valid_flex_addr;
4449 
4450 			perf->ops.oa_enable = gen12_oa_enable;
4451 			perf->ops.oa_disable = gen12_oa_disable;
4452 			perf->ops.enable_metric_set = gen12_enable_metric_set;
4453 			perf->ops.disable_metric_set = gen12_disable_metric_set;
4454 			perf->ops.oa_hw_tail_read = gen12_oa_hw_tail_read;
4455 
4456 			perf->ctx_flexeu0_offset = 0;
4457 			perf->ctx_oactxctrl_offset = 0x144;
4458 		}
4459 	}
4460 
4461 	if (perf->ops.enable_metric_set) {
4462 		mutex_init(&perf->lock);
4463 
4464 		/* Choose a representative limit */
4465 		oa_sample_rate_hard_limit = to_gt(i915)->clock_frequency / 2;
4466 
4467 		mutex_init(&perf->metrics_lock);
4468 		idr_init_base(&perf->metrics_idr, 1);
4469 
4470 		/* We set up some ratelimit state to potentially throttle any
4471 		 * _NOTES about spurious, invalid OA reports which we don't
4472 		 * forward to userspace.
4473 		 *
4474 		 * We print a _NOTE about any throttling when closing the
4475 		 * stream instead of waiting until driver _fini which no one
4476 		 * would ever see.
4477 		 *
4478 		 * Using the same limiting factors as printk_ratelimit()
4479 		 */
4480 		ratelimit_state_init(&perf->spurious_report_rs, 5 * HZ, 10);
4481 		/* Since we use a DRM_NOTE for spurious reports it would be
4482 		 * inconsistent to let __ratelimit() automatically print a
4483 		 * warning for throttling.
4484 		 */
4485 		ratelimit_set_flags(&perf->spurious_report_rs,
4486 				    RATELIMIT_MSG_ON_RELEASE);
4487 
4488 		ratelimit_state_init(&perf->tail_pointer_race,
4489 				     5 * HZ, 10);
4490 		ratelimit_set_flags(&perf->tail_pointer_race,
4491 				    RATELIMIT_MSG_ON_RELEASE);
4492 
4493 		atomic64_set(&perf->noa_programming_delay,
4494 			     500 * 1000 /* 500us */);
4495 
4496 		perf->i915 = i915;
4497 
4498 		oa_init_supported_formats(perf);
4499 	}
4500 }
4501 
4502 static int destroy_config(int id, void *p, void *data)
4503 {
4504 	i915_oa_config_put(p);
4505 	return 0;
4506 }
4507 
4508 int i915_perf_sysctl_register(void)
4509 {
4510 	sysctl_header = register_sysctl("dev/i915", oa_table);
4511 	return 0;
4512 }
4513 
4514 void i915_perf_sysctl_unregister(void)
4515 {
4516 	unregister_sysctl_table(sysctl_header);
4517 }
4518 
4519 /**
4520  * i915_perf_fini - Counter part to i915_perf_init()
4521  * @i915: i915 device instance
4522  */
4523 void i915_perf_fini(struct drm_i915_private *i915)
4524 {
4525 	struct i915_perf *perf = &i915->perf;
4526 
4527 	if (!perf->i915)
4528 		return;
4529 
4530 	idr_for_each(&perf->metrics_idr, destroy_config, perf);
4531 	idr_destroy(&perf->metrics_idr);
4532 
4533 	memset(&perf->ops, 0, sizeof(perf->ops));
4534 	perf->i915 = NULL;
4535 }
4536 
4537 /**
4538  * i915_perf_ioctl_version - Version of the i915-perf subsystem
4539  *
4540  * This version number is used by userspace to detect available features.
4541  */
4542 int i915_perf_ioctl_version(void)
4543 {
4544 	/*
4545 	 * 1: Initial version
4546 	 *   I915_PERF_IOCTL_ENABLE
4547 	 *   I915_PERF_IOCTL_DISABLE
4548 	 *
4549 	 * 2: Added runtime modification of OA config.
4550 	 *   I915_PERF_IOCTL_CONFIG
4551 	 *
4552 	 * 3: Add DRM_I915_PERF_PROP_HOLD_PREEMPTION parameter to hold
4553 	 *    preemption on a particular context so that performance data is
4554 	 *    accessible from a delta of MI_RPC reports without looking at the
4555 	 *    OA buffer.
4556 	 *
4557 	 * 4: Add DRM_I915_PERF_PROP_ALLOWED_SSEU to limit what contexts can
4558 	 *    be run for the duration of the performance recording based on
4559 	 *    their SSEU configuration.
4560 	 *
4561 	 * 5: Add DRM_I915_PERF_PROP_POLL_OA_PERIOD parameter that controls the
4562 	 *    interval for the hrtimer used to check for OA data.
4563 	 */
4564 	return 5;
4565 }
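
/*
 * Userspace reads this revision through the getparam ioctl (a sketch; the
 * I915_PARAM_PERF_REVISION plumbing itself lives outside this file):
 *
 *	int rev = 0;
 *	struct drm_i915_getparam gp = {
 *		.param = I915_PARAM_PERF_REVISION,
 *		.value = &rev,
 *	};
 *	ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp);
 */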
4566 
4567 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
4568 #include "selftests/i915_perf.c"
4569 #endif
4570