1 /*
2  * Copyright © 2015-2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *   Robert Bragg <robert@sixbynine.org>
25  */
26 
27 
28 /**
29  * DOC: i915 Perf Overview
30  *
31  * Gen graphics supports a large number of performance counters that can help
32  * driver and application developers understand and optimize their use of the
33  * GPU.
34  *
35  * This i915 perf interface enables userspace to configure and open a file
36  * descriptor representing a stream of GPU metrics which can then be read() as
37  * a stream of sample records.
38  *
39  * The interface is particularly suited to exposing buffered metrics that are
40  * captured by DMA from the GPU, unsynchronized with and unrelated to the CPU.
41  *
42  * Streams representing a single context are accessible to applications with a
43  * corresponding drm file descriptor, such that OpenGL can use the interface
44  * without special privileges. Access to system-wide metrics requires root
45  * privileges by default, unless changed via the dev.i915.perf_stream_paranoid
46  * sysctl option.
47  *
48  */
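/*
 * To illustrate the interface described above, a minimal userspace sketch
 * (not driver code; it assumes the uapi definitions from <drm/i915_drm.h>,
 * an already-open DRM fd in drm_fd and a metrics_set_id advertised via
 * sysfs) might open a stream of OA metrics like so:
 *
 *	uint64_t properties[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *		.num_properties = 4,
 *		.properties_ptr = (uintptr_t)properties,
 *	};
 *	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 *
 * where num_properties counts (key, value) pairs; sample records can then
 * be read() from the returned stream_fd.
 */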
49 
50 /**
51  * DOC: i915 Perf History and Comparison with Core Perf
52  *
53  * The interface was initially inspired by the core Perf infrastructure but
54  * some notable differences are:
55  *
56  * i915 perf file descriptors represent a "stream" instead of an "event"; a
57  * perf event primarily corresponds to a single 64bit value, while a stream
58  * might sample sets of tightly-coupled counters, depending on the
59  * configuration.  For example the Gen OA unit isn't designed to support
60  * orthogonal configurations of individual counters; it's configured for a set
61  * of related counters. Samples for an i915 perf stream capturing OA metrics
62  * will include a set of counter values packed in a compact HW specific format.
63  * The OA unit supports a number of different packing formats which can be
64  * selected by the user opening the stream. Perf has support for grouping
65  * events, but each event in the group is configured, validated and
66  * authenticated individually with separate system calls.
67  *
68  * i915 perf stream configurations are provided as an array of u64 (key,value)
69  * pairs, instead of a fixed struct with multiple miscellaneous config members,
70  * interleaved with event-type specific members.
71  *
72  * i915 perf doesn't support exposing metrics via an mmap'd circular buffer.
73  * The supported metrics are being written to memory by the GPU unsynchronized
74  * with the CPU, using HW specific packing formats for counter sets. Sometimes
75  * the constraints on HW configuration require reports to be filtered before it
76  * would be acceptable to expose them to unprivileged applications - to hide
77  * the metrics of other processes/contexts. For these use cases a read() based
78  * interface is a good fit, and provides an opportunity to filter data as it
79  * gets copied from the GPU mapped buffers to userspace buffers.
80  *
81  *
82  * Issues hit with first prototype based on Core Perf
83  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
84  *
85  * The first prototype of this driver was based on the core perf
86  * infrastructure, and while we did make that mostly work, with some changes to
87  * perf, we found we were breaking or working around too many assumptions baked
88  * into perf's current cpu-centric design.
89  *
90  * In the end we didn't see a clear benefit to making perf's implementation and
91  * interface more complex by changing design assumptions while we knew we still
92  * wouldn't be able to use any existing perf based userspace tools.
93  *
94  * Also considering the Gen-specific nature of the Observability hardware and
95  * how userspace will sometimes need to combine i915 perf OA metrics with
96  * side-band OA data captured via MI_REPORT_PERF_COUNT commands, we're
97  * expecting the interface to be used by a platform specific userspace such as
98  * OpenGL or tools. This is to say: we aren't inherently missing out on having
99  * a standard vendor/architecture agnostic interface by not using perf.
100  *
101  *
102  * For posterity, in case we might revisit trying to adapt core perf to be
103  * better suited to exposing i915 metrics, these were the main pain points we
104  * hit:
105  *
106  * - The perf based OA PMU driver broke some significant design assumptions:
107  *
108  *   Existing perf pmus are used for profiling work on a cpu and we were
109  *   introducing the idea of _IS_DEVICE pmus with different security
110  *   implications, the need to fake cpu-related data (such as user/kernel
111  *   registers) to fit with perf's current design, and adding _DEVICE records
112  *   as a way to forward device-specific status records.
113  *
114  *   The OA unit writes reports of counters into a circular buffer, without
115  *   involvement from the CPU, making our PMU driver the first of its kind.
116  *
117  *   Given the way we periodically forwarded data from the GPU-mapped OA
118  *   buffer to perf's buffer, those bursts of sample writes looked to perf like
119  *   we were sampling too fast and so we had to subvert its throttling checks.
120  *
121  *   Perf supports groups of counters and allows those to be read via
122  *   transactions internally but transactions currently seem designed to be
123  *   explicitly initiated from the cpu (say in response to a userspace read())
124  *   and while we could pull a report out of the OA buffer we can't
125  *   trigger a report from the cpu on demand.
126  *
127  *   Related to being report based; the OA counters are configured in HW as a
128  *   set while perf generally expects counter configurations to be orthogonal.
129  *   Although counters can be associated with a group leader as they are
130  *   opened, there's no clear precedent for being able to provide group-wide
131  *   configuration attributes (for example we want to let userspace choose the
132  *   OA unit report format used to capture all counters in a set, or specify a
133  *   GPU context to filter metrics on). We avoided using perf's grouping
134  *   feature and forwarded OA reports to userspace via perf's 'raw' sample
135  *   field. This suited our userspace well considering how coupled the counters
136  *   are when normalizing them. It would be inconvenient to split
137  *   counters up into separate events, only to require userspace to recombine
138  *   them. For Mesa it's also convenient to be forwarded raw, periodic reports
139  *   for combining with the side-band raw reports it captures using
140  *   MI_REPORT_PERF_COUNT commands.
141  *
142  *   - As a side note on perf's grouping feature; there was also some concern
143  *     that using PERF_FORMAT_GROUP as a way to pack together counter values
144  *     would quite drastically inflate our sample sizes, which would likely
145  *     lower the effective sampling resolutions we could use when the available
146  *     memory bandwidth is limited.
147  *
148  *     With the OA unit's report formats, counters are packed together as 32
149  *     or 40bit values, with the largest report size being 256 bytes.
150  *
151  *     PERF_FORMAT_GROUP values are 64bit, but there doesn't appear to be a
152  *     documented ordering to the values, implying PERF_FORMAT_ID must also be
153  *     used to add a 64bit ID before each value; giving 16 bytes per counter (~976 bytes for the 61 counters of an A45_B8_C8 set, vs its 256 byte report).
154  *
155  *   Related to counter orthogonality; we can't time share the OA unit, while
156  *   event scheduling is a central design idea within perf for allowing
157  *   userspace to open + enable more events than can be configured in HW at any
158  *   one time.  The OA unit is not designed to allow re-configuration while in
159  *   use. We can't reconfigure the OA unit without losing internal OA unit
160  *   state which we can't access explicitly to save and restore. Reconfiguring
161  *   the OA unit is also relatively slow, involving ~100 register writes. From
162  *   userspace Mesa also depends on a stable OA configuration when emitting
163  *   MI_REPORT_PERF_COUNT commands and importantly the OA unit can't be
164  *   disabled while there are outstanding MI_RPC commands lest we hang the
165  *   command streamer.
166  *
167  *   The contents of sample records aren't extensible by device drivers (i.e.
168  * the sample_type bits). As an example, Sourab Gupta had been looking to
169  *   attach GPU timestamps to our OA samples. We were shoehorning OA reports
170  *   into sample records by using the 'raw' field, but it's tricky to pack more
171  *   than one thing into this field because events/core.c currently only lets a
172  *   pmu give a single raw data pointer plus len which will be copied into the
173  *   ring buffer. To include more than the OA report we'd have to copy the
174  *   report into an intermediate larger buffer. I'd been considering allowing a
175  *   vector of data+len values to be specified for copying the raw data, but
176  *   it felt like a kludge to be using the raw field for this purpose.
177  *
178  * - It felt like our perf based PMU was making some technical compromises
179  *   just for the sake of using perf:
180  *
181  *   perf_event_open() requires events to either relate to a pid or a specific
182  *   cpu core, while our device pmu related to neither.  Events opened with a
183  *   pid will be automatically enabled/disabled according to the scheduling of
184  *   that process - so not appropriate for us. When an event is related to a
185  *   cpu id, perf ensures pmu methods will be invoked via an inter-processor
186  *   interrupt on that core. To avoid invasive changes our userspace opened OA
187  *   perf events for a specific cpu. This was workable but it meant the
188  *   majority of the OA driver ran in atomic context, including all OA report
189  *   forwarding, which wasn't really necessary in our case and made
190  *   our locking requirements somewhat complex as we handled the interaction
191  *   with the rest of the i915 driver.
192  */
193 
194 #include <linux/anon_inodes.h>
195 #include <linux/sizes.h>
196 #include <linux/uuid.h>
197 
198 #include "gem/i915_gem_context.h"
199 #include "gt/intel_engine_pm.h"
200 #include "gt/intel_engine_user.h"
201 #include "gt/intel_execlists_submission.h"
202 #include "gt/intel_gpu_commands.h"
203 #include "gt/intel_gt.h"
204 #include "gt/intel_gt_clock_utils.h"
205 #include "gt/intel_lrc.h"
206 #include "gt/intel_ring.h"
207 
208 #include "i915_drv.h"
209 #include "i915_perf.h"
210 
211 /* HW requires this to be a power of two, between 128k and 16M, though the
212  * driver is currently designed assuming the largest 16M size is used, such
213  * that the overflow cases are unlikely in normal operation.
214  */
215 #define OA_BUFFER_SIZE		SZ_16M
216 
217 #define OA_TAKEN(tail, head)	((tail - head) & (OA_BUFFER_SIZE - 1))
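/* For example, with head = 0xffffc0 and tail = 0x40 in the 16M buffer,
 * OA_TAKEN(0x40, 0xffffc0) = (0x40 - 0xffffc0) & 0xffffff = 0x80 bytes
 * taken; the masking makes the arithmetic wrap with the circular buffer.
 */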
218 
219 /**
220  * DOC: OA Tail Pointer Race
221  *
222  * There's a HW race condition between OA unit tail pointer register updates and
223  * writes to memory whereby the tail pointer can sometimes get ahead of what's
224  * been written out to the OA buffer so far (in terms of what's visible to the
225  * CPU).
226  *
227  * Although this can be observed explicitly while copying reports to userspace
228  * by checking for a zeroed report-id field in tail reports, we want to account
229  * for this earlier, as part of oa_buffer_check_unlocked(), to avoid lots of
230  * redundant read() attempts.
231  *
232  * We workaround this issue in oa_buffer_check_unlocked() by reading the reports
233  * in the OA buffer, starting from the tail reported by the HW until we find a
234  * report whose first 2 dwords are non-zero, meaning the preceding report is
235  * completely in memory and ready to be read. Those dwords are also set to 0
236  * once read and the whole buffer is cleared upon OA buffer initialization. The
237  * first dword is the reason for this report while the second is the timestamp,
238  * making it fairly unlikely for both fields to be 0. A more
239  * detailed explanation is available in oa_buffer_check_unlocked().
240  *
241  * Most of the implementation details for this workaround are in
242  * oa_buffer_check_unlocked() and _append_oa_reports()
243  *
244  * Note for posterity: previously the driver used to define an effective tail
245  * pointer that lagged the real pointer by a 'tail margin' measured in bytes
246  * derived from %OA_TAIL_MARGIN_NSEC and the configured sampling frequency.
247  * This was flawed considering that the OA unit may also automatically generate
248  * non-periodic reports (such as on context switch) or the OA unit may be
249  * enabled without any periodic sampling.
250  */
251 #define OA_TAIL_MARGIN_NSEC	100000ULL
252 #define INVALID_TAIL_PTR	0xffffffff
253 
254 /* The default frequency for checking whether the OA unit has written new
255  * reports to the circular OA buffer...
256  */
257 #define DEFAULT_POLL_FREQUENCY_HZ 200
258 #define DEFAULT_POLL_PERIOD_NS (NSEC_PER_SEC / DEFAULT_POLL_FREQUENCY_HZ)
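/* i.e. a 5ms poll period by default (NSEC_PER_SEC / 200 = 5,000,000ns). */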
259 
260 /* for sysctl proc_dointvec_minmax of dev.i915.perf_stream_paranoid */
261 static u32 i915_perf_stream_paranoid = true;
262 
263 /* The maximum exponent the hardware accepts is 63 (essentially it selects one
264  * of the 64bit timestamp bits to trigger reports from) but there's currently
265  * no known use case for sampling as infrequently as once per 47 thousand years.
266  *
267  * Since the timestamps included in OA reports are only 32bits it seems
268  * reasonable to limit the OA exponent where it's still possible to account for
269  * overflow in OA report timestamps.
270  */
271 #define OA_EXPONENT_MAX 31
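/* For reference, the exponent selects timestamp bit 'exponent' so the
 * sampling period is 2^(exponent + 1) timestamp ticks, e.g. with the
 * 12.5MHz timestamp frequency of Haswell:
 *
 *	period_ns = (2 << exponent) * NSEC_PER_SEC / 12500000
 *
 * giving a 160ns period (6.25MHz, half the timestamp frequency) at
 * exponent 0, with each increment halving the rate. (Illustrative sketch;
 * other platforms derive the timestamp frequency at runtime.)
 */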
272 
273 #define INVALID_CTX_ID 0xffffffff
274 
275 /* On Gen8+ automatically triggered OA reports include a 'reason' field... */
276 #define OAREPORT_REASON_MASK           0x3f
277 #define OAREPORT_REASON_MASK_EXTENDED  0x7f
278 #define OAREPORT_REASON_SHIFT          19
279 #define OAREPORT_REASON_TIMER          (1<<0)
280 #define OAREPORT_REASON_CTX_SWITCH     (1<<3)
281 #define OAREPORT_REASON_CLK_RATIO      (1<<5)
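/* For example, on Gen8+ a timer-triggered report can be recognised with
 * something like:
 *
 *	reason = (report32[0] >> OAREPORT_REASON_SHIFT) & OAREPORT_REASON_MASK;
 *	if (reason & OAREPORT_REASON_TIMER)
 *		...
 *
 * mirroring the decode done in gen8_append_oa_reports() below.
 */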
282 
283 
284 /* For sysctl proc_dointvec_minmax of i915_oa_max_sample_rate
285  *
286  * The highest sampling frequency we can theoretically program the OA unit
287  * with is always half the timestamp frequency: E.g. 6.25MHz for Haswell.
288  *
289  * Initialized just before we register the sysctl parameter.
290  */
291 static int oa_sample_rate_hard_limit;
292 
293 /* Theoretically we can program the OA unit to sample every 160ns but don't
294  * allow that by default unless root...
295  *
296  * The default threshold of 100000Hz is based on perf's similar
297  * kernel.perf_event_max_sample_rate sysctl parameter.
298  */
299 static u32 i915_oa_max_sample_rate = 100000;
300 
301 /* XXX: beware if future OA HW adds new report formats that the current
302  * code assumes all reports have a power-of-two size and ~(size - 1) can
303  * be used as a mask to align the OA tail pointer.
304  */
305 static const struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = {
306 	[I915_OA_FORMAT_A13]	    = { 0, 64 },
307 	[I915_OA_FORMAT_A29]	    = { 1, 128 },
308 	[I915_OA_FORMAT_A13_B8_C8]  = { 2, 128 },
309 	/* A29_B8_C8 Disallowed as 192 bytes doesn't factor into buffer size */
310 	[I915_OA_FORMAT_B4_C8]	    = { 4, 64 },
311 	[I915_OA_FORMAT_A45_B8_C8]  = { 5, 256 },
312 	[I915_OA_FORMAT_B4_C8_A16]  = { 6, 128 },
313 	[I915_OA_FORMAT_C4_B8]	    = { 7, 64 },
314 };
315 
316 static const struct i915_oa_format gen8_plus_oa_formats[I915_OA_FORMAT_MAX] = {
317 	[I915_OA_FORMAT_A12]		    = { 0, 64 },
318 	[I915_OA_FORMAT_A12_B8_C8]	    = { 2, 128 },
319 	[I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
320 	[I915_OA_FORMAT_C4_B8]		    = { 7, 64 },
321 };
322 
323 static const struct i915_oa_format gen12_oa_formats[I915_OA_FORMAT_MAX] = {
324 	[I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
325 };
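/* Each entry in the tables above pairs the raw HW format field value with
 * the report size in bytes, e.g. { 5, 256 } selects HW format 5 producing
 * 256 byte reports (see struct i915_oa_format).
 */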
326 
327 #define SAMPLE_OA_REPORT      (1<<0)
328 
329 /**
330  * struct perf_open_properties - for validated properties given to open a stream
331  * @sample_flags: `DRM_I915_PERF_PROP_SAMPLE_*` properties are tracked as flags
332  * @single_context: Whether a single or all gpu contexts should be monitored
333  * @hold_preemption: Whether the preemption is disabled for the filtered
334  *                   context
335  * @ctx_handle: A gem ctx handle for use with @single_context
336  * @metrics_set: An ID for an OA unit metric set advertised via sysfs
337  * @oa_format: An OA unit HW report format
338  * @oa_periodic: Whether to enable periodic OA unit sampling
339  * @oa_period_exponent: The OA unit sampling period is derived from this
340  * @engine: The engine (typically rcs0) being monitored by the OA unit
341  * @has_sseu: Whether @sseu was specified by userspace
342  * @sseu: internal SSEU configuration computed either from the userspace
343  *        specified configuration in the opening parameters or a default value
344  *        (see get_default_sseu_config())
345  * @poll_oa_period: The period in nanoseconds at which the CPU will check for OA
346  * data availability
347  *
348  * As read_properties_unlocked() enumerates and validates the properties given
349  * to open a stream of metrics the configuration is built up in the structure
350  * which starts out zero initialized.
351  */
352 struct perf_open_properties {
353 	u32 sample_flags;
354 
355 	u64 single_context:1;
356 	u64 hold_preemption:1;
357 	u64 ctx_handle;
358 
359 	/* OA sampling state */
360 	int metrics_set;
361 	int oa_format;
362 	bool oa_periodic;
363 	int oa_period_exponent;
364 
365 	struct intel_engine_cs *engine;
366 
367 	bool has_sseu;
368 	struct intel_sseu sseu;
369 
370 	u64 poll_oa_period;
371 };
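/*
 * For instance, the @single_context/@ctx_handle members above are derived
 * from a userspace property list along the lines of (a hedged sketch; the
 * handle would come from a prior DRM_IOCTL_I915_GEM_CONTEXT_CREATE):
 *
 *	DRM_I915_PERF_PROP_CTX_HANDLE, ctx_handle,
 *	DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *
 * which read_properties_unlocked() validates before the stream is opened.
 */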
372 
373 struct i915_oa_config_bo {
374 	struct llist_node node;
375 
376 	struct i915_oa_config *oa_config;
377 	struct i915_vma *vma;
378 };
379 
380 static struct ctl_table_header *sysctl_header;
381 
382 static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer);
383 
384 void i915_oa_config_release(struct kref *ref)
385 {
386 	struct i915_oa_config *oa_config =
387 		container_of(ref, typeof(*oa_config), ref);
388 
389 	kfree(oa_config->flex_regs);
390 	kfree(oa_config->b_counter_regs);
391 	kfree(oa_config->mux_regs);
392 
393 	kfree_rcu(oa_config, rcu);
394 }
395 
396 struct i915_oa_config *
397 i915_perf_get_oa_config(struct i915_perf *perf, int metrics_set)
398 {
399 	struct i915_oa_config *oa_config;
400 
401 	rcu_read_lock();
402 	oa_config = idr_find(&perf->metrics_idr, metrics_set);
403 	if (oa_config)
404 		oa_config = i915_oa_config_get(oa_config);
405 	rcu_read_unlock();
406 
407 	return oa_config;
408 }
409 
410 static void free_oa_config_bo(struct i915_oa_config_bo *oa_bo)
411 {
412 	i915_oa_config_put(oa_bo->oa_config);
413 	i915_vma_put(oa_bo->vma);
414 	kfree(oa_bo);
415 }
416 
417 static u32 gen12_oa_hw_tail_read(struct i915_perf_stream *stream)
418 {
419 	struct intel_uncore *uncore = stream->uncore;
420 
421 	return intel_uncore_read(uncore, GEN12_OAG_OATAILPTR) &
422 	       GEN12_OAG_OATAILPTR_MASK;
423 }
424 
425 static u32 gen8_oa_hw_tail_read(struct i915_perf_stream *stream)
426 {
427 	struct intel_uncore *uncore = stream->uncore;
428 
429 	return intel_uncore_read(uncore, GEN8_OATAILPTR) & GEN8_OATAILPTR_MASK;
430 }
431 
432 static u32 gen7_oa_hw_tail_read(struct i915_perf_stream *stream)
433 {
434 	struct intel_uncore *uncore = stream->uncore;
435 	u32 oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);
436 
437 	return oastatus1 & GEN7_OASTATUS1_TAIL_MASK;
438 }
439 
440 /**
441  * oa_buffer_check_unlocked - check for data and update tail ptr state
442  * @stream: i915 stream instance
443  *
444  * This is either called via fops (for blocking reads in user ctx) or the poll
445  * check hrtimer (atomic ctx) to check the OA buffer tail pointer and see
446  * if there is data available for userspace to read.
447  *
448  * This function is central to providing a workaround for the OA unit tail
449  * pointer having a race with respect to what data is visible to the CPU.
450  * It is responsible for reading tail pointers from the hardware and giving
451  * the pointers time to 'age' before they are made available for reading.
452  * (See description of OA_TAIL_MARGIN_NSEC above for further details.)
453  *
454  * Besides returning true when there is data available to read() this function
455  * also updates the tail, aging_tail and aging_timestamp in the oa_buffer
456  * object.
457  *
458  * Note: It's safe to read OA config state here unlocked, assuming that this is
459  * only called while the stream is enabled, while the global OA configuration
460  * can't be modified.
461  *
462  * Returns: %true if the OA buffer contains data, else %false
463  */
464 static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream)
465 {
466 	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
467 	int report_size = stream->oa_buffer.format_size;
468 	unsigned long flags;
469 	bool pollin;
470 	u32 hw_tail;
471 	u64 now;
472 
473 	/* We have to consider the (unlikely) possibility that read() errors
474 	 * could result in an OA buffer reset which might reset the head and
475 	 * tail state.
476 	 */
477 	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
478 
479 	hw_tail = stream->perf->ops.oa_hw_tail_read(stream);
480 
481 	/* The tail pointer increases in 64 byte increments,
482 	 * not in report_size steps...
483 	 */
484 	hw_tail &= ~(report_size - 1);
485 
486 	now = ktime_get_mono_fast_ns();
487 
488 	if (hw_tail == stream->oa_buffer.aging_tail &&
489 	    (now - stream->oa_buffer.aging_timestamp) > OA_TAIL_MARGIN_NSEC) {
490 		/* If the HW tail hasn't moved since the last check and the HW
491 		 * tail has been aging for long enough, declare it the new
492 		 * tail.
493 		 */
494 		stream->oa_buffer.tail = stream->oa_buffer.aging_tail;
495 	} else {
496 		u32 head, tail, aged_tail;
497 
498 		/* NB: The head we observe here might effectively be a little
499 		 * out of date. If a read() is in progress, the head could be
500 		 * anywhere between this head and stream->oa_buffer.tail.
501 		 */
502 		head = stream->oa_buffer.head - gtt_offset;
503 		aged_tail = stream->oa_buffer.tail - gtt_offset;
504 
505 		hw_tail -= gtt_offset;
506 		tail = hw_tail;
507 
508 		/* Walk the stream backward until we find a report with dwords 0
509 		 * & 1 not at 0. Since the circular buffer pointers progress by
510 		 * increments of 64 bytes and reports can be up to 256
511 		 * bytes long, we can't tell whether a report has fully landed
512 		 * in memory before the first 2 dwords of the following report
513 		 * have effectively landed.
514 		 *
515 		 * This is assuming that the writes of the OA unit land in
516 		 * memory in the order they were written.
517 		 * If not : (╯°□°)╯︵ ┻━┻
518 		 */
519 		while (OA_TAKEN(tail, aged_tail) >= report_size) {
520 			u32 *report32 = (void *)(stream->oa_buffer.vaddr + tail);
521 
522 			if (report32[0] != 0 || report32[1] != 0)
523 				break;
524 
525 			tail = (tail - report_size) & (OA_BUFFER_SIZE - 1);
526 		}
527 
528 		if (OA_TAKEN(hw_tail, tail) > report_size &&
529 		    __ratelimit(&stream->perf->tail_pointer_race))
530 			DRM_NOTE("unlanded report(s) head=0x%x "
531 				 "tail=0x%x hw_tail=0x%x\n",
532 				 head, tail, hw_tail);
533 
534 		stream->oa_buffer.tail = gtt_offset + tail;
535 		stream->oa_buffer.aging_tail = gtt_offset + hw_tail;
536 		stream->oa_buffer.aging_timestamp = now;
537 	}
538 
539 	pollin = OA_TAKEN(stream->oa_buffer.tail - gtt_offset,
540 			  stream->oa_buffer.head - gtt_offset) >= report_size;
541 
542 	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
543 
544 	return pollin;
545 }
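/* Summarising the two paths above: a HW tail that has sat unchanged for
 * more than OA_TAIL_MARGIN_NSEC (100us) is trusted as-is, while a freshly
 * moved HW tail is immediately walked back to the last report known to
 * have fully landed in memory, and the raw HW tail is left to age.
 */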
546 
547 /**
548  * append_oa_status - Appends a status record to a userspace read() buffer.
549  * @stream: An i915-perf stream opened for OA metrics
550  * @buf: destination buffer given by userspace
551  * @count: the number of bytes userspace wants to read
552  * @offset: (inout): the current position for writing into @buf
553  * @type: The kind of status to report to userspace
554  *
555  * Writes a status record (such as `DRM_I915_PERF_RECORD_OA_REPORT_LOST`)
556  * into the userspace read() buffer.
557  *
558  * The @buf @offset will only be updated on success.
559  *
560  * Returns: 0 on success, negative error code on failure.
561  */
562 static int append_oa_status(struct i915_perf_stream *stream,
563 			    char __user *buf,
564 			    size_t count,
565 			    size_t *offset,
566 			    enum drm_i915_perf_record_type type)
567 {
568 	struct drm_i915_perf_record_header header = { type, 0, sizeof(header) };
569 
570 	if ((count - *offset) < header.size)
571 		return -ENOSPC;
572 
573 	if (copy_to_user(buf + *offset, &header, sizeof(header)))
574 		return -EFAULT;
575 
576 	(*offset) += header.size;
577 
578 	return 0;
579 }
580 
581 /**
582  * append_oa_sample - Copies single OA report into userspace read() buffer.
583  * @stream: An i915-perf stream opened for OA metrics
584  * @buf: destination buffer given by userspace
585  * @count: the number of bytes userspace wants to read
586  * @offset: (inout): the current position for writing into @buf
587  * @report: A single OA report to (optionally) include as part of the sample
588  *
589  * The contents of a sample are configured through `DRM_I915_PERF_PROP_SAMPLE_*`
590  * properties when opening a stream, tracked as `stream->sample_flags`. This
591  * function copies the requested components of a single sample to the given
592  * read() @buf.
593  *
594  * The @buf @offset will only be updated on success.
595  *
596  * Returns: 0 on success, negative error code on failure.
597  */
598 static int append_oa_sample(struct i915_perf_stream *stream,
599 			    char __user *buf,
600 			    size_t count,
601 			    size_t *offset,
602 			    const u8 *report)
603 {
604 	int report_size = stream->oa_buffer.format_size;
605 	struct drm_i915_perf_record_header header;
606 	u32 sample_flags = stream->sample_flags;
607 
608 	header.type = DRM_I915_PERF_RECORD_SAMPLE;
609 	header.pad = 0;
610 	header.size = stream->sample_size;
611 
612 	if ((count - *offset) < header.size)
613 		return -ENOSPC;
614 
615 	buf += *offset;
616 	if (copy_to_user(buf, &header, sizeof(header)))
617 		return -EFAULT;
618 	buf += sizeof(header);
619 
620 	if (sample_flags & SAMPLE_OA_REPORT) {
621 		if (copy_to_user(buf, report, report_size))
622 			return -EFAULT;
623 	}
624 
625 	(*offset) += header.size;
626 
627 	return 0;
628 }
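/*
 * For reference, the records produced by append_oa_status() and
 * append_oa_sample() could be consumed by userspace with a loop along
 * these lines (a hedged sketch; handle_oa_report() is a hypothetical
 * client helper and the uapi types come from <drm/i915_drm.h>):
 *
 *	uint8_t buf[4096];
 *	ssize_t len = read(stream_fd, buf, sizeof(buf));
 *	size_t off = 0;
 *
 *	while (len > 0 &&
 *	       off + sizeof(struct drm_i915_perf_record_header) <= (size_t)len) {
 *		const struct drm_i915_perf_record_header *hdr =
 *			(const void *)(buf + off);
 *
 *		if (hdr->type == DRM_I915_PERF_RECORD_SAMPLE)
 *			handle_oa_report((const uint8_t *)(hdr + 1));
 *		off += hdr->size;
 *	}
 *
 * since hdr->size covers the header plus any sample payload.
 */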
629 
630 /**
631  * gen8_append_oa_reports - Copies all buffered OA reports into
632  *			    userspace read() buffer.
633  * @stream: An i915-perf stream opened for OA metrics
634  * @buf: destination buffer given by userspace
635  * @count: the number of bytes userspace wants to read
636  * @offset: (inout): the current position for writing into @buf
637  *
638  * Notably any error condition resulting in a short read (-%ENOSPC or
639  * -%EFAULT) will be returned even though one or more records may
640  * have been successfully copied. In this case it's up to the caller
641  * to decide if the error should be squashed before returning to
642  * userspace.
643  *
644  * Note: reports are consumed from the head, and appended to the
645  * tail, so the tail chases the head?... If you think that's mad
646  * and back-to-front you're not alone, but this follows the
647  * Gen PRM naming convention.
648  *
649  * Returns: 0 on success, negative error code on failure.
650  */
651 static int gen8_append_oa_reports(struct i915_perf_stream *stream,
652 				  char __user *buf,
653 				  size_t count,
654 				  size_t *offset)
655 {
656 	struct intel_uncore *uncore = stream->uncore;
657 	int report_size = stream->oa_buffer.format_size;
658 	u8 *oa_buf_base = stream->oa_buffer.vaddr;
659 	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
660 	u32 mask = (OA_BUFFER_SIZE - 1);
661 	size_t start_offset = *offset;
662 	unsigned long flags;
663 	u32 head, tail;
664 	u32 taken;
665 	int ret = 0;
666 
667 	if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled))
668 		return -EIO;
669 
670 	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
671 
672 	head = stream->oa_buffer.head;
673 	tail = stream->oa_buffer.tail;
674 
675 	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
676 
677 	/*
678 	 * NB: oa_buffer.head/tail include the gtt_offset which we don't want
679 	 * while indexing relative to oa_buf_base.
680 	 */
681 	head -= gtt_offset;
682 	tail -= gtt_offset;
683 
684 	/*
685 	 * An out of bounds or misaligned head or tail pointer implies a driver
686 	 * bug since we validate + align the tail pointers we read from the
687 	 * hardware and we are in full control of the head pointer which should
688 	 * only be incremented by multiples of the report size (notably also
689 	 * all a power of two).
690 	 */
691 	if (drm_WARN_ONCE(&uncore->i915->drm,
692 			  head > OA_BUFFER_SIZE || head % report_size ||
693 			  tail > OA_BUFFER_SIZE || tail % report_size,
694 			  "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
695 			  head, tail))
696 		return -EIO;
697 
698 
699 	for (/* none */;
700 	     (taken = OA_TAKEN(tail, head));
701 	     head = (head + report_size) & mask) {
702 		u8 *report = oa_buf_base + head;
703 		u32 *report32 = (void *)report;
704 		u32 ctx_id;
705 		u32 reason;
706 
707 		/*
708 		 * All the report sizes factor neatly into the buffer
709 		 * size so we never expect to see a report split
710 		 * between the beginning and end of the buffer.
711 		 *
712 		 * Given the initial alignment check a misalignment
713 		 * here would imply a driver bug that would result
714 		 * in an overrun.
715 		 */
716 		if (drm_WARN_ON(&uncore->i915->drm,
717 				(OA_BUFFER_SIZE - head) < report_size)) {
718 			drm_err(&uncore->i915->drm,
719 				"Spurious OA head ptr: non-integral report offset\n");
720 			break;
721 		}
722 
723 		/*
724 		 * The reason field includes flags identifying what
725 		 * triggered this specific report (mostly timer
726 		 * triggered or e.g. due to a context switch).
727 		 *
728 		 * This field is never expected to be zero so we can
729 		 * check that the report isn't invalid before copying
730 		 * it to userspace...
731 		 */
732 		reason = ((report32[0] >> OAREPORT_REASON_SHIFT) &
733 			  (IS_GEN(stream->perf->i915, 12) ?
734 			   OAREPORT_REASON_MASK_EXTENDED :
735 			   OAREPORT_REASON_MASK));
736 		if (reason == 0) {
737 			if (__ratelimit(&stream->perf->spurious_report_rs))
738 				DRM_NOTE("Skipping spurious, invalid OA report\n");
739 			continue;
740 		}
741 
742 		ctx_id = report32[2] & stream->specific_ctx_id_mask;
743 
744 		/*
745 		 * Squash whatever is in the CTX_ID field if it's marked as
746 		 * invalid to be sure we avoid false-positive, single-context
747 		 * filtering below...
748 		 *
749 		 * Note that we don't clear the valid_ctx_bit so userspace can
750 		 * understand that the ID has been squashed by the kernel.
751 		 */
752 		if (!(report32[0] & stream->perf->gen8_valid_ctx_bit) &&
753 		    INTEL_GEN(stream->perf->i915) <= 11)
754 			ctx_id = report32[2] = INVALID_CTX_ID;
755 
756 		/*
757 		 * NB: For Gen 8 the OA unit no longer supports clock gating
758 		 * off for a specific context and the kernel can't securely
759 		 * stop the counters from updating as system-wide / global
760 		 * values.
761 		 *
762 		 * Automatic reports now include a context ID so reports can be
763 		 * filtered on the cpu but it's not worth trying to
764 		 * automatically subtract/hide counter progress for other
765 		 * contexts while filtering since we can't stop userspace
766 		 * issuing MI_REPORT_PERF_COUNT commands which would still
767 		 * provide a side-band view of the real values.
768 		 *
769 		 * To allow userspace (such as Mesa/GL_INTEL_performance_query)
770 		 * to normalize counters for a single filtered context, it
771 		 * needs to be forwarded bookend context-switch reports so that it
772 		 * can track switches in between MI_REPORT_PERF_COUNT commands
773 		 * and can itself subtract/ignore the progress of counters
774 		 * associated with other contexts. Note that the hardware
775 		 * automatically triggers reports when switching to a new
776 		 * context which are tagged with the ID of the newly active
777 		 * context. To avoid the complexity (and likely fragility) of
778 		 * reading ahead while parsing reports to try and minimize
779 		 * forwarding redundant context switch reports (i.e. between
780 		 * other, unrelated contexts) we simply elect to forward them
781 		 * all.
782 		 *
783 		 * We don't rely solely on the reason field to identify context
784 		 * switches since it's not uncommon for periodic samples to
785 		 * identify a switch before any 'context switch' report.
786 		 */
787 		if (!stream->perf->exclusive_stream->ctx ||
788 		    stream->specific_ctx_id == ctx_id ||
789 		    stream->oa_buffer.last_ctx_id == stream->specific_ctx_id ||
790 		    reason & OAREPORT_REASON_CTX_SWITCH) {
791 
792 			/*
793 			 * While filtering for a single context we avoid
794 			 * leaking the IDs of other contexts.
795 			 */
796 			if (stream->perf->exclusive_stream->ctx &&
797 			    stream->specific_ctx_id != ctx_id) {
798 				report32[2] = INVALID_CTX_ID;
799 			}
800 
801 			ret = append_oa_sample(stream, buf, count, offset,
802 					       report);
803 			if (ret)
804 				break;
805 
806 			stream->oa_buffer.last_ctx_id = ctx_id;
807 		}
808 
809 		/*
810 		 * Clear out the first 2 dwords as a means to detect unlanded
811 		 * reports.
812 		 */
813 		report32[0] = 0;
814 		report32[1] = 0;
815 	}
816 
817 	if (start_offset != *offset) {
818 		i915_reg_t oaheadptr;
819 
820 		oaheadptr = IS_GEN(stream->perf->i915, 12) ?
821 			    GEN12_OAG_OAHEADPTR : GEN8_OAHEADPTR;
822 
823 		spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
824 
825 		/*
826 		 * We removed the gtt_offset for the copy loop above, indexing
827 		 * relative to oa_buf_base so put back here...
828 		 */
829 		head += gtt_offset;
830 		intel_uncore_write(uncore, oaheadptr,
831 				   head & GEN12_OAG_OAHEADPTR_MASK);
832 		stream->oa_buffer.head = head;
833 
834 		spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
835 	}
836 
837 	return ret;
838 }
839 
840 /**
841  * gen8_oa_read - copy status records then buffered OA reports
842  * @stream: An i915-perf stream opened for OA metrics
843  * @buf: destination buffer given by userspace
844  * @count: the number of bytes userspace wants to read
845  * @offset: (inout): the current position for writing into @buf
846  *
847  * Checks OA unit status registers and if necessary appends corresponding
848  * status records for userspace (such as for a buffer full condition) and then
849  * initiates appending any buffered OA reports.
850  *
851  * Updates @offset according to the number of bytes successfully copied into
852  * the userspace buffer.
853  *
854  * NB: some data may be successfully copied to the userspace buffer
855  * even if an error is returned, and this is reflected in the
856  * updated @offset.
857  *
858  * Returns: zero on success or a negative error code
859  */
860 static int gen8_oa_read(struct i915_perf_stream *stream,
861 			char __user *buf,
862 			size_t count,
863 			size_t *offset)
864 {
865 	struct intel_uncore *uncore = stream->uncore;
866 	u32 oastatus;
867 	i915_reg_t oastatus_reg;
868 	int ret;
869 
870 	if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
871 		return -EIO;
872 
873 	oastatus_reg = IS_GEN(stream->perf->i915, 12) ?
874 		       GEN12_OAG_OASTATUS : GEN8_OASTATUS;
875 
876 	oastatus = intel_uncore_read(uncore, oastatus_reg);
877 
878 	/*
879 	 * We treat OABUFFER_OVERFLOW as a significant error:
880 	 *
881 	 * Although theoretically we could handle this more gracefully
882 	 * sometimes, some Gens don't correctly suppress certain
883 	 * automatically triggered reports in this condition and so we
884 	 * have to assume that old reports are now being trampled
885 	 * over.
886 	 *
887 	 * Since we don't currently give userspace control over
888 	 * the OA buffer size and always configure a large 16MB
889 	 * buffer, a buffer overflow likely indicates that
890 	 * something has gone quite badly wrong.
891 	 */
892 	if (oastatus & GEN8_OASTATUS_OABUFFER_OVERFLOW) {
893 		ret = append_oa_status(stream, buf, count, offset,
894 				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
895 		if (ret)
896 			return ret;
897 
898 		DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
899 			  stream->period_exponent);
900 
901 		stream->perf->ops.oa_disable(stream);
902 		stream->perf->ops.oa_enable(stream);
903 
904 		/*
905 		 * Note: .oa_enable() is expected to re-init the oabuffer and
906 		 * reset GEN8_OASTATUS for us
907 		 */
908 		oastatus = intel_uncore_read(uncore, oastatus_reg);
909 	}
910 
911 	if (oastatus & GEN8_OASTATUS_REPORT_LOST) {
912 		ret = append_oa_status(stream, buf, count, offset,
913 				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
914 		if (ret)
915 			return ret;
916 
917 		intel_uncore_rmw(uncore, oastatus_reg,
918 				 GEN8_OASTATUS_COUNTER_OVERFLOW |
919 				 GEN8_OASTATUS_REPORT_LOST,
920 				 IS_GEN_RANGE(uncore->i915, 8, 11) ?
921 				 (GEN8_OASTATUS_HEAD_POINTER_WRAP |
922 				  GEN8_OASTATUS_TAIL_POINTER_WRAP) : 0);
923 	}
924 
925 	return gen8_append_oa_reports(stream, buf, count, offset);
926 }
927 
928 /**
929  * gen7_append_oa_reports - Copies all buffered OA reports into
930  *			    userspace read() buffer.
931  * @stream: An i915-perf stream opened for OA metrics
932  * @buf: destination buffer given by userspace
933  * @count: the number of bytes userspace wants to read
934  * @offset: (inout): the current position for writing into @buf
935  *
936  * Notably any error condition resulting in a short read (-%ENOSPC or
937  * -%EFAULT) will be returned even though one or more records may
938  * have been successfully copied. In this case it's up to the caller
939  * to decide if the error should be squashed before returning to
940  * userspace.
941  *
942  * Note: reports are consumed from the head, and appended to the
943  * tail, so the tail chases the head?... If you think that's mad
944  * and back-to-front you're not alone, but this follows the
945  * Gen PRM naming convention.
946  *
947  * Returns: 0 on success, negative error code on failure.
948  */
949 static int gen7_append_oa_reports(struct i915_perf_stream *stream,
950 				  char __user *buf,
951 				  size_t count,
952 				  size_t *offset)
953 {
954 	struct intel_uncore *uncore = stream->uncore;
955 	int report_size = stream->oa_buffer.format_size;
956 	u8 *oa_buf_base = stream->oa_buffer.vaddr;
957 	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
958 	u32 mask = (OA_BUFFER_SIZE - 1);
959 	size_t start_offset = *offset;
960 	unsigned long flags;
961 	u32 head, tail;
962 	u32 taken;
963 	int ret = 0;
964 
965 	if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled))
966 		return -EIO;
967 
968 	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
969 
970 	head = stream->oa_buffer.head;
971 	tail = stream->oa_buffer.tail;
972 
973 	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
974 
975 	/* NB: oa_buffer.head/tail include the gtt_offset which we don't want
976 	 * while indexing relative to oa_buf_base.
977 	 */
978 	head -= gtt_offset;
979 	tail -= gtt_offset;
980 
981 	/* An out of bounds or misaligned head or tail pointer implies a driver
982 	 * bug since we validate + align the tail pointers we read from the
983 	 * hardware and we are in full control of the head pointer which should
984 	 * only be incremented by multiples of the report size (notably also
985 	 * all a power of two).
986 	 */
987 	if (drm_WARN_ONCE(&uncore->i915->drm,
988 			  head > OA_BUFFER_SIZE || head % report_size ||
989 			  tail > OA_BUFFER_SIZE || tail % report_size,
990 			  "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
991 			  head, tail))
992 		return -EIO;
993 
994 
995 	for (/* none */;
996 	     (taken = OA_TAKEN(tail, head));
997 	     head = (head + report_size) & mask) {
998 		u8 *report = oa_buf_base + head;
999 		u32 *report32 = (void *)report;
1000 
1001 		/* All the report sizes factor neatly into the buffer
1002 		 * size so we never expect to see a report split
1003 		 * between the beginning and end of the buffer.
1004 		 *
1005 		 * Given the initial alignment check a misalignment
1006 		 * here would imply a driver bug that would result
1007 		 * in an overrun.
1008 		 */
1009 		if (drm_WARN_ON(&uncore->i915->drm,
1010 				(OA_BUFFER_SIZE - head) < report_size)) {
1011 			drm_err(&uncore->i915->drm,
1012 				"Spurious OA head ptr: non-integral report offset\n");
1013 			break;
1014 		}
1015 
1016 		/* The report-ID field for periodic samples includes
1017 		 * some undocumented flags related to what triggered
1018 		 * the report and is never expected to be zero so we
1019 		 * can check that the report isn't invalid before
1020 		 * copying it to userspace...
1021 		 */
1022 		if (report32[0] == 0) {
1023 			if (__ratelimit(&stream->perf->spurious_report_rs))
1024 				DRM_NOTE("Skipping spurious, invalid OA report\n");
1025 			continue;
1026 		}
1027 
1028 		ret = append_oa_sample(stream, buf, count, offset, report);
1029 		if (ret)
1030 			break;
1031 
1032 		/* Clear out the first 2 dwords as a means to detect unlanded
1033 		 * reports.
1034 		 */
1035 		report32[0] = 0;
1036 		report32[1] = 0;
1037 	}
1038 
1039 	if (start_offset != *offset) {
1040 		spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
1041 
1042 		/* We removed the gtt_offset for the copy loop above, indexing
1043 		 * relative to oa_buf_base so put back here...
1044 		 */
1045 		head += gtt_offset;
1046 
1047 		intel_uncore_write(uncore, GEN7_OASTATUS2,
1048 				   (head & GEN7_OASTATUS2_HEAD_MASK) |
1049 				   GEN7_OASTATUS2_MEM_SELECT_GGTT);
1050 		stream->oa_buffer.head = head;
1051 
1052 		spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
1053 	}
1054 
1055 	return ret;
1056 }
1057 
1058 /**
1059  * gen7_oa_read - copy status records then buffered OA reports
1060  * @stream: An i915-perf stream opened for OA metrics
1061  * @buf: destination buffer given by userspace
1062  * @count: the number of bytes userspace wants to read
1063  * @offset: (inout): the current position for writing into @buf
1064  *
1065  * Checks Gen 7 specific OA unit status registers and if necessary appends
1066  * corresponding status records for userspace (such as for a buffer full
1067  * condition) and then initiates appending any buffered OA reports.
1068  *
1069  * Updates @offset according to the number of bytes successfully copied into
1070  * the userspace buffer.
1071  *
1072  * Returns: zero on success or a negative error code
1073  */
1074 static int gen7_oa_read(struct i915_perf_stream *stream,
1075 			char __user *buf,
1076 			size_t count,
1077 			size_t *offset)
1078 {
1079 	struct intel_uncore *uncore = stream->uncore;
1080 	u32 oastatus1;
1081 	int ret;
1082 
1083 	if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
1084 		return -EIO;
1085 
1086 	oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);
1087 
1088 	/* XXX: On Haswell we don't have a safe way to clear oastatus1
1089 	 * bits while the OA unit is enabled (while the tail pointer
1090 	 * may be updated asynchronously) so we ignore status bits
1091 	 * that have already been reported to userspace.
1092 	 */
1093 	oastatus1 &= ~stream->perf->gen7_latched_oastatus1;
1094 
1095 	/* We treat OABUFFER_OVERFLOW as a significant error:
1096 	 *
1097 	 * - The status can be interpreted to mean that the buffer is
1098 	 *   currently full (with a higher precedence than OA_TAKEN()
1099 	 *   which will start to report a near-empty buffer after an
1100 	 *   overflow) but it's awkward that we can't clear the status
1101 	 *   on Haswell, so without a reset we won't be able to catch
1102 	 *   the state again.
1103 	 *
1104 	 * - Since it also implies the HW has started overwriting old
1105 	 *   reports it may also affect our sanity checks for invalid
1106 	 *   reports when copying to userspace that assume new reports
1107 	 *   are being written to cleared memory.
1108 	 *
1109 	 * - In the future we may want to introduce a flight recorder
1110 	 *   mode where the driver will automatically maintain a safe
1111 	 *   guard band between head/tail, avoiding this overflow
1112 	 *   condition, but we avoid the added driver complexity for
1113 	 *   now.
1114 	 */
1115 	if (unlikely(oastatus1 & GEN7_OASTATUS1_OABUFFER_OVERFLOW)) {
1116 		ret = append_oa_status(stream, buf, count, offset,
1117 				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
1118 		if (ret)
1119 			return ret;
1120 
1121 		DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
1122 			  stream->period_exponent);
1123 
1124 		stream->perf->ops.oa_disable(stream);
1125 		stream->perf->ops.oa_enable(stream);
1126 
1127 		oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);
1128 	}
1129 
1130 	if (unlikely(oastatus1 & GEN7_OASTATUS1_REPORT_LOST)) {
1131 		ret = append_oa_status(stream, buf, count, offset,
1132 				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
1133 		if (ret)
1134 			return ret;
1135 		stream->perf->gen7_latched_oastatus1 |=
1136 			GEN7_OASTATUS1_REPORT_LOST;
1137 	}
1138 
1139 	return gen7_append_oa_reports(stream, buf, count, offset);
1140 }
1141 
1142 /**
1143  * i915_oa_wait_unlocked - handles blocking IO until OA data available
1144  * @stream: An i915-perf stream opened for OA metrics
1145  *
1146  * Called when userspace tries to read() from a blocking stream FD opened
1147  * for OA metrics. It waits until the hrtimer callback finds a non-empty
1148  * OA buffer and wakes us.
1149  *
1150  * Note: it's acceptable to have this return with some false positives
1151  * since any subsequent read handling will return -EAGAIN if there isn't
1152  * really data ready for userspace yet.
1153  *
1154  * Returns: zero on success or a negative error code
1155  */
1156 static int i915_oa_wait_unlocked(struct i915_perf_stream *stream)
1157 {
1158 	/* We would wait indefinitely if periodic sampling is not enabled */
1159 	if (!stream->periodic)
1160 		return -EIO;
1161 
1162 	return wait_event_interruptible(stream->poll_wq,
1163 					oa_buffer_check_unlocked(stream));
1164 }
1165 
1166 /**
1167  * i915_oa_poll_wait - call poll_wait() for an OA stream poll()
1168  * @stream: An i915-perf stream opened for OA metrics
1169  * @file: An i915 perf stream file
1170  * @wait: poll() state table
1171  *
1172  * For handling userspace polling on an i915 perf stream opened for OA metrics,
1173  * this starts a poll_wait with the wait queue that our hrtimer callback wakes
1174  * when it sees data ready to read in the circular OA buffer.
1175  */
1176 static void i915_oa_poll_wait(struct i915_perf_stream *stream,
1177 			      struct file *file,
1178 			      poll_table *wait)
1179 {
1180 	poll_wait(file, &stream->poll_wq, wait);
1181 }
1182 
1183 /**
1184  * i915_oa_read - just calls through to &i915_oa_ops->read
1185  * @stream: An i915-perf stream opened for OA metrics
1186  * @buf: destination buffer given by userspace
1187  * @count: the number of bytes userspace wants to read
1188  * @offset: (inout): the current position for writing into @buf
1189  *
1190  * Updates @offset according to the number of bytes successfully copied into
1191  * the userspace buffer.
1192  *
1193  * Returns: zero on success or a negative error code
1194  */
1195 static int i915_oa_read(struct i915_perf_stream *stream,
1196 			char __user *buf,
1197 			size_t count,
1198 			size_t *offset)
1199 {
1200 	return stream->perf->ops.read(stream, buf, count, offset);
1201 }
1202 
1203 static struct intel_context *oa_pin_context(struct i915_perf_stream *stream)
1204 {
1205 	struct i915_gem_engines_iter it;
1206 	struct i915_gem_context *ctx = stream->ctx;
1207 	struct intel_context *ce;
1208 	struct i915_gem_ww_ctx ww;
1209 	int err = -ENODEV;
1210 
1211 	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
1212 		if (ce->engine != stream->engine) /* first match! */
1213 			continue;
1214 
1215 		err = 0;
1216 		break;
1217 	}
1218 	i915_gem_context_unlock_engines(ctx);
1219 
1220 	if (err)
1221 		return ERR_PTR(err);
1222 
1223 	i915_gem_ww_ctx_init(&ww, true);
1224 retry:
1225 	/*
1226 	 * As the ID is the gtt offset of the context's vma we
1227 	 * pin the vma to ensure the ID remains fixed.
1228 	 */
1229 	err = intel_context_pin_ww(ce, &ww);
1230 	if (err == -EDEADLK) {
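		/* Standard ww locking dance: on contention we back off,
		 * resolving the -EDEADLK, and then retry the pin from
		 * scratch.
		 */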
1231 		err = i915_gem_ww_ctx_backoff(&ww);
1232 		if (!err)
1233 			goto retry;
1234 	}
1235 	i915_gem_ww_ctx_fini(&ww);
1236 
1237 	if (err)
1238 		return ERR_PTR(err);
1239 
1240 	stream->pinned_ctx = ce;
1241 	return stream->pinned_ctx;
1242 }
1243 
1244 /**
1245  * oa_get_render_ctx_id - determine and hold ctx hw id
1246  * @stream: An i915-perf stream opened for OA metrics
1247  *
1248  * Determine the render context hw id, and ensure it remains fixed for the
1249  * lifetime of the stream. This ensures that we don't have to worry about
1250  * updating the context ID in OACONTROL on the fly.
1251  *
1252  * Returns: zero on success or a negative error code
1253  */
1254 static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
1255 {
1256 	struct intel_context *ce;
1257 
1258 	ce = oa_pin_context(stream);
1259 	if (IS_ERR(ce))
1260 		return PTR_ERR(ce);
1261 
1262 	switch (INTEL_GEN(ce->engine->i915)) {
1263 	case 7: {
1264 		/*
1265 		 * On Haswell we don't do any post processing of the reports
1266 		 * and don't need to use the mask.
1267 		 */
1268 		stream->specific_ctx_id = i915_ggtt_offset(ce->state);
1269 		stream->specific_ctx_id_mask = 0;
1270 		break;
1271 	}
1272 
1273 	case 8:
1274 	case 9:
1275 	case 10:
1276 		if (intel_engine_in_execlists_submission_mode(ce->engine)) {
1277 			stream->specific_ctx_id_mask =
1278 				(1U << GEN8_CTX_ID_WIDTH) - 1;
1279 			stream->specific_ctx_id = stream->specific_ctx_id_mask;
1280 		} else {
1281 			/*
1282 			 * When using GuC, the context descriptor we write in
1283 			 * i915 is read by GuC and rewritten before it's
1284 			 * actually written into the hardware. The LRCA is
1285 			 * what is put into the context id field of the
1286 			 * context descriptor by GuC. Because it's aligned to
1287 			 * a page, the lower 12bits are always at 0 and
1288 			 * dropped by GuC. They won't be part of the context
1289 			 * ID in the OA reports, so squash those lower bits.
1290 			 */
1291 			stream->specific_ctx_id = ce->lrc.lrca >> 12;
1292 
1293 			/*
1294 			 * GuC uses the top bit to signal proxy submission, so
1295 			 * ignore that bit.
1296 			 */
1297 			stream->specific_ctx_id_mask =
1298 				(1U << (GEN8_CTX_ID_WIDTH - 1)) - 1;
1299 		}
1300 		break;
1301 
1302 	case 11:
1303 	case 12: {
1304 		stream->specific_ctx_id_mask =
1305 			((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32);
1306 		/*
1307 		 * Pick an unused context id
1308 		 * 0 - BITS_PER_LONG are used by other contexts
1309 		 * GEN12_MAX_CONTEXT_HW_ID (0x7ff) is used by idle context
1310 		 */
1311 		stream->specific_ctx_id = (GEN12_MAX_CONTEXT_HW_ID - 1) << (GEN11_SW_CTX_ID_SHIFT - 32);
1312 		break;
1313 	}
1314 
1315 	default:
1316 		MISSING_CASE(INTEL_GEN(ce->engine->i915));
1317 	}
1318 
1319 	ce->tag = stream->specific_ctx_id;
1320 
1321 	drm_dbg(&stream->perf->i915->drm,
1322 		"filtering on ctx_id=0x%x ctx_id_mask=0x%x\n",
1323 		stream->specific_ctx_id,
1324 		stream->specific_ctx_id_mask);
1325 
1326 	return 0;
1327 }
1328 
1329 /**
1330  * oa_put_render_ctx_id - counterpart to oa_get_render_ctx_id releases hold
1331  * @stream: An i915-perf stream opened for OA metrics
1332  *
1333  * In case anything needed doing to ensure the context HW ID would remain valid
1334  * for the lifetime of the stream, that can be undone here.
1335  */
1336 static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
1337 {
1338 	struct intel_context *ce;
1339 
1340 	ce = fetch_and_zero(&stream->pinned_ctx);
1341 	if (ce) {
1342 		ce->tag = 0; /* recomputed on next submission after parking */
1343 		intel_context_unpin(ce);
1344 	}
1345 
1346 	stream->specific_ctx_id = INVALID_CTX_ID;
1347 	stream->specific_ctx_id_mask = 0;
1348 }
1349 
1350 static void
1351 free_oa_buffer(struct i915_perf_stream *stream)
1352 {
1353 	i915_vma_unpin_and_release(&stream->oa_buffer.vma,
1354 				   I915_VMA_RELEASE_MAP);
1355 
1356 	stream->oa_buffer.vaddr = NULL;
1357 }
1358 
1359 static void
1360 free_oa_configs(struct i915_perf_stream *stream)
1361 {
1362 	struct i915_oa_config_bo *oa_bo, *tmp;
1363 
1364 	i915_oa_config_put(stream->oa_config);
1365 	llist_for_each_entry_safe(oa_bo, tmp, stream->oa_config_bos.first, node)
1366 		free_oa_config_bo(oa_bo);
1367 }
1368 
1369 static void
1370 free_noa_wait(struct i915_perf_stream *stream)
1371 {
1372 	i915_vma_unpin_and_release(&stream->noa_wait, 0);
1373 }
1374 
1375 static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
1376 {
1377 	struct i915_perf *perf = stream->perf;
1378 
1379 	BUG_ON(stream != perf->exclusive_stream);
1380 
1381 	/*
1382 	 * Unset exclusive_stream first, it will be checked while disabling
1383 	 * the metric set on gen8+.
1384 	 *
1385 	 * See i915_oa_init_reg_state() and lrc_configure_all_contexts()
1386 	 */
1387 	WRITE_ONCE(perf->exclusive_stream, NULL);
1388 	perf->ops.disable_metric_set(stream);
1389 
1390 	free_oa_buffer(stream);
1391 
1392 	intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL);
1393 	intel_engine_pm_put(stream->engine);
1394 
1395 	if (stream->ctx)
1396 		oa_put_render_ctx_id(stream);
1397 
1398 	free_oa_configs(stream);
1399 	free_noa_wait(stream);
1400 
1401 	if (perf->spurious_report_rs.missed) {
1402 		DRM_NOTE("%d spurious OA report notices suppressed due to ratelimiting\n",
1403 			 perf->spurious_report_rs.missed);
1404 	}
1405 }
1406 
1407 static void gen7_init_oa_buffer(struct i915_perf_stream *stream)
1408 {
1409 	struct intel_uncore *uncore = stream->uncore;
1410 	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
1411 	unsigned long flags;
1412 
1413 	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
1414 
1415 	/* Pre-DevBDW: OABUFFER must be set with counters off,
1416 	 * before OASTATUS1, but after OASTATUS2
1417 	 */
1418 	intel_uncore_write(uncore, GEN7_OASTATUS2, /* head */
1419 			   gtt_offset | GEN7_OASTATUS2_MEM_SELECT_GGTT);
1420 	stream->oa_buffer.head = gtt_offset;
1421 
1422 	intel_uncore_write(uncore, GEN7_OABUFFER, gtt_offset);
1423 
1424 	intel_uncore_write(uncore, GEN7_OASTATUS1, /* tail */
1425 			   gtt_offset | OABUFFER_SIZE_16M);
1426 
1427 	/* Mark that we need updated tail pointers to read from... */
1428 	stream->oa_buffer.aging_tail = INVALID_TAIL_PTR;
1429 	stream->oa_buffer.tail = gtt_offset;
1430 
1431 	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
1432 
1433 	/* On Haswell we have to track which OASTATUS1 flags we've
1434 	 * already seen since they can't be cleared while periodic
1435 	 * sampling is enabled.
1436 	 */
1437 	stream->perf->gen7_latched_oastatus1 = 0;
1438 
1439 	/* NB: although the OA buffer will initially be allocated
1440 	 * zeroed via shmfs (and so this memset is redundant when
1441 	 * first allocating), we may re-init the OA buffer, either
1442 	 * when re-enabling a stream or in error/reset paths.
1443 	 *
1444 	 * The reason we clear the buffer for each re-init is for the
1445 	 * sanity check in gen7_append_oa_reports() that looks at the
1446 	 * report-id field to make sure it's non-zero which relies on
1447 	 * the assumption that new reports are being written to zeroed
1448 	 * memory...
1449 	 */
1450 	memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
1451 }
1452 
1453 static void gen8_init_oa_buffer(struct i915_perf_stream *stream)
1454 {
1455 	struct intel_uncore *uncore = stream->uncore;
1456 	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
1457 	unsigned long flags;
1458 
1459 	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
1460 
1461 	intel_uncore_write(uncore, GEN8_OASTATUS, 0);
1462 	intel_uncore_write(uncore, GEN8_OAHEADPTR, gtt_offset);
1463 	stream->oa_buffer.head = gtt_offset;
1464 
1465 	intel_uncore_write(uncore, GEN8_OABUFFER_UDW, 0);
1466 
1467 	/*
1468 	 * PRM says:
1469 	 *
1470 	 *  "This MMIO must be set before the OATAILPTR
1471 	 *  register and after the OAHEADPTR register. This is
1472 	 *  to enable proper functionality of the overflow
1473 	 *  bit."
1474 	 */
1475 	intel_uncore_write(uncore, GEN8_OABUFFER, gtt_offset |
1476 		   OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT);
1477 	intel_uncore_write(uncore, GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK);
1478 
1479 	/* Mark that we need updated tail pointers to read from... */
1480 	stream->oa_buffer.aging_tail = INVALID_TAIL_PTR;
1481 	stream->oa_buffer.tail = gtt_offset;
1482 
1483 	/*
1484 	 * Reset state used to recognise context switches, affecting which
1485 	 * reports we will forward to userspace while filtering for a single
1486 	 * context.
1487 	 */
1488 	stream->oa_buffer.last_ctx_id = INVALID_CTX_ID;
1489 
1490 	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
1491 
1492 	/*
1493 	 * NB: although the OA buffer will initially be allocated
1494 	 * zeroed via shmfs (and so this memset is redundant when
1495 	 * first allocating), we may re-init the OA buffer, either
1496 	 * when re-enabling a stream or in error/reset paths.
1497 	 *
1498 	 * The reason we clear the buffer for each re-init is for the
1499 	 * sanity check in gen8_append_oa_reports() that looks at the
1500 	 * reason field to make sure it's non-zero which relies on
1501 	 * the assumption that new reports are being written to zeroed
1502 	 * memory...
1503 	 */
1504 	memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
1505 }
1506 
1507 static void gen12_init_oa_buffer(struct i915_perf_stream *stream)
1508 {
1509 	struct intel_uncore *uncore = stream->uncore;
1510 	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
1511 	unsigned long flags;
1512 
1513 	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
1514 
1515 	intel_uncore_write(uncore, GEN12_OAG_OASTATUS, 0);
1516 	intel_uncore_write(uncore, GEN12_OAG_OAHEADPTR,
1517 			   gtt_offset & GEN12_OAG_OAHEADPTR_MASK);
1518 	stream->oa_buffer.head = gtt_offset;
1519 
1520 	/*
1521 	 * PRM says:
1522 	 *
1523 	 *  "This MMIO must be set before the OATAILPTR
1524 	 *  register and after the OAHEADPTR register. This is
1525 	 *  to enable proper functionality of the overflow
1526 	 *  bit."
1527 	 */
1528 	intel_uncore_write(uncore, GEN12_OAG_OABUFFER, gtt_offset |
1529 			   OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT);
1530 	intel_uncore_write(uncore, GEN12_OAG_OATAILPTR,
1531 			   gtt_offset & GEN12_OAG_OATAILPTR_MASK);
1532 
1533 	/* Mark that we need updated tail pointers to read from... */
1534 	stream->oa_buffer.aging_tail = INVALID_TAIL_PTR;
1535 	stream->oa_buffer.tail = gtt_offset;
1536 
1537 	/*
1538 	 * Reset state used to recognise context switches, affecting which
1539 	 * reports we will forward to userspace while filtering for a single
1540 	 * context.
1541 	 */
1542 	stream->oa_buffer.last_ctx_id = INVALID_CTX_ID;
1543 
1544 	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
1545 
1546 	/*
1547 	 * NB: although the OA buffer will initially be allocated
1548 	 * zeroed via shmfs (and so this memset is redundant when
1549 	 * first allocating), we may re-init the OA buffer, either
1550 	 * when re-enabling a stream or in error/reset paths.
1551 	 *
1552 	 * The reason we clear the buffer for each re-init is for the
1553 	 * sanity check in gen8_append_oa_reports() that looks at the
1554 	 * reason field to make sure it's non-zero which relies on
1555 	 * the assumption that new reports are being written to zeroed
1556 	 * memory...
1557 	 */
1558 	memset(stream->oa_buffer.vaddr, 0,
1559 	       stream->oa_buffer.vma->size);
1560 }
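
/*
 * Illustrative aside, not part of the driver: the reason all of the
 * (re)init paths above memset() the buffer is that the report-append
 * code can then treat an all-zero header field as "not yet written by
 * the HW". Conceptually the check looks something like the following,
 * with 'oa_buf_base' and 'tail' as placeholder names:
 *
 *	u32 *report32 = (u32 *)(oa_buf_base + tail);
 *
 *	if (!report32[0])
 *		break;	// landed in zeroed memory: no new report yet
 *
 * On gen7 the dword checked is the report-id field; on gen8+ it is the
 * reason field, as the comments above note.
 */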
1561 
1562 static int alloc_oa_buffer(struct i915_perf_stream *stream)
1563 {
1564 	struct drm_i915_private *i915 = stream->perf->i915;
1565 	struct drm_i915_gem_object *bo;
1566 	struct i915_vma *vma;
1567 	int ret;
1568 
1569 	if (drm_WARN_ON(&i915->drm, stream->oa_buffer.vma))
1570 		return -ENODEV;
1571 
1572 	BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE);
1573 	BUILD_BUG_ON(OA_BUFFER_SIZE < SZ_128K || OA_BUFFER_SIZE > SZ_16M);
1574 
1575 	bo = i915_gem_object_create_shmem(stream->perf->i915, OA_BUFFER_SIZE);
1576 	if (IS_ERR(bo)) {
1577 		drm_err(&i915->drm, "Failed to allocate OA buffer\n");
1578 		return PTR_ERR(bo);
1579 	}
1580 
1581 	i915_gem_object_set_cache_coherency(bo, I915_CACHE_LLC);
1582 
1583 	/* PreHSW required 512K alignment, HSW requires 16M */
1584 	vma = i915_gem_object_ggtt_pin(bo, NULL, 0, SZ_16M, 0);
1585 	if (IS_ERR(vma)) {
1586 		ret = PTR_ERR(vma);
1587 		goto err_unref;
1588 	}
1589 	stream->oa_buffer.vma = vma;
1590 
1591 	stream->oa_buffer.vaddr =
1592 		i915_gem_object_pin_map(bo, I915_MAP_WB);
1593 	if (IS_ERR(stream->oa_buffer.vaddr)) {
1594 		ret = PTR_ERR(stream->oa_buffer.vaddr);
1595 		goto err_unpin;
1596 	}
1597 
1598 	return 0;
1599 
1600 err_unpin:
1601 	__i915_vma_unpin(vma);
1602 
1603 err_unref:
1604 	i915_gem_object_put(bo);
1605 
1606 	stream->oa_buffer.vaddr = NULL;
1607 	stream->oa_buffer.vma = NULL;
1608 
1609 	return ret;
1610 }
1611 
1612 static u32 *save_restore_register(struct i915_perf_stream *stream, u32 *cs,
1613 				  bool save, i915_reg_t reg, u32 offset,
1614 				  u32 dword_count)
1615 {
1616 	u32 cmd;
1617 	u32 d;
1618 
1619 	cmd = save ? MI_STORE_REGISTER_MEM : MI_LOAD_REGISTER_MEM;
1620 	cmd |= MI_SRM_LRM_GLOBAL_GTT;
1621 	if (INTEL_GEN(stream->perf->i915) >= 8)
1622 		cmd++;
1623 
1624 	for (d = 0; d < dword_count; d++) {
1625 		*cs++ = cmd;
1626 		*cs++ = i915_mmio_reg_offset(reg) + 4 * d;
1627 		*cs++ = intel_gt_scratch_offset(stream->engine->gt,
1628 						offset) + 4 * d;
1629 		*cs++ = 0;
1630 	}
1631 
1632 	return cs;
1633 }
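
/*
 * Worked example, for illustration only: on gen8+, saving one 64-bit
 * CS_GPR register (dword_count == 2) emits two SRM packets of four
 * dwords each:
 *
 *	MI_STORE_REGISTER_MEM (gen8 form) | MI_SRM_LRM_GLOBAL_GTT
 *	register offset + 4 * d
 *	scratch GGTT offset + 4 * d	(lower 32 address bits)
 *	0				(upper 32 address bits)
 *
 * for d in {0, 1}. The cmd++ above grows the command's length field
 * because gen8+ SRM/LRM commands carry a 64-bit address.
 */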
1634 
1635 static int alloc_noa_wait(struct i915_perf_stream *stream)
1636 {
1637 	struct drm_i915_private *i915 = stream->perf->i915;
1638 	struct drm_i915_gem_object *bo;
1639 	struct i915_vma *vma;
1640 	const u64 delay_ticks = 0xffffffffffffffff -
1641 		intel_gt_ns_to_clock_interval(stream->perf->i915->ggtt.vm.gt,
1642 					      atomic64_read(&stream->perf->noa_programming_delay));
1643 	const u32 base = stream->engine->mmio_base;
1644 #define CS_GPR(x) GEN8_RING_CS_GPR(base, x)
1645 	u32 *batch, *ts0, *cs, *jump;
1646 	int ret, i;
1647 	enum {
1648 		START_TS,
1649 		NOW_TS,
1650 		DELTA_TS,
1651 		JUMP_PREDICATE,
1652 		DELTA_TARGET,
1653 		N_CS_GPR
1654 	};
1655 
1656 	bo = i915_gem_object_create_internal(i915, 4096);
1657 	if (IS_ERR(bo)) {
1658 		drm_err(&i915->drm,
1659 			"Failed to allocate NOA wait batchbuffer\n");
1660 		return PTR_ERR(bo);
1661 	}
1662 
1663 	/*
1664 	 * We pin this buffer in the GGTT because multiple OA config BOs
1665 	 * will contain a jump to this address, which therefore needs to stay
1666 	 * fixed for the lifetime of the i915 perf stream.
1667 	 */
1668 	vma = i915_gem_object_ggtt_pin(bo, NULL, 0, 0, PIN_HIGH);
1669 	if (IS_ERR(vma)) {
1670 		ret = PTR_ERR(vma);
1671 		goto err_unref;
1672 	}
1673 
1674 	batch = cs = i915_gem_object_pin_map(bo, I915_MAP_WB);
1675 	if (IS_ERR(batch)) {
1676 		ret = PTR_ERR(batch);
1677 		goto err_unpin;
1678 	}
1679 
1680 	/* Save registers. */
1681 	for (i = 0; i < N_CS_GPR; i++)
1682 		cs = save_restore_register(
1683 			stream, cs, true /* save */, CS_GPR(i),
1684 			INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR + 8 * i, 2);
1685 	cs = save_restore_register(
1686 		stream, cs, true /* save */, MI_PREDICATE_RESULT_1,
1687 		INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1, 1);
1688 
1689 	/* First timestamp snapshot location. */
1690 	ts0 = cs;
1691 
1692 	/*
1693 	 * Initial snapshot of the timestamp register to implement the wait.
1694 	 * We work with 32-bit values, so clear out the top 32 bits of the
1695 	 * register because the ALU operates on 64-bit values.
1696 	 */
1697 	*cs++ = MI_LOAD_REGISTER_IMM(1);
1698 	*cs++ = i915_mmio_reg_offset(CS_GPR(START_TS)) + 4;
1699 	*cs++ = 0;
1700 	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
1701 	*cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base));
1702 	*cs++ = i915_mmio_reg_offset(CS_GPR(START_TS));
1703 
1704 	/*
1705 	 * This is the location we're going to jump back into until the
1706 	 * required amount of time has passed.
1707 	 */
1708 	jump = cs;
1709 
1710 	/*
1711 	 * Take another snapshot of the timestamp register. Take care to clear
1712 	 * the top 32 bits of CS_GPR(1) as we're using it for other
1713 	 * operations below.
1714 	 */
1715 	*cs++ = MI_LOAD_REGISTER_IMM(1);
1716 	*cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS)) + 4;
1717 	*cs++ = 0;
1718 	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
1719 	*cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base));
1720 	*cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS));
1721 
1722 	/*
1723 	 * Compute the difference between the two timestamps and store the
1724 	 * result back into CS_GPR(1).
1725 	 */
1726 	*cs++ = MI_MATH(5);
1727 	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(NOW_TS));
1728 	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(START_TS));
1729 	*cs++ = MI_MATH_SUB;
1730 	*cs++ = MI_MATH_STORE(MI_MATH_REG(DELTA_TS), MI_MATH_REG_ACCU);
1731 	*cs++ = MI_MATH_STORE(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF);
1732 
1733 	/*
1734 	 * Transfer the carry flag (set to 1 if ts1 < ts0, meaning the
1735 	 * timestamp has rolled over the 32 bits) into the predicate register
1736 	 * to be used for the predicated jump.
1737 	 */
1738 	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
1739 	*cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE));
1740 	*cs++ = i915_mmio_reg_offset(MI_PREDICATE_RESULT_1);
1741 
1742 	/* Restart from the beginning if we had timestamps roll over. */
1743 	*cs++ = (INTEL_GEN(i915) < 8 ?
1744 		 MI_BATCH_BUFFER_START :
1745 		 MI_BATCH_BUFFER_START_GEN8) |
1746 		MI_BATCH_PREDICATE;
1747 	*cs++ = i915_ggtt_offset(vma) + (ts0 - batch) * 4;
1748 	*cs++ = 0;
1749 
1750 	/*
1751 	 * Now add the difference between the two previous timestamps to:
1752 	 *      ((1 << 64) - 1) - delay (converted to timestamp ticks)
1753 	 *
1754 	 * When the addition sets the Carry Flag, the elapsed time is
1755 	 * longer than the expected delay and we can exit the wait loop.
1756 	 */
1757 	*cs++ = MI_LOAD_REGISTER_IMM(2);
1758 	*cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET));
1759 	*cs++ = lower_32_bits(delay_ticks);
1760 	*cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET)) + 4;
1761 	*cs++ = upper_32_bits(delay_ticks);
1762 
1763 	*cs++ = MI_MATH(4);
1764 	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(DELTA_TS));
1765 	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(DELTA_TARGET));
1766 	*cs++ = MI_MATH_ADD;
1767 	*cs++ = MI_MATH_STOREINV(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF);
1768 
1769 	*cs++ = MI_ARB_CHECK;
1770 
1771 	/*
1772 	 * Transfer the result into the predicate register to be used for the
1773 	 * predicated jump.
1774 	 */
1775 	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
1776 	*cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE));
1777 	*cs++ = i915_mmio_reg_offset(MI_PREDICATE_RESULT_1);
1778 
1779 	/* Predicate the jump.  */
1780 	*cs++ = (INTEL_GEN(i915) < 8 ?
1781 		 MI_BATCH_BUFFER_START :
1782 		 MI_BATCH_BUFFER_START_GEN8) |
1783 		MI_BATCH_PREDICATE;
1784 	*cs++ = i915_ggtt_offset(vma) + (jump - batch) * 4;
1785 	*cs++ = 0;
1786 
1787 	/* Restore registers. */
1788 	for (i = 0; i < N_CS_GPR; i++)
1789 		cs = save_restore_register(
1790 			stream, cs, false /* restore */, CS_GPR(i),
1791 			INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR + 8 * i, 2);
1792 	cs = save_restore_register(
1793 		stream, cs, false /* restore */, MI_PREDICATE_RESULT_1,
1794 		INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1, 1);
1795 
1796 	/* And return to the ring. */
1797 	*cs++ = MI_BATCH_BUFFER_END;
1798 
1799 	GEM_BUG_ON(cs - batch > PAGE_SIZE / sizeof(*batch));
1800 
1801 	i915_gem_object_flush_map(bo);
1802 	__i915_gem_object_release_map(bo);
1803 
1804 	stream->noa_wait = vma;
1805 	return 0;
1806 
1807 err_unpin:
1808 	i915_vma_unpin_and_release(&vma, 0);
1809 err_unref:
1810 	i915_gem_object_put(bo);
1811 	return ret;
1812 }
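
/*
 * Worked example of the delay arithmetic above, for illustration:
 * suppose the requested delay converts to 100 timestamp ticks, so
 * delay_ticks == (2^64 - 1) - 100. While the measured DELTA_TS is
 * still 100 or less, DELTA_TS + delay_ticks stays below 2^64: no
 * carry, the STOREINV leaves a non-zero value in the predicate and the
 * jump back to 'jump' is taken. Once DELTA_TS exceeds 100, the
 * addition wraps past 2^64 - 1, the Carry Flag is set, the inverted
 * store clears the predicate and execution falls through to the
 * register restores and MI_BATCH_BUFFER_END.
 */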
1813 
1814 static u32 *write_cs_mi_lri(u32 *cs,
1815 			    const struct i915_oa_reg *reg_data,
1816 			    u32 n_regs)
1817 {
1818 	u32 i;
1819 
1820 	for (i = 0; i < n_regs; i++) {
1821 		if ((i % MI_LOAD_REGISTER_IMM_MAX_REGS) == 0) {
1822 			u32 n_lri = min_t(u32,
1823 					  n_regs - i,
1824 					  MI_LOAD_REGISTER_IMM_MAX_REGS);
1825 
1826 			*cs++ = MI_LOAD_REGISTER_IMM(n_lri);
1827 		}
1828 		*cs++ = i915_mmio_reg_offset(reg_data[i].addr);
1829 		*cs++ = reg_data[i].value;
1830 	}
1831 
1832 	return cs;
1833 }
1834 
1835 static int num_lri_dwords(int num_regs)
1836 {
1837 	int count = 0;
1838 
1839 	if (num_regs > 0) {
1840 		count += DIV_ROUND_UP(num_regs, MI_LOAD_REGISTER_IMM_MAX_REGS);
1841 		count += num_regs * 2;
1842 	}
1843 
1844 	return count;
1845 }
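
/*
 * Worked example, for illustration, assuming the
 * MI_LOAD_REGISTER_IMM_MAX_REGS limit of 126 registers per packet that
 * i915 defines elsewhere: for num_regs == 200, write_cs_mi_lri() emits
 * DIV_ROUND_UP(200, 126) == 2 MI_LOAD_REGISTER_IMM headers plus
 * 200 * 2 == 400 (offset, value) dwords, i.e. 402 dwords in total,
 * which is exactly what this helper sizes for.
 */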
1846 
1847 static struct i915_oa_config_bo *
1848 alloc_oa_config_buffer(struct i915_perf_stream *stream,
1849 		       struct i915_oa_config *oa_config)
1850 {
1851 	struct drm_i915_gem_object *obj;
1852 	struct i915_oa_config_bo *oa_bo;
1853 	size_t config_length = 0;
1854 	u32 *cs;
1855 	int err;
1856 
1857 	oa_bo = kzalloc(sizeof(*oa_bo), GFP_KERNEL);
1858 	if (!oa_bo)
1859 		return ERR_PTR(-ENOMEM);
1860 
1861 	config_length += num_lri_dwords(oa_config->mux_regs_len);
1862 	config_length += num_lri_dwords(oa_config->b_counter_regs_len);
1863 	config_length += num_lri_dwords(oa_config->flex_regs_len);
1864 	config_length += 3; /* MI_BATCH_BUFFER_START */
1865 	config_length = ALIGN(sizeof(u32) * config_length, I915_GTT_PAGE_SIZE);
1866 
1867 	obj = i915_gem_object_create_shmem(stream->perf->i915, config_length);
1868 	if (IS_ERR(obj)) {
1869 		err = PTR_ERR(obj);
1870 		goto err_free;
1871 	}
1872 
1873 	cs = i915_gem_object_pin_map(obj, I915_MAP_WB);
1874 	if (IS_ERR(cs)) {
1875 		err = PTR_ERR(cs);
1876 		goto err_oa_bo;
1877 	}
1878 
1879 	cs = write_cs_mi_lri(cs,
1880 			     oa_config->mux_regs,
1881 			     oa_config->mux_regs_len);
1882 	cs = write_cs_mi_lri(cs,
1883 			     oa_config->b_counter_regs,
1884 			     oa_config->b_counter_regs_len);
1885 	cs = write_cs_mi_lri(cs,
1886 			     oa_config->flex_regs,
1887 			     oa_config->flex_regs_len);
1888 
1889 	/* Jump into the active wait. */
1890 	*cs++ = (INTEL_GEN(stream->perf->i915) < 8 ?
1891 		 MI_BATCH_BUFFER_START :
1892 		 MI_BATCH_BUFFER_START_GEN8);
1893 	*cs++ = i915_ggtt_offset(stream->noa_wait);
1894 	*cs++ = 0;
1895 
1896 	i915_gem_object_flush_map(obj);
1897 	__i915_gem_object_release_map(obj);
1898 
1899 	oa_bo->vma = i915_vma_instance(obj,
1900 				       &stream->engine->gt->ggtt->vm,
1901 				       NULL);
1902 	if (IS_ERR(oa_bo->vma)) {
1903 		err = PTR_ERR(oa_bo->vma);
1904 		goto err_oa_bo;
1905 	}
1906 
1907 	oa_bo->oa_config = i915_oa_config_get(oa_config);
1908 	llist_add(&oa_bo->node, &stream->oa_config_bos);
1909 
1910 	return oa_bo;
1911 
1912 err_oa_bo:
1913 	i915_gem_object_put(obj);
1914 err_free:
1915 	kfree(oa_bo);
1916 	return ERR_PTR(err);
1917 }
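
/*
 * The resulting BO layout, sketched for illustration:
 *
 *	+-----------------------------------------+
 *	| LRI packets for mux_regs                |
 *	| LRI packets for b_counter_regs          |
 *	| LRI packets for flex_regs               |
 *	| MI_BATCH_BUFFER_START -> noa_wait batch |
 *	+-----------------------------------------+
 *
 * Submitting this buffer programs the whole OA/NOA configuration and
 * then falls through into the shared wait batch, which spins until the
 * NOA programming delay has elapsed before returning to the ring.
 */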
1918 
1919 static struct i915_vma *
1920 get_oa_vma(struct i915_perf_stream *stream, struct i915_oa_config *oa_config)
1921 {
1922 	struct i915_oa_config_bo *oa_bo;
1923 
1924 	/*
1925 	 * Look for the buffer in the already allocated BOs attached
1926 	 * to the stream.
1927 	 */
1928 	llist_for_each_entry(oa_bo, stream->oa_config_bos.first, node) {
1929 		if (oa_bo->oa_config == oa_config &&
1930 		    memcmp(oa_bo->oa_config->uuid,
1931 			   oa_config->uuid,
1932 			   sizeof(oa_config->uuid)) == 0)
1933 			goto out;
1934 	}
1935 
1936 	oa_bo = alloc_oa_config_buffer(stream, oa_config);
1937 	if (IS_ERR(oa_bo))
1938 		return ERR_CAST(oa_bo);
1939 
1940 out:
1941 	return i915_vma_get(oa_bo->vma);
1942 }
1943 
1944 static int
1945 emit_oa_config(struct i915_perf_stream *stream,
1946 	       struct i915_oa_config *oa_config,
1947 	       struct intel_context *ce,
1948 	       struct i915_active *active)
1949 {
1950 	struct i915_request *rq;
1951 	struct i915_vma *vma;
1952 	struct i915_gem_ww_ctx ww;
1953 	int err;
1954 
1955 	vma = get_oa_vma(stream, oa_config);
1956 	if (IS_ERR(vma))
1957 		return PTR_ERR(vma);
1958 
1959 	i915_gem_ww_ctx_init(&ww, true);
1960 retry:
1961 	err = i915_gem_object_lock(vma->obj, &ww);
1962 	if (err)
1963 		goto err;
1964 
1965 	err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_GLOBAL | PIN_HIGH);
1966 	if (err)
1967 		goto err;
1968 
1969 	intel_engine_pm_get(ce->engine);
1970 	rq = i915_request_create(ce);
1971 	intel_engine_pm_put(ce->engine);
1972 	if (IS_ERR(rq)) {
1973 		err = PTR_ERR(rq);
1974 		goto err_vma_unpin;
1975 	}
1976 
1977 	if (!IS_ERR_OR_NULL(active)) {
1978 		/* After all individual context modifications */
1979 		err = i915_request_await_active(rq, active,
1980 						I915_ACTIVE_AWAIT_ACTIVE);
1981 		if (err)
1982 			goto err_add_request;
1983 
1984 		err = i915_active_add_request(active, rq);
1985 		if (err)
1986 			goto err_add_request;
1987 	}
1988 
1989 	err = i915_request_await_object(rq, vma->obj, 0);
1990 	if (!err)
1991 		err = i915_vma_move_to_active(vma, rq, 0);
1992 	if (err)
1993 		goto err_add_request;
1994 
1995 	err = rq->engine->emit_bb_start(rq,
1996 					vma->node.start, 0,
1997 					I915_DISPATCH_SECURE);
1998 	if (err)
1999 		goto err_add_request;
2000 
2001 err_add_request:
2002 	i915_request_add(rq);
2003 err_vma_unpin:
2004 	i915_vma_unpin(vma);
2005 err:
2006 	if (err == -EDEADLK) {
2007 		err = i915_gem_ww_ctx_backoff(&ww);
2008 		if (!err)
2009 			goto retry;
2010 	}
2011 
2012 	i915_gem_ww_ctx_fini(&ww);
2013 	i915_vma_put(vma);
2014 	return err;
2015 }
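
/*
 * The lock/backoff dance above is the standard i915 ww-mutex pattern.
 * A minimal sketch of the idiom, independent of OA (the helper name
 * do_work_that_may_deadlock() is a placeholder):
 *
 *	i915_gem_ww_ctx_init(&ww, true);
 * retry:
 *	err = i915_gem_object_lock(obj, &ww);
 *	if (!err)
 *		err = do_work_that_may_deadlock(obj);
 *	if (err == -EDEADLK) {
 *		err = i915_gem_ww_ctx_backoff(&ww);
 *		if (!err)
 *			goto retry;
 *	}
 *	i915_gem_ww_ctx_fini(&ww);
 *
 * -EDEADLK signals that another thread holds a lock we need in a
 * conflicting order; the backoff drops our locks and waits until it is
 * safe to retry the whole sequence.
 */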
2016 
2017 static struct intel_context *oa_context(struct i915_perf_stream *stream)
2018 {
2019 	return stream->pinned_ctx ?: stream->engine->kernel_context;
2020 }
2021 
2022 static int
2023 hsw_enable_metric_set(struct i915_perf_stream *stream,
2024 		      struct i915_active *active)
2025 {
2026 	struct intel_uncore *uncore = stream->uncore;
2027 
2028 	/*
2029 	 * PRM:
2030 	 *
2031 	 * OA unit is using “crclk” for its functionality. When trunk
2032 	 * level clock gating takes place, OA clock would be gated,
2033 	 * unable to count the events from non-render clock domain.
2034 	 * Render clock gating must be disabled when OA is enabled to
2035 	 * count the events from non-render domain. Unit level clock
2036 	 * gating for RCS should also be disabled.
2037 	 */
2038 	intel_uncore_rmw(uncore, GEN7_MISCCPCTL,
2039 			 GEN7_DOP_CLOCK_GATE_ENABLE, 0);
2040 	intel_uncore_rmw(uncore, GEN6_UCGCTL1,
2041 			 0, GEN6_CSUNIT_CLOCK_GATE_DISABLE);
2042 
2043 	return emit_oa_config(stream,
2044 			      stream->oa_config, oa_context(stream),
2045 			      active);
2046 }
2047 
2048 static void hsw_disable_metric_set(struct i915_perf_stream *stream)
2049 {
2050 	struct intel_uncore *uncore = stream->uncore;
2051 
2052 	intel_uncore_rmw(uncore, GEN6_UCGCTL1,
2053 			 GEN6_CSUNIT_CLOCK_GATE_DISABLE, 0);
2054 	intel_uncore_rmw(uncore, GEN7_MISCCPCTL,
2055 			 0, GEN7_DOP_CLOCK_GATE_ENABLE);
2056 
2057 	intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0);
2058 }
2059 
2060 static u32 oa_config_flex_reg(const struct i915_oa_config *oa_config,
2061 			      i915_reg_t reg)
2062 {
2063 	u32 mmio = i915_mmio_reg_offset(reg);
2064 	int i;
2065 
2066 	/*
2067 	 * This arbitrary default will select the 'EU FPU0 Pipeline
2068 	 * Active' event. In the future it's anticipated that there
2069 	 * will be an explicit 'No Event' we can select, but not yet...
2070 	 */
2071 	if (!oa_config)
2072 		return 0;
2073 
2074 	for (i = 0; i < oa_config->flex_regs_len; i++) {
2075 		if (i915_mmio_reg_offset(oa_config->flex_regs[i].addr) == mmio)
2076 			return oa_config->flex_regs[i].value;
2077 	}
2078 
2079 	return 0;
2080 }
2081 /*
2082  * NB: It must always remain pointer safe to run this even if the OA unit
2083  * has been disabled.
2084  *
2085  * It's fine to put out-of-date values into these per-context registers
2086  * in the case that the OA unit has been disabled.
2087  */
2088 static void
2089 gen8_update_reg_state_unlocked(const struct intel_context *ce,
2090 			       const struct i915_perf_stream *stream)
2091 {
2092 	u32 ctx_oactxctrl = stream->perf->ctx_oactxctrl_offset;
2093 	u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset;
2094 	/* The MMIO offsets for Flex EU registers aren't contiguous */
2095 	i915_reg_t flex_regs[] = {
2096 		EU_PERF_CNTL0,
2097 		EU_PERF_CNTL1,
2098 		EU_PERF_CNTL2,
2099 		EU_PERF_CNTL3,
2100 		EU_PERF_CNTL4,
2101 		EU_PERF_CNTL5,
2102 		EU_PERF_CNTL6,
2103 	};
2104 	u32 *reg_state = ce->lrc_reg_state;
2105 	int i;
2106 
2107 	reg_state[ctx_oactxctrl + 1] =
2108 		(stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
2109 		(stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
2110 		GEN8_OA_COUNTER_RESUME;
2111 
2112 	for (i = 0; i < ARRAY_SIZE(flex_regs); i++)
2113 		reg_state[ctx_flexeu0 + i * 2 + 1] =
2114 			oa_config_flex_reg(stream->oa_config, flex_regs[i]);
2115 }
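
/*
 * The "+ 1" offsets above follow from the logical ring context image
 * being laid out as MI_LOAD_REGISTER_IMM-style (offset, value) pairs:
 * reg_state[ctx_oactxctrl] holds the OACTXCONTROL register offset and
 * reg_state[ctx_oactxctrl + 1] the value to be loaded, and each flex
 * EU register i likewise occupies reg_state[ctx_flexeu0 + i * 2] and
 * reg_state[ctx_flexeu0 + i * 2 + 1].
 */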
2116 
2117 struct flex {
2118 	i915_reg_t reg;
2119 	u32 offset;
2120 	u32 value;
2121 };
2122 
2123 static int
2124 gen8_store_flex(struct i915_request *rq,
2125 		struct intel_context *ce,
2126 		const struct flex *flex, unsigned int count)
2127 {
2128 	u32 offset;
2129 	u32 *cs;
2130 
2131 	cs = intel_ring_begin(rq, 4 * count);
2132 	if (IS_ERR(cs))
2133 		return PTR_ERR(cs);
2134 
2135 	offset = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET;
2136 	do {
2137 		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
2138 		*cs++ = offset + flex->offset * sizeof(u32);
2139 		*cs++ = 0;
2140 		*cs++ = flex->value;
2141 	} while (flex++, --count);
2142 
2143 	intel_ring_advance(rq, cs);
2144 
2145 	return 0;
2146 }
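
/*
 * Each flex entry above expands to one 4-dword MI_STORE_DWORD_IMM
 * packet (command, lower address into the context image, upper address
 * of 0, value), which is why intel_ring_begin() reserves 4 * count
 * dwords.
 */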
2147 
2148 static int
2149 gen8_load_flex(struct i915_request *rq,
2150 	       struct intel_context *ce,
2151 	       const struct flex *flex, unsigned int count)
2152 {
2153 	u32 *cs;
2154 
2155 	GEM_BUG_ON(!count || count > 63);
2156 
2157 	cs = intel_ring_begin(rq, 2 * count + 2);
2158 	if (IS_ERR(cs))
2159 		return PTR_ERR(cs);
2160 
2161 	*cs++ = MI_LOAD_REGISTER_IMM(count);
2162 	do {
2163 		*cs++ = i915_mmio_reg_offset(flex->reg);
2164 		*cs++ = flex->value;
2165 	} while (flex++, --count);
2166 	*cs++ = MI_NOOP;
2167 
2168 	intel_ring_advance(rq, cs);
2169 
2170 	return 0;
2171 }
2172 
2173 static int gen8_modify_context(struct intel_context *ce,
2174 			       const struct flex *flex, unsigned int count)
2175 {
2176 	struct i915_request *rq;
2177 	int err;
2178 
2179 	rq = intel_engine_create_kernel_request(ce->engine);
2180 	if (IS_ERR(rq))
2181 		return PTR_ERR(rq);
2182 
2183 	/* Serialise with the remote context */
2184 	err = intel_context_prepare_remote_request(ce, rq);
2185 	if (err == 0)
2186 		err = gen8_store_flex(rq, ce, flex, count);
2187 
2188 	i915_request_add(rq);
2189 	return err;
2190 }
2191 
2192 static int
2193 gen8_modify_self(struct intel_context *ce,
2194 		 const struct flex *flex, unsigned int count,
2195 		 struct i915_active *active)
2196 {
2197 	struct i915_request *rq;
2198 	int err;
2199 
2200 	intel_engine_pm_get(ce->engine);
2201 	rq = i915_request_create(ce);
2202 	intel_engine_pm_put(ce->engine);
2203 	if (IS_ERR(rq))
2204 		return PTR_ERR(rq);
2205 
2206 	if (!IS_ERR_OR_NULL(active)) {
2207 		err = i915_active_add_request(active, rq);
2208 		if (err)
2209 			goto err_add_request;
2210 	}
2211 
2212 	err = gen8_load_flex(rq, ce, flex, count);
2213 	if (err)
2214 		goto err_add_request;
2215 
2216 err_add_request:
2217 	i915_request_add(rq);
2218 	return err;
2219 }
2220 
2221 static int gen8_configure_context(struct i915_gem_context *ctx,
2222 				  struct flex *flex, unsigned int count)
2223 {
2224 	struct i915_gem_engines_iter it;
2225 	struct intel_context *ce;
2226 	int err = 0;
2227 
2228 	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
2229 		GEM_BUG_ON(ce == ce->engine->kernel_context);
2230 
2231 		if (ce->engine->class != RENDER_CLASS)
2232 			continue;
2233 
2234 		/* Otherwise OA settings will be set upon first use */
2235 		if (!intel_context_pin_if_active(ce))
2236 			continue;
2237 
2238 		flex->value = intel_sseu_make_rpcs(ce->engine->gt, &ce->sseu);
2239 		err = gen8_modify_context(ce, flex, count);
2240 
2241 		intel_context_unpin(ce);
2242 		if (err)
2243 			break;
2244 	}
2245 	i915_gem_context_unlock_engines(ctx);
2246 
2247 	return err;
2248 }
2249 
2250 static int gen12_configure_oar_context(struct i915_perf_stream *stream,
2251 				       struct i915_active *active)
2252 {
2253 	int err;
2254 	struct intel_context *ce = stream->pinned_ctx;
2255 	u32 format = stream->oa_buffer.format;
2256 	struct flex regs_context[] = {
2257 		{
2258 			GEN8_OACTXCONTROL,
2259 			stream->perf->ctx_oactxctrl_offset + 1,
2260 			active ? GEN8_OA_COUNTER_RESUME : 0,
2261 		},
2262 	};
2263 	/* Offsets in regs_lri are not used since this configuration is only
2264 	 * applied using LRI. Initialize the correct offsets for posterity.
2265 	 */
2266 #define GEN12_OAR_OACONTROL_OFFSET 0x5B0
2267 	struct flex regs_lri[] = {
2268 		{
2269 			GEN12_OAR_OACONTROL,
2270 			GEN12_OAR_OACONTROL_OFFSET + 1,
2271 			(format << GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT) |
2272 			(active ? GEN12_OAR_OACONTROL_COUNTER_ENABLE : 0)
2273 		},
2274 		{
2275 			RING_CONTEXT_CONTROL(ce->engine->mmio_base),
2276 			CTX_CONTEXT_CONTROL,
2277 			_MASKED_FIELD(GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE,
2278 				      active ?
2279 				      GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE :
2280 				      0)
2281 		},
2282 	};
2283 
2284 	/* Modify the context image of the pinned context with regs_context */
2285 	err = intel_context_lock_pinned(ce);
2286 	if (err)
2287 		return err;
2288 
2289 	err = gen8_modify_context(ce, regs_context, ARRAY_SIZE(regs_context));
2290 	intel_context_unlock_pinned(ce);
2291 	if (err)
2292 		return err;
2293 
2294 	/* Apply regs_lri using LRI with pinned context */
2295 	return gen8_modify_self(ce, regs_lri, ARRAY_SIZE(regs_lri), active);
2296 }
2297 
2298 /*
2299  * Manages updating the per-context aspects of the OA stream
2300  * configuration across all contexts.
2301  *
2302  * The awkward consideration here is that OACTXCONTROL controls the
2303  * exponent for periodic sampling which is primarily used for system
2304  * wide profiling where we'd like a consistent sampling period even in
2305  * the face of context switches.
2306  *
2307  * Our approach of updating the register state context (as opposed to
2308  * say using a workaround batch buffer) ensures that the hardware
2309  * won't automatically reload an out-of-date timer exponent even
2310  * transiently before a WA BB could be parsed.
2311  *
2312  * This function needs to:
2313  * - Ensure the currently running context's per-context OA state is
2314  *   updated
2315  * - Ensure that all existing contexts will have the correct per-context
2316  *   OA state if they are scheduled for use.
2317  * - Ensure any new contexts will be initialized with the correct
2318  *   per-context OA state.
2319  *
2320  * Note: it's only the RCS/Render context that has any OA state.
2321  * Note: the first flex register passed must always be R_PWR_CLK_STATE
2322  */
2323 static int
2324 oa_configure_all_contexts(struct i915_perf_stream *stream,
2325 			  struct flex *regs,
2326 			  size_t num_regs,
2327 			  struct i915_active *active)
2328 {
2329 	struct drm_i915_private *i915 = stream->perf->i915;
2330 	struct intel_engine_cs *engine;
2331 	struct i915_gem_context *ctx, *cn;
2332 	int err;
2333 
2334 	lockdep_assert_held(&stream->perf->lock);
2335 
2336 	/*
2337 	 * The OA register config is setup through the context image. This image
2338 	 * might be written to by the GPU on context switch (in particular on
2339 	 * lite-restore). This means we can't safely update a context's image,
2340 	 * if this context is scheduled/submitted to run on the GPU.
2341 	 *
2342 	 * We could emit the OA register config through the batch buffer but
2343 	 * this might leave a small interval of time where the OA unit is
2344 	 * configured at an invalid sampling period.
2345 	 *
2346 	 * Note that since we emit all requests from a single ring, there
2347 	 * is still an implicit global barrier here that may cause a high
2348 	 * priority context to wait for an otherwise independent low priority
2349 	 * context. Contexts idle at the time of reconfiguration are not
2350 	 * trapped behind the barrier.
2351 	 */
2352 	spin_lock(&i915->gem.contexts.lock);
2353 	list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
2354 		if (!kref_get_unless_zero(&ctx->ref))
2355 			continue;
2356 
2357 		spin_unlock(&i915->gem.contexts.lock);
2358 
2359 		err = gen8_configure_context(ctx, regs, num_regs);
2360 		if (err) {
2361 			i915_gem_context_put(ctx);
2362 			return err;
2363 		}
2364 
2365 		spin_lock(&i915->gem.contexts.lock);
2366 		list_safe_reset_next(ctx, cn, link);
2367 		i915_gem_context_put(ctx);
2368 	}
2369 	spin_unlock(&i915->gem.contexts.lock);
2370 
2371 	/*
2372 	 * After updating all other contexts, we need to modify ourselves.
2373 	 * If we don't modify the kernel_context, we do not get events while
2374 	 * idle.
2375 	 */
2376 	for_each_uabi_engine(engine, i915) {
2377 		struct intel_context *ce = engine->kernel_context;
2378 
2379 		if (engine->class != RENDER_CLASS)
2380 			continue;
2381 
2382 		regs[0].value = intel_sseu_make_rpcs(engine->gt, &ce->sseu);
2383 
2384 		err = gen8_modify_self(ce, regs, num_regs, active);
2385 		if (err)
2386 			return err;
2387 	}
2388 
2389 	return 0;
2390 }
2391 
2392 static int
2393 gen12_configure_all_contexts(struct i915_perf_stream *stream,
2394 			     const struct i915_oa_config *oa_config,
2395 			     struct i915_active *active)
2396 {
2397 	struct flex regs[] = {
2398 		{
2399 			GEN8_R_PWR_CLK_STATE,
2400 			CTX_R_PWR_CLK_STATE,
2401 		},
2402 	};
2403 
2404 	return oa_configure_all_contexts(stream,
2405 					 regs, ARRAY_SIZE(regs),
2406 					 active);
2407 }
2408 
2409 static int
2410 lrc_configure_all_contexts(struct i915_perf_stream *stream,
2411 			   const struct i915_oa_config *oa_config,
2412 			   struct i915_active *active)
2413 {
2414 	/* The MMIO offsets for Flex EU registers aren't contiguous */
2415 	const u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset;
2416 #define ctx_flexeuN(N) (ctx_flexeu0 + 2 * (N) + 1)
2417 	struct flex regs[] = {
2418 		{
2419 			GEN8_R_PWR_CLK_STATE,
2420 			CTX_R_PWR_CLK_STATE,
2421 		},
2422 		{
2423 			GEN8_OACTXCONTROL,
2424 			stream->perf->ctx_oactxctrl_offset + 1,
2425 		},
2426 		{ EU_PERF_CNTL0, ctx_flexeuN(0) },
2427 		{ EU_PERF_CNTL1, ctx_flexeuN(1) },
2428 		{ EU_PERF_CNTL2, ctx_flexeuN(2) },
2429 		{ EU_PERF_CNTL3, ctx_flexeuN(3) },
2430 		{ EU_PERF_CNTL4, ctx_flexeuN(4) },
2431 		{ EU_PERF_CNTL5, ctx_flexeuN(5) },
2432 		{ EU_PERF_CNTL6, ctx_flexeuN(6) },
2433 	};
2434 #undef ctx_flexeuN
2435 	int i;
2436 
2437 	regs[1].value =
2438 		(stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
2439 		(stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
2440 		GEN8_OA_COUNTER_RESUME;
2441 
2442 	for (i = 2; i < ARRAY_SIZE(regs); i++)
2443 		regs[i].value = oa_config_flex_reg(oa_config, regs[i].reg);
2444 
2445 	return oa_configure_all_contexts(stream,
2446 					 regs, ARRAY_SIZE(regs),
2447 					 active);
2448 }
2449 
2450 static int
2451 gen8_enable_metric_set(struct i915_perf_stream *stream,
2452 		       struct i915_active *active)
2453 {
2454 	struct intel_uncore *uncore = stream->uncore;
2455 	struct i915_oa_config *oa_config = stream->oa_config;
2456 	int ret;
2457 
2458 	/*
2459 	 * We disable slice/unslice clock ratio change reports on SKL since
2460 	 * they are too noisy. The HW generates a lot of redundant reports
2461 	 * where the ratio hasn't really changed, causing a lot of redundant
2462 	 * work for userspace processes and increasing the chances we'll hit buffer
2463 	 * overruns.
2464 	 *
2465 	 * Although we don't currently use the 'disable overrun' OABUFFER
2466 	 * feature it's worth noting that clock ratio reports have to be
2467 	 * disabled before considering use of that feature since the HW doesn't
2468 	 * correctly block these reports.
2469 	 *
2470 	 * Currently none of the high-level metrics we have depend on knowing
2471 	 * this ratio to normalize.
2472 	 *
2473 	 * Note: This register is not power context saved and restored, but
2474 	 * that's OK considering that we disable RC6 while the OA unit is
2475 	 * enabled.
2476 	 *
2477 	 * The _INCLUDE_CLK_RATIO bit allows the slice/unslice frequency to
2478 	 * be read back from automatically triggered reports, as part of the
2479 	 * RPT_ID field.
2480 	 */
2481 	if (IS_GEN_RANGE(stream->perf->i915, 9, 11)) {
2482 		intel_uncore_write(uncore, GEN8_OA_DEBUG,
2483 				   _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
2484 						      GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
2485 	}
2486 
2487 	/*
2488 	 * Update all contexts prior to writing the mux configurations as we need
2489 	 * to make sure all slices/subslices are ON before writing to NOA
2490 	 * registers.
2491 	 */
2492 	ret = lrc_configure_all_contexts(stream, oa_config, active);
2493 	if (ret)
2494 		return ret;
2495 
2496 	return emit_oa_config(stream,
2497 			      stream->oa_config, oa_context(stream),
2498 			      active);
2499 }
2500 
2501 static u32 oag_report_ctx_switches(const struct i915_perf_stream *stream)
2502 {
2503 	return _MASKED_FIELD(GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS,
2504 			     (stream->sample_flags & SAMPLE_OA_REPORT) ?
2505 			     0 : GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS);
2506 }
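
/*
 * _MASKED_FIELD() targets "masked" registers, where the upper 16 bits
 * of a write select which of the lower 16 bits are actually updated.
 * For example, when OA report sampling is requested the expression
 * above evaluates to roughly:
 *
 *	(GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS << 16) | 0
 *
 * i.e. "update this bit, and clear it", leaving every other bit of
 * GEN12_OAG_OA_DEBUG untouched by this particular write.
 */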
2507 
2508 static int
2509 gen12_enable_metric_set(struct i915_perf_stream *stream,
2510 			struct i915_active *active)
2511 {
2512 	struct intel_uncore *uncore = stream->uncore;
2513 	struct i915_oa_config *oa_config = stream->oa_config;
2514 	bool periodic = stream->periodic;
2515 	u32 period_exponent = stream->period_exponent;
2516 	int ret;
2517 
2518 	intel_uncore_write(uncore, GEN12_OAG_OA_DEBUG,
2519 			   /* Disable clk ratio reports, like previous Gens. */
2520 			   _MASKED_BIT_ENABLE(GEN12_OAG_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
2521 					      GEN12_OAG_OA_DEBUG_INCLUDE_CLK_RATIO) |
2522 			   /*
2523 			    * If the user didn't require OA reports, instruct
2524 			    * the hardware not to emit ctx switch reports.
2525 			    */
2526 			   oag_report_ctx_switches(stream));
2527 
2528 	intel_uncore_write(uncore, GEN12_OAG_OAGLBCTXCTRL, periodic ?
2529 			   (GEN12_OAG_OAGLBCTXCTRL_COUNTER_RESUME |
2530 			    GEN12_OAG_OAGLBCTXCTRL_TIMER_ENABLE |
2531 			    (period_exponent << GEN12_OAG_OAGLBCTXCTRL_TIMER_PERIOD_SHIFT))
2532 			    : 0);
2533 
2534 	/*
2535 	 * Update all contexts prior to writing the mux configurations as we need
2536 	 * to make sure all slices/subslices are ON before writing to NOA
2537 	 * registers.
2538 	 */
2539 	ret = gen12_configure_all_contexts(stream, oa_config, active);
2540 	if (ret)
2541 		return ret;
2542 
2543 	/*
2544 	 * For Gen12, performance counters are context
2545 	 * saved/restored. Only enable it for the context that
2546 	 * requested this.
2547 	 */
2548 	if (stream->ctx) {
2549 		ret = gen12_configure_oar_context(stream, active);
2550 		if (ret)
2551 			return ret;
2552 	}
2553 
2554 	return emit_oa_config(stream,
2555 			      stream->oa_config, oa_context(stream),
2556 			      active);
2557 }
2558 
2559 static void gen8_disable_metric_set(struct i915_perf_stream *stream)
2560 {
2561 	struct intel_uncore *uncore = stream->uncore;
2562 
2563 	/* Reset all contexts' slices/subslices configurations. */
2564 	lrc_configure_all_contexts(stream, NULL, NULL);
2565 
2566 	intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0);
2567 }
2568 
2569 static void gen10_disable_metric_set(struct i915_perf_stream *stream)
2570 {
2571 	struct intel_uncore *uncore = stream->uncore;
2572 
2573 	/* Reset all contexts' slices/subslices configurations. */
2574 	lrc_configure_all_contexts(stream, NULL, NULL);
2575 
2576 	/* Make sure we disable noa to save power. */
2577 	intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
2578 }
2579 
2580 static void gen12_disable_metric_set(struct i915_perf_stream *stream)
2581 {
2582 	struct intel_uncore *uncore = stream->uncore;
2583 
2584 	/* Reset all contexts' slices/subslices configurations. */
2585 	gen12_configure_all_contexts(stream, NULL, NULL);
2586 
2587 	/* disable the context save/restore or OAR counters */
2588 	if (stream->ctx)
2589 		gen12_configure_oar_context(stream, NULL);
2590 
2591 	/* Make sure we disable noa to save power. */
2592 	intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
2593 }
2594 
2595 static void gen7_oa_enable(struct i915_perf_stream *stream)
2596 {
2597 	struct intel_uncore *uncore = stream->uncore;
2598 	struct i915_gem_context *ctx = stream->ctx;
2599 	u32 ctx_id = stream->specific_ctx_id;
2600 	bool periodic = stream->periodic;
2601 	u32 period_exponent = stream->period_exponent;
2602 	u32 report_format = stream->oa_buffer.format;
2603 
2604 	/*
2605 	 * Reset buf pointers so we don't forward reports from before now.
2606 	 *
2607 	 * Think carefully if considering trying to avoid this, since it
2608 	 * also ensures status flags and the buffer itself are cleared
2609 	 * in error paths, and we have checks for invalid reports based
2610 	 * on the assumption that certain fields are written to zeroed
2611 	 * memory, which this helps maintain.
2612 	 */
2613 	gen7_init_oa_buffer(stream);
2614 
2615 	intel_uncore_write(uncore, GEN7_OACONTROL,
2616 			   (ctx_id & GEN7_OACONTROL_CTX_MASK) |
2617 			   (period_exponent <<
2618 			    GEN7_OACONTROL_TIMER_PERIOD_SHIFT) |
2619 			   (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) |
2620 			   (report_format << GEN7_OACONTROL_FORMAT_SHIFT) |
2621 			   (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) |
2622 			   GEN7_OACONTROL_ENABLE);
2623 }
2624 
2625 static void gen8_oa_enable(struct i915_perf_stream *stream)
2626 {
2627 	struct intel_uncore *uncore = stream->uncore;
2628 	u32 report_format = stream->oa_buffer.format;
2629 
2630 	/*
2631 	 * Reset buf pointers so we don't forward reports from before now.
2632 	 *
2633 	 * Think carefully if considering trying to avoid this, since it
2634 	 * also ensures status flags and the buffer itself are cleared
2635 	 * in error paths, and we have checks for invalid reports based
2636 	 * on the assumption that certain fields are written to zeroed
2637 	 * memory, which this helps maintain.
2638 	 */
2639 	gen8_init_oa_buffer(stream);
2640 
2641 	/*
2642 	 * Note: we don't rely on the hardware to perform single context
2643 	 * filtering and instead filter on the CPU based on the context-id
2644 	 * field of reports.
2645 	 */
2646 	intel_uncore_write(uncore, GEN8_OACONTROL,
2647 			   (report_format << GEN8_OA_REPORT_FORMAT_SHIFT) |
2648 			   GEN8_OA_COUNTER_ENABLE);
2649 }
2650 
2651 static void gen12_oa_enable(struct i915_perf_stream *stream)
2652 {
2653 	struct intel_uncore *uncore = stream->uncore;
2654 	u32 report_format = stream->oa_buffer.format;
2655 
2656 	/*
2657 	 * If we don't want OA reports from the OA buffer, then we don't even
2658 	 * need to program the OAG unit.
2659 	 */
2660 	if (!(stream->sample_flags & SAMPLE_OA_REPORT))
2661 		return;
2662 
2663 	gen12_init_oa_buffer(stream);
2664 
2665 	intel_uncore_write(uncore, GEN12_OAG_OACONTROL,
2666 			   (report_format << GEN12_OAG_OACONTROL_OA_COUNTER_FORMAT_SHIFT) |
2667 			   GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE);
2668 }
2669 
2670 /**
2671  * i915_oa_stream_enable - handle `I915_PERF_IOCTL_ENABLE` for OA stream
2672  * @stream: An i915 perf stream opened for OA metrics
2673  *
2674  * [Re]enables hardware periodic sampling according to the period configured
2675  * when opening the stream. This also starts a hrtimer that will periodically
2676  * check for data in the circular OA buffer for notifying userspace (e.g.
2677  * during a read() or poll()).
2678  */
2679 static void i915_oa_stream_enable(struct i915_perf_stream *stream)
2680 {
2681 	stream->pollin = false;
2682 
2683 	stream->perf->ops.oa_enable(stream);
2684 
2685 	if (stream->periodic)
2686 		hrtimer_start(&stream->poll_check_timer,
2687 			      ns_to_ktime(stream->poll_oa_period),
2688 			      HRTIMER_MODE_REL_PINNED);
2689 }
2690 
2691 static void gen7_oa_disable(struct i915_perf_stream *stream)
2692 {
2693 	struct intel_uncore *uncore = stream->uncore;
2694 
2695 	intel_uncore_write(uncore, GEN7_OACONTROL, 0);
2696 	if (intel_wait_for_register(uncore,
2697 				    GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0,
2698 				    50))
2699 		drm_err(&stream->perf->i915->drm,
2700 			"wait for OA to be disabled timed out\n");
2701 }
2702 
2703 static void gen8_oa_disable(struct i915_perf_stream *stream)
2704 {
2705 	struct intel_uncore *uncore = stream->uncore;
2706 
2707 	intel_uncore_write(uncore, GEN8_OACONTROL, 0);
2708 	if (intel_wait_for_register(uncore,
2709 				    GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0,
2710 				    50))
2711 		drm_err(&stream->perf->i915->drm,
2712 			"wait for OA to be disabled timed out\n");
2713 }
2714 
2715 static void gen12_oa_disable(struct i915_perf_stream *stream)
2716 {
2717 	struct intel_uncore *uncore = stream->uncore;
2718 
2719 	intel_uncore_write(uncore, GEN12_OAG_OACONTROL, 0);
2720 	if (intel_wait_for_register(uncore,
2721 				    GEN12_OAG_OACONTROL,
2722 				    GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE, 0,
2723 				    50))
2724 		drm_err(&stream->perf->i915->drm,
2725 			"wait for OA to be disabled timed out\n");
2726 
2727 	intel_uncore_write(uncore, GEN12_OA_TLB_INV_CR, 1);
2728 	if (intel_wait_for_register(uncore,
2729 				    GEN12_OA_TLB_INV_CR,
2730 				    1, 0,
2731 				    50))
2732 		drm_err(&stream->perf->i915->drm,
2733 			"wait for OA tlb invalidate timed out\n");
2734 }
2735 
2736 /**
2737  * i915_oa_stream_disable - handle `I915_PERF_IOCTL_DISABLE` for OA stream
2738  * @stream: An i915 perf stream opened for OA metrics
2739  *
2740  * Stops the OA unit from periodically writing counter reports into the
2741  * circular OA buffer. This also stops the hrtimer that periodically checks for
2742  * data in the circular OA buffer, for notifying userspace.
2743  */
2744 static void i915_oa_stream_disable(struct i915_perf_stream *stream)
2745 {
2746 	stream->perf->ops.oa_disable(stream);
2747 
2748 	if (stream->periodic)
2749 		hrtimer_cancel(&stream->poll_check_timer);
2750 }
2751 
2752 static const struct i915_perf_stream_ops i915_oa_stream_ops = {
2753 	.destroy = i915_oa_stream_destroy,
2754 	.enable = i915_oa_stream_enable,
2755 	.disable = i915_oa_stream_disable,
2756 	.wait_unlocked = i915_oa_wait_unlocked,
2757 	.poll_wait = i915_oa_poll_wait,
2758 	.read = i915_oa_read,
2759 };
2760 
2761 static int i915_perf_stream_enable_sync(struct i915_perf_stream *stream)
2762 {
2763 	struct i915_active *active;
2764 	int err;
2765 
2766 	active = i915_active_create();
2767 	if (!active)
2768 		return -ENOMEM;
2769 
2770 	err = stream->perf->ops.enable_metric_set(stream, active);
2771 	if (err == 0)
2772 		__i915_active_wait(active, TASK_UNINTERRUPTIBLE);
2773 
2774 	i915_active_put(active);
2775 	return err;
2776 }
2777 
2778 static void
2779 get_default_sseu_config(struct intel_sseu *out_sseu,
2780 			struct intel_engine_cs *engine)
2781 {
2782 	const struct sseu_dev_info *devinfo_sseu = &engine->gt->info.sseu;
2783 
2784 	*out_sseu = intel_sseu_from_device_info(devinfo_sseu);
2785 
2786 	if (IS_GEN(engine->i915, 11)) {
2787 		/*
2788 		 * We only need subslice count so it doesn't matter which ones
2789 		 * we select - just keep a mask with the low bits set, covering
2790 		 * half of all available subslices per slice.
2791 		 */
2792 		out_sseu->subslice_mask =
2793 			~(~0 << (hweight8(out_sseu->subslice_mask) / 2));
2794 		out_sseu->slice_mask = 0x1;
2795 	}
2796 }
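
/*
 * Worked example of the mask arithmetic above, for illustration: with
 * 8 subslices available, hweight8() returns 8, half of that is 4, and
 * ~(~0 << 4) == 0xf, i.e. a mask with the lowest four subslice bits
 * set.
 */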
2797 
2798 static int
2799 get_sseu_config(struct intel_sseu *out_sseu,
2800 		struct intel_engine_cs *engine,
2801 		const struct drm_i915_gem_context_param_sseu *drm_sseu)
2802 {
2803 	if (drm_sseu->engine.engine_class != engine->uabi_class ||
2804 	    drm_sseu->engine.engine_instance != engine->uabi_instance)
2805 		return -EINVAL;
2806 
2807 	return i915_gem_user_to_context_sseu(engine->gt, drm_sseu, out_sseu);
2808 }
2809 
2810 /**
2811  * i915_oa_stream_init - validate combined props for OA stream and init
2812  * @stream: An i915 perf stream
2813  * @param: The open parameters passed to `DRM_I915_PERF_OPEN`
2814  * @props: The property state that configures stream (individually validated)
2815  *
2816  * While read_properties_unlocked() validates properties in isolation it
2817  * doesn't ensure that the combination necessarily makes sense.
2818  *
2819  * At this point it has been determined that userspace wants a stream of
2820  * OA metrics, but still we need to further validate the combined
2821  * properties are OK.
2822  *
2823  * If the configuration makes sense then we can allocate memory for
2824  * a circular OA buffer and apply the requested metric set configuration.
2825  *
2826  * Returns: zero on success or a negative error code.
2827  */
2828 static int i915_oa_stream_init(struct i915_perf_stream *stream,
2829 			       struct drm_i915_perf_open_param *param,
2830 			       struct perf_open_properties *props)
2831 {
2832 	struct drm_i915_private *i915 = stream->perf->i915;
2833 	struct i915_perf *perf = stream->perf;
2834 	int format_size;
2835 	int ret;
2836 
2837 	if (!props->engine) {
2838 		DRM_DEBUG("OA engine not specified\n");
2839 		return -EINVAL;
2840 	}
2841 
2842 	/*
2843 	 * If the sysfs metrics/ directory wasn't registered for some
2844 	 * reason then don't let userspace try their luck with config
2845 	 * IDs
2846 	 */
2847 	if (!perf->metrics_kobj) {
2848 		DRM_DEBUG("OA metrics weren't advertised via sysfs\n");
2849 		return -EINVAL;
2850 	}
2851 
2852 	if (!(props->sample_flags & SAMPLE_OA_REPORT) &&
2853 	    (INTEL_GEN(perf->i915) < 12 || !stream->ctx)) {
2854 		DRM_DEBUG("Only OA report sampling supported\n");
2855 		return -EINVAL;
2856 	}
2857 
2858 	if (!perf->ops.enable_metric_set) {
2859 		DRM_DEBUG("OA unit not supported\n");
2860 		return -ENODEV;
2861 	}
2862 
2863 	/*
2864 	 * To avoid the complexity of having to accurately filter
2865 	 * counter reports and marshal them to the appropriate client,
2866 	 * we currently only allow exclusive access.
2867 	 */
2868 	if (perf->exclusive_stream) {
2869 		DRM_DEBUG("OA unit already in use\n");
2870 		return -EBUSY;
2871 	}
2872 
2873 	if (!props->oa_format) {
2874 		DRM_DEBUG("OA report format not specified\n");
2875 		return -EINVAL;
2876 	}
2877 
2878 	stream->engine = props->engine;
2879 	stream->uncore = stream->engine->gt->uncore;
2880 
2881 	stream->sample_size = sizeof(struct drm_i915_perf_record_header);
2882 
2883 	format_size = perf->oa_formats[props->oa_format].size;
2884 
2885 	stream->sample_flags = props->sample_flags;
2886 	stream->sample_size += format_size;
2887 
2888 	stream->oa_buffer.format_size = format_size;
2889 	if (drm_WARN_ON(&i915->drm, stream->oa_buffer.format_size == 0))
2890 		return -EINVAL;
2891 
2892 	stream->hold_preemption = props->hold_preemption;
2893 
2894 	stream->oa_buffer.format =
2895 		perf->oa_formats[props->oa_format].format;
2896 
2897 	stream->periodic = props->oa_periodic;
2898 	if (stream->periodic)
2899 		stream->period_exponent = props->oa_period_exponent;
2900 
2901 	if (stream->ctx) {
2902 		ret = oa_get_render_ctx_id(stream);
2903 		if (ret) {
2904 			DRM_DEBUG("Invalid context id to filter with\n");
2905 			return ret;
2906 		}
2907 	}
2908 
2909 	ret = alloc_noa_wait(stream);
2910 	if (ret) {
2911 		DRM_DEBUG("Unable to allocate NOA wait batch buffer\n");
2912 		goto err_noa_wait_alloc;
2913 	}
2914 
2915 	stream->oa_config = i915_perf_get_oa_config(perf, props->metrics_set);
2916 	if (!stream->oa_config) {
2917 		DRM_DEBUG("Invalid OA config id=%i\n", props->metrics_set);
2918 		ret = -EINVAL;
2919 		goto err_config;
2920 	}
2921 
2922 	/* PRM - observability performance counters:
2923 	 *
2924 	 *   OACONTROL, performance counter enable, note:
2925 	 *
2926 	 *   "When this bit is set, in order to have coherent counts,
2927 	 *   RC6 power state and trunk clock gating must be disabled.
2928 	 *   This can be achieved by programming MMIO registers as
2929 	 *   0xA094=0 and 0xA090[31]=1"
2930 	 *
2931 	 *   In our case we are expecting that taking pm + FORCEWAKE
2932 	 *   references will effectively disable RC6.
2933 	 */
2934 	intel_engine_pm_get(stream->engine);
2935 	intel_uncore_forcewake_get(stream->uncore, FORCEWAKE_ALL);
2936 
2937 	ret = alloc_oa_buffer(stream);
2938 	if (ret)
2939 		goto err_oa_buf_alloc;
2940 
2941 	stream->ops = &i915_oa_stream_ops;
2942 
2943 	perf->sseu = props->sseu;
2944 	WRITE_ONCE(perf->exclusive_stream, stream);
2945 
2946 	ret = i915_perf_stream_enable_sync(stream);
2947 	if (ret) {
2948 		DRM_DEBUG("Unable to enable metric set\n");
2949 		goto err_enable;
2950 	}
2951 
2952 	DRM_DEBUG("opening stream oa config uuid=%s\n",
2953 		  stream->oa_config->uuid);
2954 
2955 	hrtimer_init(&stream->poll_check_timer,
2956 		     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2957 	stream->poll_check_timer.function = oa_poll_check_timer_cb;
2958 	init_waitqueue_head(&stream->poll_wq);
2959 	spin_lock_init(&stream->oa_buffer.ptr_lock);
2960 
2961 	return 0;
2962 
2963 err_enable:
2964 	WRITE_ONCE(perf->exclusive_stream, NULL);
2965 	perf->ops.disable_metric_set(stream);
2966 
2967 	free_oa_buffer(stream);
2968 
2969 err_oa_buf_alloc:
2970 	free_oa_configs(stream);
2971 
2972 	intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL);
2973 	intel_engine_pm_put(stream->engine);
2974 
2975 err_config:
2976 	free_noa_wait(stream);
2977 
2978 err_noa_wait_alloc:
2979 	if (stream->ctx)
2980 		oa_put_render_ctx_id(stream);
2981 
2982 	return ret;
2983 }
2984 
2985 void i915_oa_init_reg_state(const struct intel_context *ce,
2986 			    const struct intel_engine_cs *engine)
2987 {
2988 	struct i915_perf_stream *stream;
2989 
2990 	if (engine->class != RENDER_CLASS)
2991 		return;
2992 
2993 	/* perf.exclusive_stream serialised by lrc_configure_all_contexts() */
2994 	stream = READ_ONCE(engine->i915->perf.exclusive_stream);
2995 	if (stream && INTEL_GEN(stream->perf->i915) < 12)
2996 		gen8_update_reg_state_unlocked(ce, stream);
2997 }
2998 
2999 /**
3000  * i915_perf_read - handles read() FOP for i915 perf stream FDs
3001  * @file: An i915 perf stream file
3002  * @buf: destination buffer given by userspace
3003  * @count: the number of bytes userspace wants to read
3004  * @ppos: (inout) file seek position (unused)
3005  *
3006  * The entry point for handling a read() on a stream file descriptor from
3007  * userspace. Most of the work is left to the i915_perf_read_locked() and
3008  * &i915_perf_stream_ops->read but to save having stream implementations (of
3009  * which we might have multiple later) we handle blocking read here.
3010  *
3011  * We can also consistently treat trying to read from a disabled stream
3012  * as an IO error so implementations can assume the stream is enabled
3013  * while reading.
3014  *
3015  * Returns: The number of bytes copied or a negative error code on failure.
3016  */
3017 static ssize_t i915_perf_read(struct file *file,
3018 			      char __user *buf,
3019 			      size_t count,
3020 			      loff_t *ppos)
3021 {
3022 	struct i915_perf_stream *stream = file->private_data;
3023 	struct i915_perf *perf = stream->perf;
3024 	size_t offset = 0;
3025 	int ret;
3026 
3027 	/* To ensure it's handled consistently we simply treat all reads of a
3028 	 * disabled stream as an error. In particular it might otherwise lead
3029 	 * to a deadlock for blocking file descriptors...
3030 	 */
3031 	if (!stream->enabled)
3032 		return -EIO;
3033 
3034 	if (!(file->f_flags & O_NONBLOCK)) {
3035 		/* There's the small chance of false positives from
3036 		 * stream->ops->wait_unlocked.
3037 		 *
3038 		 * E.g. with single context filtering since we only wait until
3039 		 * oabuffer has >= 1 report we don't immediately know whether
3040 		 * any reports really belong to the current context
3041 		 */
3042 		do {
3043 			ret = stream->ops->wait_unlocked(stream);
3044 			if (ret)
3045 				return ret;
3046 
3047 			mutex_lock(&perf->lock);
3048 			ret = stream->ops->read(stream, buf, count, &offset);
3049 			mutex_unlock(&perf->lock);
3050 		} while (!offset && !ret);
3051 	} else {
3052 		mutex_lock(&perf->lock);
3053 		ret = stream->ops->read(stream, buf, count, &offset);
3054 		mutex_unlock(&perf->lock);
3055 	}
3056 
3057 	/* We allow the poll checking to sometimes report false positive EPOLLIN
3058 	 * events where we might actually report EAGAIN on read() if there's
3059 	 * not really any data available. In this situation though we don't
3060 	 * want to enter a busy loop between poll() reporting a EPOLLIN event
3061 	 * and read() returning -EAGAIN. Clearing the oa.pollin state here
3062 	 * effectively ensures we back off until the next hrtimer callback
3063 	 * before reporting another EPOLLIN event.
3064 	 * The exception to this is if ops->read() returned -ENOSPC which means
3065 	 * that more OA data is available than could fit in the user provided
3066 	 * buffer. In this case we want the next poll() call to not block.
3067 	 */
3068 	if (ret != -ENOSPC)
3069 		stream->pollin = false;
3070 
3071 	/* Possible values for ret are 0, -EFAULT, -ENOSPC, -EIO, ... */
3072 	return offset ?: (ret ?: -EAGAIN);
3073 }
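
/*
 * Note on the return expression above: the GNU "?:" shorthand makes
 * the precedence explicit - report the bytes copied if any were,
 * otherwise propagate a real error, otherwise -EAGAIN. For example, a
 * read that copied some data before ops->read() failed with -EFAULT
 * still returns the byte count, matching normal read() semantics.
 */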
3074 
3075 static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
3076 {
3077 	struct i915_perf_stream *stream =
3078 		container_of(hrtimer, typeof(*stream), poll_check_timer);
3079 
3080 	if (oa_buffer_check_unlocked(stream)) {
3081 		stream->pollin = true;
3082 		wake_up(&stream->poll_wq);
3083 	}
3084 
3085 	hrtimer_forward_now(hrtimer,
3086 			    ns_to_ktime(stream->poll_oa_period));
3087 
3088 	return HRTIMER_RESTART;
3089 }
3090 
3091 /**
3092  * i915_perf_poll_locked - poll_wait() with a suitable wait queue for stream
3093  * @stream: An i915 perf stream
3094  * @file: An i915 perf stream file
3095  * @wait: poll() state table
3096  *
3097  * For handling userspace polling on an i915 perf stream, this calls through to
3098  * &i915_perf_stream_ops->poll_wait to call poll_wait() with a wait queue that
3099  * will be woken for new stream data.
3100  *
3101  * Note: The &perf->lock mutex has been taken to serialize
3102  * with any non-file-operation driver hooks.
3103  *
3104  * Returns: any poll events that are ready without sleeping
3105  */
3106 static __poll_t i915_perf_poll_locked(struct i915_perf_stream *stream,
3107 				      struct file *file,
3108 				      poll_table *wait)
3109 {
3110 	__poll_t events = 0;
3111 
3112 	stream->ops->poll_wait(stream, file, wait);
3113 
3114 	/* Note: we don't explicitly check whether there's something to read
3115 	 * here since this path may be very hot depending on what else
3116 	 * userspace is polling, or on the timeout in use. We rely solely on
3117 	 * the hrtimer/oa_poll_check_timer_cb to notify us when there are
3118 	 * samples to read.
3119 	 */
3120 	if (stream->pollin)
3121 		events |= EPOLLIN;
3122 
3123 	return events;
3124 }
3125 
3126 /**
3127  * i915_perf_poll - call poll_wait() with a suitable wait queue for stream
3128  * @file: An i915 perf stream file
3129  * @wait: poll() state table
3130  *
3131  * For handling userspace polling on an i915 perf stream, this ensures
3132  * poll_wait() gets called with a wait queue that will be woken for new stream
3133  * data.
3134  *
3135  * Note: Implementation deferred to i915_perf_poll_locked()
3136  *
3137  * Returns: any poll events that are ready without sleeping
3138  */
3139 static __poll_t i915_perf_poll(struct file *file, poll_table *wait)
3140 {
3141 	struct i915_perf_stream *stream = file->private_data;
3142 	struct i915_perf *perf = stream->perf;
3143 	__poll_t ret;
3144 
3145 	mutex_lock(&perf->lock);
3146 	ret = i915_perf_poll_locked(stream, file, wait);
3147 	mutex_unlock(&perf->lock);
3148 
3149 	return ret;
3150 }
3151 
3152 /**
3153  * i915_perf_enable_locked - handle `I915_PERF_IOCTL_ENABLE` ioctl
3154  * @stream: A disabled i915 perf stream
3155  *
3156  * [Re]enables the associated capture of data for this stream.
3157  *
3158  * If a stream was previously enabled then there's currently no intention
3159  * to provide userspace any guarantee about the preservation of previously
3160  * buffered data.
3161  */
3162 static void i915_perf_enable_locked(struct i915_perf_stream *stream)
3163 {
3164 	if (stream->enabled)
3165 		return;
3166 
3167 	/* Allow stream->ops->enable() to refer to this */
3168 	stream->enabled = true;
3169 
3170 	if (stream->ops->enable)
3171 		stream->ops->enable(stream);
3172 
3173 	if (stream->hold_preemption)
3174 		intel_context_set_nopreempt(stream->pinned_ctx);
3175 }
3176 
3177 /**
3178  * i915_perf_disable_locked - handle `I915_PERF_IOCTL_DISABLE` ioctl
3179  * @stream: An enabled i915 perf stream
3180  *
3181  * Disables the associated capture of data for this stream.
3182  *
3183  * The intention is that disabling and re-enabling a stream will ideally be
3184  * cheaper than destroying and re-opening a stream with the same configuration,
3185  * though there are no formal guarantees about what state or buffered data
3186  * must be retained between disabling and re-enabling a stream.
3187  *
3188  * Note: while a stream is disabled it's considered an error for userspace
3189  * to attempt to read from the stream (-EIO).
3190  */
3191 static void i915_perf_disable_locked(struct i915_perf_stream *stream)
3192 {
3193 	if (!stream->enabled)
3194 		return;
3195 
3196 	/* Allow stream->ops->disable() to refer to this */
3197 	stream->enabled = false;
3198 
3199 	if (stream->hold_preemption)
3200 		intel_context_clear_nopreempt(stream->pinned_ctx);
3201 
3202 	if (stream->ops->disable)
3203 		stream->ops->disable(stream);
3204 }
3205 
3206 static long i915_perf_config_locked(struct i915_perf_stream *stream,
3207 				    unsigned long metrics_set)
3208 {
3209 	struct i915_oa_config *config;
3210 	long ret = stream->oa_config->id;
3211 
3212 	config = i915_perf_get_oa_config(stream->perf, metrics_set);
3213 	if (!config)
3214 		return -EINVAL;
3215 
3216 	if (config != stream->oa_config) {
3217 		int err;
3218 
3219 		/*
3220 		 * If OA is bound to a specific context, emit the
3221 		 * reconfiguration inline from that context. The update
3222 		 * will then be ordered with respect to submission on that
3223 		 * context.
3224 		 *
3225 		 * When set globally, we use a low priority kernel context,
3226 		 * so it will effectively take effect when idle.
3227 		 */
3228 		err = emit_oa_config(stream, config, oa_context(stream), NULL);
3229 		if (!err)
3230 			config = xchg(&stream->oa_config, config);
3231 		else
3232 			ret = err;
3233 	}
3234 
3235 	i915_oa_config_put(config);
3236 
3237 	return ret;
3238 }
3239 
3240 /**
3241  * i915_perf_ioctl_locked - support ioctl() usage with i915 perf stream FDs
3242  * @stream: An i915 perf stream
3243  * @cmd: the ioctl request
3244  * @arg: the ioctl data
3245  *
3246  * Note: The &perf->lock mutex has been taken to serialize
3247  * with any non-file-operation driver hooks.
3248  *
3249  * Returns: zero on success or a negative error code. Returns -EINVAL for
3250  * an unknown ioctl request.
3251  */
3252 static long i915_perf_ioctl_locked(struct i915_perf_stream *stream,
3253 				   unsigned int cmd,
3254 				   unsigned long arg)
3255 {
3256 	switch (cmd) {
3257 	case I915_PERF_IOCTL_ENABLE:
3258 		i915_perf_enable_locked(stream);
3259 		return 0;
3260 	case I915_PERF_IOCTL_DISABLE:
3261 		i915_perf_disable_locked(stream);
3262 		return 0;
3263 	case I915_PERF_IOCTL_CONFIG:
3264 		return i915_perf_config_locked(stream, arg);
3265 	}
3266 
3267 	return -EINVAL;
3268 }
3269 
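/*
 * Illustrative sketch (not part of the driver): from userspace these
 * requests are plain ioctl()s on the stream fd, with the argument (if
 * any) passed by value rather than by pointer:
 *
 *	ioctl(stream_fd, I915_PERF_IOCTL_ENABLE, 0);
 *	ioctl(stream_fd, I915_PERF_IOCTL_CONFIG, metrics_set_id);
 *	ioctl(stream_fd, I915_PERF_IOCTL_DISABLE, 0);
 *
 * where metrics_set_id is assumed to be an ID advertised under sysfs
 * metrics/ or returned by DRM_IOCTL_I915_PERF_ADD_CONFIG.
 */
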
3270 /**
3271  * i915_perf_ioctl - support ioctl() usage with i915 perf stream FDs
3272  * @file: An i915 perf stream file
3273  * @cmd: the ioctl request
3274  * @arg: the ioctl data
3275  *
3276  * Implementation deferred to i915_perf_ioctl_locked().
3277  *
3278  * Returns: zero on success or a negative error code. Returns -EINVAL for
3279  * an unknown ioctl request.
3280  */
3281 static long i915_perf_ioctl(struct file *file,
3282 			    unsigned int cmd,
3283 			    unsigned long arg)
3284 {
3285 	struct i915_perf_stream *stream = file->private_data;
3286 	struct i915_perf *perf = stream->perf;
3287 	long ret;
3288 
3289 	mutex_lock(&perf->lock);
3290 	ret = i915_perf_ioctl_locked(stream, cmd, arg);
3291 	mutex_unlock(&perf->lock);
3292 
3293 	return ret;
3294 }
3295 
3296 /**
3297  * i915_perf_destroy_locked - destroy an i915 perf stream
3298  * @stream: An i915 perf stream
3299  *
3300  * Frees all resources associated with the given i915 perf @stream, disabling
3301  * any associated data capture in the process.
3302  *
3303  * Note: The &perf->lock mutex has been taken to serialize
3304  * with any non-file-operation driver hooks.
3305  */
3306 static void i915_perf_destroy_locked(struct i915_perf_stream *stream)
3307 {
3308 	if (stream->enabled)
3309 		i915_perf_disable_locked(stream);
3310 
3311 	if (stream->ops->destroy)
3312 		stream->ops->destroy(stream);
3313 
3314 	if (stream->ctx)
3315 		i915_gem_context_put(stream->ctx);
3316 
3317 	kfree(stream);
3318 }
3319 
3320 /**
3321  * i915_perf_release - handles userspace close() of a stream file
3322  * @inode: anonymous inode associated with file
3323  * @file: An i915 perf stream file
3324  *
3325  * Cleans up any resources associated with an open i915 perf stream file.
3326  *
3327  * NB: close() can't really fail from the userspace point of view.
3328  *
3329  * Returns: zero on success or a negative error code.
3330  */
3331 static int i915_perf_release(struct inode *inode, struct file *file)
3332 {
3333 	struct i915_perf_stream *stream = file->private_data;
3334 	struct i915_perf *perf = stream->perf;
3335 
3336 	mutex_lock(&perf->lock);
3337 	i915_perf_destroy_locked(stream);
3338 	mutex_unlock(&perf->lock);
3339 
3340 	/* Release the reference the perf stream kept on the driver. */
3341 	drm_dev_put(&perf->i915->drm);
3342 
3343 	return 0;
3344 }
3345 
3346 
3347 static const struct file_operations fops = {
3348 	.owner		= THIS_MODULE,
3349 	.llseek		= no_llseek,
3350 	.release	= i915_perf_release,
3351 	.poll		= i915_perf_poll,
3352 	.read		= i915_perf_read,
3353 	.unlocked_ioctl	= i915_perf_ioctl,
3354 	/* Our ioctls have no arguments, so it's safe to use the same function
3355 	 * to handle 32-bit compatibility.
3356 	 */
3357 	.compat_ioctl   = i915_perf_ioctl,
3358 };
3359 
3360 
3361 /**
3362  * i915_perf_open_ioctl_locked - DRM ioctl() for userspace to open a stream FD
3363  * @perf: i915 perf instance
3364  * @param: The open parameters passed to `DRM_I915_PERF_OPEN`
3365  * @props: individually validated u64 property value pairs
3366  * @file: drm file
3367  *
3368  * See i915_perf_ioctl_open() for interface details.
3369  *
3370  * Implements further stream config validation and stream initialization on
3371  * behalf of i915_perf_open_ioctl() with the &perf->lock mutex
3372  * taken to serialize with any non-file-operation driver hooks.
3373  *
3374  * Note: at this point the @props have only been validated in isolation and
3375  * it's still necessary to validate that the combination of properties makes
3376  * sense.
3377  *
3378  * In the case where userspace is interested in OA unit metrics then further
3379  * config validation and stream initialization details will be handled by
3380  * i915_oa_stream_init(). The code here should only validate config state that
3381  * will be relevant to all stream types / backends.
3382  *
3383  * Returns: zero on success or a negative error code.
3384  */
3385 static int
3386 i915_perf_open_ioctl_locked(struct i915_perf *perf,
3387 			    struct drm_i915_perf_open_param *param,
3388 			    struct perf_open_properties *props,
3389 			    struct drm_file *file)
3390 {
3391 	struct i915_gem_context *specific_ctx = NULL;
3392 	struct i915_perf_stream *stream = NULL;
3393 	unsigned long f_flags = 0;
3394 	bool privileged_op = true;
3395 	int stream_fd;
3396 	int ret;
3397 
3398 	if (props->single_context) {
3399 		u32 ctx_handle = props->ctx_handle;
3400 		struct drm_i915_file_private *file_priv = file->driver_priv;
3401 
3402 		specific_ctx = i915_gem_context_lookup(file_priv, ctx_handle);
3403 		if (!specific_ctx) {
3404 			DRM_DEBUG("Failed to look up context with ID %u for opening perf stream\n",
3405 				  ctx_handle);
3406 			ret = -ENOENT;
3407 			goto err;
3408 		}
3409 	}
3410 
3411 	/*
3412 	 * On Haswell the OA unit supports clock gating off for a specific
3413 	 * context and in this mode there's no visibility of metrics for the
3414 	 * rest of the system, which we consider acceptable for a
3415 	 * non-privileged client.
3416 	 *
3417 	 * For Gen8->11 the OA unit no longer supports clock gating off for a
3418 	 * specific context and the kernel can't securely stop the counters
3419 	 * from updating as system-wide / global values. Even though we can
3420 	 * filter reports based on the included context ID we can't block
3421 	 * clients from seeing the raw / global counter values via
3422 	 * MI_REPORT_PERF_COUNT commands and so consider it a privileged op to
3423 	 * enable the OA unit by default.
3424 	 *
3425 	 * For Gen12+ we gain a new OAR unit that only monitors the RCS on a
3426 	 * per-context basis. So we can relax requirements there if the user
3427 	 * doesn't request global stream access (i.e. query-based sampling
3428 	 * using MI_REPORT_PERF_COUNT).
3429 	 */
3430 	if (IS_HASWELL(perf->i915) && specific_ctx)
3431 		privileged_op = false;
3432 	else if (IS_GEN(perf->i915, 12) && specific_ctx &&
3433 		 (props->sample_flags & SAMPLE_OA_REPORT) == 0)
3434 		privileged_op = false;
3435 
3436 	if (props->hold_preemption) {
3437 		if (!props->single_context) {
3438 			DRM_DEBUG("preemption disable with no context\n");
3439 			ret = -EINVAL;
3440 			goto err;
3441 		}
3442 		privileged_op = true;
3443 	}
3444 
3445 	/*
3446 	 * Asking for SSEU configuration is a privileged operation.
3447 	 */
3448 	if (props->has_sseu)
3449 		privileged_op = true;
3450 	else
3451 		get_default_sseu_config(&props->sseu, props->engine);
3452 
3453 	/* Similar to perf's kernel.perf_event_paranoid sysctl option
3454 	 * we check a dev.i915.perf_stream_paranoid sysctl option
3455 	 * to determine if it's ok to access system wide OA counters
3456 	 * without CAP_PERFMON or CAP_SYS_ADMIN privileges.
3457 	 */
3458 	if (privileged_op &&
3459 	    i915_perf_stream_paranoid && !perfmon_capable()) {
3460 		DRM_DEBUG("Insufficient privileges to open i915 perf stream\n");
3461 		ret = -EACCES;
3462 		goto err_ctx;
3463 	}
3464 
3465 	stream = kzalloc(sizeof(*stream), GFP_KERNEL);
3466 	if (!stream) {
3467 		ret = -ENOMEM;
3468 		goto err_ctx;
3469 	}
3470 
3471 	stream->perf = perf;
3472 	stream->ctx = specific_ctx;
3473 	stream->poll_oa_period = props->poll_oa_period;
3474 
3475 	ret = i915_oa_stream_init(stream, param, props);
3476 	if (ret)
3477 		goto err_alloc;
3478 
3479 	/* we avoid simply assigning stream->sample_flags = props->sample_flags
3480 	 * to have _stream_init check the combination of sample flags more
3481 	 * thoroughly, but this is still the expected result at this point.
3482 	 */
3483 	if (WARN_ON(stream->sample_flags != props->sample_flags)) {
3484 		ret = -ENODEV;
3485 		goto err_flags;
3486 	}
3487 
3488 	if (param->flags & I915_PERF_FLAG_FD_CLOEXEC)
3489 		f_flags |= O_CLOEXEC;
3490 	if (param->flags & I915_PERF_FLAG_FD_NONBLOCK)
3491 		f_flags |= O_NONBLOCK;
3492 
3493 	stream_fd = anon_inode_getfd("[i915_perf]", &fops, stream, f_flags);
3494 	if (stream_fd < 0) {
3495 		ret = stream_fd;
3496 		goto err_flags;
3497 	}
3498 
3499 	if (!(param->flags & I915_PERF_FLAG_DISABLED))
3500 		i915_perf_enable_locked(stream);
3501 
3502 	/* Take a reference on the driver that will be kept with stream_fd
3503 	 * until its release.
3504 	 */
3505 	drm_dev_get(&perf->i915->drm);
3506 
3507 	return stream_fd;
3508 
3509 err_flags:
3510 	if (stream->ops->destroy)
3511 		stream->ops->destroy(stream);
3512 err_alloc:
3513 	kfree(stream);
3514 err_ctx:
3515 	if (specific_ctx)
3516 		i915_gem_context_put(specific_ctx);
3517 err:
3518 	return ret;
3519 }
3520 
3521 static u64 oa_exponent_to_ns(struct i915_perf *perf, int exponent)
3522 {
3523 	return intel_gt_clock_interval_to_ns(perf->i915->ggtt.vm.gt,
3524 					     2ULL << exponent);
3525 }
3526 
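/*
 * Illustrative arithmetic for the conversion above: the OA unit samples
 * every (2ULL << exponent), i.e. 2^(exponent + 1), timestamp ticks.
 * Assuming the 80ns (12.5MHz) tick implied by the 160ns HSW minimum
 * mentioned in read_properties_unlocked() below:
 *
 *	exponent = 0  ->      2 ticks ->     160ns
 *	exponent = 5  ->     64 ticks ->    5.12us
 *	exponent = 16 -> 131072 ticks -> ~10.49ms
 */
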
3527 /**
3528  * read_properties_unlocked - validate + copy userspace stream open properties
3529  * @perf: i915 perf instance
3530  * @uprops: The array of u64 key value pairs given by userspace
3531  * @n_props: The number of key value pairs expected in @uprops
3532  * @props: The stream configuration built up while validating properties
3533  *
3534  * Note this function only validates properties in isolation; it doesn't
3535  * validate that the combination of properties makes sense or that all
3536  * properties necessary for a particular kind of stream have been set.
3537  *
3538  * Note that there currently aren't any ordering requirements for properties so
3539  * we shouldn't validate or assume anything about ordering here. This doesn't
3540  * rule out defining new properties with ordering requirements in the future.
3541  */
3542 static int read_properties_unlocked(struct i915_perf *perf,
3543 				    u64 __user *uprops,
3544 				    u32 n_props,
3545 				    struct perf_open_properties *props)
3546 {
3547 	u64 __user *uprop = uprops;
3548 	u32 i;
3549 	int ret;
3550 
3551 	memset(props, 0, sizeof(struct perf_open_properties));
3552 	props->poll_oa_period = DEFAULT_POLL_PERIOD_NS;
3553 
3554 	if (!n_props) {
3555 		DRM_DEBUG("No i915 perf properties given\n");
3556 		return -EINVAL;
3557 	}
3558 
3559 	/* At the moment we only support using i915-perf on the RCS. */
3560 	props->engine = intel_engine_lookup_user(perf->i915,
3561 						 I915_ENGINE_CLASS_RENDER,
3562 						 0);
3563 	if (!props->engine) {
3564 		DRM_DEBUG("No RENDER-capable engines\n");
3565 		return -EINVAL;
3566 	}
3567 
3568 	/* Considering that ID = 0 is reserved and assuming that we don't
3569 	 * (currently) expect any configuration to specify duplicate values
3570 	 * for a particular property ID, the last _PROP_MAX value is one
3571 	 * greater than the maximum number of properties we expect to get
3572 	 * from userspace.
3573 	 */
3574 	if (n_props >= DRM_I915_PERF_PROP_MAX) {
3575 		DRM_DEBUG("More i915 perf properties specified than exist\n");
3576 		return -EINVAL;
3577 	}
3578 
3579 	for (i = 0; i < n_props; i++) {
3580 		u64 oa_period, oa_freq_hz;
3581 		u64 id, value;
3582 
3583 		ret = get_user(id, uprop);
3584 		if (ret)
3585 			return ret;
3586 
3587 		ret = get_user(value, uprop + 1);
3588 		if (ret)
3589 			return ret;
3590 
3591 		if (id == 0 || id >= DRM_I915_PERF_PROP_MAX) {
3592 			DRM_DEBUG("Unknown i915 perf property ID\n");
3593 			return -EINVAL;
3594 		}
3595 
3596 		switch ((enum drm_i915_perf_property_id)id) {
3597 		case DRM_I915_PERF_PROP_CTX_HANDLE:
3598 			props->single_context = 1;
3599 			props->ctx_handle = value;
3600 			break;
3601 		case DRM_I915_PERF_PROP_SAMPLE_OA:
3602 			if (value)
3603 				props->sample_flags |= SAMPLE_OA_REPORT;
3604 			break;
3605 		case DRM_I915_PERF_PROP_OA_METRICS_SET:
3606 			if (value == 0) {
3607 				DRM_DEBUG("Unknown OA metric set ID\n");
3608 				return -EINVAL;
3609 			}
3610 			props->metrics_set = value;
3611 			break;
3612 		case DRM_I915_PERF_PROP_OA_FORMAT:
3613 			if (value == 0 || value >= I915_OA_FORMAT_MAX) {
3614 				DRM_DEBUG("Out-of-range OA report format %llu\n",
3615 					  value);
3616 				return -EINVAL;
3617 			}
3618 			if (!perf->oa_formats[value].size) {
3619 				DRM_DEBUG("Unsupported OA report format %llu\n",
3620 					  value);
3621 				return -EINVAL;
3622 			}
3623 			props->oa_format = value;
3624 			break;
3625 		case DRM_I915_PERF_PROP_OA_EXPONENT:
3626 			if (value > OA_EXPONENT_MAX) {
3627 				DRM_DEBUG("OA timer exponent too high (> %u)\n",
3628 					 OA_EXPONENT_MAX);
3629 				return -EINVAL;
3630 			}
3631 
3632 			/* Theoretically we can program the OA unit to sample
3633 			 * e.g. every 160ns for HSW, 167ns for BDW/SKL or 104ns
3634 			 * for BXT. We don't allow such high sampling
3635 			 * frequencies by default for unprivileged clients.
3636 			 */
3637 
3638 			BUILD_BUG_ON(sizeof(oa_period) != 8);
3639 			oa_period = oa_exponent_to_ns(perf, value);
3640 
3641 			/* This check is primarily to ensure that oa_period <=
3642 			 * UINT32_MAX (before passing to do_div which only
3643 			 * accepts a u32 denominator), but we can also skip
3644 			 * checking anything < 1Hz which implicitly can't be
3645 			 * limited via an integer oa_max_sample_rate.
3646 			 */
3647 			if (oa_period <= NSEC_PER_SEC) {
3648 				u64 tmp = NSEC_PER_SEC;
3649 				do_div(tmp, oa_period);
3650 				oa_freq_hz = tmp;
3651 			} else
3652 				oa_freq_hz = 0;
3653 
3654 			if (oa_freq_hz > i915_oa_max_sample_rate && !perfmon_capable()) {
3655 				DRM_DEBUG("OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without CAP_PERFMON or CAP_SYS_ADMIN privileges\n",
3656 					  i915_oa_max_sample_rate);
3657 				return -EACCES;
3658 			}
3659 
3660 			props->oa_periodic = true;
3661 			props->oa_period_exponent = value;
3662 			break;
3663 		case DRM_I915_PERF_PROP_HOLD_PREEMPTION:
3664 			props->hold_preemption = !!value;
3665 			break;
3666 		case DRM_I915_PERF_PROP_GLOBAL_SSEU: {
3667 			struct drm_i915_gem_context_param_sseu user_sseu;
3668 
3669 			if (copy_from_user(&user_sseu,
3670 					   u64_to_user_ptr(value),
3671 					   sizeof(user_sseu))) {
3672 				DRM_DEBUG("Unable to copy global sseu parameter\n");
3673 				return -EFAULT;
3674 			}
3675 
3676 			ret = get_sseu_config(&props->sseu, props->engine, &user_sseu);
3677 			if (ret) {
3678 				DRM_DEBUG("Invalid SSEU configuration\n");
3679 				return ret;
3680 			}
3681 			props->has_sseu = true;
3682 			break;
3683 		}
3684 		case DRM_I915_PERF_PROP_POLL_OA_PERIOD:
3685 			if (value < 100000 /* 100us */) {
3686 				DRM_DEBUG("OA availability timer too small (%lluns < 100us)\n",
3687 					  value);
3688 				return -EINVAL;
3689 			}
3690 			props->poll_oa_period = value;
3691 			break;
3692 		case DRM_I915_PERF_PROP_MAX:
3693 			MISSING_CASE(id);
3694 			return -EINVAL;
3695 		}
3696 
3697 		uprop += 2;
3698 	}
3699 
3700 	return 0;
3701 }
3702 
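/*
 * Illustrative sketch (not part of the driver) of the flat u64
 * (key, value) layout parsed by read_properties_unlocked() above, as
 * passed to DRM_IOCTL_I915_PERF_OPEN; metrics_set_id is assumed to come
 * from sysfs or DRM_IOCTL_I915_PERF_ADD_CONFIG:
 *
 *	uint64_t props[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA,      1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
 *		DRM_I915_PERF_PROP_OA_FORMAT,      I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT,    16,
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *		.num_properties = 4, // number of (key, value) pairs
 *		.properties_ptr = (uintptr_t)props,
 *	};
 *	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 */
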
3703 /**
3704  * i915_perf_open_ioctl - DRM ioctl() for userspace to open a stream FD
3705  * @dev: drm device
3706  * @data: ioctl data copied from userspace (unvalidated)
3707  * @file: drm file
3708  *
3709  * Validates the stream open parameters given by userspace including flags
3710  * and an array of u64 key, value pair properties.
3711  *
3712  * Very little is assumed up front about the nature of the stream being
3713  * opened (for instance we don't assume it's for periodic OA unit metrics). An
3714  * i915-perf stream is expected to be a suitable interface for other forms of
3715  * buffered data written by the GPU besides periodic OA metrics.
3716  *
3717  * Note we copy the properties from userspace outside of the i915 perf
3718  * mutex to avoid an awkward lockdep interaction with mmap_lock.
3719  *
3720  * Most of the implementation details are handled by
3721  * i915_perf_open_ioctl_locked() after taking the &perf->lock
3722  * mutex for serializing with any non-file-operation driver hooks.
3723  *
3724  * Return: A newly opened i915 Perf stream file descriptor or negative
3725  * error code on failure.
3726  */
3727 int i915_perf_open_ioctl(struct drm_device *dev, void *data,
3728 			 struct drm_file *file)
3729 {
3730 	struct i915_perf *perf = &to_i915(dev)->perf;
3731 	struct drm_i915_perf_open_param *param = data;
3732 	struct perf_open_properties props;
3733 	u32 known_open_flags;
3734 	int ret;
3735 
3736 	if (!perf->i915) {
3737 		DRM_DEBUG("i915 perf interface not available for this system\n");
3738 		return -ENOTSUPP;
3739 	}
3740 
3741 	known_open_flags = I915_PERF_FLAG_FD_CLOEXEC |
3742 			   I915_PERF_FLAG_FD_NONBLOCK |
3743 			   I915_PERF_FLAG_DISABLED;
3744 	if (param->flags & ~known_open_flags) {
3745 		DRM_DEBUG("Unknown drm_i915_perf_open_param flag\n");
3746 		return -EINVAL;
3747 	}
3748 
3749 	ret = read_properties_unlocked(perf,
3750 				       u64_to_user_ptr(param->properties_ptr),
3751 				       param->num_properties,
3752 				       &props);
3753 	if (ret)
3754 		return ret;
3755 
3756 	mutex_lock(&perf->lock);
3757 	ret = i915_perf_open_ioctl_locked(perf, param, &props, file);
3758 	mutex_unlock(&perf->lock);
3759 
3760 	return ret;
3761 }
3762 
3763 /**
3764  * i915_perf_register - exposes i915-perf to userspace
3765  * @i915: i915 device instance
3766  *
3767  * In particular OA metric sets are advertised under a sysfs metrics/
3768  * directory allowing userspace to enumerate valid IDs that can be
3769  * used to open an i915-perf stream.
3770  */
3771 void i915_perf_register(struct drm_i915_private *i915)
3772 {
3773 	struct i915_perf *perf = &i915->perf;
3774 
3775 	if (!perf->i915)
3776 		return;
3777 
3778 	/* Hold &perf->lock to be sure we're synchronized with any attempted
3779 	 * i915_perf_open_ioctl(), considering that we only register after
3780 	 * the driver has already been exposed to userspace.
3781 	 */
3782 	mutex_lock(&perf->lock);
3783 
3784 	perf->metrics_kobj =
3785 		kobject_create_and_add("metrics",
3786 				       &i915->drm.primary->kdev->kobj);
3787 
3788 	mutex_unlock(&perf->lock);
3789 }
3790 
3791 /**
3792  * i915_perf_unregister - hide i915-perf from userspace
3793  * @i915: i915 device instance
3794  *
3795  * i915-perf state cleanup is split up into an 'unregister' and
3796  * 'deinit' phase where the interface is first hidden from
3797  * userspace by i915_perf_unregister() before cleaning up
3798  * remaining state in i915_perf_fini().
3799  */
3800 void i915_perf_unregister(struct drm_i915_private *i915)
3801 {
3802 	struct i915_perf *perf = &i915->perf;
3803 
3804 	if (!perf->metrics_kobj)
3805 		return;
3806 
3807 	kobject_put(perf->metrics_kobj);
3808 	perf->metrics_kobj = NULL;
3809 }
3810 
3811 static bool gen8_is_valid_flex_addr(struct i915_perf *perf, u32 addr)
3812 {
3813 	static const i915_reg_t flex_eu_regs[] = {
3814 		EU_PERF_CNTL0,
3815 		EU_PERF_CNTL1,
3816 		EU_PERF_CNTL2,
3817 		EU_PERF_CNTL3,
3818 		EU_PERF_CNTL4,
3819 		EU_PERF_CNTL5,
3820 		EU_PERF_CNTL6,
3821 	};
3822 	int i;
3823 
3824 	for (i = 0; i < ARRAY_SIZE(flex_eu_regs); i++) {
3825 		if (i915_mmio_reg_offset(flex_eu_regs[i]) == addr)
3826 			return true;
3827 	}
3828 	return false;
3829 }
3830 
3831 #define ADDR_IN_RANGE(addr, start, end) \
3832 	((addr) >= (start) && \
3833 	 (addr) <= (end))
3834 
3835 #define REG_IN_RANGE(addr, start, end) \
3836 	((addr) >= i915_mmio_reg_offset(start) && \
3837 	 (addr) <= i915_mmio_reg_offset(end))
3838 
3839 #define REG_EQUAL(addr, mmio) \
3840 	((addr) == i915_mmio_reg_offset(mmio))
3841 
3842 static bool gen7_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
3843 {
3844 	return REG_IN_RANGE(addr, OASTARTTRIG1, OASTARTTRIG8) ||
3845 	       REG_IN_RANGE(addr, OAREPORTTRIG1, OAREPORTTRIG8) ||
3846 	       REG_IN_RANGE(addr, OACEC0_0, OACEC7_1);
3847 }
3848 
3849 static bool gen7_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
3850 {
3851 	return REG_EQUAL(addr, HALF_SLICE_CHICKEN2) ||
3852 	       REG_IN_RANGE(addr, MICRO_BP0_0, NOA_WRITE) ||
3853 	       REG_IN_RANGE(addr, OA_PERFCNT1_LO, OA_PERFCNT2_HI) ||
3854 	       REG_IN_RANGE(addr, OA_PERFMATRIX_LO, OA_PERFMATRIX_HI);
3855 }
3856 
3857 static bool gen8_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
3858 {
3859 	return gen7_is_valid_mux_addr(perf, addr) ||
3860 	       REG_EQUAL(addr, WAIT_FOR_RC6_EXIT) ||
3861 	       REG_IN_RANGE(addr, RPM_CONFIG0, NOA_CONFIG(8));
3862 }
3863 
3864 static bool gen10_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
3865 {
3866 	return gen8_is_valid_mux_addr(perf, addr) ||
3867 	       REG_EQUAL(addr, GEN10_NOA_WRITE_HIGH) ||
3868 	       REG_IN_RANGE(addr, OA_PERFCNT3_LO, OA_PERFCNT4_HI);
3869 }
3870 
3871 static bool hsw_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
3872 {
3873 	return gen7_is_valid_mux_addr(perf, addr) ||
3874 	       ADDR_IN_RANGE(addr, 0x25100, 0x2FF90) ||
3875 	       REG_IN_RANGE(addr, HSW_MBVID2_NOA0, HSW_MBVID2_NOA9) ||
3876 	       REG_EQUAL(addr, HSW_MBVID2_MISR0);
3877 }
3878 
3879 static bool chv_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
3880 {
3881 	return gen7_is_valid_mux_addr(perf, addr) ||
3882 	       ADDR_IN_RANGE(addr, 0x182300, 0x1823A4);
3883 }
3884 
3885 static bool gen12_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
3886 {
3887 	return REG_IN_RANGE(addr, GEN12_OAG_OASTARTTRIG1, GEN12_OAG_OASTARTTRIG8) ||
3888 	       REG_IN_RANGE(addr, GEN12_OAG_OAREPORTTRIG1, GEN12_OAG_OAREPORTTRIG8) ||
3889 	       REG_IN_RANGE(addr, GEN12_OAG_CEC0_0, GEN12_OAG_CEC7_1) ||
3890 	       REG_IN_RANGE(addr, GEN12_OAG_SCEC0_0, GEN12_OAG_SCEC7_1) ||
3891 	       REG_EQUAL(addr, GEN12_OAA_DBG_REG) ||
3892 	       REG_EQUAL(addr, GEN12_OAG_OA_PESS) ||
3893 	       REG_EQUAL(addr, GEN12_OAG_SPCTR_CNF);
3894 }
3895 
3896 static bool gen12_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
3897 {
3898 	return REG_EQUAL(addr, NOA_WRITE) ||
3899 	       REG_EQUAL(addr, GEN10_NOA_WRITE_HIGH) ||
3900 	       REG_EQUAL(addr, GDT_CHICKEN_BITS) ||
3901 	       REG_EQUAL(addr, WAIT_FOR_RC6_EXIT) ||
3902 	       REG_EQUAL(addr, RPM_CONFIG0) ||
3903 	       REG_EQUAL(addr, RPM_CONFIG1) ||
3904 	       REG_IN_RANGE(addr, NOA_CONFIG(0), NOA_CONFIG(8));
3905 }
3906 
3907 static u32 mask_reg_value(u32 reg, u32 val)
3908 {
3909 	/* HALF_SLICE_CHICKEN2 is programmed with the
3910 	 * WaDisableSTUnitPowerOptimization workaround. Make sure the value
3911 	 * programmed by userspace doesn't change this.
3912 	 */
3913 	if (REG_EQUAL(reg, HALF_SLICE_CHICKEN2))
3914 		val = val & ~_MASKED_BIT_ENABLE(GEN8_ST_PO_DISABLE);
3915 
3916 	/* WAIT_FOR_RC6_EXIT has only one bit fulfilling the function
3917 	 * indicated by its name and a bunch of selection fields used by OA
3918 	 * configs.
3919 	 */
3920 	if (REG_EQUAL(reg, WAIT_FOR_RC6_EXIT))
3921 		val = val & ~_MASKED_BIT_ENABLE(HSW_WAIT_FOR_RC6_EXIT_ENABLE);
3922 
3923 	return val;
3924 }
3925 
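/*
 * Both registers handled above are "masked": the upper 16 bits of a
 * write select which of the lower 16 bits actually get updated, so
 * clearing _MASKED_BIT_ENABLE(bit) turns an attempt to set that bit
 * into a write that leaves it untouched. Illustrative arithmetic for a
 * hypothetical bit n:
 *
 *	_MASKED_BIT_ENABLE(BIT(n)) == (BIT(n) << 16) | BIT(n)
 *	val &= ~_MASKED_BIT_ENABLE(BIT(n)); // neither selects nor sets bit n
 */
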
3926 static struct i915_oa_reg *alloc_oa_regs(struct i915_perf *perf,
3927 					 bool (*is_valid)(struct i915_perf *perf, u32 addr),
3928 					 u32 __user *regs,
3929 					 u32 n_regs)
3930 {
3931 	struct i915_oa_reg *oa_regs;
3932 	int err;
3933 	u32 i;
3934 
3935 	if (!n_regs)
3936 		return NULL;
3937 
3938 	/* No is_valid function means we're not allowing any register to be programmed. */
3939 	GEM_BUG_ON(!is_valid);
3940 	if (!is_valid)
3941 		return ERR_PTR(-EINVAL);
3942 
3943 	oa_regs = kmalloc_array(n_regs, sizeof(*oa_regs), GFP_KERNEL);
3944 	if (!oa_regs)
3945 		return ERR_PTR(-ENOMEM);
3946 
3947 	for (i = 0; i < n_regs; i++) {
3948 		u32 addr, value;
3949 
3950 		err = get_user(addr, regs);
3951 		if (err)
3952 			goto addr_err;
3953 
3954 		if (!is_valid(perf, addr)) {
3955 			DRM_DEBUG("Invalid oa_reg address: %X\n", addr);
3956 			err = -EINVAL;
3957 			goto addr_err;
3958 		}
3959 
3960 		err = get_user(value, regs + 1);
3961 		if (err)
3962 			goto addr_err;
3963 
3964 		oa_regs[i].addr = _MMIO(addr);
3965 		oa_regs[i].value = mask_reg_value(addr, value);
3966 
3967 		regs += 2;
3968 	}
3969 
3970 	return oa_regs;
3971 
3972 addr_err:
3973 	kfree(oa_regs);
3974 	return ERR_PTR(err);
3975 }
3976 
3977 static ssize_t show_dynamic_id(struct device *dev,
3978 			       struct device_attribute *attr,
3979 			       char *buf)
3980 {
3981 	struct i915_oa_config *oa_config =
3982 		container_of(attr, typeof(*oa_config), sysfs_metric_id);
3983 
3984 	return sprintf(buf, "%d\n", oa_config->id);
3985 }
3986 
3987 static int create_dynamic_oa_sysfs_entry(struct i915_perf *perf,
3988 					 struct i915_oa_config *oa_config)
3989 {
3990 	sysfs_attr_init(&oa_config->sysfs_metric_id.attr);
3991 	oa_config->sysfs_metric_id.attr.name = "id";
3992 	oa_config->sysfs_metric_id.attr.mode = S_IRUGO;
3993 	oa_config->sysfs_metric_id.show = show_dynamic_id;
3994 	oa_config->sysfs_metric_id.store = NULL;
3995 
3996 	oa_config->attrs[0] = &oa_config->sysfs_metric_id.attr;
3997 	oa_config->attrs[1] = NULL;
3998 
3999 	oa_config->sysfs_metric.name = oa_config->uuid;
4000 	oa_config->sysfs_metric.attrs = oa_config->attrs;
4001 
4002 	return sysfs_create_group(perf->metrics_kobj,
4003 				  &oa_config->sysfs_metric);
4004 }
4005 
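/*
 * Illustrative sketch (not part of the driver) of adding a config from
 * userspace; each register list is a flat array of u32 (address, value)
 * pairs, and mux_regs/n_mux etc. are assumed to come from a metrics
 * description:
 *
 *	struct drm_i915_perf_oa_config config = {
 *		.uuid = "01234567-0123-0123-0123-0123456789ab",
 *		.n_mux_regs = n_mux,
 *		.mux_regs_ptr = (uintptr_t)mux_regs,
 *		.n_boolean_regs = n_bool,
 *		.boolean_regs_ptr = (uintptr_t)bool_regs,
 *	};
 *	int id = ioctl(drm_fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &config);
 *
 * On success the config appears as metrics/<uuid>/id in sysfs and can
 * later be removed by passing a pointer to the u64 id to
 * DRM_IOCTL_I915_PERF_REMOVE_CONFIG.
 */
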
4006 /**
4007  * i915_perf_add_config_ioctl - DRM ioctl() for userspace to add a new OA config
4008  * @dev: drm device
4009  * @data: ioctl data (pointer to struct drm_i915_perf_oa_config) copied from
4010  *        userspace (unvalidated)
4011  * @file: drm file
4012  *
4013  * Validates the submitted OA registers to be saved into a new OA config that
4014  * can then be used for programming the OA unit and its NOA network.
4015  *
4016  * Returns: A new allocated config number to be used with the perf open ioctl
4017  * or a negative error code on failure.
4018  */
4019 int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
4020 			       struct drm_file *file)
4021 {
4022 	struct i915_perf *perf = &to_i915(dev)->perf;
4023 	struct drm_i915_perf_oa_config *args = data;
4024 	struct i915_oa_config *oa_config, *tmp;
4025 	struct i915_oa_reg *regs;
4026 	int err, id;
4027 
4028 	if (!perf->i915) {
4029 		DRM_DEBUG("i915 perf interface not available for this system\n");
4030 		return -ENOTSUPP;
4031 	}
4032 
4033 	if (!perf->metrics_kobj) {
4034 		DRM_DEBUG("OA metrics weren't advertised via sysfs\n");
4035 		return -EINVAL;
4036 	}
4037 
4038 	if (i915_perf_stream_paranoid && !perfmon_capable()) {
4039 		DRM_DEBUG("Insufficient privileges to add i915 OA config\n");
4040 		return -EACCES;
4041 	}
4042 
4043 	if ((!args->mux_regs_ptr || !args->n_mux_regs) &&
4044 	    (!args->boolean_regs_ptr || !args->n_boolean_regs) &&
4045 	    (!args->flex_regs_ptr || !args->n_flex_regs)) {
4046 		DRM_DEBUG("No OA registers given\n");
4047 		return -EINVAL;
4048 	}
4049 
4050 	oa_config = kzalloc(sizeof(*oa_config), GFP_KERNEL);
4051 	if (!oa_config) {
4052 		DRM_DEBUG("Failed to allocate memory for the OA config\n");
4053 		return -ENOMEM;
4054 	}
4055 
4056 	oa_config->perf = perf;
4057 	kref_init(&oa_config->ref);
4058 
4059 	if (!uuid_is_valid(args->uuid)) {
4060 		DRM_DEBUG("Invalid uuid format for OA config\n");
4061 		err = -EINVAL;
4062 		goto reg_err;
4063 	}
4064 
4065 	/* Last character in oa_config->uuid will be 0 because oa_config was
4066 	 * allocated with kzalloc().
4067 	 */
4068 	memcpy(oa_config->uuid, args->uuid, sizeof(args->uuid));
4069 
4070 	oa_config->mux_regs_len = args->n_mux_regs;
4071 	regs = alloc_oa_regs(perf,
4072 			     perf->ops.is_valid_mux_reg,
4073 			     u64_to_user_ptr(args->mux_regs_ptr),
4074 			     args->n_mux_regs);
4075 
4076 	if (IS_ERR(regs)) {
4077 		DRM_DEBUG("Failed to create OA config for mux_regs\n");
4078 		err = PTR_ERR(regs);
4079 		goto reg_err;
4080 	}
4081 	oa_config->mux_regs = regs;
4082 
4083 	oa_config->b_counter_regs_len = args->n_boolean_regs;
4084 	regs = alloc_oa_regs(perf,
4085 			     perf->ops.is_valid_b_counter_reg,
4086 			     u64_to_user_ptr(args->boolean_regs_ptr),
4087 			     args->n_boolean_regs);
4088 
4089 	if (IS_ERR(regs)) {
4090 		DRM_DEBUG("Failed to create OA config for b_counter_regs\n");
4091 		err = PTR_ERR(regs);
4092 		goto reg_err;
4093 	}
4094 	oa_config->b_counter_regs = regs;
4095 
4096 	if (INTEL_GEN(perf->i915) < 8) {
4097 		if (args->n_flex_regs != 0) {
4098 			err = -EINVAL;
4099 			goto reg_err;
4100 		}
4101 	} else {
4102 		oa_config->flex_regs_len = args->n_flex_regs;
4103 		regs = alloc_oa_regs(perf,
4104 				     perf->ops.is_valid_flex_reg,
4105 				     u64_to_user_ptr(args->flex_regs_ptr),
4106 				     args->n_flex_regs);
4107 
4108 		if (IS_ERR(regs)) {
4109 			DRM_DEBUG("Failed to create OA config for flex_regs\n");
4110 			err = PTR_ERR(regs);
4111 			goto reg_err;
4112 		}
4113 		oa_config->flex_regs = regs;
4114 	}
4115 
4116 	err = mutex_lock_interruptible(&perf->metrics_lock);
4117 	if (err)
4118 		goto reg_err;
4119 
4120 	/* We shouldn't have too many configs, so this iteration shouldn't be
4121 	 * too costly.
4122 	 */
4123 	idr_for_each_entry(&perf->metrics_idr, tmp, id) {
4124 		if (!strcmp(tmp->uuid, oa_config->uuid)) {
4125 			DRM_DEBUG("OA config already exists with this uuid\n");
4126 			err = -EADDRINUSE;
4127 			goto sysfs_err;
4128 		}
4129 	}
4130 
4131 	err = create_dynamic_oa_sysfs_entry(perf, oa_config);
4132 	if (err) {
4133 		DRM_DEBUG("Failed to create sysfs entry for OA config\n");
4134 		goto sysfs_err;
4135 	}
4136 
4137 	/* Config id 0 is invalid; id 1 is reserved for the kernel's test config. */
4138 	oa_config->id = idr_alloc(&perf->metrics_idr,
4139 				  oa_config, 2,
4140 				  0, GFP_KERNEL);
4141 	if (oa_config->id < 0) {
4142 		DRM_DEBUG("Failed to allocate an ID for the OA config\n");
4143 		err = oa_config->id;
4144 		goto sysfs_err;
4145 	}
4146 
4147 	mutex_unlock(&perf->metrics_lock);
4148 
4149 	DRM_DEBUG("Added config %s id=%i\n", oa_config->uuid, oa_config->id);
4150 
4151 	return oa_config->id;
4152 
4153 sysfs_err:
4154 	mutex_unlock(&perf->metrics_lock);
4155 reg_err:
4156 	i915_oa_config_put(oa_config);
4157 	DRM_DEBUG("Failed to add new OA config\n");
4158 	return err;
4159 }
4160 
4161 /**
4162  * i915_perf_remove_config_ioctl - DRM ioctl() for userspace to remove an OA config
4163  * @dev: drm device
4164  * @data: ioctl data (pointer to u64 integer) copied from userspace
4165  * @file: drm file
4166  *
4167  * Configs can be removed while being used; they will stop appearing in sysfs
4168  * and their content will be freed when the stream using the config is closed.
4169  *
4170  * Returns: 0 on success or a negative error code on failure.
4171  */
4172 int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
4173 				  struct drm_file *file)
4174 {
4175 	struct i915_perf *perf = &to_i915(dev)->perf;
4176 	u64 *arg = data;
4177 	struct i915_oa_config *oa_config;
4178 	int ret;
4179 
4180 	if (!perf->i915) {
4181 		DRM_DEBUG("i915 perf interface not available for this system\n");
4182 		return -ENOTSUPP;
4183 	}
4184 
4185 	if (i915_perf_stream_paranoid && !perfmon_capable()) {
4186 		DRM_DEBUG("Insufficient privileges to remove i915 OA config\n");
4187 		return -EACCES;
4188 	}
4189 
4190 	ret = mutex_lock_interruptible(&perf->metrics_lock);
4191 	if (ret)
4192 		return ret;
4193 
4194 	oa_config = idr_find(&perf->metrics_idr, *arg);
4195 	if (!oa_config) {
4196 		DRM_DEBUG("Failed to remove unknown OA config\n");
4197 		ret = -ENOENT;
4198 		goto err_unlock;
4199 	}
4200 
4201 	GEM_BUG_ON(*arg != oa_config->id);
4202 
4203 	sysfs_remove_group(perf->metrics_kobj, &oa_config->sysfs_metric);
4204 
4205 	idr_remove(&perf->metrics_idr, *arg);
4206 
4207 	mutex_unlock(&perf->metrics_lock);
4208 
4209 	DRM_DEBUG("Removed config %s id=%i\n", oa_config->uuid, oa_config->id);
4210 
4211 	i915_oa_config_put(oa_config);
4212 
4213 	return 0;
4214 
4215 err_unlock:
4216 	mutex_unlock(&perf->metrics_lock);
4217 	return ret;
4218 }
4219 
4220 static struct ctl_table oa_table[] = {
4221 	{
4222 	 .procname = "perf_stream_paranoid",
4223 	 .data = &i915_perf_stream_paranoid,
4224 	 .maxlen = sizeof(i915_perf_stream_paranoid),
4225 	 .mode = 0644,
4226 	 .proc_handler = proc_dointvec_minmax,
4227 	 .extra1 = SYSCTL_ZERO,
4228 	 .extra2 = SYSCTL_ONE,
4229 	 },
4230 	{
4231 	 .procname = "oa_max_sample_rate",
4232 	 .data = &i915_oa_max_sample_rate,
4233 	 .maxlen = sizeof(i915_oa_max_sample_rate),
4234 	 .mode = 0644,
4235 	 .proc_handler = proc_dointvec_minmax,
4236 	 .extra1 = SYSCTL_ZERO,
4237 	 .extra2 = &oa_sample_rate_hard_limit,
4238 	 },
4239 	{}
4240 };
4241 
4242 static struct ctl_table i915_root[] = {
4243 	{
4244 	 .procname = "i915",
4245 	 .maxlen = 0,
4246 	 .mode = 0555,
4247 	 .child = oa_table,
4248 	 },
4249 	{}
4250 };
4251 
4252 static struct ctl_table dev_root[] = {
4253 	{
4254 	 .procname = "dev",
4255 	 .maxlen = 0,
4256 	 .mode = 0555,
4257 	 .child = i915_root,
4258 	 },
4259 	{}
4260 };
4261 
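/*
 * These are exposed as /proc/sys/dev/i915/perf_stream_paranoid and
 * /proc/sys/dev/i915/oa_max_sample_rate, e.g. (illustrative):
 *
 *	# sysctl dev.i915.perf_stream_paranoid=0
 */
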
4262 /**
4263  * i915_perf_init - initialize i915-perf state on module bind
4264  * @i915: i915 device instance
4265  *
4266  * Initializes i915-perf state without exposing anything to userspace.
4267  *
4268  * Note: i915-perf initialization is split into an 'init' and 'register'
4269  * phase with the i915_perf_register() exposing state to userspace.
4270  */
4271 void i915_perf_init(struct drm_i915_private *i915)
4272 {
4273 	struct i915_perf *perf = &i915->perf;
4274 
4275 	/* XXX const struct i915_perf_ops! */
4276 
4277 	if (IS_HASWELL(i915)) {
4278 		perf->ops.is_valid_b_counter_reg = gen7_is_valid_b_counter_addr;
4279 		perf->ops.is_valid_mux_reg = hsw_is_valid_mux_addr;
4280 		perf->ops.is_valid_flex_reg = NULL;
4281 		perf->ops.enable_metric_set = hsw_enable_metric_set;
4282 		perf->ops.disable_metric_set = hsw_disable_metric_set;
4283 		perf->ops.oa_enable = gen7_oa_enable;
4284 		perf->ops.oa_disable = gen7_oa_disable;
4285 		perf->ops.read = gen7_oa_read;
4286 		perf->ops.oa_hw_tail_read = gen7_oa_hw_tail_read;
4287 
4288 		perf->oa_formats = hsw_oa_formats;
4289 	} else if (HAS_LOGICAL_RING_CONTEXTS(i915)) {
4290 		/* Note: although we could theoretically also support the
4291 		 * legacy ringbuffer mode on BDW (and earlier iterations of
4292 		 * this driver, before upstreaming, did so) it didn't seem
4293 		 * worth the complexity to maintain now that BDW+ enable
4294 		 * execlist mode by default.
4295 		 */
4296 		perf->ops.read = gen8_oa_read;
4297 
4298 		if (IS_GEN_RANGE(i915, 8, 9)) {
4299 			perf->oa_formats = gen8_plus_oa_formats;
4300 
4301 			perf->ops.is_valid_b_counter_reg =
4302 				gen7_is_valid_b_counter_addr;
4303 			perf->ops.is_valid_mux_reg =
4304 				gen8_is_valid_mux_addr;
4305 			perf->ops.is_valid_flex_reg =
4306 				gen8_is_valid_flex_addr;
4307 
4308 			if (IS_CHERRYVIEW(i915)) {
4309 				perf->ops.is_valid_mux_reg =
4310 					chv_is_valid_mux_addr;
4311 			}
4312 
4313 			perf->ops.oa_enable = gen8_oa_enable;
4314 			perf->ops.oa_disable = gen8_oa_disable;
4315 			perf->ops.enable_metric_set = gen8_enable_metric_set;
4316 			perf->ops.disable_metric_set = gen8_disable_metric_set;
4317 			perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
4318 
4319 			if (IS_GEN(i915, 8)) {
4320 				perf->ctx_oactxctrl_offset = 0x120;
4321 				perf->ctx_flexeu0_offset = 0x2ce;
4322 
4323 				perf->gen8_valid_ctx_bit = BIT(25);
4324 			} else {
4325 				perf->ctx_oactxctrl_offset = 0x128;
4326 				perf->ctx_flexeu0_offset = 0x3de;
4327 
4328 				perf->gen8_valid_ctx_bit = BIT(16);
4329 			}
4330 		} else if (IS_GEN_RANGE(i915, 10, 11)) {
4331 			perf->oa_formats = gen8_plus_oa_formats;
4332 
4333 			perf->ops.is_valid_b_counter_reg =
4334 				gen7_is_valid_b_counter_addr;
4335 			perf->ops.is_valid_mux_reg =
4336 				gen10_is_valid_mux_addr;
4337 			perf->ops.is_valid_flex_reg =
4338 				gen8_is_valid_flex_addr;
4339 
4340 			perf->ops.oa_enable = gen8_oa_enable;
4341 			perf->ops.oa_disable = gen8_oa_disable;
4342 			perf->ops.enable_metric_set = gen8_enable_metric_set;
4343 			perf->ops.disable_metric_set = gen10_disable_metric_set;
4344 			perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
4345 
4346 			if (IS_GEN(i915, 10)) {
4347 				perf->ctx_oactxctrl_offset = 0x128;
4348 				perf->ctx_flexeu0_offset = 0x3de;
4349 			} else {
4350 				perf->ctx_oactxctrl_offset = 0x124;
4351 				perf->ctx_flexeu0_offset = 0x78e;
4352 			}
4353 			perf->gen8_valid_ctx_bit = BIT(16);
4354 		} else if (IS_GEN(i915, 12)) {
4355 			perf->oa_formats = gen12_oa_formats;
4356 
4357 			perf->ops.is_valid_b_counter_reg =
4358 				gen12_is_valid_b_counter_addr;
4359 			perf->ops.is_valid_mux_reg =
4360 				gen12_is_valid_mux_addr;
4361 			perf->ops.is_valid_flex_reg =
4362 				gen8_is_valid_flex_addr;
4363 
4364 			perf->ops.oa_enable = gen12_oa_enable;
4365 			perf->ops.oa_disable = gen12_oa_disable;
4366 			perf->ops.enable_metric_set = gen12_enable_metric_set;
4367 			perf->ops.disable_metric_set = gen12_disable_metric_set;
4368 			perf->ops.oa_hw_tail_read = gen12_oa_hw_tail_read;
4369 
4370 			perf->ctx_flexeu0_offset = 0;
4371 			perf->ctx_oactxctrl_offset = 0x144;
4372 		}
4373 	}
4374 
4375 	if (perf->ops.enable_metric_set) {
4376 		mutex_init(&perf->lock);
4377 
4378 		/* Choose a representative limit */
4379 		oa_sample_rate_hard_limit = i915->gt.clock_frequency / 2;
4380 
4381 		mutex_init(&perf->metrics_lock);
4382 		idr_init_base(&perf->metrics_idr, 1);
4383 
4384 		/* We set up some ratelimit state to potentially throttle any
4385 		 * _NOTES about spurious, invalid OA reports which we don't
4386 		 * forward to userspace.
4387 		 *
4388 		 * We print a _NOTE about any throttling when closing the
4389 		 * stream instead of waiting until driver _fini which no one
4390 		 * would ever see.
4391 		 *
4392 		 * Using the same limiting factors as printk_ratelimit()
4393 		 */
4394 		ratelimit_state_init(&perf->spurious_report_rs, 5 * HZ, 10);
4395 		/* Since we use a DRM_NOTE for spurious reports it would be
4396 		 * inconsistent to let __ratelimit() automatically print a
4397 		 * warning for throttling.
4398 		 */
4399 		ratelimit_set_flags(&perf->spurious_report_rs,
4400 				    RATELIMIT_MSG_ON_RELEASE);
4401 
4402 		ratelimit_state_init(&perf->tail_pointer_race,
4403 				     5 * HZ, 10);
4404 		ratelimit_set_flags(&perf->tail_pointer_race,
4405 				    RATELIMIT_MSG_ON_RELEASE);
4406 
4407 		atomic64_set(&perf->noa_programming_delay,
4408 			     500 * 1000 /* 500us */);
4409 
4410 		perf->i915 = i915;
4411 	}
4412 }
4413 
4414 static int destroy_config(int id, void *p, void *data)
4415 {
4416 	i915_oa_config_put(p);
4417 	return 0;
4418 }
4419 
4420 void i915_perf_sysctl_register(void)
4421 {
4422 	sysctl_header = register_sysctl_table(dev_root);
4423 }
4424 
4425 void i915_perf_sysctl_unregister(void)
4426 {
4427 	unregister_sysctl_table(sysctl_header);
4428 }
4429 
4430 /**
4431  * i915_perf_fini - Counter part to i915_perf_init()
4432  * @i915: i915 device instance
4433  */
4434 void i915_perf_fini(struct drm_i915_private *i915)
4435 {
4436 	struct i915_perf *perf = &i915->perf;
4437 
4438 	if (!perf->i915)
4439 		return;
4440 
4441 	idr_for_each(&perf->metrics_idr, destroy_config, perf);
4442 	idr_destroy(&perf->metrics_idr);
4443 
4444 	memset(&perf->ops, 0, sizeof(perf->ops));
4445 	perf->i915 = NULL;
4446 }
4447 
4448 /**
4449  * i915_perf_ioctl_version - Version of the i915-perf subsystem
4450  *
4451  * This version number is used by userspace to detect available features.
4452  */
4453 int i915_perf_ioctl_version(void)
4454 {
4455 	/*
4456 	 * 1: Initial version
4457 	 *   I915_PERF_IOCTL_ENABLE
4458 	 *   I915_PERF_IOCTL_DISABLE
4459 	 *
4460 	 * 2: Added runtime modification of OA config.
4461 	 *   I915_PERF_IOCTL_CONFIG
4462 	 *
4463 	 * 3: Add DRM_I915_PERF_PROP_HOLD_PREEMPTION parameter to hold
4464 	 *    preemption on a particular context so that performance data is
4465 	 *    accessible from a delta of MI_RPC reports without looking at the
4466 	 *    OA buffer.
4467 	 *
4468 	 * 4: Add DRM_I915_PERF_PROP_GLOBAL_SSEU to limit what contexts can
4469 	 *    be run for the duration of the performance recording based on
4470 	 *    their SSEU configuration.
4471 	 *
4472 	 * 5: Add DRM_I915_PERF_PROP_POLL_OA_PERIOD parameter that controls the
4473 	 *    interval for the hrtimer used to check for OA data.
4474 	 */
4475 	return 5;
4476 }
4477 
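/*
 * Illustrative sketch (not part of the driver): userspace is assumed to
 * query this via the I915_PARAM_PERF_REVISION getparam:
 *
 *	int value = 0;
 *	struct drm_i915_getparam gp = {
 *		.param = I915_PARAM_PERF_REVISION,
 *		.value = &value,
 *	};
 *	ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp);
 */
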
4478 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
4479 #include "selftests/i915_perf.c"
4480 #endif
4481