/*
 * Copyright © 2015-2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *   Robert Bragg <robert@sixbynine.org>
 */


/**
 * DOC: i915 Perf Overview
 *
 * Gen graphics supports a large number of performance counters that can help
 * driver and application developers understand and optimize their use of the
 * GPU.
 *
 * This i915 perf interface enables userspace to configure and open a file
 * descriptor representing a stream of GPU metrics which can then be read() as
 * a stream of sample records.
 *
 * The interface is particularly suited to exposing buffered metrics that are
 * captured by DMA from the GPU, unsynchronized with and unrelated to the CPU.
 *
 * Streams representing a single context are accessible to applications with a
 * corresponding drm file descriptor, such that OpenGL can use the interface
 * without special privileges. Access to system-wide metrics requires root
 * privileges by default, unless changed via the dev.i915.perf_stream_paranoid
 * sysctl option.
 *
 */
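/*
 * For illustration only (not part of the driver): a minimal userspace sketch
 * of the flow described above, using the uapi definitions from
 * include/uapi/drm/i915_drm.h. The metrics set ID (1) and exponent (16) are
 * placeholder values (real set IDs are advertised via sysfs), and drm_fd is
 * assumed to be an already-open DRM file descriptor:
 *
 *	u64 properties[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, 1,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *		.num_properties = 4,	// one per (key, value) pair
 *		.properties_ptr = (u64)(uintptr_t)properties,
 *	};
 *	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 *
 * Each read() on stream_fd then yields a sequence of records, each led by a
 * struct drm_i915_perf_record_header whose type distinguishes samples from
 * status records such as DRM_I915_PERF_RECORD_OA_REPORT_LOST.
 */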

/**
 * DOC: i915 Perf History and Comparison with Core Perf
 *
 * The interface was initially inspired by the core Perf infrastructure but
 * some notable differences are:
 *
 * i915 perf file descriptors represent a "stream" instead of an "event"; where
 * a perf event primarily corresponds to a single 64bit value, while a stream
 * might sample sets of tightly-coupled counters, depending on the
 * configuration.  For example the Gen OA unit isn't designed to support
 * orthogonal configurations of individual counters; it's configured for a set
 * of related counters. Samples for an i915 perf stream capturing OA metrics
 * will include a set of counter values packed in a compact HW specific format.
 * The OA unit supports a number of different packing formats which can be
 * selected by the user opening the stream. Perf has support for grouping
 * events, but each event in the group is configured, validated and
 * authenticated individually with separate system calls.
 *
 * i915 perf stream configurations are provided as an array of u64 (key,value)
 * pairs, instead of a fixed struct with multiple miscellaneous config members,
 * interleaved with event-type specific members.
 *
 * i915 perf doesn't support exposing metrics via an mmap'd circular buffer.
 * The supported metrics are being written to memory by the GPU unsynchronized
 * with the CPU, using HW specific packing formats for counter sets. Sometimes
 * the constraints on HW configuration require reports to be filtered before it
 * would be acceptable to expose them to unprivileged applications - to hide
 * the metrics of other processes/contexts. For these use cases a read() based
 * interface is a good fit, and provides an opportunity to filter data as it
 * gets copied from the GPU mapped buffers to userspace buffers.
 *
 *
 * Issues hit with first prototype based on Core Perf
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * The first prototype of this driver was based on the core perf
 * infrastructure, and while we did make that mostly work, with some changes to
 * perf, we found we were breaking or working around too many assumptions baked
 * into perf's currently cpu centric design.
 *
 * In the end we didn't see a clear benefit to making perf's implementation and
 * interface more complex by changing design assumptions while we knew we still
 * wouldn't be able to use any existing perf based userspace tools.
 *
 * Also considering the Gen specific nature of the Observability hardware and
 * how userspace will sometimes need to combine i915 perf OA metrics with
 * side-band OA data captured via MI_REPORT_PERF_COUNT commands; we're
 * expecting the interface to be used by a platform specific userspace such as
 * OpenGL or tools. This is to say; we aren't inherently missing out on having
 * a standard vendor/architecture agnostic interface by not using perf.
 *
 *
 * For posterity, in case we might re-visit trying to adapt core perf to be
 * better suited to exposing i915 metrics these were the main pain points we
 * hit:
 *
 * - The perf based OA PMU driver broke some significant design assumptions:
 *
 *   Existing perf pmus are used for profiling work on a cpu and we were
 *   introducing the idea of _IS_DEVICE pmus with different security
 *   implications, the need to fake cpu-related data (such as user/kernel
 *   registers) to fit with perf's current design, and adding _DEVICE records
 *   as a way to forward device-specific status records.
 *
 *   The OA unit writes reports of counters into a circular buffer, without
 *   involvement from the CPU, making our PMU driver the first of a kind.
 *
 *   Given the way we periodically forwarded data from the GPU-mapped OA
 *   buffer to perf's buffer, those bursts of sample writes looked to perf like
 *   we were sampling too fast and so we had to subvert its throttling checks.
 *
 *   Perf supports groups of counters and allows those to be read via
 *   transactions internally but transactions currently seem designed to be
 *   explicitly initiated from the cpu (say in response to a userspace read())
 *   and while we could pull a report out of the OA buffer we can't
 *   trigger a report from the cpu on demand.
 *
 *   Related to being report based; the OA counters are configured in HW as a
 *   set while perf generally expects counter configurations to be orthogonal.
 *   Although counters can be associated with a group leader as they are
 *   opened, there's no clear precedent for being able to provide group-wide
 *   configuration attributes (for example we want to let userspace choose the
 *   OA unit report format used to capture all counters in a set, or specify a
 *   GPU context to filter metrics on). We avoided using perf's grouping
 *   feature and forwarded OA reports to userspace via perf's 'raw' sample
 *   field. This suited our userspace well considering how coupled the counters
 *   are when dealing with normalizing. It would be inconvenient to split
 *   counters up into separate events, only to require userspace to recombine
 *   them. For Mesa it's also convenient to be forwarded raw, periodic reports
 *   for combining with the side-band raw reports it captures using
 *   MI_REPORT_PERF_COUNT commands.
 *
 *   - As a side note on perf's grouping feature; there was also some concern
 *     that using PERF_FORMAT_GROUP as a way to pack together counter values
 *     would quite drastically inflate our sample sizes, which would likely
 *     lower the effective sampling resolutions we could use when the available
 *     memory bandwidth is limited.
 *
 *     With the OA unit's report formats, counters are packed together as 32
 *     or 40bit values, with the largest report size being 256 bytes.
 *
 *     PERF_FORMAT_GROUP values are 64bit, but there doesn't appear to be a
 *     documented ordering to the values, implying PERF_FORMAT_ID must also be
 *     used to add a 64bit ID before each value; giving 16 bytes per counter.
 *
 *   Related to counter orthogonality; we can't time share the OA unit, while
 *   event scheduling is a central design idea within perf for allowing
 *   userspace to open + enable more events than can be configured in HW at any
 *   one time.  The OA unit is not designed to allow re-configuration while in
 *   use. We can't reconfigure the OA unit without losing internal OA unit
 *   state which we can't access explicitly to save and restore. Reconfiguring
 *   the OA unit is also relatively slow, involving ~100 register writes. From
 *   userspace Mesa also depends on a stable OA configuration when emitting
 *   MI_REPORT_PERF_COUNT commands and importantly the OA unit can't be
 *   disabled while there are outstanding MI_RPC commands lest we hang the
 *   command streamer.
 *
 *   The contents of sample records aren't extensible by device drivers (i.e.
 *   the sample_type bits). As an example; Sourab Gupta had been looking to
 *   attach GPU timestamps to our OA samples. We were shoehorning OA reports
 *   into sample records by using the 'raw' field, but it's tricky to pack more
 *   than one thing into this field because events/core.c currently only lets a
 *   pmu give a single raw data pointer plus len which will be copied into the
 *   ring buffer. To include more than the OA report we'd have to copy the
 *   report into an intermediate larger buffer. I'd been considering allowing a
 *   vector of data+len values to be specified for copying the raw data, but
 *   it felt like a kludge to be using the raw field for this purpose.
 *
 * - It felt like our perf based PMU was making some technical compromises
 *   just for the sake of using perf:
 *
 *   perf_event_open() requires events to either relate to a pid or a specific
 *   cpu core, while our device pmu related to neither.  Events opened with a
 *   pid will be automatically enabled/disabled according to the scheduling of
 *   that process - so not appropriate for us. When an event is related to a
186  *   interrupt on that core. To avoid invasive changes our userspace opened OA
187  *   perf events for a specific cpu. This was workable but it meant the
188  *   majority of the OA driver ran in atomic context, including all OA report
 *   forwarding, which wasn't really necessary in our case and seemed to make
 *   our locking requirements somewhat complex as we handled the interaction
 *   with the rest of the i915 driver.
 */

#include <linux/anon_inodes.h>
#include <linux/sizes.h>
#include <linux/uuid.h>

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_internal.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_execlists_submission.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_clock_utils.h"
#include "gt/intel_gt_mcr.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_lrc.h"
#include "gt/intel_lrc_reg.h"
#include "gt/intel_ring.h"
#include "gt/uc/intel_guc_slpc.h"

#include "i915_drv.h"
#include "i915_file_private.h"
#include "i915_perf.h"
#include "i915_perf_oa_regs.h"

/* HW requires this to be a power of two, between 128k and 16M, though the
 * driver is currently generally designed assuming the largest 16M size is
 * used such that the overflow cases are unlikely in normal operation.
 */
#define OA_BUFFER_SIZE		SZ_16M

#define OA_TAKEN(tail, head)	((tail - head) & (OA_BUFFER_SIZE - 1))
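/*
 * For example, with head == 0xffff80 and tail == 0x40 the subtraction wraps:
 * OA_TAKEN(0x40, 0xffff80) == (0x40 - 0xffff80) & 0xffffff == 0xc0, i.e.
 * 192 bytes taken: 128 bytes up to the end of the 16M buffer plus 64 bytes
 * after the wrap.
 */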

/**
 * DOC: OA Tail Pointer Race
 *
 * There's a HW race condition between OA unit tail pointer register updates and
 * writes to memory whereby the tail pointer can sometimes get ahead of what's
 * been written out to the OA buffer so far (in terms of what's visible to the
 * CPU).
 *
 * Although this can be observed explicitly while copying reports to userspace
 * by checking for a zeroed report-id field in tail reports, we want to account
 * for this earlier, as part of oa_buffer_check_unlocked() to avoid lots of
 * redundant read() attempts.
 *
 * We work around this issue in oa_buffer_check_unlocked() by reading the
 * reports in the OA buffer, starting from the tail reported by the HW, until
 * we find a report with its first 2 dwords not 0, meaning its previous report
 * is completely in memory and ready to be read. Those dwords are also set to 0
 * once read and the whole buffer is cleared upon OA buffer initialization. The
 * first dword is the reason for this report while the second is the timestamp,
 * making the chances of having those 2 fields at 0 fairly unlikely. A more
 * detailed explanation is available in oa_buffer_check_unlocked().
 *
 * Most of the implementation details for this workaround are in
 * oa_buffer_check_unlocked() and _append_oa_reports()
 *
 * Note for posterity: previously the driver used to define an effective tail
 * pointer that lagged the real pointer by a 'tail margin' measured in bytes
 * derived from %OA_TAIL_MARGIN_NSEC and the configured sampling frequency.
 * This was flawed considering that the OA unit may also automatically generate
 * non-periodic reports (such as on context switch) or the OA unit may be
 * enabled without any periodic sampling.
 */
#define OA_TAIL_MARGIN_NSEC	100000ULL
#define INVALID_TAIL_PTR	0xffffffff

/* The default frequency for checking whether the OA unit has written new
 * reports to the circular OA buffer...
 */
#define DEFAULT_POLL_FREQUENCY_HZ 200
#define DEFAULT_POLL_PERIOD_NS (NSEC_PER_SEC / DEFAULT_POLL_FREQUENCY_HZ)

/* for sysctl proc_dointvec_minmax of dev.i915.perf_stream_paranoid */
static u32 i915_perf_stream_paranoid = true;

/* The maximum exponent the hardware accepts is 63 (essentially it selects one
 * of the 64bit timestamp bits to trigger reports from) but there's currently
 * no known use case for sampling as infrequently as once per 47 thousand years.
 *
 * Since the timestamps included in OA reports are only 32bits it seems
 * reasonable to limit the OA exponent where it's still possible to account for
 * overflow in OA report timestamps.
 */
#define OA_EXPONENT_MAX 31
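/*
 * As a worked example (assuming the Haswell timestamp frequency implied by
 * the 6.25MHz figure below, i.e. 12.5MHz): an exponent selects timestamp bit
 * 'exponent', which toggles every 2^(exponent + 1) ticks, so roughly:
 *
 *	period_ns = NSEC_PER_SEC * (2ULL << exponent) / 12500000
 *
 * giving 160ns for exponent 0 (the fastest rate noted below) and ~344
 * seconds for OA_EXPONENT_MAX (31), which is also about where the 32-bit
 * report timestamps wrap.
 */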

#define INVALID_CTX_ID 0xffffffff

/* On Gen8+ automatically triggered OA reports include a 'reason' field... */
#define OAREPORT_REASON_MASK           0x3f
#define OAREPORT_REASON_MASK_EXTENDED  0x7f
#define OAREPORT_REASON_SHIFT          19
#define OAREPORT_REASON_TIMER          (1<<0)
#define OAREPORT_REASON_CTX_SWITCH     (1<<3)
#define OAREPORT_REASON_CLK_RATIO      (1<<5)

#define HAS_MI_SET_PREDICATE(i915) (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))

/* For sysctl proc_dointvec_minmax of i915_oa_max_sample_rate
 *
 * The highest sampling frequency we can theoretically program the OA unit
 * with is always half the timestamp frequency: E.g. 6.25MHz for Haswell.
 *
 * Initialized just before we register the sysctl parameter.
 */
static int oa_sample_rate_hard_limit;

/* Theoretically we can program the OA unit to sample every 160ns but don't
 * allow that by default unless root...
 *
 * The default threshold of 100000Hz is based on perf's similar
 * kernel.perf_event_max_sample_rate sysctl parameter.
 */
static u32 i915_oa_max_sample_rate = 100000;

/* XXX: beware if future OA HW adds new report formats that the current
 * code assumes all reports have a power-of-two size and ~(size - 1) can
 * be used as a mask to align the OA tail pointer.
 */
static const struct i915_oa_format oa_formats[I915_OA_FORMAT_MAX] = {
	[I915_OA_FORMAT_A13]	    = { 0, 64 },
	[I915_OA_FORMAT_A29]	    = { 1, 128 },
	[I915_OA_FORMAT_A13_B8_C8]  = { 2, 128 },
	/* A29_B8_C8 Disallowed as 192 bytes doesn't factor into buffer size */
	[I915_OA_FORMAT_B4_C8]	    = { 4, 64 },
	[I915_OA_FORMAT_A45_B8_C8]  = { 5, 256 },
	[I915_OA_FORMAT_B4_C8_A16]  = { 6, 128 },
	[I915_OA_FORMAT_C4_B8]	    = { 7, 64 },
	[I915_OA_FORMAT_A12]		    = { 0, 64 },
	[I915_OA_FORMAT_A12_B8_C8]	    = { 2, 128 },
	[I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
	[I915_OAR_FORMAT_A32u40_A4u32_B8_C8]    = { 5, 256 },
	[I915_OA_FORMAT_A24u40_A14u32_B8_C8]    = { 5, 256 },
};

#define SAMPLE_OA_REPORT      (1<<0)

/**
 * struct perf_open_properties - for validated properties given to open a stream
 * @sample_flags: `DRM_I915_PERF_PROP_SAMPLE_*` properties are tracked as flags
 * @single_context: Whether a single or all gpu contexts should be monitored
 * @hold_preemption: Whether the preemption is disabled for the filtered
 *                   context
 * @ctx_handle: A gem ctx handle for use with @single_context
 * @metrics_set: An ID for an OA unit metric set advertised via sysfs
 * @oa_format: An OA unit HW report format
 * @oa_periodic: Whether to enable periodic OA unit sampling
 * @oa_period_exponent: The OA unit sampling period is derived from this
 * @engine: The engine (typically rcs0) being monitored by the OA unit
 * @has_sseu: Whether @sseu was specified by userspace
 * @sseu: internal SSEU configuration computed either from the userspace
 *        specified configuration in the opening parameters or a default value
 *        (see get_default_sseu_config())
 * @poll_oa_period: The period in nanoseconds at which the CPU will check for OA
 * data availability
 *
 * As read_properties_unlocked() enumerates and validates the properties given
 * to open a stream of metrics, the configuration is built up in the structure,
 * which starts out zero initialized.
 */
struct perf_open_properties {
	u32 sample_flags;

	u64 single_context:1;
	u64 hold_preemption:1;
	u64 ctx_handle;

	/* OA sampling state */
	int metrics_set;
	int oa_format;
	bool oa_periodic;
	int oa_period_exponent;

	struct intel_engine_cs *engine;

	bool has_sseu;
	struct intel_sseu sseu;

	u64 poll_oa_period;
};

struct i915_oa_config_bo {
	struct llist_node node;

	struct i915_oa_config *oa_config;
	struct i915_vma *vma;
};

static struct ctl_table_header *sysctl_header;

static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer);

void i915_oa_config_release(struct kref *ref)
{
	struct i915_oa_config *oa_config =
		container_of(ref, typeof(*oa_config), ref);

	kfree(oa_config->flex_regs);
	kfree(oa_config->b_counter_regs);
	kfree(oa_config->mux_regs);

	kfree_rcu(oa_config, rcu);
}

struct i915_oa_config *
i915_perf_get_oa_config(struct i915_perf *perf, int metrics_set)
{
	struct i915_oa_config *oa_config;

	rcu_read_lock();
	oa_config = idr_find(&perf->metrics_idr, metrics_set);
	if (oa_config)
		oa_config = i915_oa_config_get(oa_config);
	rcu_read_unlock();

	return oa_config;
}

static void free_oa_config_bo(struct i915_oa_config_bo *oa_bo)
{
	i915_oa_config_put(oa_bo->oa_config);
	i915_vma_put(oa_bo->vma);
	kfree(oa_bo);
}

static u32 gen12_oa_hw_tail_read(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	return intel_uncore_read(uncore, GEN12_OAG_OATAILPTR) &
	       GEN12_OAG_OATAILPTR_MASK;
}

static u32 gen8_oa_hw_tail_read(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	return intel_uncore_read(uncore, GEN8_OATAILPTR) & GEN8_OATAILPTR_MASK;
}

static u32 gen7_oa_hw_tail_read(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);

	return oastatus1 & GEN7_OASTATUS1_TAIL_MASK;
}

/**
 * oa_buffer_check_unlocked - check for data and update tail ptr state
 * @stream: i915 stream instance
 *
 * This is either called via fops (for blocking reads in user ctx) or the poll
 * check hrtimer (atomic ctx) to check the OA buffer tail pointer and check
 * if there is data available for userspace to read.
 *
 * This function is central to providing a workaround for the OA unit tail
 * pointer having a race with respect to what data is visible to the CPU.
 * It is responsible for reading tail pointers from the hardware and giving
 * the pointers time to 'age' before they are made available for reading.
 * (See description of OA_TAIL_MARGIN_NSEC above for further details.)
 *
 * Besides returning true when there is data available to read() this function
 * also updates the tail, aging_tail and aging_timestamp in the oa_buffer
 * object.
 *
 * Note: It's safe to read OA config state here unlocked, assuming that this is
 * only called while the stream is enabled, while the global OA configuration
 * can't be modified.
 *
 * Returns: %true if the OA buffer contains data, else %false
 */
static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream)
{
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	int report_size = stream->oa_buffer.format->size;
	unsigned long flags;
	bool pollin;
	u32 hw_tail;
	u64 now;

	/* We have to consider the (unlikely) possibility that read() errors
	 * could result in an OA buffer reset which might reset the head and
	 * tail state.
	 */
	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	hw_tail = stream->perf->ops.oa_hw_tail_read(stream);

	/* The tail pointer increases in 64 byte increments,
	 * not in report_size steps...
	 */
	hw_tail &= ~(report_size - 1);

	now = ktime_get_mono_fast_ns();

	if (hw_tail == stream->oa_buffer.aging_tail &&
	    (now - stream->oa_buffer.aging_timestamp) > OA_TAIL_MARGIN_NSEC) {
		/* If the HW tail hasn't moved since the last check and the HW
		 * tail has been aging for long enough, declare it the new
		 * tail.
		 */
		stream->oa_buffer.tail = stream->oa_buffer.aging_tail;
	} else {
		u32 head, tail, aged_tail;

		/* NB: The head we observe here might effectively be a little
		 * out of date. If a read() is in progress, the head could be
		 * anywhere between this head and stream->oa_buffer.tail.
		 */
		head = stream->oa_buffer.head - gtt_offset;
		aged_tail = stream->oa_buffer.tail - gtt_offset;

		hw_tail -= gtt_offset;
		tail = hw_tail;

		/* Walk the stream backward until we find a report with dword 0
		 * & 1 not at 0. Since the circular buffer pointers progress by
		 * increments of 64 bytes and that reports can be up to 256
		 * bytes long, we can't tell whether a report has fully landed
		 * in memory before the first 2 dwords of the following report
		 * have effectively landed.
		 *
		 * This is assuming that the writes of the OA unit land in
		 * memory in the order they were written to.
		 * If not : (╯°□°)╯︵ ┻━┻
		 */
		while (OA_TAKEN(tail, aged_tail) >= report_size) {
			u32 *report32 = (void *)(stream->oa_buffer.vaddr + tail);

			if (report32[0] != 0 || report32[1] != 0)
				break;

			tail = (tail - report_size) & (OA_BUFFER_SIZE - 1);
		}

		if (OA_TAKEN(hw_tail, tail) > report_size &&
		    __ratelimit(&stream->perf->tail_pointer_race))
			drm_notice(&stream->uncore->i915->drm,
				   "unlanded report(s) head=0x%x tail=0x%x hw_tail=0x%x\n",
				   head, tail, hw_tail);

		stream->oa_buffer.tail = gtt_offset + tail;
		stream->oa_buffer.aging_tail = gtt_offset + hw_tail;
		stream->oa_buffer.aging_timestamp = now;
	}

	pollin = OA_TAKEN(stream->oa_buffer.tail - gtt_offset,
			  stream->oa_buffer.head - gtt_offset) >= report_size;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	return pollin;
}

/**
 * append_oa_status - Appends a status record to a userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @type: The kind of status to report to userspace
 *
 * Writes a status record (such as `DRM_I915_PERF_RECORD_OA_REPORT_LOST`)
 * into the userspace read() buffer.
 *
 * The @buf @offset will only be updated on success.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int append_oa_status(struct i915_perf_stream *stream,
			    char __user *buf,
			    size_t count,
			    size_t *offset,
			    enum drm_i915_perf_record_type type)
{
	struct drm_i915_perf_record_header header = { type, 0, sizeof(header) };

	if ((count - *offset) < header.size)
		return -ENOSPC;

	if (copy_to_user(buf + *offset, &header, sizeof(header)))
		return -EFAULT;

	(*offset) += header.size;

	return 0;
}

/**
 * append_oa_sample - Copies single OA report into userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @report: A single OA report to (optionally) include as part of the sample
 *
 * The contents of a sample are configured through `DRM_I915_PERF_PROP_SAMPLE_*`
 * properties when opening a stream, tracked as `stream->sample_flags`. This
 * function copies the requested components of a single sample to the given
 * read() @buf.
 *
 * The @buf @offset will only be updated on success.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int append_oa_sample(struct i915_perf_stream *stream,
			    char __user *buf,
			    size_t count,
			    size_t *offset,
			    const u8 *report)
{
	int report_size = stream->oa_buffer.format->size;
	struct drm_i915_perf_record_header header;

	header.type = DRM_I915_PERF_RECORD_SAMPLE;
	header.pad = 0;
	header.size = stream->sample_size;

	if ((count - *offset) < header.size)
		return -ENOSPC;

	buf += *offset;
	if (copy_to_user(buf, &header, sizeof(header)))
		return -EFAULT;
	buf += sizeof(header);

	if (copy_to_user(buf, report, report_size))
		return -EFAULT;

	(*offset) += header.size;

	return 0;
}

/**
 * gen8_append_oa_reports - Copies all buffered OA reports into
 *			    userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Notably any error condition resulting in a short read (-%ENOSPC or
 * -%EFAULT) will be returned even though one or more records may
 * have been successfully copied. In this case it's up to the caller
 * to decide if the error should be squashed before returning to
 * userspace.
 *
 * Note: reports are consumed from the head, and appended to the
 * tail, so the tail chases the head?... If you think that's mad
 * and back-to-front you're not alone, but this follows the
 * Gen PRM naming convention.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int gen8_append_oa_reports(struct i915_perf_stream *stream,
				  char __user *buf,
				  size_t count,
				  size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	int report_size = stream->oa_buffer.format->size;
	u8 *oa_buf_base = stream->oa_buffer.vaddr;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	u32 mask = (OA_BUFFER_SIZE - 1);
	size_t start_offset = *offset;
	unsigned long flags;
	u32 head, tail;
	int ret = 0;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled))
		return -EIO;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	head = stream->oa_buffer.head;
	tail = stream->oa_buffer.tail;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/*
	 * NB: oa_buffer.head/tail include the gtt_offset which we don't want
	 * while indexing relative to oa_buf_base.
	 */
	head -= gtt_offset;
	tail -= gtt_offset;

	/*
	 * An out of bounds or misaligned head or tail pointer implies a driver
	 * bug since we validate + align the tail pointers we read from the
	 * hardware and we are in full control of the head pointer which should
	 * only be incremented by multiples of the report size (notably also
	 * all a power of two).
	 */
	if (drm_WARN_ONCE(&uncore->i915->drm,
			  head > OA_BUFFER_SIZE || head % report_size ||
			  tail > OA_BUFFER_SIZE || tail % report_size,
			  "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
			  head, tail))
		return -EIO;


	for (/* none */;
	     OA_TAKEN(tail, head);
	     head = (head + report_size) & mask) {
		u8 *report = oa_buf_base + head;
		u32 *report32 = (void *)report;
		u32 ctx_id;
		u32 reason;

		/*
		 * All the report sizes factor neatly into the buffer
		 * size so we never expect to see a report split
		 * between the beginning and end of the buffer.
		 *
		 * Given the initial alignment check a misalignment
		 * here would imply a driver bug that would result
		 * in an overrun.
		 */
		if (drm_WARN_ON(&uncore->i915->drm,
				(OA_BUFFER_SIZE - head) < report_size)) {
			drm_err(&uncore->i915->drm,
				"Spurious OA head ptr: non-integral report offset\n");
			break;
		}

		/*
		 * The reason field includes flags identifying what
		 * triggered this specific report (mostly timer
		 * triggered or e.g. due to a context switch).
		 *
		 * This field is never expected to be zero so we can
		 * check that the report isn't invalid before copying
		 * it to userspace...
		 */
		reason = ((report32[0] >> OAREPORT_REASON_SHIFT) &
			  (GRAPHICS_VER(stream->perf->i915) == 12 ?
			   OAREPORT_REASON_MASK_EXTENDED :
			   OAREPORT_REASON_MASK));

		ctx_id = report32[2] & stream->specific_ctx_id_mask;

		/*
		 * Squash whatever is in the CTX_ID field if it's marked as
		 * invalid to be sure we avoid false-positive, single-context
		 * filtering below...
		 *
		 * Note that we don't clear the valid_ctx_bit so userspace can
		 * understand that the ID has been squashed by the kernel.
		 */
		if (!(report32[0] & stream->perf->gen8_valid_ctx_bit) &&
		    GRAPHICS_VER(stream->perf->i915) <= 11)
			ctx_id = report32[2] = INVALID_CTX_ID;

		/*
		 * NB: For Gen 8 the OA unit no longer supports clock gating
		 * off for a specific context and the kernel can't securely
		 * stop the counters from updating as system-wide / global
		 * values.
		 *
		 * Automatic reports now include a context ID so reports can be
		 * filtered on the cpu but it's not worth trying to
		 * automatically subtract/hide counter progress for other
		 * contexts while filtering since we can't stop userspace
		 * issuing MI_REPORT_PERF_COUNT commands which would still
		 * provide a side-band view of the real values.
		 *
		 * To allow userspace (such as Mesa/GL_INTEL_performance_query)
		 * to normalize counters for a single filtered context, it
		 * needs to be forwarded bookend context-switch reports so that it
		 * can track switches in between MI_REPORT_PERF_COUNT commands
		 * and can itself subtract/ignore the progress of counters
		 * associated with other contexts. Note that the hardware
		 * automatically triggers reports when switching to a new
		 * context which are tagged with the ID of the newly active
		 * context. To avoid the complexity (and likely fragility) of
		 * reading ahead while parsing reports to try and minimize
		 * forwarding redundant context switch reports (i.e. between
		 * other, unrelated contexts) we simply elect to forward them
		 * all.
		 *
		 * We don't rely solely on the reason field to identify context
		 * switches since it's not uncommon for periodic samples to
		 * identify a switch before any 'context switch' report.
		 */
		if (!stream->ctx ||
		    stream->specific_ctx_id == ctx_id ||
		    stream->oa_buffer.last_ctx_id == stream->specific_ctx_id ||
		    reason & OAREPORT_REASON_CTX_SWITCH) {

			/*
			 * While filtering for a single context we avoid
			 * leaking the IDs of other contexts.
			 */
			if (stream->ctx &&
			    stream->specific_ctx_id != ctx_id) {
				report32[2] = INVALID_CTX_ID;
			}

			ret = append_oa_sample(stream, buf, count, offset,
					       report);
			if (ret)
				break;

			stream->oa_buffer.last_ctx_id = ctx_id;
		}

		/*
		 * Clear out the first 2 dwords as a means to detect unlanded
		 * reports.
		 */
		report32[0] = 0;
		report32[1] = 0;
	}

	if (start_offset != *offset) {
		i915_reg_t oaheadptr;

		oaheadptr = GRAPHICS_VER(stream->perf->i915) == 12 ?
			    GEN12_OAG_OAHEADPTR : GEN8_OAHEADPTR;

		spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

		/*
		 * We removed the gtt_offset for the copy loop above, indexing
		 * relative to oa_buf_base so put back here...
		 */
		head += gtt_offset;
		intel_uncore_write(uncore, oaheadptr,
				   head & GEN12_OAG_OAHEADPTR_MASK);
		stream->oa_buffer.head = head;

		spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
	}

	return ret;
}

/**
 * gen8_oa_read - copy status records then buffered OA reports
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Checks OA unit status registers and if necessary appends corresponding
 * status records for userspace (such as for a buffer full condition) and then
 * initiates appending any buffered OA reports.
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * NB: some data may be successfully copied to the userspace buffer
 * even if an error is returned, and this is reflected in the
 * updated @offset.
 *
 * Returns: zero on success or a negative error code
 */
static int gen8_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 oastatus;
	i915_reg_t oastatus_reg;
	int ret;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
		return -EIO;

	oastatus_reg = GRAPHICS_VER(stream->perf->i915) == 12 ?
		       GEN12_OAG_OASTATUS : GEN8_OASTATUS;

	oastatus = intel_uncore_read(uncore, oastatus_reg);

	/*
	 * We treat OABUFFER_OVERFLOW as a significant error:
	 *
	 * Although theoretically we could handle this more gracefully
	 * sometimes, some Gens don't correctly suppress certain
	 * automatically triggered reports in this condition and so we
	 * have to assume that old reports are now being trampled
	 * over.
	 *
	 * Considering how we don't currently give userspace control
	 * over the OA buffer size and always configure a large 16MB
	 * buffer, a buffer overflow likely indicates
	 * that something has gone quite badly wrong.
	 */
	if (oastatus & GEN8_OASTATUS_OABUFFER_OVERFLOW) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
		if (ret)
			return ret;

		drm_dbg(&stream->perf->i915->drm,
			"OA buffer overflow (exponent = %d): force restart\n",
			stream->period_exponent);

		stream->perf->ops.oa_disable(stream);
		stream->perf->ops.oa_enable(stream);

		/*
		 * Note: .oa_enable() is expected to re-init the oabuffer and
		 * reset GEN8_OASTATUS for us
		 */
		oastatus = intel_uncore_read(uncore, oastatus_reg);
	}

	if (oastatus & GEN8_OASTATUS_REPORT_LOST) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
		if (ret)
			return ret;

		intel_uncore_rmw(uncore, oastatus_reg,
				 GEN8_OASTATUS_COUNTER_OVERFLOW |
				 GEN8_OASTATUS_REPORT_LOST,
				 IS_GRAPHICS_VER(uncore->i915, 8, 11) ?
				 (GEN8_OASTATUS_HEAD_POINTER_WRAP |
				  GEN8_OASTATUS_TAIL_POINTER_WRAP) : 0);
	}

	return gen8_append_oa_reports(stream, buf, count, offset);
}

/**
 * gen7_append_oa_reports - Copies all buffered OA reports into
 *			    userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Notably any error condition resulting in a short read (-%ENOSPC or
 * -%EFAULT) will be returned even though one or more records may
 * have been successfully copied. In this case it's up to the caller
 * to decide if the error should be squashed before returning to
 * userspace.
 *
 * Note: reports are consumed from the head, and appended to the
 * tail, so the tail chases the head?... If you think that's mad
 * and back-to-front you're not alone, but this follows the
 * Gen PRM naming convention.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int gen7_append_oa_reports(struct i915_perf_stream *stream,
				  char __user *buf,
				  size_t count,
				  size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	int report_size = stream->oa_buffer.format->size;
	u8 *oa_buf_base = stream->oa_buffer.vaddr;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	u32 mask = (OA_BUFFER_SIZE - 1);
	size_t start_offset = *offset;
	unsigned long flags;
	u32 head, tail;
	int ret = 0;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled))
		return -EIO;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	head = stream->oa_buffer.head;
	tail = stream->oa_buffer.tail;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/* NB: oa_buffer.head/tail include the gtt_offset which we don't want
	 * while indexing relative to oa_buf_base.
	 */
	head -= gtt_offset;
	tail -= gtt_offset;

	/* An out of bounds or misaligned head or tail pointer implies a driver
	 * bug since we validate + align the tail pointers we read from the
	 * hardware and we are in full control of the head pointer which should
	 * only be incremented by multiples of the report size (notably also
	 * all a power of two).
	 */
	if (drm_WARN_ONCE(&uncore->i915->drm,
			  head > OA_BUFFER_SIZE || head % report_size ||
			  tail > OA_BUFFER_SIZE || tail % report_size,
			  "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
			  head, tail))
		return -EIO;


	for (/* none */;
	     OA_TAKEN(tail, head);
	     head = (head + report_size) & mask) {
		u8 *report = oa_buf_base + head;
		u32 *report32 = (void *)report;

		/* All the report sizes factor neatly into the buffer
		 * size so we never expect to see a report split
		 * between the beginning and end of the buffer.
		 *
		 * Given the initial alignment check a misalignment
		 * here would imply a driver bug that would result
		 * in an overrun.
		 */
		if (drm_WARN_ON(&uncore->i915->drm,
				(OA_BUFFER_SIZE - head) < report_size)) {
			drm_err(&uncore->i915->drm,
				"Spurious OA head ptr: non-integral report offset\n");
			break;
		}

		/* The report-ID field for periodic samples includes
		 * some undocumented flags related to what triggered
		 * the report and is never expected to be zero so we
		 * can check that the report isn't invalid before
		 * copying it to userspace...
		 */
		if (report32[0] == 0) {
			if (__ratelimit(&stream->perf->spurious_report_rs))
				drm_notice(&uncore->i915->drm,
					   "Skipping spurious, invalid OA report\n");
			continue;
		}

		ret = append_oa_sample(stream, buf, count, offset, report);
		if (ret)
			break;

		/* Clear out the first 2 dwords as a means to detect unlanded
		 * reports.
		 */
		report32[0] = 0;
		report32[1] = 0;
	}

	if (start_offset != *offset) {
		spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

		/* We removed the gtt_offset for the copy loop above, indexing
		 * relative to oa_buf_base so put back here...
		 */
		head += gtt_offset;

		intel_uncore_write(uncore, GEN7_OASTATUS2,
				   (head & GEN7_OASTATUS2_HEAD_MASK) |
				   GEN7_OASTATUS2_MEM_SELECT_GGTT);
		stream->oa_buffer.head = head;

		spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
	}

	return ret;
}

/**
 * gen7_oa_read - copy status records then buffered OA reports
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Checks Gen 7 specific OA unit status registers and if necessary appends
 * corresponding status records for userspace (such as for a buffer full
 * condition) and then initiates appending any buffered OA reports.
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * Returns: zero on success or a negative error code
 */
static int gen7_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 oastatus1;
	int ret;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
		return -EIO;

	oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);

	/* XXX: On Haswell we don't have a safe way to clear oastatus1
	 * bits while the OA unit is enabled (while the tail pointer
	 * may be updated asynchronously) so we ignore status bits
	 * that have already been reported to userspace.
	 */
	oastatus1 &= ~stream->perf->gen7_latched_oastatus1;

	/* We treat OABUFFER_OVERFLOW as a significant error:
	 *
	 * - The status can be interpreted to mean that the buffer is
	 *   currently full (with a higher precedence than OA_TAKEN()
	 *   which will start to report a near-empty buffer after an
	 *   overflow) but it's awkward that we can't clear the status
	 *   on Haswell, so without a reset we won't be able to catch
	 *   the state again.
	 *
	 * - Since it also implies the HW has started overwriting old
	 *   reports it may also affect our sanity checks for invalid
	 *   reports when copying to userspace that assume new reports
	 *   are being written to cleared memory.
	 *
	 * - In the future we may want to introduce a flight recorder
	 *   mode where the driver will automatically maintain a safe
	 *   guard band between head/tail, avoiding this overflow
	 *   condition, but we avoid the added driver complexity for
	 *   now.
	 */
	if (unlikely(oastatus1 & GEN7_OASTATUS1_OABUFFER_OVERFLOW)) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
		if (ret)
			return ret;

		drm_dbg(&stream->perf->i915->drm,
			"OA buffer overflow (exponent = %d): force restart\n",
			stream->period_exponent);

		stream->perf->ops.oa_disable(stream);
		stream->perf->ops.oa_enable(stream);

		oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);
	}

	if (unlikely(oastatus1 & GEN7_OASTATUS1_REPORT_LOST)) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
		if (ret)
			return ret;
		stream->perf->gen7_latched_oastatus1 |=
			GEN7_OASTATUS1_REPORT_LOST;
	}

	return gen7_append_oa_reports(stream, buf, count, offset);
}

/**
 * i915_oa_wait_unlocked - handles blocking IO until OA data available
 * @stream: An i915-perf stream opened for OA metrics
 *
 * Called when userspace tries to read() from a blocking stream FD opened
 * for OA metrics. It waits until the hrtimer callback finds a non-empty
 * OA buffer and wakes us.
 *
 * Note: it's acceptable to have this return with some false positives
 * since any subsequent read handling will return -EAGAIN if there isn't
 * really data ready for userspace yet.
 *
 * Returns: zero on success or a negative error code
 */
static int i915_oa_wait_unlocked(struct i915_perf_stream *stream)
{
	/* We would wait indefinitely if periodic sampling is not enabled */
	if (!stream->periodic)
		return -EIO;

	return wait_event_interruptible(stream->poll_wq,
					oa_buffer_check_unlocked(stream));
}

/**
 * i915_oa_poll_wait - call poll_wait() for an OA stream poll()
 * @stream: An i915-perf stream opened for OA metrics
 * @file: An i915 perf stream file
 * @wait: poll() state table
 *
 * For handling userspace polling on an i915 perf stream opened for OA metrics,
 * this starts a poll_wait with the wait queue that our hrtimer callback wakes
 * when it sees data ready to read in the circular OA buffer.
 */
static void i915_oa_poll_wait(struct i915_perf_stream *stream,
			      struct file *file,
			      poll_table *wait)
{
	poll_wait(file, &stream->poll_wq, wait);
}

/**
 * i915_oa_read - just calls through to &i915_oa_ops->read
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * Returns: zero on success or a negative error code
 */
static int i915_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	return stream->perf->ops.read(stream, buf, count, offset);
}

static struct intel_context *oa_pin_context(struct i915_perf_stream *stream)
{
	struct i915_gem_engines_iter it;
	struct i915_gem_context *ctx = stream->ctx;
	struct intel_context *ce;
	struct i915_gem_ww_ctx ww;
	int err = -ENODEV;

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		if (ce->engine != stream->engine) /* first match! */
			continue;

		err = 0;
		break;
	}
	i915_gem_context_unlock_engines(ctx);

	if (err)
		return ERR_PTR(err);

	i915_gem_ww_ctx_init(&ww, true);
retry:
	/*
	 * As the ID is the gtt offset of the context's vma we
	 * pin the vma to ensure the ID remains fixed.
	 */
	err = intel_context_pin_ww(ce, &ww);
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);

	if (err)
		return ERR_PTR(err);

	stream->pinned_ctx = ce;
	return stream->pinned_ctx;
}

static int
__store_reg_to_mem(struct i915_request *rq, i915_reg_t reg, u32 ggtt_offset)
{
	u32 *cs, cmd;

	cmd = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
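	/*
	 * On Gen8+, MI_SRM takes a 64-bit address (one extra dword); the low
	 * bits of the opcode dword encode the command length, hence the
	 * increment below.
	 */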
	if (GRAPHICS_VER(rq->engine->i915) >= 8)
		cmd++;

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = cmd;
	*cs++ = i915_mmio_reg_offset(reg);
	*cs++ = ggtt_offset;
	*cs++ = 0;

	intel_ring_advance(rq, cs);

	return 0;
}

static int
__read_reg(struct intel_context *ce, i915_reg_t reg, u32 ggtt_offset)
{
	struct i915_request *rq;
	int err;

	rq = i915_request_create(ce);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	i915_request_get(rq);

	err = __store_reg_to_mem(rq, reg, ggtt_offset);

	i915_request_add(rq);
	if (!err && i915_request_wait(rq, 0, HZ / 2) < 0)
		err = -ETIME;

	i915_request_put(rq);

	return err;
}

static int
gen12_guc_sw_ctx_id(struct intel_context *ce, u32 *ctx_id)
{
	struct i915_vma *scratch;
	u32 *val;
	int err;

	scratch = __vm_create_scratch_for_read_pinned(&ce->engine->gt->ggtt->vm, 4);
	if (IS_ERR(scratch))
		return PTR_ERR(scratch);

	err = i915_vma_sync(scratch);
	if (err)
		goto err_scratch;

	err = __read_reg(ce, RING_EXECLIST_STATUS_HI(ce->engine->mmio_base),
			 i915_ggtt_offset(scratch));
	if (err)
		goto err_scratch;

	val = i915_gem_object_pin_map_unlocked(scratch->obj, I915_MAP_WB);
	if (IS_ERR(val)) {
		err = PTR_ERR(val);
		goto err_scratch;
	}

	*ctx_id = *val;
	i915_gem_object_unpin_map(scratch->obj);

err_scratch:
	i915_vma_unpin_and_release(&scratch, 0);
	return err;
}

/*
 * For execlist mode of submission, pick an unused context id
 * 0 - (NUM_CONTEXT_TAG - 1) are used by other contexts
 * XXX_MAX_CONTEXT_HW_ID is used by idle context
 *
 * For GuC mode of submission read context id from the upper dword of the
 * EXECLIST_STATUS register. Note that we read this value only once and expect
 * that the value stays fixed for the entire OA use case. There are cases where
 * GuC KMD implementation may deregister a context to reuse its context id, but
 * we prevent that from happening to the OA context by pinning it.
 */
static int gen12_get_render_context_id(struct i915_perf_stream *stream)
{
	u32 ctx_id, mask;
	int ret;

	if (intel_engine_uses_guc(stream->engine)) {
		ret = gen12_guc_sw_ctx_id(stream->pinned_ctx, &ctx_id);
		if (ret)
			return ret;

		mask = ((1U << GEN12_GUC_SW_CTX_ID_WIDTH) - 1) <<
			(GEN12_GUC_SW_CTX_ID_SHIFT - 32);
	} else if (GRAPHICS_VER_FULL(stream->engine->i915) >= IP_VER(12, 50)) {
		ctx_id = (XEHP_MAX_CONTEXT_HW_ID - 1) <<
			(XEHP_SW_CTX_ID_SHIFT - 32);

		mask = ((1U << XEHP_SW_CTX_ID_WIDTH) - 1) <<
			(XEHP_SW_CTX_ID_SHIFT - 32);
	} else {
		ctx_id = (GEN12_MAX_CONTEXT_HW_ID - 1) <<
			 (GEN11_SW_CTX_ID_SHIFT - 32);

		mask = ((1U << GEN11_SW_CTX_ID_WIDTH) - 1) <<
			(GEN11_SW_CTX_ID_SHIFT - 32);
	}
	stream->specific_ctx_id = ctx_id & mask;
	stream->specific_ctx_id_mask = mask;

	return 0;
}

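/*
 * The two helpers below search the context image for a register offset within
 * MI_LOAD_REGISTER_IMM (MI_LRI) packets. As an illustrative sketch, an MI_LRI
 * packet writing two registers occupies five dwords:
 *
 *	u32 lri[] = {
 *		MI_LOAD_REGISTER_IMM(2),	// header encoding the length
 *		reg0_offset, reg0_value,
 *		reg1_offset, reg1_value,
 *	};
 *
 * which is why oa_find_reg_in_lri() skips the header dword and then compares
 * every other dword against the offset being sought.
 */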
static bool oa_find_reg_in_lri(u32 *state, u32 reg, u32 *offset, u32 end)
{
	u32 idx = *offset;
	u32 len = min(MI_LRI_LEN(state[idx]) + idx, end);
	bool found = false;

	idx++;
	for (; idx < len; idx += 2) {
		if (state[idx] == reg) {
			found = true;
			break;
		}
	}

	*offset = idx;
	return found;
}

static u32 oa_context_image_offset(struct intel_context *ce, u32 reg)
{
	u32 offset, len = (ce->engine->context_size - PAGE_SIZE) / 4;
	u32 *state = ce->lrc_reg_state;

	if (drm_WARN_ON(&ce->engine->i915->drm, !state))
		return U32_MAX;

	for (offset = 0; offset < len; ) {
		if (IS_MI_LRI_CMD(state[offset])) {
			/*
			 * We expect reg-value pairs in an MI_LRI command, so
			 * MI_LRI_LEN() should be even; if not, issue a warning.
1393 			 */
1394 			drm_WARN_ON(&ce->engine->i915->drm,
1395 				    MI_LRI_LEN(state[offset]) & 0x1);
1396 
1397 			if (oa_find_reg_in_lri(state, reg, &offset, len))
1398 				break;
1399 		} else {
1400 			offset++;
1401 		}
1402 	}
1403 
1404 	return offset < len ? offset : U32_MAX;
1405 }
1406 
1407 static int set_oa_ctx_ctrl_offset(struct intel_context *ce)
1408 {
1409 	i915_reg_t reg = GEN12_OACTXCONTROL(ce->engine->mmio_base);
1410 	struct i915_perf *perf = &ce->engine->i915->perf;
1411 	u32 offset = perf->ctx_oactxctrl_offset;
1412 
1413 	/* Do this only once. Failure is stored as offset of U32_MAX */
1414 	if (offset)
1415 		goto exit;
1416 
1417 	offset = oa_context_image_offset(ce, i915_mmio_reg_offset(reg));
1418 	perf->ctx_oactxctrl_offset = offset;
1419 
1420 	drm_dbg(&ce->engine->i915->drm,
1421 		"%s oa ctx control at 0x%08x dword offset\n",
1422 		ce->engine->name, offset);
1423 
1424 exit:
1425 	return offset && offset != U32_MAX ? 0 : -ENODEV;
1426 }
1427 
1428 static bool engine_supports_mi_query(struct intel_engine_cs *engine)
1429 {
1430 	return engine->class == RENDER_CLASS;
1431 }
1432 
1433 /**
1434  * oa_get_render_ctx_id - determine and hold ctx hw id
1435  * @stream: An i915-perf stream opened for OA metrics
1436  *
1437  * Determine the render context hw id, and ensure it remains fixed for the
1438  * lifetime of the stream. This ensures that we don't have to worry about
1439  * updating the context ID in OACONTROL on the fly.
1440  *
1441  * Returns: zero on success or a negative error code
1442  */
1443 static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
1444 {
1445 	struct intel_context *ce;
1446 	int ret = 0;
1447 
1448 	ce = oa_pin_context(stream);
1449 	if (IS_ERR(ce))
1450 		return PTR_ERR(ce);
1451 
1452 	if (engine_supports_mi_query(stream->engine) &&
1453 	    HAS_LOGICAL_RING_CONTEXTS(stream->perf->i915)) {
1454 		/*
1455 		 * We are enabling perf query here. If we don't find the context
1456 		 * offset here, just return an error.
1457 		 */
1458 		ret = set_oa_ctx_ctrl_offset(ce);
1459 		if (ret) {
1460 			intel_context_unpin(ce);
1461 			drm_err(&stream->perf->i915->drm,
1462 				"Enabling perf query failed for %s\n",
1463 				stream->engine->name);
1464 			return ret;
1465 		}
1466 	}
1467 
1468 	switch (GRAPHICS_VER(ce->engine->i915)) {
1469 	case 7: {
1470 		/*
1471 		 * On Haswell we don't do any post processing of the reports
1472 		 * and don't need to use the mask.
1473 		 */
1474 		stream->specific_ctx_id = i915_ggtt_offset(ce->state);
1475 		stream->specific_ctx_id_mask = 0;
1476 		break;
1477 	}
1478 
1479 	case 8:
1480 	case 9:
1481 		if (intel_engine_uses_guc(ce->engine)) {
1482 			/*
1483 			 * When using GuC, the context descriptor we write in
1484 			 * i915 is read by GuC and rewritten before it's
1485 			 * actually written into the hardware. The LRCA is
1486 			 * what is put into the context id field of the
1487 			 * context descriptor by GuC. Because it's aligned to
1488 			 * a page, the lower 12bits are always at 0 and
1489 			 * dropped by GuC. They won't be part of the context
1490 			 * ID in the OA reports, so squash those lower bits.
1491 			 */
1492 			stream->specific_ctx_id = ce->lrc.lrca >> 12;
1493 
1494 			/*
1495 			 * GuC uses the top bit to signal proxy submission, so
1496 			 * ignore that bit.
1497 			 */
1498 			stream->specific_ctx_id_mask =
1499 				(1U << (GEN8_CTX_ID_WIDTH - 1)) - 1;
1500 		} else {
1501 			stream->specific_ctx_id_mask =
1502 				(1U << GEN8_CTX_ID_WIDTH) - 1;
1503 			stream->specific_ctx_id = stream->specific_ctx_id_mask;
1504 		}
1505 		break;
1506 
1507 	case 11:
1508 	case 12:
1509 		ret = gen12_get_render_context_id(stream);
1510 		break;
1511 
1512 	default:
1513 		MISSING_CASE(GRAPHICS_VER(ce->engine->i915));
1514 	}
1515 
1516 	ce->tag = stream->specific_ctx_id;
1517 
1518 	drm_dbg(&stream->perf->i915->drm,
1519 		"filtering on ctx_id=0x%x ctx_id_mask=0x%x\n",
1520 		stream->specific_ctx_id,
1521 		stream->specific_ctx_id_mask);
1522 
1523 	return ret;
1524 }
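
/*
 * Worked example for the GuC case above (the LRCA value is made up, and
 * GEN8_CTX_ID_WIDTH is assumed to be 21 as currently defined): an LRCA of
 * 0x00345000 gives specific_ctx_id = 0x345, and specific_ctx_id_mask =
 * (1U << 20) - 1 = 0xfffff, i.e. everything except the top (proxy
 * submission) bit of the 21 bit context id.
 */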
1525 
1526 /**
1527  * oa_put_render_ctx_id - counterpart to oa_get_render_ctx_id; releases the hold
1528  * @stream: An i915-perf stream opened for OA metrics
1529  *
1530  * If anything was done in oa_get_render_ctx_id() to keep the context HW ID
1531  * valid for the lifetime of the stream, it can be undone here.
1532  */
1533 static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
1534 {
1535 	struct intel_context *ce;
1536 
1537 	ce = fetch_and_zero(&stream->pinned_ctx);
1538 	if (ce) {
1539 		ce->tag = 0; /* recomputed on next submission after parking */
1540 		intel_context_unpin(ce);
1541 	}
1542 
1543 	stream->specific_ctx_id = INVALID_CTX_ID;
1544 	stream->specific_ctx_id_mask = 0;
1545 }
1546 
1547 static void
1548 free_oa_buffer(struct i915_perf_stream *stream)
1549 {
1550 	i915_vma_unpin_and_release(&stream->oa_buffer.vma,
1551 				   I915_VMA_RELEASE_MAP);
1552 
1553 	stream->oa_buffer.vaddr = NULL;
1554 }
1555 
1556 static void
1557 free_oa_configs(struct i915_perf_stream *stream)
1558 {
1559 	struct i915_oa_config_bo *oa_bo, *tmp;
1560 
1561 	i915_oa_config_put(stream->oa_config);
1562 	llist_for_each_entry_safe(oa_bo, tmp, stream->oa_config_bos.first, node)
1563 		free_oa_config_bo(oa_bo);
1564 }
1565 
1566 static void
1567 free_noa_wait(struct i915_perf_stream *stream)
1568 {
1569 	i915_vma_unpin_and_release(&stream->noa_wait, 0);
1570 }
1571 
1572 static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
1573 {
1574 	struct i915_perf *perf = stream->perf;
1575 	struct intel_gt *gt = stream->engine->gt;
1576 
1577 	if (WARN_ON(stream != gt->perf.exclusive_stream))
1578 		return;
1579 
1580 	/*
1581 	 * Unset exclusive_stream first, it will be checked while disabling
1582 	 * the metric set on gen8+.
1583 	 *
1584 	 * See i915_oa_init_reg_state() and lrc_configure_all_contexts()
1585 	 */
1586 	WRITE_ONCE(gt->perf.exclusive_stream, NULL);
1587 	perf->ops.disable_metric_set(stream);
1588 
1589 	free_oa_buffer(stream);
1590 
1591 	/*
1592 	 * Wa_16011777198:dg2: Unset the override of GUCRC mode to enable rc6.
1593 	 */
1594 	if (intel_uc_uses_guc_rc(&gt->uc) &&
1595 	    (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_C0) ||
1596 	     IS_DG2_GRAPHICS_STEP(gt->i915, G11, STEP_A0, STEP_B0)))
1597 		drm_WARN_ON(&gt->i915->drm,
1598 			    intel_guc_slpc_unset_gucrc_mode(&gt->uc.guc.slpc));
1599 
1600 	intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL);
1601 	intel_engine_pm_put(stream->engine);
1602 
1603 	if (stream->ctx)
1604 		oa_put_render_ctx_id(stream);
1605 
1606 	free_oa_configs(stream);
1607 	free_noa_wait(stream);
1608 
1609 	if (perf->spurious_report_rs.missed) {
1610 		drm_notice(&gt->i915->drm,
1611 			   "%d spurious OA report notices suppressed due to ratelimiting\n",
1612 			   perf->spurious_report_rs.missed);
1613 	}
1614 }
1615 
1616 static void gen7_init_oa_buffer(struct i915_perf_stream *stream)
1617 {
1618 	struct intel_uncore *uncore = stream->uncore;
1619 	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
1620 	unsigned long flags;
1621 
1622 	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
1623 
1624 	/* Pre-DevBDW: OABUFFER must be set with counters off,
1625 	 * before OASTATUS1, but after OASTATUS2
1626 	 */
1627 	intel_uncore_write(uncore, GEN7_OASTATUS2, /* head */
1628 			   gtt_offset | GEN7_OASTATUS2_MEM_SELECT_GGTT);
1629 	stream->oa_buffer.head = gtt_offset;
1630 
1631 	intel_uncore_write(uncore, GEN7_OABUFFER, gtt_offset);
1632 
1633 	intel_uncore_write(uncore, GEN7_OASTATUS1, /* tail */
1634 			   gtt_offset | OABUFFER_SIZE_16M);
1635 
1636 	/* Mark that we need updated tail pointers to read from... */
1637 	stream->oa_buffer.aging_tail = INVALID_TAIL_PTR;
1638 	stream->oa_buffer.tail = gtt_offset;
1639 
1640 	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
1641 
1642 	/* On Haswell we have to track which OASTATUS1 flags we've
1643 	 * already seen since they can't be cleared while periodic
1644 	 * sampling is enabled.
1645 	 */
1646 	stream->perf->gen7_latched_oastatus1 = 0;
1647 
1648 	/* NB: although the OA buffer will initially be allocated
1649 	 * zeroed via shmfs (and so this memset is redundant when
1650 	 * first allocating), we may re-init the OA buffer, either
1651 	 * when re-enabling a stream or in error/reset paths.
1652 	 *
1653 	 * We clear the buffer on each re-init because the sanity check
1654 	 * in gen7_append_oa_reports() looks at the report-id field to
1655 	 * make sure it's non-zero, which relies on the assumption that
1656 	 * new reports are being written to zeroed memory...
1658 	 */
1659 	memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
1660 }
1661 
1662 static void gen8_init_oa_buffer(struct i915_perf_stream *stream)
1663 {
1664 	struct intel_uncore *uncore = stream->uncore;
1665 	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
1666 	unsigned long flags;
1667 
1668 	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
1669 
1670 	intel_uncore_write(uncore, GEN8_OASTATUS, 0);
1671 	intel_uncore_write(uncore, GEN8_OAHEADPTR, gtt_offset);
1672 	stream->oa_buffer.head = gtt_offset;
1673 
1674 	intel_uncore_write(uncore, GEN8_OABUFFER_UDW, 0);
1675 
1676 	/*
1677 	 * PRM says:
1678 	 *
1679 	 *  "This MMIO must be set before the OATAILPTR
1680 	 *  register and after the OAHEADPTR register. This is
1681 	 *  to enable proper functionality of the overflow
1682 	 *  bit."
1683 	 */
1684 	intel_uncore_write(uncore, GEN8_OABUFFER, gtt_offset |
1685 		   OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT);
1686 	intel_uncore_write(uncore, GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK);
1687 
1688 	/* Mark that we need updated tail pointers to read from... */
1689 	stream->oa_buffer.aging_tail = INVALID_TAIL_PTR;
1690 	stream->oa_buffer.tail = gtt_offset;
1691 
1692 	/*
1693 	 * Reset state used to recognise context switches, affecting which
1694 	 * reports we will forward to userspace while filtering for a single
1695 	 * context.
1696 	 */
1697 	stream->oa_buffer.last_ctx_id = INVALID_CTX_ID;
1698 
1699 	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
1700 
1701 	/*
1702 	 * NB: although the OA buffer will initially be allocated
1703 	 * zeroed via shmfs (and so this memset is redundant when
1704 	 * first allocating), we may re-init the OA buffer, either
1705 	 * when re-enabling a stream or in error/reset paths.
1706 	 *
1707 	 * We clear the buffer on each re-init because the sanity check
1708 	 * in gen8_append_oa_reports() looks at the reason field to make
1709 	 * sure it's non-zero, which relies on the assumption that new
1710 	 * reports are being written to zeroed memory...
1712 	 */
1713 	memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
1714 }
1715 
1716 static void gen12_init_oa_buffer(struct i915_perf_stream *stream)
1717 {
1718 	struct intel_uncore *uncore = stream->uncore;
1719 	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
1720 	unsigned long flags;
1721 
1722 	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
1723 
1724 	intel_uncore_write(uncore, GEN12_OAG_OASTATUS, 0);
1725 	intel_uncore_write(uncore, GEN12_OAG_OAHEADPTR,
1726 			   gtt_offset & GEN12_OAG_OAHEADPTR_MASK);
1727 	stream->oa_buffer.head = gtt_offset;
1728 
1729 	/*
1730 	 * PRM says:
1731 	 *
1732 	 *  "This MMIO must be set before the OATAILPTR
1733 	 *  register and after the OAHEADPTR register. This is
1734 	 *  to enable proper functionality of the overflow
1735 	 *  bit."
1736 	 */
1737 	intel_uncore_write(uncore, GEN12_OAG_OABUFFER, gtt_offset |
1738 			   OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT);
1739 	intel_uncore_write(uncore, GEN12_OAG_OATAILPTR,
1740 			   gtt_offset & GEN12_OAG_OATAILPTR_MASK);
1741 
1742 	/* Mark that we need updated tail pointers to read from... */
1743 	stream->oa_buffer.aging_tail = INVALID_TAIL_PTR;
1744 	stream->oa_buffer.tail = gtt_offset;
1745 
1746 	/*
1747 	 * Reset state used to recognise context switches, affecting which
1748 	 * reports we will forward to userspace while filtering for a single
1749 	 * context.
1750 	 */
1751 	stream->oa_buffer.last_ctx_id = INVALID_CTX_ID;
1752 
1753 	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
1754 
1755 	/*
1756 	 * NB: although the OA buffer will initially be allocated
1757 	 * zeroed via shmfs (and so this memset is redundant when
1758 	 * first allocating), we may re-init the OA buffer, either
1759 	 * when re-enabling a stream or in error/reset paths.
1760 	 *
1761 	 * We clear the buffer on each re-init because the sanity check
1762 	 * in gen8_append_oa_reports() looks at the reason field to make
1763 	 * sure it's non-zero, which relies on the assumption that new
1764 	 * reports are being written to zeroed memory...
1766 	 */
1767 	memset(stream->oa_buffer.vaddr, 0,
1768 	       stream->oa_buffer.vma->size);
1769 }
1770 
1771 static int alloc_oa_buffer(struct i915_perf_stream *stream)
1772 {
1773 	struct drm_i915_private *i915 = stream->perf->i915;
1774 	struct intel_gt *gt = stream->engine->gt;
1775 	struct drm_i915_gem_object *bo;
1776 	struct i915_vma *vma;
1777 	int ret;
1778 
1779 	if (drm_WARN_ON(&i915->drm, stream->oa_buffer.vma))
1780 		return -ENODEV;
1781 
1782 	BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE);
1783 	BUILD_BUG_ON(OA_BUFFER_SIZE < SZ_128K || OA_BUFFER_SIZE > SZ_16M);
1784 
1785 	bo = i915_gem_object_create_shmem(stream->perf->i915, OA_BUFFER_SIZE);
1786 	if (IS_ERR(bo)) {
1787 		drm_err(&i915->drm, "Failed to allocate OA buffer\n");
1788 		return PTR_ERR(bo);
1789 	}
1790 
1791 	i915_gem_object_set_cache_coherency(bo, I915_CACHE_LLC);
1792 
1793 	/* Pre-HSW required 512K alignment; HSW requires 16M */
1794 	vma = i915_vma_instance(bo, &gt->ggtt->vm, NULL);
1795 	if (IS_ERR(vma)) {
1796 		ret = PTR_ERR(vma);
1797 		goto err_unref;
1798 	}
1799 
1800 	/*
1801 	 * Pre-HSW required 512K alignment.
1802 	 * From HSW onwards, align to the requested size of the OA buffer.
1803 	 */
1804 	ret = i915_vma_pin(vma, 0, SZ_16M, PIN_GLOBAL | PIN_HIGH);
1805 	if (ret) {
1806 		drm_err(&gt->i915->drm, "Failed to pin OA buffer %d\n", ret);
1807 		goto err_unref;
1808 	}
1809 
1810 	stream->oa_buffer.vma = vma;
1811 
1812 	stream->oa_buffer.vaddr =
1813 		i915_gem_object_pin_map_unlocked(bo, I915_MAP_WB);
1814 	if (IS_ERR(stream->oa_buffer.vaddr)) {
1815 		ret = PTR_ERR(stream->oa_buffer.vaddr);
1816 		goto err_unpin;
1817 	}
1818 
1819 	return 0;
1820 
1821 err_unpin:
1822 	__i915_vma_unpin(vma);
1823 
1824 err_unref:
1825 	i915_gem_object_put(bo);
1826 
1827 	stream->oa_buffer.vaddr = NULL;
1828 	stream->oa_buffer.vma = NULL;
1829 
1830 	return ret;
1831 }
1832 
1833 static u32 *save_restore_register(struct i915_perf_stream *stream, u32 *cs,
1834 				  bool save, i915_reg_t reg, u32 offset,
1835 				  u32 dword_count)
1836 {
1837 	u32 cmd;
1838 	u32 d;
1839 
1840 	cmd = save ? MI_STORE_REGISTER_MEM : MI_LOAD_REGISTER_MEM;
1841 	cmd |= MI_SRM_LRM_GLOBAL_GTT;
1842 	if (GRAPHICS_VER(stream->perf->i915) >= 8)
1843 		cmd++;
1844 
1845 	for (d = 0; d < dword_count; d++) {
1846 		*cs++ = cmd;
1847 		*cs++ = i915_mmio_reg_offset(reg) + 4 * d;
1848 		*cs++ = i915_ggtt_offset(stream->noa_wait) + offset + 4 * d;
1849 		*cs++ = 0;
1850 	}
1851 
1852 	return cs;
1853 }
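
/*
 * For example (illustrative, gen8+ where the incremented 'cmd' takes a
 * 64bit address), saving one 64bit CS_GPR register (dword_count == 2)
 * emits two 4-dword SRM packets:
 *
 *   cmd                               cmd
 *   i915_mmio_reg_offset(reg)         i915_mmio_reg_offset(reg) + 4
 *   i915_ggtt_offset(noa_wait)        i915_ggtt_offset(noa_wait)
 *       + offset                          + offset + 4
 *   0 (address high dword)            0
 */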
1854 
1855 static int alloc_noa_wait(struct i915_perf_stream *stream)
1856 {
1857 	struct drm_i915_private *i915 = stream->perf->i915;
1858 	struct intel_gt *gt = stream->engine->gt;
1859 	struct drm_i915_gem_object *bo;
1860 	struct i915_vma *vma;
1861 	const u64 delay_ticks = 0xffffffffffffffff -
1862 		intel_gt_ns_to_clock_interval(to_gt(stream->perf->i915),
1863 		atomic64_read(&stream->perf->noa_programming_delay));
1864 	const u32 base = stream->engine->mmio_base;
1865 #define CS_GPR(x) GEN8_RING_CS_GPR(base, x)
1866 	u32 *batch, *ts0, *cs, *jump;
1867 	struct i915_gem_ww_ctx ww;
1868 	int ret, i;
1869 	enum {
1870 		START_TS,
1871 		NOW_TS,
1872 		DELTA_TS,
1873 		JUMP_PREDICATE,
1874 		DELTA_TARGET,
1875 		N_CS_GPR
1876 	};
1877 	i915_reg_t mi_predicate_result = HAS_MI_SET_PREDICATE(i915) ?
1878 					  MI_PREDICATE_RESULT_2_ENGINE(base) :
1879 					  MI_PREDICATE_RESULT_1(RENDER_RING_BASE);
1880 
1881 	/*
1882 	 * gt->scratch was being used to save/restore the GPR registers, but on
1883 	 * MTL the scratch uses stolen lmem. An MI_SRM to this memory region
1884 	 * causes an engine hang. Instead allocate an additional page here to
1885 	 * save/restore GPR registers
1886 	 * save/restore GPR registers.
1887 	bo = i915_gem_object_create_internal(i915, 8192);
1888 	if (IS_ERR(bo)) {
1889 		drm_err(&i915->drm,
1890 			"Failed to allocate NOA wait batchbuffer\n");
1891 		return PTR_ERR(bo);
1892 	}
1893 
1894 	i915_gem_ww_ctx_init(&ww, true);
1895 retry:
1896 	ret = i915_gem_object_lock(bo, &ww);
1897 	if (ret)
1898 		goto out_ww;
1899 
1900 	/*
1901 	 * We pin in GGTT because multiple OA config BOs will contain a
1902 	 * jump to this address, which therefore needs to stay fixed for
1903 	 * the lifetime of the i915/perf stream.
1904 	 */
1905 	vma = i915_vma_instance(bo, &gt->ggtt->vm, NULL);
1906 	if (IS_ERR(vma)) {
1907 		ret = PTR_ERR(vma);
1908 		goto out_ww;
1909 	}
1910 
1911 	ret = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_GLOBAL | PIN_HIGH);
1912 	if (ret)
1913 		goto out_ww;
1914 
1915 	batch = cs = i915_gem_object_pin_map(bo, I915_MAP_WB);
1916 	if (IS_ERR(batch)) {
1917 		ret = PTR_ERR(batch);
1918 		goto err_unpin;
1919 	}
1920 
1921 	stream->noa_wait = vma;
1922 
1923 #define GPR_SAVE_OFFSET 4096
1924 #define PREDICATE_SAVE_OFFSET 4160
1925 
1926 	/* Save registers. */
1927 	for (i = 0; i < N_CS_GPR; i++)
1928 		cs = save_restore_register(
1929 			stream, cs, true /* save */, CS_GPR(i),
1930 			GPR_SAVE_OFFSET + 8 * i, 2);
1931 	cs = save_restore_register(
1932 		stream, cs, true /* save */, mi_predicate_result,
1933 		PREDICATE_SAVE_OFFSET, 1);
1934 
1935 	/* First timestamp snapshot location. */
1936 	ts0 = cs;
1937 
1938 	/*
1939 	 * Initial snapshot of the timestamp register to implement the wait.
1940 	 * We work with 32b values, so clear out the top 32 bits of the
1941 	 * register because the ALU operates on 64 bits.
1942 	 */
1943 	*cs++ = MI_LOAD_REGISTER_IMM(1);
1944 	*cs++ = i915_mmio_reg_offset(CS_GPR(START_TS)) + 4;
1945 	*cs++ = 0;
1946 	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
1947 	*cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base));
1948 	*cs++ = i915_mmio_reg_offset(CS_GPR(START_TS));
1949 
1950 	/*
1951 	 * This is the location we're going to jump back into until the
1952 	 * required amount of time has passed.
1953 	 */
1954 	jump = cs;
1955 
1956 	/*
1957 	 * Take another snapshot of the timestamp register. Take care to
1958 	 * clear the top 32 bits of CS_GPR(NOW_TS) as we're using it for
1959 	 * other operations below.
1960 	 */
1961 	*cs++ = MI_LOAD_REGISTER_IMM(1);
1962 	*cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS)) + 4;
1963 	*cs++ = 0;
1964 	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
1965 	*cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base));
1966 	*cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS));
1967 
1968 	/*
1969 	 * Do a diff between the 2 timestamps and store the result in
1970 	 * CS_GPR(DELTA_TS).
1971 	 */
1972 	*cs++ = MI_MATH(5);
1973 	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(NOW_TS));
1974 	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(START_TS));
1975 	*cs++ = MI_MATH_SUB;
1976 	*cs++ = MI_MATH_STORE(MI_MATH_REG(DELTA_TS), MI_MATH_REG_ACCU);
1977 	*cs++ = MI_MATH_STORE(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF);
1978 
1979 	/*
1980 	 * Transfer the carry flag (set to 1 if ts1 < ts0, meaning the
1981 	 * timestamp has rolled over the 32 bits) into the predicate register
1982 	 * to be used for the predicated jump.
1983 	 */
1984 	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
1985 	*cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE));
1986 	*cs++ = i915_mmio_reg_offset(mi_predicate_result);
1987 
1988 	if (HAS_MI_SET_PREDICATE(i915))
1989 		*cs++ = MI_SET_PREDICATE | 1;
1990 
1991 	/* Restart from the beginning if we had timestamps roll over. */
1992 	*cs++ = (GRAPHICS_VER(i915) < 8 ?
1993 		 MI_BATCH_BUFFER_START :
1994 		 MI_BATCH_BUFFER_START_GEN8) |
1995 		MI_BATCH_PREDICATE;
1996 	*cs++ = i915_ggtt_offset(vma) + (ts0 - batch) * 4;
1997 	*cs++ = 0;
1998 
1999 	if (HAS_MI_SET_PREDICATE(i915))
2000 		*cs++ = MI_SET_PREDICATE;
2001 
2002 	/*
2003 	 * Now take the diff between the two previous timestamps and add it to:
2004 	 *      ((1 << 64) - 1) - delay (in clock ticks)
2005 	 *
2006 	 * When the Carry Flag contains 1 this means the elapsed time is
2007 	 * longer than the expected delay, and we can exit the wait loop.
2008 	 */
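	/*
	 * Worked example (numbers assumed for illustration): a 500us delay
	 * at a 12.5MHz timestamp frequency is 6250 ticks, so DELTA_TARGET =
	 * (2^64 - 1) - 6250. DELTA_TS + DELTA_TARGET then carries out of
	 * bit 63 exactly when DELTA_TS > 6250, and the MI_MATH_STOREINV
	 * below inverts that carry so the predicated jump keeps looping
	 * only while the delay hasn't elapsed.
	 */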
2009 	*cs++ = MI_LOAD_REGISTER_IMM(2);
2010 	*cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET));
2011 	*cs++ = lower_32_bits(delay_ticks);
2012 	*cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET)) + 4;
2013 	*cs++ = upper_32_bits(delay_ticks);
2014 
2015 	*cs++ = MI_MATH(4);
2016 	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(DELTA_TS));
2017 	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(DELTA_TARGET));
2018 	*cs++ = MI_MATH_ADD;
2019 	*cs++ = MI_MATH_STOREINV(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF);
2020 
2021 	*cs++ = MI_ARB_CHECK;
2022 
2023 	/*
2024 	 * Transfer the result into the predicate register to be used for the
2025 	 * predicated jump.
2026 	 */
2027 	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
2028 	*cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE));
2029 	*cs++ = i915_mmio_reg_offset(mi_predicate_result);
2030 
2031 	if (HAS_MI_SET_PREDICATE(i915))
2032 		*cs++ = MI_SET_PREDICATE | 1;
2033 
2034 	/* Predicate the jump.  */
2035 	*cs++ = (GRAPHICS_VER(i915) < 8 ?
2036 		 MI_BATCH_BUFFER_START :
2037 		 MI_BATCH_BUFFER_START_GEN8) |
2038 		MI_BATCH_PREDICATE;
2039 	*cs++ = i915_ggtt_offset(vma) + (jump - batch) * 4;
2040 	*cs++ = 0;
2041 
2042 	if (HAS_MI_SET_PREDICATE(i915))
2043 		*cs++ = MI_SET_PREDICATE;
2044 
2045 	/* Restore registers. */
2046 	for (i = 0; i < N_CS_GPR; i++)
2047 		cs = save_restore_register(
2048 			stream, cs, false /* restore */, CS_GPR(i),
2049 			GPR_SAVE_OFFSET + 8 * i, 2);
2050 	cs = save_restore_register(
2051 		stream, cs, false /* restore */, mi_predicate_result,
2052 		PREDICATE_SAVE_OFFSET, 1);
2053 
2054 	/* And return to the ring. */
2055 	*cs++ = MI_BATCH_BUFFER_END;
2056 
2057 	GEM_BUG_ON(cs - batch > PAGE_SIZE / sizeof(*batch));
2058 
2059 	i915_gem_object_flush_map(bo);
2060 	__i915_gem_object_release_map(bo);
2061 
2062 	goto out_ww;
2063 
2064 err_unpin:
2065 	i915_vma_unpin_and_release(&vma, 0);
2066 out_ww:
2067 	if (ret == -EDEADLK) {
2068 		ret = i915_gem_ww_ctx_backoff(&ww);
2069 		if (!ret)
2070 			goto retry;
2071 	}
2072 	i915_gem_ww_ctx_fini(&ww);
2073 	if (ret)
2074 		i915_gem_object_put(bo);
2075 	return ret;
2076 }
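
/*
 * Schematically, the batch built above is:
 *
 *   save CS_GPRs + predicate register
 *   ts0:  START_TS = RING_TIMESTAMP         (lower 32 bits only)
 *   jump: NOW_TS   = RING_TIMESTAMP
 *         DELTA_TS = NOW_TS - START_TS      (carry => 32 bit rollover)
 *         if rollover: restart at ts0
 *         if DELTA_TS <= delay: loop back to jump
 *   restore CS_GPRs + predicate register
 *   MI_BATCH_BUFFER_END
 */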
2077 
2078 static u32 *write_cs_mi_lri(u32 *cs,
2079 			    const struct i915_oa_reg *reg_data,
2080 			    u32 n_regs)
2081 {
2082 	u32 i;
2083 
2084 	for (i = 0; i < n_regs; i++) {
2085 		if ((i % MI_LOAD_REGISTER_IMM_MAX_REGS) == 0) {
2086 			u32 n_lri = min_t(u32,
2087 					  n_regs - i,
2088 					  MI_LOAD_REGISTER_IMM_MAX_REGS);
2089 
2090 			*cs++ = MI_LOAD_REGISTER_IMM(n_lri);
2091 		}
2092 		*cs++ = i915_mmio_reg_offset(reg_data[i].addr);
2093 		*cs++ = reg_data[i].value;
2094 	}
2095 
2096 	return cs;
2097 }
2098 
2099 static int num_lri_dwords(int num_regs)
2100 {
2101 	int count = 0;
2102 
2103 	if (num_regs > 0) {
2104 		count += DIV_ROUND_UP(num_regs, MI_LOAD_REGISTER_IMM_MAX_REGS);
2105 		count += num_regs * 2;
2106 	}
2107 
2108 	return count;
2109 }
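
/*
 * For example, assuming MI_LOAD_REGISTER_IMM_MAX_REGS is 126 (its current
 * definition): num_lri_dwords(200) = DIV_ROUND_UP(200, 126) + 200 * 2 =
 * 2 + 400 = 402 dwords - two MI_LRI headers plus one (offset, value)
 * dword pair per register, matching what write_cs_mi_lri() emits above.
 */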
2110 
2111 static struct i915_oa_config_bo *
2112 alloc_oa_config_buffer(struct i915_perf_stream *stream,
2113 		       struct i915_oa_config *oa_config)
2114 {
2115 	struct drm_i915_gem_object *obj;
2116 	struct i915_oa_config_bo *oa_bo;
2117 	struct i915_gem_ww_ctx ww;
2118 	size_t config_length = 0;
2119 	u32 *cs;
2120 	int err;
2121 
2122 	oa_bo = kzalloc(sizeof(*oa_bo), GFP_KERNEL);
2123 	if (!oa_bo)
2124 		return ERR_PTR(-ENOMEM);
2125 
2126 	config_length += num_lri_dwords(oa_config->mux_regs_len);
2127 	config_length += num_lri_dwords(oa_config->b_counter_regs_len);
2128 	config_length += num_lri_dwords(oa_config->flex_regs_len);
2129 	config_length += 3; /* MI_BATCH_BUFFER_START */
2130 	config_length = ALIGN(sizeof(u32) * config_length, I915_GTT_PAGE_SIZE);
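	/*
	 * e.g. (illustrative counts) 100 mux + 50 boolean + 7 flex
	 * registers: 201 + 101 + 15 + 3 = 320 dwords = 1280 bytes,
	 * ALIGNed up to a single 4K GTT page.
	 */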
2131 
2132 	obj = i915_gem_object_create_shmem(stream->perf->i915, config_length);
2133 	if (IS_ERR(obj)) {
2134 		err = PTR_ERR(obj);
2135 		goto err_free;
2136 	}
2137 
2138 	i915_gem_ww_ctx_init(&ww, true);
2139 retry:
2140 	err = i915_gem_object_lock(obj, &ww);
2141 	if (err)
2142 		goto out_ww;
2143 
2144 	cs = i915_gem_object_pin_map(obj, I915_MAP_WB);
2145 	if (IS_ERR(cs)) {
2146 		err = PTR_ERR(cs);
2147 		goto out_ww;
2148 	}
2149 
2150 	cs = write_cs_mi_lri(cs,
2151 			     oa_config->mux_regs,
2152 			     oa_config->mux_regs_len);
2153 	cs = write_cs_mi_lri(cs,
2154 			     oa_config->b_counter_regs,
2155 			     oa_config->b_counter_regs_len);
2156 	cs = write_cs_mi_lri(cs,
2157 			     oa_config->flex_regs,
2158 			     oa_config->flex_regs_len);
2159 
2160 	/* Jump into the active wait. */
2161 	*cs++ = (GRAPHICS_VER(stream->perf->i915) < 8 ?
2162 		 MI_BATCH_BUFFER_START :
2163 		 MI_BATCH_BUFFER_START_GEN8);
2164 	*cs++ = i915_ggtt_offset(stream->noa_wait);
2165 	*cs++ = 0;
2166 
2167 	i915_gem_object_flush_map(obj);
2168 	__i915_gem_object_release_map(obj);
2169 
2170 	oa_bo->vma = i915_vma_instance(obj,
2171 				       &stream->engine->gt->ggtt->vm,
2172 				       NULL);
2173 	if (IS_ERR(oa_bo->vma)) {
2174 		err = PTR_ERR(oa_bo->vma);
2175 		goto out_ww;
2176 	}
2177 
2178 	oa_bo->oa_config = i915_oa_config_get(oa_config);
2179 	llist_add(&oa_bo->node, &stream->oa_config_bos);
2180 
2181 out_ww:
2182 	if (err == -EDEADLK) {
2183 		err = i915_gem_ww_ctx_backoff(&ww);
2184 		if (!err)
2185 			goto retry;
2186 	}
2187 	i915_gem_ww_ctx_fini(&ww);
2188 
2189 	if (err)
2190 		i915_gem_object_put(obj);
2191 err_free:
2192 	if (err) {
2193 		kfree(oa_bo);
2194 		return ERR_PTR(err);
2195 	}
2196 	return oa_bo;
2197 }
2198 
2199 static struct i915_vma *
2200 get_oa_vma(struct i915_perf_stream *stream, struct i915_oa_config *oa_config)
2201 {
2202 	struct i915_oa_config_bo *oa_bo;
2203 
2204 	/*
2205 	 * Look for the buffer in the already allocated BOs attached
2206 	 * to the stream.
2207 	 */
2208 	llist_for_each_entry(oa_bo, stream->oa_config_bos.first, node) {
2209 		if (oa_bo->oa_config == oa_config &&
2210 		    memcmp(oa_bo->oa_config->uuid,
2211 			   oa_config->uuid,
2212 			   sizeof(oa_config->uuid)) == 0)
2213 			goto out;
2214 	}
2215 
2216 	oa_bo = alloc_oa_config_buffer(stream, oa_config);
2217 	if (IS_ERR(oa_bo))
2218 		return ERR_CAST(oa_bo);
2219 
2220 out:
2221 	return i915_vma_get(oa_bo->vma);
2222 }
2223 
2224 static int
2225 emit_oa_config(struct i915_perf_stream *stream,
2226 	       struct i915_oa_config *oa_config,
2227 	       struct intel_context *ce,
2228 	       struct i915_active *active)
2229 {
2230 	struct i915_request *rq;
2231 	struct i915_vma *vma;
2232 	struct i915_gem_ww_ctx ww;
2233 	int err;
2234 
2235 	vma = get_oa_vma(stream, oa_config);
2236 	if (IS_ERR(vma))
2237 		return PTR_ERR(vma);
2238 
2239 	i915_gem_ww_ctx_init(&ww, true);
2240 retry:
2241 	err = i915_gem_object_lock(vma->obj, &ww);
2242 	if (err)
2243 		goto err;
2244 
2245 	err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_GLOBAL | PIN_HIGH);
2246 	if (err)
2247 		goto err;
2248 
2249 	intel_engine_pm_get(ce->engine);
2250 	rq = i915_request_create(ce);
2251 	intel_engine_pm_put(ce->engine);
2252 	if (IS_ERR(rq)) {
2253 		err = PTR_ERR(rq);
2254 		goto err_vma_unpin;
2255 	}
2256 
2257 	if (!IS_ERR_OR_NULL(active)) {
2258 		/* After all individual context modifications */
2259 		err = i915_request_await_active(rq, active,
2260 						I915_ACTIVE_AWAIT_ACTIVE);
2261 		if (err)
2262 			goto err_add_request;
2263 
2264 		err = i915_active_add_request(active, rq);
2265 		if (err)
2266 			goto err_add_request;
2267 	}
2268 
2269 	err = i915_vma_move_to_active(vma, rq, 0);
2270 	if (err)
2271 		goto err_add_request;
2272 
2273 	err = rq->engine->emit_bb_start(rq,
2274 					i915_vma_offset(vma), 0,
2275 					I915_DISPATCH_SECURE);
2276 	if (err)
2277 		goto err_add_request;
2278 
2279 err_add_request:
2280 	i915_request_add(rq);
2281 err_vma_unpin:
2282 	i915_vma_unpin(vma);
2283 err:
2284 	if (err == -EDEADLK) {
2285 		err = i915_gem_ww_ctx_backoff(&ww);
2286 		if (!err)
2287 			goto retry;
2288 	}
2289 
2290 	i915_gem_ww_ctx_fini(&ww);
2291 	i915_vma_put(vma);
2292 	return err;
2293 }
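
/*
 * The err == -EDEADLK / backoff / retry dance above is the standard
 * i915_gem_ww_ctx locking pattern: on deadlock the context drops its
 * locks, sleeps on the contended one, and the whole sequence is retried.
 */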
2294 
2295 static struct intel_context *oa_context(struct i915_perf_stream *stream)
2296 {
2297 	return stream->pinned_ctx ?: stream->engine->kernel_context;
2298 }
2299 
2300 static int
2301 hsw_enable_metric_set(struct i915_perf_stream *stream,
2302 		      struct i915_active *active)
2303 {
2304 	struct intel_uncore *uncore = stream->uncore;
2305 
2306 	/*
2307 	 * PRM:
2308 	 *
2309 	 * OA unit is using “crclk” for its functionality. When trunk
2310 	 * level clock gating takes place, OA clock would be gated,
2311 	 * unable to count the events from non-render clock domain.
2312 	 * Render clock gating must be disabled when OA is enabled to
2313 	 * count the events from non-render domain. Unit level clock
2314 	 * gating for RCS should also be disabled.
2315 	 */
2316 	intel_uncore_rmw(uncore, GEN7_MISCCPCTL,
2317 			 GEN7_DOP_CLOCK_GATE_ENABLE, 0);
2318 	intel_uncore_rmw(uncore, GEN6_UCGCTL1,
2319 			 0, GEN6_CSUNIT_CLOCK_GATE_DISABLE);
2320 
2321 	return emit_oa_config(stream,
2322 			      stream->oa_config, oa_context(stream),
2323 			      active);
2324 }
2325 
2326 static void hsw_disable_metric_set(struct i915_perf_stream *stream)
2327 {
2328 	struct intel_uncore *uncore = stream->uncore;
2329 
2330 	intel_uncore_rmw(uncore, GEN6_UCGCTL1,
2331 			 GEN6_CSUNIT_CLOCK_GATE_DISABLE, 0);
2332 	intel_uncore_rmw(uncore, GEN7_MISCCPCTL,
2333 			 0, GEN7_DOP_CLOCK_GATE_ENABLE);
2334 
2335 	intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0);
2336 }
2337 
2338 static u32 oa_config_flex_reg(const struct i915_oa_config *oa_config,
2339 			      i915_reg_t reg)
2340 {
2341 	u32 mmio = i915_mmio_reg_offset(reg);
2342 	int i;
2343 
2344 	/*
2345 	 * This arbitrary default will select the 'EU FPU0 Pipeline
2346 	 * Active' event. In the future it's anticipated that there
2347 	 * will be an explicit 'No Event' we can select, but not yet...
2348 	 */
2349 	if (!oa_config)
2350 		return 0;
2351 
2352 	for (i = 0; i < oa_config->flex_regs_len; i++) {
2353 		if (i915_mmio_reg_offset(oa_config->flex_regs[i].addr) == mmio)
2354 			return oa_config->flex_regs[i].value;
2355 	}
2356 
2357 	return 0;
2358 }
2359 /*
2360  * NB: It must always remain pointer safe to run this even if the OA unit
2361  * has been disabled.
2362  *
2363  * It's fine to put out-of-date values into these per-context registers
2364  * in the case that the OA unit has been disabled.
2365  */
2366 static void
2367 gen8_update_reg_state_unlocked(const struct intel_context *ce,
2368 			       const struct i915_perf_stream *stream)
2369 {
2370 	u32 ctx_oactxctrl = stream->perf->ctx_oactxctrl_offset;
2371 	u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset;
2372 	/* The MMIO offsets for Flex EU registers aren't contiguous */
2373 	static const i915_reg_t flex_regs[] = {
2374 		EU_PERF_CNTL0,
2375 		EU_PERF_CNTL1,
2376 		EU_PERF_CNTL2,
2377 		EU_PERF_CNTL3,
2378 		EU_PERF_CNTL4,
2379 		EU_PERF_CNTL5,
2380 		EU_PERF_CNTL6,
2381 	};
2382 	u32 *reg_state = ce->lrc_reg_state;
2383 	int i;
2384 
2385 	reg_state[ctx_oactxctrl + 1] =
2386 		(stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
2387 		(stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
2388 		GEN8_OA_COUNTER_RESUME;
2389 
2390 	for (i = 0; i < ARRAY_SIZE(flex_regs); i++)
2391 		reg_state[ctx_flexeu0 + i * 2 + 1] =
2392 			oa_config_flex_reg(stream->oa_config, flex_regs[i]);
2393 }
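
/*
 * Note the "+ 1" above: ctx_oactxctrl and ctx_flexeu0 are dword offsets of
 * the register *address* within the (address, value) pairs of the context
 * image, so the value being updated lives one dword later:
 *
 *   reg_state[off]     == i915_mmio_reg_offset(GEN8_OACTXCONTROL)
 *   reg_state[off + 1] == value loaded by the HW on context restore
 */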
2394 
2395 struct flex {
2396 	i915_reg_t reg;
2397 	u32 offset;
2398 	u32 value;
2399 };
2400 
2401 static int
2402 gen8_store_flex(struct i915_request *rq,
2403 		struct intel_context *ce,
2404 		const struct flex *flex, unsigned int count)
2405 {
2406 	u32 offset;
2407 	u32 *cs;
2408 
2409 	cs = intel_ring_begin(rq, 4 * count);
2410 	if (IS_ERR(cs))
2411 		return PTR_ERR(cs);
2412 
2413 	offset = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET;
2414 	do {
2415 		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
2416 		*cs++ = offset + flex->offset * sizeof(u32);
2417 		*cs++ = 0;
2418 		*cs++ = flex->value;
2419 	} while (flex++, --count);
2420 
2421 	intel_ring_advance(rq, cs);
2422 
2423 	return 0;
2424 }
2425 
2426 static int
2427 gen8_load_flex(struct i915_request *rq,
2428 	       struct intel_context *ce,
2429 	       const struct flex *flex, unsigned int count)
2430 {
2431 	u32 *cs;
2432 
2433 	GEM_BUG_ON(!count || count > 63);
2434 
2435 	cs = intel_ring_begin(rq, 2 * count + 2);
2436 	if (IS_ERR(cs))
2437 		return PTR_ERR(cs);
2438 
2439 	*cs++ = MI_LOAD_REGISTER_IMM(count);
2440 	do {
2441 		*cs++ = i915_mmio_reg_offset(flex->reg);
2442 		*cs++ = flex->value;
2443 	} while (flex++, --count);
2444 	*cs++ = MI_NOOP;
2445 
2446 	intel_ring_advance(rq, cs);
2447 
2448 	return 0;
2449 }
2450 
2451 static int gen8_modify_context(struct intel_context *ce,
2452 			       const struct flex *flex, unsigned int count)
2453 {
2454 	struct i915_request *rq;
2455 	int err;
2456 
2457 	rq = intel_engine_create_kernel_request(ce->engine);
2458 	if (IS_ERR(rq))
2459 		return PTR_ERR(rq);
2460 
2461 	/* Serialise with the remote context */
2462 	err = intel_context_prepare_remote_request(ce, rq);
2463 	if (err == 0)
2464 		err = gen8_store_flex(rq, ce, flex, count);
2465 
2466 	i915_request_add(rq);
2467 	return err;
2468 }
2469 
2470 static int
2471 gen8_modify_self(struct intel_context *ce,
2472 		 const struct flex *flex, unsigned int count,
2473 		 struct i915_active *active)
2474 {
2475 	struct i915_request *rq;
2476 	int err;
2477 
2478 	intel_engine_pm_get(ce->engine);
2479 	rq = i915_request_create(ce);
2480 	intel_engine_pm_put(ce->engine);
2481 	if (IS_ERR(rq))
2482 		return PTR_ERR(rq);
2483 
2484 	if (!IS_ERR_OR_NULL(active)) {
2485 		err = i915_active_add_request(active, rq);
2486 		if (err)
2487 			goto err_add_request;
2488 	}
2489 
2490 	err = gen8_load_flex(rq, ce, flex, count);
2491 	if (err)
2492 		goto err_add_request;
2493 
2494 err_add_request:
2495 	i915_request_add(rq);
2496 	return err;
2497 }
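
/*
 * The distinction between the two helpers above: gen8_modify_context()
 * rewrites another context's saved image in memory (MI_STORE_DWORD_IMM
 * emitted from a kernel context request), while gen8_modify_self() emits
 * MI_LRI from a request on the target context itself, so the live
 * register values change as the request executes.
 */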
2498 
2499 static int gen8_configure_context(struct i915_gem_context *ctx,
2500 				  struct flex *flex, unsigned int count)
2501 {
2502 	struct i915_gem_engines_iter it;
2503 	struct intel_context *ce;
2504 	int err = 0;
2505 
2506 	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
2507 		GEM_BUG_ON(ce == ce->engine->kernel_context);
2508 
2509 		if (ce->engine->class != RENDER_CLASS)
2510 			continue;
2511 
2512 		/* Otherwise OA settings will be set upon first use */
2513 		if (!intel_context_pin_if_active(ce))
2514 			continue;
2515 
2516 		flex->value = intel_sseu_make_rpcs(ce->engine->gt, &ce->sseu);
2517 		err = gen8_modify_context(ce, flex, count);
2518 
2519 		intel_context_unpin(ce);
2520 		if (err)
2521 			break;
2522 	}
2523 	i915_gem_context_unlock_engines(ctx);
2524 
2525 	return err;
2526 }
2527 
2528 static int gen12_configure_oar_context(struct i915_perf_stream *stream,
2529 				       struct i915_active *active)
2530 {
2531 	int err;
2532 	struct intel_context *ce = stream->pinned_ctx;
2533 	u32 format = stream->oa_buffer.format->format;
2534 	u32 offset = stream->perf->ctx_oactxctrl_offset;
2535 	struct flex regs_context[] = {
2536 		{
2537 			GEN8_OACTXCONTROL,
2538 			offset + 1,
2539 			active ? GEN8_OA_COUNTER_RESUME : 0,
2540 		},
2541 	};
2542 	/* Offsets in regs_lri are not used since this configuration is only
2543 	 * applied using LRI. Initialize the correct offsets for posterity.
2544 	 */
2545 #define GEN12_OAR_OACONTROL_OFFSET 0x5B0
2546 	struct flex regs_lri[] = {
2547 		{
2548 			GEN12_OAR_OACONTROL,
2549 			GEN12_OAR_OACONTROL_OFFSET + 1,
2550 			(format << GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT) |
2551 			(active ? GEN12_OAR_OACONTROL_COUNTER_ENABLE : 0)
2552 		},
2553 		{
2554 			RING_CONTEXT_CONTROL(ce->engine->mmio_base),
2555 			CTX_CONTEXT_CONTROL,
2556 			_MASKED_FIELD(GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE,
2557 				      active ?
2558 				      GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE :
2559 				      0)
2560 		},
2561 	};
2562 
2563 	/* Modify the context image of pinned context with regs_context */
2564 	err = intel_context_lock_pinned(ce);
2565 	if (err)
2566 		return err;
2567 
2568 	err = gen8_modify_context(ce, regs_context,
2569 				  ARRAY_SIZE(regs_context));
2570 	intel_context_unlock_pinned(ce);
2571 	if (err)
2572 		return err;
2573 
2574 	/* Apply regs_lri using LRI with pinned context */
2575 	return gen8_modify_self(ce, regs_lri, ARRAY_SIZE(regs_lri), active);
2576 }
2577 
2578 /*
2579  * Manages updating the per-context aspects of the OA stream
2580  * configuration across all contexts.
2581  *
2582  * The awkward consideration here is that OACTXCONTROL controls the
2583  * exponent for periodic sampling which is primarily used for system
2584  * wide profiling where we'd like a consistent sampling period even in
2585  * the face of context switches.
2586  *
2587  * Our approach of updating the register state context (as opposed to
2588  * say using a workaround batch buffer) ensures that the hardware
2589  * won't automatically reload an out-of-date timer exponent even
2590  * transiently before a WA BB could be parsed.
2591  *
2592  * This function needs to:
2593  * - Ensure the currently running context's per-context OA state is
2594  *   updated
2595  * - Ensure that all existing contexts will have the correct per-context
2596  *   OA state if they are scheduled for use.
2597  * - Ensure any new contexts will be initialized with the correct
2598  *   per-context OA state.
2599  *
2600  * Note: it's only the RCS/Render context that has any OA state.
2601  * Note: the first flex register passed must always be R_PWR_CLK_STATE
2602  */
2603 static int
2604 oa_configure_all_contexts(struct i915_perf_stream *stream,
2605 			  struct flex *regs,
2606 			  size_t num_regs,
2607 			  struct i915_active *active)
2608 {
2609 	struct drm_i915_private *i915 = stream->perf->i915;
2610 	struct intel_engine_cs *engine;
2611 	struct intel_gt *gt = stream->engine->gt;
2612 	struct i915_gem_context *ctx, *cn;
2613 	int err;
2614 
2615 	lockdep_assert_held(&gt->perf.lock);
2616 
2617 	/*
2618 	 * The OA register config is set up through the context image. This image
2619 	 * might be written to by the GPU on context switch (in particular on
2620 	 * lite-restore). This means we can't safely update a context's image,
2621 	 * if this context is scheduled/submitted to run on the GPU.
2622 	 *
2623 	 * We could emit the OA register config through the batch buffer but
2624 	 * this might leave a small interval of time where the OA unit is
2625 	 * configured at an invalid sampling period.
2626 	 *
2627 	 * Note that since we emit all requests from a single ring, there
2628 	 * is still an implicit global barrier here that may cause a high
2629 	 * priority context to wait for an otherwise independent low priority
2630 	 * context. Contexts idle at the time of reconfiguration are not
2631 	 * trapped behind the barrier.
2632 	 */
2633 	spin_lock(&i915->gem.contexts.lock);
2634 	list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
2635 		if (!kref_get_unless_zero(&ctx->ref))
2636 			continue;
2637 
2638 		spin_unlock(&i915->gem.contexts.lock);
2639 
2640 		err = gen8_configure_context(ctx, regs, num_regs);
2641 		if (err) {
2642 			i915_gem_context_put(ctx);
2643 			return err;
2644 		}
2645 
2646 		spin_lock(&i915->gem.contexts.lock);
2647 		list_safe_reset_next(ctx, cn, link);
2648 		i915_gem_context_put(ctx);
2649 	}
2650 	spin_unlock(&i915->gem.contexts.lock);
2651 
2652 	/*
2653 	 * After updating all other contexts, we need to modify ourselves.
2654 	 * If we don't modify the kernel_context, we do not get events while
2655 	 * idle.
2656 	 */
2657 	for_each_uabi_engine(engine, i915) {
2658 		struct intel_context *ce = engine->kernel_context;
2659 
2660 		if (engine->class != RENDER_CLASS)
2661 			continue;
2662 
2663 		regs[0].value = intel_sseu_make_rpcs(engine->gt, &ce->sseu);
2664 
2665 		err = gen8_modify_self(ce, regs, num_regs, active);
2666 		if (err)
2667 			return err;
2668 	}
2669 
2670 	return 0;
2671 }
2672 
2673 static int
2674 gen12_configure_all_contexts(struct i915_perf_stream *stream,
2675 			     const struct i915_oa_config *oa_config,
2676 			     struct i915_active *active)
2677 {
2678 	struct flex regs[] = {
2679 		{
2680 			GEN8_R_PWR_CLK_STATE(RENDER_RING_BASE),
2681 			CTX_R_PWR_CLK_STATE,
2682 		},
2683 	};
2684 
2685 	return oa_configure_all_contexts(stream,
2686 					 regs, ARRAY_SIZE(regs),
2687 					 active);
2688 }
2689 
2690 static int
2691 lrc_configure_all_contexts(struct i915_perf_stream *stream,
2692 			   const struct i915_oa_config *oa_config,
2693 			   struct i915_active *active)
2694 {
2695 	u32 ctx_oactxctrl = stream->perf->ctx_oactxctrl_offset;
2696 	/* The MMIO offsets for Flex EU registers aren't contiguous */
2697 	const u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset;
2698 #define ctx_flexeuN(N) (ctx_flexeu0 + 2 * (N) + 1)
2699 	struct flex regs[] = {
2700 		{
2701 			GEN8_R_PWR_CLK_STATE(RENDER_RING_BASE),
2702 			CTX_R_PWR_CLK_STATE,
2703 		},
2704 		{
2705 			GEN8_OACTXCONTROL,
2706 			ctx_oactxctrl + 1,
2707 		},
2708 		{ EU_PERF_CNTL0, ctx_flexeuN(0) },
2709 		{ EU_PERF_CNTL1, ctx_flexeuN(1) },
2710 		{ EU_PERF_CNTL2, ctx_flexeuN(2) },
2711 		{ EU_PERF_CNTL3, ctx_flexeuN(3) },
2712 		{ EU_PERF_CNTL4, ctx_flexeuN(4) },
2713 		{ EU_PERF_CNTL5, ctx_flexeuN(5) },
2714 		{ EU_PERF_CNTL6, ctx_flexeuN(6) },
2715 	};
2716 #undef ctx_flexeuN
2717 	int i;
2718 
2719 	regs[1].value =
2720 		(stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
2721 		(stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
2722 		GEN8_OA_COUNTER_RESUME;
2723 
2724 	for (i = 2; i < ARRAY_SIZE(regs); i++)
2725 		regs[i].value = oa_config_flex_reg(oa_config, regs[i].reg);
2726 
2727 	return oa_configure_all_contexts(stream,
2728 					 regs, ARRAY_SIZE(regs),
2729 					 active);
2730 }
2731 
2732 static int
2733 gen8_enable_metric_set(struct i915_perf_stream *stream,
2734 		       struct i915_active *active)
2735 {
2736 	struct intel_uncore *uncore = stream->uncore;
2737 	struct i915_oa_config *oa_config = stream->oa_config;
2738 	int ret;
2739 
2740 	/*
2741 	 * We disable slice/unslice clock ratio change reports on SKL since
2742 	 * they are too noisy. The HW generates a lot of redundant reports
2743 	 * where the ratio hasn't really changed, causing a lot of redundant
2744 	 * work for the processes reading them and increasing the chances
2745 	 * we'll hit buffer overruns.
2746 	 *
2747 	 * Although we don't currently use the 'disable overrun' OABUFFER
2748 	 * feature, it's worth noting that clock ratio reports have to be
2749 	 * disabled before using that feature, since the HW doesn't
2750 	 * correctly block these reports.
2751 	 *
2752 	 * Currently none of the high-level metrics we have depend on knowing
2753 	 * this ratio to normalize.
2754 	 *
2755 	 * Note: This register is not power context saved and restored, but
2756 	 * that's OK considering that we disable RC6 while the OA unit is
2757 	 * enabled.
2758 	 *
2759 	 * The _INCLUDE_CLK_RATIO bit allows the slice/unslice frequency to
2760 	 * be read back from automatically triggered reports, as part of the
2761 	 * RPT_ID field.
2762 	 */
2763 	if (IS_GRAPHICS_VER(stream->perf->i915, 9, 11)) {
2764 		intel_uncore_write(uncore, GEN8_OA_DEBUG,
2765 				   _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
2766 						      GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
2767 	}
2768 
2769 	/*
2770 	 * Update all contexts prior to writing the mux configurations as we need
2771 	 * to make sure all slices/subslices are ON before writing to NOA
2772 	 * registers.
2773 	 */
2774 	ret = lrc_configure_all_contexts(stream, oa_config, active);
2775 	if (ret)
2776 		return ret;
2777 
2778 	return emit_oa_config(stream,
2779 			      stream->oa_config, oa_context(stream),
2780 			      active);
2781 }
2782 
2783 static u32 oag_report_ctx_switches(const struct i915_perf_stream *stream)
2784 {
2785 	return _MASKED_FIELD(GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS,
2786 			     (stream->sample_flags & SAMPLE_OA_REPORT) ?
2787 			     0 : GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS);
2788 }
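
/*
 * _MASKED_FIELD() places the mask in the upper 16 bits of the value, so
 * the write only updates the selected bits of the register. E.g. (with a
 * purely illustrative bit position) if the disable flag were bit 5, the
 * above would return 0x00200020 to set it or 0x00200000 to clear it,
 * leaving the other OA_DEBUG bits untouched.
 */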
2789 
2790 static int
2791 gen12_enable_metric_set(struct i915_perf_stream *stream,
2792 			struct i915_active *active)
2793 {
2794 	struct drm_i915_private *i915 = stream->perf->i915;
2795 	struct intel_uncore *uncore = stream->uncore;
2796 	struct i915_oa_config *oa_config = stream->oa_config;
2797 	bool periodic = stream->periodic;
2798 	u32 period_exponent = stream->period_exponent;
2799 	u32 sqcnt1;
2800 	int ret;
2801 
2802 	/*
2803 	 * Wa_1508761755:xehpsdv, dg2
2804 	 * EU NOA signals behave incorrectly if EU clock gating is enabled.
2805 	 * Disable thread stall DOP gating and EU DOP gating.
2806 	 */
2807 	if (IS_XEHPSDV(i915) || IS_DG2(i915)) {
2808 		intel_gt_mcr_multicast_write(uncore->gt, GEN8_ROW_CHICKEN,
2809 					     _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
2810 		intel_uncore_write(uncore, GEN7_ROW_CHICKEN2,
2811 				   _MASKED_BIT_ENABLE(GEN12_DISABLE_DOP_GATING));
2812 	}
2813 
2814 	intel_uncore_write(uncore, GEN12_OAG_OA_DEBUG,
2815 			   /* Disable clk ratio reports, like previous Gens. */
2816 			   _MASKED_BIT_ENABLE(GEN12_OAG_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
2817 					      GEN12_OAG_OA_DEBUG_INCLUDE_CLK_RATIO) |
2818 			   /*
2819 			    * If the user didn't require OA reports, instruct
2820 			    * the hardware not to emit ctx switch reports.
2821 			    */
2822 			   oag_report_ctx_switches(stream));
2823 
2824 	intel_uncore_write(uncore, GEN12_OAG_OAGLBCTXCTRL, periodic ?
2825 			   (GEN12_OAG_OAGLBCTXCTRL_COUNTER_RESUME |
2826 			    GEN12_OAG_OAGLBCTXCTRL_TIMER_ENABLE |
2827 			    (period_exponent << GEN12_OAG_OAGLBCTXCTRL_TIMER_PERIOD_SHIFT))
2828 			    : 0);
2829 
2830 	/*
2831 	 * Initialize Super Queue Internal Cnt Register
2832 	 * Set PMON Enable in order to collect valid metrics.
2833 	 * Enable bytes per clock reporting in OA for XEHPSDV onward.
2834 	 */
2835 	sqcnt1 = GEN12_SQCNT1_PMON_ENABLE |
2836 		 (HAS_OA_BPC_REPORTING(i915) ? GEN12_SQCNT1_OABPC : 0);
2837 
2838 	intel_uncore_rmw(uncore, GEN12_SQCNT1, 0, sqcnt1);
2839 
2840 	/*
2841 	 * Update all contexts prior to writing the mux configurations as we need
2842 	 * to make sure all slices/subslices are ON before writing to NOA
2843 	 * registers.
2844 	 */
2845 	ret = gen12_configure_all_contexts(stream, oa_config, active);
2846 	if (ret)
2847 		return ret;
2848 
2849 	/*
2850 	 * For Gen12, performance counters are context
2851 	 * saved/restored. Only enable it for the context that
2852 	 * requested this.
2853 	 */
2854 	if (stream->ctx) {
2855 		ret = gen12_configure_oar_context(stream, active);
2856 		if (ret)
2857 			return ret;
2858 	}
2859 
2860 	return emit_oa_config(stream,
2861 			      stream->oa_config, oa_context(stream),
2862 			      active);
2863 }
2864 
2865 static void gen8_disable_metric_set(struct i915_perf_stream *stream)
2866 {
2867 	struct intel_uncore *uncore = stream->uncore;
2868 
2869 	/* Reset all contexts' slices/subslices configurations. */
2870 	lrc_configure_all_contexts(stream, NULL, NULL);
2871 
2872 	intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0);
2873 }
2874 
2875 static void gen11_disable_metric_set(struct i915_perf_stream *stream)
2876 {
2877 	struct intel_uncore *uncore = stream->uncore;
2878 
2879 	/* Reset all contexts' slices/subslices configurations. */
2880 	lrc_configure_all_contexts(stream, NULL, NULL);
2881 
2882 	/* Make sure we disable noa to save power. */
2883 	intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
2884 }
2885 
2886 static void gen12_disable_metric_set(struct i915_perf_stream *stream)
2887 {
2888 	struct intel_uncore *uncore = stream->uncore;
2889 	struct drm_i915_private *i915 = stream->perf->i915;
2890 	u32 sqcnt1;
2891 
2892 	/*
2893 	 * Wa_1508761755:xehpsdv, dg2
2894 	 * Enable thread stall DOP gating and EU DOP gating.
2895 	 */
2896 	if (IS_XEHPSDV(i915) || IS_DG2(i915)) {
2897 		intel_gt_mcr_multicast_write(uncore->gt, GEN8_ROW_CHICKEN,
2898 					     _MASKED_BIT_DISABLE(STALL_DOP_GATING_DISABLE));
2899 		intel_uncore_write(uncore, GEN7_ROW_CHICKEN2,
2900 				   _MASKED_BIT_DISABLE(GEN12_DISABLE_DOP_GATING));
2901 	}
2902 
2903 	/* Reset all contexts' slices/subslices configurations. */
2904 	gen12_configure_all_contexts(stream, NULL, NULL);
2905 
2906 	/* Disable the context save/restore of OAR counters */
2907 	if (stream->ctx)
2908 		gen12_configure_oar_context(stream, NULL);
2909 
2910 	/* Make sure we disable noa to save power. */
2911 	intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
2912 
2913 	sqcnt1 = GEN12_SQCNT1_PMON_ENABLE |
2914 		 (HAS_OA_BPC_REPORTING(i915) ? GEN12_SQCNT1_OABPC : 0);
2915 
2916 	/* Reset PMON Enable to save power. */
2917 	intel_uncore_rmw(uncore, GEN12_SQCNT1, sqcnt1, 0);
2918 }
2919 
2920 static void gen7_oa_enable(struct i915_perf_stream *stream)
2921 {
2922 	struct intel_uncore *uncore = stream->uncore;
2923 	struct i915_gem_context *ctx = stream->ctx;
2924 	u32 ctx_id = stream->specific_ctx_id;
2925 	bool periodic = stream->periodic;
2926 	u32 period_exponent = stream->period_exponent;
2927 	u32 report_format = stream->oa_buffer.format->format;
2928 
2929 	/*
2930 	 * Reset buf pointers so we don't forward reports from before now.
2931 	 *
2932 	 * Think carefully before trying to avoid this, since it
2933 	 * also ensures status flags and the buffer itself are cleared
2934 	 * in error paths, and we have checks for invalid reports based
2935 	 * on the assumption that certain fields are written to zeroed
2936 	 * memory, which this helps maintain.
2937 	 */
2938 	gen7_init_oa_buffer(stream);
2939 
2940 	intel_uncore_write(uncore, GEN7_OACONTROL,
2941 			   (ctx_id & GEN7_OACONTROL_CTX_MASK) |
2942 			   (period_exponent <<
2943 			    GEN7_OACONTROL_TIMER_PERIOD_SHIFT) |
2944 			   (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) |
2945 			   (report_format << GEN7_OACONTROL_FORMAT_SHIFT) |
2946 			   (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) |
2947 			   GEN7_OACONTROL_ENABLE);
2948 }
2949 
2950 static void gen8_oa_enable(struct i915_perf_stream *stream)
2951 {
2952 	struct intel_uncore *uncore = stream->uncore;
2953 	u32 report_format = stream->oa_buffer.format->format;
2954 
2955 	/*
2956 	 * Reset buf pointers so we don't forward reports from before now.
2957 	 *
2958 	 * Think carefully before trying to avoid this, since it
2959 	 * also ensures status flags and the buffer itself are cleared
2960 	 * in error paths, and we have checks for invalid reports based
2961 	 * on the assumption that certain fields are written to zeroed
2962 	 * memory, which this helps maintain.
2963 	 */
2964 	gen8_init_oa_buffer(stream);
2965 
2966 	/*
2967 	 * Note: we don't rely on the hardware to perform single context
2968 	 * filtering and instead filter on the cpu based on the context-id
2969 	 * field of reports
2970 	 */
2971 	intel_uncore_write(uncore, GEN8_OACONTROL,
2972 			   (report_format << GEN8_OA_REPORT_FORMAT_SHIFT) |
2973 			   GEN8_OA_COUNTER_ENABLE);
2974 }
2975 
2976 static void gen12_oa_enable(struct i915_perf_stream *stream)
2977 {
2978 	struct intel_uncore *uncore = stream->uncore;
2979 	u32 report_format = stream->oa_buffer.format->format;
2980 
2981 	/*
2982 	 * If we don't want OA reports from the OA buffer, then we don't even
2983 	 * need to program the OAG unit.
2984 	 */
2985 	if (!(stream->sample_flags & SAMPLE_OA_REPORT))
2986 		return;
2987 
2988 	gen12_init_oa_buffer(stream);
2989 
2990 	intel_uncore_write(uncore, GEN12_OAG_OACONTROL,
2991 			   (report_format << GEN12_OAG_OACONTROL_OA_COUNTER_FORMAT_SHIFT) |
2992 			   GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE);
2993 }
2994 
2995 /**
2996  * i915_oa_stream_enable - handle `I915_PERF_IOCTL_ENABLE` for OA stream
2997  * @stream: An i915 perf stream opened for OA metrics
2998  *
2999  * [Re]enables hardware periodic sampling according to the period configured
3000  * when opening the stream. This also starts a hrtimer that will periodically
3001  * check for data in the circular OA buffer for notifying userspace (e.g.
3002  * during a read() or poll()).
3003  */
3004 static void i915_oa_stream_enable(struct i915_perf_stream *stream)
3005 {
3006 	stream->pollin = false;
3007 
3008 	stream->perf->ops.oa_enable(stream);
3009 
3010 	if (stream->sample_flags & SAMPLE_OA_REPORT)
3011 		hrtimer_start(&stream->poll_check_timer,
3012 			      ns_to_ktime(stream->poll_oa_period),
3013 			      HRTIMER_MODE_REL_PINNED);
3014 }
3015 
3016 static void gen7_oa_disable(struct i915_perf_stream *stream)
3017 {
3018 	struct intel_uncore *uncore = stream->uncore;
3019 
3020 	intel_uncore_write(uncore, GEN7_OACONTROL, 0);
3021 	if (intel_wait_for_register(uncore,
3022 				    GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0,
3023 				    50))
3024 		drm_err(&stream->perf->i915->drm,
3025 			"wait for OA to be disabled timed out\n");
3026 }
3027 
3028 static void gen8_oa_disable(struct i915_perf_stream *stream)
3029 {
3030 	struct intel_uncore *uncore = stream->uncore;
3031 
3032 	intel_uncore_write(uncore, GEN8_OACONTROL, 0);
3033 	if (intel_wait_for_register(uncore,
3034 				    GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0,
3035 				    50))
3036 		drm_err(&stream->perf->i915->drm,
3037 			"wait for OA to be disabled timed out\n");
3038 }
3039 
3040 static void gen12_oa_disable(struct i915_perf_stream *stream)
3041 {
3042 	struct intel_uncore *uncore = stream->uncore;
3043 
3044 	intel_uncore_write(uncore, GEN12_OAG_OACONTROL, 0);
3045 	if (intel_wait_for_register(uncore,
3046 				    GEN12_OAG_OACONTROL,
3047 				    GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE, 0,
3048 				    50))
3049 		drm_err(&stream->perf->i915->drm,
3050 			"wait for OA to be disabled timed out\n");
3051 
3052 	intel_uncore_write(uncore, GEN12_OA_TLB_INV_CR, 1);
3053 	if (intel_wait_for_register(uncore,
3054 				    GEN12_OA_TLB_INV_CR,
3055 				    1, 0,
3056 				    50))
3057 		drm_err(&stream->perf->i915->drm,
3058 			"wait for OA tlb invalidate timed out\n");
3059 }
3060 
3061 /**
3062  * i915_oa_stream_disable - handle `I915_PERF_IOCTL_DISABLE` for OA stream
3063  * @stream: An i915 perf stream opened for OA metrics
3064  *
3065  * Stops the OA unit from periodically writing counter reports into the
3066  * circular OA buffer. This also stops the hrtimer that periodically checks for
3067  * data in the circular OA buffer, for notifying userspace.
3068  */
3069 static void i915_oa_stream_disable(struct i915_perf_stream *stream)
3070 {
3071 	stream->perf->ops.oa_disable(stream);
3072 
3073 	if (stream->sample_flags & SAMPLE_OA_REPORT)
3074 		hrtimer_cancel(&stream->poll_check_timer);
3075 }
3076 
3077 static const struct i915_perf_stream_ops i915_oa_stream_ops = {
3078 	.destroy = i915_oa_stream_destroy,
3079 	.enable = i915_oa_stream_enable,
3080 	.disable = i915_oa_stream_disable,
3081 	.wait_unlocked = i915_oa_wait_unlocked,
3082 	.poll_wait = i915_oa_poll_wait,
3083 	.read = i915_oa_read,
3084 };
3085 
3086 static int i915_perf_stream_enable_sync(struct i915_perf_stream *stream)
3087 {
3088 	struct i915_active *active;
3089 	int err;
3090 
3091 	active = i915_active_create();
3092 	if (!active)
3093 		return -ENOMEM;
3094 
3095 	err = stream->perf->ops.enable_metric_set(stream, active);
3096 	if (err == 0)
3097 		__i915_active_wait(active, TASK_UNINTERRUPTIBLE);
3098 
3099 	i915_active_put(active);
3100 	return err;
3101 }
3102 
3103 static void
3104 get_default_sseu_config(struct intel_sseu *out_sseu,
3105 			struct intel_engine_cs *engine)
3106 {
3107 	const struct sseu_dev_info *devinfo_sseu = &engine->gt->info.sseu;
3108 
3109 	*out_sseu = intel_sseu_from_device_info(devinfo_sseu);
3110 
3111 	if (GRAPHICS_VER(engine->i915) == 11) {
3112 		/*
3113 		 * We only need the subslice count, so it doesn't matter which
3114 		 * ones we select - just keep the lower half of all available
3115 		 * subslices per slice.
3116 		 */
3117 		out_sseu->subslice_mask =
3118 			~(~0 << (hweight8(out_sseu->subslice_mask) / 2));
3119 		out_sseu->slice_mask = 0x1;
3120 	}
3121 }
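
/*
 * For example (illustrative mask): a part with subslice_mask == 0xff has
 * hweight8() == 8, so the expression above reduces to ~(~0 << 4) == 0xf,
 * selecting the four lowest subslices of slice 0.
 */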
3122 
3123 static int
3124 get_sseu_config(struct intel_sseu *out_sseu,
3125 		struct intel_engine_cs *engine,
3126 		const struct drm_i915_gem_context_param_sseu *drm_sseu)
3127 {
3128 	if (drm_sseu->engine.engine_class != engine->uabi_class ||
3129 	    drm_sseu->engine.engine_instance != engine->uabi_instance)
3130 		return -EINVAL;
3131 
3132 	return i915_gem_user_to_context_sseu(engine->gt, drm_sseu, out_sseu);
3133 }
3134 
3135 /*
3136  * OA timestamp frequency = CS timestamp frequency on most platforms. On some
3137  * platforms the OA unit ignores the CTC_SHIFT and the two timestamps differ. In
3138  * such cases, return the adjusted CS timestamp frequency to the user.
3139  */
3140 u32 i915_perf_oa_timestamp_frequency(struct drm_i915_private *i915)
3141 {
3142 	/*
3143 	 * Wa_18013179988:dg2
3144 	 * Wa_14015846243:mtl
3145 	 */
3146 	if (IS_DG2(i915) || IS_METEORLAKE(i915)) {
3147 		intel_wakeref_t wakeref;
3148 		u32 reg, shift;
3149 
3150 		with_intel_runtime_pm(to_gt(i915)->uncore->rpm, wakeref)
3151 			reg = intel_uncore_read(to_gt(i915)->uncore, RPM_CONFIG0);
3152 
3153 		shift = REG_FIELD_GET(GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK,
3154 				      reg);
3155 
3156 		return to_gt(i915)->clock_frequency << (3 - shift);
3157 	}
3158 
3159 	return to_gt(i915)->clock_frequency;
3160 }
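
/*
 * For example (illustrative numbers): with a 19.2MHz CS timestamp
 * frequency, a CTC shift of 3 leaves the OA frequency unchanged
 * (19.2MHz << 0), while a shift of 0 would report 19.2MHz << 3 ==
 * 153.6MHz.
 */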
3161 
3162 /**
3163  * i915_oa_stream_init - validate combined props for OA stream and init
3164  * @stream: An i915 perf stream
3165  * @param: The open parameters passed to `DRM_I915_PERF_OPEN`
3166  * @props: The property state that configures stream (individually validated)
3167  *
3168  * While read_properties_unlocked() validates properties in isolation it
3169  * doesn't ensure that the combination necessarily makes sense.
3170  *
3171  * At this point it has been determined that userspace wants a stream of
3172  * OA metrics, but still we need to further validate the combined
3173  * properties are OK.
3174  *
3175  * If the configuration makes sense then we can allocate memory for
3176  * a circular OA buffer and apply the requested metric set configuration.
3177  *
3178  * Returns: zero on success or a negative error code.
3179  */
3180 static int i915_oa_stream_init(struct i915_perf_stream *stream,
3181 			       struct drm_i915_perf_open_param *param,
3182 			       struct perf_open_properties *props)
3183 {
3184 	struct drm_i915_private *i915 = stream->perf->i915;
3185 	struct i915_perf *perf = stream->perf;
3186 	struct intel_gt *gt;
3187 	int ret;
3188 
3189 	if (!props->engine) {
3190 		drm_dbg(&stream->perf->i915->drm,
3191 			"OA engine not specified\n");
3192 		return -EINVAL;
3193 	}
3194 	gt = props->engine->gt;
3195 
3196 	/*
3197 	 * If the sysfs metrics/ directory wasn't registered for some
3198 	 * reason then don't let userspace try their luck with config
3199 	 * IDs
3200 	 */
3201 	if (!perf->metrics_kobj) {
3202 		drm_dbg(&stream->perf->i915->drm,
3203 			"OA metrics weren't advertised via sysfs\n");
3204 		return -EINVAL;
3205 	}
3206 
3207 	if (!(props->sample_flags & SAMPLE_OA_REPORT) &&
3208 	    (GRAPHICS_VER(perf->i915) < 12 || !stream->ctx)) {
3209 		drm_dbg(&stream->perf->i915->drm,
3210 			"Only OA report sampling supported\n");
3211 		return -EINVAL;
3212 	}
3213 
3214 	if (!perf->ops.enable_metric_set) {
3215 		drm_dbg(&stream->perf->i915->drm,
3216 			"OA unit not supported\n");
3217 		return -ENODEV;
3218 	}
3219 
3220 	/*
3221 	 * To avoid the complexity of having to accurately filter
3222 	 * counter reports and marshal to the appropriate client
3223 	 * we currently only allow exclusive access
3224 	 */
3225 	if (gt->perf.exclusive_stream) {
3226 		drm_dbg(&stream->perf->i915->drm,
3227 			"OA unit already in use\n");
3228 		return -EBUSY;
3229 	}
3230 
3231 	if (!props->oa_format) {
3232 		drm_dbg(&stream->perf->i915->drm,
3233 			"OA report format not specified\n");
3234 		return -EINVAL;
3235 	}
3236 
3237 	stream->engine = props->engine;
3238 	stream->uncore = stream->engine->gt->uncore;
3239 
3240 	stream->sample_size = sizeof(struct drm_i915_perf_record_header);
3241 
3242 	stream->oa_buffer.format = &perf->oa_formats[props->oa_format];
3243 	if (drm_WARN_ON(&i915->drm, stream->oa_buffer.format->size == 0))
3244 		return -EINVAL;
3245 
3246 	stream->sample_flags = props->sample_flags;
3247 	stream->sample_size += stream->oa_buffer.format->size;
3248 
3249 	stream->hold_preemption = props->hold_preemption;
3250 
3251 	stream->periodic = props->oa_periodic;
3252 	if (stream->periodic)
3253 		stream->period_exponent = props->oa_period_exponent;
3254 
3255 	if (stream->ctx) {
3256 		ret = oa_get_render_ctx_id(stream);
3257 		if (ret) {
3258 			drm_dbg(&stream->perf->i915->drm,
3259 				"Invalid context id to filter with\n");
3260 			return ret;
3261 		}
3262 	}
3263 
3264 	ret = alloc_noa_wait(stream);
3265 	if (ret) {
3266 		drm_dbg(&stream->perf->i915->drm,
3267 			"Unable to allocate NOA wait batch buffer\n");
3268 		goto err_noa_wait_alloc;
3269 	}
3270 
3271 	stream->oa_config = i915_perf_get_oa_config(perf, props->metrics_set);
3272 	if (!stream->oa_config) {
3273 		drm_dbg(&stream->perf->i915->drm,
3274 			"Invalid OA config id=%i\n", props->metrics_set);
3275 		ret = -EINVAL;
3276 		goto err_config;
3277 	}
3278 
3279 	/* PRM - observability performance counters:
3280 	 *
3281 	 *   OACONTROL, performance counter enable, note:
3282 	 *
3283 	 *   "When this bit is set, in order to have coherent counts,
3284 	 *   RC6 power state and trunk clock gating must be disabled.
3285 	 *   This can be achieved by programming MMIO registers as
3286 	 *   0xA094=0 and 0xA090[31]=1"
3287 	 *
3288 	 *   In our case we are expecting that taking pm + FORCEWAKE
3289 	 *   references will effectively disable RC6.
3290 	 */
3291 	intel_engine_pm_get(stream->engine);
3292 	intel_uncore_forcewake_get(stream->uncore, FORCEWAKE_ALL);
3293 
3294 	/*
3295 	 * Wa_16011777198:dg2: GuC resets render as part of the Wa. This causes
3296 	 * OA to lose the configuration state. Prevent this by overriding GUCRC
3297 	 * mode.
3298 	 */
3299 	if (intel_uc_uses_guc_rc(&gt->uc) &&
3300 	    (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_C0) ||
3301 	     IS_DG2_GRAPHICS_STEP(gt->i915, G11, STEP_A0, STEP_B0))) {
3302 		ret = intel_guc_slpc_override_gucrc_mode(&gt->uc.guc.slpc,
3303 							 SLPC_GUCRC_MODE_GUCRC_NO_RC6);
3304 		if (ret) {
3305 			drm_dbg(&stream->perf->i915->drm,
3306 				"Unable to override gucrc mode\n");
3307 			goto err_config;
3308 		}
3309 	}
3310 
3311 	ret = alloc_oa_buffer(stream);
3312 	if (ret)
3313 		goto err_oa_buf_alloc;
3314 
3315 	stream->ops = &i915_oa_stream_ops;
3316 
3317 	stream->engine->gt->perf.sseu = props->sseu;
3318 	WRITE_ONCE(gt->perf.exclusive_stream, stream);
3319 
3320 	ret = i915_perf_stream_enable_sync(stream);
3321 	if (ret) {
3322 		drm_dbg(&stream->perf->i915->drm,
3323 			"Unable to enable metric set\n");
3324 		goto err_enable;
3325 	}
3326 
3327 	drm_dbg(&stream->perf->i915->drm,
3328 		"opening stream oa config uuid=%s\n",
3329 		  stream->oa_config->uuid);
3330 
3331 	hrtimer_init(&stream->poll_check_timer,
3332 		     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3333 	stream->poll_check_timer.function = oa_poll_check_timer_cb;
3334 	init_waitqueue_head(&stream->poll_wq);
3335 	spin_lock_init(&stream->oa_buffer.ptr_lock);
3336 	mutex_init(&stream->lock);
3337 
3338 	return 0;
3339 
3340 err_enable:
3341 	WRITE_ONCE(gt->perf.exclusive_stream, NULL);
3342 	perf->ops.disable_metric_set(stream);
3343 
3344 	free_oa_buffer(stream);
3345 
3346 err_oa_buf_alloc:
3347 	free_oa_configs(stream);
3348 
3349 	intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL);
3350 	intel_engine_pm_put(stream->engine);
3351 
3352 err_config:
3353 	free_noa_wait(stream);
3354 
3355 err_noa_wait_alloc:
3356 	if (stream->ctx)
3357 		oa_put_render_ctx_id(stream);
3358 
3359 	return ret;
3360 }
3361 
3362 void i915_oa_init_reg_state(const struct intel_context *ce,
3363 			    const struct intel_engine_cs *engine)
3364 {
3365 	struct i915_perf_stream *stream;
3366 
3367 	if (engine->class != RENDER_CLASS)
3368 		return;
3369 
3370 	/* perf.exclusive_stream serialised by lrc_configure_all_contexts() */
3371 	stream = READ_ONCE(engine->gt->perf.exclusive_stream);
3372 	if (stream && GRAPHICS_VER(stream->perf->i915) < 12)
3373 		gen8_update_reg_state_unlocked(ce, stream);
3374 }
3375 
3376 /**
3377  * i915_perf_read - handles read() FOP for i915 perf stream FDs
3378  * @file: An i915 perf stream file
3379  * @buf: destination buffer given by userspace
3380  * @count: the number of bytes userspace wants to read
3381  * @ppos: (inout) file seek position (unused)
3382  *
3383  * The entry point for handling a read() on a stream file descriptor from
3384  * userspace. Most of the work is left to the i915_perf_read_locked() and
3385  * userspace. Most of the work is left to i915_perf_read_locked() and
3386  * &i915_perf_stream_ops->read, but to spare stream implementations (of
3387  * which we might have multiple later) the complexity, we handle blocking reads here.
3388  * We can also consistently treat trying to read from a disabled stream
3389  * as an IO error so implementations can assume the stream is enabled
3390  * while reading.
3391  *
3392  * Returns: The number of bytes copied or a negative error code on failure.
3393  */
3394 static ssize_t i915_perf_read(struct file *file,
3395 			      char __user *buf,
3396 			      size_t count,
3397 			      loff_t *ppos)
3398 {
3399 	struct i915_perf_stream *stream = file->private_data;
3400 	size_t offset = 0;
3401 	int ret;
3402 
3403 	/* To ensure it's handled consistently we simply treat all reads of a
3404 	 * disabled stream as an error. In particular it might otherwise lead
3405 	 * to a deadlock for blocking file descriptors...
3406 	 */
3407 	if (!stream->enabled || !(stream->sample_flags & SAMPLE_OA_REPORT))
3408 		return -EIO;
3409 
3410 	if (!(file->f_flags & O_NONBLOCK)) {
3411 		/* There's the small chance of false positives from
3412 		 * stream->ops->wait_unlocked.
3413 		 *
3414 		 * E.g. with single context filtering, since we only wait until
3415 		 * the OA buffer has >= 1 report, we don't immediately know whether
3416 		 * any reports really belong to the current context.
3417 		 */
3418 		do {
3419 			ret = stream->ops->wait_unlocked(stream);
3420 			if (ret)
3421 				return ret;
3422 
3423 			mutex_lock(&stream->lock);
3424 			ret = stream->ops->read(stream, buf, count, &offset);
3425 			mutex_unlock(&stream->lock);
3426 		} while (!offset && !ret);
3427 	} else {
3428 		mutex_lock(&stream->lock);
3429 		ret = stream->ops->read(stream, buf, count, &offset);
3430 		mutex_unlock(&stream->lock);
3431 	}
3432 
3433 	/* We allow the poll checking to sometimes report false positive EPOLLIN
3434 	 * events where we might actually report EAGAIN on read() if there's
3435 	 * not really any data available. In this situation though we don't
3436 	 * want to enter a busy loop between poll() reporting an EPOLLIN event
3437 	 * and read() returning -EAGAIN. Clearing the oa.pollin state here
3438 	 * effectively ensures we back off until the next hrtimer callback
3439 	 * before reporting another EPOLLIN event.
3440 	 * The exception to this is if ops->read() returned -ENOSPC which means
3441 	 * that more OA data is available than could fit in the user provided
3442 	 * buffer. In this case we want the next poll() call to not block.
3443 	 */
3444 	if (ret != -ENOSPC)
3445 		stream->pollin = false;
3446 
3447 	/* Possible values for ret are 0, -EFAULT, -ENOSPC, -EIO, ... */
3448 	return offset ?: (ret ?: -EAGAIN);
3449 }
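
/*
 * A minimal userspace sketch of consuming the stream above (not part of
 * the driver; "stream_fd" is assumed to be a blocking fd returned by
 * DRM_IOCTL_I915_PERF_OPEN, and error handling is elided):
 *
 *	#include <stdint.h>
 *	#include <unistd.h>
 *	#include <drm/i915_drm.h>
 *
 *	static void drain(int stream_fd)
 *	{
 *		uint8_t buf[128 * 1024];
 *		ssize_t len, offset;
 *
 *		// A blocking read() sleeps until at least one record is ready.
 *		while ((len = read(stream_fd, buf, sizeof(buf))) > 0) {
 *			// The stream is a sequence of records, each starting
 *			// with a drm_i915_perf_record_header whose size field
 *			// covers the header plus its payload.
 *			offset = 0;
 *			while (offset < len) {
 *				const struct drm_i915_perf_record_header *hdr =
 *					(const void *)(buf + offset);
 *
 *				if (hdr->type == DRM_I915_PERF_RECORD_SAMPLE)
 *					; // OA report payload starts at hdr + 1
 *				offset += hdr->size;
 *			}
 *		}
 *	}
 */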
3450 
3451 static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
3452 {
3453 	struct i915_perf_stream *stream =
3454 		container_of(hrtimer, typeof(*stream), poll_check_timer);
3455 
3456 	if (oa_buffer_check_unlocked(stream)) {
3457 		stream->pollin = true;
3458 		wake_up(&stream->poll_wq);
3459 	}
3460 
3461 	hrtimer_forward_now(hrtimer,
3462 			    ns_to_ktime(stream->poll_oa_period));
3463 
3464 	return HRTIMER_RESTART;
3465 }
3466 
3467 /**
3468  * i915_perf_poll_locked - poll_wait() with a suitable wait queue for stream
3469  * @stream: An i915 perf stream
3470  * @file: An i915 perf stream file
3471  * @wait: poll() state table
3472  *
3473  * For handling userspace polling on an i915 perf stream, this calls through to
3474  * &i915_perf_stream_ops->poll_wait to call poll_wait() with a wait queue that
3475  * will be woken for new stream data.
3476  *
3477  * Returns: any poll events that are ready without sleeping
3478  */
3479 static __poll_t i915_perf_poll_locked(struct i915_perf_stream *stream,
3480 				      struct file *file,
3481 				      poll_table *wait)
3482 {
3483 	__poll_t events = 0;
3484 
3485 	stream->ops->poll_wait(stream, file, wait);
3486 
3487 	/* Note: we don't explicitly check whether there's something to read
3488 	 * here since this path may be very hot depending on what else
3489 	 * userspace is polling, or on the timeout in use. We rely solely on
3490 	 * the hrtimer/oa_poll_check_timer_cb to notify us when there are
3491 	 * samples to read.
3492 	 */
3493 	if (stream->pollin)
3494 		events |= EPOLLIN;
3495 
3496 	return events;
3497 }
3498 
3499 /**
3500  * i915_perf_poll - call poll_wait() with a suitable wait queue for stream
3501  * @file: An i915 perf stream file
3502  * @wait: poll() state table
3503  *
3504  * For handling userspace polling on an i915 perf stream, this ensures
3505  * poll_wait() gets called with a wait queue that will be woken for new stream
3506  * data.
3507  *
3508  * Note: Implementation deferred to i915_perf_poll_locked()
3509  *
3510  * Returns: any poll events that are ready without sleeping
3511  */
3512 static __poll_t i915_perf_poll(struct file *file, poll_table *wait)
3513 {
3514 	struct i915_perf_stream *stream = file->private_data;
3515 	__poll_t ret;
3516 
3517 	mutex_lock(&stream->lock);
3518 	ret = i915_perf_poll_locked(stream, file, wait);
3519 	mutex_unlock(&stream->lock);
3520 
3521 	return ret;
3522 }
3523 
3524 /**
3525  * i915_perf_enable_locked - handle `I915_PERF_IOCTL_ENABLE` ioctl
3526  * @stream: A disabled i915 perf stream
3527  *
3528  * [Re]enables the associated capture of data for this stream.
3529  *
3530  * If a stream was previously enabled then there's currently no intention
3531  * to provide userspace any guarantee about the preservation of previously
3532  * buffered data.
3533  */
3534 static void i915_perf_enable_locked(struct i915_perf_stream *stream)
3535 {
3536 	if (stream->enabled)
3537 		return;
3538 
3539 	/* Allow stream->ops->enable() to refer to this */
3540 	stream->enabled = true;
3541 
3542 	if (stream->ops->enable)
3543 		stream->ops->enable(stream);
3544 
3545 	if (stream->hold_preemption)
3546 		intel_context_set_nopreempt(stream->pinned_ctx);
3547 }
3548 
3549 /**
3550  * i915_perf_disable_locked - handle `I915_PERF_IOCTL_DISABLE` ioctl
3551  * @stream: An enabled i915 perf stream
3552  *
3553  * Disables the associated capture of data for this stream.
3554  *
3555  * The intention is that disabling and re-enabling a stream will ideally be
3556  * cheaper than destroying and re-opening a stream with the same configuration,
3557  * though there are no formal guarantees about what state or buffered data
3558  * must be retained between disabling and re-enabling a stream.
3559  *
3560  * Note: while a stream is disabled it's considered an error for userspace
3561  * to attempt to read from the stream (-EIO).
3562  */
3563 static void i915_perf_disable_locked(struct i915_perf_stream *stream)
3564 {
3565 	if (!stream->enabled)
3566 		return;
3567 
3568 	/* Allow stream->ops->disable() to refer to this */
3569 	stream->enabled = false;
3570 
3571 	if (stream->hold_preemption)
3572 		intel_context_clear_nopreempt(stream->pinned_ctx);
3573 
3574 	if (stream->ops->disable)
3575 		stream->ops->disable(stream);
3576 }
3577 
3578 static long i915_perf_config_locked(struct i915_perf_stream *stream,
3579 				    unsigned long metrics_set)
3580 {
3581 	struct i915_oa_config *config;
3582 	long ret = stream->oa_config->id;
3583 
3584 	config = i915_perf_get_oa_config(stream->perf, metrics_set);
3585 	if (!config)
3586 		return -EINVAL;
3587 
3588 	if (config != stream->oa_config) {
3589 		int err;
3590 
3591 		/*
3592 		 * If OA is bound to a specific context, emit the
3593 		 * reconfiguration inline from that context. The update
3594 		 * will then be ordered with respect to submission on that
3595 		 * context.
3596 		 *
3597 		 * When set globally, we use a low priority kernel context,
3598 		 * so it will effectively take effect when idle.
3599 		 */
3600 		err = emit_oa_config(stream, config, oa_context(stream), NULL);
3601 		if (!err)
3602 			config = xchg(&stream->oa_config, config);
3603 		else
3604 			ret = err;
3605 	}
3606 
3607 	i915_oa_config_put(config);
3608 
3609 	return ret;
3610 }
3611 
3612 /**
3613  * i915_perf_ioctl_locked - support ioctl() usage with i915 perf stream FDs
3614  * @stream: An i915 perf stream
3615  * @cmd: the ioctl request
3616  * @arg: the ioctl data
3617  *
3618  * Returns: zero on success or a negative error code. Returns -EINVAL for
3619  * an unknown ioctl request.
3620  */
3621 static long i915_perf_ioctl_locked(struct i915_perf_stream *stream,
3622 				   unsigned int cmd,
3623 				   unsigned long arg)
3624 {
3625 	switch (cmd) {
3626 	case I915_PERF_IOCTL_ENABLE:
3627 		i915_perf_enable_locked(stream);
3628 		return 0;
3629 	case I915_PERF_IOCTL_DISABLE:
3630 		i915_perf_disable_locked(stream);
3631 		return 0;
3632 	case I915_PERF_IOCTL_CONFIG:
3633 		return i915_perf_config_locked(stream, arg);
3634 	}
3635 
3636 	return -EINVAL;
3637 }
3638 
3639 /**
3640  * i915_perf_ioctl - support ioctl() usage with i915 perf stream FDs
3641  * @file: An i915 perf stream file
3642  * @cmd: the ioctl request
3643  * @arg: the ioctl data
3644  *
3645  * Implementation deferred to i915_perf_ioctl_locked().
3646  *
3647  * Returns: zero on success or a negative error code. Returns -EINVAL for
3648  * an unknown ioctl request.
3649  */
3650 static long i915_perf_ioctl(struct file *file,
3651 			    unsigned int cmd,
3652 			    unsigned long arg)
3653 {
3654 	struct i915_perf_stream *stream = file->private_data;
3655 	long ret;
3656 
3657 	mutex_lock(&stream->lock);
3658 	ret = i915_perf_ioctl_locked(stream, cmd, arg);
3659 	mutex_unlock(&stream->lock);
3660 
3661 	return ret;
3662 }
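
/*
 * A minimal userspace sketch of driving these ioctls (not part of the
 * driver; "stream_fd" is assumed to come from DRM_IOCTL_I915_PERF_OPEN and
 * "config_id" from DRM_IOCTL_I915_PERF_ADD_CONFIG; error handling is
 * elided):
 *
 *	#include <sys/ioctl.h>
 *	#include <drm/i915_drm.h>
 *
 *	ioctl(stream_fd, I915_PERF_IOCTL_ENABLE);
 *	// ... read() sample records ...
 *
 *	// Swap the metric set without reopening the stream; on success the
 *	// id of the config that was active before the call is returned.
 *	ioctl(stream_fd, I915_PERF_IOCTL_CONFIG, config_id);
 *
 *	ioctl(stream_fd, I915_PERF_IOCTL_DISABLE);
 */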
3663 
3664 /**
3665  * i915_perf_destroy_locked - destroy an i915 perf stream
3666  * @stream: An i915 perf stream
3667  *
3668  * Frees all resources associated with the given i915 perf @stream, disabling
3669  * any associated data capture in the process.
3670  *
3671  * Note: The &gt->perf.lock mutex has been taken to serialize
3672  * with any non-file-operation driver hooks.
3673  */
3674 static void i915_perf_destroy_locked(struct i915_perf_stream *stream)
3675 {
3676 	if (stream->enabled)
3677 		i915_perf_disable_locked(stream);
3678 
3679 	if (stream->ops->destroy)
3680 		stream->ops->destroy(stream);
3681 
3682 	if (stream->ctx)
3683 		i915_gem_context_put(stream->ctx);
3684 
3685 	kfree(stream);
3686 }
3687 
3688 /**
3689  * i915_perf_release - handles userspace close() of a stream file
3690  * @inode: anonymous inode associated with file
3691  * @file: An i915 perf stream file
3692  *
3693  * Cleans up any resources associated with an open i915 perf stream file.
3694  *
3695  * NB: close() can't really fail from the userspace point of view.
3696  *
3697  * Returns: zero on success or a negative error code.
3698  */
3699 static int i915_perf_release(struct inode *inode, struct file *file)
3700 {
3701 	struct i915_perf_stream *stream = file->private_data;
3702 	struct i915_perf *perf = stream->perf;
3703 	struct intel_gt *gt = stream->engine->gt;
3704 
3705 	/*
3706 	 * Within this call, we know that the fd is being closed and we have no
3707 	 * other user of stream->lock. Use the perf lock to destroy the stream
3708 	 * here.
3709 	 */
3710 	mutex_lock(&gt->perf.lock);
3711 	i915_perf_destroy_locked(stream);
3712 	mutex_unlock(&gt->perf.lock);
3713 
3714 	/* Release the reference the perf stream kept on the driver. */
3715 	drm_dev_put(&perf->i915->drm);
3716 
3717 	return 0;
3718 }
3719 
3720 
3721 static const struct file_operations fops = {
3722 	.owner		= THIS_MODULE,
3723 	.llseek		= no_llseek,
3724 	.release	= i915_perf_release,
3725 	.poll		= i915_perf_poll,
3726 	.read		= i915_perf_read,
3727 	.unlocked_ioctl	= i915_perf_ioctl,
3728 	/* Our ioctls have no pointer arguments, so it's safe to use the same
3729 	 * function to handle 32-bit compatibility.
3730 	 */
3731 	.compat_ioctl   = i915_perf_ioctl,
3732 };
3733 
3734 
3735 /**
3736  * i915_perf_open_ioctl_locked - DRM ioctl() for userspace to open a stream FD
3737  * @perf: i915 perf instance
3738  * @param: The open parameters passed to 'DRM_I915_PERF_OPEN`
3739  * @props: individually validated u64 property value pairs
3740  * @file: drm file
3741  *
3742  * See i915_perf_ioctl_open() for interface details.
3743  *
3744  * Implements further stream config validation and stream initialization on
3745  * behalf of i915_perf_open_ioctl() with the &gt->perf.lock mutex
3746  * taken to serialize with any non-file-operation driver hooks.
3747  *
3748  * Note: at this point the @props have only been validated in isolation and
3749  * it's still necessary to validate that the combination of properties makes
3750  * sense.
3751  *
3752  * In the case where userspace is interested in OA unit metrics then further
3753  * config validation and stream initialization details will be handled by
3754  * i915_oa_stream_init(). The code here should only validate config state that
3755  * will be relevant to all stream types / backends.
3756  *
3757  * Returns: zero on success or a negative error code.
3758  */
3759 static int
3760 i915_perf_open_ioctl_locked(struct i915_perf *perf,
3761 			    struct drm_i915_perf_open_param *param,
3762 			    struct perf_open_properties *props,
3763 			    struct drm_file *file)
3764 {
3765 	struct i915_gem_context *specific_ctx = NULL;
3766 	struct i915_perf_stream *stream = NULL;
3767 	unsigned long f_flags = 0;
3768 	bool privileged_op = true;
3769 	int stream_fd;
3770 	int ret;
3771 
3772 	if (props->single_context) {
3773 		u32 ctx_handle = props->ctx_handle;
3774 		struct drm_i915_file_private *file_priv = file->driver_priv;
3775 
3776 		specific_ctx = i915_gem_context_lookup(file_priv, ctx_handle);
3777 		if (IS_ERR(specific_ctx)) {
3778 			drm_dbg(&perf->i915->drm,
3779 				"Failed to look up context with ID %u for opening perf stream\n",
3780 				  ctx_handle);
3781 			ret = PTR_ERR(specific_ctx);
3782 			goto err;
3783 		}
3784 	}
3785 
3786 	/*
3787 	 * On Haswell the OA unit supports clock gating off for a specific
3788 	 * context and in this mode there's no visibility of metrics for the
3789 	 * rest of the system, which we consider acceptable for a
3790 	 * non-privileged client.
3791 	 *
3792 	 * For Gen8->11 the OA unit no longer supports clock gating off for a
3793 	 * specific context and the kernel can't securely stop the counters
3794 	 * from updating as system-wide / global values. Even though we can
3795 	 * filter reports based on the included context ID we can't block
3796 	 * clients from seeing the raw / global counter values via
3797 	 * MI_REPORT_PERF_COUNT commands and so consider it a privileged op to
3798 	 * enable the OA unit by default.
3799 	 *
3800 	 * For Gen12+ we gain a new OAR unit that only monitors the RCS on a
3801 	 * per context basis. So we can relax requirements there if the user
3802 	 * doesn't request global stream access (i.e. query based sampling
3803 	 * using MI_REPORT_PERF_COUNT).
3804 	 */
3805 	if (IS_HASWELL(perf->i915) && specific_ctx)
3806 		privileged_op = false;
3807 	else if (GRAPHICS_VER(perf->i915) == 12 && specific_ctx &&
3808 		 (props->sample_flags & SAMPLE_OA_REPORT) == 0)
3809 		privileged_op = false;
3810 
3811 	if (props->hold_preemption) {
3812 		if (!props->single_context) {
3813 			drm_dbg(&perf->i915->drm,
3814 				"preemption disable with no context\n");
3815 			ret = -EINVAL;
3816 			goto err;
3817 		}
3818 		privileged_op = true;
3819 	}
3820 
3821 	/*
3822 	 * Asking for SSEU configuration is a privileged operation.
3823 	 */
3824 	if (props->has_sseu)
3825 		privileged_op = true;
3826 	else
3827 		get_default_sseu_config(&props->sseu, props->engine);
3828 
3829 	/* Similar to perf's kernel.perf_paranoid_cpu sysctl option
3830 	 * we check a dev.i915.perf_stream_paranoid sysctl option
3831 	 * to determine if it's ok to access system wide OA counters
3832 	 * without CAP_PERFMON or CAP_SYS_ADMIN privileges.
3833 	 */
3834 	if (privileged_op &&
3835 	    i915_perf_stream_paranoid && !perfmon_capable()) {
3836 		drm_dbg(&perf->i915->drm,
3837 			"Insufficient privileges to open i915 perf stream\n");
3838 		ret = -EACCES;
3839 		goto err_ctx;
3840 	}
3841 
3842 	stream = kzalloc(sizeof(*stream), GFP_KERNEL);
3843 	if (!stream) {
3844 		ret = -ENOMEM;
3845 		goto err_ctx;
3846 	}
3847 
3848 	stream->perf = perf;
3849 	stream->ctx = specific_ctx;
3850 	stream->poll_oa_period = props->poll_oa_period;
3851 
3852 	ret = i915_oa_stream_init(stream, param, props);
3853 	if (ret)
3854 		goto err_alloc;
3855 
3856 	/* we avoid simply assigning stream->sample_flags = props->sample_flags
3857 	 * to have _stream_init check the combination of sample flags more
3858 	 * thoroughly, but this is still the expected result at this point.
3859 	 */
3860 	if (WARN_ON(stream->sample_flags != props->sample_flags)) {
3861 		ret = -ENODEV;
3862 		goto err_flags;
3863 	}
3864 
3865 	if (param->flags & I915_PERF_FLAG_FD_CLOEXEC)
3866 		f_flags |= O_CLOEXEC;
3867 	if (param->flags & I915_PERF_FLAG_FD_NONBLOCK)
3868 		f_flags |= O_NONBLOCK;
3869 
3870 	stream_fd = anon_inode_getfd("[i915_perf]", &fops, stream, f_flags);
3871 	if (stream_fd < 0) {
3872 		ret = stream_fd;
3873 		goto err_flags;
3874 	}
3875 
3876 	if (!(param->flags & I915_PERF_FLAG_DISABLED))
3877 		i915_perf_enable_locked(stream);
3878 
3879 	/* Take a reference on the driver that will be kept with stream_fd
3880 	 * until its release.
3881 	 */
3882 	drm_dev_get(&perf->i915->drm);
3883 
3884 	return stream_fd;
3885 
3886 err_flags:
3887 	if (stream->ops->destroy)
3888 		stream->ops->destroy(stream);
3889 err_alloc:
3890 	kfree(stream);
3891 err_ctx:
3892 	if (specific_ctx)
3893 		i915_gem_context_put(specific_ctx);
3894 err:
3895 	return ret;
3896 }
3897 
3898 static u64 oa_exponent_to_ns(struct i915_perf *perf, int exponent)
3899 {
3900 	u64 nom = (2ULL << exponent) * NSEC_PER_SEC;
3901 	u32 den = i915_perf_oa_timestamp_frequency(perf->i915);
3902 
3903 	return div_u64(nom + den - 1, den);
3904 }
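
/*
 * E.g. (illustrative frequency): with a 12 MHz OA timestamp clock,
 * exponent 0 gives (2 << 0) * NSEC_PER_SEC / 12000000 ~= 167ns, and each
 * increment of the exponent doubles the period, so exponent 5 is ~5.3us.
 */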
3905 
3906 static __always_inline bool
3907 oa_format_valid(struct i915_perf *perf, enum drm_i915_oa_format format)
3908 {
3909 	return test_bit(format, perf->format_mask);
3910 }
3911 
3912 static __always_inline void
3913 oa_format_add(struct i915_perf *perf, enum drm_i915_oa_format format)
3914 {
3915 	__set_bit(format, perf->format_mask);
3916 }
3917 
3918 /**
3919  * read_properties_unlocked - validate + copy userspace stream open properties
3920  * @perf: i915 perf instance
3921  * @uprops: The array of u64 key value pairs given by userspace
3922  * @n_props: The number of key value pairs expected in @uprops
3923  * @props: The stream configuration built up while validating properties
3924  *
3925  * Note this function only validates properties in isolation; it doesn't
3926  * validate that the combination of properties makes sense or that all
3927  * properties necessary for a particular kind of stream have been set.
3928  *
3929  * Note that there currently aren't any ordering requirements for properties so
3930  * we shouldn't validate or assume anything about ordering here. This doesn't
3931  * rule out defining new properties with ordering requirements in the future.
3932  */
3933 static int read_properties_unlocked(struct i915_perf *perf,
3934 				    u64 __user *uprops,
3935 				    u32 n_props,
3936 				    struct perf_open_properties *props)
3937 {
3938 	u64 __user *uprop = uprops;
3939 	u32 i;
3940 	int ret;
3941 
3942 	memset(props, 0, sizeof(struct perf_open_properties));
3943 	props->poll_oa_period = DEFAULT_POLL_PERIOD_NS;
3944 
3945 	if (!n_props) {
3946 		drm_dbg(&perf->i915->drm,
3947 			"No i915 perf properties given\n");
3948 		return -EINVAL;
3949 	}
3950 
3951 	/* At the moment we only support using i915-perf on the RCS. */
3952 	props->engine = intel_engine_lookup_user(perf->i915,
3953 						 I915_ENGINE_CLASS_RENDER,
3954 						 0);
3955 	if (!props->engine) {
3956 		drm_dbg(&perf->i915->drm,
3957 			"No RENDER-capable engines\n");
3958 		return -EINVAL;
3959 	}
3960 
3961 	/* Considering that ID = 0 is reserved and assuming that we don't
3962 	 * (currently) expect any configurations to ever specify duplicate
3963 	 * values for a particular property ID then the last _PROP_MAX value is
3964 	 * one greater than the maximum number of properties we expect to get
3965 	 * from userspace.
3966 	 */
3967 	if (n_props >= DRM_I915_PERF_PROP_MAX) {
3968 		drm_dbg(&perf->i915->drm,
3969 			"More i915 perf properties specified than exist\n");
3970 		return -EINVAL;
3971 	}
3972 
3973 	for (i = 0; i < n_props; i++) {
3974 		u64 oa_period, oa_freq_hz;
3975 		u64 id, value;
3976 
3977 		ret = get_user(id, uprop);
3978 		if (ret)
3979 			return ret;
3980 
3981 		ret = get_user(value, uprop + 1);
3982 		if (ret)
3983 			return ret;
3984 
3985 		if (id == 0 || id >= DRM_I915_PERF_PROP_MAX) {
3986 			drm_dbg(&perf->i915->drm,
3987 				"Unknown i915 perf property ID\n");
3988 			return -EINVAL;
3989 		}
3990 
3991 		switch ((enum drm_i915_perf_property_id)id) {
3992 		case DRM_I915_PERF_PROP_CTX_HANDLE:
3993 			props->single_context = 1;
3994 			props->ctx_handle = value;
3995 			break;
3996 		case DRM_I915_PERF_PROP_SAMPLE_OA:
3997 			if (value)
3998 				props->sample_flags |= SAMPLE_OA_REPORT;
3999 			break;
4000 		case DRM_I915_PERF_PROP_OA_METRICS_SET:
4001 			if (value == 0) {
4002 				drm_dbg(&perf->i915->drm,
4003 					"Unknown OA metric set ID\n");
4004 				return -EINVAL;
4005 			}
4006 			props->metrics_set = value;
4007 			break;
4008 		case DRM_I915_PERF_PROP_OA_FORMAT:
4009 			if (value == 0 || value >= I915_OA_FORMAT_MAX) {
4010 				drm_dbg(&perf->i915->drm,
4011 					"Out-of-range OA report format %llu\n",
4012 					  value);
4013 				return -EINVAL;
4014 			}
4015 			if (!oa_format_valid(perf, value)) {
4016 				drm_dbg(&perf->i915->drm,
4017 					"Unsupported OA report format %llu\n",
4018 					  value);
4019 				return -EINVAL;
4020 			}
4021 			props->oa_format = value;
4022 			break;
4023 		case DRM_I915_PERF_PROP_OA_EXPONENT:
4024 			if (value > OA_EXPONENT_MAX) {
4025 				drm_dbg(&perf->i915->drm,
4026 					"OA timer exponent too high (> %u)\n",
4027 					 OA_EXPONENT_MAX);
4028 				return -EINVAL;
4029 			}
4030 
4031 			/* Theoretically we can program the OA unit to sample
4032 			 * e.g. every 160ns for HSW, 167ns for BDW/SKL or 104ns
4033 			 * for BXT. We don't allow such high sampling
4034 			 * frequencies by default unless root.
4035 			 */
4036 
4037 			BUILD_BUG_ON(sizeof(oa_period) != 8);
4038 			oa_period = oa_exponent_to_ns(perf, value);
4039 
4040 			/* This check is primarily to ensure that oa_period <=
4041 			 * UINT32_MAX (before passing to do_div which only
4042 			 * accepts a u32 denominator), but we can also skip
4043 			 * checking anything < 1Hz which implicitly can't be
4044 			 * limited via an integer oa_max_sample_rate.
4045 			 */
4046 			if (oa_period <= NSEC_PER_SEC) {
4047 				u64 tmp = NSEC_PER_SEC;
4048 				do_div(tmp, oa_period);
4049 				oa_freq_hz = tmp;
4050 			} else
4051 				oa_freq_hz = 0;
4052 
4053 			if (oa_freq_hz > i915_oa_max_sample_rate && !perfmon_capable()) {
4054 				drm_dbg(&perf->i915->drm,
4055 					"OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without CAP_PERFMON or CAP_SYS_ADMIN privileges\n",
4056 					  i915_oa_max_sample_rate);
4057 				return -EACCES;
4058 			}
4059 
4060 			props->oa_periodic = true;
4061 			props->oa_period_exponent = value;
4062 			break;
4063 		case DRM_I915_PERF_PROP_HOLD_PREEMPTION:
4064 			props->hold_preemption = !!value;
4065 			break;
4066 		case DRM_I915_PERF_PROP_GLOBAL_SSEU: {
4067 			struct drm_i915_gem_context_param_sseu user_sseu;
4068 
4069 			if (GRAPHICS_VER_FULL(perf->i915) >= IP_VER(12, 50)) {
4070 				drm_dbg(&perf->i915->drm,
4071 					"SSEU config not supported on gfx %x\n",
4072 					GRAPHICS_VER_FULL(perf->i915));
4073 				return -ENODEV;
4074 			}
4075 
4076 			if (copy_from_user(&user_sseu,
4077 					   u64_to_user_ptr(value),
4078 					   sizeof(user_sseu))) {
4079 				drm_dbg(&perf->i915->drm,
4080 					"Unable to copy global sseu parameter\n");
4081 				return -EFAULT;
4082 			}
4083 
4084 			ret = get_sseu_config(&props->sseu, props->engine, &user_sseu);
4085 			if (ret) {
4086 				drm_dbg(&perf->i915->drm,
4087 					"Invalid SSEU configuration\n");
4088 				return ret;
4089 			}
4090 			props->has_sseu = true;
4091 			break;
4092 		}
4093 		case DRM_I915_PERF_PROP_POLL_OA_PERIOD:
4094 			if (value < 100000 /* 100us */) {
4095 				drm_dbg(&perf->i915->drm,
4096 					"OA availability timer too small (%lluns < 100us)\n",
4097 					  value);
4098 				return -EINVAL;
4099 			}
4100 			props->poll_oa_period = value;
4101 			break;
4102 		case DRM_I915_PERF_PROP_MAX:
4103 			MISSING_CASE(id);
4104 			return -EINVAL;
4105 		}
4106 
4107 		uprop += 2;
4108 	}
4109 
4110 	return 0;
4111 }
4112 
4113 /**
4114  * i915_perf_open_ioctl - DRM ioctl() for userspace to open a stream FD
4115  * @dev: drm device
4116  * @data: ioctl data copied from userspace (unvalidated)
4117  * @file: drm file
4118  *
4119  * Validates the stream open parameters given by userspace including flags
4120  * and an array of u64 key, value pair properties.
4121  *
4122  * Very little is assumed up front about the nature of the stream being
4123  * opened (for instance we don't assume it's for periodic OA unit metrics). An
4124  * i915-perf stream is expected to be a suitable interface for other forms of
4125  * buffered data written by the GPU besides periodic OA metrics.
4126  *
4127  * Note we copy the properties from userspace outside of the i915 perf
4128  * mutex to avoid an awkward lockdep with mmap_lock.
4129  *
4130  * Most of the implementation details are handled by
4131  * i915_perf_open_ioctl_locked() after taking the &gt->perf.lock
4132  * mutex for serializing with any non-file-operation driver hooks.
4133  *
4134  * Return: A newly opened i915 Perf stream file descriptor or negative
4135  * error code on failure.
4136  */
4137 int i915_perf_open_ioctl(struct drm_device *dev, void *data,
4138 			 struct drm_file *file)
4139 {
4140 	struct i915_perf *perf = &to_i915(dev)->perf;
4141 	struct drm_i915_perf_open_param *param = data;
4142 	struct intel_gt *gt;
4143 	struct perf_open_properties props;
4144 	u32 known_open_flags;
4145 	int ret;
4146 
4147 	if (!perf->i915) {
4148 		/* Can't use drm_dbg() here: perf->i915 is NULL */
4149 		DRM_DEBUG("i915 perf interface not available for this system\n");
4150 		return -ENOTSUPP;
4151 	}
4152 
4153 	known_open_flags = I915_PERF_FLAG_FD_CLOEXEC |
4154 			   I915_PERF_FLAG_FD_NONBLOCK |
4155 			   I915_PERF_FLAG_DISABLED;
4156 	if (param->flags & ~known_open_flags) {
4157 		drm_dbg(&perf->i915->drm,
4158 			"Unknown drm_i915_perf_open_param flag\n");
4159 		return -EINVAL;
4160 	}
4161 
4162 	ret = read_properties_unlocked(perf,
4163 				       u64_to_user_ptr(param->properties_ptr),
4164 				       param->num_properties,
4165 				       &props);
4166 	if (ret)
4167 		return ret;
4168 
4169 	gt = props.engine->gt;
4170 
4171 	mutex_lock(&gt->perf.lock);
4172 	ret = i915_perf_open_ioctl_locked(perf, param, &props, file);
4173 	mutex_unlock(&gt->perf.lock);
4174 
4175 	return ret;
4176 }
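
/*
 * A minimal userspace sketch of opening a stream (not part of the driver;
 * "drm_fd" is assumed to be an open i915 DRM fd and "config_id" a metric
 * set id taken from sysfs or DRM_IOCTL_I915_PERF_ADD_CONFIG; error
 * handling is elided):
 *
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *	#include <drm/i915_drm.h>
 *
 *	uint64_t properties[] = {
 *		// include raw OA reports in samples
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *
 *		// metric set, report format and periodic sampling exponent
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, config_id,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC | I915_PERF_FLAG_DISABLED,
 *		.num_properties = sizeof(properties) / (2 * sizeof(uint64_t)),
 *		.properties_ptr = (uintptr_t)properties,
 *	};
 *	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 */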
4177 
4178 /**
4179  * i915_perf_register - exposes i915-perf to userspace
4180  * @i915: i915 device instance
4181  *
4182  * In particular OA metric sets are advertised under a sysfs metrics/
4183  * directory allowing userspace to enumerate valid IDs that can be
4184  * used to open an i915-perf stream.
4185  */
4186 void i915_perf_register(struct drm_i915_private *i915)
4187 {
4188 	struct i915_perf *perf = &i915->perf;
4189 	struct intel_gt *gt = to_gt(i915);
4190 
4191 	if (!perf->i915)
4192 		return;
4193 
4194 	/* To be sure we're synchronized with an attempted
4195 	 * i915_perf_open_ioctl(); considering that we register after
4196 	 * being exposed to userspace.
4197 	 */
4198 	mutex_lock(&gt->perf.lock);
4199 
4200 	perf->metrics_kobj =
4201 		kobject_create_and_add("metrics",
4202 				       &i915->drm.primary->kdev->kobj);
4203 
4204 	mutex_unlock(&gt->perf.lock);
4205 }
4206 
4207 /**
4208  * i915_perf_unregister - hide i915-perf from userspace
4209  * @i915: i915 device instance
4210  *
4211  * i915-perf state cleanup is split up into an 'unregister' and
4212  * 'deinit' phase where the interface is first hidden from
4213  * userspace by i915_perf_unregister() before cleaning up
4214  * remaining state in i915_perf_fini().
4215  */
4216 void i915_perf_unregister(struct drm_i915_private *i915)
4217 {
4218 	struct i915_perf *perf = &i915->perf;
4219 
4220 	if (!perf->metrics_kobj)
4221 		return;
4222 
4223 	kobject_put(perf->metrics_kobj);
4224 	perf->metrics_kobj = NULL;
4225 }
4226 
4227 static bool gen8_is_valid_flex_addr(struct i915_perf *perf, u32 addr)
4228 {
4229 	static const i915_reg_t flex_eu_regs[] = {
4230 		EU_PERF_CNTL0,
4231 		EU_PERF_CNTL1,
4232 		EU_PERF_CNTL2,
4233 		EU_PERF_CNTL3,
4234 		EU_PERF_CNTL4,
4235 		EU_PERF_CNTL5,
4236 		EU_PERF_CNTL6,
4237 	};
4238 	int i;
4239 
4240 	for (i = 0; i < ARRAY_SIZE(flex_eu_regs); i++) {
4241 		if (i915_mmio_reg_offset(flex_eu_regs[i]) == addr)
4242 			return true;
4243 	}
4244 	return false;
4245 }
4246 
4247 static bool reg_in_range_table(u32 addr, const struct i915_range *table)
4248 {
4249 	while (table->start || table->end) {
4250 		if (addr >= table->start && addr <= table->end)
4251 			return true;
4252 
4253 		table++;
4254 	}
4255 
4256 	return false;
4257 }
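
/*
 * Each range table below must end with an empty sentinel entry, otherwise
 * the walk above runs past the end of the array. A minimal sketch of the
 * expected shape (illustrative addresses):
 *
 *	static const struct i915_range example_oa_regs[] = {
 *		{ .start = 0x2710, .end = 0x272c },
 *		{}
 *	};
 */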
4258 
4259 #define REG_EQUAL(addr, mmio) \
4260 	((addr) == i915_mmio_reg_offset(mmio))
4261 
4262 static const struct i915_range gen7_oa_b_counters[] = {
4263 	{ .start = 0x2710, .end = 0x272c },	/* OASTARTTRIG[1-8] */
4264 	{ .start = 0x2740, .end = 0x275c },	/* OAREPORTTRIG[1-8] */
4265 	{ .start = 0x2770, .end = 0x27ac },	/* OACEC[0-7][0-1] */
4266 	{}
4267 };
4268 
4269 static const struct i915_range gen12_oa_b_counters[] = {
4270 	{ .start = 0x2b2c, .end = 0x2b2c },	/* GEN12_OAG_OA_PESS */
4271 	{ .start = 0xd900, .end = 0xd91c },	/* GEN12_OAG_OASTARTTRIG[1-8] */
4272 	{ .start = 0xd920, .end = 0xd93c },	/* GEN12_OAG_OAREPORTTRIG1[1-8] */
4273 	{ .start = 0xd940, .end = 0xd97c },	/* GEN12_OAG_CEC[0-7][0-1] */
4274 	{ .start = 0xdc00, .end = 0xdc3c },	/* GEN12_OAG_SCEC[0-7][0-1] */
4275 	{ .start = 0xdc40, .end = 0xdc40 },	/* GEN12_OAG_SPCTR_CNF */
4276 	{ .start = 0xdc44, .end = 0xdc44 },	/* GEN12_OAA_DBG_REG */
4277 	{}
4278 };
4279 
4280 static const struct i915_range xehp_oa_b_counters[] = {
4281 	{ .start = 0xdc48, .end = 0xdc48 },	/* OAA_ENABLE_REG */
4282 	{ .start = 0xdd00, .end = 0xdd48 },	/* OAG_LCE0_0 - OAA_LENABLE_REG */
	{}	/* sentinel for reg_in_range_table() */
4283 };
4284 
4285 static const struct i915_range gen7_oa_mux_regs[] = {
4286 	{ .start = 0x91b8, .end = 0x91cc },	/* OA_PERFCNT[1-2], OA_PERFMATRIX */
4287 	{ .start = 0x9800, .end = 0x9888 },	/* MICRO_BP0_0 - NOA_WRITE */
4288 	{ .start = 0xe180, .end = 0xe180 },	/* HALF_SLICE_CHICKEN2 */
4289 	{}
4290 };
4291 
4292 static const struct i915_range hsw_oa_mux_regs[] = {
4293 	{ .start = 0x09e80, .end = 0x09ea4 }, /* HSW_MBVID2_NOA[0-9] */
4294 	{ .start = 0x09ec0, .end = 0x09ec0 }, /* HSW_MBVID2_MISR0 */
4295 	{ .start = 0x25100, .end = 0x2ff90 },
4296 	{}
4297 };
4298 
4299 static const struct i915_range chv_oa_mux_regs[] = {
4300 	{ .start = 0x182300, .end = 0x1823a4 },
4301 	{}
4302 };
4303 
4304 static const struct i915_range gen8_oa_mux_regs[] = {
4305 	{ .start = 0x0d00, .end = 0x0d2c },	/* RPM_CONFIG[0-1], NOA_CONFIG[0-8] */
4306 	{ .start = 0x20cc, .end = 0x20cc },	/* WAIT_FOR_RC6_EXIT */
4307 	{}
4308 };
4309 
4310 static const struct i915_range gen11_oa_mux_regs[] = {
4311 	{ .start = 0x91c8, .end = 0x91dc },	/* OA_PERFCNT[3-4] */
4312 	{}
4313 };
4314 
4315 static const struct i915_range gen12_oa_mux_regs[] = {
4316 	{ .start = 0x0d00, .end = 0x0d04 },     /* RPM_CONFIG[0-1] */
4317 	{ .start = 0x0d0c, .end = 0x0d2c },     /* NOA_CONFIG[0-8] */
4318 	{ .start = 0x9840, .end = 0x9840 },	/* GDT_CHICKEN_BITS */
4319 	{ .start = 0x9884, .end = 0x9888 },	/* NOA_WRITE */
4320 	{ .start = 0x20cc, .end = 0x20cc },	/* WAIT_FOR_RC6_EXIT */
4321 	{}
4322 };
4323 
4324 /*
4325  * Ref: 14010536224:
4326  * 0x20cc is repurposed on MTL, so use a separate array for MTL.
4327  */
4328 static const struct i915_range mtl_oa_mux_regs[] = {
4329 	{ .start = 0x0d00, .end = 0x0d04 },	/* RPM_CONFIG[0-1] */
4330 	{ .start = 0x0d0c, .end = 0x0d2c },	/* NOA_CONFIG[0-8] */
4331 	{ .start = 0x9840, .end = 0x9840 },	/* GDT_CHICKEN_BITS */
4332 	{ .start = 0x9884, .end = 0x9888 },	/* NOA_WRITE */
	{}	/* sentinel for reg_in_range_table() */
4333 };
4334 
4335 static bool gen7_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
4336 {
4337 	return reg_in_range_table(addr, gen7_oa_b_counters);
4338 }
4339 
4340 static bool gen8_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
4341 {
4342 	return reg_in_range_table(addr, gen7_oa_mux_regs) ||
4343 		reg_in_range_table(addr, gen8_oa_mux_regs);
4344 }
4345 
4346 static bool gen11_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
4347 {
4348 	return reg_in_range_table(addr, gen7_oa_mux_regs) ||
4349 		reg_in_range_table(addr, gen8_oa_mux_regs) ||
4350 		reg_in_range_table(addr, gen11_oa_mux_regs);
4351 }
4352 
4353 static bool hsw_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
4354 {
4355 	return reg_in_range_table(addr, gen7_oa_mux_regs) ||
4356 		reg_in_range_table(addr, hsw_oa_mux_regs);
4357 }
4358 
4359 static bool chv_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
4360 {
4361 	return reg_in_range_table(addr, gen7_oa_mux_regs) ||
4362 		reg_in_range_table(addr, chv_oa_mux_regs);
4363 }
4364 
4365 static bool gen12_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
4366 {
4367 	return reg_in_range_table(addr, gen12_oa_b_counters);
4368 }
4369 
4370 static bool xehp_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
4371 {
4372 	return reg_in_range_table(addr, xehp_oa_b_counters) ||
4373 		reg_in_range_table(addr, gen12_oa_b_counters);
4374 }
4375 
4376 static bool gen12_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
4377 {
4378 	if (IS_METEORLAKE(perf->i915))
4379 		return reg_in_range_table(addr, mtl_oa_mux_regs);
4380 	else
4381 		return reg_in_range_table(addr, gen12_oa_mux_regs);
4382 }
4383 
4384 static u32 mask_reg_value(u32 reg, u32 val)
4385 {
4386 	/* HALF_SLICE_CHICKEN2 is programmed with the
4387 	 * WaDisableSTUnitPowerOptimization workaround. Make sure the value
4388 	 * programmed by userspace doesn't change this.
4389 	 */
4390 	if (REG_EQUAL(reg, HALF_SLICE_CHICKEN2))
4391 		val = val & ~_MASKED_BIT_ENABLE(GEN8_ST_PO_DISABLE);
4392 
4393 	/* WAIT_FOR_RC6_EXIT has only one bit fulfilling the function
4394 	 * indicated by its name and a bunch of selection fields used by OA
4395 	 * configs.
4396 	 */
4397 	if (REG_EQUAL(reg, WAIT_FOR_RC6_EXIT))
4398 		val = val & ~_MASKED_BIT_ENABLE(HSW_WAIT_FOR_RC6_EXIT_ENABLE);
4399 
4400 	return val;
4401 }
4402 
4403 static struct i915_oa_reg *alloc_oa_regs(struct i915_perf *perf,
4404 					 bool (*is_valid)(struct i915_perf *perf, u32 addr),
4405 					 u32 __user *regs,
4406 					 u32 n_regs)
4407 {
4408 	struct i915_oa_reg *oa_regs;
4409 	int err;
4410 	u32 i;
4411 
4412 	if (!n_regs)
4413 		return NULL;
4414 
4415 	/* No is_valid function means we're not allowing any register to be programmed. */
4416 	GEM_BUG_ON(!is_valid);
4417 	if (!is_valid)
4418 		return ERR_PTR(-EINVAL);
4419 
4420 	oa_regs = kmalloc_array(n_regs, sizeof(*oa_regs), GFP_KERNEL);
4421 	if (!oa_regs)
4422 		return ERR_PTR(-ENOMEM);
4423 
4424 	for (i = 0; i < n_regs; i++) {
4425 		u32 addr, value;
4426 
4427 		err = get_user(addr, regs);
4428 		if (err)
4429 			goto addr_err;
4430 
4431 		if (!is_valid(perf, addr)) {
4432 			drm_dbg(&perf->i915->drm,
4433 				"Invalid oa_reg address: %X\n", addr);
4434 			err = -EINVAL;
4435 			goto addr_err;
4436 		}
4437 
4438 		err = get_user(value, regs + 1);
4439 		if (err)
4440 			goto addr_err;
4441 
4442 		oa_regs[i].addr = _MMIO(addr);
4443 		oa_regs[i].value = mask_reg_value(addr, value);
4444 
4445 		regs += 2;
4446 	}
4447 
4448 	return oa_regs;
4449 
4450 addr_err:
4451 	kfree(oa_regs);
4452 	return ERR_PTR(err);
4453 }
4454 
4455 static ssize_t show_dynamic_id(struct kobject *kobj,
4456 			       struct kobj_attribute *attr,
4457 			       char *buf)
4458 {
4459 	struct i915_oa_config *oa_config =
4460 		container_of(attr, typeof(*oa_config), sysfs_metric_id);
4461 
4462 	return sysfs_emit(buf, "%d\n", oa_config->id);
4463 }
4464 
4465 static int create_dynamic_oa_sysfs_entry(struct i915_perf *perf,
4466 					 struct i915_oa_config *oa_config)
4467 {
4468 	sysfs_attr_init(&oa_config->sysfs_metric_id.attr);
4469 	oa_config->sysfs_metric_id.attr.name = "id";
4470 	oa_config->sysfs_metric_id.attr.mode = S_IRUGO;
4471 	oa_config->sysfs_metric_id.show = show_dynamic_id;
4472 	oa_config->sysfs_metric_id.store = NULL;
4473 
4474 	oa_config->attrs[0] = &oa_config->sysfs_metric_id.attr;
4475 	oa_config->attrs[1] = NULL;
4476 
4477 	oa_config->sysfs_metric.name = oa_config->uuid;
4478 	oa_config->sysfs_metric.attrs = oa_config->attrs;
4479 
4480 	return sysfs_create_group(perf->metrics_kobj,
4481 				  &oa_config->sysfs_metric);
4482 }
4483 
4484 /**
4485  * i915_perf_add_config_ioctl - DRM ioctl() for userspace to add a new OA config
4486  * @dev: drm device
4487  * @data: ioctl data (pointer to struct drm_i915_perf_oa_config) copied from
4488  *        userspace (unvalidated)
4489  * @file: drm file
4490  *
4491  * Validates the submitted OA register to be saved into a new OA config that
4492  * can then be used for programming the OA unit and its NOA network.
4493  *
4494  * Returns: A new allocated config number to be used with the perf open ioctl
4495  * or a negative error code on failure.
4496  */
4497 int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
4498 			       struct drm_file *file)
4499 {
4500 	struct i915_perf *perf = &to_i915(dev)->perf;
4501 	struct drm_i915_perf_oa_config *args = data;
4502 	struct i915_oa_config *oa_config, *tmp;
4503 	struct i915_oa_reg *regs;
4504 	int err, id;
4505 
4506 	if (!perf->i915) {
4507 		/* Can't use drm_dbg() here: perf->i915 is NULL */
4508 		DRM_DEBUG("i915 perf interface not available for this system\n");
4509 		return -ENOTSUPP;
4510 	}
4511 
4512 	if (!perf->metrics_kobj) {
4513 		drm_dbg(&perf->i915->drm,
4514 			"OA metrics weren't advertised via sysfs\n");
4515 		return -EINVAL;
4516 	}
4517 
4518 	if (i915_perf_stream_paranoid && !perfmon_capable()) {
4519 		drm_dbg(&perf->i915->drm,
4520 			"Insufficient privileges to add i915 OA config\n");
4521 		return -EACCES;
4522 	}
4523 
4524 	if ((!args->mux_regs_ptr || !args->n_mux_regs) &&
4525 	    (!args->boolean_regs_ptr || !args->n_boolean_regs) &&
4526 	    (!args->flex_regs_ptr || !args->n_flex_regs)) {
4527 		drm_dbg(&perf->i915->drm,
4528 			"No OA registers given\n");
4529 		return -EINVAL;
4530 	}
4531 
4532 	oa_config = kzalloc(sizeof(*oa_config), GFP_KERNEL);
4533 	if (!oa_config) {
4534 		drm_dbg(&perf->i915->drm,
4535 			"Failed to allocate memory for the OA config\n");
4536 		return -ENOMEM;
4537 	}
4538 
4539 	oa_config->perf = perf;
4540 	kref_init(&oa_config->ref);
4541 
4542 	if (!uuid_is_valid(args->uuid)) {
4543 		drm_dbg(&perf->i915->drm,
4544 			"Invalid uuid format for OA config\n");
4545 		err = -EINVAL;
4546 		goto reg_err;
4547 	}
4548 
4549 	/* Last character in oa_config->uuid will be 0 because oa_config was
4550 	 * allocated with kzalloc().
4551 	 */
4552 	memcpy(oa_config->uuid, args->uuid, sizeof(args->uuid));
4553 
4554 	oa_config->mux_regs_len = args->n_mux_regs;
4555 	regs = alloc_oa_regs(perf,
4556 			     perf->ops.is_valid_mux_reg,
4557 			     u64_to_user_ptr(args->mux_regs_ptr),
4558 			     args->n_mux_regs);
4559 
4560 	if (IS_ERR(regs)) {
4561 		drm_dbg(&perf->i915->drm,
4562 			"Failed to create OA config for mux_regs\n");
4563 		err = PTR_ERR(regs);
4564 		goto reg_err;
4565 	}
4566 	oa_config->mux_regs = regs;
4567 
4568 	oa_config->b_counter_regs_len = args->n_boolean_regs;
4569 	regs = alloc_oa_regs(perf,
4570 			     perf->ops.is_valid_b_counter_reg,
4571 			     u64_to_user_ptr(args->boolean_regs_ptr),
4572 			     args->n_boolean_regs);
4573 
4574 	if (IS_ERR(regs)) {
4575 		drm_dbg(&perf->i915->drm,
4576 			"Failed to create OA config for b_counter_regs\n");
4577 		err = PTR_ERR(regs);
4578 		goto reg_err;
4579 	}
4580 	oa_config->b_counter_regs = regs;
4581 
4582 	if (GRAPHICS_VER(perf->i915) < 8) {
4583 		if (args->n_flex_regs != 0) {
4584 			err = -EINVAL;
4585 			goto reg_err;
4586 		}
4587 	} else {
4588 		oa_config->flex_regs_len = args->n_flex_regs;
4589 		regs = alloc_oa_regs(perf,
4590 				     perf->ops.is_valid_flex_reg,
4591 				     u64_to_user_ptr(args->flex_regs_ptr),
4592 				     args->n_flex_regs);
4593 
4594 		if (IS_ERR(regs)) {
4595 			drm_dbg(&perf->i915->drm,
4596 				"Failed to create OA config for flex_regs\n");
4597 			err = PTR_ERR(regs);
4598 			goto reg_err;
4599 		}
4600 		oa_config->flex_regs = regs;
4601 	}
4602 
4603 	err = mutex_lock_interruptible(&perf->metrics_lock);
4604 	if (err)
4605 		goto reg_err;
4606 
4607 	/* We shouldn't have too many configs, so this iteration shouldn't be
4608 	 * too costly.
4609 	 */
4610 	idr_for_each_entry(&perf->metrics_idr, tmp, id) {
4611 		if (!strcmp(tmp->uuid, oa_config->uuid)) {
4612 			drm_dbg(&perf->i915->drm,
4613 				"OA config already exists with this uuid\n");
4614 			err = -EADDRINUSE;
4615 			goto sysfs_err;
4616 		}
4617 	}
4618 
4619 	err = create_dynamic_oa_sysfs_entry(perf, oa_config);
4620 	if (err) {
4621 		drm_dbg(&perf->i915->drm,
4622 			"Failed to create sysfs entry for OA config\n");
4623 		goto sysfs_err;
4624 	}
4625 
4626 	/* Config id 0 is invalid, id 1 for kernel stored test config. */
4627 	oa_config->id = idr_alloc(&perf->metrics_idr,
4628 				  oa_config, 2,
4629 				  0, GFP_KERNEL);
4630 	if (oa_config->id < 0) {
4631 		drm_dbg(&perf->i915->drm,
4632 			"Failed to allocate an id for the OA config\n");
4633 		err = oa_config->id;
4634 		goto sysfs_err;
4635 	}
4636 
4637 	mutex_unlock(&perf->metrics_lock);
4638 
4639 	drm_dbg(&perf->i915->drm,
4640 		"Added config %s id=%i\n", oa_config->uuid, oa_config->id);
4641 
4642 	return oa_config->id;
4643 
4644 sysfs_err:
4645 	mutex_unlock(&perf->metrics_lock);
4646 reg_err:
4647 	i915_oa_config_put(oa_config);
4648 	drm_dbg(&perf->i915->drm,
4649 		"Failed to add new OA config\n");
4650 	return err;
4651 }
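
/*
 * A minimal userspace sketch of adding a config (not part of the driver;
 * the uuid and the register address/value pair are illustrative
 * placeholders; error handling is elided):
 *
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *	#include <drm/i915_drm.h>
 *
 *	// flat (address, value) pairs for the boolean counter unit
 *	uint32_t b_counter_regs[] = {
 *		0x2710, 0x0,
 *	};
 *	struct drm_i915_perf_oa_config config = {
 *		.uuid = "01234567-0123-0123-0123-0123456789ab",
 *		.n_boolean_regs = sizeof(b_counter_regs) / (2 * sizeof(uint32_t)),
 *		.boolean_regs_ptr = (uintptr_t)b_counter_regs,
 *	};
 *	int config_id = ioctl(drm_fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &config);
 */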
4652 
4653 /**
4654  * i915_perf_remove_config_ioctl - DRM ioctl() for userspace to remove an OA config
4655  * @dev: drm device
4656  * @data: ioctl data (pointer to u64 integer) copied from userspace
4657  * @file: drm file
4658  *
4659  * Configs can be removed while being used; they will stop appearing in sysfs
4660  * and their content will be freed when the stream using the config is closed.
4661  *
4662  * Returns: 0 on success or a negative error code on failure.
4663  */
4664 int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
4665 				  struct drm_file *file)
4666 {
4667 	struct i915_perf *perf = &to_i915(dev)->perf;
4668 	u64 *arg = data;
4669 	struct i915_oa_config *oa_config;
4670 	int ret;
4671 
4672 	if (!perf->i915) {
4673 		/* Can't use drm_dbg() here: perf->i915 is NULL */
4674 		DRM_DEBUG("i915 perf interface not available for this system\n");
4675 		return -ENOTSUPP;
4676 	}
4677 
4678 	if (i915_perf_stream_paranoid && !perfmon_capable()) {
4679 		drm_dbg(&perf->i915->drm,
4680 			"Insufficient privileges to remove i915 OA config\n");
4681 		return -EACCES;
4682 	}
4683 
4684 	ret = mutex_lock_interruptible(&perf->metrics_lock);
4685 	if (ret)
4686 		return ret;
4687 
4688 	oa_config = idr_find(&perf->metrics_idr, *arg);
4689 	if (!oa_config) {
4690 		drm_dbg(&perf->i915->drm,
4691 			"Failed to remove unknown OA config\n");
4692 		ret = -ENOENT;
4693 		goto err_unlock;
4694 	}
4695 
4696 	GEM_BUG_ON(*arg != oa_config->id);
4697 
4698 	sysfs_remove_group(perf->metrics_kobj, &oa_config->sysfs_metric);
4699 
4700 	idr_remove(&perf->metrics_idr, *arg);
4701 
4702 	mutex_unlock(&perf->metrics_lock);
4703 
4704 	drm_dbg(&perf->i915->drm,
4705 		"Removed config %s id=%i\n", oa_config->uuid, oa_config->id);
4706 
4707 	i915_oa_config_put(oa_config);
4708 
4709 	return 0;
4710 
4711 err_unlock:
4712 	mutex_unlock(&perf->metrics_lock);
4713 	return ret;
4714 }
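
/*
 * And the converse, removing a config by id (same assumptions as the
 * sketch above):
 *
 *	uint64_t id = config_id;
 *	ioctl(drm_fd, DRM_IOCTL_I915_PERF_REMOVE_CONFIG, &id);
 */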
4715 
4716 static struct ctl_table oa_table[] = {
4717 	{
4718 	 .procname = "perf_stream_paranoid",
4719 	 .data = &i915_perf_stream_paranoid,
4720 	 .maxlen = sizeof(i915_perf_stream_paranoid),
4721 	 .mode = 0644,
4722 	 .proc_handler = proc_dointvec_minmax,
4723 	 .extra1 = SYSCTL_ZERO,
4724 	 .extra2 = SYSCTL_ONE,
4725 	 },
4726 	{
4727 	 .procname = "oa_max_sample_rate",
4728 	 .data = &i915_oa_max_sample_rate,
4729 	 .maxlen = sizeof(i915_oa_max_sample_rate),
4730 	 .mode = 0644,
4731 	 .proc_handler = proc_dointvec_minmax,
4732 	 .extra1 = SYSCTL_ZERO,
4733 	 .extra2 = &oa_sample_rate_hard_limit,
4734 	 },
4735 	{}
4736 };
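
/*
 * The entries above surface as the dev.i915.perf_stream_paranoid and
 * dev.i915.oa_max_sample_rate sysctl options referenced throughout this
 * file (registered under "dev/i915" in i915_perf_sysctl_register()).
 */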
4737 
4738 static void oa_init_supported_formats(struct i915_perf *perf)
4739 {
4740 	struct drm_i915_private *i915 = perf->i915;
4741 	enum intel_platform platform = INTEL_INFO(i915)->platform;
4742 
4743 	switch (platform) {
4744 	case INTEL_HASWELL:
4745 		oa_format_add(perf, I915_OA_FORMAT_A13);
4747 		oa_format_add(perf, I915_OA_FORMAT_A29);
4748 		oa_format_add(perf, I915_OA_FORMAT_A13_B8_C8);
4749 		oa_format_add(perf, I915_OA_FORMAT_B4_C8);
4750 		oa_format_add(perf, I915_OA_FORMAT_A45_B8_C8);
4751 		oa_format_add(perf, I915_OA_FORMAT_B4_C8_A16);
4752 		oa_format_add(perf, I915_OA_FORMAT_C4_B8);
4753 		break;
4754 
4755 	case INTEL_BROADWELL:
4756 	case INTEL_CHERRYVIEW:
4757 	case INTEL_SKYLAKE:
4758 	case INTEL_BROXTON:
4759 	case INTEL_KABYLAKE:
4760 	case INTEL_GEMINILAKE:
4761 	case INTEL_COFFEELAKE:
4762 	case INTEL_COMETLAKE:
4763 	case INTEL_ICELAKE:
4764 	case INTEL_ELKHARTLAKE:
4765 	case INTEL_JASPERLAKE:
4766 	case INTEL_TIGERLAKE:
4767 	case INTEL_ROCKETLAKE:
4768 	case INTEL_DG1:
4769 	case INTEL_ALDERLAKE_S:
4770 	case INTEL_ALDERLAKE_P:
4771 		oa_format_add(perf, I915_OA_FORMAT_A12);
4772 		oa_format_add(perf, I915_OA_FORMAT_A12_B8_C8);
4773 		oa_format_add(perf, I915_OA_FORMAT_A32u40_A4u32_B8_C8);
4774 		oa_format_add(perf, I915_OA_FORMAT_C4_B8);
4775 		break;
4776 
4777 	case INTEL_DG2:
4778 	case INTEL_METEORLAKE:
4779 		oa_format_add(perf, I915_OAR_FORMAT_A32u40_A4u32_B8_C8);
4780 		oa_format_add(perf, I915_OA_FORMAT_A24u40_A14u32_B8_C8);
4781 		break;
4782 
4783 	default:
4784 		MISSING_CASE(platform);
4785 	}
4786 }
4787 
4788 static void i915_perf_init_info(struct drm_i915_private *i915)
4789 {
4790 	struct i915_perf *perf = &i915->perf;
4791 
4792 	switch (GRAPHICS_VER(i915)) {
4793 	case 8:
4794 		perf->ctx_oactxctrl_offset = 0x120;
4795 		perf->ctx_flexeu0_offset = 0x2ce;
4796 		perf->gen8_valid_ctx_bit = BIT(25);
4797 		break;
4798 	case 9:
4799 		perf->ctx_oactxctrl_offset = 0x128;
4800 		perf->ctx_flexeu0_offset = 0x3de;
4801 		perf->gen8_valid_ctx_bit = BIT(16);
4802 		break;
4803 	case 11:
4804 		perf->ctx_oactxctrl_offset = 0x124;
4805 		perf->ctx_flexeu0_offset = 0x78e;
4806 		perf->gen8_valid_ctx_bit = BIT(16);
4807 		break;
4808 	case 12:
4809 		/*
4810 		 * Calculate offset at runtime in oa_pin_context for gen12 and
4811 		 * cache the value in perf->ctx_oactxctrl_offset.
4812 		 */
4813 		break;
4814 	default:
4815 		MISSING_CASE(GRAPHICS_VER(i915));
4816 	}
4817 }
4818 
4819 /**
4820  * i915_perf_init - initialize i915-perf state on module bind
4821  * @i915: i915 device instance
4822  *
4823  * Initializes i915-perf state without exposing anything to userspace.
4824  *
4825  * Note: i915-perf initialization is split into an 'init' and 'register'
4826  * phase with the i915_perf_register() exposing state to userspace.
4827  */
4828 void i915_perf_init(struct drm_i915_private *i915)
4829 {
4830 	struct i915_perf *perf = &i915->perf;
4831 
4832 	perf->oa_formats = oa_formats;
4833 	if (IS_HASWELL(i915)) {
4834 		perf->ops.is_valid_b_counter_reg = gen7_is_valid_b_counter_addr;
4835 		perf->ops.is_valid_mux_reg = hsw_is_valid_mux_addr;
4836 		perf->ops.is_valid_flex_reg = NULL;
4837 		perf->ops.enable_metric_set = hsw_enable_metric_set;
4838 		perf->ops.disable_metric_set = hsw_disable_metric_set;
4839 		perf->ops.oa_enable = gen7_oa_enable;
4840 		perf->ops.oa_disable = gen7_oa_disable;
4841 		perf->ops.read = gen7_oa_read;
4842 		perf->ops.oa_hw_tail_read = gen7_oa_hw_tail_read;
4843 	} else if (HAS_LOGICAL_RING_CONTEXTS(i915)) {
4844 		/* Note that although we could theoretically also support the
4845 		 * legacy ringbuffer mode on BDW (and earlier iterations of
4846 		 * this driver, before upstreaming did this) it didn't seem
4847 		 * worth the complexity to maintain now that BDW+ enable
4848 		 * execlist mode by default.
4849 		 */
4850 		perf->ops.read = gen8_oa_read;
4851 		i915_perf_init_info(i915);
4852 
4853 		if (IS_GRAPHICS_VER(i915, 8, 9)) {
4854 			perf->ops.is_valid_b_counter_reg =
4855 				gen7_is_valid_b_counter_addr;
4856 			perf->ops.is_valid_mux_reg =
4857 				gen8_is_valid_mux_addr;
4858 			perf->ops.is_valid_flex_reg =
4859 				gen8_is_valid_flex_addr;
4860 
4861 			if (IS_CHERRYVIEW(i915)) {
4862 				perf->ops.is_valid_mux_reg =
4863 					chv_is_valid_mux_addr;
4864 			}
4865 
4866 			perf->ops.oa_enable = gen8_oa_enable;
4867 			perf->ops.oa_disable = gen8_oa_disable;
4868 			perf->ops.enable_metric_set = gen8_enable_metric_set;
4869 			perf->ops.disable_metric_set = gen8_disable_metric_set;
4870 			perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
4871 		} else if (GRAPHICS_VER(i915) == 11) {
4872 			perf->ops.is_valid_b_counter_reg =
4873 				gen7_is_valid_b_counter_addr;
4874 			perf->ops.is_valid_mux_reg =
4875 				gen11_is_valid_mux_addr;
4876 			perf->ops.is_valid_flex_reg =
4877 				gen8_is_valid_flex_addr;
4878 
4879 			perf->ops.oa_enable = gen8_oa_enable;
4880 			perf->ops.oa_disable = gen8_oa_disable;
4881 			perf->ops.enable_metric_set = gen8_enable_metric_set;
4882 			perf->ops.disable_metric_set = gen11_disable_metric_set;
4883 			perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
4884 		} else if (GRAPHICS_VER(i915) == 12) {
4885 			perf->ops.is_valid_b_counter_reg =
4886 				HAS_OA_SLICE_CONTRIB_LIMITS(i915) ?
4887 				xehp_is_valid_b_counter_addr :
4888 				gen12_is_valid_b_counter_addr;
4889 			perf->ops.is_valid_mux_reg =
4890 				gen12_is_valid_mux_addr;
4891 			perf->ops.is_valid_flex_reg =
4892 				gen8_is_valid_flex_addr;
4893 
4894 			perf->ops.oa_enable = gen12_oa_enable;
4895 			perf->ops.oa_disable = gen12_oa_disable;
4896 			perf->ops.enable_metric_set = gen12_enable_metric_set;
4897 			perf->ops.disable_metric_set = gen12_disable_metric_set;
4898 			perf->ops.oa_hw_tail_read = gen12_oa_hw_tail_read;
4899 		}
4900 	}
4901 
4902 	if (perf->ops.enable_metric_set) {
4903 		struct intel_gt *gt;
4904 		int i;
4905 
4906 		for_each_gt(gt, i915, i)
4907 			mutex_init(&gt->perf.lock);
4908 
4909 		/* Choose a representative limit: half the primary GT's clock */
4910 		oa_sample_rate_hard_limit = to_gt(i915)->clock_frequency / 2;
4911 
4912 		mutex_init(&perf->metrics_lock);
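		/* Dynamically added OA configs get IDs starting from 1. */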
4913 		idr_init_base(&perf->metrics_idr, 1);
4914 
4915 		/* We set up some ratelimit state to potentially throttle any
4916 		 * _NOTEs about spurious, invalid OA reports which we don't
4917 		 * forward to userspace.
4918 		 *
4919 		 * We print a _NOTE about any throttling when closing the
4920 		 * stream instead of waiting until driver _fini, which no one
4921 		 * would ever see.
4922 		 *
4923 		 * We use the same limiting factors as printk_ratelimit().
4924 		 */
4925 		ratelimit_state_init(&perf->spurious_report_rs, 5 * HZ, 10);
4926 		/* Since we use a DRM_NOTE for spurious reports it would be
4927 		 * inconsistent to let __ratelimit() automatically print a
4928 		 * warning for throttling.
4929 		 */
4930 		ratelimit_set_flags(&perf->spurious_report_rs,
4931 				    RATELIMIT_MSG_ON_RELEASE);
4932 
4933 		ratelimit_state_init(&perf->tail_pointer_race,
4934 				     5 * HZ, 10);
4935 		ratelimit_set_flags(&perf->tail_pointer_race,
4936 				    RATELIMIT_MSG_ON_RELEASE);
4937 
4938 		atomic64_set(&perf->noa_programming_delay,
4939 			     500 * 1000 /* 500us */);
4940 
4941 		perf->i915 = i915;
4942 
4943 		oa_init_supported_formats(perf);
4944 	}
4945 }
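
/*
 * A hedged sketch of the bind/unbind ordering implied by the note above;
 * illustration only (i915_perf_register()/i915_perf_unregister() live
 * elsewhere in this file, the caller shape here is assumed):
 *
 *	i915_perf_init(i915);       bind: set up state, nothing user-visible
 *	i915_perf_register(i915);   expose metrics to userspace
 *	...
 *	i915_perf_unregister(i915); hide the state from userspace again
 *	i915_perf_fini(i915);       unbind: tear the state back down
 */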
4946 
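/*
 * idr_for_each() callback used by i915_perf_fini(): drop the reference
 * that perf->metrics_idr holds on each dynamically added OA config.
 */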
4947 static int destroy_config(int id, void *p, void *data)
4948 {
4949 	i915_oa_config_put(p);
4950 	return 0;
4951 }
4952 
4953 int i915_perf_sysctl_register(void)
4954 {
4955 	sysctl_header = register_sysctl("dev/i915", oa_table);
4956 	return 0;
4957 }
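
/*
 * A hedged aside: the oa_table registered above ends up under
 * /proc/sys/dev/i915/, so a privileged user can, for example, relax the
 * system-wide stream check with something like:
 *
 *	# sysctl dev.i915.perf_stream_paranoid=0
 *
 * Knob names are taken from the oa_table definition earlier in this file.
 */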
4958 
4959 void i915_perf_sysctl_unregister(void)
4960 {
4961 	unregister_sysctl_table(sysctl_header);
4962 }
4963 
4964 /**
4965  * i915_perf_fini - Counterpart to i915_perf_init()
4966  * @i915: i915 device instance
4967  */
4968 void i915_perf_fini(struct drm_i915_private *i915)
4969 {
4970 	struct i915_perf *perf = &i915->perf;
4971 
4972 	if (!perf->i915)
4973 		return;
4974 
4975 	idr_for_each(&perf->metrics_idr, destroy_config, perf);
4976 	idr_destroy(&perf->metrics_idr);
4977 
4978 	memset(&perf->ops, 0, sizeof(perf->ops));
4979 	perf->i915 = NULL;
4980 }
4981 
4982 /**
4983  * i915_perf_ioctl_version - Version of the i915-perf subsystem
4984  *
4985  * This version number is used by userspace to detect available features.
4986  */
4987 int i915_perf_ioctl_version(void)
4988 {
4989 	/*
4990 	 * 1: Initial version
4991 	 *   I915_PERF_IOCTL_ENABLE
4992 	 *   I915_PERF_IOCTL_DISABLE
4993 	 *
4994 	 * 2: Added runtime modification of OA config.
4995 	 *   I915_PERF_IOCTL_CONFIG
4996 	 *
4997 	 * 3: Add DRM_I915_PERF_PROP_HOLD_PREEMPTION parameter to hold
4998 	 *    preemption on a particular context so that performance data is
4999 	 *    accessible from a delta of MI_RPC reports without looking at the
5000 	 *    OA buffer.
5001 	 *
5002 	 * 4: Add DRM_I915_PERF_PROP_ALLOWED_SSEU to limit what contexts can
5003 	 *    be run for the duration of the performance recording based on
5004 	 *    their SSEU configuration.
5005 	 *
5006 	 * 5: Add DRM_I915_PERF_PROP_POLL_OA_PERIOD parameter that controls the
5007 	 *    interval for the hrtimer used to check for OA data.
5008 	 */
5009 	return 5;
5010 }
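
/*
 * A hedged userspace-side sketch of how this version is consumed: the
 * value is reported through the I915_PARAM_PERF_REVISION getparam, so an
 * application can probe for features before opening a stream. Not built
 * with the driver; uapi names are from include/uapi/drm/i915_drm.h and
 * the error handling is deliberately minimal.
 */
#if 0
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int i915_perf_revision(int drm_fd)
{
	int value = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_PERF_REVISION,
		.value = &value,
	};

	/* Kernels predating version 2 don't recognise this parameter. */
	if (ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return 1;

	return value;	/* e.g. 5 for the kernel in this file */
}
#endif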
5011 
5012 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
5013 #include "selftests/i915_perf.c"
5014 #endif
5015