/*
 * Copyright © 2015-2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *   Robert Bragg <robert@sixbynine.org>
 */


/**
 * DOC: i915 Perf Overview
 *
 * Gen graphics supports a large number of performance counters that can help
 * driver and application developers understand and optimize their use of the
 * GPU.
 *
 * This i915 perf interface enables userspace to configure and open a file
 * descriptor representing a stream of GPU metrics which can then be read() as
 * a stream of sample records.
 *
 * The interface is particularly suited to exposing buffered metrics that are
 * captured by DMA from the GPU, unsynchronized with and unrelated to the CPU.
 *
 * Streams representing a single context are accessible to applications with a
 * corresponding drm file descriptor, such that OpenGL can use the interface
 * without special privileges. Access to system-wide metrics requires root
 * privileges by default, unless changed via the dev.i915.perf_stream_paranoid
 * sysctl option.
 *
 */
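/*
 * For orientation only (this sketch is not compiled as part of the driver):
 * a minimal userspace example of opening an OA metrics stream with
 * DRM_IOCTL_I915_PERF_OPEN. The metrics set ID and exponent values are
 * illustrative placeholders; a real client reads the metrics set ID for a
 * given configuration from sysfs.
 *
 *	uint64_t properties[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *		.num_properties = sizeof(properties) / (2 * sizeof(uint64_t)),
 *		.properties_ptr = (uintptr_t)properties,
 *	};
 *	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 *
 * The returned fd can then be read() or poll()ed for sample records.
 */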
/**
 * DOC: i915 Perf History and Comparison with Core Perf
 *
 * The interface was initially inspired by the core Perf infrastructure but
 * some notable differences are:
 *
 * i915 perf file descriptors represent a "stream" instead of an "event"; where
 * a perf event primarily corresponds to a single 64bit value, while a stream
 * might sample sets of tightly-coupled counters, depending on the
 * configuration. For example the Gen OA unit isn't designed to support
 * orthogonal configurations of individual counters; it's configured for a set
 * of related counters. Samples for an i915 perf stream capturing OA metrics
 * will include a set of counter values packed in a compact HW specific format.
 * The OA unit supports a number of different packing formats which can be
 * selected by the user opening the stream. Perf has support for grouping
 * events, but each event in the group is configured, validated and
 * authenticated individually with separate system calls.
 *
 * i915 perf stream configurations are provided as an array of u64 (key, value)
 * pairs, instead of a fixed struct with multiple miscellaneous config members,
 * interleaved with event-type specific members.
 *
 * i915 perf doesn't support exposing metrics via an mmap'd circular buffer.
 * The supported metrics are being written to memory by the GPU unsynchronized
 * with the CPU, using HW specific packing formats for counter sets. Sometimes
 * the constraints on HW configuration require reports to be filtered before it
 * would be acceptable to expose them to unprivileged applications - to hide
 * the metrics of other processes/contexts. For these use cases a read() based
 * interface is a good fit, and provides an opportunity to filter data as it
 * gets copied from the GPU mapped buffers to userspace buffers.
 *
 *
 * Issues hit with first prototype based on Core Perf
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * The first prototype of this driver was based on the core perf
 * infrastructure, and while we did make that mostly work, with some changes to
 * perf, we found we were breaking or working around too many assumptions baked
 * into perf's currently cpu-centric design.
 *
 * In the end we didn't see a clear benefit to making perf's implementation and
 * interface more complex by changing design assumptions while we knew we still
 * wouldn't be able to use any existing perf based userspace tools.
 *
 * Also considering the Gen specific nature of the Observability hardware and
 * how userspace will sometimes need to combine i915 perf OA metrics with
 * side-band OA data captured via MI_REPORT_PERF_COUNT commands, we're
 * expecting the interface to be used by a platform specific userspace such as
 * OpenGL or tools. This is to say: we aren't inherently missing out on having
 * a standard vendor/architecture agnostic interface by not using perf.
 *
 *
 * For posterity, in case we might re-visit trying to adapt core perf to be
 * better suited to exposing i915 metrics these were the main pain points we
 * hit:
 *
 * - The perf based OA PMU driver broke some significant design assumptions:
 *
 *   Existing perf pmus are used for profiling work on a cpu and we were
 *   introducing the idea of _IS_DEVICE pmus with different security
 *   implications, the need to fake cpu-related data (such as user/kernel
 *   registers) to fit with perf's current design, and adding _DEVICE records
 *   as a way to forward device-specific status records.
 *
 *   The OA unit writes reports of counters into a circular buffer, without
 *   involvement from the CPU, making our PMU driver the first of a kind.
 *
 *   Given the way we were periodically forwarding data from the GPU-mapped, OA
 *   buffer to perf's buffer, those bursts of sample writes looked to perf like
 *   we were sampling too fast and so we had to subvert its throttling checks.
 *
 *   Perf supports groups of counters and allows those to be read via
 *   transactions internally but transactions currently seem designed to be
 *   explicitly initiated from the cpu (say in response to a userspace read())
 *   and while we could pull a report out of the OA buffer we can't
 *   trigger a report from the cpu on demand.
 *
 *   Related to being report based; the OA counters are configured in HW as a
 *   set while perf generally expects counter configurations to be orthogonal.
 *   Although counters can be associated with a group leader as they are
 *   opened, there's no clear precedent for being able to provide group-wide
 *   configuration attributes (for example we want to let userspace choose the
 *   OA unit report format used to capture all counters in a set, or specify a
 *   GPU context to filter metrics on). We avoided using perf's grouping
 *   feature and forwarded OA reports to userspace via perf's 'raw' sample
 *   field. This suited our userspace well considering how coupled the counters
 *   are when dealing with normalizing. It would be inconvenient to split
 *   counters up into separate events, only to require userspace to recombine
 *   them. For Mesa it's also convenient to be forwarded raw, periodic reports
 *   for combining with the side-band raw reports it captures using
 *   MI_REPORT_PERF_COUNT commands.
 *
 * - As a side note on perf's grouping feature; there was also some concern
 *   that using PERF_FORMAT_GROUP as a way to pack together counter values
 *   would quite drastically inflate our sample sizes, which would likely
 *   lower the effective sampling resolutions we could use when the available
 *   memory bandwidth is limited.
 *
 *   With the OA unit's report formats, counters are packed together as 32
 *   or 40bit values, with the largest report size being 256 bytes.
 *
 *   PERF_FORMAT_GROUP values are 64bit, but there doesn't appear to be a
 *   documented ordering to the values, implying PERF_FORMAT_ID must also be
 *   used to add a 64bit ID before each value; giving 16 bytes per counter.
 *
 *   Related to counter orthogonality; we can't time share the OA unit, while
 *   event scheduling is a central design idea within perf for allowing
 *   userspace to open + enable more events than can be configured in HW at any
 *   one time. The OA unit is not designed to allow re-configuration while in
 *   use. We can't reconfigure the OA unit without losing internal OA unit
 *   state which we can't access explicitly to save and restore. Reconfiguring
 *   the OA unit is also relatively slow, involving ~100 register writes. From
 *   userspace Mesa also depends on a stable OA configuration when emitting
 *   MI_REPORT_PERF_COUNT commands and importantly the OA unit can't be
 *   disabled while there are outstanding MI_RPC commands lest we hang the
 *   command streamer.
 *
 *   The contents of sample records aren't extensible by device drivers (i.e.
 *   the sample_type bits). As an example; Sourab Gupta had been looking to
 *   attach GPU timestamps to our OA samples. We were shoehorning OA reports
 *   into sample records by using the 'raw' field, but it's tricky to pack more
 *   than one thing into this field because events/core.c currently only lets a
 *   pmu give a single raw data pointer plus len which will be copied into the
 *   ring buffer. To include more than the OA report we'd have to copy the
 *   report into an intermediate larger buffer. I'd been considering allowing a
 *   vector of data+len values to be specified for copying the raw data, but
 *   it felt like a kludge to be using the raw field for this purpose.
 *
 * - It felt like our perf based PMU was making some technical compromises
 *   just for the sake of using perf:
 *
 *   perf_event_open() requires events to either relate to a pid or a specific
 *   cpu core, while our device pmu related to neither. Events opened with a
 *   pid will be automatically enabled/disabled according to the scheduling of
 *   that process - so not appropriate for us. When an event is related to a
 *   cpu id, perf ensures pmu methods will be invoked via an inter-processor
 *   interrupt on that core. To avoid invasive changes our userspace opened OA
 *   perf events for a specific cpu. This was workable but it meant the
 *   majority of the OA driver ran in atomic context, including all OA report
 *   forwarding, which wasn't really necessary in our case and seemed to make
 *   our locking requirements somewhat complex as we handled the interaction
 *   with the rest of the i915 driver.
 */

#include <linux/anon_inodes.h>
#include <linux/sizes.h>
#include <linux/uuid.h>

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_internal.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_execlists_submission.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_clock_utils.h"
#include "gt/intel_gt_mcr.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_lrc.h"
#include "gt/intel_lrc_reg.h"
#include "gt/intel_ring.h"
#include "gt/uc/intel_guc_slpc.h"

#include "i915_drv.h"
#include "i915_file_private.h"
#include "i915_perf.h"
#include "i915_perf_oa_regs.h"
#include "i915_reg.h"

/* HW requires this to be a power of two, between 128k and 16M, though driver
 * is currently generally designed assuming the largest 16M size is used such
 * that the overflow cases are unlikely in normal operation.
 */
#define OA_BUFFER_SIZE		SZ_16M

#define OA_TAKEN(tail, head)	((tail - head) & (OA_BUFFER_SIZE - 1))

/**
 * DOC: OA Tail Pointer Race
 *
 * There's a HW race condition between OA unit tail pointer register updates and
 * writes to memory whereby the tail pointer can sometimes get ahead of what's
 * been written out to the OA buffer so far (in terms of what's visible to the
 * CPU).
 *
 * Although this can be observed explicitly while copying reports to userspace
 * by checking for a zeroed report-id field in tail reports, we want to account
 * for this earlier, as part of oa_buffer_check_unlocked(), to avoid lots of
 * redundant read() attempts.
 *
 * We work around this issue in oa_buffer_check_unlocked() by reading the
 * reports in the OA buffer, starting from the tail reported by the HW, until
 * we find a report with its first 2 dwords not 0, meaning its previous report
 * is completely in memory and ready to be read. Those dwords are also set to 0
 * once read and the whole buffer is cleared upon OA buffer initialization. The
 * first dword is the reason for this report while the second is the timestamp,
 * making the chances of having those 2 fields at 0 fairly unlikely. A more
 * detailed explanation is available in oa_buffer_check_unlocked().
 *
 * Most of the implementation details for this workaround are in
 * oa_buffer_check_unlocked() and _append_oa_reports().
 *
 * Note for posterity: previously the driver used to define an effective tail
 * pointer that lagged the real pointer by a 'tail margin' measured in bytes
 * derived from %OA_TAIL_MARGIN_NSEC and the configured sampling frequency.
 * This was flawed considering that the OA unit may also automatically generate
 * non-periodic reports (such as on context switch) or the OA unit may be
 * enabled without any periodic sampling.
 */
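/*
 * Informal illustration (not compiled): OA_TAKEN() computes how many bytes of
 * report data sit between a head and tail offset, with the mask handling
 * wraparound of the circular buffer. E.g. with a tail that has wrapped past
 * the end of the buffer while the head has not yet:
 *
 *	head = OA_BUFFER_SIZE - 256;
 *	tail = 512;
 *	OA_TAKEN(tail, head)
 *		== (512 - (OA_BUFFER_SIZE - 256)) & (OA_BUFFER_SIZE - 1)
 *		== 768;
 *
 * i.e. three 256-byte reports are available even though tail < head
 * numerically.
 */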
#define OA_TAIL_MARGIN_NSEC	100000ULL
#define INVALID_TAIL_PTR	0xffffffff

/* The default frequency for checking whether the OA unit has written new
 * reports to the circular OA buffer...
 */
#define DEFAULT_POLL_FREQUENCY_HZ 200
#define DEFAULT_POLL_PERIOD_NS (NSEC_PER_SEC / DEFAULT_POLL_FREQUENCY_HZ)

/* for sysctl proc_dointvec_minmax of dev.i915.perf_stream_paranoid */
static u32 i915_perf_stream_paranoid = true;

/* The maximum exponent the hardware accepts is 63 (essentially it selects one
 * of the 64bit timestamp bits to trigger reports from, giving a sampling
 * period of 2^(exponent + 1) timestamp ticks) but there's currently no known
 * use case for sampling as infrequently as once per 47 thousand years (2^64
 * ticks at e.g. Haswell's 12.5MHz timestamp frequency).
 *
 * Since the timestamps included in OA reports are only 32bits it seems
 * reasonable to limit the OA exponent where it's still possible to account for
 * overflow in OA report timestamps.
 */
#define OA_EXPONENT_MAX 31

#define INVALID_CTX_ID 0xffffffff

/* On Gen8+ automatically triggered OA reports include a 'reason' field... */
#define OAREPORT_REASON_MASK           0x3f
#define OAREPORT_REASON_MASK_EXTENDED  0x7f
#define OAREPORT_REASON_SHIFT          19
#define OAREPORT_REASON_TIMER          (1<<0)
#define OAREPORT_REASON_CTX_SWITCH     (1<<3)
#define OAREPORT_REASON_CLK_RATIO      (1<<5)

#define HAS_MI_SET_PREDICATE(i915) (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))

/* For sysctl proc_dointvec_minmax of i915_oa_max_sample_rate
 *
 * The highest sampling frequency we can theoretically program the OA unit
 * with is always half the timestamp frequency: E.g. 6.25MHz for Haswell.
 *
 * Initialized just before we register the sysctl parameter.
 */
static int oa_sample_rate_hard_limit;

/* Theoretically we can program the OA unit to sample every 160ns but don't
 * allow that by default unless root...
 *
 * The default threshold of 100000Hz is based on perf's similar
 * kernel.perf_event_max_sample_rate sysctl parameter.
 */
static u32 i915_oa_max_sample_rate = 100000;

/* XXX: beware if future OA HW adds new report formats that the current
 * code assumes all reports have a power-of-two size and ~(size - 1) can
 * be used as a mask to align the OA tail pointer.
 */
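/*
 * Informal sketch of the assumption above (not compiled): because every
 * format size in the table below is a power of two, the raw HW tail (which
 * advances in 64 byte increments) can be aligned down to a report boundary
 * with a simple mask, e.g. for a 256 byte format:
 *
 *	hw_tail = 0x1340;		// raw, 64 byte aligned value from HW
 *	hw_tail &= ~(256 - 1);		// hw_tail == 0x1300
 *
 * A 192 byte format would break this (and OA_TAKEN()'s modular arithmetic),
 * which is why A29_B8_C8 is disallowed below.
 */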
static const struct i915_oa_format oa_formats[I915_OA_FORMAT_MAX] = {
	[I915_OA_FORMAT_A13]		    = { 0, 64 },
	[I915_OA_FORMAT_A29]		    = { 1, 128 },
	[I915_OA_FORMAT_A13_B8_C8]	    = { 2, 128 },
	/* A29_B8_C8 Disallowed as 192 bytes doesn't factor into buffer size */
	[I915_OA_FORMAT_B4_C8]		    = { 4, 64 },
	[I915_OA_FORMAT_A45_B8_C8]	    = { 5, 256 },
	[I915_OA_FORMAT_B4_C8_A16]	    = { 6, 128 },
	[I915_OA_FORMAT_C4_B8]		    = { 7, 64 },
	[I915_OA_FORMAT_A12]		    = { 0, 64 },
	[I915_OA_FORMAT_A12_B8_C8]	    = { 2, 128 },
	[I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
	[I915_OAR_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
	[I915_OA_FORMAT_A24u40_A14u32_B8_C8] = { 5, 256 },
};

#define SAMPLE_OA_REPORT      (1<<0)

/**
 * struct perf_open_properties - for validated properties given to open a stream
 * @sample_flags: `DRM_I915_PERF_PROP_SAMPLE_*` properties are tracked as flags
 * @single_context: Whether a single or all gpu contexts should be monitored
 * @hold_preemption: Whether the preemption is disabled for the filtered
 *                   context
 * @ctx_handle: A gem ctx handle for use with @single_context
 * @metrics_set: An ID for an OA unit metric set advertised via sysfs
 * @oa_format: An OA unit HW report format
 * @oa_periodic: Whether to enable periodic OA unit sampling
 * @oa_period_exponent: The OA unit sampling period is derived from this
 * @engine: The engine (typically rcs0) being monitored by the OA unit
 * @has_sseu: Whether @sseu was specified by userspace
 * @sseu: internal SSEU configuration computed either from the userspace
 *        specified configuration in the opening parameters or a default value
 *        (see get_default_sseu_config())
 * @poll_oa_period: The period in nanoseconds at which the CPU will check for OA
 *                  data availability
 *
 * As read_properties_unlocked() enumerates and validates the properties given
 * to open a stream of metrics the configuration is built up in the structure
 * which starts out zero initialized.
 */
struct perf_open_properties {
	u32 sample_flags;

	u64 single_context:1;
	u64 hold_preemption:1;
	u64 ctx_handle;

	/* OA sampling state */
	int metrics_set;
	int oa_format;
	bool oa_periodic;
	int oa_period_exponent;

	struct intel_engine_cs *engine;

	bool has_sseu;
	struct intel_sseu sseu;

	u64 poll_oa_period;
};

struct i915_oa_config_bo {
	struct llist_node node;

	struct i915_oa_config *oa_config;
	struct i915_vma *vma;
};

static struct ctl_table_header *sysctl_header;

static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer);

void i915_oa_config_release(struct kref *ref)
{
	struct i915_oa_config *oa_config =
		container_of(ref, typeof(*oa_config), ref);

	kfree(oa_config->flex_regs);
	kfree(oa_config->b_counter_regs);
	kfree(oa_config->mux_regs);

	kfree_rcu(oa_config, rcu);
}

struct i915_oa_config *
i915_perf_get_oa_config(struct i915_perf *perf, int metrics_set)
{
	struct i915_oa_config *oa_config;

	rcu_read_lock();
	oa_config = idr_find(&perf->metrics_idr, metrics_set);
	if (oa_config)
		oa_config = i915_oa_config_get(oa_config);
	rcu_read_unlock();

	return oa_config;
}

static void free_oa_config_bo(struct i915_oa_config_bo *oa_bo)
{
	i915_oa_config_put(oa_bo->oa_config);
	i915_vma_put(oa_bo->vma);
	kfree(oa_bo);
}

static u32 gen12_oa_hw_tail_read(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	return intel_uncore_read(uncore, GEN12_OAG_OATAILPTR) &
	       GEN12_OAG_OATAILPTR_MASK;
}

static u32 gen8_oa_hw_tail_read(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	return intel_uncore_read(uncore, GEN8_OATAILPTR) & GEN8_OATAILPTR_MASK;
}

static u32 gen7_oa_hw_tail_read(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);

	return oastatus1 & GEN7_OASTATUS1_TAIL_MASK;
}

/**
 * oa_buffer_check_unlocked - check for data and update tail ptr state
 * @stream: i915 stream instance
 *
 * This is either called via fops (for blocking reads in user ctx) or the poll
 * check hrtimer (atomic ctx) to check the OA buffer tail pointer and determine
 * if there is data available for userspace to read.
 *
 * This function is central to providing a workaround for the OA unit tail
 * pointer having a race with respect to what data is visible to the CPU.
 * It is responsible for reading tail pointers from the hardware and giving
 * the pointers time to 'age' before they are made available for reading.
 * (See description of OA_TAIL_MARGIN_NSEC above for further details.)
 *
 * Besides returning true when there is data available to read() this function
 * also updates the tail, aging_tail and aging_timestamp in the oa_buffer
 * object.
 *
 * Note: It's safe to read OA config state here unlocked, assuming that this is
 * only called while the stream is enabled, while the global OA configuration
 * can't be modified.
 *
 * Returns: %true if the OA buffer contains data, else %false
 */
static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream)
{
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	int report_size = stream->oa_buffer.format->size;
	unsigned long flags;
	bool pollin;
	u32 hw_tail;
	u64 now;

	/* We have to consider the (unlikely) possibility that read() errors
	 * could result in an OA buffer reset which might reset the head and
	 * tail state.
	 */
	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	hw_tail = stream->perf->ops.oa_hw_tail_read(stream);

	/* The tail pointer increases in 64 byte increments,
	 * not in report_size steps...
	 */
	hw_tail &= ~(report_size - 1);

	now = ktime_get_mono_fast_ns();

	if (hw_tail == stream->oa_buffer.aging_tail &&
	    (now - stream->oa_buffer.aging_timestamp) > OA_TAIL_MARGIN_NSEC) {
		/* If the HW tail hasn't moved since the last check and the HW
		 * tail has been aging for long enough, declare it the new
		 * tail.
		 */
		stream->oa_buffer.tail = stream->oa_buffer.aging_tail;
	} else {
		u32 head, tail, aged_tail;

		/* NB: The head we observe here might effectively be a little
		 * out of date. If a read() is in progress, the head could be
		 * anywhere between this head and stream->oa_buffer.tail.
		 */
		head = stream->oa_buffer.head - gtt_offset;
		aged_tail = stream->oa_buffer.tail - gtt_offset;

		hw_tail -= gtt_offset;
		tail = hw_tail;

		/* Walk the stream backward until we find a report with dword 0
		 * & 1 not at 0. Since the circular buffer pointers progress by
		 * increments of 64 bytes and reports can be up to 256 bytes
		 * long, we can't tell whether a report has fully landed in
		 * memory before the first 2 dwords of the following report
		 * have effectively landed.
		 *
		 * This is assuming that the writes of the OA unit land in
		 * memory in the order they were written to.
		 * If not : (╯°□°)╯︵ ┻━┻
		 */
		while (OA_TAKEN(tail, aged_tail) >= report_size) {
			u32 *report32 = (void *)(stream->oa_buffer.vaddr + tail);

			if (report32[0] != 0 || report32[1] != 0)
				break;

			tail = (tail - report_size) & (OA_BUFFER_SIZE - 1);
		}

		if (OA_TAKEN(hw_tail, tail) > report_size &&
		    __ratelimit(&stream->perf->tail_pointer_race))
			drm_notice(&stream->uncore->i915->drm,
				   "unlanded report(s) head=0x%x tail=0x%x hw_tail=0x%x\n",
				   head, tail, hw_tail);

		stream->oa_buffer.tail = gtt_offset + tail;
		stream->oa_buffer.aging_tail = gtt_offset + hw_tail;
		stream->oa_buffer.aging_timestamp = now;
	}

	pollin = OA_TAKEN(stream->oa_buffer.tail - gtt_offset,
			  stream->oa_buffer.head - gtt_offset) >= report_size;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	return pollin;
}

/**
 * append_oa_status - Appends a status record to a userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @type: The kind of status to report to userspace
 *
 * Writes a status record (such as `DRM_I915_PERF_RECORD_OA_REPORT_LOST`)
 * into the userspace read() buffer.
 *
 * The @buf @offset will only be updated on success.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int append_oa_status(struct i915_perf_stream *stream,
			    char __user *buf,
			    size_t count,
			    size_t *offset,
			    enum drm_i915_perf_record_type type)
{
	struct drm_i915_perf_record_header header = { type, 0, sizeof(header) };

	if ((count - *offset) < header.size)
		return -ENOSPC;

	if (copy_to_user(buf + *offset, &header, sizeof(header)))
		return -EFAULT;

	(*offset) += header.size;

	return 0;
}

/**
 * append_oa_sample - Copies single OA report into userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @report: A single OA report to (optionally) include as part of the sample
 *
 * The contents of a sample are configured through `DRM_I915_PERF_PROP_SAMPLE_*`
 * properties when opening a stream, tracked as `stream->sample_flags`. This
 * function copies the requested components of a single sample to the given
 * read() @buf.
 *
 * The @buf @offset will only be updated on success.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int append_oa_sample(struct i915_perf_stream *stream,
			    char __user *buf,
			    size_t count,
			    size_t *offset,
			    const u8 *report)
{
	int report_size = stream->oa_buffer.format->size;
	struct drm_i915_perf_record_header header;

	header.type = DRM_I915_PERF_RECORD_SAMPLE;
	header.pad = 0;
	header.size = stream->sample_size;

	if ((count - *offset) < header.size)
		return -ENOSPC;

	buf += *offset;
	if (copy_to_user(buf, &header, sizeof(header)))
		return -EFAULT;
	buf += sizeof(header);

	if (copy_to_user(buf, report, report_size))
		return -EFAULT;

	(*offset) += header.size;

	return 0;
}
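/*
 * For reference (not compiled here): a sketch of how userspace might walk the
 * records produced by the append_* helpers above. Each read() returns a
 * sequence of variable-size records, each prefixed with a
 * struct drm_i915_perf_record_header; buf/len are assumed to come from a
 * successful read() on the stream fd, and process_oa_report() is a
 * hypothetical helper.
 *
 *	const uint8_t *p = buf;
 *	while (p < buf + len) {
 *		const struct drm_i915_perf_record_header *hdr = (const void *)p;
 *
 *		switch (hdr->type) {
 *		case DRM_I915_PERF_RECORD_SAMPLE:
 *			process_oa_report(hdr + 1);
 *			break;
 *		case DRM_I915_PERF_RECORD_OA_REPORT_LOST:
 *		case DRM_I915_PERF_RECORD_OA_BUFFER_LOST:
 *			// reports are no longer contiguous; resynchronize
 *			break;
 *		}
 *		p += hdr->size;
 *	}
 */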
/**
 * gen8_append_oa_reports - Copies all buffered OA reports into
 *			    userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Notably any error condition resulting in a short read (-%ENOSPC or
 * -%EFAULT) will be returned even though one or more records may
 * have been successfully copied. In this case it's up to the caller
 * to decide if the error should be squashed before returning to
 * userspace.
 *
 * Note: reports are consumed from the head, and appended to the
 * tail, so the tail chases the head?... If you think that's mad
 * and back-to-front you're not alone, but this follows the
 * Gen PRM naming convention.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int gen8_append_oa_reports(struct i915_perf_stream *stream,
				  char __user *buf,
				  size_t count,
				  size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	int report_size = stream->oa_buffer.format->size;
	u8 *oa_buf_base = stream->oa_buffer.vaddr;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	u32 mask = (OA_BUFFER_SIZE - 1);
	size_t start_offset = *offset;
	unsigned long flags;
	u32 head, tail;
	int ret = 0;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled))
		return -EIO;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	head = stream->oa_buffer.head;
	tail = stream->oa_buffer.tail;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/*
	 * NB: oa_buffer.head/tail include the gtt_offset which we don't want
	 * while indexing relative to oa_buf_base.
	 */
	head -= gtt_offset;
	tail -= gtt_offset;

	/*
	 * An out of bounds or misaligned head or tail pointer implies a driver
	 * bug since we validate + align the tail pointers we read from the
	 * hardware and we are in full control of the head pointer which should
	 * only be incremented by multiples of the report size (notably also
	 * all a power of two).
	 */
	if (drm_WARN_ONCE(&uncore->i915->drm,
			  head > OA_BUFFER_SIZE || head % report_size ||
			  tail > OA_BUFFER_SIZE || tail % report_size,
			  "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
			  head, tail))
		return -EIO;


	for (/* none */;
	     OA_TAKEN(tail, head);
	     head = (head + report_size) & mask) {
		u8 *report = oa_buf_base + head;
		u32 *report32 = (void *)report;
		u32 ctx_id;
		u32 reason;

		/*
		 * All the report sizes factor neatly into the buffer
		 * size so we never expect to see a report split
		 * between the beginning and end of the buffer.
		 *
		 * Given the initial alignment check a misalignment
		 * here would imply a driver bug that would result
		 * in an overrun.
		 */
		if (drm_WARN_ON(&uncore->i915->drm,
				(OA_BUFFER_SIZE - head) < report_size)) {
			drm_err(&uncore->i915->drm,
				"Spurious OA head ptr: non-integral report offset\n");
			break;
		}

		/*
		 * The reason field includes flags identifying what
		 * triggered this specific report (mostly timer
		 * triggered or e.g. due to a context switch).
		 *
		 * This field is never expected to be zero so we can
		 * check that the report isn't invalid before copying
		 * it to userspace...
		 */
		reason = ((report32[0] >> OAREPORT_REASON_SHIFT) &
			  (GRAPHICS_VER(stream->perf->i915) == 12 ?
			   OAREPORT_REASON_MASK_EXTENDED :
			   OAREPORT_REASON_MASK));

		ctx_id = report32[2] & stream->specific_ctx_id_mask;

		/*
		 * Squash whatever is in the CTX_ID field if it's marked as
		 * invalid to be sure we avoid false-positive, single-context
		 * filtering below...
		 *
		 * Note: that we don't clear the valid_ctx_bit so userspace can
		 * understand that the ID has been squashed by the kernel.
		 */
		if (!(report32[0] & stream->perf->gen8_valid_ctx_bit) &&
		    GRAPHICS_VER(stream->perf->i915) <= 11)
			ctx_id = report32[2] = INVALID_CTX_ID;

		/*
		 * NB: For Gen 8 the OA unit no longer supports clock gating
		 * off for a specific context and the kernel can't securely
		 * stop the counters from updating as system-wide / global
		 * values.
		 *
		 * Automatic reports now include a context ID so reports can be
		 * filtered on the cpu but it's not worth trying to
		 * automatically subtract/hide counter progress for other
		 * contexts while filtering since we can't stop userspace
		 * issuing MI_REPORT_PERF_COUNT commands which would still
		 * provide a side-band view of the real values.
		 *
		 * To allow userspace (such as Mesa/GL_INTEL_performance_query)
		 * to normalize counters for a single filtered context then it
		 * needs to be forwarded bookend context-switch reports so that
		 * it can track switches in between MI_REPORT_PERF_COUNT
		 * commands and can itself subtract/ignore the progress of
		 * counters associated with other contexts. Note that the
		 * hardware automatically triggers reports when switching to a
		 * new context which are tagged with the ID of the newly active
		 * context. To avoid the complexity (and likely fragility) of
		 * reading ahead while parsing reports to try and minimize
		 * forwarding redundant context switch reports (i.e. between
		 * other, unrelated contexts) we simply elect to forward them
		 * all.
		 *
		 * We don't rely solely on the reason field to identify context
		 * switches since it's not-uncommon for periodic samples to
		 * identify a switch before any 'context switch' report.
		 */
		if (!stream->ctx ||
		    stream->specific_ctx_id == ctx_id ||
		    stream->oa_buffer.last_ctx_id == stream->specific_ctx_id ||
		    reason & OAREPORT_REASON_CTX_SWITCH) {

			/*
			 * While filtering for a single context we avoid
			 * leaking the IDs of other contexts.
			 */
			if (stream->ctx &&
			    stream->specific_ctx_id != ctx_id) {
				report32[2] = INVALID_CTX_ID;
			}

			ret = append_oa_sample(stream, buf, count, offset,
					       report);
			if (ret)
				break;

			stream->oa_buffer.last_ctx_id = ctx_id;
		}

		/*
		 * Clear out the first 2 dwords as a means to detect unlanded
		 * reports.
		 */
		report32[0] = 0;
		report32[1] = 0;
	}

	if (start_offset != *offset) {
		i915_reg_t oaheadptr;

		oaheadptr = GRAPHICS_VER(stream->perf->i915) == 12 ?
			    GEN12_OAG_OAHEADPTR : GEN8_OAHEADPTR;

		spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

		/*
		 * We removed the gtt_offset for the copy loop above, indexing
		 * relative to oa_buf_base so put back here...
		 */
		head += gtt_offset;
		intel_uncore_write(uncore, oaheadptr,
				   head & GEN12_OAG_OAHEADPTR_MASK);
		stream->oa_buffer.head = head;

		spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
	}

	return ret;
}

/**
 * gen8_oa_read - copy status records then buffered OA reports
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Checks OA unit status registers and if necessary appends corresponding
 * status records for userspace (such as for a buffer full condition) and then
 * initiates appending any buffered OA reports.
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * NB: some data may be successfully copied to the userspace buffer
 * even if an error is returned, and this is reflected in the
 * updated @offset.
 *
 * Returns: zero on success or a negative error code
 */
static int gen8_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 oastatus;
	i915_reg_t oastatus_reg;
	int ret;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
		return -EIO;

	oastatus_reg = GRAPHICS_VER(stream->perf->i915) == 12 ?
		       GEN12_OAG_OASTATUS : GEN8_OASTATUS;

	oastatus = intel_uncore_read(uncore, oastatus_reg);

	/*
	 * We treat OABUFFER_OVERFLOW as a significant error:
	 *
	 * Although theoretically we could handle this more gracefully
	 * sometimes, some Gens don't correctly suppress certain
	 * automatically triggered reports in this condition and so we
	 * have to assume that old reports are now being trampled
	 * over.
	 *
	 * Considering how we don't currently give userspace control
	 * over the OA buffer size and always configure a large 16MB
	 * buffer, then a buffer overflow does anyway likely indicate
	 * that something has gone quite badly wrong.
	 */
	if (oastatus & GEN8_OASTATUS_OABUFFER_OVERFLOW) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
		if (ret)
			return ret;

		drm_dbg(&stream->perf->i915->drm,
			"OA buffer overflow (exponent = %d): force restart\n",
			stream->period_exponent);

		stream->perf->ops.oa_disable(stream);
		stream->perf->ops.oa_enable(stream);

		/*
		 * Note: .oa_enable() is expected to re-init the oabuffer and
		 * reset GEN8_OASTATUS for us
		 */
		oastatus = intel_uncore_read(uncore, oastatus_reg);
	}

	if (oastatus & GEN8_OASTATUS_REPORT_LOST) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
		if (ret)
			return ret;

		intel_uncore_rmw(uncore, oastatus_reg,
				 GEN8_OASTATUS_COUNTER_OVERFLOW |
				 GEN8_OASTATUS_REPORT_LOST,
				 IS_GRAPHICS_VER(uncore->i915, 8, 11) ?
				 (GEN8_OASTATUS_HEAD_POINTER_WRAP |
				  GEN8_OASTATUS_TAIL_POINTER_WRAP) : 0);
	}

	return gen8_append_oa_reports(stream, buf, count, offset);
}

/**
 * gen7_append_oa_reports - Copies all buffered OA reports into
 *			    userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Notably any error condition resulting in a short read (-%ENOSPC or
 * -%EFAULT) will be returned even though one or more records may
 * have been successfully copied. In this case it's up to the caller
 * to decide if the error should be squashed before returning to
 * userspace.
 *
 * Note: reports are consumed from the head, and appended to the
 * tail, so the tail chases the head?... If you think that's mad
 * and back-to-front you're not alone, but this follows the
 * Gen PRM naming convention.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int gen7_append_oa_reports(struct i915_perf_stream *stream,
				  char __user *buf,
				  size_t count,
				  size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	int report_size = stream->oa_buffer.format->size;
	u8 *oa_buf_base = stream->oa_buffer.vaddr;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	u32 mask = (OA_BUFFER_SIZE - 1);
	size_t start_offset = *offset;
	unsigned long flags;
	u32 head, tail;
	int ret = 0;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled))
		return -EIO;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	head = stream->oa_buffer.head;
	tail = stream->oa_buffer.tail;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/* NB: oa_buffer.head/tail include the gtt_offset which we don't want
	 * while indexing relative to oa_buf_base.
	 */
	head -= gtt_offset;
	tail -= gtt_offset;

	/* An out of bounds or misaligned head or tail pointer implies a driver
	 * bug since we validate + align the tail pointers we read from the
	 * hardware and we are in full control of the head pointer which should
	 * only be incremented by multiples of the report size (notably also
	 * all a power of two).
	 */
	if (drm_WARN_ONCE(&uncore->i915->drm,
			  head > OA_BUFFER_SIZE || head % report_size ||
			  tail > OA_BUFFER_SIZE || tail % report_size,
			  "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
			  head, tail))
		return -EIO;


	for (/* none */;
	     OA_TAKEN(tail, head);
	     head = (head + report_size) & mask) {
		u8 *report = oa_buf_base + head;
		u32 *report32 = (void *)report;

		/* All the report sizes factor neatly into the buffer
		 * size so we never expect to see a report split
		 * between the beginning and end of the buffer.
		 *
		 * Given the initial alignment check a misalignment
		 * here would imply a driver bug that would result
		 * in an overrun.
		 */
		if (drm_WARN_ON(&uncore->i915->drm,
				(OA_BUFFER_SIZE - head) < report_size)) {
			drm_err(&uncore->i915->drm,
				"Spurious OA head ptr: non-integral report offset\n");
			break;
		}

		/* The report-ID field for periodic samples includes
		 * some undocumented flags related to what triggered
		 * the report and is never expected to be zero so we
		 * can check that the report isn't invalid before
		 * copying it to userspace...
		 */
		if (report32[0] == 0) {
			if (__ratelimit(&stream->perf->spurious_report_rs))
				drm_notice(&uncore->i915->drm,
					   "Skipping spurious, invalid OA report\n");
			continue;
		}

		ret = append_oa_sample(stream, buf, count, offset, report);
		if (ret)
			break;

		/* Clear out the first 2 dwords as a means to detect unlanded
		 * reports.
		 */
		report32[0] = 0;
		report32[1] = 0;
	}

	if (start_offset != *offset) {
		spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

		/* We removed the gtt_offset for the copy loop above, indexing
		 * relative to oa_buf_base so put back here...
		 */
		head += gtt_offset;

		intel_uncore_write(uncore, GEN7_OASTATUS2,
				   (head & GEN7_OASTATUS2_HEAD_MASK) |
				   GEN7_OASTATUS2_MEM_SELECT_GGTT);
		stream->oa_buffer.head = head;

		spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
	}

	return ret;
}

/**
 * gen7_oa_read - copy status records then buffered OA reports
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Checks Gen 7 specific OA unit status registers and if necessary appends
 * corresponding status records for userspace (such as for a buffer full
 * condition) and then initiates appending any buffered OA reports.
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * Returns: zero on success or a negative error code
 */
static int gen7_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 oastatus1;
	int ret;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
		return -EIO;

	oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);

	/* XXX: On Haswell we don't have a safe way to clear oastatus1
	 * bits while the OA unit is enabled (while the tail pointer
	 * may be updated asynchronously) so we ignore status bits
	 * that have already been reported to userspace.
	 */
	oastatus1 &= ~stream->perf->gen7_latched_oastatus1;

	/* We treat OABUFFER_OVERFLOW as a significant error:
	 *
	 * - The status can be interpreted to mean that the buffer is
	 *   currently full (with a higher precedence than OA_TAKEN()
	 *   which will start to report a near-empty buffer after an
	 *   overflow) but it's awkward that we can't clear the status
	 *   on Haswell, so without a reset we won't be able to catch
	 *   the state again.
	 *
	 * - Since it also implies the HW has started overwriting old
	 *   reports it may also affect our sanity checks for invalid
	 *   reports when copying to userspace that assume new reports
	 *   are being written to cleared memory.
	 *
	 * - In the future we may want to introduce a flight recorder
	 *   mode where the driver will automatically maintain a safe
	 *   guard band between head/tail, avoiding this overflow
	 *   condition, but we avoid the added driver complexity for
	 *   now.
	 */
	if (unlikely(oastatus1 & GEN7_OASTATUS1_OABUFFER_OVERFLOW)) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
		if (ret)
			return ret;

		drm_dbg(&stream->perf->i915->drm,
			"OA buffer overflow (exponent = %d): force restart\n",
			stream->period_exponent);

		stream->perf->ops.oa_disable(stream);
		stream->perf->ops.oa_enable(stream);

		oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);
	}

	if (unlikely(oastatus1 & GEN7_OASTATUS1_REPORT_LOST)) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
		if (ret)
			return ret;
		stream->perf->gen7_latched_oastatus1 |=
			GEN7_OASTATUS1_REPORT_LOST;
	}

	return gen7_append_oa_reports(stream, buf, count, offset);
}

/**
 * i915_oa_wait_unlocked - handles blocking IO until OA data available
 * @stream: An i915-perf stream opened for OA metrics
 *
 * Called when userspace tries to read() from a blocking stream FD opened
 * for OA metrics. It waits until the hrtimer callback finds a non-empty
 * OA buffer and wakes us.
 *
 * Note: it's acceptable to have this return with some false positives
 * since any subsequent read handling will return -EAGAIN if there isn't
 * really data ready for userspace yet.
 *
 * Returns: zero on success or a negative error code
 */
static int i915_oa_wait_unlocked(struct i915_perf_stream *stream)
{
	/* We would wait indefinitely if periodic sampling is not enabled */
	if (!stream->periodic)
		return -EIO;

	return wait_event_interruptible(stream->poll_wq,
					oa_buffer_check_unlocked(stream));
}

/**
 * i915_oa_poll_wait - call poll_wait() for an OA stream poll()
 * @stream: An i915-perf stream opened for OA metrics
 * @file: An i915 perf stream file
 * @wait: poll() state table
 *
 * For handling userspace polling on an i915 perf stream opened for OA metrics,
 * this starts a poll_wait with the wait queue that our hrtimer callback wakes
 * when it sees data ready to read in the circular OA buffer.
 */
static void i915_oa_poll_wait(struct i915_perf_stream *stream,
			      struct file *file,
			      poll_table *wait)
{
	poll_wait(file, &stream->poll_wq, wait);
}

/**
 * i915_oa_read - just calls through to &i915_oa_ops->read
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * Returns: zero on success or a negative error code
 */
static int i915_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	return stream->perf->ops.read(stream, buf, count, offset);
}

static struct intel_context *oa_pin_context(struct i915_perf_stream *stream)
{
	struct i915_gem_engines_iter it;
	struct i915_gem_context *ctx = stream->ctx;
	struct intel_context *ce;
	struct i915_gem_ww_ctx ww;
	int err = -ENODEV;

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		if (ce->engine != stream->engine) /* first match! */
			continue;

		err = 0;
		break;
	}
	i915_gem_context_unlock_engines(ctx);

	if (err)
		return ERR_PTR(err);

	i915_gem_ww_ctx_init(&ww, true);
retry:
	/*
	 * As the ID is the gtt offset of the context's vma we
	 * pin the vma to ensure the ID remains fixed.
	 */
	err = intel_context_pin_ww(ce, &ww);
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);

	if (err)
		return ERR_PTR(err);

	stream->pinned_ctx = ce;
	return stream->pinned_ctx;
}

static int
__store_reg_to_mem(struct i915_request *rq, i915_reg_t reg, u32 ggtt_offset)
{
	u32 *cs, cmd;

	cmd = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	if (GRAPHICS_VER(rq->engine->i915) >= 8)
		cmd++;

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = cmd;
	*cs++ = i915_mmio_reg_offset(reg);
	*cs++ = ggtt_offset;
	*cs++ = 0;

	intel_ring_advance(rq, cs);

	return 0;
}

static int
__read_reg(struct intel_context *ce, i915_reg_t reg, u32 ggtt_offset)
{
	struct i915_request *rq;
	int err;

	rq = i915_request_create(ce);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	i915_request_get(rq);

	err = __store_reg_to_mem(rq, reg, ggtt_offset);

	i915_request_add(rq);
	if (!err && i915_request_wait(rq, 0, HZ / 2) < 0)
		err = -ETIME;

	i915_request_put(rq);

	return err;
}

static int
gen12_guc_sw_ctx_id(struct intel_context *ce, u32 *ctx_id)
{
	struct i915_vma *scratch;
	u32 *val;
	int err;

	scratch = __vm_create_scratch_for_read_pinned(&ce->engine->gt->ggtt->vm, 4);
	if (IS_ERR(scratch))
		return PTR_ERR(scratch);

	err = i915_vma_sync(scratch);
	if (err)
		goto err_scratch;

	err = __read_reg(ce, RING_EXECLIST_STATUS_HI(ce->engine->mmio_base),
			 i915_ggtt_offset(scratch));
	if (err)
		goto err_scratch;

	val = i915_gem_object_pin_map_unlocked(scratch->obj, I915_MAP_WB);
	if (IS_ERR(val)) {
		err = PTR_ERR(val);
		goto err_scratch;
	}

	*ctx_id = *val;
	i915_gem_object_unpin_map(scratch->obj);

err_scratch:
	i915_vma_unpin_and_release(&scratch, 0);
	return err;
}

/*
 * For execlist mode of submission, pick an unused context id
 * 0 - (NUM_CONTEXT_TAG - 1) are used by other contexts
 * XXX_MAX_CONTEXT_HW_ID is used by idle context
 *
 * For GuC mode of submission read context id from the upper dword of the
 * EXECLIST_STATUS register. Note that we read this value only once and expect
 * that the value stays fixed for the entire OA use case. There are cases where
 * GuC KMD implementation may deregister a context to reuse its context id, but
 * we prevent that from happening to the OA context by pinning it.
 */
static int gen12_get_render_context_id(struct i915_perf_stream *stream)
{
	u32 ctx_id, mask;
	int ret;

	if (intel_engine_uses_guc(stream->engine)) {
		ret = gen12_guc_sw_ctx_id(stream->pinned_ctx, &ctx_id);
		if (ret)
			return ret;

		mask = ((1U << GEN12_GUC_SW_CTX_ID_WIDTH) - 1) <<
			(GEN12_GUC_SW_CTX_ID_SHIFT - 32);
	} else if (GRAPHICS_VER_FULL(stream->engine->i915) >= IP_VER(12, 50)) {
		ctx_id = (XEHP_MAX_CONTEXT_HW_ID - 1) <<
			(XEHP_SW_CTX_ID_SHIFT - 32);

		mask = ((1U << XEHP_SW_CTX_ID_WIDTH) - 1) <<
			(XEHP_SW_CTX_ID_SHIFT - 32);
	} else {
		ctx_id = (GEN12_MAX_CONTEXT_HW_ID - 1) <<
			 (GEN11_SW_CTX_ID_SHIFT - 32);

		mask = ((1U << GEN11_SW_CTX_ID_WIDTH) - 1) <<
			(GEN11_SW_CTX_ID_SHIFT - 32);
	}
	stream->specific_ctx_id = ctx_id & mask;
	stream->specific_ctx_id_mask = mask;

	return 0;
}

static bool oa_find_reg_in_lri(u32 *state, u32 reg, u32 *offset, u32 end)
{
	u32 idx = *offset;
	u32 len = min(MI_LRI_LEN(state[idx]) + idx, end);
	bool found = false;

	idx++;
	for (; idx < len; idx += 2) {
		if (state[idx] == reg) {
			found = true;
			break;
		}
	}

	*offset = idx;
	return found;
}

static u32 oa_context_image_offset(struct intel_context *ce, u32 reg)
{
	u32 offset, len = (ce->engine->context_size - PAGE_SIZE) / 4;
	u32 *state = ce->lrc_reg_state;

	if (drm_WARN_ON(&ce->engine->i915->drm, !state))
		return U32_MAX;

	for (offset = 0; offset < len; ) {
		if (IS_MI_LRI_CMD(state[offset])) {
			/*
			 * We expect reg-value pairs in MI_LRI command, so
			 * MI_LRI_LEN() should be even, if not, issue a warning.
			 */
			drm_WARN_ON(&ce->engine->i915->drm,
				    MI_LRI_LEN(state[offset]) & 0x1);

			if (oa_find_reg_in_lri(state, reg, &offset, len))
				break;
		} else {
			offset++;
		}
	}

	return offset < len ? offset : U32_MAX;
}

static int set_oa_ctx_ctrl_offset(struct intel_context *ce)
{
	i915_reg_t reg = GEN12_OACTXCONTROL(ce->engine->mmio_base);
	struct i915_perf *perf = &ce->engine->i915->perf;
	u32 offset = perf->ctx_oactxctrl_offset;

	/* Do this only once. Failure is stored as offset of U32_MAX */
	if (offset)
		goto exit;

	offset = oa_context_image_offset(ce, i915_mmio_reg_offset(reg));
	perf->ctx_oactxctrl_offset = offset;

	drm_dbg(&ce->engine->i915->drm,
		"%s oa ctx control at 0x%08x dword offset\n",
		ce->engine->name, offset);

exit:
	return offset && offset != U32_MAX ? 0 : -ENODEV;
}

static bool engine_supports_mi_query(struct intel_engine_cs *engine)
{
	return engine->class == RENDER_CLASS;
}

/**
 * oa_get_render_ctx_id - determine and hold ctx hw id
 * @stream: An i915-perf stream opened for OA metrics
 *
 * Determine the render context hw id, and ensure it remains fixed for the
 * lifetime of the stream. This ensures that we don't have to worry about
 * updating the context ID in OACONTROL on the fly.
 *
 * Returns: zero on success or a negative error code
 */
static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
{
	struct intel_context *ce;
	int ret = 0;

	ce = oa_pin_context(stream);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	if (engine_supports_mi_query(stream->engine) &&
	    HAS_LOGICAL_RING_CONTEXTS(stream->perf->i915)) {
		/*
		 * We are enabling perf query here. If we don't find the context
		 * offset here, just return an error.
		 */
		ret = set_oa_ctx_ctrl_offset(ce);
		if (ret) {
			intel_context_unpin(ce);
			drm_err(&stream->perf->i915->drm,
				"Enabling perf query failed for %s\n",
				stream->engine->name);
			return ret;
		}
	}

	switch (GRAPHICS_VER(ce->engine->i915)) {
	case 7: {
		/*
		 * On Haswell we don't do any post processing of the reports
		 * and don't need to use the mask.
		 */
		stream->specific_ctx_id = i915_ggtt_offset(ce->state);
		stream->specific_ctx_id_mask = 0;
		break;
	}

	case 8:
	case 9:
		if (intel_engine_uses_guc(ce->engine)) {
			/*
			 * When using GuC, the context descriptor we write in
			 * i915 is read by GuC and rewritten before it's
			 * actually written into the hardware. The LRCA is
			 * what is put into the context id field of the
			 * context descriptor by GuC. Because it's aligned to
			 * a page, the lower 12bits are always at 0 and
			 * dropped by GuC. They won't be part of the context
			 * ID in the OA reports, so squash those lower bits.
			 */
			stream->specific_ctx_id = ce->lrc.lrca >> 12;

			/*
			 * GuC uses the top bit to signal proxy submission, so
			 * ignore that bit.
			 */
			stream->specific_ctx_id_mask =
				(1U << (GEN8_CTX_ID_WIDTH - 1)) - 1;
		} else {
			stream->specific_ctx_id_mask =
				(1U << GEN8_CTX_ID_WIDTH) - 1;
			stream->specific_ctx_id = stream->specific_ctx_id_mask;
		}
		break;

	case 11:
	case 12:
		ret = gen12_get_render_context_id(stream);
		break;

	default:
		MISSING_CASE(GRAPHICS_VER(ce->engine->i915));
	}

	ce->tag = stream->specific_ctx_id;

	drm_dbg(&stream->perf->i915->drm,
		"filtering on ctx_id=0x%x ctx_id_mask=0x%x\n",
		stream->specific_ctx_id,
		stream->specific_ctx_id_mask);

	return ret;
}
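/*
 * Informal recap (not compiled) of how the id/mask pair set up above is
 * consumed: when filtering reports in gen8_append_oa_reports(), dword 2 of
 * each report is masked before comparison, so only the bits the submission
 * backend actually preserves in the OA context ID field participate:
 *
 *	ctx_id = report32[2] & stream->specific_ctx_id_mask;
 *	if (stream->ctx && ctx_id != stream->specific_ctx_id)
 *		report32[2] = INVALID_CTX_ID;	// squash foreign IDs
 *
 * e.g. on Gen8 execlists the mask is (1U << GEN8_CTX_ID_WIDTH) - 1, so any
 * bits the hardware reports outside that width are ignored.
 */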
1533 */ 1534 static void oa_put_render_ctx_id(struct i915_perf_stream *stream) 1535 { 1536 struct intel_context *ce; 1537 1538 ce = fetch_and_zero(&stream->pinned_ctx); 1539 if (ce) { 1540 ce->tag = 0; /* recomputed on next submission after parking */ 1541 intel_context_unpin(ce); 1542 } 1543 1544 stream->specific_ctx_id = INVALID_CTX_ID; 1545 stream->specific_ctx_id_mask = 0; 1546 } 1547 1548 static void 1549 free_oa_buffer(struct i915_perf_stream *stream) 1550 { 1551 i915_vma_unpin_and_release(&stream->oa_buffer.vma, 1552 I915_VMA_RELEASE_MAP); 1553 1554 stream->oa_buffer.vaddr = NULL; 1555 } 1556 1557 static void 1558 free_oa_configs(struct i915_perf_stream *stream) 1559 { 1560 struct i915_oa_config_bo *oa_bo, *tmp; 1561 1562 i915_oa_config_put(stream->oa_config); 1563 llist_for_each_entry_safe(oa_bo, tmp, stream->oa_config_bos.first, node) 1564 free_oa_config_bo(oa_bo); 1565 } 1566 1567 static void 1568 free_noa_wait(struct i915_perf_stream *stream) 1569 { 1570 i915_vma_unpin_and_release(&stream->noa_wait, 0); 1571 } 1572 1573 static void i915_oa_stream_destroy(struct i915_perf_stream *stream) 1574 { 1575 struct i915_perf *perf = stream->perf; 1576 struct intel_gt *gt = stream->engine->gt; 1577 1578 if (WARN_ON(stream != gt->perf.exclusive_stream)) 1579 return; 1580 1581 /* 1582 * Unset exclusive_stream first, it will be checked while disabling 1583 * the metric set on gen8+. 1584 * 1585 * See i915_oa_init_reg_state() and lrc_configure_all_contexts() 1586 */ 1587 WRITE_ONCE(gt->perf.exclusive_stream, NULL); 1588 perf->ops.disable_metric_set(stream); 1589 1590 free_oa_buffer(stream); 1591 1592 /* 1593 * Wa_16011777198:dg2: Unset the override of GUCRC mode to enable rc6. 1594 */ 1595 if (stream->override_gucrc) 1596 drm_WARN_ON(>->i915->drm, 1597 intel_guc_slpc_unset_gucrc_mode(>->uc.guc.slpc)); 1598 1599 intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL); 1600 intel_engine_pm_put(stream->engine); 1601 1602 if (stream->ctx) 1603 oa_put_render_ctx_id(stream); 1604 1605 free_oa_configs(stream); 1606 free_noa_wait(stream); 1607 1608 if (perf->spurious_report_rs.missed) { 1609 drm_notice(>->i915->drm, 1610 "%d spurious OA report notices suppressed due to ratelimiting\n", 1611 perf->spurious_report_rs.missed); 1612 } 1613 } 1614 1615 static void gen7_init_oa_buffer(struct i915_perf_stream *stream) 1616 { 1617 struct intel_uncore *uncore = stream->uncore; 1618 u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma); 1619 unsigned long flags; 1620 1621 spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags); 1622 1623 /* Pre-DevBDW: OABUFFER must be set with counters off, 1624 * before OASTATUS1, but after OASTATUS2 1625 */ 1626 intel_uncore_write(uncore, GEN7_OASTATUS2, /* head */ 1627 gtt_offset | GEN7_OASTATUS2_MEM_SELECT_GGTT); 1628 stream->oa_buffer.head = gtt_offset; 1629 1630 intel_uncore_write(uncore, GEN7_OABUFFER, gtt_offset); 1631 1632 intel_uncore_write(uncore, GEN7_OASTATUS1, /* tail */ 1633 gtt_offset | OABUFFER_SIZE_16M); 1634 1635 /* Mark that we need updated tail pointers to read from... */ 1636 stream->oa_buffer.aging_tail = INVALID_TAIL_PTR; 1637 stream->oa_buffer.tail = gtt_offset; 1638 1639 spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags); 1640 1641 /* On Haswell we have to track which OASTATUS1 flags we've 1642 * already seen since they can't be cleared while periodic 1643 * sampling is enabled. 
1644 */ 1645 stream->perf->gen7_latched_oastatus1 = 0; 1646 1647 /* NB: although the OA buffer will initially be allocated 1648 * zeroed via shmfs (and so this memset is redundant when 1649 * first allocating), we may re-init the OA buffer, either 1650 * when re-enabling a stream or in error/reset paths. 1651 * 1652 * The reason we clear the buffer for each re-init is for the 1653 * sanity check in gen7_append_oa_reports() that looks at the 1654 * report-id field to make sure it's non-zero which relies on 1655 * the assumption that new reports are being written to zeroed 1656 * memory... 1657 */ 1658 memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE); 1659 } 1660 1661 static void gen8_init_oa_buffer(struct i915_perf_stream *stream) 1662 { 1663 struct intel_uncore *uncore = stream->uncore; 1664 u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma); 1665 unsigned long flags; 1666 1667 spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags); 1668 1669 intel_uncore_write(uncore, GEN8_OASTATUS, 0); 1670 intel_uncore_write(uncore, GEN8_OAHEADPTR, gtt_offset); 1671 stream->oa_buffer.head = gtt_offset; 1672 1673 intel_uncore_write(uncore, GEN8_OABUFFER_UDW, 0); 1674 1675 /* 1676 * PRM says: 1677 * 1678 * "This MMIO must be set before the OATAILPTR 1679 * register and after the OAHEADPTR register. This is 1680 * to enable proper functionality of the overflow 1681 * bit." 1682 */ 1683 intel_uncore_write(uncore, GEN8_OABUFFER, gtt_offset | 1684 OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT); 1685 intel_uncore_write(uncore, GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK); 1686 1687 /* Mark that we need updated tail pointers to read from... */ 1688 stream->oa_buffer.aging_tail = INVALID_TAIL_PTR; 1689 stream->oa_buffer.tail = gtt_offset; 1690 1691 /* 1692 * Reset state used to recognise context switches, affecting which 1693 * reports we will forward to userspace while filtering for a single 1694 * context. 1695 */ 1696 stream->oa_buffer.last_ctx_id = INVALID_CTX_ID; 1697 1698 spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags); 1699 1700 /* 1701 * NB: although the OA buffer will initially be allocated 1702 * zeroed via shmfs (and so this memset is redundant when 1703 * first allocating), we may re-init the OA buffer, either 1704 * when re-enabling a stream or in error/reset paths. 1705 * 1706 * The reason we clear the buffer for each re-init is for the 1707 * sanity check in gen8_append_oa_reports() that looks at the 1708 * reason field to make sure it's non-zero which relies on 1709 * the assumption that new reports are being written to zeroed 1710 * memory... 1711 */ 1712 memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE); 1713 } 1714 1715 static void gen12_init_oa_buffer(struct i915_perf_stream *stream) 1716 { 1717 struct intel_uncore *uncore = stream->uncore; 1718 u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma); 1719 unsigned long flags; 1720 1721 spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags); 1722 1723 intel_uncore_write(uncore, GEN12_OAG_OASTATUS, 0); 1724 intel_uncore_write(uncore, GEN12_OAG_OAHEADPTR, 1725 gtt_offset & GEN12_OAG_OAHEADPTR_MASK); 1726 stream->oa_buffer.head = gtt_offset; 1727 1728 /* 1729 * PRM says: 1730 * 1731 * "This MMIO must be set before the OATAILPTR 1732 * register and after the OAHEADPTR register. This is 1733 * to enable proper functionality of the overflow 1734 * bit." 
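	 *
	 * The sequence here follows that ordering: OASTATUS and OAHEADPTR
	 * are written first, then OABUFFER, and OATAILPTR last.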
	 */
	intel_uncore_write(uncore, GEN12_OAG_OABUFFER, gtt_offset |
			   OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT);
	intel_uncore_write(uncore, GEN12_OAG_OATAILPTR,
			   gtt_offset & GEN12_OAG_OATAILPTR_MASK);

	/* Mark that we need updated tail pointers to read from... */
	stream->oa_buffer.aging_tail = INVALID_TAIL_PTR;
	stream->oa_buffer.tail = gtt_offset;

	/*
	 * Reset state used to recognise context switches, affecting which
	 * reports we will forward to userspace while filtering for a single
	 * context.
	 */
	stream->oa_buffer.last_ctx_id = INVALID_CTX_ID;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/*
	 * NB: although the OA buffer will initially be allocated
	 * zeroed via shmfs (and so this memset is redundant when
	 * first allocating), we may re-init the OA buffer, either
	 * when re-enabling a stream or in error/reset paths.
	 *
	 * The reason we clear the buffer for each re-init is for the
	 * sanity check in gen8_append_oa_reports() that looks at the
	 * reason field to make sure it's non-zero, which relies on
	 * the assumption that new reports are being written to zeroed
	 * memory...
	 */
	memset(stream->oa_buffer.vaddr, 0,
	       stream->oa_buffer.vma->size);
}

static int alloc_oa_buffer(struct i915_perf_stream *stream)
{
	struct drm_i915_private *i915 = stream->perf->i915;
	struct intel_gt *gt = stream->engine->gt;
	struct drm_i915_gem_object *bo;
	struct i915_vma *vma;
	int ret;

	if (drm_WARN_ON(&i915->drm, stream->oa_buffer.vma))
		return -ENODEV;

	BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE);
	BUILD_BUG_ON(OA_BUFFER_SIZE < SZ_128K || OA_BUFFER_SIZE > SZ_16M);

	bo = i915_gem_object_create_shmem(stream->perf->i915, OA_BUFFER_SIZE);
	if (IS_ERR(bo)) {
		drm_err(&i915->drm, "Failed to allocate OA buffer\n");
		return PTR_ERR(bo);
	}

	i915_gem_object_set_cache_coherency(bo, I915_CACHE_LLC);

	vma = i915_vma_instance(bo, &gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unref;
	}

	/*
	 * Pre-HSW required 512K alignment.
	 * HSW and onwards, align to the requested size of the OA buffer.
	 */
	ret = i915_vma_pin(vma, 0, SZ_16M, PIN_GLOBAL | PIN_HIGH);
	if (ret) {
		drm_err(&gt->i915->drm, "Failed to pin OA buffer %d\n", ret);
		goto err_unref;
	}

	stream->oa_buffer.vma = vma;

	stream->oa_buffer.vaddr =
		i915_gem_object_pin_map_unlocked(bo, I915_MAP_WB);
	if (IS_ERR(stream->oa_buffer.vaddr)) {
		ret = PTR_ERR(stream->oa_buffer.vaddr);
		goto err_unpin;
	}

	return 0;

err_unpin:
	__i915_vma_unpin(vma);

err_unref:
	i915_gem_object_put(bo);

	stream->oa_buffer.vaddr = NULL;
	stream->oa_buffer.vma = NULL;

	return ret;
}

static u32 *save_restore_register(struct i915_perf_stream *stream, u32 *cs,
				  bool save, i915_reg_t reg, u32 offset,
				  u32 dword_count)
{
	u32 cmd;
	u32 d;

	cmd = save ?
	      MI_STORE_REGISTER_MEM : MI_LOAD_REGISTER_MEM;
	cmd |= MI_SRM_LRM_GLOBAL_GTT;
	if (GRAPHICS_VER(stream->perf->i915) >= 8)
		cmd++;

	for (d = 0; d < dword_count; d++) {
		*cs++ = cmd;
		*cs++ = i915_mmio_reg_offset(reg) + 4 * d;
		*cs++ = i915_ggtt_offset(stream->noa_wait) + offset + 4 * d;
		*cs++ = 0;
	}

	return cs;
}

static int alloc_noa_wait(struct i915_perf_stream *stream)
{
	struct drm_i915_private *i915 = stream->perf->i915;
	struct intel_gt *gt = stream->engine->gt;
	struct drm_i915_gem_object *bo;
	struct i915_vma *vma;
	const u64 delay_ticks = 0xffffffffffffffff -
		intel_gt_ns_to_clock_interval(to_gt(stream->perf->i915),
		atomic64_read(&stream->perf->noa_programming_delay));
	const u32 base = stream->engine->mmio_base;
#define CS_GPR(x) GEN8_RING_CS_GPR(base, x)
	u32 *batch, *ts0, *cs, *jump;
	struct i915_gem_ww_ctx ww;
	int ret, i;
	enum {
		START_TS,
		NOW_TS,
		DELTA_TS,
		JUMP_PREDICATE,
		DELTA_TARGET,
		N_CS_GPR
	};
	i915_reg_t mi_predicate_result = HAS_MI_SET_PREDICATE(i915) ?
					 MI_PREDICATE_RESULT_2_ENGINE(base) :
					 MI_PREDICATE_RESULT_1(RENDER_RING_BASE);

	/*
	 * gt->scratch was being used to save/restore the GPR registers, but on
	 * MTL the scratch uses stolen lmem. An MI_SRM to this memory region
	 * causes an engine hang. Instead allocate an additional page here to
	 * save/restore GPR registers.
	 */
	bo = i915_gem_object_create_internal(i915, 8192);
	if (IS_ERR(bo)) {
		drm_err(&i915->drm,
			"Failed to allocate NOA wait batchbuffer\n");
		return PTR_ERR(bo);
	}

	i915_gem_ww_ctx_init(&ww, true);
retry:
	ret = i915_gem_object_lock(bo, &ww);
	if (ret)
		goto out_ww;

	/*
	 * We pin in GGTT because multiple OA config BOs will jump into this
	 * buffer, so its address needs to stay fixed for the lifetime of the
	 * i915/perf stream.
	 */
	vma = i915_vma_instance(bo, &gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out_ww;
	}

	ret = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_GLOBAL | PIN_HIGH);
	if (ret)
		goto out_ww;

	batch = cs = i915_gem_object_pin_map(bo, I915_MAP_WB);
	if (IS_ERR(batch)) {
		ret = PTR_ERR(batch);
		goto err_unpin;
	}

	stream->noa_wait = vma;

#define GPR_SAVE_OFFSET 4096
#define PREDICATE_SAVE_OFFSET 4160

	/* Save registers. */
	for (i = 0; i < N_CS_GPR; i++)
		cs = save_restore_register(
			stream, cs, true /* save */, CS_GPR(i),
			GPR_SAVE_OFFSET + 8 * i, 2);
	cs = save_restore_register(
		stream, cs, true /* save */, mi_predicate_result,
		PREDICATE_SAVE_OFFSET, 1);

	/* First timestamp snapshot location. */
	ts0 = cs;

	/*
	 * Initial snapshot of the timestamp register to implement the wait.
	 * We work with 32-bit values, so clear out the top 32 bits of the
	 * register because the ALU works on 64 bits.
	 */
	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(CS_GPR(START_TS)) + 4;
	*cs++ = 0;
	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
	*cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base));
	*cs++ = i915_mmio_reg_offset(CS_GPR(START_TS));

	/*
	 * This is the location we're going to jump back into until the
	 * required amount of time has passed.
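	 *
	 * In rough pseudocode, the wait loop assembled below implements:
	 *
	 *   ts0:  START_TS = RING_TIMESTAMP            (low 32 bits)
	 *   jump: NOW_TS   = RING_TIMESTAMP
	 *         DELTA_TS = NOW_TS - START_TS         (MI_MATH SUB, CF = borrow)
	 *         if (CF) goto ts0                     (timestamp wrapped, resample)
	 *         CF = carry(DELTA_TS + DELTA_TARGET)  (set once DELTA_TS > delay)
	 *         if (!CF) goto jump                   (keep waiting)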
	 */
	jump = cs;

	/*
	 * Take another snapshot of the timestamp register. Take care to clear
	 * up the top 32 bits of CS_GPR(1) as we're using it for other
	 * operations below.
	 */
	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS)) + 4;
	*cs++ = 0;
	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
	*cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base));
	*cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS));

	/*
	 * Do a diff between the 2 timestamps and store the result back into
	 * CS_GPR(1).
	 */
	*cs++ = MI_MATH(5);
	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(NOW_TS));
	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(START_TS));
	*cs++ = MI_MATH_SUB;
	*cs++ = MI_MATH_STORE(MI_MATH_REG(DELTA_TS), MI_MATH_REG_ACCU);
	*cs++ = MI_MATH_STORE(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF);

	/*
	 * Transfer the carry flag (set to 1 if ts1 < ts0, meaning the
	 * timestamp has rolled over the 32 bits) into the predicate register
	 * to be used for the predicated jump.
	 */
	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
	*cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE));
	*cs++ = i915_mmio_reg_offset(mi_predicate_result);

	if (HAS_MI_SET_PREDICATE(i915))
		*cs++ = MI_SET_PREDICATE | 1;

	/* Restart from the beginning if we had timestamps roll over. */
	*cs++ = (GRAPHICS_VER(i915) < 8 ?
		 MI_BATCH_BUFFER_START :
		 MI_BATCH_BUFFER_START_GEN8) |
		MI_BATCH_PREDICATE;
	*cs++ = i915_ggtt_offset(vma) + (ts0 - batch) * 4;
	*cs++ = 0;

	if (HAS_MI_SET_PREDICATE(i915))
		*cs++ = MI_SET_PREDICATE;

	/*
	 * Take the diff between the two previous timestamps and add it to:
	 *
	 *      ((1 << 64) - 1) - delay_ticks
	 *
	 * When the Carry Flag contains 1 this means the elapsed time is
	 * longer than the expected delay, and we can exit the wait loop.
	 */
	*cs++ = MI_LOAD_REGISTER_IMM(2);
	*cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET));
	*cs++ = lower_32_bits(delay_ticks);
	*cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET)) + 4;
	*cs++ = upper_32_bits(delay_ticks);

	*cs++ = MI_MATH(4);
	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(DELTA_TS));
	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(DELTA_TARGET));
	*cs++ = MI_MATH_ADD;
	*cs++ = MI_MATH_STOREINV(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF);

	*cs++ = MI_ARB_CHECK;

	/*
	 * Transfer the result into the predicate register to be used for the
	 * predicated jump.
	 */
	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
	*cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE));
	*cs++ = i915_mmio_reg_offset(mi_predicate_result);

	if (HAS_MI_SET_PREDICATE(i915))
		*cs++ = MI_SET_PREDICATE | 1;

	/* Predicate the jump. */
	*cs++ = (GRAPHICS_VER(i915) < 8 ?
		 MI_BATCH_BUFFER_START :
		 MI_BATCH_BUFFER_START_GEN8) |
		MI_BATCH_PREDICATE;
	*cs++ = i915_ggtt_offset(vma) + (jump - batch) * 4;
	*cs++ = 0;

	if (HAS_MI_SET_PREDICATE(i915))
		*cs++ = MI_SET_PREDICATE;

	/* Restore registers. */
	for (i = 0; i < N_CS_GPR; i++)
		cs = save_restore_register(
			stream, cs, false /* restore */, CS_GPR(i),
			GPR_SAVE_OFFSET + 8 * i, 2);
	cs = save_restore_register(
		stream, cs, false /* restore */, mi_predicate_result,
		PREDICATE_SAVE_OFFSET, 1);

	/*
	 * And return to the ring.
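	 * MI_BATCH_BUFFER_END terminates the batch and execution resumes on
	 * the ring after the MI_BATCH_BUFFER_START that entered this buffer.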
*/ 2054 *cs++ = MI_BATCH_BUFFER_END; 2055 2056 GEM_BUG_ON(cs - batch > PAGE_SIZE / sizeof(*batch)); 2057 2058 i915_gem_object_flush_map(bo); 2059 __i915_gem_object_release_map(bo); 2060 2061 goto out_ww; 2062 2063 err_unpin: 2064 i915_vma_unpin_and_release(&vma, 0); 2065 out_ww: 2066 if (ret == -EDEADLK) { 2067 ret = i915_gem_ww_ctx_backoff(&ww); 2068 if (!ret) 2069 goto retry; 2070 } 2071 i915_gem_ww_ctx_fini(&ww); 2072 if (ret) 2073 i915_gem_object_put(bo); 2074 return ret; 2075 } 2076 2077 static u32 *write_cs_mi_lri(u32 *cs, 2078 const struct i915_oa_reg *reg_data, 2079 u32 n_regs) 2080 { 2081 u32 i; 2082 2083 for (i = 0; i < n_regs; i++) { 2084 if ((i % MI_LOAD_REGISTER_IMM_MAX_REGS) == 0) { 2085 u32 n_lri = min_t(u32, 2086 n_regs - i, 2087 MI_LOAD_REGISTER_IMM_MAX_REGS); 2088 2089 *cs++ = MI_LOAD_REGISTER_IMM(n_lri); 2090 } 2091 *cs++ = i915_mmio_reg_offset(reg_data[i].addr); 2092 *cs++ = reg_data[i].value; 2093 } 2094 2095 return cs; 2096 } 2097 2098 static int num_lri_dwords(int num_regs) 2099 { 2100 int count = 0; 2101 2102 if (num_regs > 0) { 2103 count += DIV_ROUND_UP(num_regs, MI_LOAD_REGISTER_IMM_MAX_REGS); 2104 count += num_regs * 2; 2105 } 2106 2107 return count; 2108 } 2109 2110 static struct i915_oa_config_bo * 2111 alloc_oa_config_buffer(struct i915_perf_stream *stream, 2112 struct i915_oa_config *oa_config) 2113 { 2114 struct drm_i915_gem_object *obj; 2115 struct i915_oa_config_bo *oa_bo; 2116 struct i915_gem_ww_ctx ww; 2117 size_t config_length = 0; 2118 u32 *cs; 2119 int err; 2120 2121 oa_bo = kzalloc(sizeof(*oa_bo), GFP_KERNEL); 2122 if (!oa_bo) 2123 return ERR_PTR(-ENOMEM); 2124 2125 config_length += num_lri_dwords(oa_config->mux_regs_len); 2126 config_length += num_lri_dwords(oa_config->b_counter_regs_len); 2127 config_length += num_lri_dwords(oa_config->flex_regs_len); 2128 config_length += 3; /* MI_BATCH_BUFFER_START */ 2129 config_length = ALIGN(sizeof(u32) * config_length, I915_GTT_PAGE_SIZE); 2130 2131 obj = i915_gem_object_create_shmem(stream->perf->i915, config_length); 2132 if (IS_ERR(obj)) { 2133 err = PTR_ERR(obj); 2134 goto err_free; 2135 } 2136 2137 i915_gem_ww_ctx_init(&ww, true); 2138 retry: 2139 err = i915_gem_object_lock(obj, &ww); 2140 if (err) 2141 goto out_ww; 2142 2143 cs = i915_gem_object_pin_map(obj, I915_MAP_WB); 2144 if (IS_ERR(cs)) { 2145 err = PTR_ERR(cs); 2146 goto out_ww; 2147 } 2148 2149 cs = write_cs_mi_lri(cs, 2150 oa_config->mux_regs, 2151 oa_config->mux_regs_len); 2152 cs = write_cs_mi_lri(cs, 2153 oa_config->b_counter_regs, 2154 oa_config->b_counter_regs_len); 2155 cs = write_cs_mi_lri(cs, 2156 oa_config->flex_regs, 2157 oa_config->flex_regs_len); 2158 2159 /* Jump into the active wait. */ 2160 *cs++ = (GRAPHICS_VER(stream->perf->i915) < 8 ? 
2161 MI_BATCH_BUFFER_START : 2162 MI_BATCH_BUFFER_START_GEN8); 2163 *cs++ = i915_ggtt_offset(stream->noa_wait); 2164 *cs++ = 0; 2165 2166 i915_gem_object_flush_map(obj); 2167 __i915_gem_object_release_map(obj); 2168 2169 oa_bo->vma = i915_vma_instance(obj, 2170 &stream->engine->gt->ggtt->vm, 2171 NULL); 2172 if (IS_ERR(oa_bo->vma)) { 2173 err = PTR_ERR(oa_bo->vma); 2174 goto out_ww; 2175 } 2176 2177 oa_bo->oa_config = i915_oa_config_get(oa_config); 2178 llist_add(&oa_bo->node, &stream->oa_config_bos); 2179 2180 out_ww: 2181 if (err == -EDEADLK) { 2182 err = i915_gem_ww_ctx_backoff(&ww); 2183 if (!err) 2184 goto retry; 2185 } 2186 i915_gem_ww_ctx_fini(&ww); 2187 2188 if (err) 2189 i915_gem_object_put(obj); 2190 err_free: 2191 if (err) { 2192 kfree(oa_bo); 2193 return ERR_PTR(err); 2194 } 2195 return oa_bo; 2196 } 2197 2198 static struct i915_vma * 2199 get_oa_vma(struct i915_perf_stream *stream, struct i915_oa_config *oa_config) 2200 { 2201 struct i915_oa_config_bo *oa_bo; 2202 2203 /* 2204 * Look for the buffer in the already allocated BOs attached 2205 * to the stream. 2206 */ 2207 llist_for_each_entry(oa_bo, stream->oa_config_bos.first, node) { 2208 if (oa_bo->oa_config == oa_config && 2209 memcmp(oa_bo->oa_config->uuid, 2210 oa_config->uuid, 2211 sizeof(oa_config->uuid)) == 0) 2212 goto out; 2213 } 2214 2215 oa_bo = alloc_oa_config_buffer(stream, oa_config); 2216 if (IS_ERR(oa_bo)) 2217 return ERR_CAST(oa_bo); 2218 2219 out: 2220 return i915_vma_get(oa_bo->vma); 2221 } 2222 2223 static int 2224 emit_oa_config(struct i915_perf_stream *stream, 2225 struct i915_oa_config *oa_config, 2226 struct intel_context *ce, 2227 struct i915_active *active) 2228 { 2229 struct i915_request *rq; 2230 struct i915_vma *vma; 2231 struct i915_gem_ww_ctx ww; 2232 int err; 2233 2234 vma = get_oa_vma(stream, oa_config); 2235 if (IS_ERR(vma)) 2236 return PTR_ERR(vma); 2237 2238 i915_gem_ww_ctx_init(&ww, true); 2239 retry: 2240 err = i915_gem_object_lock(vma->obj, &ww); 2241 if (err) 2242 goto err; 2243 2244 err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_GLOBAL | PIN_HIGH); 2245 if (err) 2246 goto err; 2247 2248 intel_engine_pm_get(ce->engine); 2249 rq = i915_request_create(ce); 2250 intel_engine_pm_put(ce->engine); 2251 if (IS_ERR(rq)) { 2252 err = PTR_ERR(rq); 2253 goto err_vma_unpin; 2254 } 2255 2256 if (!IS_ERR_OR_NULL(active)) { 2257 /* After all individual context modifications */ 2258 err = i915_request_await_active(rq, active, 2259 I915_ACTIVE_AWAIT_ACTIVE); 2260 if (err) 2261 goto err_add_request; 2262 2263 err = i915_active_add_request(active, rq); 2264 if (err) 2265 goto err_add_request; 2266 } 2267 2268 err = i915_vma_move_to_active(vma, rq, 0); 2269 if (err) 2270 goto err_add_request; 2271 2272 err = rq->engine->emit_bb_start(rq, 2273 i915_vma_offset(vma), 0, 2274 I915_DISPATCH_SECURE); 2275 if (err) 2276 goto err_add_request; 2277 2278 err_add_request: 2279 i915_request_add(rq); 2280 err_vma_unpin: 2281 i915_vma_unpin(vma); 2282 err: 2283 if (err == -EDEADLK) { 2284 err = i915_gem_ww_ctx_backoff(&ww); 2285 if (!err) 2286 goto retry; 2287 } 2288 2289 i915_gem_ww_ctx_fini(&ww); 2290 i915_vma_put(vma); 2291 return err; 2292 } 2293 2294 static struct intel_context *oa_context(struct i915_perf_stream *stream) 2295 { 2296 return stream->pinned_ctx ?: stream->engine->kernel_context; 2297 } 2298 2299 static int 2300 hsw_enable_metric_set(struct i915_perf_stream *stream, 2301 struct i915_active *active) 2302 { 2303 struct intel_uncore *uncore = stream->uncore; 2304 2305 /* 2306 * PRM: 2307 * 2308 * OA unit is 
using “crclk” for its functionality. When trunk 2309 * level clock gating takes place, OA clock would be gated, 2310 * unable to count the events from non-render clock domain. 2311 * Render clock gating must be disabled when OA is enabled to 2312 * count the events from non-render domain. Unit level clock 2313 * gating for RCS should also be disabled. 2314 */ 2315 intel_uncore_rmw(uncore, GEN7_MISCCPCTL, 2316 GEN7_DOP_CLOCK_GATE_ENABLE, 0); 2317 intel_uncore_rmw(uncore, GEN6_UCGCTL1, 2318 0, GEN6_CSUNIT_CLOCK_GATE_DISABLE); 2319 2320 return emit_oa_config(stream, 2321 stream->oa_config, oa_context(stream), 2322 active); 2323 } 2324 2325 static void hsw_disable_metric_set(struct i915_perf_stream *stream) 2326 { 2327 struct intel_uncore *uncore = stream->uncore; 2328 2329 intel_uncore_rmw(uncore, GEN6_UCGCTL1, 2330 GEN6_CSUNIT_CLOCK_GATE_DISABLE, 0); 2331 intel_uncore_rmw(uncore, GEN7_MISCCPCTL, 2332 0, GEN7_DOP_CLOCK_GATE_ENABLE); 2333 2334 intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0); 2335 } 2336 2337 static u32 oa_config_flex_reg(const struct i915_oa_config *oa_config, 2338 i915_reg_t reg) 2339 { 2340 u32 mmio = i915_mmio_reg_offset(reg); 2341 int i; 2342 2343 /* 2344 * This arbitrary default will select the 'EU FPU0 Pipeline 2345 * Active' event. In the future it's anticipated that there 2346 * will be an explicit 'No Event' we can select, but not yet... 2347 */ 2348 if (!oa_config) 2349 return 0; 2350 2351 for (i = 0; i < oa_config->flex_regs_len; i++) { 2352 if (i915_mmio_reg_offset(oa_config->flex_regs[i].addr) == mmio) 2353 return oa_config->flex_regs[i].value; 2354 } 2355 2356 return 0; 2357 } 2358 /* 2359 * NB: It must always remain pointer safe to run this even if the OA unit 2360 * has been disabled. 2361 * 2362 * It's fine to put out-of-date values into these per-context registers 2363 * in the case that the OA unit has been disabled. 2364 */ 2365 static void 2366 gen8_update_reg_state_unlocked(const struct intel_context *ce, 2367 const struct i915_perf_stream *stream) 2368 { 2369 u32 ctx_oactxctrl = stream->perf->ctx_oactxctrl_offset; 2370 u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset; 2371 /* The MMIO offsets for Flex EU registers aren't contiguous */ 2372 static const i915_reg_t flex_regs[] = { 2373 EU_PERF_CNTL0, 2374 EU_PERF_CNTL1, 2375 EU_PERF_CNTL2, 2376 EU_PERF_CNTL3, 2377 EU_PERF_CNTL4, 2378 EU_PERF_CNTL5, 2379 EU_PERF_CNTL6, 2380 }; 2381 u32 *reg_state = ce->lrc_reg_state; 2382 int i; 2383 2384 reg_state[ctx_oactxctrl + 1] = 2385 (stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) | 2386 (stream->periodic ? 
GEN8_OA_TIMER_ENABLE : 0) | 2387 GEN8_OA_COUNTER_RESUME; 2388 2389 for (i = 0; i < ARRAY_SIZE(flex_regs); i++) 2390 reg_state[ctx_flexeu0 + i * 2 + 1] = 2391 oa_config_flex_reg(stream->oa_config, flex_regs[i]); 2392 } 2393 2394 struct flex { 2395 i915_reg_t reg; 2396 u32 offset; 2397 u32 value; 2398 }; 2399 2400 static int 2401 gen8_store_flex(struct i915_request *rq, 2402 struct intel_context *ce, 2403 const struct flex *flex, unsigned int count) 2404 { 2405 u32 offset; 2406 u32 *cs; 2407 2408 cs = intel_ring_begin(rq, 4 * count); 2409 if (IS_ERR(cs)) 2410 return PTR_ERR(cs); 2411 2412 offset = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET; 2413 do { 2414 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; 2415 *cs++ = offset + flex->offset * sizeof(u32); 2416 *cs++ = 0; 2417 *cs++ = flex->value; 2418 } while (flex++, --count); 2419 2420 intel_ring_advance(rq, cs); 2421 2422 return 0; 2423 } 2424 2425 static int 2426 gen8_load_flex(struct i915_request *rq, 2427 struct intel_context *ce, 2428 const struct flex *flex, unsigned int count) 2429 { 2430 u32 *cs; 2431 2432 GEM_BUG_ON(!count || count > 63); 2433 2434 cs = intel_ring_begin(rq, 2 * count + 2); 2435 if (IS_ERR(cs)) 2436 return PTR_ERR(cs); 2437 2438 *cs++ = MI_LOAD_REGISTER_IMM(count); 2439 do { 2440 *cs++ = i915_mmio_reg_offset(flex->reg); 2441 *cs++ = flex->value; 2442 } while (flex++, --count); 2443 *cs++ = MI_NOOP; 2444 2445 intel_ring_advance(rq, cs); 2446 2447 return 0; 2448 } 2449 2450 static int gen8_modify_context(struct intel_context *ce, 2451 const struct flex *flex, unsigned int count) 2452 { 2453 struct i915_request *rq; 2454 int err; 2455 2456 rq = intel_engine_create_kernel_request(ce->engine); 2457 if (IS_ERR(rq)) 2458 return PTR_ERR(rq); 2459 2460 /* Serialise with the remote context */ 2461 err = intel_context_prepare_remote_request(ce, rq); 2462 if (err == 0) 2463 err = gen8_store_flex(rq, ce, flex, count); 2464 2465 i915_request_add(rq); 2466 return err; 2467 } 2468 2469 static int 2470 gen8_modify_self(struct intel_context *ce, 2471 const struct flex *flex, unsigned int count, 2472 struct i915_active *active) 2473 { 2474 struct i915_request *rq; 2475 int err; 2476 2477 intel_engine_pm_get(ce->engine); 2478 rq = i915_request_create(ce); 2479 intel_engine_pm_put(ce->engine); 2480 if (IS_ERR(rq)) 2481 return PTR_ERR(rq); 2482 2483 if (!IS_ERR_OR_NULL(active)) { 2484 err = i915_active_add_request(active, rq); 2485 if (err) 2486 goto err_add_request; 2487 } 2488 2489 err = gen8_load_flex(rq, ce, flex, count); 2490 if (err) 2491 goto err_add_request; 2492 2493 err_add_request: 2494 i915_request_add(rq); 2495 return err; 2496 } 2497 2498 static int gen8_configure_context(struct i915_gem_context *ctx, 2499 struct flex *flex, unsigned int count) 2500 { 2501 struct i915_gem_engines_iter it; 2502 struct intel_context *ce; 2503 int err = 0; 2504 2505 for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { 2506 GEM_BUG_ON(ce == ce->engine->kernel_context); 2507 2508 if (ce->engine->class != RENDER_CLASS) 2509 continue; 2510 2511 /* Otherwise OA settings will be set upon first use */ 2512 if (!intel_context_pin_if_active(ce)) 2513 continue; 2514 2515 flex->value = intel_sseu_make_rpcs(ce->engine->gt, &ce->sseu); 2516 err = gen8_modify_context(ce, flex, count); 2517 2518 intel_context_unpin(ce); 2519 if (err) 2520 break; 2521 } 2522 i915_gem_context_unlock_engines(ctx); 2523 2524 return err; 2525 } 2526 2527 static int gen12_configure_oar_context(struct i915_perf_stream *stream, 2528 struct i915_active *active) 2529 { 
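	/*
	 * When @active is non-NULL we are enabling OAR counter reporting for
	 * the stream's pinned context, otherwise we are disabling it. The
	 * context image is patched with gen8_modify_context() below, and the
	 * live registers with gen8_modify_self().
	 */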
	int err;
	struct intel_context *ce = stream->pinned_ctx;
	u32 format = stream->oa_buffer.format->format;
	u32 offset = stream->perf->ctx_oactxctrl_offset;
	struct flex regs_context[] = {
		{
			GEN8_OACTXCONTROL,
			offset + 1,
			active ? GEN8_OA_COUNTER_RESUME : 0,
		},
	};
	/*
	 * Offsets in regs_lri are not used since this configuration is only
	 * applied using LRI. Initialize the correct offsets for posterity.
	 */
#define GEN12_OAR_OACONTROL_OFFSET 0x5B0
	struct flex regs_lri[] = {
		{
			GEN12_OAR_OACONTROL,
			GEN12_OAR_OACONTROL_OFFSET + 1,
			(format << GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT) |
			(active ? GEN12_OAR_OACONTROL_COUNTER_ENABLE : 0)
		},
		{
			RING_CONTEXT_CONTROL(ce->engine->mmio_base),
			CTX_CONTEXT_CONTROL,
			_MASKED_FIELD(GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE,
				      active ?
				      GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE :
				      0)
		},
	};

	/* Modify the context image of the pinned context with regs_context. */
	err = intel_context_lock_pinned(ce);
	if (err)
		return err;

	err = gen8_modify_context(ce, regs_context,
				  ARRAY_SIZE(regs_context));
	intel_context_unlock_pinned(ce);
	if (err)
		return err;

	/* Apply regs_lri using LRI with the pinned context. */
	return gen8_modify_self(ce, regs_lri, ARRAY_SIZE(regs_lri), active);
}

/*
 * Manages updating the per-context aspects of the OA stream
 * configuration across all contexts.
 *
 * The awkward consideration here is that OACTXCONTROL controls the
 * exponent for periodic sampling which is primarily used for system
 * wide profiling where we'd like a consistent sampling period even in
 * the face of context switches.
 *
 * Our approach of updating the register state context (as opposed to
 * say using a workaround batch buffer) ensures that the hardware
 * won't automatically reload an out-of-date timer exponent even
 * transiently before a WA BB could be parsed.
 *
 * This function needs to:
 * - Ensure the currently running context's per-context OA state is
 *   updated
 * - Ensure that all existing contexts will have the correct per-context
 *   OA state if they are scheduled for use.
 * - Ensure any new contexts will be initialized with the correct
 *   per-context OA state.
 *
 * Note: it's only the RCS/Render context that has any OA state.
 * Note: the first flex register passed must always be R_PWR_CLK_STATE
 */
static int
oa_configure_all_contexts(struct i915_perf_stream *stream,
			  struct flex *regs,
			  size_t num_regs,
			  struct i915_active *active)
{
	struct drm_i915_private *i915 = stream->perf->i915;
	struct intel_engine_cs *engine;
	struct intel_gt *gt = stream->engine->gt;
	struct i915_gem_context *ctx, *cn;
	int err;

	lockdep_assert_held(&gt->perf.lock);

	/*
	 * The OA register config is setup through the context image. This image
	 * might be written to by the GPU on context switch (in particular on
	 * lite-restore). This means we can't safely update a context's image
	 * if this context is scheduled/submitted to run on the GPU.
	 *
	 * We could emit the OA register config through the batch buffer but
	 * this might leave a small interval of time where the OA unit is
	 * configured at an invalid sampling period.
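	 *
	 * So instead each pinned context image is updated directly, with the
	 * update serialised against the target context by
	 * intel_context_prepare_remote_request() (see gen8_modify_context()).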
2625 * 2626 * Note that since we emit all requests from a single ring, there 2627 * is still an implicit global barrier here that may cause a high 2628 * priority context to wait for an otherwise independent low priority 2629 * context. Contexts idle at the time of reconfiguration are not 2630 * trapped behind the barrier. 2631 */ 2632 spin_lock(&i915->gem.contexts.lock); 2633 list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) { 2634 if (!kref_get_unless_zero(&ctx->ref)) 2635 continue; 2636 2637 spin_unlock(&i915->gem.contexts.lock); 2638 2639 err = gen8_configure_context(ctx, regs, num_regs); 2640 if (err) { 2641 i915_gem_context_put(ctx); 2642 return err; 2643 } 2644 2645 spin_lock(&i915->gem.contexts.lock); 2646 list_safe_reset_next(ctx, cn, link); 2647 i915_gem_context_put(ctx); 2648 } 2649 spin_unlock(&i915->gem.contexts.lock); 2650 2651 /* 2652 * After updating all other contexts, we need to modify ourselves. 2653 * If we don't modify the kernel_context, we do not get events while 2654 * idle. 2655 */ 2656 for_each_uabi_engine(engine, i915) { 2657 struct intel_context *ce = engine->kernel_context; 2658 2659 if (engine->class != RENDER_CLASS) 2660 continue; 2661 2662 regs[0].value = intel_sseu_make_rpcs(engine->gt, &ce->sseu); 2663 2664 err = gen8_modify_self(ce, regs, num_regs, active); 2665 if (err) 2666 return err; 2667 } 2668 2669 return 0; 2670 } 2671 2672 static int 2673 gen12_configure_all_contexts(struct i915_perf_stream *stream, 2674 const struct i915_oa_config *oa_config, 2675 struct i915_active *active) 2676 { 2677 struct flex regs[] = { 2678 { 2679 GEN8_R_PWR_CLK_STATE(RENDER_RING_BASE), 2680 CTX_R_PWR_CLK_STATE, 2681 }, 2682 }; 2683 2684 return oa_configure_all_contexts(stream, 2685 regs, ARRAY_SIZE(regs), 2686 active); 2687 } 2688 2689 static int 2690 lrc_configure_all_contexts(struct i915_perf_stream *stream, 2691 const struct i915_oa_config *oa_config, 2692 struct i915_active *active) 2693 { 2694 u32 ctx_oactxctrl = stream->perf->ctx_oactxctrl_offset; 2695 /* The MMIO offsets for Flex EU registers aren't contiguous */ 2696 const u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset; 2697 #define ctx_flexeuN(N) (ctx_flexeu0 + 2 * (N) + 1) 2698 struct flex regs[] = { 2699 { 2700 GEN8_R_PWR_CLK_STATE(RENDER_RING_BASE), 2701 CTX_R_PWR_CLK_STATE, 2702 }, 2703 { 2704 GEN8_OACTXCONTROL, 2705 ctx_oactxctrl + 1, 2706 }, 2707 { EU_PERF_CNTL0, ctx_flexeuN(0) }, 2708 { EU_PERF_CNTL1, ctx_flexeuN(1) }, 2709 { EU_PERF_CNTL2, ctx_flexeuN(2) }, 2710 { EU_PERF_CNTL3, ctx_flexeuN(3) }, 2711 { EU_PERF_CNTL4, ctx_flexeuN(4) }, 2712 { EU_PERF_CNTL5, ctx_flexeuN(5) }, 2713 { EU_PERF_CNTL6, ctx_flexeuN(6) }, 2714 }; 2715 #undef ctx_flexeuN 2716 int i; 2717 2718 regs[1].value = 2719 (stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) | 2720 (stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) | 2721 GEN8_OA_COUNTER_RESUME; 2722 2723 for (i = 2; i < ARRAY_SIZE(regs); i++) 2724 regs[i].value = oa_config_flex_reg(oa_config, regs[i].reg); 2725 2726 return oa_configure_all_contexts(stream, 2727 regs, ARRAY_SIZE(regs), 2728 active); 2729 } 2730 2731 static int 2732 gen8_enable_metric_set(struct i915_perf_stream *stream, 2733 struct i915_active *active) 2734 { 2735 struct intel_uncore *uncore = stream->uncore; 2736 struct i915_oa_config *oa_config = stream->oa_config; 2737 int ret; 2738 2739 /* 2740 * We disable slice/unslice clock ratio change reports on SKL since 2741 * they are too noisy. 
	 * The HW generates a lot of redundant reports where the ratio hasn't
	 * really changed, causing a lot of redundant work for the processes
	 * consuming them and increasing the chances we'll hit buffer overruns.
	 *
	 * Although we don't currently use the 'disable overrun' OABUFFER
	 * feature it's worth noting that clock ratio reports have to be
	 * disabled before considering to use that feature since the HW doesn't
	 * correctly block these reports.
	 *
	 * Currently none of the high-level metrics we have depend on knowing
	 * this ratio to normalize.
	 *
	 * Note: This register is not power context saved and restored, but
	 * that's OK considering that we disable RC6 while the OA unit is
	 * enabled.
	 *
	 * The _INCLUDE_CLK_RATIO bit allows the slice/unslice frequency to
	 * be read back from automatically triggered reports, as part of the
	 * RPT_ID field.
	 */
	if (IS_GRAPHICS_VER(stream->perf->i915, 9, 11)) {
		intel_uncore_write(uncore, GEN8_OA_DEBUG,
				   _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
						      GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
	}

	/*
	 * Update all contexts prior to writing the mux configurations as we
	 * need to make sure all slices/subslices are ON before writing to NOA
	 * registers.
	 */
	ret = lrc_configure_all_contexts(stream, oa_config, active);
	if (ret)
		return ret;

	return emit_oa_config(stream,
			      stream->oa_config, oa_context(stream),
			      active);
}

static u32 oag_report_ctx_switches(const struct i915_perf_stream *stream)
{
	return _MASKED_FIELD(GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS,
			     (stream->sample_flags & SAMPLE_OA_REPORT) ?
			     0 : GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS);
}

static int
gen12_enable_metric_set(struct i915_perf_stream *stream,
			struct i915_active *active)
{
	struct drm_i915_private *i915 = stream->perf->i915;
	struct intel_uncore *uncore = stream->uncore;
	struct i915_oa_config *oa_config = stream->oa_config;
	bool periodic = stream->periodic;
	u32 period_exponent = stream->period_exponent;
	u32 sqcnt1;
	int ret;

	/*
	 * Wa_1508761755:xehpsdv, dg2
	 * EU NOA signals behave incorrectly if EU clock gating is enabled.
	 * Disable thread stall DOP gating and EU DOP gating.
	 */
	if (IS_XEHPSDV(i915) || IS_DG2(i915)) {
		intel_gt_mcr_multicast_write(uncore->gt, GEN8_ROW_CHICKEN,
					     _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
		intel_uncore_write(uncore, GEN7_ROW_CHICKEN2,
				   _MASKED_BIT_ENABLE(GEN12_DISABLE_DOP_GATING));
	}

	intel_uncore_write(uncore, GEN12_OAG_OA_DEBUG,
			   /* Disable clk ratio reports, like previous Gens. */
			   _MASKED_BIT_ENABLE(GEN12_OAG_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
					      GEN12_OAG_OA_DEBUG_INCLUDE_CLK_RATIO) |
			   /*
			    * If the user didn't require OA reports, instruct
			    * the hardware not to emit ctx switch reports.
			    */
			   oag_report_ctx_switches(stream));

	intel_uncore_write(uncore, GEN12_OAG_OAGLBCTXCTRL, periodic ?
			   (GEN12_OAG_OAGLBCTXCTRL_COUNTER_RESUME |
			    GEN12_OAG_OAGLBCTXCTRL_TIMER_ENABLE |
			    (period_exponent << GEN12_OAG_OAGLBCTXCTRL_TIMER_PERIOD_SHIFT))
			    : 0);

	/*
	 * Initialize the Super Queue Internal Cnt Register:
	 * set PMON Enable in order to collect valid metrics, and
	 * enable bytes per clock reporting in OA for XEHPSDV onward.
	 */
	sqcnt1 = GEN12_SQCNT1_PMON_ENABLE |
		 (HAS_OA_BPC_REPORTING(i915) ? GEN12_SQCNT1_OABPC : 0);

	intel_uncore_rmw(uncore, GEN12_SQCNT1, 0, sqcnt1);

	/*
	 * Update all contexts prior to writing the mux configurations as we
	 * need to make sure all slices/subslices are ON before writing to NOA
	 * registers.
	 */
	ret = gen12_configure_all_contexts(stream, oa_config, active);
	if (ret)
		return ret;

	/*
	 * For Gen12, performance counters are context
	 * saved/restored. Only enable it for the context that
	 * requested this.
	 */
	if (stream->ctx) {
		ret = gen12_configure_oar_context(stream, active);
		if (ret)
			return ret;
	}

	return emit_oa_config(stream,
			      stream->oa_config, oa_context(stream),
			      active);
}

static void gen8_disable_metric_set(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	/* Reset all contexts' slices/subslices configurations. */
	lrc_configure_all_contexts(stream, NULL, NULL);

	intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0);
}

static void gen11_disable_metric_set(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	/* Reset all contexts' slices/subslices configurations. */
	lrc_configure_all_contexts(stream, NULL, NULL);

	/* Make sure we disable NOA to save power. */
	intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
}

static void gen12_disable_metric_set(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;
	struct drm_i915_private *i915 = stream->perf->i915;
	u32 sqcnt1;

	/*
	 * Wa_1508761755:xehpsdv, dg2
	 * Enable thread stall DOP gating and EU DOP gating.
	 */
	if (IS_XEHPSDV(i915) || IS_DG2(i915)) {
		intel_gt_mcr_multicast_write(uncore->gt, GEN8_ROW_CHICKEN,
					     _MASKED_BIT_DISABLE(STALL_DOP_GATING_DISABLE));
		intel_uncore_write(uncore, GEN7_ROW_CHICKEN2,
				   _MASKED_BIT_DISABLE(GEN12_DISABLE_DOP_GATING));
	}

	/* Reset all contexts' slices/subslices configurations. */
	gen12_configure_all_contexts(stream, NULL, NULL);

	/* Disable the context save/restore of OAR counters. */
	if (stream->ctx)
		gen12_configure_oar_context(stream, NULL);

	/* Make sure we disable NOA to save power. */
	intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);

	sqcnt1 = GEN12_SQCNT1_PMON_ENABLE |
		 (HAS_OA_BPC_REPORTING(i915) ? GEN12_SQCNT1_OABPC : 0);

	/* Reset PMON Enable to save power. */
	intel_uncore_rmw(uncore, GEN12_SQCNT1, sqcnt1, 0);
}

static void gen7_oa_enable(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;
	struct i915_gem_context *ctx = stream->ctx;
	u32 ctx_id = stream->specific_ctx_id;
	bool periodic = stream->periodic;
	u32 period_exponent = stream->period_exponent;
	u32 report_format = stream->oa_buffer.format->format;

	/*
	 * Reset buf pointers so we don't forward reports from before now.
	 *
	 * Think carefully if considering trying to avoid this, since it
	 * also ensures status flags and the buffer itself are cleared
	 * in error paths, and we have checks for invalid reports based
	 * on the assumption that certain fields are written to zeroed
	 * memory, which this helps maintain.
	 */
	gen7_init_oa_buffer(stream);

	intel_uncore_write(uncore, GEN7_OACONTROL,
			   (ctx_id & GEN7_OACONTROL_CTX_MASK) |
			   (period_exponent <<
			    GEN7_OACONTROL_TIMER_PERIOD_SHIFT) |
			   (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) |
			   (report_format << GEN7_OACONTROL_FORMAT_SHIFT) |
			   (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) |
			   GEN7_OACONTROL_ENABLE);
}

static void gen8_oa_enable(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 report_format = stream->oa_buffer.format->format;

	/*
	 * Reset buf pointers so we don't forward reports from before now.
	 *
	 * Think carefully if considering trying to avoid this, since it
	 * also ensures status flags and the buffer itself are cleared
	 * in error paths, and we have checks for invalid reports based
	 * on the assumption that certain fields are written to zeroed
	 * memory, which this helps maintain.
	 */
	gen8_init_oa_buffer(stream);

	/*
	 * Note: we don't rely on the hardware to perform single context
	 * filtering and instead filter on the cpu based on the context-id
	 * field of reports.
	 */
	intel_uncore_write(uncore, GEN8_OACONTROL,
			   (report_format << GEN8_OA_REPORT_FORMAT_SHIFT) |
			   GEN8_OA_COUNTER_ENABLE);
}

static void gen12_oa_enable(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 report_format = stream->oa_buffer.format->format;

	/*
	 * If we don't want OA reports from the OA buffer, then we don't even
	 * need to program the OAG unit.
	 */
	if (!(stream->sample_flags & SAMPLE_OA_REPORT))
		return;

	gen12_init_oa_buffer(stream);

	intel_uncore_write(uncore, GEN12_OAG_OACONTROL,
			   (report_format << GEN12_OAG_OACONTROL_OA_COUNTER_FORMAT_SHIFT) |
			   GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE);
}

/**
 * i915_oa_stream_enable - handle `I915_PERF_IOCTL_ENABLE` for OA stream
 * @stream: An i915 perf stream opened for OA metrics
 *
 * [Re]enables hardware periodic sampling according to the period configured
 * when opening the stream. This also starts a hrtimer that will periodically
 * check for data in the circular OA buffer for notifying userspace (e.g.
 * during a read() or poll()).
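 *
 * The hrtimer fires every stream->poll_oa_period nanoseconds (see
 * oa_poll_check_timer_cb()) and is only started when OA reports are being
 * sampled (SAMPLE_OA_REPORT).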
3002 */ 3003 static void i915_oa_stream_enable(struct i915_perf_stream *stream) 3004 { 3005 stream->pollin = false; 3006 3007 stream->perf->ops.oa_enable(stream); 3008 3009 if (stream->sample_flags & SAMPLE_OA_REPORT) 3010 hrtimer_start(&stream->poll_check_timer, 3011 ns_to_ktime(stream->poll_oa_period), 3012 HRTIMER_MODE_REL_PINNED); 3013 } 3014 3015 static void gen7_oa_disable(struct i915_perf_stream *stream) 3016 { 3017 struct intel_uncore *uncore = stream->uncore; 3018 3019 intel_uncore_write(uncore, GEN7_OACONTROL, 0); 3020 if (intel_wait_for_register(uncore, 3021 GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0, 3022 50)) 3023 drm_err(&stream->perf->i915->drm, 3024 "wait for OA to be disabled timed out\n"); 3025 } 3026 3027 static void gen8_oa_disable(struct i915_perf_stream *stream) 3028 { 3029 struct intel_uncore *uncore = stream->uncore; 3030 3031 intel_uncore_write(uncore, GEN8_OACONTROL, 0); 3032 if (intel_wait_for_register(uncore, 3033 GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0, 3034 50)) 3035 drm_err(&stream->perf->i915->drm, 3036 "wait for OA to be disabled timed out\n"); 3037 } 3038 3039 static void gen12_oa_disable(struct i915_perf_stream *stream) 3040 { 3041 struct intel_uncore *uncore = stream->uncore; 3042 3043 intel_uncore_write(uncore, GEN12_OAG_OACONTROL, 0); 3044 if (intel_wait_for_register(uncore, 3045 GEN12_OAG_OACONTROL, 3046 GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE, 0, 3047 50)) 3048 drm_err(&stream->perf->i915->drm, 3049 "wait for OA to be disabled timed out\n"); 3050 3051 intel_uncore_write(uncore, GEN12_OA_TLB_INV_CR, 1); 3052 if (intel_wait_for_register(uncore, 3053 GEN12_OA_TLB_INV_CR, 3054 1, 0, 3055 50)) 3056 drm_err(&stream->perf->i915->drm, 3057 "wait for OA tlb invalidate timed out\n"); 3058 } 3059 3060 /** 3061 * i915_oa_stream_disable - handle `I915_PERF_IOCTL_DISABLE` for OA stream 3062 * @stream: An i915 perf stream opened for OA metrics 3063 * 3064 * Stops the OA unit from periodically writing counter reports into the 3065 * circular OA buffer. This also stops the hrtimer that periodically checks for 3066 * data in the circular OA buffer, for notifying userspace. 
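 *
 * Note that re-enabling the stream reinitialises the OA buffer (see
 * i915_oa_stream_enable() and the gen7/8/12_init_oa_buffer() helpers), so
 * reports buffered while the stream was disabled are not preserved.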
 */
static void i915_oa_stream_disable(struct i915_perf_stream *stream)
{
	stream->perf->ops.oa_disable(stream);

	if (stream->sample_flags & SAMPLE_OA_REPORT)
		hrtimer_cancel(&stream->poll_check_timer);
}

static const struct i915_perf_stream_ops i915_oa_stream_ops = {
	.destroy = i915_oa_stream_destroy,
	.enable = i915_oa_stream_enable,
	.disable = i915_oa_stream_disable,
	.wait_unlocked = i915_oa_wait_unlocked,
	.poll_wait = i915_oa_poll_wait,
	.read = i915_oa_read,
};

static int i915_perf_stream_enable_sync(struct i915_perf_stream *stream)
{
	struct i915_active *active;
	int err;

	active = i915_active_create();
	if (!active)
		return -ENOMEM;

	err = stream->perf->ops.enable_metric_set(stream, active);
	if (err == 0)
		__i915_active_wait(active, TASK_UNINTERRUPTIBLE);

	i915_active_put(active);
	return err;
}

static void
get_default_sseu_config(struct intel_sseu *out_sseu,
			struct intel_engine_cs *engine)
{
	const struct sseu_dev_info *devinfo_sseu = &engine->gt->info.sseu;

	*out_sseu = intel_sseu_from_device_info(devinfo_sseu);

	if (GRAPHICS_VER(engine->i915) == 11) {
		/*
		 * We only need the subslice count, so it doesn't matter which
		 * subslices we select - just keep the low half of the
		 * available subslices per slice by setting only the
		 * corresponding low bits of the mask.
		 */
		out_sseu->subslice_mask =
			~(~0 << (hweight8(out_sseu->subslice_mask) / 2));
		out_sseu->slice_mask = 0x1;
	}
}

static int
get_sseu_config(struct intel_sseu *out_sseu,
		struct intel_engine_cs *engine,
		const struct drm_i915_gem_context_param_sseu *drm_sseu)
{
	if (drm_sseu->engine.engine_class != engine->uabi_class ||
	    drm_sseu->engine.engine_instance != engine->uabi_instance)
		return -EINVAL;

	return i915_gem_user_to_context_sseu(engine->gt, drm_sseu, out_sseu);
}

/*
 * OA timestamp frequency = CS timestamp frequency on most platforms. On some
 * platforms the OA unit ignores the CTC_SHIFT and the 2 timestamps differ. In
 * such cases, return the adjusted CS timestamp frequency to the user.
 */
u32 i915_perf_oa_timestamp_frequency(struct drm_i915_private *i915)
{
	/*
	 * Wa_18013179988:dg2
	 * Wa_14015846243:mtl
	 */
	if (IS_DG2(i915) || IS_METEORLAKE(i915)) {
		intel_wakeref_t wakeref;
		u32 reg, shift;

		with_intel_runtime_pm(to_gt(i915)->uncore->rpm, wakeref)
			reg = intel_uncore_read(to_gt(i915)->uncore, RPM_CONFIG0);

		shift = REG_FIELD_GET(GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK,
				      reg);

		return to_gt(i915)->clock_frequency << (3 - shift);
	}

	return to_gt(i915)->clock_frequency;
}

/**
 * i915_oa_stream_init - validate combined props for OA stream and init
 * @stream: An i915 perf stream
 * @param: The open parameters passed to `DRM_I915_PERF_OPEN`
 * @props: The property state that configures stream (individually validated)
 *
 * While read_properties_unlocked() validates properties in isolation it
 * doesn't ensure that the combination necessarily makes sense.
 *
 * At this point it has been determined that userspace wants a stream of
 * OA metrics, but still we need to further validate the combined
 * properties are OK.
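 *
 * (For example, omitting SAMPLE_OA_REPORT from the sample flags is only
 * accepted on gen12+ when the stream is opened against a specific context.)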
3173 * 3174 * If the configuration makes sense then we can allocate memory for 3175 * a circular OA buffer and apply the requested metric set configuration. 3176 * 3177 * Returns: zero on success or a negative error code. 3178 */ 3179 static int i915_oa_stream_init(struct i915_perf_stream *stream, 3180 struct drm_i915_perf_open_param *param, 3181 struct perf_open_properties *props) 3182 { 3183 struct drm_i915_private *i915 = stream->perf->i915; 3184 struct i915_perf *perf = stream->perf; 3185 struct intel_gt *gt; 3186 int ret; 3187 3188 if (!props->engine) { 3189 drm_dbg(&stream->perf->i915->drm, 3190 "OA engine not specified\n"); 3191 return -EINVAL; 3192 } 3193 gt = props->engine->gt; 3194 3195 /* 3196 * If the sysfs metrics/ directory wasn't registered for some 3197 * reason then don't let userspace try their luck with config 3198 * IDs 3199 */ 3200 if (!perf->metrics_kobj) { 3201 drm_dbg(&stream->perf->i915->drm, 3202 "OA metrics weren't advertised via sysfs\n"); 3203 return -EINVAL; 3204 } 3205 3206 if (!(props->sample_flags & SAMPLE_OA_REPORT) && 3207 (GRAPHICS_VER(perf->i915) < 12 || !stream->ctx)) { 3208 drm_dbg(&stream->perf->i915->drm, 3209 "Only OA report sampling supported\n"); 3210 return -EINVAL; 3211 } 3212 3213 if (!perf->ops.enable_metric_set) { 3214 drm_dbg(&stream->perf->i915->drm, 3215 "OA unit not supported\n"); 3216 return -ENODEV; 3217 } 3218 3219 /* 3220 * To avoid the complexity of having to accurately filter 3221 * counter reports and marshal to the appropriate client 3222 * we currently only allow exclusive access 3223 */ 3224 if (gt->perf.exclusive_stream) { 3225 drm_dbg(&stream->perf->i915->drm, 3226 "OA unit already in use\n"); 3227 return -EBUSY; 3228 } 3229 3230 if (!props->oa_format) { 3231 drm_dbg(&stream->perf->i915->drm, 3232 "OA report format not specified\n"); 3233 return -EINVAL; 3234 } 3235 3236 stream->engine = props->engine; 3237 stream->uncore = stream->engine->gt->uncore; 3238 3239 stream->sample_size = sizeof(struct drm_i915_perf_record_header); 3240 3241 stream->oa_buffer.format = &perf->oa_formats[props->oa_format]; 3242 if (drm_WARN_ON(&i915->drm, stream->oa_buffer.format->size == 0)) 3243 return -EINVAL; 3244 3245 stream->sample_flags = props->sample_flags; 3246 stream->sample_size += stream->oa_buffer.format->size; 3247 3248 stream->hold_preemption = props->hold_preemption; 3249 3250 stream->periodic = props->oa_periodic; 3251 if (stream->periodic) 3252 stream->period_exponent = props->oa_period_exponent; 3253 3254 if (stream->ctx) { 3255 ret = oa_get_render_ctx_id(stream); 3256 if (ret) { 3257 drm_dbg(&stream->perf->i915->drm, 3258 "Invalid context id to filter with\n"); 3259 return ret; 3260 } 3261 } 3262 3263 ret = alloc_noa_wait(stream); 3264 if (ret) { 3265 drm_dbg(&stream->perf->i915->drm, 3266 "Unable to allocate NOA wait batch buffer\n"); 3267 goto err_noa_wait_alloc; 3268 } 3269 3270 stream->oa_config = i915_perf_get_oa_config(perf, props->metrics_set); 3271 if (!stream->oa_config) { 3272 drm_dbg(&stream->perf->i915->drm, 3273 "Invalid OA config id=%i\n", props->metrics_set); 3274 ret = -EINVAL; 3275 goto err_config; 3276 } 3277 3278 /* PRM - observability performance counters: 3279 * 3280 * OACONTROL, performance counter enable, note: 3281 * 3282 * "When this bit is set, in order to have coherent counts, 3283 * RC6 power state and trunk clock gating must be disabled. 
3284 * This can be achieved by programming MMIO registers as 3285 * 0xA094=0 and 0xA090[31]=1" 3286 * 3287 * In our case we are expecting that taking pm + FORCEWAKE 3288 * references will effectively disable RC6. 3289 */ 3290 intel_engine_pm_get(stream->engine); 3291 intel_uncore_forcewake_get(stream->uncore, FORCEWAKE_ALL); 3292 3293 /* 3294 * Wa_16011777198:dg2: GuC resets render as part of the Wa. This causes 3295 * OA to lose the configuration state. Prevent this by overriding GUCRC 3296 * mode. 3297 */ 3298 if (intel_uc_uses_guc_rc(>->uc) && 3299 (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_C0) || 3300 IS_DG2_GRAPHICS_STEP(gt->i915, G11, STEP_A0, STEP_B0))) { 3301 ret = intel_guc_slpc_override_gucrc_mode(>->uc.guc.slpc, 3302 SLPC_GUCRC_MODE_GUCRC_NO_RC6); 3303 if (ret) { 3304 drm_dbg(&stream->perf->i915->drm, 3305 "Unable to override gucrc mode\n"); 3306 goto err_gucrc; 3307 } 3308 3309 stream->override_gucrc = true; 3310 } 3311 3312 ret = alloc_oa_buffer(stream); 3313 if (ret) 3314 goto err_oa_buf_alloc; 3315 3316 stream->ops = &i915_oa_stream_ops; 3317 3318 stream->engine->gt->perf.sseu = props->sseu; 3319 WRITE_ONCE(gt->perf.exclusive_stream, stream); 3320 3321 ret = i915_perf_stream_enable_sync(stream); 3322 if (ret) { 3323 drm_dbg(&stream->perf->i915->drm, 3324 "Unable to enable metric set\n"); 3325 goto err_enable; 3326 } 3327 3328 drm_dbg(&stream->perf->i915->drm, 3329 "opening stream oa config uuid=%s\n", 3330 stream->oa_config->uuid); 3331 3332 hrtimer_init(&stream->poll_check_timer, 3333 CLOCK_MONOTONIC, HRTIMER_MODE_REL); 3334 stream->poll_check_timer.function = oa_poll_check_timer_cb; 3335 init_waitqueue_head(&stream->poll_wq); 3336 spin_lock_init(&stream->oa_buffer.ptr_lock); 3337 mutex_init(&stream->lock); 3338 3339 return 0; 3340 3341 err_enable: 3342 WRITE_ONCE(gt->perf.exclusive_stream, NULL); 3343 perf->ops.disable_metric_set(stream); 3344 3345 free_oa_buffer(stream); 3346 3347 err_oa_buf_alloc: 3348 if (stream->override_gucrc) 3349 intel_guc_slpc_unset_gucrc_mode(>->uc.guc.slpc); 3350 3351 err_gucrc: 3352 intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL); 3353 intel_engine_pm_put(stream->engine); 3354 3355 free_oa_configs(stream); 3356 3357 err_config: 3358 free_noa_wait(stream); 3359 3360 err_noa_wait_alloc: 3361 if (stream->ctx) 3362 oa_put_render_ctx_id(stream); 3363 3364 return ret; 3365 } 3366 3367 void i915_oa_init_reg_state(const struct intel_context *ce, 3368 const struct intel_engine_cs *engine) 3369 { 3370 struct i915_perf_stream *stream; 3371 3372 if (engine->class != RENDER_CLASS) 3373 return; 3374 3375 /* perf.exclusive_stream serialised by lrc_configure_all_contexts() */ 3376 stream = READ_ONCE(engine->gt->perf.exclusive_stream); 3377 if (stream && GRAPHICS_VER(stream->perf->i915) < 12) 3378 gen8_update_reg_state_unlocked(ce, stream); 3379 } 3380 3381 /** 3382 * i915_perf_read - handles read() FOP for i915 perf stream FDs 3383 * @file: An i915 perf stream file 3384 * @buf: destination buffer given by userspace 3385 * @count: the number of bytes userspace wants to read 3386 * @ppos: (inout) file seek position (unused) 3387 * 3388 * The entry point for handling a read() on a stream file descriptor from 3389 * userspace. Most of the work is left to the i915_perf_read_locked() and 3390 * &i915_perf_stream_ops->read but to save having stream implementations (of 3391 * which we might have multiple later) we handle blocking read here. 
3392 * 3393 * We can also consistently treat trying to read from a disabled stream 3394 * as an IO error so implementations can assume the stream is enabled 3395 * while reading. 3396 * 3397 * Returns: The number of bytes copied or a negative error code on failure. 3398 */ 3399 static ssize_t i915_perf_read(struct file *file, 3400 char __user *buf, 3401 size_t count, 3402 loff_t *ppos) 3403 { 3404 struct i915_perf_stream *stream = file->private_data; 3405 size_t offset = 0; 3406 int ret; 3407 3408 /* To ensure it's handled consistently we simply treat all reads of a 3409 * disabled stream as an error. In particular it might otherwise lead 3410 * to a deadlock for blocking file descriptors... 3411 */ 3412 if (!stream->enabled || !(stream->sample_flags & SAMPLE_OA_REPORT)) 3413 return -EIO; 3414 3415 if (!(file->f_flags & O_NONBLOCK)) { 3416 /* There's the small chance of false positives from 3417 * stream->ops->wait_unlocked. 3418 * 3419 * E.g. with single context filtering since we only wait until 3420 * oabuffer has >= 1 report we don't immediately know whether 3421 * any reports really belong to the current context 3422 */ 3423 do { 3424 ret = stream->ops->wait_unlocked(stream); 3425 if (ret) 3426 return ret; 3427 3428 mutex_lock(&stream->lock); 3429 ret = stream->ops->read(stream, buf, count, &offset); 3430 mutex_unlock(&stream->lock); 3431 } while (!offset && !ret); 3432 } else { 3433 mutex_lock(&stream->lock); 3434 ret = stream->ops->read(stream, buf, count, &offset); 3435 mutex_unlock(&stream->lock); 3436 } 3437 3438 /* We allow the poll checking to sometimes report false positive EPOLLIN 3439 * events where we might actually report EAGAIN on read() if there's 3440 * not really any data available. In this situation though we don't 3441 * want to enter a busy loop between poll() reporting a EPOLLIN event 3442 * and read() returning -EAGAIN. Clearing the oa.pollin state here 3443 * effectively ensures we back off until the next hrtimer callback 3444 * before reporting another EPOLLIN event. 3445 * The exception to this is if ops->read() returned -ENOSPC which means 3446 * that more OA data is available than could fit in the user provided 3447 * buffer. In this case we want the next poll() call to not block. 3448 */ 3449 if (ret != -ENOSPC) 3450 stream->pollin = false; 3451 3452 /* Possible values for ret are 0, -EFAULT, -ENOSPC, -EIO, ... */ 3453 return offset ?: (ret ?: -EAGAIN); 3454 } 3455 3456 static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer) 3457 { 3458 struct i915_perf_stream *stream = 3459 container_of(hrtimer, typeof(*stream), poll_check_timer); 3460 3461 if (oa_buffer_check_unlocked(stream)) { 3462 stream->pollin = true; 3463 wake_up(&stream->poll_wq); 3464 } 3465 3466 hrtimer_forward_now(hrtimer, 3467 ns_to_ktime(stream->poll_oa_period)); 3468 3469 return HRTIMER_RESTART; 3470 } 3471 3472 /** 3473 * i915_perf_poll_locked - poll_wait() with a suitable wait queue for stream 3474 * @stream: An i915 perf stream 3475 * @file: An i915 perf stream file 3476 * @wait: poll() state table 3477 * 3478 * For handling userspace polling on an i915 perf stream, this calls through to 3479 * &i915_perf_stream_ops->poll_wait to call poll_wait() with a wait queue that 3480 * will be woken for new stream data. 
 *
 * Returns: any poll events that are ready without sleeping
 */
static __poll_t i915_perf_poll_locked(struct i915_perf_stream *stream,
				      struct file *file,
				      poll_table *wait)
{
	__poll_t events = 0;

	stream->ops->poll_wait(stream, file, wait);

	/* Note: we don't explicitly check whether there's something to read
	 * here since this path may be very hot depending on what else
	 * userspace is polling, or on the timeout in use. We rely solely on
	 * the hrtimer/oa_poll_check_timer_cb to notify us when there are
	 * samples to read.
	 */
	if (stream->pollin)
		events |= EPOLLIN;

	return events;
}

/**
 * i915_perf_poll - call poll_wait() with a suitable wait queue for stream
 * @file: An i915 perf stream file
 * @wait: poll() state table
 *
 * For handling userspace polling on an i915 perf stream, this ensures
 * poll_wait() gets called with a wait queue that will be woken for new stream
 * data.
 *
 * Note: Implementation deferred to i915_perf_poll_locked()
 *
 * Returns: any poll events that are ready without sleeping
 */
static __poll_t i915_perf_poll(struct file *file, poll_table *wait)
{
	struct i915_perf_stream *stream = file->private_data;
	__poll_t ret;

	mutex_lock(&stream->lock);
	ret = i915_perf_poll_locked(stream, file, wait);
	mutex_unlock(&stream->lock);

	return ret;
}

/**
 * i915_perf_enable_locked - handle `I915_PERF_IOCTL_ENABLE` ioctl
 * @stream: A disabled i915 perf stream
 *
 * [Re]enables the associated capture of data for this stream.
 *
 * If a stream was previously enabled then there's currently no intention
 * to provide userspace any guarantee about the preservation of previously
 * buffered data.
 */
static void i915_perf_enable_locked(struct i915_perf_stream *stream)
{
	if (stream->enabled)
		return;

	/* Allow stream->ops->enable() to refer to this */
	stream->enabled = true;

	if (stream->ops->enable)
		stream->ops->enable(stream);

	if (stream->hold_preemption)
		intel_context_set_nopreempt(stream->pinned_ctx);
}

/**
 * i915_perf_disable_locked - handle `I915_PERF_IOCTL_DISABLE` ioctl
 * @stream: An enabled i915 perf stream
 *
 * Disables the associated capture of data for this stream.
 *
 * The intention is that disabling and re-enabling a stream will ideally be
 * cheaper than destroying and re-opening a stream with the same configuration,
 * though there are no formal guarantees about what state or buffered data
 * must be retained between disabling and re-enabling a stream.
 *
 * Note: while a stream is disabled it's considered an error for userspace
 * to attempt to read from the stream (-EIO).
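 *
 * As an illustrative (not guaranteed) usage sketch, capture can be paused
 * while switching to another metric set and then resumed::
 *
 *	ioctl(stream_fd, I915_PERF_IOCTL_DISABLE, 0);
 *	ioctl(stream_fd, I915_PERF_IOCTL_CONFIG, new_metrics_set);
 *	ioctl(stream_fd, I915_PERF_IOCTL_ENABLE, 0);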
 */
static void i915_perf_disable_locked(struct i915_perf_stream *stream)
{
	if (!stream->enabled)
		return;

	/* Allow stream->ops->disable() to refer to this */
	stream->enabled = false;

	if (stream->hold_preemption)
		intel_context_clear_nopreempt(stream->pinned_ctx);

	if (stream->ops->disable)
		stream->ops->disable(stream);
}

static long i915_perf_config_locked(struct i915_perf_stream *stream,
				    unsigned long metrics_set)
{
	struct i915_oa_config *config;
	long ret = stream->oa_config->id;

	config = i915_perf_get_oa_config(stream->perf, metrics_set);
	if (!config)
		return -EINVAL;

	if (config != stream->oa_config) {
		int err;

		/*
		 * If OA is bound to a specific context, emit the
		 * reconfiguration inline from that context. The update
		 * will then be ordered with respect to submission on that
		 * context.
		 *
		 * When set globally, we use a low priority kernel context,
		 * so it will effectively take effect when idle.
		 */
		err = emit_oa_config(stream, config, oa_context(stream), NULL);
		if (!err)
			config = xchg(&stream->oa_config, config);
		else
			ret = err;
	}

	i915_oa_config_put(config);

	return ret;
}

/**
 * i915_perf_ioctl_locked - support ioctl() usage with i915 perf stream FDs
 * @stream: An i915 perf stream
 * @cmd: the ioctl request
 * @arg: the ioctl data
 *
 * Returns: zero on success or a negative error code. Returns -EINVAL for
 * an unknown ioctl request.
 */
static long i915_perf_ioctl_locked(struct i915_perf_stream *stream,
				   unsigned int cmd,
				   unsigned long arg)
{
	switch (cmd) {
	case I915_PERF_IOCTL_ENABLE:
		i915_perf_enable_locked(stream);
		return 0;
	case I915_PERF_IOCTL_DISABLE:
		i915_perf_disable_locked(stream);
		return 0;
	case I915_PERF_IOCTL_CONFIG:
		return i915_perf_config_locked(stream, arg);
	}

	return -EINVAL;
}

/**
 * i915_perf_ioctl - support ioctl() usage with i915 perf stream FDs
 * @file: An i915 perf stream file
 * @cmd: the ioctl request
 * @arg: the ioctl data
 *
 * Implementation deferred to i915_perf_ioctl_locked().
 *
 * Returns: zero on success or a negative error code. Returns -EINVAL for
 * an unknown ioctl request.
 */
static long i915_perf_ioctl(struct file *file,
			    unsigned int cmd,
			    unsigned long arg)
{
	struct i915_perf_stream *stream = file->private_data;
	long ret;

	mutex_lock(&stream->lock);
	ret = i915_perf_ioctl_locked(stream, cmd, arg);
	mutex_unlock(&stream->lock);

	return ret;
}

/**
 * i915_perf_destroy_locked - destroy an i915 perf stream
 * @stream: An i915 perf stream
 *
 * Frees all resources associated with the given i915 perf @stream, disabling
 * any associated data capture in the process.
 *
 * Note: The gt->perf.lock mutex has been taken to serialize
 * with any non-file-operation driver hooks.
 */
static void i915_perf_destroy_locked(struct i915_perf_stream *stream)
{
	if (stream->enabled)
		i915_perf_disable_locked(stream);

	if (stream->ops->destroy)
		stream->ops->destroy(stream);

	if (stream->ctx)
		i915_gem_context_put(stream->ctx);

	kfree(stream);
}

/**
 * i915_perf_release - handles userspace close() of a stream file
 * @inode: anonymous inode associated with file
 * @file: An i915 perf stream file
 *
 * Cleans up any resources associated with an open i915 perf stream file.
 *
 * NB: close() can't really fail from the userspace point of view.
 *
 * Returns: zero on success or a negative error code.
 */
static int i915_perf_release(struct inode *inode, struct file *file)
{
	struct i915_perf_stream *stream = file->private_data;
	struct i915_perf *perf = stream->perf;
	struct intel_gt *gt = stream->engine->gt;

	/*
	 * Within this call, we know that the fd is being closed and we have no
	 * other user of stream->lock. Use the perf lock to destroy the stream
	 * here.
	 */
	mutex_lock(&gt->perf.lock);
	i915_perf_destroy_locked(stream);
	mutex_unlock(&gt->perf.lock);

	/* Release the reference the perf stream kept on the driver. */
	drm_dev_put(&perf->i915->drm);

	return 0;
}

static const struct file_operations fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.release	= i915_perf_release,
	.poll		= i915_perf_poll,
	.read		= i915_perf_read,
	.unlocked_ioctl	= i915_perf_ioctl,
	/* Our ioctls have no arguments that need 32-bit compat translation,
	 * so it's safe to use the same function for compat_ioctl.
	 */
	.compat_ioctl   = i915_perf_ioctl,
};

/**
 * i915_perf_open_ioctl_locked - DRM ioctl() for userspace to open a stream FD
 * @perf: i915 perf instance
 * @param: The open parameters passed to `DRM_I915_PERF_OPEN`
 * @props: individually validated u64 property value pairs
 * @file: drm file
 *
 * See i915_perf_open_ioctl() for interface details.
 *
 * Implements further stream config validation and stream initialization on
 * behalf of i915_perf_open_ioctl() with the gt->perf.lock mutex
 * taken to serialize with any non-file-operation driver hooks.
 *
 * Note: at this point the @props have only been validated in isolation and
 * it's still necessary to validate that the combination of properties makes
 * sense.
 *
 * In the case where userspace is interested in OA unit metrics then further
 * config validation and stream initialization details will be handled by
 * i915_oa_stream_init(). The code here should only validate config state that
 * will be relevant to all stream types / backends.
 *
 * Returns: a newly opened i915 perf stream file descriptor or a negative
 * error code on failure.
 */
static int
i915_perf_open_ioctl_locked(struct i915_perf *perf,
			    struct drm_i915_perf_open_param *param,
			    struct perf_open_properties *props,
			    struct drm_file *file)
{
	struct i915_gem_context *specific_ctx = NULL;
	struct i915_perf_stream *stream = NULL;
	unsigned long f_flags = 0;
	bool privileged_op = true;
	int stream_fd;
	int ret;

	if (props->single_context) {
		u32 ctx_handle = props->ctx_handle;
		struct drm_i915_file_private *file_priv = file->driver_priv;

		specific_ctx = i915_gem_context_lookup(file_priv, ctx_handle);
		if (IS_ERR(specific_ctx)) {
			drm_dbg(&perf->i915->drm,
				"Failed to look up context with ID %u for opening perf stream\n",
				ctx_handle);
			ret = PTR_ERR(specific_ctx);
			goto err;
		}
	}

	/*
	 * On Haswell the OA unit supports clock gating off for a specific
	 * context and in this mode there's no visibility of metrics for the
	 * rest of the system, which we consider acceptable for a
	 * non-privileged client.
	 *
	 * For Gen8->11 the OA unit no longer supports clock gating off for a
	 * specific context and the kernel can't securely stop the counters
	 * from updating as system-wide / global values. Even though we can
	 * filter reports based on the included context ID we can't block
	 * clients from seeing the raw / global counter values via
	 * MI_REPORT_PERF_COUNT commands and so consider it a privileged op to
	 * enable the OA unit by default.
	 *
	 * For Gen12+ we gain a new OAR unit that only monitors the RCS on a
	 * per context basis. So we can relax requirements there if the user
	 * doesn't request global stream access (i.e. query based sampling
	 * using MI_REPORT_PERF_COUNT).
	 */
	if (IS_HASWELL(perf->i915) && specific_ctx)
		privileged_op = false;
	else if (GRAPHICS_VER(perf->i915) == 12 && specific_ctx &&
		 (props->sample_flags & SAMPLE_OA_REPORT) == 0)
		privileged_op = false;

	if (props->hold_preemption) {
		if (!props->single_context) {
			drm_dbg(&perf->i915->drm,
				"preemption disable with no context\n");
			ret = -EINVAL;
			goto err;
		}
		privileged_op = true;
	}

	/*
	 * Asking for SSEU configuration is a privileged operation.
	 */
	if (props->has_sseu)
		privileged_op = true;
	else
		get_default_sseu_config(&props->sseu, props->engine);

	/* Similar to perf's kernel.perf_paranoid_cpu sysctl option
	 * we check a dev.i915.perf_stream_paranoid sysctl option
	 * to determine if it's ok to access system wide OA counters
	 * without CAP_PERFMON or CAP_SYS_ADMIN privileges.
	 */
	if (privileged_op &&
	    i915_perf_stream_paranoid && !perfmon_capable()) {
		drm_dbg(&perf->i915->drm,
			"Insufficient privileges to open i915 perf stream\n");
		ret = -EACCES;
		goto err_ctx;
	}

	stream = kzalloc(sizeof(*stream), GFP_KERNEL);
	if (!stream) {
		ret = -ENOMEM;
		goto err_ctx;
	}

	stream->perf = perf;
	stream->ctx = specific_ctx;
	stream->poll_oa_period = props->poll_oa_period;

	ret = i915_oa_stream_init(stream, param, props);
	if (ret)
		goto err_alloc;

	/* we avoid simply assigning stream->sample_flags = props->sample_flags
	 * to have _stream_init check the combination of sample flags more
	 * thoroughly, but still this is the expected result at this point.
	 */
	if (WARN_ON(stream->sample_flags != props->sample_flags)) {
		ret = -ENODEV;
		goto err_flags;
	}

	if (param->flags & I915_PERF_FLAG_FD_CLOEXEC)
		f_flags |= O_CLOEXEC;
	if (param->flags & I915_PERF_FLAG_FD_NONBLOCK)
		f_flags |= O_NONBLOCK;

	stream_fd = anon_inode_getfd("[i915_perf]", &fops, stream, f_flags);
	if (stream_fd < 0) {
		ret = stream_fd;
		goto err_flags;
	}

	if (!(param->flags & I915_PERF_FLAG_DISABLED))
		i915_perf_enable_locked(stream);

	/* Take a reference on the driver that will be kept with stream_fd
	 * until its release.
	 */
	drm_dev_get(&perf->i915->drm);

	return stream_fd;

err_flags:
	if (stream->ops->destroy)
		stream->ops->destroy(stream);
err_alloc:
	kfree(stream);
err_ctx:
	if (specific_ctx)
		i915_gem_context_put(specific_ctx);
err:
	return ret;
}

static u64 oa_exponent_to_ns(struct i915_perf *perf, int exponent)
{
	u64 nom = (2ULL << exponent) * NSEC_PER_SEC;
	u32 den = i915_perf_oa_timestamp_frequency(perf->i915);

	return div_u64(nom + den - 1, den);
}

static __always_inline bool
oa_format_valid(struct i915_perf *perf, enum drm_i915_oa_format format)
{
	return test_bit(format, perf->format_mask);
}

static __always_inline void
oa_format_add(struct i915_perf *perf, enum drm_i915_oa_format format)
{
	__set_bit(format, perf->format_mask);
}

/**
 * read_properties_unlocked - validate + copy userspace stream open properties
 * @perf: i915 perf instance
 * @uprops: The array of u64 key value pairs given by userspace
 * @n_props: The number of key value pairs expected in @uprops
 * @props: The stream configuration built up while validating properties
 *
 * Note this function only validates properties in isolation; it doesn't
 * validate that the combination of properties makes sense or that all
 * properties necessary for a particular kind of stream have been set.
 *
 * Note that there currently aren't any ordering requirements for properties so
 * we shouldn't validate or assume anything about ordering here. This doesn't
 * rule out defining new properties with ordering requirements in the future.
 */
static int read_properties_unlocked(struct i915_perf *perf,
				    u64 __user *uprops,
				    u32 n_props,
				    struct perf_open_properties *props)
{
	u64 __user *uprop = uprops;
	u32 i;
	int ret;

	memset(props, 0, sizeof(struct perf_open_properties));
	props->poll_oa_period = DEFAULT_POLL_PERIOD_NS;

	if (!n_props) {
		drm_dbg(&perf->i915->drm,
			"No i915 perf properties given\n");
		return -EINVAL;
	}

	/* At the moment we only support using i915-perf on the RCS. */
	props->engine = intel_engine_lookup_user(perf->i915,
						 I915_ENGINE_CLASS_RENDER,
						 0);
	if (!props->engine) {
		drm_dbg(&perf->i915->drm,
			"No RENDER-capable engines\n");
		return -EINVAL;
	}

	/* Considering that ID = 0 is reserved and assuming that we don't
	 * (currently) expect any configurations to ever specify duplicate
	 * values for a particular property ID then the last _PROP_MAX value is
	 * one greater than the maximum number of properties we expect to get
	 * from userspace.
	 */
	if (n_props >= DRM_I915_PERF_PROP_MAX) {
		drm_dbg(&perf->i915->drm,
			"More i915 perf properties specified than exist\n");
		return -EINVAL;
	}

	for (i = 0; i < n_props; i++) {
		u64 oa_period, oa_freq_hz;
		u64 id, value;

		ret = get_user(id, uprop);
		if (ret)
			return ret;

		ret = get_user(value, uprop + 1);
		if (ret)
			return ret;

		if (id == 0 || id >= DRM_I915_PERF_PROP_MAX) {
			drm_dbg(&perf->i915->drm,
				"Unknown i915 perf property ID\n");
			return -EINVAL;
		}

		switch ((enum drm_i915_perf_property_id)id) {
		case DRM_I915_PERF_PROP_CTX_HANDLE:
			props->single_context = 1;
			props->ctx_handle = value;
			break;
		case DRM_I915_PERF_PROP_SAMPLE_OA:
			if (value)
				props->sample_flags |= SAMPLE_OA_REPORT;
			break;
		case DRM_I915_PERF_PROP_OA_METRICS_SET:
			if (value == 0) {
				drm_dbg(&perf->i915->drm,
					"Unknown OA metric set ID\n");
				return -EINVAL;
			}
			props->metrics_set = value;
			break;
		case DRM_I915_PERF_PROP_OA_FORMAT:
			if (value == 0 || value >= I915_OA_FORMAT_MAX) {
				drm_dbg(&perf->i915->drm,
					"Out-of-range OA report format %llu\n",
					value);
				return -EINVAL;
			}
			if (!oa_format_valid(perf, value)) {
				drm_dbg(&perf->i915->drm,
					"Unsupported OA report format %llu\n",
					value);
				return -EINVAL;
			}
			props->oa_format = value;
			break;
		case DRM_I915_PERF_PROP_OA_EXPONENT:
			if (value > OA_EXPONENT_MAX) {
				drm_dbg(&perf->i915->drm,
					"OA timer exponent too high (> %u)\n",
					OA_EXPONENT_MAX);
				return -EINVAL;
			}

			/* Theoretically we can program the OA unit to sample
			 * e.g. every 160ns for HSW, 167ns for BDW/SKL or 104ns
			 * for BXT. We don't allow such high sampling
			 * frequencies by default unless root.
			 */

			BUILD_BUG_ON(sizeof(oa_period) != 8);
			oa_period = oa_exponent_to_ns(perf, value);

			/* This check is primarily to ensure that oa_period <=
			 * UINT32_MAX (before passing to do_div which only
			 * accepts a u32 denominator), but we can also skip
			 * checking anything < 1Hz which implicitly can't be
			 * limited via an integer oa_max_sample_rate.
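			 *
			 * Worked example with illustrative numbers: assuming
			 * a 19.2MHz OA timestamp frequency, an exponent of 16
			 * gives a period of 2^(16+1) * 1e9 / 19200000 ns,
			 * i.e. ~6.83ms, or a sampling frequency of ~146Hz.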
			 */
			if (oa_period <= NSEC_PER_SEC) {
				u64 tmp = NSEC_PER_SEC;

				do_div(tmp, oa_period);
				oa_freq_hz = tmp;
			} else {
				oa_freq_hz = 0;
			}

			if (oa_freq_hz > i915_oa_max_sample_rate && !perfmon_capable()) {
				drm_dbg(&perf->i915->drm,
					"OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without CAP_PERFMON or CAP_SYS_ADMIN privileges\n",
					i915_oa_max_sample_rate);
				return -EACCES;
			}

			props->oa_periodic = true;
			props->oa_period_exponent = value;
			break;
		case DRM_I915_PERF_PROP_HOLD_PREEMPTION:
			props->hold_preemption = !!value;
			break;
		case DRM_I915_PERF_PROP_GLOBAL_SSEU: {
			struct drm_i915_gem_context_param_sseu user_sseu;

			if (GRAPHICS_VER_FULL(perf->i915) >= IP_VER(12, 50)) {
				drm_dbg(&perf->i915->drm,
					"SSEU config not supported on gfx %x\n",
					GRAPHICS_VER_FULL(perf->i915));
				return -ENODEV;
			}

			if (copy_from_user(&user_sseu,
					   u64_to_user_ptr(value),
					   sizeof(user_sseu))) {
				drm_dbg(&perf->i915->drm,
					"Unable to copy global sseu parameter\n");
				return -EFAULT;
			}

			ret = get_sseu_config(&props->sseu, props->engine, &user_sseu);
			if (ret) {
				drm_dbg(&perf->i915->drm,
					"Invalid SSEU configuration\n");
				return ret;
			}
			props->has_sseu = true;
			break;
		}
		case DRM_I915_PERF_PROP_POLL_OA_PERIOD:
			if (value < 100000 /* 100us */) {
				drm_dbg(&perf->i915->drm,
					"OA availability timer too small (%lluns < 100us)\n",
					value);
				return -EINVAL;
			}
			props->poll_oa_period = value;
			break;
		case DRM_I915_PERF_PROP_MAX:
			MISSING_CASE(id);
			return -EINVAL;
		}

		uprop += 2;
	}

	return 0;
}

/**
 * i915_perf_open_ioctl - DRM ioctl() for userspace to open a stream FD
 * @dev: drm device
 * @data: ioctl data copied from userspace (unvalidated)
 * @file: drm file
 *
 * Validates the stream open parameters given by userspace including flags
 * and an array of u64 key, value pair properties.
 *
 * Very little is assumed up front about the nature of the stream being
 * opened (for instance we don't assume it's for periodic OA unit metrics). An
 * i915-perf stream is expected to be a suitable interface for other forms of
 * buffered data written by the GPU besides periodic OA metrics.
 *
 * Note we copy the properties from userspace outside of the i915 perf
 * mutex to avoid an awkward lockdep with mmap_lock.
 *
 * Most of the implementation details are handled by
 * i915_perf_open_ioctl_locked() after taking the gt->perf.lock
 * mutex for serializing with any non-file-operation driver hooks.
 *
 * Return: A newly opened i915 perf stream file descriptor or negative
 * error code on failure.
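 *
 * As a hedged example (config_id and the exponent value are placeholders,
 * not recommendations), a periodic, system-wide OA stream might be opened
 * with::
 *
 *	u64 props[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, config_id,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *		.num_properties = ARRAY_SIZE(props) / 2,
 *		.properties_ptr = (uintptr_t)props,
 *	};
 *	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);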
 */
int i915_perf_open_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct i915_perf *perf = &to_i915(dev)->perf;
	struct drm_i915_perf_open_param *param = data;
	struct intel_gt *gt;
	struct perf_open_properties props;
	u32 known_open_flags;
	int ret;

	if (!perf->i915) {
		drm_dbg(&perf->i915->drm,
			"i915 perf interface not available for this system\n");
		return -ENOTSUPP;
	}

	known_open_flags = I915_PERF_FLAG_FD_CLOEXEC |
			   I915_PERF_FLAG_FD_NONBLOCK |
			   I915_PERF_FLAG_DISABLED;
	if (param->flags & ~known_open_flags) {
		drm_dbg(&perf->i915->drm,
			"Unknown drm_i915_perf_open_param flag\n");
		return -EINVAL;
	}

	ret = read_properties_unlocked(perf,
				       u64_to_user_ptr(param->properties_ptr),
				       param->num_properties,
				       &props);
	if (ret)
		return ret;

	gt = props.engine->gt;

	mutex_lock(&gt->perf.lock);
	ret = i915_perf_open_ioctl_locked(perf, param, &props, file);
	mutex_unlock(&gt->perf.lock);

	return ret;
}

/**
 * i915_perf_register - exposes i915-perf to userspace
 * @i915: i915 device instance
 *
 * In particular OA metric sets are advertised under a sysfs metrics/
 * directory allowing userspace to enumerate valid IDs that can be
 * used to open an i915-perf stream.
 */
void i915_perf_register(struct drm_i915_private *i915)
{
	struct i915_perf *perf = &i915->perf;
	struct intel_gt *gt = to_gt(i915);

	if (!perf->i915)
		return;

	/* To be sure we're synchronized with an attempted
	 * i915_perf_open_ioctl(); considering that we register after
	 * being exposed to userspace.
	 */
	mutex_lock(&gt->perf.lock);

	perf->metrics_kobj =
		kobject_create_and_add("metrics",
				       &i915->drm.primary->kdev->kobj);

	mutex_unlock(&gt->perf.lock);
}

/**
 * i915_perf_unregister - hide i915-perf from userspace
 * @i915: i915 device instance
 *
 * i915-perf state cleanup is split up into an 'unregister' and
 * 'deinit' phase where the interface is first hidden from
 * userspace by i915_perf_unregister() before cleaning up
 * remaining state in i915_perf_fini().
 */
void i915_perf_unregister(struct drm_i915_private *i915)
{
	struct i915_perf *perf = &i915->perf;

	if (!perf->metrics_kobj)
		return;

	kobject_put(perf->metrics_kobj);
	perf->metrics_kobj = NULL;
}

static bool gen8_is_valid_flex_addr(struct i915_perf *perf, u32 addr)
{
	static const i915_reg_t flex_eu_regs[] = {
		EU_PERF_CNTL0,
		EU_PERF_CNTL1,
		EU_PERF_CNTL2,
		EU_PERF_CNTL3,
		EU_PERF_CNTL4,
		EU_PERF_CNTL5,
		EU_PERF_CNTL6,
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(flex_eu_regs); i++) {
		if (i915_mmio_reg_offset(flex_eu_regs[i]) == addr)
			return true;
	}
	return false;
}

static bool reg_in_range_table(u32 addr, const struct i915_range *table)
{
	while (table->start || table->end) {
		if (addr >= table->start && addr <= table->end)
			return true;

		table++;
	}

	return false;
}

#define REG_EQUAL(addr, mmio) \
	((addr) == i915_mmio_reg_offset(mmio))

static const struct i915_range gen7_oa_b_counters[] = {
	{ .start = 0x2710, .end = 0x272c },	/* OASTARTTRIG[1-8] */
	{ .start = 0x2740, .end = 0x275c },	/* OAREPORTTRIG[1-8] */
	{ .start = 0x2770, .end = 0x27ac },	/* OACEC[0-7][0-1] */
	{}
};

static const struct i915_range gen12_oa_b_counters[] = {
	{ .start = 0x2b2c, .end = 0x2b2c },	/* GEN12_OAG_OA_PESS */
	{ .start = 0xd900, .end = 0xd91c },	/* GEN12_OAG_OASTARTTRIG[1-8] */
	{ .start = 0xd920, .end = 0xd93c },	/* GEN12_OAG_OAREPORTTRIG1[1-8] */
	{ .start = 0xd940, .end = 0xd97c },	/* GEN12_OAG_CEC[0-7][0-1] */
	{ .start = 0xdc00, .end = 0xdc3c },	/* GEN12_OAG_SCEC[0-7][0-1] */
	{ .start = 0xdc40, .end = 0xdc40 },	/* GEN12_OAG_SPCTR_CNF */
	{ .start = 0xdc44, .end = 0xdc44 },	/* GEN12_OAA_DBG_REG */
	{}
};

static const struct i915_range xehp_oa_b_counters[] = {
	{ .start = 0xdc48, .end = 0xdc48 },	/* OAA_ENABLE_REG */
	{ .start = 0xdd00, .end = 0xdd48 },	/* OAG_LCE0_0 - OAA_LENABLE_REG */
	{}	/* sentinel was missing; reg_in_range_table() stops at a zeroed entry */
};

static const struct i915_range gen7_oa_mux_regs[] = {
	{ .start = 0x91b8, .end = 0x91cc },	/* OA_PERFCNT[1-2], OA_PERFMATRIX */
	{ .start = 0x9800, .end = 0x9888 },	/* MICRO_BP0_0 - NOA_WRITE */
	{ .start = 0xe180, .end = 0xe180 },	/* HALF_SLICE_CHICKEN2 */
	{}
};

static const struct i915_range hsw_oa_mux_regs[] = {
	{ .start = 0x09e80, .end = 0x09ea4 },	/* HSW_MBVID2_NOA[0-9] */
	{ .start = 0x09ec0, .end = 0x09ec0 },	/* HSW_MBVID2_MISR0 */
	{ .start = 0x25100, .end = 0x2ff90 },
	{}
};

static const struct i915_range chv_oa_mux_regs[] = {
	{ .start = 0x182300, .end = 0x1823a4 },
	{}
};

static const struct i915_range gen8_oa_mux_regs[] = {
	{ .start = 0x0d00, .end = 0x0d2c },	/* RPM_CONFIG[0-1], NOA_CONFIG[0-8] */
	{ .start = 0x20cc, .end = 0x20cc },	/* WAIT_FOR_RC6_EXIT */
	{}
};

static const struct i915_range gen11_oa_mux_regs[] = {
	{ .start = 0x91c8, .end = 0x91dc },	/* OA_PERFCNT[3-4] */
	{}
};

static const struct i915_range gen12_oa_mux_regs[] = {
	{ .start = 0x0d00, .end = 0x0d04 },	/* RPM_CONFIG[0-1] */
	{ .start = 0x0d0c, .end = 0x0d2c },	/* NOA_CONFIG[0-8] */
	{ .start = 0x9840, .end = 0x9840 },	/* GDT_CHICKEN_BITS */
	{ .start = 0x9884, .end = 0x9888 },	/* NOA_WRITE */
	{ .start = 0x20cc, .end = 0x20cc },	/* WAIT_FOR_RC6_EXIT */
	{}
};
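
/*
 * Note: every range table above is an allow-list consumed by
 * reg_in_range_table(), which walks entries until it reaches the
 * zero-initialised "{}" sentinel; a table without that sentinel would be
 * scanned past its end.
 */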

/*
 * Ref: 14010536224:
 * 0x20cc is repurposed on MTL, so use a separate array for MTL.
 */
static const struct i915_range mtl_oa_mux_regs[] = {
	{ .start = 0x0d00, .end = 0x0d04 },	/* RPM_CONFIG[0-1] */
	{ .start = 0x0d0c, .end = 0x0d2c },	/* NOA_CONFIG[0-8] */
	{ .start = 0x9840, .end = 0x9840 },	/* GDT_CHICKEN_BITS */
	{ .start = 0x9884, .end = 0x9888 },	/* NOA_WRITE */
	{}	/* sentinel was missing; reg_in_range_table() stops at a zeroed entry */
};

static bool gen7_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
{
	return reg_in_range_table(addr, gen7_oa_b_counters);
}

static bool gen8_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
	return reg_in_range_table(addr, gen7_oa_mux_regs) ||
	       reg_in_range_table(addr, gen8_oa_mux_regs);
}

static bool gen11_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
	return reg_in_range_table(addr, gen7_oa_mux_regs) ||
	       reg_in_range_table(addr, gen8_oa_mux_regs) ||
	       reg_in_range_table(addr, gen11_oa_mux_regs);
}

static bool hsw_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
	return reg_in_range_table(addr, gen7_oa_mux_regs) ||
	       reg_in_range_table(addr, hsw_oa_mux_regs);
}

static bool chv_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
	return reg_in_range_table(addr, gen7_oa_mux_regs) ||
	       reg_in_range_table(addr, chv_oa_mux_regs);
}

static bool gen12_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
{
	return reg_in_range_table(addr, gen12_oa_b_counters);
}

static bool xehp_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
{
	return reg_in_range_table(addr, xehp_oa_b_counters) ||
	       reg_in_range_table(addr, gen12_oa_b_counters);
}

static bool gen12_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
	if (IS_METEORLAKE(perf->i915))
		return reg_in_range_table(addr, mtl_oa_mux_regs);
	else
		return reg_in_range_table(addr, gen12_oa_mux_regs);
}

static u32 mask_reg_value(u32 reg, u32 val)
{
	/* HALF_SLICE_CHICKEN2 is programmed by the
	 * WaDisableSTUnitPowerOptimization workaround. Make sure the value
	 * programmed by userspace doesn't change this.
	 */
	if (REG_EQUAL(reg, HALF_SLICE_CHICKEN2))
		val = val & ~_MASKED_BIT_ENABLE(GEN8_ST_PO_DISABLE);

	/* WAIT_FOR_RC6_EXIT has only one bit fulfilling the function
	 * indicated by its name and a bunch of selection fields used by OA
	 * configs.
	 */
	if (REG_EQUAL(reg, WAIT_FOR_RC6_EXIT))
		val = val & ~_MASKED_BIT_ENABLE(HSW_WAIT_FOR_RC6_EXIT_ENABLE);

	return val;
}

static struct i915_oa_reg *alloc_oa_regs(struct i915_perf *perf,
					 bool (*is_valid)(struct i915_perf *perf, u32 addr),
					 u32 __user *regs,
					 u32 n_regs)
{
	struct i915_oa_reg *oa_regs;
	int err;
	u32 i;

	if (!n_regs)
		return NULL;

	/* No is_valid function means we're not allowing any register to be
	 * programmed.
	 */
	GEM_BUG_ON(!is_valid);
	if (!is_valid)
		return ERR_PTR(-EINVAL);

	oa_regs = kmalloc_array(n_regs, sizeof(*oa_regs), GFP_KERNEL);
	if (!oa_regs)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < n_regs; i++) {
		u32 addr, value;

		err = get_user(addr, regs);
		if (err)
			goto addr_err;

		if (!is_valid(perf, addr)) {
			drm_dbg(&perf->i915->drm,
				"Invalid oa_reg address: %X\n", addr);
			err = -EINVAL;
			goto addr_err;
		}

		err = get_user(value, regs + 1);
		if (err)
			goto addr_err;

		oa_regs[i].addr = _MMIO(addr);
		oa_regs[i].value = mask_reg_value(addr, value);

		regs += 2;
	}

	return oa_regs;

addr_err:
	kfree(oa_regs);
	return ERR_PTR(err);
}

static ssize_t show_dynamic_id(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	struct i915_oa_config *oa_config =
		container_of(attr, typeof(*oa_config), sysfs_metric_id);

	return sprintf(buf, "%d\n", oa_config->id);
}

static int create_dynamic_oa_sysfs_entry(struct i915_perf *perf,
					 struct i915_oa_config *oa_config)
{
	sysfs_attr_init(&oa_config->sysfs_metric_id.attr);
	oa_config->sysfs_metric_id.attr.name = "id";
	oa_config->sysfs_metric_id.attr.mode = S_IRUGO;
	oa_config->sysfs_metric_id.show = show_dynamic_id;
	oa_config->sysfs_metric_id.store = NULL;

	oa_config->attrs[0] = &oa_config->sysfs_metric_id.attr;
	oa_config->attrs[1] = NULL;

	oa_config->sysfs_metric.name = oa_config->uuid;
	oa_config->sysfs_metric.attrs = oa_config->attrs;

	return sysfs_create_group(perf->metrics_kobj,
				  &oa_config->sysfs_metric);
}

/**
 * i915_perf_add_config_ioctl - DRM ioctl() for userspace to add a new OA config
 * @dev: drm device
 * @data: ioctl data (pointer to struct drm_i915_perf_oa_config) copied from
 *        userspace (unvalidated)
 * @file: drm file
 *
 * Validates the submitted OA registers to be saved into a new OA config that
 * can then be used for programming the OA unit and its NOA network.
 *
 * Returns: A newly allocated config number to be used with the perf open
 * ioctl or a negative error code on failure.
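 *
 * A hedged sketch of the userspace side (the uuid and the mux_regs array
 * are placeholders); each register list is an array of (address, value)
 * u32 pairs::
 *
 *	struct drm_i915_perf_oa_config config = { 0 };
 *
 *	memcpy(config.uuid, "01234567-89ab-cdef-0123-456789abcdef", 36);
 *	config.n_mux_regs = n_mux;
 *	config.mux_regs_ptr = (uintptr_t)mux_regs;
 *	config_id = ioctl(drm_fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &config);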
 */
int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct i915_perf *perf = &to_i915(dev)->perf;
	struct drm_i915_perf_oa_config *args = data;
	struct i915_oa_config *oa_config, *tmp;
	struct i915_oa_reg *regs;
	int err, id;

	if (!perf->i915) {
		drm_dbg(&perf->i915->drm,
			"i915 perf interface not available for this system\n");
		return -ENOTSUPP;
	}

	if (!perf->metrics_kobj) {
		drm_dbg(&perf->i915->drm,
			"OA metrics weren't advertised via sysfs\n");
		return -EINVAL;
	}

	if (i915_perf_stream_paranoid && !perfmon_capable()) {
		drm_dbg(&perf->i915->drm,
			"Insufficient privileges to add i915 OA config\n");
		return -EACCES;
	}

	if ((!args->mux_regs_ptr || !args->n_mux_regs) &&
	    (!args->boolean_regs_ptr || !args->n_boolean_regs) &&
	    (!args->flex_regs_ptr || !args->n_flex_regs)) {
		drm_dbg(&perf->i915->drm,
			"No OA registers given\n");
		return -EINVAL;
	}

	oa_config = kzalloc(sizeof(*oa_config), GFP_KERNEL);
	if (!oa_config) {
		drm_dbg(&perf->i915->drm,
			"Failed to allocate memory for the OA config\n");
		return -ENOMEM;
	}

	oa_config->perf = perf;
	kref_init(&oa_config->ref);

	if (!uuid_is_valid(args->uuid)) {
		drm_dbg(&perf->i915->drm,
			"Invalid uuid format for OA config\n");
		err = -EINVAL;
		goto reg_err;
	}

	/* Last character in oa_config->uuid will be 0 because oa_config was
	 * kzalloc'd.
	 */
	memcpy(oa_config->uuid, args->uuid, sizeof(args->uuid));

	oa_config->mux_regs_len = args->n_mux_regs;
	regs = alloc_oa_regs(perf,
			     perf->ops.is_valid_mux_reg,
			     u64_to_user_ptr(args->mux_regs_ptr),
			     args->n_mux_regs);

	if (IS_ERR(regs)) {
		drm_dbg(&perf->i915->drm,
			"Failed to create OA config for mux_regs\n");
		err = PTR_ERR(regs);
		goto reg_err;
	}
	oa_config->mux_regs = regs;

	oa_config->b_counter_regs_len = args->n_boolean_regs;
	regs = alloc_oa_regs(perf,
			     perf->ops.is_valid_b_counter_reg,
			     u64_to_user_ptr(args->boolean_regs_ptr),
			     args->n_boolean_regs);

	if (IS_ERR(regs)) {
		drm_dbg(&perf->i915->drm,
			"Failed to create OA config for b_counter_regs\n");
		err = PTR_ERR(regs);
		goto reg_err;
	}
	oa_config->b_counter_regs = regs;

	if (GRAPHICS_VER(perf->i915) < 8) {
		if (args->n_flex_regs != 0) {
			err = -EINVAL;
			goto reg_err;
		}
	} else {
		oa_config->flex_regs_len = args->n_flex_regs;
		regs = alloc_oa_regs(perf,
				     perf->ops.is_valid_flex_reg,
				     u64_to_user_ptr(args->flex_regs_ptr),
				     args->n_flex_regs);

		if (IS_ERR(regs)) {
			drm_dbg(&perf->i915->drm,
				"Failed to create OA config for flex_regs\n");
			err = PTR_ERR(regs);
			goto reg_err;
		}
		oa_config->flex_regs = regs;
	}

	err = mutex_lock_interruptible(&perf->metrics_lock);
	if (err)
		goto reg_err;

	/* We shouldn't have too many configs, so this iteration shouldn't be
	 * too costly.
	 */
	idr_for_each_entry(&perf->metrics_idr, tmp, id) {
		if (!strcmp(tmp->uuid, oa_config->uuid)) {
			drm_dbg(&perf->i915->drm,
				"OA config already exists with this uuid\n");
			err = -EADDRINUSE;
			goto sysfs_err;
		}
	}

	err = create_dynamic_oa_sysfs_entry(perf, oa_config);
	if (err) {
		drm_dbg(&perf->i915->drm,
			"Failed to create sysfs entry for OA config\n");
		goto sysfs_err;
	}

	/* Config id 0 is invalid, id 1 is reserved for the kernel stored
	 * test config.
	 */
	oa_config->id = idr_alloc(&perf->metrics_idr,
				  oa_config, 2,
				  0, GFP_KERNEL);
	if (oa_config->id < 0) {
		drm_dbg(&perf->i915->drm,
			"Failed to allocate an ID for the OA config\n");
		err = oa_config->id;
		goto sysfs_err;
	}
	id = oa_config->id;

	drm_dbg(&perf->i915->drm,
		"Added config %s id=%i\n", oa_config->uuid, oa_config->id);
	mutex_unlock(&perf->metrics_lock);

	return id;

sysfs_err:
	mutex_unlock(&perf->metrics_lock);
reg_err:
	i915_oa_config_put(oa_config);
	drm_dbg(&perf->i915->drm,
		"Failed to add new OA config\n");
	return err;
}

/**
 * i915_perf_remove_config_ioctl - DRM ioctl() for userspace to remove an OA config
 * @dev: drm device
 * @data: ioctl data (pointer to u64 integer) copied from userspace
 * @file: drm file
 *
 * Configs can be removed while being used; they will stop appearing in sysfs
 * and their content will be freed when the stream using the config is closed.
 *
 * Returns: 0 on success or a negative error code on failure.
 */
int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct i915_perf *perf = &to_i915(dev)->perf;
	u64 *arg = data;
	struct i915_oa_config *oa_config;
	int ret;

	if (!perf->i915) {
		drm_dbg(&perf->i915->drm,
			"i915 perf interface not available for this system\n");
		return -ENOTSUPP;
	}

	if (i915_perf_stream_paranoid && !perfmon_capable()) {
		drm_dbg(&perf->i915->drm,
			"Insufficient privileges to remove i915 OA config\n");
		return -EACCES;
	}

	ret = mutex_lock_interruptible(&perf->metrics_lock);
	if (ret)
		return ret;

	oa_config = idr_find(&perf->metrics_idr, *arg);
	if (!oa_config) {
		drm_dbg(&perf->i915->drm,
			"Failed to remove unknown OA config\n");
		ret = -ENOENT;
		goto err_unlock;
	}

	GEM_BUG_ON(*arg != oa_config->id);

	sysfs_remove_group(perf->metrics_kobj, &oa_config->sysfs_metric);

	idr_remove(&perf->metrics_idr, *arg);

	mutex_unlock(&perf->metrics_lock);

	drm_dbg(&perf->i915->drm,
		"Removed config %s id=%i\n", oa_config->uuid, oa_config->id);

	i915_oa_config_put(oa_config);

	return 0;

err_unlock:
	mutex_unlock(&perf->metrics_lock);
	return ret;
}

static struct ctl_table oa_table[] = {
	{
		.procname = "perf_stream_paranoid",
		.data = &i915_perf_stream_paranoid,
		.maxlen = sizeof(i915_perf_stream_paranoid),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_ONE,
	},
	{
		.procname = "oa_max_sample_rate",
		.data = &i915_oa_max_sample_rate,
		.maxlen = sizeof(i915_oa_max_sample_rate),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = SYSCTL_ZERO,
		.extra2 =
			&oa_sample_rate_hard_limit,
	},
	{}
};

static void oa_init_supported_formats(struct i915_perf *perf)
{
	struct drm_i915_private *i915 = perf->i915;
	enum intel_platform platform = INTEL_INFO(i915)->platform;

	switch (platform) {
	case INTEL_HASWELL:
		oa_format_add(perf, I915_OA_FORMAT_A13);
		oa_format_add(perf, I915_OA_FORMAT_A29);
		oa_format_add(perf, I915_OA_FORMAT_A13_B8_C8);
		oa_format_add(perf, I915_OA_FORMAT_B4_C8);
		oa_format_add(perf, I915_OA_FORMAT_A45_B8_C8);
		oa_format_add(perf, I915_OA_FORMAT_B4_C8_A16);
		oa_format_add(perf, I915_OA_FORMAT_C4_B8);
		break;

	case INTEL_BROADWELL:
	case INTEL_CHERRYVIEW:
	case INTEL_SKYLAKE:
	case INTEL_BROXTON:
	case INTEL_KABYLAKE:
	case INTEL_GEMINILAKE:
	case INTEL_COFFEELAKE:
	case INTEL_COMETLAKE:
	case INTEL_ICELAKE:
	case INTEL_ELKHARTLAKE:
	case INTEL_JASPERLAKE:
	case INTEL_TIGERLAKE:
	case INTEL_ROCKETLAKE:
	case INTEL_DG1:
	case INTEL_ALDERLAKE_S:
	case INTEL_ALDERLAKE_P:
		oa_format_add(perf, I915_OA_FORMAT_A12);
		oa_format_add(perf, I915_OA_FORMAT_A12_B8_C8);
		oa_format_add(perf, I915_OA_FORMAT_A32u40_A4u32_B8_C8);
		oa_format_add(perf, I915_OA_FORMAT_C4_B8);
		break;

	case INTEL_DG2:
	case INTEL_METEORLAKE:
		oa_format_add(perf, I915_OAR_FORMAT_A32u40_A4u32_B8_C8);
		oa_format_add(perf, I915_OA_FORMAT_A24u40_A14u32_B8_C8);
		break;

	default:
		MISSING_CASE(platform);
	}
}

static void i915_perf_init_info(struct drm_i915_private *i915)
{
	struct i915_perf *perf = &i915->perf;

	switch (GRAPHICS_VER(i915)) {
	case 8:
		perf->ctx_oactxctrl_offset = 0x120;
		perf->ctx_flexeu0_offset = 0x2ce;
		perf->gen8_valid_ctx_bit = BIT(25);
		break;
	case 9:
		perf->ctx_oactxctrl_offset = 0x128;
		perf->ctx_flexeu0_offset = 0x3de;
		perf->gen8_valid_ctx_bit = BIT(16);
		break;
	case 11:
		perf->ctx_oactxctrl_offset = 0x124;
		perf->ctx_flexeu0_offset = 0x78e;
		perf->gen8_valid_ctx_bit = BIT(16);
		break;
	case 12:
		/*
		 * Calculate offset at runtime in oa_pin_context for gen12 and
		 * cache the value in perf->ctx_oactxctrl_offset.
		 */
		break;
	default:
		MISSING_CASE(GRAPHICS_VER(i915));
	}
}

/**
 * i915_perf_init - initialize i915-perf state on module bind
 * @i915: i915 device instance
 *
 * Initializes i915-perf state without exposing anything to userspace.
 *
 * Note: i915-perf initialization is split into an 'init' and 'register'
 * phase with the i915_perf_register() exposing state to userspace.
 */
void i915_perf_init(struct drm_i915_private *i915)
{
	struct i915_perf *perf = &i915->perf;

	perf->oa_formats = oa_formats;
	if (IS_HASWELL(i915)) {
		perf->ops.is_valid_b_counter_reg = gen7_is_valid_b_counter_addr;
		perf->ops.is_valid_mux_reg = hsw_is_valid_mux_addr;
		perf->ops.is_valid_flex_reg = NULL;
		perf->ops.enable_metric_set = hsw_enable_metric_set;
		perf->ops.disable_metric_set = hsw_disable_metric_set;
		perf->ops.oa_enable = gen7_oa_enable;
		perf->ops.oa_disable = gen7_oa_disable;
		perf->ops.read = gen7_oa_read;
		perf->ops.oa_hw_tail_read = gen7_oa_hw_tail_read;
	} else if (HAS_LOGICAL_RING_CONTEXTS(i915)) {
		/* Note that although we could theoretically also support the
		 * legacy ringbuffer mode on BDW (and earlier iterations of
		 * this driver, before upstreaming, did this) it didn't seem
		 * worth the complexity to maintain now that BDW+ enable
		 * execlist mode by default.
		 */
		perf->ops.read = gen8_oa_read;
		i915_perf_init_info(i915);

		if (IS_GRAPHICS_VER(i915, 8, 9)) {
			perf->ops.is_valid_b_counter_reg =
				gen7_is_valid_b_counter_addr;
			perf->ops.is_valid_mux_reg =
				gen8_is_valid_mux_addr;
			perf->ops.is_valid_flex_reg =
				gen8_is_valid_flex_addr;

			if (IS_CHERRYVIEW(i915)) {
				perf->ops.is_valid_mux_reg =
					chv_is_valid_mux_addr;
			}

			perf->ops.oa_enable = gen8_oa_enable;
			perf->ops.oa_disable = gen8_oa_disable;
			perf->ops.enable_metric_set = gen8_enable_metric_set;
			perf->ops.disable_metric_set = gen8_disable_metric_set;
			perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
		} else if (GRAPHICS_VER(i915) == 11) {
			perf->ops.is_valid_b_counter_reg =
				gen7_is_valid_b_counter_addr;
			perf->ops.is_valid_mux_reg =
				gen11_is_valid_mux_addr;
			perf->ops.is_valid_flex_reg =
				gen8_is_valid_flex_addr;

			perf->ops.oa_enable = gen8_oa_enable;
			perf->ops.oa_disable = gen8_oa_disable;
			perf->ops.enable_metric_set = gen8_enable_metric_set;
			perf->ops.disable_metric_set = gen11_disable_metric_set;
			perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
		} else if (GRAPHICS_VER(i915) == 12) {
			perf->ops.is_valid_b_counter_reg =
				HAS_OA_SLICE_CONTRIB_LIMITS(i915) ?
				xehp_is_valid_b_counter_addr :
				gen12_is_valid_b_counter_addr;
			perf->ops.is_valid_mux_reg =
				gen12_is_valid_mux_addr;
			perf->ops.is_valid_flex_reg =
				gen8_is_valid_flex_addr;

			perf->ops.oa_enable = gen12_oa_enable;
			perf->ops.oa_disable = gen12_oa_disable;
			perf->ops.enable_metric_set = gen12_enable_metric_set;
			perf->ops.disable_metric_set = gen12_disable_metric_set;
			perf->ops.oa_hw_tail_read = gen12_oa_hw_tail_read;
		}
	}

	if (perf->ops.enable_metric_set) {
		struct intel_gt *gt;
		int i;

		for_each_gt(gt, i915, i)
			mutex_init(&gt->perf.lock);

		/* Choose a representative limit */
		oa_sample_rate_hard_limit = to_gt(i915)->clock_frequency / 2;

		mutex_init(&perf->metrics_lock);
		idr_init_base(&perf->metrics_idr, 1);

		/* We set up some ratelimit state to potentially throttle any
		 * _NOTES about spurious, invalid OA reports which we don't
		 * forward to userspace.
		 *
		 * We print a _NOTE about any throttling when closing the
		 * stream instead of waiting until driver _fini which no one
		 * would ever see.
		 *
		 * Using the same limiting factors as printk_ratelimit()
		 */
		ratelimit_state_init(&perf->spurious_report_rs, 5 * HZ, 10);
		/* Since we use a DRM_NOTE for spurious reports it would be
		 * inconsistent to let __ratelimit() automatically print a
		 * warning for throttling.
		 */
		ratelimit_set_flags(&perf->spurious_report_rs,
				    RATELIMIT_MSG_ON_RELEASE);

		ratelimit_state_init(&perf->tail_pointer_race,
				     5 * HZ, 10);
		ratelimit_set_flags(&perf->tail_pointer_race,
				    RATELIMIT_MSG_ON_RELEASE);

		atomic64_set(&perf->noa_programming_delay,
			     500 * 1000 /* 500us */);

		perf->i915 = i915;

		oa_init_supported_formats(perf);
	}
}

static int destroy_config(int id, void *p, void *data)
{
	i915_oa_config_put(p);
	return 0;
}

int i915_perf_sysctl_register(void)
{
	sysctl_header = register_sysctl("dev/i915", oa_table);
	return 0;
}

void i915_perf_sysctl_unregister(void)
{
	unregister_sysctl_table(sysctl_header);
}

/**
 * i915_perf_fini - Counterpart to i915_perf_init()
 * @i915: i915 device instance
 */
void i915_perf_fini(struct drm_i915_private *i915)
{
	struct i915_perf *perf = &i915->perf;

	if (!perf->i915)
		return;

	idr_for_each(&perf->metrics_idr, destroy_config, perf);
	idr_destroy(&perf->metrics_idr);

	memset(&perf->ops, 0, sizeof(perf->ops));
	perf->i915 = NULL;
}

/**
 * i915_perf_ioctl_version - Version of the i915-perf subsystem
 *
 * This version number is used by userspace to detect available features.
 */
int i915_perf_ioctl_version(void)
{
	/*
	 * 1: Initial version
	 *   I915_PERF_IOCTL_ENABLE
	 *   I915_PERF_IOCTL_DISABLE
	 *
	 * 2: Added runtime modification of OA config.
	 *   I915_PERF_IOCTL_CONFIG
	 *
	 * 3: Add DRM_I915_PERF_PROP_HOLD_PREEMPTION parameter to hold
	 *    preemption on a particular context so that performance data is
	 *    accessible from a delta of MI_RPC reports without looking at the
	 *    OA buffer.
	 *
	 * 4: Add DRM_I915_PERF_PROP_GLOBAL_SSEU to limit what contexts can
	 *    be run for the duration of the performance recording based on
	 *    their SSEU configuration.
	 *
	 * 5: Add DRM_I915_PERF_PROP_POLL_OA_PERIOD parameter that controls the
	 *    interval for the hrtimer used to check for OA data.
	 */
	return 5;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_perf.c"
#endif