1 /* 2 * Copyright © 2015-2016 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 * Authors: 24 * Robert Bragg <robert@sixbynine.org> 25 */ 26 27 28 /** 29 * DOC: i915 Perf Overview 30 * 31 * Gen graphics supports a large number of performance counters that can help 32 * driver and application developers understand and optimize their use of the 33 * GPU. 34 * 35 * This i915 perf interface enables userspace to configure and open a file 36 * descriptor representing a stream of GPU metrics which can then be read() as 37 * a stream of sample records. 38 * 39 * The interface is particularly suited to exposing buffered metrics that are 40 * captured by DMA from the GPU, unsynchronized with and unrelated to the CPU. 41 * 42 * Streams representing a single context are accessible to applications with a 43 * corresponding drm file descriptor, such that OpenGL can use the interface 44 * without special privileges. Access to system-wide metrics requires root 45 * privileges by default, unless changed via the dev.i915.perf_event_paranoid 46 * sysctl option. 47 * 48 */ 49 50 /** 51 * DOC: i915 Perf History and Comparison with Core Perf 52 * 53 * The interface was initially inspired by the core Perf infrastructure but 54 * some notable differences are: 55 * 56 * i915 perf file descriptors represent a "stream" instead of an "event"; where 57 * a perf event primarily corresponds to a single 64bit value, while a stream 58 * might sample sets of tightly-coupled counters, depending on the 59 * configuration. For example the Gen OA unit isn't designed to support 60 * orthogonal configurations of individual counters; it's configured for a set 61 * of related counters. Samples for an i915 perf stream capturing OA metrics 62 * will include a set of counter values packed in a compact HW specific format. 63 * The OA unit supports a number of different packing formats which can be 64 * selected by the user opening the stream. Perf has support for grouping 65 * events, but each event in the group is configured, validated and 66 * authenticated individually with separate system calls. 67 * 68 * i915 perf stream configurations are provided as an array of u64 (key,value) 69 * pairs, instead of a fixed struct with multiple miscellaneous config members, 70 * interleaved with event-type specific members. 71 * 72 * i915 perf doesn't support exposing metrics via an mmap'd circular buffer. 73 * The supported metrics are being written to memory by the GPU unsynchronized 74 * with the CPU, using HW specific packing formats for counter sets. Sometimes 75 * the constraints on HW configuration require reports to be filtered before it 76 * would be acceptable to expose them to unprivileged applications - to hide 77 * the metrics of other processes/contexts. For these use cases a read() based 78 * interface is a good fit, and provides an opportunity to filter data as it 79 * gets copied from the GPU mapped buffers to userspace buffers. 80 * 81 * 82 * Issues hit with first prototype based on Core Perf 83 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 84 * 85 * The first prototype of this driver was based on the core perf 86 * infrastructure, and while we did make that mostly work, with some changes to 87 * perf, we found we were breaking or working around too many assumptions baked 88 * into perf's currently cpu centric design. 89 * 90 * In the end we didn't see a clear benefit to making perf's implementation and 91 * interface more complex by changing design assumptions while we knew we still 92 * wouldn't be able to use any existing perf based userspace tools. 93 * 94 * Also considering the Gen specific nature of the Observability hardware and 95 * how userspace will sometimes need to combine i915 perf OA metrics with 96 * side-band OA data captured via MI_REPORT_PERF_COUNT commands; we're 97 * expecting the interface to be used by a platform specific userspace such as 98 * OpenGL or tools. This is to say; we aren't inherently missing out on having 99 * a standard vendor/architecture agnostic interface by not using perf. 100 * 101 * 102 * For posterity, in case we might re-visit trying to adapt core perf to be 103 * better suited to exposing i915 metrics these were the main pain points we 104 * hit: 105 * 106 * - The perf based OA PMU driver broke some significant design assumptions: 107 * 108 * Existing perf pmus are used for profiling work on a cpu and we were 109 * introducing the idea of _IS_DEVICE pmus with different security 110 * implications, the need to fake cpu-related data (such as user/kernel 111 * registers) to fit with perf's current design, and adding _DEVICE records 112 * as a way to forward device-specific status records. 113 * 114 * The OA unit writes reports of counters into a circular buffer, without 115 * involvement from the CPU, making our PMU driver the first of a kind. 116 * 117 * Given the way we were periodically forward data from the GPU-mapped, OA 118 * buffer to perf's buffer, those bursts of sample writes looked to perf like 119 * we were sampling too fast and so we had to subvert its throttling checks. 120 * 121 * Perf supports groups of counters and allows those to be read via 122 * transactions internally but transactions currently seem designed to be 123 * explicitly initiated from the cpu (say in response to a userspace read()) 124 * and while we could pull a report out of the OA buffer we can't 125 * trigger a report from the cpu on demand. 126 * 127 * Related to being report based; the OA counters are configured in HW as a 128 * set while perf generally expects counter configurations to be orthogonal. 129 * Although counters can be associated with a group leader as they are 130 * opened, there's no clear precedent for being able to provide group-wide 131 * configuration attributes (for example we want to let userspace choose the 132 * OA unit report format used to capture all counters in a set, or specify a 133 * GPU context to filter metrics on). We avoided using perf's grouping 134 * feature and forwarded OA reports to userspace via perf's 'raw' sample 135 * field. This suited our userspace well considering how coupled the counters 136 * are when dealing with normalizing. It would be inconvenient to split 137 * counters up into separate events, only to require userspace to recombine 138 * them. For Mesa it's also convenient to be forwarded raw, periodic reports 139 * for combining with the side-band raw reports it captures using 140 * MI_REPORT_PERF_COUNT commands. 141 * 142 * - As a side note on perf's grouping feature; there was also some concern 143 * that using PERF_FORMAT_GROUP as a way to pack together counter values 144 * would quite drastically inflate our sample sizes, which would likely 145 * lower the effective sampling resolutions we could use when the available 146 * memory bandwidth is limited. 147 * 148 * With the OA unit's report formats, counters are packed together as 32 149 * or 40bit values, with the largest report size being 256 bytes. 150 * 151 * PERF_FORMAT_GROUP values are 64bit, but there doesn't appear to be a 152 * documented ordering to the values, implying PERF_FORMAT_ID must also be 153 * used to add a 64bit ID before each value; giving 16 bytes per counter. 154 * 155 * Related to counter orthogonality; we can't time share the OA unit, while 156 * event scheduling is a central design idea within perf for allowing 157 * userspace to open + enable more events than can be configured in HW at any 158 * one time. The OA unit is not designed to allow re-configuration while in 159 * use. We can't reconfigure the OA unit without losing internal OA unit 160 * state which we can't access explicitly to save and restore. Reconfiguring 161 * the OA unit is also relatively slow, involving ~100 register writes. From 162 * userspace Mesa also depends on a stable OA configuration when emitting 163 * MI_REPORT_PERF_COUNT commands and importantly the OA unit can't be 164 * disabled while there are outstanding MI_RPC commands lest we hang the 165 * command streamer. 166 * 167 * The contents of sample records aren't extensible by device drivers (i.e. 168 * the sample_type bits). As an example; Sourab Gupta had been looking to 169 * attach GPU timestamps to our OA samples. We were shoehorning OA reports 170 * into sample records by using the 'raw' field, but it's tricky to pack more 171 * than one thing into this field because events/core.c currently only lets a 172 * pmu give a single raw data pointer plus len which will be copied into the 173 * ring buffer. To include more than the OA report we'd have to copy the 174 * report into an intermediate larger buffer. I'd been considering allowing a 175 * vector of data+len values to be specified for copying the raw data, but 176 * it felt like a kludge to being using the raw field for this purpose. 177 * 178 * - It felt like our perf based PMU was making some technical compromises 179 * just for the sake of using perf: 180 * 181 * perf_event_open() requires events to either relate to a pid or a specific 182 * cpu core, while our device pmu related to neither. Events opened with a 183 * pid will be automatically enabled/disabled according to the scheduling of 184 * that process - so not appropriate for us. When an event is related to a 185 * cpu id, perf ensures pmu methods will be invoked via an inter process 186 * interrupt on that core. To avoid invasive changes our userspace opened OA 187 * perf events for a specific cpu. This was workable but it meant the 188 * majority of the OA driver ran in atomic context, including all OA report 189 * forwarding, which wasn't really necessary in our case and seems to make 190 * our locking requirements somewhat complex as we handled the interaction 191 * with the rest of the i915 driver. 192 */ 193 194 #include <linux/anon_inodes.h> 195 #include <linux/sizes.h> 196 #include <linux/uuid.h> 197 198 #include "gem/i915_gem_context.h" 199 #include "gem/i915_gem_internal.h" 200 #include "gt/intel_engine_pm.h" 201 #include "gt/intel_engine_regs.h" 202 #include "gt/intel_engine_user.h" 203 #include "gt/intel_execlists_submission.h" 204 #include "gt/intel_gpu_commands.h" 205 #include "gt/intel_gt.h" 206 #include "gt/intel_gt_clock_utils.h" 207 #include "gt/intel_gt_regs.h" 208 #include "gt/intel_lrc.h" 209 #include "gt/intel_lrc_reg.h" 210 #include "gt/intel_ring.h" 211 212 #include "i915_drv.h" 213 #include "i915_file_private.h" 214 #include "i915_perf.h" 215 #include "i915_perf_oa_regs.h" 216 #include "i915_reg.h" 217 218 /* HW requires this to be a power of two, between 128k and 16M, though driver 219 * is currently generally designed assuming the largest 16M size is used such 220 * that the overflow cases are unlikely in normal operation. 221 */ 222 #define OA_BUFFER_SIZE SZ_16M 223 224 #define OA_TAKEN(tail, head) ((tail - head) & (OA_BUFFER_SIZE - 1)) 225 226 /** 227 * DOC: OA Tail Pointer Race 228 * 229 * There's a HW race condition between OA unit tail pointer register updates and 230 * writes to memory whereby the tail pointer can sometimes get ahead of what's 231 * been written out to the OA buffer so far (in terms of what's visible to the 232 * CPU). 233 * 234 * Although this can be observed explicitly while copying reports to userspace 235 * by checking for a zeroed report-id field in tail reports, we want to account 236 * for this earlier, as part of the oa_buffer_check_unlocked to avoid lots of 237 * redundant read() attempts. 238 * 239 * We workaround this issue in oa_buffer_check_unlocked() by reading the reports 240 * in the OA buffer, starting from the tail reported by the HW until we find a 241 * report with its first 2 dwords not 0 meaning its previous report is 242 * completely in memory and ready to be read. Those dwords are also set to 0 243 * once read and the whole buffer is cleared upon OA buffer initialization. The 244 * first dword is the reason for this report while the second is the timestamp, 245 * making the chances of having those 2 fields at 0 fairly unlikely. A more 246 * detailed explanation is available in oa_buffer_check_unlocked(). 247 * 248 * Most of the implementation details for this workaround are in 249 * oa_buffer_check_unlocked() and _append_oa_reports() 250 * 251 * Note for posterity: previously the driver used to define an effective tail 252 * pointer that lagged the real pointer by a 'tail margin' measured in bytes 253 * derived from %OA_TAIL_MARGIN_NSEC and the configured sampling frequency. 254 * This was flawed considering that the OA unit may also automatically generate 255 * non-periodic reports (such as on context switch) or the OA unit may be 256 * enabled without any periodic sampling. 257 */ 258 #define OA_TAIL_MARGIN_NSEC 100000ULL 259 #define INVALID_TAIL_PTR 0xffffffff 260 261 /* The default frequency for checking whether the OA unit has written new 262 * reports to the circular OA buffer... 263 */ 264 #define DEFAULT_POLL_FREQUENCY_HZ 200 265 #define DEFAULT_POLL_PERIOD_NS (NSEC_PER_SEC / DEFAULT_POLL_FREQUENCY_HZ) 266 267 /* for sysctl proc_dointvec_minmax of dev.i915.perf_stream_paranoid */ 268 static u32 i915_perf_stream_paranoid = true; 269 270 /* The maximum exponent the hardware accepts is 63 (essentially it selects one 271 * of the 64bit timestamp bits to trigger reports from) but there's currently 272 * no known use case for sampling as infrequently as once per 47 thousand years. 273 * 274 * Since the timestamps included in OA reports are only 32bits it seems 275 * reasonable to limit the OA exponent where it's still possible to account for 276 * overflow in OA report timestamps. 277 */ 278 #define OA_EXPONENT_MAX 31 279 280 #define INVALID_CTX_ID 0xffffffff 281 282 /* On Gen8+ automatically triggered OA reports include a 'reason' field... */ 283 #define OAREPORT_REASON_MASK 0x3f 284 #define OAREPORT_REASON_MASK_EXTENDED 0x7f 285 #define OAREPORT_REASON_SHIFT 19 286 #define OAREPORT_REASON_TIMER (1<<0) 287 #define OAREPORT_REASON_CTX_SWITCH (1<<3) 288 #define OAREPORT_REASON_CLK_RATIO (1<<5) 289 290 291 /* For sysctl proc_dointvec_minmax of i915_oa_max_sample_rate 292 * 293 * The highest sampling frequency we can theoretically program the OA unit 294 * with is always half the timestamp frequency: E.g. 6.25Mhz for Haswell. 295 * 296 * Initialized just before we register the sysctl parameter. 297 */ 298 static int oa_sample_rate_hard_limit; 299 300 /* Theoretically we can program the OA unit to sample every 160ns but don't 301 * allow that by default unless root... 302 * 303 * The default threshold of 100000Hz is based on perf's similar 304 * kernel.perf_event_max_sample_rate sysctl parameter. 305 */ 306 static u32 i915_oa_max_sample_rate = 100000; 307 308 /* XXX: beware if future OA HW adds new report formats that the current 309 * code assumes all reports have a power-of-two size and ~(size - 1) can 310 * be used as a mask to align the OA tail pointer. 311 */ 312 static const struct i915_oa_format oa_formats[I915_OA_FORMAT_MAX] = { 313 [I915_OA_FORMAT_A13] = { 0, 64 }, 314 [I915_OA_FORMAT_A29] = { 1, 128 }, 315 [I915_OA_FORMAT_A13_B8_C8] = { 2, 128 }, 316 /* A29_B8_C8 Disallowed as 192 bytes doesn't factor into buffer size */ 317 [I915_OA_FORMAT_B4_C8] = { 4, 64 }, 318 [I915_OA_FORMAT_A45_B8_C8] = { 5, 256 }, 319 [I915_OA_FORMAT_B4_C8_A16] = { 6, 128 }, 320 [I915_OA_FORMAT_C4_B8] = { 7, 64 }, 321 [I915_OA_FORMAT_A12] = { 0, 64 }, 322 [I915_OA_FORMAT_A12_B8_C8] = { 2, 128 }, 323 [I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 }, 324 }; 325 326 #define SAMPLE_OA_REPORT (1<<0) 327 328 /** 329 * struct perf_open_properties - for validated properties given to open a stream 330 * @sample_flags: `DRM_I915_PERF_PROP_SAMPLE_*` properties are tracked as flags 331 * @single_context: Whether a single or all gpu contexts should be monitored 332 * @hold_preemption: Whether the preemption is disabled for the filtered 333 * context 334 * @ctx_handle: A gem ctx handle for use with @single_context 335 * @metrics_set: An ID for an OA unit metric set advertised via sysfs 336 * @oa_format: An OA unit HW report format 337 * @oa_periodic: Whether to enable periodic OA unit sampling 338 * @oa_period_exponent: The OA unit sampling period is derived from this 339 * @engine: The engine (typically rcs0) being monitored by the OA unit 340 * @has_sseu: Whether @sseu was specified by userspace 341 * @sseu: internal SSEU configuration computed either from the userspace 342 * specified configuration in the opening parameters or a default value 343 * (see get_default_sseu_config()) 344 * @poll_oa_period: The period in nanoseconds at which the CPU will check for OA 345 * data availability 346 * 347 * As read_properties_unlocked() enumerates and validates the properties given 348 * to open a stream of metrics the configuration is built up in the structure 349 * which starts out zero initialized. 350 */ 351 struct perf_open_properties { 352 u32 sample_flags; 353 354 u64 single_context:1; 355 u64 hold_preemption:1; 356 u64 ctx_handle; 357 358 /* OA sampling state */ 359 int metrics_set; 360 int oa_format; 361 bool oa_periodic; 362 int oa_period_exponent; 363 364 struct intel_engine_cs *engine; 365 366 bool has_sseu; 367 struct intel_sseu sseu; 368 369 u64 poll_oa_period; 370 }; 371 372 struct i915_oa_config_bo { 373 struct llist_node node; 374 375 struct i915_oa_config *oa_config; 376 struct i915_vma *vma; 377 }; 378 379 static struct ctl_table_header *sysctl_header; 380 381 static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer); 382 383 void i915_oa_config_release(struct kref *ref) 384 { 385 struct i915_oa_config *oa_config = 386 container_of(ref, typeof(*oa_config), ref); 387 388 kfree(oa_config->flex_regs); 389 kfree(oa_config->b_counter_regs); 390 kfree(oa_config->mux_regs); 391 392 kfree_rcu(oa_config, rcu); 393 } 394 395 struct i915_oa_config * 396 i915_perf_get_oa_config(struct i915_perf *perf, int metrics_set) 397 { 398 struct i915_oa_config *oa_config; 399 400 rcu_read_lock(); 401 oa_config = idr_find(&perf->metrics_idr, metrics_set); 402 if (oa_config) 403 oa_config = i915_oa_config_get(oa_config); 404 rcu_read_unlock(); 405 406 return oa_config; 407 } 408 409 static void free_oa_config_bo(struct i915_oa_config_bo *oa_bo) 410 { 411 i915_oa_config_put(oa_bo->oa_config); 412 i915_vma_put(oa_bo->vma); 413 kfree(oa_bo); 414 } 415 416 static u32 gen12_oa_hw_tail_read(struct i915_perf_stream *stream) 417 { 418 struct intel_uncore *uncore = stream->uncore; 419 420 return intel_uncore_read(uncore, GEN12_OAG_OATAILPTR) & 421 GEN12_OAG_OATAILPTR_MASK; 422 } 423 424 static u32 gen8_oa_hw_tail_read(struct i915_perf_stream *stream) 425 { 426 struct intel_uncore *uncore = stream->uncore; 427 428 return intel_uncore_read(uncore, GEN8_OATAILPTR) & GEN8_OATAILPTR_MASK; 429 } 430 431 static u32 gen7_oa_hw_tail_read(struct i915_perf_stream *stream) 432 { 433 struct intel_uncore *uncore = stream->uncore; 434 u32 oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1); 435 436 return oastatus1 & GEN7_OASTATUS1_TAIL_MASK; 437 } 438 439 /** 440 * oa_buffer_check_unlocked - check for data and update tail ptr state 441 * @stream: i915 stream instance 442 * 443 * This is either called via fops (for blocking reads in user ctx) or the poll 444 * check hrtimer (atomic ctx) to check the OA buffer tail pointer and check 445 * if there is data available for userspace to read. 446 * 447 * This function is central to providing a workaround for the OA unit tail 448 * pointer having a race with respect to what data is visible to the CPU. 449 * It is responsible for reading tail pointers from the hardware and giving 450 * the pointers time to 'age' before they are made available for reading. 451 * (See description of OA_TAIL_MARGIN_NSEC above for further details.) 452 * 453 * Besides returning true when there is data available to read() this function 454 * also updates the tail, aging_tail and aging_timestamp in the oa_buffer 455 * object. 456 * 457 * Note: It's safe to read OA config state here unlocked, assuming that this is 458 * only called while the stream is enabled, while the global OA configuration 459 * can't be modified. 460 * 461 * Returns: %true if the OA buffer contains data, else %false 462 */ 463 static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream) 464 { 465 u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma); 466 int report_size = stream->oa_buffer.format_size; 467 unsigned long flags; 468 bool pollin; 469 u32 hw_tail; 470 u64 now; 471 472 /* We have to consider the (unlikely) possibility that read() errors 473 * could result in an OA buffer reset which might reset the head and 474 * tail state. 475 */ 476 spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags); 477 478 hw_tail = stream->perf->ops.oa_hw_tail_read(stream); 479 480 /* The tail pointer increases in 64 byte increments, 481 * not in report_size steps... 482 */ 483 hw_tail &= ~(report_size - 1); 484 485 now = ktime_get_mono_fast_ns(); 486 487 if (hw_tail == stream->oa_buffer.aging_tail && 488 (now - stream->oa_buffer.aging_timestamp) > OA_TAIL_MARGIN_NSEC) { 489 /* If the HW tail hasn't move since the last check and the HW 490 * tail has been aging for long enough, declare it the new 491 * tail. 492 */ 493 stream->oa_buffer.tail = stream->oa_buffer.aging_tail; 494 } else { 495 u32 head, tail, aged_tail; 496 497 /* NB: The head we observe here might effectively be a little 498 * out of date. If a read() is in progress, the head could be 499 * anywhere between this head and stream->oa_buffer.tail. 500 */ 501 head = stream->oa_buffer.head - gtt_offset; 502 aged_tail = stream->oa_buffer.tail - gtt_offset; 503 504 hw_tail -= gtt_offset; 505 tail = hw_tail; 506 507 /* Walk the stream backward until we find a report with dword 0 508 * & 1 not at 0. Since the circular buffer pointers progress by 509 * increments of 64 bytes and that reports can be up to 256 510 * bytes long, we can't tell whether a report has fully landed 511 * in memory before the first 2 dwords of the following report 512 * have effectively landed. 513 * 514 * This is assuming that the writes of the OA unit land in 515 * memory in the order they were written to. 516 * If not : (╯°□°)╯︵ ┻━┻ 517 */ 518 while (OA_TAKEN(tail, aged_tail) >= report_size) { 519 u32 *report32 = (void *)(stream->oa_buffer.vaddr + tail); 520 521 if (report32[0] != 0 || report32[1] != 0) 522 break; 523 524 tail = (tail - report_size) & (OA_BUFFER_SIZE - 1); 525 } 526 527 if (OA_TAKEN(hw_tail, tail) > report_size && 528 __ratelimit(&stream->perf->tail_pointer_race)) 529 DRM_NOTE("unlanded report(s) head=0x%x " 530 "tail=0x%x hw_tail=0x%x\n", 531 head, tail, hw_tail); 532 533 stream->oa_buffer.tail = gtt_offset + tail; 534 stream->oa_buffer.aging_tail = gtt_offset + hw_tail; 535 stream->oa_buffer.aging_timestamp = now; 536 } 537 538 pollin = OA_TAKEN(stream->oa_buffer.tail - gtt_offset, 539 stream->oa_buffer.head - gtt_offset) >= report_size; 540 541 spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags); 542 543 return pollin; 544 } 545 546 /** 547 * append_oa_status - Appends a status record to a userspace read() buffer. 548 * @stream: An i915-perf stream opened for OA metrics 549 * @buf: destination buffer given by userspace 550 * @count: the number of bytes userspace wants to read 551 * @offset: (inout): the current position for writing into @buf 552 * @type: The kind of status to report to userspace 553 * 554 * Writes a status record (such as `DRM_I915_PERF_RECORD_OA_REPORT_LOST`) 555 * into the userspace read() buffer. 556 * 557 * The @buf @offset will only be updated on success. 558 * 559 * Returns: 0 on success, negative error code on failure. 560 */ 561 static int append_oa_status(struct i915_perf_stream *stream, 562 char __user *buf, 563 size_t count, 564 size_t *offset, 565 enum drm_i915_perf_record_type type) 566 { 567 struct drm_i915_perf_record_header header = { type, 0, sizeof(header) }; 568 569 if ((count - *offset) < header.size) 570 return -ENOSPC; 571 572 if (copy_to_user(buf + *offset, &header, sizeof(header))) 573 return -EFAULT; 574 575 (*offset) += header.size; 576 577 return 0; 578 } 579 580 /** 581 * append_oa_sample - Copies single OA report into userspace read() buffer. 582 * @stream: An i915-perf stream opened for OA metrics 583 * @buf: destination buffer given by userspace 584 * @count: the number of bytes userspace wants to read 585 * @offset: (inout): the current position for writing into @buf 586 * @report: A single OA report to (optionally) include as part of the sample 587 * 588 * The contents of a sample are configured through `DRM_I915_PERF_PROP_SAMPLE_*` 589 * properties when opening a stream, tracked as `stream->sample_flags`. This 590 * function copies the requested components of a single sample to the given 591 * read() @buf. 592 * 593 * The @buf @offset will only be updated on success. 594 * 595 * Returns: 0 on success, negative error code on failure. 596 */ 597 static int append_oa_sample(struct i915_perf_stream *stream, 598 char __user *buf, 599 size_t count, 600 size_t *offset, 601 const u8 *report) 602 { 603 int report_size = stream->oa_buffer.format_size; 604 struct drm_i915_perf_record_header header; 605 606 header.type = DRM_I915_PERF_RECORD_SAMPLE; 607 header.pad = 0; 608 header.size = stream->sample_size; 609 610 if ((count - *offset) < header.size) 611 return -ENOSPC; 612 613 buf += *offset; 614 if (copy_to_user(buf, &header, sizeof(header))) 615 return -EFAULT; 616 buf += sizeof(header); 617 618 if (copy_to_user(buf, report, report_size)) 619 return -EFAULT; 620 621 (*offset) += header.size; 622 623 return 0; 624 } 625 626 /** 627 * gen8_append_oa_reports - Copies all buffered OA reports into 628 * userspace read() buffer. 629 * @stream: An i915-perf stream opened for OA metrics 630 * @buf: destination buffer given by userspace 631 * @count: the number of bytes userspace wants to read 632 * @offset: (inout): the current position for writing into @buf 633 * 634 * Notably any error condition resulting in a short read (-%ENOSPC or 635 * -%EFAULT) will be returned even though one or more records may 636 * have been successfully copied. In this case it's up to the caller 637 * to decide if the error should be squashed before returning to 638 * userspace. 639 * 640 * Note: reports are consumed from the head, and appended to the 641 * tail, so the tail chases the head?... If you think that's mad 642 * and back-to-front you're not alone, but this follows the 643 * Gen PRM naming convention. 644 * 645 * Returns: 0 on success, negative error code on failure. 646 */ 647 static int gen8_append_oa_reports(struct i915_perf_stream *stream, 648 char __user *buf, 649 size_t count, 650 size_t *offset) 651 { 652 struct intel_uncore *uncore = stream->uncore; 653 int report_size = stream->oa_buffer.format_size; 654 u8 *oa_buf_base = stream->oa_buffer.vaddr; 655 u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma); 656 u32 mask = (OA_BUFFER_SIZE - 1); 657 size_t start_offset = *offset; 658 unsigned long flags; 659 u32 head, tail; 660 u32 taken; 661 int ret = 0; 662 663 if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled)) 664 return -EIO; 665 666 spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags); 667 668 head = stream->oa_buffer.head; 669 tail = stream->oa_buffer.tail; 670 671 spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags); 672 673 /* 674 * NB: oa_buffer.head/tail include the gtt_offset which we don't want 675 * while indexing relative to oa_buf_base. 676 */ 677 head -= gtt_offset; 678 tail -= gtt_offset; 679 680 /* 681 * An out of bounds or misaligned head or tail pointer implies a driver 682 * bug since we validate + align the tail pointers we read from the 683 * hardware and we are in full control of the head pointer which should 684 * only be incremented by multiples of the report size (notably also 685 * all a power of two). 686 */ 687 if (drm_WARN_ONCE(&uncore->i915->drm, 688 head > OA_BUFFER_SIZE || head % report_size || 689 tail > OA_BUFFER_SIZE || tail % report_size, 690 "Inconsistent OA buffer pointers: head = %u, tail = %u\n", 691 head, tail)) 692 return -EIO; 693 694 695 for (/* none */; 696 (taken = OA_TAKEN(tail, head)); 697 head = (head + report_size) & mask) { 698 u8 *report = oa_buf_base + head; 699 u32 *report32 = (void *)report; 700 u32 ctx_id; 701 u32 reason; 702 703 /* 704 * All the report sizes factor neatly into the buffer 705 * size so we never expect to see a report split 706 * between the beginning and end of the buffer. 707 * 708 * Given the initial alignment check a misalignment 709 * here would imply a driver bug that would result 710 * in an overrun. 711 */ 712 if (drm_WARN_ON(&uncore->i915->drm, 713 (OA_BUFFER_SIZE - head) < report_size)) { 714 drm_err(&uncore->i915->drm, 715 "Spurious OA head ptr: non-integral report offset\n"); 716 break; 717 } 718 719 /* 720 * The reason field includes flags identifying what 721 * triggered this specific report (mostly timer 722 * triggered or e.g. due to a context switch). 723 * 724 * This field is never expected to be zero so we can 725 * check that the report isn't invalid before copying 726 * it to userspace... 727 */ 728 reason = ((report32[0] >> OAREPORT_REASON_SHIFT) & 729 (GRAPHICS_VER(stream->perf->i915) == 12 ? 730 OAREPORT_REASON_MASK_EXTENDED : 731 OAREPORT_REASON_MASK)); 732 733 ctx_id = report32[2] & stream->specific_ctx_id_mask; 734 735 /* 736 * Squash whatever is in the CTX_ID field if it's marked as 737 * invalid to be sure we avoid false-positive, single-context 738 * filtering below... 739 * 740 * Note: that we don't clear the valid_ctx_bit so userspace can 741 * understand that the ID has been squashed by the kernel. 742 */ 743 if (!(report32[0] & stream->perf->gen8_valid_ctx_bit) && 744 GRAPHICS_VER(stream->perf->i915) <= 11) 745 ctx_id = report32[2] = INVALID_CTX_ID; 746 747 /* 748 * NB: For Gen 8 the OA unit no longer supports clock gating 749 * off for a specific context and the kernel can't securely 750 * stop the counters from updating as system-wide / global 751 * values. 752 * 753 * Automatic reports now include a context ID so reports can be 754 * filtered on the cpu but it's not worth trying to 755 * automatically subtract/hide counter progress for other 756 * contexts while filtering since we can't stop userspace 757 * issuing MI_REPORT_PERF_COUNT commands which would still 758 * provide a side-band view of the real values. 759 * 760 * To allow userspace (such as Mesa/GL_INTEL_performance_query) 761 * to normalize counters for a single filtered context then it 762 * needs be forwarded bookend context-switch reports so that it 763 * can track switches in between MI_REPORT_PERF_COUNT commands 764 * and can itself subtract/ignore the progress of counters 765 * associated with other contexts. Note that the hardware 766 * automatically triggers reports when switching to a new 767 * context which are tagged with the ID of the newly active 768 * context. To avoid the complexity (and likely fragility) of 769 * reading ahead while parsing reports to try and minimize 770 * forwarding redundant context switch reports (i.e. between 771 * other, unrelated contexts) we simply elect to forward them 772 * all. 773 * 774 * We don't rely solely on the reason field to identify context 775 * switches since it's not-uncommon for periodic samples to 776 * identify a switch before any 'context switch' report. 777 */ 778 if (!stream->perf->exclusive_stream->ctx || 779 stream->specific_ctx_id == ctx_id || 780 stream->oa_buffer.last_ctx_id == stream->specific_ctx_id || 781 reason & OAREPORT_REASON_CTX_SWITCH) { 782 783 /* 784 * While filtering for a single context we avoid 785 * leaking the IDs of other contexts. 786 */ 787 if (stream->perf->exclusive_stream->ctx && 788 stream->specific_ctx_id != ctx_id) { 789 report32[2] = INVALID_CTX_ID; 790 } 791 792 ret = append_oa_sample(stream, buf, count, offset, 793 report); 794 if (ret) 795 break; 796 797 stream->oa_buffer.last_ctx_id = ctx_id; 798 } 799 800 /* 801 * Clear out the first 2 dword as a mean to detect unlanded 802 * reports. 803 */ 804 report32[0] = 0; 805 report32[1] = 0; 806 } 807 808 if (start_offset != *offset) { 809 i915_reg_t oaheadptr; 810 811 oaheadptr = GRAPHICS_VER(stream->perf->i915) == 12 ? 812 GEN12_OAG_OAHEADPTR : GEN8_OAHEADPTR; 813 814 spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags); 815 816 /* 817 * We removed the gtt_offset for the copy loop above, indexing 818 * relative to oa_buf_base so put back here... 819 */ 820 head += gtt_offset; 821 intel_uncore_write(uncore, oaheadptr, 822 head & GEN12_OAG_OAHEADPTR_MASK); 823 stream->oa_buffer.head = head; 824 825 spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags); 826 } 827 828 return ret; 829 } 830 831 /** 832 * gen8_oa_read - copy status records then buffered OA reports 833 * @stream: An i915-perf stream opened for OA metrics 834 * @buf: destination buffer given by userspace 835 * @count: the number of bytes userspace wants to read 836 * @offset: (inout): the current position for writing into @buf 837 * 838 * Checks OA unit status registers and if necessary appends corresponding 839 * status records for userspace (such as for a buffer full condition) and then 840 * initiate appending any buffered OA reports. 841 * 842 * Updates @offset according to the number of bytes successfully copied into 843 * the userspace buffer. 844 * 845 * NB: some data may be successfully copied to the userspace buffer 846 * even if an error is returned, and this is reflected in the 847 * updated @offset. 848 * 849 * Returns: zero on success or a negative error code 850 */ 851 static int gen8_oa_read(struct i915_perf_stream *stream, 852 char __user *buf, 853 size_t count, 854 size_t *offset) 855 { 856 struct intel_uncore *uncore = stream->uncore; 857 u32 oastatus; 858 i915_reg_t oastatus_reg; 859 int ret; 860 861 if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr)) 862 return -EIO; 863 864 oastatus_reg = GRAPHICS_VER(stream->perf->i915) == 12 ? 865 GEN12_OAG_OASTATUS : GEN8_OASTATUS; 866 867 oastatus = intel_uncore_read(uncore, oastatus_reg); 868 869 /* 870 * We treat OABUFFER_OVERFLOW as a significant error: 871 * 872 * Although theoretically we could handle this more gracefully 873 * sometimes, some Gens don't correctly suppress certain 874 * automatically triggered reports in this condition and so we 875 * have to assume that old reports are now being trampled 876 * over. 877 * 878 * Considering how we don't currently give userspace control 879 * over the OA buffer size and always configure a large 16MB 880 * buffer, then a buffer overflow does anyway likely indicate 881 * that something has gone quite badly wrong. 882 */ 883 if (oastatus & GEN8_OASTATUS_OABUFFER_OVERFLOW) { 884 ret = append_oa_status(stream, buf, count, offset, 885 DRM_I915_PERF_RECORD_OA_BUFFER_LOST); 886 if (ret) 887 return ret; 888 889 drm_dbg(&stream->perf->i915->drm, 890 "OA buffer overflow (exponent = %d): force restart\n", 891 stream->period_exponent); 892 893 stream->perf->ops.oa_disable(stream); 894 stream->perf->ops.oa_enable(stream); 895 896 /* 897 * Note: .oa_enable() is expected to re-init the oabuffer and 898 * reset GEN8_OASTATUS for us 899 */ 900 oastatus = intel_uncore_read(uncore, oastatus_reg); 901 } 902 903 if (oastatus & GEN8_OASTATUS_REPORT_LOST) { 904 ret = append_oa_status(stream, buf, count, offset, 905 DRM_I915_PERF_RECORD_OA_REPORT_LOST); 906 if (ret) 907 return ret; 908 909 intel_uncore_rmw(uncore, oastatus_reg, 910 GEN8_OASTATUS_COUNTER_OVERFLOW | 911 GEN8_OASTATUS_REPORT_LOST, 912 IS_GRAPHICS_VER(uncore->i915, 8, 11) ? 913 (GEN8_OASTATUS_HEAD_POINTER_WRAP | 914 GEN8_OASTATUS_TAIL_POINTER_WRAP) : 0); 915 } 916 917 return gen8_append_oa_reports(stream, buf, count, offset); 918 } 919 920 /** 921 * gen7_append_oa_reports - Copies all buffered OA reports into 922 * userspace read() buffer. 923 * @stream: An i915-perf stream opened for OA metrics 924 * @buf: destination buffer given by userspace 925 * @count: the number of bytes userspace wants to read 926 * @offset: (inout): the current position for writing into @buf 927 * 928 * Notably any error condition resulting in a short read (-%ENOSPC or 929 * -%EFAULT) will be returned even though one or more records may 930 * have been successfully copied. In this case it's up to the caller 931 * to decide if the error should be squashed before returning to 932 * userspace. 933 * 934 * Note: reports are consumed from the head, and appended to the 935 * tail, so the tail chases the head?... If you think that's mad 936 * and back-to-front you're not alone, but this follows the 937 * Gen PRM naming convention. 938 * 939 * Returns: 0 on success, negative error code on failure. 940 */ 941 static int gen7_append_oa_reports(struct i915_perf_stream *stream, 942 char __user *buf, 943 size_t count, 944 size_t *offset) 945 { 946 struct intel_uncore *uncore = stream->uncore; 947 int report_size = stream->oa_buffer.format_size; 948 u8 *oa_buf_base = stream->oa_buffer.vaddr; 949 u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma); 950 u32 mask = (OA_BUFFER_SIZE - 1); 951 size_t start_offset = *offset; 952 unsigned long flags; 953 u32 head, tail; 954 u32 taken; 955 int ret = 0; 956 957 if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled)) 958 return -EIO; 959 960 spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags); 961 962 head = stream->oa_buffer.head; 963 tail = stream->oa_buffer.tail; 964 965 spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags); 966 967 /* NB: oa_buffer.head/tail include the gtt_offset which we don't want 968 * while indexing relative to oa_buf_base. 969 */ 970 head -= gtt_offset; 971 tail -= gtt_offset; 972 973 /* An out of bounds or misaligned head or tail pointer implies a driver 974 * bug since we validate + align the tail pointers we read from the 975 * hardware and we are in full control of the head pointer which should 976 * only be incremented by multiples of the report size (notably also 977 * all a power of two). 978 */ 979 if (drm_WARN_ONCE(&uncore->i915->drm, 980 head > OA_BUFFER_SIZE || head % report_size || 981 tail > OA_BUFFER_SIZE || tail % report_size, 982 "Inconsistent OA buffer pointers: head = %u, tail = %u\n", 983 head, tail)) 984 return -EIO; 985 986 987 for (/* none */; 988 (taken = OA_TAKEN(tail, head)); 989 head = (head + report_size) & mask) { 990 u8 *report = oa_buf_base + head; 991 u32 *report32 = (void *)report; 992 993 /* All the report sizes factor neatly into the buffer 994 * size so we never expect to see a report split 995 * between the beginning and end of the buffer. 996 * 997 * Given the initial alignment check a misalignment 998 * here would imply a driver bug that would result 999 * in an overrun. 1000 */ 1001 if (drm_WARN_ON(&uncore->i915->drm, 1002 (OA_BUFFER_SIZE - head) < report_size)) { 1003 drm_err(&uncore->i915->drm, 1004 "Spurious OA head ptr: non-integral report offset\n"); 1005 break; 1006 } 1007 1008 /* The report-ID field for periodic samples includes 1009 * some undocumented flags related to what triggered 1010 * the report and is never expected to be zero so we 1011 * can check that the report isn't invalid before 1012 * copying it to userspace... 1013 */ 1014 if (report32[0] == 0) { 1015 if (__ratelimit(&stream->perf->spurious_report_rs)) 1016 DRM_NOTE("Skipping spurious, invalid OA report\n"); 1017 continue; 1018 } 1019 1020 ret = append_oa_sample(stream, buf, count, offset, report); 1021 if (ret) 1022 break; 1023 1024 /* Clear out the first 2 dwords as a mean to detect unlanded 1025 * reports. 1026 */ 1027 report32[0] = 0; 1028 report32[1] = 0; 1029 } 1030 1031 if (start_offset != *offset) { 1032 spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags); 1033 1034 /* We removed the gtt_offset for the copy loop above, indexing 1035 * relative to oa_buf_base so put back here... 1036 */ 1037 head += gtt_offset; 1038 1039 intel_uncore_write(uncore, GEN7_OASTATUS2, 1040 (head & GEN7_OASTATUS2_HEAD_MASK) | 1041 GEN7_OASTATUS2_MEM_SELECT_GGTT); 1042 stream->oa_buffer.head = head; 1043 1044 spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags); 1045 } 1046 1047 return ret; 1048 } 1049 1050 /** 1051 * gen7_oa_read - copy status records then buffered OA reports 1052 * @stream: An i915-perf stream opened for OA metrics 1053 * @buf: destination buffer given by userspace 1054 * @count: the number of bytes userspace wants to read 1055 * @offset: (inout): the current position for writing into @buf 1056 * 1057 * Checks Gen 7 specific OA unit status registers and if necessary appends 1058 * corresponding status records for userspace (such as for a buffer full 1059 * condition) and then initiate appending any buffered OA reports. 1060 * 1061 * Updates @offset according to the number of bytes successfully copied into 1062 * the userspace buffer. 1063 * 1064 * Returns: zero on success or a negative error code 1065 */ 1066 static int gen7_oa_read(struct i915_perf_stream *stream, 1067 char __user *buf, 1068 size_t count, 1069 size_t *offset) 1070 { 1071 struct intel_uncore *uncore = stream->uncore; 1072 u32 oastatus1; 1073 int ret; 1074 1075 if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr)) 1076 return -EIO; 1077 1078 oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1); 1079 1080 /* XXX: On Haswell we don't have a safe way to clear oastatus1 1081 * bits while the OA unit is enabled (while the tail pointer 1082 * may be updated asynchronously) so we ignore status bits 1083 * that have already been reported to userspace. 1084 */ 1085 oastatus1 &= ~stream->perf->gen7_latched_oastatus1; 1086 1087 /* We treat OABUFFER_OVERFLOW as a significant error: 1088 * 1089 * - The status can be interpreted to mean that the buffer is 1090 * currently full (with a higher precedence than OA_TAKEN() 1091 * which will start to report a near-empty buffer after an 1092 * overflow) but it's awkward that we can't clear the status 1093 * on Haswell, so without a reset we won't be able to catch 1094 * the state again. 1095 * 1096 * - Since it also implies the HW has started overwriting old 1097 * reports it may also affect our sanity checks for invalid 1098 * reports when copying to userspace that assume new reports 1099 * are being written to cleared memory. 1100 * 1101 * - In the future we may want to introduce a flight recorder 1102 * mode where the driver will automatically maintain a safe 1103 * guard band between head/tail, avoiding this overflow 1104 * condition, but we avoid the added driver complexity for 1105 * now. 1106 */ 1107 if (unlikely(oastatus1 & GEN7_OASTATUS1_OABUFFER_OVERFLOW)) { 1108 ret = append_oa_status(stream, buf, count, offset, 1109 DRM_I915_PERF_RECORD_OA_BUFFER_LOST); 1110 if (ret) 1111 return ret; 1112 1113 drm_dbg(&stream->perf->i915->drm, 1114 "OA buffer overflow (exponent = %d): force restart\n", 1115 stream->period_exponent); 1116 1117 stream->perf->ops.oa_disable(stream); 1118 stream->perf->ops.oa_enable(stream); 1119 1120 oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1); 1121 } 1122 1123 if (unlikely(oastatus1 & GEN7_OASTATUS1_REPORT_LOST)) { 1124 ret = append_oa_status(stream, buf, count, offset, 1125 DRM_I915_PERF_RECORD_OA_REPORT_LOST); 1126 if (ret) 1127 return ret; 1128 stream->perf->gen7_latched_oastatus1 |= 1129 GEN7_OASTATUS1_REPORT_LOST; 1130 } 1131 1132 return gen7_append_oa_reports(stream, buf, count, offset); 1133 } 1134 1135 /** 1136 * i915_oa_wait_unlocked - handles blocking IO until OA data available 1137 * @stream: An i915-perf stream opened for OA metrics 1138 * 1139 * Called when userspace tries to read() from a blocking stream FD opened 1140 * for OA metrics. It waits until the hrtimer callback finds a non-empty 1141 * OA buffer and wakes us. 1142 * 1143 * Note: it's acceptable to have this return with some false positives 1144 * since any subsequent read handling will return -EAGAIN if there isn't 1145 * really data ready for userspace yet. 1146 * 1147 * Returns: zero on success or a negative error code 1148 */ 1149 static int i915_oa_wait_unlocked(struct i915_perf_stream *stream) 1150 { 1151 /* We would wait indefinitely if periodic sampling is not enabled */ 1152 if (!stream->periodic) 1153 return -EIO; 1154 1155 return wait_event_interruptible(stream->poll_wq, 1156 oa_buffer_check_unlocked(stream)); 1157 } 1158 1159 /** 1160 * i915_oa_poll_wait - call poll_wait() for an OA stream poll() 1161 * @stream: An i915-perf stream opened for OA metrics 1162 * @file: An i915 perf stream file 1163 * @wait: poll() state table 1164 * 1165 * For handling userspace polling on an i915 perf stream opened for OA metrics, 1166 * this starts a poll_wait with the wait queue that our hrtimer callback wakes 1167 * when it sees data ready to read in the circular OA buffer. 1168 */ 1169 static void i915_oa_poll_wait(struct i915_perf_stream *stream, 1170 struct file *file, 1171 poll_table *wait) 1172 { 1173 poll_wait(file, &stream->poll_wq, wait); 1174 } 1175 1176 /** 1177 * i915_oa_read - just calls through to &i915_oa_ops->read 1178 * @stream: An i915-perf stream opened for OA metrics 1179 * @buf: destination buffer given by userspace 1180 * @count: the number of bytes userspace wants to read 1181 * @offset: (inout): the current position for writing into @buf 1182 * 1183 * Updates @offset according to the number of bytes successfully copied into 1184 * the userspace buffer. 1185 * 1186 * Returns: zero on success or a negative error code 1187 */ 1188 static int i915_oa_read(struct i915_perf_stream *stream, 1189 char __user *buf, 1190 size_t count, 1191 size_t *offset) 1192 { 1193 return stream->perf->ops.read(stream, buf, count, offset); 1194 } 1195 1196 static struct intel_context *oa_pin_context(struct i915_perf_stream *stream) 1197 { 1198 struct i915_gem_engines_iter it; 1199 struct i915_gem_context *ctx = stream->ctx; 1200 struct intel_context *ce; 1201 struct i915_gem_ww_ctx ww; 1202 int err = -ENODEV; 1203 1204 for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { 1205 if (ce->engine != stream->engine) /* first match! */ 1206 continue; 1207 1208 err = 0; 1209 break; 1210 } 1211 i915_gem_context_unlock_engines(ctx); 1212 1213 if (err) 1214 return ERR_PTR(err); 1215 1216 i915_gem_ww_ctx_init(&ww, true); 1217 retry: 1218 /* 1219 * As the ID is the gtt offset of the context's vma we 1220 * pin the vma to ensure the ID remains fixed. 1221 */ 1222 err = intel_context_pin_ww(ce, &ww); 1223 if (err == -EDEADLK) { 1224 err = i915_gem_ww_ctx_backoff(&ww); 1225 if (!err) 1226 goto retry; 1227 } 1228 i915_gem_ww_ctx_fini(&ww); 1229 1230 if (err) 1231 return ERR_PTR(err); 1232 1233 stream->pinned_ctx = ce; 1234 return stream->pinned_ctx; 1235 } 1236 1237 /** 1238 * oa_get_render_ctx_id - determine and hold ctx hw id 1239 * @stream: An i915-perf stream opened for OA metrics 1240 * 1241 * Determine the render context hw id, and ensure it remains fixed for the 1242 * lifetime of the stream. This ensures that we don't have to worry about 1243 * updating the context ID in OACONTROL on the fly. 1244 * 1245 * Returns: zero on success or a negative error code 1246 */ 1247 static int oa_get_render_ctx_id(struct i915_perf_stream *stream) 1248 { 1249 struct intel_context *ce; 1250 1251 ce = oa_pin_context(stream); 1252 if (IS_ERR(ce)) 1253 return PTR_ERR(ce); 1254 1255 switch (GRAPHICS_VER(ce->engine->i915)) { 1256 case 7: { 1257 /* 1258 * On Haswell we don't do any post processing of the reports 1259 * and don't need to use the mask. 1260 */ 1261 stream->specific_ctx_id = i915_ggtt_offset(ce->state); 1262 stream->specific_ctx_id_mask = 0; 1263 break; 1264 } 1265 1266 case 8: 1267 case 9: 1268 if (intel_engine_uses_guc(ce->engine)) { 1269 /* 1270 * When using GuC, the context descriptor we write in 1271 * i915 is read by GuC and rewritten before it's 1272 * actually written into the hardware. The LRCA is 1273 * what is put into the context id field of the 1274 * context descriptor by GuC. Because it's aligned to 1275 * a page, the lower 12bits are always at 0 and 1276 * dropped by GuC. They won't be part of the context 1277 * ID in the OA reports, so squash those lower bits. 1278 */ 1279 stream->specific_ctx_id = ce->lrc.lrca >> 12; 1280 1281 /* 1282 * GuC uses the top bit to signal proxy submission, so 1283 * ignore that bit. 1284 */ 1285 stream->specific_ctx_id_mask = 1286 (1U << (GEN8_CTX_ID_WIDTH - 1)) - 1; 1287 } else { 1288 stream->specific_ctx_id_mask = 1289 (1U << GEN8_CTX_ID_WIDTH) - 1; 1290 stream->specific_ctx_id = stream->specific_ctx_id_mask; 1291 } 1292 break; 1293 1294 case 11: 1295 case 12: 1296 if (GRAPHICS_VER_FULL(ce->engine->i915) >= IP_VER(12, 50)) { 1297 stream->specific_ctx_id_mask = 1298 ((1U << XEHP_SW_CTX_ID_WIDTH) - 1) << 1299 (XEHP_SW_CTX_ID_SHIFT - 32); 1300 stream->specific_ctx_id = 1301 (XEHP_MAX_CONTEXT_HW_ID - 1) << 1302 (XEHP_SW_CTX_ID_SHIFT - 32); 1303 } else { 1304 stream->specific_ctx_id_mask = 1305 ((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32); 1306 /* 1307 * Pick an unused context id 1308 * 0 - BITS_PER_LONG are used by other contexts 1309 * GEN12_MAX_CONTEXT_HW_ID (0x7ff) is used by idle context 1310 */ 1311 stream->specific_ctx_id = 1312 (GEN12_MAX_CONTEXT_HW_ID - 1) << (GEN11_SW_CTX_ID_SHIFT - 32); 1313 } 1314 break; 1315 1316 default: 1317 MISSING_CASE(GRAPHICS_VER(ce->engine->i915)); 1318 } 1319 1320 ce->tag = stream->specific_ctx_id; 1321 1322 drm_dbg(&stream->perf->i915->drm, 1323 "filtering on ctx_id=0x%x ctx_id_mask=0x%x\n", 1324 stream->specific_ctx_id, 1325 stream->specific_ctx_id_mask); 1326 1327 return 0; 1328 } 1329 1330 /** 1331 * oa_put_render_ctx_id - counterpart to oa_get_render_ctx_id releases hold 1332 * @stream: An i915-perf stream opened for OA metrics 1333 * 1334 * In case anything needed doing to ensure the context HW ID would remain valid 1335 * for the lifetime of the stream, then that can be undone here. 1336 */ 1337 static void oa_put_render_ctx_id(struct i915_perf_stream *stream) 1338 { 1339 struct intel_context *ce; 1340 1341 ce = fetch_and_zero(&stream->pinned_ctx); 1342 if (ce) { 1343 ce->tag = 0; /* recomputed on next submission after parking */ 1344 intel_context_unpin(ce); 1345 } 1346 1347 stream->specific_ctx_id = INVALID_CTX_ID; 1348 stream->specific_ctx_id_mask = 0; 1349 } 1350 1351 static void 1352 free_oa_buffer(struct i915_perf_stream *stream) 1353 { 1354 i915_vma_unpin_and_release(&stream->oa_buffer.vma, 1355 I915_VMA_RELEASE_MAP); 1356 1357 stream->oa_buffer.vaddr = NULL; 1358 } 1359 1360 static void 1361 free_oa_configs(struct i915_perf_stream *stream) 1362 { 1363 struct i915_oa_config_bo *oa_bo, *tmp; 1364 1365 i915_oa_config_put(stream->oa_config); 1366 llist_for_each_entry_safe(oa_bo, tmp, stream->oa_config_bos.first, node) 1367 free_oa_config_bo(oa_bo); 1368 } 1369 1370 static void 1371 free_noa_wait(struct i915_perf_stream *stream) 1372 { 1373 i915_vma_unpin_and_release(&stream->noa_wait, 0); 1374 } 1375 1376 static void i915_oa_stream_destroy(struct i915_perf_stream *stream) 1377 { 1378 struct i915_perf *perf = stream->perf; 1379 1380 if (WARN_ON(stream != perf->exclusive_stream)) 1381 return; 1382 1383 /* 1384 * Unset exclusive_stream first, it will be checked while disabling 1385 * the metric set on gen8+. 1386 * 1387 * See i915_oa_init_reg_state() and lrc_configure_all_contexts() 1388 */ 1389 WRITE_ONCE(perf->exclusive_stream, NULL); 1390 perf->ops.disable_metric_set(stream); 1391 1392 free_oa_buffer(stream); 1393 1394 intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL); 1395 intel_engine_pm_put(stream->engine); 1396 1397 if (stream->ctx) 1398 oa_put_render_ctx_id(stream); 1399 1400 free_oa_configs(stream); 1401 free_noa_wait(stream); 1402 1403 if (perf->spurious_report_rs.missed) { 1404 DRM_NOTE("%d spurious OA report notices suppressed due to ratelimiting\n", 1405 perf->spurious_report_rs.missed); 1406 } 1407 } 1408 1409 static void gen7_init_oa_buffer(struct i915_perf_stream *stream) 1410 { 1411 struct intel_uncore *uncore = stream->uncore; 1412 u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma); 1413 unsigned long flags; 1414 1415 spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags); 1416 1417 /* Pre-DevBDW: OABUFFER must be set with counters off, 1418 * before OASTATUS1, but after OASTATUS2 1419 */ 1420 intel_uncore_write(uncore, GEN7_OASTATUS2, /* head */ 1421 gtt_offset | GEN7_OASTATUS2_MEM_SELECT_GGTT); 1422 stream->oa_buffer.head = gtt_offset; 1423 1424 intel_uncore_write(uncore, GEN7_OABUFFER, gtt_offset); 1425 1426 intel_uncore_write(uncore, GEN7_OASTATUS1, /* tail */ 1427 gtt_offset | OABUFFER_SIZE_16M); 1428 1429 /* Mark that we need updated tail pointers to read from... */ 1430 stream->oa_buffer.aging_tail = INVALID_TAIL_PTR; 1431 stream->oa_buffer.tail = gtt_offset; 1432 1433 spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags); 1434 1435 /* On Haswell we have to track which OASTATUS1 flags we've 1436 * already seen since they can't be cleared while periodic 1437 * sampling is enabled. 1438 */ 1439 stream->perf->gen7_latched_oastatus1 = 0; 1440 1441 /* NB: although the OA buffer will initially be allocated 1442 * zeroed via shmfs (and so this memset is redundant when 1443 * first allocating), we may re-init the OA buffer, either 1444 * when re-enabling a stream or in error/reset paths. 1445 * 1446 * The reason we clear the buffer for each re-init is for the 1447 * sanity check in gen7_append_oa_reports() that looks at the 1448 * report-id field to make sure it's non-zero which relies on 1449 * the assumption that new reports are being written to zeroed 1450 * memory... 1451 */ 1452 memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE); 1453 } 1454 1455 static void gen8_init_oa_buffer(struct i915_perf_stream *stream) 1456 { 1457 struct intel_uncore *uncore = stream->uncore; 1458 u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma); 1459 unsigned long flags; 1460 1461 spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags); 1462 1463 intel_uncore_write(uncore, GEN8_OASTATUS, 0); 1464 intel_uncore_write(uncore, GEN8_OAHEADPTR, gtt_offset); 1465 stream->oa_buffer.head = gtt_offset; 1466 1467 intel_uncore_write(uncore, GEN8_OABUFFER_UDW, 0); 1468 1469 /* 1470 * PRM says: 1471 * 1472 * "This MMIO must be set before the OATAILPTR 1473 * register and after the OAHEADPTR register. This is 1474 * to enable proper functionality of the overflow 1475 * bit." 1476 */ 1477 intel_uncore_write(uncore, GEN8_OABUFFER, gtt_offset | 1478 OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT); 1479 intel_uncore_write(uncore, GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK); 1480 1481 /* Mark that we need updated tail pointers to read from... */ 1482 stream->oa_buffer.aging_tail = INVALID_TAIL_PTR; 1483 stream->oa_buffer.tail = gtt_offset; 1484 1485 /* 1486 * Reset state used to recognise context switches, affecting which 1487 * reports we will forward to userspace while filtering for a single 1488 * context. 1489 */ 1490 stream->oa_buffer.last_ctx_id = INVALID_CTX_ID; 1491 1492 spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags); 1493 1494 /* 1495 * NB: although the OA buffer will initially be allocated 1496 * zeroed via shmfs (and so this memset is redundant when 1497 * first allocating), we may re-init the OA buffer, either 1498 * when re-enabling a stream or in error/reset paths. 1499 * 1500 * The reason we clear the buffer for each re-init is for the 1501 * sanity check in gen8_append_oa_reports() that looks at the 1502 * reason field to make sure it's non-zero which relies on 1503 * the assumption that new reports are being written to zeroed 1504 * memory... 1505 */ 1506 memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE); 1507 } 1508 1509 static void gen12_init_oa_buffer(struct i915_perf_stream *stream) 1510 { 1511 struct intel_uncore *uncore = stream->uncore; 1512 u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma); 1513 unsigned long flags; 1514 1515 spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags); 1516 1517 intel_uncore_write(uncore, GEN12_OAG_OASTATUS, 0); 1518 intel_uncore_write(uncore, GEN12_OAG_OAHEADPTR, 1519 gtt_offset & GEN12_OAG_OAHEADPTR_MASK); 1520 stream->oa_buffer.head = gtt_offset; 1521 1522 /* 1523 * PRM says: 1524 * 1525 * "This MMIO must be set before the OATAILPTR 1526 * register and after the OAHEADPTR register. This is 1527 * to enable proper functionality of the overflow 1528 * bit." 1529 */ 1530 intel_uncore_write(uncore, GEN12_OAG_OABUFFER, gtt_offset | 1531 OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT); 1532 intel_uncore_write(uncore, GEN12_OAG_OATAILPTR, 1533 gtt_offset & GEN12_OAG_OATAILPTR_MASK); 1534 1535 /* Mark that we need updated tail pointers to read from... */ 1536 stream->oa_buffer.aging_tail = INVALID_TAIL_PTR; 1537 stream->oa_buffer.tail = gtt_offset; 1538 1539 /* 1540 * Reset state used to recognise context switches, affecting which 1541 * reports we will forward to userspace while filtering for a single 1542 * context. 1543 */ 1544 stream->oa_buffer.last_ctx_id = INVALID_CTX_ID; 1545 1546 spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags); 1547 1548 /* 1549 * NB: although the OA buffer will initially be allocated 1550 * zeroed via shmfs (and so this memset is redundant when 1551 * first allocating), we may re-init the OA buffer, either 1552 * when re-enabling a stream or in error/reset paths. 1553 * 1554 * The reason we clear the buffer for each re-init is for the 1555 * sanity check in gen8_append_oa_reports() that looks at the 1556 * reason field to make sure it's non-zero which relies on 1557 * the assumption that new reports are being written to zeroed 1558 * memory... 1559 */ 1560 memset(stream->oa_buffer.vaddr, 0, 1561 stream->oa_buffer.vma->size); 1562 } 1563 1564 static int alloc_oa_buffer(struct i915_perf_stream *stream) 1565 { 1566 struct drm_i915_private *i915 = stream->perf->i915; 1567 struct drm_i915_gem_object *bo; 1568 struct i915_vma *vma; 1569 int ret; 1570 1571 if (drm_WARN_ON(&i915->drm, stream->oa_buffer.vma)) 1572 return -ENODEV; 1573 1574 BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE); 1575 BUILD_BUG_ON(OA_BUFFER_SIZE < SZ_128K || OA_BUFFER_SIZE > SZ_16M); 1576 1577 bo = i915_gem_object_create_shmem(stream->perf->i915, OA_BUFFER_SIZE); 1578 if (IS_ERR(bo)) { 1579 drm_err(&i915->drm, "Failed to allocate OA buffer\n"); 1580 return PTR_ERR(bo); 1581 } 1582 1583 i915_gem_object_set_cache_coherency(bo, I915_CACHE_LLC); 1584 1585 /* PreHSW required 512K alignment, HSW requires 16M */ 1586 vma = i915_gem_object_ggtt_pin(bo, NULL, 0, SZ_16M, 0); 1587 if (IS_ERR(vma)) { 1588 ret = PTR_ERR(vma); 1589 goto err_unref; 1590 } 1591 stream->oa_buffer.vma = vma; 1592 1593 stream->oa_buffer.vaddr = 1594 i915_gem_object_pin_map_unlocked(bo, I915_MAP_WB); 1595 if (IS_ERR(stream->oa_buffer.vaddr)) { 1596 ret = PTR_ERR(stream->oa_buffer.vaddr); 1597 goto err_unpin; 1598 } 1599 1600 return 0; 1601 1602 err_unpin: 1603 __i915_vma_unpin(vma); 1604 1605 err_unref: 1606 i915_gem_object_put(bo); 1607 1608 stream->oa_buffer.vaddr = NULL; 1609 stream->oa_buffer.vma = NULL; 1610 1611 return ret; 1612 } 1613 1614 static u32 *save_restore_register(struct i915_perf_stream *stream, u32 *cs, 1615 bool save, i915_reg_t reg, u32 offset, 1616 u32 dword_count) 1617 { 1618 u32 cmd; 1619 u32 d; 1620 1621 cmd = save ? MI_STORE_REGISTER_MEM : MI_LOAD_REGISTER_MEM; 1622 cmd |= MI_SRM_LRM_GLOBAL_GTT; 1623 if (GRAPHICS_VER(stream->perf->i915) >= 8) 1624 cmd++; 1625 1626 for (d = 0; d < dword_count; d++) { 1627 *cs++ = cmd; 1628 *cs++ = i915_mmio_reg_offset(reg) + 4 * d; 1629 *cs++ = intel_gt_scratch_offset(stream->engine->gt, 1630 offset) + 4 * d; 1631 *cs++ = 0; 1632 } 1633 1634 return cs; 1635 } 1636 1637 static int alloc_noa_wait(struct i915_perf_stream *stream) 1638 { 1639 struct drm_i915_private *i915 = stream->perf->i915; 1640 struct drm_i915_gem_object *bo; 1641 struct i915_vma *vma; 1642 const u64 delay_ticks = 0xffffffffffffffff - 1643 intel_gt_ns_to_clock_interval(to_gt(stream->perf->i915), 1644 atomic64_read(&stream->perf->noa_programming_delay)); 1645 const u32 base = stream->engine->mmio_base; 1646 #define CS_GPR(x) GEN8_RING_CS_GPR(base, x) 1647 u32 *batch, *ts0, *cs, *jump; 1648 struct i915_gem_ww_ctx ww; 1649 int ret, i; 1650 enum { 1651 START_TS, 1652 NOW_TS, 1653 DELTA_TS, 1654 JUMP_PREDICATE, 1655 DELTA_TARGET, 1656 N_CS_GPR 1657 }; 1658 1659 bo = i915_gem_object_create_internal(i915, 4096); 1660 if (IS_ERR(bo)) { 1661 drm_err(&i915->drm, 1662 "Failed to allocate NOA wait batchbuffer\n"); 1663 return PTR_ERR(bo); 1664 } 1665 1666 i915_gem_ww_ctx_init(&ww, true); 1667 retry: 1668 ret = i915_gem_object_lock(bo, &ww); 1669 if (ret) 1670 goto out_ww; 1671 1672 /* 1673 * We pin in GGTT because we jump into this buffer now because 1674 * multiple OA config BOs will have a jump to this address and it 1675 * needs to be fixed during the lifetime of the i915/perf stream. 1676 */ 1677 vma = i915_gem_object_ggtt_pin_ww(bo, &ww, NULL, 0, 0, PIN_HIGH); 1678 if (IS_ERR(vma)) { 1679 ret = PTR_ERR(vma); 1680 goto out_ww; 1681 } 1682 1683 batch = cs = i915_gem_object_pin_map(bo, I915_MAP_WB); 1684 if (IS_ERR(batch)) { 1685 ret = PTR_ERR(batch); 1686 goto err_unpin; 1687 } 1688 1689 /* Save registers. */ 1690 for (i = 0; i < N_CS_GPR; i++) 1691 cs = save_restore_register( 1692 stream, cs, true /* save */, CS_GPR(i), 1693 INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR + 8 * i, 2); 1694 cs = save_restore_register( 1695 stream, cs, true /* save */, MI_PREDICATE_RESULT_1(RENDER_RING_BASE), 1696 INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1, 1); 1697 1698 /* First timestamp snapshot location. */ 1699 ts0 = cs; 1700 1701 /* 1702 * Initial snapshot of the timestamp register to implement the wait. 1703 * We work with 32b values, so clear out the top 32b bits of the 1704 * register because the ALU works 64bits. 1705 */ 1706 *cs++ = MI_LOAD_REGISTER_IMM(1); 1707 *cs++ = i915_mmio_reg_offset(CS_GPR(START_TS)) + 4; 1708 *cs++ = 0; 1709 *cs++ = MI_LOAD_REGISTER_REG | (3 - 2); 1710 *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base)); 1711 *cs++ = i915_mmio_reg_offset(CS_GPR(START_TS)); 1712 1713 /* 1714 * This is the location we're going to jump back into until the 1715 * required amount of time has passed. 1716 */ 1717 jump = cs; 1718 1719 /* 1720 * Take another snapshot of the timestamp register. Take care to clear 1721 * up the top 32bits of CS_GPR(1) as we're using it for other 1722 * operations below. 1723 */ 1724 *cs++ = MI_LOAD_REGISTER_IMM(1); 1725 *cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS)) + 4; 1726 *cs++ = 0; 1727 *cs++ = MI_LOAD_REGISTER_REG | (3 - 2); 1728 *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base)); 1729 *cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS)); 1730 1731 /* 1732 * Do a diff between the 2 timestamps and store the result back into 1733 * CS_GPR(1). 1734 */ 1735 *cs++ = MI_MATH(5); 1736 *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(NOW_TS)); 1737 *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(START_TS)); 1738 *cs++ = MI_MATH_SUB; 1739 *cs++ = MI_MATH_STORE(MI_MATH_REG(DELTA_TS), MI_MATH_REG_ACCU); 1740 *cs++ = MI_MATH_STORE(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF); 1741 1742 /* 1743 * Transfer the carry flag (set to 1 if ts1 < ts0, meaning the 1744 * timestamp have rolled over the 32bits) into the predicate register 1745 * to be used for the predicated jump. 1746 */ 1747 *cs++ = MI_LOAD_REGISTER_REG | (3 - 2); 1748 *cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE)); 1749 *cs++ = i915_mmio_reg_offset(MI_PREDICATE_RESULT_1(RENDER_RING_BASE)); 1750 1751 /* Restart from the beginning if we had timestamps roll over. */ 1752 *cs++ = (GRAPHICS_VER(i915) < 8 ? 1753 MI_BATCH_BUFFER_START : 1754 MI_BATCH_BUFFER_START_GEN8) | 1755 MI_BATCH_PREDICATE; 1756 *cs++ = i915_ggtt_offset(vma) + (ts0 - batch) * 4; 1757 *cs++ = 0; 1758 1759 /* 1760 * Now add the diff between to previous timestamps and add it to : 1761 * (((1 * << 64) - 1) - delay_ns) 1762 * 1763 * When the Carry Flag contains 1 this means the elapsed time is 1764 * longer than the expected delay, and we can exit the wait loop. 1765 */ 1766 *cs++ = MI_LOAD_REGISTER_IMM(2); 1767 *cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET)); 1768 *cs++ = lower_32_bits(delay_ticks); 1769 *cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET)) + 4; 1770 *cs++ = upper_32_bits(delay_ticks); 1771 1772 *cs++ = MI_MATH(4); 1773 *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(DELTA_TS)); 1774 *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(DELTA_TARGET)); 1775 *cs++ = MI_MATH_ADD; 1776 *cs++ = MI_MATH_STOREINV(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF); 1777 1778 *cs++ = MI_ARB_CHECK; 1779 1780 /* 1781 * Transfer the result into the predicate register to be used for the 1782 * predicated jump. 1783 */ 1784 *cs++ = MI_LOAD_REGISTER_REG | (3 - 2); 1785 *cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE)); 1786 *cs++ = i915_mmio_reg_offset(MI_PREDICATE_RESULT_1(RENDER_RING_BASE)); 1787 1788 /* Predicate the jump. */ 1789 *cs++ = (GRAPHICS_VER(i915) < 8 ? 1790 MI_BATCH_BUFFER_START : 1791 MI_BATCH_BUFFER_START_GEN8) | 1792 MI_BATCH_PREDICATE; 1793 *cs++ = i915_ggtt_offset(vma) + (jump - batch) * 4; 1794 *cs++ = 0; 1795 1796 /* Restore registers. */ 1797 for (i = 0; i < N_CS_GPR; i++) 1798 cs = save_restore_register( 1799 stream, cs, false /* restore */, CS_GPR(i), 1800 INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR + 8 * i, 2); 1801 cs = save_restore_register( 1802 stream, cs, false /* restore */, MI_PREDICATE_RESULT_1(RENDER_RING_BASE), 1803 INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1, 1); 1804 1805 /* And return to the ring. */ 1806 *cs++ = MI_BATCH_BUFFER_END; 1807 1808 GEM_BUG_ON(cs - batch > PAGE_SIZE / sizeof(*batch)); 1809 1810 i915_gem_object_flush_map(bo); 1811 __i915_gem_object_release_map(bo); 1812 1813 stream->noa_wait = vma; 1814 goto out_ww; 1815 1816 err_unpin: 1817 i915_vma_unpin_and_release(&vma, 0); 1818 out_ww: 1819 if (ret == -EDEADLK) { 1820 ret = i915_gem_ww_ctx_backoff(&ww); 1821 if (!ret) 1822 goto retry; 1823 } 1824 i915_gem_ww_ctx_fini(&ww); 1825 if (ret) 1826 i915_gem_object_put(bo); 1827 return ret; 1828 } 1829 1830 static u32 *write_cs_mi_lri(u32 *cs, 1831 const struct i915_oa_reg *reg_data, 1832 u32 n_regs) 1833 { 1834 u32 i; 1835 1836 for (i = 0; i < n_regs; i++) { 1837 if ((i % MI_LOAD_REGISTER_IMM_MAX_REGS) == 0) { 1838 u32 n_lri = min_t(u32, 1839 n_regs - i, 1840 MI_LOAD_REGISTER_IMM_MAX_REGS); 1841 1842 *cs++ = MI_LOAD_REGISTER_IMM(n_lri); 1843 } 1844 *cs++ = i915_mmio_reg_offset(reg_data[i].addr); 1845 *cs++ = reg_data[i].value; 1846 } 1847 1848 return cs; 1849 } 1850 1851 static int num_lri_dwords(int num_regs) 1852 { 1853 int count = 0; 1854 1855 if (num_regs > 0) { 1856 count += DIV_ROUND_UP(num_regs, MI_LOAD_REGISTER_IMM_MAX_REGS); 1857 count += num_regs * 2; 1858 } 1859 1860 return count; 1861 } 1862 1863 static struct i915_oa_config_bo * 1864 alloc_oa_config_buffer(struct i915_perf_stream *stream, 1865 struct i915_oa_config *oa_config) 1866 { 1867 struct drm_i915_gem_object *obj; 1868 struct i915_oa_config_bo *oa_bo; 1869 struct i915_gem_ww_ctx ww; 1870 size_t config_length = 0; 1871 u32 *cs; 1872 int err; 1873 1874 oa_bo = kzalloc(sizeof(*oa_bo), GFP_KERNEL); 1875 if (!oa_bo) 1876 return ERR_PTR(-ENOMEM); 1877 1878 config_length += num_lri_dwords(oa_config->mux_regs_len); 1879 config_length += num_lri_dwords(oa_config->b_counter_regs_len); 1880 config_length += num_lri_dwords(oa_config->flex_regs_len); 1881 config_length += 3; /* MI_BATCH_BUFFER_START */ 1882 config_length = ALIGN(sizeof(u32) * config_length, I915_GTT_PAGE_SIZE); 1883 1884 obj = i915_gem_object_create_shmem(stream->perf->i915, config_length); 1885 if (IS_ERR(obj)) { 1886 err = PTR_ERR(obj); 1887 goto err_free; 1888 } 1889 1890 i915_gem_ww_ctx_init(&ww, true); 1891 retry: 1892 err = i915_gem_object_lock(obj, &ww); 1893 if (err) 1894 goto out_ww; 1895 1896 cs = i915_gem_object_pin_map(obj, I915_MAP_WB); 1897 if (IS_ERR(cs)) { 1898 err = PTR_ERR(cs); 1899 goto out_ww; 1900 } 1901 1902 cs = write_cs_mi_lri(cs, 1903 oa_config->mux_regs, 1904 oa_config->mux_regs_len); 1905 cs = write_cs_mi_lri(cs, 1906 oa_config->b_counter_regs, 1907 oa_config->b_counter_regs_len); 1908 cs = write_cs_mi_lri(cs, 1909 oa_config->flex_regs, 1910 oa_config->flex_regs_len); 1911 1912 /* Jump into the active wait. */ 1913 *cs++ = (GRAPHICS_VER(stream->perf->i915) < 8 ? 1914 MI_BATCH_BUFFER_START : 1915 MI_BATCH_BUFFER_START_GEN8); 1916 *cs++ = i915_ggtt_offset(stream->noa_wait); 1917 *cs++ = 0; 1918 1919 i915_gem_object_flush_map(obj); 1920 __i915_gem_object_release_map(obj); 1921 1922 oa_bo->vma = i915_vma_instance(obj, 1923 &stream->engine->gt->ggtt->vm, 1924 NULL); 1925 if (IS_ERR(oa_bo->vma)) { 1926 err = PTR_ERR(oa_bo->vma); 1927 goto out_ww; 1928 } 1929 1930 oa_bo->oa_config = i915_oa_config_get(oa_config); 1931 llist_add(&oa_bo->node, &stream->oa_config_bos); 1932 1933 out_ww: 1934 if (err == -EDEADLK) { 1935 err = i915_gem_ww_ctx_backoff(&ww); 1936 if (!err) 1937 goto retry; 1938 } 1939 i915_gem_ww_ctx_fini(&ww); 1940 1941 if (err) 1942 i915_gem_object_put(obj); 1943 err_free: 1944 if (err) { 1945 kfree(oa_bo); 1946 return ERR_PTR(err); 1947 } 1948 return oa_bo; 1949 } 1950 1951 static struct i915_vma * 1952 get_oa_vma(struct i915_perf_stream *stream, struct i915_oa_config *oa_config) 1953 { 1954 struct i915_oa_config_bo *oa_bo; 1955 1956 /* 1957 * Look for the buffer in the already allocated BOs attached 1958 * to the stream. 1959 */ 1960 llist_for_each_entry(oa_bo, stream->oa_config_bos.first, node) { 1961 if (oa_bo->oa_config == oa_config && 1962 memcmp(oa_bo->oa_config->uuid, 1963 oa_config->uuid, 1964 sizeof(oa_config->uuid)) == 0) 1965 goto out; 1966 } 1967 1968 oa_bo = alloc_oa_config_buffer(stream, oa_config); 1969 if (IS_ERR(oa_bo)) 1970 return ERR_CAST(oa_bo); 1971 1972 out: 1973 return i915_vma_get(oa_bo->vma); 1974 } 1975 1976 static int 1977 emit_oa_config(struct i915_perf_stream *stream, 1978 struct i915_oa_config *oa_config, 1979 struct intel_context *ce, 1980 struct i915_active *active) 1981 { 1982 struct i915_request *rq; 1983 struct i915_vma *vma; 1984 struct i915_gem_ww_ctx ww; 1985 int err; 1986 1987 vma = get_oa_vma(stream, oa_config); 1988 if (IS_ERR(vma)) 1989 return PTR_ERR(vma); 1990 1991 i915_gem_ww_ctx_init(&ww, true); 1992 retry: 1993 err = i915_gem_object_lock(vma->obj, &ww); 1994 if (err) 1995 goto err; 1996 1997 err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_GLOBAL | PIN_HIGH); 1998 if (err) 1999 goto err; 2000 2001 intel_engine_pm_get(ce->engine); 2002 rq = i915_request_create(ce); 2003 intel_engine_pm_put(ce->engine); 2004 if (IS_ERR(rq)) { 2005 err = PTR_ERR(rq); 2006 goto err_vma_unpin; 2007 } 2008 2009 if (!IS_ERR_OR_NULL(active)) { 2010 /* After all individual context modifications */ 2011 err = i915_request_await_active(rq, active, 2012 I915_ACTIVE_AWAIT_ACTIVE); 2013 if (err) 2014 goto err_add_request; 2015 2016 err = i915_active_add_request(active, rq); 2017 if (err) 2018 goto err_add_request; 2019 } 2020 2021 err = i915_request_await_object(rq, vma->obj, 0); 2022 if (!err) 2023 err = i915_vma_move_to_active(vma, rq, 0); 2024 if (err) 2025 goto err_add_request; 2026 2027 err = rq->engine->emit_bb_start(rq, 2028 vma->node.start, 0, 2029 I915_DISPATCH_SECURE); 2030 if (err) 2031 goto err_add_request; 2032 2033 err_add_request: 2034 i915_request_add(rq); 2035 err_vma_unpin: 2036 i915_vma_unpin(vma); 2037 err: 2038 if (err == -EDEADLK) { 2039 err = i915_gem_ww_ctx_backoff(&ww); 2040 if (!err) 2041 goto retry; 2042 } 2043 2044 i915_gem_ww_ctx_fini(&ww); 2045 i915_vma_put(vma); 2046 return err; 2047 } 2048 2049 static struct intel_context *oa_context(struct i915_perf_stream *stream) 2050 { 2051 return stream->pinned_ctx ?: stream->engine->kernel_context; 2052 } 2053 2054 static int 2055 hsw_enable_metric_set(struct i915_perf_stream *stream, 2056 struct i915_active *active) 2057 { 2058 struct intel_uncore *uncore = stream->uncore; 2059 2060 /* 2061 * PRM: 2062 * 2063 * OA unit is using “crclk” for its functionality. When trunk 2064 * level clock gating takes place, OA clock would be gated, 2065 * unable to count the events from non-render clock domain. 2066 * Render clock gating must be disabled when OA is enabled to 2067 * count the events from non-render domain. Unit level clock 2068 * gating for RCS should also be disabled. 2069 */ 2070 intel_uncore_rmw(uncore, GEN7_MISCCPCTL, 2071 GEN7_DOP_CLOCK_GATE_ENABLE, 0); 2072 intel_uncore_rmw(uncore, GEN6_UCGCTL1, 2073 0, GEN6_CSUNIT_CLOCK_GATE_DISABLE); 2074 2075 return emit_oa_config(stream, 2076 stream->oa_config, oa_context(stream), 2077 active); 2078 } 2079 2080 static void hsw_disable_metric_set(struct i915_perf_stream *stream) 2081 { 2082 struct intel_uncore *uncore = stream->uncore; 2083 2084 intel_uncore_rmw(uncore, GEN6_UCGCTL1, 2085 GEN6_CSUNIT_CLOCK_GATE_DISABLE, 0); 2086 intel_uncore_rmw(uncore, GEN7_MISCCPCTL, 2087 0, GEN7_DOP_CLOCK_GATE_ENABLE); 2088 2089 intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0); 2090 } 2091 2092 static u32 oa_config_flex_reg(const struct i915_oa_config *oa_config, 2093 i915_reg_t reg) 2094 { 2095 u32 mmio = i915_mmio_reg_offset(reg); 2096 int i; 2097 2098 /* 2099 * This arbitrary default will select the 'EU FPU0 Pipeline 2100 * Active' event. In the future it's anticipated that there 2101 * will be an explicit 'No Event' we can select, but not yet... 2102 */ 2103 if (!oa_config) 2104 return 0; 2105 2106 for (i = 0; i < oa_config->flex_regs_len; i++) { 2107 if (i915_mmio_reg_offset(oa_config->flex_regs[i].addr) == mmio) 2108 return oa_config->flex_regs[i].value; 2109 } 2110 2111 return 0; 2112 } 2113 /* 2114 * NB: It must always remain pointer safe to run this even if the OA unit 2115 * has been disabled. 2116 * 2117 * It's fine to put out-of-date values into these per-context registers 2118 * in the case that the OA unit has been disabled. 2119 */ 2120 static void 2121 gen8_update_reg_state_unlocked(const struct intel_context *ce, 2122 const struct i915_perf_stream *stream) 2123 { 2124 u32 ctx_oactxctrl = stream->perf->ctx_oactxctrl_offset; 2125 u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset; 2126 /* The MMIO offsets for Flex EU registers aren't contiguous */ 2127 static const i915_reg_t flex_regs[] = { 2128 EU_PERF_CNTL0, 2129 EU_PERF_CNTL1, 2130 EU_PERF_CNTL2, 2131 EU_PERF_CNTL3, 2132 EU_PERF_CNTL4, 2133 EU_PERF_CNTL5, 2134 EU_PERF_CNTL6, 2135 }; 2136 u32 *reg_state = ce->lrc_reg_state; 2137 int i; 2138 2139 reg_state[ctx_oactxctrl + 1] = 2140 (stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) | 2141 (stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) | 2142 GEN8_OA_COUNTER_RESUME; 2143 2144 for (i = 0; i < ARRAY_SIZE(flex_regs); i++) 2145 reg_state[ctx_flexeu0 + i * 2 + 1] = 2146 oa_config_flex_reg(stream->oa_config, flex_regs[i]); 2147 } 2148 2149 struct flex { 2150 i915_reg_t reg; 2151 u32 offset; 2152 u32 value; 2153 }; 2154 2155 static int 2156 gen8_store_flex(struct i915_request *rq, 2157 struct intel_context *ce, 2158 const struct flex *flex, unsigned int count) 2159 { 2160 u32 offset; 2161 u32 *cs; 2162 2163 cs = intel_ring_begin(rq, 4 * count); 2164 if (IS_ERR(cs)) 2165 return PTR_ERR(cs); 2166 2167 offset = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET; 2168 do { 2169 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; 2170 *cs++ = offset + flex->offset * sizeof(u32); 2171 *cs++ = 0; 2172 *cs++ = flex->value; 2173 } while (flex++, --count); 2174 2175 intel_ring_advance(rq, cs); 2176 2177 return 0; 2178 } 2179 2180 static int 2181 gen8_load_flex(struct i915_request *rq, 2182 struct intel_context *ce, 2183 const struct flex *flex, unsigned int count) 2184 { 2185 u32 *cs; 2186 2187 GEM_BUG_ON(!count || count > 63); 2188 2189 cs = intel_ring_begin(rq, 2 * count + 2); 2190 if (IS_ERR(cs)) 2191 return PTR_ERR(cs); 2192 2193 *cs++ = MI_LOAD_REGISTER_IMM(count); 2194 do { 2195 *cs++ = i915_mmio_reg_offset(flex->reg); 2196 *cs++ = flex->value; 2197 } while (flex++, --count); 2198 *cs++ = MI_NOOP; 2199 2200 intel_ring_advance(rq, cs); 2201 2202 return 0; 2203 } 2204 2205 static int gen8_modify_context(struct intel_context *ce, 2206 const struct flex *flex, unsigned int count) 2207 { 2208 struct i915_request *rq; 2209 int err; 2210 2211 rq = intel_engine_create_kernel_request(ce->engine); 2212 if (IS_ERR(rq)) 2213 return PTR_ERR(rq); 2214 2215 /* Serialise with the remote context */ 2216 err = intel_context_prepare_remote_request(ce, rq); 2217 if (err == 0) 2218 err = gen8_store_flex(rq, ce, flex, count); 2219 2220 i915_request_add(rq); 2221 return err; 2222 } 2223 2224 static int 2225 gen8_modify_self(struct intel_context *ce, 2226 const struct flex *flex, unsigned int count, 2227 struct i915_active *active) 2228 { 2229 struct i915_request *rq; 2230 int err; 2231 2232 intel_engine_pm_get(ce->engine); 2233 rq = i915_request_create(ce); 2234 intel_engine_pm_put(ce->engine); 2235 if (IS_ERR(rq)) 2236 return PTR_ERR(rq); 2237 2238 if (!IS_ERR_OR_NULL(active)) { 2239 err = i915_active_add_request(active, rq); 2240 if (err) 2241 goto err_add_request; 2242 } 2243 2244 err = gen8_load_flex(rq, ce, flex, count); 2245 if (err) 2246 goto err_add_request; 2247 2248 err_add_request: 2249 i915_request_add(rq); 2250 return err; 2251 } 2252 2253 static int gen8_configure_context(struct i915_gem_context *ctx, 2254 struct flex *flex, unsigned int count) 2255 { 2256 struct i915_gem_engines_iter it; 2257 struct intel_context *ce; 2258 int err = 0; 2259 2260 for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { 2261 GEM_BUG_ON(ce == ce->engine->kernel_context); 2262 2263 if (ce->engine->class != RENDER_CLASS) 2264 continue; 2265 2266 /* Otherwise OA settings will be set upon first use */ 2267 if (!intel_context_pin_if_active(ce)) 2268 continue; 2269 2270 flex->value = intel_sseu_make_rpcs(ce->engine->gt, &ce->sseu); 2271 err = gen8_modify_context(ce, flex, count); 2272 2273 intel_context_unpin(ce); 2274 if (err) 2275 break; 2276 } 2277 i915_gem_context_unlock_engines(ctx); 2278 2279 return err; 2280 } 2281 2282 static int gen12_configure_oar_context(struct i915_perf_stream *stream, 2283 struct i915_active *active) 2284 { 2285 int err; 2286 struct intel_context *ce = stream->pinned_ctx; 2287 u32 format = stream->oa_buffer.format; 2288 struct flex regs_context[] = { 2289 { 2290 GEN8_OACTXCONTROL, 2291 stream->perf->ctx_oactxctrl_offset + 1, 2292 active ? GEN8_OA_COUNTER_RESUME : 0, 2293 }, 2294 }; 2295 /* Offsets in regs_lri are not used since this configuration is only 2296 * applied using LRI. Initialize the correct offsets for posterity. 2297 */ 2298 #define GEN12_OAR_OACONTROL_OFFSET 0x5B0 2299 struct flex regs_lri[] = { 2300 { 2301 GEN12_OAR_OACONTROL, 2302 GEN12_OAR_OACONTROL_OFFSET + 1, 2303 (format << GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT) | 2304 (active ? GEN12_OAR_OACONTROL_COUNTER_ENABLE : 0) 2305 }, 2306 { 2307 RING_CONTEXT_CONTROL(ce->engine->mmio_base), 2308 CTX_CONTEXT_CONTROL, 2309 _MASKED_FIELD(GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE, 2310 active ? 2311 GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE : 2312 0) 2313 }, 2314 }; 2315 2316 /* Modify the context image of pinned context with regs_context*/ 2317 err = intel_context_lock_pinned(ce); 2318 if (err) 2319 return err; 2320 2321 err = gen8_modify_context(ce, regs_context, ARRAY_SIZE(regs_context)); 2322 intel_context_unlock_pinned(ce); 2323 if (err) 2324 return err; 2325 2326 /* Apply regs_lri using LRI with pinned context */ 2327 return gen8_modify_self(ce, regs_lri, ARRAY_SIZE(regs_lri), active); 2328 } 2329 2330 /* 2331 * Manages updating the per-context aspects of the OA stream 2332 * configuration across all contexts. 2333 * 2334 * The awkward consideration here is that OACTXCONTROL controls the 2335 * exponent for periodic sampling which is primarily used for system 2336 * wide profiling where we'd like a consistent sampling period even in 2337 * the face of context switches. 2338 * 2339 * Our approach of updating the register state context (as opposed to 2340 * say using a workaround batch buffer) ensures that the hardware 2341 * won't automatically reload an out-of-date timer exponent even 2342 * transiently before a WA BB could be parsed. 2343 * 2344 * This function needs to: 2345 * - Ensure the currently running context's per-context OA state is 2346 * updated 2347 * - Ensure that all existing contexts will have the correct per-context 2348 * OA state if they are scheduled for use. 2349 * - Ensure any new contexts will be initialized with the correct 2350 * per-context OA state. 2351 * 2352 * Note: it's only the RCS/Render context that has any OA state. 2353 * Note: the first flex register passed must always be R_PWR_CLK_STATE 2354 */ 2355 static int 2356 oa_configure_all_contexts(struct i915_perf_stream *stream, 2357 struct flex *regs, 2358 size_t num_regs, 2359 struct i915_active *active) 2360 { 2361 struct drm_i915_private *i915 = stream->perf->i915; 2362 struct intel_engine_cs *engine; 2363 struct i915_gem_context *ctx, *cn; 2364 int err; 2365 2366 lockdep_assert_held(&stream->perf->lock); 2367 2368 /* 2369 * The OA register config is setup through the context image. This image 2370 * might be written to by the GPU on context switch (in particular on 2371 * lite-restore). This means we can't safely update a context's image, 2372 * if this context is scheduled/submitted to run on the GPU. 2373 * 2374 * We could emit the OA register config through the batch buffer but 2375 * this might leave small interval of time where the OA unit is 2376 * configured at an invalid sampling period. 2377 * 2378 * Note that since we emit all requests from a single ring, there 2379 * is still an implicit global barrier here that may cause a high 2380 * priority context to wait for an otherwise independent low priority 2381 * context. Contexts idle at the time of reconfiguration are not 2382 * trapped behind the barrier. 2383 */ 2384 spin_lock(&i915->gem.contexts.lock); 2385 list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) { 2386 if (!kref_get_unless_zero(&ctx->ref)) 2387 continue; 2388 2389 spin_unlock(&i915->gem.contexts.lock); 2390 2391 err = gen8_configure_context(ctx, regs, num_regs); 2392 if (err) { 2393 i915_gem_context_put(ctx); 2394 return err; 2395 } 2396 2397 spin_lock(&i915->gem.contexts.lock); 2398 list_safe_reset_next(ctx, cn, link); 2399 i915_gem_context_put(ctx); 2400 } 2401 spin_unlock(&i915->gem.contexts.lock); 2402 2403 /* 2404 * After updating all other contexts, we need to modify ourselves. 2405 * If we don't modify the kernel_context, we do not get events while 2406 * idle. 2407 */ 2408 for_each_uabi_engine(engine, i915) { 2409 struct intel_context *ce = engine->kernel_context; 2410 2411 if (engine->class != RENDER_CLASS) 2412 continue; 2413 2414 regs[0].value = intel_sseu_make_rpcs(engine->gt, &ce->sseu); 2415 2416 err = gen8_modify_self(ce, regs, num_regs, active); 2417 if (err) 2418 return err; 2419 } 2420 2421 return 0; 2422 } 2423 2424 static int 2425 gen12_configure_all_contexts(struct i915_perf_stream *stream, 2426 const struct i915_oa_config *oa_config, 2427 struct i915_active *active) 2428 { 2429 struct flex regs[] = { 2430 { 2431 GEN8_R_PWR_CLK_STATE(RENDER_RING_BASE), 2432 CTX_R_PWR_CLK_STATE, 2433 }, 2434 }; 2435 2436 return oa_configure_all_contexts(stream, 2437 regs, ARRAY_SIZE(regs), 2438 active); 2439 } 2440 2441 static int 2442 lrc_configure_all_contexts(struct i915_perf_stream *stream, 2443 const struct i915_oa_config *oa_config, 2444 struct i915_active *active) 2445 { 2446 /* The MMIO offsets for Flex EU registers aren't contiguous */ 2447 const u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset; 2448 #define ctx_flexeuN(N) (ctx_flexeu0 + 2 * (N) + 1) 2449 struct flex regs[] = { 2450 { 2451 GEN8_R_PWR_CLK_STATE(RENDER_RING_BASE), 2452 CTX_R_PWR_CLK_STATE, 2453 }, 2454 { 2455 GEN8_OACTXCONTROL, 2456 stream->perf->ctx_oactxctrl_offset + 1, 2457 }, 2458 { EU_PERF_CNTL0, ctx_flexeuN(0) }, 2459 { EU_PERF_CNTL1, ctx_flexeuN(1) }, 2460 { EU_PERF_CNTL2, ctx_flexeuN(2) }, 2461 { EU_PERF_CNTL3, ctx_flexeuN(3) }, 2462 { EU_PERF_CNTL4, ctx_flexeuN(4) }, 2463 { EU_PERF_CNTL5, ctx_flexeuN(5) }, 2464 { EU_PERF_CNTL6, ctx_flexeuN(6) }, 2465 }; 2466 #undef ctx_flexeuN 2467 int i; 2468 2469 regs[1].value = 2470 (stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) | 2471 (stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) | 2472 GEN8_OA_COUNTER_RESUME; 2473 2474 for (i = 2; i < ARRAY_SIZE(regs); i++) 2475 regs[i].value = oa_config_flex_reg(oa_config, regs[i].reg); 2476 2477 return oa_configure_all_contexts(stream, 2478 regs, ARRAY_SIZE(regs), 2479 active); 2480 } 2481 2482 static int 2483 gen8_enable_metric_set(struct i915_perf_stream *stream, 2484 struct i915_active *active) 2485 { 2486 struct intel_uncore *uncore = stream->uncore; 2487 struct i915_oa_config *oa_config = stream->oa_config; 2488 int ret; 2489 2490 /* 2491 * We disable slice/unslice clock ratio change reports on SKL since 2492 * they are too noisy. The HW generates a lot of redundant reports 2493 * where the ratio hasn't really changed causing a lot of redundant 2494 * work to processes and increasing the chances we'll hit buffer 2495 * overruns. 2496 * 2497 * Although we don't currently use the 'disable overrun' OABUFFER 2498 * feature it's worth noting that clock ratio reports have to be 2499 * disabled before considering to use that feature since the HW doesn't 2500 * correctly block these reports. 2501 * 2502 * Currently none of the high-level metrics we have depend on knowing 2503 * this ratio to normalize. 2504 * 2505 * Note: This register is not power context saved and restored, but 2506 * that's OK considering that we disable RC6 while the OA unit is 2507 * enabled. 2508 * 2509 * The _INCLUDE_CLK_RATIO bit allows the slice/unslice frequency to 2510 * be read back from automatically triggered reports, as part of the 2511 * RPT_ID field. 2512 */ 2513 if (IS_GRAPHICS_VER(stream->perf->i915, 9, 11)) { 2514 intel_uncore_write(uncore, GEN8_OA_DEBUG, 2515 _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS | 2516 GEN9_OA_DEBUG_INCLUDE_CLK_RATIO)); 2517 } 2518 2519 /* 2520 * Update all contexts prior writing the mux configurations as we need 2521 * to make sure all slices/subslices are ON before writing to NOA 2522 * registers. 2523 */ 2524 ret = lrc_configure_all_contexts(stream, oa_config, active); 2525 if (ret) 2526 return ret; 2527 2528 return emit_oa_config(stream, 2529 stream->oa_config, oa_context(stream), 2530 active); 2531 } 2532 2533 static u32 oag_report_ctx_switches(const struct i915_perf_stream *stream) 2534 { 2535 return _MASKED_FIELD(GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS, 2536 (stream->sample_flags & SAMPLE_OA_REPORT) ? 2537 0 : GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS); 2538 } 2539 2540 static int 2541 gen12_enable_metric_set(struct i915_perf_stream *stream, 2542 struct i915_active *active) 2543 { 2544 struct intel_uncore *uncore = stream->uncore; 2545 struct i915_oa_config *oa_config = stream->oa_config; 2546 bool periodic = stream->periodic; 2547 u32 period_exponent = stream->period_exponent; 2548 int ret; 2549 2550 intel_uncore_write(uncore, GEN12_OAG_OA_DEBUG, 2551 /* Disable clk ratio reports, like previous Gens. */ 2552 _MASKED_BIT_ENABLE(GEN12_OAG_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS | 2553 GEN12_OAG_OA_DEBUG_INCLUDE_CLK_RATIO) | 2554 /* 2555 * If the user didn't require OA reports, instruct 2556 * the hardware not to emit ctx switch reports. 2557 */ 2558 oag_report_ctx_switches(stream)); 2559 2560 intel_uncore_write(uncore, GEN12_OAG_OAGLBCTXCTRL, periodic ? 2561 (GEN12_OAG_OAGLBCTXCTRL_COUNTER_RESUME | 2562 GEN12_OAG_OAGLBCTXCTRL_TIMER_ENABLE | 2563 (period_exponent << GEN12_OAG_OAGLBCTXCTRL_TIMER_PERIOD_SHIFT)) 2564 : 0); 2565 2566 /* 2567 * Update all contexts prior writing the mux configurations as we need 2568 * to make sure all slices/subslices are ON before writing to NOA 2569 * registers. 2570 */ 2571 ret = gen12_configure_all_contexts(stream, oa_config, active); 2572 if (ret) 2573 return ret; 2574 2575 /* 2576 * For Gen12, performance counters are context 2577 * saved/restored. Only enable it for the context that 2578 * requested this. 2579 */ 2580 if (stream->ctx) { 2581 ret = gen12_configure_oar_context(stream, active); 2582 if (ret) 2583 return ret; 2584 } 2585 2586 return emit_oa_config(stream, 2587 stream->oa_config, oa_context(stream), 2588 active); 2589 } 2590 2591 static void gen8_disable_metric_set(struct i915_perf_stream *stream) 2592 { 2593 struct intel_uncore *uncore = stream->uncore; 2594 2595 /* Reset all contexts' slices/subslices configurations. */ 2596 lrc_configure_all_contexts(stream, NULL, NULL); 2597 2598 intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0); 2599 } 2600 2601 static void gen11_disable_metric_set(struct i915_perf_stream *stream) 2602 { 2603 struct intel_uncore *uncore = stream->uncore; 2604 2605 /* Reset all contexts' slices/subslices configurations. */ 2606 lrc_configure_all_contexts(stream, NULL, NULL); 2607 2608 /* Make sure we disable noa to save power. */ 2609 intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0); 2610 } 2611 2612 static void gen12_disable_metric_set(struct i915_perf_stream *stream) 2613 { 2614 struct intel_uncore *uncore = stream->uncore; 2615 2616 /* Reset all contexts' slices/subslices configurations. */ 2617 gen12_configure_all_contexts(stream, NULL, NULL); 2618 2619 /* disable the context save/restore or OAR counters */ 2620 if (stream->ctx) 2621 gen12_configure_oar_context(stream, NULL); 2622 2623 /* Make sure we disable noa to save power. */ 2624 intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0); 2625 } 2626 2627 static void gen7_oa_enable(struct i915_perf_stream *stream) 2628 { 2629 struct intel_uncore *uncore = stream->uncore; 2630 struct i915_gem_context *ctx = stream->ctx; 2631 u32 ctx_id = stream->specific_ctx_id; 2632 bool periodic = stream->periodic; 2633 u32 period_exponent = stream->period_exponent; 2634 u32 report_format = stream->oa_buffer.format; 2635 2636 /* 2637 * Reset buf pointers so we don't forward reports from before now. 2638 * 2639 * Think carefully if considering trying to avoid this, since it 2640 * also ensures status flags and the buffer itself are cleared 2641 * in error paths, and we have checks for invalid reports based 2642 * on the assumption that certain fields are written to zeroed 2643 * memory which this helps maintains. 2644 */ 2645 gen7_init_oa_buffer(stream); 2646 2647 intel_uncore_write(uncore, GEN7_OACONTROL, 2648 (ctx_id & GEN7_OACONTROL_CTX_MASK) | 2649 (period_exponent << 2650 GEN7_OACONTROL_TIMER_PERIOD_SHIFT) | 2651 (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) | 2652 (report_format << GEN7_OACONTROL_FORMAT_SHIFT) | 2653 (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) | 2654 GEN7_OACONTROL_ENABLE); 2655 } 2656 2657 static void gen8_oa_enable(struct i915_perf_stream *stream) 2658 { 2659 struct intel_uncore *uncore = stream->uncore; 2660 u32 report_format = stream->oa_buffer.format; 2661 2662 /* 2663 * Reset buf pointers so we don't forward reports from before now. 2664 * 2665 * Think carefully if considering trying to avoid this, since it 2666 * also ensures status flags and the buffer itself are cleared 2667 * in error paths, and we have checks for invalid reports based 2668 * on the assumption that certain fields are written to zeroed 2669 * memory which this helps maintains. 2670 */ 2671 gen8_init_oa_buffer(stream); 2672 2673 /* 2674 * Note: we don't rely on the hardware to perform single context 2675 * filtering and instead filter on the cpu based on the context-id 2676 * field of reports 2677 */ 2678 intel_uncore_write(uncore, GEN8_OACONTROL, 2679 (report_format << GEN8_OA_REPORT_FORMAT_SHIFT) | 2680 GEN8_OA_COUNTER_ENABLE); 2681 } 2682 2683 static void gen12_oa_enable(struct i915_perf_stream *stream) 2684 { 2685 struct intel_uncore *uncore = stream->uncore; 2686 u32 report_format = stream->oa_buffer.format; 2687 2688 /* 2689 * If we don't want OA reports from the OA buffer, then we don't even 2690 * need to program the OAG unit. 2691 */ 2692 if (!(stream->sample_flags & SAMPLE_OA_REPORT)) 2693 return; 2694 2695 gen12_init_oa_buffer(stream); 2696 2697 intel_uncore_write(uncore, GEN12_OAG_OACONTROL, 2698 (report_format << GEN12_OAG_OACONTROL_OA_COUNTER_FORMAT_SHIFT) | 2699 GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE); 2700 } 2701 2702 /** 2703 * i915_oa_stream_enable - handle `I915_PERF_IOCTL_ENABLE` for OA stream 2704 * @stream: An i915 perf stream opened for OA metrics 2705 * 2706 * [Re]enables hardware periodic sampling according to the period configured 2707 * when opening the stream. This also starts a hrtimer that will periodically 2708 * check for data in the circular OA buffer for notifying userspace (e.g. 2709 * during a read() or poll()). 2710 */ 2711 static void i915_oa_stream_enable(struct i915_perf_stream *stream) 2712 { 2713 stream->pollin = false; 2714 2715 stream->perf->ops.oa_enable(stream); 2716 2717 if (stream->sample_flags & SAMPLE_OA_REPORT) 2718 hrtimer_start(&stream->poll_check_timer, 2719 ns_to_ktime(stream->poll_oa_period), 2720 HRTIMER_MODE_REL_PINNED); 2721 } 2722 2723 static void gen7_oa_disable(struct i915_perf_stream *stream) 2724 { 2725 struct intel_uncore *uncore = stream->uncore; 2726 2727 intel_uncore_write(uncore, GEN7_OACONTROL, 0); 2728 if (intel_wait_for_register(uncore, 2729 GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0, 2730 50)) 2731 drm_err(&stream->perf->i915->drm, 2732 "wait for OA to be disabled timed out\n"); 2733 } 2734 2735 static void gen8_oa_disable(struct i915_perf_stream *stream) 2736 { 2737 struct intel_uncore *uncore = stream->uncore; 2738 2739 intel_uncore_write(uncore, GEN8_OACONTROL, 0); 2740 if (intel_wait_for_register(uncore, 2741 GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0, 2742 50)) 2743 drm_err(&stream->perf->i915->drm, 2744 "wait for OA to be disabled timed out\n"); 2745 } 2746 2747 static void gen12_oa_disable(struct i915_perf_stream *stream) 2748 { 2749 struct intel_uncore *uncore = stream->uncore; 2750 2751 intel_uncore_write(uncore, GEN12_OAG_OACONTROL, 0); 2752 if (intel_wait_for_register(uncore, 2753 GEN12_OAG_OACONTROL, 2754 GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE, 0, 2755 50)) 2756 drm_err(&stream->perf->i915->drm, 2757 "wait for OA to be disabled timed out\n"); 2758 2759 intel_uncore_write(uncore, GEN12_OA_TLB_INV_CR, 1); 2760 if (intel_wait_for_register(uncore, 2761 GEN12_OA_TLB_INV_CR, 2762 1, 0, 2763 50)) 2764 drm_err(&stream->perf->i915->drm, 2765 "wait for OA tlb invalidate timed out\n"); 2766 } 2767 2768 /** 2769 * i915_oa_stream_disable - handle `I915_PERF_IOCTL_DISABLE` for OA stream 2770 * @stream: An i915 perf stream opened for OA metrics 2771 * 2772 * Stops the OA unit from periodically writing counter reports into the 2773 * circular OA buffer. This also stops the hrtimer that periodically checks for 2774 * data in the circular OA buffer, for notifying userspace. 2775 */ 2776 static void i915_oa_stream_disable(struct i915_perf_stream *stream) 2777 { 2778 stream->perf->ops.oa_disable(stream); 2779 2780 if (stream->sample_flags & SAMPLE_OA_REPORT) 2781 hrtimer_cancel(&stream->poll_check_timer); 2782 } 2783 2784 static const struct i915_perf_stream_ops i915_oa_stream_ops = { 2785 .destroy = i915_oa_stream_destroy, 2786 .enable = i915_oa_stream_enable, 2787 .disable = i915_oa_stream_disable, 2788 .wait_unlocked = i915_oa_wait_unlocked, 2789 .poll_wait = i915_oa_poll_wait, 2790 .read = i915_oa_read, 2791 }; 2792 2793 static int i915_perf_stream_enable_sync(struct i915_perf_stream *stream) 2794 { 2795 struct i915_active *active; 2796 int err; 2797 2798 active = i915_active_create(); 2799 if (!active) 2800 return -ENOMEM; 2801 2802 err = stream->perf->ops.enable_metric_set(stream, active); 2803 if (err == 0) 2804 __i915_active_wait(active, TASK_UNINTERRUPTIBLE); 2805 2806 i915_active_put(active); 2807 return err; 2808 } 2809 2810 static void 2811 get_default_sseu_config(struct intel_sseu *out_sseu, 2812 struct intel_engine_cs *engine) 2813 { 2814 const struct sseu_dev_info *devinfo_sseu = &engine->gt->info.sseu; 2815 2816 *out_sseu = intel_sseu_from_device_info(devinfo_sseu); 2817 2818 if (GRAPHICS_VER(engine->i915) == 11) { 2819 /* 2820 * We only need subslice count so it doesn't matter which ones 2821 * we select - just turn off low bits in the amount of half of 2822 * all available subslices per slice. 2823 */ 2824 out_sseu->subslice_mask = 2825 ~(~0 << (hweight8(out_sseu->subslice_mask) / 2)); 2826 out_sseu->slice_mask = 0x1; 2827 } 2828 } 2829 2830 static int 2831 get_sseu_config(struct intel_sseu *out_sseu, 2832 struct intel_engine_cs *engine, 2833 const struct drm_i915_gem_context_param_sseu *drm_sseu) 2834 { 2835 if (drm_sseu->engine.engine_class != engine->uabi_class || 2836 drm_sseu->engine.engine_instance != engine->uabi_instance) 2837 return -EINVAL; 2838 2839 return i915_gem_user_to_context_sseu(engine->gt, drm_sseu, out_sseu); 2840 } 2841 2842 /** 2843 * i915_oa_stream_init - validate combined props for OA stream and init 2844 * @stream: An i915 perf stream 2845 * @param: The open parameters passed to `DRM_I915_PERF_OPEN` 2846 * @props: The property state that configures stream (individually validated) 2847 * 2848 * While read_properties_unlocked() validates properties in isolation it 2849 * doesn't ensure that the combination necessarily makes sense. 2850 * 2851 * At this point it has been determined that userspace wants a stream of 2852 * OA metrics, but still we need to further validate the combined 2853 * properties are OK. 2854 * 2855 * If the configuration makes sense then we can allocate memory for 2856 * a circular OA buffer and apply the requested metric set configuration. 2857 * 2858 * Returns: zero on success or a negative error code. 2859 */ 2860 static int i915_oa_stream_init(struct i915_perf_stream *stream, 2861 struct drm_i915_perf_open_param *param, 2862 struct perf_open_properties *props) 2863 { 2864 struct drm_i915_private *i915 = stream->perf->i915; 2865 struct i915_perf *perf = stream->perf; 2866 int format_size; 2867 int ret; 2868 2869 if (!props->engine) { 2870 drm_dbg(&stream->perf->i915->drm, 2871 "OA engine not specified\n"); 2872 return -EINVAL; 2873 } 2874 2875 /* 2876 * If the sysfs metrics/ directory wasn't registered for some 2877 * reason then don't let userspace try their luck with config 2878 * IDs 2879 */ 2880 if (!perf->metrics_kobj) { 2881 drm_dbg(&stream->perf->i915->drm, 2882 "OA metrics weren't advertised via sysfs\n"); 2883 return -EINVAL; 2884 } 2885 2886 if (!(props->sample_flags & SAMPLE_OA_REPORT) && 2887 (GRAPHICS_VER(perf->i915) < 12 || !stream->ctx)) { 2888 drm_dbg(&stream->perf->i915->drm, 2889 "Only OA report sampling supported\n"); 2890 return -EINVAL; 2891 } 2892 2893 if (!perf->ops.enable_metric_set) { 2894 drm_dbg(&stream->perf->i915->drm, 2895 "OA unit not supported\n"); 2896 return -ENODEV; 2897 } 2898 2899 /* 2900 * To avoid the complexity of having to accurately filter 2901 * counter reports and marshal to the appropriate client 2902 * we currently only allow exclusive access 2903 */ 2904 if (perf->exclusive_stream) { 2905 drm_dbg(&stream->perf->i915->drm, 2906 "OA unit already in use\n"); 2907 return -EBUSY; 2908 } 2909 2910 if (!props->oa_format) { 2911 drm_dbg(&stream->perf->i915->drm, 2912 "OA report format not specified\n"); 2913 return -EINVAL; 2914 } 2915 2916 stream->engine = props->engine; 2917 stream->uncore = stream->engine->gt->uncore; 2918 2919 stream->sample_size = sizeof(struct drm_i915_perf_record_header); 2920 2921 format_size = perf->oa_formats[props->oa_format].size; 2922 2923 stream->sample_flags = props->sample_flags; 2924 stream->sample_size += format_size; 2925 2926 stream->oa_buffer.format_size = format_size; 2927 if (drm_WARN_ON(&i915->drm, stream->oa_buffer.format_size == 0)) 2928 return -EINVAL; 2929 2930 stream->hold_preemption = props->hold_preemption; 2931 2932 stream->oa_buffer.format = 2933 perf->oa_formats[props->oa_format].format; 2934 2935 stream->periodic = props->oa_periodic; 2936 if (stream->periodic) 2937 stream->period_exponent = props->oa_period_exponent; 2938 2939 if (stream->ctx) { 2940 ret = oa_get_render_ctx_id(stream); 2941 if (ret) { 2942 drm_dbg(&stream->perf->i915->drm, 2943 "Invalid context id to filter with\n"); 2944 return ret; 2945 } 2946 } 2947 2948 ret = alloc_noa_wait(stream); 2949 if (ret) { 2950 drm_dbg(&stream->perf->i915->drm, 2951 "Unable to allocate NOA wait batch buffer\n"); 2952 goto err_noa_wait_alloc; 2953 } 2954 2955 stream->oa_config = i915_perf_get_oa_config(perf, props->metrics_set); 2956 if (!stream->oa_config) { 2957 drm_dbg(&stream->perf->i915->drm, 2958 "Invalid OA config id=%i\n", props->metrics_set); 2959 ret = -EINVAL; 2960 goto err_config; 2961 } 2962 2963 /* PRM - observability performance counters: 2964 * 2965 * OACONTROL, performance counter enable, note: 2966 * 2967 * "When this bit is set, in order to have coherent counts, 2968 * RC6 power state and trunk clock gating must be disabled. 2969 * This can be achieved by programming MMIO registers as 2970 * 0xA094=0 and 0xA090[31]=1" 2971 * 2972 * In our case we are expecting that taking pm + FORCEWAKE 2973 * references will effectively disable RC6. 2974 */ 2975 intel_engine_pm_get(stream->engine); 2976 intel_uncore_forcewake_get(stream->uncore, FORCEWAKE_ALL); 2977 2978 ret = alloc_oa_buffer(stream); 2979 if (ret) 2980 goto err_oa_buf_alloc; 2981 2982 stream->ops = &i915_oa_stream_ops; 2983 2984 perf->sseu = props->sseu; 2985 WRITE_ONCE(perf->exclusive_stream, stream); 2986 2987 ret = i915_perf_stream_enable_sync(stream); 2988 if (ret) { 2989 drm_dbg(&stream->perf->i915->drm, 2990 "Unable to enable metric set\n"); 2991 goto err_enable; 2992 } 2993 2994 drm_dbg(&stream->perf->i915->drm, 2995 "opening stream oa config uuid=%s\n", 2996 stream->oa_config->uuid); 2997 2998 hrtimer_init(&stream->poll_check_timer, 2999 CLOCK_MONOTONIC, HRTIMER_MODE_REL); 3000 stream->poll_check_timer.function = oa_poll_check_timer_cb; 3001 init_waitqueue_head(&stream->poll_wq); 3002 spin_lock_init(&stream->oa_buffer.ptr_lock); 3003 3004 return 0; 3005 3006 err_enable: 3007 WRITE_ONCE(perf->exclusive_stream, NULL); 3008 perf->ops.disable_metric_set(stream); 3009 3010 free_oa_buffer(stream); 3011 3012 err_oa_buf_alloc: 3013 free_oa_configs(stream); 3014 3015 intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL); 3016 intel_engine_pm_put(stream->engine); 3017 3018 err_config: 3019 free_noa_wait(stream); 3020 3021 err_noa_wait_alloc: 3022 if (stream->ctx) 3023 oa_put_render_ctx_id(stream); 3024 3025 return ret; 3026 } 3027 3028 void i915_oa_init_reg_state(const struct intel_context *ce, 3029 const struct intel_engine_cs *engine) 3030 { 3031 struct i915_perf_stream *stream; 3032 3033 if (engine->class != RENDER_CLASS) 3034 return; 3035 3036 /* perf.exclusive_stream serialised by lrc_configure_all_contexts() */ 3037 stream = READ_ONCE(engine->i915->perf.exclusive_stream); 3038 if (stream && GRAPHICS_VER(stream->perf->i915) < 12) 3039 gen8_update_reg_state_unlocked(ce, stream); 3040 } 3041 3042 /** 3043 * i915_perf_read - handles read() FOP for i915 perf stream FDs 3044 * @file: An i915 perf stream file 3045 * @buf: destination buffer given by userspace 3046 * @count: the number of bytes userspace wants to read 3047 * @ppos: (inout) file seek position (unused) 3048 * 3049 * The entry point for handling a read() on a stream file descriptor from 3050 * userspace. Most of the work is left to the i915_perf_read_locked() and 3051 * &i915_perf_stream_ops->read but to save having stream implementations (of 3052 * which we might have multiple later) we handle blocking read here. 3053 * 3054 * We can also consistently treat trying to read from a disabled stream 3055 * as an IO error so implementations can assume the stream is enabled 3056 * while reading. 3057 * 3058 * Returns: The number of bytes copied or a negative error code on failure. 3059 */ 3060 static ssize_t i915_perf_read(struct file *file, 3061 char __user *buf, 3062 size_t count, 3063 loff_t *ppos) 3064 { 3065 struct i915_perf_stream *stream = file->private_data; 3066 struct i915_perf *perf = stream->perf; 3067 size_t offset = 0; 3068 int ret; 3069 3070 /* To ensure it's handled consistently we simply treat all reads of a 3071 * disabled stream as an error. In particular it might otherwise lead 3072 * to a deadlock for blocking file descriptors... 3073 */ 3074 if (!stream->enabled || !(stream->sample_flags & SAMPLE_OA_REPORT)) 3075 return -EIO; 3076 3077 if (!(file->f_flags & O_NONBLOCK)) { 3078 /* There's the small chance of false positives from 3079 * stream->ops->wait_unlocked. 3080 * 3081 * E.g. with single context filtering since we only wait until 3082 * oabuffer has >= 1 report we don't immediately know whether 3083 * any reports really belong to the current context 3084 */ 3085 do { 3086 ret = stream->ops->wait_unlocked(stream); 3087 if (ret) 3088 return ret; 3089 3090 mutex_lock(&perf->lock); 3091 ret = stream->ops->read(stream, buf, count, &offset); 3092 mutex_unlock(&perf->lock); 3093 } while (!offset && !ret); 3094 } else { 3095 mutex_lock(&perf->lock); 3096 ret = stream->ops->read(stream, buf, count, &offset); 3097 mutex_unlock(&perf->lock); 3098 } 3099 3100 /* We allow the poll checking to sometimes report false positive EPOLLIN 3101 * events where we might actually report EAGAIN on read() if there's 3102 * not really any data available. In this situation though we don't 3103 * want to enter a busy loop between poll() reporting a EPOLLIN event 3104 * and read() returning -EAGAIN. Clearing the oa.pollin state here 3105 * effectively ensures we back off until the next hrtimer callback 3106 * before reporting another EPOLLIN event. 3107 * The exception to this is if ops->read() returned -ENOSPC which means 3108 * that more OA data is available than could fit in the user provided 3109 * buffer. In this case we want the next poll() call to not block. 3110 */ 3111 if (ret != -ENOSPC) 3112 stream->pollin = false; 3113 3114 /* Possible values for ret are 0, -EFAULT, -ENOSPC, -EIO, ... */ 3115 return offset ?: (ret ?: -EAGAIN); 3116 } 3117 3118 static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer) 3119 { 3120 struct i915_perf_stream *stream = 3121 container_of(hrtimer, typeof(*stream), poll_check_timer); 3122 3123 if (oa_buffer_check_unlocked(stream)) { 3124 stream->pollin = true; 3125 wake_up(&stream->poll_wq); 3126 } 3127 3128 hrtimer_forward_now(hrtimer, 3129 ns_to_ktime(stream->poll_oa_period)); 3130 3131 return HRTIMER_RESTART; 3132 } 3133 3134 /** 3135 * i915_perf_poll_locked - poll_wait() with a suitable wait queue for stream 3136 * @stream: An i915 perf stream 3137 * @file: An i915 perf stream file 3138 * @wait: poll() state table 3139 * 3140 * For handling userspace polling on an i915 perf stream, this calls through to 3141 * &i915_perf_stream_ops->poll_wait to call poll_wait() with a wait queue that 3142 * will be woken for new stream data. 3143 * 3144 * Note: The &perf->lock mutex has been taken to serialize 3145 * with any non-file-operation driver hooks. 3146 * 3147 * Returns: any poll events that are ready without sleeping 3148 */ 3149 static __poll_t i915_perf_poll_locked(struct i915_perf_stream *stream, 3150 struct file *file, 3151 poll_table *wait) 3152 { 3153 __poll_t events = 0; 3154 3155 stream->ops->poll_wait(stream, file, wait); 3156 3157 /* Note: we don't explicitly check whether there's something to read 3158 * here since this path may be very hot depending on what else 3159 * userspace is polling, or on the timeout in use. We rely solely on 3160 * the hrtimer/oa_poll_check_timer_cb to notify us when there are 3161 * samples to read. 3162 */ 3163 if (stream->pollin) 3164 events |= EPOLLIN; 3165 3166 return events; 3167 } 3168 3169 /** 3170 * i915_perf_poll - call poll_wait() with a suitable wait queue for stream 3171 * @file: An i915 perf stream file 3172 * @wait: poll() state table 3173 * 3174 * For handling userspace polling on an i915 perf stream, this ensures 3175 * poll_wait() gets called with a wait queue that will be woken for new stream 3176 * data. 3177 * 3178 * Note: Implementation deferred to i915_perf_poll_locked() 3179 * 3180 * Returns: any poll events that are ready without sleeping 3181 */ 3182 static __poll_t i915_perf_poll(struct file *file, poll_table *wait) 3183 { 3184 struct i915_perf_stream *stream = file->private_data; 3185 struct i915_perf *perf = stream->perf; 3186 __poll_t ret; 3187 3188 mutex_lock(&perf->lock); 3189 ret = i915_perf_poll_locked(stream, file, wait); 3190 mutex_unlock(&perf->lock); 3191 3192 return ret; 3193 } 3194 3195 /** 3196 * i915_perf_enable_locked - handle `I915_PERF_IOCTL_ENABLE` ioctl 3197 * @stream: A disabled i915 perf stream 3198 * 3199 * [Re]enables the associated capture of data for this stream. 3200 * 3201 * If a stream was previously enabled then there's currently no intention 3202 * to provide userspace any guarantee about the preservation of previously 3203 * buffered data. 3204 */ 3205 static void i915_perf_enable_locked(struct i915_perf_stream *stream) 3206 { 3207 if (stream->enabled) 3208 return; 3209 3210 /* Allow stream->ops->enable() to refer to this */ 3211 stream->enabled = true; 3212 3213 if (stream->ops->enable) 3214 stream->ops->enable(stream); 3215 3216 if (stream->hold_preemption) 3217 intel_context_set_nopreempt(stream->pinned_ctx); 3218 } 3219 3220 /** 3221 * i915_perf_disable_locked - handle `I915_PERF_IOCTL_DISABLE` ioctl 3222 * @stream: An enabled i915 perf stream 3223 * 3224 * Disables the associated capture of data for this stream. 3225 * 3226 * The intention is that disabling an re-enabling a stream will ideally be 3227 * cheaper than destroying and re-opening a stream with the same configuration, 3228 * though there are no formal guarantees about what state or buffered data 3229 * must be retained between disabling and re-enabling a stream. 3230 * 3231 * Note: while a stream is disabled it's considered an error for userspace 3232 * to attempt to read from the stream (-EIO). 3233 */ 3234 static void i915_perf_disable_locked(struct i915_perf_stream *stream) 3235 { 3236 if (!stream->enabled) 3237 return; 3238 3239 /* Allow stream->ops->disable() to refer to this */ 3240 stream->enabled = false; 3241 3242 if (stream->hold_preemption) 3243 intel_context_clear_nopreempt(stream->pinned_ctx); 3244 3245 if (stream->ops->disable) 3246 stream->ops->disable(stream); 3247 } 3248 3249 static long i915_perf_config_locked(struct i915_perf_stream *stream, 3250 unsigned long metrics_set) 3251 { 3252 struct i915_oa_config *config; 3253 long ret = stream->oa_config->id; 3254 3255 config = i915_perf_get_oa_config(stream->perf, metrics_set); 3256 if (!config) 3257 return -EINVAL; 3258 3259 if (config != stream->oa_config) { 3260 int err; 3261 3262 /* 3263 * If OA is bound to a specific context, emit the 3264 * reconfiguration inline from that context. The update 3265 * will then be ordered with respect to submission on that 3266 * context. 3267 * 3268 * When set globally, we use a low priority kernel context, 3269 * so it will effectively take effect when idle. 3270 */ 3271 err = emit_oa_config(stream, config, oa_context(stream), NULL); 3272 if (!err) 3273 config = xchg(&stream->oa_config, config); 3274 else 3275 ret = err; 3276 } 3277 3278 i915_oa_config_put(config); 3279 3280 return ret; 3281 } 3282 3283 /** 3284 * i915_perf_ioctl_locked - support ioctl() usage with i915 perf stream FDs 3285 * @stream: An i915 perf stream 3286 * @cmd: the ioctl request 3287 * @arg: the ioctl data 3288 * 3289 * Note: The &perf->lock mutex has been taken to serialize 3290 * with any non-file-operation driver hooks. 3291 * 3292 * Returns: zero on success or a negative error code. Returns -EINVAL for 3293 * an unknown ioctl request. 3294 */ 3295 static long i915_perf_ioctl_locked(struct i915_perf_stream *stream, 3296 unsigned int cmd, 3297 unsigned long arg) 3298 { 3299 switch (cmd) { 3300 case I915_PERF_IOCTL_ENABLE: 3301 i915_perf_enable_locked(stream); 3302 return 0; 3303 case I915_PERF_IOCTL_DISABLE: 3304 i915_perf_disable_locked(stream); 3305 return 0; 3306 case I915_PERF_IOCTL_CONFIG: 3307 return i915_perf_config_locked(stream, arg); 3308 } 3309 3310 return -EINVAL; 3311 } 3312 3313 /** 3314 * i915_perf_ioctl - support ioctl() usage with i915 perf stream FDs 3315 * @file: An i915 perf stream file 3316 * @cmd: the ioctl request 3317 * @arg: the ioctl data 3318 * 3319 * Implementation deferred to i915_perf_ioctl_locked(). 3320 * 3321 * Returns: zero on success or a negative error code. Returns -EINVAL for 3322 * an unknown ioctl request. 3323 */ 3324 static long i915_perf_ioctl(struct file *file, 3325 unsigned int cmd, 3326 unsigned long arg) 3327 { 3328 struct i915_perf_stream *stream = file->private_data; 3329 struct i915_perf *perf = stream->perf; 3330 long ret; 3331 3332 mutex_lock(&perf->lock); 3333 ret = i915_perf_ioctl_locked(stream, cmd, arg); 3334 mutex_unlock(&perf->lock); 3335 3336 return ret; 3337 } 3338 3339 /** 3340 * i915_perf_destroy_locked - destroy an i915 perf stream 3341 * @stream: An i915 perf stream 3342 * 3343 * Frees all resources associated with the given i915 perf @stream, disabling 3344 * any associated data capture in the process. 3345 * 3346 * Note: The &perf->lock mutex has been taken to serialize 3347 * with any non-file-operation driver hooks. 3348 */ 3349 static void i915_perf_destroy_locked(struct i915_perf_stream *stream) 3350 { 3351 if (stream->enabled) 3352 i915_perf_disable_locked(stream); 3353 3354 if (stream->ops->destroy) 3355 stream->ops->destroy(stream); 3356 3357 if (stream->ctx) 3358 i915_gem_context_put(stream->ctx); 3359 3360 kfree(stream); 3361 } 3362 3363 /** 3364 * i915_perf_release - handles userspace close() of a stream file 3365 * @inode: anonymous inode associated with file 3366 * @file: An i915 perf stream file 3367 * 3368 * Cleans up any resources associated with an open i915 perf stream file. 3369 * 3370 * NB: close() can't really fail from the userspace point of view. 3371 * 3372 * Returns: zero on success or a negative error code. 3373 */ 3374 static int i915_perf_release(struct inode *inode, struct file *file) 3375 { 3376 struct i915_perf_stream *stream = file->private_data; 3377 struct i915_perf *perf = stream->perf; 3378 3379 mutex_lock(&perf->lock); 3380 i915_perf_destroy_locked(stream); 3381 mutex_unlock(&perf->lock); 3382 3383 /* Release the reference the perf stream kept on the driver. */ 3384 drm_dev_put(&perf->i915->drm); 3385 3386 return 0; 3387 } 3388 3389 3390 static const struct file_operations fops = { 3391 .owner = THIS_MODULE, 3392 .llseek = no_llseek, 3393 .release = i915_perf_release, 3394 .poll = i915_perf_poll, 3395 .read = i915_perf_read, 3396 .unlocked_ioctl = i915_perf_ioctl, 3397 /* Our ioctl have no arguments, so it's safe to use the same function 3398 * to handle 32bits compatibility. 3399 */ 3400 .compat_ioctl = i915_perf_ioctl, 3401 }; 3402 3403 3404 /** 3405 * i915_perf_open_ioctl_locked - DRM ioctl() for userspace to open a stream FD 3406 * @perf: i915 perf instance 3407 * @param: The open parameters passed to 'DRM_I915_PERF_OPEN` 3408 * @props: individually validated u64 property value pairs 3409 * @file: drm file 3410 * 3411 * See i915_perf_ioctl_open() for interface details. 3412 * 3413 * Implements further stream config validation and stream initialization on 3414 * behalf of i915_perf_open_ioctl() with the &perf->lock mutex 3415 * taken to serialize with any non-file-operation driver hooks. 3416 * 3417 * Note: at this point the @props have only been validated in isolation and 3418 * it's still necessary to validate that the combination of properties makes 3419 * sense. 3420 * 3421 * In the case where userspace is interested in OA unit metrics then further 3422 * config validation and stream initialization details will be handled by 3423 * i915_oa_stream_init(). The code here should only validate config state that 3424 * will be relevant to all stream types / backends. 3425 * 3426 * Returns: zero on success or a negative error code. 3427 */ 3428 static int 3429 i915_perf_open_ioctl_locked(struct i915_perf *perf, 3430 struct drm_i915_perf_open_param *param, 3431 struct perf_open_properties *props, 3432 struct drm_file *file) 3433 { 3434 struct i915_gem_context *specific_ctx = NULL; 3435 struct i915_perf_stream *stream = NULL; 3436 unsigned long f_flags = 0; 3437 bool privileged_op = true; 3438 int stream_fd; 3439 int ret; 3440 3441 if (props->single_context) { 3442 u32 ctx_handle = props->ctx_handle; 3443 struct drm_i915_file_private *file_priv = file->driver_priv; 3444 3445 specific_ctx = i915_gem_context_lookup(file_priv, ctx_handle); 3446 if (IS_ERR(specific_ctx)) { 3447 drm_dbg(&perf->i915->drm, 3448 "Failed to look up context with ID %u for opening perf stream\n", 3449 ctx_handle); 3450 ret = PTR_ERR(specific_ctx); 3451 goto err; 3452 } 3453 } 3454 3455 /* 3456 * On Haswell the OA unit supports clock gating off for a specific 3457 * context and in this mode there's no visibility of metrics for the 3458 * rest of the system, which we consider acceptable for a 3459 * non-privileged client. 3460 * 3461 * For Gen8->11 the OA unit no longer supports clock gating off for a 3462 * specific context and the kernel can't securely stop the counters 3463 * from updating as system-wide / global values. Even though we can 3464 * filter reports based on the included context ID we can't block 3465 * clients from seeing the raw / global counter values via 3466 * MI_REPORT_PERF_COUNT commands and so consider it a privileged op to 3467 * enable the OA unit by default. 3468 * 3469 * For Gen12+ we gain a new OAR unit that only monitors the RCS on a 3470 * per context basis. So we can relax requirements there if the user 3471 * doesn't request global stream access (i.e. query based sampling 3472 * using MI_RECORD_PERF_COUNT. 3473 */ 3474 if (IS_HASWELL(perf->i915) && specific_ctx) 3475 privileged_op = false; 3476 else if (GRAPHICS_VER(perf->i915) == 12 && specific_ctx && 3477 (props->sample_flags & SAMPLE_OA_REPORT) == 0) 3478 privileged_op = false; 3479 3480 if (props->hold_preemption) { 3481 if (!props->single_context) { 3482 drm_dbg(&perf->i915->drm, 3483 "preemption disable with no context\n"); 3484 ret = -EINVAL; 3485 goto err; 3486 } 3487 privileged_op = true; 3488 } 3489 3490 /* 3491 * Asking for SSEU configuration is a priviliged operation. 3492 */ 3493 if (props->has_sseu) 3494 privileged_op = true; 3495 else 3496 get_default_sseu_config(&props->sseu, props->engine); 3497 3498 /* Similar to perf's kernel.perf_paranoid_cpu sysctl option 3499 * we check a dev.i915.perf_stream_paranoid sysctl option 3500 * to determine if it's ok to access system wide OA counters 3501 * without CAP_PERFMON or CAP_SYS_ADMIN privileges. 3502 */ 3503 if (privileged_op && 3504 i915_perf_stream_paranoid && !perfmon_capable()) { 3505 drm_dbg(&perf->i915->drm, 3506 "Insufficient privileges to open i915 perf stream\n"); 3507 ret = -EACCES; 3508 goto err_ctx; 3509 } 3510 3511 stream = kzalloc(sizeof(*stream), GFP_KERNEL); 3512 if (!stream) { 3513 ret = -ENOMEM; 3514 goto err_ctx; 3515 } 3516 3517 stream->perf = perf; 3518 stream->ctx = specific_ctx; 3519 stream->poll_oa_period = props->poll_oa_period; 3520 3521 ret = i915_oa_stream_init(stream, param, props); 3522 if (ret) 3523 goto err_alloc; 3524 3525 /* we avoid simply assigning stream->sample_flags = props->sample_flags 3526 * to have _stream_init check the combination of sample flags more 3527 * thoroughly, but still this is the expected result at this point. 3528 */ 3529 if (WARN_ON(stream->sample_flags != props->sample_flags)) { 3530 ret = -ENODEV; 3531 goto err_flags; 3532 } 3533 3534 if (param->flags & I915_PERF_FLAG_FD_CLOEXEC) 3535 f_flags |= O_CLOEXEC; 3536 if (param->flags & I915_PERF_FLAG_FD_NONBLOCK) 3537 f_flags |= O_NONBLOCK; 3538 3539 stream_fd = anon_inode_getfd("[i915_perf]", &fops, stream, f_flags); 3540 if (stream_fd < 0) { 3541 ret = stream_fd; 3542 goto err_flags; 3543 } 3544 3545 if (!(param->flags & I915_PERF_FLAG_DISABLED)) 3546 i915_perf_enable_locked(stream); 3547 3548 /* Take a reference on the driver that will be kept with stream_fd 3549 * until its release. 3550 */ 3551 drm_dev_get(&perf->i915->drm); 3552 3553 return stream_fd; 3554 3555 err_flags: 3556 if (stream->ops->destroy) 3557 stream->ops->destroy(stream); 3558 err_alloc: 3559 kfree(stream); 3560 err_ctx: 3561 if (specific_ctx) 3562 i915_gem_context_put(specific_ctx); 3563 err: 3564 return ret; 3565 } 3566 3567 static u64 oa_exponent_to_ns(struct i915_perf *perf, int exponent) 3568 { 3569 return intel_gt_clock_interval_to_ns(to_gt(perf->i915), 3570 2ULL << exponent); 3571 } 3572 3573 static __always_inline bool 3574 oa_format_valid(struct i915_perf *perf, enum drm_i915_oa_format format) 3575 { 3576 return test_bit(format, perf->format_mask); 3577 } 3578 3579 static __always_inline void 3580 oa_format_add(struct i915_perf *perf, enum drm_i915_oa_format format) 3581 { 3582 __set_bit(format, perf->format_mask); 3583 } 3584 3585 /** 3586 * read_properties_unlocked - validate + copy userspace stream open properties 3587 * @perf: i915 perf instance 3588 * @uprops: The array of u64 key value pairs given by userspace 3589 * @n_props: The number of key value pairs expected in @uprops 3590 * @props: The stream configuration built up while validating properties 3591 * 3592 * Note this function only validates properties in isolation it doesn't 3593 * validate that the combination of properties makes sense or that all 3594 * properties necessary for a particular kind of stream have been set. 3595 * 3596 * Note that there currently aren't any ordering requirements for properties so 3597 * we shouldn't validate or assume anything about ordering here. This doesn't 3598 * rule out defining new properties with ordering requirements in the future. 3599 */ 3600 static int read_properties_unlocked(struct i915_perf *perf, 3601 u64 __user *uprops, 3602 u32 n_props, 3603 struct perf_open_properties *props) 3604 { 3605 u64 __user *uprop = uprops; 3606 u32 i; 3607 int ret; 3608 3609 memset(props, 0, sizeof(struct perf_open_properties)); 3610 props->poll_oa_period = DEFAULT_POLL_PERIOD_NS; 3611 3612 if (!n_props) { 3613 drm_dbg(&perf->i915->drm, 3614 "No i915 perf properties given\n"); 3615 return -EINVAL; 3616 } 3617 3618 /* At the moment we only support using i915-perf on the RCS. */ 3619 props->engine = intel_engine_lookup_user(perf->i915, 3620 I915_ENGINE_CLASS_RENDER, 3621 0); 3622 if (!props->engine) { 3623 drm_dbg(&perf->i915->drm, 3624 "No RENDER-capable engines\n"); 3625 return -EINVAL; 3626 } 3627 3628 /* Considering that ID = 0 is reserved and assuming that we don't 3629 * (currently) expect any configurations to ever specify duplicate 3630 * values for a particular property ID then the last _PROP_MAX value is 3631 * one greater than the maximum number of properties we expect to get 3632 * from userspace. 3633 */ 3634 if (n_props >= DRM_I915_PERF_PROP_MAX) { 3635 drm_dbg(&perf->i915->drm, 3636 "More i915 perf properties specified than exist\n"); 3637 return -EINVAL; 3638 } 3639 3640 for (i = 0; i < n_props; i++) { 3641 u64 oa_period, oa_freq_hz; 3642 u64 id, value; 3643 3644 ret = get_user(id, uprop); 3645 if (ret) 3646 return ret; 3647 3648 ret = get_user(value, uprop + 1); 3649 if (ret) 3650 return ret; 3651 3652 if (id == 0 || id >= DRM_I915_PERF_PROP_MAX) { 3653 drm_dbg(&perf->i915->drm, 3654 "Unknown i915 perf property ID\n"); 3655 return -EINVAL; 3656 } 3657 3658 switch ((enum drm_i915_perf_property_id)id) { 3659 case DRM_I915_PERF_PROP_CTX_HANDLE: 3660 props->single_context = 1; 3661 props->ctx_handle = value; 3662 break; 3663 case DRM_I915_PERF_PROP_SAMPLE_OA: 3664 if (value) 3665 props->sample_flags |= SAMPLE_OA_REPORT; 3666 break; 3667 case DRM_I915_PERF_PROP_OA_METRICS_SET: 3668 if (value == 0) { 3669 drm_dbg(&perf->i915->drm, 3670 "Unknown OA metric set ID\n"); 3671 return -EINVAL; 3672 } 3673 props->metrics_set = value; 3674 break; 3675 case DRM_I915_PERF_PROP_OA_FORMAT: 3676 if (value == 0 || value >= I915_OA_FORMAT_MAX) { 3677 drm_dbg(&perf->i915->drm, 3678 "Out-of-range OA report format %llu\n", 3679 value); 3680 return -EINVAL; 3681 } 3682 if (!oa_format_valid(perf, value)) { 3683 drm_dbg(&perf->i915->drm, 3684 "Unsupported OA report format %llu\n", 3685 value); 3686 return -EINVAL; 3687 } 3688 props->oa_format = value; 3689 break; 3690 case DRM_I915_PERF_PROP_OA_EXPONENT: 3691 if (value > OA_EXPONENT_MAX) { 3692 drm_dbg(&perf->i915->drm, 3693 "OA timer exponent too high (> %u)\n", 3694 OA_EXPONENT_MAX); 3695 return -EINVAL; 3696 } 3697 3698 /* Theoretically we can program the OA unit to sample 3699 * e.g. every 160ns for HSW, 167ns for BDW/SKL or 104ns 3700 * for BXT. We don't allow such high sampling 3701 * frequencies by default unless root. 3702 */ 3703 3704 BUILD_BUG_ON(sizeof(oa_period) != 8); 3705 oa_period = oa_exponent_to_ns(perf, value); 3706 3707 /* This check is primarily to ensure that oa_period <= 3708 * UINT32_MAX (before passing to do_div which only 3709 * accepts a u32 denominator), but we can also skip 3710 * checking anything < 1Hz which implicitly can't be 3711 * limited via an integer oa_max_sample_rate. 3712 */ 3713 if (oa_period <= NSEC_PER_SEC) { 3714 u64 tmp = NSEC_PER_SEC; 3715 do_div(tmp, oa_period); 3716 oa_freq_hz = tmp; 3717 } else 3718 oa_freq_hz = 0; 3719 3720 if (oa_freq_hz > i915_oa_max_sample_rate && !perfmon_capable()) { 3721 drm_dbg(&perf->i915->drm, 3722 "OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without CAP_PERFMON or CAP_SYS_ADMIN privileges\n", 3723 i915_oa_max_sample_rate); 3724 return -EACCES; 3725 } 3726 3727 props->oa_periodic = true; 3728 props->oa_period_exponent = value; 3729 break; 3730 case DRM_I915_PERF_PROP_HOLD_PREEMPTION: 3731 props->hold_preemption = !!value; 3732 break; 3733 case DRM_I915_PERF_PROP_GLOBAL_SSEU: { 3734 struct drm_i915_gem_context_param_sseu user_sseu; 3735 3736 if (GRAPHICS_VER_FULL(perf->i915) >= IP_VER(12, 50)) { 3737 drm_dbg(&perf->i915->drm, 3738 "SSEU config not supported on gfx %x\n", 3739 GRAPHICS_VER_FULL(perf->i915)); 3740 return -ENODEV; 3741 } 3742 3743 if (copy_from_user(&user_sseu, 3744 u64_to_user_ptr(value), 3745 sizeof(user_sseu))) { 3746 drm_dbg(&perf->i915->drm, 3747 "Unable to copy global sseu parameter\n"); 3748 return -EFAULT; 3749 } 3750 3751 ret = get_sseu_config(&props->sseu, props->engine, &user_sseu); 3752 if (ret) { 3753 drm_dbg(&perf->i915->drm, 3754 "Invalid SSEU configuration\n"); 3755 return ret; 3756 } 3757 props->has_sseu = true; 3758 break; 3759 } 3760 case DRM_I915_PERF_PROP_POLL_OA_PERIOD: 3761 if (value < 100000 /* 100us */) { 3762 drm_dbg(&perf->i915->drm, 3763 "OA availability timer too small (%lluns < 100us)\n", 3764 value); 3765 return -EINVAL; 3766 } 3767 props->poll_oa_period = value; 3768 break; 3769 case DRM_I915_PERF_PROP_MAX: 3770 MISSING_CASE(id); 3771 return -EINVAL; 3772 } 3773 3774 uprop += 2; 3775 } 3776 3777 return 0; 3778 } 3779 3780 /** 3781 * i915_perf_open_ioctl - DRM ioctl() for userspace to open a stream FD 3782 * @dev: drm device 3783 * @data: ioctl data copied from userspace (unvalidated) 3784 * @file: drm file 3785 * 3786 * Validates the stream open parameters given by userspace including flags 3787 * and an array of u64 key, value pair properties. 3788 * 3789 * Very little is assumed up front about the nature of the stream being 3790 * opened (for instance we don't assume it's for periodic OA unit metrics). An 3791 * i915-perf stream is expected to be a suitable interface for other forms of 3792 * buffered data written by the GPU besides periodic OA metrics. 3793 * 3794 * Note we copy the properties from userspace outside of the i915 perf 3795 * mutex to avoid an awkward lockdep with mmap_lock. 3796 * 3797 * Most of the implementation details are handled by 3798 * i915_perf_open_ioctl_locked() after taking the &perf->lock 3799 * mutex for serializing with any non-file-operation driver hooks. 3800 * 3801 * Return: A newly opened i915 Perf stream file descriptor or negative 3802 * error code on failure. 3803 */ 3804 int i915_perf_open_ioctl(struct drm_device *dev, void *data, 3805 struct drm_file *file) 3806 { 3807 struct i915_perf *perf = &to_i915(dev)->perf; 3808 struct drm_i915_perf_open_param *param = data; 3809 struct perf_open_properties props; 3810 u32 known_open_flags; 3811 int ret; 3812 3813 if (!perf->i915) { 3814 drm_dbg(&perf->i915->drm, 3815 "i915 perf interface not available for this system\n"); 3816 return -ENOTSUPP; 3817 } 3818 3819 known_open_flags = I915_PERF_FLAG_FD_CLOEXEC | 3820 I915_PERF_FLAG_FD_NONBLOCK | 3821 I915_PERF_FLAG_DISABLED; 3822 if (param->flags & ~known_open_flags) { 3823 drm_dbg(&perf->i915->drm, 3824 "Unknown drm_i915_perf_open_param flag\n"); 3825 return -EINVAL; 3826 } 3827 3828 ret = read_properties_unlocked(perf, 3829 u64_to_user_ptr(param->properties_ptr), 3830 param->num_properties, 3831 &props); 3832 if (ret) 3833 return ret; 3834 3835 mutex_lock(&perf->lock); 3836 ret = i915_perf_open_ioctl_locked(perf, param, &props, file); 3837 mutex_unlock(&perf->lock); 3838 3839 return ret; 3840 } 3841 3842 /** 3843 * i915_perf_register - exposes i915-perf to userspace 3844 * @i915: i915 device instance 3845 * 3846 * In particular OA metric sets are advertised under a sysfs metrics/ 3847 * directory allowing userspace to enumerate valid IDs that can be 3848 * used to open an i915-perf stream. 3849 */ 3850 void i915_perf_register(struct drm_i915_private *i915) 3851 { 3852 struct i915_perf *perf = &i915->perf; 3853 3854 if (!perf->i915) 3855 return; 3856 3857 /* To be sure we're synchronized with an attempted 3858 * i915_perf_open_ioctl(); considering that we register after 3859 * being exposed to userspace. 3860 */ 3861 mutex_lock(&perf->lock); 3862 3863 perf->metrics_kobj = 3864 kobject_create_and_add("metrics", 3865 &i915->drm.primary->kdev->kobj); 3866 3867 mutex_unlock(&perf->lock); 3868 } 3869 3870 /** 3871 * i915_perf_unregister - hide i915-perf from userspace 3872 * @i915: i915 device instance 3873 * 3874 * i915-perf state cleanup is split up into an 'unregister' and 3875 * 'deinit' phase where the interface is first hidden from 3876 * userspace by i915_perf_unregister() before cleaning up 3877 * remaining state in i915_perf_fini(). 3878 */ 3879 void i915_perf_unregister(struct drm_i915_private *i915) 3880 { 3881 struct i915_perf *perf = &i915->perf; 3882 3883 if (!perf->metrics_kobj) 3884 return; 3885 3886 kobject_put(perf->metrics_kobj); 3887 perf->metrics_kobj = NULL; 3888 } 3889 3890 static bool gen8_is_valid_flex_addr(struct i915_perf *perf, u32 addr) 3891 { 3892 static const i915_reg_t flex_eu_regs[] = { 3893 EU_PERF_CNTL0, 3894 EU_PERF_CNTL1, 3895 EU_PERF_CNTL2, 3896 EU_PERF_CNTL3, 3897 EU_PERF_CNTL4, 3898 EU_PERF_CNTL5, 3899 EU_PERF_CNTL6, 3900 }; 3901 int i; 3902 3903 for (i = 0; i < ARRAY_SIZE(flex_eu_regs); i++) { 3904 if (i915_mmio_reg_offset(flex_eu_regs[i]) == addr) 3905 return true; 3906 } 3907 return false; 3908 } 3909 3910 static bool reg_in_range_table(u32 addr, const struct i915_range *table) 3911 { 3912 while (table->start || table->end) { 3913 if (addr >= table->start && addr <= table->end) 3914 return true; 3915 3916 table++; 3917 } 3918 3919 return false; 3920 } 3921 3922 #define REG_EQUAL(addr, mmio) \ 3923 ((addr) == i915_mmio_reg_offset(mmio)) 3924 3925 static const struct i915_range gen7_oa_b_counters[] = { 3926 { .start = 0x2710, .end = 0x272c }, /* OASTARTTRIG[1-8] */ 3927 { .start = 0x2740, .end = 0x275c }, /* OAREPORTTRIG[1-8] */ 3928 { .start = 0x2770, .end = 0x27ac }, /* OACEC[0-7][0-1] */ 3929 {} 3930 }; 3931 3932 static const struct i915_range gen12_oa_b_counters[] = { 3933 { .start = 0x2b2c, .end = 0x2b2c }, /* GEN12_OAG_OA_PESS */ 3934 { .start = 0xd900, .end = 0xd91c }, /* GEN12_OAG_OASTARTTRIG[1-8] */ 3935 { .start = 0xd920, .end = 0xd93c }, /* GEN12_OAG_OAREPORTTRIG1[1-8] */ 3936 { .start = 0xd940, .end = 0xd97c }, /* GEN12_OAG_CEC[0-7][0-1] */ 3937 { .start = 0xdc00, .end = 0xdc3c }, /* GEN12_OAG_SCEC[0-7][0-1] */ 3938 { .start = 0xdc40, .end = 0xdc40 }, /* GEN12_OAG_SPCTR_CNF */ 3939 { .start = 0xdc44, .end = 0xdc44 }, /* GEN12_OAA_DBG_REG */ 3940 {} 3941 }; 3942 3943 static const struct i915_range gen7_oa_mux_regs[] = { 3944 { .start = 0x91b8, .end = 0x91cc }, /* OA_PERFCNT[1-2], OA_PERFMATRIX */ 3945 { .start = 0x9800, .end = 0x9888 }, /* MICRO_BP0_0 - NOA_WRITE */ 3946 { .start = 0xe180, .end = 0xe180 }, /* HALF_SLICE_CHICKEN2 */ 3947 {} 3948 }; 3949 3950 static const struct i915_range hsw_oa_mux_regs[] = { 3951 { .start = 0x09e80, .end = 0x09ea4 }, /* HSW_MBVID2_NOA[0-9] */ 3952 { .start = 0x09ec0, .end = 0x09ec0 }, /* HSW_MBVID2_MISR0 */ 3953 { .start = 0x25100, .end = 0x2ff90 }, 3954 {} 3955 }; 3956 3957 static const struct i915_range chv_oa_mux_regs[] = { 3958 { .start = 0x182300, .end = 0x1823a4 }, 3959 {} 3960 }; 3961 3962 static const struct i915_range gen8_oa_mux_regs[] = { 3963 { .start = 0x0d00, .end = 0x0d2c }, /* RPM_CONFIG[0-1], NOA_CONFIG[0-8] */ 3964 { .start = 0x20cc, .end = 0x20cc }, /* WAIT_FOR_RC6_EXIT */ 3965 {} 3966 }; 3967 3968 static const struct i915_range gen11_oa_mux_regs[] = { 3969 { .start = 0x91c8, .end = 0x91dc }, /* OA_PERFCNT[3-4] */ 3970 {} 3971 }; 3972 3973 static const struct i915_range gen12_oa_mux_regs[] = { 3974 { .start = 0x0d00, .end = 0x0d04 }, /* RPM_CONFIG[0-1] */ 3975 { .start = 0x0d0c, .end = 0x0d2c }, /* NOA_CONFIG[0-8] */ 3976 { .start = 0x9840, .end = 0x9840 }, /* GDT_CHICKEN_BITS */ 3977 { .start = 0x9884, .end = 0x9888 }, /* NOA_WRITE */ 3978 { .start = 0x20cc, .end = 0x20cc }, /* WAIT_FOR_RC6_EXIT */ 3979 {} 3980 }; 3981 3982 static bool gen7_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr) 3983 { 3984 return reg_in_range_table(addr, gen7_oa_b_counters); 3985 } 3986 3987 static bool gen8_is_valid_mux_addr(struct i915_perf *perf, u32 addr) 3988 { 3989 return reg_in_range_table(addr, gen7_oa_mux_regs) || 3990 reg_in_range_table(addr, gen8_oa_mux_regs); 3991 } 3992 3993 static bool gen11_is_valid_mux_addr(struct i915_perf *perf, u32 addr) 3994 { 3995 return reg_in_range_table(addr, gen7_oa_mux_regs) || 3996 reg_in_range_table(addr, gen8_oa_mux_regs) || 3997 reg_in_range_table(addr, gen11_oa_mux_regs); 3998 } 3999 4000 static bool hsw_is_valid_mux_addr(struct i915_perf *perf, u32 addr) 4001 { 4002 return reg_in_range_table(addr, gen7_oa_mux_regs) || 4003 reg_in_range_table(addr, hsw_oa_mux_regs); 4004 } 4005 4006 static bool chv_is_valid_mux_addr(struct i915_perf *perf, u32 addr) 4007 { 4008 return reg_in_range_table(addr, gen7_oa_mux_regs) || 4009 reg_in_range_table(addr, chv_oa_mux_regs); 4010 } 4011 4012 static bool gen12_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr) 4013 { 4014 return reg_in_range_table(addr, gen12_oa_b_counters); 4015 } 4016 4017 static bool gen12_is_valid_mux_addr(struct i915_perf *perf, u32 addr) 4018 { 4019 return reg_in_range_table(addr, gen12_oa_mux_regs); 4020 } 4021 4022 static u32 mask_reg_value(u32 reg, u32 val) 4023 { 4024 /* HALF_SLICE_CHICKEN2 is programmed with a the 4025 * WaDisableSTUnitPowerOptimization workaround. Make sure the value 4026 * programmed by userspace doesn't change this. 4027 */ 4028 if (REG_EQUAL(reg, HALF_SLICE_CHICKEN2)) 4029 val = val & ~_MASKED_BIT_ENABLE(GEN8_ST_PO_DISABLE); 4030 4031 /* WAIT_FOR_RC6_EXIT has only one bit fullfilling the function 4032 * indicated by its name and a bunch of selection fields used by OA 4033 * configs. 4034 */ 4035 if (REG_EQUAL(reg, WAIT_FOR_RC6_EXIT)) 4036 val = val & ~_MASKED_BIT_ENABLE(HSW_WAIT_FOR_RC6_EXIT_ENABLE); 4037 4038 return val; 4039 } 4040 4041 static struct i915_oa_reg *alloc_oa_regs(struct i915_perf *perf, 4042 bool (*is_valid)(struct i915_perf *perf, u32 addr), 4043 u32 __user *regs, 4044 u32 n_regs) 4045 { 4046 struct i915_oa_reg *oa_regs; 4047 int err; 4048 u32 i; 4049 4050 if (!n_regs) 4051 return NULL; 4052 4053 /* No is_valid function means we're not allowing any register to be programmed. */ 4054 GEM_BUG_ON(!is_valid); 4055 if (!is_valid) 4056 return ERR_PTR(-EINVAL); 4057 4058 oa_regs = kmalloc_array(n_regs, sizeof(*oa_regs), GFP_KERNEL); 4059 if (!oa_regs) 4060 return ERR_PTR(-ENOMEM); 4061 4062 for (i = 0; i < n_regs; i++) { 4063 u32 addr, value; 4064 4065 err = get_user(addr, regs); 4066 if (err) 4067 goto addr_err; 4068 4069 if (!is_valid(perf, addr)) { 4070 drm_dbg(&perf->i915->drm, 4071 "Invalid oa_reg address: %X\n", addr); 4072 err = -EINVAL; 4073 goto addr_err; 4074 } 4075 4076 err = get_user(value, regs + 1); 4077 if (err) 4078 goto addr_err; 4079 4080 oa_regs[i].addr = _MMIO(addr); 4081 oa_regs[i].value = mask_reg_value(addr, value); 4082 4083 regs += 2; 4084 } 4085 4086 return oa_regs; 4087 4088 addr_err: 4089 kfree(oa_regs); 4090 return ERR_PTR(err); 4091 } 4092 4093 static ssize_t show_dynamic_id(struct kobject *kobj, 4094 struct kobj_attribute *attr, 4095 char *buf) 4096 { 4097 struct i915_oa_config *oa_config = 4098 container_of(attr, typeof(*oa_config), sysfs_metric_id); 4099 4100 return sprintf(buf, "%d\n", oa_config->id); 4101 } 4102 4103 static int create_dynamic_oa_sysfs_entry(struct i915_perf *perf, 4104 struct i915_oa_config *oa_config) 4105 { 4106 sysfs_attr_init(&oa_config->sysfs_metric_id.attr); 4107 oa_config->sysfs_metric_id.attr.name = "id"; 4108 oa_config->sysfs_metric_id.attr.mode = S_IRUGO; 4109 oa_config->sysfs_metric_id.show = show_dynamic_id; 4110 oa_config->sysfs_metric_id.store = NULL; 4111 4112 oa_config->attrs[0] = &oa_config->sysfs_metric_id.attr; 4113 oa_config->attrs[1] = NULL; 4114 4115 oa_config->sysfs_metric.name = oa_config->uuid; 4116 oa_config->sysfs_metric.attrs = oa_config->attrs; 4117 4118 return sysfs_create_group(perf->metrics_kobj, 4119 &oa_config->sysfs_metric); 4120 } 4121 4122 /** 4123 * i915_perf_add_config_ioctl - DRM ioctl() for userspace to add a new OA config 4124 * @dev: drm device 4125 * @data: ioctl data (pointer to struct drm_i915_perf_oa_config) copied from 4126 * userspace (unvalidated) 4127 * @file: drm file 4128 * 4129 * Validates the submitted OA register to be saved into a new OA config that 4130 * can then be used for programming the OA unit and its NOA network. 4131 * 4132 * Returns: A new allocated config number to be used with the perf open ioctl 4133 * or a negative error code on failure. 4134 */ 4135 int i915_perf_add_config_ioctl(struct drm_device *dev, void *data, 4136 struct drm_file *file) 4137 { 4138 struct i915_perf *perf = &to_i915(dev)->perf; 4139 struct drm_i915_perf_oa_config *args = data; 4140 struct i915_oa_config *oa_config, *tmp; 4141 struct i915_oa_reg *regs; 4142 int err, id; 4143 4144 if (!perf->i915) { 4145 drm_dbg(&perf->i915->drm, 4146 "i915 perf interface not available for this system\n"); 4147 return -ENOTSUPP; 4148 } 4149 4150 if (!perf->metrics_kobj) { 4151 drm_dbg(&perf->i915->drm, 4152 "OA metrics weren't advertised via sysfs\n"); 4153 return -EINVAL; 4154 } 4155 4156 if (i915_perf_stream_paranoid && !perfmon_capable()) { 4157 drm_dbg(&perf->i915->drm, 4158 "Insufficient privileges to add i915 OA config\n"); 4159 return -EACCES; 4160 } 4161 4162 if ((!args->mux_regs_ptr || !args->n_mux_regs) && 4163 (!args->boolean_regs_ptr || !args->n_boolean_regs) && 4164 (!args->flex_regs_ptr || !args->n_flex_regs)) { 4165 drm_dbg(&perf->i915->drm, 4166 "No OA registers given\n"); 4167 return -EINVAL; 4168 } 4169 4170 oa_config = kzalloc(sizeof(*oa_config), GFP_KERNEL); 4171 if (!oa_config) { 4172 drm_dbg(&perf->i915->drm, 4173 "Failed to allocate memory for the OA config\n"); 4174 return -ENOMEM; 4175 } 4176 4177 oa_config->perf = perf; 4178 kref_init(&oa_config->ref); 4179 4180 if (!uuid_is_valid(args->uuid)) { 4181 drm_dbg(&perf->i915->drm, 4182 "Invalid uuid format for OA config\n"); 4183 err = -EINVAL; 4184 goto reg_err; 4185 } 4186 4187 /* Last character in oa_config->uuid will be 0 because oa_config is 4188 * kzalloc. 4189 */ 4190 memcpy(oa_config->uuid, args->uuid, sizeof(args->uuid)); 4191 4192 oa_config->mux_regs_len = args->n_mux_regs; 4193 regs = alloc_oa_regs(perf, 4194 perf->ops.is_valid_mux_reg, 4195 u64_to_user_ptr(args->mux_regs_ptr), 4196 args->n_mux_regs); 4197 4198 if (IS_ERR(regs)) { 4199 drm_dbg(&perf->i915->drm, 4200 "Failed to create OA config for mux_regs\n"); 4201 err = PTR_ERR(regs); 4202 goto reg_err; 4203 } 4204 oa_config->mux_regs = regs; 4205 4206 oa_config->b_counter_regs_len = args->n_boolean_regs; 4207 regs = alloc_oa_regs(perf, 4208 perf->ops.is_valid_b_counter_reg, 4209 u64_to_user_ptr(args->boolean_regs_ptr), 4210 args->n_boolean_regs); 4211 4212 if (IS_ERR(regs)) { 4213 drm_dbg(&perf->i915->drm, 4214 "Failed to create OA config for b_counter_regs\n"); 4215 err = PTR_ERR(regs); 4216 goto reg_err; 4217 } 4218 oa_config->b_counter_regs = regs; 4219 4220 if (GRAPHICS_VER(perf->i915) < 8) { 4221 if (args->n_flex_regs != 0) { 4222 err = -EINVAL; 4223 goto reg_err; 4224 } 4225 } else { 4226 oa_config->flex_regs_len = args->n_flex_regs; 4227 regs = alloc_oa_regs(perf, 4228 perf->ops.is_valid_flex_reg, 4229 u64_to_user_ptr(args->flex_regs_ptr), 4230 args->n_flex_regs); 4231 4232 if (IS_ERR(regs)) { 4233 drm_dbg(&perf->i915->drm, 4234 "Failed to create OA config for flex_regs\n"); 4235 err = PTR_ERR(regs); 4236 goto reg_err; 4237 } 4238 oa_config->flex_regs = regs; 4239 } 4240 4241 err = mutex_lock_interruptible(&perf->metrics_lock); 4242 if (err) 4243 goto reg_err; 4244 4245 /* We shouldn't have too many configs, so this iteration shouldn't be 4246 * too costly. 4247 */ 4248 idr_for_each_entry(&perf->metrics_idr, tmp, id) { 4249 if (!strcmp(tmp->uuid, oa_config->uuid)) { 4250 drm_dbg(&perf->i915->drm, 4251 "OA config already exists with this uuid\n"); 4252 err = -EADDRINUSE; 4253 goto sysfs_err; 4254 } 4255 } 4256 4257 err = create_dynamic_oa_sysfs_entry(perf, oa_config); 4258 if (err) { 4259 drm_dbg(&perf->i915->drm, 4260 "Failed to create sysfs entry for OA config\n"); 4261 goto sysfs_err; 4262 } 4263 4264 /* Config id 0 is invalid, id 1 for kernel stored test config. */ 4265 oa_config->id = idr_alloc(&perf->metrics_idr, 4266 oa_config, 2, 4267 0, GFP_KERNEL); 4268 if (oa_config->id < 0) { 4269 drm_dbg(&perf->i915->drm, 4270 "Failed to create sysfs entry for OA config\n"); 4271 err = oa_config->id; 4272 goto sysfs_err; 4273 } 4274 4275 mutex_unlock(&perf->metrics_lock); 4276 4277 drm_dbg(&perf->i915->drm, 4278 "Added config %s id=%i\n", oa_config->uuid, oa_config->id); 4279 4280 return oa_config->id; 4281 4282 sysfs_err: 4283 mutex_unlock(&perf->metrics_lock); 4284 reg_err: 4285 i915_oa_config_put(oa_config); 4286 drm_dbg(&perf->i915->drm, 4287 "Failed to add new OA config\n"); 4288 return err; 4289 } 4290 4291 /** 4292 * i915_perf_remove_config_ioctl - DRM ioctl() for userspace to remove an OA config 4293 * @dev: drm device 4294 * @data: ioctl data (pointer to u64 integer) copied from userspace 4295 * @file: drm file 4296 * 4297 * Configs can be removed while being used, the will stop appearing in sysfs 4298 * and their content will be freed when the stream using the config is closed. 4299 * 4300 * Returns: 0 on success or a negative error code on failure. 4301 */ 4302 int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data, 4303 struct drm_file *file) 4304 { 4305 struct i915_perf *perf = &to_i915(dev)->perf; 4306 u64 *arg = data; 4307 struct i915_oa_config *oa_config; 4308 int ret; 4309 4310 if (!perf->i915) { 4311 drm_dbg(&perf->i915->drm, 4312 "i915 perf interface not available for this system\n"); 4313 return -ENOTSUPP; 4314 } 4315 4316 if (i915_perf_stream_paranoid && !perfmon_capable()) { 4317 drm_dbg(&perf->i915->drm, 4318 "Insufficient privileges to remove i915 OA config\n"); 4319 return -EACCES; 4320 } 4321 4322 ret = mutex_lock_interruptible(&perf->metrics_lock); 4323 if (ret) 4324 return ret; 4325 4326 oa_config = idr_find(&perf->metrics_idr, *arg); 4327 if (!oa_config) { 4328 drm_dbg(&perf->i915->drm, 4329 "Failed to remove unknown OA config\n"); 4330 ret = -ENOENT; 4331 goto err_unlock; 4332 } 4333 4334 GEM_BUG_ON(*arg != oa_config->id); 4335 4336 sysfs_remove_group(perf->metrics_kobj, &oa_config->sysfs_metric); 4337 4338 idr_remove(&perf->metrics_idr, *arg); 4339 4340 mutex_unlock(&perf->metrics_lock); 4341 4342 drm_dbg(&perf->i915->drm, 4343 "Removed config %s id=%i\n", oa_config->uuid, oa_config->id); 4344 4345 i915_oa_config_put(oa_config); 4346 4347 return 0; 4348 4349 err_unlock: 4350 mutex_unlock(&perf->metrics_lock); 4351 return ret; 4352 } 4353 4354 static struct ctl_table oa_table[] = { 4355 { 4356 .procname = "perf_stream_paranoid", 4357 .data = &i915_perf_stream_paranoid, 4358 .maxlen = sizeof(i915_perf_stream_paranoid), 4359 .mode = 0644, 4360 .proc_handler = proc_dointvec_minmax, 4361 .extra1 = SYSCTL_ZERO, 4362 .extra2 = SYSCTL_ONE, 4363 }, 4364 { 4365 .procname = "oa_max_sample_rate", 4366 .data = &i915_oa_max_sample_rate, 4367 .maxlen = sizeof(i915_oa_max_sample_rate), 4368 .mode = 0644, 4369 .proc_handler = proc_dointvec_minmax, 4370 .extra1 = SYSCTL_ZERO, 4371 .extra2 = &oa_sample_rate_hard_limit, 4372 }, 4373 {} 4374 }; 4375 4376 static void oa_init_supported_formats(struct i915_perf *perf) 4377 { 4378 struct drm_i915_private *i915 = perf->i915; 4379 enum intel_platform platform = INTEL_INFO(i915)->platform; 4380 4381 switch (platform) { 4382 case INTEL_HASWELL: 4383 oa_format_add(perf, I915_OA_FORMAT_A13); 4384 oa_format_add(perf, I915_OA_FORMAT_A13); 4385 oa_format_add(perf, I915_OA_FORMAT_A29); 4386 oa_format_add(perf, I915_OA_FORMAT_A13_B8_C8); 4387 oa_format_add(perf, I915_OA_FORMAT_B4_C8); 4388 oa_format_add(perf, I915_OA_FORMAT_A45_B8_C8); 4389 oa_format_add(perf, I915_OA_FORMAT_B4_C8_A16); 4390 oa_format_add(perf, I915_OA_FORMAT_C4_B8); 4391 break; 4392 4393 case INTEL_BROADWELL: 4394 case INTEL_CHERRYVIEW: 4395 case INTEL_SKYLAKE: 4396 case INTEL_BROXTON: 4397 case INTEL_KABYLAKE: 4398 case INTEL_GEMINILAKE: 4399 case INTEL_COFFEELAKE: 4400 case INTEL_COMETLAKE: 4401 case INTEL_ICELAKE: 4402 case INTEL_ELKHARTLAKE: 4403 case INTEL_JASPERLAKE: 4404 case INTEL_TIGERLAKE: 4405 case INTEL_ROCKETLAKE: 4406 case INTEL_DG1: 4407 case INTEL_ALDERLAKE_S: 4408 case INTEL_ALDERLAKE_P: 4409 oa_format_add(perf, I915_OA_FORMAT_A12); 4410 oa_format_add(perf, I915_OA_FORMAT_A12_B8_C8); 4411 oa_format_add(perf, I915_OA_FORMAT_A32u40_A4u32_B8_C8); 4412 oa_format_add(perf, I915_OA_FORMAT_C4_B8); 4413 break; 4414 4415 default: 4416 MISSING_CASE(platform); 4417 } 4418 } 4419 4420 /** 4421 * i915_perf_init - initialize i915-perf state on module bind 4422 * @i915: i915 device instance 4423 * 4424 * Initializes i915-perf state without exposing anything to userspace. 4425 * 4426 * Note: i915-perf initialization is split into an 'init' and 'register' 4427 * phase with the i915_perf_register() exposing state to userspace. 4428 */ 4429 void i915_perf_init(struct drm_i915_private *i915) 4430 { 4431 struct i915_perf *perf = &i915->perf; 4432 4433 /* XXX const struct i915_perf_ops! */ 4434 4435 /* i915_perf is not enabled for DG2 yet */ 4436 if (IS_DG2(i915)) 4437 return; 4438 4439 perf->oa_formats = oa_formats; 4440 if (IS_HASWELL(i915)) { 4441 perf->ops.is_valid_b_counter_reg = gen7_is_valid_b_counter_addr; 4442 perf->ops.is_valid_mux_reg = hsw_is_valid_mux_addr; 4443 perf->ops.is_valid_flex_reg = NULL; 4444 perf->ops.enable_metric_set = hsw_enable_metric_set; 4445 perf->ops.disable_metric_set = hsw_disable_metric_set; 4446 perf->ops.oa_enable = gen7_oa_enable; 4447 perf->ops.oa_disable = gen7_oa_disable; 4448 perf->ops.read = gen7_oa_read; 4449 perf->ops.oa_hw_tail_read = gen7_oa_hw_tail_read; 4450 } else if (HAS_LOGICAL_RING_CONTEXTS(i915)) { 4451 /* Note: that although we could theoretically also support the 4452 * legacy ringbuffer mode on BDW (and earlier iterations of 4453 * this driver, before upstreaming did this) it didn't seem 4454 * worth the complexity to maintain now that BDW+ enable 4455 * execlist mode by default. 4456 */ 4457 perf->ops.read = gen8_oa_read; 4458 4459 if (IS_GRAPHICS_VER(i915, 8, 9)) { 4460 perf->ops.is_valid_b_counter_reg = 4461 gen7_is_valid_b_counter_addr; 4462 perf->ops.is_valid_mux_reg = 4463 gen8_is_valid_mux_addr; 4464 perf->ops.is_valid_flex_reg = 4465 gen8_is_valid_flex_addr; 4466 4467 if (IS_CHERRYVIEW(i915)) { 4468 perf->ops.is_valid_mux_reg = 4469 chv_is_valid_mux_addr; 4470 } 4471 4472 perf->ops.oa_enable = gen8_oa_enable; 4473 perf->ops.oa_disable = gen8_oa_disable; 4474 perf->ops.enable_metric_set = gen8_enable_metric_set; 4475 perf->ops.disable_metric_set = gen8_disable_metric_set; 4476 perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read; 4477 4478 if (GRAPHICS_VER(i915) == 8) { 4479 perf->ctx_oactxctrl_offset = 0x120; 4480 perf->ctx_flexeu0_offset = 0x2ce; 4481 4482 perf->gen8_valid_ctx_bit = BIT(25); 4483 } else { 4484 perf->ctx_oactxctrl_offset = 0x128; 4485 perf->ctx_flexeu0_offset = 0x3de; 4486 4487 perf->gen8_valid_ctx_bit = BIT(16); 4488 } 4489 } else if (GRAPHICS_VER(i915) == 11) { 4490 perf->ops.is_valid_b_counter_reg = 4491 gen7_is_valid_b_counter_addr; 4492 perf->ops.is_valid_mux_reg = 4493 gen11_is_valid_mux_addr; 4494 perf->ops.is_valid_flex_reg = 4495 gen8_is_valid_flex_addr; 4496 4497 perf->ops.oa_enable = gen8_oa_enable; 4498 perf->ops.oa_disable = gen8_oa_disable; 4499 perf->ops.enable_metric_set = gen8_enable_metric_set; 4500 perf->ops.disable_metric_set = gen11_disable_metric_set; 4501 perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read; 4502 4503 perf->ctx_oactxctrl_offset = 0x124; 4504 perf->ctx_flexeu0_offset = 0x78e; 4505 4506 perf->gen8_valid_ctx_bit = BIT(16); 4507 } else if (GRAPHICS_VER(i915) == 12) { 4508 perf->ops.is_valid_b_counter_reg = 4509 gen12_is_valid_b_counter_addr; 4510 perf->ops.is_valid_mux_reg = 4511 gen12_is_valid_mux_addr; 4512 perf->ops.is_valid_flex_reg = 4513 gen8_is_valid_flex_addr; 4514 4515 perf->ops.oa_enable = gen12_oa_enable; 4516 perf->ops.oa_disable = gen12_oa_disable; 4517 perf->ops.enable_metric_set = gen12_enable_metric_set; 4518 perf->ops.disable_metric_set = gen12_disable_metric_set; 4519 perf->ops.oa_hw_tail_read = gen12_oa_hw_tail_read; 4520 4521 perf->ctx_flexeu0_offset = 0; 4522 perf->ctx_oactxctrl_offset = 0x144; 4523 } 4524 } 4525 4526 if (perf->ops.enable_metric_set) { 4527 mutex_init(&perf->lock); 4528 4529 /* Choose a representative limit */ 4530 oa_sample_rate_hard_limit = to_gt(i915)->clock_frequency / 2; 4531 4532 mutex_init(&perf->metrics_lock); 4533 idr_init_base(&perf->metrics_idr, 1); 4534 4535 /* We set up some ratelimit state to potentially throttle any 4536 * _NOTES about spurious, invalid OA reports which we don't 4537 * forward to userspace. 4538 * 4539 * We print a _NOTE about any throttling when closing the 4540 * stream instead of waiting until driver _fini which no one 4541 * would ever see. 4542 * 4543 * Using the same limiting factors as printk_ratelimit() 4544 */ 4545 ratelimit_state_init(&perf->spurious_report_rs, 5 * HZ, 10); 4546 /* Since we use a DRM_NOTE for spurious reports it would be 4547 * inconsistent to let __ratelimit() automatically print a 4548 * warning for throttling. 4549 */ 4550 ratelimit_set_flags(&perf->spurious_report_rs, 4551 RATELIMIT_MSG_ON_RELEASE); 4552 4553 ratelimit_state_init(&perf->tail_pointer_race, 4554 5 * HZ, 10); 4555 ratelimit_set_flags(&perf->tail_pointer_race, 4556 RATELIMIT_MSG_ON_RELEASE); 4557 4558 atomic64_set(&perf->noa_programming_delay, 4559 500 * 1000 /* 500us */); 4560 4561 perf->i915 = i915; 4562 4563 oa_init_supported_formats(perf); 4564 } 4565 } 4566 4567 static int destroy_config(int id, void *p, void *data) 4568 { 4569 i915_oa_config_put(p); 4570 return 0; 4571 } 4572 4573 int i915_perf_sysctl_register(void) 4574 { 4575 sysctl_header = register_sysctl("dev/i915", oa_table); 4576 return 0; 4577 } 4578 4579 void i915_perf_sysctl_unregister(void) 4580 { 4581 unregister_sysctl_table(sysctl_header); 4582 } 4583 4584 /** 4585 * i915_perf_fini - Counter part to i915_perf_init() 4586 * @i915: i915 device instance 4587 */ 4588 void i915_perf_fini(struct drm_i915_private *i915) 4589 { 4590 struct i915_perf *perf = &i915->perf; 4591 4592 if (!perf->i915) 4593 return; 4594 4595 idr_for_each(&perf->metrics_idr, destroy_config, perf); 4596 idr_destroy(&perf->metrics_idr); 4597 4598 memset(&perf->ops, 0, sizeof(perf->ops)); 4599 perf->i915 = NULL; 4600 } 4601 4602 /** 4603 * i915_perf_ioctl_version - Version of the i915-perf subsystem 4604 * 4605 * This version number is used by userspace to detect available features. 4606 */ 4607 int i915_perf_ioctl_version(void) 4608 { 4609 /* 4610 * 1: Initial version 4611 * I915_PERF_IOCTL_ENABLE 4612 * I915_PERF_IOCTL_DISABLE 4613 * 4614 * 2: Added runtime modification of OA config. 4615 * I915_PERF_IOCTL_CONFIG 4616 * 4617 * 3: Add DRM_I915_PERF_PROP_HOLD_PREEMPTION parameter to hold 4618 * preemption on a particular context so that performance data is 4619 * accessible from a delta of MI_RPC reports without looking at the 4620 * OA buffer. 4621 * 4622 * 4: Add DRM_I915_PERF_PROP_ALLOWED_SSEU to limit what contexts can 4623 * be run for the duration of the performance recording based on 4624 * their SSEU configuration. 4625 * 4626 * 5: Add DRM_I915_PERF_PROP_POLL_OA_PERIOD parameter that controls the 4627 * interval for the hrtimer used to check for OA data. 4628 */ 4629 return 5; 4630 } 4631 4632 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) 4633 #include "selftests/i915_perf.c" 4634 #endif 4635