/*
 * Copyright © 2015-2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *   Robert Bragg <robert@sixbynine.org>
 */


/**
 * DOC: i915 Perf Overview
 *
 * Gen graphics supports a large number of performance counters that can help
 * driver and application developers understand and optimize their use of the
 * GPU.
 *
 * This i915 perf interface enables userspace to configure and open a file
 * descriptor representing a stream of GPU metrics which can then be read() as
 * a stream of sample records.
 *
 * The interface is particularly suited to exposing buffered metrics that are
 * captured by DMA from the GPU, unsynchronized with and unrelated to the CPU.
 *
 * Streams representing a single context are accessible to applications with a
 * corresponding drm file descriptor, such that OpenGL can use the interface
 * without special privileges. Access to system-wide metrics requires root
 * privileges by default, unless changed via the dev.i915.perf_stream_paranoid
 * sysctl option.
 */
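/*
 * As an illustration of the uAPI (a minimal userspace sketch, not code from
 * this file; the metrics set ID, format and exponent values are assumptions
 * that depend on the platform and the metric sets advertised via sysfs),
 * opening an OA stream looks roughly like:
 *
 *	struct drm_i915_perf_open_param param = { 0 };
 *	uint64_t properties[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *	};
 *
 *	param.flags = I915_PERF_FLAG_FD_CLOEXEC | I915_PERF_FLAG_FD_NONBLOCK;
 *	param.num_properties = sizeof(properties) / (2 * sizeof(uint64_t));
 *	param.properties_ptr = (uintptr_t)properties;
 *
 *	stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 *
 * The returned fd can then be read() or poll()ed for sample records.
 */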
/**
 * DOC: i915 Perf History and Comparison with Core Perf
 *
 * The interface was initially inspired by the core Perf infrastructure but
 * some notable differences are:
 *
 * i915 perf file descriptors represent a "stream" instead of an "event"; where
 * a perf event primarily corresponds to a single 64bit value, while a stream
 * might sample sets of tightly-coupled counters, depending on the
 * configuration. For example the Gen OA unit isn't designed to support
 * orthogonal configurations of individual counters; it's configured for a set
 * of related counters. Samples for an i915 perf stream capturing OA metrics
 * will include a set of counter values packed in a compact HW specific format.
 * The OA unit supports a number of different packing formats which can be
 * selected by the user opening the stream. Perf has support for grouping
 * events, but each event in the group is configured, validated and
 * authenticated individually with separate system calls.
 *
 * i915 perf stream configurations are provided as an array of u64 (key,value)
 * pairs, instead of a fixed struct with multiple miscellaneous config members,
 * interleaved with event-type specific members.
 *
 * i915 perf doesn't support exposing metrics via an mmap'd circular buffer.
 * The supported metrics are being written to memory by the GPU unsynchronized
 * with the CPU, using HW specific packing formats for counter sets. Sometimes
 * the constraints on HW configuration require reports to be filtered before it
 * would be acceptable to expose them to unprivileged applications - to hide
 * the metrics of other processes/contexts. For these use cases a read() based
 * interface is a good fit, and provides an opportunity to filter data as it
 * gets copied from the GPU mapped buffers to userspace buffers.
 *
 *
 * Issues hit with first prototype based on Core Perf
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * The first prototype of this driver was based on the core perf
 * infrastructure, and while we did make that mostly work, with some changes to
 * perf, we found we were breaking or working around too many assumptions baked
 * into perf's currently cpu centric design.
 *
 * In the end we didn't see a clear benefit to making perf's implementation and
 * interface more complex by changing design assumptions while we knew we still
 * wouldn't be able to use any existing perf based userspace tools.
 *
 * Also considering the Gen specific nature of the Observability hardware and
 * how userspace will sometimes need to combine i915 perf OA metrics with
 * side-band OA data captured via MI_REPORT_PERF_COUNT commands; we're
 * expecting the interface to be used by a platform specific userspace such as
 * OpenGL or tools. This is to say; we aren't inherently missing out on having
 * a standard vendor/architecture agnostic interface by not using perf.
 *
 *
 * For posterity, in case we might re-visit trying to adapt core perf to be
 * better suited to exposing i915 metrics these were the main pain points we
 * hit:
 *
 * - The perf based OA PMU driver broke some significant design assumptions:
 *
 *   Existing perf pmus are used for profiling work on a cpu and we were
 *   introducing the idea of _IS_DEVICE pmus with different security
 *   implications, the need to fake cpu-related data (such as user/kernel
 *   registers) to fit with perf's current design, and adding _DEVICE records
 *   as a way to forward device-specific status records.
 *
 *   The OA unit writes reports of counters into a circular buffer, without
 *   involvement from the CPU, making our PMU driver the first of a kind.
 *
 *   Given the way we were periodically forwarding data from the GPU-mapped, OA
 *   buffer to perf's buffer, those bursts of sample writes looked to perf like
 *   we were sampling too fast and so we had to subvert its throttling checks.
 *
 *   Perf supports groups of counters and allows those to be read via
 *   transactions internally but transactions currently seem designed to be
 *   explicitly initiated from the cpu (say in response to a userspace read())
 *   and while we could pull a report out of the OA buffer we can't
 *   trigger a report from the cpu on demand.
 *
 *   Related to being report based; the OA counters are configured in HW as a
 *   set while perf generally expects counter configurations to be orthogonal.
 *   Although counters can be associated with a group leader as they are
 *   opened, there's no clear precedent for being able to provide group-wide
 *   configuration attributes (for example we want to let userspace choose the
 *   OA unit report format used to capture all counters in a set, or specify a
 *   GPU context to filter metrics on). We avoided using perf's grouping
 *   feature and forwarded OA reports to userspace via perf's 'raw' sample
 *   field. This suited our userspace well considering how coupled the counters
 *   are when dealing with normalizing. It would be inconvenient to split
 *   counters up into separate events, only to require userspace to recombine
 *   them. For Mesa it's also convenient to be forwarded raw, periodic reports
 *   for combining with the side-band raw reports it captures using
 *   MI_REPORT_PERF_COUNT commands.
 *
 * - As a side note on perf's grouping feature; there was also some concern
 *   that using PERF_FORMAT_GROUP as a way to pack together counter values
 *   would quite drastically inflate our sample sizes, which would likely
 *   lower the effective sampling resolutions we could use when the available
 *   memory bandwidth is limited.
 *
 *   With the OA unit's report formats, counters are packed together as 32
 *   or 40bit values, with the largest report size being 256 bytes.
 *
 *   PERF_FORMAT_GROUP values are 64bit, but there doesn't appear to be a
 *   documented ordering to the values, implying PERF_FORMAT_ID must also be
 *   used to add a 64bit ID before each value; giving 16 bytes per counter.
 *
 * - Related to counter orthogonality; we can't time share the OA unit, while
 *   event scheduling is a central design idea within perf for allowing
 *   userspace to open + enable more events than can be configured in HW at any
 *   one time. The OA unit is not designed to allow re-configuration while in
 *   use. We can't reconfigure the OA unit without losing internal OA unit
 *   state which we can't access explicitly to save and restore. Reconfiguring
 *   the OA unit is also relatively slow, involving ~100 register writes. From
 *   userspace Mesa also depends on a stable OA configuration when emitting
 *   MI_REPORT_PERF_COUNT commands and importantly the OA unit can't be
 *   disabled while there are outstanding MI_RPC commands lest we hang the
 *   command streamer.
 *
 * - The contents of sample records aren't extensible by device drivers (i.e.
 *   the sample_type bits). As an example; Sourab Gupta had been looking to
 *   attach GPU timestamps to our OA samples. We were shoehorning OA reports
 *   into sample records by using the 'raw' field, but it's tricky to pack more
 *   than one thing into this field because events/core.c currently only lets a
 *   pmu give a single raw data pointer plus len which will be copied into the
 *   ring buffer. To include more than the OA report we'd have to copy the
 *   report into an intermediate larger buffer. I'd been considering allowing a
 *   vector of data+len values to be specified for copying the raw data, but
 *   it felt like a kludge to be using the raw field for this purpose.
 *
 * - It felt like our perf based PMU was making some technical compromises
 *   just for the sake of using perf:
 *
 *   perf_event_open() requires events to either relate to a pid or a specific
 *   cpu core, while our device pmu related to neither.
 *   Events opened with a pid will be automatically enabled/disabled according
 *   to the scheduling of that process - so not appropriate for us. When an
 *   event is related to a cpu id, perf ensures pmu methods will be invoked via
 *   an inter process interrupt on that core. To avoid invasive changes our
 *   userspace opened OA perf events for a specific cpu. This was workable but
 *   it meant the majority of the OA driver ran in atomic context, including
 *   all OA report forwarding, which wasn't really necessary in our case and
 *   seemed to make our locking requirements somewhat complex as we handled the
 *   interaction with the rest of the i915 driver.
 */

#include <linux/anon_inodes.h>
#include <linux/sizes.h>
#include <linux/uuid.h>

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_internal.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_execlists_submission.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_clock_utils.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_lrc.h"
#include "gt/intel_lrc_reg.h"
#include "gt/intel_ring.h"

#include "i915_drv.h"
#include "i915_file_private.h"
#include "i915_perf.h"
#include "i915_perf_oa_regs.h"

/* HW requires this to be a power of two, between 128k and 16M, though driver
 * is currently generally designed assuming the largest 16M size is used such
 * that the overflow cases are unlikely in normal operation.
 */
#define OA_BUFFER_SIZE		SZ_16M

#define OA_TAKEN(tail, head)	((tail - head) & (OA_BUFFER_SIZE - 1))
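/*
 * A worked example of the wraparound arithmetic (values chosen purely for
 * illustration): with OA_BUFFER_SIZE = 16M the mask is 0xffffff, so a tail
 * of 0x40 that has wrapped past a head of 0xfffff80 gives
 *
 *	OA_TAKEN(0x40, 0xfffff80) == (0x40 - 0xfffff80) & 0xffffff == 0xc0
 *
 * i.e. 0xc0 (192) bytes of report data standing between the head and the
 * wrapped tail, which is why both pointers are kept as offsets masked to
 * the buffer size.
 */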
/**
 * DOC: OA Tail Pointer Race
 *
 * There's a HW race condition between OA unit tail pointer register updates and
 * writes to memory whereby the tail pointer can sometimes get ahead of what's
 * been written out to the OA buffer so far (in terms of what's visible to the
 * CPU).
 *
 * Although this can be observed explicitly while copying reports to userspace
 * by checking for a zeroed report-id field in tail reports, we want to account
 * for this earlier, as part of oa_buffer_check_unlocked() to avoid lots of
 * redundant read() attempts.
 *
 * We work around this issue in oa_buffer_check_unlocked() by reading the reports
 * in the OA buffer, starting from the tail reported by the HW until we find a
 * report with its first 2 dwords not 0 meaning its previous report is
 * completely in memory and ready to be read. Those dwords are also set to 0
 * once read and the whole buffer is cleared upon OA buffer initialization. The
 * first dword is the reason for this report while the second is the timestamp,
 * making the chances of having those 2 fields at 0 fairly unlikely. A more
 * detailed explanation is available in oa_buffer_check_unlocked().
 *
 * Most of the implementation details for this workaround are in
 * oa_buffer_check_unlocked() and _append_oa_reports()
 *
 * Note for posterity: previously the driver used to define an effective tail
 * pointer that lagged the real pointer by a 'tail margin' measured in bytes
 * derived from %OA_TAIL_MARGIN_NSEC and the configured sampling frequency.
 * This was flawed considering that the OA unit may also automatically generate
 * non-periodic reports (such as on context switch) or the OA unit may be
 * enabled without any periodic sampling.
 */
#define OA_TAIL_MARGIN_NSEC	100000ULL
#define INVALID_TAIL_PTR	0xffffffff

/* The default frequency for checking whether the OA unit has written new
 * reports to the circular OA buffer...
 */
#define DEFAULT_POLL_FREQUENCY_HZ 200
#define DEFAULT_POLL_PERIOD_NS (NSEC_PER_SEC / DEFAULT_POLL_FREQUENCY_HZ)

/* for sysctl proc_dointvec_minmax of dev.i915.perf_stream_paranoid */
static u32 i915_perf_stream_paranoid = true;

/* The maximum exponent the hardware accepts is 63 (essentially it selects one
 * of the 64bit timestamp bits to trigger reports from) but there's currently
 * no known use case for sampling as infrequently as once per 47 thousand years.
 *
 * Since the timestamps included in OA reports are only 32bits it seems
 * reasonable to limit the OA exponent where it's still possible to account for
 * overflow in OA report timestamps.
 */
#define OA_EXPONENT_MAX 31
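/*
 * For a rough feel of the scale (a sketch; the exact conversion is platform
 * dependent and lives in the exponent-to-ns helper): the OA unit triggers a
 * periodic report every (2 << exponent) timestamp ticks, so with a 12.5MHz
 * timestamp clock as on Haswell:
 *
 *	exponent 0  -> 2 ticks    = 160ns between reports
 *	exponent 31 -> 2^32 ticks = ~343 seconds between reports
 *
 * which is why an OA_EXPONENT_MAX of 31 still comfortably fits within the
 * 32bit report timestamps.
 */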
#define INVALID_CTX_ID 0xffffffff

/* On Gen8+ automatically triggered OA reports include a 'reason' field... */
#define OAREPORT_REASON_MASK           0x3f
#define OAREPORT_REASON_MASK_EXTENDED  0x7f
#define OAREPORT_REASON_SHIFT          19
#define OAREPORT_REASON_TIMER          (1<<0)
#define OAREPORT_REASON_CTX_SWITCH     (1<<3)
#define OAREPORT_REASON_CLK_RATIO      (1<<5)


/* For sysctl proc_dointvec_minmax of i915_oa_max_sample_rate
 *
 * The highest sampling frequency we can theoretically program the OA unit
 * with is always half the timestamp frequency: E.g. 6.25MHz for Haswell.
 *
 * Initialized just before we register the sysctl parameter.
 */
static int oa_sample_rate_hard_limit;

/* Theoretically we can program the OA unit to sample every 160ns but don't
 * allow that by default unless root...
 *
 * The default threshold of 100000Hz is based on perf's similar
 * kernel.perf_event_max_sample_rate sysctl parameter.
 */
static u32 i915_oa_max_sample_rate = 100000;

/* XXX: beware if future OA HW adds new report formats that the current
 * code assumes all reports have a power-of-two size and ~(size - 1) can
 * be used as a mask to align the OA tail pointer.
 */
static const struct i915_oa_format oa_formats[I915_OA_FORMAT_MAX] = {
	[I915_OA_FORMAT_A13]	    = { 0, 64 },
	[I915_OA_FORMAT_A29]	    = { 1, 128 },
	[I915_OA_FORMAT_A13_B8_C8]  = { 2, 128 },
	/* A29_B8_C8 Disallowed as 192 bytes doesn't factor into buffer size */
	[I915_OA_FORMAT_B4_C8]	    = { 4, 64 },
	[I915_OA_FORMAT_A45_B8_C8]  = { 5, 256 },
	[I915_OA_FORMAT_B4_C8_A16]  = { 6, 128 },
	[I915_OA_FORMAT_C4_B8]	    = { 7, 64 },
	[I915_OA_FORMAT_A12]		    = { 0, 64 },
	[I915_OA_FORMAT_A12_B8_C8]	    = { 2, 128 },
	[I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
};
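/*
 * The power-of-two sizes above are what allow the tail pointer alignment
 * mentioned in the XXX comment. As a quick illustration (values invented):
 * with a 64 byte format, a raw HW tail of 0x12f0 is aligned down with
 *
 *	hw_tail &= ~(report_size - 1);	// 0x12f0 & ~0x3f == 0x12c0
 *
 * which is exactly what oa_buffer_check_unlocked() below relies on.
 */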
#define SAMPLE_OA_REPORT      (1<<0)

/**
 * struct perf_open_properties - for validated properties given to open a stream
 * @sample_flags: `DRM_I915_PERF_PROP_SAMPLE_*` properties are tracked as flags
 * @single_context: Whether a single or all gpu contexts should be monitored
 * @hold_preemption: Whether the preemption is disabled for the filtered
 *                   context
 * @ctx_handle: A gem ctx handle for use with @single_context
 * @metrics_set: An ID for an OA unit metric set advertised via sysfs
 * @oa_format: An OA unit HW report format
 * @oa_periodic: Whether to enable periodic OA unit sampling
 * @oa_period_exponent: The OA unit sampling period is derived from this
 * @engine: The engine (typically rcs0) being monitored by the OA unit
 * @has_sseu: Whether @sseu was specified by userspace
 * @sseu: internal SSEU configuration computed either from the userspace
 *        specified configuration in the opening parameters or a default value
 *        (see get_default_sseu_config())
 * @poll_oa_period: The period in nanoseconds at which the CPU will check for OA
 *                  data availability
 *
 * As read_properties_unlocked() enumerates and validates the properties given
 * to open a stream of metrics the configuration is built up in the structure
 * which starts out zero initialized.
 */
struct perf_open_properties {
	u32 sample_flags;

	u64 single_context:1;
	u64 hold_preemption:1;
	u64 ctx_handle;

	/* OA sampling state */
	int metrics_set;
	int oa_format;
	bool oa_periodic;
	int oa_period_exponent;

	struct intel_engine_cs *engine;

	bool has_sseu;
	struct intel_sseu sseu;

	u64 poll_oa_period;
};

struct i915_oa_config_bo {
	struct llist_node node;

	struct i915_oa_config *oa_config;
	struct i915_vma *vma;
};

static struct ctl_table_header *sysctl_header;

static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer);

void i915_oa_config_release(struct kref *ref)
{
	struct i915_oa_config *oa_config =
		container_of(ref, typeof(*oa_config), ref);

	kfree(oa_config->flex_regs);
	kfree(oa_config->b_counter_regs);
	kfree(oa_config->mux_regs);

	kfree_rcu(oa_config, rcu);
}

struct i915_oa_config *
i915_perf_get_oa_config(struct i915_perf *perf, int metrics_set)
{
	struct i915_oa_config *oa_config;

	rcu_read_lock();
	oa_config = idr_find(&perf->metrics_idr, metrics_set);
	if (oa_config)
		oa_config = i915_oa_config_get(oa_config);
	rcu_read_unlock();

	return oa_config;
}

static void free_oa_config_bo(struct i915_oa_config_bo *oa_bo)
{
	i915_oa_config_put(oa_bo->oa_config);
	i915_vma_put(oa_bo->vma);
	kfree(oa_bo);
}

static u32 gen12_oa_hw_tail_read(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	return intel_uncore_read(uncore, GEN12_OAG_OATAILPTR) &
	       GEN12_OAG_OATAILPTR_MASK;
}

static u32 gen8_oa_hw_tail_read(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	return intel_uncore_read(uncore, GEN8_OATAILPTR) & GEN8_OATAILPTR_MASK;
}

static u32 gen7_oa_hw_tail_read(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);

	return oastatus1 & GEN7_OASTATUS1_TAIL_MASK;
}

/**
 * oa_buffer_check_unlocked - check for data and update tail ptr state
 * @stream: i915 stream instance
 *
 * This is either called via fops (for blocking reads in user ctx) or the poll
 * check hrtimer (atomic ctx) to check the OA buffer tail pointer and check
 * if there is data available for userspace to read.
 *
 * This function is central to providing a workaround for the OA unit tail
 * pointer having a race with respect to what data is visible to the CPU.
 * It is responsible for reading tail pointers from the hardware and giving
 * the pointers time to 'age' before they are made available for reading.
 * (See description of OA_TAIL_MARGIN_NSEC above for further details.)
 *
 * Besides returning true when there is data available to read() this function
 * also updates the tail, aging_tail and aging_timestamp in the oa_buffer
 * object.
 *
 * Note: It's safe to read OA config state here unlocked, assuming that this is
 * only called while the stream is enabled, while the global OA configuration
 * can't be modified.
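 *
 * In outline (a sketch of the logic below, not additional behaviour): if the
 * HW tail matches the previously noted aging_tail and has done so for more
 * than OA_TAIL_MARGIN_NSEC, the aging_tail is promoted to the visible tail;
 * otherwise the new HW tail is walked backwards past any report whose first
 * two dwords are still zero (i.e. not yet landed) before being noted as the
 * new aging_tail.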
 *
 * Returns: %true if the OA buffer contains data, else %false
 */
static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream)
{
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	int report_size = stream->oa_buffer.format_size;
	unsigned long flags;
	bool pollin;
	u32 hw_tail;
	u64 now;

	/* We have to consider the (unlikely) possibility that read() errors
	 * could result in an OA buffer reset which might reset the head and
	 * tail state.
	 */
	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	hw_tail = stream->perf->ops.oa_hw_tail_read(stream);

	/* The tail pointer increases in 64 byte increments,
	 * not in report_size steps...
	 */
	hw_tail &= ~(report_size - 1);

	now = ktime_get_mono_fast_ns();

	if (hw_tail == stream->oa_buffer.aging_tail &&
	    (now - stream->oa_buffer.aging_timestamp) > OA_TAIL_MARGIN_NSEC) {
		/* If the HW tail hasn't moved since the last check and the HW
		 * tail has been aging for long enough, declare it the new
		 * tail.
		 */
		stream->oa_buffer.tail = stream->oa_buffer.aging_tail;
	} else {
		u32 head, tail, aged_tail;

		/* NB: The head we observe here might effectively be a little
		 * out of date. If a read() is in progress, the head could be
		 * anywhere between this head and stream->oa_buffer.tail.
		 */
		head = stream->oa_buffer.head - gtt_offset;
		aged_tail = stream->oa_buffer.tail - gtt_offset;

		hw_tail -= gtt_offset;
		tail = hw_tail;

		/* Walk the stream backward until we find a report with dword 0
		 * & 1 not at 0. Since the circular buffer pointers progress by
		 * increments of 64 bytes and that reports can be up to 256
		 * bytes long, we can't tell whether a report has fully landed
		 * in memory before the first 2 dwords of the following report
		 * have effectively landed.
		 *
		 * This is assuming that the writes of the OA unit land in
		 * memory in the order they were written to.
		 * If not : (╯°□°)╯︵ ┻━┻
		 */
		while (OA_TAKEN(tail, aged_tail) >= report_size) {
			u32 *report32 = (void *)(stream->oa_buffer.vaddr + tail);

			if (report32[0] != 0 || report32[1] != 0)
				break;

			tail = (tail - report_size) & (OA_BUFFER_SIZE - 1);
		}

		if (OA_TAKEN(hw_tail, tail) > report_size &&
		    __ratelimit(&stream->perf->tail_pointer_race))
			DRM_NOTE("unlanded report(s) head=0x%x "
				 "tail=0x%x hw_tail=0x%x\n",
				 head, tail, hw_tail);

		stream->oa_buffer.tail = gtt_offset + tail;
		stream->oa_buffer.aging_tail = gtt_offset + hw_tail;
		stream->oa_buffer.aging_timestamp = now;
	}

	pollin = OA_TAKEN(stream->oa_buffer.tail - gtt_offset,
			  stream->oa_buffer.head - gtt_offset) >= report_size;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	return pollin;
}
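/*
 * For reference, the byte stream produced by the append_* helpers below is
 * parsed by userspace as a sequence of struct drm_i915_perf_record_header
 * records, each followed by record-type specific data. A minimal consumer
 * sketch (illustrative only; the buf/len naming and process_oa_report()
 * helper are ours, not part of the uAPI):
 *
 *	const uint8_t *p = buf, *end = buf + len;
 *
 *	while (p + sizeof(struct drm_i915_perf_record_header) <= end) {
 *		const struct drm_i915_perf_record_header *h = (const void *)p;
 *
 *		if (h->size < sizeof(*h) || p + h->size > end)
 *			break;	// malformed or short read
 *
 *		if (h->type == DRM_I915_PERF_RECORD_SAMPLE)
 *			process_oa_report(p + sizeof(*h));	// hypothetical
 *
 *		p += h->size;
 *	}
 */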
/**
 * append_oa_status - Appends a status record to a userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @type: The kind of status to report to userspace
 *
 * Writes a status record (such as `DRM_I915_PERF_RECORD_OA_REPORT_LOST`)
 * into the userspace read() buffer.
 *
 * The @buf @offset will only be updated on success.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int append_oa_status(struct i915_perf_stream *stream,
			    char __user *buf,
			    size_t count,
			    size_t *offset,
			    enum drm_i915_perf_record_type type)
{
	struct drm_i915_perf_record_header header = { type, 0, sizeof(header) };

	if ((count - *offset) < header.size)
		return -ENOSPC;

	if (copy_to_user(buf + *offset, &header, sizeof(header)))
		return -EFAULT;

	(*offset) += header.size;

	return 0;
}

/**
 * append_oa_sample - Copies a single OA report into userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @report: A single OA report to (optionally) include as part of the sample
 *
 * The contents of a sample are configured through `DRM_I915_PERF_PROP_SAMPLE_*`
 * properties when opening a stream, tracked as `stream->sample_flags`. This
 * function copies the requested components of a single sample to the given
 * read() @buf.
 *
 * The @buf @offset will only be updated on success.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int append_oa_sample(struct i915_perf_stream *stream,
			    char __user *buf,
			    size_t count,
			    size_t *offset,
			    const u8 *report)
{
	int report_size = stream->oa_buffer.format_size;
	struct drm_i915_perf_record_header header;

	header.type = DRM_I915_PERF_RECORD_SAMPLE;
	header.pad = 0;
	header.size = stream->sample_size;

	if ((count - *offset) < header.size)
		return -ENOSPC;

	buf += *offset;
	if (copy_to_user(buf, &header, sizeof(header)))
		return -EFAULT;
	buf += sizeof(header);

	if (copy_to_user(buf, report, report_size))
		return -EFAULT;

	(*offset) += header.size;

	return 0;
}

/**
 * gen8_append_oa_reports - Copies all buffered OA reports into
 *			    userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Notably any error condition resulting in a short read (-%ENOSPC or
 * -%EFAULT) will be returned even though one or more records may
 * have been successfully copied. In this case it's up to the caller
 * to decide if the error should be squashed before returning to
 * userspace.
 *
 * Note: reports are consumed from the head, and appended to the
 * tail, so the tail chases the head?... If you think that's mad
 * and back-to-front you're not alone, but this follows the
 * Gen PRM naming convention.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int gen8_append_oa_reports(struct i915_perf_stream *stream,
				  char __user *buf,
				  size_t count,
				  size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	int report_size = stream->oa_buffer.format_size;
	u8 *oa_buf_base = stream->oa_buffer.vaddr;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	u32 mask = (OA_BUFFER_SIZE - 1);
	size_t start_offset = *offset;
	unsigned long flags;
	u32 head, tail;
	u32 taken;
	int ret = 0;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled))
		return -EIO;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	head = stream->oa_buffer.head;
	tail = stream->oa_buffer.tail;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/*
	 * NB: oa_buffer.head/tail include the gtt_offset which we don't want
	 * while indexing relative to oa_buf_base.
	 */
	head -= gtt_offset;
	tail -= gtt_offset;

	/*
	 * An out of bounds or misaligned head or tail pointer implies a driver
	 * bug since we validate + align the tail pointers we read from the
	 * hardware and we are in full control of the head pointer which should
	 * only be incremented by multiples of the report size (notably also
	 * all a power of two).
	 */
	if (drm_WARN_ONCE(&uncore->i915->drm,
			  head > OA_BUFFER_SIZE || head % report_size ||
			  tail > OA_BUFFER_SIZE || tail % report_size,
			  "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
			  head, tail))
		return -EIO;


	for (/* none */;
	     (taken = OA_TAKEN(tail, head));
	     head = (head + report_size) & mask) {
		u8 *report = oa_buf_base + head;
		u32 *report32 = (void *)report;
		u32 ctx_id;
		u32 reason;

		/*
		 * All the report sizes factor neatly into the buffer
		 * size so we never expect to see a report split
		 * between the beginning and end of the buffer.
		 *
		 * Given the initial alignment check a misalignment
		 * here would imply a driver bug that would result
		 * in an overrun.
		 */
		if (drm_WARN_ON(&uncore->i915->drm,
				(OA_BUFFER_SIZE - head) < report_size)) {
			drm_err(&uncore->i915->drm,
				"Spurious OA head ptr: non-integral report offset\n");
			break;
		}

		/*
		 * The reason field includes flags identifying what
		 * triggered this specific report (mostly timer
		 * triggered or e.g. due to a context switch).
		 *
		 * This field is never expected to be zero so we can
		 * check that the report isn't invalid before copying
		 * it to userspace...
		 */
		reason = ((report32[0] >> OAREPORT_REASON_SHIFT) &
			  (GRAPHICS_VER(stream->perf->i915) == 12 ?
			   OAREPORT_REASON_MASK_EXTENDED :
			   OAREPORT_REASON_MASK));

		ctx_id = report32[2] & stream->specific_ctx_id_mask;

		/*
		 * Squash whatever is in the CTX_ID field if it's marked as
		 * invalid to be sure we avoid false-positive, single-context
		 * filtering below...
		 *
		 * Note that we don't clear the valid_ctx_bit so userspace can
		 * understand that the ID has been squashed by the kernel.
		 */
		if (!(report32[0] & stream->perf->gen8_valid_ctx_bit) &&
		    GRAPHICS_VER(stream->perf->i915) <= 11)
			ctx_id = report32[2] = INVALID_CTX_ID;

		/*
		 * NB: For Gen 8 the OA unit no longer supports clock gating
		 * off for a specific context and the kernel can't securely
		 * stop the counters from updating as system-wide / global
		 * values.
		 *
		 * Automatic reports now include a context ID so reports can be
		 * filtered on the cpu but it's not worth trying to
		 * automatically subtract/hide counter progress for other
		 * contexts while filtering since we can't stop userspace
		 * issuing MI_REPORT_PERF_COUNT commands which would still
		 * provide a side-band view of the real values.
		 *
		 * To allow userspace (such as Mesa/GL_INTEL_performance_query)
		 * to normalize counters for a single filtered context then it
		 * needs to be forwarded bookend context-switch reports so that
		 * it can track switches in between MI_REPORT_PERF_COUNT
		 * commands and can itself subtract/ignore the progress of
		 * counters associated with other contexts. Note that the
		 * hardware automatically triggers reports when switching to a
		 * new context which are tagged with the ID of the newly active
		 * context. To avoid the complexity (and likely fragility) of
		 * reading ahead while parsing reports to try and minimize
		 * forwarding redundant context switch reports (i.e. between
		 * other, unrelated contexts) we simply elect to forward them
		 * all.
		 *
		 * We don't rely solely on the reason field to identify context
		 * switches since it's not uncommon for periodic samples to
		 * identify a switch before any 'context switch' report.
		 */
		if (!stream->perf->exclusive_stream->ctx ||
		    stream->specific_ctx_id == ctx_id ||
		    stream->oa_buffer.last_ctx_id == stream->specific_ctx_id ||
		    reason & OAREPORT_REASON_CTX_SWITCH) {

			/*
			 * While filtering for a single context we avoid
			 * leaking the IDs of other contexts.
			 */
			if (stream->perf->exclusive_stream->ctx &&
			    stream->specific_ctx_id != ctx_id) {
				report32[2] = INVALID_CTX_ID;
			}

			ret = append_oa_sample(stream, buf, count, offset,
					       report);
			if (ret)
				break;

			stream->oa_buffer.last_ctx_id = ctx_id;
		}

		/*
		 * Clear out the first 2 dwords as a means to detect unlanded
		 * reports.
		 */
		report32[0] = 0;
		report32[1] = 0;
	}

	if (start_offset != *offset) {
		i915_reg_t oaheadptr;

		oaheadptr = GRAPHICS_VER(stream->perf->i915) == 12 ?
			    GEN12_OAG_OAHEADPTR : GEN8_OAHEADPTR;

		spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

		/*
		 * We removed the gtt_offset for the copy loop above, indexing
		 * relative to oa_buf_base so put back here...
		 */
		head += gtt_offset;
		intel_uncore_write(uncore, oaheadptr,
				   head & GEN12_OAG_OAHEADPTR_MASK);
		stream->oa_buffer.head = head;

		spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
	}

	return ret;
}

/**
 * gen8_oa_read - copy status records then buffered OA reports
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Checks OA unit status registers and if necessary appends corresponding
 * status records for userspace (such as for a buffer full condition) and then
 * initiates appending any buffered OA reports.
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * NB: some data may be successfully copied to the userspace buffer
 * even if an error is returned, and this is reflected in the
 * updated @offset.
 *
 * Returns: zero on success or a negative error code
 */
static int gen8_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 oastatus;
	i915_reg_t oastatus_reg;
	int ret;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
		return -EIO;

	oastatus_reg = GRAPHICS_VER(stream->perf->i915) == 12 ?
		       GEN12_OAG_OASTATUS : GEN8_OASTATUS;

	oastatus = intel_uncore_read(uncore, oastatus_reg);

	/*
	 * We treat OABUFFER_OVERFLOW as a significant error:
	 *
	 * Although theoretically we could handle this more gracefully
	 * sometimes, some Gens don't correctly suppress certain
	 * automatically triggered reports in this condition and so we
	 * have to assume that old reports are now being trampled
	 * over.
	 *
	 * Considering how we don't currently give userspace control
	 * over the OA buffer size and always configure a large 16MB
	 * buffer, a buffer overflow anyway likely indicates that
	 * something has gone quite badly wrong.
	 */
	if (oastatus & GEN8_OASTATUS_OABUFFER_OVERFLOW) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
		if (ret)
			return ret;

		drm_dbg(&stream->perf->i915->drm,
			"OA buffer overflow (exponent = %d): force restart\n",
			stream->period_exponent);

		stream->perf->ops.oa_disable(stream);
		stream->perf->ops.oa_enable(stream);

		/*
		 * Note: .oa_enable() is expected to re-init the oabuffer and
		 * reset GEN8_OASTATUS for us
		 */
		oastatus = intel_uncore_read(uncore, oastatus_reg);
	}

	if (oastatus & GEN8_OASTATUS_REPORT_LOST) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
		if (ret)
			return ret;

		intel_uncore_rmw(uncore, oastatus_reg,
				 GEN8_OASTATUS_COUNTER_OVERFLOW |
				 GEN8_OASTATUS_REPORT_LOST,
				 IS_GRAPHICS_VER(uncore->i915, 8, 11) ?
				 (GEN8_OASTATUS_HEAD_POINTER_WRAP |
				  GEN8_OASTATUS_TAIL_POINTER_WRAP) : 0);
	}

	return gen8_append_oa_reports(stream, buf, count, offset);
}

/**
 * gen7_append_oa_reports - Copies all buffered OA reports into
 *			    userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Notably any error condition resulting in a short read (-%ENOSPC or
 * -%EFAULT) will be returned even though one or more records may
 * have been successfully copied. In this case it's up to the caller
 * to decide if the error should be squashed before returning to
 * userspace.
 *
 * Note: reports are consumed from the head, and appended to the
 * tail, so the tail chases the head?... If you think that's mad
 * and back-to-front you're not alone, but this follows the
 * Gen PRM naming convention.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int gen7_append_oa_reports(struct i915_perf_stream *stream,
				  char __user *buf,
				  size_t count,
				  size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	int report_size = stream->oa_buffer.format_size;
	u8 *oa_buf_base = stream->oa_buffer.vaddr;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	u32 mask = (OA_BUFFER_SIZE - 1);
	size_t start_offset = *offset;
	unsigned long flags;
	u32 head, tail;
	u32 taken;
	int ret = 0;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled))
		return -EIO;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	head = stream->oa_buffer.head;
	tail = stream->oa_buffer.tail;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/* NB: oa_buffer.head/tail include the gtt_offset which we don't want
	 * while indexing relative to oa_buf_base.
	 */
	head -= gtt_offset;
	tail -= gtt_offset;

	/* An out of bounds or misaligned head or tail pointer implies a driver
	 * bug since we validate + align the tail pointers we read from the
	 * hardware and we are in full control of the head pointer which should
	 * only be incremented by multiples of the report size (notably also
	 * all a power of two).
	 */
	if (drm_WARN_ONCE(&uncore->i915->drm,
			  head > OA_BUFFER_SIZE || head % report_size ||
			  tail > OA_BUFFER_SIZE || tail % report_size,
			  "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
			  head, tail))
		return -EIO;


	for (/* none */;
	     (taken = OA_TAKEN(tail, head));
	     head = (head + report_size) & mask) {
		u8 *report = oa_buf_base + head;
		u32 *report32 = (void *)report;

		/* All the report sizes factor neatly into the buffer
		 * size so we never expect to see a report split
		 * between the beginning and end of the buffer.
		 *
		 * Given the initial alignment check a misalignment
		 * here would imply a driver bug that would result
		 * in an overrun.
		 */
		if (drm_WARN_ON(&uncore->i915->drm,
				(OA_BUFFER_SIZE - head) < report_size)) {
			drm_err(&uncore->i915->drm,
				"Spurious OA head ptr: non-integral report offset\n");
			break;
		}

		/* The report-ID field for periodic samples includes
		 * some undocumented flags related to what triggered
		 * the report and is never expected to be zero so we
		 * can check that the report isn't invalid before
		 * copying it to userspace...
		 */
		if (report32[0] == 0) {
			if (__ratelimit(&stream->perf->spurious_report_rs))
				DRM_NOTE("Skipping spurious, invalid OA report\n");
			continue;
		}

		ret = append_oa_sample(stream, buf, count, offset, report);
		if (ret)
			break;

		/* Clear out the first 2 dwords as a means to detect unlanded
		 * reports.
		 */
		report32[0] = 0;
		report32[1] = 0;
	}

	if (start_offset != *offset) {
		spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

		/* We removed the gtt_offset for the copy loop above, indexing
		 * relative to oa_buf_base so put back here...
		 */
		head += gtt_offset;

		intel_uncore_write(uncore, GEN7_OASTATUS2,
				   (head & GEN7_OASTATUS2_HEAD_MASK) |
				   GEN7_OASTATUS2_MEM_SELECT_GGTT);
		stream->oa_buffer.head = head;

		spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
	}

	return ret;
}

/**
 * gen7_oa_read - copy status records then buffered OA reports
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Checks Gen 7 specific OA unit status registers and if necessary appends
 * corresponding status records for userspace (such as for a buffer full
 * condition) and then initiates appending any buffered OA reports.
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * Returns: zero on success or a negative error code
 */
static int gen7_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 oastatus1;
	int ret;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
		return -EIO;

	oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);

	/* XXX: On Haswell we don't have a safe way to clear oastatus1
	 * bits while the OA unit is enabled (while the tail pointer
	 * may be updated asynchronously) so we ignore status bits
	 * that have already been reported to userspace.
	 */
	oastatus1 &= ~stream->perf->gen7_latched_oastatus1;

	/* We treat OABUFFER_OVERFLOW as a significant error:
	 *
	 * - The status can be interpreted to mean that the buffer is
	 *   currently full (with a higher precedence than OA_TAKEN()
	 *   which will start to report a near-empty buffer after an
	 *   overflow) but it's awkward that we can't clear the status
	 *   on Haswell, so without a reset we won't be able to catch
	 *   the state again.
	 *
	 * - Since it also implies the HW has started overwriting old
	 *   reports it may also affect our sanity checks for invalid
	 *   reports when copying to userspace that assume new reports
	 *   are being written to cleared memory.
	 *
	 * - In the future we may want to introduce a flight recorder
	 *   mode where the driver will automatically maintain a safe
	 *   guard band between head/tail, avoiding this overflow
	 *   condition, but we avoid the added driver complexity for
	 *   now.
	 */
	if (unlikely(oastatus1 & GEN7_OASTATUS1_OABUFFER_OVERFLOW)) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
		if (ret)
			return ret;

		drm_dbg(&stream->perf->i915->drm,
			"OA buffer overflow (exponent = %d): force restart\n",
			stream->period_exponent);

		stream->perf->ops.oa_disable(stream);
		stream->perf->ops.oa_enable(stream);

		oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);
	}

	if (unlikely(oastatus1 & GEN7_OASTATUS1_REPORT_LOST)) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
		if (ret)
			return ret;
		stream->perf->gen7_latched_oastatus1 |=
			GEN7_OASTATUS1_REPORT_LOST;
	}

	return gen7_append_oa_reports(stream, buf, count, offset);
}

/**
 * i915_oa_wait_unlocked - handles blocking IO until OA data available
 * @stream: An i915-perf stream opened for OA metrics
 *
 * Called when userspace tries to read() from a blocking stream FD opened
 * for OA metrics. It waits until the hrtimer callback finds a non-empty
 * OA buffer and wakes us.
 *
 * Note: it's acceptable to have this return with some false positives
 * since any subsequent read handling will return -EAGAIN if there isn't
 * really data ready for userspace yet.
 *
 * Returns: zero on success or a negative error code
 */
static int i915_oa_wait_unlocked(struct i915_perf_stream *stream)
{
	/* We would wait indefinitely if periodic sampling is not enabled */
	if (!stream->periodic)
		return -EIO;

	return wait_event_interruptible(stream->poll_wq,
					oa_buffer_check_unlocked(stream));
}

/**
 * i915_oa_poll_wait - call poll_wait() for an OA stream poll()
 * @stream: An i915-perf stream opened for OA metrics
 * @file: An i915 perf stream file
 * @wait: poll() state table
 *
 * For handling userspace polling on an i915 perf stream opened for OA metrics,
 * this starts a poll_wait with the wait queue that our hrtimer callback wakes
 * when it sees data ready to read in the circular OA buffer.
 */
static void i915_oa_poll_wait(struct i915_perf_stream *stream,
			      struct file *file,
			      poll_table *wait)
{
	poll_wait(file, &stream->poll_wq, wait);
}

/**
 * i915_oa_read - just calls through to &i915_oa_ops->read
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * Returns: zero on success or a negative error code
 */
static int i915_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	return stream->perf->ops.read(stream, buf, count, offset);
}
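/*
 * Tying the above together from the userspace side, a non-blocking consumer
 * would typically drive the stream fd like this (a sketch; the buffer sizing
 * and error handling are our assumptions, not mandated by the interface):
 *
 *	struct pollfd pfd = { .fd = stream_fd, .events = POLLIN };
 *	char buf[512 * 1024];
 *
 *	while (poll(&pfd, 1, timeout_ms) > 0 && (pfd.revents & POLLIN)) {
 *		ssize_t n = read(stream_fd, buf, sizeof(buf));
 *
 *		if (n < 0 && errno != EAGAIN && errno != EINTR)
 *			break;
 *		// parse records in buf[0..n) as sketched earlier
 *	}
 *
 * where the poll() wake-up cadence is governed by the hrtimer checking the
 * OA buffer (see DEFAULT_POLL_PERIOD_NS and the poll_oa_period property).
 */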
static struct intel_context *oa_pin_context(struct i915_perf_stream *stream)
{
	struct i915_gem_engines_iter it;
	struct i915_gem_context *ctx = stream->ctx;
	struct intel_context *ce;
	struct i915_gem_ww_ctx ww;
	int err = -ENODEV;

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		if (ce->engine != stream->engine) /* first match! */
			continue;

		err = 0;
		break;
	}
	i915_gem_context_unlock_engines(ctx);

	if (err)
		return ERR_PTR(err);

	i915_gem_ww_ctx_init(&ww, true);
retry:
	/*
	 * As the ID is the gtt offset of the context's vma we
	 * pin the vma to ensure the ID remains fixed.
	 */
	err = intel_context_pin_ww(ce, &ww);
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);

	if (err)
		return ERR_PTR(err);

	stream->pinned_ctx = ce;
	return stream->pinned_ctx;
}

/**
 * oa_get_render_ctx_id - determine and hold ctx hw id
 * @stream: An i915-perf stream opened for OA metrics
 *
 * Determine the render context hw id, and ensure it remains fixed for the
 * lifetime of the stream. This ensures that we don't have to worry about
 * updating the context ID in OACONTROL on the fly.
 *
 * Returns: zero on success or a negative error code
 */
static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
{
	struct intel_context *ce;

	ce = oa_pin_context(stream);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	switch (GRAPHICS_VER(ce->engine->i915)) {
	case 7: {
		/*
		 * On Haswell we don't do any post processing of the reports
		 * and don't need to use the mask.
		 */
		stream->specific_ctx_id = i915_ggtt_offset(ce->state);
		stream->specific_ctx_id_mask = 0;
		break;
	}

	case 8:
	case 9:
		if (intel_engine_uses_guc(ce->engine)) {
			/*
			 * When using GuC, the context descriptor we write in
			 * i915 is read by GuC and rewritten before it's
			 * actually written into the hardware. The LRCA is
			 * what is put into the context id field of the
			 * context descriptor by GuC. Because it's aligned to
			 * a page, the lower 12bits are always at 0 and
			 * dropped by GuC. They won't be part of the context
			 * ID in the OA reports, so squash those lower bits.
			 */
			stream->specific_ctx_id = ce->lrc.lrca >> 12;

			/*
			 * GuC uses the top bit to signal proxy submission, so
			 * ignore that bit.
			 */
			stream->specific_ctx_id_mask =
				(1U << (GEN8_CTX_ID_WIDTH - 1)) - 1;
		} else {
			stream->specific_ctx_id_mask =
				(1U << GEN8_CTX_ID_WIDTH) - 1;
			stream->specific_ctx_id = stream->specific_ctx_id_mask;
		}
		break;

	case 11:
	case 12:
		if (GRAPHICS_VER_FULL(ce->engine->i915) >= IP_VER(12, 50)) {
			stream->specific_ctx_id_mask =
				((1U << XEHP_SW_CTX_ID_WIDTH) - 1) <<
				(XEHP_SW_CTX_ID_SHIFT - 32);
			stream->specific_ctx_id =
				(XEHP_MAX_CONTEXT_HW_ID - 1) <<
				(XEHP_SW_CTX_ID_SHIFT - 32);
		} else {
			stream->specific_ctx_id_mask =
				((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32);
			/*
			 * Pick an unused context id
			 * 0 - BITS_PER_LONG are used by other contexts
			 * GEN12_MAX_CONTEXT_HW_ID (0x7ff) is used by idle context
			 */
			stream->specific_ctx_id =
				(GEN12_MAX_CONTEXT_HW_ID - 1) << (GEN11_SW_CTX_ID_SHIFT - 32);
		}
		break;

	default:
		MISSING_CASE(GRAPHICS_VER(ce->engine->i915));
	}

	ce->tag = stream->specific_ctx_id;

	drm_dbg(&stream->perf->i915->drm,
		"filtering on ctx_id=0x%x ctx_id_mask=0x%x\n",
		stream->specific_ctx_id,
		stream->specific_ctx_id_mask);

	return 0;
}

/**
 * oa_put_render_ctx_id - counterpart to oa_get_render_ctx_id releases hold
 * @stream: An i915-perf stream opened for OA metrics
 *
 * In case anything needed doing to ensure the context HW ID would remain valid
 * for the lifetime of the stream, then that can be undone here.
 */
static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
{
	struct intel_context *ce;

	ce = fetch_and_zero(&stream->pinned_ctx);
	if (ce) {
		ce->tag = 0; /* recomputed on next submission after parking */
		intel_context_unpin(ce);
	}

	stream->specific_ctx_id = INVALID_CTX_ID;
	stream->specific_ctx_id_mask = 0;
}

static void
free_oa_buffer(struct i915_perf_stream *stream)
{
	i915_vma_unpin_and_release(&stream->oa_buffer.vma,
				   I915_VMA_RELEASE_MAP);

	stream->oa_buffer.vaddr = NULL;
}

static void
free_oa_configs(struct i915_perf_stream *stream)
{
	struct i915_oa_config_bo *oa_bo, *tmp;

	i915_oa_config_put(stream->oa_config);
	llist_for_each_entry_safe(oa_bo, tmp, stream->oa_config_bos.first, node)
		free_oa_config_bo(oa_bo);
}

static void
free_noa_wait(struct i915_perf_stream *stream)
{
	i915_vma_unpin_and_release(&stream->noa_wait, 0);
}

static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
{
	struct i915_perf *perf = stream->perf;

	if (WARN_ON(stream != perf->exclusive_stream))
		return;

	/*
	 * Unset exclusive_stream first, it will be checked while disabling
	 * the metric set on gen8+.
	 *
	 * See i915_oa_init_reg_state() and lrc_configure_all_contexts()
	 */
	WRITE_ONCE(perf->exclusive_stream, NULL);
	perf->ops.disable_metric_set(stream);

	free_oa_buffer(stream);

	intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL);
	intel_engine_pm_put(stream->engine);

	if (stream->ctx)
		oa_put_render_ctx_id(stream);

	free_oa_configs(stream);
	free_noa_wait(stream);

	if (perf->spurious_report_rs.missed) {
		DRM_NOTE("%d spurious OA report notices suppressed due to ratelimiting\n",
			 perf->spurious_report_rs.missed);
	}
}

static void gen7_init_oa_buffer(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	unsigned long flags;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	/* Pre-DevBDW: OABUFFER must be set with counters off,
	 * before OASTATUS1, but after OASTATUS2
	 */
	intel_uncore_write(uncore, GEN7_OASTATUS2, /* head */
			   gtt_offset | GEN7_OASTATUS2_MEM_SELECT_GGTT);
	stream->oa_buffer.head = gtt_offset;

	intel_uncore_write(uncore, GEN7_OABUFFER, gtt_offset);

	intel_uncore_write(uncore, GEN7_OASTATUS1, /* tail */
			   gtt_offset | OABUFFER_SIZE_16M);

	/* Mark that we need updated tail pointers to read from... */
	stream->oa_buffer.aging_tail = INVALID_TAIL_PTR;
	stream->oa_buffer.tail = gtt_offset;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/* On Haswell we have to track which OASTATUS1 flags we've
	 * already seen since they can't be cleared while periodic
	 * sampling is enabled.
	 */
	stream->perf->gen7_latched_oastatus1 = 0;

	/* NB: although the OA buffer will initially be allocated
	 * zeroed via shmfs (and so this memset is redundant when
	 * first allocating), we may re-init the OA buffer, either
	 * when re-enabling a stream or in error/reset paths.
	 *
	 * The reason we clear the buffer for each re-init is for the
	 * sanity check in gen7_append_oa_reports() that looks at the
	 * report-id field to make sure it's non-zero which relies on
	 * the assumption that new reports are being written to zeroed
	 * memory...
	 */
	memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
}

static void gen8_init_oa_buffer(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	unsigned long flags;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	intel_uncore_write(uncore, GEN8_OASTATUS, 0);
	intel_uncore_write(uncore, GEN8_OAHEADPTR, gtt_offset);
	stream->oa_buffer.head = gtt_offset;

	intel_uncore_write(uncore, GEN8_OABUFFER_UDW, 0);

	/*
	 * PRM says:
	 *
	 *  "This MMIO must be set before the OATAILPTR
	 *  register and after the OAHEADPTR register. This is
	 *  to enable proper functionality of the overflow
	 *  bit."
	 */
	intel_uncore_write(uncore, GEN8_OABUFFER, gtt_offset |
			   OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT);
	intel_uncore_write(uncore, GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK);

	/* Mark that we need updated tail pointers to read from... */
	stream->oa_buffer.aging_tail = INVALID_TAIL_PTR;
	stream->oa_buffer.tail = gtt_offset;

	/*
	 * Reset state used to recognise context switches, affecting which
	 * reports we will forward to userspace while filtering for a single
	 * context.
	 */
	stream->oa_buffer.last_ctx_id = INVALID_CTX_ID;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/*
	 * NB: although the OA buffer will initially be allocated
	 * zeroed via shmfs (and so this memset is redundant when
	 * first allocating), we may re-init the OA buffer, either
	 * when re-enabling a stream or in error/reset paths.
	 *
	 * The reason we clear the buffer for each re-init is for the
	 * sanity check in gen8_append_oa_reports() that looks at the
	 * reason field to make sure it's non-zero which relies on
	 * the assumption that new reports are being written to zeroed
	 * memory...
	 */
	memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
}

static void gen12_init_oa_buffer(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	unsigned long flags;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	intel_uncore_write(uncore, GEN12_OAG_OASTATUS, 0);
	intel_uncore_write(uncore, GEN12_OAG_OAHEADPTR,
			   gtt_offset & GEN12_OAG_OAHEADPTR_MASK);
	stream->oa_buffer.head = gtt_offset;

	/*
	 * PRM says:
	 *
	 *  "This MMIO must be set before the OATAILPTR
	 *  register and after the OAHEADPTR register. This is
	 *  to enable proper functionality of the overflow
	 *  bit."
	 */
	intel_uncore_write(uncore, GEN12_OAG_OABUFFER, gtt_offset |
			   OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT);
	intel_uncore_write(uncore, GEN12_OAG_OATAILPTR,
			   gtt_offset & GEN12_OAG_OATAILPTR_MASK);

	/* Mark that we need updated tail pointers to read from... */
	stream->oa_buffer.aging_tail = INVALID_TAIL_PTR;
	stream->oa_buffer.tail = gtt_offset;

	/*
	 * Reset state used to recognise context switches, affecting which
	 * reports we will forward to userspace while filtering for a single
	 * context.
	 */
	stream->oa_buffer.last_ctx_id = INVALID_CTX_ID;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/*
	 * NB: although the OA buffer will initially be allocated
	 * zeroed via shmfs (and so this memset is redundant when
	 * first allocating), we may re-init the OA buffer, either
	 * when re-enabling a stream or in error/reset paths.
	 *
	 * The reason we clear the buffer for each re-init is for the
	 * sanity check in gen8_append_oa_reports() that looks at the
	 * reason field to make sure it's non-zero which relies on
	 * the assumption that new reports are being written to zeroed
	 * memory...
1558 */ 1559 memset(stream->oa_buffer.vaddr, 0, 1560 stream->oa_buffer.vma->size); 1561 } 1562 1563 static int alloc_oa_buffer(struct i915_perf_stream *stream) 1564 { 1565 struct drm_i915_private *i915 = stream->perf->i915; 1566 struct drm_i915_gem_object *bo; 1567 struct i915_vma *vma; 1568 int ret; 1569 1570 if (drm_WARN_ON(&i915->drm, stream->oa_buffer.vma)) 1571 return -ENODEV; 1572 1573 BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE); 1574 BUILD_BUG_ON(OA_BUFFER_SIZE < SZ_128K || OA_BUFFER_SIZE > SZ_16M); 1575 1576 bo = i915_gem_object_create_shmem(stream->perf->i915, OA_BUFFER_SIZE); 1577 if (IS_ERR(bo)) { 1578 drm_err(&i915->drm, "Failed to allocate OA buffer\n"); 1579 return PTR_ERR(bo); 1580 } 1581 1582 i915_gem_object_set_cache_coherency(bo, I915_CACHE_LLC); 1583 1584 /* PreHSW required 512K alignment, HSW requires 16M */ 1585 vma = i915_gem_object_ggtt_pin(bo, NULL, 0, SZ_16M, 0); 1586 if (IS_ERR(vma)) { 1587 ret = PTR_ERR(vma); 1588 goto err_unref; 1589 } 1590 stream->oa_buffer.vma = vma; 1591 1592 stream->oa_buffer.vaddr = 1593 i915_gem_object_pin_map_unlocked(bo, I915_MAP_WB); 1594 if (IS_ERR(stream->oa_buffer.vaddr)) { 1595 ret = PTR_ERR(stream->oa_buffer.vaddr); 1596 goto err_unpin; 1597 } 1598 1599 return 0; 1600 1601 err_unpin: 1602 __i915_vma_unpin(vma); 1603 1604 err_unref: 1605 i915_gem_object_put(bo); 1606 1607 stream->oa_buffer.vaddr = NULL; 1608 stream->oa_buffer.vma = NULL; 1609 1610 return ret; 1611 } 1612 1613 static u32 *save_restore_register(struct i915_perf_stream *stream, u32 *cs, 1614 bool save, i915_reg_t reg, u32 offset, 1615 u32 dword_count) 1616 { 1617 u32 cmd; 1618 u32 d; 1619 1620 cmd = save ? MI_STORE_REGISTER_MEM : MI_LOAD_REGISTER_MEM; 1621 cmd |= MI_SRM_LRM_GLOBAL_GTT; 1622 if (GRAPHICS_VER(stream->perf->i915) >= 8) 1623 cmd++; 1624 1625 for (d = 0; d < dword_count; d++) { 1626 *cs++ = cmd; 1627 *cs++ = i915_mmio_reg_offset(reg) + 4 * d; 1628 *cs++ = intel_gt_scratch_offset(stream->engine->gt, 1629 offset) + 4 * d; 1630 *cs++ = 0; 1631 } 1632 1633 return cs; 1634 } 1635 1636 static int alloc_noa_wait(struct i915_perf_stream *stream) 1637 { 1638 struct drm_i915_private *i915 = stream->perf->i915; 1639 struct drm_i915_gem_object *bo; 1640 struct i915_vma *vma; 1641 const u64 delay_ticks = 0xffffffffffffffff - 1642 intel_gt_ns_to_clock_interval(to_gt(stream->perf->i915), 1643 atomic64_read(&stream->perf->noa_programming_delay)); 1644 const u32 base = stream->engine->mmio_base; 1645 #define CS_GPR(x) GEN8_RING_CS_GPR(base, x) 1646 u32 *batch, *ts0, *cs, *jump; 1647 struct i915_gem_ww_ctx ww; 1648 int ret, i; 1649 enum { 1650 START_TS, 1651 NOW_TS, 1652 DELTA_TS, 1653 JUMP_PREDICATE, 1654 DELTA_TARGET, 1655 N_CS_GPR 1656 }; 1657 1658 bo = i915_gem_object_create_internal(i915, 4096); 1659 if (IS_ERR(bo)) { 1660 drm_err(&i915->drm, 1661 "Failed to allocate NOA wait batchbuffer\n"); 1662 return PTR_ERR(bo); 1663 } 1664 1665 i915_gem_ww_ctx_init(&ww, true); 1666 retry: 1667 ret = i915_gem_object_lock(bo, &ww); 1668 if (ret) 1669 goto out_ww; 1670 1671 /* 1672 * We pin in GGTT because we jump into this buffer now because 1673 * multiple OA config BOs will have a jump to this address and it 1674 * needs to be fixed during the lifetime of the i915/perf stream. 
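 *
 * For orientation, the batch built below has roughly this shape:
 *
 *	save CS_GPR[0..N_CS_GPR) and MI_PREDICATE_RESULT_1 to scratch
 *	START_TS = RING_TIMESTAMP		<-- ts0
 * loop:
 *	NOW_TS = RING_TIMESTAMP
 *	DELTA_TS = NOW_TS - START_TS		(MI_MATH)
 *	predicated jump back to ts0 if the timestamp wrapped
 *	predicated jump back to loop if DELTA_TS <= target
 *	restore the saved registers, MI_BATCH_BUFFER_END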
1675 */ 1676 vma = i915_gem_object_ggtt_pin_ww(bo, &ww, NULL, 0, 0, PIN_HIGH); 1677 if (IS_ERR(vma)) { 1678 ret = PTR_ERR(vma); 1679 goto out_ww; 1680 } 1681 1682 batch = cs = i915_gem_object_pin_map(bo, I915_MAP_WB); 1683 if (IS_ERR(batch)) { 1684 ret = PTR_ERR(batch); 1685 goto err_unpin; 1686 } 1687 1688 /* Save registers. */ 1689 for (i = 0; i < N_CS_GPR; i++) 1690 cs = save_restore_register( 1691 stream, cs, true /* save */, CS_GPR(i), 1692 INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR + 8 * i, 2); 1693 cs = save_restore_register( 1694 stream, cs, true /* save */, MI_PREDICATE_RESULT_1(RENDER_RING_BASE), 1695 INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1, 1); 1696 1697 /* First timestamp snapshot location. */ 1698 ts0 = cs; 1699 1700 /* 1701 * Initial snapshot of the timestamp register to implement the wait. 1702 * We work with 32b values, so clear out the top 32 bits of the 1703 * register because the ALU works on 64 bits. 1704 */ 1705 *cs++ = MI_LOAD_REGISTER_IMM(1); 1706 *cs++ = i915_mmio_reg_offset(CS_GPR(START_TS)) + 4; 1707 *cs++ = 0; 1708 *cs++ = MI_LOAD_REGISTER_REG | (3 - 2); 1709 *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base)); 1710 *cs++ = i915_mmio_reg_offset(CS_GPR(START_TS)); 1711 1712 /* 1713 * This is the location we're going to jump back into until the 1714 * required amount of time has passed. 1715 */ 1716 jump = cs; 1717 1718 /* 1719 * Take another snapshot of the timestamp register. Take care to clear 1720 * the top 32 bits of CS_GPR(NOW_TS) as we're using it for other 1721 * operations below. 1722 */ 1723 *cs++ = MI_LOAD_REGISTER_IMM(1); 1724 *cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS)) + 4; 1725 *cs++ = 0; 1726 *cs++ = MI_LOAD_REGISTER_REG | (3 - 2); 1727 *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base)); 1728 *cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS)); 1729 1730 /* 1731 * Do a diff between the two timestamps and store the result back into 1732 * CS_GPR(DELTA_TS). 1733 */ 1734 *cs++ = MI_MATH(5); 1735 *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(NOW_TS)); 1736 *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(START_TS)); 1737 *cs++ = MI_MATH_SUB; 1738 *cs++ = MI_MATH_STORE(MI_MATH_REG(DELTA_TS), MI_MATH_REG_ACCU); 1739 *cs++ = MI_MATH_STORE(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF); 1740 1741 /* 1742 * Transfer the carry flag (set to 1 if ts1 < ts0, meaning the 1743 * timestamp has rolled over the 32 bits) into the predicate register 1744 * to be used for the predicated jump. 1745 */ 1746 *cs++ = MI_LOAD_REGISTER_REG | (3 - 2); 1747 *cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE)); 1748 *cs++ = i915_mmio_reg_offset(MI_PREDICATE_RESULT_1(RENDER_RING_BASE)); 1749 1750 /* Restart from the beginning if we had timestamps roll over. */ 1751 *cs++ = (GRAPHICS_VER(i915) < 8 ? 1752 MI_BATCH_BUFFER_START : 1753 MI_BATCH_BUFFER_START_GEN8) | 1754 MI_BATCH_PREDICATE; 1755 *cs++ = i915_ggtt_offset(vma) + (ts0 - batch) * 4; 1756 *cs++ = 0; 1757 1758 /* 1759 * Now take the diff between the two previous timestamps and add it to: 1760 *      ((1 << 64) - 1) - target_ticks 1761 * 1762 * When the Carry Flag contains 1 this means the elapsed time is 1763 * longer than the expected delay, and we can exit the wait loop.
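 *
 * To spell out the carry trick: delay_ticks is computed above as
 * ((1 << 64) - 1) - target_ticks, where target_ticks is the NOA
 * programming delay converted to CS timestamp ticks. The MI_MATH ADD
 * below therefore carries out of 64 bits exactly when
 *
 *	DELTA_TS + delay_ticks >= (1 << 64)  <=>  DELTA_TS > target_ticks
 *
 * so the (inverted) carry flag becomes the "keep waiting" predicate.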
1764 */ 1765 *cs++ = MI_LOAD_REGISTER_IMM(2); 1766 *cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET)); 1767 *cs++ = lower_32_bits(delay_ticks); 1768 *cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET)) + 4; 1769 *cs++ = upper_32_bits(delay_ticks); 1770 1771 *cs++ = MI_MATH(4); 1772 *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(DELTA_TS)); 1773 *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(DELTA_TARGET)); 1774 *cs++ = MI_MATH_ADD; 1775 *cs++ = MI_MATH_STOREINV(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF); 1776 1777 *cs++ = MI_ARB_CHECK; 1778 1779 /* 1780 * Transfer the result into the predicate register to be used for the 1781 * predicated jump. 1782 */ 1783 *cs++ = MI_LOAD_REGISTER_REG | (3 - 2); 1784 *cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE)); 1785 *cs++ = i915_mmio_reg_offset(MI_PREDICATE_RESULT_1(RENDER_RING_BASE)); 1786 1787 /* Predicate the jump. */ 1788 *cs++ = (GRAPHICS_VER(i915) < 8 ? 1789 MI_BATCH_BUFFER_START : 1790 MI_BATCH_BUFFER_START_GEN8) | 1791 MI_BATCH_PREDICATE; 1792 *cs++ = i915_ggtt_offset(vma) + (jump - batch) * 4; 1793 *cs++ = 0; 1794 1795 /* Restore registers. */ 1796 for (i = 0; i < N_CS_GPR; i++) 1797 cs = save_restore_register( 1798 stream, cs, false /* restore */, CS_GPR(i), 1799 INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR + 8 * i, 2); 1800 cs = save_restore_register( 1801 stream, cs, false /* restore */, MI_PREDICATE_RESULT_1(RENDER_RING_BASE), 1802 INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1, 1); 1803 1804 /* And return to the ring. */ 1805 *cs++ = MI_BATCH_BUFFER_END; 1806 1807 GEM_BUG_ON(cs - batch > PAGE_SIZE / sizeof(*batch)); 1808 1809 i915_gem_object_flush_map(bo); 1810 __i915_gem_object_release_map(bo); 1811 1812 stream->noa_wait = vma; 1813 goto out_ww; 1814 1815 err_unpin: 1816 i915_vma_unpin_and_release(&vma, 0); 1817 out_ww: 1818 if (ret == -EDEADLK) { 1819 ret = i915_gem_ww_ctx_backoff(&ww); 1820 if (!ret) 1821 goto retry; 1822 } 1823 i915_gem_ww_ctx_fini(&ww); 1824 if (ret) 1825 i915_gem_object_put(bo); 1826 return ret; 1827 } 1828 1829 static u32 *write_cs_mi_lri(u32 *cs, 1830 const struct i915_oa_reg *reg_data, 1831 u32 n_regs) 1832 { 1833 u32 i; 1834 1835 for (i = 0; i < n_regs; i++) { 1836 if ((i % MI_LOAD_REGISTER_IMM_MAX_REGS) == 0) { 1837 u32 n_lri = min_t(u32, 1838 n_regs - i, 1839 MI_LOAD_REGISTER_IMM_MAX_REGS); 1840 1841 *cs++ = MI_LOAD_REGISTER_IMM(n_lri); 1842 } 1843 *cs++ = i915_mmio_reg_offset(reg_data[i].addr); 1844 *cs++ = reg_data[i].value; 1845 } 1846 1847 return cs; 1848 } 1849 1850 static int num_lri_dwords(int num_regs) 1851 { 1852 int count = 0; 1853 1854 if (num_regs > 0) { 1855 count += DIV_ROUND_UP(num_regs, MI_LOAD_REGISTER_IMM_MAX_REGS); 1856 count += num_regs * 2; 1857 } 1858 1859 return count; 1860 } 1861 1862 static struct i915_oa_config_bo * 1863 alloc_oa_config_buffer(struct i915_perf_stream *stream, 1864 struct i915_oa_config *oa_config) 1865 { 1866 struct drm_i915_gem_object *obj; 1867 struct i915_oa_config_bo *oa_bo; 1868 struct i915_gem_ww_ctx ww; 1869 size_t config_length = 0; 1870 u32 *cs; 1871 int err; 1872 1873 oa_bo = kzalloc(sizeof(*oa_bo), GFP_KERNEL); 1874 if (!oa_bo) 1875 return ERR_PTR(-ENOMEM); 1876 1877 config_length += num_lri_dwords(oa_config->mux_regs_len); 1878 config_length += num_lri_dwords(oa_config->b_counter_regs_len); 1879 config_length += num_lri_dwords(oa_config->flex_regs_len); 1880 config_length += 3; /* MI_BATCH_BUFFER_START */ 1881 config_length = ALIGN(sizeof(u32) * config_length, I915_GTT_PAGE_SIZE); 1882 1883 obj = i915_gem_object_create_shmem(stream->perf->i915, 
config_length); 1884 if (IS_ERR(obj)) { 1885 err = PTR_ERR(obj); 1886 goto err_free; 1887 } 1888 1889 i915_gem_ww_ctx_init(&ww, true); 1890 retry: 1891 err = i915_gem_object_lock(obj, &ww); 1892 if (err) 1893 goto out_ww; 1894 1895 cs = i915_gem_object_pin_map(obj, I915_MAP_WB); 1896 if (IS_ERR(cs)) { 1897 err = PTR_ERR(cs); 1898 goto out_ww; 1899 } 1900 1901 cs = write_cs_mi_lri(cs, 1902 oa_config->mux_regs, 1903 oa_config->mux_regs_len); 1904 cs = write_cs_mi_lri(cs, 1905 oa_config->b_counter_regs, 1906 oa_config->b_counter_regs_len); 1907 cs = write_cs_mi_lri(cs, 1908 oa_config->flex_regs, 1909 oa_config->flex_regs_len); 1910 1911 /* Jump into the active wait. */ 1912 *cs++ = (GRAPHICS_VER(stream->perf->i915) < 8 ? 1913 MI_BATCH_BUFFER_START : 1914 MI_BATCH_BUFFER_START_GEN8); 1915 *cs++ = i915_ggtt_offset(stream->noa_wait); 1916 *cs++ = 0; 1917 1918 i915_gem_object_flush_map(obj); 1919 __i915_gem_object_release_map(obj); 1920 1921 oa_bo->vma = i915_vma_instance(obj, 1922 &stream->engine->gt->ggtt->vm, 1923 NULL); 1924 if (IS_ERR(oa_bo->vma)) { 1925 err = PTR_ERR(oa_bo->vma); 1926 goto out_ww; 1927 } 1928 1929 oa_bo->oa_config = i915_oa_config_get(oa_config); 1930 llist_add(&oa_bo->node, &stream->oa_config_bos); 1931 1932 out_ww: 1933 if (err == -EDEADLK) { 1934 err = i915_gem_ww_ctx_backoff(&ww); 1935 if (!err) 1936 goto retry; 1937 } 1938 i915_gem_ww_ctx_fini(&ww); 1939 1940 if (err) 1941 i915_gem_object_put(obj); 1942 err_free: 1943 if (err) { 1944 kfree(oa_bo); 1945 return ERR_PTR(err); 1946 } 1947 return oa_bo; 1948 } 1949 1950 static struct i915_vma * 1951 get_oa_vma(struct i915_perf_stream *stream, struct i915_oa_config *oa_config) 1952 { 1953 struct i915_oa_config_bo *oa_bo; 1954 1955 /* 1956 * Look for the buffer in the already allocated BOs attached 1957 * to the stream. 
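 *
 * A config is matched both by pointer and, defensively, by UUID. On a
 * miss we build a new batch via alloc_oa_config_buffer() and cache it
 * on stream->oa_config_bos, so repeated reconfigurations with the same
 * metric set reuse the same batch until the stream is destroyed.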
1958 */ 1959 llist_for_each_entry(oa_bo, stream->oa_config_bos.first, node) { 1960 if (oa_bo->oa_config == oa_config && 1961 memcmp(oa_bo->oa_config->uuid, 1962 oa_config->uuid, 1963 sizeof(oa_config->uuid)) == 0) 1964 goto out; 1965 } 1966 1967 oa_bo = alloc_oa_config_buffer(stream, oa_config); 1968 if (IS_ERR(oa_bo)) 1969 return ERR_CAST(oa_bo); 1970 1971 out: 1972 return i915_vma_get(oa_bo->vma); 1973 } 1974 1975 static int 1976 emit_oa_config(struct i915_perf_stream *stream, 1977 struct i915_oa_config *oa_config, 1978 struct intel_context *ce, 1979 struct i915_active *active) 1980 { 1981 struct i915_request *rq; 1982 struct i915_vma *vma; 1983 struct i915_gem_ww_ctx ww; 1984 int err; 1985 1986 vma = get_oa_vma(stream, oa_config); 1987 if (IS_ERR(vma)) 1988 return PTR_ERR(vma); 1989 1990 i915_gem_ww_ctx_init(&ww, true); 1991 retry: 1992 err = i915_gem_object_lock(vma->obj, &ww); 1993 if (err) 1994 goto err; 1995 1996 err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_GLOBAL | PIN_HIGH); 1997 if (err) 1998 goto err; 1999 2000 intel_engine_pm_get(ce->engine); 2001 rq = i915_request_create(ce); 2002 intel_engine_pm_put(ce->engine); 2003 if (IS_ERR(rq)) { 2004 err = PTR_ERR(rq); 2005 goto err_vma_unpin; 2006 } 2007 2008 if (!IS_ERR_OR_NULL(active)) { 2009 /* After all individual context modifications */ 2010 err = i915_request_await_active(rq, active, 2011 I915_ACTIVE_AWAIT_ACTIVE); 2012 if (err) 2013 goto err_add_request; 2014 2015 err = i915_active_add_request(active, rq); 2016 if (err) 2017 goto err_add_request; 2018 } 2019 2020 err = i915_request_await_object(rq, vma->obj, 0); 2021 if (!err) 2022 err = i915_vma_move_to_active(vma, rq, 0); 2023 if (err) 2024 goto err_add_request; 2025 2026 err = rq->engine->emit_bb_start(rq, 2027 vma->node.start, 0, 2028 I915_DISPATCH_SECURE); 2029 if (err) 2030 goto err_add_request; 2031 2032 err_add_request: 2033 i915_request_add(rq); 2034 err_vma_unpin: 2035 i915_vma_unpin(vma); 2036 err: 2037 if (err == -EDEADLK) { 2038 err = i915_gem_ww_ctx_backoff(&ww); 2039 if (!err) 2040 goto retry; 2041 } 2042 2043 i915_gem_ww_ctx_fini(&ww); 2044 i915_vma_put(vma); 2045 return err; 2046 } 2047 2048 static struct intel_context *oa_context(struct i915_perf_stream *stream) 2049 { 2050 return stream->pinned_ctx ?: stream->engine->kernel_context; 2051 } 2052 2053 static int 2054 hsw_enable_metric_set(struct i915_perf_stream *stream, 2055 struct i915_active *active) 2056 { 2057 struct intel_uncore *uncore = stream->uncore; 2058 2059 /* 2060 * PRM: 2061 * 2062 * OA unit is using “crclk” for its functionality. When trunk 2063 * level clock gating takes place, OA clock would be gated, 2064 * unable to count the events from non-render clock domain. 2065 * Render clock gating must be disabled when OA is enabled to 2066 * count the events from non-render domain. Unit level clock 2067 * gating for RCS should also be disabled. 
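 *
 * Hence the pair of RMWs below: clear GEN7_DOP_CLOCK_GATE_ENABLE in
 * GEN7_MISCCPCTL and set GEN6_CSUNIT_CLOCK_GATE_DISABLE in
 * GEN6_UCGCTL1 (both undone again in hsw_disable_metric_set()).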
2068 */ 2069 intel_uncore_rmw(uncore, GEN7_MISCCPCTL, 2070 GEN7_DOP_CLOCK_GATE_ENABLE, 0); 2071 intel_uncore_rmw(uncore, GEN6_UCGCTL1, 2072 0, GEN6_CSUNIT_CLOCK_GATE_DISABLE); 2073 2074 return emit_oa_config(stream, 2075 stream->oa_config, oa_context(stream), 2076 active); 2077 } 2078 2079 static void hsw_disable_metric_set(struct i915_perf_stream *stream) 2080 { 2081 struct intel_uncore *uncore = stream->uncore; 2082 2083 intel_uncore_rmw(uncore, GEN6_UCGCTL1, 2084 GEN6_CSUNIT_CLOCK_GATE_DISABLE, 0); 2085 intel_uncore_rmw(uncore, GEN7_MISCCPCTL, 2086 0, GEN7_DOP_CLOCK_GATE_ENABLE); 2087 2088 intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0); 2089 } 2090 2091 static u32 oa_config_flex_reg(const struct i915_oa_config *oa_config, 2092 i915_reg_t reg) 2093 { 2094 u32 mmio = i915_mmio_reg_offset(reg); 2095 int i; 2096 2097 /* 2098 * This arbitrary default will select the 'EU FPU0 Pipeline 2099 * Active' event. In the future it's anticipated that there 2100 * will be an explicit 'No Event' we can select, but not yet... 2101 */ 2102 if (!oa_config) 2103 return 0; 2104 2105 for (i = 0; i < oa_config->flex_regs_len; i++) { 2106 if (i915_mmio_reg_offset(oa_config->flex_regs[i].addr) == mmio) 2107 return oa_config->flex_regs[i].value; 2108 } 2109 2110 return 0; 2111 } 2112 /* 2113 * NB: It must always remain pointer safe to run this even if the OA unit 2114 * has been disabled. 2115 * 2116 * It's fine to put out-of-date values into these per-context registers 2117 * in the case that the OA unit has been disabled. 2118 */ 2119 static void 2120 gen8_update_reg_state_unlocked(const struct intel_context *ce, 2121 const struct i915_perf_stream *stream) 2122 { 2123 u32 ctx_oactxctrl = stream->perf->ctx_oactxctrl_offset; 2124 u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset; 2125 /* The MMIO offsets for Flex EU registers aren't contiguous */ 2126 static const i915_reg_t flex_regs[] = { 2127 EU_PERF_CNTL0, 2128 EU_PERF_CNTL1, 2129 EU_PERF_CNTL2, 2130 EU_PERF_CNTL3, 2131 EU_PERF_CNTL4, 2132 EU_PERF_CNTL5, 2133 EU_PERF_CNTL6, 2134 }; 2135 u32 *reg_state = ce->lrc_reg_state; 2136 int i; 2137 2138 reg_state[ctx_oactxctrl + 1] = 2139 (stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) | 2140 (stream->periodic ? 
GEN8_OA_TIMER_ENABLE : 0) | 2141 GEN8_OA_COUNTER_RESUME; 2142 2143 for (i = 0; i < ARRAY_SIZE(flex_regs); i++) 2144 reg_state[ctx_flexeu0 + i * 2 + 1] = 2145 oa_config_flex_reg(stream->oa_config, flex_regs[i]); 2146 } 2147 2148 struct flex { 2149 i915_reg_t reg; 2150 u32 offset; 2151 u32 value; 2152 }; 2153 2154 static int 2155 gen8_store_flex(struct i915_request *rq, 2156 struct intel_context *ce, 2157 const struct flex *flex, unsigned int count) 2158 { 2159 u32 offset; 2160 u32 *cs; 2161 2162 cs = intel_ring_begin(rq, 4 * count); 2163 if (IS_ERR(cs)) 2164 return PTR_ERR(cs); 2165 2166 offset = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET; 2167 do { 2168 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; 2169 *cs++ = offset + flex->offset * sizeof(u32); 2170 *cs++ = 0; 2171 *cs++ = flex->value; 2172 } while (flex++, --count); 2173 2174 intel_ring_advance(rq, cs); 2175 2176 return 0; 2177 } 2178 2179 static int 2180 gen8_load_flex(struct i915_request *rq, 2181 struct intel_context *ce, 2182 const struct flex *flex, unsigned int count) 2183 { 2184 u32 *cs; 2185 2186 GEM_BUG_ON(!count || count > 63); 2187 2188 cs = intel_ring_begin(rq, 2 * count + 2); 2189 if (IS_ERR(cs)) 2190 return PTR_ERR(cs); 2191 2192 *cs++ = MI_LOAD_REGISTER_IMM(count); 2193 do { 2194 *cs++ = i915_mmio_reg_offset(flex->reg); 2195 *cs++ = flex->value; 2196 } while (flex++, --count); 2197 *cs++ = MI_NOOP; 2198 2199 intel_ring_advance(rq, cs); 2200 2201 return 0; 2202 } 2203 2204 static int gen8_modify_context(struct intel_context *ce, 2205 const struct flex *flex, unsigned int count) 2206 { 2207 struct i915_request *rq; 2208 int err; 2209 2210 rq = intel_engine_create_kernel_request(ce->engine); 2211 if (IS_ERR(rq)) 2212 return PTR_ERR(rq); 2213 2214 /* Serialise with the remote context */ 2215 err = intel_context_prepare_remote_request(ce, rq); 2216 if (err == 0) 2217 err = gen8_store_flex(rq, ce, flex, count); 2218 2219 i915_request_add(rq); 2220 return err; 2221 } 2222 2223 static int 2224 gen8_modify_self(struct intel_context *ce, 2225 const struct flex *flex, unsigned int count, 2226 struct i915_active *active) 2227 { 2228 struct i915_request *rq; 2229 int err; 2230 2231 intel_engine_pm_get(ce->engine); 2232 rq = i915_request_create(ce); 2233 intel_engine_pm_put(ce->engine); 2234 if (IS_ERR(rq)) 2235 return PTR_ERR(rq); 2236 2237 if (!IS_ERR_OR_NULL(active)) { 2238 err = i915_active_add_request(active, rq); 2239 if (err) 2240 goto err_add_request; 2241 } 2242 2243 err = gen8_load_flex(rq, ce, flex, count); 2244 if (err) 2245 goto err_add_request; 2246 2247 err_add_request: 2248 i915_request_add(rq); 2249 return err; 2250 } 2251 2252 static int gen8_configure_context(struct i915_gem_context *ctx, 2253 struct flex *flex, unsigned int count) 2254 { 2255 struct i915_gem_engines_iter it; 2256 struct intel_context *ce; 2257 int err = 0; 2258 2259 for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { 2260 GEM_BUG_ON(ce == ce->engine->kernel_context); 2261 2262 if (ce->engine->class != RENDER_CLASS) 2263 continue; 2264 2265 /* Otherwise OA settings will be set upon first use */ 2266 if (!intel_context_pin_if_active(ce)) 2267 continue; 2268 2269 flex->value = intel_sseu_make_rpcs(ce->engine->gt, &ce->sseu); 2270 err = gen8_modify_context(ce, flex, count); 2271 2272 intel_context_unpin(ce); 2273 if (err) 2274 break; 2275 } 2276 i915_gem_context_unlock_engines(ctx); 2277 2278 return err; 2279 } 2280 2281 static int gen12_configure_oar_context(struct i915_perf_stream *stream, 2282 struct i915_active *active) 2283 { 
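/*
	 * Two steps: rewrite the pinned context's image directly (via a
	 * kernel-context request) for the OACTXCONTROL setting, then emit
	 * the OAR control writes as LRIs from the pinned context itself so
	 * they are ordered with that context's own submissions.
	 */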
2284 int err; 2285 struct intel_context *ce = stream->pinned_ctx; 2286 u32 format = stream->oa_buffer.format; 2287 struct flex regs_context[] = { 2288 { 2289 GEN8_OACTXCONTROL, 2290 stream->perf->ctx_oactxctrl_offset + 1, 2291 active ? GEN8_OA_COUNTER_RESUME : 0, 2292 }, 2293 }; 2294 /* Offsets in regs_lri are not used since this configuration is only 2295 * applied using LRI. Initialize the correct offsets for posterity. 2296 */ 2297 #define GEN12_OAR_OACONTROL_OFFSET 0x5B0 2298 struct flex regs_lri[] = { 2299 { 2300 GEN12_OAR_OACONTROL, 2301 GEN12_OAR_OACONTROL_OFFSET + 1, 2302 (format << GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT) | 2303 (active ? GEN12_OAR_OACONTROL_COUNTER_ENABLE : 0) 2304 }, 2305 { 2306 RING_CONTEXT_CONTROL(ce->engine->mmio_base), 2307 CTX_CONTEXT_CONTROL, 2308 _MASKED_FIELD(GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE, 2309 active ? 2310 GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE : 2311 0) 2312 }, 2313 }; 2314 2315 /* Modify the context image of the pinned context with regs_context */ 2316 err = intel_context_lock_pinned(ce); 2317 if (err) 2318 return err; 2319 2320 err = gen8_modify_context(ce, regs_context, ARRAY_SIZE(regs_context)); 2321 intel_context_unlock_pinned(ce); 2322 if (err) 2323 return err; 2324 2325 /* Apply regs_lri using LRI with the pinned context */ 2326 return gen8_modify_self(ce, regs_lri, ARRAY_SIZE(regs_lri), active); 2327 } 2328 2329 /* 2330 * Manages updating the per-context aspects of the OA stream 2331 * configuration across all contexts. 2332 * 2333 * The awkward consideration here is that OACTXCONTROL controls the 2334 * exponent for periodic sampling which is primarily used for system 2335 * wide profiling where we'd like a consistent sampling period even in 2336 * the face of context switches. 2337 * 2338 * Our approach of updating the register state context (as opposed to 2339 * say using a workaround batch buffer) ensures that the hardware 2340 * won't automatically reload an out-of-date timer exponent even 2341 * transiently before a WA BB could be parsed. 2342 * 2343 * This function needs to: 2344 * - Ensure the currently running context's per-context OA state is 2345 *   updated 2346 * - Ensure that all existing contexts will have the correct per-context 2347 *   OA state if they are scheduled for use. 2348 * - Ensure any new contexts will be initialized with the correct 2349 *   per-context OA state. 2350 * 2351 * Note: it's only the RCS/Render context that has any OA state. 2352 * Note: the first flex register passed must always be R_PWR_CLK_STATE 2353 */ 2354 static int 2355 oa_configure_all_contexts(struct i915_perf_stream *stream, 2356 struct flex *regs, 2357 size_t num_regs, 2358 struct i915_active *active) 2359 { 2360 struct drm_i915_private *i915 = stream->perf->i915; 2361 struct intel_engine_cs *engine; 2362 struct i915_gem_context *ctx, *cn; 2363 int err; 2364 2365 lockdep_assert_held(&stream->perf->lock); 2366 2367 /* 2368 * The OA register config is set up through the context image. This image 2369 * might be written to by the GPU on context switch (in particular on 2370 * lite-restore). This means we can't safely update a context's image 2371 * if this context is scheduled/submitted to run on the GPU. 2372 * 2373 * We could emit the OA register config through the batch buffer but 2374 * this might leave a small interval of time where the OA unit is 2375 * configured at an invalid sampling period.
2376 * 2377 * Note that since we emit all requests from a single ring, there 2378 * is still an implicit global barrier here that may cause a high 2379 * priority context to wait for an otherwise independent low priority 2380 * context. Contexts idle at the time of reconfiguration are not 2381 * trapped behind the barrier. 2382 */ 2383 spin_lock(&i915->gem.contexts.lock); 2384 list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) { 2385 if (!kref_get_unless_zero(&ctx->ref)) 2386 continue; 2387 2388 spin_unlock(&i915->gem.contexts.lock); 2389 2390 err = gen8_configure_context(ctx, regs, num_regs); 2391 if (err) { 2392 i915_gem_context_put(ctx); 2393 return err; 2394 } 2395 2396 spin_lock(&i915->gem.contexts.lock); 2397 list_safe_reset_next(ctx, cn, link); 2398 i915_gem_context_put(ctx); 2399 } 2400 spin_unlock(&i915->gem.contexts.lock); 2401 2402 /* 2403 * After updating all other contexts, we need to modify ourselves. 2404 * If we don't modify the kernel_context, we do not get events while 2405 * idle. 2406 */ 2407 for_each_uabi_engine(engine, i915) { 2408 struct intel_context *ce = engine->kernel_context; 2409 2410 if (engine->class != RENDER_CLASS) 2411 continue; 2412 2413 regs[0].value = intel_sseu_make_rpcs(engine->gt, &ce->sseu); 2414 2415 err = gen8_modify_self(ce, regs, num_regs, active); 2416 if (err) 2417 return err; 2418 } 2419 2420 return 0; 2421 } 2422 2423 static int 2424 gen12_configure_all_contexts(struct i915_perf_stream *stream, 2425 const struct i915_oa_config *oa_config, 2426 struct i915_active *active) 2427 { 2428 struct flex regs[] = { 2429 { 2430 GEN8_R_PWR_CLK_STATE(RENDER_RING_BASE), 2431 CTX_R_PWR_CLK_STATE, 2432 }, 2433 }; 2434 2435 return oa_configure_all_contexts(stream, 2436 regs, ARRAY_SIZE(regs), 2437 active); 2438 } 2439 2440 static int 2441 lrc_configure_all_contexts(struct i915_perf_stream *stream, 2442 const struct i915_oa_config *oa_config, 2443 struct i915_active *active) 2444 { 2445 /* The MMIO offsets for Flex EU registers aren't contiguous */ 2446 const u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset; 2447 #define ctx_flexeuN(N) (ctx_flexeu0 + 2 * (N) + 1) 2448 struct flex regs[] = { 2449 { 2450 GEN8_R_PWR_CLK_STATE(RENDER_RING_BASE), 2451 CTX_R_PWR_CLK_STATE, 2452 }, 2453 { 2454 GEN8_OACTXCONTROL, 2455 stream->perf->ctx_oactxctrl_offset + 1, 2456 }, 2457 { EU_PERF_CNTL0, ctx_flexeuN(0) }, 2458 { EU_PERF_CNTL1, ctx_flexeuN(1) }, 2459 { EU_PERF_CNTL2, ctx_flexeuN(2) }, 2460 { EU_PERF_CNTL3, ctx_flexeuN(3) }, 2461 { EU_PERF_CNTL4, ctx_flexeuN(4) }, 2462 { EU_PERF_CNTL5, ctx_flexeuN(5) }, 2463 { EU_PERF_CNTL6, ctx_flexeuN(6) }, 2464 }; 2465 #undef ctx_flexeuN 2466 int i; 2467 2468 regs[1].value = 2469 (stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) | 2470 (stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) | 2471 GEN8_OA_COUNTER_RESUME; 2472 2473 for (i = 2; i < ARRAY_SIZE(regs); i++) 2474 regs[i].value = oa_config_flex_reg(oa_config, regs[i].reg); 2475 2476 return oa_configure_all_contexts(stream, 2477 regs, ARRAY_SIZE(regs), 2478 active); 2479 } 2480 2481 static int 2482 gen8_enable_metric_set(struct i915_perf_stream *stream, 2483 struct i915_active *active) 2484 { 2485 struct intel_uncore *uncore = stream->uncore; 2486 struct i915_oa_config *oa_config = stream->oa_config; 2487 int ret; 2488 2489 /* 2490 * We disable slice/unslice clock ratio change reports on SKL since 2491 * they are too noisy. 
The HW generates a lot of redundant reports 2492 * where the ratio hasn't really changed, causing needless work for the 2493 * processes reading them and increasing the chances we'll hit buffer 2494 * overruns. 2495 * 2496 * Although we don't currently use the 'disable overrun' OABUFFER 2497 * feature, it's worth noting that clock ratio reports have to be 2498 * disabled before considering using that feature since the HW doesn't 2499 * correctly block these reports. 2500 * 2501 * Currently none of the high-level metrics we have depend on knowing 2502 * this ratio to normalize. 2503 * 2504 * Note: This register is not saved and restored with the power context, 2505 * but that's OK considering that we disable RC6 while the OA unit is 2506 * enabled. 2507 * 2508 * The _INCLUDE_CLK_RATIO bit allows the slice/unslice frequency to 2509 * be read back from automatically triggered reports, as part of the 2510 * RPT_ID field. 2511 */ 2512 if (IS_GRAPHICS_VER(stream->perf->i915, 9, 11)) { 2513 intel_uncore_write(uncore, GEN8_OA_DEBUG, 2514 _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS | 2515 GEN9_OA_DEBUG_INCLUDE_CLK_RATIO)); 2516 } 2517 2518 /* 2519 * Update all contexts prior to writing the mux configurations as we need 2520 * to make sure all slices/subslices are ON before writing to NOA 2521 * registers. 2522 */ 2523 ret = lrc_configure_all_contexts(stream, oa_config, active); 2524 if (ret) 2525 return ret; 2526 2527 return emit_oa_config(stream, 2528 stream->oa_config, oa_context(stream), 2529 active); 2530 } 2531 2532 static u32 oag_report_ctx_switches(const struct i915_perf_stream *stream) 2533 { 2534 return _MASKED_FIELD(GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS, 2535 (stream->sample_flags & SAMPLE_OA_REPORT) ? 2536 0 : GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS); 2537 } 2538 2539 static int 2540 gen12_enable_metric_set(struct i915_perf_stream *stream, 2541 struct i915_active *active) 2542 { 2543 struct intel_uncore *uncore = stream->uncore; 2544 struct i915_oa_config *oa_config = stream->oa_config; 2545 bool periodic = stream->periodic; 2546 u32 period_exponent = stream->period_exponent; 2547 int ret; 2548 2549 intel_uncore_write(uncore, GEN12_OAG_OA_DEBUG, 2550 /* Disable clk ratio reports, like previous Gens. */ 2551 _MASKED_BIT_ENABLE(GEN12_OAG_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS | 2552 GEN12_OAG_OA_DEBUG_INCLUDE_CLK_RATIO) | 2553 /* 2554 * If the user didn't require OA reports, instruct 2555 * the hardware not to emit ctx switch reports. 2556 */ 2557 oag_report_ctx_switches(stream)); 2558 2559 intel_uncore_write(uncore, GEN12_OAG_OAGLBCTXCTRL, periodic ? 2560 (GEN12_OAG_OAGLBCTXCTRL_COUNTER_RESUME | 2561 GEN12_OAG_OAGLBCTXCTRL_TIMER_ENABLE | 2562 (period_exponent << GEN12_OAG_OAGLBCTXCTRL_TIMER_PERIOD_SHIFT)) 2563 : 0); 2564 2565 /* 2566 * Update all contexts prior to writing the mux configurations as we need 2567 * to make sure all slices/subslices are ON before writing to NOA 2568 * registers. 2569 */ 2570 ret = gen12_configure_all_contexts(stream, oa_config, active); 2571 if (ret) 2572 return ret; 2573 2574 /* 2575 * For Gen12, performance counters are context 2576 * saved/restored. Only enable it for the context that 2577 * requested this.
2578 */ 2579 if (stream->ctx) { 2580 ret = gen12_configure_oar_context(stream, active); 2581 if (ret) 2582 return ret; 2583 } 2584 2585 return emit_oa_config(stream, 2586 stream->oa_config, oa_context(stream), 2587 active); 2588 } 2589 2590 static void gen8_disable_metric_set(struct i915_perf_stream *stream) 2591 { 2592 struct intel_uncore *uncore = stream->uncore; 2593 2594 /* Reset all contexts' slices/subslices configurations. */ 2595 lrc_configure_all_contexts(stream, NULL, NULL); 2596 2597 intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0); 2598 } 2599 2600 static void gen11_disable_metric_set(struct i915_perf_stream *stream) 2601 { 2602 struct intel_uncore *uncore = stream->uncore; 2603 2604 /* Reset all contexts' slices/subslices configurations. */ 2605 lrc_configure_all_contexts(stream, NULL, NULL); 2606 2607 /* Make sure we disable noa to save power. */ 2608 intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0); 2609 } 2610 2611 static void gen12_disable_metric_set(struct i915_perf_stream *stream) 2612 { 2613 struct intel_uncore *uncore = stream->uncore; 2614 2615 /* Reset all contexts' slices/subslices configurations. */ 2616 gen12_configure_all_contexts(stream, NULL, NULL); 2617 2618 /* Disable the context save/restore of OAR counters. */ 2619 if (stream->ctx) 2620 gen12_configure_oar_context(stream, NULL); 2621 2622 /* Make sure we disable noa to save power. */ 2623 intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0); 2624 } 2625 2626 static void gen7_oa_enable(struct i915_perf_stream *stream) 2627 { 2628 struct intel_uncore *uncore = stream->uncore; 2629 struct i915_gem_context *ctx = stream->ctx; 2630 u32 ctx_id = stream->specific_ctx_id; 2631 bool periodic = stream->periodic; 2632 u32 period_exponent = stream->period_exponent; 2633 u32 report_format = stream->oa_buffer.format; 2634 2635 /* 2636 * Reset buf pointers so we don't forward reports from before now. 2637 * 2638 * Think carefully if considering trying to avoid this, since it 2639 * also ensures status flags and the buffer itself are cleared 2640 * in error paths, and we have checks for invalid reports based 2641 * on the assumption that certain fields are written to zeroed 2642 * memory which this helps maintain. 2643 */ 2644 gen7_init_oa_buffer(stream); 2645 2646 intel_uncore_write(uncore, GEN7_OACONTROL, 2647 (ctx_id & GEN7_OACONTROL_CTX_MASK) | 2648 (period_exponent << 2649 GEN7_OACONTROL_TIMER_PERIOD_SHIFT) | 2650 (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) | 2651 (report_format << GEN7_OACONTROL_FORMAT_SHIFT) | 2652 (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) | 2653 GEN7_OACONTROL_ENABLE); 2654 } 2655 2656 static void gen8_oa_enable(struct i915_perf_stream *stream) 2657 { 2658 struct intel_uncore *uncore = stream->uncore; 2659 u32 report_format = stream->oa_buffer.format; 2660 2661 /* 2662 * Reset buf pointers so we don't forward reports from before now. 2663 * 2664 * Think carefully if considering trying to avoid this, since it 2665 * also ensures status flags and the buffer itself are cleared 2666 * in error paths, and we have checks for invalid reports based 2667 * on the assumption that certain fields are written to zeroed 2668 * memory which this helps maintain.
2669 */ 2670 gen8_init_oa_buffer(stream); 2671 2672 /* 2673 * Note: we don't rely on the hardware to perform single context 2674 * filtering and instead filter on the cpu based on the context-id 2675 * field of reports 2676 */ 2677 intel_uncore_write(uncore, GEN8_OACONTROL, 2678 (report_format << GEN8_OA_REPORT_FORMAT_SHIFT) | 2679 GEN8_OA_COUNTER_ENABLE); 2680 } 2681 2682 static void gen12_oa_enable(struct i915_perf_stream *stream) 2683 { 2684 struct intel_uncore *uncore = stream->uncore; 2685 u32 report_format = stream->oa_buffer.format; 2686 2687 /* 2688 * If we don't want OA reports from the OA buffer, then we don't even 2689 * need to program the OAG unit. 2690 */ 2691 if (!(stream->sample_flags & SAMPLE_OA_REPORT)) 2692 return; 2693 2694 gen12_init_oa_buffer(stream); 2695 2696 intel_uncore_write(uncore, GEN12_OAG_OACONTROL, 2697 (report_format << GEN12_OAG_OACONTROL_OA_COUNTER_FORMAT_SHIFT) | 2698 GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE); 2699 } 2700 2701 /** 2702 * i915_oa_stream_enable - handle `I915_PERF_IOCTL_ENABLE` for OA stream 2703 * @stream: An i915 perf stream opened for OA metrics 2704 * 2705 * [Re]enables hardware periodic sampling according to the period configured 2706 * when opening the stream. This also starts a hrtimer that will periodically 2707 * check for data in the circular OA buffer for notifying userspace (e.g. 2708 * during a read() or poll()). 2709 */ 2710 static void i915_oa_stream_enable(struct i915_perf_stream *stream) 2711 { 2712 stream->pollin = false; 2713 2714 stream->perf->ops.oa_enable(stream); 2715 2716 if (stream->sample_flags & SAMPLE_OA_REPORT) 2717 hrtimer_start(&stream->poll_check_timer, 2718 ns_to_ktime(stream->poll_oa_period), 2719 HRTIMER_MODE_REL_PINNED); 2720 } 2721 2722 static void gen7_oa_disable(struct i915_perf_stream *stream) 2723 { 2724 struct intel_uncore *uncore = stream->uncore; 2725 2726 intel_uncore_write(uncore, GEN7_OACONTROL, 0); 2727 if (intel_wait_for_register(uncore, 2728 GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0, 2729 50)) 2730 drm_err(&stream->perf->i915->drm, 2731 "wait for OA to be disabled timed out\n"); 2732 } 2733 2734 static void gen8_oa_disable(struct i915_perf_stream *stream) 2735 { 2736 struct intel_uncore *uncore = stream->uncore; 2737 2738 intel_uncore_write(uncore, GEN8_OACONTROL, 0); 2739 if (intel_wait_for_register(uncore, 2740 GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0, 2741 50)) 2742 drm_err(&stream->perf->i915->drm, 2743 "wait for OA to be disabled timed out\n"); 2744 } 2745 2746 static void gen12_oa_disable(struct i915_perf_stream *stream) 2747 { 2748 struct intel_uncore *uncore = stream->uncore; 2749 2750 intel_uncore_write(uncore, GEN12_OAG_OACONTROL, 0); 2751 if (intel_wait_for_register(uncore, 2752 GEN12_OAG_OACONTROL, 2753 GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE, 0, 2754 50)) 2755 drm_err(&stream->perf->i915->drm, 2756 "wait for OA to be disabled timed out\n"); 2757 2758 intel_uncore_write(uncore, GEN12_OA_TLB_INV_CR, 1); 2759 if (intel_wait_for_register(uncore, 2760 GEN12_OA_TLB_INV_CR, 2761 1, 0, 2762 50)) 2763 drm_err(&stream->perf->i915->drm, 2764 "wait for OA tlb invalidate timed out\n"); 2765 } 2766 2767 /** 2768 * i915_oa_stream_disable - handle `I915_PERF_IOCTL_DISABLE` for OA stream 2769 * @stream: An i915 perf stream opened for OA metrics 2770 * 2771 * Stops the OA unit from periodically writing counter reports into the 2772 * circular OA buffer. This also stops the hrtimer that periodically checks for 2773 * data in the circular OA buffer, for notifying userspace. 
2774 */ 2775 static void i915_oa_stream_disable(struct i915_perf_stream *stream) 2776 { 2777 stream->perf->ops.oa_disable(stream); 2778 2779 if (stream->sample_flags & SAMPLE_OA_REPORT) 2780 hrtimer_cancel(&stream->poll_check_timer); 2781 } 2782 2783 static const struct i915_perf_stream_ops i915_oa_stream_ops = { 2784 .destroy = i915_oa_stream_destroy, 2785 .enable = i915_oa_stream_enable, 2786 .disable = i915_oa_stream_disable, 2787 .wait_unlocked = i915_oa_wait_unlocked, 2788 .poll_wait = i915_oa_poll_wait, 2789 .read = i915_oa_read, 2790 }; 2791 2792 static int i915_perf_stream_enable_sync(struct i915_perf_stream *stream) 2793 { 2794 struct i915_active *active; 2795 int err; 2796 2797 active = i915_active_create(); 2798 if (!active) 2799 return -ENOMEM; 2800 2801 err = stream->perf->ops.enable_metric_set(stream, active); 2802 if (err == 0) 2803 __i915_active_wait(active, TASK_UNINTERRUPTIBLE); 2804 2805 i915_active_put(active); 2806 return err; 2807 } 2808 2809 static void 2810 get_default_sseu_config(struct intel_sseu *out_sseu, 2811 struct intel_engine_cs *engine) 2812 { 2813 const struct sseu_dev_info *devinfo_sseu = &engine->gt->info.sseu; 2814 2815 *out_sseu = intel_sseu_from_device_info(devinfo_sseu); 2816 2817 if (GRAPHICS_VER(engine->i915) == 11) { 2818 /* 2819 * We only need subslice count so it doesn't matter which ones 2820 * we select - just turn off low bits in the amount of half of 2821 * all available subslices per slice. 2822 */ 2823 out_sseu->subslice_mask = 2824 ~(~0 << (hweight8(out_sseu->subslice_mask) / 2)); 2825 out_sseu->slice_mask = 0x1; 2826 } 2827 } 2828 2829 static int 2830 get_sseu_config(struct intel_sseu *out_sseu, 2831 struct intel_engine_cs *engine, 2832 const struct drm_i915_gem_context_param_sseu *drm_sseu) 2833 { 2834 if (drm_sseu->engine.engine_class != engine->uabi_class || 2835 drm_sseu->engine.engine_instance != engine->uabi_instance) 2836 return -EINVAL; 2837 2838 return i915_gem_user_to_context_sseu(engine->gt, drm_sseu, out_sseu); 2839 } 2840 2841 /** 2842 * i915_oa_stream_init - validate combined props for OA stream and init 2843 * @stream: An i915 perf stream 2844 * @param: The open parameters passed to `DRM_I915_PERF_OPEN` 2845 * @props: The property state that configures stream (individually validated) 2846 * 2847 * While read_properties_unlocked() validates properties in isolation it 2848 * doesn't ensure that the combination necessarily makes sense. 2849 * 2850 * At this point it has been determined that userspace wants a stream of 2851 * OA metrics, but still we need to further validate the combined 2852 * properties are OK. 2853 * 2854 * If the configuration makes sense then we can allocate memory for 2855 * a circular OA buffer and apply the requested metric set configuration. 2856 * 2857 * Returns: zero on success or a negative error code. 
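 *
 * For illustration, a minimal userspace open request for such a stream
 * might look like the following, where metrics_set_id is assumed to
 * have been read from sysfs (metrics/<uuid>/id):
 *
 *	u64 props[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *		.num_properties = ARRAY_SIZE(props) / 2,
 *		.properties_ptr = (uintptr_t)props,
 *	};
 *	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);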
2858 */ 2859 static int i915_oa_stream_init(struct i915_perf_stream *stream, 2860 struct drm_i915_perf_open_param *param, 2861 struct perf_open_properties *props) 2862 { 2863 struct drm_i915_private *i915 = stream->perf->i915; 2864 struct i915_perf *perf = stream->perf; 2865 int format_size; 2866 int ret; 2867 2868 if (!props->engine) { 2869 drm_dbg(&stream->perf->i915->drm, 2870 "OA engine not specified\n"); 2871 return -EINVAL; 2872 } 2873 2874 /* 2875 * If the sysfs metrics/ directory wasn't registered for some 2876 * reason then don't let userspace try their luck with config 2877 * IDs 2878 */ 2879 if (!perf->metrics_kobj) { 2880 drm_dbg(&stream->perf->i915->drm, 2881 "OA metrics weren't advertised via sysfs\n"); 2882 return -EINVAL; 2883 } 2884 2885 if (!(props->sample_flags & SAMPLE_OA_REPORT) && 2886 (GRAPHICS_VER(perf->i915) < 12 || !stream->ctx)) { 2887 drm_dbg(&stream->perf->i915->drm, 2888 "Only OA report sampling supported\n"); 2889 return -EINVAL; 2890 } 2891 2892 if (!perf->ops.enable_metric_set) { 2893 drm_dbg(&stream->perf->i915->drm, 2894 "OA unit not supported\n"); 2895 return -ENODEV; 2896 } 2897 2898 /* 2899 * To avoid the complexity of having to accurately filter 2900 * counter reports and marshal to the appropriate client 2901 * we currently only allow exclusive access 2902 */ 2903 if (perf->exclusive_stream) { 2904 drm_dbg(&stream->perf->i915->drm, 2905 "OA unit already in use\n"); 2906 return -EBUSY; 2907 } 2908 2909 if (!props->oa_format) { 2910 drm_dbg(&stream->perf->i915->drm, 2911 "OA report format not specified\n"); 2912 return -EINVAL; 2913 } 2914 2915 stream->engine = props->engine; 2916 stream->uncore = stream->engine->gt->uncore; 2917 2918 stream->sample_size = sizeof(struct drm_i915_perf_record_header); 2919 2920 format_size = perf->oa_formats[props->oa_format].size; 2921 2922 stream->sample_flags = props->sample_flags; 2923 stream->sample_size += format_size; 2924 2925 stream->oa_buffer.format_size = format_size; 2926 if (drm_WARN_ON(&i915->drm, stream->oa_buffer.format_size == 0)) 2927 return -EINVAL; 2928 2929 stream->hold_preemption = props->hold_preemption; 2930 2931 stream->oa_buffer.format = 2932 perf->oa_formats[props->oa_format].format; 2933 2934 stream->periodic = props->oa_periodic; 2935 if (stream->periodic) 2936 stream->period_exponent = props->oa_period_exponent; 2937 2938 if (stream->ctx) { 2939 ret = oa_get_render_ctx_id(stream); 2940 if (ret) { 2941 drm_dbg(&stream->perf->i915->drm, 2942 "Invalid context id to filter with\n"); 2943 return ret; 2944 } 2945 } 2946 2947 ret = alloc_noa_wait(stream); 2948 if (ret) { 2949 drm_dbg(&stream->perf->i915->drm, 2950 "Unable to allocate NOA wait batch buffer\n"); 2951 goto err_noa_wait_alloc; 2952 } 2953 2954 stream->oa_config = i915_perf_get_oa_config(perf, props->metrics_set); 2955 if (!stream->oa_config) { 2956 drm_dbg(&stream->perf->i915->drm, 2957 "Invalid OA config id=%i\n", props->metrics_set); 2958 ret = -EINVAL; 2959 goto err_config; 2960 } 2961 2962 /* PRM - observability performance counters: 2963 * 2964 * OACONTROL, performance counter enable, note: 2965 * 2966 * "When this bit is set, in order to have coherent counts, 2967 * RC6 power state and trunk clock gating must be disabled. 2968 * This can be achieved by programming MMIO registers as 2969 * 0xA094=0 and 0xA090[31]=1" 2970 * 2971 * In our case we are expecting that taking pm + FORCEWAKE 2972 * references will effectively disable RC6. 
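 *
 * (i.e. rather than poking 0xA094/0xA090 directly, the
 * intel_engine_pm_get() and intel_uncore_forcewake_get(FORCEWAKE_ALL)
 * calls below are relied upon to keep the device out of RC6 for the
 * lifetime of the stream; they are dropped again on stream destroy.)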
2973 */ 2974 intel_engine_pm_get(stream->engine); 2975 intel_uncore_forcewake_get(stream->uncore, FORCEWAKE_ALL); 2976 2977 ret = alloc_oa_buffer(stream); 2978 if (ret) 2979 goto err_oa_buf_alloc; 2980 2981 stream->ops = &i915_oa_stream_ops; 2982 2983 perf->sseu = props->sseu; 2984 WRITE_ONCE(perf->exclusive_stream, stream); 2985 2986 ret = i915_perf_stream_enable_sync(stream); 2987 if (ret) { 2988 drm_dbg(&stream->perf->i915->drm, 2989 "Unable to enable metric set\n"); 2990 goto err_enable; 2991 } 2992 2993 drm_dbg(&stream->perf->i915->drm, 2994 "opening stream oa config uuid=%s\n", 2995 stream->oa_config->uuid); 2996 2997 hrtimer_init(&stream->poll_check_timer, 2998 CLOCK_MONOTONIC, HRTIMER_MODE_REL); 2999 stream->poll_check_timer.function = oa_poll_check_timer_cb; 3000 init_waitqueue_head(&stream->poll_wq); 3001 spin_lock_init(&stream->oa_buffer.ptr_lock); 3002 3003 return 0; 3004 3005 err_enable: 3006 WRITE_ONCE(perf->exclusive_stream, NULL); 3007 perf->ops.disable_metric_set(stream); 3008 3009 free_oa_buffer(stream); 3010 3011 err_oa_buf_alloc: 3012 free_oa_configs(stream); 3013 3014 intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL); 3015 intel_engine_pm_put(stream->engine); 3016 3017 err_config: 3018 free_noa_wait(stream); 3019 3020 err_noa_wait_alloc: 3021 if (stream->ctx) 3022 oa_put_render_ctx_id(stream); 3023 3024 return ret; 3025 } 3026 3027 void i915_oa_init_reg_state(const struct intel_context *ce, 3028 const struct intel_engine_cs *engine) 3029 { 3030 struct i915_perf_stream *stream; 3031 3032 if (engine->class != RENDER_CLASS) 3033 return; 3034 3035 /* perf.exclusive_stream serialised by lrc_configure_all_contexts() */ 3036 stream = READ_ONCE(engine->i915->perf.exclusive_stream); 3037 if (stream && GRAPHICS_VER(stream->perf->i915) < 12) 3038 gen8_update_reg_state_unlocked(ce, stream); 3039 } 3040 3041 /** 3042 * i915_perf_read - handles read() FOP for i915 perf stream FDs 3043 * @file: An i915 perf stream file 3044 * @buf: destination buffer given by userspace 3045 * @count: the number of bytes userspace wants to read 3046 * @ppos: (inout) file seek position (unused) 3047 * 3048 * The entry point for handling a read() on a stream file descriptor from 3049 * userspace. Most of the work is left to the i915_perf_read_locked() and 3050 * &i915_perf_stream_ops->read but to save having stream implementations (of 3051 * which we might have multiple later) we handle blocking read here. 3052 * 3053 * We can also consistently treat trying to read from a disabled stream 3054 * as an IO error so implementations can assume the stream is enabled 3055 * while reading. 3056 * 3057 * Returns: The number of bytes copied or a negative error code on failure. 3058 */ 3059 static ssize_t i915_perf_read(struct file *file, 3060 char __user *buf, 3061 size_t count, 3062 loff_t *ppos) 3063 { 3064 struct i915_perf_stream *stream = file->private_data; 3065 struct i915_perf *perf = stream->perf; 3066 size_t offset = 0; 3067 int ret; 3068 3069 /* To ensure it's handled consistently we simply treat all reads of a 3070 * disabled stream as an error. In particular it might otherwise lead 3071 * to a deadlock for blocking file descriptors... 3072 */ 3073 if (!stream->enabled || !(stream->sample_flags & SAMPLE_OA_REPORT)) 3074 return -EIO; 3075 3076 if (!(file->f_flags & O_NONBLOCK)) { 3077 /* There's the small chance of false positives from 3078 * stream->ops->wait_unlocked. 3079 * 3080 * E.g. 
with single context filtering since we only wait until 3081 * oabuffer has >= 1 report we don't immediately know whether 3082 * any reports really belong to the current context 3083 */ 3084 do { 3085 ret = stream->ops->wait_unlocked(stream); 3086 if (ret) 3087 return ret; 3088 3089 mutex_lock(&perf->lock); 3090 ret = stream->ops->read(stream, buf, count, &offset); 3091 mutex_unlock(&perf->lock); 3092 } while (!offset && !ret); 3093 } else { 3094 mutex_lock(&perf->lock); 3095 ret = stream->ops->read(stream, buf, count, &offset); 3096 mutex_unlock(&perf->lock); 3097 } 3098 3099 /* We allow the poll checking to sometimes report false positive EPOLLIN 3100 * events where we might actually report EAGAIN on read() if there's 3101 * not really any data available. In this situation though we don't 3102 * want to enter a busy loop between poll() reporting a EPOLLIN event 3103 * and read() returning -EAGAIN. Clearing the oa.pollin state here 3104 * effectively ensures we back off until the next hrtimer callback 3105 * before reporting another EPOLLIN event. 3106 * The exception to this is if ops->read() returned -ENOSPC which means 3107 * that more OA data is available than could fit in the user provided 3108 * buffer. In this case we want the next poll() call to not block. 3109 */ 3110 if (ret != -ENOSPC) 3111 stream->pollin = false; 3112 3113 /* Possible values for ret are 0, -EFAULT, -ENOSPC, -EIO, ... */ 3114 return offset ?: (ret ?: -EAGAIN); 3115 } 3116 3117 static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer) 3118 { 3119 struct i915_perf_stream *stream = 3120 container_of(hrtimer, typeof(*stream), poll_check_timer); 3121 3122 if (oa_buffer_check_unlocked(stream)) { 3123 stream->pollin = true; 3124 wake_up(&stream->poll_wq); 3125 } 3126 3127 hrtimer_forward_now(hrtimer, 3128 ns_to_ktime(stream->poll_oa_period)); 3129 3130 return HRTIMER_RESTART; 3131 } 3132 3133 /** 3134 * i915_perf_poll_locked - poll_wait() with a suitable wait queue for stream 3135 * @stream: An i915 perf stream 3136 * @file: An i915 perf stream file 3137 * @wait: poll() state table 3138 * 3139 * For handling userspace polling on an i915 perf stream, this calls through to 3140 * &i915_perf_stream_ops->poll_wait to call poll_wait() with a wait queue that 3141 * will be woken for new stream data. 3142 * 3143 * Note: The &perf->lock mutex has been taken to serialize 3144 * with any non-file-operation driver hooks. 3145 * 3146 * Returns: any poll events that are ready without sleeping 3147 */ 3148 static __poll_t i915_perf_poll_locked(struct i915_perf_stream *stream, 3149 struct file *file, 3150 poll_table *wait) 3151 { 3152 __poll_t events = 0; 3153 3154 stream->ops->poll_wait(stream, file, wait); 3155 3156 /* Note: we don't explicitly check whether there's something to read 3157 * here since this path may be very hot depending on what else 3158 * userspace is polling, or on the timeout in use. We rely solely on 3159 * the hrtimer/oa_poll_check_timer_cb to notify us when there are 3160 * samples to read. 3161 */ 3162 if (stream->pollin) 3163 events |= EPOLLIN; 3164 3165 return events; 3166 } 3167 3168 /** 3169 * i915_perf_poll - call poll_wait() with a suitable wait queue for stream 3170 * @file: An i915 perf stream file 3171 * @wait: poll() state table 3172 * 3173 * For handling userspace polling on an i915 perf stream, this ensures 3174 * poll_wait() gets called with a wait queue that will be woken for new stream 3175 * data. 
3176 * 3177 * Note: Implementation deferred to i915_perf_poll_locked() 3178 * 3179 * Returns: any poll events that are ready without sleeping 3180 */ 3181 static __poll_t i915_perf_poll(struct file *file, poll_table *wait) 3182 { 3183 struct i915_perf_stream *stream = file->private_data; 3184 struct i915_perf *perf = stream->perf; 3185 __poll_t ret; 3186 3187 mutex_lock(&perf->lock); 3188 ret = i915_perf_poll_locked(stream, file, wait); 3189 mutex_unlock(&perf->lock); 3190 3191 return ret; 3192 } 3193 3194 /** 3195 * i915_perf_enable_locked - handle `I915_PERF_IOCTL_ENABLE` ioctl 3196 * @stream: A disabled i915 perf stream 3197 * 3198 * [Re]enables the associated capture of data for this stream. 3199 * 3200 * If a stream was previously enabled then there's currently no intention 3201 * to provide userspace any guarantee about the preservation of previously 3202 * buffered data. 3203 */ 3204 static void i915_perf_enable_locked(struct i915_perf_stream *stream) 3205 { 3206 if (stream->enabled) 3207 return; 3208 3209 /* Allow stream->ops->enable() to refer to this */ 3210 stream->enabled = true; 3211 3212 if (stream->ops->enable) 3213 stream->ops->enable(stream); 3214 3215 if (stream->hold_preemption) 3216 intel_context_set_nopreempt(stream->pinned_ctx); 3217 } 3218 3219 /** 3220 * i915_perf_disable_locked - handle `I915_PERF_IOCTL_DISABLE` ioctl 3221 * @stream: An enabled i915 perf stream 3222 * 3223 * Disables the associated capture of data for this stream. 3224 * 3225 * The intention is that disabling and re-enabling a stream will ideally be 3226 * cheaper than destroying and re-opening a stream with the same configuration, 3227 * though there are no formal guarantees about what state or buffered data 3228 * must be retained between disabling and re-enabling a stream. 3229 * 3230 * Note: while a stream is disabled it's considered an error for userspace 3231 * to attempt to read from the stream (-EIO). 3232 */ 3233 static void i915_perf_disable_locked(struct i915_perf_stream *stream) 3234 { 3235 if (!stream->enabled) 3236 return; 3237 3238 /* Allow stream->ops->disable() to refer to this */ 3239 stream->enabled = false; 3240 3241 if (stream->hold_preemption) 3242 intel_context_clear_nopreempt(stream->pinned_ctx); 3243 3244 if (stream->ops->disable) 3245 stream->ops->disable(stream); 3246 } 3247 3248 static long i915_perf_config_locked(struct i915_perf_stream *stream, 3249 unsigned long metrics_set) 3250 { 3251 struct i915_oa_config *config; 3252 long ret = stream->oa_config->id; 3253 3254 config = i915_perf_get_oa_config(stream->perf, metrics_set); 3255 if (!config) 3256 return -EINVAL; 3257 3258 if (config != stream->oa_config) { 3259 int err; 3260 3261 /* 3262 * If OA is bound to a specific context, emit the 3263 * reconfiguration inline from that context. The update 3264 * will then be ordered with respect to submission on that 3265 * context. 3266 * 3267 * When set globally, we use a low priority kernel context, 3268 * so it will effectively take effect when idle.
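 *
 * For reference, userspace switches metric sets on an open stream
 * (config_id being another registered metric set, e.g. read from its
 * sysfs id file) with:
 *
 *	ioctl(stream_fd, I915_PERF_IOCTL_CONFIG, config_id);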
3269 */ 3270 err = emit_oa_config(stream, config, oa_context(stream), NULL); 3271 if (!err) 3272 config = xchg(&stream->oa_config, config); 3273 else 3274 ret = err; 3275 } 3276 3277 i915_oa_config_put(config); 3278 3279 return ret; 3280 } 3281 3282 /** 3283 * i915_perf_ioctl_locked - support ioctl() usage with i915 perf stream FDs 3284 * @stream: An i915 perf stream 3285 * @cmd: the ioctl request 3286 * @arg: the ioctl data 3287 * 3288 * Note: The &perf->lock mutex has been taken to serialize 3289 * with any non-file-operation driver hooks. 3290 * 3291 * Returns: zero on success or a negative error code. Returns -EINVAL for 3292 * an unknown ioctl request. 3293 */ 3294 static long i915_perf_ioctl_locked(struct i915_perf_stream *stream, 3295 unsigned int cmd, 3296 unsigned long arg) 3297 { 3298 switch (cmd) { 3299 case I915_PERF_IOCTL_ENABLE: 3300 i915_perf_enable_locked(stream); 3301 return 0; 3302 case I915_PERF_IOCTL_DISABLE: 3303 i915_perf_disable_locked(stream); 3304 return 0; 3305 case I915_PERF_IOCTL_CONFIG: 3306 return i915_perf_config_locked(stream, arg); 3307 } 3308 3309 return -EINVAL; 3310 } 3311 3312 /** 3313 * i915_perf_ioctl - support ioctl() usage with i915 perf stream FDs 3314 * @file: An i915 perf stream file 3315 * @cmd: the ioctl request 3316 * @arg: the ioctl data 3317 * 3318 * Implementation deferred to i915_perf_ioctl_locked(). 3319 * 3320 * Returns: zero on success or a negative error code. Returns -EINVAL for 3321 * an unknown ioctl request. 3322 */ 3323 static long i915_perf_ioctl(struct file *file, 3324 unsigned int cmd, 3325 unsigned long arg) 3326 { 3327 struct i915_perf_stream *stream = file->private_data; 3328 struct i915_perf *perf = stream->perf; 3329 long ret; 3330 3331 mutex_lock(&perf->lock); 3332 ret = i915_perf_ioctl_locked(stream, cmd, arg); 3333 mutex_unlock(&perf->lock); 3334 3335 return ret; 3336 } 3337 3338 /** 3339 * i915_perf_destroy_locked - destroy an i915 perf stream 3340 * @stream: An i915 perf stream 3341 * 3342 * Frees all resources associated with the given i915 perf @stream, disabling 3343 * any associated data capture in the process. 3344 * 3345 * Note: The &perf->lock mutex has been taken to serialize 3346 * with any non-file-operation driver hooks. 3347 */ 3348 static void i915_perf_destroy_locked(struct i915_perf_stream *stream) 3349 { 3350 if (stream->enabled) 3351 i915_perf_disable_locked(stream); 3352 3353 if (stream->ops->destroy) 3354 stream->ops->destroy(stream); 3355 3356 if (stream->ctx) 3357 i915_gem_context_put(stream->ctx); 3358 3359 kfree(stream); 3360 } 3361 3362 /** 3363 * i915_perf_release - handles userspace close() of a stream file 3364 * @inode: anonymous inode associated with file 3365 * @file: An i915 perf stream file 3366 * 3367 * Cleans up any resources associated with an open i915 perf stream file. 3368 * 3369 * NB: close() can't really fail from the userspace point of view. 3370 * 3371 * Returns: zero on success or a negative error code. 3372 */ 3373 static int i915_perf_release(struct inode *inode, struct file *file) 3374 { 3375 struct i915_perf_stream *stream = file->private_data; 3376 struct i915_perf *perf = stream->perf; 3377 3378 mutex_lock(&perf->lock); 3379 i915_perf_destroy_locked(stream); 3380 mutex_unlock(&perf->lock); 3381 3382 /* Release the reference the perf stream kept on the driver. 
*/ 3383 drm_dev_put(&perf->i915->drm); 3384 3385 return 0; 3386 } 3387 3388 3389 static const struct file_operations fops = { 3390 .owner = THIS_MODULE, 3391 .llseek = no_llseek, 3392 .release = i915_perf_release, 3393 .poll = i915_perf_poll, 3394 .read = i915_perf_read, 3395 .unlocked_ioctl = i915_perf_ioctl, 3396 /* Our ioctls have no arguments, so it's safe to use the same function 3397 * to handle 32-bit compatibility. 3398 */ 3399 .compat_ioctl = i915_perf_ioctl, 3400 }; 3401 3402 3403 /** 3404 * i915_perf_open_ioctl_locked - DRM ioctl() for userspace to open a stream FD 3405 * @perf: i915 perf instance 3406 * @param: The open parameters passed to `DRM_I915_PERF_OPEN` 3407 * @props: individually validated u64 property value pairs 3408 * @file: drm file 3409 * 3410 * See i915_perf_ioctl_open() for interface details. 3411 * 3412 * Implements further stream config validation and stream initialization on 3413 * behalf of i915_perf_open_ioctl() with the &perf->lock mutex 3414 * taken to serialize with any non-file-operation driver hooks. 3415 * 3416 * Note: at this point the @props have only been validated in isolation and 3417 * it's still necessary to validate that the combination of properties makes 3418 * sense. 3419 * 3420 * In the case where userspace is interested in OA unit metrics then further 3421 * config validation and stream initialization details will be handled by 3422 * i915_oa_stream_init(). The code here should only validate config state that 3423 * will be relevant to all stream types / backends. 3424 * 3425 * Returns: zero on success or a negative error code. 3426 */ 3427 static int 3428 i915_perf_open_ioctl_locked(struct i915_perf *perf, 3429 struct drm_i915_perf_open_param *param, 3430 struct perf_open_properties *props, 3431 struct drm_file *file) 3432 { 3433 struct i915_gem_context *specific_ctx = NULL; 3434 struct i915_perf_stream *stream = NULL; 3435 unsigned long f_flags = 0; 3436 bool privileged_op = true; 3437 int stream_fd; 3438 int ret; 3439 3440 if (props->single_context) { 3441 u32 ctx_handle = props->ctx_handle; 3442 struct drm_i915_file_private *file_priv = file->driver_priv; 3443 3444 specific_ctx = i915_gem_context_lookup(file_priv, ctx_handle); 3445 if (IS_ERR(specific_ctx)) { 3446 drm_dbg(&perf->i915->drm, 3447 "Failed to look up context with ID %u for opening perf stream\n", 3448 ctx_handle); 3449 ret = PTR_ERR(specific_ctx); 3450 goto err; 3451 } 3452 } 3453 3454 /* 3455 * On Haswell the OA unit supports clock gating off for a specific 3456 * context and in this mode there's no visibility of metrics for the 3457 * rest of the system, which we consider acceptable for a 3458 * non-privileged client. 3459 * 3460 * For Gen8->11 the OA unit no longer supports clock gating off for a 3461 * specific context and the kernel can't securely stop the counters 3462 * from updating as system-wide / global values. Even though we can 3463 * filter reports based on the included context ID we can't block 3464 * clients from seeing the raw / global counter values via 3465 * MI_REPORT_PERF_COUNT commands and so consider it a privileged op to 3466 * enable the OA unit by default. 3467 * 3468 * For Gen12+ we gain a new OAR unit that only monitors the RCS on a 3469 * per context basis. So we can relax requirements there if the user 3470 * doesn't request global stream access (i.e. query-based sampling 3471 * using MI_REPORT_PERF_COUNT).
	if (IS_HASWELL(perf->i915) && specific_ctx)
		privileged_op = false;
	else if (GRAPHICS_VER(perf->i915) == 12 && specific_ctx &&
		 (props->sample_flags & SAMPLE_OA_REPORT) == 0)
		privileged_op = false;

	if (props->hold_preemption) {
		if (!props->single_context) {
			drm_dbg(&perf->i915->drm,
				"preemption disable with no context\n");
			ret = -EINVAL;
			goto err;
		}
		privileged_op = true;
	}

	/*
	 * Asking for SSEU configuration is a privileged operation.
	 */
	if (props->has_sseu)
		privileged_op = true;
	else
		get_default_sseu_config(&props->sseu, props->engine);

	/* Similar to perf's kernel.perf_paranoid_cpu sysctl option
	 * we check a dev.i915.perf_stream_paranoid sysctl option
	 * to determine if it's ok to access system wide OA counters
	 * without CAP_PERFMON or CAP_SYS_ADMIN privileges.
	 */
	if (privileged_op &&
	    i915_perf_stream_paranoid && !perfmon_capable()) {
		drm_dbg(&perf->i915->drm,
			"Insufficient privileges to open i915 perf stream\n");
		ret = -EACCES;
		goto err_ctx;
	}

	stream = kzalloc(sizeof(*stream), GFP_KERNEL);
	if (!stream) {
		ret = -ENOMEM;
		goto err_ctx;
	}

	stream->perf = perf;
	stream->ctx = specific_ctx;
	stream->poll_oa_period = props->poll_oa_period;

	ret = i915_oa_stream_init(stream, param, props);
	if (ret)
		goto err_alloc;

	/* we avoid simply assigning stream->sample_flags = props->sample_flags
	 * to have _stream_init check the combination of sample flags more
	 * thoroughly, but still this is the expected result at this point.
	 */
	if (WARN_ON(stream->sample_flags != props->sample_flags)) {
		ret = -ENODEV;
		goto err_flags;
	}

	if (param->flags & I915_PERF_FLAG_FD_CLOEXEC)
		f_flags |= O_CLOEXEC;
	if (param->flags & I915_PERF_FLAG_FD_NONBLOCK)
		f_flags |= O_NONBLOCK;

	stream_fd = anon_inode_getfd("[i915_perf]", &fops, stream, f_flags);
	if (stream_fd < 0) {
		ret = stream_fd;
		goto err_flags;
	}

	if (!(param->flags & I915_PERF_FLAG_DISABLED))
		i915_perf_enable_locked(stream);

	/* Take a reference on the driver that will be kept with stream_fd
	 * until its release.
	 */
	drm_dev_get(&perf->i915->drm);

	return stream_fd;

err_flags:
	if (stream->ops->destroy)
		stream->ops->destroy(stream);
err_alloc:
	kfree(stream);
err_ctx:
	if (specific_ctx)
		i915_gem_context_put(specific_ctx);
err:
	return ret;
}

static u64 oa_exponent_to_ns(struct i915_perf *perf, int exponent)
{
	return intel_gt_clock_interval_to_ns(to_gt(perf->i915),
					     2ULL << exponent);
}
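/*
 * Illustration of the conversion above (the numbers are hypothetical, not
 * tied to any specific platform): the sampling period is 2^(exponent + 1)
 * timestamp ticks. Assuming a 12.5 MHz timestamp clock (80ns per tick),
 * exponent 5 selects 2 << 5 = 64 ticks, i.e. one report roughly every
 * 5.1us, or about 195kHz.
 */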
static __always_inline bool
oa_format_valid(struct i915_perf *perf, enum drm_i915_oa_format format)
{
	return test_bit(format, perf->format_mask);
}

static __always_inline void
oa_format_add(struct i915_perf *perf, enum drm_i915_oa_format format)
{
	__set_bit(format, perf->format_mask);
}

/**
 * read_properties_unlocked - validate + copy userspace stream open properties
 * @perf: i915 perf instance
 * @uprops: The array of u64 key value pairs given by userspace
 * @n_props: The number of key value pairs expected in @uprops
 * @props: The stream configuration built up while validating properties
 *
 * Note this function only validates properties in isolation; it doesn't
 * validate that the combination of properties makes sense or that all
 * properties necessary for a particular kind of stream have been set.
 *
 * Note that there currently aren't any ordering requirements for properties so
 * we shouldn't validate or assume anything about ordering here. This doesn't
 * rule out defining new properties with ordering requirements in the future.
 */
static int read_properties_unlocked(struct i915_perf *perf,
				    u64 __user *uprops,
				    u32 n_props,
				    struct perf_open_properties *props)
{
	u64 __user *uprop = uprops;
	u32 i;
	int ret;

	memset(props, 0, sizeof(struct perf_open_properties));
	props->poll_oa_period = DEFAULT_POLL_PERIOD_NS;

	if (!n_props) {
		drm_dbg(&perf->i915->drm,
			"No i915 perf properties given\n");
		return -EINVAL;
	}

	/* At the moment we only support using i915-perf on the RCS. */
	props->engine = intel_engine_lookup_user(perf->i915,
						 I915_ENGINE_CLASS_RENDER,
						 0);
	if (!props->engine) {
		drm_dbg(&perf->i915->drm,
			"No RENDER-capable engines\n");
		return -EINVAL;
	}

	/* Considering that ID = 0 is reserved and assuming that we don't
	 * (currently) expect any configurations to ever specify duplicate
	 * values for a particular property ID then the last _PROP_MAX value is
	 * one greater than the maximum number of properties we expect to get
	 * from userspace.
	 */
	if (n_props >= DRM_I915_PERF_PROP_MAX) {
		drm_dbg(&perf->i915->drm,
			"More i915 perf properties specified than exist\n");
		return -EINVAL;
	}

	for (i = 0; i < n_props; i++) {
		u64 oa_period, oa_freq_hz;
		u64 id, value;

		ret = get_user(id, uprop);
		if (ret)
			return ret;

		ret = get_user(value, uprop + 1);
		if (ret)
			return ret;

		if (id == 0 || id >= DRM_I915_PERF_PROP_MAX) {
			drm_dbg(&perf->i915->drm,
				"Unknown i915 perf property ID\n");
			return -EINVAL;
		}

		switch ((enum drm_i915_perf_property_id)id) {
		case DRM_I915_PERF_PROP_CTX_HANDLE:
			props->single_context = 1;
			props->ctx_handle = value;
			break;
		case DRM_I915_PERF_PROP_SAMPLE_OA:
			if (value)
				props->sample_flags |= SAMPLE_OA_REPORT;
			break;
		case DRM_I915_PERF_PROP_OA_METRICS_SET:
			if (value == 0) {
				drm_dbg(&perf->i915->drm,
					"Unknown OA metric set ID\n");
				return -EINVAL;
			}
			props->metrics_set = value;
			break;
		case DRM_I915_PERF_PROP_OA_FORMAT:
			if (value == 0 || value >= I915_OA_FORMAT_MAX) {
				drm_dbg(&perf->i915->drm,
					"Out-of-range OA report format %llu\n",
					value);
				return -EINVAL;
			}
			if (!oa_format_valid(perf, value)) {
				drm_dbg(&perf->i915->drm,
					"Unsupported OA report format %llu\n",
					value);
				return -EINVAL;
			}
			props->oa_format = value;
			break;
		case DRM_I915_PERF_PROP_OA_EXPONENT:
			if (value > OA_EXPONENT_MAX) {
				drm_dbg(&perf->i915->drm,
					"OA timer exponent too high (> %u)\n",
					OA_EXPONENT_MAX);
				return -EINVAL;
			}

			/* Theoretically we can program the OA unit to sample
			 * e.g. every 160ns for HSW, 167ns for BDW/SKL or 104ns
			 * for BXT. We don't allow such high sampling
			 * frequencies by default unless root.
			 */

			BUILD_BUG_ON(sizeof(oa_period) != 8);
			oa_period = oa_exponent_to_ns(perf, value);

			/* This check is primarily to ensure that oa_period <=
			 * UINT32_MAX (before passing to do_div which only
			 * accepts a u32 denominator), but we can also skip
			 * checking anything < 1Hz which implicitly can't be
			 * limited via an integer oa_max_sample_rate.
			 */
			if (oa_period <= NSEC_PER_SEC) {
				u64 tmp = NSEC_PER_SEC;

				do_div(tmp, oa_period);
				oa_freq_hz = tmp;
			} else
				oa_freq_hz = 0;

			if (oa_freq_hz > i915_oa_max_sample_rate && !perfmon_capable()) {
				drm_dbg(&perf->i915->drm,
					"OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without CAP_PERFMON or CAP_SYS_ADMIN privileges\n",
					i915_oa_max_sample_rate);
				return -EACCES;
			}

			props->oa_periodic = true;
			props->oa_period_exponent = value;
			break;
		case DRM_I915_PERF_PROP_HOLD_PREEMPTION:
			props->hold_preemption = !!value;
			break;
		case DRM_I915_PERF_PROP_GLOBAL_SSEU: {
			struct drm_i915_gem_context_param_sseu user_sseu;

			if (GRAPHICS_VER_FULL(perf->i915) >= IP_VER(12, 50)) {
				drm_dbg(&perf->i915->drm,
					"SSEU config not supported on gfx %x\n",
					GRAPHICS_VER_FULL(perf->i915));
				return -ENODEV;
			}

			if (copy_from_user(&user_sseu,
					   u64_to_user_ptr(value),
					   sizeof(user_sseu))) {
				drm_dbg(&perf->i915->drm,
					"Unable to copy global sseu parameter\n");
				return -EFAULT;
			}

			ret = get_sseu_config(&props->sseu, props->engine, &user_sseu);
			if (ret) {
				drm_dbg(&perf->i915->drm,
					"Invalid SSEU configuration\n");
				return ret;
			}
			props->has_sseu = true;
			break;
		}
		case DRM_I915_PERF_PROP_POLL_OA_PERIOD:
			if (value < 100000 /* 100us */) {
				drm_dbg(&perf->i915->drm,
					"OA availability timer too small (%lluns < 100us)\n",
					value);
				return -EINVAL;
			}
			props->poll_oa_period = value;
			break;
		case DRM_I915_PERF_PROP_MAX:
			MISSING_CASE(id);
			return -EINVAL;
		}

		uprop += 2;
	}

	return 0;
}

/**
 * i915_perf_open_ioctl - DRM ioctl() for userspace to open a stream FD
 * @dev: drm device
 * @data: ioctl data copied from userspace (unvalidated)
 * @file: drm file
 *
 * Validates the stream open parameters given by userspace including flags
 * and an array of u64 key, value pair properties.
 *
 * Very little is assumed up front about the nature of the stream being
 * opened (for instance we don't assume it's for periodic OA unit metrics). An
 * i915-perf stream is expected to be a suitable interface for other forms of
 * buffered data written by the GPU besides periodic OA metrics.
 *
 * Note we copy the properties from userspace outside of the i915 perf
 * mutex to avoid an awkward lockdep with mmap_lock.
 *
 * Most of the implementation details are handled by
 * i915_perf_open_ioctl_locked() after taking the &perf->lock
 * mutex for serializing with any non-file-operation driver hooks.
 *
 * Return: A newly opened i915 Perf stream file descriptor or negative
 * error code on failure.
 */
int i915_perf_open_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct i915_perf *perf = &to_i915(dev)->perf;
	struct drm_i915_perf_open_param *param = data;
	struct perf_open_properties props;
	u32 known_open_flags;
	int ret;

	if (!perf->i915) {
		drm_dbg(&perf->i915->drm,
			"i915 perf interface not available for this system\n");
		return -ENOTSUPP;
	}

	known_open_flags = I915_PERF_FLAG_FD_CLOEXEC |
			   I915_PERF_FLAG_FD_NONBLOCK |
			   I915_PERF_FLAG_DISABLED;
	if (param->flags & ~known_open_flags) {
		drm_dbg(&perf->i915->drm,
			"Unknown drm_i915_perf_open_param flag\n");
		return -EINVAL;
	}

	ret = read_properties_unlocked(perf,
				       u64_to_user_ptr(param->properties_ptr),
				       param->num_properties,
				       &props);
	if (ret)
		return ret;

	mutex_lock(&perf->lock);
	ret = i915_perf_open_ioctl_locked(perf, param, &props, file);
	mutex_unlock(&perf->lock);

	return ret;
}
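/*
 * Example: a hedged userspace sketch (illustrative only, not driver code)
 * of opening a periodic OA stream via the ioctl above. The metrics set ID
 * passed in is a placeholder that would normally be read back from the
 * metrics/<uuid>/id file advertised in sysfs:
 *
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *	#include <i915_drm.h>
 *
 *	static int open_oa_stream(int drm_fd, uint64_t metrics_set)
 *	{
 *		uint64_t props[] = {
 *			DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *			DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set,
 *			DRM_I915_PERF_PROP_OA_FORMAT,
 *				I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *			DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *		};
 *		struct drm_i915_perf_open_param param = {
 *			.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *			.num_properties = sizeof(props) / (2 * sizeof(uint64_t)),
 *			.properties_ptr = (uintptr_t)props,
 *		};
 *
 *		// On success the return value is the new stream fd.
 *		return ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 *	}
 */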
/**
 * i915_perf_register - exposes i915-perf to userspace
 * @i915: i915 device instance
 *
 * In particular OA metric sets are advertised under a sysfs metrics/
 * directory allowing userspace to enumerate valid IDs that can be
 * used to open an i915-perf stream.
 */
void i915_perf_register(struct drm_i915_private *i915)
{
	struct i915_perf *perf = &i915->perf;

	if (!perf->i915)
		return;

	/* To be sure we're synchronized with an attempted
	 * i915_perf_open_ioctl(); considering that we register after
	 * being exposed to userspace.
	 */
	mutex_lock(&perf->lock);

	perf->metrics_kobj =
		kobject_create_and_add("metrics",
				       &i915->drm.primary->kdev->kobj);

	mutex_unlock(&perf->lock);
}
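/*
 * Example: a hedged userspace sketch (illustrative only; the card0 path is
 * an assumption about the target system) of resolving a metric set's ID
 * from the sysfs directory registered above:
 *
 *	#include <inttypes.h>
 *	#include <stdio.h>
 *
 *	static uint64_t read_metric_set_id(const char *uuid)
 *	{
 *		char path[128];
 *		uint64_t id = 0;
 *		FILE *f;
 *
 *		snprintf(path, sizeof(path),
 *			 "/sys/class/drm/card0/metrics/%s/id", uuid);
 *		f = fopen(path, "r");
 *		if (f) {
 *			if (fscanf(f, "%" SCNu64, &id) != 1)
 *				id = 0;
 *			fclose(f);
 *		}
 *		return id;
 *	}
 */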
/**
 * i915_perf_unregister - hide i915-perf from userspace
 * @i915: i915 device instance
 *
 * i915-perf state cleanup is split up into an 'unregister' and
 * 'deinit' phase where the interface is first hidden from
 * userspace by i915_perf_unregister() before cleaning up
 * remaining state in i915_perf_fini().
 */
void i915_perf_unregister(struct drm_i915_private *i915)
{
	struct i915_perf *perf = &i915->perf;

	if (!perf->metrics_kobj)
		return;

	kobject_put(perf->metrics_kobj);
	perf->metrics_kobj = NULL;
}

static bool gen8_is_valid_flex_addr(struct i915_perf *perf, u32 addr)
{
	static const i915_reg_t flex_eu_regs[] = {
		EU_PERF_CNTL0,
		EU_PERF_CNTL1,
		EU_PERF_CNTL2,
		EU_PERF_CNTL3,
		EU_PERF_CNTL4,
		EU_PERF_CNTL5,
		EU_PERF_CNTL6,
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(flex_eu_regs); i++) {
		if (i915_mmio_reg_offset(flex_eu_regs[i]) == addr)
			return true;
	}
	return false;
}

static bool reg_in_range_table(u32 addr, const struct i915_range *table)
{
	while (table->start || table->end) {
		if (addr >= table->start && addr <= table->end)
			return true;

		table++;
	}

	return false;
}

#define REG_EQUAL(addr, mmio) \
	((addr) == i915_mmio_reg_offset(mmio))

static const struct i915_range gen7_oa_b_counters[] = {
	{ .start = 0x2710, .end = 0x272c },	/* OASTARTTRIG[1-8] */
	{ .start = 0x2740, .end = 0x275c },	/* OAREPORTTRIG[1-8] */
	{ .start = 0x2770, .end = 0x27ac },	/* OACEC[0-7][0-1] */
	{}
};

static const struct i915_range gen12_oa_b_counters[] = {
	{ .start = 0x2b2c, .end = 0x2b2c },	/* GEN12_OAG_OA_PESS */
	{ .start = 0xd900, .end = 0xd91c },	/* GEN12_OAG_OASTARTTRIG[1-8] */
	{ .start = 0xd920, .end = 0xd93c },	/* GEN12_OAG_OAREPORTTRIG1[1-8] */
	{ .start = 0xd940, .end = 0xd97c },	/* GEN12_OAG_CEC[0-7][0-1] */
	{ .start = 0xdc00, .end = 0xdc3c },	/* GEN12_OAG_SCEC[0-7][0-1] */
	{ .start = 0xdc40, .end = 0xdc40 },	/* GEN12_OAG_SPCTR_CNF */
	{ .start = 0xdc44, .end = 0xdc44 },	/* GEN12_OAA_DBG_REG */
	{}
};

static const struct i915_range gen7_oa_mux_regs[] = {
	{ .start = 0x91b8, .end = 0x91cc },	/* OA_PERFCNT[1-2], OA_PERFMATRIX */
	{ .start = 0x9800, .end = 0x9888 },	/* MICRO_BP0_0 - NOA_WRITE */
	{ .start = 0xe180, .end = 0xe180 },	/* HALF_SLICE_CHICKEN2 */
	{}
};

static const struct i915_range hsw_oa_mux_regs[] = {
	{ .start = 0x09e80, .end = 0x09ea4 },	/* HSW_MBVID2_NOA[0-9] */
	{ .start = 0x09ec0, .end = 0x09ec0 },	/* HSW_MBVID2_MISR0 */
	{ .start = 0x25100, .end = 0x2ff90 },
	{}
};

static const struct i915_range chv_oa_mux_regs[] = {
	{ .start = 0x182300, .end = 0x1823a4 },
	{}
};

static const struct i915_range gen8_oa_mux_regs[] = {
	{ .start = 0x0d00, .end = 0x0d2c },	/* RPM_CONFIG[0-1], NOA_CONFIG[0-8] */
	{ .start = 0x20cc, .end = 0x20cc },	/* WAIT_FOR_RC6_EXIT */
	{}
};

static const struct i915_range gen11_oa_mux_regs[] = {
	{ .start = 0x91c8, .end = 0x91dc },	/* OA_PERFCNT[3-4] */
	{}
};

static const struct i915_range gen12_oa_mux_regs[] = {
	{ .start = 0x0d00, .end = 0x0d04 },	/* RPM_CONFIG[0-1] */
	{ .start = 0x0d0c, .end = 0x0d2c },	/* NOA_CONFIG[0-8] */
	{ .start = 0x9840, .end = 0x9840 },	/* GDT_CHICKEN_BITS */
	{ .start = 0x9884, .end = 0x9888 },	/* NOA_WRITE */
	{ .start = 0x20cc, .end = 0x20cc },	/* WAIT_FOR_RC6_EXIT */
	{}
};

static bool gen7_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
{
	return reg_in_range_table(addr, gen7_oa_b_counters);
}
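/*
 * Note on the tables above: each one is terminated by an empty sentinel
 * entry, which is what ends the walk in reg_in_range_table(). For example
 * (values taken from gen12_oa_b_counters above):
 *
 *	reg_in_range_table(0x2b2c, gen12_oa_b_counters);  // true, first range
 *	reg_in_range_table(0x2b30, gen12_oa_b_counters);  // false, hits {}
 */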
static bool gen8_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
	return reg_in_range_table(addr, gen7_oa_mux_regs) ||
	       reg_in_range_table(addr, gen8_oa_mux_regs);
}

static bool gen11_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
	return reg_in_range_table(addr, gen7_oa_mux_regs) ||
	       reg_in_range_table(addr, gen8_oa_mux_regs) ||
	       reg_in_range_table(addr, gen11_oa_mux_regs);
}

static bool hsw_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
	return reg_in_range_table(addr, gen7_oa_mux_regs) ||
	       reg_in_range_table(addr, hsw_oa_mux_regs);
}

static bool chv_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
	return reg_in_range_table(addr, gen7_oa_mux_regs) ||
	       reg_in_range_table(addr, chv_oa_mux_regs);
}

static bool gen12_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
{
	return reg_in_range_table(addr, gen12_oa_b_counters);
}

static bool gen12_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
	return reg_in_range_table(addr, gen12_oa_mux_regs);
}

static u32 mask_reg_value(u32 reg, u32 val)
{
	/* HALF_SLICE_CHICKEN2 is programmed with the
	 * WaDisableSTUnitPowerOptimization workaround. Make sure the value
	 * programmed by userspace doesn't change this.
	 */
	if (REG_EQUAL(reg, HALF_SLICE_CHICKEN2))
		val = val & ~_MASKED_BIT_ENABLE(GEN8_ST_PO_DISABLE);

	/* WAIT_FOR_RC6_EXIT has only one bit fulfilling the function
	 * indicated by its name and a bunch of selection fields used by OA
	 * configs.
	 */
	if (REG_EQUAL(reg, WAIT_FOR_RC6_EXIT))
		val = val & ~_MASKED_BIT_ENABLE(HSW_WAIT_FOR_RC6_EXIT_ENABLE);

	return val;
}

static struct i915_oa_reg *alloc_oa_regs(struct i915_perf *perf,
					 bool (*is_valid)(struct i915_perf *perf, u32 addr),
					 u32 __user *regs,
					 u32 n_regs)
{
	struct i915_oa_reg *oa_regs;
	int err;
	u32 i;

	if (!n_regs)
		return NULL;

	/* No is_valid function means we're not allowing any register to be programmed. */
	GEM_BUG_ON(!is_valid);
	if (!is_valid)
		return ERR_PTR(-EINVAL);

	oa_regs = kmalloc_array(n_regs, sizeof(*oa_regs), GFP_KERNEL);
	if (!oa_regs)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < n_regs; i++) {
		u32 addr, value;

		err = get_user(addr, regs);
		if (err)
			goto addr_err;

		if (!is_valid(perf, addr)) {
			drm_dbg(&perf->i915->drm,
				"Invalid oa_reg address: %X\n", addr);
			err = -EINVAL;
			goto addr_err;
		}

		err = get_user(value, regs + 1);
		if (err)
			goto addr_err;

		oa_regs[i].addr = _MMIO(addr);
		oa_regs[i].value = mask_reg_value(addr, value);

		regs += 2;
	}

	return oa_regs;

addr_err:
	kfree(oa_regs);
	return ERR_PTR(err);
}

static ssize_t show_dynamic_id(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	struct i915_oa_config *oa_config =
		container_of(attr, typeof(*oa_config), sysfs_metric_id);

	return sprintf(buf, "%d\n", oa_config->id);
}

static int create_dynamic_oa_sysfs_entry(struct i915_perf *perf,
					 struct i915_oa_config *oa_config)
{
	sysfs_attr_init(&oa_config->sysfs_metric_id.attr);
	oa_config->sysfs_metric_id.attr.name = "id";
	oa_config->sysfs_metric_id.attr.mode = S_IRUGO;
	oa_config->sysfs_metric_id.show = show_dynamic_id;
	oa_config->sysfs_metric_id.store = NULL;

	oa_config->attrs[0] = &oa_config->sysfs_metric_id.attr;
	oa_config->attrs[1] = NULL;

	oa_config->sysfs_metric.name = oa_config->uuid;
	oa_config->sysfs_metric.attrs = oa_config->attrs;

	return sysfs_create_group(perf->metrics_kobj,
				  &oa_config->sysfs_metric);
}

/**
 * i915_perf_add_config_ioctl - DRM ioctl() for userspace to add a new OA config
 * @dev: drm device
 * @data: ioctl data (pointer to struct drm_i915_perf_oa_config) copied from
 *        userspace (unvalidated)
 * @file: drm file
 *
 * Validates the submitted OA registers to be saved into a new OA config that
 * can then be used for programming the OA unit and its NOA network.
 *
 * Returns: A newly allocated config number to be used with the perf open ioctl
 * or a negative error code on failure.
 */
int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct i915_perf *perf = &to_i915(dev)->perf;
	struct drm_i915_perf_oa_config *args = data;
	struct i915_oa_config *oa_config, *tmp;
	struct i915_oa_reg *regs;
	int err, id;

	if (!perf->i915) {
		drm_dbg(&perf->i915->drm,
			"i915 perf interface not available for this system\n");
		return -ENOTSUPP;
	}

	if (!perf->metrics_kobj) {
		drm_dbg(&perf->i915->drm,
			"OA metrics weren't advertised via sysfs\n");
		return -EINVAL;
	}

	if (i915_perf_stream_paranoid && !perfmon_capable()) {
		drm_dbg(&perf->i915->drm,
			"Insufficient privileges to add i915 OA config\n");
		return -EACCES;
	}

	if ((!args->mux_regs_ptr || !args->n_mux_regs) &&
	    (!args->boolean_regs_ptr || !args->n_boolean_regs) &&
	    (!args->flex_regs_ptr || !args->n_flex_regs)) {
		drm_dbg(&perf->i915->drm,
			"No OA registers given\n");
		return -EINVAL;
	}

	oa_config = kzalloc(sizeof(*oa_config), GFP_KERNEL);
	if (!oa_config) {
		drm_dbg(&perf->i915->drm,
			"Failed to allocate memory for the OA config\n");
		return -ENOMEM;
	}

	oa_config->perf = perf;
	kref_init(&oa_config->ref);

	if (!uuid_is_valid(args->uuid)) {
		drm_dbg(&perf->i915->drm,
			"Invalid uuid format for OA config\n");
		err = -EINVAL;
		goto reg_err;
	}

	/* Last character in oa_config->uuid will be 0 because oa_config was
	 * kzalloc'ed.
	 */
	memcpy(oa_config->uuid, args->uuid, sizeof(args->uuid));

	oa_config->mux_regs_len = args->n_mux_regs;
	regs = alloc_oa_regs(perf,
			     perf->ops.is_valid_mux_reg,
			     u64_to_user_ptr(args->mux_regs_ptr),
			     args->n_mux_regs);

	if (IS_ERR(regs)) {
		drm_dbg(&perf->i915->drm,
			"Failed to create OA config for mux_regs\n");
		err = PTR_ERR(regs);
		goto reg_err;
	}
	oa_config->mux_regs = regs;

	oa_config->b_counter_regs_len = args->n_boolean_regs;
	regs = alloc_oa_regs(perf,
			     perf->ops.is_valid_b_counter_reg,
			     u64_to_user_ptr(args->boolean_regs_ptr),
			     args->n_boolean_regs);

	if (IS_ERR(regs)) {
		drm_dbg(&perf->i915->drm,
			"Failed to create OA config for b_counter_regs\n");
		err = PTR_ERR(regs);
		goto reg_err;
	}
	oa_config->b_counter_regs = regs;

	if (GRAPHICS_VER(perf->i915) < 8) {
		if (args->n_flex_regs != 0) {
			err = -EINVAL;
			goto reg_err;
		}
	} else {
		oa_config->flex_regs_len = args->n_flex_regs;
		regs = alloc_oa_regs(perf,
				     perf->ops.is_valid_flex_reg,
				     u64_to_user_ptr(args->flex_regs_ptr),
				     args->n_flex_regs);

		if (IS_ERR(regs)) {
			drm_dbg(&perf->i915->drm,
				"Failed to create OA config for flex_regs\n");
			err = PTR_ERR(regs);
			goto reg_err;
		}
		oa_config->flex_regs = regs;
	}

	err = mutex_lock_interruptible(&perf->metrics_lock);
	if (err)
		goto reg_err;

	/* We shouldn't have too many configs, so this iteration shouldn't be
	 * too costly.
	 */
	idr_for_each_entry(&perf->metrics_idr, tmp, id) {
		if (!strcmp(tmp->uuid, oa_config->uuid)) {
			drm_dbg(&perf->i915->drm,
				"OA config already exists with this uuid\n");
			err = -EADDRINUSE;
			goto sysfs_err;
		}
	}

	err = create_dynamic_oa_sysfs_entry(perf, oa_config);
	if (err) {
		drm_dbg(&perf->i915->drm,
			"Failed to create sysfs entry for OA config\n");
		goto sysfs_err;
	}

	/* Config id 0 is invalid, id 1 for kernel stored test config. */
	oa_config->id = idr_alloc(&perf->metrics_idr,
				  oa_config, 2,
				  0, GFP_KERNEL);
	if (oa_config->id < 0) {
		drm_dbg(&perf->i915->drm,
			"Failed to allocate an ID for the OA config\n");
		err = oa_config->id;
		goto sysfs_err;
	}

	mutex_unlock(&perf->metrics_lock);

	drm_dbg(&perf->i915->drm,
		"Added config %s id=%i\n", oa_config->uuid, oa_config->id);

	return oa_config->id;

sysfs_err:
	mutex_unlock(&perf->metrics_lock);
reg_err:
	i915_oa_config_put(oa_config);
	drm_dbg(&perf->i915->drm,
		"Failed to add new OA config\n");
	return err;
}

/*
 * Example: a hedged userspace sketch (illustrative only; the UUID, the
 * register address and its value are placeholders) of registering a config
 * through the ioctl above. The addresses must still pass the per-platform
 * is_valid_* checks:
 *
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *	#include <i915_drm.h>
 *
 *	static int add_oa_config(int drm_fd)
 *	{
 *		// (address, value) pairs for the boolean counter unit.
 *		uint32_t b_counter_regs[] = { 0x2710, 0x0 };
 *		struct drm_i915_perf_oa_config config = {
 *			.uuid = "01234567-0123-0123-0123-0123456789ab",
 *			.n_boolean_regs = 1,
 *			.boolean_regs_ptr = (uintptr_t)b_counter_regs,
 *		};
 *
 *		// On success the return value is the new config ID (>= 2).
 *		return ioctl(drm_fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &config);
 *	}
 */

/**
 * i915_perf_remove_config_ioctl - DRM ioctl() for userspace to remove an OA config
 * @dev: drm device
 * @data: ioctl data (pointer to u64 integer) copied from userspace
 * @file: drm file
 *
 * Configs can be removed while being used; they will stop appearing in sysfs
 * and their content will be freed when the stream using the config is closed.
 *
 * Returns: 0 on success or a negative error code on failure.
 */
int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct i915_perf *perf = &to_i915(dev)->perf;
	u64 *arg = data;
	struct i915_oa_config *oa_config;
	int ret;

	if (!perf->i915) {
		drm_dbg(&perf->i915->drm,
			"i915 perf interface not available for this system\n");
		return -ENOTSUPP;
	}

	if (i915_perf_stream_paranoid && !perfmon_capable()) {
		drm_dbg(&perf->i915->drm,
			"Insufficient privileges to remove i915 OA config\n");
		return -EACCES;
	}

	ret = mutex_lock_interruptible(&perf->metrics_lock);
	if (ret)
		return ret;

	oa_config = idr_find(&perf->metrics_idr, *arg);
	if (!oa_config) {
		drm_dbg(&perf->i915->drm,
			"Failed to remove unknown OA config\n");
		ret = -ENOENT;
		goto err_unlock;
	}

	GEM_BUG_ON(*arg != oa_config->id);

	sysfs_remove_group(perf->metrics_kobj, &oa_config->sysfs_metric);

	idr_remove(&perf->metrics_idr, *arg);

	mutex_unlock(&perf->metrics_lock);

	drm_dbg(&perf->i915->drm,
		"Removed config %s id=%i\n", oa_config->uuid, oa_config->id);

	i915_oa_config_put(oa_config);

	return 0;

err_unlock:
	mutex_unlock(&perf->metrics_lock);
	return ret;
}
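/*
 * Example: the matching hedged userspace sketch for removing a config,
 * given the ID previously returned by DRM_IOCTL_I915_PERF_ADD_CONFIG
 * (illustrative only):
 *
 *	static int remove_oa_config(int drm_fd, uint64_t config_id)
 *	{
 *		return ioctl(drm_fd, DRM_IOCTL_I915_PERF_REMOVE_CONFIG,
 *			     &config_id);
 *	}
 */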
static struct ctl_table oa_table[] = {
	{
		.procname = "perf_stream_paranoid",
		.data = &i915_perf_stream_paranoid,
		.maxlen = sizeof(i915_perf_stream_paranoid),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_ONE,
	},
	{
		.procname = "oa_max_sample_rate",
		.data = &i915_oa_max_sample_rate,
		.maxlen = sizeof(i915_oa_max_sample_rate),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = SYSCTL_ZERO,
		.extra2 = &oa_sample_rate_hard_limit,
	},
	{}
};

static void oa_init_supported_formats(struct i915_perf *perf)
{
	struct drm_i915_private *i915 = perf->i915;
	enum intel_platform platform = INTEL_INFO(i915)->platform;

	switch (platform) {
	case INTEL_HASWELL:
		oa_format_add(perf, I915_OA_FORMAT_A13);
		oa_format_add(perf, I915_OA_FORMAT_A29);
		oa_format_add(perf, I915_OA_FORMAT_A13_B8_C8);
		oa_format_add(perf, I915_OA_FORMAT_B4_C8);
		oa_format_add(perf, I915_OA_FORMAT_A45_B8_C8);
		oa_format_add(perf, I915_OA_FORMAT_B4_C8_A16);
		oa_format_add(perf, I915_OA_FORMAT_C4_B8);
		break;

	case INTEL_BROADWELL:
	case INTEL_CHERRYVIEW:
	case INTEL_SKYLAKE:
	case INTEL_BROXTON:
	case INTEL_KABYLAKE:
	case INTEL_GEMINILAKE:
	case INTEL_COFFEELAKE:
	case INTEL_COMETLAKE:
	case INTEL_ICELAKE:
	case INTEL_ELKHARTLAKE:
	case INTEL_JASPERLAKE:
	case INTEL_TIGERLAKE:
	case INTEL_ROCKETLAKE:
	case INTEL_DG1:
	case INTEL_ALDERLAKE_S:
	case INTEL_ALDERLAKE_P:
		oa_format_add(perf, I915_OA_FORMAT_A12);
		oa_format_add(perf, I915_OA_FORMAT_A12_B8_C8);
		oa_format_add(perf, I915_OA_FORMAT_A32u40_A4u32_B8_C8);
		oa_format_add(perf, I915_OA_FORMAT_C4_B8);
		break;

	default:
		MISSING_CASE(platform);
	}
}

/**
 * i915_perf_init - initialize i915-perf state on module bind
 * @i915: i915 device instance
 *
 * Initializes i915-perf state without exposing anything to userspace.
 *
 * Note: i915-perf initialization is split into an 'init' and 'register'
 * phase with the i915_perf_register() exposing state to userspace.
 */
void i915_perf_init(struct drm_i915_private *i915)
{
	struct i915_perf *perf = &i915->perf;

	/* XXX const struct i915_perf_ops! */

	/* i915_perf is not enabled for DG2 yet */
	if (IS_DG2(i915))
		return;

	perf->oa_formats = oa_formats;
	if (IS_HASWELL(i915)) {
		perf->ops.is_valid_b_counter_reg = gen7_is_valid_b_counter_addr;
		perf->ops.is_valid_mux_reg = hsw_is_valid_mux_addr;
		perf->ops.is_valid_flex_reg = NULL;
		perf->ops.enable_metric_set = hsw_enable_metric_set;
		perf->ops.disable_metric_set = hsw_disable_metric_set;
		perf->ops.oa_enable = gen7_oa_enable;
		perf->ops.oa_disable = gen7_oa_disable;
		perf->ops.read = gen7_oa_read;
		perf->ops.oa_hw_tail_read = gen7_oa_hw_tail_read;
	} else if (HAS_LOGICAL_RING_CONTEXTS(i915)) {
		/* Note that although we could theoretically also support the
		 * legacy ringbuffer mode on BDW (and earlier iterations of
		 * this driver, before upstreaming did this) it didn't seem
		 * worth the complexity to maintain now that BDW+ enables
		 * execlist mode by default.
		 */
		perf->ops.read = gen8_oa_read;

		if (IS_GRAPHICS_VER(i915, 8, 9)) {
			perf->ops.is_valid_b_counter_reg =
				gen7_is_valid_b_counter_addr;
			perf->ops.is_valid_mux_reg =
				gen8_is_valid_mux_addr;
			perf->ops.is_valid_flex_reg =
				gen8_is_valid_flex_addr;

			if (IS_CHERRYVIEW(i915)) {
				perf->ops.is_valid_mux_reg =
					chv_is_valid_mux_addr;
			}

			perf->ops.oa_enable = gen8_oa_enable;
			perf->ops.oa_disable = gen8_oa_disable;
			perf->ops.enable_metric_set = gen8_enable_metric_set;
			perf->ops.disable_metric_set = gen8_disable_metric_set;
			perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;

			if (GRAPHICS_VER(i915) == 8) {
				perf->ctx_oactxctrl_offset = 0x120;
				perf->ctx_flexeu0_offset = 0x2ce;

				perf->gen8_valid_ctx_bit = BIT(25);
			} else {
				perf->ctx_oactxctrl_offset = 0x128;
				perf->ctx_flexeu0_offset = 0x3de;

				perf->gen8_valid_ctx_bit = BIT(16);
			}
		} else if (GRAPHICS_VER(i915) == 11) {
			perf->ops.is_valid_b_counter_reg =
				gen7_is_valid_b_counter_addr;
			perf->ops.is_valid_mux_reg =
				gen11_is_valid_mux_addr;
			perf->ops.is_valid_flex_reg =
				gen8_is_valid_flex_addr;

			perf->ops.oa_enable = gen8_oa_enable;
			perf->ops.oa_disable = gen8_oa_disable;
			perf->ops.enable_metric_set = gen8_enable_metric_set;
			perf->ops.disable_metric_set = gen11_disable_metric_set;
			perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;

			perf->ctx_oactxctrl_offset = 0x124;
			perf->ctx_flexeu0_offset = 0x78e;

			perf->gen8_valid_ctx_bit = BIT(16);
		} else if (GRAPHICS_VER(i915) == 12) {
			perf->ops.is_valid_b_counter_reg =
				gen12_is_valid_b_counter_addr;
			perf->ops.is_valid_mux_reg =
				gen12_is_valid_mux_addr;
			perf->ops.is_valid_flex_reg =
				gen8_is_valid_flex_addr;

			perf->ops.oa_enable = gen12_oa_enable;
			perf->ops.oa_disable = gen12_oa_disable;
			perf->ops.enable_metric_set = gen12_enable_metric_set;
			perf->ops.disable_metric_set = gen12_disable_metric_set;
			perf->ops.oa_hw_tail_read = gen12_oa_hw_tail_read;

			perf->ctx_flexeu0_offset = 0;
			perf->ctx_oactxctrl_offset = 0x144;
		}
	}

	if (perf->ops.enable_metric_set) {
		mutex_init(&perf->lock);

		/* Choose a representative limit */
		oa_sample_rate_hard_limit = to_gt(i915)->clock_frequency / 2;

		mutex_init(&perf->metrics_lock);
		idr_init_base(&perf->metrics_idr, 1);

		/* We set up some ratelimit state to potentially throttle any
		 * _NOTES about spurious, invalid OA reports which we don't
		 * forward to userspace.
		 *
		 * We print a _NOTE about any throttling when closing the
		 * stream instead of waiting until driver _fini which no one
		 * would ever see.
		 *
		 * Using the same limiting factors as printk_ratelimit()
		 */
		ratelimit_state_init(&perf->spurious_report_rs, 5 * HZ, 10);
		/* Since we use a DRM_NOTE for spurious reports it would be
		 * inconsistent to let __ratelimit() automatically print a
		 * warning for throttling.
		 */
		ratelimit_set_flags(&perf->spurious_report_rs,
				    RATELIMIT_MSG_ON_RELEASE);

		ratelimit_state_init(&perf->tail_pointer_race,
				     5 * HZ, 10);
		ratelimit_set_flags(&perf->tail_pointer_race,
				    RATELIMIT_MSG_ON_RELEASE);

		atomic64_set(&perf->noa_programming_delay,
			     500 * 1000 /* 500us */);

		perf->i915 = i915;

		oa_init_supported_formats(perf);
	}
}

static int destroy_config(int id, void *p, void *data)
{
	i915_oa_config_put(p);
	return 0;
}

int i915_perf_sysctl_register(void)
{
	sysctl_header = register_sysctl("dev/i915", oa_table);
	return 0;
}

void i915_perf_sysctl_unregister(void)
{
	unregister_sysctl_table(sysctl_header);
}

/**
 * i915_perf_fini - Counterpart to i915_perf_init()
 * @i915: i915 device instance
 */
void i915_perf_fini(struct drm_i915_private *i915)
{
	struct i915_perf *perf = &i915->perf;

	if (!perf->i915)
		return;

	idr_for_each(&perf->metrics_idr, destroy_config, perf);
	idr_destroy(&perf->metrics_idr);

	memset(&perf->ops, 0, sizeof(perf->ops));
	perf->i915 = NULL;
}

/**
 * i915_perf_ioctl_version - Version of the i915-perf subsystem
 *
 * This version number is used by userspace to detect available features.
 */
int i915_perf_ioctl_version(void)
{
	/*
	 * 1: Initial version
	 *   I915_PERF_IOCTL_ENABLE
	 *   I915_PERF_IOCTL_DISABLE
	 *
	 * 2: Added runtime modification of OA config.
	 *   I915_PERF_IOCTL_CONFIG
	 *
	 * 3: Add DRM_I915_PERF_PROP_HOLD_PREEMPTION parameter to hold
	 *    preemption on a particular context so that performance data is
	 *    accessible from a delta of MI_RPC reports without looking at the
	 *    OA buffer.
	 *
	 * 4: Add DRM_I915_PERF_PROP_GLOBAL_SSEU to limit what contexts can
	 *    be run for the duration of the performance recording based on
	 *    their SSEU configuration.
	 *
	 * 5: Add DRM_I915_PERF_PROP_POLL_OA_PERIOD parameter that controls the
	 *    interval for the hrtimer used to check for OA data.
	 */
	return 5;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_perf.c"
#endif
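/*
 * Example: a hedged userspace sketch (illustrative only) of reading the
 * version above through the GETPARAM ioctl, assuming the
 * I915_PARAM_PERF_REVISION parameter exposed in i915_drm.h:
 *
 *	static int perf_revision(int drm_fd)
 *	{
 *		int value = 0;
 *		struct drm_i915_getparam gp = {
 *			.param = I915_PARAM_PERF_REVISION,
 *			.value = &value,
 *		};
 *
 *		if (ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp))
 *			return -1;
 *		return value;
 *	}
 */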