/*
 * Copyright © 2015-2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *   Robert Bragg <robert@sixbynine.org>
 */


/**
 * DOC: i915 Perf Overview
 *
 * Gen graphics supports a large number of performance counters that can help
 * driver and application developers understand and optimize their use of the
 * GPU.
 *
 * This i915 perf interface enables userspace to configure and open a file
 * descriptor representing a stream of GPU metrics which can then be read() as
 * a stream of sample records.
 *
 * The interface is particularly suited to exposing buffered metrics that are
 * captured by DMA from the GPU, unsynchronized with and unrelated to the CPU.
 *
 * Streams representing a single context are accessible to applications with a
 * corresponding drm file descriptor, such that OpenGL can use the interface
 * without special privileges. Access to system-wide metrics requires root
 * privileges by default, unless changed via the dev.i915.perf_stream_paranoid
 * sysctl option.
 */
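/*
 * As a rough illustration of the uapi described above (not kernel code; a
 * minimal userspace sketch assuming the definitions from
 * include/uapi/drm/i915_drm.h, a DRM fd in drm_fd and a metric set ID read
 * from /sys/class/drm/card0/metrics/<uuid>/id):
 *
 *	uint64_t properties[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *		.num_properties = sizeof(properties) / (2 * sizeof(uint64_t)),
 *		.properties_ptr = (uintptr_t)properties,
 *	};
 *	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 *
 * Opening a system-wide stream this way typically needs root, or a relaxed
 * dev.i915.perf_stream_paranoid setting. The returned fd can then be read()
 * for the sample records described below.
 */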
/**
 * DOC: i915 Perf History and Comparison with Core Perf
 *
 * The interface was initially inspired by the core Perf infrastructure but
 * some notable differences are:
 *
 * i915 perf file descriptors represent a "stream" instead of an "event": a
 * perf event primarily corresponds to a single 64bit value, while a stream
 * might sample sets of tightly-coupled counters, depending on the
 * configuration. For example the Gen OA unit isn't designed to support
 * orthogonal configurations of individual counters; it's configured for a set
 * of related counters. Samples for an i915 perf stream capturing OA metrics
 * will include a set of counter values packed in a compact HW specific format.
 * The OA unit supports a number of different packing formats which can be
 * selected by the user opening the stream. Perf has support for grouping
 * events, but each event in the group is configured, validated and
 * authenticated individually with separate system calls.
 *
 * i915 perf stream configurations are provided as an array of u64 (key,value)
 * pairs, instead of a fixed struct with multiple miscellaneous config members,
 * interleaved with event-type specific members.
 *
 * i915 perf doesn't support exposing metrics via an mmap'd circular buffer.
 * The supported metrics are being written to memory by the GPU unsynchronized
 * with the CPU, using HW specific packing formats for counter sets. Sometimes
 * the constraints on HW configuration require reports to be filtered before
 * they can be exposed to unprivileged applications - to hide the metrics of
 * other processes/contexts. For these use cases a read() based interface is a
 * good fit, and provides an opportunity to filter data as it gets copied from
 * the GPU mapped buffers to userspace buffers.
 *
 *
 * Issues hit with first prototype based on Core Perf
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * The first prototype of this driver was based on the core perf
 * infrastructure, and while we did make that mostly work, with some changes to
 * perf, we found we were breaking or working around too many assumptions baked
 * into perf's currently cpu centric design.
 *
 * In the end we didn't see a clear benefit to making perf's implementation and
 * interface more complex by changing design assumptions while we knew we still
 * wouldn't be able to use any existing perf based userspace tools.
 *
 * Also considering the Gen specific nature of the Observability hardware and
 * how userspace will sometimes need to combine i915 perf OA metrics with
 * side-band OA data captured via MI_REPORT_PERF_COUNT commands; we're
 * expecting the interface to be used by a platform specific userspace such as
 * OpenGL or tools. That is to say: we aren't inherently missing out on having
 * a standard vendor/architecture agnostic interface by not using perf.
 *
 *
 * For posterity, in case we might re-visit trying to adapt core perf to be
 * better suited to exposing i915 metrics these were the main pain points we
 * hit:
 *
 * - The perf based OA PMU driver broke some significant design assumptions:
 *
 *   Existing perf pmus are used for profiling work on a cpu and we were
 *   introducing the idea of _IS_DEVICE pmus with different security
 *   implications, the need to fake cpu-related data (such as user/kernel
 *   registers) to fit with perf's current design, and adding _DEVICE records
 *   as a way to forward device-specific status records.
 *
 *   The OA unit writes reports of counters into a circular buffer, without
 *   involvement from the CPU, making our PMU driver the first of a kind.
 *
 *   Given the way we were periodically forwarding data from the GPU-mapped, OA
 *   buffer to perf's buffer, those bursts of sample writes looked to perf like
 *   we were sampling too fast and so we had to subvert its throttling checks.
 *
 *   Perf supports groups of counters and allows those to be read via
 *   transactions internally but transactions currently seem designed to be
 *   explicitly initiated from the cpu (say in response to a userspace read())
 *   and while we could pull a report out of the OA buffer we can't
 *   trigger a report from the cpu on demand.
 *
 *   Related to being report based; the OA counters are configured in HW as a
 *   set while perf generally expects counter configurations to be orthogonal.
 *   Although counters can be associated with a group leader as they are
 *   opened, there's no clear precedent for being able to provide group-wide
 *   configuration attributes (for example we want to let userspace choose the
 *   OA unit report format used to capture all counters in a set, or specify a
 *   GPU context to filter metrics on). We avoided using perf's grouping
 *   feature and forwarded OA reports to userspace via perf's 'raw' sample
 *   field. This suited our userspace well considering how coupled the counters
 *   are when dealing with normalizing. It would be inconvenient to split
 *   counters up into separate events, only to require userspace to recombine
 *   them. For Mesa it's also convenient to be forwarded raw, periodic reports
 *   for combining with the side-band raw reports it captures using
 *   MI_REPORT_PERF_COUNT commands.
 *
 * - As a side note on perf's grouping feature; there was also some concern
 *   that using PERF_FORMAT_GROUP as a way to pack together counter values
 *   would quite drastically inflate our sample sizes, which would likely
 *   lower the effective sampling resolutions we could use when the available
 *   memory bandwidth is limited.
 *
 *   With the OA unit's report formats, counters are packed together as 32
 *   or 40bit values, with the largest report size being 256 bytes.
 *
 *   PERF_FORMAT_GROUP values are 64bit, but there doesn't appear to be a
 *   documented ordering to the values, implying PERF_FORMAT_ID must also be
 *   used to add a 64bit ID before each value; giving 16 bytes per counter.
 *
 *   Related to counter orthogonality; we can't time share the OA unit, while
 *   event scheduling is a central design idea within perf for allowing
 *   userspace to open + enable more events than can be configured in HW at any
 *   one time. The OA unit is not designed to allow re-configuration while in
 *   use. We can't reconfigure the OA unit without losing internal OA unit
 *   state which we can't access explicitly to save and restore. Reconfiguring
 *   the OA unit is also relatively slow, involving ~100 register writes. From
 *   userspace Mesa also depends on a stable OA configuration when emitting
 *   MI_REPORT_PERF_COUNT commands and importantly the OA unit can't be
 *   disabled while there are outstanding MI_RPC commands lest we hang the
 *   command streamer.
 *
 *   The contents of sample records aren't extensible by device drivers (i.e.
 *   the sample_type bits). As an example; Sourab Gupta had been looking to
 *   attach GPU timestamps to our OA samples. We were shoehorning OA reports
 *   into sample records by using the 'raw' field, but it's tricky to pack more
 *   than one thing into this field because events/core.c currently only lets a
 *   pmu give a single raw data pointer plus len which will be copied into the
 *   ring buffer. To include more than the OA report we'd have to copy the
 *   report into an intermediate larger buffer. I'd been considering allowing a
 *   vector of data+len values to be specified for copying the raw data, but
 *   it felt like a kludge to be using the raw field for this purpose.
 *
 * - It felt like our perf based PMU was making some technical compromises
 *   just for the sake of using perf:
 *
 *   perf_event_open() requires events to either relate to a pid or a specific
 *   cpu core, while our device pmu related to neither. Events opened with a
 *   pid will be automatically enabled/disabled according to the scheduling of
 *   that process - so not appropriate for us. When an event is related to a
 *   cpu id, perf ensures pmu methods will be invoked via an inter process
 *   interrupt on that core. To avoid invasive changes our userspace opened OA
 *   perf events for a specific cpu. This was workable but it meant the
 *   majority of the OA driver ran in atomic context, including all OA report
 *   forwarding, which wasn't really necessary in our case and seemed to make
 *   our locking requirements somewhat complex as we handled the interaction
 *   with the rest of the i915 driver.
 */

#include <linux/anon_inodes.h>
#include <linux/sizes.h>
#include <linux/uuid.h>

#include "gem/i915_gem_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_execlists_submission.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_clock_utils.h"
#include "gt/intel_lrc.h"
#include "gt/intel_ring.h"

#include "i915_drv.h"
#include "i915_perf.h"

/* HW requires this to be a power of two, between 128k and 16M, though the
 * driver is currently generally designed assuming the largest 16M size is
 * used such that the overflow cases are unlikely in normal operation.
 */
#define OA_BUFFER_SIZE		SZ_16M

#define OA_TAKEN(tail, head)	((tail - head) & (OA_BUFFER_SIZE - 1))

/**
 * DOC: OA Tail Pointer Race
 *
 * There's a HW race condition between OA unit tail pointer register updates and
 * writes to memory whereby the tail pointer can sometimes get ahead of what's
 * been written out to the OA buffer so far (in terms of what's visible to the
 * CPU).
 *
 * Although this can be observed explicitly while copying reports to userspace
 * by checking for a zeroed report-id field in tail reports, we want to account
 * for this earlier, as part of the oa_buffer_check_unlocked() to avoid lots of
 * redundant read() attempts.
 *
 * We workaround this issue in oa_buffer_check_unlocked() by reading the reports
 * in the OA buffer, starting from the tail reported by the HW until we find a
 * report with its first 2 dwords not 0, meaning the previous report is
 * completely in memory and ready to be read. Those dwords are also set to 0
 * once read and the whole buffer is cleared upon OA buffer initialization. The
 * first dword is the reason for this report while the second is the timestamp,
 * making the chances of having those 2 fields at 0 fairly unlikely. A more
 * detailed explanation is available in oa_buffer_check_unlocked().
 *
 * Most of the implementation details for this workaround are in
 * oa_buffer_check_unlocked() and _append_oa_reports().
 *
 * Note for posterity: previously the driver used to define an effective tail
 * pointer that lagged the real pointer by a 'tail margin' measured in bytes
 * derived from %OA_TAIL_MARGIN_NSEC and the configured sampling frequency.
 * This was flawed considering that the OA unit may also automatically generate
 * non-periodic reports (such as on context switch) or the OA unit may be
 * enabled without any periodic sampling.
 */
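/*
 * A worked example of the modular arithmetic in OA_TAKEN() (illustrative
 * values only): with OA_BUFFER_SIZE = 16M (mask 0xffffff), a head of 0xffff80
 * and a tail that has wrapped around to 0x000040,
 * OA_TAKEN(0x000040, 0xffff80) = (0x000040 - 0xffff80) & 0xffffff = 0xc0,
 * i.e. three 64-byte reports are available even though the tail is
 * numerically below the head.
 */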
#define OA_TAIL_MARGIN_NSEC	100000ULL
#define INVALID_TAIL_PTR	0xffffffff

/* The default frequency for checking whether the OA unit has written new
 * reports to the circular OA buffer...
 */
#define DEFAULT_POLL_FREQUENCY_HZ 200
#define DEFAULT_POLL_PERIOD_NS (NSEC_PER_SEC / DEFAULT_POLL_FREQUENCY_HZ)

/* for sysctl proc_dointvec_minmax of dev.i915.perf_stream_paranoid */
static u32 i915_perf_stream_paranoid = true;

/* The maximum exponent the hardware accepts is 63 (essentially it selects one
 * of the 64bit timestamp bits to trigger reports from) but there's currently
 * no known use case for sampling as infrequently as once per 47 thousand years.
 *
 * Since the timestamps included in OA reports are only 32bits it seems
 * reasonable to limit the OA exponent where it's still possible to account for
 * overflow in OA report timestamps.
 */
#define OA_EXPONENT_MAX 31
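/*
 * Illustrative arithmetic only (the runtime conversion goes through the gt
 * clock utilities included above): since an exponent selects a timestamp bit,
 * the periodic sampling period works out as 2^(exponent + 1) timestamp ticks.
 * With Haswell's 12.5MHz timestamp frequency, an exponent of 5 gives
 * 2^6 / 12.5MHz = 5.12us between reports, while OA_EXPONENT_MAX (31) gives
 * 2^32 / 12.5MHz ~= 343s.
 */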
#define INVALID_CTX_ID 0xffffffff

/* On Gen8+ automatically triggered OA reports include a 'reason' field... */
#define OAREPORT_REASON_MASK           0x3f
#define OAREPORT_REASON_MASK_EXTENDED  0x7f
#define OAREPORT_REASON_SHIFT          19
#define OAREPORT_REASON_TIMER          (1<<0)
#define OAREPORT_REASON_CTX_SWITCH     (1<<3)
#define OAREPORT_REASON_CLK_RATIO      (1<<5)


/* For sysctl proc_dointvec_minmax of i915_oa_max_sample_rate
 *
 * The highest sampling frequency we can theoretically program the OA unit
 * with is always half the timestamp frequency: e.g. 6.25MHz for Haswell.
 *
 * Initialized just before we register the sysctl parameter.
 */
static int oa_sample_rate_hard_limit;

/* Theoretically we can program the OA unit to sample every 160ns but don't
 * allow that by default unless root...
 *
 * The default threshold of 100000Hz is based on perf's similar
 * kernel.perf_event_max_sample_rate sysctl parameter.
 */
static u32 i915_oa_max_sample_rate = 100000;

/* XXX: beware if future OA HW adds new report formats that the current
 * code assumes all reports have a power-of-two size and ~(size - 1) can
 * be used as a mask to align the OA tail pointer.
 */
static const struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = {
	[I915_OA_FORMAT_A13]	    = { 0, 64 },
	[I915_OA_FORMAT_A29]	    = { 1, 128 },
	[I915_OA_FORMAT_A13_B8_C8]  = { 2, 128 },
	/* A29_B8_C8 Disallowed as 192 bytes doesn't factor into buffer size */
	[I915_OA_FORMAT_B4_C8]	    = { 4, 64 },
	[I915_OA_FORMAT_A45_B8_C8]  = { 5, 256 },
	[I915_OA_FORMAT_B4_C8_A16]  = { 6, 128 },
	[I915_OA_FORMAT_C4_B8]	    = { 7, 64 },
};

static const struct i915_oa_format gen8_plus_oa_formats[I915_OA_FORMAT_MAX] = {
	[I915_OA_FORMAT_A12]		    = { 0, 64 },
	[I915_OA_FORMAT_A12_B8_C8]	    = { 2, 128 },
	[I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
	[I915_OA_FORMAT_C4_B8]		    = { 7, 64 },
};

static const struct i915_oa_format gen12_oa_formats[I915_OA_FORMAT_MAX] = {
	[I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
};

#define SAMPLE_OA_REPORT      (1<<0)

/**
 * struct perf_open_properties - for validated properties given to open a stream
 * @sample_flags: `DRM_I915_PERF_PROP_SAMPLE_*` properties are tracked as flags
 * @single_context: Whether a single or all gpu contexts should be monitored
 * @hold_preemption: Whether preemption is disabled for the filtered context
 * @ctx_handle: A gem ctx handle for use with @single_context
 * @metrics_set: An ID for an OA unit metric set advertised via sysfs
 * @oa_format: An OA unit HW report format
 * @oa_periodic: Whether to enable periodic OA unit sampling
 * @oa_period_exponent: The OA unit sampling period is derived from this
 * @engine: The engine (typically rcs0) being monitored by the OA unit
 * @has_sseu: Whether @sseu was specified by userspace
 * @sseu: internal SSEU configuration computed either from the userspace
 *        specified configuration in the opening parameters or a default value
 *        (see get_default_sseu_config())
 * @poll_oa_period: The period in nanoseconds at which the CPU will check for OA
 *                  data availability
 *
 * As read_properties_unlocked() enumerates and validates the properties given
 * to open a stream of metrics the configuration is built up in the structure
 * which starts out zero initialized.
 */
struct perf_open_properties {
	u32 sample_flags;

	u64 single_context:1;
	u64 hold_preemption:1;
	u64 ctx_handle;

	/* OA sampling state */
	int metrics_set;
	int oa_format;
	bool oa_periodic;
	int oa_period_exponent;

	struct intel_engine_cs *engine;

	bool has_sseu;
	struct intel_sseu sseu;

	u64 poll_oa_period;
};

struct i915_oa_config_bo {
	struct llist_node node;

	struct i915_oa_config *oa_config;
	struct i915_vma *vma;
};

static struct ctl_table_header *sysctl_header;

static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer);

void i915_oa_config_release(struct kref *ref)
{
	struct i915_oa_config *oa_config =
		container_of(ref, typeof(*oa_config), ref);

	kfree(oa_config->flex_regs);
	kfree(oa_config->b_counter_regs);
	kfree(oa_config->mux_regs);

	kfree_rcu(oa_config, rcu);
}

struct i915_oa_config *
i915_perf_get_oa_config(struct i915_perf *perf, int metrics_set)
{
	struct i915_oa_config *oa_config;

	rcu_read_lock();
	oa_config = idr_find(&perf->metrics_idr, metrics_set);
	if (oa_config)
		oa_config = i915_oa_config_get(oa_config);
	rcu_read_unlock();

	return oa_config;
}

static void free_oa_config_bo(struct i915_oa_config_bo *oa_bo)
{
	i915_oa_config_put(oa_bo->oa_config);
	i915_vma_put(oa_bo->vma);
	kfree(oa_bo);
}

static u32 gen12_oa_hw_tail_read(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	return intel_uncore_read(uncore, GEN12_OAG_OATAILPTR) &
	       GEN12_OAG_OATAILPTR_MASK;
}

static u32 gen8_oa_hw_tail_read(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	return intel_uncore_read(uncore, GEN8_OATAILPTR) & GEN8_OATAILPTR_MASK;
}

static u32 gen7_oa_hw_tail_read(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);

	return oastatus1 & GEN7_OASTATUS1_TAIL_MASK;
}

/**
 * oa_buffer_check_unlocked - check for data and update tail ptr state
 * @stream: i915 stream instance
 *
 * This is either called via fops (for blocking reads in user ctx) or the poll
 * check hrtimer (atomic ctx) to check the OA buffer tail pointer and determine
 * if there is data available for userspace to read.
 *
 * This function is central to providing a workaround for the OA unit tail
 * pointer having a race with respect to what data is visible to the CPU.
 * It is responsible for reading tail pointers from the hardware and giving
 * the pointers time to 'age' before they are made available for reading.
 * (See description of OA_TAIL_MARGIN_NSEC above for further details.)
 *
 * Besides returning true when there is data available to read() this function
 * also updates the tail, aging_tail and aging_timestamp in the oa_buffer
 * object.
 *
 * Note: It's safe to read OA config state here unlocked, assuming that this is
 * only called while the stream is enabled, while the global OA configuration
 * can't be modified.
 *
 * Returns: %true if the OA buffer contains data, else %false
 */
static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream)
{
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	int report_size = stream->oa_buffer.format_size;
	unsigned long flags;
	bool pollin;
	u32 hw_tail;
	u64 now;

	/* We have to consider the (unlikely) possibility that read() errors
	 * could result in an OA buffer reset which might reset the head and
	 * tail state.
	 */
	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	hw_tail = stream->perf->ops.oa_hw_tail_read(stream);

	/* The tail pointer increases in 64 byte increments,
	 * not in report_size steps...
	 */
	hw_tail &= ~(report_size - 1);

	now = ktime_get_mono_fast_ns();

	if (hw_tail == stream->oa_buffer.aging_tail &&
	    (now - stream->oa_buffer.aging_timestamp) > OA_TAIL_MARGIN_NSEC) {
		/* If the HW tail hasn't moved since the last check and the HW
		 * tail has been aging for long enough, declare it the new
		 * tail.
		 */
		stream->oa_buffer.tail = stream->oa_buffer.aging_tail;
	} else {
		u32 head, tail, aged_tail;

		/* NB: The head we observe here might effectively be a little
		 * out of date. If a read() is in progress, the head could be
		 * anywhere between this head and stream->oa_buffer.tail.
		 */
		head = stream->oa_buffer.head - gtt_offset;
		aged_tail = stream->oa_buffer.tail - gtt_offset;

		hw_tail -= gtt_offset;
		tail = hw_tail;

		/* Walk the stream backward until we find a report with dwords
		 * 0 & 1 not at 0. Since the circular buffer pointers progress
		 * by increments of 64 bytes and reports can be up to 256
		 * bytes long, we can't tell whether a report has fully landed
		 * in memory before the first 2 dwords of the following report
		 * have effectively landed.
		 *
		 * This is assuming that the writes of the OA unit land in
		 * memory in the order they were written to.
		 * If not : (╯°□°)╯︵ ┻━┻
		 */
		while (OA_TAKEN(tail, aged_tail) >= report_size) {
			u32 *report32 = (void *)(stream->oa_buffer.vaddr + tail);

			if (report32[0] != 0 || report32[1] != 0)
				break;

			tail = (tail - report_size) & (OA_BUFFER_SIZE - 1);
		}

		if (OA_TAKEN(hw_tail, tail) > report_size &&
		    __ratelimit(&stream->perf->tail_pointer_race))
			DRM_NOTE("unlanded report(s) head=0x%x "
				 "tail=0x%x hw_tail=0x%x\n",
				 head, tail, hw_tail);

		stream->oa_buffer.tail = gtt_offset + tail;
		stream->oa_buffer.aging_tail = gtt_offset + hw_tail;
		stream->oa_buffer.aging_timestamp = now;
	}

	pollin = OA_TAKEN(stream->oa_buffer.tail - gtt_offset,
			  stream->oa_buffer.head - gtt_offset) >= report_size;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	return pollin;
}

/**
 * append_oa_status - Appends a status record to a userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @type: The kind of status to report to userspace
 *
 * Writes a status record (such as `DRM_I915_PERF_RECORD_OA_REPORT_LOST`)
 * into the userspace read() buffer.
 *
 * The @buf @offset will only be updated on success.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int append_oa_status(struct i915_perf_stream *stream,
			    char __user *buf,
			    size_t count,
			    size_t *offset,
			    enum drm_i915_perf_record_type type)
{
	struct drm_i915_perf_record_header header = { type, 0, sizeof(header) };

	if ((count - *offset) < header.size)
		return -ENOSPC;

	if (copy_to_user(buf + *offset, &header, sizeof(header)))
		return -EFAULT;

	(*offset) += header.size;

	return 0;
}

/**
 * append_oa_sample - Copies a single OA report into userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @report: A single OA report to (optionally) include as part of the sample
 *
 * The contents of a sample are configured through `DRM_I915_PERF_PROP_SAMPLE_*`
 * properties when opening a stream, tracked as `stream->sample_flags`. This
 * function copies the requested components of a single sample to the given
 * read() @buf.
 *
 * The @buf @offset will only be updated on success.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int append_oa_sample(struct i915_perf_stream *stream,
			    char __user *buf,
			    size_t count,
			    size_t *offset,
			    const u8 *report)
{
	int report_size = stream->oa_buffer.format_size;
	struct drm_i915_perf_record_header header;

	header.type = DRM_I915_PERF_RECORD_SAMPLE;
	header.pad = 0;
	header.size = stream->sample_size;

	if ((count - *offset) < header.size)
		return -ENOSPC;

	buf += *offset;
	if (copy_to_user(buf, &header, sizeof(header)))
		return -EFAULT;
	buf += sizeof(header);

	if (copy_to_user(buf, report, report_size))
		return -EFAULT;

	(*offset) += header.size;

	return 0;
}
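/*
 * For reference, a userspace reader consuming the records framed by the
 * helpers above might look like the following sketch (not kernel code; the
 * record layout comes from include/uapi/drm/i915_drm.h and
 * process_oa_report() stands in for format-specific decoding):
 *
 *	uint8_t buf[4096];
 *	ssize_t len = read(stream_fd, buf, sizeof(buf));
 *
 *	for (size_t offset = 0; offset < (size_t)len; ) {
 *		const struct drm_i915_perf_record_header *header =
 *			(const void *)(buf + offset);
 *
 *		switch (header->type) {
 *		case DRM_I915_PERF_RECORD_SAMPLE:
 *			// With SAMPLE_OA set, the raw OA report follows
 *			// directly after the header.
 *			process_oa_report((const uint8_t *)(header + 1));
 *			break;
 *		case DRM_I915_PERF_RECORD_OA_REPORT_LOST:
 *		case DRM_I915_PERF_RECORD_OA_BUFFER_LOST:
 *			// Counter values may be discontinuous across here.
 *			break;
 *		}
 *
 *		offset += header->size;
 *	}
 */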
/**
 * gen8_append_oa_reports - Copies all buffered OA reports into
 *			    userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Notably any error condition resulting in a short read (-%ENOSPC or
 * -%EFAULT) will be returned even though one or more records may
 * have been successfully copied. In this case it's up to the caller
 * to decide if the error should be squashed before returning to
 * userspace.
 *
 * Note: reports are consumed from the head, and appended to the
 * tail, so the tail chases the head?... If you think that's mad
 * and back-to-front you're not alone, but this follows the
 * Gen PRM naming convention.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int gen8_append_oa_reports(struct i915_perf_stream *stream,
				  char __user *buf,
				  size_t count,
				  size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	int report_size = stream->oa_buffer.format_size;
	u8 *oa_buf_base = stream->oa_buffer.vaddr;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	u32 mask = (OA_BUFFER_SIZE - 1);
	size_t start_offset = *offset;
	unsigned long flags;
	u32 head, tail;
	u32 taken;
	int ret = 0;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled))
		return -EIO;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	head = stream->oa_buffer.head;
	tail = stream->oa_buffer.tail;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/*
	 * NB: oa_buffer.head/tail include the gtt_offset which we don't want
	 * while indexing relative to oa_buf_base.
	 */
	head -= gtt_offset;
	tail -= gtt_offset;

	/*
	 * An out of bounds or misaligned head or tail pointer implies a driver
	 * bug since we validate + align the tail pointers we read from the
	 * hardware and we are in full control of the head pointer which should
	 * only be incremented by multiples of the report size (notably also
	 * all a power of two).
	 */
	if (drm_WARN_ONCE(&uncore->i915->drm,
			  head > OA_BUFFER_SIZE || head % report_size ||
			  tail > OA_BUFFER_SIZE || tail % report_size,
			  "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
			  head, tail))
		return -EIO;


	for (/* none */;
	     (taken = OA_TAKEN(tail, head));
	     head = (head + report_size) & mask) {
		u8 *report = oa_buf_base + head;
		u32 *report32 = (void *)report;
		u32 ctx_id;
		u32 reason;

		/*
		 * All the report sizes factor neatly into the buffer
		 * size so we never expect to see a report split
		 * between the beginning and end of the buffer.
		 *
		 * Given the initial alignment check a misalignment
		 * here would imply a driver bug that would result
		 * in an overrun.
		 */
		if (drm_WARN_ON(&uncore->i915->drm,
				(OA_BUFFER_SIZE - head) < report_size)) {
			drm_err(&uncore->i915->drm,
				"Spurious OA head ptr: non-integral report offset\n");
			break;
		}

		/*
		 * The reason field includes flags identifying what
		 * triggered this specific report (mostly timer
		 * triggered or e.g. due to a context switch).
		 *
		 * This field is never expected to be zero so we can
		 * check that the report isn't invalid before copying
		 * it to userspace...
		 */
		reason = ((report32[0] >> OAREPORT_REASON_SHIFT) &
			  (IS_GEN(stream->perf->i915, 12) ?
			   OAREPORT_REASON_MASK_EXTENDED :
			   OAREPORT_REASON_MASK));
		if (reason == 0) {
			if (__ratelimit(&stream->perf->spurious_report_rs))
				DRM_NOTE("Skipping spurious, invalid OA report\n");
			continue;
		}

		ctx_id = report32[2] & stream->specific_ctx_id_mask;

		/*
		 * Squash whatever is in the CTX_ID field if it's marked as
		 * invalid to be sure we avoid false-positive, single-context
		 * filtering below...
		 *
		 * Note that we don't clear the valid_ctx_bit so userspace can
		 * understand that the ID has been squashed by the kernel.
		 */
		if (!(report32[0] & stream->perf->gen8_valid_ctx_bit) &&
		    INTEL_GEN(stream->perf->i915) <= 11)
			ctx_id = report32[2] = INVALID_CTX_ID;

		/*
		 * NB: For Gen 8 the OA unit no longer supports clock gating
		 * off for a specific context and the kernel can't securely
		 * stop the counters from updating as system-wide / global
		 * values.
		 *
		 * Automatic reports now include a context ID so reports can be
		 * filtered on the cpu but it's not worth trying to
		 * automatically subtract/hide counter progress for other
		 * contexts while filtering since we can't stop userspace
		 * issuing MI_REPORT_PERF_COUNT commands which would still
		 * provide a side-band view of the real values.
		 *
		 * To allow userspace (such as Mesa/GL_INTEL_performance_query)
		 * to normalize counters for a single filtered context then it
		 * needs to be forwarded bookend context-switch reports so that
		 * it can track switches in between MI_REPORT_PERF_COUNT
		 * commands and can itself subtract/ignore the progress of
		 * counters associated with other contexts. Note that the
		 * hardware automatically triggers reports when switching to a
		 * new context which are tagged with the ID of the newly active
		 * context. To avoid the complexity (and likely fragility) of
		 * reading ahead while parsing reports to try and minimize
		 * forwarding redundant context switch reports (i.e. between
		 * other, unrelated contexts) we simply elect to forward them
		 * all.
		 *
		 * We don't rely solely on the reason field to identify context
		 * switches since it's not uncommon for periodic samples to
		 * identify a switch before any 'context switch' report.
		 */
		if (!stream->perf->exclusive_stream->ctx ||
		    stream->specific_ctx_id == ctx_id ||
		    stream->oa_buffer.last_ctx_id == stream->specific_ctx_id ||
		    reason & OAREPORT_REASON_CTX_SWITCH) {

			/*
			 * While filtering for a single context we avoid
			 * leaking the IDs of other contexts.
			 */
			if (stream->perf->exclusive_stream->ctx &&
			    stream->specific_ctx_id != ctx_id) {
				report32[2] = INVALID_CTX_ID;
			}

			ret = append_oa_sample(stream, buf, count, offset,
					       report);
			if (ret)
				break;

			stream->oa_buffer.last_ctx_id = ctx_id;
		}

		/*
		 * Clear out the first 2 dwords as a means to detect unlanded
		 * reports.
		 */
		report32[0] = 0;
		report32[1] = 0;
	}

	if (start_offset != *offset) {
		i915_reg_t oaheadptr;

		oaheadptr = IS_GEN(stream->perf->i915, 12) ?
			    GEN12_OAG_OAHEADPTR : GEN8_OAHEADPTR;

		spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

		/*
		 * We removed the gtt_offset for the copy loop above, indexing
		 * relative to oa_buf_base so put back here...
		 */
		head += gtt_offset;
		intel_uncore_write(uncore, oaheadptr,
				   head & GEN12_OAG_OAHEADPTR_MASK);
		stream->oa_buffer.head = head;

		spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
	}

	return ret;
}

/**
 * gen8_oa_read - copy status records then buffered OA reports
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Checks OA unit status registers and if necessary appends corresponding
 * status records for userspace (such as for a buffer full condition) and then
 * initiates appending any buffered OA reports.
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * NB: some data may be successfully copied to the userspace buffer
 * even if an error is returned, and this is reflected in the
 * updated @offset.
 *
 * Returns: zero on success or a negative error code
 */
static int gen8_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 oastatus;
	i915_reg_t oastatus_reg;
	int ret;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
		return -EIO;

	oastatus_reg = IS_GEN(stream->perf->i915, 12) ?
		       GEN12_OAG_OASTATUS : GEN8_OASTATUS;

	oastatus = intel_uncore_read(uncore, oastatus_reg);

	/*
	 * We treat OABUFFER_OVERFLOW as a significant error:
	 *
	 * Although theoretically we could handle this more gracefully
	 * sometimes, some Gens don't correctly suppress certain
	 * automatically triggered reports in this condition and so we
	 * have to assume that old reports are now being trampled
	 * over.
	 *
	 * Considering how we don't currently give userspace control
	 * over the OA buffer size and always configure a large 16MB
	 * buffer, a buffer overflow likely indicates that something
	 * has gone quite badly wrong.
	 */
	if (oastatus & GEN8_OASTATUS_OABUFFER_OVERFLOW) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
		if (ret)
			return ret;

		DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
			  stream->period_exponent);

		stream->perf->ops.oa_disable(stream);
		stream->perf->ops.oa_enable(stream);

		/*
		 * Note: .oa_enable() is expected to re-init the oabuffer and
		 * reset GEN8_OASTATUS for us
		 */
		oastatus = intel_uncore_read(uncore, oastatus_reg);
	}

	if (oastatus & GEN8_OASTATUS_REPORT_LOST) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
		if (ret)
			return ret;

		intel_uncore_rmw(uncore, oastatus_reg,
				 GEN8_OASTATUS_COUNTER_OVERFLOW |
				 GEN8_OASTATUS_REPORT_LOST,
				 IS_GEN_RANGE(uncore->i915, 8, 11) ?
				 (GEN8_OASTATUS_HEAD_POINTER_WRAP |
				  GEN8_OASTATUS_TAIL_POINTER_WRAP) : 0);
	}

	return gen8_append_oa_reports(stream, buf, count, offset);
}

/**
 * gen7_append_oa_reports - Copies all buffered OA reports into
 *			    userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Notably any error condition resulting in a short read (-%ENOSPC or
 * -%EFAULT) will be returned even though one or more records may
 * have been successfully copied. In this case it's up to the caller
 * to decide if the error should be squashed before returning to
 * userspace.
 *
 * Note: reports are consumed from the head, and appended to the
 * tail, so the tail chases the head?... If you think that's mad
 * and back-to-front you're not alone, but this follows the
 * Gen PRM naming convention.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int gen7_append_oa_reports(struct i915_perf_stream *stream,
				  char __user *buf,
				  size_t count,
				  size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	int report_size = stream->oa_buffer.format_size;
	u8 *oa_buf_base = stream->oa_buffer.vaddr;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	u32 mask = (OA_BUFFER_SIZE - 1);
	size_t start_offset = *offset;
	unsigned long flags;
	u32 head, tail;
	u32 taken;
	int ret = 0;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled))
		return -EIO;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	head = stream->oa_buffer.head;
	tail = stream->oa_buffer.tail;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/* NB: oa_buffer.head/tail include the gtt_offset which we don't want
	 * while indexing relative to oa_buf_base.
	 */
	head -= gtt_offset;
	tail -= gtt_offset;

	/* An out of bounds or misaligned head or tail pointer implies a driver
	 * bug since we validate + align the tail pointers we read from the
	 * hardware and we are in full control of the head pointer which should
	 * only be incremented by multiples of the report size (notably also
	 * all a power of two).
	 */
	if (drm_WARN_ONCE(&uncore->i915->drm,
			  head > OA_BUFFER_SIZE || head % report_size ||
			  tail > OA_BUFFER_SIZE || tail % report_size,
			  "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
			  head, tail))
		return -EIO;


	for (/* none */;
	     (taken = OA_TAKEN(tail, head));
	     head = (head + report_size) & mask) {
		u8 *report = oa_buf_base + head;
		u32 *report32 = (void *)report;

		/* All the report sizes factor neatly into the buffer
		 * size so we never expect to see a report split
		 * between the beginning and end of the buffer.
		 *
		 * Given the initial alignment check a misalignment
		 * here would imply a driver bug that would result
		 * in an overrun.
		 */
		if (drm_WARN_ON(&uncore->i915->drm,
				(OA_BUFFER_SIZE - head) < report_size)) {
			drm_err(&uncore->i915->drm,
				"Spurious OA head ptr: non-integral report offset\n");
			break;
		}

		/* The report-ID field for periodic samples includes
		 * some undocumented flags related to what triggered
		 * the report and is never expected to be zero so we
		 * can check that the report isn't invalid before
		 * copying it to userspace...
		 */
		if (report32[0] == 0) {
			if (__ratelimit(&stream->perf->spurious_report_rs))
				DRM_NOTE("Skipping spurious, invalid OA report\n");
			continue;
		}

		ret = append_oa_sample(stream, buf, count, offset, report);
		if (ret)
			break;

		/* Clear out the first 2 dwords as a means to detect unlanded
		 * reports.
		 */
		report32[0] = 0;
		report32[1] = 0;
	}

	if (start_offset != *offset) {
		spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

		/* We removed the gtt_offset for the copy loop above, indexing
		 * relative to oa_buf_base so put back here...
		 */
		head += gtt_offset;

		intel_uncore_write(uncore, GEN7_OASTATUS2,
				   (head & GEN7_OASTATUS2_HEAD_MASK) |
				   GEN7_OASTATUS2_MEM_SELECT_GGTT);
		stream->oa_buffer.head = head;

		spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
	}

	return ret;
}

/**
 * gen7_oa_read - copy status records then buffered OA reports
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Checks Gen 7 specific OA unit status registers and if necessary appends
 * corresponding status records for userspace (such as for a buffer full
 * condition) and then initiates appending any buffered OA reports.
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * Returns: zero on success or a negative error code
 */
static int gen7_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 oastatus1;
	int ret;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
		return -EIO;

	oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);

	/* XXX: On Haswell we don't have a safe way to clear oastatus1
	 * bits while the OA unit is enabled (while the tail pointer
	 * may be updated asynchronously) so we ignore status bits
	 * that have already been reported to userspace.
	 */
	oastatus1 &= ~stream->perf->gen7_latched_oastatus1;

	/* We treat OABUFFER_OVERFLOW as a significant error:
	 *
	 * - The status can be interpreted to mean that the buffer is
	 *   currently full (with a higher precedence than OA_TAKEN()
	 *   which will start to report a near-empty buffer after an
	 *   overflow) but it's awkward that we can't clear the status
	 *   on Haswell, so without a reset we won't be able to catch
	 *   the state again.
	 *
	 * - Since it also implies the HW has started overwriting old
	 *   reports it may also affect our sanity checks for invalid
	 *   reports when copying to userspace that assume new reports
	 *   are being written to cleared memory.
	 *
	 * - In the future we may want to introduce a flight recorder
	 *   mode where the driver will automatically maintain a safe
	 *   guard band between head/tail, avoiding this overflow
	 *   condition, but we avoid the added driver complexity for
	 *   now.
	 */
	if (unlikely(oastatus1 & GEN7_OASTATUS1_OABUFFER_OVERFLOW)) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
		if (ret)
			return ret;

		DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
			  stream->period_exponent);

		stream->perf->ops.oa_disable(stream);
		stream->perf->ops.oa_enable(stream);

		oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);
	}

	if (unlikely(oastatus1 & GEN7_OASTATUS1_REPORT_LOST)) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
		if (ret)
			return ret;
		stream->perf->gen7_latched_oastatus1 |=
			GEN7_OASTATUS1_REPORT_LOST;
	}

	return gen7_append_oa_reports(stream, buf, count, offset);
}

/**
 * i915_oa_wait_unlocked - handles blocking IO until OA data available
 * @stream: An i915-perf stream opened for OA metrics
 *
 * Called when userspace tries to read() from a blocking stream FD opened
 * for OA metrics. It waits until the hrtimer callback finds a non-empty
 * OA buffer and wakes us.
 *
 * Note: it's acceptable to have this return with some false positives
 * since any subsequent read handling will return -EAGAIN if there isn't
 * really data ready for userspace yet.
 *
 * Returns: zero on success or a negative error code
 */
static int i915_oa_wait_unlocked(struct i915_perf_stream *stream)
{
	/* We would wait indefinitely if periodic sampling is not enabled */
	if (!stream->periodic)
		return -EIO;

	return wait_event_interruptible(stream->poll_wq,
					oa_buffer_check_unlocked(stream));
}

/**
 * i915_oa_poll_wait - call poll_wait() for an OA stream poll()
 * @stream: An i915-perf stream opened for OA metrics
 * @file: An i915 perf stream file
 * @wait: poll() state table
 *
 * For handling userspace polling on an i915 perf stream opened for OA metrics,
 * this starts a poll_wait with the wait queue that our hrtimer callback wakes
 * when it sees data ready to read in the circular OA buffer.
 */
static void i915_oa_poll_wait(struct i915_perf_stream *stream,
			      struct file *file,
			      poll_table *wait)
{
	poll_wait(file, &stream->poll_wq, wait);
}

/**
 * i915_oa_read - just calls through to &i915_oa_ops->read
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * Returns: zero on success or a negative error code
 */
static int i915_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	return stream->perf->ops.read(stream, buf, count, offset);
}
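/*
 * The wait/poll hooks above give userspace the usual blocking idioms. An
 * illustrative (non-authoritative) reader loop driving the stream with
 * poll():
 *
 *	struct pollfd pfd = { .fd = stream_fd, .events = POLLIN };
 *
 *	while (poll(&pfd, 1, -1) >= 0) {
 *		if (pfd.revents & POLLIN) {
 *			ssize_t len = read(stream_fd, buf, sizeof(buf));
 *			// parse records as sketched earlier...
 *		}
 *	}
 *
 * Note the hrtimer only checks for new reports at DEFAULT_POLL_PERIOD_NS
 * (5ms), or at the rate requested via DRM_I915_PERF_PROP_POLL_OA_PERIOD, so
 * wakeups can lag report availability by up to one period.
 */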
*/ 1210 continue; 1211 1212 err = 0; 1213 break; 1214 } 1215 i915_gem_context_unlock_engines(ctx); 1216 1217 if (err) 1218 return ERR_PTR(err); 1219 1220 i915_gem_ww_ctx_init(&ww, true); 1221 retry: 1222 /* 1223 * As the ID is the gtt offset of the context's vma we 1224 * pin the vma to ensure the ID remains fixed. 1225 */ 1226 err = intel_context_pin_ww(ce, &ww); 1227 if (err == -EDEADLK) { 1228 err = i915_gem_ww_ctx_backoff(&ww); 1229 if (!err) 1230 goto retry; 1231 } 1232 i915_gem_ww_ctx_fini(&ww); 1233 1234 if (err) 1235 return ERR_PTR(err); 1236 1237 stream->pinned_ctx = ce; 1238 return stream->pinned_ctx; 1239 } 1240 1241 /** 1242 * oa_get_render_ctx_id - determine and hold ctx hw id 1243 * @stream: An i915-perf stream opened for OA metrics 1244 * 1245 * Determine the render context hw id, and ensure it remains fixed for the 1246 * lifetime of the stream. This ensures that we don't have to worry about 1247 * updating the context ID in OACONTROL on the fly. 1248 * 1249 * Returns: zero on success or a negative error code 1250 */ 1251 static int oa_get_render_ctx_id(struct i915_perf_stream *stream) 1252 { 1253 struct intel_context *ce; 1254 1255 ce = oa_pin_context(stream); 1256 if (IS_ERR(ce)) 1257 return PTR_ERR(ce); 1258 1259 switch (INTEL_GEN(ce->engine->i915)) { 1260 case 7: { 1261 /* 1262 * On Haswell we don't do any post processing of the reports 1263 * and don't need to use the mask. 1264 */ 1265 stream->specific_ctx_id = i915_ggtt_offset(ce->state); 1266 stream->specific_ctx_id_mask = 0; 1267 break; 1268 } 1269 1270 case 8: 1271 case 9: 1272 case 10: 1273 if (intel_engine_in_execlists_submission_mode(ce->engine)) { 1274 stream->specific_ctx_id_mask = 1275 (1U << GEN8_CTX_ID_WIDTH) - 1; 1276 stream->specific_ctx_id = stream->specific_ctx_id_mask; 1277 } else { 1278 /* 1279 * When using GuC, the context descriptor we write in 1280 * i915 is read by GuC and rewritten before it's 1281 * actually written into the hardware. The LRCA is 1282 * what is put into the context id field of the 1283 * context descriptor by GuC. Because it's aligned to 1284 * a page, the lower 12bits are always at 0 and 1285 * dropped by GuC. They won't be part of the context 1286 * ID in the OA reports, so squash those lower bits. 1287 */ 1288 stream->specific_ctx_id = ce->lrc.lrca >> 12; 1289 1290 /* 1291 * GuC uses the top bit to signal proxy submission, so 1292 * ignore that bit. 1293 */ 1294 stream->specific_ctx_id_mask = 1295 (1U << (GEN8_CTX_ID_WIDTH - 1)) - 1; 1296 } 1297 break; 1298 1299 case 11: 1300 case 12: { 1301 stream->specific_ctx_id_mask = 1302 ((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32); 1303 /* 1304 * Pick an unused context id 1305 * 0 - BITS_PER_LONG are used by other contexts 1306 * GEN12_MAX_CONTEXT_HW_ID (0x7ff) is used by idle context 1307 */ 1308 stream->specific_ctx_id = (GEN12_MAX_CONTEXT_HW_ID - 1) << (GEN11_SW_CTX_ID_SHIFT - 32); 1309 break; 1310 } 1311 1312 default: 1313 MISSING_CASE(INTEL_GEN(ce->engine->i915)); 1314 } 1315 1316 ce->tag = stream->specific_ctx_id; 1317 1318 drm_dbg(&stream->perf->i915->drm, 1319 "filtering on ctx_id=0x%x ctx_id_mask=0x%x\n", 1320 stream->specific_ctx_id, 1321 stream->specific_ctx_id_mask); 1322 1323 return 0; 1324 } 1325 1326 /** 1327 * oa_put_render_ctx_id - counterpart to oa_get_render_ctx_id releases hold 1328 * @stream: An i915-perf stream opened for OA metrics 1329 * 1330 * In case anything needed doing to ensure the context HW ID would remain valid 1331 * for the lifetime of the stream, then that can be undone here. 
1332 */ 1333 static void oa_put_render_ctx_id(struct i915_perf_stream *stream) 1334 { 1335 struct intel_context *ce; 1336 1337 ce = fetch_and_zero(&stream->pinned_ctx); 1338 if (ce) { 1339 ce->tag = 0; /* recomputed on next submission after parking */ 1340 intel_context_unpin(ce); 1341 } 1342 1343 stream->specific_ctx_id = INVALID_CTX_ID; 1344 stream->specific_ctx_id_mask = 0; 1345 } 1346 1347 static void 1348 free_oa_buffer(struct i915_perf_stream *stream) 1349 { 1350 i915_vma_unpin_and_release(&stream->oa_buffer.vma, 1351 I915_VMA_RELEASE_MAP); 1352 1353 stream->oa_buffer.vaddr = NULL; 1354 } 1355 1356 static void 1357 free_oa_configs(struct i915_perf_stream *stream) 1358 { 1359 struct i915_oa_config_bo *oa_bo, *tmp; 1360 1361 i915_oa_config_put(stream->oa_config); 1362 llist_for_each_entry_safe(oa_bo, tmp, stream->oa_config_bos.first, node) 1363 free_oa_config_bo(oa_bo); 1364 } 1365 1366 static void 1367 free_noa_wait(struct i915_perf_stream *stream) 1368 { 1369 i915_vma_unpin_and_release(&stream->noa_wait, 0); 1370 } 1371 1372 static void i915_oa_stream_destroy(struct i915_perf_stream *stream) 1373 { 1374 struct i915_perf *perf = stream->perf; 1375 1376 BUG_ON(stream != perf->exclusive_stream); 1377 1378 /* 1379 * Unset exclusive_stream first, it will be checked while disabling 1380 * the metric set on gen8+. 1381 * 1382 * See i915_oa_init_reg_state() and lrc_configure_all_contexts() 1383 */ 1384 WRITE_ONCE(perf->exclusive_stream, NULL); 1385 perf->ops.disable_metric_set(stream); 1386 1387 free_oa_buffer(stream); 1388 1389 intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL); 1390 intel_engine_pm_put(stream->engine); 1391 1392 if (stream->ctx) 1393 oa_put_render_ctx_id(stream); 1394 1395 free_oa_configs(stream); 1396 free_noa_wait(stream); 1397 1398 if (perf->spurious_report_rs.missed) { 1399 DRM_NOTE("%d spurious OA report notices suppressed due to ratelimiting\n", 1400 perf->spurious_report_rs.missed); 1401 } 1402 } 1403 1404 static void gen7_init_oa_buffer(struct i915_perf_stream *stream) 1405 { 1406 struct intel_uncore *uncore = stream->uncore; 1407 u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma); 1408 unsigned long flags; 1409 1410 spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags); 1411 1412 /* Pre-DevBDW: OABUFFER must be set with counters off, 1413 * before OASTATUS1, but after OASTATUS2 1414 */ 1415 intel_uncore_write(uncore, GEN7_OASTATUS2, /* head */ 1416 gtt_offset | GEN7_OASTATUS2_MEM_SELECT_GGTT); 1417 stream->oa_buffer.head = gtt_offset; 1418 1419 intel_uncore_write(uncore, GEN7_OABUFFER, gtt_offset); 1420 1421 intel_uncore_write(uncore, GEN7_OASTATUS1, /* tail */ 1422 gtt_offset | OABUFFER_SIZE_16M); 1423 1424 /* Mark that we need updated tail pointers to read from... */ 1425 stream->oa_buffer.aging_tail = INVALID_TAIL_PTR; 1426 stream->oa_buffer.tail = gtt_offset; 1427 1428 spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags); 1429 1430 /* On Haswell we have to track which OASTATUS1 flags we've 1431 * already seen since they can't be cleared while periodic 1432 * sampling is enabled. 1433 */ 1434 stream->perf->gen7_latched_oastatus1 = 0; 1435 1436 /* NB: although the OA buffer will initially be allocated 1437 * zeroed via shmfs (and so this memset is redundant when 1438 * first allocating), we may re-init the OA buffer, either 1439 * when re-enabling a stream or in error/reset paths. 
1440 * 1441 * The reason we clear the buffer for each re-init is for the 1442 * sanity check in gen7_append_oa_reports() that looks at the 1443 * report-id field to make sure it's non-zero which relies on 1444 * the assumption that new reports are being written to zeroed 1445 * memory... 1446 */ 1447 memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE); 1448 } 1449 1450 static void gen8_init_oa_buffer(struct i915_perf_stream *stream) 1451 { 1452 struct intel_uncore *uncore = stream->uncore; 1453 u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma); 1454 unsigned long flags; 1455 1456 spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags); 1457 1458 intel_uncore_write(uncore, GEN8_OASTATUS, 0); 1459 intel_uncore_write(uncore, GEN8_OAHEADPTR, gtt_offset); 1460 stream->oa_buffer.head = gtt_offset; 1461 1462 intel_uncore_write(uncore, GEN8_OABUFFER_UDW, 0); 1463 1464 /* 1465 * PRM says: 1466 * 1467 * "This MMIO must be set before the OATAILPTR 1468 * register and after the OAHEADPTR register. This is 1469 * to enable proper functionality of the overflow 1470 * bit." 1471 */ 1472 intel_uncore_write(uncore, GEN8_OABUFFER, gtt_offset | 1473 OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT); 1474 intel_uncore_write(uncore, GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK); 1475 1476 /* Mark that we need updated tail pointers to read from... */ 1477 stream->oa_buffer.aging_tail = INVALID_TAIL_PTR; 1478 stream->oa_buffer.tail = gtt_offset; 1479 1480 /* 1481 * Reset state used to recognise context switches, affecting which 1482 * reports we will forward to userspace while filtering for a single 1483 * context. 1484 */ 1485 stream->oa_buffer.last_ctx_id = INVALID_CTX_ID; 1486 1487 spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags); 1488 1489 /* 1490 * NB: although the OA buffer will initially be allocated 1491 * zeroed via shmfs (and so this memset is redundant when 1492 * first allocating), we may re-init the OA buffer, either 1493 * when re-enabling a stream or in error/reset paths. 1494 * 1495 * The reason we clear the buffer for each re-init is for the 1496 * sanity check in gen8_append_oa_reports() that looks at the 1497 * reason field to make sure it's non-zero which relies on 1498 * the assumption that new reports are being written to zeroed 1499 * memory... 1500 */ 1501 memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE); 1502 } 1503 1504 static void gen12_init_oa_buffer(struct i915_perf_stream *stream) 1505 { 1506 struct intel_uncore *uncore = stream->uncore; 1507 u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma); 1508 unsigned long flags; 1509 1510 spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags); 1511 1512 intel_uncore_write(uncore, GEN12_OAG_OASTATUS, 0); 1513 intel_uncore_write(uncore, GEN12_OAG_OAHEADPTR, 1514 gtt_offset & GEN12_OAG_OAHEADPTR_MASK); 1515 stream->oa_buffer.head = gtt_offset; 1516 1517 /* 1518 * PRM says: 1519 * 1520 * "This MMIO must be set before the OATAILPTR 1521 * register and after the OAHEADPTR register. This is 1522 * to enable proper functionality of the overflow 1523 * bit." 1524 */ 1525 intel_uncore_write(uncore, GEN12_OAG_OABUFFER, gtt_offset | 1526 OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT); 1527 intel_uncore_write(uncore, GEN12_OAG_OATAILPTR, 1528 gtt_offset & GEN12_OAG_OATAILPTR_MASK); 1529 1530 /* Mark that we need updated tail pointers to read from... 
*/ 1531 stream->oa_buffer.aging_tail = INVALID_TAIL_PTR; 1532 stream->oa_buffer.tail = gtt_offset; 1533 1534 /* 1535 * Reset state used to recognise context switches, affecting which 1536 * reports we will forward to userspace while filtering for a single 1537 * context. 1538 */ 1539 stream->oa_buffer.last_ctx_id = INVALID_CTX_ID; 1540 1541 spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags); 1542 1543 /* 1544 * NB: although the OA buffer will initially be allocated 1545 * zeroed via shmfs (and so this memset is redundant when 1546 * first allocating), we may re-init the OA buffer, either 1547 * when re-enabling a stream or in error/reset paths. 1548 * 1549 * The reason we clear the buffer for each re-init is for the 1550 * sanity check in gen8_append_oa_reports() that looks at the 1551 * reason field to make sure it's non-zero which relies on 1552 * the assumption that new reports are being written to zeroed 1553 * memory... 1554 */ 1555 memset(stream->oa_buffer.vaddr, 0, 1556 stream->oa_buffer.vma->size); 1557 } 1558 1559 static int alloc_oa_buffer(struct i915_perf_stream *stream) 1560 { 1561 struct drm_i915_private *i915 = stream->perf->i915; 1562 struct drm_i915_gem_object *bo; 1563 struct i915_vma *vma; 1564 int ret; 1565 1566 if (drm_WARN_ON(&i915->drm, stream->oa_buffer.vma)) 1567 return -ENODEV; 1568 1569 BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE); 1570 BUILD_BUG_ON(OA_BUFFER_SIZE < SZ_128K || OA_BUFFER_SIZE > SZ_16M); 1571 1572 bo = i915_gem_object_create_shmem(stream->perf->i915, OA_BUFFER_SIZE); 1573 if (IS_ERR(bo)) { 1574 drm_err(&i915->drm, "Failed to allocate OA buffer\n"); 1575 return PTR_ERR(bo); 1576 } 1577 1578 i915_gem_object_set_cache_coherency(bo, I915_CACHE_LLC); 1579 1580 /* PreHSW required 512K alignment, HSW requires 16M */ 1581 vma = i915_gem_object_ggtt_pin(bo, NULL, 0, SZ_16M, 0); 1582 if (IS_ERR(vma)) { 1583 ret = PTR_ERR(vma); 1584 goto err_unref; 1585 } 1586 stream->oa_buffer.vma = vma; 1587 1588 stream->oa_buffer.vaddr = 1589 i915_gem_object_pin_map(bo, I915_MAP_WB); 1590 if (IS_ERR(stream->oa_buffer.vaddr)) { 1591 ret = PTR_ERR(stream->oa_buffer.vaddr); 1592 goto err_unpin; 1593 } 1594 1595 return 0; 1596 1597 err_unpin: 1598 __i915_vma_unpin(vma); 1599 1600 err_unref: 1601 i915_gem_object_put(bo); 1602 1603 stream->oa_buffer.vaddr = NULL; 1604 stream->oa_buffer.vma = NULL; 1605 1606 return ret; 1607 } 1608 1609 static u32 *save_restore_register(struct i915_perf_stream *stream, u32 *cs, 1610 bool save, i915_reg_t reg, u32 offset, 1611 u32 dword_count) 1612 { 1613 u32 cmd; 1614 u32 d; 1615 1616 cmd = save ? 
MI_STORE_REGISTER_MEM : MI_LOAD_REGISTER_MEM; 1617 cmd |= MI_SRM_LRM_GLOBAL_GTT; 1618 if (INTEL_GEN(stream->perf->i915) >= 8) 1619 cmd++; 1620 1621 for (d = 0; d < dword_count; d++) { 1622 *cs++ = cmd; 1623 *cs++ = i915_mmio_reg_offset(reg) + 4 * d; 1624 *cs++ = intel_gt_scratch_offset(stream->engine->gt, 1625 offset) + 4 * d; 1626 *cs++ = 0; 1627 } 1628 1629 return cs; 1630 } 1631 1632 static int alloc_noa_wait(struct i915_perf_stream *stream) 1633 { 1634 struct drm_i915_private *i915 = stream->perf->i915; 1635 struct drm_i915_gem_object *bo; 1636 struct i915_vma *vma; 1637 const u64 delay_ticks = 0xffffffffffffffff - 1638 intel_gt_ns_to_clock_interval(stream->perf->i915->ggtt.vm.gt, 1639 atomic64_read(&stream->perf->noa_programming_delay)); 1640 const u32 base = stream->engine->mmio_base; 1641 #define CS_GPR(x) GEN8_RING_CS_GPR(base, x) 1642 u32 *batch, *ts0, *cs, *jump; 1643 int ret, i; 1644 enum { 1645 START_TS, 1646 NOW_TS, 1647 DELTA_TS, 1648 JUMP_PREDICATE, 1649 DELTA_TARGET, 1650 N_CS_GPR 1651 }; 1652 1653 bo = i915_gem_object_create_internal(i915, 4096); 1654 if (IS_ERR(bo)) { 1655 drm_err(&i915->drm, 1656 "Failed to allocate NOA wait batchbuffer\n"); 1657 return PTR_ERR(bo); 1658 } 1659 1660 /* 1661 * We pin in GGTT because we jump into this buffer now because 1662 * multiple OA config BOs will have a jump to this address and it 1663 * needs to be fixed during the lifetime of the i915/perf stream. 1664 */ 1665 vma = i915_gem_object_ggtt_pin(bo, NULL, 0, 0, PIN_HIGH); 1666 if (IS_ERR(vma)) { 1667 ret = PTR_ERR(vma); 1668 goto err_unref; 1669 } 1670 1671 batch = cs = i915_gem_object_pin_map(bo, I915_MAP_WB); 1672 if (IS_ERR(batch)) { 1673 ret = PTR_ERR(batch); 1674 goto err_unpin; 1675 } 1676 1677 /* Save registers. */ 1678 for (i = 0; i < N_CS_GPR; i++) 1679 cs = save_restore_register( 1680 stream, cs, true /* save */, CS_GPR(i), 1681 INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR + 8 * i, 2); 1682 cs = save_restore_register( 1683 stream, cs, true /* save */, MI_PREDICATE_RESULT_1, 1684 INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1, 1); 1685 1686 /* First timestamp snapshot location. */ 1687 ts0 = cs; 1688 1689 /* 1690 * Initial snapshot of the timestamp register to implement the wait. 1691 * We work with 32b values, so clear out the top 32b bits of the 1692 * register because the ALU works 64bits. 1693 */ 1694 *cs++ = MI_LOAD_REGISTER_IMM(1); 1695 *cs++ = i915_mmio_reg_offset(CS_GPR(START_TS)) + 4; 1696 *cs++ = 0; 1697 *cs++ = MI_LOAD_REGISTER_REG | (3 - 2); 1698 *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base)); 1699 *cs++ = i915_mmio_reg_offset(CS_GPR(START_TS)); 1700 1701 /* 1702 * This is the location we're going to jump back into until the 1703 * required amount of time has passed. 1704 */ 1705 jump = cs; 1706 1707 /* 1708 * Take another snapshot of the timestamp register. Take care to clear 1709 * up the top 32bits of CS_GPR(1) as we're using it for other 1710 * operations below. 1711 */ 1712 *cs++ = MI_LOAD_REGISTER_IMM(1); 1713 *cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS)) + 4; 1714 *cs++ = 0; 1715 *cs++ = MI_LOAD_REGISTER_REG | (3 - 2); 1716 *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base)); 1717 *cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS)); 1718 1719 /* 1720 * Do a diff between the 2 timestamps and store the result back into 1721 * CS_GPR(1). 
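* (For reference, a summary of the MI_MATH sequence that follows: NOW_TS
* and START_TS are loaded into the ALU's SRCA/SRCB operands, MI_MATH_SUB
* computes ACCU = SRCA - SRCB, and the two stores copy the accumulator
* into DELTA_TS and the resulting carry flag into JUMP_PREDICATE.)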
1722 */
1723 *cs++ = MI_MATH(5);
1724 *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(NOW_TS));
1725 *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(START_TS));
1726 *cs++ = MI_MATH_SUB;
1727 *cs++ = MI_MATH_STORE(MI_MATH_REG(DELTA_TS), MI_MATH_REG_ACCU);
1728 *cs++ = MI_MATH_STORE(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF);
1729
1730 /*
1731 * Transfer the carry flag (set to 1 if ts1 < ts0, meaning the
1732 * timestamps have rolled over the 32 bits) into the predicate
1733 * register to be used for the predicated jump.
1734 */
1735 *cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
1736 *cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE));
1737 *cs++ = i915_mmio_reg_offset(MI_PREDICATE_RESULT_1);
1738
1739 /* Restart from the beginning if we had timestamps roll over. */
1740 *cs++ = (INTEL_GEN(i915) < 8 ?
1741 MI_BATCH_BUFFER_START :
1742 MI_BATCH_BUFFER_START_GEN8) |
1743 MI_BATCH_PREDICATE;
1744 *cs++ = i915_ggtt_offset(vma) + (ts0 - batch) * 4;
1745 *cs++ = 0;
1746
1747 /*
1748 * Now add the diff between the two previous timestamps to:
1749 * ((1 << 64) - 1) - delay (in clock ticks, i.e. delay_ticks above).
1750 *
1751 * When the Carry Flag contains 1 this means the elapsed time is
1752 * longer than the expected delay, and we can exit the wait loop.
1753 */
1754 *cs++ = MI_LOAD_REGISTER_IMM(2);
1755 *cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET));
1756 *cs++ = lower_32_bits(delay_ticks);
1757 *cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET)) + 4;
1758 *cs++ = upper_32_bits(delay_ticks);
1759
1760 *cs++ = MI_MATH(4);
1761 *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(DELTA_TS));
1762 *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(DELTA_TARGET));
1763 *cs++ = MI_MATH_ADD;
1764 *cs++ = MI_MATH_STOREINV(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF);
1765
1766 *cs++ = MI_ARB_CHECK;
1767
1768 /*
1769 * Transfer the result into the predicate register to be used for the
1770 * predicated jump.
1771 */
1772 *cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
1773 *cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE));
1774 *cs++ = i915_mmio_reg_offset(MI_PREDICATE_RESULT_1);
1775
1776 /* Predicate the jump. */
1777 *cs++ = (INTEL_GEN(i915) < 8 ?
1778 MI_BATCH_BUFFER_START :
1779 MI_BATCH_BUFFER_START_GEN8) |
1780 MI_BATCH_PREDICATE;
1781 *cs++ = i915_ggtt_offset(vma) + (jump - batch) * 4;
1782 *cs++ = 0;
1783
1784 /* Restore registers. */
1785 for (i = 0; i < N_CS_GPR; i++)
1786 cs = save_restore_register(
1787 stream, cs, false /* restore */, CS_GPR(i),
1788 INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR + 8 * i, 2);
1789 cs = save_restore_register(
1790 stream, cs, false /* restore */, MI_PREDICATE_RESULT_1,
1791 INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1, 1);
1792
1793 /* And return to the ring.
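*
* To summarise, the batch constructed above is roughly:
*
*   save CS_GPRs and MI_PREDICATE_RESULT_1 to scratch
*   START_TS = RING_TIMESTAMP
* loop:
*   NOW_TS = RING_TIMESTAMP
*   DELTA_TS = NOW_TS - START_TS (restart on 32bit timestamp rollover)
*   predicated jump back to loop while DELTA_TS < delay
*   restore CS_GPRs and MI_PREDICATE_RESULT_1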
*/ 1794 *cs++ = MI_BATCH_BUFFER_END; 1795 1796 GEM_BUG_ON(cs - batch > PAGE_SIZE / sizeof(*batch)); 1797 1798 i915_gem_object_flush_map(bo); 1799 __i915_gem_object_release_map(bo); 1800 1801 stream->noa_wait = vma; 1802 return 0; 1803 1804 err_unpin: 1805 i915_vma_unpin_and_release(&vma, 0); 1806 err_unref: 1807 i915_gem_object_put(bo); 1808 return ret; 1809 } 1810 1811 static u32 *write_cs_mi_lri(u32 *cs, 1812 const struct i915_oa_reg *reg_data, 1813 u32 n_regs) 1814 { 1815 u32 i; 1816 1817 for (i = 0; i < n_regs; i++) { 1818 if ((i % MI_LOAD_REGISTER_IMM_MAX_REGS) == 0) { 1819 u32 n_lri = min_t(u32, 1820 n_regs - i, 1821 MI_LOAD_REGISTER_IMM_MAX_REGS); 1822 1823 *cs++ = MI_LOAD_REGISTER_IMM(n_lri); 1824 } 1825 *cs++ = i915_mmio_reg_offset(reg_data[i].addr); 1826 *cs++ = reg_data[i].value; 1827 } 1828 1829 return cs; 1830 } 1831 1832 static int num_lri_dwords(int num_regs) 1833 { 1834 int count = 0; 1835 1836 if (num_regs > 0) { 1837 count += DIV_ROUND_UP(num_regs, MI_LOAD_REGISTER_IMM_MAX_REGS); 1838 count += num_regs * 2; 1839 } 1840 1841 return count; 1842 } 1843 1844 static struct i915_oa_config_bo * 1845 alloc_oa_config_buffer(struct i915_perf_stream *stream, 1846 struct i915_oa_config *oa_config) 1847 { 1848 struct drm_i915_gem_object *obj; 1849 struct i915_oa_config_bo *oa_bo; 1850 size_t config_length = 0; 1851 u32 *cs; 1852 int err; 1853 1854 oa_bo = kzalloc(sizeof(*oa_bo), GFP_KERNEL); 1855 if (!oa_bo) 1856 return ERR_PTR(-ENOMEM); 1857 1858 config_length += num_lri_dwords(oa_config->mux_regs_len); 1859 config_length += num_lri_dwords(oa_config->b_counter_regs_len); 1860 config_length += num_lri_dwords(oa_config->flex_regs_len); 1861 config_length += 3; /* MI_BATCH_BUFFER_START */ 1862 config_length = ALIGN(sizeof(u32) * config_length, I915_GTT_PAGE_SIZE); 1863 1864 obj = i915_gem_object_create_shmem(stream->perf->i915, config_length); 1865 if (IS_ERR(obj)) { 1866 err = PTR_ERR(obj); 1867 goto err_free; 1868 } 1869 1870 cs = i915_gem_object_pin_map(obj, I915_MAP_WB); 1871 if (IS_ERR(cs)) { 1872 err = PTR_ERR(cs); 1873 goto err_oa_bo; 1874 } 1875 1876 cs = write_cs_mi_lri(cs, 1877 oa_config->mux_regs, 1878 oa_config->mux_regs_len); 1879 cs = write_cs_mi_lri(cs, 1880 oa_config->b_counter_regs, 1881 oa_config->b_counter_regs_len); 1882 cs = write_cs_mi_lri(cs, 1883 oa_config->flex_regs, 1884 oa_config->flex_regs_len); 1885 1886 /* Jump into the active wait. */ 1887 *cs++ = (INTEL_GEN(stream->perf->i915) < 8 ? 1888 MI_BATCH_BUFFER_START : 1889 MI_BATCH_BUFFER_START_GEN8); 1890 *cs++ = i915_ggtt_offset(stream->noa_wait); 1891 *cs++ = 0; 1892 1893 i915_gem_object_flush_map(obj); 1894 __i915_gem_object_release_map(obj); 1895 1896 oa_bo->vma = i915_vma_instance(obj, 1897 &stream->engine->gt->ggtt->vm, 1898 NULL); 1899 if (IS_ERR(oa_bo->vma)) { 1900 err = PTR_ERR(oa_bo->vma); 1901 goto err_oa_bo; 1902 } 1903 1904 oa_bo->oa_config = i915_oa_config_get(oa_config); 1905 llist_add(&oa_bo->node, &stream->oa_config_bos); 1906 1907 return oa_bo; 1908 1909 err_oa_bo: 1910 i915_gem_object_put(obj); 1911 err_free: 1912 kfree(oa_bo); 1913 return ERR_PTR(err); 1914 } 1915 1916 static struct i915_vma * 1917 get_oa_vma(struct i915_perf_stream *stream, struct i915_oa_config *oa_config) 1918 { 1919 struct i915_oa_config_bo *oa_bo; 1920 1921 /* 1922 * Look for the buffer in the already allocated BOs attached 1923 * to the stream. 
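* Note the lookup below matches on both the config pointer and its
* uuid, so re-applying a previously seen config reuses the batch
* built for it rather than allocating a new one.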
1924 */ 1925 llist_for_each_entry(oa_bo, stream->oa_config_bos.first, node) { 1926 if (oa_bo->oa_config == oa_config && 1927 memcmp(oa_bo->oa_config->uuid, 1928 oa_config->uuid, 1929 sizeof(oa_config->uuid)) == 0) 1930 goto out; 1931 } 1932 1933 oa_bo = alloc_oa_config_buffer(stream, oa_config); 1934 if (IS_ERR(oa_bo)) 1935 return ERR_CAST(oa_bo); 1936 1937 out: 1938 return i915_vma_get(oa_bo->vma); 1939 } 1940 1941 static int 1942 emit_oa_config(struct i915_perf_stream *stream, 1943 struct i915_oa_config *oa_config, 1944 struct intel_context *ce, 1945 struct i915_active *active) 1946 { 1947 struct i915_request *rq; 1948 struct i915_vma *vma; 1949 struct i915_gem_ww_ctx ww; 1950 int err; 1951 1952 vma = get_oa_vma(stream, oa_config); 1953 if (IS_ERR(vma)) 1954 return PTR_ERR(vma); 1955 1956 i915_gem_ww_ctx_init(&ww, true); 1957 retry: 1958 err = i915_gem_object_lock(vma->obj, &ww); 1959 if (err) 1960 goto err; 1961 1962 err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_GLOBAL | PIN_HIGH); 1963 if (err) 1964 goto err; 1965 1966 intel_engine_pm_get(ce->engine); 1967 rq = i915_request_create(ce); 1968 intel_engine_pm_put(ce->engine); 1969 if (IS_ERR(rq)) { 1970 err = PTR_ERR(rq); 1971 goto err_vma_unpin; 1972 } 1973 1974 if (!IS_ERR_OR_NULL(active)) { 1975 /* After all individual context modifications */ 1976 err = i915_request_await_active(rq, active, 1977 I915_ACTIVE_AWAIT_ACTIVE); 1978 if (err) 1979 goto err_add_request; 1980 1981 err = i915_active_add_request(active, rq); 1982 if (err) 1983 goto err_add_request; 1984 } 1985 1986 err = i915_request_await_object(rq, vma->obj, 0); 1987 if (!err) 1988 err = i915_vma_move_to_active(vma, rq, 0); 1989 if (err) 1990 goto err_add_request; 1991 1992 err = rq->engine->emit_bb_start(rq, 1993 vma->node.start, 0, 1994 I915_DISPATCH_SECURE); 1995 if (err) 1996 goto err_add_request; 1997 1998 err_add_request: 1999 i915_request_add(rq); 2000 err_vma_unpin: 2001 i915_vma_unpin(vma); 2002 err: 2003 if (err == -EDEADLK) { 2004 err = i915_gem_ww_ctx_backoff(&ww); 2005 if (!err) 2006 goto retry; 2007 } 2008 2009 i915_gem_ww_ctx_fini(&ww); 2010 i915_vma_put(vma); 2011 return err; 2012 } 2013 2014 static struct intel_context *oa_context(struct i915_perf_stream *stream) 2015 { 2016 return stream->pinned_ctx ?: stream->engine->kernel_context; 2017 } 2018 2019 static int 2020 hsw_enable_metric_set(struct i915_perf_stream *stream, 2021 struct i915_active *active) 2022 { 2023 struct intel_uncore *uncore = stream->uncore; 2024 2025 /* 2026 * PRM: 2027 * 2028 * OA unit is using “crclk” for its functionality. When trunk 2029 * level clock gating takes place, OA clock would be gated, 2030 * unable to count the events from non-render clock domain. 2031 * Render clock gating must be disabled when OA is enabled to 2032 * count the events from non-render domain. Unit level clock 2033 * gating for RCS should also be disabled. 
2034 */ 2035 intel_uncore_rmw(uncore, GEN7_MISCCPCTL, 2036 GEN7_DOP_CLOCK_GATE_ENABLE, 0); 2037 intel_uncore_rmw(uncore, GEN6_UCGCTL1, 2038 0, GEN6_CSUNIT_CLOCK_GATE_DISABLE); 2039 2040 return emit_oa_config(stream, 2041 stream->oa_config, oa_context(stream), 2042 active); 2043 } 2044 2045 static void hsw_disable_metric_set(struct i915_perf_stream *stream) 2046 { 2047 struct intel_uncore *uncore = stream->uncore; 2048 2049 intel_uncore_rmw(uncore, GEN6_UCGCTL1, 2050 GEN6_CSUNIT_CLOCK_GATE_DISABLE, 0); 2051 intel_uncore_rmw(uncore, GEN7_MISCCPCTL, 2052 0, GEN7_DOP_CLOCK_GATE_ENABLE); 2053 2054 intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0); 2055 } 2056 2057 static u32 oa_config_flex_reg(const struct i915_oa_config *oa_config, 2058 i915_reg_t reg) 2059 { 2060 u32 mmio = i915_mmio_reg_offset(reg); 2061 int i; 2062 2063 /* 2064 * This arbitrary default will select the 'EU FPU0 Pipeline 2065 * Active' event. In the future it's anticipated that there 2066 * will be an explicit 'No Event' we can select, but not yet... 2067 */ 2068 if (!oa_config) 2069 return 0; 2070 2071 for (i = 0; i < oa_config->flex_regs_len; i++) { 2072 if (i915_mmio_reg_offset(oa_config->flex_regs[i].addr) == mmio) 2073 return oa_config->flex_regs[i].value; 2074 } 2075 2076 return 0; 2077 } 2078 /* 2079 * NB: It must always remain pointer safe to run this even if the OA unit 2080 * has been disabled. 2081 * 2082 * It's fine to put out-of-date values into these per-context registers 2083 * in the case that the OA unit has been disabled. 2084 */ 2085 static void 2086 gen8_update_reg_state_unlocked(const struct intel_context *ce, 2087 const struct i915_perf_stream *stream) 2088 { 2089 u32 ctx_oactxctrl = stream->perf->ctx_oactxctrl_offset; 2090 u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset; 2091 /* The MMIO offsets for Flex EU registers aren't contiguous */ 2092 i915_reg_t flex_regs[] = { 2093 EU_PERF_CNTL0, 2094 EU_PERF_CNTL1, 2095 EU_PERF_CNTL2, 2096 EU_PERF_CNTL3, 2097 EU_PERF_CNTL4, 2098 EU_PERF_CNTL5, 2099 EU_PERF_CNTL6, 2100 }; 2101 u32 *reg_state = ce->lrc_reg_state; 2102 int i; 2103 2104 reg_state[ctx_oactxctrl + 1] = 2105 (stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) | 2106 (stream->periodic ? 
GEN8_OA_TIMER_ENABLE : 0) | 2107 GEN8_OA_COUNTER_RESUME; 2108 2109 for (i = 0; i < ARRAY_SIZE(flex_regs); i++) 2110 reg_state[ctx_flexeu0 + i * 2 + 1] = 2111 oa_config_flex_reg(stream->oa_config, flex_regs[i]); 2112 } 2113 2114 struct flex { 2115 i915_reg_t reg; 2116 u32 offset; 2117 u32 value; 2118 }; 2119 2120 static int 2121 gen8_store_flex(struct i915_request *rq, 2122 struct intel_context *ce, 2123 const struct flex *flex, unsigned int count) 2124 { 2125 u32 offset; 2126 u32 *cs; 2127 2128 cs = intel_ring_begin(rq, 4 * count); 2129 if (IS_ERR(cs)) 2130 return PTR_ERR(cs); 2131 2132 offset = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET; 2133 do { 2134 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; 2135 *cs++ = offset + flex->offset * sizeof(u32); 2136 *cs++ = 0; 2137 *cs++ = flex->value; 2138 } while (flex++, --count); 2139 2140 intel_ring_advance(rq, cs); 2141 2142 return 0; 2143 } 2144 2145 static int 2146 gen8_load_flex(struct i915_request *rq, 2147 struct intel_context *ce, 2148 const struct flex *flex, unsigned int count) 2149 { 2150 u32 *cs; 2151 2152 GEM_BUG_ON(!count || count > 63); 2153 2154 cs = intel_ring_begin(rq, 2 * count + 2); 2155 if (IS_ERR(cs)) 2156 return PTR_ERR(cs); 2157 2158 *cs++ = MI_LOAD_REGISTER_IMM(count); 2159 do { 2160 *cs++ = i915_mmio_reg_offset(flex->reg); 2161 *cs++ = flex->value; 2162 } while (flex++, --count); 2163 *cs++ = MI_NOOP; 2164 2165 intel_ring_advance(rq, cs); 2166 2167 return 0; 2168 } 2169 2170 static int gen8_modify_context(struct intel_context *ce, 2171 const struct flex *flex, unsigned int count) 2172 { 2173 struct i915_request *rq; 2174 int err; 2175 2176 rq = intel_engine_create_kernel_request(ce->engine); 2177 if (IS_ERR(rq)) 2178 return PTR_ERR(rq); 2179 2180 /* Serialise with the remote context */ 2181 err = intel_context_prepare_remote_request(ce, rq); 2182 if (err == 0) 2183 err = gen8_store_flex(rq, ce, flex, count); 2184 2185 i915_request_add(rq); 2186 return err; 2187 } 2188 2189 static int 2190 gen8_modify_self(struct intel_context *ce, 2191 const struct flex *flex, unsigned int count, 2192 struct i915_active *active) 2193 { 2194 struct i915_request *rq; 2195 int err; 2196 2197 intel_engine_pm_get(ce->engine); 2198 rq = i915_request_create(ce); 2199 intel_engine_pm_put(ce->engine); 2200 if (IS_ERR(rq)) 2201 return PTR_ERR(rq); 2202 2203 if (!IS_ERR_OR_NULL(active)) { 2204 err = i915_active_add_request(active, rq); 2205 if (err) 2206 goto err_add_request; 2207 } 2208 2209 err = gen8_load_flex(rq, ce, flex, count); 2210 if (err) 2211 goto err_add_request; 2212 2213 err_add_request: 2214 i915_request_add(rq); 2215 return err; 2216 } 2217 2218 static int gen8_configure_context(struct i915_gem_context *ctx, 2219 struct flex *flex, unsigned int count) 2220 { 2221 struct i915_gem_engines_iter it; 2222 struct intel_context *ce; 2223 int err = 0; 2224 2225 for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { 2226 GEM_BUG_ON(ce == ce->engine->kernel_context); 2227 2228 if (ce->engine->class != RENDER_CLASS) 2229 continue; 2230 2231 /* Otherwise OA settings will be set upon first use */ 2232 if (!intel_context_pin_if_active(ce)) 2233 continue; 2234 2235 flex->value = intel_sseu_make_rpcs(ce->engine->gt, &ce->sseu); 2236 err = gen8_modify_context(ce, flex, count); 2237 2238 intel_context_unpin(ce); 2239 if (err) 2240 break; 2241 } 2242 i915_gem_context_unlock_engines(ctx); 2243 2244 return err; 2245 } 2246 2247 static int gen12_configure_oar_context(struct i915_perf_stream *stream, 2248 struct i915_active *active) 2249 { 
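/*
* The OAR programming below happens in two steps: regs_context is
* written into the pinned context's image (via gen8_modify_context()),
* while regs_lri is emitted as an MI_LOAD_REGISTER_IMM from the
* context itself (via gen8_modify_self()).
*/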
2250 int err;
2251 struct intel_context *ce = stream->pinned_ctx;
2252 u32 format = stream->oa_buffer.format;
2253 struct flex regs_context[] = {
2254 {
2255 GEN8_OACTXCONTROL,
2256 stream->perf->ctx_oactxctrl_offset + 1,
2257 active ? GEN8_OA_COUNTER_RESUME : 0,
2258 },
2259 };
2260 /* Offsets in regs_lri are not used since this configuration is only
2261 * applied using LRI. Initialize the correct offsets for posterity.
2262 */
2263 #define GEN12_OAR_OACONTROL_OFFSET 0x5B0
2264 struct flex regs_lri[] = {
2265 {
2266 GEN12_OAR_OACONTROL,
2267 GEN12_OAR_OACONTROL_OFFSET + 1,
2268 (format << GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT) |
2269 (active ? GEN12_OAR_OACONTROL_COUNTER_ENABLE : 0)
2270 },
2271 {
2272 RING_CONTEXT_CONTROL(ce->engine->mmio_base),
2273 CTX_CONTEXT_CONTROL,
2274 _MASKED_FIELD(GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE,
2275 active ?
2276 GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE :
2277 0)
2278 },
2279 };
2280
2281 /* Modify the context image of the pinned context with regs_context. */
2282 err = intel_context_lock_pinned(ce);
2283 if (err)
2284 return err;
2285
2286 err = gen8_modify_context(ce, regs_context, ARRAY_SIZE(regs_context));
2287 intel_context_unlock_pinned(ce);
2288 if (err)
2289 return err;
2290
2291 /* Apply regs_lri using LRI with pinned context */
2292 return gen8_modify_self(ce, regs_lri, ARRAY_SIZE(regs_lri), active);
2293 }
2294
2295 /*
2296 * Manages updating the per-context aspects of the OA stream
2297 * configuration across all contexts.
2298 *
2299 * The awkward consideration here is that OACTXCONTROL controls the
2300 * exponent for periodic sampling which is primarily used for system
2301 * wide profiling where we'd like a consistent sampling period even in
2302 * the face of context switches.
2303 *
2304 * Our approach of updating the register state context (as opposed to
2305 * say using a workaround batch buffer) ensures that the hardware
2306 * won't automatically reload an out-of-date timer exponent even
2307 * transiently before a WA BB could be parsed.
2308 *
2309 * This function needs to:
2310 * - Ensure the currently running context's per-context OA state is
2311 * updated
2312 * - Ensure that all existing contexts will have the correct per-context
2313 * OA state if they are scheduled for use.
2314 * - Ensure any new contexts will be initialized with the correct
2315 * per-context OA state.
2316 *
2317 * Note: it's only the RCS/Render context that has any OA state.
2318 * Note: the first flex register passed must always be R_PWR_CLK_STATE
2319 */
2320 static int
2321 oa_configure_all_contexts(struct i915_perf_stream *stream,
2322 struct flex *regs,
2323 size_t num_regs,
2324 struct i915_active *active)
2325 {
2326 struct drm_i915_private *i915 = stream->perf->i915;
2327 struct intel_engine_cs *engine;
2328 struct i915_gem_context *ctx, *cn;
2329 int err;
2330
2331 lockdep_assert_held(&stream->perf->lock);
2332
2333 /*
2334 * The OA register config is set up through the context image. This image
2335 * might be written to by the GPU on context switch (in particular on
2336 * lite-restore). This means we can't safely update a context's image
2337 * if this context is scheduled/submitted to run on the GPU.
2338 *
2339 * We could emit the OA register config through the batch buffer but
2340 * this might leave a small interval of time where the OA unit is
2341 * configured at an invalid sampling period.
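*
* A sketch of what happens below instead: every existing context that
* is currently pinned has its image rewritten with MI_STORE_DWORD_IMM
* from a kernel context request (gen8_configure_context()), and then
* each engine's kernel_context reprograms itself with
* MI_LOAD_REGISTER_IMM (gen8_modify_self()) so that we still get
* events while idle.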
2342 * 2343 * Note that since we emit all requests from a single ring, there 2344 * is still an implicit global barrier here that may cause a high 2345 * priority context to wait for an otherwise independent low priority 2346 * context. Contexts idle at the time of reconfiguration are not 2347 * trapped behind the barrier. 2348 */ 2349 spin_lock(&i915->gem.contexts.lock); 2350 list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) { 2351 if (!kref_get_unless_zero(&ctx->ref)) 2352 continue; 2353 2354 spin_unlock(&i915->gem.contexts.lock); 2355 2356 err = gen8_configure_context(ctx, regs, num_regs); 2357 if (err) { 2358 i915_gem_context_put(ctx); 2359 return err; 2360 } 2361 2362 spin_lock(&i915->gem.contexts.lock); 2363 list_safe_reset_next(ctx, cn, link); 2364 i915_gem_context_put(ctx); 2365 } 2366 spin_unlock(&i915->gem.contexts.lock); 2367 2368 /* 2369 * After updating all other contexts, we need to modify ourselves. 2370 * If we don't modify the kernel_context, we do not get events while 2371 * idle. 2372 */ 2373 for_each_uabi_engine(engine, i915) { 2374 struct intel_context *ce = engine->kernel_context; 2375 2376 if (engine->class != RENDER_CLASS) 2377 continue; 2378 2379 regs[0].value = intel_sseu_make_rpcs(engine->gt, &ce->sseu); 2380 2381 err = gen8_modify_self(ce, regs, num_regs, active); 2382 if (err) 2383 return err; 2384 } 2385 2386 return 0; 2387 } 2388 2389 static int 2390 gen12_configure_all_contexts(struct i915_perf_stream *stream, 2391 const struct i915_oa_config *oa_config, 2392 struct i915_active *active) 2393 { 2394 struct flex regs[] = { 2395 { 2396 GEN8_R_PWR_CLK_STATE, 2397 CTX_R_PWR_CLK_STATE, 2398 }, 2399 }; 2400 2401 return oa_configure_all_contexts(stream, 2402 regs, ARRAY_SIZE(regs), 2403 active); 2404 } 2405 2406 static int 2407 lrc_configure_all_contexts(struct i915_perf_stream *stream, 2408 const struct i915_oa_config *oa_config, 2409 struct i915_active *active) 2410 { 2411 /* The MMIO offsets for Flex EU registers aren't contiguous */ 2412 const u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset; 2413 #define ctx_flexeuN(N) (ctx_flexeu0 + 2 * (N) + 1) 2414 struct flex regs[] = { 2415 { 2416 GEN8_R_PWR_CLK_STATE, 2417 CTX_R_PWR_CLK_STATE, 2418 }, 2419 { 2420 GEN8_OACTXCONTROL, 2421 stream->perf->ctx_oactxctrl_offset + 1, 2422 }, 2423 { EU_PERF_CNTL0, ctx_flexeuN(0) }, 2424 { EU_PERF_CNTL1, ctx_flexeuN(1) }, 2425 { EU_PERF_CNTL2, ctx_flexeuN(2) }, 2426 { EU_PERF_CNTL3, ctx_flexeuN(3) }, 2427 { EU_PERF_CNTL4, ctx_flexeuN(4) }, 2428 { EU_PERF_CNTL5, ctx_flexeuN(5) }, 2429 { EU_PERF_CNTL6, ctx_flexeuN(6) }, 2430 }; 2431 #undef ctx_flexeuN 2432 int i; 2433 2434 regs[1].value = 2435 (stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) | 2436 (stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) | 2437 GEN8_OA_COUNTER_RESUME; 2438 2439 for (i = 2; i < ARRAY_SIZE(regs); i++) 2440 regs[i].value = oa_config_flex_reg(oa_config, regs[i].reg); 2441 2442 return oa_configure_all_contexts(stream, 2443 regs, ARRAY_SIZE(regs), 2444 active); 2445 } 2446 2447 static int 2448 gen8_enable_metric_set(struct i915_perf_stream *stream, 2449 struct i915_active *active) 2450 { 2451 struct intel_uncore *uncore = stream->uncore; 2452 struct i915_oa_config *oa_config = stream->oa_config; 2453 int ret; 2454 2455 /* 2456 * We disable slice/unslice clock ratio change reports on SKL since 2457 * they are too noisy. 
The HW generates a lot of redundant reports 2458 * where the ratio hasn't really changed causing a lot of redundant 2459 * work to processes and increasing the chances we'll hit buffer 2460 * overruns. 2461 * 2462 * Although we don't currently use the 'disable overrun' OABUFFER 2463 * feature it's worth noting that clock ratio reports have to be 2464 * disabled before considering to use that feature since the HW doesn't 2465 * correctly block these reports. 2466 * 2467 * Currently none of the high-level metrics we have depend on knowing 2468 * this ratio to normalize. 2469 * 2470 * Note: This register is not power context saved and restored, but 2471 * that's OK considering that we disable RC6 while the OA unit is 2472 * enabled. 2473 * 2474 * The _INCLUDE_CLK_RATIO bit allows the slice/unslice frequency to 2475 * be read back from automatically triggered reports, as part of the 2476 * RPT_ID field. 2477 */ 2478 if (IS_GEN_RANGE(stream->perf->i915, 9, 11)) { 2479 intel_uncore_write(uncore, GEN8_OA_DEBUG, 2480 _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS | 2481 GEN9_OA_DEBUG_INCLUDE_CLK_RATIO)); 2482 } 2483 2484 /* 2485 * Update all contexts prior writing the mux configurations as we need 2486 * to make sure all slices/subslices are ON before writing to NOA 2487 * registers. 2488 */ 2489 ret = lrc_configure_all_contexts(stream, oa_config, active); 2490 if (ret) 2491 return ret; 2492 2493 return emit_oa_config(stream, 2494 stream->oa_config, oa_context(stream), 2495 active); 2496 } 2497 2498 static u32 oag_report_ctx_switches(const struct i915_perf_stream *stream) 2499 { 2500 return _MASKED_FIELD(GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS, 2501 (stream->sample_flags & SAMPLE_OA_REPORT) ? 2502 0 : GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS); 2503 } 2504 2505 static int 2506 gen12_enable_metric_set(struct i915_perf_stream *stream, 2507 struct i915_active *active) 2508 { 2509 struct intel_uncore *uncore = stream->uncore; 2510 struct i915_oa_config *oa_config = stream->oa_config; 2511 bool periodic = stream->periodic; 2512 u32 period_exponent = stream->period_exponent; 2513 int ret; 2514 2515 intel_uncore_write(uncore, GEN12_OAG_OA_DEBUG, 2516 /* Disable clk ratio reports, like previous Gens. */ 2517 _MASKED_BIT_ENABLE(GEN12_OAG_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS | 2518 GEN12_OAG_OA_DEBUG_INCLUDE_CLK_RATIO) | 2519 /* 2520 * If the user didn't require OA reports, instruct 2521 * the hardware not to emit ctx switch reports. 2522 */ 2523 oag_report_ctx_switches(stream)); 2524 2525 intel_uncore_write(uncore, GEN12_OAG_OAGLBCTXCTRL, periodic ? 2526 (GEN12_OAG_OAGLBCTXCTRL_COUNTER_RESUME | 2527 GEN12_OAG_OAGLBCTXCTRL_TIMER_ENABLE | 2528 (period_exponent << GEN12_OAG_OAGLBCTXCTRL_TIMER_PERIOD_SHIFT)) 2529 : 0); 2530 2531 /* 2532 * Update all contexts prior writing the mux configurations as we need 2533 * to make sure all slices/subslices are ON before writing to NOA 2534 * registers. 2535 */ 2536 ret = gen12_configure_all_contexts(stream, oa_config, active); 2537 if (ret) 2538 return ret; 2539 2540 /* 2541 * For Gen12, performance counters are context 2542 * saved/restored. Only enable it for the context that 2543 * requested this. 
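* That per-context enabling happens via the OAR programming in
* gen12_configure_oar_context() just below.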
2544 */
2545 if (stream->ctx) {
2546 ret = gen12_configure_oar_context(stream, active);
2547 if (ret)
2548 return ret;
2549 }
2550
2551 return emit_oa_config(stream,
2552 stream->oa_config, oa_context(stream),
2553 active);
2554 }
2555
2556 static void gen8_disable_metric_set(struct i915_perf_stream *stream)
2557 {
2558 struct intel_uncore *uncore = stream->uncore;
2559
2560 /* Reset all contexts' slices/subslices configurations. */
2561 lrc_configure_all_contexts(stream, NULL, NULL);
2562
2563 intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0);
2564 }
2565
2566 static void gen10_disable_metric_set(struct i915_perf_stream *stream)
2567 {
2568 struct intel_uncore *uncore = stream->uncore;
2569
2570 /* Reset all contexts' slices/subslices configurations. */
2571 lrc_configure_all_contexts(stream, NULL, NULL);
2572
2573 /* Make sure we disable noa to save power. */
2574 intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
2575 }
2576
2577 static void gen12_disable_metric_set(struct i915_perf_stream *stream)
2578 {
2579 struct intel_uncore *uncore = stream->uncore;
2580
2581 /* Reset all contexts' slices/subslices configurations. */
2582 gen12_configure_all_contexts(stream, NULL, NULL);
2583
2584 /* disable the context save/restore or OAR counters */
2585 if (stream->ctx)
2586 gen12_configure_oar_context(stream, NULL);
2587
2588 /* Make sure we disable noa to save power. */
2589 intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
2590 }
2591
2592 static void gen7_oa_enable(struct i915_perf_stream *stream)
2593 {
2594 struct intel_uncore *uncore = stream->uncore;
2595 struct i915_gem_context *ctx = stream->ctx;
2596 u32 ctx_id = stream->specific_ctx_id;
2597 bool periodic = stream->periodic;
2598 u32 period_exponent = stream->period_exponent;
2599 u32 report_format = stream->oa_buffer.format;
2600
2601 /*
2602 * Reset buf pointers so we don't forward reports from before now.
2603 *
2604 * Think carefully before trying to avoid this, since it
2605 * also ensures status flags and the buffer itself are cleared
2606 * in error paths, and we have checks for invalid reports based
2607 * on the assumption that certain fields are written to zeroed
2608 * memory, which this helps maintain.
2609 */
2610 gen7_init_oa_buffer(stream);
2611
2612 intel_uncore_write(uncore, GEN7_OACONTROL,
2613 (ctx_id & GEN7_OACONTROL_CTX_MASK) |
2614 (period_exponent <<
2615 GEN7_OACONTROL_TIMER_PERIOD_SHIFT) |
2616 (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) |
2617 (report_format << GEN7_OACONTROL_FORMAT_SHIFT) |
2618 (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) |
2619 GEN7_OACONTROL_ENABLE);
2620 }
2621
2622 static void gen8_oa_enable(struct i915_perf_stream *stream)
2623 {
2624 struct intel_uncore *uncore = stream->uncore;
2625 u32 report_format = stream->oa_buffer.format;
2626
2627 /*
2628 * Reset buf pointers so we don't forward reports from before now.
2629 *
2630 * Think carefully before trying to avoid this, since it
2631 * also ensures status flags and the buffer itself are cleared
2632 * in error paths, and we have checks for invalid reports based
2633 * on the assumption that certain fields are written to zeroed
2634 * memory, which this helps maintain.
2635 */ 2636 gen8_init_oa_buffer(stream); 2637 2638 /* 2639 * Note: we don't rely on the hardware to perform single context 2640 * filtering and instead filter on the cpu based on the context-id 2641 * field of reports 2642 */ 2643 intel_uncore_write(uncore, GEN8_OACONTROL, 2644 (report_format << GEN8_OA_REPORT_FORMAT_SHIFT) | 2645 GEN8_OA_COUNTER_ENABLE); 2646 } 2647 2648 static void gen12_oa_enable(struct i915_perf_stream *stream) 2649 { 2650 struct intel_uncore *uncore = stream->uncore; 2651 u32 report_format = stream->oa_buffer.format; 2652 2653 /* 2654 * If we don't want OA reports from the OA buffer, then we don't even 2655 * need to program the OAG unit. 2656 */ 2657 if (!(stream->sample_flags & SAMPLE_OA_REPORT)) 2658 return; 2659 2660 gen12_init_oa_buffer(stream); 2661 2662 intel_uncore_write(uncore, GEN12_OAG_OACONTROL, 2663 (report_format << GEN12_OAG_OACONTROL_OA_COUNTER_FORMAT_SHIFT) | 2664 GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE); 2665 } 2666 2667 /** 2668 * i915_oa_stream_enable - handle `I915_PERF_IOCTL_ENABLE` for OA stream 2669 * @stream: An i915 perf stream opened for OA metrics 2670 * 2671 * [Re]enables hardware periodic sampling according to the period configured 2672 * when opening the stream. This also starts a hrtimer that will periodically 2673 * check for data in the circular OA buffer for notifying userspace (e.g. 2674 * during a read() or poll()). 2675 */ 2676 static void i915_oa_stream_enable(struct i915_perf_stream *stream) 2677 { 2678 stream->pollin = false; 2679 2680 stream->perf->ops.oa_enable(stream); 2681 2682 if (stream->sample_flags & SAMPLE_OA_REPORT) 2683 hrtimer_start(&stream->poll_check_timer, 2684 ns_to_ktime(stream->poll_oa_period), 2685 HRTIMER_MODE_REL_PINNED); 2686 } 2687 2688 static void gen7_oa_disable(struct i915_perf_stream *stream) 2689 { 2690 struct intel_uncore *uncore = stream->uncore; 2691 2692 intel_uncore_write(uncore, GEN7_OACONTROL, 0); 2693 if (intel_wait_for_register(uncore, 2694 GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0, 2695 50)) 2696 drm_err(&stream->perf->i915->drm, 2697 "wait for OA to be disabled timed out\n"); 2698 } 2699 2700 static void gen8_oa_disable(struct i915_perf_stream *stream) 2701 { 2702 struct intel_uncore *uncore = stream->uncore; 2703 2704 intel_uncore_write(uncore, GEN8_OACONTROL, 0); 2705 if (intel_wait_for_register(uncore, 2706 GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0, 2707 50)) 2708 drm_err(&stream->perf->i915->drm, 2709 "wait for OA to be disabled timed out\n"); 2710 } 2711 2712 static void gen12_oa_disable(struct i915_perf_stream *stream) 2713 { 2714 struct intel_uncore *uncore = stream->uncore; 2715 2716 intel_uncore_write(uncore, GEN12_OAG_OACONTROL, 0); 2717 if (intel_wait_for_register(uncore, 2718 GEN12_OAG_OACONTROL, 2719 GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE, 0, 2720 50)) 2721 drm_err(&stream->perf->i915->drm, 2722 "wait for OA to be disabled timed out\n"); 2723 2724 intel_uncore_write(uncore, GEN12_OA_TLB_INV_CR, 1); 2725 if (intel_wait_for_register(uncore, 2726 GEN12_OA_TLB_INV_CR, 2727 1, 0, 2728 50)) 2729 drm_err(&stream->perf->i915->drm, 2730 "wait for OA tlb invalidate timed out\n"); 2731 } 2732 2733 /** 2734 * i915_oa_stream_disable - handle `I915_PERF_IOCTL_DISABLE` for OA stream 2735 * @stream: An i915 perf stream opened for OA metrics 2736 * 2737 * Stops the OA unit from periodically writing counter reports into the 2738 * circular OA buffer. This also stops the hrtimer that periodically checks for 2739 * data in the circular OA buffer, for notifying userspace. 
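*
* From userspace this is reached with something like
* ioctl(stream_fd, I915_PERF_IOCTL_DISABLE, 0); subsequent read()s on
* the disabled stream will then fail with -EIO.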
2740 */ 2741 static void i915_oa_stream_disable(struct i915_perf_stream *stream) 2742 { 2743 stream->perf->ops.oa_disable(stream); 2744 2745 if (stream->sample_flags & SAMPLE_OA_REPORT) 2746 hrtimer_cancel(&stream->poll_check_timer); 2747 } 2748 2749 static const struct i915_perf_stream_ops i915_oa_stream_ops = { 2750 .destroy = i915_oa_stream_destroy, 2751 .enable = i915_oa_stream_enable, 2752 .disable = i915_oa_stream_disable, 2753 .wait_unlocked = i915_oa_wait_unlocked, 2754 .poll_wait = i915_oa_poll_wait, 2755 .read = i915_oa_read, 2756 }; 2757 2758 static int i915_perf_stream_enable_sync(struct i915_perf_stream *stream) 2759 { 2760 struct i915_active *active; 2761 int err; 2762 2763 active = i915_active_create(); 2764 if (!active) 2765 return -ENOMEM; 2766 2767 err = stream->perf->ops.enable_metric_set(stream, active); 2768 if (err == 0) 2769 __i915_active_wait(active, TASK_UNINTERRUPTIBLE); 2770 2771 i915_active_put(active); 2772 return err; 2773 } 2774 2775 static void 2776 get_default_sseu_config(struct intel_sseu *out_sseu, 2777 struct intel_engine_cs *engine) 2778 { 2779 const struct sseu_dev_info *devinfo_sseu = &engine->gt->info.sseu; 2780 2781 *out_sseu = intel_sseu_from_device_info(devinfo_sseu); 2782 2783 if (IS_GEN(engine->i915, 11)) { 2784 /* 2785 * We only need subslice count so it doesn't matter which ones 2786 * we select - just turn off low bits in the amount of half of 2787 * all available subslices per slice. 2788 */ 2789 out_sseu->subslice_mask = 2790 ~(~0 << (hweight8(out_sseu->subslice_mask) / 2)); 2791 out_sseu->slice_mask = 0x1; 2792 } 2793 } 2794 2795 static int 2796 get_sseu_config(struct intel_sseu *out_sseu, 2797 struct intel_engine_cs *engine, 2798 const struct drm_i915_gem_context_param_sseu *drm_sseu) 2799 { 2800 if (drm_sseu->engine.engine_class != engine->uabi_class || 2801 drm_sseu->engine.engine_instance != engine->uabi_instance) 2802 return -EINVAL; 2803 2804 return i915_gem_user_to_context_sseu(engine->gt, drm_sseu, out_sseu); 2805 } 2806 2807 /** 2808 * i915_oa_stream_init - validate combined props for OA stream and init 2809 * @stream: An i915 perf stream 2810 * @param: The open parameters passed to `DRM_I915_PERF_OPEN` 2811 * @props: The property state that configures stream (individually validated) 2812 * 2813 * While read_properties_unlocked() validates properties in isolation it 2814 * doesn't ensure that the combination necessarily makes sense. 2815 * 2816 * At this point it has been determined that userspace wants a stream of 2817 * OA metrics, but still we need to further validate the combined 2818 * properties are OK. 2819 * 2820 * If the configuration makes sense then we can allocate memory for 2821 * a circular OA buffer and apply the requested metric set configuration. 2822 * 2823 * Returns: zero on success or a negative error code. 
2824 */ 2825 static int i915_oa_stream_init(struct i915_perf_stream *stream, 2826 struct drm_i915_perf_open_param *param, 2827 struct perf_open_properties *props) 2828 { 2829 struct drm_i915_private *i915 = stream->perf->i915; 2830 struct i915_perf *perf = stream->perf; 2831 int format_size; 2832 int ret; 2833 2834 if (!props->engine) { 2835 DRM_DEBUG("OA engine not specified\n"); 2836 return -EINVAL; 2837 } 2838 2839 /* 2840 * If the sysfs metrics/ directory wasn't registered for some 2841 * reason then don't let userspace try their luck with config 2842 * IDs 2843 */ 2844 if (!perf->metrics_kobj) { 2845 DRM_DEBUG("OA metrics weren't advertised via sysfs\n"); 2846 return -EINVAL; 2847 } 2848 2849 if (!(props->sample_flags & SAMPLE_OA_REPORT) && 2850 (INTEL_GEN(perf->i915) < 12 || !stream->ctx)) { 2851 DRM_DEBUG("Only OA report sampling supported\n"); 2852 return -EINVAL; 2853 } 2854 2855 if (!perf->ops.enable_metric_set) { 2856 DRM_DEBUG("OA unit not supported\n"); 2857 return -ENODEV; 2858 } 2859 2860 /* 2861 * To avoid the complexity of having to accurately filter 2862 * counter reports and marshal to the appropriate client 2863 * we currently only allow exclusive access 2864 */ 2865 if (perf->exclusive_stream) { 2866 DRM_DEBUG("OA unit already in use\n"); 2867 return -EBUSY; 2868 } 2869 2870 if (!props->oa_format) { 2871 DRM_DEBUG("OA report format not specified\n"); 2872 return -EINVAL; 2873 } 2874 2875 stream->engine = props->engine; 2876 stream->uncore = stream->engine->gt->uncore; 2877 2878 stream->sample_size = sizeof(struct drm_i915_perf_record_header); 2879 2880 format_size = perf->oa_formats[props->oa_format].size; 2881 2882 stream->sample_flags = props->sample_flags; 2883 stream->sample_size += format_size; 2884 2885 stream->oa_buffer.format_size = format_size; 2886 if (drm_WARN_ON(&i915->drm, stream->oa_buffer.format_size == 0)) 2887 return -EINVAL; 2888 2889 stream->hold_preemption = props->hold_preemption; 2890 2891 stream->oa_buffer.format = 2892 perf->oa_formats[props->oa_format].format; 2893 2894 stream->periodic = props->oa_periodic; 2895 if (stream->periodic) 2896 stream->period_exponent = props->oa_period_exponent; 2897 2898 if (stream->ctx) { 2899 ret = oa_get_render_ctx_id(stream); 2900 if (ret) { 2901 DRM_DEBUG("Invalid context id to filter with\n"); 2902 return ret; 2903 } 2904 } 2905 2906 ret = alloc_noa_wait(stream); 2907 if (ret) { 2908 DRM_DEBUG("Unable to allocate NOA wait batch buffer\n"); 2909 goto err_noa_wait_alloc; 2910 } 2911 2912 stream->oa_config = i915_perf_get_oa_config(perf, props->metrics_set); 2913 if (!stream->oa_config) { 2914 DRM_DEBUG("Invalid OA config id=%i\n", props->metrics_set); 2915 ret = -EINVAL; 2916 goto err_config; 2917 } 2918 2919 /* PRM - observability performance counters: 2920 * 2921 * OACONTROL, performance counter enable, note: 2922 * 2923 * "When this bit is set, in order to have coherent counts, 2924 * RC6 power state and trunk clock gating must be disabled. 2925 * This can be achieved by programming MMIO registers as 2926 * 0xA094=0 and 0xA090[31]=1" 2927 * 2928 * In our case we are expecting that taking pm + FORCEWAKE 2929 * references will effectively disable RC6. 
2930 */ 2931 intel_engine_pm_get(stream->engine); 2932 intel_uncore_forcewake_get(stream->uncore, FORCEWAKE_ALL); 2933 2934 ret = alloc_oa_buffer(stream); 2935 if (ret) 2936 goto err_oa_buf_alloc; 2937 2938 stream->ops = &i915_oa_stream_ops; 2939 2940 perf->sseu = props->sseu; 2941 WRITE_ONCE(perf->exclusive_stream, stream); 2942 2943 ret = i915_perf_stream_enable_sync(stream); 2944 if (ret) { 2945 DRM_DEBUG("Unable to enable metric set\n"); 2946 goto err_enable; 2947 } 2948 2949 DRM_DEBUG("opening stream oa config uuid=%s\n", 2950 stream->oa_config->uuid); 2951 2952 hrtimer_init(&stream->poll_check_timer, 2953 CLOCK_MONOTONIC, HRTIMER_MODE_REL); 2954 stream->poll_check_timer.function = oa_poll_check_timer_cb; 2955 init_waitqueue_head(&stream->poll_wq); 2956 spin_lock_init(&stream->oa_buffer.ptr_lock); 2957 2958 return 0; 2959 2960 err_enable: 2961 WRITE_ONCE(perf->exclusive_stream, NULL); 2962 perf->ops.disable_metric_set(stream); 2963 2964 free_oa_buffer(stream); 2965 2966 err_oa_buf_alloc: 2967 free_oa_configs(stream); 2968 2969 intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL); 2970 intel_engine_pm_put(stream->engine); 2971 2972 err_config: 2973 free_noa_wait(stream); 2974 2975 err_noa_wait_alloc: 2976 if (stream->ctx) 2977 oa_put_render_ctx_id(stream); 2978 2979 return ret; 2980 } 2981 2982 void i915_oa_init_reg_state(const struct intel_context *ce, 2983 const struct intel_engine_cs *engine) 2984 { 2985 struct i915_perf_stream *stream; 2986 2987 if (engine->class != RENDER_CLASS) 2988 return; 2989 2990 /* perf.exclusive_stream serialised by lrc_configure_all_contexts() */ 2991 stream = READ_ONCE(engine->i915->perf.exclusive_stream); 2992 if (stream && INTEL_GEN(stream->perf->i915) < 12) 2993 gen8_update_reg_state_unlocked(ce, stream); 2994 } 2995 2996 /** 2997 * i915_perf_read - handles read() FOP for i915 perf stream FDs 2998 * @file: An i915 perf stream file 2999 * @buf: destination buffer given by userspace 3000 * @count: the number of bytes userspace wants to read 3001 * @ppos: (inout) file seek position (unused) 3002 * 3003 * The entry point for handling a read() on a stream file descriptor from 3004 * userspace. Most of the work is left to the i915_perf_read_locked() and 3005 * &i915_perf_stream_ops->read but to save having stream implementations (of 3006 * which we might have multiple later) we handle blocking read here. 3007 * 3008 * We can also consistently treat trying to read from a disabled stream 3009 * as an IO error so implementations can assume the stream is enabled 3010 * while reading. 3011 * 3012 * Returns: The number of bytes copied or a negative error code on failure. 3013 */ 3014 static ssize_t i915_perf_read(struct file *file, 3015 char __user *buf, 3016 size_t count, 3017 loff_t *ppos) 3018 { 3019 struct i915_perf_stream *stream = file->private_data; 3020 struct i915_perf *perf = stream->perf; 3021 size_t offset = 0; 3022 int ret; 3023 3024 /* To ensure it's handled consistently we simply treat all reads of a 3025 * disabled stream as an error. In particular it might otherwise lead 3026 * to a deadlock for blocking file descriptors... 3027 */ 3028 if (!stream->enabled || !(stream->sample_flags & SAMPLE_OA_REPORT)) 3029 return -EIO; 3030 3031 if (!(file->f_flags & O_NONBLOCK)) { 3032 /* There's the small chance of false positives from 3033 * stream->ops->wait_unlocked. 3034 * 3035 * E.g. 
with single context filtering since we only wait until 3036 * oabuffer has >= 1 report we don't immediately know whether 3037 * any reports really belong to the current context 3038 */ 3039 do { 3040 ret = stream->ops->wait_unlocked(stream); 3041 if (ret) 3042 return ret; 3043 3044 mutex_lock(&perf->lock); 3045 ret = stream->ops->read(stream, buf, count, &offset); 3046 mutex_unlock(&perf->lock); 3047 } while (!offset && !ret); 3048 } else { 3049 mutex_lock(&perf->lock); 3050 ret = stream->ops->read(stream, buf, count, &offset); 3051 mutex_unlock(&perf->lock); 3052 } 3053 3054 /* We allow the poll checking to sometimes report false positive EPOLLIN 3055 * events where we might actually report EAGAIN on read() if there's 3056 * not really any data available. In this situation though we don't 3057 * want to enter a busy loop between poll() reporting a EPOLLIN event 3058 * and read() returning -EAGAIN. Clearing the oa.pollin state here 3059 * effectively ensures we back off until the next hrtimer callback 3060 * before reporting another EPOLLIN event. 3061 * The exception to this is if ops->read() returned -ENOSPC which means 3062 * that more OA data is available than could fit in the user provided 3063 * buffer. In this case we want the next poll() call to not block. 3064 */ 3065 if (ret != -ENOSPC) 3066 stream->pollin = false; 3067 3068 /* Possible values for ret are 0, -EFAULT, -ENOSPC, -EIO, ... */ 3069 return offset ?: (ret ?: -EAGAIN); 3070 } 3071 3072 static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer) 3073 { 3074 struct i915_perf_stream *stream = 3075 container_of(hrtimer, typeof(*stream), poll_check_timer); 3076 3077 if (oa_buffer_check_unlocked(stream)) { 3078 stream->pollin = true; 3079 wake_up(&stream->poll_wq); 3080 } 3081 3082 hrtimer_forward_now(hrtimer, 3083 ns_to_ktime(stream->poll_oa_period)); 3084 3085 return HRTIMER_RESTART; 3086 } 3087 3088 /** 3089 * i915_perf_poll_locked - poll_wait() with a suitable wait queue for stream 3090 * @stream: An i915 perf stream 3091 * @file: An i915 perf stream file 3092 * @wait: poll() state table 3093 * 3094 * For handling userspace polling on an i915 perf stream, this calls through to 3095 * &i915_perf_stream_ops->poll_wait to call poll_wait() with a wait queue that 3096 * will be woken for new stream data. 3097 * 3098 * Note: The &perf->lock mutex has been taken to serialize 3099 * with any non-file-operation driver hooks. 3100 * 3101 * Returns: any poll events that are ready without sleeping 3102 */ 3103 static __poll_t i915_perf_poll_locked(struct i915_perf_stream *stream, 3104 struct file *file, 3105 poll_table *wait) 3106 { 3107 __poll_t events = 0; 3108 3109 stream->ops->poll_wait(stream, file, wait); 3110 3111 /* Note: we don't explicitly check whether there's something to read 3112 * here since this path may be very hot depending on what else 3113 * userspace is polling, or on the timeout in use. We rely solely on 3114 * the hrtimer/oa_poll_check_timer_cb to notify us when there are 3115 * samples to read. 3116 */ 3117 if (stream->pollin) 3118 events |= EPOLLIN; 3119 3120 return events; 3121 } 3122 3123 /** 3124 * i915_perf_poll - call poll_wait() with a suitable wait queue for stream 3125 * @file: An i915 perf stream file 3126 * @wait: poll() state table 3127 * 3128 * For handling userspace polling on an i915 perf stream, this ensures 3129 * poll_wait() gets called with a wait queue that will be woken for new stream 3130 * data. 
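*
* A minimal userspace sketch (stream_fd as returned by
* DRM_IOCTL_I915_PERF_OPEN):
*
*   struct pollfd pfd = { .fd = stream_fd, .events = POLLIN };
*   poll(&pfd, 1, -1);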
3131 *
3132 * Note: Implementation deferred to i915_perf_poll_locked()
3133 *
3134 * Returns: any poll events that are ready without sleeping
3135 */
3136 static __poll_t i915_perf_poll(struct file *file, poll_table *wait)
3137 {
3138 struct i915_perf_stream *stream = file->private_data;
3139 struct i915_perf *perf = stream->perf;
3140 __poll_t ret;
3141
3142 mutex_lock(&perf->lock);
3143 ret = i915_perf_poll_locked(stream, file, wait);
3144 mutex_unlock(&perf->lock);
3145
3146 return ret;
3147 }
3148
3149 /**
3150 * i915_perf_enable_locked - handle `I915_PERF_IOCTL_ENABLE` ioctl
3151 * @stream: A disabled i915 perf stream
3152 *
3153 * [Re]enables the associated capture of data for this stream.
3154 *
3155 * If a stream was previously enabled then there's currently no intention
3156 * to provide userspace any guarantee about the preservation of previously
3157 * buffered data.
3158 */
3159 static void i915_perf_enable_locked(struct i915_perf_stream *stream)
3160 {
3161 if (stream->enabled)
3162 return;
3163
3164 /* Allow stream->ops->enable() to refer to this */
3165 stream->enabled = true;
3166
3167 if (stream->ops->enable)
3168 stream->ops->enable(stream);
3169
3170 if (stream->hold_preemption)
3171 intel_context_set_nopreempt(stream->pinned_ctx);
3172 }
3173
3174 /**
3175 * i915_perf_disable_locked - handle `I915_PERF_IOCTL_DISABLE` ioctl
3176 * @stream: An enabled i915 perf stream
3177 *
3178 * Disables the associated capture of data for this stream.
3179 *
3180 * The intention is that disabling and re-enabling a stream will ideally be
3181 * cheaper than destroying and re-opening a stream with the same configuration,
3182 * though there are no formal guarantees about what state or buffered data
3183 * must be retained between disabling and re-enabling a stream.
3184 *
3185 * Note: while a stream is disabled it's considered an error for userspace
3186 * to attempt to read from the stream (-EIO).
3187 */
3188 static void i915_perf_disable_locked(struct i915_perf_stream *stream)
3189 {
3190 if (!stream->enabled)
3191 return;
3192
3193 /* Allow stream->ops->disable() to refer to this */
3194 stream->enabled = false;
3195
3196 if (stream->hold_preemption)
3197 intel_context_clear_nopreempt(stream->pinned_ctx);
3198
3199 if (stream->ops->disable)
3200 stream->ops->disable(stream);
3201 }
3202
3203 static long i915_perf_config_locked(struct i915_perf_stream *stream,
3204 unsigned long metrics_set)
3205 {
3206 struct i915_oa_config *config;
3207 long ret = stream->oa_config->id;
3208
3209 config = i915_perf_get_oa_config(stream->perf, metrics_set);
3210 if (!config)
3211 return -EINVAL;
3212
3213 if (config != stream->oa_config) {
3214 int err;
3215
3216 /*
3217 * If OA is bound to a specific context, emit the
3218 * reconfiguration inline from that context. The update
3219 * will then be ordered with respect to submission on that
3220 * context.
3221 *
3222 * When set globally, we use a low priority kernel context,
3223 * so it will effectively take effect when idle.
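*
* From userspace this path is reached with something like
* ioctl(stream_fd, I915_PERF_IOCTL_CONFIG, metrics_set), with the
* previous configuration's id returned on success.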
3224 */ 3225 err = emit_oa_config(stream, config, oa_context(stream), NULL); 3226 if (!err) 3227 config = xchg(&stream->oa_config, config); 3228 else 3229 ret = err; 3230 } 3231 3232 i915_oa_config_put(config); 3233 3234 return ret; 3235 } 3236 3237 /** 3238 * i915_perf_ioctl_locked - support ioctl() usage with i915 perf stream FDs 3239 * @stream: An i915 perf stream 3240 * @cmd: the ioctl request 3241 * @arg: the ioctl data 3242 * 3243 * Note: The &perf->lock mutex has been taken to serialize 3244 * with any non-file-operation driver hooks. 3245 * 3246 * Returns: zero on success or a negative error code. Returns -EINVAL for 3247 * an unknown ioctl request. 3248 */ 3249 static long i915_perf_ioctl_locked(struct i915_perf_stream *stream, 3250 unsigned int cmd, 3251 unsigned long arg) 3252 { 3253 switch (cmd) { 3254 case I915_PERF_IOCTL_ENABLE: 3255 i915_perf_enable_locked(stream); 3256 return 0; 3257 case I915_PERF_IOCTL_DISABLE: 3258 i915_perf_disable_locked(stream); 3259 return 0; 3260 case I915_PERF_IOCTL_CONFIG: 3261 return i915_perf_config_locked(stream, arg); 3262 } 3263 3264 return -EINVAL; 3265 } 3266 3267 /** 3268 * i915_perf_ioctl - support ioctl() usage with i915 perf stream FDs 3269 * @file: An i915 perf stream file 3270 * @cmd: the ioctl request 3271 * @arg: the ioctl data 3272 * 3273 * Implementation deferred to i915_perf_ioctl_locked(). 3274 * 3275 * Returns: zero on success or a negative error code. Returns -EINVAL for 3276 * an unknown ioctl request. 3277 */ 3278 static long i915_perf_ioctl(struct file *file, 3279 unsigned int cmd, 3280 unsigned long arg) 3281 { 3282 struct i915_perf_stream *stream = file->private_data; 3283 struct i915_perf *perf = stream->perf; 3284 long ret; 3285 3286 mutex_lock(&perf->lock); 3287 ret = i915_perf_ioctl_locked(stream, cmd, arg); 3288 mutex_unlock(&perf->lock); 3289 3290 return ret; 3291 } 3292 3293 /** 3294 * i915_perf_destroy_locked - destroy an i915 perf stream 3295 * @stream: An i915 perf stream 3296 * 3297 * Frees all resources associated with the given i915 perf @stream, disabling 3298 * any associated data capture in the process. 3299 * 3300 * Note: The &perf->lock mutex has been taken to serialize 3301 * with any non-file-operation driver hooks. 3302 */ 3303 static void i915_perf_destroy_locked(struct i915_perf_stream *stream) 3304 { 3305 if (stream->enabled) 3306 i915_perf_disable_locked(stream); 3307 3308 if (stream->ops->destroy) 3309 stream->ops->destroy(stream); 3310 3311 if (stream->ctx) 3312 i915_gem_context_put(stream->ctx); 3313 3314 kfree(stream); 3315 } 3316 3317 /** 3318 * i915_perf_release - handles userspace close() of a stream file 3319 * @inode: anonymous inode associated with file 3320 * @file: An i915 perf stream file 3321 * 3322 * Cleans up any resources associated with an open i915 perf stream file. 3323 * 3324 * NB: close() can't really fail from the userspace point of view. 3325 * 3326 * Returns: zero on success or a negative error code. 3327 */ 3328 static int i915_perf_release(struct inode *inode, struct file *file) 3329 { 3330 struct i915_perf_stream *stream = file->private_data; 3331 struct i915_perf *perf = stream->perf; 3332 3333 mutex_lock(&perf->lock); 3334 i915_perf_destroy_locked(stream); 3335 mutex_unlock(&perf->lock); 3336 3337 /* Release the reference the perf stream kept on the driver. 
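* (The reference was taken with drm_dev_get() when the stream fd was
* created in i915_perf_open_ioctl_locked().)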
*/
3338 drm_dev_put(&perf->i915->drm);
3339
3340 return 0;
3341 }
3342
3343
3344 static const struct file_operations fops = {
3345 .owner = THIS_MODULE,
3346 .llseek = no_llseek,
3347 .release = i915_perf_release,
3348 .poll = i915_perf_poll,
3349 .read = i915_perf_read,
3350 .unlocked_ioctl = i915_perf_ioctl,
3351 /* Our ioctls have no arguments, so it's safe to use the same function
3352 * to handle 32bit compatibility.
3353 */
3354 .compat_ioctl = i915_perf_ioctl,
3355 };
3356
3357
3358 /**
3359 * i915_perf_open_ioctl_locked - DRM ioctl() for userspace to open a stream FD
3360 * @perf: i915 perf instance
3361 * @param: The open parameters passed to `DRM_I915_PERF_OPEN`
3362 * @props: individually validated u64 property value pairs
3363 * @file: drm file
3364 *
3365 * See i915_perf_open_ioctl() for interface details.
3366 *
3367 * Implements further stream config validation and stream initialization on
3368 * behalf of i915_perf_open_ioctl() with the &perf->lock mutex
3369 * taken to serialize with any non-file-operation driver hooks.
3370 *
3371 * Note: at this point the @props have only been validated in isolation and
3372 * it's still necessary to validate that the combination of properties makes
3373 * sense.
3374 *
3375 * In the case where userspace is interested in OA unit metrics then further
3376 * config validation and stream initialization details will be handled by
3377 * i915_oa_stream_init(). The code here should only validate config state that
3378 * will be relevant to all stream types / backends.
3379 *
3380 * Returns: zero on success or a negative error code.
3381 */
3382 static int
3383 i915_perf_open_ioctl_locked(struct i915_perf *perf,
3384 struct drm_i915_perf_open_param *param,
3385 struct perf_open_properties *props,
3386 struct drm_file *file)
3387 {
3388 struct i915_gem_context *specific_ctx = NULL;
3389 struct i915_perf_stream *stream = NULL;
3390 unsigned long f_flags = 0;
3391 bool privileged_op = true;
3392 int stream_fd;
3393 int ret;
3394
3395 if (props->single_context) {
3396 u32 ctx_handle = props->ctx_handle;
3397 struct drm_i915_file_private *file_priv = file->driver_priv;
3398
3399 specific_ctx = i915_gem_context_lookup(file_priv, ctx_handle);
3400 if (!specific_ctx) {
3401 DRM_DEBUG("Failed to look up context with ID %u for opening perf stream\n",
3402 ctx_handle);
3403 ret = -ENOENT;
3404 goto err;
3405 }
3406 }
3407
3408 /*
3409 * On Haswell the OA unit supports clock gating off for a specific
3410 * context and in this mode there's no visibility of metrics for the
3411 * rest of the system, which we consider acceptable for a
3412 * non-privileged client.
3413 *
3414 * For Gen8->11 the OA unit no longer supports clock gating off for a
3415 * specific context and the kernel can't securely stop the counters
3416 * from updating as system-wide / global values. Even though we can
3417 * filter reports based on the included context ID we can't block
3418 * clients from seeing the raw / global counter values via
3419 * MI_REPORT_PERF_COUNT commands and so consider it a privileged op to
3420 * enable the OA unit by default.
3421 *
3422 * For Gen12+ we gain a new OAR unit that only monitors the RCS on a
3423 * per context basis. So we can relax requirements there if the user
3424 * doesn't request global stream access (i.e. query based sampling
3425 * using MI_REPORT_PERF_COUNT).
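*
* As an illustrative sketch only (the property names are from the uapi
* headers, the values are placeholders): such a non-privileged Gen12
* client could open its stream with properties along the lines of
*
*   u64 props[] = {
*       DRM_I915_PERF_PROP_CTX_HANDLE, ctx_handle,
*       DRM_I915_PERF_PROP_OA_METRICS_SET, config_id,
*       DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
*   };
*
* i.e. naming a specific context and not setting
* DRM_I915_PERF_PROP_SAMPLE_OA, so no OA buffer reports are requested.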
*/ 3427 if (IS_HASWELL(perf->i915) && specific_ctx) 3428 privileged_op = false; 3429 else if (IS_GEN(perf->i915, 12) && specific_ctx && 3430 (props->sample_flags & SAMPLE_OA_REPORT) == 0) 3431 privileged_op = false; 3432 3433 if (props->hold_preemption) { 3434 if (!props->single_context) { 3435 DRM_DEBUG("preemption disable with no context\n"); 3436 ret = -EINVAL; 3437 goto err; 3438 } 3439 privileged_op = true; 3440 } 3441 3442 /* 3443 * Asking for SSEU configuration is a privileged operation. 3444 */ 3445 if (props->has_sseu) 3446 privileged_op = true; 3447 else 3448 get_default_sseu_config(&props->sseu, props->engine); 3449 3450 /* Similar to perf's kernel.perf_paranoid_cpu sysctl option 3451 * we check a dev.i915.perf_stream_paranoid sysctl option 3452 * to determine if it's ok to access system-wide OA counters 3453 * without CAP_PERFMON or CAP_SYS_ADMIN privileges. 3454 */ 3455 if (privileged_op && 3456 i915_perf_stream_paranoid && !perfmon_capable()) { 3457 DRM_DEBUG("Insufficient privileges to open i915 perf stream\n"); 3458 ret = -EACCES; 3459 goto err_ctx; 3460 } 3461 3462 stream = kzalloc(sizeof(*stream), GFP_KERNEL); 3463 if (!stream) { 3464 ret = -ENOMEM; 3465 goto err_ctx; 3466 } 3467 3468 stream->perf = perf; 3469 stream->ctx = specific_ctx; 3470 stream->poll_oa_period = props->poll_oa_period; 3471 3472 ret = i915_oa_stream_init(stream, param, props); 3473 if (ret) 3474 goto err_alloc; 3475 3476 /* we avoid simply assigning stream->sample_flags = props->sample_flags 3477 * to have _stream_init check the combination of sample flags more 3478 * thoroughly, but still this is the expected result at this point. 3479 */ 3480 if (WARN_ON(stream->sample_flags != props->sample_flags)) { 3481 ret = -ENODEV; 3482 goto err_flags; 3483 } 3484 3485 if (param->flags & I915_PERF_FLAG_FD_CLOEXEC) 3486 f_flags |= O_CLOEXEC; 3487 if (param->flags & I915_PERF_FLAG_FD_NONBLOCK) 3488 f_flags |= O_NONBLOCK; 3489 3490 stream_fd = anon_inode_getfd("[i915_perf]", &fops, stream, f_flags); 3491 if (stream_fd < 0) { 3492 ret = stream_fd; 3493 goto err_flags; 3494 } 3495 3496 if (!(param->flags & I915_PERF_FLAG_DISABLED)) 3497 i915_perf_enable_locked(stream); 3498 3499 /* Take a reference on the driver that will be kept with stream_fd 3500 * until its release. 3501 */ 3502 drm_dev_get(&perf->i915->drm); 3503 3504 return stream_fd; 3505 3506 err_flags: 3507 if (stream->ops->destroy) 3508 stream->ops->destroy(stream); 3509 err_alloc: 3510 kfree(stream); 3511 err_ctx: 3512 if (specific_ctx) 3513 i915_gem_context_put(specific_ctx); 3514 err: 3515 return ret; 3516 } 3517 3518 static u64 oa_exponent_to_ns(struct i915_perf *perf, int exponent) 3519 { 3520 return intel_gt_clock_interval_to_ns(perf->i915->ggtt.vm.gt, 3521 2ULL << exponent); 3522 } 3523 3524 /** 3525 * read_properties_unlocked - validate + copy userspace stream open properties 3526 * @perf: i915 perf instance 3527 * @uprops: The array of u64 key value pairs given by userspace 3528 * @n_props: The number of key value pairs expected in @uprops 3529 * @props: The stream configuration built up while validating properties 3530 * 3531 * Note this function only validates properties in isolation; it doesn't 3532 * validate that the combination of properties makes sense or that all 3533 * properties necessary for a particular kind of stream have been set. 3534 * 3535 * Note that there currently aren't any ordering requirements for properties so 3536 * we shouldn't validate or assume anything about ordering here.
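For example, { DRM_I915_PERF_PROP_OA_METRICS_SET, id, DRM_I915_PERF_PROP_SAMPLE_OA, 1 } and the same pairs in the opposite order must be treated identically.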
This doesn't 3537 * rule out defining new properties with ordering requirements in the future. 3538 */ 3539 static int read_properties_unlocked(struct i915_perf *perf, 3540 u64 __user *uprops, 3541 u32 n_props, 3542 struct perf_open_properties *props) 3543 { 3544 u64 __user *uprop = uprops; 3545 u32 i; 3546 int ret; 3547 3548 memset(props, 0, sizeof(struct perf_open_properties)); 3549 props->poll_oa_period = DEFAULT_POLL_PERIOD_NS; 3550 3551 if (!n_props) { 3552 DRM_DEBUG("No i915 perf properties given\n"); 3553 return -EINVAL; 3554 } 3555 3556 /* At the moment we only support using i915-perf on the RCS. */ 3557 props->engine = intel_engine_lookup_user(perf->i915, 3558 I915_ENGINE_CLASS_RENDER, 3559 0); 3560 if (!props->engine) { 3561 DRM_DEBUG("No RENDER-capable engines\n"); 3562 return -EINVAL; 3563 } 3564 3565 /* Considering that ID = 0 is reserved and assuming that we don't 3566 * (currently) expect any configurations to ever specify duplicate 3567 * values for a particular property ID then the last _PROP_MAX value is 3568 * one greater than the maximum number of properties we expect to get 3569 * from userspace. 3570 */ 3571 if (n_props >= DRM_I915_PERF_PROP_MAX) { 3572 DRM_DEBUG("More i915 perf properties specified than exist\n"); 3573 return -EINVAL; 3574 } 3575 3576 for (i = 0; i < n_props; i++) { 3577 u64 oa_period, oa_freq_hz; 3578 u64 id, value; 3579 3580 ret = get_user(id, uprop); 3581 if (ret) 3582 return ret; 3583 3584 ret = get_user(value, uprop + 1); 3585 if (ret) 3586 return ret; 3587 3588 if (id == 0 || id >= DRM_I915_PERF_PROP_MAX) { 3589 DRM_DEBUG("Unknown i915 perf property ID\n"); 3590 return -EINVAL; 3591 } 3592 3593 switch ((enum drm_i915_perf_property_id)id) { 3594 case DRM_I915_PERF_PROP_CTX_HANDLE: 3595 props->single_context = 1; 3596 props->ctx_handle = value; 3597 break; 3598 case DRM_I915_PERF_PROP_SAMPLE_OA: 3599 if (value) 3600 props->sample_flags |= SAMPLE_OA_REPORT; 3601 break; 3602 case DRM_I915_PERF_PROP_OA_METRICS_SET: 3603 if (value == 0) { 3604 DRM_DEBUG("Unknown OA metric set ID\n"); 3605 return -EINVAL; 3606 } 3607 props->metrics_set = value; 3608 break; 3609 case DRM_I915_PERF_PROP_OA_FORMAT: 3610 if (value == 0 || value >= I915_OA_FORMAT_MAX) { 3611 DRM_DEBUG("Out-of-range OA report format %llu\n", 3612 value); 3613 return -EINVAL; 3614 } 3615 if (!perf->oa_formats[value].size) { 3616 DRM_DEBUG("Unsupported OA report format %llu\n", 3617 value); 3618 return -EINVAL; 3619 } 3620 props->oa_format = value; 3621 break; 3622 case DRM_I915_PERF_PROP_OA_EXPONENT: 3623 if (value > OA_EXPONENT_MAX) { 3624 DRM_DEBUG("OA timer exponent too high (> %u)\n", 3625 OA_EXPONENT_MAX); 3626 return -EINVAL; 3627 } 3628 3629 /* Theoretically we can program the OA unit to sample 3630 * e.g. every 160ns for HSW, 167ns for BDW/SKL or 104ns 3631 * for BXT. We don't allow such high sampling 3632 * frequencies by default without CAP_PERFMON or 3633 * CAP_SYS_ADMIN privileges. 3634 */ 3635 BUILD_BUG_ON(sizeof(oa_period) != 8); 3636 oa_period = oa_exponent_to_ns(perf, value); 3637 3638 /* This check is primarily to ensure that oa_period <= 3639 * UINT32_MAX (before passing to do_div which only 3640 * accepts a u32 denominator), but we can also skip 3641 * checking anything < 1Hz which implicitly can't be 3642 * limited via an integer oa_max_sample_rate.
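* 
* As a worked example using the numbers quoted above: an exponent of 0 selects
* a period of 2 timestamp ticks, so the 160ns HSW figure corresponds to an
* 80ns tick (a 12.5MHz timestamp clock), giving oa_freq_hz = 6.25MHz; each
* increment of the exponent then halves that frequency.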
3643 */ 3644 if (oa_period <= NSEC_PER_SEC) { 3645 u64 tmp = NSEC_PER_SEC; 3646 do_div(tmp, oa_period); 3647 oa_freq_hz = tmp; 3648 } else 3649 oa_freq_hz = 0; 3650 3651 if (oa_freq_hz > i915_oa_max_sample_rate && !perfmon_capable()) { 3652 DRM_DEBUG("OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without CAP_PERFMON or CAP_SYS_ADMIN privileges\n", 3653 i915_oa_max_sample_rate); 3654 return -EACCES; 3655 } 3656 3657 props->oa_periodic = true; 3658 props->oa_period_exponent = value; 3659 break; 3660 case DRM_I915_PERF_PROP_HOLD_PREEMPTION: 3661 props->hold_preemption = !!value; 3662 break; 3663 case DRM_I915_PERF_PROP_GLOBAL_SSEU: { 3664 struct drm_i915_gem_context_param_sseu user_sseu; 3665 3666 if (copy_from_user(&user_sseu, 3667 u64_to_user_ptr(value), 3668 sizeof(user_sseu))) { 3669 DRM_DEBUG("Unable to copy global sseu parameter\n"); 3670 return -EFAULT; 3671 } 3672 3673 ret = get_sseu_config(&props->sseu, props->engine, &user_sseu); 3674 if (ret) { 3675 DRM_DEBUG("Invalid SSEU configuration\n"); 3676 return ret; 3677 } 3678 props->has_sseu = true; 3679 break; 3680 } 3681 case DRM_I915_PERF_PROP_POLL_OA_PERIOD: 3682 if (value < 100000 /* 100us */) { 3683 DRM_DEBUG("OA availability timer too small (%lluns < 100us)\n", 3684 value); 3685 return -EINVAL; 3686 } 3687 props->poll_oa_period = value; 3688 break; 3689 case DRM_I915_PERF_PROP_MAX: 3690 MISSING_CASE(id); 3691 return -EINVAL; 3692 } 3693 3694 uprop += 2; 3695 } 3696 3697 return 0; 3698 } 3699 3700 /** 3701 * i915_perf_open_ioctl - DRM ioctl() for userspace to open a stream FD 3702 * @dev: drm device 3703 * @data: ioctl data copied from userspace (unvalidated) 3704 * @file: drm file 3705 * 3706 * Validates the stream open parameters given by userspace including flags 3707 * and an array of u64 key, value pair properties. 3708 * 3709 * Very little is assumed up front about the nature of the stream being 3710 * opened (for instance we don't assume it's for periodic OA unit metrics). An 3711 * i915-perf stream is expected to be a suitable interface for other forms of 3712 * buffered data written by the GPU besides periodic OA metrics. 3713 * 3714 * Note we copy the properties from userspace outside of the i915 perf 3715 * mutex to avoid an awkward lockdep with mmap_lock. 3716 * 3717 * Most of the implementation details are handled by 3718 * i915_perf_open_ioctl_locked() after taking the &perf->lock 3719 * mutex for serializing with any non-file-operation driver hooks. 3720 * 3721 * Return: A newly opened i915 Perf stream file descriptor or negative 3722 * error code on failure. 
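*
* A minimal userspace sketch (illustrative only: drmIoctl() is the usual
* libdrm wrapper, metrics_id is assumed to have been read from the sysfs
* metrics/<uuid>/id file, and error handling is omitted):
*
*	uint64_t properties[] = {
*		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
*		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_id,
*		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
*		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
*	};
*	struct drm_i915_perf_open_param param = {
*		.flags = I915_PERF_FLAG_FD_CLOEXEC,
*		.num_properties = 4,
*		.properties_ptr = (uintptr_t)properties,
*	};
*	int stream_fd = drmIoctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);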
*/ 3724 int i915_perf_open_ioctl(struct drm_device *dev, void *data, 3725 struct drm_file *file) 3726 { 3727 struct i915_perf *perf = &to_i915(dev)->perf; 3728 struct drm_i915_perf_open_param *param = data; 3729 struct perf_open_properties props; 3730 u32 known_open_flags; 3731 int ret; 3732 3733 if (!perf->i915) { 3734 DRM_DEBUG("i915 perf interface not available for this system\n"); 3735 return -ENOTSUPP; 3736 } 3737 3738 known_open_flags = I915_PERF_FLAG_FD_CLOEXEC | 3739 I915_PERF_FLAG_FD_NONBLOCK | 3740 I915_PERF_FLAG_DISABLED; 3741 if (param->flags & ~known_open_flags) { 3742 DRM_DEBUG("Unknown drm_i915_perf_open_param flag\n"); 3743 return -EINVAL; 3744 } 3745 3746 ret = read_properties_unlocked(perf, 3747 u64_to_user_ptr(param->properties_ptr), 3748 param->num_properties, 3749 &props); 3750 if (ret) 3751 return ret; 3752 3753 mutex_lock(&perf->lock); 3754 ret = i915_perf_open_ioctl_locked(perf, param, &props, file); 3755 mutex_unlock(&perf->lock); 3756 3757 return ret; 3758 } 3759 3760 /** 3761 * i915_perf_register - exposes i915-perf to userspace 3762 * @i915: i915 device instance 3763 * 3764 * In particular OA metric sets are advertised under a sysfs metrics/ 3765 * directory allowing userspace to enumerate valid IDs that can be 3766 * used to open an i915-perf stream. 3767 */ 3768 void i915_perf_register(struct drm_i915_private *i915) 3769 { 3770 struct i915_perf *perf = &i915->perf; 3771 3772 if (!perf->i915) 3773 return; 3774 3775 /* Take &perf->lock to be sure we're synchronized with any attempted 3776 * i915_perf_open_ioctl(), considering that we register after the 3777 * interface has already been exposed to userspace. 3778 */ 3779 mutex_lock(&perf->lock); 3780 3781 perf->metrics_kobj = 3782 kobject_create_and_add("metrics", 3783 &i915->drm.primary->kdev->kobj); 3784 3785 mutex_unlock(&perf->lock); 3786 } 3787 3788 /** 3789 * i915_perf_unregister - hide i915-perf from userspace 3790 * @i915: i915 device instance 3791 * 3792 * i915-perf state cleanup is split up into 'unregister' and 3793 * 'deinit' phases where the interface is first hidden from 3794 * userspace by i915_perf_unregister() before cleaning up 3795 * remaining state in i915_perf_fini().
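*
* Note: streams opened before this point are unaffected; they remain usable
* until their stream FDs are closed.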
3796 */ 3797 void i915_perf_unregister(struct drm_i915_private *i915) 3798 { 3799 struct i915_perf *perf = &i915->perf; 3800 3801 if (!perf->metrics_kobj) 3802 return; 3803 3804 kobject_put(perf->metrics_kobj); 3805 perf->metrics_kobj = NULL; 3806 } 3807 3808 static bool gen8_is_valid_flex_addr(struct i915_perf *perf, u32 addr) 3809 { 3810 static const i915_reg_t flex_eu_regs[] = { 3811 EU_PERF_CNTL0, 3812 EU_PERF_CNTL1, 3813 EU_PERF_CNTL2, 3814 EU_PERF_CNTL3, 3815 EU_PERF_CNTL4, 3816 EU_PERF_CNTL5, 3817 EU_PERF_CNTL6, 3818 }; 3819 int i; 3820 3821 for (i = 0; i < ARRAY_SIZE(flex_eu_regs); i++) { 3822 if (i915_mmio_reg_offset(flex_eu_regs[i]) == addr) 3823 return true; 3824 } 3825 return false; 3826 } 3827 3828 #define ADDR_IN_RANGE(addr, start, end) \ 3829 ((addr) >= (start) && \ 3830 (addr) <= (end)) 3831 3832 #define REG_IN_RANGE(addr, start, end) \ 3833 ((addr) >= i915_mmio_reg_offset(start) && \ 3834 (addr) <= i915_mmio_reg_offset(end)) 3835 3836 #define REG_EQUAL(addr, mmio) \ 3837 ((addr) == i915_mmio_reg_offset(mmio)) 3838 3839 static bool gen7_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr) 3840 { 3841 return REG_IN_RANGE(addr, OASTARTTRIG1, OASTARTTRIG8) || 3842 REG_IN_RANGE(addr, OAREPORTTRIG1, OAREPORTTRIG8) || 3843 REG_IN_RANGE(addr, OACEC0_0, OACEC7_1); 3844 } 3845 3846 static bool gen7_is_valid_mux_addr(struct i915_perf *perf, u32 addr) 3847 { 3848 return REG_EQUAL(addr, HALF_SLICE_CHICKEN2) || 3849 REG_IN_RANGE(addr, MICRO_BP0_0, NOA_WRITE) || 3850 REG_IN_RANGE(addr, OA_PERFCNT1_LO, OA_PERFCNT2_HI) || 3851 REG_IN_RANGE(addr, OA_PERFMATRIX_LO, OA_PERFMATRIX_HI); 3852 } 3853 3854 static bool gen8_is_valid_mux_addr(struct i915_perf *perf, u32 addr) 3855 { 3856 return gen7_is_valid_mux_addr(perf, addr) || 3857 REG_EQUAL(addr, WAIT_FOR_RC6_EXIT) || 3858 REG_IN_RANGE(addr, RPM_CONFIG0, NOA_CONFIG(8)); 3859 } 3860 3861 static bool gen10_is_valid_mux_addr(struct i915_perf *perf, u32 addr) 3862 { 3863 return gen8_is_valid_mux_addr(perf, addr) || 3864 REG_EQUAL(addr, GEN10_NOA_WRITE_HIGH) || 3865 REG_IN_RANGE(addr, OA_PERFCNT3_LO, OA_PERFCNT4_HI); 3866 } 3867 3868 static bool hsw_is_valid_mux_addr(struct i915_perf *perf, u32 addr) 3869 { 3870 return gen7_is_valid_mux_addr(perf, addr) || 3871 ADDR_IN_RANGE(addr, 0x25100, 0x2FF90) || 3872 REG_IN_RANGE(addr, HSW_MBVID2_NOA0, HSW_MBVID2_NOA9) || 3873 REG_EQUAL(addr, HSW_MBVID2_MISR0); 3874 } 3875 3876 static bool chv_is_valid_mux_addr(struct i915_perf *perf, u32 addr) 3877 { 3878 return gen7_is_valid_mux_addr(perf, addr) || 3879 ADDR_IN_RANGE(addr, 0x182300, 0x1823A4); 3880 } 3881 3882 static bool gen12_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr) 3883 { 3884 return REG_IN_RANGE(addr, GEN12_OAG_OASTARTTRIG1, GEN12_OAG_OASTARTTRIG8) || 3885 REG_IN_RANGE(addr, GEN12_OAG_OAREPORTTRIG1, GEN12_OAG_OAREPORTTRIG8) || 3886 REG_IN_RANGE(addr, GEN12_OAG_CEC0_0, GEN12_OAG_CEC7_1) || 3887 REG_IN_RANGE(addr, GEN12_OAG_SCEC0_0, GEN12_OAG_SCEC7_1) || 3888 REG_EQUAL(addr, GEN12_OAA_DBG_REG) || 3889 REG_EQUAL(addr, GEN12_OAG_OA_PESS) || 3890 REG_EQUAL(addr, GEN12_OAG_SPCTR_CNF); 3891 } 3892 3893 static bool gen12_is_valid_mux_addr(struct i915_perf *perf, u32 addr) 3894 { 3895 return REG_EQUAL(addr, NOA_WRITE) || 3896 REG_EQUAL(addr, GEN10_NOA_WRITE_HIGH) || 3897 REG_EQUAL(addr, GDT_CHICKEN_BITS) || 3898 REG_EQUAL(addr, WAIT_FOR_RC6_EXIT) || 3899 REG_EQUAL(addr, RPM_CONFIG0) || 3900 REG_EQUAL(addr, RPM_CONFIG1) || 3901 REG_IN_RANGE(addr, NOA_CONFIG(0), NOA_CONFIG(8)); 3902 } 3903 3904 static u32 mask_reg_value(u32 reg, u32 val) 3905 
{ 3906 /* HALF_SLICE_CHICKEN2 is programmed with the 3907 * WaDisableSTUnitPowerOptimization workaround. Make sure the value 3908 * programmed by userspace doesn't change this. 3909 */ 3910 if (REG_EQUAL(reg, HALF_SLICE_CHICKEN2)) 3911 val = val & ~_MASKED_BIT_ENABLE(GEN8_ST_PO_DISABLE); 3912 3913 /* WAIT_FOR_RC6_EXIT has only one bit fulfilling the function 3914 * indicated by its name and a bunch of selection fields used by OA 3915 * configs. 3916 */ 3917 if (REG_EQUAL(reg, WAIT_FOR_RC6_EXIT)) 3918 val = val & ~_MASKED_BIT_ENABLE(HSW_WAIT_FOR_RC6_EXIT_ENABLE); 3919 3920 return val; 3921 } 3922 3923 static struct i915_oa_reg *alloc_oa_regs(struct i915_perf *perf, 3924 bool (*is_valid)(struct i915_perf *perf, u32 addr), 3925 u32 __user *regs, 3926 u32 n_regs) 3927 { 3928 struct i915_oa_reg *oa_regs; 3929 int err; 3930 u32 i; 3931 3932 if (!n_regs) 3933 return NULL; 3934 3935 /* No is_valid function means we're not allowing any register to be programmed. */ 3936 GEM_BUG_ON(!is_valid); 3937 if (!is_valid) 3938 return ERR_PTR(-EINVAL); 3939 3940 oa_regs = kmalloc_array(n_regs, sizeof(*oa_regs), GFP_KERNEL); 3941 if (!oa_regs) 3942 return ERR_PTR(-ENOMEM); 3943 3944 for (i = 0; i < n_regs; i++) { 3945 u32 addr, value; 3946 3947 err = get_user(addr, regs); 3948 if (err) 3949 goto addr_err; 3950 3951 if (!is_valid(perf, addr)) { 3952 DRM_DEBUG("Invalid oa_reg address: %X\n", addr); 3953 err = -EINVAL; 3954 goto addr_err; 3955 } 3956 3957 err = get_user(value, regs + 1); 3958 if (err) 3959 goto addr_err; 3960 3961 oa_regs[i].addr = _MMIO(addr); 3962 oa_regs[i].value = mask_reg_value(addr, value); 3963 3964 regs += 2; 3965 } 3966 3967 return oa_regs; 3968 3969 addr_err: 3970 kfree(oa_regs); 3971 return ERR_PTR(err); 3972 } 3973 3974 static ssize_t show_dynamic_id(struct device *dev, 3975 struct device_attribute *attr, 3976 char *buf) 3977 { 3978 struct i915_oa_config *oa_config = 3979 container_of(attr, typeof(*oa_config), sysfs_metric_id); 3980 3981 return sprintf(buf, "%d\n", oa_config->id); 3982 } 3983 3984 static int create_dynamic_oa_sysfs_entry(struct i915_perf *perf, 3985 struct i915_oa_config *oa_config) 3986 { 3987 sysfs_attr_init(&oa_config->sysfs_metric_id.attr); 3988 oa_config->sysfs_metric_id.attr.name = "id"; 3989 oa_config->sysfs_metric_id.attr.mode = S_IRUGO; 3990 oa_config->sysfs_metric_id.show = show_dynamic_id; 3991 oa_config->sysfs_metric_id.store = NULL; 3992 3993 oa_config->attrs[0] = &oa_config->sysfs_metric_id.attr; 3994 oa_config->attrs[1] = NULL; 3995 3996 oa_config->sysfs_metric.name = oa_config->uuid; 3997 oa_config->sysfs_metric.attrs = oa_config->attrs; 3998 3999 return sysfs_create_group(perf->metrics_kobj, 4000 &oa_config->sysfs_metric); 4001 } 4002 4003 /** 4004 * i915_perf_add_config_ioctl - DRM ioctl() for userspace to add a new OA config 4005 * @dev: drm device 4006 * @data: ioctl data (pointer to struct drm_i915_perf_oa_config) copied from 4007 * userspace (unvalidated) 4008 * @file: drm file 4009 * 4010 * Validates the submitted OA registers to be saved into a new OA config that 4011 * can then be used for programming the OA unit and its NOA network. 4012 * 4013 * Returns: A newly allocated config number to be used with the perf open ioctl 4014 * or a negative error code on failure.
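*
* A minimal userspace sketch (illustrative only: the register arrays and the
* drmIoctl() wrapper are assumptions, and error handling is omitted):
*
*	struct drm_i915_perf_oa_config config = { };
*
*	memcpy(config.uuid, "01234567-89ab-cdef-0123-456789abcdef",
*	       sizeof(config.uuid));
*	config.n_mux_regs = n_mux_regs;
*	config.mux_regs_ptr = (uintptr_t)mux_regs;
*	config.n_boolean_regs = n_boolean_regs;
*	config.boolean_regs_ptr = (uintptr_t)boolean_regs;
*	int config_id = drmIoctl(drm_fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &config);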
*/ 4016 int i915_perf_add_config_ioctl(struct drm_device *dev, void *data, 4017 struct drm_file *file) 4018 { 4019 struct i915_perf *perf = &to_i915(dev)->perf; 4020 struct drm_i915_perf_oa_config *args = data; 4021 struct i915_oa_config *oa_config, *tmp; 4022 struct i915_oa_reg *regs; 4023 int err, id; 4024 4025 if (!perf->i915) { 4026 DRM_DEBUG("i915 perf interface not available for this system\n"); 4027 return -ENOTSUPP; 4028 } 4029 4030 if (!perf->metrics_kobj) { 4031 DRM_DEBUG("OA metrics weren't advertised via sysfs\n"); 4032 return -EINVAL; 4033 } 4034 4035 if (i915_perf_stream_paranoid && !perfmon_capable()) { 4036 DRM_DEBUG("Insufficient privileges to add i915 OA config\n"); 4037 return -EACCES; 4038 } 4039 4040 if ((!args->mux_regs_ptr || !args->n_mux_regs) && 4041 (!args->boolean_regs_ptr || !args->n_boolean_regs) && 4042 (!args->flex_regs_ptr || !args->n_flex_regs)) { 4043 DRM_DEBUG("No OA registers given\n"); 4044 return -EINVAL; 4045 } 4046 4047 oa_config = kzalloc(sizeof(*oa_config), GFP_KERNEL); 4048 if (!oa_config) { 4049 DRM_DEBUG("Failed to allocate memory for the OA config\n"); 4050 return -ENOMEM; 4051 } 4052 4053 oa_config->perf = perf; 4054 kref_init(&oa_config->ref); 4055 4056 if (!uuid_is_valid(args->uuid)) { 4057 DRM_DEBUG("Invalid uuid format for OA config\n"); 4058 err = -EINVAL; 4059 goto reg_err; 4060 } 4061 4062 /* Last character in oa_config->uuid will be 0 because oa_config was 4063 * kzalloc'd. 4064 */ 4065 memcpy(oa_config->uuid, args->uuid, sizeof(args->uuid)); 4066 4067 oa_config->mux_regs_len = args->n_mux_regs; 4068 regs = alloc_oa_regs(perf, 4069 perf->ops.is_valid_mux_reg, 4070 u64_to_user_ptr(args->mux_regs_ptr), 4071 args->n_mux_regs); 4072 4073 if (IS_ERR(regs)) { 4074 DRM_DEBUG("Failed to create OA config for mux_regs\n"); 4075 err = PTR_ERR(regs); 4076 goto reg_err; 4077 } 4078 oa_config->mux_regs = regs; 4079 4080 oa_config->b_counter_regs_len = args->n_boolean_regs; 4081 regs = alloc_oa_regs(perf, 4082 perf->ops.is_valid_b_counter_reg, 4083 u64_to_user_ptr(args->boolean_regs_ptr), 4084 args->n_boolean_regs); 4085 4086 if (IS_ERR(regs)) { 4087 DRM_DEBUG("Failed to create OA config for b_counter_regs\n"); 4088 err = PTR_ERR(regs); 4089 goto reg_err; 4090 } 4091 oa_config->b_counter_regs = regs; 4092 4093 if (INTEL_GEN(perf->i915) < 8) { 4094 if (args->n_flex_regs != 0) { 4095 err = -EINVAL; 4096 goto reg_err; 4097 } 4098 } else { 4099 oa_config->flex_regs_len = args->n_flex_regs; 4100 regs = alloc_oa_regs(perf, 4101 perf->ops.is_valid_flex_reg, 4102 u64_to_user_ptr(args->flex_regs_ptr), 4103 args->n_flex_regs); 4104 4105 if (IS_ERR(regs)) { 4106 DRM_DEBUG("Failed to create OA config for flex_regs\n"); 4107 err = PTR_ERR(regs); 4108 goto reg_err; 4109 } 4110 oa_config->flex_regs = regs; 4111 } 4112 4113 err = mutex_lock_interruptible(&perf->metrics_lock); 4114 if (err) 4115 goto reg_err; 4116 4117 /* We shouldn't have too many configs, so this iteration shouldn't be 4118 * too costly. 4119 */ 4120 idr_for_each_entry(&perf->metrics_idr, tmp, id) { 4121 if (!strcmp(tmp->uuid, oa_config->uuid)) { 4122 DRM_DEBUG("OA config already exists with this uuid\n"); 4123 err = -EADDRINUSE; 4124 goto sysfs_err; 4125 } 4126 } 4127 4128 err = create_dynamic_oa_sysfs_entry(perf, oa_config); 4129 if (err) { 4130 DRM_DEBUG("Failed to create sysfs entry for OA config\n"); 4131 goto sysfs_err; 4132 } 4133 4134 /* Config id 0 is invalid, id 1 is reserved for the kernel stored test config.
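Dynamic configs therefore have their IDs allocated from 2 upwards by the idr_alloc() below.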
*/ 4135 oa_config->id = idr_alloc(&perf->metrics_idr, 4136 oa_config, 2, 4137 0, GFP_KERNEL); 4138 if (oa_config->id < 0) { 4139 DRM_DEBUG("Failed to allocate id for OA config\n"); 4140 err = oa_config->id; 4141 goto sysfs_err; 4142 } 4143 4144 mutex_unlock(&perf->metrics_lock); 4145 4146 DRM_DEBUG("Added config %s id=%i\n", oa_config->uuid, oa_config->id); 4147 4148 return oa_config->id; 4149 4150 sysfs_err: 4151 mutex_unlock(&perf->metrics_lock); 4152 reg_err: 4153 i915_oa_config_put(oa_config); 4154 DRM_DEBUG("Failed to add new OA config\n"); 4155 return err; 4156 } 4157 4158 /** 4159 * i915_perf_remove_config_ioctl - DRM ioctl() for userspace to remove an OA config 4160 * @dev: drm device 4161 * @data: ioctl data (pointer to u64 integer) copied from userspace 4162 * @file: drm file 4163 * 4164 * Configs can be removed while being used; they will stop appearing in sysfs 4165 * and their content will be freed when the stream using the config is closed. 4166 * 4167 * Returns: 0 on success or a negative error code on failure. 4168 */ 4169 int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data, 4170 struct drm_file *file) 4171 { 4172 struct i915_perf *perf = &to_i915(dev)->perf; 4173 u64 *arg = data; 4174 struct i915_oa_config *oa_config; 4175 int ret; 4176 4177 if (!perf->i915) { 4178 DRM_DEBUG("i915 perf interface not available for this system\n"); 4179 return -ENOTSUPP; 4180 } 4181 4182 if (i915_perf_stream_paranoid && !perfmon_capable()) { 4183 DRM_DEBUG("Insufficient privileges to remove i915 OA config\n"); 4184 return -EACCES; 4185 } 4186 4187 ret = mutex_lock_interruptible(&perf->metrics_lock); 4188 if (ret) 4189 return ret; 4190 4191 oa_config = idr_find(&perf->metrics_idr, *arg); 4192 if (!oa_config) { 4193 DRM_DEBUG("Failed to remove unknown OA config\n"); 4194 ret = -ENOENT; 4195 goto err_unlock; 4196 } 4197 4198 GEM_BUG_ON(*arg != oa_config->id); 4199 4200 sysfs_remove_group(perf->metrics_kobj, &oa_config->sysfs_metric); 4201 4202 idr_remove(&perf->metrics_idr, *arg); 4203 4204 mutex_unlock(&perf->metrics_lock); 4205 4206 DRM_DEBUG("Removed config %s id=%i\n", oa_config->uuid, oa_config->id); 4207 4208 i915_oa_config_put(oa_config); 4209 4210 return 0; 4211 4212 err_unlock: 4213 mutex_unlock(&perf->metrics_lock); 4214 return ret; 4215 } 4216 4217 static struct ctl_table oa_table[] = { 4218 { 4219 .procname = "perf_stream_paranoid", 4220 .data = &i915_perf_stream_paranoid, 4221 .maxlen = sizeof(i915_perf_stream_paranoid), 4222 .mode = 0644, 4223 .proc_handler = proc_dointvec_minmax, 4224 .extra1 = SYSCTL_ZERO, 4225 .extra2 = SYSCTL_ONE, 4226 }, 4227 { 4228 .procname = "oa_max_sample_rate", 4229 .data = &i915_oa_max_sample_rate, 4230 .maxlen = sizeof(i915_oa_max_sample_rate), 4231 .mode = 0644, 4232 .proc_handler = proc_dointvec_minmax, 4233 .extra1 = SYSCTL_ZERO, 4234 .extra2 = &oa_sample_rate_hard_limit, 4235 }, 4236 {} 4237 }; 4238 4239 static struct ctl_table i915_root[] = { 4240 { 4241 .procname = "i915", 4242 .maxlen = 0, 4243 .mode = 0555, 4244 .child = oa_table, 4245 }, 4246 {} 4247 }; 4248 4249 static struct ctl_table dev_root[] = { 4250 { 4251 .procname = "dev", 4252 .maxlen = 0, 4253 .mode = 0555, 4254 .child = i915_root, 4255 }, 4256 {} 4257 }; 4258 4259 /** 4260 * i915_perf_init - initialize i915-perf state on module bind 4261 * @i915: i915 device instance 4262 * 4263 * Initializes i915-perf state without exposing anything to userspace.
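In particular this is where the HW specific hooks in perf->ops and the table of supported OA formats are chosen for the running platform.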
* 4265 * Note: i915-perf initialization is split into an 'init' and 'register' 4266 * phase with the i915_perf_register() exposing state to userspace. 4267 */ 4268 void i915_perf_init(struct drm_i915_private *i915) 4269 { 4270 struct i915_perf *perf = &i915->perf; 4271 4272 /* XXX const struct i915_perf_ops! */ 4273 4274 if (IS_HASWELL(i915)) { 4275 perf->ops.is_valid_b_counter_reg = gen7_is_valid_b_counter_addr; 4276 perf->ops.is_valid_mux_reg = hsw_is_valid_mux_addr; 4277 perf->ops.is_valid_flex_reg = NULL; 4278 perf->ops.enable_metric_set = hsw_enable_metric_set; 4279 perf->ops.disable_metric_set = hsw_disable_metric_set; 4280 perf->ops.oa_enable = gen7_oa_enable; 4281 perf->ops.oa_disable = gen7_oa_disable; 4282 perf->ops.read = gen7_oa_read; 4283 perf->ops.oa_hw_tail_read = gen7_oa_hw_tail_read; 4284 4285 perf->oa_formats = hsw_oa_formats; 4286 } else if (HAS_LOGICAL_RING_CONTEXTS(i915)) { 4287 /* Note that although we could theoretically also support the 4288 * legacy ringbuffer mode on BDW (and earlier iterations of 4289 * this driver, before upstreaming, did this) it didn't seem 4290 * worth the complexity to maintain now that BDW+ enables 4291 * execlist mode by default. 4292 */ 4293 perf->ops.read = gen8_oa_read; 4294 4295 if (IS_GEN_RANGE(i915, 8, 9)) { 4296 perf->oa_formats = gen8_plus_oa_formats; 4297 4298 perf->ops.is_valid_b_counter_reg = 4299 gen7_is_valid_b_counter_addr; 4300 perf->ops.is_valid_mux_reg = 4301 gen8_is_valid_mux_addr; 4302 perf->ops.is_valid_flex_reg = 4303 gen8_is_valid_flex_addr; 4304 4305 if (IS_CHERRYVIEW(i915)) { 4306 perf->ops.is_valid_mux_reg = 4307 chv_is_valid_mux_addr; 4308 } 4309 4310 perf->ops.oa_enable = gen8_oa_enable; 4311 perf->ops.oa_disable = gen8_oa_disable; 4312 perf->ops.enable_metric_set = gen8_enable_metric_set; 4313 perf->ops.disable_metric_set = gen8_disable_metric_set; 4314 perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read; 4315 4316 if (IS_GEN(i915, 8)) { 4317 perf->ctx_oactxctrl_offset = 0x120; 4318 perf->ctx_flexeu0_offset = 0x2ce; 4319 4320 perf->gen8_valid_ctx_bit = BIT(25); 4321 } else { 4322 perf->ctx_oactxctrl_offset = 0x128; 4323 perf->ctx_flexeu0_offset = 0x3de; 4324 4325 perf->gen8_valid_ctx_bit = BIT(16); 4326 } 4327 } else if (IS_GEN_RANGE(i915, 10, 11)) { 4328 perf->oa_formats = gen8_plus_oa_formats; 4329 4330 perf->ops.is_valid_b_counter_reg = 4331 gen7_is_valid_b_counter_addr; 4332 perf->ops.is_valid_mux_reg = 4333 gen10_is_valid_mux_addr; 4334 perf->ops.is_valid_flex_reg = 4335 gen8_is_valid_flex_addr; 4336 4337 perf->ops.oa_enable = gen8_oa_enable; 4338 perf->ops.oa_disable = gen8_oa_disable; 4339 perf->ops.enable_metric_set = gen8_enable_metric_set; 4340 perf->ops.disable_metric_set = gen10_disable_metric_set; 4341 perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read; 4342 4343 if (IS_GEN(i915, 10)) { 4344 perf->ctx_oactxctrl_offset = 0x128; 4345 perf->ctx_flexeu0_offset = 0x3de; 4346 } else { 4347 perf->ctx_oactxctrl_offset = 0x124; 4348 perf->ctx_flexeu0_offset = 0x78e; 4349 } 4350 perf->gen8_valid_ctx_bit = BIT(16); 4351 } else if (IS_GEN(i915, 12)) { 4352 perf->oa_formats = gen12_oa_formats; 4353 4354 perf->ops.is_valid_b_counter_reg = 4355 gen12_is_valid_b_counter_addr; 4356 perf->ops.is_valid_mux_reg = 4357 gen12_is_valid_mux_addr; 4358 perf->ops.is_valid_flex_reg = 4359 gen8_is_valid_flex_addr; 4360 4361 perf->ops.oa_enable = gen12_oa_enable; 4362 perf->ops.oa_disable = gen12_oa_disable; 4363 perf->ops.enable_metric_set = gen12_enable_metric_set; 4364 perf->ops.disable_metric_set = gen12_disable_metric_set;
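/* Note: on Gen12 the OA state, including the HW tail pointer read below, is expected to live in the global OAG unit registers (see the GEN12_OAG_* usage above). */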
perf->ops.oa_hw_tail_read = gen12_oa_hw_tail_read; 4366 4367 perf->ctx_flexeu0_offset = 0; 4368 perf->ctx_oactxctrl_offset = 0x144; 4369 } 4370 } 4371 4372 if (perf->ops.enable_metric_set) { 4373 mutex_init(&perf->lock); 4374 4375 /* Choose a representative limit */ 4376 oa_sample_rate_hard_limit = i915->gt.clock_frequency / 2; 4377 4378 mutex_init(&perf->metrics_lock); 4379 idr_init_base(&perf->metrics_idr, 1); 4380 4381 /* We set up some ratelimit state to potentially throttle any 4382 * _NOTES about spurious, invalid OA reports which we don't 4383 * forward to userspace. 4384 * 4385 * We print a _NOTE about any throttling when closing the 4386 * stream instead of waiting until driver _fini, which no one 4387 * would ever see. 4388 * 4389 * Using the same limiting factors as printk_ratelimit() 4390 */ 4391 ratelimit_state_init(&perf->spurious_report_rs, 5 * HZ, 10); 4392 /* Since we use a DRM_NOTE for spurious reports it would be 4393 * inconsistent to let __ratelimit() automatically print a 4394 * warning for throttling. 4395 */ 4396 ratelimit_set_flags(&perf->spurious_report_rs, 4397 RATELIMIT_MSG_ON_RELEASE); 4398 4399 ratelimit_state_init(&perf->tail_pointer_race, 4400 5 * HZ, 10); 4401 ratelimit_set_flags(&perf->tail_pointer_race, 4402 RATELIMIT_MSG_ON_RELEASE); 4403 4404 atomic64_set(&perf->noa_programming_delay, 4405 500 * 1000 /* 500us */); 4406 4407 perf->i915 = i915; 4408 } 4409 } 4410 4411 static int destroy_config(int id, void *p, void *data) 4412 { 4413 i915_oa_config_put(p); 4414 return 0; 4415 } 4416 4417 void i915_perf_sysctl_register(void) 4418 { 4419 sysctl_header = register_sysctl_table(dev_root); 4420 } 4421 4422 void i915_perf_sysctl_unregister(void) 4423 { 4424 unregister_sysctl_table(sysctl_header); 4425 } 4426 4427 /** 4428 * i915_perf_fini - Counterpart to i915_perf_init() 4429 * @i915: i915 device instance 4430 */ 4431 void i915_perf_fini(struct drm_i915_private *i915) 4432 { 4433 struct i915_perf *perf = &i915->perf; 4434 4435 if (!perf->i915) 4436 return; 4437 4438 idr_for_each(&perf->metrics_idr, destroy_config, perf); 4439 idr_destroy(&perf->metrics_idr); 4440 4441 memset(&perf->ops, 0, sizeof(perf->ops)); 4442 perf->i915 = NULL; 4443 } 4444 4445 /** 4446 * i915_perf_ioctl_version - Version of the i915-perf subsystem 4447 * 4448 * This version number is used by userspace to detect available features. 4449 */ 4450 int i915_perf_ioctl_version(void) 4451 { 4452 /* 4453 * 1: Initial version 4454 * I915_PERF_IOCTL_ENABLE 4455 * I915_PERF_IOCTL_DISABLE 4456 * 4457 * 2: Added runtime modification of OA config. 4458 * I915_PERF_IOCTL_CONFIG 4459 * 4460 * 3: Add DRM_I915_PERF_PROP_HOLD_PREEMPTION parameter to hold 4461 * preemption on a particular context so that performance data is 4462 * accessible from a delta of MI_RPC reports without looking at the 4463 * OA buffer. 4464 * 4465 * 4: Add DRM_I915_PERF_PROP_GLOBAL_SSEU to limit what contexts can 4466 * be run for the duration of the performance recording based on 4467 * their SSEU configuration. 4468 * 4469 * 5: Add DRM_I915_PERF_PROP_POLL_OA_PERIOD parameter that controls the 4470 * interval for the hrtimer used to check for OA data. 4471 */ 4472 return 5; 4473 } 4474 4475 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) 4476 #include "selftests/i915_perf.c" 4477 #endif 4478