/*
 * Copyright © 2015-2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *   Robert Bragg <robert@sixbynine.org>
 */


/**
 * DOC: i915 Perf Overview
 *
 * Gen graphics supports a large number of performance counters that can help
 * driver and application developers understand and optimize their use of the
 * GPU.
 *
 * This i915 perf interface enables userspace to configure and open a file
 * descriptor representing a stream of GPU metrics which can then be read() as
 * a stream of sample records.
 *
 * The interface is particularly suited to exposing buffered metrics that are
 * captured by DMA from the GPU, unsynchronized with and unrelated to the CPU.
 *
 * Streams representing a single context are accessible to applications with a
 * corresponding drm file descriptor, such that OpenGL can use the interface
 * without special privileges. Access to system-wide metrics requires root
 * privileges by default, unless changed via the dev.i915.perf_stream_paranoid
 * sysctl option.
 */

/**
 * DOC: i915 Perf History and Comparison with Core Perf
 *
 * The interface was initially inspired by the core Perf infrastructure, but
 * some notable differences are:
 *
 * i915 perf file descriptors represent a "stream" instead of an "event"; a
 * perf event primarily corresponds to a single 64bit value, while a stream
 * might sample sets of tightly-coupled counters, depending on the
 * configuration. For example the Gen OA unit isn't designed to support
 * orthogonal configurations of individual counters; it's configured for a set
 * of related counters. Samples for an i915 perf stream capturing OA metrics
 * will include a set of counter values packed in a compact HW specific format.
 * The OA unit supports a number of different packing formats which can be
 * selected by the user opening the stream. Perf has support for grouping
 * events, but each event in the group is configured, validated and
 * authenticated individually with separate system calls.
 *
 * i915 perf stream configurations are provided as an array of u64 (key, value)
 * pairs, instead of a fixed struct with multiple miscellaneous config members,
 * interleaved with event-type specific members.
 *
 * i915 perf doesn't support exposing metrics via an mmap'd circular buffer.
 * The supported metrics are being written to memory by the GPU unsynchronized
 * with the CPU, using HW specific packing formats for counter sets. Sometimes
 * the constraints on HW configuration require reports to be filtered before it
 * would be acceptable to expose them to unprivileged applications - to hide
 * the metrics of other processes/contexts. For these use cases a read() based
 * interface is a good fit, and provides an opportunity to filter data as it
 * gets copied from the GPU mapped buffers to userspace buffers.
 *
 *
 * Issues hit with first prototype based on Core Perf
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * The first prototype of this driver was based on the core perf
 * infrastructure, and while we did make that mostly work, with some changes to
 * perf, we found we were breaking or working around too many assumptions baked
 * into perf's current cpu-centric design.
 *
 * In the end we didn't see a clear benefit to making perf's implementation and
 * interface more complex by changing design assumptions while we knew we still
 * wouldn't be able to use any existing perf based userspace tools.
 *
 * Also considering the Gen specific nature of the Observability hardware and
 * how userspace will sometimes need to combine i915 perf OA metrics with
 * side-band OA data captured via MI_REPORT_PERF_COUNT commands, we're
 * expecting the interface to be used by a platform specific userspace such as
 * OpenGL or tools. This is to say: we aren't inherently missing out on having
 * a standard vendor/architecture agnostic interface by not using perf.
 *
 *
 * For posterity, in case we might re-visit trying to adapt core perf to be
 * better suited to exposing i915 metrics these were the main pain points we
 * hit:
 *
 * - The perf based OA PMU driver broke some significant design assumptions:
 *
 *   Existing perf pmus are used for profiling work on a cpu and we were
 *   introducing the idea of _IS_DEVICE pmus with different security
 *   implications, the need to fake cpu-related data (such as user/kernel
 *   registers) to fit with perf's current design, and adding _DEVICE records
 *   as a way to forward device-specific status records.
 *
 *   The OA unit writes reports of counters into a circular buffer, without
 *   involvement from the CPU, making our PMU driver the first of a kind.
 *
 *   Given the way we were periodically forwarding data from the GPU-mapped OA
 *   buffer to perf's buffer, those bursts of sample writes looked to perf like
 *   we were sampling too fast and so we had to subvert its throttling checks.
 *
 *   Perf supports groups of counters and allows those to be read via
 *   transactions internally but transactions currently seem designed to be
 *   explicitly initiated from the cpu (say in response to a userspace read())
 *   and while we could pull a report out of the OA buffer we can't
 *   trigger a report from the cpu on demand.
 *
 *   Related to being report based; the OA counters are configured in HW as a
 *   set while perf generally expects counter configurations to be orthogonal.
 *   Although counters can be associated with a group leader as they are
 *   opened, there's no clear precedent for being able to provide group-wide
 *   configuration attributes (for example we want to let userspace choose the
 *   OA unit report format used to capture all counters in a set, or specify a
 *   GPU context to filter metrics on). We avoided using perf's grouping
 *   feature and forwarded OA reports to userspace via perf's 'raw' sample
 *   field. This suited our userspace well considering how coupled the counters
 *   are when dealing with normalizing. It would be inconvenient to split
 *   counters up into separate events, only to require userspace to recombine
 *   them. For Mesa it's also convenient to be forwarded raw, periodic reports
 *   for combining with the side-band raw reports it captures using
 *   MI_REPORT_PERF_COUNT commands.
 *
 * - As a side note on perf's grouping feature, there was also some concern
 *   that using PERF_FORMAT_GROUP as a way to pack together counter values
 *   would quite drastically inflate our sample sizes, which would likely
 *   lower the effective sampling resolutions we could use when the available
 *   memory bandwidth is limited.
 *
 *   With the OA unit's report formats, counters are packed together as 32
 *   or 40bit values, with the largest report size being 256 bytes.
 *
 *   PERF_FORMAT_GROUP values are 64bit, but there doesn't appear to be a
 *   documented ordering to the values, implying PERF_FORMAT_ID must also be
 *   used to add a 64bit ID before each value; giving 16 bytes per counter.
 *
 *   Related to counter orthogonality: we can't time share the OA unit, while
 *   event scheduling is a central design idea within perf for allowing
 *   userspace to open + enable more events than can be configured in HW at any
 *   one time. The OA unit is not designed to allow re-configuration while in
 *   use. We can't reconfigure the OA unit without losing internal OA unit
 *   state which we can't access explicitly to save and restore. Reconfiguring
 *   the OA unit is also relatively slow, involving ~100 register writes. From
 *   userspace Mesa also depends on a stable OA configuration when emitting
 *   MI_REPORT_PERF_COUNT commands and importantly the OA unit can't be
 *   disabled while there are outstanding MI_RPC commands lest we hang the
 *   command streamer.
 *
 * - The contents of sample records aren't extensible by device drivers (i.e.
 *   the sample_type bits). As an example, Sourab Gupta had been looking to
 *   attach GPU timestamps to our OA samples. We were shoehorning OA reports
 *   into sample records by using the 'raw' field, but it's tricky to pack more
 *   than one thing into this field because events/core.c currently only lets a
 *   pmu give a single raw data pointer plus len which will be copied into the
 *   ring buffer. To include more than the OA report we'd have to copy the
 *   report into an intermediate larger buffer. I'd been considering allowing a
 *   vector of data+len values to be specified for copying the raw data, but
 *   it felt like a kludge to be using the raw field for this purpose.
 *
 * - It felt like our perf based PMU was making some technical compromises
 *   just for the sake of using perf:
 *
 *   perf_event_open() requires events to either relate to a pid or a specific
 *   cpu core, while our device pmu related to neither.
 *   Events opened with a pid will be automatically enabled/disabled according
 *   to the scheduling of that process - so not appropriate for us. When an
 *   event is related to a cpu id, perf ensures pmu methods will be invoked
 *   via an inter-processor interrupt on that core. To avoid invasive changes
 *   our userspace opened OA perf events for a specific cpu. This was workable
 *   but it meant the majority of the OA driver ran in atomic context,
 *   including all OA report forwarding, which wasn't really necessary in our
 *   case and seemed to make our locking requirements somewhat complex as we
 *   handled the interaction with the rest of the i915 driver.
 */

#include <linux/anon_inodes.h>
#include <linux/sizes.h>
#include <linux/uuid.h>

#include "i915_drv.h"
#include "i915_oa_hsw.h"
#include "i915_oa_bdw.h"
#include "i915_oa_chv.h"
#include "i915_oa_sklgt2.h"
#include "i915_oa_sklgt3.h"
#include "i915_oa_sklgt4.h"
#include "i915_oa_bxt.h"
#include "i915_oa_kblgt2.h"
#include "i915_oa_kblgt3.h"
#include "i915_oa_glk.h"
#include "i915_oa_cflgt2.h"
#include "i915_oa_cflgt3.h"
#include "i915_oa_cnl.h"
#include "i915_oa_icl.h"

/* HW requires this to be a power of two, between 128K and 16M, though the
 * driver is currently generally designed assuming the largest 16M size is
 * used such that the overflow cases are unlikely in normal operation.
 */
#define OA_BUFFER_SIZE		SZ_16M

#define OA_TAKEN(tail, head)	((tail - head) & (OA_BUFFER_SIZE - 1))

/**
 * DOC: OA Tail Pointer Race
 *
 * There's a HW race condition between OA unit tail pointer register updates
 * and writes to memory whereby the tail pointer can sometimes get ahead of
 * what's been written out to the OA buffer so far (in terms of what's visible
 * to the CPU).
 *
 * Although this can be observed explicitly while copying reports to userspace
 * by checking for a zeroed report-id field in tail reports, we want to account
 * for this earlier, as part of the oa_buffer_check_unlocked call, to avoid
 * lots of redundant read() attempts.
 *
 * In effect we define a tail pointer for reading that lags the real tail
 * pointer by at least %OA_TAIL_MARGIN_NSEC nanoseconds, which gives enough
 * time for the corresponding reports to become visible to the CPU.
 *
 * To manage this we actually track two tail pointers:
 * 1) An 'aging' tail with an associated timestamp that is tracked until we
 *    can trust the corresponding data is visible to the CPU; at which point
 *    it is considered 'aged'.
 * 2) An 'aged' tail that can be used for read()ing.
 *
 * The two separate pointers let us decouple read()s from tail pointer aging.
 *
 * The tail pointers are checked and updated at a limited rate within a
 * hrtimer callback (the same callback that is used for delivering EPOLLIN
 * events).
 *
 * Initially the tails are marked invalid with %INVALID_TAIL_PTR which
 * indicates that an updated tail pointer is needed.
 *
 * Most of the implementation details for this workaround are in
 * oa_buffer_check_unlocked() and _append_oa_reports().
 *
 * Note for posterity: previously the driver used to define an effective tail
 * pointer that lagged the real pointer by a 'tail margin' measured in bytes
 * derived from %OA_TAIL_MARGIN_NSEC and the configured sampling frequency.
 * This was flawed considering that the OA unit may also automatically generate
 * non-periodic reports (such as on context switch) or the OA unit may be
 * enabled without any periodic sampling.
 */
#define OA_TAIL_MARGIN_NSEC	100000ULL
#define INVALID_TAIL_PTR	0xffffffff

/* frequency for checking whether the OA unit has written new reports to the
 * circular OA buffer...
 */
#define POLL_FREQUENCY 200
#define POLL_PERIOD (NSEC_PER_SEC / POLL_FREQUENCY)

/* for sysctl proc_dointvec_minmax of dev.i915.perf_stream_paranoid */
static int zero;
static int one = 1;
static u32 i915_perf_stream_paranoid = true;

/* The maximum exponent the hardware accepts is 63 (essentially it selects one
 * of the 64bit timestamp bits to trigger reports from) but there's currently
 * no known use case for sampling as infrequently as once per 47 thousand
 * years.
 *
 * Since the timestamps included in OA reports are only 32bits it seems
 * reasonable to limit the OA exponent where it's still possible to account
 * for overflow in OA report timestamps.
 */
#define OA_EXPONENT_MAX 31

#define INVALID_CTX_ID 0xffffffff

/* On Gen8+ automatically triggered OA reports include a 'reason' field... */
#define OAREPORT_REASON_MASK           0x3f
#define OAREPORT_REASON_SHIFT          19
#define OAREPORT_REASON_TIMER          (1<<0)
#define OAREPORT_REASON_CTX_SWITCH     (1<<3)
#define OAREPORT_REASON_CLK_RATIO      (1<<5)


/* For sysctl proc_dointvec_minmax of i915_oa_max_sample_rate
 *
 * The highest sampling frequency we can theoretically program the OA unit
 * with is always half the timestamp frequency: E.g. 6.25MHz for Haswell.
 *
 * Initialized just before we register the sysctl parameter.
 */
static int oa_sample_rate_hard_limit;

/* Theoretically we can program the OA unit to sample every 160ns but don't
 * allow that by default unless root...
 *
 * The default threshold of 100000Hz is based on perf's similar
 * kernel.perf_event_max_sample_rate sysctl parameter.
 */
static u32 i915_oa_max_sample_rate = 100000;

/* XXX: beware if future OA HW adds new report formats that the current
 * code assumes all reports have a power-of-two size and ~(size - 1) can
 * be used as a mask to align the OA tail pointer.
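 *
 * As a worked example of that mask (an illustrative sketch, not driver
 * code): with a 256 byte report format, ~(256 - 1) == 0xffffff00 as a u32,
 * so a raw hardware tail pointer of 0x1234 rounds down to the report
 * boundary 0x1200:
 *
 *   u32 report_size = 256;
 *   u32 hw_tail = 0x1234;
 *
 *   hw_tail &= ~(report_size - 1);  // hw_tail == 0x1200
 *
 * A non-power-of-two report size would break this and require a modulo
 * instead.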
 */
static struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = {
        [I915_OA_FORMAT_A13]        = { 0, 64 },
        [I915_OA_FORMAT_A29]        = { 1, 128 },
        [I915_OA_FORMAT_A13_B8_C8]  = { 2, 128 },
        /* A29_B8_C8 Disallowed as 192 bytes doesn't factor into buffer size */
        [I915_OA_FORMAT_B4_C8]      = { 4, 64 },
        [I915_OA_FORMAT_A45_B8_C8]  = { 5, 256 },
        [I915_OA_FORMAT_B4_C8_A16]  = { 6, 128 },
        [I915_OA_FORMAT_C4_B8]      = { 7, 64 },
};

static struct i915_oa_format gen8_plus_oa_formats[I915_OA_FORMAT_MAX] = {
        [I915_OA_FORMAT_A12]                = { 0, 64 },
        [I915_OA_FORMAT_A12_B8_C8]          = { 2, 128 },
        [I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
        [I915_OA_FORMAT_C4_B8]              = { 7, 64 },
};

#define SAMPLE_OA_REPORT      (1<<0)

/**
 * struct perf_open_properties - for validated properties given to open a stream
 * @sample_flags: `DRM_I915_PERF_PROP_SAMPLE_*` properties are tracked as flags
 * @single_context: Whether a single or all gpu contexts should be monitored
 * @ctx_handle: A gem ctx handle for use with @single_context
 * @metrics_set: An ID for an OA unit metric set advertised via sysfs
 * @oa_format: An OA unit HW report format
 * @oa_periodic: Whether to enable periodic OA unit sampling
 * @oa_period_exponent: The OA unit sampling period is derived from this
 *
 * As read_properties_unlocked() enumerates and validates the properties given
 * to open a stream of metrics the configuration is built up in the structure
 * which starts out zero initialized.
 */
struct perf_open_properties {
        u32 sample_flags;

        u64 single_context:1;
        u64 ctx_handle;

        /* OA sampling state */
        int metrics_set;
        int oa_format;
        bool oa_periodic;
        int oa_period_exponent;
};

static void free_oa_config(struct drm_i915_private *dev_priv,
                           struct i915_oa_config *oa_config)
{
        if (!PTR_ERR(oa_config->flex_regs))
                kfree(oa_config->flex_regs);
        if (!PTR_ERR(oa_config->b_counter_regs))
                kfree(oa_config->b_counter_regs);
        if (!PTR_ERR(oa_config->mux_regs))
                kfree(oa_config->mux_regs);
        kfree(oa_config);
}

static void put_oa_config(struct drm_i915_private *dev_priv,
                          struct i915_oa_config *oa_config)
{
        if (!atomic_dec_and_test(&oa_config->ref_count))
                return;

        free_oa_config(dev_priv, oa_config);
}

static int get_oa_config(struct drm_i915_private *dev_priv,
                         int metrics_set,
                         struct i915_oa_config **out_config)
{
        int ret;

        if (metrics_set == 1) {
                *out_config = &dev_priv->perf.oa.test_config;
                atomic_inc(&dev_priv->perf.oa.test_config.ref_count);
                return 0;
        }

        ret = mutex_lock_interruptible(&dev_priv->perf.metrics_lock);
        if (ret)
                return ret;

        *out_config = idr_find(&dev_priv->perf.metrics_idr, metrics_set);
        if (!*out_config)
                ret = -EINVAL;
        else
                atomic_inc(&(*out_config)->ref_count);

        mutex_unlock(&dev_priv->perf.metrics_lock);

        return ret;
}

static u32 gen8_oa_hw_tail_read(struct drm_i915_private *dev_priv)
{
        return I915_READ(GEN8_OATAILPTR) & GEN8_OATAILPTR_MASK;
}

static u32 gen7_oa_hw_tail_read(struct drm_i915_private *dev_priv)
{
        u32 oastatus1 = I915_READ(GEN7_OASTATUS1);

        return oastatus1 & GEN7_OASTATUS1_TAIL_MASK;
}

/**
 * oa_buffer_check_unlocked - check for data and update tail ptr state
 * @dev_priv: i915 device instance
 *
 * This is either called via fops (for blocking reads in user ctx) or the
 * poll check hrtimer (atomic ctx) to check the OA buffer tail pointer and
 * see whether there is data available for userspace to read.
 *
 * This function is central to providing a workaround for the OA unit tail
 * pointer having a race with respect to what data is visible to the CPU.
 * It is responsible for reading tail pointers from the hardware and giving
 * the pointers time to 'age' before they are made available for reading.
 * (See description of OA_TAIL_MARGIN_NSEC above for further details.)
 *
 * Besides returning true when there is data available to read() this function
 * also has the side effect of updating the oa_buffer.tails[], .aging_timestamp
 * and .aged_tail_idx state used for reading.
 *
 * Note: It's safe to read OA config state here unlocked, assuming that this is
 * only called while the stream is enabled, while the global OA configuration
 * can't be modified.
 *
 * Returns: %true if the OA buffer contains data, else %false
 */
static bool oa_buffer_check_unlocked(struct drm_i915_private *dev_priv)
{
        int report_size = dev_priv->perf.oa.oa_buffer.format_size;
        unsigned long flags;
        unsigned int aged_idx;
        u32 head, hw_tail, aged_tail, aging_tail;
        u64 now;

        /* We have to consider the (unlikely) possibility that read() errors
         * could result in an OA buffer reset which might reset the head,
         * tails[] and aged_tail state.
         */
        spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

        /* NB: The head we observe here might effectively be a little out of
         * date (between head and tails[aged_idx].offset) if there is
         * currently a read() in progress.
         */
        head = dev_priv->perf.oa.oa_buffer.head;

        aged_idx = dev_priv->perf.oa.oa_buffer.aged_tail_idx;
        aged_tail = dev_priv->perf.oa.oa_buffer.tails[aged_idx].offset;
        aging_tail = dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset;

        hw_tail = dev_priv->perf.oa.ops.oa_hw_tail_read(dev_priv);

        /* The tail pointer increases in 64 byte increments,
         * not in report_size steps...
         */
        hw_tail &= ~(report_size - 1);

        now = ktime_get_mono_fast_ns();

        /* Update the aged tail
         *
         * Flip the tail pointer available for read()s once the aging tail is
         * old enough to trust that the corresponding data will be visible to
         * the CPU...
         *
         * Do this before updating the aging pointer in case we may be able to
         * immediately start aging a new pointer too (if new data has become
         * available) without needing to wait for a later hrtimer callback.
         */
        if (aging_tail != INVALID_TAIL_PTR &&
            ((now - dev_priv->perf.oa.oa_buffer.aging_timestamp) >
             OA_TAIL_MARGIN_NSEC)) {

                aged_idx ^= 1;
                dev_priv->perf.oa.oa_buffer.aged_tail_idx = aged_idx;

                aged_tail = aging_tail;

                /* Mark that we need a new pointer to start aging... */
                dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset = INVALID_TAIL_PTR;
                aging_tail = INVALID_TAIL_PTR;
        }

        /* Update the aging tail
         *
         * We throttle aging tail updates until we have a new tail that
         * represents >= one report more data than is already available for
         * reading. This ensures there will be enough data for a successful
         * read once this new pointer has aged and ensures we will give the new
         * pointer time to age.
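         *
         * For intuition: OA_TAKEN() is modular distance within the
         * power-of-two sized buffer, so it copes with wraparound for free.
         * A worked example with hypothetical offsets and the 16M buffer:
         *
         *   OA_TAKEN(0x40, 0xffffc0) == (0x40 - 0xffffc0) & 0xffffff == 0x80
         *
         * i.e. a tail that has wrapped past the end of the buffer still
         * reads as 128 bytes ahead of the head.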
         */
        if (aging_tail == INVALID_TAIL_PTR &&
            (aged_tail == INVALID_TAIL_PTR ||
             OA_TAKEN(hw_tail, aged_tail) >= report_size)) {
                struct i915_vma *vma = dev_priv->perf.oa.oa_buffer.vma;
                u32 gtt_offset = i915_ggtt_offset(vma);

                /* Be paranoid and do a bounds check on the pointer read back
                 * from hardware, just in case some spurious hardware condition
                 * could put the tail out of bounds...
                 */
                if (hw_tail >= gtt_offset &&
                    hw_tail < (gtt_offset + OA_BUFFER_SIZE)) {
                        dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset =
                                aging_tail = hw_tail;
                        dev_priv->perf.oa.oa_buffer.aging_timestamp = now;
                } else {
                        DRM_ERROR("Ignoring spurious out of range OA buffer tail pointer = %u\n",
                                  hw_tail);
                }
        }

        spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

        return aged_tail == INVALID_TAIL_PTR ?
                false : OA_TAKEN(aged_tail, head) >= report_size;
}

/**
 * append_oa_status - Appends a status record to a userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @type: The kind of status to report to userspace
 *
 * Writes a status record (such as `DRM_I915_PERF_RECORD_OA_REPORT_LOST`)
 * into the userspace read() buffer.
 *
 * The @buf @offset will only be updated on success.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int append_oa_status(struct i915_perf_stream *stream,
                            char __user *buf,
                            size_t count,
                            size_t *offset,
                            enum drm_i915_perf_record_type type)
{
        struct drm_i915_perf_record_header header = { type, 0, sizeof(header) };

        if ((count - *offset) < header.size)
                return -ENOSPC;

        if (copy_to_user(buf + *offset, &header, sizeof(header)))
                return -EFAULT;

        (*offset) += header.size;

        return 0;
}

/**
 * append_oa_sample - Copies single OA report into userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @report: A single OA report to (optionally) include as part of the sample
 *
 * The contents of a sample are configured through `DRM_I915_PERF_PROP_SAMPLE_*`
 * properties when opening a stream, tracked as `stream->sample_flags`. This
 * function copies the requested components of a single sample to the given
 * read() @buf.
 *
 * The @buf @offset will only be updated on success.
 *
 * Returns: 0 on success, negative error code on failure.
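 *
 * For context, a minimal userspace consumer of such records might look like
 * the following sketch (not kernel code; it assumes a valid drm_fd and a
 * metrics_set_id read from sysfs, uses a hypothetical sampling exponent,
 * and elides error handling):
 *
 *   uint64_t properties[] = {
 *           DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *           DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
 *           DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *           DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *   };
 *   struct drm_i915_perf_open_param param = {
 *           .flags = I915_PERF_FLAG_FD_CLOEXEC,
 *           .num_properties = sizeof(properties) / 16, // 4 (key, value) pairs
 *           .properties_ptr = (uintptr_t)properties,
 *   };
 *   int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 *
 *   uint8_t data[4096];
 *   ssize_t len = read(stream_fd, data, sizeof(data));
 *
 *   for (size_t pos = 0;
 *        pos + sizeof(struct drm_i915_perf_record_header) <= (size_t)len; ) {
 *           struct drm_i915_perf_record_header header;
 *
 *           memcpy(&header, data + pos, sizeof(header));
 *           if (header.type == DRM_I915_PERF_RECORD_SAMPLE) {
 *                   // the raw OA report follows the header here
 *           }
 *           pos += header.size;
 *   }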
 */
static int append_oa_sample(struct i915_perf_stream *stream,
                            char __user *buf,
                            size_t count,
                            size_t *offset,
                            const u8 *report)
{
        struct drm_i915_private *dev_priv = stream->dev_priv;
        int report_size = dev_priv->perf.oa.oa_buffer.format_size;
        struct drm_i915_perf_record_header header;
        u32 sample_flags = stream->sample_flags;

        header.type = DRM_I915_PERF_RECORD_SAMPLE;
        header.pad = 0;
        header.size = stream->sample_size;

        if ((count - *offset) < header.size)
                return -ENOSPC;

        buf += *offset;
        if (copy_to_user(buf, &header, sizeof(header)))
                return -EFAULT;
        buf += sizeof(header);

        if (sample_flags & SAMPLE_OA_REPORT) {
                if (copy_to_user(buf, report, report_size))
                        return -EFAULT;
        }

        (*offset) += header.size;

        return 0;
}

/**
 * gen8_append_oa_reports - Copies all buffered OA reports into userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Notably any error condition resulting in a short read (-%ENOSPC or
 * -%EFAULT) will be returned even though one or more records may
 * have been successfully copied. In this case it's up to the caller
 * to decide if the error should be squashed before returning to
 * userspace.
 *
 * Note: reports are consumed from the head, and appended to the
 * tail, so the tail chases the head?... If you think that's mad
 * and back-to-front you're not alone, but this follows the
 * Gen PRM naming convention.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int gen8_append_oa_reports(struct i915_perf_stream *stream,
                                  char __user *buf,
                                  size_t count,
                                  size_t *offset)
{
        struct drm_i915_private *dev_priv = stream->dev_priv;
        int report_size = dev_priv->perf.oa.oa_buffer.format_size;
        u8 *oa_buf_base = dev_priv->perf.oa.oa_buffer.vaddr;
        u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
        u32 mask = (OA_BUFFER_SIZE - 1);
        size_t start_offset = *offset;
        unsigned long flags;
        unsigned int aged_tail_idx;
        u32 head, tail;
        u32 taken;
        int ret = 0;

        if (WARN_ON(!stream->enabled))
                return -EIO;

        spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

        head = dev_priv->perf.oa.oa_buffer.head;
        aged_tail_idx = dev_priv->perf.oa.oa_buffer.aged_tail_idx;
        tail = dev_priv->perf.oa.oa_buffer.tails[aged_tail_idx].offset;

        spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

        /*
         * An invalid tail pointer here means we're still waiting for the poll
         * hrtimer callback to give us a pointer
         */
        if (tail == INVALID_TAIL_PTR)
                return -EAGAIN;

        /*
         * NB: oa_buffer.head/tail include the gtt_offset which we don't want
         * while indexing relative to oa_buf_base.
         */
        head -= gtt_offset;
        tail -= gtt_offset;

        /*
         * An out of bounds or misaligned head or tail pointer implies a driver
         * bug since we validate + align the tail pointers we read from the
         * hardware and we are in full control of the head pointer which should
         * only be incremented by multiples of the report size (notably also
         * all a power of two).
         */
        if (WARN_ONCE(head > OA_BUFFER_SIZE || head % report_size ||
                      tail > OA_BUFFER_SIZE || tail % report_size,
                      "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
                      head, tail))
                return -EIO;


        for (/* none */;
             (taken = OA_TAKEN(tail, head));
             head = (head + report_size) & mask) {
                u8 *report = oa_buf_base + head;
                u32 *report32 = (void *)report;
                u32 ctx_id;
                u32 reason;

                /*
                 * All the report sizes factor neatly into the buffer
                 * size so we never expect to see a report split
                 * between the beginning and end of the buffer.
                 *
                 * Given the initial alignment check a misalignment
                 * here would imply a driver bug that would result
                 * in an overrun.
                 */
                if (WARN_ON((OA_BUFFER_SIZE - head) < report_size)) {
                        DRM_ERROR("Spurious OA head ptr: non-integral report offset\n");
                        break;
                }

                /*
                 * The reason field includes flags identifying what
                 * triggered this specific report (mostly timer
                 * triggered or e.g. due to a context switch).
                 *
                 * This field is never expected to be zero so we can
                 * check that the report isn't invalid before copying
                 * it to userspace...
                 */
                reason = ((report32[0] >> OAREPORT_REASON_SHIFT) &
                          OAREPORT_REASON_MASK);
                if (reason == 0) {
                        if (__ratelimit(&dev_priv->perf.oa.spurious_report_rs))
                                DRM_NOTE("Skipping spurious, invalid OA report\n");
                        continue;
                }

                /*
                 * XXX: Just keep the lower 21 bits for now since I'm not
                 * entirely sure if the HW touches any of the higher bits in
                 * this field
                 */
                ctx_id = report32[2] & 0x1fffff;

                /*
                 * Squash whatever is in the CTX_ID field if it's marked as
                 * invalid to be sure we avoid false-positive, single-context
                 * filtering below...
                 *
                 * Note that we don't clear the valid_ctx_bit so userspace can
                 * understand that the ID has been squashed by the kernel.
                 */
                if (!(report32[0] & dev_priv->perf.oa.gen8_valid_ctx_bit))
                        ctx_id = report32[2] = INVALID_CTX_ID;

                /*
                 * NB: For Gen 8 the OA unit no longer supports clock gating
                 * off for a specific context and the kernel can't securely
                 * stop the counters from updating as system-wide / global
                 * values.
                 *
                 * Automatic reports now include a context ID so reports can be
                 * filtered on the cpu but it's not worth trying to
                 * automatically subtract/hide counter progress for other
                 * contexts while filtering since we can't stop userspace
                 * issuing MI_REPORT_PERF_COUNT commands which would still
                 * provide a side-band view of the real values.
                 *
                 * To allow userspace (such as Mesa/GL_INTEL_performance_query)
                 * to normalize counters for a single filtered context, it
                 * needs to be forwarded bookend context-switch reports so that
                 * it can track switches in between MI_REPORT_PERF_COUNT
                 * commands and can itself subtract/ignore the progress of
                 * counters associated with other contexts. Note that the
                 * hardware automatically triggers reports when switching to a
                 * new context which are tagged with the ID of the newly active
                 * context. To avoid the complexity (and likely fragility) of
                 * reading ahead while parsing reports to try and minimize
                 * forwarding redundant context switch reports (i.e. between
                 * other, unrelated contexts) we simply elect to forward them
                 * all.
                 *
                 * We don't rely solely on the reason field to identify context
                 * switches since it's not uncommon for periodic samples to
                 * identify a switch before any 'context switch' report.
                 */
                if (!dev_priv->perf.oa.exclusive_stream->ctx ||
                    dev_priv->perf.oa.specific_ctx_id == ctx_id ||
                    (dev_priv->perf.oa.oa_buffer.last_ctx_id ==
                     dev_priv->perf.oa.specific_ctx_id) ||
                    reason & OAREPORT_REASON_CTX_SWITCH) {

                        /*
                         * While filtering for a single context we avoid
                         * leaking the IDs of other contexts.
                         */
                        if (dev_priv->perf.oa.exclusive_stream->ctx &&
                            dev_priv->perf.oa.specific_ctx_id != ctx_id) {
                                report32[2] = INVALID_CTX_ID;
                        }

                        ret = append_oa_sample(stream, buf, count, offset,
                                               report);
                        if (ret)
                                break;

                        dev_priv->perf.oa.oa_buffer.last_ctx_id = ctx_id;
                }

                /*
                 * The above reason field sanity check is based on
                 * the assumption that the OA buffer is initially
                 * zeroed and we reset the field after copying so the
                 * check is still meaningful once old reports start
                 * being overwritten.
                 */
                report32[0] = 0;
        }

        if (start_offset != *offset) {
                spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

                /*
                 * We removed the gtt_offset for the copy loop above, indexing
                 * relative to oa_buf_base so put back here...
                 */
                head += gtt_offset;

                I915_WRITE(GEN8_OAHEADPTR, head & GEN8_OAHEADPTR_MASK);
                dev_priv->perf.oa.oa_buffer.head = head;

                spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
        }

        return ret;
}

/**
 * gen8_oa_read - copy status records then buffered OA reports
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Checks OA unit status registers and if necessary appends corresponding
 * status records for userspace (such as for a buffer full condition) and then
 * initiates appending any buffered OA reports.
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * NB: some data may be successfully copied to the userspace buffer
 * even if an error is returned, and this is reflected in the
 * updated @offset.
 *
 * Returns: zero on success or a negative error code
 */
static int gen8_oa_read(struct i915_perf_stream *stream,
                        char __user *buf,
                        size_t count,
                        size_t *offset)
{
        struct drm_i915_private *dev_priv = stream->dev_priv;
        u32 oastatus;
        int ret;

        if (WARN_ON(!dev_priv->perf.oa.oa_buffer.vaddr))
                return -EIO;

        oastatus = I915_READ(GEN8_OASTATUS);

        /*
         * We treat OABUFFER_OVERFLOW as a significant error:
         *
         * Although theoretically we could handle this more gracefully
         * sometimes, some Gens don't correctly suppress certain
         * automatically triggered reports in this condition and so we
         * have to assume that old reports are now being trampled
         * over.
         *
         * Considering that we don't currently give userspace control
         * over the OA buffer size and always configure a large 16MB
         * buffer, a buffer overflow likely indicates that something
         * has gone quite badly wrong.
         */
        if (oastatus & GEN8_OASTATUS_OABUFFER_OVERFLOW) {
                ret = append_oa_status(stream, buf, count, offset,
                                       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
                if (ret)
                        return ret;

                DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
                          dev_priv->perf.oa.period_exponent);

                dev_priv->perf.oa.ops.oa_disable(dev_priv);
                dev_priv->perf.oa.ops.oa_enable(dev_priv);

                /*
                 * Note: .oa_enable() is expected to re-init the oabuffer and
                 * reset GEN8_OASTATUS for us
                 */
                oastatus = I915_READ(GEN8_OASTATUS);
        }

        if (oastatus & GEN8_OASTATUS_REPORT_LOST) {
                ret = append_oa_status(stream, buf, count, offset,
                                       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
                if (ret)
                        return ret;
                I915_WRITE(GEN8_OASTATUS,
                           oastatus & ~GEN8_OASTATUS_REPORT_LOST);
        }

        return gen8_append_oa_reports(stream, buf, count, offset);
}

/**
 * gen7_append_oa_reports - Copies all buffered OA reports into userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Notably any error condition resulting in a short read (-%ENOSPC or
 * -%EFAULT) will be returned even though one or more records may
 * have been successfully copied. In this case it's up to the caller
 * to decide if the error should be squashed before returning to
 * userspace.
 *
 * Note: reports are consumed from the head, and appended to the
 * tail, so the tail chases the head?... If you think that's mad
 * and back-to-front you're not alone, but this follows the
 * Gen PRM naming convention.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int gen7_append_oa_reports(struct i915_perf_stream *stream,
                                  char __user *buf,
                                  size_t count,
                                  size_t *offset)
{
        struct drm_i915_private *dev_priv = stream->dev_priv;
        int report_size = dev_priv->perf.oa.oa_buffer.format_size;
        u8 *oa_buf_base = dev_priv->perf.oa.oa_buffer.vaddr;
        u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
        u32 mask = (OA_BUFFER_SIZE - 1);
        size_t start_offset = *offset;
        unsigned long flags;
        unsigned int aged_tail_idx;
        u32 head, tail;
        u32 taken;
        int ret = 0;

        if (WARN_ON(!stream->enabled))
                return -EIO;

        spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

        head = dev_priv->perf.oa.oa_buffer.head;
        aged_tail_idx = dev_priv->perf.oa.oa_buffer.aged_tail_idx;
        tail = dev_priv->perf.oa.oa_buffer.tails[aged_tail_idx].offset;

        spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

        /* An invalid tail pointer here means we're still waiting for the poll
         * hrtimer callback to give us a pointer
         */
        if (tail == INVALID_TAIL_PTR)
                return -EAGAIN;

        /* NB: oa_buffer.head/tail include the gtt_offset which we don't want
         * while indexing relative to oa_buf_base.
         */
        head -= gtt_offset;
        tail -= gtt_offset;

        /* An out of bounds or misaligned head or tail pointer implies a driver
         * bug since we validate + align the tail pointers we read from the
         * hardware and we are in full control of the head pointer which should
         * only be incremented by multiples of the report size (notably also
         * all a power of two).
         */
        if (WARN_ONCE(head > OA_BUFFER_SIZE || head % report_size ||
                      tail > OA_BUFFER_SIZE || tail % report_size,
                      "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
                      head, tail))
                return -EIO;


        for (/* none */;
             (taken = OA_TAKEN(tail, head));
             head = (head + report_size) & mask) {
                u8 *report = oa_buf_base + head;
                u32 *report32 = (void *)report;

                /* All the report sizes factor neatly into the buffer
                 * size so we never expect to see a report split
                 * between the beginning and end of the buffer.
                 *
                 * Given the initial alignment check a misalignment
                 * here would imply a driver bug that would result
                 * in an overrun.
                 */
                if (WARN_ON((OA_BUFFER_SIZE - head) < report_size)) {
                        DRM_ERROR("Spurious OA head ptr: non-integral report offset\n");
                        break;
                }

                /* The report-ID field for periodic samples includes
                 * some undocumented flags related to what triggered
                 * the report and is never expected to be zero so we
                 * can check that the report isn't invalid before
                 * copying it to userspace...
                 */
                if (report32[0] == 0) {
                        if (__ratelimit(&dev_priv->perf.oa.spurious_report_rs))
                                DRM_NOTE("Skipping spurious, invalid OA report\n");
                        continue;
                }

                ret = append_oa_sample(stream, buf, count, offset, report);
                if (ret)
                        break;

                /* The above report-id field sanity check is based on
                 * the assumption that the OA buffer is initially
                 * zeroed and we reset the field after copying so the
                 * check is still meaningful once old reports start
                 * being overwritten.
                 */
                report32[0] = 0;
        }

        if (start_offset != *offset) {
                spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

                /* We removed the gtt_offset for the copy loop above, indexing
                 * relative to oa_buf_base so put back here...
                 */
                head += gtt_offset;

                I915_WRITE(GEN7_OASTATUS2,
                           ((head & GEN7_OASTATUS2_HEAD_MASK) |
                            GEN7_OASTATUS2_MEM_SELECT_GGTT));
                dev_priv->perf.oa.oa_buffer.head = head;

                spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
        }

        return ret;
}

/**
 * gen7_oa_read - copy status records then buffered OA reports
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Checks Gen 7 specific OA unit status registers and if necessary appends
 * corresponding status records for userspace (such as for a buffer full
 * condition) and then initiates appending any buffered OA reports.
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * Returns: zero on success or a negative error code
 */
static int gen7_oa_read(struct i915_perf_stream *stream,
                        char __user *buf,
                        size_t count,
                        size_t *offset)
{
        struct drm_i915_private *dev_priv = stream->dev_priv;
        u32 oastatus1;
        int ret;

        if (WARN_ON(!dev_priv->perf.oa.oa_buffer.vaddr))
                return -EIO;

        oastatus1 = I915_READ(GEN7_OASTATUS1);

        /* XXX: On Haswell we don't have a safe way to clear oastatus1
         * bits while the OA unit is enabled (while the tail pointer
         * may be updated asynchronously) so we ignore status bits
         * that have already been reported to userspace.
         */
        oastatus1 &= ~dev_priv->perf.oa.gen7_latched_oastatus1;

        /* We treat OABUFFER_OVERFLOW as a significant error:
         *
         * - The status can be interpreted to mean that the buffer is
         *   currently full (with a higher precedence than OA_TAKEN()
         *   which will start to report a near-empty buffer after an
         *   overflow) but it's awkward that we can't clear the status
         *   on Haswell, so without a reset we won't be able to catch
         *   the state again.
         *
         * - Since it also implies the HW has started overwriting old
         *   reports it may also affect our sanity checks for invalid
         *   reports when copying to userspace that assume new reports
         *   are being written to cleared memory.
         *
         * - In the future we may want to introduce a flight recorder
         *   mode where the driver will automatically maintain a safe
         *   guard band between head/tail, avoiding this overflow
         *   condition, but we avoid the added driver complexity for
         *   now.
         */
        if (unlikely(oastatus1 & GEN7_OASTATUS1_OABUFFER_OVERFLOW)) {
                ret = append_oa_status(stream, buf, count, offset,
                                       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
                if (ret)
                        return ret;

                DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
                          dev_priv->perf.oa.period_exponent);

                dev_priv->perf.oa.ops.oa_disable(dev_priv);
                dev_priv->perf.oa.ops.oa_enable(dev_priv);

                oastatus1 = I915_READ(GEN7_OASTATUS1);
        }

        if (unlikely(oastatus1 & GEN7_OASTATUS1_REPORT_LOST)) {
                ret = append_oa_status(stream, buf, count, offset,
                                       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
                if (ret)
                        return ret;
                dev_priv->perf.oa.gen7_latched_oastatus1 |=
                        GEN7_OASTATUS1_REPORT_LOST;
        }

        return gen7_append_oa_reports(stream, buf, count, offset);
}

/**
 * i915_oa_wait_unlocked - handles blocking IO until OA data available
 * @stream: An i915-perf stream opened for OA metrics
 *
 * Called when userspace tries to read() from a blocking stream FD opened
 * for OA metrics. It waits until the hrtimer callback finds a non-empty
 * OA buffer and wakes us.
 *
 * Note: it's acceptable to have this return with some false positives
 * since any subsequent read handling will return -EAGAIN if there isn't
 * really data ready for userspace yet.
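 *
 * On the userspace side that means a reader (particularly one using a
 * non-blocking FD) should treat -EAGAIN as "try again" rather than as an
 * error. A sketch of such a consumer loop, with hypothetical buffer
 * handling:
 *
 *   for (;;) {
 *           ssize_t len = read(stream_fd, data, sizeof(data));
 *
 *           if (len < 0 && (errno == EAGAIN || errno == EINTR))
 *                   continue; // spurious wakeup or signal; just retry
 *           if (len < 0)
 *                   break;    // real error
 *           // parse len bytes of records here...
 *   }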
 *
 * Returns: zero on success or a negative error code
 */
static int i915_oa_wait_unlocked(struct i915_perf_stream *stream)
{
        struct drm_i915_private *dev_priv = stream->dev_priv;

        /* We would wait indefinitely if periodic sampling is not enabled */
        if (!dev_priv->perf.oa.periodic)
                return -EIO;

        return wait_event_interruptible(dev_priv->perf.oa.poll_wq,
                                        oa_buffer_check_unlocked(dev_priv));
}

/**
 * i915_oa_poll_wait - call poll_wait() for an OA stream poll()
 * @stream: An i915-perf stream opened for OA metrics
 * @file: An i915 perf stream file
 * @wait: poll() state table
 *
 * For handling userspace polling on an i915 perf stream opened for OA metrics,
 * this starts a poll_wait with the wait queue that our hrtimer callback wakes
 * when it sees data ready to read in the circular OA buffer.
 */
static void i915_oa_poll_wait(struct i915_perf_stream *stream,
                              struct file *file,
                              poll_table *wait)
{
        struct drm_i915_private *dev_priv = stream->dev_priv;

        poll_wait(file, &dev_priv->perf.oa.poll_wq, wait);
}

/**
 * i915_oa_read - just calls through to &i915_oa_ops->read
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * Returns: zero on success or a negative error code
 */
static int i915_oa_read(struct i915_perf_stream *stream,
                        char __user *buf,
                        size_t count,
                        size_t *offset)
{
        struct drm_i915_private *dev_priv = stream->dev_priv;

        return dev_priv->perf.oa.ops.read(stream, buf, count, offset);
}

/**
 * oa_get_render_ctx_id - determine and hold ctx hw id
 * @stream: An i915-perf stream opened for OA metrics
 *
 * Determine the render context hw id, and ensure it remains fixed for the
 * lifetime of the stream. This ensures that we don't have to worry about
 * updating the context ID in OACONTROL on the fly.
 *
 * Returns: zero on success or a negative error code
 */
static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
{
        struct drm_i915_private *dev_priv = stream->dev_priv;

        if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
                dev_priv->perf.oa.specific_ctx_id = stream->ctx->hw_id;
        } else {
                struct intel_engine_cs *engine = dev_priv->engine[RCS];
                struct intel_ring *ring;
                int ret;

                ret = i915_mutex_lock_interruptible(&dev_priv->drm);
                if (ret)
                        return ret;

                /*
                 * As the ID is the gtt offset of the context's vma we
                 * pin the vma to ensure the ID remains fixed.
                 *
                 * NB: implied RCS engine...
                 */
                ring = intel_context_pin(stream->ctx, engine);
                mutex_unlock(&dev_priv->drm.struct_mutex);
                if (IS_ERR(ring))
                        return PTR_ERR(ring);


                /*
                 * Explicitly track the ID (instead of calling
                 * i915_ggtt_offset() on the fly) considering the difference
                 * with gen8+ and execlists
                 */
                dev_priv->perf.oa.specific_ctx_id =
                        i915_ggtt_offset(to_intel_context(stream->ctx, engine)->state);
        }

        return 0;
}

/**
 * oa_put_render_ctx_id - counterpart to oa_get_render_ctx_id releases hold
 * @stream: An i915-perf stream opened for OA metrics
 *
 * In case anything needed doing to ensure the context HW ID would remain valid
 * for the lifetime of the stream, then that can be undone here.
 */
static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
{
        struct drm_i915_private *dev_priv = stream->dev_priv;

        if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
                dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
        } else {
                struct intel_engine_cs *engine = dev_priv->engine[RCS];

                mutex_lock(&dev_priv->drm.struct_mutex);

                dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
                intel_context_unpin(stream->ctx, engine);

                mutex_unlock(&dev_priv->drm.struct_mutex);
        }
}

static void
free_oa_buffer(struct drm_i915_private *i915)
{
        mutex_lock(&i915->drm.struct_mutex);

        i915_gem_object_unpin_map(i915->perf.oa.oa_buffer.vma->obj);
        i915_vma_unpin(i915->perf.oa.oa_buffer.vma);
        i915_gem_object_put(i915->perf.oa.oa_buffer.vma->obj);

        i915->perf.oa.oa_buffer.vma = NULL;
        i915->perf.oa.oa_buffer.vaddr = NULL;

        mutex_unlock(&i915->drm.struct_mutex);
}

static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
{
        struct drm_i915_private *dev_priv = stream->dev_priv;

        BUG_ON(stream != dev_priv->perf.oa.exclusive_stream);

        /*
         * Unset exclusive_stream first, it will be checked while disabling
         * the metric set on gen8+.
         */
        mutex_lock(&dev_priv->drm.struct_mutex);
        dev_priv->perf.oa.exclusive_stream = NULL;
        dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
        mutex_unlock(&dev_priv->drm.struct_mutex);

        free_oa_buffer(dev_priv);

        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
        intel_runtime_pm_put(dev_priv);

        if (stream->ctx)
                oa_put_render_ctx_id(stream);

        put_oa_config(dev_priv, stream->oa_config);

        if (dev_priv->perf.oa.spurious_report_rs.missed) {
                DRM_NOTE("%d spurious OA report notices suppressed due to ratelimiting\n",
                         dev_priv->perf.oa.spurious_report_rs.missed);
        }
}

static void gen7_init_oa_buffer(struct drm_i915_private *dev_priv)
{
        u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
        unsigned long flags;

        spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

        /* Pre-DevBDW: OABUFFER must be set with counters off,
         * before OASTATUS1, but after OASTATUS2
         */
        I915_WRITE(GEN7_OASTATUS2,
                   gtt_offset | GEN7_OASTATUS2_MEM_SELECT_GGTT); /* head */
        dev_priv->perf.oa.oa_buffer.head = gtt_offset;

        I915_WRITE(GEN7_OABUFFER, gtt_offset);

        I915_WRITE(GEN7_OASTATUS1, gtt_offset | OABUFFER_SIZE_16M); /* tail */

        /* Mark that we need updated tail pointers to read from... */
        dev_priv->perf.oa.oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
        dev_priv->perf.oa.oa_buffer.tails[1].offset = INVALID_TAIL_PTR;

        spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

        /* On Haswell we have to track which OASTATUS1 flags we've
         * already seen since they can't be cleared while periodic
         * sampling is enabled.
         */
        dev_priv->perf.oa.gen7_latched_oastatus1 = 0;

        /* NB: although the OA buffer will initially be allocated
         * zeroed via shmfs (and so this memset is redundant when
         * first allocating), we may re-init the OA buffer, either
         * when re-enabling a stream or in error/reset paths.
         *
         * The reason we clear the buffer for each re-init is for the
         * sanity check in gen7_append_oa_reports() that looks at the
         * report-id field to make sure it's non-zero which relies on
         * the assumption that new reports are being written to zeroed
         * memory...
         */
        memset(dev_priv->perf.oa.oa_buffer.vaddr, 0, OA_BUFFER_SIZE);

        /* Maybe make ->pollin per-stream state if we support multiple
         * concurrent streams in the future.
         */
        dev_priv->perf.oa.pollin = false;
}

static void gen8_init_oa_buffer(struct drm_i915_private *dev_priv)
{
        u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
        unsigned long flags;

        spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

        I915_WRITE(GEN8_OASTATUS, 0);
        I915_WRITE(GEN8_OAHEADPTR, gtt_offset);
        dev_priv->perf.oa.oa_buffer.head = gtt_offset;

        I915_WRITE(GEN8_OABUFFER_UDW, 0);

        /*
         * PRM says:
         *
         *  "This MMIO must be set before the OATAILPTR
         *  register and after the OAHEADPTR register. This is
         *  to enable proper functionality of the overflow
         *  bit."
         */
        I915_WRITE(GEN8_OABUFFER, gtt_offset |
                   OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT);
        I915_WRITE(GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK);

        /* Mark that we need updated tail pointers to read from... */
        dev_priv->perf.oa.oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
        dev_priv->perf.oa.oa_buffer.tails[1].offset = INVALID_TAIL_PTR;

        /*
         * Reset state used to recognise context switches, affecting which
         * reports we will forward to userspace while filtering for a single
         * context.
         */
        dev_priv->perf.oa.oa_buffer.last_ctx_id = INVALID_CTX_ID;

        spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);

        /*
         * NB: although the OA buffer will initially be allocated
         * zeroed via shmfs (and so this memset is redundant when
         * first allocating), we may re-init the OA buffer, either
         * when re-enabling a stream or in error/reset paths.
         *
         * The reason we clear the buffer for each re-init is for the
         * sanity check in gen8_append_oa_reports() that looks at the
         * reason field to make sure it's non-zero which relies on
         * the assumption that new reports are being written to zeroed
         * memory...
         */
        memset(dev_priv->perf.oa.oa_buffer.vaddr, 0, OA_BUFFER_SIZE);

        /*
         * Maybe make ->pollin per-stream state if we support multiple
         * concurrent streams in the future.
1430 */ 1431 dev_priv->perf.oa.pollin = false; 1432 } 1433 1434 static int alloc_oa_buffer(struct drm_i915_private *dev_priv) 1435 { 1436 struct drm_i915_gem_object *bo; 1437 struct i915_vma *vma; 1438 int ret; 1439 1440 if (WARN_ON(dev_priv->perf.oa.oa_buffer.vma)) 1441 return -ENODEV; 1442 1443 ret = i915_mutex_lock_interruptible(&dev_priv->drm); 1444 if (ret) 1445 return ret; 1446 1447 BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE); 1448 BUILD_BUG_ON(OA_BUFFER_SIZE < SZ_128K || OA_BUFFER_SIZE > SZ_16M); 1449 1450 bo = i915_gem_object_create(dev_priv, OA_BUFFER_SIZE); 1451 if (IS_ERR(bo)) { 1452 DRM_ERROR("Failed to allocate OA buffer\n"); 1453 ret = PTR_ERR(bo); 1454 goto unlock; 1455 } 1456 1457 ret = i915_gem_object_set_cache_level(bo, I915_CACHE_LLC); 1458 if (ret) 1459 goto err_unref; 1460 1461 /* PreHSW required 512K alignment, HSW requires 16M */ 1462 vma = i915_gem_object_ggtt_pin(bo, NULL, 0, SZ_16M, 0); 1463 if (IS_ERR(vma)) { 1464 ret = PTR_ERR(vma); 1465 goto err_unref; 1466 } 1467 dev_priv->perf.oa.oa_buffer.vma = vma; 1468 1469 dev_priv->perf.oa.oa_buffer.vaddr = 1470 i915_gem_object_pin_map(bo, I915_MAP_WB); 1471 if (IS_ERR(dev_priv->perf.oa.oa_buffer.vaddr)) { 1472 ret = PTR_ERR(dev_priv->perf.oa.oa_buffer.vaddr); 1473 goto err_unpin; 1474 } 1475 1476 dev_priv->perf.oa.ops.init_oa_buffer(dev_priv); 1477 1478 DRM_DEBUG_DRIVER("OA Buffer initialized, gtt offset = 0x%x, vaddr = %p\n", 1479 i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma), 1480 dev_priv->perf.oa.oa_buffer.vaddr); 1481 1482 goto unlock; 1483 1484 err_unpin: 1485 __i915_vma_unpin(vma); 1486 1487 err_unref: 1488 i915_gem_object_put(bo); 1489 1490 dev_priv->perf.oa.oa_buffer.vaddr = NULL; 1491 dev_priv->perf.oa.oa_buffer.vma = NULL; 1492 1493 unlock: 1494 mutex_unlock(&dev_priv->drm.struct_mutex); 1495 return ret; 1496 } 1497 1498 static void config_oa_regs(struct drm_i915_private *dev_priv, 1499 const struct i915_oa_reg *regs, 1500 u32 n_regs) 1501 { 1502 u32 i; 1503 1504 for (i = 0; i < n_regs; i++) { 1505 const struct i915_oa_reg *reg = regs + i; 1506 1507 I915_WRITE(reg->addr, reg->value); 1508 } 1509 } 1510 1511 static int hsw_enable_metric_set(struct drm_i915_private *dev_priv, 1512 const struct i915_oa_config *oa_config) 1513 { 1514 /* PRM: 1515 * 1516 * OA unit is using “crclk” for its functionality. When trunk 1517 * level clock gating takes place, OA clock would be gated, 1518 * unable to count the events from non-render clock domain. 1519 * Render clock gating must be disabled when OA is enabled to 1520 * count the events from non-render domain. Unit level clock 1521 * gating for RCS should also be disabled. 1522 */ 1523 I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) & 1524 ~GEN7_DOP_CLOCK_GATE_ENABLE)); 1525 I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) | 1526 GEN6_CSUNIT_CLOCK_GATE_DISABLE)); 1527 1528 config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len); 1529 1530 /* It apparently takes a fairly long time for a new MUX 1531 * configuration to be be applied after these register writes. 1532 * This delay duration was derived empirically based on the 1533 * render_basic config but hopefully it covers the maximum 1534 * configuration latency. 1535 * 1536 * As a fallback, the checks in _append_oa_reports() to skip 1537 * invalid OA reports do also seem to work to discard reports 1538 * generated before this config has completed - albeit not 1539 * silently. 
1540 *
1541 * Unfortunately this is essentially a magic number, since we
1542 * don't currently know of a reliable mechanism for predicting
1543 * how long the MUX config will take to apply and besides
1544 * seeing invalid reports we don't know of a reliable way to
1545 * explicitly check that the MUX config has landed.
1546 *
1547 * It's even possible we've mischaracterized the underlying
1548 * problem - it just seems like the simplest explanation why
1549 * a delay at this location would mitigate any invalid reports.
1550 */
1551 usleep_range(15000, 20000);
1552
1553 config_oa_regs(dev_priv, oa_config->b_counter_regs,
1554 oa_config->b_counter_regs_len);
1555
1556 return 0;
1557 }
1558
1559 static void hsw_disable_metric_set(struct drm_i915_private *dev_priv)
1560 {
1561 I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) &
1562 ~GEN6_CSUNIT_CLOCK_GATE_DISABLE));
1563 I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) |
1564 GEN7_DOP_CLOCK_GATE_ENABLE));
1565
1566 I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) &
1567 ~GT_NOA_ENABLE));
1568 }
1569
1570 /*
1571 * NB: It must always remain pointer safe to run this even if the OA unit
1572 * has been disabled.
1573 *
1574 * It's fine to put out-of-date values into these per-context registers
1575 * in the case that the OA unit has been disabled.
1576 */
1577 static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx,
1578 u32 *reg_state,
1579 const struct i915_oa_config *oa_config)
1580 {
1581 struct drm_i915_private *dev_priv = ctx->i915;
1582 u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset;
1583 u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset;
1584 /* The MMIO offsets for Flex EU registers aren't contiguous */
1585 u32 flex_mmio[] = {
1586 i915_mmio_reg_offset(EU_PERF_CNTL0),
1587 i915_mmio_reg_offset(EU_PERF_CNTL1),
1588 i915_mmio_reg_offset(EU_PERF_CNTL2),
1589 i915_mmio_reg_offset(EU_PERF_CNTL3),
1590 i915_mmio_reg_offset(EU_PERF_CNTL4),
1591 i915_mmio_reg_offset(EU_PERF_CNTL5),
1592 i915_mmio_reg_offset(EU_PERF_CNTL6),
1593 };
1594 int i;
1595
1596 reg_state[ctx_oactxctrl] = i915_mmio_reg_offset(GEN8_OACTXCONTROL);
1597 reg_state[ctx_oactxctrl+1] = (dev_priv->perf.oa.period_exponent <<
1598 GEN8_OA_TIMER_PERIOD_SHIFT) |
1599 (dev_priv->perf.oa.periodic ?
1600 GEN8_OA_TIMER_ENABLE : 0) |
1601 GEN8_OA_COUNTER_RESUME;
1602
1603 for (i = 0; i < ARRAY_SIZE(flex_mmio); i++) {
1604 u32 state_offset = ctx_flexeu0 + i * 2;
1605 u32 mmio = flex_mmio[i];
1606
1607 /*
1608 * This arbitrary default will select the 'EU FPU0 Pipeline
1609 * Active' event. In the future it's anticipated that there
1610 * will be an explicit 'No Event' we can select, but not yet...
1611 */
1612 u32 value = 0;
1613
1614 if (oa_config) {
1615 u32 j;
1616
1617 for (j = 0; j < oa_config->flex_regs_len; j++) {
1618 if (i915_mmio_reg_offset(oa_config->flex_regs[j].addr) == mmio) {
1619 value = oa_config->flex_regs[j].value;
1620 break;
1621 }
1622 }
1623 }
1624
1625 reg_state[state_offset] = mmio;
1626 reg_state[state_offset+1] = value;
1627 }
1628 }
1629
1630 /*
1631 * Same as gen8_update_reg_state_unlocked(), but written through the batch
1632 * buffer. This is only used by the kernel context.
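 *
 * As a rough editorial sketch (not literal command output), the ring
 * commands emitted by gen8_emit_oa_config() below amount to:
 *
 *   MI_LOAD_REGISTER_IMM(8)
 *   <GEN8_OACTXCONTROL offset> <period exponent | timer enable | resume>
 *   <EU_PERF_CNTL0 offset>     <matching flex value, or 0>
 *   ...
 *   <EU_PERF_CNTL6 offset>     <matching flex value, or 0>
 *   MI_NOOP
 *
 * i.e. one register/value pair for OACTXCONTROL plus one per flex EU
 * register, all written from the command stream.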
1633 */ 1634 static int gen8_emit_oa_config(struct i915_request *rq, 1635 const struct i915_oa_config *oa_config) 1636 { 1637 struct drm_i915_private *dev_priv = rq->i915; 1638 /* The MMIO offsets for Flex EU registers aren't contiguous */ 1639 u32 flex_mmio[] = { 1640 i915_mmio_reg_offset(EU_PERF_CNTL0), 1641 i915_mmio_reg_offset(EU_PERF_CNTL1), 1642 i915_mmio_reg_offset(EU_PERF_CNTL2), 1643 i915_mmio_reg_offset(EU_PERF_CNTL3), 1644 i915_mmio_reg_offset(EU_PERF_CNTL4), 1645 i915_mmio_reg_offset(EU_PERF_CNTL5), 1646 i915_mmio_reg_offset(EU_PERF_CNTL6), 1647 }; 1648 u32 *cs; 1649 int i; 1650 1651 cs = intel_ring_begin(rq, ARRAY_SIZE(flex_mmio) * 2 + 4); 1652 if (IS_ERR(cs)) 1653 return PTR_ERR(cs); 1654 1655 *cs++ = MI_LOAD_REGISTER_IMM(ARRAY_SIZE(flex_mmio) + 1); 1656 1657 *cs++ = i915_mmio_reg_offset(GEN8_OACTXCONTROL); 1658 *cs++ = (dev_priv->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) | 1659 (dev_priv->perf.oa.periodic ? GEN8_OA_TIMER_ENABLE : 0) | 1660 GEN8_OA_COUNTER_RESUME; 1661 1662 for (i = 0; i < ARRAY_SIZE(flex_mmio); i++) { 1663 u32 mmio = flex_mmio[i]; 1664 1665 /* 1666 * This arbitrary default will select the 'EU FPU0 Pipeline 1667 * Active' event. In the future it's anticipated that there 1668 * will be an explicit 'No Event' we can select, but not 1669 * yet... 1670 */ 1671 u32 value = 0; 1672 1673 if (oa_config) { 1674 u32 j; 1675 1676 for (j = 0; j < oa_config->flex_regs_len; j++) { 1677 if (i915_mmio_reg_offset(oa_config->flex_regs[j].addr) == mmio) { 1678 value = oa_config->flex_regs[j].value; 1679 break; 1680 } 1681 } 1682 } 1683 1684 *cs++ = mmio; 1685 *cs++ = value; 1686 } 1687 1688 *cs++ = MI_NOOP; 1689 intel_ring_advance(rq, cs); 1690 1691 return 0; 1692 } 1693 1694 static int gen8_switch_to_updated_kernel_context(struct drm_i915_private *dev_priv, 1695 const struct i915_oa_config *oa_config) 1696 { 1697 struct intel_engine_cs *engine = dev_priv->engine[RCS]; 1698 struct i915_timeline *timeline; 1699 struct i915_request *rq; 1700 int ret; 1701 1702 lockdep_assert_held(&dev_priv->drm.struct_mutex); 1703 1704 i915_retire_requests(dev_priv); 1705 1706 rq = i915_request_alloc(engine, dev_priv->kernel_context); 1707 if (IS_ERR(rq)) 1708 return PTR_ERR(rq); 1709 1710 ret = gen8_emit_oa_config(rq, oa_config); 1711 if (ret) { 1712 i915_request_add(rq); 1713 return ret; 1714 } 1715 1716 /* Queue this switch after all other activity */ 1717 list_for_each_entry(timeline, &dev_priv->gt.timelines, link) { 1718 struct i915_request *prev; 1719 1720 prev = i915_gem_active_raw(&timeline->last_request, 1721 &dev_priv->drm.struct_mutex); 1722 if (prev) 1723 i915_request_await_dma_fence(rq, &prev->fence); 1724 } 1725 1726 i915_request_add(rq); 1727 1728 return 0; 1729 } 1730 1731 /* 1732 * Manages updating the per-context aspects of the OA stream 1733 * configuration across all contexts. 1734 * 1735 * The awkward consideration here is that OACTXCONTROL controls the 1736 * exponent for periodic sampling which is primarily used for system 1737 * wide profiling where we'd like a consistent sampling period even in 1738 * the face of context switches. 1739 * 1740 * Our approach of updating the register state context (as opposed to 1741 * say using a workaround batch buffer) ensures that the hardware 1742 * won't automatically reload an out-of-date timer exponent even 1743 * transiently before a WA BB could be parsed. 
1744 *
1745 * This function needs to:
1746 * - Ensure the currently running context's per-context OA state is
1747 * updated
1748 * - Ensure that all existing contexts will have the correct per-context
1749 * OA state if they are scheduled for use.
1750 * - Ensure any new contexts will be initialized with the correct
1751 * per-context OA state.
1752 *
1753 * Note: it's only the RCS/Render context that has any OA state.
1754 */
1755 static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
1756 const struct i915_oa_config *oa_config)
1757 {
1758 struct intel_engine_cs *engine = dev_priv->engine[RCS];
1759 struct i915_gem_context *ctx;
1760 int ret;
1761 unsigned int wait_flags = I915_WAIT_LOCKED;
1762
1763 lockdep_assert_held(&dev_priv->drm.struct_mutex);
1764
1765 /* Switch away from any user context. */
1766 ret = gen8_switch_to_updated_kernel_context(dev_priv, oa_config);
1767 if (ret)
1768 goto out;
1769
1770 /*
1771 * The OA register config is set up through the context image. This image
1772 * might be written to by the GPU on context switch (in particular on
1773 * lite-restore). This means we can't safely update a context's image
1774 * if this context is scheduled/submitted to run on the GPU.
1775 *
1776 * We could emit the OA register config through the batch buffer but
1777 * this might leave a small interval of time where the OA unit is
1778 * configured at an invalid sampling period.
1779 *
1780 * So far the best way to work around this issue seems to be draining
1781 * the GPU of any submitted work.
1782 */
1783 ret = i915_gem_wait_for_idle(dev_priv, wait_flags);
1784 if (ret)
1785 goto out;
1786
1787 /* Update all contexts now that we've stalled the submission. */
1788 list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
1789 struct intel_context *ce = to_intel_context(ctx, engine);
1790 u32 *regs;
1791
1792 /* OA settings will be set upon first use */
1793 if (!ce->state)
1794 continue;
1795
1796 regs = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
1797 if (IS_ERR(regs)) {
1798 ret = PTR_ERR(regs);
1799 goto out;
1800 }
1801
1802 ce->state->obj->mm.dirty = true;
1803 regs += LRC_STATE_PN * PAGE_SIZE / sizeof(*regs);
1804
1805 gen8_update_reg_state_unlocked(ctx, regs, oa_config);
1806
1807 i915_gem_object_unpin_map(ce->state->obj);
1808 }
1809
1810 out:
1811 return ret;
1812 }
1813
1814 static int gen8_enable_metric_set(struct drm_i915_private *dev_priv,
1815 const struct i915_oa_config *oa_config)
1816 {
1817 int ret;
1818
1819 /*
1820 * We disable slice/unslice clock ratio change reports on SKL since
1821 * they are too noisy. The HW generates a lot of redundant reports
1822 * where the ratio hasn't really changed, causing a lot of redundant
1823 * work to process and increasing the chances we'll hit buffer
1824 * overruns.
1825 *
1826 * Although we don't currently use the 'disable overrun' OABUFFER
1827 * feature it's worth noting that clock ratio reports have to be
1828 * disabled before considering use of that feature since the HW doesn't
1829 * correctly block these reports.
1830 *
1831 * Currently none of the high-level metrics we have depend on knowing
1832 * this ratio to normalize.
1833 *
1834 * Note: This register is not power context saved and restored, but
1835 * that's OK considering that we disable RC6 while the OA unit is
1836 * enabled.
1837 *
1838 * The _INCLUDE_CLK_RATIO bit allows the slice/unslice frequency to
1839 * be read back from automatically triggered reports, as part of the
1840 * RPT_ID field.
1841 */
1842 if (IS_GEN(dev_priv, 9, 11)) {
1843 I915_WRITE(GEN8_OA_DEBUG,
1844 _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
1845 GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
1846 }
1847
1848 /*
1849 * Update all contexts prior to writing the mux configurations as we need
1850 * to make sure all slices/subslices are ON before writing to NOA
1851 * registers.
1852 */
1853 ret = gen8_configure_all_contexts(dev_priv, oa_config);
1854 if (ret)
1855 return ret;
1856
1857 config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len);
1858
1859 config_oa_regs(dev_priv, oa_config->b_counter_regs,
1860 oa_config->b_counter_regs_len);
1861
1862 return 0;
1863 }
1864
1865 static void gen8_disable_metric_set(struct drm_i915_private *dev_priv)
1866 {
1867 /* Reset all contexts' slices/subslices configurations. */
1868 gen8_configure_all_contexts(dev_priv, NULL);
1869
1870 I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) &
1871 ~GT_NOA_ENABLE));
1872 }
1873
1874 static void gen10_disable_metric_set(struct drm_i915_private *dev_priv)
1875 {
1876 /* Reset all contexts' slices/subslices configurations. */
1877 gen8_configure_all_contexts(dev_priv, NULL);
1878
1879 /* Make sure we disable noa to save power. */
1880 I915_WRITE(RPM_CONFIG1,
1881 I915_READ(RPM_CONFIG1) & ~GEN10_GT_NOA_ENABLE);
1882 }
1883
1884 static void gen7_oa_enable(struct drm_i915_private *dev_priv)
1885 {
1886 struct i915_gem_context *ctx =
1887 dev_priv->perf.oa.exclusive_stream->ctx;
1888 u32 ctx_id = dev_priv->perf.oa.specific_ctx_id;
1889 bool periodic = dev_priv->perf.oa.periodic;
1890 u32 period_exponent = dev_priv->perf.oa.period_exponent;
1891 u32 report_format = dev_priv->perf.oa.oa_buffer.format;
1892
1893 /*
1894 * Reset buf pointers so we don't forward reports from before now.
1895 *
1896 * Think carefully if considering trying to avoid this, since it
1897 * also ensures status flags and the buffer itself are cleared
1898 * in error paths, and we have checks for invalid reports based
1899 * on the assumption that certain fields are written to zeroed
1900 * memory which this helps maintain.
1901 */
1902 gen7_init_oa_buffer(dev_priv);
1903
1904 I915_WRITE(GEN7_OACONTROL,
1905 (ctx_id & GEN7_OACONTROL_CTX_MASK) |
1906 (period_exponent <<
1907 GEN7_OACONTROL_TIMER_PERIOD_SHIFT) |
1908 (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) |
1909 (report_format << GEN7_OACONTROL_FORMAT_SHIFT) |
1910 (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) |
1911 GEN7_OACONTROL_ENABLE);
1912 }
1913
1914 static void gen8_oa_enable(struct drm_i915_private *dev_priv)
1915 {
1916 u32 report_format = dev_priv->perf.oa.oa_buffer.format;
1917
1918 /*
1919 * Reset buf pointers so we don't forward reports from before now.
1920 *
1921 * Think carefully if considering trying to avoid this, since it
1922 * also ensures status flags and the buffer itself are cleared
1923 * in error paths, and we have checks for invalid reports based
1924 * on the assumption that certain fields are written to zeroed
1925 * memory which this helps maintain.
1926 */ 1927 gen8_init_oa_buffer(dev_priv); 1928 1929 /* 1930 * Note: we don't rely on the hardware to perform single context 1931 * filtering and instead filter on the cpu based on the context-id 1932 * field of reports 1933 */ 1934 I915_WRITE(GEN8_OACONTROL, (report_format << 1935 GEN8_OA_REPORT_FORMAT_SHIFT) | 1936 GEN8_OA_COUNTER_ENABLE); 1937 } 1938 1939 /** 1940 * i915_oa_stream_enable - handle `I915_PERF_IOCTL_ENABLE` for OA stream 1941 * @stream: An i915 perf stream opened for OA metrics 1942 * 1943 * [Re]enables hardware periodic sampling according to the period configured 1944 * when opening the stream. This also starts a hrtimer that will periodically 1945 * check for data in the circular OA buffer for notifying userspace (e.g. 1946 * during a read() or poll()). 1947 */ 1948 static void i915_oa_stream_enable(struct i915_perf_stream *stream) 1949 { 1950 struct drm_i915_private *dev_priv = stream->dev_priv; 1951 1952 dev_priv->perf.oa.ops.oa_enable(dev_priv); 1953 1954 if (dev_priv->perf.oa.periodic) 1955 hrtimer_start(&dev_priv->perf.oa.poll_check_timer, 1956 ns_to_ktime(POLL_PERIOD), 1957 HRTIMER_MODE_REL_PINNED); 1958 } 1959 1960 static void gen7_oa_disable(struct drm_i915_private *dev_priv) 1961 { 1962 I915_WRITE(GEN7_OACONTROL, 0); 1963 if (intel_wait_for_register(dev_priv, 1964 GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0, 1965 50)) 1966 DRM_ERROR("wait for OA to be disabled timed out\n"); 1967 } 1968 1969 static void gen8_oa_disable(struct drm_i915_private *dev_priv) 1970 { 1971 I915_WRITE(GEN8_OACONTROL, 0); 1972 if (intel_wait_for_register(dev_priv, 1973 GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0, 1974 50)) 1975 DRM_ERROR("wait for OA to be disabled timed out\n"); 1976 } 1977 1978 /** 1979 * i915_oa_stream_disable - handle `I915_PERF_IOCTL_DISABLE` for OA stream 1980 * @stream: An i915 perf stream opened for OA metrics 1981 * 1982 * Stops the OA unit from periodically writing counter reports into the 1983 * circular OA buffer. This also stops the hrtimer that periodically checks for 1984 * data in the circular OA buffer, for notifying userspace. 1985 */ 1986 static void i915_oa_stream_disable(struct i915_perf_stream *stream) 1987 { 1988 struct drm_i915_private *dev_priv = stream->dev_priv; 1989 1990 dev_priv->perf.oa.ops.oa_disable(dev_priv); 1991 1992 if (dev_priv->perf.oa.periodic) 1993 hrtimer_cancel(&dev_priv->perf.oa.poll_check_timer); 1994 } 1995 1996 static const struct i915_perf_stream_ops i915_oa_stream_ops = { 1997 .destroy = i915_oa_stream_destroy, 1998 .enable = i915_oa_stream_enable, 1999 .disable = i915_oa_stream_disable, 2000 .wait_unlocked = i915_oa_wait_unlocked, 2001 .poll_wait = i915_oa_poll_wait, 2002 .read = i915_oa_read, 2003 }; 2004 2005 /** 2006 * i915_oa_stream_init - validate combined props for OA stream and init 2007 * @stream: An i915 perf stream 2008 * @param: The open parameters passed to `DRM_I915_PERF_OPEN` 2009 * @props: The property state that configures stream (individually validated) 2010 * 2011 * While read_properties_unlocked() validates properties in isolation it 2012 * doesn't ensure that the combination necessarily makes sense. 2013 * 2014 * At this point it has been determined that userspace wants a stream of 2015 * OA metrics, but still we need to further validate the combined 2016 * properties are OK. 2017 * 2018 * If the configuration makes sense then we can allocate memory for 2019 * a circular OA buffer and apply the requested metric set configuration. 2020 * 2021 * Returns: zero on success or a negative error code. 
2022 */ 2023 static int i915_oa_stream_init(struct i915_perf_stream *stream, 2024 struct drm_i915_perf_open_param *param, 2025 struct perf_open_properties *props) 2026 { 2027 struct drm_i915_private *dev_priv = stream->dev_priv; 2028 int format_size; 2029 int ret; 2030 2031 /* If the sysfs metrics/ directory wasn't registered for some 2032 * reason then don't let userspace try their luck with config 2033 * IDs 2034 */ 2035 if (!dev_priv->perf.metrics_kobj) { 2036 DRM_DEBUG("OA metrics weren't advertised via sysfs\n"); 2037 return -EINVAL; 2038 } 2039 2040 if (!(props->sample_flags & SAMPLE_OA_REPORT)) { 2041 DRM_DEBUG("Only OA report sampling supported\n"); 2042 return -EINVAL; 2043 } 2044 2045 if (!dev_priv->perf.oa.ops.init_oa_buffer) { 2046 DRM_DEBUG("OA unit not supported\n"); 2047 return -ENODEV; 2048 } 2049 2050 /* To avoid the complexity of having to accurately filter 2051 * counter reports and marshal to the appropriate client 2052 * we currently only allow exclusive access 2053 */ 2054 if (dev_priv->perf.oa.exclusive_stream) { 2055 DRM_DEBUG("OA unit already in use\n"); 2056 return -EBUSY; 2057 } 2058 2059 if (!props->oa_format) { 2060 DRM_DEBUG("OA report format not specified\n"); 2061 return -EINVAL; 2062 } 2063 2064 /* We set up some ratelimit state to potentially throttle any _NOTES 2065 * about spurious, invalid OA reports which we don't forward to 2066 * userspace. 2067 * 2068 * The initialization is associated with opening the stream (not driver 2069 * init) considering we print a _NOTE about any throttling when closing 2070 * the stream instead of waiting until driver _fini which no one would 2071 * ever see. 2072 * 2073 * Using the same limiting factors as printk_ratelimit() 2074 */ 2075 ratelimit_state_init(&dev_priv->perf.oa.spurious_report_rs, 2076 5 * HZ, 10); 2077 /* Since we use a DRM_NOTE for spurious reports it would be 2078 * inconsistent to let __ratelimit() automatically print a warning for 2079 * throttling. 2080 */ 2081 ratelimit_set_flags(&dev_priv->perf.oa.spurious_report_rs, 2082 RATELIMIT_MSG_ON_RELEASE); 2083 2084 stream->sample_size = sizeof(struct drm_i915_perf_record_header); 2085 2086 format_size = dev_priv->perf.oa.oa_formats[props->oa_format].size; 2087 2088 stream->sample_flags |= SAMPLE_OA_REPORT; 2089 stream->sample_size += format_size; 2090 2091 dev_priv->perf.oa.oa_buffer.format_size = format_size; 2092 if (WARN_ON(dev_priv->perf.oa.oa_buffer.format_size == 0)) 2093 return -EINVAL; 2094 2095 dev_priv->perf.oa.oa_buffer.format = 2096 dev_priv->perf.oa.oa_formats[props->oa_format].format; 2097 2098 dev_priv->perf.oa.periodic = props->oa_periodic; 2099 if (dev_priv->perf.oa.periodic) 2100 dev_priv->perf.oa.period_exponent = props->oa_period_exponent; 2101 2102 if (stream->ctx) { 2103 ret = oa_get_render_ctx_id(stream); 2104 if (ret) { 2105 DRM_DEBUG("Invalid context id to filter with\n"); 2106 return ret; 2107 } 2108 } 2109 2110 ret = get_oa_config(dev_priv, props->metrics_set, &stream->oa_config); 2111 if (ret) { 2112 DRM_DEBUG("Invalid OA config id=%i\n", props->metrics_set); 2113 goto err_config; 2114 } 2115 2116 /* PRM - observability performance counters: 2117 * 2118 * OACONTROL, performance counter enable, note: 2119 * 2120 * "When this bit is set, in order to have coherent counts, 2121 * RC6 power state and trunk clock gating must be disabled. 
2122 * This can be achieved by programming MMIO registers as 2123 * 0xA094=0 and 0xA090[31]=1" 2124 * 2125 * In our case we are expecting that taking pm + FORCEWAKE 2126 * references will effectively disable RC6. 2127 */ 2128 intel_runtime_pm_get(dev_priv); 2129 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 2130 2131 ret = alloc_oa_buffer(dev_priv); 2132 if (ret) 2133 goto err_oa_buf_alloc; 2134 2135 ret = i915_mutex_lock_interruptible(&dev_priv->drm); 2136 if (ret) 2137 goto err_lock; 2138 2139 ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv, 2140 stream->oa_config); 2141 if (ret) { 2142 DRM_DEBUG("Unable to enable metric set\n"); 2143 goto err_enable; 2144 } 2145 2146 stream->ops = &i915_oa_stream_ops; 2147 2148 dev_priv->perf.oa.exclusive_stream = stream; 2149 2150 mutex_unlock(&dev_priv->drm.struct_mutex); 2151 2152 return 0; 2153 2154 err_enable: 2155 dev_priv->perf.oa.ops.disable_metric_set(dev_priv); 2156 mutex_unlock(&dev_priv->drm.struct_mutex); 2157 2158 err_lock: 2159 free_oa_buffer(dev_priv); 2160 2161 err_oa_buf_alloc: 2162 put_oa_config(dev_priv, stream->oa_config); 2163 2164 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 2165 intel_runtime_pm_put(dev_priv); 2166 2167 err_config: 2168 if (stream->ctx) 2169 oa_put_render_ctx_id(stream); 2170 2171 return ret; 2172 } 2173 2174 void i915_oa_init_reg_state(struct intel_engine_cs *engine, 2175 struct i915_gem_context *ctx, 2176 u32 *reg_state) 2177 { 2178 struct i915_perf_stream *stream; 2179 2180 if (engine->id != RCS) 2181 return; 2182 2183 stream = engine->i915->perf.oa.exclusive_stream; 2184 if (stream) 2185 gen8_update_reg_state_unlocked(ctx, reg_state, stream->oa_config); 2186 } 2187 2188 /** 2189 * i915_perf_read_locked - &i915_perf_stream_ops->read with error normalisation 2190 * @stream: An i915 perf stream 2191 * @file: An i915 perf stream file 2192 * @buf: destination buffer given by userspace 2193 * @count: the number of bytes userspace wants to read 2194 * @ppos: (inout) file seek position (unused) 2195 * 2196 * Besides wrapping &i915_perf_stream_ops->read this provides a common place to 2197 * ensure that if we've successfully copied any data then reporting that takes 2198 * precedence over any internal error status, so the data isn't lost. 2199 * 2200 * For example ret will be -ENOSPC whenever there is more buffered data than 2201 * can be copied to userspace, but that's only interesting if we weren't able 2202 * to copy some data because it implies the userspace buffer is too small to 2203 * receive a single record (and we never split records). 2204 * 2205 * Another case with ret == -EFAULT is more of a grey area since it would seem 2206 * like bad form for userspace to ask us to overrun its buffer, but the user 2207 * knows best: 2208 * 2209 * http://yarchive.net/comp/linux/partial_reads_writes.html 2210 * 2211 * Returns: The number of bytes copied or a negative error code on failure. 2212 */ 2213 static ssize_t i915_perf_read_locked(struct i915_perf_stream *stream, 2214 struct file *file, 2215 char __user *buf, 2216 size_t count, 2217 loff_t *ppos) 2218 { 2219 /* Note we keep the offset (aka bytes read) separate from any 2220 * error status so that the final check for whether we return 2221 * the bytes read with a higher precedence than any error (see 2222 * comment below) doesn't need to be handled/duplicated in 2223 * stream->ops->read() implementations. 
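 *
 * As an editorial aside, the combined return statement below reduces
 * to the following more verbose equivalent:
 *
 *   if (offset)
 *           return offset;  (some bytes were copied; that takes precedence)
 *   if (ret)
 *           return ret;     (nothing copied; report the error)
 *   return -EAGAIN;         (no data and no error)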
2224 */
2225 size_t offset = 0;
2226 int ret = stream->ops->read(stream, buf, count, &offset);
2227
2228 return offset ?: (ret ?: -EAGAIN);
2229 }
2230
2231 /**
2232 * i915_perf_read - handles read() FOP for i915 perf stream FDs
2233 * @file: An i915 perf stream file
2234 * @buf: destination buffer given by userspace
2235 * @count: the number of bytes userspace wants to read
2236 * @ppos: (inout) file seek position (unused)
2237 *
2238 * The entry point for handling a read() on a stream file descriptor from
2239 * userspace. Most of the work is left to i915_perf_read_locked() and
2240 * &i915_perf_stream_ops->read, but to save stream implementations (of which
2241 * we might have multiple later) from having to handle blocking reads, we do that here.
2242 *
2243 * We can also consistently treat trying to read from a disabled stream
2244 * as an IO error so implementations can assume the stream is enabled
2245 * while reading.
2246 *
2247 * Returns: The number of bytes copied or a negative error code on failure.
2248 */
2249 static ssize_t i915_perf_read(struct file *file,
2250 char __user *buf,
2251 size_t count,
2252 loff_t *ppos)
2253 {
2254 struct i915_perf_stream *stream = file->private_data;
2255 struct drm_i915_private *dev_priv = stream->dev_priv;
2256 ssize_t ret;
2257
2258 /* To ensure it's handled consistently we simply treat all reads of a
2259 * disabled stream as an error. In particular it might otherwise lead
2260 * to a deadlock for blocking file descriptors...
2261 */
2262 if (!stream->enabled)
2263 return -EIO;
2264
2265 if (!(file->f_flags & O_NONBLOCK)) {
2266 /* There's the small chance of false positives from
2267 * stream->ops->wait_unlocked.
2268 *
2269 * E.g. with single context filtering, since we only wait until
2270 * the OA buffer has >= 1 report, we don't immediately know
2271 * whether any reports really belong to the current context.
2272 */
2273 do {
2274 ret = stream->ops->wait_unlocked(stream);
2275 if (ret)
2276 return ret;
2277
2278 mutex_lock(&dev_priv->perf.lock);
2279 ret = i915_perf_read_locked(stream, file,
2280 buf, count, ppos);
2281 mutex_unlock(&dev_priv->perf.lock);
2282 } while (ret == -EAGAIN);
2283 } else {
2284 mutex_lock(&dev_priv->perf.lock);
2285 ret = i915_perf_read_locked(stream, file, buf, count, ppos);
2286 mutex_unlock(&dev_priv->perf.lock);
2287 }
2288
2289 /* We allow the poll checking to sometimes report false positive EPOLLIN
2290 * events where we might actually report EAGAIN on read() if there's
2291 * not really any data available. In this situation though we don't
2292 * want to enter a busy loop between poll() reporting an EPOLLIN event
2293 * and read() returning -EAGAIN. Clearing the oa.pollin state here
2294 * effectively ensures we back off until the next hrtimer callback
2295 * before reporting another EPOLLIN event.
2296 */
2297 if (ret >= 0 || ret == -EAGAIN) {
2298 /* Maybe make ->pollin per-stream state if we support multiple
2299 * concurrent streams in the future.
2300 */ 2301 dev_priv->perf.oa.pollin = false; 2302 } 2303 2304 return ret; 2305 } 2306 2307 static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer) 2308 { 2309 struct drm_i915_private *dev_priv = 2310 container_of(hrtimer, typeof(*dev_priv), 2311 perf.oa.poll_check_timer); 2312 2313 if (oa_buffer_check_unlocked(dev_priv)) { 2314 dev_priv->perf.oa.pollin = true; 2315 wake_up(&dev_priv->perf.oa.poll_wq); 2316 } 2317 2318 hrtimer_forward_now(hrtimer, ns_to_ktime(POLL_PERIOD)); 2319 2320 return HRTIMER_RESTART; 2321 } 2322 2323 /** 2324 * i915_perf_poll_locked - poll_wait() with a suitable wait queue for stream 2325 * @dev_priv: i915 device instance 2326 * @stream: An i915 perf stream 2327 * @file: An i915 perf stream file 2328 * @wait: poll() state table 2329 * 2330 * For handling userspace polling on an i915 perf stream, this calls through to 2331 * &i915_perf_stream_ops->poll_wait to call poll_wait() with a wait queue that 2332 * will be woken for new stream data. 2333 * 2334 * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize 2335 * with any non-file-operation driver hooks. 2336 * 2337 * Returns: any poll events that are ready without sleeping 2338 */ 2339 static __poll_t i915_perf_poll_locked(struct drm_i915_private *dev_priv, 2340 struct i915_perf_stream *stream, 2341 struct file *file, 2342 poll_table *wait) 2343 { 2344 __poll_t events = 0; 2345 2346 stream->ops->poll_wait(stream, file, wait); 2347 2348 /* Note: we don't explicitly check whether there's something to read 2349 * here since this path may be very hot depending on what else 2350 * userspace is polling, or on the timeout in use. We rely solely on 2351 * the hrtimer/oa_poll_check_timer_cb to notify us when there are 2352 * samples to read. 2353 */ 2354 if (dev_priv->perf.oa.pollin) 2355 events |= EPOLLIN; 2356 2357 return events; 2358 } 2359 2360 /** 2361 * i915_perf_poll - call poll_wait() with a suitable wait queue for stream 2362 * @file: An i915 perf stream file 2363 * @wait: poll() state table 2364 * 2365 * For handling userspace polling on an i915 perf stream, this ensures 2366 * poll_wait() gets called with a wait queue that will be woken for new stream 2367 * data. 2368 * 2369 * Note: Implementation deferred to i915_perf_poll_locked() 2370 * 2371 * Returns: any poll events that are ready without sleeping 2372 */ 2373 static __poll_t i915_perf_poll(struct file *file, poll_table *wait) 2374 { 2375 struct i915_perf_stream *stream = file->private_data; 2376 struct drm_i915_private *dev_priv = stream->dev_priv; 2377 __poll_t ret; 2378 2379 mutex_lock(&dev_priv->perf.lock); 2380 ret = i915_perf_poll_locked(dev_priv, stream, file, wait); 2381 mutex_unlock(&dev_priv->perf.lock); 2382 2383 return ret; 2384 } 2385 2386 /** 2387 * i915_perf_enable_locked - handle `I915_PERF_IOCTL_ENABLE` ioctl 2388 * @stream: A disabled i915 perf stream 2389 * 2390 * [Re]enables the associated capture of data for this stream. 2391 * 2392 * If a stream was previously enabled then there's currently no intention 2393 * to provide userspace any guarantee about the preservation of previously 2394 * buffered data. 
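 *
 * As a minimal userspace sketch (assuming a stream fd opened with the
 * I915_PERF_FLAG_DISABLED flag), enabling capture is simply:
 *
 *   ioctl(stream_fd, I915_PERF_IOCTL_ENABLE, 0);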
2395 */
2396 static void i915_perf_enable_locked(struct i915_perf_stream *stream)
2397 {
2398 if (stream->enabled)
2399 return;
2400
2401 /* Allow stream->ops->enable() to refer to this */
2402 stream->enabled = true;
2403
2404 if (stream->ops->enable)
2405 stream->ops->enable(stream);
2406 }
2407
2408 /**
2409 * i915_perf_disable_locked - handle `I915_PERF_IOCTL_DISABLE` ioctl
2410 * @stream: An enabled i915 perf stream
2411 *
2412 * Disables the associated capture of data for this stream.
2413 *
2414 * The intention is that disabling and re-enabling a stream will ideally be
2415 * cheaper than destroying and re-opening a stream with the same configuration,
2416 * though there are no formal guarantees about what state or buffered data
2417 * must be retained between disabling and re-enabling a stream.
2418 *
2419 * Note: while a stream is disabled it's considered an error for userspace
2420 * to attempt to read from the stream (-EIO).
2421 */
2422 static void i915_perf_disable_locked(struct i915_perf_stream *stream)
2423 {
2424 if (!stream->enabled)
2425 return;
2426
2427 /* Allow stream->ops->disable() to refer to this */
2428 stream->enabled = false;
2429
2430 if (stream->ops->disable)
2431 stream->ops->disable(stream);
2432 }
2433
2434 /**
2435 * i915_perf_ioctl_locked - support ioctl() usage with i915 perf stream FDs
2436 * @stream: An i915 perf stream
2437 * @cmd: the ioctl request
2438 * @arg: the ioctl data
2439 *
2440 * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize
2441 * with any non-file-operation driver hooks.
2442 *
2443 * Returns: zero on success or a negative error code. Returns -EINVAL for
2444 * an unknown ioctl request.
2445 */
2446 static long i915_perf_ioctl_locked(struct i915_perf_stream *stream,
2447 unsigned int cmd,
2448 unsigned long arg)
2449 {
2450 switch (cmd) {
2451 case I915_PERF_IOCTL_ENABLE:
2452 i915_perf_enable_locked(stream);
2453 return 0;
2454 case I915_PERF_IOCTL_DISABLE:
2455 i915_perf_disable_locked(stream);
2456 return 0;
2457 }
2458
2459 return -EINVAL;
2460 }
2461
2462 /**
2463 * i915_perf_ioctl - support ioctl() usage with i915 perf stream FDs
2464 * @file: An i915 perf stream file
2465 * @cmd: the ioctl request
2466 * @arg: the ioctl data
2467 *
2468 * Implementation deferred to i915_perf_ioctl_locked().
2469 *
2470 * Returns: zero on success or a negative error code. Returns -EINVAL for
2471 * an unknown ioctl request.
2472 */
2473 static long i915_perf_ioctl(struct file *file,
2474 unsigned int cmd,
2475 unsigned long arg)
2476 {
2477 struct i915_perf_stream *stream = file->private_data;
2478 struct drm_i915_private *dev_priv = stream->dev_priv;
2479 long ret;
2480
2481 mutex_lock(&dev_priv->perf.lock);
2482 ret = i915_perf_ioctl_locked(stream, cmd, arg);
2483 mutex_unlock(&dev_priv->perf.lock);
2484
2485 return ret;
2486 }
2487
2488 /**
2489 * i915_perf_destroy_locked - destroy an i915 perf stream
2490 * @stream: An i915 perf stream
2491 *
2492 * Frees all resources associated with the given i915 perf @stream, disabling
2493 * any associated data capture in the process.
2494 *
2495 * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize
2496 * with any non-file-operation driver hooks.
2497 */
2498 static void i915_perf_destroy_locked(struct i915_perf_stream *stream)
2499 {
2500 if (stream->enabled)
2501 i915_perf_disable_locked(stream);
2502
2503 if (stream->ops->destroy)
2504 stream->ops->destroy(stream);
2505
2506 list_del(&stream->link);
2507
2508 if (stream->ctx)
2509 i915_gem_context_put(stream->ctx);
2510
2511 kfree(stream);
2512 }
2513
2514 /**
2515 * i915_perf_release - handles userspace close() of a stream file
2516 * @inode: anonymous inode associated with file
2517 * @file: An i915 perf stream file
2518 *
2519 * Cleans up any resources associated with an open i915 perf stream file.
2520 *
2521 * NB: close() can't really fail from the userspace point of view.
2522 *
2523 * Returns: zero on success or a negative error code.
2524 */
2525 static int i915_perf_release(struct inode *inode, struct file *file)
2526 {
2527 struct i915_perf_stream *stream = file->private_data;
2528 struct drm_i915_private *dev_priv = stream->dev_priv;
2529
2530 mutex_lock(&dev_priv->perf.lock);
2531 i915_perf_destroy_locked(stream);
2532 mutex_unlock(&dev_priv->perf.lock);
2533
2534 return 0;
2535 }
2536
2537
2538 static const struct file_operations fops = {
2539 .owner = THIS_MODULE,
2540 .llseek = no_llseek,
2541 .release = i915_perf_release,
2542 .poll = i915_perf_poll,
2543 .read = i915_perf_read,
2544 .unlocked_ioctl = i915_perf_ioctl,
2545 /* Our ioctls have no arguments, so it's safe to use the same function
2546 * to handle 32-bit compatibility.
2547 */
2548 .compat_ioctl = i915_perf_ioctl,
2549 };
2550
2551
2552 /**
2553 * i915_perf_open_ioctl_locked - DRM ioctl() for userspace to open a stream FD
2554 * @dev_priv: i915 device instance
2555 * @param: The open parameters passed to `DRM_I915_PERF_OPEN`
2556 * @props: individually validated u64 property value pairs
2557 * @file: drm file
2558 *
2559 * See i915_perf_open_ioctl() for interface details.
2560 *
2561 * Implements further stream config validation and stream initialization on
2562 * behalf of i915_perf_open_ioctl() with the &drm_i915_private->perf.lock mutex
2563 * taken to serialize with any non-file-operation driver hooks.
2564 *
2565 * Note: at this point the @props have only been validated in isolation and
2566 * it's still necessary to validate that the combination of properties makes
2567 * sense.
2568 *
2569 * In the case where userspace is interested in OA unit metrics then further
2570 * config validation and stream initialization details will be handled by
2571 * i915_oa_stream_init(). The code here should only validate config state that
2572 * will be relevant to all stream types / backends.
2573 *
2574 * Returns: a newly opened i915 perf stream file descriptor or a negative error code on failure.
2575 */ 2576 static int 2577 i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv, 2578 struct drm_i915_perf_open_param *param, 2579 struct perf_open_properties *props, 2580 struct drm_file *file) 2581 { 2582 struct i915_gem_context *specific_ctx = NULL; 2583 struct i915_perf_stream *stream = NULL; 2584 unsigned long f_flags = 0; 2585 bool privileged_op = true; 2586 int stream_fd; 2587 int ret; 2588 2589 if (props->single_context) { 2590 u32 ctx_handle = props->ctx_handle; 2591 struct drm_i915_file_private *file_priv = file->driver_priv; 2592 2593 specific_ctx = i915_gem_context_lookup(file_priv, ctx_handle); 2594 if (!specific_ctx) { 2595 DRM_DEBUG("Failed to look up context with ID %u for opening perf stream\n", 2596 ctx_handle); 2597 ret = -ENOENT; 2598 goto err; 2599 } 2600 } 2601 2602 /* 2603 * On Haswell the OA unit supports clock gating off for a specific 2604 * context and in this mode there's no visibility of metrics for the 2605 * rest of the system, which we consider acceptable for a 2606 * non-privileged client. 2607 * 2608 * For Gen8+ the OA unit no longer supports clock gating off for a 2609 * specific context and the kernel can't securely stop the counters 2610 * from updating as system-wide / global values. Even though we can 2611 * filter reports based on the included context ID we can't block 2612 * clients from seeing the raw / global counter values via 2613 * MI_REPORT_PERF_COUNT commands and so consider it a privileged op to 2614 * enable the OA unit by default. 2615 */ 2616 if (IS_HASWELL(dev_priv) && specific_ctx) 2617 privileged_op = false; 2618 2619 /* Similar to perf's kernel.perf_paranoid_cpu sysctl option 2620 * we check a dev.i915.perf_stream_paranoid sysctl option 2621 * to determine if it's ok to access system wide OA counters 2622 * without CAP_SYS_ADMIN privileges. 2623 */ 2624 if (privileged_op && 2625 i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) { 2626 DRM_DEBUG("Insufficient privileges to open system-wide i915 perf stream\n"); 2627 ret = -EACCES; 2628 goto err_ctx; 2629 } 2630 2631 stream = kzalloc(sizeof(*stream), GFP_KERNEL); 2632 if (!stream) { 2633 ret = -ENOMEM; 2634 goto err_ctx; 2635 } 2636 2637 stream->dev_priv = dev_priv; 2638 stream->ctx = specific_ctx; 2639 2640 ret = i915_oa_stream_init(stream, param, props); 2641 if (ret) 2642 goto err_alloc; 2643 2644 /* we avoid simply assigning stream->sample_flags = props->sample_flags 2645 * to have _stream_init check the combination of sample flags more 2646 * thoroughly, but still this is the expected result at this point. 
2647 */
2648 if (WARN_ON(stream->sample_flags != props->sample_flags)) {
2649 ret = -ENODEV;
2650 goto err_flags;
2651 }
2652
2653 list_add(&stream->link, &dev_priv->perf.streams);
2654
2655 if (param->flags & I915_PERF_FLAG_FD_CLOEXEC)
2656 f_flags |= O_CLOEXEC;
2657 if (param->flags & I915_PERF_FLAG_FD_NONBLOCK)
2658 f_flags |= O_NONBLOCK;
2659
2660 stream_fd = anon_inode_getfd("[i915_perf]", &fops, stream, f_flags);
2661 if (stream_fd < 0) {
2662 ret = stream_fd;
2663 goto err_open;
2664 }
2665
2666 if (!(param->flags & I915_PERF_FLAG_DISABLED))
2667 i915_perf_enable_locked(stream);
2668
2669 return stream_fd;
2670
2671 err_open:
2672 list_del(&stream->link);
2673 err_flags:
2674 if (stream->ops->destroy)
2675 stream->ops->destroy(stream);
2676 err_alloc:
2677 kfree(stream);
2678 err_ctx:
2679 if (specific_ctx)
2680 i915_gem_context_put(specific_ctx);
2681 err:
2682 return ret;
2683 }
2684
2685 static u64 oa_exponent_to_ns(struct drm_i915_private *dev_priv, int exponent)
2686 {
2687 return div64_u64(1000000000ULL * (2ULL << exponent),
2688 1000ULL * INTEL_INFO(dev_priv)->cs_timestamp_frequency_khz);
2689 }
2690
2691 /**
2692 * read_properties_unlocked - validate + copy userspace stream open properties
2693 * @dev_priv: i915 device instance
2694 * @uprops: The array of u64 key value pairs given by userspace
2695 * @n_props: The number of key value pairs expected in @uprops
2696 * @props: The stream configuration built up while validating properties
2697 *
2698 * Note this function only validates properties in isolation; it doesn't
2699 * validate that the combination of properties makes sense or that all
2700 * properties necessary for a particular kind of stream have been set.
2701 *
2702 * Note that there currently aren't any ordering requirements for properties so
2703 * we shouldn't validate or assume anything about ordering here. This doesn't
2704 * rule out defining new properties with ordering requirements in the future.
2705 */
2706 static int read_properties_unlocked(struct drm_i915_private *dev_priv,
2707 u64 __user *uprops,
2708 u32 n_props,
2709 struct perf_open_properties *props)
2710 {
2711 u64 __user *uprop = uprops;
2712 u32 i;
2713
2714 memset(props, 0, sizeof(struct perf_open_properties));
2715
2716 if (!n_props) {
2717 DRM_DEBUG("No i915 perf properties given\n");
2718 return -EINVAL;
2719 }
2720
2721 /* Considering that ID = 0 is reserved and assuming that we don't
2722 * (currently) expect any configurations to ever specify duplicate
2723 * values for a particular property ID then the last _PROP_MAX value is
2724 * one greater than the maximum number of properties we expect to get
2725 * from userspace.
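 *
 * For example (assuming the current property list, where five valid
 * property IDs precede DRM_I915_PERF_PROP_MAX), userspace can pass at
 * most five distinct (key, value) pairs; any n_props >= _PROP_MAX
 * necessarily repeats an ID or references an unknown one.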
2726 */ 2727 if (n_props >= DRM_I915_PERF_PROP_MAX) { 2728 DRM_DEBUG("More i915 perf properties specified than exist\n"); 2729 return -EINVAL; 2730 } 2731 2732 for (i = 0; i < n_props; i++) { 2733 u64 oa_period, oa_freq_hz; 2734 u64 id, value; 2735 int ret; 2736 2737 ret = get_user(id, uprop); 2738 if (ret) 2739 return ret; 2740 2741 ret = get_user(value, uprop + 1); 2742 if (ret) 2743 return ret; 2744 2745 if (id == 0 || id >= DRM_I915_PERF_PROP_MAX) { 2746 DRM_DEBUG("Unknown i915 perf property ID\n"); 2747 return -EINVAL; 2748 } 2749 2750 switch ((enum drm_i915_perf_property_id)id) { 2751 case DRM_I915_PERF_PROP_CTX_HANDLE: 2752 props->single_context = 1; 2753 props->ctx_handle = value; 2754 break; 2755 case DRM_I915_PERF_PROP_SAMPLE_OA: 2756 if (value) 2757 props->sample_flags |= SAMPLE_OA_REPORT; 2758 break; 2759 case DRM_I915_PERF_PROP_OA_METRICS_SET: 2760 if (value == 0) { 2761 DRM_DEBUG("Unknown OA metric set ID\n"); 2762 return -EINVAL; 2763 } 2764 props->metrics_set = value; 2765 break; 2766 case DRM_I915_PERF_PROP_OA_FORMAT: 2767 if (value == 0 || value >= I915_OA_FORMAT_MAX) { 2768 DRM_DEBUG("Out-of-range OA report format %llu\n", 2769 value); 2770 return -EINVAL; 2771 } 2772 if (!dev_priv->perf.oa.oa_formats[value].size) { 2773 DRM_DEBUG("Unsupported OA report format %llu\n", 2774 value); 2775 return -EINVAL; 2776 } 2777 props->oa_format = value; 2778 break; 2779 case DRM_I915_PERF_PROP_OA_EXPONENT: 2780 if (value > OA_EXPONENT_MAX) { 2781 DRM_DEBUG("OA timer exponent too high (> %u)\n", 2782 OA_EXPONENT_MAX); 2783 return -EINVAL; 2784 } 2785 2786 /* Theoretically we can program the OA unit to sample 2787 * e.g. every 160ns for HSW, 167ns for BDW/SKL or 104ns 2788 * for BXT. We don't allow such high sampling 2789 * frequencies by default unless root. 2790 */ 2791 2792 BUILD_BUG_ON(sizeof(oa_period) != 8); 2793 oa_period = oa_exponent_to_ns(dev_priv, value); 2794 2795 /* This check is primarily to ensure that oa_period <= 2796 * UINT32_MAX (before passing to do_div which only 2797 * accepts a u32 denominator), but we can also skip 2798 * checking anything < 1Hz which implicitly can't be 2799 * limited via an integer oa_max_sample_rate. 2800 */ 2801 if (oa_period <= NSEC_PER_SEC) { 2802 u64 tmp = NSEC_PER_SEC; 2803 do_div(tmp, oa_period); 2804 oa_freq_hz = tmp; 2805 } else 2806 oa_freq_hz = 0; 2807 2808 if (oa_freq_hz > i915_oa_max_sample_rate && 2809 !capable(CAP_SYS_ADMIN)) { 2810 DRM_DEBUG("OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without root privileges\n", 2811 i915_oa_max_sample_rate); 2812 return -EACCES; 2813 } 2814 2815 props->oa_periodic = true; 2816 props->oa_period_exponent = value; 2817 break; 2818 case DRM_I915_PERF_PROP_MAX: 2819 MISSING_CASE(id); 2820 return -EINVAL; 2821 } 2822 2823 uprop += 2; 2824 } 2825 2826 return 0; 2827 } 2828 2829 /** 2830 * i915_perf_open_ioctl - DRM ioctl() for userspace to open a stream FD 2831 * @dev: drm device 2832 * @data: ioctl data copied from userspace (unvalidated) 2833 * @file: drm file 2834 * 2835 * Validates the stream open parameters given by userspace including flags 2836 * and an array of u64 key, value pair properties. 2837 * 2838 * Very little is assumed up front about the nature of the stream being 2839 * opened (for instance we don't assume it's for periodic OA unit metrics). An 2840 * i915-perf stream is expected to be a suitable interface for other forms of 2841 * buffered data written by the GPU besides periodic OA metrics. 
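 *
 * As a purely illustrative example (the values are hypothetical), a
 * request for periodic OA sampling might pass a property array along
 * the lines of:
 *
 *   u64 properties[] = {
 *           DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *           DRM_I915_PERF_PROP_OA_METRICS_SET, config_id,
 *           DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *           DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *   };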
2842 * 2843 * Note we copy the properties from userspace outside of the i915 perf 2844 * mutex to avoid an awkward lockdep with mmap_sem. 2845 * 2846 * Most of the implementation details are handled by 2847 * i915_perf_open_ioctl_locked() after taking the &drm_i915_private->perf.lock 2848 * mutex for serializing with any non-file-operation driver hooks. 2849 * 2850 * Return: A newly opened i915 Perf stream file descriptor or negative 2851 * error code on failure. 2852 */ 2853 int i915_perf_open_ioctl(struct drm_device *dev, void *data, 2854 struct drm_file *file) 2855 { 2856 struct drm_i915_private *dev_priv = dev->dev_private; 2857 struct drm_i915_perf_open_param *param = data; 2858 struct perf_open_properties props; 2859 u32 known_open_flags; 2860 int ret; 2861 2862 if (!dev_priv->perf.initialized) { 2863 DRM_DEBUG("i915 perf interface not available for this system\n"); 2864 return -ENOTSUPP; 2865 } 2866 2867 known_open_flags = I915_PERF_FLAG_FD_CLOEXEC | 2868 I915_PERF_FLAG_FD_NONBLOCK | 2869 I915_PERF_FLAG_DISABLED; 2870 if (param->flags & ~known_open_flags) { 2871 DRM_DEBUG("Unknown drm_i915_perf_open_param flag\n"); 2872 return -EINVAL; 2873 } 2874 2875 ret = read_properties_unlocked(dev_priv, 2876 u64_to_user_ptr(param->properties_ptr), 2877 param->num_properties, 2878 &props); 2879 if (ret) 2880 return ret; 2881 2882 mutex_lock(&dev_priv->perf.lock); 2883 ret = i915_perf_open_ioctl_locked(dev_priv, param, &props, file); 2884 mutex_unlock(&dev_priv->perf.lock); 2885 2886 return ret; 2887 } 2888 2889 /** 2890 * i915_perf_register - exposes i915-perf to userspace 2891 * @dev_priv: i915 device instance 2892 * 2893 * In particular OA metric sets are advertised under a sysfs metrics/ 2894 * directory allowing userspace to enumerate valid IDs that can be 2895 * used to open an i915-perf stream. 2896 */ 2897 void i915_perf_register(struct drm_i915_private *dev_priv) 2898 { 2899 int ret; 2900 2901 if (!dev_priv->perf.initialized) 2902 return; 2903 2904 /* To be sure we're synchronized with an attempted 2905 * i915_perf_open_ioctl(); considering that we register after 2906 * being exposed to userspace. 
2907 */
2908 mutex_lock(&dev_priv->perf.lock);
2909
2910 dev_priv->perf.metrics_kobj =
2911 kobject_create_and_add("metrics",
2912 &dev_priv->drm.primary->kdev->kobj);
2913 if (!dev_priv->perf.metrics_kobj)
2914 goto exit;
2915
2916 sysfs_attr_init(&dev_priv->perf.oa.test_config.sysfs_metric_id.attr);
2917
2918 if (IS_HASWELL(dev_priv)) {
2919 i915_perf_load_test_config_hsw(dev_priv);
2920 } else if (IS_BROADWELL(dev_priv)) {
2921 i915_perf_load_test_config_bdw(dev_priv);
2922 } else if (IS_CHERRYVIEW(dev_priv)) {
2923 i915_perf_load_test_config_chv(dev_priv);
2924 } else if (IS_SKYLAKE(dev_priv)) {
2925 if (IS_SKL_GT2(dev_priv))
2926 i915_perf_load_test_config_sklgt2(dev_priv);
2927 else if (IS_SKL_GT3(dev_priv))
2928 i915_perf_load_test_config_sklgt3(dev_priv);
2929 else if (IS_SKL_GT4(dev_priv))
2930 i915_perf_load_test_config_sklgt4(dev_priv);
2931 } else if (IS_BROXTON(dev_priv)) {
2932 i915_perf_load_test_config_bxt(dev_priv);
2933 } else if (IS_KABYLAKE(dev_priv)) {
2934 if (IS_KBL_GT2(dev_priv))
2935 i915_perf_load_test_config_kblgt2(dev_priv);
2936 else if (IS_KBL_GT3(dev_priv))
2937 i915_perf_load_test_config_kblgt3(dev_priv);
2938 } else if (IS_GEMINILAKE(dev_priv)) {
2939 i915_perf_load_test_config_glk(dev_priv);
2940 } else if (IS_COFFEELAKE(dev_priv)) {
2941 if (IS_CFL_GT2(dev_priv))
2942 i915_perf_load_test_config_cflgt2(dev_priv);
2943 else if (IS_CFL_GT3(dev_priv))
2944 i915_perf_load_test_config_cflgt3(dev_priv);
2945 } else if (IS_CANNONLAKE(dev_priv)) {
2946 i915_perf_load_test_config_cnl(dev_priv);
2947 } else if (IS_ICELAKE(dev_priv)) {
2948 i915_perf_load_test_config_icl(dev_priv);
2949 }
2950
2951 if (dev_priv->perf.oa.test_config.id == 0)
2952 goto sysfs_error;
2953
2954 ret = sysfs_create_group(dev_priv->perf.metrics_kobj,
2955 &dev_priv->perf.oa.test_config.sysfs_metric);
2956 if (ret)
2957 goto sysfs_error;
2958
2959 atomic_set(&dev_priv->perf.oa.test_config.ref_count, 1);
2960
2961 goto exit;
2962
2963 sysfs_error:
2964 kobject_put(dev_priv->perf.metrics_kobj);
2965 dev_priv->perf.metrics_kobj = NULL;
2966
2967 exit:
2968 mutex_unlock(&dev_priv->perf.lock);
2969 }
2970
2971 /**
2972 * i915_perf_unregister - hide i915-perf from userspace
2973 * @dev_priv: i915 device instance
2974 *
2975 * i915-perf state cleanup is split up into an 'unregister' and
2976 * 'deinit' phase where the interface is first hidden from
2977 * userspace by i915_perf_unregister() before cleaning up
2978 * remaining state in i915_perf_fini().
2979 */
2980 void i915_perf_unregister(struct drm_i915_private *dev_priv)
2981 {
2982 if (!dev_priv->perf.metrics_kobj)
2983 return;
2984
2985 sysfs_remove_group(dev_priv->perf.metrics_kobj,
2986 &dev_priv->perf.oa.test_config.sysfs_metric);
2987
2988 kobject_put(dev_priv->perf.metrics_kobj);
2989 dev_priv->perf.metrics_kobj = NULL;
2990 }
2991
2992 static bool gen8_is_valid_flex_addr(struct drm_i915_private *dev_priv, u32 addr)
2993 {
2994 static const i915_reg_t flex_eu_regs[] = {
2995 EU_PERF_CNTL0,
2996 EU_PERF_CNTL1,
2997 EU_PERF_CNTL2,
2998 EU_PERF_CNTL3,
2999 EU_PERF_CNTL4,
3000 EU_PERF_CNTL5,
3001 EU_PERF_CNTL6,
3002 };
3003 int i;
3004
3005 for (i = 0; i < ARRAY_SIZE(flex_eu_regs); i++) {
3006 if (i915_mmio_reg_offset(flex_eu_regs[i]) == addr)
3007 return true;
3008 }
3009 return false;
3010 }
3011
3012 static bool gen7_is_valid_b_counter_addr(struct drm_i915_private *dev_priv, u32 addr)
3013 {
3014 return (addr >= i915_mmio_reg_offset(OASTARTTRIG1) &&
3015 addr <= i915_mmio_reg_offset(OASTARTTRIG8)) ||
3016 (addr >= i915_mmio_reg_offset(OAREPORTTRIG1) &&
3017 addr <= i915_mmio_reg_offset(OAREPORTTRIG8)) ||
3018 (addr >= i915_mmio_reg_offset(OACEC0_0) &&
3019 addr <= i915_mmio_reg_offset(OACEC7_1));
3020 }
3021
3022 static bool gen7_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
3023 {
3024 return addr == i915_mmio_reg_offset(HALF_SLICE_CHICKEN2) ||
3025 (addr >= i915_mmio_reg_offset(MICRO_BP0_0) &&
3026 addr <= i915_mmio_reg_offset(NOA_WRITE)) ||
3027 (addr >= i915_mmio_reg_offset(OA_PERFCNT1_LO) &&
3028 addr <= i915_mmio_reg_offset(OA_PERFCNT2_HI)) ||
3029 (addr >= i915_mmio_reg_offset(OA_PERFMATRIX_LO) &&
3030 addr <= i915_mmio_reg_offset(OA_PERFMATRIX_HI));
3031 }
3032
3033 static bool gen8_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
3034 {
3035 return gen7_is_valid_mux_addr(dev_priv, addr) ||
3036 addr == i915_mmio_reg_offset(WAIT_FOR_RC6_EXIT) ||
3037 (addr >= i915_mmio_reg_offset(RPM_CONFIG0) &&
3038 addr <= i915_mmio_reg_offset(NOA_CONFIG(8)));
3039 }
3040
3041 static bool gen10_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
3042 {
3043 return gen8_is_valid_mux_addr(dev_priv, addr) ||
3044 (addr >= i915_mmio_reg_offset(OA_PERFCNT3_LO) &&
3045 addr <= i915_mmio_reg_offset(OA_PERFCNT4_HI));
3046 }
3047
3048 static bool hsw_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
3049 {
3050 return gen7_is_valid_mux_addr(dev_priv, addr) ||
3051 (addr >= 0x25100 && addr <= 0x2FF90) ||
3052 (addr >= i915_mmio_reg_offset(HSW_MBVID2_NOA0) &&
3053 addr <= i915_mmio_reg_offset(HSW_MBVID2_NOA9)) ||
3054 addr == i915_mmio_reg_offset(HSW_MBVID2_MISR0);
3055 }
3056
3057 static bool chv_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
3058 {
3059 return gen7_is_valid_mux_addr(dev_priv, addr) ||
3060 (addr >= 0x182300 && addr <= 0x1823A4);
3061 }
3062
3063 static u32 mask_reg_value(u32 reg, u32 val)
3064 {
3065 /* HALF_SLICE_CHICKEN2 is programmed with the
3066 * WaDisableSTUnitPowerOptimization workaround. Make sure the value
3067 * programmed by userspace doesn't change this.
3068 */
3069 if (i915_mmio_reg_offset(HALF_SLICE_CHICKEN2) == reg)
3070 val = val & ~_MASKED_BIT_ENABLE(GEN8_ST_PO_DISABLE);
3071
3072 /* WAIT_FOR_RC6_EXIT has only one bit fulfilling the function
3073 * indicated by its name and a bunch of selection fields used by OA
3074 * configs.
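 *
 * As an editorial reminder of the masked-register convention relied on
 * here, _MASKED_BIT_ENABLE(bit) expands to roughly (bit << 16) | bit,
 * so a write carries both the bit and its enable mask in the upper 16
 * bits, and clearing with ~_MASKED_BIT_ENABLE(bit) strips both.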
3075 */
3076 if (i915_mmio_reg_offset(WAIT_FOR_RC6_EXIT) == reg)
3077 val = val & ~_MASKED_BIT_ENABLE(HSW_WAIT_FOR_RC6_EXIT_ENABLE);
3078
3079 return val;
3080 }
3081
3082 static struct i915_oa_reg *alloc_oa_regs(struct drm_i915_private *dev_priv,
3083 bool (*is_valid)(struct drm_i915_private *dev_priv, u32 addr),
3084 u32 __user *regs,
3085 u32 n_regs)
3086 {
3087 struct i915_oa_reg *oa_regs;
3088 int err;
3089 u32 i;
3090
3091 if (!n_regs)
3092 return NULL;
3093
3094 if (!access_ok(VERIFY_READ, regs, n_regs * sizeof(u32) * 2))
3095 return ERR_PTR(-EFAULT);
3096
3097 /* No is_valid function means we're not allowing any register to be programmed. */
3098 GEM_BUG_ON(!is_valid);
3099 if (!is_valid)
3100 return ERR_PTR(-EINVAL);
3101
3102 oa_regs = kmalloc_array(n_regs, sizeof(*oa_regs), GFP_KERNEL);
3103 if (!oa_regs)
3104 return ERR_PTR(-ENOMEM);
3105
3106 for (i = 0; i < n_regs; i++) {
3107 u32 addr, value;
3108
3109 err = get_user(addr, regs);
3110 if (err)
3111 goto addr_err;
3112
3113 if (!is_valid(dev_priv, addr)) {
3114 DRM_DEBUG("Invalid oa_reg address: %X\n", addr);
3115 err = -EINVAL;
3116 goto addr_err;
3117 }
3118
3119 err = get_user(value, regs + 1);
3120 if (err)
3121 goto addr_err;
3122
3123 oa_regs[i].addr = _MMIO(addr);
3124 oa_regs[i].value = mask_reg_value(addr, value);
3125
3126 regs += 2;
3127 }
3128
3129 return oa_regs;
3130
3131 addr_err:
3132 kfree(oa_regs);
3133 return ERR_PTR(err);
3134 }
3135
3136 static ssize_t show_dynamic_id(struct device *dev,
3137 struct device_attribute *attr,
3138 char *buf)
3139 {
3140 struct i915_oa_config *oa_config =
3141 container_of(attr, typeof(*oa_config), sysfs_metric_id);
3142
3143 return sprintf(buf, "%d\n", oa_config->id);
3144 }
3145
3146 static int create_dynamic_oa_sysfs_entry(struct drm_i915_private *dev_priv,
3147 struct i915_oa_config *oa_config)
3148 {
3149 sysfs_attr_init(&oa_config->sysfs_metric_id.attr);
3150 oa_config->sysfs_metric_id.attr.name = "id";
3151 oa_config->sysfs_metric_id.attr.mode = S_IRUGO;
3152 oa_config->sysfs_metric_id.show = show_dynamic_id;
3153 oa_config->sysfs_metric_id.store = NULL;
3154
3155 oa_config->attrs[0] = &oa_config->sysfs_metric_id.attr;
3156 oa_config->attrs[1] = NULL;
3157
3158 oa_config->sysfs_metric.name = oa_config->uuid;
3159 oa_config->sysfs_metric.attrs = oa_config->attrs;
3160
3161 return sysfs_create_group(dev_priv->perf.metrics_kobj,
3162 &oa_config->sysfs_metric);
3163 }
3164
3165 /**
3166 * i915_perf_add_config_ioctl - DRM ioctl() for userspace to add a new OA config
3167 * @dev: drm device
3168 * @data: ioctl data (pointer to struct drm_i915_perf_oa_config) copied from
3169 * userspace (unvalidated)
3170 * @file: drm file
3171 *
3172 * Validates the submitted OA registers to be saved into a new OA config that
3173 * can then be used for programming the OA unit and its NOA network.
3174 *
3175 * Returns: A newly allocated config number to be used with the perf open
3176 * ioctl or a negative error code on failure.
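 *
 * As a hypothetical userspace sketch (names and counts illustrative
 * only), the ioctl argument is filled out along these lines:
 *
 *   struct drm_i915_perf_oa_config config = { };
 *
 *   memcpy(config.uuid, uuid_string, sizeof(config.uuid));
 *   config.n_mux_regs = n_mux;
 *   config.mux_regs_ptr = (uintptr_t)mux_regs;
 *   config.n_boolean_regs = n_bool;
 *   config.boolean_regs_ptr = (uintptr_t)bool_regs;
 *
 * where each regs buffer is a sequence of (address, value) u32 pairs.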
3177 */
3178 int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
3179 struct drm_file *file)
3180 {
3181 struct drm_i915_private *dev_priv = dev->dev_private;
3182 struct drm_i915_perf_oa_config *args = data;
3183 struct i915_oa_config *oa_config, *tmp;
3184 int err, id;
3185
3186 if (!dev_priv->perf.initialized) {
3187 DRM_DEBUG("i915 perf interface not available for this system\n");
3188 return -ENOTSUPP;
3189 }
3190
3191 if (!dev_priv->perf.metrics_kobj) {
3192 DRM_DEBUG("OA metrics weren't advertised via sysfs\n");
3193 return -EINVAL;
3194 }
3195
3196 if (i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
3197 DRM_DEBUG("Insufficient privileges to add i915 OA config\n");
3198 return -EACCES;
3199 }
3200
3201 if ((!args->mux_regs_ptr || !args->n_mux_regs) &&
3202 (!args->boolean_regs_ptr || !args->n_boolean_regs) &&
3203 (!args->flex_regs_ptr || !args->n_flex_regs)) {
3204 DRM_DEBUG("No OA registers given\n");
3205 return -EINVAL;
3206 }
3207
3208 oa_config = kzalloc(sizeof(*oa_config), GFP_KERNEL);
3209 if (!oa_config) {
3210 DRM_DEBUG("Failed to allocate memory for the OA config\n");
3211 return -ENOMEM;
3212 }
3213
3214 atomic_set(&oa_config->ref_count, 1);
3215
3216 if (!uuid_is_valid(args->uuid)) {
3217 DRM_DEBUG("Invalid uuid format for OA config\n");
3218 err = -EINVAL;
3219 goto reg_err;
3220 }
3221
3222 /* Last character in oa_config->uuid will be 0 because oa_config was
3223 * allocated with kzalloc().
3224 */
3225 memcpy(oa_config->uuid, args->uuid, sizeof(args->uuid));
3226
3227 oa_config->mux_regs_len = args->n_mux_regs;
3228 oa_config->mux_regs =
3229 alloc_oa_regs(dev_priv,
3230 dev_priv->perf.oa.ops.is_valid_mux_reg,
3231 u64_to_user_ptr(args->mux_regs_ptr),
3232 args->n_mux_regs);
3233
3234 if (IS_ERR(oa_config->mux_regs)) {
3235 DRM_DEBUG("Failed to create OA config for mux_regs\n");
3236 err = PTR_ERR(oa_config->mux_regs);
3237 goto reg_err;
3238 }
3239
3240 oa_config->b_counter_regs_len = args->n_boolean_regs;
3241 oa_config->b_counter_regs =
3242 alloc_oa_regs(dev_priv,
3243 dev_priv->perf.oa.ops.is_valid_b_counter_reg,
3244 u64_to_user_ptr(args->boolean_regs_ptr),
3245 args->n_boolean_regs);
3246
3247 if (IS_ERR(oa_config->b_counter_regs)) {
3248 DRM_DEBUG("Failed to create OA config for b_counter_regs\n");
3249 err = PTR_ERR(oa_config->b_counter_regs);
3250 goto reg_err;
3251 }
3252
3253 if (INTEL_GEN(dev_priv) < 8) {
3254 if (args->n_flex_regs != 0) {
3255 err = -EINVAL;
3256 goto reg_err;
3257 }
3258 } else {
3259 oa_config->flex_regs_len = args->n_flex_regs;
3260 oa_config->flex_regs =
3261 alloc_oa_regs(dev_priv,
3262 dev_priv->perf.oa.ops.is_valid_flex_reg,
3263 u64_to_user_ptr(args->flex_regs_ptr),
3264 args->n_flex_regs);
3265
3266 if (IS_ERR(oa_config->flex_regs)) {
3267 DRM_DEBUG("Failed to create OA config for flex_regs\n");
3268 err = PTR_ERR(oa_config->flex_regs);
3269 goto reg_err;
3270 }
3271 }
3272
3273 err = mutex_lock_interruptible(&dev_priv->perf.metrics_lock);
3274 if (err)
3275 goto reg_err;
3276
3277 /* We shouldn't have too many configs, so this iteration shouldn't be
3278 * too costly.
/**
 * i915_perf_remove_config_ioctl - DRM ioctl() for userspace to remove an OA config
 * @dev: drm device
 * @data: ioctl data (pointer to u64 integer) copied from userspace
 * @file: drm file
 *
 * Configs can be removed while being used; they will stop appearing in sysfs
 * and their content will be freed when the stream using the config is closed.
 *
 * Returns: 0 on success or a negative error code on failure.
 */
int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u64 *arg = data;
	struct i915_oa_config *oa_config;
	int ret;

	if (!dev_priv->perf.initialized) {
		DRM_DEBUG("i915 perf interface not available for this system\n");
		return -ENOTSUPP;
	}

	if (i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
		DRM_DEBUG("Insufficient privileges to remove i915 OA config\n");
		return -EACCES;
	}

	ret = mutex_lock_interruptible(&dev_priv->perf.metrics_lock);
	if (ret)
		goto lock_err;

	oa_config = idr_find(&dev_priv->perf.metrics_idr, *arg);
	if (!oa_config) {
		DRM_DEBUG("Failed to remove unknown OA config\n");
		ret = -ENOENT;
		goto config_err;
	}

	GEM_BUG_ON(*arg != oa_config->id);

	sysfs_remove_group(dev_priv->perf.metrics_kobj,
			   &oa_config->sysfs_metric);

	idr_remove(&dev_priv->perf.metrics_idr, *arg);

	DRM_DEBUG("Removed config %s id=%i\n", oa_config->uuid, oa_config->id);

	put_oa_config(dev_priv, oa_config);

config_err:
	mutex_unlock(&dev_priv->perf.metrics_lock);
lock_err:
	return ret;
}
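/*
 * Illustrative sketch (userspace side, not part of this file): removing a
 * config by the id previously returned from the add ioctl.
 *
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *	#include <drm/i915_drm.h>
 *
 *	static int remove_oa_config(int drm_fd, uint64_t config_id)
 *	{
 *		// The ioctl takes a pointer to the u64 config id.
 *		return ioctl(drm_fd, DRM_IOCTL_I915_PERF_REMOVE_CONFIG,
 *			     &config_id);
 *	}
 */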
static struct ctl_table oa_table[] = {
	{
		.procname = "perf_stream_paranoid",
		.data = &i915_perf_stream_paranoid,
		.maxlen = sizeof(i915_perf_stream_paranoid),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = &zero,
		.extra2 = &one,
	},
	{
		.procname = "oa_max_sample_rate",
		.data = &i915_oa_max_sample_rate,
		.maxlen = sizeof(i915_oa_max_sample_rate),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = &zero,
		.extra2 = &oa_sample_rate_hard_limit,
	},
	{}
};

static struct ctl_table i915_root[] = {
	{
		.procname = "i915",
		.maxlen = 0,
		.mode = 0555,
		.child = oa_table,
	},
	{}
};

static struct ctl_table dev_root[] = {
	{
		.procname = "dev",
		.maxlen = 0,
		.mode = 0555,
		.child = i915_root,
	},
	{}
};

/**
 * i915_perf_init - initialize i915-perf state on module load
 * @dev_priv: i915 device instance
 *
 * Initializes i915-perf state without exposing anything to userspace.
 *
 * Note: i915-perf initialization is split into an 'init' and 'register'
 * phase, with i915_perf_register() exposing state to userspace.
 */
void i915_perf_init(struct drm_i915_private *dev_priv)
{
	if (IS_HASWELL(dev_priv)) {
		dev_priv->perf.oa.ops.is_valid_b_counter_reg =
			gen7_is_valid_b_counter_addr;
		dev_priv->perf.oa.ops.is_valid_mux_reg =
			hsw_is_valid_mux_addr;
		dev_priv->perf.oa.ops.is_valid_flex_reg = NULL;
		dev_priv->perf.oa.ops.init_oa_buffer = gen7_init_oa_buffer;
		dev_priv->perf.oa.ops.enable_metric_set = hsw_enable_metric_set;
		dev_priv->perf.oa.ops.disable_metric_set = hsw_disable_metric_set;
		dev_priv->perf.oa.ops.oa_enable = gen7_oa_enable;
		dev_priv->perf.oa.ops.oa_disable = gen7_oa_disable;
		dev_priv->perf.oa.ops.read = gen7_oa_read;
		dev_priv->perf.oa.ops.oa_hw_tail_read =
			gen7_oa_hw_tail_read;

		dev_priv->perf.oa.oa_formats = hsw_oa_formats;
	} else if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
		/* Note: although we could theoretically also support the
		 * legacy ringbuffer mode on BDW (and earlier iterations of
		 * this driver, before upstreaming, did this), it doesn't
		 * seem worth the complexity to maintain now that BDW+
		 * enables execlist mode by default.
		 */
		dev_priv->perf.oa.oa_formats = gen8_plus_oa_formats;

		dev_priv->perf.oa.ops.init_oa_buffer = gen8_init_oa_buffer;
		dev_priv->perf.oa.ops.oa_enable = gen8_oa_enable;
		dev_priv->perf.oa.ops.oa_disable = gen8_oa_disable;
		dev_priv->perf.oa.ops.read = gen8_oa_read;
		dev_priv->perf.oa.ops.oa_hw_tail_read = gen8_oa_hw_tail_read;

		if (IS_GEN8(dev_priv) || IS_GEN9(dev_priv)) {
			dev_priv->perf.oa.ops.is_valid_b_counter_reg =
				gen7_is_valid_b_counter_addr;
			dev_priv->perf.oa.ops.is_valid_mux_reg =
				gen8_is_valid_mux_addr;
			dev_priv->perf.oa.ops.is_valid_flex_reg =
				gen8_is_valid_flex_addr;

			if (IS_CHERRYVIEW(dev_priv)) {
				dev_priv->perf.oa.ops.is_valid_mux_reg =
					chv_is_valid_mux_addr;
			}

			dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set;
			dev_priv->perf.oa.ops.disable_metric_set = gen8_disable_metric_set;

			if (IS_GEN8(dev_priv)) {
				dev_priv->perf.oa.ctx_oactxctrl_offset = 0x120;
				dev_priv->perf.oa.ctx_flexeu0_offset = 0x2ce;

				dev_priv->perf.oa.gen8_valid_ctx_bit = (1 << 25);
			} else {
				dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128;
				dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de;

				dev_priv->perf.oa.gen8_valid_ctx_bit = (1 << 16);
			}
		} else if (IS_GEN(dev_priv, 10, 11)) {
			dev_priv->perf.oa.ops.is_valid_b_counter_reg =
				gen7_is_valid_b_counter_addr;
			dev_priv->perf.oa.ops.is_valid_mux_reg =
				gen10_is_valid_mux_addr;
			dev_priv->perf.oa.ops.is_valid_flex_reg =
				gen8_is_valid_flex_addr;

			dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set;
			dev_priv->perf.oa.ops.disable_metric_set = gen10_disable_metric_set;

			dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128;
			dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de;

			dev_priv->perf.oa.gen8_valid_ctx_bit = (1 << 16);
		}
	}

	if (dev_priv->perf.oa.ops.enable_metric_set) {
		hrtimer_init(&dev_priv->perf.oa.poll_check_timer,
			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		dev_priv->perf.oa.poll_check_timer.function = oa_poll_check_timer_cb;
		init_waitqueue_head(&dev_priv->perf.oa.poll_wq);

		INIT_LIST_HEAD(&dev_priv->perf.streams);
		mutex_init(&dev_priv->perf.lock);
		spin_lock_init(&dev_priv->perf.oa.oa_buffer.ptr_lock);

		oa_sample_rate_hard_limit = 1000 *
			(INTEL_INFO(dev_priv)->cs_timestamp_frequency_khz / 2);
		dev_priv->perf.sysctl_header = register_sysctl_table(dev_root);

		mutex_init(&dev_priv->perf.metrics_lock);
		idr_init(&dev_priv->perf.metrics_idr);

		dev_priv->perf.initialized = true;
	}
}
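/*
 * With dev_root registered by i915_perf_init() above, the two tunables
 * appear under /proc/sys (shell shown for illustration):
 *
 *	# sysctl dev.i915.perf_stream_paranoid
 *	# echo 0 > /proc/sys/dev/i915/perf_stream_paranoid
 *	# cat /proc/sys/dev/i915/oa_max_sample_rate
 */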
static int destroy_config(int id, void *p, void *data)
{
	struct drm_i915_private *dev_priv = data;
	struct i915_oa_config *oa_config = p;

	put_oa_config(dev_priv, oa_config);

	return 0;
}

/**
 * i915_perf_fini - Counterpart to i915_perf_init()
 * @dev_priv: i915 device instance
 */
void i915_perf_fini(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->perf.initialized)
		return;

	idr_for_each(&dev_priv->perf.metrics_idr, destroy_config, dev_priv);
	idr_destroy(&dev_priv->perf.metrics_idr);

	unregister_sysctl_table(dev_priv->perf.sysctl_header);

	memset(&dev_priv->perf.oa.ops, 0, sizeof(dev_priv->perf.oa.ops));

	dev_priv->perf.initialized = false;
}
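/*
 * Simplified sketch of where init/fini sit in the driver lifecycle (the
 * real call sites live elsewhere in the i915 module; ordering shown here
 * is illustrative):
 *
 *	i915_perf_init(dev_priv);	// set up state; nothing user-visible
 *	i915_perf_register(dev_priv);	// advertise metrics via sysfs
 *	...
 *	i915_perf_unregister(dev_priv);	// remove sysfs entries
 *	i915_perf_fini(dev_priv);	// free configs, drop sysctl table
 */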